-rw-r--r--.gitignore1
-rw-r--r--CREDITS2
-rw-r--r--Documentation/Changes31
-rw-r--r--Documentation/CodingStyle43
-rw-r--r--Documentation/DocBook/.gitignore6
-rw-r--r--Documentation/DocBook/kernel-api.tmpl6
-rw-r--r--Documentation/DocBook/kernel-locking.tmpl22
-rw-r--r--Documentation/RCU/rcuref.txt87
-rw-r--r--Documentation/SubmittingDrivers24
-rw-r--r--Documentation/SubmittingPatches63
-rw-r--r--Documentation/applying-patches.txt81
-rw-r--r--Documentation/block/biodoc.txt10
-rw-r--r--Documentation/block/stat.txt82
-rw-r--r--Documentation/cpu-hotplug.txt357
-rw-r--r--Documentation/cpusets.txt165
-rw-r--r--Documentation/dvb/avermedia.txt3
-rw-r--r--Documentation/dvb/get_dvb_firmware23
-rw-r--r--Documentation/dvb/ttusb-dec.txt3
-rw-r--r--Documentation/fb/cyblafb/bugs1
-rw-r--r--Documentation/fb/cyblafb/fb.modes57
-rw-r--r--Documentation/fb/cyblafb/performance1
-rw-r--r--Documentation/fb/cyblafb/todo5
-rw-r--r--Documentation/fb/cyblafb/usage33
-rw-r--r--Documentation/fb/cyblafb/whatsnew29
-rw-r--r--Documentation/feature-removal-schedule.txt20
-rw-r--r--Documentation/filesystems/00-INDEX6
-rw-r--r--Documentation/filesystems/configfs/configfs.txt434
-rw-r--r--Documentation/filesystems/configfs/configfs_example.c474
-rw-r--r--Documentation/filesystems/dlmfs.txt130
-rw-r--r--Documentation/filesystems/ext3.txt181
-rw-r--r--Documentation/filesystems/ocfs2.txt55
-rw-r--r--Documentation/filesystems/proc.txt19
-rw-r--r--Documentation/filesystems/ramfs-rootfs-initramfs.txt72
-rw-r--r--Documentation/filesystems/relayfs.txt126
-rw-r--r--Documentation/filesystems/spufs.txt521
-rw-r--r--Documentation/filesystems/sysfs-pci.txt21
-rw-r--r--Documentation/hrtimers.txt178
-rw-r--r--Documentation/hwmon/w83627hf  19
-rw-r--r--Documentation/i2c/busses/i2c-nforce2  3
-rw-r--r--Documentation/i2c/busses/i2c-parport1
-rw-r--r--Documentation/i2c/porting-clients90
-rw-r--r--Documentation/i2c/writing-clients20
-rw-r--r--Documentation/i2o/ioctl  2
-rw-r--r--Documentation/kbuild/makefiles.txt4
-rw-r--r--Documentation/kbuild/modules.txt40
-rw-r--r--Documentation/kdump/gdbmacros.txt22
-rw-r--r--Documentation/kdump/kdump.txt149
-rw-r--r--Documentation/kernel-parameters.txt66
-rw-r--r--Documentation/keys-request-key.txt22
-rw-r--r--Documentation/keys.txt61
-rw-r--r--Documentation/kprobes.txt3
-rw-r--r--Documentation/laptop-mode.txt6
-rw-r--r--Documentation/locks.txt17
-rw-r--r--Documentation/md.txt120
-rw-r--r--Documentation/mutex-design.txt135
-rw-r--r--Documentation/networking/bonding.txt2
-rw-r--r--Documentation/networking/sk98lin.txt2
-rw-r--r--Documentation/oops-tracing.txt8
-rw-r--r--Documentation/pci-error-recovery.txt246
-rw-r--r--Documentation/pm.txt2
-rw-r--r--Documentation/power/interface.txt11
-rw-r--r--Documentation/power/swsusp.txt9
-rw-r--r--Documentation/powerpc/00-INDEX10
-rw-r--r--Documentation/stable_kernel_rules.txt60
-rw-r--r--Documentation/sysctl/vm.txt20
-rw-r--r--Documentation/video4linux/CARDLIST.bttv  2
-rw-r--r--Documentation/video4linux/CARDLIST.cx88  12
-rw-r--r--Documentation/video4linux/CARDLIST.saa7134  5
-rw-r--r--Documentation/video4linux/CARDLIST.tuner  6
-rw-r--r--Documentation/x86_64/boot-options.txt2
-rw-r--r--Documentation/x86_64/cpu-hotplug-spec21
-rw-r--r--Kbuild5
-rw-r--r--MAINTAINERS81
-rw-r--r--Makefile99
-rw-r--r--README7
-rw-r--r--arch/alpha/Kconfig16
-rw-r--r--arch/alpha/kernel/alpha_ksyms.c2
-rw-r--r--arch/alpha/kernel/irq.c630
-rw-r--r--arch/alpha/kernel/pci-noop.c1
-rw-r--r--arch/alpha/kernel/process.c34
-rw-r--r--arch/alpha/kernel/ptrace.c71
-rw-r--r--arch/alpha/kernel/smp.c2
-rw-r--r--arch/alpha/kernel/sys_alcor.c3
-rw-r--r--arch/alpha/kernel/sys_sio.c6
-rw-r--r--arch/alpha/mm/init.c1
-rw-r--r--arch/arm/Kconfig31
-rw-r--r--arch/arm/Makefile17
-rw-r--r--arch/arm/boot/Makefile2
-rw-r--r--arch/arm/boot/compressed/Makefile8
-rw-r--r--arch/arm/boot/compressed/head-at91rm9200.S57
-rw-r--r--arch/arm/boot/compressed/head-epxa10db.S5
-rw-r--r--arch/arm/boot/compressed/head.S46
-rw-r--r--arch/arm/common/Kconfig3
-rw-r--r--arch/arm/common/Makefile2
-rw-r--r--arch/arm/common/rtctime.c16
-rw-r--r--arch/arm/common/scoop.c3
-rw-r--r--arch/arm/common/sharpsl_pm.c839
-rw-r--r--arch/arm/configs/assabet_defconfig1
-rw-r--r--arch/arm/configs/at91rm9200dk_defconfig1009
-rw-r--r--arch/arm/configs/at91rm9200ek_defconfig998
-rw-r--r--arch/arm/configs/badge4_defconfig1
-rw-r--r--arch/arm/configs/bast_defconfig1
-rw-r--r--arch/arm/configs/cerfcube_defconfig1
-rw-r--r--arch/arm/configs/clps7500_defconfig1
-rw-r--r--arch/arm/configs/collie_defconfig1
-rw-r--r--arch/arm/configs/corgi_defconfig1
-rw-r--r--arch/arm/configs/csb337_defconfig1136
-rw-r--r--arch/arm/configs/csb637_defconfig1116
-rw-r--r--arch/arm/configs/ebsa110_defconfig1
-rw-r--r--arch/arm/configs/edb7211_defconfig1
-rw-r--r--arch/arm/configs/enp2611_defconfig1
-rw-r--r--arch/arm/configs/ep80219_defconfig1
-rw-r--r--arch/arm/configs/footbridge_defconfig1
-rw-r--r--arch/arm/configs/fortunet_defconfig1
-rw-r--r--arch/arm/configs/h3600_defconfig1
-rw-r--r--arch/arm/configs/h7201_defconfig1
-rw-r--r--arch/arm/configs/h7202_defconfig1
-rw-r--r--arch/arm/configs/hackkit_defconfig1
-rw-r--r--arch/arm/configs/integrator_defconfig1
-rw-r--r--arch/arm/configs/iq31244_defconfig1
-rw-r--r--arch/arm/configs/iq80321_defconfig1
-rw-r--r--arch/arm/configs/iq80331_defconfig1
-rw-r--r--arch/arm/configs/iq80332_defconfig1
-rw-r--r--arch/arm/configs/ixdp2400_defconfig1
-rw-r--r--arch/arm/configs/ixdp2401_defconfig1
-rw-r--r--arch/arm/configs/ixdp2800_defconfig1
-rw-r--r--arch/arm/configs/ixdp2801_defconfig1
-rw-r--r--arch/arm/configs/ixp4xx_defconfig85
-rw-r--r--arch/arm/configs/jornada720_defconfig1
-rw-r--r--arch/arm/configs/lart_defconfig1
-rw-r--r--arch/arm/configs/lpd7a400_defconfig1
-rw-r--r--arch/arm/configs/lpd7a404_defconfig1
-rw-r--r--arch/arm/configs/lubbock_defconfig1
-rw-r--r--arch/arm/configs/lusl7200_defconfig1
-rw-r--r--arch/arm/configs/mainstone_defconfig1
-rw-r--r--arch/arm/configs/mx1ads_defconfig1
-rw-r--r--arch/arm/configs/neponset_defconfig1
-rw-r--r--arch/arm/configs/netwinder_defconfig1
-rw-r--r--arch/arm/configs/omap_h2_1610_defconfig1
-rw-r--r--arch/arm/configs/pleb_defconfig1
-rw-r--r--arch/arm/configs/pxa255-idp_defconfig1
-rw-r--r--arch/arm/configs/realview_defconfig1
-rw-r--r--arch/arm/configs/rpc_defconfig1
-rw-r--r--arch/arm/configs/s3c2410_defconfig1
-rw-r--r--arch/arm/configs/shannon_defconfig1
-rw-r--r--arch/arm/configs/shark_defconfig1
-rw-r--r--arch/arm/configs/simpad_defconfig1
-rw-r--r--arch/arm/configs/smdk2410_defconfig1
-rw-r--r--arch/arm/configs/spitz_defconfig1
-rw-r--r--arch/arm/configs/versatile_defconfig1
-rw-r--r--arch/arm/kernel/Makefile5
-rw-r--r--arch/arm/kernel/apm.c20
-rw-r--r--arch/arm/kernel/asm-offsets.c9
-rw-r--r--arch/arm/kernel/dma-isa.c22
-rw-r--r--arch/arm/kernel/dma.c73
-rw-r--r--arch/arm/kernel/ecard.c7
-rw-r--r--arch/arm/kernel/entry-armv.S2
-rw-r--r--arch/arm/kernel/fiq.c4
-rw-r--r--arch/arm/kernel/head.S2
-rw-r--r--arch/arm/kernel/irq.c17
-rw-r--r--arch/arm/kernel/process.c16
-rw-r--r--arch/arm/kernel/ptrace.c37
-rw-r--r--arch/arm/kernel/setup.c17
-rw-r--r--arch/arm/kernel/smp.c4
-rw-r--r--arch/arm/kernel/time.c3
-rw-r--r--arch/arm/kernel/traps.c5
-rw-r--r--arch/arm/kernel/vmlinux.lds.S12
-rw-r--r--arch/arm/lib/csumpartialcopy.S6
-rw-r--r--arch/arm/lib/csumpartialcopygeneric.S6
-rw-r--r--arch/arm/lib/csumpartialcopyuser.S8
-rw-r--r--arch/arm/mach-aaec2000/clock.c28
-rw-r--r--arch/arm/mach-aaec2000/core.c2
-rw-r--r--arch/arm/mach-aaec2000/core.h2
-rw-r--r--arch/arm/mach-at91rm9200/Kconfig54
-rw-r--r--arch/arm/mach-at91rm9200/Makefile27
-rw-r--r--arch/arm/mach-at91rm9200/Makefile.boot9
-rw-r--r--arch/arm/mach-at91rm9200/board-csb337.c143
-rw-r--r--arch/arm/mach-at91rm9200/board-csb637.c116
-rw-r--r--arch/arm/mach-at91rm9200/board-dk.c138
-rw-r--r--arch/arm/mach-at91rm9200/board-ek.c131
-rw-r--r--arch/arm/mach-at91rm9200/clock.c620
-rw-r--r--arch/arm/mach-at91rm9200/common.c115
-rw-r--r--arch/arm/mach-at91rm9200/devices.c291
-rw-r--r--arch/arm/mach-at91rm9200/generic.h18
-rw-r--r--arch/arm/mach-at91rm9200/gpio.c302
-rw-r--r--arch/arm/mach-at91rm9200/irq.c170
-rw-r--r--arch/arm/mach-at91rm9200/time.c127
-rw-r--r--arch/arm/mach-epxa10db/Kconfig23
-rw-r--r--arch/arm/mach-epxa10db/Makefile11
-rw-r--r--arch/arm/mach-epxa10db/Makefile.boot2
-rw-r--r--arch/arm/mach-epxa10db/arch.c74
-rw-r--r--arch/arm/mach-epxa10db/dma.c28
-rw-r--r--arch/arm/mach-epxa10db/irq.c82
-rw-r--r--arch/arm/mach-epxa10db/mm.c71
-rw-r--r--arch/arm/mach-epxa10db/time.c78
-rw-r--r--arch/arm/mach-footbridge/dma.c1
-rw-r--r--arch/arm/mach-footbridge/netwinder-hw.c1
-rw-r--r--arch/arm/mach-imx/mx1ads.c55
-rw-r--r--arch/arm/mach-integrator/clock.c28
-rw-r--r--arch/arm/mach-integrator/core.c2
-rw-r--r--arch/arm/mach-integrator/dma.c35
-rw-r--r--arch/arm/mach-integrator/impd1.c4
-rw-r--r--arch/arm/mach-integrator/integrator_ap.c4
-rw-r--r--arch/arm/mach-integrator/integrator_cp.c6
-rw-r--r--arch/arm/mach-integrator/time.c7
-rw-r--r--arch/arm/mach-iop3xx/iop331-setup.c94
-rw-r--r--arch/arm/mach-ixp4xx/Kconfig8
-rw-r--r--arch/arm/mach-ixp4xx/Makefile1
-rw-r--r--arch/arm/mach-ixp4xx/common-pci.c23
-rw-r--r--arch/arm/mach-ixp4xx/common.c20
-rw-r--r--arch/arm/mach-ixp4xx/coyote-pci.c3
-rw-r--r--arch/arm/mach-ixp4xx/coyote-setup.c10
-rw-r--r--arch/arm/mach-ixp4xx/gtwx5715-setup.c15
-rw-r--r--arch/arm/mach-ixp4xx/ixdp425-pci.c5
-rw-r--r--arch/arm/mach-ixp4xx/ixdp425-setup.c13
-rw-r--r--arch/arm/mach-ixp4xx/ixdpg425-pci.c3
-rw-r--r--arch/arm/mach-ixp4xx/nas100d-pci.c71
-rw-r--r--arch/arm/mach-ixp4xx/nas100d-power.c67
-rw-r--r--arch/arm/mach-ixp4xx/nas100d-setup.c135
-rw-r--r--arch/arm/mach-ixp4xx/nslu2-pci.c8
-rw-r--r--arch/arm/mach-ixp4xx/nslu2-power.c3
-rw-r--r--arch/arm/mach-omap1/board-palmte.c2
-rw-r--r--arch/arm/mach-omap1/board-perseus2.c2
-rw-r--r--arch/arm/mach-omap1/clock.c2
-rw-r--r--arch/arm/mach-omap1/serial.c5
-rw-r--r--arch/arm/mach-omap2/clock.c2
-rw-r--r--arch/arm/mach-omap2/serial.c2
-rw-r--r--arch/arm/mach-omap2/timer-gp.c3
-rw-r--r--arch/arm/mach-pxa/Kconfig2
-rw-r--r--arch/arm/mach-pxa/akita-ioexp.c7
-rw-r--r--arch/arm/mach-pxa/corgi.c7
-rw-r--r--arch/arm/mach-pxa/corgi_pm.c44
-rw-r--r--arch/arm/mach-pxa/mainstone.c6
-rw-r--r--arch/arm/mach-pxa/poodle.c7
-rw-r--r--arch/arm/mach-pxa/sharpsl.h101
-rw-r--r--arch/arm/mach-pxa/sharpsl_pm.c824
-rw-r--r--arch/arm/mach-pxa/spitz.c42
-rw-r--r--arch/arm/mach-pxa/spitz_pm.c46
-rw-r--r--arch/arm/mach-pxa/ssp.c17
-rw-r--r--arch/arm/mach-realview/clock.c28
-rw-r--r--arch/arm/mach-realview/core.c4
-rw-r--r--arch/arm/mach-realview/core.h3
-rw-r--r--arch/arm/mach-realview/localtimer.c1
-rw-r--r--arch/arm/mach-realview/realview_eb.c2
-rw-r--r--arch/arm/mach-rpc/dma.c19
-rw-r--r--arch/arm/mach-s3c2410/clock.c235
-rw-r--r--arch/arm/mach-s3c2410/clock.h1
-rw-r--r--arch/arm/mach-s3c2410/s3c2440-clock.c3
-rw-r--r--arch/arm/mach-s3c2410/s3c2440.c2
-rw-r--r--arch/arm/mach-s3c2410/time.c3
-rw-r--r--arch/arm/mach-s3c2410/usb-simtec.c6
-rw-r--r--arch/arm/mach-sa1100/pm.c18
-rw-r--r--arch/arm/mach-versatile/clock.c28
-rw-r--r--arch/arm/mach-versatile/core.c4
-rw-r--r--arch/arm/mach-versatile/core.h2
-rw-r--r--arch/arm/mach-versatile/versatile_ab.c2
-rw-r--r--arch/arm/mach-versatile/versatile_pb.c2
-rw-r--r--arch/arm/mm/Kconfig8
-rw-r--r--arch/arm/mm/consistent.c54
-rw-r--r--arch/arm/mm/discontig.c4
-rw-r--r--arch/arm/mm/ioremap.c49
-rw-r--r--arch/arm/mm/mm-armv.c1
-rw-r--r--arch/arm/plat-omap/clock.c17
-rw-r--r--arch/arm/plat-omap/common.c2
-rw-r--r--arch/arm/plat-omap/cpu-omap.c3
-rw-r--r--arch/arm/plat-omap/dma.c2
-rw-r--r--arch/arm/plat-omap/gpio.c2
-rw-r--r--arch/arm/plat-omap/mcbsp.c3
-rw-r--r--arch/arm/plat-omap/ocpi.c2
-rw-r--r--arch/arm/tools/mach-types30
-rw-r--r--arch/arm26/Kconfig4
-rw-r--r--arch/arm26/kernel/armksyms.c1
-rw-r--r--arch/arm26/kernel/asm-offsets.c7
-rw-r--r--arch/arm26/kernel/process.c5
-rw-r--r--arch/arm26/kernel/ptrace.c29
-rw-r--r--arch/arm26/kernel/traps.c8
-rw-r--r--arch/cris/Kconfig4
-rw-r--r--arch/cris/arch-v10/drivers/ds1302.c1
-rw-r--r--arch/cris/arch-v10/drivers/pcf8563.c1
-rw-r--r--arch/cris/arch-v10/kernel/kgdb.c6
-rw-r--r--arch/cris/arch-v10/kernel/process.c4
-rw-r--r--arch/cris/arch-v10/kernel/ptrace.c4
-rw-r--r--arch/cris/arch-v32/kernel/process.c6
-rw-r--r--arch/cris/arch-v32/kernel/ptrace.c6
-rw-r--r--arch/cris/arch-v32/kernel/smp.c4
-rw-r--r--arch/cris/arch-v32/mm/tlb.c4
-rw-r--r--arch/cris/kernel/crisksyms.c2
-rw-r--r--arch/cris/kernel/process.c28
-rw-r--r--arch/frv/Kconfig27
-rw-r--r--arch/frv/Kconfig.debug22
-rw-r--r--arch/frv/Makefile6
-rw-r--r--arch/frv/boot/Makefile4
-rw-r--r--arch/frv/kernel/Makefile2
-rw-r--r--arch/frv/kernel/entry.S2
-rw-r--r--arch/frv/kernel/frv_ksyms.c27
-rw-r--r--arch/frv/kernel/futex.c242
-rw-r--r--arch/frv/kernel/irq.c17
-rw-r--r--arch/frv/kernel/module.c80
-rw-r--r--arch/frv/kernel/pm.c2
-rw-r--r--arch/frv/kernel/process.c26
-rw-r--r--arch/frv/kernel/setup.c2
-rw-r--r--arch/frv/kernel/signal.c155
-rw-r--r--arch/frv/kernel/time.c3
-rw-r--r--arch/frv/kernel/traps.c3
-rw-r--r--arch/frv/kernel/uaccess.c7
-rw-r--r--arch/frv/kernel/vmlinux.lds.S1
-rw-r--r--arch/frv/lib/Makefile2
-rw-r--r--arch/frv/lib/__ucmpdi2.S45
-rw-r--r--arch/frv/lib/atomic-ops.S92
-rw-r--r--arch/frv/lib/checksum.c31
-rw-r--r--arch/frv/mb93090-mb00/Makefile2
-rw-r--r--arch/frv/mb93090-mb00/pci-dma-nommu.c8
-rw-r--r--arch/frv/mb93090-mb00/pci-dma.c10
-rw-r--r--arch/frv/mb93090-mb00/pci-frv.c8
-rw-r--r--arch/frv/mb93090-mb00/pci-iomap.c29
-rw-r--r--arch/frv/mb93090-mb00/pci-irq.c4
-rw-r--r--arch/frv/mm/cache-page.c5
-rw-r--r--arch/frv/mm/extable.c34
-rw-r--r--arch/frv/mm/highmem.c8
-rw-r--r--arch/h8300/Kconfig4
-rw-r--r--arch/h8300/kernel/gpio.c4
-rw-r--r--arch/h8300/kernel/h8300_ksyms.c7
-rw-r--r--arch/h8300/kernel/process.c30
-rw-r--r--arch/h8300/mm/memory.c13
-rw-r--r--arch/h8300/platform/h8300h/ptrace_h8300h.c12
-rw-r--r--arch/h8300/platform/h8s/ints.c4
-rw-r--r--arch/h8300/platform/h8s/ints_h8s.c4
-rw-r--r--arch/i386/Kconfig68
-rw-r--r--arch/i386/Kconfig.cpu14
-rw-r--r--arch/i386/Kconfig.debug10
-rw-r--r--arch/i386/Makefile9
-rw-r--r--arch/i386/Makefile.cpu10
-rw-r--r--arch/i386/boot/Makefile4
-rw-r--r--arch/i386/boot/compressed/misc.c2
-rw-r--r--arch/i386/boot/install.sh14
-rw-r--r--arch/i386/boot/video.S5
-rw-r--r--arch/i386/crypto/aes-i586-asm.S40
-rw-r--r--arch/i386/crypto/aes.c56
-rw-r--r--arch/i386/kernel/Makefile7
-rw-r--r--arch/i386/kernel/acpi/boot.c2
-rw-r--r--arch/i386/kernel/apic.c131
-rw-r--r--arch/i386/kernel/apm.c102
-rw-r--r--arch/i386/kernel/cpu/amd.c14
-rw-r--r--arch/i386/kernel/cpu/changelog63
-rw-r--r--arch/i386/kernel/cpu/common.c27
-rw-r--r--arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c5
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c5
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-est-common.h25
-rw-r--r--arch/i386/kernel/cpu/cyrix.c27
-rw-r--r--arch/i386/kernel/cpu/intel.c5
-rw-r--r--arch/i386/kernel/cpu/mtrr/changelog229
-rw-r--r--arch/i386/kernel/cpu/mtrr/if.c1
-rw-r--r--arch/i386/kernel/cpu/proc.c27
-rw-r--r--arch/i386/kernel/cpuid.c3
-rw-r--r--arch/i386/kernel/crash.c50
-rw-r--r--arch/i386/kernel/crash_dump.c74
-rw-r--r--arch/i386/kernel/dmi_scan.c16
-rw-r--r--arch/i386/kernel/entry.S3
-rw-r--r--arch/i386/kernel/head.S27
-rw-r--r--arch/i386/kernel/i386_ksyms.c3
-rw-r--r--arch/i386/kernel/init_task.c2
-rw-r--r--arch/i386/kernel/io_apic.c6
-rw-r--r--arch/i386/kernel/ioport.c1
-rw-r--r--arch/i386/kernel/irq.c2
-rw-r--r--arch/i386/kernel/kprobes.c23
-rw-r--r--arch/i386/kernel/microcode.c5
-rw-r--r--arch/i386/kernel/mpparse.c26
-rw-r--r--arch/i386/kernel/msr.c3
-rw-r--r--arch/i386/kernel/process.c43
-rw-r--r--arch/i386/kernel/ptrace.c9
-rw-r--r--arch/i386/kernel/reboot.c13
-rw-r--r--arch/i386/kernel/scx200.c2
-rw-r--r--arch/i386/kernel/setup.c14
-rw-r--r--arch/i386/kernel/smpboot.c10
-rw-r--r--arch/i386/kernel/syscall_table.S2
-rw-r--r--arch/i386/kernel/time.c6
-rw-r--r--arch/i386/kernel/time_hpet.c2
-rw-r--r--arch/i386/kernel/timers/timer_tsc.c2
-rw-r--r--arch/i386/kernel/traps.c94
-rw-r--r--arch/i386/kernel/vm86.c3
-rw-r--r--arch/i386/mm/init.c24
-rw-r--r--arch/i386/mm/pageattr.c31
-rw-r--r--arch/i386/pci/acpi.c2
-rw-r--r--arch/i386/pci/fixup.c7
-rw-r--r--arch/i386/pci/irq.c44
-rw-r--r--arch/ia64/Makefile7
-rw-r--r--arch/ia64/hp/sim/simserial.c1
-rw-r--r--arch/ia64/ia32/Makefile4
-rw-r--r--arch/ia64/ia32/elfcore32.h3
-rw-r--r--arch/ia64/ia32/ia32_entry.S2
-rw-r--r--arch/ia64/ia32/ia32_ioctl.c45
-rw-r--r--arch/ia64/ia32/ia32_signal.c4
-rw-r--r--arch/ia64/ia32/ia32_support.c4
-rw-r--r--arch/ia64/ia32/sys_ia32.c57
-rw-r--r--arch/ia64/kernel/efi.c160
-rw-r--r--arch/ia64/kernel/entry.S1
-rw-r--r--arch/ia64/kernel/head.S2
-rw-r--r--arch/ia64/kernel/ia64_ksyms.c2
-rw-r--r--arch/ia64/kernel/kprobes.c11
-rw-r--r--arch/ia64/kernel/mca.c4
-rw-r--r--arch/ia64/kernel/perfmon.c33
-rw-r--r--arch/ia64/kernel/process.c12
-rw-r--r--arch/ia64/kernel/ptrace.c33
-rw-r--r--arch/ia64/kernel/salinfo.c1
-rw-r--r--arch/ia64/kernel/setup.c18
-rw-r--r--arch/ia64/kernel/signal.c10
-rw-r--r--arch/ia64/kernel/sys_ia64.c2
-rw-r--r--arch/ia64/oprofile/backtrace.c2
-rw-r--r--arch/ia64/sn/kernel/tiocx.c1
-rw-r--r--arch/m32r/Kconfig30
-rw-r--r--arch/m32r/boot/compressed/head.S5
-rw-r--r--arch/m32r/boot/setup.S24
-rw-r--r--arch/m32r/kernel/Makefile1
-rw-r--r--arch/m32r/kernel/entry.S19
-rw-r--r--arch/m32r/kernel/io_m32104ut.c298
-rw-r--r--arch/m32r/kernel/io_m32700ut.c24
-rw-r--r--arch/m32r/kernel/io_mappi.c2
-rw-r--r--arch/m32r/kernel/io_mappi2.c24
-rw-r--r--arch/m32r/kernel/io_mappi3.c51
-rw-r--r--arch/m32r/kernel/io_oaks32r.c2
-rw-r--r--arch/m32r/kernel/io_opsput.c6
-rw-r--r--arch/m32r/kernel/m32r_ksyms.c3
-rw-r--r--arch/m32r/kernel/process.c17
-rw-r--r--arch/m32r/kernel/ptrace.c47
-rw-r--r--arch/m32r/kernel/setup.c7
-rw-r--r--arch/m32r/kernel/setup_m32104ut.c156
-rw-r--r--arch/m32r/kernel/setup_m32700ut.c8
-rw-r--r--arch/m32r/kernel/setup_mappi.c6
-rw-r--r--arch/m32r/kernel/setup_mappi2.c6
-rw-r--r--arch/m32r/kernel/setup_mappi3.c6
-rw-r--r--arch/m32r/kernel/setup_oaks32r.c6
-rw-r--r--arch/m32r/kernel/setup_opsput.c8
-rw-r--r--arch/m32r/kernel/setup_usrv.c6
-rw-r--r--arch/m32r/kernel/smpboot.c2
-rw-r--r--arch/m32r/kernel/time.c4
-rw-r--r--arch/m32r/m32104ut/defconfig.m32104ut (renamed from arch/arm/configs/epxa10db_defconfig)511
-rw-r--r--arch/m32r/mm/cache.c36
-rw-r--r--arch/m68k/Kconfig4
-rw-r--r--arch/m68k/amiga/amiints.c46
-rw-r--r--arch/m68k/amiga/amisound.c2
-rw-r--r--arch/m68k/amiga/cia.c8
-rw-r--r--arch/m68k/amiga/config.c27
-rw-r--r--arch/m68k/apollo/config.c3
-rw-r--r--arch/m68k/atari/config.c9
-rw-r--r--arch/m68k/bvme6000/rtc.c7
-rw-r--r--arch/m68k/hp300/config.c3
-rw-r--r--arch/m68k/kernel/asm-offsets.c2
-rw-r--r--arch/m68k/kernel/head.S2
-rw-r--r--arch/m68k/kernel/m68k_ksyms.c2
-rw-r--r--arch/m68k/kernel/process.c7
-rw-r--r--arch/m68k/kernel/setup.c19
-rw-r--r--arch/m68k/kernel/signal.c62
-rw-r--r--arch/m68k/kernel/sys_m68k.c40
-rw-r--r--arch/m68k/kernel/traps.c38
-rw-r--r--arch/m68k/kernel/vmlinux-std.lds1
-rw-r--r--arch/m68k/kernel/vmlinux-sun3.lds3
-rw-r--r--arch/m68k/lib/checksum.c2
-rw-r--r--arch/m68k/mac/config.c3
-rw-r--r--arch/m68k/mac/iop.c4
-rw-r--r--arch/m68k/mac/misc.c326
-rw-r--r--arch/m68k/math-emu/multi_arith.h2
-rw-r--r--arch/m68k/mm/kmap.c12
-rw-r--r--arch/m68k/mvme16x/rtc.c7
-rw-r--r--arch/m68k/q40/config.c5
-rw-r--r--arch/m68k/sun3/config.c3
-rw-r--r--arch/m68k/sun3x/config.c4
-rw-r--r--arch/m68knommu/Kconfig4
-rw-r--r--arch/m68knommu/Makefile1
-rw-r--r--arch/m68knommu/kernel/m68k_ksyms.c4
-rw-r--r--arch/m68knommu/kernel/process.c51
-rw-r--r--arch/m68knommu/kernel/ptrace.c2
-rw-r--r--arch/m68knommu/kernel/setup.c2
-rw-r--r--arch/m68knommu/kernel/signal.c8
-rw-r--r--arch/m68knommu/kernel/vmlinux.lds.S2
-rw-r--r--arch/mips/Kconfig10
-rw-r--r--arch/mips/Makefile1
-rw-r--r--arch/mips/configs/bigsur_defconfig1
-rw-r--r--arch/mips/configs/cobalt_defconfig1
-rw-r--r--arch/mips/configs/ddb5476_defconfig1
-rw-r--r--arch/mips/configs/ddb5477_defconfig1
-rw-r--r--arch/mips/configs/ev64120_defconfig1
-rw-r--r--arch/mips/configs/ev96100_defconfig1
-rw-r--r--arch/mips/configs/ip22_defconfig1
-rw-r--r--arch/mips/configs/ip27_defconfig1
-rw-r--r--arch/mips/configs/ip32_defconfig1
-rw-r--r--arch/mips/configs/it8172_defconfig1
-rw-r--r--arch/mips/configs/ivr_defconfig1
-rw-r--r--arch/mips/configs/jaguar-atx_defconfig1
-rw-r--r--arch/mips/configs/lasat200_defconfig1
-rw-r--r--arch/mips/configs/malta_defconfig10
-rw-r--r--arch/mips/configs/ocelot_3_defconfig1
-rw-r--r--arch/mips/configs/ocelot_c_defconfig1
-rw-r--r--arch/mips/configs/ocelot_defconfig1
-rw-r--r--arch/mips/configs/ocelot_g_defconfig1
-rw-r--r--arch/mips/configs/pnx8550-v2pci_defconfig1
-rw-r--r--arch/mips/configs/rbhma4500_defconfig1
-rw-r--r--arch/mips/configs/rm200_defconfig1
-rw-r--r--arch/mips/configs/sb1250-swarm_defconfig1
-rw-r--r--arch/mips/configs/yosemite_defconfig1
-rw-r--r--arch/mips/defconfig1
-rw-r--r--arch/mips/kernel/Makefile3
-rw-r--r--arch/mips/kernel/cpu-probe.c39
-rw-r--r--arch/mips/kernel/ioctl32.c50
-rw-r--r--arch/mips/kernel/process.c12
-rw-r--r--arch/mips/kernel/ptrace.c22
-rw-r--r--arch/mips/kernel/ptrace32.c44
-rw-r--r--arch/mips/kernel/signal32.c2
-rw-r--r--arch/mips/kernel/smp_mt.c7
-rw-r--r--arch/mips/kernel/syscall.c3
-rw-r--r--arch/mips/kernel/sysirix.c1
-rw-r--r--arch/mips/kernel/time.c32
-rw-r--r--arch/mips/kernel/traps.c2
-rw-r--r--arch/mips/kernel/vpe.c11
-rw-r--r--arch/mips/lib/iomap.c2
-rw-r--r--arch/mips/math-emu/dp_fint.c2
-rw-r--r--arch/mips/math-emu/dp_flong.c2
-rw-r--r--arch/mips/math-emu/sp_fint.c2
-rw-r--r--arch/mips/math-emu/sp_flong.c2
-rw-r--r--arch/mips/mips-boards/generic/time.c33
-rw-r--r--arch/mips/mm/c-r4k.c4
-rw-r--r--arch/mips/oprofile/common.c3
-rw-r--r--arch/mips/oprofile/op_impl.h4
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c22
-rw-r--r--arch/mips/pci/fixup-capcella.c2
-rw-r--r--arch/mips/pci/fixup-mpc30x.c2
-rw-r--r--arch/mips/pci/fixup-tb0219.c2
-rw-r--r--arch/mips/pci/fixup-tb0226.c2
-rw-r--r--arch/mips/pci/fixup-tb0287.c2
-rw-r--r--arch/mips/pci/ops-vr41xx.c2
-rw-r--r--arch/mips/pci/pci-vr41xx.c2
-rw-r--r--arch/mips/pci/pci-vr41xx.h2
-rw-r--r--arch/mips/pmc-sierra/yosemite/smp.c4
-rw-r--r--arch/mips/sgi-ip27/ip27-berr.c1
-rw-r--r--arch/mips/sgi-ip27/ip27-smp.c4
-rw-r--r--arch/mips/sibyte/cfe/smp.c2
-rw-r--r--arch/mips/vr41xx/casio-e55/setup.c2
-rw-r--r--arch/mips/vr41xx/common/bcu.c4
-rw-r--r--arch/mips/vr41xx/common/cmu.c4
-rw-r--r--arch/mips/vr41xx/common/icu.c4
-rw-r--r--arch/mips/vr41xx/common/init.c2
-rw-r--r--arch/mips/vr41xx/common/int-handler.S2
-rw-r--r--arch/mips/vr41xx/common/irq.c2
-rw-r--r--arch/mips/vr41xx/common/pmu.c2
-rw-r--r--arch/mips/vr41xx/common/type.c2
-rw-r--r--arch/mips/vr41xx/common/vrc4173.c4
-rw-r--r--arch/mips/vr41xx/ibm-workpad/setup.c2
-rw-r--r--arch/parisc/Kconfig3
-rw-r--r--arch/parisc/hpux/sys_hpux.c1
-rw-r--r--arch/parisc/kernel/Makefile3
-rw-r--r--arch/parisc/kernel/cache.c20
-rw-r--r--arch/parisc/kernel/drivers.c13
-rw-r--r--arch/parisc/kernel/firmware.c2
-rw-r--r--arch/parisc/kernel/hardware.c1
-rw-r--r--arch/parisc/kernel/inventory.c6
-rw-r--r--arch/parisc/kernel/ioctl32.c60
-rw-r--r--arch/parisc/kernel/pci-dma.c6
-rw-r--r--arch/parisc/kernel/pdc_chassis.c13
-rw-r--r--arch/parisc/kernel/perf.c7
-rw-r--r--arch/parisc/kernel/process.c8
-rw-r--r--arch/parisc/kernel/processor.c8
-rw-r--r--arch/parisc/kernel/setup.c10
-rw-r--r--arch/parisc/kernel/smp.c18
-rw-r--r--arch/parisc/kernel/time.c4
-rw-r--r--arch/parisc/kernel/topology.c3
-rw-r--r--arch/parisc/kernel/unaligned.c2
-rw-r--r--arch/parisc/kernel/unwind.c2
-rw-r--r--arch/parisc/kernel/vmlinux.lds.S15
-rw-r--r--arch/parisc/mm/init.c29
-rw-r--r--arch/parisc/mm/ioremap.c100
-rw-r--r--arch/powerpc/Kconfig93
-rw-r--r--arch/powerpc/Kconfig.debug42
-rw-r--r--arch/powerpc/Makefile15
-rw-r--r--arch/powerpc/boot/.gitignore20
-rw-r--r--arch/powerpc/boot/Makefile81
-rw-r--r--arch/powerpc/boot/crt0.S21
-rw-r--r--arch/powerpc/boot/hack-coff.c84
-rw-r--r--arch/powerpc/boot/main.c46
-rw-r--r--arch/powerpc/boot/prom.c538
-rw-r--r--arch/powerpc/boot/prom.h36
-rw-r--r--arch/powerpc/boot/rs6000.h243
-rw-r--r--arch/powerpc/boot/stdio.c325
-rw-r--r--arch/powerpc/boot/stdio.h6
-rw-r--r--arch/powerpc/boot/string.S20
-rw-r--r--arch/powerpc/boot/zImage.coff.lds46
-rw-r--r--arch/powerpc/configs/mpc834x_sys_defconfig911
-rw-r--r--arch/powerpc/configs/pmac32_defconfig1729
-rw-r--r--arch/powerpc/configs/ppc64_defconfig4
-rw-r--r--arch/powerpc/kernel/Makefile29
-rw-r--r--arch/powerpc/kernel/asm-offsets.c8
-rw-r--r--arch/powerpc/kernel/btext.c136
-rw-r--r--arch/powerpc/kernel/cpu_setup_power4.S4
-rw-r--r--arch/powerpc/kernel/cputable.c278
-rw-r--r--arch/powerpc/kernel/crash.c264
-rw-r--r--arch/powerpc/kernel/crash_dump.c111
-rw-r--r--arch/powerpc/kernel/dma_64.c9
-rw-r--r--arch/powerpc/kernel/entry_32.S169
-rw-r--r--arch/powerpc/kernel/entry_64.S229
-rw-r--r--arch/powerpc/kernel/fpu.S10
-rw-r--r--arch/powerpc/kernel/head_32.S56
-rw-r--r--arch/powerpc/kernel/head_64.S137
-rw-r--r--arch/powerpc/kernel/ibmebus.c396
-rw-r--r--arch/powerpc/kernel/idle_power4.S8
-rw-r--r--arch/powerpc/kernel/ioctl32.c45
-rw-r--r--arch/powerpc/kernel/irq.c77
-rw-r--r--arch/powerpc/kernel/kprobes.c27
-rw-r--r--arch/powerpc/kernel/legacy_serial.c557
-rw-r--r--arch/powerpc/kernel/lparcfg.c13
-rw-r--r--arch/powerpc/kernel/lparmap.c12
-rw-r--r--arch/powerpc/kernel/machine_kexec.c61
-rw-r--r--arch/powerpc/kernel/machine_kexec_32.c65
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c43
-rw-r--r--arch/powerpc/kernel/misc_32.S117
-rw-r--r--arch/powerpc/kernel/misc_64.S10
-rw-r--r--arch/powerpc/kernel/nvram_64.c106
-rw-r--r--arch/powerpc/kernel/paca.c47
-rw-r--r--arch/powerpc/kernel/pci_64.c97
-rw-r--r--arch/powerpc/kernel/pmc.c5
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c9
-rw-r--r--arch/powerpc/kernel/process.c22
-rw-r--r--arch/powerpc/kernel/prom.c577
-rw-r--r--arch/powerpc/kernel/prom_init.c68
-rw-r--r--arch/powerpc/kernel/prom_parse.c548
-rw-r--r--arch/powerpc/kernel/ptrace-common.h4
-rw-r--r--arch/powerpc/kernel/ptrace32.c28
-rw-r--r--arch/powerpc/kernel/rtas.c111
-rw-r--r--arch/powerpc/kernel/rtas_pci.c51
-rw-r--r--arch/powerpc/kernel/setup-common.c138
-rw-r--r--arch/powerpc/kernel/setup_32.c34
-rw-r--r--arch/powerpc/kernel/setup_64.c267
-rw-r--r--arch/powerpc/kernel/signal_32.c89
-rw-r--r--arch/powerpc/kernel/signal_64.c43
-rw-r--r--arch/powerpc/kernel/smp.c35
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c56
-rw-r--r--arch/powerpc/kernel/syscalls.c28
-rw-r--r--arch/powerpc/kernel/systbl.S22
-rw-r--r--arch/powerpc/kernel/time.c6
-rw-r--r--arch/powerpc/kernel/traps.c25
-rw-r--r--arch/powerpc/kernel/udbg.c52
-rw-r--r--arch/powerpc/kernel/udbg_16550.c71
-rw-r--r--arch/powerpc/kernel/vdso32/.gitignore1
-rw-r--r--arch/powerpc/kernel/vdso64/.gitignore1
-rw-r--r--arch/powerpc/lib/locks.c8
-rw-r--r--arch/powerpc/mm/fault.c7
-rw-r--r--arch/powerpc/mm/hash_utils_64.c9
-rw-r--r--arch/powerpc/mm/hugetlbpage.c42
-rw-r--r--arch/powerpc/mm/imalloc.c2
-rw-r--r--arch/powerpc/mm/init_32.c5
-rw-r--r--arch/powerpc/mm/mem.c13
-rw-r--r--arch/powerpc/mm/numa.c139
-rw-r--r--arch/powerpc/mm/pgtable_64.c2
-rw-r--r--arch/powerpc/mm/slb.c16
-rw-r--r--arch/powerpc/mm/slb_low.S6
-rw-r--r--arch/powerpc/mm/stab.c16
-rw-r--r--arch/powerpc/mm/tlb_64.c2
-rw-r--r--arch/powerpc/oprofile/Makefile1
-rw-r--r--arch/powerpc/oprofile/common.c83
-rw-r--r--arch/powerpc/oprofile/op_model_7450.c206
-rw-r--r--arch/powerpc/oprofile/op_model_power4.c4
-rw-r--r--arch/powerpc/oprofile/op_model_rs64.c3
-rw-r--r--arch/powerpc/platforms/83xx/Kconfig27
-rw-r--r--arch/powerpc/platforms/83xx/Makefile4
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_sys.c243
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_sys.h23
-rw-r--r--arch/powerpc/platforms/83xx/mpc83xx.h14
-rw-r--r--arch/powerpc/platforms/83xx/pci.c99
-rw-r--r--arch/powerpc/platforms/Makefile1
-rw-r--r--arch/powerpc/platforms/cell/Kconfig13
-rw-r--r--arch/powerpc/platforms/cell/Makefile8
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c42
-rw-r--r--arch/powerpc/platforms/cell/interrupt.h1
-rw-r--r--arch/powerpc/platforms/cell/iommu.c225
-rw-r--r--arch/powerpc/platforms/cell/pervasive.c229
-rw-r--r--arch/powerpc/platforms/cell/pervasive.h62
-rw-r--r--arch/powerpc/platforms/cell/setup.c95
-rw-r--r--arch/powerpc/platforms/cell/smp.c2
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c711
-rw-r--r--arch/powerpc/platforms/cell/spu_priv1.c133
-rw-r--r--arch/powerpc/platforms/cell/spu_syscalls.c88
-rw-r--r--arch/powerpc/platforms/cell/spufs/Makefile54
-rw-r--r--arch/powerpc/platforms/cell/spufs/backing_ops.c308
-rw-r--r--arch/powerpc/platforms/cell/spufs/context.c167
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c794
-rw-r--r--arch/powerpc/platforms/cell/spufs/hw_ops.c255
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c486
-rw-r--r--arch/powerpc/platforms/cell/spufs/run.c131
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c461
-rw-r--r--arch/powerpc/platforms/cell/spufs/spu_restore.c336
-rw-r--r--arch/powerpc/platforms/cell/spufs/spu_restore_crt0.S116
-rw-r--r--arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped231
-rw-r--r--arch/powerpc/platforms/cell/spufs/spu_save.c195
-rw-r--r--arch/powerpc/platforms/cell/spufs/spu_save_crt0.S102
-rw-r--r--arch/powerpc/platforms/cell/spufs/spu_save_dump.h_shipped191
-rw-r--r--arch/powerpc/platforms/cell/spufs/spu_utils.h160
-rw-r--r--arch/powerpc/platforms/cell/spufs/spufs.h163
-rw-r--r--arch/powerpc/platforms/cell/spufs/switch.c2204
-rw-r--r--arch/powerpc/platforms/cell/spufs/syscalls.c103
-rw-r--r--arch/powerpc/platforms/chrp/pci.c27
-rw-r--r--arch/powerpc/platforms/chrp/setup.c23
-rw-r--r--arch/powerpc/platforms/chrp/time.c7
-rw-r--r--arch/powerpc/platforms/embedded6xx/Kconfig18
-rw-r--r--arch/powerpc/platforms/iseries/Makefile4
-rw-r--r--arch/powerpc/platforms/iseries/iommu.c2
-rw-r--r--arch/powerpc/platforms/iseries/iommu.h35
-rw-r--r--arch/powerpc/platforms/iseries/irq.c346
-rw-r--r--arch/powerpc/platforms/iseries/irq.h1
-rw-r--r--arch/powerpc/platforms/iseries/lpardata.c12
-rw-r--r--arch/powerpc/platforms/iseries/lpevents.c12
-rw-r--r--arch/powerpc/platforms/iseries/mf.c16
-rw-r--r--arch/powerpc/platforms/iseries/misc.S3
-rw-r--r--arch/powerpc/platforms/iseries/pci.c1
-rw-r--r--arch/powerpc/platforms/iseries/setup.c55
-rw-r--r--arch/powerpc/platforms/iseries/smp.c2
-rw-r--r--arch/powerpc/platforms/iseries/vio.c2
-rw-r--r--arch/powerpc/platforms/iseries/viopath.c12
-rw-r--r--arch/powerpc/platforms/maple/setup.c27
-rw-r--r--arch/powerpc/platforms/powermac/Makefile7
-rw-r--r--arch/powerpc/platforms/powermac/bootx_init.c547
-rw-r--r--arch/powerpc/platforms/powermac/cpufreq_64.c498
-rw-r--r--arch/powerpc/platforms/powermac/feature.c367
-rw-r--r--arch/powerpc/platforms/powermac/low_i2c.c1414
-rw-r--r--arch/powerpc/platforms/powermac/nvram.c53
-rw-r--r--arch/powerpc/platforms/powermac/pci.c299
-rw-r--r--arch/powerpc/platforms/powermac/pfunc_base.c405
-rw-r--r--arch/powerpc/platforms/powermac/pfunc_core.c989
-rw-r--r--arch/powerpc/platforms/powermac/pic.c474
-rw-r--r--arch/powerpc/platforms/powermac/pmac.h6
-rw-r--r--arch/powerpc/platforms/powermac/setup.c109
-rw-r--r--arch/powerpc/platforms/powermac/smp.c382
-rw-r--r--arch/powerpc/platforms/powermac/time.c15
-rw-r--r--arch/powerpc/platforms/powermac/udbg_adb.c221
-rw-r--r--arch/powerpc/platforms/powermac/udbg_scc.c (renamed from arch/powerpc/kernel/udbg_scc.c)68
-rw-r--r--arch/powerpc/platforms/pseries/Makefile4
-rw-r--r--arch/powerpc/platforms/pseries/eeh.c516
-rw-r--r--arch/powerpc/platforms/pseries/eeh_cache.c316
-rw-r--r--arch/powerpc/platforms/pseries/eeh_driver.c376
-rw-r--r--arch/powerpc/platforms/pseries/eeh_event.c39
-rw-r--r--arch/powerpc/platforms/pseries/hvcserver.c4
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c14
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c80
-rw-r--r--arch/powerpc/platforms/pseries/pci_dlpar.c174
-rw-r--r--arch/powerpc/platforms/pseries/ras.c11
-rw-r--r--arch/powerpc/platforms/pseries/ras.h9
-rw-r--r--arch/powerpc/platforms/pseries/reconfig.c100
-rw-r--r--arch/powerpc/platforms/pseries/scanlog.c4
-rw-r--r--arch/powerpc/platforms/pseries/setup.c77
-rw-r--r--arch/powerpc/platforms/pseries/smp.c6
-rw-r--r--arch/powerpc/platforms/pseries/xics.c4
-rw-r--r--arch/powerpc/sysdev/Makefile4
-rw-r--r--arch/powerpc/sysdev/dart.h41
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c (renamed from arch/powerpc/sysdev/u3_iommu.c)174
-rw-r--r--arch/powerpc/sysdev/fsl_soc.c317
-rw-r--r--arch/powerpc/sysdev/fsl_soc.h8
-rw-r--r--arch/powerpc/sysdev/ipic.c (renamed from arch/ppc/syslib/ipic.c)0
-rw-r--r--arch/powerpc/sysdev/ipic.h (renamed from arch/ppc/syslib/ipic.h)0
-rw-r--r--arch/powerpc/sysdev/mpic.c247
-rw-r--r--arch/powerpc/xmon/Makefile8
-rw-r--r--arch/powerpc/xmon/start.c (renamed from arch/powerpc/xmon/start_64.c)0
-rw-r--r--arch/powerpc/xmon/start_32.c441
-rw-r--r--arch/powerpc/xmon/start_8xx.c44
-rw-r--r--arch/powerpc/xmon/xmon.c22
-rw-r--r--arch/ppc/4xx_io/serial_sicc.c1
-rw-r--r--arch/ppc/Kconfig7
-rw-r--r--arch/ppc/Makefile3
-rw-r--r--arch/ppc/amiga/amiints.c40
-rw-r--r--arch/ppc/amiga/cia.c8
-rw-r--r--arch/ppc/amiga/config.c24
-rw-r--r--arch/ppc/boot/common/util.S6
-rw-r--r--arch/ppc/boot/images/Makefile2
-rw-r--r--arch/ppc/boot/simple/Makefile2
-rw-r--r--arch/ppc/configs/TQM8540_defconfig973
-rw-r--r--arch/ppc/configs/TQM8541_defconfig986
-rw-r--r--arch/ppc/configs/TQM8555_defconfig983
-rw-r--r--arch/ppc/configs/TQM8560_defconfig992
-rw-r--r--arch/ppc/kernel/Makefile2
-rw-r--r--arch/ppc/kernel/asm-offsets.c2
-rw-r--r--arch/ppc/kernel/entry.S167
-rw-r--r--arch/ppc/kernel/head_8xx.S77
-rw-r--r--arch/ppc/kernel/idle.c4
-rw-r--r--arch/ppc/kernel/machine_kexec.c6
-rw-r--r--arch/ppc/kernel/misc.S6
-rw-r--r--arch/ppc/kernel/pci.c49
-rw-r--r--arch/ppc/kernel/ppc_htab.c1
-rw-r--r--arch/ppc/kernel/ppc_ksyms.c6
-rw-r--r--arch/ppc/kernel/process.c851
-rw-r--r--arch/ppc/kernel/setup.c3
-rw-r--r--arch/ppc/kernel/smp.c4
-rw-r--r--arch/ppc/platforms/4xx/ibm440gx.c2
-rw-r--r--arch/ppc/platforms/4xx/ibm440sp.c1
-rw-r--r--arch/ppc/platforms/83xx/mpc834x_sys.c10
-rw-r--r--arch/ppc/platforms/85xx/Kconfig28
-rw-r--r--arch/ppc/platforms/85xx/Makefile4
-rw-r--r--arch/ppc/platforms/85xx/mpc8540_ads.c14
-rw-r--r--arch/ppc/platforms/85xx/mpc8560_ads.c11
-rw-r--r--arch/ppc/platforms/85xx/mpc85xx_cds_common.c27
-rw-r--r--arch/ppc/platforms/85xx/sbc8560.c10
-rw-r--r--arch/ppc/platforms/85xx/stx_gp3.c10
-rw-r--r--arch/ppc/platforms/85xx/tqm85xx.c415
-rw-r--r--arch/ppc/platforms/85xx/tqm85xx.h56
-rw-r--r--arch/ppc/platforms/apus_setup.c30
-rw-r--r--arch/ppc/platforms/chrp_setup.c1
-rw-r--r--arch/ppc/platforms/lite5200.c2
-rw-r--r--arch/ppc/platforms/mpc5200.c53
-rw-r--r--arch/ppc/platforms/prep_setup.c2
-rw-r--r--arch/ppc/syslib/Makefile2
-rw-r--r--arch/ppc/syslib/m8xx_setup.c15
-rw-r--r--arch/ppc/syslib/m8xx_wdt.c92
-rw-r--r--arch/ppc/syslib/m8xx_wdt.h4
-rw-r--r--arch/ppc/syslib/mpc52xx_pci.c95
-rw-r--r--arch/ppc/syslib/mpc52xx_setup.c6
-rw-r--r--arch/ppc/syslib/mpc83xx_devices.c10
-rw-r--r--arch/ppc/syslib/mpc85xx_devices.c10
-rw-r--r--arch/ppc/xmon/xmon.c2
-rw-r--r--arch/s390/Kconfig37
-rw-r--r--arch/s390/Makefile6
-rw-r--r--arch/s390/appldata/appldata_base.c8
-rw-r--r--arch/s390/appldata/appldata_os.c14
-rw-r--r--arch/s390/crypto/Makefile8
-rw-r--r--arch/s390/crypto/aes_s390.c248
-rw-r--r--arch/s390/crypto/crypt_s390.h (renamed from arch/s390/crypto/crypt_z990.h)267
-rw-r--r--arch/s390/crypto/crypt_s390_query.c129
-rw-r--r--arch/s390/crypto/crypt_z990_query.c111
-rw-r--r--arch/s390/crypto/des_s390.c (renamed from arch/s390/crypto/des_z990.c)54
-rw-r--r--arch/s390/crypto/sha1_s390.c (renamed from arch/s390/crypto/sha1_z990.c)32
-rw-r--r--arch/s390/crypto/sha256_s390.c151
-rw-r--r--arch/s390/defconfig65
-rw-r--r--arch/s390/kernel/Makefile18
-rw-r--r--arch/s390/kernel/binfmt_elf32.c2
-rw-r--r--arch/s390/kernel/compat_ioctl.c81
-rw-r--r--arch/s390/kernel/compat_linux.c35
-rw-r--r--arch/s390/kernel/compat_signal.c2
-rw-r--r--arch/s390/kernel/compat_wrapper.S2
-rw-r--r--arch/s390/kernel/cpcmd.c16
-rw-r--r--arch/s390/kernel/crash.c2
-rw-r--r--arch/s390/kernel/entry64.S18
-rw-r--r--arch/s390/kernel/head.S4
-rw-r--r--arch/s390/kernel/machine_kexec.c2
-rw-r--r--arch/s390/kernel/module.c12
-rw-r--r--arch/s390/kernel/process.c45
-rw-r--r--arch/s390/kernel/ptrace.c79
-rw-r--r--arch/s390/kernel/reipl_diag.c2
-rw-r--r--arch/s390/kernel/s390_ksyms.c1
-rw-r--r--arch/s390/kernel/setup.c14
-rw-r--r--arch/s390/kernel/signal.c8
-rw-r--r--arch/s390/kernel/smp.c16
-rw-r--r--arch/s390/kernel/sys_s390.c12
-rw-r--r--arch/s390/kernel/time.c2
-rw-r--r--arch/s390/kernel/traps.c16
-rw-r--r--arch/s390/kernel/vmlinux.lds.S2
-rw-r--r--arch/s390/lib/Makefile5
-rw-r--r--arch/s390/lib/spinlock.c2
-rw-r--r--arch/s390/mm/extmem.c2
-rw-r--r--arch/s390/mm/fault.c18
-rw-r--r--arch/s390/mm/init.c8
-rw-r--r--arch/s390/mm/mmap.c2
-rw-r--r--arch/s390/oprofile/Makefile2
-rw-r--r--arch/s390/oprofile/backtrace.c79
-rw-r--r--arch/s390/oprofile/init.c4
-rw-r--r--arch/sh/Kconfig4
-rw-r--r--arch/sh/kernel/process.c66
-rw-r--r--arch/sh/kernel/ptrace.c14
-rw-r--r--arch/sh/kernel/sh_ksyms.c2
-rw-r--r--arch/sh/kernel/smp.c2
-rw-r--r--arch/sh64/Kconfig4
-rw-r--r--arch/sh64/kernel/process.c24
-rw-r--r--arch/sh64/kernel/sh_ksyms.c2
-rw-r--r--arch/sh64/kernel/time.c7
-rw-r--r--arch/sh64/lib/dbg.c2
-rw-r--r--arch/sparc/Kconfig4
-rw-r--r--arch/sparc/kernel/process.c12
-rw-r--r--arch/sparc/kernel/ptrace.c39
-rw-r--r--arch/sparc/kernel/sparc_ksyms.c2
-rw-r--r--arch/sparc/kernel/sun4d_smp.c2
-rw-r--r--arch/sparc/kernel/sun4m_smp.c2
-rw-r--r--arch/sparc/kernel/sys_sunos.c1
-rw-r--r--arch/sparc/kernel/traps.c4
-rw-r--r--arch/sparc64/Kconfig7
-rw-r--r--arch/sparc64/defconfig2064
-rw-r--r--arch/sparc64/kernel/Makefile4
-rw-r--r--arch/sparc64/kernel/binfmt_aout32.c2
-rw-r--r--arch/sparc64/kernel/ebus.c15
-rw-r--r--arch/sparc64/kernel/entry.S7
-rw-r--r--arch/sparc64/kernel/ioctl32.c39
-rw-r--r--arch/sparc64/kernel/kprobes.c18
-rw-r--r--arch/sparc64/kernel/power.c4
-rw-r--r--arch/sparc64/kernel/process.c10
-rw-r--r--arch/sparc64/kernel/ptrace.c80
-rw-r--r--arch/sparc64/kernel/setup.c2
-rw-r--r--arch/sparc64/kernel/smp.c2
-rw-r--r--arch/sparc64/kernel/sparc64_ksyms.c2
-rw-r--r--arch/sparc64/kernel/sys_sparc32.c37
-rw-r--r--arch/sparc64/kernel/sys_sunos32.c1
-rw-r--r--arch/sparc64/kernel/systbls.S4
-rw-r--r--arch/sparc64/kernel/traps.c4
-rw-r--r--arch/sparc64/solaris/fs.c1
-rw-r--r--arch/um/Kconfig14
-rw-r--r--arch/um/Makefile23
-rw-r--r--arch/um/drivers/chan_kern.c275
-rw-r--r--arch/um/drivers/line.c298
-rw-r--r--arch/um/drivers/mconsole_kern.c232
-rw-r--r--arch/um/drivers/mconsole_user.c12
-rw-r--r--arch/um/drivers/net_kern.c8
-rw-r--r--arch/um/drivers/ssl.c47
-rw-r--r--arch/um/drivers/stdio_console.c33
-rw-r--r--arch/um/drivers/ubd_kern.c37
-rw-r--r--arch/um/include/chan_kern.h25
-rw-r--r--arch/um/include/choose-mode.h3
-rw-r--r--arch/um/include/irq_user.h13
-rw-r--r--arch/um/include/kern.h13
-rw-r--r--arch/um/include/kern_util.h19
-rw-r--r--arch/um/include/line.h37
-rw-r--r--arch/um/include/mconsole.h8
-rw-r--r--arch/um/include/os.h33
-rw-r--r--arch/um/include/signal_user.h28
-rw-r--r--arch/um/include/sysdep-i386/kernel-offsets.h (renamed from arch/um/sys-i386/kernel-offsets.c)5
-rw-r--r--arch/um/include/sysdep-x86_64/kernel-offsets.h (renamed from arch/um/sys-x86_64/kernel-offsets.c)2
-rw-r--r--arch/um/include/user_util.h11
-rw-r--r--arch/um/kernel/Makefile8
-rw-r--r--arch/um/kernel/asm-offsets.c2
-rw-r--r--arch/um/kernel/irq_user.c49
-rw-r--r--arch/um/kernel/process_kern.c7
-rw-r--r--arch/um/kernel/reboot.c2
-rw-r--r--arch/um/kernel/sigio_user.c2
-rw-r--r--arch/um/kernel/signal_kern.c1
-rw-r--r--arch/um/kernel/signal_user.c157
-rw-r--r--arch/um/kernel/skas/Makefile2
-rw-r--r--arch/um/kernel/skas/include/skas.h1
-rw-r--r--arch/um/kernel/skas/process.c15
-rw-r--r--arch/um/kernel/skas/process_kern.c5
-rw-r--r--arch/um/kernel/time.c9
-rw-r--r--arch/um/kernel/trap_kern.c25
-rw-r--r--arch/um/kernel/trap_user.c98
-rw-r--r--arch/um/kernel/tt/exec_kern.c3
-rw-r--r--arch/um/kernel/tt/process_kern.c9
-rw-r--r--arch/um/kernel/tt/tracer.c1
-rw-r--r--arch/um/kernel/tt/trap_user.c16
-rw-r--r--arch/um/kernel/um_arch.c15
-rw-r--r--arch/um/kernel/umid.c323
-rw-r--r--arch/um/os-Linux/Makefile8
-rw-r--r--arch/um/os-Linux/aio.c467
-rw-r--r--arch/um/os-Linux/main.c1
-rw-r--r--arch/um/os-Linux/process.c1
-rw-r--r--arch/um/os-Linux/signal.c158
-rw-r--r--arch/um/os-Linux/skas/Makefile10
-rw-r--r--arch/um/os-Linux/skas/trap.c (renamed from arch/um/kernel/skas/trap_user.c)49
-rw-r--r--arch/um/os-Linux/start_up.c1
-rw-r--r--arch/um/os-Linux/trap.c40
-rw-r--r--arch/um/os-Linux/tt.c15
-rw-r--r--arch/um/os-Linux/umid.c335
-rw-r--r--arch/um/sys-i386/signal.c1
-rw-r--r--arch/v850/Kconfig3
-rw-r--r--arch/v850/kernel/process.c26
-rw-r--r--arch/v850/kernel/ptrace.c2
-rw-r--r--arch/v850/kernel/v850_ksyms.c2
-rw-r--r--arch/x86_64/Kconfig81
-rw-r--r--arch/x86_64/Kconfig.debug10
-rw-r--r--arch/x86_64/Makefile12
-rw-r--r--arch/x86_64/boot/Makefile2
-rw-r--r--arch/x86_64/boot/compressed/misc.c2
-rw-r--r--arch/x86_64/boot/compressed/miscsetup.h39
-rw-r--r--arch/x86_64/boot/install.sh40
-rw-r--r--arch/x86_64/crypto/aes.c25
-rw-r--r--arch/x86_64/defconfig116
-rw-r--r--arch/x86_64/ia32/.gitignore1
-rw-r--r--arch/x86_64/ia32/Makefile4
-rw-r--r--arch/x86_64/ia32/ia32_binfmt.c7
-rw-r--r--arch/x86_64/ia32/ia32_ioctl.c79
-rw-r--r--arch/x86_64/ia32/ia32_signal.c26
-rw-r--r--arch/x86_64/ia32/ia32entry.S43
-rw-r--r--arch/x86_64/ia32/ptrace32.c59
-rw-r--r--arch/x86_64/ia32/sys_ia32.c19
-rw-r--r--arch/x86_64/ia32/vsyscall-sigreturn.S1
-rw-r--r--arch/x86_64/ia32/vsyscall-syscall.S1
-rw-r--r--arch/x86_64/ia32/vsyscall-sysenter.S1
-rw-r--r--arch/x86_64/kernel/Makefile9
-rw-r--r--arch/x86_64/kernel/aperture.c3
-rw-r--r--arch/x86_64/kernel/apic.c193
-rw-r--r--arch/x86_64/kernel/asm-offsets.c5
-rw-r--r--arch/x86_64/kernel/crash.c156
-rw-r--r--arch/x86_64/kernel/crash_dump.c (renamed from kernel/crash_dump.c)32
-rw-r--r--arch/x86_64/kernel/e820.c21
-rw-r--r--arch/x86_64/kernel/early_printk.c4
-rw-r--r--arch/x86_64/kernel/entry.S44
-rw-r--r--arch/x86_64/kernel/genapic_cluster.c5
-rw-r--r--arch/x86_64/kernel/genapic_flat.c10
-rw-r--r--arch/x86_64/kernel/head.S14
-rw-r--r--arch/x86_64/kernel/head64.c5
-rw-r--r--arch/x86_64/kernel/i387.c2
-rw-r--r--arch/x86_64/kernel/i8259.c5
-rw-r--r--arch/x86_64/kernel/init_task.c2
-rw-r--r--arch/x86_64/kernel/io_apic.c167
-rw-r--r--arch/x86_64/kernel/ioport.c1
-rw-r--r--arch/x86_64/kernel/irq.c6
-rw-r--r--arch/x86_64/kernel/kprobes.c16
-rw-r--r--arch/x86_64/kernel/mce.c35
-rw-r--r--arch/x86_64/kernel/mce_amd.c6
-rw-r--r--arch/x86_64/kernel/mce_intel.c6
-rw-r--r--arch/x86_64/kernel/nmi.c8
-rw-r--r--arch/x86_64/kernel/pci-dma.c286
-rw-r--r--arch/x86_64/kernel/pci-gart.c413
-rw-r--r--arch/x86_64/kernel/pci-nommu.c145
-rw-r--r--arch/x86_64/kernel/pci-swiotlb.c42
-rw-r--r--arch/x86_64/kernel/process.c87
-rw-r--r--arch/x86_64/kernel/ptrace.c19
-rw-r--r--arch/x86_64/kernel/reboot.c10
-rw-r--r--arch/x86_64/kernel/setup.c110
-rw-r--r--arch/x86_64/kernel/setup64.c46
-rw-r--r--arch/x86_64/kernel/smp.c7
-rw-r--r--arch/x86_64/kernel/smpboot.c61
-rw-r--r--arch/x86_64/kernel/suspend.c2
-rw-r--r--arch/x86_64/kernel/syscall.c2
-rw-r--r--arch/x86_64/kernel/time.c121
-rw-r--r--arch/x86_64/kernel/trampoline.S11
-rw-r--r--arch/x86_64/kernel/traps.c164
-rw-r--r--arch/x86_64/kernel/vmlinux.lds.S4
-rw-r--r--arch/x86_64/kernel/vsmp.c45
-rw-r--r--arch/x86_64/kernel/vsyscall.c14
-rw-r--r--arch/x86_64/kernel/x8664_ksyms.c33
-rw-r--r--arch/x86_64/lib/copy_user.S244
-rw-r--r--arch/x86_64/lib/delay.c2
-rw-r--r--arch/x86_64/lib/usercopy.c12
-rw-r--r--arch/x86_64/mm/fault.c54
-rw-r--r--arch/x86_64/mm/init.c52
-rw-r--r--arch/x86_64/mm/numa.c75
-rw-r--r--arch/x86_64/mm/pageattr.c9
-rw-r--r--arch/x86_64/mm/srat.c67
-rw-r--r--arch/x86_64/pci/Makefile-BUS22
-rw-r--r--arch/xtensa/Kconfig4
-rw-r--r--arch/xtensa/kernel/process.c4
-rw-r--r--arch/xtensa/kernel/ptrace.c12
-rw-r--r--arch/xtensa/kernel/time.c2
-rw-r--r--block/Kconfig2
-rw-r--r--block/as-iosched.c144
-rw-r--r--block/cfq-iosched.c16
-rw-r--r--block/deadline-iosched.c8
-rw-r--r--block/elevator.c89
-rw-r--r--block/ioctl.c24
-rw-r--r--block/ll_rw_blk.c659
-rw-r--r--block/scsi_ioctl.c16
-rw-r--r--crypto/Kconfig45
-rw-r--r--crypto/aes.c63
-rw-r--r--crypto/anubis.c39
-rw-r--r--crypto/api.c54
-rw-r--r--crypto/blowfish.c3
-rw-r--r--crypto/cast5.c47
-rw-r--r--crypto/cast6.c83
-rw-r--r--crypto/cipher.c5
-rw-r--r--crypto/crc32c.c1
-rw-r--r--crypto/des.c3
-rw-r--r--crypto/internal.h6
-rw-r--r--crypto/khazad.c46
-rw-r--r--crypto/md4.c1
-rw-r--r--crypto/md5.c1
-rw-r--r--crypto/michael_mic.c40
-rw-r--r--crypto/proc.c6
-rw-r--r--crypto/serpent.c2
-rw-r--r--crypto/sha1.c66
-rw-r--r--crypto/sha256.c31
-rw-r--r--crypto/sha512.c54
-rw-r--r--crypto/tcrypt.c4
-rw-r--r--crypto/tcrypt.h64
-rw-r--r--crypto/tea.c98
-rw-r--r--crypto/tgr192.c64
-rw-r--r--crypto/twofish.c13
-rw-r--r--crypto/wp512.c32
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acorn/block/mfmhd.c36
-rw-r--r--drivers/acorn/char/i2c.c1
-rw-r--r--drivers/acorn/char/pcf8583.c5
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/osl.c2
-rw-r--r--drivers/acpi/pci_irq.c7
-rw-r--r--drivers/acpi/processor_idle.c15
-rw-r--r--drivers/acpi/scan.c27
-rw-r--r--drivers/acpi/tables.c4
-rw-r--r--drivers/amba/Makefile2
-rw-r--r--drivers/amba/bus.c (renamed from arch/arm/common/amba.c)3
-rw-r--r--drivers/atm/nicstar.c5
-rw-r--r--drivers/atm/zatm.c12
-rw-r--r--drivers/base/core.c4
-rw-r--r--drivers/base/cpu.c30
-rw-r--r--drivers/base/firmware_class.c1
-rw-r--r--drivers/base/memory.c9
-rw-r--r--drivers/block/DAC960.c41
-rw-r--r--drivers/block/Kconfig2
-rw-r--r--drivers/block/acsi.c26
-rw-r--r--drivers/block/amiflop.c55
-rw-r--r--drivers/block/aoe/aoeblk.c26
-rw-r--r--drivers/block/ataflop.c29
-rw-r--r--drivers/block/cciss.c198
-rw-r--r--drivers/block/cciss.h10
-rw-r--r--drivers/block/cciss_scsi.c2
-rw-r--r--drivers/block/cpqarray.c42
-rw-r--r--drivers/block/floppy.c46
-rw-r--r--drivers/block/loop.c54
-rw-r--r--drivers/block/nbd.c125
-rw-r--r--drivers/block/paride/Kconfig5
-rw-r--r--drivers/block/paride/pd.c34
-rw-r--r--drivers/block/paride/pf.c50
-rw-r--r--drivers/block/pktcdvd.c12
-rw-r--r--drivers/block/ps2esdi.c25
-rw-r--r--drivers/block/rd.c4
-rw-r--r--drivers/block/swim3.c38
-rw-r--r--drivers/block/sx8.c51
-rw-r--r--drivers/block/ub.c2
-rw-r--r--drivers/block/umem.c43
-rw-r--r--drivers/block/viodasd.c78
-rw-r--r--drivers/block/xd.c31
-rw-r--r--drivers/bluetooth/hci_ldisc.c16
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/cdrom/cdu31a.c2
-rw-r--r--drivers/cdrom/cm206.c2
-rw-r--r--drivers/cdrom/viocd.c6
-rw-r--r--drivers/char/Kconfig31
-rw-r--r--drivers/char/Makefile2
-rw-r--r--drivers/char/agp/sworks-agp.c1
-rw-r--r--drivers/char/amiserial.c55
-rw-r--r--drivers/char/cs5535_gpio.c250
-rw-r--r--drivers/char/cyclades.c91
-rw-r--r--drivers/char/drm/Makefile4
-rw-r--r--drivers/char/drm/ati_pcigart.c23
-rw-r--r--drivers/char/drm/drm.h4
-rw-r--r--drivers/char/drm/drmP.h122
-rw-r--r--drivers/char/drm/drm_agpsupport.c133
-rw-r--r--drivers/char/drm/drm_bufs.c49
-rw-r--r--drivers/char/drm/drm_context.c2
-rw-r--r--drivers/char/drm/drm_core.h4
-rw-r--r--drivers/char/drm/drm_drv.c152
-rw-r--r--drivers/char/drm/drm_fops.c317
-rw-r--r--drivers/char/drm/drm_init.c53
-rw-r--r--drivers/char/drm/drm_ioc32.c1
-rw-r--r--drivers/char/drm/drm_ioctl.c27
-rw-r--r--drivers/char/drm/drm_lock.c1
-rw-r--r--drivers/char/drm/drm_memory.c8
-rw-r--r--drivers/char/drm/drm_memory_debug.h269
-rw-r--r--drivers/char/drm/drm_os_linux.h1
-rw-r--r--drivers/char/drm/drm_pciids.h12
-rw-r--r--drivers/char/drm/drm_proc.c16
-rw-r--r--drivers/char/drm/drm_stub.c63
-rw-r--r--drivers/char/drm/drm_sysfs.c68
-rw-r--r--drivers/char/drm/i810_dma.c49
-rw-r--r--drivers/char/drm/i810_drv.c60
-rw-r--r--drivers/char/drm/i810_drv.h10
-rw-r--r--drivers/char/drm/i830_dma.c47
-rw-r--r--drivers/char/drm/i830_drv.c57
-rw-r--r--drivers/char/drm/i830_drv.h8
-rw-r--r--drivers/char/drm/i915_dma.c52
-rw-r--r--drivers/char/drm/i915_drm.h6
-rw-r--r--drivers/char/drm/i915_drv.c66
-rw-r--r--drivers/char/drm/i915_drv.h44
-rw-r--r--drivers/char/drm/i915_ioc32.c1
-rw-r--r--drivers/char/drm/i915_irq.c48
-rw-r--r--drivers/char/drm/i915_mem.c5
-rw-r--r--drivers/char/drm/mga_dma.c158
-rw-r--r--drivers/char/drm/mga_drv.c58
-rw-r--r--drivers/char/drm/mga_drv.h14
-rw-r--r--drivers/char/drm/mga_ioc32.c1
-rw-r--r--drivers/char/drm/mga_state.c26
-rw-r--r--drivers/char/drm/r128_cce.c15
-rw-r--r--drivers/char/drm/r128_drm.h4
-rw-r--r--drivers/char/drm/r128_drv.c48
-rw-r--r--drivers/char/drm/r128_drv.h8
-rw-r--r--drivers/char/drm/r128_ioc32.c1
-rw-r--r--drivers/char/drm/r128_irq.c4
-rw-r--r--drivers/char/drm/r128_state.c42
-rw-r--r--drivers/char/drm/r300_cmdbuf.c38
-rw-r--r--drivers/char/drm/r300_reg.h1
-rw-r--r--drivers/char/drm/radeon_cp.c106
-rw-r--r--drivers/char/drm/radeon_drm.h6
-rw-r--r--drivers/char/drm/radeon_drv.c62
-rw-r--r--drivers/char/drm/radeon_drv.h41
-rw-r--r--drivers/char/drm/radeon_ioc32.c1
-rw-r--r--drivers/char/drm/radeon_state.c246
-rw-r--r--drivers/char/drm/savage_bci.c81
-rw-r--r--drivers/char/drm/savage_drv.c50
-rw-r--r--drivers/char/drm/savage_drv.h29
-rw-r--r--drivers/char/drm/savage_state.c324
-rw-r--r--drivers/char/drm/sis_drm.h25
-rw-r--r--drivers/char/drm/sis_drv.c42
-rw-r--r--drivers/char/drm/sis_drv.h4
-rw-r--r--drivers/char/drm/sis_ds.h7
-rw-r--r--drivers/char/drm/sis_mm.c30
-rw-r--r--drivers/char/drm/tdfx_drv.c42
-rw-r--r--drivers/char/drm/tdfx_drv.h7
-rw-r--r--drivers/char/drm/via_dma.c38
-rw-r--r--drivers/char/drm/via_dmablit.c805
-rw-r--r--drivers/char/drm/via_dmablit.h140
-rw-r--r--drivers/char/drm/via_drm.h60
-rw-r--r--drivers/char/drm/via_drv.c63
-rw-r--r--drivers/char/drm/via_drv.h56
-rw-r--r--drivers/char/drm/via_ds.c9
-rw-r--r--drivers/char/drm/via_irq.c53
-rw-r--r--drivers/char/drm/via_map.c47
-rw-r--r--drivers/char/drm/via_mm.c20
-rw-r--r--drivers/char/drm/via_verifier.c6
-rw-r--r--drivers/char/drm/via_verifier.h4
-rw-r--r--drivers/char/drm/via_video.c7
-rw-r--r--drivers/char/dsp56k.c29
-rw-r--r--drivers/char/epca.c17
-rw-r--r--drivers/char/esp.c67
-rw-r--r--drivers/char/hangcheck-timer.c2
-rw-r--r--drivers/char/hvc_console.c6
-rw-r--r--drivers/char/hvcs.c10
-rw-r--r--drivers/char/hvsi.c2
-rw-r--r--drivers/char/hw_random.c71
-rw-r--r--drivers/char/ip2/i2lib.c2
-rw-r--r--drivers/char/ip2main.c26
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c6
-rw-r--r--drivers/char/ipmi/ipmi_poweroff.c3
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c10
-rw-r--r--drivers/char/isicom.c2084
-rw-r--r--drivers/char/istallion.c45
-rw-r--r--drivers/char/mem.c54
-rw-r--r--drivers/char/mmtimer.c90
-rw-r--r--drivers/char/moxa.c77
-rw-r--r--drivers/char/mxser.c4
-rw-r--r--drivers/char/n_hdlc.c19
-rw-r--r--drivers/char/n_r3964.c10
-rw-r--r--drivers/char/n_tty.c66
-rw-r--r--drivers/char/nvram.c12
-rw-r--r--drivers/char/pcmcia/synclink_cs.c32
-rw-r--r--drivers/char/pty.c4
-rw-r--r--drivers/char/random.c2
-rw-r--r--drivers/char/rio/board.h83
-rw-r--r--drivers/char/rio/bootpkt.h11
-rw-r--r--drivers/char/rio/brates.h9
-rw-r--r--drivers/char/rio/chan.h2
-rw-r--r--drivers/char/rio/cirrus.h178
-rw-r--r--drivers/char/rio/cmd.h5
-rw-r--r--drivers/char/rio/cmdblk.h19
-rw-r--r--drivers/char/rio/cmdpkt.h213
-rw-r--r--drivers/char/rio/control.h7
-rw-r--r--drivers/char/rio/daemon.h162
-rw-r--r--drivers/char/rio/debug.h4
-rw-r--r--drivers/char/rio/defaults.h7
-rw-r--r--drivers/char/rio/eisa.h4
-rw-r--r--drivers/char/rio/enable.h4
-rw-r--r--drivers/char/rio/error.h3
-rw-r--r--drivers/char/rio/errors.h2
-rw-r--r--drivers/char/rio/formpkt.h151
-rw-r--r--drivers/char/rio/func.h51
-rw-r--r--drivers/char/rio/host.h93
-rw-r--r--drivers/char/rio/hosthw.h4
-rw-r--r--drivers/char/rio/link.h152
-rw-r--r--drivers/char/rio/linux_compat.h46
-rw-r--r--drivers/char/rio/list.h6
-rw-r--r--drivers/char/rio/lrt.h5
-rw-r--r--drivers/char/rio/ltt.h7
-rw-r--r--drivers/char/rio/lttwake.h5
-rw-r--r--drivers/char/rio/map.h27
-rw-r--r--drivers/char/rio/mca.h2
-rw-r--r--drivers/char/rio/mesg.h2
-rw-r--r--drivers/char/rio/param.h27
-rw-r--r--drivers/char/rio/parmmap.h79
-rw-r--r--drivers/char/rio/pci.h2
-rw-r--r--drivers/char/rio/phb.h285
-rw-r--r--drivers/char/rio/pkt.h72
-rw-r--r--drivers/char/rio/poll.h27
-rw-r--r--drivers/char/rio/port.h294
-rw-r--r--drivers/char/rio/proto.h132
-rw-r--r--drivers/char/rio/protsts.h2
-rw-r--r--drivers/char/rio/qbuf.h15
-rw-r--r--drivers/char/rio/rio.h15
-rw-r--r--drivers/char/rio/rio_linux.c1522
-rw-r--r--drivers/char/rio/rio_linux.h57
-rw-r--r--drivers/char/rio/rioboard.h204
-rw-r--r--drivers/char/rio/riocmd.c896
-rw-r--r--drivers/char/rio/rioctrl.c3119
-rw-r--r--drivers/char/rio/riodrvr.h128
-rw-r--r--drivers/char/rio/rioinfo.h30
-rw-r--r--drivers/char/rio/riointr.c1510
-rw-r--r--drivers/char/rio/rioioctl.h18
-rw-r--r--drivers/char/rio/rioparam.c563
-rw-r--r--drivers/char/rio/riopcicopy.c6
-rw-r--r--drivers/char/rio/rioroute.c1572
-rw-r--r--drivers/char/rio/riospace.h31
-rw-r--r--drivers/char/rio/riotable.c1019
-rw-r--r--drivers/char/rio/riotime.h4
-rw-r--r--drivers/char/rio/riotty.c1224
-rw-r--r--drivers/char/rio/riotypes.h71
-rw-r--r--drivers/char/rio/riowinif.h934
-rw-r--r--drivers/char/rio/riscos.h2
-rw-r--r--drivers/char/rio/rom.h22
-rw-r--r--drivers/char/rio/route.h61
-rw-r--r--drivers/char/rio/rtahw.h14
-rw-r--r--drivers/char/rio/rup.h39
-rw-r--r--drivers/char/rio/rupstat.h3
-rw-r--r--drivers/char/rio/sam.h13
-rw-r--r--drivers/char/rio/selftest.h48
-rw-r--r--drivers/char/rio/space.h2
-rw-r--r--drivers/char/rio/sysmap.h23
-rw-r--r--drivers/char/rio/timeouts.h7
-rw-r--r--drivers/char/rio/top.h9
-rw-r--r--drivers/char/rio/typdef.h36
-rw-r--r--drivers/char/rio/unixrup.h21
-rw-r--r--drivers/char/riscom8.c47
-rw-r--r--drivers/char/rocket.c19
-rw-r--r--drivers/char/rtc.c14
-rw-r--r--drivers/char/s3c2410-rtc.c2
-rw-r--r--drivers/char/scc.h2
-rw-r--r--drivers/char/selection.c5
-rw-r--r--drivers/char/ser_a2232.c8
-rw-r--r--drivers/char/serial167.c37
-rw-r--r--drivers/char/sonypi.c381
-rw-r--r--drivers/char/specialix.c38
-rw-r--r--drivers/char/stallion.c65
-rw-r--r--drivers/char/sx.c19
-rw-r--r--drivers/char/synclink.c51
-rw-r--r--drivers/char/synclink_gt.c4491
-rw-r--r--drivers/char/synclinkmp.c38
-rw-r--r--drivers/char/sysrq.c19
-rw-r--r--drivers/char/tb0219.c4
-rw-r--r--drivers/char/tlclk.c15
-rw-r--r--drivers/char/tpm/Makefile3
-rw-r--r--drivers/char/tpm/tpm.c3
-rw-r--r--drivers/char/tpm/tpm.h15
-rw-r--r--drivers/char/tpm/tpm_bios.c540
-rw-r--r--drivers/char/tty_io.c266
-rw-r--r--drivers/char/viocons.c45
-rw-r--r--drivers/char/vme_scc.c20
-rw-r--r--drivers/char/vr41xx_giu.c8
-rw-r--r--drivers/char/vr41xx_rtc.c9
-rw-r--r--drivers/char/vt.c33
-rw-r--r--drivers/char/watchdog/Kconfig2
-rw-r--r--drivers/char/watchdog/cpu5wdt.c9
-rw-r--r--drivers/char/watchdog/mpc8xx_wdt.c20
-rw-r--r--drivers/char/watchdog/s3c2410_wdt.c4
-rw-r--r--drivers/char/watchdog/wdt977.c216
-rw-r--r--drivers/connector/cn_proc.c15
-rw-r--r--drivers/crypto/padlock-aes.c26
-rw-r--r--drivers/crypto/padlock.h2
-rw-r--r--drivers/firmware/efivars.c2
-rw-r--r--drivers/hwmon/Kconfig12
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/adm1021.c6
-rw-r--r--drivers/hwmon/adm1025.c10
-rw-r--r--drivers/hwmon/adm1026.c10
-rw-r--r--drivers/hwmon/adm1031.c6
-rw-r--r--drivers/hwmon/adm9240.c6
-rw-r--r--drivers/hwmon/asb100.c6
-rw-r--r--drivers/hwmon/atxp1.c6
-rw-r--r--drivers/hwmon/ds1621.c6
-rw-r--r--drivers/hwmon/fscher.c6
-rw-r--r--drivers/hwmon/fscpos.c6
-rw-r--r--drivers/hwmon/gl518sm.c6
-rw-r--r--drivers/hwmon/gl520sm.c6
-rw-r--r--drivers/hwmon/hwmon-vid.c69
-rw-r--r--drivers/hwmon/it87.c18
-rw-r--r--drivers/hwmon/lm63.c6
-rw-r--r--drivers/hwmon/lm75.c6
-rw-r--r--drivers/hwmon/lm77.c6
-rw-r--r--drivers/hwmon/lm78.c13
-rw-r--r--drivers/hwmon/lm80.c6
-rw-r--r--drivers/hwmon/lm83.c6
-rw-r--r--drivers/hwmon/lm85.c50
-rw-r--r--drivers/hwmon/lm87.c6
-rw-r--r--drivers/hwmon/lm90.c6
-rw-r--r--drivers/hwmon/lm92.c6
-rw-r--r--drivers/hwmon/max1619.c6
-rw-r--r--drivers/hwmon/pc87360.c7
-rw-r--r--drivers/hwmon/sis5595.c8
-rw-r--r--drivers/hwmon/smsc47b397.c8
-rw-r--r--drivers/hwmon/smsc47m1.c7
-rw-r--r--drivers/hwmon/via686a.c8
-rw-r--r--drivers/hwmon/vt8231.c862
-rw-r--r--drivers/hwmon/w83627ehf.c7
-rw-r--r--drivers/hwmon/w83627hf.c23
-rw-r--r--drivers/hwmon/w83781d.c13
-rw-r--r--drivers/hwmon/w83792d.c83
-rw-r--r--drivers/hwmon/w83l785ts.c6
-rw-r--r--drivers/i2c/busses/Kconfig24
-rw-r--r--drivers/i2c/busses/Makefile3
-rw-r--r--drivers/i2c/busses/i2c-i801.c6
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c1
-rw-r--r--drivers/i2c/busses/i2c-isa.c10
-rw-r--r--drivers/i2c/busses/i2c-keywest.c751
-rw-r--r--drivers/i2c/busses/i2c-keywest.h108
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c33
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c2
-rw-r--r--drivers/i2c/busses/i2c-parport.h12
-rw-r--r--drivers/i2c/busses/i2c-pmac-smu.c315
-rw-r--r--drivers/i2c/busses/i2c-powermac.c290
-rw-r--r--drivers/i2c/busses/i2c-pxa.c10
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c4
-rw-r--r--drivers/i2c/chips/ds1337.c43
-rw-r--r--drivers/i2c/chips/ds1374.c6
-rw-r--r--drivers/i2c/chips/eeprom.c6
-rw-r--r--drivers/i2c/chips/isp1301_omap.c6
-rw-r--r--drivers/i2c/chips/m41t00.c6
-rw-r--r--drivers/i2c/chips/max6875.c6
-rw-r--r--drivers/i2c/chips/pca9539.c6
-rw-r--r--drivers/i2c/chips/pcf8574.c6
-rw-r--r--drivers/i2c/chips/pcf8591.c6
-rw-r--r--drivers/i2c/chips/rtc8564.c43
-rw-r--r--drivers/i2c/chips/tps65010.c17
-rw-r--r--drivers/i2c/chips/x1205.c6
-rw-r--r--drivers/i2c/i2c-core.c70
-rw-r--r--drivers/i2c/i2c-dev.c63
-rw-r--r--drivers/ide/ide-cd.c16
-rw-r--r--drivers/ide/ide-disk.c149
-rw-r--r--drivers/ide/ide-floppy.c12
-rw-r--r--drivers/ide/ide-io.c20
-rw-r--r--drivers/ide/ide-probe.c4
-rw-r--r--drivers/ide/ide.c21
-rw-r--r--drivers/ide/legacy/hd.c24
-rw-r--r--drivers/ide/pci/pdc202xx_new.c2
-rw-r--r--drivers/ide/pci/serverworks.c2
-rw-r--r--drivers/ide/pci/via82cxxx.c2
-rw-r--r--drivers/ide/ppc/pmac.c6
-rw-r--r--drivers/ieee1394/Kconfig23
-rw-r--r--drivers/ieee1394/Makefile2
-rw-r--r--drivers/ieee1394/amdtp.c1
-rw-r--r--drivers/ieee1394/csr1212.c21
-rw-r--r--drivers/ieee1394/csr1212.h2
-rw-r--r--drivers/ieee1394/dma.c73
-rw-r--r--drivers/ieee1394/dv1394.c14
-rw-r--r--drivers/ieee1394/eth1394.c20
-rw-r--r--drivers/ieee1394/highlevel.c18
-rw-r--r--drivers/ieee1394/hosts.c30
-rw-r--r--drivers/ieee1394/hosts.h162
-rw-r--r--drivers/ieee1394/ieee1394-ioctl.h8
-rw-r--r--drivers/ieee1394/ieee1394.h19
-rw-r--r--drivers/ieee1394/ieee1394_core.c827
-rw-r--r--drivers/ieee1394/ieee1394_core.h100
-rw-r--r--drivers/ieee1394/ieee1394_transactions.c389
-rw-r--r--drivers/ieee1394/iso.c102
-rw-r--r--drivers/ieee1394/nodemgr.c50
-rw-r--r--drivers/ieee1394/nodemgr.h18
-rw-r--r--drivers/ieee1394/ohci1394.c43
-rw-r--r--drivers/ieee1394/ohci1394.h4
-rw-r--r--drivers/ieee1394/pcilynx.c2
-rw-r--r--drivers/ieee1394/raw1394.c79
-rw-r--r--drivers/ieee1394/sbp2.c1040
-rw-r--r--drivers/ieee1394/sbp2.h70
-rw-r--r--drivers/ieee1394/video1394.c107
-rw-r--r--drivers/infiniband/core/cm.c45
-rw-r--r--drivers/infiniband/core/device.c23
-rw-r--r--drivers/infiniband/core/sysfs.c22
-rw-r--r--drivers/infiniband/core/ucm.c23
-rw-r--r--drivers/infiniband/core/user_mad.c4
-rw-r--r--drivers/infiniband/core/uverbs.h5
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c173
-rw-r--r--drivers/infiniband/core/uverbs_main.c8
-rw-r--r--drivers/infiniband/core/verbs.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_av.c10
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c19
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cq.c23
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c32
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mcg.c54
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c132
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c267
-rw-r--r--drivers/infiniband/hw/mthca/mthca_srq.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c31
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c105
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c10
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c24
-rw-r--r--drivers/input/evdev.c8
-rw-r--r--drivers/input/joystick/amijoy.c4
-rw-r--r--drivers/input/keyboard/corgikbd.c6
-rw-r--r--drivers/input/keyboard/spitzkbd.c27
-rw-r--r--drivers/input/misc/hp_sdc_rtc.c9
-rw-r--r--drivers/input/mouse/amimouse.c6
-rw-r--r--drivers/input/serio/ambakmi.c15
-rw-r--r--drivers/input/serio/sa1111ps2.c1
-rw-r--r--drivers/input/serio/serport.c13
-rw-r--r--drivers/isdn/act2000/act2000.h6
-rw-r--r--drivers/isdn/act2000/capi.h90
-rw-r--r--drivers/isdn/capi/capi.c3
-rw-r--r--drivers/isdn/capi/capifs.c8
-rw-r--r--drivers/isdn/hardware/eicon/os_4bri.c3
-rw-r--r--drivers/isdn/hardware/eicon/os_bri.c1
-rw-r--r--drivers/isdn/hardware/eicon/os_pri.c1
-rw-r--r--drivers/isdn/hisax/Kconfig10
-rw-r--r--drivers/isdn/hisax/hisax.h18
-rw-r--r--drivers/isdn/hisax/hisax_fcpcipnp.h18
-rw-r--r--drivers/isdn/i4l/isdn_common.c112
-rw-r--r--drivers/isdn/i4l/isdn_common.h1
-rw-r--r--drivers/isdn/i4l/isdn_tty.c75
-rw-r--r--drivers/isdn/sc/command.c1
-rw-r--r--drivers/macintosh/Kconfig10
-rw-r--r--drivers/macintosh/adb-iop.c2
-rw-r--r--drivers/macintosh/macio-adb.c13
-rw-r--r--drivers/macintosh/macio_asic.c254
-rw-r--r--drivers/macintosh/mediabay.c8
-rw-r--r--drivers/macintosh/smu.c50
-rw-r--r--drivers/macintosh/therm_adt746x.c45
-rw-r--r--drivers/macintosh/therm_pm72.c17
-rw-r--r--drivers/macintosh/therm_windtunnel.c6
-rw-r--r--drivers/macintosh/via-cuda.c52
-rw-r--r--drivers/macintosh/via-macii.c4
-rw-r--r--drivers/macintosh/via-maciisi.c22
-rw-r--r--drivers/macintosh/via-pmu.c364
-rw-r--r--drivers/macintosh/via-pmu68k.c4
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c63
-rw-r--r--drivers/macintosh/windfarm_pm81.c2
-rw-r--r--drivers/macintosh/windfarm_smu_controls.c1
-rw-r--r--drivers/macintosh/windfarm_smu_sensors.c1
-rw-r--r--drivers/md/bitmap.c114
-rw-r--r--drivers/md/dm-crypt.c5
-rw-r--r--drivers/md/dm-io.h3
-rw-r--r--drivers/md/dm-ioctl.c21
-rw-r--r--drivers/md/dm-log.c2
-rw-r--r--drivers/md/dm-raid1.c13
-rw-r--r--drivers/md/dm-snap.c25
-rw-r--r--drivers/md/dm.c96
-rw-r--r--drivers/md/dm.h5
-rw-r--r--drivers/md/faulty.c9
-rw-r--r--drivers/md/kcopyd.c3
-rw-r--r--drivers/md/linear.c14
-rw-r--r--drivers/md/md.c926
-rw-r--r--drivers/md/multipath.c29
-rw-r--r--drivers/md/raid0.c32
-rw-r--r--drivers/md/raid1.c726
-rw-r--r--drivers/md/raid10.c544
-rw-r--r--drivers/md/raid5.c174
-rw-r--r--drivers/md/raid6main.c348
-rw-r--r--drivers/media/common/saa7146_core.c9
-rw-r--r--drivers/media/common/saa7146_fops.c44
-rw-r--r--drivers/media/common/saa7146_hlp.c13
-rw-r--r--drivers/media/common/saa7146_vbi.c6
-rw-r--r--drivers/media/common/saa7146_video.c9
-rw-r--r--drivers/media/dvb/b2c2/flexcop-fe-tuner.c33
-rw-r--r--drivers/media/dvb/b2c2/flexcop-reg.h4
-rw-r--r--drivers/media/dvb/b2c2/flexcop.c6
-rw-r--r--drivers/media/dvb/bt8xx/dst.c52
-rw-r--r--drivers/media/dvb/bt8xx/dst_ca.c3
-rw-r--r--drivers/media/dvb/bt8xx/dvb-bt8xx.c19
-rw-r--r--drivers/media/dvb/cinergyT2/Kconfig20
-rw-r--r--drivers/media/dvb/cinergyT2/cinergyT2.c84
-rw-r--r--drivers/media/dvb/dvb-core/Kconfig2
-rw-r--r--drivers/media/dvb/dvb-core/Makefile2
-rw-r--r--drivers/media/dvb/dvb-core/dvb_ca_en50221.c12
-rw-r--r--drivers/media/dvb/dvb-core/dvb_filter.c16
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c318
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.h16
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.c6
-rw-r--r--drivers/media/dvb/dvb-core/dvb_ringbuffer.c32
-rw-r--r--drivers/media/dvb/dvb-core/dvb_ringbuffer.h8
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.c22
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.h2
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig24
-rw-r--r--drivers/media/dvb/dvb-usb/cxusb.c442
-rw-r--r--drivers/media/dvb/dvb-usb/cxusb.h2
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-mb.c46
-rw-r--r--drivers/media/dvb/dvb-usb/digitv.c2
-rw-r--r--drivers/media/dvb/dvb-usb/dtt200u-fe.c3
-rw-r--r--drivers/media/dvb/dvb-usb/dtt200u.c6
-rw-r--r--drivers/media/dvb/dvb-usb/dtt200u.h31
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-common.h2
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-firmware.c154
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-i2c.c8
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h14
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-init.c57
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-urb.c9
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb.h29
-rw-r--r--drivers/media/dvb/dvb-usb/nova-t-usb2.c7
-rw-r--r--drivers/media/dvb/dvb-usb/vp702x-fe.c5
-rw-r--r--drivers/media/dvb/dvb-usb/vp702x.h43
-rw-r--r--drivers/media/dvb/dvb-usb/vp7045-fe.c3
-rw-r--r--drivers/media/dvb/dvb-usb/vp7045.c2
-rw-r--r--drivers/media/dvb/frontends/Kconfig46
-rw-r--r--drivers/media/dvb/frontends/Makefile1
-rw-r--r--drivers/media/dvb/frontends/bcm3510.c9
-rw-r--r--drivers/media/dvb/frontends/cx22702.c22
-rw-r--r--drivers/media/dvb/frontends/cx24110.c23
-rw-r--r--drivers/media/dvb/frontends/cx24123.c889
-rw-r--r--drivers/media/dvb/frontends/cx24123.h51
-rw-r--r--drivers/media/dvb/frontends/dib3000mb.c3
-rw-r--r--drivers/media/dvb/frontends/dib3000mc.c3
-rw-r--r--drivers/media/dvb/frontends/dvb-pll.c32
-rw-r--r--drivers/media/dvb/frontends/dvb-pll.h3
-rw-r--r--drivers/media/dvb/frontends/lgdt330x.c6
-rw-r--r--drivers/media/dvb/frontends/mt312.c3
-rw-r--r--drivers/media/dvb/frontends/mt352.c3
-rw-r--r--drivers/media/dvb/frontends/nxt2002.c3
-rw-r--r--drivers/media/dvb/frontends/nxt200x.c3
-rw-r--r--drivers/media/dvb/frontends/nxt6000.c10
-rw-r--r--drivers/media/dvb/frontends/or51211.c5
-rw-r--r--drivers/media/dvb/frontends/s5h1420.c4
-rw-r--r--drivers/media/dvb/frontends/sp8870.c3
-rw-r--r--drivers/media/dvb/frontends/sp887x.c5
-rw-r--r--drivers/media/dvb/frontends/stv0299.c12
-rw-r--r--drivers/media/dvb/frontends/stv0299.h1
-rw-r--r--drivers/media/dvb/frontends/tda10021.c4
-rw-r--r--drivers/media/dvb/frontends/tda1004x.c141
-rw-r--r--drivers/media/dvb/pluto2/Kconfig2
-rw-r--r--drivers/media/dvb/pluto2/pluto2.c3
-rw-r--r--drivers/media/dvb/ttpci/Kconfig7
-rw-r--r--drivers/media/dvb/ttpci/Makefile2
-rw-r--r--drivers/media/dvb/ttpci/av7110.c137
-rw-r--r--drivers/media/dvb/ttpci/av7110.h17
-rw-r--r--drivers/media/dvb/ttpci/av7110_av.c12
-rw-r--r--drivers/media/dvb/ttpci/av7110_hw.c7
-rw-r--r--drivers/media/dvb/ttpci/av7110_hw.h3
-rw-r--r--drivers/media/dvb/ttpci/av7110_ir.c14
-rw-r--r--drivers/media/dvb/ttpci/av7110_v4l.c118
-rw-r--r--drivers/media/dvb/ttpci/budget-av.c313
-rw-r--r--drivers/media/dvb/ttpci/budget-core.c4
-rw-r--r--drivers/media/dvb/ttpci/budget.c13
-rw-r--r--drivers/media/dvb/ttpci/budget.h3
-rw-r--r--drivers/media/dvb/ttpci/ttpci-eeprom.c2
-rw-r--r--drivers/media/dvb/ttusb-budget/Kconfig2
-rw-r--r--drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c4
-rw-r--r--drivers/media/dvb/ttusb-dec/Kconfig13
-rw-r--r--drivers/media/dvb/ttusb-dec/ttusb_dec.c24
-rw-r--r--drivers/media/dvb/ttusb-dec/ttusbdecfe.c53
-rw-r--r--drivers/media/radio/miropcm20-radio.c1
-rw-r--r--drivers/media/radio/radio-aimslab.c1
-rw-r--r--drivers/media/radio/radio-aztech.c1
-rw-r--r--drivers/media/radio/radio-cadet.c1
-rw-r--r--drivers/media/radio/radio-gemtek-pci.c6
-rw-r--r--drivers/media/radio/radio-gemtek.c1
-rw-r--r--drivers/media/radio/radio-maestro.c391
-rw-r--r--drivers/media/radio/radio-maxiradio.c3
-rw-r--r--drivers/media/radio/radio-rtrack2.c1
-rw-r--r--drivers/media/radio/radio-sf16fmi.c1
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c3
-rw-r--r--drivers/media/radio/radio-terratec.c1
-rw-r--r--drivers/media/radio/radio-trust.c1
-rw-r--r--drivers/media/radio/radio-typhoon.c1
-rw-r--r--drivers/media/radio/radio-zoltrix.c1
-rw-r--r--drivers/media/video/Kconfig13
-rw-r--r--drivers/media/video/Makefile10
-rw-r--r--drivers/media/video/adv7170.c13
-rw-r--r--drivers/media/video/adv7175.c13
-rw-r--r--drivers/media/video/arv.c1
-rw-r--r--drivers/media/video/bt819.c14
-rw-r--r--drivers/media/video/bt832.c78
-rw-r--r--drivers/media/video/bt856.c13
-rw-r--r--drivers/media/video/bttv-cards.c359
-rw-r--r--drivers/media/video/bttv-driver.c68
-rw-r--r--drivers/media/video/bttv-gpio.c21
-rw-r--r--drivers/media/video/bttv-i2c.c17
-rw-r--r--drivers/media/video/bttv-input.c (renamed from drivers/media/video/ir-kbd-gpio.c)262
-rw-r--r--drivers/media/video/bttv-vbi.c57
-rw-r--r--drivers/media/video/bttv.h38
-rw-r--r--drivers/media/video/bttvp.h13
-rw-r--r--drivers/media/video/bw-qcam.c1
-rw-r--r--drivers/media/video/c-qcam.c1
-rw-r--r--drivers/media/video/compat_ioctl32.c879
-rw-r--r--drivers/media/video/cpia.c1
-rw-r--r--drivers/media/video/cpia_pp.c33
-rw-r--r--drivers/media/video/cpia_usb.c4
-rw-r--r--drivers/media/video/cs53l32a.c101
-rw-r--r--drivers/media/video/cx25840/cx25840-audio.c91
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c396
-rw-r--r--drivers/media/video/cx25840/cx25840-firmware.c36
-rw-r--r--drivers/media/video/cx25840/cx25840-vbi.c4
-rw-r--r--drivers/media/video/cx25840/cx25840.h80
-rw-r--r--drivers/media/video/cx88/Kconfig25
-rw-r--r--drivers/media/video/cx88/Makefile4
-rw-r--r--drivers/media/video/cx88/cx88-alsa.c848
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c24
-rw-r--r--drivers/media/video/cx88/cx88-cards.c232
-rw-r--r--drivers/media/video/cx88/cx88-core.c70
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c218
-rw-r--r--drivers/media/video/cx88/cx88-i2c.c15
-rw-r--r--drivers/media/video/cx88/cx88-input.c143
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c5
-rw-r--r--drivers/media/video/cx88/cx88-tvaudio.c10
-rw-r--r--drivers/media/video/cx88/cx88-video.c100
-rw-r--r--drivers/media/video/cx88/cx88-vp3054-i2c.c173
-rw-r--r--drivers/media/video/cx88/cx88-vp3054-i2c.h35
-rw-r--r--drivers/media/video/cx88/cx88.h17
-rw-r--r--drivers/media/video/dpc7146.c3
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c8
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c61
-rw-r--r--drivers/media/video/em28xx/em28xx-i2c.c4
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c170
-rw-r--r--drivers/media/video/em28xx/em28xx.h6
-rw-r--r--drivers/media/video/hexium_gemini.c3
-rw-r--r--drivers/media/video/hexium_orion.c3
-rw-r--r--drivers/media/video/indycam.c13
-rw-r--r--drivers/media/video/ir-kbd-i2c.c21
-rw-r--r--drivers/media/video/meye.c1
-rw-r--r--drivers/media/video/msp3400-driver.c1274
-rw-r--r--drivers/media/video/msp3400-kthreads.c1010
-rw-r--r--drivers/media/video/msp3400.c2229
-rw-r--r--drivers/media/video/msp3400.h102
-rw-r--r--drivers/media/video/mt20xx.c10
-rw-r--r--drivers/media/video/mxb.c4
-rw-r--r--drivers/media/video/ovcamchip/ov6x20.c3
-rw-r--r--drivers/media/video/ovcamchip/ov6x30.c3
-rw-r--r--drivers/media/video/ovcamchip/ov76be.c3
-rw-r--r--drivers/media/video/ovcamchip/ov7x10.c3
-rw-r--r--drivers/media/video/ovcamchip/ov7x20.c3
-rw-r--r--drivers/media/video/ovcamchip/ovcamchip_core.c9
-rw-r--r--drivers/media/video/pms.c1
-rw-r--r--drivers/media/video/saa5246a.c16
-rw-r--r--drivers/media/video/saa5249.c19
-rw-r--r--drivers/media/video/saa6588.c17
-rw-r--r--drivers/media/video/saa7110.c13
-rw-r--r--drivers/media/video/saa7111.c13
-rw-r--r--drivers/media/video/saa7114.c13
-rw-r--r--drivers/media/video/saa7115.c331
-rw-r--r--drivers/media/video/saa711x.c15
-rw-r--r--drivers/media/video/saa7127.c93
-rw-r--r--drivers/media/video/saa7134/Kconfig2
-rw-r--r--drivers/media/video/saa7134/saa6752hs.c16
-rw-r--r--drivers/media/video/saa7134/saa7134-alsa.c2
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c85
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c141
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c1
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c3
-rw-r--r--drivers/media/video/saa7134/saa7134-i2c.c7
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c39
-rw-r--r--drivers/media/video/saa7134/saa7134-oss.c40
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c10
-rw-r--r--drivers/media/video/saa7134/saa7134.h10
-rw-r--r--drivers/media/video/saa7146.h1
-rw-r--r--drivers/media/video/saa7185.c13
-rw-r--r--drivers/media/video/saa7191.c13
-rw-r--r--drivers/media/video/stradis.c1137
-rw-r--r--drivers/media/video/tda7432.c50
-rw-r--r--drivers/media/video/tda8290.c6
-rw-r--r--drivers/media/video/tda9840.c8
-rw-r--r--drivers/media/video/tda9875.c14
-rw-r--r--drivers/media/video/tda9887.c234
-rw-r--r--drivers/media/video/tea5767.c5
-rw-r--r--drivers/media/video/tea6415c.c8
-rw-r--r--drivers/media/video/tea6420.c11
-rw-r--r--drivers/media/video/tuner-3036.c9
-rw-r--r--drivers/media/video/tuner-core.c146
-rw-r--r--drivers/media/video/tuner-simple.c889
-rw-r--r--drivers/media/video/tvaudio.c185
-rw-r--r--drivers/media/video/tveeprom.c56
-rw-r--r--drivers/media/video/tvmixer.c26
-rw-r--r--drivers/media/video/tvp5150.c545
-rw-r--r--drivers/media/video/v4l1-compat.c35
-rw-r--r--drivers/media/video/v4l2-common.c219
-rw-r--r--drivers/media/video/video-buf.c9
-rw-r--r--drivers/media/video/videocodec.c11
-rw-r--r--drivers/media/video/videodev.c15
-rw-r--r--drivers/media/video/vino.c4
-rw-r--r--drivers/media/video/vpx3220.c14
-rw-r--r--drivers/media/video/w9966.c1
-rw-r--r--drivers/media/video/wm8775.c98
-rw-r--r--drivers/media/video/zoran_card.c3
-rw-r--r--drivers/media/video/zoran_driver.c18
-rw-r--r--drivers/media/video/zr36016.c3
-rw-r--r--drivers/media/video/zr36050.c3
-rw-r--r--drivers/media/video/zr36060.c3
-rw-r--r--drivers/media/video/zr36120.c1
-rw-r--r--drivers/message/fusion/mptctl.c2
-rw-r--r--drivers/message/i2o/Kconfig12
-rw-r--r--drivers/message/i2o/README.ioctl2
-rw-r--r--drivers/message/i2o/bus-osm.c23
-rw-r--r--drivers/message/i2o/config-osm.c2
-rw-r--r--drivers/message/i2o/core.h20
-rw-r--r--drivers/message/i2o/device.c339
-rw-r--r--drivers/message/i2o/driver.c12
-rw-r--r--drivers/message/i2o/exec-osm.c114
-rw-r--r--drivers/message/i2o/i2o_block.c208
-rw-r--r--drivers/message/i2o/i2o_config.c196
-rw-r--r--drivers/message/i2o/i2o_lan.h38
-rw-r--r--drivers/message/i2o/i2o_proc.c2
-rw-r--r--drivers/message/i2o/i2o_scsi.c89
-rw-r--r--drivers/message/i2o/iop.c356
-rw-r--r--drivers/message/i2o/pci.c7
-rw-r--r--drivers/mfd/ucb1x00-core.c32
-rw-r--r--drivers/mfd/ucb1x00-ts.c1
-rw-r--r--drivers/mmc/mmc.c11
-rw-r--r--drivers/mmc/mmc_block.c223
-rw-r--r--drivers/mmc/mmci.c30
-rw-r--r--drivers/mmc/mmci.h4
-rw-r--r--drivers/mmc/wbsd.c552
-rw-r--r--drivers/mtd/maps/Kconfig8
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/epxa10db-flash.c179
-rw-r--r--drivers/mtd/mtd_blkdevs.c25
-rw-r--r--drivers/mtd/nand/au1550nd.c2
-rw-r--r--drivers/mtd/nand/rtc_from4.c2
-rw-r--r--drivers/mtd/nand/s3c2410.c4
-rw-r--r--drivers/mtd/nand/spia.c2
-rw-r--r--drivers/mtd/onenand/generic.c1
-rw-r--r--drivers/mtd/rfd_ftl.c1
-rw-r--r--drivers/net/3c503.c16
-rw-r--r--drivers/net/3c527.h50
-rw-r--r--drivers/net/8139too.c2
-rw-r--r--drivers/net/Kconfig23
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/ac3200.c16
-rw-r--r--drivers/net/arm/Kconfig13
-rw-r--r--drivers/net/arm/Makefile1
-rw-r--r--drivers/net/arm/am79c961a.c12
-rw-r--r--drivers/net/arm/ether00.c1017
-rw-r--r--drivers/net/arm/ether3.c1
-rw-r--r--drivers/net/arm/etherh.c1
-rw-r--r--drivers/net/bonding/bond_alb.c4
-rw-r--r--drivers/net/bonding/bonding.h12
-rw-r--r--drivers/net/cs89x0.c140
-rw-r--r--drivers/net/cs89x0.h19
-rw-r--r--drivers/net/e100.c32
-rw-r--r--drivers/net/e1000/e1000_param.c10
-rw-r--r--drivers/net/e2100.c14
-rw-r--r--drivers/net/es3210.c14
-rw-r--r--drivers/net/forcedeth.c164
-rw-r--r--drivers/net/gianfar.c6
-rw-r--r--drivers/net/gianfar.h4
-rw-r--r--drivers/net/gianfar_mii.c5
-rw-r--r--drivers/net/gianfar_sysfs.c2
-rw-r--r--drivers/net/hamradio/6pack.c7
-rw-r--r--drivers/net/hamradio/mkiss.c8
-rw-r--r--drivers/net/hp-plus.c12
-rw-r--r--drivers/net/hp.c12
-rw-r--r--drivers/net/hp100.c2
-rw-r--r--drivers/net/hplance.c2
-rw-r--r--drivers/net/ibm_emac/ibm_emac.h3
-rw-r--r--drivers/net/ibm_emac/ibm_emac_core.c2
-rw-r--r--drivers/net/ifb.c294
-rw-r--r--drivers/net/irda/Kconfig3
-rw-r--r--drivers/net/irda/Makefile2
-rw-r--r--drivers/net/irda/irport.c15
-rw-r--r--drivers/net/irda/irtty-sir.c18
-rw-r--r--drivers/net/irda/sir-dev.h2
-rw-r--r--drivers/net/irda/sir_core.c56
-rw-r--r--drivers/net/irda/sir_dev.c10
-rw-r--r--drivers/net/irda/sir_dongle.c2
-rw-r--r--drivers/net/irda/sir_kthread.c11
-rw-r--r--drivers/net/irda/vlsi_ir.h4
-rw-r--r--drivers/net/iseries_veth.c4
-rw-r--r--drivers/net/lance.c22
-rw-r--r--drivers/net/lne390.c14
-rw-r--r--drivers/net/mac8390.c31
-rw-r--r--drivers/net/mv643xx_eth.c2
-rw-r--r--drivers/net/ne.c18
-rw-r--r--drivers/net/ne2.c16
-rw-r--r--drivers/net/pci-skeleton.c2
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c2
-rw-r--r--drivers/net/phy/Kconfig2
-rw-r--r--drivers/net/phy/mdio_bus.c2
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/plip.c2
-rw-r--r--drivers/net/ppp_async.c9
-rw-r--r--drivers/net/ppp_synctty.c9
-rw-r--r--drivers/net/r8169.c2
-rw-r--r--drivers/net/sis190.c2
-rw-r--r--drivers/net/sk98lin/skdim.c2
-rw-r--r--drivers/net/sk98lin/skge.c131
-rw-r--r--drivers/net/sk98lin/skgepnmi.c8
-rw-r--r--drivers/net/slip.c11
-rw-r--r--drivers/net/smc-ultra.c24
-rw-r--r--drivers/net/smc91x.c5
-rw-r--r--drivers/net/smc91x.h18
-rw-r--r--drivers/net/sun3lance.c2
-rw-r--r--drivers/net/tulip/tulip_core.c2
-rw-r--r--drivers/net/tulip/uli526x.c6
-rw-r--r--drivers/net/via-velocity.c2
-rw-r--r--drivers/net/wan/pc300_tty.c2
-rw-r--r--drivers/net/wan/sdla.c6
-rw-r--r--drivers/net/wan/x25_asy.c7
-rw-r--r--drivers/net/wd.c14
-rw-r--r--drivers/net/wireless/Kconfig2
-rw-r--r--drivers/net/wireless/atmel.c227
-rw-r--r--drivers/net/wireless/ipw2100.c5
-rw-r--r--drivers/net/wireless/strip.c10
-rw-r--r--drivers/oprofile/buffer_sync.c30
-rw-r--r--drivers/oprofile/cpu_buffer.c3
-rw-r--r--drivers/oprofile/event_buffer.c1
-rw-r--r--drivers/parisc/dino.c30
-rw-r--r--drivers/parisc/eisa.c4
-rw-r--r--drivers/parisc/eisa_eeprom.c2
-rw-r--r--drivers/parisc/lasi.c2
-rw-r--r--drivers/parisc/lba_pci.c115
-rw-r--r--drivers/parisc/led.c21
-rw-r--r--drivers/parisc/pdc_stable.c46
-rw-r--r--drivers/parisc/power.c12
-rw-r--r--drivers/parport/Kconfig5
-rw-r--r--drivers/parport/daisy.c51
-rw-r--r--drivers/parport/ieee1284_ops.c62
-rw-r--r--drivers/parport/parport_pc.c36
-rw-r--r--drivers/parport/parport_serial.c2
-rw-r--r--drivers/parport/probe.c199
-rw-r--r--drivers/parport/share.c1
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c6
-rw-r--r--drivers/pci/hotplug/cpqphp.h8
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c127
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c28
-rw-r--r--drivers/pci/hotplug/cpqphp_sysfs.c138
-rw-r--r--drivers/pci/hotplug/ibmphp_pci.c2
-rw-r--r--drivers/pci/hotplug/pciehp.h1
-rw-r--r--drivers/pci/hotplug/pciehp_core.c92
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c22
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c52
-rw-r--r--drivers/pci/hotplug/pciehprm_acpi.c13
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c27
-rw-r--r--drivers/pci/hotplug/rpaphp_pci.c47
-rw-r--r--drivers/pci/hotplug/shpchp.h4
-rw-r--r--drivers/pci/hotplug/shpchp_core.c16
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c37
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c138
-rw-r--r--drivers/pci/hotplug/shpchp_pci.c19
-rw-r--r--drivers/pci/pci.c7
-rw-r--r--drivers/pci/pci.h5
-rw-r--r--drivers/pci/pcie/portdrv_core.c4
-rw-r--r--drivers/pci/probe.c49
-rw-r--r--drivers/pci/proc.c7
-rw-r--r--drivers/pci/quirks.c26
-rw-r--r--drivers/pci/remove.c3
-rw-r--r--drivers/pcmcia/ds.c37
-rw-r--r--drivers/pcmcia/pxa2xx_mainstone.c15
-rw-r--r--drivers/pcmcia/pxa2xx_sharpsl.c43
-rw-r--r--drivers/pcmcia/socket_sysfs.c25
-rw-r--r--drivers/pcmcia/vrc4171_card.c4
-rw-r--r--drivers/pcmcia/vrc4173_cardu.c6
-rw-r--r--drivers/pcmcia/vrc4173_cardu.h2
-rw-r--r--drivers/pnp/pnpbios/bioscalls.c45
-rw-r--r--drivers/rapidio/rio-scan.c2
-rw-r--r--drivers/rapidio/rio-sysfs.c1
-rw-r--r--drivers/rapidio/rio.c1
-rw-r--r--drivers/s390/Makefile2
-rw-r--r--drivers/s390/block/Kconfig8
-rw-r--r--drivers/s390/block/dasd.c58
-rw-r--r--drivers/s390/block/dasd_diag.c11
-rw-r--r--drivers/s390/block/dasd_diag.h31
-rw-r--r--drivers/s390/block/dasd_eckd.c9
-rw-r--r--drivers/s390/block/dasd_fba.c6
-rw-r--r--drivers/s390/block/dasd_int.h4
-rw-r--r--drivers/s390/block/dasd_ioctl.c45
-rw-r--r--drivers/s390/block/dcssblk.c2
-rw-r--r--drivers/s390/block/xpram.c22
-rw-r--r--drivers/s390/char/con3215.c25
-rw-r--r--drivers/s390/char/fs3270.c20
-rw-r--r--drivers/s390/char/sclp_cpi.c2
-rw-r--r--drivers/s390/char/sclp_quiesce.c2
-rw-r--r--drivers/s390/char/sclp_tty.c21
-rw-r--r--drivers/s390/char/sclp_vt220.c12
-rw-r--r--drivers/s390/char/tape_block.c4
-rw-r--r--drivers/s390/char/tape_char.c20
-rw-r--r--drivers/s390/char/vmwatchdog.c2
-rw-r--r--drivers/s390/cio/blacklist.c234
-rw-r--r--drivers/s390/cio/blacklist.h2
-rw-r--r--drivers/s390/cio/ccwgroup.c6
-rw-r--r--drivers/s390/cio/chsc.c473
-rw-r--r--drivers/s390/cio/chsc.h13
-rw-r--r--drivers/s390/cio/cio.c168
-rw-r--r--drivers/s390/cio/cio.h11
-rw-r--r--drivers/s390/cio/cmf.c8
-rw-r--r--drivers/s390/cio/css.c297
-rw-r--r--drivers/s390/cio/css.h43
-rw-r--r--drivers/s390/cio/device.c47
-rw-r--r--drivers/s390/cio/device.h1
-rw-r--r--drivers/s390/cio/device_fsm.c29
-rw-r--r--drivers/s390/cio/device_id.c26
-rw-r--r--drivers/s390/cio/device_ops.c4
-rw-r--r--drivers/s390/cio/device_pgid.c56
-rw-r--r--drivers/s390/cio/device_status.c14
-rw-r--r--drivers/s390/cio/ioasm.h86
-rw-r--r--drivers/s390/cio/qdio.c713
-rw-r--r--drivers/s390/cio/qdio.h144
-rw-r--r--drivers/s390/cio/schid.h26
-rw-r--r--drivers/s390/crypto/z90common.h9
-rw-r--r--drivers/s390/crypto/z90crypt.h13
-rw-r--r--drivers/s390/crypto/z90hardware.c309
-rw-r--r--drivers/s390/crypto/z90main.c112
-rw-r--r--drivers/s390/net/Kconfig2
-rw-r--r--drivers/s390/net/claw.c6
-rw-r--r--drivers/s390/net/ctctty.c28
-rw-r--r--drivers/s390/net/cu3088.c3
-rw-r--r--drivers/s390/net/iucv.c10
-rw-r--r--drivers/s390/net/qeth_main.c21
-rw-r--r--drivers/s390/s390_rdev.c53
-rw-r--r--drivers/s390/s390mach.c66
-rw-r--r--drivers/s390/sysinfo.c2
-rw-r--r--drivers/scsi/Kconfig10
-rw-r--r--drivers/scsi/Makefile3
-rw-r--r--drivers/scsi/NCR53C9x.c5
-rw-r--r--drivers/scsi/aacraid/linit.c1
-rw-r--r--drivers/scsi/ahci.c1
-rw-r--r--drivers/scsi/arm/acornscsi.c1
-rw-r--r--drivers/scsi/arm/arxescsi.c1
-rw-r--r--drivers/scsi/arm/cumana_1.c1
-rw-r--r--drivers/scsi/arm/cumana_2.c1
-rw-r--r--drivers/scsi/arm/eesox.c1
-rw-r--r--drivers/scsi/arm/powertec.c1
-rw-r--r--drivers/scsi/ata_piix.c5
-rw-r--r--drivers/scsi/blz1230.c4
-rw-r--r--drivers/scsi/blz2060.c4
-rw-r--r--drivers/scsi/ch.c1
-rw-r--r--drivers/scsi/cyberstorm.c4
-rw-r--r--drivers/scsi/cyberstormII.c4
-rw-r--r--drivers/scsi/fastlane.c4
-rw-r--r--drivers/scsi/hosts.c9
-rw-r--r--drivers/scsi/ide-scsi.c4
-rw-r--r--drivers/scsi/libata-core.c145
-rw-r--r--drivers/scsi/libata-scsi.c48
-rw-r--r--drivers/scsi/libata.h4
-rw-r--r--drivers/scsi/mac53c94.c22
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.h1
-rw-r--r--drivers/scsi/mesh.c3
-rw-r--r--drivers/scsi/oktagon_esp.c2
-rw-r--r--drivers/scsi/sata_mv.c1
-rw-r--r--drivers/scsi/sata_nv.c31
-rw-r--r--drivers/scsi/sata_promise.c1
-rw-r--r--drivers/scsi/sata_sil.c1
-rw-r--r--drivers/scsi/sata_sil24.c1
-rw-r--r--drivers/scsi/sata_sis.c1
-rw-r--r--drivers/scsi/sata_svw.c1
-rw-r--r--drivers/scsi/sata_sx4.c1
-rw-r--r--drivers/scsi/sata_uli.c1
-rw-r--r--drivers/scsi/sata_via.c1
-rw-r--r--drivers/scsi/sata_vsc.c1
-rw-r--r--drivers/scsi/scsi.c109
-rw-r--r--drivers/scsi/scsi_lib.c86
-rw-r--r--drivers/scsi/scsi_priv.h1
-rw-r--r--drivers/scsi/scsi_sysfs.c31
-rw-r--r--drivers/scsi/sd.c106
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/wd33c93.c4
-rw-r--r--drivers/serial/21285.c9
-rw-r--r--drivers/serial/68328serial.c18
-rw-r--r--drivers/serial/68360serial.c54
-rw-r--r--drivers/serial/8250.c112
-rw-r--r--drivers/serial/8250.h6
-rw-r--r--drivers/serial/8250_pci.c124
-rw-r--r--drivers/serial/Kconfig69
-rw-r--r--drivers/serial/Makefile2
-rw-r--r--drivers/serial/amba-pl010.c13
-rw-r--r--drivers/serial/amba-pl011.c22
-rw-r--r--drivers/serial/at91_serial.c894
-rw-r--r--drivers/serial/au1x00_uart.c31
-rw-r--r--drivers/serial/clps711x.c2
-rw-r--r--drivers/serial/cpm_uart/cpm_uart_core.c2
-rw-r--r--drivers/serial/crisv10.c11
-rw-r--r--drivers/serial/dz.c2
-rw-r--r--drivers/serial/icom.c57
-rw-r--r--drivers/serial/imx.c3
-rw-r--r--drivers/serial/ioc4_serial.c10
-rw-r--r--drivers/serial/ip22zilog.c34
-rw-r--r--drivers/serial/m32r_sio.c31
-rw-r--r--drivers/serial/mcfserial.c25
-rw-r--r--drivers/serial/mpc52xx_uart.c74
-rw-r--r--drivers/serial/mpsc.c6
-rw-r--r--drivers/serial/mux.c9
-rw-r--r--drivers/serial/pmac_zilog.c77
-rw-r--r--drivers/serial/pxa.c8
-rw-r--r--drivers/serial/s3c2410.c17
-rw-r--r--drivers/serial/sa1100.c2
-rw-r--r--drivers/serial/serial_core.c18
-rw-r--r--drivers/serial/serial_cs.c2
-rw-r--r--drivers/serial/serial_lh7a40x.c9
-rw-r--r--drivers/serial/serial_txx9.c24
-rw-r--r--drivers/serial/sh-sci.c81
-rw-r--r--drivers/serial/sn_console.c6
-rw-r--r--drivers/serial/sunsab.c38
-rw-r--r--drivers/serial/sunsu.c32
-rw-r--r--drivers/serial/sunzilog.c34
-rw-r--r--drivers/serial/uart00.c782
-rw-r--r--drivers/serial/vr41xx_siu.c7
-rw-r--r--drivers/usb/atm/speedtch.c4
-rw-r--r--drivers/usb/class/cdc-acm.c11
-rw-r--r--drivers/usb/core/inode.c34
-rw-r--r--drivers/usb/gadget/file_storage.c4
-rw-r--r--drivers/usb/gadget/inode.c4
-rw-r--r--drivers/usb/gadget/serial.c19
-rw-r--r--drivers/usb/host/Kconfig10
-rw-r--r--drivers/usb/host/ohci-au1xxx.c1
-rw-r--r--drivers/usb/host/ohci-lh7a404.c1
-rw-r--r--drivers/usb/host/ohci-omap.c2
-rw-r--r--drivers/usb/host/ohci-ppc-soc.c1
-rw-r--r--drivers/usb/host/ohci-s3c2410.c4
-rw-r--r--drivers/usb/image/microtek.c2
-rw-r--r--drivers/usb/media/dsbr100.c1
-rw-r--r--drivers/usb/media/ov511.c1
-rw-r--r--drivers/usb/media/pwc/pwc-if.c1
-rw-r--r--drivers/usb/media/se401.c1
-rw-r--r--drivers/usb/media/stv680.c1
-rw-r--r--drivers/usb/media/usbvideo.c1
-rw-r--r--drivers/usb/media/vicam.c1
-rw-r--r--drivers/usb/media/w9968cf.c5
-rw-r--r--drivers/usb/serial/Kconfig2
-rw-r--r--drivers/usb/serial/cyberjack.c11
-rw-r--r--drivers/usb/serial/cypress_m8.c6
-rw-r--r--drivers/usb/serial/digi_acceleport.c28
-rw-r--r--drivers/usb/serial/empeg.c16
-rw-r--r--drivers/usb/serial/ftdi_sio.c15
-rw-r--r--drivers/usb/serial/garmin_gps.c13
-rw-r--r--drivers/usb/serial/generic.c11
-rw-r--r--drivers/usb/serial/io_edgeport.c20
-rw-r--r--drivers/usb/serial/io_ti.c20
-rw-r--r--drivers/usb/serial/ipaq.c12
-rw-r--r--drivers/usb/serial/ipw.c11
-rw-r--r--drivers/usb/serial/kl5kusb105.c13
-rw-r--r--drivers/usb/serial/kobil_sct.c11
-rw-r--r--drivers/usb/serial/option.c9
-rw-r--r--drivers/usb/serial/pl2303.c8
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c20
-rw-r--r--drivers/usb/serial/usb-serial.c2
-rw-r--r--drivers/usb/serial/visor.c11
-rw-r--r--drivers/usb/serial/whiteheat.c11
-rw-r--r--drivers/video/Kconfig12
-rw-r--r--drivers/video/amba-clcd.c16
-rw-r--r--drivers/video/amifb.c36
-rw-r--r--drivers/video/arcfb.c8
-rw-r--r--drivers/video/asiliantfb.c2
-rw-r--r--drivers/video/aty/Makefile1
-rw-r--r--drivers/video/aty/atyfb.h2
-rw-r--r--drivers/video/aty/atyfb_base.c247
-rw-r--r--drivers/video/aty/mach64_ct.c17
-rw-r--r--drivers/video/aty/radeon_monitor.c2
-rw-r--r--drivers/video/aty/xlinit.c359
-rw-r--r--drivers/video/backlight/corgi_bl.c1
-rw-r--r--drivers/video/console/Kconfig2
-rw-r--r--drivers/video/console/bitblit.c14
-rw-r--r--drivers/video/console/fbcon.c83
-rw-r--r--drivers/video/console/fbcon.h11
-rw-r--r--drivers/video/console/fbcon_ccw.c17
-rw-r--r--drivers/video/console/fbcon_cw.c17
-rw-r--r--drivers/video/console/fbcon_rotate.c9
-rw-r--r--drivers/video/console/fbcon_rotate.h2
-rw-r--r--drivers/video/console/fbcon_ud.c19
-rw-r--r--drivers/video/console/softcursor.c2
-rw-r--r--drivers/video/console/tileblit.c17
-rw-r--r--drivers/video/console/vgacon.c36
-rw-r--r--drivers/video/controlfb.c114
-rw-r--r--drivers/video/cyber2000fb.c1
-rw-r--r--drivers/video/cyblafb.c1524
-rw-r--r--drivers/video/fbcvt.c3
-rw-r--r--drivers/video/fbmem.c37
-rw-r--r--drivers/video/fbmon.c129
-rw-r--r--drivers/video/fbsysfs.c81
-rw-r--r--drivers/video/hgafb.c107
-rw-r--r--drivers/video/i810/i810-i2c.c3
-rw-r--r--drivers/video/i810/i810_accel.c27
-rw-r--r--drivers/video/i810/i810_gtf.c1
-rw-r--r--drivers/video/i810/i810_main.c137
-rw-r--r--drivers/video/i810/i810_main.h56
-rw-r--r--drivers/video/imsttfb.c56
-rw-r--r--drivers/video/imxfb.c6
-rw-r--r--drivers/video/kyro/STG4000InitDevice.c1
-rw-r--r--drivers/video/kyro/STG4000Interface.h3
-rw-r--r--drivers/video/kyro/STG4000OverlayDevice.c1
-rw-r--r--drivers/video/kyro/fbdev.c32
-rw-r--r--drivers/video/logo/Makefile2
-rw-r--r--drivers/video/macfb.c15
-rw-r--r--drivers/video/matrox/matroxfb_base.h4
-rw-r--r--drivers/video/matrox/matroxfb_g450.c2
-rw-r--r--drivers/video/matrox/matroxfb_maven.c11
-rw-r--r--drivers/video/matrox/matroxfb_misc.c8
-rw-r--r--drivers/video/neofb.c36
-rw-r--r--drivers/video/nvidia/nv_hw.c82
-rw-r--r--drivers/video/nvidia/nv_i2c.c12
-rw-r--r--drivers/video/nvidia/nv_proto.h2
-rw-r--r--drivers/video/nvidia/nv_setup.c37
-rw-r--r--drivers/video/nvidia/nvidia.c117
-rw-r--r--drivers/video/offb.c122
-rw-r--r--drivers/video/platinumfb.c98
-rw-r--r--drivers/video/platinumfb.h4
-rw-r--r--drivers/video/pm2fb.c21
-rw-r--r--drivers/video/riva/fbdev.c68
-rw-r--r--drivers/video/riva/rivafb-i2c.c8
-rw-r--r--drivers/video/s3c2410fb.c15
-rw-r--r--drivers/video/sa1100fb.c15
-rw-r--r--drivers/video/savage/savagefb-i2c.c44
-rw-r--r--drivers/video/savage/savagefb_accel.c8
-rw-r--r--drivers/video/savage/savagefb_driver.c53
-rw-r--r--drivers/video/skeletonfb.c482
-rw-r--r--drivers/video/sstfb.c60
-rw-r--r--drivers/video/stifb.c65
-rw-r--r--drivers/video/tdfxfb.c59
-rw-r--r--drivers/video/valkyriefb.c12
-rw-r--r--drivers/video/vesafb.c47
-rw-r--r--drivers/video/vga16fb.c8
-rw-r--r--drivers/video/vgastate.c5
-rw-r--r--drivers/w1/Kconfig2
-rw-r--r--drivers/zorro/proc.c2
-rw-r--r--fs/9p/9p.c314
-rw-r--r--fs/9p/9p.h77
-rw-r--r--fs/9p/Makefile10
-rw-r--r--fs/9p/conv.c884
-rw-r--r--fs/9p/conv.h35
-rw-r--r--fs/9p/debug.h23
-rw-r--r--fs/9p/error.c10
-rw-r--r--fs/9p/error.h2
-rw-r--r--fs/9p/fid.c5
-rw-r--r--fs/9p/mux.c1145
-rw-r--r--fs/9p/mux.h41
-rw-r--r--fs/9p/trans_fd.c53
-rw-r--r--fs/9p/trans_sock.c160
-rw-r--r--fs/9p/transport.h4
-rw-r--r--fs/9p/v9fs.c59
-rw-r--r--fs/9p/v9fs.h17
-rw-r--r--fs/9p/v9fs_vfs.h5
-rw-r--r--fs/9p/vfs_dentry.c15
-rw-r--r--fs/9p/vfs_dir.c47
-rw-r--r--fs/9p/vfs_file.c28
-rw-r--r--fs/9p/vfs_inode.c619
-rw-r--r--fs/9p/vfs_super.c14
-rw-r--r--fs/Kconfig68
-rw-r--r--fs/Kconfig.binfmt2
-rw-r--r--fs/Makefile6
-rw-r--r--fs/affs/inode.c4
-rw-r--r--fs/afs/cmservice.c2
-rw-r--r--fs/afs/dir.c2
-rw-r--r--fs/afs/volume.h4
-rw-r--r--fs/aio.c3
-rw-r--r--fs/attr.c26
-rw-r--r--fs/autofs/root.c5
-rw-r--r--fs/autofs4/autofs_i.h2
-rw-r--r--fs/autofs4/expire.c12
-rw-r--r--fs/autofs4/inode.c4
-rw-r--r--fs/autofs4/root.c26
-rw-r--r--fs/binfmt_aout.c2
-rw-r--r--fs/binfmt_elf.c26
-rw-r--r--fs/binfmt_elf_fdpic.c2
-rw-r--r--fs/binfmt_flat.c19
-rw-r--r--fs/binfmt_misc.c12
-rw-r--r--fs/bio.c27
-rw-r--r--fs/block_dev.c4
-rw-r--r--fs/buffer.c83
-rw-r--r--fs/cifs/cifs_uniupr.h2
-rw-r--r--fs/cifs/cifsfs.c6
-rw-r--r--fs/cifs/file.c6
-rw-r--r--fs/cifs/inode.c11
-rw-r--r--fs/coda/cache.c2
-rw-r--r--fs/coda/dir.c4
-rw-r--r--fs/coda/file.c8
-rw-r--r--fs/compat.c22
-rw-r--r--fs/compat_ioctl.c395
-rw-r--r--fs/configfs/Makefile7
-rw-r--r--fs/configfs/configfs_internal.h142
-rw-r--r--fs/configfs/dir.c1102
-rw-r--r--fs/configfs/file.c360
-rw-r--r--fs/configfs/inode.c162
-rw-r--r--fs/configfs/item.c227
-rw-r--r--fs/configfs/mount.c159
-rw-r--r--fs/configfs/symlink.c281
-rw-r--r--fs/dcache.c41
-rw-r--r--fs/dcookies.c1
-rw-r--r--fs/debugfs/inode.c8
-rw-r--r--fs/devfs/base.c22
-rw-r--r--fs/devpts/inode.c8
-rw-r--r--fs/direct-io.c30
-rw-r--r--fs/dquot.c17
-rw-r--r--fs/drop_caches.c68
-rw-r--r--fs/exec.c16
-rw-r--r--fs/exportfs/expfs.c12
-rw-r--r--fs/ext2/acl.c11
-rw-r--r--fs/ext2/balloc.c1
-rw-r--r--fs/ext2/bitmap.c7
-rw-r--r--fs/ext2/dir.c2
-rw-r--r--fs/ext2/ext2.h2
-rw-r--r--fs/ext2/ioctl.c1
-rw-r--r--fs/ext2/super.c4
-rw-r--r--fs/ext2/xattr.c6
-rw-r--r--fs/ext2/xattr_trusted.c5
-rw-r--r--fs/ext2/xattr_user.c14
-rw-r--r--fs/ext3/acl.c11
-rw-r--r--fs/ext3/balloc.c3
-rw-r--r--fs/ext3/bitmap.c8
-rw-r--r--fs/ext3/bitmap.h8
-rw-r--r--fs/ext3/ialloc.c7
-rw-r--r--fs/ext3/ioctl.c1
-rw-r--r--fs/ext3/namei.c2
-rw-r--r--fs/ext3/resize.c32
-rw-r--r--fs/ext3/super.c60
-rw-r--r--fs/ext3/xattr.c6
-rw-r--r--fs/ext3/xattr_trusted.c5
-rw-r--r--fs/ext3/xattr_user.c15
-rw-r--r--fs/fat/cache.c14
-rw-r--r--fs/fat/dir.c28
-rw-r--r--fs/fat/fatent.c10
-rw-r--r--fs/fat/file.c38
-rw-r--r--fs/fat/inode.c119
-rw-r--r--fs/fat/misc.c8
-rw-r--r--fs/fcntl.c7
-rw-r--r--fs/fifo.c6
-rw-r--r--fs/file_table.c9
-rw-r--r--fs/freevxfs/vxfs_immed.c4
-rw-r--r--fs/fuse/dev.c72
-rw-r--r--fs/fuse/dir.c278
-rw-r--r--fs/fuse/file.c53
-rw-r--r--fs/fuse/fuse_i.h12
-rw-r--r--fs/fuse/inode.c14
-rw-r--r--fs/hfs/inode.c4
-rw-r--r--fs/hfsplus/bitmap.c8
-rw-r--r--fs/hfsplus/hfsplus_fs.h3
-rw-r--r--fs/hfsplus/inode.c14
-rw-r--r--fs/hfsplus/ioctl.c1
-rw-r--r--fs/hfsplus/super.c21
-rw-r--r--fs/hpfs/dir.c6
-rw-r--r--fs/hppfs/hppfs_kern.c6
-rw-r--r--fs/hugetlbfs/inode.c8
-rw-r--r--fs/inode.c57
-rw-r--r--fs/ioctl.c1
-rw-r--r--fs/ioprio.c1
-rw-r--r--fs/jbd/checkpoint.c418
-rw-r--r--fs/jffs/inode-v23.c6
-rw-r--r--fs/jffs2/fs.c1
-rw-r--r--fs/jfs/jfs_dmap.c3
-rw-r--r--fs/jfs/jfs_imap.c6
-rw-r--r--fs/jfs/jfs_incore.h4
-rw-r--r--fs/jfs/jfs_txnmgr.c6
-rw-r--r--fs/jfs/jfs_umount.c6
-rw-r--r--fs/jfs/resize.c3
-rw-r--r--fs/jfs/super.c3
-rw-r--r--fs/jfs/xattr.c67
-rw-r--r--fs/libfs.c20
-rw-r--r--fs/lockd/clntproc.c41
-rw-r--r--fs/lockd/host.c4
-rw-r--r--fs/lockd/mon.c1
-rw-r--r--fs/lockd/svc.c4
-rw-r--r--fs/lockd/svc4proc.c15
-rw-r--r--fs/lockd/svclock.c42
-rw-r--r--fs/lockd/svcproc.c14
-rw-r--r--fs/lockd/xdr4.c4
-rw-r--r--fs/locks.c27
-rw-r--r--fs/mpage.c6
-rw-r--r--fs/namei.c85
-rw-r--r--fs/namespace.c31
-rw-r--r--fs/ncpfs/dir.c2
-rw-r--r--fs/ncpfs/file.c2
-rw-r--r--fs/ncpfs/ioctl.c1
-rw-r--r--fs/ncpfs/ncplib_kernel.h4
-rw-r--r--fs/nfs/Makefile1
-rw-r--r--fs/nfs/callback.c3
-rw-r--r--fs/nfs/callback.h1
-rw-r--r--fs/nfs/callback_proc.c4
-rw-r--r--fs/nfs/delegation.c47
-rw-r--r--fs/nfs/delegation.h2
-rw-r--r--fs/nfs/dir.c14
-rw-r--r--fs/nfs/direct.c57
-rw-r--r--fs/nfs/idmap.c9
-rw-r--r--fs/nfs/inode.c203
-rw-r--r--fs/nfs/mount_clnt.c1
-rw-r--r--fs/nfs/nfs2xdr.c21
-rw-r--r--fs/nfs/nfs3proc.c70
-rw-r--r--fs/nfs/nfs3xdr.c2
-rw-r--r--fs/nfs/nfs4_fs.h20
-rw-r--r--fs/nfs/nfs4proc.c1480
-rw-r--r--fs/nfs/nfs4renewd.c14
-rw-r--r--fs/nfs/nfs4state.c183
-rw-r--r--fs/nfs/nfs4xdr.c187
-rw-r--r--fs/nfs/nfsroot.c8
-rw-r--r--fs/nfs/proc.c31
-rw-r--r--fs/nfs/read.c16
-rw-r--r--fs/nfs/sysctl.c84
-rw-r--r--fs/nfs/unlink.c30
-rw-r--r--fs/nfs/write.c89
-rw-r--r--fs/nfsd/nfs3proc.c11
-rw-r--r--fs/nfsd/nfs3xdr.c47
-rw-r--r--fs/nfsd/nfs4callback.c11
-rw-r--r--fs/nfsd/nfs4recover.c20
-rw-r--r--fs/nfsd/nfsxdr.c48
-rw-r--r--fs/nfsd/vfs.c169
-rw-r--r--fs/ntfs/attrib.c4
-rw-r--r--fs/ntfs/dir.c8
-rw-r--r--fs/ntfs/file.c20
-rw-r--r--fs/ntfs/index.c6
-rw-r--r--fs/ntfs/inode.c28
-rw-r--r--fs/ntfs/namei.c6
-rw-r--r--fs/ntfs/quota.c6
-rw-r--r--fs/ntfs/super.c44
-rw-r--r--fs/ocfs2/Makefile33
-rw-r--r--fs/ocfs2/alloc.c2040
-rw-r--r--fs/ocfs2/alloc.h82
-rw-r--r--fs/ocfs2/aops.c643
-rw-r--r--fs/ocfs2/aops.h41
-rw-r--r--fs/ocfs2/buffer_head_io.c232
-rw-r--r--fs/ocfs2/buffer_head_io.h73
-rw-r--r--fs/ocfs2/cluster/Makefile4
-rw-r--r--fs/ocfs2/cluster/endian.h30
-rw-r--r--fs/ocfs2/cluster/heartbeat.c1797
-rw-r--r--fs/ocfs2/cluster/heartbeat.h82
-rw-r--r--fs/ocfs2/cluster/masklog.c166
-rw-r--r--fs/ocfs2/cluster/masklog.h274
-rw-r--r--fs/ocfs2/cluster/nodemanager.c791
-rw-r--r--fs/ocfs2/cluster/nodemanager.h64
-rw-r--r--fs/ocfs2/cluster/ocfs2_heartbeat.h37
-rw-r--r--fs/ocfs2/cluster/ocfs2_nodemanager.h39
-rw-r--r--fs/ocfs2/cluster/quorum.c315
-rw-r--r--fs/ocfs2/cluster/quorum.h36
-rw-r--r--fs/ocfs2/cluster/sys.c124
-rw-r--r--fs/ocfs2/cluster/sys.h33
-rw-r--r--fs/ocfs2/cluster/tcp.c1829
-rw-r--r--fs/ocfs2/cluster/tcp.h113
-rw-r--r--fs/ocfs2/cluster/tcp_internal.h174
-rw-r--r--fs/ocfs2/cluster/ver.c42
-rw-r--r--fs/ocfs2/cluster/ver.h31
-rw-r--r--fs/ocfs2/dcache.c91
-rw-r--r--fs/ocfs2/dcache.h31
-rw-r--r--fs/ocfs2/dir.c618
-rw-r--r--fs/ocfs2/dir.h54
-rw-r--r--fs/ocfs2/dlm/Makefile8
-rw-r--r--fs/ocfs2/dlm/dlmapi.h214
-rw-r--r--fs/ocfs2/dlm/dlmast.c466
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h884
-rw-r--r--fs/ocfs2/dlm/dlmconvert.c530
-rw-r--r--fs/ocfs2/dlm/dlmconvert.h35
-rw-r--r--fs/ocfs2/dlm/dlmdebug.c246
-rw-r--r--fs/ocfs2/dlm/dlmdebug.h30
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c1469
-rw-r--r--fs/ocfs2/dlm/dlmdomain.h36
-rw-r--r--fs/ocfs2/dlm/dlmfs.c640
-rw-r--r--fs/ocfs2/dlm/dlmfsver.c42
-rw-r--r--fs/ocfs2/dlm/dlmfsver.h31
-rw-r--r--fs/ocfs2/dlm/dlmlock.c676
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c2664
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c2132
-rw-r--r--fs/ocfs2/dlm/dlmthread.c692
-rw-r--r--fs/ocfs2/dlm/dlmunlock.c672
-rw-r--r--fs/ocfs2/dlm/dlmver.c42
-rw-r--r--fs/ocfs2/dlm/dlmver.h31
-rw-r--r--fs/ocfs2/dlm/userdlm.c658
-rw-r--r--fs/ocfs2/dlm/userdlm.h111
-rw-r--r--fs/ocfs2/dlmglue.c2904
-rw-r--r--fs/ocfs2/dlmglue.h111
-rw-r--r--fs/ocfs2/endian.h45
-rw-r--r--fs/ocfs2/export.c248
-rw-r--r--fs/ocfs2/export.h31
-rw-r--r--fs/ocfs2/extent_map.c994
-rw-r--r--fs/ocfs2/extent_map.h46
-rw-r--r--fs/ocfs2/file.c1238
-rw-r--r--fs/ocfs2/file.h57
-rw-r--r--fs/ocfs2/heartbeat.c378
-rw-r--r--fs/ocfs2/heartbeat.h67
-rw-r--r--fs/ocfs2/inode.c1140
-rw-r--r--fs/ocfs2/inode.h145
-rw-r--r--fs/ocfs2/journal.c1652
-rw-r--r--fs/ocfs2/journal.h457
-rw-r--r--fs/ocfs2/localalloc.c983
-rw-r--r--fs/ocfs2/localalloc.h56
-rw-r--r--fs/ocfs2/mmap.c98
-rw-r--r--fs/ocfs2/mmap.h6
-rw-r--r--fs/ocfs2/namei.c2264
-rw-r--r--fs/ocfs2/namei.h58
-rw-r--r--fs/ocfs2/ocfs1_fs_compat.h109
-rw-r--r--fs/ocfs2/ocfs2.h464
-rw-r--r--fs/ocfs2/ocfs2_fs.h638
-rw-r--r--fs/ocfs2/ocfs2_lockid.h73
-rw-r--r--fs/ocfs2/slot_map.c303
-rw-r--r--fs/ocfs2/slot_map.h66
-rw-r--r--fs/ocfs2/suballoc.c1651
-rw-r--r--fs/ocfs2/suballoc.h132
-rw-r--r--fs/ocfs2/super.c1733
-rw-r--r--fs/ocfs2/super.h44
-rw-r--r--fs/ocfs2/symlink.c180
-rw-r--r--fs/ocfs2/symlink.h42
-rw-r--r--fs/ocfs2/sysfile.c131
-rw-r--r--fs/ocfs2/sysfile.h33
-rw-r--r--fs/ocfs2/uptodate.c544
-rw-r--r--fs/ocfs2/uptodate.h44
-rw-r--r--fs/ocfs2/ver.c43
-rw-r--r--fs/ocfs2/ver.h31
-rw-r--r--fs/ocfs2/vote.c1202
-rw-r--r--fs/ocfs2/vote.h56
-rw-r--r--fs/open.c40
-rw-r--r--fs/partitions/Kconfig22
-rw-r--r--fs/partitions/ibm.c30
-rw-r--r--fs/pipe.c46
-rw-r--r--fs/pnode.c2
-rw-r--r--fs/proc/array.c8
-rw-r--r--fs/proc/base.c1
-rw-r--r--fs/proc/generic.c2
-rw-r--r--fs/proc/inode.c2
-rw-r--r--fs/proc/internal.h4
-rw-r--r--fs/proc/kcore.c1
-rw-r--r--fs/proc/proc_devtree.c24
-rw-r--r--fs/proc/proc_misc.c4
-rw-r--r--fs/proc/root.c3
-rw-r--r--fs/proc/task_mmu.c127
-rw-r--r--fs/proc/vmcore.c6
-rw-r--r--fs/quota.c7
-rw-r--r--fs/ramfs/Makefile4
-rw-r--r--fs/ramfs/file-mmu.c57
-rw-r--r--fs/ramfs/file-nommu.c292
-rw-r--r--fs/ramfs/inode.c22
-rw-r--r--fs/ramfs/internal.h15
-rw-r--r--fs/read_write.c4
-rw-r--r--fs/readdir.c4
-rw-r--r--fs/reiserfs/file.c12
-rw-r--r--fs/reiserfs/inode.c14
-rw-r--r--fs/reiserfs/ioctl.c5
-rw-r--r--fs/reiserfs/journal.c5
-rw-r--r--fs/reiserfs/super.c4
-rw-r--r--fs/reiserfs/tail_conversion.c2
-rw-r--r--fs/reiserfs/xattr.c56
-rw-r--r--fs/reiserfs/xattr_acl.c7
-rw-r--r--fs/reiserfs/xattr_trusted.c1
-rw-r--r--fs/reiserfs/xattr_user.c30
-rw-r--r--fs/relayfs/buffers.c3
-rw-r--r--fs/relayfs/inode.c226
-rw-r--r--fs/relayfs/relay.c69
-rw-r--r--fs/relayfs/relay.h4
-rw-r--r--fs/romfs/inode.c6
-rw-r--r--fs/smbfs/cache.c4
-rw-r--r--fs/smbfs/file.c7
-rw-r--r--fs/smbfs/inode.c3
-rw-r--r--fs/smbfs/proc.c3
-rw-r--r--fs/super.c5
-rw-r--r--fs/sysfs/dir.c31
-rw-r--r--fs/sysfs/file.c17
-rw-r--r--fs/sysfs/inode.c9
-rw-r--r--fs/sysfs/symlink.c5
-rw-r--r--fs/sysv/dir.c4
-rw-r--r--fs/udf/balloc.c2
-rw-r--r--fs/udf/file.c1
-rw-r--r--fs/udf/inode.c5
-rw-r--r--fs/ufs/balloc.c1
-rw-r--r--fs/ufs/dir.c2
-rw-r--r--fs/ufs/super.c8
-rw-r--r--fs/ufs/util.h4
-rw-r--r--fs/xattr.c199
-rw-r--r--fs/xfs/Kbuild6
-rw-r--r--fs/xfs/linux-2.6/mutex.h10
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c1088
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.h10
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c1373
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.h696
-rw-r--r--fs/xfs/linux-2.6/xfs_cred.h4
-rw-r--r--fs/xfs/linux-2.6/xfs_file.c6
-rw-r--r--fs/xfs/linux-2.6/xfs_fs_subr.c3
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl.c16
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl32.c1
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c127
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.h5
-rw-r--r--fs/xfs/linux-2.6/xfs_linux.h6
-rw-r--r--fs/xfs/linux-2.6/xfs_lrw.c76
-rw-r--r--fs/xfs/linux-2.6/xfs_stats.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_stats.h18
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c19
-rw-r--r--fs/xfs/linux-2.6/xfs_vnode.c1
-rw-r--r--fs/xfs/linux-2.6/xfs_vnode.h19
-rw-r--r--fs/xfs/quota/xfs_dquot.c4
-rw-r--r--fs/xfs/quota/xfs_dquot_item.c4
-rw-r--r--fs/xfs/quota/xfs_qm.c28
-rw-r--r--fs/xfs/quota/xfs_qm.h2
-rw-r--r--fs/xfs/quota/xfs_qm_bhv.c2
-rw-r--r--fs/xfs/quota/xfs_qm_syscalls.c11
-rw-r--r--fs/xfs/quota/xfs_quota_priv.h2
-rw-r--r--fs/xfs/support/debug.c60
-rw-r--r--fs/xfs/support/debug.h25
-rw-r--r--fs/xfs/support/uuid.c29
-rw-r--r--fs/xfs/xfs_acl.c1
-rw-r--r--fs/xfs/xfs_arch.h22
-rw-r--r--fs/xfs/xfs_attr.c32
-rw-r--r--fs/xfs/xfs_attr_leaf.c12
-rw-r--r--fs/xfs/xfs_attr_leaf.h79
-rw-r--r--fs/xfs/xfs_bmap.c412
-rw-r--r--fs/xfs/xfs_bmap.h7
-rw-r--r--fs/xfs/xfs_clnt.h2
-rw-r--r--fs/xfs/xfs_dfrag.c16
-rw-r--r--fs/xfs/xfs_dinode.h22
-rw-r--r--fs/xfs/xfs_dir.c2
-rw-r--r--fs/xfs/xfs_dir.h2
-rw-r--r--fs/xfs/xfs_dir2.h3
-rw-r--r--fs/xfs/xfs_dir_leaf.h64
-rw-r--r--fs/xfs/xfs_dmapi.h14
-rw-r--r--fs/xfs/xfs_error.c1
-rw-r--r--fs/xfs/xfs_error.h8
-rw-r--r--fs/xfs/xfs_fs.h10
-rw-r--r--fs/xfs/xfs_fsops.c26
-rw-r--r--fs/xfs/xfs_fsops.h1
-rw-r--r--fs/xfs/xfs_iget.c5
-rw-r--r--fs/xfs/xfs_inode.c61
-rw-r--r--fs/xfs/xfs_inode.h4
-rw-r--r--fs/xfs/xfs_inode_item.c9
-rw-r--r--fs/xfs/xfs_iomap.c426
-rw-r--r--fs/xfs/xfs_itable.c5
-rw-r--r--fs/xfs/xfs_log.c123
-rw-r--r--fs/xfs/xfs_log.h19
-rw-r--r--fs/xfs/xfs_log_priv.h77
-rw-r--r--fs/xfs/xfs_log_recover.c12
-rw-r--r--fs/xfs/xfs_mount.c7
-rw-r--r--fs/xfs/xfs_mount.h5
-rw-r--r--fs/xfs/xfs_rename.c7
-rw-r--r--fs/xfs/xfs_rw.c9
-rw-r--r--fs/xfs/xfs_sb.h17
-rw-r--r--fs/xfs/xfs_trans.c14
-rw-r--r--fs/xfs/xfs_trans.h1
-rw-r--r--fs/xfs/xfs_utils.c9
-rw-r--r--fs/xfs/xfs_vfsops.c50
-rw-r--r--fs/xfs/xfs_vnodeops.c196
-rw-r--r--include/asm-alpha/atomic.h2
-rw-r--r--include/asm-alpha/cache.h1
-rw-r--r--include/asm-alpha/compiler.h2
-rw-r--r--include/asm-alpha/dma-mapping.h2
-rw-r--r--include/asm-alpha/futex.h49
-rw-r--r--include/asm-alpha/hardirq.h2
-rw-r--r--include/asm-alpha/mman.h1
-rw-r--r--include/asm-alpha/mmu_context.h6
-rw-r--r--include/asm-alpha/mutex.h9
-rw-r--r--include/asm-alpha/processor.h34
-rw-r--r--include/asm-alpha/ptrace.h6
-rw-r--r--include/asm-alpha/system.h18
-rw-r--r--include/asm-alpha/thread_info.h2
-rw-r--r--include/asm-arm/arch-aaec2000/dma.h8
-rw-r--r--include/asm-arm/arch-at91rm9200/at91rm9200.h261
-rw-r--r--include/asm-arm/arch-at91rm9200/at91rm9200_pdc.h36
-rw-r--r--include/asm-arm/arch-at91rm9200/at91rm9200_sys.h328
-rw-r--r--include/asm-arm/arch-at91rm9200/at91rm9200_usart.h123
-rw-r--r--include/asm-arm/arch-at91rm9200/board.h80
-rw-r--r--include/asm-arm/arch-at91rm9200/debug-macro.S38
-rw-r--r--include/asm-arm/arch-at91rm9200/dma.h (renamed from include/asm-arm/arch-epxa10db/param.h)4
-rw-r--r--include/asm-arm/arch-at91rm9200/entry-macro.S25
-rw-r--r--include/asm-arm/arch-at91rm9200/gpio.h193
-rw-r--r--include/asm-arm/arch-at91rm9200/hardware.h92
-rw-r--r--include/asm-arm/arch-at91rm9200/io.h (renamed from include/asm-arm/arch-epxa10db/dma.h)19
-rw-r--r--include/asm-arm/arch-at91rm9200/irqs.h52
-rw-r--r--include/asm-arm/arch-at91rm9200/memory.h (renamed from include/asm-arm/arch-epxa10db/memory.h)19
-rw-r--r--include/asm-arm/arch-at91rm9200/param.h (renamed from include/asm-arm/arch-epxa10db/vmalloc.h)14
-rw-r--r--include/asm-arm/arch-at91rm9200/pio.h115
-rw-r--r--include/asm-arm/arch-at91rm9200/system.h (renamed from include/asm-arm/arch-epxa10db/system.h)28
-rw-r--r--include/asm-arm/arch-at91rm9200/timex.h (renamed from include/asm-arm/arch-epxa10db/timex.h)18
-rw-r--r--include/asm-arm/arch-at91rm9200/uncompress.h55
-rw-r--r--include/asm-arm/arch-at91rm9200/vmalloc.h (renamed from arch/arm/mach-clps711x/dma.c)15
-rw-r--r--include/asm-arm/arch-cl7500/dma.h1
-rw-r--r--include/asm-arm/arch-cl7500/entry-macro.S2
-rw-r--r--include/asm-arm/arch-clps711x/dma.h9
-rw-r--r--include/asm-arm/arch-clps711x/entry-macro.S1
-rw-r--r--include/asm-arm/arch-clps711x/system.h2
-rw-r--r--include/asm-arm/arch-ebsa110/dma.h8
-rw-r--r--include/asm-arm/arch-ebsa285/dma.h5
-rw-r--r--include/asm-arm/arch-ebsa285/entry-macro.S2
-rw-r--r--include/asm-arm/arch-epxa10db/debug-macro.S41
-rw-r--r--include/asm-arm/arch-epxa10db/entry-macro.S25
-rw-r--r--include/asm-arm/arch-epxa10db/ether00.h482
-rw-r--r--include/asm-arm/arch-epxa10db/excalibur.h91
-rw-r--r--include/asm-arm/arch-epxa10db/hardware.h64
-rw-r--r--include/asm-arm/arch-epxa10db/int_ctrl00.h288
-rw-r--r--include/asm-arm/arch-epxa10db/io.h41
-rw-r--r--include/asm-arm/arch-epxa10db/irqs.h45
-rw-r--r--include/asm-arm/arch-epxa10db/mode_ctrl00.h80
-rw-r--r--include/asm-arm/arch-epxa10db/platform.h7
-rw-r--r--include/asm-arm/arch-epxa10db/pld_conf00.h73
-rw-r--r--include/asm-arm/arch-epxa10db/tdkphy.h209
-rw-r--r--include/asm-arm/arch-epxa10db/timer00.h98
-rw-r--r--include/asm-arm/arch-epxa10db/uart00.h181
-rw-r--r--include/asm-arm/arch-epxa10db/uncompress.h54
-rw-r--r--include/asm-arm/arch-imx/dma.h4
-rw-r--r--include/asm-arm/arch-imx/entry-macro.S2
-rw-r--r--include/asm-arm/arch-integrator/debug-macro.S2
-rw-r--r--include/asm-arm/arch-integrator/dma.h9
-rw-r--r--include/asm-arm/arch-integrator/entry-macro.S2
-rw-r--r--include/asm-arm/arch-iop3xx/dma.h7
-rw-r--r--include/asm-arm/arch-iop3xx/entry-macro.S1
-rw-r--r--include/asm-arm/arch-ixp2000/dma.h9
-rw-r--r--include/asm-arm/arch-ixp2000/enp2611.h6
-rw-r--r--include/asm-arm/arch-ixp2000/entry-macro.S1
-rw-r--r--include/asm-arm/arch-ixp2000/io.h98
-rw-r--r--include/asm-arm/arch-ixp2000/ixp2000-regs.h8
-rw-r--r--include/asm-arm/arch-ixp4xx/coyote.h5
-rw-r--r--include/asm-arm/arch-ixp4xx/dma.h3
-rw-r--r--include/asm-arm/arch-ixp4xx/entry-macro.S1
-rw-r--r--include/asm-arm/arch-ixp4xx/gtwx5715.h4
-rw-r--r--include/asm-arm/arch-ixp4xx/hardware.h1
-rw-r--r--include/asm-arm/arch-ixp4xx/irqs.h9
-rw-r--r--include/asm-arm/arch-ixp4xx/ixdp425.h3
-rw-r--r--include/asm-arm/arch-ixp4xx/memory.h25
-rw-r--r--include/asm-arm/arch-ixp4xx/nas100d.h72
-rw-r--r--include/asm-arm/arch-ixp4xx/nslu2.h3
-rw-r--r--include/asm-arm/arch-ixp4xx/platform.h24
-rw-r--r--include/asm-arm/arch-l7200/dma.h1
-rw-r--r--include/asm-arm/arch-l7200/system.h2
-rw-r--r--include/asm-arm/arch-lh7a40x/dma.h8
-rw-r--r--include/asm-arm/arch-lh7a40x/entry-macro.S2
-rw-r--r--include/asm-arm/arch-omap/dma.h3
-rw-r--r--include/asm-arm/arch-omap/entry-macro.S2
-rw-r--r--include/asm-arm/arch-omap/system.h3
-rw-r--r--include/asm-arm/arch-pxa/dma.h5
-rw-r--r--include/asm-arm/arch-pxa/entry-macro.S2
-rw-r--r--include/asm-arm/arch-pxa/pxa-regs.h12
-rw-r--r--include/asm-arm/arch-realview/debug-macro.S2
-rw-r--r--include/asm-arm/arch-realview/dma.h7
-rw-r--r--include/asm-arm/arch-realview/entry-macro.S2
-rw-r--r--include/asm-arm/arch-rpc/entry-macro.S2
-rw-r--r--include/asm-arm/arch-s3c2410/dma.h8
-rw-r--r--include/asm-arm/arch-s3c2410/entry-macro.S2
-rw-r--r--include/asm-arm/arch-sa1100/dma.h14
-rw-r--r--include/asm-arm/arch-versatile/debug-macro.S2
-rw-r--r--include/asm-arm/arch-versatile/dma.h7
-rw-r--r--include/asm-arm/arch-versatile/entry-macro.S4
-rw-r--r--include/asm-arm/atomic.h3
-rw-r--r--include/asm-arm/byteorder.h25
-rw-r--r--include/asm-arm/cache.h5
-rw-r--r--include/asm-arm/cacheflush.h1
-rw-r--r--include/asm-arm/dma.h11
-rw-r--r--include/asm-arm/futex.h49
-rw-r--r--include/asm-arm/hardware/sharpsl_pm.h94
-rw-r--r--include/asm-arm/io.h6
-rw-r--r--include/asm-arm/ioctl.h75
-rw-r--r--include/asm-arm/irq.h12
-rw-r--r--include/asm-arm/mach/dma.h4
-rw-r--r--include/asm-arm/mach/map.h3
-rw-r--r--include/asm-arm/mach/serial_at91rm9200.h36
-rw-r--r--include/asm-arm/memory.h15
-rw-r--r--include/asm-arm/mman.h1
-rw-r--r--include/asm-arm/mutex.h128
-rw-r--r--include/asm-arm/processor.h8
-rw-r--r--include/asm-arm/scatterlist.h1
-rw-r--r--include/asm-arm/system.h12
-rw-r--r--include/asm-arm/thread_info.h7
-rw-r--r--include/asm-arm26/atomic.h3
-rw-r--r--include/asm-arm26/futex.h49
-rw-r--r--include/asm-arm26/ioctl.h75
-rw-r--r--include/asm-arm26/mman.h1
-rw-r--r--include/asm-arm26/system.h12
-rw-r--r--include/asm-arm26/thread_info.h9
-rw-r--r--include/asm-cris/arch-v10/cache.h1
-rw-r--r--include/asm-cris/arch-v10/processor.h2
-rw-r--r--include/asm-cris/arch-v32/cache.h1
-rw-r--r--include/asm-cris/arch-v32/processor.h2
-rw-r--r--include/asm-cris/atomic.h3
-rw-r--r--include/asm-cris/dma-mapping.h2
-rw-r--r--include/asm-cris/futex.h49
-rw-r--r--include/asm-cris/ioctl.h84
-rw-r--r--include/asm-cris/mman.h1
-rw-r--r--include/asm-cris/mutex.h9
-rw-r--r--include/asm-cris/processor.h3
-rw-r--r--include/asm-cris/thread_info.h2
-rw-r--r--include/asm-frv/atomic.h98
-rw-r--r--include/asm-frv/bug.h1
-rw-r--r--include/asm-frv/dma-mapping.h2
-rw-r--r--include/asm-frv/futex.h42
-rw-r--r--include/asm-frv/io.h123
-rw-r--r--include/asm-frv/ioctl.h81
-rw-r--r--include/asm-frv/mb-regs.h4
-rw-r--r--include/asm-frv/mc146818rtc.h16
-rw-r--r--include/asm-frv/mman.h1
-rw-r--r--include/asm-frv/module.h16
-rw-r--r--include/asm-frv/mutex.h9
-rw-r--r--include/asm-frv/pci.h8
-rw-r--r--include/asm-frv/pgtable.h5
-rw-r--r--include/asm-frv/signal.h1
-rw-r--r--include/asm-frv/thread_info.h2
-rw-r--r--include/asm-frv/types.h1
-rw-r--r--include/asm-frv/uaccess.h8
-rw-r--r--include/asm-frv/unistd.h2
-rw-r--r--include/asm-frv/vga.h17
-rw-r--r--include/asm-frv/xor.h1
-rw-r--r--include/asm-generic/atomic.h117
-rw-r--r--include/asm-generic/dma-mapping.h2
-rw-r--r--include/asm-generic/futex.h53
-rw-r--r--include/asm-generic/ioctl.h80
-rw-r--r--include/asm-generic/mutex-dec.h110
-rw-r--r--include/asm-generic/mutex-null.h24
-rw-r--r--include/asm-generic/mutex-xchg.h117
-rw-r--r--include/asm-generic/vmlinux.lds.h4
-rw-r--r--include/asm-h8300/atomic.h3
-rw-r--r--include/asm-h8300/futex.h49
-rw-r--r--include/asm-h8300/ioctl.h81
-rw-r--r--include/asm-h8300/irq.h5
-rw-r--r--include/asm-h8300/mman.h1
-rw-r--r--include/asm-h8300/mutex.h9
-rw-r--r--include/asm-h8300/page.h6
-rw-r--r--include/asm-h8300/thread_info.h2
-rw-r--r--include/asm-i386/apic.h5
-rw-r--r--include/asm-i386/atomic.h2
-rw-r--r--include/asm-i386/bitops.h50
-rw-r--r--include/asm-i386/bugs.h23
-rw-r--r--include/asm-i386/cache.h2
-rw-r--r--include/asm-i386/cacheflush.h4
-rw-r--r--include/asm-i386/cpufeature.h1
-rw-r--r--include/asm-i386/desc.h8
-rw-r--r--include/asm-i386/dma-mapping.h14
-rw-r--r--include/asm-i386/i387.h8
-rw-r--r--include/asm-i386/io.h5
-rw-r--r--include/asm-i386/ioctl.h86
-rw-r--r--include/asm-i386/irq.h2
-rw-r--r--include/asm-i386/kexec.h47
-rw-r--r--include/asm-i386/kprobes.h9
-rw-r--r--include/asm-i386/mach-bigsmp/mach_apic.h79
-rw-r--r--include/asm-i386/mach-bigsmp/mach_apicdef.h4
-rw-r--r--include/asm-i386/mach-default/mach_ipi.h4
-rw-r--r--include/asm-i386/mman.h1
-rw-r--r--include/asm-i386/mmzone.h5
-rw-r--r--include/asm-i386/module.h4
-rw-r--r--include/asm-i386/mpspec_def.h2
-rw-r--r--include/asm-i386/mutex.h136
-rw-r--r--include/asm-i386/processor.h26
-rw-r--r--include/asm-i386/ptrace.h3
-rw-r--r--include/asm-i386/segment.h14
-rw-r--r--include/asm-i386/system.h40
-rw-r--r--include/asm-i386/thread_info.h2
-rw-r--r--include/asm-i386/topology.h1
-rw-r--r--include/asm-i386/unistd.h5
-rw-r--r--include/asm-i386/vm86.h20
-rw-r--r--include/asm-ia64/atomic.h2
-rw-r--r--include/asm-ia64/bug.h6
-rw-r--r--include/asm-ia64/cache.h2
-rw-r--r--include/asm-ia64/compat.h2
-rw-r--r--include/asm-ia64/futex.h49
-rw-r--r--include/asm-ia64/io.h1
-rw-r--r--include/asm-ia64/ioctl.h78
-rw-r--r--include/asm-ia64/kprobes.h14
-rw-r--r--include/asm-ia64/mman.h1
-rw-r--r--include/asm-ia64/mutex.h9
-rw-r--r--include/asm-ia64/processor.h2
-rw-r--r--include/asm-ia64/ptrace.h4
-rw-r--r--include/asm-ia64/spinlock.h2
-rw-r--r--include/asm-ia64/system.h9
-rw-r--r--include/asm-ia64/thread_info.h9
-rw-r--r--include/asm-ia64/topology.h2
-rw-r--r--include/asm-ia64/unistd.h5
-rw-r--r--include/asm-m32r/assembler.h10
-rw-r--r--include/asm-m32r/atomic.h2
-rw-r--r--include/asm-m32r/cache.h2
-rw-r--r--include/asm-m32r/cacheflush.h2
-rw-r--r--include/asm-m32r/futex.h49
-rw-r--r--include/asm-m32r/ioctl.h79
-rw-r--r--include/asm-m32r/irq.h16
-rw-r--r--include/asm-m32r/m32102.h38
-rw-r--r--include/asm-m32r/m32104ut/m32104ut_pld.h163
-rw-r--r--include/asm-m32r/m32r.h8
-rw-r--r--include/asm-m32r/mman.h1
-rw-r--r--include/asm-m32r/mutex.h9
-rw-r--r--include/asm-m32r/ptrace.h3
-rw-r--r--include/asm-m32r/system.h22
-rw-r--r--include/asm-m32r/thread_info.h2
-rw-r--r--include/asm-m32r/unistd.h12
-rw-r--r--include/asm-m68k/amigahw.h12
-rw-r--r--include/asm-m68k/amigaints.h2
-rw-r--r--include/asm-m68k/atomic.h2
-rw-r--r--include/asm-m68k/cache.h2
-rw-r--r--include/asm-m68k/checksum.h2
-rw-r--r--include/asm-m68k/dsp56k.h2
-rw-r--r--include/asm-m68k/floppy.h2
-rw-r--r--include/asm-m68k/futex.h49
-rw-r--r--include/asm-m68k/hardirq.h9
-rw-r--r--include/asm-m68k/io.h49
-rw-r--r--include/asm-m68k/ioctl.h81
-rw-r--r--include/asm-m68k/irq.h11
-rw-r--r--include/asm-m68k/machdep.h1
-rw-r--r--include/asm-m68k/mman.h1
-rw-r--r--include/asm-m68k/mutex.h9
-rw-r--r--include/asm-m68k/raw_io.h40
-rw-r--r--include/asm-m68k/signal.h2
-rw-r--r--include/asm-m68k/sun3_pgtable.h2
-rw-r--r--include/asm-m68k/sun3ints.h1
-rw-r--r--include/asm-m68k/sun3xflop.h4
-rw-r--r--include/asm-m68k/thread_info.h1
-rw-r--r--include/asm-m68k/uaccess.h20
-rw-r--r--include/asm-m68k/zorro.h8
-rw-r--r--include/asm-m68knommu/atomic.h2
-rw-r--r--include/asm-m68knommu/bitops.h2
-rw-r--r--include/asm-m68knommu/futex.h49
-rw-r--r--include/asm-m68knommu/ioctl.h2
-rw-r--r--include/asm-m68knommu/irq.h6
-rw-r--r--include/asm-m68knommu/machdep.h1
-rw-r--r--include/asm-m68knommu/mutex.h9
-rw-r--r--include/asm-m68knommu/sigcontext.h1
-rw-r--r--include/asm-m68knommu/thread_info.h2
-rw-r--r--include/asm-mips/atomic.h45
-rw-r--r--include/asm-mips/cache.h1
-rw-r--r--include/asm-mips/cpu-features.h21
-rw-r--r--include/asm-mips/cpu.h20
-rw-r--r--include/asm-mips/delay.h6
-rw-r--r--include/asm-mips/dsp.h4
-rw-r--r--include/asm-mips/elf.h4
-rw-r--r--include/asm-mips/hazards.h20
-rw-r--r--include/asm-mips/interrupt.h1
-rw-r--r--include/asm-mips/mach-au1x00/au1000.h7
-rw-r--r--include/asm-mips/mach-ip22/cpu-feature-overrides.h5
-rw-r--r--include/asm-mips/mach-ip27/cpu-feature-overrides.h5
-rw-r--r--include/asm-mips/mach-ip27/topology.h1
-rw-r--r--include/asm-mips/mach-ip32/cpu-feature-overrides.h5
-rw-r--r--include/asm-mips/mach-ja/cpu-feature-overrides.h5
-rw-r--r--include/asm-mips/mach-ocelot3/cpu-feature-overrides.h5
-rw-r--r--include/asm-mips/mach-rm200/cpu-feature-overrides.h5
-rw-r--r--include/asm-mips/mach-yosemite/cpu-feature-overrides.h5
-rw-r--r--include/asm-mips/mipsregs.h2
-rw-r--r--include/asm-mips/mman.h1
-rw-r--r--include/asm-mips/mutex.h9
-rw-r--r--include/asm-mips/processor.h11
-rw-r--r--include/asm-mips/riscos-syscall.h979
-rw-r--r--include/asm-mips/system.h12
-rw-r--r--include/asm-mips/thread_info.h2
-rw-r--r--include/asm-mips/vr41xx/capcella.h2
-rw-r--r--include/asm-mips/vr41xx/e55.h2
-rw-r--r--include/asm-mips/vr41xx/giu.h2
-rw-r--r--include/asm-mips/vr41xx/mpc30x.h2
-rw-r--r--include/asm-mips/vr41xx/pci.h2
-rw-r--r--include/asm-mips/vr41xx/siu.h2
-rw-r--r--include/asm-mips/vr41xx/tb0219.h2
-rw-r--r--include/asm-mips/vr41xx/tb0226.h2
-rw-r--r--include/asm-mips/vr41xx/vr41xx.h2
-rw-r--r--include/asm-mips/vr41xx/vrc4173.h2
-rw-r--r--include/asm-mips/vr41xx/workpad.h2
-rw-r--r--include/asm-parisc/atomic.h2
-rw-r--r--include/asm-parisc/cache.h9
-rw-r--r--include/asm-parisc/futex.h49
-rw-r--r--include/asm-parisc/io.h2
-rw-r--r--include/asm-parisc/mman.h1
-rw-r--r--include/asm-parisc/mutex.h9
-rw-r--r--include/asm-parisc/page.h7
-rw-r--r--include/asm-parisc/pci.h8
-rw-r--r--include/asm-parisc/processor.h18
-rw-r--r--include/asm-parisc/system.h9
-rw-r--r--include/asm-parisc/thread_info.h3
-rw-r--r--include/asm-parisc/tlbflush.h1
-rw-r--r--include/asm-powerpc/abs_addr.h2
-rw-r--r--include/asm-powerpc/agp.h2
-rw-r--r--include/asm-powerpc/asm-compat.h3
-rw-r--r--include/asm-powerpc/atomic.h48
-rw-r--r--include/asm-powerpc/bitops.h6
-rw-r--r--include/asm-powerpc/bootx.h171
-rw-r--r--include/asm-powerpc/btext.h19
-rw-r--r--include/asm-powerpc/bug.h2
-rw-r--r--include/asm-powerpc/cache.h1
-rw-r--r--include/asm-powerpc/checksum.h2
-rw-r--r--include/asm-powerpc/compat.h2
-rw-r--r--include/asm-powerpc/cputable.h40
-rw-r--r--include/asm-powerpc/current.h2
-rw-r--r--include/asm-powerpc/delay.h2
-rw-r--r--include/asm-powerpc/dma-mapping.h4
-rw-r--r--include/asm-powerpc/dma.h2
-rw-r--r--include/asm-powerpc/eeh.h23
-rw-r--r--include/asm-powerpc/eeh_event.h9
-rw-r--r--include/asm-powerpc/elf.h20
-rw-r--r--include/asm-powerpc/firmware.h6
-rw-r--r--include/asm-powerpc/floppy.h2
-rw-r--r--include/asm-powerpc/futex.h2
-rw-r--r--include/asm-powerpc/grackle.h5
-rw-r--r--include/asm-powerpc/hardirq.h2
-rw-r--r--include/asm-powerpc/heathrow.h5
-rw-r--r--include/asm-powerpc/hvcall.h7
-rw-r--r--include/asm-powerpc/hvconsole.h2
-rw-r--r--include/asm-powerpc/hvcserver.h2
-rw-r--r--include/asm-powerpc/i8259.h2
-rw-r--r--include/asm-powerpc/ibmebus.h85
-rw-r--r--include/asm-powerpc/io.h6
-rw-r--r--include/asm-powerpc/iommu.h27
-rw-r--r--include/asm-powerpc/ipic.h (renamed from include/asm-ppc/ipic.h)0
-rw-r--r--include/asm-powerpc/iseries/hv_call.h4
-rw-r--r--include/asm-powerpc/iseries/hv_call_event.h134
-rw-r--r--include/asm-powerpc/iseries/hv_call_sc.h1
-rw-r--r--include/asm-powerpc/iseries/hv_lp_config.h1
-rw-r--r--include/asm-powerpc/iseries/hv_lp_event.h42
-rw-r--r--include/asm-powerpc/iseries/hv_types.h1
-rw-r--r--include/asm-powerpc/iseries/iseries_io.h14
-rw-r--r--include/asm-powerpc/iseries/it_exp_vpd_panel.h1
-rw-r--r--include/asm-powerpc/iseries/it_lp_naca.h22
-rw-r--r--include/asm-powerpc/iseries/it_lp_queue.h1
-rw-r--r--include/asm-powerpc/iseries/it_lp_reg_save.h5
-rw-r--r--include/asm-powerpc/iseries/lpar_map.h1
-rw-r--r--include/asm-powerpc/iseries/mf.h1
-rw-r--r--include/asm-powerpc/iseries/vio.h1
-rw-r--r--include/asm-powerpc/kdebug.h2
-rw-r--r--include/asm-powerpc/kdump.h13
-rw-r--r--include/asm-powerpc/kexec.h28
-rw-r--r--include/asm-powerpc/keylargo.h15
-rw-r--r--include/asm-powerpc/kprobes.h16
-rw-r--r--include/asm-powerpc/lmb.h2
-rw-r--r--include/asm-powerpc/lppaca.h8
-rw-r--r--include/asm-powerpc/machdep.h12
-rw-r--r--include/asm-powerpc/macio.h2
-rw-r--r--include/asm-powerpc/mman.h1
-rw-r--r--include/asm-powerpc/mmu.h9
-rw-r--r--include/asm-powerpc/mmu_context.h2
-rw-r--r--include/asm-powerpc/mmzone.h2
-rw-r--r--include/asm-powerpc/module.h2
-rw-r--r--include/asm-powerpc/mpic.h6
-rw-r--r--include/asm-powerpc/mutex.h9
-rw-r--r--include/asm-powerpc/numnodes.h2
-rw-r--r--include/asm-powerpc/nvram.h4
-rw-r--r--include/asm-powerpc/of_device.h2
-rw-r--r--include/asm-powerpc/ohare.h6
-rw-r--r--include/asm-powerpc/oprofile_impl.h33
-rw-r--r--include/asm-powerpc/pSeries_reconfig.h2
-rw-r--r--include/asm-powerpc/paca.h36
-rw-r--r--include/asm-powerpc/page.h34
-rw-r--r--include/asm-powerpc/page_32.h2
-rw-r--r--include/asm-powerpc/page_64.h12
-rw-r--r--include/asm-powerpc/param.h2
-rw-r--r--include/asm-powerpc/parport.h30
-rw-r--r--include/asm-powerpc/pci-bridge.h35
-rw-r--r--include/asm-powerpc/pci.h2
-rw-r--r--include/asm-powerpc/percpu.h56
-rw-r--r--include/asm-powerpc/pgalloc.h2
-rw-r--r--include/asm-powerpc/pgtable-64k.h6
-rw-r--r--include/asm-powerpc/pgtable.h13
-rw-r--r--include/asm-powerpc/pmac_feature.h25
-rw-r--r--include/asm-powerpc/pmac_low_i2c.h92
-rw-r--r--include/asm-powerpc/pmac_pfunc.h253
-rw-r--r--include/asm-powerpc/pmc.h2
-rw-r--r--include/asm-powerpc/ppc-pci.h25
-rw-r--r--include/asm-powerpc/ppc_asm.h79
-rw-r--r--include/asm-powerpc/processor.h5
-rw-r--r--include/asm-powerpc/prom.h79
-rw-r--r--include/asm-powerpc/ptrace.h2
-rw-r--r--include/asm-powerpc/reg.h58
-rw-r--r--include/asm-powerpc/rtas.h3
-rw-r--r--include/asm-powerpc/seccomp.h4
-rw-r--r--include/asm-powerpc/sections.h2
-rw-r--r--include/asm-powerpc/serial.h6
-rw-r--r--include/asm-powerpc/signal.h7
-rw-r--r--include/asm-powerpc/smu.h40
-rw-r--r--include/asm-powerpc/sparsemem.h10
-rw-r--r--include/asm-powerpc/spinlock.h23
-rw-r--r--include/asm-powerpc/spu.h600
-rw-r--r--include/asm-powerpc/spu_csa.h255
-rw-r--r--include/asm-powerpc/synch.h25
-rw-r--r--include/asm-powerpc/system.h29
-rw-r--r--include/asm-powerpc/tce.h2
-rw-r--r--include/asm-powerpc/thread_info.h15
-rw-r--r--include/asm-powerpc/time.h5
-rw-r--r--include/asm-powerpc/tlb.h2
-rw-r--r--include/asm-powerpc/topology.h7
-rw-r--r--include/asm-powerpc/udbg.h26
-rw-r--r--include/asm-powerpc/unistd.h4
-rw-r--r--include/asm-powerpc/vdso_datapage.h2
-rw-r--r--include/asm-powerpc/vio.h2
-rw-r--r--include/asm-ppc/bseip.h38
-rw-r--r--include/asm-ppc/btext.h2
-rw-r--r--include/asm-ppc/ibm_ocp.h1
-rw-r--r--include/asm-ppc/io.h2
-rw-r--r--include/asm-ppc/machdep.h4
-rw-r--r--include/asm-ppc/mpc52xx.h13
-rw-r--r--include/asm-ppc/mpc85xx.h4
-rw-r--r--include/asm-ppc/pci-bridge.h9
-rw-r--r--include/asm-ppc/prom.h32
-rw-r--r--include/asm-ppc/system.h22
-rw-r--r--include/asm-s390/atomic.h176
-rw-r--r--include/asm-s390/cache.h1
-rw-r--r--include/asm-s390/ccwdev.h3
-rw-r--r--include/asm-s390/elf.h2
-rw-r--r--include/asm-s390/futex.h49
-rw-r--r--include/asm-s390/ioctl.h89
-rw-r--r--include/asm-s390/kexec.h5
-rw-r--r--include/asm-s390/mman.h1
-rw-r--r--include/asm-s390/mutex.h9
-rw-r--r--include/asm-s390/processor.h8
-rw-r--r--include/asm-s390/qdio.h8
-rw-r--r--include/asm-s390/s390_rdev.h15
-rw-r--r--include/asm-s390/system.h10
-rw-r--r--include/asm-s390/thread_info.h2
-rw-r--r--include/asm-s390/uaccess.h14
-rw-r--r--include/asm-s390/unistd.h2
-rw-r--r--include/asm-s390/vtoc.h24
-rw-r--r--include/asm-sh/atomic.h3
-rw-r--r--include/asm-sh/cache.h2
-rw-r--r--include/asm-sh/futex.h49
-rw-r--r--include/asm-sh/ioctl.h76
-rw-r--r--include/asm-sh/mman.h1
-rw-r--r--include/asm-sh/mutex.h9
-rw-r--r--include/asm-sh/ptrace.h10
-rw-r--r--include/asm-sh/system.h10
-rw-r--r--include/asm-sh/thread_info.h2
-rw-r--r--include/asm-sh64/atomic.h3
-rw-r--r--include/asm-sh64/cache.h2
-rw-r--r--include/asm-sh64/futex.h49
-rw-r--r--include/asm-sh64/io.h8
-rw-r--r--include/asm-sh64/ioctl.h84
-rw-r--r--include/asm-sh64/mmu_context.h2
-rw-r--r--include/asm-sh64/mutex.h9
-rw-r--r--include/asm-sh64/pgalloc.h14
-rw-r--r--include/asm-sh64/pgtable.h24
-rw-r--r--include/asm-sh64/processor.h4
-rw-r--r--include/asm-sh64/system.h4
-rw-r--r--include/asm-sh64/thread_info.h2
-rw-r--r--include/asm-sh64/tlbflush.h2
-rw-r--r--include/asm-sh64/uaccess.h2
-rw-r--r--include/asm-sparc/atomic.h2
-rw-r--r--include/asm-sparc/cache.h1
-rw-r--r--include/asm-sparc/futex.h49
-rw-r--r--include/asm-sparc/mman.h1
-rw-r--r--include/asm-sparc/mutex.h9
-rw-r--r--include/asm-sparc/system.h12
-rw-r--r--include/asm-sparc/thread_info.h3
-rw-r--r--include/asm-sparc64/atomic.h2
-rw-r--r--include/asm-sparc64/cache.h1
-rw-r--r--include/asm-sparc64/elf.h2
-rw-r--r--include/asm-sparc64/futex.h49
-rw-r--r--include/asm-sparc64/kprobes.h10
-rw-r--r--include/asm-sparc64/mman.h1
-rw-r--r--include/asm-sparc64/mmu_context.h2
-rw-r--r--include/asm-sparc64/mutex.h9
-rw-r--r--include/asm-sparc64/processor.h5
-rw-r--r--include/asm-sparc64/system.h18
-rw-r--r--include/asm-um/cache.h3
-rw-r--r--include/asm-um/futex.h12
-rw-r--r--include/asm-um/mutex.h9
-rw-r--r--include/asm-um/processor-generic.h1
-rw-r--r--include/asm-um/rwsem.h4
-rw-r--r--include/asm-um/thread_info.h3
-rw-r--r--include/asm-v850/atomic.h3
-rw-r--r--include/asm-v850/cache.h2
-rw-r--r--include/asm-v850/futex.h49
-rw-r--r--include/asm-v850/ioctl.h81
-rw-r--r--include/asm-v850/mman.h1
-rw-r--r--include/asm-v850/mutex.h9
-rw-r--r--include/asm-v850/processor.h8
-rw-r--r--include/asm-v850/thread_info.h2
-rw-r--r--include/asm-v850/unistd.h18
-rw-r--r--include/asm-x86_64/apic.h17
-rw-r--r--include/asm-x86_64/apicdef.h1
-rw-r--r--include/asm-x86_64/atomic.h3
-rw-r--r--include/asm-x86_64/bitops.h43
-rw-r--r--include/asm-x86_64/cache.h13
-rw-r--r--include/asm-x86_64/cacheflush.h4
-rw-r--r--include/asm-x86_64/compat.h7
-rw-r--r--include/asm-x86_64/cpufeature.h3
-rw-r--r--include/asm-x86_64/desc.h18
-rw-r--r--include/asm-x86_64/dma-mapping.h221
-rw-r--r--include/asm-x86_64/dwarf2.h4
-rw-r--r--include/asm-x86_64/e820.h1
-rw-r--r--include/asm-x86_64/gart-mapping.h16
-rw-r--r--include/asm-x86_64/hw_irq.h10
-rw-r--r--include/asm-x86_64/i387.h68
-rw-r--r--include/asm-x86_64/ia32_unistd.h3
-rw-r--r--include/asm-x86_64/idle.h14
-rw-r--r--include/asm-x86_64/io.h5
-rw-r--r--include/asm-x86_64/ioctl.h76
-rw-r--r--include/asm-x86_64/ipi.h4
-rw-r--r--include/asm-x86_64/kdebug.h13
-rw-r--r--include/asm-x86_64/kexec.h37
-rw-r--r--include/asm-x86_64/kprobes.h4
-rw-r--r--include/asm-x86_64/mman.h1
-rw-r--r--include/asm-x86_64/mmu_context.h9
-rw-r--r--include/asm-x86_64/mmzone.h16
-rw-r--r--include/asm-x86_64/mpspec.h2
-rw-r--r--include/asm-x86_64/mutex.h113
-rw-r--r--include/asm-x86_64/numa.h5
-rw-r--r--include/asm-x86_64/page.h15
-rw-r--r--include/asm-x86_64/pci.h11
-rw-r--r--include/asm-x86_64/pda.h11
-rw-r--r--include/asm-x86_64/percpu.h2
-rw-r--r--include/asm-x86_64/pgtable.h34
-rw-r--r--include/asm-x86_64/processor.h12
-rw-r--r--include/asm-x86_64/proto.h15
-rw-r--r--include/asm-x86_64/segment.h4
-rw-r--r--include/asm-x86_64/smp.h1
-rw-r--r--include/asm-x86_64/swiotlb.h16
-rw-r--r--include/asm-x86_64/system.h55
-rw-r--r--include/asm-x86_64/thread_info.h3
-rw-r--r--include/asm-x86_64/timex.h16
-rw-r--r--include/asm-x86_64/topology.h1
-rw-r--r--include/asm-x86_64/uaccess.h1
-rw-r--r--include/asm-x86_64/unistd.h4
-rw-r--r--include/asm-x86_64/vsyscall.h4
-rw-r--r--include/asm-xtensa/atomic.h2
-rw-r--r--include/asm-xtensa/ioctl.h84
-rw-r--r--include/asm-xtensa/mman.h1
-rw-r--r--include/asm-xtensa/mutex.h9
-rw-r--r--include/asm-xtensa/processor.h6
-rw-r--r--include/asm-xtensa/ptrace.h4
-rw-r--r--include/asm-xtensa/thread_info.h2
-rw-r--r--include/keys/user-type.h1
-rw-r--r--include/linux/aio.h12
-rw-r--r--include/linux/amba/bus.h (renamed from include/asm-arm/hardware/amba.h)0
-rw-r--r--include/linux/amba/clcd.h (renamed from include/asm-arm/hardware/amba_clcd.h)0
-rw-r--r--include/linux/amba/kmi.h (renamed from include/asm-arm/hardware/amba_kmi.h)0
-rw-r--r--include/linux/amba/serial.h (renamed from include/asm-arm/hardware/amba_serial.h)0
-rw-r--r--include/linux/ata.h8
-rw-r--r--include/linux/atalk.h18
-rw-r--r--include/linux/auxvec.h2
-rw-r--r--include/linux/blkdev.h113
-rw-r--r--include/linux/bootmem.h46
-rw-r--r--include/linux/buffer_head.h3
-rw-r--r--include/linux/byteorder/generic.h2
-rw-r--r--include/linux/byteorder/swab.h2
-rw-r--r--include/linux/byteorder/swabb.h2
-rw-r--r--include/linux/cache.h19
-rw-r--r--include/linux/calc64.h49
-rw-r--r--include/linux/capability.h3
-rw-r--r--include/linux/clk.h (renamed from include/asm-arm/hardware/clock.h)30
-rw-r--r--include/linux/compat_ioctl.h34
-rw-r--r--include/linux/compiler-gcc.h17
-rw-r--r--include/linux/compiler-gcc2.h29
-rw-r--r--include/linux/compiler-gcc3.h17
-rw-r--r--include/linux/compiler-gcc4.h7
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/configfs.h205
-rw-r--r--include/linux/cpuset.h40
-rw-r--r--include/linux/crypto.h5
-rw-r--r--include/linux/cycx_x25.h66
-rw-r--r--include/linux/dcache.h9
-rw-r--r--include/linux/dm-ioctl.h11
-rw-r--r--include/linux/dmi.h5
-rw-r--r--include/linux/dvb/frontend.h10
-rw-r--r--include/linux/elevator.h5
-rw-r--r--include/linux/elf.h2
-rw-r--r--include/linux/ext3_fs_i.h2
-rw-r--r--include/linux/filter.h2
-rw-r--r--include/linux/fs.h75
-rw-r--r--include/linux/fsl_devices.h6
-rw-r--r--include/linux/fuse.h24
-rw-r--r--include/linux/gfp.h5
-rw-r--r--include/linux/hrtimer.h139
-rw-r--r--include/linux/hugetlb.h4
-rw-r--r--include/linux/hwmon-vid.h6
-rw-r--r--include/linux/i2c-id.h21
-rw-r--r--include/linux/i2c.h41
-rw-r--r--include/linux/i2o.h976
-rw-r--r--include/linux/ide.h7
-rw-r--r--include/linux/if_frad.h12
-rw-r--r--include/linux/inet.h2
-rw-r--r--include/linux/interrupt.h4
-rw-r--r--include/linux/ip.h10
-rw-r--r--include/linux/ipv6.h4
-rw-r--r--include/linux/irq.h13
-rw-r--r--include/linux/isdnif.h70
-rw-r--r--include/linux/isicom.h59
-rw-r--r--include/linux/jbd.h8
-rw-r--r--include/linux/jffs2_fs_i.h4
-rw-r--r--include/linux/jiffies.h18
-rw-r--r--include/linux/kbd_kern.h2
-rw-r--r--include/linux/kernel.h17
-rw-r--r--include/linux/kexec.h4
-rw-r--r--include/linux/key.h20
-rw-r--r--include/linux/keyctl.h3
-rw-r--r--include/linux/kprobes.h6
-rw-r--r--include/linux/ktime.h284
-rw-r--r--include/linux/libata.h11
-rw-r--r--include/linux/list.h14
-rw-r--r--include/linux/lockd/lockd.h2
-rw-r--r--include/linux/loop.h4
-rw-r--r--include/linux/memory.h8
-rw-r--r--include/linux/mempolicy.h84
-rw-r--r--include/linux/mm.h110
-rw-r--r--include/linux/mm_inline.h22
-rw-r--r--include/linux/mmc/card.h5
-rw-r--r--include/linux/mmc/mmc.h6
-rw-r--r--include/linux/mmzone.h52
-rw-r--r--include/linux/moduleparam.h2
-rw-r--r--include/linux/mount.h9
-rw-r--r--include/linux/msdos_fs.h3
-rw-r--r--include/linux/mutex-debug.h23
-rw-r--r--include/linux/mutex.h120
-rw-r--r--include/linux/namei.h2
-rw-r--r--include/linux/nbd.h8
-rw-r--r--include/linux/ncp.h126
-rw-r--r--include/linux/netfilter.h92
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h3
-rw-r--r--include/linux/netfilter/x_tables.h224
-rw-r--r--include/linux/netfilter/xt_CLASSIFY.h8
-rw-r--r--include/linux/netfilter/xt_CONNMARK.h25
-rw-r--r--include/linux/netfilter/xt_MARK.h21
-rw-r--r--include/linux/netfilter/xt_NFQUEUE.h16
-rw-r--r--include/linux/netfilter/xt_comment.h10
-rw-r--r--include/linux/netfilter/xt_connbytes.h25
-rw-r--r--include/linux/netfilter/xt_connmark.h18
-rw-r--r--include/linux/netfilter/xt_conntrack.h63
-rw-r--r--include/linux/netfilter/xt_dccp.h23
-rw-r--r--include/linux/netfilter/xt_helper.h8
-rw-r--r--include/linux/netfilter/xt_length.h9
-rw-r--r--include/linux/netfilter/xt_limit.h21
-rw-r--r--include/linux/netfilter/xt_mac.h8
-rw-r--r--include/linux/netfilter/xt_mark.h9
-rw-r--r--include/linux/netfilter/xt_physdev.h24
-rw-r--r--include/linux/netfilter/xt_pkttype.h8
-rw-r--r--include/linux/netfilter/xt_realm.h10
-rw-r--r--include/linux/netfilter/xt_sctp.h107
-rw-r--r--include/linux/netfilter/xt_state.h13
-rw-r--r--include/linux/netfilter/xt_string.h18
-rw-r--r--include/linux/netfilter/xt_tcpmss.h9
-rw-r--r--include/linux/netfilter/xt_tcpudp.h36
-rw-r--r--include/linux/netfilter_arp/arp_tables.h123
-rw-r--r--include/linux/netfilter_ipv4/ip_conntrack.h3
-rw-r--r--include/linux/netfilter_ipv4/ip_nat_protocol.h7
-rw-r--r--include/linux/netfilter_ipv4/ip_tables.h217
-rw-r--r--include/linux/netfilter_ipv4/ipt_CLASSIFY.h5
-rw-r--r--include/linux/netfilter_ipv4/ipt_CONNMARK.h16
-rw-r--r--include/linux/netfilter_ipv4/ipt_MARK.h22
-rw-r--r--include/linux/netfilter_ipv4/ipt_NFQUEUE.h8
-rw-r--r--include/linux/netfilter_ipv4/ipt_comment.h8
-rw-r--r--include/linux/netfilter_ipv4/ipt_connbytes.h31
-rw-r--r--include/linux/netfilter_ipv4/ipt_connmark.h15
-rw-r--r--include/linux/netfilter_ipv4/ipt_conntrack.h66
-rw-r--r--include/linux/netfilter_ipv4/ipt_dccp.h22
-rw-r--r--include/linux/netfilter_ipv4/ipt_helper.h7
-rw-r--r--include/linux/netfilter_ipv4/ipt_length.h6
-rw-r--r--include/linux/netfilter_ipv4/ipt_limit.h19
-rw-r--r--include/linux/netfilter_ipv4/ipt_mac.h7
-rw-r--r--include/linux/netfilter_ipv4/ipt_mark.h8
-rw-r--r--include/linux/netfilter_ipv4/ipt_physdev.h27
-rw-r--r--include/linux/netfilter_ipv4/ipt_pkttype.h7
-rw-r--r--include/linux/netfilter_ipv4/ipt_policy.h52
-rw-r--r--include/linux/netfilter_ipv4/ipt_realm.h7
-rw-r--r--include/linux/netfilter_ipv4/ipt_state.h16
-rw-r--r--include/linux/netfilter_ipv4/ipt_string.h16
-rw-r--r--include/linux/netfilter_ipv4/ipt_tcpmss.h6
-rw-r--r--include/linux/netfilter_ipv6.h5
-rw-r--r--include/linux/netfilter_ipv6/ip6_tables.h208
-rw-r--r--include/linux/netfilter_ipv6/ip6t_MARK.h9
-rw-r--r--include/linux/netfilter_ipv6/ip6t_length.h6
-rw-r--r--include/linux/netfilter_ipv6/ip6t_limit.h21
-rw-r--r--include/linux/netfilter_ipv6/ip6t_mac.h9
-rw-r--r--include/linux/netfilter_ipv6/ip6t_mark.h8
-rw-r--r--include/linux/netfilter_ipv6/ip6t_physdev.h27
-rw-r--r--include/linux/netfilter_ipv6/ip6t_policy.h52
-rw-r--r--include/linux/nfs_fs.h65
-rw-r--r--include/linux/nfs_idmap.h2
-rw-r--r--include/linux/nfs_page.h12
-rw-r--r--include/linux/nfs_xdr.h89
-rw-r--r--include/linux/nfsd/nfsfh.h6
-rw-r--r--include/linux/nfsd/xdr.h3
-rw-r--r--include/linux/nfsd/xdr3.h1
-rw-r--r--include/linux/page-flags.h91
-rw-r--r--include/linux/pagevec.h5
-rw-r--r--include/linux/parport.h3
-rw-r--r--include/linux/parport_pc.h4
-rw-r--r--include/linux/pci.h69
-rw-r--r--include/linux/pci_ids.h45
-rw-r--r--include/linux/pci_regs.h1
-rw-r--r--include/linux/percpu.h8
-rw-r--r--include/linux/phy.h3
-rw-r--r--include/linux/pipe_fs_i.h2
-rw-r--r--include/linux/pmu.h8
-rw-r--r--include/linux/posix-timers.h90
-rw-r--r--include/linux/proc_fs.h5
-rw-r--r--include/linux/ptrace.h4
-rw-r--r--include/linux/radix-tree.h1
-rw-r--r--include/linux/raid/md.h4
-rw-r--r--include/linux/raid/md_k.h80
-rw-r--r--include/linux/raid/raid1.h14
-rw-r--r--include/linux/raid/raid10.h22
-rw-r--r--include/linux/raid/raid5.h7
-rw-r--r--include/linux/ramfs.h10
-rw-r--r--include/linux/rcupdate.h37
-rw-r--r--include/linux/rcuref.h220
-rw-r--r--include/linux/reiserfs_fs.h2
-rw-r--r--include/linux/relayfs_fs.h65
-rw-r--r--include/linux/rio_drv.h5
-rw-r--r--include/linux/rmap.h1
-rw-r--r--include/linux/rtc.h3
-rw-r--r--include/linux/sched.h91
-rw-r--r--include/linux/screen_info.h76
-rw-r--r--include/linux/sdla.h64
-rw-r--r--include/linux/seccomp.h6
-rw-r--r--include/linux/serial_core.h3
-rw-r--r--include/linux/signal.h30
-rw-r--r--include/linux/skbuff.h2
-rw-r--r--include/linux/slab.h35
-rw-r--r--include/linux/socket.h3
-rw-r--r--include/linux/spinlock_types_up.h14
-rw-r--r--include/linux/sunrpc/clnt.h5
-rw-r--r--include/linux/sunrpc/gss_spkm3.h2
-rw-r--r--include/linux/sunrpc/sched.h48
-rw-r--r--include/linux/sunrpc/xdr.h6
-rw-r--r--include/linux/sunrpc/xprt.h12
-rw-r--r--include/linux/suspend.h8
-rw-r--r--include/linux/swap.h11
-rw-r--r--include/linux/synclink.h9
-rw-r--r--include/linux/syscalls.h7
-rw-r--r--include/linux/sysctl.h2
-rw-r--r--include/linux/time.h186
-rw-r--r--include/linux/timer.h2
-rw-r--r--include/linux/tipc.h212
-rw-r--r--include/linux/tipc_config.h407
-rw-r--r--include/linux/topology.h2
-rw-r--r--include/linux/tty.h97
-rw-r--r--include/linux/tty_flip.h20
-rw-r--r--include/linux/tty_ldisc.h9
-rw-r--r--include/linux/video_decoder.h2
-rw-r--r--include/linux/videodev2.h16
-rw-r--r--include/linux/wavefront.h36
-rw-r--r--include/linux/workqueue.h1
-rw-r--r--include/linux/writeback.h19
-rw-r--r--include/linux/xattr.h19
-rw-r--r--include/linux/zlib.h11
-rw-r--r--include/media/audiochip.h16
-rw-r--r--include/media/saa7146.h6
-rw-r--r--include/media/saa7146_vv.h2
-rw-r--r--include/media/tuner.h99
-rw-r--r--include/media/v4l2-common.h84
-rw-r--r--include/net/act_api.h2
-rw-r--r--include/net/dn_dev.h84
-rw-r--r--include/net/dn_nsp.h74
-rw-r--r--include/net/dst.h11
-rw-r--r--include/net/genetlink.h1
-rw-r--r--include/net/ieee80211.h6
-rw-r--r--include/net/inet_connection_sock.h4
-rw-r--r--include/net/ip.h10
-rw-r--r--include/net/ipv6.h2
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h3
-rw-r--r--include/net/netfilter/nf_conntrack.h3
-rw-r--r--include/net/netfilter/nf_conntrack_tuple.h2
-rw-r--r--include/net/pkt_sched.h23
-rw-r--r--include/net/protocol.h2
-rw-r--r--include/net/sctp/sctp.h4
-rw-r--r--include/net/sock.h4
-rw-r--r--include/net/tipc/tipc.h257
-rw-r--r--include/net/tipc/tipc_bearer.h121
-rw-r--r--include/net/tipc/tipc_msg.h223
-rw-r--r--include/net/tipc/tipc_port.h108
-rw-r--r--include/net/xfrm.h9
-rw-r--r--include/rdma/ib_verbs.h2
-rw-r--r--include/scsi/scsi_driver.h1
-rw-r--r--include/scsi/scsi_host.h7
-rw-r--r--include/sound/core.h3
-rw-r--r--include/sound/wavefront.h36
-rw-r--r--include/video/cyblafb.h4
-rw-r--r--include/video/kyro.h1
-rw-r--r--include/video/neomagic.h1
-rw-r--r--include/video/newport.h5
-rw-r--r--include/video/sstfb.h1
-rw-r--r--include/video/tdfx.h86
-rw-r--r--init/Kconfig39
-rw-r--r--init/do_mounts_md.c22
-rw-r--r--init/do_mounts_rd.c4
-rw-r--r--init/main.c14
-rw-r--r--ipc/mqueue.c9
-rw-r--r--ipc/msg.c1
-rw-r--r--ipc/sem.c1
-rw-r--r--ipc/shm.c41
-rw-r--r--ipc/util.c1
-rw-r--r--kernel/Makefile5
-rw-r--r--kernel/acct.c17
-rw-r--r--kernel/audit.c2
-rw-r--r--kernel/capability.c1
-rw-r--r--kernel/compat.c20
-rw-r--r--kernel/cpuset.c547
-rw-r--r--kernel/exit.c49
-rw-r--r--kernel/fork.c50
-rw-r--r--kernel/futex.c7
-rw-r--r--kernel/hrtimer.c825
-rw-r--r--kernel/irq/manage.c2
-rw-r--r--kernel/irq/proc.c6
-rw-r--r--kernel/itimer.c106
-rw-r--r--kernel/kexec.c21
-rw-r--r--kernel/kprobes.c137
-rw-r--r--kernel/ksysfs.c13
-rw-r--r--kernel/module.c60
-rw-r--r--kernel/mutex-debug.c462
-rw-r--r--kernel/mutex-debug.h134
-rw-r--r--kernel/mutex.c315
-rw-r--r--kernel/mutex.h35
-rw-r--r--kernel/panic.c4
-rw-r--r--kernel/pid.c22
-rw-r--r--kernel/posix-cpu-timers.c76
-rw-r--r--kernel/posix-timers.c887
-rw-r--r--kernel/power/disk.c92
-rw-r--r--kernel/power/power.h24
-rw-r--r--kernel/power/snapshot.c89
-rw-r--r--kernel/power/swsusp.c1020
-rw-r--r--kernel/printk.c6
-rw-r--r--kernel/ptrace.c78
-rw-r--r--kernel/rcupdate.c135
-rw-r--r--kernel/rcutorture.c99
-rw-r--r--kernel/resource.c2
-rw-r--r--kernel/sched.c493
-rw-r--r--kernel/signal.c138
-rw-r--r--kernel/stop_machine.c6
-rw-r--r--kernel/sys.c75
-rw-r--r--kernel/sys_ni.c24
-rw-r--r--kernel/sysctl.c29
-rw-r--r--kernel/time.c118
-rw-r--r--kernel/timer.c58
-rw-r--r--kernel/uid16.c1
-rw-r--r--kernel/workqueue.c40
-rw-r--r--lib/Kconfig.debug29
-rw-r--r--lib/bitmap.c89
-rw-r--r--lib/dec_and_lock.c49
-rw-r--r--lib/find_next_bit.c3
-rw-r--r--lib/radix-tree.c143
-rw-r--r--lib/spinlock_debug.c18
-rw-r--r--lib/swiotlb.c5
-rw-r--r--lib/zlib_deflate/deflate.c6
-rw-r--r--lib/zlib_deflate/deflate_syms.c2
-rw-r--r--lib/zlib_inflate/infblock.c4
-rw-r--r--lib/zlib_inflate/infblock.h4
-rw-r--r--lib/zlib_inflate/inflate_syms.c2
-rw-r--r--lib/zlib_inflate/inflate_sync.c4
-rw-r--r--mm/Kconfig9
-rw-r--r--mm/Makefile6
-rw-r--r--mm/bootmem.c58
-rw-r--r--mm/fadvise.c5
-rw-r--r--mm/filemap.c159
-rw-r--r--mm/filemap_xip.c8
-rw-r--r--mm/hugetlb.c194
-rw-r--r--mm/internal.h21
-rw-r--r--mm/madvise.c35
-rw-r--r--mm/memory.c34
-rw-r--r--mm/memory_hotplug.c1
-rw-r--r--mm/mempolicy.c665
-rw-r--r--mm/mlock.c1
-rw-r--r--mm/mmap.c1
-rw-r--r--mm/mremap.c1
-rw-r--r--mm/msync.c2
-rw-r--r--mm/nommu.c7
-rw-r--r--mm/oom_kill.c5
-rw-r--r--mm/page-writeback.c10
-rw-r--r--mm/page_alloc.c472
-rw-r--r--mm/pdflush.c2
-rw-r--r--mm/readahead.c15
-rw-r--r--mm/rmap.c72
-rw-r--r--mm/shmem.c42
-rw-r--r--mm/slab.c1140
-rw-r--r--mm/slob.c385
-rw-r--r--mm/sparse.c4
-rw-r--r--mm/swap.c29
-rw-r--r--mm/swap_state.c8
-rw-r--r--mm/swapfile.c43
-rw-r--r--mm/tiny-shmem.c29
-rw-r--r--mm/truncate.c45
-rw-r--r--mm/util.c39
-rw-r--r--mm/vmscan.c468
-rw-r--r--net/802/Makefile4
-rw-r--r--net/8021q/vlan.c1
-rw-r--r--net/8021q/vlan_dev.c2
-rw-r--r--net/Kconfig1
-rw-r--r--net/Makefile1
-rw-r--r--net/appletalk/ddp.c1
-rw-r--r--net/atm/br2684.c7
-rw-r--r--net/atm/clip.c1
-rw-r--r--net/atm/ioctl.c1
-rw-r--r--net/atm/lec.c10
-rw-r--r--net/atm/mpc.c3
-rw-r--r--net/atm/pppoatm.c1
-rw-r--r--net/atm/raw.c1
-rw-r--r--net/atm/resources.c1
-rw-r--r--net/ax25/af_ax25.c1
-rw-r--r--net/ax25/ax25_route.c2
-rw-r--r--net/ax25/ax25_uid.c2
-rw-r--r--net/bluetooth/bnep/core.c6
-rw-r--r--net/bluetooth/bnep/sock.c1
-rw-r--r--net/bluetooth/cmtp/sock.c1
-rw-r--r--net/bluetooth/hci_conn.c2
-rw-r--r--net/bluetooth/hci_sock.c1
-rw-r--r--net/bluetooth/hidp/sock.c1
-rw-r--r--net/bluetooth/l2cap.c1
-rw-r--r--net/bluetooth/rfcomm/tty.c10
-rw-r--r--net/bridge/br_input.c2
-rw-r--r--net/bridge/br_ioctl.c1
-rw-r--r--net/bridge/br_netfilter.c13
-rw-r--r--net/bridge/br_sysfs_br.c1
-rw-r--r--net/bridge/br_sysfs_if.c1
-rw-r--r--net/bridge/netfilter/ebt_ip.c3
-rw-r--r--net/bridge/netfilter/ebt_log.c1
-rw-r--r--net/bridge/netfilter/ebt_stp.c5
-rw-r--r--net/bridge/netfilter/ebtables.c4
-rw-r--r--net/core/dev.c10
-rw-r--r--net/core/dev_mcast.c2
-rw-r--r--net/core/dv.c3
-rw-r--r--net/core/ethtool.c1
-rw-r--r--net/core/filter.c16
-rw-r--r--net/core/net-sysfs.c29
-rw-r--r--net/core/pktgen.c7
-rw-r--r--net/core/scm.c1
-rw-r--r--net/core/skbuff.c15
-rw-r--r--net/core/sock.c1
-rw-r--r--net/core/utils.c2
-rw-r--r--net/core/wireless.c3
-rw-r--r--net/dccp/ipv4.c1
-rw-r--r--net/dccp/ipv6.c3
-rw-r--r--net/decnet/af_decnet.c1
-rw-r--r--net/decnet/dn_dev.c1
-rw-r--r--net/decnet/netfilter/dn_rtmsg.c2
-rw-r--r--net/ethernet/eth.c2
-rw-r--r--net/ieee80211/ieee80211_crypt_wep.c61
-rw-r--r--net/ieee80211/ieee80211_rx.c14
-rw-r--r--net/ieee80211/ieee80211_tx.c2
-rw-r--r--net/ieee80211/ieee80211_wx.c2
-rw-r--r--net/ipv4/Makefile4
-rw-r--r--net/ipv4/af_inet.c1
-rw-r--r--net/ipv4/arp.c1
-rw-r--r--net/ipv4/devinet.c1
-rw-r--r--net/ipv4/fib_frontend.c1
-rw-r--r--net/ipv4/icmp.c3
-rw-r--r--net/ipv4/igmp.c15
-rw-r--r--net/ipv4/inet_diag.c243
-rw-r--r--net/ipv4/inetpeer.c6
-rw-r--r--net/ipv4/ip_fragment.c2
-rw-r--r--net/ipv4/ip_gre.c35
-rw-r--r--net/ipv4/ip_input.c15
-rw-r--r--net/ipv4/ip_options.c1
-rw-r--r--net/ipv4/ip_output.c15
-rw-r--r--net/ipv4/ip_sockglue.c6
-rw-r--r--net/ipv4/ipip.c20
-rw-r--r--net/ipv4/ipmr.c23
-rw-r--r--net/ipv4/ipvs/ip_vs_ctl.c1
-rw-r--r--net/ipv4/ipvs/ip_vs_xmit.c2
-rw-r--r--net/ipv4/netfilter.c25
-rw-r--r--net/ipv4/netfilter/Kconfig256
-rw-r--r--net/ipv4/netfilter/Makefile22
-rw-r--r--net/ipv4/netfilter/arp_tables.c445
-rw-r--r--net/ipv4/netfilter/arpt_mangle.c7
-rw-r--r--net/ipv4/netfilter/arptable_filter.c1
-rw-r--r--net/ipv4/netfilter/ip_conntrack_helper_pptp.c2
-rw-r--r--net/ipv4/netfilter/ip_conntrack_proto_generic.c2
-rw-r--r--net/ipv4/netfilter/ip_conntrack_proto_gre.c5
-rw-r--r--net/ipv4/netfilter/ip_conntrack_proto_icmp.c3
-rw-r--r--net/ipv4/netfilter/ip_conntrack_proto_sctp.c17
-rw-r--r--net/ipv4/netfilter/ip_conntrack_proto_tcp.c25
-rw-r--r--net/ipv4/netfilter/ip_conntrack_proto_udp.c5
-rw-r--r--net/ipv4/netfilter/ip_conntrack_standalone.c30
-rw-r--r--net/ipv4/netfilter/ip_nat_ftp.c2
-rw-r--r--net/ipv4/netfilter/ip_nat_helper_pptp.c81
-rw-r--r--net/ipv4/netfilter/ip_nat_irc.c2
-rw-r--r--net/ipv4/netfilter/ip_nat_proto_gre.c38
-rw-r--r--net/ipv4/netfilter/ip_nat_proto_icmp.c34
-rw-r--r--net/ipv4/netfilter/ip_nat_proto_tcp.c36
-rw-r--r--net/ipv4/netfilter/ip_nat_proto_udp.c36
-rw-r--r--net/ipv4/netfilter/ip_nat_proto_unknown.c16
-rw-r--r--net/ipv4/netfilter/ip_nat_rule.c5
-rw-r--r--net/ipv4/netfilter/ip_nat_standalone.c111
-rw-r--r--net/ipv4/netfilter/ip_tables.c843
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c3
-rw-r--r--net/ipv4/netfilter/ipt_DSCP.c2
-rw-r--r--net/ipv4/netfilter/ipt_ECN.c3
-rw-r--r--net/ipv4/netfilter/ipt_LOG.c2
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c2
-rw-r--r--net/ipv4/netfilter/ipt_NETMAP.c2
-rw-r--r--net/ipv4/netfilter/ipt_NFQUEUE.c70
-rw-r--r--net/ipv4/netfilter/ipt_REDIRECT.c2
-rw-r--r--net/ipv4/netfilter/ipt_REJECT.c3
-rw-r--r--net/ipv4/netfilter/ipt_SAME.c2
-rw-r--r--net/ipv4/netfilter/ipt_TCPMSS.c3
-rw-r--r--net/ipv4/netfilter/ipt_TOS.c2
-rw-r--r--net/ipv4/netfilter/ipt_TTL.c2
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c2
-rw-r--r--net/ipv4/netfilter/ipt_addrtype.c4
-rw-r--r--net/ipv4/netfilter/ipt_ah.c6
-rw-r--r--net/ipv4/netfilter/ipt_dscp.c4
-rw-r--r--net/ipv4/netfilter/ipt_ecn.c5
-rw-r--r--net/ipv4/netfilter/ipt_esp.c6
-rw-r--r--net/ipv4/netfilter/ipt_hashlimit.c3
-rw-r--r--net/ipv4/netfilter/ipt_iprange.c4
-rw-r--r--net/ipv4/netfilter/ipt_length.c64
-rw-r--r--net/ipv4/netfilter/ipt_multiport.c10
-rw-r--r--net/ipv4/netfilter/ipt_owner.c3
-rw-r--r--net/ipv4/netfilter/ipt_physdev.c135
-rw-r--r--net/ipv4/netfilter/ipt_policy.c170
-rw-r--r--net/ipv4/netfilter/ipt_recent.c6
-rw-r--r--net/ipv4/netfilter/ipt_tos.c3
-rw-r--r--net/ipv4/netfilter/ipt_ttl.c4
-rw-r--r--net/ipv4/netfilter/iptable_filter.c3
-rw-r--r--net/ipv4/netfilter/iptable_mangle.c1
-rw-r--r--net/ipv4/netfilter/iptable_raw.c3
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c9
-rw-r--r--net/ipv4/raw.c1
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv4/tcp_ipv4.c1
-rw-r--r--net/ipv4/udp.c2
-rw-r--r--net/ipv4/xfrm4_input.c31
-rw-r--r--net/ipv4/xfrm4_output.c72
-rw-r--r--net/ipv4/xfrm4_state.c15
-rw-r--r--net/ipv6/Makefile4
-rw-r--r--net/ipv6/addrconf.c5
-rw-r--r--net/ipv6/af_inet6.c5
-rw-r--r--net/ipv6/ah6.c3
-rw-r--r--net/ipv6/anycast.c5
-rw-r--r--net/ipv6/datagram.c1
-rw-r--r--net/ipv6/esp6.c3
-rw-r--r--net/ipv6/exthdrs.c19
-rw-r--r--net/ipv6/icmp.c6
-rw-r--r--net/ipv6/inet6_connection_sock.c1
-rw-r--r--net/ipv6/ip6_flowlabel.c9
-rw-r--r--net/ipv6/ip6_input.c23
-rw-r--r--net/ipv6/ip6_output.c4
-rw-r--r--net/ipv6/ip6_tunnel.c27
-rw-r--r--net/ipv6/ipcomp6.c3
-rw-r--r--net/ipv6/ipv6_sockglue.c3
-rw-r--r--net/ipv6/mcast.c20
-rw-r--r--net/ipv6/ndisc.c2
-rw-r--r--net/ipv6/netfilter.c29
-rw-r--r--net/ipv6/netfilter/Kconfig74
-rw-r--r--net/ipv6/netfilter/Makefile7
-rw-r--r--net/ipv6/netfilter/ip6_tables.c830
-rw-r--r--net/ipv6/netfilter/ip6t_HL.c2
-rw-r--r--net/ipv6/netfilter/ip6t_LOG.c7
-rw-r--r--net/ipv6/netfilter/ip6t_MARK.c81
-rw-r--r--net/ipv6/netfilter/ip6t_NFQUEUE.c70
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c3
-rw-r--r--net/ipv6/netfilter/ip6t_ah.c2
-rw-r--r--net/ipv6/netfilter/ip6t_dst.c2
-rw-r--r--net/ipv6/netfilter/ip6t_esp.c2
-rw-r--r--net/ipv6/netfilter/ip6t_eui64.c2
-rw-r--r--net/ipv6/netfilter/ip6t_frag.c2
-rw-r--r--net/ipv6/netfilter/ip6t_hbh.c2
-rw-r--r--net/ipv6/netfilter/ip6t_hl.c2
-rw-r--r--net/ipv6/netfilter/ip6t_ipv6header.c2
-rw-r--r--net/ipv6/netfilter/ip6t_length.c66
-rw-r--r--net/ipv6/netfilter/ip6t_limit.c147
-rw-r--r--net/ipv6/netfilter/ip6t_mac.c80
-rw-r--r--net/ipv6/netfilter/ip6t_mark.c66
-rw-r--r--net/ipv6/netfilter/ip6t_multiport.c3
-rw-r--r--net/ipv6/netfilter/ip6t_owner.c2
-rw-r--r--net/ipv6/netfilter/ip6t_policy.c175
-rw-r--r--net/ipv6/netfilter/ip6t_rt.c2
-rw-r--r--net/ipv6/netfilter/ip6table_filter.c1
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c1
-rw-r--r--net/ipv6/netfilter/ip6table_raw.c5
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c14
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c45
-rw-r--r--net/ipv6/reassembly.c11
-rw-r--r--net/ipv6/route.c1
-rw-r--r--net/ipv6/sit.c23
-rw-r--r--net/ipv6/tcp_ipv6.c22
-rw-r--r--net/ipv6/udp.c2
-rw-r--r--net/ipv6/xfrm6_input.c21
-rw-r--r--net/ipv6/xfrm6_output.c76
-rw-r--r--net/ipv6/xfrm6_state.c17
-rw-r--r--net/ipv6/xfrm6_tunnel.c14
-rw-r--r--net/ipx/af_ipx.c1
-rw-r--r--net/irda/af_irda.c1
-rw-r--r--net/irda/irda_device.c1
-rw-r--r--net/irda/irias_object.c12
-rw-r--r--net/irda/irnet/irnet.h1
-rw-r--r--net/key/af_key.c4
-rw-r--r--net/netfilter/Kconfig258
-rw-r--r--net/netfilter/Makefile37
-rw-r--r--net/netfilter/nf_conntrack_ftp.c4
-rw-r--r--net/netfilter/nf_conntrack_proto_generic.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c18
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c24
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c4
-rw-r--r--net/netfilter/nf_conntrack_standalone.c28
-rw-r--r--net/netfilter/x_tables.c624
-rw-r--r--net/netfilter/xt_CLASSIFY.c (renamed from net/ipv4/netfilter/ipt_CLASSIFY.c)41
-rw-r--r--net/netfilter/xt_CONNMARK.c (renamed from net/ipv4/netfilter/ipt_CONNMARK.c)51
-rw-r--r--net/netfilter/xt_MARK.c (renamed from net/ipv4/netfilter/ipt_MARK.c)79
-rw-r--r--net/netfilter/xt_NFQUEUE.c107
-rw-r--r--net/netfilter/xt_NOTRACK.c (renamed from net/ipv4/netfilter/ipt_NOTRACK.c)38
-rw-r--r--net/netfilter/xt_comment.c (renamed from net/ipv4/netfilter/ipt_comment.c)35
-rw-r--r--net/netfilter/xt_connbytes.c (renamed from net/ipv4/netfilter/ipt_connbytes.c)73
-rw-r--r--net/netfilter/xt_connmark.c (renamed from net/ipv4/netfilter/ipt_connmark.c)41
-rw-r--r--net/netfilter/xt_conntrack.c (renamed from net/ipv4/netfilter/ipt_conntrack.c)110
-rw-r--r--net/netfilter/xt_dccp.c (renamed from net/ipv4/netfilter/ipt_dccp.c)105
-rw-r--r--net/netfilter/xt_helper.c (renamed from net/ipv4/netfilter/ipt_helper.c)55
-rw-r--r--net/netfilter/xt_length.c99
-rw-r--r--net/netfilter/xt_limit.c (renamed from net/ipv4/netfilter/ipt_limit.c)48
-rw-r--r--net/netfilter/xt_mac.c (renamed from net/ipv4/netfilter/ipt_mac.c)43
-rw-r--r--net/netfilter/xt_mark.c (renamed from net/ipv4/netfilter/ipt_mark.c)38
-rw-r--r--net/netfilter/xt_physdev.c (renamed from net/ipv6/netfilter/ip6t_physdev.c)82
-rw-r--r--net/netfilter/xt_pkttype.c (renamed from net/ipv4/netfilter/ipt_pkttype.c)46
-rw-r--r--net/netfilter/xt_realm.c (renamed from net/ipv4/netfilter/ipt_realm.c)25
-rw-r--r--net/netfilter/xt_sctp.c (renamed from net/ipv4/netfilter/ipt_sctp.c)109
-rw-r--r--net/netfilter/xt_state.c (renamed from net/ipv4/netfilter/ipt_state.c)50
-rw-r--r--net/netfilter/xt_string.c (renamed from net/ipv4/netfilter/ipt_string.c)40
-rw-r--r--net/netfilter/xt_tcpmss.c (renamed from net/ipv4/netfilter/ipt_tcpmss.c)69
-rw-r--r--net/netfilter/xt_tcpudp.c334
-rw-r--r--net/netlink/af_netlink.c5
-rw-r--r--net/netlink/genetlink.c7
-rw-r--r--net/netrom/af_netrom.c1
-rw-r--r--net/packet/af_packet.c3
-rw-r--r--net/rose/af_rose.c2
-rw-r--r--net/rxrpc/connection.c14
-rw-r--r--net/sched/Kconfig4
-rw-r--r--net/sched/Makefile14
-rw-r--r--net/sched/act_api.c4
-rw-r--r--net/sched/act_gact.c (renamed from net/sched/gact.c)3
-rw-r--r--net/sched/act_ipt.c (renamed from net/sched/ipt.c)8
-rw-r--r--net/sched/act_mirred.c (renamed from net/sched/mirred.c)3
-rw-r--r--net/sched/act_pedit.c (renamed from net/sched/pedit.c)5
-rw-r--r--net/sched/act_police.c (renamed from net/sched/police.c)17
-rw-r--r--net/sched/act_simple.c (renamed from net/sched/simple.c)3
-rw-r--r--net/sched/ematch.c1
-rw-r--r--net/sched/sch_cbq.c4
-rw-r--r--net/sched/sch_hfsc.c12
-rw-r--r--net/sched/sch_htb.c4
-rw-r--r--net/sched/sch_prio.c7
-rw-r--r--net/sched/sch_teql.c12
-rw-r--r--net/sctp/input.c1
-rw-r--r--net/sctp/ipv6.c26
-rw-r--r--net/sctp/sm_make_chunk.c2
-rw-r--r--net/sctp/sm_sideeffect.c3
-rw-r--r--net/sctp/sm_statefuns.c4
-rw-r--r--net/sctp/socket.c5
-rw-r--r--net/socket.c2
-rw-r--r--net/sunrpc/auth.c2
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c10
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_mech.c10
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_seal.c11
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_token.c3
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_unseal.c2
-rw-r--r--net/sunrpc/auth_unix.c2
-rw-r--r--net/sunrpc/cache.c5
-rw-r--r--net/sunrpc/clnt.c114
-rw-r--r--net/sunrpc/pmap_clnt.c17
-rw-r--r--net/sunrpc/rpc_pipe.c69
-rw-r--r--net/sunrpc/sched.c222
-rw-r--r--net/sunrpc/sunrpc_syms.c4
-rw-r--r--net/sunrpc/svc.c9
-rw-r--r--net/sunrpc/svcauth_unix.c14
-rw-r--r--net/sunrpc/svcsock.c2
-rw-r--r--net/sunrpc/xdr.c21
-rw-r--r--net/sunrpc/xprt.c66
-rw-r--r--net/sunrpc/xprtsock.c31
-rw-r--r--net/tipc/Kconfig112
-rw-r--r--net/tipc/Makefile13
-rw-r--r--net/tipc/addr.c94
-rw-r--r--net/tipc/addr.h128
-rw-r--r--net/tipc/bcast.c806
-rw-r--r--net/tipc/bcast.h223
-rw-r--r--net/tipc/bearer.c692
-rw-r--r--net/tipc/bearer.h172
-rw-r--r--net/tipc/cluster.c576
-rw-r--r--net/tipc/cluster.h92
-rw-r--r--net/tipc/config.c718
-rw-r--r--net/tipc/config.h80
-rw-r--r--net/tipc/core.c285
-rw-r--r--net/tipc/core.h316
-rw-r--r--net/tipc/dbg.c395
-rw-r--r--net/tipc/dbg.h59
-rw-r--r--net/tipc/discover.c318
-rw-r--r--net/tipc/discover.h58
-rw-r--r--net/tipc/eth_media.c299
-rw-r--r--net/tipc/handler.c132
-rw-r--r--net/tipc/link.c3167
-rw-r--r--net/tipc/link.h296
-rw-r--r--net/tipc/msg.c334
-rw-r--r--net/tipc/msg.h818
-rw-r--r--net/tipc/name_distr.c309
-rw-r--r--net/tipc/name_distr.h48
-rw-r--r--net/tipc/name_table.c1079
-rw-r--r--net/tipc/name_table.h108
-rw-r--r--net/tipc/net.c311
-rw-r--r--net/tipc/net.h66
-rw-r--r--net/tipc/netlink.c112
-rw-r--r--net/tipc/node.c679
-rw-r--r--net/tipc/node.h144
-rw-r--r--net/tipc/node_subscr.c79
-rw-r--r--net/tipc/node_subscr.h63
-rw-r--r--net/tipc/port.c1708
-rw-r--r--net/tipc/port.h209
-rw-r--r--net/tipc/ref.c189
-rw-r--r--net/tipc/ref.h131
-rw-r--r--net/tipc/socket.c1726
-rw-r--r--net/tipc/subscr.c527
-rw-r--r--net/tipc/subscr.h80
-rw-r--r--net/tipc/user_reg.c265
-rw-r--r--net/tipc/user_reg.h48
-rw-r--r--net/tipc/zone.c169
-rw-r--r--net/tipc/zone.h71
-rw-r--r--net/unix/af_unix.c4
-rw-r--r--net/wanrouter/af_wanpipe.c1
-rw-r--r--net/wanrouter/wanmain.c1
-rw-r--r--net/x25/af_x25.c8
-rw-r--r--net/xfrm/xfrm_algo.c6
-rw-r--r--net/xfrm/xfrm_policy.c17
-rw-r--r--net/xfrm/xfrm_user.c1
-rw-r--r--scripts/Kbuild.include13
-rw-r--r--scripts/Makefile.build4
-rw-r--r--scripts/Makefile.modpost2
-rw-r--r--scripts/bloat-o-meter58
-rw-r--r--scripts/kconfig/Makefile8
-rw-r--r--scripts/kconfig/conf.c18
-rw-r--r--scripts/kconfig/lxdialog/Makefile48
-rw-r--r--scripts/kconfig/lxdialog/check-lxdialog.sh67
-rw-r--r--scripts/kconfig/qconf.h6
-rwxr-xr-xscripts/kernel-doc12
-rw-r--r--scripts/mksysmap2
-rw-r--r--scripts/mod/file2alias.c2
-rw-r--r--scripts/reference_discarded.pl6
-rw-r--r--scripts/setlocalversion68
-rw-r--r--security/capability.c6
-rw-r--r--security/commoncap.c1
-rw-r--r--security/dummy.c1
-rw-r--r--security/inode.c8
-rw-r--r--security/keys/compat.c6
-rw-r--r--security/keys/internal.h6
-rw-r--r--security/keys/key.c58
-rw-r--r--security/keys/keyctl.c152
-rw-r--r--security/keys/keyring.c198
-rw-r--r--security/keys/permission.c32
-rw-r--r--security/keys/process_keys.c71
-rw-r--r--security/keys/request_key.c108
-rw-r--r--security/keys/request_key_auth.c192
-rw-r--r--security/keys/user_defined.c33
-rw-r--r--security/security.c1
-rw-r--r--security/selinux/avc.c5
-rw-r--r--security/selinux/hooks.c2
-rw-r--r--security/selinux/include/av_perm_to_string.h3
-rw-r--r--security/selinux/include/av_permissions.h3
-rw-r--r--security/selinux/selinuxfs.c6
-rw-r--r--security/selinux/ss/avtab.c2
-rw-r--r--security/selinux/ss/policydb.c2
-rw-r--r--security/selinux/ss/services.c6
-rw-r--r--security/selinux/xfrm.c10
-rw-r--r--sound/arm/aaci.c2
-rw-r--r--sound/core/oss/pcm_oss.c2
-rw-r--r--sound/core/seq/seq_memory.c4
-rw-r--r--sound/isa/wavefront/wavefront_synth.c7
-rw-r--r--sound/oss/ad1848.c92
-rw-r--r--sound/oss/ad1889.c2
-rw-r--r--sound/oss/btaudio.c2
-rw-r--r--sound/oss/cmpci.c2
-rw-r--r--sound/oss/cs4281/cs4281m.c23
-rw-r--r--sound/oss/cs4281/cs4281pm-24.c39
-rw-r--r--sound/oss/cs46xx.c62
-rw-r--r--sound/oss/cs46xxpm-24.h4
-rw-r--r--sound/oss/dmasound/dac3550a.c6
-rw-r--r--sound/oss/dmasound/dmasound.h1
-rw-r--r--sound/oss/dmasound/dmasound_atari.c112
-rw-r--r--sound/oss/dmasound/dmasound_awacs.c81
-rw-r--r--sound/oss/dmasound/dmasound_paula.c14
-rw-r--r--sound/oss/dmasound/dmasound_q40.c18
-rw-r--r--sound/oss/dmasound/tas_common.c6
-rw-r--r--sound/oss/dmasound/trans_16.c1
-rw-r--r--sound/oss/emu10k1/main.c2
-rw-r--r--sound/oss/es1370.c2
-rw-r--r--sound/oss/es1371.c2
-rw-r--r--sound/oss/harmony.c4
-rw-r--r--sound/oss/i810_audio.c22
-rw-r--r--sound/oss/ite8172.c2
-rw-r--r--sound/oss/kahlua.c2
-rw-r--r--sound/oss/maestro.c151
-rw-r--r--sound/oss/nec_vrc5477.c2
-rw-r--r--sound/oss/nm256_audio.c49
-rw-r--r--sound/oss/opl3sa2.c110
-rw-r--r--sound/oss/rme96xx.c2
-rw-r--r--sound/oss/sonicvibes.c2
-rw-r--r--sound/oss/ymfpci.c2
-rw-r--r--sound/pci/cs5535audio/cs5535audio.c2
-rw-r--r--sound/pci/emu10k1/emufx.c1
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c2
-rw-r--r--sound/ppc/keywest.c5
-rw-r--r--sound/ppc/pmac.c100
-rw-r--r--sound/ppc/pmac.h3
3906 files changed, 195823 insertions, 77630 deletions
diff --git a/.gitignore b/.gitignore
index a4b576eb9c0..3f8fb686b59 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,6 +10,7 @@
*.a
*.s
*.ko
+*.so
*.mod.c
#
diff --git a/CREDITS b/CREDITS
index 521f00d1b54..8e577ce4abe 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3203,7 +3203,7 @@ N: Eugene Surovegin
E: ebs@ebshome.net
W: http://kernel.ebshome.net/
P: 1024D/AE5467F1 FF22 39F1 6728 89F6 6E6C 2365 7602 F33D AE54 67F1
-D: Embedded PowerPC 4xx: I2C, PIC and random hacks/fixes
+D: Embedded PowerPC 4xx: EMAC, I2C, PIC and random hacks/fixes
S: Sunnyvale, California 94085
S: USA
diff --git a/Documentation/Changes b/Documentation/Changes
index 86b86399d61..fe5ae0f5502 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -31,8 +31,6 @@ al español de este documento en varios formatos.
Eine deutsche Version dieser Datei finden Sie unter
<http://www.stefan-winter.de/Changes-2.4.0.txt>.
-Last updated: October 29th, 2002
-
Chris Ricker (kaboom@gatech.edu or chris.ricker@genetics.utah.edu).
Current Minimal Requirements
@@ -48,7 +46,7 @@ necessary on all systems; obviously, if you don't have any ISDN
hardware, for example, you probably needn't concern yourself with
isdn4k-utils.
-o Gnu C 2.95.3 # gcc --version
+o Gnu C 3.2 # gcc --version
o Gnu make 3.79.1 # make --version
o binutils 2.12 # ld -v
o util-linux 2.10o # fdformat --version
@@ -74,26 +72,7 @@ GCC
---
The gcc version requirements may vary depending on the type of CPU in your
-computer. The next paragraph applies to users of x86 CPUs, but not
-necessarily to users of other CPUs. Users of other CPUs should obtain
-information about their gcc version requirements from another source.
-
-The recommended compiler for the kernel is gcc 2.95.x (x >= 3), and it
-should be used when you need absolute stability. You may use gcc 3.0.x
-instead if you wish, although it may cause problems. Later versions of gcc
-have not received much testing for Linux kernel compilation, and there are
-almost certainly bugs (mainly, but not exclusively, in the kernel) that
-will need to be fixed in order to use these compilers. In any case, using
-pgcc instead of plain gcc is just asking for trouble.
-
-The Red Hat gcc 2.96 compiler subtree can also be used to build this tree.
-You should ensure you use gcc-2.96-74 or later. gcc-2.96-54 will not build
-the kernel correctly.
-
-In addition, please pay attention to compiler optimization. Anything
-greater than -O2 may not be wise. Similarly, if you choose to use gcc-2.95.x
-or derivatives, be sure not to use -fstrict-aliasing (which, depending on
-your version of gcc 2.95.x, may necessitate using -fno-strict-aliasing).
+computer.
Make
----
@@ -322,9 +301,9 @@ Getting updated software
Kernel compilation
******************
-gcc 2.95.3
-----------
-o <ftp://ftp.gnu.org/gnu/gcc/gcc-2.95.3.tar.gz>
+gcc
+---
+o <ftp://ftp.gnu.org/gnu/gcc/>
Make
----
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index eb7db3c1922..ce5d2c038cf 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -199,7 +199,7 @@ The rationale is:
modifications are prevented
- saves the compiler work to optimize redundant code away ;)
-int fun(int )
+int fun(int a)
{
int result = 0;
char *buffer = kmalloc(SIZE);
@@ -344,7 +344,7 @@ Remember: if another thread can find your data structure, and you don't
have a reference count on it, you almost certainly have a bug.
- Chapter 11: Macros, Enums, Inline functions and RTL
+ Chapter 11: Macros, Enums and RTL
Names of macros defining constants and labels in enums are capitalized.
@@ -429,7 +429,35 @@ from void pointer to any other pointer type is guaranteed by the C programming
language.
- Chapter 14: References
+ Chapter 14: The inline disease
+
+There appears to be a common misperception that gcc has a magic "make me
+faster" speedup option called "inline". While the use of inlines can be
+appropriate (for example as a means of replacing macros, see Chapter 11), it
+very often is not. Abundant use of the inline keyword leads to a much bigger
+kernel, which in turn slows the system as a whole down, due to a bigger
+icache footprint for the CPU and simply because there is less memory
+available for the pagecache. Just think about it; a pagecache miss causes a
+disk seek, which easily takes 5 milliseconds. There are a LOT of cpu cycles
+that can go into these 5 milliseconds.
+
+A reasonable rule of thumb is to not put inline at functions that have more
+than 3 lines of code in them. An exception to this rule are the cases where
+a parameter is known to be a compile-time constant, and as a result of this
+constantness you *know* the compiler will be able to optimize most of your
+function away at compile time. For a good example of this latter case, see
+the kmalloc() inline function.
+
+Often people argue that adding inline to functions that are static and used
+only once is always a win since there is no space tradeoff. While this is
+technically correct, gcc is capable of inlining these automatically without
+help, and the maintenance issue of removing the inline when a second user
+appears outweighs the potential value of the hint that tells gcc to do
+something it would have done anyway.
+
+
+
+ Chapter 15: References
The C Programming Language, Second Edition
by Brian W. Kernighan and Dennis M. Ritchie.
@@ -444,10 +472,13 @@ ISBN 0-201-61586-X.
URL: http://cm.bell-labs.com/cm/cs/tpop/
GNU manuals - where in compliance with K&R and this text - for cpp, gcc,
-gcc internals and indent, all available from http://www.gnu.org
+gcc internals and indent, all available from http://www.gnu.org/manual/
WG14 is the international standardization working group for the programming
-language C, URL: http://std.dkuug.dk/JTC1/SC22/WG14/
+language C, URL: http://www.open-std.org/JTC1/SC22/WG14/
+
+Kernel CodingStyle, by greg@kroah.com at OLS 2002:
+http://www.kroah.com/linux/talks/ols_2002_kernel_codingstyle_talk/html/
--
-Last updated on 16 February 2004 by a community effort on LKML.
+Last updated on 30 December 2005 by a community effort on LKML.
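
A minimal, hypothetical illustration of the rule of thumb in the new "inline
disease" chapter above (the helpers below are invented for this sketch and are
not part of the patch): keep inline for a trivial helper that folds to a
constant, and leave larger static functions out of line for gcc to decide.

/*
 * pages_for_order() collapses to a constant when "order" is known at
 * compile time, so marking it inline is reasonable; zero_buffer() is
 * more than three lines, so it is left out of line and gcc may still
 * choose to inline it on its own.
 */
static inline unsigned long pages_for_order(unsigned int order)
{
	return 1UL << order;		/* folds away for constant callers */
}

static void zero_buffer(char *buf, unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len; i++)
		buf[i] = 0;
}
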
diff --git a/Documentation/DocBook/.gitignore b/Documentation/DocBook/.gitignore
new file mode 100644
index 00000000000..c102c02ecf8
--- /dev/null
+++ b/Documentation/DocBook/.gitignore
@@ -0,0 +1,6 @@
+*.xml
+*.ps
+*.pdf
+*.html
+*.9.gz
+*.9
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index 767433bdbc4..8c9c6704e85 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -54,6 +54,11 @@
!Ekernel/sched.c
!Ekernel/timer.c
</sect1>
+ <sect1><title>High-resolution timers</title>
+!Iinclude/linux/ktime.h
+!Iinclude/linux/hrtimer.h
+!Ekernel/hrtimer.c
+ </sect1>
<sect1><title>Internal Functions</title>
!Ikernel/exit.c
!Ikernel/signal.c
@@ -369,6 +374,7 @@ X!Edrivers/acpi/motherboard.c
X!Edrivers/acpi/bus.c
-->
!Edrivers/acpi/scan.c
+!Idrivers/acpi/scan.c
<!-- No correct structured comments
X!Edrivers/acpi/pci_bind.c
-->
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 90dc2de8e0a..158ffe9bfad 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -222,7 +222,7 @@
<title>Two Main Types of Kernel Locks: Spinlocks and Semaphores</title>
<para>
- There are two main types of kernel locks. The fundamental type
+ There are three main types of kernel locks. The fundamental type
is the spinlock
(<filename class="headerfile">include/asm/spinlock.h</filename>),
which is a very simple single-holder lock: if you can't get the
@@ -230,16 +230,22 @@
very small and fast, and can be used anywhere.
</para>
<para>
- The second type is a semaphore
+ The second type is a mutex
+ (<filename class="headerfile">include/linux/mutex.h</filename>): it
+ is like a spinlock, but you may block holding a mutex.
+ If you can't lock a mutex, your task will suspend itself, and be woken
+ up when the mutex is released. This means the CPU can do something
+ else while you are waiting. There are many cases when you simply
+ can't sleep (see <xref linkend="sleeping-things"/>), and so have to
+ use a spinlock instead.
+ </para>
+ <para>
+ The third type is a semaphore
(<filename class="headerfile">include/asm/semaphore.h</filename>): it
can have more than one holder at any time (the number decided at
initialization time), although it is most commonly used as a
- single-holder lock (a mutex). If you can't get a semaphore,
- your task will put itself on the queue, and be woken up when the
- semaphore is released. This means the CPU will do something
- else while you are waiting, but there are many cases when you
- simply can't sleep (see <xref linkend="sleeping-things"/>), and so
- have to use a spinlock instead.
+ single-holder lock (a mutex). If you can't get a semaphore, your
+ task will be suspended and later on woken up - just like for mutexes.
</para>
<para>
Neither type of lock is recursive: see
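
To make the three lock types above concrete, here is a small driver-style
sketch (assumed code, not taken from this patch; cfg_value and stat_count are
hypothetical shared state) using the new mutex API next to a spinlock:
mutex_lock() may sleep and is therefore only legal where sleeping is allowed,
while the spinlock path is safe in contexts that must not sleep.

#include <linux/mutex.h>
#include <linux/spinlock.h>

static DEFINE_MUTEX(cfg_mutex);		/* holder may sleep */
static DEFINE_SPINLOCK(stat_lock);	/* holder must never sleep */
static int cfg_value;			/* hypothetical shared state */
static unsigned long stat_count;

static void set_cfg(int val)		/* process context only */
{
	mutex_lock(&cfg_mutex);		/* blocks (sleeps) if contended */
	cfg_value = val;
	mutex_unlock(&cfg_mutex);
}

static void bump_stat(void)		/* safe even from interrupt context */
{
	unsigned long flags;

	spin_lock_irqsave(&stat_lock, flags);
	stat_count++;
	spin_unlock_irqrestore(&stat_lock, flags);
}
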
diff --git a/Documentation/RCU/rcuref.txt b/Documentation/RCU/rcuref.txt
index a23fee66064..3f60db41b2f 100644
--- a/Documentation/RCU/rcuref.txt
+++ b/Documentation/RCU/rcuref.txt
@@ -1,74 +1,67 @@
-Refcounter framework for elements of lists/arrays protected by
-RCU.
+Refcounter design for elements of lists/arrays protected by RCU.
Refcounting on elements of lists which are protected by traditional
reader/writer spinlocks or semaphores are straight forward as in:
-1. 2.
-add() search_and_reference()
-{ {
- alloc_object read_lock(&list_lock);
- ... search_for_element
- atomic_set(&el->rc, 1); atomic_inc(&el->rc);
- write_lock(&list_lock); ...
- add_element read_unlock(&list_lock);
- ... ...
- write_unlock(&list_lock); }
+1. 2.
+add() search_and_reference()
+{ {
+ alloc_object read_lock(&list_lock);
+ ... search_for_element
+ atomic_set(&el->rc, 1); atomic_inc(&el->rc);
+ write_lock(&list_lock); ...
+ add_element read_unlock(&list_lock);
+ ... ...
+ write_unlock(&list_lock); }
}
3. 4.
release_referenced() delete()
{ {
- ... write_lock(&list_lock);
- atomic_dec(&el->rc, relfunc) ...
- ... delete_element
-} write_unlock(&list_lock);
- ...
- if (atomic_dec_and_test(&el->rc))
- kfree(el);
- ...
+ ... write_lock(&list_lock);
+ atomic_dec(&el->rc, relfunc) ...
+ ... delete_element
+} write_unlock(&list_lock);
+ ...
+ if (atomic_dec_and_test(&el->rc))
+ kfree(el);
+ ...
}
If this list/array is made lock free using rcu as in changing the
write_lock in add() and delete() to spin_lock and changing read_lock
-in search_and_reference to rcu_read_lock(), the rcuref_get in
+in search_and_reference to rcu_read_lock(), the atomic_get in
search_and_reference could potentially hold reference to an element which
-has already been deleted from the list/array. rcuref_lf_get_rcu takes
+has already been deleted from the list/array. atomic_inc_not_zero takes
care of this scenario. search_and_reference should look as;
1. 2.
add() search_and_reference()
{ {
- alloc_object rcu_read_lock();
- ... search_for_element
- atomic_set(&el->rc, 1); if (rcuref_inc_lf(&el->rc)) {
- write_lock(&list_lock); rcu_read_unlock();
- return FAIL;
- add_element }
- ... ...
- write_unlock(&list_lock); rcu_read_unlock();
+ alloc_object rcu_read_lock();
+ ... search_for_element
+ atomic_set(&el->rc, 1); if (atomic_inc_not_zero(&el->rc)) {
+ write_lock(&list_lock); rcu_read_unlock();
+ return FAIL;
+ add_element }
+ ... ...
+ write_unlock(&list_lock); rcu_read_unlock();
} }
3. 4.
release_referenced() delete()
{ {
- ... write_lock(&list_lock);
- rcuref_dec(&el->rc, relfunc) ...
- ... delete_element
-} write_unlock(&list_lock);
- ...
- if (rcuref_dec_and_test(&el->rc))
- call_rcu(&el->head, el_free);
- ...
+ ... write_lock(&list_lock);
+ atomic_dec(&el->rc, relfunc) ...
+ ... delete_element
+} write_unlock(&list_lock);
+ ...
+ if (atomic_dec_and_test(&el->rc))
+ call_rcu(&el->head, el_free);
+ ...
}
Sometimes, reference to the element need to be obtained in the
-update (write) stream. In such cases, rcuref_inc_lf might be an overkill
-since the spinlock serialising list updates are held. rcuref_inc
+update (write) stream. In such cases, atomic_inc_not_zero might be
+overkill since the spinlock serialising list updates is held. atomic_inc
is to be used in such cases.
-For arches which do not have cmpxchg rcuref_inc_lf
-api uses a hashed spinlock implementation and the same hashed spinlock
-is acquired in all rcuref_xxx primitives to preserve atomicity.
-Note: Use rcuref_inc api only if you need to use rcuref_inc_lf on the
-refcounter atleast at one place. Mixing rcuref_inc and atomic_xxx api
-might lead to races. rcuref_inc_lf() must be used in lockfree
-RCU critical sections only.
+
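
As a worked sketch of the lock-free reader path described above (the element
type and list head are hypothetical, not from the patch), atomic_inc_not_zero()
refuses to take a reference once the count has already dropped to zero, which
closes exactly the window the text warns about:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <asm/atomic.h>

struct element {
	struct list_head list;
	atomic_t rc;
	struct rcu_head head;
	int key;
};

static struct element *search_and_reference(struct list_head *head, int key)
{
	struct element *el;

	rcu_read_lock();
	list_for_each_entry_rcu(el, head, list) {
		if (el->key != key)
			continue;
		/* A zero count means a deleter already dropped the last
		 * reference; do not resurrect the element. */
		if (!atomic_inc_not_zero(&el->rc))
			el = NULL;
		rcu_read_unlock();
		return el;
	}
	rcu_read_unlock();
	return NULL;
}
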
diff --git a/Documentation/SubmittingDrivers b/Documentation/SubmittingDrivers
index c3cca924e94..dd311cff1cc 100644
--- a/Documentation/SubmittingDrivers
+++ b/Documentation/SubmittingDrivers
@@ -27,18 +27,17 @@ Who To Submit Drivers To
------------------------
Linux 2.0:
- No new drivers are accepted for this kernel tree
+ No new drivers are accepted for this kernel tree.
Linux 2.2:
+ No new drivers are accepted for this kernel tree.
+
+Linux 2.4:
If the code area has a general maintainer then please submit it to
the maintainer listed in MAINTAINERS in the kernel file. If the
maintainer does not respond or you cannot find the appropriate
- maintainer then please contact the 2.2 kernel maintainer:
- Marc-Christian Petersen <m.c.p@wolk-project.de>.
-
-Linux 2.4:
- The same rules apply as 2.2. The final contact point for Linux 2.4
- submissions is Marcelo Tosatti <marcelo.tosatti@cyclades.com>.
+ maintainer then please contact Marcelo Tosatti
+ <marcelo.tosatti@cyclades.com>.
Linux 2.6:
The same rules apply as 2.4 except that you should follow linux-kernel
@@ -53,6 +52,7 @@ Licensing: The code must be released to us under the
of exclusive GPL licensing, and if you wish the driver
to be useful to other communities such as BSD you may well
wish to release under multiple licenses.
+ See accepted licenses at include/linux/module.h
Copyright: The copyright owner must agree to use of GPL.
It's best if the submitter and copyright owner
@@ -143,5 +143,13 @@ KernelNewbies:
http://kernelnewbies.org/
Linux USB project:
- http://sourceforge.net/projects/linux-usb/
+ http://linux-usb.sourceforge.net/
+
+How to NOT write a kernel driver, by arjanv@redhat.com
+ http://people.redhat.com/arjanv/olspaper.pdf
+
+Kernel Janitor:
+ http://janitor.kernelnewbies.org/
+--
+Last updated on 17 Nov 2005.
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 1d47e6c09dc..6198e5ebcf6 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -78,7 +78,9 @@ Randy Dunlap's patch scripts:
http://www.xenotime.net/linux/scripts/patching-scripts-002.tar.gz
Andrew Morton's patch scripts:
-http://www.zip.com.au/~akpm/linux/patches/patch-scripts-0.20
+http://www.zip.com.au/~akpm/linux/patches/
+Instead of these scripts, quilt is the recommended patch management
+tool (see above).
@@ -97,7 +99,7 @@ need to split up your patch. See #3, next.
3) Separate your changes.
-Separate each logical change into its own patch.
+Separate _logical changes_ into a single patch file.
For example, if your changes include both bug fixes and performance
enhancements for a single driver, separate those changes into two
@@ -112,6 +114,10 @@ If one patch depends on another patch in order for a change to be
complete, that is OK. Simply note "this patch depends on patch X"
in your patch description.
+If you cannot condense your patch set into a smaller set of patches,
+then post only, say, 15 or so at a time and wait for review and integration.
+
+
4) Select e-mail destination.
@@ -124,6 +130,10 @@ your patch to the primary Linux kernel developer's mailing list,
linux-kernel@vger.kernel.org. Most kernel developers monitor this
e-mail list, and can comment on your changes.
+
+Do not send more than 15 patches at once to the vger mailing lists!!!
+
+
Linus Torvalds is the final arbiter of all changes accepted into the
Linux kernel. His e-mail address is <torvalds@osdl.org>. He gets
a lot of e-mail, so typically you should do your best to -avoid- sending
@@ -149,6 +159,9 @@ USB, framebuffer devices, the VFS, the SCSI subsystem, etc. See the
MAINTAINERS file for a mailing list that relates specifically to
your change.
+Majordomo lists of VGER.KERNEL.ORG at:
+ <http://vger.kernel.org/vger-lists.html>
+
If changes affect userland-kernel interfaces, please send
the MAN-PAGES maintainer (as listed in the MAINTAINERS file)
a man-pages patch, or at least a notification of the change,
@@ -373,27 +386,14 @@ a diffstat, to show what files have changed, and the number of inserted
and deleted lines per file. A diffstat is especially useful on bigger
patches. Other comments relevant only to the moment or the maintainer,
not suitable for the permanent changelog, should also go here.
+Use diffstat options "-p 1 -w 70" so that filenames are listed from the
+top of the kernel source tree and don't use too much horizontal space
+(easily fit in 80 columns, maybe with some indentation).
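For illustration only (this example is not part of the patch text itself, and the
patch file name is made up), generating such a diffstat from the top of the kernel
source tree looks like:

$ diffstat -p 1 -w 70 ../my-changes.patch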
See more details on the proper patch format in the following
references.
-13) More references for submitting patches
-
-Andrew Morton, "The perfect patch" (tpp).
- <http://www.zip.com.au/~akpm/linux/patches/stuff/tpp.txt>
-
-Jeff Garzik, "Linux kernel patch submission format."
- <http://linux.yyz.us/patch-format.html>
-
-Greg KH, "How to piss off a kernel subsystem maintainer"
- <http://www.kroah.com/log/2005/03/31/>
-
-Kernel Documentation/CodingStyle
- <http://sosdg.org/~coywolf/lxr/source/Documentation/CodingStyle>
-
-Linus Torvald's mail on the canonical patch format:
- <http://lkml.org/lkml/2005/4/7/183>
-----------------------------------
@@ -466,3 +466,30 @@ and 'extern __inline__'.
Don't try to anticipate nebulous future cases which may or may not
be useful: "Make it as simple as you can, and no simpler."
+
+
+----------------------
+SECTION 3 - REFERENCES
+----------------------
+
+Andrew Morton, "The perfect patch" (tpp).
+ <http://www.zip.com.au/~akpm/linux/patches/stuff/tpp.txt>
+
+Jeff Garzik, "Linux kernel patch submission format."
+ <http://linux.yyz.us/patch-format.html>
+
+Greg Kroah, "How to piss off a kernel subsystem maintainer".
+ <http://www.kroah.com/log/2005/03/31/>
+ <http://www.kroah.com/log/2005/07/08/>
+ <http://www.kroah.com/log/2005/10/19/>
+
+NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people!.
+ <http://marc.theaimsgroup.com/?l=linux-kernel&m=112112749912944&w=2>
+
+Kernel Documentation/CodingStyle
+ <http://sosdg.org/~coywolf/lxr/source/Documentation/CodingStyle>
+
+Linus Torvalds's mail on the canonical patch format:
+ <http://lkml.org/lkml/2005/4/7/183>
+--
+Last updated on 17 Nov 2005.
diff --git a/Documentation/applying-patches.txt b/Documentation/applying-patches.txt
index 681e426e248..a083ba35d1a 100644
--- a/Documentation/applying-patches.txt
+++ b/Documentation/applying-patches.txt
@@ -2,8 +2,8 @@
Applying Patches To The Linux Kernel
------------------------------------
- (Written by Jesper Juhl, August 2005)
-
+ Original by: Jesper Juhl, August 2005
+ Last update: 2006-01-05
A frequently asked question on the Linux Kernel Mailing List is how to apply
@@ -76,7 +76,7 @@ instead:
If you wish to uncompress the patch file by hand first before applying it
(what I assume you've done in the examples below), then you simply run
-gunzip or bunzip2 on the file - like this:
+gunzip or bunzip2 on the file -- like this:
gunzip patch-x.y.z.gz
bunzip2 patch-x.y.z.bz2
@@ -94,7 +94,7 @@ Common errors when patching
---
When patch applies a patch file it attempts to verify the sanity of the
file in different ways.
-Checking that the file looks like a valid patch file, checking the code
+Checking that the file looks like a valid patch file & checking the code
around the bits being modified matches the context provided in the patch are
just two of the basic sanity checks patch does.
@@ -118,16 +118,16 @@ wrong.
When patch encounters a change that it can't fix up with fuzz it rejects it
outright and leaves a file with a .rej extension (a reject file). You can
-read this file to see exactely what change couldn't be applied, so you can
+read this file to see exactly what change couldn't be applied, so you can
go fix it up by hand if you wish.
-If you don't have any third party patches applied to your kernel source, but
+If you don't have any third-party patches applied to your kernel source, but
only patches from kernel.org and you apply the patches in the correct order,
and have made no modifications yourself to the source files, then you should
never see a fuzz or reject message from patch. If you do see such messages
anyway, then there's a high risk that either your local source tree or the
patch file is corrupted in some way. In that case you should probably try
-redownloading the patch and if things are still not OK then you'd be advised
+re-downloading the patch and if things are still not OK then you'd be advised
to start with a fresh tree downloaded in full from kernel.org.
Let's look a bit more at some of the messages patch can produce.
@@ -136,7 +136,7 @@ If patch stops and presents a "File to patch:" prompt, then patch could not
find a file to be patched. Most likely you forgot to specify -p1 or you are
in the wrong directory. Less often, you'll find patches that need to be
applied with -p0 instead of -p1 (reading the patch file should reveal if
-this is the case - if so, then this is an error by the person who created
+this is the case -- if so, then this is an error by the person who created
the patch but is not fatal).
If you get "Hunk #2 succeeded at 1887 with fuzz 2 (offset 7 lines)." or a
@@ -167,22 +167,28 @@ the patch will in fact apply it.
A message similar to "patch: **** unexpected end of file in patch" or "patch
unexpectedly ends in middle of line" means that patch could make no sense of
-the file you fed to it. Either your download is broken or you tried to feed
-patch a compressed patch file without uncompressing it first.
+the file you fed to it. Either your download is broken, you tried to feed
+patch a compressed patch file without uncompressing it first, or the patch
+file that you are using has been mangled by a mail client or mail transfer
+agent along the way somewhere, e.g., by splitting a long line into two lines.
+Often these warnings can easily be fixed by joining (concatenating) the
+two lines that had been split.
As I already mentioned above, these errors should never happen if you apply
a patch from kernel.org to the correct version of an unmodified source tree.
So if you get these errors with kernel.org patches then you should probably
-assume that either your patch file or your tree is broken and I'd advice you
+assume that either your patch file or your tree is broken and I'd advise you
to start over with a fresh download of a full kernel tree and the patch you
wish to apply.
Are there any alternatives to `patch'?
---
- Yes there are alternatives. You can use the `interdiff' program
-(http://cyberelk.net/tim/patchutils/) to generate a patch representing the
-differences between two patches and then apply the result.
+ Yes there are alternatives.
+
+ You can use the `interdiff' program (http://cyberelk.net/tim/patchutils/) to
+generate a patch representing the differences between two patches and then
+apply the result.
This will let you move from something like 2.6.12.2 to 2.6.12.3 in a single
step. The -z flag to interdiff will even let you feed it patches in gzip or
bzip2 compressed form directly without the use of zcat or bzcat or manual
@@ -197,10 +203,10 @@ do the additional steps since interdiff can get things wrong in some cases.
Another alternative is `ketchup', which is a python script for automatic
downloading and applying of patches (http://www.selenic.com/ketchup/).
-Other nice tools are diffstat which shows a summary of changes made by a
-patch, lsdiff which displays a short listing of affected files in a patch
-file, along with (optionally) the line numbers of the start of each patch
-and grepdiff which displays a list of the files modified by a patch where
+ Other nice tools are diffstat, which shows a summary of changes made by a
+patch; lsdiff, which displays a short listing of affected files in a patch
+file, along with (optionally) the line numbers of the start of each patch;
+and grepdiff, which displays a list of the files modified by a patch where
the patch contains a given regular expression.
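As a quick illustration (these command lines are not part of the original text;
the patch file names are just examples), the tools mentioned above can be
invoked like this:

$ interdiff -z ../patch-2.6.12.2.bz2 ../patch-2.6.12.3.gz > ../p-2.6.12.2-3
$ diffstat -p 1 ../patch-2.6.12.3                # summary of changes
$ lsdiff -n ../patch-2.6.12.3                    # affected files with line numbers
$ grepdiff 'pci_find_device' ../patch-2.6.12.3   # files whose changes match the regex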
@@ -225,8 +231,8 @@ The -mm kernels live at
In place of ftp.kernel.org you can use ftp.cc.kernel.org, where cc is a
country code. This way you'll be downloading from a mirror site that's most
likely geographically closer to you, resulting in faster downloads for you,
-less bandwidth used globally and less load on the main kernel.org servers -
-these are good things, do use mirrors when possible.
+less bandwidth used globally and less load on the main kernel.org servers --
+these are good things, so do use mirrors when possible.
The 2.6.x kernels
@@ -234,14 +240,14 @@ The 2.6.x kernels
These are the base stable releases released by Linus. The highest numbered
release is the most recent.
-If regressions or other serious flaws are found then a -stable fix patch
+If regressions or other serious flaws are found, then a -stable fix patch
will be released (see below) on top of this base. Once a new 2.6.x base
kernel is released, a patch is made available that is a delta between the
previous 2.6.x kernel and the new one.
-To apply a patch moving from 2.6.11 to 2.6.12 you'd do the following (note
+To apply a patch moving from 2.6.11 to 2.6.12, you'd do the following (note
that such patches do *NOT* apply on top of 2.6.x.y kernels but on top of the
-base 2.6.x kernel - if you need to move from 2.6.x.y to 2.6.x+1 you need to
+base 2.6.x kernel -- if you need to move from 2.6.x.y to 2.6.x+1 you need to
first revert the 2.6.x.y patch).
Here are some examples:
@@ -258,12 +264,12 @@ $ patch -p1 -R < ../patch-2.6.11.1 # revert the 2.6.11.1 patch
# source dir is now 2.6.11
$ patch -p1 < ../patch-2.6.12 # apply new 2.6.12 patch
$ cd ..
-$ mv linux-2.6.11.1 inux-2.6.12 # rename source dir
+$ mv linux-2.6.11.1 linux-2.6.12 # rename source dir
The 2.6.x.y kernels
---
- Kernels with 4 digit versions are -stable kernels. They contain small(ish)
+ Kernels with 4-digit versions are -stable kernels. They contain small(ish)
critical fixes for security problems or significant regressions discovered
in a given 2.6.x kernel.
@@ -274,9 +280,14 @@ versions.
If no 2.6.x.y kernel is available, then the highest numbered 2.6.x kernel is
the current stable kernel.
+ note: the -stable team usually do make incremental patches available as well
+ as patches against the latest mainline release, but I only cover the
+ non-incremental ones below. The incremental ones can be found at
+ ftp://ftp.kernel.org/pub/linux/kernel/v2.6/incr/
+
These patches are not incremental, meaning that for example the 2.6.12.3
patch does not apply on top of the 2.6.12.2 kernel source, but rather on top
-of the base 2.6.12 kernel source.
+of the base 2.6.12 kernel source.
So, in order to apply the 2.6.12.3 patch to your existing 2.6.12.2 kernel
source you have to first back out the 2.6.12.2 patch (so you are left with a
base 2.6.12 kernel source) and then apply the new 2.6.12.3 patch.
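For example (a sketch in the same style as the commands shown elsewhere in this
document; directory names assume you start from a 2.6.12.2 tree):

$ cd linux-2.6.12.2                  # source dir of the 2.6.12.2 kernel
$ patch -p1 -R < ../patch-2.6.12.2   # revert the 2.6.12.2 patch, tree is now 2.6.12
$ patch -p1 < ../patch-2.6.12.3      # apply the new 2.6.12.3 patch
$ cd ..
$ mv linux-2.6.12.2 linux-2.6.12.3   # rename the source dir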
@@ -342,12 +353,12 @@ The -git kernels
repository, hence the name).
These patches are usually released daily and represent the current state of
-Linus' tree. They are more experimental than -rc kernels since they are
+Linus's tree. They are more experimental than -rc kernels since they are
generated automatically without even a cursory glance to see if they are
sane.
-git patches are not incremental and apply either to a base 2.6.x kernel or
-a base 2.6.x-rc kernel - you can see which from their name.
+a base 2.6.x-rc kernel -- you can see which from their name.
A patch named 2.6.12-git1 applies to the 2.6.12 kernel source and a patch
named 2.6.13-rc3-git2 applies to the source of the 2.6.13-rc3 kernel.
@@ -390,12 +401,12 @@ You should generally strive to get your patches into mainline via -mm to
ensure maximum testing.
This branch is in constant flux and contains many experimental features, a
-lot of debugging patches not appropriate for mainline etc and is the most
+lot of debugging patches not appropriate for mainline etc., and is the most
experimental of the branches described in this document.
These kernels are not appropriate for use on systems that are supposed to be
stable and they are more risky to run than any of the other branches (make
-sure you have up-to-date backups - that goes for any experimental kernel but
+sure you have up-to-date backups -- that goes for any experimental kernel but
even more so for -mm kernels).
These kernels in addition to all the other experimental patches they contain
@@ -433,7 +444,11 @@ $ cd ..
$ mv linux-2.6.12-mm1 linux-2.6.13-rc3-mm3 # rename the source dir
-This concludes this list of explanations of the various kernel trees and I
-hope you are now crystal clear on how to apply the various patches and help
-testing the kernel.
+This concludes this list of explanations of the various kernel trees.
+I hope you are now clear on how to apply the various patches and help testing
+the kernel.
+
+Thanks to Randy Dunlap, Rolf Eike Beer, Linus Torvalds, Bodo Eggert,
+Johannes Stezenbach, Grant Coady, Pavel Machek and others that I may have
+forgotten for their reviews and contributions to this document.
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 303c57a7fad..8e63831971d 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -263,14 +263,8 @@ A flag in the bio structure, BIO_BARRIER is used to identify a barrier i/o.
The generic i/o scheduler would make sure that it places the barrier request and
all other requests coming after it after all the previous requests in the
queue. Barriers may be implemented in different ways depending on the
-driver. A SCSI driver for example could make use of ordered tags to
-preserve the necessary ordering with a lower impact on throughput. For IDE
-this might be two sync cache flush: a pre and post flush when encountering
-a barrier write.
-
-There is a provision for queues to indicate what kind of barriers they
-can provide. This is as of yet unmerged, details will be added here once it
-is in the kernel.
+driver. For more details regarding I/O barriers, please read barrier.txt
+in this directory.
1.2.2 Request Priority/Latency
diff --git a/Documentation/block/stat.txt b/Documentation/block/stat.txt
new file mode 100644
index 00000000000..0dbc946de2e
--- /dev/null
+++ b/Documentation/block/stat.txt
@@ -0,0 +1,82 @@
+Block layer statistics in /sys/block/<dev>/stat
+===============================================
+
+This file documents the contents of the /sys/block/<dev>/stat file.
+
+The stat file provides several statistics about the state of block
+device <dev>.
+
+Q. Why are there multiple statistics in a single file? Doesn't sysfs
+ normally contain a single value per file?
+A. By having a single file, the kernel can guarantee that the statistics
+ represent a consistent snapshot of the state of the device. If the
+ statistics were exported as multiple files containing one statistic
+ each, it would be impossible to guarantee that a set of readings
+ represent a single point in time.
+
+The stat file consists of a single line of text containing 11 decimal
+values separated by whitespace. The fields are summarized in the
+following table, and described in more detail below.
+
+Name units description
+---- ----- -----------
+read I/Os requests number of read I/Os processed
+read merges requests number of read I/Os merged with in-queue I/O
+read sectors sectors number of sectors read
+read ticks milliseconds total wait time for read requests
+write I/Os requests number of write I/Os processed
+write merges requests number of write I/Os merged with in-queue I/O
+write sectors sectors number of sectors written
+write ticks milliseconds total wait time for write requests
+in_flight requests number of I/Os currently in flight
+io_ticks milliseconds total time this block device has been active
+time_in_queue milliseconds total wait time for all requests
+
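As an illustration (this example is not part of the table above; "sda" is only
an example device name), the fields can be read and labelled from a shell:

$ cat /sys/block/sda/stat
$ awk '{ print "reads:", $1, "writes:", $5, "in_flight:", $9 }' /sys/block/sda/stat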
+read I/Os, write I/Os
+=====================
+
+These values increment when an I/O request completes.
+
+read merges, write merges
+=========================
+
+These values increment when an I/O request is merged with an
+already-queued I/O request.
+
+read sectors, write sectors
+===========================
+
+These values count the number of sectors read from or written to this
+block device. The "sectors" in question are the standard UNIX 512-byte
+sectors, not any device- or filesystem-specific block size. The
+counters are incremented when the I/O completes.
+
+read ticks, write ticks
+=======================
+
+These values count the number of milliseconds that I/O requests have
+waited on this block device. If there are multiple I/O requests waiting,
+these values will increase at a rate greater than 1000/second; for
+example, if 60 read requests wait for an average of 30 ms, the read_ticks
+field will increase by 60*30 = 1800.
+
+in_flight
+=========
+
+This value counts the number of I/O requests that have been issued to
+the device driver but have not yet completed. It does not include I/O
+requests that are in the queue but not yet issued to the device driver.
+
+io_ticks
+========
+
+This value counts the number of milliseconds during which the device has
+had I/O requests queued.
+
+time_in_queue
+=============
+
+This value counts the number of milliseconds that I/O requests have waited
+on this block device. If there are multiple I/O requests waiting, this
+value will increase as the product of the number of milliseconds times the
+number of requests waiting (see "read ticks" above for an example).
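A minimal sketch (not part of the original text; "sda" is only an example
device) of using the io_ticks field, the 10th value in the file, to approximate
device utilization over a one second window:

t1=$(awk '{ print $10 }' /sys/block/sda/stat)
sleep 1
t2=$(awk '{ print $10 }' /sys/block/sda/stat)
echo "approx utilization: $(( (t2 - t1) / 10 ))%"   # delta ms out of 1000 ms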
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
new file mode 100644
index 00000000000..08c5d04f308
--- /dev/null
+++ b/Documentation/cpu-hotplug.txt
@@ -0,0 +1,357 @@
+ CPU hotplug Support in Linux(tm) Kernel
+
+ Maintainers:
+ CPU Hotplug Core:
+ Rusty Russell <rusty@rustycorp.com.au>
+ Srivatsa Vaddagiri <vatsa@in.ibm.com>
+ i386:
+ Zwane Mwaikambo <zwane@arm.linux.org.uk>
+ ppc64:
+ Nathan Lynch <nathanl@austin.ibm.com>
+ Joel Schopp <jschopp@austin.ibm.com>
+ ia64/x86_64:
+ Ashok Raj <ashok.raj@intel.com>
+
+Authors: Ashok Raj <ashok.raj@intel.com>
+Lots of feedback: Nathan Lynch <nathanl@austin.ibm.com>,
+ Joel Schopp <jschopp@austin.ibm.com>
+
+Introduction
+
+Modern advances in system architectures have introduced advanced error
+reporting and correction capabilities in processors. CPU architectures permit
+partitioning support, where compute resources of a single CPU could be made
+available to virtual machine environments. There are a couple of OEMs that
+support NUMA hardware which is hot-pluggable as well, where physical
+node insertion and removal require support for CPU hotplug.
+
+Such advances require CPUs available to a kernel to be removed either for
+provisioning reasons, or for RAS purposes to keep an offending CPU off
+system execution path. Hence the need for CPU hotplug support in the
+Linux kernel.
+
+A more novel use of CPU-hotplug support is its use today in suspend
+resume support for SMP. Dual-core and HT support means that even
+a laptop now runs SMP kernels, which didn't support these methods. SMP support
+for suspend/resume is a work in progress.
+
+General Stuff about CPU Hotplug
+--------------------------------
+
+Command Line Switches
+---------------------
+maxcpus=n Restrict boot time cpus to n. Say if you have 4 cpus, using
+ maxcpus=2 will only boot 2. You can choose to bring the
+ other cpus online later; read the FAQs below for more info.
+
+additional_cpus=n [x86_64 only] use this to limit hotpluggable cpus.
+ This option sets
+ cpu_possible_map = cpu_present_map + additional_cpus
+
+CPU maps and such
+-----------------
+[For more on cpumaps and the primitives to manipulate them, please check
+include/linux/cpumask.h, which has more descriptive text.]
+
+cpu_possible_map: Bitmap of possible CPUs that can ever be available in the
+system. This is used to allocate some boot time memory for per_cpu variables
+that aren't designed to grow/shrink as CPUs are made available or removed.
+Once set during boot time discovery phase, the map is static, i.e. no bits
+are added or removed anytime. Trimming it accurately for your system needs
+upfront can save some boot time memory. See below for how we use heuristics
+in x86_64 case to keep this under check.
+
+cpu_online_map: Bitmap of all CPUs currently online. It is set in __cpu_up()
+after a cpu is available for kernel scheduling and ready to receive
+interrupts from devices. It is cleared when a cpu is brought down using
+__cpu_disable(), before which all OS services including interrupts are
+migrated to another target CPU.
+
+cpu_present_map: Bitmap of CPUs currently present in the system. Not all
+of them may be online. When physical hotplug is processed by the relevant
+subsystem (e.g. ACPI), the map can change and a bit is either added to or
+removed from it depending on whether the event is a hot-add or a hot-remove.
+There are currently no locking rules. Typical usage is to init topology
+during boot, at which time hotplug is disabled.
+
+You really don't need to manipulate any of the system cpu maps. They should
+be read-only for most uses. When setting up per-cpu resources almost always use
+cpu_possible_map/for_each_cpu() to iterate.
+
+Never use anything other than cpumask_t to represent bitmap of CPUs.
+
+#include <linux/cpumask.h>
+
+for_each_cpu - Iterate over cpu_possible_map
+for_each_online_cpu - Iterate over cpu_online_map
+for_each_present_cpu - Iterate over cpu_present_map
+for_each_cpu_mask(x,mask) - Iterate over the CPUs set in the given cpu mask.
+
+#include <linux/cpu.h>
+lock_cpu_hotplug() and unlock_cpu_hotplug():
+
+The above calls are used to inhibit cpu hotplug operations. While holding the
+cpucontrol mutex, cpu_online_map will not change. If you merely need to avoid
+cpus going away, you could also use preempt_disable() and preempt_enable()
+for those sections. Just remember the critical section cannot call any
+function that can sleep or schedule this process away. The preempt_disable()
+will work as long as stop_machine_run() is used to take a cpu down.
+
+CPU Hotplug - Frequently Asked Questions.
+
+Q: How do I enable my kernel to support CPU hotplug?
+A: When doing make defconfig, enable CPU hotplug support
+
+ "Processor type and Features" -> Support for Hotpluggable CPUs
+
+Make sure that you have CONFIG_HOTPLUG, and CONFIG_SMP turned on as well.
+
+You would need to enable CONFIG_HOTPLUG_CPU for SMP suspend/resume support
+as well.
+
+Q: What architectures support CPU hotplug?
+A: As of 2.6.14, the following architectures support CPU hotplug.
+
+i386 (Intel), ppc, ppc64, parisc, s390, ia64 and x86_64
+
+Q: How do I test if hotplug is supported on the newly built kernel?
+A: You should now notice an entry in sysfs.
+
+Check if sysfs is mounted, using the "mount" command. You should notice
+an entry as shown below in the output.
+
+....
+none on /sys type sysfs (rw)
+....
+
+If this is not mounted, do the following:
+
+#mkdir /sys
+#mount -t sysfs sys /sys
+
+Now you should see entries for all present cpus; the following is an example
+from an 8-way system.
+
+#pwd
+#/sys/devices/system/cpu
+#ls -l
+total 0
+drwxr-xr-x 10 root root 0 Sep 19 07:44 .
+drwxr-xr-x 13 root root 0 Sep 19 07:45 ..
+drwxr-xr-x 3 root root 0 Sep 19 07:44 cpu0
+drwxr-xr-x 3 root root 0 Sep 19 07:44 cpu1
+drwxr-xr-x 3 root root 0 Sep 19 07:44 cpu2
+drwxr-xr-x 3 root root 0 Sep 19 07:44 cpu3
+drwxr-xr-x 3 root root 0 Sep 19 07:44 cpu4
+drwxr-xr-x 3 root root 0 Sep 19 07:44 cpu5
+drwxr-xr-x 3 root root 0 Sep 19 07:44 cpu6
+drwxr-xr-x 3 root root 0 Sep 19 07:48 cpu7
+
+Under each directory you would find an "online" file which is the control
+file to logically online/offline a processor.
+
+Q: Does hot-add/hot-remove refer to physical add/remove of cpus?
+A: The terms hot-add/remove may not be used very consistently in the code.
+CONFIG_CPU_HOTPLUG enables logical online/offline capability in the kernel.
+To support physical addition/removal, one would need some BIOS hooks and
+the platform should have something like an attention button in PCI hotplug.
+CONFIG_ACPI_HOTPLUG_CPU enables ACPI support for physical add/remove of CPUs.
+
+Q: How do I logically offline a CPU?
+A: Do the following.
+
+#echo 0 > /sys/devices/system/cpu/cpuX/online
+
+Once the logical offline is successful, check
+
+#cat /proc/interrupts
+
+You should no longer see the CPU that you removed. Also, the online file will
+report the state as 0 when a cpu is offline and 1 when it is online.
+
+#To display the current cpu state.
+#cat /sys/devices/system/cpu/cpuX/online
+
+Q: Why can't I remove CPU0 on some systems?
+A: Some architectures may have some special dependency on a certain CPU.
+
+For example, on IA64 platforms we have the ability to send platform interrupts
+to the OS, a.k.a. Corrected Platform Error Interrupts (CPEI). In current ACPI
+specifications, there is no way to change the target CPU. Hence if the
+current ACPI version doesn't support such re-direction, we disable that CPU
+by making it not-removable.
+
+In such cases you will also notice that the online file is missing under cpu0.
+
+Q: How do I find out if a particular CPU is not removable?
+A: Depending on the implementation, some architectures may show this by the
+absence of the "online" file. This is done if it can be determined ahead of
+time that this CPU cannot be removed.
+
+In some situations, this can be a run-time check, i.e. if you try to remove the
+last CPU, this will not be permitted. You can find such failures by
+investigating the return value of the "echo" command.
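For example (a sketch, not part of the original text; replace X with the cpu
number), a refused offline shows up as a failed write:

if ! echo 0 > /sys/devices/system/cpu/cpuX/online ; then
    echo "cpuX could not be taken offline"
fi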
+
+Q: What happens when a CPU is being logically offlined?
+A: The following happen, listed in no particular order :-)
+
+- A notification is sent to in-kernel registered modules via the event
+ CPU_DOWN_PREPARE
+- All processes are migrated away from this outgoing CPU to a new CPU
+- All interrupts targeted to this CPU are migrated to a new CPU
+- Timers/bottom halves/tasklets are also migrated to a new CPU
+- Once all services are migrated, the kernel calls an arch-specific routine,
+ __cpu_disable(), to perform arch-specific cleanup.
+- Once this is successful, successful cleanup is signalled via the event
+ CPU_DEAD.
+
+ "It is expected that each service cleans up when the CPU_DOWN_PREPARE
+ notifier is called, when CPU_DEAD is called its expected there is nothing
+ running on behalf of this CPU that was offlined"
+
+Q: If I have some kernel code that needs to be aware of CPU arrival and
+ departure, how do I arrange for proper notification?
+A: This is what you would need in your kernel code to receive notifications.
+
+ #include <linux/cpu.h>
+ static int __cpuinit foobar_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+ {
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ foobar_online_action(cpu);
+ break;
+ case CPU_DEAD:
+ foobar_dead_action(cpu);
+ break;
+ }
+ return NOTIFY_OK;
+ }
+
+ static struct notifier_block foobar_cpu_notifier =
+ {
+ .notifier_call = foobar_cpu_callback,
+ };
+
+
+In your init function,
+
+ register_cpu_notifier(&foobar_cpu_notifier);
+
+You can fail PREPARE notifiers if something doesn't work to prepare resources.
+This will stop the activity and send a following CANCELED event back.
+
+CPU_DEAD should not be failed; it's just a goodness indication, but bad
+things will happen if a notifier in the path sends a BAD notify code.
+
+Q: I don't see my action being called for all CPUs already up and running?
+A: Yes, CPU notifiers are called only when new CPUs are on-lined or offlined.
+ If you need to perform some action for each cpu already in the system, then
+
+ for_each_online_cpu(i) {
+ foobar_cpu_callback(&foobar_cpu_notifier, CPU_UP_PREPARE, i);
+ foobar_cpu_callback(&foobar_cpu_notifier, CPU_ONLINE, i);
+ }
+
+Q: If I would like to develop cpu hotplug support for a new architecture,
+ what do i need at a minimum?
+A: The following are what is required for CPU hotplug infrastructure to work
+ correctly.
+
+ - Make sure you have an entry in Kconfig to enable CONFIG_HOTPLUG_CPU
+ - __cpu_up() - Arch interface to bring up a CPU
+ - __cpu_disable() - Arch interface to shut down a CPU; no more interrupts
+ can be handled by the kernel after the routine
+ returns. This includes shutting down the local
+ APIC timers etc.
+ - __cpu_die() - This is actually supposed to ensure death of the CPU.
+ Look at some example code in other arches that
+ implement CPU hotplug. The processor is taken
+ down from the idle() loop for that specific
+ architecture. __cpu_die() typically waits for some
+ per_cpu state to be set, to be positively sure that
+ the processor dead routine is called.
+
+Q: I need to ensure that a particular cpu is not removed while some
+ work specific to this cpu is in progress.
+A: First switch the current thread context to the preferred cpu:
+
+ int my_func_on_cpu(int cpu)
+ {
+ cpumask_t saved_mask, new_mask = CPU_MASK_NONE;
+ int curr_cpu, err = 0;
+
+ saved_mask = current->cpus_allowed;
+ cpu_set(cpu, new_mask);
+ err = set_cpus_allowed(current, new_mask);
+
+ if (err)
+ return err;
+
+ /*
+ * If we got scheduled out just after the return from
+ * set_cpus_allowed() before running the work, this ensures
+ * we stay locked.
+ */
+ curr_cpu = get_cpu();
+
+ if (curr_cpu != cpu) {
+ err = -EAGAIN;
+ goto ret;
+ } else {
+ /*
+ * Do work: but can't sleep, since get_cpu() disables preemption
+ */
+ }
+ ret:
+ put_cpu();
+ set_cpus_allowed(current, saved_mask);
+ return err;
+ }
+
+
+Q: How do we determine how many CPUs are available for hotplug?
+A: There is no clear spec-defined way from ACPI that can give us that
+ information today. Based on some input from Natalie of Unisys,
+ the ACPI MADT (Multiple APIC Description Table) marks those possible
+ CPUs in a system with disabled status.
+
+ Andi implemented some simple heuristics that count the number of disabled
+ CPUs in MADT as hotpluggable CPUs. In case there are no disabled CPUs,
+ we assume 1/2 the number of CPUs currently present can be hotplugged.
+
+ Caveat: Today's ACPI MADT can only provide 256 entries since the apicid field
+ in MADT is only 8 bits.
+
+User Space Notification
+
+Hotplug support for devices is common in Linux today. It is being used to
+support automatic configuration of network, usb and pci devices. A hotplug
+event can be used to invoke an agent script to perform the configuration task.
+
+You can add a /etc/hotplug/cpu.agent script to handle hotplug notifications
+in user space.
+
+ #!/bin/bash
+ # $Id: cpu.agent
+ # Kernel hotplug params include:
+ #ACTION=%s [online or offline]
+ #DEVPATH=%s
+ #
+ cd /etc/hotplug
+ . ./hotplug.functions
+
+ case $ACTION in
+ online)
+ echo `date` ":cpu.agent" add cpu >> /tmp/hotplug.txt
+ ;;
+ offline)
+ echo `date` ":cpu.agent" remove cpu >>/tmp/hotplug.txt
+ ;;
+ *)
+ debug_mesg CPU $ACTION event not supported
+ exit 1
+ ;;
+ esac
diff --git a/Documentation/cpusets.txt b/Documentation/cpusets.txt
index a09a8eb8066..990998ee10b 100644
--- a/Documentation/cpusets.txt
+++ b/Documentation/cpusets.txt
@@ -14,7 +14,10 @@ CONTENTS:
1.1 What are cpusets ?
1.2 Why are cpusets needed ?
1.3 How are cpusets implemented ?
- 1.4 How do I use cpusets ?
+ 1.4 What are exclusive cpusets ?
+ 1.5 What does notify_on_release do ?
+ 1.6 What is memory_pressure ?
+ 1.7 How do I use cpusets ?
2. Usage Examples and Syntax
2.1 Basic Usage
2.2 Adding/removing cpus
@@ -49,29 +52,6 @@ its cpus_allowed vector, and the kernel page allocator will not
allocate a page on a node that is not allowed in the requesting tasks
mems_allowed vector.
-If a cpuset is cpu or mem exclusive, no other cpuset, other than a direct
-ancestor or descendent, may share any of the same CPUs or Memory Nodes.
-A cpuset that is cpu exclusive has a sched domain associated with it.
-The sched domain consists of all cpus in the current cpuset that are not
-part of any exclusive child cpusets.
-This ensures that the scheduler load balacing code only balances
-against the cpus that are in the sched domain as defined above and not
-all of the cpus in the system. This removes any overhead due to
-load balancing code trying to pull tasks outside of the cpu exclusive
-cpuset only to be prevented by the tasks' cpus_allowed mask.
-
-A cpuset that is mem_exclusive restricts kernel allocations for
-page, buffer and other data commonly shared by the kernel across
-multiple users. All cpusets, whether mem_exclusive or not, restrict
-allocations of memory for user space. This enables configuring a
-system so that several independent jobs can share common kernel
-data, such as file system pages, while isolating each jobs user
-allocation in its own cpuset. To do this, construct a large
-mem_exclusive cpuset to hold all the jobs, and construct child,
-non-mem_exclusive cpusets for each individual job. Only a small
-amount of typical kernel memory, such as requests from interrupt
-handlers, is allowed to be taken outside even a mem_exclusive cpuset.
-
User level code may create and destroy cpusets by name in the cpuset
virtual file system, manage the attributes and permissions of these
cpusets and which CPUs and Memory Nodes are assigned to each cpuset,
@@ -155,7 +135,7 @@ Cpusets extends these two mechanisms as follows:
The implementation of cpusets requires a few, simple hooks
into the rest of the kernel, none in performance critical paths:
- - in main/init.c, to initialize the root cpuset at system boot.
+ - in init/main.c, to initialize the root cpuset at system boot.
- in fork and exit, to attach and detach a task from its cpuset.
- in sched_setaffinity, to mask the requested CPUs by what's
allowed in that tasks cpuset.
@@ -166,7 +146,7 @@ into the rest of the kernel, none in performance critical paths:
and related changes in both sched.c and arch/ia64/kernel/domain.c
- in the mbind and set_mempolicy system calls, to mask the requested
Memory Nodes by what's allowed in that tasks cpuset.
- - in page_alloc, to restrict memory to allowed nodes.
+ - in page_alloc.c, to restrict memory to allowed nodes.
- in vmscan.c, to restrict page recovery to the current cpuset.
In addition a new file system, of type "cpuset" may be mounted,
@@ -192,9 +172,15 @@ containing the following files describing that cpuset:
- cpus: list of CPUs in that cpuset
- mems: list of Memory Nodes in that cpuset
+ - memory_migrate flag: if set, move pages to cpusets nodes
- cpu_exclusive flag: is cpu placement exclusive?
- mem_exclusive flag: is memory placement exclusive?
- tasks: list of tasks (by pid) attached to that cpuset
+ - notify_on_release flag: run /sbin/cpuset_release_agent on exit?
+ - memory_pressure: measure of how much paging pressure in cpuset
+
+In addition, the root cpuset only has the following file:
+ - memory_pressure_enabled flag: compute memory_pressure?
New cpusets are created using the mkdir system call or shell
command. The properties of a cpuset, such as its flags, allowed
@@ -228,7 +214,108 @@ exclusive cpuset. Also, the use of a Linux virtual file system (vfs)
to represent the cpuset hierarchy provides for a familiar permission
and name space for cpusets, with a minimum of additional kernel code.
-1.4 How do I use cpusets ?
+
+1.4 What are exclusive cpusets ?
+--------------------------------
+
+If a cpuset is cpu or mem exclusive, no other cpuset, other than
+a direct ancestor or descendent, may share any of the same CPUs or
+Memory Nodes.
+
+A cpuset that is cpu_exclusive has a scheduler (sched) domain
+associated with it. The sched domain consists of all CPUs in the
+current cpuset that are not part of any exclusive child cpusets.
+This ensures that the scheduler load balancing code only balances
+against the CPUs that are in the sched domain as defined above and
+not all of the CPUs in the system. This removes any overhead due to
+load balancing code trying to pull tasks outside of the cpu_exclusive
+cpuset only to be prevented by the tasks' cpus_allowed mask.
+
+A cpuset that is mem_exclusive restricts kernel allocations for
+page, buffer and other data commonly shared by the kernel across
+multiple users. All cpusets, whether mem_exclusive or not, restrict
+allocations of memory for user space. This enables configuring a
+system so that several independent jobs can share common kernel data,
+such as file system pages, while isolating each job's user allocation in
+its own cpuset. To do this, construct a large mem_exclusive cpuset to
+hold all the jobs, and construct child, non-mem_exclusive cpusets for
+each individual job. Only a small amount of typical kernel memory,
+such as requests from interrupt handlers, is allowed to be taken
+outside even a mem_exclusive cpuset.
+
+
+1.5 What does notify_on_release do ?
+------------------------------------
+
+If the notify_on_release flag is enabled (1) in a cpuset, then whenever
+the last task in the cpuset leaves (exits or attaches to some other
+cpuset) and the last child cpuset of that cpuset is removed, then
+the kernel runs the command /sbin/cpuset_release_agent, supplying the
+pathname (relative to the mount point of the cpuset file system) of the
+abandoned cpuset. This enables automatic removal of abandoned cpusets.
+The default value of notify_on_release in the root cpuset at system
+boot is disabled (0). The default value of other cpusets at creation
+is the current value of their parent's notify_on_release setting.
+
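For illustration (not part of the original text; "myset" is a hypothetical
cpuset and the cpuset file system is assumed to be mounted on /dev/cpuset):

echo 1 > /dev/cpuset/myset/notify_on_release    # enable the flag
cat /dev/cpuset/myset/notify_on_release         # verify, prints 1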
+
+1.6 What is memory_pressure ?
+-----------------------------
+The memory_pressure of a cpuset provides a simple per-cpuset metric
+of the rate that the tasks in a cpuset are attempting to free up
+in-use memory on the nodes of the cpuset to satisfy additional memory
+requests.
+
+This enables batch managers monitoring jobs running in dedicated
+cpusets to efficiently detect what level of memory pressure that job
+is causing.
+
+This is useful both on tightly managed systems running a wide mix of
+submitted jobs, which may choose to terminate or re-prioritize jobs that
+are trying to use more memory than allowed on the nodes assigned them,
+and with tightly coupled, long running, massively parallel scientific
+computing jobs that will dramatically fail to meet required performance
+goals if they start to use more memory than allowed to them.
+
+This mechanism provides a very economical way for the batch manager
+to monitor a cpuset for signs of memory pressure. It's up to the
+batch manager or other user code to decide what to do about it and
+take action.
+
+==> Unless this feature is enabled by writing "1" to the special file
+ /dev/cpuset/memory_pressure_enabled, the hook in the rebalance
+ code of __alloc_pages() for this metric reduces to simply noticing
+ that the cpuset_memory_pressure_enabled flag is zero. So only
+ systems that enable this feature will compute the metric.
+
+Why a per-cpuset, running average:
+
+ Because this meter is per-cpuset, rather than per-task or mm,
+ the system load imposed by a batch scheduler monitoring this
+ metric is sharply reduced on large systems, because a scan of
+ the tasklist can be avoided on each set of queries.
+
+ Because this meter is a running average, instead of an accumulating
+ counter, a batch scheduler can detect memory pressure with a
+ single read, instead of having to read and accumulate results
+ for a period of time.
+
+ Because this meter is per-cpuset rather than per-task or mm,
+ the batch scheduler can obtain the key information, memory
+ pressure in a cpuset, with a single read, rather than having to
+ query and accumulate results over all the (dynamically changing)
+ set of tasks in the cpuset.
+
+A per-cpuset simple digital filter (requires a spinlock and 3 words
+of data per-cpuset) is kept, and updated by any task attached to that
+cpuset, if it enters the synchronous (direct) page reclaim code.
+
+A per-cpuset file provides an integer number representing the recent
+(half-life of 10 seconds) rate of direct page reclaims caused by
+the tasks in the cpuset, in units of reclaims attempted per second,
+times 1000.
+
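As a small sketch (not part of the original text; "myjob" is a hypothetical
cpuset), a batch manager could enable the feature once and then poll the
metric cheaply:

echo 1 > /dev/cpuset/memory_pressure_enabled    # enable computation globally
cat /dev/cpuset/myjob/memory_pressure           # recent reclaim rate * 1000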
+
+1.7 How do I use cpusets ?
--------------------------
In order to minimize the impact of cpusets on critical kernel
@@ -277,6 +364,30 @@ rewritten to the 'tasks' file of its cpuset. This is done to avoid
impacting the scheduler code in the kernel with a check for changes
in a tasks processor placement.
+Normally, once a page is allocated (given a physical page
+of main memory) then that page stays on whatever node it
+was allocated, so long as it remains allocated, even if the
+cpusets memory placement policy 'mems' subsequently changes.
+If the cpuset flag file 'memory_migrate' is set true, then when
+tasks are attached to that cpuset, any pages that task had
+allocated to it on nodes in its previous cpuset are migrated
+to the tasks new cpuset. Depending on the implementation,
+this migration may either be done by swapping the page out,
+so that the next time the page is referenced, it will be paged
+into the tasks new cpuset, usually on the node where it was
+referenced, or this migration may be done by directly copying
+the pages from the tasks previous cpuset to the new cpuset,
+where possible to the same node, relative to the new cpuset,
+as the node that held the page, relative to the old cpuset.
+Also if 'memory_migrate' is set true, then if that cpusets
+'mems' file is modified, pages allocated to tasks in that
+cpuset, that were on nodes in the previous setting of 'mems',
+will be moved to nodes in the new setting of 'mems.' Again,
+depending on the implementation, this might be done by swapping,
+or by direct copying. In either case, pages that were not in
+the tasks prior cpuset, or in the cpusets prior 'mems' setting,
+will not be moved.
+
There is an exception to the above. If hotplug functionality is used
to remove all the CPUs that are currently assigned to a cpuset,
then the kernel will automatically update the cpus_allowed of all
diff --git a/Documentation/dvb/avermedia.txt b/Documentation/dvb/avermedia.txt
index 2dc260b2b0a..068070ff13c 100644
--- a/Documentation/dvb/avermedia.txt
+++ b/Documentation/dvb/avermedia.txt
@@ -150,7 +150,8 @@ Getting the card going
The frontend module sp887x.o, requires an external firmware.
Please use the command "get_dvb_firmware sp887x" to download
- it. Then copy it to /usr/lib/hotplug/firmware.
+ it. Then copy it to /usr/lib/hotplug/firmware or /lib/firmware/
+ (depending on configuration of firmware hotplug).
Receiving DVB-T in Australia
diff --git a/Documentation/dvb/get_dvb_firmware b/Documentation/dvb/get_dvb_firmware
index be6eb4c7599..75c28a17409 100644
--- a/Documentation/dvb/get_dvb_firmware
+++ b/Documentation/dvb/get_dvb_firmware
@@ -23,7 +23,7 @@ use IO::Handle;
@components = ( "sp8870", "sp887x", "tda10045", "tda10046", "av7110", "dec2000t",
"dec2540t", "dec3000s", "vp7041", "dibusb", "nxt2002", "nxt2004",
- "or51211", "or51132_qam", "or51132_vsb");
+ "or51211", "or51132_qam", "or51132_vsb", "bluebird");
# Check args
syntax() if (scalar(@ARGV) != 1);
@@ -34,7 +34,11 @@ for ($i=0; $i < scalar(@components); $i++) {
if ($cid eq $components[$i]) {
$outfile = eval($cid);
die $@ if $@;
- print STDERR "Firmware $outfile extracted successfully. Now copy it to either /lib/firmware or /usr/lib/hotplug/firmware/ (depending on your hotplug version).\n";
+ print STDERR <<EOF;
+Firmware $outfile extracted successfully.
+Now copy it to either /usr/lib/hotplug/firmware or /lib/firmware
+(depending on configuration of firmware hotplug).
+EOF
exit(0);
}
}
@@ -243,7 +247,7 @@ sub nxt2002 {
my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 1);
checkstandard();
-
+
wgetfile($sourcefile, $url);
unzip($sourcefile, $tmpdir);
verify("$tmpdir/SkyNETU.sys", $hash);
@@ -308,6 +312,19 @@ sub or51132_vsb {
$fwfile;
}
+sub bluebird {
+ my $url = "http://www.linuxtv.org/download/dvb/firmware/dvb-usb-bluebird-01.fw";
+ my $outfile = "dvb-usb-bluebird-01.fw";
+ my $hash = "658397cb9eba9101af9031302671f49d";
+
+ checkstandard();
+
+ wgetfile($outfile, $url);
+ verify($outfile,$hash);
+
+ $outfile;
+}
+
# ---------------------------------------------------------------
# Utilities
diff --git a/Documentation/dvb/ttusb-dec.txt b/Documentation/dvb/ttusb-dec.txt
index 5c1e984c26a..b2f271cd784 100644
--- a/Documentation/dvb/ttusb-dec.txt
+++ b/Documentation/dvb/ttusb-dec.txt
@@ -41,4 +41,5 @@ Hotplug Firmware Loading for 2.6 kernels
For 2.6 kernels the firmware is loaded at the point that the driver module is
loaded. See linux/Documentation/dvb/firmware.txt for more information.
-Copy the three files downloaded above into the /usr/lib/hotplug/firmware directory.
+Copy the three files downloaded above into the /usr/lib/hotplug/firmware or
+/lib/firmware directory (depending on configuration of firmware hotplug).
diff --git a/Documentation/fb/cyblafb/bugs b/Documentation/fb/cyblafb/bugs
index f90cc66ea91..9443a6d72cd 100644
--- a/Documentation/fb/cyblafb/bugs
+++ b/Documentation/fb/cyblafb/bugs
@@ -11,4 +11,3 @@ Untested features
All LCD stuff is untested. If it worked in tridentfb, it should work in
cyblafb. Please test and report the results to Knut_Petersen@t-online.de.
-
diff --git a/Documentation/fb/cyblafb/fb.modes b/Documentation/fb/cyblafb/fb.modes
index cf4351fc32f..fe0e5223ba8 100644
--- a/Documentation/fb/cyblafb/fb.modes
+++ b/Documentation/fb/cyblafb/fb.modes
@@ -14,142 +14,141 @@
#
mode "640x480-50"
- geometry 640 480 640 3756 8
+ geometry 640 480 2048 4096 8
timings 47619 4294967256 24 17 0 216 3
endmode
mode "640x480-60"
- geometry 640 480 640 3756 8
+ geometry 640 480 2048 4096 8
timings 39682 4294967256 24 17 0 216 3
endmode
mode "640x480-70"
- geometry 640 480 640 3756 8
+ geometry 640 480 2048 4096 8
timings 34013 4294967256 24 17 0 216 3
endmode
mode "640x480-72"
- geometry 640 480 640 3756 8
+ geometry 640 480 2048 4096 8
timings 33068 4294967256 24 17 0 216 3
endmode
mode "640x480-75"
- geometry 640 480 640 3756 8
+ geometry 640 480 2048 4096 8
timings 31746 4294967256 24 17 0 216 3
endmode
mode "640x480-80"
- geometry 640 480 640 3756 8
+ geometry 640 480 2048 4096 8
timings 29761 4294967256 24 17 0 216 3
endmode
mode "640x480-85"
- geometry 640 480 640 3756 8
+ geometry 640 480 2048 4096 8
timings 28011 4294967256 24 17 0 216 3
endmode
mode "800x600-50"
- geometry 800 600 800 3221 8
+ geometry 800 600 2048 4096 8
timings 30303 96 24 14 0 136 11
endmode
mode "800x600-60"
- geometry 800 600 800 3221 8
+ geometry 800 600 2048 4096 8
timings 25252 96 24 14 0 136 11
endmode
mode "800x600-70"
- geometry 800 600 800 3221 8
+ geometry 800 600 2048 4096 8
timings 21645 96 24 14 0 136 11
endmode
mode "800x600-72"
- geometry 800 600 800 3221 8
+ geometry 800 600 2048 4096 8
timings 21043 96 24 14 0 136 11
endmode
mode "800x600-75"
- geometry 800 600 800 3221 8
+ geometry 800 600 2048 4096 8
timings 20202 96 24 14 0 136 11
endmode
mode "800x600-80"
- geometry 800 600 800 3221 8
+ geometry 800 600 2048 4096 8
timings 18939 96 24 14 0 136 11
endmode
mode "800x600-85"
- geometry 800 600 800 3221 8
+ geometry 800 600 2048 4096 8
timings 17825 96 24 14 0 136 11
endmode
mode "1024x768-50"
- geometry 1024 768 1024 2815 8
+ geometry 1024 768 2048 4096 8
timings 19054 144 24 29 0 120 3
endmode
mode "1024x768-60"
- geometry 1024 768 1024 2815 8
+ geometry 1024 768 2048 4096 8
timings 15880 144 24 29 0 120 3
endmode
mode "1024x768-70"
- geometry 1024 768 1024 2815 8
+ geometry 1024 768 2048 4096 8
timings 13610 144 24 29 0 120 3
endmode
mode "1024x768-72"
- geometry 1024 768 1024 2815 8
+ geometry 1024 768 2048 4096 8
timings 13232 144 24 29 0 120 3
endmode
mode "1024x768-75"
- geometry 1024 768 1024 2815 8
+ geometry 1024 768 2048 4096 8
timings 12703 144 24 29 0 120 3
endmode
mode "1024x768-80"
- geometry 1024 768 1024 2815 8
+ geometry 1024 768 2048 4096 8
timings 11910 144 24 29 0 120 3
endmode
mode "1024x768-85"
- geometry 1024 768 1024 2815 8
+ geometry 1024 768 2048 4096 8
timings 11209 144 24 29 0 120 3
endmode
mode "1280x1024-50"
- geometry 1280 1024 1280 2662 8
+ geometry 1280 1024 2048 4096 8
timings 11114 232 16 39 0 160 3
endmode
mode "1280x1024-60"
- geometry 1280 1024 1280 2662 8
+ geometry 1280 1024 2048 4096 8
timings 9262 232 16 39 0 160 3
endmode
mode "1280x1024-70"
- geometry 1280 1024 1280 2662 8
+ geometry 1280 1024 2048 4096 8
timings 7939 232 16 39 0 160 3
endmode
mode "1280x1024-72"
- geometry 1280 1024 1280 2662 8
+ geometry 1280 1024 2048 4096 8
timings 7719 232 16 39 0 160 3
endmode
mode "1280x1024-75"
- geometry 1280 1024 1280 2662 8
+ geometry 1280 1024 2048 4096 8
timings 7410 232 16 39 0 160 3
endmode
mode "1280x1024-80"
- geometry 1280 1024 1280 2662 8
+ geometry 1280 1024 2048 4096 8
timings 6946 232 16 39 0 160 3
endmode
mode "1280x1024-85"
- geometry 1280 1024 1280 2662 8
+ geometry 1280 1024 2048 4096 8
timings 6538 232 16 39 0 160 3
endmode
-
diff --git a/Documentation/fb/cyblafb/performance b/Documentation/fb/cyblafb/performance
index eb4e47a9cea..8d15d5dfc6b 100644
--- a/Documentation/fb/cyblafb/performance
+++ b/Documentation/fb/cyblafb/performance
@@ -77,4 +77,3 @@ patch that speeds up kernel bitblitting a lot ( > 20%).
| | | | |
| | | | |
+-----------+-----------------+-----------------+-----------------+
-
diff --git a/Documentation/fb/cyblafb/todo b/Documentation/fb/cyblafb/todo
index 80fb2f89b6c..c5f6d0eae54 100644
--- a/Documentation/fb/cyblafb/todo
+++ b/Documentation/fb/cyblafb/todo
@@ -22,11 +22,10 @@ accelerated color blitting Who needs it? The console driver does use color
everything else is done using color expanding
blitting of 1bpp character bitmaps.
-xpanning Who needs it?
-
ioctls Who needs it?
-TV-out Will be done later
+TV-out Will be done later. Use "vga= " at boot time
+ to set a suitable video mode.
??? Feel free to contact me if you have any
feature requests
diff --git a/Documentation/fb/cyblafb/usage b/Documentation/fb/cyblafb/usage
index e627c8f5421..a39bb3d402a 100644
--- a/Documentation/fb/cyblafb/usage
+++ b/Documentation/fb/cyblafb/usage
@@ -40,6 +40,16 @@ Selecting Modes
None of the modes possible to select as startup modes are affected by
the problems described at the end of the next subsection.
+ For all startup modes cyblafb chooses a virtual x resolution of 2048,
+ the only exception is mode 1280x1024 in combination with 32 bpp. This
+ allows ywrap scrolling for all those modes if rotation is 0 or 2, and
+ also fast scrolling if rotation is 1 or 3. The default virtual y reso-
+ lution is 4096 for bpp == 8, 2048 for bpp==16 and 1024 for bpp == 32,
+ again with the only exception of 1280x1024 at 32 bpp.
+
+ Please do set your video memory size to 8 MB in the BIOS setup. Other
+ values will work, but performance is decreased for a lot of modes.
+
Mode changes using fbset
========================
@@ -54,20 +64,26 @@ Selecting Modes
- if a flat panel is found, cyblafb does not allow you
to program a resolution higher than the physical
resolution of the flat panel monitor
- - cyblafb does not allow xres to differ from xres_virtual
- cyblafb does not allow vclk to exceed 230 MHz. As 32 bpp
and (currently) 24 bit modes use a doubled vclk internally,
the dotclock limit as seen by fbset is 115 MHz for those
modes and 230 MHz for 8 and 16 bpp modes.
+ - cyblafb will allow you to select very high resolutions as
+ long as the hardware can be programmed to these modes. The
+ documented limit 1600x1200 is not enforced, but don't expect
+ perfect signal quality.
- Any request that violates the rules given above will be ignored and
- fbset will return an error.
+ Any request that violates the rules given above will be either changed
+ to something the hardware supports or an error value will be returned.
If you program a virtual y resolution higher than the hardware limit,
cyblafb will silently decrease that value to the highest possible
- value.
+ value. The same is true for a virtual x resolution that is not
+ supported by the hardware. Cyblafb tries to adapt vyres first because
+ vxres decides if ywrap scrolling is possible or not.
- Attempts to disable acceleration are ignored.
+ Attempts to disable acceleration are ignored; I believe that this is
+ safe.
Some video modes that should work do not work as expected. If you use
the standard fb.modes, fbset 640x480-60 will program that mode, but
@@ -129,10 +145,6 @@ mode 640x480 or 800x600 or 1024x768 or 1280x1024
verbosity 0 is the default, increase to at least 2 for every
bug report!
-vesafb allows cyblafb to be loaded after vesafb has been
- loaded. See sections "Module unloading ...".
-
-
Development hints
=================
@@ -195,7 +207,7 @@ a graphics mode.
After booting, load cyblafb without any mode and bpp parameter and assign
cyblafb to individual ttys using con2fb, e.g.:
- modprobe cyblafb vesafb=1
+ modprobe cyblafb
con2fb /dev/fb1 /dev/tty1
Unloading cyblafb works without problems after you assign vesafb to all
@@ -203,4 +215,3 @@ ttys again, e.g.:
con2fb /dev/fb0 /dev/tty1
rmmod cyblafb
-
diff --git a/Documentation/fb/cyblafb/whatsnew b/Documentation/fb/cyblafb/whatsnew
new file mode 100644
index 00000000000..76c07a26e04
--- /dev/null
+++ b/Documentation/fb/cyblafb/whatsnew
@@ -0,0 +1,29 @@
+0.62
+====
+
+ - the vesafb parameter has been removed as I decided to allow the
+ feature without any special parameter.
+
+ - Cyblafb does not use the vga style of panning any longer, now the
+ "right view" register in the graphics engine IO space is used. Without
+ that change it was impossible to use all available memory, and without
+ access to all available memory it is impossible to ywrap.
+
+ - The imageblit function now uses hardware acceleration for all font
+ widths. Hardware blitting across pixel column 2048 is broken in the
+ cyberblade/i1 graphics core, but we work around that hardware bug.
+
+ - modes with vxres != xres are supported now.
+
+ - ywrap scrolling is supported now and the default. This is a big
+ performance gain.
+
+ - default video modes use vyres > yres and vxres > xres to allow
+ almost optimal scrolling speed for normal and rotated screens
+
+ - some features mainly useful for debugging the upper layers of the
+ framebuffer system have been added, have a look at the code
+
+ - fixed: Oops after unloading cyblafb when reading /proc/io*
+
+ - we work around some bugs of the higher framebuffer layers.
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 9b743198f77..9474501dd6c 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -47,17 +47,6 @@ Who: Paul E. McKenney <paulmck@us.ibm.com>
---------------------------
-What: IEEE1394 Audio and Music Data Transmission Protocol driver,
- Connection Management Procedures driver
-When: November 2005
-Files: drivers/ieee1394/{amdtp,cmp}*
-Why: These are incomplete, have never worked, and are better implemented
- in userland via raw1394 (see http://freebob.sourceforge.net/ for
- example.)
-Who: Jody McIntyre <scjody@steamballoon.com>
-
----------------------------
-
What: raw1394: requests of type RAW1394_REQ_ISO_SEND, RAW1394_REQ_ISO_LISTEN
When: November 2005
Why: Deprecated in favour of the new ioctl-based rawiso interface, which is
@@ -82,15 +71,6 @@ Who: Mauro Carvalho Chehab <mchehab@brturbo.com.br>
---------------------------
-What: i2c sysfs name change: in1_ref, vid deprecated in favour of cpu0_vid
-When: November 2005
-Files: drivers/i2c/chips/adm1025.c, drivers/i2c/chips/adm1026.c
-Why: Match the other drivers' name for the same function, duplicate names
- will be available until removal of old names.
-Who: Grant Coady <gcoady@gmail.com>
-
----------------------------
-
What: remove EXPORT_SYMBOL(panic_timeout)
When: April 2006
Files: kernel/panic.c
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 7e17712f322..74052d22d86 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -12,10 +12,14 @@ cifs.txt
- description of the CIFS filesystem
coda.txt
- description of the CODA filesystem.
+configfs/
+ - directory containing configfs documentation and example code.
cramfs.txt
- info on the cram filesystem for small storage (ROMs etc)
devfs/
- directory containing devfs documentation.
+dlmfs.txt
+ - info on the userspace interface to the OCFS2 DLM.
ext2.txt
- info, mount options and specifications for the Ext2 filesystem.
hpfs.txt
@@ -30,6 +34,8 @@ ntfs.txt
- info and mount options for the NTFS filesystem (Windows NT).
proc.txt
- info on Linux's /proc filesystem.
+ocfs2.txt
+ - info and mount options for the OCFS2 clustered filesystem.
romfs.txt
- Description of the ROMFS filesystem.
smbfs.txt
diff --git a/Documentation/filesystems/configfs/configfs.txt b/Documentation/filesystems/configfs/configfs.txt
new file mode 100644
index 00000000000..c4ff96b7c4e
--- /dev/null
+++ b/Documentation/filesystems/configfs/configfs.txt
@@ -0,0 +1,434 @@
+
+configfs - Userspace-driven kernel object configuration.
+
+Joel Becker <joel.becker@oracle.com>
+
+Updated: 31 March 2005
+
+Copyright (c) 2005 Oracle Corporation,
+ Joel Becker <joel.becker@oracle.com>
+
+
+[What is configfs?]
+
+configfs is a ram-based filesystem that provides the converse of
+sysfs's functionality. Where sysfs is a filesystem-based view of
+kernel objects, configfs is a filesystem-based manager of kernel
+objects, or config_items.
+
+With sysfs, an object is created in kernel (for example, when a device
+is discovered) and it is registered with sysfs. Its attributes then
+appear in sysfs, allowing userspace to read the attributes via
+readdir(3)/read(2). It may allow some attributes to be modified via
+write(2). The important point is that the object is created and
+destroyed in kernel, the kernel controls the lifecycle of the sysfs
+representation, and sysfs is merely a window on all this.
+
+A configfs config_item is created via an explicit userspace operation:
+mkdir(2). It is destroyed via rmdir(2). The attributes appear at
+mkdir(2) time, and can be read or modified via read(2) and write(2).
+As with sysfs, readdir(3) queries the list of items and/or attributes.
+symlink(2) can be used to group items together. Unlike sysfs, the
+lifetime of the representation is completely driven by userspace. The
+kernel modules backing the items must respond to this.
+
+Both sysfs and configfs can and should exist together on the same
+system. One is not a replacement for the other.
+
+[Using configfs]
+
+configfs can be compiled as a module or into the kernel. You can access
+it by doing
+
+ mount -t configfs none /config
+
+The configfs tree will be empty unless client modules are also loaded.
+These are modules that register their item types with configfs as
+subsystems. Once a client subsystem is loaded, it will appear as a
+subdirectory (or more than one) under /config. Like sysfs, the
+configfs tree is always there, whether mounted on /config or not.
+
+An item is created via mkdir(2). The item's attributes will also
+appear at this time. readdir(3) can determine what the attributes are,
+read(2) can query their default values, and write(2) can store new
+values. Like sysfs, attributes should be ASCII text files, preferably
+with only one value per file. The same efficiency caveats from sysfs
+apply. Don't mix more than one attribute in one attribute file.
+
+Like sysfs, configfs expects write(2) to store the entire buffer at
+once. When writing to configfs attributes, userspace processes should
+first read the entire file, modify the portions they wish to change, and
+then write the entire buffer back. Attribute files have a maximum size
+of one page (PAGE_SIZE, 4096 on i386).
+
+When an item needs to be destroyed, remove it with rmdir(2). An
+item cannot be destroyed if any other item has a link to it (via
+symlink(2)). Links can be removed via unlink(2).
+
+[Configuring FakeNBD: an Example]
+
+Imagine there's a Network Block Device (NBD) driver that allows you to
+access remote block devices. Call it FakeNBD. FakeNBD uses configfs
+for its configuration. Obviously, there will be a nice program that
+sysadmins use to configure FakeNBD, but somehow that program has to tell
+the driver about it. Here's where configfs comes in.
+
+When the FakeNBD driver is loaded, it registers itself with configfs.
+readdir(3) sees this just fine:
+
+ # ls /config
+ fakenbd
+
+A fakenbd connection can be created with mkdir(2). The name is
+arbitrary, but likely the tool will make some use of the name. Perhaps
+it is a uuid or a disk name:
+
+ # mkdir /config/fakenbd/disk1
+ # ls /config/fakenbd/disk1
+ target device rw
+
+The target attribute contains the IP address of the server FakeNBD will
+connect to. The device attribute is the device on the server.
+Predictably, the rw attribute determines whether the connection is
+read-only or read-write.
+
+ # echo 10.0.0.1 > /config/fakenbd/disk1/target
+ # echo /dev/sda1 > /config/fakenbd/disk1/device
+ # echo 1 > /config/fakenbd/disk1/rw
+
+That's it. That's all there is. Now the device is configured, via the
+shell no less.
+
+[Coding With configfs]
+
+Every object in configfs is a config_item. A config_item reflects an
+object in the subsystem. It has attributes that match values on that
+object. configfs handles the filesystem representation of that object
+and its attributes, allowing the subsystem to ignore all but the
+basic show/store interaction.
+
+Items are created and destroyed inside a config_group. A group is a
+collection of items that share the same attributes and operations.
+Items are created by mkdir(2) and removed by rmdir(2), but configfs
+handles that. The group has a set of operations to perform these tasks.
+
+A subsystem is the top level of a client module. During initialization,
+the client module registers the subsystem with configfs, and the subsystem
+appears as a directory at the top of the configfs filesystem. A
+subsystem is also a config_group, and can do everything a config_group
+can.
+
+[struct config_item]
+
+ struct config_item {
+ char *ci_name;
+ char ci_namebuf[UOBJ_NAME_LEN];
+ struct kref ci_kref;
+ struct list_head ci_entry;
+ struct config_item *ci_parent;
+ struct config_group *ci_group;
+ struct config_item_type *ci_type;
+ struct dentry *ci_dentry;
+ };
+
+ void config_item_init(struct config_item *);
+ void config_item_init_type_name(struct config_item *,
+ const char *name,
+ struct config_item_type *type);
+ struct config_item *config_item_get(struct config_item *);
+ void config_item_put(struct config_item *);
+
+Generally, struct config_item is embedded in a container structure, a
+structure that actually represents what the subsystem is doing. The
+config_item portion of that structure is how the object interacts with
+configfs.
+
+Whether statically defined in a source file or created by a parent
+config_group, a config_item must have one of the _init() functions
+called on it. This initializes the reference count and sets up the
+appropriate fields.
+
+All users of a config_item should have a reference on it via
+config_item_get(), and drop the reference when they are done via
+config_item_put().
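+
+As an illustrative sketch only (every "my_object" name below is made up, and
+"my_object_type" refers to a config_item_type as described in the next
+section), a subsystem might embed and initialize an item like this:
+
+	struct my_object {
+		struct config_item item;
+		int value;
+	};
+
+	static inline struct my_object *to_my_object(struct config_item *item)
+	{
+		return item ?
+			container_of(item, struct my_object, item) : NULL;
+	}
+
+	/* typically done inside a group's ->make_item() method */
+	struct my_object *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
+	if (obj) {
+		memset(obj, 0, sizeof(*obj));
+		config_item_init_type_name(&obj->item, "my_object",
+					   &my_object_type);
+	}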
+
+By itself, a config_item cannot do much more than appear in configfs.
+Usually a subsystem wants the item to display and/or store attributes,
+among other things. For that, it needs a type.
+
+[struct config_item_type]
+
+ struct configfs_item_operations {
+ void (*release)(struct config_item *);
+ ssize_t (*show_attribute)(struct config_item *,
+ struct configfs_attribute *,
+ char *);
+ ssize_t (*store_attribute)(struct config_item *,
+ struct configfs_attribute *,
+ const char *, size_t);
+ int (*allow_link)(struct config_item *src,
+ struct config_item *target);
+ int (*drop_link)(struct config_item *src,
+ struct config_item *target);
+ };
+
+ struct config_item_type {
+ struct module *ct_owner;
+ struct configfs_item_operations *ct_item_ops;
+ struct configfs_group_operations *ct_group_ops;
+ struct configfs_attribute **ct_attrs;
+ };
+
+The most basic function of a config_item_type is to define what
+operations can be performed on a config_item. All items that have been
+allocated dynamically will need to provide the ct_item_ops->release()
+method. This method is called when the config_item's reference count
+reaches zero. Items that wish to display an attribute need to provide
+the ct_item_ops->show_attribute() method. Similarly, storing a new
+attribute value uses the store_attribute() method.
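+
+As a sketch, the release() method of a dynamically allocated item usually
+just frees the container (using the hypothetical to_my_object() helper from
+the sketch above):
+
+	static void my_object_release(struct config_item *item)
+	{
+		kfree(to_my_object(item));
+	}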
+
+[struct configfs_attribute]
+
+ struct configfs_attribute {
+ char *ca_name;
+ struct module *ca_owner;
+ mode_t ca_mode;
+ };
+
+When a config_item wants an attribute to appear as a file in the item's
+configfs directory, it must define a configfs_attribute describing it.
+It then adds the attribute to the NULL-terminated array
+config_item_type->ct_attrs. When the item appears in configfs, the
+attribute file will appear with the configfs_attribute->ca_name
+filename. configfs_attribute->ca_mode specifies the file permissions.
+
+If an attribute is readable and the config_item provides a
+ct_item_ops->show_attribute() method, that method will be called
+whenever userspace asks for a read(2) on the attribute. The converse
+will happen for write(2).
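+
+As a sketch (names are illustrative only), an item exposing a single
+readable and writable "value" attribute might define:
+
+	static struct configfs_attribute my_object_attr_value = {
+		.ca_owner = THIS_MODULE,
+		.ca_name = "value",
+		.ca_mode = S_IRUGO | S_IWUSR,
+	};
+
+	static struct configfs_attribute *my_object_attrs[] = {
+		&my_object_attr_value,
+		NULL,
+	};
+
+The my_object_attrs array would then be plugged into the item type's
+ct_attrs field.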
+
+[struct config_group]
+
+A config_item cannot live in a vacuum. The only way one can be created
+is via mkdir(2) on a config_group. This will trigger creation of a
+child item.
+
+ struct config_group {
+ struct config_item cg_item;
+ struct list_head cg_children;
+ struct configfs_subsystem *cg_subsys;
+ struct config_group **default_groups;
+ };
+
+ void config_group_init(struct config_group *group);
+ void config_group_init_type_name(struct config_group *group,
+ const char *name,
+ struct config_item_type *type);
+
+
+The config_group structure contains a config_item. Properly configuring
+that item means that a group can behave as an item in its own right.
+However, it can do more: it can create child items or groups. This is
+accomplished via the group operations specified on the group's
+config_item_type.
+
+ struct configfs_group_operations {
+ struct config_item *(*make_item)(struct config_group *group,
+ const char *name);
+ struct config_group *(*make_group)(struct config_group *group,
+ const char *name);
+ int (*commit_item)(struct config_item *item);
+ void (*drop_item)(struct config_group *group,
+ struct config_item *item);
+ };
+
+A group creates child items by providing the
+ct_group_ops->make_item() method. If provided, this method is called from
+mkdir(2) in the group's directory. The subsystem allocates a new
+config_item (or more likely, its container structure), initializes it,
+and returns it to configfs. Configfs will then populate the filesystem
+tree to reflect the new item.
+
+If the subsystem wants the child to be a group itself, the subsystem
+provides ct_group_ops->make_group(). Everything else behaves the same,
+using the group _init() functions on the group.
+
+Finally, when userspace calls rmdir(2) on the item or group,
+ct_group_ops->drop_item() is called. As a config_group is also a
+config_item, there is no need for a separate drop_group() method.
+The subsystem must config_item_put() the reference that was initialized
+upon item allocation. If a subsystem has no work to do, it may omit
+the ct_group_ops->drop_item() method, and configfs will call
+config_item_put() on the item on behalf of the subsystem.
+
+IMPORTANT: drop_item() is void, and as such cannot fail. When rmdir(2)
+is called, configfs WILL remove the item from the filesystem tree
+(assuming that it has no children to keep it busy). The subsystem is
+responsible for responding to this. If the subsystem has references to
+the item in other threads, the memory is safe. It may take some time
+for the item to actually disappear from the subsystem's usage. But it
+is gone from configfs.
+
+A config_group cannot be removed while it still has child items. This
+is implemented in the configfs rmdir(2) code. ->drop_item() will not be
+called, as the item has not been dropped. rmdir(2) will fail, as the
+directory is not empty.
+
+[struct configfs_subsystem]
+
+A subsystem must register itself, usually at module_init time. This
+tells configfs to make the subsystem appear in the file tree.
+
+ struct configfs_subsystem {
+ struct config_group su_group;
+ struct semaphore su_sem;
+ };
+
+ int configfs_register_subsystem(struct configfs_subsystem *subsys);
+ void configfs_unregister_subsystem(struct configfs_subsystem *subsys);
+
+ A subsystem consists of a toplevel config_group and a semaphore.
+The group is where child config_items are created. For a subsystem,
+this group is usually defined statically. Before calling
+configfs_register_subsystem(), the subsystem must have initialized the
+group via the usual group _init() functions, and it must also have
+initialized the semaphore.
+ When the register call returns, the subsystem is live, and it
+will be visible via configfs. At that point, mkdir(2) can be called and
+the subsystem must be ready for it.
+
+[An Example]
+
+The best example of these basic concepts is the simple_children
+subsystem/group and the simple_child item in configfs_example.c. It
+shows a trivial object displaying and storing an attribute, and a simple
+group creating and destroying these children.
+
+[Hierarchy Navigation and the Subsystem Semaphore]
+
+configfs provides an extra bonus. Because they appear in a filesystem,
+the config_groups and config_items are arranged in a hierarchy. A
+subsystem must NEVER touch the filesystem
+parts, but the subsystem might be interested in this hierarchy. For
+this reason, the hierarchy is mirrored via the config_group->cg_children
+and config_item->ci_parent structure members.
+
+A subsystem can navigate the cg_children list and the ci_parent pointer
+to see the tree created by the subsystem. This can race with configfs'
+management of the hierarchy, so configfs uses the subsystem semaphore to
+protect modifications. Whenever a subsystem wants to navigate the
+hierarchy, it must do so under the protection of the subsystem
+semaphore.
+
+A subsystem will be prevented from acquiring the semaphore while a newly
+allocated item has not been linked into this hierarchy. Similarly, it
+will not be able to acquire the semaphore while a dropping item has not
+yet been unlinked. This means that an item's ci_parent pointer will
+never be NULL while the item is in configfs, and that an item will only
+be in its parent's cg_children list for the same duration. This allows
+a subsystem to trust ci_parent and cg_children while they hold the
+semaphore.
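+
+As a sketch (the function name is made up), walking a subsystem's direct
+children under the protection of the subsystem semaphore might look like:
+
+	static void my_subsys_show_children(struct configfs_subsystem *subsys)
+	{
+		struct config_item *item;
+
+		down(&subsys->su_sem);
+		list_for_each_entry(item, &subsys->su_group.cg_children,
+				    ci_entry) {
+			/* items stay linked while su_sem is held */
+			printk(KERN_INFO "child: %s\n",
+			       config_item_name(item));
+		}
+		up(&subsys->su_sem);
+	}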
+
+[Item Aggregation Via symlink(2)]
+
+configfs provides a simple group via the group->item parent/child
+relationship. Often, however, a larger environment requires aggregation
+outside of the parent/child connection. This is implemented via
+symlink(2).
+
+A config_item may provide the ct_item_ops->allow_link() and
+ct_item_ops->drop_link() methods. If the ->allow_link() method exists,
+symlink(2) may be called with the config_item as the source of the link.
+These links are only allowed between configfs config_items. Any
+symlink(2) attempt outside the configfs filesystem will be denied.
+
+When symlink(2) is called, the source config_item's ->allow_link()
+method is called with itself and a target item. If the source item
+allows linking to the target item, it returns 0. A source item may wish to
+reject a link if it only wants links to a certain type of object (say,
+in its own subsystem).
+
+When unlink(2) is called on the symbolic link, the source item is
+notified via the ->drop_link() method. Like the ->drop_item() method,
+this is a void function and cannot return failure. The subsystem is
+responsible for responding to the change.
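+
+As a sketch (the "my_" names and the -EPERM choice are illustrative only),
+an item that only accepts links to targets of one particular type might
+provide:
+
+	static int my_object_allow_link(struct config_item *src,
+					struct config_item *target)
+	{
+		/* refuse links to anything but our own kind of item */
+		if (target->ci_type != &my_target_type)
+			return -EPERM;
+		/* remember the target, take any needed references, etc. */
+		return 0;
+	}
+
+	static void my_object_drop_link(struct config_item *src,
+					struct config_item *target)
+	{
+		/* forget the target; this cannot fail */
+	}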
+
+A config_item cannot be removed while it links to any other item, nor
+can it be removed while an item links to it. Dangling symlinks are not
+allowed in configfs.
+
+[Automatically Created Subgroups]
+
+A new config_group may want to have two types of child config_items.
+While this could be codified by magic names in ->make_item(), it is much
+more explicit to have a method whereby userspace sees this divergence.
+
+Rather than have a group where some items behave differently than
+others, configfs provides a method whereby one or many subgroups are
+automatically created inside the parent at its creation. Thus,
+mkdir("parent) results in "parent", "parent/subgroup1", up through
+"parent/subgroupN". Items of type 1 can now be created in
+"parent/subgroup1", and items of type N can be created in
+"parent/subgroupN".
+
+These automatic subgroups, or default groups, do not preclude other
+children of the parent group. If ct_group_ops->make_group() exists,
+other child groups can be created on the parent group directly.
+
+A configfs subsystem specifies default groups by filling in the
+NULL-terminated array default_groups on the config_group structure.
+Each group in that array is populated in the configfs tree at the same
+time as the parent group. Similarly, they are removed at the same time
+as the parent. No extra notification is provided. When a ->drop_item()
+method call notifies the subsystem the parent group is going away, it
+also means every default group child associated with that parent group.
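+
+As a sketch (all names are made up, and the subgroups must have been
+initialized with the group _init() functions), filling in default_groups
+might look like:
+
+	static struct config_group my_subgroup1, my_subgroup2;
+
+	static struct config_group *my_default_groups[] = {
+		&my_subgroup1,
+		&my_subgroup2,
+		NULL,
+	};
+
+	/* before registering the parent group, or before returning it
+	 * from ->make_group() */
+	my_parent_group.default_groups = my_default_groups;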
+
+As a consequence of this, default_groups cannot be removed directly via
+rmdir(2). They also are not considered when rmdir(2) on the parent
+group is checking for children.
+
+[Committable Items]
+
+NOTE: Committable items are currently unimplemented.
+
+Some config_items cannot have a valid initial state. That is, no
+default values can be specified for the item's attributes such that the
+item can do its work. Userspace must configure one or more attributes,
+after which the subsystem can start whatever entity this item
+represents.
+
+Consider the FakeNBD device from above. Without a target address *and*
+a target device, the subsystem has no idea what block device to import.
+The simple example assumes that the subsystem merely waits until all the
+appropriate attributes are configured, and then connects. This will,
+indeed, work, but now every attribute store must check if the attributes
+are initialized. Every attribute store must fire off the connection if
+that condition is met.
+
+Far better would be an explicit action notifying the subsystem that the
+config_item is ready to go. More importantly, an explicit action allows
+the subsystem to provide feedback as to whether the attributes are
+initialized in a way that makes sense. configfs provides this as
+committable items.
+
+configfs still uses only normal filesystem operations. An item is
+committed via rename(2). The item is moved from a directory where it
+can be modified to a directory where it cannot.
+
+Any group that provides the ct_group_ops->commit_item() method has
+committable items. When this group appears in configfs, mkdir(2) will
+not work directly in the group. Instead, the group will have two
+subdirectories: "live" and "pending". The "live" directory does not
+support mkdir(2) or rmdir(2) either. It only allows rename(2). The
+"pending" directory does allow mkdir(2) and rmdir(2). An item is
+created in the "pending" directory. Its attributes can be modified at
+will. Userspace commits the item by renaming it into the "live"
+directory. At this point, the subsystem receives the ->commit_item()
+callback. If all required attributes are filled to satisfaction, the
+method returns zero and the item is moved to the "live" directory.
+
+As rmdir(2) does not work in the "live" directory, an item must be
+shutdown, or "uncommitted". Again, this is done via rename(2), this
+time from the "live" directory back to the "pending" one. The subsystem
+is notified by the ct_group_ops->uncommit_object() method.
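+
+Although committable items are not yet implemented, the intended userspace
+flow would presumably look something like this sketch (reusing the purely
+illustrative FakeNBD names from above):
+
+	# mkdir /config/fakenbd/pending/disk1
+	# echo 10.0.0.1 > /config/fakenbd/pending/disk1/target
+	# echo /dev/sda1 > /config/fakenbd/pending/disk1/device
+	# mv /config/fakenbd/pending/disk1 /config/fakenbd/live/	# commit
+	# mv /config/fakenbd/live/disk1 /config/fakenbd/pending/	# uncommit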
+
+
diff --git a/Documentation/filesystems/configfs/configfs_example.c b/Documentation/filesystems/configfs/configfs_example.c
new file mode 100644
index 00000000000..f3c6e4946f9
--- /dev/null
+++ b/Documentation/filesystems/configfs/configfs_example.c
@@ -0,0 +1,474 @@
+/*
+ * vim: noexpandtab ts=8 sts=0 sw=8:
+ *
+ * configfs_example.c - This file is a demonstration module containing
+ * a number of configfs subsystems.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * Based on sysfs:
+ * sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
+ *
+ * configfs Copyright (C) 2005 Oracle. All rights reserved.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/configfs.h>
+
+
+
+/*
+ * 01-childless
+ *
+ * This first example is a childless subsystem. It cannot create
+ * any config_items. It just has attributes.
+ *
+ * Note that we are enclosing the configfs_subsystem inside a container.
+ * This is not necessary if a subsystem has no attributes directly
+ * on the subsystem. See the next example, 02-simple-children, for
+ * such a subsystem.
+ */
+
+struct childless {
+ struct configfs_subsystem subsys;
+ int showme;
+ int storeme;
+};
+
+struct childless_attribute {
+ struct configfs_attribute attr;
+ ssize_t (*show)(struct childless *, char *);
+ ssize_t (*store)(struct childless *, const char *, size_t);
+};
+
+static inline struct childless *to_childless(struct config_item *item)
+{
+	return item ? container_of(to_configfs_subsystem(to_config_group(item)),
+				   struct childless, subsys) : NULL;
+}
+
+static ssize_t childless_showme_read(struct childless *childless,
+ char *page)
+{
+ ssize_t pos;
+
+ pos = sprintf(page, "%d\n", childless->showme);
+ childless->showme++;
+
+ return pos;
+}
+
+static ssize_t childless_storeme_read(struct childless *childless,
+ char *page)
+{
+ return sprintf(page, "%d\n", childless->storeme);
+}
+
+static ssize_t childless_storeme_write(struct childless *childless,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ char *p = (char *) page;
+
+ tmp = simple_strtoul(p, &p, 10);
+ if (!p || (*p && (*p != '\n')))
+ return -EINVAL;
+
+ if (tmp > INT_MAX)
+ return -ERANGE;
+
+ childless->storeme = tmp;
+
+ return count;
+}
+
+static ssize_t childless_description_read(struct childless *childless,
+ char *page)
+{
+ return sprintf(page,
+"[01-childless]\n"
+"\n"
+"The childless subsystem is the simplest possible subsystem in\n"
+"configfs. It does not support the creation of child config_items.\n"
+"It only has a few attributes. In fact, it isn't much different\n"
+"than a directory in /proc.\n");
+}
+
+static struct childless_attribute childless_attr_showme = {
+ .attr = { .ca_owner = THIS_MODULE, .ca_name = "showme", .ca_mode = S_IRUGO },
+ .show = childless_showme_read,
+};
+static struct childless_attribute childless_attr_storeme = {
+ .attr = { .ca_owner = THIS_MODULE, .ca_name = "storeme", .ca_mode = S_IRUGO | S_IWUSR },
+ .show = childless_storeme_read,
+ .store = childless_storeme_write,
+};
+static struct childless_attribute childless_attr_description = {
+ .attr = { .ca_owner = THIS_MODULE, .ca_name = "description", .ca_mode = S_IRUGO },
+ .show = childless_description_read,
+};
+
+static struct configfs_attribute *childless_attrs[] = {
+ &childless_attr_showme.attr,
+ &childless_attr_storeme.attr,
+ &childless_attr_description.attr,
+ NULL,
+};
+
+static ssize_t childless_attr_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *page)
+{
+ struct childless *childless = to_childless(item);
+ struct childless_attribute *childless_attr =
+ container_of(attr, struct childless_attribute, attr);
+ ssize_t ret = 0;
+
+ if (childless_attr->show)
+ ret = childless_attr->show(childless, page);
+ return ret;
+}
+
+static ssize_t childless_attr_store(struct config_item *item,
+ struct configfs_attribute *attr,
+ const char *page, size_t count)
+{
+ struct childless *childless = to_childless(item);
+ struct childless_attribute *childless_attr =
+ container_of(attr, struct childless_attribute, attr);
+ ssize_t ret = -EINVAL;
+
+ if (childless_attr->store)
+ ret = childless_attr->store(childless, page, count);
+ return ret;
+}
+
+static struct configfs_item_operations childless_item_ops = {
+ .show_attribute = childless_attr_show,
+ .store_attribute = childless_attr_store,
+};
+
+static struct config_item_type childless_type = {
+ .ct_item_ops = &childless_item_ops,
+ .ct_attrs = childless_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct childless childless_subsys = {
+ .subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "01-childless",
+ .ci_type = &childless_type,
+ },
+ },
+ },
+};
+
+
+/* ----------------------------------------------------------------- */
+
+/*
+ * 02-simple-children
+ *
+ * This example merely has a simple one-attribute child. Note that
+ * there is no extra attribute structure, as the child's attribute is
+ * known from the get-go. Also, there is no container for the
+ * subsystem, as it has no attributes of its own.
+ */
+
+struct simple_child {
+ struct config_item item;
+ int storeme;
+};
+
+static inline struct simple_child *to_simple_child(struct config_item *item)
+{
+ return item ? container_of(item, struct simple_child, item) : NULL;
+}
+
+static struct configfs_attribute simple_child_attr_storeme = {
+ .ca_owner = THIS_MODULE,
+ .ca_name = "storeme",
+ .ca_mode = S_IRUGO | S_IWUSR,
+};
+
+static struct configfs_attribute *simple_child_attrs[] = {
+ &simple_child_attr_storeme,
+ NULL,
+};
+
+static ssize_t simple_child_attr_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *page)
+{
+ ssize_t count;
+ struct simple_child *simple_child = to_simple_child(item);
+
+ count = sprintf(page, "%d\n", simple_child->storeme);
+
+ return count;
+}
+
+static ssize_t simple_child_attr_store(struct config_item *item,
+ struct configfs_attribute *attr,
+ const char *page, size_t count)
+{
+ struct simple_child *simple_child = to_simple_child(item);
+ unsigned long tmp;
+ char *p = (char *) page;
+
+ tmp = simple_strtoul(p, &p, 10);
+ if (!p || (*p && (*p != '\n')))
+ return -EINVAL;
+
+ if (tmp > INT_MAX)
+ return -ERANGE;
+
+ simple_child->storeme = tmp;
+
+ return count;
+}
+
+static void simple_child_release(struct config_item *item)
+{
+ kfree(to_simple_child(item));
+}
+
+static struct configfs_item_operations simple_child_item_ops = {
+ .release = simple_child_release,
+ .show_attribute = simple_child_attr_show,
+ .store_attribute = simple_child_attr_store,
+};
+
+static struct config_item_type simple_child_type = {
+ .ct_item_ops = &simple_child_item_ops,
+ .ct_attrs = simple_child_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+
+static struct config_item *simple_children_make_item(struct config_group *group, const char *name)
+{
+ struct simple_child *simple_child;
+
+ simple_child = kmalloc(sizeof(struct simple_child), GFP_KERNEL);
+ if (!simple_child)
+ return NULL;
+
+ memset(simple_child, 0, sizeof(struct simple_child));
+
+ config_item_init_type_name(&simple_child->item, name,
+ &simple_child_type);
+
+ simple_child->storeme = 0;
+
+ return &simple_child->item;
+}
+
+static struct configfs_attribute simple_children_attr_description = {
+ .ca_owner = THIS_MODULE,
+ .ca_name = "description",
+ .ca_mode = S_IRUGO,
+};
+
+static struct configfs_attribute *simple_children_attrs[] = {
+ &simple_children_attr_description,
+ NULL,
+};
+
+static ssize_t simple_children_attr_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *page)
+{
+ return sprintf(page,
+"[02-simple-children]\n"
+"\n"
+"This subsystem allows the creation of child config_items. These\n"
+"items have only one attribute that is readable and writeable.\n");
+}
+
+static struct configfs_item_operations simple_children_item_ops = {
+ .show_attribute = simple_children_attr_show,
+};
+
+/*
+ * Note that, since no extra work is required on ->drop_item(),
+ * no ->drop_item() is provided.
+ */
+static struct configfs_group_operations simple_children_group_ops = {
+ .make_item = simple_children_make_item,
+};
+
+static struct config_item_type simple_children_type = {
+ .ct_item_ops = &simple_children_item_ops,
+ .ct_group_ops = &simple_children_group_ops,
+ .ct_attrs = simple_children_attrs,
+};
+
+static struct configfs_subsystem simple_children_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "02-simple-children",
+ .ci_type = &simple_children_type,
+ },
+ },
+};
+
+
+/* ----------------------------------------------------------------- */
+
+/*
+ * 03-group-children
+ *
+ * This example reuses the simple_children group from above. However,
+ * the simple_children group is not the subsystem itself, it is a
+ * child of the subsystem. Creation of a group in the subsystem creates
+ * a new simple_children group. That group can then have simple_child
+ * children of its own.
+ */
+
+struct simple_children {
+ struct config_group group;
+};
+
+static struct config_group *group_children_make_group(struct config_group *group, const char *name)
+{
+ struct simple_children *simple_children;
+
+ simple_children = kmalloc(sizeof(struct simple_children),
+ GFP_KERNEL);
+ if (!simple_children)
+ return NULL;
+
+ memset(simple_children, 0, sizeof(struct simple_children));
+
+ config_group_init_type_name(&simple_children->group, name,
+ &simple_children_type);
+
+ return &simple_children->group;
+}
+
+static struct configfs_attribute group_children_attr_description = {
+ .ca_owner = THIS_MODULE,
+ .ca_name = "description",
+ .ca_mode = S_IRUGO,
+};
+
+static struct configfs_attribute *group_children_attrs[] = {
+ &group_children_attr_description,
+ NULL,
+};
+
+static ssize_t group_children_attr_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *page)
+{
+ return sprintf(page,
+"[03-group-children]\n"
+"\n"
+"This subsystem allows the creation of child config_groups. These\n"
+"groups are like the subsystem simple-children.\n");
+}
+
+static struct configfs_item_operations group_children_item_ops = {
+ .show_attribute = group_children_attr_show,
+};
+
+/*
+ * Note that, since no extra work is required on ->drop_item(),
+ * no ->drop_item() is provided.
+ */
+static struct configfs_group_operations group_children_group_ops = {
+ .make_group = group_children_make_group,
+};
+
+static struct config_item_type group_children_type = {
+ .ct_item_ops = &group_children_item_ops,
+ .ct_group_ops = &group_children_group_ops,
+ .ct_attrs = group_children_attrs,
+};
+
+static struct configfs_subsystem group_children_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "03-group-children",
+ .ci_type = &group_children_type,
+ },
+ },
+};
+
+/* ----------------------------------------------------------------- */
+
+/*
+ * We're now done with our subsystem definitions.
+ * For convenience in this module, here's a list of them all. It
+ * allows the init function to easily register them. Most modules
+ * will only have one subsystem, and will only call register_subsystem
+ * on it directly.
+ */
+static struct configfs_subsystem *example_subsys[] = {
+ &childless_subsys.subsys,
+ &simple_children_subsys,
+ &group_children_subsys,
+ NULL,
+};
+
+static int __init configfs_example_init(void)
+{
+ int ret;
+ int i;
+ struct configfs_subsystem *subsys;
+
+ for (i = 0; example_subsys[i]; i++) {
+ subsys = example_subsys[i];
+
+ config_group_init(&subsys->su_group);
+ init_MUTEX(&subsys->su_sem);
+ ret = configfs_register_subsystem(subsys);
+ if (ret) {
+ printk(KERN_ERR "Error %d while registering subsystem %s\n",
+ ret,
+ subsys->su_group.cg_item.ci_namebuf);
+ goto out_unregister;
+ }
+ }
+
+ return 0;
+
+out_unregister:
+	/* only unregister the subsystems that were registered successfully */
+	for (i--; i >= 0; i--) {
+		configfs_unregister_subsystem(example_subsys[i]);
+	}
+
+ return ret;
+}
+
+static void __exit configfs_example_exit(void)
+{
+ int i;
+
+ for (i = 0; example_subsys[i]; i++) {
+ configfs_unregister_subsystem(example_subsys[i]);
+ }
+}
+
+module_init(configfs_example_init);
+module_exit(configfs_example_exit);
+MODULE_LICENSE("GPL");
diff --git a/Documentation/filesystems/dlmfs.txt b/Documentation/filesystems/dlmfs.txt
new file mode 100644
index 00000000000..9afab845a90
--- /dev/null
+++ b/Documentation/filesystems/dlmfs.txt
@@ -0,0 +1,130 @@
+dlmfs
+==================
+A minimal DLM userspace interface implemented via a virtual file
+system.
+
+dlmfs is built with OCFS2, as it requires most of OCFS2's infrastructure.
+
+Project web page: http://oss.oracle.com/projects/ocfs2
+Tools web page: http://oss.oracle.com/projects/ocfs2-tools
+OCFS2 mailing lists: http://oss.oracle.com/projects/ocfs2/mailman/
+
+All code copyright 2005 Oracle except when otherwise noted.
+
+CREDITS
+=======
+
+Some code taken from ramfs which is Copyright (C) 2000 Linus Torvalds
+and Transmeta Corp.
+
+Mark Fasheh <mark.fasheh@oracle.com>
+
+Caveats
+=======
+- Right now it only works with the OCFS2 DLM, though support for other
+ DLM implementations should not be a major issue.
+
+Mount options
+=============
+None
+
+Usage
+=====
+
+If you're just interested in OCFS2, then please see ocfs2.txt. The
+rest of this document will be geared towards those who want to use
+dlmfs for easy-to-setup and easy-to-use clustered locking in
+userspace.
+
+Setup
+=====
+
+dlmfs requires that the OCFS2 cluster infrastructure be in
+place. Please download ocfs2-tools from the above url and configure a
+cluster.
+
+You'll want to start heartbeating on a volume which all the nodes in
+your lockspace can access. The easiest way to do this is via
+ocfs2_hb_ctl (distributed with ocfs2-tools). Right now it requires
+that an OCFS2 file system be in place so that it can automatically
+find its heartbeat area, though it will eventually support heartbeat
+against raw disks.
+
+Please see the ocfs2_hb_ctl and mkfs.ocfs2 manual pages distributed
+with ocfs2-tools.
+
+Once you're heartbeating, DLM lock 'domains' can be easily created /
+destroyed and locks within them accessed.
+
+Locking
+=======
+
+Users may access dlmfs via standard file system calls, or they can use
+'libo2dlm' (distributed with ocfs2-tools) which abstracts the file
+system calls and presents a more traditional locking api.
+
+dlmfs handles lock caching automatically for the user, so a lock
+request for an already acquired lock will not generate another DLM
+call. Userspace programs are assumed to handle their own local
+locking.
+
+Two levels of locks are supported - Shared Read and Exclusive.
+Also supported is a Trylock operation.
+
+For information on the libo2dlm interface, please see o2dlm.h,
+distributed with ocfs2-tools.
+
+Lock value blocks can be read and written to a resource via read(2)
+and write(2) against the fd obtained via your open(2) call. The
+maximum currently supported LVB length is 64 bytes (though that is an
+OCFS2 DLM limitation). Through this mechanism, users of dlmfs can share
+small amounts of data amongst their nodes.
+
+mkdir(2) signals dlmfs to join a domain (which will have the same name
+as the resulting directory)
+
+rmdir(2) signals dlmfs to leave the domain
+
+Locks for a given domain are represented by regular inodes inside the
+domain directory. Locking against them is done via the open(2) system
+call.
+
+The open(2) call will not return until your lock has been granted or
+an error has occurred, unless it has been instructed to do a trylock
+operation. If the lock succeeds, you'll get an fd.
+
+Use open(2) with O_CREAT to ensure the resource inode is created - dlmfs does
+not automatically create inodes for existing lock resources.
+
+Open Flag Lock Request Type
+--------- -----------------
+O_RDONLY Shared Read
+O_RDWR Exclusive
+
+Open Flag Resulting Locking Behavior
+--------- --------------------------
+O_NONBLOCK Trylock operation
+
+You must provide exactly one of O_RDONLY or O_RDWR.
+
+If O_NONBLOCK is also provided and the trylock operation was valid but
+could not lock the resource, then open(2) will return ETXTBSY.
+
+close(2) drops the lock associated with your fd.
+
+Modes passed to mkdir(2) or open(2) are adhered to locally. Chown is
+supported locally as well. This means you can use them to restrict
+access to the resources via dlmfs on your local node only.
+
+The resource LVB may be read from the fd in either Shared Read or
+Exclusive modes via the read(2) system call. It can be written via
+write(2) only when open in Exclusive mode.
+
+Once written, an LVB will be visible to other nodes that obtain Read
+Only or higher level locks on the resource.
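+
+As an illustrative sketch only (it assumes dlmfs is mounted at /dlm, and the
+domain and lock names are made up), joining a domain, taking an exclusive
+lock, updating the LVB and dropping the lock might look like:
+
+	#include <sys/stat.h>
+	#include <fcntl.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		char lvb[64] = "hello";
+		int fd;
+
+		mkdir("/dlm/mydomain", 0755);	/* join the domain */
+
+		/* add O_NONBLOCK for a trylock instead of a blocking lock */
+		fd = open("/dlm/mydomain/mylock", O_RDWR | O_CREAT, 0600);
+		if (fd < 0)
+			return 1;		/* lock was not granted */
+
+		write(fd, lvb, sizeof(lvb));	/* update the LVB (Exclusive) */
+		close(fd);			/* drop the lock */
+		return 0;
+	}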
+
+See Also
+========
+http://opendlm.sourceforge.net/cvsmirror/opendlm/docs/dlmbook_final.pdf
+
+It provides more information on the VMS distributed locking API.
diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt
index 9840d5b8d5b..afb1335c05d 100644
--- a/Documentation/filesystems/ext3.txt
+++ b/Documentation/filesystems/ext3.txt
@@ -2,11 +2,11 @@
Ext3 Filesystem
===============
-ext3 was originally released in September 1999. Written by Stephen Tweedie
-for 2.2 branch, and ported to 2.4 kernels by Peter Braam, Andreas Dilger,
+Ext3 was originally released in September 1999. Written by Stephen Tweedie
+for the 2.2 branch, and ported to 2.4 kernels by Peter Braam, Andreas Dilger,
Andrew Morton, Alexander Viro, Ted Ts'o and Stephen Tweedie.
-ext3 is ext2 filesystem enhanced with journalling capabilities.
+Ext3 is the ext2 filesystem enhanced with journalling capabilities.
Options
=======
@@ -14,76 +14,81 @@ Options
When mounting an ext3 filesystem, the following option are accepted:
(*) == default
-jounal=update Update the ext3 file system's journal to the
- current format.
+journal=update Update the ext3 file system's journal to the current
+ format.
-journal=inum When a journal already exists, this option is
- ignored. Otherwise, it specifies the number of
- the inode which will represent the ext3 file
- system's journal file.
+journal=inum When a journal already exists, this option is ignored.
+ Otherwise, it specifies the number of the inode which
+ will represent the ext3 file system's journal file.
+
+journal_dev=devnum When the external journal device's major/minor numbers
+ have changed, this option allows the user to specify
+ the new journal location. The journal device is
+ identified through its new major/minor numbers encoded
+ in devnum.
noload Don't load the journal on mounting.
-data=journal All data are committed into the journal prior
- to being written into the main file system.
+data=journal All data are committed into the journal prior to being
+ written into the main file system.
data=ordered (*) All data are forced directly out to the main file
- system prior to its metadata being committed to
- the journal.
+ system prior to its metadata being committed to the
+ journal.
-data=writeback Data ordering is not preserved, data may be
- written into the main file system after its
- metadata has been committed to the journal.
+data=writeback Data ordering is not preserved, data may be written
+ into the main file system after its metadata has been
+ committed to the journal.
commit=nrsec (*) Ext3 can be told to sync all its data and metadata
every 'nrsec' seconds. The default value is 5 seconds.
- This means that if you lose your power, you will lose,
- as much, the latest 5 seconds of work (your filesystem
- will not be damaged though, thanks to journaling). This
- default value (or any low value) will hurt performance,
- but it's good for data-safety. Setting it to 0 will
- have the same effect than leaving the default 5 sec.
+ This means that if you lose your power, you will lose
+ as much as the latest 5 seconds of work (your
+ filesystem will not be damaged though, thanks to the
+ journaling). This default value (or any low value)
+ will hurt performance, but it's good for data-safety.
+ Setting it to 0 will have the same effect as leaving
+ it at the default (5 seconds).
Setting it to very large values will improve
performance.
-barrier=1 This enables/disables barriers. barrier=0 disables it,
- barrier=1 enables it.
+barrier=1 This enables/disables barriers. barrier=0 disables
+ it, barrier=1 enables it.
-orlov (*) This enables the new Orlov block allocator. It's enabled
- by default.
+orlov (*) This enables the new Orlov block allocator. It is
+ enabled by default.
-oldalloc This disables the Orlov block allocator and enables the
- old block allocator. Orlov should have better performance,
- we'd like to get some feedback if it's the contrary for
- you.
+oldalloc This disables the Orlov block allocator and enables
+ the old block allocator. Orlov should have better
+ performance - we'd like to get some feedback if it's
+ the contrary for you.
-user_xattr Enables Extended User Attributes. Additionally, you need
- to have extended attribute support enabled in the kernel
- configuration (CONFIG_EXT3_FS_XATTR). See the attr(5)
- manual page and http://acl.bestbits.at to learn more
- about extended attributes.
+user_xattr Enables Extended User Attributes. Additionally, you
+ need to have extended attribute support enabled in the
+ kernel configuration (CONFIG_EXT3_FS_XATTR). See the
+ attr(5) manual page and http://acl.bestbits.at/ to
+ learn more about extended attributes.
nouser_xattr Disables Extended User Attributes.
-acl Enables POSIX Access Control Lists support. Additionally,
- you need to have ACL support enabled in the kernel
- configuration (CONFIG_EXT3_FS_POSIX_ACL). See the acl(5)
- manual page and http://acl.bestbits.at for more
- information.
+acl Enables POSIX Access Control Lists support.
+ Additionally, you need to have ACL support enabled in
+ the kernel configuration (CONFIG_EXT3_FS_POSIX_ACL).
+ See the acl(5) manual page and http://acl.bestbits.at/
+ for more information.
-noacl This option disables POSIX Access Control List support.
+noacl This option disables POSIX Access Control List
+ support.
reservation
noreservation
-resize=
-
bsddf (*) Make 'df' act like BSD.
minixdf Make 'df' act like Minix.
check=none Don't do extra checking of bitmaps on mount.
-nocheck
+nocheck
debug Extra debugging information is sent to syslog.
@@ -92,7 +97,7 @@ errors=continue Keep going on a filesystem error.
errors=panic Panic and halt the machine if an error occurs.
grpid Give objects the same group ID as their creator.
-bsdgroups
+bsdgroups
nogrpid (*) New objects have the group ID of their creator.
sysvgroups
@@ -103,81 +108,83 @@ resuid=n The user ID which may use the reserved blocks.
sb=n Use alternate superblock at this location.
-quota Quota options are currently silently ignored.
-noquota (see fs/ext3/super.c, line 594)
+quota
+noquota
grpquota
usrquota
Specification
=============
-ext3 shares all disk implementation with ext2 filesystem, and add
-transactions capabilities to ext2. Journaling is done by the
-Journaling block device layer.
+Ext3 shares all disk implementation with the ext2 filesystem, and adds
+transaction capabilities to ext2. Journaling is done by the Journaling Block
+Device layer.
Journaling Block Device layer
-----------------------------
-The Journaling Block Device layer (JBD) isn't ext3 specific. It was
-design to add journaling capabilities on a block device. The ext3
-filesystem code will inform the JBD of modifications it is performing
-(Call a transaction). the journal support the transactions start and
-stop, and in case of crash, the journal can replayed the transactions
-to put the partition on a consistent state fastly.
+The Journaling Block Device layer (JBD) isn't ext3 specific. It was designed
+to add journaling capabilities to a block device. The ext3 filesystem code
+will inform the JBD of modifications it is performing (called a transaction).
+The journal supports starting and stopping transactions, and in case of a
+crash, the journal can replay the transactions to put the partition back
+into a consistent state quickly.
-handles represent a single atomic update to a filesystem. JBD can
-handle external journal on a block device.
+A handle represents a single atomic update to a filesystem. JBD can handle an
+external journal on a block device.
Data Mode
---------
-There's 3 different data modes:
+There are 3 different data modes:
* writeback mode
-In data=writeback mode, ext3 does not journal data at all. This mode
-provides a similar level of journaling as XFS, JFS, and ReiserFS in its
-default mode - metadata journaling. A crash+recovery can cause
-incorrect data to appear in files which were written shortly before the
-crash. This mode will typically provide the best ext3 performance.
+In data=writeback mode, ext3 does not journal data at all. This mode provides
+a similar level of journaling as that of XFS, JFS, and ReiserFS in its default
+mode - metadata journaling. A crash+recovery can cause incorrect data to
+appear in files which were written shortly before the crash. This mode will
+typically provide the best ext3 performance.
* ordered mode
-In data=ordered mode, ext3 only officially journals metadata, but it
-logically groups metadata and data blocks into a single unit called a
-transaction. When it's time to write the new metadata out to disk, the
-associated data blocks are written first. In general, this mode
-perform slightly slower than writeback but significantly faster than
-journal mode.
+In data=ordered mode, ext3 only officially journals metadata, but it logically
+groups metadata and data blocks into a single unit called a transaction. When
+it's time to write the new metadata out to disk, the associated data blocks
+are written first. In general, this mode performs slightly slower than
+writeback but significantly faster than journal mode.
* journal mode
-data=journal mode provides full data and metadata journaling. All new
-data is written to the journal first, and then to its final location.
-In the event of a crash, the journal can be replayed, bringing both
-data and metadata into a consistent state. This mode is the slowest
-except when data needs to be read from and written to disk at the same
-time where it outperform all others mode.
+data=journal mode provides full data and metadata journaling. All new data is
+written to the journal first, and then to its final location.
+In the event of a crash, the journal can be replayed, bringing both data and
+metadata into a consistent state. This mode is the slowest except when data
+needs to be read from and written to disk at the same time, where it
+outperforms all other modes.
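+
+For example, the data mode is selected at mount time (sketch only; the
+device and mount point below are placeholders):
+
+	mount -t ext3 -o data=journal /dev/hdaX /mnt/work
+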
Compatibility
-------------
Ext2 partitions can be easily converted to ext3, with `tune2fs -j <dev>`.
-Ext3 is fully compatible with Ext2. Ext3 partitions can easily be
-mounted as Ext2.
+Ext3 is fully compatible with Ext2. Ext3 partitions can easily be mounted as
+Ext2.
+
External Tools
==============
-see manual pages to know more.
+See manual pages to learn more.
+
+tune2fs: create an ext3 journal on an ext2 partition with the -j flag.
+mke2fs: create an ext3 partition with the -j flag.
+debugfs: ext2 and ext3 file system debugger.
+ext2online: online (mounted) ext2 and ext3 filesystem resizer
-tune2fs: create a ext3 journal on a ext2 partition with the -j flags
-mke2fs: create a ext3 partition with the -j flags
-debugfs: ext2 and ext3 file system debugger
References
==========
-kernel source: file:/usr/src/linux/fs/ext3
- file:/usr/src/linux/fs/jbd
+kernel source: <file:fs/ext3/>
+ <file:fs/jbd/>
-programs: http://e2fsprogs.sourceforge.net
+programs: http://e2fsprogs.sourceforge.net/
+ http://ext2resize.sourceforge.net
-useful link:
- http://www.zip.com.au/~akpm/linux/ext3/ext3-usage.html
+useful links: http://www.zip.com.au/~akpm/linux/ext3/ext3-usage.html
http://www-106.ibm.com/developerworks/linux/library/l-fs7/
http://www-106.ibm.com/developerworks/linux/library/l-fs8/
diff --git a/Documentation/filesystems/ocfs2.txt b/Documentation/filesystems/ocfs2.txt
new file mode 100644
index 00000000000..f2595caf052
--- /dev/null
+++ b/Documentation/filesystems/ocfs2.txt
@@ -0,0 +1,55 @@
+OCFS2 filesystem
+==================
+OCFS2 is a general purpose extent based shared disk cluster file
+system with many similarities to ext3. It supports 64 bit inode
+numbers, and has automatically extending metadata groups which may
+also make it attractive for non-clustered use.
+
+You'll want to install the ocfs2-tools package in order to at least
+get "mount.ocfs2" and "ocfs2_hb_ctl".
+
+Project web page: http://oss.oracle.com/projects/ocfs2
+Tools web page: http://oss.oracle.com/projects/ocfs2-tools
+OCFS2 mailing lists: http://oss.oracle.com/projects/ocfs2/mailman/
+
+All code copyright 2005 Oracle except when otherwise noted.
+
+CREDITS:
+Lots of code taken from ext3 and other projects.
+
+Authors in alphabetical order:
+Joel Becker <joel.becker@oracle.com>
+Zach Brown <zach.brown@oracle.com>
+Mark Fasheh <mark.fasheh@oracle.com>
+Kurt Hackel <kurt.hackel@oracle.com>
+Sunil Mushran <sunil.mushran@oracle.com>
+Manish Singh <manish.singh@oracle.com>
+
+Caveats
+=======
+Features which OCFS2 does not support yet:
+ - sparse files
+ - extended attributes
+ - shared writeable mmap
+ - loopback is supported, but data written will not
+ be cluster coherent.
+ - quotas
+ - cluster aware flock
+ - Directory change notification (F_NOTIFY)
+ - Distributed Caching (F_SETLEASE/F_GETLEASE/break_lease)
+ - POSIX ACLs
+ - readpages / writepages (not user visible)
+
+Mount options
+=============
+
+OCFS2 supports the following mount options:
+(*) == default
+
+barrier=1 This enables/disables barriers. barrier=0 disables it,
+ barrier=1 enables it.
+errors=remount-ro(*) Remount the filesystem read-only on an error.
+errors=panic Panic and halt the machine if an error occurs.
+intr (*) Allow signals to interrupt cluster operations.
+nointr Do not allow signals to interrupt cluster
+ operations.
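+
+As a rough sketch (the device and mount point are placeholders, and the
+cluster must already be configured and heartbeating, see ocfs2-tools), a
+typical mount might look like:
+
+	mount -t ocfs2 -o errors=panic /dev/sdb1 /mnt/ocfs2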
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index d4773565ea2..944cf109a6f 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -418,7 +418,7 @@ VmallocChunk: 111088 kB
Dirty: Memory which is waiting to get written back to the disk
Writeback: Memory which is actively being written back to the disk
Mapped: files which have been mmaped, such as libraries
- Slab: in-kernel data structures cache
+ Slab: in-kernel data structures cache
CommitLimit: Based on the overcommit ratio ('vm.overcommit_ratio'),
this is the total amount of memory currently available to
be allocated on the system. This limit is only adhered to
@@ -1302,6 +1302,23 @@ VM has token based thrashing control mechanism and uses the token to prevent
unnecessary page faults in thrashing situation. The unit of the value is
second. The value would be useful to tune thrashing behavior.
+drop_caches
+-----------
+
+Writing to this will cause the kernel to drop clean caches, dentries and
+inodes from memory, causing that memory to become free.
+
+To free pagecache:
+ echo 1 > /proc/sys/vm/drop_caches
+To free dentries and inodes:
+ echo 2 > /proc/sys/vm/drop_caches
+To free pagecache, dentries and inodes:
+ echo 3 > /proc/sys/vm/drop_caches
+
+As this is a non-destructive operation and dirty objects are not freeable, the
+user should run `sync' first.
+
+
2.5 /proc/sys/dev - Device specific parameters
----------------------------------------------
diff --git a/Documentation/filesystems/ramfs-rootfs-initramfs.txt b/Documentation/filesystems/ramfs-rootfs-initramfs.txt
index b3404a03259..60ab61e54e8 100644
--- a/Documentation/filesystems/ramfs-rootfs-initramfs.txt
+++ b/Documentation/filesystems/ramfs-rootfs-initramfs.txt
@@ -143,12 +143,26 @@ as the following example:
dir /mnt 755 0 0
file /init initramfs/init.sh 755 0 0
+Run "usr/gen_init_cpio" (after the kernel build) to get a usage message
+documenting the above file format.
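+
+As a sketch (the file names are placeholders), such a list can be turned
+into an archive with:
+
+  usr/gen_init_cpio initramfs_list > initramfs_data.cpio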
+
One advantage of the text file is that root access is not required to
set permissions or create device nodes in the new archive. (Note that those
two example "file" entries expect to find files named "init.sh" and "busybox" in
a directory called "initramfs", under the linux-2.6.* directory. See
Documentation/early-userspace/README for more details.)
+The kernel does not depend on external cpio tools; gen_init_cpio is created
+from usr/gen_init_cpio.c which is entirely self-contained, and the kernel's
+boot-time extractor is also (obviously) self-contained. However, if you _do_
+happen to have cpio installed, the following command line can extract the
+generated cpio image back into its component files:
+
+ cpio -i -d -H newc -F initramfs_data.cpio --no-absolute-filenames
+
+Contents of initramfs:
+----------------------
+
If you don't already understand what shared libraries, devices, and paths
you need to get a minimal root filesystem up and running, here are some
references:
@@ -161,13 +175,69 @@ designed to be a tiny C library to statically link early userspace
code against, along with some related utilities. It is BSD licensed.
I use uClibc (http://www.uclibc.org) and busybox (http://www.busybox.net)
-myself. These are LGPL and GPL, respectively.
+myself. These are LGPL and GPL, respectively. (A self-contained initramfs
+package is planned for the busybox 1.2 release.)
In theory you could use glibc, but that's not well suited for small embedded
uses like this. (A "hello world" program statically linked against glibc is
over 400k. With uClibc it's 7k. Also note that glibc dlopens libnss to do
name lookups, even when otherwise statically linked.)
+Why cpio rather than tar?
+-------------------------
+
+This decision was made back in December, 2001. The discussion started here:
+
+ http://www.uwsg.iu.edu/hypermail/linux/kernel/0112.2/1538.html
+
+And spawned a second thread (specifically on tar vs cpio), starting here:
+
+ http://www.uwsg.iu.edu/hypermail/linux/kernel/0112.2/1587.html
+
+The quick and dirty summary version (which is no substitute for reading
+the above threads) is:
+
+1) cpio is a standard. It's decades old (from the AT&T days), and already
+ widely used on Linux (inside RPM, Red Hat's device driver disks). Here's
+ a Linux Journal article about it from 1996:
+
+ http://www.linuxjournal.com/article/1213
+
+ It's not as popular as tar because the traditional cpio command line tools
+ require _truly_hideous_ command line arguments. But that says nothing
+ either way about the archive format, and there are alternative tools,
+ such as:
+
+ http://freshmeat.net/projects/afio/
+
+2) The cpio archive format chosen by the kernel is simpler and cleaner (and
+ thus easier to create and parse) than any of the (literally dozens of)
+ various tar archive formats. The complete initramfs archive format is
+ explained in buffer-format.txt, created in usr/gen_init_cpio.c, and
+ extracted in init/initramfs.c. All three together come to less than 26k
+ total of human-readable text.
+
+3) The GNU project standardizing on tar is approximately as relevant as
+ Windows standardizing on zip. Linux is not part of either, and is free
+ to make its own technical decisions.
+
+4) Since this is a kernel internal format, it could easily have been
+ something brand new. The kernel provides its own tools to create and
+ extract this format anyway. Using an existing standard was preferable,
+ but not essential.
+
+5) Al Viro made the decision (quote: "tar is ugly as hell and not going to be
+ supported on the kernel side"):
+
+ http://www.uwsg.iu.edu/hypermail/linux/kernel/0112.2/1540.html
+
+ explained his reasoning:
+
+ http://www.uwsg.iu.edu/hypermail/linux/kernel/0112.2/1550.html
+ http://www.uwsg.iu.edu/hypermail/linux/kernel/0112.2/1638.html
+
+ and, most importantly, designed and implemented the initramfs code.
+
Future directions:
------------------
diff --git a/Documentation/filesystems/relayfs.txt b/Documentation/filesystems/relayfs.txt
index d803abed29f..5832377b734 100644
--- a/Documentation/filesystems/relayfs.txt
+++ b/Documentation/filesystems/relayfs.txt
@@ -44,30 +44,41 @@ relayfs can operate in a mode where it will overwrite data not yet
collected by userspace, and not wait for it to consume it.
relayfs itself does not provide for communication of such data between
-userspace and kernel, allowing the kernel side to remain simple and not
-impose a single interface on userspace. It does provide a separate
-helper though, described below.
+userspace and kernel, allowing the kernel side to remain simple and
+not impose a single interface on userspace. It does provide a set of
+examples and a separate helper though, described below.
+
+klog and relay-apps example code
+================================
+
+relayfs itself is ready to use, but to make things easier, a couple of
+simple utility functions and a set of examples are provided.
+
+The relay-apps example tarball, available on the relayfs sourceforge
+site, contains a set of self-contained examples, each consisting of a
+pair of .c files containing boilerplate code for each of the user and
+kernel sides of a relayfs application; combined, these two sets of
+boilerplate code provide glue to easily stream data to disk, without
+having to bother with mundane housekeeping chores.
+
+The 'klog debugging functions' patch (klog.patch in the relay-apps
+tarball) provides a couple of high-level logging functions to the
+kernel which allow writing formatted text or raw data to a channel,
+regardless of whether a channel to write into exists or not, or
+whether relayfs is compiled into the kernel or is configured as a
+module. These functions allow you to put unconditional 'trace'
+statements anywhere in the kernel or kernel modules; only when there
+is a 'klog handler' registered will data actually be logged (see the
+klog and kleak examples for details).
+
+It is of course possible to use relayfs from scratch, i.e. without
+using any of the relay-apps example code or klog, but you'll have to
+implement communication between userspace and kernel, allowing both to
+convey the state of buffers (full, empty, amount of padding).
+
+klog and the relay-apps examples can be found in the relay-apps
+tarball on http://relayfs.sourceforge.net
-klog, relay-app & librelay
-==========================
-
-relayfs itself is ready to use, but to make things easier, two
-additional systems are provided. klog is a simple wrapper to make
-writing formatted text or raw data to a channel simpler, regardless of
-whether a channel to write into exists or not, or whether relayfs is
-compiled into the kernel or is configured as a module. relay-app is
-the kernel counterpart of userspace librelay.c, combined these two
-files provide glue to easily stream data to disk, without having to
-bother with housekeeping. klog and relay-app can be used together,
-with klog providing high-level logging functions to the kernel and
-relay-app taking care of kernel-user control and disk-logging chores.
-
-It is possible to use relayfs without relay-app & librelay, but you'll
-have to implement communication between userspace and kernel, allowing
-both to convey the state of buffers (full, empty, amount of padding).
-
-klog, relay-app and librelay can be found in the relay-apps tarball on
-http://relayfs.sourceforge.net
The relayfs user space API
==========================
@@ -125,6 +136,8 @@ Here's a summary of the API relayfs provides to in-kernel clients:
relay_reset(chan)
relayfs_create_dir(name, parent)
relayfs_remove_dir(dentry)
+ relayfs_create_file(name, parent, mode, fops, data)
+ relayfs_remove_file(dentry)
channel management typically called on instigation of userspace:
@@ -141,6 +154,8 @@ Here's a summary of the API relayfs provides to in-kernel clients:
subbuf_start(buf, subbuf, prev_subbuf, prev_padding)
buf_mapped(buf, filp)
buf_unmapped(buf, filp)
+ create_buf_file(filename, parent, mode, buf, is_global)
+ remove_buf_file(dentry)
helper functions:
@@ -320,6 +335,71 @@ forces a sub-buffer switch on all the channel buffers, and can be used
to finalize and process the last sub-buffers before the channel is
closed.
+Creating non-relay files
+------------------------
+
+relay_open() automatically creates files in the relayfs filesystem to
+represent the per-cpu kernel buffers; it's often useful for
+applications to be able to create their own files alongside the relay
+files in the relayfs filesystem as well, e.g. 'control' files much like
+those created in /proc or debugfs for similar purposes, used to
+communicate control information between the kernel and user sides of a
+relayfs application. For this purpose the relayfs_create_file() and
+relayfs_remove_file() API functions exist. For relayfs_create_file(),
+the caller passes in a set of user-defined file operations to be used
+for the file and an optional void * to a user-specified data item,
+which will be accessible via inode->u.generic_ip (see the relay-apps
+tarball for examples). The file_operations are a required parameter
+to relayfs_create_file() and thus the semantics of these files are
+completely defined by the caller.
+
+See the relay-apps tarball at http://relayfs.sourceforge.net for
+examples of how these non-relay files are meant to be used.
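+
+As a minimal sketch (the state structure, file name and variable names
+here are made up for illustration; see the relay-apps examples for real
+usage), a read-only control file exposing a flag might look like this:
+
+  static ssize_t ctrl_read(struct file *filp, char __user *ubuf,
+                           size_t count, loff_t *ppos)
+  {
+          /* data passed to relayfs_create_file(), via inode->u.generic_ip */
+          struct app_state *state = filp->f_dentry->d_inode->u.generic_ip;
+          char buf[16];
+          int len = snprintf(buf, sizeof(buf), "%d\n", state->enabled);
+
+          if (*ppos >= len)
+                  return 0;
+          if (count > len - *ppos)
+                  count = len - *ppos;
+          if (copy_to_user(ubuf, buf + *ppos, count))
+                  return -EFAULT;
+          *ppos += count;
+          return count;
+  }
+
+  static struct file_operations ctrl_fops = {
+          .read = ctrl_read,
+  };
+
+  /* in the channel setup code; 'dir' and 'state' come from earlier setup */
+  struct dentry *ctrl_dentry = relayfs_create_file("enabled", dir, S_IRUGO,
+                                                   &ctrl_fops, state);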
+
+Creating relay files in other filesystems
+-----------------------------------------
+
+By default of course, relay_open() creates relay files in the relayfs
+filesystem. Because relay_file_operations is exported, however, it's
+also possible to create and use relay files in other pseudo-filesystems
+such as debugfs.
+
+For this purpose, two callback functions are provided,
+create_buf_file() and remove_buf_file(). create_buf_file() is called
+once for each per-cpu buffer from relay_open() to allow the client to
+create a file to be used to represent the corresponding buffer; if
+this callback is not defined, the default implementation will create
+and return a file in the relayfs filesystem to represent the buffer.
+The callback should return the dentry of the file created to represent
+the relay buffer. Note that the parent directory passed to
+relay_open() (and passed along to the callback), if specified, must
+exist in the same filesystem the new relay file is created in. If
+create_buf_file() is defined, remove_buf_file() must also be defined;
+it's responsible for deleting the file(s) created in create_buf_file()
+and is called during relay_close().
+
+The create_buf_file() implementation can also be defined in such a way
+as to allow the creation of a single 'global' buffer instead of the
+default per-cpu set. This can be useful for applications interested
+mainly in seeing the relative ordering of system-wide events without
+the need to bother with saving explicit timestamps for the purpose of
+merging/sorting per-cpu files in a postprocessing step.
+
+To have relay_open() create a global buffer, the create_buf_file()
+implementation should set the value of the is_global outparam to a
+non-zero value in addition to creating the file that will be used to
+represent the single buffer. In the case of a global buffer,
+create_buf_file() and remove_buf_file() will be called only once. The
+normal channel-writing functions e.g. relay_write() can still be used
+- writes from any cpu will transparently end up in the global buffer -
+but since it is a global buffer, callers should make sure they use the
+proper locking for such a buffer, either by wrapping writes in a
+spinlock, or by copying a write function from relayfs_fs.h and
+creating a local version that internally does the proper locking.
+
+See the 'exported-relayfile' examples in the relay-apps tarball for
+examples of creating and using relay files in debugfs.
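+
+As a rough sketch (the my_* names are made up for illustration, and the
+exact callback prototypes should be checked against relayfs_fs.h in your
+kernel), a pair of callbacks placing the buffer files in debugfs could
+look like this:
+
+  static struct dentry *my_create_buf_file(const char *filename,
+                                           struct dentry *parent, int mode,
+                                           struct rchan_buf *buf,
+                                           int *is_global)
+  {
+          /* relay_file_operations is exported for exactly this purpose */
+          return debugfs_create_file(filename, mode, parent, buf,
+                                     &relay_file_operations);
+  }
+
+  static int my_remove_buf_file(struct dentry *dentry)
+  {
+          debugfs_remove(dentry);
+          return 0;
+  }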
+
Misc
----
diff --git a/Documentation/filesystems/spufs.txt b/Documentation/filesystems/spufs.txt
new file mode 100644
index 00000000000..8edc3952eff
--- /dev/null
+++ b/Documentation/filesystems/spufs.txt
@@ -0,0 +1,521 @@
+SPUFS(2) Linux Programmer's Manual SPUFS(2)
+
+
+
+NAME
+ spufs - the SPU file system
+
+
+DESCRIPTION
+ The SPU file system is used on PowerPC machines that implement the Cell
+ Broadband Engine Architecture in order to access Synergistic Processor
+ Units (SPUs).
+
+ The file system provides a name space similar to posix shared memory or
+ message queues. Users that have write permissions on the file system
+ can use spu_create(2) to establish SPU contexts in the spufs root.
+
+ Every SPU context is represented by a directory containing a predefined
+ set of files. These files can be used for manipulating the state of the
+ logical SPU. Users can change permissions on those files, but not actu-
+ ally add or remove files.
+
+
+MOUNT OPTIONS
+ uid=<uid>
+ set the user owning the mount point, the default is 0 (root).
+
+ gid=<gid>
+ set the group owning the mount point, the default is 0 (root).
+
+
+FILES
+ The files in spufs mostly follow the standard behavior for regular sys-
+ tem calls like read(2) or write(2), but often support only a subset of
+ the operations supported on regular file systems. This list details the
+ supported operations and the deviations from the behaviour in the
+ respective man pages.
+
+ All files that support the read(2) operation also support readv(2) and
+ all files that support the write(2) operation also support writev(2).
+ All files support the access(2) and stat(2) family of operations, but
+ only the st_mode, st_nlink, st_uid and st_gid fields of struct stat
+ contain reliable information.
+
+ All files support the chmod(2)/fchmod(2) and chown(2)/fchown(2) opera-
+ tions, but will not be able to grant permissions that contradict the
+ possible operations, e.g. read access on the wbox file.
+
+ The current set of files is:
+
+
+ /mem
+ the contents of the local storage memory of the SPU. This can be
+ accessed like a regular shared memory file and contains both code and
+ data in the address space of the SPU. The possible operations on an
+ open mem file are:
+
+ read(2), pread(2), write(2), pwrite(2), lseek(2)
+            These operate as documented, with the exception that lseek(2),
+ write(2) and pwrite(2) are not supported beyond the end of the
+ file. The file size is the size of the local storage of the SPU,
+ which normally is 256 kilobytes.
+
+ mmap(2)
+ Mapping mem into the process address space gives access to the
+ SPU local storage within the process address space. Only
+ MAP_SHARED mappings are allowed.
+
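+       As an illustrative sketch only (the context path is an example and
+       error checking is omitted), mapping the local store of an existing
+       context could look like:
+
+            int fd = open("/spu/example/mem", O_RDWR);
+            void *ls = mmap(NULL, 256 * 1024, PROT_READ | PROT_WRITE,
+                            MAP_SHARED, fd, 0);
+            /* ls now gives direct access to the SPU local storage */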
+
+ /mbox
+ The first SPU to CPU communication mailbox. This file is read-only and
+ can be read in units of 32 bits. The file can only be used in non-
+ blocking mode and it even poll() will not block on it. The possible
+   blocking mode, and even poll() will not block on it. The possible
+
+ read(2)
+ If a count smaller than four is requested, read returns -1 and
+ sets errno to EINVAL. If there is no data available in the mail
+ box, the return value is set to -1 and errno becomes EAGAIN.
+ When data has been read successfully, four bytes are placed in
+ the data buffer and the value four is returned.
+
+
+ /ibox
+ The second SPU to CPU communication mailbox. This file is similar to
+ the first mailbox file, but can be read in blocking I/O mode, and the
+   poll family of system calls can be used to wait for it. The possible
+ operations on an open ibox file are:
+
+ read(2)
+ If a count smaller than four is requested, read returns -1 and
+ sets errno to EINVAL. If there is no data available in the mail
+ box and the file descriptor has been opened with O_NONBLOCK, the
+ return value is set to -1 and errno becomes EAGAIN.
+
+ If there is no data available in the mail box and the file
+ descriptor has been opened without O_NONBLOCK, the call will
+ block until the SPU writes to its interrupt mailbox channel.
+ When data has been read successfully, four bytes are placed in
+ the data buffer and the value four is returned.
+
+ poll(2)
+ Poll on the ibox file returns (POLLIN | POLLRDNORM) whenever
+ data is available for reading.
+
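+       As an illustrative sketch only (error handling omitted, and the
+       context path is an example), waiting for and reading one mailbox
+       entry could look like:
+
+            int ibox_fd = open("/spu/example/ibox", O_RDONLY);
+            struct pollfd pfd = { .fd = ibox_fd, .events = POLLIN };
+            unsigned int data;
+
+            poll(&pfd, 1, -1);          /* wait for mail from the SPU */
+            read(ibox_fd, &data, 4);    /* reads exactly four bytes */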
+
+ /wbox
+   The CPU to SPU communication mailbox. It is write-only and can be written
+   in units of 32 bits. If the mailbox is full, write() will block and
+   poll can be used to wait for it becoming empty again. The possible
+   operations on an open wbox file are:
+
+       write(2)
+            If a count smaller than four is requested, write returns -1 and
+            sets errno to EINVAL. If there is no space available in the mail
+            box and the file descriptor has been opened with O_NONBLOCK, the
+            return value is set to -1 and errno becomes EAGAIN.
+
+            If there is no space available in the mail box and the file
+            descriptor has been opened without O_NONBLOCK, the call will
+            block until the SPU reads from its PPE mailbox channel. When
+            data has been written successfully, four bytes are copied from
+            the data buffer and the value four is returned.
+
+ poll(2)
+            Poll on the wbox file returns (POLLOUT | POLLWRNORM) whenever
+ space is available for writing.
+
+
+ /mbox_stat
+ /ibox_stat
+ /wbox_stat
+ Read-only files that contain the length of the current queue, i.e. how
+ many words can be read from mbox or ibox or how many words can be
+ written to wbox without blocking. The files can be read only in 4-byte
+ units and return a big-endian binary integer number. The possible
+ operations on an open *box_stat file are:
+
+ read(2)
+ If a count smaller than four is requested, read returns -1 and
+ sets errno to EINVAL. Otherwise, a four byte value is placed in
+ the data buffer, containing the number of elements that can be
+ read from (for mbox_stat and ibox_stat) or written to (for
+ wbox_stat) the respective mail box without blocking or resulting
+ in EAGAIN.
+
+
+ /npc
+ /decr
+ /decr_status
+ /spu_tag_mask
+ /event_mask
+ /srr0
+ Internal registers of the SPU. The representation is an ASCII string
+   with the numeric value of the respective register. These
+ can be used in read/write mode for debugging, but normal operation of
+ programs should not rely on them because access to any of them except
+ npc requires an SPU context save and is therefore very inefficient.
+
+ The contents of these files are:
+
+ npc Next Program Counter
+
+ decr SPU Decrementer
+
+ decr_status Decrementer Status
+
+ spu_tag_mask MFC tag mask for SPU DMA
+
+ event_mask Event mask for SPU interrupts
+
+ srr0 Interrupt Return address register
+
+
+ The possible operations on an open npc, decr, decr_status,
+ spu_tag_mask, event_mask or srr0 file are:
+
+ read(2)
+ When the count supplied to the read call is shorter than the
+ required length for the pointer value plus a newline character,
+ subsequent reads from the same file descriptor will result in
+ completing the string, regardless of changes to the register by
+ a running SPU task. When a complete string has been read, all
+ subsequent read operations will return zero bytes and a new file
+ descriptor needs to be opened to read the value again.
+
+ write(2)
+ A write operation on the file results in setting the register to
+ the value given in the string. The string is parsed from the
+ beginning to the first non-numeric character or the end of the
+ buffer. Subsequent writes to the same file descriptor overwrite
+ the previous setting.
+
+
+ /fpcr
+ This file gives access to the Floating Point Status and Control Regis-
+ ter as a four byte long file. The operations on the fpcr file are:
+
+ read(2)
+ If a count smaller than four is requested, read returns -1 and
+ sets errno to EINVAL. Otherwise, a four byte value is placed in
+ the data buffer, containing the current value of the fpcr regis-
+ ter.
+
+ write(2)
+ If a count smaller than four is requested, write returns -1 and
+ sets errno to EINVAL. Otherwise, a four byte value is copied
+ from the data buffer, updating the value of the fpcr register.
+
+
+ /signal1
+ /signal2
+ The two signal notification channels of an SPU. These are read-write
+ files that operate on a 32 bit word. Writing to one of these files
+   triggers an interrupt on the SPU. The value written to the signal
+ files can be read from the SPU through a channel read or from host user
+ space through the file. After the value has been read by the SPU, it
+ is reset to zero. The possible operations on an open signal1 or sig-
+ nal2 file are:
+
+ read(2)
+ If a count smaller than four is requested, read returns -1 and
+ sets errno to EINVAL. Otherwise, a four byte value is placed in
+ the data buffer, containing the current value of the specified
+ signal notification register.
+
+ write(2)
+ If a count smaller than four is requested, write returns -1 and
+ sets errno to EINVAL. Otherwise, a four byte value is copied
+ from the data buffer, updating the value of the specified signal
+ notification register. The signal notification register will
+ either be replaced with the input data or will be updated to the
+          bitwise OR of the old value and the input data, depending on the
+          contents of the signal1_type or signal2_type file, respectively.
+
+
+ /signal1_type
+ /signal2_type
+ These two files change the behavior of the signal1 and signal2 notifi-
+   cation files. They contain a numerical ASCII string which is read as
+   either "1" or "0". In mode 0 (overwrite), the hardware replaces the
+   contents of the signal channel with the data that is written to it. In
+ mode 1 (logical OR), the hardware accumulates the bits that are subse-
+ quently written to it. The possible operations on an open signal1_type
+ or signal2_type file are:
+
+ read(2)
+ When the count supplied to the read call is shorter than the
+ required length for the digit plus a newline character, subse-
+ quent reads from the same file descriptor will result in com-
+ pleting the string. When a complete string has been read, all
+ subsequent read operations will return zero bytes and a new file
+ descriptor needs to be opened to read the value again.
+
+ write(2)
+ A write operation on the file results in setting the register to
+ the value given in the string. The string is parsed from the
+ beginning to the first non-numeric character or the end of the
+ buffer. Subsequent writes to the same file descriptor overwrite
+ the previous setting.
+
+
+EXAMPLES
+ /etc/fstab entry
+ none /spu spufs gid=spu 0 0
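+
+       Minimal usage sketch (illustrative only; the context name, the
+       load_program() helper and all error handling are placeholders):
+
+            int ctx = spu_create("/spu/example", 0, 0755);
+            int mem = open("/spu/example/mem", O_RDWR);
+            unsigned int npc = 0;
+
+            load_program(mem);         /* copy SPU code into local store */
+            spu_run(ctx, &npc, NULL);  /* returns when the SPU stops */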
+
+
+AUTHORS
+ Arnd Bergmann <arndb@de.ibm.com>, Mark Nutter <mnutter@us.ibm.com>,
+ Ulrich Weigand <Ulrich.Weigand@de.ibm.com>
+
+SEE ALSO
+ capabilities(7), close(2), spu_create(2), spu_run(2), spufs(7)
+
+
+
+Linux 2005-09-28 SPUFS(2)
+
+------------------------------------------------------------------------------
+
+SPU_RUN(2) Linux Programmer's Manual SPU_RUN(2)
+
+
+
+NAME
+ spu_run - execute an spu context
+
+
+SYNOPSIS
+ #include <sys/spu.h>
+
+ int spu_run(int fd, unsigned int *npc, unsigned int *event);
+
+DESCRIPTION
+ The spu_run system call is used on PowerPC machines that implement the
+ Cell Broadband Engine Architecture in order to access Synergistic Pro-
+ cessor Units (SPUs). It uses the fd that was returned from spu_cre-
+ ate(2) to address a specific SPU context. When the context gets sched-
+ uled to a physical SPU, it starts execution at the instruction pointer
+ passed in npc.
+
+ Execution of SPU code happens synchronously, meaning that spu_run does
+ not return while the SPU is still running. If there is a need to exe-
+ cute SPU code in parallel with other code on either the main CPU or
+ other SPUs, you need to create a new thread of execution first, e.g.
+ using the pthread_create(3) call.
+
+ When spu_run returns, the current value of the SPU instruction pointer
+ is written back to npc, so you can call spu_run again without updating
+ the pointers.
+
+ event can be a NULL pointer or point to an extended status code that
+ gets filled when spu_run returns. It can be one of the following con-
+ stants:
+
+ SPE_EVENT_DMA_ALIGNMENT
+ A DMA alignment error
+
+ SPE_EVENT_SPE_DATA_SEGMENT
+ A DMA segmentation error
+
+ SPE_EVENT_SPE_DATA_STORAGE
+ A DMA storage error
+
+ If NULL is passed as the event argument, these errors will result in a
+ signal delivered to the calling process.
+
+RETURN VALUE
+ spu_run returns the value of the spu_status register or -1 to indicate
+ an error and set errno to one of the error codes listed below. The
+ spu_status register value contains a bit mask of status codes and
+ optionally a 14 bit code returned from the stop-and-signal instruction
+ on the SPU. The bit masks for the status codes are:
+
+ 0x02 SPU was stopped by stop-and-signal.
+
+ 0x04 SPU was stopped by halt.
+
+ 0x08 SPU is waiting for a channel.
+
+ 0x10 SPU is in single-step mode.
+
+ 0x20 SPU has tried to execute an invalid instruction.
+
+ 0x40 SPU has tried to access an invalid channel.
+
+ 0x3fff0000
+ The bits masked with this value contain the code returned from
+ stop-and-signal.
+
+   One or more of the lower eight bits are always set, unless an error
+   code is returned from spu_run.
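+
+   As an illustration (error handling omitted; ctx and npc as in the
+   DESCRIPTION above), the stop-and-signal code can be extracted from a
+   successful return value like this:
+
+        int stop_code = 0;
+        int status = spu_run(ctx, &npc, NULL);
+
+        if (status >= 0 && (status & 0x02))
+                stop_code = (status & 0x3fff0000) >> 16;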
+
+ERRORS
+ EAGAIN or EWOULDBLOCK
+ fd is in non-blocking mode and spu_run would block.
+
+ EBADF fd is not a valid file descriptor.
+
+   EFAULT npc is not a valid pointer or event is neither NULL nor a valid
+          pointer.
+
+   EINTR  A signal occurred while spu_run was in progress. The npc value
+ has been updated to the new program counter value if necessary.
+
+ EINVAL fd is not a file descriptor returned from spu_create(2).
+
+ ENOMEM Insufficient memory was available to handle a page fault result-
+ ing from an MFC direct memory access.
+
+   ENOSYS The functionality is not provided by the current system, because
+ either the hardware does not provide SPUs or the spufs module is
+ not loaded.
+
+
+NOTES
+ spu_run is meant to be used from libraries that implement a more
+ abstract interface to SPUs, not to be used from regular applications.
+ See http://www.bsc.es/projects/deepcomputing/linuxoncell/ for the rec-
+ ommended libraries.
+
+
+CONFORMING TO
+ This call is Linux specific and only implemented by the ppc64 architec-
+ ture. Programs using this system call are not portable.
+
+
+BUGS
+   The code does not yet fully implement all features outlined here.
+
+
+AUTHOR
+ Arnd Bergmann <arndb@de.ibm.com>
+
+SEE ALSO
+ capabilities(7), close(2), spu_create(2), spufs(7)
+
+
+
+Linux 2005-09-28 SPU_RUN(2)
+
+------------------------------------------------------------------------------
+
+SPU_CREATE(2) Linux Programmer's Manual SPU_CREATE(2)
+
+
+
+NAME
+ spu_create - create a new spu context
+
+
+SYNOPSIS
+ #include <sys/types.h>
+ #include <sys/spu.h>
+
+ int spu_create(const char *pathname, int flags, mode_t mode);
+
+DESCRIPTION
+ The spu_create system call is used on PowerPC machines that implement
+ the Cell Broadband Engine Architecture in order to access Synergistic
+ Processor Units (SPUs). It creates a new logical context for an SPU in
+ pathname and returns a handle to associated with it. pathname must
+   pathname and returns a handle associated with it. pathname must
+ system (spufs). When spu_create is successful, a directory gets cre-
+ ated on pathname and it is populated with files.
+
+ The returned file handle can only be passed to spu_run(2) or closed,
+ other operations are not defined on it. When it is closed, all associ-
+ ated directory entries in spufs are removed. When the last file handle
+ pointing either inside of the context directory or to this file
+ descriptor is closed, the logical SPU context is destroyed.
+
+ The parameter flags can be zero or any bitwise or'd combination of the
+ following constants:
+
+ SPU_RAWIO
+ Allow mapping of some of the hardware registers of the SPU into
+ user space. This flag requires the CAP_SYS_RAWIO capability, see
+ capabilities(7).
+
+ The mode parameter specifies the permissions used for creating the new
+ directory in spufs. mode is modified with the user's umask(2) value
+ and then used for both the directory and the files contained in it. The
+ file permissions mask out some more bits of mode because they typically
+ support only read or write access. See stat(2) for a full list of the
+ possible mode values.
+
+
+RETURN VALUE
+ spu_create returns a new file descriptor. It may return -1 to indicate
+ an error condition and set errno to one of the error codes listed
+ below.
+
+
+ERRORS
+   EACCES The current user does not have write access on the spufs mount
+ point.
+
+ EEXIST An SPU context already exists at the given path name.
+
+ EFAULT pathname is not a valid string pointer in the current address
+ space.
+
+ EINVAL pathname is not a directory in the spufs mount point.
+
+ ELOOP Too many symlinks were found while resolving pathname.
+
+ EMFILE The process has reached its maximum open file limit.
+
+ ENAMETOOLONG
+ pathname was too long.
+
+ ENFILE The system has reached the global open file limit.
+
+ ENOENT Part of pathname could not be resolved.
+
+ ENOMEM The kernel could not allocate all resources required.
+
+ ENOSPC There are not enough SPU resources available to create a new
+ context or the user specific limit for the number of SPU con-
+ texts has been reached.
+
+   ENOSYS The functionality is not provided by the current system, because
+ either the hardware does not provide SPUs or the spufs module is
+ not loaded.
+
+ ENOTDIR
+ A part of pathname is not a directory.
+
+
+
+NOTES
+ spu_create is meant to be used from libraries that implement a more
+ abstract interface to SPUs, not to be used from regular applications.
+ See http://www.bsc.es/projects/deepcomputing/linuxoncell/ for the rec-
+ ommended libraries.
+
+
+FILES
+ pathname must point to a location beneath the mount point of spufs. By
+ convention, it gets mounted in /spu.
+
+
+CONFORMING TO
+ This call is Linux specific and only implemented by the ppc64 architec-
+ ture. Programs using this system call are not portable.
+
+
+BUGS
+   The code does not yet fully implement all features outlined here.
+
+
+AUTHOR
+ Arnd Bergmann <arndb@de.ibm.com>
+
+SEE ALSO
+ capabilities(7), close(2), spu_run(2), spufs(7)
+
+
+
+Linux 2005-09-28 SPU_CREATE(2)
diff --git a/Documentation/filesystems/sysfs-pci.txt b/Documentation/filesystems/sysfs-pci.txt
index 988a62fae11..7ba2baa165f 100644
--- a/Documentation/filesystems/sysfs-pci.txt
+++ b/Documentation/filesystems/sysfs-pci.txt
@@ -1,4 +1,5 @@
Accessing PCI device resources through sysfs
+--------------------------------------------
sysfs, usually mounted at /sys, provides access to PCI resources on platforms
that support it. For example, a given bus might look like this:
@@ -47,14 +48,21 @@ files, each with their own function.
binary - file contains binary data
cpumask - file contains a cpumask type
-The read only files are informational, writes to them will be ignored.
-Writable files can be used to perform actions on the device (e.g. changing
-config space, detaching a device). mmapable files are available via an
-mmap of the file at offset 0 and can be used to do actual device programming
-from userspace. Note that some platforms don't support mmapping of certain
-resources, so be sure to check the return value from any attempted mmap.
+The read-only files are informational; writes to them will be ignored, with
+the exception of the 'rom' file. Writable files can be used to perform
+actions on the device (e.g. changing config space, detaching a device).
+mmapable files are available via an mmap of the file at offset 0 and can be
+used to do actual device programming from userspace. Note that some platforms
+don't support mmapping of certain resources, so be sure to check the return
+value from any attempted mmap.
+
+The 'rom' file is special in that it provides read-only access to the device's
+ROM file, if available. It's disabled by default, however, so applications
+should write the string "1" to the file to enable it before attempting a read
+call, and disable it following the access by writing "0" to the file.
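+
+For illustration only (the device path is an example and error checking is
+omitted), reading a ROM image this way might look like:
+
+  int fd = open("/sys/bus/pci/devices/0000:00:02.0/rom", O_RDWR);
+  char buf[4096];
+  ssize_t n;
+
+  write(fd, "1", 1);              /* enable the ROM image */
+  lseek(fd, 0, SEEK_SET);
+  n = read(fd, buf, sizeof(buf)); /* read the start of the ROM contents */
+  write(fd, "0", 1);              /* disable it again */
+  close(fd);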
Accessing legacy resources through sysfs
+----------------------------------------
Legacy I/O port and ISA memory resources are also provided in sysfs if the
underlying platform supports them. They're located in the PCI class heirarchy,
@@ -75,6 +83,7 @@ simply dereference the returned pointer (after checking for errors of course)
to access legacy memory space.
Supporting PCI access on new platforms
+--------------------------------------
In order to support PCI resource mapping as described above, Linux platform
code must define HAVE_PCI_MMAP and provide a pci_mmap_page_range function.
diff --git a/Documentation/hrtimers.txt b/Documentation/hrtimers.txt
new file mode 100644
index 00000000000..7620ff735fa
--- /dev/null
+++ b/Documentation/hrtimers.txt
@@ -0,0 +1,178 @@
+
+hrtimers - subsystem for high-resolution kernel timers
+----------------------------------------------------
+
+This patch introduces a new subsystem for high-resolution kernel timers.
+
+One might ask the question: we already have a timer subsystem
+(kernel/timers.c), why do we need two timer subsystems? After a lot of
+back and forth trying to integrate high-resolution and high-precision
+features into the existing timer framework, and after testing various
+such high-resolution timer implementations in practice, we came to the
+conclusion that the timer wheel code is fundamentally not suitable for
+such an approach. We initially didn't believe this ('there must be a way
+to solve this'), and spent a considerable effort trying to integrate
+things into the timer wheel, but we failed. In hindsight, there are
+several reasons why such integration is hard/impossible:
+
+- the forced handling of low-resolution and high-resolution timers in
+ the same way leads to a lot of compromises, macro magic and #ifdef
+ mess. The timers.c code is very "tightly coded" around jiffies and
+ 32-bitness assumptions, and has been honed and micro-optimized for a
+ relatively narrow use case (jiffies in a relatively narrow HZ range)
+ for many years - and thus even small extensions to it easily break
+ the wheel concept, leading to even worse compromises. The timer wheel
+ code is very good and tight code, there's zero problems with it in its
+ current usage - but it is simply not suitable to be extended for
+ high-res timers.
+
+- the unpredictable [O(N)] overhead of cascading leads to delays which
+  necessitate a more complex handling of high resolution timers, which
+ in turn decreases robustness. Such a design still led to rather large
+ timing inaccuracies. Cascading is a fundamental property of the timer
+  wheel concept, it cannot be 'designed out' without inevitably
+ degrading other portions of the timers.c code in an unacceptable way.
+
+- the implementation of the current posix-timer subsystem on top of
+ the timer wheel has already introduced a quite complex handling of
+ the required readjusting of absolute CLOCK_REALTIME timers at
+  settimeofday or NTP time - further underlining our experience by
+ example: that the timer wheel data structure is too rigid for high-res
+ timers.
+
+- the timer wheel code is most optimal for use cases which can be
+ identified as "timeouts". Such timeouts are usually set up to cover
+ error conditions in various I/O paths, such as networking and block
+ I/O. The vast majority of those timers never expire and are rarely
+ recascaded because the expected correct event arrives in time so they
+ can be removed from the timer wheel before any further processing of
+ them becomes necessary. Thus the users of these timeouts can accept
+ the granularity and precision tradeoffs of the timer wheel, and
+ largely expect the timer subsystem to have near-zero overhead.
+ Accurate timing for them is not a core purpose - in fact most of the
+ timeout values used are ad-hoc. For them it is at most a necessary
+ evil to guarantee the processing of actual timeout completions
+ (because most of the timeouts are deleted before completion), which
+ should thus be as cheap and unintrusive as possible.
+
+The primary users of precision timers are user-space applications that
+utilize nanosleep, posix-timers and itimer interfaces. Also, in-kernel
+users like drivers and subsystems which require precise timed events
+(e.g. multimedia) can benefit from the availability of a separate
+high-resolution timer subsystem as well.
+
+While this subsystem does not offer high-resolution clock sources just
+yet, the hrtimer subsystem can be easily extended with high-resolution
+clock capabilities, and patches for that exist and are maturing quickly.
+The increasing demand for realtime and multimedia applications along
+with other potential users for precise timers gives another reason to
+separate the "timeout" and "precise timer" subsystems.
+
+Another potential benefit is that such a separation allows even more
+special-purpose optimization of the existing timer wheel for the low
+resolution and low precision use cases - once the precision-sensitive
+APIs are separated from the timer wheel and are migrated over to
+hrtimers. E.g. we could decrease the frequency of the timeout subsystem
+from 250 Hz to 100 Hz (or even lower).
+
+hrtimer subsystem implementation details
+----------------------------------------
+
+the basic design considerations were:
+
+- simplicity
+
+- data structure not bound to jiffies or any other granularity. All the
+ kernel logic works at 64-bit nanoseconds resolution - no compromises.
+
+- simplification of existing, timing related kernel code
+
+another basic requirement was the immediate enqueueing and ordering of
+timers at activation time. After looking at several possible solutions
+such as radix trees and hashes, we chose the red black tree as the basic
+data structure. Rbtrees are available as a library in the kernel and are
+used in various performance-critical areas of e.g. memory management and
+file systems. The rbtree is solely used for time sorted ordering, while
+a separate list is used to give the expiry code fast access to the
+queued timers, without having to walk the rbtree.
+
+(This separate list is also useful later, when we introduce
+high-resolution clocks, where we need separate pending and expired
+queues while keeping the time-order intact.)
+
+Time-ordered enqueueing is not purely for the purposes of
+high-resolution clocks though, it also simplifies the handling of
+absolute timers based on a low-resolution CLOCK_REALTIME. The existing
+implementation needed to keep an extra list of all armed absolute
+CLOCK_REALTIME timers along with complex locking. In case of
+settimeofday and NTP, all the timers (!) had to be dequeued, the
+time-changing code had to fix them up one by one, and all of them had to
+be enqueued again. The time-ordered enqueueing and the storage of the
+expiry time in absolute time units removes all this complex and poorly
+scaling code from the posix-timer implementation - the clock can simply
+be set without having to touch the rbtree. This also makes the handling
+of posix-timers simpler in general.
+
+The locking and per-CPU behavior of hrtimers was mostly taken from the
+existing timer wheel code, as it is mature and well suited. Sharing code
+was not really a win, due to the different data structures. Also, the
+hrtimer functions now have clearer behavior and clearer names - such as
+hrtimer_try_to_cancel() and hrtimer_cancel() [which are roughly
+equivalent to del_timer() and del_timer_sync()] - so there's no direct
+1:1 mapping between them on the algorithmical level, and thus no real
+potential for code sharing either.
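+
+For illustration, the cancel side of that API looks roughly like this
+(a sketch only; see the hrtimer header for the authoritative prototypes):
+
+  /* 'timer' is an already-initialized struct hrtimer */
+
+  /* try once, without waiting for a callback that is currently running */
+  ret = hrtimer_try_to_cancel(&timer);
+
+  /* or cancel and wait until any running callback has finished,
+     analogous to del_timer_sync() */
+  hrtimer_cancel(&timer);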
+
+Basic data types: every time value, absolute or relative, is in a
+special nanosecond-resolution type: ktime_t. The kernel-internal
+representation of ktime_t values and operations is implemented via
+macros and inline functions, and can be switched between a "hybrid
+union" type and a plain "scalar" 64bit nanoseconds representation (at
+compile time). The hybrid union type optimizes time conversions on 32bit
+CPUs. This build-time-selectable ktime_t storage format was implemented
+to avoid the performance impact of 64-bit multiplications and divisions
+on 32bit CPUs. Such operations are frequently necessary to convert
+between the storage formats provided by kernel and userspace interfaces
+and the internal time format. (See include/linux/ktime.h for further
+details.)
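+
+For illustration, the two selectable representations look conceptually
+like this (a sketch, not the literal definition in include/linux/ktime.h,
+which also handles endianness):
+
+  typedef union {
+          s64     tv64;           /* plain scalar 64-bit nanoseconds */
+          struct {
+                  s32     sec;    /* hybrid union: seconds and */
+                  s32     nsec;   /* nanoseconds kept separately */
+          } tv;
+  } ktime_t;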
+
+hrtimers - rounding of timer values
+-----------------------------------
+
+the hrtimer code will round timer events to lower-resolution clocks
+because it has to. Otherwise it will do no artificial rounding at all.
+
+one question is, what resolution value should be returned to the user by
+the clock_getres() interface. This will return whatever real resolution
+a given clock has - be it low-res, high-res, or artificially-low-res.
+
+hrtimers - testing and verification
+----------------------------------
+
+We used the high-resolution clock subsystem on top of hrtimers to verify
+the hrtimer implementation details in practice, and we also ran the posix
+timer tests in order to ensure specification compliance. We also ran
+tests on low-resolution clocks.
+
+The hrtimer patch converts the following kernel functionality to use
+hrtimers:
+
+ - nanosleep
+ - itimers
+ - posix-timers
+
+The conversion of nanosleep and posix-timers enabled the unification of
+nanosleep and clock_nanosleep.
+
+The code was successfully compiled for the following platforms:
+
+ i386, x86_64, ARM, PPC, PPC64, IA64
+
+The code was run-tested on the following platforms:
+
+ i386(UP/SMP), x86_64(UP/SMP), ARM, PPC
+
+hrtimers were also integrated into the -rt tree, along with a
+hrtimers-based high-resolution clock implementation, so the hrtimers
+code got a healthy amount of testing and use in practice.
+
+ Thomas Gleixner, Ingo Molnar
diff --git a/Documentation/hwmon/w83627hf b/Documentation/hwmon/w83627hf
index 78f37c2d602..5d23776e990 100644
--- a/Documentation/hwmon/w83627hf
+++ b/Documentation/hwmon/w83627hf
@@ -54,13 +54,16 @@ If you really want i2c accesses for these Super I/O chips,
use the w83781d driver. However this is not the preferred method
now that this ISA driver has been developed.
-Technically, the w83627thf does not support a VID reading. However, it's
-possible or even likely that your mainboard maker has routed these signals
-to a specific set of general purpose IO pins (the Asus P4C800-E is one such
-board). The w83627thf driver now interprets these as VID. If the VID on
-your board doesn't work, first see doc/vid in the lm_sensors package. If
-that still doesn't help, email us at lm-sensors@lm-sensors.org.
+The w83627_HF_ uses pins 110-106 as VID0-VID4. The w83627_THF_ uses the
+same pins as GPIO[0:4]. Technically, the w83627_THF_ does not support a
+VID reading. However, the two chips share an identical 128-pin package. So,
+it is possible or even likely for a w83627thf to have the VID signals routed
+to these pins despite their not being labeled for that purpose. Therefore,
+the w83627thf driver interprets these as VID. If the VID on your board
+doesn't work, first see doc/vid in the lm_sensors package[1]. If that still
+doesn't help, you may just ignore the bogus VID reading with no harm done.
-For further information on this driver see the w83781d driver
-documentation.
+For further information on this driver see the w83781d driver documentation.
+
+[1] http://www2.lm-sensors.nu/~lm78/cvs/browse.cgi/lm_sensors2/doc/vid
diff --git a/Documentation/i2c/busses/i2c-nforce2 b/Documentation/i2c/busses/i2c-nforce2
index e379e182e64..d751282d9b2 100644
--- a/Documentation/i2c/busses/i2c-nforce2
+++ b/Documentation/i2c/busses/i2c-nforce2
@@ -5,7 +5,8 @@ Supported adapters:
* nForce2 Ultra 400 MCP 10de:0084
* nForce3 Pro150 MCP 10de:00D4
* nForce3 250Gb MCP 10de:00E4
- * nForce4 MCP 10de:0052
+ * nForce4 MCP 10de:0052
+ * nForce4 MCP-04 10de:0034
Datasheet: not publically available, but seems to be similar to the
AMD-8111 SMBus 2.0 adapter.
diff --git a/Documentation/i2c/busses/i2c-parport b/Documentation/i2c/busses/i2c-parport
index 9f1d0082da1..d9f23c0763f 100644
--- a/Documentation/i2c/busses/i2c-parport
+++ b/Documentation/i2c/busses/i2c-parport
@@ -17,6 +17,7 @@ It currently supports the following devices:
* Velleman K8000 adapter
* ELV adapter
* Analog Devices evaluation boards (ADM1025, ADM1030, ADM1031, ADM1032)
+ * Barco LPT->DVI (K5800236) adapter
These devices use different pinout configurations, so you have to tell
the driver what you have, using the type module parameter. There is no
diff --git a/Documentation/i2c/porting-clients b/Documentation/i2c/porting-clients
index 184fac2377a..f03c2a02f80 100644
--- a/Documentation/i2c/porting-clients
+++ b/Documentation/i2c/porting-clients
@@ -1,10 +1,13 @@
-Revision 5, 2005-07-29
+Revision 6, 2005-11-20
Jean Delvare <khali@linux-fr.org>
Greg KH <greg@kroah.com>
This is a guide on how to convert I2C chip drivers from Linux 2.4 to
Linux 2.6. I have been using existing drivers (lm75, lm78) as examples.
Then I converted a driver myself (lm83) and updated this document.
+Note that this guide is strongly oriented towards hardware monitoring
+drivers. Many points are still valid for other types of drivers, but
+others may be irrelevant.
There are two sets of points below. The first set concerns technical
changes. The second set concerns coding policy. Both are mandatory.
@@ -22,16 +25,20 @@ Technical changes:
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
+ #include <linux/jiffies.h>
#include <linux/i2c.h>
+ #include <linux/i2c-isa.h> /* for ISA drivers */
#include <linux/hwmon.h> /* for hardware monitoring drivers */
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon-vid.h> /* if you need VRM support */
+ #include <linux/err.h> /* for class registration */
#include <asm/io.h> /* if you have I/O operations */
Please respect this inclusion order. Some extra headers may be
required for a given driver (e.g. "lm75.h").
* [Addresses] SENSORS_I2C_END becomes I2C_CLIENT_END, ISA addresses
- are no more handled by the i2c core.
+  are no longer handled by the i2c core. Address ranges are no longer
+  supported either; define each individual address separately.
SENSORS_INSMOD_<n> becomes I2C_CLIENT_INSMOD_<n>.
* [Client data] Get rid of sysctl_id. Try using standard names for
@@ -48,23 +55,23 @@ Technical changes:
int kind);
static void lm75_init_client(struct i2c_client *client);
static int lm75_detach_client(struct i2c_client *client);
- static void lm75_update_client(struct i2c_client *client);
+  static struct lm75_data *lm75_update_device(struct device *dev);
* [Sysctl] All sysctl stuff is of course gone (defines, ctl_table
and functions). Instead, you have to define show and set functions for
each sysfs file. Only define set for writable values. Take a look at an
- existing 2.6 driver for details (lm78 for example). Don't forget
+ existing 2.6 driver for details (it87 for example). Don't forget
to define the attributes for each file (this is that step that
links callback functions). Use the file names specified in
- Documentation/i2c/sysfs-interface for the individual files. Also
+ Documentation/hwmon/sysfs-interface for the individual files. Also
convert the units these files read and write to the specified ones.
If you need to add a new type of file, please discuss it on the
sensors mailing list <lm-sensors@lm-sensors.org> by providing a
- patch to the Documentation/i2c/sysfs-interface file.
+ patch to the Documentation/hwmon/sysfs-interface file.
* [Attach] For I2C drivers, the attach function should make sure
- that the adapter's class has I2C_CLASS_HWMON, using the
- following construct:
+ that the adapter's class has I2C_CLASS_HWMON (or whatever class is
+ suitable for your driver), using the following construct:
if (!(adapter->class & I2C_CLASS_HWMON))
return 0;
ISA-only drivers of course don't need this.
@@ -72,63 +79,72 @@ Technical changes:
* [Detect] As mentioned earlier, the flags parameter is gone.
The type_name and client_name strings are replaced by a single
- name string, which will be filled with a lowercase, short string
- (typically the driver name, e.g. "lm75").
+ name string, which will be filled with a lowercase, short string.
In i2c-only drivers, drop the i2c_is_isa_adapter check, it's
useless. Same for isa-only drivers, as the test would always be
true. Only hybrid drivers (which are quite rare) still need it.
- The errorN labels are reduced to the number needed. If that number
- is 2 (i2c-only drivers), it is advised that the labels are named
- exit and exit_free. For i2c+isa drivers, labels should be named
- ERROR0, ERROR1 and ERROR2. Don't forget to properly set err before
+ The labels used for error paths are reduced to the number needed.
+ It is advised that the labels are given descriptive names such as
+ exit and exit_free. Don't forget to properly set err before
jumping to error labels. By the way, labels should be left-aligned.
Use kzalloc instead of kmalloc.
Use i2c_set_clientdata to set the client data (as opposed to
a direct access to client->data).
- Use strlcpy instead of strcpy to copy the client name.
+ Use strlcpy instead of strcpy or snprintf to copy the client name.
Replace the sysctl directory registration by calls to
device_create_file. Move the driver initialization before any
sysfs file creation.
+ Register the client with the hwmon class (using hwmon_device_register)
+ if applicable.
Drop client->id.
Drop any 24RF08 corruption prevention you find, as this is now done
at the i2c-core level, and doing it twice voids it.
+ Don't add I2C_CLIENT_ALLOW_USE to client->flags, it's the default now.
* [Init] Limits must not be set by the driver (can be done later in
user-space). Chip should not be reset default (although a module
- parameter may be used to force is), and initialization should be
+ parameter may be used to force it), and initialization should be
limited to the strictly necessary steps.
-* [Detach] Get rid of data, remove the call to
- i2c_deregister_entry. Do not log an error message if
- i2c_detach_client fails, as i2c-core will now do it for you.
-
-* [Update] Don't access client->data directly, use
- i2c_get_clientdata(client) instead.
-
-* [Interface] Init function should not print anything. Make sure
- there is a MODULE_LICENSE() line, at the bottom of the file
- (after MODULE_AUTHOR() and MODULE_DESCRIPTION(), in this order).
+* [Detach] Remove the call to i2c_deregister_entry. Do not log an
+ error message if i2c_detach_client fails, as i2c-core will now do
+ it for you.
+ Unregister from the hwmon class if applicable.
+
+* [Update] The function prototype changed; it is now
+  passed a device structure, which you have to convert to a client
+  using to_i2c_client(dev). The update function should return a
+  pointer to the client data (see the sketch after this list).
+  Don't access client->data directly, use i2c_get_clientdata(client)
+  instead.
+  Use time_after() instead of direct jiffies comparison.
+
+* [Interface] Make sure there is a MODULE_LICENSE() line, at the bottom
+ of the file (after MODULE_AUTHOR() and MODULE_DESCRIPTION(), in this
+ order).
+
+* [Driver] The flags field of the i2c_driver structure is gone.
+ I2C_DF_NOTIFY is now the default behavior.
+ The i2c_driver structure has a driver member, which is itself a
+  structure, whose name member should be initialized to a driver name
+ string. i2c_driver itself has no name member anymore.
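+
+To illustrate the [Update] item above, a converted update function
+typically ends up looking roughly like this (the foo_* names and the
+register are made up; see it87 or lm75 for real examples):
+
+  static struct foo_data *foo_update_device(struct device *dev)
+  {
+          struct i2c_client *client = to_i2c_client(dev);
+          struct foo_data *data = i2c_get_clientdata(client);
+
+          down(&data->update_lock);
+          if (time_after(jiffies, data->last_updated + HZ * 2)
+           || !data->valid) {
+                  data->temp = i2c_smbus_read_byte_data(client, FOO_REG_TEMP);
+                  data->last_updated = jiffies;
+                  data->valid = 1;
+          }
+          up(&data->update_lock);
+
+          return data;
+  }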
Coding policy:
* [Copyright] Use (C), not (c), for copyright.
* [Debug/log] Get rid of #ifdef DEBUG/#endif constructs whenever you
- can. Calls to printk/pr_debug for debugging purposes are replaced
- by calls to dev_dbg. Here is an example on how to call it (taken
- from lm75_detect):
+ can. Calls to printk for debugging purposes are replaced by calls to
+ dev_dbg where possible, else to pr_debug. Here is an example of how
+ to call it (taken from lm75_detect):
dev_dbg(&client->dev, "Starting lm75 update\n");
Replace other printk calls with the dev_info, dev_err or dev_warn
function, as appropriate.
-* [Constants] Constants defines (registers, conversions, initial
- values) should be aligned. This greatly improves readability.
- Same goes for variables declarations. Alignments are achieved by the
- means of tabs, not spaces. Remember that tabs are set to 8 in the
- Linux kernel code.
-
-* [Structure definition] The name field should be standardized. All
- lowercase and as simple as the driver name itself (e.g. "lm75").
+* [Constants] Constants defines (registers, conversions) should be
+ aligned. This greatly improves readability.
+ Alignments are achieved by the means of tabs, not spaces. Remember
+ that tabs are set to 8 in the Linux kernel code.
* [Layout] Avoid extra empty lines between comments and what they
comment. Respect the coding style (see Documentation/CodingStyle),
diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients
index d19993cc060..3a057c8e550 100644
--- a/Documentation/i2c/writing-clients
+++ b/Documentation/i2c/writing-clients
@@ -25,9 +25,9 @@ routines, a client structure specific information like the actual I2C
address.
static struct i2c_driver foo_driver = {
- .owner = THIS_MODULE,
- .name = "Foo version 2.3 driver",
- .flags = I2C_DF_NOTIFY,
+ .driver = {
+ .name = "foo",
+ },
.attach_adapter = &foo_attach_adapter,
.detach_client = &foo_detach_client,
.command = &foo_command /* may be NULL */
@@ -36,10 +36,6 @@ static struct i2c_driver foo_driver = {
The name field must match the driver name, including the case. It must not
contain spaces, and may be up to 31 characters long.
-Don't worry about the flags field; just put I2C_DF_NOTIFY into it. This
-means that your driver will be notified when new adapters are found.
-This is almost always what you want.
-
All other fields are for call-back functions which will be explained
below.
@@ -496,17 +492,13 @@ Note that some functions are marked by `__init', and some data structures
by `__init_data'. Hose functions and structures can be removed after
kernel booting (or module loading) is completed.
+
Command function
================
A generic ioctl-like function call back is supported. You will seldom
-need this. You may even set it to NULL.
-
- /* No commands defined */
- int foo_command(struct i2c_client *client, unsigned int cmd, void *arg)
- {
- return 0;
- }
+need this, and its use is deprecated anyway, so newer designs should not
+use it. Set it to NULL.
Sending and receiving
diff --git a/Documentation/i2o/ioctl b/Documentation/i2o/ioctl
index 3e174978997..1e77fac4e12 100644
--- a/Documentation/i2o/ioctl
+++ b/Documentation/i2o/ioctl
@@ -185,7 +185,7 @@ VII. Getting Parameters
ENOMEM Kernel memory allocation error
A return value of 0 does not mean that the value was actually
- properly retreived. The user should check the result list
+ properly retrieved. The user should check the result list
to determine the specific status of the transaction.
VIII. Downloading Software
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index d802ce88bed..443230b43e0 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -1033,9 +1033,9 @@ When kbuild executes the following steps are followed (roughly):
Example:
#arch/i386/Makefile
- GCC_VERSION := $(call cc-version)
cflags-y += $(shell \
- if [ $(GCC_VERSION) -ge 0300 ] ; then echo "-mregparm=3"; fi ;)
+ if [ $(call cc-version) -ge 0300 ] ; then \
+ echo "-mregparm=3"; fi ;)
In the above example -mregparm=3 is only used for gcc version greater
than or equal to gcc 3.0.
diff --git a/Documentation/kbuild/modules.txt b/Documentation/kbuild/modules.txt
index 1c0db652b36..7e77f93634e 100644
--- a/Documentation/kbuild/modules.txt
+++ b/Documentation/kbuild/modules.txt
@@ -18,6 +18,7 @@ In this document you will find information about:
=== 5. Include files
--- 5.1 How to include files from the kernel include dir
--- 5.2 External modules using an include/ dir
+ --- 5.3 External modules using several directories
=== 6. Module installation
--- 6.1 INSTALL_MOD_PATH
--- 6.2 INSTALL_MOD_DIR
@@ -344,6 +345,45 @@ directory and therefore needs to deal with this in their kbuild file.
Note that in the assignment there is no space between -I and the path.
This is a kbuild limitation: there must be no space present.
+--- 5.3 External modules using several directories
+
+	If an external module does not follow the usual kernel style but
+	decides to spread files over several directories, then kbuild can
+	support this too.
+
+ Consider the following example:
+
+ |
+ +- src/complex_main.c
+ | +- hal/hardwareif.c
+ | +- hal/include/hardwareif.h
+ +- include/complex.h
+
+ To build a single module named complex.ko we then need the following
+ kbuild file:
+
+ Kbuild:
+ obj-m := complex.o
+ complex-y := src/complex_main.o
+ complex-y += src/hal/hardwareif.o
+
+ EXTRA_CFLAGS := -I$(src)/include
+		EXTRA_CFLAGS += -I$(src)/src/hal/include
+
+
+ kbuild knows how to handle .o files located in another directory -
+	although this is NOT recommended practice. The syntax is to specify
+ the directory relative to the directory where the Kbuild file is
+ located.
+
+ To find the .h files we have to explicitly tell kbuild where to look
+	for the .h files. When kbuild executes, the current directory is
+	always the root of the kernel tree (the argument to -C), and
+	therefore we have to tell kbuild how to find the .h files using
+	absolute paths.
+	$(src) specifies the absolute path to the directory where the
+	Kbuild file is located when it is being built as an external module.
+	Therefore -I$(src)/ is used to point out the directory of the Kbuild
+	file, and any additional paths are just appended.
=== 6. Module installation
diff --git a/Documentation/kdump/gdbmacros.txt b/Documentation/kdump/gdbmacros.txt
index bc1b9eb92ae..dcf5580380a 100644
--- a/Documentation/kdump/gdbmacros.txt
+++ b/Documentation/kdump/gdbmacros.txt
@@ -177,3 +177,25 @@ document trapinfo
'trapinfo <pid>' will tell you by which trap & possibly
addresthe kernel paniced.
end
+
+
+define dmesg
+ set $i = 0
+ set $end_idx = (log_end - 1) & (log_buf_len - 1)
+
+ while ($i < logged_chars)
+ set $idx = (log_end - 1 - logged_chars + $i) & (log_buf_len - 1)
+
+ if ($idx + 100 <= $end_idx) || \
+ ($end_idx <= $idx && $idx + 100 < log_buf_len)
+ printf "%.100s", &log_buf[$idx]
+ set $i = $i + 100
+ else
+ printf "%c", log_buf[$idx]
+ set $i = $i + 1
+ end
+ end
+end
+document dmesg
+ print the kernel ring buffer
+end
diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt
index 5f08f9ce604..212cf3c21ab 100644
--- a/Documentation/kdump/kdump.txt
+++ b/Documentation/kdump/kdump.txt
@@ -4,10 +4,10 @@ Documentation for kdump - the kexec-based crash dumping solution
DESIGN
======
-Kdump uses kexec to reboot to a second kernel whenever a dump needs to be taken.
-This second kernel is booted with very little memory. The first kernel reserves
-the section of memory that the second kernel uses. This ensures that on-going
-DMA from the first kernel does not corrupt the second kernel.
+Kdump uses kexec to reboot to a second kernel whenever a dump needs to be
+taken. This second kernel is booted with very little memory. The first kernel
+reserves the section of memory that the second kernel uses. This ensures that
+on-going DMA from the first kernel does not corrupt the second kernel.
All the necessary information about Core image is encoded in ELF format and
stored in reserved area of memory before crash. Physical address of start of
@@ -35,77 +35,82 @@ In the second kernel, "old memory" can be accessed in two ways.
SETUP
=====
-1) Download http://www.xmission.com/~ebiederm/files/kexec/kexec-tools-1.101.tar.gz
- and apply http://lse.sourceforge.net/kdump/patches/kexec-tools-1.101-kdump.patch
- and after that build the source.
+1) Download the upstream kexec-tools userspace package from
+ http://www.xmission.com/~ebiederm/files/kexec/kexec-tools-1.101.tar.gz.
-2) Download and build the appropriate (2.6.13-rc1 onwards) vanilla kernel.
+ Apply the latest consolidated kdump patch on top of kexec-tools-1.101
+   from http://lse.sourceforge.net/kdump/. This arrangement has been made
+   until all the userspace patches supporting kdump are integrated into
+   the upstream kexec-tools userspace.
+2) Download and build the appropriate (2.6.13-rc1 onwards) vanilla kernels.
Two kernels need to be built in order to get this feature working.
+   Following are the steps to configure the two kernels properly for the
+   kexec and kdump features:
- A) First kernel:
+ A) First kernel or regular kernel:
+ ----------------------------------
a) Enable "kexec system call" feature (in Processor type and features).
- CONFIG_KEXEC=y
- b) This kernel's physical load address should be the default value of
- 0x100000 (0x100000, 1 MB) (in Processor type and features).
- CONFIG_PHYSICAL_START=0x100000
- c) Enable "sysfs file system support" (in Pseudo filesystems).
- CONFIG_SYSFS=y
+ CONFIG_KEXEC=y
+ b) Enable "sysfs file system support" (in Pseudo filesystems).
+ CONFIG_SYSFS=y
+ c) make
d) Boot into first kernel with the command line parameter "crashkernel=Y@X".
Use appropriate values for X and Y. Y denotes how much memory to reserve
- for the second kernel, and X denotes at what physical address the reserved
- memory section starts. For example: "crashkernel=64M@16M".
-
- B) Second kernel:
- a) Enable "kernel crash dumps" feature (in Processor type and features).
- CONFIG_CRASH_DUMP=y
- b) Specify a suitable value for "Physical address where the kernel is
- loaded" (in Processor type and features). Typically this value
- should be same as X (See option d) above, e.g., 16 MB or 0x1000000.
- CONFIG_PHYSICAL_START=0x1000000
- c) Enable "/proc/vmcore support" (Optional, in Pseudo filesystems).
- CONFIG_PROC_VMCORE=y
- d) Disable SMP support and build a UP kernel (Until it is fixed).
- CONFIG_SMP=n
- e) Enable "Local APIC support on uniprocessors".
- CONFIG_X86_UP_APIC=y
- f) Enable "IO-APIC support on uniprocessors"
- CONFIG_X86_UP_IOAPIC=y
-
- Note: i) Options a) and b) depend upon "Configure standard kernel features
- (for small systems)" (under General setup).
- ii) Option a) also depends on CONFIG_HIGHMEM (under Processor
- type and features).
- iii) Both option a) and b) are under "Processor type and features".
-
-3) Boot into the first kernel. You are now ready to try out kexec-based crash
- dumps.
-
-4) Load the second kernel to be booted using:
+ for the second kernel, and X denotes at what physical address the
+ reserved memory section starts. For example: "crashkernel=64M@16M".
+
+
+ B) Second kernel or dump capture kernel:
+ ---------------------------------------
+ a) For i386 architecture enable Highmem support
+ CONFIG_HIGHMEM=y
+ b) Enable "kernel crash dumps" feature (under "Processor type and features")
+ CONFIG_CRASH_DUMP=y
+ c) Make sure a suitable value is set for "Physical address where the kernel
+ is loaded" (under "Processor type and features"). By default this value
+ is 0x1000000 (16MB) and it should be the same as X (see option d above),
+ e.g., 16 MB or 0x1000000.
+ CONFIG_PHYSICAL_START=0x1000000
+ d) Enable "/proc/vmcore support" (Optional, under "Pseudo filesystems").
+ CONFIG_PROC_VMCORE=y
+
+3) After booting to the regular kernel or first kernel, load the second kernel
+ using the following command:
kexec -p <second-kernel> --args-linux --elf32-core-headers
- --append="root=<root-dev> init 1 irqpoll"
-
- Note: i) <second-kernel> has to be a vmlinux image. bzImage will not work,
- as of now.
- ii) By default ELF headers are stored in ELF64 format. Option
- --elf32-core-headers forces generation of ELF32 headers. gdb can
- not open ELF64 headers on 32 bit systems. So creating ELF32
- headers can come handy for users who have got non-PAE systems and
- hence have memory less than 4GB.
- iii) Specify "irqpoll" as command line parameter. This reduces driver
- initialization failures in second kernel due to shared interrupts.
- iv) <root-dev> needs to be specified in a format corresponding to
- the root device name in the output of mount command.
- v) If you have built the drivers required to mount root file
- system as modules in <second-kernel>, then, specify
- --initrd=<initrd-for-second-kernel>.
-
-5) System reboots into the second kernel when a panic occurs. A module can be
- written to force the panic or "ALT-SysRq-c" can be used initiate a crash
- dump for testing purposes.
-
-6) Write out the dump file using
+ --append="root=<root-dev> init 1 irqpoll maxcpus=1"
+
+ Notes:
+ ======
+ i) <second-kernel> has to be a vmlinux image, i.e. an uncompressed ELF
+ image. bzImage will not work, as of now.
+ ii) --args-linux has to be specified because, when kexec loads an ELF
+ image, it needs to know that the arguments supplied are of Linux type.
+ iii) By default ELF headers are stored in ELF64 format to support systems
+ with more than 4GB memory. Option --elf32-core-headers forces generation
+ of ELF32 headers. This option exists because, as of now, gdb cannot
+ open a vmcore file with ELF64 headers on a 32-bit system. So ELF32
+ headers can be used on non-PAE systems, which have less than 4GB of
+ memory.
+ iv) Specify "irqpoll" as command line parameter. This reduces driver
+ initialization failures in second kernel due to shared interrupts.
+ v) <root-dev> needs to be specified in a format corresponding to the root
+ device name in the output of mount command.
+ vi) If you have built the drivers required to mount root file system as
+ modules in <second-kernel>, then, specify
+ --initrd=<initrd-for-second-kernel>.
+ vii) Specify maxcpus=1 because, if the panic in the first kernel happens
+ on a non-boot cpu, the second kernel does not seem to bring up all the
+ cpus. The other option is to always build the second kernel without
+ SMP support, i.e. CONFIG_SMP=n.
+
+4) After successfully loading the second kernel as above, if a panic occurs
+ the system reboots into the second kernel. A module can be written to force
+ the panic, or "ALT-SysRq-c" can be used to initiate a crash dump for testing
+ purposes.
+
+5) Once the second kernel has booted, write out the dump file using
cp /proc/vmcore <dump-file>
@@ -119,9 +124,9 @@ SETUP
Entire memory: dd if=/dev/oldmem of=oldmem.001
+
ANALYSIS
========
-
Limited analysis can be done using gdb on the dump file copied out of
/proc/vmcore. Use vmlinux built with -g and run
@@ -132,15 +137,19 @@ work fine.
Note: gdb cannot analyse core files generated in ELF64 format for i386.
+The latest "crash" (crash-4.0-2.18), as available on Dave Anderson's site
+http://people.redhat.com/~anderson/, works well with the kdump format.
+
+
TODO
====
-
1) Provide a kernel pages filtering mechanism so that core file size is not
insane on systems having huge memory banks.
-2) Modify "crash" tool to make it recognize this dump.
+2) A relocatable kernel can help avoid maintaining multiple kernels for
+ crashdump, as the same kernel as the first one could then be used to
+ capture the dump.
+
CONTACT
=======
-
Vivek Goyal (vgoyal@in.ibm.com)
Maneesh Soni (maneesh@in.ibm.com)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 61a56b100c6..fe11fccf7e4 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -475,10 +475,11 @@ running once the system is up.
See Documentation/block/as-iosched.txt and
Documentation/block/deadline-iosched.txt for details.
- elfcorehdr= [IA-32]
+ elfcorehdr= [IA-32, X86_64]
Specifies physical address of start of kernel core
- image elf header.
- See Documentation/kdump.txt for details.
+ image elf header. Generally the kexec loader will
+ pass this option to the capture kernel.
+ See Documentation/kdump/kdump.txt for details.
enforcing [SELINUX] Set initial enforcing status.
Format: {"0" | "1"}
@@ -832,7 +833,7 @@ running once the system is up.
mem=nopentium [BUGS=IA-32] Disable usage of 4MB pages for kernel
memory.
- memmap=exactmap [KNL,IA-32] Enable setting of an exact
+ memmap=exactmap [KNL,IA-32,X86_64] Enable setting of an exact
E820 memory map, as specified by the user.
Such memmap=exactmap lines can be constructed based on
BIOS output or other requirements. See the memmap=nn@ss
@@ -855,6 +856,49 @@ running once the system is up.
mga= [HW,DRM]
+ migration_cost=
+ [KNL,SMP] debug: override scheduler migration costs
+ Format: <level-1-usecs>,<level-2-usecs>,...
+ This debugging option can be used to override the
+ default scheduler migration cost matrix. The numbers
+ are indexed by 'CPU domain distance'.
+ E.g. migration_cost=1000,2000,3000 on an SMT NUMA
+ box will set up an intra-core migration cost of
+ 1 msec, an inter-core migration cost of 2 msecs,
+ and an inter-node migration cost of 3 msecs.
+
+ WARNING: using the wrong values here can break
+ scheduler performance, so it's only for scheduler
+ development purposes, not production environments.
+
+ migration_debug=
+ [KNL,SMP] migration cost auto-detect verbosity
+ Format=<0|1|2>
+ If a system's migration matrix reported at bootup
+ seems erroneous then this option can be used to
+ increase verbosity of the detection process.
+ We default to 0 (no extra messages), 1 will print
+ some more information, and 2 will be really
+ verbose (probably only useful if you also have a
+ serial console attached to the system).
+
+ migration_factor=
+ [KNL,SMP] multiply/divide migration costs by a factor
+ Format=<percent>
+ This debug option can be used to proportionally
+ increase or decrease the auto-detected migration
+ costs for all entries of the migration matrix.
+ E.g. migration_factor=150 will increase migration
+ costs by 50%. (and thus the scheduler will be less
+ eager to migrate cache-hot tasks)
+ migration_factor=80 will decrease migration costs
+ by 20%. (thus the scheduler will be more eager to
+ migrate tasks)
+
+ WARNING: using the wrong values here can break
+ scheduler performance, so it's only for scheduler
+ development purposes, not production environments.
+
mousedev.tap_time=
[MOUSE] Maximum time between finger touching and
leaving touchpad surface for touch to be considered
@@ -910,6 +954,14 @@ running once the system is up.
nfsroot= [NFS] nfs root filesystem for disk-less boxes.
See Documentation/nfsroot.txt.
+ nfs.callback_tcpport=
+ [NFS] set the TCP port on which the NFSv4 callback
+ channel should listen.
+
+ nfs.idmap_cache_timeout=
+ [NFS] set the maximum lifetime for idmapper cache
+ entries.
+
nmi_watchdog= [KNL,BUGS=IA-32] Debugging features for SMP kernels
no387 [BUGS=IA-32] Tells the kernel to use the 387 maths
@@ -990,6 +1042,8 @@ running once the system is up.
nowb [ARM]
+ nr_uarts= [SERIAL] maximum number of UARTs to be registered.
+
opl3= [HW,OSS]
Format: <io>
@@ -1168,6 +1222,10 @@ running once the system is up.
Limit processor to maximum C-state
max_cstate=9 overrides any DMI blacklist limit.
+ processor.nocst [HW,ACPI]
+ Ignore the _CST method to determine C-states,
+ instead using the legacy FADT method
+
prompt_ramdisk= [RAM] List of RAM disks to prompt for floppy disk
before loading.
See Documentation/ramdisk.txt.
diff --git a/Documentation/keys-request-key.txt b/Documentation/keys-request-key.txt
index 5f2b9c5edbb..22488d79116 100644
--- a/Documentation/keys-request-key.txt
+++ b/Documentation/keys-request-key.txt
@@ -56,10 +56,12 @@ A request proceeds in the following manner:
(4) request_key() then forks and executes /sbin/request-key with a new session
keyring that contains a link to auth key V.
- (5) /sbin/request-key execs an appropriate program to perform the actual
+ (5) /sbin/request-key assumes the authority associated with key U.
+
+ (6) /sbin/request-key execs an appropriate program to perform the actual
instantiation.
- (6) The program may want to access another key from A's context (say a
+ (7) The program may want to access another key from A's context (say a
Kerberos TGT key). It just requests the appropriate key, and the keyring
search notes that the session keyring has auth key V in its bottom level.
@@ -67,19 +69,19 @@ A request proceeds in the following manner:
UID, GID, groups and security info of process A as if it was process A,
and come up with key W.
- (7) The program then does what it must to get the data with which to
+ (8) The program then does what it must to get the data with which to
instantiate key U, using key W as a reference (perhaps it contacts a
Kerberos server using the TGT) and then instantiates key U.
- (8) Upon instantiating key U, auth key V is automatically revoked so that it
+ (9) Upon instantiating key U, auth key V is automatically revoked so that it
may not be used again.
- (9) The program then exits 0 and request_key() deletes key V and returns key
+(10) The program then exits 0 and request_key() deletes key V and returns key
U to the caller.
-This also extends further. If key W (step 5 above) didn't exist, key W would be
-created uninstantiated, another auth key (X) would be created [as per step 3]
-and another copy of /sbin/request-key spawned [as per step 4]; but the context
+This also extends further. If key W (step 7 above) didn't exist, key W would be
+created uninstantiated, another auth key (X) would be created (as per step 3)
+and another copy of /sbin/request-key spawned (as per step 4); but the context
specified by auth key X will still be process A, as it was in auth key V.
This is because process A's keyrings can't simply be attached to
@@ -138,8 +140,8 @@ until one succeeds:
(3) The process's session keyring is searched.
- (4) If the process has a request_key() authorisation key in its session
- keyring then:
+ (4) If the process has assumed the authority associated with a request_key()
+ authorisation key then:
(a) If extant, the calling process's thread keyring is searched.
diff --git a/Documentation/keys.txt b/Documentation/keys.txt
index 31154882000..aaa01b0e3ee 100644
--- a/Documentation/keys.txt
+++ b/Documentation/keys.txt
@@ -308,6 +308,8 @@ process making the call:
KEY_SPEC_USER_KEYRING -4 UID-specific keyring
KEY_SPEC_USER_SESSION_KEYRING -5 UID-session keyring
KEY_SPEC_GROUP_KEYRING -6 GID-specific keyring
+ KEY_SPEC_REQKEY_AUTH_KEY -7 assumed request_key()
+ authorisation key
The main syscalls are:
@@ -498,7 +500,11 @@ The keyctl syscall functions are:
keyring is full, error ENFILE will result.
The link procedure checks the nesting of the keyrings, returning ELOOP if
- it appears to deep or EDEADLK if the link would introduce a cycle.
+ it appears too deep or EDEADLK if the link would introduce a cycle.
+
+ Any links within the keyring to keys that match the new key in terms of
+ type and description will be discarded from the keyring as the new one is
+ added.
(*) Unlink a key or keyring from another keyring:
@@ -628,6 +634,41 @@ The keyctl syscall functions are:
there is one, otherwise the user default session keyring.
+ (*) Set the timeout on a key.
+
+ long keyctl(KEYCTL_SET_TIMEOUT, key_serial_t key, unsigned timeout);
+
+ This sets or clears the timeout on a key. The timeout can be 0 to clear
+ the timeout or a number of seconds to set the expiry time that far into
+ the future.
+
+ The process must have attribute modification access on a key to set its
+ timeout. Timeouts may not be set with this function on negative, revoked
+ or expired keys.
+
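+     A minimal usage sketch, assuming the raw syscall interface (__NR_keyctl)
+     and a key ID obtained earlier from add_key() or request_key(); the
+     program name and argument handling are illustrative only:
+
+	#include <stdio.h>
+	#include <stdlib.h>
+	#include <unistd.h>
+	#include <sys/syscall.h>
+	#include <linux/keyctl.h>
+
+	int main(int argc, char **argv)
+	{
+		long key, ret;
+
+		if (argc != 3) {
+			fprintf(stderr, "usage: %s <key-id> <timeout-secs>\n", argv[0]);
+			return 1;
+		}
+		key = atol(argv[1]);
+		/* a timeout of 0 clears any expiry time set on the key */
+		ret = syscall(__NR_keyctl, KEYCTL_SET_TIMEOUT, key,
+			      (unsigned) atol(argv[2]));
+		if (ret < 0)
+			perror("keyctl(KEYCTL_SET_TIMEOUT)");
+		return ret < 0 ? 1 : 0;
+	}
+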
+
+ (*) Assume the authority granted to instantiate a key
+
+ long keyctl(KEYCTL_ASSUME_AUTHORITY, key_serial_t key);
+
+ This assumes or divests the authority required to instantiate the
+ specified key. Authority can only be assumed if the thread has the
+ authorisation key associated with the specified key in its keyrings
+ somewhere.
+
+ Once authority is assumed, searches for keys will also search the
+ requester's keyrings using the requester's security label, UID, GID and
+ groups.
+
+ If the requested authority is unavailable, error EPERM will be returned,
+ likewise if the authority has been revoked because the target key is
+ already instantiated.
+
+ If the specified key is 0, then any assumed authority will be divested.
+
+ The assumed authoritative key is inherited across fork and exec.
+
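+     A minimal sketch of how an instantiation helper (for example a program
+     run from /sbin/request-key) might assume and later divest this
+     authority through the raw syscall; the helper function name and the way
+     the authorisation key ID reaches the program are assumptions:
+
+	#include <unistd.h>
+	#include <sys/syscall.h>
+	#include <linux/keyctl.h>
+
+	/* Assume the authority tied to 'auth_key'; pass 0 to divest it again. */
+	static long assume_authority(long auth_key)
+	{
+		return syscall(__NR_keyctl, KEYCTL_ASSUME_AUTHORITY, auth_key);
+	}
+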
+
===============
KERNEL SERVICES
===============
@@ -860,24 +901,6 @@ The structure has a number of fields, some of which are mandatory:
It is safe to sleep in this method.
- (*) int (*duplicate)(struct key *key, const struct key *source);
-
- If this type of key can be duplicated, then this method should be
- provided. It is called to copy the payload attached to the source into the
- new key. The data length on the new key will have been updated and the
- quota adjusted already.
-
- This method will be called with the source key's semaphore read-locked to
- prevent its payload from being changed, thus RCU constraints need not be
- applied to the source key.
-
- This method does not have to lock the destination key in order to attach a
- payload. The fact that KEY_FLAG_INSTANTIATED is not set in key->flags
- prevents anything else from gaining access to the key.
-
- It is safe to sleep in this method.
-
-
(*) int (*update)(struct key *key, const void *data, size_t datalen);
If this type of key can be updated, then this method should be provided.
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 0541fe1de70..0ea5a0c6e82 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -411,7 +411,8 @@ int init_module(void)
printk("Couldn't find %s to plant kprobe\n", "do_fork");
return -1;
}
- if ((ret = register_kprobe(&kp) < 0)) {
+ ret = register_kprobe(&kp);
+ if (ret < 0) {
printk("register_kprobe failed, returned %d\n", ret);
return -1;
}
diff --git a/Documentation/laptop-mode.txt b/Documentation/laptop-mode.txt
index dc4e810afdc..f42e4c08935 100644
--- a/Documentation/laptop-mode.txt
+++ b/Documentation/laptop-mode.txt
@@ -3,7 +3,7 @@ How to conserve battery power using laptop-mode
Document Author: Bart Samwel (bart@samwel.tk)
Date created: January 2, 2004
-Last modified: July 10, 2004
+Last modified: December 06, 2004
Introduction
------------
@@ -33,7 +33,7 @@ or anything. Simply install all the files included in this document, and
laptop mode will automatically be started when you're on battery. For
your convenience, a tarball containing an installer can be downloaded at:
-http://www.xs4all.nl/~bsamwel/laptop_mode/tools
+http://www.xs4all.nl/~bsamwel/laptop_mode/tools/
To configure laptop mode, you need to edit the configuration file, which is
located in /etc/default/laptop-mode on Debian-based systems, or in
@@ -912,7 +912,7 @@ void usage()
exit(0);
}
-int main(int ac, char **av)
+int main(int argc, char **argv)
{
int fd;
char *disk = 0;
diff --git a/Documentation/locks.txt b/Documentation/locks.txt
index ce1be79edfb..e3b402ef33b 100644
--- a/Documentation/locks.txt
+++ b/Documentation/locks.txt
@@ -65,20 +65,3 @@ The default is to disallow mandatory locking. The intention is that
mandatory locking only be enabled on a local filesystem as the specific need
arises.
-Until an updated version of mount(8) becomes available you may have to apply
-this patch to the mount sources (based on the version distributed with Rick
-Faith's util-linux-2.5 package):
-
-*** mount.c.orig Sat Jun 8 09:14:31 1996
---- mount.c Sat Jun 8 09:13:02 1996
-***************
-*** 100,105 ****
---- 100,107 ----
- { "noauto", 0, MS_NOAUTO }, /* Can only be mounted explicitly */
- { "user", 0, MS_USER }, /* Allow ordinary user to mount */
- { "nouser", 1, MS_USER }, /* Forbid ordinary user to mount */
-+ { "mand", 0, MS_MANDLOCK }, /* Allow mandatory locks on this FS */
-+ { "nomand", 1, MS_MANDLOCK }, /* Forbid mandatory locks on this FS */
- /* add new options here */
- #ifdef MS_NOSUB
- { "sub", 1, MS_NOSUB }, /* allow submounts */
diff --git a/Documentation/md.txt b/Documentation/md.txt
index 23e6cce40f9..03a13c462cf 100644
--- a/Documentation/md.txt
+++ b/Documentation/md.txt
@@ -51,6 +51,30 @@ superblock can be autodetected and run at boot time.
The kernel parameter "raid=partitionable" (or "raid=part") means
that all auto-detected arrays are assembled as partitionable.
+Boot time assembly of degraded/dirty arrays
+-------------------------------------------
+
+If a raid5 or raid6 array is both dirty and degraded, it could have
+undetectable data corruption. This is because the fact that it is
+'dirty' means that the parity cannot be trusted, and the fact that it
+is degraded means that some datablocks are missing and cannot reliably
+be reconstructed (due to no parity).
+
+For this reason, md will normally refuse to start such an array. This
+requires the sysadmin to take action to explicitly start the array
+despite possible corruption. This is normally done with
+ mdadm --assemble --force ....
+
+This option is not really available if the array has the root
+filesystem on it. In order to support booting from such an
+array, md supports a module parameter "start_dirty_degraded" which,
+when set to 1, bypasses the checks and allows dirty degraded
+arrays to be started.
+
+So, to boot with a root filesystem of a dirty degraded raid[56], use
+
+ md-mod.start_dirty_degraded=1
+
Superblock formats
------------------
@@ -141,6 +165,70 @@ All md devices contain:
in a fully functional array. If this is not yet known, the file
will be empty. If an array is being resized (not currently
possible) this will contain the larger of the old and new sizes.
+ Some raid levels (RAID1) allow this value to be set while the
+ array is active. This will reconfigure the array. Otherwise
+ it can only be set while assembling an array.
+
+ chunk_size
+ This is the size in bytes for 'chunks' and is only relevant to
+ raid levels that involve striping (1,4,5,6,10). The address space
+ of the array is conceptually divided into chunks and consecutive
+ chunks are striped onto neighbouring devices.
+ The size should be at least PAGE_SIZE (4k) and should be a power
+ of 2. This can only be set while assembling an array.
+
+ component_size
+ For arrays with data redundancy (i.e. not raid0, linear, faulty,
+ multipath), all components must be the same size - or at least
+ there must be a size that they all provide space for. This is a key
+ part of the geometry of the array. It is measured in sectors
+ and can be read from here. Writing to this value may resize
+ the array if the personality supports it (raid1, raid5, raid6),
+ and if the component drives are large enough.
+
+ metadata_version
+ This indicates the format that is being used to record metadata
+ about the array. It can be 0.90 (traditional format), 1.0, 1.1,
+ 1.2 (newer format in varying locations) or "none" indicating that
+ the kernel isn't managing metadata at all.
+
+ level
+ The raid 'level' for this array. The name will often (but not
+ always) be the same as the name of the module that implements the
+ level. To be auto-loaded the module must have an alias
+ md-$LEVEL e.g. md-raid5
+ This can be written only while the array is being assembled, not
+ after it is started.
+
+ new_dev
+ This file can be written but not read. The value written should
+ be a block device number as major:minor. e.g. 8:0
+ This will cause that device to be attached to the array, if it is
+ available. It will then appear at md/dev-XXX (depending on the
+ name of the device) and further configuration is then possible.
+
+ sync_speed_min
+ sync_speed_max
+ These are similar to /proc/sys/dev/raid/speed_limit_{min,max};
+ however, they only apply to the particular array.
+ If no value has been written to these, or if the word 'system'
+ is written, then the system-wide value is used. If a value,
+ in kibibytes-per-second, is written, then it is used.
+ When the files are read, they show the currently active value
+ followed by "(local)" or "(system)" depending on whether it is
+ a locally set or system-wide value.
+
+ sync_completed
+ This shows the number of sectors that have been completed of
+ whatever the current sync_action is, followed by the number of
+ sectors in total that could need to be processed. The two
+ numbers are separated by a '/' thus effectively showing one
+ value, a fraction of the process that is complete.
+
+ sync_speed
+ This shows the current actual speed, in K/sec, of the current
+ sync_action. It is averaged over the last 30 seconds.
+
As component devices are added to an md array, they appear in the 'md'
directory as new directories named
@@ -167,6 +255,38 @@ Each directory contains:
of being recoverred to
This list make grow in future.
+ errors
+ An approximate count of read errors that have been detected on
+ this device but have not caused the device to be evicted from
+ the array (either because they were corrected or because they
+ happened while the array was read-only). When using version-1
+ metadata, this value persists across restarts of the array.
+
+ This value can be written while assembling an array thus
+ providing an ongoing count for arrays with metadata managed by
+ userspace.
+
+ slot
+ This gives the role that the device has in the array. It will
+ either be 'none' if the device is not active in the array
+ (i.e. is a spare or has failed) or an integer less than the
+ 'raid_disks' number for the array indicating which position
+ it currently fills. This can only be set while assembling an
+ array. A device for which this is set is assumed to be working.
+
+ offset
+ This gives the location in the device (in sectors from the
+ start) where data from the array will be stored. Any part of
+ the device before this offset is not touched, unless it is
+ used for storing metadata (Formats 1.1 and 1.2).
+
+ size
+ The amount of the device, after the offset, that can be used
+ for storage of data. This will normally be the same as the
+ component_size. This can be written while assembling an
+ array. If a value less than the current component_size is
+ written, component_size will be reduced to this value.
+
An active md device will also contain and entry for each active device
in the array. These are named
diff --git a/Documentation/mutex-design.txt b/Documentation/mutex-design.txt
new file mode 100644
index 00000000000..cbf79881a41
--- /dev/null
+++ b/Documentation/mutex-design.txt
@@ -0,0 +1,135 @@
+Generic Mutex Subsystem
+
+started by Ingo Molnar <mingo@redhat.com>
+
+ "Why on earth do we need a new mutex subsystem, and what's wrong
+ with semaphores?"
+
+firstly, there's nothing wrong with semaphores. But if the simpler
+mutex semantics are sufficient for your code, then there are a couple
+of advantages of mutexes:
+
+ - 'struct mutex' is smaller on most architectures: e.g. on x86,
+ 'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes.
+ A smaller structure size means less RAM footprint, and better
+ CPU-cache utilization.
+
+ - tighter code. On x86 I get the following .text sizes when
+ switching all mutex-alike semaphores in the kernel to the mutex
+ subsystem:
+
+ text data bss dec hex filename
+ 3280380 868188 396860 4545428 455b94 vmlinux-semaphore
+ 3255329 865296 396732 4517357 44eded vmlinux-mutex
+
+ that's 25051 bytes of code saved, or a 0.76% win - off the hottest
+ codepaths of the kernel. (The .data savings are 2892 bytes, or 0.33%)
+ Smaller code means better icache footprint, which is one of the
+ major optimization goals in the Linux kernel currently.
+
+ - the mutex subsystem is slightly faster and has better scalability for
+ contended workloads. On an 8-way x86 system, running a mutex-based
+ kernel and testing creat+unlink+close (of separate, per-task files)
+ in /tmp with 16 parallel tasks, the average number of ops/sec is:
+
+ Semaphores: Mutexes:
+
+ $ ./test-mutex V 16 10 $ ./test-mutex V 16 10
+ 8 CPUs, running 16 tasks. 8 CPUs, running 16 tasks.
+ checking VFS performance. checking VFS performance.
+ avg loops/sec: 34713 avg loops/sec: 84153
+ CPU utilization: 63% CPU utilization: 22%
+
+ i.e. in this workload, the mutex based kernel was 2.4 times faster
+ than the semaphore based kernel, _and_ it also had 2.8 times less CPU
+ utilization. (In terms of 'ops per CPU cycle', the semaphore kernel
+ performed 551 ops/sec per 1% of CPU time used, while the mutex kernel
+ performed 3825 ops/sec per 1% of CPU time used - it was 6.9 times
+ more efficient.)
+
+ the scalability difference is visible even on a 2-way P4 HT box:
+
+ Semaphores: Mutexes:
+
+ $ ./test-mutex V 16 10 $ ./test-mutex V 16 10
+ 4 CPUs, running 16 tasks. 8 CPUs, running 16 tasks.
+ checking VFS performance. checking VFS performance.
+ avg loops/sec: 127659 avg loops/sec: 181082
+ CPU utilization: 100% CPU utilization: 34%
+
+ (the straight performance advantage of mutexes is 41%, the per-cycle
+ efficiency of mutexes is 4.1 times better.)
+
+ - there are no fastpath tradeoffs, the mutex fastpath is just as tight
+ as the semaphore fastpath. On x86, the locking fastpath is 2
+ instructions:
+
+ c0377ccb <mutex_lock>:
+ c0377ccb: f0 ff 08 lock decl (%eax)
+ c0377cce: 78 0e js c0377cde <.text.lock.mutex>
+ c0377cd0: c3 ret
+
+ the unlocking fastpath is equally tight:
+
+ c0377cd1 <mutex_unlock>:
+ c0377cd1: f0 ff 00 lock incl (%eax)
+ c0377cd4: 7e 0f jle c0377ce5 <.text.lock.mutex+0x7>
+ c0377cd6: c3 ret
+
+ - 'struct mutex' semantics are well-defined and are enforced if
+ CONFIG_DEBUG_MUTEXES is turned on. Semaphores on the other hand have
+ virtually no debugging code or instrumentation. The mutex subsystem
+ checks and enforces the following rules:
+
+ * - only one task can hold the mutex at a time
+ * - only the owner can unlock the mutex
+ * - multiple unlocks are not permitted
+ * - recursive locking is not permitted
+ * - a mutex object must be initialized via the API
+ * - a mutex object must not be initialized via memset or copying
+ * - task may not exit with mutex held
+ * - memory areas where held locks reside must not be freed
+ * - held mutexes must not be reinitialized
+ * - mutexes may not be used in irq contexts
+
+ furthermore, there are also convenience features in the debugging
+ code:
+
+ * - uses symbolic names of mutexes, whenever they are printed in debug output
+ * - point-of-acquire tracking, symbolic lookup of function names
+ * - list of all locks held in the system, printout of them
+ * - owner tracking
+ * - detects self-recursing locks and prints out all relevant info
+ * - detects multi-task circular deadlocks and prints out all affected
+ * locks and tasks (and only those tasks)
+
+Disadvantages
+-------------
+
+The stricter mutex API means you cannot use mutexes the same way you
+can use semaphores: e.g. they cannot be used from an interrupt context,
+nor can they be unlocked from a different context than the one which
+acquired them. [ I'm not aware of any other (e.g. performance)
+disadvantages from using mutexes at the moment, please let me know if
+you find any. ]
+
+Implementation of mutexes
+-------------------------
+
+'struct mutex' is the new mutex type, defined in include/linux/mutex.h
+and implemented in kernel/mutex.c. It is a counter-based mutex with a
+spinlock and a wait-list. The counter has 3 states: 1 for "unlocked",
+0 for "locked" and negative numbers (usually -1) for "locked, potential
+waiters queued".
+
+the APIs of 'struct mutex' have been streamlined:
+
+ DEFINE_MUTEX(name);
+
+ mutex_init(mutex);
+
+ void mutex_lock(struct mutex *lock);
+ int mutex_lock_interruptible(struct mutex *lock);
+ int mutex_trylock(struct mutex *lock);
+ void mutex_unlock(struct mutex *lock);
+ int mutex_is_locked(struct mutex *lock);
+
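+A minimal usage sketch of the API above (the data being protected and the
+function name are illustrative assumptions, not taken from the text above):
+
+	#include <linux/mutex.h>
+
+	static DEFINE_MUTEX(counter_lock);	/* statically initialized mutex */
+	static int counter;
+
+	/* may sleep, so only usable from process context - never from irqs */
+	int counter_inc(void)
+	{
+		int ret = mutex_lock_interruptible(&counter_lock);
+
+		if (ret)
+			return ret;	/* interrupted by a signal */
+		ret = ++counter;
+		mutex_unlock(&counter_lock);	/* only the owner may unlock */
+		return ret;
+	}
+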
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index b0fe41da007..8d8b4e5ea18 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -945,7 +945,6 @@ bond0 Link encap:Ethernet HWaddr 00:C0:F0:1F:37:B4
collisions:0 txqueuelen:0
eth0 Link encap:Ethernet HWaddr 00:C0:F0:1F:37:B4
- inet addr:XXX.XXX.XXX.YYY Bcast:XXX.XXX.XXX.255 Mask:255.255.252.0
UP BROADCAST RUNNING SLAVE MULTICAST MTU:1500 Metric:1
RX packets:3573025 errors:0 dropped:0 overruns:0 frame:0
TX packets:1643167 errors:1 dropped:0 overruns:1 carrier:0
@@ -953,7 +952,6 @@ eth0 Link encap:Ethernet HWaddr 00:C0:F0:1F:37:B4
Interrupt:10 Base address:0x1080
eth1 Link encap:Ethernet HWaddr 00:C0:F0:1F:37:B4
- inet addr:XXX.XXX.XXX.YYY Bcast:XXX.XXX.XXX.255 Mask:255.255.252.0
UP BROADCAST RUNNING SLAVE MULTICAST MTU:1500 Metric:1
RX packets:3651769 errors:0 dropped:0 overruns:0 frame:0
TX packets:1643480 errors:0 dropped:0 overruns:0 carrier:0
diff --git a/Documentation/networking/sk98lin.txt b/Documentation/networking/sk98lin.txt
index 851fc97bb22..f9d979ee952 100644
--- a/Documentation/networking/sk98lin.txt
+++ b/Documentation/networking/sk98lin.txt
@@ -245,7 +245,7 @@ Default: Both
This parameters is only relevant if auto-negotiation for this port is
not set to "Sense". If auto-negotiation is set to "On", all three values
are possible. If it is set to "Off", only "Full" and "Half" are allowed.
-This parameter is usefull if your link partner does not support all
+This parameter is useful if your link partner does not support all
possible combinations.
Flow Control
diff --git a/Documentation/oops-tracing.txt b/Documentation/oops-tracing.txt
index 05960f8a748..2503404ae5c 100644
--- a/Documentation/oops-tracing.txt
+++ b/Documentation/oops-tracing.txt
@@ -41,11 +41,9 @@ the disk is not available then you have three options :-
run a null modem to a second machine and capture the output there
using your favourite communication program. Minicom works well.
-(3) Patch the kernel with one of the crash dump patches. These save
- data to a floppy disk or video rom or a swap partition. None of
- these are standard kernel patches so you have to find and apply
- them yourself. Search kernel archives for kmsgdump, lkcd and
- oops+smram.
+(3) Use Kdump (see Documentation/kdump/kdump.txt),
+ and extract the kernel ring buffer from old memory using the dmesg
+ gdbmacro in Documentation/kdump/gdbmacros.txt.
Full Information
diff --git a/Documentation/pci-error-recovery.txt b/Documentation/pci-error-recovery.txt
new file mode 100644
index 00000000000..d089967e494
--- /dev/null
+++ b/Documentation/pci-error-recovery.txt
@@ -0,0 +1,246 @@
+
+ PCI Error Recovery
+ ------------------
+ May 31, 2005
+
+ Current document maintainer:
+ Linas Vepstas <linas@austin.ibm.com>
+
+
+Some PCI bus controllers are able to detect certain "hard" PCI errors
+on the bus, such as parity errors on the data and address busses, as
+well as SERR and PERR errors. These chipsets are then able to disable
+I/O to/from the affected device, so that, for example, a bad DMA
+address doesn't end up corrupting system memory. These same chipsets
+are also able to reset the affected PCI device, and return it to
+working condition. This document describes a generic API for
+performing error recovery.
+
+The core idea is that after a PCI error has been detected, there must
+be a way for the kernel to coordinate with all affected device drivers
+so that the pci card can be made operational again, possibly after
+performing a full electrical #RST of the PCI card. The API below
+allows device drivers to be notified of PCI
+errors, and to be notified of, and respond to, a reset sequence.
+
+Preliminary sketch of API, cut-n-pasted-n-modified email from
+Ben Herrenschmidt, circa 5 april 2005
+
+The error recovery API support is exposed to the driver in the form of
+a structure of function pointers pointed to by a new field in struct
+pci_driver. The absence of this pointer in pci_driver denotes a
+"non-aware" driver; behaviour on these is platform dependent.
+Platforms like ppc64 can try to simulate pci hotplug remove/add.
+
+The definition of "pci_error_token" is not covered here. It is based on
+Seto's work on the synchronous error detection. We still need to define
+functions for extracting information out of an opaque error token. This is
+separate from this API.
+
+This structure has the form:
+
+struct pci_error_handlers
+{
+ int (*error_detected)(struct pci_dev *dev, pci_error_token error);
+ int (*mmio_enabled)(struct pci_dev *dev);
+ int (*resume)(struct pci_dev *dev);
+ int (*link_reset)(struct pci_dev *dev);
+ int (*slot_reset)(struct pci_dev *dev);
+};
+
+A driver doesn't have to implement all of these callbacks. The
+only mandatory one is error_detected(). If a callback is not
+implemented, the corresponding feature is considered unsupported.
+For example, if mmio_enabled() and resume() aren't there, then the
+driver is assumed not to do any direct recovery and to require
+a reset. If link_reset() is not implemented, the card is assumed not
+to care about link resets, in which case, if recovery is supported,
+the core can try recovery (but not slot_reset() unless it really did
+reset the slot). If slot_reset() is not supported, link_reset() can
+be called instead on a slot reset.
+
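+As a hedged sketch of how a driver might wire these callbacks up, using the
+structure defined above and the result codes described below; the
+"err_handler" field in struct pci_driver and the driver-local function names
+are illustrative assumptions, not part of this preliminary API description:
+
+	static int foo_error_detected(struct pci_dev *dev, pci_error_token error)
+	{
+		/* quiesce: stop queueing new I/O, but do not touch the device */
+		return PCIERR_RESULT_NEED_RESET;
+	}
+
+	static int foo_slot_reset(struct pci_dev *dev)
+	{
+		/* re-initialize the hardware, but do not restart I/O yet */
+		return PCIERR_RESULT_RECOVERED;
+	}
+
+	static int foo_resume(struct pci_dev *dev)
+	{
+		/* restart normal I/O processing */
+		return 0;
+	}
+
+	static struct pci_error_handlers foo_err_handlers = {
+		.error_detected	= foo_error_detected,
+		.slot_reset	= foo_slot_reset,
+		.resume		= foo_resume,
+	};
+
+	static struct pci_driver foo_driver = {
+		.name		= "foo",
+		/* ...id_table, probe, remove... */
+		.err_handler	= &foo_err_handlers,	/* assumed field name */
+	};
+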
+At first, the call will always be:
+
+ 1) error_detected()
+
+ Error detected. This is sent once after an error has been detected. At
+this point, the device might not be accessible anymore depending on the
+platform (the slot will be isolated on ppc64). The driver may already
+have "noticed" the error because of a failing IO, but this is the proper
+"synchronisation point", that is, it gives a chance to the driver to
+cleanup, waiting for pending stuff (timers, whatever, etc...) to
+complete; it can take semaphores, schedule, etc... everything but touch
+the device. Within this function and after it returns, the driver
+shouldn't do any new IOs. Called in task context. This is sort of a
+"quiesce" point. See note about interrupts at the end of this doc.
+
+ Result codes:
+ - PCIERR_RESULT_CAN_RECOVER:
+ Driver returns this if it thinks it might be able to recover
+ the HW by just banging IOs or if it wants to be given
+ a chance to extract some diagnostic information (see
+ below).
+ - PCIERR_RESULT_NEED_RESET:
+ Driver returns this if it thinks it can't recover unless the
+ slot is reset.
+ - PCIERR_RESULT_DISCONNECT:
+ Return this if driver thinks it won't recover at all,
+ (this will detach the driver ? or just leave it
+ dangling ? to be decided)
+
+So at this point, we have called error_detected() for all drivers
+on the segment that had the error. On ppc64, the slot is isolated. What
+happens now typically depends on the result from the drivers. If all
+drivers on the segment/slot return PCIERR_RESULT_CAN_RECOVER, we would
+re-enable IOs on the slot (or do nothing special if the platform doesn't
+isolate slots) and call 2). If not and we can reset slots, we go to 4),
+if neither, we have a dead slot. If it's a hotplug slot, we might
+"simulate" reset by triggering HW unplug/replug though.
+
+>>> Current ppc64 implementation assumes that a device driver will
+>>> *not* schedule or semaphore in this routine; the current ppc64
+>>> implementation uses one kernel thread to notify all devices;
+>>> thus, if one device sleeps/schedules, all devices are affected.
+>>> Doing better requires complex multi-threaded logic in the error
+>>> recovery implementation (e.g. waiting for all notification threads
+>>> to "join" before proceeding with recovery.) This seems excessively
+>>> complex and not worth implementing.
+
+>>> The current ppc64 implementation doesn't much care if the device
+>>> attempts i/o at this point, or not. I/O's will fail, returning
+>>> a value of 0xff on read, and writes will be dropped. If the device
+>>> driver attempts more than 10K I/O's to a frozen adapter, it will
+>>> assume that the device driver has gone into an infinite loop, and
+>>> it will panic the kernel.
+
+ 2) mmio_enabled()
+
+ This is the "early recovery" call. IOs are allowed again, but DMA is
+not (hrm... to be discussed, I prefer not), with some restrictions. This
+is NOT a callback for the driver to start operations again, only to
+peek/poke at the device, extract diagnostic information, if any, and
+eventually do things like trigger a device local reset or some such,
+but not restart operations. This is sent if all drivers on a segment
+agree that they can try to recover and no automatic link reset was
+performed by the HW. If the platform can't just re-enable IOs without
+a slot reset or a link reset, it doesn't call this callback and goes
+directly to 3) or 4). All IOs should be done _synchronously_ from
+within this callback, errors triggered by them will be returned via
+the normal pci_check_whatever() api, no new error_detected() callback
+will be issued due to an error happening here. However, such an error
+might cause IOs to be re-blocked for the whole segment, and thus
+invalidate the recovery that other devices on the same segment might
+have done, forcing the whole segment into one of the next states,
+that is link reset or slot reset.
+
+ Result codes:
+ - PCIERR_RESULT_RECOVERED
+ Driver returns this if it thinks the device is fully
+ functional and thinks it is ready to start
+ normal driver operations again. There is no
+ guarantee that the driver will actually be
+ allowed to proceed, as another driver on the
+ same segment might have failed and thus triggered a
+ slot reset on platforms that support it.
+
+ - PCIERR_RESULT_NEED_RESET
+ Driver returns this if it thinks the device is not
+ recoverable in its current state and it needs a slot
+ reset to proceed.
+
+ - PCIERR_RESULT_DISCONNECT
+ Same as above. Total failure, no recovery even after
+ reset; driver is dead. (To be defined more precisely)
+
+>>> The current ppc64 implementation does not implement this callback.
+
+ 3) link_reset()
+
+ This is called after the link has been reset. This is typically
+a PCI Express specific state at this point and is done whenever a
+non-fatal error has been detected that can be "solved" by resetting
+the link. This call informs the driver of the reset and the driver
+should check if the device appears to be in working condition.
+This function acts a bit like 2) mmio_enabled(), in that the driver
+is not supposed to restart normal driver I/O operations right away.
+Instead, it should just "probe" the device to check its recoverability
+status. If all is right, then the core will call resume() once all
+drivers have ack'd link_reset().
+
+ Result codes:
+ (identical to mmio_enabled)
+
+>>> The current ppc64 implementation does not implement this callback.
+
+ 4) slot_reset()
+
+ This is called after the slot has been soft or hard reset by the
+platform. A soft reset consists of asserting the adapter #RST line
+and then restoring the PCI BARs and PCI configuration header. If the
+platform supports PCI hotplug, then it might instead perform a hard
+reset by toggling power on the slot off/on. This call gives drivers
+the chance to re-initialize the hardware (re-download firmware, etc.),
+but drivers shouldn't restart normal I/O processing operations at
+this point. (See note about interrupts; interrupts aren't guaranteed
+to be delivered until the resume() callback has been called). If all
+device drivers report success on this callback, the platform will call
+resume() to complete the error handling and let the driver restart
+normal I/O processing.
+
+A driver can still return a critical failure for this function if
+it can't get the device operational after reset. If the platform
+previously tried a soft reset, it might now try a hard reset (power
+cycle) and then call slot_reset() again. If the device still can't
+be recovered, there is nothing more that can be done; the platform
+will typically report a "permanent failure" in such a case. The
+device will be considered "dead" in this case.
+
+ Result codes:
+ - PCIERR_RESULT_DISCONNECT
+ Same as above.
+
+>>> The current ppc64 implementation does not try a power-cycle reset
+>>> if the driver returned PCIERR_RESULT_DISCONNECT. However, it should.
+
+ 5) resume()
+
+ This is called if all drivers on the segment have returned
+PCIERR_RESULT_RECOVERED from one of the 3 previous callbacks.
+That basically tells the driver to restart activity, that everything
+is back and running. No result code is taken into account here. If
+a new error happens, it will restart a new error handling process.
+
+That's it. I think this covers all the possibilities. The way those
+callbacks are called is platform policy. A platform with no slot reset
+capability for example may want to just "ignore" drivers that can't
+recover (disconnect them) and try to let other cards on the same segment
+recover. Keep in mind that in most real life cases, though, there will
+be only one driver per segment.
+
+Now, there is a note about interrupts. If you get an interrupt and your
+device is dead or has been isolated, there is a problem :)
+
+After much thinking, I decided to leave that to the platform. That is,
+the recovery API only specifies that:
+
+ - There is no guarantee that interrupt delivery can proceed from any
+device on the segment starting from the error detection and until the
+restart callback is sent, at which point interrupts are expected to be
+fully operational.
+
+ - There is no guarantee that interrupt delivery is stopped, that is, a
+driver that gets an interrupt after detecting an error, or that detects
+an error within the interrupt handler such that it prevents proper
+ack'ing of the interrupt (and thus removal of the source) should just
+return IRQ_NOTHANDLED. It's up to the platform to deal with that
+condition, typically by masking the irq source for the duration of
+the error handling. It is expected that the platform "knows" which
+interrupts are routed to error-management capable slots and can deal
+with temporarily disabling that irq number during error processing (this
+isn't terribly complex). That means some IRQ latency for other devices
+sharing the interrupt, but there is simply no other way. High end
+platforms aren't supposed to share interrupts between many devices
+anyway :)
+
+
+Revised: 31 May 2005 Linas Vepstas <linas@austin.ibm.com>
diff --git a/Documentation/pm.txt b/Documentation/pm.txt
index 2ea1149bf6b..79c0f32a760 100644
--- a/Documentation/pm.txt
+++ b/Documentation/pm.txt
@@ -218,7 +218,7 @@ proceed in the opposite direction.
Q: Who do I contact for additional information about
enabling power management for my specific driver/device?
-ACPI Development mailing list: acpi-devel@lists.sourceforge.net
+ACPI Development mailing list: linux-acpi@vger.kernel.org
System Interface -- OBSOLETE, DO NOT USE!
----------------*************************
diff --git a/Documentation/power/interface.txt b/Documentation/power/interface.txt
index f5ebda5f427..bd4ffb5bd49 100644
--- a/Documentation/power/interface.txt
+++ b/Documentation/power/interface.txt
@@ -41,3 +41,14 @@ to. Writing to this file will accept one of
It will only change to 'firmware' or 'platform' if the system supports
it.
+/sys/power/image_size controls the size of the image created by
+the suspend-to-disk mechanism. It can be written a string
+representing a non-negative integer that will be used as an upper
+limit of the image size, in megabytes. The suspend-to-disk mechanism will
+do its best to ensure the image size will not exceed that number. However,
+if this turns out to be impossible, it will try to suspend anyway using the
+smallest image possible. In particular, if "0" is written to this file, the
+suspend image will be as small as possible.
+
+Reading from this file will display the current image size limit, which
+is set to 500 MB by default.
diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt
index b0d50840788..08c79d4dc54 100644
--- a/Documentation/power/swsusp.txt
+++ b/Documentation/power/swsusp.txt
@@ -27,6 +27,11 @@ echo shutdown > /sys/power/disk; echo disk > /sys/power/state
echo platform > /sys/power/disk; echo disk > /sys/power/state
+If you want to limit the suspend image size to N megabytes, do
+
+echo N > /sys/power/image_size
+
+before suspend (it is limited to 500 MB by default).
Encrypted suspend image:
------------------------
@@ -207,7 +212,7 @@ A: Try running
cat `cat /proc/[0-9]*/maps | grep / | sed 's:.* /:/:' | sort -u` > /dev/null
-after resume. swapoff -a; swapon -a may also be usefull.
+after resume. swapoff -a; swapon -a may also be useful.
Q: What happens to devices during swsusp? They seem to be resumed
during system suspend?
@@ -318,7 +323,7 @@ to be useless to try to suspend to disk while that app is running?
A: No, it should work okay, as long as your app does not mlock()
it. Just prepare big enough swap partition.
-Q: What information is usefull for debugging suspend-to-disk problems?
+Q: What information is useful for debugging suspend-to-disk problems?
A: Well, last messages on the screen are always useful. If something
is broken, it is usually some kernel driver, therefore trying with as
diff --git a/Documentation/powerpc/00-INDEX b/Documentation/powerpc/00-INDEX
index e7bea0a407b..d6d65b9bcfe 100644
--- a/Documentation/powerpc/00-INDEX
+++ b/Documentation/powerpc/00-INDEX
@@ -8,12 +8,18 @@ please mail me.
cpu_features.txt
- info on how we support a variety of CPUs with minimal compile-time
options.
+eeh-pci-error-recovery.txt
+ - info on PCI Bus EEH Error Recovery
+hvcs.txt
+ - IBM "Hypervisor Virtual Console Server" Installation Guide
+mpc52xx.txt
+ - Linux 2.6.x on MPC52xx family
ppc_htab.txt
- info about the Linux/PPC /proc/ppc_htab entry
-smp.txt
- - use and state info about Linux/PPC on MP machines
SBC8260_memory_mapping.txt
- EST SBC8260 board info
+smp.txt
+ - use and state info about Linux/PPC on MP machines
sound.txt
- info on sound support under Linux/PPC
zImage_layout.txt
diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
index 2c81305090d..e409e5d0748 100644
--- a/Documentation/stable_kernel_rules.txt
+++ b/Documentation/stable_kernel_rules.txt
@@ -1,58 +1,56 @@
Everything you ever wanted to know about Linux 2.6 -stable releases.
-Rules on what kind of patches are accepted, and what ones are not, into
-the "-stable" tree:
+Rules on what kind of patches are accepted, and which ones are not, into the
+"-stable" tree:
- It must be obviously correct and tested.
- - It can not bigger than 100 lines, with context.
+ - It can not be bigger than 100 lines, with context.
- It must fix only one thing.
- It must fix a real bug that bothers people (not a, "This could be a
- problem..." type thing.)
+ problem..." type thing).
- It must fix a problem that causes a build error (but not for things
marked CONFIG_BROKEN), an oops, a hang, data corruption, a real
- security issue, or some "oh, that's not good" issue. In short,
- something critical.
- - No "theoretical race condition" issues, unless an explanation of how
- the race can be exploited.
+ security issue, or some "oh, that's not good" issue. In short, something
+ critical.
+ - No "theoretical race condition" issues, unless an explanation of how the
+ race can be exploited is also provided.
- It can not contain any "trivial" fixes in it (spelling changes,
- whitespace cleanups, etc.)
+ whitespace cleanups, etc).
- It must be accepted by the relevant subsystem maintainer.
- - It must follow Documentation/SubmittingPatches rules.
+ - It must follow the Documentation/SubmittingPatches rules.
Procedure for submitting patches to the -stable tree:
- Send the patch, after verifying that it follows the above rules, to
stable@kernel.org.
- - The sender will receive an ack when the patch has been accepted into
- the queue, or a nak if the patch is rejected. This response might
- take a few days, according to the developer's schedules.
- - If accepted, the patch will be added to the -stable queue, for review
- by other developers.
+ - The sender will receive an ACK when the patch has been accepted into the
+ queue, or a NAK if the patch is rejected. This response might take a few
+ days, according to the developer's schedules.
+ - If accepted, the patch will be added to the -stable queue, for review by
+ other developers.
- Security patches should not be sent to this alias, but instead to the
- documented security@kernel.org.
+ documented security@kernel.org address.
Review cycle:
- - When the -stable maintainers decide for a review cycle, the patches
- will be sent to the review committee, and the maintainer of the
- affected area of the patch (unless the submitter is the maintainer of
- the area) and CC: to the linux-kernel mailing list.
- - The review committee has 48 hours in which to ack or nak the patch.
+ - When the -stable maintainers decide for a review cycle, the patches will be
+ sent to the review committee, and the maintainer of the affected area of
+ the patch (unless the submitter is the maintainer of the area) and CC: to
+ the linux-kernel mailing list.
+ - The review committee has 48 hours in which to ACK or NAK the patch.
- If the patch is rejected by a member of the committee, or linux-kernel
- members object to the patch, bringing up issues that the maintainers
- and members did not realize, the patch will be dropped from the
- queue.
- - At the end of the review cycle, the acked patches will be added to
- the latest -stable release, and a new -stable release will happen.
- - Security patches will be accepted into the -stable tree directly from
- the security kernel team, and not go through the normal review cycle.
+ members object to the patch, bringing up issues that the maintainers and
+ members did not realize, the patch will be dropped from the queue.
+ - At the end of the review cycle, the ACKed patches will be added to the
+ latest -stable release, and a new -stable release will happen.
+ - Security patches will be accepted into the -stable tree directly from the
+ security kernel team, and not go through the normal review cycle.
Contact the kernel security team for more details on this procedure.
Review committe:
- - This will be made up of a number of kernel developers who have
- volunteered for this task, and a few that haven't.
-
+ - This is made up of a number of kernel developers who have volunteered for
+ this task, and a few that haven't.
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 2f1aae32a5d..6910c0136f8 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -26,12 +26,13 @@ Currently, these files are in /proc/sys/vm:
- min_free_kbytes
- laptop_mode
- block_dump
+- drop-caches
==============================================================
dirty_ratio, dirty_background_ratio, dirty_expire_centisecs,
dirty_writeback_centisecs, vfs_cache_pressure, laptop_mode,
-block_dump, swap_token_timeout:
+block_dump, swap_token_timeout, drop-caches:
See Documentation/filesystems/proc.txt
@@ -102,3 +103,20 @@ This is used to force the Linux VM to keep a minimum number
of kilobytes free. The VM uses this number to compute a pages_min
value for each lowmem zone in the system. Each lowmem zone gets
a number of reserved free pages based proportionally on its size.
+
+==============================================================
+
+percpu_pagelist_fraction
+
+This is the fraction of pages at most (high mark pcp->high) in each zone that
+are allocated for each per cpu page list. The min value for this is 8. It
+means that we don't allow more than 1/8th of pages in each zone to be
+allocated in any single per_cpu_pagelist. This entry only changes the value
+of hot per cpu pagelists. The user can specify a number like 100 to allocate
+1/100th of each zone to each per cpu page list.
+
+The batch value of each per cpu pagelist is also updated as a result. It is
+set to pcp->high/4. The upper limit of batch is (PAGE_SHIFT * 8).
+
+The initial value is zero. The kernel does not use this value at boot time to
+set the high water marks for each per cpu page list.
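+
+A minimal sketch, not from the text above, of setting this tunable from C by
+writing to the procfs file; the chosen value of 100 is only an example, and
+the same effect can be had by echoing a number into
+/proc/sys/vm/percpu_pagelist_fraction:
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		FILE *f = fopen("/proc/sys/vm/percpu_pagelist_fraction", "w");
+
+		if (!f) {
+			perror("fopen");
+			return 1;
+		}
+		/* allow at most 1/100th of each zone in any per-cpu page list */
+		fprintf(f, "100\n");
+		return fclose(f) ? 1 : 0;
+	}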
diff --git a/Documentation/video4linux/CARDLIST.bttv b/Documentation/video4linux/CARDLIST.bttv
index 330246ac80f..b72706c58a4 100644
--- a/Documentation/video4linux/CARDLIST.bttv
+++ b/Documentation/video4linux/CARDLIST.bttv
@@ -141,3 +141,5 @@
140 -> Osprey 440 [0070:ff07]
141 -> Asound Skyeye PCTV
142 -> Sabrent TV-FM (bttv version)
+143 -> Hauppauge ImpactVCB (bt878) [0070:13eb]
+144 -> MagicTV
diff --git a/Documentation/video4linux/CARDLIST.cx88 b/Documentation/video4linux/CARDLIST.cx88
index a1017d1a85d..56e194f1a0b 100644
--- a/Documentation/video4linux/CARDLIST.cx88
+++ b/Documentation/video4linux/CARDLIST.cx88
@@ -16,10 +16,10 @@
15 -> DViCO FusionHDTV DVB-T1 [18ac:db00]
16 -> KWorld LTV883RF
17 -> DViCO FusionHDTV 3 Gold-Q [18ac:d810]
- 18 -> Hauppauge Nova-T DVB-T [0070:9002]
+ 18 -> Hauppauge Nova-T DVB-T [0070:9002,0070:9001]
19 -> Conexant DVB-T reference design [14f1:0187]
20 -> Provideo PV259 [1540:2580]
- 21 -> DViCO FusionHDTV DVB-T Plus [18ac:db10]
+ 21 -> DViCO FusionHDTV DVB-T Plus [18ac:db10,18ac:db11]
22 -> pcHDTV HD3000 HDTV [7063:3000]
23 -> digitalnow DNTV Live! DVB-T [17de:a8a6]
24 -> Hauppauge WinTV 28xxx (Roslyn) models [0070:2801]
@@ -35,3 +35,11 @@
34 -> ATI HDTV Wonder [1002:a101]
35 -> WinFast DTV1000-T [107d:665f]
36 -> AVerTV 303 (M126) [1461:000a]
+ 37 -> Hauppauge Nova-S-Plus DVB-S [0070:9201,0070:9202]
+ 38 -> Hauppauge Nova-SE2 DVB-S [0070:9200]
+ 39 -> KWorld DVB-S 100 [17de:08b2]
+ 40 -> Hauppauge WinTV-HVR1100 DVB-T/Hybrid [0070:9400,0070:9402]
+ 41 -> Hauppauge WinTV-HVR1100 DVB-T/Hybrid (Low Profile) [0070:9800,0070:9802]
+ 42 -> digitalnow DNTV Live! DVB-T Pro [1822:0025]
+ 43 -> KWorld/VStream XPert DVB-T with cx22702 [17de:08a1]
+ 44 -> DViCO FusionHDTV DVB-T Dual Digital [18ac:db50]
diff --git a/Documentation/video4linux/CARDLIST.saa7134 b/Documentation/video4linux/CARDLIST.saa7134
index efb708ec116..cb3a59bbeb1 100644
--- a/Documentation/video4linux/CARDLIST.saa7134
+++ b/Documentation/video4linux/CARDLIST.saa7134
@@ -56,7 +56,7 @@
55 -> LifeView FlyDVB-T DUO [5168:0502,5168:0306]
56 -> Avermedia AVerTV 307 [1461:a70a]
57 -> Avermedia AVerTV GO 007 FM [1461:f31f]
- 58 -> ADS Tech Instant TV (saa7135) [1421:0350,1421:0370,1421:1370]
+ 58 -> ADS Tech Instant TV (saa7135) [1421:0350,1421:0351,1421:0370,1421:1370]
59 -> Kworld/Tevion V-Stream Xpert TV PVR7134
60 -> Typhoon DVB-T Duo Digital/Analog Cardbus [4e42:0502]
61 -> Philips TOUGH DVB-T reference design [1131:2004]
@@ -81,4 +81,5 @@
80 -> ASUS Digimatrix TV [1043:0210]
81 -> Philips Tiger reference design [1131:2018]
82 -> MSI TV@Anywhere plus [1462:6231]
-
+ 83 -> Terratec Cinergy 250 PCI TV [153b:1160]
+ 84 -> LifeView FlyDVB Trio [5168:0319]
diff --git a/Documentation/video4linux/CARDLIST.tuner b/Documentation/video4linux/CARDLIST.tuner
index 9d6544ea9f4..0bf3d5bf9ef 100644
--- a/Documentation/video4linux/CARDLIST.tuner
+++ b/Documentation/video4linux/CARDLIST.tuner
@@ -40,7 +40,7 @@ tuner=38 - Philips PAL/SECAM multi (FM1216ME MK3)
tuner=39 - LG NTSC (newer TAPC series)
tuner=40 - HITACHI V7-J180AT
tuner=41 - Philips PAL_MK (FI1216 MK)
-tuner=42 - Philips 1236D ATSC/NTSC daul in
+tuner=42 - Philips 1236D ATSC/NTSC dual in
tuner=43 - Philips NTSC MK3 (FM1236MK3 or FM1236/F)
tuner=44 - Philips 4 in 1 (ATI TV Wonder Pro/Conexant)
tuner=45 - Microtune 4049 FM5
@@ -50,7 +50,7 @@ tuner=48 - Tenna TNF 8831 BGFF)
tuner=49 - Microtune 4042 FI5 ATSC/NTSC dual in
tuner=50 - TCL 2002N
tuner=51 - Philips PAL/SECAM_D (FM 1256 I-H3)
-tuner=52 - Thomson DDT 7610 (ATSC/NTSC)
+tuner=52 - Thomson DTT 7610 (ATSC/NTSC)
tuner=53 - Philips FQ1286
tuner=54 - tda8290+75
tuner=55 - TCL 2002MB
@@ -58,7 +58,7 @@ tuner=56 - Philips PAL/SECAM multi (FQ1216AME MK4)
tuner=57 - Philips FQ1236A MK4
tuner=58 - Ymec TVision TVF-8531MF/8831MF/8731MF
tuner=59 - Ymec TVision TVF-5533MF
-tuner=60 - Thomson DDT 7611 (ATSC/NTSC)
+tuner=60 - Thomson DTT 761X (ATSC/NTSC)
tuner=61 - Tena TNF9533-D/IF/TNF9533-B/DF
tuner=62 - Philips TEA5767HN FM Radio
tuner=63 - Philips FMD1216ME MK3 Hybrid Tuner
diff --git a/Documentation/x86_64/boot-options.txt b/Documentation/x86_64/boot-options.txt
index e566affeed7..72ab9b99b22 100644
--- a/Documentation/x86_64/boot-options.txt
+++ b/Documentation/x86_64/boot-options.txt
@@ -125,7 +125,7 @@ SMP
cpumask=MASK only use cpus with bits set in mask
additional_cpus=NUM Allow NUM more CPUs for hotplug
- (defaults are specified by the BIOS or half the available CPUs)
+ (defaults are specified by the BIOS, see Documentation/x86_64/cpu-hotplug-spec)
NUMA
diff --git a/Documentation/x86_64/cpu-hotplug-spec b/Documentation/x86_64/cpu-hotplug-spec
new file mode 100644
index 00000000000..5c0fa345e55
--- /dev/null
+++ b/Documentation/x86_64/cpu-hotplug-spec
@@ -0,0 +1,21 @@
+Firmware support for CPU hotplug under Linux/x86-64
+---------------------------------------------------
+
+Linux/x86-64 now supports CPU hotplug. For various reasons Linux wants to
+know, in advance at boot time, the maximum number of CPUs that could be
+plugged into the system. ACPI 3.0 currently has no official way to supply
+this information from the firmware to the operating system.
+
+In ACPI each CPU needs an LAPIC object in the MADT table (5.2.11.5 in the
+ACPI 3.0 specification). ACPI already has the concept of a disabled LAPIC
+object, marked by setting the Enabled bit in the LAPIC object to zero.
+
+For CPU hotplug Linux/x86-64 now expects that any possible future hotpluggable
+CPU is already available in the MADT. If the CPU is not available yet, it
+should have its LAPIC Enabled bit set to 0. Linux will use the number of
+disabled LAPICs to compute the maximum number of future CPUs.
+
+In the worst case the user can override this choice using a command line
+option (additional_cpus=...), but it is recommended to supply the correct
+number (or a reasonable approximation of it, erring towards more rather than
+less) in the MADT to avoid manual configuration.
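
A short sketch of the fallback described above (the value is illustrative, not
part of the spec): if the firmware only lists the CPUs that are present,
booting with

	additional_cpus=2

tells the kernel to reserve room for two more hotpluggable CPUs beyond those
enabled in the MADT.
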
diff --git a/Kbuild b/Kbuild
index 79003918f37..95d6a00bace 100644
--- a/Kbuild
+++ b/Kbuild
@@ -22,8 +22,6 @@ sed-$(CONFIG_MIPS) := "/^@@@/s///p"
quiet_cmd_offsets = GEN $@
define cmd_offsets
- mkdir -p $(dir $@); \
- cat $< | \
(set -e; \
echo "#ifndef __ASM_OFFSETS_H__"; \
echo "#define __ASM_OFFSETS_H__"; \
@@ -34,7 +32,7 @@ define cmd_offsets
echo " *"; \
echo " */"; \
echo ""; \
- sed -ne $(sed-y); \
+ sed -ne $(sed-y) $<; \
echo ""; \
echo "#endif" ) > $@
endef
@@ -45,5 +43,6 @@ arch/$(ARCH)/kernel/asm-offsets.s: arch/$(ARCH)/kernel/asm-offsets.c FORCE
$(call if_changed_dep,cc_s_c)
$(obj)/$(offsets-file): arch/$(ARCH)/kernel/asm-offsets.s Kbuild
+ $(Q)mkdir -p $(dir $@)
$(call cmd,offsets)
diff --git a/MAINTAINERS b/MAINTAINERS
index 6246b7f1163..71693c5c313 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -182,7 +182,7 @@ S: Supported
ACPI
P: Len Brown
M: len.brown@intel.com
-L: acpi-devel@lists.sourceforge.net
+L: linux-acpi@vger.kernel.org
W: http://acpi.sourceforge.net/
T: git kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git
S: Maintained
@@ -258,6 +258,13 @@ P: Ivan Kokshaysky
M: ink@jurassic.park.msu.ru
S: Maintained for 2.4; PCI support for 2.6.
+AMD GEODE PROCESSOR/CHIPSET SUPPORT
+P: Jordan Crouse
+M: info-linux@geode.amd.com
+L: info-linux@geode.amd.com
+W: http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html
+S: Supported
+
APM DRIVER
P: Stephen Rothwell
M: sfr@canb.auug.org.au
@@ -539,13 +546,6 @@ W: http://linuxtv.org
T: git kernel.org:/pub/scm/linux/kernel/git/mchehab/v4l-dvb.git
S: Maintained
-BUSLOGIC SCSI DRIVER
-P: Leonard N. Zubkoff
-M: Leonard N. Zubkoff <lnz@dandelion.com>
-L: linux-scsi@vger.kernel.org
-W: http://www.dandelion.com/Linux/
-S: Maintained
-
COMMON INTERNET FILE SYSTEM (CIFS)
P: Steve French
M: sfrench@samba.org
@@ -554,6 +554,11 @@ W: http://us1.samba.org/samba/Linux_CIFS_client.html
T: git kernel.org:/pub/scm/linux/kernel/git/sfrench/cifs-2.6.git
S: Supported
+CONFIGFS
+P: Joel Becker
+M: Joel Becker <joel.becker@oracle.com>
+S: Supported
+
CIRRUS LOGIC GENERIC FBDEV DRIVER
P: Jeff Garzik
M: jgarzik@pobox.com
@@ -684,13 +689,6 @@ M: pc300@cyclades.com
W: http://www.cyclades.com/
S: Supported
-DAC960 RAID CONTROLLER DRIVER
-P: Dave Olien
-M dmo@osdl.org
-W: http://www.osdl.org/archive/dmo/DAC960
-L: linux-kernel@vger.kernel.org
-S: Maintained
-
DAMA SLAVE for AX.25
P: Joerg Reuter
M: jreuter@yaina.de
@@ -806,6 +804,7 @@ S: Maintained
DOCBOOK FOR DOCUMENTATION
P: Martin Waitz
M: tali@admingilde.org
+T: git http://tali.admingilde.org/git/linux-docbook.git
S: Maintained
DOUBLETALK DRIVER
@@ -922,7 +921,6 @@ S: Maintained
FARSYNC SYNCHRONOUS DRIVER
P: Kevin Curtis
M: kevin.curtis@farsite.co.uk
-M: kevin.curtis@farsite.co.uk
W: http://www.farsite.co.uk/
S: Supported
@@ -1230,7 +1228,7 @@ IEEE 1394 SUBSYSTEM
P: Ben Collins
M: bcollins@debian.org
P: Jody McIntyre
-M: scjody@steamballoon.com
+M: scjody@modernduck.com
L: linux1394-devel@lists.sourceforge.net
W: http://www.linux1394.org/
T: git kernel.org:/pub/scm/linux/kernel/git/scjody/ieee1394.git
@@ -1240,14 +1238,14 @@ IEEE 1394 OHCI DRIVER
P: Ben Collins
M: bcollins@debian.org
P: Jody McIntyre
-M: scjody@steamballoon.com
+M: scjody@modernduck.com
L: linux1394-devel@lists.sourceforge.net
W: http://www.linux1394.org/
S: Maintained
IEEE 1394 PCILYNX DRIVER
P: Jody McIntyre
-M: scjody@steamballoon.com
+M: scjody@modernduck.com
L: linux1394-devel@lists.sourceforge.net
W: http://www.linux1394.org/
S: Maintained
@@ -1693,7 +1691,6 @@ S: Maintained
MARVELL MV64340 ETHERNET DRIVER
P: Manish Lachwani
-M: Manish_Lachwani@pmc-sierra.com
L: linux-mips@linux-mips.org
L: netdev@vger.kernel.org
S: Supported
@@ -1898,6 +1895,15 @@ M: ajoshi@shell.unixbox.com
L: linux-nvidia@lists.surfsouth.com
S: Maintained
+ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
+P: Mark Fasheh
+M: mark.fasheh@oracle.com
+P: Kurt Hackel
+M: kurt.hackel@oracle.com
+L: ocfs2-devel@oss.oracle.com
+W: http://oss.oracle.com/projects/ocfs2/
+S: Supported
+
OLYMPIC NETWORK DRIVER
P: Peter De Shrijver
M: p2@ace.ulyssis.student.kuleuven.ac.be
@@ -1982,6 +1988,13 @@ M: hch@infradead.org
L: linux-abi-devel@lists.sourceforge.net
S: Maintained
+PCI ERROR RECOVERY
+P: Linas Vepstas
+M: linas@austin.ibm.com
+L: linux-kernel@vger.kernel.org
+L: linux-pci@atrey.karlin.mff.cuni.cz
+S: Supported
+
PCI SOUND DRIVERS (ES1370, ES1371 and SONICVIBES)
P: Thomas Sailer
M: sailer@ife.ee.ethz.ch
@@ -2346,13 +2359,6 @@ P: Nicolas Pitre
M: nico@cam.org
S: Maintained
-SNA NETWORK LAYER
-P: Jay Schulist
-M: jschlst@samba.org
-L: linux-sna@turbolinux.com
-W: http://www.linux-sna.org
-S: Supported
-
SOFTWARE RAID (Multiple Disks) SUPPORT
P: Ingo Molnar
M: mingo@redhat.com
@@ -2474,7 +2480,7 @@ P: Paul Mundt
M: lethal@linux-sh.org
P: Kazumoto Kojima
M: kkojima@rr.iij4u.or.jp
-L: linux-sh@m17n.org
+L: linuxsh-dev@lists.sourceforge.net
W: http://www.linux-sh.org
W: http://www.m17n.org/linux-sh/
W: http://www.rr.iij4u.or.jp/~kkojima/linux-sh4.html
@@ -2513,6 +2519,19 @@ P: Romain Lievin
M: roms@lpg.ticalc.org
S: Maintained
+TIPC NETWORK LAYER
+P: Per Liden
+M: per.liden@nospam.ericsson.com
+P: Jon Maloy
+M: jon.maloy@nospam.ericsson.com
+P: Allan Stephens
+M: allan.stephens@nospam.windriver.com
+L: tipc-discussion@lists.sourceforge.net
+W: http://tipc.sourceforge.net/
+W: http://tipc.cslab.ericsson.net/
+T: git tipc.cslab.ericsson.net:/pub/git/tipc.git
+S: Maintained
+
TLAN NETWORK DRIVER
P: Samuel Chessman
M: chessman@tux.org
@@ -2902,6 +2921,12 @@ W: http://linuxtv.org
T: git kernel.org:/pub/scm/linux/kernel/git/mchehab/v4l-dvb.git
S: Maintained
+VT8231 HARDWARE MONITOR DRIVER
+P: Roger Lucas
+M: roger@planbit.co.uk
+L: lm-sensors@lm-sensors.org
+S: Maintained
+
W1 DALLAS'S 1-WIRE BUS
P: Evgeniy Polyakov
M: johnpol@2ka.mipt.ru
diff --git a/Makefile b/Makefile
index 599e744d3e3..deedaf79cdc 100644
--- a/Makefile
+++ b/Makefile
@@ -141,24 +141,6 @@ VPATH := $(srctree)
export srctree objtree VPATH TOPDIR
-nullstring :=
-space := $(nullstring) # end of line
-
-# Take the contents of any files called localversion* and the config
-# variable CONFIG_LOCALVERSION and append them to KERNELRELEASE. Be
-# careful not to include files twice if building in the source
-# directory. LOCALVERSION from the command line override all of this
-
-localver := $(objtree)/localversion* $(srctree)/localversion*
-localver := $(sort $(wildcard $(localver)))
-# skip backup files (containing '~')
-localver := $(foreach f, $(localver), $(if $(findstring ~, $(f)),,$(f)))
-
-LOCALVERSION = $(subst $(space),, \
- $(shell cat /dev/null $(localver)) \
- $(patsubst "%",%,$(CONFIG_LOCALVERSION)))
-
-KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)$(LOCALVERSION)
# SUBARCH tells the usermode build what the underlying arch is. That is set
# first, and if a usermode build is happening, the "ARCH=um" on the command
@@ -251,7 +233,7 @@ export KBUILD_CHECKSRC KBUILD_SRC KBUILD_EXTMOD
# If it is set to "silent_", nothing wil be printed at all, since
# the variable $(silent_cmd_cc_o_c) doesn't exist.
#
-# A simple variant is to prefix commands with $(Q) - that's usefull
+# A simple variant is to prefix commands with $(Q) - that's useful
# for commands that shall be hidden in non-verbose mode.
#
# $(Q)ln $@ :<
@@ -353,7 +335,10 @@ CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-ffreestanding
AFLAGS := -D__ASSEMBLY__
-export VERSION PATCHLEVEL SUBLEVEL EXTRAVERSION LOCALVERSION KERNELRELEASE \
+# Read KERNELRELEASE from .kernelrelease (if it exists)
+KERNELRELEASE = $(shell cat .kernelrelease 2> /dev/null)
+
+export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE \
ARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC \
CPP AR NM STRIP OBJCOPY OBJDUMP MAKE AWK GENKSYMS PERL UTS_MACHINE \
HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
@@ -551,26 +536,6 @@ export KBUILD_IMAGE ?= vmlinux
# images. Default is /boot, but you can set it to other values
export INSTALL_PATH ?= /boot
-# If CONFIG_LOCALVERSION_AUTO is set, we automatically perform some tests
-# and try to determine if the current source tree is a release tree, of any sort,
-# or if is a pure development tree.
-#
-# A 'release tree' is any tree with a git TAG associated
-# with it. The primary goal of this is to make it safe for a native
-# git/CVS/SVN user to build a release tree (i.e, 2.6.9) and also to
-# continue developing against the current Linus tree, without having the Linus
-# tree overwrite the 2.6.9 tree when installed.
-#
-# Currently, only git is supported.
-# Other SCMs can edit scripts/setlocalversion and add the appropriate
-# checks as needed.
-
-
-ifdef CONFIG_LOCALVERSION_AUTO
- localversion-auto := $(shell $(PERL) $(srctree)/scripts/setlocalversion $(srctree))
- LOCALVERSION := $(LOCALVERSION)$(localversion-auto)
-endif
-
#
# INSTALL_MOD_PATH specifies a prefix to MODLIB for module directory
# relocations required by build roots. This is not defined in the
@@ -782,6 +747,50 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
$(vmlinux-dirs): prepare scripts
$(Q)$(MAKE) $(build)=$@
+# Build the kernel release string
+# The KERNELRELEASE is stored in a file named .kernelrelease
+# to be used when executing, for example, make install or make modules_install
+#
+# Take the contents of any files called localversion* and the config
+# variable CONFIG_LOCALVERSION and append them to KERNELRELEASE.
+# LOCALVERSION from the command line overrides all of this
+
+nullstring :=
+space := $(nullstring) # end of line
+
+___localver = $(objtree)/localversion* $(srctree)/localversion*
+__localver = $(sort $(wildcard $(___localver)))
+# skip backup files (containing '~')
+_localver = $(foreach f, $(__localver), $(if $(findstring ~, $(f)),,$(f)))
+
+localver = $(subst $(space),, \
+ $(shell cat /dev/null $(_localver)) \
+ $(patsubst "%",%,$(CONFIG_LOCALVERSION)))
+
+# If CONFIG_LOCALVERSION_AUTO is set, scripts/setlocalversion is called
+# and, if the SCM is known, a tag from the SCM is appended.
+# The appended tag is determined by the SCM used.
+#
+# Currently, only git is supported.
+# Other SCMs can edit scripts/setlocalversion and add the appropriate
+# checks as needed.
+ifdef CONFIG_LOCALVERSION_AUTO
+ _localver-auto = $(shell $(CONFIG_SHELL) \
+ $(srctree)/scripts/setlocalversion $(srctree))
+ localver-auto = $(LOCALVERSION)$(_localver-auto)
+endif
+
+localver-full = $(localver)$(localver-auto)
+
+# Store (new) KERNELRELEASE string in .kernelrelease
+kernelrelease = \
+ $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)$(localver-full)
+.kernelrelease: FORCE
+ $(Q)rm -f .kernelrelease
+ $(Q)echo $(kernelrelease) > .kernelrelease
+ $(Q)echo " Building kernel $(kernelrelease)"
+
+
# Things we need to do before we recursively start building the kernel
# or the modules are listed in "prepare".
# A multi level approach is used. prepareN is processed before prepareN-1.
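
A worked sketch of the release string assembled above (all values are
hypothetical): with VERSION=2, PATCHLEVEL=6, SUBLEVEL=16, EXTRAVERSION=-rc1,
a localversion file containing "-mm1", CONFIG_LOCALVERSION="-custom", no
LOCALVERSION on the command line, and setlocalversion reporting "-gdeadbee",
the .kernelrelease rule stores

	2.6.16-rc1-mm1-custom-gdeadbee

and later invocations such as make install read KERNELRELEASE back from that
file instead of recomputing it.
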
@@ -798,8 +807,7 @@ $(vmlinux-dirs): prepare scripts
# and if so do:
# 1) Check that make has not been executed in the kernel src $(srctree)
# 2) Create the include2 directory, used for the second asm symlink
-
-prepare3:
+prepare3: .kernelrelease
ifneq ($(KBUILD_SRC),)
@echo ' Using $(srctree) as source for kernel'
$(Q)if [ -f $(srctree)/.config ]; then \
@@ -984,9 +992,9 @@ CLEAN_FILES += vmlinux System.map \
# Directories & files removed with 'make mrproper'
MRPROPER_DIRS += include/config include2
-MRPROPER_FILES += .config .config.old include/asm .version \
+MRPROPER_FILES += .config .config.old include/asm .version .old_version \
include/linux/autoconf.h include/linux/version.h \
- Module.symvers tags TAGS cscope*
+ .kernelrelease Module.symvers tags TAGS cscope*
# clean - Delete most, but leave enough to build external modules
#
@@ -1072,6 +1080,7 @@ help:
@echo ' tags/TAGS - Generate tags file for editors'
@echo ' cscope - Generate cscope index'
@echo ' kernelrelease - Output the release version string'
+ @echo ' kernelversion - Output the version stored in Makefile'
@echo ''
@echo 'Static analysers'
@echo ' buildcheck - List dangling references to vmlinux discarded sections'
@@ -1293,6 +1302,8 @@ checkstack:
kernelrelease:
@echo $(KERNELRELEASE)
+kernelversion:
+ @echo $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
# FIXME Should go into a make.lib or something
# ===========================================================================
diff --git a/README b/README
index 61c4f742923..cd5e2eb6213 100644
--- a/README
+++ b/README
@@ -183,11 +183,8 @@ CONFIGURING the kernel:
COMPILING the kernel:
- - Make sure you have gcc 2.95.3 available.
- gcc 2.91.66 (egcs-1.1.2), and gcc 2.7.2.3 are known to miscompile
- some parts of the kernel, and are *no longer supported*.
- Also remember to upgrade your binutils package (for as/ld/nm and company)
- if necessary. For more information, refer to Documentation/Changes.
+ - Make sure you have at least gcc 3.2 available.
+ For more information, refer to Documentation/Changes.
Please note that you can still run a.out user programs with this kernel.
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 786491f9ceb..eedf41bf705 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -18,9 +18,6 @@ config MMU
bool
default y
-config UID16
- bool
-
config RWSEM_GENERIC_SPINLOCK
bool
@@ -40,6 +37,19 @@ config GENERIC_IOMAP
bool
default n
+config GENERIC_HARDIRQS
+ bool
+ default y
+
+config GENERIC_IRQ_PROBE
+ bool
+ default y
+
+config AUTO_IRQ_AFFINITY
+ bool
+ depends on SMP
+ default y
+
source "init/Kconfig"
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index 24ae9a36607..1898ea79d0e 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -40,7 +40,6 @@
#include <asm/unistd.h>
extern struct hwrpb_struct *hwrpb;
-extern void dump_thread(struct pt_regs *, struct user *);
extern spinlock_t rtc_lock;
/* these are C runtime functions with special calling conventions: */
@@ -175,7 +174,6 @@ EXPORT_SYMBOL(up);
*/
#ifdef CONFIG_SMP
-EXPORT_SYMBOL(synchronize_irq);
EXPORT_SYMBOL(flush_tlb_mm);
EXPORT_SYMBOL(flush_tlb_range);
EXPORT_SYMBOL(flush_tlb_page);
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index b6114f5c0d2..76be5cf0de1 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -32,214 +32,25 @@
#include <asm/io.h>
#include <asm/uaccess.h>
-/*
- * Controller mappings for all interrupt sources:
- */
-irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
- [0 ... NR_IRQS-1] = {
- .handler = &no_irq_type,
- .lock = SPIN_LOCK_UNLOCKED
- }
-};
-
-static void register_irq_proc(unsigned int irq);
-
volatile unsigned long irq_err_count;
-/*
- * Special irq handlers.
- */
-
-irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
-{
- return IRQ_NONE;
-}
-
-/*
- * Generic no controller code
- */
-
-static void no_irq_enable_disable(unsigned int irq) { }
-static unsigned int no_irq_startup(unsigned int irq) { return 0; }
-
-static void
-no_irq_ack(unsigned int irq)
+void ack_bad_irq(unsigned int irq)
{
irq_err_count++;
printk(KERN_CRIT "Unexpected IRQ trap at vector %u\n", irq);
}
-struct hw_interrupt_type no_irq_type = {
- .typename = "none",
- .startup = no_irq_startup,
- .shutdown = no_irq_enable_disable,
- .enable = no_irq_enable_disable,
- .disable = no_irq_enable_disable,
- .ack = no_irq_ack,
- .end = no_irq_enable_disable,
-};
-
-int
-handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
- struct irqaction *action)
-{
- int status = 1; /* Force the "do bottom halves" bit */
- int ret;
-
- do {
- if (!(action->flags & SA_INTERRUPT))
- local_irq_enable();
- else
- local_irq_disable();
-
- ret = action->handler(irq, action->dev_id, regs);
- if (ret == IRQ_HANDLED)
- status |= action->flags;
- action = action->next;
- } while (action);
- if (status & SA_SAMPLE_RANDOM)
- add_interrupt_randomness(irq);
- local_irq_disable();
-
- return status;
-}
-
-/*
- * Generic enable/disable code: this just calls
- * down into the PIC-specific version for the actual
- * hardware disable after having gotten the irq
- * controller lock.
- */
-void inline
-disable_irq_nosync(unsigned int irq)
-{
- irq_desc_t *desc = irq_desc + irq;
- unsigned long flags;
-
- spin_lock_irqsave(&desc->lock, flags);
- if (!desc->depth++) {
- desc->status |= IRQ_DISABLED;
- desc->handler->disable(irq);
- }
- spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-/*
- * Synchronous version of the above, making sure the IRQ is
- * no longer running on any other IRQ..
- */
-void
-disable_irq(unsigned int irq)
-{
- disable_irq_nosync(irq);
- synchronize_irq(irq);
-}
-
-void
-enable_irq(unsigned int irq)
-{
- irq_desc_t *desc = irq_desc + irq;
- unsigned long flags;
-
- spin_lock_irqsave(&desc->lock, flags);
- switch (desc->depth) {
- case 1: {
- unsigned int status = desc->status & ~IRQ_DISABLED;
- desc->status = status;
- if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
- desc->status = status | IRQ_REPLAY;
- hw_resend_irq(desc->handler,irq);
- }
- desc->handler->enable(irq);
- /* fall-through */
- }
- default:
- desc->depth--;
- break;
- case 0:
- printk(KERN_ERR "enable_irq() unbalanced from %p\n",
- __builtin_return_address(0));
- }
- spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-int
-setup_irq(unsigned int irq, struct irqaction * new)
-{
- int shared = 0;
- struct irqaction *old, **p;
- unsigned long flags;
- irq_desc_t *desc = irq_desc + irq;
-
- if (desc->handler == &no_irq_type)
- return -ENOSYS;
-
- /*
- * Some drivers like serial.c use request_irq() heavily,
- * so we have to be careful not to interfere with a
- * running system.
- */
- if (new->flags & SA_SAMPLE_RANDOM) {
- /*
- * This function might sleep, we want to call it first,
- * outside of the atomic block.
- * Yes, this might clear the entropy pool if the wrong
- * driver is attempted to be loaded, without actually
- * installing a new handler, but is this really a problem,
- * only the sysadmin is able to do this.
- */
- rand_initialize_irq(irq);
- }
-
- /*
- * The following block of code has to be executed atomically
- */
- spin_lock_irqsave(&desc->lock,flags);
- p = &desc->action;
- if ((old = *p) != NULL) {
- /* Can't share interrupts unless both agree to */
- if (!(old->flags & new->flags & SA_SHIRQ)) {
- spin_unlock_irqrestore(&desc->lock,flags);
- return -EBUSY;
- }
-
- /* add new interrupt at end of irq queue */
- do {
- p = &old->next;
- old = *p;
- } while (old);
- shared = 1;
- }
-
- *p = new;
-
- if (!shared) {
- desc->depth = 0;
- desc->status &=
- ~(IRQ_DISABLED|IRQ_AUTODETECT|IRQ_WAITING|IRQ_INPROGRESS);
- desc->handler->startup(irq);
- }
- spin_unlock_irqrestore(&desc->lock,flags);
-
- return 0;
-}
-
-static struct proc_dir_entry * root_irq_dir;
-static struct proc_dir_entry * irq_dir[NR_IRQS];
-
#ifdef CONFIG_SMP
-static struct proc_dir_entry * smp_affinity_entry[NR_IRQS];
static char irq_user_affinity[NR_IRQS];
-static cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
-static void
-select_smp_affinity(int irq)
+int
+select_smp_affinity(unsigned int irq)
{
static int last_cpu;
int cpu = last_cpu + 1;
- if (! irq_desc[irq].handler->set_affinity || irq_user_affinity[irq])
- return;
+ if (!irq_desc[irq].handler->set_affinity || irq_user_affinity[irq])
+ return 1;
while (!cpu_possible(cpu))
cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
@@ -247,208 +58,10 @@ select_smp_affinity(int irq)
irq_affinity[irq] = cpumask_of_cpu(cpu);
irq_desc[irq].handler->set_affinity(irq, cpumask_of_cpu(cpu));
+ return 0;
}
-
-static int
-irq_affinity_read_proc (char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
- if (count - len < 2)
- return -EINVAL;
- len += sprintf(page + len, "\n");
- return len;
-}
-
-static int
-irq_affinity_write_proc(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- int irq = (long) data, full_count = count, err;
- cpumask_t new_value;
-
- if (!irq_desc[irq].handler->set_affinity)
- return -EIO;
-
- err = cpumask_parse(buffer, count, new_value);
-
- /* The special value 0 means release control of the
- affinity to kernel. */
- cpus_and(new_value, new_value, cpu_online_map);
- if (cpus_empty(new_value)) {
- irq_user_affinity[irq] = 0;
- select_smp_affinity(irq);
- }
- /* Do not allow disabling IRQs completely - it's a too easy
- way to make the system unusable accidentally :-) At least
- one online CPU still has to be targeted. */
- else {
- irq_affinity[irq] = new_value;
- irq_user_affinity[irq] = 1;
- irq_desc[irq].handler->set_affinity(irq, new_value);
- }
-
- return full_count;
-}
-
#endif /* CONFIG_SMP */
-#define MAX_NAMELEN 10
-
-static void
-register_irq_proc (unsigned int irq)
-{
- char name [MAX_NAMELEN];
-
- if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
- irq_dir[irq])
- return;
-
- memset(name, 0, MAX_NAMELEN);
- sprintf(name, "%d", irq);
-
- /* create /proc/irq/1234 */
- irq_dir[irq] = proc_mkdir(name, root_irq_dir);
-
-#ifdef CONFIG_SMP
- if (irq_desc[irq].handler->set_affinity) {
- struct proc_dir_entry *entry;
- /* create /proc/irq/1234/smp_affinity */
- entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
-
- if (entry) {
- entry->nlink = 1;
- entry->data = (void *)(long)irq;
- entry->read_proc = irq_affinity_read_proc;
- entry->write_proc = irq_affinity_write_proc;
- }
-
- smp_affinity_entry[irq] = entry;
- }
-#endif
-}
-
-void
-init_irq_proc (void)
-{
- int i;
-
- /* create /proc/irq */
- root_irq_dir = proc_mkdir("irq", NULL);
-
-#ifdef CONFIG_SMP
- /* create /proc/irq/prof_cpu_mask */
- create_prof_cpu_mask(root_irq_dir);
-#endif
-
- /*
- * Create entries for all existing IRQs.
- */
- for (i = 0; i < ACTUAL_NR_IRQS; i++) {
- if (irq_desc[i].handler == &no_irq_type)
- continue;
- register_irq_proc(i);
- }
-}
-
-int
-request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
- unsigned long irqflags, const char * devname, void *dev_id)
-{
- int retval;
- struct irqaction * action;
-
- if (irq >= ACTUAL_NR_IRQS)
- return -EINVAL;
- if (!handler)
- return -EINVAL;
-
-#if 1
- /*
- * Sanity-check: shared interrupts should REALLY pass in
- * a real dev-ID, otherwise we'll have trouble later trying
- * to figure out which interrupt is which (messes up the
- * interrupt freeing logic etc).
- */
- if ((irqflags & SA_SHIRQ) && !dev_id) {
- printk(KERN_ERR
- "Bad boy: %s (at %p) called us without a dev_id!\n",
- devname, __builtin_return_address(0));
- }
-#endif
-
- action = (struct irqaction *)
- kmalloc(sizeof(struct irqaction), GFP_KERNEL);
- if (!action)
- return -ENOMEM;
-
- action->handler = handler;
- action->flags = irqflags;
- cpus_clear(action->mask);
- action->name = devname;
- action->next = NULL;
- action->dev_id = dev_id;
-
-#ifdef CONFIG_SMP
- select_smp_affinity(irq);
-#endif
-
- retval = setup_irq(irq, action);
- if (retval)
- kfree(action);
- return retval;
-}
-
-EXPORT_SYMBOL(request_irq);
-
-void
-free_irq(unsigned int irq, void *dev_id)
-{
- irq_desc_t *desc;
- struct irqaction **p;
- unsigned long flags;
-
- if (irq >= ACTUAL_NR_IRQS) {
- printk(KERN_CRIT "Trying to free IRQ%d\n", irq);
- return;
- }
-
- desc = irq_desc + irq;
- spin_lock_irqsave(&desc->lock,flags);
- p = &desc->action;
- for (;;) {
- struct irqaction * action = *p;
- if (action) {
- struct irqaction **pp = p;
- p = &action->next;
- if (action->dev_id != dev_id)
- continue;
-
- /* Found - now remove it from the list of entries. */
- *pp = action->next;
- if (!desc->action) {
- desc->status |= IRQ_DISABLED;
- desc->handler->shutdown(irq);
- }
- spin_unlock_irqrestore(&desc->lock,flags);
-
-#ifdef CONFIG_SMP
- /* Wait to make sure it's not being used on
- another CPU. */
- while (desc->status & IRQ_INPROGRESS)
- barrier();
-#endif
- kfree(action);
- return;
- }
- printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
- spin_unlock_irqrestore(&desc->lock,flags);
- return;
- }
-}
-
-EXPORT_SYMBOL(free_irq);
-
int
show_interrupts(struct seq_file *p, void *v)
{
@@ -531,10 +144,6 @@ handle_irq(int irq, struct pt_regs * regs)
* 0 return value means that this irq is already being
* handled by some other CPU. (or is disabled)
*/
- int cpu = smp_processor_id();
- irq_desc_t *desc = irq_desc + irq;
- struct irqaction * action;
- unsigned int status;
static unsigned int illegal_count=0;
if ((unsigned) irq > ACTUAL_NR_IRQS && illegal_count < MAX_ILLEGAL_IRQS ) {
@@ -546,229 +155,8 @@ handle_irq(int irq, struct pt_regs * regs)
}
irq_enter();
- kstat_cpu(cpu).irqs[irq]++;
- spin_lock_irq(&desc->lock); /* mask also the higher prio events */
- desc->handler->ack(irq);
- /*
- * REPLAY is when Linux resends an IRQ that was dropped earlier.
- * WAITING is used by probe to mark irqs that are being tested.
- */
- status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
- status |= IRQ_PENDING; /* we _want_ to handle it */
-
- /*
- * If the IRQ is disabled for whatever reason, we cannot
- * use the action we have.
- */
- action = NULL;
- if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
- action = desc->action;
- status &= ~IRQ_PENDING; /* we commit to handling */
- status |= IRQ_INPROGRESS; /* we are handling it */
- }
- desc->status = status;
-
- /*
- * If there is no IRQ handler or it was disabled, exit early.
- * Since we set PENDING, if another processor is handling
- * a different instance of this same irq, the other processor
- * will take care of it.
- */
- if (!action)
- goto out;
-
- /*
- * Edge triggered interrupts need to remember pending events.
- * This applies to any hw interrupts that allow a second
- * instance of the same irq to arrive while we are in handle_irq
- * or in the handler. But the code here only handles the _second_
- * instance of the irq, not the third or fourth. So it is mostly
- * useful for irq hardware that does not mask cleanly in an
- * SMP environment.
- */
- for (;;) {
- spin_unlock(&desc->lock);
- handle_IRQ_event(irq, regs, action);
- spin_lock(&desc->lock);
-
- if (!(desc->status & IRQ_PENDING)
- || (desc->status & IRQ_LEVEL))
- break;
- desc->status &= ~IRQ_PENDING;
- }
- desc->status &= ~IRQ_INPROGRESS;
-out:
- /*
- * The ->end() handler has to deal with interrupts which got
- * disabled while the handler was running.
- */
- desc->handler->end(irq);
- spin_unlock(&desc->lock);
-
+ local_irq_disable();
+ __do_IRQ(irq, regs);
+ local_irq_enable();
irq_exit();
}
-
-/*
- * IRQ autodetection code..
- *
- * This depends on the fact that any interrupt that
- * comes in on to an unassigned handler will get stuck
- * with "IRQ_WAITING" cleared and the interrupt
- * disabled.
- */
-unsigned long
-probe_irq_on(void)
-{
- int i;
- irq_desc_t *desc;
- unsigned long delay;
- unsigned long val;
-
- /* Something may have generated an irq long ago and we want to
- flush such a longstanding irq before considering it as spurious. */
- for (i = NR_IRQS-1; i >= 0; i--) {
- desc = irq_desc + i;
-
- spin_lock_irq(&desc->lock);
- if (!irq_desc[i].action)
- irq_desc[i].handler->startup(i);
- spin_unlock_irq(&desc->lock);
- }
-
- /* Wait for longstanding interrupts to trigger. */
- for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
- /* about 20ms delay */ barrier();
-
- /* enable any unassigned irqs (we must startup again here because
- if a longstanding irq happened in the previous stage, it may have
- masked itself) first, enable any unassigned irqs. */
- for (i = NR_IRQS-1; i >= 0; i--) {
- desc = irq_desc + i;
-
- spin_lock_irq(&desc->lock);
- if (!desc->action) {
- desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
- if (desc->handler->startup(i))
- desc->status |= IRQ_PENDING;
- }
- spin_unlock_irq(&desc->lock);
- }
-
- /*
- * Wait for spurious interrupts to trigger
- */
- for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
- /* about 100ms delay */ barrier();
-
- /*
- * Now filter out any obviously spurious interrupts
- */
- val = 0;
- for (i=0; i<NR_IRQS; i++) {
- irq_desc_t *desc = irq_desc + i;
- unsigned int status;
-
- spin_lock_irq(&desc->lock);
- status = desc->status;
-
- if (status & IRQ_AUTODETECT) {
- /* It triggered already - consider it spurious. */
- if (!(status & IRQ_WAITING)) {
- desc->status = status & ~IRQ_AUTODETECT;
- desc->handler->shutdown(i);
- } else
- if (i < 32)
- val |= 1 << i;
- }
- spin_unlock_irq(&desc->lock);
- }
-
- return val;
-}
-
-EXPORT_SYMBOL(probe_irq_on);
-
-/*
- * Return a mask of triggered interrupts (this
- * can handle only legacy ISA interrupts).
- */
-unsigned int
-probe_irq_mask(unsigned long val)
-{
- int i;
- unsigned int mask;
-
- mask = 0;
- for (i = 0; i < NR_IRQS; i++) {
- irq_desc_t *desc = irq_desc + i;
- unsigned int status;
-
- spin_lock_irq(&desc->lock);
- status = desc->status;
-
- if (status & IRQ_AUTODETECT) {
- /* We only react to ISA interrupts */
- if (!(status & IRQ_WAITING)) {
- if (i < 16)
- mask |= 1 << i;
- }
-
- desc->status = status & ~IRQ_AUTODETECT;
- desc->handler->shutdown(i);
- }
- spin_unlock_irq(&desc->lock);
- }
-
- return mask & val;
-}
-
-/*
- * Get the result of the IRQ probe.. A negative result means that
- * we have several candidates (but we return the lowest-numbered
- * one).
- */
-
-int
-probe_irq_off(unsigned long val)
-{
- int i, irq_found, nr_irqs;
-
- nr_irqs = 0;
- irq_found = 0;
- for (i=0; i<NR_IRQS; i++) {
- irq_desc_t *desc = irq_desc + i;
- unsigned int status;
-
- spin_lock_irq(&desc->lock);
- status = desc->status;
-
- if (status & IRQ_AUTODETECT) {
- if (!(status & IRQ_WAITING)) {
- if (!nr_irqs)
- irq_found = i;
- nr_irqs++;
- }
- desc->status = status & ~IRQ_AUTODETECT;
- desc->handler->shutdown(i);
- }
- spin_unlock_irq(&desc->lock);
- }
-
- if (nr_irqs > 1)
- irq_found = -irq_found;
- return irq_found;
-}
-
-EXPORT_SYMBOL(probe_irq_off);
-
-#ifdef CONFIG_SMP
-void synchronize_irq(unsigned int irq)
-{
- /* is there anything to synchronize with? */
- if (!irq_desc[irq].action)
- return;
-
- while (irq_desc[irq].status & IRQ_INPROGRESS)
- barrier();
-}
-#endif
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c
index 9903e3a7910..fff5cf93e81 100644
--- a/arch/alpha/kernel/pci-noop.c
+++ b/arch/alpha/kernel/pci-noop.c
@@ -7,6 +7,7 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bootmem.h>
+#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/sched.h>
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index a8682612abc..9924fd07743 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -43,6 +43,11 @@
#include "proto.h"
#include "pci_impl.h"
+/*
+ * Power off function, if any
+ */
+void (*pm_power_off)(void) = machine_power_off;
+
void
cpu_idle(void)
{
@@ -271,7 +276,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
{
extern void ret_from_fork(void);
- struct thread_info *childti = p->thread_info;
+ struct thread_info *childti = task_thread_info(p);
struct pt_regs * childregs;
struct switch_stack * childstack, *stack;
unsigned long stack_offset, settls;
@@ -280,7 +285,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
if (!(regs->ps & 8))
stack_offset = (PAGE_SIZE-1) & (unsigned long) regs;
childregs = (struct pt_regs *)
- (stack_offset + PAGE_SIZE + (long) childti);
+ (stack_offset + PAGE_SIZE + task_stack_page(p));
*childregs = *regs;
settls = regs->r20;
@@ -423,30 +428,15 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
int
dump_elf_task(elf_greg_t *dest, struct task_struct *task)
{
- struct thread_info *ti;
- struct pt_regs *pt;
-
- ti = task->thread_info;
- pt = (struct pt_regs *)((unsigned long)ti + 2*PAGE_SIZE) - 1;
-
- dump_elf_thread(dest, pt, ti);
-
+ dump_elf_thread(dest, task_pt_regs(task), task_thread_info(task));
return 1;
}
int
dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task)
{
- struct thread_info *ti;
- struct pt_regs *pt;
- struct switch_stack *sw;
-
- ti = task->thread_info;
- pt = (struct pt_regs *)((unsigned long)ti + 2*PAGE_SIZE) - 1;
- sw = (struct switch_stack *)pt - 1;
-
+ struct switch_stack *sw = (struct switch_stack *)task_pt_regs(task) - 1;
memcpy(dest, sw->fp, 32 * 8);
-
return 1;
}
@@ -487,8 +477,8 @@ out:
unsigned long
thread_saved_pc(task_t *t)
{
- unsigned long base = (unsigned long)t->thread_info;
- unsigned long fp, sp = t->thread_info->pcb.ksp;
+ unsigned long base = (unsigned long)task_stack_page(t);
+ unsigned long fp, sp = task_thread_info(t)->pcb.ksp;
if (sp > base && sp+6*8 < base + 16*1024) {
fp = ((unsigned long*)sp)[6];
@@ -518,7 +508,7 @@ get_wchan(struct task_struct *p)
pc = thread_saved_pc(p);
if (in_sched_functions(pc)) {
- schedule_frame = ((unsigned long *)p->thread_info->pcb.ksp)[6];
+ schedule_frame = ((unsigned long *)task_thread_info(p)->pcb.ksp)[6];
return ((unsigned long *)schedule_frame)[12];
}
return pc;
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index bbd37536d14..0cd060598f9 100644
--- a/arch/alpha/kernel/ptrace.c
+++ b/arch/alpha/kernel/ptrace.c
@@ -72,6 +72,13 @@ enum {
REG_R0 = 0, REG_F0 = 32, REG_FPCR = 63, REG_PC = 64
};
+#define PT_REG(reg) \
+ (PAGE_SIZE*2 - sizeof(struct pt_regs) + offsetof(struct pt_regs, reg))
+
+#define SW_REG(reg) \
+ (PAGE_SIZE*2 - sizeof(struct pt_regs) - sizeof(struct switch_stack) \
+ + offsetof(struct switch_stack, reg))
+
static int regoff[] = {
PT_REG( r0), PT_REG( r1), PT_REG( r2), PT_REG( r3),
PT_REG( r4), PT_REG( r5), PT_REG( r6), PT_REG( r7),
@@ -103,14 +110,14 @@ get_reg_addr(struct task_struct * task, unsigned long regno)
unsigned long *addr;
if (regno == 30) {
- addr = &task->thread_info->pcb.usp;
+ addr = &task_thread_info(task)->pcb.usp;
} else if (regno == 65) {
- addr = &task->thread_info->pcb.unique;
+ addr = &task_thread_info(task)->pcb.unique;
} else if (regno == 31 || regno > 65) {
zero = 0;
addr = &zero;
} else {
- addr = (void *)task->thread_info + regoff[regno];
+ addr = task_stack_page(task) + regoff[regno];
}
return addr;
}
@@ -125,7 +132,7 @@ get_reg(struct task_struct * task, unsigned long regno)
if (regno == 63) {
unsigned long fpcr = *get_reg_addr(task, regno);
unsigned long swcr
- = task->thread_info->ieee_state & IEEE_SW_MASK;
+ = task_thread_info(task)->ieee_state & IEEE_SW_MASK;
swcr = swcr_update_status(swcr, fpcr);
return fpcr | swcr;
}
@@ -139,8 +146,8 @@ static int
put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
{
if (regno == 63) {
- task->thread_info->ieee_state
- = ((task->thread_info->ieee_state & ~IEEE_SW_MASK)
+ task_thread_info(task)->ieee_state
+ = ((task_thread_info(task)->ieee_state & ~IEEE_SW_MASK)
| (data & IEEE_SW_MASK));
data = (data & FPCR_DYN_MASK) | ieee_swcr_to_fpcr(data);
}
@@ -188,35 +195,35 @@ ptrace_set_bpt(struct task_struct * child)
* branch (emulation can be tricky for fp branches).
*/
displ = ((s32)(insn << 11)) >> 9;
- child->thread_info->bpt_addr[nsaved++] = pc + 4;
+ task_thread_info(child)->bpt_addr[nsaved++] = pc + 4;
if (displ) /* guard against unoptimized code */
- child->thread_info->bpt_addr[nsaved++]
+ task_thread_info(child)->bpt_addr[nsaved++]
= pc + 4 + displ;
DBG(DBG_BPT, ("execing branch\n"));
} else if (op_code == 0x1a) {
reg_b = (insn >> 16) & 0x1f;
- child->thread_info->bpt_addr[nsaved++] = get_reg(child, reg_b);
+ task_thread_info(child)->bpt_addr[nsaved++] = get_reg(child, reg_b);
DBG(DBG_BPT, ("execing jump\n"));
} else {
- child->thread_info->bpt_addr[nsaved++] = pc + 4;
+ task_thread_info(child)->bpt_addr[nsaved++] = pc + 4;
DBG(DBG_BPT, ("execing normal insn\n"));
}
/* install breakpoints: */
for (i = 0; i < nsaved; ++i) {
- res = read_int(child, child->thread_info->bpt_addr[i],
+ res = read_int(child, task_thread_info(child)->bpt_addr[i],
(int *) &insn);
if (res < 0)
return res;
- child->thread_info->bpt_insn[i] = insn;
+ task_thread_info(child)->bpt_insn[i] = insn;
DBG(DBG_BPT, (" -> next_pc=%lx\n",
- child->thread_info->bpt_addr[i]));
- res = write_int(child, child->thread_info->bpt_addr[i],
+ task_thread_info(child)->bpt_addr[i]));
+ res = write_int(child, task_thread_info(child)->bpt_addr[i],
BREAKINST);
if (res < 0)
return res;
}
- child->thread_info->bpt_nsaved = nsaved;
+ task_thread_info(child)->bpt_nsaved = nsaved;
return 0;
}
@@ -227,9 +234,9 @@ ptrace_set_bpt(struct task_struct * child)
int
ptrace_cancel_bpt(struct task_struct * child)
{
- int i, nsaved = child->thread_info->bpt_nsaved;
+ int i, nsaved = task_thread_info(child)->bpt_nsaved;
- child->thread_info->bpt_nsaved = 0;
+ task_thread_info(child)->bpt_nsaved = 0;
if (nsaved > 2) {
printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved);
@@ -237,8 +244,8 @@ ptrace_cancel_bpt(struct task_struct * child)
}
for (i = 0; i < nsaved; ++i) {
- write_int(child, child->thread_info->bpt_addr[i],
- child->thread_info->bpt_insn[i]);
+ write_int(child, task_thread_info(child)->bpt_addr[i],
+ task_thread_info(child)->bpt_insn[i]);
}
return (nsaved != 0);
}
@@ -265,30 +272,16 @@ do_sys_ptrace(long request, long pid, long addr, long data,
lock_kernel();
DBG(DBG_MEM, ("request=%ld pid=%ld addr=0x%lx data=0x%lx\n",
request, pid, addr, data));
- ret = -EPERM;
if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out_notsk;
- ret = security_ptrace(current->parent, current);
- if (ret)
- goto out_notsk;
- /* set the ptrace bit in the process ptrace flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
+ ret = ptrace_traceme();
goto out_notsk;
}
- if (pid == 1) /* you may not mess with init */
- goto out_notsk;
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
+ child = ptrace_get_task_struct(pid);
+ if (IS_ERR(child)) {
+ ret = PTR_ERR(child);
goto out_notsk;
+ }
if (request == PTRACE_ATTACH) {
ret = ptrace_attach(child);
@@ -369,7 +362,7 @@ do_sys_ptrace(long request, long pid, long addr, long data,
if (!valid_signal(data))
break;
/* Mark single stepping. */
- child->thread_info->bpt_nsaved = -1;
+ task_thread_info(child)->bpt_nsaved = -1;
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
child->exit_code = data;
wake_up_process(child);
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index da0be346579..4b873527ce1 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -302,7 +302,7 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
+ hwrpb->processor_offset
+ cpuid * hwrpb->processor_size);
hwpcb = (struct pcb_struct *) cpu->hwpcb;
- ipcb = &idle->thread_info->pcb;
+ ipcb = &task_thread_info(idle)->pcb;
/* Initialize the CPU's HWPCB to something just good enough for
us to get started. Immediately after starting, we'll swpctx
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index 145dcde143a..d7f0e97fe56 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -254,7 +254,7 @@ alcor_init_pci(void)
* motherboard, by looking for a 21040 TULIP in slot 6, which is
* built into XLT and BRET/MAVERICK, but not available on ALCOR.
*/
- dev = pci_find_device(PCI_VENDOR_ID_DEC,
+ dev = pci_get_device(PCI_VENDOR_ID_DEC,
PCI_DEVICE_ID_DEC_TULIP,
NULL);
if (dev && dev->devfn == PCI_DEVFN(6,0)) {
@@ -262,6 +262,7 @@ alcor_init_pci(void)
printk(KERN_INFO "%s: Detected AS500 or XLT motherboard.\n",
__FUNCTION__);
}
+ pci_dev_put(dev);
}
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
index 47df48a6ddb..131a2d9f79d 100644
--- a/arch/alpha/kernel/sys_sio.c
+++ b/arch/alpha/kernel/sys_sio.c
@@ -105,7 +105,7 @@ sio_collect_irq_levels(void)
struct pci_dev *dev = NULL;
/* Iterate through the devices, collecting IRQ levels. */
- while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ for_each_pci_dev(dev) {
if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) &&
(dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
continue;
@@ -229,8 +229,8 @@ alphabook1_init_pci(void)
*/
dev = NULL;
- while ((dev = pci_find_device(PCI_VENDOR_ID_NCR, PCI_ANY_ID, dev))) {
- if (dev->device == PCI_DEVICE_ID_NCR_53C810
+ while ((dev = pci_get_device(PCI_VENDOR_ID_NCR, PCI_ANY_ID, dev))) {
+ if (dev->device == PCI_DEVICE_ID_NCR_53C810
|| dev->device == PCI_DEVICE_ID_NCR_53C815
|| dev->device == PCI_DEVICE_ID_NCR_53C820
|| dev->device == PCI_DEVICE_ID_NCR_53C825) {
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 90752f6d886..486d7945583 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -7,6 +7,7 @@
/* 2.3.x zone allocator, 1999 Andrea Arcangeli <andrea@suse.de> */
#include <linux/config.h>
+#include <linux/pagemap.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4b15f5f1e25..50b9afa8ae6 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -46,10 +46,6 @@ config MCA
<file:Documentation/mca.txt> (and especially the web page given
there) before attempting to build an MCA bus kernel.
-config UID16
- bool
- default y
-
config RWSEM_GENERIC_SPINLOCK
bool
default y
@@ -103,13 +99,6 @@ config ARCH_EBSA110
Ethernet interface, two PCMCIA sockets, two serial ports and a
parallel port.
-config ARCH_CAMELOT
- bool "Epxa10db"
- help
- This enables support for Altera's Excalibur XA10 development board.
- If you would like to build your kernel to run on one of these boards
- then you must say 'Y' here. Otherwise say 'N'
-
config ARCH_FOOTBRIDGE
bool "FootBridge"
select FOOTBRIDGE
@@ -154,6 +143,7 @@ config ARCH_RPC
select FIQ
select TIMER_ACORN
select ARCH_MAY_HAVE_PC_FDC
+ select ISA_DMA_API
help
On the Acorn Risc-PC, Linux can support the internal IDE disk and
CD-ROM interface, serial and parallel port, and the floppy drive.
@@ -206,6 +196,7 @@ config ARCH_IMX
config ARCH_H720X
bool "Hynix-HMS720x-based"
+ select ISA_DMA_API
help
This enables support for systems based on the Hynix HMS720x
@@ -215,12 +206,16 @@ config ARCH_AAEC2000
help
This enables support for systems based on the Agilent AAEC-2000
+config ARCH_AT91RM9200
+ bool "AT91RM9200"
+ help
+ Say Y here if you intend to run this kernel on an AT91RM9200-based
+ board.
+
endchoice
source "arch/arm/mach-clps711x/Kconfig"
-source "arch/arm/mach-epxa10db/Kconfig"
-
source "arch/arm/mach-footbridge/Kconfig"
source "arch/arm/mach-integrator/Kconfig"
@@ -255,6 +250,8 @@ source "arch/arm/mach-aaec2000/Kconfig"
source "arch/arm/mach-realview/Kconfig"
+source "arch/arm/mach-at91rm9200/Kconfig"
+
# Definitions to make life easier
config ARCH_ACORN
bool
@@ -290,12 +287,14 @@ config ISA
(MCA) or VESA. ISA is an older system, now being displaced by PCI;
newer boards don't support it. If you have ISA, say Y, otherwise N.
+# Select ISA DMA controller support
config ISA_DMA
bool
+ select ISA_DMA_API
+# Select ISA DMA interface
config ISA_DMA_API
bool
- default y
config PCI
bool "PCI support" if ARCH_INTEGRATOR_AP || ARCH_VERSATILE_PB
@@ -418,7 +417,8 @@ config LEDS
ARCH_EBSA285 || ARCH_IMX || ARCH_INTEGRATOR || \
ARCH_LUBBOCK || MACH_MAINSTONE || ARCH_NETWINDER || \
ARCH_OMAP || ARCH_P720T || ARCH_PXA_IDP || \
- ARCH_SA1100 || ARCH_SHARK || ARCH_VERSATILE
+ ARCH_SA1100 || ARCH_SHARK || ARCH_VERSATILE || \
+ ARCH_AT91RM9200
help
If you say Y here, the LEDs on your machine will be used
to provide useful information about your current system status.
@@ -656,7 +656,6 @@ source "kernel/power/Kconfig"
config APM
tristate "Advanced Power Management Emulation"
- depends on PM_LEGACY
---help---
APM is a BIOS specification for saving power using several different
techniques. This is mostly useful for battery powered laptops with
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 81bd2193fe6..1fa2a101158 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -8,7 +8,7 @@
# Copyright (C) 1995-2001 by Russell King
LDFLAGS_vmlinux :=-p --no-undefined -X
-CPPFLAGS_vmlinux.lds = -DKERNEL_RAM_ADDR=$(TEXTADDR)
+CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
OBJCOPYFLAGS :=-O binary -R .note -R .comment -S
GZFLAGS :=-9
#CFLAGS +=-pipe
@@ -65,7 +65,7 @@ CHECKFLAGS += -D__arm__
#Default value
head-y := arch/arm/kernel/head.o arch/arm/kernel/init_task.o
-textaddr-y := 0xC0008000
+textofs-y := 0x00008000
machine-$(CONFIG_ARCH_RPC) := rpc
machine-$(CONFIG_ARCH_EBSA110) := ebsa110
@@ -73,22 +73,19 @@ textaddr-y := 0xC0008000
incdir-$(CONFIG_ARCH_CLPS7500) := cl7500
machine-$(CONFIG_FOOTBRIDGE) := footbridge
incdir-$(CONFIG_FOOTBRIDGE) := ebsa285
-textaddr-$(CONFIG_ARCH_CO285) := 0x60008000
machine-$(CONFIG_ARCH_CO285) := footbridge
incdir-$(CONFIG_ARCH_CO285) := ebsa285
machine-$(CONFIG_ARCH_SHARK) := shark
machine-$(CONFIG_ARCH_SA1100) := sa1100
ifeq ($(CONFIG_ARCH_SA1100),y)
# SA1111 DMA bug: we don't want the kernel to live in precious DMA-able memory
-textaddr-$(CONFIG_SA1111) := 0xc0208000
+ textofs-$(CONFIG_SA1111) := 0x00208000
endif
machine-$(CONFIG_ARCH_PXA) := pxa
machine-$(CONFIG_ARCH_L7200) := l7200
machine-$(CONFIG_ARCH_INTEGRATOR) := integrator
- machine-$(CONFIG_ARCH_CAMELOT) := epxa10db
-textaddr-$(CONFIG_ARCH_CLPS711X) := 0xc0028000
+ textofs-$(CONFIG_ARCH_CLPS711X) := 0x00028000
machine-$(CONFIG_ARCH_CLPS711X) := clps711x
-textaddr-$(CONFIG_ARCH_FORTUNET) := 0xc0008000
machine-$(CONFIG_ARCH_IOP3XX) := iop3xx
machine-$(CONFIG_ARCH_IXP4XX) := ixp4xx
machine-$(CONFIG_ARCH_IXP2000) := ixp2000
@@ -102,6 +99,7 @@ textaddr-$(CONFIG_ARCH_FORTUNET) := 0xc0008000
machine-$(CONFIG_ARCH_H720X) := h720x
machine-$(CONFIG_ARCH_AAEC2000) := aaec2000
machine-$(CONFIG_ARCH_REALVIEW) := realview
+ machine-$(CONFIG_ARCH_AT91RM9200) := at91rm9200
ifeq ($(CONFIG_ARCH_EBSA110),y)
# This is what happens if you forget the IOCS16 line.
@@ -110,7 +108,8 @@ CFLAGS_3c589_cs.o :=-DISA_SIXTEEN_BIT_PERIPHERAL
export CFLAGS_3c589_cs.o
endif
-TEXTADDR := $(textaddr-y)
+# The byte offset of the kernel image in RAM from the start of RAM.
+TEXT_OFFSET := $(textofs-y)
ifeq ($(incdir-y),)
incdir-y := $(machine-y)
@@ -123,7 +122,7 @@ else
MACHINE :=
endif
-export TEXTADDR GZFLAGS
+export TEXT_OFFSET GZFLAGS
# Do we have FASTFPE?
FASTFPE :=arch/arm/fastfpe
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 937a353bc37..a174d63395e 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -15,7 +15,7 @@ include $(srctree)/$(MACHINE)/Makefile.boot
endif
# Note: the following conditions must always be true:
-# ZRELADDR == virt_to_phys(TEXTADDR)
+# ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET)
# PARAMS_PHYS must be within 4MB of ZRELADDR
# INITRD_PHYS must be in RAM
ZRELADDR := $(zreladdr-y)
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 6b505ce41a7..35ffe0f4ece 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -21,10 +21,6 @@ ifeq ($(CONFIG_ARCH_SHARK),y)
OBJS += head-shark.o ofw-shark.o
endif
-ifeq ($(CONFIG_ARCH_CAMELOT),y)
-OBJS += head-epxa10db.o
-endif
-
ifeq ($(CONFIG_ARCH_L7200),y)
OBJS += head-l7200.o
endif
@@ -50,6 +46,10 @@ ifeq ($(CONFIG_PXA_SHARPSL),y)
OBJS += head-sharpsl.o
endif
+ifeq ($(CONFIG_ARCH_AT91RM9200),y)
+OBJS += head-at91rm9200.o
+endif
+
ifeq ($(CONFIG_DEBUG_ICEDCC),y)
OBJS += ice-dcc.o
endif
diff --git a/arch/arm/boot/compressed/head-at91rm9200.S b/arch/arm/boot/compressed/head-at91rm9200.S
new file mode 100644
index 00000000000..2119ea62b54
--- /dev/null
+++ b/arch/arm/boot/compressed/head-at91rm9200.S
@@ -0,0 +1,57 @@
+/*
+ * linux/arch/arm/boot/compressed/head-at91rm9200.S
+ *
+ * Copyright (C) 2003 SAN People
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#include <asm/mach-types.h>
+
+ .section ".start", "ax"
+
+ @ Atmel AT91RM9200-DK : 262
+ mov r3, #(MACH_TYPE_AT91RM9200DK & 0xff)
+ orr r3, r3, #(MACH_TYPE_AT91RM9200DK & 0xff00)
+ cmp r7, r3
+ beq 99f
+
+ @ Cogent CSB337 : 399
+ mov r3, #(MACH_TYPE_CSB337 & 0xff)
+ orr r3, r3, #(MACH_TYPE_CSB337 & 0xff00)
+ cmp r7, r3
+ beq 99f
+
+ @ Cogent CSB637 : 648
+ mov r3, #(MACH_TYPE_CSB637 & 0xff)
+ orr r3, r3, #(MACH_TYPE_CSB637 & 0xff00)
+ cmp r7, r3
+ beq 99f
+
+ @ Atmel AT91RM9200-EK : 705
+ mov r3, #(MACH_TYPE_AT91RM9200EK & 0xff)
+ orr r3, r3, #(MACH_TYPE_AT91RM9200EK & 0xff00)
+ cmp r7, r3
+ beq 99f
+
+ @ Conitec Carmeva : 769
+ mov r3, #(MACH_TYPE_CARMEVA & 0xff)
+ orr r3, r3, #(MACH_TYPE_CARMEVA & 0xff00)
+ cmp r7, r3
+ beq 99f
+
+ @ KwikByte KB920x : 612
+ mov r3, #(MACH_TYPE_KB9200 & 0xff)
+ orr r3, r3, #(MACH_TYPE_KB9200 & 0xff00)
+ cmp r7, r3
+ beq 99f
+
+ @ Unknown board, use the AT91RM9200DK board
+ @ mov r7, #MACH_TYPE_AT91RM9200
+ mov r7, #(MACH_TYPE_AT91RM9200DK & 0xff)
+ orr r7, r7, #(MACH_TYPE_AT91RM9200DK & 0xff00)
+
+99:
diff --git a/arch/arm/boot/compressed/head-epxa10db.S b/arch/arm/boot/compressed/head-epxa10db.S
deleted file mode 100644
index 757681f12a3..00000000000
--- a/arch/arm/boot/compressed/head-epxa10db.S
+++ /dev/null
@@ -1,5 +0,0 @@
-#include <asm/mach-types.h>
-#include <asm/arch/excalibur.h>
-
- .section ".start", "ax"
- mov r7, #MACH_TYPE_CAMELOT
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 6abafb6f184..aaa47400eb9 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -84,7 +84,7 @@
kputc #'\n'
kphex r5, 8 /* decompressed kernel start */
kputc #'-'
- kphex r8, 8 /* decompressed kernel end */
+ kphex r9, 8 /* decompressed kernel end */
kputc #'>'
kphex r4, 8 /* kernel execution address */
kputc #'\n'
@@ -116,7 +116,7 @@ start:
.word start @ absolute load/run zImage address
.word _edata @ zImage end address
1: mov r7, r1 @ save architecture ID
- mov r8, #0 @ save r0
+ mov r8, r2 @ save atags pointer
#ifndef __ARM_ARCH_2__
/*
@@ -144,7 +144,7 @@ not_angel:
/*
* some architecture specific code can be inserted
- * by the linker here, but it should preserve r7 and r8.
+ * by the linker here, but it should preserve r7, r8, and r9.
*/
.text
@@ -249,16 +249,17 @@ not_relocated: mov r0, #0
* r5 = decompressed kernel start
* r6 = processor ID
* r7 = architecture ID
- * r8-r14 = unused
+ * r8 = atags pointer
+ * r9-r14 = corrupted
*/
add r1, r5, r0 @ end of decompressed kernel
adr r2, reloc_start
ldr r3, LC1
add r3, r2, r3
-1: ldmia r2!, {r8 - r13} @ copy relocation code
- stmia r1!, {r8 - r13}
- ldmia r2!, {r8 - r13}
- stmia r1!, {r8 - r13}
+1: ldmia r2!, {r9 - r14} @ copy relocation code
+ stmia r1!, {r9 - r14}
+ ldmia r2!, {r9 - r14}
+ stmia r1!, {r9 - r14}
cmp r2, r3
blo 1b
@@ -308,11 +309,12 @@ params: ldr r0, =params_phys
* r4 = kernel execution address
* r6 = processor ID
* r7 = architecture number
- * r8 = run-time address of "start"
+ * r8 = atags pointer
+ * r9 = run-time address of "start" (???)
* On exit,
- * r1, r2, r3, r8, r9, r12 corrupted
+ * r1, r2, r3, r9, r10, r12 corrupted
* This routine must preserve:
- * r4, r5, r6, r7
+ * r4, r5, r6, r7, r8
*/
.align 5
cache_on: mov r3, #8 @ cache_on function
@@ -326,15 +328,15 @@ __setup_mmu: sub r3, r4, #16384 @ Page directory size
* bits for the RAM area only.
*/
mov r0, r3
- mov r8, r0, lsr #18
- mov r8, r8, lsl #18 @ start of RAM
- add r9, r8, #0x10000000 @ a reasonable RAM size
+ mov r9, r0, lsr #18
+ mov r9, r9, lsl #18 @ start of RAM
+ add r10, r9, #0x10000000 @ a reasonable RAM size
mov r1, #0x12
orr r1, r1, #3 << 10
add r2, r3, #16384
1: cmp r1, r8 @ if virt > start of RAM
orrhs r1, r1, #0x0c @ set cacheable, bufferable
- cmp r1, r9 @ if virt > end of RAM
+ cmp r1, r10 @ if virt > end of RAM
bichs r1, r1, #0x0c @ clear cacheable, bufferable
str r1, [r0], #4 @ 1:1 mapping
add r1, r1, #1048576
@@ -403,26 +405,28 @@ __common_cache_on:
* r5 = decompressed kernel start
* r6 = processor ID
* r7 = architecture ID
- * r8-r14 = unused
+ * r8 = atags pointer
+ * r9-r14 = corrupted
*/
.align 5
-reloc_start: add r8, r5, r0
+reloc_start: add r9, r5, r0
debug_reloc_start
mov r1, r4
1:
.rept 4
- ldmia r5!, {r0, r2, r3, r9 - r13} @ relocate kernel
- stmia r1!, {r0, r2, r3, r9 - r13}
+ ldmia r5!, {r0, r2, r3, r10 - r14} @ relocate kernel
+ stmia r1!, {r0, r2, r3, r10 - r14}
.endr
- cmp r5, r8
+ cmp r5, r9
blo 1b
debug_reloc_end
call_kernel: bl cache_clean_flush
bl cache_off
- mov r0, #0
+ mov r0, #0 @ must be zero
mov r1, r7 @ restore architecture number
+ mov r2, r8 @ restore atags pointer
mov pc, r4 @ call kernel
/*
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 666ba393575..d7509c7a3c5 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -23,5 +23,8 @@ config SHARP_LOCOMO
config SHARP_PARAM
bool
+config SHARPSL_PM
+ bool
+
config SHARP_SCOOP
bool
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index a87886564b1..ec8d17c9690 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -3,7 +3,6 @@
#
obj-y += rtctime.o
-obj-$(CONFIG_ARM_AMBA) += amba.o
obj-$(CONFIG_ARM_GIC) += gic.o
obj-$(CONFIG_ICST525) += icst525.o
obj-$(CONFIG_ICST307) += icst307.o
@@ -13,4 +12,5 @@ obj-$(CONFIG_DMABOUNCE) += dmabounce.o
obj-$(CONFIG_TIMER_ACORN) += time-acorn.o
obj-$(CONFIG_SHARP_LOCOMO) += locomo.o
obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o
+obj-$(CONFIG_SHARPSL_PM) += sharpsl_pm.o
obj-$(CONFIG_SHARP_SCOOP) += scoop.o
diff --git a/arch/arm/common/rtctime.c b/arch/arm/common/rtctime.c
index 72b03f201eb..48b1e19b131 100644
--- a/arch/arm/common/rtctime.c
+++ b/arch/arm/common/rtctime.c
@@ -17,7 +17,9 @@
#include <linux/proc_fs.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
+#include <linux/capability.h>
#include <linux/device.h>
+#include <linux/mutex.h>
#include <asm/rtc.h>
#include <asm/semaphore.h>
@@ -34,7 +36,7 @@ static unsigned long rtc_irq_data;
/*
* rtc_sem protects rtc_inuse and rtc_ops
*/
-static DECLARE_MUTEX(rtc_sem);
+static DEFINE_MUTEX(rtc_mutex);
static unsigned long rtc_inuse;
static struct rtc_ops *rtc_ops;
@@ -355,7 +357,7 @@ static int rtc_open(struct inode *inode, struct file *file)
{
int ret;
- down(&rtc_sem);
+ mutex_lock(&rtc_mutex);
if (rtc_inuse) {
ret = -EBUSY;
@@ -373,7 +375,7 @@ static int rtc_open(struct inode *inode, struct file *file)
rtc_inuse = 1;
}
}
- up(&rtc_sem);
+ mutex_unlock(&rtc_mutex);
return ret;
}
@@ -479,7 +481,7 @@ int register_rtc(struct rtc_ops *ops)
{
int ret = -EBUSY;
- down(&rtc_sem);
+ mutex_lock(&rtc_mutex);
if (rtc_ops == NULL) {
rtc_ops = ops;
@@ -488,7 +490,7 @@ int register_rtc(struct rtc_ops *ops)
create_proc_read_entry("driver/rtc", 0, NULL,
rtc_read_proc, ops);
}
- up(&rtc_sem);
+ mutex_unlock(&rtc_mutex);
return ret;
}
@@ -496,12 +498,12 @@ EXPORT_SYMBOL(register_rtc);
void unregister_rtc(struct rtc_ops *rtc)
{
- down(&rtc_sem);
+ mutex_lock(&rtc_mutex);
if (rtc == rtc_ops) {
remove_proc_entry("driver/rtc", NULL);
misc_deregister(&rtc_miscdev);
rtc_ops = NULL;
}
- up(&rtc_sem);
+ mutex_unlock(&rtc_mutex);
}
EXPORT_SYMBOL(unregister_rtc);
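The rtctime.c hunks above replace a semaphore that only ever guarded a critical section (DECLARE_MUTEX / down / up) with a real mutex (DEFINE_MUTEX / mutex_lock / mutex_unlock). A minimal sketch of the same conversion pattern, using hypothetical names, for code following this sweep:

#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(example_mutex);	/* was: static DECLARE_MUTEX(example_sem); */
static int example_in_use;

static int example_open(void)
{
	int ret = 0;

	mutex_lock(&example_mutex);	/* was: down(&example_sem); */
	if (example_in_use)
		ret = -EBUSY;
	else
		example_in_use = 1;
	mutex_unlock(&example_mutex);	/* was: up(&example_sem); */

	return ret;
}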
diff --git a/arch/arm/common/scoop.c b/arch/arm/common/scoop.c
index 0c3cbd9a388..a2dfe0b0f1e 100644
--- a/arch/arm/common/scoop.c
+++ b/arch/arm/common/scoop.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/string.h>
+#include <linux/slab.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/hardware/scoop.h>
@@ -33,7 +34,6 @@ void reset_scoop(struct device *dev)
SCOOP_REG(sdev->base,SCOOP_MCR) = 0x0100; // 00
SCOOP_REG(sdev->base,SCOOP_CDR) = 0x0000; // 04
- SCOOP_REG(sdev->base,SCOOP_CPR) = 0x0000; // 0C
SCOOP_REG(sdev->base,SCOOP_CCR) = 0x0000; // 10
SCOOP_REG(sdev->base,SCOOP_IMR) = 0x0000; // 18
SCOOP_REG(sdev->base,SCOOP_IRM) = 0x00FF; // 14
@@ -154,6 +154,7 @@ int __init scoop_probe(struct platform_device *pdev)
SCOOP_REG(devptr->base, SCOOP_MCR) = 0x0140;
reset_scoop(&pdev->dev);
+ SCOOP_REG(devptr->base, SCOOP_CPR) = 0x0000;
SCOOP_REG(devptr->base, SCOOP_GPCR) = inf->io_dir & 0xffff;
SCOOP_REG(devptr->base, SCOOP_GPWR) = inf->io_out & 0xffff;
diff --git a/arch/arm/common/sharpsl_pm.c b/arch/arm/common/sharpsl_pm.c
new file mode 100644
index 00000000000..978d32e82d3
--- /dev/null
+++ b/arch/arm/common/sharpsl_pm.c
@@ -0,0 +1,839 @@
+/*
+ * Battery and Power Management code for the Sharp SL-C7xx and SL-Cxx00
+ * series of PDAs
+ *
+ * Copyright (c) 2004-2005 Richard Purdie
+ *
+ * Based on code written by Sharp for 2.4 kernels
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/apm_bios.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include <asm/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/irq.h>
+#include <asm/apm.h>
+#include <asm/arch/pm.h>
+#include <asm/arch/pxa-regs.h>
+#include <asm/arch/sharpsl.h>
+#include <asm/hardware/sharpsl_pm.h>
+
+/*
+ * Constants
+ */
+#define SHARPSL_CHARGE_ON_TIME_INTERVAL (msecs_to_jiffies(1*60*1000)) /* 1 min */
+#define SHARPSL_CHARGE_FINISH_TIME (msecs_to_jiffies(10*60*1000)) /* 10 min */
+#define SHARPSL_BATCHK_TIME (msecs_to_jiffies(15*1000)) /* 15 sec */
+#define SHARPSL_BATCHK_TIME_SUSPEND (60*10) /* 10 min */
+#define SHARPSL_WAIT_CO_TIME 15 /* 15 sec */
+#define SHARPSL_WAIT_DISCHARGE_ON 100 /* 100 msec */
+#define SHARPSL_CHECK_BATTERY_WAIT_TIME_TEMP 10 /* 10 msec */
+#define SHARPSL_CHECK_BATTERY_WAIT_TIME_VOLT 10 /* 10 msec */
+#define SHARPSL_CHECK_BATTERY_WAIT_TIME_ACIN 10 /* 10 msec */
+#define SHARPSL_CHARGE_WAIT_TIME 15 /* 15 msec */
+#define SHARPSL_CHARGE_CO_CHECK_TIME 5 /* 5 msec */
+#define SHARPSL_CHARGE_RETRY_CNT 1 /* eqv. 10 min */
+
+#define SHARPSL_CHARGE_ON_VOLT 0x99 /* 2.9V */
+#define SHARPSL_CHARGE_ON_TEMP 0xe0 /* 2.9V */
+#define SHARPSL_CHARGE_ON_ACIN_HIGH 0x9b /* 6V */
+#define SHARPSL_CHARGE_ON_ACIN_LOW 0x34 /* 2V */
+#define SHARPSL_FATAL_ACIN_VOLT 182 /* 3.45V */
+#define SHARPSL_FATAL_NOACIN_VOLT 170 /* 3.40V */
+
+/*
+ * Prototypes
+ */
+static int sharpsl_off_charge_battery(void);
+static int sharpsl_check_battery_temp(void);
+static int sharpsl_check_battery_voltage(void);
+static int sharpsl_ac_check(void);
+static int sharpsl_fatal_check(void);
+static int sharpsl_average_value(int ad);
+static void sharpsl_average_clear(void);
+static void sharpsl_charge_toggle(void *private_);
+static void sharpsl_battery_thread(void *private_);
+
+
+/*
+ * Variables
+ */
+struct sharpsl_pm_status sharpsl_pm;
+DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL);
+DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL);
+
+
+static int get_percentage(int voltage)
+{
+ int i = sharpsl_pm.machinfo->bat_levels - 1;
+ struct battery_thresh *thresh;
+
+ if (sharpsl_pm.charge_mode == CHRG_ON)
+ thresh=sharpsl_pm.machinfo->bat_levels_acin;
+ else
+ thresh=sharpsl_pm.machinfo->bat_levels_noac;
+
+ while (i > 0 && (voltage > thresh[i].voltage))
+ i--;
+
+ return thresh[i].percentage;
+}
+
+static int get_apm_status(int voltage)
+{
+ int low_thresh, high_thresh;
+
+ if (sharpsl_pm.charge_mode == CHRG_ON) {
+ high_thresh = sharpsl_pm.machinfo->status_high_acin;
+ low_thresh = sharpsl_pm.machinfo->status_low_acin;
+ } else {
+ high_thresh = sharpsl_pm.machinfo->status_high_noac;
+ low_thresh = sharpsl_pm.machinfo->status_low_noac;
+ }
+
+ if (voltage >= high_thresh)
+ return APM_BATTERY_STATUS_HIGH;
+ if (voltage >= low_thresh)
+ return APM_BATTERY_STATUS_LOW;
+ return APM_BATTERY_STATUS_CRITICAL;
+}
+
+void sharpsl_battery_kick(void)
+{
+ schedule_delayed_work(&sharpsl_bat, msecs_to_jiffies(125));
+}
+EXPORT_SYMBOL(sharpsl_battery_kick);
+
+
+static void sharpsl_battery_thread(void *private_)
+{
+ int voltage, percent, apm_status, i = 0;
+
+ if (!sharpsl_pm.machinfo)
+ return;
+
+ sharpsl_pm.battstat.ac_status = (sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN) ? APM_AC_ONLINE : APM_AC_OFFLINE);
+
+ /* Corgi cannot confirm when the battery is fully charged, so periodically kick! */
+ if (machine_is_corgi() && (sharpsl_pm.charge_mode == CHRG_ON)
+ && time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_ON_TIME_INTERVAL))
+ schedule_work(&toggle_charger);
+
+ while(1) {
+ voltage = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT);
+
+ if (voltage > 0) break;
+ if (i++ > 5) {
+ voltage = sharpsl_pm.machinfo->bat_levels_noac[0].voltage;
+ dev_warn(sharpsl_pm.dev, "Warning: Cannot read main battery!\n");
+ break;
+ }
+ }
+
+ voltage = sharpsl_average_value(voltage);
+ apm_status = get_apm_status(voltage);
+ percent = get_percentage(voltage);
+
+ /* At low battery voltages, the voltage has a tendency to start
+ creeping back up so we try to avoid this here */
+ if ((sharpsl_pm.battstat.ac_status == APM_AC_ONLINE) || (apm_status == APM_BATTERY_STATUS_HIGH) || percent <= sharpsl_pm.battstat.mainbat_percent) {
+ sharpsl_pm.battstat.mainbat_voltage = voltage;
+ sharpsl_pm.battstat.mainbat_status = apm_status;
+ sharpsl_pm.battstat.mainbat_percent = percent;
+ }
+
+ dev_dbg(sharpsl_pm.dev, "Battery: voltage: %d, status: %d, percentage: %d, time: %d\n", voltage,
+ sharpsl_pm.battstat.mainbat_status, sharpsl_pm.battstat.mainbat_percent, jiffies);
+
+ /* If battery is low, limit backlight intensity to save power. */
+ if ((sharpsl_pm.battstat.ac_status != APM_AC_ONLINE)
+ && ((sharpsl_pm.battstat.mainbat_status == APM_BATTERY_STATUS_LOW) ||
+ (sharpsl_pm.battstat.mainbat_status == APM_BATTERY_STATUS_CRITICAL))) {
+ if (!(sharpsl_pm.flags & SHARPSL_BL_LIMIT)) {
+ corgibl_limit_intensity(1);
+ sharpsl_pm.flags |= SHARPSL_BL_LIMIT;
+ }
+ } else if (sharpsl_pm.flags & SHARPSL_BL_LIMIT) {
+ corgibl_limit_intensity(0);
+ sharpsl_pm.flags &= ~SHARPSL_BL_LIMIT;
+ }
+
+ /* Suspend if critical battery level */
+ if ((sharpsl_pm.battstat.ac_status != APM_AC_ONLINE)
+ && (sharpsl_pm.battstat.mainbat_status == APM_BATTERY_STATUS_CRITICAL)
+ && !(sharpsl_pm.flags & SHARPSL_APM_QUEUED)) {
+ sharpsl_pm.flags |= SHARPSL_APM_QUEUED;
+ dev_err(sharpsl_pm.dev, "Fatal Off\n");
+ apm_queue_event(APM_CRITICAL_SUSPEND);
+ }
+
+ schedule_delayed_work(&sharpsl_bat, SHARPSL_BATCHK_TIME);
+}
+
+void sharpsl_pm_led(int val)
+{
+ if (val == SHARPSL_LED_ERROR) {
+ dev_err(sharpsl_pm.dev, "Charging Error!\n");
+ } else if (val == SHARPSL_LED_ON) {
+ dev_dbg(sharpsl_pm.dev, "Charge LED On\n");
+
+ } else {
+ dev_dbg(sharpsl_pm.dev, "Charge LED Off\n");
+
+ }
+}
+
+static void sharpsl_charge_on(void)
+{
+ dev_dbg(sharpsl_pm.dev, "Turning Charger On\n");
+
+ sharpsl_pm.full_count = 0;
+ sharpsl_pm.charge_mode = CHRG_ON;
+ schedule_delayed_work(&toggle_charger, msecs_to_jiffies(250));
+ schedule_delayed_work(&sharpsl_bat, msecs_to_jiffies(500));
+}
+
+static void sharpsl_charge_off(void)
+{
+ dev_dbg(sharpsl_pm.dev, "Turning Charger Off\n");
+
+ sharpsl_pm.machinfo->charge(0);
+ sharpsl_pm_led(SHARPSL_LED_OFF);
+ sharpsl_pm.charge_mode = CHRG_OFF;
+
+ schedule_work(&sharpsl_bat);
+}
+
+static void sharpsl_charge_error(void)
+{
+ sharpsl_pm_led(SHARPSL_LED_ERROR);
+ sharpsl_pm.machinfo->charge(0);
+ sharpsl_pm.charge_mode = CHRG_ERROR;
+}
+
+static void sharpsl_charge_toggle(void *private_)
+{
+ dev_dbg(sharpsl_pm.dev, "Toogling Charger at time: %lx\n", jiffies);
+
+ if (!sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN)) {
+ sharpsl_charge_off();
+ return;
+ } else if ((sharpsl_check_battery_temp() < 0) || (sharpsl_ac_check() < 0)) {
+ sharpsl_charge_error();
+ return;
+ }
+
+ sharpsl_pm_led(SHARPSL_LED_ON);
+ sharpsl_pm.machinfo->charge(0);
+ mdelay(SHARPSL_CHARGE_WAIT_TIME);
+ sharpsl_pm.machinfo->charge(1);
+
+ sharpsl_pm.charge_start_time = jiffies;
+}
+
+static void sharpsl_ac_timer(unsigned long data)
+{
+ int acin = sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN);
+
+ dev_dbg(sharpsl_pm.dev, "AC Status: %d\n",acin);
+
+ sharpsl_average_clear();
+ if (acin && (sharpsl_pm.charge_mode != CHRG_ON))
+ sharpsl_charge_on();
+ else if (sharpsl_pm.charge_mode == CHRG_ON)
+ sharpsl_charge_off();
+
+ schedule_work(&sharpsl_bat);
+}
+
+
+irqreturn_t sharpsl_ac_isr(int irq, void *dev_id, struct pt_regs *fp)
+{
+ /* Delay the event slightly to debounce */
+ /* Must be a smaller delay than the chrg_full_isr below */
+ mod_timer(&sharpsl_pm.ac_timer, jiffies + msecs_to_jiffies(250));
+
+ return IRQ_HANDLED;
+}
+
+static void sharpsl_chrg_full_timer(unsigned long data)
+{
+ dev_dbg(sharpsl_pm.dev, "Charge Full at time: %lx\n", jiffies);
+
+ sharpsl_pm.full_count++;
+
+ if (!sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN)) {
+ dev_dbg(sharpsl_pm.dev, "Charge Full: AC removed - stop charging!\n");
+ if (sharpsl_pm.charge_mode == CHRG_ON)
+ sharpsl_charge_off();
+ } else if (sharpsl_pm.full_count < 2) {
+ dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n");
+ schedule_work(&toggle_charger);
+ } else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) {
+ dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n");
+ schedule_work(&toggle_charger);
+ } else {
+ sharpsl_charge_off();
+ sharpsl_pm.charge_mode = CHRG_DONE;
+ dev_dbg(sharpsl_pm.dev, "Charge Full: Charging Finished\n");
+ }
+}
+
+/* Charging Finished Interrupt (Not present on Corgi) */
+/* Can trigger at the same time as an AC status change so
+ delay until after that has been processed */
+irqreturn_t sharpsl_chrg_full_isr(int irq, void *dev_id, struct pt_regs *fp)
+{
+ if (sharpsl_pm.flags & SHARPSL_SUSPENDED)
+ return IRQ_HANDLED;
+
+ /* delay until after any ac interrupt */
+ mod_timer(&sharpsl_pm.chrg_full_timer, jiffies + msecs_to_jiffies(500));
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t sharpsl_fatal_isr(int irq, void *dev_id, struct pt_regs *fp)
+{
+ int is_fatal = 0;
+
+ if (!sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_LOCK)) {
+ dev_err(sharpsl_pm.dev, "Battery now Unlocked! Suspending.\n");
+ is_fatal = 1;
+ }
+
+ if (!sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_FATAL)) {
+ dev_err(sharpsl_pm.dev, "Fatal Batt Error! Suspending.\n");
+ is_fatal = 1;
+ }
+
+ if (!(sharpsl_pm.flags & SHARPSL_APM_QUEUED) && is_fatal) {
+ sharpsl_pm.flags |= SHARPSL_APM_QUEUED;
+ apm_queue_event(APM_CRITICAL_SUSPEND);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Maintain an average of the last 10 readings
+ */
+#define SHARPSL_CNV_VALUE_NUM 10
+static int sharpsl_ad_index;
+
+static void sharpsl_average_clear(void)
+{
+ sharpsl_ad_index = 0;
+}
+
+static int sharpsl_average_value(int ad)
+{
+ int i, ad_val = 0;
+ static int sharpsl_ad[SHARPSL_CNV_VALUE_NUM+1];
+
+ if (sharpsl_pm.battstat.mainbat_status != APM_BATTERY_STATUS_HIGH) {
+ sharpsl_ad_index = 0;
+ return ad;
+ }
+
+ sharpsl_ad[sharpsl_ad_index] = ad;
+ sharpsl_ad_index++;
+ if (sharpsl_ad_index >= SHARPSL_CNV_VALUE_NUM) {
+ for (i=0; i < (SHARPSL_CNV_VALUE_NUM-1); i++)
+ sharpsl_ad[i] = sharpsl_ad[i+1];
+ sharpsl_ad_index = SHARPSL_CNV_VALUE_NUM - 1;
+ }
+ for (i=0; i < sharpsl_ad_index; i++)
+ ad_val += sharpsl_ad[i];
+
+ return (ad_val / sharpsl_ad_index);
+}
+
+/*
+ * Take an array of 5 integers, remove the maximum and minimum values
+ * and return the average.
+ */
+static int get_select_val(int *val)
+{
+ int i, j, k, temp, sum = 0;
+
+ /* Find MAX val */
+ temp = val[0];
+ j=0;
+ for (i=1; i<5; i++) {
+ if (temp < val[i]) {
+ temp = val[i];
+ j = i;
+ }
+ }
+
+ /* Find MIN val */
+ temp = val[4];
+ k=4;
+ for (i=3; i>=0; i--) {
+ if (temp > val[i]) {
+ temp = val[i];
+ k = i;
+ }
+ }
+
+ for (i=0; i<5; i++)
+ if (i != j && i != k )
+ sum += val[i];
+
+ dev_dbg(sharpsl_pm.dev, "Average: %d from values: %d, %d, %d, %d, %d\n", sum/3, val[0], val[1], val[2], val[3], val[4]);
+
+ return (sum/3);
+}
+
+static int sharpsl_check_battery_temp(void)
+{
+ int val, i, buff[5];
+
+ /* Check battery temperature */
+ for (i=0; i<5; i++) {
+ mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_TEMP);
+ sharpsl_pm.machinfo->measure_temp(1);
+ mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_TEMP);
+ buff[i] = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_TEMP);
+ sharpsl_pm.machinfo->measure_temp(0);
+ }
+
+ val = get_select_val(buff);
+
+ dev_dbg(sharpsl_pm.dev, "Temperature: %d\n", val);
+ if (val > SHARPSL_CHARGE_ON_TEMP)
+ return -1;
+
+ return 0;
+}
+
+static int sharpsl_check_battery_voltage(void)
+{
+ int val, i, buff[5];
+
+ /* disable charge, enable discharge */
+ sharpsl_pm.machinfo->charge(0);
+ sharpsl_pm.machinfo->discharge(1);
+ mdelay(SHARPSL_WAIT_DISCHARGE_ON);
+
+ if (sharpsl_pm.machinfo->discharge1)
+ sharpsl_pm.machinfo->discharge1(1);
+
+ /* Check battery voltage */
+ for (i=0; i<5; i++) {
+ buff[i] = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT);
+ mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_VOLT);
+ }
+
+ if (sharpsl_pm.machinfo->discharge1)
+ sharpsl_pm.machinfo->discharge1(0);
+
+ sharpsl_pm.machinfo->discharge(0);
+
+ val = get_select_val(buff);
+ dev_dbg(sharpsl_pm.dev, "Battery Voltage: %d\n", val);
+
+ if (val < SHARPSL_CHARGE_ON_VOLT)
+ return -1;
+
+ return 0;
+}
+
+static int sharpsl_ac_check(void)
+{
+ int temp, i, buff[5];
+
+ for (i=0; i<5; i++) {
+ buff[i] = sharpsl_pm.machinfo->read_devdata(SHARPSL_ACIN_VOLT);
+ mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_ACIN);
+ }
+
+ temp = get_select_val(buff);
+ dev_dbg(sharpsl_pm.dev, "AC Voltage: %d\n",temp);
+
+ if ((temp > SHARPSL_CHARGE_ON_ACIN_HIGH) || (temp < SHARPSL_CHARGE_ON_ACIN_LOW)) {
+ dev_err(sharpsl_pm.dev, "Error: AC check failed.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int sharpsl_pm_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ sharpsl_pm.flags |= SHARPSL_SUSPENDED;
+ flush_scheduled_work();
+
+ if (sharpsl_pm.charge_mode == CHRG_ON)
+ sharpsl_pm.flags |= SHARPSL_DO_OFFLINE_CHRG;
+ else
+ sharpsl_pm.flags &= ~SHARPSL_DO_OFFLINE_CHRG;
+
+ return 0;
+}
+
+static int sharpsl_pm_resume(struct platform_device *pdev)
+{
+ /* Clear the reset source indicators as they break the bootloader upon reboot */
+ RCSR = 0x0f;
+ sharpsl_average_clear();
+ sharpsl_pm.flags &= ~SHARPSL_APM_QUEUED;
+ sharpsl_pm.flags &= ~SHARPSL_SUSPENDED;
+
+ return 0;
+}
+
+static void corgi_goto_sleep(unsigned long alarm_time, unsigned int alarm_enable, suspend_state_t state)
+{
+ dev_dbg(sharpsl_pm.dev, "Time is: %08x\n",RCNR);
+
+ dev_dbg(sharpsl_pm.dev, "Offline Charge Activate = %d\n",sharpsl_pm.flags & SHARPSL_DO_OFFLINE_CHRG);
+ /* not charging and AC-IN! */
+
+ if ((sharpsl_pm.flags & SHARPSL_DO_OFFLINE_CHRG) && (sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN))) {
+ dev_dbg(sharpsl_pm.dev, "Activating Offline Charger...\n");
+ sharpsl_pm.charge_mode = CHRG_OFF;
+ sharpsl_pm.flags &= ~SHARPSL_DO_OFFLINE_CHRG;
+ sharpsl_off_charge_battery();
+ }
+
+ sharpsl_pm.machinfo->presuspend();
+
+ PEDR = 0xffffffff; /* clear it */
+
+ sharpsl_pm.flags &= ~SHARPSL_ALARM_ACTIVE;
+ if ((sharpsl_pm.charge_mode == CHRG_ON) && ((alarm_enable && ((alarm_time - RCNR) > (SHARPSL_BATCHK_TIME_SUSPEND + 30))) || !alarm_enable)) {
+ RTSR &= RTSR_ALE;
+ RTAR = RCNR + SHARPSL_BATCHK_TIME_SUSPEND;
+ dev_dbg(sharpsl_pm.dev, "Charging alarm at: %08x\n",RTAR);
+ sharpsl_pm.flags |= SHARPSL_ALARM_ACTIVE;
+ } else if (alarm_enable) {
+ RTSR &= RTSR_ALE;
+ RTAR = alarm_time;
+ dev_dbg(sharpsl_pm.dev, "User alarm at: %08x\n",RTAR);
+ } else {
+ dev_dbg(sharpsl_pm.dev, "No alarms set.\n");
+ }
+
+ pxa_pm_enter(state);
+
+ sharpsl_pm.machinfo->postsuspend();
+
+ dev_dbg(sharpsl_pm.dev, "Corgi woken up from suspend: %08x\n",PEDR);
+}
+
+static int corgi_enter_suspend(unsigned long alarm_time, unsigned int alarm_enable, suspend_state_t state)
+{
+ if (!sharpsl_pm.machinfo->should_wakeup(!(sharpsl_pm.flags & SHARPSL_ALARM_ACTIVE) && alarm_enable) )
+ {
+ if (!(sharpsl_pm.flags & SHARPSL_ALARM_ACTIVE)) {
+ dev_dbg(sharpsl_pm.dev, "No user triggered wakeup events and not charging. Strange. Suspend.\n");
+ corgi_goto_sleep(alarm_time, alarm_enable, state);
+ return 1;
+ }
+ if(sharpsl_off_charge_battery()) {
+ dev_dbg(sharpsl_pm.dev, "Charging. Suspend...\n");
+ corgi_goto_sleep(alarm_time, alarm_enable, state);
+ return 1;
+ }
+ dev_dbg(sharpsl_pm.dev, "User triggered wakeup in offline charger.\n");
+ }
+
+ if ((!sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_LOCK)) || (sharpsl_fatal_check() < 0) )
+ {
+ dev_err(sharpsl_pm.dev, "Fatal condition. Suspend.\n");
+ corgi_goto_sleep(alarm_time, alarm_enable, state);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int corgi_pxa_pm_enter(suspend_state_t state)
+{
+ unsigned long alarm_time = RTAR;
+ unsigned int alarm_status = ((RTSR & RTSR_ALE) != 0);
+
+ dev_dbg(sharpsl_pm.dev, "SharpSL suspending for first time.\n");
+
+ corgi_goto_sleep(alarm_time, alarm_status, state);
+
+ while (corgi_enter_suspend(alarm_time,alarm_status,state))
+ {}
+
+ dev_dbg(sharpsl_pm.dev, "SharpSL resuming...\n");
+
+ return 0;
+}
+#endif
+
+
+/*
+ * Check for fatal battery errors
+ * Fatal returns -1
+ */
+static int sharpsl_fatal_check(void)
+{
+ int buff[5], temp, i, acin;
+
+ dev_dbg(sharpsl_pm.dev, "sharpsl_fatal_check entered\n");
+
+ /* Check AC-Adapter */
+ acin = sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN);
+
+ if (acin && (sharpsl_pm.charge_mode == CHRG_ON)) {
+ sharpsl_pm.machinfo->charge(0);
+ udelay(100);
+ sharpsl_pm.machinfo->discharge(1); /* enable discharge */
+ mdelay(SHARPSL_WAIT_DISCHARGE_ON);
+ }
+
+ if (sharpsl_pm.machinfo->discharge1)
+ sharpsl_pm.machinfo->discharge1(1);
+
+ /* Check battery: is a battery inserted? */
+ for (i=0; i<5; i++) {
+ buff[i] = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT);
+ mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_VOLT);
+ }
+
+ if (sharpsl_pm.machinfo->discharge1)
+ sharpsl_pm.machinfo->discharge1(0);
+
+ if (acin && (sharpsl_pm.charge_mode == CHRG_ON)) {
+ udelay(100);
+ sharpsl_pm.machinfo->charge(1);
+ sharpsl_pm.machinfo->discharge(0);
+ }
+
+ temp = get_select_val(buff);
+ dev_dbg(sharpsl_pm.dev, "sharpsl_fatal_check: acin: %d, discharge voltage: %d, no discharge: %d\n", acin, temp, sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT));
+
+ if ((acin && (temp < SHARPSL_FATAL_ACIN_VOLT)) ||
+ (!acin && (temp < SHARPSL_FATAL_NOACIN_VOLT)))
+ return -1;
+ return 0;
+}
+
+static int sharpsl_off_charge_error(void)
+{
+ dev_err(sharpsl_pm.dev, "Offline Charger: Error occured.\n");
+ sharpsl_pm.machinfo->charge(0);
+ sharpsl_pm_led(SHARPSL_LED_ERROR);
+ sharpsl_pm.charge_mode = CHRG_ERROR;
+ return 1;
+}
+
+/*
+ * Charging Control while suspended
+ * Return 1 - go straight to sleep
+ * Return 0 - sleep or wakeup depending on other factors
+ */
+static int sharpsl_off_charge_battery(void)
+{
+ int time;
+
+ dev_dbg(sharpsl_pm.dev, "Charge Mode: %d\n", sharpsl_pm.charge_mode);
+
+ if (sharpsl_pm.charge_mode == CHRG_OFF) {
+ dev_dbg(sharpsl_pm.dev, "Offline Charger: Step 1\n");
+
+ /* AC Check */
+ if ((sharpsl_ac_check() < 0) || (sharpsl_check_battery_temp() < 0))
+ return sharpsl_off_charge_error();
+
+ /* Start Charging */
+ sharpsl_pm_led(SHARPSL_LED_ON);
+ sharpsl_pm.machinfo->charge(0);
+ mdelay(SHARPSL_CHARGE_WAIT_TIME);
+ sharpsl_pm.machinfo->charge(1);
+
+ sharpsl_pm.charge_mode = CHRG_ON;
+ sharpsl_pm.full_count = 0;
+
+ return 1;
+ } else if (sharpsl_pm.charge_mode != CHRG_ON) {
+ return 1;
+ }
+
+ if (sharpsl_pm.full_count == 0) {
+ int time;
+
+ dev_dbg(sharpsl_pm.dev, "Offline Charger: Step 2\n");
+
+ if ((sharpsl_check_battery_temp() < 0) || (sharpsl_check_battery_voltage() < 0))
+ return sharpsl_off_charge_error();
+
+ sharpsl_pm.machinfo->charge(0);
+ mdelay(SHARPSL_CHARGE_WAIT_TIME);
+ sharpsl_pm.machinfo->charge(1);
+ sharpsl_pm.charge_mode = CHRG_ON;
+
+ mdelay(SHARPSL_CHARGE_CO_CHECK_TIME);
+
+ time = RCNR;
+ while(1) {
+ /* Check if any wakeup event has occurred */
+ if (sharpsl_pm.machinfo->charger_wakeup() != 0)
+ return 0;
+ /* Check for timeout */
+ if ((RCNR - time) > SHARPSL_WAIT_CO_TIME)
+ return 1;
+ if (sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_CHRGFULL)) {
+ dev_dbg(sharpsl_pm.dev, "Offline Charger: Charge full occured. Retrying to check\n");
+ sharpsl_pm.full_count++;
+ sharpsl_pm.machinfo->charge(0);
+ mdelay(SHARPSL_CHARGE_WAIT_TIME);
+ sharpsl_pm.machinfo->charge(1);
+ return 1;
+ }
+ }
+ }
+
+ dev_dbg(sharpsl_pm.dev, "Offline Charger: Step 3\n");
+
+ mdelay(SHARPSL_CHARGE_CO_CHECK_TIME);
+
+ time = RCNR;
+ while(1) {
+ /* Check if any wakeup event has occurred */
+ if (sharpsl_pm.machinfo->charger_wakeup() != 0)
+ return 0;
+ /* Check for timeout */
+ if ((RCNR-time) > SHARPSL_WAIT_CO_TIME) {
+ if (sharpsl_pm.full_count > SHARPSL_CHARGE_RETRY_CNT) {
+ dev_dbg(sharpsl_pm.dev, "Offline Charger: Not charged sufficiently. Retrying.\n");
+ sharpsl_pm.full_count = 0;
+ }
+ sharpsl_pm.full_count++;
+ return 1;
+ }
+ if (sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_CHRGFULL)) {
+ dev_dbg(sharpsl_pm.dev, "Offline Charger: Charging complete.\n");
+ sharpsl_pm_led(SHARPSL_LED_OFF);
+ sharpsl_pm.machinfo->charge(0);
+ sharpsl_pm.charge_mode = CHRG_DONE;
+ return 1;
+ }
+ }
+}
+
+
+static ssize_t battery_percentage_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n",sharpsl_pm.battstat.mainbat_percent);
+}
+
+static ssize_t battery_voltage_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n",sharpsl_pm.battstat.mainbat_voltage);
+}
+
+static DEVICE_ATTR(battery_percentage, 0444, battery_percentage_show, NULL);
+static DEVICE_ATTR(battery_voltage, 0444, battery_voltage_show, NULL);
+
+extern void (*apm_get_power_status)(struct apm_power_info *);
+
+static void sharpsl_apm_get_power_status(struct apm_power_info *info)
+{
+ info->ac_line_status = sharpsl_pm.battstat.ac_status;
+
+ if (sharpsl_pm.charge_mode == CHRG_ON)
+ info->battery_status = APM_BATTERY_STATUS_CHARGING;
+ else
+ info->battery_status = sharpsl_pm.battstat.mainbat_status;
+
+ info->battery_flag = (1 << info->battery_status);
+ info->battery_life = sharpsl_pm.battstat.mainbat_percent;
+}
+
+static struct pm_ops sharpsl_pm_ops = {
+ .pm_disk_mode = PM_DISK_FIRMWARE,
+ .prepare = pxa_pm_prepare,
+ .enter = corgi_pxa_pm_enter,
+ .finish = pxa_pm_finish,
+};
+
+static int __init sharpsl_pm_probe(struct platform_device *pdev)
+{
+ if (!pdev->dev.platform_data)
+ return -EINVAL;
+
+ sharpsl_pm.dev = &pdev->dev;
+ sharpsl_pm.machinfo = pdev->dev.platform_data;
+ sharpsl_pm.charge_mode = CHRG_OFF;
+ sharpsl_pm.flags = 0;
+
+ init_timer(&sharpsl_pm.ac_timer);
+ sharpsl_pm.ac_timer.function = sharpsl_ac_timer;
+
+ init_timer(&sharpsl_pm.chrg_full_timer);
+ sharpsl_pm.chrg_full_timer.function = sharpsl_chrg_full_timer;
+
+ sharpsl_pm.machinfo->init();
+
+ device_create_file(&pdev->dev, &dev_attr_battery_percentage);
+ device_create_file(&pdev->dev, &dev_attr_battery_voltage);
+
+ apm_get_power_status = sharpsl_apm_get_power_status;
+
+ pm_set_ops(&sharpsl_pm_ops);
+
+ mod_timer(&sharpsl_pm.ac_timer, jiffies + msecs_to_jiffies(250));
+
+ return 0;
+}
+
+static int sharpsl_pm_remove(struct platform_device *pdev)
+{
+ pm_set_ops(NULL);
+
+ device_remove_file(&pdev->dev, &dev_attr_battery_percentage);
+ device_remove_file(&pdev->dev, &dev_attr_battery_voltage);
+
+ sharpsl_pm.machinfo->exit();
+
+ del_timer_sync(&sharpsl_pm.chrg_full_timer);
+ del_timer_sync(&sharpsl_pm.ac_timer);
+
+ return 0;
+}
+
+static struct platform_driver sharpsl_pm_driver = {
+ .probe = sharpsl_pm_probe,
+ .remove = sharpsl_pm_remove,
+ .suspend = sharpsl_pm_suspend,
+ .resume = sharpsl_pm_resume,
+ .driver = {
+ .name = "sharpsl-pm",
+ },
+};
+
+static int __devinit sharpsl_pm_init(void)
+{
+ return platform_driver_register(&sharpsl_pm_driver);
+}
+
+static void sharpsl_pm_exit(void)
+{
+ platform_driver_unregister(&sharpsl_pm_driver);
+}
+
+late_initcall(sharpsl_pm_init);
+module_exit(sharpsl_pm_exit);
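The get_select_val() helper in the new driver above filters each batch of five ADC samples (battery temperature, battery voltage, AC-in voltage) by discarding the minimum and maximum readings and averaging the remaining three. A standalone, illustrative re-statement of that filter, not part of the patch, which can be compiled and run in userspace:

#include <stdio.h>

/* Drop the lowest and highest of five samples, average the middle three. */
static int select_val(const int v[5])
{
	int min = v[0], max = v[0], sum = 0, i;

	for (i = 0; i < 5; i++) {
		sum += v[i];
		if (v[i] < min)
			min = v[i];
		if (v[i] > max)
			max = v[i];
	}
	return (sum - min - max) / 3;
}

int main(void)
{
	int samples[5] = { 180, 182, 250, 179, 181 };	/* 250 is an outlier */

	printf("filtered reading: %d\n", select_val(samples));
	return 0;
}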
diff --git a/arch/arm/configs/assabet_defconfig b/arch/arm/configs/assabet_defconfig
index ccbb4c0d58c..089c9d59840 100644
--- a/arch/arm/configs/assabet_defconfig
+++ b/arch/arm/configs/assabet_defconfig
@@ -63,7 +63,6 @@ CONFIG_OBSOLETE_MODPARM=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/at91rm9200dk_defconfig b/arch/arm/configs/at91rm9200dk_defconfig
new file mode 100644
index 00000000000..5cdd13acf8f
--- /dev/null
+++ b/arch/arm/configs/at91rm9200dk_defconfig
@@ -0,0 +1,1009 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.15
+# Mon Jan 9 20:54:30 2006
+#
+CONFIG_ARM=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_HOTPLUG=y
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Block layer
+#
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# System Type
+#
+# CONFIG_ARCH_CLPS7500 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CO285 is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_CAMELOT is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_IOP3XX is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_AAEC2000 is not set
+CONFIG_ARCH_AT91RM9200=y
+
+#
+# AT91RM9200 Implementations
+#
+
+#
+# AT91RM9200 Board Type
+#
+CONFIG_ARCH_AT91RM9200DK=y
+# CONFIG_MACH_AT91RM9200EK is not set
+# CONFIG_MACH_CSB337 is not set
+# CONFIG_MACH_CSB637 is not set
+# CONFIG_MACH_CARMEVA is not set
+# CONFIG_MACH_KB9200 is not set
+# CONFIG_MACH_ATEB9200 is not set
+
+#
+# AT91RM9200 Feature Selections
+#
+CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM920T=y
+CONFIG_CPU_32v4=y
+CONFIG_CPU_ABRT_EV4T=y
+CONFIG_CPU_CACHE_V4WT=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+
+#
+# Processor Features
+#
+# CONFIG_ARM_THUMB is not set
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+
+#
+# Bus support
+#
+CONFIG_ISA_DMA_API=y
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+CONFIG_PCCARD=y
+# CONFIG_PCMCIA_DEBUG is not set
+CONFIG_PCMCIA=y
+CONFIG_PCMCIA_LOAD_CIS=y
+CONFIG_PCMCIA_IOCTL=y
+
+#
+# PC-card bridges
+#
+CONFIG_AT91_CF=y
+
+#
+# Kernel Features
+#
+# CONFIG_PREEMPT is not set
+# CONFIG_NO_IDLE_HZ is not set
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_LEDS=y
+CONFIG_LEDS_TIMER=y
+# CONFIG_LEDS_CPU is not set
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="mem=32M console=ttyS0,115200 initrd=0x20410000,3145728 root=/dev/ram0 rw"
+# CONFIG_XIP_KERNEL is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_ARTHUR is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_AMDSTD_RETRY=0
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_XIP is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_START=0x10000000
+CONFIG_MTD_PHYSMAP_LEN=0x200000
+CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_IMPA7 is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLKMTD is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_AT91_DATAFLASH=y
+CONFIG_MTD_AT91_DATAFLASH_CARD=y
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# PHY device support
+#
+# CONFIG_PHYLIB is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_ARM_AT91_ETHER=y
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# PCMCIA network device support
+#
+# CONFIG_NET_PCMCIA is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_AT91=y
+CONFIG_SERIAL_AT91_CONSOLE=y
+# CONFIG_SERIAL_AT91_TTYAT is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_AT91_WATCHDOG=y
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+# CONFIG_NVRAM is not set
+# CONFIG_RTC is not set
+CONFIG_AT91_RTC=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+
+#
+# PCMCIA character devices
+#
+# CONFIG_SYNCLINK_CS is not set
+# CONFIG_CARDMAN_4000 is not set
+# CONFIG_CARDMAN_4040 is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+CONFIG_AT91_SPI=y
+CONFIG_AT91_SPIDEV=y
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+CONFIG_I2C_AT91=y
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_RTC8564 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_RTC_X1205_I2C is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia Capabilities Port drivers
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_BANDWIDTH is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_SL811_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+# CONFIG_USB_STORAGE is not set
+
+#
+# USB Input Devices
+#
+# CONFIG_USB_HID is not set
+
+#
+# USB HID Boot Protocol drivers
+#
+# CONFIG_USB_KBD is not set
+# CONFIG_USB_MOUSE is not set
+# CONFIG_USB_AIPTEK is not set
+# CONFIG_USB_WACOM is not set
+# CONFIG_USB_ACECAD is not set
+# CONFIG_USB_KBTAB is not set
+# CONFIG_USB_POWERMATE is not set
+# CONFIG_USB_MTOUCH is not set
+# CONFIG_USB_ITMTOUCH is not set
+# CONFIG_USB_EGALAX is not set
+# CONFIG_USB_YEALINK is not set
+# CONFIG_USB_XPAD is not set
+# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_KEYSPAN_REMOTE is not set
+# CONFIG_USB_APPLETOUCH is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+
+#
+# USB Multimedia devices
+#
+# CONFIG_USB_DABUSB is not set
+
+#
+# Video4Linux support is needed for USB Multimedia device support
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+CONFIG_USB_MON=y
+
+#
+# USB port drivers
+#
+
+#
+# USB Serial Converter support
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGETKIT is not set
+# CONFIG_USB_PHIDGETSERVO is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TEST is not set
+
+#
+# USB DSL modem support
+#
+
+#
+# USB Gadget Support
+#
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_PXA2XX is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+CONFIG_USB_GADGET_AT91=y
+CONFIG_USB_AT91=y
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+# CONFIG_USB_GADGET_DUALSPEED is not set
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FILE_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+
+#
+# MMC/SD Card support
+#
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_BLOCK=y
+# CONFIG_MMC_WBSD is not set
+CONFIG_MMC_AT91RM9200=y
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_JBD is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_RELAYFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+# CONFIG_NFS_FS is not set
+# CONFIG_NFSD is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_DEBUG_USER=y
+# CONFIG_DEBUG_WAITQ is not set
+# CONFIG_DEBUG_ERRORS is not set
+CONFIG_DEBUG_LL=y
+# CONFIG_DEBUG_ICEDCC is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Hardware crypto devices
+#
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
diff --git a/arch/arm/configs/at91rm9200ek_defconfig b/arch/arm/configs/at91rm9200ek_defconfig
new file mode 100644
index 00000000000..20838ccf1da
--- /dev/null
+++ b/arch/arm/configs/at91rm9200ek_defconfig
@@ -0,0 +1,998 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.15
+# Mon Jan 9 20:57:31 2006
+#
+CONFIG_ARM=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_HOTPLUG=y
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Block layer
+#
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# System Type
+#
+# CONFIG_ARCH_CLPS7500 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CO285 is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_CAMELOT is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_IOP3XX is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_AAEC2000 is not set
+CONFIG_ARCH_AT91RM9200=y
+
+#
+# AT91RM9200 Implementations
+#
+
+#
+# AT91RM9200 Board Type
+#
+# CONFIG_ARCH_AT91RM9200DK is not set
+CONFIG_MACH_AT91RM9200EK=y
+# CONFIG_MACH_CSB337 is not set
+# CONFIG_MACH_CSB637 is not set
+# CONFIG_MACH_CARMEVA is not set
+# CONFIG_MACH_KB9200 is not set
+# CONFIG_MACH_ATEB9200 is not set
+
+#
+# AT91RM9200 Feature Selections
+#
+CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM920T=y
+CONFIG_CPU_32v4=y
+CONFIG_CPU_ABRT_EV4T=y
+CONFIG_CPU_CACHE_V4WT=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+
+#
+# Processor Features
+#
+# CONFIG_ARM_THUMB is not set
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+
+#
+# Bus support
+#
+CONFIG_ISA_DMA_API=y
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+# CONFIG_PREEMPT is not set
+# CONFIG_NO_IDLE_HZ is not set
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_LEDS=y
+CONFIG_LEDS_TIMER=y
+CONFIG_LEDS_CPU=y
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="mem=32M console=ttyS0,115200 initrd=0x20410000,3145728 root=/dev/ram0 rw"
+# CONFIG_XIP_KERNEL is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_ARTHUR is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_AMDSTD_RETRY=0
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_XIP is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_START=0x10000000
+CONFIG_MTD_PHYSMAP_LEN=0x800000
+CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_IMPA7 is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLKMTD is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_AT91_DATAFLASH=y
+CONFIG_MTD_AT91_DATAFLASH_CARD=y
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# PHY device support
+#
+# CONFIG_PHYLIB is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_ARM_AT91_ETHER=y
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_AT91=y
+CONFIG_SERIAL_AT91_CONSOLE=y
+# CONFIG_SERIAL_AT91_TTYAT is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_AT91_WATCHDOG=y
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+# CONFIG_NVRAM is not set
+# CONFIG_RTC is not set
+CONFIG_AT91_RTC=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+CONFIG_AT91_SPI=y
+CONFIG_AT91_SPIDEV=y
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+CONFIG_I2C_AT91=y
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_RTC8564 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_RTC_X1205_I2C is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia Capabilities Port drivers
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+CONFIG_FB=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+CONFIG_FB_S1D13XXX=y
+# CONFIG_FB_VIRTUAL is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE is not set
+
+#
+# Logo configuration
+#
+# CONFIG_LOGO is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_BANDWIDTH is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_SL811_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+# CONFIG_USB_STORAGE is not set
+
+#
+# USB Input Devices
+#
+# CONFIG_USB_HID is not set
+
+#
+# USB HID Boot Protocol drivers
+#
+# CONFIG_USB_KBD is not set
+# CONFIG_USB_MOUSE is not set
+# CONFIG_USB_AIPTEK is not set
+# CONFIG_USB_WACOM is not set
+# CONFIG_USB_ACECAD is not set
+# CONFIG_USB_KBTAB is not set
+# CONFIG_USB_POWERMATE is not set
+# CONFIG_USB_MTOUCH is not set
+# CONFIG_USB_ITMTOUCH is not set
+# CONFIG_USB_EGALAX is not set
+# CONFIG_USB_YEALINK is not set
+# CONFIG_USB_XPAD is not set
+# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_KEYSPAN_REMOTE is not set
+# CONFIG_USB_APPLETOUCH is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+
+#
+# USB Multimedia devices
+#
+# CONFIG_USB_DABUSB is not set
+
+#
+# Video4Linux support is needed for USB Multimedia device support
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+CONFIG_USB_MON=y
+
+#
+# USB port drivers
+#
+
+#
+# USB Serial Converter support
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGETKIT is not set
+# CONFIG_USB_PHIDGETSERVO is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TEST is not set
+
+#
+# USB DSL modem support
+#
+
+#
+# USB Gadget Support
+#
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_PXA2XX is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+CONFIG_USB_GADGET_AT91=y
+CONFIG_USB_AT91=y
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+# CONFIG_USB_GADGET_DUALSPEED is not set
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FILE_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+
+#
+# MMC/SD Card support
+#
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_BLOCK=y
+# CONFIG_MMC_WBSD is not set
+CONFIG_MMC_AT91RM9200=y
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_JBD is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_RELAYFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+# CONFIG_NFS_FS is not set
+# CONFIG_NFSD is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_DEBUG_USER=y
+# CONFIG_DEBUG_WAITQ is not set
+# CONFIG_DEBUG_ERRORS is not set
+CONFIG_DEBUG_LL=y
+# CONFIG_DEBUG_ICEDCC is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Hardware crypto devices
+#
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
diff --git a/arch/arm/configs/badge4_defconfig b/arch/arm/configs/badge4_defconfig
index 5d92af975d8..cfe6bd8e81c 100644
--- a/arch/arm/configs/badge4_defconfig
+++ b/arch/arm/configs/badge4_defconfig
@@ -66,7 +66,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/bast_defconfig b/arch/arm/configs/bast_defconfig
index 35e3a99bcbb..6886001b536 100644
--- a/arch/arm/configs/bast_defconfig
+++ b/arch/arm/configs/bast_defconfig
@@ -64,7 +64,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/cerfcube_defconfig b/arch/arm/configs/cerfcube_defconfig
index d8fe0f40408..f81a60005cd 100644
--- a/arch/arm/configs/cerfcube_defconfig
+++ b/arch/arm/configs/cerfcube_defconfig
@@ -65,7 +65,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/clps7500_defconfig b/arch/arm/configs/clps7500_defconfig
index 90875837140..af9ae538913 100644
--- a/arch/arm/configs/clps7500_defconfig
+++ b/arch/arm/configs/clps7500_defconfig
@@ -57,7 +57,6 @@ CONFIG_ARCH_CLPS7500=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/collie_defconfig b/arch/arm/configs/collie_defconfig
index 40dfe07a8bc..15468a0cf70 100644
--- a/arch/arm/configs/collie_defconfig
+++ b/arch/arm/configs/collie_defconfig
@@ -71,7 +71,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/corgi_defconfig b/arch/arm/configs/corgi_defconfig
index 06229026f78..3c3461e8339 100644
--- a/arch/arm/configs/corgi_defconfig
+++ b/arch/arm/configs/corgi_defconfig
@@ -87,7 +87,6 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/csb337_defconfig b/arch/arm/configs/csb337_defconfig
new file mode 100644
index 00000000000..885a3184830
--- /dev/null
+++ b/arch/arm/configs/csb337_defconfig
@@ -0,0 +1,1136 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.15
+# Mon Jan 9 21:51:31 2006
+#
+CONFIG_ARM=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_HOTPLUG=y
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Block layer
+#
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# System Type
+#
+# CONFIG_ARCH_CLPS7500 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CO285 is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_CAMELOT is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_IOP3XX is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_AAEC2000 is not set
+CONFIG_ARCH_AT91RM9200=y
+
+#
+# AT91RM9200 Implementations
+#
+
+#
+# AT91RM9200 Board Type
+#
+# CONFIG_ARCH_AT91RM9200DK is not set
+# CONFIG_MACH_AT91RM9200EK is not set
+CONFIG_MACH_CSB337=y
+# CONFIG_MACH_CSB637 is not set
+# CONFIG_MACH_CARMEVA is not set
+# CONFIG_MACH_KB9200 is not set
+# CONFIG_MACH_ATEB9200 is not set
+
+#
+# AT91RM9200 Feature Selections
+#
+CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM920T=y
+CONFIG_CPU_32v4=y
+CONFIG_CPU_ABRT_EV4T=y
+CONFIG_CPU_CACHE_V4WT=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+
+#
+# Processor Features
+#
+# CONFIG_ARM_THUMB is not set
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+
+#
+# Bus support
+#
+CONFIG_ISA_DMA_API=y
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+CONFIG_PCCARD=y
+# CONFIG_PCMCIA_DEBUG is not set
+CONFIG_PCMCIA=y
+CONFIG_PCMCIA_LOAD_CIS=y
+CONFIG_PCMCIA_IOCTL=y
+
+#
+# PC-card bridges
+#
+CONFIG_AT91_CF=y
+
+#
+# Kernel Features
+#
+# CONFIG_PREEMPT is not set
+# CONFIG_NO_IDLE_HZ is not set
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_LEDS=y
+CONFIG_LEDS_TIMER=y
+CONFIG_LEDS_CPU=y
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="mem=32M console=ttyS0,38400 initrd=0x20410000,3145728 root=/dev/ram0 rw"
+# CONFIG_XIP_KERNEL is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_ARTHUR is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_XIP is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_PLATRAM is not set
+CONFIG_MTD_CSB337=y
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLKMTD is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_AT91_DATAFLASH is not set
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+# CONFIG_BLK_DEV_SD is not set
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# PCMCIA SCSI adapter support
+#
+# CONFIG_PCMCIA_AHA152X is not set
+# CONFIG_PCMCIA_FDOMAIN is not set
+# CONFIG_PCMCIA_NINJA_SCSI is not set
+# CONFIG_PCMCIA_QLOGIC is not set
+# CONFIG_PCMCIA_SYM53C500 is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# PHY device support
+#
+# CONFIG_PHYLIB is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_ARM_AT91_ETHER=y
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# PCMCIA network device support
+#
+# CONFIG_NET_PCMCIA is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_AT91=y
+CONFIG_SERIAL_AT91_CONSOLE=y
+# CONFIG_SERIAL_AT91_TTYAT is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_AT91_WATCHDOG=y
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+# CONFIG_NVRAM is not set
+CONFIG_RTC=y
+# CONFIG_AT91_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+
+#
+# PCMCIA character devices
+#
+# CONFIG_SYNCLINK_CS is not set
+# CONFIG_CARDMAN_4000 is not set
+# CONFIG_CARDMAN_4040 is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+CONFIG_AT91_SPI=y
+CONFIG_AT91_SPIDEV=y
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+CONFIG_I2C_AT91=y
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_RTC8564 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_RTC_X1205_I2C is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia Capabilities Port drivers
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_BANDWIDTH is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_SL811_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_DPCM is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+
+#
+# USB Input Devices
+#
+# CONFIG_USB_HID is not set
+
+#
+# USB HID Boot Protocol drivers
+#
+# CONFIG_USB_KBD is not set
+# CONFIG_USB_MOUSE is not set
+# CONFIG_USB_AIPTEK is not set
+# CONFIG_USB_WACOM is not set
+# CONFIG_USB_ACECAD is not set
+# CONFIG_USB_KBTAB is not set
+# CONFIG_USB_POWERMATE is not set
+# CONFIG_USB_MTOUCH is not set
+# CONFIG_USB_ITMTOUCH is not set
+# CONFIG_USB_EGALAX is not set
+# CONFIG_USB_YEALINK is not set
+# CONFIG_USB_XPAD is not set
+# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_KEYSPAN_REMOTE is not set
+# CONFIG_USB_APPLETOUCH is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB Multimedia devices
+#
+# CONFIG_USB_DABUSB is not set
+
+#
+# Video4Linux support is needed for USB Multimedia device support
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+CONFIG_USB_MON=y
+
+#
+# USB port drivers
+#
+
+#
+# USB Serial Converter support
+#
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_CONSOLE=y
+CONFIG_USB_SERIAL_GENERIC=y
+# CONFIG_USB_SERIAL_AIRPRIME is not set
+# CONFIG_USB_SERIAL_ANYDATA is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_CP2101 is not set
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+CONFIG_USB_SERIAL_FTDI_SIO=y
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+CONFIG_USB_SERIAL_KEYSPAN=y
+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+CONFIG_USB_SERIAL_MCT_U232=y
+# CONFIG_USB_SERIAL_PL2303 is not set
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+CONFIG_USB_EZUSB=y
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGETKIT is not set
+# CONFIG_USB_PHIDGETSERVO is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TEST is not set
+
+#
+# USB DSL modem support
+#
+
+#
+# USB Gadget Support
+#
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_PXA2XX is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+CONFIG_USB_GADGET_AT91=y
+CONFIG_USB_AT91=y
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+# CONFIG_USB_GADGET_DUALSPEED is not set
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FILE_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+
+#
+# MMC/SD Card support
+#
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_BLOCK=y
+# CONFIG_MMC_WBSD is not set
+CONFIG_MMC_AT91RM9200=y
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_JBD is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_RELAYFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_DEBUG_USER=y
+# CONFIG_DEBUG_WAITQ is not set
+# CONFIG_DEBUG_ERRORS is not set
+CONFIG_DEBUG_LL=y
+# CONFIG_DEBUG_ICEDCC is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Hardware crypto devices
+#
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
diff --git a/arch/arm/configs/csb637_defconfig b/arch/arm/configs/csb637_defconfig
new file mode 100644
index 00000000000..95a96a5462a
--- /dev/null
+++ b/arch/arm/configs/csb637_defconfig
@@ -0,0 +1,1116 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.15
+# Mon Jan 9 21:52:00 2006
+#
+CONFIG_ARM=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_HOTPLUG=y
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Block layer
+#
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# System Type
+#
+# CONFIG_ARCH_CLPS7500 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CO285 is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_CAMELOT is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_IOP3XX is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_AAEC2000 is not set
+CONFIG_ARCH_AT91RM9200=y
+
+#
+# AT91RM9200 Implementations
+#
+
+#
+# AT91RM9200 Board Type
+#
+# CONFIG_ARCH_AT91RM9200DK is not set
+# CONFIG_MACH_AT91RM9200EK is not set
+# CONFIG_MACH_CSB337 is not set
+CONFIG_MACH_CSB637=y
+# CONFIG_MACH_CARMEVA is not set
+# CONFIG_MACH_KB9200 is not set
+# CONFIG_MACH_ATEB9200 is not set
+
+#
+# AT91RM9200 Feature Selections
+#
+CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM920T=y
+CONFIG_CPU_32v4=y
+CONFIG_CPU_ABRT_EV4T=y
+CONFIG_CPU_CACHE_V4WT=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+
+#
+# Processor Features
+#
+# CONFIG_ARM_THUMB is not set
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+
+#
+# Bus support
+#
+CONFIG_ISA_DMA_API=y
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+CONFIG_PCCARD=y
+# CONFIG_PCMCIA_DEBUG is not set
+CONFIG_PCMCIA=y
+CONFIG_PCMCIA_LOAD_CIS=y
+CONFIG_PCMCIA_IOCTL=y
+
+#
+# PC-card bridges
+#
+CONFIG_AT91_CF=y
+
+#
+# Kernel Features
+#
+# CONFIG_PREEMPT is not set
+# CONFIG_NO_IDLE_HZ is not set
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_LEDS=y
+CONFIG_LEDS_TIMER=y
+CONFIG_LEDS_CPU=y
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="mem=32M console=ttyS0,38400 initrd=0x20410000,3145728 root=/dev/ram0 rw"
+# CONFIG_XIP_KERNEL is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_ARTHUR is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_XIP is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_PLATRAM is not set
+CONFIG_MTD_CSB637=y
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLKMTD is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_AT91_DATAFLASH is not set
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+# CONFIG_BLK_DEV_SD is not set
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# PCMCIA SCSI adapter support
+#
+# CONFIG_PCMCIA_AHA152X is not set
+# CONFIG_PCMCIA_FDOMAIN is not set
+# CONFIG_PCMCIA_NINJA_SCSI is not set
+# CONFIG_PCMCIA_QLOGIC is not set
+# CONFIG_PCMCIA_SYM53C500 is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# PHY device support
+#
+# CONFIG_PHYLIB is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_ARM_AT91_ETHER=y
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# PCMCIA network device support
+#
+# CONFIG_NET_PCMCIA is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_AT91=y
+CONFIG_SERIAL_AT91_CONSOLE=y
+# CONFIG_SERIAL_AT91_TTYAT is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_AT91_WATCHDOG=y
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+# CONFIG_NVRAM is not set
+CONFIG_RTC=y
+# CONFIG_AT91_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+
+#
+# PCMCIA character devices
+#
+# CONFIG_SYNCLINK_CS is not set
+# CONFIG_CARDMAN_4000 is not set
+# CONFIG_CARDMAN_4040 is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+CONFIG_AT91_SPI=y
+CONFIG_AT91_SPIDEV=y
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+CONFIG_I2C_AT91=y
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_RTC8564 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_RTC_X1205_I2C is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia Capabilities Port drivers
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_BANDWIDTH is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_SL811_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_DPCM is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+
+#
+# USB Input Devices
+#
+# CONFIG_USB_HID is not set
+
+#
+# USB HID Boot Protocol drivers
+#
+# CONFIG_USB_KBD is not set
+# CONFIG_USB_MOUSE is not set
+# CONFIG_USB_AIPTEK is not set
+# CONFIG_USB_WACOM is not set
+# CONFIG_USB_ACECAD is not set
+# CONFIG_USB_KBTAB is not set
+# CONFIG_USB_POWERMATE is not set
+# CONFIG_USB_MTOUCH is not set
+# CONFIG_USB_ITMTOUCH is not set
+# CONFIG_USB_EGALAX is not set
+# CONFIG_USB_YEALINK is not set
+# CONFIG_USB_XPAD is not set
+# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_KEYSPAN_REMOTE is not set
+# CONFIG_USB_APPLETOUCH is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB Multimedia devices
+#
+# CONFIG_USB_DABUSB is not set
+
+#
+# Video4Linux support is needed for USB Multimedia device support
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+CONFIG_USB_MON=y
+
+#
+# USB port drivers
+#
+
+#
+# USB Serial Converter support
+#
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_CONSOLE=y
+CONFIG_USB_SERIAL_GENERIC=y
+# CONFIG_USB_SERIAL_AIRPRIME is not set
+# CONFIG_USB_SERIAL_ANYDATA is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_CP2101 is not set
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+CONFIG_USB_SERIAL_FTDI_SIO=y
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+CONFIG_USB_SERIAL_KEYSPAN=y
+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+CONFIG_USB_SERIAL_MCT_U232=y
+# CONFIG_USB_SERIAL_PL2303 is not set
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+CONFIG_USB_EZUSB=y
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGETKIT is not set
+# CONFIG_USB_PHIDGETSERVO is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TEST is not set
+
+#
+# USB DSL modem support
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_JBD is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_RELAYFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_DEBUG_USER=y
+# CONFIG_DEBUG_WAITQ is not set
+# CONFIG_DEBUG_ERRORS is not set
+CONFIG_DEBUG_LL=y
+# CONFIG_DEBUG_ICEDCC is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Hardware crypto devices
+#
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
diff --git a/arch/arm/configs/ebsa110_defconfig b/arch/arm/configs/ebsa110_defconfig
index 6f61929b97a..afcfff6140f 100644
--- a/arch/arm/configs/ebsa110_defconfig
+++ b/arch/arm/configs/ebsa110_defconfig
@@ -63,7 +63,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
CONFIG_ARCH_EBSA110=y
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/edb7211_defconfig b/arch/arm/configs/edb7211_defconfig
index 78b08ed4d5f..6ba7355ff85 100644
--- a/arch/arm/configs/edb7211_defconfig
+++ b/arch/arm/configs/edb7211_defconfig
@@ -57,7 +57,6 @@ CONFIG_BASE_SMALL=0
CONFIG_ARCH_CLPS711X=y
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/enp2611_defconfig b/arch/arm/configs/enp2611_defconfig
index fd7c0042bcc..9592e3925c7 100644
--- a/arch/arm/configs/enp2611_defconfig
+++ b/arch/arm/configs/enp2611_defconfig
@@ -86,7 +86,6 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/ep80219_defconfig b/arch/arm/configs/ep80219_defconfig
index 96342afa9c5..fbe312e757c 100644
--- a/arch/arm/configs/ep80219_defconfig
+++ b/arch/arm/configs/ep80219_defconfig
@@ -64,7 +64,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
CONFIG_ARCH_IOP3XX=y
diff --git a/arch/arm/configs/footbridge_defconfig b/arch/arm/configs/footbridge_defconfig
index 9737c485072..2a612d23120 100644
--- a/arch/arm/configs/footbridge_defconfig
+++ b/arch/arm/configs/footbridge_defconfig
@@ -63,7 +63,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
CONFIG_ARCH_FOOTBRIDGE=y
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/fortunet_defconfig b/arch/arm/configs/fortunet_defconfig
index b6f688d850d..65dc73a88c4 100644
--- a/arch/arm/configs/fortunet_defconfig
+++ b/arch/arm/configs/fortunet_defconfig
@@ -57,7 +57,6 @@ CONFIG_BASE_SMALL=0
CONFIG_ARCH_CLPS711X=y
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/h3600_defconfig b/arch/arm/configs/h3600_defconfig
index b9de07de80f..7a0da0b7fac 100644
--- a/arch/arm/configs/h3600_defconfig
+++ b/arch/arm/configs/h3600_defconfig
@@ -65,7 +65,6 @@ CONFIG_OBSOLETE_MODPARM=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/h7201_defconfig b/arch/arm/configs/h7201_defconfig
index 39c13a35454..116920aecef 100644
--- a/arch/arm/configs/h7201_defconfig
+++ b/arch/arm/configs/h7201_defconfig
@@ -60,7 +60,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/h7202_defconfig b/arch/arm/configs/h7202_defconfig
index fbf5c244c69..9d62ed16bf5 100644
--- a/arch/arm/configs/h7202_defconfig
+++ b/arch/arm/configs/h7202_defconfig
@@ -63,7 +63,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/hackkit_defconfig b/arch/arm/configs/hackkit_defconfig
index fb41a36a5a6..a45b57582b8 100644
--- a/arch/arm/configs/hackkit_defconfig
+++ b/arch/arm/configs/hackkit_defconfig
@@ -66,7 +66,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig
index 27ee7682525..d1ba7fdde81 100644
--- a/arch/arm/configs/integrator_defconfig
+++ b/arch/arm/configs/integrator_defconfig
@@ -65,7 +65,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
CONFIG_ARCH_INTEGRATOR=y
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/iq31244_defconfig b/arch/arm/configs/iq31244_defconfig
index e71443b9739..c07628ceaf0 100644
--- a/arch/arm/configs/iq31244_defconfig
+++ b/arch/arm/configs/iq31244_defconfig
@@ -65,7 +65,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
CONFIG_ARCH_IOP3XX=y
diff --git a/arch/arm/configs/iq80321_defconfig b/arch/arm/configs/iq80321_defconfig
index ab5ad23b27d..18fa1615fdf 100644
--- a/arch/arm/configs/iq80321_defconfig
+++ b/arch/arm/configs/iq80321_defconfig
@@ -64,7 +64,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
CONFIG_ARCH_IOP3XX=y
diff --git a/arch/arm/configs/iq80331_defconfig b/arch/arm/configs/iq80331_defconfig
index bb536133ef8..f50035de1ff 100644
--- a/arch/arm/configs/iq80331_defconfig
+++ b/arch/arm/configs/iq80331_defconfig
@@ -64,7 +64,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
CONFIG_ARCH_IOP3XX=y
diff --git a/arch/arm/configs/iq80332_defconfig b/arch/arm/configs/iq80332_defconfig
index 305f01f3a72..18b3f372ed6 100644
--- a/arch/arm/configs/iq80332_defconfig
+++ b/arch/arm/configs/iq80332_defconfig
@@ -64,7 +64,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
CONFIG_ARCH_IOP3XX=y
diff --git a/arch/arm/configs/ixdp2400_defconfig b/arch/arm/configs/ixdp2400_defconfig
index e6a4d2656fe..d9d6bb86a6f 100644
--- a/arch/arm/configs/ixdp2400_defconfig
+++ b/arch/arm/configs/ixdp2400_defconfig
@@ -86,7 +86,6 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/ixdp2401_defconfig b/arch/arm/configs/ixdp2401_defconfig
index 5572cf95d5f..2dc9d499c7d 100644
--- a/arch/arm/configs/ixdp2401_defconfig
+++ b/arch/arm/configs/ixdp2401_defconfig
@@ -86,7 +86,6 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/ixdp2800_defconfig b/arch/arm/configs/ixdp2800_defconfig
index 0fddbde8583..4248123815e 100644
--- a/arch/arm/configs/ixdp2800_defconfig
+++ b/arch/arm/configs/ixdp2800_defconfig
@@ -86,7 +86,6 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/ixdp2801_defconfig b/arch/arm/configs/ixdp2801_defconfig
index 89b9aa06aa9..ea8f4b478fa 100644
--- a/arch/arm/configs/ixdp2801_defconfig
+++ b/arch/arm/configs/ixdp2801_defconfig
@@ -86,7 +86,6 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/ixp4xx_defconfig b/arch/arm/configs/ixp4xx_defconfig
index f74c926beb4..4975b914f92 100644
--- a/arch/arm/configs/ixp4xx_defconfig
+++ b/arch/arm/configs/ixp4xx_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.14-rc1-git5
-# Tue Sep 20 17:26:28 2005
+# Linux kernel version: 2.6.15
+# Tue Jan 3 03:20:40 2006
#
CONFIG_ARM=y
CONFIG_MMU=y
@@ -33,6 +33,7 @@ CONFIG_SYSCTL=y
CONFIG_KOBJECT_UEVENT=y
# CONFIG_IKCONFIG is not set
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
@@ -42,7 +43,6 @@ CONFIG_BUG=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SHMEM=y
CONFIG_CC_ALIGN_FUNCTIONS=0
CONFIG_CC_ALIGN_LABELS=0
@@ -62,13 +62,29 @@ CONFIG_MODVERSIONS=y
CONFIG_KMOD=y
#
+# Block layer
+#
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
# System Type
#
# CONFIG_ARCH_CLPS7500 is not set
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
@@ -83,6 +99,7 @@ CONFIG_ARCH_IXP4XX=y
# CONFIG_ARCH_LH7A40X is not set
# CONFIG_ARCH_OMAP is not set
# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_REALVIEW is not set
# CONFIG_ARCH_IMX is not set
# CONFIG_ARCH_H720X is not set
# CONFIG_ARCH_AAEC2000 is not set
@@ -102,6 +119,7 @@ CONFIG_MACH_IXDPG425=y
CONFIG_MACH_IXDP465=y
CONFIG_ARCH_IXCDP1100=y
CONFIG_ARCH_PRPMC1100=y
+CONFIG_MACH_NAS100D=y
CONFIG_ARCH_IXDP4XX=y
CONFIG_CPU_IXP46X=y
# CONFIG_MACH_GTWX5715 is not set
@@ -155,6 +173,7 @@ CONFIG_FLATMEM_MANUAL=y
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4096
CONFIG_ALIGNMENT_TRAP=y
#
@@ -173,6 +192,7 @@ CONFIG_CMDLINE="console=ttyS0,115200 ip=bootp root=/dev/nfs"
# At least one emulation must be selected
#
CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
# CONFIG_FPE_FASTFPE is not set
#
@@ -187,6 +207,8 @@ CONFIG_BINFMT_ELF=y
# Power management options
#
CONFIG_PM=y
+CONFIG_PM_LEGACY=y
+# CONFIG_PM_DEBUG is not set
CONFIG_APM=y
#
@@ -271,6 +293,10 @@ CONFIG_IP_VS_SH=m
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_DEBUG is not set
CONFIG_BRIDGE_NETFILTER=y
+
+#
+# Core Netfilter Configuration
+#
# CONFIG_NETFILTER_NETLINK is not set
#
@@ -286,6 +312,7 @@ CONFIG_IP_NF_IRC=m
# CONFIG_IP_NF_NETBIOS_NS is not set
# CONFIG_IP_NF_TFTP is not set
# CONFIG_IP_NF_AMANDA is not set
+# CONFIG_IP_NF_PPTP is not set
CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_LIMIT=m
@@ -319,6 +346,7 @@ CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_IP_NF_TARGET_TCPMSS=m
+# CONFIG_IP_NF_TARGET_NFQUEUE is not set
CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_NAT_NEEDED=y
CONFIG_IP_NF_TARGET_MASQUERADE=m
@@ -380,10 +408,18 @@ CONFIG_ECONET=m
CONFIG_ECONET_AUNUDP=y
CONFIG_ECONET_NATIVE=y
CONFIG_WAN_ROUTER=m
+
+#
+# QoS and/or fair queueing
+#
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CLK_JIFFIES=y
# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
# CONFIG_NET_SCH_CLK_CPU is not set
+
+#
+# Queueing/Scheduling
+#
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
# CONFIG_NET_SCH_HFSC is not set
@@ -397,8 +433,10 @@ CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
# CONFIG_NET_SCH_NETEM is not set
CONFIG_NET_SCH_INGRESS=m
-CONFIG_NET_QOS=y
-CONFIG_NET_ESTIMATOR=y
+
+#
+# Classification
+#
CONFIG_NET_CLS=y
# CONFIG_NET_CLS_BASIC is not set
CONFIG_NET_CLS_TCINDEX=m
@@ -407,13 +445,14 @@ CONFIG_NET_CLS_ROUTE=y
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
# CONFIG_CLS_U32_PERF is not set
-# CONFIG_NET_CLS_IND is not set
# CONFIG_CLS_U32_MARK is not set
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
# CONFIG_NET_EMATCH is not set
# CONFIG_NET_CLS_ACT is not set
CONFIG_NET_CLS_POLICE=y
+# CONFIG_NET_CLS_IND is not set
+CONFIG_NET_ESTIMATOR=y
#
# Network testing
@@ -437,6 +476,11 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_DEBUG_DRIVER is not set
#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
# Memory Technology Devices (MTD)
#
CONFIG_MTD=y
@@ -458,6 +502,7 @@ CONFIG_MTD_BLOCK=y
# CONFIG_FTL is not set
# CONFIG_NFTL is not set
# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
#
# RAM/ROM/Flash chip drivers
@@ -492,7 +537,6 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y
# CONFIG_MTD_PHYSMAP is not set
# CONFIG_MTD_ARM_INTEGRATOR is not set
CONFIG_MTD_IXP4XX=y
-# CONFIG_MTD_EDB7312 is not set
# CONFIG_MTD_PCI is not set
# CONFIG_MTD_PLATRAM is not set
@@ -523,6 +567,11 @@ CONFIG_MTD_NAND_IDS=m
# CONFIG_MTD_NAND_NANDSIM is not set
#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
# Parallel port support
#
# CONFIG_PARPORT is not set
@@ -548,14 +597,6 @@ CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CDROM_PKTCDVD is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
# CONFIG_ATA_OVER_ETH is not set
#
@@ -668,6 +709,7 @@ CONFIG_NET_ETHERNET=y
CONFIG_MII=y
# CONFIG_HAPPYMEAL is not set
# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_SMC91X is not set
# CONFIG_DM9000 is not set
@@ -739,6 +781,7 @@ CONFIG_NET_RADIO=y
#
# Wireless 802.11b ISA/PCI cards support
#
+# CONFIG_AIRO is not set
CONFIG_HERMES=y
# CONFIG_PLX_HERMES is not set
# CONFIG_TMD_HERMES is not set
@@ -782,6 +825,7 @@ CONFIG_WAN_ROUTER_DRIVERS=y
#
# ATM drivers
#
+# CONFIG_ATM_DUMMY is not set
CONFIG_ATM_TCP=m
# CONFIG_ATM_LANAI is not set
# CONFIG_ATM_ENI is not set
@@ -902,6 +946,7 @@ CONFIG_IXP4XX_WATCHDOG=y
# TPM devices
#
# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
#
# I2C support
@@ -954,6 +999,7 @@ CONFIG_SENSORS_EEPROM=y
# CONFIG_SENSORS_PCF8591 is not set
# CONFIG_SENSORS_RTC8564 is not set
# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_RTC_X1205_I2C is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
@@ -1036,6 +1082,10 @@ CONFIG_USB_ARCH_HAS_OHCI=y
# CONFIG_USB is not set
#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
# USB Gadget Support
#
# CONFIG_USB_GADGET is not set
@@ -1110,6 +1160,7 @@ CONFIG_RAMFS=y
CONFIG_JFFS2_FS=y
CONFIG_JFFS2_FS_DEBUG=0
CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_SUMMARY is not set
# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
CONFIG_JFFS2_ZLIB=y
CONFIG_JFFS2_RTIME=y
@@ -1190,7 +1241,9 @@ CONFIG_DETECT_SOFTLOCKUP=y
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
CONFIG_FRAME_POINTER=y
+# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_DEBUG_USER is not set
# CONFIG_DEBUG_WAITQ is not set
CONFIG_DEBUG_ERRORS=y
diff --git a/arch/arm/configs/jornada720_defconfig b/arch/arm/configs/jornada720_defconfig
index b88aeba82bc..ad1048db96f 100644
--- a/arch/arm/configs/jornada720_defconfig
+++ b/arch/arm/configs/jornada720_defconfig
@@ -63,7 +63,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/lart_defconfig b/arch/arm/configs/lart_defconfig
index 7033829ed14..c3a93284416 100644
--- a/arch/arm/configs/lart_defconfig
+++ b/arch/arm/configs/lart_defconfig
@@ -62,7 +62,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/lpd7a400_defconfig b/arch/arm/configs/lpd7a400_defconfig
index d64706d3ff3..67eaa26c264 100644
--- a/arch/arm/configs/lpd7a400_defconfig
+++ b/arch/arm/configs/lpd7a400_defconfig
@@ -60,7 +60,6 @@ CONFIG_BASE_SMALL=0
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/lpd7a404_defconfig b/arch/arm/configs/lpd7a404_defconfig
index 87cbedfb303..208d591ebfc 100644
--- a/arch/arm/configs/lpd7a404_defconfig
+++ b/arch/arm/configs/lpd7a404_defconfig
@@ -60,7 +60,6 @@ CONFIG_BASE_SMALL=0
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/lubbock_defconfig b/arch/arm/configs/lubbock_defconfig
index 4bc8717c6f5..81daadcbe0b 100644
--- a/arch/arm/configs/lubbock_defconfig
+++ b/arch/arm/configs/lubbock_defconfig
@@ -63,7 +63,6 @@ CONFIG_OBSOLETE_MODPARM=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/lusl7200_defconfig b/arch/arm/configs/lusl7200_defconfig
index 3ca64cabc92..42f6a77bc3c 100644
--- a/arch/arm/configs/lusl7200_defconfig
+++ b/arch/arm/configs/lusl7200_defconfig
@@ -62,7 +62,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/mainstone_defconfig b/arch/arm/configs/mainstone_defconfig
index 153d68594be..b112bd75bda 100644
--- a/arch/arm/configs/mainstone_defconfig
+++ b/arch/arm/configs/mainstone_defconfig
@@ -63,7 +63,6 @@ CONFIG_OBSOLETE_MODPARM=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/mx1ads_defconfig b/arch/arm/configs/mx1ads_defconfig
index 6517d167acf..d16f6cd6e03 100644
--- a/arch/arm/configs/mx1ads_defconfig
+++ b/arch/arm/configs/mx1ads_defconfig
@@ -63,7 +63,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/neponset_defconfig b/arch/arm/configs/neponset_defconfig
index 7fb1f7c7bf4..3d35255c64e 100644
--- a/arch/arm/configs/neponset_defconfig
+++ b/arch/arm/configs/neponset_defconfig
@@ -65,7 +65,6 @@ CONFIG_OBSOLETE_MODPARM=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/netwinder_defconfig b/arch/arm/configs/netwinder_defconfig
index 6e81acf94c2..2cae1ead9f9 100644
--- a/arch/arm/configs/netwinder_defconfig
+++ b/arch/arm/configs/netwinder_defconfig
@@ -58,7 +58,6 @@ CONFIG_BASE_SMALL=0
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
CONFIG_ARCH_FOOTBRIDGE=y
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/omap_h2_1610_defconfig b/arch/arm/configs/omap_h2_1610_defconfig
index 529f0f72e1e..ee3ecbd9002 100644
--- a/arch/arm/configs/omap_h2_1610_defconfig
+++ b/arch/arm/configs/omap_h2_1610_defconfig
@@ -85,7 +85,6 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/pleb_defconfig b/arch/arm/configs/pleb_defconfig
index 10fec890578..24e8bdd4cb9 100644
--- a/arch/arm/configs/pleb_defconfig
+++ b/arch/arm/configs/pleb_defconfig
@@ -63,7 +63,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/pxa255-idp_defconfig b/arch/arm/configs/pxa255-idp_defconfig
index 21c327883d8..b71d31a4bb5 100644
--- a/arch/arm/configs/pxa255-idp_defconfig
+++ b/arch/arm/configs/pxa255-idp_defconfig
@@ -63,7 +63,6 @@ CONFIG_OBSOLETE_MODPARM=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/realview_defconfig b/arch/arm/configs/realview_defconfig
index 0485b2f1cc2..3f1ec4e304f 100644
--- a/arch/arm/configs/realview_defconfig
+++ b/arch/arm/configs/realview_defconfig
@@ -65,7 +65,6 @@ CONFIG_OBSOLETE_MODPARM=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/rpc_defconfig b/arch/arm/configs/rpc_defconfig
index 19184c1010a..b498afdc03b 100644
--- a/arch/arm/configs/rpc_defconfig
+++ b/arch/arm/configs/rpc_defconfig
@@ -66,7 +66,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index 3f97590c91f..33f31080a98 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -85,7 +85,6 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/shannon_defconfig b/arch/arm/configs/shannon_defconfig
index e3facc4fe79..d052c8f8051 100644
--- a/arch/arm/configs/shannon_defconfig
+++ b/arch/arm/configs/shannon_defconfig
@@ -62,7 +62,6 @@ CONFIG_OBSOLETE_MODPARM=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/shark_defconfig b/arch/arm/configs/shark_defconfig
index 271823f0d70..c48d1706226 100644
--- a/arch/arm/configs/shark_defconfig
+++ b/arch/arm/configs/shark_defconfig
@@ -66,7 +66,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/simpad_defconfig b/arch/arm/configs/simpad_defconfig
index 5373eeb7d57..2e5a616cc98 100644
--- a/arch/arm/configs/simpad_defconfig
+++ b/arch/arm/configs/simpad_defconfig
@@ -64,7 +64,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/smdk2410_defconfig b/arch/arm/configs/smdk2410_defconfig
index 2c60865fda1..4d123d33c7d 100644
--- a/arch/arm/configs/smdk2410_defconfig
+++ b/arch/arm/configs/smdk2410_defconfig
@@ -58,7 +58,6 @@ CONFIG_BASE_SMALL=0
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig
index 9895539533d..d1ace3abfd8 100644
--- a/arch/arm/configs/spitz_defconfig
+++ b/arch/arm/configs/spitz_defconfig
@@ -87,7 +87,6 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/configs/versatile_defconfig b/arch/arm/configs/versatile_defconfig
index d72f2c75426..2687a225aa6 100644
--- a/arch/arm/configs/versatile_defconfig
+++ b/arch/arm/configs/versatile_defconfig
@@ -64,7 +64,6 @@ CONFIG_KMOD=y
# CONFIG_ARCH_CLPS711X is not set
# CONFIG_ARCH_CO285 is not set
# CONFIG_ARCH_EBSA110 is not set
-# CONFIG_ARCH_CAMELOT is not set
# CONFIG_ARCH_FOOTBRIDGE is not set
# CONFIG_ARCH_INTEGRATOR is not set
# CONFIG_ARCH_IOP3XX is not set
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index c11169b5ed9..de94b0f3ee2 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -2,15 +2,16 @@
# Makefile for the linux kernel.
#
-AFLAGS_head.o := -DKERNEL_RAM_ADDR=$(TEXTADDR)
+AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
# Object file lists.
-obj-y := compat.o dma.o entry-armv.o entry-common.o irq.o \
+obj-y := compat.o entry-armv.o entry-common.o irq.o \
process.o ptrace.o semaphore.o setup.o signal.o sys_arm.o \
time.o traps.o
obj-$(CONFIG_APM) += apm.o
+obj-$(CONFIG_ISA_DMA_API) += dma.o
obj-$(CONFIG_ARCH_ACORN) += ecard.o
obj-$(CONFIG_FOOTBRIDGE) += isa.o
obj-$(CONFIG_FIQ) += fiq.o
diff --git a/arch/arm/kernel/apm.c b/arch/arm/kernel/apm.c
index a2843be0555..766b6c05c6d 100644
--- a/arch/arm/kernel/apm.c
+++ b/arch/arm/kernel/apm.c
@@ -18,9 +18,9 @@
#include <linux/proc_fs.h>
#include <linux/miscdevice.h>
#include <linux/apm_bios.h>
+#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/pm.h>
-#include <linux/pm_legacy.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -81,6 +81,7 @@ struct apm_user {
*/
static int suspends_pending;
static int apm_disabled;
+static int arm_apm_active;
static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
@@ -477,9 +478,9 @@ static int kapmd(void *arg)
apm_event_t event;
wait_event_interruptible(kapmd_wait,
- !queue_empty(&kapmd_queue) || !pm_active);
+ !queue_empty(&kapmd_queue) || !arm_apm_active);
- if (!pm_active)
+ if (!arm_apm_active)
break;
spin_lock_irq(&kapmd_queue_lock);
@@ -522,16 +523,11 @@ static int __init apm_init(void)
return -ENODEV;
}
- if (PM_IS_ACTIVE()) {
- printk(KERN_NOTICE "apm: overridden by ACPI.\n");
- return -EINVAL;
- }
-
- pm_active = 1;
+ arm_apm_active = 1;
ret = kernel_thread(kapmd, NULL, CLONE_KERNEL);
if (ret < 0) {
- pm_active = 0;
+ arm_apm_active = 0;
return ret;
}
@@ -543,7 +539,7 @@ static int __init apm_init(void)
if (ret != 0) {
remove_proc_entry("apm", NULL);
- pm_active = 0;
+ arm_apm_active = 0;
wake_up(&kapmd_wait);
wait_for_completion(&kapmd_exit);
}
@@ -556,7 +552,7 @@ static void __exit apm_exit(void)
misc_deregister(&apm_device);
remove_proc_entry("apm", NULL);
- pm_active = 0;
+ arm_apm_active = 0;
wake_up(&kapmd_wait);
wait_for_completion(&kapmd_exit);
}
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 04d3082a7b9..0abbce8c70b 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -23,20 +23,15 @@
#error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32
#endif
/*
- * GCC 2.95.1, 2.95.2: ignores register clobber list in asm().
* GCC 3.0, 3.1: general bad code generation.
* GCC 3.2.0: incorrect function argument offset calculation.
* GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c
* (http://gcc.gnu.org/PR8896) and incorrect structure
* initialisation in fs/jffs2/erase.c
*/
-#if __GNUC__ < 2 || \
- (__GNUC__ == 2 && __GNUC_MINOR__ < 95) || \
- (__GNUC__ == 2 && __GNUC_MINOR__ == 95 && __GNUC_PATCHLEVEL__ != 0 && \
- __GNUC_PATCHLEVEL__ < 3) || \
- (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
+#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
#error Your compiler is too buggy; it is known to miscompile kernels.
-#error Known good compilers: 2.95.3, 2.95.4, 2.96, 3.3
+#error Known good compilers: 3.3
#endif
/* Use marker if you need to separate the values later */
diff --git a/arch/arm/kernel/dma-isa.c b/arch/arm/kernel/dma-isa.c
index e9a36304ec3..03532769a97 100644
--- a/arch/arm/kernel/dma-isa.c
+++ b/arch/arm/kernel/dma-isa.c
@@ -18,7 +18,7 @@
*/
#include <linux/ioport.h>
#include <linux/init.h>
-#include <linux/pci.h>
+#include <linux/dma-mapping.h>
#include <asm/dma.h>
#include <asm/io.h>
@@ -65,37 +65,41 @@ static void isa_enable_dma(dmach_t channel, dma_t *dma)
{
if (dma->invalid) {
unsigned long address, length;
- unsigned int mode, direction;
+ unsigned int mode;
+ enum dma_data_direction direction;
mode = channel & 3;
switch (dma->dma_mode & DMA_MODE_MASK) {
case DMA_MODE_READ:
mode |= ISA_DMA_MODE_READ;
- direction = PCI_DMA_FROMDEVICE;
+ direction = DMA_FROM_DEVICE;
break;
case DMA_MODE_WRITE:
mode |= ISA_DMA_MODE_WRITE;
- direction = PCI_DMA_TODEVICE;
+ direction = DMA_TO_DEVICE;
break;
case DMA_MODE_CASCADE:
mode |= ISA_DMA_MODE_CASCADE;
- direction = PCI_DMA_BIDIRECTIONAL;
+ direction = DMA_BIDIRECTIONAL;
break;
default:
- direction = PCI_DMA_NONE;
+ direction = DMA_NONE;
break;
}
- if (!dma->using_sg) {
+ if (!dma->sg) {
/*
* Cope with ISA-style drivers which expect cache
* coherence.
*/
- dma->buf.dma_address = pci_map_single(NULL,
- dma->buf.__address, dma->buf.length,
+ dma->sg = &dma->buf;
+ dma->sgcount = 1;
+ dma->buf.length = dma->count;
+ dma->buf.dma_address = dma_map_single(NULL,
+ dma->addr, dma->count,
direction);
}
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c
index 2b788388423..5a0f4bc5da9 100644
--- a/arch/arm/kernel/dma.c
+++ b/arch/arm/kernel/dma.c
@@ -12,8 +12,6 @@
* DMA facilities.
*/
#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/mman.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
@@ -23,8 +21,7 @@
#include <asm/mach/dma.h>
DEFINE_SPINLOCK(dma_spin_lock);
-
-#if MAX_DMA_CHANNELS > 0
+EXPORT_SYMBOL(dma_spin_lock);
static dma_t dma_chan[MAX_DMA_CHANNELS];
@@ -81,6 +78,7 @@ bad_dma:
busy:
return -EBUSY;
}
+EXPORT_SYMBOL(request_dma);
/*
* Free DMA channel
@@ -112,6 +110,7 @@ void free_dma(dmach_t channel)
bad_dma:
printk(KERN_ERR "dma: trying to free DMA%d\n", channel);
}
+EXPORT_SYMBOL(free_dma);
/* Set DMA Scatter-Gather list
*/
@@ -125,15 +124,15 @@ void set_dma_sg (dmach_t channel, struct scatterlist *sg, int nr_sg)
dma->sg = sg;
dma->sgcount = nr_sg;
- dma->using_sg = 1;
dma->invalid = 1;
}
+EXPORT_SYMBOL(set_dma_sg);
/* Set DMA address
*
* Copy address to the structure, and set the invalid bit
*/
-void set_dma_addr (dmach_t channel, unsigned long physaddr)
+void __set_dma_addr (dmach_t channel, void *addr)
{
dma_t *dma = dma_chan + channel;
@@ -141,12 +140,11 @@ void set_dma_addr (dmach_t channel, unsigned long physaddr)
printk(KERN_ERR "dma%d: altering DMA address while "
"DMA active\n", channel);
- dma->sg = &dma->buf;
- dma->sgcount = 1;
- dma->buf.__address = bus_to_virt(physaddr);
- dma->using_sg = 0;
+ dma->sg = NULL;
+ dma->addr = addr;
dma->invalid = 1;
}
+EXPORT_SYMBOL(__set_dma_addr);
/* Set DMA byte count
*
@@ -160,12 +158,11 @@ void set_dma_count (dmach_t channel, unsigned long count)
printk(KERN_ERR "dma%d: altering DMA count while "
"DMA active\n", channel);
- dma->sg = &dma->buf;
- dma->sgcount = 1;
- dma->buf.length = count;
- dma->using_sg = 0;
+ dma->sg = NULL;
+ dma->count = count;
dma->invalid = 1;
}
+EXPORT_SYMBOL(set_dma_count);
/* Set DMA direction mode
*/
@@ -180,6 +177,7 @@ void set_dma_mode (dmach_t channel, dmamode_t mode)
dma->dma_mode = mode;
dma->invalid = 1;
}
+EXPORT_SYMBOL(set_dma_mode);
/* Enable DMA channel
*/
@@ -200,6 +198,7 @@ free_dma:
printk(KERN_ERR "dma%d: trying to enable free DMA\n", channel);
BUG();
}
+EXPORT_SYMBOL(enable_dma);
/* Disable DMA channel
*/
@@ -220,6 +219,7 @@ free_dma:
printk(KERN_ERR "dma%d: trying to disable free DMA\n", channel);
BUG();
}
+EXPORT_SYMBOL(disable_dma);
/*
* Is the specified DMA channel active?
@@ -233,6 +233,7 @@ void set_dma_page(dmach_t channel, char pagenr)
{
printk(KERN_ERR "dma%d: trying to set_dma_page\n", channel);
}
+EXPORT_SYMBOL(set_dma_page);
void set_dma_speed(dmach_t channel, int cycle_ns)
{
@@ -243,6 +244,7 @@ void set_dma_speed(dmach_t channel, int cycle_ns)
ret = dma->d_ops->setspeed(channel, dma, cycle_ns);
dma->speed = ret;
}
+EXPORT_SYMBOL(set_dma_speed);
int get_dma_residue(dmach_t channel)
{
@@ -254,49 +256,12 @@ int get_dma_residue(dmach_t channel)
return ret;
}
+EXPORT_SYMBOL(get_dma_residue);
-void __init init_dma(void)
+static int __init init_dma(void)
{
arch_dma_init(dma_chan);
-}
-
-#else
-
-int request_dma(dmach_t channel, const char *device_id)
-{
- return -EINVAL;
-}
-
-int get_dma_residue(dmach_t channel)
-{
return 0;
}
-#define GLOBAL_ALIAS(_a,_b) asm (".set " #_a "," #_b "; .globl " #_a)
-GLOBAL_ALIAS(disable_dma, get_dma_residue);
-GLOBAL_ALIAS(enable_dma, get_dma_residue);
-GLOBAL_ALIAS(free_dma, get_dma_residue);
-GLOBAL_ALIAS(get_dma_list, get_dma_residue);
-GLOBAL_ALIAS(set_dma_mode, get_dma_residue);
-GLOBAL_ALIAS(set_dma_page, get_dma_residue);
-GLOBAL_ALIAS(set_dma_count, get_dma_residue);
-GLOBAL_ALIAS(set_dma_addr, get_dma_residue);
-GLOBAL_ALIAS(set_dma_sg, get_dma_residue);
-GLOBAL_ALIAS(set_dma_speed, get_dma_residue);
-GLOBAL_ALIAS(init_dma, get_dma_residue);
-
-#endif
-
-EXPORT_SYMBOL(request_dma);
-EXPORT_SYMBOL(free_dma);
-EXPORT_SYMBOL(enable_dma);
-EXPORT_SYMBOL(disable_dma);
-EXPORT_SYMBOL(set_dma_addr);
-EXPORT_SYMBOL(set_dma_count);
-EXPORT_SYMBOL(set_dma_mode);
-EXPORT_SYMBOL(set_dma_page);
-EXPORT_SYMBOL(get_dma_residue);
-EXPORT_SYMBOL(set_dma_sg);
-EXPORT_SYMBOL(set_dma_speed);
-
-EXPORT_SYMBOL(dma_spin_lock);
+core_initcall(init_dma);
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
index dceb826bd21..96fd91926c9 100644
--- a/arch/arm/kernel/ecard.c
+++ b/arch/arm/kernel/ecard.c
@@ -40,6 +40,7 @@
#include <linux/proc_fs.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/mutex.h>
#include <asm/dma.h>
#include <asm/ecard.h>
@@ -206,7 +207,7 @@ static void ecard_task_readbytes(struct ecard_request *req)
static DECLARE_WAIT_QUEUE_HEAD(ecard_wait);
static struct ecard_request *ecard_req;
-static DECLARE_MUTEX(ecard_sem);
+static DEFINE_MUTEX(ecard_mutex);
/*
* Set up the expansion card daemon's page tables.
@@ -299,7 +300,7 @@ static void ecard_call(struct ecard_request *req)
req->complete = &completion;
- down(&ecard_sem);
+ mutex_lock(&ecard_mutex);
ecard_req = req;
wake_up(&ecard_wait);
@@ -307,7 +308,7 @@ static void ecard_call(struct ecard_request *req)
* Now wait for kecardd to run.
*/
wait_for_completion(&completion);
- up(&ecard_sem);
+ mutex_unlock(&ecard_mutex);
}
/* ======================= Mid-level card control ===================== */
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 2a8d27e18fa..a52baedf626 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -18,8 +18,6 @@
#include <asm/memory.h>
#include <asm/glue.h>
#include <asm/vfpmacros.h>
-#include <asm/hardware.h> /* should be moved into entry-macro.S */
-#include <asm/arch/irqs.h> /* should be moved into entry-macro.S */
#include <asm/arch/entry-macro.S>
#include "entry-header.S"
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 9299dfc2569..1ec3f7faa25 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -101,7 +101,7 @@ void __attribute__((naked)) set_fiq_regs(struct pt_regs *regs)
ldmia %1, {r8 - r14}\n\
msr cpsr_c, %0 @ return to SVC mode\n\
mov r0, r0\n\
- ldmea fp, {fp, sp, pc}"
+ ldmfd sp, {fp, sp, pc}"
: "=&r" (tmp)
: "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
}
@@ -119,7 +119,7 @@ void __attribute__((naked)) get_fiq_regs(struct pt_regs *regs)
stmia %1, {r8 - r14}\n\
msr cpsr_c, %0 @ return to SVC mode\n\
mov r0, r0\n\
- ldmea fp, {fp, sp, pc}"
+ ldmfd sp, {fp, sp, pc}"
: "=&r" (tmp)
: "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
}
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index d7d69fd7039..1e985f2cd70 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -33,6 +33,8 @@
#define MACHINFO_PGOFFIO 12
#define MACHINFO_NAME 16
+#define KERNEL_RAM_ADDR (PAGE_OFFSET + TEXT_OFFSET)
+
/*
* swapper_pg_dir is the virtual address of the initial page table.
* We place the page tables 16K below KERNEL_RAM_ADDR. Therefore, we must
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index d7099dbbb87..1d50d2b98f5 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -684,8 +684,12 @@ int setup_irq(unsigned int irq, struct irqaction *new)
spin_lock_irqsave(&irq_controller_lock, flags);
p = &desc->action;
if ((old = *p) != NULL) {
- /* Can't share interrupts unless both agree to */
- if (!(old->flags & new->flags & SA_SHIRQ)) {
+ /*
+ * Can't share interrupts unless both agree to and are
+ * the same type.
+ */
+ if (!(old->flags & new->flags & SA_SHIRQ) ||
+ (~old->flags & new->flags) & SA_TRIGGER_MASK) {
spin_unlock_irqrestore(&irq_controller_lock, flags);
return -EBUSY;
}
@@ -705,6 +709,13 @@ int setup_irq(unsigned int irq, struct irqaction *new)
desc->running = 0;
desc->pending = 0;
desc->disable_depth = 1;
+
+ if (new->flags & SA_TRIGGER_MASK &&
+ desc->chip->set_type) {
+ unsigned int type = new->flags & SA_TRIGGER_MASK;
+ desc->chip->set_type(irq, type);
+ }
+
if (!desc->noautoenable) {
desc->disable_depth = 0;
desc->chip->unmask(irq);
@@ -1027,7 +1038,6 @@ void __init init_irq_proc(void)
void __init init_IRQ(void)
{
struct irqdesc *desc;
- extern void init_dma(void);
int irq;
#ifdef CONFIG_SMP
@@ -1041,7 +1051,6 @@ void __init init_IRQ(void)
}
init_arch_irq();
- init_dma();
}
static int __init noirqdebug_setup(char *str)
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 30494aab829..4b4e4cf79c8 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -28,10 +28,9 @@
#include <linux/init.h>
#include <linux/cpu.h>
-#include <asm/system.h>
-#include <asm/io.h>
#include <asm/leds.h>
#include <asm/processor.h>
+#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/mach/time.h>
@@ -343,10 +342,10 @@ void flush_thread(void)
void release_thread(struct task_struct *dead_task)
{
#if defined(CONFIG_VFP)
- vfp_release_thread(&dead_task->thread_info->vfpstate);
+ vfp_release_thread(&task_thread_info(dead_task)->vfpstate);
#endif
#if defined(CONFIG_IWMMXT)
- iwmmxt_task_release(dead_task->thread_info);
+ iwmmxt_task_release(task_thread_info(dead_task));
#endif
}
@@ -356,10 +355,9 @@ int
copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
{
- struct thread_info *thread = p->thread_info;
- struct pt_regs *childregs;
+ struct thread_info *thread = task_thread_info(p);
+ struct pt_regs *childregs = task_pt_regs(p);
- childregs = (void *)thread + THREAD_START_SP - sizeof(*regs);
*childregs = *regs;
childregs->ARM_r0 = 0;
childregs->ARM_sp = stack_start;
@@ -461,8 +459,8 @@ unsigned long get_wchan(struct task_struct *p)
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
- stack_start = (unsigned long)(p->thread_info + 1);
- stack_end = ((unsigned long)p->thread_info) + THREAD_SIZE;
+ stack_start = (unsigned long)end_of_stack(p);
+ stack_end = (unsigned long)task_stack_page(p) + THREAD_SIZE;
fp = thread_saved_fp(p);
do {
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 2b84f78d7b0..e591f72bcde 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -55,23 +55,6 @@
#endif
/*
- * Get the address of the live pt_regs for the specified task.
- * These are saved onto the top kernel stack when the process
- * is not running.
- *
- * Note: if a user thread is execve'd from kernel space, the
- * kernel stack will not be empty on entry to the kernel, so
- * ptracing these tasks will fail.
- */
-static inline struct pt_regs *
-get_user_regs(struct task_struct *task)
-{
- return (struct pt_regs *)
- ((unsigned long)task->thread_info + THREAD_SIZE -
- 8 - sizeof(struct pt_regs));
-}
-
-/*
* this routine will get a word off of the processes privileged stack.
* the offset is how far from the base addr as stored in the THREAD.
* this routine assumes that all the privileged stacks are in our
@@ -79,7 +62,7 @@ get_user_regs(struct task_struct *task)
*/
static inline long get_user_reg(struct task_struct *task, int offset)
{
- return get_user_regs(task)->uregs[offset];
+ return task_pt_regs(task)->uregs[offset];
}
/*
@@ -91,7 +74,7 @@ static inline long get_user_reg(struct task_struct *task, int offset)
static inline int
put_user_reg(struct task_struct *task, int offset, long data)
{
- struct pt_regs newregs, *regs = get_user_regs(task);
+ struct pt_regs newregs, *regs = task_pt_regs(task);
int ret = -EINVAL;
newregs = *regs;
@@ -421,7 +404,7 @@ void ptrace_set_bpt(struct task_struct *child)
u32 insn;
int res;
- regs = get_user_regs(child);
+ regs = task_pt_regs(child);
pc = instruction_pointer(regs);
if (thumb_mode(regs)) {
@@ -572,7 +555,7 @@ static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
*/
static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
{
- struct pt_regs *regs = get_user_regs(tsk);
+ struct pt_regs *regs = task_pt_regs(tsk);
return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
}
@@ -587,7 +570,7 @@ static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
ret = -EFAULT;
if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
- struct pt_regs *regs = get_user_regs(tsk);
+ struct pt_regs *regs = task_pt_regs(tsk);
ret = -EINVAL;
if (valid_user_regs(&newregs)) {
@@ -604,7 +587,7 @@ static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
*/
static int ptrace_getfpregs(struct task_struct *tsk, void __user *ufp)
{
- return copy_to_user(ufp, &tsk->thread_info->fpstate,
+ return copy_to_user(ufp, &task_thread_info(tsk)->fpstate,
sizeof(struct user_fp)) ? -EFAULT : 0;
}
@@ -613,7 +596,7 @@ static int ptrace_getfpregs(struct task_struct *tsk, void __user *ufp)
*/
static int ptrace_setfpregs(struct task_struct *tsk, void __user *ufp)
{
- struct thread_info *thread = tsk->thread_info;
+ struct thread_info *thread = task_thread_info(tsk);
thread->used_cp[1] = thread->used_cp[2] = 1;
return copy_from_user(&thread->fpstate, ufp,
sizeof(struct user_fp)) ? -EFAULT : 0;
@@ -626,7 +609,7 @@ static int ptrace_setfpregs(struct task_struct *tsk, void __user *ufp)
*/
static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
{
- struct thread_info *thread = tsk->thread_info;
+ struct thread_info *thread = task_thread_info(tsk);
void *ptr = &thread->fpstate;
if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
@@ -643,7 +626,7 @@ static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
*/
static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
{
- struct thread_info *thread = tsk->thread_info;
+ struct thread_info *thread = task_thread_info(tsk);
void *ptr = &thread->fpstate;
if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
@@ -779,7 +762,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#endif
case PTRACE_GET_THREAD_AREA:
- ret = put_user(child->thread_info->tp_value,
+ ret = put_user(task_thread_info(child)->tp_value,
(unsigned long __user *) data);
break;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 85774165e9f..c45d10d07bd 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -26,8 +26,6 @@
#include <asm/cpu.h>
#include <asm/elf.h>
-#include <asm/hardware.h>
-#include <asm/io.h>
#include <asm/procinfo.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
@@ -207,7 +205,7 @@ static const char *proc_arch[] = {
"5TE",
"5TEJ",
"6TEJ",
- "?(10)",
+ "7",
"?(11)",
"?(12)",
"?(13)",
@@ -260,14 +258,17 @@ int cpu_architecture(void)
{
int cpu_arch;
- if ((processor_id & 0x0000f000) == 0) {
+ if ((processor_id & 0x0008f000) == 0) {
cpu_arch = CPU_ARCH_UNKNOWN;
- } else if ((processor_id & 0x0000f000) == 0x00007000) {
+ } else if ((processor_id & 0x0008f000) == 0x00007000) {
cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
- } else {
+ } else if ((processor_id & 0x00080000) == 0x00000000) {
cpu_arch = (processor_id >> 16) & 7;
if (cpu_arch)
cpu_arch += CPU_ARCH_ARMv3;
+ } else {
+ /* the revised CPUID */
+ cpu_arch = ((processor_id >> 12) & 0xf) - 0xb + CPU_ARCH_ARMv6;
}
return cpu_arch;
@@ -865,11 +866,11 @@ static int c_show(struct seq_file *m, void *v)
seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);
- if ((processor_id & 0x0000f000) == 0x00000000) {
+ if ((processor_id & 0x0008f000) == 0x00000000) {
/* pre-ARM7 */
seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
} else {
- if ((processor_id & 0x0000f000) == 0x00007000) {
+ if ((processor_id & 0x0008f000) == 0x00007000) {
/* ARM7 */
seq_printf(m, "CPU variant\t: 0x%02x\n",
(processor_id >> 16) & 127);
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 373c0959bc2..7338948bd7d 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -114,7 +114,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
* We need to tell the secondary core where to find
* its stack and the page tables.
*/
- secondary_data.stack = (void *)idle->thread_info + THREAD_START_SP;
+ secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
secondary_data.pgdir = virt_to_phys(pgd);
wmb();
@@ -245,7 +245,7 @@ void __cpuexit cpu_die(void)
__asm__("mov sp, %0\n"
" b secondary_start_kernel"
:
- : "r" ((void *)current->thread_info + THREAD_SIZE - 8));
+ : "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index fc4729106a3..d7d932c0286 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -29,9 +29,6 @@
#include <linux/sysdev.h>
#include <linux/timer.h>
-#include <asm/hardware.h>
-#include <asm/io.h>
-#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/thread_info.h>
#include <asm/mach/time.h>
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 45e9ea6cd2a..93cfd3ffcc7 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -23,7 +23,6 @@
#include <asm/atomic.h>
#include <asm/cacheflush.h>
-#include <asm/io.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -165,7 +164,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
} else if (verify_stack(fp)) {
printk("invalid frame pointer 0x%08x", fp);
ok = 0;
- } else if (fp < (unsigned long)(tsk->thread_info + 1))
+ } else if (fp < (unsigned long)end_of_stack(tsk))
printk("frame pointer underflow");
printk("\n");
@@ -211,7 +210,7 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
if (!user_mode(regs) || in_interrupt()) {
dump_mem("Stack: ", regs->ARM_sp,
- THREAD_SIZE + (unsigned long)tsk->thread_info);
+ THREAD_SIZE + (unsigned long)task_stack_page(tsk));
dump_backtrace(regs, tsk);
dump_instr(regs);
}
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 9a47770114d..2b254e88595 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -17,15 +17,13 @@ jiffies = jiffies_64;
jiffies = jiffies_64 + 4;
#endif
+SECTIONS
+{
#ifdef CONFIG_XIP_KERNEL
-#define TEXTADDR XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
+ . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
#else
-#define TEXTADDR KERNEL_RAM_ADDR
+ . = PAGE_OFFSET + TEXT_OFFSET;
#endif
-
-SECTIONS
-{
- . = TEXTADDR;
.init : { /* Init code and data */
_stext = .;
_sinittext = .;
@@ -104,7 +102,7 @@ SECTIONS
#ifdef CONFIG_XIP_KERNEL
__data_loc = ALIGN(4); /* location in binary */
- . = KERNEL_RAM_ADDR;
+ . = PAGE_OFFSET + TEXT_OFFSET;
#else
. = ALIGN(THREAD_SIZE);
__data_loc = .;
diff --git a/arch/arm/lib/csumpartialcopy.S b/arch/arm/lib/csumpartialcopy.S
index 990ee63b246..21effe0dbf9 100644
--- a/arch/arm/lib/csumpartialcopy.S
+++ b/arch/arm/lib/csumpartialcopy.S
@@ -18,11 +18,13 @@
*/
.macro save_regs
+ mov ip, sp
stmfd sp!, {r1, r4 - r8, fp, ip, lr, pc}
+ sub fp, ip, #4
.endm
- .macro load_regs,flags
- LOADREGS(\flags,fp,{r1, r4 - r8, fp, sp, pc})
+ .macro load_regs
+ ldmfd sp, {r1, r4 - r8, fp, sp, pc}
.endm
.macro load1b, reg1
diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S
index 4a4609c1909..c50e8f5285d 100644
--- a/arch/arm/lib/csumpartialcopygeneric.S
+++ b/arch/arm/lib/csumpartialcopygeneric.S
@@ -23,7 +23,7 @@ len .req r2
sum .req r3
.Lzero: mov r0, sum
- load_regs ea
+ load_regs
/*
* Align an unaligned destination pointer. We know that
@@ -87,9 +87,7 @@ sum .req r3
b .Ldone
FN_ENTRY
- mov ip, sp
save_regs
- sub fp, ip, #4
cmp len, #8 @ Ensure that we have at least
blo .Lless8 @ 8 bytes to copy.
@@ -163,7 +161,7 @@ FN_ENTRY
ldr sum, [sp, #0] @ dst
tst sum, #1
movne r0, r0, ror #8
- load_regs ea
+ load_regs
.Lsrc_not_aligned:
adc sum, sum, #0 @ include C from dst alignment
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 333bca292de..c3b93e22ea2 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -18,11 +18,13 @@
.text
.macro save_regs
+ mov ip, sp
stmfd sp!, {r1 - r2, r4 - r8, fp, ip, lr, pc}
+ sub fp, ip, #4
.endm
- .macro load_regs,flags
- ldm\flags fp, {r1, r2, r4-r8, fp, sp, pc}
+ .macro load_regs
+ ldmfd sp, {r1, r2, r4-r8, fp, sp, pc}
.endm
.macro load1b, reg1
@@ -100,5 +102,5 @@
6002: teq r2, r1
strneb r0, [r1], #1
bne 6002b
- load_regs ea
+ load_regs
.previous
diff --git a/arch/arm/mach-aaec2000/clock.c b/arch/arm/mach-aaec2000/clock.c
index 0340ddc4824..74aa7a39bb6 100644
--- a/arch/arm/mach-aaec2000/clock.c
+++ b/arch/arm/mach-aaec2000/clock.c
@@ -15,27 +15,28 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
#include <asm/semaphore.h>
-#include <asm/hardware/clock.h>
#include "clock.h"
static LIST_HEAD(clocks);
-static DECLARE_MUTEX(clocks_sem);
+static DEFINE_MUTEX(clocks_mutex);
struct clk *clk_get(struct device *dev, const char *id)
{
struct clk *p, *clk = ERR_PTR(-ENOENT);
- down(&clocks_sem);
+ mutex_lock(&clocks_mutex);
list_for_each_entry(p, &clocks, node) {
if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
clk = p;
break;
}
}
- up(&clocks_sem);
+ mutex_unlock(&clocks_mutex);
return clk;
}
@@ -58,17 +59,6 @@ void clk_disable(struct clk *clk)
}
EXPORT_SYMBOL(clk_disable);
-int clk_use(struct clk *clk)
-{
- return 0;
-}
-EXPORT_SYMBOL(clk_use);
-
-void clk_unuse(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_unuse);
-
unsigned long clk_get_rate(struct clk *clk)
{
return clk->rate;
@@ -89,18 +79,18 @@ EXPORT_SYMBOL(clk_set_rate);
int clk_register(struct clk *clk)
{
- down(&clocks_sem);
+ mutex_lock(&clocks_mutex);
list_add(&clk->node, &clocks);
- up(&clocks_sem);
+ mutex_unlock(&clocks_mutex);
return 0;
}
EXPORT_SYMBOL(clk_register);
void clk_unregister(struct clk *clk)
{
- down(&clocks_sem);
+ mutex_lock(&clocks_mutex);
list_del(&clk->node);
- up(&clocks_sem);
+ mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);
diff --git a/arch/arm/mach-aaec2000/core.c b/arch/arm/mach-aaec2000/core.c
index 4e706d9ad36..dce4815cf53 100644
--- a/arch/arm/mach-aaec2000/core.c
+++ b/arch/arm/mach-aaec2000/core.c
@@ -20,11 +20,11 @@
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/signal.h>
+#include <linux/amba/bus.h>
#include <asm/hardware.h>
#include <asm/irq.h>
#include <asm/sizes.h>
-#include <asm/hardware/amba.h>
#include <asm/mach/flash.h>
#include <asm/mach/irq.h>
diff --git a/arch/arm/mach-aaec2000/core.h b/arch/arm/mach-aaec2000/core.h
index daefc0ea14a..b6029a95f19 100644
--- a/arch/arm/mach-aaec2000/core.h
+++ b/arch/arm/mach-aaec2000/core.h
@@ -9,7 +9,7 @@
*
*/
-#include <asm/hardware/amba_clcd.h>
+#include <linux/amba/clcd.h>
struct sys_timer;
diff --git a/arch/arm/mach-at91rm9200/Kconfig b/arch/arm/mach-at91rm9200/Kconfig
new file mode 100644
index 00000000000..4b7218fc3eb
--- /dev/null
+++ b/arch/arm/mach-at91rm9200/Kconfig
@@ -0,0 +1,54 @@
+if ARCH_AT91RM9200
+
+menu "AT91RM9200 Implementations"
+
+comment "AT91RM9200 Board Type"
+
+config ARCH_AT91RM9200DK
+ bool "Atmel AT91RM9200-DK Development board"
+ depends on ARCH_AT91RM9200
+ help
+ Select this if you are using Atmel's AT91RM9200-DK Development board
+
+config MACH_AT91RM9200EK
+ bool "Atmel AT91RM9200-EK Evaluation Kit"
+ depends on ARCH_AT91RM9200
+ help
+ Select this if you are using Atmel's AT91RM9200-EK Evaluation Kit
+
+config MACH_CSB337
+ bool "Cogent CSB337 board"
+ depends on ARCH_AT91RM9200
+ help
+ Select this if you are using Cogent's CSB337 board
+
+config MACH_CSB637
+ bool "Cogent CSB637 board"
+ depends on ARCH_AT91RM9200
+ help
+ Select this if you are using Cogent's CSB637 board
+
+config MACH_CARMEVA
+ bool "Conitec's ARM&EVA"
+ depends on ARCH_AT91RM9200
+ help
+ Select this if you are using Conitec's AT91RM9200-MCU-Module
+
+config MACH_KB9200
+ bool "KwikByte's KB920x"
+ depends on ARCH_AT91RM9200
+ help
+ Select this if you are using KwikByte's KB920x board
+
+
+comment "AT91RM9200 Feature Selections"
+
+config AT91_PROGRAMMABLE_CLOCKS
+ bool "Programmable Clocks"
+ help
+ Select this if you need to program one or more of the PCK0..PCK3
+ programmable clock outputs.
+
+endmenu
+
+endif
diff --git a/arch/arm/mach-at91rm9200/Makefile b/arch/arm/mach-at91rm9200/Makefile
new file mode 100644
index 00000000000..75e6ee318de
--- /dev/null
+++ b/arch/arm/mach-at91rm9200/Makefile
@@ -0,0 +1,27 @@
+#
+# Makefile for the linux kernel.
+#
+
+obj-y := clock.o irq.o time.o gpio.o common.o devices.o
+obj-m :=
+obj-n :=
+obj- :=
+
+# Board-specific support
+obj-$(CONFIG_ARCH_AT91RM9200DK) += board-dk.o
+obj-$(CONFIG_MACH_AT91RM9200EK) += board-ek.o
+obj-$(CONFIG_MACH_CSB337) += board-csb337.o
+obj-$(CONFIG_MACH_CSB637) += board-csb637.o
+#obj-$(CONFIG_MACH_CARMEVA) += board-carmeva.o
+#obj-$(CONFIG_MACH_KB9200) += board-kb9202.o
+
+# LEDs support
+#led-$(CONFIG_ARCH_AT91RM9200DK) += leds.o
+#led-$(CONFIG_MACH_AT91RM9200EK) += leds.o
+#led-$(CONFIG_MACH_CSB337) += leds.o
+#led-$(CONFIG_MACH_CSB637) += leds.o
+#led-$(CONFIG_MACH_KB9200) += leds.o
+obj-$(CONFIG_LEDS) += $(led-y)
+
+# VGA support
+#obj-$(CONFIG_FB_S1D13XXX) += ics1523.o
diff --git a/arch/arm/mach-at91rm9200/Makefile.boot b/arch/arm/mach-at91rm9200/Makefile.boot
new file mode 100644
index 00000000000..e667dcc7cd3
--- /dev/null
+++ b/arch/arm/mach-at91rm9200/Makefile.boot
@@ -0,0 +1,9 @@
+# Note: the following conditions must always be true:
+# ZRELADDR == virt_to_phys(TEXTADDR)
+# PARAMS_PHYS must be within 4MB of ZRELADDR
+# INITRD_PHYS must be in RAM
+
+ zreladdr-y := 0x20008000
+params_phys-y := 0x20000100
+initrd_phys-y := 0x20410000
+
diff --git a/arch/arm/mach-at91rm9200/board-csb337.c b/arch/arm/mach-at91rm9200/board-csb337.c
new file mode 100644
index 00000000000..4aec834ee47
--- /dev/null
+++ b/arch/arm/mach-at91rm9200/board-csb337.c
@@ -0,0 +1,143 @@
+/*
+ * linux/arch/arm/mach-at91rm9200/board-csb337.c
+ *
+ * Copyright (C) 2005 SAN People
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/hardware.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/irq.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/mach/serial_at91rm9200.h>
+#include <asm/arch/board.h>
+
+#include "generic.h"
+
+static void __init csb337_init_irq(void)
+{
+ /* Initialize AIC controller */
+ at91rm9200_init_irq(NULL);
+
+ /* Set up the GPIO interrupts */
+ at91_gpio_irq_setup(BGA_GPIO_BANKS);
+}
+
+/*
+ * Serial port configuration.
+ * 0 .. 3 = USART0 .. USART3
+ * 4 = DBGU
+ */
+#define CSB337_UART_MAP { 4, 1, -1, -1, -1 } /* ttyS0, ..., ttyS4 */
+#define CSB337_SERIAL_CONSOLE 0 /* ttyS0 */
+
+static void __init csb337_map_io(void)
+{
+ int serial[AT91_NR_UART] = CSB337_UART_MAP;
+ int i;
+
+ at91rm9200_map_io();
+
+ /* Initialize clocks: 3.6864 MHz crystal */
+ at91_clock_init(3686400);
+
+#ifdef CONFIG_SERIAL_AT91
+ at91_console_port = CSB337_SERIAL_CONSOLE;
+ memcpy(at91_serial_map, serial, sizeof(serial));
+
+ /* Register UARTs */
+ for (i = 0; i < AT91_NR_UART; i++) {
+ if (serial[i] >= 0)
+ at91_register_uart(i, serial[i]);
+ }
+#endif
+}
+
+static struct at91_eth_data __initdata csb337_eth_data = {
+ .phy_irq_pin = AT91_PIN_PC2,
+ .is_rmii = 0,
+};
+
+static struct at91_usbh_data __initdata csb337_usbh_data = {
+ .ports = 2,
+};
+
+static struct at91_udc_data __initdata csb337_udc_data = {
+ // this has no VBUS sensing pin
+ .pullup_pin = AT91_PIN_PA24,
+};
+
+static struct at91_cf_data __initdata csb337_cf_data = {
+ /*
+ * connector P4 on the CSB 337 mates to
+ * connector P8 on the CSB 300CF
+ */
+
+ /* CSB337 specific */
+ .det_pin = AT91_PIN_PC3,
+
+ /* CSB300CF specific */
+ .irq_pin = AT91_PIN_PA19,
+ .vcc_pin = AT91_PIN_PD0,
+ .rst_pin = AT91_PIN_PD2,
+};
+
+static struct at91_mmc_data __initdata csb337_mmc_data = {
+ .det_pin = AT91_PIN_PD5,
+ .is_b = 0,
+ .wire4 = 1,
+ .wp_pin = AT91_PIN_PD6,
+};
+
+static void __init csb337_board_init(void)
+{
+ /* Ethernet */
+ at91_add_device_eth(&csb337_eth_data);
+ /* USB Host */
+ at91_add_device_usbh(&csb337_usbh_data);
+ /* USB Device */
+ at91_add_device_udc(&csb337_udc_data);
+ /* Compact Flash */
+ at91_set_gpio_input(AT91_PIN_PB22, 1); /* IOIS16 */
+ at91_add_device_cf(&csb337_cf_data);
+ /* MMC */
+ at91_add_device_mmc(&csb337_mmc_data);
+}
+
+MACHINE_START(CSB337, "Cogent CSB337")
+ /* Maintainer: Bill Gatliff */
+ .phys_ram = AT91_SDRAM_BASE,
+ .phys_io = AT91_BASE_SYS,
+ .io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
+ .boot_params = AT91_SDRAM_BASE + 0x100,
+ .timer = &at91rm9200_timer,
+ .map_io = csb337_map_io,
+ .init_irq = csb337_init_irq,
+ .init_machine = csb337_board_init,
+MACHINE_END
diff --git a/arch/arm/mach-at91rm9200/board-csb637.c b/arch/arm/mach-at91rm9200/board-csb637.c
new file mode 100644
index 00000000000..23e4cc21481
--- /dev/null
+++ b/arch/arm/mach-at91rm9200/board-csb637.c
@@ -0,0 +1,116 @@
+/*
+ * linux/arch/arm/mach-at91rm9200/board-csb637.c
+ *
+ * Copyright (C) 2005 SAN People
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/hardware.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/irq.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/mach/serial_at91rm9200.h>
+#include <asm/arch/board.h>
+
+#include "generic.h"
+
+static void __init csb637_init_irq(void)
+{
+ /* Initialize AIC controller */
+ at91rm9200_init_irq(NULL);
+
+ /* Set up the GPIO interrupts */
+ at91_gpio_irq_setup(BGA_GPIO_BANKS);
+}
+
+/*
+ * Serial port configuration.
+ * 0 .. 3 = USART0 .. USART3
+ * 4 = DBGU
+ */
+#define CSB637_UART_MAP { 4, 1, -1, -1, -1 } /* ttyS0, ..., ttyS4 */
+#define CSB637_SERIAL_CONSOLE 0 /* ttyS0 */
+
+static void __init csb637_map_io(void)
+{
+ int serial[AT91_NR_UART] = CSB637_UART_MAP;
+ int i;
+
+ at91rm9200_map_io();
+
+ /* Initialize clocks: 3.6864 MHz crystal */
+ at91_clock_init(3686400);
+
+#ifdef CONFIG_SERIAL_AT91
+ at91_console_port = CSB637_SERIAL_CONSOLE;
+ memcpy(at91_serial_map, serial, sizeof(serial));
+
+ /* Register UARTs */
+ for (i = 0; i < AT91_NR_UART; i++) {
+ if (serial[i] >= 0)
+ at91_register_uart(i, serial[i]);
+ }
+#endif
+}
+
+static struct at91_eth_data __initdata csb637_eth_data = {
+ .phy_irq_pin = AT91_PIN_PC0,
+ .is_rmii = 0,
+};
+
+static struct at91_usbh_data __initdata csb637_usbh_data = {
+ .ports = 2,
+};
+
+static struct at91_udc_data __initdata csb637_udc_data = {
+ .vbus_pin = AT91_PIN_PB28,
+ .pullup_pin = AT91_PIN_PB1,
+};
+
+static void __init csb637_board_init(void)
+{
+ /* Ethernet */
+ at91_add_device_eth(&csb637_eth_data);
+ /* USB Host */
+ at91_add_device_usbh(&csb637_usbh_data);
+ /* USB Device */
+ at91_add_device_udc(&csb637_udc_data);
+}
+
+MACHINE_START(CSB637, "Cogent CSB637")
+ /* Maintainer: Bill Gatliff */
+ .phys_ram = AT91_SDRAM_BASE,
+ .phys_io = AT91_BASE_SYS,
+ .io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
+ .boot_params = AT91_SDRAM_BASE + 0x100,
+ .timer = &at91rm9200_timer,
+ .map_io = csb637_map_io,
+ .init_irq = csb637_init_irq,
+ .init_machine = csb637_board_init,
+MACHINE_END
diff --git a/arch/arm/mach-at91rm9200/board-dk.c b/arch/arm/mach-at91rm9200/board-dk.c
new file mode 100644
index 00000000000..8c747a31b95
--- /dev/null
+++ b/arch/arm/mach-at91rm9200/board-dk.c
@@ -0,0 +1,138 @@
+/*
+ * linux/arch/arm/mach-at91rm9200/board-dk.c
+ *
+ * Copyright (C) 2005 SAN People
+ *
+ * Epson S1D framebuffer glue code is:
+ * Copyright (C) 2005 Thibaut VARENE <varenet@parisc-linux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/hardware.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/irq.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/mach/serial_at91rm9200.h>
+#include <asm/arch/board.h>
+
+#include "generic.h"
+
+static void __init dk_init_irq(void)
+{
+ /* Initialize AIC controller */
+ at91rm9200_init_irq(NULL);
+
+ /* Set up the GPIO interrupts */
+ at91_gpio_irq_setup(BGA_GPIO_BANKS);
+}
+
+/*
+ * Serial port configuration.
+ * 0 .. 3 = USART0 .. USART3
+ * 4 = DBGU
+ */
+#define DK_UART_MAP { 4, 1, -1, -1, -1 } /* ttyS0, ..., ttyS4 */
+#define DK_SERIAL_CONSOLE 0 /* ttyS0 */
+
+static void __init dk_map_io(void)
+{
+ int serial[AT91_NR_UART] = DK_UART_MAP;
+ int i;
+
+ at91rm9200_map_io();
+
+ /* Initialize clocks: 18.432 MHz crystal */
+ at91_clock_init(18432000);
+
+#ifdef CONFIG_SERIAL_AT91
+ at91_console_port = DK_SERIAL_CONSOLE;
+ memcpy(at91_serial_map, serial, sizeof(serial));
+
+ /* Register UARTs */
+ for (i = 0; i < AT91_NR_UART; i++) {
+ if (at91_serial_map[i] >= 0)
+ at91_register_uart(i, at91_serial_map[i]);
+ }
+#endif
+}
+
+static struct at91_eth_data __initdata dk_eth_data = {
+ .phy_irq_pin = AT91_PIN_PC4,
+ .is_rmii = 1,
+};
+
+static struct at91_usbh_data __initdata dk_usbh_data = {
+ .ports = 2,
+};
+
+static struct at91_udc_data __initdata dk_udc_data = {
+ .vbus_pin = AT91_PIN_PD4,
+ .pullup_pin = AT91_PIN_PD5,
+};
+
+static struct at91_cf_data __initdata dk_cf_data = {
+ .det_pin = AT91_PIN_PB0,
+ .rst_pin = AT91_PIN_PC5,
+ // .irq_pin = ... not connected
+ // .vcc_pin = ... always powered
+};
+
+static struct at91_mmc_data __initdata dk_mmc_data = {
+ .is_b = 0,
+ .wire4 = 1,
+};
+
+static void __init dk_board_init(void)
+{
+ /* Ethernet */
+ at91_add_device_eth(&dk_eth_data);
+ /* USB Host */
+ at91_add_device_usbh(&dk_usbh_data);
+ /* USB Device */
+ at91_add_device_udc(&dk_udc_data);
+ /* Compact Flash */
+ at91_add_device_cf(&dk_cf_data);
+ /* MMC */
+ at91_set_gpio_output(AT91_PIN_PB7, 1); /* this MMC card slot can optionally use SPI signaling (CS3). default: MMC */
+ at91_add_device_mmc(&dk_mmc_data);
+ /* VGA */
+// dk_add_device_video();
+}
+
+MACHINE_START(AT91RM9200DK, "Atmel AT91RM9200-DK")
+ /* Maintainer: SAN People/Atmel */
+ .phys_ram = AT91_SDRAM_BASE,
+ .phys_io = AT91_BASE_SYS,
+ .io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
+ .boot_params = AT91_SDRAM_BASE + 0x100,
+ .timer = &at91rm9200_timer,
+ .map_io = dk_map_io,
+ .init_irq = dk_init_irq,
+ .init_machine = dk_board_init,
+MACHINE_END
diff --git a/arch/arm/mach-at91rm9200/board-ek.c b/arch/arm/mach-at91rm9200/board-ek.c
new file mode 100644
index 00000000000..d140645711b
--- /dev/null
+++ b/arch/arm/mach-at91rm9200/board-ek.c
@@ -0,0 +1,131 @@
+/*
+ * linux/arch/arm/mach-at91rm9200/board-ek.c
+ *
+ * Copyright (C) 2005 SAN People
+ *
+ * Epson S1D framebuffer glue code is:
+ * Copyright (C) 2005 Thibaut VARENE <varenet@parisc-linux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/hardware.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/irq.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/mach/serial_at91rm9200.h>
+#include <asm/arch/board.h>
+
+#include "generic.h"
+
+static void __init ek_init_irq(void)
+{
+ /* Initialize AIC controller */
+ at91rm9200_init_irq(NULL);
+
+ /* Set up the GPIO interrupts */
+ at91_gpio_irq_setup(BGA_GPIO_BANKS);
+}
+
+/*
+ * Serial port configuration.
+ * 0 .. 3 = USART0 .. USART3
+ * 4 = DBGU
+ */
+#define EK_UART_MAP { 4, 1, -1, -1, -1 } /* ttyS0, ..., ttyS4 */
+#define EK_SERIAL_CONSOLE 0 /* ttyS0 */
+
+static void __init ek_map_io(void)
+{
+ int serial[AT91_NR_UART] = EK_UART_MAP;
+ int i;
+
+ at91rm9200_map_io();
+
+ /* Initialize clocks: 18.432 MHz crystal */
+ at91_clock_init(18432000);
+
+#ifdef CONFIG_SERIAL_AT91
+ at91_console_port = EK_SERIAL_CONSOLE;
+ memcpy(at91_serial_map, serial, sizeof(serial));
+
+ /* Register UARTs */
+ for (i = 0; i < AT91_NR_UART; i++) {
+ if (serial[i] >= 0)
+ at91_register_uart(i, serial[i]);
+ }
+#endif
+}
+
+static struct at91_eth_data __initdata ek_eth_data = {
+ .phy_irq_pin = AT91_PIN_PC4,
+ .is_rmii = 1,
+};
+
+static struct at91_usbh_data __initdata ek_usbh_data = {
+ .ports = 2,
+};
+
+static struct at91_udc_data __initdata ek_udc_data = {
+ .vbus_pin = AT91_PIN_PD4,
+ .pullup_pin = AT91_PIN_PD5,
+};
+
+static struct at91_mmc_data __initdata ek_mmc_data = {
+ .det_pin = AT91_PIN_PB27,
+ .is_b = 0,
+ .wire4 = 1,
+ .wp_pin = AT91_PIN_PA17,
+};
+
+static void __init ek_board_init(void)
+{
+ /* Ethernet */
+ at91_add_device_eth(&ek_eth_data);
+ /* USB Host */
+ at91_add_device_usbh(&ek_usbh_data);
+ /* USB Device */
+ at91_add_device_udc(&ek_udc_data);
+ /* MMC */
+ at91_set_gpio_output(AT91_PIN_PB22, 1); /* this MMC card slot can optionally use SPI signaling (CS3). default: MMC */
+ at91_add_device_mmc(&ek_mmc_data);
+ /* VGA */
+// ek_add_device_video();
+}
+
+MACHINE_START(AT91RM9200EK, "Atmel AT91RM9200-EK")
+ /* Maintainer: SAN People/Atmel */
+ .phys_ram = AT91_SDRAM_BASE,
+ .phys_io = AT91_BASE_SYS,
+ .io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
+ .boot_params = AT91_SDRAM_BASE + 0x100,
+ .timer = &at91rm9200_timer,
+ .map_io = ek_map_io,
+ .init_irq = ek_init_irq,
+ .init_machine = ek_board_init,
+MACHINE_END
diff --git a/arch/arm/mach-at91rm9200/clock.c b/arch/arm/mach-at91rm9200/clock.c
new file mode 100644
index 00000000000..ec8195a2a3c
--- /dev/null
+++ b/arch/arm/mach-at91rm9200/clock.c
@@ -0,0 +1,620 @@
+/*
+ * linux/arch/arm/mach-at91rm9200/clock.c
+ *
+ * Copyright (C) 2005 David Brownell
+ * Copyright (C) 2005 Ivan Kokshaysky
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+
+#include <asm/semaphore.h>
+#include <asm/io.h>
+#include <asm/mach-types.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/arch/board.h> /* for master clock global */
+
+#include "generic.h"
+
+#undef DEBUG
+
+/*
+ * There's a lot more which can be done with clocks, including cpufreq
+ * integration, slow clock mode support (for system suspend), letting
+ * PLLB be used at other rates (on boards that don't need USB), etc.
+ */
+
+struct clk {
+ const char *name;
+ unsigned long rate_hz;
+ struct clk *parent;
+ u32 pmc_mask;
+ void (*mode)(struct clk *, int);
+ unsigned id:2; /* PCK0..3, or 32k/main/a/b */
+ unsigned primary:1;
+ unsigned pll:1;
+ unsigned programmable:1;
+ u16 users;
+};
+
+static spinlock_t clk_lock;
+static u32 at91_pllb_usb_init;
+
+/*
+ * Four primary clock sources: two crystal oscillators (32K, main), and
+ * two PLLs. PLLA usually runs the master clock; and PLLB must run at
+ * 48 MHz (unless no USB function clocks are needed). The main clock and
+ * both PLLs are turned off to run in "slow clock mode" (system suspend).
+ */
+static struct clk clk32k = {
+ .name = "clk32k",
+ .rate_hz = AT91_SLOW_CLOCK,
+ .users = 1, /* always on */
+ .id = 0,
+ .primary = 1,
+};
+static struct clk main_clk = {
+ .name = "main",
+ .pmc_mask = 1 << 0, /* in PMC_SR */
+ .users = 1,
+ .id = 1,
+ .primary = 1,
+};
+static struct clk plla = {
+ .name = "plla",
+ .parent = &main_clk,
+ .pmc_mask = 1 << 1, /* in PMC_SR */
+ .id = 2,
+ .primary = 1,
+ .pll = 1,
+};
+
+static void pllb_mode(struct clk *clk, int is_on)
+{
+ u32 value;
+
+ if (is_on) {
+ is_on = AT91_PMC_LOCKB;
+ value = at91_pllb_usb_init;
+ } else
+ value = 0;
+
+ at91_sys_write(AT91_CKGR_PLLBR, value);
+
+ do {
+ cpu_relax();
+ } while ((at91_sys_read(AT91_PMC_SR) & AT91_PMC_LOCKB) != is_on);
+}
+
+static struct clk pllb = {
+ .name = "pllb",
+ .parent = &main_clk,
+ .pmc_mask = 1 << 2, /* in PMC_SR */
+ .mode = pllb_mode,
+ .id = 3,
+ .primary = 1,
+ .pll = 1,
+};
+
+static void pmc_sys_mode(struct clk *clk, int is_on)
+{
+ if (is_on)
+ at91_sys_write(AT91_PMC_SCER, clk->pmc_mask);
+ else
+ at91_sys_write(AT91_PMC_SCDR, clk->pmc_mask);
+}
+
+/* USB function clocks (PLLB must be 48 MHz) */
+static struct clk udpck = {
+ .name = "udpck",
+ .parent = &pllb,
+ .pmc_mask = AT91_PMC_UDP,
+ .mode = pmc_sys_mode,
+};
+static struct clk uhpck = {
+ .name = "uhpck",
+ .parent = &pllb,
+ .pmc_mask = AT91_PMC_UHP,
+ .mode = pmc_sys_mode,
+};
+
+#ifdef CONFIG_AT91_PROGRAMMABLE_CLOCKS
+/*
+ * The four programmable clocks can be parented by any primary clock.
+ * You must configure pin multiplexing to bring these signals out.
+ */
+static struct clk pck0 = {
+ .name = "pck0",
+ .pmc_mask = AT91_PMC_PCK0,
+ .mode = pmc_sys_mode,
+ .programmable = 1,
+ .id = 0,
+};
+static struct clk pck1 = {
+ .name = "pck1",
+ .pmc_mask = AT91_PMC_PCK1,
+ .mode = pmc_sys_mode,
+ .programmable = 1,
+ .id = 1,
+};
+static struct clk pck2 = {
+ .name = "pck2",
+ .pmc_mask = AT91_PMC_PCK2,
+ .mode = pmc_sys_mode,
+ .programmable = 1,
+ .id = 2,
+};
+static struct clk pck3 = {
+ .name = "pck3",
+ .pmc_mask = AT91_PMC_PCK3,
+ .mode = pmc_sys_mode,
+ .programmable = 1,
+ .id = 3,
+};
+#endif /* CONFIG_AT91_PROGRAMMABLE_CLOCKS */
+
+
+/*
+ * The master clock is divided from the CPU clock (by 1-4). It's used for
+ * memory, interfaces to on-chip peripherals, the AIC, and sometimes more
+ * (e.g. baud rate generation). It's sourced from one of the primary clocks.
+ */
+static struct clk mck = {
+ .name = "mck",
+ .pmc_mask = 1 << 3, /* in PMC_SR */
+ .users = 1, /* (must be) always on */
+};
+
+static void pmc_periph_mode(struct clk *clk, int is_on)
+{
+ if (is_on)
+ at91_sys_write(AT91_PMC_PCER, clk->pmc_mask);
+ else
+ at91_sys_write(AT91_PMC_PCDR, clk->pmc_mask);
+}
+
+static struct clk udc_clk = {
+ .name = "udc_clk",
+ .parent = &mck,
+ .pmc_mask = 1 << AT91_ID_UDP,
+ .mode = pmc_periph_mode,
+};
+static struct clk ohci_clk = {
+ .name = "ohci_clk",
+ .parent = &mck,
+ .pmc_mask = 1 << AT91_ID_UHP,
+ .mode = pmc_periph_mode,
+};
+
+static struct clk *const clock_list[] = {
+ /* four primary clocks -- MUST BE FIRST! */
+ &clk32k,
+ &main_clk,
+ &plla,
+ &pllb,
+
+ /* PLLB children (USB) */
+ &udpck,
+ &uhpck,
+
+#ifdef CONFIG_AT91_PROGRAMMABLE_CLOCKS
+ /* programmable clocks */
+ &pck0,
+ &pck1,
+ &pck2,
+ &pck3,
+#endif /* CONFIG_AT91_PROGRAMMABLE_CLOCKS */
+
+ /* MCK and peripherals */
+ &mck,
+ // usart0..usart3
+ // mmc
+ &udc_clk,
+ // i2c
+ // spi
+ // ssc0..ssc2
+ // tc0..tc5
+ &ohci_clk,
+ // ether
+};
+
+
+/* clocks are all static for now; no refcounting necessary */
+struct clk *clk_get(struct device *dev, const char *id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(clock_list); i++) {
+ if (strcmp(id, clock_list[i]->name) == 0)
+ return clock_list[i];
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(clk_get);
+
+void clk_put(struct clk *clk)
+{
+}
+EXPORT_SYMBOL(clk_put);
+
+static void __clk_enable(struct clk *clk)
+{
+ if (clk->parent)
+ __clk_enable(clk->parent);
+ if (clk->users++ == 0 && clk->mode)
+ clk->mode(clk, 1);
+}
+
+int clk_enable(struct clk *clk)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&clk_lock, flags);
+ __clk_enable(clk);
+ spin_unlock_irqrestore(&clk_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(clk_enable);
+
+static void __clk_disable(struct clk *clk)
+{
+ BUG_ON(clk->users == 0);
+ if (--clk->users == 0 && clk->mode)
+ clk->mode(clk, 0);
+ if (clk->parent)
+ __clk_disable(clk->parent);
+}
+
+void clk_disable(struct clk *clk)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&clk_lock, flags);
+ __clk_disable(clk);
+ spin_unlock_irqrestore(&clk_lock, flags);
+}
+EXPORT_SYMBOL(clk_disable);
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+ unsigned long flags;
+ unsigned long rate;
+
+ spin_lock_irqsave(&clk_lock, flags);
+ for (;;) {
+ rate = clk->rate_hz;
+ if (rate || !clk->parent)
+ break;
+ clk = clk->parent;
+ }
+ spin_unlock_irqrestore(&clk_lock, flags);
+ return rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+/*------------------------------------------------------------------------*/
+
+#ifdef CONFIG_AT91_PROGRAMMABLE_CLOCKS
+
+/*
+ * For now, only the programmable clocks support reparenting (MCK could
+ * do this too, with care) or rate changing (the PLLs could do this too,
+ * ditto MCK but that's more for cpufreq). Drivers may reparent to get
+ * a better rate match; we don't.
+ */
+
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ unsigned long flags;
+ unsigned prescale;
+ unsigned long actual;
+
+ if (!clk->programmable)
+ return -EINVAL;
+ spin_lock_irqsave(&clk_lock, flags);
+
+ actual = clk->parent->rate_hz;
+ for (prescale = 0; prescale < 7; prescale++) {
+ if (actual && actual <= rate)
+ break;
+ actual >>= 1;
+ }
+
+ spin_unlock_irqrestore(&clk_lock, flags);
+ return (prescale < 7) ? actual : -ENOENT;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ unsigned long flags;
+ unsigned prescale;
+ unsigned long actual;
+
+ if (!clk->programmable)
+ return -EINVAL;
+ if (clk->users)
+ return -EBUSY;
+ spin_lock_irqsave(&clk_lock, flags);
+
+ actual = clk->parent->rate_hz;
+ for (prescale = 0; prescale < 7; prescale++) {
+ if (actual && actual <= rate) {
+ u32 pckr;
+
+ pckr = at91_sys_read(AT91_PMC_PCKR(clk->id));
+ pckr &= 0x03;
+ pckr |= prescale << 2;
+ at91_sys_write(AT91_PMC_PCKR(clk->id), pckr);
+ clk->rate_hz = actual;
+ break;
+ }
+ actual >>= 1;
+ }
+
+ spin_unlock_irqrestore(&clk_lock, flags);
+ return (prescale < 7) ? actual : -ENOENT;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+struct clk *clk_get_parent(struct clk *clk)
+{
+ return clk->parent;
+}
+EXPORT_SYMBOL(clk_get_parent);
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ unsigned long flags;
+
+ if (clk->users)
+ return -EBUSY;
+ if (!parent->primary || !clk->programmable)
+ return -EINVAL;
+ spin_lock_irqsave(&clk_lock, flags);
+
+ clk->rate_hz = parent->rate_hz;
+ clk->parent = parent;
+ at91_sys_write(AT91_PMC_PCKR(clk->id), parent->id);
+
+ spin_unlock_irqrestore(&clk_lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+#endif /* CONFIG_AT91_PROGRAMMABLE_CLOCKS */
+
+/*------------------------------------------------------------------------*/
+
+#ifdef CONFIG_DEBUG_FS
+
+static int at91_clk_show(struct seq_file *s, void *unused)
+{
+ u32 scsr, pcsr, sr;
+ unsigned i;
+
+ seq_printf(s, "SCSR = %8x\n", scsr = at91_sys_read(AT91_PMC_SCSR));
+ seq_printf(s, "PCSR = %8x\n", pcsr = at91_sys_read(AT91_PMC_PCSR));
+
+ seq_printf(s, "MOR = %8x\n", at91_sys_read(AT91_CKGR_MOR));
+ seq_printf(s, "MCFR = %8x\n", at91_sys_read(AT91_CKGR_MCFR));
+ seq_printf(s, "PLLA = %8x\n", at91_sys_read(AT91_CKGR_PLLAR));
+ seq_printf(s, "PLLB = %8x\n", at91_sys_read(AT91_CKGR_PLLBR));
+
+ seq_printf(s, "MCKR = %8x\n", at91_sys_read(AT91_PMC_MCKR));
+ for (i = 0; i < 4; i++)
+ seq_printf(s, "PCK%d = %8x\n", i, at91_sys_read(AT91_PMC_PCKR(i)));
+ seq_printf(s, "SR = %8x\n", sr = at91_sys_read(AT91_PMC_SR));
+
+ seq_printf(s, "\n");
+
+ for (i = 0; i < ARRAY_SIZE(clock_list); i++) {
+ char *state;
+ struct clk *clk = clock_list[i];
+
+ if (clk->mode == pmc_sys_mode)
+ state = (scsr & clk->pmc_mask) ? "on" : "off";
+ else if (clk->mode == pmc_periph_mode)
+ state = (pcsr & clk->pmc_mask) ? "on" : "off";
+ else if (clk->pmc_mask)
+ state = (sr & clk->pmc_mask) ? "on" : "off";
+ else if (clk == &clk32k || clk == &main_clk)
+ state = "on";
+ else
+ state = "";
+
+ seq_printf(s, "%-10s users=%d %-3s %9ld Hz %s\n",
+ clk->name, clk->users, state, clk_get_rate(clk),
+ clk->parent ? clk->parent->name : "");
+ }
+ return 0;
+}
+
+static int at91_clk_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, at91_clk_show, NULL);
+}
+
+static struct file_operations at91_clk_operations = {
+ .open = at91_clk_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init at91_clk_debugfs_init(void)
+{
+ /* /sys/kernel/debug/at91_clk */
+ (void) debugfs_create_file("at91_clk", S_IFREG | S_IRUGO, NULL, NULL, &at91_clk_operations);
+
+ return 0;
+}
+postcore_initcall(at91_clk_debugfs_init);
+
+#endif
+
+/*------------------------------------------------------------------------*/
+
+static u32 __init at91_pll_rate(struct clk *pll, u32 freq, u32 reg)
+{
+ unsigned mul, div;
+
+ div = reg & 0xff;
+ mul = (reg >> 16) & 0x7ff;
+ if (div && mul) {
+ freq /= div;
+ freq *= mul + 1;
+ } else
+ freq = 0;
+ if (pll == &pllb && (reg & (1 << 28)))
+ freq /= 2;
+ return freq;
+}
+
+static unsigned __init at91_pll_calc(unsigned main_freq, unsigned out_freq)
+{
+ unsigned i, div = 0, mul = 0, diff = 1 << 30;
+ unsigned ret = (out_freq > 155000000) ? 0xbe00 : 0x3e00;
+
+ /* PLL output max 240 MHz (or 180 MHz per errata) */
+ if (out_freq > 240000000)
+ goto fail;
+
+ for (i = 1; i < 256; i++) {
+ int diff1;
+ unsigned input, mul1;
+
+ /*
+ * PLL input between 1MHz and 32MHz per spec, but lower
+ * frequencies seem necessary in some cases, so allow 100K.
+ */
+ input = main_freq / i;
+ if (input < 100000)
+ continue;
+ if (input > 32000000)
+ continue;
+
+ mul1 = out_freq / input;
+ if (mul1 > 2048)
+ continue;
+ if (mul1 < 2)
+ goto fail;
+
+ diff1 = out_freq - input * mul1;
+ if (diff1 < 0)
+ diff1 = -diff1;
+ if (diff > diff1) {
+ diff = diff1;
+ div = i;
+ mul = mul1;
+ if (diff == 0)
+ break;
+ }
+ }
+ if (i == 256 && diff > (out_freq >> 5))
+ goto fail;
+ return ret | ((mul - 1) << 16) | div;
+fail:
+ return 0;
+}
+
+int __init at91_clock_init(unsigned long main_clock)
+{
+ unsigned tmp, freq, mckr;
+
+ spin_lock_init(&clk_lock);
+
+ /*
+ * When the bootloader initialized the main oscillator correctly,
+ * there's no problem using the cycle counter. But if it didn't,
+ * or when using oscillator bypass mode, we must be told the speed
+ * of the main clock.
+ */
+ if (!main_clock) {
+ do {
+ tmp = at91_sys_read(AT91_CKGR_MCFR);
+ } while (!(tmp & 0x10000));
+ main_clock = (tmp & 0xffff) * (AT91_SLOW_CLOCK / 16);
+ }
+ main_clk.rate_hz = main_clock;
+
+ /* report if PLLA is more than mildly overclocked */
+ plla.rate_hz = at91_pll_rate(&plla, main_clock, at91_sys_read(AT91_CKGR_PLLAR));
+ if (plla.rate_hz > 209000000)
+ pr_info("Clocks: PLLA overclocked, %ld MHz\n", plla.rate_hz / 1000000);
+
+ /*
+ * USB clock init: choose 48 MHz PLLB value, turn all clocks off,
+ * disable 48MHz clock during usb peripheral suspend.
+ *
+ * REVISIT: assumes MCK doesn't derive from PLLB!
+ */
+ at91_pllb_usb_init = at91_pll_calc(main_clock, 48000000 * 2) | 0x10000000;
+ pllb.rate_hz = at91_pll_rate(&pllb, main_clock, at91_pllb_usb_init);
+ at91_sys_write(AT91_PMC_PCDR, (1 << AT91_ID_UHP) | (1 << AT91_ID_UDP));
+ at91_sys_write(AT91_PMC_SCDR, AT91_PMC_UHP | AT91_PMC_UDP);
+ at91_sys_write(AT91_CKGR_PLLBR, 0);
+ at91_sys_write(AT91_PMC_SCER, AT91_PMC_MCKUDP);
+
+ /*
+ * MCK and CPU derive from one of those primary clocks.
+ * For now, assume this parentage won't change.
+ */
+ mckr = at91_sys_read(AT91_PMC_MCKR);
+ mck.parent = clock_list[mckr & AT91_PMC_CSS];
+ mck.parent->users++;
+ freq = mck.parent->rate_hz;
+ freq /= (1 << ((mckr >> 2) & 3)); /* prescale */
+ mck.rate_hz = freq / (1 + ((mckr >> 8) & 3)); /* mdiv */
+
+ printk("Clocks: CPU %u MHz, master %u MHz, main %u.%03u MHz\n",
+ freq / 1000000, (unsigned) mck.rate_hz / 1000000,
+ (unsigned) main_clock / 1000000,
+ ((unsigned) main_clock % 1000000) / 1000);
+
+ /* FIXME get rid of master_clock global */
+ at91_master_clock = mck.rate_hz;
+
+#ifdef CONFIG_AT91_PROGRAMMABLE_CLOCKS
+ /* establish PCK0..PCK3 parentage */
+ for (tmp = 0; tmp < ARRAY_SIZE(clock_list); tmp++) {
+ struct clk *clk = clock_list[tmp], *parent;
+ u32 pckr;
+
+ if (!clk->programmable)
+ continue;
+
+ pckr = at91_sys_read(AT91_PMC_PCKR(clk->id));
+ parent = clock_list[pckr & 3];
+ clk->parent = parent;
+ clk->rate_hz = parent->rate_hz / (1 << ((pckr >> 2) & 3));
+ }
+#else
+ /* disable unused clocks */
+ at91_sys_write(AT91_PMC_SCDR, AT91_PMC_PCK0 | AT91_PMC_PCK1 | AT91_PMC_PCK2 | AT91_PMC_PCK3);
+#endif /* CONFIG_AT91_PROGRAMMABLE_CLOCKS */
+
+ /* FIXME several unused clocks may still be active... provide
+ * a CONFIG option to turn off all unused clocks at some point
+ * before driver init starts.
+ */
+
+ return 0;
+}
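The clock interface added above follows the usual consumer pattern: a driver looks up a clock by name, enables it around hardware access, and reads its rate. A minimal illustrative sketch only — the probe function and device pointer are hypothetical, while the "udc_clk" name comes from clock_list[] above:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static int example_probe(struct device *dev)
	{
		struct clk *clk;
		unsigned long rate;

		clk = clk_get(dev, "udc_clk");	/* looked up by name in clock_list[] */
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		clk_enable(clk);		/* pmc_periph_mode() ungates the peripheral clock */
		rate = clk_get_rate(clk);	/* udc_clk has no rate of its own, so this walks up to MCK */

		/* ... program the peripheral using 'rate' ... */

		clk_disable(clk);
		clk_put(clk);
		return 0;
	}

Note that clk_enable()/clk_disable() recurse through clk->parent, so enabling udpck, for example, also keeps PLLB locked for as long as the USB clock is in use.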
diff --git a/arch/arm/mach-at91rm9200/common.c b/arch/arm/mach-at91rm9200/common.c
new file mode 100644
index 00000000000..3848fd2d559
--- /dev/null
+++ b/arch/arm/mach-at91rm9200/common.c
@@ -0,0 +1,115 @@
+/*
+ * arch/arm/mach-at91rm9200/common.c
+ *
+ * Copyright (C) 2005 SAN People
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <asm/arch/hardware.h>
+
+static struct map_desc at91rm9200_io_desc[] __initdata = {
+ {
+ .virtual = AT91_VA_BASE_SYS,
+ .pfn = __phys_to_pfn(AT91_BASE_SYS),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = AT91_VA_BASE_SPI,
+ .pfn = __phys_to_pfn(AT91_BASE_SPI),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = AT91_VA_BASE_SSC2,
+ .pfn = __phys_to_pfn(AT91_BASE_SSC2),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = AT91_VA_BASE_SSC1,
+ .pfn = __phys_to_pfn(AT91_BASE_SSC1),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = AT91_VA_BASE_SSC0,
+ .pfn = __phys_to_pfn(AT91_BASE_SSC0),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = AT91_VA_BASE_US3,
+ .pfn = __phys_to_pfn(AT91_BASE_US3),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = AT91_VA_BASE_US2,
+ .pfn = __phys_to_pfn(AT91_BASE_US2),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = AT91_VA_BASE_US1,
+ .pfn = __phys_to_pfn(AT91_BASE_US1),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = AT91_VA_BASE_US0,
+ .pfn = __phys_to_pfn(AT91_BASE_US0),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = AT91_VA_BASE_EMAC,
+ .pfn = __phys_to_pfn(AT91_BASE_EMAC),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = AT91_VA_BASE_TWI,
+ .pfn = __phys_to_pfn(AT91_BASE_TWI),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = AT91_VA_BASE_MCI,
+ .pfn = __phys_to_pfn(AT91_BASE_MCI),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = AT91_VA_BASE_UDP,
+ .pfn = __phys_to_pfn(AT91_BASE_UDP),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = AT91_VA_BASE_TCB1,
+ .pfn = __phys_to_pfn(AT91_BASE_TCB1),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = AT91_VA_BASE_TCB0,
+ .pfn = __phys_to_pfn(AT91_BASE_TCB0),
+ .length = SZ_16K,
+ .type = MT_DEVICE,
+ },
+};
+
+void __init at91rm9200_map_io(void)
+{
+ iotable_init(at91rm9200_io_desc, ARRAY_SIZE(at91rm9200_io_desc));
+}
+
+
+unsigned long at91_master_clock;
+
+EXPORT_SYMBOL(at91_master_clock);
+
+
+int at91_serial_map[AT91_NR_UART];
+int at91_console_port;
+
+EXPORT_SYMBOL(at91_serial_map);
+EXPORT_SYMBOL(at91_console_port);
diff --git a/arch/arm/mach-at91rm9200/devices.c b/arch/arm/mach-at91rm9200/devices.c
new file mode 100644
index 00000000000..8df3e524565
--- /dev/null
+++ b/arch/arm/mach-at91rm9200/devices.c
@@ -0,0 +1,291 @@
+/*
+ * arch/arm/mach-at91rm9200/devices.c
+ *
+ * Copyright (C) 2005 Thibaut VARENE <varenet@parisc-linux.org>
+ * Copyright (C) 2005 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <linux/config.h>
+#include <linux/platform_device.h>
+
+#include <asm/arch/board.h>
+#include <asm/arch/pio.h>
+
+
+/* --------------------------------------------------------------------
+ * USB Host
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+static u64 ohci_dmamask = 0xffffffffUL;
+static struct at91_usbh_data usbh_data;
+
+static struct resource at91rm9200_usbh_resource[] = {
+ [0] = {
+ .start = AT91_UHP_BASE,
+ .end = AT91_UHP_BASE + SZ_1M -1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AT91_ID_UHP,
+ .end = AT91_ID_UHP,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device at91rm9200_usbh_device = {
+ .name = "at91rm9200-ohci",
+ .id = -1,
+ .dev = {
+ .dma_mask = &ohci_dmamask,
+ .coherent_dma_mask = 0xffffffff,
+ .platform_data = &usbh_data,
+ },
+ .resource = at91rm9200_usbh_resource,
+ .num_resources = ARRAY_SIZE(at91rm9200_usbh_resource),
+};
+
+void __init at91_add_device_usbh(struct at91_usbh_data *data)
+{
+ if (!data)
+ return;
+
+ usbh_data = *data;
+ platform_device_register(&at91rm9200_usbh_device);
+}
+#else
+void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ * USB Device (Gadget)
+ * -------------------------------------------------------------------- */
+
+#ifdef CONFIG_USB_GADGET_AT91
+static struct at91_udc_data udc_data;
+
+static struct resource at91_udc_resources[] = {
+ {
+ .start = AT91_BASE_UDP,
+ .end = AT91_BASE_UDP + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct platform_device at91rm9200_udc_device = {
+ .name = "at91_udc",
+ .id = -1,
+ .dev = {
+ .platform_data = &udc_data,
+ },
+ .resource = at91_udc_resources,
+ .num_resources = ARRAY_SIZE(at91_udc_resources),
+};
+
+void __init at91_add_device_udc(struct at91_udc_data *data)
+{
+ if (!data)
+ return;
+
+ if (data->vbus_pin) {
+ at91_set_gpio_input(data->vbus_pin, 0);
+ at91_set_deglitch(data->vbus_pin, 1);
+ }
+ if (data->pullup_pin)
+ at91_set_gpio_output(data->pullup_pin, 0);
+
+ udc_data = *data;
+ platform_device_register(&at91rm9200_udc_device);
+}
+#else
+void __init at91_add_device_udc(struct at91_udc_data *data) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ * Ethernet
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_ARM_AT91_ETHER) || defined(CONFIG_ARM_AT91_ETHER_MODULE)
+static u64 eth_dmamask = 0xffffffffUL;
+static struct at91_eth_data eth_data;
+
+static struct platform_device at91rm9200_eth_device = {
+ .name = "at91_ether",
+ .id = -1,
+ .dev = {
+ .dma_mask = &eth_dmamask,
+ .coherent_dma_mask = 0xffffffff,
+ .platform_data = &eth_data,
+ },
+ .num_resources = 0,
+};
+
+void __init at91_add_device_eth(struct at91_eth_data *data)
+{
+ if (!data)
+ return;
+
+ if (data->phy_irq_pin) {
+ at91_set_gpio_input(data->phy_irq_pin, 0);
+ at91_set_deglitch(data->phy_irq_pin, 1);
+ }
+
+ /* Pins used for MII and RMII */
+ at91_set_A_periph(AT91_PIN_PA16, 0); /* EMDIO */
+ at91_set_A_periph(AT91_PIN_PA15, 0); /* EMDC */
+ at91_set_A_periph(AT91_PIN_PA14, 0); /* ERXER */
+ at91_set_A_periph(AT91_PIN_PA13, 0); /* ERX1 */
+ at91_set_A_periph(AT91_PIN_PA12, 0); /* ERX0 */
+ at91_set_A_periph(AT91_PIN_PA11, 0); /* ECRS_ECRSDV */
+ at91_set_A_periph(AT91_PIN_PA10, 0); /* ETX1 */
+ at91_set_A_periph(AT91_PIN_PA9, 0); /* ETX0 */
+ at91_set_A_periph(AT91_PIN_PA8, 0); /* ETXEN */
+ at91_set_A_periph(AT91_PIN_PA7, 0); /* ETXCK_EREFCK */
+
+ if (!data->is_rmii) {
+ at91_set_B_periph(AT91_PIN_PB19, 0); /* ERXCK */
+ at91_set_B_periph(AT91_PIN_PB18, 0); /* ECOL */
+ at91_set_B_periph(AT91_PIN_PB17, 0); /* ERXDV */
+ at91_set_B_periph(AT91_PIN_PB16, 0); /* ERX3 */
+ at91_set_B_periph(AT91_PIN_PB15, 0); /* ERX2 */
+ at91_set_B_periph(AT91_PIN_PB14, 0); /* ETXER */
+ at91_set_B_periph(AT91_PIN_PB13, 0); /* ETX3 */
+ at91_set_B_periph(AT91_PIN_PB12, 0); /* ETX2 */
+ }
+
+ eth_data = *data;
+ platform_device_register(&at91rm9200_eth_device);
+}
+#else
+void __init at91_add_device_eth(struct at91_eth_data *data) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ * Compact Flash / PCMCIA
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_AT91_CF) || defined(CONFIG_AT91_CF_MODULE)
+static struct at91_cf_data cf_data;
+
+static struct platform_device at91rm9200_cf_device = {
+ .name = "at91_cf",
+ .id = -1,
+ .dev = {
+ .platform_data = &cf_data,
+ },
+ .num_resources = 0,
+};
+
+void __init at91_add_device_cf(struct at91_cf_data *data)
+{
+ if (!data)
+ return;
+
+ /* input/irq */
+ if (data->irq_pin) {
+ at91_set_gpio_input(data->irq_pin, 1);
+ at91_set_deglitch(data->irq_pin, 1);
+ }
+ at91_set_gpio_input(data->det_pin, 1);
+ at91_set_deglitch(data->det_pin, 1);
+
+ /* outputs, initially off */
+ if (data->vcc_pin)
+ at91_set_gpio_output(data->vcc_pin, 0);
+ at91_set_gpio_output(data->rst_pin, 0);
+
+ cf_data = *data;
+ platform_device_register(&at91rm9200_cf_device);
+}
+#else
+void __init at91_add_device_cf(struct at91_cf_data *data) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ * MMC / SD
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_MMC_AT91RM9200) || defined(CONFIG_MMC_AT91RM9200_MODULE)
+static u64 mmc_dmamask = 0xffffffffUL;
+static struct at91_mmc_data mmc_data;
+
+static struct resource at91_mmc_resources[] = {
+ {
+ .start = AT91_BASE_MCI,
+ .end = AT91_BASE_MCI + SZ_16K - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct platform_device at91rm9200_mmc_device = {
+ .name = "at91rm9200_mci",
+ .id = -1,
+ .dev = {
+ .dma_mask = &mmc_dmamask,
+ .coherent_dma_mask = 0xffffffff,
+ .platform_data = &mmc_data,
+ },
+ .resource = at91_mmc_resources,
+ .num_resources = ARRAY_SIZE(at91_mmc_resources),
+};
+
+void __init at91_add_device_mmc(struct at91_mmc_data *data)
+{
+ if (!data)
+ return;
+
+ /* input/irq */
+ if (data->det_pin) {
+ at91_set_gpio_input(data->det_pin, 1);
+ at91_set_deglitch(data->det_pin, 1);
+ }
+ if (data->wp_pin)
+ at91_set_gpio_input(data->wp_pin, 1);
+
+ /* CLK */
+ at91_set_A_periph(AT91_PIN_PA27, 0);
+
+ if (data->is_b) {
+ /* CMD */
+ at91_set_B_periph(AT91_PIN_PA8, 0);
+
+ /* DAT0, maybe DAT1..DAT3 */
+ at91_set_B_periph(AT91_PIN_PA9, 0);
+ if (data->wire4) {
+ at91_set_B_periph(AT91_PIN_PA10, 0);
+ at91_set_B_periph(AT91_PIN_PA11, 0);
+ at91_set_B_periph(AT91_PIN_PA12, 0);
+ }
+ } else {
+ /* CMD */
+ at91_set_A_periph(AT91_PIN_PA28, 0);
+
+ /* DAT0, maybe DAT1..DAT3 */
+ at91_set_A_periph(AT91_PIN_PA29, 0);
+ if (data->wire4) {
+ at91_set_B_periph(AT91_PIN_PB3, 0);
+ at91_set_B_periph(AT91_PIN_PB4, 0);
+ at91_set_B_periph(AT91_PIN_PB5, 0);
+ }
+ }
+
+ mmc_data = *data;
+ platform_device_register(&at91rm9200_mmc_device);
+}
+#else
+void __init at91_add_device_mmc(struct at91_mmc_data *data) {}
+#endif
+
+/* -------------------------------------------------------------------- */
diff --git a/arch/arm/mach-at91rm9200/generic.h b/arch/arm/mach-at91rm9200/generic.h
new file mode 100644
index 00000000000..9bd541eba0a
--- /dev/null
+++ b/arch/arm/mach-at91rm9200/generic.h
@@ -0,0 +1,18 @@
+/*
+ * linux/arch/arm/mach-at91rm9200/generic.h
+ *
+ * Copyright (C) 2005 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+void at91_gpio_irq_setup(unsigned banks);
+
+struct sys_timer;
+extern struct sys_timer at91rm9200_timer;
+
+extern void __init at91rm9200_map_io(void);
+
+extern int __init at91_clock_init(unsigned long main_clock);
diff --git a/arch/arm/mach-at91rm9200/gpio.c b/arch/arm/mach-at91rm9200/gpio.c
new file mode 100644
index 00000000000..2fd2ef583e4
--- /dev/null
+++ b/arch/arm/mach-at91rm9200/gpio.c
@@ -0,0 +1,302 @@
+/*
+ * linux/arch/arm/mach-at91rm9200/gpio.c
+ *
+ * Copyright (C) 2005 HP Labs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+
+#include <asm/io.h>
+#include <asm/mach/irq.h>
+#include <asm/arch/hardware.h>
+#include <asm/arch/gpio.h>
+
+static const u32 pio_controller_offset[4] = {
+ AT91_PIOA,
+ AT91_PIOB,
+ AT91_PIOC,
+ AT91_PIOD,
+};
+
+static inline void __iomem *pin_to_controller(unsigned pin)
+{
+ void __iomem *sys_base = (void __iomem *) AT91_VA_BASE_SYS;
+
+ pin -= PIN_BASE;
+ pin /= 32;
+ if (likely(pin < BGA_GPIO_BANKS))
+ return sys_base + pio_controller_offset[pin];
+
+ return NULL;
+}
+
+static inline unsigned pin_to_mask(unsigned pin)
+{
+ pin -= PIN_BASE;
+ return 1 << (pin % 32);
+}
+
+
+/*--------------------------------------------------------------------------*/
+
+/* Not all hardware capabilities are exposed through these calls; they
+ * only encapsulate the most common features and modes. (So if you
+ * want to change signals in groups, do it directly.)
+ *
+ * Bootloaders will usually handle some of the pin multiplexing setup.
+ * The intent is certainly that by the time Linux is fully booted, all
+ * pins should have been fully initialized. These setup calls should
+ * only be used by board setup routines, or possibly in driver probe().
+ *
+ * For bootloaders doing all that setup, these calls could be inlined
+ * as NOPs so Linux won't duplicate any setup code.
+ */
+
+
+/*
+ * mux the pin to the "A" internal peripheral role.
+ */
+int __init_or_module at91_set_A_periph(unsigned pin, int use_pullup)
+{
+ void __iomem *pio = pin_to_controller(pin);
+ unsigned mask = pin_to_mask(pin);
+
+ if (!pio)
+ return -EINVAL;
+
+ __raw_writel(mask, pio + PIO_IDR);
+ __raw_writel(mask, pio + (use_pullup ? PIO_PUER : PIO_PUDR));
+ __raw_writel(mask, pio + PIO_ASR);
+ __raw_writel(mask, pio + PIO_PDR);
+ return 0;
+}
+EXPORT_SYMBOL(at91_set_A_periph);
+
+
+/*
+ * mux the pin to the "B" internal peripheral role.
+ */
+int __init_or_module at91_set_B_periph(unsigned pin, int use_pullup)
+{
+ void __iomem *pio = pin_to_controller(pin);
+ unsigned mask = pin_to_mask(pin);
+
+ if (!pio)
+ return -EINVAL;
+
+ __raw_writel(mask, pio + PIO_IDR);
+ __raw_writel(mask, pio + (use_pullup ? PIO_PUER : PIO_PUDR));
+ __raw_writel(mask, pio + PIO_BSR);
+ __raw_writel(mask, pio + PIO_PDR);
+ return 0;
+}
+EXPORT_SYMBOL(at91_set_B_periph);
+
+
+/*
+ * mux the pin to the gpio controller (instead of "A" or "B" peripheral), and
+ * configure it for an input.
+ */
+int __init_or_module at91_set_gpio_input(unsigned pin, int use_pullup)
+{
+ void __iomem *pio = pin_to_controller(pin);
+ unsigned mask = pin_to_mask(pin);
+
+ if (!pio)
+ return -EINVAL;
+
+ __raw_writel(mask, pio + PIO_IDR);
+ __raw_writel(mask, pio + (use_pullup ? PIO_PUER : PIO_PUDR));
+ __raw_writel(mask, pio + PIO_ODR);
+ __raw_writel(mask, pio + PIO_PER);
+ return 0;
+}
+EXPORT_SYMBOL(at91_set_gpio_input);
+
+
+/*
+ * mux the pin to the gpio controller (instead of "A" or "B" peripheral),
+ * and configure it for an output.
+ */
+int __init_or_module at91_set_gpio_output(unsigned pin, int value)
+{
+ void __iomem *pio = pin_to_controller(pin);
+ unsigned mask = pin_to_mask(pin);
+
+ if (!pio)
+ return -EINVAL;
+
+ __raw_writel(mask, pio + PIO_IDR);
+ __raw_writel(mask, pio + PIO_PUDR);
+ __raw_writel(mask, pio + (value ? PIO_SODR : PIO_CODR));
+ __raw_writel(mask, pio + PIO_OER);
+ __raw_writel(mask, pio + PIO_PER);
+ return 0;
+}
+EXPORT_SYMBOL(at91_set_gpio_output);
+
+
+/*
+ * enable/disable the glitch filter; mostly used with IRQ handling.
+ */
+int __init_or_module at91_set_deglitch(unsigned pin, int is_on)
+{
+ void __iomem *pio = pin_to_controller(pin);
+ unsigned mask = pin_to_mask(pin);
+
+ if (!pio)
+ return -EINVAL;
+ __raw_writel(mask, pio + (is_on ? PIO_IFER : PIO_IFDR));
+ return 0;
+}
+EXPORT_SYMBOL(at91_set_deglitch);
+
+/*--------------------------------------------------------------------------*/
+
+
+/*
+ * assuming the pin is muxed as a gpio output, set its value.
+ */
+int at91_set_gpio_value(unsigned pin, int value)
+{
+ void __iomem *pio = pin_to_controller(pin);
+ unsigned mask = pin_to_mask(pin);
+
+ if (!pio)
+ return -EINVAL;
+ __raw_writel(mask, pio + (value ? PIO_SODR : PIO_CODR));
+ return 0;
+}
+EXPORT_SYMBOL(at91_set_gpio_value);
+
+
+/*
+ * read the pin's value (works even if it's not muxed as a gpio).
+ */
+int at91_get_gpio_value(unsigned pin)
+{
+ void __iomem *pio = pin_to_controller(pin);
+ unsigned mask = pin_to_mask(pin);
+ u32 pdsr;
+
+ if (!pio)
+ return -EINVAL;
+ pdsr = __raw_readl(pio + PIO_PDSR);
+ return (pdsr & mask) != 0;
+}
+EXPORT_SYMBOL(at91_get_gpio_value);
+
+/*--------------------------------------------------------------------------*/
+
+
+/* Several AIC controller irqs are dispatched through this GPIO handler.
+ * To use any AT91_PIN_* as an externally triggered IRQ, first call
+ * at91_set_gpio_input() then maybe enable its glitch filter.
+ * Then just request_irq() with the pin ID; it works like any ARM IRQ
+ * handler, though it always triggers on rising and falling edges.
+ *
+ * Alternatively, certain pins may be used directly as IRQ0..IRQ6 after
+ * configuring them with at91_set_A_periph() or at91_set_B_periph().
+ * IRQ0..IRQ6 should be configurable, e.g. level vs edge triggering.
+ */
+
+static void gpio_irq_mask(unsigned pin)
+{
+ void __iomem *pio = pin_to_controller(pin);
+ unsigned mask = pin_to_mask(pin);
+
+ if (pio)
+ __raw_writel(mask, pio + PIO_IDR);
+}
+
+static void gpio_irq_unmask(unsigned pin)
+{
+ void __iomem *pio = pin_to_controller(pin);
+ unsigned mask = pin_to_mask(pin);
+
+ if (pio)
+ __raw_writel(mask, pio + PIO_IER);
+}
+
+static int gpio_irq_type(unsigned pin, unsigned type)
+{
+ return (type == IRQT_BOTHEDGE) ? 0 : -EINVAL;
+}
+
+static struct irqchip gpio_irqchip = {
+ .mask = gpio_irq_mask,
+ .unmask = gpio_irq_unmask,
+ .set_type = gpio_irq_type,
+};
+
+static void gpio_irq_handler(unsigned irq, struct irqdesc *desc, struct pt_regs *regs)
+{
+ unsigned pin;
+ struct irqdesc *gpio;
+ void __iomem *pio;
+ u32 isr;
+
+ pio = (void __force __iomem *) desc->chipdata;
+
+ /* temporarily mask (level sensitive) parent IRQ */
+ desc->chip->ack(irq);
+ for (;;) {
+ isr = __raw_readl(pio + PIO_ISR) & __raw_readl(pio + PIO_IMR);
+ if (!isr)
+ break;
+
+ pin = (unsigned) desc->data;
+ gpio = &irq_desc[pin];
+
+ while (isr) {
+ if (isr & 1)
+ gpio->handle(pin, gpio, regs);
+ pin++;
+ gpio++;
+ isr >>= 1;
+ }
+ }
+ desc->chip->unmask(irq);
+ /* now it may re-trigger */
+}
+
+/* call this from board-specific init_irq */
+void __init at91_gpio_irq_setup(unsigned banks)
+{
+ unsigned pioc, pin, id;
+
+ if (banks > 4)
+ banks = 4;
+ for (pioc = 0, pin = PIN_BASE, id = AT91_ID_PIOA;
+ pioc < banks;
+ pioc++, id++) {
+ void __iomem *controller;
+ unsigned i;
+
+ controller = (void __iomem *) AT91_VA_BASE_SYS + pio_controller_offset[pioc];
+ __raw_writel(~0, controller + PIO_IDR);
+
+ set_irq_data(id, (void *) pin);
+ set_irq_chipdata(id, (void __force *) controller);
+
+ for (i = 0; i < 32; i++, pin++) {
+ set_irq_chip(pin, &gpio_irqchip);
+ set_irq_handler(pin, do_simple_IRQ);
+ set_irq_flags(pin, IRQF_VALID);
+ }
+
+ set_irq_chained_handler(id, gpio_irq_handler);
+
+ /* enable the PIO peripheral clock */
+ at91_sys_write(AT91_PMC_PCER, 1 << id);
+ }
+ pr_info("AT91: %d gpio irqs in %d banks\n", pin - PIN_BASE, banks);
+}
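The comment block above gpio_irq_mask() spells out the usage model: mux the pin as a GPIO input, optionally enable its deglitch filter, then request_irq() with the pin ID, since at91_gpio_irq_setup() wires one irq_desc entry per GPIO bit starting at PIN_BASE. A hedged sketch of what a board or driver might do — the pin, handler and device name are purely illustrative:

	#include <linux/interrupt.h>
	#include <asm/arch/gpio.h>

	static irqreturn_t card_detect_irq(int irq, void *dev_id, struct pt_regs *regs)
	{
		/* fires on both edges; read the level to tell insertion from removal */
		int present = at91_get_gpio_value(irq);	/* the pin ID doubles as the IRQ number */

		/* ... report 'present' to the driver core ... */
		return IRQ_HANDLED;
	}

	static int __init example_detect_setup(void)
	{
		at91_set_gpio_input(AT91_PIN_PB27, 1);	/* GPIO input with pull-up */
		at91_set_deglitch(AT91_PIN_PB27, 1);	/* filter contact bounce */
		return request_irq(AT91_PIN_PB27, card_detect_irq, 0,
				   "card-detect", NULL);
	}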
diff --git a/arch/arm/mach-at91rm9200/irq.c b/arch/arm/mach-at91rm9200/irq.c
new file mode 100644
index 00000000000..cb62bc83a1d
--- /dev/null
+++ b/arch/arm/mach-at91rm9200/irq.c
@@ -0,0 +1,170 @@
+/*
+ * linux/arch/arm/mach-at91rm9200/irq.c
+ *
+ * Copyright (C) 2004 SAN People
+ * Copyright (C) 2004 ATMEL
+ * Copyright (C) Rick Bronson
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+
+#include <asm/hardware.h>
+#include <asm/irq.h>
+#include <asm/mach-types.h>
+#include <asm/setup.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/irq.h>
+#include <asm/mach/map.h>
+
+#include "generic.h"
+
+/*
+ * The default interrupt priority levels (0 = lowest, 7 = highest).
+ */
+static unsigned int at91rm9200_default_irq_priority[NR_AIC_IRQS] __initdata = {
+ 7, /* Advanced Interrupt Controller */
+ 7, /* System Peripheral */
+ 0, /* Parallel IO Controller A */
+ 0, /* Parallel IO Controller B */
+ 0, /* Parallel IO Controller C */
+ 0, /* Parallel IO Controller D */
+ 6, /* USART 0 */
+ 6, /* USART 1 */
+ 6, /* USART 2 */
+ 6, /* USART 3 */
+ 0, /* Multimedia Card Interface */
+ 4, /* USB Device Port */
+ 0, /* Two-Wire Interface */
+ 6, /* Serial Peripheral Interface */
+ 5, /* Serial Synchronous Controller */
+ 5, /* Serial Synchronous Controller */
+ 5, /* Serial Synchronous Controller */
+ 0, /* Timer Counter 0 */
+ 0, /* Timer Counter 1 */
+ 0, /* Timer Counter 2 */
+ 0, /* Timer Counter 3 */
+ 0, /* Timer Counter 4 */
+ 0, /* Timer Counter 5 */
+ 3, /* USB Host port */
+ 3, /* Ethernet MAC */
+ 0, /* Advanced Interrupt Controller */
+ 0, /* Advanced Interrupt Controller */
+ 0, /* Advanced Interrupt Controller */
+ 0, /* Advanced Interrupt Controller */
+ 0, /* Advanced Interrupt Controller */
+ 0, /* Advanced Interrupt Controller */
+ 0 /* Advanced Interrupt Controller */
+};
+
+
+static void at91rm9200_mask_irq(unsigned int irq)
+{
+ /* Disable interrupt on AIC */
+ at91_sys_write(AT91_AIC_IDCR, 1 << irq);
+}
+
+static void at91rm9200_unmask_irq(unsigned int irq)
+{
+ /* Enable interrupt on AIC */
+ at91_sys_write(AT91_AIC_IECR, 1 << irq);
+}
+
+static int at91rm9200_irq_type(unsigned irq, unsigned type)
+{
+ unsigned int smr, srctype;
+
+ /* change triggering only for FIQ and external IRQ0..IRQ6 */
+ if ((irq < AT91_ID_IRQ0) && (irq != AT91_ID_FIQ))
+ return -EINVAL;
+
+ switch (type) {
+ case IRQT_HIGH:
+ srctype = AT91_AIC_SRCTYPE_HIGH;
+ break;
+ case IRQT_RISING:
+ srctype = AT91_AIC_SRCTYPE_RISING;
+ break;
+ case IRQT_LOW:
+ srctype = AT91_AIC_SRCTYPE_LOW;
+ break;
+ case IRQT_FALLING:
+ srctype = AT91_AIC_SRCTYPE_FALLING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ smr = at91_sys_read(AT91_AIC_SMR(irq)) & ~AT91_AIC_SRCTYPE;
+ at91_sys_write(AT91_AIC_SMR(irq), smr | srctype);
+ return 0;
+}
+
+static struct irqchip at91rm9200_irq_chip = {
+ .ack = at91rm9200_mask_irq,
+ .mask = at91rm9200_mask_irq,
+ .unmask = at91rm9200_unmask_irq,
+ .set_type = at91rm9200_irq_type,
+};
+
+/*
+ * Initialize the AIC interrupt controller.
+ */
+void __init at91rm9200_init_irq(unsigned int priority[NR_AIC_IRQS])
+{
+ unsigned int i;
+
+ /* No priority list specified for this board -> use defaults */
+ if (priority == NULL)
+ priority = at91rm9200_default_irq_priority;
+
+ /*
+ * The IVR is used by the get_irqnr_and_base macro to read and verify the pending interrupt number.
+ * The irq number is NR_AIC_IRQS when a spurious interrupt has occurred.
+ */
+ for (i = 0; i < NR_AIC_IRQS; i++) {
+ /* Put irq number in Source Vector Register: */
+ at91_sys_write(AT91_AIC_SVR(i), i);
+ /* Store the Source Mode Register as defined in table above */
+ at91_sys_write(AT91_AIC_SMR(i), AT91_AIC_SRCTYPE_LOW | priority[i]);
+
+ set_irq_chip(i, &at91rm9200_irq_chip);
+ set_irq_handler(i, do_level_IRQ);
+ set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+
+ /* Perform 8 End Of Interrupt commands to make sure the AIC will not lock out nIRQ */
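+ /* (The AIC keeps a hardware priority stack up to eight levels deep, so eight EOIs are enough to unwind any stale entries.) */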
+ if (i < 8)
+ at91_sys_write(AT91_AIC_EOICR, 0);
+ }
+
+ /*
+ * Set the Spurious Interrupt ID in the Spurious Vector Register to NR_AIC_IRQS.
+ * When there is no current interrupt, the Interrupt Vector Register reads back the value stored in AIC_SPU.
+ */
+ at91_sys_write(AT91_AIC_SPU, NR_AIC_IRQS);
+
+ /* No debugging in AIC: Debug (Protect) Control Register */
+ at91_sys_write(AT91_AIC_DCR, 0);
+
+ /* Disable and clear all interrupts initially */
+ at91_sys_write(AT91_AIC_IDCR, 0xFFFFFFFF);
+ at91_sys_write(AT91_AIC_ICCR, 0xFFFFFFFF);
+}
diff --git a/arch/arm/mach-at91rm9200/time.c b/arch/arm/mach-at91rm9200/time.c
new file mode 100644
index 00000000000..1b6dd2deeb2
--- /dev/null
+++ b/arch/arm/mach-at91rm9200/time.c
@@ -0,0 +1,127 @@
+/*
+ * linux/arch/arm/mach-at91rm9200/time.c
+ *
+ * Copyright (C) 2003 SAN People
+ * Copyright (C) 2003 ATMEL
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/mach/time.h>
+
+/*
+ * The ST_CRTR is updated asynchronously to the master clock, so it must be
+ * read repeatedly until two consecutive reads return the same value.
+ */
+static inline unsigned long read_CRTR(void) {
+ unsigned long x1, x2;
+
+ do {
+ x1 = at91_sys_read(AT91_ST_CRTR);
+ x2 = at91_sys_read(AT91_ST_CRTR);
+ } while (x1 != x2);
+
+ return x1;
+}
+
+/*
+ * Returns the number of microseconds since the last timer interrupt. Note that
+ * interrupts will have been disabled by do_gettimeofday().
+ * 'LATCH' is hwclock ticks (see CLOCK_TICK_RATE in timex.h) per jiffy.
+ * 'tick' is usecs per jiffy (linux/timex.h).
+ */
+static unsigned long at91rm9200_gettimeoffset(void)
+{
+ unsigned long elapsed;
+
+ elapsed = (read_CRTR() - at91_sys_read(AT91_ST_RTAR)) & AT91_ST_ALMV;
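+ /* 'elapsed' is in 32.768 kHz slow-clock ticks. As a rough worked example
+ * (assuming HZ=100, so LATCH is about 328 and a jiffy is about 10009 usec),
+ * an elapsed count of 164 converts to roughly 5004 usec below. */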
+
+ return (unsigned long)(elapsed * (tick_nsec / 1000)) / LATCH;
+}
+
+/*
+ * IRQ handler for the timer.
+ */
+static irqreturn_t at91rm9200_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ unsigned long rtar;
+
+ if (at91_sys_read(AT91_ST_SR) & AT91_ST_PITS) { /* This is a shared interrupt */
+ write_seqlock(&xtime_lock);
+
+ do {
+ timer_tick(regs);
+ rtar = (at91_sys_read(AT91_ST_RTAR) + LATCH) & AT91_ST_ALMV;
+ at91_sys_write(AT91_ST_RTAR, rtar);
+ } while (((read_CRTR() - at91_sys_read(AT91_ST_RTAR)) & AT91_ST_ALMV) >= LATCH);
+
+ write_sequnlock(&xtime_lock);
+
+ return IRQ_HANDLED;
+ }
+ else
+ return IRQ_NONE; /* not handled */
+}
+
+static struct irqaction at91rm9200_timer_irq = {
+ .name = "at91_tick",
+ .flags = SA_SHIRQ | SA_INTERRUPT,
+ .handler = at91rm9200_timer_interrupt
+};
+
+/*
+ * Set up timer interrupt.
+ */
+void __init at91rm9200_timer_init(void)
+{
+ /* Disable all timer interrupts */
+ at91_sys_write(AT91_ST_IDR, AT91_ST_PITS | AT91_ST_WDOVF | AT91_ST_RTTINC | AT91_ST_ALMS);
+ (void) at91_sys_read(AT91_ST_SR); /* Clear any pending interrupts */
+
+ /*
+ * Make IRQs happen for the system timer.
+ */
+ setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
+
+ /* Set initial alarm to 0 */
+ at91_sys_write(AT91_ST_RTAR, 0);
+
+ /* Real time counter incremented every 30.51758 microseconds */
+ at91_sys_write(AT91_ST_RTMR, 1);
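+ /* With RTMR = 1 the counter advances once per slow-clock cycle: 1 / 32768 Hz ~= 30.51758 usec. */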
+
+ /* Set Period Interval timer */
+ at91_sys_write(AT91_ST_PIMR, LATCH);
+
+ /* Change the kernel's 'tick' value to 10009 usec. (the default is 10000) */
+ tick_usec = (LATCH * 1000000) / CLOCK_TICK_RATE;
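+ /* e.g. with HZ=100: LATCH ~= 32768/100 = 328, so tick_usec = 328 * 1000000 / 32768 ~= 10009. */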
+
+ /* Enable Period Interval Timer interrupt */
+ at91_sys_write(AT91_ST_IER, AT91_ST_PITS);
+}
+
+struct sys_timer at91rm9200_timer = {
+ .init = at91rm9200_timer_init,
+ .offset = at91rm9200_gettimeoffset,
+};
diff --git a/arch/arm/mach-epxa10db/Kconfig b/arch/arm/mach-epxa10db/Kconfig
deleted file mode 100644
index 55d896dd495..00000000000
--- a/arch/arm/mach-epxa10db/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
-if ARCH_CAMELOT
-
-menu "Epxa10db"
-
-comment "PLD hotswap support"
-
-config PLD
- bool
- default y
-
-config PLD_HOTSWAP
- bool "Support for PLD device hotplugging (experimental)"
- depends on EXPERIMENTAL
- help
- This enables support for the dynamic loading and configuration of
- compatible drivers when the contents of the PLD are changed. This
- is still experimental and requires configuration tools which are
- not yet generally available. Say N here. You must enable the kernel
- module loader for this feature to work.
-
-endmenu
-
-endif
diff --git a/arch/arm/mach-epxa10db/Makefile b/arch/arm/mach-epxa10db/Makefile
deleted file mode 100644
index 24fbd7d3a3c..00000000000
--- a/arch/arm/mach-epxa10db/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-# Object file lists.
-
-obj-y := arch.o irq.o mm.o time.o
-obj-m :=
-obj-n :=
-obj- :=
-
diff --git a/arch/arm/mach-epxa10db/Makefile.boot b/arch/arm/mach-epxa10db/Makefile.boot
deleted file mode 100644
index 28bec7d3fc8..00000000000
--- a/arch/arm/mach-epxa10db/Makefile.boot
+++ /dev/null
@@ -1,2 +0,0 @@
- zreladdr-y := 0x00008000
-
diff --git a/arch/arm/mach-epxa10db/arch.c b/arch/arm/mach-epxa10db/arch.c
deleted file mode 100644
index 44c56571d18..00000000000
--- a/arch/arm/mach-epxa10db/arch.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * linux/arch/arm/mach-epxa10db/arch.c
- *
- * Copyright (C) 2000 Deep Blue Solutions Ltd
- * Copyright (C) 2001 Altera Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/serial_8250.h>
-
-#include <asm/hardware.h>
-#include <asm/setup.h>
-#include <asm/mach-types.h>
-
-#include <asm/mach/arch.h>
-
-static struct plat_serial8250_port serial_platform_data[] = {
- {
- .iobase = 0x3f8,
- .irq = IRQ_UARTINT0,
-#error FIXME
- .uartclk = 0,
- .regshift = 0,
- .iotype = UPIO_PORT,
- .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
- },
- {
- .iobase = 0x2f8,
- .irq = IRQ_UARTINT1,
-#error FIXME
- .uartclk = 0,
- .regshift = 0,
- .iotype = UPIO_PORT,
- .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
- },
- { },
-};
-
-static struct platform_device serial_device = {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM,
- .dev = {
- .platform_data = serial_platform_data,
- },
-};
-
-extern void epxa10db_map_io(void);
-extern void epxa10db_init_irq(void);
-extern struct sys_timer epxa10db_timer;
-
-MACHINE_START(CAMELOT, "Altera Epxa10db")
- /* Maintainer: Altera Corporation */
- .phys_ram = 0x00000000,
- .phys_io = 0x7fffc000,
- .io_pg_offst = ((0xffffc000) >> 18) & 0xfffc,
- .map_io = epxa10db_map_io,
- .init_irq = epxa10db_init_irq,
- .timer = &epxa10db_timer,
-MACHINE_END
-
diff --git a/arch/arm/mach-epxa10db/dma.c b/arch/arm/mach-epxa10db/dma.c
deleted file mode 100644
index 0151e9f1c06..00000000000
--- a/arch/arm/mach-epxa10db/dma.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * linux/arch/arm/mach-epxa10db/dma.c
- *
- * Copyright (C) 1999 ARM Limited
- * Copyright (C) 2000 Deep Blue Solutions Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/init.h>
-
-#include <asm/dma.h>
-#include <asm/mach/dma.h>
-
-void __init arch_dma_init(dma_t *dma)
-{
-}
diff --git a/arch/arm/mach-epxa10db/irq.c b/arch/arm/mach-epxa10db/irq.c
deleted file mode 100644
index 9bf927e1330..00000000000
--- a/arch/arm/mach-epxa10db/irq.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * linux/arch/arm/mach-epxa10db/irq.c
- *
- * Copyright (C) 2001 Altera Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/stddef.h>
-#include <linux/timer.h>
-#include <linux/list.h>
-#include <asm/io.h>
-#include <asm/hardware.h>
-#include <asm/irq.h>
-#include <asm/mach/irq.h>
-#include <asm/arch/platform.h>
-#include <asm/arch/int_ctrl00.h>
-
-
-static void epxa_mask_irq(unsigned int irq)
-{
- writel(1 << irq, INT_MC(IO_ADDRESS(EXC_INT_CTRL00_BASE)));
-}
-
-static void epxa_unmask_irq(unsigned int irq)
-{
- writel(1 << irq, INT_MS(IO_ADDRESS(EXC_INT_CTRL00_BASE)));
-}
-
-
-static struct irqchip epxa_irq_chip = {
- .ack = epxa_mask_irq,
- .mask = epxa_mask_irq,
- .unmask = epxa_unmask_irq,
-};
-
-static struct resource irq_resource = {
- .name = "irq_handler",
- .start = IO_ADDRESS(EXC_INT_CTRL00_BASE),
- .end = IO_ADDRESS(INT_PRIORITY_FC(EXC_INT_CTRL00_BASE))+4,
-};
-
-void __init epxa10db_init_irq(void)
-{
- unsigned int i;
-
- request_resource(&iomem_resource, &irq_resource);
-
- /*
- * This bit sets up the interrupt controller using
- * the 6 PLD interrupts mode (the default) each
- * irqs is assigned a priority which is the same
- * as its interrupt number. This scheme is used because
- * its easy, but you may want to change it depending
- * on the contents of your PLD
- */
-
- writel(3,INT_MODE(IO_ADDRESS(EXC_INT_CTRL00_BASE)));
- for (i = 0; i < NR_IRQS; i++){
- writel(i+1, INT_PRIORITY_P0(IO_ADDRESS(EXC_INT_CTRL00_BASE)) + (4*i));
- set_irq_chip(i,&epxa_irq_chip);
- set_irq_handler(i,do_level_IRQ);
- set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
- }
-
- /* Disable all interrupts */
- writel(-1,INT_MC(IO_ADDRESS(EXC_INT_CTRL00_BASE)));
-
-}
diff --git a/arch/arm/mach-epxa10db/mm.c b/arch/arm/mach-epxa10db/mm.c
deleted file mode 100644
index cfd0d2182d4..00000000000
--- a/arch/arm/mach-epxa10db/mm.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * linux/arch/arm/mach-epxa10db/mm.c
- *
- * MM routines for Altera'a Epxa10db board
- *
- * Copyright (C) 2001 Altera Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <asm/hardware.h>
-#include <asm/io.h>
-#include <asm/sizes.h>
-#include <asm/page.h>
-
-#include <asm/mach/map.h>
-
-/* Page table mapping for I/O region */
-
-static struct map_desc epxa10db_io_desc[] __initdata = {
- {
- .virtual = IO_ADDRESS(EXC_REGISTERS_BASE),
- .pfn = __phys_to_pfn(EXC_REGISTERS_BASE),
- .length = SZ_16K,
- .type = MT_DEVICE
- }, {
- .virtual = IO_ADDRESS(EXC_PLD_BLOCK0_BASE),
- .pfn = __phys_to_pfn(EXC_PLD_BLOCK0_BASE),
- .length = SZ_16K,
- .type = MT_DEVICE
- }, {
- .virtual = IO_ADDRESS(EXC_PLD_BLOCK1_BASE),
- .pfn =__phys_to_pfn(EXC_PLD_BLOCK1_BASE),
- .length = SZ_16K,
- .type = MT_DEVICE
- }, {
- .virtual = IO_ADDRESS(EXC_PLD_BLOCK2_BASE),
- .physical = __phys_to_pfn(EXC_PLD_BLOCK2_BASE),
- .length = SZ_16K,
- .type = MT_DEVICE
- }, {
- .virtual = IO_ADDRESS(EXC_PLD_BLOCK3_BASE),
- .pfn = __phys_to_pfn(EXC_PLD_BLOCK3_BASE),
- .length = SZ_16K,
- .type = MT_DEVICE
- }, {
- .virtual = FLASH_VADDR(EXC_EBI_BLOCK0_BASE),
- .pfn = __phys_to_pfn(EXC_EBI_BLOCK0_BASE),
- .length = SZ_16M,
- .type = MT_DEVICE
- }
-};
-
-void __init epxa10db_map_io(void)
-{
- iotable_init(epxa10db_io_desc, ARRAY_SIZE(epxa10db_io_desc));
-}
diff --git a/arch/arm/mach-epxa10db/time.c b/arch/arm/mach-epxa10db/time.c
deleted file mode 100644
index 4b1084dde8d..00000000000
--- a/arch/arm/mach-epxa10db/time.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * linux/arch/arm/mach-epxa10db/time.c
- *
- * Copyright (C) 2000 Deep Blue Solutions
- * Copyright (C) 2001 Altera Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-
-#include <asm/hardware.h>
-#include <asm/system.h>
-#include <asm/leds.h>
-
-#include <asm/mach/time.h>
-
-#define TIMER00_TYPE (volatile unsigned int*)
-#include <asm/arch/timer00.h>
-
-static int epxa10db_set_rtc(void)
-{
- return 1;
-}
-
-static int epxa10db_rtc_init(void)
-{
- set_rtc = epxa10db_set_rtc;
-
- return 0;
-}
-
-__initcall(epxa10db_rtc_init);
-
-
-/*
- * IRQ handler for the timer
- */
-static irqreturn_t
-epxa10db_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
- write_seqlock(&xtime_lock);
-
- // ...clear the interrupt
- *TIMER0_CR(IO_ADDRESS(EXC_TIMER00_BASE))|=TIMER0_CR_CI_MSK;
-
- timer_tick(regs);
- write_sequnlock(&xtime_lock);
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction epxa10db_timer_irq = {
- .name = "Excalibur Timer Tick",
- .flags = SA_INTERRUPT | SA_TIMER,
- .handler = epxa10db_timer_interrupt,
-};
-
-/*
- * Set up timer interrupt, and return the current time in seconds.
- */
-static void __init epxa10db_timer_init(void)
-{
- /* Start the timer */
- *TIMER0_LIMIT(IO_ADDRESS(EXC_TIMER00_BASE))=(unsigned int)(EXC_AHB2_CLK_FREQUENCY/200);
- *TIMER0_PRESCALE(IO_ADDRESS(EXC_TIMER00_BASE))=1;
- *TIMER0_CR(IO_ADDRESS(EXC_TIMER00_BASE))=TIMER0_CR_IE_MSK | TIMER0_CR_S_MSK;
-
- setup_irq(IRQ_TIMER0, &epxa10db_timer_irq);
-}
-
-struct sys_timer epxa10db_timer = {
- .init = epxa10db_timer_init,
-};
diff --git a/arch/arm/mach-footbridge/dma.c b/arch/arm/mach-footbridge/dma.c
index a6b1396b095..7a54578b51a 100644
--- a/arch/arm/mach-footbridge/dma.c
+++ b/arch/arm/mach-footbridge/dma.c
@@ -15,6 +15,7 @@
#include <asm/dma.h>
#include <asm/io.h>
+#include <asm/scatterlist.h>
#include <asm/mach/dma.h>
#include <asm/hardware/dec21285.h>
diff --git a/arch/arm/mach-footbridge/netwinder-hw.c b/arch/arm/mach-footbridge/netwinder-hw.c
index 775f85fc851..9e563de465b 100644
--- a/arch/arm/mach-footbridge/netwinder-hw.c
+++ b/arch/arm/mach-footbridge/netwinder-hw.c
@@ -601,6 +601,7 @@ EXPORT_SYMBOL(gpio_lock);
EXPORT_SYMBOL(gpio_modify_op);
EXPORT_SYMBOL(gpio_modify_io);
EXPORT_SYMBOL(cpld_modify);
+EXPORT_SYMBOL(gpio_read);
/*
* Initialise any other hardware after we've got the PCI bus
diff --git a/arch/arm/mach-imx/mx1ads.c b/arch/arm/mach-imx/mx1ads.c
index 708e1b3faa1..c9e0cd8ed01 100644
--- a/arch/arm/mach-imx/mx1ads.c
+++ b/arch/arm/mach-imx/mx1ads.c
@@ -29,27 +29,27 @@
#include "generic.h"
#include <asm/serial.h>
-static struct resource mx1ads_resources[] = {
+static struct resource cs89x0_resources[] = {
[0] = {
- .start = IMX_CS4_VIRT,
- .end = IMX_CS4_VIRT + 16,
+ .start = IMX_CS4_PHYS + 0x300,
+ .end = IMX_CS4_PHYS + 0x300 + 16,
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = 13,
- .end = 13,
+ .start = IRQ_GPIOC(17),
+ .end = IRQ_GPIOC(17),
.flags = IORESOURCE_IRQ,
},
};
-static struct platform_device mx1ads_device = {
- .name = "mx1ads",
- .num_resources = ARRAY_SIZE(mx1ads_resources),
- .resource = mx1ads_resources,
+static struct platform_device cs89x0_device = {
+ .name = "cirrus-cs89x0",
+ .num_resources = ARRAY_SIZE(cs89x0_resources),
+ .resource = cs89x0_resources,
};
static struct platform_device *devices[] __initdata = {
- &mx1ads_device,
+ &cs89x0_device,
};
static void __init
@@ -61,45 +61,10 @@ mx1ads_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices));
}
-static struct map_desc mx1ads_io_desc[] __initdata = {
- {
- .virtual = IMX_CS0_VIRT,
- .pfn = __phys_to_pfn(IMX_CS0_PHYS),
- .length = IMX_CS0_SIZE,
- .type = MT_DEVICE
- }, {
- .virtual = IMX_CS1_VIRT,
- .pfn = __phys_to_pfn(IMX_CS1_PHYS),
- .length = IMX_CS1_SIZE,
- .type = MT_DEVICE
- }, {
- .virtual = IMX_CS2_VIRT,
- .pfn = __phys_to_pfn(IMX_CS2_PHYS),
- .length = IMX_CS2_SIZE,
- .type = MT_DEVICE
- }, {
- .virtual = IMX_CS3_VIRT,
- .pfn = __phys_to_pfn(IMX_CS3_PHYS),
- .length = IMX_CS3_SIZE,
- .type = MT_DEVICE
- }, {
- .virtual = IMX_CS4_VIRT,
- .pfn = __phys_to_pfn(IMX_CS4_PHYS),
- .length = IMX_CS4_SIZE,
- .type = MT_DEVICE
- }, {
- .virtual = IMX_CS5_VIRT,
- .pfn = __phys_to_pfn(IMX_CS5_PHYS),
- .length = IMX_CS5_SIZE,
- .type = MT_DEVICE
- }
-};
-
static void __init
mx1ads_map_io(void)
{
imx_map_io();
- iotable_init(mx1ads_io_desc, ARRAY_SIZE(mx1ads_io_desc));
}
MACHINE_START(MX1ADS, "Motorola MX1ADS")
diff --git a/arch/arm/mach-integrator/clock.c b/arch/arm/mach-integrator/clock.c
index 73c360685ca..95a1e263f7f 100644
--- a/arch/arm/mach-integrator/clock.c
+++ b/arch/arm/mach-integrator/clock.c
@@ -14,28 +14,29 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
#include <asm/semaphore.h>
-#include <asm/hardware/clock.h>
#include <asm/hardware/icst525.h>
#include "clock.h"
static LIST_HEAD(clocks);
-static DECLARE_MUTEX(clocks_sem);
+static DEFINE_MUTEX(clocks_mutex);
struct clk *clk_get(struct device *dev, const char *id)
{
struct clk *p, *clk = ERR_PTR(-ENOENT);
- down(&clocks_sem);
+ mutex_lock(&clocks_mutex);
list_for_each_entry(p, &clocks, node) {
if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
clk = p;
break;
}
}
- up(&clocks_sem);
+ mutex_unlock(&clocks_mutex);
return clk;
}
@@ -58,17 +59,6 @@ void clk_disable(struct clk *clk)
}
EXPORT_SYMBOL(clk_disable);
-int clk_use(struct clk *clk)
-{
- return 0;
-}
-EXPORT_SYMBOL(clk_use);
-
-void clk_unuse(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_unuse);
-
unsigned long clk_get_rate(struct clk *clk)
{
return clk->rate;
@@ -118,18 +108,18 @@ static struct clk uart_clk = {
int clk_register(struct clk *clk)
{
- down(&clocks_sem);
+ mutex_lock(&clocks_mutex);
list_add(&clk->node, &clocks);
- up(&clocks_sem);
+ mutex_unlock(&clocks_mutex);
return 0;
}
EXPORT_SYMBOL(clk_register);
void clk_unregister(struct clk *clk)
{
- down(&clocks_sem);
+ mutex_lock(&clocks_mutex);
list_del(&clk->node);
- up(&clocks_sem);
+ mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index dacbf504dae..20071a2767c 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -15,11 +15,11 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/smp.h>
+#include <linux/amba/bus.h>
#include <asm/hardware.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <asm/hardware/amba.h>
#include <asm/hardware/arm_timer.h>
#include <asm/arch/cm.h>
#include <asm/system.h>
diff --git a/arch/arm/mach-integrator/dma.c b/arch/arm/mach-integrator/dma.c
deleted file mode 100644
index aae6f23cd72..00000000000
--- a/arch/arm/mach-integrator/dma.c
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * linux/arch/arm/mach-integrator/dma.c
- *
- * Copyright (C) 1999 ARM Limited
- * Copyright (C) 2000 Deep Blue Solutions Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <linux/slab.h>
-#include <linux/mman.h>
-#include <linux/init.h>
-
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/hardware.h>
-
-#include <asm/mach/dma.h>
-
-void __init arch_dma_init(dma_t *dma)
-{
-}
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
index a4bafee77a0..a85d471c5bf 100644
--- a/arch/arm/mach-integrator/impd1.c
+++ b/arch/arm/mach-integrator/impd1.c
@@ -18,11 +18,11 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/mm.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/clcd.h>
#include <asm/io.h>
#include <asm/hardware/icst525.h>
-#include <asm/hardware/amba.h>
-#include <asm/hardware/amba_clcd.h>
#include <asm/arch/lm.h>
#include <asm/arch/impd1.h>
#include <asm/sizes.h>
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 4c0f7c65fac..3afedeb56a6 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -25,6 +25,8 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sysdev.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/kmi.h>
#include <asm/hardware.h>
#include <asm/io.h>
@@ -32,8 +34,6 @@
#include <asm/setup.h>
#include <asm/param.h> /* HZ */
#include <asm/mach-types.h>
-#include <asm/hardware/amba.h>
-#include <asm/hardware/amba_kmi.h>
#include <asm/arch/lm.h>
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index 93f7ccb22c2..16cf2482a3e 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -16,15 +16,15 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sysdev.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/kmi.h>
+#include <linux/amba/clcd.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
-#include <asm/hardware/amba.h>
-#include <asm/hardware/amba_kmi.h>
-#include <asm/hardware/amba_clcd.h>
#include <asm/hardware/icst525.h>
#include <asm/arch/cm.h>
diff --git a/arch/arm/mach-integrator/time.c b/arch/arm/mach-integrator/time.c
index 1a844ca139e..3c22c16b38b 100644
--- a/arch/arm/mach-integrator/time.c
+++ b/arch/arm/mach-integrator/time.c
@@ -14,8 +14,8 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/amba/bus.h>
-#include <asm/hardware/amba.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/uaccess.h>
@@ -96,7 +96,8 @@ static struct rtc_ops rtc_ops = {
.set_alarm = rtc_set_alarm,
};
-static irqreturn_t rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t arm_rtc_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
{
writel(0, rtc_base + RTC_EOI);
return IRQ_HANDLED;
@@ -124,7 +125,7 @@ static int rtc_probe(struct amba_device *dev, void *id)
xtime.tv_sec = __raw_readl(rtc_base + RTC_DR);
- ret = request_irq(dev->irq[0], rtc_interrupt, SA_INTERRUPT,
+ ret = request_irq(dev->irq[0], arm_rtc_interrupt, SA_INTERRUPT,
"rtc-pl030", dev);
if (ret)
goto map_out;
diff --git a/arch/arm/mach-iop3xx/iop331-setup.c b/arch/arm/mach-iop3xx/iop331-setup.c
index 53f60614498..e6ea1cba6a1 100644
--- a/arch/arm/mach-iop3xx/iop331-setup.c
+++ b/arch/arm/mach-iop3xx/iop331-setup.c
@@ -18,7 +18,7 @@
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/tty.h>
-#include <linux/serial_core.h>
+#include <linux/serial_8250.h>
#include <asm/io.h>
#include <asm/pgtable.h>
@@ -50,32 +50,74 @@ static struct map_desc iop331_std_desc[] __initdata = {
}
};
-static struct uart_port iop331_serial_ports[] = {
- {
- .membase = (char*)(IOP331_UART0_VIRT),
- .mapbase = (IOP331_UART0_PHYS),
- .irq = IRQ_IOP331_UART0,
- .flags = UPF_SKIP_TEST,
- .iotype = UPIO_MEM,
- .regshift = 2,
- .uartclk = IOP331_UART_XTAL,
- .line = 0,
- .type = PORT_XSCALE,
- .fifosize = 32
- } , {
- .membase = (char*)(IOP331_UART1_VIRT),
- .mapbase = (IOP331_UART1_PHYS),
- .irq = IRQ_IOP331_UART1,
- .flags = UPF_SKIP_TEST,
- .iotype = UPIO_MEM,
- .regshift = 2,
- .uartclk = IOP331_UART_XTAL,
- .line = 1,
- .type = PORT_XSCALE,
- .fifosize = 32
+static struct resource iop33x_uart0_resources[] = {
+ [0] = {
+ .start = IOP331_UART0_PHYS,
+ .end = IOP331_UART0_PHYS + 0x3f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_IOP331_UART0,
+ .end = IRQ_IOP331_UART0,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct resource iop33x_uart1_resources[] = {
+ [0] = {
+ .start = IOP331_UART1_PHYS,
+ .end = IOP331_UART1_PHYS + 0x3f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_IOP331_UART1,
+ .end = IRQ_IOP331_UART1,
+ .flags = IORESOURCE_IRQ
}
};
+static struct plat_serial8250_port iop33x_uart0_data[] = {
+ {
+ .membase = (char*)(IOP331_UART0_VIRT),
+ .mapbase = (IOP331_UART0_PHYS),
+ .irq = IRQ_IOP331_UART0,
+ .uartclk = IOP331_UART_XTAL,
+ .regshift = 2,
+ .iotype = UPIO_MEM,
+ .flags = UPF_SKIP_TEST,
+ },
+ { },
+};
+
+static struct plat_serial8250_port iop33x_uart1_data[] = {
+ {
+ .membase = (char*)(IOP331_UART1_VIRT),
+ .mapbase = (IOP331_UART1_PHYS),
+ .irq = IRQ_IOP331_UART1,
+ .uartclk = IOP331_UART_XTAL,
+ .regshift = 2,
+ .iotype = UPIO_MEM,
+ .flags = UPF_SKIP_TEST,
+ },
+ { },
+};
+
+static struct platform_device iop33x_uart0 = {
+ .name = "serial8250",
+ .id = 0,
+ .dev.platform_data = iop33x_uart0_data,
+ .num_resources = 2,
+ .resource = iop33x_uart0_resources,
+};
+
+static struct platform_device iop33x_uart1 = {
+ .name = "serial8250",
+ .id = 1,
+ .dev.platform_data = iop33x_uart1_data,
+ .num_resources = 2,
+ .resource = iop33x_uart1_resources,
+};
+
static struct resource iop33x_i2c_0_resources[] = {
[0] = {
.start = 0xfffff680,
@@ -117,6 +159,8 @@ static struct platform_device iop33x_i2c_1_controller = {
};
static struct platform_device *iop33x_devices[] __initdata = {
+ &iop33x_uart0,
+ &iop33x_uart1,
&iop33x_i2c_0_controller,
&iop33x_i2c_1_controller
};
@@ -133,8 +177,6 @@ void __init iop33x_init(void)
void __init iop331_map_io(void)
{
iotable_init(iop331_std_desc, ARRAY_SIZE(iop331_std_desc));
- early_serial_setup(&iop331_serial_ports[0]);
- early_serial_setup(&iop331_serial_ports[1]);
}
#ifdef CONFIG_ARCH_IOP331
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 385285851cb..daadc78e271 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -71,6 +71,14 @@ config ARCH_PRPMC1100
PrPCM1100 Processor Mezzanine Module. For more information on
this platform, see <file:Documentation/arm/IXP4xx>.
+config MACH_NAS100D
+ bool
+ prompt "NAS100D"
+ help
+ Say 'Y' here if you want your kernel to support Iomega's
+ NAS 100d device. For more information on this platform,
+ see http://www.nslu2-linux.org/wiki/NAS100d/HomePage
+
#
# Avila and IXDP share the same source for now. Will change in future
#
diff --git a/arch/arm/mach-ixp4xx/Makefile b/arch/arm/mach-ixp4xx/Makefile
index 7a15629c18d..0471044fa17 100644
--- a/arch/arm/mach-ixp4xx/Makefile
+++ b/arch/arm/mach-ixp4xx/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_MACH_IXDPG425) += ixdpg425-pci.o coyote-setup.o
obj-$(CONFIG_ARCH_ADI_COYOTE) += coyote-pci.o coyote-setup.o
obj-$(CONFIG_MACH_GTWX5715) += gtwx5715-pci.o gtwx5715-setup.o
obj-$(CONFIG_MACH_NSLU2) += nslu2-pci.o nslu2-setup.o nslu2-power.o
+obj-$(CONFIG_MACH_NAS100D) += nas100d-pci.o nas100d-setup.o nas100d-power.o
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index 9795da270e3..6e3462ed530 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -341,6 +341,29 @@ int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M);
}
+/*
+ * Only the first 64MB of memory can be accessed via PCI.
+ * We use GFP_DMA to allocate safe buffers to do map/unmap.
+ * This is really ugly and we need a better way of specifying
+ * DMA-capable regions of memory.
+ */
+void __init ixp4xx_adjust_zones(int node, unsigned long *zone_size,
+ unsigned long *zhole_size)
+{
+ unsigned int sz = SZ_64M >> PAGE_SHIFT;
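+ /* sz is 64MiB expressed in pages (16384 with 4KiB pages); memory above it
+ * is split off into the next zone so GFP_DMA allocations stay PCI-addressable. */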
+
+ /*
+ * Only adjust if > 64M on current system
+ */
+ if (node || (zone_size[0] <= sz))
+ return;
+
+ zone_size[1] = zone_size[0] - sz;
+ zone_size[0] = sz;
+ zhole_size[1] = zhole_size[0];
+ zhole_size[0] = 0;
+}
+
void __init ixp4xx_pci_preinit(void)
{
unsigned long processor_id;
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index f3c687cf007..6b393691d0e 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -142,6 +142,8 @@ static int ixp4xx_set_irq_type(unsigned int irq, unsigned int type)
*int_reg &= ~(IXP4XX_GPIO_STYLE_CLEAR <<
(line * IXP4XX_GPIO_STYLE_SIZE));
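+ /* GPISR is write-one-to-clear: ack any stale interrupt on this line
+ * before the new trigger style takes effect. */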
+ *IXP4XX_GPIO_GPISR = (1 << line);
+
/* Set the new style */
*int_reg |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE));
@@ -169,7 +171,7 @@ static void ixp4xx_irq_ack(unsigned int irq)
int line = (irq < 32) ? irq2gpio[irq] : -1;
if (line >= 0)
- gpio_line_isr_clear(line);
+ *IXP4XX_GPIO_GPISR = (1 << line);
}
/*
@@ -330,11 +332,27 @@ static struct platform_device *ixp46x_devices[] __initdata = {
&ixp46x_i2c_controller
};
+unsigned long ixp4xx_exp_bus_size;
+
void __init ixp4xx_sys_init(void)
{
+ ixp4xx_exp_bus_size = SZ_16M;
+
if (cpu_is_ixp46x()) {
+ int region;
+
platform_add_devices(ixp46x_devices,
ARRAY_SIZE(ixp46x_devices));
+
+ for (region = 0; region < 7; region++) {
+ if (*(IXP4XX_EXP_REG(0x4 * region)) & 0x200) {
+ ixp4xx_exp_bus_size = SZ_32M;
+ break;
+ }
+ }
}
+
+ printk("IXP4xx: Using %luMiB expansion bus window size\n",
+ ixp4xx_exp_bus_size >> 20);
}
diff --git a/arch/arm/mach-ixp4xx/coyote-pci.c b/arch/arm/mach-ixp4xx/coyote-pci.c
index 60de8a94cff..e6b7fcd923f 100644
--- a/arch/arm/mach-ixp4xx/coyote-pci.c
+++ b/arch/arm/mach-ixp4xx/coyote-pci.c
@@ -33,9 +33,6 @@ void __init coyote_pci_preinit(void)
set_irq_type(IRQ_COYOTE_PCI_SLOT0, IRQT_LOW);
set_irq_type(IRQ_COYOTE_PCI_SLOT1, IRQT_LOW);
- gpio_line_isr_clear(COYOTE_PCI_SLOT0_PIN);
- gpio_line_isr_clear(COYOTE_PCI_SLOT1_PIN);
-
ixp4xx_pci_preinit();
}
diff --git a/arch/arm/mach-ixp4xx/coyote-setup.c b/arch/arm/mach-ixp4xx/coyote-setup.c
index 050c9276891..679594a7398 100644
--- a/arch/arm/mach-ixp4xx/coyote-setup.c
+++ b/arch/arm/mach-ixp4xx/coyote-setup.c
@@ -14,6 +14,7 @@
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_8250.h>
+#include <linux/slab.h>
#include <asm/types.h>
#include <asm/setup.h>
@@ -30,8 +31,6 @@ static struct flash_platform_data coyote_flash_data = {
};
static struct resource coyote_flash_resource = {
- .start = COYOTE_FLASH_BASE,
- .end = COYOTE_FLASH_BASE + COYOTE_FLASH_SIZE - 1,
.flags = IORESOURCE_MEM,
};
@@ -81,6 +80,11 @@ static struct platform_device *coyote_devices[] __initdata = {
static void __init coyote_init(void)
{
+ ixp4xx_sys_init();
+
+ coyote_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
+ coyote_flash_resource.end = IXP4XX_EXP_BUS_BASE(0) + SZ_32M - 1;
+
*IXP4XX_EXP_CS0 |= IXP4XX_FLASH_WRITABLE;
*IXP4XX_EXP_CS1 = *IXP4XX_EXP_CS0;
@@ -91,8 +95,6 @@ static void __init coyote_init(void)
coyote_uart_data[0].irq = IRQ_IXP4XX_UART1;
}
-
- ixp4xx_sys_init();
platform_add_devices(coyote_devices, ARRAY_SIZE(coyote_devices));
}
diff --git a/arch/arm/mach-ixp4xx/gtwx5715-setup.c b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
index 29a6d02fa85..03867048997 100644
--- a/arch/arm/mach-ixp4xx/gtwx5715-setup.c
+++ b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
@@ -27,6 +27,7 @@
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_8250.h>
+#include <linux/slab.h>
#include <asm/types.h>
#include <asm/setup.h>
@@ -106,11 +107,9 @@ static struct flash_platform_data gtwx5715_flash_data = {
.width = 2,
};
-static struct resource gtwx5715_flash_resource = {
- .start = GTWX5715_FLASH_BASE,
- .end = GTWX5715_FLASH_BASE + GTWX5715_FLASH_SIZE - 1,
+static struct resource gtwx5715_flash_resource = {
.flags = IORESOURCE_MEM,
-};
+};
static struct platform_device gtwx5715_flash = {
.name = "IXP4XX-Flash",
@@ -129,6 +128,14 @@ static struct platform_device *gtwx5715_devices[] __initdata = {
static void __init gtwx5715_init(void)
{
+ ixp4xx_sys_init();
+
+ /* The flash window base and size are only known at run time, so fill them in here. */
+
+ gtwx5715_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
+ gtwx5715_flash_resource.end = IXP4XX_EXP_BUS_BASE(0) + SZ_8M - 1;
+
platform_add_devices(gtwx5715_devices, ARRAY_SIZE(gtwx5715_devices));
}
diff --git a/arch/arm/mach-ixp4xx/ixdp425-pci.c b/arch/arm/mach-ixp4xx/ixdp425-pci.c
index f9a1d3e7d69..da415d5d7f3 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-pci.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-pci.c
@@ -32,11 +32,6 @@ void __init ixdp425_pci_preinit(void)
set_irq_type(IRQ_IXDP425_PCI_INTC, IRQT_LOW);
set_irq_type(IRQ_IXDP425_PCI_INTD, IRQT_LOW);
- gpio_line_isr_clear(IXDP425_PCI_INTA_PIN);
- gpio_line_isr_clear(IXDP425_PCI_INTB_PIN);
- gpio_line_isr_clear(IXDP425_PCI_INTC_PIN);
- gpio_line_isr_clear(IXDP425_PCI_INTD_PIN);
-
ixp4xx_pci_preinit();
}
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
index 3a22d84e104..c2e105c89c9 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
@@ -14,6 +14,7 @@
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_8250.h>
+#include <linux/slab.h>
#include <asm/types.h>
#include <asm/setup.h>
@@ -30,8 +31,6 @@ static struct flash_platform_data ixdp425_flash_data = {
};
static struct resource ixdp425_flash_resource = {
- .start = IXDP425_FLASH_BASE,
- .end = IXDP425_FLASH_BASE + IXDP425_FLASH_SIZE - 1,
.flags = IORESOURCE_MEM,
};
@@ -108,17 +107,13 @@ static struct platform_device *ixdp425_devices[] __initdata = {
&ixdp425_uart
};
-
static void __init ixdp425_init(void)
{
ixp4xx_sys_init();
- /*
- * IXP465 has 32MB window
- */
- if (machine_is_ixdp465()) {
- ixdp425_flash_resource.end += IXDP425_FLASH_SIZE;
- }
+ ixdp425_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
+ ixdp425_flash_resource.end =
+ IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1;
platform_add_devices(ixdp425_devices, ARRAY_SIZE(ixdp425_devices));
}
diff --git a/arch/arm/mach-ixp4xx/ixdpg425-pci.c b/arch/arm/mach-ixp4xx/ixdpg425-pci.c
index fe5e7660de1..526fb6175bc 100644
--- a/arch/arm/mach-ixp4xx/ixdpg425-pci.c
+++ b/arch/arm/mach-ixp4xx/ixdpg425-pci.c
@@ -32,9 +32,6 @@ void __init ixdpg425_pci_preinit(void)
set_irq_type(IRQ_IXP4XX_GPIO6, IRQT_LOW);
set_irq_type(IRQ_IXP4XX_GPIO7, IRQT_LOW);
- gpio_line_isr_clear(6);
- gpio_line_isr_clear(7);
-
ixp4xx_pci_preinit();
}
diff --git a/arch/arm/mach-ixp4xx/nas100d-pci.c b/arch/arm/mach-ixp4xx/nas100d-pci.c
new file mode 100644
index 00000000000..26b7c001ff6
--- /dev/null
+++ b/arch/arm/mach-ixp4xx/nas100d-pci.c
@@ -0,0 +1,71 @@
+/*
+ * arch/arm/mach-ixp4xx/nas100d-pci.c
+ *
+ * NAS 100d board-level PCI initialization
+ *
+ * based on ixdp425-pci.c:
+ * Copyright (C) 2002 Intel Corporation.
+ * Copyright (C) 2003-2004 MontaVista Software, Inc.
+ *
+ * Maintainer: http://www.nslu2-linux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <asm/mach/pci.h>
+#include <asm/mach-types.h>
+
+void __init nas100d_pci_preinit(void)
+{
+ set_irq_type(IRQ_NAS100D_PCI_INTA, IRQT_LOW);
+ set_irq_type(IRQ_NAS100D_PCI_INTB, IRQT_LOW);
+ set_irq_type(IRQ_NAS100D_PCI_INTC, IRQT_LOW);
+ set_irq_type(IRQ_NAS100D_PCI_INTD, IRQT_LOW);
+ set_irq_type(IRQ_NAS100D_PCI_INTE, IRQT_LOW);
+
+ ixp4xx_pci_preinit();
+}
+
+static int __init nas100d_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static int pci_irq_table[NAS100D_PCI_MAX_DEV][NAS100D_PCI_IRQ_LINES] =
+ {
+ { IRQ_NAS100D_PCI_INTA, -1, -1 },
+ { IRQ_NAS100D_PCI_INTB, -1, -1 },
+ { IRQ_NAS100D_PCI_INTC, IRQ_NAS100D_PCI_INTD, IRQ_NAS100D_PCI_INTE },
+ };
+
+ int irq = -1;
+
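+ /* slot and pin are 1-based, hence the bounds check and the -1 when indexing the table. */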
+ if (slot >= 1 && slot <= NAS100D_PCI_MAX_DEV &&
+ pin >= 1 && pin <= NAS100D_PCI_IRQ_LINES)
+ irq = pci_irq_table[slot-1][pin-1];
+
+ return irq;
+}
+
+struct hw_pci __initdata nas100d_pci = {
+ .nr_controllers = 1,
+ .preinit = nas100d_pci_preinit,
+ .swizzle = pci_std_swizzle,
+ .setup = ixp4xx_setup,
+ .scan = ixp4xx_scan_bus,
+ .map_irq = nas100d_map_irq,
+};
+
+int __init nas100d_pci_init(void)
+{
+ if (machine_is_nas100d())
+ pci_common_init(&nas100d_pci);
+
+ return 0;
+}
+
+subsys_initcall(nas100d_pci_init);
diff --git a/arch/arm/mach-ixp4xx/nas100d-power.c b/arch/arm/mach-ixp4xx/nas100d-power.c
new file mode 100644
index 00000000000..2bec69bfa71
--- /dev/null
+++ b/arch/arm/mach-ixp4xx/nas100d-power.c
@@ -0,0 +1,67 @@
+/*
+ * arch/arm/mach-ixp4xx/nas100d-power.c
+ *
+ * NAS 100d Power/Reset driver
+ *
+ * Copyright (C) 2005 Tower Technologies
+ *
+ * based on nas100d-io.c
+ * Copyright (C) 2004 Karen Spearel
+ *
+ * Author: Alessandro Zummo <a.zummo@towertech.it>
+ * Maintainers: http://www.nslu2-linux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/interrupt.h>
+
+#include <asm/mach-types.h>
+
+extern void ctrl_alt_del(void);
+
+static irqreturn_t nas100d_reset_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+ /* Signal init to do the ctrlaltdel action; if init has not started yet,
+ * this bypasses it and does a kernel_restart directly.
+ */
+ ctrl_alt_del();
+
+ return IRQ_HANDLED;
+}
+
+static int __init nas100d_power_init(void)
+{
+ if (!(machine_is_nas100d()))
+ return 0;
+
+ set_irq_type(NAS100D_RB_IRQ, IRQT_LOW);
+
+ if (request_irq(NAS100D_RB_IRQ, &nas100d_reset_handler,
+ SA_INTERRUPT, "NAS100D reset button", NULL) < 0) {
+
+ printk(KERN_DEBUG "Reset Button IRQ %d not available\n",
+ NAS100D_RB_IRQ);
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void __exit nas100d_power_exit(void)
+{
+ free_irq(NAS100D_RB_IRQ, NULL);
+}
+
+module_init(nas100d_power_init);
+module_exit(nas100d_power_exit);
+
+MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
+MODULE_DESCRIPTION("NAS100D Power/Reset driver");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c
new file mode 100644
index 00000000000..49998a8bd4e
--- /dev/null
+++ b/arch/arm/mach-ixp4xx/nas100d-setup.c
@@ -0,0 +1,135 @@
+/*
+ * arch/arm/mach-ixp4xx/nas100d-setup.c
+ *
+ * NAS 100d board-setup
+ *
+ * based on ixdp425-setup.c:
+ * Copyright (C) 2003-2004 MontaVista Software, Inc.
+ *
+ * Author: Alessandro Zummo <a.zummo@towertech.it>
+ * Author: Rod Whitby <rod@whitby.id.au>
+ * Maintainers: http://www.nslu2-linux.org/
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/flash.h>
+
+static struct flash_platform_data nas100d_flash_data = {
+ .map_name = "cfi_probe",
+ .width = 2,
+};
+
+static struct resource nas100d_flash_resource = {
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device nas100d_flash = {
+ .name = "IXP4XX-Flash",
+ .id = 0,
+ .dev.platform_data = &nas100d_flash_data,
+ .num_resources = 1,
+ .resource = &nas100d_flash_resource,
+};
+
+static struct ixp4xx_i2c_pins nas100d_i2c_gpio_pins = {
+ .sda_pin = NAS100D_SDA_PIN,
+ .scl_pin = NAS100D_SCL_PIN,
+};
+
+static struct platform_device nas100d_i2c_controller = {
+ .name = "IXP4XX-I2C",
+ .id = 0,
+ .dev.platform_data = &nas100d_i2c_gpio_pins,
+ .num_resources = 0,
+};
+
+static struct resource nas100d_uart_resources[] = {
+ {
+ .start = IXP4XX_UART1_BASE_PHYS,
+ .end = IXP4XX_UART1_BASE_PHYS + 0x0fff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IXP4XX_UART2_BASE_PHYS,
+ .end = IXP4XX_UART2_BASE_PHYS + 0x0fff,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct plat_serial8250_port nas100d_uart_data[] = {
+ {
+ .mapbase = IXP4XX_UART1_BASE_PHYS,
+ .membase = (char *)IXP4XX_UART1_BASE_VIRT + REG_OFFSET,
+ .irq = IRQ_IXP4XX_UART1,
+ .flags = UPF_BOOT_AUTOCONF,
+ .iotype = UPIO_MEM,
+ .regshift = 2,
+ .uartclk = IXP4XX_UART_XTAL,
+ },
+ {
+ .mapbase = IXP4XX_UART2_BASE_PHYS,
+ .membase = (char *)IXP4XX_UART2_BASE_VIRT + REG_OFFSET,
+ .irq = IRQ_IXP4XX_UART2,
+ .flags = UPF_BOOT_AUTOCONF,
+ .iotype = UPIO_MEM,
+ .regshift = 2,
+ .uartclk = IXP4XX_UART_XTAL,
+ },
+ { }
+};
+
+static struct platform_device nas100d_uart = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev.platform_data = nas100d_uart_data,
+ .num_resources = 2,
+ .resource = nas100d_uart_resources,
+};
+
+static struct platform_device *nas100d_devices[] __initdata = {
+ &nas100d_i2c_controller,
+ &nas100d_flash,
+ &nas100d_uart,
+};
+
+static void nas100d_power_off(void)
+{
+ /* This causes the box to drop the power and go dead. */
+
+ /* enable the pwr cntl gpio */
+ gpio_line_config(NAS100D_PO_GPIO, IXP4XX_GPIO_OUT);
+
+ /* do the deed */
+ gpio_line_set(NAS100D_PO_GPIO, IXP4XX_GPIO_HIGH);
+}
+
+static void __init nas100d_init(void)
+{
+ ixp4xx_sys_init();
+
+ nas100d_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
+ nas100d_flash_resource.end =
+ IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1;
+
+ pm_power_off = nas100d_power_off;
+
+ platform_add_devices(nas100d_devices, ARRAY_SIZE(nas100d_devices));
+}
+
+MACHINE_START(NAS100D, "Iomega NAS 100d")
+ /* Maintainer: www.nslu2-linux.org */
+ .phys_ram = PHYS_OFFSET,
+ .phys_io = IXP4XX_PERIPHERAL_BASE_PHYS,
+ .io_pg_offst = ((IXP4XX_PERIPHERAL_BASE_VIRT) >> 18) & 0xFFFC,
+ .boot_params = 0x00000100,
+ .map_io = ixp4xx_map_io,
+ .init_irq = ixp4xx_init_irq,
+ .timer = &ixp4xx_timer,
+ .init_machine = nas100d_init,
+MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/nslu2-pci.c b/arch/arm/mach-ixp4xx/nslu2-pci.c
index a575f2e0b2c..ece860444d5 100644
--- a/arch/arm/mach-ixp4xx/nslu2-pci.c
+++ b/arch/arm/mach-ixp4xx/nslu2-pci.c
@@ -28,14 +28,6 @@ void __init nslu2_pci_preinit(void)
set_irq_type(IRQ_NSLU2_PCI_INTB, IRQT_LOW);
set_irq_type(IRQ_NSLU2_PCI_INTC, IRQT_LOW);
- gpio_line_isr_clear(NSLU2_PCI_INTA_PIN);
- gpio_line_isr_clear(NSLU2_PCI_INTB_PIN);
- gpio_line_isr_clear(NSLU2_PCI_INTC_PIN);
-
- /* INTD is not configured as GPIO is used
- * for the power input button.
- */
-
ixp4xx_pci_preinit();
}
diff --git a/arch/arm/mach-ixp4xx/nslu2-power.c b/arch/arm/mach-ixp4xx/nslu2-power.c
index 18fbc8c0fb3..b0ad9e901f6 100644
--- a/arch/arm/mach-ixp4xx/nslu2-power.c
+++ b/arch/arm/mach-ixp4xx/nslu2-power.c
@@ -54,9 +54,6 @@ static int __init nslu2_power_init(void)
set_irq_type(NSLU2_RB_IRQ, IRQT_LOW);
set_irq_type(NSLU2_PB_IRQ, IRQT_HIGH);
- gpio_line_isr_clear(NSLU2_RB_GPIO);
- gpio_line_isr_clear(NSLU2_PB_GPIO);
-
if (request_irq(NSLU2_RB_IRQ, &nslu2_reset_handler,
SA_INTERRUPT, "NSLU2 reset button", NULL) < 0) {
diff --git a/arch/arm/mach-omap1/board-palmte.c b/arch/arm/mach-omap1/board-palmte.c
index 540b20d78cc..5c975eb5c34 100644
--- a/arch/arm/mach-omap1/board-palmte.c
+++ b/arch/arm/mach-omap1/board-palmte.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/notifier.h>
+#include <linux/clk.h>
#include <asm/hardware.h>
#include <asm/mach-types.h>
@@ -30,7 +31,6 @@
#include <asm/arch/usb.h>
#include <asm/arch/board.h>
#include <asm/arch/common.h>
-#include <asm/hardware/clock.h>
static void __init omap_generic_init_irq(void)
{
diff --git a/arch/arm/mach-omap1/board-perseus2.c b/arch/arm/mach-omap1/board-perseus2.c
index bd900b7ab33..92ff5dc0735 100644
--- a/arch/arm/mach-omap1/board-perseus2.c
+++ b/arch/arm/mach-omap1/board-perseus2.c
@@ -184,7 +184,7 @@ static void __init omap_perseus2_map_io(void)
omap_writel(0x00000088, OMAP730_FLASH_ACFG_0);
/*
- * Ethernet support trough the debug board
+ * Ethernet support through the debug board
* CS1 timings setup
*/
omap_writel(0x0000fff3, OMAP730_FLASH_CFG_1);
diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
index 4277eee44ed..9d862f86bba 100644
--- a/arch/arm/mach-omap1/clock.c
+++ b/arch/arm/mach-omap1/clock.c
@@ -16,9 +16,9 @@
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/clk.h>
#include <asm/io.h>
-#include <asm/hardware/clock.h>
#include <asm/arch/usb.h>
#include <asm/arch/clock.h>
diff --git a/arch/arm/mach-omap1/serial.c b/arch/arm/mach-omap1/serial.c
index 6810cfb8446..7a68f098a02 100644
--- a/arch/arm/mach-omap1/serial.c
+++ b/arch/arm/mach-omap1/serial.c
@@ -17,10 +17,10 @@
#include <linux/tty.h>
#include <linux/serial_8250.h>
#include <linux/serial_reg.h>
+#include <linux/clk.h>
#include <asm/io.h>
#include <asm/mach-types.h>
-#include <asm/hardware/clock.h>
#include <asm/arch/board.h>
#include <asm/arch/mux.h>
@@ -252,9 +252,8 @@ static void __init omap_serial_set_port_wakeup(int gpio_nr)
return;
}
omap_set_gpio_direction(gpio_nr, 1);
- set_irq_type(OMAP_GPIO_IRQ(gpio_nr), IRQT_RISING);
ret = request_irq(OMAP_GPIO_IRQ(gpio_nr), &omap_serial_wake_interrupt,
- 0, "serial wakeup", NULL);
+ SA_TRIGGER_RISING, "serial wakeup", NULL);
if (ret) {
omap_free_gpio(gpio_nr);
printk(KERN_ERR "No interrupt for UART wake GPIO: %i\n",
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index 85818d9f263..5407b954915 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -22,10 +22,10 @@
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/delay.h>
+#include <linux/clk.h>
#include <asm/io.h>
-#include <asm/hardware/clock.h>
#include <asm/arch/clock.h>
#include <asm/arch/sram.h>
#include <asm/arch/prcm.h>
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index f4df04fe1dd..e1bd46a96e1 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -16,9 +16,9 @@
#include <linux/init.h>
#include <linux/serial_8250.h>
#include <linux/serial_reg.h>
+#include <linux/clk.h>
#include <asm/io.h>
-#include <asm/hardware/clock.h>
#include <asm/arch/common.h>
#include <asm/arch/board.h>
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
index 9ec11443200..23d36b1c40f 100644
--- a/arch/arm/mach-omap2/timer-gp.c
+++ b/arch/arm/mach-omap2/timer-gp.c
@@ -21,10 +21,11 @@
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/err.h>
+#include <linux/clk.h>
+
#include <asm/mach/time.h>
#include <asm/delay.h>
#include <asm/io.h>
-#include <asm/hardware/clock.h>
#define OMAP2_GP_TIMER1_BASE 0x48028000
#define OMAP2_GP_TIMER2_BASE 0x4802a000
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 2a58499c096..c1d77f5b382 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -112,12 +112,14 @@ config IWMMXT
config PXA_SHARP_C7xx
bool
select PXA_SSP
+ select SHARPSL_PM
help
Enable support for all Sharp C7xx models
config PXA_SHARP_Cxx00
bool
select PXA_SSP
+ select SHARPSL_PM
help
Enable common support for Sharp Cxx00 models
diff --git a/arch/arm/mach-pxa/akita-ioexp.c b/arch/arm/mach-pxa/akita-ioexp.c
index f6d73cc01f7..1b398742ab5 100644
--- a/arch/arm/mach-pxa/akita-ioexp.c
+++ b/arch/arm/mach-pxa/akita-ioexp.c
@@ -124,17 +124,16 @@ static int max7310_detach_client(struct i2c_client *client)
}
static struct i2c_driver max7310_i2c_driver = {
- .owner = THIS_MODULE,
- .name = "akita-max7310",
+ .driver = {
+ .name = "akita-max7310",
+ },
.id = I2C_DRIVERID_AKITAIOEXP,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = max7310_attach_adapter,
.detach_client = max7310_detach_client,
};
static struct i2c_client max7310_template = {
name: "akita-max7310",
- flags: I2C_CLIENT_ALLOW_USE,
driver: &max7310_i2c_driver,
};
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 100fb31b515..5a7b873f29b 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -213,15 +213,14 @@ static int corgi_mci_init(struct device *dev, irqreturn_t (*corgi_detect_int)(in
corgi_mci_platform_data.detect_delay = msecs_to_jiffies(250);
- err = request_irq(CORGI_IRQ_GPIO_nSD_DETECT, corgi_detect_int, SA_INTERRUPT,
- "MMC card detect", data);
+ err = request_irq(CORGI_IRQ_GPIO_nSD_DETECT, corgi_detect_int,
+ SA_INTERRUPT | SA_TRIGGER_RISING | SA_TRIGGER_FALLING,
+ "MMC card detect", data);
if (err) {
printk(KERN_ERR "corgi_mci_init: MMC/SD: can't request MMC card detect IRQ\n");
return -1;
}
- set_irq_type(CORGI_IRQ_GPIO_nSD_DETECT, IRQT_BOTHEDGE);
-
return 0;
}
diff --git a/arch/arm/mach-pxa/corgi_pm.c b/arch/arm/mach-pxa/corgi_pm.c
index 599be14754f..7a1ab73e9e1 100644
--- a/arch/arm/mach-pxa/corgi_pm.c
+++ b/arch/arm/mach-pxa/corgi_pm.c
@@ -33,19 +33,7 @@ static void corgi_charger_init(void)
pxa_gpio_mode(CORGI_GPIO_CHRG_ON | GPIO_OUT);
pxa_gpio_mode(CORGI_GPIO_CHRG_UKN | GPIO_OUT);
pxa_gpio_mode(CORGI_GPIO_KEY_INT | GPIO_IN);
-}
-
-static void corgi_charge_led(int val)
-{
- if (val == SHARPSL_LED_ERROR) {
- dev_dbg(sharpsl_pm.dev, "Charge LED Error\n");
- } else if (val == SHARPSL_LED_ON) {
- dev_dbg(sharpsl_pm.dev, "Charge LED On\n");
- GPSR0 = GPIO_bit(CORGI_GPIO_LED_ORANGE);
- } else {
- dev_dbg(sharpsl_pm.dev, "Charge LED Off\n");
- GPCR0 = GPIO_bit(CORGI_GPIO_LED_ORANGE);
- }
+ sharpsl_pm_pxa_init();
}
static void corgi_measure_temp(int on)
@@ -138,15 +126,15 @@ static int corgi_should_wakeup(unsigned int resume_on_alarm)
dev_dbg(sharpsl_pm.dev, "GPLR0 = %x,%x\n", GPLR0, PEDR);
if ((PEDR & GPIO_bit(CORGI_GPIO_AC_IN))) {
- if (STATUS_AC_IN()) {
+ if (sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN)) {
/* charge on */
dev_dbg(sharpsl_pm.dev, "ac insert\n");
sharpsl_pm.flags |= SHARPSL_DO_OFFLINE_CHRG;
} else {
/* charge off */
dev_dbg(sharpsl_pm.dev, "ac remove\n");
- CHARGE_LED_OFF();
- CHARGE_OFF();
+ sharpsl_pm_led(SHARPSL_LED_OFF);
+ sharpsl_pm.machinfo->charge(0);
sharpsl_pm.charge_mode = CHRG_OFF;
}
}
@@ -172,23 +160,39 @@ static unsigned long corgi_charger_wakeup(void)
return ~GPLR0 & ( GPIO_bit(CORGI_GPIO_AC_IN) | GPIO_bit(CORGI_GPIO_KEY_INT) | GPIO_bit(CORGI_GPIO_WAKEUP) );
}
-static int corgi_acin_status(void)
+unsigned long corgipm_read_devdata(int type)
{
- return ((GPLR(CORGI_GPIO_AC_IN) & GPIO_bit(CORGI_GPIO_AC_IN)) != 0);
+ switch(type) {
+ case SHARPSL_STATUS_ACIN:
+ return ((GPLR(CORGI_GPIO_AC_IN) & GPIO_bit(CORGI_GPIO_AC_IN)) != 0);
+ case SHARPSL_STATUS_LOCK:
+ return READ_GPIO_BIT(sharpsl_pm.machinfo->gpio_batlock);
+ case SHARPSL_STATUS_CHRGFULL:
+ return READ_GPIO_BIT(sharpsl_pm.machinfo->gpio_batfull);
+ case SHARPSL_STATUS_FATAL:
+ return READ_GPIO_BIT(sharpsl_pm.machinfo->gpio_fatal);
+ case SHARPSL_ACIN_VOLT:
+ return sharpsl_pm_pxa_read_max1111(MAX1111_ACIN_VOLT);
+ case SHARPSL_BATT_TEMP:
+ return sharpsl_pm_pxa_read_max1111(MAX1111_BATT_TEMP);
+ case SHARPSL_BATT_VOLT:
+ default:
+ return sharpsl_pm_pxa_read_max1111(MAX1111_BATT_VOLT);
+ }
}
static struct sharpsl_charger_machinfo corgi_pm_machinfo = {
.init = corgi_charger_init,
+ .exit = sharpsl_pm_pxa_remove,
.gpio_batlock = CORGI_GPIO_BAT_COVER,
.gpio_acin = CORGI_GPIO_AC_IN,
.gpio_batfull = CORGI_GPIO_CHRG_FULL,
- .status_acin = corgi_acin_status,
.discharge = corgi_discharge,
.charge = corgi_charge,
- .chargeled = corgi_charge_led,
.measure_temp = corgi_measure_temp,
.presuspend = corgi_presuspend,
.postsuspend = corgi_postsuspend,
+ .read_devdata = corgipm_read_devdata,
.charger_wakeup = corgi_charger_wakeup,
.should_wakeup = corgi_should_wakeup,
.bat_levels = 40,
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index 277498ae5b6..8da9d3efe9a 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -427,6 +427,12 @@ static void __init mainstone_init(void)
printk(KERN_NOTICE "Mainstone configured to boot from %s\n",
mst_flash_data[0].name);
+ /* system bus arbiter setting
+ * - Core_Park
+ * - LCD_wt:DMA_wt:CORE_Wt = 2:3:4
+ */
+ ARB_CNTRL = ARB_CORE_PARK | 0x234;
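+ /* Presumably each weight occupies one nibble of 0x234: LCD=2, DMA=3, CORE=4. */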
+
/*
* On Mainstone, we route AC97_SYSCLK via GPIO45 to
* the audio daughter card
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index eef3de26ad3..663c9500598 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -146,15 +146,14 @@ static int poodle_mci_init(struct device *dev, irqreturn_t (*poodle_detect_int)(
poodle_mci_platform_data.detect_delay = msecs_to_jiffies(250);
- err = request_irq(POODLE_IRQ_GPIO_nSD_DETECT, poodle_detect_int, SA_INTERRUPT,
- "MMC card detect", data);
+ err = request_irq(POODLE_IRQ_GPIO_nSD_DETECT, poodle_detect_int,
+ SA_INTERRUPT | SA_TRIGGER_RISING | SA_TRIGGER_FALLING,
+ "MMC card detect", data);
if (err) {
printk(KERN_ERR "poodle_mci_init: MMC/SD: can't request MMC card detect IRQ\n");
return -1;
}
- set_irq_type(POODLE_IRQ_GPIO_nSD_DETECT, IRQT_BOTHEDGE);
-
return 0;
}
diff --git a/arch/arm/mach-pxa/sharpsl.h b/arch/arm/mach-pxa/sharpsl.h
index b0c40a1d667..da4769caaf7 100644
--- a/arch/arm/mach-pxa/sharpsl.h
+++ b/arch/arm/mach-pxa/sharpsl.h
@@ -1,7 +1,17 @@
/*
- * SharpSL SSP Driver
+ * Copyright (c) 2004-2005 Richard Purdie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
*/
+#include <asm/hardware/sharpsl_pm.h>
+
+/*
+ * SharpSL SSP Driver
+ */
struct corgissp_machinfo {
int port;
int cs_lcdcon;
@@ -14,18 +24,18 @@ struct corgissp_machinfo {
void corgi_ssp_set_machinfo(struct corgissp_machinfo *machinfo);
+
/*
* SharpSL Backlight
*/
-
void corgi_bl_set_intensity(int intensity);
void spitz_bl_set_intensity(int intensity);
void akita_bl_set_intensity(int intensity);
+
/*
* SharpSL Touchscreen Driver
*/
-
unsigned long corgi_get_hsync_len(void);
unsigned long spitz_get_hsync_len(void);
void corgi_put_hsync(void);
@@ -33,89 +43,22 @@ void spitz_put_hsync(void);
void corgi_wait_hsync(void);
void spitz_wait_hsync(void);
+
/*
* SharpSL Battery/PM Driver
*/
-struct sharpsl_charger_machinfo {
- void (*init)(void);
- int gpio_acin;
- int gpio_batfull;
- int gpio_batlock;
- int gpio_fatal;
- int (*status_acin)(void);
- void (*discharge)(int);
- void (*discharge1)(int);
- void (*charge)(int);
- void (*chargeled)(int);
- void (*measure_temp)(int);
- void (*presuspend)(void);
- void (*postsuspend)(void);
- unsigned long (*charger_wakeup)(void);
- int (*should_wakeup)(unsigned int resume_on_alarm);
- int bat_levels;
- struct battery_thresh *bat_levels_noac;
- struct battery_thresh *bat_levels_acin;
- int status_high_acin;
- int status_low_acin;
- int status_high_noac;
- int status_low_noac;
-};
-
-struct battery_thresh {
- int voltage;
- int percentage;
-};
-
-struct battery_stat {
- int ac_status; /* APM AC Present/Not Present */
- int mainbat_status; /* APM Main Battery Status */
- int mainbat_percent; /* Main Battery Percentage Charge */
- int mainbat_voltage; /* Main Battery Voltage */
-};
-
-struct sharpsl_pm_status {
- struct device *dev;
- struct timer_list ac_timer;
- struct timer_list chrg_full_timer;
-
- int charge_mode;
-#define CHRG_ERROR (-1)
-#define CHRG_OFF (0)
-#define CHRG_ON (1)
-#define CHRG_DONE (2)
-
- unsigned int flags;
-#define SHARPSL_SUSPENDED (1 << 0) /* Device is Suspended */
-#define SHARPSL_ALARM_ACTIVE (1 << 1) /* Alarm is for charging event (not user) */
-#define SHARPSL_BL_LIMIT (1 << 2) /* Backlight Intensity Limited */
-#define SHARPSL_APM_QUEUED (1 << 3) /* APM Event Queued */
-#define SHARPSL_DO_OFFLINE_CHRG (1 << 4) /* Trigger the offline charger */
+#define READ_GPIO_BIT(x) (GPLR(x) & GPIO_bit(x))
- int full_count;
- unsigned long charge_start_time;
- struct sharpsl_charger_machinfo *machinfo;
- struct battery_stat battstat;
-};
+/* MAX1111 Channel Definitions */
+#define MAX1111_BATT_VOLT 4u
+#define MAX1111_BATT_TEMP 2u
+#define MAX1111_ACIN_VOLT 6u
-extern struct sharpsl_pm_status sharpsl_pm;
extern struct battery_thresh spitz_battery_levels_acin[];
extern struct battery_thresh spitz_battery_levels_noac[];
+void sharpsl_pm_pxa_init(void);
+void sharpsl_pm_pxa_remove(void);
+int sharpsl_pm_pxa_read_max1111(int channel);
-#define READ_GPIO_BIT(x) (GPLR(x) & GPIO_bit(x))
-
-#define SHARPSL_LED_ERROR 2
-#define SHARPSL_LED_ON 1
-#define SHARPSL_LED_OFF 0
-#define CHARGE_ON() sharpsl_pm.machinfo->charge(1)
-#define CHARGE_OFF() sharpsl_pm.machinfo->charge(0)
-#define CHARGE_LED_ON() sharpsl_pm.machinfo->chargeled(SHARPSL_LED_ON)
-#define CHARGE_LED_OFF() sharpsl_pm.machinfo->chargeled(SHARPSL_LED_OFF)
-#define CHARGE_LED_ERR() sharpsl_pm.machinfo->chargeled(SHARPSL_LED_ERROR)
-#define DISCHARGE_ON() sharpsl_pm.machinfo->discharge(1)
-#define DISCHARGE_OFF() sharpsl_pm.machinfo->discharge(0)
-#define STATUS_AC_IN() sharpsl_pm.machinfo->status_acin()
-#define STATUS_BATT_LOCKED() READ_GPIO_BIT(sharpsl_pm.machinfo->gpio_batlock)
-#define STATUS_CHRG_FULL() READ_GPIO_BIT(sharpsl_pm.machinfo->gpio_batfull)
-#define STATUS_FATAL() READ_GPIO_BIT(sharpsl_pm.machinfo->gpio_fatal)
diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
index c10be00fb52..6d402b262d8 100644
--- a/arch/arm/mach-pxa/sharpsl_pm.c
+++ b/arch/arm/mach-pxa/sharpsl_pm.c
@@ -15,48 +15,20 @@
#undef DEBUG
#include <linux/module.h>
-#include <linux/timer.h>
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/apm_bios.h>
-#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <asm/hardware.h>
-#include <asm/hardware/scoop.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/apm.h>
-
#include <asm/arch/pm.h>
#include <asm/arch/pxa-regs.h>
#include <asm/arch/sharpsl.h>
#include "sharpsl.h"
-/*
- * Constants
- */
-#define SHARPSL_CHARGE_ON_TIME_INTERVAL (msecs_to_jiffies(1*60*1000)) /* 1 min */
-#define SHARPSL_CHARGE_FINISH_TIME (msecs_to_jiffies(10*60*1000)) /* 10 min */
-#define SHARPSL_BATCHK_TIME (msecs_to_jiffies(15*1000)) /* 15 sec */
-#define SHARPSL_BATCHK_TIME_SUSPEND (60*10) /* 10 min */
-#define SHARPSL_WAIT_CO_TIME 15 /* 15 sec */
-#define SHARPSL_WAIT_DISCHARGE_ON 100 /* 100 msec */
-#define SHARPSL_CHECK_BATTERY_WAIT_TIME_TEMP 10 /* 10 msec */
-#define SHARPSL_CHECK_BATTERY_WAIT_TIME_VOLT 10 /* 10 msec */
-#define SHARPSL_CHECK_BATTERY_WAIT_TIME_ACIN 10 /* 10 msec */
-#define SHARPSL_CHARGE_WAIT_TIME 15 /* 15 msec */
-#define SHARPSL_CHARGE_CO_CHECK_TIME 5 /* 5 msec */
-#define SHARPSL_CHARGE_RETRY_CNT 1 /* eqv. 10 min */
-
-#define SHARPSL_CHARGE_ON_VOLT 0x99 /* 2.9V */
-#define SHARPSL_CHARGE_ON_TEMP 0xe0 /* 2.9V */
-#define SHARPSL_CHARGE_ON_ACIN_HIGH 0x9b /* 6V */
-#define SHARPSL_CHARGE_ON_ACIN_LOW 0x34 /* 2V */
-#define SHARPSL_FATAL_ACIN_VOLT 182 /* 3.45V */
-#define SHARPSL_FATAL_NOACIN_VOLT 170 /* 3.40V */
-
struct battery_thresh spitz_battery_levels_acin[] = {
{ 213, 100},
{ 212, 98},
@@ -151,763 +123,17 @@ struct battery_thresh spitz_battery_levels_noac[] = {
#define MAXCTRL_SEL_SH 4
#define MAXCTRL_STR 1u << 7
-/* MAX1111 Channel Definitions */
-#define BATT_AD 4u
-#define BATT_THM 2u
-#define JK_VAD 6u
-
-
-/*
- * Prototypes
- */
-static int sharpsl_read_main_battery(void);
-static int sharpsl_off_charge_battery(void);
-static int sharpsl_check_battery_temp(void);
-static int sharpsl_check_battery_voltage(void);
-static int sharpsl_ac_check(void);
-static int sharpsl_fatal_check(void);
-static int sharpsl_average_value(int ad);
-static void sharpsl_average_clear(void);
-static void sharpsl_charge_toggle(void *private_);
-static void sharpsl_battery_thread(void *private_);
-
-
-/*
- * Variables
- */
-struct sharpsl_pm_status sharpsl_pm;
-DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL);
-DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL);
-
-
-static int get_percentage(int voltage)
-{
- int i = sharpsl_pm.machinfo->bat_levels - 1;
- struct battery_thresh *thresh;
-
- if (sharpsl_pm.charge_mode == CHRG_ON)
- thresh=sharpsl_pm.machinfo->bat_levels_acin;
- else
- thresh=sharpsl_pm.machinfo->bat_levels_noac;
-
- while (i > 0 && (voltage > thresh[i].voltage))
- i--;
-
- return thresh[i].percentage;
-}
-
-static int get_apm_status(int voltage)
-{
- int low_thresh, high_thresh;
-
- if (sharpsl_pm.charge_mode == CHRG_ON) {
- high_thresh = sharpsl_pm.machinfo->status_high_acin;
- low_thresh = sharpsl_pm.machinfo->status_low_acin;
- } else {
- high_thresh = sharpsl_pm.machinfo->status_high_noac;
- low_thresh = sharpsl_pm.machinfo->status_low_noac;
- }
-
- if (voltage >= high_thresh)
- return APM_BATTERY_STATUS_HIGH;
- if (voltage >= low_thresh)
- return APM_BATTERY_STATUS_LOW;
- return APM_BATTERY_STATUS_CRITICAL;
-}
-
-void sharpsl_battery_kick(void)
-{
- schedule_delayed_work(&sharpsl_bat, msecs_to_jiffies(125));
-}
-EXPORT_SYMBOL(sharpsl_battery_kick);
-
-
-static void sharpsl_battery_thread(void *private_)
-{
- int voltage, percent, apm_status, i = 0;
-
- if (!sharpsl_pm.machinfo)
- return;
-
- sharpsl_pm.battstat.ac_status = (STATUS_AC_IN() ? APM_AC_ONLINE : APM_AC_OFFLINE);
-
- /* Corgi cannot confirm when battery fully charged so periodically kick! */
- if (machine_is_corgi() && (sharpsl_pm.charge_mode == CHRG_ON)
- && time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_ON_TIME_INTERVAL))
- schedule_work(&toggle_charger);
-
- while(1) {
- voltage = sharpsl_read_main_battery();
- if (voltage > 0) break;
- if (i++ > 5) {
- voltage = sharpsl_pm.machinfo->bat_levels_noac[0].voltage;
- dev_warn(sharpsl_pm.dev, "Warning: Cannot read main battery!\n");
- break;
- }
- }
-
- voltage = sharpsl_average_value(voltage);
- apm_status = get_apm_status(voltage);
- percent = get_percentage(voltage);
-
- /* At low battery voltages, the voltage has a tendency to start
- creeping back up so we try to avoid this here */
- if ((sharpsl_pm.battstat.ac_status == APM_AC_ONLINE) || (apm_status == APM_BATTERY_STATUS_HIGH) || percent <= sharpsl_pm.battstat.mainbat_percent) {
- sharpsl_pm.battstat.mainbat_voltage = voltage;
- sharpsl_pm.battstat.mainbat_status = apm_status;
- sharpsl_pm.battstat.mainbat_percent = percent;
- }
-
- dev_dbg(sharpsl_pm.dev, "Battery: voltage: %d, status: %d, percentage: %d, time: %d\n", voltage,
- sharpsl_pm.battstat.mainbat_status, sharpsl_pm.battstat.mainbat_percent, jiffies);
-
- /* If battery is low. limit backlight intensity to save power. */
- if ((sharpsl_pm.battstat.ac_status != APM_AC_ONLINE)
- && ((sharpsl_pm.battstat.mainbat_status == APM_BATTERY_STATUS_LOW) ||
- (sharpsl_pm.battstat.mainbat_status == APM_BATTERY_STATUS_CRITICAL))) {
- if (!(sharpsl_pm.flags & SHARPSL_BL_LIMIT)) {
- corgibl_limit_intensity(1);
- sharpsl_pm.flags |= SHARPSL_BL_LIMIT;
- }
- } else if (sharpsl_pm.flags & SHARPSL_BL_LIMIT) {
- corgibl_limit_intensity(0);
- sharpsl_pm.flags &= ~SHARPSL_BL_LIMIT;
- }
-
- /* Suspend if critical battery level */
- if ((sharpsl_pm.battstat.ac_status != APM_AC_ONLINE)
- && (sharpsl_pm.battstat.mainbat_status == APM_BATTERY_STATUS_CRITICAL)
- && !(sharpsl_pm.flags & SHARPSL_APM_QUEUED)) {
- sharpsl_pm.flags |= SHARPSL_APM_QUEUED;
- dev_err(sharpsl_pm.dev, "Fatal Off\n");
- apm_queue_event(APM_CRITICAL_SUSPEND);
- }
-
- schedule_delayed_work(&sharpsl_bat, SHARPSL_BATCHK_TIME);
-}
-
-static void sharpsl_charge_on(void)
-{
- dev_dbg(sharpsl_pm.dev, "Turning Charger On\n");
-
- sharpsl_pm.full_count = 0;
- sharpsl_pm.charge_mode = CHRG_ON;
- schedule_delayed_work(&toggle_charger, msecs_to_jiffies(250));
- schedule_delayed_work(&sharpsl_bat, msecs_to_jiffies(500));
-}
-
-static void sharpsl_charge_off(void)
-{
- dev_dbg(sharpsl_pm.dev, "Turning Charger Off\n");
-
- CHARGE_OFF();
- CHARGE_LED_OFF();
- sharpsl_pm.charge_mode = CHRG_OFF;
-
- schedule_work(&sharpsl_bat);
-}
-
-static void sharpsl_charge_error(void)
-{
- CHARGE_LED_ERR();
- CHARGE_OFF();
- sharpsl_pm.charge_mode = CHRG_ERROR;
-}
-
-static void sharpsl_charge_toggle(void *private_)
-{
- dev_dbg(sharpsl_pm.dev, "Toogling Charger at time: %lx\n", jiffies);
-
- if (STATUS_AC_IN() == 0) {
- sharpsl_charge_off();
- return;
- } else if ((sharpsl_check_battery_temp() < 0) || (sharpsl_ac_check() < 0)) {
- sharpsl_charge_error();
- return;
- }
-
- CHARGE_LED_ON();
- CHARGE_OFF();
- mdelay(SHARPSL_CHARGE_WAIT_TIME);
- CHARGE_ON();
-
- sharpsl_pm.charge_start_time = jiffies;
-}
-
-static void sharpsl_ac_timer(unsigned long data)
-{
- int acin = STATUS_AC_IN();
-
- dev_dbg(sharpsl_pm.dev, "AC Status: %d\n",acin);
-
- sharpsl_average_clear();
- if (acin && (sharpsl_pm.charge_mode != CHRG_ON))
- sharpsl_charge_on();
- else if (sharpsl_pm.charge_mode == CHRG_ON)
- sharpsl_charge_off();
-
- schedule_work(&sharpsl_bat);
-}
-
-
-static irqreturn_t sharpsl_ac_isr(int irq, void *dev_id, struct pt_regs *fp)
-{
- /* Delay the event slightly to debounce */
- /* Must be a smaller delay than the chrg_full_isr below */
- mod_timer(&sharpsl_pm.ac_timer, jiffies + msecs_to_jiffies(250));
-
- return IRQ_HANDLED;
-}
-
-static void sharpsl_chrg_full_timer(unsigned long data)
-{
- dev_dbg(sharpsl_pm.dev, "Charge Full at time: %lx\n", jiffies);
-
- sharpsl_pm.full_count++;
-
- if (STATUS_AC_IN() == 0) {
- dev_dbg(sharpsl_pm.dev, "Charge Full: AC removed - stop charging!\n");
- if (sharpsl_pm.charge_mode == CHRG_ON)
- sharpsl_charge_off();
- } else if (sharpsl_pm.full_count < 2) {
- dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n");
- schedule_work(&toggle_charger);
- } else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) {
- dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n");
- schedule_work(&toggle_charger);
- } else {
- sharpsl_charge_off();
- sharpsl_pm.charge_mode = CHRG_DONE;
- dev_dbg(sharpsl_pm.dev, "Charge Full: Charging Finished\n");
- }
-}
-
-/* Charging Finished Interrupt (Not present on Corgi) */
-/* Can trigger at the same time as an AC staus change so
- delay until after that has been processed */
-static irqreturn_t sharpsl_chrg_full_isr(int irq, void *dev_id, struct pt_regs *fp)
-{
- if (sharpsl_pm.flags & SHARPSL_SUSPENDED)
- return IRQ_HANDLED;
-
- /* delay until after any ac interrupt */
- mod_timer(&sharpsl_pm.chrg_full_timer, jiffies + msecs_to_jiffies(500));
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t sharpsl_fatal_isr(int irq, void *dev_id, struct pt_regs *fp)
-{
- int is_fatal = 0;
-
- if (STATUS_BATT_LOCKED() == 0) {
- dev_err(sharpsl_pm.dev, "Battery now Unlocked! Suspending.\n");
- is_fatal = 1;
- }
-
- if (sharpsl_pm.machinfo->gpio_fatal && (STATUS_FATAL() == 0)) {
- dev_err(sharpsl_pm.dev, "Fatal Batt Error! Suspending.\n");
- is_fatal = 1;
- }
-
- if (!(sharpsl_pm.flags & SHARPSL_APM_QUEUED) && is_fatal) {
- sharpsl_pm.flags |= SHARPSL_APM_QUEUED;
- apm_queue_event(APM_CRITICAL_SUSPEND);
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * Maintain an average of the last 10 readings
- */
-#define SHARPSL_CNV_VALUE_NUM 10
-static int sharpsl_ad_index;
-
-static void sharpsl_average_clear(void)
-{
- sharpsl_ad_index = 0;
-}
-
-static int sharpsl_average_value(int ad)
-{
- int i, ad_val = 0;
- static int sharpsl_ad[SHARPSL_CNV_VALUE_NUM+1];
-
- if (sharpsl_pm.battstat.mainbat_status != APM_BATTERY_STATUS_HIGH) {
- sharpsl_ad_index = 0;
- return ad;
- }
-
- sharpsl_ad[sharpsl_ad_index] = ad;
- sharpsl_ad_index++;
- if (sharpsl_ad_index >= SHARPSL_CNV_VALUE_NUM) {
- for (i=0; i < (SHARPSL_CNV_VALUE_NUM-1); i++)
- sharpsl_ad[i] = sharpsl_ad[i+1];
- sharpsl_ad_index = SHARPSL_CNV_VALUE_NUM - 1;
- }
- for (i=0; i < sharpsl_ad_index; i++)
- ad_val += sharpsl_ad[i];
-
- return (ad_val / sharpsl_ad_index);
-}
-
-
/*
* Read MAX1111 ADC
*/
-static int read_max1111(int channel)
+int sharpsl_pm_pxa_read_max1111(int channel)
{
return corgi_ssp_max1111_get((channel << MAXCTRL_SEL_SH) | MAXCTRL_PD0 | MAXCTRL_PD1
| MAXCTRL_SGL | MAXCTRL_UNI | MAXCTRL_STR);
}
-static int sharpsl_read_main_battery(void)
-{
- return read_max1111(BATT_AD);
-}
-
-static int sharpsl_read_temp(void)
+void sharpsl_pm_pxa_init(void)
{
- int temp;
-
- sharpsl_pm.machinfo->measure_temp(1);
-
- mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_TEMP);
- temp = read_max1111(BATT_THM);
-
- sharpsl_pm.machinfo->measure_temp(0);
-
- return temp;
-}
-
-static int sharpsl_read_acin(void)
-{
- return read_max1111(JK_VAD);
-}
-
-/*
- * Take an array of 5 integers, remove the maximum and minimum values
- * and return the average.
- */
-static int get_select_val(int *val)
-{
- int i, j, k, temp, sum = 0;
-
- /* Find MAX val */
- temp = val[0];
- j=0;
- for (i=1; i<5; i++) {
- if (temp < val[i]) {
- temp = val[i];
- j = i;
- }
- }
-
- /* Find MIN val */
- temp = val[4];
- k=4;
- for (i=3; i>=0; i--) {
- if (temp > val[i]) {
- temp = val[i];
- k = i;
- }
- }
-
- for (i=0; i<5; i++)
- if (i != j && i != k )
- sum += val[i];
-
- dev_dbg(sharpsl_pm.dev, "Average: %d from values: %d, %d, %d, %d, %d\n", sum/3, val[0], val[1], val[2], val[3], val[4]);
-
- return (sum/3);
-}
-
-static int sharpsl_check_battery_temp(void)
-{
- int val, i, buff[5];
-
- /* Check battery temperature */
- for (i=0; i<5; i++) {
- mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_TEMP);
- buff[i] = sharpsl_read_temp();
- }
-
- val = get_select_val(buff);
-
- dev_dbg(sharpsl_pm.dev, "Temperature: %d\n", val);
- if (val > SHARPSL_CHARGE_ON_TEMP)
- return -1;
-
- return 0;
-}
-
-static int sharpsl_check_battery_voltage(void)
-{
- int val, i, buff[5];
-
- /* disable charge, enable discharge */
- CHARGE_OFF();
- DISCHARGE_ON();
- mdelay(SHARPSL_WAIT_DISCHARGE_ON);
-
- if (sharpsl_pm.machinfo->discharge1)
- sharpsl_pm.machinfo->discharge1(1);
-
- /* Check battery voltage */
- for (i=0; i<5; i++) {
- buff[i] = sharpsl_read_main_battery();
- mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_VOLT);
- }
-
- if (sharpsl_pm.machinfo->discharge1)
- sharpsl_pm.machinfo->discharge1(0);
-
- DISCHARGE_OFF();
-
- val = get_select_val(buff);
- dev_dbg(sharpsl_pm.dev, "Battery Voltage: %d\n", val);
-
- if (val < SHARPSL_CHARGE_ON_VOLT)
- return -1;
-
- return 0;
-}
-
-static int sharpsl_ac_check(void)
-{
- int temp, i, buff[5];
-
- for (i=0; i<5; i++) {
- buff[i] = sharpsl_read_acin();
- mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_ACIN);
- }
-
- temp = get_select_val(buff);
- dev_dbg(sharpsl_pm.dev, "AC Voltage: %d\n",temp);
-
- if ((temp > SHARPSL_CHARGE_ON_ACIN_HIGH) || (temp < SHARPSL_CHARGE_ON_ACIN_LOW)) {
- dev_err(sharpsl_pm.dev, "Error: AC check failed.\n");
- return -1;
- }
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int sharpsl_pm_suspend(struct platform_device *pdev, pm_message_t state)
-{
- sharpsl_pm.flags |= SHARPSL_SUSPENDED;
- flush_scheduled_work();
-
- if (sharpsl_pm.charge_mode == CHRG_ON)
- sharpsl_pm.flags |= SHARPSL_DO_OFFLINE_CHRG;
- else
- sharpsl_pm.flags &= ~SHARPSL_DO_OFFLINE_CHRG;
-
- return 0;
-}
-
-static int sharpsl_pm_resume(struct platform_device *pdev)
-{
- /* Clear the reset source indicators as they break the bootloader upon reboot */
- RCSR = 0x0f;
- sharpsl_average_clear();
- sharpsl_pm.flags &= ~SHARPSL_APM_QUEUED;
- sharpsl_pm.flags &= ~SHARPSL_SUSPENDED;
-
- return 0;
-}
-
-static void corgi_goto_sleep(unsigned long alarm_time, unsigned int alarm_enable, suspend_state_t state)
-{
- dev_dbg(sharpsl_pm.dev, "Time is: %08x\n",RCNR);
-
- dev_dbg(sharpsl_pm.dev, "Offline Charge Activate = %d\n",sharpsl_pm.flags & SHARPSL_DO_OFFLINE_CHRG);
- /* not charging and AC-IN! */
-
- if ((sharpsl_pm.flags & SHARPSL_DO_OFFLINE_CHRG) && (STATUS_AC_IN() != 0)) {
- dev_dbg(sharpsl_pm.dev, "Activating Offline Charger...\n");
- sharpsl_pm.charge_mode = CHRG_OFF;
- sharpsl_pm.flags &= ~SHARPSL_DO_OFFLINE_CHRG;
- sharpsl_off_charge_battery();
- }
-
- sharpsl_pm.machinfo->presuspend();
-
- PEDR = 0xffffffff; /* clear it */
-
- sharpsl_pm.flags &= ~SHARPSL_ALARM_ACTIVE;
- if ((sharpsl_pm.charge_mode == CHRG_ON) && ((alarm_enable && ((alarm_time - RCNR) > (SHARPSL_BATCHK_TIME_SUSPEND + 30))) || !alarm_enable)) {
- RTSR &= RTSR_ALE;
- RTAR = RCNR + SHARPSL_BATCHK_TIME_SUSPEND;
- dev_dbg(sharpsl_pm.dev, "Charging alarm at: %08x\n",RTAR);
- sharpsl_pm.flags |= SHARPSL_ALARM_ACTIVE;
- } else if (alarm_enable) {
- RTSR &= RTSR_ALE;
- RTAR = alarm_time;
- dev_dbg(sharpsl_pm.dev, "User alarm at: %08x\n",RTAR);
- } else {
- dev_dbg(sharpsl_pm.dev, "No alarms set.\n");
- }
-
- pxa_pm_enter(state);
-
- sharpsl_pm.machinfo->postsuspend();
-
- dev_dbg(sharpsl_pm.dev, "Corgi woken up from suspend: %08x\n",PEDR);
-}
-
-static int corgi_enter_suspend(unsigned long alarm_time, unsigned int alarm_enable, suspend_state_t state)
-{
- if (!sharpsl_pm.machinfo->should_wakeup(!(sharpsl_pm.flags & SHARPSL_ALARM_ACTIVE) && alarm_enable) )
- {
- if (!(sharpsl_pm.flags & SHARPSL_ALARM_ACTIVE)) {
- dev_dbg(sharpsl_pm.dev, "No user triggered wakeup events and not charging. Strange. Suspend.\n");
- corgi_goto_sleep(alarm_time, alarm_enable, state);
- return 1;
- }
- if(sharpsl_off_charge_battery()) {
- dev_dbg(sharpsl_pm.dev, "Charging. Suspend...\n");
- corgi_goto_sleep(alarm_time, alarm_enable, state);
- return 1;
- }
- dev_dbg(sharpsl_pm.dev, "User triggered wakeup in offline charger.\n");
- }
-
- if ((STATUS_BATT_LOCKED() == 0) || (sharpsl_fatal_check() < 0) )
- {
- dev_err(sharpsl_pm.dev, "Fatal condition. Suspend.\n");
- corgi_goto_sleep(alarm_time, alarm_enable, state);
- return 1;
- }
-
- return 0;
-}
-
-static int corgi_pxa_pm_enter(suspend_state_t state)
-{
- unsigned long alarm_time = RTAR;
- unsigned int alarm_status = ((RTSR & RTSR_ALE) != 0);
-
- dev_dbg(sharpsl_pm.dev, "SharpSL suspending for first time.\n");
-
- corgi_goto_sleep(alarm_time, alarm_status, state);
-
- while (corgi_enter_suspend(alarm_time,alarm_status,state))
- {}
-
- dev_dbg(sharpsl_pm.dev, "SharpSL resuming...\n");
-
- return 0;
-}
-#endif
-
-
-/*
- * Check for fatal battery errors
- * Fatal returns -1
- */
-static int sharpsl_fatal_check(void)
-{
- int buff[5], temp, i, acin;
-
- dev_dbg(sharpsl_pm.dev, "sharpsl_fatal_check entered\n");
-
- /* Check AC-Adapter */
- acin = STATUS_AC_IN();
-
- if (acin && (sharpsl_pm.charge_mode == CHRG_ON)) {
- CHARGE_OFF();
- udelay(100);
- DISCHARGE_ON(); /* enable discharge */
- mdelay(SHARPSL_WAIT_DISCHARGE_ON);
- }
-
- if (sharpsl_pm.machinfo->discharge1)
- sharpsl_pm.machinfo->discharge1(1);
-
- /* Check battery : check inserting battery ? */
- for (i=0; i<5; i++) {
- buff[i] = sharpsl_read_main_battery();
- mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_VOLT);
- }
-
- if (sharpsl_pm.machinfo->discharge1)
- sharpsl_pm.machinfo->discharge1(0);
-
- if (acin && (sharpsl_pm.charge_mode == CHRG_ON)) {
- udelay(100);
- CHARGE_ON();
- DISCHARGE_OFF();
- }
-
- temp = get_select_val(buff);
- dev_dbg(sharpsl_pm.dev, "sharpsl_fatal_check: acin: %d, discharge voltage: %d, no discharge: %d\n", acin, temp, sharpsl_read_main_battery());
-
- if ((acin && (temp < SHARPSL_FATAL_ACIN_VOLT)) ||
- (!acin && (temp < SHARPSL_FATAL_NOACIN_VOLT)))
- return -1;
- return 0;
-}
-
-static int sharpsl_off_charge_error(void)
-{
- dev_err(sharpsl_pm.dev, "Offline Charger: Error occured.\n");
- CHARGE_OFF();
- CHARGE_LED_ERR();
- sharpsl_pm.charge_mode = CHRG_ERROR;
- return 1;
-}
-
-/*
- * Charging Control while suspended
- * Return 1 - go straight to sleep
- * Return 0 - sleep or wakeup depending on other factors
- */
-static int sharpsl_off_charge_battery(void)
-{
- int time;
-
- dev_dbg(sharpsl_pm.dev, "Charge Mode: %d\n", sharpsl_pm.charge_mode);
-
- if (sharpsl_pm.charge_mode == CHRG_OFF) {
- dev_dbg(sharpsl_pm.dev, "Offline Charger: Step 1\n");
-
- /* AC Check */
- if ((sharpsl_ac_check() < 0) || (sharpsl_check_battery_temp() < 0))
- return sharpsl_off_charge_error();
-
- /* Start Charging */
- CHARGE_LED_ON();
- CHARGE_OFF();
- mdelay(SHARPSL_CHARGE_WAIT_TIME);
- CHARGE_ON();
-
- sharpsl_pm.charge_mode = CHRG_ON;
- sharpsl_pm.full_count = 0;
-
- return 1;
- } else if (sharpsl_pm.charge_mode != CHRG_ON) {
- return 1;
- }
-
- if (sharpsl_pm.full_count == 0) {
- int time;
-
- dev_dbg(sharpsl_pm.dev, "Offline Charger: Step 2\n");
-
- if ((sharpsl_check_battery_temp() < 0) || (sharpsl_check_battery_voltage() < 0))
- return sharpsl_off_charge_error();
-
- CHARGE_OFF();
- mdelay(SHARPSL_CHARGE_WAIT_TIME);
- CHARGE_ON();
- sharpsl_pm.charge_mode = CHRG_ON;
-
- mdelay(SHARPSL_CHARGE_CO_CHECK_TIME);
-
- time = RCNR;
- while(1) {
- /* Check if any wakeup event had occured */
- if (sharpsl_pm.machinfo->charger_wakeup() != 0)
- return 0;
- /* Check for timeout */
- if ((RCNR - time) > SHARPSL_WAIT_CO_TIME)
- return 1;
- if (STATUS_CHRG_FULL()) {
- dev_dbg(sharpsl_pm.dev, "Offline Charger: Charge full occured. Retrying to check\n");
- sharpsl_pm.full_count++;
- CHARGE_OFF();
- mdelay(SHARPSL_CHARGE_WAIT_TIME);
- CHARGE_ON();
- return 1;
- }
- }
- }
-
- dev_dbg(sharpsl_pm.dev, "Offline Charger: Step 3\n");
-
- mdelay(SHARPSL_CHARGE_CO_CHECK_TIME);
-
- time = RCNR;
- while(1) {
- /* Check if any wakeup event had occured */
- if (sharpsl_pm.machinfo->charger_wakeup() != 0)
- return 0;
- /* Check for timeout */
- if ((RCNR-time) > SHARPSL_WAIT_CO_TIME) {
- if (sharpsl_pm.full_count > SHARPSL_CHARGE_RETRY_CNT) {
- dev_dbg(sharpsl_pm.dev, "Offline Charger: Not charged sufficiently. Retrying.\n");
- sharpsl_pm.full_count = 0;
- }
- sharpsl_pm.full_count++;
- return 1;
- }
- if (STATUS_CHRG_FULL()) {
- dev_dbg(sharpsl_pm.dev, "Offline Charger: Charging complete.\n");
- CHARGE_LED_OFF();
- CHARGE_OFF();
- sharpsl_pm.charge_mode = CHRG_DONE;
- return 1;
- }
- }
-}
-
-
-static ssize_t battery_percentage_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n",sharpsl_pm.battstat.mainbat_percent);
-}
-
-static ssize_t battery_voltage_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n",sharpsl_pm.battstat.mainbat_voltage);
-}
-
-static DEVICE_ATTR(battery_percentage, 0444, battery_percentage_show, NULL);
-static DEVICE_ATTR(battery_voltage, 0444, battery_voltage_show, NULL);
-
-extern void (*apm_get_power_status)(struct apm_power_info *);
-
-static void sharpsl_apm_get_power_status(struct apm_power_info *info)
-{
- info->ac_line_status = sharpsl_pm.battstat.ac_status;
-
- if (sharpsl_pm.charge_mode == CHRG_ON)
- info->battery_status = APM_BATTERY_STATUS_CHARGING;
- else
- info->battery_status = sharpsl_pm.battstat.mainbat_status;
-
- info->battery_flag = (1 << info->battery_status);
- info->battery_life = sharpsl_pm.battstat.mainbat_percent;
-}
-
-static struct pm_ops sharpsl_pm_ops = {
- .pm_disk_mode = PM_DISK_FIRMWARE,
- .prepare = pxa_pm_prepare,
- .enter = corgi_pxa_pm_enter,
- .finish = pxa_pm_finish,
-};
-
-static int __init sharpsl_pm_probe(struct platform_device *pdev)
-{
- if (!pdev->dev.platform_data)
- return -EINVAL;
-
- sharpsl_pm.dev = &pdev->dev;
- sharpsl_pm.machinfo = pdev->dev.platform_data;
- sharpsl_pm.charge_mode = CHRG_OFF;
- sharpsl_pm.flags = 0;
-
- sharpsl_pm.machinfo->init();
-
- init_timer(&sharpsl_pm.ac_timer);
- sharpsl_pm.ac_timer.function = sharpsl_ac_timer;
-
- init_timer(&sharpsl_pm.chrg_full_timer);
- sharpsl_pm.chrg_full_timer.function = sharpsl_chrg_full_timer;
-
pxa_gpio_mode(sharpsl_pm.machinfo->gpio_acin | GPIO_IN);
pxa_gpio_mode(sharpsl_pm.machinfo->gpio_batfull | GPIO_IN);
pxa_gpio_mode(sharpsl_pm.machinfo->gpio_batlock | GPIO_IN);
@@ -938,26 +164,10 @@ static int __init sharpsl_pm_probe(struct platform_device *pdev)
}
else set_irq_type(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batfull),IRQT_RISING);
}
-
- device_create_file(&pdev->dev, &dev_attr_battery_percentage);
- device_create_file(&pdev->dev, &dev_attr_battery_voltage);
-
- apm_get_power_status = sharpsl_apm_get_power_status;
-
- pm_set_ops(&sharpsl_pm_ops);
-
- mod_timer(&sharpsl_pm.ac_timer, jiffies + msecs_to_jiffies(250));
-
- return 0;
}
-static int sharpsl_pm_remove(struct platform_device *pdev)
+void sharpsl_pm_pxa_remove(void)
{
- pm_set_ops(NULL);
-
- device_remove_file(&pdev->dev, &dev_attr_battery_percentage);
- device_remove_file(&pdev->dev, &dev_attr_battery_voltage);
-
free_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_acin), sharpsl_ac_isr);
free_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batlock), sharpsl_fatal_isr);
@@ -966,32 +176,4 @@ static int sharpsl_pm_remove(struct platform_device *pdev)
if (!machine_is_corgi())
free_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batfull), sharpsl_chrg_full_isr);
-
- del_timer_sync(&sharpsl_pm.chrg_full_timer);
- del_timer_sync(&sharpsl_pm.ac_timer);
-
- return 0;
}
-
-static struct platform_driver sharpsl_pm_driver = {
- .probe = sharpsl_pm_probe,
- .remove = sharpsl_pm_remove,
- .suspend = sharpsl_pm_suspend,
- .resume = sharpsl_pm_resume,
- .driver = {
- .name = "sharpsl-pm",
- },
-};
-
-static int __devinit sharpsl_pm_init(void)
-{
- return platform_driver_register(&sharpsl_pm_driver);
-}
-
-static void sharpsl_pm_exit(void)
-{
- platform_driver_unregister(&sharpsl_pm_driver);
-}
-
-late_initcall(sharpsl_pm_init);
-module_exit(sharpsl_pm_exit);
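The sharpsl_pm.c and sharpsl.h hunks above replace the per-machine STATUS_*() and CHARGE_LED_*() macros with callbacks on sharpsl_charger_machinfo, chiefly charge() and read_devdata(), so the generic PM code queries board state through one dispatch point. The sketch below is a minimal userspace illustration of that dispatch shape only, not the driver itself; the enum subset, stub values and fake_read_devdata() are invented for the example.

/*
 * Minimal sketch of the read_devdata() callback pattern: generic code
 * asks the machine hook for a datum by type instead of calling
 * per-board STATUS_*() macros.  Values below are illustrative only.
 */
#include <stdio.h>

enum { SHARPSL_STATUS_ACIN, SHARPSL_BATT_VOLT };  /* subset, for illustration */

struct machinfo {
	unsigned long (*read_devdata)(int type);
};

static unsigned long fake_read_devdata(int type)
{
	switch (type) {
	case SHARPSL_STATUS_ACIN:
		return 1;	/* pretend AC is plugged in */
	case SHARPSL_BATT_VOLT:
	default:
		return 190;	/* pretend ADC reading */
	}
}

int main(void)
{
	struct machinfo mi = { .read_devdata = fake_read_devdata };

	/* generic code only ever goes through the callback */
	if (mi.read_devdata(SHARPSL_STATUS_ACIN))
		printf("ac online, batt=%lu\n", mi.read_devdata(SHARPSL_BATT_VOLT));
	return 0;
}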
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 2df1b56615b..a9eacc06555 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -36,6 +36,7 @@
#include <asm/arch/irq.h>
#include <asm/arch/irda.h>
#include <asm/arch/mmc.h>
+#include <asm/arch/ohci.h>
#include <asm/arch/udc.h>
#include <asm/arch/pxafb.h>
#include <asm/arch/akita.h>
@@ -126,10 +127,12 @@ static void spitz_card_pwr_ctrl(int device, unsigned short new_cpr)
cpr &= ~0x0002;
if (device == SPITZ_PWR_SD)
cpr &= ~0x0004;
- write_scoop_reg(&spitzscoop_device.dev, SCOOP_CPR, cpr | new_cpr);
if (!(cpr & 0x0002) && !(cpr & 0x0004)) {
+ write_scoop_reg(&spitzscoop_device.dev, SCOOP_CPR, 0x0000);
mdelay(1);
reset_scoop_gpio(&spitzscoop_device.dev, SPITZ_SCP_CF_POWER);
+ } else {
+ write_scoop_reg(&spitzscoop_device.dev, SCOOP_CPR, cpr | new_cpr);
}
}
}
@@ -293,15 +296,14 @@ static int spitz_mci_init(struct device *dev, irqreturn_t (*spitz_detect_int)(in
spitz_mci_platform_data.detect_delay = msecs_to_jiffies(250);
- err = request_irq(SPITZ_IRQ_GPIO_nSD_DETECT, spitz_detect_int, SA_INTERRUPT,
- "MMC card detect", data);
+ err = request_irq(SPITZ_IRQ_GPIO_nSD_DETECT, spitz_detect_int,
+ SA_INTERRUPT | SA_TRIGGER_RISING | SA_TRIGGER_FALLING,
+ "MMC card detect", data);
if (err) {
printk(KERN_ERR "spitz_mci_init: MMC/SD: can't request MMC card detect IRQ\n");
return -1;
}
- set_irq_type(SPITZ_IRQ_GPIO_nSD_DETECT, IRQT_BOTHEDGE);
-
return 0;
}
@@ -335,6 +337,35 @@ static struct pxamci_platform_data spitz_mci_platform_data = {
/*
+ * USB Host (OHCI)
+ */
+static int spitz_ohci_init(struct device *dev)
+{
+ /* Only Port 2 is connected */
+ pxa_gpio_mode(SPITZ_GPIO_USB_CONNECT | GPIO_IN);
+ pxa_gpio_mode(SPITZ_GPIO_USB_HOST | GPIO_OUT);
+ pxa_gpio_mode(SPITZ_GPIO_USB_DEVICE | GPIO_IN);
+
+ /* Setup USB Port 2 Output Control Register */
+ UP2OCR = UP2OCR_HXS | UP2OCR_HXOE | UP2OCR_DPPDE | UP2OCR_DMPDE;
+
+ GPSR(SPITZ_GPIO_USB_HOST) = GPIO_bit(SPITZ_GPIO_USB_HOST);
+
+ UHCHR = (UHCHR) &
+ ~(UHCHR_SSEP1 | UHCHR_SSEP2 | UHCHR_SSEP3 | UHCHR_SSE);
+
+ UHCRHDA |= UHCRHDA_NOCP;
+
+ return 0;
+}
+
+static struct pxaohci_platform_data spitz_ohci_platform_data = {
+ .port_mode = PMM_NPS_MODE,
+ .init = spitz_ohci_init,
+};
+
+
+/*
* Irda
*/
static void spitz_irda_transceiver_mode(struct device *dev, int mode)
@@ -411,6 +442,7 @@ static void __init common_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices));
pxa_set_mci_info(&spitz_mci_platform_data);
+ pxa_set_ohci_info(&spitz_ohci_platform_data);
pxa_set_ficp_info(&spitz_ficp_platform_data);
set_pxa_fb_parent(&spitzssp_device.dev);
set_pxa_fb_info(&spitz_pxafb_info);
diff --git a/arch/arm/mach-pxa/spitz_pm.c b/arch/arm/mach-pxa/spitz_pm.c
index 3ce7486daa5..5e5bdc89848 100644
--- a/arch/arm/mach-pxa/spitz_pm.c
+++ b/arch/arm/mach-pxa/spitz_pm.c
@@ -33,19 +33,7 @@ static void spitz_charger_init(void)
{
pxa_gpio_mode(SPITZ_GPIO_KEY_INT | GPIO_IN);
pxa_gpio_mode(SPITZ_GPIO_SYNC | GPIO_IN);
-}
-
-static void spitz_charge_led(int val)
-{
- if (val == SHARPSL_LED_ERROR) {
- dev_dbg(sharpsl_pm.dev, "Charge LED Error\n");
- } else if (val == SHARPSL_LED_ON) {
- dev_dbg(sharpsl_pm.dev, "Charge LED On\n");
- set_scoop_gpio(&spitzscoop_device.dev, SPITZ_SCP_LED_ORANGE);
- } else {
- dev_dbg(sharpsl_pm.dev, "Charge LED Off\n");
- reset_scoop_gpio(&spitzscoop_device.dev, SPITZ_SCP_LED_ORANGE);
- }
+ sharpsl_pm_pxa_init();
}
static void spitz_measure_temp(int on)
@@ -92,7 +80,7 @@ static void spitz_discharge1(int on)
static void spitz_presuspend(void)
{
- spitz_last_ac_status = STATUS_AC_IN();
+ spitz_last_ac_status = sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN);
/* GPIO Sleep Register */
PGSR0 = 0x00144018;
@@ -138,7 +126,7 @@ static void spitz_postsuspend(void)
static int spitz_should_wakeup(unsigned int resume_on_alarm)
{
int is_resume = 0;
- int acin = STATUS_AC_IN();
+ int acin = sharpsl_pm.machinfo->read_devdata(SHARPSL_STATUS_ACIN);
if (spitz_last_ac_status != acin) {
if (acin) {
@@ -148,8 +136,8 @@ static int spitz_should_wakeup(unsigned int resume_on_alarm)
} else {
/* charge off */
dev_dbg(sharpsl_pm.dev, "AC Removed\n");
- CHARGE_LED_OFF();
- CHARGE_OFF();
+ sharpsl_pm_led(SHARPSL_LED_OFF);
+ sharpsl_pm.machinfo->charge(0);
sharpsl_pm.charge_mode = CHRG_OFF;
}
spitz_last_ac_status = acin;
@@ -175,25 +163,41 @@ static unsigned long spitz_charger_wakeup(void)
return (~GPLR0 & GPIO_bit(SPITZ_GPIO_KEY_INT)) | (GPLR0 & GPIO_bit(SPITZ_GPIO_SYNC));
}
-static int spitz_acin_status(void)
+unsigned long spitzpm_read_devdata(int type)
{
- return (((~GPLR(SPITZ_GPIO_AC_IN)) & GPIO_bit(SPITZ_GPIO_AC_IN)) != 0);
+ switch(type) {
+ case SHARPSL_STATUS_ACIN:
+ return (((~GPLR(SPITZ_GPIO_AC_IN)) & GPIO_bit(SPITZ_GPIO_AC_IN)) != 0);
+ case SHARPSL_STATUS_LOCK:
+ return READ_GPIO_BIT(sharpsl_pm.machinfo->gpio_batlock);
+ case SHARPSL_STATUS_CHRGFULL:
+ return READ_GPIO_BIT(sharpsl_pm.machinfo->gpio_batfull);
+ case SHARPSL_STATUS_FATAL:
+ return READ_GPIO_BIT(sharpsl_pm.machinfo->gpio_fatal);
+ case SHARPSL_ACIN_VOLT:
+ return sharpsl_pm_pxa_read_max1111(MAX1111_ACIN_VOLT);
+ case SHARPSL_BATT_TEMP:
+ return sharpsl_pm_pxa_read_max1111(MAX1111_BATT_TEMP);
+ case SHARPSL_BATT_VOLT:
+ default:
+ return sharpsl_pm_pxa_read_max1111(MAX1111_BATT_VOLT);
+ }
}
struct sharpsl_charger_machinfo spitz_pm_machinfo = {
.init = spitz_charger_init,
+ .exit = sharpsl_pm_pxa_remove,
.gpio_batlock = SPITZ_GPIO_BAT_COVER,
.gpio_acin = SPITZ_GPIO_AC_IN,
.gpio_batfull = SPITZ_GPIO_CHRG_FULL,
.gpio_fatal = SPITZ_GPIO_FATAL_BAT,
- .status_acin = spitz_acin_status,
.discharge = spitz_discharge,
.discharge1 = spitz_discharge1,
.charge = spitz_charge,
- .chargeled = spitz_charge_led,
.measure_temp = spitz_measure_temp,
.presuspend = spitz_presuspend,
.postsuspend = spitz_postsuspend,
+ .read_devdata = spitzpm_read_devdata,
.charger_wakeup = spitz_charger_wakeup,
.should_wakeup = spitz_should_wakeup,
.bat_levels = 40,
diff --git a/arch/arm/mach-pxa/ssp.c b/arch/arm/mach-pxa/ssp.c
index a68b30eff4d..93096befd01 100644
--- a/arch/arm/mach-pxa/ssp.c
+++ b/arch/arm/mach-pxa/ssp.c
@@ -31,6 +31,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/init.h>
+#include <linux/mutex.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/hardware.h>
@@ -59,7 +60,7 @@ static const struct ssp_info_ ssp_info[PXA_SSP_PORTS] = {
#endif
};
-static DECLARE_MUTEX(sem);
+static DEFINE_MUTEX(mutex);
static int use_count[PXA_SSP_PORTS] = {0, 0, 0};
static irqreturn_t ssp_interrupt(int irq, void *dev_id, struct pt_regs *regs)
@@ -239,16 +240,16 @@ int ssp_init(struct ssp_dev *dev, u32 port, u32 init_flags)
if (port > PXA_SSP_PORTS || port == 0)
return -ENODEV;
- down(&sem);
+ mutex_lock(&mutex);
if (use_count[port - 1]) {
- up(&sem);
+ mutex_unlock(&mutex);
return -EBUSY;
}
use_count[port - 1]++;
if (!request_mem_region(__PREG(SSCR0_P(port)), 0x2c, "SSP")) {
use_count[port - 1]--;
- up(&sem);
+ mutex_unlock(&mutex);
return -EBUSY;
}
dev->port = port;
@@ -265,13 +266,13 @@ int ssp_init(struct ssp_dev *dev, u32 port, u32 init_flags)
/* turn on SSP port clock */
pxa_set_cken(ssp_info[port-1].clock, 1);
- up(&sem);
+ mutex_unlock(&mutex);
return 0;
out_region:
release_mem_region(__PREG(SSCR0_P(port)), 0x2c);
use_count[port - 1]--;
- up(&sem);
+ mutex_unlock(&mutex);
return ret;
}
@@ -282,7 +283,7 @@ out_region:
*/
void ssp_exit(struct ssp_dev *dev)
{
- down(&sem);
+ mutex_lock(&mutex);
SSCR0_P(dev->port) &= ~SSCR0_SSE;
if (dev->port > PXA_SSP_PORTS || dev->port == 0) {
@@ -295,7 +296,7 @@ void ssp_exit(struct ssp_dev *dev)
free_irq(dev->irq, dev);
release_mem_region(__PREG(SSCR0_P(dev->port)), 0x2c);
use_count[dev->port - 1]--;
- up(&sem);
+ mutex_unlock(&mutex);
}
EXPORT_SYMBOL(ssp_write_word);
diff --git a/arch/arm/mach-realview/clock.c b/arch/arm/mach-realview/clock.c
index 002635c97bb..21325a4da9d 100644
--- a/arch/arm/mach-realview/clock.c
+++ b/arch/arm/mach-realview/clock.c
@@ -13,28 +13,29 @@
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
#include <asm/semaphore.h>
-#include <asm/hardware/clock.h>
#include <asm/hardware/icst307.h>
#include "clock.h"
static LIST_HEAD(clocks);
-static DECLARE_MUTEX(clocks_sem);
+static DEFINE_MUTEX(clocks_mutex);
struct clk *clk_get(struct device *dev, const char *id)
{
struct clk *p, *clk = ERR_PTR(-ENOENT);
- down(&clocks_sem);
+ mutex_lock(&clocks_mutex);
list_for_each_entry(p, &clocks, node) {
if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
clk = p;
break;
}
}
- up(&clocks_sem);
+ mutex_unlock(&clocks_mutex);
return clk;
}
@@ -57,17 +58,6 @@ void clk_disable(struct clk *clk)
}
EXPORT_SYMBOL(clk_disable);
-int clk_use(struct clk *clk)
-{
- return 0;
-}
-EXPORT_SYMBOL(clk_use);
-
-void clk_unuse(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_unuse);
-
unsigned long clk_get_rate(struct clk *clk)
{
return clk->rate;
@@ -120,18 +110,18 @@ static struct clk mmci_clk = {
int clk_register(struct clk *clk)
{
- down(&clocks_sem);
+ mutex_lock(&clocks_mutex);
list_add(&clk->node, &clocks);
- up(&clocks_sem);
+ mutex_unlock(&clocks_mutex);
return 0;
}
EXPORT_SYMBOL(clk_register);
void clk_unregister(struct clk *clk)
{
- down(&clocks_sem);
+ mutex_lock(&clocks_mutex);
list_del(&clk->node);
- up(&clocks_sem);
+ mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index af6580f1ceb..4a222f59f2c 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -24,14 +24,14 @@
#include <linux/dma-mapping.h>
#include <linux/sysdev.h>
#include <linux/interrupt.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/clcd.h>
#include <asm/system.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/leds.h>
-#include <asm/hardware/amba.h>
-#include <asm/hardware/amba_clcd.h>
#include <asm/hardware/arm_timer.h>
#include <asm/hardware/icst307.h>
diff --git a/arch/arm/mach-realview/core.h b/arch/arm/mach-realview/core.h
index c06e6041df4..93e86d9f439 100644
--- a/arch/arm/mach-realview/core.h
+++ b/arch/arm/mach-realview/core.h
@@ -22,7 +22,8 @@
#ifndef __ASM_ARCH_REALVIEW_H
#define __ASM_ARCH_REALVIEW_H
-#include <asm/hardware/amba.h>
+#include <linux/amba/bus.h>
+
#include <asm/leds.h>
#include <asm/io.h>
diff --git a/arch/arm/mach-realview/localtimer.c b/arch/arm/mach-realview/localtimer.c
index c9d7c596b20..caf6b8bb6c9 100644
--- a/arch/arm/mach-realview/localtimer.c
+++ b/arch/arm/mach-realview/localtimer.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/smp.h>
+#include <linux/jiffies.h>
#include <asm/mach/time.h>
#include <asm/hardware/arm_twd.h>
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index 7dc32503fdf..112f7592aca 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/sysdev.h>
+#include <linux/amba/bus.h>
#include <asm/hardware.h>
#include <asm/io.h>
@@ -30,7 +31,6 @@
#include <asm/leds.h>
#include <asm/mach-types.h>
#include <asm/hardware/gic.h>
-#include <asm/hardware/amba.h>
#include <asm/hardware/icst307.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-rpc/dma.c b/arch/arm/mach-rpc/dma.c
index bc0747439fb..bd86ffba881 100644
--- a/arch/arm/mach-rpc/dma.c
+++ b/arch/arm/mach-rpc/dma.c
@@ -13,7 +13,7 @@
#include <linux/mman.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/pci.h>
+#include <linux/dma-mapping.h>
#include <asm/page.h>
#include <asm/dma.h>
@@ -148,11 +148,14 @@ static void iomd_enable_dma(dmach_t channel, dma_t *dma)
* Cope with ISA-style drivers which expect cache
* coherence.
*/
- if (!dma->using_sg) {
- dma->buf.dma_address = pci_map_single(NULL,
- dma->buf.__address, dma->buf.length,
+ if (!dma->sg) {
+ dma->sg = &dma->buf;
+ dma->sgcount = 1;
+ dma->buf.length = dma->count;
+ dma->buf.dma_address = dma_map_single(NULL,
+ dma->addr, dma->count,
dma->dma_mode == DMA_MODE_READ ?
- PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
iomd_writeb(DMA_CR_C, dma_base + CR);
@@ -239,7 +242,7 @@ static void floppy_enable_dma(dmach_t channel, dma_t *dma)
unsigned int fiqhandler_length;
struct pt_regs regs;
- if (dma->using_sg)
+ if (dma->sg)
BUG();
if (dma->dma_mode == DMA_MODE_READ) {
@@ -252,8 +255,8 @@ static void floppy_enable_dma(dmach_t channel, dma_t *dma)
fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start;
}
- regs.ARM_r9 = dma->buf.length;
- regs.ARM_r10 = (unsigned long)dma->buf.__address;
+ regs.ARM_r9 = dma->count;
+ regs.ARM_r10 = (unsigned long)dma->addr;
regs.ARM_fp = (unsigned long)FLOPPYDMA_BASE;
if (claim_fiq(&fh)) {
diff --git a/arch/arm/mach-s3c2410/clock.c b/arch/arm/mach-s3c2410/clock.c
index 82e8253b1fa..af2f3d52b61 100644
--- a/arch/arm/mach-s3c2410/clock.c
+++ b/arch/arm/mach-s3c2410/clock.c
@@ -34,16 +34,16 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/sysdev.h>
-
#include <linux/interrupt.h>
#include <linux/ioport.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
#include <asm/hardware.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <asm/hardware/clock.h>
#include <asm/arch/regs-clock.h>
#include "clock.h"
@@ -52,7 +52,7 @@
/* clock information */
static LIST_HEAD(clocks);
-static DECLARE_MUTEX(clocks_sem);
+static DEFINE_MUTEX(clocks_mutex);
/* old functions */
@@ -103,7 +103,7 @@ struct clk *clk_get(struct device *dev, const char *id)
else
idno = to_platform_device(dev)->id;
- down(&clocks_sem);
+ mutex_lock(&clocks_mutex);
list_for_each_entry(p, &clocks, list) {
if (p->id == idno &&
@@ -127,7 +127,7 @@ struct clk *clk_get(struct device *dev, const char *id)
}
}
- up(&clocks_sem);
+ mutex_unlock(&clocks_mutex);
return clk;
}
@@ -151,18 +151,6 @@ void clk_disable(struct clk *clk)
}
-int clk_use(struct clk *clk)
-{
- atomic_inc(&clk->used);
- return 0;
-}
-
-
-void clk_unuse(struct clk *clk)
-{
- atomic_dec(&clk->used);
-}
-
unsigned long clk_get_rate(struct clk *clk)
{
if (IS_ERR(clk))
@@ -196,8 +184,6 @@ EXPORT_SYMBOL(clk_get);
EXPORT_SYMBOL(clk_put);
EXPORT_SYMBOL(clk_enable);
EXPORT_SYMBOL(clk_disable);
-EXPORT_SYMBOL(clk_use);
-EXPORT_SYMBOL(clk_unuse);
EXPORT_SYMBOL(clk_get_rate);
EXPORT_SYMBOL(clk_round_rate);
EXPORT_SYMBOL(clk_set_rate);
@@ -268,100 +254,101 @@ struct clk s3c24xx_uclk = {
/* clock definitions */
static struct clk init_clocks[] = {
- { .name = "nand",
- .id = -1,
- .parent = &clk_h,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_NAND
- },
- { .name = "lcd",
- .id = -1,
- .parent = &clk_h,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_LCDC
- },
- { .name = "usb-host",
- .id = -1,
- .parent = &clk_h,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_USBH
- },
- { .name = "usb-device",
- .id = -1,
- .parent = &clk_h,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_USBD
- },
- { .name = "timers",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_PWMT
- },
- { .name = "sdi",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_SDI
- },
- { .name = "uart",
- .id = 0,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_UART0
- },
- { .name = "uart",
- .id = 1,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_UART1
- },
- { .name = "uart",
- .id = 2,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_UART2
- },
- { .name = "gpio",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_GPIO
- },
- { .name = "rtc",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_RTC
- },
- { .name = "adc",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_ADC
- },
- { .name = "i2c",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_IIC
- },
- { .name = "iis",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_IIS
- },
- { .name = "spi",
- .id = -1,
- .parent = &clk_p,
- .enable = s3c24xx_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_SPI
- },
- { .name = "watchdog",
- .id = -1,
- .parent = &clk_p,
- .ctrlbit = 0
+ {
+ .name = "nand",
+ .id = -1,
+ .parent = &clk_h,
+ .enable = s3c24xx_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_NAND,
+ }, {
+ .name = "lcd",
+ .id = -1,
+ .parent = &clk_h,
+ .enable = s3c24xx_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_LCDC,
+ }, {
+ .name = "usb-host",
+ .id = -1,
+ .parent = &clk_h,
+ .enable = s3c24xx_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_USBH,
+ }, {
+ .name = "usb-device",
+ .id = -1,
+ .parent = &clk_h,
+ .enable = s3c24xx_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_USBD,
+ }, {
+ .name = "timers",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c24xx_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_PWMT,
+ }, {
+ .name = "sdi",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c24xx_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_SDI,
+ }, {
+ .name = "uart",
+ .id = 0,
+ .parent = &clk_p,
+ .enable = s3c24xx_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_UART0,
+ }, {
+ .name = "uart",
+ .id = 1,
+ .parent = &clk_p,
+ .enable = s3c24xx_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_UART1,
+ }, {
+ .name = "uart",
+ .id = 2,
+ .parent = &clk_p,
+ .enable = s3c24xx_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_UART2,
+ }, {
+ .name = "gpio",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c24xx_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_GPIO,
+ }, {
+ .name = "rtc",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c24xx_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_RTC,
+ }, {
+ .name = "adc",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c24xx_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_ADC,
+ }, {
+ .name = "i2c",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c24xx_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_IIC,
+ }, {
+ .name = "iis",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c24xx_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_IIS,
+ }, {
+ .name = "spi",
+ .id = -1,
+ .parent = &clk_p,
+ .enable = s3c24xx_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_SPI,
+ }, {
+ .name = "watchdog",
+ .id = -1,
+ .parent = &clk_p,
+ .ctrlbit = 0,
}
};
@@ -370,16 +357,15 @@ static struct clk init_clocks[] = {
int s3c24xx_register_clock(struct clk *clk)
{
clk->owner = THIS_MODULE;
- atomic_set(&clk->used, 0);
if (clk->enable == NULL)
clk->enable = clk_null_enable;
/* add to the list of available clocks */
- down(&clocks_sem);
+ mutex_lock(&clocks_mutex);
list_add(&clk->list, &clocks);
- up(&clocks_sem);
+ mutex_unlock(&clocks_mutex);
return 0;
}
@@ -406,16 +392,15 @@ int __init s3c24xx_setup_clocks(unsigned long xtal,
clk_p.rate = pclk;
clk_f.rate = fclk;
- /* it looks like just setting the register here is not good
- * enough, and causes the odd hang at initial boot time, so
- * do all of them indivdually.
- *
- * I think disabling the LCD clock if the LCD is active is
- * very dangerous, and therefore the bootloader should be
- * careful to not enable the LCD clock if it is not needed.
+ /* We must be careful when disabling the clocks we do not intend to
+ * use at boot time, as subsystems such as the LCD, which issue
+ * their own DMA requests to the bus, can cause the system to lock up
+ * if they were in the middle of requesting bus access.
*
- * and of course, this looks neater
- */
+ * Disabling the LCD clock if the LCD is active is very dangerous,
+ * and therefore the bootloader should be careful to not enable
+ * the LCD clock if it is not needed.
+ */
s3c24xx_clk_enable(S3C2410_CLKCON_NAND, 0);
s3c24xx_clk_enable(S3C2410_CLKCON_USBH, 0);
diff --git a/arch/arm/mach-s3c2410/clock.h b/arch/arm/mach-s3c2410/clock.h
index 7953b6f397b..177d5c8decf 100644
--- a/arch/arm/mach-s3c2410/clock.h
+++ b/arch/arm/mach-s3c2410/clock.h
@@ -16,7 +16,6 @@ struct clk {
struct clk *parent;
const char *name;
int id;
- atomic_t used;
unsigned long rate;
unsigned long ctrlbit;
int (*enable)(struct clk *, int enable);
diff --git a/arch/arm/mach-s3c2410/s3c2440-clock.c b/arch/arm/mach-s3c2410/s3c2440-clock.c
index c67e0979aec..b557a2be8a0 100644
--- a/arch/arm/mach-s3c2410/s3c2440-clock.c
+++ b/arch/arm/mach-s3c2410/s3c2440-clock.c
@@ -29,16 +29,15 @@
#include <linux/err.h>
#include <linux/device.h>
#include <linux/sysdev.h>
-
#include <linux/interrupt.h>
#include <linux/ioport.h>
+#include <linux/clk.h>
#include <asm/hardware.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <asm/hardware/clock.h>
#include <asm/arch/regs-clock.h>
#include "clock.h"
diff --git a/arch/arm/mach-s3c2410/s3c2440.c b/arch/arm/mach-s3c2410/s3c2440.c
index 4d63e7133b4..b7fe6d9453f 100644
--- a/arch/arm/mach-s3c2410/s3c2440.c
+++ b/arch/arm/mach-s3c2410/s3c2440.c
@@ -28,6 +28,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/sysdev.h>
+#include <linux/clk.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -36,7 +37,6 @@
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/hardware/clock.h>
#include <asm/arch/regs-clock.h>
#include <asm/arch/regs-serial.h>
diff --git a/arch/arm/mach-s3c2410/time.c b/arch/arm/mach-s3c2410/time.c
index 8a00e3c3cd0..10a2976aefd 100644
--- a/arch/arm/mach-s3c2410/time.c
+++ b/arch/arm/mach-s3c2410/time.c
@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/err.h>
+#include <linux/clk.h>
#include <asm/system.h>
#include <asm/leds.h>
@@ -35,7 +36,6 @@
#include <asm/arch/regs-timer.h>
#include <asm/arch/regs-irq.h>
#include <asm/mach/time.h>
-#include <asm/hardware/clock.h>
#include "clock.h"
#include "cpu.h"
@@ -191,7 +191,6 @@ static void s3c2410_timer_setup (void)
if (IS_ERR(clk))
panic("failed to get clock for system timer");
- clk_use(clk);
clk_enable(clk);
pclk = clk_get_rate(clk);
diff --git a/arch/arm/mach-s3c2410/usb-simtec.c b/arch/arm/mach-s3c2410/usb-simtec.c
index 5098b50158a..495f8c6ffcb 100644
--- a/arch/arm/mach-s3c2410/usb-simtec.c
+++ b/arch/arm/mach-s3c2410/usb-simtec.c
@@ -84,13 +84,13 @@ static void usb_simtec_enableoc(struct s3c2410_hcd_info *info, int on)
int ret;
if (on) {
- ret = request_irq(IRQ_USBOC, usb_simtec_ocirq, SA_INTERRUPT,
+ ret = request_irq(IRQ_USBOC, usb_simtec_ocirq,
+ SA_INTERRUPT | SA_TRIGGER_RISING |
+ SA_TRIGGER_FALLING,
"USB Over-current", info);
if (ret != 0) {
printk(KERN_ERR "failed to request usb oc irq\n");
}
-
- set_irq_type(IRQ_USBOC, IRQT_BOTHEDGE);
} else {
free_irq(IRQ_USBOC, info);
}
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
index 59c7964cfe1..786c8534231 100644
--- a/arch/arm/mach-sa1100/pm.c
+++ b/arch/arm/mach-sa1100/pm.c
@@ -135,29 +135,11 @@ unsigned long sleep_phys_sp(void *sp)
}
/*
- * Called after processes are frozen, but before we shut down devices.
- */
-static int sa11x0_pm_prepare(suspend_state_t state)
-{
- return 0;
-}
-
-/*
- * Called after devices are re-setup, but before processes are thawed.
- */
-static int sa11x0_pm_finish(suspend_state_t state)
-{
- return 0;
-}
-
-/*
* Set to PM_DISK_FIRMWARE so we can quickly veto suspend-to-disk.
*/
static struct pm_ops sa11x0_pm_ops = {
.pm_disk_mode = PM_DISK_FIRMWARE,
- .prepare = sa11x0_pm_prepare,
.enter = sa11x0_pm_enter,
- .finish = sa11x0_pm_finish,
};
static int __init sa11x0_pm_init(void)
diff --git a/arch/arm/mach-versatile/clock.c b/arch/arm/mach-versatile/clock.c
index b96a2ea15d4..9858c96560e 100644
--- a/arch/arm/mach-versatile/clock.c
+++ b/arch/arm/mach-versatile/clock.c
@@ -14,28 +14,29 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
#include <asm/semaphore.h>
-#include <asm/hardware/clock.h>
#include <asm/hardware/icst307.h>
#include "clock.h"
static LIST_HEAD(clocks);
-static DECLARE_MUTEX(clocks_sem);
+static DEFINE_MUTEX(clocks_mutex);
struct clk *clk_get(struct device *dev, const char *id)
{
struct clk *p, *clk = ERR_PTR(-ENOENT);
- down(&clocks_sem);
+ mutex_lock(&clocks_mutex);
list_for_each_entry(p, &clocks, node) {
if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
clk = p;
break;
}
}
- up(&clocks_sem);
+ mutex_unlock(&clocks_mutex);
return clk;
}
@@ -58,17 +59,6 @@ void clk_disable(struct clk *clk)
}
EXPORT_SYMBOL(clk_disable);
-int clk_use(struct clk *clk)
-{
- return 0;
-}
-EXPORT_SYMBOL(clk_use);
-
-void clk_unuse(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_unuse);
-
unsigned long clk_get_rate(struct clk *clk)
{
return clk->rate;
@@ -121,18 +111,18 @@ static struct clk mmci_clk = {
int clk_register(struct clk *clk)
{
- down(&clocks_sem);
+ mutex_lock(&clocks_mutex);
list_add(&clk->node, &clocks);
- up(&clocks_sem);
+ mutex_unlock(&clocks_mutex);
return 0;
}
EXPORT_SYMBOL(clk_register);
void clk_unregister(struct clk *clk)
{
- down(&clocks_sem);
+ mutex_lock(&clocks_mutex);
list_del(&clk->node);
- up(&clocks_sem);
+ mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index a1ca46630dd..90023745b23 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -25,14 +25,14 @@
#include <linux/platform_device.h>
#include <linux/sysdev.h>
#include <linux/interrupt.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/clcd.h>
#include <asm/system.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/leds.h>
-#include <asm/hardware/amba.h>
-#include <asm/hardware/amba_clcd.h>
#include <asm/hardware/arm_timer.h>
#include <asm/hardware/icst307.h>
diff --git a/arch/arm/mach-versatile/core.h b/arch/arm/mach-versatile/core.h
index 588c20669d5..afcaa858eb1 100644
--- a/arch/arm/mach-versatile/core.h
+++ b/arch/arm/mach-versatile/core.h
@@ -22,7 +22,7 @@
#ifndef __ASM_ARCH_VERSATILE_H
#define __ASM_ARCH_VERSATILE_H
-#include <asm/hardware/amba.h>
+#include <linux/amba/bus.h>
extern void __init versatile_init(void);
extern void __init versatile_init_irq(void);
diff --git a/arch/arm/mach-versatile/versatile_ab.c b/arch/arm/mach-versatile/versatile_ab.c
index 8b0b3bef24a..e74c8a2fbb9 100644
--- a/arch/arm/mach-versatile/versatile_ab.c
+++ b/arch/arm/mach-versatile/versatile_ab.c
@@ -23,12 +23,12 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/sysdev.h>
+#include <linux/amba/bus.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
-#include <asm/hardware/amba.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-versatile/versatile_pb.c b/arch/arm/mach-versatile/versatile_pb.c
index 7c3078c3891..22d5ca07f75 100644
--- a/arch/arm/mach-versatile/versatile_pb.c
+++ b/arch/arm/mach-versatile/versatile_pb.c
@@ -23,12 +23,12 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/sysdev.h>
+#include <linux/amba/bus.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
-#include <asm/hardware/amba.h>
#include <asm/mach/arch.h>
#include <asm/mach/mmc.h>
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index e84fdde6edf..3b79d0e2345 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -62,8 +62,8 @@ config CPU_ARM720T
# ARM920T
config CPU_ARM920T
bool "Support ARM920T processor" if !ARCH_S3C2410
- depends on ARCH_INTEGRATOR || ARCH_S3C2410 || ARCH_IMX || ARCH_AAEC2000
- default y if ARCH_S3C2410
+ depends on ARCH_INTEGRATOR || ARCH_S3C2410 || ARCH_IMX || ARCH_AAEC2000 || ARCH_AT91RM9200
+ default y if ARCH_S3C2410 || ARCH_AT91RM9200
select CPU_32v4
select CPU_ABRT_EV4T
select CPU_CACHE_V4WT
@@ -83,8 +83,8 @@ config CPU_ARM920T
# ARM922T
config CPU_ARM922T
bool "Support ARM922T processor" if ARCH_INTEGRATOR
- depends on ARCH_CAMELOT || ARCH_LH7A40X || ARCH_INTEGRATOR
- default y if ARCH_CAMELOT || ARCH_LH7A40X
+ depends on ARCH_LH7A40X || ARCH_INTEGRATOR
+ default y if ARCH_LH7A40X
select CPU_32v4
select CPU_ABRT_EV4T
select CPU_CACHE_V4WT
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index dbfe9e891f0..c2ee18d2075 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -19,17 +19,26 @@
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
-#include <asm/io.h>
#include <asm/tlbflush.h>
+#include <asm/sizes.h>
+
+/* Sanity check size */
+#if (CONSISTENT_DMA_SIZE % SZ_2M)
+#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
+#endif
-#define CONSISTENT_BASE (0xffc00000)
#define CONSISTENT_END (0xffe00000)
+#define CONSISTENT_BASE (CONSISTENT_END - CONSISTENT_DMA_SIZE)
+
#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
+#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
+#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
+
/*
- * This is the page table (2MB) covering uncached, DMA consistent allocations
+ * These are the page tables (2MB each) covering uncached, DMA consistent allocations
*/
-static pte_t *consistent_pte;
+static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
static DEFINE_SPINLOCK(consistent_lock);
/*
@@ -143,7 +152,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
unsigned long order;
u64 mask = ISA_DMA_THRESHOLD, limit;
- if (!consistent_pte) {
+ if (!consistent_pte[0]) {
printk(KERN_ERR "%s: not initialised\n", __func__);
dump_stack();
return NULL;
@@ -206,9 +215,12 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
c = vm_region_alloc(&consistent_head, size,
gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
if (c) {
- pte_t *pte = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
+ pte_t *pte;
struct page *end = page + (1 << order);
+ int idx = CONSISTENT_PTE_INDEX(c->vm_start);
+ u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+ pte = consistent_pte[idx] + off;
c->vm_pages = page;
/*
@@ -227,6 +239,11 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
set_pte(pte, mk_pte(page, prot));
page++;
pte++;
+ off++;
+ if (off >= PTRS_PER_PTE) {
+ off = 0;
+ pte = consistent_pte[++idx];
+ }
} while (size -= PAGE_SIZE);
/*
@@ -328,6 +345,8 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
struct vm_region *c;
unsigned long flags, addr;
pte_t *ptep;
+ int idx;
+ u32 off;
WARN_ON(irqs_disabled());
@@ -348,7 +367,9 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
size = c->vm_end - c->vm_start;
}
- ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
+ idx = CONSISTENT_PTE_INDEX(c->vm_start);
+ off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+ ptep = consistent_pte[idx] + off;
addr = c->vm_start;
do {
pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
@@ -356,6 +377,11 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
ptep++;
addr += PAGE_SIZE;
+ off++;
+ if (off >= PTRS_PER_PTE) {
+ off = 0;
+ ptep = consistent_pte[++idx];
+ }
if (!pte_none(pte) && pte_present(pte)) {
pfn = pte_pfn(pte);
@@ -402,11 +428,12 @@ static int __init consistent_init(void)
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
- int ret = 0;
+ int ret = 0, i = 0;
+ u32 base = CONSISTENT_BASE;
do {
- pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
- pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
+ pgd = pgd_offset(&init_mm, base);
+ pmd = pmd_alloc(&init_mm, pgd, base);
if (!pmd) {
printk(KERN_ERR "%s: no pmd tables\n", __func__);
ret = -ENOMEM;
@@ -414,15 +441,16 @@ static int __init consistent_init(void)
}
WARN_ON(!pmd_none(*pmd));
- pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
+ pte = pte_alloc_kernel(pmd, base);
if (!pte) {
printk(KERN_ERR "%s: no pte tables\n", __func__);
ret = -ENOMEM;
break;
}
- consistent_pte = pte;
- } while (0);
+ consistent_pte[i++] = pte;
+ base += (1 << PGDIR_SHIFT);
+ } while (base < CONSISTENT_END);
return ret;
}
diff --git a/arch/arm/mm/discontig.c b/arch/arm/mm/discontig.c
index 0d097bb1bc4..1e560218950 100644
--- a/arch/arm/mm/discontig.c
+++ b/arch/arm/mm/discontig.c
@@ -9,10 +9,8 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/init.h>
+#include <linux/mmzone.h>
#include <linux/bootmem.h>
#if MAX_NUMNODES != 4 && MAX_NUMNODES != 16
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 10901398e4a..de3ce1eec2e 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -86,11 +86,12 @@ remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
}
static int
-remap_area_pages(unsigned long start, unsigned long phys_addr,
+remap_area_pages(unsigned long start, unsigned long pfn,
unsigned long size, unsigned long flags)
{
unsigned long address = start;
unsigned long end = start + size;
+ unsigned long phys_addr = __pfn_to_phys(pfn);
int err = 0;
pgd_t * dir;
@@ -130,36 +131,44 @@ remap_area_pages(unsigned long start, unsigned long phys_addr,
* mapping. See include/asm-arm/proc-armv/pgtable.h for more information.
*/
void __iomem *
+__ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
+ unsigned long flags)
+{
+ unsigned long addr;
+ struct vm_struct * area;
+
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+ addr = (unsigned long)area->addr;
+ if (remap_area_pages(addr, pfn, size, flags)) {
+ vfree(addr);
+ return NULL;
+ }
+ return (void __iomem *) (offset + (char *)addr);
+}
+EXPORT_SYMBOL(__ioremap_pfn);
+
+void __iomem *
__ioremap(unsigned long phys_addr, size_t size, unsigned long flags)
{
- void * addr;
- struct vm_struct * area;
- unsigned long offset, last_addr;
+ unsigned long last_addr;
+ unsigned long offset = phys_addr & ~PAGE_MASK;
+ unsigned long pfn = __phys_to_pfn(phys_addr);
- /* Don't allow wraparound or zero size */
+ /*
+ * Don't allow wraparound or zero size
+ */
last_addr = phys_addr + size - 1;
if (!size || last_addr < phys_addr)
return NULL;
/*
- * Mappings have to be page-aligned
+ * Page align the mapping size
*/
- offset = phys_addr & ~PAGE_MASK;
- phys_addr &= PAGE_MASK;
size = PAGE_ALIGN(last_addr + 1) - phys_addr;
- /*
- * Ok, go for it..
- */
- area = get_vm_area(size, VM_IOREMAP);
- if (!area)
- return NULL;
- addr = area->addr;
- if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
- vfree(addr);
- return NULL;
- }
- return (void __iomem *) (offset + (char *)addr);
+ return __ioremap_pfn(pfn, offset, size, flags);
}
EXPORT_SYMBOL(__ioremap);
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 9e50127be63..d0245a31d4d 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -19,7 +19,6 @@
#include <asm/pgalloc.h>
#include <asm/page.h>
-#include <asm/io.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index 7ce39b986e2..7ebc5a29db8 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -19,15 +19,16 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
#include <asm/io.h>
#include <asm/semaphore.h>
-#include <asm/hardware/clock.h>
#include <asm/arch/clock.h>
LIST_HEAD(clocks);
-static DECLARE_MUTEX(clocks_sem);
+static DEFINE_MUTEX(clocks_mutex);
DEFINE_SPINLOCK(clockfw_lock);
static struct clk_functions *arch_clock;
@@ -40,14 +41,14 @@ struct clk * clk_get(struct device *dev, const char *id)
{
struct clk *p, *clk = ERR_PTR(-ENOENT);
- down(&clocks_sem);
+ mutex_lock(&clocks_mutex);
list_for_each_entry(p, &clocks, node) {
if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
clk = p;
break;
}
}
- up(&clocks_sem);
+ mutex_unlock(&clocks_mutex);
return clk;
}
@@ -249,11 +250,11 @@ void propagate_rate(struct clk * tclk)
int clk_register(struct clk *clk)
{
- down(&clocks_sem);
+ mutex_lock(&clocks_mutex);
list_add(&clk->node, &clocks);
if (clk->init)
clk->init(clk);
- up(&clocks_sem);
+ mutex_unlock(&clocks_mutex);
return 0;
}
@@ -261,9 +262,9 @@ EXPORT_SYMBOL(clk_register);
void clk_unregister(struct clk *clk)
{
- down(&clocks_sem);
+ mutex_lock(&clocks_mutex);
list_del(&clk->node);
- up(&clocks_sem);
+ mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
index ccdb452630c..adffc5a859e 100644
--- a/arch/arm/plat-omap/common.c
+++ b/arch/arm/plat-omap/common.c
@@ -18,12 +18,12 @@
#include <linux/tty.h>
#include <linux/serial_8250.h>
#include <linux/serial_reg.h>
+#include <linux/clk.h>
#include <asm/hardware.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/mach/map.h>
-#include <asm/hardware/clock.h>
#include <asm/io.h>
#include <asm/setup.h>
diff --git a/arch/arm/plat-omap/cpu-omap.c b/arch/arm/plat-omap/cpu-omap.c
index fd894bb0010..98edc9fdd6d 100644
--- a/arch/arm/plat-omap/cpu-omap.c
+++ b/arch/arm/plat-omap/cpu-omap.c
@@ -19,13 +19,12 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/err.h>
+#include <linux/clk.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/system.h>
-#include <asm/hardware/clock.h>
-
/* TODO: Add support for SDRAM timing changes */
int omap_verify_speed(struct cpufreq_policy *policy)
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index f5cc21ad095..a4e5ac77f6d 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -64,7 +64,7 @@ static int dma_chan_count;
static spinlock_t dma_chan_lock;
static struct omap_dma_lch dma_chan[OMAP_LOGICAL_DMA_CH_COUNT];
-const static u8 omap1_dma_irq[OMAP_LOGICAL_DMA_CH_COUNT] = {
+static const u8 omap1_dma_irq[OMAP_LOGICAL_DMA_CH_COUNT] = {
INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 76f721d8513..ca3681a824a 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -19,9 +19,9 @@
#include <linux/ptrace.h>
#include <linux/sysdev.h>
#include <linux/err.h>
+#include <linux/clk.h>
#include <asm/hardware.h>
-#include <asm/hardware/clock.h>
#include <asm/irq.h>
#include <asm/arch/irqs.h>
#include <asm/arch/gpio.h>
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index ea9475c8665..be0e0f32a59 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -19,6 +19,7 @@
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/err.h>
+#include <linux/clk.h>
#include <asm/delay.h>
#include <asm/io.h>
@@ -30,8 +31,6 @@
#include <asm/arch/dsp_common.h>
#include <asm/arch/mcbsp.h>
-#include <asm/hardware/clock.h>
-
#ifdef CONFIG_MCBSP_DEBUG
#define DBG(x...) printk(x)
#else
diff --git a/arch/arm/plat-omap/ocpi.c b/arch/arm/plat-omap/ocpi.c
index b8614822748..e40fcc8b43d 100644
--- a/arch/arm/plat-omap/ocpi.c
+++ b/arch/arm/plat-omap/ocpi.c
@@ -31,9 +31,9 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/err.h>
+#include <linux/clk.h>
#include <asm/io.h>
-#include <asm/hardware/clock.h>
#include <asm/hardware.h>
#define OCPI_BASE 0xfffec320
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 465487470d0..d0f9bb5e902 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
#
# http://www.arm.linux.org.uk/developer/machines/?action=new
#
-# Last update: Fri Nov 25 14:43:04 2005
+# Last update: Mon Jan 9 12:56:42 2006
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
@@ -910,3 +910,31 @@ mbus MACH_MBUS MBUS 896
nadia2vb MACH_NADIA2VB NADIA2VB 897
r1000 MACH_R1000 R1000 898
hw90250 MACH_HW90250 HW90250 899
+omap_2430sdp MACH_OMAP_2430SDP OMAP_2430SDP 900
+davinci_evm MACH_DAVINCI_EVM DAVINCI_EVM 901
+omap_tornado MACH_OMAP_TORNADO OMAP_TORNADO 902
+olocreek MACH_OLOCREEK OLOCREEK 903
+palmz72 MACH_PALMZ72 PALMZ72 904
+nxdb500 MACH_NXDB500 NXDB500 905
+apf9328 MACH_APF9328 APF9328 906
+omap_wipoq MACH_OMAP_WIPOQ OMAP_WIPOQ 907
+omap_twip MACH_OMAP_TWIP OMAP_TWIP 908
+xscale_treo650 MACH_XSCALE_PALMTREO650 XSCALE_PALMTREO650 909
+acumen MACH_ACUMEN ACUMEN 910
+xp100 MACH_XP100 XP100 911
+fs2410 MACH_FS2410 FS2410 912
+pxa270_cerf MACH_PXA270_CERF PXA270_CERF 913
+sq2ftlpalm MACH_SQ2FTLPALM SQ2FTLPALM 914
+bsemserver MACH_BSEMSERVER BSEMSERVER 915
+netclient MACH_NETCLIENT NETCLIENT 916
+xscale_palmtt5 MACH_XSCALE_PALMTT5 XSCALE_PALMTT5 917
+xscale_palmtc MACH_OMAP_PALMTC OMAP_PALMTC 918
+omap_apollon MACH_OMAP_APOLLON OMAP_APOLLON 919
+argonlvevb MACH_ARGONLVEVB ARGONLVEVB 920
+rea_2d MACH_REA_2D REA_2D 921
+eti3e524 MACH_TI3E524 TI3E524 922
+ateb9200 MACH_ATEB9200 ATEB9200 923
+auckland MACH_AUCKLAND AUCKLAND 924
+ak3220m MACH_AK3320M AK3320M 925
+duramax MACH_DURAMAX DURAMAX 926
+n35 MACH_N35 N35 927
diff --git a/arch/arm26/Kconfig b/arch/arm26/Kconfig
index 1f00b3d03a0..274e07019b4 100644
--- a/arch/arm26/Kconfig
+++ b/arch/arm26/Kconfig
@@ -34,10 +34,6 @@ config FORCE_MAX_ZONEORDER
int
default 9
-config UID16
- bool
- default y
-
config RWSEM_GENERIC_SPINLOCK
bool
default y
diff --git a/arch/arm26/kernel/armksyms.c b/arch/arm26/kernel/armksyms.c
index 35514b398e2..811a6376c62 100644
--- a/arch/arm26/kernel/armksyms.c
+++ b/arch/arm26/kernel/armksyms.c
@@ -35,7 +35,6 @@
#include <asm/checksum.h>
#include <asm/mach-types.h>
-extern void dump_thread(struct pt_regs *, struct user *);
extern int dump_fpu(struct pt_regs *, struct user_fp_struct *);
extern void inswb(unsigned int port, void *to, int len);
extern void outswb(unsigned int port, const void *to, int len);
diff --git a/arch/arm26/kernel/asm-offsets.c b/arch/arm26/kernel/asm-offsets.c
index 4ccacaef94d..ac682d5fd03 100644
--- a/arch/arm26/kernel/asm-offsets.c
+++ b/arch/arm26/kernel/asm-offsets.c
@@ -25,13 +25,6 @@
#if defined(__APCS_32__) && defined(CONFIG_CPU_26)
#error Sorry, your compiler targets APCS-32 but this kernel requires APCS-26
#endif
-#if __GNUC__ < 2 || (__GNUC__ == 2 && __GNUC_MINOR__ < 95)
-#error Sorry, your compiler is known to miscompile kernels. Only use gcc 2.95.3 and later.
-#endif
-#if __GNUC__ == 2 && __GNUC_MINOR__ == 95
-/* shame we can't detect the .1 or .2 releases */
-#warning GCC 2.95.2 and earlier miscompiles kernels.
-#endif
/* Use marker if you need to separate the values later */
diff --git a/arch/arm26/kernel/process.c b/arch/arm26/kernel/process.c
index 15833a0057d..38630565917 100644
--- a/arch/arm26/kernel/process.c
+++ b/arch/arm26/kernel/process.c
@@ -277,10 +277,9 @@ int
copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
unsigned long unused, struct task_struct *p, struct pt_regs *regs)
{
- struct thread_info *thread = p->thread_info;
- struct pt_regs *childregs;
+ struct thread_info *thread = task_thread_info(p);
+ struct pt_regs *childregs = task_pt_regs(p);
- childregs = __get_user_regs(thread);
*childregs = *regs;
childregs->ARM_r0 = 0;
childregs->ARM_sp = stack_start;
diff --git a/arch/arm26/kernel/ptrace.c b/arch/arm26/kernel/ptrace.c
index 4e6b7356a72..3c3371d4683 100644
--- a/arch/arm26/kernel/ptrace.c
+++ b/arch/arm26/kernel/ptrace.c
@@ -40,21 +40,6 @@
#define BREAKINST_ARM 0xef9f0001
/*
- * Get the address of the live pt_regs for the specified task.
- * These are saved onto the top kernel stack when the process
- * is not running.
- *
- * Note: if a user thread is execve'd from kernel space, the
- * kernel stack will not be empty on entry to the kernel, so
- * ptracing these tasks will fail.
- */
-static inline struct pt_regs *
-get_user_regs(struct task_struct *task)
-{
- return __get_user_regs(task->thread_info);
-}
-
-/*
* this routine will get a word off of the processes privileged stack.
* the offset is how far from the base addr as stored in the THREAD.
* this routine assumes that all the privileged stacks are in our
@@ -62,7 +47,7 @@ get_user_regs(struct task_struct *task)
*/
static inline long get_user_reg(struct task_struct *task, int offset)
{
- return get_user_regs(task)->uregs[offset];
+ return task_pt_regs(task)->uregs[offset];
}
/*
@@ -74,7 +59,7 @@ static inline long get_user_reg(struct task_struct *task, int offset)
static inline int
put_user_reg(struct task_struct *task, int offset, long data)
{
- struct pt_regs newregs, *regs = get_user_regs(task);
+ struct pt_regs newregs, *regs = task_pt_regs(task);
int ret = -EINVAL;
newregs = *regs;
@@ -377,7 +362,7 @@ void ptrace_set_bpt(struct task_struct *child)
u32 insn;
int res;
- regs = get_user_regs(child);
+ regs = task_pt_regs(child);
pc = instruction_pointer(regs);
res = read_instr(child, pc, &insn);
@@ -500,7 +485,7 @@ static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
*/
static int ptrace_getregs(struct task_struct *tsk, void *uregs)
{
- struct pt_regs *regs = get_user_regs(tsk);
+ struct pt_regs *regs = task_pt_regs(tsk);
return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
}
@@ -515,7 +500,7 @@ static int ptrace_setregs(struct task_struct *tsk, void *uregs)
ret = -EFAULT;
if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
- struct pt_regs *regs = get_user_regs(tsk);
+ struct pt_regs *regs = task_pt_regs(tsk);
ret = -EINVAL;
if (valid_user_regs(&newregs)) {
@@ -532,7 +517,7 @@ static int ptrace_setregs(struct task_struct *tsk, void *uregs)
*/
static int ptrace_getfpregs(struct task_struct *tsk, void *ufp)
{
- return copy_to_user(ufp, &tsk->thread_info->fpstate,
+ return copy_to_user(ufp, &task_thread_info(tsk)->fpstate,
sizeof(struct user_fp)) ? -EFAULT : 0;
}
@@ -542,7 +527,7 @@ static int ptrace_getfpregs(struct task_struct *tsk, void *ufp)
static int ptrace_setfpregs(struct task_struct *tsk, void *ufp)
{
set_stopped_child_used_math(tsk);
- return copy_from_user(&tsk->thread_info->fpstate, ufp,
+ return copy_from_user(&task_thread_info(tsk)->fpstate, ufp,
sizeof(struct user_fp)) ? -EFAULT : 0;
}
diff --git a/arch/arm26/kernel/traps.c b/arch/arm26/kernel/traps.c
index f64f5902239..5847ea5d774 100644
--- a/arch/arm26/kernel/traps.c
+++ b/arch/arm26/kernel/traps.c
@@ -132,7 +132,7 @@ static void dump_instr(struct pt_regs *regs)
/*static*/ void __dump_stack(struct task_struct *tsk, unsigned long sp)
{
- dump_mem("Stack: ", sp, 8192+(unsigned long)tsk->thread_info);
+ dump_mem("Stack: ", sp, 8192+(unsigned long)task_stack_page(tsk));
}
void dump_stack(void)
@@ -158,7 +158,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
} else if (verify_stack(fp)) {
printk("invalid frame pointer 0x%08x", fp);
ok = 0;
- } else if (fp < (unsigned long)(tsk->thread_info + 1))
+ } else if (fp < (unsigned long)end_of_stack(tsk))
printk("frame pointer underflow");
printk("\n");
@@ -168,7 +168,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
/* FIXME - this is probably wrong.. */
void show_stack(struct task_struct *task, unsigned long *sp) {
- dump_mem("Stack: ", (unsigned long)sp, 8192+(unsigned long)task->thread_info);
+ dump_mem("Stack: ", (unsigned long)sp, 8192+(unsigned long)task_stack_page(task));
}
DEFINE_SPINLOCK(die_lock);
@@ -187,7 +187,7 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
printk("CPU: %d\n", smp_processor_id());
show_regs(regs);
printk("Process %s (pid: %d, stack limit = 0x%p)\n",
- current->comm, current->pid, tsk->thread_info + 1);
+ current->comm, current->pid, end_of_stack(tsk));
if (!user_mode(regs) || in_interrupt()) {
__dump_stack(tsk, (unsigned long)(regs + 1));
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index e5979d68e35..b8326194973 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -9,10 +9,6 @@ config MMU
bool
default y
-config UID16
- bool
- default y
-
config RWSEM_GENERIC_SPINLOCK
bool
default y
diff --git a/arch/cris/arch-v10/drivers/ds1302.c b/arch/cris/arch-v10/drivers/ds1302.c
index 10795f67f68..b100f26497c 100644
--- a/arch/cris/arch-v10/drivers/ds1302.c
+++ b/arch/cris/arch-v10/drivers/ds1302.c
@@ -148,6 +148,7 @@
#include <linux/miscdevice.h>
#include <linux/delay.h>
#include <linux/bcd.h>
+#include <linux/capability.h>
#include <asm/uaccess.h>
#include <asm/system.h>
diff --git a/arch/cris/arch-v10/drivers/pcf8563.c b/arch/cris/arch-v10/drivers/pcf8563.c
index f2c55742e90..af517c21038 100644
--- a/arch/cris/arch-v10/drivers/pcf8563.c
+++ b/arch/cris/arch-v10/drivers/pcf8563.c
@@ -28,6 +28,7 @@
#include <linux/ioctl.h>
#include <linux/delay.h>
#include <linux/bcd.h>
+#include <linux/capability.h>
#include <asm/uaccess.h>
#include <asm/system.h>
diff --git a/arch/cris/arch-v10/kernel/kgdb.c b/arch/cris/arch-v10/kernel/kgdb.c
index b72e6a91a63..34528da9881 100644
--- a/arch/cris/arch-v10/kernel/kgdb.c
+++ b/arch/cris/arch-v10/kernel/kgdb.c
@@ -569,12 +569,6 @@ gdb_cris_strtol (const char *s, char **endptr, int base)
return x;
}
-int
-double_this(int x)
-{
- return 2 * x;
-}
-
/********************************* Register image ****************************/
/* Copy the content of a register image into another. The size n is
the size of the register image. Due to struct assignment generation of
diff --git a/arch/cris/arch-v10/kernel/process.c b/arch/cris/arch-v10/kernel/process.c
index 69e28b4057e..0a675ce9e09 100644
--- a/arch/cris/arch-v10/kernel/process.c
+++ b/arch/cris/arch-v10/kernel/process.c
@@ -79,7 +79,7 @@ void hard_reset_now (void)
*/
unsigned long thread_saved_pc(struct task_struct *t)
{
- return (unsigned long)user_regs(t->thread_info)->irp;
+ return task_pt_regs(t)->irp;
}
static void kernel_thread_helper(void* dummy, int (*fn)(void *), void * arg)
@@ -128,7 +128,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
* remember that the task_struct doubles as the kernel stack for the task
*/
- childregs = user_regs(p->thread_info);
+ childregs = task_pt_regs(p);
*childregs = *regs; /* struct copy of pt_regs */
diff --git a/arch/cris/arch-v10/kernel/ptrace.c b/arch/cris/arch-v10/kernel/ptrace.c
index 6cbd34a27b9..f214f74f264 100644
--- a/arch/cris/arch-v10/kernel/ptrace.c
+++ b/arch/cris/arch-v10/kernel/ptrace.c
@@ -37,7 +37,7 @@ inline long get_reg(struct task_struct *task, unsigned int regno)
if (regno == PT_USP)
return task->thread.usp;
else if (regno < PT_MAX)
- return ((unsigned long *)user_regs(task->thread_info))[regno];
+ return ((unsigned long *)task_pt_regs(task))[regno];
else
return 0;
}
@@ -51,7 +51,7 @@ inline int put_reg(struct task_struct *task, unsigned int regno,
if (regno == PT_USP)
task->thread.usp = data;
else if (regno < PT_MAX)
- ((unsigned long *)user_regs(task->thread_info))[regno] = data;
+ ((unsigned long *)task_pt_regs(task))[regno] = data;
else
return -1;
return 0;
diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c
index 882be42114f..843513102d3 100644
--- a/arch/cris/arch-v32/kernel/process.c
+++ b/arch/cris/arch-v32/kernel/process.c
@@ -96,7 +96,7 @@ hard_reset_now(void)
*/
unsigned long thread_saved_pc(struct task_struct *t)
{
- return (unsigned long)user_regs(t->thread_info)->erp;
+ return task_pt_regs(t)->erp;
}
static void
@@ -148,7 +148,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
* fix it up. Note: the task_struct doubles as the kernel stack for the
* task.
*/
- childregs = user_regs(p->thread_info);
+ childregs = task_pt_regs(p);
*childregs = *regs; /* Struct copy of pt_regs. */
p->set_child_tid = p->clear_child_tid = NULL;
childregs->r10 = 0; /* Child returns 0 after a fork/clone. */
@@ -157,7 +157,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
* The TLS is in $mof because it is the 5th argument to sys_clone.
*/
if (p->mm && (clone_flags & CLONE_SETTLS)) {
- p->thread_info->tls = regs->mof;
+ task_thread_info(p)->tls = regs->mof;
}
/* Put the switch stack right below the pt_regs. */
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index 5528b83a622..82cf2e3624a 100644
--- a/arch/cris/arch-v32/kernel/ptrace.c
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -46,7 +46,7 @@ long get_reg(struct task_struct *task, unsigned int regno)
unsigned long ret;
if (regno <= PT_EDA)
- ret = ((unsigned long *)user_regs(task->thread_info))[regno];
+ ret = ((unsigned long *)task_pt_regs(task))[regno];
else if (regno == PT_USP)
ret = task->thread.usp;
else if (regno == PT_PPC)
@@ -65,13 +65,13 @@ long get_reg(struct task_struct *task, unsigned int regno)
int put_reg(struct task_struct *task, unsigned int regno, unsigned long data)
{
if (regno <= PT_EDA)
- ((unsigned long *)user_regs(task->thread_info))[regno] = data;
+ ((unsigned long *)task_pt_regs(task))[regno] = data;
else if (regno == PT_USP)
task->thread.usp = data;
else if (regno == PT_PPC) {
/* Write pseudo-PC to ERP only if changed. */
if (data != get_pseudo_pc(task))
- ((unsigned long *)user_regs(task->thread_info))[PT_ERP] = data;
+ task_pt_regs(task)->erp = data;
} else if (regno <= PT_MAX)
return put_debugreg(task->pid, regno, data);
else
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index 13867f4fad1..da40d19a151 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -113,10 +113,10 @@ smp_boot_one_cpu(int cpuid)
if (IS_ERR(idle))
panic("SMP: fork failed for CPU:%d", cpuid);
- idle->thread_info->cpu = cpuid;
+ task_thread_info(idle)->cpu = cpuid;
/* Information to the CPU that is about to boot */
- smp_init_current_idle_thread = idle->thread_info;
+ smp_init_current_idle_thread = task_thread_info(idle);
cpu_now_booting = cpuid;
/* Wait for CPU to come online */
diff --git a/arch/cris/arch-v32/mm/tlb.c b/arch/cris/arch-v32/mm/tlb.c
index b08a28bb58a..9d75d769230 100644
--- a/arch/cris/arch-v32/mm/tlb.c
+++ b/arch/cris/arch-v32/mm/tlb.c
@@ -198,9 +198,9 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
per_cpu(current_pgd, cpu) = next->pgd;
/* Switch context in the MMU. */
- if (tsk && tsk->thread_info)
+ if (tsk && task_thread_info(tsk))
{
- SPEC_REG_WR(SPEC_REG_PID, next->context.page_id | tsk->thread_info->tls);
+ SPEC_REG_WR(SPEC_REG_PID, next->context.page_id | task_thread_info(tsk)->tls);
}
else
{
diff --git a/arch/cris/kernel/crisksyms.c b/arch/cris/kernel/crisksyms.c
index 85833d704eb..de39725da92 100644
--- a/arch/cris/kernel/crisksyms.c
+++ b/arch/cris/kernel/crisksyms.c
@@ -21,7 +21,6 @@
#include <asm/pgtable.h>
#include <asm/fasttimer.h>
-extern void dump_thread(struct pt_regs *, struct user *);
extern unsigned long get_cmos_time(void);
extern void __Udiv(void);
extern void __Umod(void);
@@ -33,7 +32,6 @@ extern void __lshrdi3(void);
extern void iounmap(volatile void * __iomem);
/* Platform dependent support */
-EXPORT_SYMBOL(dump_thread);
EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(get_cmos_time);
EXPORT_SYMBOL(loops_per_usec);
diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c
index 7c80afb1046..4ab3e87115b 100644
--- a/arch/cris/kernel/process.c
+++ b/arch/cris/kernel/process.c
@@ -257,34 +257,6 @@ void flush_thread(void)
{
}
-/*
- * fill in the user structure for a core dump..
- */
-void dump_thread(struct pt_regs * regs, struct user * dump)
-{
-#if 0
- int i;
-
- /* changed the size calculations - should hopefully work better. lbt */
- dump->magic = CMAGIC;
- dump->start_code = 0;
- dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
- dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
- dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
- dump->u_dsize -= dump->u_tsize;
- dump->u_ssize = 0;
- for (i = 0; i < 8; i++)
- dump->u_debugreg[i] = current->debugreg[i];
-
- if (dump->start_stack < TASK_SIZE)
- dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
-
- dump->regs = *regs;
-
- dump->u_fpvalid = dump_fpu (regs, &dump->i387);
-#endif
-}
-
/* Fill in the fpu structure for a core dump. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index ec85c0d6c6d..60a617aff8b 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -6,10 +6,6 @@ config FRV
bool
default y
-config UID16
- bool
- default y
-
config RWSEM_GENERIC_SPINLOCK
bool
default y
@@ -274,6 +270,11 @@ config GPREL_DATA_NONE
endchoice
+config FRV_ONCPU_SERIAL
+ bool "Use on-CPU serial ports"
+ select SERIAL_8250
+ default y
+
config PCI
bool "Use PCI"
depends on MB93090_MB00
@@ -305,23 +306,7 @@ config RESERVE_DMA_COHERENT
source "drivers/pci/Kconfig"
-config PCMCIA
- tristate "Use PCMCIA"
- help
- Say Y here if you want to attach PCMCIA- or PC-cards to your FR-V
- board. These are credit-card size devices such as network cards,
- modems or hard drives often used with laptops computers. There are
- actually two varieties of these cards: the older 16 bit PCMCIA cards
- and the newer 32 bit CardBus cards. If you want to use CardBus
- cards, you need to say Y here and also to "CardBus support" below.
-
- To use your PC-cards, you will need supporting software from David
- Hinds pcmcia-cs package (see the file <file:Documentation/Changes>
- for location). Please also read the PCMCIA-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as modules, choose M here: the
- modules will be called pcmcia_core and ds.
+source "drivers/pcmcia/Kconfig"
#config MATH_EMULATION
# bool "Math emulation support (EXPERIMENTAL)"
diff --git a/arch/frv/Kconfig.debug b/arch/frv/Kconfig.debug
index 0034b654995..211f01bc4ca 100644
--- a/arch/frv/Kconfig.debug
+++ b/arch/frv/Kconfig.debug
@@ -2,32 +2,10 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
-config EARLY_PRINTK
- bool "Early printk"
- depends on EMBEDDED && DEBUG_KERNEL
- default n
- help
- Write kernel log output directly into the VGA buffer or to a serial
- port.
-
- This is useful for kernel debugging when your machine crashes very
- early before the console code is initialized. For normal operation
- it is not recommended because it looks ugly and doesn't cooperate
- with klogd/syslogd or the X server. You should normally N here,
- unless you want to debug such a crash.
-
config DEBUG_STACKOVERFLOW
bool "Check for stack overflows"
depends on DEBUG_KERNEL
-config DEBUG_PAGEALLOC
- bool "Page alloc debugging"
- depends on DEBUG_KERNEL
- help
- Unmap pages from the kernel linear mapping after free_pages().
- This results in a large slowdown, but helps to find certain types
- of memory corruptions.
-
config GDBSTUB
bool "Remote GDB kernel debugging"
depends on DEBUG_KERNEL
diff --git a/arch/frv/Makefile b/arch/frv/Makefile
index 54046d2386f..90c0fb8d9dc 100644
--- a/arch/frv/Makefile
+++ b/arch/frv/Makefile
@@ -109,10 +109,10 @@ bootstrap:
$(Q)$(MAKEBOOT) bootstrap
archmrproper:
- $(Q)$(MAKE) -C arch/frv/boot mrproper
+ $(Q)$(MAKE) $(build)=arch/frv/boot mrproper
archclean:
- $(Q)$(MAKE) -C arch/frv/boot clean
+ $(Q)$(MAKE) $(build)=arch/frv/boot clean
archdep: scripts/mkdep symlinks
- $(Q)$(MAKE) -C arch/frv/boot dep
+ $(Q)$(MAKE) $(build)=arch/frv/boot dep
diff --git a/arch/frv/boot/Makefile b/arch/frv/boot/Makefile
index d75e0d77136..5dfc93fd945 100644
--- a/arch/frv/boot/Makefile
+++ b/arch/frv/boot/Makefile
@@ -57,10 +57,10 @@ initrd:
# installation
#
install: $(CONFIGURE) Image
- sh ./install.sh $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) Image $(TOPDIR)/System.map "$(INSTALL_PATH)"
+ sh ./install.sh $(KERNELRELEASE) Image $(TOPDIR)/System.map "$(INSTALL_PATH)"
zinstall: $(CONFIGURE) zImage
- sh ./install.sh $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) zImage $(TOPDIR)/System.map "$(INSTALL_PATH)"
+ sh ./install.sh $(KERNELRELEASE) zImage $(TOPDIR)/System.map "$(INSTALL_PATH)"
#
# miscellany
diff --git a/arch/frv/kernel/Makefile b/arch/frv/kernel/Makefile
index 981c2c7dec0..5a827b349b5 100644
--- a/arch/frv/kernel/Makefile
+++ b/arch/frv/kernel/Makefile
@@ -20,3 +20,5 @@ obj-$(CONFIG_FUJITSU_MB93493) += irq-mb93493.o
obj-$(CONFIG_PM) += pm.o cmode.o
obj-$(CONFIG_MB93093_PDK) += pm-mb93093.o
obj-$(CONFIG_SYSCTL) += sysctl.o
+obj-$(CONFIG_FUTEX) += futex.o
+obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index ad10ea59545..5f6548388b7 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -1076,7 +1076,7 @@ __entry_work_notifysig:
LEDS 0x6410
ori.p gr4,#0,gr8
call do_notify_resume
- bra __entry_return_direct
+ bra __entry_resume_userspace
# perform syscall entry tracing
__syscall_trace_entry:
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
index 1a76d524719..0f1c6cbc4f5 100644
--- a/arch/frv/kernel/frv_ksyms.c
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -16,17 +16,16 @@
#include <asm/semaphore.h>
#include <asm/checksum.h>
#include <asm/hardirq.h>
-#include <asm/current.h>
+#include <asm/cacheflush.h>
-extern void dump_thread(struct pt_regs *, struct user *);
extern long __memcpy_user(void *dst, const void *src, size_t count);
+extern long __memset_user(void *dst, const void *src, size_t count);
/* platform dependent support */
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
-EXPORT_SYMBOL(dump_thread);
EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strstr);
@@ -50,7 +49,11 @@ EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(__res_bus_clock_speed_HZ);
EXPORT_SYMBOL(__page_offset);
EXPORT_SYMBOL(__memcpy_user);
-EXPORT_SYMBOL(flush_dcache_page);
+EXPORT_SYMBOL(__memset_user);
+EXPORT_SYMBOL(frv_dcache_writeback);
+EXPORT_SYMBOL(frv_cache_invalidate);
+EXPORT_SYMBOL(frv_icache_invalidate);
+EXPORT_SYMBOL(frv_cache_wback_inv);
#ifndef CONFIG_MMU
EXPORT_SYMBOL(memory_start);
@@ -72,6 +75,9 @@ EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(__outsl_ns);
+EXPORT_SYMBOL(__insl_ns);
+
EXPORT_SYMBOL(get_wchan);
#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
@@ -80,14 +86,13 @@ EXPORT_SYMBOL(atomic_test_and_OR_mask);
EXPORT_SYMBOL(atomic_test_and_XOR_mask);
EXPORT_SYMBOL(atomic_add_return);
EXPORT_SYMBOL(atomic_sub_return);
-EXPORT_SYMBOL(__xchg_8);
-EXPORT_SYMBOL(__xchg_16);
EXPORT_SYMBOL(__xchg_32);
-EXPORT_SYMBOL(__cmpxchg_8);
-EXPORT_SYMBOL(__cmpxchg_16);
EXPORT_SYMBOL(__cmpxchg_32);
#endif
+EXPORT_SYMBOL(__debug_bug_printk);
+EXPORT_SYMBOL(__delay_loops_MHz);
+
/*
* libgcc functions - functions that are used internally by the
* compiler... (prototypes are not correct though, but that
@@ -101,6 +106,8 @@ extern void __divdi3(void);
extern void __lshrdi3(void);
extern void __moddi3(void);
extern void __muldi3(void);
+extern void __mulll(void);
+extern void __umulll(void);
extern void __negdi2(void);
extern void __ucmpdi2(void);
extern void __udivdi3(void);
@@ -116,8 +123,10 @@ EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__lshrdi3);
//EXPORT_SYMBOL(__moddi3);
EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__mulll);
+EXPORT_SYMBOL(__umulll);
EXPORT_SYMBOL(__negdi2);
-//EXPORT_SYMBOL(__ucmpdi2);
+EXPORT_SYMBOL(__ucmpdi2);
//EXPORT_SYMBOL(__udivdi3);
//EXPORT_SYMBOL(__udivmoddi4);
//EXPORT_SYMBOL(__umoddi3);
diff --git a/arch/frv/kernel/futex.c b/arch/frv/kernel/futex.c
new file mode 100644
index 00000000000..eae874a970c
--- /dev/null
+++ b/arch/frv/kernel/futex.c
@@ -0,0 +1,242 @@
+/* futex.c: futex operations
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/futex.h>
+#include <asm/futex.h>
+#include <asm/errno.h>
+#include <asm/uaccess.h>
+
+/*
+ * the various futex operations; MMU fault checking is ignored under no-MMU
+ * conditions
+ */
+static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_oldval)
+{
+ int oldval, ret;
+
+ asm("0: \n"
+ " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
+ " ckeq icc3,cc7 \n"
+ "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
+ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
+ "2: cst.p %3,%M0 ,cc3,#1 \n"
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
+ " beq icc3,#0,0b \n"
+ " setlos 0,%2 \n"
+ "3: \n"
+ ".subsection 2 \n"
+ "4: setlos %5,%2 \n"
+ " bra 3b \n"
+ ".previous \n"
+ ".section __ex_table,\"a\" \n"
+ " .balign 8 \n"
+ " .long 1b,4b \n"
+ " .long 2b,4b \n"
+ ".previous"
+ : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
+ : "3"(oparg), "i"(-EFAULT)
+ : "memory", "cc7", "cc3", "icc3"
+ );
+
+ *_oldval = oldval;
+ return ret;
+}
+
+static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_oldval)
+{
+ int oldval, ret;
+
+ asm("0: \n"
+ " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
+ " ckeq icc3,cc7 \n"
+ "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
+ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
+ " add %1,%3,%3 \n"
+ "2: cst.p %3,%M0 ,cc3,#1 \n"
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
+ " beq icc3,#0,0b \n"
+ " setlos 0,%2 \n"
+ "3: \n"
+ ".subsection 2 \n"
+ "4: setlos %5,%2 \n"
+ " bra 3b \n"
+ ".previous \n"
+ ".section __ex_table,\"a\" \n"
+ " .balign 8 \n"
+ " .long 1b,4b \n"
+ " .long 2b,4b \n"
+ ".previous"
+ : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
+ : "3"(oparg), "i"(-EFAULT)
+ : "memory", "cc7", "cc3", "icc3"
+ );
+
+ *_oldval = oldval;
+ return ret;
+}
+
+static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_oldval)
+{
+ int oldval, ret;
+
+ asm("0: \n"
+ " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
+ " ckeq icc3,cc7 \n"
+ "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
+ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
+ " or %1,%3,%3 \n"
+ "2: cst.p %3,%M0 ,cc3,#1 \n"
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
+ " beq icc3,#0,0b \n"
+ " setlos 0,%2 \n"
+ "3: \n"
+ ".subsection 2 \n"
+ "4: setlos %5,%2 \n"
+ " bra 3b \n"
+ ".previous \n"
+ ".section __ex_table,\"a\" \n"
+ " .balign 8 \n"
+ " .long 1b,4b \n"
+ " .long 2b,4b \n"
+ ".previous"
+ : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
+ : "3"(oparg), "i"(-EFAULT)
+ : "memory", "cc7", "cc3", "icc3"
+ );
+
+ *_oldval = oldval;
+ return ret;
+}
+
+static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_oldval)
+{
+ int oldval, ret;
+
+ asm("0: \n"
+ " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
+ " ckeq icc3,cc7 \n"
+ "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
+ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
+ " and %1,%3,%3 \n"
+ "2: cst.p %3,%M0 ,cc3,#1 \n"
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
+ " beq icc3,#0,0b \n"
+ " setlos 0,%2 \n"
+ "3: \n"
+ ".subsection 2 \n"
+ "4: setlos %5,%2 \n"
+ " bra 3b \n"
+ ".previous \n"
+ ".section __ex_table,\"a\" \n"
+ " .balign 8 \n"
+ " .long 1b,4b \n"
+ " .long 2b,4b \n"
+ ".previous"
+ : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
+ : "3"(oparg), "i"(-EFAULT)
+ : "memory", "cc7", "cc3", "icc3"
+ );
+
+ *_oldval = oldval;
+ return ret;
+}
+
+static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_oldval)
+{
+ int oldval, ret;
+
+ asm("0: \n"
+ " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
+ " ckeq icc3,cc7 \n"
+ "1: ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
+ " orcr cc7,cc7,cc3 \n" /* set CC3 to true */
+ " xor %1,%3,%3 \n"
+ "2: cst.p %3,%M0 ,cc3,#1 \n"
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
+ " beq icc3,#0,0b \n"
+ " setlos 0,%2 \n"
+ "3: \n"
+ ".subsection 2 \n"
+ "4: setlos %5,%2 \n"
+ " bra 3b \n"
+ ".previous \n"
+ ".section __ex_table,\"a\" \n"
+ " .balign 8 \n"
+ " .long 1b,4b \n"
+ " .long 2b,4b \n"
+ ".previous"
+ : "+U"(*uaddr), "=&r"(oldval), "=&r"(ret), "=r"(oparg)
+ : "3"(oparg), "i"(-EFAULT)
+ : "memory", "cc7", "cc3", "icc3"
+ );
+
+ *_oldval = oldval;
+ return ret;
+}
+
+/*****************************************************************************/
+/*
+ * do the futex operations
+ */
+int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ inc_preempt_count();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ ret = atomic_futex_op_xchg_set(oparg, uaddr, &oldval);
+ break;
+ case FUTEX_OP_ADD:
+ ret = atomic_futex_op_xchg_add(oparg, uaddr, &oldval);
+ break;
+ case FUTEX_OP_OR:
+ ret = atomic_futex_op_xchg_or(oparg, uaddr, &oldval);
+ break;
+ case FUTEX_OP_ANDN:
+ ret = atomic_futex_op_xchg_and(~oparg, uaddr, &oldval);
+ break;
+ case FUTEX_OP_XOR:
+ ret = atomic_futex_op_xchg_xor(oparg, uaddr, &oldval);
+ break;
+ default:
+ ret = -ENOSYS;
+ break;
+ }
+
+ dec_preempt_count();
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+ case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+ case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+ case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+ case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+ case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+ default: ret = -ENOSYS; break;
+ }
+ }
+
+ return ret;
+
+} /* end futex_atomic_op_inuser() */
diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c
index 8c524cdd271..59580c59c62 100644
--- a/arch/frv/kernel/irq.c
+++ b/arch/frv/kernel/irq.c
@@ -32,6 +32,7 @@
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/module.h>
#include <asm/atomic.h>
#include <asm/io.h>
@@ -178,6 +179,8 @@ void disable_irq_nosync(unsigned int irq)
spin_unlock_irqrestore(&level->lock, flags);
}
+EXPORT_SYMBOL(disable_irq_nosync);
+
/**
* disable_irq - disable an irq and wait for completion
* @irq: Interrupt to disable
@@ -204,6 +207,8 @@ void disable_irq(unsigned int irq)
#endif
}
+EXPORT_SYMBOL(disable_irq);
+
/**
* enable_irq - enable handling of an irq
* @irq: Interrupt to enable
@@ -268,6 +273,8 @@ void enable_irq(unsigned int irq)
spin_unlock_irqrestore(&level->lock, flags);
}
+EXPORT_SYMBOL(enable_irq);
+
/*****************************************************************************/
/*
* handles all normal device IRQ's
@@ -425,6 +432,8 @@ int request_irq(unsigned int irq,
return retval;
}
+EXPORT_SYMBOL(request_irq);
+
/**
* free_irq - free an interrupt
* @irq: Interrupt line to free
@@ -496,6 +505,8 @@ void free_irq(unsigned int irq, void *dev_id)
}
}
+EXPORT_SYMBOL(free_irq);
+
/*
* IRQ autodetection code..
*
@@ -519,6 +530,8 @@ unsigned long probe_irq_on(void)
return 0;
}
+EXPORT_SYMBOL(probe_irq_on);
+
/*
* Return a mask of triggered interrupts (this
* can handle only legacy ISA interrupts).
@@ -542,6 +555,8 @@ unsigned int probe_irq_mask(unsigned long xmask)
return 0;
}
+EXPORT_SYMBOL(probe_irq_mask);
+
/*
* Return the one interrupt that triggered (this can
* handle any interrupt source).
@@ -571,6 +586,8 @@ int probe_irq_off(unsigned long xmask)
return -1;
}
+EXPORT_SYMBOL(probe_irq_off);
+
/* this was setup_x86_irq but it seems pretty generic */
int setup_irq(unsigned int irq, struct irqaction *new)
{
diff --git a/arch/frv/kernel/module.c b/arch/frv/kernel/module.c
new file mode 100644
index 00000000000..850d168f69f
--- /dev/null
+++ b/arch/frv/kernel/module.c
@@ -0,0 +1,80 @@
+/* module.c: FRV specific module loading bits
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ * - Derived from arch/i386/kernel/module.c, Copyright (C) 2001 Rusty Russell.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt...)
+#endif
+
+void *module_alloc(unsigned long size)
+{
+ if (size == 0)
+ return NULL;
+
+ return vmalloc_exec(size);
+}
+
+
+/* Free memory returned from module_alloc */
+void module_free(struct module *mod, void *module_region)
+{
+ vfree(module_region);
+ /* FIXME: If module_region == mod->init_region, trim exception
+ table entries. */
+}
+
+/* We don't need anything special. */
+int module_frob_arch_sections(Elf_Ehdr *hdr,
+ Elf_Shdr *sechdrs,
+ char *secstrings,
+ struct module *mod)
+{
+ return 0;
+}
+
+int apply_relocate(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n", me->name);
+ return -ENOEXEC;
+}
+
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n", me->name);
+ return -ENOEXEC;
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+}
diff --git a/arch/frv/kernel/pm.c b/arch/frv/kernel/pm.c
index 712c3c24c95..f0b8fff3e73 100644
--- a/arch/frv/kernel/pm.c
+++ b/arch/frv/kernel/pm.c
@@ -13,6 +13,7 @@
#include <linux/config.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/pm.h>
#include <linux/pm_legacy.h>
#include <linux/sched.h>
@@ -27,6 +28,7 @@
#include "local.h"
void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
extern void frv_change_cmode(int);
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index 54a452136f0..0fff8a61ef2 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -204,7 +204,7 @@ int copy_thread(int nr, unsigned long clone_flags,
regs0 = __kernel_frame0_ptr;
childregs0 = (struct pt_regs *)
- ((unsigned long) p->thread_info + THREAD_SIZE - USER_CONTEXT_SIZE);
+ (task_stack_page(p) + THREAD_SIZE - USER_CONTEXT_SIZE);
childregs = childregs0;
/* set up the userspace frame (the only place that the USP is stored) */
@@ -220,7 +220,7 @@ int copy_thread(int nr, unsigned long clone_flags,
*childregs = *regs;
childregs->sp = (unsigned long) childregs0;
childregs->next_frame = childregs0;
- childregs->gr15 = (unsigned long) p->thread_info;
+ childregs->gr15 = (unsigned long) task_thread_info(p);
childregs->gr29 = (unsigned long) p;
}
@@ -244,28 +244,6 @@ int copy_thread(int nr, unsigned long clone_flags,
} /* end copy_thread() */
/*
- * fill in the user structure for a core dump..
- */
-void dump_thread(struct pt_regs *regs, struct user *dump)
-{
-#if 0
- /* changed the size calculations - should hopefully work better. lbt */
- dump->magic = CMAGIC;
- dump->start_code = 0;
- dump->start_stack = user_stack(regs) & ~(PAGE_SIZE - 1);
- dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
- dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
- dump->u_dsize -= dump->u_tsize;
- dump->u_ssize = 0;
-
- if (dump->start_stack < TASK_SIZE)
- dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
-
- dump->regs = *(struct user_context *) regs;
-#endif
-}
-
-/*
* sys_execve() executes a new program.
*/
asmlinkage int sys_execve(char *name, char **argv, char **envp)
diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c
index 767ebb55bd8..5908deae960 100644
--- a/arch/frv/kernel/setup.c
+++ b/arch/frv/kernel/setup.c
@@ -787,6 +787,7 @@ void __init setup_arch(char **cmdline_p)
#endif
/* register those serial ports that are available */
+#ifdef CONFIG_FRV_ONCPU_SERIAL
#ifndef CONFIG_GDBSTUB_UART0
__reg(UART0_BASE + UART_IER * 8) = 0;
early_serial_setup(&__frv_uart0);
@@ -795,6 +796,7 @@ void __init setup_arch(char **cmdline_p)
__reg(UART1_BASE + UART_IER * 8) = 0;
early_serial_setup(&__frv_uart1);
#endif
+#endif
#if defined(CONFIG_CHR_DEV_FLASH) || defined(CONFIG_BLK_DEV_FLASH)
/* we need to initialize the Flashrom device here since we might
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index d4ccc0728df..5b7146f54fd 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -35,7 +35,7 @@ struct fdpic_func_descriptor {
unsigned long GOT;
};
-asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
+static int do_signal(sigset_t *oldset);
/*
* Atomically swap in the new signal mask, and wait for a signal.
@@ -55,7 +55,7 @@ asmlinkage int sys_sigsuspend(int history0, int history1, old_sigset_t mask)
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
- if (do_signal(__frame, &saveset))
+ if (do_signal(&saveset))
/* return the signal number as the return value of this function
* - this is an utterly evil hack. syscalls should not invoke do_signal()
* as entry.S sets regs->gr8 to the return value of the system call
@@ -91,7 +91,7 @@ asmlinkage int sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
- if (do_signal(__frame, &saveset))
+ if (do_signal(&saveset))
/* return the signal number as the return value of this function
* - this is an utterly evil hack. syscalls should not invoke do_signal()
* as entry.S sets regs->gr8 to the return value of the system call
@@ -276,13 +276,12 @@ static int setup_sigcontext(struct sigcontext __user *sc, unsigned long mask)
* Determine which stack to use..
*/
static inline void __user *get_sigframe(struct k_sigaction *ka,
- struct pt_regs *regs,
size_t frame_size)
{
unsigned long sp;
/* Default to using normal stack */
- sp = regs->sp;
+ sp = __frame->sp;
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
@@ -291,18 +290,19 @@ static inline void __user *get_sigframe(struct k_sigaction *ka,
}
return (void __user *) ((sp - frame_size) & ~7UL);
+
} /* end get_sigframe() */
/*****************************************************************************/
/*
*
*/
-static void setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs * regs)
+static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
{
struct sigframe __user *frame;
int rsig;
- frame = get_sigframe(ka, regs, sizeof(*frame));
+ frame = get_sigframe(ka, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
@@ -346,47 +346,51 @@ static void setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, struct p
}
/* set up registers for signal handler */
- regs->sp = (unsigned long) frame;
- regs->lr = (unsigned long) &frame->retcode;
- regs->gr8 = sig;
+ __frame->sp = (unsigned long) frame;
+ __frame->lr = (unsigned long) &frame->retcode;
+ __frame->gr8 = sig;
if (get_personality & FDPIC_FUNCPTRS) {
struct fdpic_func_descriptor __user *funcptr =
(struct fdpic_func_descriptor *) ka->sa.sa_handler;
- __get_user(regs->pc, &funcptr->text);
- __get_user(regs->gr15, &funcptr->GOT);
+ __get_user(__frame->pc, &funcptr->text);
+ __get_user(__frame->gr15, &funcptr->GOT);
} else {
- regs->pc = (unsigned long) ka->sa.sa_handler;
- regs->gr15 = 0;
+ __frame->pc = (unsigned long) ka->sa.sa_handler;
+ __frame->gr15 = 0;
}
set_fs(USER_DS);
+ /* the tracer may want to single-step inside the handler */
+ if (test_thread_flag(TIF_SINGLESTEP))
+ ptrace_notify(SIGTRAP);
+
#if DEBUG_SIG
printk("SIG deliver %d (%s:%d): sp=%p pc=%lx ra=%p\n",
- sig, current->comm, current->pid, frame, regs->pc, frame->pretcode);
+ sig, current->comm, current->pid, frame, __frame->pc,
+ frame->pretcode);
#endif
- return;
+ return 1;
give_sigsegv:
- if (sig == SIGSEGV)
- ka->sa.sa_handler = SIG_DFL;
-
force_sig(SIGSEGV, current);
+ return 0;
+
} /* end setup_frame() */
/*****************************************************************************/
/*
*
*/
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
- sigset_t *set, struct pt_regs * regs)
+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set)
{
struct rt_sigframe __user *frame;
int rsig;
- frame = get_sigframe(ka, regs, sizeof(*frame));
+ frame = get_sigframe(ka, sizeof(*frame));
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
@@ -409,7 +413,7 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
if (__put_user(0, &frame->uc.uc_flags) ||
__put_user(0, &frame->uc.uc_link) ||
__put_user((void*)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp) ||
- __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags) ||
+ __put_user(sas_ss_flags(__frame->sp), &frame->uc.uc_stack.ss_flags) ||
__put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size))
goto give_sigsegv;
@@ -440,34 +444,38 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
}
/* Set up registers for signal handler */
- regs->sp = (unsigned long) frame;
- regs->lr = (unsigned long) &frame->retcode;
- regs->gr8 = sig;
- regs->gr9 = (unsigned long) &frame->info;
+ __frame->sp = (unsigned long) frame;
+ __frame->lr = (unsigned long) &frame->retcode;
+ __frame->gr8 = sig;
+ __frame->gr9 = (unsigned long) &frame->info;
if (get_personality & FDPIC_FUNCPTRS) {
struct fdpic_func_descriptor *funcptr =
(struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
- __get_user(regs->pc, &funcptr->text);
- __get_user(regs->gr15, &funcptr->GOT);
+ __get_user(__frame->pc, &funcptr->text);
+ __get_user(__frame->gr15, &funcptr->GOT);
} else {
- regs->pc = (unsigned long) ka->sa.sa_handler;
- regs->gr15 = 0;
+ __frame->pc = (unsigned long) ka->sa.sa_handler;
+ __frame->gr15 = 0;
}
set_fs(USER_DS);
+ /* the tracer may want to single-step inside the handler */
+ if (test_thread_flag(TIF_SINGLESTEP))
+ ptrace_notify(SIGTRAP);
+
#if DEBUG_SIG
printk("SIG deliver %d (%s:%d): sp=%p pc=%lx ra=%p\n",
- sig, current->comm, current->pid, frame, regs->pc, frame->pretcode);
+ sig, current->comm, current->pid, frame, __frame->pc,
+ frame->pretcode);
#endif
- return;
+ return 1;
give_sigsegv:
- if (sig == SIGSEGV)
- ka->sa.sa_handler = SIG_DFL;
force_sig(SIGSEGV, current);
+ return 0;
} /* end setup_rt_frame() */
@@ -475,43 +483,51 @@ give_sigsegv:
/*
* OK, we're invoking a handler
*/
-static void handle_signal(unsigned long sig, siginfo_t *info,
- struct k_sigaction *ka, sigset_t *oldset,
- struct pt_regs *regs)
+static int handle_signal(unsigned long sig, siginfo_t *info,
+ struct k_sigaction *ka, sigset_t *oldset)
{
+ int ret;
+
/* Are we from a system call? */
- if (in_syscall(regs)) {
+ if (in_syscall(__frame)) {
/* If so, check system call restarting.. */
- switch (regs->gr8) {
+ switch (__frame->gr8) {
case -ERESTART_RESTARTBLOCK:
case -ERESTARTNOHAND:
- regs->gr8 = -EINTR;
+ __frame->gr8 = -EINTR;
break;
case -ERESTARTSYS:
if (!(ka->sa.sa_flags & SA_RESTART)) {
- regs->gr8 = -EINTR;
+ __frame->gr8 = -EINTR;
break;
}
+
/* fallthrough */
case -ERESTARTNOINTR:
- regs->gr8 = regs->orig_gr8;
- regs->pc -= 4;
+ __frame->gr8 = __frame->orig_gr8;
+ __frame->pc -= 4;
}
}
/* Set up the stack frame */
if (ka->sa.sa_flags & SA_SIGINFO)
- setup_rt_frame(sig, ka, info, oldset, regs);
+ ret = setup_rt_frame(sig, ka, info, oldset);
else
- setup_frame(sig, ka, oldset, regs);
+ ret = setup_frame(sig, ka, oldset);
+
+ if (ret) {
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked, &current->blocked,
+ &ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&current->blocked, sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ }
+
+ return ret;
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
- if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked, sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
} /* end handle_signal() */
/*****************************************************************************/
@@ -520,7 +536,7 @@ static void handle_signal(unsigned long sig, siginfo_t *info,
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
-int do_signal(struct pt_regs *regs, sigset_t *oldset)
+static int do_signal(sigset_t *oldset)
{
struct k_sigaction ka;
siginfo_t info;
@@ -532,7 +548,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
* kernel mode. Just return without doing anything
* if so.
*/
- if (!user_mode(regs))
+ if (!user_mode(__frame))
return 1;
if (try_to_freeze())
@@ -541,30 +557,29 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
if (!oldset)
oldset = &current->blocked;
- signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- if (signr > 0) {
- handle_signal(signr, &info, &ka, oldset, regs);
- return 1;
- }
+ signr = get_signal_to_deliver(&info, &ka, __frame, NULL);
+ if (signr > 0)
+ return handle_signal(signr, &info, &ka, oldset);
- no_signal:
+no_signal:
/* Did we come from a system call? */
- if (regs->syscallno >= 0) {
+ if (__frame->syscallno >= 0) {
/* Restart the system call - no handlers present */
- if (regs->gr8 == -ERESTARTNOHAND ||
- regs->gr8 == -ERESTARTSYS ||
- regs->gr8 == -ERESTARTNOINTR) {
- regs->gr8 = regs->orig_gr8;
- regs->pc -= 4;
+ if (__frame->gr8 == -ERESTARTNOHAND ||
+ __frame->gr8 == -ERESTARTSYS ||
+ __frame->gr8 == -ERESTARTNOINTR) {
+ __frame->gr8 = __frame->orig_gr8;
+ __frame->pc -= 4;
}
- if (regs->gr8 == -ERESTART_RESTARTBLOCK){
- regs->gr8 = __NR_restart_syscall;
- regs->pc -= 4;
+ if (__frame->gr8 == -ERESTART_RESTARTBLOCK){
+ __frame->gr8 = __NR_restart_syscall;
+ __frame->pc -= 4;
}
}
return 0;
+
} /* end do_signal() */
/*****************************************************************************/
@@ -580,6 +595,6 @@ asmlinkage void do_notify_resume(__u32 thread_info_flags)
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
- do_signal(__frame, NULL);
+ do_signal(NULL);
} /* end do_notify_resume() */
diff --git a/arch/frv/kernel/time.c b/arch/frv/kernel/time.c
index 2e9741227b7..24cf85f89e4 100644
--- a/arch/frv/kernel/time.c
+++ b/arch/frv/kernel/time.c
@@ -189,6 +189,8 @@ void do_gettimeofday(struct timeval *tv)
tv->tv_usec = usec;
}
+EXPORT_SYMBOL(do_gettimeofday);
+
int do_settimeofday(struct timespec *tv)
{
time_t wtm_sec, sec = tv->tv_sec;
@@ -218,6 +220,7 @@ int do_settimeofday(struct timespec *tv)
clock_was_set();
return 0;
}
+
EXPORT_SYMBOL(do_settimeofday);
/*
diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c
index 89073cae4b5..9eb84b2e6ab 100644
--- a/arch/frv/kernel/traps.c
+++ b/arch/frv/kernel/traps.c
@@ -19,6 +19,7 @@
#include <linux/string.h>
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <asm/setup.h>
#include <asm/fpu.h>
@@ -250,6 +251,8 @@ void dump_stack(void)
show_stack(NULL, NULL);
}
+EXPORT_SYMBOL(dump_stack);
+
void show_stack(struct task_struct *task, unsigned long *sp)
{
}
diff --git a/arch/frv/kernel/uaccess.c b/arch/frv/kernel/uaccess.c
index f3fd58a5bc4..9b751c0f0e8 100644
--- a/arch/frv/kernel/uaccess.c
+++ b/arch/frv/kernel/uaccess.c
@@ -10,6 +10,7 @@
*/
#include <linux/mm.h>
+#include <linux/module.h>
#include <asm/uaccess.h>
/*****************************************************************************/
@@ -58,8 +59,11 @@ long strncpy_from_user(char *dst, const char *src, long count)
memset(p, 0, count); /* clear remainder of buffer [security] */
return err;
+
} /* end strncpy_from_user() */
+EXPORT_SYMBOL(strncpy_from_user);
+
/*****************************************************************************/
/*
* Return the size of a string (including the ending 0)
@@ -92,4 +96,7 @@ long strnlen_user(const char *src, long count)
}
return p - src + 1; /* return length including NUL */
+
} /* end strnlen_user() */
+
+EXPORT_SYMBOL(strnlen_user);
diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S
index fceafd2cc20..f474534ba78 100644
--- a/arch/frv/kernel/vmlinux.lds.S
+++ b/arch/frv/kernel/vmlinux.lds.S
@@ -112,6 +112,7 @@ SECTIONS
#endif
)
SCHED_TEXT
+ LOCK_TEXT
*(.fixup)
*(.gnu.warning)
*(.exitcall.exit)
diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile
index 19be2626d5e..08be305c9f4 100644
--- a/arch/frv/lib/Makefile
+++ b/arch/frv/lib/Makefile
@@ -3,6 +3,6 @@
#
lib-y := \
- __ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o \
+ __ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
checksum.o memcpy.o memset.o atomic-ops.o \
outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o
diff --git a/arch/frv/lib/__ucmpdi2.S b/arch/frv/lib/__ucmpdi2.S
new file mode 100644
index 00000000000..d892f16ffaa
--- /dev/null
+++ b/arch/frv/lib/__ucmpdi2.S
@@ -0,0 +1,45 @@
+/* __ucmpdi2.S: 64-bit unsigned compare
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+
+ .text
+ .p2align 4
+
+###############################################################################
+#
+# int __ucmpdi2(unsigned long long a [GR8:GR9],
+# unsigned long long b [GR10:GR11])
+#
+# - returns 0, 1, or 2 as a <, =, > b respectively.
+#
+###############################################################################
+ .globl __ucmpdi2
+ .type __ucmpdi2,@function
+__ucmpdi2:
+ or.p gr8,gr0,gr4
+ subcc gr8,gr10,gr0,icc0
+ setlos.p #0,gr8
+ bclr icc0,#2 ; a.msw < b.msw
+
+ setlos.p #2,gr8
+ bhilr icc0,#0 ; a.msw > b.msw
+
+ subcc.p gr9,gr11,gr0,icc1
+ setlos #0,gr8
+ setlos.p #2,gr9
+ setlos #1,gr7
+ cknc icc1,cc6
+ cor.p gr9,gr0,gr8, cc6,#1
+ cckls icc1,cc4, cc6,#1
+ andcr cc6,cc4,cc4
+ cor gr7,gr0,gr8, cc4,#1
+ bralr
+ .size __ucmpdi2, .-__ucmpdi2
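For reference, the header comment above describes the standard libgcc __ucmpdi2
contract; a minimal C equivalent of that contract (an illustrative sketch only,
not part of the patch) is:

int __ucmpdi2(unsigned long long a, unsigned long long b)
{
        if (a < b)
                return 0;       /* a less than b */
        if (a > b)
                return 2;       /* a greater than b */
        return 1;               /* a equal to b */
}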
diff --git a/arch/frv/lib/atomic-ops.S b/arch/frv/lib/atomic-ops.S
index b03d510a89e..545cd325ac5 100644
--- a/arch/frv/lib/atomic-ops.S
+++ b/arch/frv/lib/atomic-ops.S
@@ -129,48 +129,6 @@ atomic_sub_return:
###############################################################################
#
-# uint8_t __xchg_8(uint8_t i, uint8_t *v)
-#
-###############################################################################
- .globl __xchg_8
- .type __xchg_8,@function
-__xchg_8:
- or.p gr8,gr8,gr10
-0:
- orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
- ckeq icc3,cc7
- ldub.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
- orcr cc7,cc7,cc3 /* set CC3 to true */
- cstb.p gr10,@(gr9,gr0) ,cc3,#1
- corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
- beq icc3,#0,0b
- bralr
-
- .size __xchg_8, .-__xchg_8
-
-###############################################################################
-#
-# uint16_t __xchg_16(uint16_t i, uint16_t *v)
-#
-###############################################################################
- .globl __xchg_16
- .type __xchg_16,@function
-__xchg_16:
- or.p gr8,gr8,gr10
-0:
- orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
- ckeq icc3,cc7
- lduh.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
- orcr cc7,cc7,cc3 /* set CC3 to true */
- csth.p gr10,@(gr9,gr0) ,cc3,#1
- corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
- beq icc3,#0,0b
- bralr
-
- .size __xchg_16, .-__xchg_16
-
-###############################################################################
-#
# uint32_t __xchg_32(uint32_t i, uint32_t *v)
#
###############################################################################
@@ -192,56 +150,6 @@ __xchg_32:
###############################################################################
#
-# uint8_t __cmpxchg_8(uint8_t *v, uint8_t test, uint8_t new)
-#
-###############################################################################
- .globl __cmpxchg_8
- .type __cmpxchg_8,@function
-__cmpxchg_8:
- or.p gr8,gr8,gr11
-0:
- orcc gr0,gr0,gr0,icc3
- ckeq icc3,cc7
- ldub.p @(gr11,gr0),gr8
- orcr cc7,cc7,cc3
- sub gr8,gr9,gr7
- sllicc gr7,#24,gr0,icc0
- bne icc0,#0,1f
- cstb.p gr10,@(gr11,gr0) ,cc3,#1
- corcc gr29,gr29,gr0 ,cc3,#1
- beq icc3,#0,0b
-1:
- bralr
-
- .size __cmpxchg_8, .-__cmpxchg_8
-
-###############################################################################
-#
-# uint16_t __cmpxchg_16(uint16_t *v, uint16_t test, uint16_t new)
-#
-###############################################################################
- .globl __cmpxchg_16
- .type __cmpxchg_16,@function
-__cmpxchg_16:
- or.p gr8,gr8,gr11
-0:
- orcc gr0,gr0,gr0,icc3
- ckeq icc3,cc7
- lduh.p @(gr11,gr0),gr8
- orcr cc7,cc7,cc3
- sub gr8,gr9,gr7
- sllicc gr7,#16,gr0,icc0
- bne icc0,#0,1f
- csth.p gr10,@(gr11,gr0) ,cc3,#1
- corcc gr29,gr29,gr0 ,cc3,#1
- beq icc3,#0,0b
-1:
- bralr
-
- .size __cmpxchg_16, .-__cmpxchg_16
-
-###############################################################################
-#
# uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new)
#
###############################################################################
diff --git a/arch/frv/lib/checksum.c b/arch/frv/lib/checksum.c
index 7bf5bd6cac8..20e7dfc474e 100644
--- a/arch/frv/lib/checksum.c
+++ b/arch/frv/lib/checksum.c
@@ -33,6 +33,7 @@
#include <net/checksum.h>
#include <asm/checksum.h>
+#include <linux/module.h>
static inline unsigned short from32to16(unsigned long x)
{
@@ -115,34 +116,52 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
return result;
}
+EXPORT_SYMBOL(csum_partial);
+
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
unsigned short ip_compute_csum(const unsigned char * buff, int len)
{
- return ~do_csum(buff,len);
+ return ~do_csum(buff, len);
}
+EXPORT_SYMBOL(ip_compute_csum);
+
/*
* copy from fs while checksumming, otherwise like csum_partial
*/
-
unsigned int
-csum_partial_copy_from_user(const char *src, char *dst, int len, int sum, int *csum_err)
+csum_partial_copy_from_user(const char __user *src, char *dst,
+ int len, int sum, int *csum_err)
{
- if (csum_err) *csum_err = 0;
- memcpy(dst, src, len);
+ int rem;
+
+ if (csum_err)
+ *csum_err = 0;
+
+ rem = copy_from_user(dst, src, len);
+ if (rem != 0) {
+ if (csum_err)
+ *csum_err = -EFAULT;
+ memset(dst + len - rem, 0, rem);
+ len = rem;
+ }
+
return csum_partial(dst, len, sum);
}
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+
/*
* copy from ds while checksumming, otherwise like csum_partial
*/
-
unsigned int
csum_partial_copy(const char *src, char *dst, int len, int sum)
{
memcpy(dst, src, len);
return csum_partial(dst, len, sum);
}
+
+EXPORT_SYMBOL(csum_partial_copy);
diff --git a/arch/frv/mb93090-mb00/Makefile b/arch/frv/mb93090-mb00/Makefile
index 3faf0f8cf9b..76595e87073 100644
--- a/arch/frv/mb93090-mb00/Makefile
+++ b/arch/frv/mb93090-mb00/Makefile
@@ -3,7 +3,7 @@
#
ifeq "$(CONFIG_PCI)" "y"
-obj-y := pci-frv.o pci-irq.o pci-vdk.o
+obj-y := pci-frv.o pci-irq.o pci-vdk.o pci-iomap.o
ifeq "$(CONFIG_MMU)" "y"
obj-y += pci-dma.o
diff --git a/arch/frv/mb93090-mb00/pci-dma-nommu.c b/arch/frv/mb93090-mb00/pci-dma-nommu.c
index 2082a9647f4..4985466b1a7 100644
--- a/arch/frv/mb93090-mb00/pci-dma-nommu.c
+++ b/arch/frv/mb93090-mb00/pci-dma-nommu.c
@@ -83,6 +83,8 @@ void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_hand
return NULL;
}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
struct dma_alloc_record *rec;
@@ -102,6 +104,8 @@ void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_
BUG();
}
+EXPORT_SYMBOL(dma_free_coherent);
+
/*
* Map a single buffer of the indicated size for DMA in streaming mode.
* The 32-bit bus address to use is returned.
@@ -120,6 +124,8 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
return virt_to_bus(ptr);
}
+EXPORT_SYMBOL(dma_map_single);
+
/*
* Map a set of buffers described by scatterlist in streaming
 * mode for DMA. This is the scatter-gather version of the
@@ -150,3 +156,5 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
return nents;
}
+
+EXPORT_SYMBOL(dma_map_sg);
diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c
index 86fbdadc51b..671ce1e8434 100644
--- a/arch/frv/mb93090-mb00/pci-dma.c
+++ b/arch/frv/mb93090-mb00/pci-dma.c
@@ -28,11 +28,15 @@ void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_hand
return ret;
}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
consistent_free(vaddr);
}
+EXPORT_SYMBOL(dma_free_coherent);
+
/*
* Map a single buffer of the indicated size for DMA in streaming mode.
* The 32-bit bus address to use is returned.
@@ -51,6 +55,8 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
return virt_to_bus(ptr);
}
+EXPORT_SYMBOL(dma_map_single);
+
/*
* Map a set of buffers described by scatterlist in streaming
 * mode for DMA. This is the scatter-gather version of the
@@ -96,6 +102,8 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
return nents;
}
+EXPORT_SYMBOL(dma_map_sg);
+
dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
size_t size, enum dma_data_direction direction)
{
@@ -103,3 +111,5 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long off
flush_dcache_page(page);
return (dma_addr_t) page_to_phys(page) + offset;
}
+
+EXPORT_SYMBOL(dma_map_page);
diff --git a/arch/frv/mb93090-mb00/pci-frv.c b/arch/frv/mb93090-mb00/pci-frv.c
index 83e5489cf03..0a26bf6f1cd 100644
--- a/arch/frv/mb93090-mb00/pci-frv.c
+++ b/arch/frv/mb93090-mb00/pci-frv.c
@@ -142,9 +142,7 @@ static void __init pcibios_allocate_resources(int pass)
u16 command;
struct resource *r, *pr;
- while (dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev),
- dev != NULL
- ) {
+ for_each_pci_dev(dev) {
pci_read_config_word(dev, PCI_COMMAND, &command);
for(idx = 0; idx < 6; idx++) {
r = &dev->resource[idx];
@@ -188,9 +186,7 @@ static void __init pcibios_assign_resources(void)
int idx;
struct resource *r;
- while (dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev),
- dev != NULL
- ) {
+ for_each_pci_dev(dev) {
int class = dev->class >> 8;
/* Don't touch classless devices and host bridges */
diff --git a/arch/frv/mb93090-mb00/pci-iomap.c b/arch/frv/mb93090-mb00/pci-iomap.c
new file mode 100644
index 00000000000..068fa04bd52
--- /dev/null
+++ b/arch/frv/mb93090-mb00/pci-iomap.c
@@ -0,0 +1,29 @@
+/* pci-iomap.c: description
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/pci.h>
+#include <linux/module.h>
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ unsigned long start = pci_resource_start(dev, bar);
+ unsigned long len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (!len || !start)
+ return NULL;
+
+ if ((flags & IORESOURCE_IO) || (flags & IORESOURCE_MEM))
+ return (void __iomem *) start;
+
+ return NULL;
+}
+
+EXPORT_SYMBOL(pci_iomap);
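The pci_iomap() added above simply returns the BAR's resource start as an
__iomem cookie, since both I/O and memory BARs are directly addressable on this
board. A minimal usage sketch in a hypothetical driver follows; the probe
function name, BAR number and register offset are illustrative assumptions, and
the generic ioread32()/pci_iounmap() helpers are assumed to be available here:

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        void __iomem *regs;
        u32 val;

        if (pci_enable_device(pdev))
                return -ENODEV;

        regs = pci_iomap(pdev, 0, 0);   /* maxlen 0: map all of BAR 0 */
        if (!regs) {
                pci_disable_device(pdev);
                return -ENOMEM;
        }

        val = ioread32(regs + 0x10);    /* ioread*() accepts pci_iomap() cookies */
        printk(KERN_INFO "example: reg 0x10 = %08x\n", val);

        pci_iounmap(pdev, regs);
        pci_disable_device(pdev);
        return 0;
}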
diff --git a/arch/frv/mb93090-mb00/pci-irq.c b/arch/frv/mb93090-mb00/pci-irq.c
index 24622d89b1c..c4a1144c98b 100644
--- a/arch/frv/mb93090-mb00/pci-irq.c
+++ b/arch/frv/mb93090-mb00/pci-irq.c
@@ -48,9 +48,7 @@ void __init pcibios_fixup_irqs(void)
struct pci_dev *dev = NULL;
uint8_t line, pin;
- while (dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev),
- dev != NULL
- ) {
+ for_each_pci_dev(dev) {
pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
if (pin) {
dev->irq = pci_bus0_irq_routing[PCI_SLOT(dev->devfn)][pin - 1];
diff --git a/arch/frv/mm/cache-page.c b/arch/frv/mm/cache-page.c
index 683b5e34431..0261cbe153b 100644
--- a/arch/frv/mm/cache-page.c
+++ b/arch/frv/mm/cache-page.c
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/highmem.h>
+#include <linux/module.h>
#include <asm/pgalloc.h>
/*****************************************************************************/
@@ -38,6 +39,8 @@ void flush_dcache_page(struct page *page)
} /* end flush_dcache_page() */
+EXPORT_SYMBOL(flush_dcache_page);
+
/*****************************************************************************/
/*
* ICI takes a virtual address and the page may not currently have one
@@ -64,3 +67,5 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
}
} /* end flush_icache_user_range() */
+
+EXPORT_SYMBOL(flush_icache_user_range);
diff --git a/arch/frv/mm/extable.c b/arch/frv/mm/extable.c
index 41be1128dc6..caacf030ac7 100644
--- a/arch/frv/mm/extable.c
+++ b/arch/frv/mm/extable.c
@@ -43,7 +43,7 @@ static inline unsigned long search_one_table(const struct exception_table_entry
*/
unsigned long search_exception_table(unsigned long pc)
{
- unsigned long ret = 0;
+ const struct exception_table_entry *extab;
/* determine if the fault lay during a memcpy_user or a memset_user */
if (__frame->lr == (unsigned long) &__memset_user_error_lr &&
@@ -55,9 +55,10 @@ unsigned long search_exception_table(unsigned long pc)
*/
return (unsigned long) &__memset_user_error_handler;
}
- else if (__frame->lr == (unsigned long) &__memcpy_user_error_lr &&
- (unsigned long) &memcpy <= pc && pc < (unsigned long) &__memcpy_end
- ) {
+
+ if (__frame->lr == (unsigned long) &__memcpy_user_error_lr &&
+ (unsigned long) &memcpy <= pc && pc < (unsigned long) &__memcpy_end
+ ) {
/* the fault occurred in a protected memcpy
* - we search for the return address (in LR) instead of the program counter
* - it was probably during a copy_to/from_user()
@@ -65,27 +66,10 @@ unsigned long search_exception_table(unsigned long pc)
return (unsigned long) &__memcpy_user_error_handler;
}
-#ifndef CONFIG_MODULES
- /* there is only the kernel to search. */
- ret = search_one_table(__start___ex_table, __stop___ex_table - 1, pc);
- return ret;
-
-#else
- /* the kernel is the last "module" -- no need to treat it special */
- unsigned long flags;
- struct module *mp;
+ extab = search_exception_tables(pc);
+ if (extab)
+ return extab->fixup;
- spin_lock_irqsave(&modlist_lock, flags);
-
- for (mp = module_list; mp != NULL; mp = mp->next) {
- if (mp->ex_table_start == NULL || !(mp->flags & (MOD_RUNNING | MOD_INITIALIZING)))
- continue;
- ret = search_one_table(mp->ex_table_start, mp->ex_table_end - 1, pc);
- if (ret)
- break;
- }
+ return 0;
- spin_unlock_irqrestore(&modlist_lock, flags);
- return ret;
-#endif
} /* end search_exception_table() */
diff --git a/arch/frv/mm/highmem.c b/arch/frv/mm/highmem.c
index 7dc8fbf3af9..7f77db7fabc 100644
--- a/arch/frv/mm/highmem.c
+++ b/arch/frv/mm/highmem.c
@@ -9,6 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/highmem.h>
+#include <linux/module.h>
void *kmap(struct page *page)
{
@@ -18,6 +19,8 @@ void *kmap(struct page *page)
return kmap_high(page);
}
+EXPORT_SYMBOL(kmap);
+
void kunmap(struct page *page)
{
if (in_interrupt())
@@ -27,7 +30,12 @@ void kunmap(struct page *page)
kunmap_high(page);
}
+EXPORT_SYMBOL(kunmap);
+
struct page *kmap_atomic_to_page(void *ptr)
{
return virt_to_page(ptr);
}
+
+
+EXPORT_SYMBOL(kmap_atomic_to_page);
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 26698a49f15..80940d712ac 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -21,10 +21,6 @@ config FPU
bool
default n
-config UID16
- bool
- default y
-
config RWSEM_GENERIC_SPINLOCK
bool
default y
diff --git a/arch/h8300/kernel/gpio.c b/arch/h8300/kernel/gpio.c
index 795682b873e..d195568ca8a 100644
--- a/arch/h8300/kernel/gpio.c
+++ b/arch/h8300/kernel/gpio.c
@@ -122,7 +122,7 @@ int h8300_get_gpio_dir(int port_bit)
static char *port_status(int portno)
{
static char result[10];
- const static char io[2]={'I','O'};
+ static const char io[2]={'I','O'};
char *rp;
int c;
unsigned char used,ddr;
@@ -143,7 +143,7 @@ static int gpio_proc_read(char *buf, char **start, off_t offset,
int len, int *unused_i, void *unused_v)
{
int c,outlen;
- const static char port_name[]="123456789ABCDEFGH";
+ static const char port_name[]="123456789ABCDEFGH";
outlen = 0;
for (c = 0; c < MAX_PORT; c++) {
if (ddrs[c] == NULL)
diff --git a/arch/h8300/kernel/h8300_ksyms.c b/arch/h8300/kernel/h8300_ksyms.c
index 5a630233112..5cc76efaf7a 100644
--- a/arch/h8300/kernel/h8300_ksyms.c
+++ b/arch/h8300/kernel/h8300_ksyms.c
@@ -22,11 +22,8 @@
//asmlinkage long long __lshrdi3 (long long, int);
extern char h8300_debug_device[];
-extern void dump_thread(struct pt_regs *, struct user *);
-
/* platform dependent support */
-EXPORT_SYMBOL(dump_thread);
EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strstr);
@@ -103,10 +100,6 @@ EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umoddi3);
EXPORT_SYMBOL(__umodsi3);
-#ifdef MAGIC_ROM_PTR
-EXPORT_SYMBOL(is_in_rom);
-#endif
-
EXPORT_SYMBOL(h8300_reserved_gpio);
EXPORT_SYMBOL(h8300_free_gpio);
EXPORT_SYMBOL(h8300_set_gpio_dir);
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index fe21adf3e75..ed79ae20e88 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -195,7 +195,7 @@ int copy_thread(int nr, unsigned long clone_flags,
{
struct pt_regs * childregs;
- childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
+ childregs = (struct pt_regs *) (THREAD_SIZE + task_stack_page(p)) - 1;
*childregs = *regs;
childregs->retpc = (unsigned long) ret_from_fork;
@@ -208,34 +208,6 @@ int copy_thread(int nr, unsigned long clone_flags,
}
/*
- * fill in the user structure for a core dump..
- */
-void dump_thread(struct pt_regs * regs, struct user * dump)
-{
-/* changed the size calculations - should hopefully work better. lbt */
- dump->magic = CMAGIC;
- dump->start_code = 0;
- dump->start_stack = rdusp() & ~(PAGE_SIZE - 1);
- dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
- dump->u_dsize = ((unsigned long) (current->mm->brk +
- (PAGE_SIZE-1))) >> PAGE_SHIFT;
- dump->u_dsize -= dump->u_tsize;
- dump->u_ssize = 0;
-
- dump->u_ar0 = (struct user_regs_struct *)(((int)(&dump->regs)) -((int)(dump)));
- dump->regs.er0 = regs->er0;
- dump->regs.er1 = regs->er1;
- dump->regs.er2 = regs->er2;
- dump->regs.er3 = regs->er3;
- dump->regs.er4 = regs->er4;
- dump->regs.er5 = regs->er5;
- dump->regs.er6 = regs->er6;
- dump->regs.orig_er0 = regs->orig_er0;
- dump->regs.ccr = regs->ccr;
- dump->regs.pc = regs->pc;
-}
-
-/*
* sys_execve() executes a new program.
*/
asmlinkage int sys_execve(char *name, char **argv, char **envp,int dummy,...)
diff --git a/arch/h8300/mm/memory.c b/arch/h8300/mm/memory.c
index f4ddece3216..81eace93f86 100644
--- a/arch/h8300/mm/memory.c
+++ b/arch/h8300/mm/memory.c
@@ -55,16 +55,3 @@ unsigned long kernel_map(unsigned long paddr, unsigned long size,
return paddr;
}
-#ifdef MAGIC_ROM_PTR
-
-int is_in_rom(unsigned long addr)
-{
- /* Anything not in operational RAM is returned as in rom! */
- if (addr < _ramstart || addr >= _ramend)
- return 1;
- else
- return 0;
-}
-
-#endif
-
diff --git a/arch/h8300/platform/h8300h/ptrace_h8300h.c b/arch/h8300/platform/h8300h/ptrace_h8300h.c
index 6ac93c05a1a..746b1ae672a 100644
--- a/arch/h8300/platform/h8300h/ptrace_h8300h.c
+++ b/arch/h8300/platform/h8300h/ptrace_h8300h.c
@@ -98,7 +98,7 @@ struct optable {
.type = jmp, \
}
-const static struct optable optable_0[] = {
+static const struct optable optable_0[] = {
OPTABLE(0x00,0xff, 1,none), /* 0x00 */
OPTABLE(0x01,0xff,-1,none), /* 0x01 */
OPTABLE(0x02,0xfe, 1,none), /* 0x02-0x03 */
@@ -131,31 +131,31 @@ const static struct optable optable_0[] = {
OPTABLE(0x80,0x80, 1,none), /* 0x80-0xff */
};
-const static struct optable optable_1[] = {
+static const struct optable optable_1[] = {
OPTABLE(0x00,0xff,-3,none), /* 0x0100 */
OPTABLE(0x40,0xf0,-3,none), /* 0x0140-0x14f */
OPTABLE(0x80,0xf0, 1,none), /* 0x0180-0x018f */
OPTABLE(0xc0,0xc0, 2,none), /* 0x01c0-0x01ff */
};
-const static struct optable optable_2[] = {
+static const struct optable optable_2[] = {
OPTABLE(0x00,0x20, 2,none), /* 0x6a0?/0x6a8?/0x6b0?/0x6b8? */
OPTABLE(0x20,0x20, 3,none), /* 0x6a2?/0x6aa?/0x6b2?/0x6ba? */
};
-const static struct optable optable_3[] = {
+static const struct optable optable_3[] = {
OPTABLE(0x69,0xfb, 2,none), /* 0x010069/0x01006d/014069/0x01406d */
OPTABLE(0x6b,0xff,-4,none), /* 0x01006b/0x01406b */
OPTABLE(0x6f,0xff, 3,none), /* 0x01006f/0x01406f */
OPTABLE(0x78,0xff, 5,none), /* 0x010078/0x014078 */
};
-const static struct optable optable_4[] = {
+static const struct optable optable_4[] = {
OPTABLE(0x00,0x78, 3,none), /* 0x0100690?/0x01006d0?/0140690/0x01406d0?/0x0100698?/0x01006d8?/0140698?/0x01406d8? */
OPTABLE(0x20,0x78, 4,none), /* 0x0100692?/0x01006d2?/0140692/0x01406d2?/0x010069a?/0x01006da?/014069a?/0x01406da? */
};
-const static struct optables_list {
+static const struct optables_list {
const struct optable *ptr;
int size;
} optables[] = {
diff --git a/arch/h8300/platform/h8s/ints.c b/arch/h8300/platform/h8s/ints.c
index 5441cdd12a3..f6ed663bdde 100644
--- a/arch/h8300/platform/h8s/ints.c
+++ b/arch/h8300/platform/h8s/ints.c
@@ -52,7 +52,7 @@ struct irq_pins {
unsigned char bit_no;
};
/* ISTR = 0 */
-const static struct irq_pins irq_assign_table0[16]={
+static const struct irq_pins irq_assign_table0[16]={
{H8300_GPIO_P5,H8300_GPIO_B0},{H8300_GPIO_P5,H8300_GPIO_B1},
{H8300_GPIO_P5,H8300_GPIO_B2},{H8300_GPIO_P5,H8300_GPIO_B3},
{H8300_GPIO_P5,H8300_GPIO_B4},{H8300_GPIO_P5,H8300_GPIO_B5},
@@ -63,7 +63,7 @@ const static struct irq_pins irq_assign_table0[16]={
{H8300_GPIO_PF,H8300_GPIO_B1},{H8300_GPIO_PF,H8300_GPIO_B2},
};
/* ISTR = 1 */
-const static struct irq_pins irq_assign_table1[16]={
+static const struct irq_pins irq_assign_table1[16]={
{H8300_GPIO_P8,H8300_GPIO_B0},{H8300_GPIO_P8,H8300_GPIO_B1},
{H8300_GPIO_P8,H8300_GPIO_B2},{H8300_GPIO_P8,H8300_GPIO_B3},
{H8300_GPIO_P8,H8300_GPIO_B4},{H8300_GPIO_P8,H8300_GPIO_B5},
diff --git a/arch/h8300/platform/h8s/ints_h8s.c b/arch/h8300/platform/h8s/ints_h8s.c
index f53de493e3e..8268dfd12f1 100644
--- a/arch/h8300/platform/h8s/ints_h8s.c
+++ b/arch/h8300/platform/h8s/ints_h8s.c
@@ -42,7 +42,7 @@ struct irq_pins {
unsigned char bit_no;
} __attribute__((aligned(1),packed));
/* ISTR = 0 */
-const static struct irq_pins irq_assign_table0[16]={
+static const struct irq_pins irq_assign_table0[16]={
{H8300_GPIO_P5,H8300_GPIO_B0},{H8300_GPIO_P5,H8300_GPIO_B1},
{H8300_GPIO_P5,H8300_GPIO_B2},{H8300_GPIO_P5,H8300_GPIO_B3},
{H8300_GPIO_P5,H8300_GPIO_B4},{H8300_GPIO_P5,H8300_GPIO_B5},
@@ -53,7 +53,7 @@ const static struct irq_pins irq_assign_table0[16]={
{H8300_GPIO_PF,H8300_GPIO_B1},{H8300_GPIO_PF,H8300_GPIO_B2},
};
/* ISTR = 1 */
-const static struct irq_pins irq_assign_table1[16]={
+static const struct irq_pins irq_assign_table1[16]={
{H8300_GPIO_P8,H8300_GPIO_B0},{H8300_GPIO_P8,H8300_GPIO_B1},
{H8300_GPIO_P8,H8300_GPIO_B2},{H8300_GPIO_P8,H8300_GPIO_B3},
{H8300_GPIO_P8,H8300_GPIO_B4},{H8300_GPIO_P8,H8300_GPIO_B5},
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 6004bb0795e..d5d0df7f04f 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -29,10 +29,6 @@ config MMU
config SBUS
bool
-config UID16
- bool
- default y
-
config GENERIC_ISA_DMA
bool
default y
@@ -45,8 +41,21 @@ config ARCH_MAY_HAVE_PC_FDC
bool
default y
+config DMI
+ bool
+ default y
+
source "init/Kconfig"
+config DOUBLEFAULT
+ default y
+ bool "Enable doublefault exception handler" if EMBEDDED
+ help
+ This option allows trapping of rare doublefault exceptions that
+ would otherwise cause a system to silently reboot. Disabling this
+ option saves about 4k and might cause you much additional grey
+ hair.
+
menu "Processor type and features"
choice
@@ -464,7 +473,6 @@ config NUMA
depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI))
default n if X86_PC
default y if (X86_NUMAQ || X86_SUMMIT)
- select SPARSEMEM_STATIC
# Need comments to help the hapless user trying to turn on NUMA support
comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
@@ -493,6 +501,10 @@ config HAVE_ARCH_ALLOC_REMAP
depends on NUMA
default y
+config ARCH_FLATMEM_ENABLE
+ def_bool y
+ depends on (ARCH_SELECT_MEMORY_MODEL && X86_PC)
+
config ARCH_DISCONTIGMEM_ENABLE
def_bool y
depends on NUMA
@@ -503,7 +515,8 @@ config ARCH_DISCONTIGMEM_DEFAULT
config ARCH_SPARSEMEM_ENABLE
def_bool y
- depends on NUMA
+ depends on (NUMA || (X86_PC && EXPERIMENTAL))
+ select SPARSEMEM_STATIC
config ARCH_SELECT_MEMORY_MODEL
def_bool y
@@ -626,10 +639,6 @@ config REGPARM
and passes the first three arguments of a function call in registers.
This will probably break binary only modules.
- This feature is only enabled for gcc-3.0 and later - earlier compilers
- generate incorrect output with certain kernel constructs when
- -mregparm=3 is used.
-
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
depends on PROC_FS
@@ -649,17 +658,6 @@ config SECCOMP
source kernel/Kconfig.hz
-config PHYSICAL_START
- hex "Physical address where the kernel is loaded" if EMBEDDED
- default "0x100000"
- help
- This gives the physical address where the kernel is loaded.
- Primarily used in the case of kexec on panic where the
- fail safe kernel needs to run at a different address than
- the panic-ed kernel.
-
- Don't change this unless you know what you are doing.
-
config KEXEC
bool "kexec system call (EXPERIMENTAL)"
depends on EXPERIMENTAL
@@ -679,11 +677,31 @@ config KEXEC
config CRASH_DUMP
bool "kernel crash dumps (EXPERIMENTAL)"
- depends on EMBEDDED
depends on EXPERIMENTAL
depends on HIGHMEM
help
Generate crash dump after being started by kexec.
+
+config PHYSICAL_START
+ hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
+
+ default "0x1000000" if CRASH_DUMP
+ default "0x100000"
+ help
+ This gives the physical address where the kernel is loaded. Normally
+ for regular kernels this value is 0x100000 (1MB). But in the case
+ of kexec on panic the fail safe kernel needs to run at a different
+ address than the panic-ed kernel. This option is used to set the load
+ address for kernels used to capture crash dump on being kexec'ed
+ after panic. The default value for crash dump kernels is
+ 0x1000000 (16MB). This can also be set based on the "X" value as
+ specified in the "crashkernel=YM@XM" command line boot parameter
+ passed to the panic-ed kernel. Typically this parameter is set as
+ crashkernel=64M@16M. Please take a look at
+ Documentation/kdump/kdump.txt for more details about crash dumps.
+
+ Don't change this unless you know what you are doing.
+
endmenu
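As a worked example of the help text above, using only the documented defaults:
booting the production kernel with crashkernel=64M@16M reserves 64 MB of RAM
starting at the 16 MB mark, so the capture kernel loaded into that region is
built with

        CONFIG_PHYSICAL_START=0x1000000

i.e. 16 MB, the "X" in crashkernel=YM@XM and the CRASH_DUMP default selected above.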
@@ -699,7 +717,7 @@ depends on PM && !X86_VISWS
config APM
tristate "APM (Advanced Power Management) BIOS support"
- depends on PM && PM_LEGACY
+ depends on PM
---help---
APM is a BIOS specification for saving power using several different
techniques. This is mostly useful for battery powered laptops with
@@ -1055,3 +1073,7 @@ config X86_TRAMPOLINE
bool
depends on X86_SMP || (X86_VOYAGER && SMP)
default y
+
+config KTIME_SCALAR
+ bool
+ default y
diff --git a/arch/i386/Kconfig.cpu b/arch/i386/Kconfig.cpu
index 53bbb3c008e..79603b3471f 100644
--- a/arch/i386/Kconfig.cpu
+++ b/arch/i386/Kconfig.cpu
@@ -39,6 +39,7 @@ config M386
- "Winchip-2" for IDT Winchip 2.
- "Winchip-2A" for IDT Winchips with 3dNow! capabilities.
- "GeodeGX1" for Geode GX1 (Cyrix MediaGX).
+ - "Geode GX/LX" For AMD Geode GX and LX processors.
- "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
- "VIA C3-2 for VIA C3-2 "Nehemiah" (model 9 and above).
@@ -171,6 +172,11 @@ config MGEODEGX1
help
Select this for a Geode GX1 (Cyrix MediaGX) chip.
+config MGEODE_LX
+ bool "Geode GX/LX"
+ help
+ Select this for AMD Geode GX and LX processors.
+
config MCYRIXIII
bool "CyrixIII/VIA-C3"
help
@@ -220,8 +226,8 @@ config X86_XADD
config X86_L1_CACHE_SHIFT
int
default "7" if MPENTIUM4 || X86_GENERIC
- default "4" if X86_ELAN || M486 || M386
- default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODEGX1
+ default "4" if X86_ELAN || M486 || M386 || MGEODEGX1
+ default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
default "6" if MK7 || MK8 || MPENTIUMM
config RWSEM_GENERIC_SPINLOCK
@@ -290,12 +296,12 @@ config X86_INTEL_USERCOPY
config X86_USE_PPRO_CHECKSUM
bool
- depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON
+ depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX
default y
config X86_USE_3DNOW
bool
- depends on MCYRIXIII || MK7
+ depends on MCYRIXIII || MK7 || MGEODE_LX
default y
config X86_OOSTORE
diff --git a/arch/i386/Kconfig.debug b/arch/i386/Kconfig.debug
index c48b424dd64..bf32ecc9ad0 100644
--- a/arch/i386/Kconfig.debug
+++ b/arch/i386/Kconfig.debug
@@ -42,6 +42,16 @@ config DEBUG_PAGEALLOC
This results in a large slowdown, but helps to find certain types
of memory corruptions.
+config DEBUG_RODATA
+ bool "Write protect kernel read-only data structures"
+ depends on DEBUG_KERNEL
+ help
+ Mark the kernel read-only data as write-protected in the pagetables,
+ in order to catch accidental (and incorrect) writes to such const
+ data. This option may have a slight performance impact because a
+ portion of the kernel code won't be covered by a 2MB TLB anymore.
+ If in doubt, say "N".
+
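The class of bug this option catches can be illustrated with a deliberately bad
write (a hypothetical fragment, not taken from the kernel); with DEBUG_RODATA
enabled the store faults immediately instead of silently corrupting const data:

static const char banner[] = "hello";

static void corrupt_rodata(void)
{
        char *p = (char *)banner;       /* casting away const */

        p[0] = 'H';                     /* with DEBUG_RODATA this write page-faults */
}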
config 4KSTACKS
bool "Use 4Kb for kernel stacks instead of 8Kb"
depends on DEBUG_KERNEL
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index d121ea18460..d3c0409d201 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -39,8 +39,8 @@ include $(srctree)/arch/i386/Makefile.cpu
# -mregparm=3 works ok on gcc-3.0 and later
#
-GCC_VERSION := $(call cc-version)
-cflags-$(CONFIG_REGPARM) += $(shell if [ $(GCC_VERSION) -ge 0300 ] ; then echo "-mregparm=3"; fi ;)
+cflags-$(CONFIG_REGPARM) += $(shell if [ $(call cc-version) -ge 0300 ] ; then \
+ echo "-mregparm=3"; fi ;)
# Disable unit-at-a-time mode, it makes gcc use a lot more stack
# due to the lack of sharing of stacklots.
@@ -103,7 +103,7 @@ AFLAGS += $(mflags-y)
boot := arch/i386/boot
.PHONY: zImage bzImage compressed zlilo bzlilo \
- zdisk bzdisk fdimage fdimage144 fdimage288 install kernel_install
+ zdisk bzdisk fdimage fdimage144 fdimage288 install
all: bzImage
@@ -125,8 +125,7 @@ zdisk bzdisk: vmlinux
fdimage fdimage144 fdimage288: vmlinux
$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@
-install: vmlinux
-install kernel_install:
+install:
$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
archclean:
diff --git a/arch/i386/Makefile.cpu b/arch/i386/Makefile.cpu
index 8e51456df23..dcd936ef45d 100644
--- a/arch/i386/Makefile.cpu
+++ b/arch/i386/Makefile.cpu
@@ -1,7 +1,7 @@
# CPU tuning section - shared with UML.
# Must change only cflags-y (or [yn]), not CFLAGS! That makes a difference for UML.
-#-mtune exists since gcc 3.4, and some -mcpu flavors didn't exist in gcc 2.95.
+#-mtune exists since gcc 3.4
HAS_MTUNE := $(call cc-option-yn, -mtune=i386)
ifeq ($(HAS_MTUNE),y)
tune = $(call cc-option,-mtune=$(1),)
@@ -14,7 +14,7 @@ cflags-$(CONFIG_M386) += -march=i386
cflags-$(CONFIG_M486) += -march=i486
cflags-$(CONFIG_M586) += -march=i586
cflags-$(CONFIG_M586TSC) += -march=i586
-cflags-$(CONFIG_M586MMX) += $(call cc-option,-march=pentium-mmx,-march=i586)
+cflags-$(CONFIG_M586MMX) += -march=pentium-mmx
cflags-$(CONFIG_M686) += -march=i686
cflags-$(CONFIG_MPENTIUMII) += -march=i686 $(call tune,pentium2)
cflags-$(CONFIG_MPENTIUMIII) += -march=i686 $(call tune,pentium3)
@@ -23,8 +23,8 @@ cflags-$(CONFIG_MPENTIUM4) += -march=i686 $(call tune,pentium4)
cflags-$(CONFIG_MK6) += -march=k6
# Please note, that patches that add -march=athlon-xp and friends are pointless.
# They make zero difference whatsosever to performance at this time.
-cflags-$(CONFIG_MK7) += $(call cc-option,-march=athlon,-march=i686 $(align)-functions=4)
-cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,$(call cc-option,-march=athlon,-march=i686 $(align)-functions=4))
+cflags-$(CONFIG_MK7) += -march=athlon
+cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon)
cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
@@ -37,5 +37,5 @@ cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
cflags-$(CONFIG_X86_ELAN) += -march=i486
# Geode GX1 support
-cflags-$(CONFIG_MGEODEGX1) += $(call cc-option,-march=pentium-mmx,-march=i486)
+cflags-$(CONFIG_MGEODEGX1) += -march=pentium-mmx
diff --git a/arch/i386/boot/Makefile b/arch/i386/boot/Makefile
index 1e71382d413..f136752563b 100644
--- a/arch/i386/boot/Makefile
+++ b/arch/i386/boot/Makefile
@@ -100,5 +100,5 @@ zlilo: $(BOOTIMAGE)
cp System.map $(INSTALL_PATH)/
if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
-install: $(BOOTIMAGE)
- sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $< System.map "$(INSTALL_PATH)"
+install:
+ sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
diff --git a/arch/i386/boot/compressed/misc.c b/arch/i386/boot/compressed/misc.c
index 82a807f9f5e..f19f3a7492a 100644
--- a/arch/i386/boot/compressed/misc.c
+++ b/arch/i386/boot/compressed/misc.c
@@ -11,7 +11,7 @@
#include <linux/linkage.h>
#include <linux/vmalloc.h>
-#include <linux/tty.h>
+#include <linux/screen_info.h>
#include <asm/io.h>
#include <asm/page.h>
diff --git a/arch/i386/boot/install.sh b/arch/i386/boot/install.sh
index f17b40dfc0f..5e44c736eea 100644
--- a/arch/i386/boot/install.sh
+++ b/arch/i386/boot/install.sh
@@ -19,6 +19,20 @@
# $4 - default install path (blank if root directory)
#
+verify () {
+ if [ ! -f "$1" ]; then
+ echo "" 1>&2
+ echo " *** Missing file: $1" 1>&2
+ echo ' *** You need to run "make" before "make install".' 1>&2
+ echo "" 1>&2
+ exit 1
+ fi
+}
+
+# Make sure the files actually exist
+verify "$2"
+verify "$3"
+
# User may have a custom install script
if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
diff --git a/arch/i386/boot/video.S b/arch/i386/boot/video.S
index 92f66947014..2ac40c8244c 100644
--- a/arch/i386/boot/video.S
+++ b/arch/i386/boot/video.S
@@ -97,7 +97,6 @@
#define PARAM_VESAPM_OFF 0x30
#define PARAM_LFB_PAGES 0x32
#define PARAM_VESA_ATTRIB 0x34
-#define PARAM_CAPABILITIES 0x36
/* Define DO_STORE according to CONFIG_VIDEO_RETAIN */
#ifdef CONFIG_VIDEO_RETAIN
@@ -234,10 +233,6 @@ mopar_gr:
movw 18(%di), %ax
movl %eax, %fs:(PARAM_LFB_SIZE)
-# store mode capabilities
- movl 10(%di), %eax
- movl %eax, %fs:(PARAM_CAPABILITIES)
-
# switching the DAC to 8-bit is for <= 8 bpp only
movw %fs:(PARAM_LFB_DEPTH), %ax
cmpw $8, %ax
diff --git a/arch/i386/crypto/aes-i586-asm.S b/arch/i386/crypto/aes-i586-asm.S
index 7b73c67cb4e..911b15377f2 100644
--- a/arch/i386/crypto/aes-i586-asm.S
+++ b/arch/i386/crypto/aes-i586-asm.S
@@ -255,18 +255,17 @@ aes_enc_blk:
xor 8(%ebp),%r4
xor 12(%ebp),%r5
- sub $8,%esp // space for register saves on stack
- add $16,%ebp // increment to next round key
- sub $10,%r3
- je 4f // 10 rounds for 128-bit key
- add $32,%ebp
- sub $2,%r3
- je 3f // 12 rounds for 128-bit key
- add $32,%ebp
-
-2: fwd_rnd1( -64(%ebp) ,ft_tab) // 14 rounds for 128-bit key
+ sub $8,%esp // space for register saves on stack
+ add $16,%ebp // increment to next round key
+ cmp $12,%r3
+ jb 4f // 10 rounds for 128-bit key
+ lea 32(%ebp),%ebp
+ je 3f // 12 rounds for 192-bit key
+ lea 32(%ebp),%ebp
+
+2: fwd_rnd1( -64(%ebp) ,ft_tab) // 14 rounds for 256-bit key
fwd_rnd2( -48(%ebp) ,ft_tab)
-3: fwd_rnd1( -32(%ebp) ,ft_tab) // 12 rounds for 128-bit key
+3: fwd_rnd1( -32(%ebp) ,ft_tab) // 12 rounds for 192-bit key
fwd_rnd2( -16(%ebp) ,ft_tab)
4: fwd_rnd1( (%ebp) ,ft_tab) // 10 rounds for 128-bit key
fwd_rnd2( +16(%ebp) ,ft_tab)
@@ -334,18 +333,17 @@ aes_dec_blk:
xor 8(%ebp),%r4
xor 12(%ebp),%r5
- sub $8,%esp // space for register saves on stack
- sub $16,%ebp // increment to next round key
- sub $10,%r3
- je 4f // 10 rounds for 128-bit key
- sub $32,%ebp
- sub $2,%r3
- je 3f // 12 rounds for 128-bit key
- sub $32,%ebp
+ sub $8,%esp // space for register saves on stack
+ sub $16,%ebp // increment to next round key
+ cmp $12,%r3
+ jb 4f // 10 rounds for 128-bit key
+ lea -32(%ebp),%ebp
+ je 3f // 12 rounds for 192-bit key
+ lea -32(%ebp),%ebp
-2: inv_rnd1( +64(%ebp), it_tab) // 14 rounds for 128-bit key
+2: inv_rnd1( +64(%ebp), it_tab) // 14 rounds for 256-bit key
inv_rnd2( +48(%ebp), it_tab)
-3: inv_rnd1( +32(%ebp), it_tab) // 12 rounds for 128-bit key
+3: inv_rnd1( +32(%ebp), it_tab) // 12 rounds for 192-bit key
inv_rnd2( +16(%ebp), it_tab)
4: inv_rnd1( (%ebp), it_tab) // 10 rounds for 128-bit key
inv_rnd2( -16(%ebp), it_tab)
diff --git a/arch/i386/crypto/aes.c b/arch/i386/crypto/aes.c
index 88ee85c3b43..a50397b1d5c 100644
--- a/arch/i386/crypto/aes.c
+++ b/arch/i386/crypto/aes.c
@@ -36,6 +36,8 @@
* Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
*
*/
+
+#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -59,7 +61,6 @@ struct aes_ctx {
};
#define WPOLY 0x011b
-#define u32_in(x) le32_to_cpup((const __le32 *)(x))
#define bytes2word(b0, b1, b2, b3) \
(((u32)(b3) << 24) | ((u32)(b2) << 16) | ((u32)(b1) << 8) | (b0))
@@ -93,7 +94,6 @@ static u32 rcon_tab[RC_LENGTH];
u32 ft_tab[4][256];
u32 fl_tab[4][256];
-static u32 ls_tab[4][256];
static u32 im_tab[4][256];
u32 il_tab[4][256];
u32 it_tab[4][256];
@@ -144,15 +144,6 @@ static void gen_tabs(void)
fl_tab[2][i] = upr(w, 2);
fl_tab[3][i] = upr(w, 3);
- /*
- * table for key schedule if fl_tab above is
- * not of the required form
- */
- ls_tab[0][i] = w;
- ls_tab[1][i] = upr(w, 1);
- ls_tab[2][i] = upr(w, 2);
- ls_tab[3][i] = upr(w, 3);
-
b = fi(inv_affine((u8)i));
w = bytes2word(fe(b), f9(b), fd(b), fb(b));
@@ -393,13 +384,14 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
int i;
u32 ss[8];
struct aes_ctx *ctx = ctx_arg;
+ const __le32 *key = (const __le32 *)in_key;
/* encryption schedule */
- ctx->ekey[0] = ss[0] = u32_in(in_key);
- ctx->ekey[1] = ss[1] = u32_in(in_key + 4);
- ctx->ekey[2] = ss[2] = u32_in(in_key + 8);
- ctx->ekey[3] = ss[3] = u32_in(in_key + 12);
+ ctx->ekey[0] = ss[0] = le32_to_cpu(key[0]);
+ ctx->ekey[1] = ss[1] = le32_to_cpu(key[1]);
+ ctx->ekey[2] = ss[2] = le32_to_cpu(key[2]);
+ ctx->ekey[3] = ss[3] = le32_to_cpu(key[3]);
switch(key_len) {
case 16:
@@ -410,8 +402,8 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
break;
case 24:
- ctx->ekey[4] = ss[4] = u32_in(in_key + 16);
- ctx->ekey[5] = ss[5] = u32_in(in_key + 20);
+ ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]);
+ ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]);
for (i = 0; i < 7; i++)
ke6(ctx->ekey, i);
kel6(ctx->ekey, 7);
@@ -419,10 +411,10 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
break;
case 32:
- ctx->ekey[4] = ss[4] = u32_in(in_key + 16);
- ctx->ekey[5] = ss[5] = u32_in(in_key + 20);
- ctx->ekey[6] = ss[6] = u32_in(in_key + 24);
- ctx->ekey[7] = ss[7] = u32_in(in_key + 28);
+ ctx->ekey[4] = ss[4] = le32_to_cpu(key[4]);
+ ctx->ekey[5] = ss[5] = le32_to_cpu(key[5]);
+ ctx->ekey[6] = ss[6] = le32_to_cpu(key[6]);
+ ctx->ekey[7] = ss[7] = le32_to_cpu(key[7]);
for (i = 0; i < 6; i++)
ke8(ctx->ekey, i);
kel8(ctx->ekey, 6);
@@ -436,10 +428,10 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
/* decryption schedule */
- ctx->dkey[0] = ss[0] = u32_in(in_key);
- ctx->dkey[1] = ss[1] = u32_in(in_key + 4);
- ctx->dkey[2] = ss[2] = u32_in(in_key + 8);
- ctx->dkey[3] = ss[3] = u32_in(in_key + 12);
+ ctx->dkey[0] = ss[0] = le32_to_cpu(key[0]);
+ ctx->dkey[1] = ss[1] = le32_to_cpu(key[1]);
+ ctx->dkey[2] = ss[2] = le32_to_cpu(key[2]);
+ ctx->dkey[3] = ss[3] = le32_to_cpu(key[3]);
switch (key_len) {
case 16:
@@ -450,8 +442,8 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
break;
case 24:
- ctx->dkey[4] = ff(ss[4] = u32_in(in_key + 16));
- ctx->dkey[5] = ff(ss[5] = u32_in(in_key + 20));
+ ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4]));
+ ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5]));
kdf6(ctx->dkey, 0);
for (i = 1; i < 7; i++)
kd6(ctx->dkey, i);
@@ -459,10 +451,10 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
break;
case 32:
- ctx->dkey[4] = ff(ss[4] = u32_in(in_key + 16));
- ctx->dkey[5] = ff(ss[5] = u32_in(in_key + 20));
- ctx->dkey[6] = ff(ss[6] = u32_in(in_key + 24));
- ctx->dkey[7] = ff(ss[7] = u32_in(in_key + 28));
+ ctx->dkey[4] = ff(ss[4] = le32_to_cpu(key[4]));
+ ctx->dkey[5] = ff(ss[5] = le32_to_cpu(key[5]));
+ ctx->dkey[6] = ff(ss[6] = le32_to_cpu(key[6]));
+ ctx->dkey[7] = ff(ss[7] = le32_to_cpu(key[7]));
kdf8(ctx->dkey, 0);
for (i = 1; i < 6; i++)
kd8(ctx->dkey, i);
@@ -484,6 +476,8 @@ static inline void aes_decrypt(void *ctx, u8 *dst, const u8 *src)
static struct crypto_alg aes_alg = {
.cra_name = "aes",
+ .cra_driver_name = "aes-i586",
+ .cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index f10de0f2c5e..60c3f76dfca 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -4,10 +4,10 @@
extra-y := head.o init_task.o vmlinux.lds
-obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o vm86.o \
+obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \
ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \
- doublefault.o quirks.o i8237.o
+ quirks.o i8237.o
obj-y += cpu/
obj-y += timers/
@@ -25,6 +25,7 @@ obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
obj-$(CONFIG_X86_IO_APIC) += io_apic.o
obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_X86_NUMAQ) += numaq.o
obj-$(CONFIG_X86_SUMMIT_NUMA) += summit.o
obj-$(CONFIG_KPROBES) += kprobes.o
@@ -33,6 +34,8 @@ obj-y += sysenter.o vsyscall.o
obj-$(CONFIG_ACPI_SRAT) += srat.o
obj-$(CONFIG_HPET_TIMER) += time_hpet.o
obj-$(CONFIG_EFI) += efi.o efi_stub.o
+obj-$(CONFIG_DOUBLEFAULT) += doublefault.o
+obj-$(CONFIG_VM86) += vm86.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
EXTRA_AFLAGS := -traditional
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 447fa9e33ff..2111529dea7 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -108,7 +108,7 @@ char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
if (!phys_addr || !size)
return NULL;
- if (phys_addr < (end_pfn_map << PAGE_SHIFT))
+ if (phys_addr+size <= (end_pfn_map << PAGE_SHIFT) + PAGE_SIZE)
return __va(phys_addr);
return NULL;
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 496a2c9909f..acd3f1e34ca 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -26,6 +26,7 @@
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
+#include <linux/module.h>
#include <asm/atomic.h>
#include <asm/smp.h>
@@ -37,10 +38,17 @@
#include <asm/i8253.h>
#include <mach_apic.h>
+#include <mach_ipi.h>
#include "io_ports.h"
/*
+ * cpu_mask that denotes the CPUs that need the timer interrupt coming in as
+ * IPIs in place of local APIC timers
+ */
+static cpumask_t timer_bcast_ipi;
+
+/*
* Knob to control our willingness to enable the local APIC.
*/
int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */
@@ -92,10 +100,6 @@ void __init apic_intr_init(void)
/* Using APIC to generate smp_local_timer_interrupt? */
int using_apic_timer = 0;
-static DEFINE_PER_CPU(int, prof_multiplier) = 1;
-static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
-static DEFINE_PER_CPU(int, prof_counter) = 1;
-
static int enabled_via_apicbase;
void enable_NMI_through_LVT0 (void * dummy)
@@ -721,7 +725,7 @@ static int __init apic_set_verbosity(char *str)
apic_verbosity = APIC_VERBOSE;
else
printk(KERN_WARNING "APIC Verbosity level %s not recognised"
- " use apic=verbose or apic=debug", str);
+ " use apic=verbose or apic=debug\n", str);
return 0;
}
@@ -935,11 +939,16 @@ void (*wait_timer_tick)(void) __devinitdata = wait_8254_wraparound;
static void __setup_APIC_LVTT(unsigned int clocks)
{
unsigned int lvtt_value, tmp_value, ver;
+ int cpu = smp_processor_id();
ver = GET_APIC_VERSION(apic_read(APIC_LVR));
lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
if (!APIC_INTEGRATED(ver))
lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
+
+ if (cpu_isset(cpu, timer_bcast_ipi))
+ lvtt_value |= APIC_LVT_MASKED;
+
apic_write_around(APIC_LVTT, lvtt_value);
/*
@@ -1072,7 +1081,7 @@ void __devinit setup_secondary_APIC_clock(void)
setup_APIC_timer(calibration_result);
}
-void __devinit disable_APIC_timer(void)
+void disable_APIC_timer(void)
{
if (using_apic_timer) {
unsigned long v;
@@ -1084,7 +1093,10 @@ void __devinit disable_APIC_timer(void)
void enable_APIC_timer(void)
{
- if (using_apic_timer) {
+ int cpu = smp_processor_id();
+
+ if (using_apic_timer &&
+ !cpu_isset(cpu, timer_bcast_ipi)) {
unsigned long v;
v = apic_read(APIC_LVTT);
@@ -1092,33 +1104,31 @@ void enable_APIC_timer(void)
}
}
-/*
- * the frequency of the profiling timer can be changed
- * by writing a multiplier value into /proc/profile.
- */
-int setup_profiling_timer(unsigned int multiplier)
+void switch_APIC_timer_to_ipi(void *cpumask)
{
- int i;
+ cpumask_t mask = *(cpumask_t *)cpumask;
+ int cpu = smp_processor_id();
- /*
- * Sanity check. [at least 500 APIC cycles should be
- * between APIC interrupts as a rule of thumb, to avoid
- * irqs flooding us]
- */
- if ( (!multiplier) || (calibration_result/multiplier < 500))
- return -EINVAL;
-
- /*
- * Set the new multiplier for each CPU. CPUs don't start using the
- * new values until the next timer interrupt in which they do process
- * accounting. At that time they also adjust their APIC timers
- * accordingly.
- */
- for (i = 0; i < NR_CPUS; ++i)
- per_cpu(prof_multiplier, i) = multiplier;
+ if (cpu_isset(cpu, mask) &&
+ !cpu_isset(cpu, timer_bcast_ipi)) {
+ disable_APIC_timer();
+ cpu_set(cpu, timer_bcast_ipi);
+ }
+}
+EXPORT_SYMBOL(switch_APIC_timer_to_ipi);
- return 0;
+void switch_ipi_to_APIC_timer(void *cpumask)
+{
+ cpumask_t mask = *(cpumask_t *)cpumask;
+ int cpu = smp_processor_id();
+
+ if (cpu_isset(cpu, mask) &&
+ cpu_isset(cpu, timer_bcast_ipi)) {
+ cpu_clear(cpu, timer_bcast_ipi);
+ enable_APIC_timer();
+ }
}
+EXPORT_SYMBOL(switch_ipi_to_APIC_timer);
#undef APIC_DIVISOR
@@ -1134,32 +1144,10 @@ int setup_profiling_timer(unsigned int multiplier)
inline void smp_local_timer_interrupt(struct pt_regs * regs)
{
- int cpu = smp_processor_id();
-
profile_tick(CPU_PROFILING, regs);
- if (--per_cpu(prof_counter, cpu) <= 0) {
- /*
- * The multiplier may have changed since the last time we got
- * to this point as a result of the user writing to
- * /proc/profile. In this case we need to adjust the APIC
- * timer accordingly.
- *
- * Interrupts are already masked off at this point.
- */
- per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
- if (per_cpu(prof_counter, cpu) !=
- per_cpu(prof_old_multiplier, cpu)) {
- __setup_APIC_LVTT(
- calibration_result/
- per_cpu(prof_counter, cpu));
- per_cpu(prof_old_multiplier, cpu) =
- per_cpu(prof_counter, cpu);
- }
-
#ifdef CONFIG_SMP
- update_process_times(user_mode_vm(regs));
+ update_process_times(user_mode_vm(regs));
#endif
- }
/*
* We take the 'long' return path, and there every subsystem
@@ -1206,6 +1194,43 @@ fastcall void smp_apic_timer_interrupt(struct pt_regs *regs)
irq_exit();
}
+#ifndef CONFIG_SMP
+static void up_apic_timer_interrupt_call(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+
+ /*
+ * the NMI deadlock-detector uses this.
+ */
+ per_cpu(irq_stat, cpu).apic_timer_irqs++;
+
+ smp_local_timer_interrupt(regs);
+}
+#endif
+
+void smp_send_timer_broadcast_ipi(struct pt_regs *regs)
+{
+ cpumask_t mask;
+
+ cpus_and(mask, cpu_online_map, timer_bcast_ipi);
+ if (!cpus_empty(mask)) {
+#ifdef CONFIG_SMP
+ send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+#else
+ /*
+ * We can directly call the apic timer interrupt handler
+ * in UP case. Minus all irq related functions
+ */
+ up_apic_timer_interrupt_call(regs);
+#endif
+ }
+}
+
+int setup_profiling_timer(unsigned int multiplier)
+{
+ return -EINVAL;
+}
+
/*
* This interrupt should _never_ happen with our APIC/SMP architecture
*/
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index 1e60acbed3c..05312a8abb8 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -219,6 +219,7 @@
#include <linux/sched.h>
#include <linux/pm.h>
#include <linux/pm_legacy.h>
+#include <linux/capability.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/smp.h>
@@ -303,17 +304,6 @@ extern int (*console_blank_hook)(int);
#include "apm.h"
/*
- * Define to make all _set_limit calls use 64k limits. The APM 1.1 BIOS is
- * supposed to provide limit information that it recognizes. Many machines
- * do this correctly, but many others do not restrict themselves to their
- * claimed limit. When this happens, they will cause a segmentation
- * violation in the kernel at boot time. Most BIOS's, however, will
- * respect a 64k limit, so we use that. If you want to be pedantic and
- * hold your BIOS to its claims, then undefine this.
- */
-#define APM_RELAX_SEGMENTS
-
-/*
* Define to re-initialize the interrupt 0 timer to 100 Hz after a suspend.
* This patched by Chad Miller <cmiller@surfsouth.com>, original code by
* David Chen <chen@ctpa04.mit.edu>
@@ -1075,22 +1065,23 @@ static int apm_engage_power_management(u_short device, int enable)
static int apm_console_blank(int blank)
{
- int error;
- u_short state;
+ int error, i;
+ u_short state;
+ static const u_short dev[3] = { 0x100, 0x1FF, 0x101 };
state = blank ? APM_STATE_STANDBY : APM_STATE_READY;
- /* Blank the first display device */
- error = set_power_state(0x100, state);
- if ((error != APM_SUCCESS) && (error != APM_NO_ERROR)) {
- /* try to blank them all instead */
- error = set_power_state(0x1ff, state);
- if ((error != APM_SUCCESS) && (error != APM_NO_ERROR))
- /* try to blank device one instead */
- error = set_power_state(0x101, state);
+
+ for (i = 0; i < ARRAY_SIZE(dev); i++) {
+ error = set_power_state(dev[i], state);
+
+ if ((error == APM_SUCCESS) || (error == APM_NO_ERROR))
+ return 1;
+
+ if (error == APM_NOT_ENGAGED)
+ break;
}
- if ((error == APM_SUCCESS) || (error == APM_NO_ERROR))
- return 1;
- if (error == APM_NOT_ENGAGED) {
+
+ if (error == APM_NOT_ENGAGED && state != APM_STATE_READY) {
static int tried;
int eng_error;
if (tried++ == 0) {
@@ -2233,8 +2224,8 @@ static struct dmi_system_id __initdata apm_dmi_table[] = {
static int __init apm_init(void)
{
struct proc_dir_entry *apm_proc;
+ struct desc_struct *gdt;
int ret;
- int i;
dmi_check_system(apm_dmi_table);
@@ -2301,7 +2292,9 @@ static int __init apm_init(void)
apm_info.disabled = 1;
return -ENODEV;
}
+#ifdef CONFIG_PM_LEGACY
pm_active = 1;
+#endif
/*
* Set up a segment that references the real mode segment 0x40
@@ -2312,45 +2305,30 @@ static int __init apm_init(void)
set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
_set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
+ /*
+ * Set up the long jump entry point to the APM BIOS, which is called
+ * from inline assembly.
+ */
apm_bios_entry.offset = apm_info.bios.offset;
apm_bios_entry.segment = APM_CS;
- for (i = 0; i < NR_CPUS; i++) {
- struct desc_struct *gdt = get_cpu_gdt_table(i);
- set_base(gdt[APM_CS >> 3],
- __va((unsigned long)apm_info.bios.cseg << 4));
- set_base(gdt[APM_CS_16 >> 3],
- __va((unsigned long)apm_info.bios.cseg_16 << 4));
- set_base(gdt[APM_DS >> 3],
- __va((unsigned long)apm_info.bios.dseg << 4));
-#ifndef APM_RELAX_SEGMENTS
- if (apm_info.bios.version == 0x100) {
-#endif
- /* For ASUS motherboard, Award BIOS rev 110 (and others?) */
- _set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 - 1);
- /* For some unknown machine. */
- _set_limit((char *)&gdt[APM_CS_16 >> 3], 64 * 1024 - 1);
- /* For the DEC Hinote Ultra CT475 (and others?) */
- _set_limit((char *)&gdt[APM_DS >> 3], 64 * 1024 - 1);
-#ifndef APM_RELAX_SEGMENTS
- } else {
- _set_limit((char *)&gdt[APM_CS >> 3],
- (apm_info.bios.cseg_len - 1) & 0xffff);
- _set_limit((char *)&gdt[APM_CS_16 >> 3],
- (apm_info.bios.cseg_16_len - 1) & 0xffff);
- _set_limit((char *)&gdt[APM_DS >> 3],
- (apm_info.bios.dseg_len - 1) & 0xffff);
- /* workaround for broken BIOSes */
- if (apm_info.bios.cseg_len <= apm_info.bios.offset)
- _set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 -1);
- if (apm_info.bios.dseg_len <= 0x40) { /* 0x40 * 4kB == 64kB */
- /* for the BIOS that assumes granularity = 1 */
- gdt[APM_DS >> 3].b |= 0x800000;
- printk(KERN_NOTICE "apm: we set the granularity of dseg.\n");
- }
- }
-#endif
- }
+ /*
+ * The APM 1.1 BIOS is supposed to provide limit information that it
+ * recognizes. Many machines do this correctly, but many others do
+ * not restrict themselves to their claimed limit. When this happens,
+ * they will cause a segmentation violation in the kernel at boot time.
+ * Most BIOS's, however, will respect a 64k limit, so we use that.
+ *
+ * Note we only set APM segments on CPU zero, since we pin the APM
+ * code to that CPU.
+ */
+ gdt = get_cpu_gdt_table(0);
+ set_base(gdt[APM_CS >> 3],
+ __va((unsigned long)apm_info.bios.cseg << 4));
+ set_base(gdt[APM_CS_16 >> 3],
+ __va((unsigned long)apm_info.bios.cseg_16 << 4));
+ set_base(gdt[APM_DS >> 3],
+ __va((unsigned long)apm_info.bios.dseg << 4));
apm_proc = create_proc_info_entry("apm", 0, NULL, apm_get_info);
if (apm_proc)
@@ -2407,7 +2385,9 @@ static void __exit apm_exit(void)
exit_kapmd = 1;
while (kapmd_running)
schedule();
+#ifdef CONFIG_PM_LEGACY
pm_active = 0;
+#endif
}
module_init(apm_init);
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index e344ef88cfc..333578a4e91 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -161,8 +161,13 @@ static void __init init_amd(struct cpuinfo_x86 *c)
set_bit(X86_FEATURE_K6_MTRR, c->x86_capability);
break;
}
- break;
+ if (c->x86_model == 10) {
+ /* AMD Geode LX is model 10 */
+ /* placeholder for any needed mods */
+ break;
+ }
+ break;
case 6: /* An Athlon/Duron */
/* Bit 15 of Athlon specific MSR 15, needs to be 0
@@ -211,6 +216,12 @@ static void __init init_amd(struct cpuinfo_x86 *c)
c->x86_max_cores = 1;
}
+ if (cpuid_eax(0x80000000) >= 0x80000007) {
+ c->x86_power = cpuid_edx(0x80000007);
+ if (c->x86_power & (1<<8))
+ set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
+ }
+
#ifdef CONFIG_X86_HT
/*
* On a AMD dual core setup the lower bits of the APIC id
@@ -228,6 +239,7 @@ static void __init init_amd(struct cpuinfo_x86 *c)
cpu, c->x86_max_cores, cpu_core_id[cpu]);
}
#endif
+
}
static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
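
The init_amd() hunk above reads CPUID leaf 0x80000007 and sets X86_FEATURE_CONSTANT_TSC when EDX bit 8 (the invariant-TSC flag) is set. A minimal user-space sketch of the same probe, using the compiler's <cpuid.h> helper rather than kernel infrastructure:

#include <stdio.h>
#include <cpuid.h>   /* GCC/Clang helper for the CPUID instruction */

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* Leaf 0x80000007 ("Advanced Power Management Information") reports
         * the power-management feature bits in EDX; bit 8 is the invariant
         * ("constant") TSC flag the hunk above stores in x86_power. */
        if (__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx))
                printf("constant TSC: %s\n", (edx & (1u << 8)) ? "yes" : "no");
        else
                printf("leaf 0x80000007 not supported\n");
        return 0;
}
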
diff --git a/arch/i386/kernel/cpu/changelog b/arch/i386/kernel/cpu/changelog
deleted file mode 100644
index cef76b80a71..00000000000
--- a/arch/i386/kernel/cpu/changelog
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Enhanced CPU type detection by Mike Jagdis, Patrick St. Jean
- * and Martin Mares, November 1997.
- *
- * Force Cyrix 6x86(MX) and M II processors to report MTRR capability
- * and Cyrix "coma bug" recognition by
- * Zoltán Böszörményi <zboszor@mail.externet.hu> February 1999.
- *
- * Force Centaur C6 processors to report MTRR capability.
- * Bart Hartgers <bart@etpmod.phys.tue.nl>, May 1999.
- *
- * Intel Mobile Pentium II detection fix. Sean Gilley, June 1999.
- *
- * IDT Winchip tweaks, misc clean ups.
- * Dave Jones <davej@suse.de>, August 1999
- *
- * Better detection of Centaur/IDT WinChip models.
- * Bart Hartgers <bart@etpmod.phys.tue.nl>, August 1999.
- *
- * Cleaned up cache-detection code
- * Dave Jones <davej@suse.de>, October 1999
- *
- * Added proper L2 cache detection for Coppermine
- * Dragan Stancevic <visitor@valinux.com>, October 1999
- *
- * Added the original array for capability flags but forgot to credit
- * myself :) (~1998) Fixed/cleaned up some cpu_model_info and other stuff
- * Jauder Ho <jauderho@carumba.com>, January 2000
- *
- * Detection for Celeron coppermine, identify_cpu() overhauled,
- * and a few other clean ups.
- * Dave Jones <davej@suse.de>, April 2000
- *
- * Pentium III FXSR, SSE support
- * General FPU state handling cleanups
- * Gareth Hughes <gareth@valinux.com>, May 2000
- *
- * Added proper Cascades CPU and L2 cache detection for Cascades
- * and 8-way type cache happy bunch from Intel:^)
- * Dragan Stancevic <visitor@valinux.com>, May 2000
- *
- * Forward port AMD Duron errata T13 from 2.2.17pre
- * Dave Jones <davej@suse.de>, August 2000
- *
- * Forward port lots of fixes/improvements from 2.2.18pre
- * Cyrix III, Pentium IV support.
- * Dave Jones <davej@suse.de>, October 2000
- *
- * Massive cleanup of CPU detection and bug handling;
- * Transmeta CPU detection,
- * H. Peter Anvin <hpa@zytor.com>, November 2000
- *
- * VIA C3 Support.
- * Dave Jones <davej@suse.de>, March 2001
- *
- * AMD Athlon/Duron/Thunderbird bluesmoke support.
- * Dave Jones <davej@suse.de>, April 2001.
- *
- * CacheSize bug workaround updates for AMD, Intel & VIA Cyrix.
- * Dave Jones <davej@suse.de>, September, October 2001.
- *
- */
-
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 31e344b26ba..15aee26ec2b 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -18,9 +18,6 @@
#include "cpu.h"
-DEFINE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
-EXPORT_PER_CPU_SYMBOL(cpu_gdt_table);
-
DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
@@ -207,7 +204,10 @@ static int __devinit have_cpuid_p(void)
/* Do minimum CPU detection early.
Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
- The others are not touched to avoid unwanted side effects. */
+ The others are not touched to avoid unwanted side effects.
+
+ WARNING: this function is only called on the BP. Don't add code here
+ that is supposed to run on all CPUs. */
static void __init early_cpu_detect(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
@@ -239,12 +239,6 @@ static void __init early_cpu_detect(void)
if (cap0 & (1<<19))
c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
}
-
- early_intel_workaround(c);
-
-#ifdef CONFIG_X86_HT
- phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
-#endif
}
void __devinit generic_identify(struct cpuinfo_x86 * c)
@@ -292,6 +286,12 @@ void __devinit generic_identify(struct cpuinfo_x86 * c)
get_model_name(c); /* Default name */
}
}
+
+ early_intel_workaround(c);
+
+#ifdef CONFIG_X86_HT
+ phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
+#endif
}
static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
@@ -599,11 +599,6 @@ void __devinit cpu_init(void)
load_idt(&idt_descr);
/*
- * Delete NT
- */
- __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
-
- /*
* Set up and load the per-CPU TSS and LDT
*/
atomic_inc(&init_mm.mm_count);
@@ -617,8 +612,10 @@ void __devinit cpu_init(void)
load_TR_desc();
load_LDT(&init_mm.context);
+#ifdef CONFIG_DOUBLEFAULT
/* Set up doublefault TSS pointer in the GDT */
__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
+#endif
/* Clear %fs and %gs. */
asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 871366b83b3..7975e79d5fa 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -40,8 +40,6 @@
#include <linux/acpi.h>
#include <acpi/processor.h>
-#include "speedstep-est-common.h"
-
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg)
MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
@@ -367,6 +365,7 @@ acpi_cpufreq_cpu_init (
unsigned int cpu = policy->cpu;
struct cpufreq_acpi_io *data;
unsigned int result = 0;
+ struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
union acpi_object arg0 = {ACPI_TYPE_BUFFER};
u32 arg0_buf[3];
@@ -390,7 +389,7 @@ acpi_cpufreq_cpu_init (
if (result)
goto err_free;
- if (is_const_loops_cpu(cpu)) {
+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
}
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index edb9873e27e..9a826cde4fd 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -35,8 +35,6 @@
#include <asm/processor.h>
#include <asm/cpufeature.h>
-#include "speedstep-est-common.h"
-
#define PFX "speedstep-centrino: "
#define MAINTAINER "Jeremy Fitzhardinge <jeremy@goop.org>"
@@ -493,12 +491,13 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
unsigned l, h;
int ret;
int i;
+ struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
/* Only Intel makes Enhanced Speedstep-capable CPUs */
if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST))
return -ENODEV;
- if (is_const_loops_cpu(policy->cpu)) {
+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
}
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-est-common.h b/arch/i386/kernel/cpu/cpufreq/speedstep-est-common.h
deleted file mode 100644
index 5ce995c9d86..00000000000
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-est-common.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Routines common for drivers handling Enhanced Speedstep Technology
- * Copyright (C) 2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- *
- * Licensed under the terms of the GNU GPL License version 2 -- see
- * COPYING for details.
- */
-
-static inline int is_const_loops_cpu(unsigned int cpu)
-{
- struct cpuinfo_x86 *c = cpu_data + cpu;
-
- if (c->x86_vendor != X86_VENDOR_INTEL || !cpu_has(c, X86_FEATURE_EST))
- return 0;
-
- /*
- * on P-4s, the TSC runs with constant frequency independent of cpu freq
- * when we use EST
- */
- if (c->x86 == 0xf)
- return 1;
-
- return 0;
-}
-
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
index ff87cc22b32..75015975d03 100644
--- a/arch/i386/kernel/cpu/cyrix.c
+++ b/arch/i386/kernel/cpu/cyrix.c
@@ -343,6 +343,31 @@ static void __init init_cyrix(struct cpuinfo_x86 *c)
}
/*
+ * Handle National Semiconductor branded processors
+ */
+static void __devinit init_nsc(struct cpuinfo_x86 *c)
+{
+ /* There may be GX1 processors in the wild that are branded
+ * NSC and not Cyrix.
+ *
+ * This function only handles the GX processor, and kicks everything
+ * else to the Cyrix init function above - that should cover any
+ * processors that might have been branded differently after NSC
+ * acquired Cyrix.
+ *
+ * If this breaks your GX1 horribly, please e-mail
+ * info-linux@ldcmail.amd.com to tell us.
+ */
+
+ /* Handle the GX (formerly known as the GX2) */
+
+ if (c->x86 == 5 && c->x86_model == 5)
+ display_cacheinfo(c);
+ else
+ init_cyrix(c);
+}
+
+/*
* Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
* by the fact that they preserve the flags across the division of 5/2.
* PII and PPro exhibit this behavior too, but they have cpuid available.
@@ -422,7 +447,7 @@ int __init cyrix_init_cpu(void)
static struct cpu_dev nsc_cpu_dev __initdata = {
.c_vendor = "NSC",
.c_ident = { "Geode by NSC" },
- .c_init = init_cyrix,
+ .c_init = init_nsc,
.c_identify = generic_identify,
};
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index 5e2da704f0f..8c0120186b9 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -183,10 +183,13 @@ static void __devinit init_intel(struct cpuinfo_x86 *c)
}
#endif
- if (c->x86 == 15)
+ if (c->x86 == 15)
set_bit(X86_FEATURE_P4, c->x86_capability);
if (c->x86 == 6)
set_bit(X86_FEATURE_P3, c->x86_capability);
+ if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
+ (c->x86 == 0x6 && c->x86_model >= 0x0e))
+ set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
}
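
The init_intel() hunk flags a constant TSC for family 0xf from model 3 up and for family 6 from model 0x0e up. A hedged user-space sketch of how family and model are decoded from CPUID leaf 1 (including the extended fields) before applying that heuristic; it is only meaningful on Intel parts:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx, family, model;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return 1;

        family = (eax >> 8) & 0xf;
        model  = (eax >> 4) & 0xf;
        if (family == 0xf)
                family += (eax >> 20) & 0xff;        /* extended family */
        if (family >= 0x6)
                model |= ((eax >> 16) & 0xf) << 4;   /* extended model  */

        printf("family 0x%x model 0x%x -> constant TSC heuristic: %s\n",
               family, model,
               ((family == 0xf && model >= 0x03) ||
                (family == 0x6 && model >= 0x0e)) ? "yes" : "no");
        return 0;
}
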
diff --git a/arch/i386/kernel/cpu/mtrr/changelog b/arch/i386/kernel/cpu/mtrr/changelog
deleted file mode 100644
index af136853595..00000000000
--- a/arch/i386/kernel/cpu/mtrr/changelog
+++ /dev/null
@@ -1,229 +0,0 @@
- ChangeLog
-
- Prehistory Martin Tischhäuser <martin@ikcbarka.fzk.de>
- Initial register-setting code (from proform-1.0).
- 19971216 Richard Gooch <rgooch@atnf.csiro.au>
- Original version for /proc/mtrr interface, SMP-safe.
- v1.0
- 19971217 Richard Gooch <rgooch@atnf.csiro.au>
- Bug fix for ioctls()'s.
- Added sample code in Documentation/mtrr.txt
- v1.1
- 19971218 Richard Gooch <rgooch@atnf.csiro.au>
- Disallow overlapping regions.
- 19971219 Jens Maurer <jmaurer@menuett.rhein-main.de>
- Register-setting fixups.
- v1.2
- 19971222 Richard Gooch <rgooch@atnf.csiro.au>
- Fixups for kernel 2.1.75.
- v1.3
- 19971229 David Wragg <dpw@doc.ic.ac.uk>
- Register-setting fixups and conformity with Intel conventions.
- 19971229 Richard Gooch <rgooch@atnf.csiro.au>
- Cosmetic changes and wrote this ChangeLog ;-)
- 19980106 Richard Gooch <rgooch@atnf.csiro.au>
- Fixups for kernel 2.1.78.
- v1.4
- 19980119 David Wragg <dpw@doc.ic.ac.uk>
- Included passive-release enable code (elsewhere in PCI setup).
- v1.5
- 19980131 Richard Gooch <rgooch@atnf.csiro.au>
- Replaced global kernel lock with private spinlock.
- v1.6
- 19980201 Richard Gooch <rgooch@atnf.csiro.au>
- Added wait for other CPUs to complete changes.
- v1.7
- 19980202 Richard Gooch <rgooch@atnf.csiro.au>
- Bug fix in definition of <set_mtrr> for UP.
- v1.8
- 19980319 Richard Gooch <rgooch@atnf.csiro.au>
- Fixups for kernel 2.1.90.
- 19980323 Richard Gooch <rgooch@atnf.csiro.au>
- Move SMP BIOS fixup before secondary CPUs call <calibrate_delay>
- v1.9
- 19980325 Richard Gooch <rgooch@atnf.csiro.au>
- Fixed test for overlapping regions: confused by adjacent regions
- 19980326 Richard Gooch <rgooch@atnf.csiro.au>
- Added wbinvd in <set_mtrr_prepare>.
- 19980401 Richard Gooch <rgooch@atnf.csiro.au>
- Bug fix for non-SMP compilation.
- 19980418 David Wragg <dpw@doc.ic.ac.uk>
- Fixed-MTRR synchronisation for SMP and use atomic operations
- instead of spinlocks.
- 19980418 Richard Gooch <rgooch@atnf.csiro.au>
- Differentiate different MTRR register classes for BIOS fixup.
- v1.10
- 19980419 David Wragg <dpw@doc.ic.ac.uk>
- Bug fix in variable MTRR synchronisation.
- v1.11
- 19980419 Richard Gooch <rgooch@atnf.csiro.au>
- Fixups for kernel 2.1.97.
- v1.12
- 19980421 Richard Gooch <rgooch@atnf.csiro.au>
- Safer synchronisation across CPUs when changing MTRRs.
- v1.13
- 19980423 Richard Gooch <rgooch@atnf.csiro.au>
- Bugfix for SMP systems without MTRR support.
- v1.14
- 19980427 Richard Gooch <rgooch@atnf.csiro.au>
- Trap calls to <mtrr_add> and <mtrr_del> on non-MTRR machines.
- v1.15
- 19980427 Richard Gooch <rgooch@atnf.csiro.au>
- Use atomic bitops for setting SMP change mask.
- v1.16
- 19980428 Richard Gooch <rgooch@atnf.csiro.au>
- Removed spurious diagnostic message.
- v1.17
- 19980429 Richard Gooch <rgooch@atnf.csiro.au>
- Moved register-setting macros into this file.
- Moved setup code from init/main.c to i386-specific areas.
- v1.18
- 19980502 Richard Gooch <rgooch@atnf.csiro.au>
- Moved MTRR detection outside conditionals in <mtrr_init>.
- v1.19
- 19980502 Richard Gooch <rgooch@atnf.csiro.au>
- Documentation improvement: mention Pentium II and AGP.
- v1.20
- 19980521 Richard Gooch <rgooch@atnf.csiro.au>
- Only manipulate interrupt enable flag on local CPU.
- Allow enclosed uncachable regions.
- v1.21
- 19980611 Richard Gooch <rgooch@atnf.csiro.au>
- Always define <main_lock>.
- v1.22
- 19980901 Richard Gooch <rgooch@atnf.csiro.au>
- Removed module support in order to tidy up code.
- Added sanity check for <mtrr_add>/<mtrr_del> before <mtrr_init>.
- Created addition queue for prior to SMP commence.
- v1.23
- 19980902 Richard Gooch <rgooch@atnf.csiro.au>
- Ported patch to kernel 2.1.120-pre3.
- v1.24
- 19980910 Richard Gooch <rgooch@atnf.csiro.au>
- Removed sanity checks and addition queue: Linus prefers an OOPS.
- v1.25
- 19981001 Richard Gooch <rgooch@atnf.csiro.au>
- Fixed harmless compiler warning in include/asm-i386/mtrr.h
- Fixed version numbering and history for v1.23 -> v1.24.
- v1.26
- 19990118 Richard Gooch <rgooch@atnf.csiro.au>
- Added devfs support.
- v1.27
- 19990123 Richard Gooch <rgooch@atnf.csiro.au>
- Changed locking to spin with reschedule.
- Made use of new <smp_call_function>.
- v1.28
- 19990201 Zoltán Böszörményi <zboszor@mail.externet.hu>
- Extended the driver to be able to use Cyrix style ARRs.
- 19990204 Richard Gooch <rgooch@atnf.csiro.au>
- Restructured Cyrix support.
- v1.29
- 19990204 Zoltán Böszörményi <zboszor@mail.externet.hu>
- Refined ARR support: enable MAPEN in set_mtrr_prepare()
- and disable MAPEN in set_mtrr_done().
- 19990205 Richard Gooch <rgooch@atnf.csiro.au>
- Minor cleanups.
- v1.30
- 19990208 Zoltán Böszörményi <zboszor@mail.externet.hu>
- Protect plain 6x86s (and other processors without the
- Page Global Enable feature) against accessing CR4 in
- set_mtrr_prepare() and set_mtrr_done().
- 19990210 Richard Gooch <rgooch@atnf.csiro.au>
- Turned <set_mtrr_up> and <get_mtrr> into function pointers.
- v1.31
- 19990212 Zoltán Böszörményi <zboszor@mail.externet.hu>
- Major rewrite of cyrix_arr_init(): do not touch ARRs,
- leave them as the BIOS have set them up.
- Enable usage of all 8 ARRs.
- Avoid multiplications by 3 everywhere and other
- code clean ups/speed ups.
- 19990213 Zoltán Böszörményi <zboszor@mail.externet.hu>
- Set up other Cyrix processors identical to the boot cpu.
- Since Cyrix don't support Intel APIC, this is l'art pour l'art.
- Weigh ARRs by size:
- If size <= 32M is given, set up ARR# we were given.
- If size > 32M is given, set up ARR7 only if it is free,
- fail otherwise.
- 19990214 Zoltán Böszörményi <zboszor@mail.externet.hu>
- Also check for size >= 256K if we are to set up ARR7,
- mtrr_add() returns the value it gets from set_mtrr()
- 19990218 Zoltán Böszörményi <zboszor@mail.externet.hu>
- Remove Cyrix "coma bug" workaround from here.
- Moved to linux/arch/i386/kernel/setup.c and
- linux/include/asm-i386/bugs.h
- 19990228 Richard Gooch <rgooch@atnf.csiro.au>
- Added MTRRIOC_KILL_ENTRY ioctl(2)
- Trap for counter underflow in <mtrr_file_del>.
- Trap for 4 MiB aligned regions for PPro, stepping <= 7.
- 19990301 Richard Gooch <rgooch@atnf.csiro.au>
- Created <get_free_region> hook.
- 19990305 Richard Gooch <rgooch@atnf.csiro.au>
- Temporarily disable AMD support now MTRR capability flag is set.
- v1.32
- 19990308 Zoltán Böszörményi <zboszor@mail.externet.hu>
- Adjust my changes (19990212-19990218) to Richard Gooch's
- latest changes. (19990228-19990305)
- v1.33
- 19990309 Richard Gooch <rgooch@atnf.csiro.au>
- Fixed typo in <printk> message.
- 19990310 Richard Gooch <rgooch@atnf.csiro.au>
- Support K6-II/III based on Alan Cox's <alan@redhat.com> patches.
- v1.34
- 19990511 Bart Hartgers <bart@etpmod.phys.tue.nl>
- Support Centaur C6 MCR's.
- 19990512 Richard Gooch <rgooch@atnf.csiro.au>
- Minor cleanups.
- v1.35
- 19990707 Zoltán Böszörményi <zboszor@mail.externet.hu>
- Check whether ARR3 is protected in cyrix_get_free_region()
- and mtrr_del(). The code won't attempt to delete or change it
- from now on if the BIOS protected ARR3. It silently skips ARR3
- in cyrix_get_free_region() or returns with an error code from
- mtrr_del().
- 19990711 Zoltán Böszörményi <zboszor@mail.externet.hu>
- Reset some bits in the CCRs in cyrix_arr_init() to disable SMM
- if ARR3 isn't protected. This is needed because if SMM is active
- and ARR3 isn't protected then deleting and setting ARR3 again
- may lock up the processor. With SMM entirely disabled, it does
- not happen.
- 19990812 Zoltán Böszörményi <zboszor@mail.externet.hu>
- Rearrange switch() statements so the driver accomodates to
- the fact that the AMD Athlon handles its MTRRs the same way
- as Intel does.
- 19990814 Zoltán Böszörményi <zboszor@mail.externet.hu>
- Double check for Intel in mtrr_add()'s big switch() because
- that revision check is only valid for Intel CPUs.
- 19990819 Alan Cox <alan@redhat.com>
- Tested Zoltan's changes on a pre production Athlon - 100%
- success.
- 19991008 Manfred Spraul <manfreds@colorfullife.com>
- replaced spin_lock_reschedule() with a normal semaphore.
- v1.36
- 20000221 Richard Gooch <rgooch@atnf.csiro.au>
- Compile fix if procfs and devfs not enabled.
- Formatting changes.
- v1.37
- 20001109 H. Peter Anvin <hpa@zytor.com>
- Use the new centralized CPU feature detects.
-
- v1.38
- 20010309 Dave Jones <davej@suse.de>
- Add support for Cyrix III.
-
- v1.39
- 20010312 Dave Jones <davej@suse.de>
- Ugh, I broke AMD support.
- Reworked fix by Troels Walsted Hansen <troels@thule.no>
-
- v1.40
- 20010327 Dave Jones <davej@suse.de>
- Adapted Cyrix III support to include VIA C3.
-
- v2.0
- 20020306 Patrick Mochel <mochel@osdl.org>
- Split mtrr.c -> mtrr/*.c
- Converted to Linux Kernel Coding Style
- Fixed several minor nits in form
- Moved some SMP-only functions out, so they can be used
- for power management in the future.
- TODO: Fix user interface cruft.
diff --git a/arch/i386/kernel/cpu/mtrr/if.c b/arch/i386/kernel/cpu/mtrr/if.c
index cf39e205d33..5ac051bb9d5 100644
--- a/arch/i386/kernel/cpu/mtrr/if.c
+++ b/arch/i386/kernel/cpu/mtrr/if.c
@@ -1,5 +1,6 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
+#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/seq_file.h>
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 6d91b274589..89a85af33d2 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -29,7 +29,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
NULL, NULL, NULL, "mp", "nx", NULL, "mmxext", NULL,
- NULL, "fxsr_opt", NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
+ NULL, "fxsr_opt", "rdtscp", NULL, NULL, "lm", "3dnowext", "3dnow",
/* Transmeta-defined */
"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
@@ -40,7 +40,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
/* Other (Linux-defined) */
"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ "constant_tsc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
@@ -57,11 +57,21 @@ static int show_cpuinfo(struct seq_file *m, void *v)
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* AMD-defined (#2) */
- "lahf_lm", "cmp_legacy", NULL, NULL, NULL, NULL, NULL, NULL,
+ "lahf_lm", "cmp_legacy", "svm", NULL, "cr8legacy", NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
+ static char *x86_power_flags[] = {
+ "ts", /* temperature sensor */
+ "fid", /* frequency id control */
+ "vid", /* voltage id control */
+ "ttp", /* thermal trip */
+ "tm",
+ "stc",
+ NULL,
+ /* nothing */ /* constant_tsc - moved to flags */
+ };
struct cpuinfo_x86 *c = v;
int i, n = c - cpu_data;
int fpu_exception;
@@ -131,6 +141,17 @@ static int show_cpuinfo(struct seq_file *m, void *v)
x86_cap_flags[i] != NULL )
seq_printf(m, " %s", x86_cap_flags[i]);
+ for (i = 0; i < 32; i++)
+ if (c->x86_power & (1 << i)) {
+ if (i < ARRAY_SIZE(x86_power_flags) &&
+ x86_power_flags[i])
+ seq_printf(m, "%s%s",
+ x86_power_flags[i][0]?" ":"",
+ x86_power_flags[i]);
+ else
+ seq_printf(m, " [%d]", i);
+ }
+
seq_printf(m, "\nbogomips\t: %lu.%02lu\n\n",
c->loops_per_jiffy/(500000/HZ),
(c->loops_per_jiffy/(5000/HZ)) % 100);
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
index 13bae799e62..006141d1c12 100644
--- a/arch/i386/kernel/cpuid.c
+++ b/arch/i386/kernel/cpuid.c
@@ -117,14 +117,13 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
{
char __user *tmp = buf;
u32 data[4];
- size_t rv;
u32 reg = *ppos;
int cpu = iminor(file->f_dentry->d_inode);
if (count % 16)
return -EINVAL; /* Invalid chunk size */
- for (rv = 0; count; count -= 16) {
+ for (; count; count -= 16) {
do_cpuid(cpu, reg, data);
if (copy_to_user(tmp, &data, 16))
return -EFAULT;
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index 0248e084017..d49dbe8dc96 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -25,7 +25,6 @@
#include <mach_ipi.h>
-note_buf_t crash_notes[NR_CPUS];
/* This keeps a track of which one is crashing cpu. */
static int crashing_cpu;
@@ -72,7 +71,9 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
* squirrelled away. ELF notes happen to provide
* all of that that no need to invent something new.
*/
- buf = &crash_notes[cpu][0];
+ buf = (u32*)per_cpu_ptr(crash_notes, cpu);
+ if (!buf)
+ return;
memset(&prstatus, 0, sizeof(prstatus));
prstatus.pr_pid = current->pid;
elf_core_copy_regs(&prstatus.pr_reg, regs);
@@ -81,51 +82,12 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
final_note(buf);
}
-static void crash_get_current_regs(struct pt_regs *regs)
-{
- __asm__ __volatile__("movl %%ebx,%0" : "=m"(regs->ebx));
- __asm__ __volatile__("movl %%ecx,%0" : "=m"(regs->ecx));
- __asm__ __volatile__("movl %%edx,%0" : "=m"(regs->edx));
- __asm__ __volatile__("movl %%esi,%0" : "=m"(regs->esi));
- __asm__ __volatile__("movl %%edi,%0" : "=m"(regs->edi));
- __asm__ __volatile__("movl %%ebp,%0" : "=m"(regs->ebp));
- __asm__ __volatile__("movl %%eax,%0" : "=m"(regs->eax));
- __asm__ __volatile__("movl %%esp,%0" : "=m"(regs->esp));
- __asm__ __volatile__("movw %%ss, %%ax;" :"=a"(regs->xss));
- __asm__ __volatile__("movw %%cs, %%ax;" :"=a"(regs->xcs));
- __asm__ __volatile__("movw %%ds, %%ax;" :"=a"(regs->xds));
- __asm__ __volatile__("movw %%es, %%ax;" :"=a"(regs->xes));
- __asm__ __volatile__("pushfl; popl %0" :"=m"(regs->eflags));
-
- regs->eip = (unsigned long)current_text_addr();
-}
-
-/* CPU does not save ss and esp on stack if execution is already
- * running in kernel mode at the time of NMI occurrence. This code
- * fixes it.
- */
-static void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs)
-{
- memcpy(newregs, oldregs, sizeof(*newregs));
- newregs->esp = (unsigned long)&(oldregs->esp);
- __asm__ __volatile__("xorl %eax, %eax;");
- __asm__ __volatile__ ("movw %%ss, %%ax;" :"=a"(newregs->xss));
-}
-
-/* We may have saved_regs from where the error came from
- * or it is NULL if via a direct panic().
- */
-static void crash_save_self(struct pt_regs *saved_regs)
+static void crash_save_self(struct pt_regs *regs)
{
- struct pt_regs regs;
int cpu;
cpu = smp_processor_id();
- if (saved_regs)
- crash_setup_regs(&regs, saved_regs);
- else
- crash_get_current_regs(&regs);
- crash_save_this_cpu(&regs, cpu);
+ crash_save_this_cpu(regs, cpu);
}
#ifdef CONFIG_SMP
@@ -144,7 +106,7 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
local_irq_disable();
if (!user_mode(regs)) {
- crash_setup_regs(&fixed_regs, regs);
+ crash_fixup_ss_esp(&fixed_regs, regs);
regs = &fixed_regs;
}
crash_save_this_cpu(regs, cpu);
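
The crash.c hunk above replaces the static note_buf_t crash_notes[NR_CPUS] array with a dynamically allocated per-CPU object addressed through per_cpu_ptr(). A minimal sketch of that allocation pattern, assuming the usual alloc_percpu()/free_percpu() API; struct note_buf and the module below are illustrative stand-ins, not the kexec code itself:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/types.h>

/* Stand-in for note_buf_t; illustration only. */
struct note_buf {
        u32 data[128];
};

static struct note_buf *notes;  /* per-CPU allocation, not a NR_CPUS array */

static int __init notes_init(void)
{
        notes = alloc_percpu(struct note_buf);
        if (!notes)
                return -ENOMEM;

        /* Each CPU's copy is reached through per_cpu_ptr(notes, cpu). */
        printk(KERN_INFO "CPU0 note buffer at %p\n", per_cpu_ptr(notes, 0));
        return 0;
}

static void __exit notes_exit(void)
{
        free_percpu(notes);
}

module_init(notes_init);
module_exit(notes_exit);
MODULE_LICENSE("GPL");
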
diff --git a/arch/i386/kernel/crash_dump.c b/arch/i386/kernel/crash_dump.c
new file mode 100644
index 00000000000..3f532df488b
--- /dev/null
+++ b/arch/i386/kernel/crash_dump.c
@@ -0,0 +1,74 @@
+/*
+ * arch/i386/kernel/crash_dump.c - Memory preserving reboot related code.
+ *
+ * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
+ * Copyright (C) IBM Corporation, 2004. All rights reserved
+ */
+
+#include <linux/errno.h>
+#include <linux/highmem.h>
+#include <linux/crash_dump.h>
+
+#include <asm/uaccess.h>
+
+static void *kdump_buf_page;
+
+/**
+ * copy_oldmem_page - copy one page from "oldmem"
+ * @pfn: page frame number to be copied
+ * @buf: target memory address for the copy; this can be in kernel address
+ * space or user address space (see @userbuf)
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page (based on pfn) to begin the copy
+ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+ * otherwise @buf is in kernel address space, use memcpy().
+ *
+ * Copy a page from "oldmem". For this page, there is no pte mapped
+ * in the current kernel. We stitch up a pte, similar to kmap_atomic.
+ *
+ * Calling copy_to_user() in atomic context is not desirable. Hence we
+ * first copy the data to a pre-allocated kernel page and then copy it
+ * to user space in non-atomic context.
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ size_t csize, unsigned long offset, int userbuf)
+{
+ void *vaddr;
+
+ if (!csize)
+ return 0;
+
+ vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
+
+ if (!userbuf) {
+ memcpy(buf, (vaddr + offset), csize);
+ kunmap_atomic(vaddr, KM_PTE0);
+ } else {
+ if (!kdump_buf_page) {
+ printk(KERN_WARNING "Kdump: Kdump buffer page not"
+ " allocated\n");
+ return -EFAULT;
+ }
+ copy_page(kdump_buf_page, vaddr);
+ kunmap_atomic(vaddr, KM_PTE0);
+ if (copy_to_user(buf, (kdump_buf_page + offset), csize))
+ return -EFAULT;
+ }
+
+ return csize;
+}
+
+static int __init kdump_buf_page_init(void)
+{
+ int ret = 0;
+
+ kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!kdump_buf_page) {
+ printk(KERN_WARNING "Kdump: Failed to allocate kdump buffer"
+ " page\n");
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+arch_initcall(kdump_buf_page_init);
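
copy_oldmem_page() above copies one page at a time and handles the temporary mapping itself, so a caller only has to walk the requested byte range. A hedged sketch of such a caller; read_oldmem_range() is a made-up name for illustration, not an existing kernel function:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/crash_dump.h>

static ssize_t read_oldmem_range(char *buf, u64 start, size_t len, int userbuf)
{
        size_t done = 0;

        while (done < len) {
                unsigned long pfn    = (unsigned long)((start + done) >> PAGE_SHIFT);
                unsigned long offset = (unsigned long)((start + done) & (PAGE_SIZE - 1));
                size_t chunk = min_t(size_t, len - done, PAGE_SIZE - offset);
                ssize_t ret;

                /* The arch helper maps the old kernel's page and copies. */
                ret = copy_oldmem_page(pfn, buf + done, chunk, offset, userbuf);
                if (ret < 0)
                        return ret;
                done += ret;
        }
        return done;
}
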
diff --git a/arch/i386/kernel/dmi_scan.c b/arch/i386/kernel/dmi_scan.c
index 58516e2ac17..6a93d75db43 100644
--- a/arch/i386/kernel/dmi_scan.c
+++ b/arch/i386/kernel/dmi_scan.c
@@ -4,7 +4,7 @@
#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/bootmem.h>
-
+#include <linux/slab.h>
static char * __init dmi_string(struct dmi_header *dm, u8 s)
{
@@ -19,7 +19,7 @@ static char * __init dmi_string(struct dmi_header *dm, u8 s)
}
if (*bp != 0) {
- str = alloc_bootmem(strlen(bp) + 1);
+ str = dmi_alloc(strlen(bp) + 1);
if (str != NULL)
strcpy(str, bp);
else
@@ -40,7 +40,7 @@ static int __init dmi_table(u32 base, int len, int num,
u8 *buf, *data;
int i = 0;
- buf = bt_ioremap(base, len);
+ buf = dmi_ioremap(base, len);
if (buf == NULL)
return -1;
@@ -65,7 +65,7 @@ static int __init dmi_table(u32 base, int len, int num,
data += 2;
i++;
}
- bt_iounmap(buf, len);
+ dmi_iounmap(buf, len);
return 0;
}
@@ -112,7 +112,7 @@ static void __init dmi_save_devices(struct dmi_header *dm)
if ((*d & 0x80) == 0)
continue;
- dev = alloc_bootmem(sizeof(*dev));
+ dev = dmi_alloc(sizeof(*dev));
if (!dev) {
printk(KERN_ERR "dmi_save_devices: out of memory.\n");
break;
@@ -131,7 +131,7 @@ static void __init dmi_save_ipmi_device(struct dmi_header *dm)
struct dmi_device *dev;
void * data;
- data = alloc_bootmem(dm->length);
+ data = dmi_alloc(dm->length);
if (data == NULL) {
printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
return;
@@ -139,7 +139,7 @@ static void __init dmi_save_ipmi_device(struct dmi_header *dm)
memcpy(data, dm, dm->length);
- dev = alloc_bootmem(sizeof(*dev));
+ dev = dmi_alloc(sizeof(*dev));
if (!dev) {
printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
return;
@@ -221,7 +221,7 @@ void __init dmi_scan_machine(void)
}
}
-out: printk(KERN_INFO "DMI not present.\n");
+out: printk(KERN_INFO "DMI not present or invalid.\n");
}
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index e50b9315524..4d704724b2f 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -323,6 +323,7 @@ work_notifysig: # deal with pending signals and
ALIGN
work_notifysig_v86:
+#ifdef CONFIG_VM86
pushl %ecx # save ti_flags for do_notify_resume
call save_v86_state # %eax contains pt_regs pointer
popl %ecx
@@ -330,6 +331,7 @@ work_notifysig_v86:
xorl %edx, %edx
call do_notify_resume
jmp resume_userspace
+#endif
# perform syscall exit tracing
ALIGN
@@ -657,6 +659,7 @@ ENTRY(spurious_interrupt_bug)
pushl $do_spurious_interrupt_bug
jmp error_code
+.section .rodata,"a"
#include "syscall_table.S"
syscall_table_size=(.-sys_call_table)
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index e437fb36749..5884469f6bf 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -504,19 +504,24 @@ ENTRY(cpu_gdt_table)
.quad 0x0000000000000000 /* 0x80 TSS descriptor */
.quad 0x0000000000000000 /* 0x88 LDT descriptor */
- /* Segments used for calling PnP BIOS */
- .quad 0x00c09a0000000000 /* 0x90 32-bit code */
- .quad 0x00809a0000000000 /* 0x98 16-bit code */
- .quad 0x0080920000000000 /* 0xa0 16-bit data */
- .quad 0x0080920000000000 /* 0xa8 16-bit data */
- .quad 0x0080920000000000 /* 0xb0 16-bit data */
+ /*
+ * Segments used for calling PnP BIOS have byte granularity.
+ * The code and data segments have fixed 64k limits; the transfer
+ * segment sizes are set at run time.
+ */
+ .quad 0x00409a000000ffff /* 0x90 32-bit code */
+ .quad 0x00009a000000ffff /* 0x98 16-bit code */
+ .quad 0x000092000000ffff /* 0xa0 16-bit data */
+ .quad 0x0000920000000000 /* 0xa8 16-bit data */
+ .quad 0x0000920000000000 /* 0xb0 16-bit data */
+
/*
* The APM segments have byte granularity and their bases
- * and limits are set at run time.
+ * are set at run time. All have 64k limits.
*/
- .quad 0x00409a0000000000 /* 0xb8 APM CS code */
- .quad 0x00009a0000000000 /* 0xc0 APM CS 16 code (16 bit) */
- .quad 0x0040920000000000 /* 0xc8 APM DS data */
+ .quad 0x00409a000000ffff /* 0xb8 APM CS code */
+ .quad 0x00009a000000ffff /* 0xc0 APM CS 16 code (16 bit) */
+ .quad 0x004092000000ffff /* 0xc8 APM DS data */
.quad 0x0000920000000000 /* 0xd0 - ESPFIX 16-bit SS */
.quad 0x0000000000000000 /* 0xd8 - unused */
@@ -525,3 +530,5 @@ ENTRY(cpu_gdt_table)
.quad 0x0000000000000000 /* 0xf0 - unused */
.quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
+ /* Be sure this is zeroed to avoid false validations in Xen */
+ .fill PAGE_SIZE_asm / 8 - GDT_ENTRIES,8,0
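
The new descriptor values in head.S encode a 64k, byte-granular limit directly (the trailing ffff), leaving only the bases to be patched at run time. A small user-space sketch that unpacks one of these 64-bit GDT entries into base, limit and flags to show where those fields live:

#include <stdio.h>
#include <stdint.h>

/* Unpack an x86 segment descriptor as laid out in the GDT. */
static void decode(uint64_t d)
{
        uint32_t limit = (uint32_t)(d & 0xffff) | (uint32_t)((d >> 48) & 0xf) << 16;
        uint32_t base  = (uint32_t)((d >> 16) & 0xffffff) | (uint32_t)((d >> 56) & 0xff) << 24;
        unsigned access = (unsigned)((d >> 40) & 0xff);
        unsigned flags  = (unsigned)((d >> 52) & 0xf);   /* G, D/B, L, AVL */

        printf("%016llx: base=0x%08x limit=0x%05x access=0x%02x %s-granular %s\n",
               (unsigned long long)d, base, limit, access,
               (flags & 0x8) ? "page" : "byte",
               (flags & 0x4) ? "32-bit" : "16-bit");
}

int main(void)
{
        decode(0x00409a000000ffffULL);   /* new APM CS: 64k limit, byte granular */
        decode(0x00009a000000ffffULL);   /* new APM CS 16 */
        decode(0x004092000000ffffULL);   /* new APM DS */
        return 0;
}
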
diff --git a/arch/i386/kernel/i386_ksyms.c b/arch/i386/kernel/i386_ksyms.c
index 180f070d03c..3999bec50c3 100644
--- a/arch/i386/kernel/i386_ksyms.c
+++ b/arch/i386/kernel/i386_ksyms.c
@@ -3,8 +3,7 @@
#include <asm/checksum.h>
#include <asm/desc.h>
-/* This is definitely a GPL-only symbol */
-EXPORT_SYMBOL_GPL(cpu_gdt_table);
+EXPORT_SYMBOL_GPL(cpu_gdt_descr);
EXPORT_SYMBOL(__down_failed);
EXPORT_SYMBOL(__down_failed_interruptible);
diff --git a/arch/i386/kernel/init_task.c b/arch/i386/kernel/init_task.c
index 9caa8e8db80..cff95d10a4d 100644
--- a/arch/i386/kernel/init_task.c
+++ b/arch/i386/kernel/init_task.c
@@ -42,5 +42,5 @@ EXPORT_SYMBOL(init_task);
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
* no more per-task TSS's.
*/
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_maxaligned_in_smp = INIT_TSS;
+DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 22c8675c79f..f2dd218d88c 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -1649,7 +1649,7 @@ static void __init enable_IO_APIC(void)
for(apic = 0; apic < nr_ioapics; apic++) {
int pin;
/* See if any of the pins is in ExtINT mode */
- for(pin = 0; pin < nr_ioapic_registers[i]; pin++) {
+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
struct IO_APIC_route_entry entry;
spin_lock_irqsave(&ioapic_lock, flags);
*(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
@@ -1722,8 +1722,8 @@ void disable_IO_APIC(void)
entry.dest_mode = 0; /* Physical */
entry.delivery_mode = dest_ExtINT; /* ExtInt */
entry.vector = 0;
- entry.dest.physical.physical_dest = 0;
-
+ entry.dest.physical.physical_dest =
+ GET_APIC_ID(apic_read(APIC_ID));
/*
* Add it to the IO-APIC irq-routing table:
diff --git a/arch/i386/kernel/ioport.c b/arch/i386/kernel/ioport.c
index b59a34dbe26..79026f026b8 100644
--- a/arch/i386/kernel/ioport.c
+++ b/arch/i386/kernel/ioport.c
@@ -7,6 +7,7 @@
#include <linux/sched.h>
#include <linux/kernel.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 1a201a93286..f3a9c78c4a2 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -19,7 +19,7 @@
#include <linux/cpu.h>
#include <linux/delay.h>
-DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_maxaligned_in_smp;
+DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
EXPORT_PER_CPU_SYMBOL(irq_stat);
#ifndef CONFIG_X86_LOCAL_APIC
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 19edcd526ba..6483eeb1a4e 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -58,13 +58,9 @@ static inline int is_IF_modifier(kprobe_opcode_t opcode)
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
- return 0;
-}
-
-void __kprobes arch_copy_kprobe(struct kprobe *p)
-{
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
p->opcode = *p->addr;
+ return 0;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
@@ -81,10 +77,6 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
(unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
-void __kprobes arch_remove_kprobe(struct kprobe *p)
-{
-}
-
static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
@@ -196,6 +188,19 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
kcb->kprobe_status = KPROBE_REENTER;
return 1;
} else {
+ if (regs->eflags & VM_MASK) {
+ /* We are in virtual-8086 mode. Return 0 */
+ goto no_kprobe;
+ }
+ if (*addr != BREAKPOINT_INSTRUCTION) {
+ /* The breakpoint instruction was removed by
+ * another cpu right after we hit it; no further
+ * handling of this interrupt is appropriate
+ */
+ regs->eip -= sizeof(kprobe_opcode_t);
+ ret = 1;
+ goto no_kprobe;
+ }
p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
goto ss_probe;
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index 165f13158c6..d3fdf0057d8 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -70,6 +70,7 @@
*/
//#define DEBUG /* pr_debug */
+#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
@@ -165,7 +166,7 @@ static void collect_cpu_info (void *unused)
wrmsr(MSR_IA32_UCODE_REV, 0, 0);
/* see notes above for revision 1.07. Apparent chip bug */
- serialize_cpu();
+ sync_core();
/* get the current revision from MSR 0x8B */
rdmsr(MSR_IA32_UCODE_REV, val[0], uci->rev);
pr_debug("microcode: collect_cpu_info : sig=0x%x, pf=0x%x, rev=0x%x\n",
@@ -379,7 +380,7 @@ static void do_update_one (void * unused)
wrmsr(MSR_IA32_UCODE_REV, 0, 0);
/* see notes above for revision 1.07. Apparent chip bug */
- serialize_cpu();
+ sync_core();
/* get the current revision from MSR 0x8B */
rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 1ca5269b1e8..91a64016956 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -38,6 +38,12 @@
int smp_found_config;
unsigned int __initdata maxcpus = NR_CPUS;
+#ifdef CONFIG_HOTPLUG_CPU
+#define CPU_HOTPLUG_ENABLED (1)
+#else
+#define CPU_HOTPLUG_ENABLED (0)
+#endif
+
/*
* Various Linux-internal data structures created from the
* MP-table.
@@ -219,14 +225,18 @@ static void __devinit MP_processor_info (struct mpc_config_processor *m)
cpu_set(num_processors, cpu_possible_map);
num_processors++;
- if ((num_processors > 8) &&
- ((APIC_XAPIC(ver) &&
- (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) ||
- (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)))
- def_to_bigsmp = 1;
- else
- def_to_bigsmp = 0;
-
+ if (CPU_HOTPLUG_ENABLED || (num_processors > 8)) {
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_INTEL:
+ if (!APIC_XAPIC(ver)) {
+ def_to_bigsmp = 0;
+ break;
+ }
+ /* If P4 and above fall through */
+ case X86_VENDOR_AMD:
+ def_to_bigsmp = 1;
+ }
+ }
bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
}
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
index 44470fea430..1d0a55e6876 100644
--- a/arch/i386/kernel/msr.c
+++ b/arch/i386/kernel/msr.c
@@ -172,7 +172,6 @@ static ssize_t msr_read(struct file *file, char __user * buf,
{
u32 __user *tmp = (u32 __user *) buf;
u32 data[2];
- size_t rv;
u32 reg = *ppos;
int cpu = iminor(file->f_dentry->d_inode);
int err;
@@ -180,7 +179,7 @@ static ssize_t msr_read(struct file *file, char __user * buf,
if (count % 8)
return -EINVAL; /* Invalid chunk size */
- for (rv = 0; count; count -= 8) {
+ for (; count; count -= 8) {
err = do_rdmsr(cpu, reg, &data[0], &data[1]);
if (err)
return err;
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 2333aead056..2185377fdde 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -48,6 +48,7 @@
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
+#include <asm/vm86.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif
@@ -308,9 +309,7 @@ void show_regs(struct pt_regs * regs)
cr0 = read_cr0();
cr2 = read_cr2();
cr3 = read_cr3();
- if (current_cpu_data.x86 > 4) {
- cr4 = read_cr4();
- }
+ cr4 = read_cr4_safe();
printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
show_trace(NULL, &regs->esp);
}
@@ -404,17 +403,7 @@ void flush_thread(void)
void release_thread(struct task_struct *dead_task)
{
- if (dead_task->mm) {
- // temporary debugging check
- if (dead_task->mm->context.size) {
- printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
- dead_task->comm,
- dead_task->mm->context.ldt,
- dead_task->mm->context.size);
- BUG();
- }
- }
-
+ BUG_ON(dead_task->mm);
release_vm86_irqs(dead_task);
}
@@ -435,18 +424,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
struct task_struct *tsk;
int err;
- childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
- /*
- * The below -8 is to reserve 8 bytes on top of the ring0 stack.
- * This is necessary to guarantee that the entire "struct pt_regs"
- * is accessable even if the CPU haven't stored the SS/ESP registers
- * on the stack (interrupt gate does not save these registers
- * when switching to the same priv ring).
- * Therefore beware: accessing the xss/esp fields of the
- * "struct pt_regs" is possible, but they may contain the
- * completely wrong values.
- */
- childregs = (struct pt_regs *) ((unsigned long) childregs - 8);
+ childregs = task_pt_regs(p);
*childregs = *regs;
childregs->eax = 0;
childregs->esp = esp;
@@ -551,12 +529,7 @@ EXPORT_SYMBOL(dump_thread);
*/
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
- struct pt_regs ptregs;
-
- ptregs = *(struct pt_regs *)
- ((unsigned long)tsk->thread_info +
- /* see comments in copy_thread() about -8 */
- THREAD_SIZE - sizeof(ptregs) - 8);
+ struct pt_regs ptregs = *task_pt_regs(tsk);
ptregs.xcs &= 0xffff;
ptregs.xds &= 0xffff;
ptregs.xes &= 0xffff;
@@ -612,8 +585,8 @@ static inline void disable_tsc(struct task_struct *prev_p,
* gcc should eliminate the ->thread_info dereference if
* has_secure_computing returns 0 at compile time (SECCOMP=n).
*/
- prev = prev_p->thread_info;
- next = next_p->thread_info;
+ prev = task_thread_info(prev_p);
+ next = task_thread_info(next_p);
if (has_secure_computing(prev) || has_secure_computing(next)) {
/* slow path here */
@@ -798,7 +771,7 @@ unsigned long get_wchan(struct task_struct *p)
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
- stack_page = (unsigned long)p->thread_info;
+ stack_page = (unsigned long)task_stack_page(p);
esp = p->thread.esp;
if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
return 0;
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index 5ffbb4b7ad0..5c1fb6aada5 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -32,9 +32,12 @@
* in exit.c or in signal.c.
*/
-/* determines which flags the user has access to. */
-/* 1 = access 0 = no access */
-#define FLAG_MASK 0x00044dd5
+/*
+ * Determines which flags the user has access to [1 = access, 0 = no access].
+ * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9).
+ * Also masks reserved bits (31-22, 15, 5, 3, 1).
+ */
+#define FLAG_MASK 0x00054dd5
/* set's the trap flag. */
#define TRAP_FLAG 0x100
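
Relative to the old 0x00044dd5, the new FLAG_MASK also lets the tracer toggle RF (bit 16). A user-space sketch of the usual masking idiom and of which EFLAGS bits 0x00054dd5 covers; the example values below are arbitrary:

#include <stdio.h>

#define FLAG_MASK 0x00054dd5UL   /* value from the hunk above */

/* Sketch of the usual masking idiom: keep privileged/reserved bits from
 * the old value, take only the maskable bits from the requested value. */
static unsigned long apply_user_eflags(unsigned long old, unsigned long req)
{
        return (old & ~FLAG_MASK) | (req & FLAG_MASK);
}

int main(void)
{
        /* Bits set in 0x00054dd5: CF(0) PF(2) AF(4) ZF(6) SF(7) TF(8)
         * DF(10) OF(11) NT(14) RF(16) AC(18) -- IF, IOPL, VM and the
         * virtual-interrupt bits stay under kernel control. */
        unsigned long cur = 0x00000202;    /* IF set, a typical user value  */
        unsigned long req = 0xffffffff;    /* tracer asks for everything    */

        printf("result: 0x%08lx\n", apply_user_eflags(cur, req));
        return 0;
}
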
diff --git a/arch/i386/kernel/reboot.c b/arch/i386/kernel/reboot.c
index 2afe0f8d555..d207242976d 100644
--- a/arch/i386/kernel/reboot.c
+++ b/arch/i386/kernel/reboot.c
@@ -12,6 +12,7 @@
#include <linux/efi.h>
#include <linux/dmi.h>
#include <linux/ctype.h>
+#include <linux/pm.h>
#include <asm/uaccess.h>
#include <asm/apic.h>
#include <asm/desc.h>
@@ -111,12 +112,12 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
},
},
- { /* Handle problems with rebooting on HP nc6120 */
+ { /* Handle problems with rebooting on HP laptops */
.callback = set_bios_reboot,
- .ident = "HP Compaq nc6120",
+ .ident = "HP Compaq Laptop",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nc6120"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
},
},
{ }
@@ -355,10 +356,10 @@ void machine_halt(void)
void machine_power_off(void)
{
- machine_shutdown();
-
- if (pm_power_off)
+ if (pm_power_off) {
+ machine_shutdown();
pm_power_off();
+ }
}
diff --git a/arch/i386/kernel/scx200.c b/arch/i386/kernel/scx200.c
index 9c968ae67c4..321f5fd26e7 100644
--- a/arch/i386/kernel/scx200.c
+++ b/arch/i386/kernel/scx200.c
@@ -143,7 +143,7 @@ static int __init scx200_init(void)
{
printk(KERN_INFO NAME ": NatSemi SCx200 Driver\n");
- return pci_module_init(&scx200_pci_driver);
+ return pci_register_driver(&scx200_pci_driver);
}
static void __exit scx200_cleanup(void)
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index fdfcb0cba9b..51e513b4f72 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -45,6 +45,7 @@
#include <linux/nodemask.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
+#include <linux/dmi.h>
#include <video/edid.h>
@@ -146,7 +147,6 @@ EXPORT_SYMBOL(ist_info);
struct e820map e820;
extern void early_cpu_init(void);
-extern void dmi_scan_machine(void);
extern void generic_apic_probe(char *);
extern int root_mountflags;
@@ -898,7 +898,7 @@ static void __init parse_cmdline_early (char ** cmdline_p)
}
}
#endif
-#ifdef CONFIG_CRASH_DUMP
+#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
* stored by the crashed kernel.
*/
@@ -954,6 +954,12 @@ efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
return 0;
}
+static int __init
+efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
+{
+ memory_present(0, start, end);
+ return 0;
+}
/*
* Find the highest page frame number we have available
@@ -965,6 +971,7 @@ void __init find_max_pfn(void)
max_pfn = 0;
if (efi_enabled) {
efi_memmap_walk(efi_find_max_pfn, &max_pfn);
+ efi_memmap_walk(efi_memory_present_wrapper, NULL);
return;
}
@@ -979,6 +986,7 @@ void __init find_max_pfn(void)
continue;
if (end > max_pfn)
max_pfn = end;
+ memory_present(0, start, end);
}
}
@@ -1576,7 +1584,7 @@ void __init setup_arch(char **cmdline_p)
if (s) {
extern void setup_early_printk(char *);
- setup_early_printk(s);
+ setup_early_printk(strchr(s, '=') + 1);
printk("early console enabled\n");
}
}
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 9ed449af8e9..255adb49826 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -875,8 +875,7 @@ static inline struct task_struct * alloc_idle_task(int cpu)
/* initialize thread_struct. we really want to avoid destroy
* idle tread
*/
- idle->thread.esp = (unsigned long)(((struct pt_regs *)
- (THREAD_SIZE + (unsigned long) idle->thread_info)) - 1);
+ idle->thread.esp = (unsigned long)task_pt_regs(idle);
init_idle(idle, cpu);
return idle;
}
@@ -903,6 +902,12 @@ static int __devinit do_boot_cpu(int apicid, int cpu)
unsigned long start_eip;
unsigned short nmi_high = 0, nmi_low = 0;
+ if (!cpu_gdt_descr[cpu].address &&
+ !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
+ printk("Failed to allocate GDT for CPU %d\n", cpu);
+ return 1;
+ }
+
++cpucount;
/*
@@ -1090,6 +1095,7 @@ static void smp_tune_scheduling (void)
cachesize = 16; /* Pentiums, 2x8kB cache */
bandwidth = 100;
}
+ max_cache_size = cachesize * 1024;
}
}
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S
index 9b21a31d4f4..6ff3e524322 100644
--- a/arch/i386/kernel/syscall_table.S
+++ b/arch/i386/kernel/syscall_table.S
@@ -1,4 +1,3 @@
-.data
ENTRY(sys_call_table)
.long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
.long sys_exit
@@ -294,3 +293,4 @@ ENTRY(sys_call_table)
.long sys_inotify_init
.long sys_inotify_add_watch
.long sys_inotify_rm_watch
+ .long sys_migrate_pages
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 41c5b2dc620..a14d594bfbe 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -302,6 +302,12 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
do_timer_interrupt(irq, regs);
write_sequnlock(&xtime_lock);
+
+#ifdef CONFIG_X86_LOCAL_APIC
+ if (using_apic_timer)
+ smp_send_timer_broadcast_ipi(regs);
+#endif
+
return IRQ_HANDLED;
}
diff --git a/arch/i386/kernel/time_hpet.c b/arch/i386/kernel/time_hpet.c
index 9caeaa315cd..a529f0cdce1 100644
--- a/arch/i386/kernel/time_hpet.c
+++ b/arch/i386/kernel/time_hpet.c
@@ -259,8 +259,6 @@ __setup("hpet=", hpet_setup);
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>
-extern irqreturn_t rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs);
-
#define DEFAULT_RTC_INT_FREQ 64
#define RTC_NUM_INTS 1
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index d395e3b4248..47675bbbb31 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -330,7 +330,9 @@ int recalibrate_cpu_khz(void)
unsigned int cpu_khz_old = cpu_khz;
if (cpu_has_tsc) {
+ local_irq_disable();
init_cpu_khz();
+ local_irq_enable();
cpu_data[0].loops_per_jiffy =
cpufreq_scale(cpu_data[0].loops_per_jiffy,
cpu_khz_old,
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index ab0e9430f77..b9f0030a2eb 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -120,7 +120,7 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
#ifdef CONFIG_FRAME_POINTER
while (valid_stack_ptr(tinfo, (void *)ebp)) {
addr = *(unsigned long *)(ebp + 4);
- printk(" [<%08lx>] ", addr);
+ printk(KERN_EMERG " [<%08lx>] ", addr);
print_symbol("%s", addr);
printk("\n");
ebp = *(unsigned long *)ebp;
@@ -129,7 +129,7 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
while (valid_stack_ptr(tinfo, stack)) {
addr = *stack++;
if (__kernel_text_address(addr)) {
- printk(" [<%08lx>]", addr);
+ printk(KERN_EMERG " [<%08lx>]", addr);
print_symbol(" %s", addr);
printk("\n");
}
@@ -161,7 +161,7 @@ void show_trace(struct task_struct *task, unsigned long * stack)
stack = (unsigned long*)context->previous_esp;
if (!stack)
break;
- printk(" =======================\n");
+ printk(KERN_EMERG " =======================\n");
}
}
@@ -178,14 +178,15 @@ void show_stack(struct task_struct *task, unsigned long *esp)
}
stack = esp;
+ printk(KERN_EMERG);
for(i = 0; i < kstack_depth_to_print; i++) {
if (kstack_end(stack))
break;
if (i && ((i % 8) == 0))
- printk("\n ");
+ printk("\n" KERN_EMERG " ");
printk("%08lx ", *stack++);
}
- printk("\nCall Trace:\n");
+ printk("\n" KERN_EMERG "Call Trace:\n");
show_trace(task, esp);
}
@@ -216,18 +217,18 @@ void show_registers(struct pt_regs *regs)
ss = regs->xss & 0xffff;
}
print_modules();
- printk("CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\nEFLAGS: %08lx"
- " (%s) \n",
+ printk(KERN_EMERG "CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\n"
+ "EFLAGS: %08lx (%s) \n",
smp_processor_id(), 0xffff & regs->xcs, regs->eip,
print_tainted(), regs->eflags, system_utsname.release);
- print_symbol("EIP is at %s\n", regs->eip);
- printk("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
+ print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
+ printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
regs->eax, regs->ebx, regs->ecx, regs->edx);
- printk("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
+ printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
regs->esi, regs->edi, regs->ebp, esp);
- printk("ds: %04x es: %04x ss: %04x\n",
+ printk(KERN_EMERG "ds: %04x es: %04x ss: %04x\n",
regs->xds & 0xffff, regs->xes & 0xffff, ss);
- printk("Process %s (pid: %d, threadinfo=%p task=%p)",
+ printk(KERN_EMERG "Process %s (pid: %d, threadinfo=%p task=%p)",
current->comm, current->pid, current_thread_info(), current);
/*
* When in-kernel, we also print out the stack and code at the
@@ -236,10 +237,10 @@ void show_registers(struct pt_regs *regs)
if (in_kernel) {
u8 __user *eip;
- printk("\nStack: ");
+ printk("\n" KERN_EMERG "Stack: ");
show_stack(NULL, (unsigned long*)esp);
- printk("Code: ");
+ printk(KERN_EMERG "Code: ");
eip = (u8 __user *)regs->eip - 43;
for (i = 0; i < 64; i++, eip++) {
@@ -280,15 +281,15 @@ static void handle_BUG(struct pt_regs *regs)
(unsigned long)file < PAGE_OFFSET || __get_user(c, file))
file = "<bad filename>";
- printk("------------[ cut here ]------------\n");
- printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line);
+ printk(KERN_EMERG "------------[ cut here ]------------\n");
+ printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
no_bug:
return;
/* Here we know it was a BUG but file-n-line is unavailable */
bug:
- printk("Kernel BUG\n");
+ printk(KERN_EMERG "Kernel BUG\n");
}
/* This is gone through when something in the kernel
@@ -306,28 +307,35 @@ void die(const char * str, struct pt_regs * regs, long err)
.lock_owner_depth = 0
};
static int die_counter;
+ unsigned long flags;
if (die.lock_owner != raw_smp_processor_id()) {
console_verbose();
- spin_lock_irq(&die.lock);
+ spin_lock_irqsave(&die.lock, flags);
die.lock_owner = smp_processor_id();
die.lock_owner_depth = 0;
bust_spinlocks(1);
}
+ else
+ local_save_flags(flags);
if (++die.lock_owner_depth < 3) {
int nl = 0;
handle_BUG(regs);
- printk(KERN_ALERT "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
+ printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
- printk("PREEMPT ");
+ printk(KERN_EMERG "PREEMPT ");
nl = 1;
#endif
#ifdef CONFIG_SMP
+ if (!nl)
+ printk(KERN_EMERG);
printk("SMP ");
nl = 1;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
+ if (!nl)
+ printk(KERN_EMERG);
printk("DEBUG_PAGEALLOC");
nl = 1;
#endif
@@ -336,11 +344,11 @@ void die(const char * str, struct pt_regs * regs, long err)
notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
show_registers(regs);
} else
- printk(KERN_ERR "Recursive die() failure, output suppressed\n");
+ printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
bust_spinlocks(0);
die.lock_owner = -1;
- spin_unlock_irq(&die.lock);
+ spin_unlock_irqrestore(&die.lock, flags);
if (kexec_should_crash(current))
crash_kexec(regs);
@@ -524,8 +532,10 @@ gp_in_kernel:
static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
- printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
- printk("You probably have a hardware problem with your RAM chips\n");
+ printk(KERN_EMERG "Uhhuh. NMI received. Dazed and confused, but trying "
+ "to continue\n");
+ printk(KERN_EMERG "You probably have a hardware problem with your RAM "
+ "chips\n");
/* Clear and disable the memory parity error line. */
clear_mem_error(reason);
@@ -535,7 +545,7 @@ static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
unsigned long i;
- printk("NMI: IOCK error (debug interrupt?)\n");
+ printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
show_registers(regs);
/* Re-enable the IOCK line, wait for a few seconds */
@@ -577,11 +587,11 @@ void die_nmi (struct pt_regs *regs, const char *msg)
* to get a message out.
*/
bust_spinlocks(1);
- printk(msg);
+ printk(KERN_EMERG "%s", msg);
printk(" on CPU%d, eip %08lx, registers:\n",
smp_processor_id(), regs->eip);
show_registers(regs);
- printk("console shuts up ...\n");
+ printk(KERN_EMERG "console shuts up ...\n");
console_silent();
spin_unlock(&nmi_print_lock);
bust_spinlocks(0);
@@ -987,8 +997,8 @@ asmlinkage void math_state_restore(struct pt_regs regs)
asmlinkage void math_emulate(long arg)
{
- printk("math-emulation not enabled and no coprocessor found.\n");
- printk("killing %s.\n",current->comm);
+ printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
+ printk(KERN_EMERG "killing %s.\n",current->comm);
force_sig(SIGFPE,current);
schedule();
}
@@ -1075,9 +1085,9 @@ void __init trap_init(void)
set_trap_gate(0,&divide_error);
set_intr_gate(1,&debug);
set_intr_gate(2,&nmi);
- set_system_intr_gate(3, &int3); /* int3-5 can be called from all */
+ set_system_intr_gate(3, &int3); /* int3/4 can be called from all */
set_system_gate(4,&overflow);
- set_system_gate(5,&bounds);
+ set_trap_gate(5,&bounds);
set_trap_gate(6,&invalid_op);
set_trap_gate(7,&device_not_available);
set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS);
@@ -1095,6 +1105,28 @@ void __init trap_init(void)
#endif
set_trap_gate(19,&simd_coprocessor_error);
+ if (cpu_has_fxsr) {
+ /*
+ * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
+ * Generates a compile-time "error: zero width for bit-field" if
+ * the alignment is wrong.
+ */
+ struct fxsrAlignAssert {
+ int _:!(offsetof(struct task_struct,
+ thread.i387.fxsave) & 15);
+ };
+
+ printk(KERN_INFO "Enabling fast FPU save and restore... ");
+ set_in_cr4(X86_CR4_OSFXSR);
+ printk("done.\n");
+ }
+ if (cpu_has_xmm) {
+ printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
+ "support... ");
+ set_in_cr4(X86_CR4_OSXMMEXCPT);
+ printk("done.\n");
+ }
+
set_system_gate(SYSCALL_VECTOR,&system_call);
/*
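
The fxsrAlignAssert structure in the trap_init() hunk is a compile-time assertion: C forbids a zero-width named bit-field, so the build fails with "zero width for bit-field" whenever the offset test evaluates to zero. A generic, stand-alone sketch of the same trick (COMPILE_TIME_ASSERT and struct task_like are illustrative names, not kernel API):

#include <stddef.h>

/* Generic form of the trick: a named bit-field may not have zero width,
 * so if `cond` is false the width becomes 0 and compilation stops with
 * "zero width for bit-field". */
#define COMPILE_TIME_ASSERT(cond) \
        struct compile_time_assert_ { int _ : !!(cond); }

struct fxsave_area { char data[512]; };

struct task_like {
        long pad;                       /* anything before the FPU state */
        struct fxsave_area fxsave __attribute__((aligned(16)));
};

/* Passes: the member offset is a multiple of 16.  Drop the aligned(16)
 * attribute to see the build break. */
COMPILE_TIME_ASSERT((offsetof(struct task_like, fxsave) & 15) == 0);

int main(void)
{
        return 0;
}
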
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index fc1993564f9..0c90ae54ddf 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -30,6 +30,7 @@
*
*/
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -310,7 +311,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
"movl %1,%%ebp\n\t"
"jmp resume_userspace"
: /* no outputs */
- :"r" (&info->regs), "r" (tsk->thread_info) : "ax");
+ :"r" (&info->regs), "r" (task_thread_info(tsk)) : "ax");
/* we never return here */
}
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 06e26f00623..7df494b51a5 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -735,6 +735,30 @@ void free_initmem(void)
printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
}
+#ifdef CONFIG_DEBUG_RODATA
+
+extern char __start_rodata, __end_rodata;
+void mark_rodata_ro(void)
+{
+ unsigned long addr = (unsigned long)&__start_rodata;
+
+ for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
+ change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);
+
+ printk ("Write protecting the kernel read-only data: %luk\n",
+ (unsigned long)(&__end_rodata - &__start_rodata) >> 10);
+
+ /*
+ * change_page_attr() requires a global_flush_tlb() call after it.
+ * We do this after the printk so that if something went wrong in the
+ * change, the printk gets out at least to give a better debug hint
+ * of who is the culprit.
+ */
+ global_flush_tlb();
+}
+#endif
+
+
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
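
mark_rodata_ro() above also documents the calling convention for change_page_attr(): the attribute updates are batched and only become fully effective after global_flush_tlb(). A minimal caller sketch following the same contract (the helper itself is hypothetical):

static void demo_write_protect(void *addr, int numpages)
{
	/* Queue the PTE permission changes ... */
	change_page_attr(virt_to_page(addr), numpages, PAGE_KERNEL_RO);
	/* ... then flush, as the comment in mark_rodata_ro() requires. */
	global_flush_tlb();
}
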
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index f600fc244f0..d0cadb33b54 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -13,6 +13,7 @@
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
+#include <asm/sections.h>
static DEFINE_SPINLOCK(cpa_lock);
static struct list_head df_list = LIST_HEAD_INIT(df_list);
@@ -36,7 +37,8 @@ pte_t *lookup_address(unsigned long address)
return pte_offset_kernel(pmd, address);
}
-static struct page *split_large_page(unsigned long address, pgprot_t prot)
+static struct page *split_large_page(unsigned long address, pgprot_t prot,
+ pgprot_t ref_prot)
{
int i;
unsigned long addr;
@@ -54,7 +56,7 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot)
pbase = (pte_t *)page_address(base);
for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
- addr == address ? prot : PAGE_KERNEL));
+ addr == address ? prot : ref_prot));
}
return base;
}
@@ -98,11 +100,18 @@ static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
*/
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
- pte_t *linear = (pte_t *)
+ pgprot_t ref_prot;
+ pte_t *linear;
+
+ ref_prot =
+ ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
+ ? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;
+
+ linear = (pte_t *)
pmd_offset(pud_offset(pgd_offset_k(address), address), address);
set_pmd_pte(linear, address,
pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
- PAGE_KERNEL_LARGE));
+ ref_prot));
}
static int
@@ -123,10 +132,16 @@ __change_page_attr(struct page *page, pgprot_t prot)
if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
set_pte_atomic(kpte, mk_pte(page, prot));
} else {
- struct page *split = split_large_page(address, prot);
+ pgprot_t ref_prot;
+ struct page *split;
+
+ ref_prot =
+ ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
+ ? PAGE_KERNEL_EXEC : PAGE_KERNEL;
+ split = split_large_page(address, prot, ref_prot);
if (!split)
return -ENOMEM;
- set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
+ set_pmd_pte(kpte,address,mk_pte(split, ref_prot));
kpte_page = split;
}
get_page(kpte_page);
@@ -207,6 +222,10 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
{
if (PageHighMem(page))
return;
+ if (!enable)
+ mutex_debug_check_no_locks_freed(page_address(page),
+ numpages * PAGE_SIZE);
+
/* the return value is ignored - the calls cannot fail,
* large pages are disabled at boot time.
*/
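
The recurring ref_prot computation above decides what protection the surrounding small pages keep when a large kernel mapping is split or reverted: anything still covering kernel text (below _etext) must stay executable. A sketch of that decision in isolation; the helper name is hypothetical:

static pgprot_t demo_ref_prot(unsigned long address)
{
	if ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
		return PAGE_KERNEL_EXEC;	/* still maps kernel text */
	return PAGE_KERNEL;			/* plain data mapping */
}
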
diff --git a/arch/i386/pci/acpi.c b/arch/i386/pci/acpi.c
index 4c4522b43be..b33aea845f5 100644
--- a/arch/i386/pci/acpi.c
+++ b/arch/i386/pci/acpi.c
@@ -53,7 +53,7 @@ static int __init pci_acpi_init(void)
* don't use pci_enable_device().
*/
printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n");
- while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL)
+ for_each_pci_dev(dev)
acpi_pci_irq_enable(dev);
} else
printk(KERN_INFO "PCI: If a device doesn't work, try \"pci=routeirq\". If it helps, post a report\n");
diff --git a/arch/i386/pci/fixup.c b/arch/i386/pci/fixup.c
index eeb1b1f2d54..65f67070db6 100644
--- a/arch/i386/pci/fixup.c
+++ b/arch/i386/pci/fixup.c
@@ -413,6 +413,13 @@ static struct dmi_system_id __devinitdata toshiba_ohci1394_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "PSM4"),
},
},
+ {
+ .ident = "Toshiba A40 based laptop",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"),
+ },
+ },
{ }
};
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index 19e6f4871d1..e715aa93003 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -78,7 +78,7 @@ static inline struct irq_routing_table * pirq_check_routing_table(u8 *addr)
for (i=0; i < rt->size; i++)
sum += addr[i];
if (!sum) {
- DBG("PCI: Interrupt Routing Table found at 0x%p\n", rt);
+ DBG(KERN_DEBUG "PCI: Interrupt Routing Table found at 0x%p\n", rt);
return rt;
}
return NULL;
@@ -128,7 +128,7 @@ static void __init pirq_peer_trick(void)
#ifdef DEBUG
{
int j;
- DBG("%02x:%02x slot=%02x", e->bus, e->devfn/8, e->slot);
+ DBG(KERN_DEBUG "%02x:%02x slot=%02x", e->bus, e->devfn/8, e->slot);
for(j=0; j<4; j++)
DBG(" %d:%02x/%04x", j, e->irq[j].link, e->irq[j].bitmap);
DBG("\n");
@@ -160,10 +160,10 @@ void eisa_set_level_irq(unsigned int irq)
return;
eisa_irq_mask |= (1 << irq);
- printk("PCI: setting IRQ %u as level-triggered\n", irq);
+ printk(KERN_DEBUG "PCI: setting IRQ %u as level-triggered\n", irq);
val = inb(port);
if (!(val & mask)) {
- DBG(" -> edge");
+ DBG(KERN_DEBUG " -> edge");
outb(val | mask, port);
}
}
@@ -677,11 +677,11 @@ static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router,
{
case PCI_DEVICE_ID_AL_M1533:
case PCI_DEVICE_ID_AL_M1563:
- printk("PCI: Using ALI IRQ Router\n");
- r->name = "ALI";
- r->get = pirq_ali_get;
- r->set = pirq_ali_set;
- return 1;
+ printk(KERN_DEBUG "PCI: Using ALI IRQ Router\n");
+ r->name = "ALI";
+ r->get = pirq_ali_get;
+ r->set = pirq_ali_set;
+ return 1;
}
return 0;
}
@@ -749,12 +749,13 @@ static void __init pirq_find_router(struct irq_router *r)
r->get = NULL;
r->set = NULL;
- DBG("PCI: Attempting to find IRQ router for %04x:%04x\n",
+ DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for %04x:%04x\n",
rt->rtr_vendor, rt->rtr_device);
pirq_router_dev = pci_find_slot(rt->rtr_bus, rt->rtr_devfn);
if (!pirq_router_dev) {
- DBG("PCI: Interrupt router not found at %02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
+ DBG(KERN_DEBUG "PCI: Interrupt router not found at "
+ "%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
return;
}
@@ -799,7 +800,7 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
/* Find IRQ pin */
pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
if (!pin) {
- DBG(" -> no interrupt pin\n");
+ DBG(KERN_DEBUG " -> no interrupt pin\n");
return 0;
}
pin = pin - 1;
@@ -809,16 +810,16 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
if (!pirq_table)
return 0;
- DBG("IRQ for %s[%c]", pci_name(dev), 'A' + pin);
+ DBG(KERN_DEBUG "IRQ for %s[%c]", pci_name(dev), 'A' + pin);
info = pirq_get_info(dev);
if (!info) {
- DBG(" -> not found in routing table\n");
+ DBG(" -> not found in routing table\n" KERN_DEBUG);
return 0;
}
pirq = info->irq[pin].link;
mask = info->irq[pin].bitmap;
if (!pirq) {
- DBG(" -> not routed\n");
+ DBG(" -> not routed\n" KERN_DEBUG);
return 0;
}
DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask, pirq_table->exclusive_irqs);
@@ -846,9 +847,12 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
* reported by the device if possible.
*/
newirq = dev->irq;
- if (!((1 << newirq) & mask)) {
+ if (newirq && !((1 << newirq) & mask)) {
if ( pci_probe & PCI_USE_PIRQ_MASK) newirq = 0;
- else printk(KERN_WARNING "PCI: IRQ %i for device %s doesn't match PIRQ mask - try pci=usepirqmask\n", newirq, pci_name(dev));
+ else printk("\n" KERN_WARNING
+ "PCI: IRQ %i for device %s doesn't match PIRQ mask "
+ "- try pci=usepirqmask\n" KERN_DEBUG, newirq,
+ pci_name(dev));
}
if (!newirq && assign) {
for (i = 0; i < 16; i++) {
@@ -923,14 +927,14 @@ static void __init pcibios_fixup_irqs(void)
struct pci_dev *dev = NULL;
u8 pin;
- DBG("PCI: IRQ fixup\n");
+ DBG(KERN_DEBUG "PCI: IRQ fixup\n");
while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
/*
* If the BIOS has set an out of range IRQ number, just ignore it.
* Also keep track of which IRQ's are already in use.
*/
if (dev->irq >= 16) {
- DBG("%s: ignoring bogus IRQ %d\n", pci_name(dev), dev->irq);
+ DBG(KERN_DEBUG "%s: ignoring bogus IRQ %d\n", pci_name(dev), dev->irq);
dev->irq = 0;
}
/* If the IRQ is already assigned to a PCI device, ignore its ISA use penalty */
@@ -1039,7 +1043,7 @@ static struct dmi_system_id __initdata pciirq_dmi_table[] = {
static int __init pcibios_irq_init(void)
{
- DBG("PCI: IRQ init\n");
+ DBG(KERN_DEBUG "PCI: IRQ init\n");
if (pcibios_enable_irq || raw_pci_ops == NULL)
return 0;
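
All of these hunks rely on KERN_EMERG, KERN_WARNING, KERN_DEBUG and friends being plain string literals, so adjacent-string concatenation folds the level into the format string at compile time; that is also why a level can legally appear after a "\n", where it tags the line that the next printk/DBG continues. A sketch, assuming the conventional "<7>" value for KERN_DEBUG in this era:

#define DEMO_KERN_DEBUG "<7>"		/* assumed definition of KERN_DEBUG */

static void demo_levels(void)
{
	/* "<7>" "PCI: IRQ init\n" becomes a single literal after concatenation. */
	printk(DEMO_KERN_DEBUG "PCI: IRQ init\n");

	/* A level placed after "\n" primes the *following* line, as in the
	 * pcibios_lookup_irq() hunk above. */
	printk("\n" KERN_WARNING "something went wrong\n" DEMO_KERN_DEBUG);
}
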
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 67932ad5308..f722e1a2594 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -25,7 +25,6 @@ cflags-y := -pipe $(EXTRA) -ffixed-r13 -mfixed-range=f12-f15,f32-f127 \
-falign-functions=32 -frename-registers -fno-optimize-sibling-calls
CFLAGS_KERNEL := -mconstant-gp
-GCC_VERSION := $(call cc-version)
GAS_STATUS = $(shell $(srctree)/arch/ia64/scripts/check-gas "$(CC)" "$(OBJDUMP)")
CPPFLAGS += $(shell $(srctree)/arch/ia64/scripts/toolchain-flags "$(CC)" "$(OBJDUMP)" "$(READELF)")
@@ -37,11 +36,7 @@ $(error Sorry, you need a newer version of the assember, one that is built from
ftp://ftp.hpl.hp.com/pub/linux-ia64/gas-030124.tar.gz)
endif
-ifneq ($(shell if [ $(GCC_VERSION) -lt 0300 ] ; then echo "bad"; fi ;),)
-$(error Sorry, your compiler is too old. GCC v2.96 is known to generate bad code.)
-endif
-
-ifeq ($(GCC_VERSION),0304)
+ifeq ($(call cc-version),0304)
cflags-$(CONFIG_ITANIUM) += -mtune=merced
cflags-$(CONFIG_MCKINLEY) += -mtune=mckinley
endif
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 19ee635eeb7..a346e1833bf 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -26,6 +26,7 @@
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/capability.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/serial.h>
diff --git a/arch/ia64/ia32/Makefile b/arch/ia64/ia32/Makefile
index 2ed90da8116..61cb60affd9 100644
--- a/arch/ia64/ia32/Makefile
+++ b/arch/ia64/ia32/Makefile
@@ -2,11 +2,9 @@
# Makefile for the ia32 kernel emulation subsystem.
#
-obj-y := ia32_entry.o sys_ia32.o ia32_ioctl.o ia32_signal.o \
+obj-y := ia32_entry.o sys_ia32.o ia32_signal.o \
ia32_support.o ia32_traps.o binfmt_elf32.o ia32_ldt.o
-CFLAGS_ia32_ioctl.o += -Ifs/
-
# Don't let GCC uses f16-f31 so that save_ia32_fpstate_live() and
# restore_ia32_fpstate_live() can be sure the live register contain user-level state.
CFLAGS_ia32_signal.o += -mfixed-range=f16-f31
diff --git a/arch/ia64/ia32/elfcore32.h b/arch/ia64/ia32/elfcore32.h
index b73b8b6b10c..a47f63b204f 100644
--- a/arch/ia64/ia32/elfcore32.h
+++ b/arch/ia64/ia32/elfcore32.h
@@ -95,8 +95,7 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs,
static inline int elf_core_copy_task_regs(struct task_struct *t,
elf_gregset_t* elfregs)
{
- struct pt_regs *pp = ia64_task_regs(t);
- ELF_CORE_COPY_REGS((*elfregs), pp);
+ ELF_CORE_COPY_REGS((*elfregs), task_pt_regs(t));
return 1;
}
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index 494fad6bf37..95fe04400f6 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -469,7 +469,7 @@ ia32_syscall_table:
data8 sys32_epoll_wait
data8 sys_remap_file_pages
data8 sys_set_tid_address
- data8 sys32_timer_create
+ data8 compat_sys_timer_create
data8 compat_sys_timer_settime /* 260 */
data8 compat_sys_timer_gettime
data8 sys_timer_getoverrun
diff --git a/arch/ia64/ia32/ia32_ioctl.c b/arch/ia64/ia32/ia32_ioctl.c
deleted file mode 100644
index 88739394f6d..00000000000
--- a/arch/ia64/ia32/ia32_ioctl.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * IA32 Architecture-specific ioctl shim code
- *
- * Copyright (C) 2000 VA Linux Co
- * Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
- * Copyright (C) 2001-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/signal.h> /* argh, msdos_fs.h isn't self-contained... */
-#include <linux/syscalls.h>
-#include "ia32priv.h"
-
-#define INCLUDES
-#include "compat_ioctl.c"
-
-#define IOCTL_NR(a) ((a) & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))
-
-#define DO_IOCTL(fd, cmd, arg) ({ \
- int _ret; \
- mm_segment_t _old_fs = get_fs(); \
- \
- set_fs(KERNEL_DS); \
- _ret = sys_ioctl(fd, cmd, (unsigned long)arg); \
- set_fs(_old_fs); \
- _ret; \
-})
-
-#define CODE
-#include "compat_ioctl.c"
-
-#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL((cmd),sys_ioctl)
-#define HANDLE_IOCTL(cmd,handler) { (cmd), (ioctl_trans_handler_t)(handler), NULL },
-#define IOCTL_TABLE_START \
- struct ioctl_trans ioctl_start[] = {
-#define IOCTL_TABLE_END \
- };
-
-IOCTL_TABLE_START
-#define DECLARES
-#include "compat_ioctl.c"
-#include <linux/compat_ioctl.h>
-IOCTL_TABLE_END
-
-int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c
index aa891c9bc9b..5856510210f 100644
--- a/arch/ia64/ia32/ia32_signal.c
+++ b/arch/ia64/ia32/ia32_signal.c
@@ -255,7 +255,7 @@ save_ia32_fpstate_live (struct _fpstate_ia32 __user *save)
*/
fp_tos = (fsr>>11)&0x7;
fr8_st_map = (8-fp_tos)&0x7;
- ptp = ia64_task_regs(tsk);
+ ptp = task_pt_regs(tsk);
fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
ia64f2ia32f(fpregp, &ptp->f8);
copy_to_user(&save->_st[(0+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
@@ -389,7 +389,7 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 __user *save)
fr8_st_map = (8-fp_tos)&0x7;
fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
- ptp = ia64_task_regs(tsk);
+ ptp = task_pt_regs(tsk);
copy_from_user(fpregp, &save->_st[(0+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
ia32f2ia64f(&ptp->f8, fpregp);
copy_from_user(fpregp, &save->_st[(1+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
index 4f630043b3a..c187743965a 100644
--- a/arch/ia64/ia32/ia32_support.c
+++ b/arch/ia64/ia32/ia32_support.c
@@ -58,7 +58,7 @@ load_desc (u16 selector)
void
ia32_load_segment_descriptors (struct task_struct *task)
{
- struct pt_regs *regs = ia64_task_regs(task);
+ struct pt_regs *regs = task_pt_regs(task);
/* Setup the segment descriptors */
regs->r24 = load_desc(regs->r16 >> 16); /* ESD */
@@ -113,7 +113,7 @@ void
ia32_load_state (struct task_struct *t)
{
unsigned long eflag, fsr, fcr, fir, fdr, tssd;
- struct pt_regs *regs = ia64_task_regs(t);
+ struct pt_regs *regs = task_pt_regs(t);
eflag = t->thread.eflag;
fsr = t->thread.fsr;
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index dc282710421..3945d378bd7 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -48,6 +48,7 @@
#include <linux/ptrace.h>
#include <linux/stat.h>
#include <linux/ipc.h>
+#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/mman.h>
@@ -1481,7 +1482,7 @@ getreg (struct task_struct *child, int regno)
{
struct pt_regs *child_regs;
- child_regs = ia64_task_regs(child);
+ child_regs = task_pt_regs(child);
switch (regno / sizeof(int)) {
case PT_EBX: return child_regs->r11;
case PT_ECX: return child_regs->r9;
@@ -1509,7 +1510,7 @@ putreg (struct task_struct *child, int regno, unsigned int value)
{
struct pt_regs *child_regs;
- child_regs = ia64_task_regs(child);
+ child_regs = task_pt_regs(child);
switch (regno / sizeof(int)) {
case PT_EBX: child_regs->r11 = value; break;
case PT_ECX: child_regs->r9 = value; break;
@@ -1625,7 +1626,7 @@ save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user
* Stack frames start with 16-bytes of temp space
*/
swp = (struct switch_stack *)(tsk->thread.ksp + 16);
- ptp = ia64_task_regs(tsk);
+ ptp = task_pt_regs(tsk);
tos = (tsk->thread.fsr >> 11) & 7;
for (i = 0; i < 8; i++)
put_fpreg(i, &save->st_space[i], ptp, swp, tos);
@@ -1658,7 +1659,7 @@ restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __us
* Stack frames start with 16-bytes of temp space
*/
swp = (struct switch_stack *)(tsk->thread.ksp + 16);
- ptp = ia64_task_regs(tsk);
+ ptp = task_pt_regs(tsk);
tos = (tsk->thread.fsr >> 11) & 7;
for (i = 0; i < 8; i++)
get_fpreg(i, &save->st_space[i], ptp, swp, tos);
@@ -1689,7 +1690,7 @@ save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user
* Stack frames start with 16-bytes of temp space
*/
swp = (struct switch_stack *)(tsk->thread.ksp + 16);
- ptp = ia64_task_regs(tsk);
+ ptp = task_pt_regs(tsk);
tos = (tsk->thread.fsr >> 11) & 7;
for (i = 0; i < 8; i++)
put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
@@ -1733,7 +1734,7 @@ restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __u
* Stack frames start with 16-bytes of temp space
*/
swp = (struct switch_stack *)(tsk->thread.ksp + 16);
- ptp = ia64_task_regs(tsk);
+ ptp = task_pt_regs(tsk);
tos = (tsk->thread.fsr >> 11) & 7;
for (i = 0; i < 8; i++)
get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
@@ -1761,21 +1762,15 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data)
lock_kernel();
if (request == PTRACE_TRACEME) {
- ret = sys_ptrace(request, pid, addr, data);
+ ret = ptrace_traceme();
goto out;
}
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
+ child = ptrace_get_task_struct(pid);
+ if (IS_ERR(child)) {
+ ret = PTR_ERR(child);
goto out;
- ret = -EPERM;
- if (pid == 1) /* no messing around with init! */
- goto out_tsk;
+ }
if (request == PTRACE_ATTACH) {
ret = sys_ptrace(request, pid, addr, data);
@@ -2559,34 +2554,6 @@ sys32_get_thread_area (struct ia32_user_desc __user *u_info)
return 0;
}
-asmlinkage long
-sys32_timer_create(u32 clock, struct compat_sigevent __user *se32, timer_t __user *timer_id)
-{
- struct sigevent se;
- mm_segment_t oldfs;
- timer_t t;
- long err;
-
- if (se32 == NULL)
- return sys_timer_create(clock, NULL, timer_id);
-
- if (get_compat_sigevent(&se, se32))
- return -EFAULT;
-
- if (!access_ok(VERIFY_WRITE,timer_id,sizeof(timer_t)))
- return -EFAULT;
-
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_timer_create(clock, (struct sigevent __user *) &se, (timer_t __user *) &t);
- set_fs(oldfs);
-
- if (!err)
- err = __put_user (t, timer_id);
-
- return err;
-}
-
long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
__u32 len_low, __u32 len_high, int advice)
{
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index a3aa45cbcfa..c485a3b32ba 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -247,6 +247,32 @@ typedef struct kern_memdesc {
static kern_memdesc_t *kern_memmap;
+#define efi_md_size(md) (md->num_pages << EFI_PAGE_SHIFT)
+
+static inline u64
+kmd_end(kern_memdesc_t *kmd)
+{
+ return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
+}
+
+static inline u64
+efi_md_end(efi_memory_desc_t *md)
+{
+ return (md->phys_addr + efi_md_size(md));
+}
+
+static inline int
+efi_wb(efi_memory_desc_t *md)
+{
+ return (md->attribute & EFI_MEMORY_WB);
+}
+
+static inline int
+efi_uc(efi_memory_desc_t *md)
+{
+ return (md->attribute & EFI_MEMORY_UC);
+}
+
static void
walk (efi_freemem_callback_t callback, void *arg, u64 attr)
{
@@ -595,8 +621,8 @@ efi_get_iobase (void)
return 0;
}
-u32
-efi_mem_type (unsigned long phys_addr)
+static efi_memory_desc_t *
+efi_memory_descriptor (unsigned long phys_addr)
{
void *efi_map_start, *efi_map_end, *p;
efi_memory_desc_t *md;
@@ -610,13 +636,13 @@ efi_mem_type (unsigned long phys_addr)
md = p;
if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
- return md->type;
+ return md;
}
return 0;
}
-u64
-efi_mem_attributes (unsigned long phys_addr)
+static int
+efi_memmap_has_mmio (void)
{
void *efi_map_start, *efi_map_end, *p;
efi_memory_desc_t *md;
@@ -629,36 +655,98 @@ efi_mem_attributes (unsigned long phys_addr)
for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
md = p;
- if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
- return md->attribute;
+ if (md->type == EFI_MEMORY_MAPPED_IO)
+ return 1;
}
return 0;
}
+
+u32
+efi_mem_type (unsigned long phys_addr)
+{
+ efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
+
+ if (md)
+ return md->type;
+ return 0;
+}
+
+u64
+efi_mem_attributes (unsigned long phys_addr)
+{
+ efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
+
+ if (md)
+ return md->attribute;
+ return 0;
+}
EXPORT_SYMBOL(efi_mem_attributes);
+/*
+ * Determines whether the memory at phys_addr supports the desired
+ * attribute (WB, UC, etc). If this returns 1, the caller can safely
+ * access *size bytes at phys_addr with the specified attribute.
+ */
+static int
+efi_mem_attribute_range (unsigned long phys_addr, unsigned long *size, u64 attr)
+{
+ efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
+ unsigned long md_end;
+
+ if (!md || (md->attribute & attr) != attr)
+ return 0;
+
+ do {
+ md_end = efi_md_end(md);
+ if (phys_addr + *size <= md_end)
+ return 1;
+
+ md = efi_memory_descriptor(md_end);
+ if (!md || (md->attribute & attr) != attr) {
+ *size = md_end - phys_addr;
+ return 1;
+ }
+ } while (md);
+ return 0;
+}
+
+/*
+ * For /dev/mem, we only allow read & write system calls to access
+ * write-back memory, because read & write don't allow the user to
+ * control access size.
+ */
int
valid_phys_addr_range (unsigned long phys_addr, unsigned long *size)
{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
+ return efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB);
+}
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
+/*
+ * We allow mmap of anything in the EFI memory map that supports
+ * either write-back or uncacheable access. For uncacheable regions,
+ * the supported access sizes are system-dependent, and the user is
+ * responsible for using the correct size.
+ *
+ * Note that this doesn't currently allow access to hot-added memory,
+ * because that doesn't appear in the boot-time EFI memory map.
+ */
+int
+valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long *size)
+{
+ if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB))
+ return 1;
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
+ if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_UC))
+ return 1;
- if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT)) {
- if (!(md->attribute & EFI_MEMORY_WB))
- return 0;
+ /*
+ * Some firmware doesn't report MMIO regions in the EFI memory map.
+ * The Intel BigSur (a.k.a. HP i2000) has this problem. In this
+ * case, we can't use the EFI memory map to validate mmap requests.
+ */
+ if (!efi_memmap_has_mmio())
+ return 1;
- if (*size > md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr)
- *size = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - phys_addr;
- return 1;
- }
- }
return 0;
}
@@ -707,32 +795,6 @@ efi_uart_console_only(void)
return 0;
}
-#define efi_md_size(md) (md->num_pages << EFI_PAGE_SHIFT)
-
-static inline u64
-kmd_end(kern_memdesc_t *kmd)
-{
- return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
-}
-
-static inline u64
-efi_md_end(efi_memory_desc_t *md)
-{
- return (md->phys_addr + efi_md_size(md));
-}
-
-static inline int
-efi_wb(efi_memory_desc_t *md)
-{
- return (md->attribute & EFI_MEMORY_WB);
-}
-
-static inline int
-efi_uc(efi_memory_desc_t *md)
-{
- return (md->attribute & EFI_MEMORY_UC);
-}
-
/*
* Look for the first granule aligned memory descriptor memory
* that is big enough to hold EFI memory map. Make sure this
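
The new efi_mem_attribute_range() answers "how far past phys_addr does the requested attribute keep holding?" by walking physically contiguous descriptors and trimming *size at the first one that drops the attribute. The following user-space style sketch over a fake descriptor table mirrors that walk; everything in it is illustrative, not kernel API.

#include <stdio.h>

struct demo_md { unsigned long start, end, attr; };	/* [start, end), attribute bits */

static struct demo_md demo_map[] = {
	{ 0x000000, 0x100000, 1 },	/* WB */
	{ 0x100000, 0x180000, 1 },	/* WB, physically contiguous */
	{ 0x180000, 0x200000, 0 },	/* not WB */
};

static struct demo_md *demo_lookup(unsigned long addr)
{
	for (unsigned i = 0; i < sizeof(demo_map) / sizeof(demo_map[0]); i++)
		if (addr >= demo_map[i].start && addr < demo_map[i].end)
			return &demo_map[i];
	return NULL;
}

/* Mirrors efi_mem_attribute_range(): returns 1 if [addr, addr + *size)
 * supports 'attr', shrinking *size to the contiguous extent that does. */
static int demo_attr_range(unsigned long addr, unsigned long *size, unsigned long attr)
{
	struct demo_md *md = demo_lookup(addr);
	unsigned long md_end;

	if (!md || (md->attr & attr) != attr)
		return 0;

	do {
		md_end = md->end;
		if (addr + *size <= md_end)
			return 1;

		md = demo_lookup(md_end);
		if (!md || (md->attr & attr) != attr) {
			*size = md_end - addr;	/* attribute ends here */
			return 1;
		}
	} while (md);
	return 0;
}

int main(void)
{
	unsigned long size = 0x200000;

	/* Prints "wb ok: 1, usable size: 0x100000" -- trimmed at 0x180000. */
	printf("wb ok: %d, usable size: 0x%lx\n",
	       demo_attr_range(0x080000, &size, 1), size);
	return 0;
}
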
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 0741b066b98..7a6ffd61378 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1600,5 +1600,6 @@ sys_call_table:
data8 sys_inotify_init
data8 sys_inotify_add_watch
data8 sys_inotify_rm_watch
+ data8 sys_migrate_pages // 1280
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index bfe65b2e862..fbc7ea35dd5 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1060,7 +1060,7 @@ SET_REG(b5);
* the clobber lists for spin_lock() in include/asm-ia64/spinlock.h.
*/
-#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
+#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
GLOBAL_ENTRY(ia64_spinlock_contention_pre3_4)
.prologue
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 5db9d3bcbbc..e72de580ebb 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -103,7 +103,7 @@ EXPORT_SYMBOL(unw_init_running);
#ifdef ASM_SUPPORTED
# ifdef CONFIG_SMP
-# if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
+# if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
/*
* This is not a normal routine and we don't want a function descriptor for it, so we use
* a fake declaration here.
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 89a70400c4f..346fedf9ea4 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -467,10 +467,6 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
}
-void __kprobes arch_remove_kprobe(struct kprobe *p)
-{
-}
-
/*
* We are resuming execution after a single step fault, so the pt_regs
* structure reflects the register state after we executed the instruction
@@ -642,6 +638,13 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
if (p->break_handler && p->break_handler(p, regs)) {
goto ss_probe;
}
+ } else if (!is_ia64_break_inst(regs)) {
+ /* The breakpoint instruction was removed by
+ * another cpu right after we hit, no further
+ * handling of this interrupt is appropriate
+ */
+ ret = 1;
+ goto no_kprobe;
} else {
/* Not our break */
goto no_kprobe;
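
The new else-if above handles the race its comment describes: another CPU can unregister the probe between the break trap and its dispatch, in which case the break instruction is already gone and the trap should simply be swallowed so the original instruction re-executes. In sketch form (the helper is hypothetical pseudocode, not from this patch):

static int demo_break_is_spurious(struct kprobe *p, struct pt_regs *regs)
{
	/* No probe registered here and the break instruction has already
	 * been replaced: nothing left to handle, let the insn re-run. */
	return p == NULL && !is_ia64_break_inst(regs);
}
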
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 355af15287c..ee7eec9ee57 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -766,7 +766,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
l = strlen(previous_current->comm);
snprintf(comm, sizeof(comm), "%s %*s %d",
current->comm, l, previous_current->comm,
- previous_current->thread_info->cpu);
+ task_thread_info(previous_current)->cpu);
}
memcpy(current->comm, comm, sizeof(current->comm));
@@ -1423,7 +1423,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
struct task_struct *p = (struct task_struct *)((char *)mca_data + offset);
struct thread_info *ti;
memset(p, 0, KERNEL_STACK_SIZE);
- ti = (struct thread_info *)((char *)p + IA64_TASK_SIZE);
+ ti = task_thread_info(p);
ti->flags = _TIF_MCA_INIT;
ti->preempt_count = 1;
ti->task = p;
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 410d4804fa6..bd87cb6b7a8 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -38,6 +38,7 @@
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/bitops.h>
+#include <linux/capability.h>
#include <linux/rcupdate.h>
#include <asm/errno.h>
@@ -1709,7 +1710,7 @@ static void
pfm_syswide_force_stop(void *info)
{
pfm_context_t *ctx = (pfm_context_t *)info;
- struct pt_regs *regs = ia64_task_regs(current);
+ struct pt_regs *regs = task_pt_regs(current);
struct task_struct *owner;
unsigned long flags;
int ret;
@@ -1814,7 +1815,7 @@ pfm_flush(struct file *filp)
is_system = ctx->ctx_fl_system;
task = PFM_CTX_TASK(ctx);
- regs = ia64_task_regs(task);
+ regs = task_pt_regs(task);
DPRINT(("ctx_state=%d is_current=%d\n",
state,
@@ -1944,7 +1945,7 @@ pfm_close(struct inode *inode, struct file *filp)
is_system = ctx->ctx_fl_system;
task = PFM_CTX_TASK(ctx);
- regs = ia64_task_regs(task);
+ regs = task_pt_regs(task);
DPRINT(("ctx_state=%d is_current=%d\n",
state,
@@ -4051,7 +4052,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
*/
ia64_psr(regs)->up = 0;
} else {
- tregs = ia64_task_regs(task);
+ tregs = task_pt_regs(task);
/*
* stop monitoring at the user level
@@ -4133,7 +4134,7 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
ia64_psr(regs)->up = 1;
} else {
- tregs = ia64_task_regs(ctx->ctx_task);
+ tregs = task_pt_regs(ctx->ctx_task);
/*
* start monitoring at the kernel level the next
@@ -4403,7 +4404,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
/*
* when not current, task MUST be stopped, so this is safe
*/
- regs = ia64_task_regs(task);
+ regs = task_pt_regs(task);
/* force a full reload */
ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
@@ -4529,7 +4530,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
/*
* per-task mode
*/
- tregs = task == current ? regs : ia64_task_regs(task);
+ tregs = task == current ? regs : task_pt_regs(task);
if (task == current) {
/*
@@ -4592,7 +4593,7 @@ pfm_exit_thread(struct task_struct *task)
{
pfm_context_t *ctx;
unsigned long flags;
- struct pt_regs *regs = ia64_task_regs(task);
+ struct pt_regs *regs = task_pt_regs(task);
int ret, state;
int free_ok = 0;
@@ -4925,7 +4926,7 @@ restart_args:
if (unlikely(ret)) goto abort_locked;
skip_fd:
- ret = (*func)(ctx, args_k, count, ia64_task_regs(current));
+ ret = (*func)(ctx, args_k, count, task_pt_regs(current));
call_made = 1;
@@ -5049,7 +5050,7 @@ pfm_handle_work(void)
pfm_clear_task_notify();
- regs = ia64_task_regs(current);
+ regs = task_pt_regs(current);
/*
* extract reason for being here and clear
@@ -5793,7 +5794,7 @@ pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_c
* on every CPU, so we can rely on the pid to identify the idle task.
*/
if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
- regs = ia64_task_regs(task);
+ regs = task_pt_regs(task);
ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
return;
}
@@ -5876,7 +5877,7 @@ pfm_save_regs(struct task_struct *task)
flags = pfm_protect_ctx_ctxsw(ctx);
if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
- struct pt_regs *regs = ia64_task_regs(task);
+ struct pt_regs *regs = task_pt_regs(task);
pfm_clear_psr_up();
@@ -6076,7 +6077,7 @@ pfm_load_regs (struct task_struct *task)
BUG_ON(psr & IA64_PSR_I);
if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
- struct pt_regs *regs = ia64_task_regs(task);
+ struct pt_regs *regs = task_pt_regs(task);
BUG_ON(ctx->ctx_smpl_hdr);
@@ -6445,7 +6446,7 @@ pfm_alt_save_pmu_state(void *data)
{
struct pt_regs *regs;
- regs = ia64_task_regs(current);
+ regs = task_pt_regs(current);
DPRINT(("called\n"));
@@ -6471,7 +6472,7 @@ pfm_alt_restore_pmu_state(void *data)
{
struct pt_regs *regs;
- regs = ia64_task_regs(current);
+ regs = task_pt_regs(current);
DPRINT(("called\n"));
@@ -6753,7 +6754,7 @@ dump_pmu_state(const char *from)
local_irq_save(flags);
this_cpu = smp_processor_id();
- regs = ia64_task_regs(current);
+ regs = task_pt_regs(current);
info = PFM_CPUINFO_GET();
dcr = ia64_getreg(_IA64_REG_CR_DCR);
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index e9904c74d2b..309d59658e5 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -328,7 +328,7 @@ ia64_save_extra (struct task_struct *task)
#endif
#ifdef CONFIG_IA32_SUPPORT
- if (IS_IA32_PROCESS(ia64_task_regs(task)))
+ if (IS_IA32_PROCESS(task_pt_regs(task)))
ia32_save_state(task);
#endif
}
@@ -353,7 +353,7 @@ ia64_load_extra (struct task_struct *task)
#endif
#ifdef CONFIG_IA32_SUPPORT
- if (IS_IA32_PROCESS(ia64_task_regs(task)))
+ if (IS_IA32_PROCESS(task_pt_regs(task)))
ia32_load_state(task);
#endif
}
@@ -488,7 +488,7 @@ copy_thread (int nr, unsigned long clone_flags,
* If we're cloning an IA32 task then save the IA32 extra
* state from the current task to the new task
*/
- if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+ if (IS_IA32_PROCESS(task_pt_regs(current))) {
ia32_save_state(p);
if (clone_flags & CLONE_SETTLS)
retval = ia32_clone_tls(p, child_ptregs);
@@ -701,7 +701,7 @@ int
kernel_thread_helper (int (*fn)(void *), void *arg)
{
#ifdef CONFIG_IA32_SUPPORT
- if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+ if (IS_IA32_PROCESS(task_pt_regs(current))) {
/* A kernel thread is always a 64-bit process. */
current->thread.map_base = DEFAULT_MAP_BASE;
current->thread.task_size = DEFAULT_TASK_SIZE;
@@ -722,7 +722,7 @@ flush_thread (void)
current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
ia64_drop_fpu(current);
#ifdef CONFIG_IA32_SUPPORT
- if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+ if (IS_IA32_PROCESS(task_pt_regs(current))) {
ia32_drop_partial_page_list(current);
current->thread.task_size = IA32_PAGE_OFFSET;
set_fs(USER_DS);
@@ -755,7 +755,7 @@ exit_thread (void)
if (current->thread.flags & IA64_THREAD_DBG_VALID)
pfm_release_debug_registers(current);
#endif
- if (IS_IA32_PROCESS(ia64_task_regs(current)))
+ if (IS_IA32_PROCESS(task_pt_regs(current)))
ia32_drop_partial_page_list(current);
}
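
The bulk of the ia64 changes is a mechanical substitution: open-coded ia64_task_regs() and ->thread_info accesses become the generic task_pt_regs()/task_thread_info() helpers. A usage sketch of the pattern callers now follow; the fields read at the end are only examples:

static int demo_task_state(struct task_struct *t, unsigned long *ip)
{
	struct pt_regs *regs = task_pt_regs(t);		/* was: ia64_task_regs(t) */
	struct thread_info *ti = task_thread_info(t);	/* was: t->thread_info */

	*ip = regs->cr_iip;	/* assumption: the ia64 user IP lives in cr_iip */
	return ti->cpu;		/* last CPU the task ran on */
}
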
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 4b19d041063..eaed14aac6a 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -254,7 +254,7 @@ get_rnat (struct task_struct *task, struct switch_stack *sw,
long num_regs, nbits;
struct pt_regs *pt;
- pt = ia64_task_regs(task);
+ pt = task_pt_regs(task);
kbsp = (unsigned long *) sw->ar_bspstore;
ubspstore = (unsigned long *) pt->ar_bspstore;
@@ -314,7 +314,7 @@ put_rnat (struct task_struct *task, struct switch_stack *sw,
struct pt_regs *pt;
unsigned long cfm, *urbs_kargs;
- pt = ia64_task_regs(task);
+ pt = task_pt_regs(task);
kbsp = (unsigned long *) sw->ar_bspstore;
ubspstore = (unsigned long *) pt->ar_bspstore;
@@ -407,7 +407,7 @@ ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
urbs_end = (long *) user_rbs_end;
laddr = (unsigned long *) addr;
- child_regs = ia64_task_regs(child);
+ child_regs = task_pt_regs(child);
bspstore = (unsigned long *) child_regs->ar_bspstore;
krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
if (on_kernel_rbs(addr, (unsigned long) bspstore,
@@ -467,7 +467,7 @@ ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
struct pt_regs *child_regs;
laddr = (unsigned long *) addr;
- child_regs = ia64_task_regs(child);
+ child_regs = task_pt_regs(child);
bspstore = (unsigned long *) child_regs->ar_bspstore;
krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
if (on_kernel_rbs(addr, (unsigned long) bspstore,
@@ -567,7 +567,7 @@ thread_matches (struct task_struct *thread, unsigned long addr)
*/
return 0;
- thread_regs = ia64_task_regs(thread);
+ thread_regs = task_pt_regs(thread);
thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
return 0;
@@ -627,7 +627,7 @@ find_thread_for_addr (struct task_struct *child, unsigned long addr)
inline void
ia64_flush_fph (struct task_struct *task)
{
- struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
+ struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
/*
* Prevent migrating this task while
@@ -653,7 +653,7 @@ ia64_flush_fph (struct task_struct *task)
void
ia64_sync_fph (struct task_struct *task)
{
- struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
+ struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
ia64_flush_fph(task);
if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
@@ -794,7 +794,7 @@ access_uarea (struct task_struct *child, unsigned long addr,
+ offsetof(struct pt_regs, reg)))
- pt = ia64_task_regs(child);
+ pt = task_pt_regs(child);
sw = (struct switch_stack *) (child->thread.ksp + 16);
if ((addr & 0x7) != 0) {
@@ -1120,7 +1120,7 @@ ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
return -EIO;
- pt = ia64_task_regs(child);
+ pt = task_pt_regs(child);
sw = (struct switch_stack *) (child->thread.ksp + 16);
unw_init_from_blocked_task(&info, child);
if (unw_unwind_to_user(&info) < 0) {
@@ -1265,7 +1265,7 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
return -EIO;
- pt = ia64_task_regs(child);
+ pt = task_pt_regs(child);
sw = (struct switch_stack *) (child->thread.ksp + 16);
unw_init_from_blocked_task(&info, child);
if (unw_unwind_to_user(&info) < 0) {
@@ -1403,7 +1403,7 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
void
ptrace_disable (struct task_struct *child)
{
- struct ia64_psr *child_psr = ia64_psr(ia64_task_regs(child));
+ struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
/* make sure the single step/taken-branch trap bits are not set: */
child_psr->ss = 0;
@@ -1422,14 +1422,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
lock_kernel();
ret = -EPERM;
if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
- ret = security_ptrace(current->parent, current);
- if (ret)
- goto out;
- current->ptrace |= PT_PTRACED;
- ret = 0;
+ ret = ptrace_traceme();
goto out;
}
@@ -1463,7 +1456,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
if (ret < 0)
goto out_tsk;
- pt = ia64_task_regs(child);
+ pt = task_pt_regs(child);
sw = (struct switch_stack *) (child->thread.ksp + 16);
switch (request) {
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 1461dc660b4..a87a162a308 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -29,6 +29,7 @@
* Replace some NR_CPUS by cpus_online, for hotplug cpu.
*/
+#include <linux/capability.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index c33305d8e5e..c0766575a3a 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -60,6 +60,7 @@
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>
+#include <asm/system.h>
#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
@@ -695,6 +696,7 @@ static void
get_max_cacheline_size (void)
{
unsigned long line_size, max = 1;
+ unsigned int cache_size = 0;
u64 l, levels, unique_caches;
pal_cache_config_info_t cci;
s64 status;
@@ -724,6 +726,8 @@ get_max_cacheline_size (void)
line_size = 1 << cci.pcci_line_size;
if (line_size > max)
max = line_size;
+ if (cache_size < cci.pcci_cache_size)
+ cache_size = cci.pcci_cache_size;
if (!cci.pcci_unified) {
status = ia64_pal_cache_config_info(l,
/* cache_type (instruction)= */ 1,
@@ -740,6 +744,9 @@ get_max_cacheline_size (void)
ia64_i_cache_stride_shift = cci.pcci_stride;
}
out:
+#ifdef CONFIG_SMP
+ max_cache_size = max(max_cache_size, cache_size);
+#endif
if (max > ia64_max_cacheline_size)
ia64_max_cacheline_size = max;
}
@@ -794,7 +801,7 @@ cpu_init (void)
#endif
/* Clear the stack memory reserved for pt_regs: */
- memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
+ memset(task_pt_regs(current), 0, sizeof(struct pt_regs));
ia64_set_kr(IA64_KR_FPU_OWNER, 0);
@@ -870,6 +877,15 @@ cpu_init (void)
pm_idle = default_idle;
}
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+void sched_cacheflush(void)
+{
+ ia64_sal_cache_flush(3);
+}
+
void
check_bugs (void)
{
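
sched_cacheflush() gives the scheduler's migration-cost autodetection a way to flush as much of the CPU cache as the architecture allows; on ia64 that is SAL cache-flush operation 3. The caller below is a hypothetical sketch of how a measurement loop might use the hook, not scheduler code from this patch:

static unsigned long long demo_measure_once(void)
{
	unsigned long long t0, t1;

	sched_cacheflush();		/* start each sample with a cold cache */
	t0 = sched_clock();
	/* ... touch the working set on the target CPU here ... */
	t1 = sched_clock();
	return t1 - t0;
}
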
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 58ce07efc56..463f6bb44d0 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -655,11 +655,11 @@ set_sigdelayed(pid_t pid, int signo, int code, void __user *addr)
if (!t)
return;
- t->thread_info->sigdelayed.signo = signo;
- t->thread_info->sigdelayed.code = code;
- t->thread_info->sigdelayed.addr = addr;
- t->thread_info->sigdelayed.start_time = start_time;
- t->thread_info->sigdelayed.pid = pid;
+ task_thread_info(t)->sigdelayed.signo = signo;
+ task_thread_info(t)->sigdelayed.code = code;
+ task_thread_info(t)->sigdelayed.addr = addr;
+ task_thread_info(t)->sigdelayed.start_time = start_time;
+ task_thread_info(t)->sigdelayed.pid = pid;
wmb();
set_tsk_thread_flag(t, TIF_SIGDELAYED);
}
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index f2dbcd1db0d..c7b943f1019 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -151,7 +151,7 @@ out:
asmlinkage long
sys_pipe (void)
{
- struct pt_regs *regs = ia64_task_regs(current);
+ struct pt_regs *regs = task_pt_regs(current);
int fd[2];
int retval;
diff --git a/arch/ia64/oprofile/backtrace.c b/arch/ia64/oprofile/backtrace.c
index b7dabbfb0d6..adb01566bd5 100644
--- a/arch/ia64/oprofile/backtrace.c
+++ b/arch/ia64/oprofile/backtrace.c
@@ -32,7 +32,7 @@ typedef struct
u64 *prev_pfs_loc; /* state for WAR for old spinlock ool code */
} ia64_backtrace_t;
-#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
+#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
/*
* Returns non-zero if the PC is in the spinlock contention out-of-line code
* with non-standard calling sequence (on older compilers).
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index 768c21deb2e..493fb3f38dc 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
+#include <linux/capability.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <asm/system.h>
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 4d100f3886e..a3dcc3fab4b 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -12,10 +12,6 @@ config M32R
config SBUS
bool
-config UID16
- bool
- default n
-
config GENERIC_ISA_DMA
bool
default y
@@ -81,6 +77,12 @@ config PLAT_MAPPI2
config PLAT_MAPPI3
bool "Mappi-III(M3A-2170)"
+config PLAT_M32104UT
+ bool "M32104UT"
+ help
+ The M3T-M32104UT is a reference board based on the uT-Engine
+ specification. This board has an M32104 chip.
+
endchoice
choice
@@ -93,6 +95,10 @@ config CHIP_M32700
config CHIP_M32102
bool "M32102"
+config CHIP_M32104
+ bool "M32104"
+ depends on PLAT_M32104UT
+
config CHIP_VDEC2
bool "VDEC2"
@@ -115,7 +121,7 @@ config TLB_ENTRIES
config ISA_M32R
bool
- depends on CHIP_M32102
+ depends on CHIP_M32102 || CHIP_M32104
default y
config ISA_M32R2
@@ -140,6 +146,7 @@ config BUS_CLOCK
default "50000000" if PLAT_MAPPI3
default "50000000" if PLAT_M32700UT
default "50000000" if PLAT_OPSPUT
+ default "54000000" if PLAT_M32104UT
default "33333333" if PLAT_OAKS32R
default "20000000" if PLAT_MAPPI2
@@ -157,6 +164,7 @@ config MEMORY_START
default "08000000" if PLAT_USRV
default "08000000" if PLAT_M32700UT
default "08000000" if PLAT_OPSPUT
+ default "04000000" if PLAT_M32104UT
default "01000000" if PLAT_OAKS32R
config MEMORY_SIZE
@@ -166,6 +174,7 @@ config MEMORY_SIZE
default "02000000" if PLAT_USRV
default "01000000" if PLAT_M32700UT
default "01000000" if PLAT_OPSPUT
+ default "01000000" if PLAT_M32104UT
default "00800000" if PLAT_OAKS32R
config NOHIGHMEM
@@ -174,21 +183,22 @@ config NOHIGHMEM
config ARCH_DISCONTIGMEM_ENABLE
bool "Internal RAM Support"
- depends on CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP
+ depends on CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP || CHIP_M32104
default y
source "mm/Kconfig"
config IRAM_START
hex "Internal memory start address (hex)"
- default "00f00000"
- depends on (CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP) && DISCONTIGMEM
+ default "00f00000" if !CHIP_M32104
+ default "00700000" if CHIP_M32104
+ depends on (CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP || CHIP_M32104) && DISCONTIGMEM
config IRAM_SIZE
hex "Internal memory size (hex)"
- depends on (CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP) && DISCONTIGMEM
+ depends on (CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP || CHIP_M32104) && DISCONTIGMEM
default "00080000" if CHIP_M32700
- default "00010000" if CHIP_M32102 || CHIP_OPSP
+ default "00010000" if CHIP_M32102 || CHIP_OPSP || CHIP_M32104
default "00008000" if CHIP_VDEC2
#
diff --git a/arch/m32r/boot/compressed/head.S b/arch/m32r/boot/compressed/head.S
index 07cfd6ad1ae..234d8b1e0ac 100644
--- a/arch/m32r/boot/compressed/head.S
+++ b/arch/m32r/boot/compressed/head.S
@@ -143,6 +143,11 @@ startup:
ldi r0, -2
ldi r1, 0x0100 ; invalidate
stb r1, @r0
+#elif defined(CONFIG_CHIP_M32104)
+ /* Cache flush */
+ ldi r0, -2
+ ldi r1, 0x0700 ; invalidate i-cache, copy back d-cache
+ sth r1, @r0
#else
#error "put your cache flush function, please"
#endif
diff --git a/arch/m32r/boot/setup.S b/arch/m32r/boot/setup.S
index 5d256434b4a..398542507d8 100644
--- a/arch/m32r/boot/setup.S
+++ b/arch/m32r/boot/setup.S
@@ -1,11 +1,10 @@
/*
* linux/arch/m32r/boot/setup.S -- A setup code.
*
- * Copyright (C) 2001, 2002 Hiroyuki Kondo, Hirokazu Takata,
- * and Hitoshi Yamamoto
+ * Copyright (C) 2001-2005 Hiroyuki Kondo, Hirokazu Takata,
+ * Hitoshi Yamamoto, Hayato Fujiwara
*
*/
-/* $Id$ */
#include <linux/linkage.h>
#include <asm/segment.h>
@@ -80,6 +79,20 @@ ENTRY(boot)
ldi r1, #0x101 ; cache on (with invalidation)
; ldi r1, #0x00 ; cache off
st r1, @r0
+#elif defined(CONFIG_CHIP_M32104)
+ ldi r0, #-96 ; DNCR0
+ seth r1, #0x0060 ; from 0x00600000
+ or3 r1, r1, #0x0005 ; size 2MB
+ st r1, @r0
+ seth r1, #0x0100 ; from 0x01000000
+ or3 r1, r1, #0x0003 ; size 16MB
+ st r1, @+r0
+ seth r1, #0x0200 ; from 0x02000000
+ or3 r1, r1, #0x0002 ; size 32MB
+ st r1, @+r0
+ ldi r0, #-4 ;LDIMM (r0, M32R_MCCR)
+ ldi r1, #0x703 ; cache on (with invalidation)
+ st r1, @r0
#else
#error unknown chip configuration
#endif
@@ -115,10 +128,15 @@ mmu_on:
st r1, @(MATM_offset,r0) ; Set MATM (T bit ON)
ld r0, @(MATM_offset,r0) ; Check
#else
+#if defined(CONFIG_CHIP_M32700)
seth r0,#high(M32R_MCDCAR)
or3 r0,r0,#low(M32R_MCDCAR)
ld24 r1,#0x8080
st r1,@r0
+#elif defined(CONFIG_CHIP_M32104)
+ LDIMM (r2, eit_vector) ; set EVB(cr5)
+ mvtc r2, cr5
+#endif
#endif /* CONFIG_MMU */
jmp r13
nop
diff --git a/arch/m32r/kernel/Makefile b/arch/m32r/kernel/Makefile
index 6c6b6c37663..5a2fa886906 100644
--- a/arch/m32r/kernel/Makefile
+++ b/arch/m32r/kernel/Makefile
@@ -16,5 +16,6 @@ obj-$(CONFIG_PLAT_M32700UT) += setup_m32700ut.o io_m32700ut.o
obj-$(CONFIG_PLAT_OPSPUT) += setup_opsput.o io_opsput.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_PLAT_OAKS32R) += setup_oaks32r.o io_oaks32r.o
+obj-$(CONFIG_PLAT_M32104UT) += setup_m32104ut.o io_m32104ut.o
EXTRA_AFLAGS := -traditional
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 396c94218cc..3871b65f0c8 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -315,7 +315,7 @@ ENTRY(ei_handler)
mv r1, sp ; arg1(regs)
#if defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_XNUX2) \
|| defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32102) \
- || defined(CONFIG_CHIP_OPSP)
+ || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
; GET_ICU_STATUS;
seth r0, #shigh(M32R_ICU_ISTS_ADDR)
@@ -541,7 +541,20 @@ check_int2:
bra check_end
.fillinsn
check_end:
-#endif /* CONFIG_PLAT_OPSPUT */
+#elif defined(CONFIG_PLAT_M32104UT)
+ add3 r2, r0, #-(M32R_IRQ_INT1) ; INT1# interrupt
+ bnez r2, check_end
+ ; read ICU status register of PLD
+ seth r0, #high(PLD_ICUISTS)
+ or3 r0, r0, #low(PLD_ICUISTS)
+ lduh r0, @r0
+ slli r0, #21
+ srli r0, #27 ; ISN
+ addi r0, #(M32104UT_PLD_IRQ_BASE)
+ bra check_end
+ .fillinsn
+check_end:
+#endif /* CONFIG_PLAT_M32104UT */
bl do_IRQ
#endif /* CONFIG_SMP */
ld r14, @sp+
@@ -651,8 +664,6 @@ ENTRY(rie_handler)
/* void rie_handler(int error_code) */
SWITCH_TO_KERNEL_STACK
SAVE_ALL
- mvfc r0, bpc
- ld r1, @r0
ldi r1, #0x20 ; error_code
mv r0, sp ; pt_regs
bl do_rie_handler
diff --git a/arch/m32r/kernel/io_m32104ut.c b/arch/m32r/kernel/io_m32104ut.c
new file mode 100644
index 00000000000..d26adab9586
--- /dev/null
+++ b/arch/m32r/kernel/io_m32104ut.c
@@ -0,0 +1,298 @@
+/*
+ * linux/arch/m32r/kernel/io_m32104ut.c
+ *
+ * Typical I/O routines for M32104UT board.
+ *
+ * Copyright (c) 2001-2005 Hiroyuki Kondo, Hirokazu Takata,
+ * Hitoshi Yamamoto, Mamoru Sakugawa,
+ * Naoto Sugai, Hayato Fujiwara
+ */
+
+#include <linux/config.h>
+#include <asm/m32r.h>
+#include <asm/page.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+#include <linux/types.h>
+
+#define M32R_PCC_IOMAP_SIZE 0x1000
+
+#define M32R_PCC_IOSTART0 0x1000
+#define M32R_PCC_IOEND0 (M32R_PCC_IOSTART0 + M32R_PCC_IOMAP_SIZE - 1)
+
+extern void pcc_ioread_byte(int, unsigned long, void *, size_t, size_t, int);
+extern void pcc_ioread_word(int, unsigned long, void *, size_t, size_t, int);
+extern void pcc_iowrite_byte(int, unsigned long, void *, size_t, size_t, int);
+extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int);
+#endif /* CONFIG_PCMCIA && CONFIG_M32R_CFC */
+
+#define PORT2ADDR(port) _port2addr(port)
+
+static inline void *_port2addr(unsigned long port)
+{
+ return (void *)(port | NONCACHE_OFFSET);
+}
+
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+static inline void *__port2addr_ata(unsigned long port)
+{
+ static int dummy_reg;
+
+ switch (port) {
+ case 0x1f0: return (void *)(0x0c002000 | NONCACHE_OFFSET);
+ case 0x1f1: return (void *)(0x0c012800 | NONCACHE_OFFSET);
+ case 0x1f2: return (void *)(0x0c012002 | NONCACHE_OFFSET);
+ case 0x1f3: return (void *)(0x0c012802 | NONCACHE_OFFSET);
+ case 0x1f4: return (void *)(0x0c012004 | NONCACHE_OFFSET);
+ case 0x1f5: return (void *)(0x0c012804 | NONCACHE_OFFSET);
+ case 0x1f6: return (void *)(0x0c012006 | NONCACHE_OFFSET);
+ case 0x1f7: return (void *)(0x0c012806 | NONCACHE_OFFSET);
+ case 0x3f6: return (void *)(0x0c01200e | NONCACHE_OFFSET);
+ default: return (void *)&dummy_reg;
+ }
+}
+#endif
+
+/*
+ * M32104T-LAN is located in the extended bus space
+ * from 0x01000000 to 0x01ffffff on physical address.
+ * The base address of LAN controller(LAN91C111) is 0x300.
+ */
+#define LAN_IOSTART (0x300 | NONCACHE_OFFSET)
+#define LAN_IOEND (0x320 | NONCACHE_OFFSET)
+static inline void *_port2addr_ne(unsigned long port)
+{
+ return (void *)(port + NONCACHE_OFFSET + 0x01000000);
+}
+
+static inline void delay(void)
+{
+ __asm__ __volatile__ ("push r0; \n\t pop r0;" : : :"memory");
+}
+
+/*
+ * NIC I/O function
+ */
+
+#define PORT2ADDR_NE(port) _port2addr_ne(port)
+
+static inline unsigned char _ne_inb(void *portp)
+{
+ return *(volatile unsigned char *)portp;
+}
+
+static inline unsigned short _ne_inw(void *portp)
+{
+ return (unsigned short)le16_to_cpu(*(volatile unsigned short *)portp);
+}
+
+static inline void _ne_insb(void *portp, void *addr, unsigned long count)
+{
+ unsigned char *buf = (unsigned char *)addr;
+
+ while (count--)
+ *buf++ = _ne_inb(portp);
+}
+
+static inline void _ne_outb(unsigned char b, void *portp)
+{
+ *(volatile unsigned char *)portp = b;
+}
+
+static inline void _ne_outw(unsigned short w, void *portp)
+{
+ *(volatile unsigned short *)portp = cpu_to_le16(w);
+}
+
+unsigned char _inb(unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ return _ne_inb(PORT2ADDR_NE(port));
+
+ return *(volatile unsigned char *)PORT2ADDR(port);
+}
+
+unsigned short _inw(unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ return _ne_inw(PORT2ADDR_NE(port));
+
+ return *(volatile unsigned short *)PORT2ADDR(port);
+}
+
+unsigned long _inl(unsigned long port)
+{
+ return *(volatile unsigned long *)PORT2ADDR(port);
+}
+
+unsigned char _inb_p(unsigned long port)
+{
+ unsigned char v = _inb(port);
+ delay();
+ return (v);
+}
+
+unsigned short _inw_p(unsigned long port)
+{
+ unsigned short v = _inw(port);
+ delay();
+ return (v);
+}
+
+unsigned long _inl_p(unsigned long port)
+{
+ unsigned long v = _inl(port);
+ delay();
+ return (v);
+}
+
+void _outb(unsigned char b, unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ _ne_outb(b, PORT2ADDR_NE(port));
+ else
+ *(volatile unsigned char *)PORT2ADDR(port) = b;
+}
+
+void _outw(unsigned short w, unsigned long port)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ _ne_outw(w, PORT2ADDR_NE(port));
+ else
+ *(volatile unsigned short *)PORT2ADDR(port) = w;
+}
+
+void _outl(unsigned long l, unsigned long port)
+{
+ *(volatile unsigned long *)PORT2ADDR(port) = l;
+}
+
+void _outb_p(unsigned char b, unsigned long port)
+{
+ _outb(b, port);
+ delay();
+}
+
+void _outw_p(unsigned short w, unsigned long port)
+{
+ _outw(w, port);
+ delay();
+}
+
+void _outl_p(unsigned long l, unsigned long port)
+{
+ _outl(l, port);
+ delay();
+}
+
+void _insb(unsigned int port, void *addr, unsigned long count)
+{
+ if (port >= LAN_IOSTART && port < LAN_IOEND)
+ _ne_insb(PORT2ADDR_NE(port), addr, count);
+ else {
+ unsigned char *buf = addr;
+ unsigned char *portp = PORT2ADDR(port);
+ while (count--)
+ *buf++ = *(volatile unsigned char *)portp;
+ }
+}
+
+void _insw(unsigned int port, void *addr, unsigned long count)
+{
+ unsigned short *buf = addr;
+ unsigned short *portp;
+
+ if (port >= LAN_IOSTART && port < LAN_IOEND) {
+ /*
+ * This portion is only used by smc91111.c to read data
+ * from the DATA_REG. Do not swap the data.
+ */
+ portp = PORT2ADDR_NE(port);
+ while (count--)
+ *buf++ = *(volatile unsigned short *)portp;
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_ioread_word(9, port, (void *)addr, sizeof(unsigned short),
+ count, 1);
+#endif
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ } else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ portp = __port2addr_ata(port);
+ while (count--)
+ *buf++ = *(volatile unsigned short *)portp;
+#endif
+ } else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *buf++ = *(volatile unsigned short *)portp;
+ }
+}
+
+void _insl(unsigned int port, void *addr, unsigned long count)
+{
+ unsigned long *buf = addr;
+ unsigned long *portp;
+
+ portp = PORT2ADDR(port);
+ while (count--)
+ *buf++ = *(volatile unsigned long *)portp;
+}
+
+void _outsb(unsigned int port, const void *addr, unsigned long count)
+{
+ const unsigned char *buf = addr;
+ unsigned char *portp;
+
+ if (port >= LAN_IOSTART && port < LAN_IOEND) {
+ portp = PORT2ADDR_NE(port);
+ while (count--)
+ _ne_outb(*buf++, portp);
+ } else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *(volatile unsigned char *)portp = *buf++;
+ }
+}
+
+void _outsw(unsigned int port, const void *addr, unsigned long count)
+{
+ const unsigned short *buf = addr;
+ unsigned short *portp;
+
+ if (port >= LAN_IOSTART && port < LAN_IOEND) {
+ /*
+ * This portion is only used by smc91111.c to write data
+ * into the DATA_REG. Do not swap the data.
+ */
+ portp = PORT2ADDR_NE(port);
+ while (count--)
+ *(volatile unsigned short *)portp = *buf++;
+#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+ } else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+ portp = __port2addr_ata(port);
+ while (count--)
+ *(volatile unsigned short *)portp = *buf++;
+#endif
+#if defined(CONFIG_PCMCIA) && defined(CONFIG_M32R_CFC)
+ } else if (port >= M32R_PCC_IOSTART0 && port <= M32R_PCC_IOEND0) {
+ pcc_iowrite_word(9, port, (void *)addr, sizeof(unsigned short),
+ count, 1);
+#endif
+ } else {
+ portp = PORT2ADDR(port);
+ while (count--)
+ *(volatile unsigned short *)portp = *buf++;
+ }
+}
+
+void _outsl(unsigned int port, const void *addr, unsigned long count)
+{
+ const unsigned long *buf = addr;
+ unsigned char *portp;
+
+ portp = PORT2ADDR(port);
+ while (count--)
+ *(volatile unsigned long *)portp = *buf++;
+}
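
The comments in _insw()/_outsw() above encode an asymmetry worth calling out: single-word accessors byte-swap through le16_to_cpu()/cpu_to_le16(), while the string variants copy raw 16-bit words because smc91111.c streams packet data through DATA_REG unswapped. The contrast in miniature (the pointer is purely illustrative):

static void demo_nic_read(volatile unsigned short *nic_reg,
			  unsigned short *reg_val, unsigned short *raw_word)
{
	*reg_val  = le16_to_cpu(*nic_reg);	/* what _inw()/_ne_inw() do */
	*raw_word = *nic_reg;			/* what _insw()'s copy loop does */
}
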
diff --git a/arch/m32r/kernel/io_m32700ut.c b/arch/m32r/kernel/io_m32700ut.c
index eda9f963c1e..939932d6cc0 100644
--- a/arch/m32r/kernel/io_m32700ut.c
+++ b/arch/m32r/kernel/io_m32700ut.c
@@ -36,7 +36,7 @@ extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int);
static inline void *_port2addr(unsigned long port)
{
- return (void *)(port + NONCACHE_OFFSET);
+ return (void *)(port | NONCACHE_OFFSET);
}
#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
@@ -45,15 +45,15 @@ static inline void *__port2addr_ata(unsigned long port)
static int dummy_reg;
switch (port) {
- case 0x1f0: return (void *)0xac002000;
- case 0x1f1: return (void *)0xac012800;
- case 0x1f2: return (void *)0xac012002;
- case 0x1f3: return (void *)0xac012802;
- case 0x1f4: return (void *)0xac012004;
- case 0x1f5: return (void *)0xac012804;
- case 0x1f6: return (void *)0xac012006;
- case 0x1f7: return (void *)0xac012806;
- case 0x3f6: return (void *)0xac01200e;
+ case 0x1f0: return (void *)(0x0c002000 | NONCACHE_OFFSET);
+ case 0x1f1: return (void *)(0x0c012800 | NONCACHE_OFFSET);
+ case 0x1f2: return (void *)(0x0c012002 | NONCACHE_OFFSET);
+ case 0x1f3: return (void *)(0x0c012802 | NONCACHE_OFFSET);
+ case 0x1f4: return (void *)(0x0c012004 | NONCACHE_OFFSET);
+ case 0x1f5: return (void *)(0x0c012804 | NONCACHE_OFFSET);
+ case 0x1f6: return (void *)(0x0c012006 | NONCACHE_OFFSET);
+ case 0x1f7: return (void *)(0x0c012806 | NONCACHE_OFFSET);
+ case 0x3f6: return (void *)(0x0c01200e | NONCACHE_OFFSET);
default: return (void *)&dummy_reg;
}
}
@@ -64,8 +64,8 @@ static inline void *__port2addr_ata(unsigned long port)
* from 0x10000000 to 0x13ffffff on physical address.
* The base address of LAN controller(LAN91C111) is 0x300.
*/
-#define LAN_IOSTART 0xa0000300
-#define LAN_IOEND 0xa0000320
+#define LAN_IOSTART (0x300 | NONCACHE_OFFSET)
+#define LAN_IOEND (0x320 | NONCACHE_OFFSET)
static inline void *_port2addr_ne(unsigned long port)
{
return (void *)(port + 0x10000000);
diff --git a/arch/m32r/kernel/io_mappi.c b/arch/m32r/kernel/io_mappi.c
index 3c3da042fbd..a662b537c5b 100644
--- a/arch/m32r/kernel/io_mappi.c
+++ b/arch/m32r/kernel/io_mappi.c
@@ -31,7 +31,7 @@ extern void pcc_iowrite(int, unsigned long, void *, size_t, size_t, int);
static inline void *_port2addr(unsigned long port)
{
- return (void *)(port | (NONCACHE_OFFSET));
+ return (void *)(port | NONCACHE_OFFSET);
}
static inline void *_port2addr_ne(unsigned long port)
diff --git a/arch/m32r/kernel/io_mappi2.c b/arch/m32r/kernel/io_mappi2.c
index df3c729cb3e..e72d725606a 100644
--- a/arch/m32r/kernel/io_mappi2.c
+++ b/arch/m32r/kernel/io_mappi2.c
@@ -33,7 +33,7 @@ extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int);
static inline void *_port2addr(unsigned long port)
{
- return (void *)(port | (NONCACHE_OFFSET));
+ return (void *)(port | NONCACHE_OFFSET);
}
#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
@@ -42,22 +42,22 @@ static inline void *__port2addr_ata(unsigned long port)
static int dummy_reg;
switch (port) {
- case 0x1f0: return (void *)0xac002000;
- case 0x1f1: return (void *)0xac012800;
- case 0x1f2: return (void *)0xac012002;
- case 0x1f3: return (void *)0xac012802;
- case 0x1f4: return (void *)0xac012004;
- case 0x1f5: return (void *)0xac012804;
- case 0x1f6: return (void *)0xac012006;
- case 0x1f7: return (void *)0xac012806;
- case 0x3f6: return (void *)0xac01200e;
+ case 0x1f0: return (void *)(0x0c002000 | NONCACHE_OFFSET);
+ case 0x1f1: return (void *)(0x0c012800 | NONCACHE_OFFSET);
+ case 0x1f2: return (void *)(0x0c012002 | NONCACHE_OFFSET);
+ case 0x1f3: return (void *)(0x0c012802 | NONCACHE_OFFSET);
+ case 0x1f4: return (void *)(0x0c012004 | NONCACHE_OFFSET);
+ case 0x1f5: return (void *)(0x0c012804 | NONCACHE_OFFSET);
+ case 0x1f6: return (void *)(0x0c012006 | NONCACHE_OFFSET);
+ case 0x1f7: return (void *)(0x0c012806 | NONCACHE_OFFSET);
+ case 0x3f6: return (void *)(0x0c01200e | NONCACHE_OFFSET);
default: return (void *)&dummy_reg;
}
}
#endif
-#define LAN_IOSTART 0xa0000300
-#define LAN_IOEND 0xa0000320
+#define LAN_IOSTART (0x300 | NONCACHE_OFFSET)
+#define LAN_IOEND (0x320 | NONCACHE_OFFSET)
#ifdef CONFIG_CHIP_OPSP
static inline void *_port2addr_ne(unsigned long port)
{
diff --git a/arch/m32r/kernel/io_mappi3.c b/arch/m32r/kernel/io_mappi3.c
index f80321a5876..ed6da930bc6 100644
--- a/arch/m32r/kernel/io_mappi3.c
+++ b/arch/m32r/kernel/io_mappi3.c
@@ -33,7 +33,7 @@ extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int);
static inline void *_port2addr(unsigned long port)
{
- return (void *)(port + NONCACHE_OFFSET);
+ return (void *)(port | NONCACHE_OFFSET);
}
#if defined(CONFIG_IDE)
@@ -43,33 +43,42 @@ static inline void *__port2addr_ata(unsigned long port)
switch (port) {
/* IDE0 CF */
- case 0x1f0: return (void *)0xb4002000;
- case 0x1f1: return (void *)0xb4012800;
- case 0x1f2: return (void *)0xb4012002;
- case 0x1f3: return (void *)0xb4012802;
- case 0x1f4: return (void *)0xb4012004;
- case 0x1f5: return (void *)0xb4012804;
- case 0x1f6: return (void *)0xb4012006;
- case 0x1f7: return (void *)0xb4012806;
- case 0x3f6: return (void *)0xb401200e;
+ case 0x1f0: return (void *)(0x14002000 | NONCACHE_OFFSET);
+ case 0x1f1: return (void *)(0x14012800 | NONCACHE_OFFSET);
+ case 0x1f2: return (void *)(0x14012002 | NONCACHE_OFFSET);
+ case 0x1f3: return (void *)(0x14012802 | NONCACHE_OFFSET);
+ case 0x1f4: return (void *)(0x14012004 | NONCACHE_OFFSET);
+ case 0x1f5: return (void *)(0x14012804 | NONCACHE_OFFSET);
+ case 0x1f6: return (void *)(0x14012006 | NONCACHE_OFFSET);
+ case 0x1f7: return (void *)(0x14012806 | NONCACHE_OFFSET);
+ case 0x3f6: return (void *)(0x1401200e | NONCACHE_OFFSET);
/* IDE1 IDE */
- case 0x170: return (void *)0xb4810000; /* Data 16bit */
- case 0x171: return (void *)0xb4810002; /* Features / Error */
- case 0x172: return (void *)0xb4810004; /* Sector count */
- case 0x173: return (void *)0xb4810006; /* Sector number */
- case 0x174: return (void *)0xb4810008; /* Cylinder low */
- case 0x175: return (void *)0xb481000a; /* Cylinder high */
- case 0x176: return (void *)0xb481000c; /* Device head */
- case 0x177: return (void *)0xb481000e; /* Command */
- case 0x376: return (void *)0xb480800c; /* Device control / Alt status */
+ case 0x170: /* Data 16bit */
+ return (void *)(0x14810000 | NONCACHE_OFFSET);
+ case 0x171: /* Features / Error */
+ return (void *)(0x14810002 | NONCACHE_OFFSET);
+ case 0x172: /* Sector count */
+ return (void *)(0x14810004 | NONCACHE_OFFSET);
+ case 0x173: /* Sector number */
+ return (void *)(0x14810006 | NONCACHE_OFFSET);
+ case 0x174: /* Cylinder low */
+ return (void *)(0x14810008 | NONCACHE_OFFSET);
+ case 0x175: /* Cylinder high */
+ return (void *)(0x1481000a | NONCACHE_OFFSET);
+ case 0x176: /* Device head */
+ return (void *)(0x1481000c | NONCACHE_OFFSET);
+ case 0x177: /* Command */
+ return (void *)(0x1481000e | NONCACHE_OFFSET);
+ case 0x376: /* Device control / Alt status */
+ return (void *)(0x1480800c | NONCACHE_OFFSET);
default: return (void *)&dummy_reg;
}
}
#endif
-#define LAN_IOSTART 0xa0000300
-#define LAN_IOEND 0xa0000320
+#define LAN_IOSTART (0x300 | NONCACHE_OFFSET)
+#define LAN_IOEND (0x320 | NONCACHE_OFFSET)
static inline void *_port2addr_ne(unsigned long port)
{
return (void *)(port + 0x10000000);
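
Side note on the "+ NONCACHE_OFFSET" to "| NONCACHE_OFFSET" change above: the
old literals were already physical offset plus window, e.g. 0xac002000 ==
0x0c002000 | 0xa0000000, so OR and ADD give the same address as long as the
offset never carries into the window bits. A minimal sketch of that identity,
assuming NONCACHE_OFFSET keeps the value implied by the old literals:

    #define EXAMPLE_NONCACHE_OFFSET 0xa0000000UL  /* assumed, from the old 0xac... values */

    static unsigned long example_port2addr(unsigned long phys_off)
    {
            /* phys_off stays below 0x20000000 on these boards, so there is no
             * carry into the window bits: (phys_off | OFFSET) == (phys_off + OFFSET). */
            return phys_off | EXAMPLE_NONCACHE_OFFSET;
    }
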
diff --git a/arch/m32r/kernel/io_oaks32r.c b/arch/m32r/kernel/io_oaks32r.c
index 8be323931e4..910dd131c22 100644
--- a/arch/m32r/kernel/io_oaks32r.c
+++ b/arch/m32r/kernel/io_oaks32r.c
@@ -16,7 +16,7 @@
static inline void *_port2addr(unsigned long port)
{
- return (void *)(port | (NONCACHE_OFFSET));
+ return (void *)(port | NONCACHE_OFFSET);
}
static inline void *_port2addr_ne(unsigned long port)
diff --git a/arch/m32r/kernel/io_opsput.c b/arch/m32r/kernel/io_opsput.c
index 4793bd18e11..bec69297db3 100644
--- a/arch/m32r/kernel/io_opsput.c
+++ b/arch/m32r/kernel/io_opsput.c
@@ -36,7 +36,7 @@ extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int);
static inline void *_port2addr(unsigned long port)
{
- return (void *)(port | (NONCACHE_OFFSET));
+ return (void *)(port | NONCACHE_OFFSET);
}
/*
@@ -44,8 +44,8 @@ static inline void *_port2addr(unsigned long port)
* from 0x10000000 to 0x13ffffff on physical address.
* The base address of LAN controller(LAN91C111) is 0x300.
*/
-#define LAN_IOSTART 0xa0000300
-#define LAN_IOEND 0xa0000320
+#define LAN_IOSTART (0x300 | NONCACHE_OFFSET)
+#define LAN_IOEND (0x320 | NONCACHE_OFFSET)
static inline void *_port2addr_ne(unsigned long port)
{
return (void *)(port + 0x10000000);
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
index e5ec134d81d..dbc8a392105 100644
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -18,8 +18,6 @@
#include <asm/irq.h>
#include <asm/tlbflush.h>
-extern void dump_thread(struct pt_regs *, struct user *);
-
#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
extern struct drive_info_struct drive_info;
EXPORT_SYMBOL(drive_info);
@@ -27,7 +25,6 @@ EXPORT_SYMBOL(drive_info);
/* platform dependent support */
EXPORT_SYMBOL(boot_cpu_data);
-EXPORT_SYMBOL(dump_thread);
EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index cc4b571e5db..5dfc7ea45cf 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -50,6 +50,10 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
* Powermanagement idle function, if any..
*/
void (*pm_idle)(void) = NULL;
+EXPORT_SYMBOL(pm_idle);
+
+void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
void disable_hlt(void)
{
@@ -238,13 +242,10 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
int copy_thread(int nr, unsigned long clone_flags, unsigned long spu,
unsigned long unused, struct task_struct *tsk, struct pt_regs *regs)
{
- struct pt_regs *childregs;
- unsigned long sp = (unsigned long)tsk->thread_info + THREAD_SIZE;
+ struct pt_regs *childregs = task_pt_regs(tsk);
extern void ret_from_fork(void);
/* Copy registers */
- sp -= sizeof (struct pt_regs);
- childregs = (struct pt_regs *)sp;
*childregs = *regs;
childregs->spu = spu;
@@ -257,14 +258,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long spu,
}
/*
- * fill in the user structure for a core dump..
- */
-void dump_thread(struct pt_regs * regs, struct user * dump)
-{
- /* M32R_FIXME */
-}
-
-/*
* Capture the user space registers if the task is not running (in user space)
*/
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 078d2a0e71c..340a3bf59b8 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -35,23 +35,6 @@
#include <asm/mmu_context.h>
/*
- * Get the address of the live pt_regs for the specified task.
- * These are saved onto the top kernel stack when the process
- * is not running.
- *
- * Note: if a user thread is execve'd from kernel space, the
- * kernel stack will not be empty on entry to the kernel, so
- * ptracing these tasks will fail.
- */
-static inline struct pt_regs *
-get_user_regs(struct task_struct *task)
-{
- return (struct pt_regs *)
- ((unsigned long)task->thread_info + THREAD_SIZE
- - sizeof(struct pt_regs));
-}
-
-/*
* This routine will get a word off of the process kernel stack.
*/
static inline unsigned long int
@@ -59,7 +42,7 @@ get_stack_long(struct task_struct *task, int offset)
{
unsigned long *stack;
- stack = (unsigned long *)get_user_regs(task);
+ stack = (unsigned long *)task_pt_regs(task);
return stack[offset];
}
@@ -72,7 +55,7 @@ put_stack_long(struct task_struct *task, int offset, unsigned long data)
{
unsigned long *stack;
- stack = (unsigned long *)get_user_regs(task);
+ stack = (unsigned long *)task_pt_regs(task);
stack[offset] = data;
return 0;
@@ -208,7 +191,7 @@ static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
*/
static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
{
- struct pt_regs *regs = get_user_regs(tsk);
+ struct pt_regs *regs = task_pt_regs(tsk);
return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
}
@@ -223,7 +206,7 @@ static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
ret = -EFAULT;
if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
- struct pt_regs *regs = get_user_regs(tsk);
+ struct pt_regs *regs = task_pt_regs(tsk);
*regs = newregs;
ret = 0;
}
@@ -762,28 +745,16 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
int ret;
lock_kernel();
- ret = -EPERM;
if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
+ ret = ptrace_traceme();
goto out;
}
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
- ret = -EPERM;
- if (pid == 1) /* you may not mess with init */
+ child = ptrace_get_task_struct(pid);
+ if (IS_ERR(child)) {
+ ret = PTR_ERR(child);
goto out;
+ }
if (request == PTRACE_ATTACH) {
ret = ptrace_attach(child);
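
Side note on the ptrace conversion above: task_pt_regs() hides the arithmetic
the deleted get_user_regs() spelled out, namely that the saved user-mode
pt_regs frame sits at the very top of the task's kernel stack. A rough sketch
of the equivalence (the real per-arch definition may differ in detail):

    /* Sketch: the frame lives at stack base + THREAD_SIZE - sizeof(struct pt_regs),
     * exactly what the removed get_user_regs() computed by hand. */
    #define example_task_pt_regs(tsk)                                     \
            ((struct pt_regs *)((unsigned long)task_stack_page(tsk)       \
                                + THREAD_SIZE) - 1)
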
diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
index f722ec8eb02..c2e4dccf011 100644
--- a/arch/m32r/kernel/setup.c
+++ b/arch/m32r/kernel/setup.c
@@ -320,6 +320,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#elif defined(CONFIG_CHIP_MP)
seq_printf(m, "cpu family\t: M32R-MP\n"
"cache size\t: I-xxKB/D-xxKB\n");
+#elif defined(CONFIG_CHIP_M32104)
+ seq_printf(m,"cpu family\t: M32104\n"
+ "cache size\t: I-8KB/D-8KB\n");
#else
seq_printf(m, "cpu family\t: Unknown\n");
#endif
@@ -340,6 +343,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "Machine\t\t: uServer\n");
#elif defined(CONFIG_PLAT_OAKS32R)
seq_printf(m, "Machine\t\t: OAKS32R\n");
+#elif defined(CONFIG_PLAT_M32104UT)
+ seq_printf(m, "Machine\t\t: M3T-M32104UT uT Engine board\n");
#else
seq_printf(m, "Machine\t\t: Unknown\n");
#endif
@@ -389,7 +394,7 @@ unsigned long cpu_initialized __initdata = 0;
*/
#if defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_XNUX2) \
|| defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32102) \
- || defined(CONFIG_CHIP_OPSP)
+ || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
void __init cpu_init (void)
{
int cpu_id = smp_processor_id();
diff --git a/arch/m32r/kernel/setup_m32104ut.c b/arch/m32r/kernel/setup_m32104ut.c
new file mode 100644
index 00000000000..6328e1357a8
--- /dev/null
+++ b/arch/m32r/kernel/setup_m32104ut.c
@@ -0,0 +1,156 @@
+/*
+ * linux/arch/m32r/kernel/setup_m32104ut.c
+ *
+ * Setup routines for M32104UT Board
+ *
+ * Copyright (c) 2002-2005 Hiroyuki Kondo, Hirokazu Takata,
+ * Hitoshi Yamamoto, Mamoru Sakugawa,
+ * Naoto Sugai, Hayato Fujiwara
+ */
+
+#include <linux/config.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+
+#include <asm/system.h>
+#include <asm/m32r.h>
+#include <asm/io.h>
+
+#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
+
+icu_data_t icu_data[NR_IRQS];
+
+static void disable_m32104ut_irq(unsigned int irq)
+{
+ unsigned long port, data;
+
+ port = irq2port(irq);
+ data = icu_data[irq].icucr|M32R_ICUCR_ILEVEL7;
+ outl(data, port);
+}
+
+static void enable_m32104ut_irq(unsigned int irq)
+{
+ unsigned long port, data;
+
+ port = irq2port(irq);
+ data = icu_data[irq].icucr|M32R_ICUCR_IEN|M32R_ICUCR_ILEVEL6;
+ outl(data, port);
+}
+
+static void mask_and_ack_m32104ut(unsigned int irq)
+{
+ disable_m32104ut_irq(irq);
+}
+
+static void end_m32104ut_irq(unsigned int irq)
+{
+ enable_m32104ut_irq(irq);
+}
+
+static unsigned int startup_m32104ut_irq(unsigned int irq)
+{
+ enable_m32104ut_irq(irq);
+ return (0);
+}
+
+static void shutdown_m32104ut_irq(unsigned int irq)
+{
+ unsigned long port;
+
+ port = irq2port(irq);
+ outl(M32R_ICUCR_ILEVEL7, port);
+}
+
+static struct hw_interrupt_type m32104ut_irq_type =
+{
+ .typename = "M32104UT-IRQ",
+ .startup = startup_m32104ut_irq,
+ .shutdown = shutdown_m32104ut_irq,
+ .enable = enable_m32104ut_irq,
+ .disable = disable_m32104ut_irq,
+ .ack = mask_and_ack_m32104ut,
+ .end = end_m32104ut_irq
+};
+
+void __init init_IRQ(void)
+{
+ static int once = 0;
+
+ if (once)
+ return;
+ else
+ once++;
+
+#if defined(CONFIG_SMC91X)
+ /* INT#0: LAN controller on M32104UT-LAN (SMC91C111)*/
+ irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_INT0].handler = &m32104ut_irq_type;
+ irq_desc[M32R_IRQ_INT0].action = 0;
+ irq_desc[M32R_IRQ_INT0].depth = 1;
+ icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD11; /* "H" level sense */
+ disable_m32104ut_irq(M32R_IRQ_INT0);
+#endif /* CONFIG_SMC91X */
+
+ /* MFT2 : system timer */
+ irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_MFT2].handler = &m32104ut_irq_type;
+ irq_desc[M32R_IRQ_MFT2].action = 0;
+ irq_desc[M32R_IRQ_MFT2].depth = 1;
+ icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
+ disable_m32104ut_irq(M32R_IRQ_MFT2);
+
+#ifdef CONFIG_SERIAL_M32R_SIO
+ /* SIO0_R : uart receive data */
+ irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO0_R].handler = &m32104ut_irq_type;
+ irq_desc[M32R_IRQ_SIO0_R].action = 0;
+ irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+ icu_data[M32R_IRQ_SIO0_R].icucr = M32R_ICUCR_IEN;
+ disable_m32104ut_irq(M32R_IRQ_SIO0_R);
+
+ /* SIO0_S : uart send data */
+ irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
+ irq_desc[M32R_IRQ_SIO0_S].handler = &m32104ut_irq_type;
+ irq_desc[M32R_IRQ_SIO0_S].action = 0;
+ irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+ icu_data[M32R_IRQ_SIO0_S].icucr = M32R_ICUCR_IEN;
+ disable_m32104ut_irq(M32R_IRQ_SIO0_S);
+#endif /* CONFIG_SERIAL_M32R_SIO */
+}
+
+#if defined(CONFIG_SMC91X)
+
+#define LAN_IOSTART 0x300
+#define LAN_IOEND 0x320
+static struct resource smc91x_resources[] = {
+ [0] = {
+ .start = (LAN_IOSTART),
+ .end = (LAN_IOEND),
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = M32R_IRQ_INT0,
+ .end = M32R_IRQ_INT0,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_device smc91x_device = {
+ .name = "smc91x",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(smc91x_resources),
+ .resource = smc91x_resources,
+};
+#endif
+
+static int __init platform_init(void)
+{
+#if defined(CONFIG_SMC91X)
+ platform_device_register(&smc91x_device);
+#endif
+ return 0;
+}
+arch_initcall(platform_init);
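
Two notes on the new board file above, for orientation. irq2port() is plain
pointer arithmetic over the ICU control-register block (one 32-bit register
per IRQ, indexed from 1), and masking is done by rewriting the per-IRQ ICUCR
value, as the enable/disable helpers show. A worked example under those same
macros:

    unsigned long port = irq2port(M32R_IRQ_MFT2);
            /* == M32R_ICU_CR1_PORTL + (M32R_IRQ_MFT2 - 1) * sizeof(unsigned long) */

    outl(icu_data[M32R_IRQ_MFT2].icucr | M32R_ICUCR_ILEVEL7, port);
            /* masked: level 7, what disable_m32104ut_irq() writes */
    outl(icu_data[M32R_IRQ_MFT2].icucr | M32R_ICUCR_IEN | M32R_ICUCR_ILEVEL6, port);
            /* unmasked: IEN plus level 6, what enable_m32104ut_irq() writes */
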
diff --git a/arch/m32r/kernel/setup_m32700ut.c b/arch/m32r/kernel/setup_m32700ut.c
index cb76916b014..fad1fc99bb2 100644
--- a/arch/m32r/kernel/setup_m32700ut.c
+++ b/arch/m32r/kernel/setup_m32700ut.c
@@ -26,15 +26,7 @@
*/
#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
-#ifndef CONFIG_SMP
-typedef struct {
- unsigned long icucr; /* ICU Control Register */
-} icu_data_t;
-static icu_data_t icu_data[M32700UT_NUM_CPU_IRQ];
-#else
icu_data_t icu_data[M32700UT_NUM_CPU_IRQ];
-#endif /* CONFIG_SMP */
-
static void disable_m32700ut_irq(unsigned int irq)
{
diff --git a/arch/m32r/kernel/setup_mappi.c b/arch/m32r/kernel/setup_mappi.c
index 501d798cf05..00f253209cb 100644
--- a/arch/m32r/kernel/setup_mappi.c
+++ b/arch/m32r/kernel/setup_mappi.c
@@ -19,12 +19,6 @@
#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
-#ifndef CONFIG_SMP
-typedef struct {
- unsigned long icucr; /* ICU Control Register */
-} icu_data_t;
-#endif /* CONFIG_SMP */
-
icu_data_t icu_data[NR_IRQS];
static void disable_mappi_irq(unsigned int irq)
diff --git a/arch/m32r/kernel/setup_mappi2.c b/arch/m32r/kernel/setup_mappi2.c
index 7f2db5bfd62..eebc9d8b4e7 100644
--- a/arch/m32r/kernel/setup_mappi2.c
+++ b/arch/m32r/kernel/setup_mappi2.c
@@ -19,12 +19,6 @@
#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
-#ifndef CONFIG_SMP
-typedef struct {
- unsigned long icucr; /* ICU Control Register */
-} icu_data_t;
-#endif /* CONFIG_SMP */
-
icu_data_t icu_data[NR_IRQS];
static void disable_mappi2_irq(unsigned int irq)
diff --git a/arch/m32r/kernel/setup_mappi3.c b/arch/m32r/kernel/setup_mappi3.c
index f6ecdf7f555..d2ff021e2d3 100644
--- a/arch/m32r/kernel/setup_mappi3.c
+++ b/arch/m32r/kernel/setup_mappi3.c
@@ -19,12 +19,6 @@
#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
-#ifndef CONFIG_SMP
-typedef struct {
- unsigned long icucr; /* ICU Control Register */
-} icu_data_t;
-#endif /* CONFIG_SMP */
-
icu_data_t icu_data[NR_IRQS];
static void disable_mappi3_irq(unsigned int irq)
diff --git a/arch/m32r/kernel/setup_oaks32r.c b/arch/m32r/kernel/setup_oaks32r.c
index 45add5b76f1..0e9e63538c0 100644
--- a/arch/m32r/kernel/setup_oaks32r.c
+++ b/arch/m32r/kernel/setup_oaks32r.c
@@ -18,12 +18,6 @@
#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
-#ifndef CONFIG_SMP
-typedef struct {
- unsigned long icucr; /* ICU Control Register */
-} icu_data_t;
-#endif /* CONFIG_SMP */
-
icu_data_t icu_data[NR_IRQS];
static void disable_oaks32r_irq(unsigned int irq)
diff --git a/arch/m32r/kernel/setup_opsput.c b/arch/m32r/kernel/setup_opsput.c
index 1fbb140854e..548e8fc7949 100644
--- a/arch/m32r/kernel/setup_opsput.c
+++ b/arch/m32r/kernel/setup_opsput.c
@@ -27,15 +27,7 @@
*/
#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
-#ifndef CONFIG_SMP
-typedef struct {
- unsigned long icucr; /* ICU Control Register */
-} icu_data_t;
-static icu_data_t icu_data[OPSPUT_NUM_CPU_IRQ];
-#else
icu_data_t icu_data[OPSPUT_NUM_CPU_IRQ];
-#endif /* CONFIG_SMP */
-
static void disable_opsput_irq(unsigned int irq)
{
diff --git a/arch/m32r/kernel/setup_usrv.c b/arch/m32r/kernel/setup_usrv.c
index 634741bf9d3..64be659a23e 100644
--- a/arch/m32r/kernel/setup_usrv.c
+++ b/arch/m32r/kernel/setup_usrv.c
@@ -18,12 +18,6 @@
#define irq2port(x) (M32R_ICU_CR1_PORTL + ((x - 1) * sizeof(unsigned long)))
-#if !defined(CONFIG_SMP)
-typedef struct {
- unsigned long icucr; /* ICU Control Register */
-} icu_data_t;
-#endif /* CONFIG_SMP */
-
icu_data_t icu_data[M32700UT_NUM_CPU_IRQ];
static void disable_mappi_irq(unsigned int irq)
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index b90c54169fa..d7ec16e7fb2 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -286,7 +286,7 @@ static void __init do_boot_cpu(int phys_id)
/* So we see what's up */
printk("Booting processor %d/%d\n", phys_id, cpu_id);
stack_start.spi = (void *)idle->thread.sp;
- idle->thread_info->cpu = cpu_id;
+ task_thread_info(idle)->cpu = cpu_id;
/*
* Send Startup IPI
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index 2ebce2063fe..b8e68b54230 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -57,7 +57,7 @@ static unsigned long do_gettimeoffset(void)
#if defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_XNUX2) \
|| defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_M32700) \
- || defined(CONFIG_CHIP_OPSP)
+ || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
#ifndef CONFIG_SMP
unsigned long count;
@@ -268,7 +268,7 @@ void __init time_init(void)
#if defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_XNUX2) \
|| defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_M32700) \
- || defined(CONFIG_CHIP_OPSP)
+ || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
/* M32102 MFT setup */
setup_irq(M32R_IRQ_MFT2, &irq0);
diff --git a/arch/arm/configs/epxa10db_defconfig b/arch/m32r/m32104ut/defconfig.m32104ut
index 9fb8b58c495..454de336803 100644
--- a/arch/arm/configs/epxa10db_defconfig
+++ b/arch/m32r/m32104ut/defconfig.m32104ut
@@ -1,14 +1,13 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.12-rc1-bk2
-# Sun Mar 27 22:46:51 2005
+# Linux kernel version: 2.6.14
+# Wed Nov 9 16:04:51 2005
#
-CONFIG_ARM=y
-CONFIG_MMU=y
-CONFIG_UID16=y
-CONFIG_RWSEM_GENERIC_SPINLOCK=y
-CONFIG_GENERIC_CALIBRATE_DELAY=y
-CONFIG_GENERIC_IOMAP=y
+CONFIG_M32R=y
+# CONFIG_UID16 is not set
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
#
# Code maturity level options
@@ -16,147 +15,174 @@ CONFIG_GENERIC_IOMAP=y
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
#
# General setup
#
CONFIG_LOCALVERSION=""
-CONFIG_SWAP=y
-CONFIG_SYSVIPC=y
+CONFIG_LOCALVERSION_AUTO=y
# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
-# CONFIG_HOTPLUG is not set
-CONFIG_KOBJECT_UEVENT=y
+CONFIG_HOTPLUG=y
+# CONFIG_KOBJECT_UEVENT is not set
# CONFIG_IKCONFIG is not set
-# CONFIG_EMBEDDED is not set
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_EMBEDDED=y
+# CONFIG_KALLSYMS is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
CONFIG_BASE_FULL=y
-CONFIG_FUTEX=y
-CONFIG_EPOLL=y
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_SHMEM=y
+# CONFIG_FUTEX is not set
+# CONFIG_EPOLL is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_CC_ALIGN_FUNCTIONS=0
CONFIG_CC_ALIGN_LABELS=0
CONFIG_CC_ALIGN_LOOPS=0
CONFIG_CC_ALIGN_JUMPS=0
-# CONFIG_TINY_SHMEM is not set
+CONFIG_TINY_SHMEM=y
CONFIG_BASE_SMALL=0
#
# Loadable module support
#
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_MODULE_FORCE_UNLOAD is not set
-CONFIG_OBSOLETE_MODPARM=y
-# CONFIG_MODVERSIONS is not set
-# CONFIG_MODULE_SRCVERSION_ALL is not set
-# CONFIG_KMOD is not set
-
-#
-# System Type
-#
-# CONFIG_ARCH_CLPS7500 is not set
-# CONFIG_ARCH_CLPS711X is not set
-# CONFIG_ARCH_CO285 is not set
-# CONFIG_ARCH_EBSA110 is not set
-CONFIG_ARCH_CAMELOT=y
-# CONFIG_ARCH_FOOTBRIDGE is not set
-# CONFIG_ARCH_INTEGRATOR is not set
-# CONFIG_ARCH_IOP3XX is not set
-# CONFIG_ARCH_IXP4XX is not set
-# CONFIG_ARCH_IXP2000 is not set
-# CONFIG_ARCH_L7200 is not set
-# CONFIG_ARCH_PXA is not set
-# CONFIG_ARCH_RPC is not set
-# CONFIG_ARCH_SA1100 is not set
-# CONFIG_ARCH_S3C2410 is not set
-# CONFIG_ARCH_SHARK is not set
-# CONFIG_ARCH_LH7A40X is not set
-# CONFIG_ARCH_OMAP is not set
-# CONFIG_ARCH_VERSATILE is not set
-# CONFIG_ARCH_IMX is not set
-# CONFIG_ARCH_H720X is not set
-
-#
-# Epxa10db
-#
-
-#
-# PLD hotswap support
-#
-CONFIG_PLD=y
-# CONFIG_PLD_HOTSWAP is not set
+# CONFIG_MODULES is not set
+
+#
+# Processor type and features
+#
+# CONFIG_PLAT_MAPPI is not set
+# CONFIG_PLAT_USRV is not set
+# CONFIG_PLAT_M32700UT is not set
+# CONFIG_PLAT_OPSPUT is not set
+# CONFIG_PLAT_OAKS32R is not set
+# CONFIG_PLAT_MAPPI2 is not set
+# CONFIG_PLAT_MAPPI3 is not set
+CONFIG_PLAT_M32104UT=y
+# CONFIG_CHIP_M32700 is not set
+# CONFIG_CHIP_M32102 is not set
+CONFIG_CHIP_M32104=y
+# CONFIG_CHIP_VDEC2 is not set
+# CONFIG_CHIP_OPSP is not set
+CONFIG_ISA_M32R=y
+CONFIG_BUS_CLOCK=54000000
+CONFIG_TIMER_DIVIDE=128
+# CONFIG_CPU_LITTLE_ENDIAN is not set
+CONFIG_MEMORY_START=04000000
+CONFIG_MEMORY_SIZE=01000000
+CONFIG_NOHIGHMEM=y
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+# CONFIG_PREEMPT is not set
+# CONFIG_SMP is not set
#
-# Processor Type
+# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
#
-CONFIG_CPU_32=y
-CONFIG_CPU_ARM922T=y
-CONFIG_CPU_32v4=y
-CONFIG_CPU_ABRT_EV4T=y
-CONFIG_CPU_CACHE_V4WT=y
-CONFIG_CPU_CACHE_VIVT=y
-CONFIG_CPU_COPY_V4WB=y
-CONFIG_CPU_TLB_V4WBI=y
+# CONFIG_ISA is not set
#
-# Processor Features
+# PCCARD (PCMCIA/CardBus) support
#
-# CONFIG_ARM_THUMB is not set
-# CONFIG_CPU_ICACHE_DISABLE is not set
-# CONFIG_CPU_DCACHE_DISABLE is not set
-# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+CONFIG_PCCARD=y
+# CONFIG_PCMCIA_DEBUG is not set
+CONFIG_PCMCIA=y
+CONFIG_PCMCIA_LOAD_CIS=y
+CONFIG_PCMCIA_IOCTL=y
#
-# Bus support
+# PC-card bridges
#
#
-# PCCARD (PCMCIA/CardBus) support
+# PCI Hotplug Support
#
-# CONFIG_PCCARD is not set
#
-# Kernel Features
+# Executable file formats
#
-# CONFIG_PREEMPT is not set
-CONFIG_ALIGNMENT_TRAP=y
+CONFIG_BINFMT_FLAT=y
+# CONFIG_BINFMT_ZFLAT is not set
+# CONFIG_BINFMT_SHARED_FLAT is not set
+# CONFIG_BINFMT_MISC is not set
#
-# Boot options
+# Networking
#
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=32M console=ttyUA0,115200 initrd=0x00200000,8M root=/dev/ram0 rw"
-# CONFIG_XIP_KERNEL is not set
+CONFIG_NET=y
#
-# Floating point emulation
+# Networking options
#
+# CONFIG_PACKET is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
#
-# At least one emulation must be selected
+# DCCP Configuration (EXPERIMENTAL)
#
-CONFIG_FPE_NWFPE=y
-# CONFIG_FPE_NWFPE_XP is not set
-# CONFIG_FPE_FASTFPE is not set
+# CONFIG_IP_DCCP is not set
#
-# Userspace binary formats
+# SCTP Configuration (EXPERIMENTAL)
#
-CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_AOUT is not set
-# CONFIG_BINFMT_MISC is not set
-# CONFIG_ARTHUR is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_NET_CLS_ROUTE is not set
#
-# Power management options
+# Network testing
#
-# CONFIG_PM is not set
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
#
# Device Drivers
@@ -167,7 +193,13 @@ CONFIG_BINFMT_ELF=y
#
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_FW_LOADER is not set
+CONFIG_FW_LOADER=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
#
# Memory Technology Devices (MTD)
@@ -186,30 +218,34 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
#
# Block devices
#
-# CONFIG_BLK_DEV_FD is not set
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
-# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
-CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_BLK_DEV_RAM_SIZE=4096
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE=""
# CONFIG_CDROM_PKTCDVD is not set
#
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
# CONFIG_ATA_OVER_ETH is not set
#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
# SCSI device support
#
+# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
#
@@ -220,6 +256,7 @@ CONFIG_IOSCHED_CFQ=y
#
# Fusion MPT device support
#
+# CONFIG_FUSION is not set
#
# IEEE 1394 (FireWire) support
@@ -230,81 +267,26 @@ CONFIG_IOSCHED_CFQ=y
#
#
-# Networking support
-#
-CONFIG_NET=y
-
-#
-# Networking options
+# Network device support
#
-CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
-# CONFIG_NETLINK_DEV is not set
-CONFIG_UNIX=y
-# CONFIG_NET_KEY is not set
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-# CONFIG_IP_ADVANCED_ROUTER is not set
-CONFIG_IP_PNP=y
-# CONFIG_IP_PNP_DHCP is not set
-# CONFIG_IP_PNP_BOOTP is not set
-# CONFIG_IP_PNP_RARP is not set
-# CONFIG_NET_IPIP is not set
-# CONFIG_NET_IPGRE is not set
-# CONFIG_IP_MROUTE is not set
-# CONFIG_ARPD is not set
-# CONFIG_SYN_COOKIES is not set
-# CONFIG_INET_AH is not set
-# CONFIG_INET_ESP is not set
-# CONFIG_INET_IPCOMP is not set
-# CONFIG_INET_TUNNEL is not set
-# CONFIG_IP_TCPDIAG is not set
-# CONFIG_IP_TCPDIAG_IPV6 is not set
-# CONFIG_IPV6 is not set
-# CONFIG_NETFILTER is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-# CONFIG_IP_SCTP is not set
-# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
-# CONFIG_VLAN_8021Q is not set
-# CONFIG_DECNET is not set
-# CONFIG_LLC2 is not set
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
-# CONFIG_X25 is not set
-# CONFIG_LAPB is not set
-# CONFIG_NET_DIVERT is not set
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-
-#
-# QoS and/or fair queueing
-#
-# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
-
-#
-# Network testing
-#
-# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETPOLL is not set
-# CONFIG_NET_POLL_CONTROLLER is not set
-# CONFIG_HAMRADIO is not set
-# CONFIG_IRDA is not set
-# CONFIG_BT is not set
CONFIG_NETDEVICES=y
-# CONFIG_DUMMY is not set
+CONFIG_DUMMY=y
# CONFIG_BONDING is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
#
+# PHY device support
+#
+# CONFIG_PHYLIB is not set
+
+#
# Ethernet (10 or 100Mbit)
#
-# CONFIG_NET_ETHERNET is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_SMC91X=y
+# CONFIG_NE2000 is not set
#
# Ethernet (1000 Mbit)
@@ -324,20 +306,20 @@ CONFIG_NETDEVICES=y
# CONFIG_NET_RADIO is not set
#
+# PCMCIA network device support
+#
+# CONFIG_NET_PCMCIA is not set
+
+#
# Wan interfaces
#
# CONFIG_WAN is not set
-CONFIG_PPP=y
-CONFIG_PPP_MULTILINK=y
-# CONFIG_PPP_FILTER is not set
-CONFIG_PPP_ASYNC=y
-CONFIG_PPP_SYNC_TTY=y
-CONFIG_PPP_DEFLATE=y
-# CONFIG_PPP_BSDCOMP is not set
-# CONFIG_PPPOE is not set
+# CONFIG_PPP is not set
# CONFIG_SLIP is not set
# CONFIG_SHAPER is not set
# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
#
# ISDN subsystem
@@ -345,46 +327,25 @@ CONFIG_PPP_DEFLATE=y
# CONFIG_ISDN is not set
#
-# Input device support
-#
-CONFIG_INPUT=y
-
-#
-# Userland interfaces
+# Telephony Support
#
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_TSDEV is not set
-# CONFIG_INPUT_EVDEV is not set
-# CONFIG_INPUT_EVBUG is not set
+# CONFIG_PHONE is not set
#
-# Input Device Drivers
+# Input device support
#
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
+# CONFIG_INPUT is not set
#
# Hardware I/O ports
#
-CONFIG_SERIO=y
-CONFIG_SERIO_SERPORT=y
-# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO is not set
# CONFIG_GAMEPORT is not set
-CONFIG_SOUND_GAMEPORT=y
#
# Character devices
#
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
-CONFIG_HW_CONSOLE=y
+# CONFIG_VT is not set
# CONFIG_SERIAL_NONSTANDARD is not set
#
@@ -395,10 +356,10 @@ CONFIG_HW_CONSOLE=y
#
# Non-8250 serial port support
#
-CONFIG_SERIAL_UART00=y
-CONFIG_SERIAL_UART00_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_M32R_SIO=y
+CONFIG_SERIAL_M32R_SIO_CONSOLE=y
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
@@ -411,8 +372,13 @@ CONFIG_LEGACY_PTY_COUNT=256
#
# Watchdog Cards
#
-# CONFIG_WATCHDOG is not set
-# CONFIG_NVRAM is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+CONFIG_SOFT_WATCHDOG=y
# CONFIG_RTC is not set
# CONFIG_DTLK is not set
# CONFIG_R3964 is not set
@@ -420,13 +386,16 @@ CONFIG_LEGACY_PTY_COUNT=256
#
# Ftape, the floppy tape device driver
#
-# CONFIG_DRM is not set
+
+#
+# PCMCIA character devices
+#
+# CONFIG_SYNCLINK_CS is not set
# CONFIG_RAW_DRIVER is not set
#
# TPM devices
#
-# CONFIG_TCG_TPM is not set
#
# I2C support
@@ -434,10 +403,25 @@ CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_I2C is not set
#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+# CONFIG_HWMON is not set
+# CONFIG_HWMON_VID is not set
+
+#
# Misc devices
#
#
+# Multimedia Capabilities Port drivers
+#
+
+#
# Multimedia devices
#
# CONFIG_VIDEO_DEV is not set
@@ -453,12 +437,6 @@ CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_FB is not set
#
-# Console display driver support
-#
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_DUMMY_CONSOLE=y
-
-#
# Sound
#
# CONFIG_SOUND is not set
@@ -466,9 +444,8 @@ CONFIG_DUMMY_CONSOLE=y
#
# USB support
#
-CONFIG_USB_ARCH_HAS_HCD=y
+# CONFIG_USB_ARCH_HAS_HCD is not set
# CONFIG_USB_ARCH_HAS_OHCI is not set
-# CONFIG_USB is not set
#
# USB Gadget Support
@@ -481,25 +458,38 @@ CONFIG_USB_ARCH_HAS_HCD=y
# CONFIG_MMC is not set
#
+# InfiniBand support
+#
+
+#
+# SN Devices
+#
+
+#
# File systems
#
CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
-# CONFIG_EXT3_FS is not set
-# CONFIG_JBD is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
-
-#
-# XFS support
-#
+CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set
+# CONFIG_INOTIFY is not set
# CONFIG_QUOTA is not set
CONFIG_DNOTIFY=y
-CONFIG_AUTOFS_FS=y
-CONFIG_AUTOFS4_FS=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
#
# CD-ROM/DVD Filesystems
@@ -510,8 +500,11 @@ CONFIG_AUTOFS4_FS=y
#
# DOS/FAT/NT Filesystems
#
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=932
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
# CONFIG_NTFS_FS is not set
#
@@ -519,11 +512,10 @@ CONFIG_AUTOFS4_FS=y
#
CONFIG_PROC_FS=y
CONFIG_SYSFS=y
-# CONFIG_DEVFS_FS is not set
-# CONFIG_DEVPTS_FS_XATTR is not set
-# CONFIG_TMPFS is not set
+CONFIG_TMPFS=y
# CONFIG_HUGETLB_PAGE is not set
CONFIG_RAMFS=y
+# CONFIG_RELAYFS_FS is not set
#
# Miscellaneous filesystems
@@ -535,7 +527,7 @@ CONFIG_RAMFS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
-# CONFIG_CRAMFS is not set
+CONFIG_CRAMFS=y
# CONFIG_VXFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
@@ -545,14 +537,25 @@ CONFIG_RAMFS=y
#
# Network File Systems
#
-# CONFIG_NFS_FS is not set
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
# CONFIG_NFSD is not set
-CONFIG_SMB_FS=y
-# CONFIG_SMB_NLS_DEFAULT is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
#
# Partition Types
@@ -565,7 +568,7 @@ CONFIG_MSDOS_PARTITION=y
#
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
-# CONFIG_NLS_CODEPAGE_437 is not set
+CONFIG_NLS_CODEPAGE_437=y
# CONFIG_NLS_CODEPAGE_737 is not set
# CONFIG_NLS_CODEPAGE_775 is not set
# CONFIG_NLS_CODEPAGE_850 is not set
@@ -582,7 +585,7 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_869 is not set
# CONFIG_NLS_CODEPAGE_936 is not set
# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
+CONFIG_NLS_CODEPAGE_932=y
# CONFIG_NLS_CODEPAGE_949 is not set
# CONFIG_NLS_CODEPAGE_874 is not set
# CONFIG_NLS_ISO8859_8 is not set
@@ -602,7 +605,7 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_ISO8859_15 is not set
# CONFIG_NLS_KOI8_R is not set
# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
+CONFIG_NLS_UTF8=y
#
# Profiling support
@@ -613,11 +616,21 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# Kernel hacking
#
# CONFIG_PRINTK_TIME is not set
-# CONFIG_DEBUG_KERNEL is not set
+CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_DEBUG_BUGVERBOSE=y
-CONFIG_FRAME_POINTER=y
-# CONFIG_DEBUG_USER is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_FS is not set
+# CONFIG_FRAME_POINTER is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
#
# Security options
@@ -637,8 +650,8 @@ CONFIG_FRAME_POINTER=y
#
# Library routines
#
-CONFIG_CRC_CCITT=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
CONFIG_CRC32=y
-# CONFIG_LIBCRC32C is not set
+CONFIG_LIBCRC32C=y
CONFIG_ZLIB_INFLATE=y
-CONFIG_ZLIB_DEFLATE=y
diff --git a/arch/m32r/mm/cache.c b/arch/m32r/mm/cache.c
index 31b0789c199..9f54dd93701 100644
--- a/arch/m32r/mm/cache.c
+++ b/arch/m32r/mm/cache.c
@@ -1,7 +1,7 @@
/*
* linux/arch/m32r/mm/cache.c
*
- * Copyright (C) 2002 Hirokazu Takata
+ * Copyright (C) 2002-2005 Hirokazu Takata, Hayato Fujiwara
*/
#include <linux/config.h>
@@ -9,7 +9,8 @@
#undef MCCR
-#if defined(CONFIG_CHIP_XNUX2) || defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_OPSP)
+#if defined(CONFIG_CHIP_XNUX2) || defined(CONFIG_CHIP_M32700) \
+ || defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_OPSP)
/* Cache Control Register */
#define MCCR ((volatile unsigned long*)0xfffffffc)
#define MCCR_CC (1UL << 7) /* Cache mode modify bit */
@@ -26,7 +27,17 @@
#define MCCR ((volatile unsigned char*)0xfffffffe)
#define MCCR_IIV (1UL << 0) /* I-cache invalidate */
#define MCCR_ICACHE_INV MCCR_IIV
-#endif /* CONFIG_CHIP_XNUX2 || CONFIG_CHIP_M32700 */
+#elif defined(CONFIG_CHIP_M32104)
+#define MCCR ((volatile unsigned short*)0xfffffffe)
+#define MCCR_IIV (1UL << 8) /* I-cache invalidate */
+#define MCCR_DIV (1UL << 9) /* D-cache invalidate */
+#define MCCR_DCB (1UL << 10) /* D-cache copy back */
+#define MCCR_ICM (1UL << 0) /* I-cache mode [0:off,1:on] */
+#define MCCR_DCM (1UL << 1) /* D-cache mode [0:off,1:on] */
+#define MCCR_ICACHE_INV MCCR_IIV
+#define MCCR_DCACHE_CB MCCR_DCB
+#define MCCR_DCACHE_CBINV (MCCR_DIV|MCCR_DCB)
+#endif
#ifndef MCCR
#error Unknown cache type.
@@ -37,29 +48,42 @@
void _flush_cache_all(void)
{
#if defined(CONFIG_CHIP_M32102)
+ unsigned char mccr;
*MCCR = MCCR_ICACHE_INV;
+#elif defined(CONFIG_CHIP_M32104)
+ unsigned short mccr;
+
+ /* Copyback and invalidate D-cache */
+ /* Invalidate I-cache */
+ *MCCR |= (MCCR_ICACHE_INV | MCCR_DCACHE_CBINV);
#else
unsigned long mccr;
/* Copyback and invalidate D-cache */
/* Invalidate I-cache */
*MCCR = MCCR_ICACHE_INV | MCCR_DCACHE_CBINV;
- while ((mccr = *MCCR) & MCCR_IIV); /* loop while invalidating... */
#endif
+ while ((mccr = *MCCR) & MCCR_IIV); /* loop while invalidating... */
}
/* Copy back D-cache and invalidate I-cache all */
void _flush_cache_copyback_all(void)
{
#if defined(CONFIG_CHIP_M32102)
+ unsigned char mccr;
*MCCR = MCCR_ICACHE_INV;
+#elif defined(CONFIG_CHIP_M32104)
+ unsigned short mccr;
+
+ /* Copyback and invalidate D-cache */
+ /* Invalidate I-cache */
+ *MCCR |= (MCCR_ICACHE_INV | MCCR_DCACHE_CB);
#else
unsigned long mccr;
/* Copyback D-cache */
/* Invalidate I-cache */
*MCCR = MCCR_ICACHE_INV | MCCR_DCACHE_CB;
- while ((mccr = *MCCR) & MCCR_IIV); /* loop while invalidating... */
-
#endif
+ while ((mccr = *MCCR) & MCCR_IIV); /* loop while invalidating... */
}
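
Side note on the flush routines above: the M32104 path read-modify-writes a
16-bit MCCR (*MCCR |= ...) where the older chips store a complete new value,
and the busy-wait on MCCR_IIV now covers every chip variant. The resulting
access pattern for the M32104 case, shown in isolation:

    volatile unsigned short *mccr = (volatile unsigned short *)0xfffffffe;

    *mccr |= (MCCR_ICACHE_INV | MCCR_DCACHE_CBINV); /* start copyback + invalidate */
    while (*mccr & MCCR_IIV)                        /* IIV reads as set while the  */
            ;                                       /* invalidate is still running */
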
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 1dd5d18b220..96b91982805 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -10,10 +10,6 @@ config MMU
bool
default y
-config UID16
- bool
- default y
-
config RWSEM_GENERIC_SPINLOCK
bool
default y
diff --git a/arch/m68k/amiga/amiints.c b/arch/m68k/amiga/amiints.c
index d9edf2d1a49..b0aa61bf870 100644
--- a/arch/m68k/amiga/amiints.c
+++ b/arch/m68k/amiga/amiints.c
@@ -126,9 +126,9 @@ void __init amiga_init_IRQ(void)
gayle.inten = GAYLE_IRQ_IDE;
/* turn off all interrupts and enable the master interrupt bit */
- custom.intena = 0x7fff;
- custom.intreq = 0x7fff;
- custom.intena = IF_SETCLR | IF_INTEN;
+ amiga_custom.intena = 0x7fff;
+ amiga_custom.intreq = 0x7fff;
+ amiga_custom.intena = IF_SETCLR | IF_INTEN;
cia_init_IRQ(&ciaa_base);
cia_init_IRQ(&ciab_base);
@@ -245,7 +245,7 @@ int amiga_request_irq(unsigned int irq,
/* enable the interrupt */
if (irq < IRQ_AMIGA_PORTS && !ami_ablecount[irq])
- custom.intena = IF_SETCLR | amiga_intena_vals[irq];
+ amiga_custom.intena = IF_SETCLR | amiga_intena_vals[irq];
return error;
}
@@ -274,7 +274,7 @@ void amiga_free_irq(unsigned int irq, void *dev_id)
amiga_delete_irq(&ami_irq_list[irq], dev_id);
/* if server list empty, disable the interrupt */
if (!ami_irq_list[irq] && irq < IRQ_AMIGA_PORTS)
- custom.intena = amiga_intena_vals[irq];
+ amiga_custom.intena = amiga_intena_vals[irq];
} else {
if (ami_irq_list[irq]->dev_id != dev_id)
printk("%s: removing probably wrong IRQ %d from %s\n",
@@ -283,7 +283,7 @@ void amiga_free_irq(unsigned int irq, void *dev_id)
ami_irq_list[irq]->flags = 0;
ami_irq_list[irq]->dev_id = NULL;
ami_irq_list[irq]->devname = NULL;
- custom.intena = amiga_intena_vals[irq];
+ amiga_custom.intena = amiga_intena_vals[irq];
}
}
@@ -327,7 +327,7 @@ void amiga_enable_irq(unsigned int irq)
}
/* enable the interrupt */
- custom.intena = IF_SETCLR | amiga_intena_vals[irq];
+ amiga_custom.intena = IF_SETCLR | amiga_intena_vals[irq];
}
void amiga_disable_irq(unsigned int irq)
@@ -358,7 +358,7 @@ void amiga_disable_irq(unsigned int irq)
}
/* disable the interrupt */
- custom.intena = amiga_intena_vals[irq];
+ amiga_custom.intena = amiga_intena_vals[irq];
}
inline void amiga_do_irq(int irq, struct pt_regs *fp)
@@ -373,7 +373,7 @@ void amiga_do_irq_list(int irq, struct pt_regs *fp)
kstat_cpu(0).irqs[SYS_IRQS + irq]++;
- custom.intreq = amiga_intena_vals[irq];
+ amiga_custom.intreq = amiga_intena_vals[irq];
for (node = ami_irq_list[irq]; node; node = node->next)
node->handler(irq, node->dev_id, fp);
@@ -385,23 +385,23 @@ void amiga_do_irq_list(int irq, struct pt_regs *fp)
static irqreturn_t ami_int1(int irq, void *dev_id, struct pt_regs *fp)
{
- unsigned short ints = custom.intreqr & custom.intenar;
+ unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
/* if serial transmit buffer empty, interrupt */
if (ints & IF_TBE) {
- custom.intreq = IF_TBE;
+ amiga_custom.intreq = IF_TBE;
amiga_do_irq(IRQ_AMIGA_TBE, fp);
}
/* if floppy disk transfer complete, interrupt */
if (ints & IF_DSKBLK) {
- custom.intreq = IF_DSKBLK;
+ amiga_custom.intreq = IF_DSKBLK;
amiga_do_irq(IRQ_AMIGA_DSKBLK, fp);
}
/* if software interrupt set, interrupt */
if (ints & IF_SOFT) {
- custom.intreq = IF_SOFT;
+ amiga_custom.intreq = IF_SOFT;
amiga_do_irq(IRQ_AMIGA_SOFT, fp);
}
return IRQ_HANDLED;
@@ -409,17 +409,17 @@ static irqreturn_t ami_int1(int irq, void *dev_id, struct pt_regs *fp)
static irqreturn_t ami_int3(int irq, void *dev_id, struct pt_regs *fp)
{
- unsigned short ints = custom.intreqr & custom.intenar;
+ unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
/* if a blitter interrupt */
if (ints & IF_BLIT) {
- custom.intreq = IF_BLIT;
+ amiga_custom.intreq = IF_BLIT;
amiga_do_irq(IRQ_AMIGA_BLIT, fp);
}
/* if a copper interrupt */
if (ints & IF_COPER) {
- custom.intreq = IF_COPER;
+ amiga_custom.intreq = IF_COPER;
amiga_do_irq(IRQ_AMIGA_COPPER, fp);
}
@@ -431,29 +431,29 @@ static irqreturn_t ami_int3(int irq, void *dev_id, struct pt_regs *fp)
static irqreturn_t ami_int4(int irq, void *dev_id, struct pt_regs *fp)
{
- unsigned short ints = custom.intreqr & custom.intenar;
+ unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
/* if audio 0 interrupt */
if (ints & IF_AUD0) {
- custom.intreq = IF_AUD0;
+ amiga_custom.intreq = IF_AUD0;
amiga_do_irq(IRQ_AMIGA_AUD0, fp);
}
/* if audio 1 interrupt */
if (ints & IF_AUD1) {
- custom.intreq = IF_AUD1;
+ amiga_custom.intreq = IF_AUD1;
amiga_do_irq(IRQ_AMIGA_AUD1, fp);
}
/* if audio 2 interrupt */
if (ints & IF_AUD2) {
- custom.intreq = IF_AUD2;
+ amiga_custom.intreq = IF_AUD2;
amiga_do_irq(IRQ_AMIGA_AUD2, fp);
}
/* if audio 3 interrupt */
if (ints & IF_AUD3) {
- custom.intreq = IF_AUD3;
+ amiga_custom.intreq = IF_AUD3;
amiga_do_irq(IRQ_AMIGA_AUD3, fp);
}
return IRQ_HANDLED;
@@ -461,7 +461,7 @@ static irqreturn_t ami_int4(int irq, void *dev_id, struct pt_regs *fp)
static irqreturn_t ami_int5(int irq, void *dev_id, struct pt_regs *fp)
{
- unsigned short ints = custom.intreqr & custom.intenar;
+ unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
/* if serial receive buffer full interrupt */
if (ints & IF_RBF) {
@@ -471,7 +471,7 @@ static irqreturn_t ami_int5(int irq, void *dev_id, struct pt_regs *fp)
/* if a disk sync interrupt */
if (ints & IF_DSKSYN) {
- custom.intreq = IF_DSKSYN;
+ amiga_custom.intreq = IF_DSKSYN;
amiga_do_irq(IRQ_AMIGA_DSKSYN, fp);
}
return IRQ_HANDLED;
diff --git a/arch/m68k/amiga/amisound.c b/arch/m68k/amiga/amisound.c
index bd5d134e9f1..ae94db5d93b 100644
--- a/arch/m68k/amiga/amisound.c
+++ b/arch/m68k/amiga/amisound.c
@@ -24,6 +24,8 @@ static const signed char sine_data[] = {
};
#define DATA_SIZE (sizeof(sine_data)/sizeof(sine_data[0]))
+#define custom amiga_custom
+
/*
* The minimum period for audio may be modified by the frame buffer
* device since it depends on htotal (for OCS/ECS/AGA)
diff --git a/arch/m68k/amiga/cia.c b/arch/m68k/amiga/cia.c
index 7d55682615e..9476eb9440f 100644
--- a/arch/m68k/amiga/cia.c
+++ b/arch/m68k/amiga/cia.c
@@ -60,7 +60,7 @@ unsigned char cia_set_irq(struct ciabase *base, unsigned char mask)
else
base->icr_data &= ~mask;
if (base->icr_data & base->icr_mask)
- custom.intreq = IF_SETCLR | base->int_mask;
+ amiga_custom.intreq = IF_SETCLR | base->int_mask;
return old & base->icr_mask;
}
@@ -89,7 +89,7 @@ unsigned char cia_able_irq(struct ciabase *base, unsigned char mask)
}
}
if (base->icr_data & base->icr_mask)
- custom.intreq = IF_SETCLR | base->int_mask;
+ amiga_custom.intreq = IF_SETCLR | base->int_mask;
return old;
}
@@ -133,7 +133,7 @@ static irqreturn_t cia_handler(int irq, void *dev_id, struct pt_regs *fp)
mach_irq = base->cia_irq;
irq = SYS_IRQS + mach_irq;
ints = cia_set_irq(base, CIA_ICR_ALL);
- custom.intreq = base->int_mask;
+ amiga_custom.intreq = base->int_mask;
for (i = 0; i < CIA_IRQS; i++, irq++, mach_irq++) {
if (ints & 1) {
kstat_cpu(0).irqs[irq]++;
@@ -162,7 +162,7 @@ void __init cia_init_IRQ(struct ciabase *base)
/* install CIA handler */
request_irq(base->handler_irq, cia_handler, 0, base->name, base);
- custom.intena = IF_SETCLR | base->int_mask;
+ amiga_custom.intena = IF_SETCLR | base->int_mask;
}
int cia_get_irq_list(struct ciabase *base, struct seq_file *p)
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index 4775e18a78f..12e3706fe02 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -105,9 +105,6 @@ static int a2000_hwclk (int, struct rtc_time *);
static int amiga_set_clock_mmss (unsigned long);
static unsigned int amiga_get_ss (void);
extern void amiga_mksound( unsigned int count, unsigned int ticks );
-#ifdef CONFIG_AMIGA_FLOPPY
-extern void amiga_floppy_setup(char *, int *);
-#endif
static void amiga_reset (void);
extern void amiga_init_sound(void);
static void amiga_savekmsg_init(void);
@@ -290,7 +287,7 @@ static void __init amiga_identify(void)
case CS_OCS:
case CS_ECS:
case CS_AGA:
- switch (custom.deniseid & 0xf) {
+ switch (amiga_custom.deniseid & 0xf) {
case 0x0c:
AMIGAHW_SET(DENISE_HR);
break;
@@ -303,7 +300,7 @@ static void __init amiga_identify(void)
AMIGAHW_SET(DENISE);
break;
}
- switch ((custom.vposr>>8) & 0x7f) {
+ switch ((amiga_custom.vposr>>8) & 0x7f) {
case 0x00:
AMIGAHW_SET(AGNUS_PAL);
break;
@@ -427,13 +424,7 @@ void __init config_amiga(void)
mach_set_clock_mmss = amiga_set_clock_mmss;
mach_get_ss = amiga_get_ss;
-#ifdef CONFIG_AMIGA_FLOPPY
- mach_floppy_setup = amiga_floppy_setup;
-#endif
mach_reset = amiga_reset;
-#ifdef CONFIG_DUMMY_CONSOLE
- conswitchp = &dummy_con;
-#endif
#if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE)
mach_beep = amiga_mksound;
#endif
@@ -447,9 +438,9 @@ void __init config_amiga(void)
amiga_colorclock = 5*amiga_eclock; /* 3.5 MHz */
/* clear all DMA bits */
- custom.dmacon = DMAF_ALL;
+ amiga_custom.dmacon = DMAF_ALL;
/* ensure that the DMA master bit is set */
- custom.dmacon = DMAF_SETCLR | DMAF_MASTER;
+ amiga_custom.dmacon = DMAF_SETCLR | DMAF_MASTER;
/* don't use Z2 RAM as system memory on Z3 capable machines */
if (AMIGAHW_PRESENT(ZORRO3)) {
@@ -830,8 +821,8 @@ static void amiga_savekmsg_init(void)
static void amiga_serial_putc(char c)
{
- custom.serdat = (unsigned char)c | 0x100;
- while (!(custom.serdatr & 0x2000))
+ amiga_custom.serdat = (unsigned char)c | 0x100;
+ while (!(amiga_custom.serdatr & 0x2000))
;
}
@@ -855,11 +846,11 @@ int amiga_serial_console_wait_key(struct console *co)
{
int ch;
- while (!(custom.intreqr & IF_RBF))
+ while (!(amiga_custom.intreqr & IF_RBF))
barrier();
- ch = custom.serdatr & 0xff;
+ ch = amiga_custom.serdatr & 0xff;
/* clear the interrupt, so that another character can be read */
- custom.intreq = IF_RBF;
+ amiga_custom.intreq = IF_RBF;
return ch;
}
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index 26492947125..d401962d9b2 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -176,9 +176,6 @@ void config_apollo(void) {
mach_set_clock_mmss = dn_dummy_set_clock_mmss; /* */
mach_process_int = dn_process_int;
mach_reset = dn_dummy_reset; /* */
-#ifdef CONFIG_DUMMY_CONSOLE
- conswitchp = &dummy_con;
-#endif
#ifdef CONFIG_HEARTBEAT
mach_heartbeat = dn_heartbeat;
#endif
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index 9261d2deeaf..1012b08e552 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -52,9 +52,6 @@ int atari_rtc_year_offset;
/* local function prototypes */
static void atari_reset( void );
-#ifdef CONFIG_ATARI_FLOPPY
-extern void atari_floppy_setup(char *, int *);
-#endif
static void atari_get_model(char *model);
static int atari_get_hardware_list(char *buffer);
@@ -244,12 +241,6 @@ void __init config_atari(void)
mach_get_irq_list = show_atari_interrupts;
mach_gettimeoffset = atari_gettimeoffset;
mach_reset = atari_reset;
-#ifdef CONFIG_ATARI_FLOPPY
- mach_floppy_setup = atari_floppy_setup;
-#endif
-#ifdef CONFIG_DUMMY_CONSOLE
- conswitchp = &dummy_con;
-#endif
mach_max_dma_address = 0xffffff;
#if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE)
mach_beep = atari_mksound;
diff --git a/arch/m68k/bvme6000/rtc.c b/arch/m68k/bvme6000/rtc.c
index eb63ca6ed94..703cbc6dc9c 100644
--- a/arch/m68k/bvme6000/rtc.c
+++ b/arch/m68k/bvme6000/rtc.c
@@ -11,6 +11,7 @@
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/ioport.h>
+#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
@@ -46,6 +47,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
unsigned char msr;
unsigned long flags;
struct rtc_time wtime;
+ void __user *argp = (void __user *)arg;
switch (cmd) {
case RTC_RD_TIME: /* Read the time/date from RTC */
@@ -68,7 +70,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
} while (wtime.tm_sec != BCD2BIN(rtc->bcd_sec));
rtc->msr = msr;
local_irq_restore(flags);
- return copy_to_user((void *)arg, &wtime, sizeof wtime) ?
+ return copy_to_user(argp, &wtime, sizeof wtime) ?
-EFAULT : 0;
}
case RTC_SET_TIME: /* Set the RTC */
@@ -80,8 +82,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- if (copy_from_user(&rtc_tm, (struct rtc_time*)arg,
- sizeof(struct rtc_time)))
+ if (copy_from_user(&rtc_tm, argp, sizeof(struct rtc_time)))
return -EFAULT;
yrs = rtc_tm.tm_year;
diff --git a/arch/m68k/hp300/config.c b/arch/m68k/hp300/config.c
index a0b854f3f94..6d129eef370 100644
--- a/arch/m68k/hp300/config.c
+++ b/arch/m68k/hp300/config.c
@@ -261,9 +261,6 @@ void __init config_hp300(void)
#ifdef CONFIG_HEARTBEAT
mach_heartbeat = hp300_pulse;
#endif
-#ifdef CONFIG_DUMMY_CONSOLE
- conswitchp = &dummy_con;
-#endif
mach_max_dma_address = 0xffffffff;
if (hp300_model >= HP_330 && hp300_model <= HP_433S && hp300_model != HP_350) {
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c
index c787c5ba951..246a8820c22 100644
--- a/arch/m68k/kernel/asm-offsets.c
+++ b/arch/m68k/kernel/asm-offsets.c
@@ -92,7 +92,7 @@ int main(void)
DEFINE(TRAP_TRACE, TRAP_TRACE);
/* offsets into the custom struct */
- DEFINE(CUSTOMBASE, &custom);
+ DEFINE(CUSTOMBASE, &amiga_custom);
DEFINE(C_INTENAR, offsetof(struct CUSTOM, intenar));
DEFINE(C_INTREQR, offsetof(struct CUSTOM, intreqr));
DEFINE(C_INTENA, offsetof(struct CUSTOM, intena));
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
index d4336d846df..70002c146ee 100644
--- a/arch/m68k/kernel/head.S
+++ b/arch/m68k/kernel/head.S
@@ -273,8 +273,10 @@
* Macintosh console support
*/
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE
#define CONSOLE
#define CONSOLE_PENGUIN
+#endif
/*
* Macintosh serial debug support; outputs boot info to the printer
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c
index 73e2f5e168d..3d7f2000b71 100644
--- a/arch/m68k/kernel/m68k_ksyms.c
+++ b/arch/m68k/kernel/m68k_ksyms.c
@@ -23,8 +23,6 @@ asmlinkage long long __lshrdi3 (long long, int);
asmlinkage long long __muldi3 (long long, long long);
extern char m68k_debug_device[];
-extern void dump_thread(struct pt_regs *, struct user *);
-
/* platform dependent support */
EXPORT_SYMBOL(m68k_machtype);
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index 13d109328a4..3f9cb55d035 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -238,10 +238,9 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
{
struct pt_regs * childregs;
struct switch_stack * childstack, *stack;
- unsigned long stack_offset, *retp;
+ unsigned long *retp;
- stack_offset = THREAD_SIZE - sizeof(struct pt_regs);
- childregs = (struct pt_regs *) ((unsigned long) (p->thread_info) + stack_offset);
+ childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
*childregs = *regs;
childregs->d0 = 0;
@@ -386,7 +385,7 @@ unsigned long get_wchan(struct task_struct *p)
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
- stack_page = (unsigned long)(p->thread_info);
+ stack_page = (unsigned long)task_stack_page(p);
fp = ((struct switch_stack *)p->thread.ksp)->a6;
do {
if (fp < stack_page+sizeof(struct thread_info) ||
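
[The copy_thread() and get_wchan() hunks above stop poking at p->thread_info directly and go through task_stack_page() instead; the child's register frame still sits at the very top of the task's stack area, one struct pt_regs below the end. A small runnable sketch of that pointer arithmetic, with an invented fake_pt_regs and an 8 KiB THREAD_SIZE_DEMO standing in for the real definitions:]

#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE_DEMO 8192           /* stand-in for THREAD_SIZE */

struct fake_pt_regs { unsigned long d0, d1, usp, pc; };

int main(void)
{
	/* stands in for task_stack_page(p) */
	char *stack = malloc(THREAD_SIZE_DEMO);
	struct fake_pt_regs *childregs;

	if (!stack)
		return 1;

	/* same idiom as the patch: one register frame below the stack top */
	childregs = (struct fake_pt_regs *)(stack + THREAD_SIZE_DEMO) - 1;

	printf("stack %p..%p, regs at %p (%zu bytes below the top)\n",
	       (void *)stack, (void *)(stack + THREAD_SIZE_DEMO),
	       (void *)childregs, sizeof(*childregs));
	free(stack);
	return 0;
}
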
diff --git a/arch/m68k/kernel/setup.c b/arch/m68k/kernel/setup.c
index d6ca99242e5..750d5b3c971 100644
--- a/arch/m68k/kernel/setup.c
+++ b/arch/m68k/kernel/setup.c
@@ -84,9 +84,6 @@ void (*mach_reset)( void );
void (*mach_halt)( void );
void (*mach_power_off)( void );
long mach_max_dma_address = 0x00ffffff; /* default set to the lower 16MB */
-#if defined(CONFIG_AMIGA_FLOPPY) || defined(CONFIG_ATARI_FLOPPY)
-void (*mach_floppy_setup) (char *, int *) __initdata = NULL;
-#endif
#ifdef CONFIG_HEARTBEAT
void (*mach_heartbeat) (int);
EXPORT_SYMBOL(mach_heartbeat);
@@ -100,6 +97,8 @@ void (*mach_beep)(unsigned int, unsigned int);
#if defined(CONFIG_ISA) && defined(MULTI_ISA)
int isa_type;
int isa_sex;
+EXPORT_SYMBOL(isa_type);
+EXPORT_SYMBOL(isa_sex);
#endif
extern int amiga_parse_bootinfo(const struct bi_record *);
@@ -280,6 +279,10 @@ void __init setup_arch(char **cmdline_p)
}
}
+#ifdef CONFIG_DUMMY_CONSOLE
+ conswitchp = &dummy_con;
+#endif
+
switch (m68k_machtype) {
#ifdef CONFIG_AMIGA
case MACH_AMIGA:
@@ -521,16 +524,6 @@ int get_hardware_list(char *buffer)
return(len);
}
-
-#if defined(CONFIG_AMIGA_FLOPPY) || defined(CONFIG_ATARI_FLOPPY)
-void __init floppy_setup(char *str, int *ints)
-{
- if (mach_floppy_setup)
- mach_floppy_setup (str, ints);
-}
-
-#endif
-
void check_bugs(void)
{
#ifndef CONFIG_M68KFPU_EMU
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index 9c636a4c238..866917bfa02 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -96,7 +96,7 @@ asmlinkage int do_sigsuspend(struct pt_regs *regs)
asmlinkage int
do_rt_sigsuspend(struct pt_regs *regs)
{
- sigset_t *unewset = (sigset_t *)regs->d1;
+ sigset_t __user *unewset = (sigset_t __user *)regs->d1;
size_t sigsetsize = (size_t)regs->d2;
sigset_t saveset, newset;
@@ -122,8 +122,8 @@ do_rt_sigsuspend(struct pt_regs *regs)
}
asmlinkage int
-sys_sigaction(int sig, const struct old_sigaction *act,
- struct old_sigaction *oact)
+sys_sigaction(int sig, const struct old_sigaction __user *act,
+ struct old_sigaction __user *oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
@@ -154,7 +154,7 @@ sys_sigaction(int sig, const struct old_sigaction *act,
}
asmlinkage int
-sys_sigaltstack(const stack_t *uss, stack_t *uoss)
+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
{
return do_sigaltstack(uss, uoss, rdusp());
}
@@ -169,10 +169,10 @@ sys_sigaltstack(const stack_t *uss, stack_t *uoss)
struct sigframe
{
- char *pretcode;
+ char __user *pretcode;
int sig;
int code;
- struct sigcontext *psc;
+ struct sigcontext __user *psc;
char retcode[8];
unsigned long extramask[_NSIG_WORDS-1];
struct sigcontext sc;
@@ -180,10 +180,10 @@ struct sigframe
struct rt_sigframe
{
- char *pretcode;
+ char __user *pretcode;
int sig;
- struct siginfo *pinfo;
- void *puc;
+ struct siginfo __user *pinfo;
+ void __user *puc;
char retcode[8];
struct siginfo info;
struct ucontext uc;
@@ -248,7 +248,7 @@ out:
#define uc_formatvec uc_filler[FPCONTEXT_SIZE/4]
#define uc_extra uc_filler[FPCONTEXT_SIZE/4+1]
-static inline int rt_restore_fpu_state(struct ucontext *uc)
+static inline int rt_restore_fpu_state(struct ucontext __user *uc)
{
unsigned char fpstate[FPCONTEXT_SIZE];
int context_size = CPU_IS_060 ? 8 : 0;
@@ -267,7 +267,7 @@ static inline int rt_restore_fpu_state(struct ucontext *uc)
return 0;
}
- if (__get_user(*(long *)fpstate, (long *)&uc->uc_fpstate))
+ if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
goto out;
if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
if (!CPU_IS_060)
@@ -306,7 +306,7 @@ static inline int rt_restore_fpu_state(struct ucontext *uc)
"m" (*fpregs.f_fpcntl));
}
if (context_size &&
- __copy_from_user(fpstate + 4, (long *)&uc->uc_fpstate + 1,
+ __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
context_size))
goto out;
__asm__ volatile (".chip 68k/68881\n\t"
@@ -319,7 +319,7 @@ out:
}
static inline int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext *usc, void *fp,
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp,
int *pd0)
{
int fsize, formatvec;
@@ -404,10 +404,10 @@ badframe:
static inline int
rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
- struct ucontext *uc, int *pd0)
+ struct ucontext __user *uc, int *pd0)
{
int fsize, temp;
- greg_t *gregs = uc->uc_mcontext.gregs;
+ greg_t __user *gregs = uc->uc_mcontext.gregs;
unsigned long usp;
int err;
@@ -506,7 +506,7 @@ asmlinkage int do_sigreturn(unsigned long __unused)
struct switch_stack *sw = (struct switch_stack *) &__unused;
struct pt_regs *regs = (struct pt_regs *) (sw + 1);
unsigned long usp = rdusp();
- struct sigframe *frame = (struct sigframe *)(usp - 4);
+ struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
sigset_t set;
int d0;
@@ -536,7 +536,7 @@ asmlinkage int do_rt_sigreturn(unsigned long __unused)
struct switch_stack *sw = (struct switch_stack *) &__unused;
struct pt_regs *regs = (struct pt_regs *) (sw + 1);
unsigned long usp = rdusp();
- struct rt_sigframe *frame = (struct rt_sigframe *)(usp - 4);
+ struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
sigset_t set;
int d0;
@@ -596,7 +596,7 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
}
}
-static inline int rt_save_fpu_state(struct ucontext *uc, struct pt_regs *regs)
+static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
{
unsigned char fpstate[FPCONTEXT_SIZE];
int context_size = CPU_IS_060 ? 8 : 0;
@@ -617,7 +617,7 @@ static inline int rt_save_fpu_state(struct ucontext *uc, struct pt_regs *regs)
".chip 68k"
: : "m" (*fpstate) : "memory");
- err |= __put_user(*(long *)fpstate, (long *)&uc->uc_fpstate);
+ err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
fpregset_t fpregs;
if (!CPU_IS_060)
@@ -642,7 +642,7 @@ static inline int rt_save_fpu_state(struct ucontext *uc, struct pt_regs *regs)
sizeof(fpregs));
}
if (context_size)
- err |= copy_to_user((long *)&uc->uc_fpstate + 1, fpstate + 4,
+ err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
context_size);
return err;
}
@@ -662,10 +662,10 @@ static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
save_fpu_state(sc, regs);
}
-static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs)
+static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
{
struct switch_stack *sw = (struct switch_stack *)regs - 1;
- greg_t *gregs = uc->uc_mcontext.gregs;
+ greg_t __user *gregs = uc->uc_mcontext.gregs;
int err = 0;
err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
@@ -753,7 +753,7 @@ static inline void push_cache (unsigned long vaddr)
}
}
-static inline void *
+static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
{
unsigned long usp;
@@ -766,13 +766,13 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
if (!on_sig_stack(usp))
usp = current->sas_ss_sp + current->sas_ss_size;
}
- return (void *)((usp - frame_size) & -8UL);
+ return (void __user *)((usp - frame_size) & -8UL);
}
static void setup_frame (int sig, struct k_sigaction *ka,
sigset_t *set, struct pt_regs *regs)
{
- struct sigframe *frame;
+ struct sigframe __user *frame;
int fsize = frame_extra_sizes[regs->format];
struct sigcontext context;
int err = 0;
@@ -813,7 +813,7 @@ static void setup_frame (int sig, struct k_sigaction *ka,
err |= __put_user(frame->retcode, &frame->pretcode);
/* moveq #,d0; trap #0 */
err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
- (long *)(frame->retcode));
+ (long __user *)(frame->retcode));
if (err)
goto give_sigsegv;
@@ -849,7 +849,7 @@ give_sigsegv:
static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
- struct rt_sigframe *frame;
+ struct rt_sigframe __user *frame;
int fsize = frame_extra_sizes[regs->format];
int err = 0;
@@ -880,8 +880,8 @@ static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
- err |= __put_user(0, &frame->uc.uc_link);
- err |= __put_user((void *)current->sas_ss_sp,
+ err |= __put_user(NULL, &frame->uc.uc_link);
+ err |= __put_user((void __user *)current->sas_ss_sp,
&frame->uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(rdusp()),
&frame->uc.uc_stack.ss_flags);
@@ -893,8 +893,8 @@ static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
err |= __put_user(frame->retcode, &frame->pretcode);
/* moveq #,d0; notb d0; trap #0 */
err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
- (long *)(frame->retcode + 0));
- err |= __put_user(0x4e40, (short *)(frame->retcode + 4));
+ (long __user *)(frame->retcode + 0));
+ err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4));
if (err)
goto give_sigsegv;
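
[setup_frame()/setup_rt_frame() above store the signal return trampoline as raw m68k machine code: 0x70xx encodes moveq #xx,%d0 and 0x4e40 encodes trap #0, so adding the syscall number shifted into the high word composes both instructions in a single 32-bit store. A runnable sketch of that arithmetic; NR_SIGRETURN_DEMO is an arbitrary stand-in, not the real syscall number.]

#include <stdio.h>
#include <stdint.h>

#define NR_SIGRETURN_DEMO 119           /* illustrative value only */

int main(void)
{
	/* high word: moveq #n,%d0 ; low word: trap #0 */
	uint32_t insns = 0x70004e40u + ((uint32_t)NR_SIGRETURN_DEMO << 16);

	printf("retcode words: %04x %04x\n",
	       (unsigned)(insns >> 16), (unsigned)(insns & 0xffff));
	return 0;
}
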
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 2ed7b783f65..143c552d38f 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -6,6 +6,7 @@
* platform.
*/
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
@@ -31,7 +32,7 @@
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way unix traditionally does this, though.
*/
-asmlinkage int sys_pipe(unsigned long * fildes)
+asmlinkage int sys_pipe(unsigned long __user * fildes)
{
int fd[2];
int error;
@@ -93,7 +94,7 @@ struct mmap_arg_struct {
unsigned long offset;
};
-asmlinkage int old_mmap(struct mmap_arg_struct *arg)
+asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
{
struct mmap_arg_struct a;
int error = -EFAULT;
@@ -159,11 +160,11 @@ out:
struct sel_arg_struct {
unsigned long n;
- fd_set *inp, *outp, *exp;
- struct timeval *tvp;
+ fd_set __user *inp, *outp, *exp;
+ struct timeval __user *tvp;
};
-asmlinkage int old_select(struct sel_arg_struct *arg)
+asmlinkage int old_select(struct sel_arg_struct __user *arg)
{
struct sel_arg_struct a;
@@ -179,7 +180,7 @@ asmlinkage int old_select(struct sel_arg_struct *arg)
* This is really horribly ugly.
*/
asmlinkage int sys_ipc (uint call, int first, int second,
- int third, void *ptr, long fifth)
+ int third, void __user *ptr, long fifth)
{
int version, ret;
@@ -189,14 +190,14 @@ asmlinkage int sys_ipc (uint call, int first, int second,
if (call <= SEMCTL)
switch (call) {
case SEMOP:
- return sys_semop (first, (struct sembuf *)ptr, second);
+ return sys_semop (first, ptr, second);
case SEMGET:
return sys_semget (first, second, third);
case SEMCTL: {
union semun fourth;
if (!ptr)
return -EINVAL;
- if (get_user(fourth.__pad, (void **) ptr))
+ if (get_user(fourth.__pad, (void __user *__user *) ptr))
return -EFAULT;
return sys_semctl (first, second, third, fourth);
}
@@ -206,31 +207,26 @@ asmlinkage int sys_ipc (uint call, int first, int second,
if (call <= MSGCTL)
switch (call) {
case MSGSND:
- return sys_msgsnd (first, (struct msgbuf *) ptr,
- second, third);
+ return sys_msgsnd (first, ptr, second, third);
case MSGRCV:
switch (version) {
case 0: {
struct ipc_kludge tmp;
if (!ptr)
return -EINVAL;
- if (copy_from_user (&tmp,
- (struct ipc_kludge *)ptr,
- sizeof (tmp)))
+ if (copy_from_user (&tmp, ptr, sizeof (tmp)))
return -EFAULT;
return sys_msgrcv (first, tmp.msgp, second,
tmp.msgtyp, third);
}
default:
- return sys_msgrcv (first,
- (struct msgbuf *) ptr,
+ return sys_msgrcv (first, ptr,
second, fifth, third);
}
case MSGGET:
return sys_msgget ((key_t) first, second);
case MSGCTL:
- return sys_msgctl (first, second,
- (struct msqid_ds *) ptr);
+ return sys_msgctl (first, second, ptr);
default:
return -ENOSYS;
}
@@ -240,20 +236,18 @@ asmlinkage int sys_ipc (uint call, int first, int second,
switch (version) {
default: {
ulong raddr;
- ret = do_shmat (first, (char *) ptr,
- second, &raddr);
+ ret = do_shmat (first, ptr, second, &raddr);
if (ret)
return ret;
- return put_user (raddr, (ulong *) third);
+ return put_user (raddr, (ulong __user *) third);
}
}
case SHMDT:
- return sys_shmdt ((char *)ptr);
+ return sys_shmdt (ptr);
case SHMGET:
return sys_shmget (first, second, third);
case SHMCTL:
- return sys_shmctl (first, second,
- (struct shmid_ds *) ptr);
+ return sys_shmctl (first, second, ptr);
default:
return -ENOSYS;
}
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index deb36e8b04a..cdf58fbb3e7 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -169,25 +169,25 @@ void __init trap_init (void)
if (CPU_IS_060 && !FPU_IS_EMU) {
/* set up IFPSP entry points */
- asmlinkage void snan_vec(void) asm ("_060_fpsp_snan");
- asmlinkage void operr_vec(void) asm ("_060_fpsp_operr");
- asmlinkage void ovfl_vec(void) asm ("_060_fpsp_ovfl");
- asmlinkage void unfl_vec(void) asm ("_060_fpsp_unfl");
- asmlinkage void dz_vec(void) asm ("_060_fpsp_dz");
- asmlinkage void inex_vec(void) asm ("_060_fpsp_inex");
- asmlinkage void fline_vec(void) asm ("_060_fpsp_fline");
- asmlinkage void unsupp_vec(void) asm ("_060_fpsp_unsupp");
- asmlinkage void effadd_vec(void) asm ("_060_fpsp_effadd");
-
- vectors[VEC_FPNAN] = snan_vec;
- vectors[VEC_FPOE] = operr_vec;
- vectors[VEC_FPOVER] = ovfl_vec;
- vectors[VEC_FPUNDER] = unfl_vec;
- vectors[VEC_FPDIVZ] = dz_vec;
- vectors[VEC_FPIR] = inex_vec;
- vectors[VEC_LINE11] = fline_vec;
- vectors[VEC_FPUNSUP] = unsupp_vec;
- vectors[VEC_UNIMPEA] = effadd_vec;
+ asmlinkage void snan_vec6(void) asm ("_060_fpsp_snan");
+ asmlinkage void operr_vec6(void) asm ("_060_fpsp_operr");
+ asmlinkage void ovfl_vec6(void) asm ("_060_fpsp_ovfl");
+ asmlinkage void unfl_vec6(void) asm ("_060_fpsp_unfl");
+ asmlinkage void dz_vec6(void) asm ("_060_fpsp_dz");
+ asmlinkage void inex_vec6(void) asm ("_060_fpsp_inex");
+ asmlinkage void fline_vec6(void) asm ("_060_fpsp_fline");
+ asmlinkage void unsupp_vec6(void) asm ("_060_fpsp_unsupp");
+ asmlinkage void effadd_vec6(void) asm ("_060_fpsp_effadd");
+
+ vectors[VEC_FPNAN] = snan_vec6;
+ vectors[VEC_FPOE] = operr_vec6;
+ vectors[VEC_FPOVER] = ovfl_vec6;
+ vectors[VEC_FPUNDER] = unfl_vec6;
+ vectors[VEC_FPDIVZ] = dz_vec6;
+ vectors[VEC_FPIR] = inex_vec6;
+ vectors[VEC_LINE11] = fline_vec6;
+ vectors[VEC_FPUNSUP] = unsupp_vec6;
+ vectors[VEC_UNIMPEA] = effadd_vec6;
}
/* if running on an amiga, make the NMI interrupt do nothing */
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index e58654f3f8d..69d1d3d30c7 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -13,6 +13,7 @@ SECTIONS
.text : {
*(.text)
SCHED_TEXT
+ LOCK_TEXT
*(.fixup)
*(.gnu.warning)
} :text = 0x4e75
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index cc37e8d3c1e..65cc39c2418 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -14,6 +14,7 @@ SECTIONS
*(.head)
*(.text)
SCHED_TEXT
+ LOCK_TEXT
*(.fixup)
*(.gnu.warning)
} :text = 0x4e75
@@ -66,7 +67,7 @@ __init_begin = .;
__initramfs_end = .;
. = ALIGN(8192);
__init_end = .;
- .init.task : { *(init_task) }
+ .data.init.task : { *(.data.init_task) }
.bss : { *(.bss) } /* BSS */
diff --git a/arch/m68k/lib/checksum.c b/arch/m68k/lib/checksum.c
index 4a5c5445c61..cb13c6e3cca 100644
--- a/arch/m68k/lib/checksum.c
+++ b/arch/m68k/lib/checksum.c
@@ -134,7 +134,7 @@ EXPORT_SYMBOL(csum_partial);
*/
unsigned int
-csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst,
+csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst,
int len, int sum, int *csum_err)
{
/*
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index cd19cbb213e..14f8d3f4e19 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -212,9 +212,6 @@ void __init config_mac(void)
mach_reset = mac_reset;
mach_halt = mac_poweroff;
mach_power_off = mac_poweroff;
-#ifdef CONFIG_DUMMY_CONSOLE
- conswitchp = &dummy_con;
-#endif
mach_max_dma_address = 0xffffffff;
#if 0
mach_debug_init = mac_debug_init;
diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c
index d889ba80ccd..9179a379840 100644
--- a/arch/m68k/mac/iop.c
+++ b/arch/m68k/mac/iop.c
@@ -293,8 +293,8 @@ void __init iop_init(void)
}
for (i = 0 ; i < NUM_IOP_CHAN ; i++) {
- iop_send_queue[IOP_NUM_SCC][i] = 0;
- iop_send_queue[IOP_NUM_ISM][i] = 0;
+ iop_send_queue[IOP_NUM_SCC][i] = NULL;
+ iop_send_queue[IOP_NUM_ISM][i] = NULL;
iop_listeners[IOP_NUM_SCC][i].devname = NULL;
iop_listeners[IOP_NUM_SCC][i].handler = NULL;
iop_listeners[IOP_NUM_ISM][i].devname = NULL;
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index 5b80d7cd954..bbb0c3b95e9 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -39,72 +39,163 @@
extern struct mac_booter_data mac_bi_data;
static void (*rom_reset)(void);
-#ifdef CONFIG_ADB
-/*
- * Return the current time as the number of seconds since January 1, 1904.
- */
-
-static long adb_read_time(void)
+#ifdef CONFIG_ADB_CUDA
+static long cuda_read_time(void)
{
- volatile struct adb_request req;
+ struct adb_request req;
long time;
- adb_request((struct adb_request *) &req, NULL,
- ADBREQ_RAW|ADBREQ_SYNC,
- 2, CUDA_PACKET, CUDA_GET_TIME);
+ if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
+ return 0;
+ while (!req.complete)
+ cuda_poll();
time = (req.reply[3] << 24) | (req.reply[4] << 16)
| (req.reply[5] << 8) | req.reply[6];
return time - RTC_OFFSET;
}
-/*
- * Set the current system time
- */
+static void cuda_write_time(long data)
+{
+ struct adb_request req;
+ data += RTC_OFFSET;
+ if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
+ (data >> 24) & 0xFF, (data >> 16) & 0xFF,
+ (data >> 8) & 0xFF, data & 0xFF) < 0)
+ return;
+ while (!req.complete)
+ cuda_poll();
+}
-static void adb_write_time(long data)
+static __u8 cuda_read_pram(int offset)
{
- volatile struct adb_request req;
+ struct adb_request req;
+ if (cuda_request(&req, NULL, 4, CUDA_PACKET, CUDA_GET_PRAM,
+ (offset >> 8) & 0xFF, offset & 0xFF) < 0)
+ return 0;
+ while (!req.complete)
+ cuda_poll();
+ return req.reply[3];
+}
- data += RTC_OFFSET;
+static void cuda_write_pram(int offset, __u8 data)
+{
+ struct adb_request req;
+ if (cuda_request(&req, NULL, 5, CUDA_PACKET, CUDA_SET_PRAM,
+ (offset >> 8) & 0xFF, offset & 0xFF, data) < 0)
+ return;
+ while (!req.complete)
+ cuda_poll();
+}
+#else
+#define cuda_read_time() 0
+#define cuda_write_time(n)
+#define cuda_read_pram NULL
+#define cuda_write_pram NULL
+#endif
+
+#ifdef CONFIG_ADB_PMU68K
+static long pmu_read_time(void)
+{
+ struct adb_request req;
+ long time;
+
+ if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
+ return 0;
+ while (!req.complete)
+ pmu_poll();
- adb_request((struct adb_request *) &req, NULL,
- ADBREQ_RAW|ADBREQ_SYNC,
- 6, CUDA_PACKET, CUDA_SET_TIME,
+ time = (req.reply[0] << 24) | (req.reply[1] << 16)
+ | (req.reply[2] << 8) | req.reply[3];
+ return time - RTC_OFFSET;
+}
+
+static void pmu_write_time(long data)
+{
+ struct adb_request req;
+ data += RTC_OFFSET;
+ if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
(data >> 24) & 0xFF, (data >> 16) & 0xFF,
- (data >> 8) & 0xFF, data & 0xFF);
+ (data >> 8) & 0xFF, data & 0xFF) < 0)
+ return;
+ while (!req.complete)
+ pmu_poll();
}
-/*
- * Get a byte from the NVRAM
- */
+static __u8 pmu_read_pram(int offset)
+{
+ struct adb_request req;
+ if (pmu_request(&req, NULL, 3, PMU_READ_NVRAM,
+ (offset >> 8) & 0xFF, offset & 0xFF) < 0)
+ return 0;
+ while (!req.complete)
+ pmu_poll();
+ return req.reply[3];
+}
-static __u8 adb_read_pram(int offset)
+static void pmu_write_pram(int offset, __u8 data)
{
- volatile struct adb_request req;
+ struct adb_request req;
+ if (pmu_request(&req, NULL, 4, PMU_WRITE_NVRAM,
+ (offset >> 8) & 0xFF, offset & 0xFF, data) < 0)
+ return;
+ while (!req.complete)
+ pmu_poll();
+}
+#else
+#define pmu_read_time() 0
+#define pmu_write_time(n)
+#define pmu_read_pram NULL
+#define pmu_write_pram NULL
+#endif
- adb_request((struct adb_request *) &req, NULL,
- ADBREQ_RAW|ADBREQ_SYNC,
- 4, CUDA_PACKET, CUDA_GET_PRAM,
- (offset >> 8) & 0xFF, offset & 0xFF);
- return req.reply[3];
+#ifdef CONFIG_ADB_MACIISI
+extern int maciisi_request(struct adb_request *req,
+ void (*done)(struct adb_request *), int nbytes, ...);
+
+static long maciisi_read_time(void)
+{
+ struct adb_request req;
+ long time;
+
+ if (maciisi_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME))
+ return 0;
+
+ time = (req.reply[3] << 24) | (req.reply[4] << 16)
+ | (req.reply[5] << 8) | req.reply[6];
+ return time - RTC_OFFSET;
}
-/*
- * Write a byte to the NVRAM
- */
+static void maciisi_write_time(long data)
+{
+ struct adb_request req;
+ data += RTC_OFFSET;
+ maciisi_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
+ (data >> 24) & 0xFF, (data >> 16) & 0xFF,
+ (data >> 8) & 0xFF, data & 0xFF);
+}
-static void adb_write_pram(int offset, __u8 data)
+static __u8 maciisi_read_pram(int offset)
{
- volatile struct adb_request req;
+ struct adb_request req;
+ if (maciisi_request(&req, NULL, 4, CUDA_PACKET, CUDA_GET_PRAM,
+ (offset >> 8) & 0xFF, offset & 0xFF))
+ return 0;
+ return req.reply[3];
+}
- adb_request((struct adb_request *) &req, NULL,
- ADBREQ_RAW|ADBREQ_SYNC,
- 5, CUDA_PACKET, CUDA_SET_PRAM,
- (offset >> 8) & 0xFF, offset & 0xFF,
- data);
+static void maciisi_write_pram(int offset, __u8 data)
+{
+ struct adb_request req;
+ maciisi_request(&req, NULL, 5, CUDA_PACKET, CUDA_SET_PRAM,
+ (offset >> 8) & 0xFF, offset & 0xFF, data);
}
-#endif /* CONFIG_ADB */
+#else
+#define maciisi_read_time() 0
+#define maciisi_write_time(n)
+#define maciisi_read_pram NULL
+#define maciisi_write_pram NULL
+#endif
/*
* VIA PRAM/RTC access routines
@@ -305,42 +396,55 @@ static void oss_shutdown(void)
static void cuda_restart(void)
{
- adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC,
- 2, CUDA_PACKET, CUDA_RESET_SYSTEM);
+ struct adb_request req;
+ if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_RESET_SYSTEM) < 0)
+ return;
+ while (!req.complete)
+ cuda_poll();
}
static void cuda_shutdown(void)
{
- adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC,
- 2, CUDA_PACKET, CUDA_POWERDOWN);
+ struct adb_request req;
+ if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN) < 0)
+ return;
+ while (!req.complete)
+ cuda_poll();
}
#endif /* CONFIG_ADB_CUDA */
-#ifdef CONFIG_ADB_PMU
+#ifdef CONFIG_ADB_PMU68K
void pmu_restart(void)
{
- adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC,
- 3, PMU_PACKET, PMU_SET_INTR_MASK,
- PMU_INT_ADB|PMU_INT_TICK);
-
- adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC,
- 2, PMU_PACKET, PMU_RESET);
+ struct adb_request req;
+ if (pmu_request(&req, NULL,
+ 2, PMU_SET_INTR_MASK, PMU_INT_ADB|PMU_INT_TICK) < 0)
+ return;
+ while (!req.complete)
+ pmu_poll();
+ if (pmu_request(&req, NULL, 1, PMU_RESET) < 0)
+ return;
+ while (!req.complete)
+ pmu_poll();
}
void pmu_shutdown(void)
{
- adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC,
- 3, PMU_PACKET, PMU_SET_INTR_MASK,
- PMU_INT_ADB|PMU_INT_TICK);
-
- adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC,
- 6, PMU_PACKET, PMU_SHUTDOWN,
- 'M', 'A', 'T', 'T');
+ struct adb_request req;
+ if (pmu_request(&req, NULL,
+ 2, PMU_SET_INTR_MASK, PMU_INT_ADB|PMU_INT_TICK) < 0)
+ return;
+ while (!req.complete)
+ pmu_poll();
+ if (pmu_request(&req, NULL, 5, PMU_SHUTDOWN, 'M', 'A', 'T', 'T') < 0)
+ return;
+ while (!req.complete)
+ pmu_poll();
}
-#endif /* CONFIG_ADB_PMU */
+#endif
/*
*-------------------------------------------------------------------
@@ -351,21 +455,22 @@ void pmu_shutdown(void)
void mac_pram_read(int offset, __u8 *buffer, int len)
{
- __u8 (*func)(int) = NULL;
+ __u8 (*func)(int);
int i;
- if (macintosh_config->adb_type == MAC_ADB_IISI ||
- macintosh_config->adb_type == MAC_ADB_PB1 ||
- macintosh_config->adb_type == MAC_ADB_PB2 ||
- macintosh_config->adb_type == MAC_ADB_CUDA) {
-#ifdef CONFIG_ADB
- func = adb_read_pram;
-#else
- return;
-#endif
- } else {
+ switch(macintosh_config->adb_type) {
+ case MAC_ADB_IISI:
+ func = maciisi_read_pram; break;
+ case MAC_ADB_PB1:
+ case MAC_ADB_PB2:
+ func = pmu_read_pram; break;
+ case MAC_ADB_CUDA:
+ func = cuda_read_pram; break;
+ default:
func = via_read_pram;
}
+ if (!func)
+ return;
for (i = 0 ; i < len ; i++) {
buffer[i] = (*func)(offset++);
}
@@ -373,21 +478,22 @@ void mac_pram_read(int offset, __u8 *buffer, int len)
void mac_pram_write(int offset, __u8 *buffer, int len)
{
- void (*func)(int, __u8) = NULL;
+ void (*func)(int, __u8);
int i;
- if (macintosh_config->adb_type == MAC_ADB_IISI ||
- macintosh_config->adb_type == MAC_ADB_PB1 ||
- macintosh_config->adb_type == MAC_ADB_PB2 ||
- macintosh_config->adb_type == MAC_ADB_CUDA) {
-#ifdef CONFIG_ADB
- func = adb_write_pram;
-#else
- return;
-#endif
- } else {
+ switch(macintosh_config->adb_type) {
+ case MAC_ADB_IISI:
+ func = maciisi_write_pram; break;
+ case MAC_ADB_PB1:
+ case MAC_ADB_PB2:
+ func = pmu_write_pram; break;
+ case MAC_ADB_CUDA:
+ func = cuda_write_pram; break;
+ default:
func = via_write_pram;
}
+ if (!func)
+ return;
for (i = 0 ; i < len ; i++) {
(*func)(offset++, buffer[i]);
}
@@ -408,7 +514,7 @@ void mac_poweroff(void)
} else if (macintosh_config->adb_type == MAC_ADB_CUDA) {
cuda_shutdown();
#endif
-#ifdef CONFIG_ADB_PMU
+#ifdef CONFIG_ADB_PMU68K
} else if (macintosh_config->adb_type == MAC_ADB_PB1
|| macintosh_config->adb_type == MAC_ADB_PB2) {
pmu_shutdown();
@@ -448,7 +554,7 @@ void mac_reset(void)
} else if (macintosh_config->adb_type == MAC_ADB_CUDA) {
cuda_restart();
#endif
-#ifdef CONFIG_ADB_PMU
+#ifdef CONFIG_ADB_PMU68K
} else if (macintosh_config->adb_type == MAC_ADB_PB1
|| macintosh_config->adb_type == MAC_ADB_PB2) {
pmu_restart();
@@ -466,12 +572,13 @@ void mac_reset(void)
/* make a 1-to-1 mapping, using the transparent tran. reg. */
unsigned long virt = (unsigned long) mac_reset;
unsigned long phys = virt_to_phys(mac_reset);
+ unsigned long addr = (phys&0xFF000000)|0x8777;
unsigned long offset = phys-virt;
local_irq_disable(); /* lets not screw this up, ok? */
__asm__ __volatile__(".chip 68030\n\t"
"pmove %0,%/tt0\n\t"
".chip 68k"
- : : "m" ((phys&0xFF000000)|0x8777));
+ : : "m" (addr));
/* Now jump to physical address so we can disable MMU */
__asm__ __volatile__(
".chip 68030\n\t"
@@ -588,20 +695,22 @@ int mac_hwclk(int op, struct rtc_time *t)
unsigned long now;
if (!op) { /* read */
- if (macintosh_config->adb_type == MAC_ADB_II) {
+ switch (macintosh_config->adb_type) {
+ case MAC_ADB_II:
+ case MAC_ADB_IOP:
now = via_read_time();
- } else
-#ifdef CONFIG_ADB
- if ((macintosh_config->adb_type == MAC_ADB_IISI) ||
- (macintosh_config->adb_type == MAC_ADB_PB1) ||
- (macintosh_config->adb_type == MAC_ADB_PB2) ||
- (macintosh_config->adb_type == MAC_ADB_CUDA)) {
- now = adb_read_time();
- } else
-#endif
- if (macintosh_config->adb_type == MAC_ADB_IOP) {
- now = via_read_time();
- } else {
+ break;
+ case MAC_ADB_IISI:
+ now = maciisi_read_time();
+ break;
+ case MAC_ADB_PB1:
+ case MAC_ADB_PB2:
+ now = pmu_read_time();
+ break;
+ case MAC_ADB_CUDA:
+ now = cuda_read_time();
+ break;
+ default:
now = 0;
}
@@ -619,15 +728,20 @@ int mac_hwclk(int op, struct rtc_time *t)
now = mktime(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
t->tm_hour, t->tm_min, t->tm_sec);
- if (macintosh_config->adb_type == MAC_ADB_II) {
- via_write_time(now);
- } else if ((macintosh_config->adb_type == MAC_ADB_IISI) ||
- (macintosh_config->adb_type == MAC_ADB_PB1) ||
- (macintosh_config->adb_type == MAC_ADB_PB2) ||
- (macintosh_config->adb_type == MAC_ADB_CUDA)) {
- adb_write_time(now);
- } else if (macintosh_config->adb_type == MAC_ADB_IOP) {
+ switch (macintosh_config->adb_type) {
+ case MAC_ADB_II:
+ case MAC_ADB_IOP:
via_write_time(now);
+ break;
+ case MAC_ADB_CUDA:
+ cuda_write_time(now);
+ break;
+ case MAC_ADB_PB1:
+ case MAC_ADB_PB2:
+ pmu_write_time(now);
+ break;
+ case MAC_ADB_IISI:
+ maciisi_write_time(now);
}
#endif
}
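
[The Cuda/PMU/IIsi helpers added above all follow one synchronous idiom: queue an asynchronous request, spin on the poll routine until the controller flags req.complete, and only then parse req.reply[]. The userspace sketch below mirrors that shape; demo_request(), demo_poll() and the reply bytes are inventions for illustration, while the real cuda_request()/cuda_poll() live in drivers/macintosh.]

#include <stdio.h>

struct demo_adb_request {
	int complete;
	unsigned char reply[8];
};

static int demo_request(struct demo_adb_request *req)
{
	req->complete = 0;
	return 0;                       /* 0 means the request was queued */
}

static void demo_poll(struct demo_adb_request *req)
{
	/* pretend the controller answered with a 32-bit clock value */
	req->reply[3] = 0x12; req->reply[4] = 0x34;
	req->reply[5] = 0x56; req->reply[6] = 0x78;
	req->complete = 1;
}

static long demo_read_time(void)
{
	struct demo_adb_request req;

	if (demo_request(&req) < 0)
		return 0;
	while (!req.complete)           /* busy-wait for the reply */
		demo_poll(&req);

	return ((long)req.reply[3] << 24) | (req.reply[4] << 16) |
	       (req.reply[5] << 8) | req.reply[6];
}

int main(void)
{
	printf("raw clock value: 0x%08lx\n", demo_read_time());
	return 0;
}
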
diff --git a/arch/m68k/math-emu/multi_arith.h b/arch/m68k/math-emu/multi_arith.h
index 02251e5afd8..4ad0ca918e2 100644
--- a/arch/m68k/math-emu/multi_arith.h
+++ b/arch/m68k/math-emu/multi_arith.h
@@ -366,7 +366,7 @@ static inline void fp_submant(struct fp_ext *dest, struct fp_ext *src1,
#define fp_mul64(desth, destl, src1, src2) ({ \
asm ("mulu.l %2,%1:%0" : "=d" (destl), "=d" (desth) \
- : "g" (src1), "0" (src2)); \
+ : "dm" (src1), "0" (src2)); \
})
#define fp_div64(quot, rem, srch, srcl, div) \
asm ("divu.l %2,%1:%0" : "=d" (quot), "=d" (rem) \
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index fe2383e36b0..85ad19a0ac7 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -102,7 +102,7 @@ static inline void free_io_area(void *addr)
*/
/* Rewritten by Andreas Schwab to remove all races. */
-void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
+void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
struct vm_struct *area;
unsigned long virtaddr, retaddr;
@@ -121,7 +121,7 @@ void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
if (MACH_IS_AMIGA) {
if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
&& (cacheflag == IOMAP_NOCACHE_SER))
- return (void *)physaddr;
+ return (void __iomem *)physaddr;
}
#endif
@@ -218,21 +218,21 @@ void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
#endif
flush_tlb_all();
- return (void *)retaddr;
+ return (void __iomem *)retaddr;
}
/*
* Unmap a ioremap()ed region again
*/
-void iounmap(void *addr)
+void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
if ((!MACH_IS_AMIGA) ||
(((unsigned long)addr < 0x40000000) ||
((unsigned long)addr > 0x60000000)))
- free_io_area(addr);
+ free_io_area((__force void *)addr);
#else
- free_io_area(addr);
+ free_io_area((__force void *)addr);
#endif
}
diff --git a/arch/m68k/mvme16x/rtc.c b/arch/m68k/mvme16x/rtc.c
index 7977eae50af..a69fe3048ed 100644
--- a/arch/m68k/mvme16x/rtc.c
+++ b/arch/m68k/mvme16x/rtc.c
@@ -11,6 +11,7 @@
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/ioport.h>
+#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
@@ -44,6 +45,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
volatile MK48T08ptr_t rtc = (MK48T08ptr_t)MVME_RTC_BASE;
unsigned long flags;
struct rtc_time wtime;
+ void __user *argp = (void __user *)arg;
switch (cmd) {
case RTC_RD_TIME: /* Read the time/date from RTC */
@@ -63,7 +65,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
wtime.tm_wday = BCD2BIN(rtc->bcd_dow)-1;
rtc->ctrl = 0;
local_irq_restore(flags);
- return copy_to_user((void *)arg, &wtime, sizeof wtime) ?
+ return copy_to_user(argp, &wtime, sizeof wtime) ?
-EFAULT : 0;
}
case RTC_SET_TIME: /* Set the RTC */
@@ -75,8 +77,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- if (copy_from_user(&rtc_tm, (struct rtc_time*)arg,
- sizeof(struct rtc_time)))
+ if (copy_from_user(&rtc_tm, argp, sizeof(struct rtc_time)))
return -EFAULT;
yrs = rtc_tm.tm_year;
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 02b626bae4a..5e0f9b04d45 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -36,8 +36,6 @@
#include <asm/machdep.h>
#include <asm/q40_master.h>
-extern void floppy_setup(char *str, int *ints);
-
extern irqreturn_t q40_process_int (int level, struct pt_regs *regs);
extern irqreturn_t (*q40_default_handler[]) (int, void *, struct pt_regs *); /* added just for debugging */
extern void q40_init_IRQ (void);
@@ -194,9 +192,6 @@ void __init config_q40(void)
mach_heartbeat = q40_heartbeat;
#endif
mach_halt = q40_halt;
-#ifdef CONFIG_DUMMY_CONSOLE
- conswitchp = &dummy_con;
-#endif
/* disable a few things that SMSQ might have left enabled */
q40_disable_irqs();
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index 77d05bcc322..f1ca0dfbaa6 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -160,9 +160,6 @@ void __init config_sun3(void)
mach_hwclk = sun3_hwclk;
mach_halt = sun3_halt;
mach_get_hardware_list = sun3_get_hardware_list;
-#if defined(CONFIG_DUMMY_CONSOLE)
- conswitchp = &dummy_con;
-#endif
memory_start = ((((int)&_end) + 0x2000) & ~0x1fff);
// PROM seems to want the last couple of physical pages. --m
diff --git a/arch/m68k/sun3x/config.c b/arch/m68k/sun3x/config.c
index 0ef547f5494..0920f5d3360 100644
--- a/arch/m68k/sun3x/config.c
+++ b/arch/m68k/sun3x/config.c
@@ -71,10 +71,6 @@ void __init config_sun3x(void)
mach_get_model = sun3_get_model;
mach_get_hardware_list = sun3x_get_hardware_list;
-#ifdef CONFIG_DUMMY_CONSOLE
- conswitchp = &dummy_con;
-#endif
-
sun3_intreg = (unsigned char *)SUN3X_INTREG;
/* only the serial console is known to work anyway... */
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index b96498120fe..e2a6e864896 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -17,10 +17,6 @@ config FPU
bool
default n
-config UID16
- bool
- default y
-
config RWSEM_GENERIC_SPINLOCK
bool
default y
diff --git a/arch/m68knommu/Makefile b/arch/m68knommu/Makefile
index b6b5c14e55f..6f880cbff1c 100644
--- a/arch/m68knommu/Makefile
+++ b/arch/m68knommu/Makefile
@@ -99,7 +99,6 @@ cflags-$(CONFIG_M68360) := -m68332
AFLAGS += $(cflags-y)
CFLAGS += $(cflags-y)
-CFLAGS += -O1 -g
CFLAGS += -D__linux__
CFLAGS += -DUTS_SYSNAME=\"uClinux\"
diff --git a/arch/m68knommu/kernel/m68k_ksyms.c b/arch/m68knommu/kernel/m68k_ksyms.c
index e93a5ad5649..eddb8d3e130 100644
--- a/arch/m68knommu/kernel/m68k_ksyms.c
+++ b/arch/m68knommu/kernel/m68k_ksyms.c
@@ -18,7 +18,6 @@
#include <asm/checksum.h>
#include <asm/current.h>
-extern void dump_thread(struct pt_regs *, struct user *);
extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
/* platform dependent support */
@@ -26,7 +25,6 @@ extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(dump_thread);
EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strstr);
@@ -38,8 +36,6 @@ EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(ip_fast_csum);
-EXPORT_SYMBOL(mach_enable_irq);
-EXPORT_SYMBOL(mach_disable_irq);
EXPORT_SYMBOL(kernel_thread);
/* Networking helper routines. */
diff --git a/arch/m68knommu/kernel/process.c b/arch/m68knommu/kernel/process.c
index 82e7ec88880..99bf4382479 100644
--- a/arch/m68knommu/kernel/process.c
+++ b/arch/m68knommu/kernel/process.c
@@ -198,10 +198,9 @@ int copy_thread(int nr, unsigned long clone_flags,
{
struct pt_regs * childregs;
struct switch_stack * childstack, *stack;
- unsigned long stack_offset, *retp;
+ unsigned long *retp;
- stack_offset = THREAD_SIZE - sizeof(struct pt_regs);
- childregs = (struct pt_regs *) ((unsigned long) p->thread_info + stack_offset);
+ childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
*childregs = *regs;
childregs->d0 = 0;
@@ -276,52 +275,6 @@ int dump_fpu(struct pt_regs *regs, struct user_m68kfp_struct *fpu)
}
/*
- * fill in the user structure for a core dump..
- */
-void dump_thread(struct pt_regs * regs, struct user * dump)
-{
- struct switch_stack *sw;
-
- /* changed the size calculations - should hopefully work better. lbt */
- dump->magic = CMAGIC;
- dump->start_code = 0;
- dump->start_stack = rdusp() & ~(PAGE_SIZE - 1);
- dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
- dump->u_dsize = ((unsigned long) (current->mm->brk +
- (PAGE_SIZE-1))) >> PAGE_SHIFT;
- dump->u_dsize -= dump->u_tsize;
- dump->u_ssize = 0;
-
- if (dump->start_stack < TASK_SIZE)
- dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
-
- dump->u_ar0 = (struct user_regs_struct *)((int)&dump->regs - (int)dump);
- sw = ((struct switch_stack *)regs) - 1;
- dump->regs.d1 = regs->d1;
- dump->regs.d2 = regs->d2;
- dump->regs.d3 = regs->d3;
- dump->regs.d4 = regs->d4;
- dump->regs.d5 = regs->d5;
- dump->regs.d6 = sw->d6;
- dump->regs.d7 = sw->d7;
- dump->regs.a0 = regs->a0;
- dump->regs.a1 = regs->a1;
- dump->regs.a2 = regs->a2;
- dump->regs.a3 = sw->a3;
- dump->regs.a4 = sw->a4;
- dump->regs.a5 = sw->a5;
- dump->regs.a6 = sw->a6;
- dump->regs.d0 = regs->d0;
- dump->regs.orig_d0 = regs->orig_d0;
- dump->regs.stkadj = regs->stkadj;
- dump->regs.sr = regs->sr;
- dump->regs.pc = regs->pc;
- dump->regs.fmtvec = (regs->format << 12) | regs->vector;
- /* dump floating point stuff */
- dump->u_fpvalid = dump_fpu (regs, &dump->m68kfp);
-}
-
-/*
* Generic dumping code. Used for panic and debug.
*/
void dump(struct pt_regs *fp)
diff --git a/arch/m68knommu/kernel/ptrace.c b/arch/m68knommu/kernel/ptrace.c
index 262ab8c72e5..382ca5797b9 100644
--- a/arch/m68knommu/kernel/ptrace.c
+++ b/arch/m68knommu/kernel/ptrace.c
@@ -101,7 +101,7 @@ void ptrace_disable(struct task_struct *child)
put_reg(child, PT_SR, tmp);
}
-long arch_ptrace(truct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
int ret;
diff --git a/arch/m68knommu/kernel/setup.c b/arch/m68knommu/kernel/setup.c
index abb80fa2b94..93120b9bfff 100644
--- a/arch/m68knommu/kernel/setup.c
+++ b/arch/m68knommu/kernel/setup.c
@@ -65,8 +65,6 @@ void (*mach_kbd_leds) (unsigned int) = NULL;
/* machine dependent irq functions */
void (*mach_init_IRQ) (void) = NULL;
irqreturn_t (*(*mach_default_handler)[]) (int, void *, struct pt_regs *) = NULL;
-void (*mach_enable_irq) (unsigned int) = NULL;
-void (*mach_disable_irq) (unsigned int) = NULL;
int (*mach_get_irq_list) (struct seq_file *, void *) = NULL;
void (*mach_process_int) (int irq, struct pt_regs *fp) = NULL;
void (*mach_trap_init) (void);
diff --git a/arch/m68knommu/kernel/signal.c b/arch/m68knommu/kernel/signal.c
index 43a2726c0d0..e1b3aa39e27 100644
--- a/arch/m68knommu/kernel/signal.c
+++ b/arch/m68knommu/kernel/signal.c
@@ -285,6 +285,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext *usc, void *fp,
regs->d1 = context.sc_d1;
regs->a0 = context.sc_a0;
regs->a1 = context.sc_a1;
+ ((struct switch_stack *)regs - 1)->a5 = context.sc_a5;
regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
regs->pc = context.sc_pc;
regs->orig_d0 = -1; /* disable syscall checks */
@@ -498,6 +499,7 @@ static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
sc->sc_d1 = regs->d1;
sc->sc_a0 = regs->a0;
sc->sc_a1 = regs->a1;
+ sc->sc_a5 = ((struct switch_stack *)regs - 1)->a5;
sc->sc_sr = regs->sr;
sc->sc_pc = regs->pc;
sc->sc_formatvec = regs->format << 12 | regs->vector;
@@ -597,6 +599,9 @@ static void setup_frame (int sig, struct k_sigaction *ka,
/* Set up registers for signal handler */
wrusp ((unsigned long) frame);
regs->pc = (unsigned long) ka->sa.sa_handler;
+ ((struct switch_stack *)regs - 1)->a5 = current->mm->start_data;
+ regs->format = 0x4; /*set format byte to make stack appear modulo 4
+ which it will be when doing the rte */
adjust_stack:
/* Prepare to skip over the extra stuff in the exception frame. */
@@ -664,6 +669,9 @@ static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
/* Set up registers for signal handler */
wrusp ((unsigned long) frame);
regs->pc = (unsigned long) ka->sa.sa_handler;
+ ((struct switch_stack *)regs - 1)->a5 = current->mm->start_data;
+ regs->format = 0x4; /*set format byte to make stack appear modulo 4
+ which it will be when doing the rte */
adjust_stack:
/* Prepare to skip over the extra stuff in the exception frame. */
diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S
index 0eab92ca4b9..ac9de2661c0 100644
--- a/arch/m68knommu/kernel/vmlinux.lds.S
+++ b/arch/m68knommu/kernel/vmlinux.lds.S
@@ -129,7 +129,7 @@
*/
#if defined(CONFIG_M5208EVB)
#define RAM_START 0x40020000
-#define RAM_LENGTH 0x01e00000
+#define RAM_LENGTH 0x01fe0000
#endif
/*
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index b50be449d3f..c3e852e9953 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1471,7 +1471,7 @@ config SB1_PASS_2_1_WORKAROUNDS
config 64BIT_PHYS_ADDR
bool "Support for 64-bit physical address space"
- depends on (CPU_R4X00 || CPU_R5000 || CPU_RM7000 || CPU_RM9000 || CPU_R10000 || CPU_SB1 || CPU_MIPS32_R1 || CPU_MIPS64_R1) && 32BIT
+ depends on (CPU_R4X00 || CPU_R5000 || CPU_RM7000 || CPU_RM9000 || CPU_R10000 || CPU_SB1 || CPU_MIPS32 || CPU_MIPS64) && 32BIT
config CPU_ADVANCED
bool "Override CPU Options"
@@ -1492,14 +1492,6 @@ config CPU_HAS_LLSC
for better performance, N if you don't know. You must say Y here
for multiprocessor machines.
-config CPU_HAS_LLDSCD
- bool "lld/scd Instructions available" if CPU_ADVANCED
- default y if !CPU_ADVANCED && !CPU_R3000 && !CPU_VR41XX && !CPU_TX39XX && !CPU_MIPS32_R1
- help
- Say Y here if your CPU has the lld and scd instructions, the 64-bit
- equivalents of ll and sc. Say Y here for better performance, N if
- you don't know. You must say Y here for multiprocessor machines.
-
config CPU_HAS_WB
bool "Writeback Buffer available" if CPU_ADVANCED
default y if !CPU_ADVANCED && CPU_R3000 && MACH_DECSTATION
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index e14ba5e01a3..2a9f2ef27b2 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -93,7 +93,6 @@ endif
#
cflags-y += -I $(TOPDIR)/include/asm/gcc
cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
-cflags-y += $(call cc-option, -finline-limit=100000)
LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
MODFLAGS += -mlong-calls
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index 069f9d14983..6fd35377981 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -130,7 +130,6 @@ CONFIG_PAGE_SIZE_4KB=y
# CONFIG_SIBYTE_DMA_PAGEOPS is not set
# CONFIG_MIPS_MT is not set
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/configs/cobalt_defconfig b/arch/mips/configs/cobalt_defconfig
index 216f4023a81..1d3ee18ea8b 100644
--- a/arch/mips/configs/cobalt_defconfig
+++ b/arch/mips/configs/cobalt_defconfig
@@ -115,7 +115,6 @@ CONFIG_PAGE_SIZE_4KB=y
# CONFIG_MIPS_MT is not set
# CONFIG_CPU_ADVANCED is not set
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/configs/ddb5476_defconfig b/arch/mips/configs/ddb5476_defconfig
index bea00a9e926..a81e2de6947 100644
--- a/arch/mips/configs/ddb5476_defconfig
+++ b/arch/mips/configs/ddb5476_defconfig
@@ -116,7 +116,6 @@ CONFIG_PAGE_SIZE_4KB=y
# CONFIG_MIPS_MT is not set
# CONFIG_CPU_ADVANCED is not set
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/configs/ddb5477_defconfig b/arch/mips/configs/ddb5477_defconfig
index 61f7171ca7e..f1c27c2fb03 100644
--- a/arch/mips/configs/ddb5477_defconfig
+++ b/arch/mips/configs/ddb5477_defconfig
@@ -116,7 +116,6 @@ CONFIG_PAGE_SIZE_4KB=y
# CONFIG_MIPS_MT is not set
# CONFIG_CPU_ADVANCED is not set
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/configs/ev64120_defconfig b/arch/mips/configs/ev64120_defconfig
index 14e3815f11e..aa24d85ea94 100644
--- a/arch/mips/configs/ev64120_defconfig
+++ b/arch/mips/configs/ev64120_defconfig
@@ -118,7 +118,6 @@ CONFIG_PAGE_SIZE_4KB=y
# CONFIG_64BIT_PHYS_ADDR is not set
# CONFIG_CPU_ADVANCED is not set
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/configs/ev96100_defconfig b/arch/mips/configs/ev96100_defconfig
index 510819581d8..eeed0e5ad26 100644
--- a/arch/mips/configs/ev96100_defconfig
+++ b/arch/mips/configs/ev96100_defconfig
@@ -121,7 +121,6 @@ CONFIG_CPU_HAS_PREFETCH=y
# CONFIG_64BIT_PHYS_ADDR is not set
# CONFIG_CPU_ADVANCED is not set
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
index 67979e3e606..e56351abf87 100644
--- a/arch/mips/configs/ip22_defconfig
+++ b/arch/mips/configs/ip22_defconfig
@@ -123,7 +123,6 @@ CONFIG_IP22_CPU_SCACHE=y
# CONFIG_64BIT_PHYS_ADDR is not set
# CONFIG_CPU_ADVANCED is not set
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 03af44d1d84..e17d3adff02 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -119,7 +119,6 @@ CONFIG_PAGE_SIZE_4KB=y
CONFIG_CPU_HAS_PREFETCH=y
# CONFIG_MIPS_MT is not set
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig
index cba2a49cceb..967e7acd8e1 100644
--- a/arch/mips/configs/ip32_defconfig
+++ b/arch/mips/configs/ip32_defconfig
@@ -121,7 +121,6 @@ CONFIG_R5000_CPU_SCACHE=y
CONFIG_RM7000_CPU_SCACHE=y
# CONFIG_MIPS_MT is not set
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/configs/it8172_defconfig b/arch/mips/configs/it8172_defconfig
index e7ee1679af9..b5fa9639db6 100644
--- a/arch/mips/configs/it8172_defconfig
+++ b/arch/mips/configs/it8172_defconfig
@@ -117,7 +117,6 @@ CONFIG_PAGE_SIZE_4KB=y
# CONFIG_MIPS_MT is not set
# CONFIG_CPU_ADVANCED is not set
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/configs/ivr_defconfig b/arch/mips/configs/ivr_defconfig
index 138c8a60a4d..71386938d47 100644
--- a/arch/mips/configs/ivr_defconfig
+++ b/arch/mips/configs/ivr_defconfig
@@ -114,7 +114,6 @@ CONFIG_PAGE_SIZE_4KB=y
# CONFIG_MIPS_MT is not set
# CONFIG_CPU_ADVANCED is not set
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/configs/jaguar-atx_defconfig b/arch/mips/configs/jaguar-atx_defconfig
index 6238e0d6a43..14fb4688670 100644
--- a/arch/mips/configs/jaguar-atx_defconfig
+++ b/arch/mips/configs/jaguar-atx_defconfig
@@ -124,7 +124,6 @@ CONFIG_CPU_HAS_PREFETCH=y
# CONFIG_64BIT_PHYS_ADDR is not set
# CONFIG_CPU_ADVANCED is not set
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/configs/lasat200_defconfig b/arch/mips/configs/lasat200_defconfig
index a7ad99b12fe..6c5df76d48d 100644
--- a/arch/mips/configs/lasat200_defconfig
+++ b/arch/mips/configs/lasat200_defconfig
@@ -121,7 +121,6 @@ CONFIG_R5000_CPU_SCACHE=y
# CONFIG_64BIT_PHYS_ADDR is not set
# CONFIG_CPU_ADVANCED is not set
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index d1c44216f1c..da0677a03c1 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.15-rc2
-# Thu Nov 24 01:06:35 2005
+# Linux kernel version: 2.6.15-rc5
+# Fri Dec 23 02:21:03 2005
#
CONFIG_MIPS=y
@@ -87,8 +87,8 @@ CONFIG_HAVE_STD_PC_SERIAL_PORT=y
#
# CPU selection
#
-CONFIG_CPU_MIPS32_R1=y
-# CONFIG_CPU_MIPS32_R2 is not set
+# CONFIG_CPU_MIPS32_R1 is not set
+CONFIG_CPU_MIPS32_R2=y
# CONFIG_CPU_MIPS64_R1 is not set
# CONFIG_CPU_MIPS64_R2 is not set
# CONFIG_CPU_R3000 is not set
@@ -112,7 +112,7 @@ CONFIG_SYS_HAS_CPU_MIPS64_R1=y
CONFIG_SYS_HAS_CPU_NEVADA=y
CONFIG_SYS_HAS_CPU_RM7000=y
CONFIG_CPU_MIPS32=y
-CONFIG_CPU_MIPSR1=y
+CONFIG_CPU_MIPSR2=y
CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
CONFIG_SYS_SUPPORTS_64BIT_KERNEL=y
CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
diff --git a/arch/mips/configs/ocelot_3_defconfig b/arch/mips/configs/ocelot_3_defconfig
index 9081ea5a9db..7ad8718c1b6 100644
--- a/arch/mips/configs/ocelot_3_defconfig
+++ b/arch/mips/configs/ocelot_3_defconfig
@@ -122,7 +122,6 @@ CONFIG_CPU_HAS_PREFETCH=y
# CONFIG_64BIT_PHYS_ADDR is not set
# CONFIG_CPU_ADVANCED is not set
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/configs/ocelot_c_defconfig b/arch/mips/configs/ocelot_c_defconfig
index 570fc4d1816..e8d6bb3551a 100644
--- a/arch/mips/configs/ocelot_c_defconfig
+++ b/arch/mips/configs/ocelot_c_defconfig
@@ -118,7 +118,6 @@ CONFIG_RM7000_CPU_SCACHE=y
CONFIG_CPU_HAS_PREFETCH=y
# CONFIG_MIPS_MT is not set
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/configs/ocelot_defconfig b/arch/mips/configs/ocelot_defconfig
index 6634ab24715..f3787b68bdd 100644
--- a/arch/mips/configs/ocelot_defconfig
+++ b/arch/mips/configs/ocelot_defconfig
@@ -123,7 +123,6 @@ CONFIG_CPU_HAS_PREFETCH=y
# CONFIG_64BIT_PHYS_ADDR is not set
# CONFIG_CPU_ADVANCED is not set
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/configs/ocelot_g_defconfig b/arch/mips/configs/ocelot_g_defconfig
index 4c396e1e2f0..b6126ad4d06 100644
--- a/arch/mips/configs/ocelot_g_defconfig
+++ b/arch/mips/configs/ocelot_g_defconfig
@@ -121,7 +121,6 @@ CONFIG_RM7000_CPU_SCACHE=y
CONFIG_CPU_HAS_PREFETCH=y
# CONFIG_MIPS_MT is not set
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/configs/pnx8550-v2pci_defconfig b/arch/mips/configs/pnx8550-v2pci_defconfig
index d9a0d2fdba4..4c650e70813 100644
--- a/arch/mips/configs/pnx8550-v2pci_defconfig
+++ b/arch/mips/configs/pnx8550-v2pci_defconfig
@@ -116,7 +116,6 @@ CONFIG_CPU_HAS_PREFETCH=y
# CONFIG_64BIT_PHYS_ADDR is not set
CONFIG_CPU_ADVANCED=y
CONFIG_CPU_HAS_LLSC=y
-# CONFIG_CPU_HAS_LLDSCD is not set
# CONFIG_CPU_HAS_WB is not set
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
diff --git a/arch/mips/configs/rbhma4500_defconfig b/arch/mips/configs/rbhma4500_defconfig
index 1cc14502358..9aaa43024ae 100644
--- a/arch/mips/configs/rbhma4500_defconfig
+++ b/arch/mips/configs/rbhma4500_defconfig
@@ -124,7 +124,6 @@ CONFIG_PAGE_SIZE_4KB=y
# CONFIG_MIPS_MT is not set
CONFIG_CPU_ADVANCED=y
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_WB=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 30975b305ae..abf61095931 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -124,7 +124,6 @@ CONFIG_PAGE_SIZE_4KB=y
# CONFIG_64BIT_PHYS_ADDR is not set
# CONFIG_CPU_ADVANCED is not set
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/configs/sb1250-swarm_defconfig b/arch/mips/configs/sb1250-swarm_defconfig
index 63f1be18e9b..52048c90607 100644
--- a/arch/mips/configs/sb1250-swarm_defconfig
+++ b/arch/mips/configs/sb1250-swarm_defconfig
@@ -133,7 +133,6 @@ CONFIG_CPU_HAS_PREFETCH=y
# CONFIG_MIPS_MT is not set
CONFIG_SB1_PASS_1_WORKAROUNDS=y
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/configs/yosemite_defconfig b/arch/mips/configs/yosemite_defconfig
index d51d5d16297..468c2e443d7 100644
--- a/arch/mips/configs/yosemite_defconfig
+++ b/arch/mips/configs/yosemite_defconfig
@@ -118,7 +118,6 @@ CONFIG_CPU_HAS_PREFETCH=y
# CONFIG_64BIT_PHYS_ADDR is not set
# CONFIG_CPU_ADVANCED is not set
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/defconfig b/arch/mips/defconfig
index 2a1b844da43..4f125e9e8e0 100644
--- a/arch/mips/defconfig
+++ b/arch/mips/defconfig
@@ -123,7 +123,6 @@ CONFIG_IP22_CPU_SCACHE=y
# CONFIG_64BIT_PHYS_ADDR is not set
# CONFIG_CPU_ADVANCED is not set
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 72f2126ad19..f36c4f20ee8 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -50,7 +50,7 @@ obj-$(CONFIG_MIPS_BOARDS_GEN) += irq-msc01.o
obj-$(CONFIG_32BIT) += scall32-o32.o
obj-$(CONFIG_64BIT) += scall64-64.o
obj-$(CONFIG_BINFMT_IRIX) += binfmt_irix.o
-obj-$(CONFIG_MIPS32_COMPAT) += ioctl32.o linux32.o signal32.o
+obj-$(CONFIG_MIPS32_COMPAT) += linux32.o signal32.o
obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall64-n32.o signal_n32.o
obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o ptrace32.o
@@ -60,6 +60,5 @@ obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_64BIT) += cpu-bugs64.o
CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
-CFLAGS_ioctl32.o += -Ifs/
EXTRA_AFLAGS := $(CFLAGS)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 5e1b08b00a3..fac48ad27b3 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -435,6 +435,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
}
}
+static char unknown_isa[] __initdata = KERN_ERR \
+ "Unsupported ISA type, c0.config0: %d.";
+
static inline unsigned int decode_config0(struct cpuinfo_mips *c)
{
unsigned int config0;
@@ -447,16 +450,37 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
isa = (config0 & MIPS_CONF_AT) >> 13;
switch (isa) {
case 0:
- c->isa_level = MIPS_CPU_ISA_M32;
+ switch ((config0 >> 10) & 7) {
+ case 0:
+ c->isa_level = MIPS_CPU_ISA_M32R1;
+ break;
+ case 1:
+ c->isa_level = MIPS_CPU_ISA_M32R2;
+ break;
+ default:
+ goto unknown;
+ }
break;
case 2:
- c->isa_level = MIPS_CPU_ISA_M64;
+ switch ((config0 >> 10) & 7) {
+ case 0:
+ c->isa_level = MIPS_CPU_ISA_M64R1;
+ break;
+ case 1:
+ c->isa_level = MIPS_CPU_ISA_M64R2;
+ break;
+ default:
+ goto unknown;
+ }
break;
default:
- panic("Unsupported ISA type, cp0.config0.at: %d.", isa);
+ goto unknown;
}
return config0 & MIPS_CONF_M;
+
+unknown:
+ panic(unknown_isa, config0);
}
static inline unsigned int decode_config1(struct cpuinfo_mips *c)
@@ -568,7 +592,6 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c)
break;
case PRID_IMP_34K:
c->cputype = CPU_34K;
- c->isa_level = MIPS_CPU_ISA_M32;
break;
}
}
@@ -647,7 +670,7 @@ static inline void cpu_probe_philips(struct cpuinfo_mips *c)
switch (c->processor_id & 0xff00) {
case PRID_IMP_PR4450:
c->cputype = CPU_PR4450;
- c->isa_level = MIPS_CPU_ISA_M32;
+ c->isa_level = MIPS_CPU_ISA_M32R1;
break;
default:
panic("Unknown Philips Core!"); /* REVISIT: die? */
@@ -690,8 +713,10 @@ __init void cpu_probe(void)
if (c->options & MIPS_CPU_FPU) {
c->fpu_id = cpu_get_fpu_id();
- if (c->isa_level == MIPS_CPU_ISA_M32 ||
- c->isa_level == MIPS_CPU_ISA_M64) {
+ if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
+ c->isa_level == MIPS_CPU_ISA_M32R2 ||
+ c->isa_level == MIPS_CPU_ISA_M64R1 ||
+ c->isa_level == MIPS_CPU_ISA_M64R2) {
if (c->fpu_id & MIPS_FPIR_3D)
c->ases |= MIPS_ASE_MIPS3D;
}
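
[The decode_config0() change above refines ISA detection: bits 14:13 of Config0 (the AT field) pick MIPS32 versus MIPS64, and bits 12:10 (the AR field) pick release 1 versus release 2, replacing the old single M32/M64 levels. A runnable decoding sketch under that assumption; the string labels stand in for the kernel's MIPS_CPU_ISA_* constants.]

#include <stdio.h>

static const char *decode_config0_demo(unsigned int config0)
{
	unsigned int at = (config0 >> 13) & 3;  /* architecture type */
	unsigned int ar = (config0 >> 10) & 7;  /* architecture revision */

	switch (at) {
	case 0:
		return ar == 0 ? "MIPS32 R1" : ar == 1 ? "MIPS32 R2" : "unknown";
	case 2:
		return ar == 0 ? "MIPS64 R1" : ar == 1 ? "MIPS64 R2" : "unknown";
	default:
		return "unknown";
	}
}

int main(void)
{
	/* AT=0, AR=1: a MIPS32 release 2 core */
	unsigned int config0 = (0u << 13) | (1u << 10);

	printf("%s\n", decode_config0_demo(config0));
	return 0;
}
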
diff --git a/arch/mips/kernel/ioctl32.c b/arch/mips/kernel/ioctl32.c
deleted file mode 100644
index 9ea1fc74886..00000000000
--- a/arch/mips/kernel/ioctl32.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
- *
- * Copyright (C) 2000 Silicon Graphics, Inc.
- * Written by Ulf Carlsson (ulfc@engr.sgi.com)
- * Copyright (C) 2000, 2004 Ralf Baechle
- * Copyright (C) 2002, 2003 Maciej W. Rozycki
- */
-#define INCLUDES
-#include "compat_ioctl.c"
-
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/compat.h>
-#include <linux/ioctl32.h>
-#include <linux/syscalls.h>
-
-#ifdef CONFIG_SIBYTE_TBPROF
-#include <asm/sibyte/trace_prof.h>
-#endif
-
-#define A(__x) ((unsigned long)(__x))
-
-long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
-
-#define CODE
-#include "compat_ioctl.c"
-
-#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL((cmd),sys_ioctl)
-#define HANDLE_IOCTL(cmd,handler) { (cmd), (ioctl_trans_handler_t)(handler), NULL },
-#define IOCTL_TABLE_START \
- struct ioctl_trans ioctl_start[] = {
-#define IOCTL_TABLE_END \
- };
-
-IOCTL_TABLE_START
-
-#include <linux/compat_ioctl.h>
-#define DECLARES
-#include "compat_ioctl.c"
-
-/*HANDLE_IOCTL(RTC_IRQP_READ, w_long)
-COMPATIBLE_IOCTL(RTC_IRQP_SET)
-HANDLE_IOCTL(RTC_EPOCH_READ, w_long)
-COMPATIBLE_IOCTL(RTC_EPOCH_SET)
-*/
-
-IOCTL_TABLE_END
-
-int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index dd725779d91..fa98f10d013 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -140,12 +140,12 @@ void flush_thread(void)
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
unsigned long unused, struct task_struct *p, struct pt_regs *regs)
{
- struct thread_info *ti = p->thread_info;
+ struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs;
long childksp;
p->set_child_tid = p->clear_child_tid = NULL;
- childksp = (unsigned long)ti + THREAD_SIZE - 32;
+ childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
preempt_disable();
@@ -205,7 +205,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
return 1;
}
-void dump_regs(elf_greg_t *gp, struct pt_regs *regs)
+void elf_dump_regs(elf_greg_t *gp, struct pt_regs *regs)
{
int i;
@@ -229,9 +229,7 @@ void dump_regs(elf_greg_t *gp, struct pt_regs *regs)
int dump_task_regs (struct task_struct *tsk, elf_gregset_t *regs)
{
- struct thread_info *ti = tsk->thread_info;
- long ksp = (unsigned long)ti + THREAD_SIZE - 32;
- dump_regs(&(*regs)[0], (struct pt_regs *) ksp - 1);
+ elf_dump_regs(*regs, task_pt_regs(tsk));
return 1;
}
@@ -409,7 +407,7 @@ unsigned long get_wchan(struct task_struct *p)
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
- stack_page = (unsigned long)p->thread_info;
+ stack_page = (unsigned long)task_stack_page(p);
if (!stack_page || !mips_frame_info_initialized)
return 0;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 510da5fda56..f838b36cc76 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -64,8 +64,7 @@ int ptrace_getregs (struct task_struct *child, __s64 __user *data)
if (!access_ok(VERIFY_WRITE, data, 38 * 8))
return -EIO;
- regs = (struct pt_regs *) ((unsigned long) child->thread_info +
- THREAD_SIZE - 32 - sizeof(struct pt_regs));
+ regs = task_pt_regs(child);
for (i = 0; i < 32; i++)
__put_user (regs->regs[i], data + i);
@@ -92,8 +91,7 @@ int ptrace_setregs (struct task_struct *child, __s64 __user *data)
if (!access_ok(VERIFY_READ, data, 38 * 8))
return -EIO;
- regs = (struct pt_regs *) ((unsigned long) child->thread_info +
- THREAD_SIZE - 32 - sizeof(struct pt_regs));
+ regs = task_pt_regs(child);
for (i = 0; i < 32; i++)
__get_user (regs->regs[i], data + i);
@@ -198,8 +196,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
struct pt_regs *regs;
unsigned long tmp = 0;
- regs = (struct pt_regs *) ((unsigned long) child->thread_info +
- THREAD_SIZE - 32 - sizeof(struct pt_regs));
+ regs = task_pt_regs(child);
ret = 0; /* Default return value. */
switch (addr) {
@@ -280,12 +277,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = -EIO;
goto out;
}
- if (child->thread.dsp.used_dsp) {
- dregs = __get_dsp_regs(child);
- tmp = (unsigned long) (dregs[addr - DSP_BASE]);
- } else {
- tmp = -1; /* DSP registers yet used */
- }
+ dregs = __get_dsp_regs(child);
+ tmp = (unsigned long) (dregs[addr - DSP_BASE]);
break;
}
case DSP_CONTROL:
@@ -318,8 +311,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_POKEUSR: {
struct pt_regs *regs;
ret = 0;
- regs = (struct pt_regs *) ((unsigned long) child->thread_info +
- THREAD_SIZE - 32 - sizeof(struct pt_regs));
+ regs = task_pt_regs(child);
switch (addr) {
case 0 ... 31:
@@ -446,7 +438,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
case PTRACE_GET_THREAD_AREA:
- ret = put_user(child->thread_info->tp_value,
+ ret = put_user(task_thread_info(child)->tp_value,
(unsigned long __user *) data);
break;
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 9a9b0497213..0c82b25d8c6 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -57,30 +57,16 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
(unsigned long) data);
#endif
lock_kernel();
- ret = -EPERM;
if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
- if ((ret = security_ptrace(current->parent, current)))
- goto out;
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
+ ret = ptrace_traceme();
goto out;
}
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
- ret = -EPERM;
- if (pid == 1) /* you may not mess with init */
- goto out_tsk;
+ child = ptrace_get_task_struct(pid);
+ if (IS_ERR(child)) {
+ ret = PTR_ERR(child);
+ goto out;
+ }
if (request == PTRACE_ATTACH) {
ret = ptrace_attach(child);
@@ -140,8 +126,7 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
struct pt_regs *regs;
unsigned int tmp;
- regs = (struct pt_regs *) ((unsigned long) child->thread_info +
- THREAD_SIZE - 32 - sizeof(struct pt_regs));
+ regs = task_pt_regs(child);
ret = 0; /* Default return value. */
switch (addr) {
@@ -215,12 +200,8 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
ret = -EIO;
goto out_tsk;
}
- if (child->thread.dsp.used_dsp) {
- dspreg_t *dregs = __get_dsp_regs(child);
- tmp = (unsigned long) (dregs[addr - DSP_BASE]);
- } else {
- tmp = -1; /* DSP registers yet used */
- }
+ dspreg_t *dregs = __get_dsp_regs(child);
+ tmp = (unsigned long) (dregs[addr - DSP_BASE]);
break;
case DSP_CONTROL:
if (!cpu_has_dsp) {
@@ -277,8 +258,7 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
case PTRACE_POKEUSR: {
struct pt_regs *regs;
ret = 0;
- regs = (struct pt_regs *) ((unsigned long) child->thread_info +
- THREAD_SIZE - 32 - sizeof(struct pt_regs));
+ regs = task_pt_regs(child);
switch (addr) {
case 0 ... 31:
@@ -395,7 +375,7 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
break;
case PTRACE_GET_THREAD_AREA:
- ret = put_user(child->thread_info->tp_value,
+ ret = put_user(task_thread_info(child)->tp_value,
(unsigned int __user *) (unsigned long) data);
break;
@@ -409,7 +389,7 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
break;
case PTRACE_GET_THREAD_AREA_3264:
- ret = put_user(child->thread_info->tp_value,
+ ret = put_user(task_thread_info(child)->tp_value,
(unsigned long __user *) (unsigned long) data);
break;
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index c856dbc52ab..98b185bbc94 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -588,7 +588,7 @@ static inline int setup_sigcontext32(struct pt_regs *regs,
err |= __put_user(regs->hi, &sc->sc_mdhi);
err |= __put_user(regs->lo, &sc->sc_mdlo);
if (cpu_has_dsp) {
- err |= __put_user(rddsp(DSP_MASK), &sc->sc_hi1);
+ err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
err |= __put_user(mfhi1(), &sc->sc_hi1);
err |= __put_user(mflo1(), &sc->sc_lo1);
err |= __put_user(mfhi2(), &sc->sc_hi2);
diff --git a/arch/mips/kernel/smp_mt.c b/arch/mips/kernel/smp_mt.c
index d429544ba4b..794a1c3de2a 100644
--- a/arch/mips/kernel/smp_mt.c
+++ b/arch/mips/kernel/smp_mt.c
@@ -287,6 +287,7 @@ void prom_prepare_cpus(unsigned int max_cpus)
*/
void prom_boot_secondary(int cpu, struct task_struct *idle)
{
+ struct thread_info *gp = task_thread_info(idle);
dvpe();
set_c0_mvpcontrol(MVPCONTROL_VPC);
@@ -307,11 +308,9 @@ void prom_boot_secondary(int cpu, struct task_struct *idle)
write_tc_gpr_sp( __KSTK_TOS(idle));
/* global pointer */
- write_tc_gpr_gp((unsigned long)idle->thread_info);
+ write_tc_gpr_gp((unsigned long)gp);
- flush_icache_range((unsigned long)idle->thread_info,
- (unsigned long)idle->thread_info +
- sizeof(struct thread_info));
+ flush_icache_range((unsigned long)gp, (unsigned long)(gp + 1));
/* finally out of configuration and into chaos */
clear_c0_mvpcontrol(MVPCONTROL_VPC);
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index ee98eeb65e8..332358430ff 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -9,6 +9,7 @@
*/
#include <linux/config.h>
#include <linux/a.out.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/mm.h>
@@ -262,7 +263,7 @@ asmlinkage int sys_olduname(struct oldold_utsname * name)
void sys_set_thread_area(unsigned long addr)
{
- struct thread_info *ti = current->thread_info;
+ struct thread_info *ti = task_thread_info(current);
ti->tp_value = addr;
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index 52924f8ce23..0fc3730a294 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/binfmts.h>
+#include <linux/capability.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 787ed541d44..7050b4ffffc 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -507,14 +507,38 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
return IRQ_HANDLED;
}
+int null_perf_irq(struct pt_regs *regs)
+{
+ return 0;
+}
+
+int (*perf_irq)(struct pt_regs *regs) = null_perf_irq;
+
+EXPORT_SYMBOL(null_perf_irq);
+EXPORT_SYMBOL(perf_irq);
+
asmlinkage void ll_timer_interrupt(int irq, struct pt_regs *regs)
{
+ int r2 = cpu_has_mips_r2;
+
irq_enter();
kstat_this_cpu.irqs[irq]++;
+ /*
+ * Suckage alert:
+ * Before R2 of the architecture there was no way to see if a
+ * performance counter interrupt was pending, so we have to run the
+ * performance counter interrupt handler anyway.
+ */
+ if (!r2 || (read_c0_cause() & (1 << 26)))
+ if (perf_irq(regs))
+ goto out;
+
/* we keep interrupt disabled all the time */
- timer_interrupt(irq, NULL, regs);
+ if (!r2 || (read_c0_cause() & (1 << 30)))
+ timer_interrupt(irq, NULL, regs);
+out:
irq_exit();
}
@@ -628,9 +652,9 @@ void __init time_init(void)
mips_hpt_init = c0_hpt_init;
}
- if ((current_cpu_data.isa_level == MIPS_CPU_ISA_M32) ||
- (current_cpu_data.isa_level == MIPS_CPU_ISA_I) ||
- (current_cpu_data.isa_level == MIPS_CPU_ISA_II))
+ if (cpu_has_mips32r1 || cpu_has_mips32r2 ||
+ (current_cpu_data.isa_level == MIPS_CPU_ISA_I) ||
+ (current_cpu_data.isa_level == MIPS_CPU_ISA_II))
/*
* We need to calibrate the counter but we don't have
* 64-bit division.
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 7058893d5ad..59a187956de 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -519,7 +519,7 @@ static inline int simulate_llsc(struct pt_regs *regs)
*/
static inline int simulate_rdhwr(struct pt_regs *regs)
{
- struct thread_info *ti = current->thread_info;
+ struct thread_info *ti = task_thread_info(current);
unsigned int opcode;
if (unlikely(get_insn_opcode(regs, &opcode)))
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 06be405be39..ae83b755cf4 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -99,9 +99,9 @@ struct vpe {
/* elfloader stuff */
void *load_addr;
- u32 len;
+ unsigned long len;
char *pbuffer;
- u32 plen;
+ unsigned long plen;
unsigned long __start;
@@ -253,11 +253,11 @@ void dump_mtregs(void)
}
/* Find some VPE program space */
-static void *alloc_progmem(u32 len)
+static void *alloc_progmem(unsigned long len)
{
#ifdef CONFIG_MIPS_VPE_LOADER_TOM
/* this means you must tell linux to use less memory than you physically have */
- return (void *)((max_pfn * PAGE_SIZE) + KSEG0);
+ return pfn_to_kaddr(max_pfn);
#else
// simple grab some mem for now
return kmalloc(len, GFP_KERNEL);
@@ -1171,7 +1171,8 @@ static int __init vpe_module_init(void)
return -ENODEV;
}
- if ((major = register_chrdev(0, module_name, &vpe_fops) < 0)) {
+ major = register_chrdev(0, module_name, &vpe_fops);
+ if (major < 0) {
printk("VPE loader: unable to register character device\n");
return major;
}
diff --git a/arch/mips/lib/iomap.c b/arch/mips/lib/iomap.c
index b5d5fa83376..7e2ced715cf 100644
--- a/arch/mips/lib/iomap.c
+++ b/arch/mips/lib/iomap.c
@@ -3,7 +3,7 @@
*
* This code is based on lib/iomap.c, by Linus Torvalds.
*
- * Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2004-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/math-emu/dp_fint.c b/arch/mips/math-emu/dp_fint.c
index 0065deaee24..a1962eb460f 100644
--- a/arch/mips/math-emu/dp_fint.c
+++ b/arch/mips/math-emu/dp_fint.c
@@ -33,8 +33,6 @@ ieee754dp ieee754dp_fint(int x)
CLEARCX;
- xc = ( 0 ? xc : xc );
-
if (x == 0)
return ieee754dp_zero(0);
if (x == 1 || x == -1)
diff --git a/arch/mips/math-emu/dp_flong.c b/arch/mips/math-emu/dp_flong.c
index cb105b1dd86..eae90a866aa 100644
--- a/arch/mips/math-emu/dp_flong.c
+++ b/arch/mips/math-emu/dp_flong.c
@@ -33,8 +33,6 @@ ieee754dp ieee754dp_flong(s64 x)
CLEARCX;
- xc = ( 0 ? xc : xc );
-
if (x == 0)
return ieee754dp_zero(0);
if (x == 1 || x == -1)
diff --git a/arch/mips/math-emu/sp_fint.c b/arch/mips/math-emu/sp_fint.c
index 42d9ed4b9a9..7aac13afb09 100644
--- a/arch/mips/math-emu/sp_fint.c
+++ b/arch/mips/math-emu/sp_fint.c
@@ -33,8 +33,6 @@ ieee754sp ieee754sp_fint(int x)
CLEARCX;
- xc = ( 0 ? xc : xc );
-
if (x == 0)
return ieee754sp_zero(0);
if (x == 1 || x == -1)
diff --git a/arch/mips/math-emu/sp_flong.c b/arch/mips/math-emu/sp_flong.c
index 1e26795ccec..3d6c1d11c17 100644
--- a/arch/mips/math-emu/sp_flong.c
+++ b/arch/mips/math-emu/sp_flong.c
@@ -33,8 +33,6 @@ ieee754sp ieee754sp_flong(s64 x)
CLEARCX;
- xc = ( 0 ? xc : xc );
-
if (x == 0)
return ieee754sp_zero(0);
if (x == 1 || x == -1)
diff --git a/arch/mips/mips-boards/generic/time.c b/arch/mips/mips-boards/generic/time.c
index 72a12d931cb..93f3bf2c2b2 100644
--- a/arch/mips/mips-boards/generic/time.c
+++ b/arch/mips/mips-boards/generic/time.c
@@ -75,20 +75,31 @@ static void mips_timer_dispatch (struct pt_regs *regs)
do_IRQ (mips_cpu_timer_irq, regs);
}
+extern int null_perf_irq(struct pt_regs *regs);
+
+extern int (*perf_irq)(struct pt_regs *regs);
+
irqreturn_t mips_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
-#ifdef CONFIG_SMP
+ int r2 = cpu_has_mips_r2;
int cpu = smp_processor_id();
if (cpu == 0) {
/*
- * CPU 0 handles the global timer interrupt job and process accounting
- * resets count/compare registers to trigger next timer int.
+ * CPU 0 handles the global timer interrupt job and process
+ * accounting resets count/compare registers to trigger next
+ * timer int.
*/
- (void) timer_interrupt(irq, dev_id, regs);
+ if (!r2 || (read_c0_cause() & (1 << 26)))
+ if (perf_irq(regs))
+ goto out;
+
+ /* we keep interrupt disabled all the time */
+ if (!r2 || (read_c0_cause() & (1 << 30)))
+ timer_interrupt(irq, NULL, regs);
+
scroll_display_message();
- }
- else {
+ } else {
/* Everyone else needs to reset the timer int here as
ll_local_timer_interrupt doesn't */
/*
@@ -103,16 +114,8 @@ irqreturn_t mips_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
local_timer_interrupt (irq, dev_id, regs);
}
+out:
return IRQ_HANDLED;
-#else
- irqreturn_t r;
-
- r = timer_interrupt(irq, dev_id, regs);
-
- scroll_display_message();
-
- return r;
-#endif
}
/*
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 38223b44d96..422b55fab07 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1183,8 +1183,8 @@ static void __init setup_scache(void)
if (!sc_present)
return;
- if ((c->isa_level == MIPS_CPU_ISA_M32 ||
- c->isa_level == MIPS_CPU_ISA_M64) &&
+ if ((c->isa_level == MIPS_CPU_ISA_M32R1 ||
+ c->isa_level == MIPS_CPU_ISA_M64R1) &&
!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index dd2cc42f1b6..53f9889b30e 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -75,7 +75,10 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
int res;
switch (current_cpu_data.cputype) {
+ case CPU_5KC:
+ case CPU_20KC:
case CPU_24K:
+ case CPU_25KF:
lmodel = &op_model_mipsxx;
break;
diff --git a/arch/mips/oprofile/op_impl.h b/arch/mips/oprofile/op_impl.h
index f0121557047..5cfce7d87a4 100644
--- a/arch/mips/oprofile/op_impl.h
+++ b/arch/mips/oprofile/op_impl.h
@@ -12,8 +12,8 @@
struct pt_regs;
-extern void null_perf_irq(struct pt_regs *regs);
-extern void (*perf_irq)(struct pt_regs *regs);
+extern int null_perf_irq(struct pt_regs *regs);
+extern int (*perf_irq)(struct pt_regs *regs);
/* Per-counter configuration as set via oprofilefs. */
struct op_counter_config {
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index d36b64dfcb2..1d1eee407fa 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -114,11 +114,12 @@ static void mipsxx_cpu_stop(void *args)
}
}
-static void mipsxx_perfcount_handler(struct pt_regs *regs)
+static int mipsxx_perfcount_handler(struct pt_regs *regs)
{
unsigned int counters = op_model_mipsxx.num_counters;
unsigned int control;
unsigned int counter;
+ int handled = 0;
switch (counters) {
#define HANDLE_COUNTER(n) \
@@ -129,12 +130,15 @@ static void mipsxx_perfcount_handler(struct pt_regs *regs)
(counter & M_COUNTER_OVERFLOW)) { \
oprofile_add_sample(regs, n); \
write_c0_perfcntr ## n(reg.counter[n]); \
+ handled = 1; \
}
HANDLE_COUNTER(3)
HANDLE_COUNTER(2)
HANDLE_COUNTER(1)
HANDLE_COUNTER(0)
}
+
+ return handled;
}
#define M_CONFIG1_PC (1 << 4)
@@ -176,17 +180,31 @@ static int __init mipsxx_init(void)
int counters;
counters = n_counters();
- if (counters == 0)
+ if (counters == 0) {
+ printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
return -ENODEV;
+ }
reset_counters(counters);
op_model_mipsxx.num_counters = counters;
switch (current_cpu_data.cputype) {
+ case CPU_20KC:
+ op_model_mipsxx.cpu_type = "mips/20K";
+ break;
+
case CPU_24K:
op_model_mipsxx.cpu_type = "mips/24K";
break;
+ case CPU_25KF:
+ op_model_mipsxx.cpu_type = "mips/25K";
+ break;
+
+ case CPU_5KC:
+ op_model_mipsxx.cpu_type = "mips/5K";
+ break;
+
default:
printk(KERN_ERR "Profiling unsupported for this CPU\n");
diff --git a/arch/mips/pci/fixup-capcella.c b/arch/mips/pci/fixup-capcella.c
index f2fc82c1c7c..1e530751936 100644
--- a/arch/mips/pci/fixup-capcella.c
+++ b/arch/mips/pci/fixup-capcella.c
@@ -1,7 +1,7 @@
/*
 * fixup-capcella.c, The ZAO Networks Capcella specific PCI fixups.
*
- * Copyright (C) 2002,2004 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2002,2004 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pci/fixup-mpc30x.c b/arch/mips/pci/fixup-mpc30x.c
index 4975846da75..b67ddaa4712 100644
--- a/arch/mips/pci/fixup-mpc30x.c
+++ b/arch/mips/pci/fixup-mpc30x.c
@@ -1,7 +1,7 @@
/*
* fixup-mpc30x.c, The Victor MP-C303/304 specific PCI fixups.
*
- * Copyright (C) 2002,2004 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2002,2004 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pci/fixup-tb0219.c b/arch/mips/pci/fixup-tb0219.c
index bc55b06e190..734f2b71e16 100644
--- a/arch/mips/pci/fixup-tb0219.c
+++ b/arch/mips/pci/fixup-tb0219.c
@@ -2,7 +2,7 @@
* fixup-tb0219.c, The TANBAC TB0219 specific PCI fixups.
*
* Copyright (C) 2003 Megasolution Inc. <matsu@megasolution.jp>
- * Copyright (C) 2004 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2004 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pci/fixup-tb0226.c b/arch/mips/pci/fixup-tb0226.c
index b5d42b12de1..c9e7cb4361a 100644
--- a/arch/mips/pci/fixup-tb0226.c
+++ b/arch/mips/pci/fixup-tb0226.c
@@ -1,7 +1,7 @@
/*
* fixup-tb0226.c, The TANBAC TB0226 specific PCI fixups.
*
- * Copyright (C) 2002-2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2002-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pci/fixup-tb0287.c b/arch/mips/pci/fixup-tb0287.c
index 8436d7f1fdb..fbe6bcb2819 100644
--- a/arch/mips/pci/fixup-tb0287.c
+++ b/arch/mips/pci/fixup-tb0287.c
@@ -1,7 +1,7 @@
/*
* fixup-tb0287.c, The TANBAC TB0287 specific PCI fixups.
*
- * Copyright (C) 2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pci/ops-vr41xx.c b/arch/mips/pci/ops-vr41xx.c
index 430429b22ae..900c6b32576 100644
--- a/arch/mips/pci/ops-vr41xx.c
+++ b/arch/mips/pci/ops-vr41xx.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2001-2003 MontaVista Software Inc.
* Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com>
- * Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2004-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pci/pci-vr41xx.c b/arch/mips/pci/pci-vr41xx.c
index 91df4da7ddb..9885fa40360 100644
--- a/arch/mips/pci/pci-vr41xx.c
+++ b/arch/mips/pci/pci-vr41xx.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2001-2003 MontaVista Software Inc.
* Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com>
- * Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2004-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
* Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
*
* This program is free software; you can redistribute it and/or modify
diff --git a/arch/mips/pci/pci-vr41xx.h b/arch/mips/pci/pci-vr41xx.h
index e087ec55641..8a35e32b837 100644
--- a/arch/mips/pci/pci-vr41xx.h
+++ b/arch/mips/pci/pci-vr41xx.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2002 MontaVista Software Inc.
* Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com>
- * Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2004-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index 0527170d6ad..f17f575f58f 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -93,8 +93,8 @@ void __init prom_prepare_cpus(unsigned int max_cpus)
*/
void prom_boot_secondary(int cpu, struct task_struct *idle)
{
- unsigned long gp = (unsigned long) idle->thread_info;
- unsigned long sp = gp + THREAD_SIZE - 32;
+ unsigned long gp = (unsigned long) task_thread_info(idle);
+ unsigned long sp = __KSTK_TOP(idle);
secondary_sp = sp;
secondary_gp = gp;
diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c
index 07631a97670..ce907eda221 100644
--- a/arch/mips/sgi-ip27/ip27-berr.c
+++ b/arch/mips/sgi-ip27/ip27-berr.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/signal.h> /* for SIGBUS */
+#include <linux/sched.h>	/* show_regs(), force_sig() */
#include <asm/module.h>
#include <asm/sn/addrs.h>
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index 3a8291b7d26..dbef3f6b565 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -168,8 +168,8 @@ void __init prom_prepare_cpus(unsigned int max_cpus)
*/
void __init prom_boot_secondary(int cpu, struct task_struct *idle)
{
- unsigned long gp = (unsigned long) idle->thread_info;
- unsigned long sp = gp + THREAD_SIZE - 32;
+ unsigned long gp = (unsigned long)task_thread_info(idle);
+ unsigned long sp = __KSTK_TOS(idle);
LAUNCH_SLAVE(cputonasid(cpu),cputoslice(cpu),
(launch_proc_t)MAPPED_KERN_RW_TO_K0(smp_bootstrap),
diff --git a/arch/mips/sibyte/cfe/smp.c b/arch/mips/sibyte/cfe/smp.c
index e8485124b8f..4477af3d807 100644
--- a/arch/mips/sibyte/cfe/smp.c
+++ b/arch/mips/sibyte/cfe/smp.c
@@ -60,7 +60,7 @@ void prom_boot_secondary(int cpu, struct task_struct *idle)
retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap,
__KSTK_TOS(idle),
- (unsigned long)idle->thread_info, 0);
+ (unsigned long)task_thread_info(idle), 0);
if (retval != 0)
printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval);
}
diff --git a/arch/mips/vr41xx/casio-e55/setup.c b/arch/mips/vr41xx/casio-e55/setup.c
index d29201acc4f..814900915c2 100644
--- a/arch/mips/vr41xx/casio-e55/setup.c
+++ b/arch/mips/vr41xx/casio-e55/setup.c
@@ -1,7 +1,7 @@
/*
* setup.c, Setup for the CASIO CASSIOPEIA E-11/15/55/65.
*
- * Copyright (C) 2002-2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2002-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/vr41xx/common/bcu.c b/arch/mips/vr41xx/common/bcu.c
index cdfa4273a1c..de0c1b35f11 100644
--- a/arch/mips/vr41xx/common/bcu.c
+++ b/arch/mips/vr41xx/common/bcu.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2002 MontaVista Software Inc.
* Author: Yoichi Yuasa <yyuasa@mvista.com, or source@mvista.com>
- * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -25,7 +25,7 @@
* - New creation, NEC VR4122 and VR4131 are supported.
* - Added support for NEC VR4111 and VR4121.
*
- * Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
* - Added support for NEC VR4133.
*/
#include <linux/kernel.h>
diff --git a/arch/mips/vr41xx/common/cmu.c b/arch/mips/vr41xx/common/cmu.c
index d758e432961..657c5133c93 100644
--- a/arch/mips/vr41xx/common/cmu.c
+++ b/arch/mips/vr41xx/common/cmu.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2001-2002 MontaVista Software Inc.
* Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com>
- * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -25,7 +25,7 @@
* - New creation, NEC VR4122 and VR4131 are supported.
* - Added support for NEC VR4111 and VR4121.
*
- * Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
* - Added support for NEC VR4133.
*/
#include <linux/init.h>
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
index 0b73c5ab3c0..07ae19cf0c2 100644
--- a/arch/mips/vr41xx/common/icu.c
+++ b/arch/mips/vr41xx/common/icu.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2001-2002 MontaVista Software Inc.
* Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com>
- * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -25,7 +25,7 @@
* - New creation, NEC VR4122 and VR4131 are supported.
* - Added support for NEC VR4111 and VR4121.
*
- * Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
* - Coped with INTASSIGN of NEC VR4133.
*/
#include <linux/errno.h>
diff --git a/arch/mips/vr41xx/common/init.c b/arch/mips/vr41xx/common/init.c
index 578f6496ffd..707bd0933ee 100644
--- a/arch/mips/vr41xx/common/init.c
+++ b/arch/mips/vr41xx/common/init.c
@@ -1,7 +1,7 @@
/*
* init.c, Common initialization routines for NEC VR4100 series.
*
- * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/vr41xx/common/int-handler.S b/arch/mips/vr41xx/common/int-handler.S
index 272c13aee4f..2b6043f16d0 100644
--- a/arch/mips/vr41xx/common/int-handler.S
+++ b/arch/mips/vr41xx/common/int-handler.S
@@ -35,7 +35,7 @@
* MontaVista Software Inc. <yyuasa@mvista.com> or <source@mvista.com>
* - New creation, NEC VR4100 series are supported.
*
- * Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
* - Coped with INTASSIGN of NEC VR4133.
*/
#include <asm/asm.h>
diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
index 43b214d3943..61aa264275f 100644
--- a/arch/mips/vr41xx/common/irq.c
+++ b/arch/mips/vr41xx/common/irq.c
@@ -1,7 +1,7 @@
/*
 * Interrupt handling routines for NEC VR4100 series.
*
- * Copyright (C) 2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/vr41xx/common/pmu.c b/arch/mips/vr41xx/common/pmu.c
index 53166f3598b..02bf4f7d06b 100644
--- a/arch/mips/vr41xx/common/pmu.c
+++ b/arch/mips/vr41xx/common/pmu.c
@@ -1,7 +1,7 @@
/*
* pmu.c, Power Management Unit routines for NEC VR4100 series.
*
- * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/vr41xx/common/type.c b/arch/mips/vr41xx/common/type.c
index bcb5f71b502..e0c1ac5e988 100644
--- a/arch/mips/vr41xx/common/type.c
+++ b/arch/mips/vr41xx/common/type.c
@@ -1,7 +1,7 @@
/*
* type.c, System type for NEC VR4100 series.
*
- * Copyright (C) 2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/vr41xx/common/vrc4173.c b/arch/mips/vr41xx/common/vrc4173.c
index 462a9af30ee..3e31f8193d2 100644
--- a/arch/mips/vr41xx/common/vrc4173.c
+++ b/arch/mips/vr41xx/common/vrc4173.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2001-2003 MontaVista Software Inc.
* Author: Yoichi Yuasa <yyuasa@mvista.com, or source@mvista.com>
- * Copyright (C) 2004 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2004 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
* Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
*
* This program is free software; you can redistribute it and/or modify
@@ -561,7 +561,7 @@ static int __devinit vrc4173_init(void)
{
int err;
- err = pci_module_init(&vrc4173_driver);
+ err = pci_register_driver(&vrc4173_driver);
if (err < 0)
return err;
diff --git a/arch/mips/vr41xx/ibm-workpad/setup.c b/arch/mips/vr41xx/ibm-workpad/setup.c
index e4b34ad6ea6..50fe8af4c52 100644
--- a/arch/mips/vr41xx/ibm-workpad/setup.c
+++ b/arch/mips/vr41xx/ibm-workpad/setup.c
@@ -1,7 +1,7 @@
/*
* setup.c, Setup for the IBM WorkPad z50.
*
- * Copyright (C) 2002-2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2002-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 874a283edb9..e77a06e9621 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -19,9 +19,6 @@ config MMU
config STACK_GROWSUP
def_bool y
-config UID16
- bool
-
config RWSEM_GENERIC_SPINLOCK
def_bool y
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c
index a64fd48fbfb..29b4d61898f 100644
--- a/arch/parisc/hpux/sys_hpux.c
+++ b/arch/parisc/hpux/sys_hpux.c
@@ -22,6 +22,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/namei.h>
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index 171f9c239f6..27827bc3717 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -6,7 +6,6 @@ extra-y := init_task.o head.o vmlinux.lds
AFLAGS_entry.o := -traditional
AFLAGS_pacache.o := -traditional
-CFLAGS_ioctl32.o := -Ifs/
obj-y := cache.o pacache.o setup.o traps.o time.o irq.o \
pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
@@ -19,6 +18,6 @@ obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PA11) += pci-dma.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_64BIT) += binfmt_elf32.o sys_parisc32.o ioctl32.o signal32.o
+obj-$(CONFIG_64BIT) += binfmt_elf32.o sys_parisc32.o signal32.o
# only supported for PCX-W/U in 64-bit mode at the moment
obj-$(CONFIG_64BIT) += perf.o perf_asm.o
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index a065349aee3..d8a4ca021aa 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -29,9 +29,9 @@
#include <asm/processor.h>
#include <asm/sections.h>
-int split_tlb;
-int dcache_stride;
-int icache_stride;
+int split_tlb __read_mostly;
+int dcache_stride __read_mostly;
+int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);
@@ -45,29 +45,29 @@ DEFINE_SPINLOCK(pa_tlb_lock);
EXPORT_SYMBOL(pa_tlb_lock);
#endif
-struct pdc_cache_info cache_info;
+struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
-static struct pdc_btlb_info btlb_info;
+static struct pdc_btlb_info btlb_info __read_mostly;
#endif
#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
- on_each_cpu((void (*)(void *))flush_data_cache_local, NULL, 1, 1);
+ on_each_cpu(flush_data_cache_local, NULL, 1, 1);
}
void
flush_instruction_cache(void)
{
- on_each_cpu((void (*)(void *))flush_instruction_cache_local, NULL, 1, 1);
+ on_each_cpu(flush_instruction_cache_local, NULL, 1, 1);
}
#endif
void
flush_cache_all_local(void)
{
- flush_instruction_cache_local();
- flush_data_cache_local();
+ flush_instruction_cache_local(NULL);
+ flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);
@@ -332,7 +332,7 @@ void clear_user_page_asm(void *page, unsigned long vaddr)
}
#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
-int parisc_cache_flush_threshold = FLUSH_THRESHOLD;
+int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
void parisc_setup_cache_timing(void)
{
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index d016d672ec2..1eaa0d37f67 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -39,7 +39,7 @@
#include <asm/parisc-device.h>
/* See comments in include/asm-parisc/pci.h */
-struct hppa_dma_ops *hppa_dma_ops;
+struct hppa_dma_ops *hppa_dma_ops __read_mostly;
EXPORT_SYMBOL(hppa_dma_ops);
static struct device root = {
@@ -515,8 +515,13 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
(iodc_data[5] << 8) | iodc_data[6];
dev->hpa.name = parisc_pathname(dev);
dev->hpa.start = hpa;
- if (hpa == 0xf4000000 || hpa == 0xf6000000 ||
- hpa == 0xf8000000 || hpa == 0xfa000000) {
+ /* This is awkward. The STI spec says that gfx devices may occupy
+ * 32MB or 64MB. Unfortunately, we don't know how to tell whether
+ * it's the former or the latter. Assumptions either way can hurt us.
+ */
+ if (hpa == 0xf4000000 || hpa == 0xf8000000) {
+ dev->hpa.end = hpa + 0x03ffffff;
+ } else if (hpa == 0xf6000000 || hpa == 0xfa000000) {
dev->hpa.end = hpa + 0x01ffffff;
} else {
dev->hpa.end = hpa + 0xfff;
@@ -834,7 +839,7 @@ static void print_parisc_device(struct parisc_device *dev)
if (dev->num_addrs) {
int k;
- printk(", additional addresses: ");
+ printk(", additional addresses: ");
for (k = 0; k < dev->num_addrs; k++)
printk("0x%lx ", dev->addr[k]);
}
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index 553f8fe0322..2dc06b8e181 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -80,7 +80,7 @@ static unsigned long pdc_result2[32] __attribute__ ((aligned (8)));
/* Firmware needs to be initially set to narrow to determine the
* actual firmware width. */
-int parisc_narrow_firmware = 1;
+int parisc_narrow_firmware __read_mostly = 1;
#endif
/* On most currently-supported platforms, IODC I/O calls are 32-bit calls
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
index 2071b5bba15..3058bffd8a2 100644
--- a/arch/parisc/kernel/hardware.c
+++ b/arch/parisc/kernel/hardware.c
@@ -551,6 +551,7 @@ static struct hp_hardware hp_hardware_list[] __initdata = {
{HPHW_BCPORT, 0x804, 0x0000C, 0x10, "REO I/O BC Merced Port"},
{HPHW_BCPORT, 0x782, 0x0000C, 0x00, "REO I/O BC Ropes Port"},
{HPHW_BCPORT, 0x784, 0x0000C, 0x00, "Pluto I/O BC Ropes Port"},
+ {HPHW_BRIDGE, 0x05D, 0x0000A, 0x00, "SummitHawk Dino PCI Bridge"},
{HPHW_BRIDGE, 0x680, 0x0000A, 0x00, "Dino PCI Bridge"},
{HPHW_BRIDGE, 0x682, 0x0000A, 0x00, "Cujo PCI Bridge"},
{HPHW_BRIDGE, 0x782, 0x0000A, 0x00, "Elroy PCI Bridge"},
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index 8f563871e83..4e847ba5318 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -38,7 +38,7 @@
*/
#undef DEBUG_PAT
-int pdc_type = PDC_TYPE_ILLEGAL;
+int pdc_type __read_mostly = PDC_TYPE_ILLEGAL;
void __init setup_pdc(void)
{
@@ -120,8 +120,8 @@ set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
* pdc info is bad in this case).
*/
- if ( ((start & (PAGE_SIZE - 1)) != 0)
- || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) ) {
+ if (unlikely( ((start & (PAGE_SIZE - 1)) != 0)
+ || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) {
panic("Memory range doesn't align with page size!\n");
}
diff --git a/arch/parisc/kernel/ioctl32.c b/arch/parisc/kernel/ioctl32.c
deleted file mode 100644
index 4eada1bb27f..00000000000
--- a/arch/parisc/kernel/ioctl32.c
+++ /dev/null
@@ -1,60 +0,0 @@
-/* $Id: ioctl32.c,v 1.5 2002/10/18 00:21:43 varenet Exp $
- * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
- *
- * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
- * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
- *
- * These routines maintain argument size conversion between 32bit and 64bit
- * ioctls.
- */
-
-#include <linux/syscalls.h>
-
-#define INCLUDES
-#include "compat_ioctl.c"
-
-#include <asm/perf.h>
-#include <asm/ioctls.h>
-
-#define CODE
-#include "compat_ioctl.c"
-
-#define HANDLE_IOCTL(cmd, handler) { cmd, (ioctl_trans_handler_t)handler, NULL },
-#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL(cmd, sys_ioctl)
-
-#define IOCTL_TABLE_START struct ioctl_trans ioctl_start[] = {
-#define IOCTL_TABLE_END };
-
-IOCTL_TABLE_START
-#include <linux/compat_ioctl.h>
-
-#define DECLARES
-#include "compat_ioctl.c"
-
-/* And these ioctls need translation */
-HANDLE_IOCTL(SIOCGPPPSTATS, dev_ifsioc)
-HANDLE_IOCTL(SIOCGPPPCSTATS, dev_ifsioc)
-HANDLE_IOCTL(SIOCGPPPVER, dev_ifsioc)
-
-#if defined(CONFIG_GEN_RTC)
-COMPATIBLE_IOCTL(RTC_AIE_ON)
-COMPATIBLE_IOCTL(RTC_AIE_OFF)
-COMPATIBLE_IOCTL(RTC_UIE_ON)
-COMPATIBLE_IOCTL(RTC_UIE_OFF)
-COMPATIBLE_IOCTL(RTC_PIE_ON)
-COMPATIBLE_IOCTL(RTC_PIE_OFF)
-COMPATIBLE_IOCTL(RTC_WIE_ON)
-COMPATIBLE_IOCTL(RTC_WIE_OFF)
-COMPATIBLE_IOCTL(RTC_ALM_SET) /* struct rtc_time only has ints */
-COMPATIBLE_IOCTL(RTC_ALM_READ) /* struct rtc_time only has ints */
-COMPATIBLE_IOCTL(RTC_RD_TIME) /* struct rtc_time only has ints */
-COMPATIBLE_IOCTL(RTC_SET_TIME) /* struct rtc_time only has ints */
-HANDLE_IOCTL(RTC_IRQP_READ, w_long)
-COMPATIBLE_IOCTL(RTC_IRQP_SET)
-HANDLE_IOCTL(RTC_EPOCH_READ, w_long)
-COMPATIBLE_IOCTL(RTC_EPOCH_SET)
-#endif
-
-IOCTL_TABLE_END
-
-int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index f94a02ef3d9..a6caf107308 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -33,10 +33,10 @@
#include <asm/uaccess.h>
#include <asm/tlbflush.h> /* for purge_tlb_*() macros */
-static struct proc_dir_entry * proc_gsc_root = NULL;
+static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
static int pcxl_proc_info(char *buffer, char **start, off_t offset, int length);
-static unsigned long pcxl_used_bytes = 0;
-static unsigned long pcxl_used_pages = 0;
+static unsigned long pcxl_used_bytes __read_mostly = 0;
+static unsigned long pcxl_used_pages __read_mostly = 0;
extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
static spinlock_t pcxl_res_lock;
diff --git a/arch/parisc/kernel/pdc_chassis.c b/arch/parisc/kernel/pdc_chassis.c
index 52004ae28d2..2a01fe1bdc9 100644
--- a/arch/parisc/kernel/pdc_chassis.c
+++ b/arch/parisc/kernel/pdc_chassis.c
@@ -30,6 +30,7 @@
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
+#include <linux/cache.h>
#include <asm/pdc_chassis.h>
#include <asm/processor.h>
@@ -38,8 +39,8 @@
#ifdef CONFIG_PDC_CHASSIS
-static int pdc_chassis_old = 0;
-static unsigned int pdc_chassis_enabled = 1;
+static int pdc_chassis_old __read_mostly = 0;
+static unsigned int pdc_chassis_enabled __read_mostly = 1;
/**
@@ -132,7 +133,7 @@ void __init parisc_pdc_chassis_init(void)
{
#ifdef CONFIG_PDC_CHASSIS
int handle = 0;
- if (pdc_chassis_enabled) {
+ if (likely(pdc_chassis_enabled)) {
DPRINTK(KERN_DEBUG "%s: parisc_pdc_chassis_init()\n", __FILE__);
/* Let see if we have something to handle... */
@@ -142,7 +143,7 @@ void __init parisc_pdc_chassis_init(void)
printk(KERN_INFO "Enabling PDC_PAT chassis codes support.\n");
handle = 1;
}
- else if (pdc_chassis_old) {
+ else if (unlikely(pdc_chassis_old)) {
printk(KERN_INFO "Enabling old style chassis LED panel support.\n");
handle = 1;
}
@@ -178,7 +179,7 @@ int pdc_chassis_send_status(int message)
/* Maybe we should do that in an other way ? */
int retval = 0;
#ifdef CONFIG_PDC_CHASSIS
- if (pdc_chassis_enabled) {
+ if (likely(pdc_chassis_enabled)) {
DPRINTK(KERN_DEBUG "%s: pdc_chassis_send_status(%d)\n", __FILE__, message);
@@ -214,7 +215,7 @@ int pdc_chassis_send_status(int message)
}
} else retval = -1;
#else
- if (pdc_chassis_old) {
+ if (unlikely(pdc_chassis_old)) {
switch (message) {
case PDC_CHASSIS_DIRECT_BSTART:
case PDC_CHASSIS_DIRECT_BCOMPLETE:
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index f6fec62b6a2..11d406cd0b3 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -42,6 +42,7 @@
* on every box.
*/
+#include <linux/capability.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/miscdevice.h>
@@ -66,10 +67,10 @@ struct rdr_tbl_ent {
uint8_t write_control;
};
-static int perf_processor_interface = UNKNOWN_INTF;
-static int perf_enabled = 0;
+static int perf_processor_interface __read_mostly = UNKNOWN_INTF;
+static int perf_enabled __read_mostly = 0;
static spinlock_t perf_lock;
-struct parisc_device *cpu_device = NULL;
+struct parisc_device *cpu_device __read_mostly = NULL;
/* RDRs to write for PCX-W */
static int perf_rdrs_W[] =
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index fee4f1f09ad..5da41677e70 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -54,7 +54,7 @@
#include <asm/uaccess.h>
#include <asm/unwind.h>
-static int hlt_counter;
+static int hlt_counter __read_mostly;
/*
* Power off function, if any
@@ -295,7 +295,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
struct task_struct * p, struct pt_regs * pregs)
{
struct pt_regs * cregs = &(p->thread.regs);
- struct thread_info *ti = p->thread_info;
+ void *stack = task_stack_page(p);
/* We have to use void * instead of a function pointer, because
* function pointers aren't a pointer to the function on 64-bit.
@@ -322,7 +322,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
*/
if (usp == 1) {
/* kernel thread */
- cregs->ksp = (((unsigned long)(ti)) + THREAD_SZ_ALGN);
+ cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN;
/* Must exit via ret_from_kernel_thread in order
* to call schedule_tail()
*/
@@ -344,7 +344,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
*/
/* Use same stack depth as parent */
- cregs->ksp = ((unsigned long)(ti))
+ cregs->ksp = (unsigned long)stack
+ (pregs->gr[21] & (THREAD_SIZE - 1));
cregs->gr[30] = usp;
if (p->personality == PER_HPUX) {
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 4f5bbcf1f5a..6df9f62cecb 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -44,10 +44,10 @@
#include <asm/irq.h> /* for struct irq_region */
#include <asm/parisc-device.h>
-struct system_cpuinfo_parisc boot_cpu_data;
+struct system_cpuinfo_parisc boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);
-struct cpuinfo_parisc cpu_data[NR_CPUS];
+struct cpuinfo_parisc cpu_data[NR_CPUS] __read_mostly;
/*
** PARISC CPU driver - claim "device" and initialize CPU data structures.
@@ -378,12 +378,12 @@ show_cpuinfo (struct seq_file *m, void *v)
return 0;
}
-static struct parisc_device_id processor_tbl[] = {
+static struct parisc_device_id processor_tbl[] __read_mostly = {
{ HPHW_NPROC, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, SVERSION_ANY_ID },
{ 0, }
};
-static struct parisc_driver cpu_driver = {
+static struct parisc_driver cpu_driver __read_mostly = {
.name = "CPU",
.id_table = processor_tbl,
.probe = processor_probe
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 73e9c34b094..4a36ec3f6ac 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -46,15 +46,15 @@
#include <asm/io.h>
#include <asm/setup.h>
-char command_line[COMMAND_LINE_SIZE];
+char command_line[COMMAND_LINE_SIZE] __read_mostly;
/* Intended for ccio/sba/cpu statistics under /proc/bus/{runway|gsc} */
-struct proc_dir_entry * proc_runway_root = NULL;
-struct proc_dir_entry * proc_gsc_root = NULL;
-struct proc_dir_entry * proc_mckinley_root = NULL;
+struct proc_dir_entry * proc_runway_root __read_mostly = NULL;
+struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
+struct proc_dir_entry * proc_mckinley_root __read_mostly = NULL;
#if !defined(CONFIG_PA20) && (defined(CONFIG_IOMMU_CCIO) || defined(CONFIG_IOMMU_SBA))
-int parisc_bus_is_phys = 1; /* Assume no IOMMU is present */
+int parisc_bus_is_phys __read_mostly = 1; /* Assume no IOMMU is present */
EXPORT_SYMBOL(parisc_bus_is_phys);
#endif
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index ce89da0f654..25564b7ca6b 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -39,7 +39,7 @@
#include <asm/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
-#include <asm/pgalloc.h> /* for flush_tlb_all() proto/macro */
+#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/irq.h> /* for CPU_IRQ_REGION and friends */
@@ -58,9 +58,9 @@ DEFINE_SPINLOCK(smp_lock);
volatile struct task_struct *smp_init_current_idle_task;
-static volatile int cpu_now_booting = 0; /* track which CPU is booting */
+static volatile int cpu_now_booting __read_mostly = 0; /* track which CPU is booting */
-static int parisc_max_cpus = 1;
+static int parisc_max_cpus __read_mostly = 1;
/* online cpus are ones that we've managed to bring up completely
* possible cpus are all valid cpu
@@ -71,8 +71,8 @@ static int parisc_max_cpus = 1;
* empty in the beginning.
*/
-cpumask_t cpu_online_map = CPU_MASK_NONE; /* Bitmap of online CPUs */
-cpumask_t cpu_possible_map = CPU_MASK_ALL; /* Bitmap of Present CPUs */
+cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; /* Bitmap of online CPUs */
+cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; /* Bitmap of Present CPUs */
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
@@ -406,12 +406,10 @@ EXPORT_SYMBOL(smp_call_function);
* as we want to ensure all TLB's flushed before proceeding.
*/
-extern void flush_tlb_all_local(void);
-
void
smp_flush_tlb_all(void)
{
- on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
+ on_each_cpu(flush_tlb_all_local, NULL, 1, 1);
}
@@ -487,7 +485,7 @@ void __init smp_callin(void)
#endif
flush_cache_all_local(); /* start with known state */
- flush_tlb_all_local();
+ flush_tlb_all_local(NULL);
local_irq_enable(); /* Interrupts have been off until now */
@@ -519,7 +517,7 @@ int __init smp_boot_one_cpu(int cpuid)
if (IS_ERR(idle))
panic("SMP: fork failed for CPU:%d", cpuid);
- idle->thread_info->cpu = cpuid;
+ task_thread_info(idle)->cpu = cpuid;
/* Let _start know what logical CPU we're booting
** (offset into init_tasks[],cpu_data[])
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index cded2568078..594930bc4bc 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -36,8 +36,8 @@
/* xtime and wall_jiffies keep wall-clock time */
extern unsigned long wall_jiffies;
-static long clocktick; /* timer cycles per tick */
-static long halftick;
+static long clocktick __read_mostly; /* timer cycles per tick */
+static long halftick __read_mostly;
#ifdef CONFIG_SMP
extern void smp_do_timer(struct pt_regs *regs);
diff --git a/arch/parisc/kernel/topology.c b/arch/parisc/kernel/topology.c
index ac2a4068141..3ba040050e4 100644
--- a/arch/parisc/kernel/topology.c
+++ b/arch/parisc/kernel/topology.c
@@ -20,8 +20,9 @@
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>
+#include <linux/cache.h>
-static struct cpu cpu_devices[NR_CPUS];
+static struct cpu cpu_devices[NR_CPUS] __read_mostly;
static int __init topology_init(void)
{
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index eaae8a021f9..de0a1b21cb4 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -122,7 +122,7 @@
#define ERR_NOTHANDLED -1
#define ERR_PAGEFAULT -2
-int unaligned_enabled = 1;
+int unaligned_enabled __read_mostly = 1;
void die_if_kernel (char *str, struct pt_regs *regs, long err);
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index db141108412..cc1c1afc318 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -35,7 +35,7 @@ static spinlock_t unwind_lock;
* we can call unwind_init as early in the bootup process as
* possible (before the slab allocator is initialized)
*/
-static struct unwind_table kernel_unwind_table;
+static struct unwind_table kernel_unwind_table __read_mostly;
static LIST_HEAD(unwind_tables);
static inline const struct unwind_table_entry *
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index e5fac3e08c7..6d6436a6b62 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -68,7 +68,7 @@ SECTIONS
RODATA
/* writeable */
- . = ALIGN(4096); /* Make sure this is paged aligned so
+ . = ALIGN(4096); /* Make sure this is page aligned so
that we can properly leave these
as writable */
data_start = .;
@@ -105,6 +105,10 @@ SECTIONS
. = ALIGN(16);
.data.lock_aligned : { *(.data.lock_aligned) }
+ /* rarely changed data like cpu maps */
+ . = ALIGN(16);
+ .data.read_mostly : { *(.data.read_mostly) }
+
_edata = .; /* End of data section */
. = ALIGN(16384); /* init_task */
@@ -194,14 +198,7 @@ SECTIONS
#endif
}
- /* Stabs debugging sections. */
- .stab 0 : { *(.stab) }
- .stabstr 0 : { *(.stabstr) }
- .stab.excl 0 : { *(.stab.excl) }
- .stab.exclstr 0 : { *(.stab.exclstr) }
- .stab.index 0 : { *(.stab.index) }
- .stab.indexstr 0 : { *(.stab.indexstr) }
- .comment 0 : { *(.comment) }
+ STABS_DEBUG
.note 0 : { *(.note) }
}
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 29b998e430e..720287d46e5 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -36,9 +36,9 @@ extern char _end; /* end of BSS, defined by linker */
extern char __init_begin, __init_end;
#ifdef CONFIG_DISCONTIGMEM
-struct node_map_data node_data[MAX_NUMNODES];
-bootmem_data_t bmem_data[MAX_NUMNODES];
-unsigned char pfnnid_map[PFNNID_MAP_MAX];
+struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
+bootmem_data_t bmem_data[MAX_NUMNODES] __read_mostly;
+unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif
static struct resource data_resource = {
@@ -58,14 +58,14 @@ static struct resource pdcdata_resource = {
.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};
-static struct resource sysram_resources[MAX_PHYSMEM_RANGES];
+static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;
/* The following array is initialized from the firmware specific
* information retrieved in kernel/inventory.c.
*/
-physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES];
-int npmem_ranges;
+physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
+int npmem_ranges __read_mostly;
#ifdef __LP64__
#define MAX_MEM (~0UL)
@@ -73,7 +73,7 @@ int npmem_ranges;
#define MAX_MEM (3584U*1024U*1024U)
#endif /* !__LP64__ */
-static unsigned long mem_limit = MAX_MEM;
+static unsigned long mem_limit __read_mostly = MAX_MEM;
static void __init mem_limit_func(void)
{
@@ -300,6 +300,13 @@ static void __init setup_bootmem(void)
max_pfn = start_pfn + npages;
}
+ /* IOMMU is always used to access "high mem" on those boxes
+ * that can support enough mem that a PCI device couldn't
+ * directly DMA to any physical addresses.
+ * ISA DMA support will need to revisit this.
+ */
+ max_low_pfn = max_pfn;
+
if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) {
printk(KERN_WARNING "WARNING! bootmap sizing is messed up!\n");
BUG();
@@ -431,11 +438,11 @@ void free_initmem(void)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
& ~(VM_MAP_OFFSET-1)))
-void *vmalloc_start;
+void *vmalloc_start __read_mostly;
EXPORT_SYMBOL(vmalloc_start);
#ifdef CONFIG_PA11
-unsigned long pcxl_dma_start;
+unsigned long pcxl_dma_start __read_mostly;
#endif
void __init mem_init(void)
@@ -475,7 +482,7 @@ int do_check_pgt_cache(int low, int high)
return 0;
}
-unsigned long *empty_zero_page;
+unsigned long *empty_zero_page __read_mostly;
void show_mem(void)
{
@@ -998,7 +1005,7 @@ void flush_tlb_all(void)
void flush_tlb_all(void)
{
spin_lock(&sid_lock);
- flush_tlb_all_local();
+ flush_tlb_all_local(NULL);
recycle_sids();
spin_unlock(&sid_lock);
}
diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
index 5c7a1b3b932..edd9a9559cb 100644
--- a/arch/parisc/mm/ioremap.c
+++ b/arch/parisc/mm/ioremap.c
@@ -1,12 +1,9 @@
/*
* arch/parisc/mm/ioremap.c
*
- * Re-map IO memory to kernel address space so that we can access it.
- * This is needed for high PCI addresses that aren't mapped in the
- * 640k-1MB IO memory area on PC's
- *
* (C) Copyright 1995 1996 Linus Torvalds
* (C) Copyright 2001 Helge Deller <deller@gmx.de>
+ * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
*/
#include <linux/vmalloc.h>
@@ -14,81 +11,107 @@
#include <linux/module.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
-static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
- unsigned long phys_addr, unsigned long flags)
+static inline void
+remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
+ unsigned long phys_addr, unsigned long flags)
{
- unsigned long end;
+ unsigned long end, pfn;
+ pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
+ _PAGE_ACCESSED | flags);
address &= ~PMD_MASK;
+
end = address + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
- if (address >= end)
- BUG();
+
+ BUG_ON(address >= end);
+
+ pfn = phys_addr >> PAGE_SHIFT;
do {
- if (!pte_none(*pte)) {
- printk(KERN_ERR "remap_area_pte: page already exists\n");
- BUG();
- }
- set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW |
- _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
+ BUG_ON(!pte_none(*pte));
+
+ set_pte(pte, pfn_pte(pfn, pgprot));
+
address += PAGE_SIZE;
- phys_addr += PAGE_SIZE;
+ pfn++;
pte++;
} while (address && (address < end));
}
-static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
- unsigned long phys_addr, unsigned long flags)
+static inline int
+remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
+ unsigned long phys_addr, unsigned long flags)
{
unsigned long end;
address &= ~PGDIR_MASK;
+
end = address + size;
if (end > PGDIR_SIZE)
end = PGDIR_SIZE;
+
+ BUG_ON(address >= end);
+
phys_addr -= address;
- if (address >= end)
- BUG();
do {
- pte_t * pte = pte_alloc_kernel(pmd, address);
+ pte_t *pte = pte_alloc_kernel(pmd, address);
if (!pte)
return -ENOMEM;
- remap_area_pte(pte, address, end - address, address + phys_addr, flags);
+
+ remap_area_pte(pte, address, end - address,
+ address + phys_addr, flags);
+
address = (address + PMD_SIZE) & PMD_MASK;
pmd++;
} while (address && (address < end));
+
return 0;
}
-#if (USE_HPPA_IOREMAP)
-static int remap_area_pages(unsigned long address, unsigned long phys_addr,
- unsigned long size, unsigned long flags)
+#if USE_HPPA_IOREMAP
+static int
+remap_area_pages(unsigned long address, unsigned long phys_addr,
+ unsigned long size, unsigned long flags)
{
- int error;
- pgd_t * dir;
+ pgd_t *dir;
+ int error = 0;
unsigned long end = address + size;
+ BUG_ON(address >= end);
+
phys_addr -= address;
- dir = pgd_offset(&init_mm, address);
+ dir = pgd_offset_k(address);
+
flush_cache_all();
- if (address >= end)
- BUG();
+
do {
+ pud_t *pud;
pmd_t *pmd;
- pmd = pmd_alloc(&init_mm, dir, address);
+
error = -ENOMEM;
+ pud = pud_alloc(&init_mm, dir, address);
+ if (!pud)
+ break;
+
+ pmd = pmd_alloc(&init_mm, pud, address);
if (!pmd)
break;
+
if (remap_area_pmd(pmd, address, end - address,
- phys_addr + address, flags))
+ phys_addr + address, flags))
break;
+
error = 0;
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
} while (address && (address < end));
+
flush_tlb_all();
+
return error;
}
#endif /* USE_HPPA_IOREMAP */
@@ -123,8 +146,7 @@ EXPORT_SYMBOL(__raw_bad_addr);
/*
* Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
+ * address space.
*
* NOTE! We need to allow non-page-aligned mappings too: we will obviously
* have to convert them into an offset in a page-aligned mapping, but the
@@ -148,8 +170,8 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
#endif
#else
- void * addr;
- struct vm_struct * area;
+ void *addr;
+ struct vm_struct *area;
unsigned long offset, last_addr;
/* Don't allow wraparound or zero size */
@@ -167,9 +189,11 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
t_addr = __va(phys_addr);
t_end = t_addr + (size - 1);
- for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
+ for (page = virt_to_page(t_addr);
+ page <= virt_to_page(t_end); page++) {
if(!PageReserved(page))
return NULL;
+ }
}
/*
@@ -185,11 +209,13 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
area = get_vm_area(size, VM_IOREMAP);
if (!area)
return NULL;
+
addr = area->addr;
if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
vfree(addr);
return NULL;
}
+
return (void __iomem *) (offset + (char *)addr);
#endif
}
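A hedged usage sketch of the remapping interface touched above: a driver maps a physical MMIO window and accesses it through the returned cookie. dev_phys_base and the 0x10 register offset are illustrative, not taken from this patch:

    /* hypothetical usage; dev_phys_base and the register offset are made up */
    void __iomem *regs = ioremap(dev_phys_base, 0x1000);
    if (regs) {
            unsigned int status = __raw_readl(regs + 0x10); /* read a device register */
            /* ... use status ... */
            iounmap(regs);
    }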
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index db93dbc0e21..01feed0e2a1 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -26,9 +26,6 @@ config MMU
bool
default y
-config UID16
- bool
-
config GENERIC_HARDIRQS
bool
default y
@@ -50,7 +47,7 @@ config PPC
config EARLY_PRINTK
bool
- default y if PPC64
+ default y
config COMPAT
bool
@@ -74,15 +71,39 @@ config ARCH_MAY_HAVE_PC_FDC
bool
default y
+config PPC_OF
+ def_bool y
+
+config PPC_UDBG_16550
+ bool
+ default n
+
+config CRASH_DUMP
+ bool "kernel crash dumps (EXPERIMENTAL)"
+ depends on PPC_MULTIPLATFORM
+ depends on EXPERIMENTAL
+ help
+ Build a kernel suitable for use as a kdump capture kernel.
+ The kernel will be linked at a different address than normal, and
+ so can only be used for Kdump.
+
+ Don't change this unless you know what you are doing.
+
+config GENERIC_TBSYNC
+ bool
+ default y if PPC32 && SMP
+ default n
+
menu "Processor support"
choice
prompt "Processor Type"
depends on PPC32
default 6xx
-config 6xx
+config CLASSIC32
bool "6xx/7xx/74xx"
select PPC_FPU
+ select 6xx
help
There are four families of PowerPC chips supported. The more common
types (601, 603, 604, 740, 750, 7400), the Motorola embedded
@@ -96,12 +117,20 @@ config 6xx
config PPC_52xx
bool "Freescale 52xx"
+ select 6xx
+ select PPC_FPU
config PPC_82xx
bool "Freescale 82xx"
+ select 6xx
+ select PPC_FPU
config PPC_83xx
bool "Freescale 83xx"
+ select 6xx
+ select FSL_SOC
+ select 83xx
+ select PPC_FPU
config 40x
bool "AMCC 40x"
@@ -137,6 +166,13 @@ config POWER4
depends on PPC64
def_bool y
+config 6xx
+ bool
+
+# this is temp to handle compat with arch=ppc
+config 83xx
+ bool
+
config PPC_FPU
bool
default y if PPC64
@@ -169,7 +205,7 @@ config PHYS_64BIT
config ALTIVEC
bool "AltiVec Support"
- depends on 6xx || POWER4
+ depends on CLASSIC32 || POWER4
---help---
This option enables kernel support for the Altivec extensions to the
PowerPC processor. The kernel currently supports saving and restoring
@@ -242,7 +278,7 @@ endmenu
source "init/Kconfig"
menu "Platform support"
- depends on PPC64 || 6xx
+ depends on PPC64 || CLASSIC32
choice
prompt "Machine type"
@@ -278,6 +314,7 @@ config PPC_PSERIES
select PPC_I8259
select PPC_RTAS
select RTAS_ERROR_LOGGING
+ select PPC_UDBG_16550
default y
config PPC_CHRP
@@ -287,6 +324,7 @@ config PPC_CHRP
select PPC_INDIRECT_PCI
select PPC_RTAS
select PPC_MPC106
+ select PPC_UDBG_16550
default y
config PPC_PMAC
@@ -300,6 +338,7 @@ config PPC_PMAC64
bool
depends on PPC_PMAC && POWER4
select U3_DART
+ select MPIC_BROKEN_U3
select GENERIC_TBSYNC
default y
@@ -308,6 +347,7 @@ config PPC_PREP
depends on PPC_MULTIPLATFORM && PPC32 && BROKEN
select PPC_I8259
select PPC_INDIRECT_PCI
+ select PPC_UDBG_16550
default y
config PPC_MAPLE
@@ -316,6 +356,7 @@ config PPC_MAPLE
select U3_DART
select MPIC_BROKEN_U3
select GENERIC_TBSYNC
+ select PPC_UDBG_16550
default n
help
This option enables support for the Maple 970FX Evaluation Board.
@@ -326,11 +367,7 @@ config PPC_CELL
depends on PPC_MULTIPLATFORM && PPC64
select PPC_RTAS
select MMIO_NVRAM
-
-config PPC_OF
- bool
- depends on PPC_MULTIPLATFORM # for now
- default y
+ select PPC_UDBG_16550
config XICS
depends on PPC_PSERIES
@@ -384,13 +421,14 @@ config IBMVIO
bool
default y
-config PPC_MPC106
- bool
- default n
+config IBMEBUS
+ depends on PPC_PSERIES
+ bool "Support for GX bus based adapters"
+ help
+ Bus device driver for GX bus based adapters.
-config GENERIC_TBSYNC
+config PPC_MPC106
bool
- default y if CONFIG_PPC32 && CONFIG_SMP
default n
source "drivers/cpufreq/Kconfig"
@@ -473,8 +511,10 @@ endmenu
source arch/powerpc/platforms/embedded6xx/Kconfig
source arch/powerpc/platforms/4xx/Kconfig
+source arch/powerpc/platforms/83xx/Kconfig
source arch/powerpc/platforms/85xx/Kconfig
source arch/powerpc/platforms/8xx/Kconfig
+source arch/powerpc/platforms/cell/Kconfig
menu "Kernel options"
@@ -578,11 +618,12 @@ config ARCH_SELECT_MEMORY_MODEL
depends on PPC64
config ARCH_FLATMEM_ENABLE
- def_bool y
- depends on PPC64 && !NUMA
+ def_bool y
+ depends on (PPC64 && !NUMA) || PPC32
config ARCH_SPARSEMEM_ENABLE
def_bool y
+ depends on PPC64
config ARCH_SPARSEMEM_DEFAULT
def_bool y
@@ -694,7 +735,7 @@ config PPC_I8259
config PPC_INDIRECT_PCI
bool
depends on PCI
- default y if 40x || 44x || 85xx || 83xx
+ default y if 40x || 44x || 85xx
default n
config EISA
@@ -703,13 +744,16 @@ config EISA
config SBUS
bool
+config FSL_SOC
+ bool
+
# Yes MCA RS/6000s exist but Linux-PPC does not currently support any
config MCA
bool
config PCI
- bool "PCI support" if 40x || CPM2 || 83xx || 85xx || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES)
- default y if !40x && !CPM2 && !8xx && !APUS && !83xx && !85xx
+ bool "PCI support" if 40x || CPM2 || PPC_83xx || 85xx || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES)
+ default y if !40x && !CPM2 && !8xx && !APUS && !PPC_83xx && !85xx
default PCI_PERMEDIA if !4xx && !CPM2 && !8xx && APUS
default PCI_QSPAN if !4xx && !CPM2 && 8xx
help
@@ -722,11 +766,6 @@ config PCI_DOMAINS
bool
default PCI
-config MPC83xx_PCI2
- bool " Supprt for 2nd PCI host controller"
- depends on PCI && MPC834x
- default y if MPC834x_SYS
-
config PCI_QSPAN
bool "QSpan PCI"
depends on !4xx && !CPM2 && 8xx
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 30a30bf559e..9254806f703 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -115,4 +115,46 @@ config PPC_OCP
depends on IBM_OCP || XILINX_OCP
default y
+choice
+ prompt "Early debugging (dangerous)"
+ bool
+ optional
+ help
+ Enable early debugging. Careful, if you enable debugging for the
+ wrong type of machine your kernel _will not boot_.
+
+config PPC_EARLY_DEBUG_LPAR
+ bool "LPAR HV Console"
+ depends on PPC_PSERIES
+ help
+ Select this to enable early debugging for a machine with a HVC
+ console on vterm 0.
+
+config PPC_EARLY_DEBUG_G5
+ bool "Apple G5"
+ depends on PPC_PMAC64
+ help
+ Select this to enable early debugging for Apple G5 machines.
+
+config PPC_EARLY_DEBUG_RTAS
+ bool "RTAS Panel"
+ depends on PPC_RTAS
+ help
+ Select this to enable early debugging via the RTAS panel.
+
+config PPC_EARLY_DEBUG_MAPLE
+ bool "Maple real mode"
+ depends on PPC_MAPLE
+ help
+ Select this to enable early debugging for Maple.
+
+config PPC_EARLY_DEBUG_ISERIES
+ bool "iSeries HV Console"
+ depends on PPC_ISERIES
+ help
+ Select this to enable early debugging for legacy iSeries. You need
+ to hit "Ctrl-x Ctrl-x" to see the messages on the console.
+
+endchoice
+
endmenu
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index a13eb575f83..44dd82b791d 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -76,8 +76,7 @@ LINUXINCLUDE += $(LINUXINCLUDE-y)
CHECKFLAGS += -m$(SZ) -D__powerpc__ -D__powerpc$(SZ)__
ifeq ($(CONFIG_PPC64),y)
-GCC_VERSION := $(call cc-version)
-GCC_BROKEN_VEC := $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi)
+GCC_BROKEN_VEC := $(shell if [ $(call cc-version) -lt 0400 ] ; then echo "y"; fi)
ifeq ($(CONFIG_POWER4_ONLY),y)
ifeq ($(CONFIG_ALTIVEC),y)
@@ -140,18 +139,15 @@ drivers-$(CONFIG_CPM2) += arch/ppc/8260_io/
drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
-defaultimage-$(CONFIG_PPC32) := zImage
+# Default to zImage, override when needed
+defaultimage-y := zImage
defaultimage-$(CONFIG_PPC_ISERIES) := vmlinux
-defaultimage-$(CONFIG_PPC_PSERIES) := zImage
KBUILD_IMAGE := $(defaultimage-y)
all: $(KBUILD_IMAGE)
CPPFLAGS_vmlinux.lds := -Upowerpc
-# All the instructions talk about "make bzImage".
-bzImage: zImage
-
-BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm
+BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm uImage
.PHONY: $(BOOT_TARGETS)
@@ -189,10 +185,9 @@ TOUT := .tmp_gas_check
# Ensure this is binutils 2.12.1 (or 2.12.90.0.7) or later for altivec
# instructions.
# gcc-3.4 and binutils-2.14 are a fatal combination.
-GCC_VERSION := $(call cc-version)
checkbin:
- @if test "$(GCC_VERSION)" = "0304" ; then \
+ @if test "$(call cc-version)" = "0304" ; then \
if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \
echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \
echo 'correctly with gcc-3.4 and your version of binutils.'; \
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
new file mode 100644
index 00000000000..45c9ad23526
--- /dev/null
+++ b/arch/powerpc/boot/.gitignore
@@ -0,0 +1,20 @@
+addnote
+infblock.c
+infblock.h
+infcodes.c
+infcodes.h
+inffast.c
+inffast.h
+inflate.c
+inftrees.c
+inftrees.h
+infutil.c
+infutil.h
+kernel-vmlinux.strip.c
+kernel-vmlinux.strip.gz
+uImage
+zImage
+zImage.vmode
+zconf.h
+zlib.h
+zutil.h
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 9770f587af7..788dec4c7ef 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -25,8 +25,8 @@ HOSTCC := gcc
BOOTCFLAGS := $(HOSTCFLAGS) -fno-builtin -nostdinc -isystem \
$(shell $(CROSS32CC) -print-file-name=include) -fPIC
BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
-BOOTLFLAGS := -T $(srctree)/$(src)/zImage.lds
OBJCOPYFLAGS := contents,alloc,load,readonly,data
+OBJCOPY_COFF_ARGS := -O aixcoff-rs6000 --set-start 0x500000
zlib := infblock.c infcodes.c inffast.c inflate.c inftrees.c infutil.c
zlibheader := infblock.h infcodes.h inffast.h inftrees.h infutil.h
@@ -35,7 +35,7 @@ zliblinuxheader := zlib.h zconf.h zutil.h
$(addprefix $(obj)/,$(zlib) main.o): $(addprefix $(obj)/,$(zliblinuxheader)) $(addprefix $(obj)/,$(zlibheader))
#$(addprefix $(obj)/,main.o): $(addprefix $(obj)/,zlib.h)
-src-boot := string.S prom.c main.c div64.S crt0.S
+src-boot := crt0.S string.S prom.c stdio.c main.c div64.S
src-boot += $(zlib)
src-boot := $(addprefix $(obj)/, $(src-boot))
obj-boot := $(addsuffix .o, $(basename $(src-boot)))
@@ -70,7 +70,7 @@ quiet_cmd_bootas = BOOTAS $@
cmd_bootas = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTAFLAGS) -c -o $@ $<
quiet_cmd_bootld = BOOTLD $@
- cmd_bootld = $(CROSS32LD) $(BOOTLFLAGS) -o $@ $(2)
+ cmd_bootld = $(CROSS32LD) -T $(srctree)/$(src)/$(3) -o $@ $(2)
$(patsubst %.c,%.o, $(filter %.c, $(src-boot))): %.o: %.c
$(call if_changed_dep,bootcc)
@@ -87,12 +87,14 @@ obj-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.o, $(section)))
src-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.c, $(section)))
gz-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.gz, $(section)))
-hostprogs-y := addnote addRamDisk
-targets += zImage.vmode zImage.initrd.vmode zImage zImage.initrd \
- $(patsubst $(obj)/%,%, $(call obj-sec, $(required) $(initrd))) \
- $(patsubst $(obj)/%,%, $(call src-sec, $(required) $(initrd))) \
- $(patsubst $(obj)/%,%, $(call gz-sec, $(required) $(initrd))) \
- vmlinux.initrd
+hostprogs-y := addnote addRamDisk hack-coff
+
+targets += zImage.vmode zImage.initrd.vmode zImage zImage.initrd \
+ zImage.coff zImage.initrd.coff \
+ $(patsubst $(obj)/%,%, $(call obj-sec, $(required) $(initrd))) \
+ $(patsubst $(obj)/%,%, $(call src-sec, $(required) $(initrd))) \
+ $(patsubst $(obj)/%,%, $(call gz-sec, $(required) $(initrd))) \
+ vmlinux.initrd
extra-y := initrd.o
quiet_cmd_ramdisk = RAMDISK $@
@@ -114,6 +116,10 @@ quiet_cmd_addsection = ADDSEC $@
quiet_cmd_addnote = ADDNOTE $@
cmd_addnote = $(obj)/addnote $@
+quiet_cmd_gencoff = COFF $@
+ cmd_gencoff = $(OBJCOPY) $(OBJCOPY_COFF_ARGS) $@ && \
+ $(obj)/hack-coff $@
+
$(call gz-sec, $(required)): $(obj)/kernel-%.gz: %
$(call if_changed,gzip)
@@ -127,23 +133,66 @@ $(call obj-sec, $(required) $(initrd)): $(obj)/kernel-%.o: $(obj)/kernel-%.c
$(call if_changed_dep,bootcc)
$(call cmd,addsection)
-$(obj)/zImage.vmode: obj-boot += $(call obj-sec, $(required))
+$(obj)/zImage.vmode $(obj)/zImage.coff: obj-boot += $(call obj-sec, $(required))
$(obj)/zImage.vmode: $(call obj-sec, $(required)) $(obj-boot) $(srctree)/$(src)/zImage.lds
- $(call cmd,bootld,$(obj-boot))
+ $(call cmd,bootld,$(obj-boot),zImage.lds)
-$(obj)/zImage.initrd.vmode: obj-boot += $(call obj-sec, $(required) $(initrd))
+$(obj)/zImage.initrd.vmode $(obj)/zImage.initrd.coff: obj-boot += $(call obj-sec, $(required) $(initrd))
$(obj)/zImage.initrd.vmode: $(call obj-sec, $(required) $(initrd)) $(obj-boot) $(srctree)/$(src)/zImage.lds
- $(call cmd,bootld,$(obj-boot))
+ $(call cmd,bootld,$(obj-boot),zImage.lds)
-$(obj)/zImage: $(obj)/zImage.vmode $(obj)/addnote
+# For 32-bit powermacs, build the COFF images as well as the ELF images.
+coffimage-$(CONFIG_PPC_PMAC)-$(CONFIG_PPC32) := $(obj)/zImage.coff
+coffrdimg-$(CONFIG_PPC_PMAC)-$(CONFIG_PPC32) := $(obj)/zImage.initrd.coff
+
+$(obj)/zImage: $(obj)/zImage.vmode $(obj)/addnote $(coffimage-y-y)
@cp -f $< $@
$(call if_changed,addnote)
-$(obj)/zImage.initrd: $(obj)/zImage.initrd.vmode $(obj)/addnote
+$(obj)/zImage.initrd: $(obj)/zImage.initrd.vmode $(obj)/addnote $(coffrdimg-y-y)
@cp -f $< $@
$(call if_changed,addnote)
+$(obj)/zImage.coff: $(call obj-sec, $(required)) $(obj-boot) $(srctree)/$(src)/zImage.coff.lds $(obj)/hack-coff
+ $(call cmd,bootld,$(obj-boot),zImage.coff.lds)
+ $(call cmd,gencoff)
+
+$(obj)/zImage.initrd.coff: $(call obj-sec, $(required) $(initrd)) $(obj-boot) \
+ $(srctree)/$(src)/zImage.coff.lds $(obj)/hack-coff
+ $(call cmd,bootld,$(obj-boot),zImage.coff.lds)
+ $(call cmd,gencoff)
+
+#-----------------------------------------------------------
+# build u-boot images
+#-----------------------------------------------------------
+quiet_cmd_mygzip = GZIP $@
+cmd_mygzip = gzip -f -9 < $< > $@.$$$$ && mv $@.$$$$ $@
+
+quiet_cmd_objbin = OBJCOPY $@
+ cmd_objbin = $(OBJCOPY) -O binary $< $@
+
+quiet_cmd_uimage = UIMAGE $@
+ cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A ppc -O linux -T kernel \
+ -C gzip -a 00000000 -e 00000000 -n 'Linux-$(KERNELRELEASE)' \
+ -d $< $@
+
+MKIMAGE := $(srctree)/scripts/mkuboot.sh
+targets += uImage
+extra-y += vmlinux.bin vmlinux.gz
+
+$(obj)/vmlinux.bin: vmlinux FORCE
+ $(call if_changed,objbin)
+
+$(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,mygzip)
+
+$(obj)/uImage: $(obj)/vmlinux.gz
+ $(Q)rm -f $@
+ $(call cmd,uimage)
+ @echo -n ' Image: $@ '
+ @if [ -f $@ ]; then echo 'is ready' ; else echo 'not made'; fi
+
install: $(CONFIGURE) $(BOOTIMAGE)
sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" "$(BOOTIMAGE)"
-clean-files := $(addprefix $(objtree)/, $(obj-boot) vmlinux.strip)
+clean-files += $(addprefix $(objtree)/, $(obj-boot) vmlinux.strip)
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index d2f2ace56cd..e0192c26037 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -12,17 +12,23 @@
#include "ppc_asm.h"
.text
+ /* a procedure descriptor used when booting this as a COFF file */
+_zimage_start_opd:
+ .long _zimage_start, 0, 0, 0
+
.globl _zimage_start
_zimage_start:
+ /* Work out the offset between the address we were linked at
+ and the address where we're running. */
bl 1f
-
-1:
- mflr r0
+1: mflr r0
lis r9,1b@ha
addi r9,r9,1b@l
subf. r0,r9,r0
- beq 3f
+ beq 3f /* if running at same address as linked */
+ /* The .got2 section contains a list of addresses, so add
+ the address offset onto each entry. */
lis r9,__got2_start@ha
addi r9,r9,__got2_start@l
lis r8,__got2_end@ha
@@ -32,15 +38,14 @@ _zimage_start:
srwi. r8,r8,2
mtctr r8
add r9,r0,r9
-2:
- lwz r8,0(r9)
+2: lwz r8,0(r9)
add r8,r8,r0
stw r8,0(r9)
addi r9,r9,4
bdnz 2b
-3:
- lis r9,_start@h
+ /* Do a cache flush for our text, in case OF didn't */
+3: lis r9,_start@h
add r9,r0,r9
lis r8,_etext@ha
addi r8,r8,_etext@l
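The new comments in crt0.S describe the position-independence fixup: the offset between the link address and the run address is computed, then added to every entry in the .got2 table. In C the same fixup would look roughly like this (a sketch, assuming __got2_start/__got2_end bracket the table as the linker script provides):

    extern unsigned long __got2_start[], __got2_end[];

    static void fixup_got2(unsigned long offset)
    {
            unsigned long *p;

            for (p = __got2_start; p < __got2_end; p++)
                    *p += offset;   /* relocate each stored address by the load offset */
    }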
diff --git a/arch/powerpc/boot/hack-coff.c b/arch/powerpc/boot/hack-coff.c
new file mode 100644
index 00000000000..5e5a6573a1e
--- /dev/null
+++ b/arch/powerpc/boot/hack-coff.c
@@ -0,0 +1,84 @@
+/*
+ * hack-coff.c - hack the header of an xcoff file to fill in
+ * a few fields needed by the Open Firmware xcoff loader on
+ * Power Macs but not initialized by objcopy.
+ *
+ * Copyright (C) Paul Mackerras 1997.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include "rs6000.h"
+
+#define AOUT_MAGIC 0x010b
+
+#define get_16be(x) ((((unsigned char *)(x))[0] << 8) \
+ + ((unsigned char *)(x))[1])
+#define put_16be(x, v) (((unsigned char *)(x))[0] = (v) >> 8, \
+ ((unsigned char *)(x))[1] = (v) & 0xff)
+#define get_32be(x) ((((unsigned char *)(x))[0] << 24) \
+ + (((unsigned char *)(x))[1] << 16) \
+ + (((unsigned char *)(x))[2] << 8) \
+ + ((unsigned char *)(x))[3])
+
+int
+main(int ac, char **av)
+{
+ int fd;
+ int i, nsect;
+ int aoutsz;
+ struct external_filehdr fhdr;
+ AOUTHDR aout;
+ struct external_scnhdr shdr;
+
+ if (ac != 2) {
+ fprintf(stderr, "Usage: hack-coff coff-file\n");
+ exit(1);
+ }
+ if ((fd = open(av[1], 2)) == -1) {
+ perror(av[2]);
+ exit(1);
+ }
+ if (read(fd, &fhdr, sizeof(fhdr)) != sizeof(fhdr))
+ goto readerr;
+ i = get_16be(fhdr.f_magic);
+ if (i != U802TOCMAGIC && i != U802WRMAGIC && i != U802ROMAGIC) {
+ fprintf(stderr, "%s: not an xcoff file\n", av[1]);
+ exit(1);
+ }
+ aoutsz = get_16be(fhdr.f_opthdr);
+ if (read(fd, &aout, aoutsz) != aoutsz)
+ goto readerr;
+ nsect = get_16be(fhdr.f_nscns);
+ for (i = 0; i < nsect; ++i) {
+ if (read(fd, &shdr, sizeof(shdr)) != sizeof(shdr))
+ goto readerr;
+ if (strcmp(shdr.s_name, ".text") == 0) {
+ put_16be(aout.o_snentry, i+1);
+ put_16be(aout.o_sntext, i+1);
+ } else if (strcmp(shdr.s_name, ".data") == 0) {
+ put_16be(aout.o_sndata, i+1);
+ } else if (strcmp(shdr.s_name, ".bss") == 0) {
+ put_16be(aout.o_snbss, i+1);
+ }
+ }
+ put_16be(aout.magic, AOUT_MAGIC);
+ if (lseek(fd, (long) sizeof(struct external_filehdr), 0) == -1
+ || write(fd, &aout, aoutsz) != aoutsz) {
+ fprintf(stderr, "%s: write error\n", av[1]);
+ exit(1);
+ }
+ close(fd);
+ exit(0);
+
+readerr:
+ fprintf(stderr, "%s: read error or file too short\n", av[1]);
+ exit(1);
+}
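The byte-wise accessors above keep hack-coff independent of host endianness, since XCOFF headers are big-endian on disk. A tiny illustration (the values are made up):

    unsigned char magic[2] = { 0x01, 0x0b };

    int v = get_16be(magic);    /* v == 0x010b, i.e. AOUT_MAGIC */
    put_16be(magic, 0737);      /* store U802TOCMAGIC (octal 0737) back, big-endian */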
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index 64ec93116fa..55ec5986725 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
@@ -21,8 +21,8 @@ extern void flush_cache(void *, unsigned long);
/* Value picked to match that used by yaboot */
-#define PROG_START 0x01400000
-#define RAM_END (512<<20) // Fixme: use OF */
+#define PROG_START 0x01400000 /* only used on 64-bit systems */
+#define RAM_END (512<<20) /* Fixme: use OF */
#define ONE_MB 0x100000
extern char _start[];
@@ -160,6 +160,17 @@ static int is_elf64(void *hdr)
elfoffset = (unsigned long)elf64ph->p_offset;
vmlinux.size = (unsigned long)elf64ph->p_filesz + elfoffset;
vmlinux.memsize = (unsigned long)elf64ph->p_memsz + elfoffset;
+
+#if defined(PROG_START)
+ /*
+ * Maintain a "magic" minimum address. This keeps some older
+ * firmware platforms running.
+ */
+
+ if (claim_base < PROG_START)
+ claim_base = PROG_START;
+#endif
+
return 1;
}
@@ -206,12 +217,18 @@ void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
exit();
if (getprop(chosen_handle, "stdout", &stdout, sizeof(stdout)) != 4)
exit();
- stderr = stdout;
- if (getprop(chosen_handle, "stdin", &stdin, sizeof(stdin)) != 4)
- exit();
printf("\n\rzImage starting: loaded at 0x%p (sp: 0x%p)\n\r", _start, sp);
+ /*
+ * The first available claim_base must be above the end of the
+ * loaded kernel wrapper file (_start to _end includes the
+ * initrd image if it is present) and rounded up to a nice
+ * 1 MB boundary for good measure.
+ */
+
+ claim_base = _ALIGN_UP((unsigned long)_end, ONE_MB);
+
vmlinuz.addr = (unsigned long)_vmlinux_start;
vmlinuz.size = (unsigned long)(_vmlinux_end - _vmlinux_start);
@@ -228,25 +245,6 @@ void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
exit();
}
- /*
- * The first available claim_base must be above the end of the
- * the loaded kernel wrapper file (_start to _end includes the
- * initrd image if it is present) and rounded up to a nice
- * 1 MB boundary for good measure.
- */
-
- claim_base = _ALIGN_UP((unsigned long)_end, ONE_MB);
-
-#if defined(PROG_START)
- /*
- * Maintain a "magic" minimum address. This keeps some older
- * firmware platforms running.
- */
-
- if (claim_base < PROG_START)
- claim_base = PROG_START;
-#endif
-
/* We need to claim the memsize plus the file offset since gzip
* will expand the header (file offset), then the kernel, then
* possible rubbish we don't care about. But the kernel bss must
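claim_base is now rounded up to a megabyte boundary before the ELF header is parsed. _ALIGN_UP is the usual power-of-two round-up; a sketch of the macro as commonly defined, with one worked value (the macro body here is illustrative, not copied from this patch):

    #define _ALIGN_UP(x, a)  (((x) + ((a) - 1)) & ~((a) - 1))  /* a must be a power of two */

    /* e.g. with ONE_MB == 0x100000:
     *   _ALIGN_UP(0x00123456, ONE_MB) == 0x00200000
     */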
diff --git a/arch/powerpc/boot/prom.c b/arch/powerpc/boot/prom.c
index 4bea2f4dcb0..fa0057736f6 100644
--- a/arch/powerpc/boot/prom.c
+++ b/arch/powerpc/boot/prom.c
@@ -13,487 +13,153 @@
#include "prom.h"
int (*prom)(void *);
+phandle chosen_handle;
+ihandle stdout;
-void *chosen_handle;
-
-void *stdin;
-void *stdout;
-void *stderr;
-
-
-int
-write(void *handle, void *ptr, int nb)
-{
- struct prom_args {
- char *service;
- int nargs;
- int nret;
- void *ihandle;
- void *addr;
- int len;
- int actual;
- } args;
-
- args.service = "write";
- args.nargs = 3;
- args.nret = 1;
- args.ihandle = handle;
- args.addr = ptr;
- args.len = nb;
- args.actual = -1;
- (*prom)(&args);
- return args.actual;
-}
-
-int
-read(void *handle, void *ptr, int nb)
+int call_prom(const char *service, int nargs, int nret, ...)
{
+ int i;
struct prom_args {
- char *service;
+ const char *service;
int nargs;
int nret;
- void *ihandle;
- void *addr;
- int len;
- int actual;
- } args;
-
- args.service = "read";
- args.nargs = 3;
- args.nret = 1;
- args.ihandle = handle;
- args.addr = ptr;
- args.len = nb;
- args.actual = -1;
- (*prom)(&args);
- return args.actual;
-}
-
-void
-exit()
-{
- struct prom_args {
- char *service;
- } args;
-
- for (;;) {
- args.service = "exit";
- (*prom)(&args);
- }
-}
-
-void
-pause(void)
-{
- struct prom_args {
- char *service;
+ unsigned int args[12];
} args;
+ va_list list;
- args.service = "enter";
- (*prom)(&args);
-}
+ args.service = service;
+ args.nargs = nargs;
+ args.nret = nret;
-void *
-finddevice(const char *name)
-{
- struct prom_args {
- char *service;
- int nargs;
- int nret;
- const char *devspec;
- void *phandle;
- } args;
+ va_start(list, nret);
+ for (i = 0; i < nargs; i++)
+ args.args[i] = va_arg(list, unsigned int);
+ va_end(list);
- args.service = "finddevice";
- args.nargs = 1;
- args.nret = 1;
- args.devspec = name;
- args.phandle = (void *) -1;
- (*prom)(&args);
- return args.phandle;
-}
+ for (i = 0; i < nret; i++)
+ args.args[nargs+i] = 0;
-void *
-claim(unsigned long virt, unsigned long size, unsigned long align)
-{
- struct prom_args {
- char *service;
- int nargs;
- int nret;
- unsigned int virt;
- unsigned int size;
- unsigned int align;
- void *ret;
- } args;
+ if (prom(&args) < 0)
+ return -1;
- args.service = "claim";
- args.nargs = 3;
- args.nret = 1;
- args.virt = virt;
- args.size = size;
- args.align = align;
- (*prom)(&args);
- return args.ret;
+ return (nret > 0)? args.args[nargs]: 0;
}
-int
-getprop(void *phandle, const char *name, void *buf, int buflen)
+int call_prom_ret(const char *service, int nargs, int nret,
+ unsigned int *rets, ...)
{
+ int i;
struct prom_args {
- char *service;
+ const char *service;
int nargs;
int nret;
- void *phandle;
- const char *name;
- void *buf;
- int buflen;
- int size;
+ unsigned int args[12];
} args;
+ va_list list;
- args.service = "getprop";
- args.nargs = 4;
- args.nret = 1;
- args.phandle = phandle;
- args.name = name;
- args.buf = buf;
- args.buflen = buflen;
- args.size = -1;
- (*prom)(&args);
- return args.size;
-}
+ args.service = service;
+ args.nargs = nargs;
+ args.nret = nret;
-int
-putc(int c, void *f)
-{
- char ch = c;
+ va_start(list, rets);
+ for (i = 0; i < nargs; i++)
+ args.args[i] = va_arg(list, unsigned int);
+ va_end(list);
- if (c == '\n')
- putc('\r', f);
- return write(f, &ch, 1) == 1? c: -1;
-}
+ for (i = 0; i < nret; i++)
+ args.args[nargs+i] = 0;
-int
-putchar(int c)
-{
- return putc(c, stdout);
-}
+ if (prom(&args) < 0)
+ return -1;
-int
-fputs(char *str, void *f)
-{
- int n = strlen(str);
+ if (rets != (void *) 0)
+ for (i = 1; i < nret; ++i)
+ rets[i-1] = args.args[nargs+i];
- return write(f, str, n) == n? 0: -1;
+ return (nret > 0)? args.args[nargs]: 0;
}
-size_t strnlen(const char * s, size_t count)
+int write(void *handle, void *ptr, int nb)
{
- const char *sc;
-
- for (sc = s; count-- && *sc != '\0'; ++sc)
- /* nothing */;
- return sc - s;
+ return call_prom("write", 3, 1, handle, ptr, nb);
}
-extern unsigned int __div64_32(unsigned long long *dividend,
- unsigned int divisor);
-
-/* The unnecessary pointer compare is there
- * to check for type safety (n must be 64bit)
+/*
+ * Older OF's require that when claiming a specific range of addresses,
+ * we claim the physical space in the /memory node and the virtual
+ * space in the chosen mmu node, and then do a map operation to
+ * map virtual to physical.
*/
-# define do_div(n,base) ({ \
- unsigned int __base = (base); \
- unsigned int __rem; \
- (void)(((typeof((n)) *)0) == ((unsigned long long *)0)); \
- if (((n) >> 32) == 0) { \
- __rem = (unsigned int)(n) % __base; \
- (n) = (unsigned int)(n) / __base; \
- } else \
- __rem = __div64_32(&(n), __base); \
- __rem; \
- })
+static int need_map = -1;
+static ihandle chosen_mmu;
+static phandle memory;
-static int skip_atoi(const char **s)
+/* returns true if s2 is a prefix of s1 */
+static int string_match(const char *s1, const char *s2)
{
- int i, c;
-
- for (i = 0; '0' <= (c = **s) && c <= '9'; ++*s)
- i = i*10 + c - '0';
- return i;
+ for (; *s2; ++s2)
+ if (*s1++ != *s2)
+ return 0;
+ return 1;
}
-#define ZEROPAD 1 /* pad with zero */
-#define SIGN 2 /* unsigned/signed long */
-#define PLUS 4 /* show plus */
-#define SPACE 8 /* space if plus */
-#define LEFT 16 /* left justified */
-#define SPECIAL 32 /* 0x */
-#define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
-
-static char * number(char * str, unsigned long long num, int base, int size, int precision, int type)
+static int check_of_version(void)
{
- char c,sign,tmp[66];
- const char *digits="0123456789abcdefghijklmnopqrstuvwxyz";
- int i;
+ phandle oprom, chosen;
+ char version[64];
- if (type & LARGE)
- digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
- if (type & LEFT)
- type &= ~ZEROPAD;
- if (base < 2 || base > 36)
+ oprom = finddevice("/openprom");
+ if (oprom == (phandle) -1)
return 0;
- c = (type & ZEROPAD) ? '0' : ' ';
- sign = 0;
- if (type & SIGN) {
- if ((signed long long)num < 0) {
- sign = '-';
- num = - (signed long long)num;
- size--;
- } else if (type & PLUS) {
- sign = '+';
- size--;
- } else if (type & SPACE) {
- sign = ' ';
- size--;
+ if (getprop(oprom, "model", version, sizeof(version)) <= 0)
+ return 0;
+ version[sizeof(version)-1] = 0;
+ printf("OF version = '%s'\r\n", version);
+ if (!string_match(version, "Open Firmware, 1.")
+ && !string_match(version, "FirmWorks,3."))
+ return 0;
+ chosen = finddevice("/chosen");
+ if (chosen == (phandle) -1) {
+ chosen = finddevice("/chosen@0");
+ if (chosen == (phandle) -1) {
+ printf("no chosen\n");
+ return 0;
}
}
- if (type & SPECIAL) {
- if (base == 16)
- size -= 2;
- else if (base == 8)
- size--;
- }
- i = 0;
- if (num == 0)
- tmp[i++]='0';
- else while (num != 0) {
- tmp[i++] = digits[do_div(num, base)];
+ if (getprop(chosen, "mmu", &chosen_mmu, sizeof(chosen_mmu)) <= 0) {
+ printf("no mmu\n");
+ return 0;
}
- if (i > precision)
- precision = i;
- size -= precision;
- if (!(type&(ZEROPAD+LEFT)))
- while(size-->0)
- *str++ = ' ';
- if (sign)
- *str++ = sign;
- if (type & SPECIAL) {
- if (base==8)
- *str++ = '0';
- else if (base==16) {
- *str++ = '0';
- *str++ = digits[33];
+ memory = (ihandle) call_prom("open", 1, 1, "/memory");
+ if (memory == (ihandle) -1) {
+ memory = (ihandle) call_prom("open", 1, 1, "/memory@0");
+ if (memory == (ihandle) -1) {
+ printf("no memory node\n");
+ return 0;
}
}
- if (!(type & LEFT))
- while (size-- > 0)
- *str++ = c;
- while (i < precision--)
- *str++ = '0';
- while (i-- > 0)
- *str++ = tmp[i];
- while (size-- > 0)
- *str++ = ' ';
- return str;
+ printf("old OF detected\r\n");
+ return 1;
}
-int vsprintf(char *buf, const char *fmt, va_list args)
+void *claim(unsigned long virt, unsigned long size, unsigned long align)
{
- int len;
- unsigned long long num;
- int i, base;
- char * str;
- const char *s;
-
- int flags; /* flags to number() */
-
- int field_width; /* width of output field */
- int precision; /* min. # of digits for integers; max
- number of chars for from string */
- int qualifier; /* 'h', 'l', or 'L' for integer fields */
- /* 'z' support added 23/7/1999 S.H. */
- /* 'z' changed to 'Z' --davidm 1/25/99 */
+ int ret;
+ unsigned int result;
+ if (need_map < 0)
+ need_map = check_of_version();
+ if (align || !need_map)
+ return (void *) call_prom("claim", 3, 1, virt, size, align);
- for (str=buf ; *fmt ; ++fmt) {
- if (*fmt != '%') {
- *str++ = *fmt;
- continue;
- }
-
- /* process flags */
- flags = 0;
- repeat:
- ++fmt; /* this also skips first '%' */
- switch (*fmt) {
- case '-': flags |= LEFT; goto repeat;
- case '+': flags |= PLUS; goto repeat;
- case ' ': flags |= SPACE; goto repeat;
- case '#': flags |= SPECIAL; goto repeat;
- case '0': flags |= ZEROPAD; goto repeat;
- }
-
- /* get field width */
- field_width = -1;
- if ('0' <= *fmt && *fmt <= '9')
- field_width = skip_atoi(&fmt);
- else if (*fmt == '*') {
- ++fmt;
- /* it's the next argument */
- field_width = va_arg(args, int);
- if (field_width < 0) {
- field_width = -field_width;
- flags |= LEFT;
- }
- }
-
- /* get the precision */
- precision = -1;
- if (*fmt == '.') {
- ++fmt;
- if ('0' <= *fmt && *fmt <= '9')
- precision = skip_atoi(&fmt);
- else if (*fmt == '*') {
- ++fmt;
- /* it's the next argument */
- precision = va_arg(args, int);
- }
- if (precision < 0)
- precision = 0;
- }
-
- /* get the conversion qualifier */
- qualifier = -1;
- if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') {
- qualifier = *fmt;
- ++fmt;
- }
-
- /* default base */
- base = 10;
-
- switch (*fmt) {
- case 'c':
- if (!(flags & LEFT))
- while (--field_width > 0)
- *str++ = ' ';
- *str++ = (unsigned char) va_arg(args, int);
- while (--field_width > 0)
- *str++ = ' ';
- continue;
-
- case 's':
- s = va_arg(args, char *);
- if (!s)
- s = "<NULL>";
-
- len = strnlen(s, precision);
-
- if (!(flags & LEFT))
- while (len < field_width--)
- *str++ = ' ';
- for (i = 0; i < len; ++i)
- *str++ = *s++;
- while (len < field_width--)
- *str++ = ' ';
- continue;
-
- case 'p':
- if (field_width == -1) {
- field_width = 2*sizeof(void *);
- flags |= ZEROPAD;
- }
- str = number(str,
- (unsigned long) va_arg(args, void *), 16,
- field_width, precision, flags);
- continue;
-
-
- case 'n':
- if (qualifier == 'l') {
- long * ip = va_arg(args, long *);
- *ip = (str - buf);
- } else if (qualifier == 'Z') {
- size_t * ip = va_arg(args, size_t *);
- *ip = (str - buf);
- } else {
- int * ip = va_arg(args, int *);
- *ip = (str - buf);
- }
- continue;
-
- case '%':
- *str++ = '%';
- continue;
-
- /* integer number formats - set up the flags and "break" */
- case 'o':
- base = 8;
- break;
-
- case 'X':
- flags |= LARGE;
- case 'x':
- base = 16;
- break;
-
- case 'd':
- case 'i':
- flags |= SIGN;
- case 'u':
- break;
-
- default:
- *str++ = '%';
- if (*fmt)
- *str++ = *fmt;
- else
- --fmt;
- continue;
- }
- if (qualifier == 'l') {
- num = va_arg(args, unsigned long);
- if (flags & SIGN)
- num = (signed long) num;
- } else if (qualifier == 'Z') {
- num = va_arg(args, size_t);
- } else if (qualifier == 'h') {
- num = (unsigned short) va_arg(args, int);
- if (flags & SIGN)
- num = (signed short) num;
- } else {
- num = va_arg(args, unsigned int);
- if (flags & SIGN)
- num = (signed int) num;
- }
- str = number(str, num, base, field_width, precision, flags);
- }
- *str = '\0';
- return str-buf;
-}
-
-int sprintf(char * buf, const char *fmt, ...)
-{
- va_list args;
- int i;
-
- va_start(args, fmt);
- i=vsprintf(buf,fmt,args);
- va_end(args);
- return i;
-}
-
-static char sprint_buf[1024];
-
-int
-printf(const char *fmt, ...)
-{
- va_list args;
- int n;
-
- va_start(args, fmt);
- n = vsprintf(sprint_buf, fmt, args);
- va_end(args);
- write(stdout, sprint_buf, n);
- return n;
+ ret = call_prom_ret("call-method", 5, 2, &result, "claim", memory,
+ align, size, virt);
+ if (ret != 0 || result == -1)
+ return (void *) -1;
+ ret = call_prom_ret("call-method", 5, 2, &result, "claim", chosen_mmu,
+ align, size, virt);
+ /* 0x12 == coherent + read/write */
+ ret = call_prom("call-method", 6, 1, "map", chosen_mmu,
+ 0x12, size, virt, virt);
+ return (void *) virt;
}
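The rewritten prom.c funnels every client-interface call through the two varargs helpers. A hedged sketch of how a caller uses them, mirroring the claim() path above (the handle and the numeric values are illustrative):

    unsigned int align = 0, size = 0x100000, virt = 0x01400000;  /* illustrative values */
    unsigned int result;
    ihandle mem = (ihandle) call_prom("open", 1, 1, "/memory");

    /* "call-method" takes the method name plus its arguments; the extra
     * return word lands in result */
    int rc = call_prom_ret("call-method", 5, 2, &result, "claim",
                           mem, align, size, virt);
    if (rc != 0 || result == (unsigned int) -1)
            printf("claim failed\r\n");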
diff --git a/arch/powerpc/boot/prom.h b/arch/powerpc/boot/prom.h
index 96ab5aec740..3e2ddd4a5a8 100644
--- a/arch/powerpc/boot/prom.h
+++ b/arch/powerpc/boot/prom.h
@@ -1,18 +1,34 @@
#ifndef _PPC_BOOT_PROM_H_
#define _PPC_BOOT_PROM_H_
+typedef void *phandle;
+typedef void *ihandle;
+
extern int (*prom) (void *);
-extern void *chosen_handle;
+extern phandle chosen_handle;
+extern ihandle stdout;
-extern void *stdin;
-extern void *stdout;
-extern void *stderr;
+int call_prom(const char *service, int nargs, int nret, ...);
+int call_prom_ret(const char *service, int nargs, int nret,
+ unsigned int *rets, ...);
extern int write(void *handle, void *ptr, int nb);
-extern int read(void *handle, void *ptr, int nb);
-extern void exit(void);
-extern void pause(void);
-extern void *finddevice(const char *);
-extern void *claim(unsigned long virt, unsigned long size, unsigned long align);
-extern int getprop(void *phandle, const char *name, void *buf, int buflen);
+extern void *claim(unsigned long virt, unsigned long size, unsigned long aln);
+
+static inline void exit(void)
+{
+ call_prom("exit", 0, 0);
+}
+
+static inline phandle finddevice(const char *name)
+{
+ return (phandle) call_prom("finddevice", 1, 1, name);
+}
+
+static inline int getprop(void *phandle, const char *name,
+ void *buf, int buflen)
+{
+ return call_prom("getprop", 4, 1, phandle, name, buf, buflen);
+}
+
#endif /* _PPC_BOOT_PROM_H_ */
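A short usage sketch of the inline wrappers declared above, similar to what the boot wrapper's start() does (the output string is illustrative):

    phandle chosen = finddevice("/chosen");
    ihandle out;

    if (chosen != (phandle) -1 &&
        getprop(chosen, "stdout", &out, sizeof(out)) == 4)
            write(out, "hello from the boot wrapper\r\n", 29);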
diff --git a/arch/powerpc/boot/rs6000.h b/arch/powerpc/boot/rs6000.h
new file mode 100644
index 00000000000..433f45084e4
--- /dev/null
+++ b/arch/powerpc/boot/rs6000.h
@@ -0,0 +1,243 @@
+/* IBM RS/6000 "XCOFF" file definitions for BFD.
+ Copyright (C) 1990, 1991 Free Software Foundation, Inc.
+ FIXME: Can someone provide a transliteration of this name into ASCII?
+ Using the following chars caused a compiler warning on HIUX (so I replaced
+ them with octal escapes), and isn't useful without an understanding of what
+ character set it is.
+ Written by Mimi Ph\373\364ng-Th\345o V\365 of IBM
+ and John Gilmore of Cygnus Support. */
+
+/********************** FILE HEADER **********************/
+
+struct external_filehdr {
+ char f_magic[2]; /* magic number */
+ char f_nscns[2]; /* number of sections */
+ char f_timdat[4]; /* time & date stamp */
+ char f_symptr[4]; /* file pointer to symtab */
+ char f_nsyms[4]; /* number of symtab entries */
+ char f_opthdr[2]; /* sizeof(optional hdr) */
+ char f_flags[2]; /* flags */
+};
+
+ /* IBM RS/6000 */
+#define U802WRMAGIC 0730 /* writeable text segments **chh** */
+#define U802ROMAGIC 0735 /* readonly sharable text segments */
+#define U802TOCMAGIC 0737 /* readonly text segments and TOC */
+
+#define BADMAG(x) \
+ ((x).f_magic != U802ROMAGIC && (x).f_magic != U802WRMAGIC && \
+ (x).f_magic != U802TOCMAGIC)
+
+#define FILHDR struct external_filehdr
+#define FILHSZ 20
+
+
+/********************** AOUT "OPTIONAL HEADER" **********************/
+
+
+typedef struct
+{
+ unsigned char magic[2]; /* type of file */
+ unsigned char vstamp[2]; /* version stamp */
+ unsigned char tsize[4]; /* text size in bytes, padded to FW bdry */
+ unsigned char dsize[4]; /* initialized data " " */
+ unsigned char bsize[4]; /* uninitialized data " " */
+ unsigned char entry[4]; /* entry pt. */
+ unsigned char text_start[4]; /* base of text used for this file */
+ unsigned char data_start[4]; /* base of data used for this file */
+ unsigned char o_toc[4]; /* address of TOC */
+ unsigned char o_snentry[2]; /* section number of entry point */
+ unsigned char o_sntext[2]; /* section number of .text section */
+ unsigned char o_sndata[2]; /* section number of .data section */
+ unsigned char o_sntoc[2]; /* section number of TOC */
+ unsigned char o_snloader[2]; /* section number of .loader section */
+ unsigned char o_snbss[2]; /* section number of .bss section */
+ unsigned char o_algntext[2]; /* .text alignment */
+ unsigned char o_algndata[2]; /* .data alignment */
+ unsigned char o_modtype[2]; /* module type (??) */
+ unsigned char o_cputype[2]; /* cpu type */
+ unsigned char o_maxstack[4]; /* max stack size (??) */
+ unsigned char o_maxdata[4]; /* max data size (??) */
+ unsigned char o_resv2[12]; /* reserved */
+}
+AOUTHDR;
+
+#define AOUTSZ 72
+#define SMALL_AOUTSZ (28)
+#define AOUTHDRSZ 72
+
+#define RS6K_AOUTHDR_OMAGIC 0x0107 /* old: text & data writeable */
+#define RS6K_AOUTHDR_NMAGIC 0x0108 /* new: text r/o, data r/w */
+#define RS6K_AOUTHDR_ZMAGIC 0x010B /* paged: text r/o, both page-aligned */
+
+
+/********************** SECTION HEADER **********************/
+
+
+struct external_scnhdr {
+ char s_name[8]; /* section name */
+ char s_paddr[4]; /* physical address, aliased s_nlib */
+ char s_vaddr[4]; /* virtual address */
+ char s_size[4]; /* section size */
+ char s_scnptr[4]; /* file ptr to raw data for section */
+ char s_relptr[4]; /* file ptr to relocation */
+ char s_lnnoptr[4]; /* file ptr to line numbers */
+ char s_nreloc[2]; /* number of relocation entries */
+ char s_nlnno[2]; /* number of line number entries*/
+ char s_flags[4]; /* flags */
+};
+
+/*
+ * names of "special" sections
+ */
+#define _TEXT ".text"
+#define _DATA ".data"
+#define _BSS ".bss"
+#define _PAD ".pad"
+#define _LOADER ".loader"
+
+#define SCNHDR struct external_scnhdr
+#define SCNHSZ 40
+
+/* XCOFF uses a special .loader section with type STYP_LOADER. */
+#define STYP_LOADER 0x1000
+
+/* XCOFF uses a special .debug section with type STYP_DEBUG. */
+#define STYP_DEBUG 0x2000
+
+/* XCOFF handles line number or relocation overflow by creating
+ another section header with STYP_OVRFLO set. */
+#define STYP_OVRFLO 0x8000
+
+/********************** LINE NUMBERS **********************/
+
+/* 1 line number entry for every "breakpointable" source line in a section.
+ * Line numbers are grouped on a per function basis; first entry in a function
+ * grouping will have l_lnno = 0 and in place of physical address will be the
+ * symbol table index of the function name.
+ */
+struct external_lineno {
+ union {
+ char l_symndx[4]; /* function name symbol index, iff l_lnno == 0*/
+ char l_paddr[4]; /* (physical) address of line number */
+ } l_addr;
+ char l_lnno[2]; /* line number */
+};
+
+
+#define LINENO struct external_lineno
+#define LINESZ 6
+
+
+/********************** SYMBOLS **********************/
+
+#define E_SYMNMLEN 8 /* # characters in a symbol name */
+#define E_FILNMLEN 14 /* # characters in a file name */
+#define E_DIMNUM 4 /* # array dimensions in auxiliary entry */
+
+struct external_syment
+{
+ union {
+ char e_name[E_SYMNMLEN];
+ struct {
+ char e_zeroes[4];
+ char e_offset[4];
+ } e;
+ } e;
+ char e_value[4];
+ char e_scnum[2];
+ char e_type[2];
+ char e_sclass[1];
+ char e_numaux[1];
+};
+
+
+
+#define N_BTMASK (017)
+#define N_TMASK (060)
+#define N_BTSHFT (4)
+#define N_TSHIFT (2)
+
+
+union external_auxent {
+ struct {
+ char x_tagndx[4]; /* str, un, or enum tag indx */
+ union {
+ struct {
+ char x_lnno[2]; /* declaration line number */
+ char x_size[2]; /* str/union/array size */
+ } x_lnsz;
+ char x_fsize[4]; /* size of function */
+ } x_misc;
+ union {
+ struct { /* if ISFCN, tag, or .bb */
+ char x_lnnoptr[4]; /* ptr to fcn line # */
+ char x_endndx[4]; /* entry ndx past block end */
+ } x_fcn;
+ struct { /* if ISARY, up to 4 dimen. */
+ char x_dimen[E_DIMNUM][2];
+ } x_ary;
+ } x_fcnary;
+ char x_tvndx[2]; /* tv index */
+ } x_sym;
+
+ union {
+ char x_fname[E_FILNMLEN];
+ struct {
+ char x_zeroes[4];
+ char x_offset[4];
+ } x_n;
+ } x_file;
+
+ struct {
+ char x_scnlen[4]; /* section length */
+ char x_nreloc[2]; /* # relocation entries */
+ char x_nlinno[2]; /* # line numbers */
+ } x_scn;
+
+ struct {
+ char x_tvfill[4]; /* tv fill value */
+ char x_tvlen[2]; /* length of .tv */
+ char x_tvran[2][2]; /* tv range */
+ } x_tv; /* info about .tv section (in auxent of symbol .tv)) */
+
+ struct {
+ unsigned char x_scnlen[4];
+ unsigned char x_parmhash[4];
+ unsigned char x_snhash[2];
+ unsigned char x_smtyp[1];
+ unsigned char x_smclas[1];
+ unsigned char x_stab[4];
+ unsigned char x_snstab[2];
+ } x_csect;
+
+};
+
+#define SYMENT struct external_syment
+#define SYMESZ 18
+#define AUXENT union external_auxent
+#define AUXESZ 18
+#define DBXMASK 0x80 /* for dbx storage mask */
+#define SYMNAME_IN_DEBUG(symptr) ((symptr)->n_sclass & DBXMASK)
+
+
+
+/********************** RELOCATION DIRECTIVES **********************/
+
+
+struct external_reloc {
+ char r_vaddr[4];
+ char r_symndx[4];
+ char r_size[1];
+ char r_type[1];
+};
+
+
+#define RELOC struct external_reloc
+#define RELSZ 10
+
+#define DEFAULT_DATA_SECTION_ALIGNMENT 4
+#define DEFAULT_BSS_SECTION_ALIGNMENT 4
+#define DEFAULT_TEXT_SECTION_ALIGNMENT 4
+/* For new sections we haven't heard of before */
+#define DEFAULT_SECTION_ALIGNMENT 4
diff --git a/arch/powerpc/boot/stdio.c b/arch/powerpc/boot/stdio.c
new file mode 100644
index 00000000000..b5aa522f8b7
--- /dev/null
+++ b/arch/powerpc/boot/stdio.c
@@ -0,0 +1,325 @@
+/*
+ * Copyright (C) Paul Mackerras 1997.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <stdarg.h>
+#include <stddef.h>
+#include "string.h"
+#include "stdio.h"
+#include "prom.h"
+
+size_t strnlen(const char * s, size_t count)
+{
+ const char *sc;
+
+ for (sc = s; count-- && *sc != '\0'; ++sc)
+ /* nothing */;
+ return sc - s;
+}
+
+extern unsigned int __div64_32(unsigned long long *dividend,
+ unsigned int divisor);
+
+/* The unnecessary pointer compare is there
+ * to check for type safety (n must be 64bit)
+ */
+# define do_div(n,base) ({ \
+ unsigned int __base = (base); \
+ unsigned int __rem; \
+ (void)(((typeof((n)) *)0) == ((unsigned long long *)0)); \
+ if (((n) >> 32) == 0) { \
+ __rem = (unsigned int)(n) % __base; \
+ (n) = (unsigned int)(n) / __base; \
+ } else \
+ __rem = __div64_32(&(n), __base); \
+ __rem; \
+ })
+
+static int skip_atoi(const char **s)
+{
+ int i, c;
+
+ for (i = 0; '0' <= (c = **s) && c <= '9'; ++*s)
+ i = i*10 + c - '0';
+ return i;
+}
+
+#define ZEROPAD 1 /* pad with zero */
+#define SIGN 2 /* unsigned/signed long */
+#define PLUS 4 /* show plus */
+#define SPACE 8 /* space if plus */
+#define LEFT 16 /* left justified */
+#define SPECIAL 32 /* 0x */
+#define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
+
+static char * number(char * str, unsigned long long num, int base, int size, int precision, int type)
+{
+ char c,sign,tmp[66];
+ const char *digits="0123456789abcdefghijklmnopqrstuvwxyz";
+ int i;
+
+ if (type & LARGE)
+ digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+ if (type & LEFT)
+ type &= ~ZEROPAD;
+ if (base < 2 || base > 36)
+ return 0;
+ c = (type & ZEROPAD) ? '0' : ' ';
+ sign = 0;
+ if (type & SIGN) {
+ if ((signed long long)num < 0) {
+ sign = '-';
+ num = - (signed long long)num;
+ size--;
+ } else if (type & PLUS) {
+ sign = '+';
+ size--;
+ } else if (type & SPACE) {
+ sign = ' ';
+ size--;
+ }
+ }
+ if (type & SPECIAL) {
+ if (base == 16)
+ size -= 2;
+ else if (base == 8)
+ size--;
+ }
+ i = 0;
+ if (num == 0)
+ tmp[i++]='0';
+ else while (num != 0) {
+ tmp[i++] = digits[do_div(num, base)];
+ }
+ if (i > precision)
+ precision = i;
+ size -= precision;
+ if (!(type&(ZEROPAD+LEFT)))
+ while(size-->0)
+ *str++ = ' ';
+ if (sign)
+ *str++ = sign;
+ if (type & SPECIAL) {
+ if (base==8)
+ *str++ = '0';
+ else if (base==16) {
+ *str++ = '0';
+ *str++ = digits[33];
+ }
+ }
+ if (!(type & LEFT))
+ while (size-- > 0)
+ *str++ = c;
+ while (i < precision--)
+ *str++ = '0';
+ while (i-- > 0)
+ *str++ = tmp[i];
+ while (size-- > 0)
+ *str++ = ' ';
+ return str;
+}
+
+int vsprintf(char *buf, const char *fmt, va_list args)
+{
+ int len;
+ unsigned long long num;
+ int i, base;
+ char * str;
+ const char *s;
+
+ int flags; /* flags to number() */
+
+ int field_width; /* width of output field */
+ int precision; /* min. # of digits for integers; max
+ number of chars for from string */
+ int qualifier; /* 'h', 'l', or 'L' for integer fields */
+ /* 'z' support added 23/7/1999 S.H. */
+ /* 'z' changed to 'Z' --davidm 1/25/99 */
+
+
+ for (str=buf ; *fmt ; ++fmt) {
+ if (*fmt != '%') {
+ *str++ = *fmt;
+ continue;
+ }
+
+ /* process flags */
+ flags = 0;
+ repeat:
+ ++fmt; /* this also skips first '%' */
+ switch (*fmt) {
+ case '-': flags |= LEFT; goto repeat;
+ case '+': flags |= PLUS; goto repeat;
+ case ' ': flags |= SPACE; goto repeat;
+ case '#': flags |= SPECIAL; goto repeat;
+ case '0': flags |= ZEROPAD; goto repeat;
+ }
+
+ /* get field width */
+ field_width = -1;
+ if ('0' <= *fmt && *fmt <= '9')
+ field_width = skip_atoi(&fmt);
+ else if (*fmt == '*') {
+ ++fmt;
+ /* it's the next argument */
+ field_width = va_arg(args, int);
+ if (field_width < 0) {
+ field_width = -field_width;
+ flags |= LEFT;
+ }
+ }
+
+ /* get the precision */
+ precision = -1;
+ if (*fmt == '.') {
+ ++fmt;
+ if ('0' <= *fmt && *fmt <= '9')
+ precision = skip_atoi(&fmt);
+ else if (*fmt == '*') {
+ ++fmt;
+ /* it's the next argument */
+ precision = va_arg(args, int);
+ }
+ if (precision < 0)
+ precision = 0;
+ }
+
+ /* get the conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') {
+ qualifier = *fmt;
+ ++fmt;
+ }
+
+ /* default base */
+ base = 10;
+
+ switch (*fmt) {
+ case 'c':
+ if (!(flags & LEFT))
+ while (--field_width > 0)
+ *str++ = ' ';
+ *str++ = (unsigned char) va_arg(args, int);
+ while (--field_width > 0)
+ *str++ = ' ';
+ continue;
+
+ case 's':
+ s = va_arg(args, char *);
+ if (!s)
+ s = "<NULL>";
+
+ len = strnlen(s, precision);
+
+ if (!(flags & LEFT))
+ while (len < field_width--)
+ *str++ = ' ';
+ for (i = 0; i < len; ++i)
+ *str++ = *s++;
+ while (len < field_width--)
+ *str++ = ' ';
+ continue;
+
+ case 'p':
+ if (field_width == -1) {
+ field_width = 2*sizeof(void *);
+ flags |= ZEROPAD;
+ }
+ str = number(str,
+ (unsigned long) va_arg(args, void *), 16,
+ field_width, precision, flags);
+ continue;
+
+
+ case 'n':
+ if (qualifier == 'l') {
+ long * ip = va_arg(args, long *);
+ *ip = (str - buf);
+ } else if (qualifier == 'Z') {
+ size_t * ip = va_arg(args, size_t *);
+ *ip = (str - buf);
+ } else {
+ int * ip = va_arg(args, int *);
+ *ip = (str - buf);
+ }
+ continue;
+
+ case '%':
+ *str++ = '%';
+ continue;
+
+ /* integer number formats - set up the flags and "break" */
+ case 'o':
+ base = 8;
+ break;
+
+ case 'X':
+ flags |= LARGE;
+ case 'x':
+ base = 16;
+ break;
+
+ case 'd':
+ case 'i':
+ flags |= SIGN;
+ case 'u':
+ break;
+
+ default:
+ *str++ = '%';
+ if (*fmt)
+ *str++ = *fmt;
+ else
+ --fmt;
+ continue;
+ }
+ if (qualifier == 'l') {
+ num = va_arg(args, unsigned long);
+ if (flags & SIGN)
+ num = (signed long) num;
+ } else if (qualifier == 'Z') {
+ num = va_arg(args, size_t);
+ } else if (qualifier == 'h') {
+ num = (unsigned short) va_arg(args, int);
+ if (flags & SIGN)
+ num = (signed short) num;
+ } else {
+ num = va_arg(args, unsigned int);
+ if (flags & SIGN)
+ num = (signed int) num;
+ }
+ str = number(str, num, base, field_width, precision, flags);
+ }
+ *str = '\0';
+ return str-buf;
+}
+
+int sprintf(char * buf, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i=vsprintf(buf,fmt,args);
+ va_end(args);
+ return i;
+}
+
+static char sprint_buf[1024];
+
+int
+printf(const char *fmt, ...)
+{
+ va_list args;
+ int n;
+
+ va_start(args, fmt);
+ n = vsprintf(sprint_buf, fmt, args);
+ va_end(args);
+ write(stdout, sprint_buf, n);
+ return n;
+}
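The do_div() macro above divides a 64-bit value in place and returns the remainder, which is how number() peels off digits without pulling in libgcc's 64-bit division on 32-bit. A tiny illustration:

    unsigned long long n = 1234;
    char digits[24];
    int i = 0;

    while (n != 0)
            digits[i++] = "0123456789"[do_div(n, 10)];  /* least-significant digit first */
    /* digits[] now holds '4', '3', '2', '1' */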
diff --git a/arch/powerpc/boot/stdio.h b/arch/powerpc/boot/stdio.h
index 24bd3a8dee9..eb9e16c87ae 100644
--- a/arch/powerpc/boot/stdio.h
+++ b/arch/powerpc/boot/stdio.h
@@ -7,10 +7,4 @@ extern int sprintf(char *buf, const char *fmt, ...);
extern int vsprintf(char *buf, const char *fmt, va_list args);
-extern int putc(int c, void *f);
-extern int putchar(int c);
-extern int getchar(void);
-
-extern int fputs(char *str, void *f);
-
#endif /* _PPC_BOOT_STDIO_H_ */
diff --git a/arch/powerpc/boot/string.S b/arch/powerpc/boot/string.S
index b1eeaed7db1..ac3d43b6a32 100644
--- a/arch/powerpc/boot/string.S
+++ b/arch/powerpc/boot/string.S
@@ -107,10 +107,12 @@ memcpy:
rlwinm. r7,r5,32-3,3,31 /* r7 = r5 >> 3 */
addi r6,r3,-4
addi r4,r4,-4
- beq 2f /* if less than 8 bytes to do */
+ beq 3f /* if less than 8 bytes to do */
andi. r0,r6,3 /* get dest word aligned */
mtctr r7
bne 5f
+ andi. r0,r4,3 /* check src word aligned too */
+ bne 3f
1: lwz r7,4(r4)
lwzu r8,8(r4)
stw r7,4(r6)
@@ -132,6 +134,11 @@ memcpy:
bdnz 4b
blr
5: subfic r0,r0,4
+ cmpw cr1,r0,r5
+ add r7,r0,r4
+ andi. r7,r7,3 /* will source be word-aligned too? */
+ ble cr1,3b
+ bne 3b /* do byte-by-byte if not */
mtctr r0
6: lbz r7,4(r4)
addi r4,r4,1
@@ -149,10 +156,12 @@ backwards_memcpy:
rlwinm. r7,r5,32-3,3,31 /* r7 = r5 >> 3 */
add r6,r3,r5
add r4,r4,r5
- beq 2f
+ beq 3f
andi. r0,r6,3
mtctr r7
bne 5f
+ andi. r0,r4,3
+ bne 3f
1: lwz r7,-4(r4)
lwzu r8,-8(r4)
stw r7,-4(r6)
@@ -171,7 +180,12 @@ backwards_memcpy:
stbu r0,-1(r6)
bdnz 4b
blr
-5: mtctr r0
+5: cmpw cr1,r0,r5
+ subf r7,r0,r4
+ andi. r7,r7,3
+ ble cr1,3b
+ bne 3b
+ mtctr r0
6: lbzu r7,-1(r4)
stbu r7,-1(r6)
bdnz 6b
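The extra checks added to memcpy and backwards_memcpy ensure word-sized copies are only used when source and destination can be brought to the same word alignment; otherwise the code falls back to the byte loop. A rough C rendering of that policy (a sketch, not the assembly above):

    static void *memcpy_sketch(void *dst, const void *src, unsigned long n)
    {
            unsigned char *d = dst;
            const unsigned char *s = src;

            /* word copies are only safe if both pointers share the same
             * alignment within a word */
            if ((((unsigned long) d ^ (unsigned long) s) & 3) == 0) {
                    while (((unsigned long) d & 3) && n) {
                            *d++ = *s++;
                            n--;
                    }
                    while (n >= 4) {
                            *(unsigned int *) d = *(const unsigned int *) s;
                            d += 4;
                            s += 4;
                            n -= 4;
                    }
            }
            while (n--)
                    *d++ = *s++;
            return dst;
    }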
diff --git a/arch/powerpc/boot/zImage.coff.lds b/arch/powerpc/boot/zImage.coff.lds
new file mode 100644
index 00000000000..6016251a1a2
--- /dev/null
+++ b/arch/powerpc/boot/zImage.coff.lds
@@ -0,0 +1,46 @@
+OUTPUT_ARCH(powerpc:common)
+ENTRY(_start)
+SECTIONS
+{
+ . = (5*1024*1024);
+ _start = .;
+ .text :
+ {
+ *(.text)
+ *(.fixup)
+ }
+ _etext = .;
+ . = ALIGN(4096);
+ .data :
+ {
+ *(.rodata*)
+ *(.data*)
+ *(.sdata*)
+ __got2_start = .;
+ *(.got2)
+ __got2_end = .;
+
+ _vmlinux_start = .;
+ *(.kernel:vmlinux.strip)
+ _vmlinux_end = .;
+
+ _initrd_start = .;
+ *(.kernel:initrd)
+ _initrd_end = .;
+ }
+
+ . = ALIGN(4096);
+ _edata = .;
+ __bss_start = .;
+ .bss :
+ {
+ *(.sbss)
+ *(.bss)
+ }
+ _end = . ;
+
+ /DISCARD/ :
+ {
+ *(.comment)
+ }
+}
diff --git a/arch/powerpc/configs/mpc834x_sys_defconfig b/arch/powerpc/configs/mpc834x_sys_defconfig
new file mode 100644
index 00000000000..3bff761965c
--- /dev/null
+++ b/arch/powerpc/configs/mpc834x_sys_defconfig
@@ -0,0 +1,911 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.15-g461d4edf-dirty
+# Fri Jan 13 11:01:47 2006
+#
+# CONFIG_PPC64 is not set
+CONFIG_PPC32=y
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_PPC_UDBG_16550=y
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_DEFAULT_UIMAGE=y
+
+#
+# Processor support
+#
+# CONFIG_CLASSIC32 is not set
+# CONFIG_PPC_52xx is not set
+# CONFIG_PPC_82xx is not set
+CONFIG_PPC_83xx=y
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_8xx is not set
+# CONFIG_E200 is not set
+# CONFIG_E500 is not set
+CONFIG_6xx=y
+CONFIG_83xx=y
+CONFIG_PPC_FPU=y
+CONFIG_PPC_STD_MMU=y
+CONFIG_PPC_STD_MMU_32=y
+# CONFIG_SMP is not set
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_EMBEDDED=y
+# CONFIG_KALLSYMS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+# CONFIG_EPOLL is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+CONFIG_SLAB=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+# CONFIG_KMOD is not set
+
+#
+# Block layer
+#
+# CONFIG_LBD is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_PPC_GEN550=y
+# CONFIG_WANT_EARLY_SERIAL is not set
+
+#
+# Platform support
+#
+CONFIG_MPC834x_SYS=y
+CONFIG_MPC834x=y
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_PM is not set
+# CONFIG_SOFTWARE_SUSPEND is not set
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_PPC_I8259 is not set
+CONFIG_PPC_INDIRECT_PCI=y
+CONFIG_FSL_SOC=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCI_LEGACY_PROC is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# PCI Hotplug Support
+#
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_HIGHMEM_START=0xfe000000
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_TASK_SIZE=0x80000000
+CONFIG_BOOT_LOAD=0x00800000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Macintosh device drivers
+#
+# CONFIG_WINDFARM is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# PHY device support
+#
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+CONFIG_MARVELL_PHY=y
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+
+#
+# Tulip family network device support
+#
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+CONFIG_NET_PCI=y
+# CONFIG_PCNET32 is not set
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_B44 is not set
+# CONFIG_FORCEDETH is not set
+# CONFIG_DGRS is not set
+# CONFIG_EEPRO100 is not set
+CONFIG_E100=y
+# CONFIG_FEALNX is not set
+# CONFIG_NATSEMI is not set
+# CONFIG_NE2K_PCI is not set
+# CONFIG_8139CP is not set
+# CONFIG_8139TOO is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_TLAN is not set
+# CONFIG_VIA_RHINE is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+CONFIG_GIANFAR=y
+# CONFIG_GFAR_NAPI is not set
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_83xx_WDT=y
+
+#
+# PCI-based Watchdog Cards
+#
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
+# CONFIG_NVRAM is not set
+CONFIG_GEN_RTC=y
+# CONFIG_GEN_RTC_X is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_PIIX4 is not set
+CONFIG_I2C_MPC=y
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
+# CONFIG_SCx200_ACB is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_RTC8564 is not set
+# CONFIG_SENSORS_M41T00 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_RTC_X1205_I2C is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia Capabilities Port drivers
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# InfiniBand support
+#
+# CONFIG_INFINIBAND is not set
+
+#
+# SN Devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_RELAYFS_FS is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+# CONFIG_MSDOS_PARTITION is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+
+#
+# Instrumentation Support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_DEBUG_KERNEL is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_BOOTX_TEXT is not set
+# CONFIG_SERIAL_TEXT_DEBUG is not set
+# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
+# CONFIG_PPC_EARLY_DEBUG_G5 is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS is not set
+# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
+# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Hardware crypto devices
+#
+
+#
+# SEC2.x Options
+#
+CONFIG_MPC8349E_SEC2x=y
+
+#
+# SEC2.x Test Options
+#
+CONFIG_MPC8349E_SEC2xTEST=y
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
new file mode 100644
index 00000000000..398203bd98e
--- /dev/null
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -0,0 +1,1729 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.15-rc5
+# Tue Dec 13 17:24:05 2005
+#
+# CONFIG_PPC64 is not set
+CONFIG_PPC32=y
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+
+#
+# Processor support
+#
+CONFIG_6xx=y
+# CONFIG_PPC_52xx is not set
+# CONFIG_PPC_82xx is not set
+# CONFIG_PPC_83xx is not set
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_8xx is not set
+# CONFIG_E200 is not set
+# CONFIG_E500 is not set
+CONFIG_PPC_FPU=y
+CONFIG_ALTIVEC=y
+CONFIG_PPC_STD_MMU=y
+CONFIG_PPC_STD_MMU_32=y
+# CONFIG_SMP is not set
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_HOTPLUG=y
+CONFIG_KOBJECT_UEVENT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Block layer
+#
+CONFIG_LBD=y
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# Platform support
+#
+CONFIG_PPC_MULTIPLATFORM=y
+# CONFIG_PPC_ISERIES is not set
+# CONFIG_EMBEDDED6xx is not set
+# CONFIG_APUS is not set
+# CONFIG_PPC_CHRP is not set
+CONFIG_PPC_PMAC=y
+CONFIG_PPC_OF=y
+CONFIG_MPIC=y
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_CRASH_DUMP is not set
+CONFIG_PPC_MPC106=y
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+# CONFIG_CPU_FREQ_DEBUG is not set
+CONFIG_CPU_FREQ_STAT=y
+# CONFIG_CPU_FREQ_STAT_DETAILS is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_PMAC=y
+CONFIG_PPC601_SYNC_FIX=y
+# CONFIG_TAU is not set
+# CONFIG_WANT_EARLY_SERIAL is not set
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_MISC=m
+# CONFIG_KEXEC is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_PM=y
+# CONFIG_PM_LEGACY is not set
+CONFIG_PM_DEBUG=y
+CONFIG_SOFTWARE_SUSPEND=y
+CONFIG_PM_STD_PARTITION=""
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_PPC_I8259 is not set
+CONFIG_PPC_INDIRECT_PCI=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_LEGACY_PROC=y
+# CONFIG_PCI_DEBUG is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+CONFIG_PCCARD=m
+# CONFIG_PCMCIA_DEBUG is not set
+CONFIG_PCMCIA=m
+CONFIG_PCMCIA_LOAD_CIS=y
+CONFIG_PCMCIA_IOCTL=y
+CONFIG_CARDBUS=y
+
+#
+# PC-card bridges
+#
+CONFIG_YENTA=m
+# CONFIG_PD6729 is not set
+# CONFIG_I82092 is not set
+CONFIG_PCCARD_NONSTATIC=m
+
+#
+# PCI Hotplug Support
+#
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_HIGHMEM_START=0xfe000000
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_TASK_SIZE=0x80000000
+CONFIG_BOOT_LOAD=0x00800000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_NETLINK is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+# CONFIG_IP_NF_CT_ACCT is not set
+# CONFIG_IP_NF_CONNTRACK_MARK is not set
+# CONFIG_IP_NF_CONNTRACK_EVENTS is not set
+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+CONFIG_IP_NF_NETBIOS_NS=m
+CONFIG_IP_NF_TFTP=m
+CONFIG_IP_NF_AMANDA=m
+CONFIG_IP_NF_PPTP=m
+# CONFIG_IP_NF_QUEUE is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+CONFIG_IP_NF_MATCH_IPRANGE=m
+CONFIG_IP_NF_MATCH_MAC=m
+CONFIG_IP_NF_MATCH_PKTTYPE=m
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+CONFIG_IP_NF_MATCH_RECENT=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_DSCP=m
+CONFIG_IP_NF_MATCH_AH_ESP=m
+CONFIG_IP_NF_MATCH_LENGTH=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_MATCH_TCPMSS=m
+CONFIG_IP_NF_MATCH_HELPER=m
+CONFIG_IP_NF_MATCH_STATE=m
+CONFIG_IP_NF_MATCH_CONNTRACK=m
+CONFIG_IP_NF_MATCH_OWNER=m
+# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
+# CONFIG_IP_NF_MATCH_REALM is not set
+# CONFIG_IP_NF_MATCH_SCTP is not set
+CONFIG_IP_NF_MATCH_DCCP=m
+# CONFIG_IP_NF_MATCH_COMMENT is not set
+# CONFIG_IP_NF_MATCH_HASHLIMIT is not set
+CONFIG_IP_NF_MATCH_STRING=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+# CONFIG_IP_NF_TARGET_LOG is not set
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_TARGET_TCPMSS=m
+# CONFIG_IP_NF_TARGET_NFQUEUE is not set
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_SAME=m
+CONFIG_IP_NF_NAT_SNMP_BASIC=m
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_TFTP=m
+CONFIG_IP_NF_NAT_AMANDA=m
+CONFIG_IP_NF_NAT_PPTP=m
+# CONFIG_IP_NF_MANGLE is not set
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_TARGET_NOTRACK=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+CONFIG_IP_DCCP=m
+CONFIG_INET_DCCP_DIAG=m
+
+#
+# DCCP CCIDs Configuration (EXPERIMENTAL)
+#
+CONFIG_IP_DCCP_CCID3=m
+CONFIG_IP_DCCP_TFRC_LIB=m
+
+#
+# DCCP Kernel Hacking
+#
+# CONFIG_IP_DCCP_DEBUG is not set
+# CONFIG_IP_DCCP_UNLOAD_HACK is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+CONFIG_IRDA=m
+
+#
+# IrDA protocols
+#
+CONFIG_IRLAN=m
+CONFIG_IRNET=m
+CONFIG_IRCOMM=m
+# CONFIG_IRDA_ULTRA is not set
+
+#
+# IrDA options
+#
+CONFIG_IRDA_CACHE_LAST_LSAP=y
+CONFIG_IRDA_FAST_RR=y
+# CONFIG_IRDA_DEBUG is not set
+
+#
+# Infrared-port device drivers
+#
+
+#
+# SIR device drivers
+#
+CONFIG_IRTTY_SIR=m
+
+#
+# Dongle support
+#
+# CONFIG_DONGLE is not set
+
+#
+# Old SIR device drivers
+#
+# CONFIG_IRPORT_SIR is not set
+
+#
+# Old Serial dongle support
+#
+
+#
+# FIR device drivers
+#
+# CONFIG_USB_IRDA is not set
+# CONFIG_SIGMATEL_FIR is not set
+# CONFIG_NSC_FIR is not set
+# CONFIG_WINBOND_FIR is not set
+# CONFIG_TOSHIBA_FIR is not set
+# CONFIG_SMC_IRCC_FIR is not set
+# CONFIG_ALI_FIR is not set
+# CONFIG_VLSI_FIR is not set
+# CONFIG_VIA_FIR is not set
+CONFIG_BT=m
+CONFIG_BT_L2CAP=m
+CONFIG_BT_SCO=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_HCIUSB=m
+# CONFIG_BT_HCIUSB_SCO is not set
+# CONFIG_BT_HCIUART is not set
+CONFIG_BT_HCIBCM203X=m
+# CONFIG_BT_HCIBPA10X is not set
+CONFIG_BT_HCIBFUSB=m
+# CONFIG_BT_HCIDTL1 is not set
+# CONFIG_BT_HCIBT3C is not set
+# CONFIG_BT_HCIBLUECARD is not set
+# CONFIG_BT_HCIBTUART is not set
+# CONFIG_BT_HCIVHCI is not set
+CONFIG_IEEE80211=m
+# CONFIG_IEEE80211_DEBUG is not set
+CONFIG_IEEE80211_CRYPT_WEP=m
+CONFIG_IEEE80211_CRYPT_CCMP=m
+CONFIG_IEEE80211_CRYPT_TKIP=m
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+# CONFIG_STANDALONE is not set
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=m
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+CONFIG_MAC_FLOPPY=y
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_UB=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_BLK_DEV_IDEDISK=y
+# CONFIG_IDEDISK_MULTI_MODE is not set
+CONFIG_BLK_DEV_IDECS=m
+CONFIG_BLK_DEV_IDECD=y
+# CONFIG_BLK_DEV_IDETAPE is not set
+CONFIG_BLK_DEV_IDEFLOPPY=y
+CONFIG_BLK_DEV_IDESCSI=y
+# CONFIG_IDE_TASK_IOCTL is not set
+
+#
+# IDE chipset support/bugfixes
+#
+# CONFIG_IDE_GENERIC is not set
+CONFIG_BLK_DEV_IDEPCI=y
+CONFIG_IDEPCI_SHARE_IRQ=y
+# CONFIG_BLK_DEV_OFFBOARD is not set
+CONFIG_BLK_DEV_GENERIC=y
+# CONFIG_BLK_DEV_OPTI621 is not set
+CONFIG_BLK_DEV_SL82C105=y
+CONFIG_BLK_DEV_IDEDMA_PCI=y
+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+CONFIG_IDEDMA_PCI_AUTO=y
+# CONFIG_IDEDMA_ONLYDISK is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CY82C693 is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_HPT34X is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+# CONFIG_BLK_DEV_PIIX is not set
+# CONFIG_BLK_DEV_IT821X is not set
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+CONFIG_BLK_DEV_PDC202XX_NEW=y
+# CONFIG_PDC202XX_FORCE is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+# CONFIG_BLK_DEV_VIA82CXXX is not set
+CONFIG_BLK_DEV_IDE_PMAC=y
+CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y
+CONFIG_BLK_DEV_IDEDMA_PMAC=y
+CONFIG_BLK_DEV_IDE_PMAC_BLINK=y
+# CONFIG_IDE_ARM is not set
+CONFIG_BLK_DEV_IDEDMA=y
+# CONFIG_IDEDMA_IVB is not set
+CONFIG_IDEDMA_AUTO=y
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+CONFIG_SCSI_SPI_ATTRS=y
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AACRAID is not set
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_CMDS_PER_DEVICE=253
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+CONFIG_AIC7XXX_DEBUG_ENABLE=y
+CONFIG_AIC7XXX_DEBUG_MASK=0
+CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
+CONFIG_SCSI_AIC7XXX_OLD=m
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+# CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
+CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
+CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
+# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_QLOGIC_FC is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+CONFIG_SCSI_QLA2XXX=y
+# CONFIG_SCSI_QLA21XX is not set
+# CONFIG_SCSI_QLA22XX is not set
+# CONFIG_SCSI_QLA2300 is not set
+# CONFIG_SCSI_QLA2322 is not set
+# CONFIG_SCSI_QLA6312 is not set
+# CONFIG_SCSI_QLA24XX is not set
+# CONFIG_SCSI_LPFC is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+CONFIG_SCSI_MESH=y
+CONFIG_SCSI_MESH_SYNC_RATE=5
+CONFIG_SCSI_MESH_RESET_DELAY_MS=1000
+CONFIG_SCSI_MAC53C94=y
+
+#
+# PCMCIA SCSI adapter support
+#
+# CONFIG_PCMCIA_AHA152X is not set
+# CONFIG_PCMCIA_FDOMAIN is not set
+# CONFIG_PCMCIA_NINJA_SCSI is not set
+# CONFIG_PCMCIA_QLOGIC is not set
+# CONFIG_PCMCIA_SYM53C500 is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+# CONFIG_MD_RAID10 is not set
+CONFIG_MD_RAID5=m
+CONFIG_MD_RAID6=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+# CONFIG_DM_SNAPSHOT is not set
+# CONFIG_DM_MIRROR is not set
+# CONFIG_DM_ZERO is not set
+# CONFIG_DM_MULTIPATH is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+# CONFIG_FUSION_SPI is not set
+# CONFIG_FUSION_FC is not set
+# CONFIG_FUSION_SAS is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+CONFIG_IEEE1394=m
+
+#
+# Subsystem Options
+#
+# CONFIG_IEEE1394_VERBOSEDEBUG is not set
+# CONFIG_IEEE1394_OUI_DB is not set
+CONFIG_IEEE1394_EXTRA_CONFIG_ROMS=y
+CONFIG_IEEE1394_CONFIG_ROM_IP1394=y
+# CONFIG_IEEE1394_EXPORT_FULL_API is not set
+
+#
+# Device Drivers
+#
+# CONFIG_IEEE1394_PCILYNX is not set
+CONFIG_IEEE1394_OHCI1394=m
+
+#
+# Protocol Drivers
+#
+CONFIG_IEEE1394_VIDEO1394=m
+CONFIG_IEEE1394_SBP2=m
+# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
+CONFIG_IEEE1394_ETH1394=m
+CONFIG_IEEE1394_DV1394=m
+CONFIG_IEEE1394_RAWIO=m
+# CONFIG_IEEE1394_CMP is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Macintosh device drivers
+#
+CONFIG_ADB=y
+CONFIG_ADB_CUDA=y
+CONFIG_ADB_PMU=y
+CONFIG_PMAC_APM_EMU=y
+CONFIG_PMAC_MEDIABAY=y
+CONFIG_PMAC_BACKLIGHT=y
+CONFIG_INPUT_ADBHID=y
+CONFIG_MAC_EMUMOUSEBTN=y
+CONFIG_THERM_WINDTUNNEL=m
+CONFIG_THERM_ADT746X=m
+# CONFIG_WINDFARM is not set
+# CONFIG_ANSLCD is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# PHY device support
+#
+# CONFIG_PHYLIB is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_MACE=y
+# CONFIG_MACE_AAUI_PORT is not set
+CONFIG_BMAC=y
+# CONFIG_HAPPYMEAL is not set
+CONFIG_SUNGEM=y
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+
+#
+# Tulip family network device support
+#
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+CONFIG_NET_PCI=y
+CONFIG_PCNET32=y
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_B44 is not set
+# CONFIG_FORCEDETH is not set
+# CONFIG_DGRS is not set
+# CONFIG_EEPRO100 is not set
+# CONFIG_E100 is not set
+# CONFIG_FEALNX is not set
+# CONFIG_NATSEMI is not set
+# CONFIG_NE2K_PCI is not set
+# CONFIG_8139CP is not set
+# CONFIG_8139TOO is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_TLAN is not set
+# CONFIG_VIA_RHINE is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+# CONFIG_MV643XX_ETH is not set
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+CONFIG_NET_RADIO=y
+
+#
+# Obsolete Wireless cards support (pre-802.11)
+#
+# CONFIG_STRIP is not set
+# CONFIG_PCMCIA_WAVELAN is not set
+# CONFIG_PCMCIA_NETWAVE is not set
+
+#
+# Wireless 802.11 Frequency Hopping cards support
+#
+# CONFIG_PCMCIA_RAYCS is not set
+
+#
+# Wireless 802.11b ISA/PCI cards support
+#
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
+# CONFIG_AIRO is not set
+CONFIG_HERMES=m
+CONFIG_APPLE_AIRPORT=m
+# CONFIG_PLX_HERMES is not set
+# CONFIG_TMD_HERMES is not set
+# CONFIG_NORTEL_HERMES is not set
+# CONFIG_PCI_HERMES is not set
+# CONFIG_ATMEL is not set
+
+#
+# Wireless 802.11b Pcmcia/Cardbus cards support
+#
+# CONFIG_PCMCIA_HERMES is not set
+# CONFIG_PCMCIA_SPECTRUM is not set
+# CONFIG_AIRO_CS is not set
+# CONFIG_PCMCIA_WL3501 is not set
+
+#
+# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
+#
+CONFIG_PRISM54=m
+# CONFIG_HOSTAP is not set
+CONFIG_NET_WIRELESS=y
+
+#
+# PCMCIA network device support
+#
+# CONFIG_NET_PCMCIA is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+CONFIG_PPP=y
+CONFIG_PPP_MULTILINK=y
+# CONFIG_PPP_FILTER is not set
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_BSDCOMP=m
+# CONFIG_PPP_MPPE is not set
+# CONFIG_PPPOE is not set
+# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+# CONFIG_SERIO_I8042 is not set
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_SERIO_PCIPS2 is not set
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=m
+# CONFIG_SERIAL_8250_CS is not set
+CONFIG_SERIAL_8250_NR_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=m
+# CONFIG_SERIAL_PMACZILOG is not set
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_NVRAM=y
+CONFIG_GEN_RTC=y
+# CONFIG_GEN_RTC_X is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+CONFIG_AGP=m
+CONFIG_AGP_UNINORTH=m
+CONFIG_DRM=m
+# CONFIG_DRM_TDFX is not set
+CONFIG_DRM_R128=m
+CONFIG_DRM_RADEON=m
+# CONFIG_DRM_MGA is not set
+# CONFIG_DRM_SIS is not set
+# CONFIG_DRM_VIA is not set
+# CONFIG_DRM_SAVAGE is not set
+
+#
+# PCMCIA character devices
+#
+# CONFIG_SYNCLINK_CS is not set
+# CONFIG_CARDMAN_4000 is not set
+# CONFIG_CARDMAN_4040 is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=m
+
+#
+# I2C Algorithms
+#
+CONFIG_I2C_ALGOBIT=y
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_PIIX4 is not set
+CONFIG_I2C_KEYWEST=m
+# CONFIG_I2C_MPC is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
+# CONFIG_SCx200_ACB is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_RTC8564 is not set
+# CONFIG_SENSORS_M41T00 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_RTC_X1205_I2C is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+# CONFIG_HWMON is not set
+# CONFIG_HWMON_VID is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia Capabilities Port drivers
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+CONFIG_FB=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+CONFIG_FB_MACMODES=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+CONFIG_FB_OF=y
+CONFIG_FB_CONTROL=y
+CONFIG_FB_PLATINUM=y
+CONFIG_FB_VALKYRIE=y
+CONFIG_FB_CT65550=y
+# CONFIG_FB_ASILIANT is not set
+CONFIG_FB_IMSTT=y
+# CONFIG_FB_VGA16 is not set
+# CONFIG_FB_S1D13XXX is not set
+CONFIG_FB_NVIDIA=y
+CONFIG_FB_NVIDIA_I2C=y
+# CONFIG_FB_RIVA is not set
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_MILLENIUM=y
+CONFIG_FB_MATROX_MYSTIQUE=y
+# CONFIG_FB_MATROX_G is not set
+# CONFIG_FB_MATROX_I2C is not set
+# CONFIG_FB_MATROX_MULTIHEAD is not set
+# CONFIG_FB_RADEON_OLD is not set
+CONFIG_FB_RADEON=y
+CONFIG_FB_RADEON_I2C=y
+# CONFIG_FB_RADEON_DEBUG is not set
+CONFIG_FB_ATY128=y
+CONFIG_FB_ATY=y
+CONFIG_FB_ATY_CT=y
+# CONFIG_FB_ATY_GENERIC_LCD is not set
+# CONFIG_FB_ATY_XL_INIT is not set
+CONFIG_FB_ATY_GX=y
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+CONFIG_FB_3DFX=y
+# CONFIG_FB_3DFX_ACCEL is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_CYBLA is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_VIRTUAL is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+
+#
+# Logo configuration
+#
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Sound
+#
+CONFIG_SOUND=m
+CONFIG_DMASOUND_PMAC=m
+CONFIG_DMASOUND=m
+
+#
+# Advanced Linux Sound Architecture
+#
+CONFIG_SND=m
+CONFIG_SND_TIMER=m
+CONFIG_SND_PCM=m
+CONFIG_SND_HWDEP=m
+CONFIG_SND_RAWMIDI=m
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_SEQUENCER_OSS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_GENERIC_DRIVER=y
+
+#
+# Generic devices
+#
+CONFIG_SND_DUMMY=m
+# CONFIG_SND_VIRMIDI is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+
+#
+# PCI devices
+#
+# CONFIG_SND_ALI5451 is not set
+# CONFIG_SND_ATIIXP is not set
+# CONFIG_SND_ATIIXP_MODEM is not set
+# CONFIG_SND_AU8810 is not set
+# CONFIG_SND_AU8820 is not set
+# CONFIG_SND_AU8830 is not set
+# CONFIG_SND_AZT3328 is not set
+# CONFIG_SND_BT87X is not set
+# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CS4281 is not set
+# CONFIG_SND_EMU10K1 is not set
+# CONFIG_SND_EMU10K1X is not set
+# CONFIG_SND_CA0106 is not set
+# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_MIXART is not set
+# CONFIG_SND_NM256 is not set
+# CONFIG_SND_RME32 is not set
+# CONFIG_SND_RME96 is not set
+# CONFIG_SND_RME9652 is not set
+# CONFIG_SND_HDSP is not set
+# CONFIG_SND_HDSPM is not set
+# CONFIG_SND_TRIDENT is not set
+# CONFIG_SND_YMFPCI is not set
+# CONFIG_SND_AD1889 is not set
+# CONFIG_SND_ALS4000 is not set
+# CONFIG_SND_CMIPCI is not set
+# CONFIG_SND_ENS1370 is not set
+# CONFIG_SND_ENS1371 is not set
+# CONFIG_SND_ES1938 is not set
+# CONFIG_SND_ES1968 is not set
+# CONFIG_SND_MAESTRO3 is not set
+# CONFIG_SND_FM801 is not set
+# CONFIG_SND_ICE1712 is not set
+# CONFIG_SND_ICE1724 is not set
+# CONFIG_SND_INTEL8X0 is not set
+# CONFIG_SND_INTEL8X0M is not set
+# CONFIG_SND_SONICVIBES is not set
+# CONFIG_SND_VIA82XX is not set
+# CONFIG_SND_VIA82XX_MODEM is not set
+# CONFIG_SND_VX222 is not set
+# CONFIG_SND_HDA_INTEL is not set
+
+#
+# ALSA PowerMac devices
+#
+CONFIG_SND_POWERMAC=m
+# CONFIG_SND_POWERMAC_AUTO_DRC is not set
+
+#
+# USB devices
+#
+CONFIG_SND_USB_AUDIO=m
+# CONFIG_SND_USB_USX2Y is not set
+
+#
+# PCMCIA devices
+#
+
+#
+# Open Sound System
+#
+# CONFIG_SOUND_PRIME is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_BANDWIDTH is not set
+CONFIG_USB_DYNAMIC_MINORS=y
+# CONFIG_USB_SUSPEND is not set
+# CONFIG_USB_OTG is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_EHCI_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_UHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_OBSOLETE_OSS_USB_DRIVER is not set
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+# CONFIG_USB_STORAGE is not set
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+CONFIG_USB_HIDINPUT=y
+# CONFIG_HID_FF is not set
+# CONFIG_USB_HIDDEV is not set
+# CONFIG_USB_AIPTEK is not set
+# CONFIG_USB_WACOM is not set
+# CONFIG_USB_ACECAD is not set
+# CONFIG_USB_KBTAB is not set
+# CONFIG_USB_POWERMATE is not set
+# CONFIG_USB_MTOUCH is not set
+# CONFIG_USB_ITMTOUCH is not set
+# CONFIG_USB_EGALAX is not set
+# CONFIG_USB_YEALINK is not set
+# CONFIG_USB_XPAD is not set
+# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_KEYSPAN_REMOTE is not set
+CONFIG_USB_APPLETOUCH=y
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB Multimedia devices
+#
+# CONFIG_USB_DABUSB is not set
+
+#
+# Video4Linux support is needed for USB Multimedia device support
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_AX8817X=m
+CONFIG_USB_NET_CDCETHER=m
+# CONFIG_USB_NET_GL620A is not set
+CONFIG_USB_NET_NET1080=m
+# CONFIG_USB_NET_PLUSB is not set
+# CONFIG_USB_NET_RNDIS_HOST is not set
+# CONFIG_USB_NET_CDC_SUBSET is not set
+CONFIG_USB_NET_ZAURUS=m
+# CONFIG_USB_ZD1201 is not set
+CONFIG_USB_MON=y
+
+#
+# USB port drivers
+#
+
+#
+# USB Serial Converter support
+#
+CONFIG_USB_SERIAL=m
+# CONFIG_USB_SERIAL_GENERIC is not set
+# CONFIG_USB_SERIAL_AIRPRIME is not set
+# CONFIG_USB_SERIAL_ANYDATA is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_CP2101 is not set
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+# CONFIG_USB_SERIAL_FTDI_SIO is not set
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_PL2303 is not set
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+CONFIG_USB_EZUSB=y
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGETKIT is not set
+# CONFIG_USB_PHIDGETSERVO is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TEST is not set
+
+#
+# USB DSL modem support
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# InfiniBand support
+#
+# CONFIG_INFINIBAND is not set
+
+#
+# SN Devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+CONFIG_FUSE_FS=m
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_ZISOFS_FS=y
+CONFIG_UDF_FS=m
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+CONFIG_RELAYFS_FS=m
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+# CONFIG_NFS_V3 is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+CONFIG_NFSD=y
+# CONFIG_NFSD_V3 is not set
+# CONFIG_NFSD_TCP is not set
+CONFIG_LOCKD=y
+CONFIG_EXPORTFS=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+CONFIG_SMB_FS=m
+# CONFIG_SMB_NLS_DEFAULT is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+CONFIG_MAC_PARTITION=y
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=m
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+
+#
+# Library routines
+#
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_TEXTSEARCH=y
+CONFIG_TEXTSEARCH_KMP=m
+CONFIG_TEXTSEARCH_BM=m
+CONFIG_TEXTSEARCH_FSM=m
+
+#
+# Instrumentation Support
+#
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_DEBUGGER=y
+CONFIG_XMON=y
+CONFIG_XMON_DEFAULT=y
+# CONFIG_BDI_SWITCH is not set
+CONFIG_BOOTX_TEXT=y
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+CONFIG_CRYPTO_AES=m
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+CONFIG_CRYPTO_ARC4=m
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+CONFIG_CRYPTO_MICHAEL_MIC=m
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Hardware crypto devices
+#
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 509399eab6f..0b2b55a79c3 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -861,7 +861,7 @@ CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_PMACZILOG is not set
CONFIG_SERIAL_ICOM=m
-CONFIG_SERIAL_JSM=m
+# CONFIG_SERIAL_JSM is not set
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
@@ -878,7 +878,7 @@ CONFIG_HVCS=m
#
# CONFIG_WATCHDOG is not set
# CONFIG_RTC is not set
-# CONFIG_GEN_RTC is not set
+CONFIG_GEN_RTC=y
# CONFIG_DTLK is not set
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 9ed551b6c17..a94699d8dc5 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -4,7 +4,6 @@
ifeq ($(CONFIG_PPC64),y)
EXTRA_CFLAGS += -mno-minimal-toc
-CFLAGS_ioctl32.o += -Ifs/
endif
ifeq ($(CONFIG_PPC32),y)
CFLAGS_prom_init.o += -fPIC
@@ -12,16 +11,17 @@ CFLAGS_btext.o += -fPIC
endif
obj-y := semaphore.o cputable.o ptrace.o syscalls.o \
- irq.o align.o signal_32.o pmc.o vdso.o
+ irq.o align.o signal_32.o pmc.o vdso.o \
+ init_task.o process.o
obj-y += vdso32/
obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \
signal_64.o ptrace32.o systbl.o \
- paca.o ioctl32.o cpu_setup_power4.o \
- firmware.o sysfs.o udbg.o idle_64.o
+ paca.o cpu_setup_power4.o \
+ firmware.o sysfs.o idle_64.o
obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
obj-$(CONFIG_POWER4) += idle_power4.o
-obj-$(CONFIG_PPC_OF) += of_device.o
+obj-$(CONFIG_PPC_OF) += of_device.o prom_parse.o
procfs-$(CONFIG_PPC64) := proc_ppc64.o
obj-$(CONFIG_PROC_FS) += $(procfs-y)
rtaspci-$(CONFIG_PPC64) := rtas_pci.o
@@ -30,12 +30,10 @@ obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
obj-$(CONFIG_LPARCFG) += lparcfg.o
obj-$(CONFIG_IBMVIO) += vio.o
+obj-$(CONFIG_IBMEBUS) += ibmebus.o
obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
-obj-$(CONFIG_PPC_PSERIES) += udbg_16550.o
-obj-$(CONFIG_PPC_MAPLE) += udbg_16550.o
-udbgscc-$(CONFIG_PPC64) := udbg_scc.o
-obj-$(CONFIG_PPC_PMAC) += $(udbgscc-y)
obj64-$(CONFIG_PPC_MULTIPLATFORM) += nvram_64.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
ifeq ($(CONFIG_PPC_MERGE),y)
@@ -47,26 +45,25 @@ extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
extra-$(CONFIG_8xx) := head_8xx.o
extra-y += vmlinux.lds
-obj-y += process.o init_task.o time.o \
- prom.o traps.o setup-common.o
+obj-y += time.o prom.o traps.o setup-common.o udbg.o
obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o systbl.o
obj-$(CONFIG_PPC64) += misc_64.o dma_64.o iommu.o
-obj-$(CONFIG_PPC_OF) += prom_init.o
+obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
obj-$(CONFIG_MODULES) += ppc_ksyms.o
obj-$(CONFIG_BOOTX_TEXT) += btext.o
obj-$(CONFIG_6xx) += idle_6xx.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_KPROBES) += kprobes.o
-
+obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
module-$(CONFIG_PPC64) += module_64.o
obj-$(CONFIG_MODULES) += $(module-y)
pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o pci_iommu.o \
pci_direct_iommu.o iomap.o
obj-$(CONFIG_PCI) += $(pci64-y)
-
-kexec64-$(CONFIG_PPC64) += machine_kexec_64.o
-obj-$(CONFIG_KEXEC) += $(kexec64-y)
+kexec-$(CONFIG_PPC64) := machine_kexec_64.o crash.o
+kexec-$(CONFIG_PPC32) := machine_kexec_32.o
+obj-$(CONFIG_KEXEC) += machine_kexec.o $(kexec-y)
ifeq ($(CONFIG_PPC_ISERIES),y)
$(obj)/head_64.o: $(obj)/lparmap.s
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 91538d2445b..840aad43a98 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -92,9 +92,9 @@ int main(void)
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
- DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
-#ifdef CONFIG_PPC32
+ DEFINE(TI_SIGFRAME, offsetof(struct thread_info, nvgprs_frame));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
+#ifdef CONFIG_PPC32
DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
#endif /* CONFIG_PPC32 */
@@ -131,13 +131,11 @@ int main(void)
DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
#endif /* CONFIG_HUGETLB_PAGE */
- DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
- DEFINE(PACA_EXDSI, offsetof(struct paca_struct, exdsi));
DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
- DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca));
+ DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr));
DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index bdfba92b2b3..6223d39177c 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -31,15 +31,18 @@ static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb);
static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb);
static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb);
-static int g_loc_X;
-static int g_loc_Y;
-static int g_max_loc_X;
-static int g_max_loc_Y;
+#define __force_data __attribute__((__section__(".data")))
-static int dispDeviceRowBytes;
-static int dispDeviceDepth;
-static int dispDeviceRect[4];
-static unsigned char *dispDeviceBase, *logicalDisplayBase;
+static int g_loc_X __force_data;
+static int g_loc_Y __force_data;
+static int g_max_loc_X __force_data;
+static int g_max_loc_Y __force_data;
+
+static int dispDeviceRowBytes __force_data;
+static int dispDeviceDepth __force_data;
+static int dispDeviceRect[4] __force_data;
+static unsigned char *dispDeviceBase __force_data;
+static unsigned char *logicalDisplayBase __force_data;
unsigned long disp_BAT[2] __initdata = {0, 0};
@@ -47,7 +50,7 @@ unsigned long disp_BAT[2] __initdata = {0, 0};
static unsigned char vga_font[cmapsz];
-int boot_text_mapped;
+int boot_text_mapped __force_data = 0;
int force_printk_to_btext = 0;
#ifdef CONFIG_PPC32
@@ -57,7 +60,7 @@ int force_printk_to_btext = 0;
*
* The display is mapped to virtual address 0xD0000000, rather
 * than 1:1, because some CHRP machines put the frame buffer
- * in the region starting at 0xC0000000 (KERNELBASE).
+ * in the region starting at 0xC0000000 (PAGE_OFFSET).
* This mapping is temporary and will disappear as soon as the
* setup done by MMU_Init() is applied.
*
@@ -66,10 +69,9 @@ int force_printk_to_btext = 0;
* is really badly aligned, but I didn't encounter this case
* yet.
*/
-void __init
-btext_prepare_BAT(void)
+void __init btext_prepare_BAT(void)
{
- unsigned long vaddr = KERNELBASE + 0x10000000;
+ unsigned long vaddr = PAGE_OFFSET + 0x10000000;
unsigned long addr;
unsigned long lowbits;
@@ -95,12 +97,13 @@ btext_prepare_BAT(void)
}
#endif
-/* This function will enable the early boot text when doing OF booting. This
- * way, xmon output should work too
+
+/* This function can be used to enable the early boot text when doing
+ * OF booting or within bootx init. It must be followed by a btext_unmap()
+ * call before the logical address becomes unusable
*/
-void __init
-btext_setup_display(int width, int height, int depth, int pitch,
- unsigned long address)
+void __init btext_setup_display(int width, int height, int depth, int pitch,
+ unsigned long address)
{
g_loc_X = 0;
g_loc_Y = 0;
@@ -116,6 +119,11 @@ btext_setup_display(int width, int height, int depth, int pitch,
boot_text_mapped = 1;
}
+void __init btext_unmap(void)
+{
+ boot_text_mapped = 0;
+}
+
/* Here's a small text engine to use during early boot
* or for debugging purposes
*
@@ -127,7 +135,7 @@ btext_setup_display(int width, int height, int depth, int pitch,
* changes.
*/
-void map_boot_text(void)
+static void map_boot_text(void)
{
unsigned long base, offset, size;
unsigned char *vbase;
@@ -175,8 +183,9 @@ int btext_initialize(struct device_node *np)
if (prop)
address = *prop;
- /* FIXME: Add support for PCI reg properties */
-
+ /* FIXME: Add support for PCI reg properties. Right now, only
+ * reliable on macs
+ */
if (address == 0)
return -EINVAL;
@@ -184,7 +193,6 @@ int btext_initialize(struct device_node *np)
g_loc_Y = 0;
g_max_loc_X = width / 8;
g_max_loc_Y = height / 16;
- logicalDisplayBase = (unsigned char *)address;
dispDeviceBase = (unsigned char *)address;
dispDeviceRowBytes = pitch;
dispDeviceDepth = depth;
@@ -197,14 +205,12 @@ int btext_initialize(struct device_node *np)
return 0;
}
-void __init init_boot_display(void)
+int __init btext_find_display(int allow_nonstdout)
{
char *name;
struct device_node *np = NULL;
int rc = -ENODEV;
- printk("trying to initialize btext ...\n");
-
name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
if (name != NULL) {
np = of_find_node_by_path(name);
@@ -218,8 +224,8 @@ void __init init_boot_display(void)
}
if (np)
rc = btext_initialize(np);
- if (rc == 0)
- return;
+ if (rc == 0 || !allow_nonstdout)
+ return rc;
for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
if (get_property(np, "linux,opened", NULL)) {
@@ -228,8 +234,9 @@ void __init init_boot_display(void)
printk("result: %d\n", rc);
}
if (rc == 0)
- return;
+ break;
}
+ return rc;
}
/* Calc the base address of a given point (x,y) */
@@ -277,44 +284,83 @@ EXPORT_SYMBOL(btext_update_display);
void btext_clearscreen(void)
{
- unsigned long *base = (unsigned long *)calc_base(0, 0);
+ unsigned int *base = (unsigned int *)calc_base(0, 0);
unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
- (dispDeviceDepth >> 3)) >> 3;
+ (dispDeviceDepth >> 3)) >> 2;
int i,j;
for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1]); i++)
{
- unsigned long *ptr = base;
+ unsigned int *ptr = base;
for(j=width; j; --j)
*(ptr++) = 0;
- base += (dispDeviceRowBytes >> 3);
+ base += (dispDeviceRowBytes >> 2);
}
}
+void btext_flushscreen(void)
+{
+ unsigned int *base = (unsigned int *)calc_base(0, 0);
+ unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
+ (dispDeviceDepth >> 3)) >> 2;
+ int i,j;
+
+ for (i=0; i < (dispDeviceRect[3] - dispDeviceRect[1]); i++)
+ {
+ unsigned int *ptr = base;
+ for(j = width; j > 0; j -= 8) {
+ __asm__ __volatile__ ("dcbst 0,%0" :: "r" (ptr));
+ ptr += 8;
+ }
+ base += (dispDeviceRowBytes >> 2);
+ }
+ __asm__ __volatile__ ("sync" ::: "memory");
+}
+
+void btext_flushline(void)
+{
+ unsigned int *base = (unsigned int *)calc_base(0, g_loc_Y << 4);
+ unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
+ (dispDeviceDepth >> 3)) >> 2;
+ int i,j;
+
+ for (i=0; i < 16; i++)
+ {
+ unsigned int *ptr = base;
+ for(j = width; j > 0; j -= 8) {
+ __asm__ __volatile__ ("dcbst 0,%0" :: "r" (ptr));
+ ptr += 8;
+ }
+ base += (dispDeviceRowBytes >> 2);
+ }
+ __asm__ __volatile__ ("sync" ::: "memory");
+}
+
+
#ifndef NO_SCROLL
static void scrollscreen(void)
{
- unsigned long *src = (unsigned long *)calc_base(0,16);
- unsigned long *dst = (unsigned long *)calc_base(0,0);
+ unsigned int *src = (unsigned int *)calc_base(0,16);
+ unsigned int *dst = (unsigned int *)calc_base(0,0);
unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
- (dispDeviceDepth >> 3)) >> 3;
+ (dispDeviceDepth >> 3)) >> 2;
int i,j;
for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1] - 16); i++)
{
- unsigned long *src_ptr = src;
- unsigned long *dst_ptr = dst;
+ unsigned int *src_ptr = src;
+ unsigned int *dst_ptr = dst;
for(j=width; j; --j)
*(dst_ptr++) = *(src_ptr++);
- src += (dispDeviceRowBytes >> 3);
- dst += (dispDeviceRowBytes >> 3);
+ src += (dispDeviceRowBytes >> 2);
+ dst += (dispDeviceRowBytes >> 2);
}
for (i=0; i<16; i++)
{
- unsigned long *dst_ptr = dst;
+ unsigned int *dst_ptr = dst;
for(j=width; j; --j)
*(dst_ptr++) = 0;
- dst += (dispDeviceRowBytes >> 3);
+ dst += (dispDeviceRowBytes >> 2);
}
}
#endif /* ndef NO_SCROLL */
@@ -377,6 +423,14 @@ void btext_drawstring(const char *c)
btext_drawchar(*c++);
}
+void btext_drawtext(const char *c, unsigned int len)
+{
+ if (!boot_text_mapped)
+ return;
+ while (len--)
+ btext_drawchar(*c++);
+}
+
void btext_drawhex(unsigned long v)
{
char *hex_table = "0123456789abcdef";
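
The new comment on btext_setup_display() above says the call has to be paired with btext_unmap() before the logical framebuffer address stops being valid. As a rough illustration of that ordering only -- the wrapper function, the display geometry and the fb_phys argument below are invented for this example and are not part of the patch -- a caller would look roughly like:

	/* Hedged sketch of the intended btext call ordering; illustrative only. */
	extern void btext_setup_display(int width, int height, int depth, int pitch,
					unsigned long address);
	extern void btext_drawstring(const char *s);
	extern void btext_unmap(void);

	static void example_early_btext(unsigned long fb_phys)
	{
		/* Enable btext while the firmware mapping of the frame buffer
		 * is still usable. */
		btext_setup_display(1024, 768, 8, 1024, fb_phys);
		btext_drawstring("early boot console up\n");

		/* Per the comment above, unmap before the logical address
		 * becomes unusable, e.g. before MMU setup tears down the
		 * firmware mapping. */
		btext_unmap();
	}
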
diff --git a/arch/powerpc/kernel/cpu_setup_power4.S b/arch/powerpc/kernel/cpu_setup_power4.S
index cca942fe611..b61d86e7ceb 100644
--- a/arch/powerpc/kernel/cpu_setup_power4.S
+++ b/arch/powerpc/kernel/cpu_setup_power4.S
@@ -130,7 +130,7 @@ _GLOBAL(__save_cpu_setup)
mfcr r7
/* Get storage ptr */
- LOADADDR(r5,cpu_state_storage)
+ LOAD_REG_IMMEDIATE(r5,cpu_state_storage)
/* We only deal with 970 for now */
mfspr r0,SPRN_PVR
@@ -164,7 +164,7 @@ _GLOBAL(__restore_cpu_setup)
/* Get storage ptr (FIXME when using anton reloc as we
* are running with translation disabled here
*/
- LOADADDR(r5,cpu_state_storage)
+ LOAD_REG_IMMEDIATE(r5,cpu_state_storage)
/* We only deal with 970 for now */
mfspr r0,SPRN_PVR
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 1d85cedbbb7..10696456a4c 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -55,7 +55,8 @@ extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
#define COMMON_USER_POWER4 (COMMON_USER_PPC64 | PPC_FEATURE_POWER4)
#define COMMON_USER_POWER5 (COMMON_USER_PPC64 | PPC_FEATURE_POWER5)
#define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS)
-
+#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
+ PPC_FEATURE_BOOKE)
/* We only set the spe features if the kernel was compiled with
* spe support
@@ -78,10 +79,9 @@ struct cpu_spec cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 8,
.cpu_setup = __setup_cpu_power3,
-#ifdef CONFIG_OPROFILE
.oprofile_cpu_type = "ppc64/power3",
- .oprofile_model = &op_model_rs64,
-#endif
+ .oprofile_type = PPC_OPROFILE_RS64,
+ .platform = "power3",
},
{ /* Power3+ */
.pvr_mask = 0xffff0000,
@@ -93,10 +93,9 @@ struct cpu_spec cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 8,
.cpu_setup = __setup_cpu_power3,
-#ifdef CONFIG_OPROFILE
.oprofile_cpu_type = "ppc64/power3",
- .oprofile_model = &op_model_rs64,
-#endif
+ .oprofile_type = PPC_OPROFILE_RS64,
+ .platform = "power3",
},
{ /* Northstar */
.pvr_mask = 0xffff0000,
@@ -108,10 +107,9 @@ struct cpu_spec cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 8,
.cpu_setup = __setup_cpu_power3,
-#ifdef CONFIG_OPROFILE
.oprofile_cpu_type = "ppc64/rs64",
- .oprofile_model = &op_model_rs64,
-#endif
+ .oprofile_type = PPC_OPROFILE_RS64,
+ .platform = "rs64",
},
{ /* Pulsar */
.pvr_mask = 0xffff0000,
@@ -123,10 +121,9 @@ struct cpu_spec cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 8,
.cpu_setup = __setup_cpu_power3,
-#ifdef CONFIG_OPROFILE
.oprofile_cpu_type = "ppc64/rs64",
- .oprofile_model = &op_model_rs64,
-#endif
+ .oprofile_type = PPC_OPROFILE_RS64,
+ .platform = "rs64",
},
{ /* I-star */
.pvr_mask = 0xffff0000,
@@ -138,10 +135,9 @@ struct cpu_spec cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 8,
.cpu_setup = __setup_cpu_power3,
-#ifdef CONFIG_OPROFILE
.oprofile_cpu_type = "ppc64/rs64",
- .oprofile_model = &op_model_rs64,
-#endif
+ .oprofile_type = PPC_OPROFILE_RS64,
+ .platform = "rs64",
},
{ /* S-star */
.pvr_mask = 0xffff0000,
@@ -153,10 +149,9 @@ struct cpu_spec cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 8,
.cpu_setup = __setup_cpu_power3,
-#ifdef CONFIG_OPROFILE
.oprofile_cpu_type = "ppc64/rs64",
- .oprofile_model = &op_model_rs64,
-#endif
+ .oprofile_type = PPC_OPROFILE_RS64,
+ .platform = "rs64",
},
{ /* Power4 */
.pvr_mask = 0xffff0000,
@@ -168,10 +163,9 @@ struct cpu_spec cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 8,
.cpu_setup = __setup_cpu_power4,
-#ifdef CONFIG_OPROFILE
.oprofile_cpu_type = "ppc64/power4",
- .oprofile_model = &op_model_rs64,
-#endif
+ .oprofile_type = PPC_OPROFILE_POWER4,
+ .platform = "power4",
},
{ /* Power4+ */
.pvr_mask = 0xffff0000,
@@ -183,10 +177,9 @@ struct cpu_spec cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 8,
.cpu_setup = __setup_cpu_power4,
-#ifdef CONFIG_OPROFILE
.oprofile_cpu_type = "ppc64/power4",
- .oprofile_model = &op_model_power4,
-#endif
+ .oprofile_type = PPC_OPROFILE_POWER4,
+ .platform = "power4",
},
{ /* PPC970 */
.pvr_mask = 0xffff0000,
@@ -199,10 +192,9 @@ struct cpu_spec cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 8,
.cpu_setup = __setup_cpu_ppc970,
-#ifdef CONFIG_OPROFILE
.oprofile_cpu_type = "ppc64/970",
- .oprofile_model = &op_model_power4,
-#endif
+ .oprofile_type = PPC_OPROFILE_POWER4,
+ .platform = "ppc970",
},
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_PPC64) || defined(CONFIG_POWER4)
@@ -221,10 +213,9 @@ struct cpu_spec cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 8,
.cpu_setup = __setup_cpu_ppc970,
-#ifdef CONFIG_OPROFILE
.oprofile_cpu_type = "ppc64/970",
- .oprofile_model = &op_model_power4,
-#endif
+ .oprofile_type = PPC_OPROFILE_POWER4,
+ .platform = "ppc970",
},
#endif /* defined(CONFIG_PPC64) || defined(CONFIG_POWER4) */
#ifdef CONFIG_PPC64
@@ -238,10 +229,9 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.cpu_setup = __setup_cpu_ppc970,
-#ifdef CONFIG_OPROFILE
.oprofile_cpu_type = "ppc64/970",
- .oprofile_model = &op_model_power4,
-#endif
+ .oprofile_type = PPC_OPROFILE_POWER4,
+ .platform = "ppc970",
},
{ /* Power5 GR */
.pvr_mask = 0xffff0000,
@@ -253,27 +243,25 @@ struct cpu_spec cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.cpu_setup = __setup_cpu_power4,
-#ifdef CONFIG_OPROFILE
.oprofile_cpu_type = "ppc64/power5",
- .oprofile_model = &op_model_power4,
-#endif
+ .oprofile_type = PPC_OPROFILE_POWER4,
+ .platform = "power5",
},
{ /* Power5 GS */
.pvr_mask = 0xffff0000,
.pvr_value = 0x003b0000,
- .cpu_name = "POWER5 (gs)",
+ .cpu_name = "POWER5+ (gs)",
.cpu_features = CPU_FTRS_POWER5,
.cpu_user_features = COMMON_USER_POWER5_PLUS,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
.cpu_setup = __setup_cpu_power4,
-#ifdef CONFIG_OPROFILE
- .oprofile_cpu_type = "ppc64/power5",
- .oprofile_model = &op_model_power4,
-#endif
+ .oprofile_cpu_type = "ppc64/power5+",
+ .oprofile_type = PPC_OPROFILE_POWER4,
+ .platform = "power5+",
},
- { /* BE DD1.x */
+ { /* Cell Broadband Engine */
.pvr_mask = 0xffff0000,
.pvr_value = 0x00700000,
.cpu_name = "Cell Broadband Engine",
@@ -283,6 +271,7 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.cpu_setup = __setup_cpu_be,
+ .platform = "ppc-cell-be",
},
{ /* default match */
.pvr_mask = 0x00000000,
@@ -294,6 +283,7 @@ struct cpu_spec cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.cpu_setup = __setup_cpu_power4,
+ .platform = "power4",
}
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC32
@@ -307,6 +297,7 @@ struct cpu_spec cpu_specs[] = {
PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc601",
},
{ /* 603 */
.pvr_mask = 0xffff0000,
@@ -316,7 +307,8 @@ struct cpu_spec cpu_specs[] = {
.cpu_user_features = COMMON_USER,
.icache_bsize = 32,
.dcache_bsize = 32,
- .cpu_setup = __setup_cpu_603
+ .cpu_setup = __setup_cpu_603,
+ .platform = "ppc603",
},
{ /* 603e */
.pvr_mask = 0xffff0000,
@@ -326,7 +318,8 @@ struct cpu_spec cpu_specs[] = {
.cpu_user_features = COMMON_USER,
.icache_bsize = 32,
.dcache_bsize = 32,
- .cpu_setup = __setup_cpu_603
+ .cpu_setup = __setup_cpu_603,
+ .platform = "ppc603",
},
{ /* 603ev */
.pvr_mask = 0xffff0000,
@@ -336,7 +329,8 @@ struct cpu_spec cpu_specs[] = {
.cpu_user_features = COMMON_USER,
.icache_bsize = 32,
.dcache_bsize = 32,
- .cpu_setup = __setup_cpu_603
+ .cpu_setup = __setup_cpu_603,
+ .platform = "ppc603",
},
{ /* 604 */
.pvr_mask = 0xffff0000,
@@ -347,7 +341,8 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 2,
- .cpu_setup = __setup_cpu_604
+ .cpu_setup = __setup_cpu_604,
+ .platform = "ppc604",
},
{ /* 604e */
.pvr_mask = 0xfffff000,
@@ -358,7 +353,8 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
- .cpu_setup = __setup_cpu_604
+ .cpu_setup = __setup_cpu_604,
+ .platform = "ppc604",
},
{ /* 604r */
.pvr_mask = 0xffff0000,
@@ -369,7 +365,8 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
- .cpu_setup = __setup_cpu_604
+ .cpu_setup = __setup_cpu_604,
+ .platform = "ppc604",
},
{ /* 604ev */
.pvr_mask = 0xffff0000,
@@ -380,7 +377,8 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
- .cpu_setup = __setup_cpu_604
+ .cpu_setup = __setup_cpu_604,
+ .platform = "ppc604",
},
{ /* 740/750 (0x4202, don't support TAU ?) */
.pvr_mask = 0xffffffff,
@@ -391,7 +389,8 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
- .cpu_setup = __setup_cpu_750
+ .cpu_setup = __setup_cpu_750,
+ .platform = "ppc750",
},
{ /* 750CX (80100 and 8010x?) */
.pvr_mask = 0xfffffff0,
@@ -402,7 +401,8 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
- .cpu_setup = __setup_cpu_750cx
+ .cpu_setup = __setup_cpu_750cx,
+ .platform = "ppc750",
},
{ /* 750CX (82201 and 82202) */
.pvr_mask = 0xfffffff0,
@@ -413,7 +413,8 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
- .cpu_setup = __setup_cpu_750cx
+ .cpu_setup = __setup_cpu_750cx,
+ .platform = "ppc750",
},
{ /* 750CXe (82214) */
.pvr_mask = 0xfffffff0,
@@ -424,7 +425,8 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
- .cpu_setup = __setup_cpu_750cx
+ .cpu_setup = __setup_cpu_750cx,
+ .platform = "ppc750",
},
{ /* 750CXe "Gekko" (83214) */
.pvr_mask = 0xffffffff,
@@ -435,7 +437,8 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
- .cpu_setup = __setup_cpu_750cx
+ .cpu_setup = __setup_cpu_750cx,
+ .platform = "ppc750",
},
{ /* 745/755 */
.pvr_mask = 0xfffff000,
@@ -446,7 +449,8 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
- .cpu_setup = __setup_cpu_750
+ .cpu_setup = __setup_cpu_750,
+ .platform = "ppc750",
},
{ /* 750FX rev 1.x */
.pvr_mask = 0xffffff00,
@@ -457,7 +461,8 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
- .cpu_setup = __setup_cpu_750
+ .cpu_setup = __setup_cpu_750,
+ .platform = "ppc750",
},
{ /* 750FX rev 2.0 must disable HID0[DPM] */
.pvr_mask = 0xffffffff,
@@ -468,7 +473,8 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
- .cpu_setup = __setup_cpu_750
+ .cpu_setup = __setup_cpu_750,
+ .platform = "ppc750",
},
{ /* 750FX (All revs except 2.0) */
.pvr_mask = 0xffff0000,
@@ -479,7 +485,8 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
- .cpu_setup = __setup_cpu_750fx
+ .cpu_setup = __setup_cpu_750fx,
+ .platform = "ppc750",
},
{ /* 750GX */
.pvr_mask = 0xffff0000,
@@ -490,7 +497,8 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
- .cpu_setup = __setup_cpu_750fx
+ .cpu_setup = __setup_cpu_750fx,
+ .platform = "ppc750",
},
{ /* 740/750 (L2CR bit need fixup for 740) */
.pvr_mask = 0xffff0000,
@@ -501,7 +509,8 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
- .cpu_setup = __setup_cpu_750
+ .cpu_setup = __setup_cpu_750,
+ .platform = "ppc750",
},
{ /* 7400 rev 1.1 ? (no TAU) */
.pvr_mask = 0xffffffff,
@@ -512,7 +521,8 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
- .cpu_setup = __setup_cpu_7400
+ .cpu_setup = __setup_cpu_7400,
+ .platform = "ppc7400",
},
{ /* 7400 */
.pvr_mask = 0xffff0000,
@@ -523,7 +533,8 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
- .cpu_setup = __setup_cpu_7400
+ .cpu_setup = __setup_cpu_7400,
+ .platform = "ppc7400",
},
{ /* 7410 */
.pvr_mask = 0xffff0000,
@@ -534,7 +545,8 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
- .cpu_setup = __setup_cpu_7410
+ .cpu_setup = __setup_cpu_7410,
+ .platform = "ppc7400",
},
{ /* 7450 2.0 - no doze/nap */
.pvr_mask = 0xffffffff,
@@ -545,7 +557,10 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
+ .cpu_setup = __setup_cpu_745x,
+ .oprofile_cpu_type = "ppc/7450",
+ .oprofile_type = PPC_OPROFILE_G4,
+ .platform = "ppc7450",
},
{ /* 7450 2.1 */
.pvr_mask = 0xffffffff,
@@ -556,7 +571,10 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
+ .cpu_setup = __setup_cpu_745x,
+ .oprofile_cpu_type = "ppc/7450",
+ .oprofile_type = PPC_OPROFILE_G4,
+ .platform = "ppc7450",
},
{ /* 7450 2.3 and newer */
.pvr_mask = 0xffff0000,
@@ -567,7 +585,10 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
+ .cpu_setup = __setup_cpu_745x,
+ .oprofile_cpu_type = "ppc/7450",
+ .oprofile_type = PPC_OPROFILE_G4,
+ .platform = "ppc7450",
},
{ /* 7455 rev 1.x */
.pvr_mask = 0xffffff00,
@@ -578,7 +599,10 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
+ .cpu_setup = __setup_cpu_745x,
+ .oprofile_cpu_type = "ppc/7450",
+ .oprofile_type = PPC_OPROFILE_G4,
+ .platform = "ppc7450",
},
{ /* 7455 rev 2.0 */
.pvr_mask = 0xffffffff,
@@ -589,7 +613,10 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
+ .cpu_setup = __setup_cpu_745x,
+ .oprofile_cpu_type = "ppc/7450",
+ .oprofile_type = PPC_OPROFILE_G4,
+ .platform = "ppc7450",
},
{ /* 7455 others */
.pvr_mask = 0xffff0000,
@@ -600,7 +627,10 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
+ .cpu_setup = __setup_cpu_745x,
+ .oprofile_cpu_type = "ppc/7450",
+ .oprofile_type = PPC_OPROFILE_G4,
+ .platform = "ppc7450",
},
{ /* 7447/7457 Rev 1.0 */
.pvr_mask = 0xffffffff,
@@ -611,7 +641,10 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
+ .cpu_setup = __setup_cpu_745x,
+ .oprofile_cpu_type = "ppc/7450",
+ .oprofile_type = PPC_OPROFILE_G4,
+ .platform = "ppc7450",
},
{ /* 7447/7457 Rev 1.1 */
.pvr_mask = 0xffffffff,
@@ -622,7 +655,10 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
+ .cpu_setup = __setup_cpu_745x,
+ .oprofile_cpu_type = "ppc/7450",
+ .oprofile_type = PPC_OPROFILE_G4,
+ .platform = "ppc7450",
},
{ /* 7447/7457 Rev 1.2 and later */
.pvr_mask = 0xffff0000,
@@ -633,7 +669,10 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
+ .cpu_setup = __setup_cpu_745x,
+ .oprofile_cpu_type = "ppc/7450",
+ .oprofile_type = PPC_OPROFILE_G4,
+ .platform = "ppc7450",
},
{ /* 7447A */
.pvr_mask = 0xffff0000,
@@ -644,7 +683,10 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
+ .cpu_setup = __setup_cpu_745x,
+ .oprofile_cpu_type = "ppc/7450",
+ .oprofile_type = PPC_OPROFILE_G4,
+ .platform = "ppc7450",
},
{ /* 7448 */
.pvr_mask = 0xffff0000,
@@ -655,7 +697,10 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
- .cpu_setup = __setup_cpu_745x
+ .cpu_setup = __setup_cpu_745x,
+ .oprofile_cpu_type = "ppc/7450",
+ .oprofile_type = PPC_OPROFILE_G4,
+ .platform = "ppc7450",
},
{ /* 82xx (8240, 8245, 8260 are all 603e cores) */
.pvr_mask = 0x7fff0000,
@@ -665,7 +710,8 @@ struct cpu_spec cpu_specs[] = {
.cpu_user_features = COMMON_USER,
.icache_bsize = 32,
.dcache_bsize = 32,
- .cpu_setup = __setup_cpu_603
+ .cpu_setup = __setup_cpu_603,
+ .platform = "ppc603",
},
{ /* All G2_LE (603e core, plus some) have the same pvr */
.pvr_mask = 0x7fff0000,
@@ -675,7 +721,8 @@ struct cpu_spec cpu_specs[] = {
.cpu_user_features = COMMON_USER,
.icache_bsize = 32,
.dcache_bsize = 32,
- .cpu_setup = __setup_cpu_603
+ .cpu_setup = __setup_cpu_603,
+ .platform = "ppc603",
},
{ /* e300 (a 603e core, plus some) on 83xx */
.pvr_mask = 0x7fff0000,
@@ -685,7 +732,8 @@ struct cpu_spec cpu_specs[] = {
.cpu_user_features = COMMON_USER,
.icache_bsize = 32,
.dcache_bsize = 32,
- .cpu_setup = __setup_cpu_603
+ .cpu_setup = __setup_cpu_603,
+ .platform = "ppc603",
},
{ /* default match, we assume split I/D cache & TB (non-601)... */
.pvr_mask = 0x00000000,
@@ -695,6 +743,7 @@ struct cpu_spec cpu_specs[] = {
.cpu_user_features = COMMON_USER,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc603",
},
#endif /* CLASSIC_PPC */
#ifdef CONFIG_8xx
@@ -708,6 +757,7 @@ struct cpu_spec cpu_specs[] = {
.cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
.icache_bsize = 16,
.dcache_bsize = 16,
+ .platform = "ppc823",
},
#endif /* CONFIG_8xx */
#ifdef CONFIG_40x
@@ -719,6 +769,7 @@ struct cpu_spec cpu_specs[] = {
.cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
.icache_bsize = 16,
.dcache_bsize = 16,
+ .platform = "ppc403",
},
{ /* 403GCX */
.pvr_mask = 0xffffff00,
@@ -729,6 +780,7 @@ struct cpu_spec cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB,
.icache_bsize = 16,
.dcache_bsize = 16,
+ .platform = "ppc403",
},
{ /* 403G ?? */
.pvr_mask = 0xffff0000,
@@ -738,6 +790,7 @@ struct cpu_spec cpu_specs[] = {
.cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
.icache_bsize = 16,
.dcache_bsize = 16,
+ .platform = "ppc403",
},
{ /* 405GP */
.pvr_mask = 0xffff0000,
@@ -748,6 +801,7 @@ struct cpu_spec cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc405",
},
{ /* STB 03xxx */
.pvr_mask = 0xffff0000,
@@ -758,6 +812,7 @@ struct cpu_spec cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc405",
},
{ /* STB 04xxx */
.pvr_mask = 0xffff0000,
@@ -768,6 +823,7 @@ struct cpu_spec cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc405",
},
{ /* NP405L */
.pvr_mask = 0xffff0000,
@@ -778,6 +834,7 @@ struct cpu_spec cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc405",
},
{ /* NP4GS3 */
.pvr_mask = 0xffff0000,
@@ -788,6 +845,7 @@ struct cpu_spec cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc405",
},
{ /* NP405H */
.pvr_mask = 0xffff0000,
@@ -798,6 +856,7 @@ struct cpu_spec cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc405",
},
{ /* 405GPr */
.pvr_mask = 0xffff0000,
@@ -808,6 +867,7 @@ struct cpu_spec cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc405",
},
{ /* STBx25xx */
.pvr_mask = 0xffff0000,
@@ -818,6 +878,7 @@ struct cpu_spec cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc405",
},
{ /* 405LP */
.pvr_mask = 0xffff0000,
@@ -827,6 +888,7 @@ struct cpu_spec cpu_specs[] = {
.cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc405",
},
{ /* Xilinx Virtex-II Pro */
.pvr_mask = 0xffff0000,
@@ -837,6 +899,7 @@ struct cpu_spec cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc405",
},
{ /* 405EP */
.pvr_mask = 0xffff0000,
@@ -847,6 +910,7 @@ struct cpu_spec cpu_specs[] = {
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc405",
},
#endif /* CONFIG_40x */
@@ -856,81 +920,90 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x40000850,
.cpu_name = "440EP Rev. A",
.cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER, /* 440EP has an FPU */
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc440",
},
{
.pvr_mask = 0xf0000fff,
.pvr_value = 0x400008d3,
.cpu_name = "440EP Rev. B",
.cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER, /* 440EP has an FPU */
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc440",
},
{ /* 440GP Rev. B */
.pvr_mask = 0xf0000fff,
.pvr_value = 0x40000440,
.cpu_name = "440GP Rev. B",
.cpu_features = CPU_FTRS_44X,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc440gp",
},
{ /* 440GP Rev. C */
.pvr_mask = 0xf0000fff,
.pvr_value = 0x40000481,
.cpu_name = "440GP Rev. C",
.cpu_features = CPU_FTRS_44X,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc440gp",
},
{ /* 440GX Rev. A */
.pvr_mask = 0xf0000fff,
.pvr_value = 0x50000850,
.cpu_name = "440GX Rev. A",
.cpu_features = CPU_FTRS_44X,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc440",
},
{ /* 440GX Rev. B */
.pvr_mask = 0xf0000fff,
.pvr_value = 0x50000851,
.cpu_name = "440GX Rev. B",
.cpu_features = CPU_FTRS_44X,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc440",
},
{ /* 440GX Rev. C */
.pvr_mask = 0xf0000fff,
.pvr_value = 0x50000892,
.cpu_name = "440GX Rev. C",
.cpu_features = CPU_FTRS_44X,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc440",
},
{ /* 440GX Rev. F */
.pvr_mask = 0xf0000fff,
.pvr_value = 0x50000894,
.cpu_name = "440GX Rev. F",
.cpu_features = CPU_FTRS_44X,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc440",
},
{ /* 440SP Rev. A */
.pvr_mask = 0xff000fff,
.pvr_value = 0x53000891,
.cpu_name = "440SP Rev. A",
.cpu_features = CPU_FTRS_44X,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc440",
},
{ /* 440SPe Rev. A */
.pvr_mask = 0xff000fff,
@@ -938,9 +1011,10 @@ struct cpu_spec cpu_specs[] = {
.cpu_name = "440SPe Rev. A",
.cpu_features = CPU_FTR_SPLIT_ID_CACHE |
CPU_FTR_USE_TB,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "ppc440",
},
#endif /* CONFIG_44x */
#ifdef CONFIG_FSL_BOOKE
@@ -950,10 +1024,11 @@ struct cpu_spec cpu_specs[] = {
.cpu_name = "e200z5",
/* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
.cpu_features = CPU_FTRS_E200,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_EFP_SINGLE |
+ .cpu_user_features = COMMON_USER_BOOKE |
+ PPC_FEATURE_HAS_EFP_SINGLE |
PPC_FEATURE_UNIFIED_CACHE,
.dcache_bsize = 32,
+ .platform = "ppc5554",
},
{ /* e200z6 */
.pvr_mask = 0xfff00000,
@@ -961,11 +1036,12 @@ struct cpu_spec cpu_specs[] = {
.cpu_name = "e200z6",
/* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
.cpu_features = CPU_FTRS_E200,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
+ .cpu_user_features = COMMON_USER_BOOKE |
+ PPC_FEATURE_SPE_COMP |
PPC_FEATURE_HAS_EFP_SINGLE |
PPC_FEATURE_UNIFIED_CACHE,
.dcache_bsize = 32,
+ .platform = "ppc5554",
},
{ /* e500 */
.pvr_mask = 0xffff0000,
@@ -973,12 +1049,15 @@ struct cpu_spec cpu_specs[] = {
.cpu_name = "e500",
/* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
.cpu_features = CPU_FTRS_E500,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
+ .cpu_user_features = COMMON_USER_BOOKE |
+ PPC_FEATURE_SPE_COMP |
PPC_FEATURE_HAS_EFP_SINGLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
+ .oprofile_cpu_type = "ppc/e500",
+ .oprofile_type = PPC_OPROFILE_BOOKE,
+ .platform = "ppc8540",
},
{ /* e500v2 */
.pvr_mask = 0xffff0000,
@@ -986,12 +1065,16 @@ struct cpu_spec cpu_specs[] = {
.cpu_name = "e500v2",
/* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
.cpu_features = CPU_FTRS_E500_2,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
- PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_HAS_EFP_DOUBLE,
+ .cpu_user_features = COMMON_USER_BOOKE |
+ PPC_FEATURE_SPE_COMP |
+ PPC_FEATURE_HAS_EFP_SINGLE |
+ PPC_FEATURE_HAS_EFP_DOUBLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
+ .oprofile_cpu_type = "ppc/e500",
+ .oprofile_type = PPC_OPROFILE_BOOKE,
+ .platform = "ppc8548",
},
#endif
#if !CLASSIC_PPC
@@ -1003,6 +1086,7 @@ struct cpu_spec cpu_specs[] = {
.cpu_user_features = PPC_FEATURE_32,
.icache_bsize = 32,
.dcache_bsize = 32,
+ .platform = "powerpc",
}
#endif /* !CLASSIC_PPC */
#endif /* CONFIG_PPC32 */
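
For readers following the cputable changes above: each row is selected by masking the processor version register (PVR) with pvr_mask and comparing the result against pvr_value; the first match wins, and the "default match" rows with a zero mask act as catch-alls at the end of each block. Below is a minimal, self-contained sketch of that selection rule only -- it is not the kernel's own identify_cpu() code, and the struct and function names are made up:

	/* Illustrative PVR matching over a cpu_spec-like table. */
	struct cpu_spec_example {
		unsigned int pvr_mask;
		unsigned int pvr_value;
		const char  *cpu_name;
	};

	static const struct cpu_spec_example *
	pick_cpu_spec(const struct cpu_spec_example *tbl, int n, unsigned int pvr)
	{
		int i;

		for (i = 0; i < n; i++)
			if ((pvr & tbl[i].pvr_mask) == tbl[i].pvr_value)
				return &tbl[i];	/* zero-mask default matches anything */
		return 0;			/* table should always end in a default */
	}
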
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
new file mode 100644
index 00000000000..5f248e3fdf8
--- /dev/null
+++ b/arch/powerpc/kernel/crash.c
@@ -0,0 +1,264 @@
+/*
+ * Architecture specific (PPC64) functions for kexec based crash dumps.
+ *
+ * Copyright (C) 2005, IBM Corp.
+ *
+ * Created by: Haren Myneni
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/reboot.h>
+#include <linux/kexec.h>
+#include <linux/bootmem.h>
+#include <linux/crash_dump.h>
+#include <linux/delay.h>
+#include <linux/elf.h>
+#include <linux/elfcore.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include <asm/processor.h>
+#include <asm/machdep.h>
+#include <asm/kdump.h>
+#include <asm/lmb.h>
+#include <asm/firmware.h>
+#include <asm/smp.h>
+
+#ifdef DEBUG
+#include <asm/udbg.h>
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+/* This keeps track of which CPU is the crashing one. */
+int crashing_cpu = -1;
+
+static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
+ size_t data_len)
+{
+ struct elf_note note;
+
+ note.n_namesz = strlen(name) + 1;
+ note.n_descsz = data_len;
+ note.n_type = type;
+ memcpy(buf, &note, sizeof(note));
+ buf += (sizeof(note) +3)/4;
+ memcpy(buf, name, note.n_namesz);
+ buf += (note.n_namesz + 3)/4;
+ memcpy(buf, data, note.n_descsz);
+ buf += (note.n_descsz + 3)/4;
+
+ return buf;
+}
+
+static void final_note(u32 *buf)
+{
+ struct elf_note note;
+
+ note.n_namesz = 0;
+ note.n_descsz = 0;
+ note.n_type = 0;
+ memcpy(buf, &note, sizeof(note));
+}
+
+static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
+{
+ struct elf_prstatus prstatus;
+ u32 *buf;
+
+ if ((cpu < 0) || (cpu >= NR_CPUS))
+ return;
+
+ /* Using ELF notes here is opportunistic.
+ * I need a well defined structure format
+ * for the data I pass, and I need tags
+ * on the data to indicate what information I have
+ * squirrelled away. ELF notes happen to provide
+ * all of that, so there is no need to invent something new.
+ */
+ buf = &crash_notes[cpu][0];
+ memset(&prstatus, 0, sizeof(prstatus));
+ prstatus.pr_pid = current->pid;
+ elf_core_copy_regs(&prstatus.pr_reg, regs);
+ buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
+ sizeof(prstatus));
+ final_note(buf);
+}
+
+/* FIXME: Merge this with xmon_save_regs? */
+static inline void crash_get_current_regs(struct pt_regs *regs)
+{
+ unsigned long tmp1, tmp2;
+
+ __asm__ __volatile__ (
+ "std 0,0(%2)\n"
+ "std 1,8(%2)\n"
+ "std 2,16(%2)\n"
+ "std 3,24(%2)\n"
+ "std 4,32(%2)\n"
+ "std 5,40(%2)\n"
+ "std 6,48(%2)\n"
+ "std 7,56(%2)\n"
+ "std 8,64(%2)\n"
+ "std 9,72(%2)\n"
+ "std 10,80(%2)\n"
+ "std 11,88(%2)\n"
+ "std 12,96(%2)\n"
+ "std 13,104(%2)\n"
+ "std 14,112(%2)\n"
+ "std 15,120(%2)\n"
+ "std 16,128(%2)\n"
+ "std 17,136(%2)\n"
+ "std 18,144(%2)\n"
+ "std 19,152(%2)\n"
+ "std 20,160(%2)\n"
+ "std 21,168(%2)\n"
+ "std 22,176(%2)\n"
+ "std 23,184(%2)\n"
+ "std 24,192(%2)\n"
+ "std 25,200(%2)\n"
+ "std 26,208(%2)\n"
+ "std 27,216(%2)\n"
+ "std 28,224(%2)\n"
+ "std 29,232(%2)\n"
+ "std 30,240(%2)\n"
+ "std 31,248(%2)\n"
+ "mfmsr %0\n"
+ "std %0, 264(%2)\n"
+ "mfctr %0\n"
+ "std %0, 280(%2)\n"
+ "mflr %0\n"
+ "std %0, 288(%2)\n"
+ "bl 1f\n"
+ "1: mflr %1\n"
+ "std %1, 256(%2)\n"
+ "mtlr %0\n"
+ "mfxer %0\n"
+ "std %0, 296(%2)\n"
+ : "=&r" (tmp1), "=&r" (tmp2)
+ : "b" (regs));
+}
+
+/* We may have saved_regs from the place the error came from,
+ * or it is NULL if we got here via a direct panic().
+ */
+static void crash_save_self(struct pt_regs *saved_regs)
+{
+ struct pt_regs regs;
+ int cpu;
+
+ cpu = smp_processor_id();
+ if (saved_regs)
+ memcpy(&regs, saved_regs, sizeof(regs));
+ else
+ crash_get_current_regs(&regs);
+ crash_save_this_cpu(&regs, cpu);
+}
+
+#ifdef CONFIG_SMP
+static atomic_t waiting_for_crash_ipi;
+
+void crash_ipi_callback(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+
+ if (cpu == crashing_cpu)
+ return;
+
+ if (!cpu_online(cpu))
+ return;
+
+ if (ppc_md.kexec_cpu_down)
+ ppc_md.kexec_cpu_down(1, 1);
+
+ local_irq_disable();
+
+ crash_save_this_cpu(regs, cpu);
+ atomic_dec(&waiting_for_crash_ipi);
+ kexec_smp_wait();
+ /* NOTREACHED */
+}
+
+static void crash_kexec_prepare_cpus(void)
+{
+ unsigned int msecs;
+
+ atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+
+ crash_send_ipi(crash_ipi_callback);
+ smp_wmb();
+
+ /*
+ * FIXME: Until we have a way to stop other CPUs reliably, the crash
+ * CPU will send an IPI and wait for other CPUs to respond. If they
+ * don't, proceed with the kexec boot even though we failed to capture
+ * the other CPUs' state.
+ */
+ msecs = 1000000;
+ while ((atomic_read(&waiting_for_crash_ipi) > 0) && (--msecs > 0)) {
+ barrier();
+ mdelay(1);
+ }
+
+ /* Would it be better to replace the trap vector here? */
+
+ /*
+ * FIXME: If we do not get all CPUs, one possibility is to ask the
+ * user to do a soft reset so that we get them all.
+ * The IPI handler is already set by the panic CPU initially, so all
+ * CPUs could invoke this handler from die() and the panic CPU would
+ * call machine_kexec() directly from this handler to do the
+ * kexec boot.
+ */
+ if (atomic_read(&waiting_for_crash_ipi))
+ printk(KERN_ALERT "done waiting: %d cpus not responding\n",
+ atomic_read(&waiting_for_crash_ipi));
+ /* Leave the IPI callback set */
+}
+#else
+static void crash_kexec_prepare_cpus(void)
+{
+ /*
+ * Move the secondaries to us so that we can safely copy
+ * the new kernel over 0-0x100.
+ *
+ * do this if kexec in setup.c ?
+ */
+ smp_release_cpus();
+}
+
+#endif
+
+void default_machine_crash_shutdown(struct pt_regs *regs)
+{
+ /*
+ * This function is only called after the system
+ * has panicked or is otherwise in a critical state.
+ * The minimum amount of code to allow a kexec'd kernel
+ * to run successfully needs to happen here.
+ *
+ * In practice this means stopping other cpus in
+ * an SMP system.
+ * The kernel is broken so disable interrupts.
+ */
+ local_irq_disable();
+
+ if (ppc_md.kexec_cpu_down)
+ ppc_md.kexec_cpu_down(1, 0);
+
+ /*
+ * Make a note of crashing cpu. Will be used in machine_kexec
+ * such that another IPI will not be sent.
+ */
+ crashing_cpu = smp_processor_id();
+ crash_kexec_prepare_cpus();
+ crash_save_self(regs);
+}
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
new file mode 100644
index 00000000000..211d72653ea
--- /dev/null
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -0,0 +1,111 @@
+/*
+ * Routines for doing kexec-based kdump.
+ *
+ * Copyright (C) 2005, IBM Corp.
+ *
+ * Created by: Michael Ellerman
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#undef DEBUG
+
+#include <linux/crash_dump.h>
+#include <linux/bootmem.h>
+#include <asm/kdump.h>
+#include <asm/lmb.h>
+#include <asm/firmware.h>
+#include <asm/uaccess.h>
+
+#ifdef DEBUG
+#include <asm/udbg.h>
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+static void __init create_trampoline(unsigned long addr)
+{
+ /* The maximum range of a single instruction branch is the current
+ * instruction's address + (32 MB - 4) bytes. For the trampoline we
+ * need to branch to current address + 32 MB. So we insert a nop at
+ * the trampoline address, then the next instruction (+ 4 bytes)
+ * does a branch to (32 MB - 4). The net effect is that when we
+ * branch to "addr", we jump to ("addr" + 32 MB). Although it requires
+ * two instructions, it doesn't require any registers.
+ */
+ create_instruction(addr, 0x60000000); /* nop */
+ create_branch(addr + 4, addr + PHYSICAL_START, 0);
+}
+
+void __init kdump_setup(void)
+{
+ unsigned long i;
+
+ DBG(" -> kdump_setup()\n");
+
+ for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
+ create_trampoline(i);
+ }
+
+ create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
+ create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
+
+ DBG(" <- kdump_setup()\n");
+}
+
+#ifdef CONFIG_PROC_VMCORE
+static int __init parse_elfcorehdr(char *p)
+{
+ if (p)
+ elfcorehdr_addr = memparse(p, &p);
+
+ return 0;
+}
+__setup("elfcorehdr=", parse_elfcorehdr);
+#endif
+
+static int __init parse_savemaxmem(char *p)
+{
+ if (p)
+ saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;
+
+ return 0;
+}
+__setup("savemaxmem=", parse_savemaxmem);
+
+/*
+ * copy_oldmem_page - copy one page from "oldmem"
+ * @pfn: page frame number to be copied
+ * @buf: target memory address for the copy; this can be in kernel address
+ * space or user address space (see @userbuf)
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page (based on pfn) to begin the copy
+ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+ * otherwise @buf is in kernel address space, use memcpy().
+ *
+ * Copy a page from "oldmem". For this page, there is no pte mapped
+ * in the current kernel. We stitch up a pte, similar to kmap_atomic.
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ size_t csize, unsigned long offset, int userbuf)
+{
+ void *vaddr;
+
+ if (!csize)
+ return 0;
+
+ vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
+
+ if (userbuf) {
+ if (copy_to_user((char __user *)buf, (vaddr + offset), csize)) {
+ iounmap(vaddr);
+ return -EFAULT;
+ }
+ } else
+ memcpy(buf, (vaddr + offset), csize);
+
+ iounmap(vaddr);
+ return csize;
+}
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c
index 7c3419656cc..36aaa7663f0 100644
--- a/arch/powerpc/kernel/dma_64.c
+++ b/arch/powerpc/kernel/dma_64.c
@@ -10,6 +10,7 @@
/* Include the busses we support */
#include <linux/pci.h>
#include <asm/vio.h>
+#include <asm/ibmebus.h>
#include <asm/scatterlist.h>
#include <asm/bug.h>
@@ -23,6 +24,10 @@ static struct dma_mapping_ops *get_dma_ops(struct device *dev)
if (dev->bus == &vio_bus_type)
return &vio_dma_ops;
#endif
+#ifdef CONFIG_IBMEBUS
+ if (dev->bus == &ibmebus_bus_type)
+ return &ibmebus_dma_ops;
+#endif
return NULL;
}
@@ -47,6 +52,10 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
if (dev->bus == &vio_bus_type)
return -EIO;
#endif /* CONFIG_IBMVIO */
+#ifdef CONFIG_IBMEBUS
+ if (dev->bus == &ibmebus_bus_type)
+ return -EIO;
+#endif
BUG();
return 0;
}
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 2e99ae41723..d8da2a35c0a 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -200,8 +200,6 @@ _GLOBAL(DoSyscall)
bl do_show_syscall
#endif /* SHOW_SYSCALLS */
rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
- li r11,0
- stb r11,TI_SC_NOERR(r10)
lwz r11,TI_FLAGS(r10)
andi. r11,r11,_TIF_SYSCALL_T_OR_A
bne- syscall_dotrace
@@ -222,25 +220,21 @@ ret_from_syscall:
bl do_show_syscall_exit
#endif
mr r6,r3
- li r11,-_LAST_ERRNO
- cmplw 0,r3,r11
rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
- blt+ 30f
- lbz r11,TI_SC_NOERR(r12)
- cmpwi r11,0
- bne 30f
- neg r3,r3
- lwz r10,_CCR(r1) /* Set SO bit in CR */
- oris r10,r10,0x1000
- stw r10,_CCR(r1)
-
/* disable interrupts so current_thread_info()->flags can't change */
-30: LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
+ LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
SYNC
MTMSRD(r10)
lwz r9,TI_FLAGS(r12)
- andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
+ li r8,-_LAST_ERRNO
+ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL)
bne- syscall_exit_work
+ cmplw 0,r3,r8
+ blt+ syscall_exit_cont
+ lwz r11,_CCR(r1) /* Load CR */
+ neg r3,r3
+ oris r11,r11,0x1000 /* Set SO bit in CR */
+ stw r11,_CCR(r1)
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* If the process has its own DBCR0 value, load it up. The single
@@ -292,46 +286,113 @@ syscall_dotrace:
b syscall_dotrace_cont
syscall_exit_work:
- stw r6,RESULT(r1) /* Save result */
+ andi. r0,r9,_TIF_RESTOREALL
+ bne- 2f
+ cmplw 0,r3,r8
+ blt+ 1f
+ andi. r0,r9,_TIF_NOERROR
+ bne- 1f
+ lwz r11,_CCR(r1) /* Load CR */
+ neg r3,r3
+ oris r11,r11,0x1000 /* Set SO bit in CR */
+ stw r11,_CCR(r1)
+
+1: stw r6,RESULT(r1) /* Save result */
stw r3,GPR3(r1) /* Update return value */
- andi. r0,r9,_TIF_SYSCALL_T_OR_A
- beq 5f
- ori r10,r10,MSR_EE
- SYNC
- MTMSRD(r10) /* re-enable interrupts */
+2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
+ beq 4f
+
+ /* Clear per-syscall TIF flags if any are set, but _leave_
+ _TIF_SAVE_NVGPRS set in r9 since we haven't dealt with that
+ yet. */
+
+ li r11,_TIF_PERSYSCALL_MASK
+ addi r12,r12,TI_FLAGS
+3: lwarx r8,0,r12
+ andc r8,r8,r11
+#ifdef CONFIG_IBM405_ERR77
+ dcbt 0,r12
+#endif
+ stwcx. r8,0,r12
+ bne- 3b
+ subi r12,r12,TI_FLAGS
+
+4: /* Anything which requires enabling interrupts? */
+ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SAVE_NVGPRS)
+ beq 7f
+
+ /* Save NVGPRS if they're not saved already */
lwz r4,_TRAP(r1)
andi. r4,r4,1
- beq 4f
+ beq 5f
SAVE_NVGPRS(r1)
li r4,0xc00
stw r4,_TRAP(r1)
-4:
+
+ /* Re-enable interrupts */
+5: ori r10,r10,MSR_EE
+ SYNC
+ MTMSRD(r10)
+
+ andi. r0,r9,_TIF_SAVE_NVGPRS
+ bne save_user_nvgprs
+
+save_user_nvgprs_cont:
+ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
+ beq 7f
+
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_syscall_trace_leave
REST_NVGPRS(r1)
-2:
- lwz r3,GPR3(r1)
+
+6: lwz r3,GPR3(r1)
LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
SYNC
MTMSRD(r10) /* disable interrupts again */
rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
lwz r9,TI_FLAGS(r12)
-5:
+7:
andi. r0,r9,_TIF_NEED_RESCHED
- bne 1f
+ bne 8f
lwz r5,_MSR(r1)
andi. r5,r5,MSR_PR
- beq syscall_exit_cont
+ beq ret_from_except
andi. r0,r9,_TIF_SIGPENDING
- beq syscall_exit_cont
+ beq ret_from_except
b do_user_signal
-1:
+8:
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10) /* re-enable interrupts */
bl schedule
- b 2b
+ b 6b
+
+save_user_nvgprs:
+ lwz r8,TI_SIGFRAME(r12)
+
+.macro savewords start, end
+ 1: stw \start,4*(\start)(r8)
+ .section __ex_table,"a"
+ .align 2
+ .long 1b,save_user_nvgprs_fault
+ .previous
+ .if \end - \start
+ savewords "(\start+1)",\end
+ .endif
+.endm
+ savewords 14,31
+ b save_user_nvgprs_cont
+
+
+save_user_nvgprs_fault:
+ li r3,11 /* SIGSEGV */
+ lwz r4,TI_TASK(r12)
+ bl force_sigsegv
+ rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
+ lwz r9,TI_FLAGS(r12)
+ b save_user_nvgprs_cont
+
#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
@@ -401,28 +462,10 @@ show_syscalls_task:
#endif /* SHOW_SYSCALLS */
/*
- * The sigsuspend and rt_sigsuspend system calls can call do_signal
- * and thus put the process into the stopped state where we might
- * want to examine its user state with ptrace. Therefore we need
- * to save all the nonvolatile registers (r13 - r31) before calling
- * the C code.
+ * The fork/clone functions need to copy the full register set into
+ * the child process. Therefore we need to save all the nonvolatile
+ * registers (r13 - r31) before calling the C code.
*/
- .globl ppc_sigsuspend
-ppc_sigsuspend:
- SAVE_NVGPRS(r1)
- lwz r0,_TRAP(r1)
- rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
- stw r0,_TRAP(r1) /* register set saved */
- b sys_sigsuspend
-
- .globl ppc_rt_sigsuspend
-ppc_rt_sigsuspend:
- SAVE_NVGPRS(r1)
- lwz r0,_TRAP(r1)
- rlwinm r0,r0,0,0,30
- stw r0,_TRAP(r1)
- b sys_rt_sigsuspend
-
.globl ppc_fork
ppc_fork:
SAVE_NVGPRS(r1)
@@ -447,14 +490,6 @@ ppc_clone:
stw r0,_TRAP(r1) /* register set saved */
b sys_clone
- .globl ppc_swapcontext
-ppc_swapcontext:
- SAVE_NVGPRS(r1)
- lwz r0,_TRAP(r1)
- rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
- stw r0,_TRAP(r1) /* register set saved */
- b sys_swapcontext
-
/*
* Top-level page fault handling.
* This is in assembler because if do_page_fault tells us that
@@ -626,16 +661,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
.long ret_from_except
#endif
- .globl sigreturn_exit
-sigreturn_exit:
- subi r1,r3,STACK_FRAME_OVERHEAD
- rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
- lwz r9,TI_FLAGS(r12)
- andi. r0,r9,_TIF_SYSCALL_T_OR_A
- beq+ ret_from_except_full
- bl do_syscall_trace_leave
- /* fall through */
-
.globl ret_from_except_full
ret_from_except_full:
REST_NVGPRS(r1)
@@ -658,7 +683,7 @@ user_exc_return: /* r10 contains MSR_KERNEL here */
/* Check current_thread_info()->flags */
rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
lwz r9,TI_FLAGS(r9)
- andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
+ andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL)
bne do_work
restore_user:
@@ -963,7 +988,7 @@ _GLOBAL(enter_rtas)
stwu r1,-INT_FRAME_SIZE(r1)
mflr r0
stw r0,INT_FRAME_SIZE+4(r1)
- LOADADDR(r4, rtas)
+ LOAD_REG_ADDR(r4, rtas)
lis r6,1f@ha /* physical return address for rtas */
addi r6,r6,1f@l
tophys(r6,r6)
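
The 32-bit syscall exit rework above is dense assembler; the decision it makes about the return value is easier to see in pseudo-C. In the sketch below the flag bits and the _LAST_ERRNO value are placeholder assumptions, not the kernel's real definitions; only the control flow mirrors the code above:

	/* Pseudo-C sketch of the syscall-exit return-value handling above. */
	#define EX_TIF_RESTOREALL	(1UL << 0)	/* placeholder bit */
	#define EX_TIF_NOERROR		(1UL << 1)	/* placeholder bit */
	#define EX_LAST_ERRNO		516UL		/* placeholder value */

	static void syscall_exit_sketch(unsigned long *r3, unsigned long *ccr,
					unsigned long ti_flags)
	{
		if (ti_flags & EX_TIF_RESTOREALL)
			return;			/* leave r3 and CR alone entirely */

		/* cmplw/cmpld in the code above is an unsigned compare, so any
		 * value in [-_LAST_ERRNO, -1] counts as an error return. */
		if (*r3 >= (unsigned long)-EX_LAST_ERRNO &&
		    !(ti_flags & EX_TIF_NOERROR)) {
			*r3 = -*r3;		/* hand userspace a positive errno */
			*ccr |= 0x10000000;	/* and set the SO bit in CR0 */
		}
	}
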
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index bce33a38399..54203631886 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -113,9 +113,7 @@ system_call_common:
addi r9,r1,STACK_FRAME_OVERHEAD
#endif
clrrdi r11,r1,THREAD_SHIFT
- li r12,0
ld r10,TI_FLAGS(r11)
- stb r12,TI_SC_NOERR(r11)
andi. r11,r10,_TIF_SYSCALL_T_OR_A
bne- syscall_dotrace
syscall_dotrace_cont:
@@ -144,24 +142,12 @@ system_call: /* label this so stack traces look sane */
bctrl /* Call handler */
syscall_exit:
+ std r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
- std r3,GPR3(r1)
bl .do_show_syscall_exit
- ld r3,GPR3(r1)
+ ld r3,RESULT(r1)
#endif
- std r3,RESULT(r1)
- ld r5,_CCR(r1)
- li r10,-_LAST_ERRNO
- cmpld r3,r10
clrrdi r12,r1,THREAD_SHIFT
- bge- syscall_error
-syscall_error_cont:
-
- /* check for syscall tracing or audit */
- ld r9,TI_FLAGS(r12)
- andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
- bne- syscall_exit_trace
-syscall_exit_trace_cont:
/* disable interrupts so current_thread_info()->flags can't change,
and so that we don't get interrupted after loading SRR0/1. */
@@ -173,8 +159,13 @@ syscall_exit_trace_cont:
rotldi r10,r10,16
mtmsrd r10,1
ld r9,TI_FLAGS(r12)
- andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
+ li r11,-_LAST_ERRNO
+ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_SAVE_NVGPRS|_TIF_NOERROR)
bne- syscall_exit_work
+ cmpld r3,r11
+ ld r5,_CCR(r1)
+ bge- syscall_error
+syscall_error_cont:
ld r7,_NIP(r1)
stdcx. r0,0,r1 /* to clear the reservation */
andi. r6,r8,MSR_PR
@@ -193,21 +184,12 @@ syscall_exit_trace_cont:
rfid
b . /* prevent speculative execution */
-syscall_enosys:
- li r3,-ENOSYS
- std r3,RESULT(r1)
- clrrdi r12,r1,THREAD_SHIFT
- ld r5,_CCR(r1)
-
-syscall_error:
- lbz r11,TI_SC_NOERR(r12)
- cmpwi 0,r11,0
- bne- syscall_error_cont
- neg r3,r3
+syscall_error:
oris r5,r5,0x1000 /* Set SO bit in CR */
+ neg r3,r3
std r5,_CCR(r1)
b syscall_error_cont
-
+
/* Traced system call support */
syscall_dotrace:
bl .save_nvgprs
@@ -225,21 +207,69 @@ syscall_dotrace:
ld r10,TI_FLAGS(r10)
b syscall_dotrace_cont
-syscall_exit_trace:
- std r3,GPR3(r1)
- bl .save_nvgprs
+syscall_enosys:
+ li r3,-ENOSYS
+ b syscall_exit
+
+syscall_exit_work:
+ /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
+ If TIF_NOERROR is set, just save r3 as it is. */
+
+ andi. r0,r9,_TIF_RESTOREALL
+ bne- 2f
+ cmpld r3,r11 /* r11 is -LAST_ERRNO */
+ blt+ 1f
+ andi. r0,r9,_TIF_NOERROR
+ bne- 1f
+ ld r5,_CCR(r1)
+ neg r3,r3
+ oris r5,r5,0x1000 /* Set SO bit in CR */
+ std r5,_CCR(r1)
+1: std r3,GPR3(r1)
+2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
+ beq 4f
+
+ /* Clear per-syscall TIF flags if any are set, but _leave_
+ _TIF_SAVE_NVGPRS set in r9 since we haven't dealt with that
+ yet. */
+
+ li r11,_TIF_PERSYSCALL_MASK
+ addi r12,r12,TI_FLAGS
+3: ldarx r10,0,r12
+ andc r10,r10,r11
+ stdcx. r10,0,r12
+ bne- 3b
+ subi r12,r12,TI_FLAGS
+
+4: bl .save_nvgprs
+ /* Anything else left to do? */
+ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SAVE_NVGPRS)
+ beq .ret_from_except_lite
+
+ /* Re-enable interrupts */
+ mfmsr r10
+ ori r10,r10,MSR_EE
+ mtmsrd r10,1
+
+ andi. r0,r9,_TIF_SAVE_NVGPRS
+ bne save_user_nvgprs
+
+ /* If tracing, re-enable interrupts and do it */
+save_user_nvgprs_cont:
+ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
+ beq 5f
+
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_syscall_trace_leave
REST_NVGPRS(r1)
- ld r3,GPR3(r1)
- ld r5,_CCR(r1)
clrrdi r12,r1,THREAD_SHIFT
- b syscall_exit_trace_cont
-/* Stuff to do on exit from a system call. */
-syscall_exit_work:
- std r3,GPR3(r1)
- std r5,_CCR(r1)
+ /* Disable interrupts again and handle other work if any */
+5: mfmsr r10
+ rldicl r10,r10,48,1
+ rotldi r10,r10,16
+ mtmsrd r10,1
+
b .ret_from_except_lite
/* Save non-volatile GPRs, if not already saved. */
@@ -252,6 +282,52 @@ _GLOBAL(save_nvgprs)
std r0,_TRAP(r1)
blr
+
+save_user_nvgprs:
+ ld r10,TI_SIGFRAME(r12)
+ andi. r0,r9,_TIF_32BIT
+ beq- save_user_nvgprs_64
+
+ /* 32-bit save to userspace */
+
+.macro savewords start, end
+ 1: stw \start,4*(\start)(r10)
+ .section __ex_table,"a"
+ .align 3
+ .llong 1b,save_user_nvgprs_fault
+ .previous
+ .if \end - \start
+ savewords "(\start+1)",\end
+ .endif
+.endm
+ savewords 14,31
+ b save_user_nvgprs_cont
+
+save_user_nvgprs_64:
+ /* 64-bit save to userspace */
+
+.macro savelongs start, end
+ 1: std \start,8*(\start)(r10)
+ .section __ex_table,"a"
+ .align 3
+ .llong 1b,save_user_nvgprs_fault
+ .previous
+ .if \end - \start
+ savelongs "(\start+1)",\end
+ .endif
+.endm
+ savelongs 14,31
+ b save_user_nvgprs_cont
+
+save_user_nvgprs_fault:
+ li r3,11 /* SIGSEGV */
+ ld r4,TI_TASK(r12)
+ bl .force_sigsegv
+
+ clrrdi r12,r1,THREAD_SHIFT
+ ld r9,TI_FLAGS(r12)
+ b save_user_nvgprs_cont
+
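
The savewords/savelongs assembler macros above unroll stores of GPRs 14-31 into the user signal frame pointed to by TI_SIGFRAME, with one __ex_table entry per store so that a faulting user access branches to save_user_nvgprs_fault and raises SIGSEGV. A loose C analogue, purely illustrative (the frame layout is owned by the signal code and the helper name is invented):

/* Sketch: copy non-volatile GPRs to 4-byte (32-bit task) or 8-byte
 * (64-bit task) slots in the user frame, raising SIGSEGV on fault,
 * as the unrolled macros do. */
static int save_user_nvgprs_sketch(struct pt_regs *regs,
				   void __user *frame, int is_32bit)
{
	int i;

	for (i = 14; i <= 31; i++) {
		int err;

		if (is_32bit)
			err = __put_user((u32)regs->gpr[i],
					 (u32 __user *)frame + i);
		else
			err = __put_user(regs->gpr[i],
					 (u64 __user *)frame + i);
		if (err) {
			force_sigsegv(SIGSEGV, current);
			return -EFAULT;
		}
	}
	return 0;
}
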
/*
* The sigsuspend and rt_sigsuspend system calls can call do_signal
* and thus put the process into the stopped state where we might
@@ -260,35 +336,6 @@ _GLOBAL(save_nvgprs)
* the C code. Similarly, fork, vfork and clone need the full
* register state on the stack so that it can be copied to the child.
*/
-_GLOBAL(ppc32_sigsuspend)
- bl .save_nvgprs
- bl .compat_sys_sigsuspend
- b 70f
-
-_GLOBAL(ppc64_rt_sigsuspend)
- bl .save_nvgprs
- bl .sys_rt_sigsuspend
- b 70f
-
-_GLOBAL(ppc32_rt_sigsuspend)
- bl .save_nvgprs
- bl .compat_sys_rt_sigsuspend
-70: cmpdi 0,r3,0
- /* If it returned an error, we need to return via syscall_exit to set
- the SO bit in cr0 and potentially stop for ptrace. */
- bne syscall_exit
- /* If sigsuspend() returns zero, we are going into a signal handler. We
- may need to call audit_syscall_exit() to mark the exit from sigsuspend() */
-#ifdef CONFIG_AUDITSYSCALL
- ld r3,PACACURRENT(r13)
- ld r4,AUDITCONTEXT(r3)
- cmpdi 0,r4,0
- beq .ret_from_except /* No audit_context: Leave immediately. */
- li r4, 2 /* AUDITSC_FAILURE */
- li r5,-4 /* It's always -EINTR */
- bl .audit_syscall_exit
-#endif
- b .ret_from_except
_GLOBAL(ppc_fork)
bl .save_nvgprs
@@ -305,37 +352,6 @@ _GLOBAL(ppc_clone)
bl .sys_clone
b syscall_exit
-_GLOBAL(ppc32_swapcontext)
- bl .save_nvgprs
- bl .compat_sys_swapcontext
- b 80f
-
-_GLOBAL(ppc64_swapcontext)
- bl .save_nvgprs
- bl .sys_swapcontext
- b 80f
-
-_GLOBAL(ppc32_sigreturn)
- bl .compat_sys_sigreturn
- b 80f
-
-_GLOBAL(ppc32_rt_sigreturn)
- bl .compat_sys_rt_sigreturn
- b 80f
-
-_GLOBAL(ppc64_rt_sigreturn)
- bl .sys_rt_sigreturn
-
-80: cmpdi 0,r3,0
- blt syscall_exit
- clrrdi r4,r1,THREAD_SHIFT
- ld r4,TI_FLAGS(r4)
- andi. r4,r4,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
- beq+ 81f
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .do_syscall_trace_leave
-81: b .ret_from_except
-
_GLOBAL(ret_from_fork)
bl .schedule_tail
REST_NVGPRS(r1)
@@ -495,7 +511,8 @@ restore:
cmpdi 0,r5,0
beq 4f
/* Check for pending interrupts (iSeries) */
- ld r3,PACALPPACA+LPPACAANYINT(r13)
+ ld r3,PACALPPACAPTR(r13)
+ ld r3,LPPACAANYINT(r3)
cmpdi r3,0
beq+ 4f /* skip do_IRQ if no interrupts */
@@ -673,9 +690,8 @@ _GLOBAL(enter_rtas)
std r6,PACASAVEDMSR(r13)
/* Setup our real return addr */
- SET_REG_TO_LABEL(r4,.rtas_return_loc)
- SET_REG_TO_CONST(r9,KERNELBASE)
- sub r4,r4,r9
+ LOAD_REG_ADDR(r4,.rtas_return_loc)
+ clrldi r4,r4,2 /* convert to realmode address */
mtlr r4
li r0,0
@@ -690,7 +706,7 @@ _GLOBAL(enter_rtas)
sync /* disable interrupts so SRR0/1 */
mtmsrd r0 /* don't get trashed */
- SET_REG_TO_LABEL(r4,rtas)
+ LOAD_REG_ADDR(r4, rtas)
ld r5,RTASENTRY(r4) /* get the rtas->entry value */
ld r4,RTASBASE(r4) /* get the rtas->base value */
@@ -702,8 +718,7 @@ _GLOBAL(enter_rtas)
_STATIC(rtas_return_loc)
/* relocation is off at this point */
mfspr r4,SPRN_SPRG3 /* Get PACA */
- SET_REG_TO_CONST(r5, KERNELBASE)
- sub r4,r4,r5 /* RELOC the PACA base pointer */
+ clrldi r4,r4,2 /* convert to realmode address */
mfmsr r6
li r0,MSR_RI
@@ -712,7 +727,7 @@ _STATIC(rtas_return_loc)
mtmsrd r6
ld r1,PACAR1(r4) /* Restore our SP */
- LOADADDR(r3,.rtas_restore_regs)
+ LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
mtspr SPRN_SRR0,r3
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index b780b42c95f..e4362dfa37f 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -39,9 +39,9 @@ _GLOBAL(load_up_fpu)
* to another. Instead we call giveup_fpu in switch_to.
*/
#ifndef CONFIG_SMP
- LOADBASE(r3, last_task_used_math)
+ LOAD_REG_ADDRBASE(r3, last_task_used_math)
toreal(r3)
- PPC_LL r4,OFF(last_task_used_math)(r3)
+ PPC_LL r4,ADDROFF(last_task_used_math)(r3)
PPC_LCMPI 0,r4,0
beq 1f
toreal(r4)
@@ -77,7 +77,7 @@ _GLOBAL(load_up_fpu)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
fromreal(r4)
- PPC_STL r4,OFF(last_task_used_math)(r3)
+ PPC_STL r4,ADDROFF(last_task_used_math)(r3)
#endif /* CONFIG_SMP */
/* restore registers and return */
/* we haven't used ctr or xer or lr */
@@ -113,8 +113,8 @@ _GLOBAL(giveup_fpu)
1:
#ifndef CONFIG_SMP
li r5,0
- LOADBASE(r4,last_task_used_math)
- PPC_STL r5,OFF(last_task_used_math)(r4)
+ LOAD_REG_ADDRBASE(r4,last_task_used_math)
+ PPC_STL r5,ADDROFF(last_task_used_math)(r4)
#endif /* CONFIG_SMP */
blr
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index ccdf94731e3..03b25f9359f 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -120,10 +120,25 @@ __start:
* because OF may have I/O devices mapped into that area
* (particularly on CHRP).
*/
+#ifdef CONFIG_PPC_MULTIPLATFORM
cmpwi 0,r5,0
beq 1f
bl prom_init
trap
+#endif
+
+/*
+ * Check for BootX signature when supporting PowerMac and branch to
+ * appropriate trampoline if it's present
+ */
+#ifdef CONFIG_PPC_PMAC
+1: lis r31,0x426f
+ ori r31,r31,0x6f58
+ cmpw 0,r3,r31
+ bne 1f
+ bl bootx_init
+ trap
+#endif /* CONFIG_PPC_PMAC */
1: mr r31,r3 /* save parameters */
mr r30,r4
@@ -153,6 +168,9 @@ __after_mmu_off:
bl flush_tlbs
bl initial_bats
+#if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT)
+ bl setup_disp_bat
+#endif
/*
* Call setup_cpu for CPU 0 and initialize 6xx Idle
@@ -450,16 +468,11 @@ SystemCall:
* by executing an altivec instruction.
*/
. = 0xf00
- b Trap_0f
+ b PerformanceMonitor
. = 0xf20
b AltiVecUnavailable
-Trap_0f:
- EXCEPTION_PROLOG
- addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_EE(0xf00, unknown_exception)
-
/*
* Handle TLB miss for instruction on 603/603e.
* Note: we get an alternate set of r0 - r3 to use automatically.
@@ -703,6 +716,11 @@ AltiVecUnavailable:
#endif /* CONFIG_ALTIVEC */
EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)
+PerformanceMonitor:
+ EXCEPTION_PROLOG
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ EXC_XFER_STD(0xf00, performance_monitor_exception)
+
#ifdef CONFIG_ALTIVEC
/* Note that the AltiVec support is closely modeled after the FP
* support. Changes to one are likely to be applicable to the
@@ -1306,6 +1324,32 @@ initial_bats:
blr
+#if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT)
+setup_disp_bat:
+ /*
+ * setup the display bat prepared for us in prom.c
+ */
+ mflr r8
+ bl reloc_offset
+ mtlr r8
+ addis r8,r3,disp_BAT@ha
+ addi r8,r8,disp_BAT@l
+ cmpwi cr0,r8,0
+ beqlr
+ lwz r11,0(r8)
+ lwz r8,4(r8)
+ mfspr r9,SPRN_PVR
+ rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
+ cmpwi 0,r9,1
+ beq 1f
+ mtspr SPRN_DBAT3L,r8
+ mtspr SPRN_DBAT3U,r11
+ blr
+1: mtspr SPRN_IBAT3L,r8
+ mtspr SPRN_IBAT3U,r11
+ blr
+#endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */
+
#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
* We first disable the MMU, and then jump to the ROM reset address.
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 8a8bf79ef04..30826846634 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -154,11 +154,15 @@ _GLOBAL(__secondary_hold)
bne 100b
#ifdef CONFIG_HMT
- b .hmt_init
+ SET_REG_IMMEDIATE(r4, .hmt_init)
+ mtctr r4
+ bctr
#else
#ifdef CONFIG_SMP
+ LOAD_REG_IMMEDIATE(r4, .pSeries_secondary_smp_init)
+ mtctr r4
mr r3,r24
- b .pSeries_secondary_smp_init
+ bctr
#else
BUG_OPCODE
#endif
@@ -200,6 +204,21 @@ exception_marker:
#define EX_R3 64
#define EX_LR 72
+/*
+ * We're short on space and time in the exception prolog, so we can't
+ * use the normal SET_REG_IMMEDIATE macro. Normally we just need the
+ * low halfword of the address, but for Kdump we need the whole low
+ * word.
+ */
+#ifdef CONFIG_CRASH_DUMP
+#define LOAD_HANDLER(reg, label) \
+ oris reg,reg,(label)@h; /* virt addr of handler ... */ \
+ ori reg,reg,(label)@l; /* .. and the rest */
+#else
+#define LOAD_HANDLER(reg, label) \
+ ori reg,reg,(label)@l; /* virt addr of handler ... */
+#endif
+
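
The net effect of LOAD_HANDLER is to complete a handler's virtual address in a register that already holds the high 32 bits (the prolog derives those with clrrdi r12,r13,32 from the paca pointer): normally only the label's low 16 bits are needed, presumably because the handlers sit close to the kernel base, while a crash-dump kernel can be linked higher and therefore needs the full low 32 bits. A small illustrative C model of that composition:

/* Sketch of the address composition performed by LOAD_HANDLER. */
static inline unsigned long load_handler_sketch(unsigned long high_half,
						unsigned long label,
						int crash_dump)
{
	if (crash_dump)
		return high_half | (label & 0xffffffffUL);	/* oris + ori */
	return high_half | (label & 0xffffUL);			/* ori only */
}
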
#define EXCEPTION_PROLOG_PSERIES(area, label) \
mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
std r9,area+EX_R9(r13); /* save r9 - r12 */ \
@@ -212,7 +231,7 @@ exception_marker:
clrrdi r12,r13,32; /* get high part of &label */ \
mfmsr r10; \
mfspr r11,SPRN_SRR0; /* save SRR0 */ \
- ori r12,r12,(label)@l; /* virt addr of handler */ \
+ LOAD_HANDLER(r12,label) \
ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
mtspr SPRN_SRR0,r12; \
mfspr r12,SPRN_SRR1; /* and SRR1 */ \
@@ -236,8 +255,9 @@ exception_marker:
#define EXCEPTION_PROLOG_ISERIES_2 \
mfmsr r10; \
- ld r11,PACALPPACA+LPPACASRR0(r13); \
- ld r12,PACALPPACA+LPPACASRR1(r13); \
+ ld r12,PACALPPACAPTR(r13); \
+ ld r11,LPPACASRR0(r12); \
+ ld r12,LPPACASRR1(r12); \
ori r10,r10,MSR_RI; \
mtmsrd r10,1
@@ -553,6 +573,7 @@ slb_miss_user_pseries:
* Vectors for the FWNMI option. Share common code.
*/
.globl system_reset_fwnmi
+ .align 7
system_reset_fwnmi:
HMT_MEDIUM
mtspr SPRN_SPRG1,r13 /* save r13 */
@@ -560,6 +581,7 @@ system_reset_fwnmi:
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
.globl machine_check_fwnmi
+ .align 7
machine_check_fwnmi:
HMT_MEDIUM
mtspr SPRN_SPRG1,r13 /* save r13 */
@@ -614,7 +636,8 @@ data_access_slb_iSeries:
std r12,PACA_EXSLB+EX_R12(r13)
mfspr r10,SPRN_SPRG1
std r10,PACA_EXSLB+EX_R13(r13)
- ld r12,PACALPPACA+LPPACASRR1(r13);
+ ld r12,PACALPPACAPTR(r13)
+ ld r12,LPPACASRR1(r12)
b .slb_miss_realmode
STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
@@ -624,7 +647,8 @@ instruction_access_slb_iSeries:
mtspr SPRN_SPRG1,r13 /* save r13 */
mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
std r3,PACA_EXSLB+EX_R3(r13)
- ld r3,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
+ ld r3,PACALPPACAPTR(r13)
+ ld r3,LPPACASRR0(r3) /* get SRR0 value */
std r9,PACA_EXSLB+EX_R9(r13)
mfcr r9
#ifdef __DISABLED__
@@ -636,7 +660,8 @@ instruction_access_slb_iSeries:
std r12,PACA_EXSLB+EX_R12(r13)
mfspr r10,SPRN_SPRG1
std r10,PACA_EXSLB+EX_R13(r13)
- ld r12,PACALPPACA+LPPACASRR1(r13);
+ ld r12,PACALPPACAPTR(r13)
+ ld r12,LPPACASRR1(r12)
b .slb_miss_realmode
#ifdef __DISABLED__
@@ -693,7 +718,7 @@ system_reset_iSeries:
lbz r23,PACAPROCSTART(r13) /* Test if this processor
* should start */
sync
- LOADADDR(r3,current_set)
+ LOAD_REG_IMMEDIATE(r3,current_set)
sldi r28,r24,3 /* get current_set[cpu#] */
ldx r3,r3,r28
addi r1,r3,THREAD_SIZE
@@ -725,16 +750,19 @@ iSeries_secondary_smp_loop:
.globl decrementer_iSeries_masked
decrementer_iSeries_masked:
li r11,1
- stb r11,PACALPPACA+LPPACADECRINT(r13)
- lwz r12,PACADEFAULTDECR(r13)
+ ld r12,PACALPPACAPTR(r13)
+ stb r11,LPPACADECRINT(r12)
+ LOAD_REG_ADDRBASE(r12,tb_ticks_per_jiffy)
+ lwz r12,ADDROFF(tb_ticks_per_jiffy)(r12)
mtspr SPRN_DEC,r12
/* fall through */
.globl hardware_interrupt_iSeries_masked
hardware_interrupt_iSeries_masked:
mtcrf 0x80,r9 /* Restore regs */
- ld r11,PACALPPACA+LPPACASRR0(r13)
- ld r12,PACALPPACA+LPPACASRR1(r13)
+ ld r12,PACALPPACAPTR(r13)
+ ld r11,LPPACASRR0(r12)
+ ld r12,LPPACASRR1(r12)
mtspr SPRN_SRR0,r11
mtspr SPRN_SRR1,r12
ld r9,PACA_EXGEN+EX_R9(r13)
@@ -973,7 +1001,8 @@ _GLOBAL(slb_miss_realmode)
ld r3,PACA_EXSLB+EX_R3(r13)
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
#ifdef CONFIG_PPC_ISERIES
- ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
+ ld r11,PACALPPACAPTR(r13)
+ ld r11,LPPACASRR0(r11) /* get SRR0 value */
#endif /* CONFIG_PPC_ISERIES */
mtlr r10
@@ -1345,7 +1374,7 @@ _GLOBAL(do_stab_bolted)
* fixed address (the linker can't compute (u64)&initial_stab >>
* PAGE_SHIFT).
*/
- . = STAB0_PHYS_ADDR /* 0x6000 */
+ . = STAB0_OFFSET /* 0x6000 */
.globl initial_stab
initial_stab:
.space 4096
@@ -1391,7 +1420,7 @@ _GLOBAL(pSeries_secondary_smp_init)
* physical cpu id in r24, we need to search the pacas to find
* which logical id maps to our physical one.
*/
- LOADADDR(r13, paca) /* Get base vaddr of paca array */
+ LOAD_REG_IMMEDIATE(r13, paca) /* Get base vaddr of paca array */
li r5,0 /* logical cpu id */
1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
cmpw r6,r24 /* Compare to our id */
@@ -1425,8 +1454,8 @@ _GLOBAL(pSeries_secondary_smp_init)
#ifdef CONFIG_PPC_ISERIES
_STATIC(__start_initialization_iSeries)
/* Clear out the BSS */
- LOADADDR(r11,__bss_stop)
- LOADADDR(r8,__bss_start)
+ LOAD_REG_IMMEDIATE(r11,__bss_stop)
+ LOAD_REG_IMMEDIATE(r8,__bss_start)
sub r11,r11,r8 /* bss size */
addi r11,r11,7 /* round up to an even double word */
rldicl. r11,r11,61,3 /* shift right by 3 */
@@ -1437,17 +1466,17 @@ _STATIC(__start_initialization_iSeries)
3: stdu r0,8(r8)
bdnz 3b
4:
- LOADADDR(r1,init_thread_union)
+ LOAD_REG_IMMEDIATE(r1,init_thread_union)
addi r1,r1,THREAD_SIZE
li r0,0
stdu r0,-STACK_FRAME_OVERHEAD(r1)
- LOADADDR(r3,cpu_specs)
- LOADADDR(r4,cur_cpu_spec)
+ LOAD_REG_IMMEDIATE(r3,cpu_specs)
+ LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
li r5,0
bl .identify_cpu
- LOADADDR(r2,__toc_start)
+ LOAD_REG_IMMEDIATE(r2,__toc_start)
addi r2,r2,0x4000
addi r2,r2,0x4000
@@ -1485,11 +1514,13 @@ _STATIC(__mmu_off)
*
*/
_GLOBAL(__start_initialization_multiplatform)
+#ifdef CONFIG_PPC_MULTIPLATFORM
/*
* Are we booted from a PROM Of-type client-interface ?
*/
cmpldi cr0,r5,0
bne .__boot_from_prom /* yes -> prom */
+#endif
/* Save parameters */
mr r31,r3
@@ -1505,11 +1536,12 @@ _GLOBAL(__start_initialization_multiplatform)
li r24,0
/* Switch off MMU if not already */
- LOADADDR(r4, .__after_prom_start - KERNELBASE)
+ LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE)
add r4,r4,r30
bl .__mmu_off
b .__after_prom_start
+#ifdef CONFIG_PPC_MULTIPLATFORM
_STATIC(__boot_from_prom)
/* Save parameters */
mr r31,r3
@@ -1524,7 +1556,7 @@ _STATIC(__boot_from_prom)
/* put a relocation offset into r3 */
bl .reloc_offset
- LOADADDR(r2,__toc_start)
+ LOAD_REG_IMMEDIATE(r2,__toc_start)
addi r2,r2,0x4000
addi r2,r2,0x4000
@@ -1542,6 +1574,7 @@ _STATIC(__boot_from_prom)
bl .prom_init
/* We never return */
trap
+#endif
/*
* At this point, r3 contains the physical address we are running at,
@@ -1550,7 +1583,7 @@ _STATIC(__boot_from_prom)
_STATIC(__after_prom_start)
/*
- * We need to run with __start at physical address 0.
+ * We need to run with __start at physical address PHYSICAL_START.
* This will leave some code in the first 256B of
* real memory, which are reserved for software use.
* The remainder of the first page is loaded with the fixed
@@ -1563,9 +1596,9 @@ _STATIC(__after_prom_start)
*/
bl .reloc_offset
mr r26,r3
- SET_REG_TO_CONST(r27,KERNELBASE)
+ LOAD_REG_IMMEDIATE(r27, KERNELBASE)
- li r3,0 /* target addr */
+ LOAD_REG_IMMEDIATE(r3, PHYSICAL_START) /* target addr */
// XXX FIXME: Use phys returned by OF (r30)
add r4,r27,r26 /* source addr */
@@ -1573,7 +1606,7 @@ _STATIC(__after_prom_start)
/* i.e. where we are running */
/* the source addr */
- LOADADDR(r5,copy_to_here) /* # bytes of memory to copy */
+ LOAD_REG_IMMEDIATE(r5,copy_to_here) /* # bytes of memory to copy */
sub r5,r5,r27
li r6,0x100 /* Start offset, the first 0x100 */
@@ -1583,11 +1616,11 @@ _STATIC(__after_prom_start)
/* this includes the code being */
/* executed here. */
- LOADADDR(r0, 4f) /* Jump to the copy of this code */
+ LOAD_REG_IMMEDIATE(r0, 4f) /* Jump to the copy of this code */
mtctr r0 /* that we just made/relocated */
bctr
-4: LOADADDR(r5,klimit)
+4: LOAD_REG_IMMEDIATE(r5,klimit)
add r5,r5,r26
ld r5,0(r5) /* get the value of klimit */
sub r5,r5,r27
@@ -1669,7 +1702,7 @@ _GLOBAL(pmac_secondary_start)
mtmsrd r3 /* RI on */
/* Set up a paca value for this processor. */
- LOADADDR(r4, paca) /* Get base vaddr of paca array */
+ LOAD_REG_IMMEDIATE(r4, paca) /* Get base vaddr of paca array */
mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
add r13,r13,r4 /* for this processor. */
mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
@@ -1706,7 +1739,7 @@ _GLOBAL(__secondary_start)
bl .early_setup_secondary
/* Initialize the kernel stack. Just a repeat for iSeries. */
- LOADADDR(r3,current_set)
+ LOAD_REG_ADDR(r3, current_set)
sldi r28,r24,3 /* get current_set[cpu#] */
ldx r1,r3,r28
addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
@@ -1717,8 +1750,8 @@ _GLOBAL(__secondary_start)
mtlr r7
/* enable MMU and jump to start_secondary */
- LOADADDR(r3,.start_secondary_prolog)
- SET_REG_TO_CONST(r4, MSR_KERNEL)
+ LOAD_REG_ADDR(r3, .start_secondary_prolog)
+ LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
#ifdef DO_SOFT_DISABLE
ori r4,r4,MSR_EE
#endif
@@ -1767,8 +1800,8 @@ _STATIC(start_here_multiplatform)
* be detached from the kernel completely. Besides, we need
* to clear it now for kexec-style entry.
*/
- LOADADDR(r11,__bss_stop)
- LOADADDR(r8,__bss_start)
+ LOAD_REG_IMMEDIATE(r11,__bss_stop)
+ LOAD_REG_IMMEDIATE(r8,__bss_start)
sub r11,r11,r8 /* bss size */
addi r11,r11,7 /* round up to an even double word */
rldicl. r11,r11,61,3 /* shift right by 3 */
@@ -1806,7 +1839,7 @@ _STATIC(start_here_multiplatform)
/* up the htab. This is done because we have relocated the */
/* kernel but are still running in real mode. */
- LOADADDR(r3,init_thread_union)
+ LOAD_REG_IMMEDIATE(r3,init_thread_union)
add r3,r3,r26
/* set up a stack pointer (physical address) */
@@ -1815,14 +1848,14 @@ _STATIC(start_here_multiplatform)
stdu r0,-STACK_FRAME_OVERHEAD(r1)
/* set up the TOC (physical address) */
- LOADADDR(r2,__toc_start)
+ LOAD_REG_IMMEDIATE(r2,__toc_start)
addi r2,r2,0x4000
addi r2,r2,0x4000
add r2,r2,r26
- LOADADDR(r3,cpu_specs)
+ LOAD_REG_IMMEDIATE(r3, cpu_specs)
add r3,r3,r26
- LOADADDR(r4,cur_cpu_spec)
+ LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
add r4,r4,r26
mr r5,r26
bl .identify_cpu
@@ -1838,15 +1871,15 @@ _STATIC(start_here_multiplatform)
* nowhere it can be initialized differently before we reach this
* code
*/
- LOADADDR(r27, boot_cpuid)
+ LOAD_REG_IMMEDIATE(r27, boot_cpuid)
add r27,r27,r26
lwz r27,0(r27)
- LOADADDR(r24, paca) /* Get base vaddr of paca array */
+ LOAD_REG_IMMEDIATE(r24, paca) /* Get base vaddr of paca array */
mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */
add r13,r13,r24 /* for this processor. */
add r13,r13,r26 /* convert to physical addr */
- mtspr SPRN_SPRG3,r13 /* PPPBBB: Temp... -Peter */
+ mtspr SPRN_SPRG3,r13
/* Do very early kernel initializations, including initial hash table,
* stab and slb setup before we turn on relocation. */
@@ -1855,8 +1888,8 @@ _STATIC(start_here_multiplatform)
mr r3,r31
bl .early_setup
- LOADADDR(r3,.start_here_common)
- SET_REG_TO_CONST(r4, MSR_KERNEL)
+ LOAD_REG_IMMEDIATE(r3, .start_here_common)
+ LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
mtspr SPRN_SRR0,r3
mtspr SPRN_SRR1,r4
rfid
@@ -1870,7 +1903,7 @@ _STATIC(start_here_common)
/* The following code sets up the SP and TOC now that we are */
/* running with translation enabled. */
- LOADADDR(r3,init_thread_union)
+ LOAD_REG_IMMEDIATE(r3,init_thread_union)
/* set up the stack */
addi r1,r3,THREAD_SIZE
@@ -1883,16 +1916,16 @@ _STATIC(start_here_common)
li r3,0
bl .do_cpu_ftr_fixups
- LOADADDR(r26, boot_cpuid)
+ LOAD_REG_IMMEDIATE(r26, boot_cpuid)
lwz r26,0(r26)
- LOADADDR(r24, paca) /* Get base vaddr of paca array */
+ LOAD_REG_IMMEDIATE(r24, paca) /* Get base vaddr of paca array */
mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */
add r13,r13,r24 /* for this processor. */
mtspr SPRN_SPRG3,r13
/* ptr to current */
- LOADADDR(r4,init_task)
+ LOAD_REG_IMMEDIATE(r4, init_task)
std r4,PACACURRENT(r13)
/* Load the TOC */
@@ -1915,7 +1948,7 @@ _STATIC(start_here_common)
_GLOBAL(hmt_init)
#ifdef CONFIG_HMT
- LOADADDR(r5, hmt_thread_data)
+ LOAD_REG_IMMEDIATE(r5, hmt_thread_data)
mfspr r7,SPRN_PVR
srwi r7,r7,16
cmpwi r7,0x34 /* Pulsar */
@@ -1936,7 +1969,7 @@ _GLOBAL(hmt_init)
b 101f
__hmt_secondary_hold:
- LOADADDR(r5, hmt_thread_data)
+ LOAD_REG_IMMEDIATE(r5, hmt_thread_data)
clrldi r5,r5,4
li r7,0
mfspr r6,SPRN_PIR
@@ -1964,7 +1997,7 @@ __hmt_secondary_hold:
#ifdef CONFIG_HMT
_GLOBAL(hmt_start_secondary)
- LOADADDR(r4,__hmt_secondary_hold)
+ LOAD_REG_IMMEDIATE(r4,__hmt_secondary_hold)
clrldi r4,r4,4
mtspr SPRN_NIADORM, r4
mfspr r4, SPRN_MSRDORM
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
new file mode 100644
index 00000000000..e47d40ac6f3
--- /dev/null
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -0,0 +1,396 @@
+/*
+ * IBM PowerPC eBus Infrastructure Support.
+ *
+ * Copyright (c) 2005 IBM Corporation
+ * Heiko J Schick <schickhj@de.ibm.com>
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/kobject.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <asm/ibmebus.h>
+#include <asm/abs_addr.h>
+
+static struct ibmebus_dev ibmebus_bus_device = { /* fake "parent" device */
+ .name = ibmebus_bus_device.ofdev.dev.bus_id,
+ .ofdev.dev.bus_id = "ibmebus",
+ .ofdev.dev.bus = &ibmebus_bus_type,
+};
+
+static void *ibmebus_alloc_coherent(struct device *dev,
+ size_t size,
+ dma_addr_t *dma_handle,
+ gfp_t flag)
+{
+ void *mem;
+
+ mem = kmalloc(size, flag);
+ *dma_handle = (dma_addr_t)mem;
+
+ return mem;
+}
+
+static void ibmebus_free_coherent(struct device *dev,
+ size_t size, void *vaddr,
+ dma_addr_t dma_handle)
+{
+ kfree(vaddr);
+}
+
+static dma_addr_t ibmebus_map_single(struct device *dev,
+ void *ptr,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ return (dma_addr_t)(ptr);
+}
+
+static void ibmebus_unmap_single(struct device *dev,
+ dma_addr_t dma_addr,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ return;
+}
+
+static int ibmebus_map_sg(struct device *dev,
+ struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
+{
+ int i;
+
+ for (i = 0; i < nents; i++) {
+ sg[i].dma_address = (dma_addr_t)page_address(sg[i].page)
+ + sg[i].offset;
+ sg[i].dma_length = sg[i].length;
+ }
+
+ return nents;
+}
+
+static void ibmebus_unmap_sg(struct device *dev,
+ struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
+{
+ return;
+}
+
+static int ibmebus_dma_supported(struct device *dev, u64 mask)
+{
+ return 1;
+}
+
+struct dma_mapping_ops ibmebus_dma_ops = {
+ .alloc_coherent = ibmebus_alloc_coherent,
+ .free_coherent = ibmebus_free_coherent,
+ .map_single = ibmebus_map_single,
+ .unmap_single = ibmebus_unmap_single,
+ .map_sg = ibmebus_map_sg,
+ .unmap_sg = ibmebus_unmap_sg,
+ .dma_supported = ibmebus_dma_supported,
+};
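+
+/* These operations implement an identity mapping: the handle returned to a
+ * driver is simply the kernel virtual address of the buffer, and unmapping
+ * is a no-op. The fragment below is a sketch only (it would have to live in
+ * this file since the ops are static; the example function is made up). */
+
+static void ibmebus_dma_identity_example(struct device *dev)
+{
+	void *buf = kmalloc(64, GFP_KERNEL);
+	dma_addr_t handle;
+
+	if (!buf)
+		return;
+	handle = ibmebus_map_single(dev, buf, 64, DMA_TO_DEVICE);
+	/* handle == (dma_addr_t)buf: no translation happens on this bus */
+	ibmebus_unmap_single(dev, handle, 64, DMA_TO_DEVICE);
+	kfree(buf);
+}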
+
+static int ibmebus_bus_probe(struct device *dev)
+{
+ struct ibmebus_dev *ibmebusdev = to_ibmebus_dev(dev);
+ struct ibmebus_driver *ibmebusdrv = to_ibmebus_driver(dev->driver);
+ const struct of_device_id *id;
+ int error = -ENODEV;
+
+ if (!ibmebusdrv->probe)
+ return error;
+
+ id = of_match_device(ibmebusdrv->id_table, &ibmebusdev->ofdev);
+ if (id) {
+ error = ibmebusdrv->probe(ibmebusdev, id);
+ }
+
+ return error;
+}
+
+static int ibmebus_bus_remove(struct device *dev)
+{
+ struct ibmebus_dev *ibmebusdev = to_ibmebus_dev(dev);
+ struct ibmebus_driver *ibmebusdrv = to_ibmebus_driver(dev->driver);
+
+ if (ibmebusdrv->remove) {
+ return ibmebusdrv->remove(ibmebusdev);
+ }
+
+ return 0;
+}
+
+static void __devinit ibmebus_dev_release(struct device *dev)
+{
+ of_node_put(to_ibmebus_dev(dev)->ofdev.node);
+ kfree(to_ibmebus_dev(dev));
+}
+
+static ssize_t ibmebusdev_show_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", to_ibmebus_dev(dev)->name);
+}
+static DEVICE_ATTR(name, S_IRUSR | S_IRGRP | S_IROTH, ibmebusdev_show_name,
+ NULL);
+
+static struct ibmebus_dev* __devinit ibmebus_register_device_common(
+ struct ibmebus_dev *dev, char *name)
+{
+ int err = 0;
+
+ dev->name = name;
+ dev->ofdev.dev.parent = &ibmebus_bus_device.ofdev.dev;
+ dev->ofdev.dev.bus = &ibmebus_bus_type;
+ dev->ofdev.dev.release = ibmebus_dev_release;
+
+	/* An ibmebusdev is based on an of_device. We have to change the
+ * bus type to use our own DMA mapping operations.
+ */
+ if ((err = of_device_register(&dev->ofdev)) != 0) {
+ printk(KERN_ERR "%s: failed to register device (%d).\n",
+ __FUNCTION__, err);
+ return NULL;
+ }
+
+ device_create_file(&dev->ofdev.dev, &dev_attr_name);
+
+ return dev;
+}
+
+static struct ibmebus_dev* __devinit ibmebus_register_device_node(
+ struct device_node *dn)
+{
+ struct ibmebus_dev *dev;
+ char *loc_code;
+ int length;
+
+ loc_code = (char *)get_property(dn, "ibm,loc-code", NULL);
+ if (!loc_code) {
+ printk(KERN_WARNING "%s: node %s missing 'ibm,loc-code'\n",
+ __FUNCTION__, dn->name ? dn->name : "<unknown>");
+ return NULL;
+ }
+
+ if (strlen(loc_code) == 0) {
+ printk(KERN_WARNING "%s: 'ibm,loc-code' is invalid\n",
+ __FUNCTION__);
+ return NULL;
+ }
+
+ dev = kmalloc(sizeof(struct ibmebus_dev), GFP_KERNEL);
+ if (!dev) {
+ return NULL;
+ }
+ memset(dev, 0, sizeof(struct ibmebus_dev));
+
+ dev->ofdev.node = of_node_get(dn);
+
+ length = strlen(loc_code);
+ memcpy(dev->ofdev.dev.bus_id, loc_code
+ + (length - min(length, BUS_ID_SIZE - 1)),
+ min(length, BUS_ID_SIZE - 1));
+
+ /* Register with generic device framework. */
+ if (ibmebus_register_device_common(dev, dn->name) == NULL) {
+ kfree(dev);
+ return NULL;
+ }
+
+ return dev;
+}
+
+static void ibmebus_probe_of_nodes(char* name)
+{
+ struct device_node *dn = NULL;
+
+ while ((dn = of_find_node_by_name(dn, name))) {
+ if (ibmebus_register_device_node(dn) == NULL) {
+ of_node_put(dn);
+
+ return;
+ }
+ }
+
+ of_node_put(dn);
+
+ return;
+}
+
+static void ibmebus_add_devices_by_id(struct of_device_id *idt)
+{
+ while (strlen(idt->name) > 0) {
+ ibmebus_probe_of_nodes(idt->name);
+ idt++;
+ }
+
+ return;
+}
+
+static int ibmebus_match_helper(struct device *dev, void *data)
+{
+ if (strcmp((char*)data, to_ibmebus_dev(dev)->name) == 0)
+ return 1;
+
+ return 0;
+}
+
+static int ibmebus_unregister_device(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_name);
+ of_device_unregister(to_of_device(dev));
+
+ return 0;
+}
+
+static void ibmebus_remove_devices_by_id(struct of_device_id *idt)
+{
+ struct device *dev;
+
+ while (strlen(idt->name) > 0) {
+ while ((dev = bus_find_device(&ibmebus_bus_type, NULL,
+ (void*)idt->name,
+ ibmebus_match_helper))) {
+ ibmebus_unregister_device(dev);
+ }
+ idt++;
+
+ }
+
+ return;
+}
+
+int ibmebus_register_driver(struct ibmebus_driver *drv)
+{
+ int err = 0;
+
+ drv->driver.name = drv->name;
+ drv->driver.bus = &ibmebus_bus_type;
+ drv->driver.probe = ibmebus_bus_probe;
+ drv->driver.remove = ibmebus_bus_remove;
+
+	if ((err = driver_register(&drv->driver)) != 0)
+ return err;
+
+ ibmebus_add_devices_by_id(drv->id_table);
+
+ return 0;
+}
+EXPORT_SYMBOL(ibmebus_register_driver);
+
+void ibmebus_unregister_driver(struct ibmebus_driver *drv)
+{
+ driver_unregister(&drv->driver);
+ ibmebus_remove_devices_by_id(drv->id_table);
+}
+EXPORT_SYMBOL(ibmebus_unregister_driver);
+
+int ibmebus_request_irq(struct ibmebus_dev *dev,
+ u32 ist,
+ irqreturn_t (*handler)(int, void*, struct pt_regs *),
+ unsigned long irq_flags, const char * devname,
+ void *dev_id)
+{
+ unsigned int irq = virt_irq_create_mapping(ist);
+
+ if (irq == NO_IRQ)
+ return -EINVAL;
+
+ irq = irq_offset_up(irq);
+
+ return request_irq(irq, handler,
+ irq_flags, devname, dev_id);
+}
+EXPORT_SYMBOL(ibmebus_request_irq);
+
+void ibmebus_free_irq(struct ibmebus_dev *dev, u32 ist, void *dev_id)
+{
+ unsigned int irq = virt_irq_create_mapping(ist);
+
+ irq = irq_offset_up(irq);
+ free_irq(irq, dev_id);
+
+ return;
+}
+EXPORT_SYMBOL(ibmebus_free_irq);
+
+static int ibmebus_bus_match(struct device *dev, struct device_driver *drv)
+{
+ const struct ibmebus_dev *ebus_dev = to_ibmebus_dev(dev);
+ struct ibmebus_driver *ebus_drv = to_ibmebus_driver(drv);
+ const struct of_device_id *ids = ebus_drv->id_table;
+ const struct of_device_id *found_id;
+
+ if (!ids)
+ return 0;
+
+ found_id = of_match_device(ids, &ebus_dev->ofdev);
+ if (found_id)
+ return 1;
+
+ return 0;
+}
+
+struct bus_type ibmebus_bus_type = {
+ .name = "ibmebus",
+ .match = ibmebus_bus_match,
+};
+EXPORT_SYMBOL(ibmebus_bus_type);
+
+static int __init ibmebus_bus_init(void)
+{
+ int err;
+
+ printk(KERN_INFO "IBM eBus Device Driver\n");
+
+ err = bus_register(&ibmebus_bus_type);
+ if (err) {
+ printk(KERN_ERR ":%s: failed to register IBM eBus.\n",
+ __FUNCTION__);
+ return err;
+ }
+
+ err = device_register(&ibmebus_bus_device.ofdev.dev);
+ if (err) {
+ printk(KERN_WARNING "%s: device_register returned %i\n",
+ __FUNCTION__, err);
+ bus_unregister(&ibmebus_bus_type);
+
+ return err;
+ }
+
+ return 0;
+}
+__initcall(ibmebus_bus_init);
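
For context, a client driver binds to this bus by filling an of_device_id table and registering through ibmebus_register_driver(); devices whose device-tree node name matches an entry are then probed. The sketch below is hypothetical (the node name "example-dev" and all function names are made up) and only illustrates the calling convention defined above:

/* Hypothetical ibmebus client driver. */
static struct of_device_id example_ids[] = {
	{ .name = "example-dev" },	/* made-up device-tree node name */
	{},
};

static int example_probe(struct ibmebus_dev *dev,
			 const struct of_device_id *id)
{
	/* set up the device; DMA handles are 1:1 kernel addresses here */
	return 0;
}

static int example_remove(struct ibmebus_dev *dev)
{
	return 0;
}

static struct ibmebus_driver example_driver = {
	.name		= "example",
	.id_table	= example_ids,
	.probe		= example_probe,
	.remove		= example_remove,
};

static int __init example_init(void)
{
	return ibmebus_register_driver(&example_driver);
}
module_init(example_init);
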
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index 1494e2f177f..c16b4afab58 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -38,14 +38,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
/* We must dynamically check for the NAP feature as it
* can be cleared by CPU init after the fixups are done
*/
- LOADBASE(r3,cur_cpu_spec)
- ld r4,OFF(cur_cpu_spec)(r3)
+ LOAD_REG_ADDRBASE(r3,cur_cpu_spec)
+ ld r4,ADDROFF(cur_cpu_spec)(r3)
ld r4,CPU_SPEC_FEATURES(r4)
andi. r0,r4,CPU_FTR_CAN_NAP
beqlr
/* Now check if user or arch enabled NAP mode */
- LOADBASE(r3,powersave_nap)
- lwz r4,OFF(powersave_nap)(r3)
+ LOAD_REG_ADDRBASE(r3,powersave_nap)
+ lwz r4,ADDROFF(powersave_nap)(r3)
cmpwi 0,r4,0
beqlr
diff --git a/arch/powerpc/kernel/ioctl32.c b/arch/powerpc/kernel/ioctl32.c
deleted file mode 100644
index 0fa3d27fef0..00000000000
--- a/arch/powerpc/kernel/ioctl32.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
- *
- * Based on sparc64 ioctl32.c by:
- *
- * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
- * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
- *
- * ppc64 changes:
- *
- * Copyright (C) 2000 Ken Aaker (kdaaker@rchland.vnet.ibm.com)
- * Copyright (C) 2001 Anton Blanchard (antonb@au.ibm.com)
- *
- * These routines maintain argument size conversion between 32bit and 64bit
- * ioctls.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#define INCLUDES
-#include "compat_ioctl.c"
-#include <linux/syscalls.h>
-
-#define CODE
-#include "compat_ioctl.c"
-
-#define HANDLE_IOCTL(cmd,handler) { cmd, (ioctl_trans_handler_t)handler, NULL },
-#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL(cmd,sys_ioctl)
-
-#define IOCTL_TABLE_START \
- struct ioctl_trans ioctl_start[] = {
-#define IOCTL_TABLE_END \
- };
-
-IOCTL_TABLE_START
-#include <linux/compat_ioctl.h>
-#define DECLARES
-#include "compat_ioctl.c"
-
-IOCTL_TABLE_END
-
-int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5a71ed9612f..d1fffce86df 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -31,7 +31,6 @@
* to reduce code space and undefined function references.
*/
-#include <linux/errno.h>
#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
@@ -44,18 +43,12 @@
#include <linux/config.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/irq.h>
-#include <linux/proc_fs.h>
-#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
-#ifdef CONFIG_PPC64
-#include <linux/kallsyms.h>
-#endif
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -66,8 +59,7 @@
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
-#ifdef CONFIG_PPC64
-#include <asm/iseries/it_lp_queue.h>
+#ifdef CONFIG_PPC_ISERIES
#include <asm/paca.h>
#endif
@@ -78,10 +70,6 @@ EXPORT_SYMBOL(__irq_offset_value);
static int ppc_spurious_interrupts;
-#if defined(CONFIG_PPC_ISERIES) && defined(CONFIG_SMP)
-extern void iSeries_smp_message_recv(struct pt_regs *);
-#endif
-
#ifdef CONFIG_PPC32
#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
@@ -195,49 +183,6 @@ void fixup_irqs(cpumask_t map)
}
#endif
-#ifdef CONFIG_PPC_ISERIES
-void do_IRQ(struct pt_regs *regs)
-{
- struct paca_struct *lpaca;
-
- irq_enter();
-
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
- /* Debugging check for stack overflow: is there less than 2KB free? */
- {
- long sp;
-
- sp = __get_SP() & (THREAD_SIZE-1);
-
- if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
- printk("do_IRQ: stack overflow: %ld\n",
- sp - sizeof(struct thread_info));
- dump_stack();
- }
- }
-#endif
-
- lpaca = get_paca();
-#ifdef CONFIG_SMP
- if (lpaca->lppaca.int_dword.fields.ipi_cnt) {
- lpaca->lppaca.int_dword.fields.ipi_cnt = 0;
- iSeries_smp_message_recv(regs);
- }
-#endif /* CONFIG_SMP */
- if (hvlpevent_is_pending())
- process_hvlpevents(regs);
-
- irq_exit();
-
- if (lpaca->lppaca.int_dword.fields.decr_int) {
- lpaca->lppaca.int_dword.fields.decr_int = 0;
- /* Signal a fake decrementer interrupt */
- timer_interrupt(regs);
- }
-}
-
-#else /* CONFIG_PPC_ISERIES */
-
void do_IRQ(struct pt_regs *regs)
{
int irq;
@@ -286,16 +231,20 @@ void do_IRQ(struct pt_regs *regs)
} else
#endif
__do_IRQ(irq, regs);
- } else
-#ifdef CONFIG_PPC32
- if (irq != -2)
-#endif
- /* That's not SMP safe ... but who cares ? */
- ppc_spurious_interrupts++;
+ } else if (irq != -2)
+ /* That's not SMP safe ... but who cares ? */
+ ppc_spurious_interrupts++;
+
irq_exit();
-}
-#endif /* CONFIG_PPC_ISERIES */
+#ifdef CONFIG_PPC_ISERIES
+ if (get_lppaca()->int_dword.fields.decr_int) {
+ get_lppaca()->int_dword.fields.decr_int = 0;
+ /* Signal a fake decrementer interrupt */
+ timer_interrupt(regs);
+ }
+#endif
+}
void __init init_IRQ(void)
{
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 5368f9c2e6b..cfab48566db 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -35,7 +35,6 @@
#include <asm/kdebug.h>
#include <asm/sstep.h>
-static DECLARE_MUTEX(kprobe_mutex);
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -54,19 +53,17 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
/* insn must be on a special executable page on ppc64 */
if (!ret) {
- down(&kprobe_mutex);
p->ainsn.insn = get_insn_slot();
- up(&kprobe_mutex);
if (!p->ainsn.insn)
ret = -ENOMEM;
}
- return ret;
-}
-void __kprobes arch_copy_kprobe(struct kprobe *p)
-{
- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
- p->opcode = *p->addr;
+ if (!ret) {
+ memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+ p->opcode = *p->addr;
+ }
+
+ return ret;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
@@ -182,6 +179,18 @@ static inline int kprobe_handler(struct pt_regs *regs)
kcb->kprobe_status = KPROBE_REENTER;
return 1;
} else {
+ if (*addr != BREAKPOINT_INSTRUCTION) {
+			/* If it's a trap variant, it does not belong to us */
+ kprobe_opcode_t cur_insn = *addr;
+ if (is_trap(cur_insn))
+ goto no_kprobe;
+ /* The breakpoint instruction was removed by
+			 * another cpu right after we hit it; no further
+			 * handling of this interrupt is appropriate.
+ */
+ ret = 1;
+ goto no_kprobe;
+ }
p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
goto ss_probe;
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
new file mode 100644
index 00000000000..f970ace208d
--- /dev/null
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -0,0 +1,557 @@
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+#include <linux/serial_core.h>
+#include <linux/console.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+#include <asm/mmu.h>
+#include <asm/prom.h>
+#include <asm/serial.h>
+#include <asm/udbg.h>
+#include <asm/pci-bridge.h>
+#include <asm/ppc-pci.h>
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(fmt...) do { printk(fmt); } while(0)
+#else
+#define DBG(fmt...) do { } while(0)
+#endif
+
+#define MAX_LEGACY_SERIAL_PORTS 8
+
+static struct plat_serial8250_port
+legacy_serial_ports[MAX_LEGACY_SERIAL_PORTS+1];
+static struct legacy_serial_info {
+ struct device_node *np;
+ unsigned int speed;
+ unsigned int clock;
+ phys_addr_t taddr;
+} legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];
+static unsigned int legacy_serial_count;
+static int legacy_serial_console = -1;
+
+static int __init add_legacy_port(struct device_node *np, int want_index,
+ int iotype, phys_addr_t base,
+ phys_addr_t taddr, unsigned long irq,
+ unsigned int flags)
+{
+ u32 *clk, *spd, clock = BASE_BAUD * 16;
+ int index;
+
+ /* get clock freq. if present */
+ clk = (u32 *)get_property(np, "clock-frequency", NULL);
+ if (clk && *clk)
+ clock = *clk;
+
+ /* get default speed if present */
+ spd = (u32 *)get_property(np, "current-speed", NULL);
+
+ /* If we have a location index, then try to use it */
+ if (want_index >= 0 && want_index < MAX_LEGACY_SERIAL_PORTS)
+ index = want_index;
+ else
+ index = legacy_serial_count;
+
+	/* If our index is still out of range, the array is full; we
+	 * could scan for a free slot, but it makes little sense to
+	 * bother, so just skip the port.
+	 */
+ if (index >= MAX_LEGACY_SERIAL_PORTS)
+ return -1;
+ if (index >= legacy_serial_count)
+ legacy_serial_count = index + 1;
+
+	/* Check if there is a port that already claimed our slot */
+ if (legacy_serial_infos[index].np != 0) {
+ /* if we still have some room, move it, else override */
+ if (legacy_serial_count < MAX_LEGACY_SERIAL_PORTS) {
+ printk(KERN_INFO "Moved legacy port %d -> %d\n",
+ index, legacy_serial_count);
+ legacy_serial_ports[legacy_serial_count] =
+ legacy_serial_ports[index];
+ legacy_serial_infos[legacy_serial_count] =
+ legacy_serial_infos[index];
+ legacy_serial_count++;
+ } else {
+ printk(KERN_INFO "Replacing legacy port %d\n", index);
+ }
+ }
+
+ /* Now fill the entry */
+ memset(&legacy_serial_ports[index], 0,
+ sizeof(struct plat_serial8250_port));
+ if (iotype == UPIO_PORT)
+ legacy_serial_ports[index].iobase = base;
+ else
+ legacy_serial_ports[index].mapbase = base;
+ legacy_serial_ports[index].iotype = iotype;
+ legacy_serial_ports[index].uartclk = clock;
+ legacy_serial_ports[index].irq = irq;
+ legacy_serial_ports[index].flags = flags;
+ legacy_serial_infos[index].taddr = taddr;
+ legacy_serial_infos[index].np = of_node_get(np);
+ legacy_serial_infos[index].clock = clock;
+ legacy_serial_infos[index].speed = spd ? *spd : 0;
+
+ printk(KERN_INFO "Found legacy serial port %d for %s\n",
+ index, np->full_name);
+ printk(KERN_INFO " %s=%llx, taddr=%llx, irq=%lx, clk=%d, speed=%d\n",
+ (iotype == UPIO_PORT) ? "port" : "mem",
+ (unsigned long long)base, (unsigned long long)taddr, irq,
+ legacy_serial_ports[index].uartclk,
+ legacy_serial_infos[index].speed);
+
+ return index;
+}
+
+static int __init add_legacy_soc_port(struct device_node *np,
+ struct device_node *soc_dev)
+{
+ phys_addr_t addr;
+ u32 *addrp;
+ unsigned int flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ;
+
+ /* We only support ports that have a clock frequency properly
+ * encoded in the device-tree.
+ */
+ if (get_property(np, "clock-frequency", NULL) == NULL)
+ return -1;
+
+ /* Get the address */
+ addrp = of_get_address(soc_dev, 0, NULL, NULL);
+ if (addrp == NULL)
+ return -1;
+
+ addr = of_translate_address(soc_dev, addrp);
+
+ /* Add port, irq will be dealt with later. We passed a translated
+ * IO port value. It will be fixed up later along with the irq
+ */
+ return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags);
+}
+
+#ifdef CONFIG_ISA
+static int __init add_legacy_isa_port(struct device_node *np,
+ struct device_node *isa_brg)
+{
+ u32 *reg;
+ char *typep;
+ int index = -1;
+ phys_addr_t taddr;
+
+ /* Get the ISA port number */
+ reg = (u32 *)get_property(np, "reg", NULL);
+ if (reg == NULL)
+ return -1;
+
+ /* Verify it's an IO port, we don't support anything else */
+ if (!(reg[0] & 0x00000001))
+ return -1;
+
+ /* Now look for an "ibm,aix-loc" property that gives us ordering
+ * if any...
+ */
+ typep = (char *)get_property(np, "ibm,aix-loc", NULL);
+
+ /* If we have a location index, then use it */
+ if (typep && *typep == 'S')
+ index = simple_strtol(typep+1, NULL, 0) - 1;
+
+ /* Translate ISA address */
+ taddr = of_translate_address(np, reg);
+
+ /* Add port, irq will be dealt with later */
+ return add_legacy_port(np, index, UPIO_PORT, reg[1], taddr, NO_IRQ, UPF_BOOT_AUTOCONF);
+
+}
+#endif
+
+#ifdef CONFIG_PCI
+static int __init add_legacy_pci_port(struct device_node *np,
+ struct device_node *pci_dev)
+{
+ phys_addr_t addr, base;
+ u32 *addrp;
+ unsigned int flags;
+ int iotype, index = -1, lindex = 0;
+
+ /* We only support ports that have a clock frequency properly
+	 * encoded in the device-tree (that is, have an fcode). Anything
+	 * else can't be used that early and will normally be probed by
+	 * the generic 8250_pci driver later on. The reason is that
+	 * 8250-compatible UARTs on PCI need all sorts of quirks (port
+	 * offsets etc.) that this code doesn't know about.
+ */
+ if (get_property(np, "clock-frequency", NULL) == NULL)
+ return -1;
+
+ /* Get the PCI address. Assume BAR 0 */
+ addrp = of_get_pci_address(pci_dev, 0, NULL, &flags);
+ if (addrp == NULL)
+ return -1;
+
+ /* We only support BAR 0 for now */
+ iotype = (flags & IORESOURCE_MEM) ? UPIO_MEM : UPIO_PORT;
+ addr = of_translate_address(pci_dev, addrp);
+
+ /* Set the IO base to the same as the translated address for MMIO,
+ * or to the domain local IO base for PIO (it will be fixed up later)
+ */
+ if (iotype == UPIO_MEM)
+ base = addr;
+ else
+ base = addrp[2];
+
+ /* Try to guess an index... If we have subdevices of the pci dev,
+	 * we look at their "reg" property
+ */
+ if (np != pci_dev) {
+ u32 *reg = (u32 *)get_property(np, "reg", NULL);
+ if (reg && (*reg < 4))
+ index = lindex = *reg;
+ }
+
+ /* Local index means it's the Nth port in the PCI chip. Unfortunately
+ * the offset to add here is device specific. We know about those
+ * EXAR ports and we default to the most common case. If your UART
+ * doesn't work for these settings, you'll have to add your own special
+ * cases here
+ */
+ if (device_is_compatible(pci_dev, "pci13a8,152") ||
+ device_is_compatible(pci_dev, "pci13a8,154") ||
+ device_is_compatible(pci_dev, "pci13a8,158")) {
+ addr += 0x200 * lindex;
+ base += 0x200 * lindex;
+ } else {
+ addr += 8 * lindex;
+ base += 8 * lindex;
+ }
+
+ /* Add port, irq will be dealt with later. We passed a translated
+ * IO port value. It will be fixed up later along with the irq
+ */
+ return add_legacy_port(np, index, iotype, base, addr, NO_IRQ, UPF_BOOT_AUTOCONF);
+}
+#endif
+
+/*
+ * This is called very early, as part of setup_system() or eventually
+ * setup_arch(), basically before anything else in this file. This function
+ * will try to build a list of all the available 8250-compatible serial ports
+ * in the machine using the Open Firmware device-tree. It currently only deals
+ * with SOC, ISA and PCI busses but could be extended. It allows a very early
+ * boot console to be initialized; that list is also used later to provide the
+ * 8250 driver with the machine's non-PCI ports and to properly pick the
+ * default console port.
+ */
+void __init find_legacy_serial_ports(void)
+{
+ struct device_node *np, *stdout = NULL;
+ char *path;
+ int index;
+
+	DBG(" -> find_legacy_serial_ports()\n");
+
+	/* Now find out if one of these is our firmware console */
+ path = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
+ if (path != NULL) {
+ stdout = of_find_node_by_path(path);
+ if (stdout)
+ DBG("stdout is %s\n", stdout->full_name);
+ } else {
+ DBG(" no linux,stdout-path !\n");
+ }
+
+ /* First fill our array with SOC ports */
+ for (np = NULL; (np = of_find_compatible_node(np, "serial", "ns16550")) != NULL;) {
+ struct device_node *soc = of_get_parent(np);
+ if (soc && !strcmp(soc->type, "soc")) {
+ index = add_legacy_soc_port(np, np);
+ if (index >= 0 && np == stdout)
+ legacy_serial_console = index;
+ }
+ of_node_put(soc);
+ }
+
+#ifdef CONFIG_ISA
+	/* Next fill our array with ISA ports */
+ for (np = NULL; (np = of_find_node_by_type(np, "serial"));) {
+ struct device_node *isa = of_get_parent(np);
+ if (isa && !strcmp(isa->name, "isa")) {
+ index = add_legacy_isa_port(np, isa);
+ if (index >= 0 && np == stdout)
+ legacy_serial_console = index;
+ }
+ of_node_put(isa);
+ }
+#endif
+
+#ifdef CONFIG_PCI
+ /* Next, try to locate PCI ports */
+ for (np = NULL; (np = of_find_all_nodes(np));) {
+ struct device_node *pci, *parent = of_get_parent(np);
+ if (parent && !strcmp(parent->name, "isa")) {
+ of_node_put(parent);
+ continue;
+ }
+ if (strcmp(np->name, "serial") && strcmp(np->type, "serial")) {
+ of_node_put(parent);
+ continue;
+ }
+		/* Check for known pciclass, and also check whether we have
+ * a device with child nodes for ports or not
+ */
+ if (device_is_compatible(np, "pciclass,0700") ||
+ device_is_compatible(np, "pciclass,070002"))
+ pci = np;
+ else if (device_is_compatible(parent, "pciclass,0700") ||
+ device_is_compatible(parent, "pciclass,070002"))
+ pci = parent;
+ else {
+ of_node_put(parent);
+ continue;
+ }
+ index = add_legacy_pci_port(np, pci);
+ if (index >= 0 && np == stdout)
+ legacy_serial_console = index;
+ of_node_put(parent);
+ }
+#endif
+
+ DBG("legacy_serial_console = %d\n", legacy_serial_console);
+
+ /* udbg is 64 bits only for now, that will change soon though ... */
+ while (legacy_serial_console >= 0) {
+ struct legacy_serial_info *info =
+ &legacy_serial_infos[legacy_serial_console];
+ void __iomem *addr;
+
+ if (info->taddr == 0)
+ break;
+ addr = ioremap(info->taddr, 0x1000);
+ if (addr == NULL)
+ break;
+ if (info->speed == 0)
+ info->speed = udbg_probe_uart_speed(addr, info->clock);
+ DBG("default console speed = %d\n", info->speed);
+ udbg_init_uart(addr, info->speed, info->clock);
+ break;
+ }
+
+	DBG(" <- find_legacy_serial_ports()\n");
+}
+
+static struct platform_device serial_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = legacy_serial_ports,
+ },
+};
+
+static void __init fixup_port_irq(int index,
+ struct device_node *np,
+ struct plat_serial8250_port *port)
+{
+ DBG("fixup_port_irq(%d)\n", index);
+
+ /* Check for interrupts in that node */
+ if (np->n_intrs > 0) {
+ port->irq = np->intrs[0].line;
+ DBG(" port %d (%s), irq=%d\n",
+ index, np->full_name, port->irq);
+ return;
+ }
+
+ /* Check for interrupts in the parent */
+ np = of_get_parent(np);
+ if (np == NULL)
+ return;
+
+ if (np->n_intrs > 0) {
+ port->irq = np->intrs[0].line;
+ DBG(" port %d (%s), irq=%d\n",
+ index, np->full_name, port->irq);
+ }
+ of_node_put(np);
+}
+
+static void __init fixup_port_pio(int index,
+ struct device_node *np,
+ struct plat_serial8250_port *port)
+{
+#ifdef CONFIG_PCI
+ struct pci_controller *hose;
+
+ DBG("fixup_port_pio(%d)\n", index);
+
+ hose = pci_find_hose_for_OF_device(np);
+ if (hose) {
+ unsigned long offset = (unsigned long)hose->io_base_virt -
+#ifdef CONFIG_PPC64
+ pci_io_base;
+#else
+ isa_io_base;
+#endif
+ DBG("port %d, IO %lx -> %lx\n",
+ index, port->iobase, port->iobase + offset);
+ port->iobase += offset;
+ }
+#endif
+}
+
+static void __init fixup_port_mmio(int index,
+ struct device_node *np,
+ struct plat_serial8250_port *port)
+{
+ DBG("fixup_port_mmio(%d)\n", index);
+
+ port->membase = ioremap(port->mapbase, 0x100);
+}
+
+/*
+ * This is called as an arch initcall, hopefully before the PCI bus is
+ * probed and/or the 8250 driver loaded since we need to register our
+ * platform devices before 8250 PCI ones are detected as some of them
+ * must properly "override" the platform ones.
+ *
+ * This function fixes up the interrupt value for platform ports as it
+ * couldn't be done earlier before interrupt maps have been parsed. It
+ * also "corrects" the IO address for PIO ports for the same reason,
+ * since earlier, the PHBs virtual IO space wasn't assigned yet. It then
+ * registers all those platform ports for use by the 8250 driver when it
+ * finally loads.
+ */
+static int __init serial_dev_init(void)
+{
+ int i;
+
+ if (legacy_serial_count == 0)
+ return -ENODEV;
+
+ /*
+	 * Before we register the platform serial devices, we need
+	 * to fix up their interrupts and their IO ports.
+ */
+ DBG("Fixing serial ports interrupts and IO ports ...\n");
+
+ for (i = 0; i < legacy_serial_count; i++) {
+ struct plat_serial8250_port *port = &legacy_serial_ports[i];
+ struct device_node *np = legacy_serial_infos[i].np;
+
+ if (port->irq == NO_IRQ)
+ fixup_port_irq(i, np, port);
+ if (port->iotype == UPIO_PORT)
+ fixup_port_pio(i, np, port);
+ if (port->iotype == UPIO_MEM)
+ fixup_port_mmio(i, np, port);
+ }
+
+ DBG("Registering platform serial ports\n");
+
+ return platform_device_register(&serial_device);
+}
+arch_initcall(serial_dev_init);
+
+
+/*
+ * This is called very early, as part of console_init() (typically just after
+ * time_init()). This function is responsible for trying to find a good
+ * default console on serial ports. It tries to match the Open Firmware
+ * default output with one of the available serial console drivers, either
+ * one of the platform serial ports that have been probed earlier by
+ * find_legacy_serial_ports() or some more platform specific ones.
+ */
+static int __init check_legacy_serial_console(void)
+{
+ struct device_node *prom_stdout = NULL;
+ int speed = 0, offset = 0;
+ char *name;
+ u32 *spd;
+
+ DBG(" -> check_legacy_serial_console()\n");
+
+ /* The user has requested a console so this is already set up. */
+ if (strstr(saved_command_line, "console=")) {
+ DBG(" console was specified !\n");
+ return -EBUSY;
+ }
+
+ if (!of_chosen) {
+ DBG(" of_chosen is NULL !\n");
+ return -ENODEV;
+ }
+
+ if (legacy_serial_console < 0) {
+ DBG(" legacy_serial_console not found !\n");
+ return -ENODEV;
+ }
+ /* We are getting a weird phandle from OF ... */
+ /* ... So use the full path instead */
+ name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
+ if (name == NULL) {
+ DBG(" no linux,stdout-path !\n");
+ return -ENODEV;
+ }
+ prom_stdout = of_find_node_by_path(name);
+ if (!prom_stdout) {
+ DBG(" can't find stdout package %s !\n", name);
+ return -ENODEV;
+ }
+ DBG("stdout is %s\n", prom_stdout->full_name);
+
+ name = (char *)get_property(prom_stdout, "name", NULL);
+ if (!name) {
+ DBG(" stdout package has no name !\n");
+ goto not_found;
+ }
+ spd = (u32 *)get_property(prom_stdout, "current-speed", NULL);
+ if (spd)
+ speed = *spd;
+
+ if (0)
+ ;
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+ else if (strcmp(name, "serial") == 0) {
+ int i;
+ /* Look for it in probed array */
+ for (i = 0; i < legacy_serial_count; i++) {
+ if (prom_stdout != legacy_serial_infos[i].np)
+ continue;
+ offset = i;
+ speed = legacy_serial_infos[i].speed;
+ break;
+ }
+ if (i >= legacy_serial_count)
+ goto not_found;
+ }
+#endif /* CONFIG_SERIAL_8250_CONSOLE */
+#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
+ else if (strcmp(name, "ch-a") == 0)
+ offset = 0;
+ else if (strcmp(name, "ch-b") == 0)
+ offset = 1;
+#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
+ else
+ goto not_found;
+ of_node_put(prom_stdout);
+
+ DBG("Found serial console at ttyS%d\n", offset);
+
+ if (speed) {
+ static char __initdata opt[16];
+ sprintf(opt, "%d", speed);
+ return add_preferred_console("ttyS", offset, opt);
+ } else
+ return add_preferred_console("ttyS", offset, NULL);
+
+ not_found:
+ DBG("No preferred console found !\n");
+ of_node_put(prom_stdout);
+ return -ENODEV;
+}
+console_initcall(check_legacy_serial_console);
+
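
Putting the pieces above together, the intended ordering is: very early discovery of the ports, an arch initcall that fixes them up and registers the platform device, and a console initcall that picks the default console. A hypothetical early-setup caller, shown only to make that ordering concrete (the surrounding function is an assumption based on the comments above):

/* Sketch: where find_legacy_serial_ports() is expected to run.
 * serial_dev_init() then runs as an arch_initcall and
 * check_legacy_serial_console() as a console_initcall. */
void __init example_setup_system(void)
{
	/* device tree is already available at this point */
	find_legacy_serial_ports();	/* fills legacy_serial_ports[] and
					 * may bring up the udbg UART */
	/* ... remaining early platform setup ... */
}
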
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 9dda16ccde7..1ae96a8ed7e 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -55,15 +55,13 @@ static unsigned long get_purr(void)
{
unsigned long sum_purr = 0;
int cpu;
- struct paca_struct *lpaca;
for_each_cpu(cpu) {
- lpaca = paca + cpu;
- sum_purr += lpaca->lppaca.emulated_time_base;
+ sum_purr += lppaca[cpu].emulated_time_base;
#ifdef PURR_DEBUG
printk(KERN_INFO "get_purr for cpu (%d) has value (%ld) \n",
- cpu, lpaca->lppaca.emulated_time_base);
+ cpu, lppaca[cpu].emulated_time_base);
#endif
}
return sum_purr;
@@ -79,12 +77,11 @@ static int lparcfg_data(struct seq_file *m, void *v)
unsigned long pool_id, lp_index;
int shared, entitled_capacity, max_entitled_capacity;
int processors, max_processors;
- struct paca_struct *lpaca = get_paca();
unsigned long purr = get_purr();
seq_printf(m, "%s %s \n", MODULE_NAME, MODULE_VERS);
- shared = (int)(lpaca->lppaca_ptr->shared_proc);
+ shared = (int)(get_lppaca()->shared_proc);
seq_printf(m, "serial_number=%c%c%c%c%c%c%c\n",
e2a(xItExtVpdPanel.mfgID[2]),
e2a(xItExtVpdPanel.mfgID[3]),
@@ -402,7 +399,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
(h_resource >> 0 * 8) & 0xffff);
 	/* pool related entries are appropriate for shared configs */
- if (paca[0].lppaca.shared_proc) {
+ if (lppaca[0].shared_proc) {
h_pic(&pool_idle_time, &pool_procs);
@@ -451,7 +448,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
seq_printf(m, "partition_potential_processors=%d\n",
partition_potential_processors);
- seq_printf(m, "shared_processor_mode=%d\n", paca[0].lppaca.shared_proc);
+ seq_printf(m, "shared_processor_mode=%d\n", lppaca[0].shared_proc);
return 0;
}
diff --git a/arch/powerpc/kernel/lparmap.c b/arch/powerpc/kernel/lparmap.c
index 5a05a797485..584d1e3c013 100644
--- a/arch/powerpc/kernel/lparmap.c
+++ b/arch/powerpc/kernel/lparmap.c
@@ -7,7 +7,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <asm/mmu.h>
-#include <asm/page.h>
+#include <asm/pgtable.h>
#include <asm/iseries/lpar_map.h>
const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
@@ -16,16 +16,16 @@ const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
.xSegmentTableOffs = STAB0_PAGE,
.xEsids = {
- { .xKernelEsid = GET_ESID(KERNELBASE),
- .xKernelVsid = KERNEL_VSID(KERNELBASE), },
- { .xKernelEsid = GET_ESID(VMALLOCBASE),
- .xKernelVsid = KERNEL_VSID(VMALLOCBASE), },
+ { .xKernelEsid = GET_ESID(PAGE_OFFSET),
+ .xKernelVsid = KERNEL_VSID(PAGE_OFFSET), },
+ { .xKernelEsid = GET_ESID(VMALLOC_START),
+ .xKernelVsid = KERNEL_VSID(VMALLOC_START), },
},
.xRanges = {
{ .xPages = HvPagesToMap,
.xOffset = 0,
- .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - HW_PAGE_SHIFT),
+ .xVPN = KERNEL_VSID(PAGE_OFFSET) << (SID_SHIFT - HW_PAGE_SHIFT),
},
},
};
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
new file mode 100644
index 00000000000..a81ca1b841e
--- /dev/null
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -0,0 +1,61 @@
+/*
+ * Code to handle transition of Linux booting another kernel.
+ *
+ * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
+ * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
+ * Copyright (C) 2005 IBM Corporation.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/kexec.h>
+#include <linux/reboot.h>
+#include <linux/threads.h>
+#include <asm/machdep.h>
+
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+ if (ppc_md.machine_crash_shutdown)
+ ppc_md.machine_crash_shutdown(regs);
+}
+
+/*
+ * Do whatever setup is needed on the image and the
+ * reboot code buffer to allow us to avoid allocations
+ * later.
+ */
+int machine_kexec_prepare(struct kimage *image)
+{
+ if (ppc_md.machine_kexec_prepare)
+ return ppc_md.machine_kexec_prepare(image);
+ /*
+ * Fail if platform doesn't provide its own machine_kexec_prepare
+ * implementation.
+ */
+ return -ENOSYS;
+}
+
+void machine_kexec_cleanup(struct kimage *image)
+{
+ if (ppc_md.machine_kexec_cleanup)
+ ppc_md.machine_kexec_cleanup(image);
+}
+
+/*
+ * Do not allocate memory (or fail in any way) in machine_kexec().
+ * We are past the point of no return, committed to rebooting now.
+ */
+NORET_TYPE void machine_kexec(struct kimage *image)
+{
+ if (ppc_md.machine_kexec)
+ ppc_md.machine_kexec(image);
+ else {
+ /*
+ * Fall back to normal restart if platform doesn't provide
+ * its own kexec function, and the user insists on kexec...
+ */
+ machine_restart(NULL);
+ }
+ for(;;);
+}
diff --git a/arch/powerpc/kernel/machine_kexec_32.c b/arch/powerpc/kernel/machine_kexec_32.c
new file mode 100644
index 00000000000..443606134df
--- /dev/null
+++ b/arch/powerpc/kernel/machine_kexec_32.c
@@ -0,0 +1,65 @@
+/*
+ * PPC32 code to handle Linux booting another kernel.
+ *
+ * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
+ * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
+ * Copyright (C) 2005 IBM Corporation.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/kexec.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <asm/cacheflush.h>
+#include <asm/hw_irq.h>
+#include <asm/io.h>
+
+typedef NORET_TYPE void (*relocate_new_kernel_t)(
+ unsigned long indirection_page,
+ unsigned long reboot_code_buffer,
+ unsigned long start_address) ATTRIB_NORET;
+
+/*
+ * This is a generic machine_kexec function suitable at least for
+ * non-OpenFirmware embedded platforms.
+ * It merely copies the image relocation code to the control page and
+ * jumps to it.
+ * A platform specific function may just call this one.
+ */
+void default_machine_kexec(struct kimage *image)
+{
+ const extern unsigned char relocate_new_kernel[];
+ const extern unsigned int relocate_new_kernel_size;
+ unsigned long page_list;
+ unsigned long reboot_code_buffer, reboot_code_buffer_phys;
+ relocate_new_kernel_t rnk;
+
+ /* Interrupts aren't acceptable while we reboot */
+ local_irq_disable();
+
+ page_list = image->head;
+
+ /* we need both effective and real address here */
+ reboot_code_buffer =
+ (unsigned long)page_address(image->control_code_page);
+ reboot_code_buffer_phys = virt_to_phys((void *)reboot_code_buffer);
+
+ /* copy our kernel relocation code to the control code page */
+ memcpy((void *)reboot_code_buffer, relocate_new_kernel,
+ relocate_new_kernel_size);
+
+ flush_icache_range(reboot_code_buffer,
+ reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE);
+ printk(KERN_INFO "Bye!\n");
+
+ /* now call it */
+ rnk = (relocate_new_kernel_t) reboot_code_buffer;
+ (*rnk)(page_list, reboot_code_buffer_phys, image->start);
+}
+
+int default_machine_kexec_prepare(struct kimage *image)
+{
+ return 0;
+}
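
The comment above notes that a platform-specific machine_kexec may simply call default_machine_kexec() after doing its own quiescing. A minimal sketch of that shape; the board-specific helper is invented for the example:

    #include <linux/kexec.h>
    #include <asm/machdep.h>

    /* hypothetical: stop board DMA engines before the MMU is switched off */
    static void example_halt_board_dma(void)
    {
    }

    static void example_machine_kexec(struct kimage *image)
    {
            example_halt_board_dma();
            default_machine_kexec(image);   /* generic copy-to-control-page path */
    }
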
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 97c51e452be..d6431440c54 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -1,5 +1,5 @@
/*
- * machine_kexec.c - handle transition of Linux booting another kernel
+ * PPC64 code to handle Linux booting another kernel.
*
* Copyright (C) 2004-2005, IBM Corp.
*
@@ -28,21 +28,7 @@
#define HASH_GROUP_SIZE 0x80 /* size of each hash group, asm/mmu.h */
-/* Have this around till we move it into crash specific file */
-note_buf_t crash_notes[NR_CPUS];
-
-/* Dummy for now. Not sure if we need to have a crash shutdown in here
- * and if what it will achieve. Letting it be now to compile the code
- * in generic kexec environment
- */
-void machine_crash_shutdown(struct pt_regs *regs)
-{
- /* do nothing right now */
- /* smp_relase_cpus() if we want smp on panic kernel */
- /* cpu_irq_down to isolate us until we are ready */
-}
-
-int machine_kexec_prepare(struct kimage *image)
+int default_machine_kexec_prepare(struct kimage *image)
{
int i;
unsigned long begin, end; /* limits of segment */
@@ -111,11 +97,6 @@ int machine_kexec_prepare(struct kimage *image)
return 0;
}
-void machine_kexec_cleanup(struct kimage *image)
-{
- /* we do nothing in prepare that needs to be undone */
-}
-
#define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE)
static void copy_segments(unsigned long ind)
@@ -172,9 +153,8 @@ void kexec_copy_flush(struct kimage *image)
* including ones that were in place on the original copy
*/
for (i = 0; i < nr_segments; i++)
- flush_icache_range(ranges[i].mem + KERNELBASE,
- ranges[i].mem + KERNELBASE +
- ranges[i].memsz);
+ flush_icache_range((unsigned long)__va(ranges[i].mem),
+ (unsigned long)__va(ranges[i].mem + ranges[i].memsz));
}
#ifdef CONFIG_SMP
@@ -283,13 +263,20 @@ extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start,
void (*clear_all)(void)) ATTRIB_NORET;
/* too late to fail here */
-void machine_kexec(struct kimage *image)
+void default_machine_kexec(struct kimage *image)
{
-
/* prepare control code if any */
- /* shutdown other cpus into our wait loop and quiesce interrupts */
- kexec_prepare_cpus();
+ /*
+ * If this is a normal kexec boot, we need to shut down the other
+ * cpus into our wait loop and quiesce interrupts.
+ * Otherwise, in crash mode (crashing_cpu >= 0), the other CPUs have
+ * already been stopped and their pt_regs collected via the debugger
+ * IPI.
+ */
+
+ if (crashing_cpu == -1)
+ kexec_prepare_cpus();
/* switch to a statically allocated stack. Based on irq stack code.
* XXX: the task struct will likely be invalid once we do the copy!
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 624a983a967..be982023409 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -5,6 +5,10 @@
* Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
* and Paul Mackerras.
*
+ * kexec bits:
+ * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
+ * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
@@ -24,6 +28,8 @@
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
+#include <asm/processor.h>
+#include <asm/kexec.h>
.text
@@ -62,7 +68,7 @@ _GLOBAL(reloc_offset)
mflr r0
bl 1f
1: mflr r3
- LOADADDR(r4,1b)
+ LOAD_REG_IMMEDIATE(r4,1b)
subf r3,r4,r3
mtlr r0
blr
@@ -74,7 +80,7 @@ _GLOBAL(add_reloc_offset)
mflr r0
bl 1f
1: mflr r5
- LOADADDR(r4,1b)
+ LOAD_REG_IMMEDIATE(r4,1b)
subf r5,r4,r5
add r3,r3,r5
mtlr r0
@@ -1006,3 +1012,110 @@ _GLOBAL(execve)
*/
_GLOBAL(__main)
blr
+
+#ifdef CONFIG_KEXEC
+ /*
+ * Must be relocatable PIC code callable as a C function.
+ */
+ .globl relocate_new_kernel
+relocate_new_kernel:
+ /* r3 = page_list */
+ /* r4 = reboot_code_buffer */
+ /* r5 = start_address */
+
+ li r0, 0
+
+ /*
+ * Set the Machine State Register to a known state,
+ * switch the MMU off and jump to 1: in a single step.
+ */
+
+ mr r8, r0
+ ori r8, r8, MSR_RI|MSR_ME
+ mtspr SPRN_SRR1, r8
+ addi r8, r4, 1f - relocate_new_kernel
+ mtspr SPRN_SRR0, r8
+ sync
+ rfi
+
+1:
+ /* from this point address translation is turned off */
+ /* and interrupts are disabled */
+
+ /* set a new stack at the bottom of our page... */
+ /* (not really needed now) */
+ addi r1, r4, KEXEC_CONTROL_CODE_SIZE - 8 /* for LR Save+Back Chain */
+ stw r0, 0(r1)
+
+ /* Do the copies */
+ li r6, 0 /* checksum */
+ mr r0, r3
+ b 1f
+
+0: /* top, read another word for the indirection page */
+ lwzu r0, 4(r3)
+
+1:
+ /* is it a destination page? (r8) */
+ rlwinm. r7, r0, 0, 31, 31 /* IND_DESTINATION (1<<0) */
+ beq 2f
+
+ rlwinm r8, r0, 0, 0, 19 /* clear kexec flags, page align */
+ b 0b
+
+2: /* is it an indirection page? (r3) */
+ rlwinm. r7, r0, 0, 30, 30 /* IND_INDIRECTION (1<<1) */
+ beq 2f
+
+ rlwinm r3, r0, 0, 0, 19 /* clear kexec flags, page align */
+ subi r3, r3, 4
+ b 0b
+
+2: /* are we done? */
+ rlwinm. r7, r0, 0, 29, 29 /* IND_DONE (1<<2) */
+ beq 2f
+ b 3f
+
+2: /* is it a source page? (r9) */
+ rlwinm. r7, r0, 0, 28, 28 /* IND_SOURCE (1<<3) */
+ beq 0b
+
+ rlwinm r9, r0, 0, 0, 19 /* clear kexec flags, page align */
+
+ li r7, PAGE_SIZE / 4
+ mtctr r7
+ subi r9, r9, 4
+ subi r8, r8, 4
+9:
+ lwzu r0, 4(r9) /* do the copy */
+ xor r6, r6, r0
+ stwu r0, 4(r8)
+ dcbst 0, r8
+ sync
+ icbi 0, r8
+ bdnz 9b
+
+ addi r9, r9, 4
+ addi r8, r8, 4
+ b 0b
+
+3:
+
+ /* To be certain of avoiding problems with self-modifying code,
+ * execute a serializing instruction here.
+ */
+ isync
+ sync
+
+ /* jump to the entry point, usually the setup routine */
+ mtlr r5
+ blrl
+
+1: b 1b
+
+relocate_new_kernel_end:
+
+ .globl relocate_new_kernel_size
+relocate_new_kernel_size:
+ .long relocate_new_kernel_end - relocate_new_kernel
+#endif
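
For readers following the assembly, the loop between the 0:/1:/2: labels is the standard kexec indirection-page walk: each 32-bit entry carries a page-aligned address plus one of the IND_* flag bits tested with rlwinm. A standalone C rendering of the same walk (userspace-style, simplified: no checksum, no cache flushing, virtual addresses instead of real ones):

    #include <stdint.h>
    #include <string.h>

    #define IND_DESTINATION 0x1   /* entry selects the next destination page  */
    #define IND_INDIRECTION 0x2   /* entry points at the next page of entries */
    #define IND_DONE        0x4   /* end of the whole list                    */
    #define IND_SOURCE      0x8   /* entry names a source page to copy        */
    #define PAGE_SZ         4096
    #define LOW_MASK        ((uintptr_t)PAGE_SZ - 1)

    static void walk_indirection(const uintptr_t *entry)
    {
            unsigned char *dest = NULL;

            for (;;) {
                    uintptr_t e = *entry++;

                    if (e & IND_DONE)
                            break;
                    if (e & IND_DESTINATION)
                            dest = (unsigned char *)(e & ~LOW_MASK);
                    else if (e & IND_INDIRECTION)
                            entry = (const uintptr_t *)(e & ~LOW_MASK);
                    else if (e & IND_SOURCE) {
                            memcpy(dest, (const void *)(e & ~LOW_MASK), PAGE_SZ);
                            dest += PAGE_SZ;
                    }
            }
    }
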
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index ae48a002f81..2778cce058e 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -39,7 +39,7 @@ _GLOBAL(reloc_offset)
mflr r0
bl 1f
1: mflr r3
- LOADADDR(r4,1b)
+ LOAD_REG_IMMEDIATE(r4,1b)
subf r3,r4,r3
mtlr r0
blr
@@ -51,7 +51,7 @@ _GLOBAL(add_reloc_offset)
mflr r0
bl 1f
1: mflr r5
- LOADADDR(r4,1b)
+ LOAD_REG_IMMEDIATE(r4,1b)
subf r5,r4,r5
add r3,r3,r5
mtlr r0
@@ -498,15 +498,15 @@ _GLOBAL(identify_cpu)
*/
_GLOBAL(do_cpu_ftr_fixups)
/* Get CPU 0 features */
- LOADADDR(r6,cur_cpu_spec)
+ LOAD_REG_IMMEDIATE(r6,cur_cpu_spec)
sub r6,r6,r3
ld r4,0(r6)
sub r4,r4,r3
ld r4,CPU_SPEC_FEATURES(r4)
/* Get the fixup table */
- LOADADDR(r6,__start___ftr_fixup)
+ LOAD_REG_IMMEDIATE(r6,__start___ftr_fixup)
sub r6,r6,r3
- LOADADDR(r7,__stop___ftr_fixup)
+ LOAD_REG_IMMEDIATE(r7,__stop___ftr_fixup)
sub r7,r7,r3
/* Do the fixup */
1: cmpld r6,r7
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index c0fcd29918c..fd7db8d542d 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -80,80 +80,74 @@ static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin)
static ssize_t dev_nvram_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- ssize_t len;
- char *tmp_buffer;
- int size;
+ ssize_t ret;
+ char *tmp = NULL;
+ ssize_t size;
- if (ppc_md.nvram_size == NULL)
- return -ENODEV;
+ ret = -ENODEV;
+ if (!ppc_md.nvram_size)
+ goto out;
+
+ ret = 0;
size = ppc_md.nvram_size();
+ if (*ppos >= size || size < 0)
+ goto out;
- if (!access_ok(VERIFY_WRITE, buf, count))
- return -EFAULT;
- if (*ppos >= size)
- return 0;
- if (count > size)
- count = size;
+ count = min_t(size_t, count, size - *ppos);
+ count = min(count, PAGE_SIZE);
- tmp_buffer = (char *) kmalloc(count, GFP_KERNEL);
- if (!tmp_buffer) {
- printk(KERN_ERR "dev_read_nvram: kmalloc failed\n");
- return -ENOMEM;
- }
+ ret = -ENOMEM;
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (!tmp)
+ goto out;
- len = ppc_md.nvram_read(tmp_buffer, count, ppos);
- if ((long)len <= 0) {
- kfree(tmp_buffer);
- return len;
- }
+ ret = ppc_md.nvram_read(tmp, count, ppos);
+ if (ret <= 0)
+ goto out;
- if (copy_to_user(buf, tmp_buffer, len)) {
- kfree(tmp_buffer);
- return -EFAULT;
- }
+ if (copy_to_user(buf, tmp, ret))
+ ret = -EFAULT;
- kfree(tmp_buffer);
- return len;
+out:
+ kfree(tmp);
+ return ret;
}
static ssize_t dev_nvram_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
- ssize_t len;
- char * tmp_buffer;
- int size;
+ ssize_t ret;
+ char *tmp = NULL;
+ ssize_t size;
- if (ppc_md.nvram_size == NULL)
- return -ENODEV;
+ ret = -ENODEV;
+ if (!ppc_md.nvram_size)
+ goto out;
+
+ ret = 0;
size = ppc_md.nvram_size();
+ if (*ppos >= size || size < 0)
+ goto out;
- if (!access_ok(VERIFY_READ, buf, count))
- return -EFAULT;
- if (*ppos >= size)
- return 0;
- if (count > size)
- count = size;
+ count = min_t(size_t, count, size - *ppos);
+ count = min(count, PAGE_SIZE);
- tmp_buffer = (char *) kmalloc(count, GFP_KERNEL);
- if (!tmp_buffer) {
- printk(KERN_ERR "dev_nvram_write: kmalloc failed\n");
- return -ENOMEM;
- }
-
- if (copy_from_user(tmp_buffer, buf, count)) {
- kfree(tmp_buffer);
- return -EFAULT;
- }
+ ret = -ENOMEM;
+ tmp = kmalloc(count, GFP_KERNEL);
+ if (!tmp)
+ goto out;
- len = ppc_md.nvram_write(tmp_buffer, count, ppos);
- if ((long)len <= 0) {
- kfree(tmp_buffer);
- return len;
- }
+ ret = -EFAULT;
+ if (copy_from_user(tmp, buf, count))
+ goto out;
+
+ ret = ppc_md.nvram_write(tmp, count, ppos);
+
+out:
+ kfree(tmp);
+ return ret;
- kfree(tmp_buffer);
- return len;
}
static int dev_nvram_ioctl(struct inode *inode, struct file *file,
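
The dev_nvram_read()/dev_nvram_write() rework above settles on one shape: clamp the request against the device size and a one-page cap, bounce the data through a temporary buffer, and unwind through a single exit label so every path frees the buffer exactly once (kfree(NULL) is a no-op). A plain-C sketch of that read shape in isolation; backend_read() is an invented stand-in for ppc_md.nvram_read, and the user-space copy becomes a memcpy here:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    /* stand-in for ppc_md.nvram_read(): fill with zeros and advance *ppos */
    static long backend_read(char *buf, size_t count, long *ppos)
    {
            memset(buf, 0, count);
            *ppos += count;
            return (long)count;
    }

    static long bounded_read(char *dst, size_t count, long *ppos, long dev_size)
    {
            long ret;
            char *tmp = NULL;

            ret = -ENODEV;
            if (dev_size < 0)
                    goto out;

            ret = 0;
            if (*ppos >= dev_size)
                    goto out;

            if (count > (size_t)(dev_size - *ppos))
                    count = dev_size - *ppos;
            if (count > 4096)                 /* cap, like min(count, PAGE_SIZE) */
                    count = 4096;

            ret = -ENOMEM;
            tmp = malloc(count);
            if (!tmp)
                    goto out;

            ret = backend_read(tmp, count, ppos);
            if (ret > 0)
                    memcpy(dst, tmp, ret);    /* copy_to_user() in the real code */
    out:
            free(tmp);                        /* free(NULL) is a no-op, like kfree */
            return ret;
    }
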
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index a7b68f911eb..5d1b708086b 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -17,6 +17,7 @@
#include <asm/page.h>
#include <asm/lppaca.h>
#include <asm/iseries/it_lp_queue.h>
+#include <asm/iseries/it_lp_reg_save.h>
#include <asm/paca.h>
@@ -24,10 +25,31 @@
* field correctly */
extern unsigned long __toc_start;
+/*
+ * iSeries structure which the hypervisor knows about - this structure
+ * should not cross a page boundary. The vpa_init/register_vpa call
+ * is now known to fail if the lppaca structure crosses a page
+ * boundary. The lppaca is also used on POWER5 pSeries boxes. The
+ * lppaca is 640 bytes long, and cannot readily change since the
+ * hypervisor knows its layout, so a 1kB alignment will suffice to
+ * ensure that it doesn't cross a page boundary.
+ */
+struct lppaca lppaca[] = {
+ [0 ... (NR_CPUS-1)] = {
+ .desc = 0xd397d781, /* "LpPa" */
+ .size = sizeof(struct lppaca),
+ .dyn_proc_status = 2,
+ .decr_val = 0x00ff0000,
+ .fpregs_in_use = 1,
+ .end_of_quantum = 0xfffffffffffffffful,
+ .slb_count = 64,
+ .vmxregs_in_use = 0,
+ },
+};
+
/* The Paca is an array with one entry per processor. Each contains an
* lppaca, which contains the information shared between the
- * hypervisor and Linux. Each also contains an ItLpRegSave area which
- * is used by the hypervisor to save registers.
+ * hypervisor and Linux.
* On systems with hardware multi-threading, there are two threads
* per processor. The Paca array must contain an entry for each thread.
* The VPD Areas will give a max logical processors = 2 * max physical
@@ -35,33 +57,18 @@ extern unsigned long __toc_start;
* processor (not thread).
*/
#define PACA_INIT_COMMON(number, start, asrr, asrv) \
+ .lppaca_ptr = &lppaca[number], \
.lock_token = 0x8000, \
.paca_index = (number), /* Paca Index */ \
- .default_decr = 0x00ff0000, /* Initial Decr */ \
.kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL, \
.stab_real = (asrr), /* Real pointer to segment table */ \
.stab_addr = (asrv), /* Virt pointer to segment table */ \
.cpu_start = (start), /* Processor start */ \
- .hw_cpu_id = 0xffff, \
- .lppaca = { \
- .desc = 0xd397d781, /* "LpPa" */ \
- .size = sizeof(struct lppaca), \
- .dyn_proc_status = 2, \
- .decr_val = 0x00ff0000, \
- .fpregs_in_use = 1, \
- .end_of_quantum = 0xfffffffffffffffful, \
- .slb_count = 64, \
- .vmxregs_in_use = 0, \
- }, \
+ .hw_cpu_id = 0xffff,
#ifdef CONFIG_PPC_ISERIES
#define PACA_INIT_ISERIES(number) \
- .lppaca_ptr = &paca[number].lppaca, \
- .reg_save_ptr = &paca[number].reg_save, \
- .reg_save = { \
- .xDesc = 0xd397d9e2, /* "LpRS" */ \
- .xSize = sizeof(struct ItLpRegSave) \
- }
+ .reg_save_ptr = &iseries_reg_save[number],
#define PACA_INIT(number) \
{ \
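
The long comment above the lppaca[] array argues that, because the structure is 640 bytes and 1kB-aligned, it can never straddle a page boundary. A tiny compile-time illustration of that argument (the static_assert guards are hypothetical and not part of the patch; the actual 1kB alignment comes from the struct lppaca definition):

    #include <assert.h>

    #define LPPACA_ALIGN  0x400   /* 1kB alignment, as argued in the comment  */
    #define LPPACA_SIZE   640     /* bytes, fixed by the hypervisor interface */
    #define DEMO_PAGE     4096    /* any page size that is a multiple of 1kB  */

    /* an object no larger than its alignment cannot cross an aligned boundary */
    static_assert(LPPACA_SIZE <= LPPACA_ALIGN,
                  "lppaca larger than its alignment could straddle a page");
    /* and page boundaries are themselves 1kB-aligned when PAGE_SIZE % 1kB == 0 */
    static_assert(DEMO_PAGE % LPPACA_ALIGN == 0,
                  "page size must be a multiple of the lppaca alignment");
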
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 8b6008ab217..c367520bc1c 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -34,7 +34,7 @@
#ifdef DEBUG
#include <asm/udbg.h>
-#define DBG(fmt...) udbg_printf(fmt)
+#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif
@@ -53,6 +53,7 @@ EXPORT_SYMBOL(io_page_mask);
#ifdef CONFIG_PPC_MULTIPLATFORM
static void fixup_resource(struct resource *res, struct pci_dev *dev);
static void do_bus_setup(struct pci_bus *bus);
+static void phbs_remap_io(void);
#endif
/* pci_io_base -- the base address from which io bars are offsets.
@@ -251,7 +252,8 @@ void pcibios_free_controller(struct pci_controller *phb)
kfree(phb);
}
-static void __init pcibios_claim_one_bus(struct pci_bus *b)
+#ifndef CONFIG_PPC_ISERIES
+void __devinit pcibios_claim_one_bus(struct pci_bus *b)
{
struct pci_dev *dev;
struct pci_bus *child_bus;
@@ -271,8 +273,10 @@ static void __init pcibios_claim_one_bus(struct pci_bus *b)
list_for_each_entry(child_bus, &b->children, node)
pcibios_claim_one_bus(child_bus);
}
+#ifdef CONFIG_HOTPLUG
+EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
+#endif
-#ifndef CONFIG_PPC_ISERIES
static void __init pcibios_claim_of_setup(void)
{
struct pci_bus *b;
@@ -323,6 +327,7 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
addrs = (u32 *) get_property(node, "assigned-addresses", &proplen);
if (!addrs)
return;
+ DBG(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
for (; proplen >= 20; proplen -= 20, addrs += 5) {
flags = pci_parse_of_flags(addrs[0]);
if (!flags)
@@ -332,6 +337,9 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
if (!size)
continue;
i = addrs[0] & 0xff;
+ DBG(" base: %llx, size: %llx, i: %x\n",
+ (unsigned long long)base, (unsigned long long)size, i);
+
if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
} else if (i == dev->rom_base_reg) {
@@ -362,6 +370,8 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
if (type == NULL)
type = "";
+ DBG(" create device, devfn: %x, type: %s\n", devfn, type);
+
memset(dev, 0, sizeof(struct pci_dev));
dev->bus = bus;
dev->sysdata = node;
@@ -375,12 +385,14 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
dev->subsystem_device = get_int_prop(node, "subsystem-id", 0);
- dev->cfg_size = 256; /*pci_cfg_space_size(dev);*/
+ dev->cfg_size = pci_cfg_space_size(dev);
sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
dev->class = get_int_prop(node, "class-code", 0);
+ DBG(" class: 0x%x\n", dev->class);
+
dev->current_state = 4; /* unknown power state */
if (!strcmp(type, "pci")) {
@@ -402,6 +414,8 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
pci_parse_of_addrs(node, dev);
+ DBG(" adding to system ...\n");
+
pci_device_add(dev, bus);
/* XXX pci_scan_msi_device(dev); */
@@ -418,15 +432,21 @@ void __devinit of_scan_bus(struct device_node *node,
int reglen, devfn;
struct pci_dev *dev;
+ DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number);
+
while ((child = of_get_next_child(node, child)) != NULL) {
+ DBG(" * %s\n", child->full_name);
reg = (u32 *) get_property(child, "reg", &reglen);
if (reg == NULL || reglen < 20)
continue;
devfn = (reg[0] >> 8) & 0xff;
+
/* create a new pci_dev for this device */
dev = of_create_pci_dev(child, bus, devfn);
if (!dev)
continue;
+ DBG("dev header type: %x\n", dev->hdr_type);
+
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
of_scan_pci_bridge(child, dev);
@@ -446,16 +466,18 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
unsigned int flags;
u64 size;
+ DBG("of_scan_pci_bridge(%s)\n", node->full_name);
+
/* parse bus-range property */
busrange = (u32 *) get_property(node, "bus-range", &len);
if (busrange == NULL || len != 8) {
- printk(KERN_ERR "Can't get bus-range for PCI-PCI bridge %s\n",
+ printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
node->full_name);
return;
}
ranges = (u32 *) get_property(node, "ranges", &len);
if (ranges == NULL) {
- printk(KERN_ERR "Can't get ranges for PCI-PCI bridge %s\n",
+ printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n",
node->full_name);
return;
}
@@ -509,10 +531,13 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
}
sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
bus->number);
+ DBG(" bus name: %s\n", bus->name);
mode = PCI_PROBE_NORMAL;
if (ppc_md.pci_probe_mode)
mode = ppc_md.pci_probe_mode(bus);
+ DBG(" probe mode: %d\n", mode);
+
if (mode == PCI_PROBE_DEVTREE)
of_scan_bus(node, bus);
else if (mode == PCI_PROBE_NORMAL)
@@ -528,6 +553,8 @@ void __devinit scan_phb(struct pci_controller *hose)
int i, mode;
struct resource *res;
+ DBG("Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");
+
bus = pci_create_bus(NULL, hose->first_busno, hose->ops, node);
if (bus == NULL) {
printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
@@ -552,8 +579,9 @@ void __devinit scan_phb(struct pci_controller *hose)
mode = PCI_PROBE_NORMAL;
#ifdef CONFIG_PPC_MULTIPLATFORM
- if (ppc_md.pci_probe_mode)
+ if (node && ppc_md.pci_probe_mode)
mode = ppc_md.pci_probe_mode(bus);
+ DBG(" probe mode: %d\n", mode);
if (mode == PCI_PROBE_DEVTREE) {
bus->subordinate = hose->last_busno;
of_scan_bus(node, bus);
@@ -842,8 +870,7 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
* Returns a negative error code on failure, zero on success.
*/
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state,
- int write_combine)
+ enum pci_mmap_state mmap_state, int write_combine)
{
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
struct resource *rp;
@@ -896,6 +923,25 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
unsigned long phb_io_base_phys,
void __iomem * phb_io_base_virt)
{
+ /* Remove these asap */
+
+ struct pci_address {
+ u32 a_hi;
+ u32 a_mid;
+ u32 a_lo;
+ };
+
+ struct isa_address {
+ u32 a_hi;
+ u32 a_lo;
+ };
+
+ struct isa_range {
+ struct isa_address isa_addr;
+ struct pci_address pci_addr;
+ unsigned int size;
+ };
+
struct isa_range *range;
unsigned long pci_addr;
unsigned int isa_addr;
@@ -1173,7 +1219,7 @@ int remap_bus_range(struct pci_bus *bus)
}
EXPORT_SYMBOL(remap_bus_range);
-void phbs_remap_io(void)
+static void phbs_remap_io(void)
{
struct pci_controller *hose, *tmp;
@@ -1223,6 +1269,7 @@ void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
}
EXPORT_SYMBOL(pcibios_fixup_device_resources);
+
static void __devinit do_bus_setup(struct pci_bus *bus)
{
struct pci_dev *dev;
@@ -1306,8 +1353,38 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
*end = rsrc->end + offset;
}
+struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
+{
+ if (!have_of)
+ return NULL;
+ while(node) {
+ struct pci_controller *hose, *tmp;
+ list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
+ if (hose->arch_data == node)
+ return hose;
+ node = node->parent;
+ }
+ return NULL;
+}
+
#endif /* CONFIG_PPC_MULTIPLATFORM */
+unsigned long pci_address_to_pio(phys_addr_t address)
+{
+ struct pci_controller *hose, *tmp;
+
+ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+ if (address >= hose->io_base_phys &&
+ address < (hose->io_base_phys + hose->pci_io_size)) {
+ unsigned long base =
+ (unsigned long)hose->io_base_virt - pci_io_base;
+ return base + (address - hose->io_base_phys);
+ }
+ }
+ return (unsigned int)-1;
+}
+EXPORT_SYMBOL_GPL(pci_address_to_pio);
+
#define IOBASE_BRIDGE_NUMBER 0
#define IOBASE_MEMORY 1
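
pci_address_to_pio(), added above, walks hose_list to turn a CPU physical I/O address (as found in the device tree) into the port number that the port accessors expect, returning (unsigned int)-1 when no host bridge claims the address. A hedged usage sketch; the helper and inb() are real, while the wrapper, its header choices, and the error handling are illustrative:

    #include <asm/io.h>
    #include <asm/prom.h>

    static unsigned char example_read_port(phys_addr_t io_phys)
    {
            unsigned long port = pci_address_to_pio(io_phys);

            if (port == (unsigned int)-1)   /* no PHB owns this address */
                    return 0xff;
            return inb(port);
    }
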
diff --git a/arch/powerpc/kernel/pmc.c b/arch/powerpc/kernel/pmc.c
index 2d333cc8408..e6fb194fe53 100644
--- a/arch/powerpc/kernel/pmc.c
+++ b/arch/powerpc/kernel/pmc.c
@@ -43,8 +43,13 @@ static void dummy_perf(struct pt_regs *regs)
mtspr(SPRN_MMCR0, mmcr0);
}
#else
+/* Ensure exceptions are disabled */
static void dummy_perf(struct pt_regs *regs)
{
+ unsigned int mmcr0 = mfspr(SPRN_MMCR0);
+
+ mmcr0 &= ~(MMCR0_PMXE);
+ mtspr(SPRN_MMCR0, mmcr0);
}
#endif
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 94db2570845..d9a459c144d 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -76,11 +76,6 @@ EXPORT_SYMBOL(single_step_exception);
EXPORT_SYMBOL(sys_sigreturn);
#endif
-#if defined(CONFIG_PPC_PREP)
-EXPORT_SYMBOL(_prep_type);
-EXPORT_SYMBOL(ucSystemType);
-#endif
-
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strcat);
@@ -235,8 +230,7 @@ EXPORT_SYMBOL(__down_interruptible);
EXPORT_SYMBOL(cpm_install_handler);
EXPORT_SYMBOL(cpm_free_handler);
#endif /* CONFIG_8xx */
-#if defined(CONFIG_8xx) || defined(CONFIG_40x) || defined(CONFIG_85xx) ||\
- defined(CONFIG_83xx)
+#if defined(CONFIG_8xx) || defined(CONFIG_40x)
EXPORT_SYMBOL(__res);
#endif
@@ -249,7 +243,6 @@ EXPORT_SYMBOL(set_context);
extern long mol_trampoline;
EXPORT_SYMBOL(mol_trampoline); /* For MOL */
EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
-EXPORT_SYMBOL_GPL(__handle_mm_fault); /* For MOL */
#ifdef CONFIG_SMP
extern int mmu_hash_lock;
EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 105d5609ff5..57703994a06 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -201,13 +201,13 @@ int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
}
#endif /* CONFIG_SPE */
+#ifndef CONFIG_SMP
/*
* If we are doing lazy switching of CPU state (FP, altivec or SPE),
* and the current task has some state, discard it.
*/
-static inline void discard_lazy_cpu_state(void)
+void discard_lazy_cpu_state(void)
{
-#ifndef CONFIG_SMP
preempt_disable();
if (last_task_used_math == current)
last_task_used_math = NULL;
@@ -220,9 +220,10 @@ static inline void discard_lazy_cpu_state(void)
last_task_used_spe = NULL;
#endif
preempt_enable();
-#endif /* CONFIG_SMP */
}
+#endif /* CONFIG_SMP */
+#ifdef CONFIG_PPC_MERGE /* XXX for now */
int set_dabr(unsigned long dabr)
{
if (ppc_md.set_dabr)
@@ -231,6 +232,7 @@ int set_dabr(unsigned long dabr)
mtspr(SPRN_DABR, dabr);
return 0;
}
+#endif
#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
@@ -424,7 +426,7 @@ void show_regs(struct pt_regs * regs)
if (trap == 0x300 || trap == 0x600)
printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
printk("TASK = %p[%d] '%s' THREAD: %p",
- current, current->pid, current->comm, current->thread_info);
+ current, current->pid, current->comm, task_thread_info(current));
#ifdef CONFIG_SMP
printk(" CPU: %d", smp_processor_id());
@@ -503,7 +505,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
{
struct pt_regs *childregs, *kregs;
extern void ret_from_fork(void);
- unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
+ unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
CHECK_FULL_REGS(regs);
/* Copy registers */
@@ -516,7 +518,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
#ifdef CONFIG_PPC32
childregs->gpr[2] = (unsigned long) p;
#else
- clear_ti_thread_flag(p->thread_info, TIF_32BIT);
+ clear_tsk_thread_flag(p, TIF_32BIT);
#endif
p->thread.regs = NULL; /* no user register state */
} else {
@@ -588,10 +590,8 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
* set. Do it now.
*/
if (!current->thread.regs) {
- unsigned long childregs = (unsigned long)current->thread_info +
- THREAD_SIZE;
- childregs -= sizeof(struct pt_regs);
- current->thread.regs = (struct pt_regs *)childregs;
+ struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
+ current->thread.regs = regs - 1;
}
memset(regs->gpr, 0, sizeof(regs->gpr));
@@ -767,7 +767,7 @@ out:
static int validate_sp(unsigned long sp, struct task_struct *p,
unsigned long nbytes)
{
- unsigned long stack_page = (unsigned long)p->thread_info;
+ unsigned long stack_page = (unsigned long)task_stack_page(p);
if (sp >= stack_page + sizeof(struct thread_struct)
&& sp <= stack_page + THREAD_SIZE - nbytes)
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 3bf968e7409..d50c8df0183 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -29,6 +29,7 @@
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/module.h>
+#include <linux/kexec.h>
#include <asm/prom.h>
#include <asm/rtas.h>
@@ -37,6 +38,7 @@
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/io.h>
+#include <asm/kdump.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/mmu.h>
@@ -55,21 +57,6 @@
#define DBG(fmt...)
#endif
-struct pci_reg_property {
- struct pci_address addr;
- u32 size_hi;
- u32 size_lo;
-};
-
-struct isa_reg_property {
- u32 space;
- u32 address;
- u32 size;
-};
-
-
-typedef int interpret_func(struct device_node *, unsigned long *,
- int, int, int);
static int __initdata dt_root_addr_cells;
static int __initdata dt_root_size_cells;
@@ -311,6 +298,16 @@ static int __devinit finish_node_interrupts(struct device_node *np,
int i, j, n, sense;
unsigned int *irq, virq;
struct device_node *ic;
+ int trace = 0;
+
+ //#define TRACE(fmt...) do { if (trace) { printk(fmt); mdelay(1000); } } while(0)
+#define TRACE(fmt...)
+
+ if (!strcmp(np->name, "smu-doorbell"))
+ trace = 1;
+
+ TRACE("Finishing SMU doorbell ! num_interrupt_controllers = %d\n",
+ num_interrupt_controllers);
if (num_interrupt_controllers == 0) {
/*
@@ -345,11 +342,12 @@ static int __devinit finish_node_interrupts(struct device_node *np,
}
ints = (unsigned int *) get_property(np, "interrupts", &intlen);
+ TRACE("ints=%p, intlen=%d\n", ints, intlen);
if (ints == NULL)
return 0;
intrcells = prom_n_intr_cells(np);
intlen /= intrcells * sizeof(unsigned int);
-
+ TRACE("intrcells=%d, new intlen=%d\n", intrcells, intlen);
np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
if (!np->intrs)
return -ENOMEM;
@@ -360,6 +358,7 @@ static int __devinit finish_node_interrupts(struct device_node *np,
intrcount = 0;
for (i = 0; i < intlen; ++i, ints += intrcells) {
n = map_interrupt(&irq, &ic, np, ints, intrcells);
+ TRACE("map, irq=%d, ic=%p, n=%d\n", irq, ic, n);
if (n <= 0)
continue;
@@ -370,6 +369,7 @@ static int __devinit finish_node_interrupts(struct device_node *np,
np->intrs[intrcount].sense = map_isa_senses[sense];
} else {
virq = virt_irq_create_mapping(irq[0]);
+ TRACE("virq=%d\n", virq);
#ifdef CONFIG_PPC64
if (virq == NO_IRQ) {
printk(KERN_CRIT "Could not allocate interrupt"
@@ -379,6 +379,12 @@ static int __devinit finish_node_interrupts(struct device_node *np,
#endif
np->intrs[intrcount].line = irq_offset_up(virq);
sense = (n > 1)? (irq[1] & 3): 1;
+
+ /* Apple uses bits in there in a different way, let's
+ * only keep the real sense bit on macs
+ */
+ if (_machine == PLATFORM_POWERMAC)
+ sense &= 0x1;
np->intrs[intrcount].sense = map_mpic_senses[sense];
}
@@ -388,12 +394,13 @@ static int __devinit finish_node_interrupts(struct device_node *np,
char *name = get_property(ic->parent, "name", NULL);
if (name && !strcmp(name, "u3"))
np->intrs[intrcount].line += 128;
- else if (!(name && !strcmp(name, "mac-io")))
+ else if (!(name && (!strcmp(name, "mac-io") ||
+ !strcmp(name, "u4"))))
/* ignore other cascaded controllers, such as
the k2-sata-root */
break;
}
-#endif
+#endif /* CONFIG_PPC64 */
if (n > 2) {
printk("hmmm, got %d intr cells for %s:", n,
np->full_name);
@@ -408,234 +415,19 @@ static int __devinit finish_node_interrupts(struct device_node *np,
return 0;
}
-static int __devinit interpret_pci_props(struct device_node *np,
- unsigned long *mem_start,
- int naddrc, int nsizec,
- int measure_only)
-{
- struct address_range *adr;
- struct pci_reg_property *pci_addrs;
- int i, l, n_addrs;
-
- pci_addrs = (struct pci_reg_property *)
- get_property(np, "assigned-addresses", &l);
- if (!pci_addrs)
- return 0;
-
- n_addrs = l / sizeof(*pci_addrs);
-
- adr = prom_alloc(n_addrs * sizeof(*adr), mem_start);
- if (!adr)
- return -ENOMEM;
-
- if (measure_only)
- return 0;
-
- np->addrs = adr;
- np->n_addrs = n_addrs;
-
- for (i = 0; i < n_addrs; i++) {
- adr[i].space = pci_addrs[i].addr.a_hi;
- adr[i].address = pci_addrs[i].addr.a_lo |
- ((u64)pci_addrs[i].addr.a_mid << 32);
- adr[i].size = pci_addrs[i].size_lo;
- }
-
- return 0;
-}
-
-static int __init interpret_dbdma_props(struct device_node *np,
- unsigned long *mem_start,
- int naddrc, int nsizec,
- int measure_only)
-{
- struct reg_property32 *rp;
- struct address_range *adr;
- unsigned long base_address;
- int i, l;
- struct device_node *db;
-
- base_address = 0;
- if (!measure_only) {
- for (db = np->parent; db != NULL; db = db->parent) {
- if (!strcmp(db->type, "dbdma") && db->n_addrs != 0) {
- base_address = db->addrs[0].address;
- break;
- }
- }
- }
-
- rp = (struct reg_property32 *) get_property(np, "reg", &l);
- if (rp != 0 && l >= sizeof(struct reg_property32)) {
- i = 0;
- adr = (struct address_range *) (*mem_start);
- while ((l -= sizeof(struct reg_property32)) >= 0) {
- if (!measure_only) {
- adr[i].space = 2;
- adr[i].address = rp[i].address + base_address;
- adr[i].size = rp[i].size;
- }
- ++i;
- }
- np->addrs = adr;
- np->n_addrs = i;
- (*mem_start) += i * sizeof(struct address_range);
- }
-
- return 0;
-}
-
-static int __init interpret_macio_props(struct device_node *np,
- unsigned long *mem_start,
- int naddrc, int nsizec,
- int measure_only)
-{
- struct reg_property32 *rp;
- struct address_range *adr;
- unsigned long base_address;
- int i, l;
- struct device_node *db;
-
- base_address = 0;
- if (!measure_only) {
- for (db = np->parent; db != NULL; db = db->parent) {
- if (!strcmp(db->type, "mac-io") && db->n_addrs != 0) {
- base_address = db->addrs[0].address;
- break;
- }
- }
- }
-
- rp = (struct reg_property32 *) get_property(np, "reg", &l);
- if (rp != 0 && l >= sizeof(struct reg_property32)) {
- i = 0;
- adr = (struct address_range *) (*mem_start);
- while ((l -= sizeof(struct reg_property32)) >= 0) {
- if (!measure_only) {
- adr[i].space = 2;
- adr[i].address = rp[i].address + base_address;
- adr[i].size = rp[i].size;
- }
- ++i;
- }
- np->addrs = adr;
- np->n_addrs = i;
- (*mem_start) += i * sizeof(struct address_range);
- }
-
- return 0;
-}
-
-static int __init interpret_isa_props(struct device_node *np,
- unsigned long *mem_start,
- int naddrc, int nsizec,
- int measure_only)
-{
- struct isa_reg_property *rp;
- struct address_range *adr;
- int i, l;
-
- rp = (struct isa_reg_property *) get_property(np, "reg", &l);
- if (rp != 0 && l >= sizeof(struct isa_reg_property)) {
- i = 0;
- adr = (struct address_range *) (*mem_start);
- while ((l -= sizeof(struct isa_reg_property)) >= 0) {
- if (!measure_only) {
- adr[i].space = rp[i].space;
- adr[i].address = rp[i].address;
- adr[i].size = rp[i].size;
- }
- ++i;
- }
- np->addrs = adr;
- np->n_addrs = i;
- (*mem_start) += i * sizeof(struct address_range);
- }
-
- return 0;
-}
-
-static int __init interpret_root_props(struct device_node *np,
- unsigned long *mem_start,
- int naddrc, int nsizec,
- int measure_only)
-{
- struct address_range *adr;
- int i, l;
- unsigned int *rp;
- int rpsize = (naddrc + nsizec) * sizeof(unsigned int);
-
- rp = (unsigned int *) get_property(np, "reg", &l);
- if (rp != 0 && l >= rpsize) {
- i = 0;
- adr = (struct address_range *) (*mem_start);
- while ((l -= rpsize) >= 0) {
- if (!measure_only) {
- adr[i].space = 0;
- adr[i].address = rp[naddrc - 1];
- adr[i].size = rp[naddrc + nsizec - 1];
- }
- ++i;
- rp += naddrc + nsizec;
- }
- np->addrs = adr;
- np->n_addrs = i;
- (*mem_start) += i * sizeof(struct address_range);
- }
-
- return 0;
-}
-
static int __devinit finish_node(struct device_node *np,
unsigned long *mem_start,
- interpret_func *ifunc,
- int naddrc, int nsizec,
int measure_only)
{
struct device_node *child;
- int *ip, rc = 0;
-
- /* get the device addresses and interrupts */
- if (ifunc != NULL)
- rc = ifunc(np, mem_start, naddrc, nsizec, measure_only);
- if (rc)
- goto out;
+ int rc = 0;
rc = finish_node_interrupts(np, mem_start, measure_only);
if (rc)
goto out;
- /* Look for #address-cells and #size-cells properties. */
- ip = (int *) get_property(np, "#address-cells", NULL);
- if (ip != NULL)
- naddrc = *ip;
- ip = (int *) get_property(np, "#size-cells", NULL);
- if (ip != NULL)
- nsizec = *ip;
-
- if (!strcmp(np->name, "device-tree") || np->parent == NULL)
- ifunc = interpret_root_props;
- else if (np->type == 0)
- ifunc = NULL;
- else if (!strcmp(np->type, "pci") || !strcmp(np->type, "vci"))
- ifunc = interpret_pci_props;
- else if (!strcmp(np->type, "dbdma"))
- ifunc = interpret_dbdma_props;
- else if (!strcmp(np->type, "mac-io") || ifunc == interpret_macio_props)
- ifunc = interpret_macio_props;
- else if (!strcmp(np->type, "isa"))
- ifunc = interpret_isa_props;
- else if (!strcmp(np->name, "uni-n") || !strcmp(np->name, "u3"))
- ifunc = interpret_root_props;
- else if (!((ifunc == interpret_dbdma_props
- || ifunc == interpret_macio_props)
- && (!strcmp(np->type, "escc")
- || !strcmp(np->type, "media-bay"))))
- ifunc = NULL;
-
for (child = np->child; child != NULL; child = child->sibling) {
- rc = finish_node(child, mem_start, ifunc,
- naddrc, nsizec, measure_only);
+ rc = finish_node(child, mem_start, measure_only);
if (rc)
goto out;
}
@@ -697,10 +489,10 @@ void __init finish_device_tree(void)
* reason and then remove those additional 16 bytes
*/
size = 16;
- finish_node(allnodes, &size, NULL, 0, 0, 1);
+ finish_node(allnodes, &size, 1);
size -= 16;
end = start = (unsigned long) __va(lmb_alloc(size, 128));
- finish_node(allnodes, &end, NULL, 0, 0, 0);
+ finish_node(allnodes, &end, 0);
BUG_ON(end != start + size);
DBG(" <- finish_device_tree\n");
@@ -1180,7 +972,7 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
#endif
#ifdef CONFIG_PPC_RTAS
- /* To help early debugging via the front panel, we retreive a minimal
+ /* To help early debugging via the front panel, we retrieve a minimal
* set of RTAS infos now if available
*/
{
@@ -1197,6 +989,16 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
}
#endif /* CONFIG_PPC_RTAS */
+#ifdef CONFIG_KEXEC
+ lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
+ if (lprop)
+ crashk_res.start = *lprop;
+
+ lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
+ if (lprop)
+ crashk_res.end = crashk_res.start + *lprop - 1;
+#endif
+
/* break now */
return 1;
}
@@ -1263,7 +1065,9 @@ static int __init early_init_dt_scan_memory(unsigned long node,
} else if (strcmp(type, "memory") != 0)
return 0;
- reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
+ reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
+ if (reg == NULL)
+ reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
if (reg == NULL)
return 0;
@@ -1296,17 +1100,37 @@ static int __init early_init_dt_scan_memory(unsigned long node,
static void __init early_reserve_mem(void)
{
- unsigned long base, size;
- unsigned long *reserve_map;
+ u64 base, size;
+ u64 *reserve_map;
- reserve_map = (unsigned long *)(((unsigned long)initial_boot_params) +
+ reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
initial_boot_params->off_mem_rsvmap);
+#ifdef CONFIG_PPC32
+ /*
+ * Handle the case where we might be booting from an old kexec
+ * image that setup the mem_rsvmap as pairs of 32-bit values
+ */
+ if (*reserve_map > 0xffffffffull) {
+ u32 base_32, size_32;
+ u32 *reserve_map_32 = (u32 *)reserve_map;
+
+ while (1) {
+ base_32 = *(reserve_map_32++);
+ size_32 = *(reserve_map_32++);
+ if (size_32 == 0)
+ break;
+ DBG("reserving: %lx -> %lx\n", base_32, size_32);
+ lmb_reserve(base_32, size_32);
+ }
+ return;
+ }
+#endif
while (1) {
base = *(reserve_map++);
size = *(reserve_map++);
if (size == 0)
break;
- DBG("reserving: %lx -> %lx\n", base, size);
+ DBG("reserving: %llx -> %llx\n", base, size);
lmb_reserve(base, size);
}
@@ -1335,11 +1159,14 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(early_init_dt_scan_memory, NULL);
lmb_enforce_memory_limit(memory_limit);
lmb_analyze();
- lmb_reserve(0, __pa(klimit));
DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
/* Reserve LMB regions used by kernel, initrd, dt, etc... */
+ lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
+#ifdef CONFIG_CRASH_DUMP
+ lmb_reserve(0, KDUMP_RESERVE_LIMIT);
+#endif
early_reserve_mem();
DBG("Scanning CPUs ...\n");
@@ -1800,9 +1627,13 @@ static void of_node_release(struct kref *kref)
kfree(prop->value);
kfree(prop);
prop = next;
+
+ if (!prop) {
+ prop = node->deadprops;
+ node->deadprops = NULL;
+ }
}
kfree(node->intrs);
- kfree(node->addrs);
kfree(node->full_name);
kfree(node->data);
kfree(node);
@@ -1884,9 +1715,7 @@ void of_detach_node(const struct device_node *np)
* This should probably be split up into smaller chunks.
*/
-static int of_finish_dynamic_node(struct device_node *node,
- unsigned long *unused1, int unused2,
- int unused3, int unused4)
+static int of_finish_dynamic_node(struct device_node *node)
{
struct device_node *parent = of_get_parent(node);
int err = 0;
@@ -1907,7 +1736,8 @@ static int of_finish_dynamic_node(struct device_node *node,
return -ENODEV;
/* fix up new node's linux_phandle field */
- if ((ibm_phandle = (unsigned int *)get_property(node, "ibm,phandle", NULL)))
+ if ((ibm_phandle = (unsigned int *)get_property(node,
+ "ibm,phandle", NULL)))
node->linux_phandle = *ibm_phandle;
out:
@@ -1922,7 +1752,9 @@ static int prom_reconfig_notifier(struct notifier_block *nb,
switch (action) {
case PSERIES_RECONFIG_ADD:
- err = finish_node(node, NULL, of_finish_dynamic_node, 0, 0, 0);
+ err = of_finish_dynamic_node(node);
+ if (!err)
+ finish_node(node, NULL, 0);
if (err < 0) {
printk(KERN_ERR "finish_node returned %d\n", err);
err = NOTIFY_BAD;
@@ -1947,22 +1779,32 @@ static int __init prom_reconfig_setup(void)
__initcall(prom_reconfig_setup);
#endif
-/*
- * Find a property with a given name for a given node
- * and return the value.
- */
-unsigned char *get_property(struct device_node *np, const char *name,
- int *lenp)
+struct property *of_find_property(struct device_node *np, const char *name,
+ int *lenp)
{
struct property *pp;
+ read_lock(&devtree_lock);
for (pp = np->properties; pp != 0; pp = pp->next)
if (strcmp(pp->name, name) == 0) {
if (lenp != 0)
*lenp = pp->length;
- return pp->value;
+ break;
}
- return NULL;
+ read_unlock(&devtree_lock);
+
+ return pp;
+}
+
+/*
+ * Find a property with a given name for a given node
+ * and return the value.
+ */
+unsigned char *get_property(struct device_node *np, const char *name,
+ int *lenp)
+{
+ struct property *pp = of_find_property(np,name,lenp);
+ return pp ? pp->value : NULL;
}
EXPORT_SYMBOL(get_property);
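
With this refactoring, of_find_property() returns the struct property itself (under devtree_lock) while get_property() keeps its old contract of returning the value pointer or NULL. A short usage sketch of that unchanged contract; the node pointer is assumed to come from the usual of_find_node_* lookups:

    #include <asm/prom.h>

    static int example_addr_cells(struct device_node *np)
    {
            int len;
            unsigned int *p =
                    (unsigned int *)get_property(np, "#address-cells", &len);

            if (p == NULL || len < (int)sizeof(*p))
                    return 1;            /* fall back to one address cell */
            return *p;
    }
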
@@ -1996,175 +1838,82 @@ int prom_add_property(struct device_node* np, struct property* prop)
return 0;
}
-/* I quickly hacked that one, check against spec ! */
-static inline unsigned long
-bus_space_to_resource_flags(unsigned int bus_space)
+/*
+ * Remove a property from a node. Note that we don't actually
+ * remove it, since we have given out who-knows-how-many pointers
+ * to the data using get-property. Instead we just move the property
+ * to the "dead properties" list, so it won't be found any more.
+ */
+int prom_remove_property(struct device_node *np, struct property *prop)
{
- u8 space = (bus_space >> 24) & 0xf;
- if (space == 0)
- space = 0x02;
- if (space == 0x02)
- return IORESOURCE_MEM;
- else if (space == 0x01)
- return IORESOURCE_IO;
- else {
- printk(KERN_WARNING "prom.c: bus_space_to_resource_flags(), space: %x\n",
- bus_space);
- return 0;
- }
-}
+ struct property **next;
+ int found = 0;
-#ifdef CONFIG_PCI
-static struct resource *find_parent_pci_resource(struct pci_dev* pdev,
- struct address_range *range)
-{
- unsigned long mask;
- int i;
-
- /* Check this one */
- mask = bus_space_to_resource_flags(range->space);
- for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
- if ((pdev->resource[i].flags & mask) == mask &&
- pdev->resource[i].start <= range->address &&
- pdev->resource[i].end > range->address) {
- if ((range->address + range->size - 1) > pdev->resource[i].end) {
- /* Add better message */
- printk(KERN_WARNING "PCI/OF resource overlap !\n");
- return NULL;
- }
- break;
- }
+ write_lock(&devtree_lock);
+ next = &np->properties;
+ while (*next) {
+ if (*next == prop) {
+ /* found the node */
+ *next = prop->next;
+ prop->next = np->deadprops;
+ np->deadprops = prop;
+ found = 1;
+ break;
+ }
+ next = &(*next)->next;
}
- if (i == DEVICE_COUNT_RESOURCE)
- return NULL;
- return &pdev->resource[i];
+ write_unlock(&devtree_lock);
+
+ if (!found)
+ return -ENODEV;
+
+#ifdef CONFIG_PROC_DEVICETREE
+ /* try to remove the proc node as well */
+ if (np->pde)
+ proc_device_tree_remove_prop(np->pde, prop);
+#endif /* CONFIG_PROC_DEVICETREE */
+
+ return 0;
}
/*
- * Request an OF device resource. Currently handles child of PCI devices,
- * or other nodes attached to the root node. Ultimately, put some
- * link to resources in the OF node.
+ * Update a property in a node. Note that we don't actually
+ * remove it, since we have given out who-knows-how-many pointers
+ * to the data using get-property. Instead we just move the property
+ * to the "dead properties" list, and add the new property to the
+ * property list
*/
-struct resource *request_OF_resource(struct device_node* node, int index,
- const char* name_postfix)
+int prom_update_property(struct device_node *np,
+ struct property *newprop,
+ struct property *oldprop)
{
- struct pci_dev* pcidev;
- u8 pci_bus, pci_devfn;
- unsigned long iomask;
- struct device_node* nd;
- struct resource* parent;
- struct resource *res = NULL;
- int nlen, plen;
-
- if (index >= node->n_addrs)
- goto fail;
-
- /* Sanity check on bus space */
- iomask = bus_space_to_resource_flags(node->addrs[index].space);
- if (iomask & IORESOURCE_MEM)
- parent = &iomem_resource;
- else if (iomask & IORESOURCE_IO)
- parent = &ioport_resource;
- else
- goto fail;
-
- /* Find a PCI parent if any */
- nd = node;
- pcidev = NULL;
- while (nd) {
- if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
- pcidev = pci_find_slot(pci_bus, pci_devfn);
- if (pcidev) break;
- nd = nd->parent;
- }
- if (pcidev)
- parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
- if (!parent) {
- printk(KERN_WARNING "request_OF_resource(%s), parent not found\n",
- node->name);
- goto fail;
- }
-
- res = __request_region(parent, node->addrs[index].address,
- node->addrs[index].size, NULL);
- if (!res)
- goto fail;
- nlen = strlen(node->name);
- plen = name_postfix ? strlen(name_postfix) : 0;
- res->name = (const char *)kmalloc(nlen+plen+1, GFP_KERNEL);
- if (res->name) {
- strcpy((char *)res->name, node->name);
- if (plen)
- strcpy((char *)res->name+nlen, name_postfix);
- }
- return res;
-fail:
- return NULL;
-}
-EXPORT_SYMBOL(request_OF_resource);
+ struct property **next;
+ int found = 0;
-int release_OF_resource(struct device_node *node, int index)
-{
- struct pci_dev* pcidev;
- u8 pci_bus, pci_devfn;
- unsigned long iomask, start, end;
- struct device_node* nd;
- struct resource* parent;
- struct resource *res = NULL;
-
- if (index >= node->n_addrs)
- return -EINVAL;
-
- /* Sanity check on bus space */
- iomask = bus_space_to_resource_flags(node->addrs[index].space);
- if (iomask & IORESOURCE_MEM)
- parent = &iomem_resource;
- else if (iomask & IORESOURCE_IO)
- parent = &ioport_resource;
- else
- return -EINVAL;
-
- /* Find a PCI parent if any */
- nd = node;
- pcidev = NULL;
- while(nd) {
- if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
- pcidev = pci_find_slot(pci_bus, pci_devfn);
- if (pcidev) break;
- nd = nd->parent;
- }
- if (pcidev)
- parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
- if (!parent) {
- printk(KERN_WARNING "release_OF_resource(%s), parent not found\n",
- node->name);
- return -ENODEV;
+ write_lock(&devtree_lock);
+ next = &np->properties;
+ while (*next) {
+ if (*next == oldprop) {
+ /* found the node */
+ newprop->next = oldprop->next;
+ *next = newprop;
+ oldprop->next = np->deadprops;
+ np->deadprops = oldprop;
+ found = 1;
+ break;
+ }
+ next = &(*next)->next;
}
+ write_unlock(&devtree_lock);
- /* Find us in the parent and its childs */
- res = parent->child;
- start = node->addrs[index].address;
- end = start + node->addrs[index].size - 1;
- while (res) {
- if (res->start == start && res->end == end &&
- (res->flags & IORESOURCE_BUSY))
- break;
- if (res->start <= start && res->end >= end)
- res = res->child;
- else
- res = res->sibling;
- }
- if (!res)
+ if (!found)
return -ENODEV;
- if (res->name) {
- kfree(res->name);
- res->name = NULL;
- }
- release_resource(res);
- kfree(res);
+#ifdef CONFIG_PROC_DEVICETREE
+ /* try to add to proc as well if it was initialized */
+ if (np->pde)
+ proc_device_tree_update_prop(np->pde, newprop, oldprop);
+#endif /* CONFIG_PROC_DEVICETREE */
return 0;
}
-EXPORT_SYMBOL(release_OF_resource);
-#endif /* CONFIG_PCI */
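
prom_remove_property() and prom_update_property() above never free the old property; they park it on the node's deadprops list so that value pointers previously handed out by get_property() stay valid. A rough caller sketch under that model (the property being replaced, the allocation layout, and the error handling are illustrative, not from the patch):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <asm/prom.h>

    static int example_replace_status(struct device_node *np, const char *status)
    {
            struct property *newp, *oldp;
            int len;

            oldp = of_find_property(np, "status", &len);
            if (!oldp)
                    return -ENODEV;

            /* property header and its value allocated in one block */
            newp = kmalloc(sizeof(*newp) + strlen(status) + 1, GFP_KERNEL);
            if (!newp)
                    return -ENOMEM;
            memset(newp, 0, sizeof(*newp));
            newp->name = "status";                    /* simplified: literal name */
            newp->value = (unsigned char *)(newp + 1);
            newp->length = strlen(status) + 1;
            strcpy((char *)newp->value, status);

            /* old property moves to np->deadprops, stale pointers stay usable */
            return prom_update_property(np, newp, oldp);
    }
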
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index bcdc209dca8..d963a12ec64 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -137,8 +137,8 @@ struct prom_t {
};
struct mem_map_entry {
- unsigned long base;
- unsigned long size;
+ u64 base;
+ u64 size;
};
typedef u32 cell_t;
@@ -192,6 +192,11 @@ static unsigned long __initdata alloc_bottom;
static unsigned long __initdata rmo_top;
static unsigned long __initdata ram_top;
+#ifdef CONFIG_KEXEC
+static unsigned long __initdata prom_crashk_base;
+static unsigned long __initdata prom_crashk_size;
+#endif
+
static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
static int __initdata mem_reserve_cnt;
@@ -553,7 +558,8 @@ unsigned long prom_memparse(const char *ptr, const char **retptr)
static void __init early_cmdline_parse(void)
{
struct prom_t *_prom = &RELOC(prom);
- char *opt, *p;
+ const char *opt;
+ char *p;
int l = 0;
RELOC(prom_cmd_line[0]) = 0;
@@ -590,6 +596,34 @@ static void __init early_cmdline_parse(void)
RELOC(prom_memory_limit) = ALIGN(RELOC(prom_memory_limit), 0x1000000);
#endif
}
+
+#ifdef CONFIG_KEXEC
+ /*
+ * crashkernel=size@addr specifies the location to reserve for
+ * crash kernel.
+ */
+ opt = strstr(RELOC(prom_cmd_line), RELOC("crashkernel="));
+ if (opt) {
+ opt += 12;
+ RELOC(prom_crashk_size) = prom_memparse(opt, &opt);
+
+ if (ALIGN(RELOC(prom_crashk_size), 0x1000000) !=
+ RELOC(prom_crashk_size)) {
+ prom_printf("Warning: crashkernel size is not "
+ "aligned to 16MB\n");
+ }
+
+ /*
+ * At present, the crash kernel always runs at 32MB.
+ * Just ignore whatever the user passed.
+ */
+ RELOC(prom_crashk_base) = 0x2000000;
+ if (*opt == '@') {
+ prom_printf("Warning: PPC64 kdump kernel always runs "
+ "at 32 MB\n");
+ }
+ }
+#endif
}
#ifdef CONFIG_PPC_PSERIES
@@ -863,9 +897,9 @@ static unsigned long __init prom_next_cell(int s, cell_t **cellp)
* If problems seem to show up, it would be a good start to track
* them down.
*/
-static void reserve_mem(unsigned long base, unsigned long size)
+static void reserve_mem(u64 base, u64 size)
{
- unsigned long top = base + size;
+ u64 top = base + size;
unsigned long cnt = RELOC(mem_reserve_cnt);
if (size == 0)
@@ -1011,6 +1045,12 @@ static void __init prom_init_mem(void)
prom_printf(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
prom_printf(" rmo_top : %x\n", RELOC(rmo_top));
prom_printf(" ram_top : %x\n", RELOC(ram_top));
+#ifdef CONFIG_KEXEC
+ if (RELOC(prom_crashk_base)) {
+ prom_printf(" crashk_base : %x\n", RELOC(prom_crashk_base));
+ prom_printf(" crashk_size : %x\n", RELOC(prom_crashk_size));
+ }
+#endif
}
@@ -1500,6 +1540,8 @@ static int __init prom_find_machine_type(void)
#ifdef CONFIG_PPC64
if (strstr(p, RELOC("Momentum,Maple")))
return PLATFORM_MAPLE;
+ if (strstr(p, RELOC("IBM,CPB")))
+ return PLATFORM_CELL;
#endif
i += sl + 1;
}
@@ -1994,7 +2036,7 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
if (r3 && r4 && r4 != 0xdeadbeef) {
unsigned long val;
- RELOC(prom_initrd_start) = (r3 >= KERNELBASE) ? __pa(r3) : r3;
+ RELOC(prom_initrd_start) = is_kernel_addr(r3) ? __pa(r3) : r3;
RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;
val = RELOC(prom_initrd_start);
@@ -2094,6 +2136,10 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
*/
prom_init_mem();
+#ifdef CONFIG_KEXEC
+ if (RELOC(prom_crashk_base))
+ reserve_mem(RELOC(prom_crashk_base), RELOC(prom_crashk_size));
+#endif
/*
* Determine which cpu is actually running right _now_
*/
@@ -2150,6 +2196,16 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
}
#endif
+#ifdef CONFIG_KEXEC
+ if (RELOC(prom_crashk_base)) {
+ prom_setprop(_prom->chosen, "/chosen", "linux,crashkernel-base",
+ PTRRELOC(&prom_crashk_base),
+ sizeof(RELOC(prom_crashk_base)));
+ prom_setprop(_prom->chosen, "/chosen", "linux,crashkernel-size",
+ PTRRELOC(&prom_crashk_size),
+ sizeof(RELOC(prom_crashk_size)));
+ }
+#endif
/*
* Fixup any known bugs in the device-tree
*/
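
The early_cmdline_parse() hunk above accepts crashkernel=size@addr but, as the warnings say, forces the crash kernel to 32MB regardless of the address given. For readers unfamiliar with the syntax, here is a standalone illustration of how size@addr splits apart; parse_size() is a simplified stand-in for prom_memparse()/memparse() that only understands the K/M/G suffixes:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static unsigned long long parse_size(const char *s, const char **end)
    {
            unsigned long long v = strtoull(s, (char **)end, 0);

            switch (**end) {
            case 'G': case 'g': v <<= 10;   /* fall through */
            case 'M': case 'm': v <<= 10;   /* fall through */
            case 'K': case 'k': v <<= 10; (*end)++;
            }
            return v;
    }

    int main(void)
    {
            const char *cmdline = "root=/dev/sda1 crashkernel=128M@32M";
            const char *opt = strstr(cmdline, "crashkernel=");
            unsigned long long size = 0, base = 0;

            if (opt) {
                    opt += strlen("crashkernel=");
                    size = parse_size(opt, &opt);
                    if (*opt == '@')
                            base = parse_size(opt + 1, &opt);
            }
            /* prints: crash kernel: 134217728 bytes at 0x2000000 */
            printf("crash kernel: %llu bytes at 0x%llx\n", size, base);
            return 0;
    }
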
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
new file mode 100644
index 00000000000..a8099c80615
--- /dev/null
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -0,0 +1,548 @@
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/pci_regs.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+
+#ifdef DEBUG
+#define DBG(fmt...) do { printk(fmt); } while(0)
+#else
+#define DBG(fmt...) do { } while(0)
+#endif
+
+#ifdef CONFIG_PPC64
+#define PRu64 "%lx"
+#else
+#define PRu64 "%llx"
+#endif
+
+/* Max address size we deal with */
+#define OF_MAX_ADDR_CELLS 4
+#define OF_CHECK_COUNTS(na, ns) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \
+ (ns) > 0)
+
+/* Debug utility */
+#ifdef DEBUG
+static void of_dump_addr(const char *s, u32 *addr, int na)
+{
+ printk("%s", s);
+ while(na--)
+ printk(" %08x", *(addr++));
+ printk("\n");
+}
+#else
+static void of_dump_addr(const char *s, u32 *addr, int na) { }
+#endif
+
+/* Read a big address */
+static inline u64 of_read_addr(u32 *cell, int size)
+{
+ u64 r = 0;
+ while (size--)
+ r = (r << 32) | *(cell++);
+ return r;
+}
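
of_read_addr() folds an array of big-endian 32-bit cells into a single u64, most-significant cell first. A short worked example, repeated here as standalone C so it can run outside the kernel:

    #include <stdint.h>
    #include <stdio.h>

    /* same body as of_read_addr() above, with standalone types */
    static uint64_t read_addr(const uint32_t *cell, int size)
    {
            uint64_t r = 0;
            while (size--)
                    r = (r << 32) | *(cell++);
            return r;
    }

    int main(void)
    {
            uint32_t cells[2] = { 0x00000001, 0x80000000u };

            /* prints 0x180000000: the two cells form one 64-bit address */
            printf("0x%llx\n", (unsigned long long)read_addr(cells, 2));
            return 0;
    }
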
+
+/* Callbacks for bus specific translators */
+struct of_bus {
+ const char *name;
+ const char *addresses;
+ int (*match)(struct device_node *parent);
+ void (*count_cells)(struct device_node *child,
+ int *addrc, int *sizec);
+ u64 (*map)(u32 *addr, u32 *range, int na, int ns, int pna);
+ int (*translate)(u32 *addr, u64 offset, int na);
+ unsigned int (*get_flags)(u32 *addr);
+};
+
+
+/*
+ * Default translator (generic bus)
+ */
+
+static void of_bus_default_count_cells(struct device_node *dev,
+ int *addrc, int *sizec)
+{
+ if (addrc)
+ *addrc = prom_n_addr_cells(dev);
+ if (sizec)
+ *sizec = prom_n_size_cells(dev);
+}
+
+static u64 of_bus_default_map(u32 *addr, u32 *range, int na, int ns, int pna)
+{
+ u64 cp, s, da;
+
+ cp = of_read_addr(range, na);
+ s = of_read_addr(range + na + pna, ns);
+ da = of_read_addr(addr, na);
+
+ DBG("OF: default map, cp="PRu64", s="PRu64", da="PRu64"\n",
+ cp, s, da);
+
+ if (da < cp || da >= (cp + s))
+ return OF_BAD_ADDR;
+ return da - cp;
+}
+
+static int of_bus_default_translate(u32 *addr, u64 offset, int na)
+{
+ u64 a = of_read_addr(addr, na);
+ memset(addr, 0, na * 4);
+ a += offset;
+ if (na > 1)
+ addr[na - 2] = a >> 32;
+ addr[na - 1] = a & 0xffffffffu;
+
+ return 0;
+}
+
+static unsigned int of_bus_default_get_flags(u32 *addr)
+{
+ return IORESOURCE_MEM;
+}
+
+
+/*
+ * PCI bus specific translator
+ */
+
+static int of_bus_pci_match(struct device_node *np)
+{
+ /* "vci" is for the /chaos bridge on 1st-gen PCI powermacs */
+ return !strcmp(np->type, "pci") || !strcmp(np->type, "vci");
+}
+
+static void of_bus_pci_count_cells(struct device_node *np,
+ int *addrc, int *sizec)
+{
+ if (addrc)
+ *addrc = 3;
+ if (sizec)
+ *sizec = 2;
+}
+
+static u64 of_bus_pci_map(u32 *addr, u32 *range, int na, int ns, int pna)
+{
+ u64 cp, s, da;
+
+ /* Check address type match */
+ if ((addr[0] ^ range[0]) & 0x03000000)
+ return OF_BAD_ADDR;
+
+ /* Read address values, skipping high cell */
+ cp = of_read_addr(range + 1, na - 1);
+ s = of_read_addr(range + na + pna, ns);
+ da = of_read_addr(addr + 1, na - 1);
+
+ DBG("OF: PCI map, cp="PRu64", s="PRu64", da="PRu64"\n", cp, s, da);
+
+ if (da < cp || da >= (cp + s))
+ return OF_BAD_ADDR;
+ return da - cp;
+}
+
+static int of_bus_pci_translate(u32 *addr, u64 offset, int na)
+{
+ return of_bus_default_translate(addr + 1, offset, na - 1);
+}
+
+static unsigned int of_bus_pci_get_flags(u32 *addr)
+{
+ unsigned int flags = 0;
+ u32 w = addr[0];
+
+ switch((w >> 24) & 0x03) {
+ case 0x01:
+ flags |= IORESOURCE_IO;
+ case 0x02: /* 32 bits */
+ case 0x03: /* 64 bits */
+ flags |= IORESOURCE_MEM;
+ }
+ if (w & 0x40000000)
+ flags |= IORESOURCE_PREFETCH;
+ return flags;
+}
+
+/*
+ * ISA bus specific translator
+ */
+
+static int of_bus_isa_match(struct device_node *np)
+{
+ return !strcmp(np->name, "isa");
+}
+
+static void of_bus_isa_count_cells(struct device_node *child,
+ int *addrc, int *sizec)
+{
+ if (addrc)
+ *addrc = 2;
+ if (sizec)
+ *sizec = 1;
+}
+
+static u64 of_bus_isa_map(u32 *addr, u32 *range, int na, int ns, int pna)
+{
+ u64 cp, s, da;
+
+ /* Check address type match */
+ if ((addr[0] ^ range[0]) & 0x00000001)
+ return OF_BAD_ADDR;
+
+ /* Read address values, skipping high cell */
+ cp = of_read_addr(range + 1, na - 1);
+ s = of_read_addr(range + na + pna, ns);
+ da = of_read_addr(addr + 1, na - 1);
+
+ DBG("OF: ISA map, cp="PRu64", s="PRu64", da="PRu64"\n", cp, s, da);
+
+ if (da < cp || da >= (cp + s))
+ return OF_BAD_ADDR;
+ return da - cp;
+}
+
+static int of_bus_isa_translate(u32 *addr, u64 offset, int na)
+{
+ return of_bus_default_translate(addr + 1, offset, na - 1);
+}
+
+static unsigned int of_bus_isa_get_flags(u32 *addr)
+{
+ unsigned int flags = 0;
+ u32 w = addr[0];
+
+ if (w & 1)
+ flags |= IORESOURCE_IO;
+ else
+ flags |= IORESOURCE_MEM;
+ return flags;
+}
+
+
+/*
+ * Array of bus specific translators
+ */
+
+static struct of_bus of_busses[] = {
+ /* PCI */
+ {
+ .name = "pci",
+ .addresses = "assigned-addresses",
+ .match = of_bus_pci_match,
+ .count_cells = of_bus_pci_count_cells,
+ .map = of_bus_pci_map,
+ .translate = of_bus_pci_translate,
+ .get_flags = of_bus_pci_get_flags,
+ },
+ /* ISA */
+ {
+ .name = "isa",
+ .addresses = "reg",
+ .match = of_bus_isa_match,
+ .count_cells = of_bus_isa_count_cells,
+ .map = of_bus_isa_map,
+ .translate = of_bus_isa_translate,
+ .get_flags = of_bus_isa_get_flags,
+ },
+ /* Default */
+ {
+ .name = "default",
+ .addresses = "reg",
+ .match = NULL,
+ .count_cells = of_bus_default_count_cells,
+ .map = of_bus_default_map,
+ .translate = of_bus_default_translate,
+ .get_flags = of_bus_default_get_flags,
+ },
+};
+
+static struct of_bus *of_match_bus(struct device_node *np)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(of_busses); i ++)
+ if (!of_busses[i].match || of_busses[i].match(np))
+ return &of_busses[i];
+ BUG();
+ return NULL;
+}
+
+static int of_translate_one(struct device_node *parent, struct of_bus *bus,
+ struct of_bus *pbus, u32 *addr,
+ int na, int ns, int pna)
+{
+ u32 *ranges;
+ unsigned int rlen;
+ int rone;
+ u64 offset = OF_BAD_ADDR;
+
+ /* Normally, an absence of a "ranges" property means we are
+ * crossing a non-translatable boundary, and thus the addresses
+ * below the current node cannot be converted to CPU physical ones.
+ * Unfortunately, while this is very clear in the spec, it's not
+ * what Apple understood, and they do have things like /uni-n or
+ * /ht nodes with no "ranges" property and a lot of perfectly
+ * usable mapped devices below them. Thus we treat the absence of
+ * "ranges" as equivalent to an empty "ranges" property which means
+ * a 1:1 translation at that level. It's up to the caller not to try
+ * to translate addresses that aren't supposed to be translated in
+ * the first place. --BenH.
+ */
+ ranges = (u32 *)get_property(parent, "ranges", &rlen);
+ if (ranges == NULL || rlen == 0) {
+ offset = of_read_addr(addr, na);
+ memset(addr, 0, pna * 4);
+ DBG("OF: no ranges, 1:1 translation\n");
+ goto finish;
+ }
+
+ DBG("OF: walking ranges...\n");
+
+ /* Now walk through the ranges */
+ rlen /= 4;
+ rone = na + pna + ns;
+ for (; rlen >= rone; rlen -= rone, ranges += rone) {
+ offset = bus->map(addr, ranges, na, ns, pna);
+ if (offset != OF_BAD_ADDR)
+ break;
+ }
+ if (offset == OF_BAD_ADDR) {
+ DBG("OF: not found !\n");
+ return 1;
+ }
+ memcpy(addr, ranges + na, 4 * pna);
+
+ finish:
+ of_dump_addr("OF: parent translation for:", addr, pna);
+ DBG("OF: with offset: "PRu64"\n", offset);
+
+ /* Translate it into parent bus space */
+ return pbus->translate(addr, offset, pna);
+}
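
To make the walk concrete, consider a hypothetical parent with one "ranges" entry and one cell each for address and size (na = ns = pna = 1): { 0x0, 0x80000000, 0x10000000 } maps child addresses 0x0-0x0fffffff to parent addresses starting at 0x80000000. Translating a child address of 0x4000 then proceeds as follows (editor's sketch):

	/* bus->map()        : offset = 0x4000 - 0x0 = 0x4000 (within size 0x10000000) */
	/* memcpy()          : addr becomes the parent base cell, 0x80000000           */
	/* pbus->translate() : addr = 0x80000000 + 0x4000 = 0x80004000                  */
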
+
+
+/*
+ * Translate an address from the device-tree into a CPU physical address,
+ * this walks up the tree and applies the various bus mappings on the
+ * way.
+ *
+ * Note: We consider crossing any level with #size-cells == 0 to mean
+ * that translation is impossible (that is, we are not dealing with a value
+ * that can be mapped to a CPU physical address). This is not really specified
+ * that way, but it is traditionally the way IBM at least does things.
+ */
+u64 of_translate_address(struct device_node *dev, u32 *in_addr)
+{
+ struct device_node *parent = NULL;
+ struct of_bus *bus, *pbus;
+ u32 addr[OF_MAX_ADDR_CELLS];
+ int na, ns, pna, pns;
+ u64 result = OF_BAD_ADDR;
+
+ DBG("OF: ** translation for device %s **\n", dev->full_name);
+
+ /* Increase refcount at current level */
+ of_node_get(dev);
+
+ /* Get parent & match bus type */
+ parent = of_get_parent(dev);
+ if (parent == NULL)
+ goto bail;
+ bus = of_match_bus(parent);
+
+ /* Count address cells & copy address locally */
+ bus->count_cells(dev, &na, &ns);
+ if (!OF_CHECK_COUNTS(na, ns)) {
+ printk(KERN_ERR "prom_parse: Bad cell count for %s\n",
+ dev->full_name);
+ goto bail;
+ }
+ memcpy(addr, in_addr, na * 4);
+
+ DBG("OF: bus is %s (na=%d, ns=%d) on %s\n",
+ bus->name, na, ns, parent->full_name);
+ of_dump_addr("OF: translating address:", addr, na);
+
+ /* Translate */
+ for (;;) {
+ /* Switch to parent bus */
+ of_node_put(dev);
+ dev = parent;
+ parent = of_get_parent(dev);
+
+ /* If root, we have finished */
+ if (parent == NULL) {
+ DBG("OF: reached root node\n");
+ result = of_read_addr(addr, na);
+ break;
+ }
+
+ /* Get new parent bus and counts */
+ pbus = of_match_bus(parent);
+ pbus->count_cells(dev, &pna, &pns);
+ if (!OF_CHECK_COUNTS(pna, pns)) {
+ printk(KERN_ERR "prom_parse: Bad cell count for %s\n",
+ dev->full_name);
+ break;
+ }
+
+ DBG("OF: parent bus is %s (na=%d, ns=%d) on %s\n",
+ pbus->name, pna, pns, parent->full_name);
+
+ /* Apply bus translation */
+ if (of_translate_one(dev, bus, pbus, addr, na, ns, pna))
+ break;
+
+ /* Complete the move up one level */
+ na = pna;
+ ns = pns;
+ bus = pbus;
+
+ of_dump_addr("OF: one level translation:", addr, na);
+ }
+ bail:
+ of_node_put(parent);
+ of_node_put(dev);
+
+ return result;
+}
+EXPORT_SYMBOL(of_translate_address);
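
A minimal usage sketch (not part of this patch; the node type and property layout are assumed): a caller translates the raw "reg" cells of a device node into a CPU physical address.

	struct device_node *np = of_find_node_by_type(NULL, "serial");
	if (np != NULL) {
		u32 *reg = (u32 *)get_property(np, "reg", NULL);
		if (reg != NULL) {
			u64 phys = of_translate_address(np, reg);
			if (phys != OF_BAD_ADDR)
				printk("serial registers at 0x%016llx\n",
				       (unsigned long long)phys);
		}
		of_node_put(np);
	}
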
+
+u32 *of_get_address(struct device_node *dev, int index, u64 *size,
+ unsigned int *flags)
+{
+ u32 *prop;
+ unsigned int psize;
+ struct device_node *parent;
+ struct of_bus *bus;
+ int onesize, i, na, ns;
+
+ /* Get parent & match bus type */
+ parent = of_get_parent(dev);
+ if (parent == NULL)
+ return NULL;
+ bus = of_match_bus(parent);
+ bus->count_cells(dev, &na, &ns);
+ of_node_put(parent);
+ if (!OF_CHECK_COUNTS(na, ns))
+ return NULL;
+
+ /* Get "reg" or "assigned-addresses" property */
+ prop = (u32 *)get_property(dev, bus->addresses, &psize);
+ if (prop == NULL)
+ return NULL;
+ psize /= 4;
+
+ onesize = na + ns;
+ for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
+ if (i == index) {
+ if (size)
+ *size = of_read_addr(prop + na, ns);
+ if (flags)
+ *flags = bus->get_flags(prop);
+ return prop;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(of_get_address);
+
+u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
+ unsigned int *flags)
+{
+ u32 *prop;
+ unsigned int psize;
+ struct device_node *parent;
+ struct of_bus *bus;
+ int onesize, i, na, ns;
+
+ /* Get parent & match bus type */
+ parent = of_get_parent(dev);
+ if (parent == NULL)
+ return NULL;
+ bus = of_match_bus(parent);
+ if (strcmp(bus->name, "pci"))
+ return NULL;
+ bus->count_cells(dev, &na, &ns);
+ of_node_put(parent);
+ if (!OF_CHECK_COUNTS(na, ns))
+ return NULL;
+
+ /* Get "reg" or "assigned-addresses" property */
+ prop = (u32 *)get_property(dev, bus->addresses, &psize);
+ if (prop == NULL)
+ return NULL;
+ psize /= 4;
+
+ onesize = na + ns;
+ for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
+ if ((prop[0] & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
+ if (size)
+ *size = of_read_addr(prop + na, ns);
+ if (flags)
+ *flags = bus->get_flags(prop);
+ return prop;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(of_get_pci_address);
+
+static int __of_address_to_resource(struct device_node *dev, u32 *addrp,
+ u64 size, unsigned int flags,
+ struct resource *r)
+{
+ u64 taddr;
+
+ if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0)
+ return -EINVAL;
+ taddr = of_translate_address(dev, addrp);
+ if (taddr == OF_BAD_ADDR)
+ return -EINVAL;
+ memset(r, 0, sizeof(struct resource));
+ if (flags & IORESOURCE_IO) {
+ unsigned long port;
+ port = pci_address_to_pio(taddr);
+ if (port == (unsigned long)-1)
+ return -EINVAL;
+ r->start = port;
+ r->end = port + size - 1;
+ } else {
+ r->start = taddr;
+ r->end = taddr + size - 1;
+ }
+ r->flags = flags;
+ r->name = dev->name;
+ return 0;
+}
+
+int of_address_to_resource(struct device_node *dev, int index,
+ struct resource *r)
+{
+ u32 *addrp;
+ u64 size;
+ unsigned int flags;
+
+ addrp = of_get_address(dev, index, &size, &flags);
+ if (addrp == NULL)
+ return -EINVAL;
+ return __of_address_to_resource(dev, addrp, size, flags, r);
+}
+EXPORT_SYMBOL_GPL(of_address_to_resource);
+
+int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r)
+{
+ u32 *addrp;
+ u64 size;
+ unsigned int flags;
+
+ addrp = of_get_pci_address(dev, bar, &size, &flags);
+ if (addrp == NULL)
+ return -EINVAL;
+ return __of_address_to_resource(dev, addrp, size, flags, r);
+}
+EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
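
A short sketch of the new resource helpers from a driver's point of view (illustrative; assumes np is the device's node and that index 0 is a memory-mapped register block):

	struct resource res;
	void __iomem *regs;

	if (of_address_to_resource(np, 0, &res) == 0) {
		regs = ioremap(res.start, res.end - res.start + 1);
		/* ... use regs, then iounmap(regs) ... */
	}
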
diff --git a/arch/powerpc/kernel/ptrace-common.h b/arch/powerpc/kernel/ptrace-common.h
index b1babb72967..5ccbdbe0d5c 100644
--- a/arch/powerpc/kernel/ptrace-common.h
+++ b/arch/powerpc/kernel/ptrace-common.h
@@ -62,7 +62,7 @@ static inline void set_single_step(struct task_struct *task)
struct pt_regs *regs = task->thread.regs;
if (regs != NULL)
regs->msr |= MSR_SE;
- set_ti_thread_flag(task->thread_info, TIF_SINGLESTEP);
+ set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
static inline void clear_single_step(struct task_struct *task)
@@ -70,7 +70,7 @@ static inline void clear_single_step(struct task_struct *task)
struct pt_regs *regs = task->thread.regs;
if (regs != NULL)
regs->msr &= ~MSR_SE;
- clear_ti_thread_flag(task->thread_info, TIF_SINGLESTEP);
+ clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
#ifdef CONFIG_ALTIVEC
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 61762640b87..826ee3d056d 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -45,33 +45,19 @@ long compat_sys_ptrace(int request, int pid, unsigned long addr,
unsigned long data)
{
struct task_struct *child;
- int ret = -EPERM;
+ int ret;
lock_kernel();
if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
- ret = security_ptrace(current->parent, current);
- if (ret)
- goto out;
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
+ ret = ptrace_traceme();
goto out;
}
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
- ret = -EPERM;
- if (pid == 1) /* you may not mess with init */
- goto out_tsk;
+ child = ptrace_get_task_struct(pid);
+ if (IS_ERR(child)) {
+ ret = PTR_ERR(child);
+ goto out;
+ }
if (request == PTRACE_ATTACH) {
ret = ptrace_attach(child);
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 4283fa33f78..7fe4a5c944c 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -17,6 +17,7 @@
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/capability.h>
#include <linux/delay.h>
#include <asm/prom.h>
@@ -29,11 +30,17 @@
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/lmb.h>
+#include <asm/udbg.h>
struct rtas_t rtas = {
.lock = SPIN_LOCK_UNLOCKED
};
+struct rtas_suspend_me_data {
+ long waiting;
+ struct rtas_args *args;
+};
+
EXPORT_SYMBOL(rtas);
DEFINE_SPINLOCK(rtas_data_buf_lock);
@@ -52,7 +59,7 @@ EXPORT_SYMBOL(rtas_flash_term_hook);
* are designed only for very early low-level debugging, which
* is why the token is hard-coded to 10.
*/
-void call_rtas_display_status(unsigned char c)
+static void call_rtas_display_status(char c)
{
struct rtas_args *args = &rtas.args;
unsigned long s;
@@ -65,14 +72,14 @@ void call_rtas_display_status(unsigned char c)
args->nargs = 1;
args->nret = 1;
args->rets = (rtas_arg_t *)&(args->args[1]);
- args->args[0] = (int)c;
+ args->args[0] = (unsigned char)c;
enter_rtas(__pa(args));
spin_unlock_irqrestore(&rtas.lock, s);
}
-void call_rtas_display_status_delay(unsigned char c)
+static void call_rtas_display_status_delay(char c)
{
static int pending_newline = 0; /* did last write end with unprinted newline? */
static int width = 16;
@@ -96,6 +103,11 @@ void call_rtas_display_status_delay(unsigned char c)
}
}
+void __init udbg_init_rtas(void)
+{
+ udbg_putc = call_rtas_display_status_delay;
+}
+
void rtas_progress(char *s, unsigned short hex)
{
struct device_node *root;
@@ -549,6 +561,80 @@ void rtas_os_term(char *str)
} while (status == RTAS_BUSY);
}
+static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
+#ifdef CONFIG_PPC_PSERIES
+static void rtas_percpu_suspend_me(void *info)
+{
+ long rc;
+ long flags;
+ struct rtas_suspend_me_data *data =
+ (struct rtas_suspend_me_data *)info;
+
+ /*
+ * We use "waiting" to indicate our state. As long
+ * as it is > 0, we are all still trying to join up.
+ * If it goes to 0, we have successfully joined up and
+ * one thread got H_Continue. If any error happens,
+ * we set it to <0.
+ */
+ local_irq_save(flags);
+ do {
+ rc = plpar_hcall_norets(H_JOIN);
+ smp_rmb();
+ } while (rc == H_Success && data->waiting > 0);
+ if (rc == H_Success)
+ goto out;
+
+ if (rc == H_Continue) {
+ data->waiting = 0;
+ rtas_call(ibm_suspend_me_token, 0, 1,
+ data->args->args);
+ } else {
+ data->waiting = -EBUSY;
+ printk(KERN_ERR "Error on H_Join hypervisor call\n");
+ }
+
+out:
+ /* Before we restore interrupts, make sure we don't
+ * generate spurious soft lockup errors.
+ */
+ touch_softlockup_watchdog();
+ local_irq_restore(flags);
+ return;
+}
+
+static int rtas_ibm_suspend_me(struct rtas_args *args)
+{
+ int i;
+
+ struct rtas_suspend_me_data data;
+
+ data.waiting = 1;
+ data.args = args;
+
+ /* Call function on all CPUs. One of us will make the
+ * rtas call
+ */
+ if (on_each_cpu(rtas_percpu_suspend_me, &data, 1, 0))
+ data.waiting = -EINVAL;
+
+ if (data.waiting != 0)
+ printk(KERN_ERR "Error doing global join\n");
+
+ /* Prod each CPU. This won't hurt, and will wake
+ * anyone we successfully put to sleep with H_Join
+ */
+ for_each_cpu(i)
+ plpar_hcall_norets(H_PROD, i);
+
+ return data.waiting;
+}
+#else /* CONFIG_PPC_PSERIES */
+static int rtas_ibm_suspend_me(struct rtas_args *args)
+{
+ return -ENOSYS;
+}
+#endif
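
In short, the join protocol above uses data->waiting as a three-state flag (summary derived by the editor from the code and comment above):

	/* waiting > 0  : CPUs are still spinning in H_JOIN                      */
	/* waiting == 0 : one CPU got H_Continue and issued ibm,suspend-me       */
	/* waiting < 0  : an error occurred (bad H_JOIN rc or on_each_cpu failed) */
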
asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
{
@@ -556,6 +642,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
unsigned long flags;
char *buff_copy, *errbuf = NULL;
int nargs;
+ int rc;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -574,6 +661,17 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
nargs * sizeof(rtas_arg_t)) != 0)
return -EFAULT;
+ if (args.token == RTAS_UNKNOWN_SERVICE)
+ return -EINVAL;
+
+ /* Need to handle the ibm,suspend-me call specially */
+ if (args.token == ibm_suspend_me_token) {
+ rc = rtas_ibm_suspend_me(&args);
+ if (rc)
+ return rc;
+ goto copy_return;
+ }
+
buff_copy = get_errorlog_buffer();
spin_lock_irqsave(&rtas.lock, flags);
@@ -597,6 +695,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
kfree(buff_copy);
}
+ copy_return:
/* Copy out args. */
if (copy_to_user(uargs->args + nargs,
args.args + nargs,
@@ -632,7 +731,7 @@ void rtas_stop_self(void)
}
/*
- * Call early during boot, before mem init or bootmem, to retreive the RTAS
+ * Call early during boot, before mem init or bootmem, to retrieve the RTAS
* informations from the device-tree and allocate the RMO buffer for userland
* accesses.
*/
@@ -668,8 +767,10 @@ void __init rtas_initialize(void)
* the stop-self token if any
*/
#ifdef CONFIG_PPC64
- if (_machine == PLATFORM_PSERIES_LPAR)
+ if (_machine == PLATFORM_PSERIES_LPAR) {
rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX);
+ ibm_suspend_me_token = rtas_token("ibm,suspend-me");
+ }
#endif
rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 60dec2401c2..5579f655991 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -72,7 +72,7 @@ static int of_device_available(struct device_node * dn)
return 0;
}
-static int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
+int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
{
int returnval = -1;
unsigned long buid, addr;
@@ -188,39 +188,19 @@ int is_python(struct device_node *dev)
return 0;
}
-static int get_phb_reg_prop(struct device_node *dev,
- unsigned int addr_size_words,
- struct reg_property64 *reg)
+static void python_countermeasures(struct device_node *dev)
{
- unsigned int *ui_ptr = NULL, len;
-
- /* Found a PHB, now figure out where his registers are mapped. */
- ui_ptr = (unsigned int *)get_property(dev, "reg", &len);
- if (ui_ptr == NULL)
- return 1;
-
- if (addr_size_words == 1) {
- reg->address = ((struct reg_property32 *)ui_ptr)->address;
- reg->size = ((struct reg_property32 *)ui_ptr)->size;
- } else {
- *reg = *((struct reg_property64 *)ui_ptr);
- }
-
- return 0;
-}
-
-static void python_countermeasures(struct device_node *dev,
- unsigned int addr_size_words)
-{
- struct reg_property64 reg_struct;
+ struct resource registers;
void __iomem *chip_regs;
volatile u32 val;
- if (get_phb_reg_prop(dev, addr_size_words, &reg_struct))
+ if (of_address_to_resource(dev, 0, &registers)) {
+ printk(KERN_ERR "Can't get address for Python workarounds !\n");
return;
+ }
/* Python's register file is 1 MB in size. */
- chip_regs = ioremap(reg_struct.address & ~(0xfffffUL), 0x100000);
+ chip_regs = ioremap(registers.start & ~(0xfffffUL), 0x100000);
/*
* Firmware doesn't always clear this bit which is critical
@@ -301,11 +281,10 @@ static int phb_set_bus_ranges(struct device_node *dev,
}
static int __devinit setup_phb(struct device_node *dev,
- struct pci_controller *phb,
- unsigned int addr_size_words)
+ struct pci_controller *phb)
{
if (is_python(dev))
- python_countermeasures(dev, addr_size_words);
+ python_countermeasures(dev);
if (phb_set_bus_ranges(dev, phb))
return 1;
@@ -320,8 +299,8 @@ unsigned long __init find_and_init_phbs(void)
{
struct device_node *node;
struct pci_controller *phb;
- unsigned int root_size_cells = 0;
unsigned int index;
+ unsigned int root_size_cells = 0;
unsigned int *opprop = NULL;
struct device_node *root = of_find_node_by_path("/");
@@ -343,10 +322,11 @@ unsigned long __init find_and_init_phbs(void)
phb = pcibios_alloc_controller(node);
if (!phb)
continue;
- setup_phb(node, phb, root_size_cells);
+ setup_phb(node, phb);
pci_process_bridge_OF_ranges(phb, node, 0);
pci_setup_phb_io(phb, index == 0);
#ifdef CONFIG_PPC_PSERIES
+ /* XXX This code needs serious fixing ... --BenH */
if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) {
int addr = root_size_cells * (index + 2) - 1;
mpic_assign_isu(pSeries_mpic, index, opprop[addr]);
@@ -381,22 +361,17 @@ unsigned long __init find_and_init_phbs(void)
struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
{
- struct device_node *root = of_find_node_by_path("/");
- unsigned int root_size_cells = 0;
struct pci_controller *phb;
int primary;
- root_size_cells = prom_n_size_cells(root);
-
primary = list_empty(&hose_list);
phb = pcibios_alloc_controller(dn);
if (!phb)
return NULL;
- setup_phb(dn, phb, root_size_cells);
+ setup_phb(dn, phb);
pci_process_bridge_OF_ranges(phb, dn, primary);
pci_setup_phb_io_dynamic(phb, primary);
- of_node_put(root);
pci_devs_phb_init_dynamic(phb);
scan_phb(phb);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index bd3eb4292b5..be12041c0fc 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -93,14 +93,15 @@ EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
/* also used by kexec */
void machine_shutdown(void)
{
- if (ppc_md.nvram_sync)
- ppc_md.nvram_sync();
+ if (ppc_md.machine_shutdown)
+ ppc_md.machine_shutdown();
}
void machine_restart(char *cmd)
{
machine_shutdown();
- ppc_md.restart(cmd);
+ if (ppc_md.restart)
+ ppc_md.restart(cmd);
#ifdef CONFIG_SMP
smp_send_stop();
#endif
@@ -112,7 +113,8 @@ void machine_restart(char *cmd)
void machine_power_off(void)
{
machine_shutdown();
- ppc_md.power_off();
+ if (ppc_md.power_off)
+ ppc_md.power_off();
#ifdef CONFIG_SMP
smp_send_stop();
#endif
@@ -129,7 +131,8 @@ EXPORT_SYMBOL_GPL(pm_power_off);
void machine_halt(void)
{
machine_shutdown();
- ppc_md.halt();
+ if (ppc_md.halt)
+ ppc_md.halt();
#ifdef CONFIG_SMP
smp_send_stop();
#endif
@@ -294,129 +297,6 @@ struct seq_operations cpuinfo_op = {
.show = show_cpuinfo,
};
-#ifdef CONFIG_PPC_MULTIPLATFORM
-static int __init set_preferred_console(void)
-{
- struct device_node *prom_stdout = NULL;
- char *name;
- u32 *spd;
- int offset = 0;
-
- DBG(" -> set_preferred_console()\n");
-
- /* The user has requested a console so this is already set up. */
- if (strstr(saved_command_line, "console=")) {
- DBG(" console was specified !\n");
- return -EBUSY;
- }
-
- if (!of_chosen) {
- DBG(" of_chosen is NULL !\n");
- return -ENODEV;
- }
- /* We are getting a weird phandle from OF ... */
- /* ... So use the full path instead */
- name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
- if (name == NULL) {
- DBG(" no linux,stdout-path !\n");
- return -ENODEV;
- }
- prom_stdout = of_find_node_by_path(name);
- if (!prom_stdout) {
- DBG(" can't find stdout package %s !\n", name);
- return -ENODEV;
- }
- DBG("stdout is %s\n", prom_stdout->full_name);
-
- name = (char *)get_property(prom_stdout, "name", NULL);
- if (!name) {
- DBG(" stdout package has no name !\n");
- goto not_found;
- }
- spd = (u32 *)get_property(prom_stdout, "current-speed", NULL);
-
- if (0)
- ;
-#ifdef CONFIG_SERIAL_8250_CONSOLE
- else if (strcmp(name, "serial") == 0) {
- int i;
- u32 *reg = (u32 *)get_property(prom_stdout, "reg", &i);
- if (i > 8) {
- switch (reg[1]) {
- case 0x3f8:
- offset = 0;
- break;
- case 0x2f8:
- offset = 1;
- break;
- case 0x898:
- offset = 2;
- break;
- case 0x890:
- offset = 3;
- break;
- default:
- /* We dont recognise the serial port */
- goto not_found;
- }
- }
- }
-#endif /* CONFIG_SERIAL_8250_CONSOLE */
-#ifdef CONFIG_PPC_PSERIES
- else if (strcmp(name, "vty") == 0) {
- u32 *reg = (u32 *)get_property(prom_stdout, "reg", NULL);
- char *compat = (char *)get_property(prom_stdout, "compatible", NULL);
-
- if (reg && compat && (strcmp(compat, "hvterm-protocol") == 0)) {
- /* Host Virtual Serial Interface */
- switch (reg[0]) {
- case 0x30000000:
- offset = 0;
- break;
- case 0x30000001:
- offset = 1;
- break;
- default:
- goto not_found;
- }
- of_node_put(prom_stdout);
- DBG("Found hvsi console at offset %d\n", offset);
- return add_preferred_console("hvsi", offset, NULL);
- } else {
- /* pSeries LPAR virtual console */
- of_node_put(prom_stdout);
- DBG("Found hvc console\n");
- return add_preferred_console("hvc", 0, NULL);
- }
- }
-#endif /* CONFIG_PPC_PSERIES */
-#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
- else if (strcmp(name, "ch-a") == 0)
- offset = 0;
- else if (strcmp(name, "ch-b") == 0)
- offset = 1;
-#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
- else
- goto not_found;
- of_node_put(prom_stdout);
-
- DBG("Found serial console at ttyS%d\n", offset);
-
- if (spd) {
- static char __initdata opt[16];
- sprintf(opt, "%d", *spd);
- return add_preferred_console("ttyS", offset, opt);
- } else
- return add_preferred_console("ttyS", offset, NULL);
-
- not_found:
- DBG("No preferred console found !\n");
- of_node_put(prom_stdout);
- return -ENODEV;
-}
-console_initcall(set_preferred_console);
-#endif /* CONFIG_PPC_MULTIPLATFORM */
-
void __init check_for_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
@@ -442,7 +322,7 @@ void __init check_for_initrd(void)
/* If we were passed an initrd, set the ROOT_DEV properly if the values
* look sensible. If not, clear initrd reference.
*/
- if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
+ if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) &&
initrd_end > initrd_start)
ROOT_DEV = Root_RAM0;
else
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index e5694335bf1..db72a92943b 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -39,6 +39,8 @@
#include <asm/nvram.h>
#include <asm/xmon.h>
#include <asm/time.h>
+#include <asm/serial.h>
+#include <asm/udbg.h>
#include "setup.h"
@@ -172,12 +174,23 @@ void __init platform_init(void)
*/
void __init machine_init(unsigned long dt_ptr, unsigned long phys)
{
+ /* If btext is enabled, we might have a BAT set up for early display,
+ * so we enable some very basic udbg output.
+ */
+#ifdef CONFIG_BOOTX_TEXT
+ udbg_putc = btext_drawchar;
+#endif
+
+ /* Do some early initialization based on the flat device tree */
early_init_devtree(__va(dt_ptr));
+ /* Check default command line */
#ifdef CONFIG_CMDLINE
- strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
+ if (cmd_line[0] == 0)
+ strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
#endif /* CONFIG_CMDLINE */
+ /* Base init based on machine type */
platform_init();
#ifdef CONFIG_6xx
@@ -282,25 +295,20 @@ void __init setup_arch(char **cmdline_p)
unflatten_device_tree();
check_for_initrd();
- finish_device_tree();
- smp_setup_cpu_maps();
+ if (ppc_md.init_early)
+ ppc_md.init_early();
-#ifdef CONFIG_BOOTX_TEXT
- init_boot_display();
-#endif
+ find_legacy_serial_ports();
+ finish_device_tree();
-#ifdef CONFIG_PPC_PMAC
- /* This could be called "early setup arch", it must be done
- * now because xmon need it
- */
- if (_machine == _MACH_Pmac)
- pmac_feature_init(); /* New cool way */
-#endif
+ smp_setup_cpu_maps();
#ifdef CONFIG_XMON_DEFAULT
xmon_init(1);
#endif
+ /* Register early console */
+ register_early_udbg_console();
#if defined(CONFIG_KGDB)
if (ppc_md.kgdb_map_scc)
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index e3fb78397dc..e29b275e09e 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -33,7 +33,9 @@
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
+#include <linux/bootmem.h>
#include <asm/io.h>
+#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
@@ -69,33 +71,6 @@
#define DBG(fmt...)
#endif
-/*
- * Here are some early debugging facilities. You can enable one
- * but your kernel will not boot on anything else if you do so
- */
-
-/* This one is for use on LPAR machines that support an HVC console
- * on vterm 0
- */
-extern void udbg_init_debug_lpar(void);
-/* This one is for use on Apple G5 machines
- */
-extern void udbg_init_pmac_realmode(void);
-/* That's RTAS panel debug */
-extern void call_rtas_display_status_delay(unsigned char c);
-/* Here's maple real mode debug */
-extern void udbg_init_maple_realmode(void);
-
-#define EARLY_DEBUG_INIT() do {} while(0)
-
-#if 0
-#define EARLY_DEBUG_INIT() udbg_init_debug_lpar()
-#define EARLY_DEBUG_INIT() udbg_init_maple_realmode()
-#define EARLY_DEBUG_INIT() udbg_init_pmac_realmode()
-#define EARLY_DEBUG_INIT() \
- do { udbg_putc = call_rtas_display_status_delay; } while(0)
-#endif
-
int have_of = 1;
int boot_cpuid = 0;
int boot_cpuid_phys = 0;
@@ -236,11 +211,8 @@ void __init early_setup(unsigned long dt_ptr)
struct paca_struct *lpaca = get_paca();
static struct machdep_calls **mach;
- /*
- * Enable early debugging if any specified (see top of
- * this file)
- */
- EARLY_DEBUG_INIT();
+ /* Enable early debugging if any specified (see udbg.h) */
+ udbg_early_init();
DBG(" -> early_setup()\n");
@@ -268,6 +240,10 @@ void __init early_setup(unsigned long dt_ptr)
}
ppc_md = **mach;
+#ifdef CONFIG_CRASH_DUMP
+ kdump_setup();
+#endif
+
DBG("Found, Initializing memory management...\n");
/*
@@ -317,6 +293,7 @@ void early_setup_secondary(void)
void smp_release_cpus(void)
{
extern unsigned long __secondary_hold_spinloop;
+ unsigned long *ptr;
DBG(" -> smp_release_cpus()\n");
@@ -327,7 +304,9 @@ void smp_release_cpus(void)
* This is useless but harmless on iSeries, secondaries are already
* waiting on their paca spinloops. */
- __secondary_hold_spinloop = 1;
+ ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
+ - PHYSICAL_START);
+ *ptr = 1;
mb();
DBG(" <- smp_release_cpus()\n");
@@ -430,7 +409,7 @@ void __init setup_system(void)
/*
* Fill the ppc64_caches & systemcfg structures with informations
- * retreived from the device-tree. Need to be called before
+ * retrieved from the device-tree. Need to be called before
* finish_device_tree() since the later requires some of the
* informations filled up here to properly parse the interrupt
* tree.
@@ -459,16 +438,19 @@ void __init setup_system(void)
*/
ppc_md.init_early();
+ /*
+ * We can discover serial ports now since the above set up the
+ * hash table management for us, so ioremap works. We do that early
+ * so that further code can be debugged.
+ */
+ find_legacy_serial_ports();
+
/*
* "Finish" the device-tree, that is do the actual parsing of
* some of the properties like the interrupt map
*/
finish_device_tree();
-#ifdef CONFIG_BOOTX_TEXT
- init_boot_display();
-#endif
-
/*
* Initialize xmon
*/
@@ -507,6 +489,9 @@ void __init setup_system(void)
ppc64_caches.iline_size);
printk("htab_address = 0x%p\n", htab_address);
printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
+#if PHYSICAL_START > 0
+ printk("physical_start = 0x%x\n", PHYSICAL_START);
+#endif
printk("-----------------------------------------------------\n");
mm_init_ppc64();
@@ -657,187 +642,6 @@ void ppc64_terminate_msg(unsigned int src, const char *msg)
printk("[terminate]%04x %s\n", src, msg);
}
-#ifndef CONFIG_PPC_ISERIES
-/*
- * This function can be used by platforms to "find" legacy serial ports.
- * It works for "serial" nodes under an "isa" node, and will try to
- * respect the "ibm,aix-loc" property if any. It works with up to 8
- * ports.
- */
-
-#define MAX_LEGACY_SERIAL_PORTS 8
-static struct plat_serial8250_port serial_ports[MAX_LEGACY_SERIAL_PORTS+1];
-static unsigned int old_serial_count;
-
-void __init generic_find_legacy_serial_ports(u64 *physport,
- unsigned int *default_speed)
-{
- struct device_node *np;
- u32 *sizeprop;
-
- struct isa_reg_property {
- u32 space;
- u32 address;
- u32 size;
- };
- struct pci_reg_property {
- struct pci_address addr;
- u32 size_hi;
- u32 size_lo;
- };
-
- DBG(" -> generic_find_legacy_serial_port()\n");
-
- *physport = 0;
- if (default_speed)
- *default_speed = 0;
-
- np = of_find_node_by_path("/");
- if (!np)
- return;
-
- /* First fill our array */
- for (np = NULL; (np = of_find_node_by_type(np, "serial"));) {
- struct device_node *isa, *pci;
- struct isa_reg_property *reg;
- unsigned long phys_size, addr_size, io_base;
- u32 *rangesp;
- u32 *interrupts, *clk, *spd;
- char *typep;
- int index, rlen, rentsize;
-
- /* Ok, first check if it's under an "isa" parent */
- isa = of_get_parent(np);
- if (!isa || strcmp(isa->name, "isa")) {
- DBG("%s: no isa parent found\n", np->full_name);
- continue;
- }
-
- /* Now look for an "ibm,aix-loc" property that gives us ordering
- * if any...
- */
- typep = (char *)get_property(np, "ibm,aix-loc", NULL);
-
- /* Get the ISA port number */
- reg = (struct isa_reg_property *)get_property(np, "reg", NULL);
- if (reg == NULL)
- goto next_port;
- /* We assume the interrupt number isn't translated ... */
- interrupts = (u32 *)get_property(np, "interrupts", NULL);
- /* get clock freq. if present */
- clk = (u32 *)get_property(np, "clock-frequency", NULL);
- /* get default speed if present */
- spd = (u32 *)get_property(np, "current-speed", NULL);
- /* Default to locate at end of array */
- index = old_serial_count; /* end of the array by default */
-
- /* If we have a location index, then use it */
- if (typep && *typep == 'S') {
- index = simple_strtol(typep+1, NULL, 0) - 1;
- /* if index is out of range, use end of array instead */
- if (index >= MAX_LEGACY_SERIAL_PORTS)
- index = old_serial_count;
- /* if our index is still out of range, that mean that
- * array is full, we could scan for a free slot but that
- * make little sense to bother, just skip the port
- */
- if (index >= MAX_LEGACY_SERIAL_PORTS)
- goto next_port;
- if (index >= old_serial_count)
- old_serial_count = index + 1;
- /* Check if there is a port who already claimed our slot */
- if (serial_ports[index].iobase != 0) {
- /* if we still have some room, move it, else override */
- if (old_serial_count < MAX_LEGACY_SERIAL_PORTS) {
- DBG("Moved legacy port %d -> %d\n", index,
- old_serial_count);
- serial_ports[old_serial_count++] =
- serial_ports[index];
- } else {
- DBG("Replacing legacy port %d\n", index);
- }
- }
- }
- if (index >= MAX_LEGACY_SERIAL_PORTS)
- goto next_port;
- if (index >= old_serial_count)
- old_serial_count = index + 1;
-
- /* Now fill the entry */
- memset(&serial_ports[index], 0, sizeof(struct plat_serial8250_port));
- serial_ports[index].uartclk = clk ? *clk : BASE_BAUD * 16;
- serial_ports[index].iobase = reg->address;
- serial_ports[index].irq = interrupts ? interrupts[0] : 0;
- serial_ports[index].flags = ASYNC_BOOT_AUTOCONF;
-
- DBG("Added legacy port, index: %d, port: %x, irq: %d, clk: %d\n",
- index,
- serial_ports[index].iobase,
- serial_ports[index].irq,
- serial_ports[index].uartclk);
-
- /* Get phys address of IO reg for port 1 */
- if (index != 0)
- goto next_port;
-
- pci = of_get_parent(isa);
- if (!pci) {
- DBG("%s: no pci parent found\n", np->full_name);
- goto next_port;
- }
-
- rangesp = (u32 *)get_property(pci, "ranges", &rlen);
- if (rangesp == NULL) {
- of_node_put(pci);
- goto next_port;
- }
- rlen /= 4;
-
- /* we need the #size-cells of the PCI bridge node itself */
- phys_size = 1;
- sizeprop = (u32 *)get_property(pci, "#size-cells", NULL);
- if (sizeprop != NULL)
- phys_size = *sizeprop;
- /* we need the parent #addr-cells */
- addr_size = prom_n_addr_cells(pci);
- rentsize = 3 + addr_size + phys_size;
- io_base = 0;
- for (;rlen >= rentsize; rlen -= rentsize,rangesp += rentsize) {
- if (((rangesp[0] >> 24) & 0x3) != 1)
- continue; /* not IO space */
- io_base = rangesp[3];
- if (addr_size == 2)
- io_base = (io_base << 32) | rangesp[4];
- }
- if (io_base != 0) {
- *physport = io_base + reg->address;
- if (default_speed && spd)
- *default_speed = *spd;
- }
- of_node_put(pci);
- next_port:
- of_node_put(isa);
- }
-
- DBG(" <- generic_find_legacy_serial_port()\n");
-}
-
-static struct platform_device serial_device = {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM,
- .dev = {
- .platform_data = serial_ports,
- },
-};
-
-static int __init serial_dev_init(void)
-{
- return platform_device_register(&serial_device);
-}
-arch_initcall(serial_dev_init);
-
-#endif /* CONFIG_PPC_ISERIES */
-
int check_legacy_ioport(unsigned long base_port)
{
if (ppc_md.check_legacy_ioport == NULL)
@@ -851,3 +655,28 @@ void cpu_die(void)
if (ppc_md.cpu_die)
ppc_md.cpu_die();
}
+
+#ifdef CONFIG_SMP
+void __init setup_per_cpu_areas(void)
+{
+ int i;
+ unsigned long size;
+ char *ptr;
+
+ /* Copy section for each CPU (we discard the original) */
+ size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
+#ifdef CONFIG_MODULES
+ if (size < PERCPU_ENOUGH_ROOM)
+ size = PERCPU_ENOUGH_ROOM;
+#endif
+
+ for_each_cpu(i) {
+ ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
+ if (!ptr)
+ panic("Cannot allocate cpu data for CPU %d\n", i);
+
+ paca[i].data_offset = ptr - __per_cpu_start;
+ memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+ }
+}
+#endif
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 5a2eba60dd3..177bba78fb0 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -76,7 +76,6 @@
* registers from *regs. This is what we need
* to do when a signal has been delivered.
*/
-#define sigreturn_exit(regs) return 0
#define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
@@ -156,9 +155,17 @@ static inline int save_general_regs(struct pt_regs *regs,
elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
int i;
- for (i = 0; i <= PT_RESULT; i ++)
+ if (!FULL_REGS(regs)) {
+ set_thread_flag(TIF_SAVE_NVGPRS);
+ current_thread_info()->nvgprs_frame = frame->mc_gregs;
+ }
+
+ for (i = 0; i <= PT_RESULT; i ++) {
+ if (i == 14 && !FULL_REGS(regs))
+ i = 32;
if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
return -EFAULT;
+ }
return 0;
}
@@ -179,8 +186,6 @@ static inline int restore_general_regs(struct pt_regs *regs,
#else /* CONFIG_PPC64 */
-extern void sigreturn_exit(struct pt_regs *);
-
#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
@@ -214,6 +219,15 @@ static inline int get_old_sigaction(struct k_sigaction *new_ka,
static inline int save_general_regs(struct pt_regs *regs,
struct mcontext __user *frame)
{
+ if (!FULL_REGS(regs)) {
+ /* Zero out the unsaved GPRs to avoid information
+ leak, and set TIF_SAVE_NVGPRS to ensure that the
+ registers do actually get saved later. */
+ memset(&regs->gpr[14], 0, 18 * sizeof(unsigned long));
+ current_thread_info()->nvgprs_frame = &frame->mc_gregs;
+ set_thread_flag(TIF_SAVE_NVGPRS);
+ }
+
return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}
@@ -256,8 +270,10 @@ long sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
- if (do_signal(&saveset, regs))
- sigreturn_exit(regs);
+ if (do_signal(&saveset, regs)) {
+ set_thread_flag(TIF_RESTOREALL);
+ return 0;
+ }
}
}
@@ -292,8 +308,10 @@ long sys_rt_sigsuspend(
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
- if (do_signal(&saveset, regs))
- sigreturn_exit(regs);
+ if (do_signal(&saveset, regs)) {
+ set_thread_flag(TIF_RESTOREALL);
+ return 0;
+ }
}
}
@@ -391,9 +409,6 @@ struct rt_sigframe {
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
int sigret)
{
-#ifdef CONFIG_PPC32
- CHECK_FULL_REGS(regs);
-#endif
/* Make sure floating point registers are stored in regs */
flush_fp_to_thread(current);
@@ -482,6 +497,15 @@ static long restore_user_regs(struct pt_regs *regs,
if (err)
return 1;
+ /*
+ * Do this before updating the thread state in
+ * current->thread.fpr/vr/evr. That way, if we get preempted
+ * and another task grabs the FPU/Altivec/SPE, it won't be
+ * tempted to save the current CPU state into the thread_struct
+ * and corrupt what we are writing there.
+ */
+ discard_lazy_cpu_state();
+
/* force the process to reload the FP registers from
current->thread when it next does FP instructions */
regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
@@ -523,18 +547,6 @@ static long restore_user_regs(struct pt_regs *regs,
return 1;
#endif /* CONFIG_SPE */
-#ifndef CONFIG_SMP
- preempt_disable();
- if (last_task_used_math == current)
- last_task_used_math = NULL;
- if (last_task_used_altivec == current)
- last_task_used_altivec = NULL;
-#ifdef CONFIG_SPE
- if (last_task_used_spe == current)
- last_task_used_spe = NULL;
-#endif
- preempt_enable();
-#endif
return 0;
}
@@ -828,12 +840,6 @@ static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
regs->gpr[6] = (unsigned long) rt_sf;
regs->nip = (unsigned long) ka->sa.sa_handler;
regs->trap = 0;
-#ifdef CONFIG_PPC64
- regs->result = 0;
-
- if (test_thread_flag(TIF_SINGLESTEP))
- ptrace_notify(SIGTRAP);
-#endif
return 1;
badframe:
@@ -911,8 +917,8 @@ long sys_swapcontext(struct ucontext __user *old_ctx,
*/
if (do_setcontext(new_ctx, regs, 0))
do_exit(SIGSEGV);
- sigreturn_exit(regs);
- /* doesn't actually return back to here */
+
+ set_thread_flag(TIF_RESTOREALL);
return 0;
}
@@ -945,12 +951,11 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
* nobody does any...
*/
compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
- return (int)regs->result;
#else
do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
- sigreturn_exit(regs); /* doesn't return here */
- return 0;
#endif
+ set_thread_flag(TIF_RESTOREALL);
+ return 0;
bad:
force_sig(SIGSEGV, current);
@@ -1041,9 +1046,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
*/
do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);
- sigreturn_exit(regs);
- /* doesn't actually return back to here */
-
+ set_thread_flag(TIF_RESTOREALL);
out:
return 0;
}
@@ -1107,12 +1110,6 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
regs->gpr[4] = (unsigned long) sc;
regs->nip = (unsigned long) ka->sa.sa_handler;
regs->trap = 0;
-#ifdef CONFIG_PPC64
- regs->result = 0;
-
- if (test_thread_flag(TIF_SINGLESTEP))
- ptrace_notify(SIGTRAP);
-#endif
return 1;
@@ -1160,12 +1157,8 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
|| restore_user_regs(regs, sr, 1))
goto badframe;
-#ifdef CONFIG_PPC64
- return (int)regs->result;
-#else
- sigreturn_exit(regs); /* doesn't return */
+ set_thread_flag(TIF_RESTOREALL);
return 0;
-#endif
badframe:
force_sig(SIGSEGV, current);
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 1decf278553..7b9d999e211 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -96,8 +96,10 @@ long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, int p3, int
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
- if (do_signal(&saveset, regs))
+ if (do_signal(&saveset, regs)) {
+ set_thread_flag(TIF_RESTOREALL);
return 0;
+ }
}
}
@@ -152,6 +154,14 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
err |= __put_user(0, &sc->v_regs);
#endif /* CONFIG_ALTIVEC */
err |= __put_user(&sc->gp_regs, &sc->regs);
+ if (!FULL_REGS(regs)) {
+ /* Zero out the unsaved GPRs to avoid information
+ leak, and set TIF_SAVE_NVGPRS to ensure that the
+ registers do actually get saved later. */
+ memset(&regs->gpr[14], 0, 18 * sizeof(unsigned long));
+ set_thread_flag(TIF_SAVE_NVGPRS);
+ current_thread_info()->nvgprs_frame = &sc->gp_regs;
+ }
err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE);
err |= __copy_to_user(&sc->fp_regs, &current->thread.fpr, FP_REGS_SIZE);
err |= __put_user(signr, &sc->signal);
@@ -197,10 +207,20 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
if (!sig)
regs->gpr[13] = save_r13;
- err |= __copy_from_user(&current->thread.fpr, &sc->fp_regs, FP_REGS_SIZE);
if (set != NULL)
err |= __get_user(set->sig[0], &sc->oldmask);
+ /*
+ * Do this before updating the thread state in
+ * current->thread.fpr/vr. That way, if we get preempted
+ * and another task grabs the FPU/Altivec, it won't be
+ * tempted to save the current CPU state into the thread_struct
+ * and corrupt what we are writing there.
+ */
+ discard_lazy_cpu_state();
+
+ err |= __copy_from_user(&current->thread.fpr, &sc->fp_regs, FP_REGS_SIZE);
+
#ifdef CONFIG_ALTIVEC
err |= __get_user(v_regs, &sc->v_regs);
err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
@@ -219,14 +239,6 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
current->thread.vrsave = 0;
#endif /* CONFIG_ALTIVEC */
-#ifndef CONFIG_SMP
- preempt_disable();
- if (last_task_used_math == current)
- last_task_used_math = NULL;
- if (last_task_used_altivec == current)
- last_task_used_altivec = NULL;
- preempt_enable();
-#endif
/* Force reload of FP/VEC */
regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC);
@@ -340,6 +352,7 @@ int sys_swapcontext(struct ucontext __user *old_ctx,
do_exit(SIGSEGV);
/* This returns like rt_sigreturn */
+ set_thread_flag(TIF_RESTOREALL);
return 0;
}
@@ -372,7 +385,8 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
*/
do_sigaltstack(&uc->uc_stack, NULL, regs->gpr[1]);
- return regs->result;
+ set_thread_flag(TIF_RESTOREALL);
+ return 0;
badframe:
#if DEBUG_SIG
@@ -454,9 +468,6 @@ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
if (err)
goto badframe;
- if (test_thread_flag(TIF_SINGLESTEP))
- ptrace_notify(SIGTRAP);
-
return 1;
badframe:
@@ -502,6 +513,8 @@ static inline void syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
* we only get here if there is a handler, we dont restart.
*/
regs->result = -EINTR;
+ regs->gpr[3] = EINTR;
+ regs->ccr |= 0x10000000;
break;
case -ERESTARTSYS:
/* ERESTARTSYS means to restart the syscall if there is no
@@ -509,6 +522,8 @@ static inline void syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
*/
if (!(ka->sa.sa_flags & SA_RESTART)) {
regs->result = -EINTR;
+ regs->gpr[3] = EINTR;
+ regs->ccr |= 0x10000000;
break;
}
/* fallthrough */
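
The two added lines in each branch mirror the usual powerpc syscall error convention: a failed syscall returns the positive errno in r3 with the summary-overflow bit of CR0 set, which the C library converts back into -1/errno. A rough sketch of how that state would be read back (illustrative only, not from this patch):

	int failed = (regs->ccr & 0x10000000) != 0;	/* CR0.SO set on error */
	long err = failed ? regs->gpr[3] : 0;		/* positive errno, e.g. EINTR */
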
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 30374d2f88e..c8458c531b2 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -31,6 +31,7 @@
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
+#include <linux/topology.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
@@ -75,6 +76,8 @@ void smp_call_function_interrupt(void);
int smt_enabled_at_boot = 1;
+static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
+
#ifdef CONFIG_MPIC
int __init smp_mpic_probe(void)
{
@@ -123,11 +126,16 @@ void smp_message_recv(int msg, struct pt_regs *regs)
/* XXX Do we have to do this? */
set_need_resched();
break;
-#ifdef CONFIG_DEBUGGER
case PPC_MSG_DEBUGGER_BREAK:
+ if (crash_ipi_function_ptr) {
+ crash_ipi_function_ptr(regs);
+ break;
+ }
+#ifdef CONFIG_DEBUGGER
debugger_ipi(regs);
break;
-#endif
+#endif /* CONFIG_DEBUGGER */
+ /* FALLTHROUGH */
default:
printk("SMP %d: smp_message_recv(): unknown msg %d\n",
smp_processor_id(), msg);
@@ -147,6 +155,17 @@ void smp_send_debugger_break(int cpu)
}
#endif
+#ifdef CONFIG_KEXEC
+void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
+{
+ crash_ipi_function_ptr = crash_ipi_callback;
+ if (crash_ipi_callback) {
+ mb();
+ smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
+ }
+}
+#endif
+
static void stop_this_cpu(void *dummy)
{
local_irq_disable();
@@ -319,8 +338,8 @@ static void __init smp_create_idle(unsigned int cpu)
#ifdef CONFIG_PPC64
paca[cpu].__current = p;
#endif
- current_set[cpu] = p->thread_info;
- p->thread_info->cpu = cpu;
+ current_set[cpu] = task_thread_info(p);
+ task_thread_info(p)->cpu = cpu;
}
void __init smp_prepare_cpus(unsigned int max_cpus)
@@ -356,7 +375,7 @@ void __devinit smp_prepare_boot_cpu(void)
#ifdef CONFIG_PPC64
paca[boot_cpuid].__current = current;
#endif
- current_set[boot_cpuid] = current->thread_info;
+ current_set[boot_cpuid] = task_thread_info(current);
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -452,10 +471,6 @@ int __devinit __cpu_up(unsigned int cpu)
if (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))
return -EINVAL;
-#ifdef CONFIG_PPC64
- paca[cpu].default_decr = tb_ticks_per_jiffy;
-#endif
-
/* Make sure callin-map entry is 0 (can be leftover a CPU
* hotplug
*/
@@ -554,6 +569,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
smp_ops->setup_cpu(boot_cpuid);
set_cpus_allowed(current, old_mask);
+
+ dump_numa_cpu_topology();
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index 9c921d1c408..475249dc235 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -552,30 +552,6 @@ asmlinkage long compat_sys_sched_rr_get_interval(u32 pid, struct compat_timespec
return ret;
}
-asmlinkage int compat_sys_pciconfig_read(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
-{
- return sys_pciconfig_read((unsigned long) bus,
- (unsigned long) dfn,
- (unsigned long) off,
- (unsigned long) len,
- compat_ptr(ubuf));
-}
-
-asmlinkage int compat_sys_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
-{
- return sys_pciconfig_write((unsigned long) bus,
- (unsigned long) dfn,
- (unsigned long) off,
- (unsigned long) len,
- compat_ptr(ubuf));
-}
-
-asmlinkage int compat_sys_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn)
-{
- return sys_pciconfig_iobase(which, in_bus, in_devfn);
-}
-
-
/* Note: it is necessary to treat mode as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
@@ -956,38 +932,6 @@ long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 offset_low,
advice);
}
-long ppc32_timer_create(clockid_t clock,
- struct compat_sigevent __user *ev32,
- timer_t __user *timer_id)
-{
- sigevent_t event;
- timer_t t;
- long err;
- mm_segment_t savefs;
-
- if (ev32 == NULL)
- return sys_timer_create(clock, NULL, timer_id);
-
- if (get_compat_sigevent(&event, ev32))
- return -EFAULT;
-
- if (!access_ok(VERIFY_WRITE, timer_id, sizeof(timer_t)))
- return -EFAULT;
-
- savefs = get_fs();
- set_fs(KERNEL_DS);
- /* The __user pointer casts are valid due to the set_fs() */
- err = sys_timer_create(clock,
- (sigevent_t __user *) &event,
- (timer_t __user *) &t);
- set_fs(savefs);
-
- if (err == 0)
- err = __put_user(t, timer_id);
-
- return err;
-}
-
asmlinkage long compat_sys_add_key(const char __user *_type,
const char __user *_description,
const void __user *_payload,
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index 91b93d917b6..ad895c99813 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -43,9 +43,6 @@
#include <asm/time.h>
#include <asm/unistd.h>
-extern unsigned long wall_jiffies;
-
-
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls..
*
@@ -311,31 +308,6 @@ int sys_olduname(struct oldold_utsname __user *name)
return error? -EFAULT: 0;
}
-#ifdef CONFIG_PPC64
-time_t sys64_time(time_t __user * tloc)
-{
- time_t secs;
- time_t usecs;
-
- long tb_delta = tb_ticks_since(tb_last_stamp);
- tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;
-
- secs = xtime.tv_sec;
- usecs = (xtime.tv_nsec/1000) + tb_delta / tb_ticks_per_usec;
- while (usecs >= USEC_PER_SEC) {
- ++secs;
- usecs -= USEC_PER_SEC;
- }
-
- if (tloc) {
- if (put_user(secs,tloc))
- secs = -EFAULT;
- }
-
- return secs;
-}
-#endif
-
long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
u32 len_high, u32 len_low)
{
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 65eaea91b49..68013179a50 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -54,7 +54,7 @@ SYSCALL(link)
SYSCALL(unlink)
COMPAT_SYS(execve)
SYSCALL(chdir)
-SYSX(sys64_time,compat_sys_time,sys_time)
+COMPAT_SYS(time)
SYSCALL(mknod)
SYSCALL(chmod)
SYSCALL(lchown)
@@ -113,7 +113,7 @@ SYSCALL(sgetmask)
COMPAT_SYS(ssetmask)
SYSCALL(setreuid)
SYSCALL(setregid)
-SYSX(sys_ni_syscall,ppc32_sigsuspend,ppc_sigsuspend)
+SYS32ONLY(sigsuspend)
COMPAT_SYS(sigpending)
COMPAT_SYS(sethostname)
COMPAT_SYS(setrlimit)
@@ -160,7 +160,7 @@ SYSCALL(swapoff)
COMPAT_SYS(sysinfo)
COMPAT_SYS(ipc)
SYSCALL(fsync)
-SYSX(sys_ni_syscall,ppc32_sigreturn,sys_sigreturn)
+SYS32ONLY(sigreturn)
PPC_SYS(clone)
COMPAT_SYS(setdomainname)
PPC_SYS(newuname)
@@ -213,13 +213,13 @@ COMPAT_SYS(nfsservctl)
SYSCALL(setresgid)
SYSCALL(getresgid)
COMPAT_SYS(prctl)
-SYSX(ppc64_rt_sigreturn,ppc32_rt_sigreturn,sys_rt_sigreturn)
+COMPAT_SYS(rt_sigreturn)
COMPAT_SYS(rt_sigaction)
COMPAT_SYS(rt_sigprocmask)
COMPAT_SYS(rt_sigpending)
COMPAT_SYS(rt_sigtimedwait)
COMPAT_SYS(rt_sigqueueinfo)
-SYSX(ppc64_rt_sigsuspend,ppc32_rt_sigsuspend,ppc_rt_sigsuspend)
+COMPAT_SYS(rt_sigsuspend)
COMPAT_SYS(pread64)
COMPAT_SYS(pwrite64)
SYSCALL(chown)
@@ -239,9 +239,9 @@ SYS32ONLY(ftruncate64)
SYSX(sys_ni_syscall,sys_stat64,sys_stat64)
SYSX(sys_ni_syscall,sys_lstat64,sys_lstat64)
SYSX(sys_ni_syscall,sys_fstat64,sys_fstat64)
-COMPAT_SYS(pciconfig_read)
-COMPAT_SYS(pciconfig_write)
-COMPAT_SYS(pciconfig_iobase)
+SYSCALL(pciconfig_read)
+SYSCALL(pciconfig_write)
+SYSCALL(pciconfig_iobase)
SYSCALL(ni_syscall)
SYSCALL(getdents64)
SYSCALL(pivot_root)
@@ -281,7 +281,7 @@ SYSCALL(epoll_create)
SYSCALL(epoll_ctl)
SYSCALL(epoll_wait)
SYSCALL(remap_file_pages)
-SYSX(sys_timer_create,ppc32_timer_create,sys_timer_create)
+SYSX(sys_timer_create,compat_sys_timer_create,sys_timer_create)
COMPAT_SYS(timer_settime)
COMPAT_SYS(timer_gettime)
SYSCALL(timer_getoverrun)
@@ -290,7 +290,7 @@ COMPAT_SYS(clock_settime)
COMPAT_SYS(clock_gettime)
COMPAT_SYS(clock_getres)
COMPAT_SYS(clock_nanosleep)
-SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext)
+COMPAT_SYS(swapcontext)
COMPAT_SYS(tgkill)
COMPAT_SYS(utimes)
COMPAT_SYS(statfs64)
@@ -319,3 +319,5 @@ COMPAT_SYS(ioprio_get)
SYSCALL(inotify_init)
SYSCALL(inotify_add_watch)
SYSCALL(inotify_rm_watch)
+SYSCALL(spu_run)
+SYSCALL(spu_create)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index de8479769bb..c4a294d657b 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -431,7 +431,7 @@ void timer_interrupt(struct pt_regs * regs)
profile_tick(CPU_PROFILING, regs);
#ifdef CONFIG_PPC_ISERIES
- get_paca()->lppaca.int_dword.fields.decr_int = 0;
+ get_lppaca()->int_dword.fields.decr_int = 0;
#endif
while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
@@ -699,10 +699,6 @@ void __init time_init(void)
div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res);
tb_to_xs = res.result_low;
-#ifdef CONFIG_PPC64
- get_paca()->default_decr = tb_ticks_per_jiffy;
-#endif
-
/*
* Compute scale factor for sched_clock.
* The calibrate_decr() function has set tb_ticks_per_sec,
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 1511454c469..7509aa6474f 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -31,6 +31,7 @@
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
+#include <linux/kexec.h>
#include <asm/kdebug.h>
#include <asm/pgtable.h>
@@ -95,7 +96,7 @@ static DEFINE_SPINLOCK(die_lock);
int die(const char *str, struct pt_regs *regs, long err)
{
- static int die_counter;
+ static int die_counter, crash_dump_start = 0;
int nl = 0;
if (debugger(regs))
@@ -156,7 +157,21 @@ int die(const char *str, struct pt_regs *regs, long err)
print_modules();
show_regs(regs);
bust_spinlocks(0);
+
+ if (!crash_dump_start && kexec_should_crash(current)) {
+ crash_dump_start = 1;
+ spin_unlock_irq(&die_lock);
+ crash_kexec(regs);
+ /* NOTREACHED */
+ }
spin_unlock_irq(&die_lock);
+ if (crash_dump_start)
+ /*
+ * Only for soft-reset: other CPUs will respond to an IPI
+ * sent by the first kexec CPU.
+ */
+ for(;;)
+ ;
if (in_interrupt())
panic("Fatal exception in interrupt");
@@ -215,8 +230,10 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
void system_reset_exception(struct pt_regs *regs)
{
/* See if any machine dependent calls */
- if (ppc_md.system_reset_exception)
- ppc_md.system_reset_exception(regs);
+ if (ppc_md.system_reset_exception) {
+ if (ppc_md.system_reset_exception(regs))
+ return;
+ }
die("System Reset", regs, SIGABRT);
@@ -886,12 +903,10 @@ void altivec_unavailable_exception(struct pt_regs *regs)
die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
}
-#if defined(CONFIG_PPC64) || defined(CONFIG_E500)
void performance_monitor_exception(struct pt_regs *regs)
{
perf_irq(regs);
}
-#endif
#ifdef CONFIG_8xx
void SoftwareEmulation(struct pt_regs *regs)
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index 0d878e72fc4..3774e80094f 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -15,11 +15,36 @@
#include <linux/sched.h>
#include <linux/console.h>
#include <asm/processor.h>
+#include <asm/udbg.h>
-void (*udbg_putc)(unsigned char c);
-unsigned char (*udbg_getc)(void);
+void (*udbg_putc)(char c);
+int (*udbg_getc)(void);
int (*udbg_getc_poll)(void);
+/*
+ * Early debugging facilities. You can enable _one_ of these via .config;
+ * if you do so, your kernel _will not boot_ on anything else. Be careful.
+ */
+void __init udbg_early_init(void)
+{
+#if defined(CONFIG_PPC_EARLY_DEBUG_LPAR)
+ /* For LPAR machines that have an HVC console on vterm 0 */
+ udbg_init_debug_lpar();
+#elif defined(CONFIG_PPC_EARLY_DEBUG_G5)
+ /* For use on Apple G5 machines */
+ udbg_init_pmac_realmode();
+#elif defined(CONFIG_PPC_EARLY_DEBUG_RTAS)
+ /* RTAS panel debug */
+ udbg_init_rtas();
+#elif defined(CONFIG_PPC_EARLY_DEBUG_MAPLE)
+ /* Maple real mode debug */
+ udbg_init_maple_realmode();
+#elif defined(CONFIG_PPC_EARLY_DEBUG_ISERIES)
+ /* For iSeries - hit Ctrl-x Ctrl-x to see the output */
+ udbg_init_iseries();
+#endif
+}
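
Any backend only has to provide a putc routine and assign the hook, as udbg_init_rtas() does earlier in this patch. A hypothetical minimal backend (names invented for illustration):

	static void my_board_udbg_putc(char c)
	{
		/* write 'c' to an always-mapped board debug register */
	}

	void __init udbg_init_my_board(void)
	{
		udbg_putc = my_board_udbg_putc;
	}
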
+
/* udbg library, used by xmon et al */
void udbg_puts(const char *s)
{
@@ -57,8 +82,8 @@ int udbg_write(const char *s, int n)
int udbg_read(char *buf, int buflen)
{
- char c, *p = buf;
- int i;
+ char *p = buf;
+ int i, c;
if (!udbg_getc)
return 0;
@@ -66,8 +91,11 @@ int udbg_read(char *buf, int buflen)
for (i = 0; i < buflen; ++i) {
do {
c = udbg_getc();
+ if (c == -1 && i == 0)
+ return -1;
+
} while (c == 0x11 || c == 0x13);
- if (c == 0)
+ if (c == 0 || c == -1)
break;
*p++ = c;
}
@@ -78,7 +106,7 @@ int udbg_read(char *buf, int buflen)
#define UDBG_BUFSIZE 256
void udbg_printf(const char *fmt, ...)
{
- unsigned char buf[UDBG_BUFSIZE];
+ char buf[UDBG_BUFSIZE];
va_list args;
va_start(args, fmt);
@@ -87,6 +115,12 @@ void udbg_printf(const char *fmt, ...)
va_end(args);
}
+void __init udbg_progress(char *s, unsigned short hex)
+{
+ udbg_puts(s);
+ udbg_puts("\n");
+}
+
/*
* Early boot console based on udbg
*/
@@ -99,7 +133,7 @@ static void udbg_console_write(struct console *con, const char *s,
static struct console udbg_console = {
.name = "udbg",
.write = udbg_console_write,
- .flags = CON_PRINTBUFFER,
+ .flags = CON_PRINTBUFFER | CON_ENABLED,
.index = -1,
};
@@ -107,15 +141,19 @@ static int early_console_initialized;
void __init disable_early_printk(void)
{
+#if 1
if (!early_console_initialized)
return;
unregister_console(&udbg_console);
early_console_initialized = 0;
+#endif
}
/* called by setup_system */
void register_early_udbg_console(void)
{
+ if (early_console_initialized)
+ return;
early_console_initialized = 1;
register_console(&udbg_console);
}
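With udbg_getc() now returning an int, callers can tell "no character available" (-1) apart from a NUL byte. A minimal caller sketch, assuming only the udbg_read() interface shown above (the helper name is illustrative, not part of the patch):

/* Drain whatever input is currently pending on the udbg console. */
static void drain_udbg_input(void)
{
	char buf[16];
	int n;

	do {
		n = udbg_read(buf, sizeof(buf));
		/* n == -1: nothing available at all;
		 * 0 <= n < sizeof(buf): input ran out early. */
	} while (n == (int)sizeof(buf));
}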
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 9313574ab93..2da65a9c93f 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -43,9 +43,11 @@ struct NS16550 {
#define LSR_TEMT 0x40 /* Xmitter empty */
#define LSR_ERR 0x80 /* Error */
+#define LCR_DLAB 0x80
+
static volatile struct NS16550 __iomem *udbg_comport;
-static void udbg_550_putc(unsigned char c)
+static void udbg_550_putc(char c)
{
if (udbg_comport) {
while ((in_8(&udbg_comport->lsr) & LSR_THRE) == 0)
@@ -67,39 +69,80 @@ static int udbg_550_getc_poll(void)
return -1;
}
-static unsigned char udbg_550_getc(void)
+static int udbg_550_getc(void)
{
if (udbg_comport) {
while ((in_8(&udbg_comport->lsr) & LSR_DR) == 0)
/* wait for char */;
return in_8(&udbg_comport->rbr);
}
- return 0;
+ return -1;
}
-void udbg_init_uart(void __iomem *comport, unsigned int speed)
+void udbg_init_uart(void __iomem *comport, unsigned int speed,
+ unsigned int clock)
{
- u16 dll = speed ? (115200 / speed) : 12;
+ unsigned int dll, base_bauds = clock / 16;
+
+ if (speed == 0)
+ speed = 9600;
+ dll = base_bauds / speed;
if (comport) {
udbg_comport = (struct NS16550 __iomem *)comport;
out_8(&udbg_comport->lcr, 0x00);
out_8(&udbg_comport->ier, 0xff);
out_8(&udbg_comport->ier, 0x00);
- out_8(&udbg_comport->lcr, 0x80); /* Access baud rate */
- out_8(&udbg_comport->dll, dll & 0xff); /* 1 = 115200, 2 = 57600,
- 3 = 38400, 12 = 9600 baud */
- out_8(&udbg_comport->dlm, dll >> 8); /* dll >> 8 which should be zero
- for fast rates; */
- out_8(&udbg_comport->lcr, 0x03); /* 8 data, 1 stop, no parity */
- out_8(&udbg_comport->mcr, 0x03); /* RTS/DTR */
- out_8(&udbg_comport->fcr ,0x07); /* Clear & enable FIFOs */
+ out_8(&udbg_comport->lcr, LCR_DLAB);
+ out_8(&udbg_comport->dll, dll & 0xff);
+ out_8(&udbg_comport->dlm, dll >> 8);
+ /* 8 data, 1 stop, no parity */
+ out_8(&udbg_comport->lcr, 0x03);
+ /* RTS/DTR */
+ out_8(&udbg_comport->mcr, 0x03);
+ /* Clear & enable FIFOs */
+ out_8(&udbg_comport->fcr, 0x07);
udbg_putc = udbg_550_putc;
udbg_getc = udbg_550_getc;
udbg_getc_poll = udbg_550_getc_poll;
}
}
+unsigned int udbg_probe_uart_speed(void __iomem *comport, unsigned int clock)
+{
+ unsigned int dll, dlm, divisor, prescaler, speed;
+ u8 old_lcr;
+ volatile struct NS16550 __iomem *port = comport;
+
+ old_lcr = in_8(&port->lcr);
+
+ /* select divisor latch registers. */
+ out_8(&port->lcr, LCR_DLAB);
+
+ /* now, read the divisor */
+ dll = in_8(&port->dll);
+ dlm = in_8(&port->dlm);
+ divisor = dlm << 8 | dll;
+
+ /* check prescaling */
+ if (in_8(&port->mcr) & 0x80)
+ prescaler = 4;
+ else
+ prescaler = 1;
+
+ /* restore the LCR */
+ out_8(&port->lcr, old_lcr);
+
+ /* calculate speed */
+ speed = (clock / prescaler) / (divisor * 16);
+
+ /* sanity check */
+ if (speed < 0 || speed > (clock / 16))
+ speed = 9600;
+
+ return speed;
+}
+
#ifdef CONFIG_PPC_MAPLE
void udbg_maple_real_putc(unsigned char c)
{
@@ -112,7 +155,7 @@ void udbg_maple_real_putc(unsigned char c)
}
}
-void udbg_init_maple_realmode(void)
+void __init udbg_init_maple_realmode(void)
{
udbg_comport = (volatile struct NS16550 __iomem *)0xf40003f8;
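To make the divisor arithmetic above concrete, assuming the conventional 1.8432 MHz UART input clock (a typical value, not one fixed by the patch): base_bauds = 1843200 / 16 = 115200, so a requested speed of 9600 gives dll = 12, and udbg_probe_uart_speed() recovers 9600 from that divisor. A minimal sketch of both directions (helper names are illustrative only):

/* Forward and inverse 16550 divisor math, assuming no prescaling. */
static unsigned int ns16550_divisor(unsigned int clock, unsigned int speed)
{
	return (clock / 16) / speed;		/* 1843200, 9600 -> 12 */
}

static unsigned int ns16550_speed(unsigned int clock, unsigned int divisor)
{
	return clock / (divisor * 16);		/* 1843200, 12 -> 9600 */
}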
diff --git a/arch/powerpc/kernel/vdso32/.gitignore b/arch/powerpc/kernel/vdso32/.gitignore
new file mode 100644
index 00000000000..e45fba9d0ce
--- /dev/null
+++ b/arch/powerpc/kernel/vdso32/.gitignore
@@ -0,0 +1 @@
+vdso32.lds
diff --git a/arch/powerpc/kernel/vdso64/.gitignore b/arch/powerpc/kernel/vdso64/.gitignore
new file mode 100644
index 00000000000..3fd18cf9fec
--- /dev/null
+++ b/arch/powerpc/kernel/vdso64/.gitignore
@@ -0,0 +1 @@
+vdso64.lds
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index 35bd03c41dd..8362fa272ca 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -28,15 +28,13 @@
void __spin_yield(raw_spinlock_t *lock)
{
unsigned int lock_value, holder_cpu, yield_count;
- struct paca_struct *holder_paca;
lock_value = lock->slock;
if (lock_value == 0)
return;
holder_cpu = lock_value & 0xffff;
BUG_ON(holder_cpu >= NR_CPUS);
- holder_paca = &paca[holder_cpu];
- yield_count = holder_paca->lppaca.yield_count;
+ yield_count = lppaca[holder_cpu].yield_count;
if ((yield_count & 1) == 0)
return; /* virtual cpu is currently running */
rmb();
@@ -60,15 +58,13 @@ void __rw_yield(raw_rwlock_t *rw)
{
int lock_value;
unsigned int holder_cpu, yield_count;
- struct paca_struct *holder_paca;
lock_value = rw->lock;
if (lock_value >= 0)
return; /* no write lock at present */
holder_cpu = lock_value & 0xffff;
BUG_ON(holder_cpu >= NR_CPUS);
- holder_paca = &paca[holder_cpu];
- yield_count = holder_paca->lppaca.yield_count;
+ yield_count = lppaca[holder_cpu].yield_count;
if ((yield_count & 1) == 0)
return; /* virtual cpu is currently running */
rmb();
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 93d4fbfdb72..a4815d31672 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -81,7 +81,8 @@ static int store_updates_sp(struct pt_regs *regs)
}
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
-static void do_dabr(struct pt_regs *regs, unsigned long error_code)
+static void do_dabr(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code)
{
siginfo_t info;
@@ -99,7 +100,7 @@ static void do_dabr(struct pt_regs *regs, unsigned long error_code)
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_HWBKPT;
- info.si_addr = (void __user *)regs->nip;
+ info.si_addr = (void __user *)address;
force_sig_info(SIGTRAP, &info, current);
}
#endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
@@ -159,7 +160,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
if (error_code & DSISR_DABRMATCH) {
/* DABR match */
- do_dabr(regs, error_code);
+ do_dabr(regs, address, error_code);
return 0;
}
#endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index a606504678b..149351a84b9 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -368,7 +368,7 @@ static unsigned long __init htab_get_table_size(void)
unsigned long mem_size, rnd_mem_size, pteg_count;
/* If hash size isn't already provided by the platform, we try to
- * retreive it from the device-tree. If it's not there neither, we
+ * retrieve it from the device-tree. If it's not there either, we
* calculate it now based on the total RAM size
*/
if (ppc64_pft_size == 0)
@@ -456,7 +456,7 @@ void __init htab_initialize(void)
/* create bolted the linear mapping in the hash table */
for (i=0; i < lmb.memory.cnt; i++) {
- base = lmb.memory.region[i].base + KERNELBASE;
+ base = (unsigned long)__va(lmb.memory.region[i].base);
size = lmb.memory.region[i].size;
DBG("creating mapping for region: %lx : %lx\n", base, size);
@@ -498,8 +498,8 @@ void __init htab_initialize(void)
* for either 4K or 16MB pages.
*/
if (tce_alloc_start) {
- tce_alloc_start += KERNELBASE;
- tce_alloc_end += KERNELBASE;
+ tce_alloc_start = (unsigned long)__va(tce_alloc_start);
+ tce_alloc_end = (unsigned long)__va(tce_alloc_end);
if (base + size >= tce_alloc_start)
tce_alloc_start = base + size + 1;
@@ -644,6 +644,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
DBG_LOW(" -> rc=%d\n", rc);
return rc;
}
+EXPORT_SYMBOL_GPL(hash_page);
void hash_preload(struct mm_struct *mm, unsigned long ea,
unsigned long access, unsigned long trap)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 54131b877da..b51bb28c054 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -549,6 +549,17 @@ fail:
return addr;
}
+static int htlb_check_hinted_area(unsigned long addr, unsigned long len)
+{
+ struct vm_area_struct *vma;
+
+ vma = find_vma(current->mm, addr);
+ if (!vma || ((addr + len) <= vma->vm_start))
+ return 0;
+
+ return -ENOMEM;
+}
+
static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
{
unsigned long addr = 0;
@@ -618,15 +629,28 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
if (!cpu_has_feature(CPU_FTR_16M_PAGE))
return -EINVAL;
+ /* Paranoia, caller should have dealt with this */
+ BUG_ON((addr + len) < addr);
+
if (test_thread_flag(TIF_32BIT)) {
+ /* Paranoia, caller should have dealt with this */
+ BUG_ON((addr + len) > 0x100000000UL);
+
curareas = current->mm->context.low_htlb_areas;
- /* First see if we can do the mapping in the existing
- * low areas */
+ /* First see if we can use the hint address */
+ if (addr && (htlb_check_hinted_area(addr, len) == 0)) {
+ areamask = LOW_ESID_MASK(addr, len);
+ if (open_low_hpage_areas(current->mm, areamask) == 0)
+ return addr;
+ }
+
+ /* Next see if we can map in the existing low areas */
addr = htlb_get_low_area(len, curareas);
if (addr != -ENOMEM)
return addr;
+ /* Finally go looking for areas to open */
lastshift = 0;
for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
! lastshift; areamask >>=1) {
@@ -641,12 +665,22 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
} else {
curareas = current->mm->context.high_htlb_areas;
- /* First see if we can do the mapping in the existing
- * high areas */
+ /* First see if we can use the hint address */
+ /* We discourage 64-bit processes from doing hugepage
+ * mappings below 4GB (must use MAP_FIXED) */
+ if ((addr >= 0x100000000UL)
+ && (htlb_check_hinted_area(addr, len) == 0)) {
+ areamask = HTLB_AREA_MASK(addr, len);
+ if (open_high_hpage_areas(current->mm, areamask) == 0)
+ return addr;
+ }
+
+ /* Next see if we can map in the existing high areas */
addr = htlb_get_high_area(len, curareas);
if (addr != -ENOMEM)
return addr;
+ /* Finally go looking for areas to open */
lastshift = 0;
for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
! lastshift; areamask >>=1) {
diff --git a/arch/powerpc/mm/imalloc.c b/arch/powerpc/mm/imalloc.c
index f9587bcc6a4..8b0c132bc16 100644
--- a/arch/powerpc/mm/imalloc.c
+++ b/arch/powerpc/mm/imalloc.c
@@ -107,6 +107,7 @@ static int im_region_status(unsigned long v_addr, unsigned long size,
if (v_addr < (unsigned long) tmp->addr + tmp->size)
break;
+ *vm = NULL;
if (tmp) {
if (im_region_overlaps(v_addr, size, tmp))
return IM_REGION_OVERLAP;
@@ -127,7 +128,6 @@ static int im_region_status(unsigned long v_addr, unsigned long size,
}
}
- *vm = NULL;
return IM_REGION_UNUSED;
}
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 7d4b8b5f060..7d0d75c1184 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -188,6 +188,11 @@ void __init MMU_init(void)
if (ppc_md.progress)
ppc_md.progress("MMU:exit", 0x211);
+
+ /* From now on, btext is no longer BAT mapped if it was at all */
+#ifdef CONFIG_BOOTX_TEXT
+ btext_unmap();
+#endif
}
/* This is only called until mem_init is done. */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index ed6ed2e30da..15aac0d78df 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -114,19 +114,18 @@ void online_page(struct page *page)
num_physpages++;
}
-/*
- * This works only for the non-NUMA case. Later, we'll need a lookup
- * to convert from real physical addresses to nid, that doesn't use
- * pfn_to_nid().
- */
int __devinit add_memory(u64 start, u64 size)
{
- struct pglist_data *pgdata = NODE_DATA(0);
+ struct pglist_data *pgdata;
struct zone *zone;
+ int nid;
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
- start += KERNELBASE;
+ nid = hot_add_scn_to_nid(start);
+ pgdata = NODE_DATA(nid);
+
+ start = __va(start);
create_section_mapping(start, start + size);
/* this should work for most non-highmem platforms */
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index ba7a3055a9f..2863a912bcd 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -37,6 +37,7 @@ EXPORT_SYMBOL(node_data);
static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
static int min_common_depth;
+static int n_mem_addr_cells, n_mem_size_cells;
/*
* We need somewhere to store start/end/node for each region until we have
@@ -254,32 +255,20 @@ static int __init find_min_common_depth(void)
return depth;
}
-static int __init get_mem_addr_cells(void)
+static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
struct device_node *memory = NULL;
- int rc;
memory = of_find_node_by_type(memory, "memory");
if (!memory)
- return 0; /* it won't matter */
+ panic("numa.c: No memory nodes found!");
- rc = prom_n_addr_cells(memory);
- return rc;
+ *n_addr_cells = prom_n_addr_cells(memory);
+ *n_size_cells = prom_n_size_cells(memory);
+ of_node_put(memory);
}
-static int __init get_mem_size_cells(void)
-{
- struct device_node *memory = NULL;
- int rc;
-
- memory = of_find_node_by_type(memory, "memory");
- if (!memory)
- return 0; /* it won't matter */
- rc = prom_n_size_cells(memory);
- return rc;
-}
-
-static unsigned long __init read_n_cells(int n, unsigned int **buf)
+static unsigned long __devinit read_n_cells(int n, unsigned int **buf)
{
unsigned long result = 0;
@@ -386,7 +375,6 @@ static int __init parse_numa_properties(void)
{
struct device_node *cpu = NULL;
struct device_node *memory = NULL;
- int addr_cells, size_cells;
int max_domain;
unsigned long i;
@@ -425,8 +413,7 @@ static int __init parse_numa_properties(void)
}
}
- addr_cells = get_mem_addr_cells();
- size_cells = get_mem_size_cells();
+ get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
memory = NULL;
while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
unsigned long start;
@@ -436,15 +423,21 @@ static int __init parse_numa_properties(void)
unsigned int *memcell_buf;
unsigned int len;
- memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
+ memcell_buf = (unsigned int *)get_property(memory,
+ "linux,usable-memory", &len);
+ if (!memcell_buf || len <= 0)
+ memcell_buf =
+ (unsigned int *)get_property(memory, "reg",
+ &len);
if (!memcell_buf || len <= 0)
continue;
- ranges = memory->n_addrs;
+ /* ranges in cell */
+ ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
/* these are order-sensitive, and modify the buffer pointer */
- start = read_n_cells(addr_cells, &memcell_buf);
- size = read_n_cells(size_cells, &memcell_buf);
+ start = read_n_cells(n_mem_addr_cells, &memcell_buf);
+ size = read_n_cells(n_mem_size_cells, &memcell_buf);
numa_domain = of_node_numa_domain(memory);
@@ -497,7 +490,41 @@ static void __init setup_nonnuma(void)
node_set_online(0);
}
-static void __init dump_numa_topology(void)
+void __init dump_numa_cpu_topology(void)
+{
+ unsigned int node;
+ unsigned int cpu, count;
+
+ if (min_common_depth == -1 || !numa_enabled)
+ return;
+
+ for_each_online_node(node) {
+ printk(KERN_INFO "Node %d CPUs:", node);
+
+ count = 0;
+ /*
+ * If we used a CPU iterator here we would miss printing
+ * the holes in the cpumap.
+ */
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
+ if (count == 0)
+ printk(" %u", cpu);
+ ++count;
+ } else {
+ if (count > 1)
+ printk("-%u", cpu - 1);
+ count = 0;
+ }
+ }
+
+ if (count > 1)
+ printk("-%u", NR_CPUS - 1);
+ printk("\n");
+ }
+}
+
+static void __init dump_numa_memory_topology(void)
{
unsigned int node;
unsigned int count;
@@ -529,7 +556,6 @@ static void __init dump_numa_topology(void)
printk("-0x%lx", i);
printk("\n");
}
- return;
}
/*
@@ -591,7 +617,7 @@ void __init do_init_bootmem(void)
if (parse_numa_properties())
setup_nonnuma();
else
- dump_numa_topology();
+ dump_numa_memory_topology();
register_cpu_notifier(&ppc64_numa_nb);
@@ -730,3 +756,60 @@ static int __init early_numa(char *p)
return 0;
}
early_param("numa", early_numa);
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+/*
+ * Find the node associated with a hot added memory section. Section
+ * corresponds to a SPARSEMEM section, not an LMB. It is assumed that
+ * sections are fully contained within a single LMB.
+ */
+int hot_add_scn_to_nid(unsigned long scn_addr)
+{
+ struct device_node *memory = NULL;
+ nodemask_t nodes;
+ int numa_domain = 0;
+
+ if (!numa_enabled || (min_common_depth < 0))
+ return numa_domain;
+
+ while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
+ unsigned long start, size;
+ int ranges;
+ unsigned int *memcell_buf;
+ unsigned int len;
+
+ memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
+ if (!memcell_buf || len <= 0)
+ continue;
+
+ /* ranges in cell */
+ ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
+ha_new_range:
+ start = read_n_cells(n_mem_addr_cells, &memcell_buf);
+ size = read_n_cells(n_mem_size_cells, &memcell_buf);
+ numa_domain = of_node_numa_domain(memory);
+
+ /* Domains not present at boot default to 0 */
+ if (!node_online(numa_domain))
+ numa_domain = any_online_node(NODE_MASK_ALL);
+
+ if ((scn_addr >= start) && (scn_addr < (start + size))) {
+ of_node_put(memory);
+ goto got_numa_domain;
+ }
+
+ if (--ranges) /* process all ranges in cell */
+ goto ha_new_range;
+ }
+ BUG(); /* section address should be found above */
+
+ /* Temporary code to ensure that returned node is not empty */
+got_numa_domain:
+ nodes_setall(nodes);
+ while (NODE_DATA(numa_domain)->node_spanned_pages == 0) {
+ node_clear(numa_domain, nodes);
+ numa_domain = any_online_node(nodes);
+ }
+ return numa_domain;
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
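For reference on the cell arithmetic above: a "reg" (or "linux,usable-memory") property is an array of 32-bit cells, so a 32-byte property with two address cells and two size cells describes (32 >> 2) / (2 + 2) = 2 ranges. A minimal sketch of how such cells combine into a value, mirroring read_n_cells() above and assuming a 64-bit unsigned long as on ppc64 (the helper name is illustrative only):

/* Combine n 32-bit cells, most significant first, and advance the buffer. */
static unsigned long read_cells_example(int n, unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}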
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 2ffca63602c..7b278d83739 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -174,7 +174,7 @@ void __iomem * __ioremap(unsigned long addr, unsigned long size,
pa = addr & PAGE_MASK;
size = PAGE_ALIGN(addr + size) - pa;
- if (size == 0)
+ if ((size == 0) || (pa == 0))
return NULL;
if (mem_init_done) {
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 60e852f2f8e..ffc8ed4de62 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -75,7 +75,7 @@ static void slb_flush_and_rebolt(void)
vflags = SLB_VSID_KERNEL | virtual_llp;
ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
- if ((ksp_esid_data & ESID_MASK) == KERNELBASE)
+ if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
ksp_esid_data &= ~SLB_ESID_V;
/* We need to do this all in asm, so we're sure we don't touch
@@ -87,8 +87,8 @@ static void slb_flush_and_rebolt(void)
/* Slot 2 - kernel stack */
"slbmte %2,%3\n"
"isync"
- :: "r"(mk_vsid_data(VMALLOCBASE, vflags)),
- "r"(mk_esid_data(VMALLOCBASE, 1)),
+ :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
+ "r"(mk_esid_data(VMALLOC_START, 1)),
"r"(mk_vsid_data(ksp_esid_data, lflags)),
"r"(ksp_esid_data)
: "memory");
@@ -134,14 +134,14 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
else
unmapped_base = TASK_UNMAPPED_BASE_USER64;
- if (pc >= KERNELBASE)
+ if (is_kernel_addr(pc))
return;
slb_allocate(pc);
if (GET_ESID(pc) == GET_ESID(stack))
return;
- if (stack >= KERNELBASE)
+ if (is_kernel_addr(stack))
return;
slb_allocate(stack);
@@ -149,7 +149,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
|| (GET_ESID(stack) == GET_ESID(unmapped_base)))
return;
- if (unmapped_base >= KERNELBASE)
+ if (is_kernel_addr(unmapped_base))
return;
slb_allocate(unmapped_base);
}
@@ -213,10 +213,10 @@ void slb_initialize(void)
asm volatile("isync":::"memory");
asm volatile("slbmte %0,%0"::"r" (0) : "memory");
asm volatile("isync; slbia; isync":::"memory");
- create_slbe(KERNELBASE, lflags, 0);
+ create_slbe(PAGE_OFFSET, lflags, 0);
/* VMALLOC space has 4K pages always for now */
- create_slbe(VMALLOCBASE, vflags, 1);
+ create_slbe(VMALLOC_START, vflags, 1);
/* We don't bolt the stack for the time being - we're in boot,
* so the stack is in the bolted segment. By the time it goes
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 950ffc5848c..d1acee38f16 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -37,9 +37,9 @@ _GLOBAL(slb_allocate_realmode)
srdi r9,r3,60 /* get region */
srdi r10,r3,28 /* get esid */
- cmpldi cr7,r9,0xc /* cmp KERNELBASE for later use */
+ cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */
- /* r3 = address, r10 = esid, cr7 = <>KERNELBASE */
+ /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
blt cr7,0f /* user or kernel? */
/* kernel address: proto-VSID = ESID */
@@ -166,7 +166,7 @@ _GLOBAL(slb_allocate_user)
/*
* Finish loading of an SLB entry and return
*
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <>KERNELBASE
+ * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
*/
slb_finish_load:
ASM_VSID_SCRAMBLE(r10,r9)
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 51e7951414e..82e4951826b 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -40,7 +40,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
unsigned long entry, group, old_esid, castout_entry, i;
unsigned int global_entry;
struct stab_entry *ste, *castout_ste;
- unsigned long kernel_segment = (esid << SID_SHIFT) >= KERNELBASE;
+ unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;
vsid_data = vsid << STE_VSID_SHIFT;
esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
@@ -83,7 +83,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
}
/* Dont cast out the first kernel segment */
- if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE)
+ if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
break;
castout_entry = (castout_entry + 1) & 0xf;
@@ -122,7 +122,7 @@ static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
unsigned long offset;
/* Kernel or user address? */
- if (ea >= KERNELBASE) {
+ if (is_kernel_addr(ea)) {
vsid = get_kernel_vsid(ea);
} else {
if ((ea >= TASK_SIZE_USER64) || (! mm))
@@ -133,7 +133,7 @@ static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);
- if (ea < KERNELBASE) {
+ if (!is_kernel_addr(ea)) {
offset = __get_cpu_var(stab_cache_ptr);
if (offset < NR_STAB_CACHE_ENTRIES)
__get_cpu_var(stab_cache[offset++]) = stab_entry;
@@ -190,7 +190,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
entry++, ste++) {
unsigned long ea;
ea = ste->esid_data & ESID_MASK;
- if (ea < KERNELBASE) {
+ if (!is_kernel_addr(ea)) {
ste->esid_data = 0;
}
}
@@ -251,7 +251,7 @@ void stabs_alloc(void)
panic("Unable to allocate segment table for CPU %d.\n",
cpu);
- newstab += KERNELBASE;
+ newstab = (unsigned long)__va(newstab);
memset((void *)newstab, 0, HW_PAGE_SIZE);
@@ -270,11 +270,11 @@ void stabs_alloc(void)
*/
void stab_initialize(unsigned long stab)
{
- unsigned long vsid = get_kernel_vsid(KERNELBASE);
+ unsigned long vsid = get_kernel_vsid(PAGE_OFFSET);
unsigned long stabreal;
asm volatile("isync; slbia; isync":::"memory");
- make_ste(stab, GET_ESID(KERNELBASE), vsid);
+ make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);
/* Order update */
asm volatile("sync":::"memory");
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index 859d29a0cac..bb3afb6e631 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -168,7 +168,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
batch->mm = mm;
batch->psize = psize;
}
- if (addr < KERNELBASE) {
+ if (!is_kernel_addr(addr)) {
vsid = get_vsid(mm->context.id, addr);
WARN_ON(vsid == 0);
} else
diff --git a/arch/powerpc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile
index 0782d0cca89..554cd7c7532 100644
--- a/arch/powerpc/oprofile/Makefile
+++ b/arch/powerpc/oprofile/Makefile
@@ -9,3 +9,4 @@ DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
oprofile-y := $(DRIVER_OBJS) common.o
oprofile-$(CONFIG_PPC64) += op_model_rs64.o op_model_power4.o
oprofile-$(CONFIG_FSL_BOOKE) += op_model_fsl_booke.o
+oprofile-$(CONFIG_PPC32) += op_model_7450.o
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index af2c05d20ba..cc2535be3a7 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -14,9 +14,6 @@
*/
#include <linux/oprofile.h>
-#ifndef __powerpc64__
-#include <linux/slab.h>
-#endif /* ! __powerpc64__ */
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/errno.h>
@@ -31,10 +28,6 @@ static struct op_powerpc_model *model;
static struct op_counter_config ctr[OP_MAX_COUNTER];
static struct op_system_config sys;
-#ifndef __powerpc64__
-static char *cpu_type;
-#endif /* ! __powerpc64__ */
-
static void op_handle_interrupt(struct pt_regs *regs)
{
model->handle_interrupt(regs, ctr);
@@ -53,14 +46,7 @@ static int op_powerpc_setup(void)
model->reg_setup(ctr, &sys, model->num_counters);
/* Configure the registers on all cpus. */
-#ifdef __powerpc64__
on_each_cpu(model->cpu_setup, NULL, 0, 1);
-#else /* __powerpc64__ */
-#if 0
- /* FIXME: Make multi-cpu work */
- on_each_cpu(model->reg_setup, NULL, 0, 1);
-#endif
-#endif /* __powerpc64__ */
return 0;
}
@@ -95,7 +81,7 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
{
int i;
-#ifdef __powerpc64__
+#ifdef CONFIG_PPC64
/*
* There is one mmcr0, mmcr1 and mmcra for setting the events for
* all of the counters.
@@ -103,7 +89,7 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0);
oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1);
oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra);
-#endif /* __powerpc64__ */
+#endif
for (i = 0; i < model->num_counters; ++i) {
struct dentry *dir;
@@ -115,65 +101,68 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
-#ifdef __powerpc64__
+
/*
- * We dont support per counter user/kernel selection, but
- * we leave the entries because userspace expects them
+ * Classic PowerPC doesn't support per-counter
+ * control like this, but the options are
+ * expected, so they remain. For Freescale
+ * Book-E style performance monitors, we do
+ * support them.
*/
-#endif /* __powerpc64__ */
oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
-#ifndef __powerpc64__
- /* FIXME: Not sure if this is used */
-#endif /* ! __powerpc64__ */
oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
}
oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);
-#ifdef __powerpc64__
+#ifdef CONFIG_PPC64
oprofilefs_create_ulong(sb, root, "backtrace_spinlocks",
&sys.backtrace_spinlocks);
-#endif /* __powerpc64__ */
+#endif
/* Default to tracing both kernel and user */
sys.enable_kernel = 1;
sys.enable_user = 1;
-#ifdef __powerpc64__
+#ifdef CONFIG_PPC64
/* Turn on backtracing through spinlocks by default */
sys.backtrace_spinlocks = 1;
-#endif /* __powerpc64__ */
+#endif
return 0;
}
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
-#ifndef __powerpc64__
-#ifdef CONFIG_FSL_BOOKE
- model = &op_model_fsl_booke;
+ if (!cur_cpu_spec->oprofile_cpu_type)
+ return -ENODEV;
+
+ switch (cur_cpu_spec->oprofile_type) {
+#ifdef CONFIG_PPC64
+ case PPC_OPROFILE_RS64:
+ model = &op_model_rs64;
+ break;
+ case PPC_OPROFILE_POWER4:
+ model = &op_model_power4;
+ break;
#else
- return -ENODEV;
+ case PPC_OPROFILE_G4:
+ model = &op_model_7450;
+ break;
#endif
+#ifdef CONFIG_FSL_BOOKE
+ case PPC_OPROFILE_BOOKE:
+ model = &op_model_fsl_booke;
+ break;
+#endif
+ default:
+ return -ENODEV;
+ }
- cpu_type = kmalloc(32, GFP_KERNEL);
- if (NULL == cpu_type)
- return -ENOMEM;
-
- sprintf(cpu_type, "ppc/%s", cur_cpu_spec->cpu_name);
-
- model->num_counters = cur_cpu_spec->num_pmcs;
-
- ops->cpu_type = cpu_type;
-#else /* __powerpc64__ */
- if (!cur_cpu_spec->oprofile_model || !cur_cpu_spec->oprofile_cpu_type)
- return -ENODEV;
- model = cur_cpu_spec->oprofile_model;
model->num_counters = cur_cpu_spec->num_pmcs;
ops->cpu_type = cur_cpu_spec->oprofile_cpu_type;
-#endif /* __powerpc64__ */
ops->create_files = op_powerpc_create_files;
ops->setup = op_powerpc_setup;
ops->shutdown = op_powerpc_shutdown;
@@ -188,8 +177,4 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
void oprofile_arch_exit(void)
{
-#ifndef __powerpc64__
- kfree(cpu_type);
- cpu_type = NULL;
-#endif /* ! __powerpc64__ */
}
diff --git a/arch/powerpc/oprofile/op_model_7450.c b/arch/powerpc/oprofile/op_model_7450.c
new file mode 100644
index 00000000000..32abfdbb0eb
--- /dev/null
+++ b/arch/powerpc/oprofile/op_model_7450.c
@@ -0,0 +1,206 @@
+/*
+ * oprofile/op_model_7450.c
+ *
+ * Freescale 745x/744x oprofile support, based on fsl_booke support
+ * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc
+ *
+ * Author: Andy Fleming
+ * Maintainer: Kumar Gala <galak@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/oprofile.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+#include <asm/cputable.h>
+#include <asm/page.h>
+#include <asm/pmc.h>
+#include <asm/oprofile_impl.h>
+
+static unsigned long reset_value[OP_MAX_COUNTER];
+
+static int oprofile_running;
+static u32 mmcr0_val, mmcr1_val, mmcr2_val;
+
+#define MMCR0_PMC1_SHIFT 6
+#define MMCR0_PMC2_SHIFT 0
+#define MMCR1_PMC3_SHIFT 27
+#define MMCR1_PMC4_SHIFT 22
+#define MMCR1_PMC5_SHIFT 17
+#define MMCR1_PMC6_SHIFT 11
+
+#define mmcr0_event1(event) \
+ ((event << MMCR0_PMC1_SHIFT) & MMCR0_PMC1SEL)
+#define mmcr0_event2(event) \
+ ((event << MMCR0_PMC2_SHIFT) & MMCR0_PMC2SEL)
+
+#define mmcr1_event3(event) \
+ ((event << MMCR1_PMC3_SHIFT) & MMCR1_PMC3SEL)
+#define mmcr1_event4(event) \
+ ((event << MMCR1_PMC4_SHIFT) & MMCR1_PMC4SEL)
+#define mmcr1_event5(event) \
+ ((event << MMCR1_PMC5_SHIFT) & MMCR1_PMC5SEL)
+#define mmcr1_event6(event) \
+ ((event << MMCR1_PMC6_SHIFT) & MMCR1_PMC6SEL)
+
+#define MMCR0_INIT (MMCR0_FC | MMCR0_FCS | MMCR0_FCP | MMCR0_FCM1 | MMCR0_FCM0)
+
+/* Unfreezes the counters on this CPU, enables the interrupt,
+ * enables the counters to trigger the interrupt, and sets the
+ * counters to only count when the mark bit is not set.
+ */
+static void pmc_start_ctrs(void)
+{
+ u32 mmcr0 = mfspr(SPRN_MMCR0);
+
+ mmcr0 &= ~(MMCR0_FC | MMCR0_FCM0);
+ mmcr0 |= (MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE);
+
+ mtspr(SPRN_MMCR0, mmcr0);
+}
+
+/* Disables the counters on this CPU, and freezes them */
+static void pmc_stop_ctrs(void)
+{
+ u32 mmcr0 = mfspr(SPRN_MMCR0);
+
+ mmcr0 |= MMCR0_FC;
+ mmcr0 &= ~(MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE);
+
+ mtspr(SPRN_MMCR0, mmcr0);
+}
+
+/* Configures the counters on this CPU based on the global
+ * settings */
+static void fsl7450_cpu_setup(void *unused)
+{
+ /* freeze all counters */
+ pmc_stop_ctrs();
+
+ mtspr(SPRN_MMCR0, mmcr0_val);
+ mtspr(SPRN_MMCR1, mmcr1_val);
+ mtspr(SPRN_MMCR2, mmcr2_val);
+}
+
+#define NUM_CTRS 6
+
+/* Configures the global settings for the counters on all CPUs. */
+static void fsl7450_reg_setup(struct op_counter_config *ctr,
+ struct op_system_config *sys,
+ int num_ctrs)
+{
+ int i;
+
+ /* Our counters count up, and "count" refers to how many
+ * events remain before the next interrupt; we interrupt
+ * on overflow. So we calculate the starting value
+ * which will give us "count" until overflow.
+ * Then we set the events on the enabled counters */
+ for (i = 0; i < NUM_CTRS; ++i)
+ reset_value[i] = 0x80000000UL - ctr[i].count;
+
+ /* Set events for Counters 1 & 2 */
+ mmcr0_val = MMCR0_INIT | mmcr0_event1(ctr[0].event)
+ | mmcr0_event2(ctr[1].event);
+
+ /* Setup user/kernel bits */
+ if (sys->enable_kernel)
+ mmcr0_val &= ~(MMCR0_FCS);
+
+ if (sys->enable_user)
+ mmcr0_val &= ~(MMCR0_FCP);
+
+ /* Set events for Counters 3-6 */
+ mmcr1_val = mmcr1_event3(ctr[2].event)
+ | mmcr1_event4(ctr[3].event)
+ | mmcr1_event5(ctr[4].event)
+ | mmcr1_event6(ctr[5].event);
+
+ mmcr2_val = 0;
+}
+
+/* Sets the counters on this CPU to the chosen values, and starts them */
+static void fsl7450_start(struct op_counter_config *ctr)
+{
+ int i;
+
+ mtmsr(mfmsr() | MSR_PMM);
+
+ for (i = 0; i < NUM_CTRS; ++i) {
+ if (ctr[i].enabled)
+ ctr_write(i, reset_value[i]);
+ else
+ ctr_write(i, 0);
+ }
+
+ /* Clear the freeze bit, and enable the interrupt.
+ * The counters won't actually start until the rfi clears
+ * the PMM bit */
+ pmc_start_ctrs();
+
+ oprofile_running = 1;
+}
+
+/* Stop the counters on this CPU */
+static void fsl7450_stop(void)
+{
+ /* freeze counters */
+ pmc_stop_ctrs();
+
+ oprofile_running = 0;
+
+ mb();
+}
+
+
+/* Handle the interrupt on this CPU, and log a sample for each
+ * event that triggered the interrupt */
+static void fsl7450_handle_interrupt(struct pt_regs *regs,
+ struct op_counter_config *ctr)
+{
+ unsigned long pc;
+ int is_kernel;
+ int val;
+ int i;
+
+ /* set the PMM bit (see comment below) */
+ mtmsr(mfmsr() | MSR_PMM);
+
+ pc = mfspr(SPRN_SIAR);
+ is_kernel = (pc >= KERNELBASE);
+
+ for (i = 0; i < NUM_CTRS; ++i) {
+ val = ctr_read(i);
+ if (val < 0) {
+ if (oprofile_running && ctr[i].enabled) {
+ oprofile_add_pc(pc, is_kernel, i);
+ ctr_write(i, reset_value[i]);
+ } else {
+ ctr_write(i, 0);
+ }
+ }
+ }
+
+ /* The freeze bit was set by the interrupt. */
+ /* Clear the freeze bit, and reenable the interrupt.
+ * The counters won't actually start until the rfi clears
+ * the PMM bit */
+ pmc_start_ctrs();
+}
+
+struct op_powerpc_model op_model_7450 = {
+ .reg_setup = fsl7450_reg_setup,
+ .cpu_setup = fsl7450_cpu_setup,
+ .start = fsl7450_start,
+ .stop = fsl7450_stop,
+ .handle_interrupt = fsl7450_handle_interrupt,
+};
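To make the counter-priming arithmetic above concrete: the counters count up and raise the interrupt when the value goes negative (most-significant bit set), so starting a counter at 0x80000000 - count leaves exactly count events before the overflow interrupt fires. A minimal sketch (illustrative only, not part of the patch):

/* Starting value that overflows after exactly "count" increments. */
static u32 pmc_start_value(u32 count)
{
	return 0x80000000UL - count;	/* e.g. count = 100000 -> 0x7ffe7960 */
}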
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index a3401b46f3b..659a021da0c 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -252,7 +252,7 @@ static unsigned long get_pc(struct pt_regs *regs)
return (unsigned long)__va(pc);
/* Not sure where we were */
- if (pc < KERNELBASE)
+ if (!is_kernel_addr(pc))
/* function descriptor madness */
return *((unsigned long *)kernel_unknown_bucket);
@@ -264,7 +264,7 @@ static int get_kernel(unsigned long pc)
int is_kernel;
if (!mmcra_has_sihv) {
- is_kernel = (pc >= KERNELBASE);
+ is_kernel = is_kernel_addr(pc);
} else {
unsigned long mmcra = mfspr(SPRN_MMCRA);
is_kernel = ((mmcra & MMCRA_SIPR) == 0);
diff --git a/arch/powerpc/oprofile/op_model_rs64.c b/arch/powerpc/oprofile/op_model_rs64.c
index e010b85996e..5c909ee609f 100644
--- a/arch/powerpc/oprofile/op_model_rs64.c
+++ b/arch/powerpc/oprofile/op_model_rs64.c
@@ -178,7 +178,6 @@ static void rs64_handle_interrupt(struct pt_regs *regs,
int val;
int i;
unsigned long pc = mfspr(SPRN_SIAR);
- int is_kernel = (pc >= KERNELBASE);
/* set the PMM bit (see comment below) */
mtmsrd(mfmsr() | MSR_PMM);
@@ -187,7 +186,7 @@ static void rs64_handle_interrupt(struct pt_regs *regs,
val = ctr_read(i);
if (val < 0) {
if (ctr[i].enabled) {
- oprofile_add_pc(pc, is_kernel, i);
+ oprofile_add_pc(pc, is_kernel_addr(pc), i);
ctr_write(i, reset_value[i]);
} else {
ctr_write(i, 0);
diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig
new file mode 100644
index 00000000000..7675e675dce
--- /dev/null
+++ b/arch/powerpc/platforms/83xx/Kconfig
@@ -0,0 +1,27 @@
+menu "Platform support"
+ depends on PPC_83xx
+
+choice
+ prompt "Machine Type"
+ default MPC834x_SYS
+
+config MPC834x_SYS
+ bool "Freescale MPC834x SYS"
+ select DEFAULT_UIMAGE
+ help
+ This option enables support for the MPC 834x SYS evaluation board.
+
+ Be aware that the PCI buses can only function when the SYS board is
+ plugged into the PIB (Platform IO Board) from Freescale, which
+ provides 3 PCI slots. The PIB's PCI initialization is the
+ bootloader's responsibility.
+
+endchoice
+
+config MPC834x
+ bool
+ select PPC_UDBG_16550
+ select PPC_INDIRECT_PCI
+ default y if MPC834x_SYS
+
+endmenu
diff --git a/arch/powerpc/platforms/83xx/Makefile b/arch/powerpc/platforms/83xx/Makefile
new file mode 100644
index 00000000000..9d8b28ef334
--- /dev/null
+++ b/arch/powerpc/platforms/83xx/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for the PowerPC 83xx linux kernel.
+#
+obj-$(CONFIG_MPC834x_SYS) += mpc834x_sys.o pci.o
diff --git a/arch/powerpc/platforms/83xx/mpc834x_sys.c b/arch/powerpc/platforms/83xx/mpc834x_sys.c
new file mode 100644
index 00000000000..2098dd05a77
--- /dev/null
+++ b/arch/powerpc/platforms/83xx/mpc834x_sys.c
@@ -0,0 +1,243 @@
+/*
+ * arch/powerpc/platforms/83xx/mpc834x_sys.c
+ *
+ * MPC834x SYS board specific routines
+ *
+ * Maintainer: Kumar Gala <galak@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/module.h>
+#include <linux/fsl_devices.h>
+
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/atomic.h>
+#include <asm/time.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/ipic.h>
+#include <asm/bootinfo.h>
+#include <asm/pci-bridge.h>
+#include <asm/mpc83xx.h>
+#include <asm/irq.h>
+#include <mm/mmu_decl.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <sysdev/fsl_soc.h>
+
+#include "mpc83xx.h"
+
+#ifndef CONFIG_PCI
+unsigned long isa_io_base = 0;
+unsigned long isa_mem_base = 0;
+#endif
+
+#ifdef CONFIG_PCI
+extern int mpc83xx_pci2_busno;
+
+static int
+mpc83xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
+{
+ static char pci_irq_table[][4] =
+ /*
+ * PCI IDSEL/INTPIN->INTLINE
+ * A B C D
+ */
+ {
+ {PIRQA, PIRQB, PIRQC, PIRQD}, /* idsel 0x11 */
+ {PIRQC, PIRQD, PIRQA, PIRQB}, /* idsel 0x12 */
+ {PIRQD, PIRQA, PIRQB, PIRQC}, /* idsel 0x13 */
+ {0, 0, 0, 0},
+ {PIRQA, PIRQB, PIRQC, PIRQD}, /* idsel 0x15 */
+ {PIRQD, PIRQA, PIRQB, PIRQC}, /* idsel 0x16 */
+ {PIRQC, PIRQD, PIRQA, PIRQB}, /* idsel 0x17 */
+ {PIRQB, PIRQC, PIRQD, PIRQA}, /* idsel 0x18 */
+ {0, 0, 0, 0}, /* idsel 0x19 */
+ {0, 0, 0, 0}, /* idsel 0x20 */
+ };
+
+ const long min_idsel = 0x11, max_idsel = 0x20, irqs_per_slot = 4;
+ return PCI_IRQ_TABLE_LOOKUP;
+}
+
+static int
+mpc83xx_exclude_device(u_char bus, u_char devfn)
+{
+ if (bus == 0 && PCI_SLOT(devfn) == 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ if (mpc83xx_pci2_busno)
+ if (bus == (mpc83xx_pci2_busno) && PCI_SLOT(devfn) == 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ return PCIBIOS_SUCCESSFUL;
+}
+#endif /* CONFIG_PCI */
+
+/* ************************************************************************
+ *
+ * Setup the architecture
+ *
+ */
+static void __init
+mpc834x_sys_setup_arch(void)
+{
+ struct device_node *np;
+
+ if (ppc_md.progress)
+ ppc_md.progress("mpc834x_sys_setup_arch()", 0);
+
+ np = of_find_node_by_type(NULL, "cpu");
+ if (np != 0) {
+ unsigned int *fp = (int *) get_property(np, "clock-frequency", NULL);
+ if (fp != 0)
+ loops_per_jiffy = *fp / HZ;
+ else
+ loops_per_jiffy = 50000000 / HZ;
+ of_node_put(np);
+ }
+
+#ifdef CONFIG_PCI
+ for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
+ add_bridge(np);
+
+ ppc_md.pci_swizzle = common_swizzle;
+ ppc_md.pci_map_irq = mpc83xx_map_irq;
+ ppc_md.pci_exclude_device = mpc83xx_exclude_device;
+#endif
+
+#ifdef CONFIG_ROOT_NFS
+ ROOT_DEV = Root_NFS;
+#else
+ ROOT_DEV = Root_HDA1;
+#endif
+}
+
+void __init
+mpc834x_sys_init_IRQ(void)
+{
+ u8 senses[8] = {
+ 0, /* EXT 0 */
+ IRQ_SENSE_LEVEL, /* EXT 1 */
+ IRQ_SENSE_LEVEL, /* EXT 2 */
+ 0, /* EXT 3 */
+#ifdef CONFIG_PCI
+ IRQ_SENSE_LEVEL, /* EXT 4 */
+ IRQ_SENSE_LEVEL, /* EXT 5 */
+ IRQ_SENSE_LEVEL, /* EXT 6 */
+ IRQ_SENSE_LEVEL, /* EXT 7 */
+#else
+ 0, /* EXT 4 */
+ 0, /* EXT 5 */
+ 0, /* EXT 6 */
+ 0, /* EXT 7 */
+#endif
+ };
+
+ ipic_init(get_immrbase() + 0x00700, 0, 0, senses, 8);
+
+ /* Initialize the default interrupt mapping priorities,
+ * in case the boot rom changed something on us.
+ */
+ ipic_set_default_priority();
+}
+
+#if defined(CONFIG_I2C_MPC) && defined(CONFIG_SENSORS_DS1374)
+extern ulong ds1374_get_rtc_time(void);
+extern int ds1374_set_rtc_time(ulong);
+
+static int __init
+mpc834x_rtc_hookup(void)
+{
+ struct timespec tv;
+
+ ppc_md.get_rtc_time = ds1374_get_rtc_time;
+ ppc_md.set_rtc_time = ds1374_set_rtc_time;
+
+ tv.tv_nsec = 0;
+ tv.tv_sec = (ppc_md.get_rtc_time)();
+ do_settimeofday(&tv);
+
+ return 0;
+}
+late_initcall(mpc834x_rtc_hookup);
+#endif
+
+static void
+mpc83xx_restart(char *cmd)
+{
+#define RST_OFFSET 0x00000900
+#define RST_PROT_REG 0x00000018
+#define RST_CTRL_REG 0x0000001c
+ __be32 __iomem *reg;
+
+ /* map reset register space */
+ reg = ioremap(get_immrbase() + RST_OFFSET, 0xff);
+
+ local_irq_disable();
+
+ /* enable software reset "RSTE" */
+ out_be32(reg + (RST_PROT_REG >> 2), 0x52535445);
+
+ /* set software hard reset */
+ out_be32(reg + (RST_CTRL_REG >> 2), 0x52535445);
+ for(;;);
+}
+
+static long __init
+mpc83xx_time_init(void)
+{
+#define SPCR_OFFSET 0x00000110
+#define SPCR_TBEN 0x00400000
+ __be32 __iomem *spcr = ioremap(get_immrbase() + SPCR_OFFSET, 4);
+ __be32 tmp;
+
+ tmp = in_be32(spcr);
+ out_be32(spcr, tmp|SPCR_TBEN);
+
+ iounmap(spcr);
+
+ return 0;
+}
+void __init
+platform_init(void)
+{
+ /* setup the PowerPC module struct */
+ ppc_md.setup_arch = mpc834x_sys_setup_arch;
+
+ ppc_md.init_IRQ = mpc834x_sys_init_IRQ;
+ ppc_md.get_irq = ipic_get_irq;
+
+ ppc_md.restart = mpc83xx_restart;
+
+ ppc_md.time_init = mpc83xx_time_init;
+ ppc_md.set_rtc_time = NULL;
+ ppc_md.get_rtc_time = NULL;
+ ppc_md.calibrate_decr = generic_calibrate_decr;
+
+ ppc_md.progress = udbg_progress;
+
+ if (ppc_md.progress)
+ ppc_md.progress("mpc834x_sys_init(): exit", 0);
+
+ return;
+}
+
+
diff --git a/arch/powerpc/platforms/83xx/mpc834x_sys.h b/arch/powerpc/platforms/83xx/mpc834x_sys.h
new file mode 100644
index 00000000000..e4ca39f6a86
--- /dev/null
+++ b/arch/powerpc/platforms/83xx/mpc834x_sys.h
@@ -0,0 +1,23 @@
+/*
+ * arch/powerpc/platforms/83xx/mpc834x_sys.h
+ *
+ * MPC834X SYS common board definitions
+ *
+ * Maintainer: Kumar Gala <galak@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MACH_MPC83XX_SYS_H__
+#define __MACH_MPC83XX_SYS_H__
+
+#define PIRQA MPC83xx_IRQ_EXT4
+#define PIRQB MPC83xx_IRQ_EXT5
+#define PIRQC MPC83xx_IRQ_EXT6
+#define PIRQD MPC83xx_IRQ_EXT7
+
+#endif /* __MACH_MPC83XX_SYS_H__ */
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
new file mode 100644
index 00000000000..ce9e66abef2
--- /dev/null
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -0,0 +1,14 @@
+#ifndef __MPC83XX_H__
+#define __MPC83XX_H__
+
+#include <linux/init.h>
+#include <linux/device.h>
+
+/*
+ * Declaration for the various functions exported by the
+ * mpc83xx_* files. Mostly for use by mpc83xx_setup
+ */
+
+extern int add_bridge(struct device_node *dev);
+
+#endif /* __MPC83XX_H__ */
diff --git a/arch/powerpc/platforms/83xx/pci.c b/arch/powerpc/platforms/83xx/pci.c
new file mode 100644
index 00000000000..469cdacc5bd
--- /dev/null
+++ b/arch/powerpc/platforms/83xx/pci.c
@@ -0,0 +1,99 @@
+/*
+ * FSL SoC setup code
+ *
+ * Maintained by Kumar Gala (see MAINTAINERS for contact information)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/io.h>
+#include <asm/pci-bridge.h>
+#include <asm/prom.h>
+#include <sysdev/fsl_soc.h>
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+int mpc83xx_pci2_busno;
+
+#ifdef CONFIG_PCI
+int __init add_bridge(struct device_node *dev)
+{
+ int len;
+ struct pci_controller *hose;
+ struct resource rsrc;
+ int *bus_range;
+ int primary = 1, has_address = 0;
+ phys_addr_t immr = get_immrbase();
+
+ DBG("Adding PCI host bridge %s\n", dev->full_name);
+
+ /* Fetch host bridge registers address */
+ has_address = (of_address_to_resource(dev, 0, &rsrc) == 0);
+
+ /* Get bus range if any */
+ bus_range = (int *) get_property(dev, "bus-range", &len);
+ if (bus_range == NULL || len < 2 * sizeof(int)) {
+ printk(KERN_WARNING "Can't get bus-range for %s, assume"
+ " bus 0\n", dev->full_name);
+ }
+
+ hose = pcibios_alloc_controller();
+ if (!hose)
+ return -ENOMEM;
+ hose->arch_data = dev;
+ hose->set_cfg_type = 1;
+
+ hose->first_busno = bus_range ? bus_range[0] : 0;
+ hose->last_busno = bus_range ? bus_range[1] : 0xff;
+
+ /* MPC83xx supports up to two host controllers, one at 0x8500 from
+ * immrbar and the other at 0x8600; we consider the one at 0x8500
+ * the primary controller.
+ */
+ /* PCI 1 */
+ if ((rsrc.start & 0xfffff) == 0x8500) {
+ setup_indirect_pci(hose, immr + 0x8300, immr + 0x8304);
+ }
+ /* PCI 2*/
+ if ((rsrc.start & 0xfffff) == 0x8600) {
+ setup_indirect_pci(hose, immr + 0x8380, immr + 0x8384);
+ primary = 0;
+ hose->bus_offset = hose->first_busno;
+ mpc83xx_pci2_busno = hose->first_busno;
+ }
+
+ printk(KERN_INFO "Found MPC83xx PCI host bridge at 0x%08lx. "
+ "Firmware bus number: %d->%d\n",
+ rsrc.start, hose->first_busno, hose->last_busno);
+
+ DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
+ hose, hose->cfg_addr, hose->cfg_data);
+
+ /* Interpret the "ranges" property */
+ /* This also maps the I/O region and sets isa_io/mem_base */
+ pci_process_bridge_OF_ranges(hose, dev, primary);
+
+ return 0;
+}
+
+#endif
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index 8836b3a0066..04073fd987e 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -7,6 +7,7 @@ endif
endif
obj-$(CONFIG_PPC_CHRP) += chrp/
obj-$(CONFIG_4xx) += 4xx/
+obj-$(CONFIG_PPC_83xx) += 83xx/
obj-$(CONFIG_85xx) += 85xx/
obj-$(CONFIG_PPC_PSERIES) += pseries/
obj-$(CONFIG_PPC_ISERIES) += iseries/
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
new file mode 100644
index 00000000000..3157071e241
--- /dev/null
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -0,0 +1,13 @@
+menu "Cell Broadband Engine options"
+ depends on PPC_CELL
+
+config SPU_FS
+ tristate "SPU file system"
+ default m
+ depends on PPC_CELL
+ help
+ The SPU file system is used to access Synergistic Processing
+ Units on machines implementing the Broadband Processor
+ Architecture.
+
+endmenu
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 55e094b96bc..16031b565be 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -1,2 +1,10 @@
obj-y += interrupt.o iommu.o setup.o spider-pic.o
+obj-y += pervasive.o
+
obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SPU_FS) += spufs/ spu-base.o
+
+spu-base-y += spu_base.o spu_priv1.o
+
+builtin-spufs-$(CONFIG_SPU_FS) += spu_syscalls.o
+obj-y += $(builtin-spufs-m)
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 7fbe78a9327..63aa52acf44 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -23,6 +23,7 @@
#include <linux/config.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/types.h>
@@ -55,6 +56,7 @@ struct iic_regs {
struct iic {
struct iic_regs __iomem *regs;
+ u8 target_id;
};
static DEFINE_PER_CPU(struct iic, iic);
@@ -172,12 +174,11 @@ int iic_get_irq(struct pt_regs *regs)
return irq;
}
-static struct iic_regs __iomem *find_iic(int cpu)
+static int setup_iic(int cpu, struct iic *iic)
{
struct device_node *np;
int nodeid = cpu / 2;
unsigned long regs;
- struct iic_regs __iomem *iic_regs;
for (np = of_find_node_by_type(NULL, "cpu");
np;
@@ -188,20 +189,23 @@ static struct iic_regs __iomem *find_iic(int cpu)
if (!np) {
printk(KERN_WARNING "IIC: CPU %d not found\n", cpu);
- iic_regs = NULL;
- } else {
- regs = *(long *)get_property(np, "iic", NULL);
-
- /* hack until we have decided on the devtree info */
- regs += 0x400;
- if (cpu & 1)
- regs += 0x20;
-
- printk(KERN_DEBUG "IIC for CPU %d at %lx\n", cpu, regs);
- iic_regs = __ioremap(regs, sizeof(struct iic_regs),
- _PAGE_NO_CACHE);
+ iic->regs = NULL;
+ iic->target_id = 0xff;
+ return -ENODEV;
}
- return iic_regs;
+
+ regs = *(long *)get_property(np, "iic", NULL);
+
+ /* hack until we have decided on the devtree info */
+ regs += 0x400;
+ if (cpu & 1)
+ regs += 0x20;
+
+ printk(KERN_DEBUG "IIC for CPU %d at %lx\n", cpu, regs);
+ iic->regs = __ioremap(regs, sizeof(struct iic_regs),
+ _PAGE_NO_CACHE);
+ iic->target_id = (nodeid << 4) + ((cpu & 1) ? 0xf : 0xe);
+ return 0;
}
#ifdef CONFIG_SMP
@@ -227,6 +231,12 @@ void iic_cause_IPI(int cpu, int mesg)
out_be64(&per_cpu(iic, cpu).regs->generate, (IIC_NUM_IPIS - 1 - mesg) << 4);
}
+u8 iic_get_target_id(int cpu)
+{
+ return per_cpu(iic, cpu).target_id;
+}
+EXPORT_SYMBOL_GPL(iic_get_target_id);
+
static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
smp_message_recv(iic_irq_to_ipi(irq), regs);
@@ -276,7 +286,7 @@ void iic_init_IRQ(void)
irq_offset = 0;
for_each_cpu(cpu) {
iic = &per_cpu(iic, cpu);
- iic->regs = find_iic(cpu);
+ setup_iic(cpu, iic);
if (iic->regs)
out_be64(&iic->regs->prio, 0xff);
}
diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h
index 37d58e6fd0c..a14bd38791c 100644
--- a/arch/powerpc/platforms/cell/interrupt.h
+++ b/arch/powerpc/platforms/cell/interrupt.h
@@ -54,6 +54,7 @@ extern void iic_setup_cpu(void);
extern void iic_local_enable(void);
extern void iic_local_disable(void);
+extern u8 iic_get_target_id(int cpu);
extern void spider_init_IRQ(void);
extern int spider_get_irq(unsigned long int_pending);
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 74f999b4ac9..46e7cb9c3e6 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -29,6 +29,8 @@
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
#include <asm/sections.h>
#include <asm/iommu.h>
@@ -40,6 +42,7 @@
#include <asm/abs_addr.h>
#include <asm/system.h>
#include <asm/ppc-pci.h>
+#include <asm/udbg.h>
#include "iommu.h"
@@ -220,8 +223,6 @@ set_iopt_cache(void __iomem *base, unsigned long index,
{
unsigned long __iomem *tags = base + IOC_PT_CACHE_DIR;
unsigned long __iomem *p = base + IOC_PT_CACHE_REG;
- pr_debug("iopt %02lx was v%016lx/t%016lx, store v%016lx/t%016lx\n",
- index, get_iopt_cache(base, index, &oldtag), oldtag, val, tag);
out_be64(p, val);
out_be64(&tags[index], tag);
@@ -248,67 +249,176 @@ set_iocmd_config(void __iomem *base)
out_be64(p, conf | IOCMD_CONF_TE);
}
-/* FIXME: get these from the device tree */
-#define ioc_base 0x20000511000ull
-#define ioc_mmio_base 0x20000510000ull
-#define ioid 0x48a
-#define iopt_phys_offset (- 0x20000000) /* We have a 512MB offset from the SB */
-#define io_page_size 0x1000000
-
-static unsigned long map_iopt_entry(unsigned long address)
+static void enable_mapping(void __iomem *base, void __iomem *mmio_base)
{
- switch (address >> 20) {
- case 0x600:
- address = 0x24020000000ull; /* spider i/o */
- break;
- default:
- address += iopt_phys_offset;
- break;
- }
-
- return get_iopt_entry(address, ioid, IOPT_PROT_RW);
+ set_iocmd_config(base);
+ set_iost_origin(mmio_base);
}
-static void iommu_bus_setup_null(struct pci_bus *b) { }
static void iommu_dev_setup_null(struct pci_dev *d) { }
+static void iommu_bus_setup_null(struct pci_bus *b) { }
+
+struct cell_iommu {
+ unsigned long base;
+ unsigned long mmio_base;
+ void __iomem *mapped_base;
+ void __iomem *mapped_mmio_base;
+};
+
+static struct cell_iommu cell_iommus[NR_CPUS];
/* initialize the iommu to support a simple linear mapping
* for each DMA window used by any device. For now, we
* happen to know that there is only one DMA window in use,
* starting at iopt_phys_offset. */
-static void cell_map_iommu(void)
+static void cell_do_map_iommu(struct cell_iommu *iommu,
+ unsigned int ioid,
+ unsigned long map_start,
+ unsigned long map_size)
{
- unsigned long address;
- void __iomem *base;
+ unsigned long io_address, real_address;
+ void __iomem *ioc_base, *ioc_mmio_base;
ioste ioste;
unsigned long index;
- base = __ioremap(ioc_base, 0x1000, _PAGE_NO_CACHE);
- pr_debug("%lx mapped to %p\n", ioc_base, base);
- set_iocmd_config(base);
- iounmap(base);
+ /* we pretend the io page table was at a very high address */
+ const unsigned long fake_iopt = 0x10000000000ul;
+ const unsigned long io_page_size = 0x1000000; /* use 16M pages */
+ const unsigned long io_segment_size = 0x10000000; /* 256M */
+
+ ioc_base = iommu->mapped_base;
+ ioc_mmio_base = iommu->mapped_mmio_base;
+
+ for (real_address = 0, io_address = 0;
+ io_address <= map_start + map_size;
+ real_address += io_page_size, io_address += io_page_size) {
+ ioste = get_iost_entry(fake_iopt, io_address, io_page_size);
+ if ((real_address % io_segment_size) == 0) /* segment start */
+ set_iost_cache(ioc_mmio_base,
+ io_address >> 28, ioste);
+ index = get_ioc_hash_1way(ioste, io_address);
+ pr_debug("addr %08lx, index %02lx, ioste %016lx\n",
+ io_address, index, ioste.val);
+ set_iopt_cache(ioc_mmio_base,
+ get_ioc_hash_1way(ioste, io_address),
+ get_ioc_tag(ioste, io_address),
+ get_iopt_entry(real_address-map_start, ioid, IOPT_PROT_RW));
+ }
+}
- base = __ioremap(ioc_mmio_base, 0x1000, _PAGE_NO_CACHE);
- pr_debug("%lx mapped to %p\n", ioc_mmio_base, base);
+static void iommu_devnode_setup(struct device_node *d)
+{
+ unsigned int *ioid;
+ unsigned long *dma_window, map_start, map_size, token;
+ struct cell_iommu *iommu;
- set_iost_origin(base);
+ ioid = (unsigned int *)get_property(d, "ioid", NULL);
+ if (!ioid)
+ pr_debug("No ioid entry found !\n");
- for (address = 0; address < 0x100000000ul; address += io_page_size) {
- ioste = get_iost_entry(0x10000000000ul, address, io_page_size);
- if ((address & 0xfffffff) == 0) /* segment start */
- set_iost_cache(base, address >> 28, ioste);
- index = get_ioc_hash_1way(ioste, address);
- pr_debug("addr %08lx, index %02lx, ioste %016lx\n",
- address, index, ioste.val);
- set_iopt_cache(base,
- get_ioc_hash_1way(ioste, address),
- get_ioc_tag(ioste, address),
- map_iopt_entry(address));
- }
- iounmap(base);
+ dma_window = (unsigned long *)get_property(d, "ibm,dma-window", NULL);
+ if (!dma_window)
+ pr_debug("No ibm,dma-window entry found !\n");
+
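+ /* the ibm,dma-window property is read here as (token, start, size);
+ * the upper half of the token selects the node whose iommu gets
+ * programmed */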
+ map_start = dma_window[1];
+ map_size = dma_window[2];
+ token = dma_window[0] >> 32;
+
+ iommu = &cell_iommus[token];
+
+ cell_do_map_iommu(iommu, *ioid, map_start, map_size);
+}
+
+static void iommu_bus_setup(struct pci_bus *b)
+{
+ struct device_node *d = (struct device_node *)b->sysdata;
+ iommu_devnode_setup(d);
+}
+
+
+static int cell_map_iommu_hardcoded(int num_nodes)
+{
+ struct cell_iommu *iommu = NULL;
+
+ pr_debug("%s(%d): Using hardcoded defaults\n", __FUNCTION__, __LINE__);
+
+ /* node 0 */
+ iommu = &cell_iommus[0];
+ iommu->mapped_base = __ioremap(0x20000511000, 0x1000, _PAGE_NO_CACHE);
+ iommu->mapped_mmio_base = __ioremap(0x20000510000, 0x1000, _PAGE_NO_CACHE);
+
+ enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);
+
+ cell_do_map_iommu(iommu, 0x048a,
+ 0x20000000ul, 0x20000000ul);
+
+ if (num_nodes < 2)
+ return 0;
+
+ /* node 1 */
+ iommu = &cell_iommus[1];
+ iommu->mapped_base = __ioremap(0x30000511000, 0x1000, _PAGE_NO_CACHE);
+ iommu->mapped_mmio_base = __ioremap(0x30000510000, 0x1000, _PAGE_NO_CACHE);
+
+ enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);
+
+ cell_do_map_iommu(iommu, 0x048a,
+ 0x20000000ul, 0x20000000ul);
+
+ return 0;
}
+static int cell_map_iommu(void)
+{
+ unsigned int num_nodes = 0, *node_id;
+ unsigned long *base, *mmio_base;
+ struct device_node *dn;
+ struct cell_iommu *iommu = NULL;
+
+ /* determine number of nodes (=iommus) */
+ pr_debug("%s(%d): determining number of nodes...", __FUNCTION__, __LINE__);
+ for (dn = of_find_node_by_type(NULL, "cpu");
+ dn;
+ dn = of_find_node_by_type(dn, "cpu")) {
+ node_id = (unsigned int *)get_property(dn, "node-id", NULL);
+
+ if (num_nodes < *node_id)
+ num_nodes = *node_id;
+ }
+
+ num_nodes++;
+ pr_debug("%i found.\n", num_nodes);
+
+ /* map the iommu registers for each node */
+ pr_debug("%s(%d): Looping through nodes\n", __FUNCTION__, __LINE__);
+ for (dn = of_find_node_by_type(NULL, "cpu");
+ dn;
+ dn = of_find_node_by_type(dn, "cpu")) {
+
+ node_id = (unsigned int *)get_property(dn, "node-id", NULL);
+ base = (unsigned long *)get_property(dn, "ioc-cache", NULL);
+ mmio_base = (unsigned long *)get_property(dn, "ioc-translation", NULL);
+
+ if (!base || !mmio_base || !node_id)
+ return cell_map_iommu_hardcoded(num_nodes);
+
+ iommu = &cell_iommus[*node_id];
+ iommu->base = *base;
+ iommu->mmio_base = *mmio_base;
+
+ iommu->mapped_base = __ioremap(*base, 0x1000, _PAGE_NO_CACHE);
+ iommu->mapped_mmio_base = __ioremap(*mmio_base, 0x1000, _PAGE_NO_CACHE);
+
+ enable_mapping(iommu->mapped_base,
+ iommu->mapped_mmio_base);
+
+ /* everything else will be done in iommu_bus_setup */
+ }
+
+ return 1;
+}
+
static void *cell_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
@@ -365,11 +475,28 @@ static int cell_dma_supported(struct device *dev, u64 mask)
void cell_init_iommu(void)
{
- cell_map_iommu();
-
- /* Direct I/O, IOMMU off */
- ppc_md.iommu_dev_setup = iommu_dev_setup_null;
- ppc_md.iommu_bus_setup = iommu_bus_setup_null;
+ int setup_bus = 0;
+
+ if (of_find_node_by_path("/mambo")) {
+ pr_info("Not using iommu on systemsim\n");
+ } else {
+
+ if (!(of_chosen &&
+ get_property(of_chosen, "linux,iommu-off", NULL)))
+ setup_bus = cell_map_iommu();
+
+ if (setup_bus) {
+ pr_debug("%s: IOMMU mapping activated\n", __FUNCTION__);
+ ppc_md.iommu_dev_setup = iommu_dev_setup_null;
+ ppc_md.iommu_bus_setup = iommu_bus_setup;
+ } else {
+ pr_debug("%s: IOMMU mapping activated, "
+ "no device action necessary\n", __FUNCTION__);
+ /* Direct I/O, IOMMU off */
+ ppc_md.iommu_dev_setup = iommu_dev_setup_null;
+ ppc_md.iommu_bus_setup = iommu_bus_setup_null;
+ }
+ }
pci_dma_ops.alloc_coherent = cell_alloc_coherent;
pci_dma_ops.free_coherent = cell_free_coherent;
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
new file mode 100644
index 00000000000..e0e051c675d
--- /dev/null
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -0,0 +1,229 @@
+/*
+ * CBE Pervasive Monitor and Debug
+ *
+ * (C) Copyright IBM Corporation 2005
+ *
+ * Authors: Maximino Aguilar (maguilar@us.ibm.com)
+ * Michael N. Day (mnday@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#undef DEBUG
+
+#include <linux/config.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/percpu.h>
+#include <linux/types.h>
+#include <linux/kallsyms.h>
+
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/prom.h>
+#include <asm/pgtable.h>
+#include <asm/reg.h>
+
+#include "pervasive.h"
+
+static DEFINE_SPINLOCK(cbe_pervasive_lock);
+struct cbe_pervasive {
+ struct pmd_regs __iomem *regs;
+ unsigned int thread;
+};
+
+/* can't use per_cpu from setup_arch */
+static struct cbe_pervasive cbe_pervasive[NR_CPUS];
+
+static void __init cbe_enable_pause_zero(void)
+{
+ unsigned long thread_switch_control;
+ unsigned long temp_register;
+ struct cbe_pervasive *p;
+ int thread;
+
+ spin_lock_irq(&cbe_pervasive_lock);
+ p = &cbe_pervasive[smp_processor_id()];
+
+ if (!p->regs)
+ goto out;
+
+ pr_debug("Power Management: CPU %d\n", smp_processor_id());
+
+ /* Enable Pause(0) control bit */
+ temp_register = in_be64(&p->regs->pm_control);
+
+ out_be64(&p->regs->pm_control,
+ temp_register|PMD_PAUSE_ZERO_CONTROL);
+
+ /* Enable DEC and EE interrupt request */
+ thread_switch_control = mfspr(SPRN_TSC_CELL);
+ thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;
+
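+ /* CTRL[CT] tells us which hardware thread we are running on, so
+ * only that thread's decrementer wakeup gets enabled */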
+ switch ((mfspr(SPRN_CTRLF) & CTRL_CT)) {
+ case CTRL_CT0:
+ thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
+ thread = 0;
+ break;
+ case CTRL_CT1:
+ thread_switch_control |= TSC_CELL_DEC_ENABLE_1;
+ thread = 1;
+ break;
+ default:
+ printk(KERN_WARNING "%s: unknown configuration\n",
+ __FUNCTION__);
+ thread = -1;
+ break;
+ }
+
+ if (p->thread != thread)
+ printk(KERN_WARNING "%s: device tree inconsistant, "
+ "cpu %i: %d/%d\n", __FUNCTION__,
+ smp_processor_id(),
+ p->thread, thread);
+
+ mtspr(SPRN_TSC_CELL, thread_switch_control);
+
+out:
+ spin_unlock_irq(&cbe_pervasive_lock);
+}
+
+static void cbe_idle(void)
+{
+ unsigned long ctrl;
+
+ cbe_enable_pause_zero();
+
+ while (1) {
+ if (!need_resched()) {
+ local_irq_disable();
+ while (!need_resched()) {
+ /* go into low thread priority */
+ HMT_low();
+
+ /*
+ * atomically disable thread execution
+ * and runlatch.
+ * External and Decrementer exceptions
+ * are still handled while the thread
+ * is disabled, but are now delivered
+ * through cbe_system_reset_exception()
+ */
+ ctrl = mfspr(SPRN_CTRLF);
+ ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
+ mtspr(SPRN_CTRLT, ctrl);
+ }
+ /* restore thread prio */
+ HMT_medium();
+ local_irq_enable();
+ }
+
+ /*
+ * turn runlatch on again before scheduling the
+ * process we just woke up
+ */
+ ppc64_runlatch_on();
+
+ preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
+ }
+}
+
+static int cbe_system_reset_exception(struct pt_regs *regs)
+{
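+ /* SRR1 encodes why the thread woke up from pause(0): route external
+ * and decrementer wakeups to their usual handlers and fall back to
+ * a real system reset for anything else */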
+ switch (regs->msr & SRR1_WAKEMASK) {
+ case SRR1_WAKEEE:
+ do_IRQ(regs);
+ break;
+ case SRR1_WAKEDEC:
+ timer_interrupt(regs);
+ break;
+ case SRR1_WAKEMT:
+ /* no action required */
+ break;
+ default:
+ /* do system reset */
+ return 0;
+ }
+ /* everything handled */
+ return 1;
+}
+
+static int __init cbe_find_pmd_mmio(int cpu, struct cbe_pervasive *p)
+{
+ struct device_node *node;
+ unsigned int *int_servers;
+ char *addr;
+ unsigned long real_address;
+ unsigned int size;
+
+ struct pmd_regs __iomem *pmd_mmio_area;
+ int hardid, thread;
+ int proplen;
+
+ pmd_mmio_area = NULL;
+ hardid = get_hard_smp_processor_id(cpu);
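+ /* find the cpu node whose interrupt server list contains our
+ * hardware thread id, then read its "pervasive" property, which
+ * holds the (real address, size) pair of the register area */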
+ for (node = NULL; (node = of_find_node_by_type(node, "cpu"));) {
+ int_servers = (void *) get_property(node,
+ "ibm,ppc-interrupt-server#s", &proplen);
+ if (!int_servers) {
+ printk(KERN_WARNING "%s misses "
+ "ibm,ppc-interrupt-server#s property",
+ node->full_name);
+ continue;
+ }
+ for (thread = 0; thread < proplen / sizeof (int); thread++) {
+ if (hardid == int_servers[thread]) {
+ addr = get_property(node, "pervasive", NULL);
+ goto found;
+ }
+ }
+ }
+
+ printk(KERN_WARNING "%s: CPU %d not found\n", __FUNCTION__, cpu);
+ return -EINVAL;
+
+found:
+ real_address = *(unsigned long*) addr;
+ addr += sizeof (unsigned long);
+ size = *(unsigned int*) addr;
+
+ pr_debug("pervasive area for CPU %d at %lx, size %x\n",
+ cpu, real_address, size);
+ p->regs = __ioremap(real_address, size, _PAGE_NO_CACHE);
+ p->thread = thread;
+ return 0;
+}
+
+void __init cell_pervasive_init(void)
+{
+ struct cbe_pervasive *p;
+ int cpu;
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
+ return;
+
+ for_each_cpu(cpu) {
+ p = &cbe_pervasive[cpu];
+ ret = cbe_find_pmd_mmio(cpu, p);
+ if (ret)
+ return;
+ }
+
+ ppc_md.idle_loop = cbe_idle;
+ ppc_md.system_reset_exception = cbe_system_reset_exception;
+}
diff --git a/arch/powerpc/platforms/cell/pervasive.h b/arch/powerpc/platforms/cell/pervasive.h
new file mode 100644
index 00000000000..da1fb85ca3e
--- /dev/null
+++ b/arch/powerpc/platforms/cell/pervasive.h
@@ -0,0 +1,62 @@
+/*
+ * Cell Pervasive Monitor and Debug interface and HW structures
+ *
+ * (C) Copyright IBM Corporation 2005
+ *
+ * Authors: Maximino Aguilar (maguilar@us.ibm.com)
+ * David J. Erb (djerb@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+#ifndef PERVASIVE_H
+#define PERVASIVE_H
+
+struct pmd_regs {
+ u8 pad_0x0000_0x0800[0x0800 - 0x0000]; /* 0x0000 */
+
+ /* Thermal Sensor Registers */
+ u64 ts_ctsr1; /* 0x0800 */
+ u64 ts_ctsr2; /* 0x0808 */
+ u64 ts_mtsr1; /* 0x0810 */
+ u64 ts_mtsr2; /* 0x0818 */
+ u64 ts_itr1; /* 0x0820 */
+ u64 ts_itr2; /* 0x0828 */
+ u64 ts_gitr; /* 0x0830 */
+ u64 ts_isr; /* 0x0838 */
+ u64 ts_imr; /* 0x0840 */
+ u64 tm_cr1; /* 0x0848 */
+ u64 tm_cr2; /* 0x0850 */
+ u64 tm_simr; /* 0x0858 */
+ u64 tm_tpr; /* 0x0860 */
+ u64 tm_str1; /* 0x0868 */
+ u64 tm_str2; /* 0x0870 */
+ u64 tm_tsr; /* 0x0878 */
+
+ /* Power Management */
+ u64 pm_control; /* 0x0880 */
+#define PMD_PAUSE_ZERO_CONTROL 0x10000
+ u64 pm_status; /* 0x0888 */
+
+ /* Time Base Register */
+ u64 tbr; /* 0x0890 */
+
+ u8 pad_0x0898_0x1000 [0x1000 - 0x0898]; /* 0x0898 */
+};
+
+void __init cell_pervasive_init(void);
+
+#endif
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 9a495634d0c..b33a4443f5a 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -33,6 +33,7 @@
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
+#include <asm/kexec.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
@@ -48,6 +49,7 @@
#include "interrupt.h"
#include "iommu.h"
+#include "pervasive.h"
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -55,7 +57,7 @@
#define DBG(fmt...)
#endif
-void cell_show_cpuinfo(struct seq_file *m)
+static void cell_show_cpuinfo(struct seq_file *m)
{
struct device_node *root;
const char *model = "";
@@ -67,6 +69,77 @@ void cell_show_cpuinfo(struct seq_file *m)
of_node_put(root);
}
+#ifdef CONFIG_SPARSEMEM
+static int __init find_spu_node_id(struct device_node *spe)
+{
+ unsigned int *id;
+#ifdef CONFIG_NUMA
+ struct device_node *cpu;
+ cpu = spe->parent->parent;
+ id = (unsigned int *)get_property(cpu, "node-id", NULL);
+#else
+ id = NULL;
+#endif
+ return id ? *id : 0;
+}
+
+static void __init cell_spuprop_present(struct device_node *spe,
+ const char *prop, int early)
+{
+ struct address_prop {
+ unsigned long address;
+ unsigned int len;
+ } __attribute__((packed)) *p;
+ int proplen;
+
+ unsigned long start_pfn, end_pfn, pfn;
+ int node_id;
+
+ p = (void*)get_property(spe, prop, &proplen);
+ WARN_ON(proplen != sizeof (*p));
+
+ node_id = find_spu_node_id(spe);
+
+ start_pfn = p->address >> PAGE_SHIFT;
+ end_pfn = (p->address + p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ /* We need to call memory_present *before* the call to sparse_init,
+ but we can initialize the page structs only *after* that call.
+ Thus, we're being called twice. */
+ if (early)
+ memory_present(node_id, start_pfn, end_pfn);
+ else {
+ /* As the pages backing SPU LS and I/O are outside the range
+ of regular memory, their page structs were not initialized
+ by free_area_init. Do it here instead. */
+ for (pfn = start_pfn; pfn < end_pfn; pfn++) {
+ struct page *page = pfn_to_page(pfn);
+ set_page_links(page, ZONE_DMA, node_id, pfn);
+ set_page_count(page, 1);
+ reset_page_mapcount(page);
+ SetPageReserved(page);
+ INIT_LIST_HEAD(&page->lru);
+ }
+ }
+}
+
+static void __init cell_spumem_init(int early)
+{
+ struct device_node *node;
+ for (node = of_find_node_by_type(NULL, "spe");
+ node; node = of_find_node_by_type(node, "spe")) {
+ cell_spuprop_present(node, "local-store", early);
+ cell_spuprop_present(node, "problem", early);
+ cell_spuprop_present(node, "priv1", early);
+ cell_spuprop_present(node, "priv2", early);
+ }
+}
+#else
+static void __init cell_spumem_init(int early)
+{
+}
+#endif
+
static void cell_progress(char *s, unsigned short hex)
{
printk("*** %04x : %s\n", hex, s ? s : "");
@@ -93,11 +166,14 @@ static void __init cell_setup_arch(void)
init_pci_config_tokens();
find_and_init_phbs();
spider_init_IRQ();
+ cell_pervasive_init();
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
mmio_nvram_init();
+
+ cell_spumem_init(0);
}
/*
@@ -113,6 +189,8 @@ static void __init cell_init_early(void)
ppc64_interrupt_controller = IC_CELL_PIC;
+ cell_spumem_init(1);
+
DBG(" <- cell_init_early()\n");
}
@@ -125,6 +203,15 @@ static int __init cell_probe(int platform)
return 1;
}
+/*
+ * Cell has no legacy IO; anything calling this function has to
+ * fail or bad things will happen
+ */
+static int cell_check_legacy_ioport(unsigned int baseport)
+{
+ return -ENODEV;
+}
+
struct machdep_calls __initdata cell_md = {
.probe = cell_probe,
.setup_arch = cell_setup_arch,
@@ -137,5 +224,11 @@ struct machdep_calls __initdata cell_md = {
.get_rtc_time = rtas_get_rtc_time,
.set_rtc_time = rtas_set_rtc_time,
.calibrate_decr = generic_calibrate_decr,
+ .check_legacy_ioport = cell_check_legacy_ioport,
.progress = cell_progress,
+#ifdef CONFIG_KEXEC
+ .machine_kexec = default_machine_kexec,
+ .machine_kexec_prepare = default_machine_kexec_prepare,
+ .machine_crash_shutdown = default_machine_crash_shutdown,
+#endif
};
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index de96eadf419..bdf6c5fe58c 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -86,7 +86,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
pcpu = get_hard_smp_processor_id(lcpu);
/* Fixup atomic count: it exited inside IRQ handler. */
- paca[lcpu].__current->thread_info->preempt_count = 0;
+ task_thread_info(paca[lcpu].__current)->preempt_count = 0;
/*
* If the RTAS start-cpu token does not exist then presume the
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
new file mode 100644
index 00000000000..d75ae03df68
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -0,0 +1,711 @@
+/*
+ * Low-level SPU handling
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#undef DEBUG
+
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/semaphore.h>
+#include <asm/spu.h>
+#include <asm/mmu_context.h>
+
+#include "interrupt.h"
+
+static int __spu_trap_invalid_dma(struct spu *spu)
+{
+ pr_debug("%s\n", __FUNCTION__);
+ force_sig(SIGBUS, /* info, */ current);
+ return 0;
+}
+
+static int __spu_trap_dma_align(struct spu *spu)
+{
+ pr_debug("%s\n", __FUNCTION__);
+ force_sig(SIGBUS, /* info, */ current);
+ return 0;
+}
+
+static int __spu_trap_error(struct spu *spu)
+{
+ pr_debug("%s\n", __FUNCTION__);
+ force_sig(SIGILL, /* info, */ current);
+ return 0;
+}
+
+static void spu_restart_dma(struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
+ out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
+}
+
+static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+ struct mm_struct *mm = spu->mm;
+ u64 esid, vsid;
+
+ pr_debug("%s\n", __FUNCTION__);
+
+ if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
+ /* SLBs are pre-loaded for context switch, so
+ * we should never get here!
+ */
+ printk("%s: invalid access during switch!\n", __func__);
+ return 1;
+ }
+ if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
+ /* Future: support kernel segments so that drivers
+ * can use SPUs.
+ */
+ pr_debug("invalid region access at %016lx\n", ea);
+ return 1;
+ }
+
+ esid = (ea & ESID_MASK) | SLB_ESID_V;
+ vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER;
+ if (in_hugepage_area(mm->context, ea))
+ vsid |= SLB_VSID_L;
+
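+ /* write the new translation into the SLB slot selected by the
+ * simple round-robin replacement counter; the SPE has eight
+ * SLB entries */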
+ out_be64(&priv2->slb_index_W, spu->slb_replace);
+ out_be64(&priv2->slb_vsid_RW, vsid);
+ out_be64(&priv2->slb_esid_RW, esid);
+
+ spu->slb_replace++;
+ if (spu->slb_replace >= 8)
+ spu->slb_replace = 0;
+
+ spu_restart_dma(spu);
+
+ return 0;
+}
+
+extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
+static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
+{
+ pr_debug("%s\n", __FUNCTION__);
+
+ /* Handle kernel space hash faults immediately.
+ User hash faults need to be deferred to process context. */
+ if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
+ && REGION_ID(ea) != USER_REGION_ID
+ && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
+ spu_restart_dma(spu);
+ return 0;
+ }
+
+ if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
+ printk("%s: invalid access during switch!\n", __func__);
+ return 1;
+ }
+
+ spu->dar = ea;
+ spu->dsisr = dsisr;
+ mb();
+ if (spu->stop_callback)
+ spu->stop_callback(spu);
+ return 0;
+}
+
+static int __spu_trap_mailbox(struct spu *spu)
+{
+ if (spu->ibox_callback)
+ spu->ibox_callback(spu);
+
+ /* atomically disable SPU mailbox interrupts */
+ spin_lock(&spu->register_lock);
+ spu_int_mask_and(spu, 2, ~0x1);
+ spin_unlock(&spu->register_lock);
+ return 0;
+}
+
+static int __spu_trap_stop(struct spu *spu)
+{
+ pr_debug("%s\n", __FUNCTION__);
+ spu->stop_code = in_be32(&spu->problem->spu_status_R);
+ if (spu->stop_callback)
+ spu->stop_callback(spu);
+ return 0;
+}
+
+static int __spu_trap_halt(struct spu *spu)
+{
+ pr_debug("%s\n", __FUNCTION__);
+ spu->stop_code = in_be32(&spu->problem->spu_status_R);
+ if (spu->stop_callback)
+ spu->stop_callback(spu);
+ return 0;
+}
+
+static int __spu_trap_tag_group(struct spu *spu)
+{
+ pr_debug("%s\n", __FUNCTION__);
+ /* wake_up(&spu->dma_wq); */
+ return 0;
+}
+
+static int __spu_trap_spubox(struct spu *spu)
+{
+ if (spu->wbox_callback)
+ spu->wbox_callback(spu);
+
+ /* atomically disable SPU mailbox interrupts */
+ spin_lock(&spu->register_lock);
+ spu_int_mask_and(spu, 2, ~0x10);
+ spin_unlock(&spu->register_lock);
+ return 0;
+}
+
+static irqreturn_t
+spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
+{
+ struct spu *spu;
+
+ spu = data;
+ spu->class_0_pending = 1;
+ if (spu->stop_callback)
+ spu->stop_callback(spu);
+
+ return IRQ_HANDLED;
+}
+
+int
+spu_irq_class_0_bottom(struct spu *spu)
+{
+ unsigned long stat, mask;
+
+ spu->class_0_pending = 0;
+
+ mask = spu_int_mask_get(spu, 0);
+ stat = spu_int_stat_get(spu, 0);
+
+ stat &= mask;
+
+ if (stat & 1) /* invalid MFC DMA */
+ __spu_trap_invalid_dma(spu);
+
+ if (stat & 2) /* invalid DMA alignment */
+ __spu_trap_dma_align(spu);
+
+ if (stat & 4) /* error on SPU */
+ __spu_trap_error(spu);
+
+ spu_int_stat_clear(spu, 0, stat);
+
+ return (stat & 0x7) ? -EIO : 0;
+}
+EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
+
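+/* class 1 interrupts signal MFC translation faults: SLB segment
+ * misses are resolved right here, while page table misses are
+ * recorded and deferred to process context via the stop callback */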
+static irqreturn_t
+spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
+{
+ struct spu *spu;
+ unsigned long stat, mask, dar, dsisr;
+
+ spu = data;
+
+ /* atomically read & clear class1 status. */
+ spin_lock(&spu->register_lock);
+ mask = spu_int_mask_get(spu, 1);
+ stat = spu_int_stat_get(spu, 1) & mask;
+ dar = spu_mfc_dar_get(spu);
+ dsisr = spu_mfc_dsisr_get(spu);
+ if (stat & 2) /* mapping fault */
+ spu_mfc_dsisr_set(spu, 0ul);
+ spu_int_stat_clear(spu, 1, stat);
+ spin_unlock(&spu->register_lock);
+
+ if (stat & 1) /* segment fault */
+ __spu_trap_data_seg(spu, dar);
+
+ if (stat & 2) { /* mapping fault */
+ __spu_trap_data_map(spu, dar, dsisr);
+ }
+
+ if (stat & 4) /* ls compare & suspend on get */
+ ;
+
+ if (stat & 8) /* ls compare & suspend on put */
+ ;
+
+ return stat ? IRQ_HANDLED : IRQ_NONE;
+}
+EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);
+
+static irqreturn_t
+spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
+{
+ struct spu *spu;
+ unsigned long stat;
+ unsigned long mask;
+
+ spu = data;
+ stat = spu_int_stat_get(spu, 2);
+ mask = spu_int_mask_get(spu, 2);
+
+ pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
+
+ stat &= mask;
+
+ if (stat & 1) /* PPC core mailbox */
+ __spu_trap_mailbox(spu);
+
+ if (stat & 2) /* SPU stop-and-signal */
+ __spu_trap_stop(spu);
+
+ if (stat & 4) /* SPU halted */
+ __spu_trap_halt(spu);
+
+ if (stat & 8) /* DMA tag group complete */
+ __spu_trap_tag_group(spu);
+
+ if (stat & 0x10) /* SPU mailbox threshold */
+ __spu_trap_spubox(spu);
+
+ spu_int_stat_clear(spu, 2, stat);
+ return stat ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int
+spu_request_irqs(struct spu *spu)
+{
+ int ret;
+ int irq_base;
+
+ irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;
+
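+ /* each SPE uses three interrupt lines, one per interrupt class,
+ * spaced IIC_CLASS_STRIDE apart within its node's interrupt range */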
+ snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
+ ret = request_irq(irq_base + spu->isrc,
+ spu_irq_class_0, 0, spu->irq_c0, spu);
+ if (ret)
+ goto out;
+
+ snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
+ ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
+ spu_irq_class_1, 0, spu->irq_c1, spu);
+ if (ret)
+ goto out1;
+
+ snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
+ ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
+ spu_irq_class_2, 0, spu->irq_c2, spu);
+ if (ret)
+ goto out2;
+ goto out;
+
+out2:
+ free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
+out1:
+ free_irq(irq_base + spu->isrc, spu);
+out:
+ return ret;
+}
+
+static void
+spu_free_irqs(struct spu *spu)
+{
+ int irq_base;
+
+ irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;
+
+ free_irq(irq_base + spu->isrc, spu);
+ free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
+ free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
+}
+
+static LIST_HEAD(spu_list);
+static DECLARE_MUTEX(spu_mutex);
+
+static void spu_init_channels(struct spu *spu)
+{
+ static const struct {
+ unsigned channel;
+ unsigned count;
+ } zero_list[] = {
+ { 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
+ { 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
+ }, count_list[] = {
+ { 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
+ { 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
+ { 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
+ };
+ struct spu_priv2 __iomem *priv2;
+ int i;
+
+ priv2 = spu->priv2;
+
+ /* initialize all channel data to zero */
+ for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
+ int count;
+
+ out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
+ for (count = 0; count < zero_list[i].count; count++)
+ out_be64(&priv2->spu_chnldata_RW, 0);
+ }
+
+ /* initialize channel counts to meaningful values */
+ for (i = 0; i < ARRAY_SIZE(count_list); i++) {
+ out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
+ out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
+ }
+}
+
+struct spu *spu_alloc(void)
+{
+ struct spu *spu;
+
+ down(&spu_mutex);
+ if (!list_empty(&spu_list)) {
+ spu = list_entry(spu_list.next, struct spu, list);
+ list_del_init(&spu->list);
+ pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
+ } else {
+ pr_debug("No SPU left\n");
+ spu = NULL;
+ }
+ up(&spu_mutex);
+
+ if (spu)
+ spu_init_channels(spu);
+
+ return spu;
+}
+EXPORT_SYMBOL_GPL(spu_alloc);
+
+void spu_free(struct spu *spu)
+{
+ down(&spu_mutex);
+ list_add_tail(&spu->list, &spu_list);
+ up(&spu_mutex);
+}
+EXPORT_SYMBOL_GPL(spu_free);
+
+static int spu_handle_mm_fault(struct spu *spu)
+{
+ struct mm_struct *mm = spu->mm;
+ struct vm_area_struct *vma;
+ u64 ea, dsisr, is_write;
+ int ret;
+
+ ea = spu->dar;
+ dsisr = spu->dsisr;
+#if 0
+ if (!IS_VALID_EA(ea)) {
+ return -EFAULT;
+ }
+#endif /* XXX */
+ if (mm == NULL) {
+ return -EFAULT;
+ }
+ if (mm->pgd == NULL) {
+ return -EFAULT;
+ }
+
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, ea);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= ea)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+#if 0
+ if (expand_stack(vma, ea))
+ goto bad_area;
+#endif /* XXX */
+good_area:
+ is_write = dsisr & MFC_DSISR_ACCESS_PUT;
+ if (is_write) {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
+ if (dsisr & MFC_DSISR_ACCESS_DENIED)
+ goto bad_area;
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ }
+ ret = 0;
+ switch (handle_mm_fault(mm, vma, ea, is_write)) {
+ case VM_FAULT_MINOR:
+ current->min_flt++;
+ break;
+ case VM_FAULT_MAJOR:
+ current->maj_flt++;
+ break;
+ case VM_FAULT_SIGBUS:
+ ret = -EFAULT;
+ goto bad_area;
+ case VM_FAULT_OOM:
+ ret = -ENOMEM;
+ goto bad_area;
+ default:
+ BUG();
+ }
+ up_read(&mm->mmap_sem);
+ return ret;
+
+bad_area:
+ up_read(&mm->mmap_sem);
+ return -EFAULT;
+}
+
+int spu_irq_class_1_bottom(struct spu *spu)
+{
+ u64 ea, dsisr, access, error = 0UL;
+ int ret = 0;
+
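+ /* retry the hashed page table lookup first; if that still fails or
+ * access was denied, resolve the fault through the mm layer and
+ * only restart the DMA when everything succeeded */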
+ ea = spu->dar;
+ dsisr = spu->dsisr;
+ if (dsisr & MFC_DSISR_PTE_NOT_FOUND) {
+ access = (_PAGE_PRESENT | _PAGE_USER);
+ access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
+ if (hash_page(ea, access, 0x300) != 0)
+ error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
+ }
+ if ((error & CLASS1_ENABLE_STORAGE_FAULT_INTR) ||
+ (dsisr & MFC_DSISR_ACCESS_DENIED)) {
+ if ((ret = spu_handle_mm_fault(spu)) != 0)
+ error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
+ else
+ error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
+ }
+ spu->dar = 0UL;
+ spu->dsisr = 0UL;
+ if (!error) {
+ spu_restart_dma(spu);
+ } else {
+ __spu_trap_invalid_dma(spu);
+ }
+ return ret;
+}
+
+void spu_irq_setaffinity(struct spu *spu, int cpu)
+{
+ u64 target = iic_get_target_id(cpu);
+ u64 route = target << 48 | target << 32 | target << 16;
+ spu_int_route_set(spu, route);
+}
+EXPORT_SYMBOL_GPL(spu_irq_setaffinity);
+
+static void __iomem * __init map_spe_prop(struct device_node *n,
+ const char *name)
+{
+ struct address_prop {
+ unsigned long address;
+ unsigned int len;
+ } __attribute__((packed)) *prop;
+
+ void *p;
+ int proplen;
+
+ p = get_property(n, name, &proplen);
+ if (proplen != sizeof (struct address_prop))
+ return NULL;
+
+ prop = p;
+
+ return ioremap(prop->address, prop->len);
+}
+
+static void spu_unmap(struct spu *spu)
+{
+ iounmap(spu->priv2);
+ iounmap(spu->priv1);
+ iounmap(spu->problem);
+ iounmap((u8 __iomem *)spu->local_store);
+}
+
+static int __init spu_map_device(struct spu *spu, struct device_node *spe)
+{
+ char *prop;
+ int ret;
+
+ ret = -ENODEV;
+ prop = get_property(spe, "isrc", NULL);
+ if (!prop)
+ goto out;
+ spu->isrc = *(unsigned int *)prop;
+
+ spu->name = get_property(spe, "name", NULL);
+ if (!spu->name)
+ goto out;
+
+ prop = get_property(spe, "local-store", NULL);
+ if (!prop)
+ goto out;
+ spu->local_store_phys = *(unsigned long *)prop;
+
+ /* we use local store as ram, not io memory */
+ spu->local_store = (void __force *)map_spe_prop(spe, "local-store");
+ if (!spu->local_store)
+ goto out;
+
+ spu->problem = map_spe_prop(spe, "problem");
+ if (!spu->problem)
+ goto out_unmap;
+
+ spu->priv1 = map_spe_prop(spe, "priv1");
+ /* priv1 is not available on a hypervisor */
+
+ spu->priv2 = map_spe_prop(spe, "priv2");
+ if (!spu->priv2)
+ goto out_unmap;
+ ret = 0;
+ goto out;
+
+out_unmap:
+ spu_unmap(spu);
+out:
+ return ret;
+}
+
+static int __init find_spu_node_id(struct device_node *spe)
+{
+ unsigned int *id;
+ struct device_node *cpu;
+
+ cpu = spe->parent->parent;
+ id = (unsigned int *)get_property(cpu, "node-id", NULL);
+
+ return id ? *id : 0;
+}
+
+static int __init create_spu(struct device_node *spe)
+{
+ struct spu *spu;
+ int ret;
+ static int number;
+
+ ret = -ENOMEM;
+ spu = kmalloc(sizeof (*spu), GFP_KERNEL);
+ if (!spu)
+ goto out;
+
+ ret = spu_map_device(spu, spe);
+ if (ret)
+ goto out_free;
+
+ spu->node = find_spu_node_id(spe);
+ spu->stop_code = 0;
+ spu->slb_replace = 0;
+ spu->mm = NULL;
+ spu->ctx = NULL;
+ spu->rq = NULL;
+ spu->pid = 0;
+ spu->class_0_pending = 0;
+ spu->flags = 0UL;
+ spu->dar = 0UL;
+ spu->dsisr = 0UL;
+ spin_lock_init(&spu->register_lock);
+
+ spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
+ spu_mfc_sr1_set(spu, 0x33);
+
+ spu->ibox_callback = NULL;
+ spu->wbox_callback = NULL;
+ spu->stop_callback = NULL;
+
+ down(&spu_mutex);
+ spu->number = number++;
+ ret = spu_request_irqs(spu);
+ if (ret)
+ goto out_unmap;
+
+ list_add(&spu->list, &spu_list);
+ up(&spu_mutex);
+
+ pr_debug(KERN_DEBUG "Using SPE %s %02x %p %p %p %p %d\n",
+ spu->name, spu->isrc, spu->local_store,
+ spu->problem, spu->priv1, spu->priv2, spu->number);
+ goto out;
+
+out_unmap:
+ up(&spu_mutex);
+ spu_unmap(spu);
+out_free:
+ kfree(spu);
+out:
+ return ret;
+}
+
+static void destroy_spu(struct spu *spu)
+{
+ list_del_init(&spu->list);
+
+ spu_free_irqs(spu);
+ spu_unmap(spu);
+ kfree(spu);
+}
+
+static void cleanup_spu_base(void)
+{
+ struct spu *spu, *tmp;
+ down(&spu_mutex);
+ list_for_each_entry_safe(spu, tmp, &spu_list, list)
+ destroy_spu(spu);
+ up(&spu_mutex);
+}
+module_exit(cleanup_spu_base);
+
+static int __init init_spu_base(void)
+{
+ struct device_node *node;
+ int ret;
+
+ ret = -ENODEV;
+ for (node = of_find_node_by_type(NULL, "spe");
+ node; node = of_find_node_by_type(node, "spe")) {
+ ret = create_spu(node);
+ if (ret) {
+ printk(KERN_WARNING "%s: Error initializing %s\n",
+ __FUNCTION__, node->name);
+ cleanup_spu_base();
+ break;
+ }
+ }
+ /* in some old firmware versions, the spe is called 'spc', so we
+ look for that as well */
+ for (node = of_find_node_by_type(NULL, "spc");
+ node; node = of_find_node_by_type(node, "spc")) {
+ ret = create_spu(node);
+ if (ret) {
+ printk(KERN_WARNING "%s: Error initializing %s\n",
+ __FUNCTION__, node->name);
+ cleanup_spu_base();
+ break;
+ }
+ }
+ return ret;
+}
+module_init(init_spu_base);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
diff --git a/arch/powerpc/platforms/cell/spu_priv1.c b/arch/powerpc/platforms/cell/spu_priv1.c
new file mode 100644
index 00000000000..b2656421c7b
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_priv1.c
@@ -0,0 +1,133 @@
+/*
+ * access to SPU privileged registers
+ */
+#include <linux/module.h>
+
+#include <asm/io.h>
+#include <asm/spu.h>
+
+void spu_int_mask_and(struct spu *spu, int class, u64 mask)
+{
+ u64 old_mask;
+
+ old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
+ out_be64(&spu->priv1->int_mask_RW[class], old_mask & mask);
+}
+EXPORT_SYMBOL_GPL(spu_int_mask_and);
+
+void spu_int_mask_or(struct spu *spu, int class, u64 mask)
+{
+ u64 old_mask;
+
+ old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
+ out_be64(&spu->priv1->int_mask_RW[class], old_mask | mask);
+}
+EXPORT_SYMBOL_GPL(spu_int_mask_or);
+
+void spu_int_mask_set(struct spu *spu, int class, u64 mask)
+{
+ out_be64(&spu->priv1->int_mask_RW[class], mask);
+}
+EXPORT_SYMBOL_GPL(spu_int_mask_set);
+
+u64 spu_int_mask_get(struct spu *spu, int class)
+{
+ return in_be64(&spu->priv1->int_mask_RW[class]);
+}
+EXPORT_SYMBOL_GPL(spu_int_mask_get);
+
+void spu_int_stat_clear(struct spu *spu, int class, u64 stat)
+{
+ out_be64(&spu->priv1->int_stat_RW[class], stat);
+}
+EXPORT_SYMBOL_GPL(spu_int_stat_clear);
+
+u64 spu_int_stat_get(struct spu *spu, int class)
+{
+ return in_be64(&spu->priv1->int_stat_RW[class]);
+}
+EXPORT_SYMBOL_GPL(spu_int_stat_get);
+
+void spu_int_route_set(struct spu *spu, u64 route)
+{
+ out_be64(&spu->priv1->int_route_RW, route);
+}
+EXPORT_SYMBOL_GPL(spu_int_route_set);
+
+u64 spu_mfc_dar_get(struct spu *spu)
+{
+ return in_be64(&spu->priv1->mfc_dar_RW);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_dar_get);
+
+u64 spu_mfc_dsisr_get(struct spu *spu)
+{
+ return in_be64(&spu->priv1->mfc_dsisr_RW);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_dsisr_get);
+
+void spu_mfc_dsisr_set(struct spu *spu, u64 dsisr)
+{
+ out_be64(&spu->priv1->mfc_dsisr_RW, dsisr);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_dsisr_set);
+
+void spu_mfc_sdr_set(struct spu *spu, u64 sdr)
+{
+ out_be64(&spu->priv1->mfc_sdr_RW, sdr);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_sdr_set);
+
+void spu_mfc_sr1_set(struct spu *spu, u64 sr1)
+{
+ out_be64(&spu->priv1->mfc_sr1_RW, sr1);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_sr1_set);
+
+u64 spu_mfc_sr1_get(struct spu *spu)
+{
+ return in_be64(&spu->priv1->mfc_sr1_RW);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_sr1_get);
+
+void spu_mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
+{
+ out_be64(&spu->priv1->mfc_tclass_id_RW, tclass_id);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_tclass_id_set);
+
+u64 spu_mfc_tclass_id_get(struct spu *spu)
+{
+ return in_be64(&spu->priv1->mfc_tclass_id_RW);
+}
+EXPORT_SYMBOL_GPL(spu_mfc_tclass_id_get);
+
+void spu_tlb_invalidate(struct spu *spu)
+{
+ out_be64(&spu->priv1->tlb_invalidate_entry_W, 0ul);
+}
+EXPORT_SYMBOL_GPL(spu_tlb_invalidate);
+
+void spu_resource_allocation_groupID_set(struct spu *spu, u64 id)
+{
+ out_be64(&spu->priv1->resource_allocation_groupID_RW, id);
+}
+EXPORT_SYMBOL_GPL(spu_resource_allocation_groupID_set);
+
+u64 spu_resource_allocation_groupID_get(struct spu *spu)
+{
+ return in_be64(&spu->priv1->resource_allocation_groupID_RW);
+}
+EXPORT_SYMBOL_GPL(spu_resource_allocation_groupID_get);
+
+void spu_resource_allocation_enable_set(struct spu *spu, u64 enable)
+{
+ out_be64(&spu->priv1->resource_allocation_enable_RW, enable);
+}
+EXPORT_SYMBOL_GPL(spu_resource_allocation_enable_set);
+
+u64 spu_resource_allocation_enable_get(struct spu *spu)
+{
+ return in_be64(&spu->priv1->resource_allocation_enable_RW);
+}
+EXPORT_SYMBOL_GPL(spu_resource_allocation_enable_get);
diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c
new file mode 100644
index 00000000000..261b507a901
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spu_syscalls.c
@@ -0,0 +1,88 @@
+/*
+ * SPU file system -- system call stubs
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/syscalls.h>
+
+#include <asm/spu.h>
+
+struct spufs_calls spufs_calls = {
+ .owner = NULL,
+};
+
+/* These stub syscalls are needed to have the actual implementation
+ * within a loadable module. When spufs is built into the kernel,
+ * this file is not used and the syscalls directly enter the fs code */
+
+asmlinkage long sys_spu_create(const char __user *name,
+ unsigned int flags, mode_t mode)
+{
+ long ret;
+ struct module *owner = spufs_calls.owner;
+
+ ret = -ENOSYS;
+ if (owner && try_module_get(owner)) {
+ ret = spufs_calls.create_thread(name, flags, mode);
+ module_put(owner);
+ }
+ return ret;
+}
+
+asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
+{
+ long ret;
+ struct file *filp;
+ int fput_needed;
+ struct module *owner = spufs_calls.owner;
+
+ ret = -ENOSYS;
+ if (owner && try_module_get(owner)) {
+ ret = -EBADF;
+ filp = fget_light(fd, &fput_needed);
+ if (filp) {
+ ret = spufs_calls.spu_run(filp, unpc, ustatus);
+ fput_light(filp, fput_needed);
+ }
+ module_put(owner);
+ }
+ return ret;
+}
+
+int register_spu_syscalls(struct spufs_calls *calls)
+{
+ if (spufs_calls.owner)
+ return -EBUSY;
+
+ spufs_calls.create_thread = calls->create_thread;
+ spufs_calls.spu_run = calls->spu_run;
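+ /* publish the function pointers before the syscall stubs can see
+ * a non-NULL owner and start calling through them */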
+ smp_mb();
+ spufs_calls.owner = calls->owner;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(register_spu_syscalls);
+
+void unregister_spu_syscalls(struct spufs_calls *calls)
+{
+ BUG_ON(spufs_calls.owner != calls->owner);
+ spufs_calls.owner = NULL;
+}
+EXPORT_SYMBOL_GPL(unregister_spu_syscalls);
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
new file mode 100644
index 00000000000..a7cddf40e3d
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -0,0 +1,54 @@
+obj-$(CONFIG_SPU_FS) += spufs.o
+spufs-y += inode.o file.o context.o switch.o syscalls.o
+spufs-y += sched.o backing_ops.o hw_ops.o run.o
+
+# Rules to build switch.o with the help of SPU tool chain
+SPU_CROSS := spu-
+SPU_CC := $(SPU_CROSS)gcc
+SPU_AS := $(SPU_CROSS)gcc
+SPU_LD := $(SPU_CROSS)ld
+SPU_OBJCOPY := $(SPU_CROSS)objcopy
+SPU_CFLAGS := -O2 -Wall -I$(srctree)/include -I$(objtree)/include2
+SPU_AFLAGS := -c -D__ASSEMBLY__ -I$(srctree)/include -I$(objtree)/include2
+SPU_LDFLAGS := -N -Ttext=0x0
+
+$(obj)/switch.o: $(obj)/spu_save_dump.h $(obj)/spu_restore_dump.h
+
+# Compile SPU files
+ cmd_spu_cc = $(SPU_CC) $(SPU_CFLAGS) -c -o $@ $<
+quiet_cmd_spu_cc = SPU_CC $@
+$(obj)/spu_%.o: $(src)/spu_%.c
+ $(call if_changed,spu_cc)
+
+# Assemble SPU files
+ cmd_spu_as = $(SPU_AS) $(SPU_AFLAGS) -o $@ $<
+quiet_cmd_spu_as = SPU_AS $@
+$(obj)/spu_%.o: $(src)/spu_%.S
+ $(call if_changed,spu_as)
+
+# Link SPU Executables
+ cmd_spu_ld = $(SPU_LD) $(SPU_LDFLAGS) -o $@ $^
+quiet_cmd_spu_ld = SPU_LD $@
+$(obj)/spu_%: $(obj)/spu_%_crt0.o $(obj)/spu_%.o
+ $(call if_changed,spu_ld)
+
+# Copy into binary format
+ cmd_spu_objcopy = $(SPU_OBJCOPY) -O binary $< $@
+quiet_cmd_spu_objcopy = OBJCOPY $@
+$(obj)/spu_%.bin: $(src)/spu_%
+ $(call if_changed,spu_objcopy)
+
+# create C code from ELF executable
+cmd_hexdump = ( \
+ echo "/*" ; \
+ echo " * $*_dump.h: Copyright (C) 2005 IBM." ; \
+ echo " * Hex-dump auto generated from $*.c." ; \
+ echo " * Do not edit!" ; \
+ echo " */" ; \
+ echo "static unsigned int $*_code[] __page_aligned = {" ; \
+ hexdump -v -e '"0x" 4/1 "%02x" "," "\n"' $< ; \
+ echo "};" ; \
+ ) > $@
+quiet_cmd_hexdump = HEXDUMP $@
+$(obj)/%_dump.h: $(obj)/%.bin
+ $(call if_changed,hexdump)
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
new file mode 100644
index 00000000000..a5c489a53c6
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -0,0 +1,308 @@
+/* backing_ops.c - query/set operations on saved SPU context.
+ *
+ * Copyright (C) IBM 2005
+ * Author: Mark Nutter <mnutter@us.ibm.com>
+ *
+ * These register operations allow SPUFS to operate on saved
+ * SPU contexts rather than hardware.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/poll.h>
+
+#include <asm/io.h>
+#include <asm/spu.h>
+#include <asm/spu_csa.h>
+#include <asm/mmu_context.h>
+#include "spufs.h"
+
+/*
+ * Reads/writes to various problem and priv2 registers require
+ * state changes, i.e. generate SPU events, modify channel
+ * counts, etc.
+ */
+
+static void gen_spu_event(struct spu_context *ctx, u32 event)
+{
+ u64 ch0_cnt;
+ u64 ch0_data;
+ u64 ch1_data;
+
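+ /* channel 0 carries the pending SPU event bits and channel 1 the
+ * event mask; if the event is enabled and not already pending,
+ * make the event status channel readable by raising its count */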
+ ch0_cnt = ctx->csa.spu_chnlcnt_RW[0];
+ ch0_data = ctx->csa.spu_chnldata_RW[0];
+ ch1_data = ctx->csa.spu_chnldata_RW[1];
+ ctx->csa.spu_chnldata_RW[0] |= event;
+ if ((ch0_cnt == 0) && !(ch0_data & event) && (ch1_data & event)) {
+ ctx->csa.spu_chnlcnt_RW[0] = 1;
+ }
+}
+
+static int spu_backing_mbox_read(struct spu_context *ctx, u32 * data)
+{
+ u32 mbox_stat;
+ int ret = 0;
+
+ spin_lock(&ctx->csa.register_lock);
+ mbox_stat = ctx->csa.prob.mb_stat_R;
+ if (mbox_stat & 0x0000ff) {
+ /* Read the first available word.
+ * Implementation note: the depth
+ * of pu_mb_R is currently 1.
+ */
+ *data = ctx->csa.prob.pu_mb_R;
+ ctx->csa.prob.mb_stat_R &= ~(0x0000ff);
+ ctx->csa.spu_chnlcnt_RW[28] = 1;
+ gen_spu_event(ctx, MFC_PU_MAILBOX_AVAILABLE_EVENT);
+ ret = 4;
+ }
+ spin_unlock(&ctx->csa.register_lock);
+ return ret;
+}
+
+static u32 spu_backing_mbox_stat_read(struct spu_context *ctx)
+{
+ return ctx->csa.prob.mb_stat_R;
+}
+
+static unsigned int spu_backing_mbox_stat_poll(struct spu_context *ctx,
+ unsigned int events)
+{
+ int ret;
+ u32 stat;
+
+ ret = 0;
+ spin_lock_irq(&ctx->csa.register_lock);
+ stat = ctx->csa.prob.mb_stat_R;
+
+ /* if the requested event is there, return the poll
+ mask, otherwise enable the interrupt to get notified,
+ but first mark any pending interrupts as done so
+ we don't get woken up unnecessarily */
+
+ if (events & (POLLIN | POLLRDNORM)) {
+ if (stat & 0xff0000)
+ ret |= POLLIN | POLLRDNORM;
+ else {
+ ctx->csa.priv1.int_stat_class0_RW &= ~0x1;
+ ctx->csa.priv1.int_mask_class2_RW |= 0x1;
+ }
+ }
+ if (events & (POLLOUT | POLLWRNORM)) {
+ if (stat & 0x00ff00)
+ ret = POLLOUT | POLLWRNORM;
+ else {
+ ctx->csa.priv1.int_stat_class0_RW &= ~0x10;
+ ctx->csa.priv1.int_mask_class2_RW |= 0x10;
+ }
+ }
+ spin_unlock_irq(&ctx->csa.register_lock);
+ return ret;
+}
+
+static int spu_backing_ibox_read(struct spu_context *ctx, u32 * data)
+{
+ int ret;
+
+ spin_lock(&ctx->csa.register_lock);
+ if (ctx->csa.prob.mb_stat_R & 0xff0000) {
+ /* Read the first available word.
+ * Implementation note: the depth
+ * of puint_mb_R is currently 1.
+ */
+ *data = ctx->csa.priv2.puint_mb_R;
+ ctx->csa.prob.mb_stat_R &= ~(0xff0000);
+ ctx->csa.spu_chnlcnt_RW[30] = 1;
+ gen_spu_event(ctx, MFC_PU_INT_MAILBOX_AVAILABLE_EVENT);
+ ret = 4;
+ } else {
+ /* make sure we get woken up by the interrupt */
+ ctx->csa.priv1.int_mask_class2_RW |= 0x1UL;
+ ret = 0;
+ }
+ spin_unlock(&ctx->csa.register_lock);
+ return ret;
+}
+
+static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
+{
+ int ret;
+
+ spin_lock(&ctx->csa.register_lock);
+ if ((ctx->csa.prob.mb_stat_R) & 0x00ff00) {
+ int slot = ctx->csa.spu_chnlcnt_RW[29];
+ int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8;
+
+ /* We have space to write wbox_data.
+ * Implementation note: the depth
+ * of spu_mb_W is currently 4.
+ */
+ BUG_ON(avail != (4 - slot));
+ ctx->csa.spu_mailbox_data[slot] = data;
+ ctx->csa.spu_chnlcnt_RW[29] = ++slot;
+ ctx->csa.prob.mb_stat_R = (((4 - slot) & 0xff) << 8);
+ gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT);
+ ret = 4;
+ } else {
+ /* make sure we get woken up by the interrupt when space
+ becomes available */
+ ctx->csa.priv1.int_mask_class2_RW |= 0x10;
+ ret = 0;
+ }
+ spin_unlock(&ctx->csa.register_lock);
+ return ret;
+}
+
+static u32 spu_backing_signal1_read(struct spu_context *ctx)
+{
+ return ctx->csa.spu_chnldata_RW[3];
+}
+
+static void spu_backing_signal1_write(struct spu_context *ctx, u32 data)
+{
+ spin_lock(&ctx->csa.register_lock);
+ if (ctx->csa.priv2.spu_cfg_RW & 0x1)
+ ctx->csa.spu_chnldata_RW[3] |= data;
+ else
+ ctx->csa.spu_chnldata_RW[3] = data;
+ ctx->csa.spu_chnlcnt_RW[3] = 1;
+ gen_spu_event(ctx, MFC_SIGNAL_1_EVENT);
+ spin_unlock(&ctx->csa.register_lock);
+}
+
+static u32 spu_backing_signal2_read(struct spu_context *ctx)
+{
+ return ctx->csa.spu_chnldata_RW[4];
+}
+
+static void spu_backing_signal2_write(struct spu_context *ctx, u32 data)
+{
+ spin_lock(&ctx->csa.register_lock);
+ if (ctx->csa.priv2.spu_cfg_RW & 0x2)
+ ctx->csa.spu_chnldata_RW[4] |= data;
+ else
+ ctx->csa.spu_chnldata_RW[4] = data;
+ ctx->csa.spu_chnlcnt_RW[4] = 1;
+ gen_spu_event(ctx, MFC_SIGNAL_2_EVENT);
+ spin_unlock(&ctx->csa.register_lock);
+}
+
+static void spu_backing_signal1_type_set(struct spu_context *ctx, u64 val)
+{
+ u64 tmp;
+
+ spin_lock(&ctx->csa.register_lock);
+ tmp = ctx->csa.priv2.spu_cfg_RW;
+ if (val)
+ tmp |= 1;
+ else
+ tmp &= ~1;
+ ctx->csa.priv2.spu_cfg_RW = tmp;
+ spin_unlock(&ctx->csa.register_lock);
+}
+
+static u64 spu_backing_signal1_type_get(struct spu_context *ctx)
+{
+ return ((ctx->csa.priv2.spu_cfg_RW & 1) != 0);
+}
+
+static void spu_backing_signal2_type_set(struct spu_context *ctx, u64 val)
+{
+ u64 tmp;
+
+ spin_lock(&ctx->csa.register_lock);
+ tmp = ctx->csa.priv2.spu_cfg_RW;
+ if (val)
+ tmp |= 2;
+ else
+ tmp &= ~2;
+ ctx->csa.priv2.spu_cfg_RW = tmp;
+ spin_unlock(&ctx->csa.register_lock);
+}
+
+static u64 spu_backing_signal2_type_get(struct spu_context *ctx)
+{
+ return ((ctx->csa.priv2.spu_cfg_RW & 2) != 0);
+}
+
+static u32 spu_backing_npc_read(struct spu_context *ctx)
+{
+ return ctx->csa.prob.spu_npc_RW;
+}
+
+static void spu_backing_npc_write(struct spu_context *ctx, u32 val)
+{
+ ctx->csa.prob.spu_npc_RW = val;
+}
+
+static u32 spu_backing_status_read(struct spu_context *ctx)
+{
+ return ctx->csa.prob.spu_status_R;
+}
+
+static char *spu_backing_get_ls(struct spu_context *ctx)
+{
+ return ctx->csa.lscsa->ls;
+}
+
+static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
+{
+ spin_lock(&ctx->csa.register_lock);
+ ctx->csa.prob.spu_runcntl_RW = val;
+ if (val & SPU_RUNCNTL_RUNNABLE) {
+ ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING;
+ } else {
+ ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING;
+ }
+ spin_unlock(&ctx->csa.register_lock);
+}
+
+static void spu_backing_runcntl_stop(struct spu_context *ctx)
+{
+ spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
+}
+
+struct spu_context_ops spu_backing_ops = {
+ .mbox_read = spu_backing_mbox_read,
+ .mbox_stat_read = spu_backing_mbox_stat_read,
+ .mbox_stat_poll = spu_backing_mbox_stat_poll,
+ .ibox_read = spu_backing_ibox_read,
+ .wbox_write = spu_backing_wbox_write,
+ .signal1_read = spu_backing_signal1_read,
+ .signal1_write = spu_backing_signal1_write,
+ .signal2_read = spu_backing_signal2_read,
+ .signal2_write = spu_backing_signal2_write,
+ .signal1_type_set = spu_backing_signal1_type_set,
+ .signal1_type_get = spu_backing_signal1_type_get,
+ .signal2_type_set = spu_backing_signal2_type_set,
+ .signal2_type_get = spu_backing_signal2_type_get,
+ .npc_read = spu_backing_npc_read,
+ .npc_write = spu_backing_npc_write,
+ .status_read = spu_backing_status_read,
+ .get_ls = spu_backing_get_ls,
+ .runcntl_write = spu_backing_runcntl_write,
+ .runcntl_stop = spu_backing_runcntl_stop,
+};
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
new file mode 100644
index 00000000000..336f238102f
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -0,0 +1,167 @@
+/*
+ * SPU file system -- SPU context management
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/spu.h>
+#include <asm/spu_csa.h>
+#include "spufs.h"
+
+struct spu_context *alloc_spu_context(struct address_space *local_store)
+{
+ struct spu_context *ctx;
+ ctx = kmalloc(sizeof *ctx, GFP_KERNEL);
+ if (!ctx)
+ goto out;
+ /* Binding to physical processor deferred
+ * until spu_activate().
+ */
+ spu_init_csa(&ctx->csa);
+ if (!ctx->csa.lscsa) {
+ goto out_free;
+ }
+ spin_lock_init(&ctx->mmio_lock);
+ kref_init(&ctx->kref);
+ init_rwsem(&ctx->state_sema);
+ init_MUTEX(&ctx->run_sema);
+ init_waitqueue_head(&ctx->ibox_wq);
+ init_waitqueue_head(&ctx->wbox_wq);
+ init_waitqueue_head(&ctx->stop_wq);
+ ctx->ibox_fasync = NULL;
+ ctx->wbox_fasync = NULL;
+ ctx->state = SPU_STATE_SAVED;
+ ctx->local_store = local_store;
+ ctx->spu = NULL;
+ ctx->ops = &spu_backing_ops;
+ ctx->owner = get_task_mm(current);
+ goto out;
+out_free:
+ kfree(ctx);
+ ctx = NULL;
+out:
+ return ctx;
+}
+
+void destroy_spu_context(struct kref *kref)
+{
+ struct spu_context *ctx;
+ ctx = container_of(kref, struct spu_context, kref);
+ down_write(&ctx->state_sema);
+ spu_deactivate(ctx);
+ ctx->ibox_fasync = NULL;
+ ctx->wbox_fasync = NULL;
+ up_write(&ctx->state_sema);
+ spu_fini_csa(&ctx->csa);
+ kfree(ctx);
+}
+
+struct spu_context * get_spu_context(struct spu_context *ctx)
+{
+ kref_get(&ctx->kref);
+ return ctx;
+}
+
+int put_spu_context(struct spu_context *ctx)
+{
+ return kref_put(&ctx->kref, &destroy_spu_context);
+}
+
+/* give up the mm reference when the context is about to be destroyed */
+void spu_forget(struct spu_context *ctx)
+{
+ struct mm_struct *mm;
+ spu_acquire_saved(ctx);
+ mm = ctx->owner;
+ ctx->owner = NULL;
+ mmput(mm);
+ spu_release(ctx);
+}
+
+void spu_acquire(struct spu_context *ctx)
+{
+ down_read(&ctx->state_sema);
+}
+
+void spu_release(struct spu_context *ctx)
+{
+ up_read(&ctx->state_sema);
+}
+
+void spu_unmap_mappings(struct spu_context *ctx)
+{
+ unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
+}
+
+int spu_acquire_runnable(struct spu_context *ctx)
+{
+ int ret = 0;
+
+ down_read(&ctx->state_sema);
+ if (ctx->state == SPU_STATE_RUNNABLE) {
+ ctx->spu->prio = current->prio;
+ return 0;
+ }
+ up_read(&ctx->state_sema);
+
+ down_write(&ctx->state_sema);
+ /* ctx is about to be freed, can't acquire any more */
+ if (!ctx->owner) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (ctx->state == SPU_STATE_SAVED) {
+ ret = spu_activate(ctx, 0);
+ if (ret)
+ goto out;
+ ctx->state = SPU_STATE_RUNNABLE;
+ }
+
+ downgrade_write(&ctx->state_sema);
+ /* On success, we return holding the lock */
+
+ return ret;
+out:
+ /* Release here, to simplify calling code. */
+ up_write(&ctx->state_sema);
+
+ return ret;
+}
+
+void spu_acquire_saved(struct spu_context *ctx)
+{
+ down_read(&ctx->state_sema);
+
+ if (ctx->state == SPU_STATE_SAVED)
+ return;
+
+ up_read(&ctx->state_sema);
+ down_write(&ctx->state_sema);
+
+ if (ctx->state == SPU_STATE_RUNNABLE) {
+ spu_deactivate(ctx);
+ ctx->state = SPU_STATE_SAVED;
+ }
+
+ downgrade_write(&ctx->state_sema);
+}
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
new file mode 100644
index 00000000000..dfa649c9b95
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -0,0 +1,794 @@
+/*
+ * SPU file system -- file contents
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/poll.h>
+#include <linux/ptrace.h>
+
+#include <asm/io.h>
+#include <asm/semaphore.h>
+#include <asm/spu.h>
+#include <asm/uaccess.h>
+
+#include "spufs.h"
+
+
+static int
+spufs_mem_open(struct inode *inode, struct file *file)
+{
+ struct spufs_inode_info *i = SPUFS_I(inode);
+ file->private_data = i->i_ctx;
+ file->f_mapping = i->i_ctx->local_store;
+ return 0;
+}
+
+static ssize_t
+spufs_mem_read(struct file *file, char __user *buffer,
+ size_t size, loff_t *pos)
+{
+ struct spu_context *ctx = file->private_data;
+ char *local_store;
+ int ret;
+
+ spu_acquire(ctx);
+
+ local_store = ctx->ops->get_ls(ctx);
+ ret = simple_read_from_buffer(buffer, size, pos, local_store, LS_SIZE);
+
+ spu_release(ctx);
+ return ret;
+}
+
+static ssize_t
+spufs_mem_write(struct file *file, const char __user *buffer,
+ size_t size, loff_t *pos)
+{
+ struct spu_context *ctx = file->private_data;
+ char *local_store;
+ int ret;
+
+ size = min_t(ssize_t, LS_SIZE - *pos, size);
+ if (size <= 0)
+ return -EFBIG;
+ *pos += size;
+
+ spu_acquire(ctx);
+
+ local_store = ctx->ops->get_ls(ctx);
+ ret = copy_from_user(local_store + *pos - size,
+ buffer, size) ? -EFAULT : size;
+
+ spu_release(ctx);
+ return ret;
+}
+
+#ifdef CONFIG_SPARSEMEM
+static struct page *
+spufs_mem_mmap_nopage(struct vm_area_struct *vma,
+ unsigned long address, int *type)
+{
+ struct page *page = NOPAGE_SIGBUS;
+
+ struct spu_context *ctx = vma->vm_file->private_data;
+ unsigned long offset = address - vma->vm_start;
+ offset += vma->vm_pgoff << PAGE_SHIFT;
+
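+ /* a saved context backs its local store from the vmalloc'ed context
+ * save area, while a running context maps the physical local store
+ * of the SPE it is loaded on */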
+ spu_acquire(ctx);
+
+ if (ctx->state == SPU_STATE_SAVED)
+ page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
+ else
+ page = pfn_to_page((ctx->spu->local_store_phys + offset)
+ >> PAGE_SHIFT);
+
+ spu_release(ctx);
+
+ if (type)
+ *type = VM_FAULT_MINOR;
+
+ page_cache_get(page);
+ return page;
+}
+
+static struct vm_operations_struct spufs_mem_mmap_vmops = {
+ .nopage = spufs_mem_mmap_nopage,
+};
+
+static int
+spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+
+ /* FIXME: */
+ vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
+ | _PAGE_NO_CACHE);
+
+ vma->vm_ops = &spufs_mem_mmap_vmops;
+ return 0;
+}
+#endif
+
+static struct file_operations spufs_mem_fops = {
+ .open = spufs_mem_open,
+ .read = spufs_mem_read,
+ .write = spufs_mem_write,
+ .llseek = generic_file_llseek,
+#ifdef CONFIG_SPARSEMEM
+ .mmap = spufs_mem_mmap,
+#endif
+};
+
+static int
+spufs_regs_open(struct inode *inode, struct file *file)
+{
+ struct spufs_inode_info *i = SPUFS_I(inode);
+ file->private_data = i->i_ctx;
+ return 0;
+}
+
+static ssize_t
+spufs_regs_read(struct file *file, char __user *buffer,
+ size_t size, loff_t *pos)
+{
+ struct spu_context *ctx = file->private_data;
+ struct spu_lscsa *lscsa = ctx->csa.lscsa;
+ int ret;
+
+ spu_acquire_saved(ctx);
+
+ ret = simple_read_from_buffer(buffer, size, pos,
+ lscsa->gprs, sizeof lscsa->gprs);
+
+ spu_release(ctx);
+ return ret;
+}
+
+static ssize_t
+spufs_regs_write(struct file *file, const char __user *buffer,
+ size_t size, loff_t *pos)
+{
+ struct spu_context *ctx = file->private_data;
+ struct spu_lscsa *lscsa = ctx->csa.lscsa;
+ int ret;
+
+ size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
+ if (size <= 0)
+ return -EFBIG;
+ *pos += size;
+
+ spu_acquire_saved(ctx);
+
+ ret = copy_from_user(lscsa->gprs + *pos - size,
+ buffer, size) ? -EFAULT : size;
+
+ spu_release(ctx);
+ return ret;
+}
+
+static struct file_operations spufs_regs_fops = {
+ .open = spufs_regs_open,
+ .read = spufs_regs_read,
+ .write = spufs_regs_write,
+ .llseek = generic_file_llseek,
+};
+
+static ssize_t
+spufs_fpcr_read(struct file *file, char __user * buffer,
+ size_t size, loff_t * pos)
+{
+ struct spu_context *ctx = file->private_data;
+ struct spu_lscsa *lscsa = ctx->csa.lscsa;
+ int ret;
+
+ spu_acquire_saved(ctx);
+
+ ret = simple_read_from_buffer(buffer, size, pos,
+ &lscsa->fpcr, sizeof(lscsa->fpcr));
+
+ spu_release(ctx);
+ return ret;
+}
+
+static ssize_t
+spufs_fpcr_write(struct file *file, const char __user * buffer,
+ size_t size, loff_t * pos)
+{
+ struct spu_context *ctx = file->private_data;
+ struct spu_lscsa *lscsa = ctx->csa.lscsa;
+ int ret;
+
+ size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
+ if (size <= 0)
+ return -EFBIG;
+ *pos += size;
+
+ spu_acquire_saved(ctx);
+
+ ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
+ buffer, size) ? -EFAULT : size;
+
+ spu_release(ctx);
+ return ret;
+}
+
+static struct file_operations spufs_fpcr_fops = {
+ .open = spufs_regs_open,
+ .read = spufs_fpcr_read,
+ .write = spufs_fpcr_write,
+ .llseek = generic_file_llseek,
+};
+
+/* generic open function for all pipe-like files */
+static int spufs_pipe_open(struct inode *inode, struct file *file)
+{
+ struct spufs_inode_info *i = SPUFS_I(inode);
+ file->private_data = i->i_ctx;
+
+ return nonseekable_open(inode, file);
+}
+
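+/*
+ * Read one 32-bit word from the SPU outbound mailbox.  This read never
+ * blocks; -EAGAIN is returned when no data is pending.
+ */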
+static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct spu_context *ctx = file->private_data;
+ u32 mbox_data;
+ int ret;
+
+ if (len < 4)
+ return -EINVAL;
+
+ spu_acquire(ctx);
+ ret = ctx->ops->mbox_read(ctx, &mbox_data);
+ spu_release(ctx);
+
+ if (!ret)
+ return -EAGAIN;
+
+ if (copy_to_user(buf, &mbox_data, sizeof mbox_data))
+ return -EFAULT;
+
+ return 4;
+}
+
+static struct file_operations spufs_mbox_fops = {
+ .open = spufs_pipe_open,
+ .read = spufs_mbox_read,
+};
+
+static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct spu_context *ctx = file->private_data;
+ u32 mbox_stat;
+
+ if (len < 4)
+ return -EINVAL;
+
+ spu_acquire(ctx);
+
+ mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;
+
+ spu_release(ctx);
+
+ if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
+ return -EFAULT;
+
+ return 4;
+}
+
+static struct file_operations spufs_mbox_stat_fops = {
+ .open = spufs_pipe_open,
+ .read = spufs_mbox_stat_read,
+};
+
+/* low-level ibox access function */
+size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
+{
+ return ctx->ops->ibox_read(ctx, data);
+}
+
+static int spufs_ibox_fasync(int fd, struct file *file, int on)
+{
+ struct spu_context *ctx = file->private_data;
+
+ return fasync_helper(fd, file, on, &ctx->ibox_fasync);
+}
+
+/* interrupt-level ibox callback function. */
+void spufs_ibox_callback(struct spu *spu)
+{
+ struct spu_context *ctx = spu->ctx;
+
+ wake_up_all(&ctx->ibox_wq);
+ kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
+}
+
+static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct spu_context *ctx = file->private_data;
+ u32 ibox_data;
+ ssize_t ret;
+
+ if (len < 4)
+ return -EINVAL;
+
+ spu_acquire(ctx);
+
+ ret = 0;
+ if (file->f_flags & O_NONBLOCK) {
+ if (!spu_ibox_read(ctx, &ibox_data))
+ ret = -EAGAIN;
+ } else {
+ ret = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
+ }
+
+ spu_release(ctx);
+
+ if (ret)
+ return ret;
+
+ ret = 4;
+ if (copy_to_user(buf, &ibox_data, sizeof ibox_data))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
+{
+ struct spu_context *ctx = file->private_data;
+ unsigned int mask;
+
+ poll_wait(file, &ctx->ibox_wq, wait);
+
+ spu_acquire(ctx);
+ mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
+ spu_release(ctx);
+
+ return mask;
+}
+
+static struct file_operations spufs_ibox_fops = {
+ .open = spufs_pipe_open,
+ .read = spufs_ibox_read,
+ .poll = spufs_ibox_poll,
+ .fasync = spufs_ibox_fasync,
+};
+
+static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct spu_context *ctx = file->private_data;
+ u32 ibox_stat;
+
+ if (len < 4)
+ return -EINVAL;
+
+ spu_acquire(ctx);
+ ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
+ spu_release(ctx);
+
+ if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
+ return -EFAULT;
+
+ return 4;
+}
+
+static struct file_operations spufs_ibox_stat_fops = {
+ .open = spufs_pipe_open,
+ .read = spufs_ibox_stat_read,
+};
+
+/* low-level mailbox write */
+size_t spu_wbox_write(struct spu_context *ctx, u32 data)
+{
+ return ctx->ops->wbox_write(ctx, data);
+}
+
+static int spufs_wbox_fasync(int fd, struct file *file, int on)
+{
+ struct spu_context *ctx = file->private_data;
+ int ret;
+
+ ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
+
+ return ret;
+}
+
+/* interrupt-level wbox callback function. */
+void spufs_wbox_callback(struct spu *spu)
+{
+ struct spu_context *ctx = spu->ctx;
+
+ wake_up_all(&ctx->wbox_wq);
+ kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
+}
+
+static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct spu_context *ctx = file->private_data;
+ u32 wbox_data;
+ int ret;
+
+ if (len < 4)
+ return -EINVAL;
+
+ if (copy_from_user(&wbox_data, buf, sizeof wbox_data))
+ return -EFAULT;
+
+ spu_acquire(ctx);
+
+ ret = 0;
+ if (file->f_flags & O_NONBLOCK) {
+ if (!spu_wbox_write(ctx, wbox_data))
+ ret = -EAGAIN;
+ } else {
+ ret = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
+ }
+
+ spu_release(ctx);
+
+ return ret ? ret : sizeof wbox_data;
+}
+
+static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
+{
+ struct spu_context *ctx = file->private_data;
+ unsigned int mask;
+
+ poll_wait(file, &ctx->wbox_wq, wait);
+
+ spu_acquire(ctx);
+ mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
+ spu_release(ctx);
+
+ return mask;
+}
+
+static struct file_operations spufs_wbox_fops = {
+ .open = spufs_pipe_open,
+ .write = spufs_wbox_write,
+ .poll = spufs_wbox_poll,
+ .fasync = spufs_wbox_fasync,
+};
+
+static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct spu_context *ctx = file->private_data;
+ u32 wbox_stat;
+
+ if (len < 4)
+ return -EINVAL;
+
+ spu_acquire(ctx);
+ wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
+ spu_release(ctx);
+
+ if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))
+ return -EFAULT;
+
+ return 4;
+}
+
+static struct file_operations spufs_wbox_stat_fops = {
+ .open = spufs_pipe_open,
+ .read = spufs_wbox_stat_read,
+};
+
+static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct spu_context *ctx = file->private_data;
+ u32 data;
+
+ if (len < 4)
+ return -EINVAL;
+
+ spu_acquire(ctx);
+ data = ctx->ops->signal1_read(ctx);
+ spu_release(ctx);
+
+ if (copy_to_user(buf, &data, 4))
+ return -EFAULT;
+
+ return 4;
+}
+
+static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct spu_context *ctx;
+ u32 data;
+
+ ctx = file->private_data;
+
+ if (len < 4)
+ return -EINVAL;
+
+ if (copy_from_user(&data, buf, 4))
+ return -EFAULT;
+
+ spu_acquire(ctx);
+ ctx->ops->signal1_write(ctx, data);
+ spu_release(ctx);
+
+ return 4;
+}
+
+static struct file_operations spufs_signal1_fops = {
+ .open = spufs_pipe_open,
+ .read = spufs_signal1_read,
+ .write = spufs_signal1_write,
+};
+
+static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct spu_context *ctx;
+ u32 data;
+
+ ctx = file->private_data;
+
+ if (len < 4)
+ return -EINVAL;
+
+ spu_acquire(ctx);
+ data = ctx->ops->signal2_read(ctx);
+ spu_release(ctx);
+
+ if (copy_to_user(buf, &data, 4))
+ return -EFAULT;
+
+ return 4;
+}
+
+static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct spu_context *ctx;
+ u32 data;
+
+ ctx = file->private_data;
+
+ if (len < 4)
+ return -EINVAL;
+
+ if (copy_from_user(&data, buf, 4))
+ return -EFAULT;
+
+ spu_acquire(ctx);
+ ctx->ops->signal2_write(ctx, data);
+ spu_release(ctx);
+
+ return 4;
+}
+
+static struct file_operations spufs_signal2_fops = {
+ .open = spufs_pipe_open,
+ .read = spufs_signal2_read,
+ .write = spufs_signal2_write,
+};
+
+static void spufs_signal1_type_set(void *data, u64 val)
+{
+ struct spu_context *ctx = data;
+
+ spu_acquire(ctx);
+ ctx->ops->signal1_type_set(ctx, val);
+ spu_release(ctx);
+}
+
+static u64 spufs_signal1_type_get(void *data)
+{
+ struct spu_context *ctx = data;
+ u64 ret;
+
+ spu_acquire(ctx);
+ ret = ctx->ops->signal1_type_get(ctx);
+ spu_release(ctx);
+
+ return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get,
+ spufs_signal1_type_set, "%llu");
+
+static void spufs_signal2_type_set(void *data, u64 val)
+{
+ struct spu_context *ctx = data;
+
+ spu_acquire(ctx);
+ ctx->ops->signal2_type_set(ctx, val);
+ spu_release(ctx);
+}
+
+static u64 spufs_signal2_type_get(void *data)
+{
+ struct spu_context *ctx = data;
+ u64 ret;
+
+ spu_acquire(ctx);
+ ret = ctx->ops->signal2_type_get(ctx);
+ spu_release(ctx);
+
+ return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
+ spufs_signal2_type_set, "%llu");
+
+static void spufs_npc_set(void *data, u64 val)
+{
+ struct spu_context *ctx = data;
+ spu_acquire(ctx);
+ ctx->ops->npc_write(ctx, val);
+ spu_release(ctx);
+}
+
+static u64 spufs_npc_get(void *data)
+{
+ struct spu_context *ctx = data;
+ u64 ret;
+ spu_acquire(ctx);
+ ret = ctx->ops->npc_read(ctx);
+ spu_release(ctx);
+ return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, "%llx\n")
+
+static void spufs_decr_set(void *data, u64 val)
+{
+ struct spu_context *ctx = data;
+ struct spu_lscsa *lscsa = ctx->csa.lscsa;
+ spu_acquire_saved(ctx);
+ lscsa->decr.slot[0] = (u32) val;
+ spu_release(ctx);
+}
+
+static u64 spufs_decr_get(void *data)
+{
+ struct spu_context *ctx = data;
+ struct spu_lscsa *lscsa = ctx->csa.lscsa;
+ u64 ret;
+ spu_acquire_saved(ctx);
+ ret = lscsa->decr.slot[0];
+ spu_release(ctx);
+ return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set,
+ "%llx\n")
+
+static void spufs_decr_status_set(void *data, u64 val)
+{
+ struct spu_context *ctx = data;
+ struct spu_lscsa *lscsa = ctx->csa.lscsa;
+ spu_acquire_saved(ctx);
+ lscsa->decr_status.slot[0] = (u32) val;
+ spu_release(ctx);
+}
+
+static u64 spufs_decr_status_get(void *data)
+{
+ struct spu_context *ctx = data;
+ struct spu_lscsa *lscsa = ctx->csa.lscsa;
+ u64 ret;
+ spu_acquire_saved(ctx);
+ ret = lscsa->decr_status.slot[0];
+ spu_release(ctx);
+ return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get,
+ spufs_decr_status_set, "%llx\n")
+
+static void spufs_spu_tag_mask_set(void *data, u64 val)
+{
+ struct spu_context *ctx = data;
+ struct spu_lscsa *lscsa = ctx->csa.lscsa;
+ spu_acquire_saved(ctx);
+ lscsa->tag_mask.slot[0] = (u32) val;
+ spu_release(ctx);
+}
+
+static u64 spufs_spu_tag_mask_get(void *data)
+{
+ struct spu_context *ctx = data;
+ struct spu_lscsa *lscsa = ctx->csa.lscsa;
+ u64 ret;
+ spu_acquire_saved(ctx);
+ ret = lscsa->tag_mask.slot[0];
+ spu_release(ctx);
+ return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(spufs_spu_tag_mask_ops, spufs_spu_tag_mask_get,
+ spufs_spu_tag_mask_set, "%llx\n")
+
+static void spufs_event_mask_set(void *data, u64 val)
+{
+ struct spu_context *ctx = data;
+ struct spu_lscsa *lscsa = ctx->csa.lscsa;
+ spu_acquire_saved(ctx);
+ lscsa->event_mask.slot[0] = (u32) val;
+ spu_release(ctx);
+}
+
+static u64 spufs_event_mask_get(void *data)
+{
+ struct spu_context *ctx = data;
+ struct spu_lscsa *lscsa = ctx->csa.lscsa;
+ u64 ret;
+ spu_acquire_saved(ctx);
+ ret = lscsa->event_mask.slot[0];
+ spu_release(ctx);
+ return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get,
+ spufs_event_mask_set, "%llx\n")
+
+static void spufs_srr0_set(void *data, u64 val)
+{
+ struct spu_context *ctx = data;
+ struct spu_lscsa *lscsa = ctx->csa.lscsa;
+ spu_acquire_saved(ctx);
+ lscsa->srr0.slot[0] = (u32) val;
+ spu_release(ctx);
+}
+
+static u64 spufs_srr0_get(void *data)
+{
+ struct spu_context *ctx = data;
+ struct spu_lscsa *lscsa = ctx->csa.lscsa;
+ u64 ret;
+ spu_acquire_saved(ctx);
+ ret = lscsa->srr0.slot[0];
+ spu_release(ctx);
+ return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set,
+ "%llx\n")
+
+struct tree_descr spufs_dir_contents[] = {
+ { "mem", &spufs_mem_fops, 0666, },
+ { "regs", &spufs_regs_fops, 0666, },
+ { "mbox", &spufs_mbox_fops, 0444, },
+ { "ibox", &spufs_ibox_fops, 0444, },
+ { "wbox", &spufs_wbox_fops, 0222, },
+ { "mbox_stat", &spufs_mbox_stat_fops, 0444, },
+ { "ibox_stat", &spufs_ibox_stat_fops, 0444, },
+ { "wbox_stat", &spufs_wbox_stat_fops, 0444, },
+ { "signal1", &spufs_signal1_fops, 0666, },
+ { "signal2", &spufs_signal2_fops, 0666, },
+ { "signal1_type", &spufs_signal1_type, 0666, },
+ { "signal2_type", &spufs_signal2_type, 0666, },
+ { "npc", &spufs_npc_ops, 0666, },
+ { "fpcr", &spufs_fpcr_fops, 0666, },
+ { "decr", &spufs_decr_ops, 0666, },
+ { "decr_status", &spufs_decr_status_ops, 0666, },
+ { "spu_tag_mask", &spufs_spu_tag_mask_ops, 0666, },
+ { "event_mask", &spufs_event_mask_ops, 0666, },
+ { "srr0", &spufs_srr0_ops, 0666, },
+ {},
+};
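+
+/*
+ * Illustrative sketch only, not part of the interface definition above:
+ * once a context directory has been created through the SPU syscalls
+ * registered in inode.c, user space drives the context through these
+ * files.  Assuming spufs is mounted at /spu and the context is named
+ * "ctx", a word is delivered to the SPU inbound mailbox with
+ *
+ *	fd = open("/spu/ctx/wbox", O_WRONLY);
+ *	write(fd, &data, 4);
+ *
+ * and the outbound mailbox is drained with 4-byte reads of "mbox",
+ * which return -EAGAIN while the mailbox is empty.
+ */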
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
new file mode 100644
index 00000000000..5445719bff7
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -0,0 +1,255 @@
+/* hw_ops.c - query/set operations on active SPU context.
+ *
+ * Copyright (C) IBM 2005
+ * Author: Mark Nutter <mnutter@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+
+#include <asm/io.h>
+#include <asm/spu.h>
+#include <asm/spu_csa.h>
+#include <asm/mmu_context.h>
+#include "spufs.h"
+
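+/*
+ * Layout of the mailbox status register (mb_stat_R) as used below:
+ * the low byte counts entries in the SPU outbound mailbox, the second
+ * byte counts free slots in the SPU inbound (wbox) mailbox, and the
+ * third byte counts entries in the outbound interrupt mailbox (ibox).
+ */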
+static int spu_hw_mbox_read(struct spu_context *ctx, u32 * data)
+{
+ struct spu *spu = ctx->spu;
+ struct spu_problem __iomem *prob = spu->problem;
+ u32 mbox_stat;
+ int ret = 0;
+
+ spin_lock_irq(&spu->register_lock);
+ mbox_stat = in_be32(&prob->mb_stat_R);
+ if (mbox_stat & 0x0000ff) {
+ *data = in_be32(&prob->pu_mb_R);
+ ret = 4;
+ }
+ spin_unlock_irq(&spu->register_lock);
+ return ret;
+}
+
+static u32 spu_hw_mbox_stat_read(struct spu_context *ctx)
+{
+ return in_be32(&ctx->spu->problem->mb_stat_R);
+}
+
+static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
+ unsigned int events)
+{
+ struct spu *spu = ctx->spu;
+ int ret = 0;
+ u32 stat;
+
+ spin_lock_irq(&spu->register_lock);
+ stat = in_be32(&spu->problem->mb_stat_R);
+
+ /* if the requested event is there, return the poll
+ mask, otherwise enable the interrupt to get notified,
+ but first mark any pending interrupts as done so
+ we don't get woken up unnecessarily */
+
+ if (events & (POLLIN | POLLRDNORM)) {
+ if (stat & 0xff0000)
+ ret |= POLLIN | POLLRDNORM;
+ else {
+ spu_int_stat_clear(spu, 2, 0x1);
+ spu_int_mask_or(spu, 2, 0x1);
+ }
+ }
+ if (events & (POLLOUT | POLLWRNORM)) {
+ if (stat & 0x00ff00)
+			ret |= POLLOUT | POLLWRNORM;
+ else {
+ spu_int_stat_clear(spu, 2, 0x10);
+ spu_int_mask_or(spu, 2, 0x10);
+ }
+ }
+ spin_unlock_irq(&spu->register_lock);
+ return ret;
+}
+
+static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data)
+{
+ struct spu *spu = ctx->spu;
+ struct spu_problem __iomem *prob = spu->problem;
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+ int ret;
+
+ spin_lock_irq(&spu->register_lock);
+ if (in_be32(&prob->mb_stat_R) & 0xff0000) {
+ /* read the first available word */
+ *data = in_be64(&priv2->puint_mb_R);
+ ret = 4;
+ } else {
+ /* make sure we get woken up by the interrupt */
+ spu_int_mask_or(spu, 2, 0x1);
+ ret = 0;
+ }
+ spin_unlock_irq(&spu->register_lock);
+ return ret;
+}
+
+static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
+{
+ struct spu *spu = ctx->spu;
+ struct spu_problem __iomem *prob = spu->problem;
+ int ret;
+
+ spin_lock_irq(&spu->register_lock);
+ if (in_be32(&prob->mb_stat_R) & 0x00ff00) {
+ /* we have space to write wbox_data to */
+ out_be32(&prob->spu_mb_W, data);
+ ret = 4;
+ } else {
+ /* make sure we get woken up by the interrupt when space
+ becomes available */
+ spu_int_mask_or(spu, 2, 0x10);
+ ret = 0;
+ }
+ spin_unlock_irq(&spu->register_lock);
+ return ret;
+}
+
+static u32 spu_hw_signal1_read(struct spu_context *ctx)
+{
+ return in_be32(&ctx->spu->problem->signal_notify1);
+}
+
+static void spu_hw_signal1_write(struct spu_context *ctx, u32 data)
+{
+ out_be32(&ctx->spu->problem->signal_notify1, data);
+}
+
+static u32 spu_hw_signal2_read(struct spu_context *ctx)
+{
+	return in_be32(&ctx->spu->problem->signal_notify2);
+}
+
+static void spu_hw_signal2_write(struct spu_context *ctx, u32 data)
+{
+ out_be32(&ctx->spu->problem->signal_notify2, data);
+}
+
+static void spu_hw_signal1_type_set(struct spu_context *ctx, u64 val)
+{
+ struct spu *spu = ctx->spu;
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+ u64 tmp;
+
+ spin_lock_irq(&spu->register_lock);
+ tmp = in_be64(&priv2->spu_cfg_RW);
+ if (val)
+ tmp |= 1;
+ else
+ tmp &= ~1;
+ out_be64(&priv2->spu_cfg_RW, tmp);
+ spin_unlock_irq(&spu->register_lock);
+}
+
+static u64 spu_hw_signal1_type_get(struct spu_context *ctx)
+{
+ return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 1) != 0);
+}
+
+static void spu_hw_signal2_type_set(struct spu_context *ctx, u64 val)
+{
+ struct spu *spu = ctx->spu;
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+ u64 tmp;
+
+ spin_lock_irq(&spu->register_lock);
+ tmp = in_be64(&priv2->spu_cfg_RW);
+ if (val)
+ tmp |= 2;
+ else
+ tmp &= ~2;
+ out_be64(&priv2->spu_cfg_RW, tmp);
+ spin_unlock_irq(&spu->register_lock);
+}
+
+static u64 spu_hw_signal2_type_get(struct spu_context *ctx)
+{
+ return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 2) != 0);
+}
+
+static u32 spu_hw_npc_read(struct spu_context *ctx)
+{
+ return in_be32(&ctx->spu->problem->spu_npc_RW);
+}
+
+static void spu_hw_npc_write(struct spu_context *ctx, u32 val)
+{
+ out_be32(&ctx->spu->problem->spu_npc_RW, val);
+}
+
+static u32 spu_hw_status_read(struct spu_context *ctx)
+{
+ return in_be32(&ctx->spu->problem->spu_status_R);
+}
+
+static char *spu_hw_get_ls(struct spu_context *ctx)
+{
+ return ctx->spu->local_store;
+}
+
+static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
+{
+ eieio();
+ out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
+}
+
+static void spu_hw_runcntl_stop(struct spu_context *ctx)
+{
+ spin_lock_irq(&ctx->spu->register_lock);
+ out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP);
+ while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING)
+ cpu_relax();
+ spin_unlock_irq(&ctx->spu->register_lock);
+}
+
+struct spu_context_ops spu_hw_ops = {
+ .mbox_read = spu_hw_mbox_read,
+ .mbox_stat_read = spu_hw_mbox_stat_read,
+ .mbox_stat_poll = spu_hw_mbox_stat_poll,
+ .ibox_read = spu_hw_ibox_read,
+ .wbox_write = spu_hw_wbox_write,
+ .signal1_read = spu_hw_signal1_read,
+ .signal1_write = spu_hw_signal1_write,
+ .signal2_read = spu_hw_signal2_read,
+ .signal2_write = spu_hw_signal2_write,
+ .signal1_type_set = spu_hw_signal1_type_set,
+ .signal1_type_get = spu_hw_signal1_type_get,
+ .signal2_type_set = spu_hw_signal2_type_set,
+ .signal2_type_get = spu_hw_signal2_type_get,
+ .npc_read = spu_hw_npc_read,
+ .npc_write = spu_hw_npc_write,
+ .status_read = spu_hw_status_read,
+ .get_ls = spu_hw_get_ls,
+ .runcntl_write = spu_hw_runcntl_write,
+ .runcntl_stop = spu_hw_runcntl_stop,
+};
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
new file mode 100644
index 00000000000..b3962c3a034
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -0,0 +1,486 @@
+/*
+ * SPU file system
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/backing-dev.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/pagemap.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/parser.h>
+
+#include <asm/io.h>
+#include <asm/semaphore.h>
+#include <asm/spu.h>
+#include <asm/uaccess.h>
+
+#include "spufs.h"
+
+static kmem_cache_t *spufs_inode_cache;
+
+static struct inode *
+spufs_alloc_inode(struct super_block *sb)
+{
+ struct spufs_inode_info *ei;
+
+ ei = kmem_cache_alloc(spufs_inode_cache, SLAB_KERNEL);
+ if (!ei)
+ return NULL;
+ return &ei->vfs_inode;
+}
+
+static void
+spufs_destroy_inode(struct inode *inode)
+{
+ kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
+}
+
+static void
+spufs_init_once(void *p, kmem_cache_t * cachep, unsigned long flags)
+{
+ struct spufs_inode_info *ei = p;
+
+ if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+ SLAB_CTOR_CONSTRUCTOR) {
+ inode_init_once(&ei->vfs_inode);
+ }
+}
+
+static struct inode *
+spufs_new_inode(struct super_block *sb, int mode)
+{
+ struct inode *inode;
+
+ inode = new_inode(sb);
+ if (!inode)
+ goto out;
+
+ inode->i_mode = mode;
+ inode->i_uid = current->fsuid;
+ inode->i_gid = current->fsgid;
+ inode->i_blksize = PAGE_CACHE_SIZE;
+ inode->i_blocks = 0;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+out:
+ return inode;
+}
+
+static int
+spufs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ struct inode *inode = dentry->d_inode;
+
+ if ((attr->ia_valid & ATTR_SIZE) &&
+ (attr->ia_size != inode->i_size))
+ return -EINVAL;
+ return inode_setattr(inode, attr);
+}
+
+
+static int
+spufs_new_file(struct super_block *sb, struct dentry *dentry,
+ struct file_operations *fops, int mode,
+ struct spu_context *ctx)
+{
+ static struct inode_operations spufs_file_iops = {
+ .setattr = spufs_setattr,
+ };
+ struct inode *inode;
+ int ret;
+
+ ret = -ENOSPC;
+ inode = spufs_new_inode(sb, S_IFREG | mode);
+ if (!inode)
+ goto out;
+
+ ret = 0;
+ inode->i_op = &spufs_file_iops;
+ inode->i_fop = fops;
+ inode->u.generic_ip = SPUFS_I(inode)->i_ctx = get_spu_context(ctx);
+ d_add(dentry, inode);
+out:
+ return ret;
+}
+
+static void
+spufs_delete_inode(struct inode *inode)
+{
+ if (SPUFS_I(inode)->i_ctx)
+ put_spu_context(SPUFS_I(inode)->i_ctx);
+ clear_inode(inode);
+}
+
+static void spufs_prune_dir(struct dentry *dir)
+{
+ struct dentry *dentry, *tmp;
+ mutex_lock(&dir->d_inode->i_mutex);
+ list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
+ spin_lock(&dcache_lock);
+ spin_lock(&dentry->d_lock);
+ if (!(d_unhashed(dentry)) && dentry->d_inode) {
+ dget_locked(dentry);
+ __d_drop(dentry);
+ spin_unlock(&dentry->d_lock);
+ simple_unlink(dir->d_inode, dentry);
+ spin_unlock(&dcache_lock);
+ dput(dentry);
+ } else {
+ spin_unlock(&dentry->d_lock);
+ spin_unlock(&dcache_lock);
+ }
+ }
+ shrink_dcache_parent(dir);
+ mutex_unlock(&dir->d_inode->i_mutex);
+}
+
+static int spufs_rmdir(struct inode *root, struct dentry *dir_dentry)
+{
+ struct spu_context *ctx;
+
+ /* remove all entries */
+ mutex_lock(&root->i_mutex);
+ spufs_prune_dir(dir_dentry);
+ mutex_unlock(&root->i_mutex);
+
+ /* We have to give up the mm_struct */
+ ctx = SPUFS_I(dir_dentry->d_inode)->i_ctx;
+ spu_forget(ctx);
+
+ /* XXX Do we need to hold i_mutex here ? */
+ return simple_rmdir(root, dir_dentry);
+}
+
+static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files,
+ int mode, struct spu_context *ctx)
+{
+ struct dentry *dentry;
+ int ret;
+
+ while (files->name && files->name[0]) {
+ ret = -ENOMEM;
+ dentry = d_alloc_name(dir, files->name);
+ if (!dentry)
+ goto out;
+ ret = spufs_new_file(dir->d_sb, dentry, files->ops,
+ files->mode & mode, ctx);
+ if (ret)
+ goto out;
+ files++;
+ }
+ return 0;
+out:
+ spufs_prune_dir(dir);
+ return ret;
+}
+
+static int spufs_dir_close(struct inode *inode, struct file *file)
+{
+ struct inode *dir;
+ struct dentry *dentry;
+ int ret;
+
+ dentry = file->f_dentry;
+ dir = dentry->d_parent->d_inode;
+
+ ret = spufs_rmdir(dir, dentry);
+ WARN_ON(ret);
+
+ return dcache_dir_close(inode, file);
+}
+
+struct inode_operations spufs_dir_inode_operations = {
+ .lookup = simple_lookup,
+};
+
+struct file_operations spufs_context_fops = {
+ .open = dcache_dir_open,
+ .release = spufs_dir_close,
+ .llseek = dcache_dir_lseek,
+ .read = generic_read_dir,
+ .readdir = dcache_readdir,
+ .fsync = simple_sync_file,
+};
+
+static int
+spufs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+ int ret;
+ struct inode *inode;
+ struct spu_context *ctx;
+
+ ret = -ENOSPC;
+ inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
+ if (!inode)
+ goto out;
+
+ if (dir->i_mode & S_ISGID) {
+ inode->i_gid = dir->i_gid;
+		inode->i_mode |= S_ISGID;
+ }
+ ctx = alloc_spu_context(inode->i_mapping);
+ SPUFS_I(inode)->i_ctx = ctx;
+ if (!ctx)
+ goto out_iput;
+
+ inode->i_op = &spufs_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
+ if (ret)
+ goto out_free_ctx;
+
+ d_instantiate(dentry, inode);
+ dget(dentry);
+ dir->i_nlink++;
+ dentry->d_inode->i_nlink++;
+ goto out;
+
+out_free_ctx:
+ put_spu_context(ctx);
+out_iput:
+ iput(inode);
+out:
+ return ret;
+}
+
+static int spufs_context_open(struct dentry *dentry, struct vfsmount *mnt)
+{
+ int ret;
+ struct file *filp;
+
+ ret = get_unused_fd();
+ if (ret < 0) {
+ dput(dentry);
+ mntput(mnt);
+ goto out;
+ }
+
+ filp = dentry_open(dentry, mnt, O_RDONLY);
+ if (IS_ERR(filp)) {
+ put_unused_fd(ret);
+ ret = PTR_ERR(filp);
+ goto out;
+ }
+
+ filp->f_op = &spufs_context_fops;
+ fd_install(ret, filp);
+out:
+ return ret;
+}
+
+static struct file_system_type spufs_type;
+
+long spufs_create_thread(struct nameidata *nd,
+ unsigned int flags, mode_t mode)
+{
+ struct dentry *dentry;
+ int ret;
+
+ /* need to be at the root of spufs */
+ ret = -EINVAL;
+ if (nd->dentry->d_sb->s_type != &spufs_type ||
+ nd->dentry != nd->dentry->d_sb->s_root)
+ goto out;
+
+ dentry = lookup_create(nd, 1);
+ ret = PTR_ERR(dentry);
+ if (IS_ERR(dentry))
+ goto out_dir;
+
+ ret = -EEXIST;
+ if (dentry->d_inode)
+ goto out_dput;
+
+ mode &= ~current->fs->umask;
+ ret = spufs_mkdir(nd->dentry->d_inode, dentry, mode & S_IRWXUGO);
+ if (ret)
+ goto out_dput;
+
+ /*
+	 * Take references via dget and mntget; they are released
+	 * in the error path of *_open().
+ */
+ ret = spufs_context_open(dget(dentry), mntget(nd->mnt));
+ if (ret < 0)
+ spufs_rmdir(nd->dentry->d_inode, dentry);
+
+out_dput:
+ dput(dentry);
+out_dir:
+ mutex_unlock(&nd->dentry->d_inode->i_mutex);
+out:
+ return ret;
+}
+
+/* File system initialization */
+enum {
+ Opt_uid, Opt_gid, Opt_err,
+};
+
+static match_table_t spufs_tokens = {
+ { Opt_uid, "uid=%d" },
+ { Opt_gid, "gid=%d" },
+ { Opt_err, NULL },
+};
+
+static int
+spufs_parse_options(char *options, struct inode *root)
+{
+ char *p;
+ substring_t args[MAX_OPT_ARGS];
+
+ while ((p = strsep(&options, ",")) != NULL) {
+ int token, option;
+
+ if (!*p)
+ continue;
+
+ token = match_token(p, spufs_tokens, args);
+ switch (token) {
+ case Opt_uid:
+ if (match_int(&args[0], &option))
+ return 0;
+ root->i_uid = option;
+ break;
+ case Opt_gid:
+ if (match_int(&args[0], &option))
+ return 0;
+ root->i_gid = option;
+ break;
+ default:
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static int
+spufs_create_root(struct super_block *sb, void *data)
+{
+ struct inode *inode;
+ int ret;
+
+ ret = -ENOMEM;
+ inode = spufs_new_inode(sb, S_IFDIR | 0775);
+ if (!inode)
+ goto out;
+
+ inode->i_op = &spufs_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ SPUFS_I(inode)->i_ctx = NULL;
+
+ ret = -EINVAL;
+ if (!spufs_parse_options(data, inode))
+ goto out_iput;
+
+ ret = -ENOMEM;
+ sb->s_root = d_alloc_root(inode);
+ if (!sb->s_root)
+ goto out_iput;
+
+ return 0;
+out_iput:
+ iput(inode);
+out:
+ return ret;
+}
+
+static int
+spufs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ static struct super_operations s_ops = {
+ .alloc_inode = spufs_alloc_inode,
+ .destroy_inode = spufs_destroy_inode,
+ .statfs = simple_statfs,
+ .delete_inode = spufs_delete_inode,
+ .drop_inode = generic_delete_inode,
+ };
+
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = SPUFS_MAGIC;
+ sb->s_op = &s_ops;
+
+ return spufs_create_root(sb, data);
+}
+
+static struct super_block *
+spufs_get_sb(struct file_system_type *fstype, int flags,
+ const char *name, void *data)
+{
+ return get_sb_single(fstype, flags, data, spufs_fill_super);
+}
+
+static struct file_system_type spufs_type = {
+ .owner = THIS_MODULE,
+ .name = "spufs",
+ .get_sb = spufs_get_sb,
+ .kill_sb = kill_litter_super,
+};
+
+static int spufs_init(void)
+{
+ int ret;
+ ret = -ENOMEM;
+ spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
+ sizeof(struct spufs_inode_info), 0,
+ SLAB_HWCACHE_ALIGN, spufs_init_once, NULL);
+
+ if (!spufs_inode_cache)
+ goto out;
+ if (spu_sched_init() != 0) {
+ kmem_cache_destroy(spufs_inode_cache);
+ goto out;
+ }
+ ret = register_filesystem(&spufs_type);
+ if (ret)
+ goto out_cache;
+ ret = register_spu_syscalls(&spufs_calls);
+ if (ret)
+ goto out_fs;
+ return 0;
+out_fs:
+ unregister_filesystem(&spufs_type);
+out_cache:
+ kmem_cache_destroy(spufs_inode_cache);
+out:
+ return ret;
+}
+module_init(spufs_init);
+
+static void spufs_exit(void)
+{
+ spu_sched_exit();
+ unregister_spu_syscalls(&spufs_calls);
+ unregister_filesystem(&spufs_type);
+ kmem_cache_destroy(spufs_inode_cache);
+}
+module_exit(spufs_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
+
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
new file mode 100644
index 00000000000..18ea8866c61
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -0,0 +1,131 @@
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+
+#include <asm/spu.h>
+
+#include "spufs.h"
+
+/* interrupt-level stop callback function. */
+void spufs_stop_callback(struct spu *spu)
+{
+ struct spu_context *ctx = spu->ctx;
+
+ wake_up_all(&ctx->stop_wq);
+}
+
+static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
+{
+ struct spu *spu;
+ u64 pte_fault;
+
+ *stat = ctx->ops->status_read(ctx);
+ if (ctx->state != SPU_STATE_RUNNABLE)
+ return 1;
+ spu = ctx->spu;
+ pte_fault = spu->dsisr &
+ (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
+ return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
+}
+
+static inline int spu_run_init(struct spu_context *ctx, u32 * npc,
+ u32 * status)
+{
+ int ret;
+
+ if ((ret = spu_acquire_runnable(ctx)) != 0)
+ return ret;
+ ctx->ops->npc_write(ctx, *npc);
+ ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
+ return 0;
+}
+
+static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
+ u32 * status)
+{
+ int ret = 0;
+
+ *status = ctx->ops->status_read(ctx);
+ *npc = ctx->ops->npc_read(ctx);
+ spu_release(ctx);
+
+ if (signal_pending(current))
+ ret = -ERESTARTSYS;
+ if (unlikely(current->ptrace & PT_PTRACED)) {
+ if ((*status & SPU_STATUS_STOPPED_BY_STOP)
+ && (*status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
+ force_sig(SIGTRAP, current);
+ ret = -ERESTARTSYS;
+ }
+ }
+ return ret;
+}
+
+static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
+ u32 *status)
+{
+ int ret;
+
+ if ((ret = spu_run_fini(ctx, npc, status)) != 0)
+ return ret;
+ if (*status & (SPU_STATUS_STOPPED_BY_STOP |
+ SPU_STATUS_STOPPED_BY_HALT)) {
+ return *status;
+ }
+ if ((ret = spu_run_init(ctx, npc, status)) != 0)
+ return ret;
+ return 0;
+}
+
+static inline int spu_process_events(struct spu_context *ctx)
+{
+ struct spu *spu = ctx->spu;
+ u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED;
+ int ret = 0;
+
+ if (spu->dsisr & pte_fault)
+ ret = spu_irq_class_1_bottom(spu);
+ if (spu->class_0_pending)
+ ret = spu_irq_class_0_bottom(spu);
+ if (!ret && signal_pending(current))
+ ret = -ERESTARTSYS;
+ return ret;
+}
+
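+/*
+ * Main run loop: start the SPU at *npc and sleep until it stops.  Page
+ * faults and class 0 events are handled from here; if the context lost
+ * its SPU in the meantime, it is reacquired and restarted.  The loop
+ * ends when the SPU stops by stop-and-signal or halt, or on a signal.
+ */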
+long spufs_run_spu(struct file *file, struct spu_context *ctx,
+ u32 * npc, u32 * status)
+{
+ int ret;
+
+ if (down_interruptible(&ctx->run_sema))
+ return -ERESTARTSYS;
+
+ ret = spu_run_init(ctx, npc, status);
+ if (ret)
+ goto out;
+
+ do {
+ ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, status));
+ if (unlikely(ret))
+ break;
+ if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
+ ret = spu_reacquire_runnable(ctx, npc, status);
+ if (ret)
+ goto out;
+ continue;
+ }
+ ret = spu_process_events(ctx);
+
+ } while (!ret && !(*status & (SPU_STATUS_STOPPED_BY_STOP |
+ SPU_STATUS_STOPPED_BY_HALT)));
+
+ ctx->ops->runcntl_stop(ctx);
+ ret = spu_run_fini(ctx, npc, status);
+ if (!ret)
+ ret = *status;
+ spu_yield(ctx);
+
+out:
+ up(&ctx->run_sema);
+ return ret;
+}
+
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
new file mode 100644
index 00000000000..963182fbd1a
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -0,0 +1,461 @@
+/* sched.c - SPU scheduler.
+ *
+ * Copyright (C) IBM 2005
+ * Author: Mark Nutter <mnutter@us.ibm.com>
+ *
+ * SPU scheduler, based on Linux thread priority. For now use
+ * a simple "cooperative" yield model with no preemption. SPU
+ * scheduling will eventually be preemptive: When a thread with
+ * a higher static priority gets ready to run, then an active SPU
+ * context will be preempted and returned to the waitq.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#undef DEBUG
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/completion.h>
+#include <linux/vmalloc.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/spu.h>
+#include <asm/spu_csa.h>
+#include "spufs.h"
+
+#define SPU_MIN_TIMESLICE (100 * HZ / 1000)
+
+#define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)
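+/*
+ * Per-runqueue priority bookkeeping: one wait queue per priority level
+ * plus a bitmap of levels that currently have blocked waiters, so that
+ * an idle SPU can be handed to the best-priority waiter first.
+ */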
+struct spu_prio_array {
+ atomic_t nr_blocked;
+ unsigned long bitmap[SPU_BITMAP_SIZE];
+ wait_queue_head_t waitq[MAX_PRIO];
+};
+
+/* spu_runqueue - This is the main runqueue data structure for SPUs. */
+struct spu_runqueue {
+ struct semaphore sem;
+ unsigned long nr_active;
+ unsigned long nr_idle;
+ unsigned long nr_switches;
+ struct list_head active_list;
+ struct list_head idle_list;
+ struct spu_prio_array prio;
+};
+
+static struct spu_runqueue *spu_runqueues = NULL;
+
+static inline struct spu_runqueue *spu_rq(void)
+{
+ /* Future: make this a per-NODE array,
+ * and use cpu_to_node(smp_processor_id())
+ */
+ return spu_runqueues;
+}
+
+static inline struct spu *del_idle(struct spu_runqueue *rq)
+{
+ struct spu *spu;
+
+ BUG_ON(rq->nr_idle <= 0);
+ BUG_ON(list_empty(&rq->idle_list));
+ /* Future: Move SPU out of low-power SRI state. */
+ spu = list_entry(rq->idle_list.next, struct spu, sched_list);
+ list_del_init(&spu->sched_list);
+ rq->nr_idle--;
+ return spu;
+}
+
+static inline void del_active(struct spu_runqueue *rq, struct spu *spu)
+{
+ BUG_ON(rq->nr_active <= 0);
+ BUG_ON(list_empty(&rq->active_list));
+ list_del_init(&spu->sched_list);
+ rq->nr_active--;
+}
+
+static inline void add_idle(struct spu_runqueue *rq, struct spu *spu)
+{
+ /* Future: Put SPU into low-power SRI state. */
+ list_add_tail(&spu->sched_list, &rq->idle_list);
+ rq->nr_idle++;
+}
+
+static inline void add_active(struct spu_runqueue *rq, struct spu *spu)
+{
+ rq->nr_active++;
+ rq->nr_switches++;
+ list_add_tail(&spu->sched_list, &rq->active_list);
+}
+
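+/*
+ * Wake exactly one waiter at the best blocked priority level once an
+ * SPU is available.
+ */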
+static void prio_wakeup(struct spu_runqueue *rq)
+{
+ if (atomic_read(&rq->prio.nr_blocked) && rq->nr_idle) {
+ int best = sched_find_first_bit(rq->prio.bitmap);
+ if (best < MAX_PRIO) {
+ wait_queue_head_t *wq = &rq->prio.waitq[best];
+ wake_up_interruptible_nr(wq, 1);
+ }
+ }
+}
+
+static void prio_wait(struct spu_runqueue *rq, struct spu_context *ctx,
+ u64 flags)
+{
+ int prio = current->prio;
+ wait_queue_head_t *wq = &rq->prio.waitq[prio];
+ DEFINE_WAIT(wait);
+
+ __set_bit(prio, rq->prio.bitmap);
+ atomic_inc(&rq->prio.nr_blocked);
+ prepare_to_wait_exclusive(wq, &wait, TASK_INTERRUPTIBLE);
+ if (!signal_pending(current)) {
+ up(&rq->sem);
+ up_write(&ctx->state_sema);
+ pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
+ current->pid, current->prio);
+ schedule();
+ down_write(&ctx->state_sema);
+ down(&rq->sem);
+ }
+ finish_wait(wq, &wait);
+ atomic_dec(&rq->prio.nr_blocked);
+ if (!waitqueue_active(wq))
+ __clear_bit(prio, rq->prio.bitmap);
+}
+
+static inline int is_best_prio(struct spu_runqueue *rq)
+{
+ int best_prio;
+
+ best_prio = sched_find_first_bit(rq->prio.bitmap);
+ return (current->prio < best_prio) ? 1 : 0;
+}
+
+static inline void mm_needs_global_tlbie(struct mm_struct *mm)
+{
+ /* Global TLBIE broadcast required with SPEs. */
+#if (NR_CPUS > 1)
+ __cpus_setall(&mm->cpu_vm_mask, NR_CPUS);
+#else
+ __cpus_setall(&mm->cpu_vm_mask, NR_CPUS+1); /* is this ok? */
+#endif
+}
+
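+/*
+ * bind_context() loads a context onto a physical SPU: it wires up the
+ * owning mm and the mailbox/stop callbacks and restores the register
+ * state from the CSA.  unbind_context() below is the exact reverse.
+ */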
+static inline void bind_context(struct spu *spu, struct spu_context *ctx)
+{
+ pr_debug("%s: pid=%d SPU=%d\n", __FUNCTION__, current->pid,
+ spu->number);
+ spu->ctx = ctx;
+ spu->flags = 0;
+ ctx->flags = 0;
+ ctx->spu = spu;
+ ctx->ops = &spu_hw_ops;
+ spu->pid = current->pid;
+ spu->prio = current->prio;
+ spu->mm = ctx->owner;
+ mm_needs_global_tlbie(spu->mm);
+ spu->ibox_callback = spufs_ibox_callback;
+ spu->wbox_callback = spufs_wbox_callback;
+ spu->stop_callback = spufs_stop_callback;
+ mb();
+ spu_unmap_mappings(ctx);
+ spu_restore(&ctx->csa, spu);
+ spu->timestamp = jiffies;
+}
+
+static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
+{
+ pr_debug("%s: unbind pid=%d SPU=%d\n", __FUNCTION__,
+ spu->pid, spu->number);
+ spu_unmap_mappings(ctx);
+ spu_save(&ctx->csa, spu);
+ spu->timestamp = jiffies;
+ ctx->state = SPU_STATE_SAVED;
+ spu->ibox_callback = NULL;
+ spu->wbox_callback = NULL;
+ spu->stop_callback = NULL;
+ spu->mm = NULL;
+ spu->pid = 0;
+ spu->prio = MAX_PRIO;
+ ctx->ops = &spu_backing_ops;
+ ctx->spu = NULL;
+ ctx->flags = 0;
+ spu->flags = 0;
+ spu->ctx = NULL;
+}
+
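+/*
+ * Cooperative preemption: when no SPU is idle and the current thread
+ * has a better priority than a running context, check_preempt_active()
+ * marks the worst-priority context and schedules the reaper, which
+ * saves that context off its SPU once its minimum timeslice is up.
+ */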
+static void spu_reaper(void *data)
+{
+ struct spu_context *ctx = data;
+ struct spu *spu;
+
+ down_write(&ctx->state_sema);
+ spu = ctx->spu;
+ if (spu && test_bit(SPU_CONTEXT_PREEMPT, &ctx->flags)) {
+ if (atomic_read(&spu->rq->prio.nr_blocked)) {
+ pr_debug("%s: spu=%d\n", __func__, spu->number);
+ ctx->ops->runcntl_stop(ctx);
+ spu_deactivate(ctx);
+ wake_up_all(&ctx->stop_wq);
+ } else {
+ clear_bit(SPU_CONTEXT_PREEMPT, &ctx->flags);
+ }
+ }
+ up_write(&ctx->state_sema);
+ put_spu_context(ctx);
+}
+
+static void schedule_spu_reaper(struct spu_runqueue *rq, struct spu *spu)
+{
+ struct spu_context *ctx = get_spu_context(spu->ctx);
+ unsigned long now = jiffies;
+ unsigned long expire = spu->timestamp + SPU_MIN_TIMESLICE;
+
+ set_bit(SPU_CONTEXT_PREEMPT, &ctx->flags);
+ INIT_WORK(&ctx->reap_work, spu_reaper, ctx);
+ if (time_after(now, expire))
+ schedule_work(&ctx->reap_work);
+ else
+ schedule_delayed_work(&ctx->reap_work, expire - now);
+}
+
+static void check_preempt_active(struct spu_runqueue *rq)
+{
+ struct list_head *p;
+ struct spu *worst = NULL;
+
+ list_for_each(p, &rq->active_list) {
+ struct spu *spu = list_entry(p, struct spu, sched_list);
+ struct spu_context *ctx = spu->ctx;
+ if (!test_bit(SPU_CONTEXT_PREEMPT, &ctx->flags)) {
+ if (!worst || (spu->prio > worst->prio)) {
+ worst = spu;
+ }
+ }
+ }
+ if (worst && (current->prio < worst->prio))
+ schedule_spu_reaper(rq, worst);
+}
+
+static struct spu *get_idle_spu(struct spu_context *ctx, u64 flags)
+{
+ struct spu_runqueue *rq;
+ struct spu *spu = NULL;
+
+ rq = spu_rq();
+ down(&rq->sem);
+ for (;;) {
+ if (rq->nr_idle > 0) {
+ if (is_best_prio(rq)) {
+ /* Fall through. */
+ spu = del_idle(rq);
+ break;
+ } else {
+ prio_wakeup(rq);
+ up(&rq->sem);
+ yield();
+ if (signal_pending(current)) {
+ return NULL;
+ }
+ rq = spu_rq();
+ down(&rq->sem);
+ continue;
+ }
+ } else {
+ check_preempt_active(rq);
+ prio_wait(rq, ctx, flags);
+ if (signal_pending(current)) {
+ prio_wakeup(rq);
+ spu = NULL;
+ break;
+ }
+ continue;
+ }
+ }
+ up(&rq->sem);
+ return spu;
+}
+
+static void put_idle_spu(struct spu *spu)
+{
+ struct spu_runqueue *rq = spu->rq;
+
+ down(&rq->sem);
+ add_idle(rq, spu);
+ prio_wakeup(rq);
+ up(&rq->sem);
+}
+
+static int get_active_spu(struct spu *spu)
+{
+ struct spu_runqueue *rq = spu->rq;
+ struct list_head *p;
+ struct spu *tmp;
+ int rc = 0;
+
+ down(&rq->sem);
+ list_for_each(p, &rq->active_list) {
+ tmp = list_entry(p, struct spu, sched_list);
+ if (tmp == spu) {
+ del_active(rq, spu);
+ rc = 1;
+ break;
+ }
+ }
+ up(&rq->sem);
+ return rc;
+}
+
+static void put_active_spu(struct spu *spu)
+{
+ struct spu_runqueue *rq = spu->rq;
+
+ down(&rq->sem);
+ add_active(rq, spu);
+ up(&rq->sem);
+}
+
+/* Lock order:
+ * spu_activate() & spu_deactivate() require the
+ * caller to have down_write(&ctx->state_sema).
+ *
+ * The rq->sem is briefly held (inside or outside a
+ * given ctx lock) for list management, but is never
+ * held during save/restore.
+ */
+
+int spu_activate(struct spu_context *ctx, u64 flags)
+{
+ struct spu *spu;
+
+ if (ctx->spu)
+ return 0;
+ spu = get_idle_spu(ctx, flags);
+ if (!spu)
+ return (signal_pending(current)) ? -ERESTARTSYS : -EAGAIN;
+ bind_context(spu, ctx);
+ /*
+ * We're likely to wait for interrupts on the same
+ * CPU that we are now on, so send them here.
+ */
+ spu_irq_setaffinity(spu, raw_smp_processor_id());
+ put_active_spu(spu);
+ return 0;
+}
+
+void spu_deactivate(struct spu_context *ctx)
+{
+ struct spu *spu;
+ int needs_idle;
+
+ spu = ctx->spu;
+ if (!spu)
+ return;
+ needs_idle = get_active_spu(spu);
+ unbind_context(spu, ctx);
+ if (needs_idle)
+ put_idle_spu(spu);
+}
+
+void spu_yield(struct spu_context *ctx)
+{
+ struct spu *spu;
+ int need_yield = 0;
+
+ down_write(&ctx->state_sema);
+ spu = ctx->spu;
+ if (spu && (sched_find_first_bit(spu->rq->prio.bitmap) < MAX_PRIO)) {
+ pr_debug("%s: yielding SPU %d\n", __FUNCTION__, spu->number);
+ spu_deactivate(ctx);
+ ctx->state = SPU_STATE_SAVED;
+ need_yield = 1;
+ } else if (spu) {
+ spu->prio = MAX_PRIO;
+ }
+ up_write(&ctx->state_sema);
+ if (unlikely(need_yield))
+ yield();
+}
+
+int __init spu_sched_init(void)
+{
+ struct spu_runqueue *rq;
+ struct spu *spu;
+ int i;
+
+ rq = spu_runqueues = kmalloc(sizeof(struct spu_runqueue), GFP_KERNEL);
+ if (!rq) {
+ printk(KERN_WARNING "%s: Unable to allocate runqueues.\n",
+ __FUNCTION__);
+ return 1;
+ }
+ memset(rq, 0, sizeof(struct spu_runqueue));
+ init_MUTEX(&rq->sem);
+ INIT_LIST_HEAD(&rq->active_list);
+ INIT_LIST_HEAD(&rq->idle_list);
+ rq->nr_active = 0;
+ rq->nr_idle = 0;
+ rq->nr_switches = 0;
+ atomic_set(&rq->prio.nr_blocked, 0);
+ for (i = 0; i < MAX_PRIO; i++) {
+ init_waitqueue_head(&rq->prio.waitq[i]);
+ __clear_bit(i, rq->prio.bitmap);
+ }
+ __set_bit(MAX_PRIO, rq->prio.bitmap);
+ for (;;) {
+ spu = spu_alloc();
+ if (!spu)
+ break;
+ pr_debug("%s: adding SPU[%d]\n", __FUNCTION__, spu->number);
+ add_idle(rq, spu);
+ spu->rq = rq;
+ spu->timestamp = jiffies;
+ }
+ if (!rq->nr_idle) {
+ printk(KERN_WARNING "%s: No available SPUs.\n", __FUNCTION__);
+ kfree(rq);
+ return 1;
+ }
+ return 0;
+}
+
+void __exit spu_sched_exit(void)
+{
+ struct spu_runqueue *rq = spu_rq();
+ struct spu *spu;
+
+ if (!rq) {
+ printk(KERN_WARNING "%s: no runqueues!\n", __FUNCTION__);
+ return;
+ }
+ while (rq->nr_idle > 0) {
+ spu = del_idle(rq);
+ if (!spu)
+ break;
+ spu_free(spu);
+ }
+ kfree(rq);
+}
diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore.c b/arch/powerpc/platforms/cell/spufs/spu_restore.c
new file mode 100644
index 00000000000..0bf723dcd67
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/spu_restore.c
@@ -0,0 +1,336 @@
+/*
+ * spu_restore.c
+ *
+ * (C) Copyright IBM Corp. 2005
+ *
+ * SPU-side context restore sequence outlined in
+ * Synergistic Processor Element Book IV
+ *
+ * Author: Mark Nutter <mnutter@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+
+#ifndef LS_SIZE
+#define LS_SIZE 0x40000 /* 256K (in bytes) */
+#endif
+
+typedef unsigned int u32;
+typedef unsigned long long u64;
+
+#include <spu_intrinsics.h>
+#include <asm/spu_csa.h>
+#include "spu_utils.h"
+
+#define BR_INSTR 0x327fff80 /* br -4 */
+#define NOP_INSTR 0x40200000 /* nop */
+#define HEQ_INSTR 0x7b000000 /* heq $0, $0 */
+#define STOP_INSTR 0x00000000 /* stop 0x0 */
+#define ILLEGAL_INSTR 0x00800000 /* illegal instr */
+#define RESTORE_COMPLETE 0x00003ffc /* stop 0x3ffc */
+
+static inline void fetch_regs_from_mem(addr64 lscsa_ea)
+{
+ unsigned int ls = (unsigned int)&regs_spill[0];
+ unsigned int size = sizeof(regs_spill);
+ unsigned int tag_id = 0;
+ unsigned int cmd = 0x40; /* GET */
+
+ spu_writech(MFC_LSA, ls);
+ spu_writech(MFC_EAH, lscsa_ea.ui[0]);
+ spu_writech(MFC_EAL, lscsa_ea.ui[1]);
+ spu_writech(MFC_Size, size);
+ spu_writech(MFC_TagID, tag_id);
+ spu_writech(MFC_Cmd, cmd);
+}
+
+static inline void restore_upper_240kb(addr64 lscsa_ea)
+{
+ unsigned int ls = 16384;
+ unsigned int list = (unsigned int)&dma_list[0];
+ unsigned int size = sizeof(dma_list);
+ unsigned int tag_id = 0;
+ unsigned int cmd = 0x44; /* GETL */
+
+ /* Restore, Step 4:
+ * Enqueue the GETL command (tag 0) to the MFC SPU command
+ * queue to transfer the upper 240 kb of LS from CSA.
+ */
+ spu_writech(MFC_LSA, ls);
+ spu_writech(MFC_EAH, lscsa_ea.ui[0]);
+ spu_writech(MFC_EAL, list);
+ spu_writech(MFC_Size, size);
+ spu_writech(MFC_TagID, tag_id);
+ spu_writech(MFC_Cmd, cmd);
+}
+
+static inline void restore_decr(void)
+{
+ unsigned int offset;
+ unsigned int decr_running;
+ unsigned int decr;
+
+ /* Restore, Step 6:
+ * If the LSCSA "decrementer running" flag is set
+ * then write the SPU_WrDec channel with the
+ * decrementer value from LSCSA.
+ */
+ offset = LSCSA_QW_OFFSET(decr_status);
+ decr_running = regs_spill[offset].slot[0];
+ if (decr_running) {
+ offset = LSCSA_QW_OFFSET(decr);
+ decr = regs_spill[offset].slot[0];
+ spu_writech(SPU_WrDec, decr);
+ }
+}
+
+static inline void write_ppu_mb(void)
+{
+ unsigned int offset;
+ unsigned int data;
+
+ /* Restore, Step 11:
+ * Write the MFC_WrOut_MB channel with the PPU_MB
+ * data from LSCSA.
+ */
+ offset = LSCSA_QW_OFFSET(ppu_mb);
+ data = regs_spill[offset].slot[0];
+ spu_writech(SPU_WrOutMbox, data);
+}
+
+static inline void write_ppuint_mb(void)
+{
+ unsigned int offset;
+ unsigned int data;
+
+ /* Restore, Step 12:
+ * Write the MFC_WrInt_MB channel with the PPUINT_MB
+ * data from LSCSA.
+ */
+ offset = LSCSA_QW_OFFSET(ppuint_mb);
+ data = regs_spill[offset].slot[0];
+ spu_writech(SPU_WrOutIntrMbox, data);
+}
+
+static inline void restore_fpcr(void)
+{
+ unsigned int offset;
+ vector unsigned int fpcr;
+
+ /* Restore, Step 13:
+ * Restore the floating-point status and control
+ * register from the LSCSA.
+ */
+ offset = LSCSA_QW_OFFSET(fpcr);
+ fpcr = regs_spill[offset].v;
+ spu_mtfpscr(fpcr);
+}
+
+static inline void restore_srr0(void)
+{
+ unsigned int offset;
+ unsigned int srr0;
+
+ /* Restore, Step 14:
+ * Restore the SPU SRR0 data from the LSCSA.
+ */
+ offset = LSCSA_QW_OFFSET(srr0);
+ srr0 = regs_spill[offset].slot[0];
+ spu_writech(SPU_WrSRR0, srr0);
+}
+
+static inline void restore_event_mask(void)
+{
+ unsigned int offset;
+ unsigned int event_mask;
+
+ /* Restore, Step 15:
+ * Restore the SPU_RdEventMsk data from the LSCSA.
+ */
+ offset = LSCSA_QW_OFFSET(event_mask);
+ event_mask = regs_spill[offset].slot[0];
+ spu_writech(SPU_WrEventMask, event_mask);
+}
+
+static inline void restore_tag_mask(void)
+{
+ unsigned int offset;
+ unsigned int tag_mask;
+
+ /* Restore, Step 16:
+ * Restore the SPU_RdTagMsk data from the LSCSA.
+ */
+ offset = LSCSA_QW_OFFSET(tag_mask);
+ tag_mask = regs_spill[offset].slot[0];
+ spu_writech(MFC_WrTagMask, tag_mask);
+}
+
+static inline void restore_complete(void)
+{
+ extern void exit_fini(void);
+ unsigned int *exit_instrs = (unsigned int *)exit_fini;
+ unsigned int offset;
+ unsigned int stopped_status;
+ unsigned int stopped_code;
+
+ /* Restore, Step 18:
+ * Issue a stop-and-signal instruction with
+ * "good context restore" signal value.
+ *
+ * Restore, Step 19:
+ * There may be additional instructions placed
+ * here by the PPE Sequence for SPU Context
+ * Restore in order to restore the correct
+ * "stopped state".
+ *
+ * This step is handled here by analyzing the
+ * LSCSA.stopped_status and then modifying the
+ * exit() function to behave appropriately.
+ */
+
+ offset = LSCSA_QW_OFFSET(stopped_status);
+ stopped_status = regs_spill[offset].slot[0];
+ stopped_code = regs_spill[offset].slot[1];
+
+ switch (stopped_status) {
+ case SPU_STOPPED_STATUS_P_I:
+ /* SPU_Status[P,I]=1. Add illegal instruction
+ * followed by stop-and-signal instruction after
+ * end of restore code.
+ */
+ exit_instrs[0] = RESTORE_COMPLETE;
+ exit_instrs[1] = ILLEGAL_INSTR;
+ exit_instrs[2] = STOP_INSTR | stopped_code;
+ break;
+ case SPU_STOPPED_STATUS_P_H:
+ /* SPU_Status[P,H]=1. Add 'heq $0, $0' followed
+ * by stop-and-signal instruction after end of
+ * restore code.
+ */
+ exit_instrs[0] = RESTORE_COMPLETE;
+ exit_instrs[1] = HEQ_INSTR;
+ exit_instrs[2] = STOP_INSTR | stopped_code;
+ break;
+ case SPU_STOPPED_STATUS_S_P:
+		/* SPU_Status[S,P]=1.  Add stop-and-signal instruction
+		 * (carrying the original stop code) followed by 'nop'
+		 * and 'br -4' after end of restore code.
+		 */
+ exit_instrs[0] = RESTORE_COMPLETE;
+ exit_instrs[1] = STOP_INSTR | stopped_code;
+ exit_instrs[2] = NOP_INSTR;
+ exit_instrs[3] = BR_INSTR;
+ break;
+ case SPU_STOPPED_STATUS_S_I:
+ /* SPU_Status[S,I]=1. Add illegal instruction
+ * followed by 'br -4' after end of restore code.
+ */
+ exit_instrs[0] = RESTORE_COMPLETE;
+ exit_instrs[1] = ILLEGAL_INSTR;
+ exit_instrs[2] = NOP_INSTR;
+ exit_instrs[3] = BR_INSTR;
+ break;
+ case SPU_STOPPED_STATUS_I:
+ /* SPU_Status[I]=1. Add illegal instruction followed
+ * by infinite loop after end of restore sequence.
+ */
+ exit_instrs[0] = RESTORE_COMPLETE;
+ exit_instrs[1] = ILLEGAL_INSTR;
+ exit_instrs[2] = NOP_INSTR;
+ exit_instrs[3] = BR_INSTR;
+ break;
+ case SPU_STOPPED_STATUS_S:
+		/* SPU_Status[S]=1. Add two 'nop' instructions followed by 'br -4'. */
+ exit_instrs[0] = RESTORE_COMPLETE;
+ exit_instrs[1] = NOP_INSTR;
+ exit_instrs[2] = NOP_INSTR;
+ exit_instrs[3] = BR_INSTR;
+ break;
+ case SPU_STOPPED_STATUS_H:
+ /* SPU_Status[H]=1. Add 'heq $0, $0' instruction
+ * after end of restore code.
+ */
+ exit_instrs[0] = RESTORE_COMPLETE;
+ exit_instrs[1] = HEQ_INSTR;
+ exit_instrs[2] = NOP_INSTR;
+ exit_instrs[3] = BR_INSTR;
+ break;
+ case SPU_STOPPED_STATUS_P:
+ /* SPU_Status[P]=1. Add stop-and-signal instruction
+ * after end of restore code.
+ */
+ exit_instrs[0] = RESTORE_COMPLETE;
+ exit_instrs[1] = STOP_INSTR | stopped_code;
+ break;
+ case SPU_STOPPED_STATUS_R:
+ /* SPU_Status[I,S,H,P,R]=0. Add infinite loop. */
+ exit_instrs[0] = RESTORE_COMPLETE;
+ exit_instrs[1] = NOP_INSTR;
+ exit_instrs[2] = NOP_INSTR;
+ exit_instrs[3] = BR_INSTR;
+ break;
+ default:
+		/* SPU_Status[R]=1. No additional instructions. */
+ break;
+ }
+ spu_sync();
+}
+
+/**
+ * main - entry point for SPU-side context restore.
+ *
+ * This code deviates from the documented sequence in the
+ * following aspects:
+ *
+ * 1. The EA for LSCSA is passed from PPE in the
+ * signal notification channels.
+ * 2. The register spill area is pulled by SPU
+ * into LS, rather than pushed by PPE.
+ * 3. All 128 registers are restored by exit().
+ * 4. The exit() function is modified at run
+ * time in order to properly restore the
+ * SPU_Status register.
+ */
+int main()
+{
+ addr64 lscsa_ea;
+
+ lscsa_ea.ui[0] = spu_readch(SPU_RdSigNotify1);
+ lscsa_ea.ui[1] = spu_readch(SPU_RdSigNotify2);
+ fetch_regs_from_mem(lscsa_ea);
+
+ set_event_mask(); /* Step 1. */
+ set_tag_mask(); /* Step 2. */
+ build_dma_list(lscsa_ea); /* Step 3. */
+ restore_upper_240kb(lscsa_ea); /* Step 4. */
+ /* Step 5: done by 'exit'. */
+ restore_decr(); /* Step 6. */
+ enqueue_putllc(lscsa_ea); /* Step 7. */
+ set_tag_update(); /* Step 8. */
+ read_tag_status(); /* Step 9. */
+ read_llar_status(); /* Step 10. */
+ write_ppu_mb(); /* Step 11. */
+ write_ppuint_mb(); /* Step 12. */
+ restore_fpcr(); /* Step 13. */
+ restore_srr0(); /* Step 14. */
+ restore_event_mask(); /* Step 15. */
+ restore_tag_mask(); /* Step 16. */
+	/* Step 17: done by 'exit'. */
+ restore_complete(); /* Step 18. */
+
+ return 0;
+}
diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore_crt0.S b/arch/powerpc/platforms/cell/spufs/spu_restore_crt0.S
new file mode 100644
index 00000000000..2905949debe
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/spu_restore_crt0.S
@@ -0,0 +1,116 @@
+/*
+ * crt0_r.S: Entry function for SPU-side context restore.
+ *
+ * Copyright (C) 2005 IBM
+ *
+ * Entry and exit function for SPU-side of the context restore
+ * sequence. Sets up an initial stack frame, then branches to
+ * 'main'. On return, restores all 128 registers from the LSCSA
+ * and exits.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <asm/spu_csa.h>
+
+.data
+.align 7
+.globl regs_spill
+regs_spill:
+.space SIZEOF_SPU_SPILL_REGS, 0x0
+
+.text
+.global _start
+_start:
+ /* Initialize the stack pointer to point to 16368
+ * (16kb-16). The back chain pointer is initialized
+ * to NULL.
+ */
+ il $0, 0
+ il $SP, 16368
+ stqd $0, 0($SP)
+
+ /* Allocate a minimum stack frame for the called main.
+ * This is needed so that main has a place to save the
+ * link register when it calls another function.
+ */
+ stqd $SP, -160($SP)
+ ai $SP, $SP, -160
+
+ /* Call the program's main function. */
+ brsl $0, main
+
+.global exit
+.global _exit
+exit:
+_exit:
+ /* SPU Context Restore, Step 5: Restore the remaining 112 GPRs. */
+ ila $3, regs_spill + 256
+restore_regs:
+ lqr $4, restore_reg_insts
+restore_reg_loop:
+ ai $4, $4, 4
+ .balignl 16, 0x40200000
+restore_reg_insts: /* must be quad-word aligned. */
+ lqd $16, 0($3)
+ lqd $17, 16($3)
+ lqd $18, 32($3)
+ lqd $19, 48($3)
+ andi $5, $4, 0x7F
+ stqr $4, restore_reg_insts
+ ai $3, $3, 64
+ brnz $5, restore_reg_loop
+
+ /* SPU Context Restore Step 17: Restore the first 16 GPRs. */
+ lqa $0, regs_spill + 0
+ lqa $1, regs_spill + 16
+ lqa $2, regs_spill + 32
+ lqa $3, regs_spill + 48
+ lqa $4, regs_spill + 64
+ lqa $5, regs_spill + 80
+ lqa $6, regs_spill + 96
+ lqa $7, regs_spill + 112
+ lqa $8, regs_spill + 128
+ lqa $9, regs_spill + 144
+ lqa $10, regs_spill + 160
+ lqa $11, regs_spill + 176
+ lqa $12, regs_spill + 192
+ lqa $13, regs_spill + 208
+ lqa $14, regs_spill + 224
+ lqa $15, regs_spill + 240
+
+ /* Under normal circumstances, the 'exit' function
+ * terminates with 'stop SPU_RESTORE_COMPLETE',
+ * indicating that the SPU-side restore code has
+ * completed.
+ *
+	 * However, it is possible that the instructions immediately
+	 * following the 'stop 0x3ffc' have been modified at run
+	 * time so as to recreate the exact SPU_Status settings
+	 * from the application, e.g. illegal instruction, halt,
+	 * etc.
+ */
+.global exit_fini
+.global _exit_fini
+exit_fini:
+_exit_fini:
+ stop SPU_RESTORE_COMPLETE
+ stop 0
+ stop 0
+ stop 0
+
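+	/* Illustrative note (not part of the original file): if the
+	 * application had been halted via 'heq' (SPU_Status[H]=1), the
+	 * code in spu_restore.c patches this block so that it
+	 * effectively reads:
+	 *
+	 *	stop	SPU_RESTORE_COMPLETE
+	 *	heq	$0, $0
+	 *	nop
+	 *	br	-4
+	 */
+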
+ /* Pad the size of this crt0.o to be multiple of 16 bytes. */
+.balignl 16, 0x0
diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped b/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped
new file mode 100644
index 00000000000..1b2355ff703
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped
@@ -0,0 +1,231 @@
+/*
+ * spu_restore_dump.h: Copyright (C) 2005 IBM.
+ * Hex-dump auto generated from spu_restore.c.
+ * Do not edit!
+ */
+static unsigned int spu_restore_code[] __page_aligned = {
+0x40800000, 0x409ff801, 0x24000080, 0x24fd8081,
+0x1cd80081, 0x33001180, 0x42030003, 0x33800284,
+0x1c010204, 0x40200000, 0x40200000, 0x40200000,
+0x34000190, 0x34004191, 0x34008192, 0x3400c193,
+0x141fc205, 0x23fffd84, 0x1c100183, 0x217ffa85,
+0x3080a000, 0x3080a201, 0x3080a402, 0x3080a603,
+0x3080a804, 0x3080aa05, 0x3080ac06, 0x3080ae07,
+0x3080b008, 0x3080b209, 0x3080b40a, 0x3080b60b,
+0x3080b80c, 0x3080ba0d, 0x3080bc0e, 0x3080be0f,
+0x00003ffc, 0x00000000, 0x00000000, 0x00000000,
+0x01a00182, 0x3ec00083, 0xb0a14103, 0x01a00204,
+0x3ec10082, 0x4202800e, 0x04000703, 0xb0a14202,
+0x21a00803, 0x3fbf028d, 0x3f20068d, 0x3fbe0682,
+0x3fe30102, 0x21a00882, 0x3f82028f, 0x3fe3078f,
+0x3fbf0784, 0x3f200204, 0x3fbe0204, 0x3fe30204,
+0x04000203, 0x21a00903, 0x40848002, 0x21a00982,
+0x40800003, 0x21a00a03, 0x40802002, 0x21a00a82,
+0x21a00083, 0x40800082, 0x21a00b02, 0x10002818,
+0x40a80002, 0x32800007, 0x4207000c, 0x18008208,
+0x40a0000b, 0x4080020a, 0x40800709, 0x00200000,
+0x42070002, 0x3ac30384, 0x1cffc489, 0x00200000,
+0x18008383, 0x38830382, 0x4cffc486, 0x3ac28185,
+0xb0408584, 0x28830382, 0x1c020387, 0x38828182,
+0xb0408405, 0x1802c408, 0x28828182, 0x217ff886,
+0x04000583, 0x21a00803, 0x3fbe0682, 0x3fe30102,
+0x04000106, 0x21a00886, 0x04000603, 0x21a00903,
+0x40803c02, 0x21a00982, 0x40800003, 0x04000184,
+0x21a00a04, 0x40802202, 0x21a00a82, 0x42028005,
+0x34208702, 0x21002282, 0x21a00804, 0x21a00886,
+0x3fbf0782, 0x3f200102, 0x3fbe0102, 0x3fe30102,
+0x21a00902, 0x40804003, 0x21a00983, 0x21a00a04,
+0x40805a02, 0x21a00a82, 0x40800083, 0x21a00b83,
+0x01a00c02, 0x01a00d83, 0x3420c282, 0x21a00e02,
+0x34210283, 0x21a00f03, 0x34200284, 0x77400200,
+0x3421c282, 0x21a00702, 0x34218283, 0x21a00083,
+0x34214282, 0x21a00b02, 0x4200480c, 0x00200000,
+0x1c010286, 0x34220284, 0x34220302, 0x0f608203,
+0x5c024204, 0x3b81810b, 0x42013c02, 0x00200000,
+0x18008185, 0x38808183, 0x3b814182, 0x21004e84,
+0x4020007f, 0x35000100, 0x000004e0, 0x000002a0,
+0x000002e8, 0x00000428, 0x00000360, 0x000002e8,
+0x000004a0, 0x00000468, 0x000003c8, 0x00000360,
+0x409ffe02, 0x30801203, 0x40800204, 0x3ec40085,
+0x10009c09, 0x3ac10606, 0xb060c105, 0x4020007f,
+0x4020007f, 0x20801203, 0x38810602, 0xb0408586,
+0x28810602, 0x32004180, 0x34204702, 0x21a00382,
+0x4020007f, 0x327fdc80, 0x409ffe02, 0x30801203,
+0x40800204, 0x3ec40087, 0x40800405, 0x00200000,
+0x40800606, 0x3ac10608, 0x3ac14609, 0x3ac1860a,
+0xb060c107, 0x20801203, 0x41004003, 0x38810602,
+0x4020007f, 0xb0408188, 0x4020007f, 0x28810602,
+0x41201002, 0x38814603, 0x10009c09, 0xb060c109,
+0x4020007f, 0x28814603, 0x41193f83, 0x38818602,
+0x60ffc003, 0xb040818a, 0x28818602, 0x32003080,
+0x409ffe02, 0x30801203, 0x40800204, 0x3ec40087,
+0x41201008, 0x10009c14, 0x40800405, 0x3ac10609,
+0x40800606, 0x3ac1460a, 0xb060c107, 0x3ac1860b,
+0x20801203, 0x38810602, 0xb0408409, 0x28810602,
+0x38814603, 0xb060c40a, 0x4020007f, 0x28814603,
+0x41193f83, 0x38818602, 0x60ffc003, 0xb040818b,
+0x28818602, 0x32002380, 0x409ffe02, 0x30801204,
+0x40800205, 0x3ec40083, 0x40800406, 0x3ac14607,
+0x3ac18608, 0xb0810103, 0x41004002, 0x20801204,
+0x4020007f, 0x38814603, 0x10009c0b, 0xb060c107,
+0x4020007f, 0x4020007f, 0x28814603, 0x38818602,
+0x4020007f, 0x4020007f, 0xb0408588, 0x28818602,
+0x4020007f, 0x32001780, 0x409ffe02, 0x1000640e,
+0x40800204, 0x30801203, 0x40800405, 0x3ec40087,
+0x40800606, 0x3ac10608, 0x3ac14609, 0x3ac1860a,
+0xb060c107, 0x20801203, 0x413d8003, 0x38810602,
+0x4020007f, 0x327fd780, 0x409ffe02, 0x10007f0c,
+0x40800205, 0x30801204, 0x40800406, 0x3ec40083,
+0x3ac14607, 0x3ac18608, 0xb0810103, 0x413d8002,
+0x20801204, 0x38814603, 0x4020007f, 0x327feb80,
+0x409ffe02, 0x30801203, 0x40800204, 0x3ec40087,
+0x40800405, 0x1000650a, 0x40800606, 0x3ac10608,
+0x3ac14609, 0x3ac1860a, 0xb060c107, 0x20801203,
+0x38810602, 0xb0408588, 0x4020007f, 0x327fc980,
+0x00400000, 0x40800003, 0x4020007f, 0x35000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
diff --git a/arch/powerpc/platforms/cell/spufs/spu_save.c b/arch/powerpc/platforms/cell/spufs/spu_save.c
new file mode 100644
index 00000000000..196033b8a57
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/spu_save.c
@@ -0,0 +1,195 @@
+/*
+ * spu_save.c
+ *
+ * (C) Copyright IBM Corp. 2005
+ *
+ * SPU-side context save sequence outlined in
+ * Synergistic Processor Element Book IV
+ *
+ * Author: Mark Nutter <mnutter@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+
+#ifndef LS_SIZE
+#define LS_SIZE 0x40000 /* 256K (in bytes) */
+#endif
+
+typedef unsigned int u32;
+typedef unsigned long long u64;
+
+#include <spu_intrinsics.h>
+#include <asm/spu_csa.h>
+#include "spu_utils.h"
+
+static inline void save_event_mask(void)
+{
+ unsigned int offset;
+
+ /* Save, Step 2:
+ * Read the SPU_RdEventMsk channel and save to the LSCSA.
+ */
+ offset = LSCSA_QW_OFFSET(event_mask);
+ regs_spill[offset].slot[0] = spu_readch(SPU_RdEventStatMask);
+}
+
+static inline void save_tag_mask(void)
+{
+ unsigned int offset;
+
+ /* Save, Step 3:
+ * Read the SPU_RdTagMsk channel and save to the LSCSA.
+ */
+ offset = LSCSA_QW_OFFSET(tag_mask);
+ regs_spill[offset].slot[0] = spu_readch(MFC_RdTagMask);
+}
+
+static inline void save_upper_240kb(addr64 lscsa_ea)
+{
+ unsigned int ls = 16384;
+ unsigned int list = (unsigned int)&dma_list[0];
+ unsigned int size = sizeof(dma_list);
+ unsigned int tag_id = 0;
+ unsigned int cmd = 0x24; /* PUTL */
+
+ /* Save, Step 7:
+ * Enqueue the PUTL command (tag 0) to the MFC SPU command
+ * queue to transfer the remaining 240 kb of LS to CSA.
+ */
+ spu_writech(MFC_LSA, ls);
+ spu_writech(MFC_EAH, lscsa_ea.ui[0]);
+ spu_writech(MFC_EAL, list);
+ spu_writech(MFC_Size, size);
+ spu_writech(MFC_TagID, tag_id);
+ spu_writech(MFC_Cmd, cmd);
+}
+
+static inline void save_fpcr(void)
+{
+ // vector unsigned int fpcr;
+ unsigned int offset;
+
+ /* Save, Step 9:
+ * Issue the floating-point status and control register
+ * read instruction, and save to the LSCSA.
+ */
+ offset = LSCSA_QW_OFFSET(fpcr);
+ regs_spill[offset].v = spu_mffpscr();
+}
+
+static inline void save_decr(void)
+{
+ unsigned int offset;
+
+ /* Save, Step 10:
+ * Read and save the SPU_RdDec channel data to
+ * the LSCSA.
+ */
+ offset = LSCSA_QW_OFFSET(decr);
+ regs_spill[offset].slot[0] = spu_readch(SPU_RdDec);
+}
+
+static inline void save_srr0(void)
+{
+ unsigned int offset;
+
+ /* Save, Step 11:
+ * Read and save the SPU_WSRR0 channel data to
+ * the LSCSA.
+ */
+ offset = LSCSA_QW_OFFSET(srr0);
+ regs_spill[offset].slot[0] = spu_readch(SPU_RdSRR0);
+}
+
+static inline void spill_regs_to_mem(addr64 lscsa_ea)
+{
+ unsigned int ls = (unsigned int)&regs_spill[0];
+ unsigned int size = sizeof(regs_spill);
+ unsigned int tag_id = 0;
+ unsigned int cmd = 0x20; /* PUT */
+
+ /* Save, Step 13:
+ * Enqueue a PUT command (tag 0) to send the LSCSA
+ * to the CSA.
+ */
+ spu_writech(MFC_LSA, ls);
+ spu_writech(MFC_EAH, lscsa_ea.ui[0]);
+ spu_writech(MFC_EAL, lscsa_ea.ui[1]);
+ spu_writech(MFC_Size, size);
+ spu_writech(MFC_TagID, tag_id);
+ spu_writech(MFC_Cmd, cmd);
+}
+
+static inline void enqueue_sync(addr64 lscsa_ea)
+{
+ unsigned int tag_id = 0;
+ unsigned int cmd = 0xCC;
+
+ /* Save, Step 14:
+ * Enqueue an MFC_SYNC command (tag 0).
+ */
+ spu_writech(MFC_TagID, tag_id);
+ spu_writech(MFC_Cmd, cmd);
+}
+
+static inline void save_complete(void)
+{
+ /* Save, Step 18:
+ * Issue a stop-and-signal instruction indicating
+ * "save complete". Note: This function will not
+ * return!!
+ */
+ spu_stop(SPU_SAVE_COMPLETE);
+}
+
+/**
+ * main - entry point for SPU-side context save.
+ *
+ * This code deviates from the documented sequence as follows:
+ *
+ * 1. The EA for LSCSA is passed from PPE in the
+ * signal notification channels.
+ * 2. All 128 registers are saved by crt0.o.
+ */
+int main()
+{
+ addr64 lscsa_ea;
+
+ lscsa_ea.ui[0] = spu_readch(SPU_RdSigNotify1);
+ lscsa_ea.ui[1] = spu_readch(SPU_RdSigNotify2);
+
+ /* Step 1: done by exit(). */
+ save_event_mask(); /* Step 2. */
+ save_tag_mask(); /* Step 3. */
+ set_event_mask(); /* Step 4. */
+ set_tag_mask(); /* Step 5. */
+ build_dma_list(lscsa_ea); /* Step 6. */
+ save_upper_240kb(lscsa_ea); /* Step 7. */
+ /* Step 8: done by exit(). */
+ save_fpcr(); /* Step 9. */
+ save_decr(); /* Step 10. */
+ save_srr0(); /* Step 11. */
+ enqueue_putllc(lscsa_ea); /* Step 12. */
+ spill_regs_to_mem(lscsa_ea); /* Step 13. */
+ enqueue_sync(lscsa_ea); /* Step 14. */
+ set_tag_update(); /* Step 15. */
+ read_tag_status(); /* Step 16. */
+ read_llar_status(); /* Step 17. */
+ save_complete(); /* Step 18. */
+
+ return 0;
+}
diff --git a/arch/powerpc/platforms/cell/spufs/spu_save_crt0.S b/arch/powerpc/platforms/cell/spufs/spu_save_crt0.S
new file mode 100644
index 00000000000..6659d6a66fa
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/spu_save_crt0.S
@@ -0,0 +1,102 @@
+/*
+ * crt0_s.S: Entry function for SPU-side context save.
+ *
+ * Copyright (C) 2005 IBM
+ *
+ * Entry function for SPU-side of the context save sequence.
+ * Saves all 128 GPRs, sets up an initial stack frame, then
+ * branches to 'main'.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <asm/spu_csa.h>
+
+.data
+.align 7
+.globl regs_spill
+regs_spill:
+.space SIZEOF_SPU_SPILL_REGS, 0x0
+
+.text
+.global _start
+_start:
+ /* SPU Context Save Step 1: Save the first 16 GPRs. */
+ stqa $0, regs_spill + 0
+ stqa $1, regs_spill + 16
+ stqa $2, regs_spill + 32
+ stqa $3, regs_spill + 48
+ stqa $4, regs_spill + 64
+ stqa $5, regs_spill + 80
+ stqa $6, regs_spill + 96
+ stqa $7, regs_spill + 112
+ stqa $8, regs_spill + 128
+ stqa $9, regs_spill + 144
+ stqa $10, regs_spill + 160
+ stqa $11, regs_spill + 176
+ stqa $12, regs_spill + 192
+ stqa $13, regs_spill + 208
+ stqa $14, regs_spill + 224
+ stqa $15, regs_spill + 240
+
+ /* SPU Context Save, Step 8: Save the remaining 112 GPRs. */
+ ila $3, regs_spill + 256
+save_regs:
+ lqr $4, save_reg_insts
+save_reg_loop:
+ ai $4, $4, 4
+ .balignl 16, 0x40200000
+save_reg_insts: /* must be quad-word aligned. */
+ stqd $16, 0($3)
+ stqd $17, 16($3)
+ stqd $18, 32($3)
+ stqd $19, 48($3)
+ andi $5, $4, 0x7F
+ stqr $4, save_reg_insts
+ ai $3, $3, 64
+ brnz $5, save_reg_loop
+
+ /* Initialize the stack pointer to point to 16368
+ * (16kb-16). The back chain pointer is initialized
+ * to NULL.
+ */
+ il $0, 0
+ il $SP, 16368
+ stqd $0, 0($SP)
+
+ /* Allocate a minimum stack frame for the called main.
+ * This is needed so that main has a place to save the
+ * link register when it calls another function.
+ */
+ stqd $SP, -160($SP)
+ ai $SP, $SP, -160
+
+ /* Call the program's main function. */
+ brsl $0, main
+
+	/* In this case main should not return; if it does,
+	 * there has been an error in the sequence. Execute
+ * stop-and-signal with code=0.
+ */
+.global exit
+.global _exit
+exit:
+_exit:
+ stop 0x0
+
+ /* Pad the size of this crt0.o to be multiple of 16 bytes. */
+.balignl 16, 0x0
+
diff --git a/arch/powerpc/platforms/cell/spufs/spu_save_dump.h_shipped b/arch/powerpc/platforms/cell/spufs/spu_save_dump.h_shipped
new file mode 100644
index 00000000000..39e54003f1d
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/spu_save_dump.h_shipped
@@ -0,0 +1,191 @@
+/*
+ * spu_save_dump.h: Copyright (C) 2005 IBM.
+ * Hex-dump auto generated from spu_save.c.
+ * Do not edit!
+ */
+static unsigned int spu_save_code[] __page_aligned = {
+0x20805000, 0x20805201, 0x20805402, 0x20805603,
+0x20805804, 0x20805a05, 0x20805c06, 0x20805e07,
+0x20806008, 0x20806209, 0x2080640a, 0x2080660b,
+0x2080680c, 0x20806a0d, 0x20806c0e, 0x20806e0f,
+0x4201c003, 0x33800184, 0x1c010204, 0x40200000,
+0x24000190, 0x24004191, 0x24008192, 0x2400c193,
+0x141fc205, 0x23fffd84, 0x1c100183, 0x217ffb85,
+0x40800000, 0x409ff801, 0x24000080, 0x24fd8081,
+0x1cd80081, 0x33000180, 0x00000000, 0x00000000,
+0x01a00182, 0x3ec00083, 0xb1c38103, 0x01a00204,
+0x3ec10082, 0x4201400d, 0xb1c38202, 0x01a00583,
+0x34218682, 0x3ed80684, 0xb0408184, 0x24218682,
+0x01a00603, 0x00200000, 0x34214682, 0x3ed40684,
+0xb0408184, 0x40800003, 0x24214682, 0x21a00083,
+0x40800082, 0x21a00b02, 0x4020007f, 0x1000251e,
+0x40a80002, 0x32800008, 0x4205c00c, 0x00200000,
+0x40a0000b, 0x3f82070f, 0x4080020a, 0x40800709,
+0x3fe3078f, 0x3fbf0783, 0x3f200183, 0x3fbe0183,
+0x3fe30187, 0x18008387, 0x4205c002, 0x3ac30404,
+0x1cffc489, 0x00200000, 0x18008403, 0x38830402,
+0x4cffc486, 0x3ac28185, 0xb0408584, 0x28830402,
+0x1c020408, 0x38828182, 0xb0408385, 0x1802c387,
+0x28828182, 0x217ff886, 0x04000582, 0x32800007,
+0x21a00802, 0x3fbf0705, 0x3f200285, 0x3fbe0285,
+0x3fe30285, 0x21a00885, 0x04000603, 0x21a00903,
+0x40803c02, 0x21a00982, 0x04000386, 0x21a00a06,
+0x40801202, 0x21a00a82, 0x73000003, 0x24200683,
+0x01a00404, 0x00200000, 0x34204682, 0x3ec40683,
+0xb0408203, 0x24204682, 0x01a00783, 0x00200000,
+0x3421c682, 0x3edc0684, 0xb0408184, 0x2421c682,
+0x21a00806, 0x21a00885, 0x3fbf0784, 0x3f200204,
+0x3fbe0204, 0x3fe30204, 0x21a00904, 0x40804002,
+0x21a00982, 0x21a00a06, 0x40805a02, 0x21a00a82,
+0x04000683, 0x21a00803, 0x21a00885, 0x21a00904,
+0x40848002, 0x21a00982, 0x21a00a06, 0x40801002,
+0x21a00a82, 0x21a00a06, 0x40806602, 0x00200000,
+0x35800009, 0x21a00a82, 0x40800083, 0x21a00b83,
+0x01a00c02, 0x01a00d83, 0x00003ffb, 0x40800003,
+0x4020007f, 0x35000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
diff --git a/arch/powerpc/platforms/cell/spufs/spu_utils.h b/arch/powerpc/platforms/cell/spufs/spu_utils.h
new file mode 100644
index 00000000000..58359feb6c9
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/spu_utils.h
@@ -0,0 +1,160 @@
+/*
+ * utils.h: Utilities for SPU-side of the context switch operation.
+ *
+ * (C) Copyright IBM 2005
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _SPU_CONTEXT_UTILS_H_
+#define _SPU_CONTEXT_UTILS_H_
+
+/*
+ * 64-bit safe EA.
+ */
+typedef union {
+ unsigned long long ull;
+ unsigned int ui[2];
+} addr64;
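+
+/*
+ * Note: ui[0] carries the upper 32 bits of the 64-bit EA (written to
+ * MFC_EAH) and ui[1] the lower 32 bits (written to MFC_EAL), as done
+ * by enqueue_putllc() below.
+ */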
+
+/*
+ * 128-bit register template.
+ */
+typedef union {
+ unsigned int slot[4];
+ vector unsigned int v;
+} spu_reg128v;
+
+/*
+ * DMA list structure.
+ */
+struct dma_list_elem {
+ unsigned int size;
+ unsigned int ea_low;
+};
+
+/*
+ * Declare storage for 8-byte aligned DMA list.
+ */
+struct dma_list_elem dma_list[15] __attribute__ ((aligned(8)));
+
+/*
+ * External definition for storage
+ * declared in crt0.
+ */
+extern spu_reg128v regs_spill[NR_SPU_SPILL_REGS];
+
+/*
+ * Compute LSCSA byte offset for a given field.
+ */
+static struct spu_lscsa *dummy = (struct spu_lscsa *)0;
+#define LSCSA_BYTE_OFFSET(_field) \
+ ((char *)(&(dummy->_field)) - (char *)(&(dummy->gprs[0].slot[0])))
+#define LSCSA_QW_OFFSET(_field) (LSCSA_BYTE_OFFSET(_field) >> 4)
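+
+/*
+ * For example, the decrementer slot of the spilled LSCSA image is
+ * addressed as regs_spill[LSCSA_QW_OFFSET(decr)].slot[0], which is
+ * how save_decr() in spu_save.c locates it.
+ */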
+
+static inline void set_event_mask(void)
+{
+ unsigned int event_mask = 0;
+
+ /* Save, Step 4:
+ * Restore, Step 1:
+ * Set the SPU_RdEventMsk channel to zero to mask
+ * all events.
+ */
+ spu_writech(SPU_WrEventMask, event_mask);
+}
+
+static inline void set_tag_mask(void)
+{
+ unsigned int tag_mask = 1;
+
+ /* Save, Step 5:
+ * Restore, Step 2:
+ * Set the SPU_WrTagMsk channel to '01' to unmask
+ * only tag group 0.
+ */
+ spu_writech(MFC_WrTagMask, tag_mask);
+}
+
+static inline void build_dma_list(addr64 lscsa_ea)
+{
+ unsigned int ea_low;
+ int i;
+
+ /* Save, Step 6:
+ * Restore, Step 3:
+ * Update the effective address for the CSA in the
+ * pre-canned DMA-list in local storage.
+ */
+ ea_low = lscsa_ea.ui[1];
+ ea_low += LSCSA_BYTE_OFFSET(ls[16384]);
+
+ for (i = 0; i < 15; i++, ea_low += 16384) {
+ dma_list[i].size = 16384;
+ dma_list[i].ea_low = ea_low;
+ }
+}
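+
+/*
+ * Note: the 15 elements of 16384 bytes each (240 kb total) supply the
+ * CSA effective addresses for the list transfer of local store above
+ * offset 16 kb, as issued by save_upper_240kb() with MFC_LSA = 16384
+ * (restore_upper_240kb() is the restore-side counterpart).
+ */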
+
+static inline void enqueue_putllc(addr64 lscsa_ea)
+{
+ unsigned int ls = 0;
+ unsigned int size = 128;
+ unsigned int tag_id = 0;
+ unsigned int cmd = 0xB4; /* PUTLLC */
+
+ /* Save, Step 12:
+ * Restore, Step 7:
+ * Send a PUTLLC (tag 0) command to the MFC using
+ * an effective address in the CSA in order to
+ * remove any possible lock-line reservation.
+ */
+ spu_writech(MFC_LSA, ls);
+ spu_writech(MFC_EAH, lscsa_ea.ui[0]);
+ spu_writech(MFC_EAL, lscsa_ea.ui[1]);
+ spu_writech(MFC_Size, size);
+ spu_writech(MFC_TagID, tag_id);
+ spu_writech(MFC_Cmd, cmd);
+}
+
+static inline void set_tag_update(void)
+{
+ unsigned int update_any = 1;
+
+ /* Save, Step 15:
+ * Restore, Step 8:
+ * Write the MFC_TagUpdate channel with '01'.
+ */
+ spu_writech(MFC_WrTagUpdate, update_any);
+}
+
+static inline void read_tag_status(void)
+{
+ /* Save, Step 16:
+ * Restore, Step 9:
+ * Read the MFC_TagStat channel data.
+ */
+ spu_readch(MFC_RdTagStat);
+}
+
+static inline void read_llar_status(void)
+{
+ /* Save, Step 17:
+ * Restore, Step 10:
+ * Read the MFC_AtomicStat channel data.
+ */
+ spu_readch(MFC_RdAtomicStat);
+}
+
+#endif /* _SPU_CONTEXT_UTILS_H_ */
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
new file mode 100644
index 00000000000..db2601f0abd
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -0,0 +1,163 @@
+/*
+ * SPU file system
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef SPUFS_H
+#define SPUFS_H
+
+#include <linux/kref.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+
+#include <asm/spu.h>
+#include <asm/spu_csa.h>
+
+/* The magic number for our file system */
+enum {
+ SPUFS_MAGIC = 0x23c9b64e,
+};
+
+struct spu_context_ops;
+
+#define SPU_CONTEXT_PREEMPT 0UL
+
+struct spu_context {
+ struct spu *spu; /* pointer to a physical SPU */
+ struct spu_state csa; /* SPU context save area. */
+ spinlock_t mmio_lock; /* protects mmio access */
+ struct address_space *local_store;/* local store backing store */
+
+ enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
+ struct rw_semaphore state_sema;
+ struct semaphore run_sema;
+
+ struct mm_struct *owner;
+
+ struct kref kref;
+ wait_queue_head_t ibox_wq;
+ wait_queue_head_t wbox_wq;
+ wait_queue_head_t stop_wq;
+ struct fasync_struct *ibox_fasync;
+ struct fasync_struct *wbox_fasync;
+ struct spu_context_ops *ops;
+ struct work_struct reap_work;
+ u64 flags;
+};
+
+/* SPU context query/set operations. */
+struct spu_context_ops {
+ int (*mbox_read) (struct spu_context * ctx, u32 * data);
+ u32(*mbox_stat_read) (struct spu_context * ctx);
+ unsigned int (*mbox_stat_poll)(struct spu_context *ctx,
+ unsigned int events);
+ int (*ibox_read) (struct spu_context * ctx, u32 * data);
+ int (*wbox_write) (struct spu_context * ctx, u32 data);
+ u32(*signal1_read) (struct spu_context * ctx);
+ void (*signal1_write) (struct spu_context * ctx, u32 data);
+ u32(*signal2_read) (struct spu_context * ctx);
+ void (*signal2_write) (struct spu_context * ctx, u32 data);
+ void (*signal1_type_set) (struct spu_context * ctx, u64 val);
+ u64(*signal1_type_get) (struct spu_context * ctx);
+ void (*signal2_type_set) (struct spu_context * ctx, u64 val);
+ u64(*signal2_type_get) (struct spu_context * ctx);
+ u32(*npc_read) (struct spu_context * ctx);
+ void (*npc_write) (struct spu_context * ctx, u32 data);
+ u32(*status_read) (struct spu_context * ctx);
+ char*(*get_ls) (struct spu_context * ctx);
+ void (*runcntl_write) (struct spu_context * ctx, u32 data);
+ void (*runcntl_stop) (struct spu_context * ctx);
+};
+
+extern struct spu_context_ops spu_hw_ops;
+extern struct spu_context_ops spu_backing_ops;
+
+struct spufs_inode_info {
+ struct spu_context *i_ctx;
+ struct inode vfs_inode;
+};
+#define SPUFS_I(inode) \
+ container_of(inode, struct spufs_inode_info, vfs_inode)
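+
+/*
+ * Illustrative (hypothetical) accessor: an inode or file operation can
+ * recover the context from its backing inode with
+ *
+ *	struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+ */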
+
+extern struct tree_descr spufs_dir_contents[];
+
+/* system call implementation */
+long spufs_run_spu(struct file *file,
+ struct spu_context *ctx, u32 *npc, u32 *status);
+long spufs_create_thread(struct nameidata *nd,
+ unsigned int flags, mode_t mode);
+extern struct file_operations spufs_context_fops;
+
+/* context management */
+struct spu_context * alloc_spu_context(struct address_space *local_store);
+void destroy_spu_context(struct kref *kref);
+struct spu_context * get_spu_context(struct spu_context *ctx);
+int put_spu_context(struct spu_context *ctx);
+void spu_unmap_mappings(struct spu_context *ctx);
+
+void spu_forget(struct spu_context *ctx);
+void spu_acquire(struct spu_context *ctx);
+void spu_release(struct spu_context *ctx);
+int spu_acquire_runnable(struct spu_context *ctx);
+void spu_acquire_saved(struct spu_context *ctx);
+
+int spu_activate(struct spu_context *ctx, u64 flags);
+void spu_deactivate(struct spu_context *ctx);
+void spu_yield(struct spu_context *ctx);
+int __init spu_sched_init(void);
+void __exit spu_sched_exit(void);
+
+/*
+ * spufs_wait
+ * Same as wait_event_interruptible(), except that here
+ * we need to call spu_release(ctx) before sleeping, and
+ * then spu_acquire(ctx) when awoken.
+ */
+
+#define spufs_wait(wq, condition) \
+({ \
+ int __ret = 0; \
+ DEFINE_WAIT(__wait); \
+ for (;;) { \
+ prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (!signal_pending(current)) { \
+ spu_release(ctx); \
+ schedule(); \
+ spu_acquire(ctx); \
+ continue; \
+ } \
+ __ret = -ERESTARTSYS; \
+ break; \
+ } \
+ finish_wait(&(wq), &__wait); \
+ __ret; \
+})
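+
+/*
+ * Hypothetical example of a caller, for illustration only, using the
+ * ibox wait queue and the read helper declared below:
+ *
+ *	u32 data;
+ *	int ret = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &data));
+ *
+ * spu_release(ctx) is called before sleeping and spu_acquire(ctx)
+ * when awoken, as described above.
+ */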
+
+size_t spu_wbox_write(struct spu_context *ctx, u32 data);
+size_t spu_ibox_read(struct spu_context *ctx, u32 *data);
+
+/* irq callback funcs. */
+void spufs_ibox_callback(struct spu *spu);
+void spufs_wbox_callback(struct spu *spu);
+void spufs_stop_callback(struct spu *spu);
+
+#endif
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
new file mode 100644
index 00000000000..212db28531f
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -0,0 +1,2204 @@
+/*
+ * spu_switch.c
+ *
+ * (C) Copyright IBM Corp. 2005
+ *
+ * Author: Mark Nutter <mnutter@us.ibm.com>
+ *
+ * Host-side part of SPU context switch sequence outlined in
+ * Synergistic Processor Element, Book IV.
+ *
+ * A fully preemptive switch of an SPE is very expensive in terms
+ * of time and system resources. SPE Book IV indicates that SPE
+ * allocation should follow a "serially reusable device" model,
+ * in which the SPE is assigned a task until it completes. When
+ * this is not possible, this sequence may be used to preemptively
+ * save, and then later (optionally) restore the context of a
+ * program executing on an SPE.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+
+#include <asm/io.h>
+#include <asm/spu.h>
+#include <asm/spu_csa.h>
+#include <asm/mmu_context.h>
+
+#include "spu_save_dump.h"
+#include "spu_restore_dump.h"
+
+#if 0
+#define POLL_WHILE_TRUE(_c) { \
+ do { \
+ } while (_c); \
+ }
+#else
+#define RELAX_SPIN_COUNT 1000
+#define POLL_WHILE_TRUE(_c) { \
+ do { \
+ int _i; \
+ for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \
+ cpu_relax(); \
+ } \
+ if (unlikely(_c)) yield(); \
+ else break; \
+ } while (_c); \
+ }
+#endif /* debug */
+
+#define POLL_WHILE_FALSE(_c) POLL_WHILE_TRUE(!(_c))
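+
+/*
+ * Typical use later in this file, e.g. waiting for the SPU to stop:
+ *
+ *	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
+ *
+ * This spins with cpu_relax() for up to RELAX_SPIN_COUNT iterations at
+ * a time and yields the CPU if the condition still holds.
+ */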
+
+static inline void acquire_spu_lock(struct spu *spu)
+{
+ /* Save, Step 1:
+ * Restore, Step 1:
+ * Acquire SPU-specific mutual exclusion lock.
+ * TBD.
+ */
+}
+
+static inline void release_spu_lock(struct spu *spu)
+{
+ /* Restore, Step 76:
+ * Release SPU-specific mutual exclusion lock.
+ * TBD.
+ */
+}
+
+static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+ u32 isolate_state;
+
+ /* Save, Step 2:
+ * Save, Step 6:
+	 * If any field of SPU_Status[E,L,IS] is '1', this
+ * SPU is in isolate state and cannot be context
+ * saved at this time.
+ */
+ isolate_state = SPU_STATUS_ISOLATED_STATE |
+ SPU_STATUS_ISOLATED_LOAD_STAUTUS | SPU_STATUS_ISOLATED_EXIT_STAUTUS;
+ return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
+}
+
+static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
+{
+ /* Save, Step 3:
+ * Restore, Step 2:
+ * Save INT_Mask_class0 in CSA.
+ * Write INT_MASK_class0 with value of 0.
+ * Save INT_Mask_class1 in CSA.
+ * Write INT_MASK_class1 with value of 0.
+ * Save INT_Mask_class2 in CSA.
+ * Write INT_MASK_class2 with value of 0.
+ */
+ spin_lock_irq(&spu->register_lock);
+ if (csa) {
+ csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
+ csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
+ csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
+ }
+ spu_int_mask_set(spu, 0, 0ul);
+ spu_int_mask_set(spu, 1, 0ul);
+ spu_int_mask_set(spu, 2, 0ul);
+ eieio();
+ spin_unlock_irq(&spu->register_lock);
+}
+
+static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
+{
+ /* Save, Step 4:
+ * Restore, Step 25.
+ * Set a software watchdog timer, which specifies the
+ * maximum allowable time for a context save sequence.
+ *
+	 * For the present, this implementation will not set a global
+	 * watchdog timer, as virtualization and variable system load
+ * may cause unpredictable execution times.
+ */
+}
+
+static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
+{
+ /* Save, Step 5:
+ * Restore, Step 3:
+ * Inhibit user-space access (if provided) to this
+ * SPU by unmapping the virtual pages assigned to
+ * the SPU memory-mapped I/O (MMIO) for problem
+ * state. TBD.
+ */
+}
+
+static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
+{
+ /* Save, Step 7:
+ * Restore, Step 5:
+ * Set a software context switch pending flag.
+ */
+ set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
+ mb();
+}
+
+static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Save, Step 8:
+ * Suspend DMA and save MFC_CNTL.
+ */
+ switch (in_be64(&priv2->mfc_control_RW) &
+ MFC_CNTL_SUSPEND_DMA_STATUS_MASK) {
+ case MFC_CNTL_SUSPEND_IN_PROGRESS:
+ POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
+ MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
+ MFC_CNTL_SUSPEND_COMPLETE);
+ /* fall through */
+ case MFC_CNTL_SUSPEND_COMPLETE:
+ if (csa) {
+ csa->priv2.mfc_control_RW =
+ in_be64(&priv2->mfc_control_RW) |
+ MFC_CNTL_SUSPEND_DMA_QUEUE;
+ }
+ break;
+ case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
+ out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
+ POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
+ MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
+ MFC_CNTL_SUSPEND_COMPLETE);
+ if (csa) {
+ csa->priv2.mfc_control_RW =
+ in_be64(&priv2->mfc_control_RW) &
+ ~MFC_CNTL_SUSPEND_DMA_QUEUE;
+ }
+ break;
+ }
+}
+
+static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+
+ /* Save, Step 9:
+ * Save SPU_Runcntl in the CSA. This value contains
+ * the "Application Desired State".
+ */
+ csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);
+}
+
+static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
+{
+ /* Save, Step 10:
+ * Save MFC_SR1 in the CSA.
+ */
+ csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
+}
+
+static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+
+ /* Save, Step 11:
+ * Read SPU_Status[R], and save to CSA.
+ */
+ if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {
+ csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
+ } else {
+ u32 stopped;
+
+ out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
+ eieio();
+ POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
+ SPU_STATUS_RUNNING);
+ stopped =
+ SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
+ SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
+ if ((in_be32(&prob->spu_status_R) & stopped) == 0)
+ csa->prob.spu_status_R = SPU_STATUS_RUNNING;
+ else
+ csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
+ }
+}
+
+static inline void save_mfc_decr(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Save, Step 12:
+ * Read MFC_CNTL[Ds]. Update saved copy of
+ * CSA.MFC_CNTL[Ds].
+ */
+ if (in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING) {
+ csa->priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
+ csa->suspend_time = get_cycles();
+ out_be64(&priv2->spu_chnlcntptr_RW, 7ULL);
+ eieio();
+ csa->spu_chnldata_RW[7] = in_be64(&priv2->spu_chnldata_RW);
+ eieio();
+ } else {
+ csa->priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
+ }
+}
+
+static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Save, Step 13:
+ * Write MFC_CNTL[Dh] set to a '1' to halt
+ * the decrementer.
+ */
+ out_be64(&priv2->mfc_control_RW, MFC_CNTL_DECREMENTER_HALTED);
+ eieio();
+}
+
+static inline void save_timebase(struct spu_state *csa, struct spu *spu)
+{
+ /* Save, Step 14:
+	 * Read the PPE Timebase High and Timebase Low registers
+ * and save in CSA. TBD.
+ */
+ csa->suspend_time = get_cycles();
+}
+
+static inline void remove_other_spu_access(struct spu_state *csa,
+ struct spu *spu)
+{
+ /* Save, Step 15:
+ * Remove other SPU access to this SPU by unmapping
+ * this SPU's pages from their address space. TBD.
+ */
+}
+
+static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+
+ /* Save, Step 16:
+ * Restore, Step 11.
+ * Write SPU_MSSync register. Poll SPU_MSSync[P]
+ * for a value of 0.
+ */
+ out_be64(&prob->spc_mssync_RW, 1UL);
+ POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);
+}
+
+static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
+{
+ /* Save, Step 17:
+ * Restore, Step 12.
+ * Restore, Step 48.
+ * Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.
+ * Then issue a PPE sync instruction.
+ */
+ spu_tlb_invalidate(spu);
+ mb();
+}
+
+static inline void handle_pending_interrupts(struct spu_state *csa,
+ struct spu *spu)
+{
+ /* Save, Step 18:
+ * Handle any pending interrupts from this SPU
+ * here. This is OS or hypervisor specific. One
+ * option is to re-enable interrupts to handle any
+ * pending interrupts, with the interrupt handlers
+ * recognizing the software Context Switch Pending
+ * flag, to ensure the SPU execution or MFC command
+ * queue is not restarted. TBD.
+ */
+}
+
+static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+ int i;
+
+ /* Save, Step 19:
+ * If MFC_Cntl[Se]=0 then save
+ * MFC command queues.
+ */
+ if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {
+ for (i = 0; i < 8; i++) {
+ csa->priv2.puq[i].mfc_cq_data0_RW =
+ in_be64(&priv2->puq[i].mfc_cq_data0_RW);
+ csa->priv2.puq[i].mfc_cq_data1_RW =
+ in_be64(&priv2->puq[i].mfc_cq_data1_RW);
+ csa->priv2.puq[i].mfc_cq_data2_RW =
+ in_be64(&priv2->puq[i].mfc_cq_data2_RW);
+ csa->priv2.puq[i].mfc_cq_data3_RW =
+ in_be64(&priv2->puq[i].mfc_cq_data3_RW);
+ }
+ for (i = 0; i < 16; i++) {
+ csa->priv2.spuq[i].mfc_cq_data0_RW =
+ in_be64(&priv2->spuq[i].mfc_cq_data0_RW);
+ csa->priv2.spuq[i].mfc_cq_data1_RW =
+ in_be64(&priv2->spuq[i].mfc_cq_data1_RW);
+ csa->priv2.spuq[i].mfc_cq_data2_RW =
+ in_be64(&priv2->spuq[i].mfc_cq_data2_RW);
+ csa->priv2.spuq[i].mfc_cq_data3_RW =
+ in_be64(&priv2->spuq[i].mfc_cq_data3_RW);
+ }
+ }
+}
+
+static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+
+ /* Save, Step 20:
+ * Save the PPU_QueryMask register
+ * in the CSA.
+ */
+ csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);
+}
+
+static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+
+ /* Save, Step 21:
+ * Save the PPU_QueryType register
+ * in the CSA.
+ */
+ csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
+}
+
+static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Save, Step 22:
+ * Save the MFC_CSR_TSQ register
+ * in the LSCSA.
+ */
+ csa->priv2.spu_tag_status_query_RW =
+ in_be64(&priv2->spu_tag_status_query_RW);
+}
+
+static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Save, Step 23:
+ * Save the MFC_CSR_CMD1 and MFC_CSR_CMD2
+ * registers in the CSA.
+ */
+ csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);
+ csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);
+}
+
+static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Save, Step 24:
+ * Save the MFC_CSR_ATO register in
+ * the CSA.
+ */
+ csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);
+}
+
+static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
+{
+ /* Save, Step 25:
+ * Save the MFC_TCLASS_ID register in
+ * the CSA.
+ */
+ csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
+}
+
+static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
+{
+ /* Save, Step 26:
+ * Restore, Step 23.
+ * Write the MFC_TCLASS_ID register with
+ * the value 0x10000000.
+ */
+ spu_mfc_tclass_id_set(spu, 0x10000000);
+ eieio();
+}
+
+static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Save, Step 27:
+ * Restore, Step 14.
+ * Write MFC_CNTL[Pc]=1 (purge queue).
+ */
+ out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST);
+ eieio();
+}
+
+static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Save, Step 28:
+ * Poll MFC_CNTL[Ps] until value '11' is read
+ * (purge complete).
+ */
+ POLL_WHILE_FALSE(in_be64(&priv2->mfc_control_RW) &
+ MFC_CNTL_PURGE_DMA_COMPLETE);
+}
+
+static inline void save_mfc_slbs(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+ int i;
+
+ /* Save, Step 29:
+ * If MFC_SR1[R]='1', save SLBs in CSA.
+ */
+ if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
+ csa->priv2.slb_index_W = in_be64(&priv2->slb_index_W);
+ for (i = 0; i < 8; i++) {
+ out_be64(&priv2->slb_index_W, i);
+ eieio();
+ csa->slb_esid_RW[i] = in_be64(&priv2->slb_esid_RW);
+ csa->slb_vsid_RW[i] = in_be64(&priv2->slb_vsid_RW);
+ eieio();
+ }
+ }
+}
+
+static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
+{
+ /* Save, Step 30:
+ * Restore, Step 18:
+ * Write MFC_SR1 with MFC_SR1[D=0,S=1] and
+ * MFC_SR1[TL,R,Pr,T] set correctly for the
+ * OS specific environment.
+ *
+ * Implementation note: The SPU-side code
+ * for save/restore is privileged, so the
+ * MFC_SR1[Pr] bit is not set.
+ *
+ */
+ spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
+ MFC_STATE1_RELOCATE_MASK |
+ MFC_STATE1_BUS_TLBIE_MASK));
+}
+
+static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+
+ /* Save, Step 31:
+ * Save SPU_NPC in the CSA.
+ */
+ csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
+}
+
+static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Save, Step 32:
+ * Save SPU_PrivCntl in the CSA.
+ */
+ csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
+}
+
+static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Save, Step 33:
+ * Restore, Step 16:
+ * Write SPU_PrivCntl[S,Le,A] fields reset to 0.
+ */
+ out_be64(&priv2->spu_privcntl_RW, 0UL);
+ eieio();
+}
+
+static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Save, Step 34:
+ * Save SPU_LSLR in the CSA.
+ */
+ csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
+}
+
+static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Save, Step 35:
+ * Restore, Step 17.
+ * Reset SPU_LSLR.
+ */
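+ /* The reset value is LS_ADDR_MASK, i.e. the Local Store Limit
+ * Register is set so that the entire local store is addressable.
+ */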
+ out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
+ eieio();
+}
+
+static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Save, Step 36:
+ * Save SPU_Cfg in the CSA.
+ */
+ csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
+}
+
+static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
+{
+ /* Save, Step 37:
+ * Save PM_Trace_Tag_Wait_Mask in the CSA.
+ * Not performed by this implementation.
+ */
+}
+
+static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
+{
+ /* Save, Step 38:
+ * Save RA_GROUP_ID register and the
+ * RA_ENABLE register in the CSA.
+ */
+ csa->priv1.resource_allocation_groupID_RW =
+ spu_resource_allocation_groupID_get(spu);
+ csa->priv1.resource_allocation_enable_RW =
+ spu_resource_allocation_enable_get(spu);
+}
+
+static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+
+ /* Save, Step 39:
+ * Save MB_Stat register in the CSA.
+ */
+ csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
+}
+
+static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+
+ /* Save, Step 40:
+ * Save the PPU_MB register in the CSA.
+ */
+ csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
+}
+
+static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Save, Step 41:
+ * Save the PPUINT_MB register in the CSA.
+ */
+ csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
+}
+
+static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+ u64 idx, ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL };
+ int i;
+
+ /* Save, Step 42:
+ * Save the following CH: [0,1,3,4,24,25,27]
+ */
+ for (i = 0; i < 7; i++) {
+ idx = ch_indices[i];
+ out_be64(&priv2->spu_chnlcntptr_RW, idx);
+ eieio();
+ csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
+ csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
+ out_be64(&priv2->spu_chnldata_RW, 0UL);
+ out_be64(&priv2->spu_chnlcnt_RW, 0UL);
+ eieio();
+ }
+}
+
+static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+ int i;
+
+ /* Save, Step 43:
+ * Save SPU Read Mailbox Channel.
+ */
+ out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
+ eieio();
+ csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
+ for (i = 0; i < 4; i++) {
+ csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
+ }
+ out_be64(&priv2->spu_chnlcnt_RW, 0UL);
+ eieio();
+}
+
+static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Save, Step 44:
+ * Save MFC_CMD Channel.
+ */
+ out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
+ eieio();
+ csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
+ eieio();
+}
+
+static inline void reset_ch(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+ u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
+ u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
+ u64 idx;
+ int i;
+
+ /* Save, Step 45:
+ * Reset the following CH: [21, 23, 28, 30]
+ */
+ for (i = 0; i < 4; i++) {
+ idx = ch_indices[i];
+ out_be64(&priv2->spu_chnlcntptr_RW, idx);
+ eieio();
+ out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
+ eieio();
+ }
+}
+
+static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Save, Step 46:
+ * Restore, Step 25.
+ * Write MFC_CNTL[Sc]=0 (resume queue processing).
+ */
+ out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
+}
+
+static inline void invalidate_slbs(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Save, Step 45:
+ * Restore, Step 19:
+ * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All.
+ */
+ if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
+ out_be64(&priv2->slb_invalidate_all_W, 0UL);
+ eieio();
+ }
+}
+
+static inline void get_kernel_slb(u64 ea, u64 slb[2])
+{
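+ /* Build an SLB entry pair for a kernel effective address:
+ * slb[0] is the VSID dword (kernel VSID plus protection flags),
+ * slb[1] is the ESID dword with the valid bit set.
+ */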
+ slb[0] = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
+ slb[1] = (ea & ESID_MASK) | SLB_ESID_V;
+
+ /* Large pages are used for kernel text/data, but not vmalloc. */
+ if (cpu_has_feature(CPU_FTR_16M_PAGE)
+ && REGION_ID(ea) == KERNEL_REGION_ID)
+ slb[0] |= SLB_VSID_L;
+}
+
+static inline void load_mfc_slb(struct spu *spu, u64 slb[2], int slbe)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ out_be64(&priv2->slb_index_W, slbe);
+ eieio();
+ out_be64(&priv2->slb_vsid_RW, slb[0]);
+ out_be64(&priv2->slb_esid_RW, slb[1]);
+ eieio();
+}
+
+static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu)
+{
+ u64 code_slb[2];
+ u64 lscsa_slb[2];
+
+ /* Save, Step 47:
+ * Restore, Step 30.
+ * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
+ * register, then initialize SLB_VSID and SLB_ESID
+ * to provide access to SPU context save code and
+ * LSCSA.
+ *
+ * This implementation places both the context
+ * switch code and LSCSA in kernel address space.
+ *
+ * Further, this implementation assumes that
+ * MFC_SR1[R]=1 (in other words, that address
+ * translation is enabled by the OS environment).
+ */
+ invalidate_slbs(csa, spu);
+ get_kernel_slb((unsigned long)&spu_save_code[0], code_slb);
+ get_kernel_slb((unsigned long)csa->lscsa, lscsa_slb);
+ load_mfc_slb(spu, code_slb, 0);
+ if ((lscsa_slb[0] != code_slb[0]) || (lscsa_slb[1] != code_slb[1]))
+ load_mfc_slb(spu, lscsa_slb, 1);
+}
+
+static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
+{
+ /* Save, Step 48:
+ * Restore, Step 23.
+ * Change the software context switch pending flag
+ * to context switch active.
+ */
+ set_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags);
+ clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
+ mb();
+}
+
+static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
+{
+ unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
+ CLASS1_ENABLE_STORAGE_FAULT_INTR;
+
+ /* Save, Step 49:
+ * Restore, Step 22:
+ * Reset and then enable interrupts, as
+ * needed by OS.
+ *
+ * This implementation enables only class1
+ * (translation) interrupts.
+ */
+ spin_lock_irq(&spu->register_lock);
+ spu_int_stat_clear(spu, 0, ~0ul);
+ spu_int_stat_clear(spu, 1, ~0ul);
+ spu_int_stat_clear(spu, 2, ~0ul);
+ spu_int_mask_set(spu, 0, 0ul);
+ spu_int_mask_set(spu, 1, class1_mask);
+ spu_int_mask_set(spu, 2, 0ul);
+ spin_unlock_irq(&spu->register_lock);
+}
+
+static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
+ unsigned int ls_offset, unsigned int size,
+ unsigned int tag, unsigned int rclass,
+ unsigned int cmd)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+ union mfc_tag_size_class_cmd command;
+ unsigned int transfer_size;
+ volatile unsigned int status = 0x0;
+
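+ /* Break the transfer into MFC_MAX_DMA_SIZE chunks; for each chunk,
+ * enqueue the command through the problem-state MFC registers and
+ * retry while the returned command status indicates that the
+ * enqueue was not accepted.
+ */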
+ while (size > 0) {
+ transfer_size =
+ (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
+ command.u.mfc_size = transfer_size;
+ command.u.mfc_tag = tag;
+ command.u.mfc_rclassid = rclass;
+ command.u.mfc_cmd = cmd;
+ do {
+ out_be32(&prob->mfc_lsa_W, ls_offset);
+ out_be64(&prob->mfc_ea_W, ea);
+ out_be64(&prob->mfc_union_W.all64, command.all64);
+ status =
+ in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
+ if (unlikely(status & 0x2)) {
+ cpu_relax();
+ }
+ } while (status & 0x3);
+ size -= transfer_size;
+ ea += transfer_size;
+ ls_offset += transfer_size;
+ }
+ return 0;
+}
+
+static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
+{
+ unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
+ unsigned int ls_offset = 0x0;
+ unsigned int size = 16384;
+ unsigned int tag = 0;
+ unsigned int rclass = 0;
+ unsigned int cmd = MFC_PUT_CMD;
+
+ /* Save, Step 50:
+ * Issue a DMA command to copy the first 16K bytes
+ * of local storage to the CSA.
+ */
+ send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
+}
+
+static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+
+ /* Save, Step 51:
+ * Restore, Step 31.
+ * Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry
+ * point address of context save code in local
+ * storage.
+ *
+ * This implementation uses SPU-side save/restore
+ * programs with entry points at LSA of 0.
+ */
+ out_be32(&prob->spu_npc_RW, 0);
+ eieio();
+}
+
+static inline void set_signot1(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+ union {
+ u64 ull;
+ u32 ui[2];
+ } addr64;
+
+ /* Save, Step 52:
+ * Restore, Step 32:
+ * Write SPU_Sig_Notify_1 register with upper 32-bits
+ * of the CSA.LSCSA effective address.
+ */
+ addr64.ull = (u64) csa->lscsa;
+ out_be32(&prob->signal_notify1, addr64.ui[0]);
+ eieio();
+}
+
+static inline void set_signot2(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+ union {
+ u64 ull;
+ u32 ui[2];
+ } addr64;
+
+ /* Save, Step 53:
+ * Restore, Step 33:
+ * Write SPU_Sig_Notify_2 register with lower 32-bits
+ * of the CSA.LSCSA effective address.
+ */
+ addr64.ull = (u64) csa->lscsa;
+ out_be32(&prob->signal_notify2, addr64.ui[1]);
+ eieio();
+}
+
+static inline void send_save_code(struct spu_state *csa, struct spu *spu)
+{
+ unsigned long addr = (unsigned long)&spu_save_code[0];
+ unsigned int ls_offset = 0x0;
+ unsigned int size = sizeof(spu_save_code);
+ unsigned int tag = 0;
+ unsigned int rclass = 0;
+ unsigned int cmd = MFC_GETFS_CMD;
+
+ /* Save, Step 54:
+ * Issue a DMA command to copy context save code
+ * to local storage and start SPU.
+ */
+ send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
+}
+
+static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+
+ /* Save, Step 55:
+ * Restore, Step 38.
+ * Write PPU_QueryMask=1 (enable Tag Group 0)
+ * and issue eieio instruction.
+ */
+ out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
+ eieio();
+}
+
+static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+ u32 mask = MFC_TAGID_TO_TAGMASK(0);
+ unsigned long flags;
+
+ /* Save, Step 56:
+ * Restore, Step 39.
+ * Restore, Step 46.
+ * Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete)
+ * or write PPU_QueryType[TS]=01 and wait for Tag Group
+ * Complete Interrupt. Write INT_Stat_Class0 or
+ * INT_Stat_Class2 with value of 'handled'.
+ */
+ POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);
+
+ local_irq_save(flags);
+ spu_int_stat_clear(spu, 0, ~(0ul));
+ spu_int_stat_clear(spu, 2, ~(0ul));
+ local_irq_restore(flags);
+}
+
+static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+ unsigned long flags;
+
+ /* Save, Step 57:
+ * Restore, Step 40.
+ * Poll until SPU_Status[R]=0 or wait for SPU Class 0
+ * or SPU Class 2 interrupt. Write INT_Stat_class0
+ * or INT_Stat_class2 with value of handled.
+ */
+ POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
+
+ local_irq_save(flags);
+ spu_int_stat_clear(spu, 0, ~(0ul));
+ spu_int_stat_clear(spu, 2, ~(0ul));
+ local_irq_restore(flags);
+}
+
+static inline int check_save_status(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+ u32 complete;
+
+ /* Save, Step 54:
+ * If SPU_Status[P]=1 and SPU_Status[SC] = "success",
+ * context save succeeded, otherwise context save
+ * failed.
+ */
+ complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
+ SPU_STATUS_STOPPED_BY_STOP);
+ return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
+}
+
+static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
+{
+ /* Restore, Step 4:
+ * If required, notify the "using application" that
+ * the SPU task has been terminated. TBD.
+ */
+}
+
+static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Restore, Step 7:
+ * Restore, Step 46.
+ * Write MFC_Cntl[Dh,Sc]='1','1' to suspend
+ * the queue and halt the decrementer.
+ */
+ out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
+ MFC_CNTL_DECREMENTER_HALTED);
+ eieio();
+}
+
+static inline void wait_suspend_mfc_complete(struct spu_state *csa,
+ struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Restore, Step 8:
+ * Restore, Step 47.
+ * Poll MFC_CNTL[Ss] until 11 is returned.
+ */
+ POLL_WHILE_FALSE(in_be64(&priv2->mfc_control_RW) &
+ MFC_CNTL_SUSPEND_COMPLETE);
+}
+
+static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+
+ /* Restore, Step 9:
+ * If SPU_Status[R]=1, stop SPU execution
+ * and wait for stop to complete.
+ *
+ * Returns 1 if SPU_Status[R]=1 on entry.
+ * 0 otherwise
+ */
+ if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
+ if (in_be32(&prob->spu_status_R) &
+ SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
+ POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
+ SPU_STATUS_RUNNING);
+ }
+ if ((in_be32(&prob->spu_status_R) &
+ SPU_STATUS_ISOLATED_LOAD_STAUTUS)
+ || (in_be32(&prob->spu_status_R) &
+ SPU_STATUS_ISOLATED_STATE)) {
+ out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
+ eieio();
+ POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
+ SPU_STATUS_RUNNING);
+ out_be32(&prob->spu_runcntl_RW, 0x2);
+ eieio();
+ POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
+ SPU_STATUS_RUNNING);
+ }
+ if (in_be32(&prob->spu_status_R) &
+ SPU_STATUS_WAITING_FOR_CHANNEL) {
+ out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
+ eieio();
+ POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
+ SPU_STATUS_RUNNING);
+ }
+ return 1;
+ }
+ return 0;
+}
+
+static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+
+ /* Restore, Step 10:
+ * If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
+ * release SPU from isolate state.
+ */
+ if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
+ if (in_be32(&prob->spu_status_R) &
+ SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
+ spu_mfc_sr1_set(spu,
+ MFC_STATE1_MASTER_RUN_CONTROL_MASK);
+ eieio();
+ out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
+ eieio();
+ POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
+ SPU_STATUS_RUNNING);
+ }
+ if ((in_be32(&prob->spu_status_R) &
+ SPU_STATUS_ISOLATED_LOAD_STAUTUS)
+ || (in_be32(&prob->spu_status_R) &
+ SPU_STATUS_ISOLATED_STATE)) {
+ spu_mfc_sr1_set(spu,
+ MFC_STATE1_MASTER_RUN_CONTROL_MASK);
+ eieio();
+ out_be32(&prob->spu_runcntl_RW, 0x2);
+ eieio();
+ POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
+ SPU_STATUS_RUNNING);
+ }
+ }
+}
+
+static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+ u64 ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL };
+ u64 idx;
+ int i;
+
+ /* Restore, Step 20:
+ * Reset the following CH: [0,1,3,4,24,25,27]
+ */
+ for (i = 0; i < 7; i++) {
+ idx = ch_indices[i];
+ out_be64(&priv2->spu_chnlcntptr_RW, idx);
+ eieio();
+ out_be64(&priv2->spu_chnldata_RW, 0UL);
+ out_be64(&priv2->spu_chnlcnt_RW, 0UL);
+ eieio();
+ }
+}
+
+static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+ u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
+ u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
+ u64 idx;
+ int i;
+
+ /* Restore, Step 21:
+ * Reset the following CH: [21, 23, 28, 29, 30]
+ */
+ for (i = 0; i < 5; i++) {
+ idx = ch_indices[i];
+ out_be64(&priv2->spu_chnlcntptr_RW, idx);
+ eieio();
+ out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
+ eieio();
+ }
+}
+
+static inline void setup_spu_status_part1(struct spu_state *csa,
+ struct spu *spu)
+{
+ u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
+ u32 status_I = SPU_STATUS_INVALID_INSTR;
+ u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
+ u32 status_S = SPU_STATUS_SINGLE_STEP;
+ u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
+ u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
+ u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
+ u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
+ u32 status_code;
+
+ /* Restore, Step 27:
+ * If the CSA.SPU_Status[I,S,H,P]=1 then add the correct
+ * instruction sequence to the end of the SPU based restore
+ * code (after the "context restored" stop and signal) to
+ * restore the correct SPU status.
+ *
+ * NOTE: Rather than modifying the SPU executable, we
+ * instead add a new 'stopped_status' field to the
+ * LSCSA. The SPU-side restore reads this field and
+ * takes the appropriate action when exiting.
+ */
+
+ status_code =
+ (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
+ if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {
+
+ /* SPU_Status[P,I]=1 - Illegal Instruction followed
+ * by Stop and Signal instruction, followed by 'br -4'.
+ *
+ */
+ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
+ csa->lscsa->stopped_status.slot[1] = status_code;
+
+ } else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {
+
+ /* SPU_Status[P,H]=1 - Halt Conditional, followed
+ * by Stop and Signal instruction, followed by
+ * 'br -4'.
+ */
+ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
+ csa->lscsa->stopped_status.slot[1] = status_code;
+
+ } else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {
+
+ /* SPU_Status[S,P]=1 - Stop and Signal instruction
+ * followed by 'br -4'.
+ */
+ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
+ csa->lscsa->stopped_status.slot[1] = status_code;
+
+ } else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {
+
+ /* SPU_Status[S,I]=1 - Illegal instruction followed
+ * by 'br -4'.
+ */
+ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
+ csa->lscsa->stopped_status.slot[1] = status_code;
+
+ } else if ((csa->prob.spu_status_R & status_P) == status_P) {
+
+ /* SPU_Status[P]=1 - Stop and Signal instruction
+ * followed by 'br -4'.
+ */
+ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
+ csa->lscsa->stopped_status.slot[1] = status_code;
+
+ } else if ((csa->prob.spu_status_R & status_H) == status_H) {
+
+ /* SPU_Status[H]=1 - Halt Conditional, followed
+ * by 'br -4'.
+ */
+ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;
+
+ } else if ((csa->prob.spu_status_R & status_S) == status_S) {
+
+ /* SPU_Status[S]=1 - Two nop instructions.
+ */
+ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;
+
+ } else if ((csa->prob.spu_status_R & status_I) == status_I) {
+
+ /* SPU_Status[I]=1 - Illegal instruction followed
+ * by 'br -4'.
+ */
+ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;
+
+ }
+}
+
+static inline void setup_spu_status_part2(struct spu_state *csa,
+ struct spu *spu)
+{
+ u32 mask;
+
+ /* Restore, Step 28:
+ * If the CSA.SPU_Status[I,S,H,P,R]=0 then
+ * add a 'br *' instruction to the end of
+ * the SPU based restore code.
+ *
+ * NOTE: Rather than modifying the SPU executable, we
+ * instead add a new 'stopped_status' field to the
+ * LSCSA. The SPU-side restore reads this field and
+ * takes the appropriate action when exiting.
+ */
+ mask = SPU_STATUS_INVALID_INSTR |
+ SPU_STATUS_SINGLE_STEP |
+ SPU_STATUS_STOPPED_BY_HALT |
+ SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
+ if (!(csa->prob.spu_status_R & mask)) {
+ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
+ }
+}
+
+static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
+{
+ /* Restore, Step 29:
+ * Restore RA_GROUP_ID register and the
+ * RA_ENABLE register from the CSA.
+ */
+ spu_resource_allocation_groupID_set(spu,
+ csa->priv1.resource_allocation_groupID_RW);
+ spu_resource_allocation_enable_set(spu,
+ csa->priv1.resource_allocation_enable_RW);
+}
+
+static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
+{
+ unsigned long addr = (unsigned long)&spu_restore_code[0];
+ unsigned int ls_offset = 0x0;
+ unsigned int size = sizeof(spu_restore_code);
+ unsigned int tag = 0;
+ unsigned int rclass = 0;
+ unsigned int cmd = MFC_GETFS_CMD;
+
+ /* Restore, Step 37:
+ * Issue MFC DMA command to copy context
+ * restore code to local storage.
+ */
+ send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
+}
+
+static inline void setup_decr(struct spu_state *csa, struct spu *spu)
+{
+ /* Restore, Step 34:
+ * If CSA.MFC_CNTL[Ds]=1 (decrementer was
+ * running) then adjust decrementer, set
+ * decrementer running status in LSCSA,
+ * and set decrementer "wrapped" status
+ * in LSCSA.
+ */
+ if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
+ cycles_t resume_time = get_cycles();
+ cycles_t delta_time = resume_time - csa->suspend_time;
+
+ csa->lscsa->decr.slot[0] = delta_time;
+ }
+}
+
+static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)
+{
+ /* Restore, Step 35:
+ * Copy the CSA.PU_MB data into the LSCSA.
+ */
+ csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;
+}
+
+static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)
+{
+ /* Restore, Step 36:
+ * Copy the CSA.PUINT_MB data into the LSCSA.
+ */
+ csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R;
+}
+
+static inline int check_restore_status(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+ u32 complete;
+
+ /* Restore, Step 40:
+ * If SPU_Status[P]=1 and SPU_Status[SC] = "success",
+ * context restore succeeded, otherwise context restore
+ * failed.
+ */
+ complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
+ SPU_STATUS_STOPPED_BY_STOP);
+ return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
+}
+
+static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Restore, Step 41:
+ * Restore SPU_PrivCntl from the CSA.
+ */
+ out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW);
+ eieio();
+}
+
+static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+ u32 mask;
+
+ /* Restore, Step 42:
+ * If any CSA.SPU_Status[I,S,H,P]=1, then
+ * restore the error or single step state.
+ */
+ mask = SPU_STATUS_INVALID_INSTR |
+ SPU_STATUS_SINGLE_STEP |
+ SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
+ if (csa->prob.spu_status_R & mask) {
+ out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
+ eieio();
+ POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
+ SPU_STATUS_RUNNING);
+ }
+}
+
+static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+ u32 mask;
+
+ /* Restore, Step 43:
+ * If all CSA.SPU_Status[I,S,H,P,R]=0 then write
+ * SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1,
+ * then write '00' to SPU_RunCntl[R0R1] and wait
+ * for SPU_Status[R]=0.
+ */
+ mask = SPU_STATUS_INVALID_INSTR |
+ SPU_STATUS_SINGLE_STEP |
+ SPU_STATUS_STOPPED_BY_HALT |
+ SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
+ if (!(csa->prob.spu_status_R & mask)) {
+ out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
+ eieio();
+ POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &
+ SPU_STATUS_RUNNING);
+ out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
+ eieio();
+ POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
+ SPU_STATUS_RUNNING);
+ }
+}
+
+static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
+{
+ unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
+ unsigned int ls_offset = 0x0;
+ unsigned int size = 16384;
+ unsigned int tag = 0;
+ unsigned int rclass = 0;
+ unsigned int cmd = MFC_GET_CMD;
+
+ /* Restore, Step 44:
+ * Issue a DMA command to restore the first
+ * 16kb of local storage from CSA.
+ */
+ send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
+}
+
+static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
+{
+ /* Restore, Step 49:
+ * Write INT_MASK_class0 with value of 0.
+ * Write INT_MASK_class1 with value of 0.
+ * Write INT_MASK_class2 with value of 0.
+ * Write INT_STAT_class0 with value of -1.
+ * Write INT_STAT_class1 with value of -1.
+ * Write INT_STAT_class2 with value of -1.
+ */
+ spin_lock_irq(&spu->register_lock);
+ spu_int_mask_set(spu, 0, 0ul);
+ spu_int_mask_set(spu, 1, 0ul);
+ spu_int_mask_set(spu, 2, 0ul);
+ spu_int_stat_clear(spu, 0, ~0ul);
+ spu_int_stat_clear(spu, 1, ~0ul);
+ spu_int_stat_clear(spu, 2, ~0ul);
+ spin_unlock_irq(&spu->register_lock);
+}
+
+static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+ int i;
+
+ /* Restore, Step 50:
+ * If MFC_Cntl[Se]!=0 then restore
+ * MFC command queues.
+ */
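+ /* The queue contents are restored only when the saved MFC_CNTL
+ * indicates that the DMA queues were not empty at save time.
+ */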
+ if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {
+ for (i = 0; i < 8; i++) {
+ out_be64(&priv2->puq[i].mfc_cq_data0_RW,
+ csa->priv2.puq[i].mfc_cq_data0_RW);
+ out_be64(&priv2->puq[i].mfc_cq_data1_RW,
+ csa->priv2.puq[i].mfc_cq_data1_RW);
+ out_be64(&priv2->puq[i].mfc_cq_data2_RW,
+ csa->priv2.puq[i].mfc_cq_data2_RW);
+ out_be64(&priv2->puq[i].mfc_cq_data3_RW,
+ csa->priv2.puq[i].mfc_cq_data3_RW);
+ }
+ for (i = 0; i < 16; i++) {
+ out_be64(&priv2->spuq[i].mfc_cq_data0_RW,
+ csa->priv2.spuq[i].mfc_cq_data0_RW);
+ out_be64(&priv2->spuq[i].mfc_cq_data1_RW,
+ csa->priv2.spuq[i].mfc_cq_data1_RW);
+ out_be64(&priv2->spuq[i].mfc_cq_data2_RW,
+ csa->priv2.spuq[i].mfc_cq_data2_RW);
+ out_be64(&priv2->spuq[i].mfc_cq_data3_RW,
+ csa->priv2.spuq[i].mfc_cq_data3_RW);
+ }
+ }
+ eieio();
+}
+
+static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+
+ /* Restore, Step 51:
+ * Restore the PPU_QueryMask register from CSA.
+ */
+ out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);
+ eieio();
+}
+
+static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+
+ /* Restore, Step 52:
+ * Restore the PPU_QueryType register from CSA.
+ */
+ out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);
+ eieio();
+}
+
+static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Restore, Step 53:
+ * Restore the MFC_CSR_TSQ register from CSA.
+ */
+ out_be64(&priv2->spu_tag_status_query_RW,
+ csa->priv2.spu_tag_status_query_RW);
+ eieio();
+}
+
+static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Restore, Step 54:
+ * Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2
+ * registers from CSA.
+ */
+ out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);
+ out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);
+ eieio();
+}
+
+static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Restore, Step 55:
+ * Restore the MFC_CSR_ATO register from CSA.
+ */
+ out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);
+}
+
+static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
+{
+ /* Restore, Step 56:
+ * Restore the MFC_TCLASS_ID register from CSA.
+ */
+ spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
+ eieio();
+}
+
+static inline void set_llr_event(struct spu_state *csa, struct spu *spu)
+{
+ u64 ch0_cnt, ch0_data;
+ u64 ch1_data;
+
+ /* Restore, Step 57:
+ * Set the Lock Line Reservation Lost Event by:
+ * 1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1.
+ * 2. If CSA.SPU_Channel_0_Count=0 and
+ * CSA.SPU_Wr_Event_Mask[Lr]=1 and
+ * CSA.SPU_Event_Status[Lr]=0 then set
+ * CSA.SPU_Event_Status_Count=1.
+ */
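+ /* Channel 0 holds the saved SPU_Event_Status and channel 1 the
+ * saved SPU_Wr_Event_Mask referenced in the step description above.
+ */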
+ ch0_cnt = csa->spu_chnlcnt_RW[0];
+ ch0_data = csa->spu_chnldata_RW[0];
+ ch1_data = csa->spu_chnldata_RW[1];
+ csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;
+ if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&
+ (ch1_data & MFC_LLR_LOST_EVENT)) {
+ csa->spu_chnlcnt_RW[0] = 1;
+ }
+}
+
+static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
+{
+ /* Restore, Step 58:
+ * If the status of the CSA software decrementer
+ * "wrapped" flag is set, OR in a '1' to
+ * CSA.SPU_Event_Status[Tm].
+ */
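+ /* 0x20 in the saved channel 0 (event status) data is the Tm bit
+ * referred to in the step description above.
+ */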
+ if (csa->lscsa->decr_status.slot[0] == 1) {
+ csa->spu_chnldata_RW[0] |= 0x20;
+ }
+ if ((csa->lscsa->decr_status.slot[0] == 1) &&
+ (csa->spu_chnlcnt_RW[0] == 0 &&
+ ((csa->spu_chnldata_RW[2] & 0x20) == 0x0) &&
+ ((csa->spu_chnldata_RW[0] & 0x20) != 0x1))) {
+ csa->spu_chnlcnt_RW[0] = 1;
+ }
+}
+
+static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+ u64 idx, ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL };
+ int i;
+
+ /* Restore, Step 59:
+ * Restore the following CH: [0,1,3,4,24,25,27]
+ */
+ for (i = 0; i < 7; i++) {
+ idx = ch_indices[i];
+ out_be64(&priv2->spu_chnlcntptr_RW, idx);
+ eieio();
+ out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);
+ out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);
+ eieio();
+ }
+}
+
+static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+ u64 ch_indices[3] = { 9UL, 21UL, 23UL };
+ u64 ch_counts[3] = { 1UL, 16UL, 1UL };
+ u64 idx;
+ int i;
+
+ /* Restore, Step 60:
+ * Restore the following CH: [9,21,23].
+ */
+ ch_counts[0] = 1UL;
+ ch_counts[1] = csa->spu_chnlcnt_RW[21];
+ ch_counts[2] = 1UL;
+ for (i = 0; i < 3; i++) {
+ idx = ch_indices[i];
+ out_be64(&priv2->spu_chnlcntptr_RW, idx);
+ eieio();
+ out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
+ eieio();
+ }
+}
+
+static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Restore, Step 61:
+ * Restore the SPU_LSLR register from CSA.
+ */
+ out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);
+ eieio();
+}
+
+static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Restore, Step 62:
+ * Restore the SPU_Cfg register from CSA.
+ */
+ out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);
+ eieio();
+}
+
+static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)
+{
+ /* Restore, Step 63:
+ * Restore PM_Trace_Tag_Wait_Mask from CSA.
+ * Not performed by this implementation.
+ */
+}
+
+static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+
+ /* Restore, Step 64:
+ * Restore SPU_NPC from CSA.
+ */
+ out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);
+ eieio();
+}
+
+static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+ int i;
+
+ /* Restore, Step 65:
+ * Restore MFC_RdSPU_MB from CSA.
+ */
+ out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
+ eieio();
+ out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
+ for (i = 0; i < 4; i++) {
+ out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
+ }
+ eieio();
+}
+
+static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+ u32 dummy = 0;
+
+ /* Restore, Step 66:
+ * If CSA.MB_Stat[P]=0 (mailbox empty) then
+ * read from the PPU_MB register.
+ */
+ if ((csa->prob.mb_stat_R & 0xFF) == 0) {
+ dummy = in_be32(&prob->pu_mb_R);
+ eieio();
+ }
+}
+
+static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+ u64 dummy = 0UL;
+
+ /* Restore, Step 67:
+ * If CSA.MB_Stat[I]=0 (mailbox empty) then
+ * read from the PPUINT_MB register.
+ */
+ if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
+ dummy = in_be64(&priv2->puint_mb_R);
+ eieio();
+ spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
+ eieio();
+ }
+}
+
+static inline void restore_mfc_slbs(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+ int i;
+
+ /* Restore, Step 68:
+ * If MFC_SR1[R]='1', restore SLBs from CSA.
+ */
+ if (csa->priv1.mfc_sr1_RW & MFC_STATE1_RELOCATE_MASK) {
+ for (i = 0; i < 8; i++) {
+ out_be64(&priv2->slb_index_W, i);
+ eieio();
+ out_be64(&priv2->slb_esid_RW, csa->slb_esid_RW[i]);
+ out_be64(&priv2->slb_vsid_RW, csa->slb_vsid_RW[i]);
+ eieio();
+ }
+ out_be64(&priv2->slb_index_W, csa->priv2.slb_index_W);
+ eieio();
+ }
+}
+
+static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
+{
+ /* Restore, Step 69:
+ * Restore the MFC_SR1 register from CSA.
+ */
+ spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
+ eieio();
+}
+
+static inline void restore_other_spu_access(struct spu_state *csa,
+ struct spu *spu)
+{
+ /* Restore, Step 70:
+ * Restore other SPU mappings to this SPU. TBD.
+ */
+}
+
+static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_problem __iomem *prob = spu->problem;
+
+ /* Restore, Step 71:
+ * If CSA.SPU_Status[R]=1 then write
+ * SPU_RunCntl[R0R1]='01'.
+ */
+ if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {
+ out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
+ eieio();
+ }
+}
+
+static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
+{
+ struct spu_priv2 __iomem *priv2 = spu->priv2;
+
+ /* Restore, Step 72:
+ * Restore the MFC_CNTL register for the CSA.
+ */
+ out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
+ eieio();
+}
+
+static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
+{
+ /* Restore, Step 73:
+ * Enable user-space access (if provided) to this
+ * SPU by mapping the virtual pages assigned to
+ * the SPU memory-mapped I/O (MMIO) for problem
+ * state. TBD.
+ */
+}
+
+static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
+{
+ /* Restore, Step 74:
+ * Reset the "context switch active" flag.
+ */
+ clear_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags);
+ mb();
+}
+
+static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
+{
+ /* Restore, Step 75:
+ * Re-enable SPU interrupts.
+ */
+ spin_lock_irq(&spu->register_lock);
+ spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
+ spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
+ spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
+ spin_unlock_irq(&spu->register_lock);
+}
+
+static int quiece_spu(struct spu_state *prev, struct spu *spu)
+{
+ /*
+ * Combined steps 2-18 of SPU context save sequence, which
+ * quiesce the SPU state (disable SPU execution, MFC command
+ * queues, decrementer, SPU interrupts, etc.).
+ *
+ * Returns 0 on success.
+ * 2 if failed step 2.
+ * 6 if failed step 6.
+ */
+
+ if (check_spu_isolate(prev, spu)) { /* Step 2. */
+ return 2;
+ }
+ disable_interrupts(prev, spu); /* Step 3. */
+ set_watchdog_timer(prev, spu); /* Step 4. */
+ inhibit_user_access(prev, spu); /* Step 5. */
+ if (check_spu_isolate(prev, spu)) { /* Step 6. */
+ return 6;
+ }
+ set_switch_pending(prev, spu); /* Step 7. */
+ save_mfc_cntl(prev, spu); /* Step 8. */
+ save_spu_runcntl(prev, spu); /* Step 9. */
+ save_mfc_sr1(prev, spu); /* Step 10. */
+ save_spu_status(prev, spu); /* Step 11. */
+ save_mfc_decr(prev, spu); /* Step 12. */
+ halt_mfc_decr(prev, spu); /* Step 13. */
+ save_timebase(prev, spu); /* Step 14. */
+ remove_other_spu_access(prev, spu); /* Step 15. */
+ do_mfc_mssync(prev, spu); /* Step 16. */
+ issue_mfc_tlbie(prev, spu); /* Step 17. */
+ handle_pending_interrupts(prev, spu); /* Step 18. */
+
+ return 0;
+}
+
+static void save_csa(struct spu_state *prev, struct spu *spu)
+{
+ /*
+ * Combine steps 19-44 of SPU context save sequence, which
+ * save regions of the privileged & problem state areas.
+ */
+
+ save_mfc_queues(prev, spu); /* Step 19. */
+ save_ppu_querymask(prev, spu); /* Step 20. */
+ save_ppu_querytype(prev, spu); /* Step 21. */
+ save_mfc_csr_tsq(prev, spu); /* Step 22. */
+ save_mfc_csr_cmd(prev, spu); /* Step 23. */
+ save_mfc_csr_ato(prev, spu); /* Step 24. */
+ save_mfc_tclass_id(prev, spu); /* Step 25. */
+ set_mfc_tclass_id(prev, spu); /* Step 26. */
+ purge_mfc_queue(prev, spu); /* Step 27. */
+ wait_purge_complete(prev, spu); /* Step 28. */
+ save_mfc_slbs(prev, spu); /* Step 29. */
+ setup_mfc_sr1(prev, spu); /* Step 30. */
+ save_spu_npc(prev, spu); /* Step 31. */
+ save_spu_privcntl(prev, spu); /* Step 32. */
+ reset_spu_privcntl(prev, spu); /* Step 33. */
+ save_spu_lslr(prev, spu); /* Step 34. */
+ reset_spu_lslr(prev, spu); /* Step 35. */
+ save_spu_cfg(prev, spu); /* Step 36. */
+ save_pm_trace(prev, spu); /* Step 37. */
+ save_mfc_rag(prev, spu); /* Step 38. */
+ save_ppu_mb_stat(prev, spu); /* Step 39. */
+ save_ppu_mb(prev, spu); /* Step 40. */
+ save_ppuint_mb(prev, spu); /* Step 41. */
+ save_ch_part1(prev, spu); /* Step 42. */
+ save_spu_mb(prev, spu); /* Step 43. */
+ save_mfc_cmd(prev, spu); /* Step 44. */
+ reset_ch(prev, spu); /* Step 45. */
+}
+
+static void save_lscsa(struct spu_state *prev, struct spu *spu)
+{
+ /*
+ * Perform steps 46-57 of SPU context save sequence,
+ * which save regions of the local store and register
+ * file.
+ */
+
+ resume_mfc_queue(prev, spu); /* Step 46. */
+ setup_mfc_slbs(prev, spu); /* Step 47. */
+ set_switch_active(prev, spu); /* Step 48. */
+ enable_interrupts(prev, spu); /* Step 49. */
+ save_ls_16kb(prev, spu); /* Step 50. */
+ set_spu_npc(prev, spu); /* Step 51. */
+ set_signot1(prev, spu); /* Step 52. */
+ set_signot2(prev, spu); /* Step 53. */
+ send_save_code(prev, spu); /* Step 54. */
+ set_ppu_querymask(prev, spu); /* Step 55. */
+ wait_tag_complete(prev, spu); /* Step 56. */
+ wait_spu_stopped(prev, spu); /* Step 57. */
+}
+
+static void harvest(struct spu_state *prev, struct spu *spu)
+{
+ /*
+ * Perform steps 2-25 of SPU context restore sequence,
+ * which resets an SPU either after a failed save, or
+ * when using an SPU for the first time.
+ */
+
+ disable_interrupts(prev, spu); /* Step 2. */
+ inhibit_user_access(prev, spu); /* Step 3. */
+ terminate_spu_app(prev, spu); /* Step 4. */
+ set_switch_pending(prev, spu); /* Step 5. */
+ remove_other_spu_access(prev, spu); /* Step 6. */
+ suspend_mfc(prev, spu); /* Step 7. */
+ wait_suspend_mfc_complete(prev, spu); /* Step 8. */
+ if (!suspend_spe(prev, spu)) /* Step 9. */
+ clear_spu_status(prev, spu); /* Step 10. */
+ do_mfc_mssync(prev, spu); /* Step 11. */
+ issue_mfc_tlbie(prev, spu); /* Step 12. */
+ handle_pending_interrupts(prev, spu); /* Step 13. */
+ purge_mfc_queue(prev, spu); /* Step 14. */
+ wait_purge_complete(prev, spu); /* Step 15. */
+ reset_spu_privcntl(prev, spu); /* Step 16. */
+ reset_spu_lslr(prev, spu); /* Step 17. */
+ setup_mfc_sr1(prev, spu); /* Step 18. */
+ invalidate_slbs(prev, spu); /* Step 19. */
+ reset_ch_part1(prev, spu); /* Step 20. */
+ reset_ch_part2(prev, spu); /* Step 21. */
+ enable_interrupts(prev, spu); /* Step 22. */
+ set_switch_active(prev, spu); /* Step 23. */
+ set_mfc_tclass_id(prev, spu); /* Step 24. */
+ resume_mfc_queue(prev, spu); /* Step 25. */
+}
+
+static void restore_lscsa(struct spu_state *next, struct spu *spu)
+{
+ /*
+ * Perform steps 26-40 of SPU context restore sequence,
+ * which restores regions of the local store and register
+ * file.
+ */
+
+ set_watchdog_timer(next, spu); /* Step 26. */
+ setup_spu_status_part1(next, spu); /* Step 27. */
+ setup_spu_status_part2(next, spu); /* Step 28. */
+ restore_mfc_rag(next, spu); /* Step 29. */
+ setup_mfc_slbs(next, spu); /* Step 30. */
+ set_spu_npc(next, spu); /* Step 31. */
+ set_signot1(next, spu); /* Step 32. */
+ set_signot2(next, spu); /* Step 33. */
+ setup_decr(next, spu); /* Step 34. */
+ setup_ppu_mb(next, spu); /* Step 35. */
+ setup_ppuint_mb(next, spu); /* Step 36. */
+ send_restore_code(next, spu); /* Step 37. */
+ set_ppu_querymask(next, spu); /* Step 38. */
+ wait_tag_complete(next, spu); /* Step 39. */
+ wait_spu_stopped(next, spu); /* Step 40. */
+}
+
+static void restore_csa(struct spu_state *next, struct spu *spu)
+{
+ /*
+ * Combine steps 41-76 of SPU context restore sequence, which
+ * restore regions of the privileged & problem state areas.
+ */
+
+ restore_spu_privcntl(next, spu); /* Step 41. */
+ restore_status_part1(next, spu); /* Step 42. */
+ restore_status_part2(next, spu); /* Step 43. */
+ restore_ls_16kb(next, spu); /* Step 44. */
+ wait_tag_complete(next, spu); /* Step 45. */
+ suspend_mfc(next, spu); /* Step 46. */
+ wait_suspend_mfc_complete(next, spu); /* Step 47. */
+ issue_mfc_tlbie(next, spu); /* Step 48. */
+ clear_interrupts(next, spu); /* Step 49. */
+ restore_mfc_queues(next, spu); /* Step 50. */
+ restore_ppu_querymask(next, spu); /* Step 51. */
+ restore_ppu_querytype(next, spu); /* Step 52. */
+ restore_mfc_csr_tsq(next, spu); /* Step 53. */
+ restore_mfc_csr_cmd(next, spu); /* Step 54. */
+ restore_mfc_csr_ato(next, spu); /* Step 55. */
+ restore_mfc_tclass_id(next, spu); /* Step 56. */
+ set_llr_event(next, spu); /* Step 57. */
+ restore_decr_wrapped(next, spu); /* Step 58. */
+ restore_ch_part1(next, spu); /* Step 59. */
+ restore_ch_part2(next, spu); /* Step 60. */
+ restore_spu_lslr(next, spu); /* Step 61. */
+ restore_spu_cfg(next, spu); /* Step 62. */
+ restore_pm_trace(next, spu); /* Step 63. */
+ restore_spu_npc(next, spu); /* Step 64. */
+ restore_spu_mb(next, spu); /* Step 65. */
+ check_ppu_mb_stat(next, spu); /* Step 66. */
+ check_ppuint_mb_stat(next, spu); /* Step 67. */
+ restore_mfc_slbs(next, spu); /* Step 68. */
+ restore_mfc_sr1(next, spu); /* Step 69. */
+ restore_other_spu_access(next, spu); /* Step 70. */
+ restore_spu_runcntl(next, spu); /* Step 71. */
+ restore_mfc_cntl(next, spu); /* Step 72. */
+ enable_user_access(next, spu); /* Step 73. */
+ reset_switch_active(next, spu); /* Step 74. */
+ reenable_interrupts(next, spu); /* Step 75. */
+}
+
+static int __do_spu_save(struct spu_state *prev, struct spu *spu)
+{
+ int rc;
+
+ /*
+ * SPU context save can be broken into three phases:
+ *
+ * (a) quiesce [steps 2-16].
+ * (b) save of CSA, performed by PPE [steps 17-42]
+ * (c) save of LSCSA, mostly performed by SPU [steps 43-52].
+ *
+ * Returns 0 on success.
+ * 2,6 if failed to quiesce the SPU
+ * 53 if SPU-side of save failed.
+ */
+
+ rc = quiece_spu(prev, spu); /* Steps 2-16. */
+ switch (rc) {
+ default:
+ case 2:
+ case 6:
+ harvest(prev, spu);
+ return rc;
+ break;
+ case 0:
+ break;
+ }
+ save_csa(prev, spu); /* Steps 17-43. */
+ save_lscsa(prev, spu); /* Steps 44-53. */
+ return check_save_status(prev, spu); /* Step 54. */
+}
+
+static int __do_spu_restore(struct spu_state *next, struct spu *spu)
+{
+ int rc;
+
+ /*
+ * SPU context restore can be broken into three phases:
+ *
+ * (a) harvest (or reset) SPU [steps 2-24].
+ * (b) restore LSCSA [steps 25-40], mostly performed by SPU.
+ * (c) restore CSA [steps 41-76], performed by PPE.
+ *
+ * The 'harvest' step is not performed here, but rather
+ * as needed below.
+ */
+
+ restore_lscsa(next, spu); /* Steps 24-39. */
+ rc = check_restore_status(next, spu); /* Step 40. */
+ switch (rc) {
+ default:
+ /* Failed. Return now. */
+ return rc;
+ break;
+ case 0:
+ /* Fall through to next step. */
+ break;
+ }
+ restore_csa(next, spu);
+
+ return 0;
+}
+
+/**
+ * spu_save - SPU context save, with locking.
+ * @prev: pointer to SPU context save area, to be saved.
+ * @spu: pointer to SPU iomem structure.
+ *
+ * Acquire locks, perform the save operation then return.
+ */
+int spu_save(struct spu_state *prev, struct spu *spu)
+{
+ int rc;
+
+ acquire_spu_lock(spu); /* Step 1. */
+ rc = __do_spu_save(prev, spu); /* Steps 2-53. */
+ release_spu_lock(spu);
+ if (rc) {
+ panic("%s failed on SPU[%d], rc=%d.\n",
+ __func__, spu->number, rc);
+ }
+ return rc;
+}
+
+/**
+ * spu_restore - SPU context restore, with harvest and locking.
+ * @new: pointer to SPU context save area, to be restored.
+ * @spu: pointer to SPU iomem structure.
+ *
+ * Perform harvest + restore, as we may not be coming
+ * from a previous successful save operation, and the
+ * hardware state is unknown.
+ */
+int spu_restore(struct spu_state *new, struct spu *spu)
+{
+ int rc;
+
+ acquire_spu_lock(spu);
+ harvest(NULL, spu);
+ spu->stop_code = 0;
+ spu->dar = 0;
+ spu->dsisr = 0;
+ spu->slb_replace = 0;
+ spu->class_0_pending = 0;
+ rc = __do_spu_restore(new, spu);
+ release_spu_lock(spu);
+ if (rc) {
+ panic("%s failed on SPU[%d] rc=%d.\n",
+ __func__, spu->number, rc);
+ }
+ return rc;
+}
+
+/**
+ * spu_harvest - SPU harvest (reset) operation
+ * @spu: pointer to SPU iomem structure.
+ *
+ * Perform SPU harvest (reset) operation.
+ */
+void spu_harvest(struct spu *spu)
+{
+ acquire_spu_lock(spu);
+ harvest(NULL, spu);
+ release_spu_lock(spu);
+}
+
+static void init_prob(struct spu_state *csa)
+{
+ csa->spu_chnlcnt_RW[9] = 1;
+ csa->spu_chnlcnt_RW[21] = 16;
+ csa->spu_chnlcnt_RW[23] = 1;
+ csa->spu_chnlcnt_RW[28] = 1;
+ csa->spu_chnlcnt_RW[30] = 1;
+ csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
+}
+
+static void init_priv1(struct spu_state *csa)
+{
+ /* Enable decode, relocate, tlbie response, master runcntl. */
+ csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
+ MFC_STATE1_MASTER_RUN_CONTROL_MASK |
+ MFC_STATE1_PROBLEM_STATE_MASK |
+ MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;
+
+ /* Set storage description. */
+ csa->priv1.mfc_sdr_RW = mfspr(SPRN_SDR1);
+
+ /* Enable OS-specific set of interrupts. */
+ csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
+ CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
+ CLASS0_ENABLE_SPU_ERROR_INTR;
+ csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
+ CLASS1_ENABLE_STORAGE_FAULT_INTR;
+ csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
+ CLASS2_ENABLE_SPU_HALT_INTR;
+}
+
+static void init_priv2(struct spu_state *csa)
+{
+ csa->priv2.spu_lslr_RW = LS_ADDR_MASK;
+ csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE |
+ MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION |
+ MFC_CNTL_DMA_QUEUES_EMPTY_MASK;
+}
+
+/**
+ * spu_init_csa - allocate and initialize an SPU context save area.
+ *
+ * Allocate and initialize the contents of an SPU context save area.
+ * This includes enabling address translation, interrupt masks, etc.,
+ * as appropriate for the given OS environment.
+ *
+ * Note that storage for the 'lscsa' is allocated separately,
+ * as it is by far the largest of the context save regions,
+ * and may need to be pinned or otherwise specially aligned.
+ */
+void spu_init_csa(struct spu_state *csa)
+{
+ struct spu_lscsa *lscsa;
+ unsigned char *p;
+
+ if (!csa)
+ return;
+ memset(csa, 0, sizeof(struct spu_state));
+
+ lscsa = vmalloc(sizeof(struct spu_lscsa));
+ if (!lscsa)
+ return;
+
+ memset(lscsa, 0, sizeof(struct spu_lscsa));
+ csa->lscsa = lscsa;
+ csa->register_lock = SPIN_LOCK_UNLOCKED;
+
+ /* Set LS pages reserved to allow for user-space mapping. */
+ for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
+ SetPageReserved(vmalloc_to_page(p));
+
+ init_prob(csa);
+ init_priv1(csa);
+ init_priv2(csa);
+}
+
+void spu_fini_csa(struct spu_state *csa)
+{
+ /* Clear reserved bit before vfree. */
+ unsigned char *p;
+ for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
+ ClearPageReserved(vmalloc_to_page(p));
+
+ vfree(csa->lscsa);
+}
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
new file mode 100644
index 00000000000..e6565a949dd
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -0,0 +1,103 @@
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+
+#include <asm/uaccess.h>
+
+#include "spufs.h"
+
+/**
+ * sys_spu_run - run code loaded into an SPU
+ *
+ * @unpc: next program counter for the SPU
+ * @ustatus: status of the SPU
+ *
+ * This system call transfers the control of execution of a
+ * user space thread to an SPU. It will return when the
+ * SPU has finished executing or when it hits an error
+ * condition and it will be interrupted if a signal needs
+ * to be delivered to a handler in user space.
+ *
+ * The next program counter is set to the passed value
+ * before the SPU starts fetching code and the user space
+ * pointer gets updated with the new value when returning
+ * from kernel space.
+ *
+ * The status value returned from spu_run reflects the
+ * value of the spu_status register after the SPU has stopped.
+ *
+ *
+ */
+static long do_spu_run(struct file *filp,
+ __u32 __user *unpc,
+ __u32 __user *ustatus)
+{
+ long ret;
+ struct spufs_inode_info *i;
+ u32 npc, status;
+
+ ret = -EFAULT;
+ if (get_user(npc, unpc) || get_user(status, ustatus))
+ goto out;
+
+ /* check if this file was created by spu_create */
+ ret = -EINVAL;
+ if (filp->f_op != &spufs_context_fops)
+ goto out;
+
+ i = SPUFS_I(filp->f_dentry->d_inode);
+ ret = spufs_run_spu(filp, i->i_ctx, &npc, &status);
+
+ if (put_user(npc, unpc) || put_user(status, ustatus))
+ ret = -EFAULT;
+out:
+ return ret;
+}
+
+#ifndef MODULE
+asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
+{
+ int fput_needed;
+ struct file *filp;
+ long ret;
+
+ ret = -EBADF;
+ filp = fget_light(fd, &fput_needed);
+ if (filp) {
+ ret = do_spu_run(filp, unpc, ustatus);
+ fput_light(filp, fput_needed);
+ }
+
+ return ret;
+}
+#endif
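+
+/*
+ * Illustrative sketch of user-space usage (hypothetical paths and
+ * syscall-number macros; glibc provides no wrappers for these calls):
+ *
+ *	int fd = syscall(__NR_spu_create, "/spu/my_ctx", 0, 0755);
+ *	__u32 npc = 0, status = 0;
+ *	syscall(__NR_spu_run, fd, &npc, &status);
+ *
+ * spu_create returns an open file descriptor for the new context;
+ * spu_run transfers control to that SPU context until it stops.
+ */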
+
+asmlinkage long sys_spu_create(const char __user *pathname,
+ unsigned int flags, mode_t mode)
+{
+ char *tmp;
+ int ret;
+
+ tmp = getname(pathname);
+ ret = PTR_ERR(tmp);
+ if (!IS_ERR(tmp)) {
+ struct nameidata nd;
+
+ ret = path_lookup(tmp, LOOKUP_PARENT|
+ LOOKUP_OPEN|LOOKUP_CREATE, &nd);
+ if (!ret) {
+ ret = spufs_create_thread(&nd, flags, mode);
+ path_release(&nd);
+ }
+ putname(tmp);
+ }
+
+ return ret;
+}
+
+struct spufs_calls spufs_calls = {
+ .create_thread = sys_spu_create,
+ .spu_run = do_spu_run,
+ .owner = THIS_MODULE,
+};
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index 82c429d487f..00c52f27ef4 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -135,12 +135,13 @@ int __init
hydra_init(void)
{
struct device_node *np;
+ struct resource r;
np = find_devices("mac-io");
- if (np == NULL || np->n_addrs == 0)
+ if (np == NULL || of_address_to_resource(np, 0, &r))
return 0;
- Hydra = ioremap(np->addrs[0].address, np->addrs[0].size);
- printk("Hydra Mac I/O at %lx\n", np->addrs[0].address);
+ Hydra = ioremap(r.start, r.end-r.start);
+ printk("Hydra Mac I/O at %lx\n", r.start);
printk("Hydra Feature_Control was %x",
in_le32(&Hydra->Feature_Control));
out_le32(&Hydra->Feature_Control, (HYDRA_FC_SCC_CELL_EN |
@@ -177,18 +178,24 @@ setup_python(struct pci_controller *hose, struct device_node *dev)
{
u32 __iomem *reg;
u32 val;
- unsigned long addr = dev->addrs[0].address;
+ struct resource r;
- setup_indirect_pci(hose, addr + 0xf8000, addr + 0xf8010);
+ if (of_address_to_resource(dev, 0, &r)) {
+ printk(KERN_ERR "No address for Python PCI controller\n");
+ return;
+ }
/* Clear the magic go-slow bit */
- reg = ioremap(dev->addrs[0].address + 0xf6000, 0x40);
+ reg = ioremap(r.start + 0xf6000, 0x40);
+ BUG_ON(!reg);
val = in_be32(&reg[12]);
if (val & PRG_CL_RESET_VALID) {
out_be32(&reg[12], val & ~PRG_CL_RESET_VALID);
in_be32(&reg[12]);
}
iounmap(reg);
+
+ setup_indirect_pci(hose, r.start + 0xf8000, r.start + 0xf8010);
}
/* Marvell Discovery II based Pegasos 2 */
@@ -218,7 +225,7 @@ chrp_find_bridges(void)
char *model, *machine;
int is_longtrail = 0, is_mot = 0, is_pegasos = 0;
struct device_node *root = find_path_device("/");
-
+ struct resource r;
/*
* The PCI host bridge nodes on some machines don't have
* properties to adequately identify them, so we have to
@@ -238,7 +245,7 @@ chrp_find_bridges(void)
continue;
++index;
/* The GG2 bridge on the LongTrail doesn't have an address */
- if (dev->n_addrs < 1 && !is_longtrail) {
+ if (of_address_to_resource(dev, 0, &r) && !is_longtrail) {
printk(KERN_WARNING "Can't use %s: no address\n",
dev->full_name);
continue;
@@ -255,8 +262,8 @@ chrp_find_bridges(void)
printk(KERN_INFO "PCI buses %d..%d",
bus_range[0], bus_range[1]);
printk(" controlled by %s", dev->type);
- if (dev->n_addrs > 0)
- printk(" at %lx", dev->addrs[0].address);
+ if (!is_longtrail)
+ printk(" at %lx", r.start);
printk("\n");
hose = pcibios_alloc_controller();
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index dda5f2c72c2..2dc87aa5962 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -49,7 +49,6 @@
#include <asm/hydra.h>
#include <asm/sections.h>
#include <asm/time.h>
-#include <asm/btext.h>
#include <asm/i8259.h>
#include <asm/mpic.h>
#include <asm/rtas.h>
@@ -58,7 +57,6 @@
#include "chrp.h"
void rtas_indicator_progress(char *, unsigned short);
-void btext_progress(char *, unsigned short);
int _chrp_type;
EXPORT_SYMBOL(_chrp_type);
@@ -264,11 +262,6 @@ void __init chrp_setup_arch(void)
ppc_md.set_rtc_time = rtas_set_rtc_time;
}
-#ifdef CONFIG_BOOTX_TEXT
- if (ppc_md.progress == NULL && boot_text_mapped)
- ppc_md.progress = btext_progress;
-#endif
-
#ifdef CONFIG_BLK_DEV_INITRD
/* this is fine for chrp */
initrd_below_start_ok = 1;
@@ -359,9 +352,10 @@ static void __init chrp_find_openpic(void)
opaddr = opprop[na-1]; /* assume 32-bit */
oplen /= na * sizeof(unsigned int);
} else {
- if (np->n_addrs == 0)
+ struct resource r;
+ if (of_address_to_resource(np, 0, &r))
return;
- opaddr = np->addrs[0].address;
+ opaddr = r.start;
oplen = 0;
}
@@ -384,7 +378,7 @@ static void __init chrp_find_openpic(void)
*/
if (oplen < len) {
printk(KERN_ERR "Insufficient addresses for distributed"
- " OpenPIC (%d < %d)\n", np->n_addrs, len);
+ " OpenPIC (%d < %d)\n", oplen, len);
len = oplen;
}
@@ -522,12 +516,3 @@ void __init chrp_init(void)
smp_ops = &chrp_smp_ops;
#endif /* CONFIG_SMP */
}
-
-#ifdef CONFIG_BOOTX_TEXT
-void
-btext_progress(char *s, unsigned short hex)
-{
- btext_drawstring(s);
- btext_drawstring("\n");
-}
-#endif /* CONFIG_BOOTX_TEXT */
diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c
index 737ee5d9f0a..36a0f97bb7b 100644
--- a/arch/powerpc/platforms/chrp/time.c
+++ b/arch/powerpc/platforms/chrp/time.c
@@ -21,6 +21,7 @@
#include <linux/mc146818rtc.h>
#include <linux/init.h>
#include <linux/bcd.h>
+#include <linux/ioport.h>
#include <asm/io.h>
#include <asm/nvram.h>
@@ -37,14 +38,16 @@ static int nvram_data = NVRAM_DATA;
long __init chrp_time_init(void)
{
struct device_node *rtcs;
+ struct resource r;
int base;
rtcs = find_compatible_devices("rtc", "pnpPNP,b00");
if (rtcs == NULL)
rtcs = find_compatible_devices("rtc", "ds1385-rtc");
- if (rtcs == NULL || rtcs->addrs == NULL)
+ if (rtcs == NULL || of_address_to_resource(rtcs, 0, &r))
return 0;
- base = rtcs->addrs[0].address;
+
+ base = r.start;
nvram_as1 = 0;
nvram_as0 = base;
nvram_data = base + 1;
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index 81250090f98..4fdbc9ae876 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -144,16 +144,6 @@ config LITE5200
much but it's only been tested on this board version. I think this
board is also known as IceCube.
-config MPC834x_SYS
- bool "Freescale MPC834x SYS"
- help
- This option enables support for the MPC 834x SYS evaluation board.
-
- Be aware that PCI buses can only function when SYS board is plugged
- into the PIB (Platform IO Board) board from Freescale which provide
- 3 PCI slots. The PIBs PCI initialization is the bootloader's
- responsiblilty.
-
config EV64360
bool "Marvell-EV64360BP"
help
@@ -192,14 +182,6 @@ config 8272
The MPC8272 CPM has a different internal dpram setup than other CPM2
devices
-config 83xx
- bool
- default y if MPC834x_SYS
-
-config MPC834x
- bool
- default y if MPC834x_SYS
-
config CPM2
bool
depends on 8260 || MPC8560 || MPC8555
diff --git a/arch/powerpc/platforms/iseries/Makefile b/arch/powerpc/platforms/iseries/Makefile
index 127b465308b..ce8c0b943fa 100644
--- a/arch/powerpc/platforms/iseries/Makefile
+++ b/arch/powerpc/platforms/iseries/Makefile
@@ -1,8 +1,8 @@
EXTRA_CFLAGS += -mno-minimal-toc
obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o mf.o lpevents.o \
- hvcall.o proc.o htab.o iommu.o misc.o
-obj-$(CONFIG_PCI) += pci.o irq.o vpdinfo.o
+ hvcall.o proc.o htab.o iommu.o misc.o irq.o
+obj-$(CONFIG_PCI) += pci.o vpdinfo.o
obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_VIOPATH) += viopath.o
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index 2b54eeb2c89..bea0b703f40 100644
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -34,6 +34,8 @@
#include <asm/pci-bridge.h>
#include <asm/iseries/hv_call_xm.h>
+#include "iommu.h"
+
extern struct list_head iSeries_Global_Device_List;
diff --git a/arch/powerpc/platforms/iseries/iommu.h b/arch/powerpc/platforms/iseries/iommu.h
new file mode 100644
index 00000000000..cb5658fbe65
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/iommu.h
@@ -0,0 +1,35 @@
+#ifndef _PLATFORMS_ISERIES_IOMMU_H
+#define _PLATFORMS_ISERIES_IOMMU_H
+
+/*
+ * Copyright (C) 2005 Stephen Rothwell, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the:
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330,
+ * Boston, MA 02111-1307 USA
+ */
+
+struct device_node;
+struct iommu_table;
+
+/* Creates table for an individual device node */
+extern void iommu_devnode_init_iSeries(struct device_node *dn);
+
+/* Get table parameters from HV */
+extern void iommu_table_getparms_iSeries(unsigned long busno,
+ unsigned char slotno, unsigned char virtbus,
+ struct iommu_table *tbl);
+
+#endif /* _PLATFORMS_ISERIES_IOMMU_H */
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index a58daa15368..be3fbfc24e6 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -35,161 +35,131 @@
#include <linux/irq.h>
#include <linux/spinlock.h>
+#include <asm/paca.h>
#include <asm/iseries/hv_types.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/hv_call_xm.h>
+#include <asm/iseries/it_lp_queue.h>
#include "irq.h"
#include "call_pci.h"
-static long Pci_Interrupt_Count;
-static long Pci_Event_Count;
-
-enum XmPciLpEvent_Subtype {
- XmPciLpEvent_BusCreated = 0, // PHB has been created
- XmPciLpEvent_BusError = 1, // PHB has failed
- XmPciLpEvent_BusFailed = 2, // Msg to Secondary, Primary failed bus
- XmPciLpEvent_NodeFailed = 4, // Multi-adapter bridge has failed
- XmPciLpEvent_NodeRecovered = 5, // Multi-adapter bridge has recovered
- XmPciLpEvent_BusRecovered = 12, // PHB has been recovered
- XmPciLpEvent_UnQuiesceBus = 18, // Secondary bus unqiescing
- XmPciLpEvent_BridgeError = 21, // Bridge Error
- XmPciLpEvent_SlotInterrupt = 22 // Slot interrupt
-};
+#if defined(CONFIG_SMP)
+extern void iSeries_smp_message_recv(struct pt_regs *);
+#endif
-struct XmPciLpEvent_BusInterrupt {
- HvBusNumber busNumber;
- HvSubBusNumber subBusNumber;
-};
+#ifdef CONFIG_PCI
-struct XmPciLpEvent_NodeInterrupt {
- HvBusNumber busNumber;
- HvSubBusNumber subBusNumber;
- HvAgentId deviceId;
+enum pci_event_type {
+ pe_bus_created = 0, /* PHB has been created */
+ pe_bus_error = 1, /* PHB has failed */
+ pe_bus_failed = 2, /* Msg to Secondary, Primary failed bus */
+ pe_node_failed = 4, /* Multi-adapter bridge has failed */
+ pe_node_recovered = 5, /* Multi-adapter bridge has recovered */
+ pe_bus_recovered = 12, /* PHB has been recovered */
+ pe_unquiese_bus = 18, /* Secondary bus unquiescing */
+ pe_bridge_error = 21, /* Bridge Error */
+ pe_slot_interrupt = 22 /* Slot interrupt */
};
-struct XmPciLpEvent {
- struct HvLpEvent hvLpEvent;
-
+struct pci_event {
+ struct HvLpEvent event;
union {
- u64 alignData; // Align on an 8-byte boundary
-
+ u64 __align; /* Align on an 8-byte boundary */
struct {
u32 fisr;
- HvBusNumber busNumber;
- HvSubBusNumber subBusNumber;
- HvAgentId deviceId;
- } slotInterrupt;
-
- struct XmPciLpEvent_BusInterrupt busFailed;
- struct XmPciLpEvent_BusInterrupt busRecovered;
- struct XmPciLpEvent_BusInterrupt busCreated;
-
- struct XmPciLpEvent_NodeInterrupt nodeFailed;
- struct XmPciLpEvent_NodeInterrupt nodeRecovered;
-
- } eventData;
-
+ HvBusNumber bus_number;
+ HvSubBusNumber sub_bus_number;
+ HvAgentId dev_id;
+ } slot;
+ struct {
+ HvBusNumber bus_number;
+ HvSubBusNumber sub_bus_number;
+ } bus;
+ struct {
+ HvBusNumber bus_number;
+ HvSubBusNumber sub_bus_number;
+ HvAgentId dev_id;
+ } node;
+ } data;
};
-static void intReceived(struct XmPciLpEvent *eventParm,
- struct pt_regs *regsParm)
+static DEFINE_SPINLOCK(pending_irqs_lock);
+static int num_pending_irqs;
+static int pending_irqs[NR_IRQS];
+
+static void int_received(struct pci_event *event, struct pt_regs *regs)
{
int irq;
-#ifdef CONFIG_IRQSTACKS
- struct thread_info *curtp, *irqtp;
-#endif
- ++Pci_Interrupt_Count;
-
- switch (eventParm->hvLpEvent.xSubtype) {
- case XmPciLpEvent_SlotInterrupt:
- irq = eventParm->hvLpEvent.xCorrelationToken;
- /* Dispatch the interrupt handlers for this irq */
-#ifdef CONFIG_IRQSTACKS
- /* Switch to the irq stack to handle this */
- curtp = current_thread_info();
- irqtp = hardirq_ctx[smp_processor_id()];
- if (curtp != irqtp) {
- irqtp->task = curtp->task;
- irqtp->flags = 0;
- call___do_IRQ(irq, regsParm, irqtp);
- irqtp->task = NULL;
- if (irqtp->flags)
- set_bits(irqtp->flags, &curtp->flags);
- } else
-#endif
- __do_IRQ(irq, regsParm);
- HvCallPci_eoi(eventParm->eventData.slotInterrupt.busNumber,
- eventParm->eventData.slotInterrupt.subBusNumber,
- eventParm->eventData.slotInterrupt.deviceId);
+ switch (event->event.xSubtype) {
+ case pe_slot_interrupt:
+ irq = event->event.xCorrelationToken;
+ if (irq < NR_IRQS) {
+ spin_lock(&pending_irqs_lock);
+ pending_irqs[irq]++;
+ num_pending_irqs++;
+ spin_unlock(&pending_irqs_lock);
+ } else {
+ printk(KERN_WARNING "int_received: bad irq number %d\n",
+ irq);
+ HvCallPci_eoi(event->data.slot.bus_number,
+ event->data.slot.sub_bus_number,
+ event->data.slot.dev_id);
+ }
break;
/* Ignore error recovery events for now */
- case XmPciLpEvent_BusCreated:
- printk(KERN_INFO "intReceived: system bus %d created\n",
- eventParm->eventData.busCreated.busNumber);
+ case pe_bus_created:
+ printk(KERN_INFO "int_received: system bus %d created\n",
+ event->data.bus.bus_number);
break;
- case XmPciLpEvent_BusError:
- case XmPciLpEvent_BusFailed:
- printk(KERN_INFO "intReceived: system bus %d failed\n",
- eventParm->eventData.busFailed.busNumber);
+ case pe_bus_error:
+ case pe_bus_failed:
+ printk(KERN_INFO "int_received: system bus %d failed\n",
+ event->data.bus.bus_number);
break;
- case XmPciLpEvent_BusRecovered:
- case XmPciLpEvent_UnQuiesceBus:
- printk(KERN_INFO "intReceived: system bus %d recovered\n",
- eventParm->eventData.busRecovered.busNumber);
+ case pe_bus_recovered:
+ case pe_unquiese_bus:
+ printk(KERN_INFO "int_received: system bus %d recovered\n",
+ event->data.bus.bus_number);
break;
- case XmPciLpEvent_NodeFailed:
- case XmPciLpEvent_BridgeError:
+ case pe_node_failed:
+ case pe_bridge_error:
printk(KERN_INFO
- "intReceived: multi-adapter bridge %d/%d/%d failed\n",
- eventParm->eventData.nodeFailed.busNumber,
- eventParm->eventData.nodeFailed.subBusNumber,
- eventParm->eventData.nodeFailed.deviceId);
+ "int_received: multi-adapter bridge %d/%d/%d failed\n",
+ event->data.node.bus_number,
+ event->data.node.sub_bus_number,
+ event->data.node.dev_id);
break;
- case XmPciLpEvent_NodeRecovered:
+ case pe_node_recovered:
printk(KERN_INFO
- "intReceived: multi-adapter bridge %d/%d/%d recovered\n",
- eventParm->eventData.nodeRecovered.busNumber,
- eventParm->eventData.nodeRecovered.subBusNumber,
- eventParm->eventData.nodeRecovered.deviceId);
+ "int_received: multi-adapter bridge %d/%d/%d recovered\n",
+ event->data.node.bus_number,
+ event->data.node.sub_bus_number,
+ event->data.node.dev_id);
break;
default:
printk(KERN_ERR
- "intReceived: unrecognized event subtype 0x%x\n",
- eventParm->hvLpEvent.xSubtype);
+ "int_received: unrecognized event subtype 0x%x\n",
+ event->event.xSubtype);
break;
}
}
-static void XmPciLpEvent_handler(struct HvLpEvent *eventParm,
- struct pt_regs *regsParm)
+static void pci_event_handler(struct HvLpEvent *event, struct pt_regs *regs)
{
-#ifdef CONFIG_PCI
- ++Pci_Event_Count;
-
- if (eventParm && (eventParm->xType == HvLpEvent_Type_PciIo)) {
- switch (eventParm->xFlags.xFunction) {
- case HvLpEvent_Function_Int:
- intReceived((struct XmPciLpEvent *)eventParm, regsParm);
- break;
- case HvLpEvent_Function_Ack:
+ if (event && (event->xType == HvLpEvent_Type_PciIo)) {
+ if (hvlpevent_is_int(event))
+ int_received((struct pci_event *)event, regs);
+ else
printk(KERN_ERR
- "XmPciLpEvent_handler: unexpected ack received\n");
- break;
- default:
- printk(KERN_ERR
- "XmPciLpEvent_handler: unexpected event function %d\n",
- (int)eventParm->xFlags.xFunction);
- break;
- }
- } else if (eventParm)
+ "pci_event_handler: unexpected ack received\n");
+ } else if (event)
printk(KERN_ERR
- "XmPciLpEvent_handler: Unrecognized PCI event type 0x%x\n",
- (int)eventParm->xType);
+ "pci_event_handler: Unrecognized PCI event type 0x%x\n",
+ (int)event->xType);
else
- printk(KERN_ERR "XmPciLpEvent_handler: NULL event received\n");
-#endif
+ printk(KERN_ERR "pci_event_handler: NULL event received\n");
}
/*
@@ -199,20 +169,21 @@ static void XmPciLpEvent_handler(struct HvLpEvent *eventParm,
void __init iSeries_init_IRQ(void)
{
/* Register PCI event handler and open an event path */
- int xRc;
-
- xRc = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo,
- &XmPciLpEvent_handler);
- if (xRc == 0) {
- xRc = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0);
- if (xRc != 0)
- printk(KERN_ERR "iSeries_init_IRQ: open event path "
- "failed with rc 0x%x\n", xRc);
+ int ret;
+
+ ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo,
+ &pci_event_handler);
+ if (ret == 0) {
+ ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0);
+ if (ret != 0)
+ printk(KERN_ERR "iseries_init_IRQ: open event path "
+ "failed with rc 0x%x\n", ret);
} else
- printk(KERN_ERR "iSeries_init_IRQ: register handler "
- "failed with rc 0x%x\n", xRc);
+ printk(KERN_ERR "iseries_init_IRQ: register handler "
+ "failed with rc 0x%x\n", ret);
}
+#define REAL_IRQ_TO_SUBBUS(irq) (((irq) >> 14) & 0xff)
#define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1)
#define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1)
#define REAL_IRQ_TO_FUNC(irq) ((irq) & 7)
@@ -221,40 +192,40 @@ void __init iSeries_init_IRQ(void)
* This will be called by device drivers (via enable_IRQ)
* to enable INTA in the bridge interrupt status register.
*/
-static void iSeries_enable_IRQ(unsigned int irq)
+static void iseries_enable_IRQ(unsigned int irq)
{
- u32 bus, deviceId, function, mask;
- const u32 subBus = 0;
+ u32 bus, dev_id, function, mask;
+ const u32 sub_bus = 0;
unsigned int rirq = virt_irq_to_real_map[irq];
/* The IRQ has already been locked by the caller */
bus = REAL_IRQ_TO_BUS(rirq);
function = REAL_IRQ_TO_FUNC(rirq);
- deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
+ dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
/* Unmask secondary INTA */
mask = 0x80000000;
- HvCallPci_unmaskInterrupts(bus, subBus, deviceId, mask);
+ HvCallPci_unmaskInterrupts(bus, sub_bus, dev_id, mask);
}
-/* This is called by iSeries_activate_IRQs */
-static unsigned int iSeries_startup_IRQ(unsigned int irq)
+/* This is called by iseries_activate_IRQs */
+static unsigned int iseries_startup_IRQ(unsigned int irq)
{
- u32 bus, deviceId, function, mask;
- const u32 subBus = 0;
+ u32 bus, dev_id, function, mask;
+ const u32 sub_bus = 0;
unsigned int rirq = virt_irq_to_real_map[irq];
bus = REAL_IRQ_TO_BUS(rirq);
function = REAL_IRQ_TO_FUNC(rirq);
- deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
+ dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
/* Link the IRQ number to the bridge */
- HvCallXm_connectBusUnit(bus, subBus, deviceId, irq);
+ HvCallXm_connectBusUnit(bus, sub_bus, dev_id, irq);
/* Unmask bridge interrupts in the FISR */
mask = 0x01010000 << function;
- HvCallPci_unmaskFisr(bus, subBus, deviceId, mask);
- iSeries_enable_IRQ(irq);
+ HvCallPci_unmaskFisr(bus, sub_bus, dev_id, mask);
+ iseries_enable_IRQ(irq);
return 0;
}
@@ -279,78 +250,117 @@ void __init iSeries_activate_IRQs()
}
/* this is not called anywhere currently */
-static void iSeries_shutdown_IRQ(unsigned int irq)
+static void iseries_shutdown_IRQ(unsigned int irq)
{
- u32 bus, deviceId, function, mask;
- const u32 subBus = 0;
+ u32 bus, dev_id, function, mask;
+ const u32 sub_bus = 0;
unsigned int rirq = virt_irq_to_real_map[irq];
/* irq should be locked by the caller */
bus = REAL_IRQ_TO_BUS(rirq);
function = REAL_IRQ_TO_FUNC(rirq);
- deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
+ dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
/* Invalidate the IRQ number in the bridge */
- HvCallXm_connectBusUnit(bus, subBus, deviceId, 0);
+ HvCallXm_connectBusUnit(bus, sub_bus, dev_id, 0);
/* Mask bridge interrupts in the FISR */
mask = 0x01010000 << function;
- HvCallPci_maskFisr(bus, subBus, deviceId, mask);
+ HvCallPci_maskFisr(bus, sub_bus, dev_id, mask);
}
/*
* This will be called by device drivers (via disable_IRQ)
* to disable INTA in the bridge interrupt status register.
*/
-static void iSeries_disable_IRQ(unsigned int irq)
+static void iseries_disable_IRQ(unsigned int irq)
{
- u32 bus, deviceId, function, mask;
- const u32 subBus = 0;
+ u32 bus, dev_id, function, mask;
+ const u32 sub_bus = 0;
unsigned int rirq = virt_irq_to_real_map[irq];
/* The IRQ has already been locked by the caller */
bus = REAL_IRQ_TO_BUS(rirq);
function = REAL_IRQ_TO_FUNC(rirq);
- deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
+ dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
/* Mask secondary INTA */
mask = 0x80000000;
- HvCallPci_maskInterrupts(bus, subBus, deviceId, mask);
+ HvCallPci_maskInterrupts(bus, sub_bus, dev_id, mask);
}
-/*
- * This does nothing because there is not enough information
- * provided to do the EOI HvCall. This is done by XmPciLpEvent.c
- */
-static void iSeries_end_IRQ(unsigned int irq)
+static void iseries_end_IRQ(unsigned int irq)
{
+ unsigned int rirq = virt_irq_to_real_map[irq];
+
+ HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq),
+ (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq));
}
static hw_irq_controller iSeries_IRQ_handler = {
.typename = "iSeries irq controller",
- .startup = iSeries_startup_IRQ,
- .shutdown = iSeries_shutdown_IRQ,
- .enable = iSeries_enable_IRQ,
- .disable = iSeries_disable_IRQ,
- .end = iSeries_end_IRQ
+ .startup = iseries_startup_IRQ,
+ .shutdown = iseries_shutdown_IRQ,
+ .enable = iseries_enable_IRQ,
+ .disable = iseries_disable_IRQ,
+ .end = iseries_end_IRQ
};
/*
* This is called out of iSeries_scan_slot to allocate an IRQ for an EADS slot
* It calculates the irq value for the slot.
- * Note that subBusNumber is always 0 (at the moment at least).
+ * Note that sub_bus is always 0 (at the moment at least).
*/
-int __init iSeries_allocate_IRQ(HvBusNumber busNumber,
- HvSubBusNumber subBusNumber, HvAgentId deviceId)
+int __init iSeries_allocate_IRQ(HvBusNumber bus,
+ HvSubBusNumber sub_bus, HvAgentId dev_id)
{
int virtirq;
unsigned int realirq;
- u8 idsel = (deviceId >> 4);
- u8 function = deviceId & 7;
+ u8 idsel = (dev_id >> 4);
+ u8 function = dev_id & 7;
- realirq = ((busNumber - 1) << 6) + ((idsel - 1) << 3) + function;
+ realirq = (((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3)
+ + function;
virtirq = virt_irq_create_mapping(realirq);
irq_desc[virtirq].handler = &iSeries_IRQ_handler;
return virtirq;
}
+
+#endif /* CONFIG_PCI */
+
+/*
+ * Get the next pending IRQ.
+ */
+int iSeries_get_irq(struct pt_regs *regs)
+{
+ /* -2 means ignore this interrupt */
+ int irq = -2;
+
+#ifdef CONFIG_SMP
+ if (get_lppaca()->int_dword.fields.ipi_cnt) {
+ get_lppaca()->int_dword.fields.ipi_cnt = 0;
+ iSeries_smp_message_recv(regs);
+ }
+#endif /* CONFIG_SMP */
+ if (hvlpevent_is_pending())
+ process_hvlpevents(regs);
+
+#ifdef CONFIG_PCI
+ if (num_pending_irqs) {
+ spin_lock(&pending_irqs_lock);
+ for (irq = 0; irq < NR_IRQS; irq++) {
+ if (pending_irqs[irq]) {
+ pending_irqs[irq]--;
+ num_pending_irqs--;
+ break;
+ }
+ }
+ spin_unlock(&pending_irqs_lock);
+ if (irq >= NR_IRQS)
+ irq = -2;
+ }
+#endif
+
+ return irq;
+}
diff --git a/arch/powerpc/platforms/iseries/irq.h b/arch/powerpc/platforms/iseries/irq.h
index 5f643f16ecc..b9c801ba5a4 100644
--- a/arch/powerpc/platforms/iseries/irq.h
+++ b/arch/powerpc/platforms/iseries/irq.h
@@ -4,5 +4,6 @@
extern void iSeries_init_IRQ(void);
extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, HvAgentId);
extern void iSeries_activate_IRQs(void);
+extern int iSeries_get_irq(struct pt_regs *);
#endif /* _ISERIES_IRQ_H */
diff --git a/arch/powerpc/platforms/iseries/lpardata.c b/arch/powerpc/platforms/iseries/lpardata.c
index bb8c91537f3..438e2dba63b 100644
--- a/arch/powerpc/platforms/iseries/lpardata.c
+++ b/arch/powerpc/platforms/iseries/lpardata.c
@@ -93,10 +93,7 @@ struct ItLpNaca itLpNaca = {
.xPirEnvironMode = 0, /* Piranha stuff */
.xPirConsoleMode = 0,
.xPirDasdMode = 0,
- .xLparInstalled = 0,
- .xSysPartitioned = 0,
- .xHwSyncedTBs = 0,
- .xIntProcUtilHmt = 0,
+ .flags = 0,
.xSpVpdFormat = 0,
.xIntProcRatio = 0,
.xPlicVrmIndex = 0, /* VRM index of PLIC */
@@ -225,3 +222,10 @@ struct ItVpdAreas itVpdAreas = {
0,0
}
};
+
+struct ItLpRegSave iseries_reg_save[] = {
+ [0 ... (NR_CPUS-1)] = {
+ .xDesc = 0xd397d9e2, /* "LpRS" */
+ .xSize = sizeof(struct ItLpRegSave),
+ },
+};
diff --git a/arch/powerpc/platforms/iseries/lpevents.c b/arch/powerpc/platforms/iseries/lpevents.c
index e9fb98bf895..0b885300d1d 100644
--- a/arch/powerpc/platforms/iseries/lpevents.c
+++ b/arch/powerpc/platforms/iseries/lpevents.c
@@ -53,7 +53,7 @@ static struct HvLpEvent * get_next_hvlpevent(void)
struct HvLpEvent * event;
event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
- if (event->xFlags.xValid) {
+ if (hvlpevent_is_valid(event)) {
/* rmb() needed only for weakly consistent machines (regatta) */
rmb();
/* Set pointer to next potential event */
@@ -84,7 +84,7 @@ int hvlpevent_is_pending(void)
next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
- return next_event->xFlags.xValid |
+ return hvlpevent_is_valid(next_event) ||
hvlpevent_queue.xPlicOverflowIntPending;
}
@@ -101,18 +101,18 @@ static void hvlpevent_clear_valid(struct HvLpEvent * event)
switch (extra) {
case 3:
tmp = (struct HvLpEvent*)((char*)event + 3 * LpEventAlign);
- tmp->xFlags.xValid = 0;
+ hvlpevent_invalidate(tmp);
case 2:
tmp = (struct HvLpEvent*)((char*)event + 2 * LpEventAlign);
- tmp->xFlags.xValid = 0;
+ hvlpevent_invalidate(tmp);
case 1:
tmp = (struct HvLpEvent*)((char*)event + 1 * LpEventAlign);
- tmp->xFlags.xValid = 0;
+ hvlpevent_invalidate(tmp);
}
mb();
- event->xFlags.xValid = 0;
+ hvlpevent_invalidate(event);
}
void process_hvlpevents(struct pt_regs *regs)
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
index 49e7e4b8584..a41d8b78c0c 100644
--- a/arch/powerpc/platforms/iseries/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -251,10 +251,7 @@ static struct pending_event *new_pending_event(void)
}
memset(ev, 0, sizeof(struct pending_event));
hev = &ev->event.hp_lp_event;
- hev->xFlags.xValid = 1;
- hev->xFlags.xAckType = HvLpEvent_AckType_ImmediateAck;
- hev->xFlags.xAckInd = HvLpEvent_AckInd_DoAck;
- hev->xFlags.xFunction = HvLpEvent_Function_Int;
+ hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK | HV_LP_EVENT_INT;
hev->xType = HvLpEvent_Type_MachineFac;
hev->xSourceLp = HvLpConfig_getLpIndex();
hev->xTargetLp = primary_lp;
@@ -518,17 +515,10 @@ static void handle_ack(struct io_mf_lp_event *event)
static void hv_handler(struct HvLpEvent *event, struct pt_regs *regs)
{
if ((event != NULL) && (event->xType == HvLpEvent_Type_MachineFac)) {
- switch(event->xFlags.xFunction) {
- case HvLpEvent_Function_Ack:
+ if (hvlpevent_is_ack(event))
handle_ack((struct io_mf_lp_event *)event);
- break;
- case HvLpEvent_Function_Int:
+ else
handle_int((struct io_mf_lp_event *)event);
- break;
- default:
- printk(KERN_ERR "mf.c: non ack/int event received\n");
- break;
- }
} else
printk(KERN_ERR "mf.c: alien event received\n");
}
diff --git a/arch/powerpc/platforms/iseries/misc.S b/arch/powerpc/platforms/iseries/misc.S
index dfe7aa1ba09..7641fc7e550 100644
--- a/arch/powerpc/platforms/iseries/misc.S
+++ b/arch/powerpc/platforms/iseries/misc.S
@@ -44,7 +44,8 @@ _GLOBAL(local_irq_restore)
/* Check pending interrupts */
/* A decrementer, IPI or PMC interrupt may have occurred
* while we were in the hypervisor (which enables) */
- ld r4,PACALPPACA+LPPACAANYINT(r13)
+ ld r4,PACALPPACAPTR(r13)
+ ld r4,LPPACAANYINT(r4)
cmpdi r4,0
beqlr
diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c
index dafc518fbb8..a19833b880e 100644
--- a/arch/powerpc/platforms/iseries/pci.c
+++ b/arch/powerpc/platforms/iseries/pci.c
@@ -43,6 +43,7 @@
#include "irq.h"
#include "pci.h"
#include "call_pci.h"
+#include "iommu.h"
extern unsigned long io_page_mask;
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index da26639190d..3f8790146b0 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -52,6 +52,7 @@
#include <asm/iseries/mf.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/lpar_map.h>
+#include <asm/udbg.h>
#include "naca.h"
#include "setup.h"
@@ -62,10 +63,8 @@
#include "call_sm.h"
#include "call_hpt.h"
-extern void hvlog(char *fmt, ...);
-
#ifdef DEBUG
-#define DBG(fmt...) hvlog(fmt)
+#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
@@ -474,14 +473,6 @@ static unsigned long __init build_iSeries_Memory_Map(void)
printk("HPT absolute addr = %016lx, size = %dK\n",
chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);
- ppc64_pft_size = __ilog2(hptSizePages * HW_PAGE_SIZE);
-
- /*
- * The actual hashed page table is in the hypervisor,
- * we have no direct access
- */
- htab_address = NULL;
-
/*
* Determine if absolute memory has any
* holes so that we can interpret the
@@ -547,7 +538,7 @@ static unsigned long __init build_iSeries_Memory_Map(void)
*/
static void __init iSeries_setup_arch(void)
{
- if (get_paca()->lppaca.shared_proc) {
+ if (get_lppaca()->shared_proc) {
ppc_md.idle_loop = iseries_shared_idle;
printk(KERN_INFO "Using shared processor idle loop\n");
} else {
@@ -571,16 +562,6 @@ static void iSeries_show_cpuinfo(struct seq_file *m)
/*
* Document me.
- * and Implement me.
- */
-static int iSeries_get_irq(struct pt_regs *regs)
-{
- /* -2 means ignore this interrupt */
- return -2;
-}
-
-/*
- * Document me.
*/
static void iSeries_restart(char *cmd)
{
@@ -666,7 +647,7 @@ static void yield_shared_processor(void)
* The decrementer stops during the yield. Force a fake decrementer
* here and let the timer_interrupt code sort out the actual time.
*/
- get_paca()->lppaca.int_dword.fields.decr_int = 1;
+ get_lppaca()->int_dword.fields.decr_int = 1;
process_iSeries_events();
}
@@ -871,6 +852,11 @@ void dt_prop_u64_list(struct iseries_flat_dt *dt, char *name, u64 *data, int n)
dt_prop(dt, name, (char *)data, sizeof(u64) * n);
}
+void dt_prop_u32_list(struct iseries_flat_dt *dt, char *name, u32 *data, int n)
+{
+ dt_prop(dt, name, (char *)data, sizeof(u32) * n);
+}
+
void dt_prop_empty(struct iseries_flat_dt *dt, char *name)
{
dt_prop(dt, name, NULL, 0);
@@ -882,6 +868,7 @@ void dt_cpus(struct iseries_flat_dt *dt)
unsigned char *p;
unsigned int i, index;
struct IoHriProcessorVpd *d;
+ u32 pft_size[2];
/* yuck */
snprintf(buf, 32, "PowerPC,%s", cur_cpu_spec->cpu_name);
@@ -892,8 +879,11 @@ void dt_cpus(struct iseries_flat_dt *dt)
dt_prop_u32(dt, "#address-cells", 1);
dt_prop_u32(dt, "#size-cells", 0);
+ pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */
+ pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
+
for (i = 0; i < NR_CPUS; i++) {
- if (paca[i].lppaca.dyn_proc_status >= 2)
+ if (lppaca[i].dyn_proc_status >= 2)
continue;
snprintf(p, 32 - (p - buf), "@%d", i);
@@ -901,7 +891,7 @@ void dt_cpus(struct iseries_flat_dt *dt)
dt_prop_str(dt, "device_type", "cpu");
- index = paca[i].lppaca.dyn_hv_phys_proc_index;
+ index = lppaca[i].dyn_hv_phys_proc_index;
d = &xIoHriProcessorVpd[index];
dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
@@ -918,6 +908,8 @@ void dt_cpus(struct iseries_flat_dt *dt)
dt_prop_u32(dt, "reg", i);
+ dt_prop_u32_list(dt, "ibm,pft-size", pft_size, 2);
+
dt_end_node(dt);
}
@@ -994,3 +986,16 @@ static int __init early_parsemem(char *p)
return 0;
}
early_param("mem", early_parsemem);
+
+static void hvputc(char c)
+{
+ if (c == '\n')
+ hvputc('\r');
+
+ HvCall_writeLogBuffer(&c, 1);
+}
+
+void __init udbg_init_iseries(void)
+{
+ udbg_putc = hvputc;
+}
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
index fcb094ec6ae..6f9d407a709 100644
--- a/arch/powerpc/platforms/iseries/smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -91,7 +91,7 @@ static void smp_iSeries_kick_cpu(int nr)
BUG_ON((nr < 0) || (nr >= NR_CPUS));
/* Verify that our partition has a processor nr */
- if (paca[nr].lppaca.dyn_proc_status >= 2)
+ if (lppaca[nr].dyn_proc_status >= 2)
return;
/* The processor is currently spinning, waiting
diff --git a/arch/powerpc/platforms/iseries/vio.c b/arch/powerpc/platforms/iseries/vio.c
index 384360ee06e..ad36ab0639f 100644
--- a/arch/powerpc/platforms/iseries/vio.c
+++ b/arch/powerpc/platforms/iseries/vio.c
@@ -22,6 +22,8 @@
#include <asm/iseries/hv_lp_config.h>
#include <asm/iseries/hv_call_xm.h>
+#include "iommu.h"
+
struct device *iSeries_vio_dev = &vio_bus_device.dev;
EXPORT_SYMBOL(iSeries_vio_dev);
diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c
index 84267269559..622a30149b4 100644
--- a/arch/powerpc/platforms/iseries/viopath.c
+++ b/arch/powerpc/platforms/iseries/viopath.c
@@ -270,7 +270,7 @@ static void handleMonitorEvent(struct HvLpEvent *event)
* First see if this is just a normal monitor message from the
* other partition
*/
- if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
+ if (hvlpevent_is_int(event)) {
remoteLp = event->xSourceLp;
if (!viopathStatus[remoteLp].isActive)
sendMonMsg(remoteLp);
@@ -331,13 +331,12 @@ static void handleConfig(struct HvLpEvent *event)
{
if (!event)
return;
- if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
+ if (hvlpevent_is_int(event)) {
printk(VIOPATH_KERN_WARN
"unexpected config request from partition %d",
event->xSourceLp);
- if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
- (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
+ if (hvlpevent_need_ack(event)) {
event->xRc = HvLpEvent_Rc_InvalidSubtype;
HvCallEvent_ackLpEvent(event);
}
@@ -377,7 +376,7 @@ static void vio_handleEvent(struct HvLpEvent *event, struct pt_regs *regs)
int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
>> VIOMAJOR_SUBTYPE_SHIFT;
- if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
+ if (hvlpevent_is_int(event)) {
remoteLp = event->xSourceLp;
/*
* The isActive is checked because if the hosting partition
@@ -436,8 +435,7 @@ static void vio_handleEvent(struct HvLpEvent *event, struct pt_regs *regs)
"unexpected virtual io event subtype %d from partition %d\n",
event->xSubtype, remoteLp);
/* No handler. Ack if necessary */
- if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
- (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
+ if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) {
event->xRc = HvLpEvent_Rc_InvalidSubtype;
HvCallEvent_ackLpEvent(event);
}
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 7ece8983a10..a1cb4d23672 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -51,6 +51,7 @@
#include <asm/pgtable.h>
#include <asm/bitops.h>
#include <asm/io.h>
+#include <asm/kexec.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
@@ -70,9 +71,6 @@
#define DBG(fmt...)
#endif
-extern void generic_find_legacy_serial_ports(u64 *physport,
- unsigned int *default_speed);
-
static void maple_restart(char *cmd)
{
unsigned int maple_nvram_base;
@@ -191,24 +189,10 @@ static void __init maple_init_early(void)
*/
hpte_init_native();
- /* Find the serial port */
- generic_find_legacy_serial_ports(&physport, &default_speed);
-
- DBG("phys port addr: %lx\n", (long)physport);
-
- if (physport) {
- void *comport;
- /* Map the uart for udbg. */
- comport = (void *)ioremap(physport, 16);
- udbg_init_uart(comport, default_speed);
-
- DBG("Hello World !\n");
- }
-
/* Setup interrupt mapping options */
ppc64_interrupt_controller = IC_OPEN_PIC;
- iommu_init_early_u3();
+ iommu_init_early_dart();
DBG(" <- maple_init_early\n");
}
@@ -270,7 +254,7 @@ static int __init maple_probe(int platform)
* occupies having to be broken up so the DART itself is not
* part of the cacheable linar mapping
*/
- alloc_u3_dart_table();
+ alloc_dart_table();
return 1;
}
@@ -292,4 +276,9 @@ struct machdep_calls __initdata maple_md = {
.calibrate_decr = generic_calibrate_decr,
.progress = maple_progress,
.idle_loop = native_idle,
+#ifdef CONFIG_KEXEC
+ .machine_kexec = default_machine_kexec,
+ .machine_kexec_prepare = default_machine_kexec_prepare,
+ .machine_crash_shutdown = default_machine_crash_shutdown,
+#endif
};
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
index c9df44fcf57..78093d7f97a 100644
--- a/arch/powerpc/platforms/powermac/Makefile
+++ b/arch/powerpc/platforms/powermac/Makefile
@@ -1,9 +1,14 @@
+CFLAGS_bootx_init.o += -fPIC
+
obj-y += pic.o setup.o time.o feature.o pci.o \
- sleep.o low_i2c.o cache.o
+ sleep.o low_i2c.o cache.o pfunc_core.o \
+ pfunc_base.o
obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o
obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq_32.o
obj-$(CONFIG_CPU_FREQ_PMAC64) += cpufreq_64.o
obj-$(CONFIG_NVRAM) += nvram.o
# ppc64 pmac doesn't define CONFIG_NVRAM but needs nvram stuff
obj-$(CONFIG_PPC64) += nvram.o
+obj-$(CONFIG_PPC32) += bootx_init.o
obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_PPC_MERGE) += udbg_scc.o udbg_adb.o
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c
new file mode 100644
index 00000000000..fa8b4d7b5de
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/bootx_init.c
@@ -0,0 +1,547 @@
+/*
+ * Early boot support code for BootX bootloader
+ *
+ * Copyright (C) 2005 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/version.h>
+#include <asm/sections.h>
+#include <asm/prom.h>
+#include <asm/page.h>
+#include <asm/bootx.h>
+#include <asm/bootinfo.h>
+#include <asm/btext.h>
+#include <asm/io.h>
+
+#undef DEBUG
+#define SET_BOOT_BAT
+
+#ifdef DEBUG
+#define DBG(fmt...) do { bootx_printf(fmt); } while(0)
+#else
+#define DBG(fmt...) do { } while(0)
+#endif
+
+extern void __start(unsigned long r3, unsigned long r4, unsigned long r5);
+
+static unsigned long __initdata bootx_dt_strbase;
+static unsigned long __initdata bootx_dt_strend;
+static unsigned long __initdata bootx_node_chosen;
+static boot_infos_t * __initdata bootx_info;
+static char __initdata bootx_disp_path[256];
+
+/* Is boot-info compatible ? */
+#define BOOT_INFO_IS_COMPATIBLE(bi) \
+ ((bi)->compatible_version <= BOOT_INFO_VERSION)
+#define BOOT_INFO_IS_V2_COMPATIBLE(bi) ((bi)->version >= 2)
+#define BOOT_INFO_IS_V4_COMPATIBLE(bi) ((bi)->version >= 4)
+
+#ifdef CONFIG_BOOTX_TEXT
+static void __init bootx_printf(const char *format, ...)
+{
+ const char *p, *q, *s;
+ va_list args;
+ unsigned long v;
+
+ va_start(args, format);
+ for (p = format; *p != 0; p = q) {
+ for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
+ ;
+ if (q > p)
+ btext_drawtext(p, q - p);
+ if (*q == 0)
+ break;
+ if (*q == '\n') {
+ ++q;
+ btext_flushline();
+ btext_drawstring("\r\n");
+ btext_flushline();
+ continue;
+ }
+ ++q;
+ if (*q == 0)
+ break;
+ switch (*q) {
+ case 's':
+ ++q;
+ s = va_arg(args, const char *);
+ if (s == NULL)
+ s = "<NULL>";
+ btext_drawstring(s);
+ break;
+ case 'x':
+ ++q;
+ v = va_arg(args, unsigned long);
+ btext_drawhex(v);
+ break;
+ }
+ }
+}
+#else /* CONFIG_BOOTX_TEXT */
+static void __init bootx_printf(const char *format, ...) {}
+#endif /* CONFIG_BOOTX_TEXT */
+
+static void * __init bootx_early_getprop(unsigned long base,
+ unsigned long node,
+ char *prop)
+{
+ struct bootx_dt_node *np = (struct bootx_dt_node *)(base + node);
+ u32 *ppp = &np->properties;
+
+ while(*ppp) {
+ struct bootx_dt_prop *pp =
+ (struct bootx_dt_prop *)(base + *ppp);
+
+ if (strcmp((char *)((unsigned long)pp->name + base),
+ prop) == 0) {
+ return (void *)((unsigned long)pp->value + base);
+ }
+ ppp = &pp->next;
+ }
+ return NULL;
+}
+
+#define dt_push_token(token, mem) \
+ do { \
+ *(mem) = _ALIGN_UP(*(mem),4); \
+ *((u32 *)*(mem)) = token; \
+ *(mem) += 4; \
+ } while(0)
+
+static unsigned long __init bootx_dt_find_string(char *str)
+{
+ char *s, *os;
+
+ s = os = (char *)bootx_dt_strbase;
+ s += 4;
+ while (s < (char *)bootx_dt_strend) {
+ if (strcmp(s, str) == 0)
+ return s - os;
+ s += strlen(s) + 1;
+ }
+ return 0;
+}
+
+static void __init bootx_dt_add_prop(char *name, void *data, int size,
+ unsigned long *mem_end)
+{
+ unsigned long soff = bootx_dt_find_string(name);
+ if (data == NULL)
+ size = 0;
+ if (soff == 0) {
+ bootx_printf("WARNING: Can't find string index for <%s>\n",
+ name);
+ return;
+ }
+ if (size > 0x20000) {
+ bootx_printf("WARNING: ignoring large property ");
+ bootx_printf("%s length 0x%x\n", name, size);
+ return;
+ }
+ dt_push_token(OF_DT_PROP, mem_end);
+ dt_push_token(size, mem_end);
+ dt_push_token(soff, mem_end);
+
+ /* push property content */
+ if (size && data) {
+ memcpy((void *)*mem_end, data, size);
+ *mem_end = _ALIGN_UP(*mem_end + size, 4);
+ }
+}
+
+static void __init bootx_add_chosen_props(unsigned long base,
+ unsigned long *mem_end)
+{
+ u32 val = _MACH_Pmac;
+
+ bootx_dt_add_prop("linux,platform", &val, 4, mem_end);
+
+ if (bootx_info->kernelParamsOffset) {
+ char *args = (char *)((unsigned long)bootx_info) +
+ bootx_info->kernelParamsOffset;
+ bootx_dt_add_prop("bootargs", args, strlen(args) + 1, mem_end);
+ }
+ if (bootx_info->ramDisk) {
+ val = ((unsigned long)bootx_info) + bootx_info->ramDisk;
+ bootx_dt_add_prop("linux,initrd-start", &val, 4, mem_end);
+ val += bootx_info->ramDiskSize;
+ bootx_dt_add_prop("linux,initrd-end", &val, 4, mem_end);
+ }
+ if (strlen(bootx_disp_path))
+ bootx_dt_add_prop("linux,stdout-path", bootx_disp_path,
+ strlen(bootx_disp_path) + 1, mem_end);
+}
+
+static void __init bootx_add_display_props(unsigned long base,
+ unsigned long *mem_end)
+{
+ bootx_dt_add_prop("linux,boot-display", NULL, 0, mem_end);
+ bootx_dt_add_prop("linux,opened", NULL, 0, mem_end);
+}
+
+static void __init bootx_dt_add_string(char *s, unsigned long *mem_end)
+{
+ unsigned int l = strlen(s) + 1;
+ memcpy((void *)*mem_end, s, l);
+ bootx_dt_strend = *mem_end = *mem_end + l;
+}
+
+static void __init bootx_scan_dt_build_strings(unsigned long base,
+ unsigned long node,
+ unsigned long *mem_end)
+{
+ struct bootx_dt_node *np = (struct bootx_dt_node *)(base + node);
+ u32 *cpp, *ppp = &np->properties;
+ unsigned long soff;
+ char *namep;
+
+ /* Keep refs to known nodes */
+ namep = np->full_name ? (char *)(base + np->full_name) : NULL;
+ if (namep == NULL) {
+ bootx_printf("Node without a full name !\n");
+ namep = "";
+ }
+ DBG("* strings: %s\n", namep);
+
+ if (!strcmp(namep, "/chosen")) {
+ DBG(" detected /chosen ! adding properties names !\n");
+ bootx_dt_add_string("linux,platform", mem_end);
+ bootx_dt_add_string("linux,stdout-path", mem_end);
+ bootx_dt_add_string("linux,initrd-start", mem_end);
+ bootx_dt_add_string("linux,initrd-end", mem_end);
+ bootx_dt_add_string("bootargs", mem_end);
+ bootx_node_chosen = node;
+ }
+ if (node == bootx_info->dispDeviceRegEntryOffset) {
+ DBG(" detected display ! adding properties names !\n");
+ bootx_dt_add_string("linux,boot-display", mem_end);
+ bootx_dt_add_string("linux,opened", mem_end);
+ strncpy(bootx_disp_path, namep, 255);
+ }
+
+ /* get and store all property names */
+ while (*ppp) {
+ struct bootx_dt_prop *pp =
+ (struct bootx_dt_prop *)(base + *ppp);
+
+ namep = pp->name ? (char *)(base + pp->name) : NULL;
+ if (namep == NULL || strcmp(namep, "name") == 0)
+ goto next;
+ /* get/create string entry */
+ soff = bootx_dt_find_string(namep);
+ if (soff == 0)
+ bootx_dt_add_string(namep, mem_end);
+ next:
+ ppp = &pp->next;
+ }
+
+ /* do all our children */
+ cpp = &np->child;
+ while(*cpp) {
+ np = (struct bootx_dt_node *)(base + *cpp);
+ bootx_scan_dt_build_strings(base, *cpp, mem_end);
+ cpp = &np->sibling;
+ }
+}
+
+static void __init bootx_scan_dt_build_struct(unsigned long base,
+ unsigned long node,
+ unsigned long *mem_end)
+{
+ struct bootx_dt_node *np = (struct bootx_dt_node *)(base + node);
+ u32 *cpp, *ppp = &np->properties;
+ char *namep, *p, *ep, *lp;
+ int l;
+
+ dt_push_token(OF_DT_BEGIN_NODE, mem_end);
+
+ /* get the node's full name */
+ namep = np->full_name ? (char *)(base + np->full_name) : NULL;
+ if (namep == NULL)
+ namep = "";
+ l = strlen(namep);
+
+ DBG("* struct: %s\n", namep);
+
+ /* Fixup an Apple bug where they have bogus \0 chars in the
+ * middle of the path in some properties, and extract
+ * the unit name (everything after the last '/').
+ */
+ memcpy((void *)*mem_end, namep, l + 1);
+ namep = (char *)*mem_end;
+ for (lp = p = namep, ep = namep + l; p < ep; p++) {
+ if (*p == '/')
+ lp = namep;
+ else if (*p != 0)
+ *lp++ = *p;
+ }
+ *lp = 0;
+ *mem_end = _ALIGN_UP((unsigned long)lp + 1, 4);
+
+ /* get and store all properties */
+ while (*ppp) {
+ struct bootx_dt_prop *pp =
+ (struct bootx_dt_prop *)(base + *ppp);
+
+ namep = pp->name ? (char *)(base + pp->name) : NULL;
+ /* Skip "name" */
+ if (namep == NULL || !strcmp(namep, "name"))
+ goto next;
+ /* Skip "bootargs" in /chosen too as we replace it */
+ if (node == bootx_node_chosen && !strcmp(namep, "bootargs"))
+ goto next;
+
+ /* push property head */
+ bootx_dt_add_prop(namep,
+ pp->value ? (void *)(base + pp->value): NULL,
+ pp->length, mem_end);
+ next:
+ ppp = &pp->next;
+ }
+
+ if (node == bootx_node_chosen)
+ bootx_add_chosen_props(base, mem_end);
+ if (node == bootx_info->dispDeviceRegEntryOffset)
+ bootx_add_display_props(base, mem_end);
+
+ /* do all our children */
+ cpp = &np->child;
+ while(*cpp) {
+ np = (struct bootx_dt_node *)(base + *cpp);
+ bootx_scan_dt_build_struct(base, *cpp, mem_end);
+ cpp = &np->sibling;
+ }
+
+ dt_push_token(OF_DT_END_NODE, mem_end);
+}
+
+static unsigned long __init bootx_flatten_dt(unsigned long start)
+{
+ boot_infos_t *bi = bootx_info;
+ unsigned long mem_start, mem_end;
+ struct boot_param_header *hdr;
+ unsigned long base;
+ u64 *rsvmap;
+
+ /* Start using memory after the big blob passed by BootX, get
+ * some space for the header
+ */
+ mem_start = mem_end = _ALIGN_UP(((unsigned long)bi) + start, 4);
+ DBG("Boot params header at: %x\n", mem_start);
+ hdr = (struct boot_param_header *)mem_start;
+ mem_end += sizeof(struct boot_param_header);
+ rsvmap = (u64 *)(_ALIGN_UP(mem_end, 8));
+ hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - mem_start;
+ mem_end = ((unsigned long)rsvmap) + 8 * sizeof(u64);
+
+ /* Get base of tree */
+ base = ((unsigned long)bi) + bi->deviceTreeOffset;
+
+ /* Build string array */
+ DBG("Building string array at: %x\n", mem_end);
+ DBG("Device Tree Base=%x\n", base);
+ bootx_dt_strbase = mem_end;
+ mem_end += 4;
+ bootx_dt_strend = mem_end;
+ bootx_scan_dt_build_strings(base, 4, &mem_end);
+ hdr->off_dt_strings = bootx_dt_strbase - mem_start;
+ hdr->dt_strings_size = bootx_dt_strend - bootx_dt_strbase;
+
+ /* Build structure */
+ mem_end = _ALIGN(mem_end, 16);
+ DBG("Building device tree structure at: %x\n", mem_end);
+ hdr->off_dt_struct = mem_end - mem_start;
+ bootx_scan_dt_build_struct(base, 4, &mem_end);
+ dt_push_token(OF_DT_END, &mem_end);
+
+ /* Finish header */
+ hdr->boot_cpuid_phys = 0;
+ hdr->magic = OF_DT_HEADER;
+ hdr->totalsize = mem_end - mem_start;
+ hdr->version = OF_DT_VERSION;
+ /* Version 16 is not backward compatible */
+ hdr->last_comp_version = 0x10;
+
+ /* Reserve the whole thing and copy the reserve map in, we
+ * also bump mem_reserve_cnt to cause further reservations to
+ * fail since it's too late.
+ */
+ mem_end = _ALIGN(mem_end, PAGE_SIZE);
+ DBG("End of boot params: %x\n", mem_end);
+ rsvmap[0] = mem_start;
+ rsvmap[1] = mem_end;
+ rsvmap[2] = 0;
+ rsvmap[3] = 0;
+
+ return (unsigned long)hdr;
+}
+
+
+#ifdef CONFIG_BOOTX_TEXT
+static void __init btext_welcome(boot_infos_t *bi)
+{
+ unsigned long flags;
+ unsigned long pvr;
+
+ bootx_printf("Welcome to Linux, kernel " UTS_RELEASE "\n");
+ bootx_printf("\nlinked at : 0x%x", KERNELBASE);
+ bootx_printf("\nframe buffer at : 0x%x", bi->dispDeviceBase);
+ bootx_printf(" (phys), 0x%x", bi->logicalDisplayBase);
+ bootx_printf(" (log)");
+ bootx_printf("\nklimit : 0x%x",(unsigned long)klimit);
+ bootx_printf("\nboot_info at : 0x%x", bi);
+ __asm__ __volatile__ ("mfmsr %0" : "=r" (flags));
+ bootx_printf("\nMSR : 0x%x", flags);
+ __asm__ __volatile__ ("mfspr %0, 287" : "=r" (pvr));
+ bootx_printf("\nPVR : 0x%x", pvr);
+ pvr >>= 16;
+ if (pvr > 1) {
+ __asm__ __volatile__ ("mfspr %0, 1008" : "=r" (flags));
+ bootx_printf("\nHID0 : 0x%x", flags);
+ }
+ if (pvr == 8 || pvr == 12 || pvr == 0x800c) {
+ __asm__ __volatile__ ("mfspr %0, 1019" : "=r" (flags));
+ bootx_printf("\nICTC : 0x%x", flags);
+ }
+#ifdef DEBUG
+ bootx_printf("\n\n");
+ bootx_printf("bi->deviceTreeOffset : 0x%x\n",
+ bi->deviceTreeOffset);
+ bootx_printf("bi->deviceTreeSize : 0x%x\n",
+ bi->deviceTreeSize);
+#endif
+ bootx_printf("\n\n");
+}
+#endif /* CONFIG_BOOTX_TEXT */
+
+void __init bootx_init(unsigned long r3, unsigned long r4)
+{
+ boot_infos_t *bi = (boot_infos_t *) r4;
+ unsigned long hdr;
+ unsigned long space;
+ unsigned long ptr, x;
+ char *model;
+ unsigned long offset = reloc_offset();
+
+ reloc_got2(offset);
+
+ bootx_info = bi;
+
+ /* We haven't cleared any bss at this point, make sure
+ * what we need is initialized
+ */
+ bootx_dt_strbase = bootx_dt_strend = 0;
+ bootx_node_chosen = 0;
+ bootx_disp_path[0] = 0;
+
+ if (!BOOT_INFO_IS_V2_COMPATIBLE(bi))
+ bi->logicalDisplayBase = bi->dispDeviceBase;
+
+#ifdef CONFIG_BOOTX_TEXT
+ btext_setup_display(bi->dispDeviceRect[2] - bi->dispDeviceRect[0],
+ bi->dispDeviceRect[3] - bi->dispDeviceRect[1],
+ bi->dispDeviceDepth, bi->dispDeviceRowBytes,
+ (unsigned long)bi->logicalDisplayBase);
+ btext_clearscreen();
+ btext_flushscreen();
+#endif /* CONFIG_BOOTX_TEXT */
+
+ /*
+ * Test if boot-info is compatible. Done only in config
+ * CONFIG_BOOTX_TEXT since there is nothing much we can do
+ * with an incompatible version, except display a message
+ * and eventually hang the processor...
+ *
+ * I'll try to keep enough of boot-info compatible in the
+ * future to always allow display of this message;
+ */
+ if (!BOOT_INFO_IS_COMPATIBLE(bi)) {
+ bootx_printf(" !!! WARNING - Incompatible version"
+ " of BootX !!!\n\n\n");
+ for (;;)
+ ;
+ }
+ if (bi->architecture != BOOT_ARCH_PCI) {
+ bootx_printf(" !!! WARNING - Usupported machine"
+ " architecture !\n");
+ for (;;)
+ ;
+ }
+
+#ifdef CONFIG_BOOTX_TEXT
+ btext_welcome(bi);
+#endif
+ /* New BootX enters the kernel with the MMU off; I/Os are not allowed
+ * here. This hack will have been done by the bootstrap anyway.
+ */
+ if (bi->version < 4) {
+ /*
+ * XXX If this is an iMac, turn off the USB controller.
+ */
+ model = (char *) bootx_early_getprop(r4 + bi->deviceTreeOffset,
+ 4, "model");
+ if (model
+ && (strcmp(model, "iMac,1") == 0
+ || strcmp(model, "PowerMac1,1") == 0)) {
+ bootx_printf("iMac,1 detected, shutting down USB \n");
+ out_le32((unsigned *)0x80880008, 1); /* XXX */
+ }
+ }
+
+ /* Get a pointer that points above the device tree, args, ramdisk,
+ * etc... to use for generating the flattened tree
+ */
+ if (bi->version < 5) {
+ space = bi->deviceTreeOffset + bi->deviceTreeSize;
+ if (bi->ramDisk)
+ space = bi->ramDisk + bi->ramDiskSize;
+ } else
+ space = bi->totalParamsSize;
+
+ bootx_printf("Total space used by parameters & ramdisk: %x \n", space);
+
+ /* New BootX will have flushed all TLBs and enters the kernel with
+ * MMU switched OFF, so this should not be useful anymore.
+ */
+ if (bi->version < 4) {
+ bootx_printf("Touching pages...\n");
+
+ /*
+ * Touch each page to make sure the PTEs for them
+ * are in the hash table - the aim is to try to avoid
+ * getting DSI exceptions while copying the kernel image.
+ */
+ for (ptr = ((unsigned long) &_stext) & PAGE_MASK;
+ ptr < (unsigned long)bi + space; ptr += PAGE_SIZE)
+ x = *(volatile unsigned long *)ptr;
+ }
+
+ /* Ok, now we need to generate a flattened device-tree to pass
+ * to the kernel
+ */
+ bootx_printf("Preparing boot params...\n");
+
+ hdr = bootx_flatten_dt(space);
+
+#ifdef CONFIG_BOOTX_TEXT
+#ifdef SET_BOOT_BAT
+ bootx_printf("Preparing BAT...\n");
+ btext_prepare_BAT();
+#else
+ btext_unmap();
+#endif
+#endif
+
+ reloc_got2(-offset);
+
+ __start(hdr, KERNELBASE + offset, 0);
+}
diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c
index 39150342c6f..a415e8d2f7a 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_64.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_64.c
@@ -28,6 +28,7 @@
#include <asm/cputable.h>
#include <asm/time.h>
#include <asm/smu.h>
+#include <asm/pmac_pfunc.h>
#undef DEBUG
@@ -79,12 +80,16 @@ static struct freq_attr* g5_cpu_freqs_attr[] = {
};
/* Power mode data is an array of the 32 bits PCR values to use for
- * the various frequencies, retreived from the device-tree
+ * the various frequencies, retrieved from the device-tree
*/
static u32 *g5_pmode_data;
static int g5_pmode_max;
static int g5_pmode_cur;
+static void (*g5_switch_volt)(int speed_mode);
+static int (*g5_switch_freq)(int speed_mode);
+static int (*g5_query_freq)(void);
+
static DECLARE_MUTEX(g5_switch_mutex);
@@ -92,9 +97,11 @@ static struct smu_sdbp_fvt *g5_fvt_table; /* table of op. points */
static int g5_fvt_count; /* number of op. points */
static int g5_fvt_cur; /* current op. point */
-/* ----------------- real hardware interface */
+/*
+ * SMU based voltage switching for Neo2 platforms
+ */
-static void g5_switch_volt(int speed_mode)
+static void g5_smu_switch_volt(int speed_mode)
{
struct smu_simple_cmd cmd;
@@ -105,26 +112,57 @@ static void g5_switch_volt(int speed_mode)
wait_for_completion(&comp);
}
-static int g5_switch_freq(int speed_mode)
+/*
+ * Platform function based voltage/vdnap switching for Neo2
+ */
+
+static struct pmf_function *pfunc_set_vdnap0;
+static struct pmf_function *pfunc_vdnap0_complete;
+
+static void g5_vdnap_switch_volt(int speed_mode)
{
- struct cpufreq_freqs freqs;
- int to;
+ struct pmf_args args;
+ u32 slew, done = 0;
+ unsigned long timeout;
- if (g5_pmode_cur == speed_mode)
- return 0;
+ slew = (speed_mode == CPUFREQ_LOW) ? 1 : 0;
+ args.count = 1;
+ args.u[0].p = &slew;
- down(&g5_switch_mutex);
+ pmf_call_one(pfunc_set_vdnap0, &args);
- freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency;
- freqs.new = g5_cpu_freqs[speed_mode].frequency;
- freqs.cpu = 0;
+ /* It's an irq GPIO so we should be able to just block here,
+ * I'll do that later after I've properly tested the IRQ code for
+ * platform functions
+ */
+ timeout = jiffies + HZ/10;
+ while(!time_after(jiffies, timeout)) {
+ args.count = 1;
+ args.u[0].p = &done;
+ pmf_call_one(pfunc_vdnap0_complete, &args);
+ if (done)
+ break;
+ msleep(1);
+ }
+ if (done == 0)
+ printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
+}
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+/*
+ * SCOM based frequency switching for 970FX rev3
+ */
+static int g5_scom_switch_freq(int speed_mode)
+{
+ unsigned long flags;
+ int to;
/* If frequency is going up, first ramp up the voltage */
if (speed_mode < g5_pmode_cur)
g5_switch_volt(speed_mode);
+ local_irq_save(flags);
+
/* Clear PCR high */
scom970_write(SCOM_PCR, 0);
/* Clear PCR low */
@@ -147,6 +185,8 @@ static int g5_switch_freq(int speed_mode)
udelay(100);
}
+ local_irq_restore(flags);
+
/* If frequency is going down, last ramp the voltage */
if (speed_mode > g5_pmode_cur)
g5_switch_volt(speed_mode);
@@ -154,14 +194,10 @@ static int g5_switch_freq(int speed_mode)
g5_pmode_cur = speed_mode;
ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul;
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-
- up(&g5_switch_mutex);
-
return 0;
}
-static int g5_query_freq(void)
+static int g5_scom_query_freq(void)
{
unsigned long psr = scom970_read(SCOM_PSR);
int i;
@@ -173,7 +209,104 @@ static int g5_query_freq(void)
return i;
}
-/* ----------------- cpufreq bookkeeping */
+/*
+ * Platform function based voltage switching for PowerMac7,2 & 7,3
+ */
+
+static struct pmf_function *pfunc_cpu0_volt_high;
+static struct pmf_function *pfunc_cpu0_volt_low;
+static struct pmf_function *pfunc_cpu1_volt_high;
+static struct pmf_function *pfunc_cpu1_volt_low;
+
+static void g5_pfunc_switch_volt(int speed_mode)
+{
+ if (speed_mode == CPUFREQ_HIGH) {
+ if (pfunc_cpu0_volt_high)
+ pmf_call_one(pfunc_cpu0_volt_high, NULL);
+ if (pfunc_cpu1_volt_high)
+ pmf_call_one(pfunc_cpu1_volt_high, NULL);
+ } else {
+ if (pfunc_cpu0_volt_low)
+ pmf_call_one(pfunc_cpu0_volt_low, NULL);
+ if (pfunc_cpu1_volt_low)
+ pmf_call_one(pfunc_cpu1_volt_low, NULL);
+ }
+ msleep(10); /* should be faster, to be fixed */
+}
+
+/*
+ * Platform function based frequency switching for PowerMac7,2 & 7,3
+ */
+
+static struct pmf_function *pfunc_cpu_setfreq_high;
+static struct pmf_function *pfunc_cpu_setfreq_low;
+static struct pmf_function *pfunc_cpu_getfreq;
+static struct pmf_function *pfunc_slewing_done;
+
+static int g5_pfunc_switch_freq(int speed_mode)
+{
+ struct pmf_args args;
+ u32 done = 0;
+ unsigned long timeout;
+
+ /* If frequency is going up, first ramp up the voltage */
+ if (speed_mode < g5_pmode_cur)
+ g5_switch_volt(speed_mode);
+
+ /* Do it */
+ if (speed_mode == CPUFREQ_HIGH)
+ pmf_call_one(pfunc_cpu_setfreq_high, NULL);
+ else
+ pmf_call_one(pfunc_cpu_setfreq_low, NULL);
+
+ /* It's an irq GPIO so we should be able to just block here,
+ * I'll do that later after I've properly tested the IRQ code for
+ * platform functions
+ */
+ timeout = jiffies + HZ/10;
+ while(!time_after(jiffies, timeout)) {
+ args.count = 1;
+ args.u[0].p = &done;
+ pmf_call_one(pfunc_slewing_done, &args);
+ if (done)
+ break;
+ msleep(1);
+ }
+ if (done == 0)
+ printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
+
+ /* If frequency is going down, last ramp the voltage */
+ if (speed_mode > g5_pmode_cur)
+ g5_switch_volt(speed_mode);
+
+ g5_pmode_cur = speed_mode;
+ ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul;
+
+ return 0;
+}
+
+static int g5_pfunc_query_freq(void)
+{
+ struct pmf_args args;
+ u32 val = 0;
+
+ args.count = 1;
+ args.u[0].p = &val;
+ pmf_call_one(pfunc_cpu_getfreq, &args);
+ return val ? CPUFREQ_HIGH : CPUFREQ_LOW;
+}
+
+/*
+ * Fake voltage switching for platforms with missing support
+ */
+
+static void g5_dummy_switch_volt(int speed_mode)
+{
+}
+
+/*
+ * Common interface to the cpufreq core
+ */
static int g5_cpufreq_verify(struct cpufreq_policy *policy)
{
@@ -183,13 +316,30 @@ static int g5_cpufreq_verify(struct cpufreq_policy *policy)
static int g5_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
{
- unsigned int newstate = 0;
+ unsigned int newstate = 0;
+ struct cpufreq_freqs freqs;
+ int rc;
if (cpufreq_frequency_table_target(policy, g5_cpu_freqs,
target_freq, relation, &newstate))
return -EINVAL;
- return g5_switch_freq(newstate);
+ if (g5_pmode_cur == newstate)
+ return 0;
+
+ down(&g5_switch_mutex);
+
+ freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency;
+ freqs.new = g5_cpu_freqs[newstate].frequency;
+ freqs.cpu = 0;
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ rc = g5_switch_freq(newstate);
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ up(&g5_switch_mutex);
+
+ return rc;
}
static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
@@ -205,6 +355,7 @@ static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
policy->cur = g5_cpu_freqs[g5_query_freq()].frequency;
+ policy->cpus = cpu_possible_map;
cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy,
@@ -224,19 +375,39 @@ static struct cpufreq_driver g5_cpufreq_driver = {
};
-static int __init g5_cpufreq_init(void)
+static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
{
struct device_node *cpunode;
unsigned int psize, ssize;
- struct smu_sdbp_header *shdr;
unsigned long max_freq;
- u32 *valp;
+ char *freq_method, *volt_method;
+ u32 *valp, pvr_hi;
+ int use_volts_vdnap = 0;
+ int use_volts_smu = 0;
int rc = -ENODEV;
- /* Look for CPU and SMU nodes */
- cpunode = of_find_node_by_type(NULL, "cpu");
- if (!cpunode) {
- DBG("No CPU node !\n");
+ /* Check supported platforms */
+ if (machine_is_compatible("PowerMac8,1") ||
+ machine_is_compatible("PowerMac8,2") ||
+ machine_is_compatible("PowerMac9,1"))
+ use_volts_smu = 1;
+ else if (machine_is_compatible("PowerMac11,2"))
+ use_volts_vdnap = 1;
+ else
+ return -ENODEV;
+
+ /* Get first CPU node */
+ for (cpunode = NULL;
+ (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
+ u32 *reg =
+ (u32 *)get_property(cpunode, "reg", NULL);
+ if (reg == NULL || (*reg) != 0)
+ continue;
+ if (!strcmp(cpunode->type, "cpu"))
+ break;
+ }
+ if (cpunode == NULL) {
+ printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
return -ENODEV;
}
@@ -246,8 +417,9 @@ static int __init g5_cpufreq_init(void)
DBG("No cpu-version property !\n");
goto bail_noprops;
}
- if (((*valp) >> 16) != 0x3c) {
- DBG("Wrong CPU version: %08x\n", *valp);
+ pvr_hi = (*valp) >> 16;
+ if (pvr_hi != 0x3c && pvr_hi != 0x44) {
+ printk(KERN_ERR "cpufreq: Unsupported CPU version\n");
goto bail_noprops;
}
@@ -259,18 +431,50 @@ static int __init g5_cpufreq_init(void)
}
g5_pmode_max = psize / sizeof(u32) - 1;
- /* Look for the FVT table */
- shdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL);
- if (!shdr)
- goto bail_noprops;
- g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1];
- ssize = (shdr->len * sizeof(u32)) - sizeof(struct smu_sdbp_header);
- g5_fvt_count = ssize / sizeof(struct smu_sdbp_fvt);
- g5_fvt_cur = 0;
-
- /* Sanity checking */
- if (g5_fvt_count < 1 || g5_pmode_max < 1)
- goto bail_noprops;
+ if (use_volts_smu) {
+ struct smu_sdbp_header *shdr;
+
+ /* Look for the FVT table */
+ shdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL);
+ if (!shdr)
+ goto bail_noprops;
+ g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1];
+ ssize = (shdr->len * sizeof(u32)) -
+ sizeof(struct smu_sdbp_header);
+ g5_fvt_count = ssize / sizeof(struct smu_sdbp_fvt);
+ g5_fvt_cur = 0;
+
+ /* Sanity checking */
+ if (g5_fvt_count < 1 || g5_pmode_max < 1)
+ goto bail_noprops;
+
+ g5_switch_volt = g5_smu_switch_volt;
+ volt_method = "SMU";
+ } else if (use_volts_vdnap) {
+ struct device_node *root;
+
+ root = of_find_node_by_path("/");
+ if (root == NULL) {
+ printk(KERN_ERR "cpufreq: Can't find root of "
+ "device tree\n");
+ goto bail_noprops;
+ }
+ pfunc_set_vdnap0 = pmf_find_function(root, "set-vdnap0");
+ pfunc_vdnap0_complete =
+ pmf_find_function(root, "slewing-done");
+ if (pfunc_set_vdnap0 == NULL ||
+ pfunc_vdnap0_complete == NULL) {
+ printk(KERN_ERR "cpufreq: Can't find required "
+ "platform function\n");
+ goto bail_noprops;
+ }
+
+ g5_switch_volt = g5_vdnap_switch_volt;
+ volt_method = "GPIO";
+ } else {
+ g5_switch_volt = g5_dummy_switch_volt;
+ volt_method = "none";
+ }
/*
* From what I see, clock-frequency is always the maximal frequency.
@@ -286,19 +490,23 @@ static int __init g5_cpufreq_init(void)
g5_cpu_freqs[0].frequency = max_freq;
g5_cpu_freqs[1].frequency = max_freq/2;
- /* Check current frequency */
- g5_pmode_cur = g5_query_freq();
- if (g5_pmode_cur > 1)
- /* We don't support anything but 1:1 and 1:2, fixup ... */
- g5_pmode_cur = 1;
+ /* Set callbacks */
+ g5_switch_freq = g5_scom_switch_freq;
+ g5_query_freq = g5_scom_query_freq;
+ freq_method = "SCOM";
/* Force apply current frequency to make sure everything is in
* sync (voltage is right for example). Firmware may leave us with
* a strange setting ...
*/
- g5_switch_freq(g5_pmode_cur);
+ g5_switch_volt(CPUFREQ_HIGH);
+ msleep(10);
+ g5_pmode_cur = -1;
+ g5_switch_freq(g5_query_freq());
printk(KERN_INFO "Registering G5 CPU frequency driver\n");
+ printk(KERN_INFO "Frequency method: %s, Voltage method: %s\n",
+ freq_method, volt_method);
	printk(KERN_INFO "Low: %d MHz, High: %d MHz, Cur: %d MHz\n",
g5_cpu_freqs[1].frequency/1000,
g5_cpu_freqs[0].frequency/1000,
@@ -317,6 +525,200 @@ static int __init g5_cpufreq_init(void)
return rc;
}
+static int __init g5_pm72_cpufreq_init(struct device_node *cpus)
+{
+ struct device_node *cpuid = NULL, *hwclock = NULL, *cpunode = NULL;
+ u8 *eeprom = NULL;
+ u32 *valp;
+ u64 max_freq, min_freq, ih, il;
+ int has_volt = 1, rc = 0;
+
+ /* Get first CPU node */
+ for (cpunode = NULL;
+ (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
+ if (!strcmp(cpunode->type, "cpu"))
+ break;
+ }
+ if (cpunode == NULL) {
+ printk(KERN_ERR "cpufreq: Can't find any CPU node\n");
+ return -ENODEV;
+ }
+
+ /* Lookup the cpuid eeprom node */
+ cpuid = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/cpuid@a0");
+ if (cpuid != NULL)
+ eeprom = (u8 *)get_property(cpuid, "cpuid", NULL);
+ if (eeprom == NULL) {
+ printk(KERN_ERR "cpufreq: Can't find cpuid EEPROM !\n");
+ rc = -ENODEV;
+ goto bail;
+ }
+
+ /* Lookup the i2c hwclock */
+ for (hwclock = NULL;
+ (hwclock = of_find_node_by_name(hwclock, "i2c-hwclock")) != NULL;){
+ char *loc = get_property(hwclock, "hwctrl-location", NULL);
+ if (loc == NULL)
+ continue;
+ if (strcmp(loc, "CPU CLOCK"))
+ continue;
+ if (!get_property(hwclock, "platform-get-frequency", NULL))
+ continue;
+ break;
+ }
+ if (hwclock == NULL) {
+ printk(KERN_ERR "cpufreq: Can't find i2c clock chip !\n");
+ rc = -ENODEV;
+ goto bail;
+ }
+
+ DBG("cpufreq: i2c clock chip found: %s\n", hwclock->full_name);
+
+ /* Now get all the platform functions */
+ pfunc_cpu_getfreq =
+ pmf_find_function(hwclock, "get-frequency");
+ pfunc_cpu_setfreq_high =
+ pmf_find_function(hwclock, "set-frequency-high");
+ pfunc_cpu_setfreq_low =
+ pmf_find_function(hwclock, "set-frequency-low");
+ pfunc_slewing_done =
+ pmf_find_function(hwclock, "slewing-done");
+ pfunc_cpu0_volt_high =
+ pmf_find_function(hwclock, "set-voltage-high-0");
+ pfunc_cpu0_volt_low =
+ pmf_find_function(hwclock, "set-voltage-low-0");
+ pfunc_cpu1_volt_high =
+ pmf_find_function(hwclock, "set-voltage-high-1");
+ pfunc_cpu1_volt_low =
+ pmf_find_function(hwclock, "set-voltage-low-1");
+
+ /* Check we have minimum requirements */
+ if (pfunc_cpu_getfreq == NULL || pfunc_cpu_setfreq_high == NULL ||
+ pfunc_cpu_setfreq_low == NULL || pfunc_slewing_done == NULL) {
+ printk(KERN_ERR "cpufreq: Can't find platform functions !\n");
+ rc = -ENODEV;
+ goto bail;
+ }
+
+ /* Check that we have complete sets */
+ if (pfunc_cpu0_volt_high == NULL || pfunc_cpu0_volt_low == NULL) {
+ pmf_put_function(pfunc_cpu0_volt_high);
+ pmf_put_function(pfunc_cpu0_volt_low);
+ pfunc_cpu0_volt_high = pfunc_cpu0_volt_low = NULL;
+ has_volt = 0;
+ }
+ if (!has_volt ||
+ pfunc_cpu1_volt_high == NULL || pfunc_cpu1_volt_low == NULL) {
+ pmf_put_function(pfunc_cpu1_volt_high);
+ pmf_put_function(pfunc_cpu1_volt_low);
+ pfunc_cpu1_volt_high = pfunc_cpu1_volt_low = NULL;
+ }
+
+ /* Note: The device tree also contains a "platform-set-values"
+ * function for which I haven't quite figured out the usage. It
+ * might have to be called on init and/or wakeup, I'm not too sure
+ * but things seem to work fine without it so far ...
+ */
+
+ /* Get max frequency from device-tree */
+ valp = (u32 *)get_property(cpunode, "clock-frequency", NULL);
+ if (!valp) {
+ printk(KERN_ERR "cpufreq: Can't find CPU frequency !\n");
+ rc = -ENODEV;
+ goto bail;
+ }
+
+ max_freq = (*valp)/1000;
+
+ /* Now calculate reduced frequency by using the cpuid input freq
+	 * ratio. This requires 64-bit math unless we are willing to lose
+ * some precision
+ */
+ ih = *((u32 *)(eeprom + 0x10));
+ il = *((u32 *)(eeprom + 0x20));
+ min_freq = 0;
+ if (ih != 0 && il != 0)
+ min_freq = (max_freq * il) / ih;
+
+ /* Sanity check */
+ if (min_freq >= max_freq || min_freq < 1000) {
+ printk(KERN_ERR "cpufreq: Can't calculate low frequency !\n");
+ rc = -ENODEV;
+ goto bail;
+ }
+ g5_cpu_freqs[0].frequency = max_freq;
+ g5_cpu_freqs[1].frequency = min_freq;
+
+ /* Set callbacks */
+ g5_switch_volt = g5_pfunc_switch_volt;
+ g5_switch_freq = g5_pfunc_switch_freq;
+ g5_query_freq = g5_pfunc_query_freq;
+
+ /* Force apply current frequency to make sure everything is in
+ * sync (voltage is right for example). Firmware may leave us with
+ * a strange setting ...
+ */
+ g5_switch_volt(CPUFREQ_HIGH);
+ msleep(10);
+ g5_pmode_cur = -1;
+ g5_switch_freq(g5_query_freq());
+
+ printk(KERN_INFO "Registering G5 CPU frequency driver\n");
+ printk(KERN_INFO "Frequency method: i2c/pfunc, "
+ "Voltage method: %s\n", has_volt ? "i2c/pfunc" : "none");
+	printk(KERN_INFO "Low: %d MHz, High: %d MHz, Cur: %d MHz\n",
+ g5_cpu_freqs[1].frequency/1000,
+ g5_cpu_freqs[0].frequency/1000,
+ g5_cpu_freqs[g5_pmode_cur].frequency/1000);
+
+ rc = cpufreq_register_driver(&g5_cpufreq_driver);
+ bail:
+ if (rc != 0) {
+ pmf_put_function(pfunc_cpu_getfreq);
+ pmf_put_function(pfunc_cpu_setfreq_high);
+ pmf_put_function(pfunc_cpu_setfreq_low);
+ pmf_put_function(pfunc_slewing_done);
+ pmf_put_function(pfunc_cpu0_volt_high);
+ pmf_put_function(pfunc_cpu0_volt_low);
+ pmf_put_function(pfunc_cpu1_volt_high);
+ pmf_put_function(pfunc_cpu1_volt_low);
+ }
+ of_node_put(hwclock);
+ of_node_put(cpuid);
+ of_node_put(cpunode);
+
+ return rc;
+}
+
+static int __init g5_rm31_cpufreq_init(struct device_node *cpus)
+{
+ /* NYI */
+ return 0;
+}
+
+static int __init g5_cpufreq_init(void)
+{
+ struct device_node *cpus;
+ int rc;
+
+ cpus = of_find_node_by_path("/cpus");
+ if (cpus == NULL) {
+ DBG("No /cpus node !\n");
+ return -ENODEV;
+ }
+
+ if (machine_is_compatible("PowerMac7,2") ||
+ machine_is_compatible("PowerMac7,3"))
+ rc = g5_pm72_cpufreq_init(cpus);
+ else if (machine_is_compatible("RackMac3,1"))
+ rc = g5_rm31_cpufreq_init(cpus);
+ else
+ rc = g5_neo2_cpufreq_init(cpus);
+
+ of_node_put(cpus);
+ return rc;
+}
+
module_init(g5_cpufreq_init);
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index f6e22da2a5d..558dd069209 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -58,12 +58,11 @@ extern int powersave_lowspeed;
extern int powersave_nap;
extern struct device_node *k2_skiplist[2];
-
/*
* We use a single global lock to protect accesses. Each driver has
* to take care of its own locking
*/
-static DEFINE_SPINLOCK(feature_lock);
+DEFINE_SPINLOCK(feature_lock);
#define LOCK(flags) spin_lock_irqsave(&feature_lock, flags);
#define UNLOCK(flags) spin_unlock_irqrestore(&feature_lock, flags);
@@ -101,26 +100,17 @@ static const char *macio_names[] =
"Keylargo",
"Pangea",
"Intrepid",
- "K2"
+ "K2",
+ "Shasta",
};
+struct device_node *uninorth_node;
+u32 __iomem *uninorth_base;
-/*
- * Uninorth reg. access. Note that Uni-N regs are big endian
- */
-
-#define UN_REG(r) (uninorth_base + ((r) >> 2))
-#define UN_IN(r) (in_be32(UN_REG(r)))
-#define UN_OUT(r,v) (out_be32(UN_REG(r), (v)))
-#define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v)))
-#define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v)))
-
-static struct device_node *uninorth_node;
-static u32 __iomem *uninorth_base;
static u32 uninorth_rev;
-static int uninorth_u3;
-static void __iomem *u3_ht;
+static int uninorth_maj;
+static void __iomem *u3_ht_base;
/*
* For each motherboard family, we have a table of functions pointers
@@ -1399,8 +1389,15 @@ static long g5_fw_enable(struct device_node *node, long param, long value)
static long g5_mpic_enable(struct device_node *node, long param, long value)
{
unsigned long flags;
+ struct device_node *parent = of_get_parent(node);
+ int is_u3;
- if (node->parent == NULL || strcmp(node->parent->name, "u3"))
+ if (parent == NULL)
+ return 0;
+ is_u3 = strcmp(parent->name, "u3") == 0 ||
+ strcmp(parent->name, "u4") == 0;
+ of_node_put(parent);
+ if (!is_u3)
return 0;
LOCK(flags);
@@ -1445,20 +1442,53 @@ static long g5_i2s_enable(struct device_node *node, long param, long value)
/* Very crude implementation for now */
struct macio_chip *macio = &macio_chips[0];
unsigned long flags;
-
- if (value == 0)
- return 0; /* don't disable yet */
+ int cell;
+ u32 fcrs[3][3] = {
+ { 0,
+ K2_FCR1_I2S0_CELL_ENABLE |
+ K2_FCR1_I2S0_CLK_ENABLE_BIT | K2_FCR1_I2S0_ENABLE,
+ KL3_I2S0_CLK18_ENABLE
+ },
+ { KL0_SCC_A_INTF_ENABLE,
+ K2_FCR1_I2S1_CELL_ENABLE |
+ K2_FCR1_I2S1_CLK_ENABLE_BIT | K2_FCR1_I2S1_ENABLE,
+ KL3_I2S1_CLK18_ENABLE
+ },
+ { KL0_SCC_B_INTF_ENABLE,
+ SH_FCR1_I2S2_CELL_ENABLE |
+ SH_FCR1_I2S2_CLK_ENABLE_BIT | SH_FCR1_I2S2_ENABLE,
+ SH_FCR3_I2S2_CLK18_ENABLE
+ },
+ };
+
+ if (macio->type != macio_keylargo2 && macio->type != macio_shasta)
+ return -ENODEV;
+ if (strncmp(node->name, "i2s-", 4))
+ return -ENODEV;
+ cell = node->name[4] - 'a';
+ switch(cell) {
+ case 0:
+ case 1:
+ break;
+ case 2:
+ if (macio->type == macio_shasta)
+ break;
+ default:
+ return -ENODEV;
+ }
LOCK(flags);
- MACIO_BIS(KEYLARGO_FCR3, KL3_CLK45_ENABLE | KL3_CLK49_ENABLE |
- KL3_I2S0_CLK18_ENABLE);
- udelay(10);
- MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_I2S0_CELL_ENABLE |
- K2_FCR1_I2S0_CLK_ENABLE_BIT | K2_FCR1_I2S0_ENABLE);
+ if (value) {
+ MACIO_BIC(KEYLARGO_FCR0, fcrs[cell][0]);
+ MACIO_BIS(KEYLARGO_FCR1, fcrs[cell][1]);
+ MACIO_BIS(KEYLARGO_FCR3, fcrs[cell][2]);
+ } else {
+ MACIO_BIC(KEYLARGO_FCR3, fcrs[cell][2]);
+ MACIO_BIC(KEYLARGO_FCR1, fcrs[cell][1]);
+ MACIO_BIS(KEYLARGO_FCR0, fcrs[cell][0]);
+ }
udelay(10);
- MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_I2S0_RESET);
UNLOCK(flags);
- udelay(10);
return 0;
}
@@ -1473,7 +1503,7 @@ static long g5_reset_cpu(struct device_node *node, long param, long value)
struct device_node *np;
macio = &macio_chips[0];
- if (macio->type != macio_keylargo2)
+ if (macio->type != macio_keylargo2 && macio->type != macio_shasta)
return -ENODEV;
np = find_path_device("/cpus");
@@ -1512,14 +1542,17 @@ static long g5_reset_cpu(struct device_node *node, long param, long value)
*/
void g5_phy_disable_cpu1(void)
{
- UN_OUT(U3_API_PHY_CONFIG_1, 0);
+ if (uninorth_maj == 3)
+ UN_OUT(U3_API_PHY_CONFIG_1, 0);
}
#endif /* CONFIG_POWER4 */
#ifndef CONFIG_POWER4
-static void
-keylargo_shutdown(struct macio_chip *macio, int sleep_mode)
+
+#ifdef CONFIG_PM
+
+static void keylargo_shutdown(struct macio_chip *macio, int sleep_mode)
{
u32 temp;
@@ -1572,8 +1605,7 @@ keylargo_shutdown(struct macio_chip *macio, int sleep_mode)
(void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
}
-static void
-pangea_shutdown(struct macio_chip *macio, int sleep_mode)
+static void pangea_shutdown(struct macio_chip *macio, int sleep_mode)
{
u32 temp;
@@ -1606,8 +1638,7 @@ pangea_shutdown(struct macio_chip *macio, int sleep_mode)
(void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
}
-static void
-intrepid_shutdown(struct macio_chip *macio, int sleep_mode)
+static void intrepid_shutdown(struct macio_chip *macio, int sleep_mode)
{
u32 temp;
@@ -1635,124 +1666,6 @@ intrepid_shutdown(struct macio_chip *macio, int sleep_mode)
}
-void pmac_tweak_clock_spreading(int enable)
-{
- struct macio_chip *macio = &macio_chips[0];
-
- /* Hack for doing clock spreading on some machines PowerBooks and
- * iBooks. This implements the "platform-do-clockspreading" OF
- * property as decoded manually on various models. For safety, we also
- * check the product ID in the device-tree in cases we'll whack the i2c
- * chip to make reasonably sure we won't set wrong values in there
- *
- * Of course, ultimately, we have to implement a real parser for
- * the platform-do-* stuff...
- */
-
- if (macio->type == macio_intrepid) {
- struct device_node *clock =
- of_find_node_by_path("/uni-n@f8000000/hw-clock");
- if (clock && get_property(clock, "platform-do-clockspreading",
- NULL)) {
- printk(KERN_INFO "%sabling clock spreading on Intrepid"
- " ASIC\n", enable ? "En" : "Dis");
- if (enable)
- UN_OUT(UNI_N_CLOCK_SPREADING, 2);
- else
- UN_OUT(UNI_N_CLOCK_SPREADING, 0);
- mdelay(40);
- }
- of_node_put(clock);
- }
-
- while (machine_is_compatible("PowerBook5,2") ||
- machine_is_compatible("PowerBook5,3") ||
- machine_is_compatible("PowerBook6,2") ||
- machine_is_compatible("PowerBook6,3")) {
- struct device_node *ui2c = of_find_node_by_type(NULL, "i2c");
- struct device_node *dt = of_find_node_by_name(NULL, "device-tree");
- u8 buffer[9];
- u32 *productID;
- int i, rc, changed = 0;
-
- if (dt == NULL)
- break;
- productID = (u32 *)get_property(dt, "pid#", NULL);
- if (productID == NULL)
- break;
- while(ui2c) {
- struct device_node *p = of_get_parent(ui2c);
- if (p && !strcmp(p->name, "uni-n"))
- break;
- ui2c = of_find_node_by_type(ui2c, "i2c");
- }
- if (ui2c == NULL)
- break;
- DBG("Trying to bump clock speed for PID: %08x...\n", *productID);
- rc = pmac_low_i2c_open(ui2c, 1);
- if (rc != 0)
- break;
- pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined);
- rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9);
- DBG("read result: %d,", rc);
- if (rc != 0) {
- pmac_low_i2c_close(ui2c);
- break;
- }
- for (i=0; i<9; i++)
- DBG(" %02x", buffer[i]);
- DBG("\n");
-
- switch(*productID) {
- case 0x1182: /* AlBook 12" rev 2 */
- case 0x1183: /* iBook G4 12" */
- buffer[0] = (buffer[0] & 0x8f) | 0x70;
- buffer[2] = (buffer[2] & 0x7f) | 0x00;
- buffer[5] = (buffer[5] & 0x80) | 0x31;
- buffer[6] = (buffer[6] & 0x40) | 0xb0;
- buffer[7] = (buffer[7] & 0x00) | (enable ? 0xc0 : 0xba);
- buffer[8] = (buffer[8] & 0x00) | 0x30;
- changed = 1;
- break;
- case 0x3142: /* AlBook 15" (ATI M10) */
- case 0x3143: /* AlBook 17" (ATI M10) */
- buffer[0] = (buffer[0] & 0xaf) | 0x50;
- buffer[2] = (buffer[2] & 0x7f) | 0x00;
- buffer[5] = (buffer[5] & 0x80) | 0x31;
- buffer[6] = (buffer[6] & 0x40) | 0xb0;
- buffer[7] = (buffer[7] & 0x00) | (enable ? 0xd0 : 0xc0);
- buffer[8] = (buffer[8] & 0x00) | 0x30;
- changed = 1;
- break;
- default:
- DBG("i2c-hwclock: Machine model not handled\n");
- break;
- }
- if (!changed) {
- pmac_low_i2c_close(ui2c);
- break;
- }
- printk(KERN_INFO "%sabling clock spreading on i2c clock chip\n",
- enable ? "En" : "Dis");
-
- pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub);
- rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9);
- DBG("write result: %d,", rc);
- pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined);
- rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9);
- DBG("read result: %d,", rc);
- if (rc != 0) {
- pmac_low_i2c_close(ui2c);
- break;
- }
- for (i=0; i<9; i++)
- DBG(" %02x", buffer[i]);
- pmac_low_i2c_close(ui2c);
- break;
- }
-}
-
-
static int
core99_sleep(void)
{
@@ -1909,6 +1822,8 @@ core99_wake_up(void)
return 0;
}
+#endif /* CONFIG_PM */
+
static long
core99_sleep_state(struct device_node *node, long param, long value)
{
@@ -1930,10 +1845,13 @@ core99_sleep_state(struct device_node *node, long param, long value)
if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
return -EPERM;
+#ifdef CONFIG_PM
if (value == 1)
return core99_sleep();
else if (value == 0)
return core99_wake_up();
+
+#endif /* CONFIG_PM */
return 0;
}
@@ -2057,7 +1975,9 @@ static struct feature_table_entry core99_features[] = {
{ PMAC_FTR_USB_ENABLE, core99_usb_enable },
{ PMAC_FTR_1394_ENABLE, core99_firewire_enable },
{ PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power },
+#ifdef CONFIG_PM
{ PMAC_FTR_SLEEP_STATE, core99_sleep_state },
+#endif
#ifdef CONFIG_SMP
{ PMAC_FTR_RESET_CPU, core99_reset_cpu },
#endif /* CONFIG_SMP */
@@ -2427,6 +2347,14 @@ static struct pmac_mb_def pmac_mb_defs[] = {
PMAC_TYPE_POWERMAC_G5_U3L, g5_features,
0,
},
+ { "PowerMac11,2", "PowerMac G5 Dual Core",
+ PMAC_TYPE_POWERMAC_G5_U3L, g5_features,
+ 0,
+ },
+ { "PowerMac12,1", "iMac G5 (iSight)",
+ PMAC_TYPE_POWERMAC_G5_U3L, g5_features,
+ 0,
+ },
{ "RackMac3,1", "XServe G5",
PMAC_TYPE_XSERVE_G5, g5_features,
0,
@@ -2539,6 +2467,11 @@ static int __init probe_motherboard(void)
pmac_mb.model_name = "Unknown K2-based";
pmac_mb.features = g5_features;
break;
+ case macio_shasta:
+ pmac_mb.model_id = PMAC_TYPE_UNKNOWN_SHASTA;
+ pmac_mb.model_name = "Unknown Shasta-based";
+ pmac_mb.features = g5_features;
+ break;
#endif /* CONFIG_POWER4 */
default:
return -ENODEV;
@@ -2607,6 +2540,8 @@ found:
*/
static void __init probe_uninorth(void)
{
+ u32 *addrp;
+ phys_addr_t address;
unsigned long actrl;
/* Locate core99 Uni-N */
@@ -2614,22 +2549,31 @@ static void __init probe_uninorth(void)
/* Locate G5 u3 */
if (uninorth_node == NULL) {
uninorth_node = of_find_node_by_name(NULL, "u3");
- uninorth_u3 = 1;
- }
- if (uninorth_node && uninorth_node->n_addrs > 0) {
- unsigned long address = uninorth_node->addrs[0].address;
- uninorth_base = ioremap(address, 0x40000);
- uninorth_rev = in_be32(UN_REG(UNI_N_VERSION));
- if (uninorth_u3)
- u3_ht = ioremap(address + U3_HT_CONFIG_BASE, 0x1000);
- } else
- uninorth_node = NULL;
-
- if (!uninorth_node)
+ uninorth_maj = 3;
+ }
+ /* Locate G5 u4 */
+ if (uninorth_node == NULL) {
+ uninorth_node = of_find_node_by_name(NULL, "u4");
+ uninorth_maj = 4;
+ }
+ if (uninorth_node == NULL)
return;
- printk(KERN_INFO "Found %s memory controller & host bridge, revision: %d\n",
- uninorth_u3 ? "U3" : "UniNorth", uninorth_rev);
+ addrp = (u32 *)get_property(uninorth_node, "reg", NULL);
+ if (addrp == NULL)
+ return;
+ address = of_translate_address(uninorth_node, addrp);
+ if (address == 0)
+ return;
+ uninorth_base = ioremap(address, 0x40000);
+ uninorth_rev = in_be32(UN_REG(UNI_N_VERSION));
+ if (uninorth_maj == 3 || uninorth_maj == 4)
+ u3_ht_base = ioremap(address + U3_HT_CONFIG_BASE, 0x1000);
+
+ printk(KERN_INFO "Found %s memory controller & host bridge"
+ " @ 0x%08x revision: 0x%02x\n", uninorth_maj == 3 ? "U3" :
+ uninorth_maj == 4 ? "U4" : "UniNorth",
+ (unsigned int)address, uninorth_rev);
printk(KERN_INFO "Mapped at 0x%08lx\n", (unsigned long)uninorth_base);
 	/* Set the arbiter QAck delay according to what Apple does
@@ -2637,7 +2581,8 @@ static void __init probe_uninorth(void)
if (uninorth_rev < 0x11) {
actrl = UN_IN(UNI_N_ARB_CTRL) & ~UNI_N_ARB_CTRL_QACK_DELAY_MASK;
actrl |= ((uninorth_rev < 3) ? UNI_N_ARB_CTRL_QACK_DELAY105 :
- UNI_N_ARB_CTRL_QACK_DELAY) << UNI_N_ARB_CTRL_QACK_DELAY_SHIFT;
+ UNI_N_ARB_CTRL_QACK_DELAY) <<
+ UNI_N_ARB_CTRL_QACK_DELAY_SHIFT;
UN_OUT(UNI_N_ARB_CTRL, actrl);
}
@@ -2645,7 +2590,8 @@ static void __init probe_uninorth(void)
 	 * revs 1.5 to 2.0 and Pangea. Seem to toggle the UniN Maxbus/PCI
* memory timeout
*/
- if ((uninorth_rev >= 0x11 && uninorth_rev <= 0x24) || uninorth_rev == 0xc0)
+ if ((uninorth_rev >= 0x11 && uninorth_rev <= 0x24) ||
+ uninorth_rev == 0xc0)
UN_OUT(0x2160, UN_IN(0x2160) & 0x00ffffff);
}
@@ -2653,18 +2599,17 @@ static void __init probe_one_macio(const char *name, const char *compat, int typ
{
struct device_node* node;
int i;
- volatile u32 __iomem * base;
- u32* revp;
+ volatile u32 __iomem *base;
+ u32 *addrp, *revp;
+ phys_addr_t addr;
+ u64 size;
- node = find_devices(name);
- if (!node || !node->n_addrs)
- return;
- if (compat)
- do {
- if (device_is_compatible(node, compat))
- break;
- node = node->next;
- } while (node);
+ for (node = NULL; (node = of_find_node_by_name(node, name)) != NULL;) {
+ if (!compat)
+ break;
+ if (device_is_compatible(node, compat))
+ break;
+ }
if (!node)
return;
for(i=0; i<MAX_MACIO_CHIPS; i++) {
@@ -2673,22 +2618,38 @@ static void __init probe_one_macio(const char *name, const char *compat, int typ
if (macio_chips[i].of_node == node)
return;
}
+
if (i >= MAX_MACIO_CHIPS) {
printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n");
printk(KERN_ERR "pmac_feature: %s skipped\n", node->full_name);
return;
}
- base = ioremap(node->addrs[0].address, node->addrs[0].size);
+ addrp = of_get_pci_address(node, 0, &size, NULL);
+ if (addrp == NULL) {
+ printk(KERN_ERR "pmac_feature: %s: can't find base !\n",
+ node->full_name);
+ return;
+ }
+ addr = of_translate_address(node, addrp);
+ if (addr == 0) {
+ printk(KERN_ERR "pmac_feature: %s, can't translate base !\n",
+ node->full_name);
+ return;
+ }
+ base = ioremap(addr, (unsigned long)size);
if (!base) {
- printk(KERN_ERR "pmac_feature: Can't map mac-io chip !\n");
+ printk(KERN_ERR "pmac_feature: %s, can't map mac-io chip !\n",
+ node->full_name);
return;
}
- if (type == macio_keylargo) {
+ if (type == macio_keylargo || type == macio_keylargo2) {
u32 *did = (u32 *)get_property(node, "device-id", NULL);
if (*did == 0x00000025)
type = macio_pangea;
if (*did == 0x0000003e)
type = macio_intrepid;
+ if (*did == 0x0000004f)
+ type = macio_shasta;
}
macio_chips[i].of_node = node;
macio_chips[i].type = type;
@@ -2787,7 +2748,8 @@ set_initial_features(void)
}
#ifdef CONFIG_POWER4
- if (macio_chips[0].type == macio_keylargo2) {
+ if (macio_chips[0].type == macio_keylargo2 ||
+ macio_chips[0].type == macio_shasta) {
#ifndef CONFIG_SMP
/* On SMP machines running UP, we have the second CPU eating
* bus cycles. We need to take it off the bus. This is done
@@ -2896,12 +2858,6 @@ set_initial_features(void)
MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N);
}
- /* Some machine models need the clock chip to be properly setup for
- * clock spreading now. This should be a platform function but we
- * don't do these at the moment
- */
- pmac_tweak_clock_spreading(1);
-
#endif /* CONFIG_POWER4 */
/* On all machines, switch modem & serial ports off */
@@ -2929,9 +2885,6 @@ pmac_feature_init(void)
return;
}
- /* Setup low-level i2c stuffs */
- pmac_init_low_i2c();
-
/* Probe machine type */
if (probe_motherboard())
printk(KERN_WARNING "Unknown PowerMac !\n");
@@ -2942,26 +2895,6 @@ pmac_feature_init(void)
set_initial_features();
}
-int __init pmac_feature_late_init(void)
-{
-#if 0
- struct device_node *np;
-
- /* Request some resources late */
- if (uninorth_node)
- request_OF_resource(uninorth_node, 0, NULL);
- np = find_devices("hammerhead");
- if (np)
- request_OF_resource(np, 0, NULL);
- np = find_devices("interrupt-controller");
- if (np)
- request_OF_resource(np, 0, NULL);
-#endif
- return 0;
-}
-
-device_initcall(pmac_feature_late_init);
-
#if 0
static void dump_HT_speeds(char *name, u32 cfg, u32 frq)
{
@@ -2984,9 +2917,9 @@ void __init pmac_check_ht_link(void)
u8 px_bus, px_devfn;
struct pci_controller *px_hose;
- (void)in_be32(u3_ht + U3_HT_LINK_COMMAND);
- ucfg = cfg = in_be32(u3_ht + U3_HT_LINK_CONFIG);
- ufreq = freq = in_be32(u3_ht + U3_HT_LINK_FREQ);
+ (void)in_be32(u3_ht_base + U3_HT_LINK_COMMAND);
+ ucfg = cfg = in_be32(u3_ht_base + U3_HT_LINK_CONFIG);
+ ufreq = freq = in_be32(u3_ht_base + U3_HT_LINK_FREQ);
dump_HT_speeds("U3 HyperTransport", cfg, freq);
pcix_node = of_find_compatible_node(NULL, "pci", "pci-x");
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index f3f39e8e337..535c802b369 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -1,22 +1,34 @@
/*
- * arch/ppc/platforms/pmac_low_i2c.c
+ * arch/powerpc/platforms/powermac/low_i2c.c
*
- * Copyright (C) 2003 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ * Copyright (C) 2003-2005 Ben. Herrenschmidt (benh@kernel.crashing.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * This file contains some low-level i2c access routines that
- * need to be used by various bits of the PowerMac platform code
- * at times where the real asynchronous & interrupt driven driver
- * cannot be used. The API borrows some semantics from the darwin
- * driver in order to ease the implementation of the platform
- * properties parser
+ * The linux i2c layer isn't completely suitable for our needs for various
+ * reasons ranging from too late initialisation to semantics not perfectly
+ * matching some requirements of the apple platform functions etc...
+ *
+ * This file thus provides a simple low level unified i2c interface for
+ * powermac that covers the various types of i2c busses used in Apple machines.
+ * For now, keywest, PMU and SMU, though we could add Cuda, or other bit
+ * banging busses found on older chipstes in earlier machines if we ever need
+ * one of them.
+ *
+ * The drivers in this file are synchronous/blocking. In addition, the
+ * keywest one is fairly slow due to the use of msleep instead of interrupts
+ * as the interrupt is currently used by i2c-keywest. In the long run, we
+ * might want to get rid of those high-level interfaces to linux i2c layer
+ * either completely (converting all drivers) or replacing them all with a
+ * single stub driver on top of this one. Once done, the interrupt will be
+ * available for our use.
*/
#undef DEBUG
+#undef DEBUG_LOW
#include <linux/config.h>
#include <linux/types.h>
@@ -25,66 +37,91 @@
#include <linux/module.h>
#include <linux/adb.h>
#include <linux/pmu.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/timer.h>
#include <asm/keylargo.h>
#include <asm/uninorth.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/machdep.h>
+#include <asm/smu.h>
+#include <asm/pmac_pfunc.h>
#include <asm/pmac_low_i2c.h>
-#define MAX_LOW_I2C_HOST 4
-
#ifdef DEBUG
#define DBG(x...) do {\
- printk(KERN_DEBUG "KW:" x); \
+ printk(KERN_DEBUG "low_i2c:" x); \
} while(0)
#else
#define DBG(x...)
#endif
-struct low_i2c_host;
-
-typedef int (*low_i2c_func_t)(struct low_i2c_host *host, u8 addr, u8 sub, u8 *data, int len);
+#ifdef DEBUG_LOW
+#define DBG_LOW(x...) do {\
+ printk(KERN_DEBUG "low_i2c:" x); \
+ } while(0)
+#else
+#define DBG_LOW(x...)
+#endif
-struct low_i2c_host
-{
- struct device_node *np; /* OF device node */
- struct semaphore mutex; /* Access mutex for use by i2c-keywest */
- low_i2c_func_t func; /* Access function */
- unsigned int is_open : 1; /* Poor man's access control */
- int mode; /* Current mode */
- int channel; /* Current channel */
- int num_channels; /* Number of channels */
- void __iomem *base; /* For keywest-i2c, base address */
- int bsteps; /* And register stepping */
- int speed; /* And speed */
-};
-static struct low_i2c_host low_i2c_hosts[MAX_LOW_I2C_HOST];
+static int pmac_i2c_force_poll = 1;
-/* No locking is necessary on allocation, we are running way before
- * anything can race with us
+/*
+ * A bus structure. Each bus in the system has such a structure associated.
*/
-static struct low_i2c_host *find_low_i2c_host(struct device_node *np)
+struct pmac_i2c_bus
{
- int i;
+ struct list_head link;
+ struct device_node *controller;
+ struct device_node *busnode;
+ int type;
+ int flags;
+ struct i2c_adapter *adapter;
+ void *hostdata;
+ int channel; /* some hosts have multiple */
+ int mode; /* current mode */
+ struct semaphore sem;
+ int opened;
+ int polled; /* open mode */
+ struct platform_device *platform_dev;
+
+ /* ops */
+ int (*open)(struct pmac_i2c_bus *bus);
+ void (*close)(struct pmac_i2c_bus *bus);
+ int (*xfer)(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
+ u32 subaddr, u8 *data, int len);
+};
- for (i = 0; i < MAX_LOW_I2C_HOST; i++)
- if (low_i2c_hosts[i].np == np)
- return &low_i2c_hosts[i];
- return NULL;
-}
+static LIST_HEAD(pmac_i2c_busses);
/*
- *
- * i2c-keywest implementation (UniNorth, U2, U3, Keylargo's)
- *
+ * Keywest implementation
*/
-/*
- * Keywest i2c definitions borrowed from drivers/i2c/i2c-keywest.h,
- * should be moved somewhere in include/asm-ppc/
- */
+struct pmac_i2c_host_kw
+{
+ struct semaphore mutex; /* Access mutex for use by
+ * i2c-keywest */
+ void __iomem *base; /* register base address */
+ int bsteps; /* register stepping */
+ int speed; /* speed */
+ int irq;
+ u8 *data;
+ unsigned len;
+ int state;
+ int rw;
+ int polled;
+ int result;
+ struct completion complete;
+ spinlock_t lock;
+ struct timer_list timeout_timer;
+};
+
/* Register indices */
typedef enum {
reg_mode = 0,
@@ -97,6 +134,8 @@ typedef enum {
reg_data
} reg_t;
+/* The Tumbler audio equalizer can be really slow sometimes */
+#define KW_POLL_TIMEOUT (2*HZ)
/* Mode register */
#define KW_I2C_MODE_100KHZ 0x00
@@ -140,8 +179,9 @@ enum {
};
#define WRONG_STATE(name) do {\
- printk(KERN_DEBUG "KW: wrong state. Got %s, state: %s (isr: %02x)\n", \
- name, __kw_state_names[state], isr); \
+ printk(KERN_DEBUG "KW: wrong state. Got %s, state: %s " \
+ "(isr: %02x)\n", \
+ name, __kw_state_names[host->state], isr); \
} while(0)
static const char *__kw_state_names[] = {
@@ -153,120 +193,137 @@ static const char *__kw_state_names[] = {
"state_dead"
};
-static inline u8 __kw_read_reg(struct low_i2c_host *host, reg_t reg)
+static inline u8 __kw_read_reg(struct pmac_i2c_host_kw *host, reg_t reg)
{
return readb(host->base + (((unsigned int)reg) << host->bsteps));
}
-static inline void __kw_write_reg(struct low_i2c_host *host, reg_t reg, u8 val)
+static inline void __kw_write_reg(struct pmac_i2c_host_kw *host,
+ reg_t reg, u8 val)
{
writeb(val, host->base + (((unsigned)reg) << host->bsteps));
(void)__kw_read_reg(host, reg_subaddr);
}
-#define kw_write_reg(reg, val) __kw_write_reg(host, reg, val)
-#define kw_read_reg(reg) __kw_read_reg(host, reg)
-
+#define kw_write_reg(reg, val) __kw_write_reg(host, reg, val)
+#define kw_read_reg(reg) __kw_read_reg(host, reg)
-/* Don't schedule, the g5 fan controller is too
- * timing sensitive
- */
-static u8 kw_wait_interrupt(struct low_i2c_host* host)
+static u8 kw_i2c_wait_interrupt(struct pmac_i2c_host_kw *host)
{
int i, j;
u8 isr;
- for (i = 0; i < 100000; i++) {
+ for (i = 0; i < 1000; i++) {
isr = kw_read_reg(reg_isr) & KW_I2C_IRQ_MASK;
if (isr != 0)
return isr;
/* This code is used with the timebase frozen, we cannot rely
- * on udelay ! For now, just use a bogus loop
+ * on udelay nor schedule when in polled mode !
+ * For now, just use a bogus loop....
*/
- for (j = 1; j < 10000; j++)
- mb();
+ if (host->polled) {
+ for (j = 1; j < 100000; j++)
+ mb();
+ } else
+ msleep(1);
}
return isr;
}
-static int kw_handle_interrupt(struct low_i2c_host *host, int state, int rw, int *rc, u8 **data, int *len, u8 isr)
+static void kw_i2c_handle_interrupt(struct pmac_i2c_host_kw *host, u8 isr)
{
u8 ack;
- DBG("kw_handle_interrupt(%s, isr: %x)\n", __kw_state_names[state], isr);
+ DBG_LOW("kw_handle_interrupt(%s, isr: %x)\n",
+ __kw_state_names[host->state], isr);
+
+ if (host->state == state_idle) {
+ printk(KERN_WARNING "low_i2c: Keywest got an out of state"
+ " interrupt, ignoring\n");
+ kw_write_reg(reg_isr, isr);
+ return;
+ }
if (isr == 0) {
- if (state != state_stop) {
- DBG("KW: Timeout !\n");
- *rc = -EIO;
+ if (host->state != state_stop) {
+ DBG_LOW("KW: Timeout !\n");
+ host->result = -EIO;
goto stop;
}
- if (state == state_stop) {
+ if (host->state == state_stop) {
ack = kw_read_reg(reg_status);
- if (!(ack & KW_I2C_STAT_BUSY)) {
- state = state_idle;
- kw_write_reg(reg_ier, 0x00);
- }
+ if (ack & KW_I2C_STAT_BUSY)
+ kw_write_reg(reg_status, 0);
+ host->state = state_idle;
+ kw_write_reg(reg_ier, 0x00);
+ if (!host->polled)
+ complete(&host->complete);
}
- return state;
+ return;
}
if (isr & KW_I2C_IRQ_ADDR) {
ack = kw_read_reg(reg_status);
- if (state != state_addr) {
+ if (host->state != state_addr) {
kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR);
WRONG_STATE("KW_I2C_IRQ_ADDR");
- *rc = -EIO;
+ host->result = -EIO;
goto stop;
}
- if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
- *rc = -ENODEV;
- DBG("KW: NAK on address\n");
- return state_stop;
+ if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
+ host->result = -ENODEV;
+ DBG_LOW("KW: NAK on address\n");
+ host->state = state_stop;
+ return;
} else {
- if (rw) {
- state = state_read;
- if (*len > 1)
- kw_write_reg(reg_control, KW_I2C_CTL_AAK);
+ if (host->len == 0) {
+ kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR);
+ goto stop;
+ }
+ if (host->rw) {
+ host->state = state_read;
+ if (host->len > 1)
+ kw_write_reg(reg_control,
+ KW_I2C_CTL_AAK);
} else {
- state = state_write;
- kw_write_reg(reg_data, **data);
- (*data)++; (*len)--;
+ host->state = state_write;
+ kw_write_reg(reg_data, *(host->data++));
+ host->len--;
}
}
kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR);
}
if (isr & KW_I2C_IRQ_DATA) {
- if (state == state_read) {
- **data = kw_read_reg(reg_data);
- (*data)++; (*len)--;
+ if (host->state == state_read) {
+ *(host->data++) = kw_read_reg(reg_data);
+ host->len--;
kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
- if ((*len) == 0)
- state = state_stop;
- else if ((*len) == 1)
+ if (host->len == 0)
+ host->state = state_stop;
+ else if (host->len == 1)
kw_write_reg(reg_control, 0);
- } else if (state == state_write) {
+ } else if (host->state == state_write) {
ack = kw_read_reg(reg_status);
if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
- DBG("KW: nack on data write\n");
- *rc = -EIO;
+ DBG_LOW("KW: nack on data write\n");
+ host->result = -EIO;
goto stop;
- } else if (*len) {
- kw_write_reg(reg_data, **data);
- (*data)++; (*len)--;
+ } else if (host->len) {
+ kw_write_reg(reg_data, *(host->data++));
+ host->len--;
} else {
kw_write_reg(reg_control, KW_I2C_CTL_STOP);
- state = state_stop;
- *rc = 0;
+ host->state = state_stop;
+ host->result = 0;
}
kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
} else {
kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
WRONG_STATE("KW_I2C_IRQ_DATA");
- if (state != state_stop) {
- *rc = -EIO;
+ if (host->state != state_stop) {
+ host->result = -EIO;
goto stop;
}
}
@@ -274,98 +331,194 @@ static int kw_handle_interrupt(struct low_i2c_host *host, int state, int rw, int
if (isr & KW_I2C_IRQ_STOP) {
kw_write_reg(reg_isr, KW_I2C_IRQ_STOP);
- if (state != state_stop) {
+ if (host->state != state_stop) {
WRONG_STATE("KW_I2C_IRQ_STOP");
- *rc = -EIO;
+ host->result = -EIO;
}
- return state_idle;
+ host->state = state_idle;
+ if (!host->polled)
+ complete(&host->complete);
}
if (isr & KW_I2C_IRQ_START)
kw_write_reg(reg_isr, KW_I2C_IRQ_START);
- return state;
-
+ return;
stop:
kw_write_reg(reg_control, KW_I2C_CTL_STOP);
- return state_stop;
+ host->state = state_stop;
+ return;
}
-static int keywest_low_i2c_func(struct low_i2c_host *host, u8 addr, u8 subaddr, u8 *data, int len)
+/* Interrupt handler */
+static irqreturn_t kw_i2c_irq(int irq, void *dev_id, struct pt_regs *regs)
{
+ struct pmac_i2c_host_kw *host = dev_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ del_timer(&host->timeout_timer);
+ kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr));
+ if (host->state != state_idle) {
+ host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
+ add_timer(&host->timeout_timer);
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void kw_i2c_timeout(unsigned long data)
+{
+ struct pmac_i2c_host_kw *host = (struct pmac_i2c_host_kw *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr));
+ if (host->state != state_idle) {
+ host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
+ add_timer(&host->timeout_timer);
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static int kw_i2c_open(struct pmac_i2c_bus *bus)
+{
+ struct pmac_i2c_host_kw *host = bus->hostdata;
+ down(&host->mutex);
+ return 0;
+}
+
+static void kw_i2c_close(struct pmac_i2c_bus *bus)
+{
+ struct pmac_i2c_host_kw *host = bus->hostdata;
+ up(&host->mutex);
+}
+
+static int kw_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
+ u32 subaddr, u8 *data, int len)
+{
+ struct pmac_i2c_host_kw *host = bus->hostdata;
u8 mode_reg = host->speed;
- int state = state_addr;
- int rc = 0;
+ int use_irq = host->irq != NO_IRQ && !bus->polled;
/* Setup mode & subaddress if any */
- switch(host->mode) {
- case pmac_low_i2c_mode_dumb:
- printk(KERN_ERR "low_i2c: Dumb mode not supported !\n");
+ switch(bus->mode) {
+ case pmac_i2c_mode_dumb:
return -EINVAL;
- case pmac_low_i2c_mode_std:
+ case pmac_i2c_mode_std:
mode_reg |= KW_I2C_MODE_STANDARD;
+ if (subsize != 0)
+ return -EINVAL;
break;
- case pmac_low_i2c_mode_stdsub:
+ case pmac_i2c_mode_stdsub:
mode_reg |= KW_I2C_MODE_STANDARDSUB;
+ if (subsize != 1)
+ return -EINVAL;
break;
- case pmac_low_i2c_mode_combined:
+ case pmac_i2c_mode_combined:
mode_reg |= KW_I2C_MODE_COMBINED;
+ if (subsize != 1)
+ return -EINVAL;
break;
}
/* Setup channel & clear pending irqs */
kw_write_reg(reg_isr, kw_read_reg(reg_isr));
- kw_write_reg(reg_mode, mode_reg | (host->channel << 4));
+ kw_write_reg(reg_mode, mode_reg | (bus->channel << 4));
kw_write_reg(reg_status, 0);
- /* Set up address and r/w bit */
- kw_write_reg(reg_addr, addr);
+ /* Set up address and r/w bit, strip possible stale bus number from
+ * address top bits
+ */
+ kw_write_reg(reg_addr, addrdir & 0xff);
/* Set up the sub address */
if ((mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_STANDARDSUB
|| (mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_COMBINED)
kw_write_reg(reg_subaddr, subaddr);
- /* Start sending address & disable interrupt*/
- kw_write_reg(reg_ier, 0 /*KW_I2C_IRQ_MASK*/);
+ /* Prepare for async operations */
+ host->data = data;
+ host->len = len;
+ host->state = state_addr;
+ host->result = 0;
+ host->rw = (addrdir & 1);
+ host->polled = bus->polled;
+
+ /* Enable interrupt if not using polled mode and interrupt is
+ * available
+ */
+ if (use_irq) {
+ /* Clear completion */
+ INIT_COMPLETION(host->complete);
+ /* Ack stale interrupts */
+ kw_write_reg(reg_isr, kw_read_reg(reg_isr));
+ /* Arm timeout */
+ host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
+ add_timer(&host->timeout_timer);
+ /* Enable emission */
+ kw_write_reg(reg_ier, KW_I2C_IRQ_MASK);
+ }
+
+ /* Start sending address */
kw_write_reg(reg_control, KW_I2C_CTL_XADDR);
- /* State machine, to turn into an interrupt handler */
- while(state != state_idle) {
- u8 isr = kw_wait_interrupt(host);
- state = kw_handle_interrupt(host, state, addr & 1, &rc, &data, &len, isr);
+ /* Wait for completion */
+ if (use_irq)
+ wait_for_completion(&host->complete);
+ else {
+ while(host->state != state_idle) {
+ unsigned long flags;
+
+ u8 isr = kw_i2c_wait_interrupt(host);
+ spin_lock_irqsave(&host->lock, flags);
+ kw_i2c_handle_interrupt(host, isr);
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
}
- return rc;
+ /* Disable emission */
+ kw_write_reg(reg_ier, 0);
+
+ return host->result;
}
-static void keywest_low_i2c_add(struct device_node *np)
+static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np)
{
- struct low_i2c_host *host = find_low_i2c_host(NULL);
- u32 *psteps, *prate, steps, aoffset = 0;
- struct device_node *parent;
+ struct pmac_i2c_host_kw *host;
+ u32 *psteps, *prate, *addrp, steps;
+ host = kzalloc(sizeof(struct pmac_i2c_host_kw), GFP_KERNEL);
if (host == NULL) {
printk(KERN_ERR "low_i2c: Can't allocate host for %s\n",
np->full_name);
- return;
+ return NULL;
}
- memset(host, 0, sizeof(*host));
+ /* Apple is kind enough to provide a valid AAPL,address property
+	 * on all i2c keywest nodes so far ... we would have to fall back
+ * to macio parsing if that wasn't the case
+ */
+ addrp = (u32 *)get_property(np, "AAPL,address", NULL);
+ if (addrp == NULL) {
+ printk(KERN_ERR "low_i2c: Can't find address for %s\n",
+ np->full_name);
+ kfree(host);
+ return NULL;
+ }
init_MUTEX(&host->mutex);
- host->np = of_node_get(np);
+ init_completion(&host->complete);
+ spin_lock_init(&host->lock);
+ init_timer(&host->timeout_timer);
+ host->timeout_timer.function = kw_i2c_timeout;
+ host->timeout_timer.data = (unsigned long)host;
+
psteps = (u32 *)get_property(np, "AAPL,address-step", NULL);
steps = psteps ? (*psteps) : 0x10;
for (host->bsteps = 0; (steps & 0x01) == 0; host->bsteps++)
steps >>= 1;
- parent = of_get_parent(np);
- host->num_channels = 1;
- if (parent && parent->name[0] == 'u') {
- host->num_channels = 2;
- aoffset = 3;
- }
/* Select interface rate */
- host->speed = KW_I2C_MODE_100KHZ;
+ host->speed = KW_I2C_MODE_25KHZ;
prate = (u32 *)get_property(np, "AAPL,i2c-rate", NULL);
if (prate) switch(*prate) {
case 100:
@@ -378,146 +531,981 @@ static void keywest_low_i2c_add(struct device_node *np)
host->speed = KW_I2C_MODE_25KHZ;
break;
}
+ if (np->n_intrs > 0)
+ host->irq = np->intrs[0].line;
+ else
+ host->irq = NO_IRQ;
+
+ host->base = ioremap((*addrp), 0x1000);
+ if (host->base == NULL) {
+ printk(KERN_ERR "low_i2c: Can't map registers for %s\n",
+ np->full_name);
+ kfree(host);
+ return NULL;
+ }
+
+	/* Make sure IRQ is disabled */
+ kw_write_reg(reg_ier, 0);
+
+ /* Request chip interrupt */
+ if (request_irq(host->irq, kw_i2c_irq, SA_SHIRQ, "keywest i2c", host))
+ host->irq = NO_IRQ;
+
+ printk(KERN_INFO "KeyWest i2c @0x%08x irq %d %s\n",
+ *addrp, host->irq, np->full_name);
- host->mode = pmac_low_i2c_mode_std;
- host->base = ioremap(np->addrs[0].address + aoffset,
- np->addrs[0].size);
- host->func = keywest_low_i2c_func;
+ return host;
}
+
+static void __init kw_i2c_add(struct pmac_i2c_host_kw *host,
+ struct device_node *controller,
+ struct device_node *busnode,
+ int channel)
+{
+ struct pmac_i2c_bus *bus;
+
+ bus = kzalloc(sizeof(struct pmac_i2c_bus), GFP_KERNEL);
+ if (bus == NULL)
+ return;
+
+ bus->controller = of_node_get(controller);
+ bus->busnode = of_node_get(busnode);
+ bus->type = pmac_i2c_bus_keywest;
+ bus->hostdata = host;
+ bus->channel = channel;
+ bus->mode = pmac_i2c_mode_std;
+ bus->open = kw_i2c_open;
+ bus->close = kw_i2c_close;
+ bus->xfer = kw_i2c_xfer;
+ init_MUTEX(&bus->sem);
+ if (controller == busnode)
+ bus->flags = pmac_i2c_multibus;
+ list_add(&bus->link, &pmac_i2c_busses);
+
+ printk(KERN_INFO " channel %d bus %s\n", channel,
+ (controller == busnode) ? "<multibus>" : busnode->full_name);
+}
+
+static void __init kw_i2c_probe(void)
+{
+ struct device_node *np, *child, *parent;
+
+ /* Probe keywest-i2c busses */
+ for (np = NULL;
+ (np = of_find_compatible_node(np, "i2c","keywest-i2c")) != NULL;){
+ struct pmac_i2c_host_kw *host;
+ int multibus, chans, i;
+
+ /* Found one, init a host structure */
+ host = kw_i2c_host_init(np);
+ if (host == NULL)
+ continue;
+
+ /* Now check if we have a multibus setup (old style) or if we
+ * have proper bus nodes. Note that the "new" way (proper bus
+ * nodes) might cause us to not create some busses that are
+ * kept hidden in the device-tree. In the future, we might
+ * want to work around that by creating busses without a node
+ * but not for now
+ */
+ child = of_get_next_child(np, NULL);
+ multibus = !child || strcmp(child->name, "i2c-bus");
+ of_node_put(child);
+
+ /* For a multibus setup, we get the bus count based on the
+ * parent type
+ */
+ if (multibus) {
+ parent = of_get_parent(np);
+ if (parent == NULL)
+ continue;
+ chans = parent->name[0] == 'u' ? 2 : 1;
+ for (i = 0; i < chans; i++)
+ kw_i2c_add(host, np, np, i);
+ } else {
+ for (child = NULL;
+ (child = of_get_next_child(np, child)) != NULL;) {
+ u32 *reg =
+ (u32 *)get_property(child, "reg", NULL);
+ if (reg == NULL)
+ continue;
+ kw_i2c_add(host, np, child, *reg);
+ }
+ }
+ }
+}
+
+
/*
*
* PMU implementation
*
*/
-
#ifdef CONFIG_ADB_PMU
-static int pmu_low_i2c_func(struct low_i2c_host *host, u8 addr, u8 sub, u8 *data, int len)
+/*
+ * i2c command block to the PMU
+ */
+struct pmu_i2c_hdr {
+ u8 bus;
+ u8 mode;
+ u8 bus2;
+ u8 address;
+ u8 sub_addr;
+ u8 comb_addr;
+ u8 count;
+ u8 data[];
+};
+
+static void pmu_i2c_complete(struct adb_request *req)
{
- // TODO
- return -ENODEV;
+ complete(req->arg);
}
-static void pmu_low_i2c_add(struct device_node *np)
+static int pmu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
+ u32 subaddr, u8 *data, int len)
{
- struct low_i2c_host *host = find_low_i2c_host(NULL);
+ struct adb_request *req = bus->hostdata;
+ struct pmu_i2c_hdr *hdr = (struct pmu_i2c_hdr *)&req->data[1];
+ struct completion comp;
+ int read = addrdir & 1;
+ int retry;
+ int rc = 0;
- if (host == NULL) {
- printk(KERN_ERR "low_i2c: Can't allocate host for %s\n",
- np->full_name);
- return;
+ /* For now, limit ourselves to 16 bytes transfers */
+ if (len > 16)
+ return -EINVAL;
+
+ init_completion(&comp);
+
+ for (retry = 0; retry < 16; retry++) {
+ memset(req, 0, sizeof(struct adb_request));
+ hdr->bus = bus->channel;
+ hdr->count = len;
+
+ switch(bus->mode) {
+ case pmac_i2c_mode_std:
+ if (subsize != 0)
+ return -EINVAL;
+ hdr->address = addrdir;
+ hdr->mode = PMU_I2C_MODE_SIMPLE;
+ break;
+ case pmac_i2c_mode_stdsub:
+ case pmac_i2c_mode_combined:
+ if (subsize != 1)
+ return -EINVAL;
+ hdr->address = addrdir & 0xfe;
+ hdr->comb_addr = addrdir;
+ hdr->sub_addr = subaddr;
+ if (bus->mode == pmac_i2c_mode_stdsub)
+ hdr->mode = PMU_I2C_MODE_STDSUB;
+ else
+ hdr->mode = PMU_I2C_MODE_COMBINED;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ INIT_COMPLETION(comp);
+ req->data[0] = PMU_I2C_CMD;
+ req->reply[0] = 0xff;
+ req->nbytes = sizeof(struct pmu_i2c_hdr) + 1;
+ req->done = pmu_i2c_complete;
+ req->arg = &comp;
+ if (!read && len) {
+ memcpy(hdr->data, data, len);
+ req->nbytes += len;
+ }
+ rc = pmu_queue_request(req);
+ if (rc)
+ return rc;
+ wait_for_completion(&comp);
+ if (req->reply[0] == PMU_I2C_STATUS_OK)
+ break;
+ msleep(15);
}
- memset(host, 0, sizeof(*host));
+ if (req->reply[0] != PMU_I2C_STATUS_OK)
+ return -EIO;
- init_MUTEX(&host->mutex);
- host->np = of_node_get(np);
- host->num_channels = 3;
- host->mode = pmac_low_i2c_mode_std;
- host->func = pmu_low_i2c_func;
+ for (retry = 0; retry < 16; retry++) {
+ memset(req, 0, sizeof(struct adb_request));
+
+ /* I know that looks like a lot, slow as hell, but darwin
+ * does it so let's be on the safe side for now
+ */
+ msleep(15);
+
+ hdr->bus = PMU_I2C_BUS_STATUS;
+
+ INIT_COMPLETION(comp);
+ req->data[0] = PMU_I2C_CMD;
+ req->reply[0] = 0xff;
+ req->nbytes = 2;
+ req->done = pmu_i2c_complete;
+ req->arg = &comp;
+ rc = pmu_queue_request(req);
+ if (rc)
+ return rc;
+ wait_for_completion(&comp);
+
+ if (req->reply[0] == PMU_I2C_STATUS_OK && !read)
+ return 0;
+ if (req->reply[0] == PMU_I2C_STATUS_DATAREAD && read) {
+ int rlen = req->reply_len - 1;
+
+ if (rlen != len) {
+ printk(KERN_WARNING "low_i2c: PMU returned %d"
+ " bytes, expected %d !\n", rlen, len);
+ return -EIO;
+ }
+ if (len)
+ memcpy(data, &req->reply[1], len);
+ return 0;
+ }
+ }
+ return -EIO;
+}
+
+static void __init pmu_i2c_probe(void)
+{
+ struct pmac_i2c_bus *bus;
+ struct device_node *busnode;
+ int channel, sz;
+
+ if (!pmu_present())
+ return;
+
+ /* There might or might not be a "pmu-i2c" node, we use that
+ * or via-pmu itself, whatever we find. I haven't seen a machine
+ * with separate bus nodes, so we assume a multibus setup
+ */
+ busnode = of_find_node_by_name(NULL, "pmu-i2c");
+ if (busnode == NULL)
+ busnode = of_find_node_by_name(NULL, "via-pmu");
+ if (busnode == NULL)
+ return;
+
+ printk(KERN_INFO "PMU i2c %s\n", busnode->full_name);
+
+ /*
+ * We add bus 1 and 2 only for now, bus 0 is "special"
+ */
+ for (channel = 1; channel <= 2; channel++) {
+ sz = sizeof(struct pmac_i2c_bus) + sizeof(struct adb_request);
+ bus = kzalloc(sz, GFP_KERNEL);
+ if (bus == NULL)
+ return;
+
+ bus->controller = busnode;
+ bus->busnode = busnode;
+ bus->type = pmac_i2c_bus_pmu;
+ bus->channel = channel;
+ bus->mode = pmac_i2c_mode_std;
+ bus->hostdata = bus + 1;
+ bus->xfer = pmu_i2c_xfer;
+ init_MUTEX(&bus->sem);
+ bus->flags = pmac_i2c_multibus;
+ list_add(&bus->link, &pmac_i2c_busses);
+
+ printk(KERN_INFO " channel %d bus <multibus>\n", channel);
+ }
}
#endif /* CONFIG_ADB_PMU */
-void __init pmac_init_low_i2c(void)
+
+/*
+ *
+ * SMU implementation
+ *
+ */
+
+#ifdef CONFIG_PMAC_SMU
+
+static void smu_i2c_complete(struct smu_i2c_cmd *cmd, void *misc)
{
- struct device_node *np;
+ complete(misc);
+}
- /* Probe keywest-i2c busses */
- np = of_find_compatible_node(NULL, "i2c", "keywest-i2c");
- while(np) {
- keywest_low_i2c_add(np);
- np = of_find_compatible_node(np, "i2c", "keywest-i2c");
+static int smu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
+ u32 subaddr, u8 *data, int len)
+{
+ struct smu_i2c_cmd *cmd = bus->hostdata;
+ struct completion comp;
+ int read = addrdir & 1;
+ int rc = 0;
+
+ if ((read && len > SMU_I2C_READ_MAX) ||
+ ((!read) && len > SMU_I2C_WRITE_MAX))
+ return -EINVAL;
+
+ memset(cmd, 0, sizeof(struct smu_i2c_cmd));
+ cmd->info.bus = bus->channel;
+ cmd->info.devaddr = addrdir;
+ cmd->info.datalen = len;
+
+ switch(bus->mode) {
+ case pmac_i2c_mode_std:
+ if (subsize != 0)
+ return -EINVAL;
+ cmd->info.type = SMU_I2C_TRANSFER_SIMPLE;
+ break;
+ case pmac_i2c_mode_stdsub:
+ case pmac_i2c_mode_combined:
+ if (subsize > 3 || subsize < 1)
+ return -EINVAL;
+ cmd->info.sublen = subsize;
+ /* that's big-endian only but heh ! */
+ memcpy(&cmd->info.subaddr, ((char *)&subaddr) + (4 - subsize),
+ subsize);
+ if (bus->mode == pmac_i2c_mode_stdsub)
+ cmd->info.type = SMU_I2C_TRANSFER_STDSUB;
+ else
+ cmd->info.type = SMU_I2C_TRANSFER_COMBINED;
+ break;
+ default:
+ return -EINVAL;
}
+ if (!read && len)
+ memcpy(cmd->info.data, data, len);
+
+ init_completion(&comp);
+ cmd->done = smu_i2c_complete;
+ cmd->misc = &comp;
+ rc = smu_queue_i2c(cmd);
+ if (rc < 0)
+ return rc;
+ wait_for_completion(&comp);
+ rc = cmd->status;
+
+ if (read && len)
+ memcpy(data, cmd->info.data, len);
+ return rc < 0 ? rc : 0;
+}
+
+static void __init smu_i2c_probe(void)
+{
+ struct device_node *controller, *busnode;
+ struct pmac_i2c_bus *bus;
+ u32 *reg;
+ int sz;
+
+ if (!smu_present())
+ return;
+
+ controller = of_find_node_by_name(NULL, "smu-i2c-control");
+ if (controller == NULL)
+ controller = of_find_node_by_name(NULL, "smu");
+ if (controller == NULL)
+ return;
+
+ printk(KERN_INFO "SMU i2c %s\n", controller->full_name);
+
+	/* Look for children, note that they might not be of the right
+	 * type as older device trees mix i2c busses and other things
+ * at the same level
+ */
+ for (busnode = NULL;
+ (busnode = of_get_next_child(controller, busnode)) != NULL;) {
+ if (strcmp(busnode->type, "i2c") &&
+ strcmp(busnode->type, "i2c-bus"))
+ continue;
+ reg = (u32 *)get_property(busnode, "reg", NULL);
+ if (reg == NULL)
+ continue;
+
+ sz = sizeof(struct pmac_i2c_bus) + sizeof(struct smu_i2c_cmd);
+ bus = kzalloc(sz, GFP_KERNEL);
+ if (bus == NULL)
+ return;
+
+ bus->controller = controller;
+ bus->busnode = of_node_get(busnode);
+ bus->type = pmac_i2c_bus_smu;
+ bus->channel = *reg;
+ bus->mode = pmac_i2c_mode_std;
+ bus->hostdata = bus + 1;
+ bus->xfer = smu_i2c_xfer;
+ init_MUTEX(&bus->sem);
+ bus->flags = 0;
+ list_add(&bus->link, &pmac_i2c_busses);
+
+ printk(KERN_INFO " channel %x bus %s\n",
+ bus->channel, busnode->full_name);
+ }
+}
+
+#endif /* CONFIG_PMAC_SMU */
+
+/*
+ *
+ * Core code
+ *
+ */
-#ifdef CONFIG_ADB_PMU
- /* Probe PMU busses */
- np = of_find_node_by_name(NULL, "via-pmu");
- if (np)
- pmu_low_i2c_add(np);
-#endif /* CONFIG_ADB_PMU */
- /* TODO: Add CUDA support as well */
+struct pmac_i2c_bus *pmac_i2c_find_bus(struct device_node *node)
+{
+ struct device_node *p = of_node_get(node);
+ struct device_node *prev = NULL;
+ struct pmac_i2c_bus *bus;
+
+ while(p) {
+ list_for_each_entry(bus, &pmac_i2c_busses, link) {
+ if (p == bus->busnode) {
+ if (prev && bus->flags & pmac_i2c_multibus) {
+ u32 *reg;
+ reg = (u32 *)get_property(prev, "reg",
+ NULL);
+ if (!reg)
+ continue;
+ if (((*reg) >> 8) != bus->channel)
+ continue;
+ }
+ of_node_put(p);
+ of_node_put(prev);
+ return bus;
+ }
+ }
+ of_node_put(prev);
+ prev = p;
+ p = of_get_parent(p);
+ }
+ return NULL;
}
+EXPORT_SYMBOL_GPL(pmac_i2c_find_bus);
+
+u8 pmac_i2c_get_dev_addr(struct device_node *device)
+{
+ u32 *reg = (u32 *)get_property(device, "reg", NULL);
+
+ if (reg == NULL)
+ return 0;
+
+ return (*reg) & 0xff;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_get_dev_addr);
+
+struct device_node *pmac_i2c_get_controller(struct pmac_i2c_bus *bus)
+{
+ return bus->controller;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_get_controller);
+
+struct device_node *pmac_i2c_get_bus_node(struct pmac_i2c_bus *bus)
+{
+ return bus->busnode;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_get_bus_node);
+
+int pmac_i2c_get_type(struct pmac_i2c_bus *bus)
+{
+ return bus->type;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_get_type);
+
+int pmac_i2c_get_flags(struct pmac_i2c_bus *bus)
+{
+ return bus->flags;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_get_flags);
+
+int pmac_i2c_get_channel(struct pmac_i2c_bus *bus)
+{
+ return bus->channel;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_get_channel);
+
+
+void pmac_i2c_attach_adapter(struct pmac_i2c_bus *bus,
+ struct i2c_adapter *adapter)
+{
+ WARN_ON(bus->adapter != NULL);
+ bus->adapter = adapter;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_attach_adapter);
+
+void pmac_i2c_detach_adapter(struct pmac_i2c_bus *bus,
+ struct i2c_adapter *adapter)
+{
+ WARN_ON(bus->adapter != adapter);
+ bus->adapter = NULL;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_detach_adapter);
+
+struct i2c_adapter *pmac_i2c_get_adapter(struct pmac_i2c_bus *bus)
+{
+ return bus->adapter;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_get_adapter);
+
+struct pmac_i2c_bus *pmac_i2c_adapter_to_bus(struct i2c_adapter *adapter)
+{
+ struct pmac_i2c_bus *bus;
+
+ list_for_each_entry(bus, &pmac_i2c_busses, link)
+ if (bus->adapter == adapter)
+ return bus;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_adapter_to_bus);
+
+extern int pmac_i2c_match_adapter(struct device_node *dev,
+ struct i2c_adapter *adapter)
+{
+ struct pmac_i2c_bus *bus = pmac_i2c_find_bus(dev);
+
+ if (bus == NULL)
+ return 0;
+ return (bus->adapter == adapter);
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_match_adapter);
int pmac_low_i2c_lock(struct device_node *np)
{
- struct low_i2c_host *host = find_low_i2c_host(np);
+ struct pmac_i2c_bus *bus, *found = NULL;
- if (!host)
+ list_for_each_entry(bus, &pmac_i2c_busses, link) {
+ if (np == bus->controller) {
+ found = bus;
+ break;
+ }
+ }
+ if (!found)
return -ENODEV;
- down(&host->mutex);
- return 0;
+ return pmac_i2c_open(bus, 0);
}
-EXPORT_SYMBOL(pmac_low_i2c_lock);
+EXPORT_SYMBOL_GPL(pmac_low_i2c_lock);
int pmac_low_i2c_unlock(struct device_node *np)
{
- struct low_i2c_host *host = find_low_i2c_host(np);
+ struct pmac_i2c_bus *bus, *found = NULL;
- if (!host)
+ list_for_each_entry(bus, &pmac_i2c_busses, link) {
+ if (np == bus->controller) {
+ found = bus;
+ break;
+ }
+ }
+ if (!found)
return -ENODEV;
- up(&host->mutex);
+ pmac_i2c_close(bus);
return 0;
}
-EXPORT_SYMBOL(pmac_low_i2c_unlock);
+EXPORT_SYMBOL_GPL(pmac_low_i2c_unlock);
-int pmac_low_i2c_open(struct device_node *np, int channel)
+int pmac_i2c_open(struct pmac_i2c_bus *bus, int polled)
{
- struct low_i2c_host *host = find_low_i2c_host(np);
+ int rc;
+
+ down(&bus->sem);
+ bus->polled = polled || pmac_i2c_force_poll;
+ bus->opened = 1;
+ bus->mode = pmac_i2c_mode_std;
+ if (bus->open && (rc = bus->open(bus)) != 0) {
+ bus->opened = 0;
+ up(&bus->sem);
+ return rc;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_open);
- if (!host)
- return -ENODEV;
+void pmac_i2c_close(struct pmac_i2c_bus *bus)
+{
+ WARN_ON(!bus->opened);
+ if (bus->close)
+ bus->close(bus);
+ bus->opened = 0;
+ up(&bus->sem);
+}
+EXPORT_SYMBOL_GPL(pmac_i2c_close);
- if (channel >= host->num_channels)
+int pmac_i2c_setmode(struct pmac_i2c_bus *bus, int mode)
+{
+ WARN_ON(!bus->opened);
+
+	/* Report to me if you see the error below, as there might be a new
+ * "combined4" mode that I need to implement for the SMU bus
+ */
+ if (mode < pmac_i2c_mode_dumb || mode > pmac_i2c_mode_combined) {
+ printk(KERN_ERR "low_i2c: Invalid mode %d requested on"
+ " bus %s !\n", mode, bus->busnode->full_name);
return -EINVAL;
-
- down(&host->mutex);
- host->is_open = 1;
- host->channel = channel;
+ }
+ bus->mode = mode;
return 0;
}
-EXPORT_SYMBOL(pmac_low_i2c_open);
+EXPORT_SYMBOL_GPL(pmac_i2c_setmode);
-int pmac_low_i2c_close(struct device_node *np)
+int pmac_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
+ u32 subaddr, u8 *data, int len)
{
- struct low_i2c_host *host = find_low_i2c_host(np);
+ int rc;
- if (!host)
- return -ENODEV;
+ WARN_ON(!bus->opened);
- host->is_open = 0;
- up(&host->mutex);
+ DBG("xfer() chan=%d, addrdir=0x%x, mode=%d, subsize=%d, subaddr=0x%x,"
+ " %d bytes, bus %s\n", bus->channel, addrdir, bus->mode, subsize,
+ subaddr, len, bus->busnode->full_name);
- return 0;
+ rc = bus->xfer(bus, addrdir, subsize, subaddr, data, len);
+
+#ifdef DEBUG
+ if (rc)
+ DBG("xfer error %d\n", rc);
+#endif
+ return rc;
}
-EXPORT_SYMBOL(pmac_low_i2c_close);
+EXPORT_SYMBOL_GPL(pmac_i2c_xfer);
+
+/* some quirks for platform function decoding */
+enum {
+ pmac_i2c_quirk_invmask = 0x00000001u,
+};
-int pmac_low_i2c_setmode(struct device_node *np, int mode)
+static void pmac_i2c_devscan(void (*callback)(struct device_node *dev,
+ int quirks))
{
- struct low_i2c_host *host = find_low_i2c_host(np);
+ struct pmac_i2c_bus *bus;
+ struct device_node *np;
+ static struct whitelist_ent {
+ char *name;
+ char *compatible;
+ int quirks;
+ } whitelist[] = {
+ /* XXX Study device-trees & Apple drivers to get the quirks
+ * right !
+ */
+ { "i2c-hwclock", NULL, pmac_i2c_quirk_invmask },
+ { "i2c-cpu-voltage", NULL, 0},
+ { "temp-monitor", NULL, 0 },
+ { "supply-monitor", NULL, 0 },
+ { NULL, NULL, 0 },
+ };
+
+ /* Only some devices need to have platform functions instantiated
+ * here. For now, we have a table. Others, like 9554 i2c GPIOs used
+ * on Xserve, if we ever do a driver for them, will use their own
+ * platform function instance
+ */
+ list_for_each_entry(bus, &pmac_i2c_busses, link) {
+ for (np = NULL;
+ (np = of_get_next_child(bus->busnode, np)) != NULL;) {
+ struct whitelist_ent *p;
+ /* If multibus, check if device is on that bus */
+ if (bus->flags & pmac_i2c_multibus)
+ if (bus != pmac_i2c_find_bus(np))
+ continue;
+ for (p = whitelist; p->name != NULL; p++) {
+ if (strcmp(np->name, p->name))
+ continue;
+ if (p->compatible &&
+ !device_is_compatible(np, p->compatible))
+ continue;
+ callback(np, p->quirks);
+ break;
+ }
+ }
+ }
+}
- if (!host)
- return -ENODEV;
- WARN_ON(!host->is_open);
- host->mode = mode;
+#define MAX_I2C_DATA 64
+
+struct pmac_i2c_pf_inst
+{
+ struct pmac_i2c_bus *bus;
+ u8 addr;
+ u8 buffer[MAX_I2C_DATA];
+ u8 scratch[MAX_I2C_DATA];
+ int bytes;
+ int quirks;
+};
+
+static void* pmac_i2c_do_begin(struct pmf_function *func, struct pmf_args *args)
+{
+ struct pmac_i2c_pf_inst *inst;
+ struct pmac_i2c_bus *bus;
+
+ bus = pmac_i2c_find_bus(func->node);
+ if (bus == NULL) {
+ printk(KERN_ERR "low_i2c: Can't find bus for %s (pfunc)\n",
+ func->node->full_name);
+ return NULL;
+ }
+ if (pmac_i2c_open(bus, 0)) {
+ printk(KERN_ERR "low_i2c: Can't open i2c bus for %s (pfunc)\n",
+ func->node->full_name);
+ return NULL;
+ }
+
+ /* XXX This might need GFP_ATOMIC when called during the suspend
+ * process, but there are already lots of issues with suspending
+ * near OOM that need to be resolved; the allocator itself should
+ * probably make GFP_NOIO implicit during suspend
+ */
+ inst = kzalloc(sizeof(struct pmac_i2c_pf_inst), GFP_KERNEL);
+ if (inst == NULL) {
+ pmac_i2c_close(bus);
+ return NULL;
+ }
+ inst->bus = bus;
+ inst->addr = pmac_i2c_get_dev_addr(func->node);
+ inst->quirks = (int)(long)func->driver_data;
+ return inst;
+}
+
+static void pmac_i2c_do_end(struct pmf_function *func, void *instdata)
+{
+ struct pmac_i2c_pf_inst *inst = instdata;
+
+ if (inst == NULL)
+ return;
+ pmac_i2c_close(inst->bus);
+ kfree(inst);
+}
+
+static int pmac_i2c_do_read(PMF_STD_ARGS, u32 len)
+{
+ struct pmac_i2c_pf_inst *inst = instdata;
+
+ inst->bytes = len;
+ return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_read, 0, 0,
+ inst->buffer, len);
+}
+
+static int pmac_i2c_do_write(PMF_STD_ARGS, u32 len, const u8 *data)
+{
+ struct pmac_i2c_pf_inst *inst = instdata;
+
+ return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 0, 0,
+ (u8 *)data, len);
+}
+
+/* This function is used to do the masking & OR'ing for the "rmw" type
+ * callbacks. We should apply the mask and OR in the values in the
+ * buffer before writing back. The problem is that various Darwin
+ * drivers seem to implement the mask/OR differently, so we need to
+ * check the quirks first
+ */
+static void pmac_i2c_do_apply_rmw(struct pmac_i2c_pf_inst *inst,
+ u32 len, const u8 *mask, const u8 *val)
+{
+ int i;
+
+ if (inst->quirks & pmac_i2c_quirk_invmask) {
+ for (i = 0; i < len; i ++)
+ inst->scratch[i] = (inst->buffer[i] & mask[i]) | val[i];
+ } else {
+ for (i = 0; i < len; i ++)
+ inst->scratch[i] = (inst->buffer[i] & ~mask[i])
+ | (val[i] & mask[i]);
+ }
+}
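To make the quirk handling above concrete, a short worked example (illustrative values, not from the patch): with buffer[i] = 0xF0, mask[i] = 0x0F and val[i] = 0x05,

	/* default:  (0xF0 & ~0x0F) | (0x05 & 0x0F) = 0xF0 | 0x05 = 0xF5
	 *           -> mask selects the bits to be replaced by val
	 * invmask:  (0xF0 &  0x0F) |  0x05          = 0x00 | 0x05 = 0x05
	 *           -> mask selects the bits to keep, val is OR'ed in whole
	 */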
+
+static int pmac_i2c_do_rmw(PMF_STD_ARGS, u32 masklen, u32 valuelen,
+ u32 totallen, const u8 *maskdata,
+ const u8 *valuedata)
+{
+ struct pmac_i2c_pf_inst *inst = instdata;
+
+ if (masklen > inst->bytes || valuelen > inst->bytes ||
+ totallen > inst->bytes || valuelen > masklen)
+ return -EINVAL;
+
+ pmac_i2c_do_apply_rmw(inst, masklen, maskdata, valuedata);
+
+ return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 0, 0,
+ inst->scratch, totallen);
+}
+
+static int pmac_i2c_do_read_sub(PMF_STD_ARGS, u8 subaddr, u32 len)
+{
+ struct pmac_i2c_pf_inst *inst = instdata;
+
+ inst->bytes = len;
+ return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_read, 1, subaddr,
+ inst->buffer, len);
+}
+
+static int pmac_i2c_do_write_sub(PMF_STD_ARGS, u8 subaddr, u32 len,
+ const u8 *data)
+{
+ struct pmac_i2c_pf_inst *inst = instdata;
+
+ return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 1,
+ subaddr, (u8 *)data, len);
+}
+static int pmac_i2c_do_set_mode(PMF_STD_ARGS, int mode)
+{
+ struct pmac_i2c_pf_inst *inst = instdata;
+
+ return pmac_i2c_setmode(inst->bus, mode);
+}
+
+static int pmac_i2c_do_rmw_sub(PMF_STD_ARGS, u8 subaddr, u32 masklen,
+ u32 valuelen, u32 totallen, const u8 *maskdata,
+ const u8 *valuedata)
+{
+ struct pmac_i2c_pf_inst *inst = instdata;
+
+ if (masklen > inst->bytes || valuelen > inst->bytes ||
+ totallen > inst->bytes || valuelen > masklen)
+ return -EINVAL;
+
+ pmac_i2c_do_apply_rmw(inst, masklen, maskdata, valuedata);
+
+ return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 1,
+ subaddr, inst->scratch, totallen);
+}
+
+static int pmac_i2c_do_mask_and_comp(PMF_STD_ARGS, u32 len,
+ const u8 *maskdata,
+ const u8 *valuedata)
+{
+ struct pmac_i2c_pf_inst *inst = instdata;
+ int i, match;
+
+ /* Get return value pointer, it's assumed to be a u32 */
+ if (!args || !args->count || !args->u[0].p)
+ return -EINVAL;
+
+ /* Check buffer */
+ if (len > inst->bytes)
+ return -EINVAL;
+
+ for (i = 0, match = 1; match && i < len; i ++)
+ if ((inst->buffer[i] & maskdata[i]) != valuedata[i])
+ match = 0;
+ *args->u[0].p = match;
return 0;
}
-EXPORT_SYMBOL(pmac_low_i2c_setmode);
-int pmac_low_i2c_xfer(struct device_node *np, u8 addrdir, u8 subaddr, u8 *data, int len)
+static int pmac_i2c_do_delay(PMF_STD_ARGS, u32 duration)
{
- struct low_i2c_host *host = find_low_i2c_host(np);
+ msleep((duration + 999) / 1000);
+ return 0;
+}
- if (!host)
- return -ENODEV;
- WARN_ON(!host->is_open);
- return host->func(host, addrdir, subaddr, data, len);
+static struct pmf_handlers pmac_i2c_pfunc_handlers = {
+ .begin = pmac_i2c_do_begin,
+ .end = pmac_i2c_do_end,
+ .read_i2c = pmac_i2c_do_read,
+ .write_i2c = pmac_i2c_do_write,
+ .rmw_i2c = pmac_i2c_do_rmw,
+ .read_i2c_sub = pmac_i2c_do_read_sub,
+ .write_i2c_sub = pmac_i2c_do_write_sub,
+ .rmw_i2c_sub = pmac_i2c_do_rmw_sub,
+ .set_i2c_mode = pmac_i2c_do_set_mode,
+ .mask_and_compare = pmac_i2c_do_mask_and_comp,
+ .delay = pmac_i2c_do_delay,
+};
+
+static void __init pmac_i2c_dev_create(struct device_node *np, int quirks)
+{
+ DBG("dev_create(%s)\n", np->full_name);
+
+ pmf_register_driver(np, &pmac_i2c_pfunc_handlers,
+ (void *)(long)quirks);
+}
+
+static void __init pmac_i2c_dev_init(struct device_node *np, int quirks)
+{
+ DBG("dev_create(%s)\n", np->full_name);
+
+ pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_INIT, NULL);
+}
+
+static void pmac_i2c_dev_suspend(struct device_node *np, int quirks)
+{
+ DBG("dev_suspend(%s)\n", np->full_name);
+ pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_SLEEP, NULL);
+}
+
+static void pmac_i2c_dev_resume(struct device_node *np, int quirks)
+{
+ DBG("dev_resume(%s)\n", np->full_name);
+ pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_WAKE, NULL);
+}
+
+void pmac_pfunc_i2c_suspend(void)
+{
+ pmac_i2c_devscan(pmac_i2c_dev_suspend);
+}
+
+void pmac_pfunc_i2c_resume(void)
+{
+ pmac_i2c_devscan(pmac_i2c_dev_resume);
+}
+
+/*
+ * Initialize us: probe all i2c busses on the machine, instantiate
+ * busses and platform functions as needed.
+ */
+/* This is non-static as it might be called early by smp code */
+int __init pmac_i2c_init(void)
+{
+ static int i2c_inited;
+
+ if (i2c_inited)
+ return 0;
+ i2c_inited = 1;
+
+ /* Probe keywest-i2c busses */
+ kw_i2c_probe();
+
+#ifdef CONFIG_ADB_PMU
+ /* Probe PMU i2c busses */
+ pmu_i2c_probe();
+#endif
+
+#ifdef CONFIG_PMAC_SMU
+ /* Probe SMU i2c busses */
+ smu_i2c_probe();
+#endif
+
+ /* Now add platform functions for some known devices */
+ pmac_i2c_devscan(pmac_i2c_dev_create);
+
+ return 0;
}
-EXPORT_SYMBOL(pmac_low_i2c_xfer);
+arch_initcall(pmac_i2c_init);
+
+/* Since pmac_i2c_init can be called too early for the platform device
+ * registration, we need to do it at a later time. In our case, subsys
+ * happens to fit well, though I agree it's a bit of a hack...
+ */
+static int __init pmac_i2c_create_platform_devices(void)
+{
+ struct pmac_i2c_bus *bus;
+ int i = 0;
+
+ /* In the case where we are initialized from smp_init(), we must
+ * not use the timer (and thus the irq). It's safe from now on
+ * though
+ */
+ pmac_i2c_force_poll = 0;
+
+ /* Create platform devices */
+ list_for_each_entry(bus, &pmac_i2c_busses, link) {
+ bus->platform_dev =
+ platform_device_alloc("i2c-powermac", i++);
+ if (bus->platform_dev == NULL)
+ return -ENOMEM;
+ bus->platform_dev->dev.platform_data = bus;
+ platform_device_add(bus->platform_dev);
+ }
+
+ /* Now call platform "init" functions */
+ pmac_i2c_devscan(pmac_i2c_dev_init);
+ return 0;
+}
+subsys_initcall(pmac_i2c_create_platform_devices);
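The bus pointer stashed in dev.platform_data above is what the i2c-powermac platform driver is meant to pick up in its probe routine. A rough sketch of what such a probe might look like (hypothetical function; the real i2c-powermac driver lives under drivers/i2c and is not part of this hunk):

	static int example_i2c_powermac_probe(struct platform_device *pdev)
	{
		/* Bus was attached by pmac_i2c_create_platform_devices() */
		struct pmac_i2c_bus *bus = pdev->dev.platform_data;
		struct i2c_adapter *adapter;

		adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
		if (adapter == NULL)
			return -ENOMEM;
		/* ... set up and register the adapter, then tie it to the bus ... */
		pmac_i2c_attach_adapter(bus, adapter);
		return 0;
	}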
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index 4042e2f06ee..3ebd045a335 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -514,7 +514,7 @@ static void core99_nvram_sync(void)
#endif
}
-static int __init core99_nvram_setup(struct device_node *dp)
+static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr)
{
int i;
u32 gen_bank0, gen_bank1;
@@ -528,7 +528,7 @@ static int __init core99_nvram_setup(struct device_node *dp)
printk(KERN_ERR "nvram: can't allocate ram image\n");
return -ENOMEM;
}
- nvram_data = ioremap(dp->addrs[0].address, NVRAM_SIZE*2);
+ nvram_data = ioremap(addr, NVRAM_SIZE*2);
nvram_naddrs = 1; /* Make sure we get the correct case */
DBG("nvram: Checking bank 0...\n");
@@ -549,6 +549,7 @@ static int __init core99_nvram_setup(struct device_node *dp)
ppc_md.nvram_write = core99_nvram_write;
ppc_md.nvram_size = core99_nvram_size;
ppc_md.nvram_sync = core99_nvram_sync;
+ ppc_md.machine_shutdown = core99_nvram_sync;
/*
* Maybe we could be smarter here though making an exclusive list
* of known flash chips is a bit nasty as older OF didn't provide us
@@ -569,34 +570,48 @@ static int __init core99_nvram_setup(struct device_node *dp)
int __init pmac_nvram_init(void)
{
struct device_node *dp;
+ struct resource r1, r2;
+ unsigned int s1 = 0, s2 = 0;
int err = 0;
nvram_naddrs = 0;
- dp = find_devices("nvram");
+ dp = of_find_node_by_name(NULL, "nvram");
if (dp == NULL) {
printk(KERN_ERR "Can't find NVRAM device\n");
return -ENODEV;
}
- nvram_naddrs = dp->n_addrs;
+
+ /* Try to obtain an address */
+ if (of_address_to_resource(dp, 0, &r1) == 0) {
+ nvram_naddrs = 1;
+ s1 = (r1.end - r1.start) + 1;
+ if (of_address_to_resource(dp, 1, &r2) == 0) {
+ nvram_naddrs = 2;
+ s2 = (r2.end - r2.start) + 1;
+ }
+ }
+
is_core_99 = device_is_compatible(dp, "nvram,flash");
- if (is_core_99)
- err = core99_nvram_setup(dp);
+ if (is_core_99) {
+ err = core99_nvram_setup(dp, r1.start);
+ goto bail;
+ }
+
#ifdef CONFIG_PPC32
- else if (_machine == _MACH_chrp && nvram_naddrs == 1) {
- nvram_data = ioremap(dp->addrs[0].address + isa_mem_base,
- dp->addrs[0].size);
+ if (_machine == _MACH_chrp && nvram_naddrs == 1) {
+ nvram_data = ioremap(r1.start, s1);
nvram_mult = 1;
ppc_md.nvram_read_val = direct_nvram_read_byte;
ppc_md.nvram_write_val = direct_nvram_write_byte;
} else if (nvram_naddrs == 1) {
- nvram_data = ioremap(dp->addrs[0].address, dp->addrs[0].size);
- nvram_mult = (dp->addrs[0].size + NVRAM_SIZE - 1) / NVRAM_SIZE;
+ nvram_data = ioremap(r1.start, s1);
+ nvram_mult = (s1 + NVRAM_SIZE - 1) / NVRAM_SIZE;
ppc_md.nvram_read_val = direct_nvram_read_byte;
ppc_md.nvram_write_val = direct_nvram_write_byte;
} else if (nvram_naddrs == 2) {
- nvram_addr = ioremap(dp->addrs[0].address, dp->addrs[0].size);
- nvram_data = ioremap(dp->addrs[1].address, dp->addrs[1].size);
+ nvram_addr = ioremap(r1.start, s1);
+ nvram_data = ioremap(r2.start, s2);
ppc_md.nvram_read_val = indirect_nvram_read_byte;
ppc_md.nvram_write_val = indirect_nvram_write_byte;
} else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) {
@@ -605,13 +620,15 @@ int __init pmac_nvram_init(void)
ppc_md.nvram_read_val = pmu_nvram_read_byte;
ppc_md.nvram_write_val = pmu_nvram_write_byte;
#endif /* CONFIG_ADB_PMU */
- }
-#endif
- else {
+ } else {
printk(KERN_ERR "Incompatible type of NVRAM\n");
- return -ENXIO;
+ err = -ENXIO;
}
- lookup_partitions();
+#endif /* CONFIG_PPC32 */
+bail:
+ of_node_put(dp);
+ if (err == 0)
+ lookup_partitions();
return err;
}
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 443be526cde..f671ed25390 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -1,7 +1,7 @@
/*
* Support for PCI bridges found on Power Macintoshes.
*
- * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ * Copyright (C) 2003-2005 Benjamin Herrenschmidt (benh@kernel.crashing.org)
* Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
*
* This program is free software; you can redistribute it and/or
@@ -25,7 +25,7 @@
#include <asm/pmac_feature.h>
#include <asm/grackle.h>
#ifdef CONFIG_PPC64
-#include <asm/iommu.h>
+//#include <asm/iommu.h>
#include <asm/ppc-pci.h>
#endif
@@ -44,6 +44,7 @@ static int add_bridge(struct device_node *dev);
static int has_uninorth;
#ifdef CONFIG_PPC64
static struct pci_controller *u3_agp;
+static struct pci_controller *u4_pcie;
static struct pci_controller *u3_ht;
#endif /* CONFIG_PPC64 */
@@ -97,11 +98,8 @@ static void __init fixup_bus_range(struct device_node *bridge)
/* Lookup the "bus-range" property for the hose */
bus_range = (int *) get_property(bridge, "bus-range", &len);
- if (bus_range == NULL || len < 2 * sizeof(int)) {
- printk(KERN_WARNING "Can't get bus-range for %s\n",
- bridge->full_name);
+ if (bus_range == NULL || len < 2 * sizeof(int))
return;
- }
bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]);
}
@@ -128,14 +126,14 @@ static void __init fixup_bus_range(struct device_node *bridge)
*/
#define MACRISC_CFA0(devfn, off) \
- ((1 << (unsigned long)PCI_SLOT(dev_fn)) \
- | (((unsigned long)PCI_FUNC(dev_fn)) << 8) \
- | (((unsigned long)(off)) & 0xFCUL))
+ ((1 << (unsigned int)PCI_SLOT(dev_fn)) \
+ | (((unsigned int)PCI_FUNC(dev_fn)) << 8) \
+ | (((unsigned int)(off)) & 0xFCUL))
#define MACRISC_CFA1(bus, devfn, off) \
- ((((unsigned long)(bus)) << 16) \
- |(((unsigned long)(devfn)) << 8) \
- |(((unsigned long)(off)) & 0xFCUL) \
+ ((((unsigned int)(bus)) << 16) \
+ |(((unsigned int)(devfn)) << 8) \
+ |(((unsigned int)(off)) & 0xFCUL) \
|1UL)
static unsigned long macrisc_cfg_access(struct pci_controller* hose,
@@ -168,7 +166,8 @@ static int macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
-
+ if (offset >= 0x100)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -199,7 +198,8 @@ static int macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
-
+ if (offset >= 0x100)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -234,12 +234,13 @@ static struct pci_ops macrisc_pci_ops =
/*
* Verify that a specific (bus, dev_fn) exists on chaos
*/
-static int
-chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
+static int chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
{
struct device_node *np;
u32 *vendor, *device;
+ if (offset >= 0x100)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
np = pci_busdev_to_OF_node(bus, devfn);
if (np == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -285,15 +286,13 @@ static struct pci_ops chaos_pci_ops =
};
static void __init setup_chaos(struct pci_controller *hose,
- struct reg_property *addr)
+ struct resource *addr)
{
/* assume a `chaos' bridge */
hose->ops = &chaos_pci_ops;
- hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
- hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
+ hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000);
+ hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000);
}
-#else
-#define setup_chaos(hose, addr)
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
@@ -326,7 +325,7 @@ static int u3_ht_skip_device(struct pci_controller *hose,
else
busdn = hose->arch_data;
for (dn = busdn->child; dn; dn = dn->sibling)
- if (dn->data && PCI_DN(dn)->devfn == devfn)
+ if (PCI_DN(dn) && PCI_DN(dn)->devfn == devfn)
break;
if (dn == NULL)
return -1;
@@ -343,10 +342,10 @@ static int u3_ht_skip_device(struct pci_controller *hose,
}
#define U3_HT_CFA0(devfn, off) \
- ((((unsigned long)devfn) << 8) | offset)
+ ((((unsigned int)devfn) << 8) | offset)
#define U3_HT_CFA1(bus, devfn, off) \
(U3_HT_CFA0(devfn, off) \
- + (((unsigned long)bus) << 16) \
+ + (((unsigned int)bus) << 16) \
+ 0x01000000UL)
static unsigned long u3_ht_cfg_access(struct pci_controller* hose,
@@ -356,9 +355,11 @@ static unsigned long u3_ht_cfg_access(struct pci_controller* hose,
/* For now, we don't self probe U3 HT bridge */
if (PCI_SLOT(devfn) == 0)
return 0;
- return ((unsigned long)hose->cfg_data) + U3_HT_CFA0(devfn, offset);
+ return ((unsigned long)hose->cfg_data) +
+ U3_HT_CFA0(devfn, offset);
} else
- return ((unsigned long)hose->cfg_data) + U3_HT_CFA1(bus, devfn, offset);
+ return ((unsigned long)hose->cfg_data) +
+ U3_HT_CFA1(bus, devfn, offset);
}
static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
@@ -370,7 +371,8 @@ static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
-
+ if (offset >= 0x100)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -419,7 +421,8 @@ static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
-
+ if (offset >= 0x100)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -459,6 +462,112 @@ static struct pci_ops u3_ht_pci_ops =
u3_ht_read_config,
u3_ht_write_config
};
+
+#define U4_PCIE_CFA0(devfn, off) \
+ ((1 << ((unsigned int)PCI_SLOT(dev_fn))) \
+ | (((unsigned int)PCI_FUNC(dev_fn)) << 8) \
+ | ((((unsigned int)(off)) >> 8) << 28) \
+ | (((unsigned int)(off)) & 0xfcU))
+
+#define U4_PCIE_CFA1(bus, devfn, off) \
+ ((((unsigned int)(bus)) << 16) \
+ |(((unsigned int)(devfn)) << 8) \
+ | ((((unsigned int)(off)) >> 8) << 28) \
+ |(((unsigned int)(off)) & 0xfcU) \
+ |1UL)
+
+static unsigned long u4_pcie_cfg_access(struct pci_controller* hose,
+ u8 bus, u8 dev_fn, int offset)
+{
+ unsigned int caddr;
+
+ if (bus == hose->first_busno) {
+ caddr = U4_PCIE_CFA0(dev_fn, offset);
+ } else
+ caddr = U4_PCIE_CFA1(bus, dev_fn, offset);
+
+ /* Uninorth will return garbage if we don't read back the value ! */
+ do {
+ out_le32(hose->cfg_addr, caddr);
+ } while (in_le32(hose->cfg_addr) != caddr);
+
+ offset &= 0x03;
+ return ((unsigned long)hose->cfg_data) + offset;
+}
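As an illustration of the config-address encoding used above (values are made up, not from the patch): for bus 5, devfn 0x20 and extended offset 0x104, U4_PCIE_CFA1 places the upper offset bits in [31:28] and sets bit 0 to request a type-1 cycle:

	/*  (5 << 16) | (0x20 << 8) | ((0x104 >> 8) << 28) | (0x104 & 0xfc) | 1
	 * = 0x00050000 | 0x00002000 | 0x10000000 | 0x04 | 0x01
	 * = 0x10052005  -> written to (and read back from) cfg_addr
	 * The remaining low bits (0x104 & 0x03 = 0) then index into cfg_data.
	 */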
+
+static int u4_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 *val)
+{
+ struct pci_controller *hose;
+ unsigned long addr;
+
+ hose = pci_bus_to_host(bus);
+ if (hose == NULL)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ if (offset >= 0x1000)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ /*
+ * Note: the caller has already checked that offset is
+ * suitably aligned and that len is 1, 2 or 4.
+ */
+ switch (len) {
+ case 1:
+ *val = in_8((u8 *)addr);
+ break;
+ case 2:
+ *val = in_le16((u16 *)addr);
+ break;
+ default:
+ *val = in_le32((u32 *)addr);
+ break;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int u4_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 val)
+{
+ struct pci_controller *hose;
+ unsigned long addr;
+
+ hose = pci_bus_to_host(bus);
+ if (hose == NULL)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ if (offset >= 0x1000)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ /*
+ * Note: the caller has already checked that offset is
+ * suitably aligned and that len is 1, 2 or 4.
+ */
+ switch (len) {
+ case 1:
+ out_8((u8 *)addr, val);
+ (void) in_8((u8 *)addr);
+ break;
+ case 2:
+ out_le16((u16 *)addr, val);
+ (void) in_le16((u16 *)addr);
+ break;
+ default:
+ out_le32((u32 *)addr, val);
+ (void) in_le32((u32 *)addr);
+ break;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops u4_pcie_pci_ops =
+{
+ u4_pcie_read_config,
+ u4_pcie_write_config
+};
+
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC32
@@ -532,7 +641,8 @@ static void __init init_p2pbridge(void)
}
if (early_read_config_word(hose, bus, devfn,
PCI_BRIDGE_CONTROL, &val) < 0) {
- printk(KERN_ERR "init_p2pbridge: couldn't read bridge control\n");
+ printk(KERN_ERR "init_p2pbridge: couldn't read bridge"
+ " control\n");
return;
}
val &= ~PCI_BRIDGE_CTL_MASTER_ABORT;
@@ -576,36 +686,38 @@ static void __init fixup_nec_usb2(void)
continue;
early_read_config_dword(hose, bus, devfn, 0xe4, &data);
if (data & 1UL) {
- printk("Found NEC PD720100A USB2 chip with disabled EHCI, fixing up...\n");
+ printk("Found NEC PD720100A USB2 chip with disabled"
+ " EHCI, fixing up...\n");
data &= ~1UL;
early_write_config_dword(hose, bus, devfn, 0xe4, data);
- early_write_config_byte(hose, bus, devfn | 2, PCI_INTERRUPT_LINE,
+ early_write_config_byte(hose, bus,
+ devfn | 2, PCI_INTERRUPT_LINE,
nec->intrs[0].line);
}
}
}
static void __init setup_bandit(struct pci_controller *hose,
- struct reg_property *addr)
+ struct resource *addr)
{
hose->ops = &macrisc_pci_ops;
- hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
- hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
+ hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000);
+ hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000);
init_bandit(hose);
}
static int __init setup_uninorth(struct pci_controller *hose,
- struct reg_property *addr)
+ struct resource *addr)
{
pci_assign_all_buses = 1;
has_uninorth = 1;
hose->ops = &macrisc_pci_ops;
- hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
- hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
+ hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000);
+ hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000);
/* We "know" that the bridge at f2000000 has the PCI slots. */
- return addr->address == 0xf2000000;
+ return addr->start == 0xf2000000;
}
-#endif
+#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
static void __init setup_u3_agp(struct pci_controller* hose)
@@ -625,15 +737,36 @@ static void __init setup_u3_agp(struct pci_controller* hose)
hose->ops = &macrisc_pci_ops;
hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
-
u3_agp = hose;
}
+static void __init setup_u4_pcie(struct pci_controller* hose)
+{
+ /* We currently only implement the "non-atomic" config space, to
+ * be optimised later.
+ */
+ hose->ops = &u4_pcie_pci_ops;
+ hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
+ hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
+
+ /* The bus contains a bridge from root -> device, we need to
+ * make it visible on bus 0 so that we pick the right type
+ * of config cycles. If we didn't, we would have to force all
+ * config cycles to be type 1. So we override the "bus-range"
+ * property here
+ */
+ hose->first_busno = 0x00;
+ hose->last_busno = 0xff;
+ u4_pcie = hose;
+}
+
static void __init setup_u3_ht(struct pci_controller* hose)
{
struct device_node *np = (struct device_node *)hose->arch_data;
+ struct pci_controller *other = NULL;
int i, cur;
+
hose->ops = &u3_ht_pci_ops;
/* We hard code the address because of the different size of
@@ -667,11 +800,20 @@ static void __init setup_u3_ht(struct pci_controller* hose)
u3_ht = hose;
- if (u3_agp == NULL) {
- DBG("U3 has no AGP, using full resource range\n");
+ if (u3_agp != NULL)
+ other = u3_agp;
+ else if (u4_pcie != NULL)
+ other = u4_pcie;
+
+ if (other == NULL) {
+ DBG("U3/4 has no AGP/PCIE, using full resource range\n");
return;
}
+ /* Fixup bus range vs. PCIE */
+ if (u4_pcie)
+ hose->last_busno = u4_pcie->first_busno - 1;
+
/* We "remove" the AGP resources from the resources allocated to HT,
* that is we create "holes". However, that code does assumptions
* that so far happen to be true (cross fingers...), typically that
@@ -679,7 +821,7 @@ static void __init setup_u3_ht(struct pci_controller* hose)
*/
cur = 0;
for (i=0; i<3; i++) {
- struct resource *res = &u3_agp->mem_resources[i];
+ struct resource *res = &other->mem_resources[i];
if (res->flags != IORESOURCE_MEM)
continue;
/* We don't care about "fine" resources */
@@ -722,7 +864,7 @@ static void __init setup_u3_ht(struct pci_controller* hose)
hose->mem_resources[cur-1].end = res->start - 1;
}
}
-#endif
+#endif /* CONFIG_PPC64 */
/*
* We assume that if we have a G3 powermac, we have one bridge called
@@ -733,24 +875,17 @@ static int __init add_bridge(struct device_node *dev)
{
int len;
struct pci_controller *hose;
-#ifdef CONFIG_PPC32
- struct reg_property *addr;
-#endif
+ struct resource rsrc;
char *disp_name;
int *bus_range;
- int primary = 1;
+ int primary = 1, has_address = 0;
DBG("Adding PCI host bridge %s\n", dev->full_name);
-#ifdef CONFIG_PPC32
- /* XXX fix this */
- addr = (struct reg_property *) get_property(dev, "reg", &len);
- if (addr == NULL || len < sizeof(*addr)) {
- printk(KERN_WARNING "Can't use %s: no address\n",
- dev->full_name);
- return -ENODEV;
- }
-#endif
+ /* Fetch host bridge registers address */
+ has_address = (of_address_to_resource(dev, 0, &rsrc) == 0);
+
+ /* Get bus range if any */
bus_range = (int *) get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
printk(KERN_WARNING "Can't get bus-range for %s, assume"
@@ -770,6 +905,8 @@ static int __init add_bridge(struct device_node *dev)
hose->last_busno = bus_range ? bus_range[1] : 0xff;
disp_name = NULL;
+
+ /* 64 bits only bridges */
#ifdef CONFIG_PPC64
if (device_is_compatible(dev, "u3-agp")) {
setup_u3_agp(hose);
@@ -779,28 +916,37 @@ static int __init add_bridge(struct device_node *dev)
setup_u3_ht(hose);
disp_name = "U3-HT";
primary = 1;
+ } else if (device_is_compatible(dev, "u4-pcie")) {
+ setup_u4_pcie(hose);
+ disp_name = "U4-PCIE";
+ primary = 0;
}
- printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number: %d->%d\n",
- disp_name, hose->first_busno, hose->last_busno);
-#else
+ printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number:"
+ " %d->%d\n", disp_name, hose->first_busno, hose->last_busno);
+#endif /* CONFIG_PPC64 */
+
+ /* 32 bits only bridges */
+#ifdef CONFIG_PPC32
if (device_is_compatible(dev, "uni-north")) {
- primary = setup_uninorth(hose, addr);
+ primary = setup_uninorth(hose, &rsrc);
disp_name = "UniNorth";
} else if (strcmp(dev->name, "pci") == 0) {
/* XXX assume this is a mpc106 (grackle) */
setup_grackle(hose);
disp_name = "Grackle (MPC106)";
} else if (strcmp(dev->name, "bandit") == 0) {
- setup_bandit(hose, addr);
+ setup_bandit(hose, &rsrc);
disp_name = "Bandit";
} else if (strcmp(dev->name, "chaos") == 0) {
- setup_chaos(hose, addr);
+ setup_chaos(hose, &rsrc);
disp_name = "Chaos";
primary = 0;
}
- printk(KERN_INFO "Found %s PCI host bridge at 0x%08lx. Firmware bus number: %d->%d\n",
- disp_name, addr->address, hose->first_busno, hose->last_busno);
-#endif
+ printk(KERN_INFO "Found %s PCI host bridge at 0x%08lx. "
+ "Firmware bus number: %d->%d\n",
+ disp_name, rsrc.start, hose->first_busno, hose->last_busno);
+#endif /* CONFIG_PPC32 */
+
DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
hose, hose->cfg_addr, hose->cfg_data);
@@ -814,8 +960,7 @@ static int __init add_bridge(struct device_node *dev)
return 0;
}
-static void __init
-pcibios_fixup_OF_interrupts(void)
+static void __init pcibios_fixup_OF_interrupts(void)
{
struct pci_dev* dev = NULL;
@@ -835,8 +980,7 @@ pcibios_fixup_OF_interrupts(void)
}
}
-void __init
-pmac_pcibios_fixup(void)
+void __init pmac_pcibios_fixup(void)
{
/* Fixup interrupts according to OF tree */
pcibios_fixup_OF_interrupts();
@@ -899,6 +1043,8 @@ void __init pmac_pci_init(void)
pci_setup_phb_io(u3_ht, 1);
if (u3_agp)
pci_setup_phb_io(u3_agp, 0);
+ if (u4_pcie)
+ pci_setup_phb_io(u4_pcie, 0);
/*
* On ppc64, fixup the IO resources on our host bridges as
@@ -911,7 +1057,8 @@ void __init pmac_pci_init(void)
/* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
* assume there is no P2P bridge on the AGP bus, which should be a
- * safe assumptions hopefully.
+ * safe assumption for now. We should do something better in the
+ * future, though.
*/
if (u3_agp) {
struct device_node *np = u3_agp->arch_data;
@@ -919,7 +1066,6 @@ void __init pmac_pci_init(void)
for (np = np->child; np; np = np->sibling)
PCI_DN(np)->busno = 0xf0;
}
-
/* pmac_check_ht_link(); */
/* Tell pci.c to not use the common resource allocation mechanism */
@@ -1126,7 +1272,8 @@ void pmac_pci_fixup_pciata(struct pci_dev* dev)
good:
pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
if ((progif & 5) != 5) {
- printk(KERN_INFO "Forcing PCI IDE into native mode: %s\n", pci_name(dev));
+ printk(KERN_INFO "Forcing PCI IDE into native mode: %s\n",
+ pci_name(dev));
(void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5);
if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
(progif & 5) != 5)
@@ -1152,7 +1299,8 @@ static void fixup_k2_sata(struct pci_dev* dev)
for (i = 0; i < 6; i++) {
dev->resource[i].start = dev->resource[i].end = 0;
dev->resource[i].flags = 0;
- pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i,
+ 0);
}
} else {
pci_read_config_word(dev, PCI_COMMAND, &cmd);
@@ -1161,7 +1309,8 @@ static void fixup_k2_sata(struct pci_dev* dev)
for (i = 0; i < 5; i++) {
dev->resource[i].start = dev->resource[i].end = 0;
dev->resource[i].flags = 0;
- pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i,
+ 0);
}
}
}
diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c
new file mode 100644
index 00000000000..4ffd2a9832a
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pfunc_base.c
@@ -0,0 +1,405 @@
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+
+#include <asm/pmac_feature.h>
+#include <asm/pmac_pfunc.h>
+
+#define DBG(fmt...) printk(fmt)
+
+static irqreturn_t macio_gpio_irq(int irq, void *data, struct pt_regs *regs)
+{
+ pmf_do_irq(data);
+
+ return IRQ_HANDLED;
+}
+
+static int macio_do_gpio_irq_enable(struct pmf_function *func)
+{
+ if (func->node->n_intrs < 1)
+ return -EINVAL;
+
+ return request_irq(func->node->intrs[0].line, macio_gpio_irq, 0,
+ func->node->name, func);
+}
+
+static int macio_do_gpio_irq_disable(struct pmf_function *func)
+{
+ if (func->node->n_intrs < 1)
+ return -EINVAL;
+
+ free_irq(func->node->intrs[0].line, func);
+ return 0;
+}
+
+static int macio_do_gpio_write(PMF_STD_ARGS, u8 value, u8 mask)
+{
+ u8 __iomem *addr = (u8 __iomem *)func->driver_data;
+ unsigned long flags;
+ u8 tmp;
+
+ /* Check polarity */
+ if (args && args->count && !args->u[0].v)
+ value = ~value;
+
+ /* Toggle the GPIO */
+ spin_lock_irqsave(&feature_lock, flags);
+ tmp = readb(addr);
+ tmp = (tmp & ~mask) | (value & mask);
+ DBG("Do write 0x%02x to GPIO %s (%p)\n",
+ tmp, func->node->full_name, addr);
+ writeb(tmp, addr);
+ spin_unlock_irqrestore(&feature_lock, flags);
+
+ return 0;
+}
+
+static int macio_do_gpio_read(PMF_STD_ARGS, u8 mask, int rshift, u8 xor)
+{
+ u8 __iomem *addr = (u8 __iomem *)func->driver_data;
+ u32 value;
+
+ /* Check if we have room for reply */
+ if (args == NULL || args->count == 0 || args->u[0].p == NULL)
+ return -EINVAL;
+
+ value = readb(addr);
+ *args->u[0].p = ((value & mask) >> rshift) ^ xor;
+
+ return 0;
+}
+
+static int macio_do_delay(PMF_STD_ARGS, u32 duration)
+{
+ /* assume we can sleep ! */
+ msleep((duration + 999) / 1000);
+ return 0;
+}
+
+static struct pmf_handlers macio_gpio_handlers = {
+ .irq_enable = macio_do_gpio_irq_enable,
+ .irq_disable = macio_do_gpio_irq_disable,
+ .write_gpio = macio_do_gpio_write,
+ .read_gpio = macio_do_gpio_read,
+ .delay = macio_do_delay,
+};
+
+static void macio_gpio_init_one(struct macio_chip *macio)
+{
+ struct device_node *gparent, *gp;
+
+ /*
+ * Find the "gpio" parent node
+ */
+
+ for (gparent = NULL;
+ (gparent = of_get_next_child(macio->of_node, gparent)) != NULL;)
+ if (strcmp(gparent->name, "gpio") == 0)
+ break;
+ if (gparent == NULL)
+ return;
+
+ DBG("Installing GPIO functions for macio %s\n",
+ macio->of_node->full_name);
+
+ /*
+ * Ok, got one, we don't need anything special to track them down, so
+ * we just create them all
+ */
+ for (gp = NULL; (gp = of_get_next_child(gparent, gp)) != NULL;) {
+ u32 *reg = (u32 *)get_property(gp, "reg", NULL);
+ unsigned long offset;
+ if (reg == NULL)
+ continue;
+ offset = *reg;
+ /* Deal with old-style device-trees. We can safely hard-code the
+ * offset for now, even if it's a bit gross...
+ */
+ if (offset < 0x50)
+ offset += 0x50;
+ offset += (unsigned long)macio->base;
+ pmf_register_driver(gp, &macio_gpio_handlers, (void *)offset);
+ }
+
+ DBG("Calling initial GPIO functions for macio %s\n",
+ macio->of_node->full_name);
+
+ /* And now we run all the init ones */
+ for (gp = NULL; (gp = of_get_next_child(gparent, gp)) != NULL;)
+ pmf_do_functions(gp, NULL, 0, PMF_FLAGS_ON_INIT, NULL);
+
+ /* Note: We do not at this point implement the "at sleep" or "at wake"
+ * functions. I have yet to find any for GPIOs anyway
+ */
+}
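A concrete illustration of the old-style offset fixup above (hypothetical reg values, not from the patch):

	/* *reg = 0x09  ->  0x09 < 0x50, so offset = 0x09 + 0x50 = 0x59
	 * driver_data   =  macio->base + 0x59 (MMIO address of that GPIO)
	 * A new-style *reg of 0x5a is already >= 0x50 and is used as-is.
	 */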
+
+static int macio_do_write_reg32(PMF_STD_ARGS, u32 offset, u32 value, u32 mask)
+{
+ struct macio_chip *macio = func->driver_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&feature_lock, flags);
+ MACIO_OUT32(offset, (MACIO_IN32(offset) & ~mask) | (value & mask));
+ spin_unlock_irqrestore(&feature_lock, flags);
+ return 0;
+}
+
+static int macio_do_read_reg32(PMF_STD_ARGS, u32 offset)
+{
+ struct macio_chip *macio = func->driver_data;
+
+ /* Check if we have room for reply */
+ if (args == NULL || args->count == 0 || args->u[0].p == NULL)
+ return -EINVAL;
+
+ *args->u[0].p = MACIO_IN32(offset);
+ return 0;
+}
+
+static int macio_do_write_reg8(PMF_STD_ARGS, u32 offset, u8 value, u8 mask)
+{
+ struct macio_chip *macio = func->driver_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&feature_lock, flags);
+ MACIO_OUT8(offset, (MACIO_IN8(offset) & ~mask) | (value & mask));
+ spin_unlock_irqrestore(&feature_lock, flags);
+ return 0;
+}
+
+static int macio_do_read_reg8(PMF_STD_ARGS, u32 offset)
+{
+ struct macio_chip *macio = func->driver_data;
+
+ /* Check if we have room for reply */
+ if (args == NULL || args->count == 0 || args->u[0].p == NULL)
+ return -EINVAL;
+
+ *((u8 *)(args->u[0].p)) = MACIO_IN8(offset);
+ return 0;
+}
+
+static int macio_do_read_reg32_msrx(PMF_STD_ARGS, u32 offset, u32 mask,
+ u32 shift, u32 xor)
+{
+ struct macio_chip *macio = func->driver_data;
+
+ /* Check if we have room for reply */
+ if (args == NULL || args->count == 0 || args->u[0].p == NULL)
+ return -EINVAL;
+
+ *args->u[0].p = ((MACIO_IN32(offset) & mask) >> shift) ^ xor;
+ return 0;
+}
+
+static int macio_do_read_reg8_msrx(PMF_STD_ARGS, u32 offset, u32 mask,
+ u32 shift, u32 xor)
+{
+ struct macio_chip *macio = func->driver_data;
+
+ /* Check if we have room for reply */
+ if (args == NULL || args->count == 0 || args->u[0].p == NULL)
+ return -EINVAL;
+
+ *((u8 *)(args->u[0].p)) = ((MACIO_IN8(offset) & mask) >> shift) ^ xor;
+ return 0;
+}
+
+static int macio_do_write_reg32_slm(PMF_STD_ARGS, u32 offset, u32 shift,
+ u32 mask)
+{
+ struct macio_chip *macio = func->driver_data;
+ unsigned long flags;
+ u32 tmp, val;
+
+ /* Check args */
+ if (args == NULL || args->count == 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&feature_lock, flags);
+ tmp = MACIO_IN32(offset);
+ val = args->u[0].v << shift;
+ tmp = (tmp & ~mask) | (val & mask);
+ MACIO_OUT32(offset, tmp);
+ spin_unlock_irqrestore(&feature_lock, flags);
+ return 0;
+}
+
+static int macio_do_write_reg8_slm(PMF_STD_ARGS, u32 offset, u32 shift,
+ u32 mask)
+{
+ struct macio_chip *macio = func->driver_data;
+ unsigned long flags;
+ u32 tmp, val;
+
+ /* Check args */
+ if (args == NULL || args->count == 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&feature_lock, flags);
+ tmp = MACIO_IN8(offset);
+ val = args->u[0].v << shift;
+ tmp = (tmp & ~mask) | (val & mask);
+ MACIO_OUT8(offset, tmp);
+ spin_unlock_irqrestore(&feature_lock, flags);
+ return 0;
+}
+
+static struct pmf_handlers macio_mmio_handlers = {
+ .write_reg32 = macio_do_write_reg32,
+ .read_reg32 = macio_do_read_reg32,
+ .write_reg8 = macio_do_write_reg8,
+ .read_reg8 = macio_do_read_reg8,
+ .read_reg32_msrx = macio_do_read_reg32_msrx,
+ .read_reg8_msrx = macio_do_read_reg8_msrx,
+ .write_reg32_slm = macio_do_write_reg32_slm,
+ .write_reg8_slm = macio_do_write_reg8_slm,
+ .delay = macio_do_delay,
+};
+
+static void macio_mmio_init_one(struct macio_chip *macio)
+{
+ DBG("Installing MMIO functions for macio %s\n",
+ macio->of_node->full_name);
+
+ pmf_register_driver(macio->of_node, &macio_mmio_handlers, macio);
+}
+
+static struct device_node *unin_hwclock;
+
+static int unin_do_write_reg32(PMF_STD_ARGS, u32 offset, u32 value, u32 mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&feature_lock, flags);
+ /* This is fairly bogus in Darwin, but it should work for our needs
+ * implemented this way:
+ */
+ UN_OUT(offset, (UN_IN(offset) & ~mask) | (value & mask));
+ spin_unlock_irqrestore(&feature_lock, flags);
+ return 0;
+}
+
+
+static struct pmf_handlers unin_mmio_handlers = {
+ .write_reg32 = unin_do_write_reg32,
+ .delay = macio_do_delay,
+};
+
+static void uninorth_install_pfunc(void)
+{
+ struct device_node *np;
+
+ DBG("Installing functions for UniN %s\n",
+ uninorth_node->full_name);
+
+ /*
+ * Install handlers for the bridge itself
+ */
+ pmf_register_driver(uninorth_node, &unin_mmio_handlers, NULL);
+ pmf_do_functions(uninorth_node, NULL, 0, PMF_FLAGS_ON_INIT, NULL);
+
+
+ /*
+ * Install handlers for the hwclock child if any
+ */
+ for (np = NULL; (np = of_get_next_child(uninorth_node, np)) != NULL;)
+ if (strcmp(np->name, "hw-clock") == 0) {
+ unin_hwclock = np;
+ break;
+ }
+ if (unin_hwclock) {
+ DBG("Installing functions for UniN clock %s\n",
+ unin_hwclock->full_name);
+ pmf_register_driver(unin_hwclock, &unin_mmio_handlers, NULL);
+ pmf_do_functions(unin_hwclock, NULL, 0, PMF_FLAGS_ON_INIT,
+ NULL);
+ }
+}
+
+/* We export this as the SMP code might init us early */
+int __init pmac_pfunc_base_install(void)
+{
+ static int pfbase_inited;
+ int i;
+
+ if (pfbase_inited)
+ return 0;
+ pfbase_inited = 1;
+
+
+ DBG("Installing base platform functions...\n");
+
+ /*
+ * Locate mac-io chips and install handlers
+ */
+ for (i = 0 ; i < MAX_MACIO_CHIPS; i++) {
+ if (macio_chips[i].of_node) {
+ macio_mmio_init_one(&macio_chips[i]);
+ macio_gpio_init_one(&macio_chips[i]);
+ }
+ }
+
+ /*
+ * Install handlers for northbridge and direct mapped hwclock
+ * if any. We do not implement the config space access callback
+ * which is only ever used for functions that we do not call in
+ * the current driver (enabling/disabling cells in U2, mostly used
+ * to restore the PCI settings, we do that differently)
+ */
+ if (uninorth_node && uninorth_base)
+ uninorth_install_pfunc();
+
+ DBG("All base functions installed\n");
+
+ return 0;
+}
+
+arch_initcall(pmac_pfunc_base_install);
+
+#ifdef CONFIG_PM
+
+/* These can be called by pmac_feature. Ultimately, I should use a sysdev
+ * or a device, but for now, that's good enough until I sort out some
+ * ordering issues. Also, we do not bother with GPIOs, as so far I have yet
+ * to see a case where a GPIO function has the on-suspend or on-resume bit
+ */
+ */
+void pmac_pfunc_base_suspend(void)
+{
+ int i;
+
+ for (i = 0 ; i < MAX_MACIO_CHIPS; i++) {
+ if (macio_chips[i].of_node)
+ pmf_do_functions(macio_chips[i].of_node, NULL, 0,
+ PMF_FLAGS_ON_SLEEP, NULL);
+ }
+ if (uninorth_node)
+ pmf_do_functions(uninorth_node, NULL, 0,
+ PMF_FLAGS_ON_SLEEP, NULL);
+ if (unin_hwclock)
+ pmf_do_functions(unin_hwclock, NULL, 0,
+ PMF_FLAGS_ON_SLEEP, NULL);
+}
+
+void pmac_pfunc_base_resume(void)
+{
+ int i;
+
+ if (unin_hwclock)
+ pmf_do_functions(unin_hwclock, NULL, 0,
+ PMF_FLAGS_ON_WAKE, NULL);
+ if (uninorth_node)
+ pmf_do_functions(uninorth_node, NULL, 0,
+ PMF_FLAGS_ON_WAKE, NULL);
+ for (i = 0 ; i < MAX_MACIO_CHIPS; i++) {
+ if (macio_chips[i].of_node)
+ pmf_do_functions(macio_chips[i].of_node, NULL, 0,
+ PMF_FLAGS_ON_WAKE, NULL);
+ }
+}
+
+#endif /* CONFIG_PM */
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c
new file mode 100644
index 00000000000..c32c623001d
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pfunc_core.c
@@ -0,0 +1,989 @@
+/*
+ *
+ * FIXME: Properly make this race free with refcounting etc...
+ *
+ * FIXME: LOCKING !!!
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+
+#include <asm/semaphore.h>
+#include <asm/prom.h>
+#include <asm/pmac_pfunc.h>
+
+/* Debug */
+#define LOG_PARSE(fmt...)
+#define LOG_ERROR(fmt...) printk(fmt)
+#define LOG_BLOB(t,b,c)
+#define DBG(fmt...) printk(fmt)
+
+/* Command numbers */
+#define PMF_CMD_LIST 0
+#define PMF_CMD_WRITE_GPIO 1
+#define PMF_CMD_READ_GPIO 2
+#define PMF_CMD_WRITE_REG32 3
+#define PMF_CMD_READ_REG32 4
+#define PMF_CMD_WRITE_REG16 5
+#define PMF_CMD_READ_REG16 6
+#define PMF_CMD_WRITE_REG8 7
+#define PMF_CMD_READ_REG8 8
+#define PMF_CMD_DELAY 9
+#define PMF_CMD_WAIT_REG32 10
+#define PMF_CMD_WAIT_REG16 11
+#define PMF_CMD_WAIT_REG8 12
+#define PMF_CMD_READ_I2C 13
+#define PMF_CMD_WRITE_I2C 14
+#define PMF_CMD_RMW_I2C 15
+#define PMF_CMD_GEN_I2C 16
+#define PMF_CMD_SHIFT_BYTES_RIGHT 17
+#define PMF_CMD_SHIFT_BYTES_LEFT 18
+#define PMF_CMD_READ_CFG 19
+#define PMF_CMD_WRITE_CFG 20
+#define PMF_CMD_RMW_CFG 21
+#define PMF_CMD_READ_I2C_SUBADDR 22
+#define PMF_CMD_WRITE_I2C_SUBADDR 23
+#define PMF_CMD_SET_I2C_MODE 24
+#define PMF_CMD_RMW_I2C_SUBADDR 25
+#define PMF_CMD_READ_REG32_MASK_SHR_XOR 26
+#define PMF_CMD_READ_REG16_MASK_SHR_XOR 27
+#define PMF_CMD_READ_REG8_MASK_SHR_XOR 28
+#define PMF_CMD_WRITE_REG32_SHL_MASK 29
+#define PMF_CMD_WRITE_REG16_SHL_MASK 30
+#define PMF_CMD_WRITE_REG8_SHL_MASK 31
+#define PMF_CMD_MASK_AND_COMPARE 32
+#define PMF_CMD_COUNT 33
+
+/* This structure holds the state of the parser while walking through
+ * a function definition
+ */
+struct pmf_cmd {
+ const void *cmdptr;
+ const void *cmdend;
+ struct pmf_function *func;
+ void *instdata;
+ struct pmf_args *args;
+ int error;
+};
+
+#if 0
+/* Debug output */
+static void print_blob(const char *title, const void *blob, int bytes)
+{
+ printk("%s", title);
+ while(bytes--) {
+ printk("%02x ", *((u8 *)blob));
+ blob += 1;
+ }
+ printk("\n");
+}
+#endif
+
+/*
+ * Parser helpers
+ */
+
+static u32 pmf_next32(struct pmf_cmd *cmd)
+{
+ u32 value;
+ if ((cmd->cmdend - cmd->cmdptr) < 4) {
+ cmd->error = 1;
+ return 0;
+ }
+ value = *((u32 *)cmd->cmdptr);
+ cmd->cmdptr += 4;
+ return value;
+}
+
+static const void* pmf_next_blob(struct pmf_cmd *cmd, int count)
+{
+ const void *value;
+ if ((cmd->cmdend - cmd->cmdptr) < count) {
+ cmd->error = 1;
+ return NULL;
+ }
+ value = cmd->cmdptr;
+ cmd->cmdptr += count;
+ return value;
+}
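For orientation, this is what one of the flattened command streams walked by these helpers might look like (hypothetical values; the opcodes are the PMF_CMD_* constants defined above):

	/* "write_gpio value=0x01 mask=0x01" as three 32-bit words:
	 *
	 *   0x00000001   PMF_CMD_WRITE_GPIO  (opcode, fetched by pmf_parse_one)
	 *   0x00000001   value               (pmf_next32 in the parser)
	 *   0x00000001   mask                (pmf_next32 in the parser)
	 *
	 * Blob-carrying commands (e.g. PMF_CMD_WRITE_I2C) follow their byte
	 * count with that many raw bytes, fetched via pmf_next_blob().
	 */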
+
+/*
+ * Individual command parsers
+ */
+
+#define PMF_PARSE_CALL(name, cmd, handlers, p...) \
+ do { \
+ if (cmd->error) \
+ return -ENXIO; \
+ if (handlers == NULL) \
+ return 0; \
+ if (handlers->name) \
+ return handlers->name(cmd->func, cmd->instdata, \
+ cmd->args, p); \
+ return -1; \
+ } while(0) \
+
+
+static int pmf_parser_write_gpio(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u8 value = (u8)pmf_next32(cmd);
+ u8 mask = (u8)pmf_next32(cmd);
+
+ LOG_PARSE("pmf: write_gpio(value: %02x, mask: %02x)\n", value, mask);
+
+ PMF_PARSE_CALL(write_gpio, cmd, h, value, mask);
+}
+
+static int pmf_parser_read_gpio(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u8 mask = (u8)pmf_next32(cmd);
+ int rshift = (int)pmf_next32(cmd);
+ u8 xor = (u8)pmf_next32(cmd);
+
+ LOG_PARSE("pmf: read_gpio(mask: %02x, rshift: %d, xor: %02x)\n",
+ mask, rshift, xor);
+
+ PMF_PARSE_CALL(read_gpio, cmd, h, mask, rshift, xor);
+}
+
+static int pmf_parser_write_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 value = pmf_next32(cmd);
+ u32 mask = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: write_reg32(offset: %08x, value: %08x, mask: %08x)\n",
+ offset, value, mask);
+
+ PMF_PARSE_CALL(write_reg32, cmd, h, offset, value, mask);
+}
+
+static int pmf_parser_read_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: read_reg32(offset: %08x)\n", offset);
+
+ PMF_PARSE_CALL(read_reg32, cmd, h, offset);
+}
+
+
+static int pmf_parser_write_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u16 value = (u16)pmf_next32(cmd);
+ u16 mask = (u16)pmf_next32(cmd);
+
+ LOG_PARSE("pmf: write_reg16(offset: %08x, value: %04x, mask: %04x)\n",
+ offset, value, mask);
+
+ PMF_PARSE_CALL(write_reg16, cmd, h, offset, value, mask);
+}
+
+static int pmf_parser_read_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: read_reg16(offset: %08x)\n", offset);
+
+ PMF_PARSE_CALL(read_reg16, cmd, h, offset);
+}
+
+
+static int pmf_parser_write_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u8 value = (u8)pmf_next32(cmd);
+ u8 mask = (u8)pmf_next32(cmd);
+
+ LOG_PARSE("pmf: write_reg8(offset: %08x, value: %02x, mask: %02x)\n",
+ offset, value, mask);
+
+ PMF_PARSE_CALL(write_reg8, cmd, h, offset, value, mask);
+}
+
+static int pmf_parser_read_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: read_reg8(offset: %08x)\n", offset);
+
+ PMF_PARSE_CALL(read_reg8, cmd, h, offset);
+}
+
+static int pmf_parser_delay(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 duration = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: delay(duration: %d us)\n", duration);
+
+ PMF_PARSE_CALL(delay, cmd, h, duration);
+}
+
+static int pmf_parser_wait_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 value = pmf_next32(cmd);
+ u32 mask = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: wait_reg32(offset: %08x, comp_value: %08x,mask: %08x)\n",
+ offset, value, mask);
+
+ PMF_PARSE_CALL(wait_reg32, cmd, h, offset, value, mask);
+}
+
+static int pmf_parser_wait_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u16 value = (u16)pmf_next32(cmd);
+ u16 mask = (u16)pmf_next32(cmd);
+
+ LOG_PARSE("pmf: wait_reg16(offset: %08x, comp_value: %04x,mask: %04x)\n",
+ offset, value, mask);
+
+ PMF_PARSE_CALL(wait_reg16, cmd, h, offset, value, mask);
+}
+
+static int pmf_parser_wait_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u8 value = (u8)pmf_next32(cmd);
+ u8 mask = (u8)pmf_next32(cmd);
+
+ LOG_PARSE("pmf: wait_reg8(offset: %08x, comp_value: %02x,mask: %02x)\n",
+ offset, value, mask);
+
+ PMF_PARSE_CALL(wait_reg8, cmd, h, offset, value, mask);
+}
+
+static int pmf_parser_read_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 bytes = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: read_i2c(bytes: %ud)\n", bytes);
+
+ PMF_PARSE_CALL(read_i2c, cmd, h, bytes);
+}
+
+static int pmf_parser_write_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 bytes = pmf_next32(cmd);
+ const void *blob = pmf_next_blob(cmd, bytes);
+
+ LOG_PARSE("pmf: write_i2c(bytes: %ud) ...\n", bytes);
+ LOG_BLOB("pmf: data: \n", blob, bytes);
+
+ PMF_PARSE_CALL(write_i2c, cmd, h, bytes, blob);
+}
+
+
+static int pmf_parser_rmw_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 maskbytes = pmf_next32(cmd);
+ u32 valuesbytes = pmf_next32(cmd);
+ u32 totalbytes = pmf_next32(cmd);
+ const void *maskblob = pmf_next_blob(cmd, maskbytes);
+ const void *valuesblob = pmf_next_blob(cmd, valuesbytes);
+
+ LOG_PARSE("pmf: rmw_i2c(maskbytes: %ud, valuebytes: %ud, "
+ "totalbytes: %d) ...\n",
+ maskbytes, valuesbytes, totalbytes);
+ LOG_BLOB("pmf: mask data: \n", maskblob, maskbytes);
+ LOG_BLOB("pmf: values data: \n", valuesblob, valuesbytes);
+
+ PMF_PARSE_CALL(rmw_i2c, cmd, h, maskbytes, valuesbytes, totalbytes,
+ maskblob, valuesblob);
+}
+
+static int pmf_parser_read_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 bytes = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: read_cfg(offset: %x, bytes: %ud)\n", offset, bytes);
+
+ PMF_PARSE_CALL(read_cfg, cmd, h, offset, bytes);
+}
+
+
+static int pmf_parser_write_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 bytes = pmf_next32(cmd);
+ const void *blob = pmf_next_blob(cmd, bytes);
+
+ LOG_PARSE("pmf: write_cfg(offset: %x, bytes: %ud)\n", offset, bytes);
+ LOG_BLOB("pmf: data: \n", blob, bytes);
+
+ PMF_PARSE_CALL(write_cfg, cmd, h, offset, bytes, blob);
+}
+
+static int pmf_parser_rmw_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 maskbytes = pmf_next32(cmd);
+ u32 valuesbytes = pmf_next32(cmd);
+ u32 totalbytes = pmf_next32(cmd);
+ const void *maskblob = pmf_next_blob(cmd, maskbytes);
+ const void *valuesblob = pmf_next_blob(cmd, valuesbytes);
+
+ LOG_PARSE("pmf: rmw_cfg(maskbytes: %ud, valuebytes: %ud,"
+ " totalbytes: %d) ...\n",
+ maskbytes, valuesbytes, totalbytes);
+ LOG_BLOB("pmf: mask data: \n", maskblob, maskbytes);
+ LOG_BLOB("pmf: values data: \n", valuesblob, valuesbytes);
+
+ PMF_PARSE_CALL(rmw_cfg, cmd, h, offset, maskbytes, valuesbytes,
+ totalbytes, maskblob, valuesblob);
+}
+
+
+static int pmf_parser_read_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u8 subaddr = (u8)pmf_next32(cmd);
+ u32 bytes = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: read_i2c_sub(subaddr: %x, bytes: %ud)\n",
+ subaddr, bytes);
+
+ PMF_PARSE_CALL(read_i2c_sub, cmd, h, subaddr, bytes);
+}
+
+static int pmf_parser_write_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u8 subaddr = (u8)pmf_next32(cmd);
+ u32 bytes = pmf_next32(cmd);
+ const void *blob = pmf_next_blob(cmd, bytes);
+
+ LOG_PARSE("pmf: write_i2c_sub(subaddr: %x, bytes: %ud) ...\n",
+ subaddr, bytes);
+ LOG_BLOB("pmf: data: \n", blob, bytes);
+
+ PMF_PARSE_CALL(write_i2c_sub, cmd, h, subaddr, bytes, blob);
+}
+
+static int pmf_parser_set_i2c_mode(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u32 mode = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: set_i2c_mode(mode: %d)\n", mode);
+
+ PMF_PARSE_CALL(set_i2c_mode, cmd, h, mode);
+}
+
+
+static int pmf_parser_rmw_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h)
+{
+ u8 subaddr = (u8)pmf_next32(cmd);
+ u32 maskbytes = pmf_next32(cmd);
+ u32 valuesbytes = pmf_next32(cmd);
+ u32 totalbytes = pmf_next32(cmd);
+ const void *maskblob = pmf_next_blob(cmd, maskbytes);
+ const void *valuesblob = pmf_next_blob(cmd, valuesbytes);
+
+ LOG_PARSE("pmf: rmw_i2c_sub(subaddr: %x, maskbytes: %ud, valuebytes: %ud"
+ ", totalbytes: %d) ...\n",
+ subaddr, maskbytes, valuesbytes, totalbytes);
+ LOG_BLOB("pmf: mask data: \n", maskblob, maskbytes);
+ LOG_BLOB("pmf: values data: \n", valuesblob, valuesbytes);
+
+ PMF_PARSE_CALL(rmw_i2c_sub, cmd, h, subaddr, maskbytes, valuesbytes,
+ totalbytes, maskblob, valuesblob);
+}
+
+static int pmf_parser_read_reg32_msrx(struct pmf_cmd *cmd,
+ struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 mask = pmf_next32(cmd);
+ u32 shift = pmf_next32(cmd);
+ u32 xor = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: read_reg32_msrx(offset: %x, mask: %x, shift: %x,"
+ " xor: %x\n", offset, mask, shift, xor);
+
+ PMF_PARSE_CALL(read_reg32_msrx, cmd, h, offset, mask, shift, xor);
+}
+
+static int pmf_parser_read_reg16_msrx(struct pmf_cmd *cmd,
+ struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 mask = pmf_next32(cmd);
+ u32 shift = pmf_next32(cmd);
+ u32 xor = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: read_reg16_msrx(offset: %x, mask: %x, shift: %x,"
+ " xor: %x\n", offset, mask, shift, xor);
+
+ PMF_PARSE_CALL(read_reg16_msrx, cmd, h, offset, mask, shift, xor);
+}
+static int pmf_parser_read_reg8_msrx(struct pmf_cmd *cmd,
+ struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 mask = pmf_next32(cmd);
+ u32 shift = pmf_next32(cmd);
+ u32 xor = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: read_reg8_msrx(offset: %x, mask: %x, shift: %x,"
+ " xor: %x\n", offset, mask, shift, xor);
+
+ PMF_PARSE_CALL(read_reg8_msrx, cmd, h, offset, mask, shift, xor);
+}
+
+static int pmf_parser_write_reg32_slm(struct pmf_cmd *cmd,
+ struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 shift = pmf_next32(cmd);
+ u32 mask = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: write_reg32_slm(offset: %x, shift: %x, mask: %x\n",
+ offset, shift, mask);
+
+ PMF_PARSE_CALL(write_reg32_slm, cmd, h, offset, shift, mask);
+}
+
+static int pmf_parser_write_reg16_slm(struct pmf_cmd *cmd,
+ struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 shift = pmf_next32(cmd);
+ u32 mask = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: write_reg16_slm(offset: %x, shift: %x, mask: %x\n",
+ offset, shift, mask);
+
+ PMF_PARSE_CALL(write_reg16_slm, cmd, h, offset, shift, mask);
+}
+
+static int pmf_parser_write_reg8_slm(struct pmf_cmd *cmd,
+ struct pmf_handlers *h)
+{
+ u32 offset = pmf_next32(cmd);
+ u32 shift = pmf_next32(cmd);
+ u32 mask = pmf_next32(cmd);
+
+ LOG_PARSE("pmf: write_reg8_slm(offset: %x, shift: %x, mask: %x\n",
+ offset, shift, mask);
+
+ PMF_PARSE_CALL(write_reg8_slm, cmd, h, offset, shift, mask);
+}
+
+static int pmf_parser_mask_and_compare(struct pmf_cmd *cmd,
+ struct pmf_handlers *h)
+{
+ u32 bytes = pmf_next32(cmd);
+ const void *maskblob = pmf_next_blob(cmd, bytes);
+ const void *valuesblob = pmf_next_blob(cmd, bytes);
+
+ LOG_PARSE("pmf: mask_and_compare(length: %ud ...\n", bytes);
+ LOG_BLOB("pmf: mask data: \n", maskblob, bytes);
+ LOG_BLOB("pmf: values data: \n", valuesblob, bytes);
+
+ PMF_PARSE_CALL(mask_and_compare, cmd, h,
+ bytes, maskblob, valuesblob);
+}
+
+
+typedef int (*pmf_cmd_parser_t)(struct pmf_cmd *cmd, struct pmf_handlers *h);
+
+static pmf_cmd_parser_t pmf_parsers[PMF_CMD_COUNT] =
+{
+ NULL,
+ pmf_parser_write_gpio,
+ pmf_parser_read_gpio,
+ pmf_parser_write_reg32,
+ pmf_parser_read_reg32,
+ pmf_parser_write_reg16,
+ pmf_parser_read_reg16,
+ pmf_parser_write_reg8,
+ pmf_parser_read_reg8,
+ pmf_parser_delay,
+ pmf_parser_wait_reg32,
+ pmf_parser_wait_reg16,
+ pmf_parser_wait_reg8,
+ pmf_parser_read_i2c,
+ pmf_parser_write_i2c,
+ pmf_parser_rmw_i2c,
+ NULL, /* Bogus command */
+ NULL, /* Shift bytes right: NYI */
+ NULL, /* Shift bytes left: NYI */
+ pmf_parser_read_cfg,
+ pmf_parser_write_cfg,
+ pmf_parser_rmw_cfg,
+ pmf_parser_read_i2c_sub,
+ pmf_parser_write_i2c_sub,
+ pmf_parser_set_i2c_mode,
+ pmf_parser_rmw_i2c_sub,
+ pmf_parser_read_reg32_msrx,
+ pmf_parser_read_reg16_msrx,
+ pmf_parser_read_reg8_msrx,
+ pmf_parser_write_reg32_slm,
+ pmf_parser_write_reg16_slm,
+ pmf_parser_write_reg8_slm,
+ pmf_parser_mask_and_compare,
+};
+
+struct pmf_device {
+ struct list_head link;
+ struct device_node *node;
+ struct pmf_handlers *handlers;
+ struct list_head functions;
+ struct kref ref;
+};
+
+static LIST_HEAD(pmf_devices);
+static spinlock_t pmf_lock = SPIN_LOCK_UNLOCKED;
+
+static void pmf_release_device(struct kref *kref)
+{
+ struct pmf_device *dev = container_of(kref, struct pmf_device, ref);
+ kfree(dev);
+}
+
+static inline void pmf_put_device(struct pmf_device *dev)
+{
+ kref_put(&dev->ref, pmf_release_device);
+}
+
+static inline struct pmf_device *pmf_get_device(struct pmf_device *dev)
+{
+ kref_get(&dev->ref);
+ return dev;
+}
+
+static inline struct pmf_device *pmf_find_device(struct device_node *np)
+{
+ struct pmf_device *dev;
+
+ list_for_each_entry(dev, &pmf_devices, link) {
+ if (dev->node == np)
+ return pmf_get_device(dev);
+ }
+ return NULL;
+}
+
+static int pmf_parse_one(struct pmf_function *func,
+ struct pmf_handlers *handlers,
+ void *instdata, struct pmf_args *args)
+{
+ struct pmf_cmd cmd;
+ u32 ccode;
+ int count, rc;
+
+ cmd.cmdptr = func->data;
+ cmd.cmdend = func->data + func->length;
+ cmd.func = func;
+ cmd.instdata = instdata;
+ cmd.args = args;
+ cmd.error = 0;
+
+ LOG_PARSE("pmf: func %s, %d bytes, %s...\n",
+ func->name, func->length,
+ handlers ? "executing" : "parsing");
+
+ /* One subcommand to parse for now */
+ count = 1;
+
+ while(count-- && cmd.cmdptr < cmd.cmdend) {
+ /* Get opcode */
+ ccode = pmf_next32(&cmd);
+ /* Check if we are hitting a command list, fetch new count */
+ if (ccode == 0) {
+ count = pmf_next32(&cmd) - 1;
+ ccode = pmf_next32(&cmd);
+ }
+ if (cmd.error) {
+ LOG_ERROR("pmf: parse error, not enough data\n");
+ return -ENXIO;
+ }
+ if (ccode >= PMF_CMD_COUNT) {
+ LOG_ERROR("pmf: command code %d unknown !\n", ccode);
+ return -ENXIO;
+ }
+ if (pmf_parsers[ccode] == NULL) {
+ LOG_ERROR("pmf: no parser for command %d !\n", ccode);
+ return -ENXIO;
+ }
+ rc = pmf_parsers[ccode](&cmd, handlers);
+ if (rc != 0) {
+ LOG_ERROR("pmf: parser for command %d returned"
+ " error %d\n", ccode, rc);
+ return rc;
+ }
+ }
+
+ /* We are doing an initial parse pass, we need to adjust the size */
+ if (handlers == NULL)
+ func->length = cmd.cmdptr - func->data;
+
+ return 0;
+}
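The stream pmf_parse_one() walks is a flat sequence of 32-bit words: an opcode of 0 announces a list whose length is the next word, and any other opcode indexes pmf_parsers[] above. A hypothetical blob illustrating that shape (values invented; each command's argument layout is defined by its own parser):

static const u32 example_cmd_stream[] = {
	0,	/* opcode 0: a command list follows */
	2,	/* the list holds two sub-commands */
	9,	/* index 9 in pmf_parsers[] above: pmf_parser_delay */
	/* ... delay's arguments, then the second sub-command ... */
};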
+
+static int pmf_add_function_prop(struct pmf_device *dev, void *driverdata,
+ const char *name, u32 *data,
+ unsigned int length)
+{
+ int count = 0;
+ struct pmf_function *func = NULL;
+
+ DBG("pmf: Adding functions for platform-do-%s\n", name);
+
+ while (length >= 12) {
+ /* Allocate a structure */
+ func = kzalloc(sizeof(struct pmf_function), GFP_KERNEL);
+ if (func == NULL)
+ goto bail;
+ kref_init(&func->ref);
+ INIT_LIST_HEAD(&func->irq_clients);
+ func->node = dev->node;
+ func->driver_data = driverdata;
+ func->name = name;
+ func->phandle = data[0];
+ func->flags = data[1];
+ data += 2;
+ length -= 8;
+ func->data = data;
+ func->length = length;
+ func->dev = dev;
+ DBG("pmf: idx %d: flags=%08x, phandle=%08x "
+ " %d bytes remaining, parsing...\n",
+ count+1, func->flags, func->phandle, length);
+ if (pmf_parse_one(func, NULL, NULL, NULL)) {
+ kfree(func);
+ goto bail;
+ }
+ length -= func->length;
+ data = (u32 *)(((u8 *)data) + func->length);
+ list_add(&func->link, &dev->functions);
+ pmf_get_device(dev);
+ count++;
+ }
+ bail:
+ DBG("pmf: Added %d functions\n", count);
+
+ return count;
+}
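Each entry pmf_add_function_prop() consumes from a "platform-do-<name>" property therefore looks roughly like the sketch below; only the two header words and the trailing opcode stream are visible in this code, the struct name is invented:

struct pmf_prop_entry {		/* hypothetical layout */
	u32 phandle;		/* data[0]: node the function targets */
	u32 flags;		/* data[1]: PMF_FLAGS_* bits */
	u32 cmds[];		/* opcode stream; its length is found by the
				   initial pmf_parse_one() pass */
};				/* repeated until the property is exhausted */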
+
+static int pmf_add_functions(struct pmf_device *dev, void *driverdata)
+{
+ struct property *pp;
+#define PP_PREFIX "platform-do-"
+ const int plen = strlen(PP_PREFIX);
+ int count = 0;
+
+ for (pp = dev->node->properties; pp != 0; pp = pp->next) {
+ char *name;
+ if (strncmp(pp->name, PP_PREFIX, plen) != 0)
+ continue;
+ name = pp->name + plen;
+ if (strlen(name) && pp->length >= 12)
+ count += pmf_add_function_prop(dev, driverdata, name,
+ (u32 *)pp->value,
+ pp->length);
+ }
+ return count;
+}
+
+
+int pmf_register_driver(struct device_node *np,
+ struct pmf_handlers *handlers,
+ void *driverdata)
+{
+ struct pmf_device *dev;
+ unsigned long flags;
+ int rc = 0;
+
+ if (handlers == NULL)
+ return -EINVAL;
+
+ DBG("pmf: registering driver for node %s\n", np->full_name);
+
+ spin_lock_irqsave(&pmf_lock, flags);
+ dev = pmf_find_device(np);
+ spin_unlock_irqrestore(&pmf_lock, flags);
+ if (dev != NULL) {
+ DBG("pmf: already there !\n");
+ pmf_put_device(dev);
+ return -EBUSY;
+ }
+
+ dev = kzalloc(sizeof(struct pmf_device), GFP_KERNEL);
+ if (dev == NULL) {
+ DBG("pmf: no memory !\n");
+ return -ENOMEM;
+ }
+ kref_init(&dev->ref);
+ dev->node = of_node_get(np);
+ dev->handlers = handlers;
+ INIT_LIST_HEAD(&dev->functions);
+
+ rc = pmf_add_functions(dev, driverdata);
+ if (rc == 0) {
+ DBG("pmf: no functions, disposing.. \n");
+ of_node_put(np);
+ kfree(dev);
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&pmf_lock, flags);
+ list_add(&dev->link, &pmf_devices);
+ spin_unlock_irqrestore(&pmf_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pmf_register_driver);
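For orientation, a minimal sketch of a backend registering with this interface. Only pmf_register_driver()'s signature and the begin/end/owner members dereferenced elsewhere in this file are taken from the patch; the function and variable names are invented and the handler bodies are stubs:

#include <linux/module.h>
#include <asm/prom.h>
#include <asm/pmac_pfunc.h>

static void *my_pfunc_begin(struct pmf_function *func, struct pmf_args *args)
{
	return NULL;			/* no per-call instance data */
}

static void my_pfunc_end(struct pmf_function *func, void *instdata)
{
}

static struct pmf_handlers my_pfunc_handlers = {
	.begin = my_pfunc_begin,
	.end   = my_pfunc_end,
	.owner = THIS_MODULE,
};

static int my_pfunc_probe(struct device_node *np)
{
	/* np carries the "platform-do-*" properties to be parsed */
	return pmf_register_driver(np, &my_pfunc_handlers, NULL);
}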
+
+struct pmf_function *pmf_get_function(struct pmf_function *func)
+{
+ if (!try_module_get(func->dev->handlers->owner))
+ return NULL;
+ kref_get(&func->ref);
+ return func;
+}
+EXPORT_SYMBOL_GPL(pmf_get_function);
+
+static void pmf_release_function(struct kref *kref)
+{
+ struct pmf_function *func =
+ container_of(kref, struct pmf_function, ref);
+ pmf_put_device(func->dev);
+ kfree(func);
+}
+
+static inline void __pmf_put_function(struct pmf_function *func)
+{
+ kref_put(&func->ref, pmf_release_function);
+}
+
+void pmf_put_function(struct pmf_function *func)
+{
+ if (func == NULL)
+ return;
+ module_put(func->dev->handlers->owner);
+ __pmf_put_function(func);
+}
+EXPORT_SYMBOL_GPL(pmf_put_function);
+
+void pmf_unregister_driver(struct device_node *np)
+{
+ struct pmf_device *dev;
+ unsigned long flags;
+
+ DBG("pmf: unregistering driver for node %s\n", np->full_name);
+
+ spin_lock_irqsave(&pmf_lock, flags);
+ dev = pmf_find_device(np);
+ if (dev == NULL) {
+ DBG("pmf: not such driver !\n");
+ spin_unlock_irqrestore(&pmf_lock, flags);
+ return;
+ }
+ list_del(&dev->link);
+
+ while(!list_empty(&dev->functions)) {
+ struct pmf_function *func =
+ list_entry(dev->functions.next, typeof(*func), link);
+ list_del(&func->link);
+ __pmf_put_function(func);
+ }
+
+ pmf_put_device(dev);
+ spin_unlock_irqrestore(&pmf_lock, flags);
+}
+EXPORT_SYMBOL_GPL(pmf_unregister_driver);
+
+struct pmf_function *__pmf_find_function(struct device_node *target,
+ const char *name, u32 flags)
+{
+ struct device_node *actor = of_node_get(target);
+ struct pmf_device *dev;
+ struct pmf_function *func, *result = NULL;
+ char fname[64];
+ u32 *prop, ph;
+
+ /*
+ * Look for a "platform-*" function reference. If we can't find
+ * one, then we fall back to a direct call attempt
+ */
+ snprintf(fname, 63, "platform-%s", name);
+ prop = (u32 *)get_property(target, fname, NULL);
+ if (prop == NULL)
+ goto find_it;
+ ph = *prop;
+ if (ph == 0)
+ goto find_it;
+
+ /*
+ * Ok, now try to find the actor. If we can't find it, we fail,
+ * there is no point in falling back there
+ */
+ of_node_put(actor);
+ actor = of_find_node_by_phandle(ph);
+ if (actor == NULL)
+ return NULL;
+ find_it:
+ dev = pmf_find_device(actor);
+ if (dev == NULL)
+ return NULL;
+
+ list_for_each_entry(func, &dev->functions, link) {
+ if (name && strcmp(name, func->name))
+ continue;
+ if (func->phandle && target->node != func->phandle)
+ continue;
+ if ((func->flags & flags) == 0)
+ continue;
+ result = func;
+ break;
+ }
+ of_node_put(actor);
+ pmf_put_device(dev);
+ return result;
+}
+
+
+int pmf_register_irq_client(struct device_node *target,
+ const char *name,
+ struct pmf_irq_client *client)
+{
+ struct pmf_function *func;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pmf_lock, flags);
+ func = __pmf_find_function(target, name, PMF_FLAGS_INT_GEN);
+ if (func == NULL) {
+ spin_unlock_irqrestore(&pmf_lock, flags);
+ return -ENODEV;
+ }
+ list_add(&client->link, &func->irq_clients);
+ spin_unlock_irqrestore(&pmf_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pmf_register_irq_client);
+
+void pmf_unregister_irq_client(struct device_node *np,
+ const char *name,
+ struct pmf_irq_client *client)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pmf_lock, flags);
+ list_del(&client->link);
+ spin_unlock_irqrestore(&pmf_lock, flags);
+}
+EXPORT_SYMBOL_GPL(pmf_unregister_irq_client);
+
+
+void pmf_do_irq(struct pmf_function *func)
+{
+ unsigned long flags;
+ struct pmf_irq_client *client;
+
+ /* For now, using a spinlock over the whole function. Can be made
+ * to drop the lock using 2 lists if necessary
+ */
+ spin_lock_irqsave(&pmf_lock, flags);
+ list_for_each_entry(client, &func->irq_clients, link) {
+ if (!try_module_get(client->owner))
+ continue;
+ client->handler(client->data);
+ module_put(client->owner);
+ }
+ spin_unlock_irqrestore(&pmf_lock, flags);
+}
+EXPORT_SYMBOL_GPL(pmf_do_irq);
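A sketch of the client side of pmf_register_irq_client()/pmf_do_irq(). The owner, handler and data members are the ones used above; the handler's void return type and the "slewing-done" function name are assumptions for illustration:

static void my_pfunc_irq(void *data)
{
	/* invoked from pmf_do_irq() with pmf_lock held */
}

static struct pmf_irq_client my_irq_client = {
	.owner   = THIS_MODULE,
	.handler = my_pfunc_irq,
};

static int my_hook_irq(struct device_node *target)
{
	return pmf_register_irq_client(target, "slewing-done", &my_irq_client);
}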
+
+
+int pmf_call_one(struct pmf_function *func, struct pmf_args *args)
+{
+ struct pmf_device *dev = func->dev;
+ void *instdata = NULL;
+ int rc = 0;
+
+ DBG(" ** pmf_call_one(%s/%s) **\n", dev->node->full_name, func->name);
+
+ if (dev->handlers->begin)
+ instdata = dev->handlers->begin(func, args);
+ rc = pmf_parse_one(func, dev->handlers, instdata, args);
+ if (dev->handlers->end)
+ dev->handlers->end(func, instdata);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pmf_call_one);
+
+int pmf_do_functions(struct device_node *np, const char *name,
+ u32 phandle, u32 fflags, struct pmf_args *args)
+{
+ struct pmf_device *dev;
+ struct pmf_function *func, *tmp;
+ unsigned long flags;
+ int rc = -ENODEV;
+
+ spin_lock_irqsave(&pmf_lock, flags);
+
+ dev = pmf_find_device(np);
+ if (dev == NULL) {
+ spin_unlock_irqrestore(&pmf_lock, flags);
+ return -ENODEV;
+ }
+ list_for_each_entry_safe(func, tmp, &dev->functions, link) {
+ if (name && strcmp(name, func->name))
+ continue;
+ if (phandle && func->phandle && phandle != func->phandle)
+ continue;
+ if ((func->flags & fflags) == 0)
+ continue;
+ if (pmf_get_function(func) == NULL)
+ continue;
+ spin_unlock_irqrestore(&pmf_lock, flags);
+ rc = pmf_call_one(func, args);
+ pmf_put_function(func);
+ spin_lock_irqsave(&pmf_lock, flags);
+ }
+ pmf_put_device(dev);
+ spin_unlock_irqrestore(&pmf_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pmf_do_functions);
+
+
+struct pmf_function *pmf_find_function(struct device_node *target,
+ const char *name)
+{
+ struct pmf_function *func;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pmf_lock, flags);
+ func = __pmf_find_function(target, name, PMF_FLAGS_ON_DEMAND);
+ if (func)
+ func = pmf_get_function(func);
+ spin_unlock_irqrestore(&pmf_lock, flags);
+ return func;
+}
+EXPORT_SYMBOL_GPL(pmf_find_function);
+
+int pmf_call_function(struct device_node *target, const char *name,
+ struct pmf_args *args)
+{
+ struct pmf_function *func = pmf_find_function(target, name);
+ int rc;
+
+ if (func == NULL)
+ return -ENODEV;
+
+ rc = pmf_call_one(func, args);
+ pmf_put_function(func);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pmf_call_function);
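Caller usage is visible later in this patch, in smp.c's smp_core99_pfunc_tb_freeze(); condensed here into a standalone sketch (the error handling is added for illustration):

static int my_freeze_timebase(int freeze)
{
	struct device_node *cpus = of_find_node_by_path("/cpus");
	struct pmf_args args;
	int rc;

	if (cpus == NULL)
		return -ENODEV;
	args.count = 1;
	args.u[0].v = !freeze;		/* single scalar argument */
	rc = pmf_call_function(cpus, "cpu-timebase", &args);
	of_node_put(cpus);
	return rc;
}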
+
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 90040c49494..18bf3011d1e 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -5,8 +5,8 @@
* in a separate file
*
* Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
- *
- * Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ * Copyright (C) 2005 Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ * IBM, Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -54,12 +54,7 @@ struct pmac_irq_hw {
};
/* Default addresses */
-static volatile struct pmac_irq_hw *pmac_irq_hw[4] = {
- (struct pmac_irq_hw *) 0xf3000020,
- (struct pmac_irq_hw *) 0xf3000010,
- (struct pmac_irq_hw *) 0xf4000020,
- (struct pmac_irq_hw *) 0xf4000010,
-};
+static volatile struct pmac_irq_hw __iomem *pmac_irq_hw[4];
#define GC_LEVEL_MASK 0x3ff00000
#define OHARE_LEVEL_MASK 0x1ff00000
@@ -82,8 +77,7 @@ static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
* since it can lose interrupts (see pmac_set_irq_mask).
* -- Cort
*/
-void
-__set_lost(unsigned long irq_nr, int nokick)
+void __set_lost(unsigned long irq_nr, int nokick)
{
if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
atomic_inc(&ppc_n_lost_interrupts);
@@ -92,8 +86,7 @@ __set_lost(unsigned long irq_nr, int nokick)
}
}
-static void
-pmac_mask_and_ack_irq(unsigned int irq_nr)
+static void pmac_mask_and_ack_irq(unsigned int irq_nr)
{
unsigned long bit = 1UL << (irq_nr & 0x1f);
int i = irq_nr >> 5;
@@ -224,8 +217,7 @@ static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs)
return IRQ_NONE;
}
-int
-pmac_get_irq(struct pt_regs *regs)
+static int pmac_get_irq(struct pt_regs *regs)
{
int irq;
unsigned long bits = 0;
@@ -256,34 +248,40 @@ pmac_get_irq(struct pt_regs *regs)
/* This routine will fix some missing interrupt values in the device tree
* on the gatwick mac-io controller used by some PowerBooks
+ *
+ * Walking of OF nodes could use a bit more fixing up here, but it's not
+ * very important as this is all boot time code on static portions of the
+ * device-tree.
+ *
+ * However, the modifications done to "intrs" will have to be removed and
+ * replaced with proper updates of the "interrupts" properties or
+ * AAPL,interrupts, yet to be decided, once the dynamic parsing is there.
*/
-static void __init
-pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base)
+static void __init pmac_fix_gatwick_interrupts(struct device_node *gw,
+ int irq_base)
{
struct device_node *node;
int count;
memset(gatwick_int_pool, 0, sizeof(gatwick_int_pool));
- node = gw->child;
count = 0;
- while(node)
- {
+ for (node = NULL; (node = of_get_next_child(gw, node)) != NULL;) {
/* Fix SCC */
- if (strcasecmp(node->name, "escc") == 0)
- if (node->child) {
- if (node->child->n_intrs < 3) {
- node->child->intrs = &gatwick_int_pool[count];
- count += 3;
- }
- node->child->n_intrs = 3;
- node->child->intrs[0].line = 15+irq_base;
- node->child->intrs[1].line = 4+irq_base;
- node->child->intrs[2].line = 5+irq_base;
- printk(KERN_INFO "irq: fixed SCC on second controller (%d,%d,%d)\n",
- node->child->intrs[0].line,
- node->child->intrs[1].line,
- node->child->intrs[2].line);
+ if ((strcasecmp(node->name, "escc") == 0) && node->child) {
+ if (node->child->n_intrs < 3) {
+ node->child->intrs = &gatwick_int_pool[count];
+ count += 3;
}
+ node->child->n_intrs = 3;
+ node->child->intrs[0].line = 15+irq_base;
+ node->child->intrs[1].line = 4+irq_base;
+ node->child->intrs[2].line = 5+irq_base;
+ printk(KERN_INFO "irq: fixed SCC on gatwick"
+ " (%d,%d,%d)\n",
+ node->child->intrs[0].line,
+ node->child->intrs[1].line,
+ node->child->intrs[2].line);
+ }
/* Fix media-bay & left SWIM */
if (strcasecmp(node->name, "media-bay") == 0) {
struct device_node* ya_node;
@@ -292,12 +290,11 @@ pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base)
node->intrs = &gatwick_int_pool[count++];
node->n_intrs = 1;
node->intrs[0].line = 29+irq_base;
- printk(KERN_INFO "irq: fixed media-bay on second controller (%d)\n",
- node->intrs[0].line);
+ printk(KERN_INFO "irq: fixed media-bay on gatwick"
+ " (%d)\n", node->intrs[0].line);
ya_node = node->child;
- while(ya_node)
- {
+ while(ya_node) {
if (strcasecmp(ya_node->name, "floppy") == 0) {
if (ya_node->n_intrs < 2) {
ya_node->intrs = &gatwick_int_pool[count];
@@ -323,7 +320,6 @@ pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base)
ya_node = ya_node->sibling;
}
}
- node = node->sibling;
}
if (count > 10) {
printk("WARNING !! Gatwick interrupt pool overflow\n");
@@ -338,45 +334,41 @@ pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base)
* controller. If we find this second ohare, set it up and fix the
* interrupt value in the device tree for the ethernet chip.
*/
-static int __init enable_second_ohare(void)
+static void __init enable_second_ohare(struct device_node *np)
{
unsigned char bus, devfn;
unsigned short cmd;
- unsigned long addr;
- struct device_node *irqctrler = find_devices("pci106b,7");
struct device_node *ether;
- if (irqctrler == NULL || irqctrler->n_addrs <= 0)
- return -1;
- addr = (unsigned long) ioremap(irqctrler->addrs[0].address, 0x40);
- pmac_irq_hw[1] = (volatile struct pmac_irq_hw *)(addr + 0x20);
- max_irqs = 64;
- if (pci_device_from_OF_node(irqctrler, &bus, &devfn) == 0) {
- struct pci_controller* hose = pci_find_hose_for_OF_device(irqctrler);
- if (!hose)
- printk(KERN_ERR "Can't find PCI hose for OHare2 !\n");
- else {
- early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
- cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
- cmd &= ~PCI_COMMAND_IO;
- early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
+ /* This code doesn't strictly belong here, it could be part of
+ * either the PCI initialisation or the feature code. It's kept
+ * here for historical reasons.
+ */
+ if (pci_device_from_OF_node(np, &bus, &devfn) == 0) {
+ struct pci_controller* hose =
+ pci_find_hose_for_OF_device(np);
+ if (!hose) {
+ printk(KERN_ERR "Can't find PCI hose for OHare2 !\n");
+ return;
}
+ early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
+ cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+ cmd &= ~PCI_COMMAND_IO;
+ early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
}
/* Fix interrupt for the modem/ethernet combo controller. The number
- in the device tree (27) is bogus (correct for the ethernet-only
- board but not the combo ethernet/modem board).
- The real interrupt is 28 on the second controller -> 28+32 = 60.
- */
- ether = find_devices("pci1011,14");
+ * in the device tree (27) is bogus (correct for the ethernet-only
+ * board but not the combo ethernet/modem board).
+ * The real interrupt is 28 on the second controller -> 28+32 = 60.
+ */
+ ether = of_find_node_by_name(NULL, "pci1011,14");
if (ether && ether->n_intrs > 0) {
ether->intrs[0].line = 60;
printk(KERN_INFO "irq: Fixed ethernet IRQ to %d\n",
ether->intrs[0].line);
}
-
- /* Return the interrupt number of the cascade */
- return irqctrler->intrs[0].line;
+ of_node_put(ether);
}
#ifdef CONFIG_XMON
@@ -394,189 +386,251 @@ static struct irqaction gatwick_cascade_action = {
.mask = CPU_MASK_NONE,
.name = "cascade",
};
-#endif /* CONFIG_PPC32 */
-static int pmac_u3_cascade(struct pt_regs *regs, void *data)
+static void __init pmac_pic_probe_oldstyle(void)
{
- return mpic_get_one_irq((struct mpic *)data, regs);
-}
-
-void __init pmac_pic_init(void)
-{
- struct device_node *irqctrler = NULL;
- struct device_node *irqctrler2 = NULL;
- struct device_node *np;
-#ifdef CONFIG_PPC32
int i;
- unsigned long addr;
int irq_cascade = -1;
-#endif
- struct mpic *mpic1, *mpic2;
+ struct device_node *master = NULL;
+ struct device_node *slave = NULL;
+ u8 __iomem *addr;
+ struct resource r;
- /* We first try to detect Apple's new Core99 chipset, since mac-io
- * is quite different on those machines and contains an IBM MPIC2.
- */
- np = find_type_devices("open-pic");
- while (np) {
- if (np->parent && !strcmp(np->parent->name, "u3"))
- irqctrler2 = np;
- else
- irqctrler = np;
- np = np->next;
- }
- if (irqctrler != NULL && irqctrler->n_addrs > 0) {
- unsigned char senses[128];
-
- printk(KERN_INFO "PowerMac using OpenPIC irq controller at 0x%08x\n",
- (unsigned int)irqctrler->addrs[0].address);
- pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler, 0, 0);
-
- prom_get_irq_senses(senses, 0, 128);
- mpic1 = mpic_alloc(irqctrler->addrs[0].address,
- MPIC_PRIMARY | MPIC_WANTS_RESET,
- 0, 0, 128, 252, senses, 128, " OpenPIC ");
- BUG_ON(mpic1 == NULL);
- mpic_init(mpic1);
-
- if (irqctrler2 != NULL && irqctrler2->n_intrs > 0 &&
- irqctrler2->n_addrs > 0) {
- printk(KERN_INFO "Slave OpenPIC at 0x%08x hooked on IRQ %d\n",
- (u32)irqctrler2->addrs[0].address,
- irqctrler2->intrs[0].line);
-
- pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler2, 0, 0);
- prom_get_irq_senses(senses, 128, 128 + 124);
-
- /* We don't need to set MPIC_BROKEN_U3 here since we don't have
- * hypertransport interrupts routed to it
- */
- mpic2 = mpic_alloc(irqctrler2->addrs[0].address,
- MPIC_BIG_ENDIAN | MPIC_WANTS_RESET,
- 0, 128, 124, 0, senses, 124,
- " U3-MPIC ");
- BUG_ON(mpic2 == NULL);
- mpic_init(mpic2);
- mpic_setup_cascade(irqctrler2->intrs[0].line,
- pmac_u3_cascade, mpic2);
- }
-#if defined(CONFIG_XMON) && defined(CONFIG_PPC32)
- {
- struct device_node* pswitch;
- int nmi_irq;
-
- pswitch = find_devices("programmer-switch");
- if (pswitch && pswitch->n_intrs) {
- nmi_irq = pswitch->intrs[0].line;
- mpic_irq_set_priority(nmi_irq, 9);
- setup_irq(nmi_irq, &xmon_action);
- }
- }
-#endif /* CONFIG_XMON */
- return;
- }
- irqctrler = NULL;
+ /* Set our get_irq function */
+ ppc_md.get_irq = pmac_get_irq;
-#ifdef CONFIG_PPC32
- /* Get the level/edge settings, assume if it's not
- * a Grand Central nor an OHare, then it's an Heathrow
- * (or Paddington).
+ /*
+ * Find the interrupt controller type & node
*/
- ppc_md.get_irq = pmac_get_irq;
- if (find_devices("gc"))
+
+ if ((master = of_find_node_by_name(NULL, "gc")) != NULL) {
+ max_irqs = max_real_irqs = 32;
level_mask[0] = GC_LEVEL_MASK;
- else if (find_devices("ohare")) {
+ } else if ((master = of_find_node_by_name(NULL, "ohare")) != NULL) {
+ max_irqs = max_real_irqs = 32;
level_mask[0] = OHARE_LEVEL_MASK;
+
/* We might have a second cascaded ohare */
- level_mask[1] = OHARE_LEVEL_MASK;
- } else {
+ slave = of_find_node_by_name(NULL, "pci106b,7");
+ if (slave) {
+ max_irqs = 64;
+ level_mask[1] = OHARE_LEVEL_MASK;
+ enable_second_ohare(slave);
+ }
+ } else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) {
+ max_irqs = max_real_irqs = 64;
level_mask[0] = HEATHROW_LEVEL_MASK;
level_mask[1] = 0;
+
/* We might have a second cascaded heathrow */
- level_mask[2] = HEATHROW_LEVEL_MASK;
- level_mask[3] = 0;
- }
+ slave = of_find_node_by_name(master, "mac-io");
+
+ /* Check ordering of master & slave */
+ if (device_is_compatible(master, "gatwick")) {
+ struct device_node *tmp;
+ BUG_ON(slave == NULL);
+ tmp = master;
+ master = slave;
+ slave = tmp;
+ }
- /*
- * G3 powermacs and 1999 G3 PowerBooks have 64 interrupts,
- * 1998 G3 Series PowerBooks have 128,
- * other powermacs have 32.
- * The combo ethernet/modem card for the Powerstar powerbooks
- * (2400/3400/3500, ohare based) has a second ohare chip
- * effectively making a total of 64.
- */
- max_irqs = max_real_irqs = 32;
- irqctrler = find_devices("mac-io");
- if (irqctrler)
- {
- max_real_irqs = 64;
- if (irqctrler->next)
+ /* We found a slave */
+ if (slave) {
max_irqs = 128;
- else
- max_irqs = 64;
+ level_mask[2] = HEATHROW_LEVEL_MASK;
+ level_mask[3] = 0;
+ pmac_fix_gatwick_interrupts(slave, max_real_irqs);
+ }
}
+ BUG_ON(master == NULL);
+
+ /* Set the handler for the main PIC */
for ( i = 0; i < max_real_irqs ; i++ )
irq_desc[i].handler = &pmac_pic;
- /* get addresses of first controller */
- if (irqctrler) {
- if (irqctrler->n_addrs > 0) {
- addr = (unsigned long)
- ioremap(irqctrler->addrs[0].address, 0x40);
- for (i = 0; i < 2; ++i)
- pmac_irq_hw[i] = (volatile struct pmac_irq_hw*)
- (addr + (2 - i) * 0x10);
- }
+ /* Get addresses of first controller if we have a node for it */
+ BUG_ON(of_address_to_resource(master, 0, &r));
- /* get addresses of second controller */
- irqctrler = irqctrler->next;
- if (irqctrler && irqctrler->n_addrs > 0) {
- addr = (unsigned long)
- ioremap(irqctrler->addrs[0].address, 0x40);
- for (i = 2; i < 4; ++i)
- pmac_irq_hw[i] = (volatile struct pmac_irq_hw*)
- (addr + (4 - i) * 0x10);
- irq_cascade = irqctrler->intrs[0].line;
- if (device_is_compatible(irqctrler, "gatwick"))
- pmac_fix_gatwick_interrupts(irqctrler, max_real_irqs);
- }
- } else {
- /* older powermacs have a GC (grand central) or ohare at
- f3000000, with interrupt control registers at f3000020. */
- addr = (unsigned long) ioremap(0xf3000000, 0x40);
- pmac_irq_hw[0] = (volatile struct pmac_irq_hw *) (addr + 0x20);
+ /* Map interrupts of primary controller */
+ addr = (u8 __iomem *) ioremap(r.start, 0x40);
+ i = 0;
+ pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
+ (addr + 0x20);
+ if (max_real_irqs > 32)
+ pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
+ (addr + 0x10);
+ printk(KERN_INFO "irq: Found primary Apple PIC %s for %d irqs\n",
+ master->full_name, max_real_irqs);
+
+ of_node_put(master);
+
+ /* Map interrupts of cascaded controller */
+ if (slave && !of_address_to_resource(slave, 0, &r)) {
+ addr = (u8 __iomem *)ioremap(r.start, 0x40);
+ pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
+ (addr + 0x20);
+ if (max_irqs > 64)
+ pmac_irq_hw[i++] =
+ (volatile struct pmac_irq_hw __iomem *)
+ (addr + 0x10);
+ irq_cascade = slave->intrs[0].line;
+
+ printk(KERN_INFO "irq: Found slave Apple PIC %s for %d irqs"
+ " cascade: %d\n", slave->full_name,
+ max_irqs - max_real_irqs, irq_cascade);
}
-
- /* PowerBooks 3400 and 3500 can have a second controller in a second
- ohare chip, on the combo ethernet/modem card */
- if (machine_is_compatible("AAPL,3400/2400")
- || machine_is_compatible("AAPL,3500"))
- irq_cascade = enable_second_ohare();
+ of_node_put(slave);
/* disable all interrupts in all controllers */
for (i = 0; i * 32 < max_irqs; ++i)
out_le32(&pmac_irq_hw[i]->enable, 0);
+
/* mark level interrupts */
for (i = 0; i < max_irqs; i++)
if (level_mask[i >> 5] & (1UL << (i & 0x1f)))
irq_desc[i].status = IRQ_LEVEL;
- /* get interrupt line of secondary interrupt controller */
- if (irq_cascade >= 0) {
- printk(KERN_INFO "irq: secondary controller on irq %d\n",
- (int)irq_cascade);
+ /* Setup handlers for secondary controller and hook cascade irq*/
+ if (slave) {
for ( i = max_real_irqs ; i < max_irqs ; i++ )
irq_desc[i].handler = &gatwick_pic;
setup_irq(irq_cascade, &gatwick_cascade_action);
}
- printk("System has %d possible interrupts\n", max_irqs);
- if (max_irqs != max_real_irqs)
- printk(KERN_DEBUG "%d interrupts on main controller\n",
- max_real_irqs);
-
+ printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs);
#ifdef CONFIG_XMON
setup_irq(20, &xmon_action);
-#endif /* CONFIG_XMON */
-#endif /* CONFIG_PPC32 */
+#endif
+}
+#endif /* CONFIG_PPC32 */
+
+static int pmac_u3_cascade(struct pt_regs *regs, void *data)
+{
+ return mpic_get_one_irq((struct mpic *)data, regs);
+}
+
+static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic)
+{
+#if defined(CONFIG_XMON) && defined(CONFIG_PPC32)
+ struct device_node* pswitch;
+ int nmi_irq;
+
+ pswitch = of_find_node_by_name(NULL, "programmer-switch");
+ if (pswitch && pswitch->n_intrs) {
+ nmi_irq = pswitch->intrs[0].line;
+ mpic_irq_set_priority(nmi_irq, 9);
+ setup_irq(nmi_irq, &xmon_action);
+ }
+ of_node_put(pswitch);
+#endif /* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */
+}
+
+static struct mpic * __init pmac_setup_one_mpic(struct device_node *np,
+ int master)
+{
+ unsigned char senses[128];
+ int offset = master ? 0 : 128;
+ int count = master ? 128 : 124;
+ const char *name = master ? " MPIC 1 " : " MPIC 2 ";
+ struct resource r;
+ struct mpic *mpic;
+ unsigned int flags = master ? MPIC_PRIMARY : 0;
+ int rc;
+
+ rc = of_address_to_resource(np, 0, &r);
+ if (rc)
+ return NULL;
+
+ pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0);
+
+ prom_get_irq_senses(senses, offset, offset + count);
+
+ flags |= MPIC_WANTS_RESET;
+ if (get_property(np, "big-endian", NULL))
+ flags |= MPIC_BIG_ENDIAN;
+
+ /* Primary Big Endian means HT interrupts. This is quite dodgy
+ * but works until I find a better way
+ */
+ if (master && (flags & MPIC_BIG_ENDIAN))
+ flags |= MPIC_BROKEN_U3;
+
+ mpic = mpic_alloc(r.start, flags, 0, offset, count, master ? 252 : 0,
+ senses, count, name);
+ if (mpic == NULL)
+ return NULL;
+
+ mpic_init(mpic);
+
+ return mpic;
+}
+
+static int __init pmac_pic_probe_mpic(void)
+{
+ struct mpic *mpic1, *mpic2;
+ struct device_node *np, *master = NULL, *slave = NULL;
+
+ /* We can have up to 2 MPICs cascaded */
+ for (np = NULL; (np = of_find_node_by_type(np, "open-pic"))
+ != NULL;) {
+ if (master == NULL &&
+ get_property(np, "interrupts", NULL) == NULL)
+ master = of_node_get(np);
+ else if (slave == NULL)
+ slave = of_node_get(np);
+ if (master && slave)
+ break;
+ }
+
+ /* Check for bogus setups */
+ if (master == NULL && slave != NULL) {
+ master = slave;
+ slave = NULL;
+ }
+
+ /* Not found, default to good old pmac pic */
+ if (master == NULL)
+ return -ENODEV;
+
+ /* Set master handler */
+ ppc_md.get_irq = mpic_get_irq;
+
+ /* Setup master */
+ mpic1 = pmac_setup_one_mpic(master, 1);
+ BUG_ON(mpic1 == NULL);
+
+ /* Install NMI if any */
+ pmac_pic_setup_mpic_nmi(mpic1);
+
+ of_node_put(master);
+
+ /* No slave, let's go out */
+ if (slave == NULL || slave->n_intrs < 1)
+ return 0;
+
+ mpic2 = pmac_setup_one_mpic(slave, 0);
+ if (mpic2 == NULL) {
+ printk(KERN_ERR "Failed to setup slave MPIC\n");
+ of_node_put(slave);
+ return 0;
+ }
+ mpic_setup_cascade(slave->intrs[0].line, pmac_u3_cascade, mpic2);
+
+ of_node_put(slave);
+ return 0;
+}
+
+
+void __init pmac_pic_init(void)
+{
+ /* We first try to detect Apple's new Core99 chipset, since mac-io
+ * is quite different on those machines and contains an IBM MPIC2.
+ */
+ if (pmac_pic_probe_mpic() == 0)
+ return;
+
+#ifdef CONFIG_PPC32
+ pmac_pic_probe_oldstyle();
+#endif
}
#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index 2ad25e13423..21c7b0f8f32 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -42,10 +42,6 @@ extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
unsigned long data_port, unsigned long ctrl_port, int *irq);
extern int pmac_nvram_init(void);
-
-extern struct hw_interrupt_type pmac_pic;
-
-void pmac_pic_init(void);
-int pmac_get_irq(struct pt_regs *regs);
+extern void pmac_pic_init(void);
#endif /* __PMAC_H__ */
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 7acb0546671..89c4c363616 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -60,6 +60,7 @@
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/io.h>
+#include <asm/kexec.h>
#include <asm/pci-bridge.h>
#include <asm/ohare.h>
#include <asm/mediabay.h>
@@ -74,8 +75,8 @@
#include <asm/iommu.h>
#include <asm/smu.h>
#include <asm/pmc.h>
-#include <asm/mpic.h>
#include <asm/lmb.h>
+#include <asm/udbg.h>
#include "pmac.h"
@@ -277,7 +278,7 @@ static void __init l2cr_init(void)
}
#endif
-void __init pmac_setup_arch(void)
+static void __init pmac_setup_arch(void)
{
struct device_node *cpu, *ic;
int *fp;
@@ -321,16 +322,6 @@ void __init pmac_setup_arch(void)
l2cr_init();
#endif /* CONFIG_PPC32 */
-#ifdef CONFIG_PPC64
- /* Probe motherboard chipset */
- /* this is done earlier in setup_arch for 32-bit */
- pmac_feature_init();
-
- /* We can NAP */
- powersave_nap = 1;
- printk(KERN_INFO "Using native/NAP idle loop\n");
-#endif
-
#ifdef CONFIG_KGDB
zs_kgdb_hook(0);
#endif
@@ -354,7 +345,7 @@ void __init pmac_setup_arch(void)
#ifdef CONFIG_SMP
/* Check for Core99 */
- if (find_devices("uni-n") || find_devices("u3"))
+ if (find_devices("uni-n") || find_devices("u3") || find_devices("u4"))
smp_ops = &core99_smp_ops;
#ifdef CONFIG_PPC32
else
@@ -621,35 +612,31 @@ static void __init pmac_init_early(void)
* and call ioremap
*/
hpte_init_native();
+#endif
- /* Init SCC */
- if (strstr(cmd_line, "sccdbg")) {
- sccdbg = 1;
- udbg_init_scc(NULL);
+ /* Enable early btext debug if requested */
+ if (strstr(cmd_line, "btextdbg")) {
+ udbg_adb_init_early();
+ register_early_udbg_console();
}
- /* Setup interrupt mapping options */
- ppc64_interrupt_controller = IC_OPEN_PIC;
+ /* Probe motherboard chipset */
+ pmac_feature_init();
- iommu_init_early_u3();
-#endif
-}
+ /* We can NAP */
+ powersave_nap = 1;
+ printk(KERN_INFO "Using native/NAP idle loop\n");
+
+ /* Initialize debug stuff */
+ udbg_scc_init(!!strstr(cmd_line, "sccdbg"));
+ udbg_adb_init(!!strstr(cmd_line, "btextdbg"));
-static void __init pmac_progress(char *s, unsigned short hex)
-{
#ifdef CONFIG_PPC64
- if (sccdbg) {
- udbg_puts(s);
- udbg_puts("\n");
- return;
- }
+ /* Setup interrupt mapping options */
+ ppc64_interrupt_controller = IC_OPEN_PIC;
+
+ iommu_init_early_dart();
#endif
-#ifdef CONFIG_BOOTX_TEXT
- if (boot_text_mapped) {
- btext_drawstring(s);
- btext_drawchar('\n');
- }
-#endif /* CONFIG_BOOTX_TEXT */
}
/*
@@ -663,35 +650,14 @@ static int pmac_check_legacy_ioport(unsigned int baseport)
static int __init pmac_declare_of_platform_devices(void)
{
- struct device_node *np, *npp;
-
- np = find_devices("uni-n");
- if (np) {
- for (np = np->child; np != NULL; np = np->sibling)
- if (strncmp(np->name, "i2c", 3) == 0) {
- of_platform_device_create(np, "uni-n-i2c",
- NULL);
- break;
- }
- }
- np = find_devices("valkyrie");
+ struct device_node *np;
+
+ np = of_find_node_by_name(NULL, "valkyrie");
if (np)
of_platform_device_create(np, "valkyrie", NULL);
- np = find_devices("platinum");
+ np = of_find_node_by_name(NULL, "platinum");
if (np)
of_platform_device_create(np, "platinum", NULL);
-
- npp = of_find_node_by_name(NULL, "u3");
- if (npp) {
- for (np = NULL; (np = of_get_next_child(npp, np)) != NULL;) {
- if (strncmp(np->name, "i2c", 3) == 0) {
- of_platform_device_create(np, "u3-i2c", NULL);
- of_node_put(np);
- break;
- }
- }
- of_node_put(npp);
- }
np = of_find_node_by_type(NULL, "smu");
if (np) {
of_platform_device_create(np, "smu", NULL);
@@ -718,7 +684,7 @@ static int __init pmac_probe(int platform)
* occupies having to be broken up so the DART itself is not
* part of the cacheable linar mapping
*/
- alloc_u3_dart_table();
+ alloc_dart_table();
#endif
#ifdef CONFIG_PMAC_SMU
@@ -734,15 +700,17 @@ static int __init pmac_probe(int platform)
}
#ifdef CONFIG_PPC64
-static int pmac_probe_mode(struct pci_bus *bus)
+/* Move that to pci.c */
+static int pmac_pci_probe_mode(struct pci_bus *bus)
{
struct device_node *node = bus->sysdata;
/* We need to use normal PCI probing for the AGP bus,
- since the device for the AGP bridge isn't in the tree. */
- if (bus->self == NULL && device_is_compatible(node, "u3-agp"))
+ * since the device for the AGP bridge isn't in the tree.
+ */
+ if (bus->self == NULL && (device_is_compatible(node, "u3-agp") ||
+ device_is_compatible(node, "u4-pcie")))
return PCI_PROBE_NORMAL;
-
return PCI_PROBE_DEVTREE;
}
#endif
@@ -756,7 +724,7 @@ struct machdep_calls __initdata pmac_md = {
.init_early = pmac_init_early,
.show_cpuinfo = pmac_show_cpuinfo,
.init_IRQ = pmac_pic_init,
- .get_irq = mpic_get_irq, /* changed later */
+ .get_irq = NULL, /* changed later */
.pcibios_fixup = pmac_pcibios_fixup,
.restart = pmac_restart,
.power_off = pmac_power_off,
@@ -768,12 +736,17 @@ struct machdep_calls __initdata pmac_md = {
.calibrate_decr = pmac_calibrate_decr,
.feature_call = pmac_do_feature_call,
.check_legacy_ioport = pmac_check_legacy_ioport,
- .progress = pmac_progress,
+ .progress = udbg_progress,
#ifdef CONFIG_PPC64
- .pci_probe_mode = pmac_probe_mode,
+ .pci_probe_mode = pmac_pci_probe_mode,
.idle_loop = native_idle,
.enable_pmcs = power4_enable_pmcs,
+#ifdef CONFIG_KEXEC
+ .machine_kexec = default_machine_kexec,
+ .machine_kexec_prepare = default_machine_kexec_prepare,
+ .machine_crash_shutdown = default_machine_crash_shutdown,
#endif
+#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC32
.pcibios_enable_device_hook = pmac_pci_enable_device_hook,
.pcibios_after_init = pmac_pcibios_after_init,
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index fb2a7c798e8..0df2cdcd805 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -52,8 +52,9 @@
#include <asm/cacheflush.h>
#include <asm/keylargo.h>
#include <asm/pmac_low_i2c.h>
+#include <asm/pmac_pfunc.h>
-#undef DEBUG
+#define DEBUG
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -62,6 +63,7 @@
#endif
extern void __secondary_start_pmac_0(void);
+extern int pmac_pfunc_base_install(void);
#ifdef CONFIG_PPC32
@@ -361,7 +363,6 @@ static void __init psurge_dual_sync_tb(int cpu_nr)
set_dec(tb_ticks_per_jiffy);
/* XXX fixme */
set_tb(0, 0);
- last_jiffy_stamp(cpu_nr) = 0;
if (cpu_nr > 0) {
mb();
@@ -429,15 +430,62 @@ struct smp_ops_t psurge_smp_ops = {
};
#endif /* CONFIG_PPC32 - actually powersurge support */
+/*
+ * Core 99 and later support
+ */
+
+static void (*pmac_tb_freeze)(int freeze);
+static unsigned long timebase;
+static int tb_req;
+
+static void smp_core99_give_timebase(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ while(!tb_req)
+ barrier();
+ tb_req = 0;
+ (*pmac_tb_freeze)(1);
+ mb();
+ timebase = get_tb();
+ mb();
+ while (timebase)
+ barrier();
+ mb();
+ (*pmac_tb_freeze)(0);
+ mb();
+
+ local_irq_restore(flags);
+}
+
+
+static void __devinit smp_core99_take_timebase(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ tb_req = 1;
+ mb();
+ while (!timebase)
+ barrier();
+ mb();
+ set_tb(timebase >> 32, timebase & 0xffffffff);
+ timebase = 0;
+ mb();
+ set_dec(tb_ticks_per_jiffy/2);
+
+ local_irq_restore(flags);
+}
+
#ifdef CONFIG_PPC64
/*
* G5s enable/disable the timebase via an i2c-connected clock chip.
*/
-static struct device_node *pmac_tb_clock_chip_host;
+static struct pmac_i2c_bus *pmac_tb_clock_chip_host;
static u8 pmac_tb_pulsar_addr;
-static void (*pmac_tb_freeze)(int freeze);
-static DEFINE_SPINLOCK(timebase_lock);
-static unsigned long timebase;
static void smp_core99_cypress_tb_freeze(int freeze)
{
@@ -447,19 +495,20 @@ static void smp_core99_cypress_tb_freeze(int freeze)
/* Strangely, the device-tree says address is 0xd2, but darwin
* accesses 0xd0 ...
*/
- pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
- rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
- 0xd0 | pmac_low_i2c_read,
- 0x81, &data, 1);
+ pmac_i2c_setmode(pmac_tb_clock_chip_host,
+ pmac_i2c_mode_combined);
+ rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
+ 0xd0 | pmac_i2c_read,
+ 1, 0x81, &data, 1);
if (rc != 0)
goto bail;
data = (data & 0xf3) | (freeze ? 0x00 : 0x0c);
- pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
- rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
- 0xd0 | pmac_low_i2c_write,
- 0x81, &data, 1);
+ pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_stdsub);
+ rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
+ 0xd0 | pmac_i2c_write,
+ 1, 0x81, &data, 1);
bail:
if (rc != 0) {
@@ -475,19 +524,20 @@ static void smp_core99_pulsar_tb_freeze(int freeze)
u8 data;
int rc;
- pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
- rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
- pmac_tb_pulsar_addr | pmac_low_i2c_read,
- 0x2e, &data, 1);
+ pmac_i2c_setmode(pmac_tb_clock_chip_host,
+ pmac_i2c_mode_combined);
+ rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
+ pmac_tb_pulsar_addr | pmac_i2c_read,
+ 1, 0x2e, &data, 1);
if (rc != 0)
goto bail;
data = (data & 0x88) | (freeze ? 0x11 : 0x22);
- pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
- rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
- pmac_tb_pulsar_addr | pmac_low_i2c_write,
- 0x2e, &data, 1);
+ pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_stdsub);
+ rc = pmac_i2c_xfer(pmac_tb_clock_chip_host,
+ pmac_tb_pulsar_addr | pmac_i2c_write,
+ 1, 0x2e, &data, 1);
bail:
if (rc != 0) {
printk(KERN_ERR "Pulsar Timebase %s rc: %d\n",
@@ -496,54 +546,14 @@ static void smp_core99_pulsar_tb_freeze(int freeze)
}
}
-
-static void smp_core99_give_timebase(void)
-{
- /* Open i2c bus for synchronous access */
- if (pmac_low_i2c_open(pmac_tb_clock_chip_host, 0))
- panic("Can't open i2c for TB sync !\n");
-
- spin_lock(&timebase_lock);
- (*pmac_tb_freeze)(1);
- mb();
- timebase = get_tb();
- spin_unlock(&timebase_lock);
-
- while (timebase)
- barrier();
-
- spin_lock(&timebase_lock);
- (*pmac_tb_freeze)(0);
- spin_unlock(&timebase_lock);
-
- /* Close i2c bus */
- pmac_low_i2c_close(pmac_tb_clock_chip_host);
-}
-
-
-static void __devinit smp_core99_take_timebase(void)
-{
- while (!timebase)
- barrier();
- spin_lock(&timebase_lock);
- set_tb(timebase >> 32, timebase & 0xffffffff);
- timebase = 0;
- spin_unlock(&timebase_lock);
-}
-
-static void __init smp_core99_setup(int ncpus)
+static void __init smp_core99_setup_i2c_hwsync(int ncpus)
{
struct device_node *cc = NULL;
struct device_node *p;
+ const char *name = NULL;
u32 *reg;
int ok;
- /* HW sync only on these platforms */
- if (!machine_is_compatible("PowerMac7,2") &&
- !machine_is_compatible("PowerMac7,3") &&
- !machine_is_compatible("RackMac3,1"))
- return;
-
/* Look for the clock chip */
while ((cc = of_find_node_by_name(cc, "i2c-hwclock")) != NULL) {
p = of_get_parent(cc);
@@ -552,124 +562,86 @@ static void __init smp_core99_setup(int ncpus)
if (!ok)
continue;
+ pmac_tb_clock_chip_host = pmac_i2c_find_bus(cc);
+ if (pmac_tb_clock_chip_host == NULL)
+ continue;
reg = (u32 *)get_property(cc, "reg", NULL);
if (reg == NULL)
continue;
-
switch (*reg) {
case 0xd2:
- if (device_is_compatible(cc, "pulsar-legacy-slewing")) {
+ if (device_is_compatible(cc,"pulsar-legacy-slewing")) {
pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
pmac_tb_pulsar_addr = 0xd2;
- printk(KERN_INFO "Timebase clock is Pulsar chip\n");
+ name = "Pulsar";
} else if (device_is_compatible(cc, "cy28508")) {
pmac_tb_freeze = smp_core99_cypress_tb_freeze;
- printk(KERN_INFO "Timebase clock is Cypress chip\n");
+ name = "Cypress";
}
break;
case 0xd4:
pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
pmac_tb_pulsar_addr = 0xd4;
- printk(KERN_INFO "Timebase clock is Pulsar chip\n");
+ name = "Pulsar";
break;
}
- if (pmac_tb_freeze != NULL) {
- pmac_tb_clock_chip_host = of_get_parent(cc);
- of_node_put(cc);
+ if (pmac_tb_freeze != NULL)
break;
- }
}
- if (pmac_tb_freeze == NULL) {
- smp_ops->give_timebase = smp_generic_give_timebase;
- smp_ops->take_timebase = smp_generic_take_timebase;
+ if (pmac_tb_freeze != NULL) {
+ /* Open i2c bus for synchronous access */
+ if (pmac_i2c_open(pmac_tb_clock_chip_host, 1)) {
+ printk(KERN_ERR "Failed top open i2c bus for clock"
+ " sync, fallback to software sync !\n");
+ goto no_i2c_sync;
+ }
+ printk(KERN_INFO "Processor timebase sync using %s i2c clock\n",
+ name);
+ return;
}
+ no_i2c_sync:
+ pmac_tb_freeze = NULL;
+ pmac_tb_clock_chip_host = NULL;
}
-/* nothing to do here, caches are already set up by service processor */
-static inline void __devinit core99_init_caches(int cpu)
+
+
+/*
+ * Newer G5s uses a platform function
+ */
+
+static void smp_core99_pfunc_tb_freeze(int freeze)
{
+ struct device_node *cpus;
+ struct pmf_args args;
+
+ cpus = of_find_node_by_path("/cpus");
+ BUG_ON(cpus == NULL);
+ args.count = 1;
+ args.u[0].v = !freeze;
+ pmf_call_function(cpus, "cpu-timebase", &args);
+ of_node_put(cpus);
}
#else /* CONFIG_PPC64 */
/*
- * SMP G4 powermacs use a GPIO to enable/disable the timebase.
+ * SMP G4s use a GPIO to enable/disable the timebase.
*/
static unsigned int core99_tb_gpio; /* Timebase freeze GPIO */
-static unsigned int pri_tb_hi, pri_tb_lo;
-static unsigned int pri_tb_stamp;
-
-/* not __init, called in sleep/wakeup code */
-void smp_core99_give_timebase(void)
+static void smp_core99_gpio_tb_freeze(int freeze)
{
- unsigned long flags;
- unsigned int t;
-
- /* wait for the secondary to be in take_timebase */
- for (t = 100000; t > 0 && !sec_tb_reset; --t)
- udelay(10);
- if (!sec_tb_reset) {
- printk(KERN_WARNING "Timeout waiting sync on second CPU\n");
- return;
- }
-
- /* freeze the timebase and read it */
- /* disable interrupts so the timebase is disabled for the
- shortest possible time */
- local_irq_save(flags);
- pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
+ if (freeze)
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
+ else
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
- mb();
- pri_tb_hi = get_tbu();
- pri_tb_lo = get_tbl();
- pri_tb_stamp = last_jiffy_stamp(smp_processor_id());
- mb();
-
- /* tell the secondary we're ready */
- sec_tb_reset = 2;
- mb();
-
- /* wait for the secondary to have taken it */
- /* note: can't use udelay here, since it needs the timebase running */
- for (t = 10000000; t > 0 && sec_tb_reset; --t)
- barrier();
- if (sec_tb_reset)
- /* XXX BUG_ON here? */
- printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n");
-
- /* Now, restart the timebase by leaving the GPIO to an open collector */
- pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
- pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
- local_irq_restore(flags);
}
-/* not __init, called in sleep/wakeup code */
-void smp_core99_take_timebase(void)
-{
- unsigned long flags;
-
- /* tell the primary we're here */
- sec_tb_reset = 1;
- mb();
-
- /* wait for the primary to set pri_tb_hi/lo */
- while (sec_tb_reset < 2)
- mb();
-
- /* set our stuff the same as the primary */
- local_irq_save(flags);
- set_dec(1);
- set_tb(pri_tb_hi, pri_tb_lo);
- last_jiffy_stamp(smp_processor_id()) = pri_tb_stamp;
- mb();
- /* tell the primary we're done */
- sec_tb_reset = 0;
- mb();
- local_irq_restore(flags);
-}
+#endif /* !CONFIG_PPC64 */
/* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
volatile static long int core99_l2_cache;
@@ -677,6 +649,7 @@ volatile static long int core99_l3_cache;
static void __devinit core99_init_caches(int cpu)
{
+#ifndef CONFIG_PPC64
if (!cpu_has_feature(CPU_FTR_L2CR))
return;
@@ -702,30 +675,76 @@ static void __devinit core99_init_caches(int cpu)
_set_L3CR(core99_l3_cache);
printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
}
+#endif /* !CONFIG_PPC64 */
}
static void __init smp_core99_setup(int ncpus)
{
- struct device_node *cpu;
- u32 *tbprop = NULL;
- int i;
+#ifdef CONFIG_PPC64
+
+ /* i2c based HW sync on some G5s */
+ if (machine_is_compatible("PowerMac7,2") ||
+ machine_is_compatible("PowerMac7,3") ||
+ machine_is_compatible("RackMac3,1"))
+ smp_core99_setup_i2c_hwsync(ncpus);
- core99_tb_gpio = KL_GPIO_TB_ENABLE; /* default value */
- cpu = of_find_node_by_type(NULL, "cpu");
- if (cpu != NULL) {
- tbprop = (u32 *)get_property(cpu, "timebase-enable", NULL);
- if (tbprop)
- core99_tb_gpio = *tbprop;
- of_node_put(cpu);
+ /* pfunc based HW sync on recent G5s */
+ if (pmac_tb_freeze == NULL) {
+ struct device_node *cpus =
+ of_find_node_by_path("/cpus");
+ if (cpus &&
+ get_property(cpus, "platform-cpu-timebase", NULL)) {
+ pmac_tb_freeze = smp_core99_pfunc_tb_freeze;
+ printk(KERN_INFO "Processor timebase sync using"
+ " platform function\n");
+ }
}
- /* XXX should get this from reg properties */
- for (i = 1; i < ncpus; ++i)
- smp_hw_index[i] = i;
- powersave_nap = 0;
-}
+#else /* CONFIG_PPC64 */
+
+ /* GPIO based HW sync on ppc32 Core99 */
+ if (pmac_tb_freeze == NULL && !machine_is_compatible("MacRISC4")) {
+ struct device_node *cpu;
+ u32 *tbprop = NULL;
+
+ core99_tb_gpio = KL_GPIO_TB_ENABLE; /* default value */
+ cpu = of_find_node_by_type(NULL, "cpu");
+ if (cpu != NULL) {
+ tbprop = (u32 *)get_property(cpu, "timebase-enable",
+ NULL);
+ if (tbprop)
+ core99_tb_gpio = *tbprop;
+ of_node_put(cpu);
+ }
+ pmac_tb_freeze = smp_core99_gpio_tb_freeze;
+ printk(KERN_INFO "Processor timebase sync using"
+ " GPIO 0x%02x\n", core99_tb_gpio);
+ }
+
+#endif /* CONFIG_PPC64 */
+
+ /* No timebase sync, fallback to software */
+ if (pmac_tb_freeze == NULL) {
+ smp_ops->give_timebase = smp_generic_give_timebase;
+ smp_ops->take_timebase = smp_generic_take_timebase;
+ printk(KERN_INFO "Processor timebase sync using software\n");
+ }
+
+#ifndef CONFIG_PPC64
+ {
+ int i;
+
+ /* XXX should get this from reg properties */
+ for (i = 1; i < ncpus; ++i)
+ smp_hw_index[i] = i;
+ }
#endif
+ /* 32 bits SMP can't NAP */
+ if (!machine_is_compatible("MacRISC4"))
+ powersave_nap = 0;
+}
+
static int __init smp_core99_probe(void)
{
struct device_node *cpus;
@@ -743,8 +762,19 @@ static int __init smp_core99_probe(void)
if (ncpus <= 1)
return 1;
+ /* We need to perform some early initialisations before we can start
+ * setting up SMP as we are running before initcalls
+ */
+ pmac_pfunc_base_install();
+ pmac_i2c_init();
+
+ /* Setup various bits like timebase sync method, ability to nap, ... */
smp_core99_setup(ncpus);
+
+ /* Install IPIs */
mpic_request_ipis();
+
+ /* Collect l2cr and l3cr values from CPU 0 */
core99_init_caches(0);
return ncpus;
@@ -753,14 +783,15 @@ static int __init smp_core99_probe(void)
static void __devinit smp_core99_kick_cpu(int nr)
{
unsigned int save_vector;
- unsigned long new_vector;
- unsigned long flags;
+ unsigned long target, flags;
volatile unsigned int *vector
= ((volatile unsigned int *)(KERNELBASE+0x100));
if (nr < 0 || nr > 3)
return;
- if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
+
+ if (ppc_md.progress)
+ ppc_md.progress("smp_core99_kick_cpu", 0x346);
local_irq_save(flags);
local_irq_disable();
@@ -768,14 +799,11 @@ static void __devinit smp_core99_kick_cpu(int nr)
/* Save reset vector */
save_vector = *vector;
- /* Setup fake reset vector that does
+ /* Setup fake reset vector that does
* b __secondary_start_pmac_0 + nr*8 - KERNELBASE
*/
- new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8;
- *vector = 0x48000002 + new_vector - KERNELBASE;
-
- /* flush data cache and inval instruction cache */
- flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
+ target = (unsigned long) __secondary_start_pmac_0 + nr * 8;
+ create_branch((unsigned long)vector, target, BRANCH_SET_LINK);
/* Put some life in our friend */
pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
@@ -805,17 +833,25 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
mpic_setup_this_cpu();
if (cpu_nr == 0) {
-#ifdef CONFIG_POWER4
+#ifdef CONFIG_PPC64
extern void g5_phy_disable_cpu1(void);
+ /* Close i2c bus if it was used for tb sync */
+ if (pmac_tb_clock_chip_host) {
+ pmac_i2c_close(pmac_tb_clock_chip_host);
+ pmac_tb_clock_chip_host = NULL;
+ }
+
/* If we didn't start the second CPU, we must take
* it off the bus
*/
if (machine_is_compatible("MacRISC4") &&
num_online_cpus() < 2)
g5_phy_disable_cpu1();
-#endif /* CONFIG_POWER4 */
- if (ppc_md.progress) ppc_md.progress("core99_setup_cpu 0 done", 0x349);
+#endif /* CONFIG_PPC64 */
+
+ if (ppc_md.progress)
+ ppc_md.progress("core99_setup_cpu 0 done", 0x349);
}
}
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
index feb0a94e781..5d9afa1fa02 100644
--- a/arch/powerpc/platforms/powermac/time.c
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -258,15 +258,20 @@ int __init via_calibrate_decr(void)
volatile unsigned char __iomem *via;
int count = VIA_TIMER_FREQ_6 / 100;
unsigned int dstart, dend;
+ struct resource rsrc;
- vias = find_devices("via-cuda");
+ vias = of_find_node_by_name(NULL, "via-cuda");
if (vias == 0)
- vias = find_devices("via-pmu");
+ vias = of_find_node_by_name(NULL, "via-pmu");
if (vias == 0)
- vias = find_devices("via");
- if (vias == 0 || vias->n_addrs == 0)
+ vias = of_find_node_by_name(NULL, "via");
+ if (vias == 0 || of_address_to_resource(vias, 0, &rsrc))
return 0;
- via = ioremap(vias->addrs[0].address, vias->addrs[0].size);
+ via = ioremap(rsrc.start, rsrc.end - rsrc.start + 1);
+ if (via == NULL) {
+ printk(KERN_ERR "Failed to map VIA for timer calibration !\n");
+ return 0;
+ }
/* set timer 1 for continuous interrupts */
out_8(&via[ACR], (via[ACR] & ~T1MODE) | T1MODE_CONT);
diff --git a/arch/powerpc/platforms/powermac/udbg_adb.c b/arch/powerpc/platforms/powermac/udbg_adb.c
new file mode 100644
index 00000000000..06c8265c2ba
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/udbg_adb.c
@@ -0,0 +1,221 @@
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <linux/ptrace.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+#include <linux/cuda.h>
+#include <asm/machdep.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/xmon.h>
+#include <asm/prom.h>
+#include <asm/bootx.h>
+#include <asm/errno.h>
+#include <asm/pmac_feature.h>
+#include <asm/processor.h>
+#include <asm/delay.h>
+#include <asm/btext.h>
+#include <asm/time.h>
+#include <asm/udbg.h>
+
+/*
+ * This implementation is "special", it can "patch" the current
+ * udbg implementation and work on top of it. It must thus be
+ * initialized last
+ */
+
+static void (*udbg_adb_old_putc)(char c);
+static int (*udbg_adb_old_getc)(void);
+static int (*udbg_adb_old_getc_poll)(void);
+
+static enum {
+ input_adb_none,
+ input_adb_pmu,
+ input_adb_cuda,
+} input_type = input_adb_none;
+
+int xmon_wants_key, xmon_adb_keycode;
+
+static inline void udbg_adb_poll(void)
+{
+#ifdef CONFIG_ADB_PMU
+ if (input_type == input_adb_pmu)
+ pmu_poll_adb();
+#endif /* CONFIG_ADB_PMU */
+#ifdef CONFIG_ADB_CUDA
+ if (input_type == input_adb_cuda)
+ cuda_poll();
+#endif /* CONFIG_ADB_CUDA */
+}
+
+#ifdef CONFIG_BOOTX_TEXT
+
+static int udbg_adb_use_btext;
+static int xmon_adb_shiftstate;
+
+static unsigned char xmon_keytab[128] =
+ "asdfhgzxcv\000bqwer" /* 0x00 - 0x0f */
+ "yt123465=97-80]o" /* 0x10 - 0x1f */
+ "u[ip\rlj'k;\\,/nm." /* 0x20 - 0x2f */
+ "\t `\177\0\033\0\0\0\0\0\0\0\0\0\0" /* 0x30 - 0x3f */
+ "\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */
+ "\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */
+
+static unsigned char xmon_shift_keytab[128] =
+ "ASDFHGZXCV\000BQWER" /* 0x00 - 0x0f */
+ "YT!@#$^%+(&_*)}O" /* 0x10 - 0x1f */
+ "U{IP\rLJ\"K:|<?NM>" /* 0x20 - 0x2f */
+ "\t ~\177\0\033\0\0\0\0\0\0\0\0\0\0" /* 0x30 - 0x3f */
+ "\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */
+ "\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */
+
+static int udbg_adb_local_getc(void)
+{
+ int k, t, on;
+
+ xmon_wants_key = 1;
+ for (;;) {
+ xmon_adb_keycode = -1;
+ t = 0;
+ on = 0;
+ k = -1;
+ do {
+ if (--t < 0) {
+ on = 1 - on;
+ btext_drawchar(on? 0xdb: 0x20);
+ btext_drawchar('\b');
+ t = 200000;
+ }
+ udbg_adb_poll();
+ if (udbg_adb_old_getc_poll)
+ k = udbg_adb_old_getc_poll();
+ } while (k == -1 && xmon_adb_keycode == -1);
+ if (on)
+ btext_drawstring(" \b");
+ if (k != -1)
+ return k;
+ k = xmon_adb_keycode;
+
+ /* test for shift keys */
+ if ((k & 0x7f) == 0x38 || (k & 0x7f) == 0x7b) {
+ xmon_adb_shiftstate = (k & 0x80) == 0;
+ continue;
+ }
+ if (k >= 0x80)
+ continue; /* ignore up transitions */
+ k = (xmon_adb_shiftstate? xmon_shift_keytab: xmon_keytab)[k];
+ if (k != 0)
+ break;
+ }
+ xmon_wants_key = 0;
+ return k;
+}
+#endif /* CONFIG_BOOTX_TEXT */
+
+static int udbg_adb_getc(void)
+{
+#ifdef CONFIG_BOOTX_TEXT
+ if (udbg_adb_use_btext && input_type != input_adb_none)
+ return udbg_adb_local_getc();
+#endif
+ if (udbg_adb_old_getc)
+ return udbg_adb_old_getc();
+ return -1;
+}
+
+/* getc_poll() is not really used, unless you have the xmon-over-modem
+ * hack that doesn't quite concern us here, thus we just poll the low level
+ * ADB driver to prevent it from timing out and call back the original poll
+ * routine.
+ */
+static int udbg_adb_getc_poll(void)
+{
+ udbg_adb_poll();
+
+ if (udbg_adb_old_getc_poll)
+ return udbg_adb_old_getc_poll();
+ return -1;
+}
+
+static void udbg_adb_putc(char c)
+{
+#ifdef CONFIG_BOOTX_TEXT
+ if (udbg_adb_use_btext)
+ btext_drawchar(c);
+#endif
+ if (udbg_adb_old_putc)
+ return udbg_adb_old_putc(c);
+}
+
+void udbg_adb_init_early(void)
+{
+#ifdef CONFIG_BOOTX_TEXT
+ if (btext_find_display(1) == 0) {
+ udbg_adb_use_btext = 1;
+ udbg_putc = udbg_adb_putc;
+ }
+#endif
+}
+
+int udbg_adb_init(int force_btext)
+{
+ struct device_node *np;
+
+ /* Capture existing callbacks */
+ udbg_adb_old_putc = udbg_putc;
+ udbg_adb_old_getc = udbg_getc;
+ udbg_adb_old_getc_poll = udbg_getc_poll;
+
+ /* Check if our early init was already called */
+ if (udbg_adb_old_putc == udbg_adb_putc)
+ udbg_adb_old_putc = NULL;
+#ifdef CONFIG_BOOTX_TEXT
+ if (udbg_adb_old_putc == btext_drawchar)
+ udbg_adb_old_putc = NULL;
+#endif
+
+ /* Set ours as output */
+ udbg_putc = udbg_adb_putc;
+ udbg_getc = udbg_adb_getc;
+ udbg_getc_poll = udbg_adb_getc_poll;
+
+#ifdef CONFIG_BOOTX_TEXT
+ /* Check if we should use btext output */
+ if (btext_find_display(force_btext) == 0)
+ udbg_adb_use_btext = 1;
+#endif
+
+ /* See if there is a keyboard in the device tree with a parent
+ * of type "adb". If not, we return a failure, but we keep the
+ * btext output set for now
+ */
+ for (np = NULL; (np = of_find_node_by_name(np, "keyboard")) != NULL;) {
+ struct device_node *parent = of_get_parent(np);
+ int found = (parent && strcmp(parent->type, "adb") == 0);
+ of_node_put(parent);
+ if (found)
+ break;
+ }
+ if (np == NULL)
+ return -ENODEV;
+ of_node_put(np);
+
+#ifdef CONFIG_ADB_PMU
+ if (find_via_pmu())
+ input_type = input_adb_pmu;
+#endif
+#ifdef CONFIG_ADB_CUDA
+ if (find_via_cuda())
+ input_type = input_adb_cuda;
+#endif
+
+ /* Same as above: nothing found, keep btext set for output */
+ if (input_type == input_adb_none)
+ return -ENODEV;
+
+ return 0;
+}
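udbg_adb_init() above follows a simple hook-chaining pattern: capture whatever udbg_putc/udbg_getc/udbg_getc_poll handlers are currently installed, install its own, and fall back to the captured handlers when it has nothing better to do. A condensed sketch of that pattern for the putc hook alone, with hypothetical names; only udbg_putc itself is taken from the code above:

	/* Hypothetical illustration of the udbg hook-chaining used above. */
	static void (*old_putc)(char c);

	static void my_putc(char c)
	{
		/* ... emit c on the new backend here ... */
		if (old_putc)
			old_putc(c);		/* keep the previous sink working */
	}

	static void my_init(void)
	{
		old_putc = udbg_putc;		/* capture the current hook */
		if (old_putc == my_putc)	/* guard against re-registration */
			old_putc = NULL;
		udbg_putc = my_putc;		/* install ours in front */
	}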
diff --git a/arch/powerpc/kernel/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c
index 820c5355150..c4352a8db64 100644
--- a/arch/powerpc/kernel/udbg_scc.c
+++ b/arch/powerpc/platforms/powermac/udbg_scc.c
@@ -25,7 +25,7 @@ extern void real_writeb(u8 data, volatile u8 __iomem *addr);
static volatile u8 __iomem *sccc;
static volatile u8 __iomem *sccd;
-static void udbg_scc_putc(unsigned char c)
+static void udbg_scc_putc(char c)
{
if (sccc) {
while ((in_8(sccc) & SCC_TXRDY) == 0)
@@ -47,14 +47,14 @@ static int udbg_scc_getc_poll(void)
return -1;
}
-static unsigned char udbg_scc_getc(void)
+static int udbg_scc_getc(void)
{
if (sccc) {
while ((in_8(sccc) & SCC_RXRDY) == 0)
;
return in_8(sccd);
}
- return 0;
+ return -1;
}
static unsigned char scc_inittab[] = {
@@ -67,38 +67,59 @@ static unsigned char scc_inittab[] = {
3, 0xc1, /* rx enable, 8 bits */
};
-void udbg_init_scc(struct device_node *np)
+void udbg_scc_init(int force_scc)
{
u32 *reg;
unsigned long addr;
+ struct device_node *stdout = NULL, *escc = NULL, *macio = NULL;
+ struct device_node *ch, *ch_def = NULL, *ch_a = NULL;
+ char *path;
int i, x;
- if (np == NULL)
- np = of_find_node_by_name(NULL, "escc");
- if (np == NULL || np->parent == NULL)
- return;
+ escc = of_find_node_by_name(NULL, "escc");
+ if (escc == NULL)
+ goto bail;
+ macio = of_get_parent(escc);
+ if (macio == NULL)
+ goto bail;
+ path = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
+ if (path != NULL)
+ stdout = of_find_node_by_path(path);
+ for (ch = NULL; (ch = of_get_next_child(escc, ch)) != NULL;) {
+ if (ch == stdout)
+ ch_def = of_node_get(ch);
+ if (strcmp(ch->name, "ch-a") == 0)
+ ch_a = of_node_get(ch);
+ }
+ if (ch_def == NULL && !force_scc)
+ goto bail;
+
+ ch = ch_def ? ch_def : ch_a;
- udbg_printf("found SCC...\n");
/* Get address within mac-io ASIC */
- reg = (u32 *)get_property(np, "reg", NULL);
+ reg = (u32 *)get_property(escc, "reg", NULL);
if (reg == NULL)
- return;
+ goto bail;
addr = reg[0];
- udbg_printf("local addr: %lx\n", addr);
+
/* Get address of mac-io PCI itself */
- reg = (u32 *)get_property(np->parent, "assigned-addresses", NULL);
+ reg = (u32 *)get_property(macio, "assigned-addresses", NULL);
if (reg == NULL)
- return;
+ goto bail;
addr += reg[2];
- udbg_printf("final addr: %lx\n", addr);
+
+ /* Lock the serial port */
+ pmac_call_feature(PMAC_FTR_SCC_ENABLE, ch,
+ PMAC_SCC_ASYNC | PMAC_SCC_FLAG_XMON, 1);
+
/* Setup for 57600 8N1 */
- addr += 0x20;
+ if (ch == ch_a)
+ addr += 0x20;
sccc = (volatile u8 * __iomem) ioremap(addr & PAGE_MASK, PAGE_SIZE) ;
sccc += addr & ~PAGE_MASK;
sccd = sccc + 0x10;
- udbg_printf("ioremap result sccc: %p\n", sccc);
mb();
for (i = 20000; i != 0; --i)
@@ -113,9 +134,17 @@ void udbg_init_scc(struct device_node *np)
udbg_getc_poll = udbg_scc_getc_poll;
udbg_puts("Hello World !\n");
+
+ bail:
+ of_node_put(macio);
+ of_node_put(escc);
+ of_node_put(stdout);
+ of_node_put(ch_def);
+ of_node_put(ch_a);
}
-static void udbg_real_scc_putc(unsigned char c)
+#ifdef CONFIG_PPC64
+static void udbg_real_scc_putc(char c)
{
while ((real_readb(sccc) & SCC_TXRDY) == 0)
;
@@ -124,7 +153,7 @@ static void udbg_real_scc_putc(unsigned char c)
udbg_real_scc_putc('\r');
}
-void udbg_init_pmac_realmode(void)
+void __init udbg_init_pmac_realmode(void)
{
sccc = (volatile u8 __iomem *)0x80013020ul;
sccd = (volatile u8 __iomem *)0x80013030ul;
@@ -133,3 +162,4 @@ void udbg_init_pmac_realmode(void)
udbg_getc = NULL;
udbg_getc_poll = NULL;
}
+#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 06d5ef50121..61616d14407 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -1,10 +1,10 @@
obj-y := pci.o lpar.o hvCall.o nvram.o reconfig.o \
- setup.o iommu.o ras.o rtasd.o
+ setup.o iommu.o ras.o rtasd.o pci_dlpar.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_XICS) += xics.o
obj-$(CONFIG_SCANLOG) += scanlog.o
-obj-$(CONFIG_EEH) += eeh.o eeh_event.o
+obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o
obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
obj-$(CONFIG_HVCS) += hvcserver.o
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index c8d2a40dc5b..83578313ee7 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -76,15 +76,14 @@
*/
#define EEH_MAX_FAILS 100000
-/* Misc forward declaraions */
-static void eeh_save_bars(struct pci_dev * pdev, struct pci_dn *pdn);
-
/* RTAS tokens */
static int ibm_set_eeh_option;
static int ibm_set_slot_reset;
static int ibm_read_slot_reset_state;
static int ibm_read_slot_reset_state2;
static int ibm_slot_error_detail;
+static int ibm_get_config_addr_info;
+static int ibm_configure_bridge;
int eeh_subsystem_enabled;
EXPORT_SYMBOL(eeh_subsystem_enabled);
@@ -98,308 +97,23 @@ static DEFINE_SPINLOCK(slot_errbuf_lock);
static int eeh_error_buf_size;
/* System monitoring statistics */
-static DEFINE_PER_CPU(unsigned long, no_device);
-static DEFINE_PER_CPU(unsigned long, no_dn);
-static DEFINE_PER_CPU(unsigned long, no_cfg_addr);
-static DEFINE_PER_CPU(unsigned long, ignored_check);
-static DEFINE_PER_CPU(unsigned long, total_mmio_ffs);
-static DEFINE_PER_CPU(unsigned long, false_positives);
-static DEFINE_PER_CPU(unsigned long, ignored_failures);
-static DEFINE_PER_CPU(unsigned long, slot_resets);
-
-/**
- * The pci address cache subsystem. This subsystem places
- * PCI device address resources into a red-black tree, sorted
- * according to the address range, so that given only an i/o
- * address, the corresponding PCI device can be **quickly**
- * found. It is safe to perform an address lookup in an interrupt
- * context; this ability is an important feature.
- *
- * Currently, the only customer of this code is the EEH subsystem;
- * thus, this code has been somewhat tailored to suit EEH better.
- * In particular, the cache does *not* hold the addresses of devices
- * for which EEH is not enabled.
- *
- * (Implementation Note: The RB tree seems to be better/faster
- * than any hash algo I could think of for this problem, even
- * with the penalty of slow pointer chases for d-cache misses).
- */
-struct pci_io_addr_range
-{
- struct rb_node rb_node;
- unsigned long addr_lo;
- unsigned long addr_hi;
- struct pci_dev *pcidev;
- unsigned int flags;
-};
-
-static struct pci_io_addr_cache
-{
- struct rb_root rb_root;
- spinlock_t piar_lock;
-} pci_io_addr_cache_root;
-
-static inline struct pci_dev *__pci_get_device_by_addr(unsigned long addr)
-{
- struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node;
-
- while (n) {
- struct pci_io_addr_range *piar;
- piar = rb_entry(n, struct pci_io_addr_range, rb_node);
-
- if (addr < piar->addr_lo) {
- n = n->rb_left;
- } else {
- if (addr > piar->addr_hi) {
- n = n->rb_right;
- } else {
- pci_dev_get(piar->pcidev);
- return piar->pcidev;
- }
- }
- }
-
- return NULL;
-}
-
-/**
- * pci_get_device_by_addr - Get device, given only address
- * @addr: mmio (PIO) phys address or i/o port number
- *
- * Given an mmio phys address, or a port number, find a pci device
- * that implements this address. Be sure to pci_dev_put the device
- * when finished. I/O port numbers are assumed to be offset
- * from zero (that is, they do *not* have pci_io_addr added in).
- * It is safe to call this function within an interrupt.
- */
-static struct pci_dev *pci_get_device_by_addr(unsigned long addr)
-{
- struct pci_dev *dev;
- unsigned long flags;
-
- spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
- dev = __pci_get_device_by_addr(addr);
- spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
- return dev;
-}
-
-#ifdef DEBUG
-/*
- * Handy-dandy debug print routine, does nothing more
- * than print out the contents of our addr cache.
- */
-static void pci_addr_cache_print(struct pci_io_addr_cache *cache)
-{
- struct rb_node *n;
- int cnt = 0;
-
- n = rb_first(&cache->rb_root);
- while (n) {
- struct pci_io_addr_range *piar;
- piar = rb_entry(n, struct pci_io_addr_range, rb_node);
- printk(KERN_DEBUG "PCI: %s addr range %d [%lx-%lx]: %s\n",
- (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt,
- piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev));
- cnt++;
- n = rb_next(n);
- }
-}
-#endif
-
-/* Insert address range into the rb tree. */
-static struct pci_io_addr_range *
-pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
- unsigned long ahi, unsigned int flags)
-{
- struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node;
- struct rb_node *parent = NULL;
- struct pci_io_addr_range *piar;
-
- /* Walk tree, find a place to insert into tree */
- while (*p) {
- parent = *p;
- piar = rb_entry(parent, struct pci_io_addr_range, rb_node);
- if (ahi < piar->addr_lo) {
- p = &parent->rb_left;
- } else if (alo > piar->addr_hi) {
- p = &parent->rb_right;
- } else {
- if (dev != piar->pcidev ||
- alo != piar->addr_lo || ahi != piar->addr_hi) {
- printk(KERN_WARNING "PIAR: overlapping address range\n");
- }
- return piar;
- }
- }
- piar = (struct pci_io_addr_range *)kmalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC);
- if (!piar)
- return NULL;
-
- piar->addr_lo = alo;
- piar->addr_hi = ahi;
- piar->pcidev = dev;
- piar->flags = flags;
-
-#ifdef DEBUG
- printk(KERN_DEBUG "PIAR: insert range=[%lx:%lx] dev=%s\n",
- alo, ahi, pci_name (dev));
-#endif
+static unsigned long no_device;
+static unsigned long no_dn;
+static unsigned long no_cfg_addr;
+static unsigned long ignored_check;
+static unsigned long total_mmio_ffs;
+static unsigned long false_positives;
+static unsigned long ignored_failures;
+static unsigned long slot_resets;
- rb_link_node(&piar->rb_node, parent, p);
- rb_insert_color(&piar->rb_node, &pci_io_addr_cache_root.rb_root);
-
- return piar;
-}
-
-static void __pci_addr_cache_insert_device(struct pci_dev *dev)
-{
- struct device_node *dn;
- struct pci_dn *pdn;
- int i;
- int inserted = 0;
-
- dn = pci_device_to_OF_node(dev);
- if (!dn) {
- printk(KERN_WARNING "PCI: no pci dn found for dev=%s\n", pci_name(dev));
- return;
- }
-
- /* Skip any devices for which EEH is not enabled. */
- pdn = PCI_DN(dn);
- if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
- pdn->eeh_mode & EEH_MODE_NOCHECK) {
-#ifdef DEBUG
- printk(KERN_INFO "PCI: skip building address cache for=%s - %s\n",
- pci_name(dev), pdn->node->full_name);
-#endif
- return;
- }
-
- /* The cache holds a reference to the device... */
- pci_dev_get(dev);
-
- /* Walk resources on this device, poke them into the tree */
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- unsigned long start = pci_resource_start(dev,i);
- unsigned long end = pci_resource_end(dev,i);
- unsigned int flags = pci_resource_flags(dev,i);
-
- /* We are interested only bus addresses, not dma or other stuff */
- if (0 == (flags & (IORESOURCE_IO | IORESOURCE_MEM)))
- continue;
- if (start == 0 || ~start == 0 || end == 0 || ~end == 0)
- continue;
- pci_addr_cache_insert(dev, start, end, flags);
- inserted = 1;
- }
-
- /* If there was nothing to add, the cache has no reference... */
- if (!inserted)
- pci_dev_put(dev);
-}
-
-/**
- * pci_addr_cache_insert_device - Add a device to the address cache
- * @dev: PCI device whose I/O addresses we are interested in.
- *
- * In order to support the fast lookup of devices based on addresses,
- * we maintain a cache of devices that can be quickly searched.
- * This routine adds a device to that cache.
- */
-static void pci_addr_cache_insert_device(struct pci_dev *dev)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
- __pci_addr_cache_insert_device(dev);
- spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
-}
-
-static inline void __pci_addr_cache_remove_device(struct pci_dev *dev)
-{
- struct rb_node *n;
- int removed = 0;
-
-restart:
- n = rb_first(&pci_io_addr_cache_root.rb_root);
- while (n) {
- struct pci_io_addr_range *piar;
- piar = rb_entry(n, struct pci_io_addr_range, rb_node);
-
- if (piar->pcidev == dev) {
- rb_erase(n, &pci_io_addr_cache_root.rb_root);
- removed = 1;
- kfree(piar);
- goto restart;
- }
- n = rb_next(n);
- }
-
- /* The cache no longer holds its reference to this device... */
- if (removed)
- pci_dev_put(dev);
-}
-
-/**
- * pci_addr_cache_remove_device - remove pci device from addr cache
- * @dev: device to remove
- *
- * Remove a device from the addr-cache tree.
- * This is potentially expensive, since it will walk
- * the tree multiple times (once per resource).
- * But so what; device removal doesn't need to be that fast.
- */
-static void pci_addr_cache_remove_device(struct pci_dev *dev)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
- __pci_addr_cache_remove_device(dev);
- spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
-}
-
-/**
- * pci_addr_cache_build - Build a cache of I/O addresses
- *
- * Build a cache of pci i/o addresses. This cache will be used to
- * find the pci device that corresponds to a given address.
- * This routine scans all pci busses to build the cache.
- * Must be run late in boot process, after the pci controllers
- * have been scaned for devices (after all device resources are known).
- */
-void __init pci_addr_cache_build(void)
-{
- struct device_node *dn;
- struct pci_dev *dev = NULL;
-
- if (!eeh_subsystem_enabled)
- return;
-
- spin_lock_init(&pci_io_addr_cache_root.piar_lock);
-
- while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
- /* Ignore PCI bridges ( XXX why ??) */
- if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
- continue;
- }
- pci_addr_cache_insert_device(dev);
-
- /* Save the BAR's; firmware doesn't restore these after EEH reset */
- dn = pci_device_to_OF_node(dev);
- eeh_save_bars(dev, PCI_DN(dn));
- }
-
-#ifdef DEBUG
- /* Verify tree built up above, echo back the list of addrs. */
- pci_addr_cache_print(&pci_io_addr_cache_root);
-#endif
-}
+#define IS_BRIDGE(class_code) (((class_code)>>16) == PCI_BASE_CLASS_BRIDGE)
/* --------------------------------------------------------------- */
-/* Above lies the PCI Address Cache. Below lies the EEH event infrastructure */
+/* Below lies the EEH event infrastructure */
void eeh_slot_error_detail (struct pci_dn *pdn, int severity)
{
+ int config_addr;
unsigned long flags;
int rc;
@@ -407,8 +121,13 @@ void eeh_slot_error_detail (struct pci_dn *pdn, int severity)
spin_lock_irqsave(&slot_errbuf_lock, flags);
memset(slot_errbuf, 0, eeh_error_buf_size);
+ /* Use PE configuration address, if present */
+ config_addr = pdn->eeh_config_addr;
+ if (pdn->eeh_pe_config_addr)
+ config_addr = pdn->eeh_pe_config_addr;
+
rc = rtas_call(ibm_slot_error_detail,
- 8, 1, NULL, pdn->eeh_config_addr,
+ 8, 1, NULL, config_addr,
BUID_HI(pdn->phb->buid),
BUID_LO(pdn->phb->buid), NULL, 0,
virt_to_phys(slot_errbuf),
@@ -428,6 +147,7 @@ void eeh_slot_error_detail (struct pci_dn *pdn, int severity)
static int read_slot_reset_state(struct pci_dn *pdn, int rets[])
{
int token, outputs;
+ int config_addr;
if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
token = ibm_read_slot_reset_state2;
@@ -438,7 +158,12 @@ static int read_slot_reset_state(struct pci_dn *pdn, int rets[])
outputs = 3;
}
- return rtas_call(token, 3, outputs, rets, pdn->eeh_config_addr,
+ /* Use PE configuration address, if present */
+ config_addr = pdn->eeh_config_addr;
+ if (pdn->eeh_pe_config_addr)
+ config_addr = pdn->eeh_pe_config_addr;
+
+ return rtas_call(token, 3, outputs, rets, config_addr,
BUID_HI(pdn->phb->buid), BUID_LO(pdn->phb->buid));
}
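Several RTAS call sites in this patch repeat the same fallback: prefer the PE configuration address when firmware supplied one, otherwise fall back to the per-device config address. A possible helper factoring that out; the helper name is made up here, and the patch open-codes the test at each call site:

	static inline int eeh_pdn_config_addr(struct pci_dn *pdn)
	{
		/* Use PE configuration address, if present */
		if (pdn->eeh_pe_config_addr)
			return pdn->eeh_pe_config_addr;
		return pdn->eeh_config_addr;
	}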
@@ -462,7 +187,7 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
/**
* Return the "partitionable endpoint" (pe) under which this device lies
*/
-static struct device_node * find_device_pe(struct device_node *dn)
+struct device_node * find_device_pe(struct device_node *dn)
{
while ((dn->parent) && PCI_DN(dn->parent) &&
(PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) {
@@ -483,8 +208,14 @@ static void __eeh_mark_slot (struct device_node *dn, int mode_flag)
{
while (dn) {
if (PCI_DN(dn)) {
+ /* Mark the pci device driver too */
+ struct pci_dev *dev = PCI_DN(dn)->pcidev;
+
PCI_DN(dn)->eeh_mode |= mode_flag;
+ if (dev && dev->driver)
+ dev->error_state = pci_channel_io_frozen;
+
if (dn->child)
__eeh_mark_slot (dn->child, mode_flag);
}
@@ -495,6 +226,11 @@ static void __eeh_mark_slot (struct device_node *dn, int mode_flag)
void eeh_mark_slot (struct device_node *dn, int mode_flag)
{
dn = find_device_pe (dn);
+
+ /* Back up one, since config addrs might be shared */
+ if (PCI_DN(dn) && PCI_DN(dn)->eeh_pe_config_addr)
+ dn = dn->parent;
+
PCI_DN(dn)->eeh_mode |= mode_flag;
__eeh_mark_slot (dn->child, mode_flag);
}
@@ -516,7 +252,13 @@ void eeh_clear_slot (struct device_node *dn, int mode_flag)
{
unsigned long flags;
spin_lock_irqsave(&confirm_error_lock, flags);
+
dn = find_device_pe (dn);
+
+ /* Back up one, since config addrs might be shared */
+ if (PCI_DN(dn) && PCI_DN(dn)->eeh_pe_config_addr)
+ dn = dn->parent;
+
PCI_DN(dn)->eeh_mode &= ~mode_flag;
PCI_DN(dn)->eeh_check_count = 0;
__eeh_clear_slot (dn->child, mode_flag);
@@ -544,15 +286,16 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
int rets[3];
unsigned long flags;
struct pci_dn *pdn;
+ enum pci_channel_state state;
int rc = 0;
- __get_cpu_var(total_mmio_ffs)++;
+ total_mmio_ffs++;
if (!eeh_subsystem_enabled)
return 0;
if (!dn) {
- __get_cpu_var(no_dn)++;
+ no_dn++;
return 0;
}
pdn = PCI_DN(dn);
@@ -560,7 +303,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
/* Access to IO BARs might get this far and still not want checking. */
if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
pdn->eeh_mode & EEH_MODE_NOCHECK) {
- __get_cpu_var(ignored_check)++;
+ ignored_check++;
#ifdef DEBUG
printk ("EEH:ignored check (%x) for %s %s\n",
pdn->eeh_mode, pci_name (dev), dn->full_name);
@@ -568,8 +311,8 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
return 0;
}
- if (!pdn->eeh_config_addr) {
- __get_cpu_var(no_cfg_addr)++;
+ if (!pdn->eeh_config_addr && !pdn->eeh_pe_config_addr) {
+ no_cfg_addr++;
return 0;
}
@@ -611,7 +354,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
if (ret != 0) {
printk(KERN_WARNING "EEH: read_slot_reset_state() failed; rc=%d dn=%s\n",
ret, dn->full_name);
- __get_cpu_var(false_positives)++;
+ false_positives++;
rc = 0;
goto dn_unlock;
}
@@ -620,14 +363,14 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
if (rets[1] != 1) {
printk(KERN_WARNING "EEH: event on unsupported device, rc=%d dn=%s\n",
ret, dn->full_name);
- __get_cpu_var(false_positives)++;
+ false_positives++;
rc = 0;
goto dn_unlock;
}
/* If not the kind of error we know about, punt. */
if (rets[0] != 2 && rets[0] != 4 && rets[0] != 5) {
- __get_cpu_var(false_positives)++;
+ false_positives++;
rc = 0;
goto dn_unlock;
}
@@ -635,12 +378,12 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
/* Note that config-io to empty slots may fail;
* we recognize empty because they don't have children. */
if ((rets[0] == 5) && (dn->child == NULL)) {
- __get_cpu_var(false_positives)++;
+ false_positives++;
rc = 0;
goto dn_unlock;
}
- __get_cpu_var(slot_resets)++;
+ slot_resets++;
/* Avoid repeated reports of this failure, including problems
* with other functions on this device, and functions under
@@ -648,8 +391,13 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
eeh_mark_slot (dn, EEH_MODE_ISOLATED);
spin_unlock_irqrestore(&confirm_error_lock, flags);
- eeh_send_failure_event (dn, dev, rets[0], rets[2]);
-
+ state = pci_channel_io_normal;
+ if ((rets[0] == 2) || (rets[0] == 4))
+ state = pci_channel_io_frozen;
+ if (rets[0] == 5)
+ state = pci_channel_io_perm_failure;
+ eeh_send_failure_event (dn, dev, state, rets[2]);
+
/* Most EEH events are due to device driver bugs. Having
* a stack trace will help the device-driver authors figure
* out what happened. So print that out. */
@@ -685,7 +433,7 @@ unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned lon
addr = eeh_token_to_phys((unsigned long __force) token);
dev = pci_get_device_by_addr(addr);
if (!dev) {
- __get_cpu_var(no_device)++;
+ no_device++;
return val;
}
@@ -716,11 +464,16 @@ eeh_slot_availability(struct pci_dn *pdn)
if (rc) return rc;
if (rets[1] == 0) return -1; /* EEH is not supported */
- if (rets[0] == 0) return 0; /* Oll Korrect */
+ if (rets[0] == 0) return 0; /* Oll Korrect */
if (rets[0] == 5) {
if (rets[2] == 0) return -1; /* permanently unavailable */
return rets[2]; /* number of millisecs to wait */
}
+ if (rets[0] == 1)
+ return 250;
+
+ printk (KERN_ERR "EEH: Slot unavailable: rc=%d, rets=%d %d %d\n",
+ rc, rets[0], rets[1], rets[2]);
return -1;
}
@@ -737,6 +490,7 @@ eeh_slot_availability(struct pci_dn *pdn)
static void
rtas_pci_slot_reset(struct pci_dn *pdn, int state)
{
+ int config_addr;
int rc;
BUG_ON (pdn==NULL);
@@ -747,8 +501,13 @@ rtas_pci_slot_reset(struct pci_dn *pdn, int state)
return;
}
+ /* Use PE configuration address, if present */
+ config_addr = pdn->eeh_config_addr;
+ if (pdn->eeh_pe_config_addr)
+ config_addr = pdn->eeh_pe_config_addr;
+
rc = rtas_call(ibm_set_slot_reset,4,1, NULL,
- pdn->eeh_config_addr,
+ config_addr,
BUID_HI(pdn->phb->buid),
BUID_LO(pdn->phb->buid),
state);
@@ -761,9 +520,11 @@ rtas_pci_slot_reset(struct pci_dn *pdn, int state)
/** rtas_set_slot_reset -- assert the pci #RST line for 1/4 second
* dn -- device node to be reset.
+ *
+ * Returns 0 on success, else a non-zero value.
*/
-void
+int
rtas_set_slot_reset(struct pci_dn *pdn)
{
int i, rc;
@@ -793,10 +554,21 @@ rtas_set_slot_reset(struct pci_dn *pdn)
* ready to be used; if not, wait for recovery. */
for (i=0; i<10; i++) {
rc = eeh_slot_availability (pdn);
- if (rc <= 0) break;
+ if (rc < 0)
+ printk (KERN_ERR "EEH: failed (%d) to reset slot %s\n", rc, pdn->node->full_name);
+ if (rc == 0)
+ return 0;
+ if (rc < 0)
+ return -1;
msleep (rc+100);
}
+
+ rc = eeh_slot_availability (pdn);
+ if (rc)
+ printk (KERN_ERR "EEH: timeout resetting slot %s\n", pdn->node->full_name);
+
+ return rc;
}
/* ------------------------------------------------------- */
@@ -851,7 +623,7 @@ void eeh_restore_bars(struct pci_dn *pdn)
if (!pdn)
return;
- if (! pdn->eeh_is_bridge)
+ if ((pdn->eeh_mode & EEH_MODE_SUPPORTED) && !IS_BRIDGE(pdn->class_code))
__restore_bars (pdn);
dn = pdn->node->child;
@@ -869,30 +641,30 @@ void eeh_restore_bars(struct pci_dn *pdn)
 * PCI devices are added individually; but, for the restore,
* an entire slot is reset at a time.
*/
-static void eeh_save_bars(struct pci_dev * pdev, struct pci_dn *pdn)
+static void eeh_save_bars(struct pci_dn *pdn)
{
int i;
- if (!pdev || !pdn )
+ if (!pdn )
return;
for (i = 0; i < 16; i++)
- pci_read_config_dword(pdev, i * 4, &pdn->config_space[i]);
-
- if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
- pdn->eeh_is_bridge = 1;
+ rtas_read_config(pdn, i * 4, 4, &pdn->config_space[i]);
}
void
rtas_configure_bridge(struct pci_dn *pdn)
{
- int token = rtas_token ("ibm,configure-bridge");
+ int config_addr;
int rc;
- if (token == RTAS_UNKNOWN_SERVICE)
- return;
- rc = rtas_call(token,3,1, NULL,
- pdn->eeh_config_addr,
+ /* Use PE configuration address, if present */
+ config_addr = pdn->eeh_config_addr;
+ if (pdn->eeh_pe_config_addr)
+ config_addr = pdn->eeh_pe_config_addr;
+
+ rc = rtas_call(ibm_configure_bridge,3,1, NULL,
+ config_addr,
BUID_HI(pdn->phb->buid),
BUID_LO(pdn->phb->buid));
if (rc) {
@@ -927,6 +699,7 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
int enable;
struct pci_dn *pdn = PCI_DN(dn);
+ pdn->class_code = 0;
pdn->eeh_mode = 0;
pdn->eeh_check_count = 0;
pdn->eeh_freeze_count = 0;
@@ -943,6 +716,7 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
pdn->eeh_mode |= EEH_MODE_NOCHECK;
return NULL;
}
+ pdn->class_code = *class_code;
/*
* Now decide if we are going to "Disable" EEH checking
@@ -953,8 +727,10 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
* But there are a few cases like display devices that make sense.
*/
enable = 1; /* i.e. we will do checking */
+#if 0
if ((*class_code >> 16) == PCI_BASE_CLASS_DISPLAY)
enable = 0;
+#endif
if (!enable)
pdn->eeh_mode |= EEH_MODE_NOCHECK;
@@ -973,8 +749,22 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
eeh_subsystem_enabled = 1;
pdn->eeh_mode |= EEH_MODE_SUPPORTED;
pdn->eeh_config_addr = regs[0];
+
+ /* If the newer, better, ibm,get-config-addr-info is supported,
+ * then use that instead. */
+ pdn->eeh_pe_config_addr = 0;
+ if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
+ unsigned int rets[2];
+ ret = rtas_call (ibm_get_config_addr_info, 4, 2, rets,
+ pdn->eeh_config_addr,
+ info->buid_hi, info->buid_lo,
+ 0);
+ if (ret == 0)
+ pdn->eeh_pe_config_addr = rets[0];
+ }
#ifdef DEBUG
- printk(KERN_DEBUG "EEH: %s: eeh enabled\n", dn->full_name);
+ printk(KERN_DEBUG "EEH: %s: eeh enabled, config=%x pe_config=%x\n",
+ dn->full_name, pdn->eeh_config_addr, pdn->eeh_pe_config_addr);
#endif
} else {
@@ -993,6 +783,7 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
dn->full_name);
}
+ eeh_save_bars(pdn);
return NULL;
}
@@ -1026,6 +817,8 @@ void __init eeh_init(void)
ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2");
ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state");
ibm_slot_error_detail = rtas_token("ibm,slot-error-detail");
+ ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
+ ibm_configure_bridge = rtas_token ("ibm,configure-bridge");
if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE)
return;
@@ -1080,12 +873,10 @@ void eeh_add_device_early(struct device_node *dn)
if (!dn || !PCI_DN(dn))
return;
phb = PCI_DN(dn)->phb;
- if (NULL == phb || 0 == phb->buid) {
- printk(KERN_WARNING "EEH: Expected buid but found none for %s\n",
- dn->full_name);
- dump_stack();
+
+ /* USB bus children of PCI devices will not have BUIDs */
+ if (NULL == phb || 0 == phb->buid)
return;
- }
info.buid_hi = BUID_HI(phb->buid);
info.buid_lo = BUID_LO(phb->buid);
@@ -1093,6 +884,15 @@ void eeh_add_device_early(struct device_node *dn)
}
EXPORT_SYMBOL_GPL(eeh_add_device_early);
+void eeh_add_device_tree_early(struct device_node *dn)
+{
+ struct device_node *sib;
+ for (sib = dn->child; sib; sib = sib->sibling)
+ eeh_add_device_tree_early(sib);
+ eeh_add_device_early(dn);
+}
+EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
+
/**
* eeh_add_device_late - perform EEH initialization for the indicated pci device
* @dev: pci device for which to set up EEH
@@ -1118,7 +918,6 @@ void eeh_add_device_late(struct pci_dev *dev)
pdn->pcidev = dev;
pci_addr_cache_insert_device (dev);
- eeh_save_bars(dev, pdn);
}
EXPORT_SYMBOL_GPL(eeh_add_device_late);
@@ -1147,27 +946,28 @@ void eeh_remove_device(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(eeh_remove_device);
-static int proc_eeh_show(struct seq_file *m, void *v)
-{
- unsigned int cpu;
- unsigned long ffs = 0, positives = 0, failures = 0;
- unsigned long resets = 0;
- unsigned long no_dev = 0, no_dn = 0, no_cfg = 0, no_check = 0;
-
- for_each_cpu(cpu) {
- ffs += per_cpu(total_mmio_ffs, cpu);
- positives += per_cpu(false_positives, cpu);
- failures += per_cpu(ignored_failures, cpu);
- resets += per_cpu(slot_resets, cpu);
- no_dev += per_cpu(no_device, cpu);
- no_dn += per_cpu(no_dn, cpu);
- no_cfg += per_cpu(no_cfg_addr, cpu);
- no_check += per_cpu(ignored_check, cpu);
+void eeh_remove_bus_device(struct pci_dev *dev)
+{
+ eeh_remove_device(dev);
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ struct pci_bus *bus = dev->subordinate;
+ struct list_head *ln;
+ if (!bus)
+ return;
+ for (ln = bus->devices.next; ln != &bus->devices; ln = ln->next) {
+ struct pci_dev *pdev = pci_dev_b(ln);
+ if (pdev)
+ eeh_remove_bus_device(pdev);
+ }
}
+}
+EXPORT_SYMBOL_GPL(eeh_remove_bus_device);
+static int proc_eeh_show(struct seq_file *m, void *v)
+{
if (0 == eeh_subsystem_enabled) {
seq_printf(m, "EEH Subsystem is globally disabled\n");
- seq_printf(m, "eeh_total_mmio_ffs=%ld\n", ffs);
+ seq_printf(m, "eeh_total_mmio_ffs=%ld\n", total_mmio_ffs);
} else {
seq_printf(m, "EEH Subsystem is enabled\n");
seq_printf(m,
@@ -1179,8 +979,10 @@ static int proc_eeh_show(struct seq_file *m, void *v)
"eeh_false_positives=%ld\n"
"eeh_ignored_failures=%ld\n"
"eeh_slot_resets=%ld\n",
- no_dev, no_dn, no_cfg, no_check,
- ffs, positives, failures, resets);
+ no_device, no_dn, no_cfg_addr,
+ ignored_check, total_mmio_ffs,
+ false_positives, ignored_failures,
+ slot_resets);
}
return 0;
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
new file mode 100644
index 00000000000..d4a402c5866
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -0,0 +1,316 @@
+/*
+ * eeh_cache.c
+ * PCI address cache; allows the lookup of PCI devices based on I/O address
+ *
+ * Copyright (C) 2004 Linas Vepstas <linas@austin.ibm.com> IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+#include <asm/pci-bridge.h>
+#include <asm/ppc-pci.h>
+
+#undef DEBUG
+
+/**
+ * The pci address cache subsystem. This subsystem places
+ * PCI device address resources into a red-black tree, sorted
+ * according to the address range, so that given only an i/o
+ * address, the corresponding PCI device can be **quickly**
+ * found. It is safe to perform an address lookup in an interrupt
+ * context; this ability is an important feature.
+ *
+ * Currently, the only customer of this code is the EEH subsystem;
+ * thus, this code has been somewhat tailored to suit EEH better.
+ * In particular, the cache does *not* hold the addresses of devices
+ * for which EEH is not enabled.
+ *
+ * (Implementation Note: The RB tree seems to be better/faster
+ * than any hash algo I could think of for this problem, even
+ * with the penalty of slow pointer chases for d-cache misses).
+ */
+struct pci_io_addr_range
+{
+ struct rb_node rb_node;
+ unsigned long addr_lo;
+ unsigned long addr_hi;
+ struct pci_dev *pcidev;
+ unsigned int flags;
+};
+
+static struct pci_io_addr_cache
+{
+ struct rb_root rb_root;
+ spinlock_t piar_lock;
+} pci_io_addr_cache_root;
+
+static inline struct pci_dev *__pci_get_device_by_addr(unsigned long addr)
+{
+ struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node;
+
+ while (n) {
+ struct pci_io_addr_range *piar;
+ piar = rb_entry(n, struct pci_io_addr_range, rb_node);
+
+ if (addr < piar->addr_lo) {
+ n = n->rb_left;
+ } else {
+ if (addr > piar->addr_hi) {
+ n = n->rb_right;
+ } else {
+ pci_dev_get(piar->pcidev);
+ return piar->pcidev;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * pci_get_device_by_addr - Get device, given only address
+ * @addr: mmio (PIO) phys address or i/o port number
+ *
+ * Given an mmio phys address, or a port number, find a pci device
+ * that implements this address. Be sure to pci_dev_put the device
+ * when finished. I/O port numbers are assumed to be offset
+ * from zero (that is, they do *not* have pci_io_addr added in).
+ * It is safe to call this function within an interrupt.
+ */
+struct pci_dev *pci_get_device_by_addr(unsigned long addr)
+{
+ struct pci_dev *dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
+ dev = __pci_get_device_by_addr(addr);
+ spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
+ return dev;
+}
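As the comment above notes, pci_get_device_by_addr() hands back a referenced device, so callers must drop that reference when finished. A short usage sketch; the address shown is a made-up example:

	struct pci_dev *dev;

	dev = pci_get_device_by_addr(0xf2000000UL);	/* hypothetical MMIO address */
	if (dev) {
		printk(KERN_DEBUG "EEH: address belongs to %s\n", pci_name(dev));
		pci_dev_put(dev);	/* balance the reference taken by the cache */
	}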
+
+#ifdef DEBUG
+/*
+ * Handy-dandy debug print routine, does nothing more
+ * than print out the contents of our addr cache.
+ */
+static void pci_addr_cache_print(struct pci_io_addr_cache *cache)
+{
+ struct rb_node *n;
+ int cnt = 0;
+
+ n = rb_first(&cache->rb_root);
+ while (n) {
+ struct pci_io_addr_range *piar;
+ piar = rb_entry(n, struct pci_io_addr_range, rb_node);
+ printk(KERN_DEBUG "PCI: %s addr range %d [%lx-%lx]: %s\n",
+ (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt,
+ piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev));
+ cnt++;
+ n = rb_next(n);
+ }
+}
+#endif
+
+/* Insert address range into the rb tree. */
+static struct pci_io_addr_range *
+pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
+ unsigned long ahi, unsigned int flags)
+{
+ struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node;
+ struct rb_node *parent = NULL;
+ struct pci_io_addr_range *piar;
+
+ /* Walk tree, find a place to insert into tree */
+ while (*p) {
+ parent = *p;
+ piar = rb_entry(parent, struct pci_io_addr_range, rb_node);
+ if (ahi < piar->addr_lo) {
+ p = &parent->rb_left;
+ } else if (alo > piar->addr_hi) {
+ p = &parent->rb_right;
+ } else {
+ if (dev != piar->pcidev ||
+ alo != piar->addr_lo || ahi != piar->addr_hi) {
+ printk(KERN_WARNING "PIAR: overlapping address range\n");
+ }
+ return piar;
+ }
+ }
+ piar = (struct pci_io_addr_range *)kmalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC);
+ if (!piar)
+ return NULL;
+
+ piar->addr_lo = alo;
+ piar->addr_hi = ahi;
+ piar->pcidev = dev;
+ piar->flags = flags;
+
+#ifdef DEBUG
+ printk(KERN_DEBUG "PIAR: insert range=[%lx:%lx] dev=%s\n",
+ alo, ahi, pci_name (dev));
+#endif
+
+ rb_link_node(&piar->rb_node, parent, p);
+ rb_insert_color(&piar->rb_node, &pci_io_addr_cache_root.rb_root);
+
+ return piar;
+}
+
+static void __pci_addr_cache_insert_device(struct pci_dev *dev)
+{
+ struct device_node *dn;
+ struct pci_dn *pdn;
+ int i;
+ int inserted = 0;
+
+ dn = pci_device_to_OF_node(dev);
+ if (!dn) {
+ printk(KERN_WARNING "PCI: no pci dn found for dev=%s\n", pci_name(dev));
+ return;
+ }
+
+ /* Skip any devices for which EEH is not enabled. */
+ pdn = PCI_DN(dn);
+ if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
+ pdn->eeh_mode & EEH_MODE_NOCHECK) {
+#ifdef DEBUG
+ printk(KERN_INFO "PCI: skip building address cache for=%s - %s\n",
+ pci_name(dev), pdn->node->full_name);
+#endif
+ return;
+ }
+
+ /* The cache holds a reference to the device... */
+ pci_dev_get(dev);
+
+ /* Walk resources on this device, poke them into the tree */
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ unsigned long start = pci_resource_start(dev,i);
+ unsigned long end = pci_resource_end(dev,i);
+ unsigned int flags = pci_resource_flags(dev,i);
+
+ /* We are interested only in bus addresses, not DMA or other stuff */
+ if (0 == (flags & (IORESOURCE_IO | IORESOURCE_MEM)))
+ continue;
+ if (start == 0 || ~start == 0 || end == 0 || ~end == 0)
+ continue;
+ pci_addr_cache_insert(dev, start, end, flags);
+ inserted = 1;
+ }
+
+ /* If there was nothing to add, the cache has no reference... */
+ if (!inserted)
+ pci_dev_put(dev);
+}
+
+/**
+ * pci_addr_cache_insert_device - Add a device to the address cache
+ * @dev: PCI device whose I/O addresses we are interested in.
+ *
+ * In order to support the fast lookup of devices based on addresses,
+ * we maintain a cache of devices that can be quickly searched.
+ * This routine adds a device to that cache.
+ */
+void pci_addr_cache_insert_device(struct pci_dev *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
+ __pci_addr_cache_insert_device(dev);
+ spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
+}
+
+static inline void __pci_addr_cache_remove_device(struct pci_dev *dev)
+{
+ struct rb_node *n;
+ int removed = 0;
+
+restart:
+ n = rb_first(&pci_io_addr_cache_root.rb_root);
+ while (n) {
+ struct pci_io_addr_range *piar;
+ piar = rb_entry(n, struct pci_io_addr_range, rb_node);
+
+ if (piar->pcidev == dev) {
+ rb_erase(n, &pci_io_addr_cache_root.rb_root);
+ removed = 1;
+ kfree(piar);
+ goto restart;
+ }
+ n = rb_next(n);
+ }
+
+ /* The cache no longer holds its reference to this device... */
+ if (removed)
+ pci_dev_put(dev);
+}
+
+/**
+ * pci_addr_cache_remove_device - remove pci device from addr cache
+ * @dev: device to remove
+ *
+ * Remove a device from the addr-cache tree.
+ * This is potentially expensive, since it will walk
+ * the tree multiple times (once per resource).
+ * But so what; device removal doesn't need to be that fast.
+ */
+void pci_addr_cache_remove_device(struct pci_dev *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
+ __pci_addr_cache_remove_device(dev);
+ spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
+}
+
+/**
+ * pci_addr_cache_build - Build a cache of I/O addresses
+ *
+ * Build a cache of pci i/o addresses. This cache will be used to
+ * find the pci device that corresponds to a given address.
+ * This routine scans all pci busses to build the cache.
+ * Must be run late in boot process, after the pci controllers
+ * have been scanned for devices (after all device resources are known).
+ */
+void __init pci_addr_cache_build(void)
+{
+ struct device_node *dn;
+ struct pci_dev *dev = NULL;
+
+ spin_lock_init(&pci_io_addr_cache_root.piar_lock);
+
+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ /* Ignore PCI bridges */
+ if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
+ continue;
+
+ pci_addr_cache_insert_device(dev);
+
+ dn = pci_device_to_OF_node(dev);
+ pci_dev_get (dev); /* matching put is in eeh_remove_device() */
+ PCI_DN(dn)->pcidev = dev;
+ }
+
+#ifdef DEBUG
+ /* Verify tree built up above, echo back the list of addrs. */
+ pci_addr_cache_print(&pci_io_addr_cache_root);
+#endif
+}
+
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
new file mode 100644
index 00000000000..6373372932b
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -0,0 +1,376 @@
+/*
+ * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
+ * Copyright (C) 2004, 2005 Linas Vepstas <linas@linas.org>
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <linas@us.ibm.com>
+ *
+ */
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+#include <linux/pci.h>
+#include <asm/eeh.h>
+#include <asm/eeh_event.h>
+#include <asm/ppc-pci.h>
+#include <asm/pci-bridge.h>
+#include <asm/prom.h>
+#include <asm/rtas.h>
+
+
+static inline const char * pcid_name (struct pci_dev *pdev)
+{
+ if (pdev->dev.driver)
+ return pdev->dev.driver->name;
+ return "";
+}
+
+#ifdef DEBUG
+static void print_device_node_tree (struct pci_dn *pdn, int dent)
+{
+ int i;
+ if (!pdn) return;
+ for (i=0;i<dent; i++)
+ printk(" ");
+ printk("dn=%s mode=%x \tcfg_addr=%x pe_addr=%x \tfull=%s\n",
+ pdn->node->name, pdn->eeh_mode, pdn->eeh_config_addr,
+ pdn->eeh_pe_config_addr, pdn->node->full_name);
+ dent += 3;
+ struct device_node *pc = pdn->node->child;
+ while (pc) {
+ print_device_node_tree(PCI_DN(pc), dent);
+ pc = pc->sibling;
+ }
+}
+#endif
+
+/**
+ * irq_in_use - return true if this irq is being used
+ */
+static int irq_in_use(unsigned int irq)
+{
+ int rc = 0;
+ unsigned long flags;
+ struct irq_desc *desc = irq_desc + irq;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ if (desc->action)
+ rc = 1;
+ spin_unlock_irqrestore(&desc->lock, flags);
+ return rc;
+}
+
+/* ------------------------------------------------------- */
+/** eeh_report_error - report an EEH error to each device,
+ * collect up and merge the device responses.
+ */
+
+static void eeh_report_error(struct pci_dev *dev, void *userdata)
+{
+ enum pci_ers_result rc, *res = userdata;
+ struct pci_driver *driver = dev->driver;
+
+ dev->error_state = pci_channel_io_frozen;
+
+ if (!driver)
+ return;
+
+ if (irq_in_use (dev->irq)) {
+ struct device_node *dn = pci_device_to_OF_node(dev);
+ PCI_DN(dn)->eeh_mode |= EEH_MODE_IRQ_DISABLED;
+ disable_irq_nosync(dev->irq);
+ }
+ if (!driver->err_handler)
+ return;
+ if (!driver->err_handler->error_detected)
+ return;
+
+ rc = driver->err_handler->error_detected (dev, pci_channel_io_frozen);
+ if (*res == PCI_ERS_RESULT_NONE) *res = rc;
+ if (*res == PCI_ERS_RESULT_NEED_RESET) return;
+ if (*res == PCI_ERS_RESULT_DISCONNECT &&
+ rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
+}
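eeh_report_error() accumulates the per-driver answers in *res: the first answer other than PCI_ERS_RESULT_NONE stands, except that a NEED_RESET reply upgrades an earlier DISCONNECT. The same rule restated as a standalone sketch; the helper name is hypothetical and not part of the patch:

	static enum pci_ers_result merge_result(enum pci_ers_result acc,
						enum pci_ers_result rc)
	{
		if (acc == PCI_ERS_RESULT_NONE)
			return rc;		/* first real answer stands */
		if (acc == PCI_ERS_RESULT_DISCONNECT &&
		    rc == PCI_ERS_RESULT_NEED_RESET)
			return rc;		/* a reset request overrides disconnect */
		return acc;
	}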
+
+/** eeh_report_reset -- tell this device that the pci slot
+ * has been reset.
+ */
+
+static void eeh_report_reset(struct pci_dev *dev, void *userdata)
+{
+ struct pci_driver *driver = dev->driver;
+ struct device_node *dn = pci_device_to_OF_node(dev);
+
+ if (!driver)
+ return;
+
+ if ((PCI_DN(dn)->eeh_mode) & EEH_MODE_IRQ_DISABLED) {
+ PCI_DN(dn)->eeh_mode &= ~EEH_MODE_IRQ_DISABLED;
+ enable_irq(dev->irq);
+ }
+ if (!driver->err_handler)
+ return;
+ if (!driver->err_handler->slot_reset)
+ return;
+
+ driver->err_handler->slot_reset(dev);
+}
+
+static void eeh_report_resume(struct pci_dev *dev, void *userdata)
+{
+ struct pci_driver *driver = dev->driver;
+
+ dev->error_state = pci_channel_io_normal;
+
+ if (!driver)
+ return;
+ if (!driver->err_handler)
+ return;
+ if (!driver->err_handler->resume)
+ return;
+
+ driver->err_handler->resume(dev);
+}
+
+static void eeh_report_failure(struct pci_dev *dev, void *userdata)
+{
+ struct pci_driver *driver = dev->driver;
+
+ dev->error_state = pci_channel_io_perm_failure;
+
+ if (!driver)
+ return;
+
+ if (irq_in_use (dev->irq)) {
+ struct device_node *dn = pci_device_to_OF_node(dev);
+ PCI_DN(dn)->eeh_mode |= EEH_MODE_IRQ_DISABLED;
+ disable_irq_nosync(dev->irq);
+ }
+ if (!driver->err_handler)
+ return;
+ if (!driver->err_handler->error_detected)
+ return;
+ driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
+}
+
+/* ------------------------------------------------------- */
+/**
+ * handle_eeh_events -- reset a PCI device after hard lockup.
+ *
+ * pSeries systems will isolate a PCI slot if the PCI-Host
+ * bridge detects address or data parity errors, DMAs
+ * occurring to wild addresses (which usually happen due to
+ * bugs in device drivers or in PCI adapter firmware).
+ * Slot isolations also occur if #SERR, #PERR or other misc
+ * PCI-related errors are detected.
+ *
+ * The recovery process consists of unplugging the device driver
+ * (which generates hotplug events to userspace), then issuing
+ * a PCI #RST to the device, then reconfiguring the PCI config
+ * space for all bridges & devices under this slot, and then
+ * finally restarting the device drivers (which cause a second
+ * set of hotplug events to go out to userspace).
+ */
+
+/**
+ * eeh_reset_device() -- perform actual reset of a pci slot
+ * Args: bus: pointer to the pci bus structure corresponding
+ * to the isolated slot. A non-null value will
+ * cause all devices under the bus to be removed
+ * and then re-added.
+ * pe_dn: pointer to a "Partitionable Endpoint" device node.
+ * This is the top-level structure on which pci
+ * bus resets can be performed.
+ */
+
+static int eeh_reset_device (struct pci_dn *pe_dn, struct pci_bus *bus)
+{
+ int rc;
+ if (bus)
+ pcibios_remove_pci_devices(bus);
+
+ /* Reset the pci controller. (Asserts RST#; resets config space).
+ * Reconfigure bridges and devices. Don't try to bring the system
+ * up if the reset failed for some reason. */
+ rc = rtas_set_slot_reset(pe_dn);
+ if (rc)
+ return rc;
+
+ /* New-style config addrs might be shared across multiple devices;
+ * walk over all functions sharing this PE address */
+ if (pe_dn->eeh_pe_config_addr) {
+ struct device_node *pe = pe_dn->node;
+ pe = pe->parent->child;
+ while (pe) {
+ struct pci_dn *ppe = PCI_DN(pe);
+ if (pe_dn->eeh_pe_config_addr == ppe->eeh_pe_config_addr) {
+ rtas_configure_bridge(ppe);
+ eeh_restore_bars(ppe);
+ }
+ pe = pe->sibling;
+ }
+ } else {
+ rtas_configure_bridge(pe_dn);
+ eeh_restore_bars(pe_dn);
+ }
+
+ /* Give the system 5 seconds to finish running the user-space
+ * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
+ * this is a hack, but if we don't do this, and try to bring
+ * the device up before the scripts have taken it down,
+ * potentially weird things happen.
+ */
+ if (bus) {
+ ssleep (5);
+ pcibios_add_pci_devices(bus);
+ }
+
+ return 0;
+}
+
+/* The longest amount of time to wait for a pci device
+ * to come back on line, in seconds.
+ */
+#define MAX_WAIT_FOR_RECOVERY 15
+
+void handle_eeh_events (struct eeh_event *event)
+{
+ struct device_node *frozen_dn;
+ struct pci_dn *frozen_pdn;
+ struct pci_bus *frozen_bus;
+ int rc = 0;
+ enum pci_ers_result result = PCI_ERS_RESULT_NONE;
+
+ frozen_dn = find_device_pe(event->dn);
+ frozen_bus = pcibios_find_pci_bus(frozen_dn);
+
+ if (!frozen_dn) {
+ printk(KERN_ERR "EEH: Error: Cannot find partition endpoint for %s\n",
+ pci_name(event->dev));
+ return;
+ }
+
+ /* There are two different styles for coming up with the PE.
+ * In the old style, it was the highest EEH-capable device
+ * which was always an EADS pci bridge. In the new style,
+ * there might not be any EADS bridges, and even when there are,
+ * the firmware marks them as "EEH incapable". So another
+ * two-step is needed to find the pci bus. */
+ if (!frozen_bus)
+ frozen_bus = pcibios_find_pci_bus (frozen_dn->parent);
+
+ if (!frozen_bus) {
+ printk(KERN_ERR "EEH: Cannot find PCI bus for %s\n",
+ frozen_dn->full_name);
+ return;
+ }
+
+#if 0
+ /* We may get "permanent failure" messages on empty slots.
+ * These are false alarms. Empty slots have no child dn. */
+ if ((event->state == pci_channel_io_perm_failure) && (frozen_device == NULL))
+ return;
+#endif
+
+ frozen_pdn = PCI_DN(frozen_dn);
+ frozen_pdn->eeh_freeze_count++;
+
+ if (frozen_pdn->eeh_freeze_count > EEH_MAX_ALLOWED_FREEZES)
+ goto hard_fail;
+
+ /* If the reset state is a '5' and the time to reset is 0 (infinity)
+ * or is more than 15 seconds, then mark this as a permanent failure.
+ */
+ if ((event->state == pci_channel_io_perm_failure) &&
+ ((event->time_unavail <= 0) ||
+ (event->time_unavail > MAX_WAIT_FOR_RECOVERY*1000)))
+ goto hard_fail;
+
+ eeh_slot_error_detail(frozen_pdn, 1 /* Temporary Error */);
+ printk(KERN_WARNING
+ "EEH: This PCI device has failed %d times since last reboot: %s - %s\n",
+ frozen_pdn->eeh_freeze_count,
+ pci_name (frozen_pdn->pcidev),
+ pcid_name(frozen_pdn->pcidev));
+
+ /* Walk the various device drivers attached to this slot through
+ * a reset sequence, giving each an opportunity to do what it needs
+ * to accomplish the reset. Each child gets a report of the
+ * status ... if any child can't handle the reset, then the entire
+ * slot is dlpar removed and added.
+ */
+ pci_walk_bus(frozen_bus, eeh_report_error, &result);
+
+ /* If all device drivers were EEH-unaware, then shut
+ * down all of the device drivers, and hope they
+ * go down willingly, without panicking the system.
+ */
+ if (result == PCI_ERS_RESULT_NONE) {
+ rc = eeh_reset_device(frozen_pdn, frozen_bus);
+ if (rc)
+ goto hard_fail;
+ }
+
+ /* If any device called out for a reset, then reset the slot */
+ if (result == PCI_ERS_RESULT_NEED_RESET) {
+ rc = eeh_reset_device(frozen_pdn, NULL);
+ if (rc)
+ goto hard_fail;
+ pci_walk_bus(frozen_bus, eeh_report_reset, 0);
+ }
+
+ /* If all devices reported they can proceed, then re-enable PIO */
+ if (result == PCI_ERS_RESULT_CAN_RECOVER) {
+ /* XXX Not supported; we brute-force reset the device */
+ rc = eeh_reset_device(frozen_pdn, NULL);
+ if (rc)
+ goto hard_fail;
+ pci_walk_bus(frozen_bus, eeh_report_reset, 0);
+ }
+
+ /* Tell all device drivers that they can resume operations */
+ pci_walk_bus(frozen_bus, eeh_report_resume, 0);
+
+ return;
+
+hard_fail:
+ /*
+ * About 90% of all real-life EEH failures in the field
+ * are due to poorly seated PCI cards. Only 10% or so are
+ * due to actual, failed cards.
+ */
+ printk(KERN_ERR
+ "EEH: PCI device %s - %s has failed %d times \n"
+ "and has been permanently disabled. Please try reseating\n"
+ "this device or replacing it.\n",
+ pci_name (frozen_pdn->pcidev),
+ pcid_name(frozen_pdn->pcidev),
+ frozen_pdn->eeh_freeze_count);
+
+ eeh_slot_error_detail(frozen_pdn, 2 /* Permanent Error */);
+
+ /* Notify all devices that they're about to go down. */
+ pci_walk_bus(frozen_bus, eeh_report_failure, 0);
+
+ /* Shut down the device drivers for good. */
+ pcibios_remove_pci_devices(frozen_bus);
+}
+
+/* ---------- end of file ---------- */
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 92497333c2b..9a9961f2748 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -21,6 +21,7 @@
#include <linux/list.h>
#include <linux/pci.h>
#include <asm/eeh_event.h>
+#include <asm/ppc-pci.h>
/** Overview:
* EEH error states may be detected within exception handlers;
@@ -37,31 +38,6 @@ static void eeh_thread_launcher(void *);
DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
/**
- * eeh_panic - call panic() for an eeh event that cannot be handled.
- * The philosophy of this routine is that it is better to panic and
- * halt the OS than it is to risk possible data corruption by
- * oblivious device drivers that don't know better.
- *
- * @dev pci device that had an eeh event
- * @reset_state current reset state of the device slot
- */
-static void eeh_panic(struct pci_dev *dev, int reset_state)
-{
- /*
- * Since the panic_on_oops sysctl is used to halt the system
- * in light of potential corruption, we can use it here.
- */
- if (panic_on_oops) {
- panic("EEH: MMIO failure (%d) on device:%s\n", reset_state,
- pci_name(dev));
- }
- else {
- printk(KERN_INFO "EEH: Ignored MMIO failure (%d) on device:%s\n",
- reset_state, pci_name(dev));
- }
-}
-
-/**
* eeh_event_handler - dispatch EEH events. The detection of a frozen
* slot can occur inside an interrupt, where it can be hard to do
* anything about it. The goal of this routine is to pull these
@@ -82,10 +58,16 @@ static int eeh_event_handler(void * dummy)
spin_lock_irqsave(&eeh_eventlist_lock, flags);
event = NULL;
+
+ /* Unqueue the event, get ready to process. */
if (!list_empty(&eeh_eventlist)) {
event = list_entry(eeh_eventlist.next, struct eeh_event, list);
list_del(&event->list);
}
+
+ if (event)
+ eeh_mark_slot(event->dn, EEH_MODE_RECOVERING);
+
spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
if (event == NULL)
break;
@@ -93,8 +75,11 @@ static int eeh_event_handler(void * dummy)
printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n",
pci_name(event->dev));
- eeh_panic (event->dev, event->state);
+ handle_eeh_events(event);
+
+ eeh_clear_slot(event->dn, EEH_MODE_RECOVERING);
+ pci_dev_put(event->dev);
kfree(event);
}
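The handler above is the consumer half of a simple deferral scheme: eeh_send_failure_event() queues an eeh_event on eeh_eventlist under eeh_eventlist_lock (possibly from interrupt context) and wakes this thread, which dequeues and recovers the slot in process context. A condensed sketch of the two halves; the helper names and the exact wake-up call on the producer side are assumptions, not part of this patch:

	/* Producer side (may run in interrupt context): queue and notify. */
	static void queue_eeh_event(struct eeh_event *event)
	{
		unsigned long flags;

		spin_lock_irqsave(&eeh_eventlist_lock, flags);
		list_add(&event->list, &eeh_eventlist);
		spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
		schedule_work(&eeh_event_wq);	/* assumed: kicks eeh_thread_launcher */
	}

	/* Consumer side (process context): pop one event and recover it. */
	static void service_one_eeh_event(void)
	{
		struct eeh_event *event = NULL;
		unsigned long flags;

		spin_lock_irqsave(&eeh_eventlist_lock, flags);
		if (!list_empty(&eeh_eventlist)) {
			event = list_entry(eeh_eventlist.next, struct eeh_event, list);
			list_del(&event->list);
		}
		spin_unlock_irqrestore(&eeh_eventlist_lock, flags);

		if (event) {
			handle_eeh_events(event);
			kfree(event);
		}
	}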
@@ -122,7 +107,7 @@ static void eeh_thread_launcher(void *dummy)
*/
int eeh_send_failure_event (struct device_node *dn,
struct pci_dev *dev,
- int state,
+ enum pci_channel_state state,
int time_unavail)
{
unsigned long flags;
diff --git a/arch/powerpc/platforms/pseries/hvcserver.c b/arch/powerpc/platforms/pseries/hvcserver.c
index 4d584172055..22bfb5c89db 100644
--- a/arch/powerpc/platforms/pseries/hvcserver.c
+++ b/arch/powerpc/platforms/pseries/hvcserver.c
@@ -40,7 +40,7 @@ MODULE_VERSION(HVCS_ARCH_VERSION);
* functions aren't performance sensitive, so this conversion isn't an
* issue.
*/
-int hvcs_convert(long to_convert)
+static int hvcs_convert(long to_convert)
{
switch (to_convert) {
case H_Success:
@@ -91,7 +91,7 @@ int hvcs_free_partner_info(struct list_head *head)
EXPORT_SYMBOL(hvcs_free_partner_info);
/* Helper function for hvcs_get_partner_info */
-int hvcs_next_partner(uint32_t unit_address,
+static int hvcs_next_partner(uint32_t unit_address,
unsigned long last_p_partition_ID,
unsigned long last_p_unit_address, unsigned long *pi_buff)
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 2043659ea7b..48cfbfc43f9 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -51,8 +51,6 @@
#define DBG(fmt...)
-extern int is_python(struct device_node *);
-
static void tce_build_pSeries(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction)
@@ -436,7 +434,7 @@ static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
return;
}
- ppci = pdn->data;
+ ppci = PCI_DN(pdn);
if (!ppci->iommu_table) {
/* Bussubno hasn't been copied yet.
* Do it now because iommu_table_setparms_lpar needs it.
@@ -483,10 +481,10 @@ static void iommu_dev_setup_pSeries(struct pci_dev *dev)
* an already allocated iommu table is found and use that.
*/
- while (dn && dn->data && PCI_DN(dn)->iommu_table == NULL)
+ while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL)
dn = dn->parent;
- if (dn && dn->data) {
+ if (dn && PCI_DN(dn)) {
PCI_DN(mydn)->iommu_table = PCI_DN(dn)->iommu_table;
} else {
DBG("iommu_dev_setup_pSeries, dev %p (%s) has no iommu table\n", dev, pci_name(dev));
@@ -497,7 +495,7 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti
{
int err = NOTIFY_OK;
struct device_node *np = node;
- struct pci_dn *pci = np->data;
+ struct pci_dn *pci = PCI_DN(np);
switch (action) {
case PSERIES_RECONFIG_REMOVE:
@@ -533,7 +531,7 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
*/
dn = pci_device_to_OF_node(dev);
- for (pdn = dn; pdn && pdn->data && !PCI_DN(pdn)->iommu_table;
+ for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
pdn = pdn->parent) {
dma_window = (unsigned int *)
get_property(pdn, "ibm,dma-window", NULL);
@@ -552,7 +550,7 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
DBG("Found DMA window, allocating table\n");
}
- pci = pdn->data;
+ pci = PCI_DN(pdn);
if (!pci->iommu_table) {
/* iommu_table_setparms_lpar needs bussubno. */
pci->bussubno = pci->phb->bus->number;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index cf1bc11b334..8952528d31a 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -24,6 +24,7 @@
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
+#include <linux/console.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
@@ -60,7 +61,7 @@ extern void pSeries_find_serial_port(void);
int vtermno; /* virtual terminal# for udbg */
#define __ALIGNED__ __attribute__((__aligned__(sizeof(long))))
-static void udbg_hvsi_putc(unsigned char c)
+static void udbg_hvsi_putc(char c)
{
/* packet's seqno isn't used anyways */
uint8_t packet[] __ALIGNED__ = { 0xff, 5, 0, 0, c };
@@ -111,7 +112,7 @@ static int udbg_hvsi_getc_poll(void)
return ch;
}
-static unsigned char udbg_hvsi_getc(void)
+static int udbg_hvsi_getc(void)
{
int ch;
for (;;) {
@@ -127,7 +128,7 @@ static unsigned char udbg_hvsi_getc(void)
}
}
-static void udbg_putcLP(unsigned char c)
+static void udbg_putcLP(char c)
{
char buf[16];
unsigned long rc;
@@ -172,7 +173,7 @@ static int udbg_getc_pollLP(void)
return ch;
}
-static unsigned char udbg_getcLP(void)
+static int udbg_getcLP(void)
{
int ch;
for (;;) {
@@ -191,7 +192,7 @@ static unsigned char udbg_getcLP(void)
/* call this from early_init() for a working debug console on
* vterm capable LPAR machines
*/
-void udbg_init_debug_lpar(void)
+void __init udbg_init_debug_lpar(void)
{
vtermno = 0;
udbg_putc = udbg_putcLP;
@@ -200,73 +201,64 @@ void udbg_init_debug_lpar(void)
}
/* set up the boot console from /chosen/stdout, if possible */
-int find_udbg_vterm(void)
+void __init find_udbg_vterm(void)
{
struct device_node *stdout_node;
u32 *termno;
char *name;
- int found = 0;
+ int add_console;
/* find the boot console from /chosen/stdout */
if (!of_chosen)
- return 0;
+ return;
name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
if (name == NULL)
- return 0;
+ return;
stdout_node = of_find_node_by_path(name);
if (!stdout_node)
- return 0;
-
- /* now we have the stdout node; figure out what type of device it is. */
+ return;
name = (char *)get_property(stdout_node, "name", NULL);
if (!name) {
printk(KERN_WARNING "stdout node missing 'name' property!\n");
goto out;
}
+ /* If the user already requested a console on the command line, it is set up elsewhere; don't add one here. */
+ add_console = !strstr(cmd_line, "console=");
- if (strncmp(name, "vty", 3) == 0) {
- if (device_is_compatible(stdout_node, "hvterm1")) {
- termno = (u32 *)get_property(stdout_node, "reg", NULL);
- if (termno) {
- vtermno = termno[0];
- udbg_putc = udbg_putcLP;
- udbg_getc = udbg_getcLP;
- udbg_getc_poll = udbg_getc_pollLP;
- found = 1;
- }
- } else if (device_is_compatible(stdout_node, "hvterm-protocol")) {
- termno = (u32 *)get_property(stdout_node, "reg", NULL);
- if (termno) {
- vtermno = termno[0];
- udbg_putc = udbg_hvsi_putc;
- udbg_getc = udbg_hvsi_getc;
- udbg_getc_poll = udbg_hvsi_getc_poll;
- found = 1;
- }
- }
- } else if (strncmp(name, "serial", 6)) {
- /* XXX fix ISA serial console */
- printk(KERN_WARNING "serial stdout on LPAR ('%s')! "
- "can't print udbg messages\n",
- stdout_node->full_name);
- } else {
- printk(KERN_WARNING "don't know how to print to stdout '%s'\n",
- stdout_node->full_name);
+ /* Check if it's a virtual terminal */
+ if (strncmp(name, "vty", 3) != 0)
+ goto out;
+ termno = (u32 *)get_property(stdout_node, "reg", NULL);
+ if (termno == NULL)
+ goto out;
+ vtermno = termno[0];
+
+ if (device_is_compatible(stdout_node, "hvterm1")) {
+ udbg_putc = udbg_putcLP;
+ udbg_getc = udbg_getcLP;
+ udbg_getc_poll = udbg_getc_pollLP;
+ if (add_console)
+ add_preferred_console("hvc", termno[0] & 0xff, NULL);
+ } else if (device_is_compatible(stdout_node, "hvterm-protocol")) {
+ vtermno = termno[0];
+ udbg_putc = udbg_hvsi_putc;
+ udbg_getc = udbg_hvsi_getc;
+ udbg_getc_poll = udbg_hvsi_getc_poll;
+ if (add_console)
+ add_preferred_console("hvsi", termno[0] & 0xff, NULL);
}
-
out:
of_node_put(stdout_node);
- return found;
}
void vpa_init(int cpu)
{
int hwcpu = get_hard_smp_processor_id(cpu);
- unsigned long vpa = __pa(&paca[cpu].lppaca);
+ unsigned long vpa = __pa(&lppaca[cpu]);
long ret;
if (cpu_has_feature(CPU_FTR_ALTIVEC))
- paca[cpu].lppaca.vmxregs_in_use = 1;
+ lppaca[cpu].vmxregs_in_use = 1;
ret = register_vpa(hwcpu, vpa);
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
new file mode 100644
index 00000000000..21934784f93
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -0,0 +1,174 @@
+/*
+ * PCI Dynamic LPAR, PCI Hot Plug and PCI EEH recovery code
+ * for RPA-compliant PPC64 platform.
+ * Copyright (C) 2003 Linda Xie <lxie@us.ibm.com>
+ * Copyright (C) 2005 International Business Machines
+ *
+ * Updates, 2005, John Rose <johnrose@austin.ibm.com>
+ * Updates, 2005, Linas Vepstas <linas@austin.ibm.com>
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/pci.h>
+#include <asm/pci-bridge.h>
+
+static struct pci_bus *
+find_bus_among_children(struct pci_bus *bus,
+ struct device_node *dn)
+{
+ struct pci_bus *child = NULL;
+ struct list_head *tmp;
+ struct device_node *busdn;
+
+ busdn = pci_bus_to_OF_node(bus);
+ if (busdn == dn)
+ return bus;
+
+ list_for_each(tmp, &bus->children) {
+ child = find_bus_among_children(pci_bus_b(tmp), dn);
+ if (child)
+ break;
+ };
+ return child;
+}
+
+struct pci_bus *
+pcibios_find_pci_bus(struct device_node *dn)
+{
+ struct pci_dn *pdn = dn->data;
+
+ if (!pdn || !pdn->phb || !pdn->phb->bus)
+ return NULL;
+
+ return find_bus_among_children(pdn->phb->bus, dn);
+}
+
+/**
+ * pcibios_remove_pci_devices - remove all devices under this bus
+ *
+ * Remove all of the PCI devices under this bus both from the
+ * linux pci device tree, and from the powerpc EEH address cache.
+ */
+void
+pcibios_remove_pci_devices(struct pci_bus *bus)
+{
+ struct pci_dev *dev, *tmp;
+
+ list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
+ eeh_remove_bus_device(dev);
+ pci_remove_bus_device(dev);
+ }
+}
+
+/* Must be called before pci_bus_add_devices */
+void
+pcibios_fixup_new_pci_devices(struct pci_bus *bus, int fix_bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ /*
+ * Skip already-present devices (which are on the
+ * global device list.)
+ */
+ if (list_empty(&dev->global_list)) {
+ int i;
+
+ /* Need to setup IOMMU tables */
+ ppc_md.iommu_dev_setup(dev);
+
+ if(fix_bus)
+ pcibios_fixup_device_resources(dev, bus);
+ pci_read_irq_line(dev);
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if (r->parent || !r->start || !r->flags)
+ continue;
+ pci_claim_resource(dev, i);
+ }
+ }
+ }
+}
+
+static int
+pcibios_pci_config_bridge(struct pci_dev *dev)
+{
+ u8 sec_busno;
+ struct pci_bus *child_bus;
+ struct pci_dev *child_dev;
+
+ /* Get busno of downstream bus */
+ pci_read_config_byte(dev, PCI_SECONDARY_BUS, &sec_busno);
+
+ /* Add to children of PCI bridge dev->bus */
+ child_bus = pci_add_new_bus(dev->bus, dev, sec_busno);
+ if (!child_bus) {
+ printk (KERN_ERR "%s: could not add second bus\n", __FUNCTION__);
+ return -EIO;
+ }
+ sprintf(child_bus->name, "PCI Bus #%02x", child_bus->number);
+
+ pci_scan_child_bus(child_bus);
+
+ list_for_each_entry(child_dev, &child_bus->devices, bus_list) {
+ eeh_add_device_late(child_dev);
+ }
+
+ /* Fixup new pci devices without touching bus struct */
+ pcibios_fixup_new_pci_devices(child_bus, 0);
+
+ /* Make the discovered devices available */
+ pci_bus_add_devices(child_bus);
+ return 0;
+}
+
+/**
+ * pcibios_add_pci_devices - adds new pci devices to bus
+ *
+ * This routine will find and fixup new pci devices under
+ * the indicated bus. This routine presumes that there
+ * might already be some devices under this bridge, so
+ * it carefully tries to add only new devices. (And that
+ * is how this routine differs from other, similar pcibios
+ * routines.)
+ */
+void
+pcibios_add_pci_devices(struct pci_bus * bus)
+{
+ int slotno, num;
+ struct pci_dev *dev;
+ struct device_node *dn = pci_bus_to_OF_node(bus);
+
+ eeh_add_device_tree_early(dn);
+
+ /* pci_scan_slot should find all children */
+ slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
+ num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
+ if (num) {
+ pcibios_fixup_new_pci_devices(bus, 1);
+ pci_bus_add_devices(bus);
+ }
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ eeh_add_device_late (dev);
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ pcibios_pci_config_bridge(dev);
+ }
+}
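
As an illustration of how the helpers added above fit together (a minimal sketch, not part of the patch; the function names and error handling are placeholders), a DLPAR/hotplug-style caller that already holds the slot's device_node might do:

#include <linux/errno.h>
#include <linux/pci.h>
#include <asm/pci-bridge.h>

/* Enable a slot: find its bus, then scan, fix up and register new devices. */
static int example_enable_slot(struct device_node *dn)
{
	struct pci_bus *bus = pcibios_find_pci_bus(dn);

	if (!bus)
		return -EINVAL;
	pcibios_add_pci_devices(bus);
	return 0;
}

/* Disable a slot: remove every device below the bus (EEH cache + PCI core). */
static int example_disable_slot(struct device_node *dn)
{
	struct pci_bus *bus = pcibios_find_pci_bus(dn);

	if (!bus)
		return -EINVAL;
	pcibios_remove_pci_devices(bus);
	return 0;
}
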
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index fbd214d68b0..b046bcf7443 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -49,14 +49,14 @@
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/udbg.h>
+#include <asm/firmware.h>
+
+#include "ras.h"
static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(ras_log_buf_lock);
-char mce_data_buf[RTAS_ERROR_LOG_MAX]
-;
-/* This is true if we are using the firmware NMI handler (typically LPAR) */
-extern int fwnmi_active;
+char mce_data_buf[RTAS_ERROR_LOG_MAX];
static int ras_get_sensor_state_token;
static int ras_check_exception_token;
@@ -280,7 +280,7 @@ static void fwnmi_release_errinfo(void)
printk("FWNMI: nmi-interlock failed: %d\n", ret);
}
-void pSeries_system_reset_exception(struct pt_regs *regs)
+int pSeries_system_reset_exception(struct pt_regs *regs)
{
if (fwnmi_active) {
struct rtas_error_log *errhdr = fwnmi_get_errinfo(regs);
@@ -289,6 +289,7 @@ void pSeries_system_reset_exception(struct pt_regs *regs)
}
fwnmi_release_errinfo();
}
+ return 0; /* need to perform reset */
}
/*
diff --git a/arch/powerpc/platforms/pseries/ras.h b/arch/powerpc/platforms/pseries/ras.h
new file mode 100644
index 00000000000..0e66b0da55e
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/ras.h
@@ -0,0 +1,9 @@
+#ifndef _PSERIES_RAS_H
+#define _PSERIES_RAS_H
+
+struct pt_regs;
+
+extern int pSeries_system_reset_exception(struct pt_regs *regs);
+extern int pSeries_machine_check_exception(struct pt_regs *regs);
+
+#endif /* _PSERIES_RAS_H */
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index d8864164dbe..86cfa6ecdcf 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -350,6 +350,100 @@ static int do_remove_node(char *buf)
return rv;
}
+static char *parse_node(char *buf, size_t bufsize, struct device_node **npp)
+{
+ char *handle_str;
+ phandle handle;
+ *npp = NULL;
+
+ handle_str = buf;
+
+ buf = strchr(buf, ' ');
+ if (!buf)
+ return NULL;
+ *buf = '\0';
+ buf++;
+
+ handle = simple_strtoul(handle_str, NULL, 10);
+
+ *npp = of_find_node_by_phandle(handle);
+ return buf;
+}
+
+static int do_add_property(char *buf, size_t bufsize)
+{
+ struct property *prop = NULL;
+ struct device_node *np;
+ unsigned char *value;
+ char *name, *end;
+ int length;
+ end = buf + bufsize;
+ buf = parse_node(buf, bufsize, &np);
+
+ if (!np)
+ return -ENODEV;
+
+ if (parse_next_property(buf, end, &name, &length, &value) == NULL)
+ return -EINVAL;
+
+ prop = new_property(name, length, value, NULL);
+ if (!prop)
+ return -ENOMEM;
+
+ prom_add_property(np, prop);
+
+ return 0;
+}
+
+static int do_remove_property(char *buf, size_t bufsize)
+{
+ struct device_node *np;
+ char *tmp;
+ struct property *prop;
+ buf = parse_node(buf, bufsize, &np);
+
+ if (!np)
+ return -ENODEV;
+
+ tmp = strchr(buf,' ');
+ if (tmp)
+ *tmp = '\0';
+
+ if (strlen(buf) == 0)
+ return -EINVAL;
+
+ prop = of_find_property(np, buf, NULL);
+
+ return prom_remove_property(np, prop);
+}
+
+static int do_update_property(char *buf, size_t bufsize)
+{
+ struct device_node *np;
+ unsigned char *value;
+ char *name, *end;
+ int length;
+ struct property *newprop, *oldprop;
+ buf = parse_node(buf, bufsize, &np);
+ end = buf + bufsize;
+
+ if (!np)
+ return -ENODEV;
+
+ if (parse_next_property(buf, end, &name, &length, &value) == NULL)
+ return -EINVAL;
+
+ newprop = new_property(name, length, value, NULL);
+ if (!newprop)
+ return -ENOMEM;
+
+ oldprop = of_find_property(np, name,NULL);
+ if (!oldprop)
+ return -ENODEV;
+
+ return prom_update_property(np, newprop, oldprop);
+}
+
/**
* ofdt_write - perform operations on the Open Firmware device tree
*
@@ -392,6 +486,12 @@ static ssize_t ofdt_write(struct file *file, const char __user *buf, size_t coun
rv = do_add_node(tmp, count - (tmp - kbuf));
else if (!strcmp(kbuf, "remove_node"))
rv = do_remove_node(tmp);
+ else if (!strcmp(kbuf, "add_property"))
+ rv = do_add_property(tmp, count - (tmp - kbuf));
+ else if (!strcmp(kbuf, "remove_property"))
+ rv = do_remove_property(tmp, count - (tmp - kbuf));
+ else if (!strcmp(kbuf, "update_property"))
+ rv = do_update_property(tmp, count - (tmp - kbuf));
else
rv = -EINVAL;
out:
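
The new add_property/remove_property/update_property commands are driven from userspace by writing a single line to the ofdt proc file. A minimal sketch for remove_property follows, assuming the interface is exposed as /proc/ppc64/ofdt; the phandle and property name are placeholders, and add_property/update_property additionally take the name/length/value triple consumed by parse_next_property (not shown in this hunk):

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Format seen in do_remove_property(): "<command> <phandle> <name>",
	 * with the phandle in decimal (parse_node uses simple_strtoul base 10). */
	const char *cmd = "remove_property 12345 example-property";
	int fd = open("/proc/ppc64/ofdt", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}
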
diff --git a/arch/powerpc/platforms/pseries/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c
index 2edc947f7c4..50643496eb6 100644
--- a/arch/powerpc/platforms/pseries/scanlog.c
+++ b/arch/powerpc/platforms/pseries/scanlog.c
@@ -192,7 +192,7 @@ struct file_operations scanlog_fops = {
.release = scanlog_release,
};
-int __init scanlog_init(void)
+static int __init scanlog_init(void)
{
struct proc_dir_entry *ent;
@@ -222,7 +222,7 @@ int __init scanlog_init(void)
return 0;
}
-void __exit scanlog_cleanup(void)
+static void __exit scanlog_cleanup(void)
{
if (proc_ppc64_scan_log_dump) {
kfree(proc_ppc64_scan_log_dump->data);
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 4a465f067ed..da6cebaf72c 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -56,6 +56,7 @@
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/irq.h>
+#include <asm/kexec.h>
#include <asm/time.h>
#include <asm/nvram.h>
#include "xics.h"
@@ -68,6 +69,7 @@
#include <asm/smp.h>
#include "plpar_wrappers.h"
+#include "ras.h"
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -76,22 +78,15 @@
#endif
extern void find_udbg_vterm(void);
-extern void system_reset_fwnmi(void); /* from head.S */
-extern void machine_check_fwnmi(void); /* from head.S */
-extern void generic_find_legacy_serial_ports(u64 *physport,
- unsigned int *default_speed);
int fwnmi_active; /* TRUE if an FWNMI handler is present */
-extern void pSeries_system_reset_exception(struct pt_regs *regs);
-extern int pSeries_machine_check_exception(struct pt_regs *regs);
-
static void pseries_shared_idle(void);
static void pseries_dedicated_idle(void);
struct mpic *pSeries_mpic;
-void pSeries_show_cpuinfo(struct seq_file *m)
+static void pSeries_show_cpuinfo(struct seq_file *m)
{
struct device_node *root;
const char *model = "";
@@ -105,18 +100,22 @@ void pSeries_show_cpuinfo(struct seq_file *m)
/* Initialize firmware assisted non-maskable interrupts if
* the firmware supports this feature.
- *
*/
static void __init fwnmi_init(void)
{
- int ret;
+ unsigned long system_reset_addr, machine_check_addr;
+
int ibm_nmi_register = rtas_token("ibm,nmi-register");
if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE)
return;
- ret = rtas_call(ibm_nmi_register, 2, 1, NULL,
- __pa((unsigned long)system_reset_fwnmi),
- __pa((unsigned long)machine_check_fwnmi));
- if (ret == 0)
+
+ /* If the kernel's not linked at zero we point the firmware at low
+ * addresses anyway, and use a trampoline to get to the real code. */
+ system_reset_addr = __pa(system_reset_fwnmi) - PHYSICAL_START;
+ machine_check_addr = __pa(machine_check_fwnmi) - PHYSICAL_START;
+
+ if (0 == rtas_call(ibm_nmi_register, 2, 1, NULL, system_reset_addr,
+ machine_check_addr))
fwnmi_active = 1;
}
@@ -191,7 +190,7 @@ static void pseries_lpar_enable_pmcs(void)
/* instruct hypervisor to maintain PMCs */
if (firmware_has_feature(FW_FEATURE_SPLPAR))
- get_paca()->lppaca.pmcregs_in_use = 1;
+ get_lppaca()->pmcregs_in_use = 1;
}
static void __init pSeries_setup_arch(void)
@@ -235,7 +234,7 @@ static void __init pSeries_setup_arch(void)
/* Choose an idle loop */
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
vpa_init(boot_cpuid);
- if (get_paca()->lppaca.shared_proc) {
+ if (get_lppaca()->shared_proc) {
printk(KERN_INFO "Using shared processor idle loop\n");
ppc_md.idle_loop = pseries_shared_idle;
} else {
@@ -323,15 +322,18 @@ static void __init pSeries_discover_pic(void)
ppc64_interrupt_controller = IC_INVALID;
for (np = NULL; (np = of_find_node_by_name(np, "interrupt-controller"));) {
typep = (char *)get_property(np, "compatible", NULL);
- if (strstr(typep, "open-pic"))
+ if (strstr(typep, "open-pic")) {
ppc64_interrupt_controller = IC_OPEN_PIC;
- else if (strstr(typep, "ppc-xicp"))
+ break;
+ } else if (strstr(typep, "ppc-xicp")) {
ppc64_interrupt_controller = IC_PPC_XIC;
- else
- printk("pSeries_discover_pic: failed to recognize"
- " interrupt-controller\n");
- break;
+ break;
+ }
}
+ if (ppc64_interrupt_controller == IC_INVALID)
+ printk("pSeries_discover_pic: failed to recognize"
+ " interrupt-controller\n");
+
}
static void pSeries_mach_cpu_die(void)
@@ -365,10 +367,7 @@ static int pseries_set_xdabr(unsigned long dabr)
*/
static void __init pSeries_init_early(void)
{
- void *comport;
int iommu_off = 0;
- unsigned int default_speed;
- u64 physport;
DBG(" -> pSeries_init_early()\n");
@@ -382,17 +381,8 @@ static void __init pSeries_init_early(void)
get_property(of_chosen, "linux,iommu-off", NULL));
}
- generic_find_legacy_serial_ports(&physport, &default_speed);
-
if (platform_is_lpar())
find_udbg_vterm();
- else if (physport) {
- /* Map the uart for udbg. */
- comport = (void *)ioremap(physport, 16);
- udbg_init_uart(comport, default_speed);
-
- DBG("Hello World !\n");
- }
if (firmware_has_feature(FW_FEATURE_DABR))
ppc_md.set_dabr = pseries_set_dabr;
@@ -454,10 +444,10 @@ DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
static inline void dedicated_idle_sleep(unsigned int cpu)
{
- struct paca_struct *ppaca = &paca[cpu ^ 1];
+ struct lppaca *plppaca = &lppaca[cpu ^ 1];
/* Only sleep if the other thread is not idle */
- if (!(ppaca->lppaca.idle)) {
+ if (!(plppaca->idle)) {
local_irq_disable();
/*
@@ -490,7 +480,6 @@ static inline void dedicated_idle_sleep(unsigned int cpu)
static void pseries_dedicated_idle(void)
{
- struct paca_struct *lpaca = get_paca();
unsigned int cpu = smp_processor_id();
unsigned long start_snooze;
unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
@@ -501,7 +490,7 @@ static void pseries_dedicated_idle(void)
* Indicate to the HV that we are idle. Now would be
* a good time to find other work to dispatch.
*/
- lpaca->lppaca.idle = 1;
+ get_lppaca()->idle = 1;
if (!need_resched()) {
start_snooze = get_tb() +
@@ -528,7 +517,7 @@ static void pseries_dedicated_idle(void)
HMT_medium();
}
- lpaca->lppaca.idle = 0;
+ get_lppaca()->idle = 0;
ppc64_runlatch_on();
preempt_enable_no_resched();
@@ -542,7 +531,6 @@ static void pseries_dedicated_idle(void)
static void pseries_shared_idle(void)
{
- struct paca_struct *lpaca = get_paca();
unsigned int cpu = smp_processor_id();
while (1) {
@@ -550,7 +538,7 @@ static void pseries_shared_idle(void)
* Indicate to the HV that we are idle. Now would be
* a good time to find other work to dispatch.
*/
- lpaca->lppaca.idle = 1;
+ get_lppaca()->idle = 1;
while (!need_resched() && !cpu_is_offline(cpu)) {
local_irq_disable();
@@ -574,7 +562,7 @@ static void pseries_shared_idle(void)
HMT_medium();
}
- lpaca->lppaca.idle = 0;
+ get_lppaca()->idle = 0;
ppc64_runlatch_on();
preempt_enable_no_resched();
@@ -598,7 +586,7 @@ static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
{
/* Don't risk a hypervisor call if we're crashing */
if (!crash_shutdown) {
- unsigned long vpa = __pa(&get_paca()->lppaca);
+ unsigned long vpa = __pa(get_lppaca());
if (unregister_vpa(hard_smp_processor_id(), vpa)) {
printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
@@ -638,5 +626,8 @@ struct machdep_calls __initdata pSeries_md = {
.machine_check_exception = pSeries_machine_check_exception,
#ifdef CONFIG_KEXEC
.kexec_cpu_down = pseries_kexec_cpu_down,
+ .machine_kexec = default_machine_kexec,
+ .machine_kexec_prepare = default_machine_kexec_prepare,
+ .machine_crash_shutdown = default_machine_crash_shutdown,
#endif
};
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 25181c594d7..8e6b1ed1396 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -93,7 +93,7 @@ static int query_cpu_stopped(unsigned int pcpu)
return cpu_status;
}
-int pSeries_cpu_disable(void)
+static int pSeries_cpu_disable(void)
{
int cpu = smp_processor_id();
@@ -109,7 +109,7 @@ int pSeries_cpu_disable(void)
return 0;
}
-void pSeries_cpu_die(unsigned int cpu)
+static void pSeries_cpu_die(unsigned int cpu)
{
int tries;
int cpu_status;
@@ -282,7 +282,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
pcpu = get_hard_smp_processor_id(lcpu);
/* Fixup atomic count: it exited inside IRQ handler. */
- paca[lcpu].__current->thread_info->preempt_count = 0;
+ task_thread_info(paca[lcpu].__current)->preempt_count = 0;
/*
* If the RTAS start-cpu token does not exist then presume the
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 0377decc071..fd823c7c9ac 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -381,7 +381,7 @@ int xics_get_irq(struct pt_regs *regs)
#ifdef CONFIG_SMP
-irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
int cpu = smp_processor_id();
@@ -407,7 +407,7 @@ irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
}
#endif
-#ifdef CONFIG_DEBUGGER
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
&xics_ipi_message[cpu].value)) {
mb();
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 6b7efcfc352..4c2b356774e 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -4,5 +4,7 @@ obj-$(CONFIG_PPC_I8259) += i8259.o
obj-$(CONFIG_PPC_MPC106) += grackle.o
obj-$(CONFIG_BOOKE) += dcr.o
obj-$(CONFIG_40x) += dcr.o
-obj-$(CONFIG_U3_DART) += u3_iommu.o
+obj-$(CONFIG_U3_DART) += dart_iommu.o
obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o
+obj-$(CONFIG_PPC_83xx) += ipic.o
+obj-$(CONFIG_FSL_SOC) += fsl_soc.o
diff --git a/arch/powerpc/sysdev/dart.h b/arch/powerpc/sysdev/dart.h
index 33ed9ed7fc1..c2d05763ccb 100644
--- a/arch/powerpc/sysdev/dart.h
+++ b/arch/powerpc/sysdev/dart.h
@@ -20,29 +20,44 @@
#define _POWERPC_SYSDEV_DART_H
-/* physical base of DART registers */
-#define DART_BASE 0xf8033000UL
-
/* Offset from base to control register */
-#define DARTCNTL 0
+#define DART_CNTL 0
+
/* Offset from base to exception register */
-#define DARTEXCP 0x10
+#define DART_EXCP_U3 0x10
/* Offset from base to TLB tag registers */
-#define DARTTAG 0x1000
+#define DART_TAGS_U3 0x1000
+/* U4 registers */
+#define DART_BASE_U4 0x10
+#define DART_SIZE_U4 0x20
+#define DART_EXCP_U4 0x30
+#define DART_TAGS_U4 0x1000
/* Control Register fields */
-/* base address of table (pfn) */
-#define DARTCNTL_BASE_MASK 0xfffff
-#define DARTCNTL_BASE_SHIFT 12
+/* U3 registers */
+#define DART_CNTL_U3_BASE_MASK 0xfffff
+#define DART_CNTL_U3_BASE_SHIFT 12
+#define DART_CNTL_U3_FLUSHTLB 0x400
+#define DART_CNTL_U3_ENABLE 0x200
+#define DART_CNTL_U3_SIZE_MASK 0x1ff
+#define DART_CNTL_U3_SIZE_SHIFT 0
+
+/* U4 registers */
+#define DART_BASE_U4_BASE_MASK 0xffffff
+#define DART_BASE_U4_BASE_SHIFT 0
+#define DART_CNTL_U4_FLUSHTLB 0x20000000
+#define DART_CNTL_U4_ENABLE 0x80000000
+#define DART_SIZE_U4_SIZE_MASK 0x1fff
+#define DART_SIZE_U4_SIZE_SHIFT 0
+
+#define DART_REG(r) (dart + ((r) >> 2))
+#define DART_IN(r) (in_be32(DART_REG(r)))
+#define DART_OUT(r,v) (out_be32(DART_REG(r), (v)))
-#define DARTCNTL_FLUSHTLB 0x400
-#define DARTCNTL_ENABLE 0x200
/* size of table in pages */
-#define DARTCNTL_SIZE_MASK 0x1ff
-#define DARTCNTL_SIZE_SHIFT 0
/* DART table fields */
diff --git a/arch/powerpc/sysdev/u3_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 5c1a26a6d00..977de9db875 100644
--- a/arch/powerpc/sysdev/u3_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -1,25 +1,27 @@
/*
- * arch/powerpc/sysdev/u3_iommu.c
+ * arch/powerpc/sysdev/dart_iommu.c
*
* Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
+ * Copyright (C) 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>,
+ * IBM Corporation
*
* Based on pSeries_iommu.c:
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
* Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
*
- * Dynamic DMA mapping support, Apple U3 & IBM CPC925 "DART" iommu.
+ * Dynamic DMA mapping support, Apple U3, U4 & IBM CPC925 "DART" iommu.
+ *
*
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
@@ -57,21 +59,22 @@ static unsigned long dart_tablesize;
static u32 *dart_vbase;
/* Mapped base address for the dart */
-static unsigned int *dart;
+static unsigned int *__iomem dart;
/* Dummy val that entries are set to when unused */
static unsigned int dart_emptyval;
-static struct iommu_table iommu_table_u3;
-static int iommu_table_u3_inited;
+static struct iommu_table iommu_table_dart;
+static int iommu_table_dart_inited;
static int dart_dirty;
+static int dart_is_u4;
#define DBG(...)
static inline void dart_tlb_invalidate_all(void)
{
unsigned long l = 0;
- unsigned int reg;
+ unsigned int reg, inv_bit;
unsigned long limit;
DBG("dart: flush\n");
@@ -81,29 +84,28 @@ static inline void dart_tlb_invalidate_all(void)
*
* Gotcha: Sometimes, the DART won't detect that the bit gets
* set. If so, clear it and set it again.
- */
+ */
limit = 0;
+ inv_bit = dart_is_u4 ? DART_CNTL_U4_FLUSHTLB : DART_CNTL_U3_FLUSHTLB;
retry:
- reg = in_be32((unsigned int *)dart+DARTCNTL);
- reg |= DARTCNTL_FLUSHTLB;
- out_be32((unsigned int *)dart+DARTCNTL, reg);
-
l = 0;
- while ((in_be32((unsigned int *)dart+DARTCNTL) & DARTCNTL_FLUSHTLB) &&
- l < (1L<<limit)) {
+ reg = DART_IN(DART_CNTL);
+ reg |= inv_bit;
+ DART_OUT(DART_CNTL, reg);
+
+ while ((DART_IN(DART_CNTL) & inv_bit) && l < (1L << limit))
l++;
- }
- if (l == (1L<<limit)) {
+ if (l == (1L << limit)) {
if (limit < 4) {
limit++;
- reg = in_be32((unsigned int *)dart+DARTCNTL);
- reg &= ~DARTCNTL_FLUSHTLB;
- out_be32((unsigned int *)dart+DARTCNTL, reg);
+ reg = DART_IN(DART_CNTL);
+ reg &= ~inv_bit;
+ DART_OUT(DART_CNTL, reg);
goto retry;
} else
- panic("U3-DART: TLB did not flush after waiting a long "
+ panic("DART: TLB did not flush after waiting a long "
"time. Buggy U3 ?");
}
}
@@ -115,7 +117,7 @@ static void dart_flush(struct iommu_table *tbl)
dart_dirty = 0;
}
-static void dart_build(struct iommu_table *tbl, long index,
+static void dart_build(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction)
{
@@ -128,7 +130,7 @@ static void dart_build(struct iommu_table *tbl, long index,
npages <<= DART_PAGE_FACTOR;
dp = ((unsigned int*)tbl->it_base) + index;
-
+
/* On U3, all memory is contiguous, so we can move this
* out of the loop.
*/
@@ -137,7 +139,6 @@ static void dart_build(struct iommu_table *tbl, long index,
*(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK);
- rpn++;
uaddr += DART_PAGE_SIZE;
}
@@ -148,7 +149,7 @@ static void dart_build(struct iommu_table *tbl, long index,
static void dart_free(struct iommu_table *tbl, long index, long npages)
{
unsigned int *dp;
-
+
/* We don't worry about flushing the TLB cache. The only drawback of
* not doing it is that we won't catch buggy device drivers doing
* bad DMAs, but then no 32-bit architecture ever does either.
@@ -160,7 +161,7 @@ static void dart_free(struct iommu_table *tbl, long index, long npages)
npages <<= DART_PAGE_FACTOR;
dp = ((unsigned int *)tbl->it_base) + index;
-
+
while (npages--)
*(dp++) = dart_emptyval;
}
@@ -168,20 +169,25 @@ static void dart_free(struct iommu_table *tbl, long index, long npages)
static int dart_init(struct device_node *dart_node)
{
- unsigned int regword;
unsigned int i;
- unsigned long tmp;
+ unsigned long tmp, base, size;
+ struct resource r;
if (dart_tablebase == 0 || dart_tablesize == 0) {
- printk(KERN_INFO "U3-DART: table not allocated, using direct DMA\n");
+ printk(KERN_INFO "DART: table not allocated, using "
+ "direct DMA\n");
return -ENODEV;
}
+ if (of_address_to_resource(dart_node, 0, &r))
+ panic("DART: can't get register base ! ");
+
/* Make sure nothing from the DART range remains in the CPU cache
* from a previous mapping that existed before the kernel took
* over
*/
- flush_dcache_phys_range(dart_tablebase, dart_tablebase + dart_tablesize);
+ flush_dcache_phys_range(dart_tablebase,
+ dart_tablebase + dart_tablesize);
/* Allocate a spare page to map all invalid DART pages. We need to do
* that to work around what looks like a problem with the HT bridge
@@ -189,21 +195,16 @@ static int dart_init(struct device_node *dart_node)
*/
tmp = lmb_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
if (!tmp)
- panic("U3-DART: Cannot allocate spare page!");
- dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) & DARTMAP_RPNMASK);
+ panic("DART: Cannot allocate spare page!");
+ dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) &
+ DARTMAP_RPNMASK);
- /* Map in DART registers. FIXME: Use device node to get base address */
- dart = ioremap(DART_BASE, 0x7000);
+ /* Map in DART registers */
+ dart = ioremap(r.start, r.end - r.start + 1);
if (dart == NULL)
- panic("U3-DART: Cannot map registers!");
+ panic("DART: Cannot map registers!");
- /* Set initial control register contents: table base,
- * table size and enable bit
- */
- regword = DARTCNTL_ENABLE |
- ((dart_tablebase >> DART_PAGE_SHIFT) << DARTCNTL_BASE_SHIFT) |
- (((dart_tablesize >> DART_PAGE_SHIFT) & DARTCNTL_SIZE_MASK)
- << DARTCNTL_SIZE_SHIFT);
+ /* Map in DART table */
dart_vbase = ioremap(virt_to_abs(dart_tablebase), dart_tablesize);
/* Fill initial table */
@@ -211,36 +212,50 @@ static int dart_init(struct device_node *dart_node)
dart_vbase[i] = dart_emptyval;
/* Initialize DART with table base and enable it. */
- out_be32((unsigned int *)dart, regword);
+ base = dart_tablebase >> DART_PAGE_SHIFT;
+ size = dart_tablesize >> DART_PAGE_SHIFT;
+ if (dart_is_u4) {
+ size &= DART_SIZE_U4_SIZE_MASK;
+ DART_OUT(DART_BASE_U4, base);
+ DART_OUT(DART_SIZE_U4, size);
+ DART_OUT(DART_CNTL, DART_CNTL_U4_ENABLE);
+ } else {
+ size &= DART_CNTL_U3_SIZE_MASK;
+ DART_OUT(DART_CNTL,
+ DART_CNTL_U3_ENABLE |
+ (base << DART_CNTL_U3_BASE_SHIFT) |
+ (size << DART_CNTL_U3_SIZE_SHIFT));
+ }
/* Invalidate DART to get rid of possible stale TLBs */
dart_tlb_invalidate_all();
- printk(KERN_INFO "U3/CPC925 DART IOMMU initialized\n");
+ printk(KERN_INFO "DART IOMMU initialized for %s type chipset\n",
+ dart_is_u4 ? "U4" : "U3");
return 0;
}
-static void iommu_table_u3_setup(void)
+static void iommu_table_dart_setup(void)
{
- iommu_table_u3.it_busno = 0;
- iommu_table_u3.it_offset = 0;
+ iommu_table_dart.it_busno = 0;
+ iommu_table_dart.it_offset = 0;
/* it_size is in number of entries */
- iommu_table_u3.it_size = (dart_tablesize / sizeof(u32)) >> DART_PAGE_FACTOR;
+ iommu_table_dart.it_size = (dart_tablesize / sizeof(u32)) >> DART_PAGE_FACTOR;
/* Initialize the common IOMMU code */
- iommu_table_u3.it_base = (unsigned long)dart_vbase;
- iommu_table_u3.it_index = 0;
- iommu_table_u3.it_blocksize = 1;
- iommu_init_table(&iommu_table_u3);
+ iommu_table_dart.it_base = (unsigned long)dart_vbase;
+ iommu_table_dart.it_index = 0;
+ iommu_table_dart.it_blocksize = 1;
+ iommu_init_table(&iommu_table_dart);
/* Reserve the last page of the DART to avoid possible prefetch
* past the DART mapped area
*/
- set_bit(iommu_table_u3.it_size - 1, iommu_table_u3.it_map);
+ set_bit(iommu_table_dart.it_size - 1, iommu_table_dart.it_map);
}
-static void iommu_dev_setup_u3(struct pci_dev *dev)
+static void iommu_dev_setup_dart(struct pci_dev *dev)
{
struct device_node *dn;
@@ -254,35 +269,39 @@ static void iommu_dev_setup_u3(struct pci_dev *dev)
dn = pci_device_to_OF_node(dev);
if (dn)
- PCI_DN(dn)->iommu_table = &iommu_table_u3;
+ PCI_DN(dn)->iommu_table = &iommu_table_dart;
}
-static void iommu_bus_setup_u3(struct pci_bus *bus)
+static void iommu_bus_setup_dart(struct pci_bus *bus)
{
struct device_node *dn;
- if (!iommu_table_u3_inited) {
- iommu_table_u3_inited = 1;
- iommu_table_u3_setup();
+ if (!iommu_table_dart_inited) {
+ iommu_table_dart_inited = 1;
+ iommu_table_dart_setup();
}
dn = pci_bus_to_OF_node(bus);
if (dn)
- PCI_DN(dn)->iommu_table = &iommu_table_u3;
+ PCI_DN(dn)->iommu_table = &iommu_table_dart;
}
static void iommu_dev_setup_null(struct pci_dev *dev) { }
static void iommu_bus_setup_null(struct pci_bus *bus) { }
-void iommu_init_early_u3(void)
+void iommu_init_early_dart(void)
{
struct device_node *dn;
/* Find the DART in the device-tree */
dn = of_find_compatible_node(NULL, "dart", "u3-dart");
- if (dn == NULL)
- return;
+ if (dn == NULL) {
+ dn = of_find_compatible_node(NULL, "dart", "u4-dart");
+ if (dn == NULL)
+ goto bail;
+ dart_is_u4 = 1;
+ }
/* Setup low level TCE operations for the core IOMMU code */
ppc_md.tce_build = dart_build;
@@ -290,24 +309,27 @@ void iommu_init_early_u3(void)
ppc_md.tce_flush = dart_flush;
/* Initialize the DART HW */
- if (dart_init(dn)) {
- /* If init failed, use direct iommu and null setup functions */
- ppc_md.iommu_dev_setup = iommu_dev_setup_null;
- ppc_md.iommu_bus_setup = iommu_bus_setup_null;
-
- /* Setup pci_dma ops */
- pci_direct_iommu_init();
- } else {
- ppc_md.iommu_dev_setup = iommu_dev_setup_u3;
- ppc_md.iommu_bus_setup = iommu_bus_setup_u3;
+ if (dart_init(dn) == 0) {
+ ppc_md.iommu_dev_setup = iommu_dev_setup_dart;
+ ppc_md.iommu_bus_setup = iommu_bus_setup_dart;
/* Setup pci_dma ops */
pci_iommu_init();
+
+ return;
}
+
+ bail:
+ /* If init failed, use direct iommu and null setup functions */
+ ppc_md.iommu_dev_setup = iommu_dev_setup_null;
+ ppc_md.iommu_bus_setup = iommu_bus_setup_null;
+
+ /* Setup pci_dma ops */
+ pci_direct_iommu_init();
}
-void __init alloc_u3_dart_table(void)
+void __init alloc_dart_table(void)
{
/* Only reserve DART space if machine has more than 2GB of RAM
* or if requested with iommu=on on cmdline.
@@ -323,5 +345,5 @@ void __init alloc_u3_dart_table(void)
dart_tablebase = (unsigned long)
abs_to_virt(lmb_alloc_base(1UL<<24, 1UL<<24, 0x80000000L));
- printk(KERN_INFO "U3-DART allocated at: %lx\n", dart_tablebase);
+ printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase);
}
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
new file mode 100644
index 00000000000..064c9de4773
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -0,0 +1,317 @@
+/*
+ * FSL SoC setup code
+ *
+ * Maintained by Kumar Gala (see MAINTAINERS for contact information)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/major.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/fsl_devices.h>
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <sysdev/fsl_soc.h>
+#include <mm/mmu_decl.h>
+
+static phys_addr_t immrbase = -1;
+
+phys_addr_t get_immrbase(void)
+{
+ struct device_node *soc;
+
+ if (immrbase != -1)
+ return immrbase;
+
+ soc = of_find_node_by_type(NULL, "soc");
+ if (soc != 0) {
+ unsigned int size;
+ void *prop = get_property(soc, "reg", &size);
+ immrbase = of_translate_address(soc, prop);
+ of_node_put(soc);
+ };
+
+ return immrbase;
+}
+EXPORT_SYMBOL(get_immrbase);
+
+static const char * gfar_tx_intr = "tx";
+static const char * gfar_rx_intr = "rx";
+static const char * gfar_err_intr = "error";
+
+static int __init gfar_of_init(void)
+{
+ struct device_node *np;
+ unsigned int i;
+ struct platform_device *mdio_dev, *gfar_dev;
+ struct resource res;
+ int ret;
+
+ for (np = NULL, i = 0; (np = of_find_compatible_node(np, "mdio", "gianfar")) != NULL; i++) {
+ int k;
+ struct device_node *child = NULL;
+ struct gianfar_mdio_data mdio_data;
+
+ memset(&res, 0, sizeof(res));
+ memset(&mdio_data, 0, sizeof(mdio_data));
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ goto mdio_err;
+
+ mdio_dev = platform_device_register_simple("fsl-gianfar_mdio", res.start, &res, 1);
+ if (IS_ERR(mdio_dev)) {
+ ret = PTR_ERR(mdio_dev);
+ goto mdio_err;
+ }
+
+ for (k = 0; k < 32; k++)
+ mdio_data.irq[k] = -1;
+
+ while ((child = of_get_next_child(np, child)) != NULL) {
+ if (child->n_intrs) {
+ u32 *id = (u32 *) get_property(child, "reg", NULL);
+ mdio_data.irq[*id] = child->intrs[0].line;
+ }
+ }
+
+ ret = platform_device_add_data(mdio_dev, &mdio_data, sizeof(struct gianfar_mdio_data));
+ if (ret)
+ goto mdio_unreg;
+ }
+
+ for (np = NULL, i = 0; (np = of_find_compatible_node(np, "network", "gianfar")) != NULL; i++) {
+ struct resource r[4];
+ struct device_node *phy, *mdio;
+ struct gianfar_platform_data gfar_data;
+ unsigned int *id;
+ char *model;
+ void *mac_addr;
+ phandle *ph;
+
+ memset(r, 0, sizeof(r));
+ memset(&gfar_data, 0, sizeof(gfar_data));
+
+ ret = of_address_to_resource(np, 0, &r[0]);
+ if (ret)
+ goto gfar_err;
+
+ r[1].start = np->intrs[0].line;
+ r[1].end = np->intrs[0].line;
+ r[1].flags = IORESOURCE_IRQ;
+
+ model = get_property(np, "model", NULL);
+
+ /* If we aren't the FEC, we have multiple interrupts */
+ if (model && strcasecmp(model, "FEC")) {
+ r[1].name = gfar_tx_intr;
+
+ r[2].name = gfar_rx_intr;
+ r[2].start = np->intrs[1].line;
+ r[2].end = np->intrs[1].line;
+ r[2].flags = IORESOURCE_IRQ;
+
+ r[3].name = gfar_err_intr;
+ r[3].start = np->intrs[2].line;
+ r[3].end = np->intrs[2].line;
+ r[3].flags = IORESOURCE_IRQ;
+ }
+
+ gfar_dev = platform_device_register_simple("fsl-gianfar", i, &r[0], np->n_intrs + 1);
+
+ if (IS_ERR(gfar_dev)) {
+ ret = PTR_ERR(gfar_dev);
+ goto gfar_err;
+ }
+
+ mac_addr = get_property(np, "address", NULL);
+ memcpy(gfar_data.mac_addr, mac_addr, 6);
+
+ if (model && !strcasecmp(model, "TSEC"))
+ gfar_data.device_flags =
+ FSL_GIANFAR_DEV_HAS_GIGABIT |
+ FSL_GIANFAR_DEV_HAS_COALESCE |
+ FSL_GIANFAR_DEV_HAS_RMON |
+ FSL_GIANFAR_DEV_HAS_MULTI_INTR;
+ if (model && !strcasecmp(model, "eTSEC"))
+ gfar_data.device_flags =
+ FSL_GIANFAR_DEV_HAS_GIGABIT |
+ FSL_GIANFAR_DEV_HAS_COALESCE |
+ FSL_GIANFAR_DEV_HAS_RMON |
+ FSL_GIANFAR_DEV_HAS_MULTI_INTR |
+ FSL_GIANFAR_DEV_HAS_CSUM |
+ FSL_GIANFAR_DEV_HAS_VLAN |
+ FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
+
+ ph = (phandle *) get_property(np, "phy-handle", NULL);
+ phy = of_find_node_by_phandle(*ph);
+
+ if (phy == NULL) {
+ ret = -ENODEV;
+ goto gfar_unreg;
+ }
+
+ mdio = of_get_parent(phy);
+
+ id = (u32 *) get_property(phy, "reg", NULL);
+ ret = of_address_to_resource(mdio, 0, &res);
+ if (ret) {
+ of_node_put(phy);
+ of_node_put(mdio);
+ goto gfar_unreg;
+ }
+
+ gfar_data.phy_id = *id;
+ gfar_data.bus_id = res.start;
+
+ of_node_put(phy);
+ of_node_put(mdio);
+
+ ret = platform_device_add_data(gfar_dev, &gfar_data, sizeof(struct gianfar_platform_data));
+ if (ret)
+ goto gfar_unreg;
+ }
+
+ return 0;
+
+mdio_unreg:
+ platform_device_unregister(mdio_dev);
+mdio_err:
+ return ret;
+
+gfar_unreg:
+ platform_device_unregister(gfar_dev);
+gfar_err:
+ return ret;
+}
+arch_initcall(gfar_of_init);
+
+static int __init fsl_i2c_of_init(void)
+{
+ struct device_node *np;
+ unsigned int i;
+ struct platform_device *i2c_dev;
+ int ret;
+
+ for (np = NULL, i = 0; (np = of_find_compatible_node(np, "i2c", "fsl-i2c")) != NULL; i++) {
+ struct resource r[2];
+ struct fsl_i2c_platform_data i2c_data;
+ unsigned char * flags = NULL;
+
+ memset(&r, 0, sizeof(r));
+ memset(&i2c_data, 0, sizeof(i2c_data));
+
+ ret = of_address_to_resource(np, 0, &r[0]);
+ if (ret)
+ goto i2c_err;
+
+ r[1].start = np->intrs[0].line;
+ r[1].end = np->intrs[0].line;
+ r[1].flags = IORESOURCE_IRQ;
+
+ i2c_dev = platform_device_register_simple("fsl-i2c", i, r, 2);
+ if (IS_ERR(i2c_dev)) {
+ ret = PTR_ERR(i2c_dev);
+ goto i2c_err;
+ }
+
+ i2c_data.device_flags = 0;
+ flags = get_property(np, "dfsrr", NULL);
+ if (flags)
+ i2c_data.device_flags |= FSL_I2C_DEV_SEPARATE_DFSRR;
+
+ flags = get_property(np, "fsl5200-clocking", NULL);
+ if (flags)
+ i2c_data.device_flags |= FSL_I2C_DEV_CLOCK_5200;
+
+ ret = platform_device_add_data(i2c_dev, &i2c_data, sizeof(struct fsl_i2c_platform_data));
+ if (ret)
+ goto i2c_unreg;
+ }
+
+ return 0;
+
+i2c_unreg:
+ platform_device_unregister(i2c_dev);
+i2c_err:
+ return ret;
+}
+arch_initcall(fsl_i2c_of_init);
+
+#ifdef CONFIG_PPC_83xx
+static int __init mpc83xx_wdt_init(void)
+{
+ struct resource r;
+ struct device_node *soc, *np;
+ struct platform_device *dev;
+ unsigned int *freq;
+ int ret;
+
+ np = of_find_compatible_node(NULL, "watchdog", "mpc83xx_wdt");
+
+ if (!np) {
+ ret = -ENODEV;
+ goto mpc83xx_wdt_nodev;
+ }
+
+ soc = of_find_node_by_type(NULL, "soc");
+
+ if (!soc) {
+ ret = -ENODEV;
+ goto mpc83xx_wdt_nosoc;
+ }
+
+ freq = (unsigned int *)get_property(soc, "bus-frequency", NULL);
+ if (!freq) {
+ ret = -ENODEV;
+ goto mpc83xx_wdt_err;
+ }
+
+ memset(&r, 0, sizeof(r));
+
+ ret = of_address_to_resource(np, 0, &r);
+ if (ret)
+ goto mpc83xx_wdt_err;
+
+ dev = platform_device_register_simple("mpc83xx_wdt", 0, &r, 1);
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ goto mpc83xx_wdt_err;
+ }
+
+ ret = platform_device_add_data(dev, freq, sizeof(int));
+ if (ret)
+ goto mpc83xx_wdt_unreg;
+
+ of_node_put(soc);
+ of_node_put(np);
+
+ return 0;
+
+mpc83xx_wdt_unreg:
+ platform_device_unregister(dev);
+mpc83xx_wdt_err:
+ of_node_put(soc);
+mpc83xx_wdt_nosoc:
+ of_node_put(np);
+mpc83xx_wdt_nodev:
+ return ret;
+}
+arch_initcall(mpc83xx_wdt_init);
+#endif
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
new file mode 100644
index 00000000000..c433d3f39ed
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -0,0 +1,8 @@
+#ifndef __PPC_FSL_SOC_H
+#define __PPC_FSL_SOC_H
+#ifdef __KERNEL__
+
+extern phys_addr_t get_immrbase(void);
+
+#endif
+#endif
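
A minimal sketch (not part of the patch) of how platform code might consume get_immrbase() to map a block of SoC registers; the 0x300 offset and 0x100 size are placeholders, not real device offsets:

#include <asm/io.h>
#include <sysdev/fsl_soc.h>

static void __iomem *example_map_soc_regs(void)
{
	phys_addr_t immr = get_immrbase();

	/* get_immrbase() returns -1 when no "soc" node was found */
	if (immr == (phys_addr_t)-1)
		return NULL;

	return ioremap(immr + 0x300, 0x100);
}
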
diff --git a/arch/ppc/syslib/ipic.c b/arch/powerpc/sysdev/ipic.c
index 8f01e0f1d84..8f01e0f1d84 100644
--- a/arch/ppc/syslib/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
diff --git a/arch/ppc/syslib/ipic.h b/arch/powerpc/sysdev/ipic.h
index a7ce7da8785..a7ce7da8785 100644
--- a/arch/ppc/syslib/ipic.h
+++ b/arch/powerpc/sysdev/ipic.h
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 58d1cc2023c..4f26304d026 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -13,6 +13,9 @@
*/
#undef DEBUG
+#undef DEBUG_IPI
+#undef DEBUG_IRQ
+#undef DEBUG_LOW
#include <linux/config.h>
#include <linux/types.h>
@@ -45,7 +48,11 @@ static struct mpic *mpic_primary;
static DEFINE_SPINLOCK(mpic_lock);
#ifdef CONFIG_PPC32 /* XXX for now */
-#define distribute_irqs CONFIG_IRQ_ALL_CPUS
+#ifdef CONFIG_IRQ_ALL_CPUS
+#define distribute_irqs (1)
+#else
+#define distribute_irqs (0)
+#endif
#endif
/*
@@ -164,70 +171,129 @@ static void __init mpic_test_broken_ipi(struct mpic *mpic)
/* Test if an interrupt is sourced from HyperTransport (used on broken U3s)
* to force the edge setting on the MPIC and do the ack workaround.
*/
-static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source_no)
+static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source)
{
- if (source_no >= 128 || !mpic->fixups)
+ if (source >= 128 || !mpic->fixups)
return 0;
- return mpic->fixups[source_no].base != NULL;
+ return mpic->fixups[source].base != NULL;
}
-static inline void mpic_apic_end_irq(struct mpic *mpic, unsigned int source_no)
+
+static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source)
{
- struct mpic_irq_fixup *fixup = &mpic->fixups[source_no];
- u32 tmp;
+ struct mpic_irq_fixup *fixup = &mpic->fixups[source];
- spin_lock(&mpic->fixup_lock);
- writeb(0x11 + 2 * fixup->irq, fixup->base);
- tmp = readl(fixup->base + 2);
- writel(tmp | 0x80000000ul, fixup->base + 2);
- /* config writes shouldn't be posted but let's be safe ... */
- (void)readl(fixup->base + 2);
- spin_unlock(&mpic->fixup_lock);
+ if (fixup->applebase) {
+ unsigned int soff = (fixup->index >> 3) & ~3;
+ unsigned int mask = 1U << (fixup->index & 0x1f);
+ writel(mask, fixup->applebase + soff);
+ } else {
+ spin_lock(&mpic->fixup_lock);
+ writeb(0x11 + 2 * fixup->index, fixup->base + 2);
+ writel(fixup->data, fixup->base + 4);
+ spin_unlock(&mpic->fixup_lock);
+ }
}
+static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
+ unsigned int irqflags)
+{
+ struct mpic_irq_fixup *fixup = &mpic->fixups[source];
+ unsigned long flags;
+ u32 tmp;
-static void __init mpic_amd8111_read_irq(struct mpic *mpic, u8 __iomem *devbase)
+ if (fixup->base == NULL)
+ return;
+
+ DBG("startup_ht_interrupt(%u, %u) index: %d\n",
+ source, irqflags, fixup->index);
+ spin_lock_irqsave(&mpic->fixup_lock, flags);
+ /* Enable and configure */
+ writeb(0x10 + 2 * fixup->index, fixup->base + 2);
+ tmp = readl(fixup->base + 4);
+ tmp &= ~(0x23U);
+ if (irqflags & IRQ_LEVEL)
+ tmp |= 0x22;
+ writel(tmp, fixup->base + 4);
+ spin_unlock_irqrestore(&mpic->fixup_lock, flags);
+}
+
+static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source,
+ unsigned int irqflags)
{
- int i, irq;
+ struct mpic_irq_fixup *fixup = &mpic->fixups[source];
+ unsigned long flags;
u32 tmp;
- printk(KERN_INFO "mpic: - Workarounds on AMD 8111 @ %p\n", devbase);
+ if (fixup->base == NULL)
+ return;
- for (i=0; i < 24; i++) {
- writeb(0x10 + 2*i, devbase + 0xf2);
- tmp = readl(devbase + 0xf4);
- if ((tmp & 0x1) || !(tmp & 0x20))
- continue;
- irq = (tmp >> 16) & 0xff;
- mpic->fixups[irq].irq = i;
- mpic->fixups[irq].base = devbase + 0xf2;
- }
+ DBG("shutdown_ht_interrupt(%u, %u)\n", source, irqflags);
+
+ /* Disable */
+ spin_lock_irqsave(&mpic->fixup_lock, flags);
+ writeb(0x10 + 2 * fixup->index, fixup->base + 2);
+ tmp = readl(fixup->base + 4);
+ tmp &= ~1U;
+ writel(tmp, fixup->base + 4);
+ spin_unlock_irqrestore(&mpic->fixup_lock, flags);
}
-
-static void __init mpic_amd8131_read_irq(struct mpic *mpic, u8 __iomem *devbase)
+
+static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase,
+ unsigned int devfn, u32 vdid)
{
- int i, irq;
+ int i, irq, n;
+ u8 __iomem *base;
u32 tmp;
+ u8 pos;
+
+ for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0;
+ pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) {
+ u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);
+ if (id == PCI_CAP_ID_HT_IRQCONF) {
+ id = readb(devbase + pos + 3);
+ if (id == 0x80)
+ break;
+ }
+ }
+ if (pos == 0)
+ return;
- printk(KERN_INFO "mpic: - Workarounds on AMD 8131 @ %p\n", devbase);
+ base = devbase + pos;
+ writeb(0x01, base + 2);
+ n = (readl(base + 4) >> 16) & 0xff;
- for (i=0; i < 4; i++) {
- writeb(0x10 + 2*i, devbase + 0xba);
- tmp = readl(devbase + 0xbc);
- if ((tmp & 0x1) || !(tmp & 0x20))
- continue;
+ printk(KERN_INFO "mpic: - HT:%02x.%x [0x%02x] vendor %04x device %04x"
+ " has %d irqs\n",
+ devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1);
+
+ for (i = 0; i <= n; i++) {
+ writeb(0x10 + 2 * i, base + 2);
+ tmp = readl(base + 4);
irq = (tmp >> 16) & 0xff;
- mpic->fixups[irq].irq = i;
- mpic->fixups[irq].base = devbase + 0xba;
+ DBG("HT PIC index 0x%x, irq 0x%x, tmp: %08x\n", i, irq, tmp);
+ /* mask it; it will be unmasked later */
+ tmp |= 0x1;
+ writel(tmp, base + 4);
+ mpic->fixups[irq].index = i;
+ mpic->fixups[irq].base = base;
+ /* Apple HT PIC has a non-standard way of doing EOIs */
+ if ((vdid & 0xffff) == 0x106b)
+ mpic->fixups[irq].applebase = devbase + 0x60;
+ else
+ mpic->fixups[irq].applebase = NULL;
+ writeb(0x11 + 2 * i, base + 2);
+ mpic->fixups[irq].data = readl(base + 4) | 0x80000000;
}
}
-static void __init mpic_scan_ioapics(struct mpic *mpic)
+
+static void __init mpic_scan_ht_pics(struct mpic *mpic)
{
unsigned int devfn;
u8 __iomem *cfgspace;
- printk(KERN_INFO "mpic: Setting up IO-APICs workarounds for U3\n");
+ printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n");
/* Allocate fixups array */
mpic->fixups = alloc_bootmem(128 * sizeof(struct mpic_irq_fixup));
@@ -237,21 +303,20 @@ static void __init mpic_scan_ioapics(struct mpic *mpic)
/* Init spinlock */
spin_lock_init(&mpic->fixup_lock);
- /* Map u3 config space. We assume all IO-APICs are on the primary bus
- * and slot will never be above "0xf" so we only need to map 32k
+ /* Map U3 config space. We assume all IO-APICs are on the primary bus
+ * so we only need to map 64kB.
*/
- cfgspace = (unsigned char __iomem *)ioremap(0xf2000000, 0x8000);
+ cfgspace = ioremap(0xf2000000, 0x10000);
BUG_ON(cfgspace == NULL);
- /* Now we scan all slots. We do a very quick scan, we read the header type,
- * vendor ID and device ID only, that's plenty enough
+ /* Now we scan all slots. We do a very quick scan, we read the header
+ * type, vendor ID and device ID only, that's plenty enough
*/
- for (devfn = 0; devfn < PCI_DEVFN(0x10,0); devfn ++) {
+ for (devfn = 0; devfn < 0x100; devfn++) {
u8 __iomem *devbase = cfgspace + (devfn << 8);
u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
u32 l = readl(devbase + PCI_VENDOR_ID);
- u16 vendor_id, device_id;
- int multifunc = 0;
+ u16 s;
DBG("devfn %x, l: %x\n", devfn, l);
@@ -259,22 +324,16 @@ static void __init mpic_scan_ioapics(struct mpic *mpic)
if (l == 0xffffffff || l == 0x00000000 ||
l == 0x0000ffff || l == 0xffff0000)
goto next;
+ /* Check if it supports capability lists */
+ s = readw(devbase + PCI_STATUS);
+ if (!(s & PCI_STATUS_CAP_LIST))
+ goto next;
+
+ mpic_scan_ht_pic(mpic, devbase, devfn, l);
- /* Check if it's a multifunction device (only really used
- * to function 0 though
- */
- multifunc = !!(hdr_type & 0x80);
- vendor_id = l & 0xffff;
- device_id = (l >> 16) & 0xffff;
-
- /* If a known device, go to fixup setup code */
- if (vendor_id == PCI_VENDOR_ID_AMD && device_id == 0x7460)
- mpic_amd8111_read_irq(mpic, devbase);
- if (vendor_id == PCI_VENDOR_ID_AMD && device_id == 0x7450)
- mpic_amd8131_read_irq(mpic, devbase);
next:
/* next device, if function 0 */
- if ((PCI_FUNC(devfn) == 0) && !multifunc)
+ if (PCI_FUNC(devfn) == 0 && (hdr_type & 0x80) == 0)
devfn += 7;
}
}
@@ -371,6 +430,31 @@ static void mpic_enable_irq(unsigned int irq)
break;
}
} while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK);
+
+#ifdef CONFIG_MPIC_BROKEN_U3
+ if (mpic->flags & MPIC_BROKEN_U3) {
+ unsigned int src = irq - mpic->irq_offset;
+ if (mpic_is_ht_interrupt(mpic, src) &&
+ (irq_desc[irq].status & IRQ_LEVEL))
+ mpic_ht_end_irq(mpic, src);
+ }
+#endif /* CONFIG_MPIC_BROKEN_U3 */
+}
+
+static unsigned int mpic_startup_irq(unsigned int irq)
+{
+#ifdef CONFIG_MPIC_BROKEN_U3
+ struct mpic *mpic = mpic_from_irq(irq);
+ unsigned int src = irq - mpic->irq_offset;
+
+ if (mpic_is_ht_interrupt(mpic, src))
+ mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status);
+
+#endif /* CONFIG_MPIC_BROKEN_U3 */
+
+ mpic_enable_irq(irq);
+
+ return 0;
}
static void mpic_disable_irq(unsigned int irq)
@@ -394,12 +478,27 @@ static void mpic_disable_irq(unsigned int irq)
} while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK));
}
+static void mpic_shutdown_irq(unsigned int irq)
+{
+#ifdef CONFIG_MPIC_BROKEN_U3
+ struct mpic *mpic = mpic_from_irq(irq);
+ unsigned int src = irq - mpic->irq_offset;
+
+ if (mpic_is_ht_interrupt(mpic, src))
+ mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status);
+
+#endif /* CONFIG_MPIC_BROKEN_U3 */
+
+ mpic_disable_irq(irq);
+}
+
static void mpic_end_irq(unsigned int irq)
{
struct mpic *mpic = mpic_from_irq(irq);
+#ifdef DEBUG_IRQ
DBG("%s: end_irq: %d\n", mpic->name, irq);
-
+#endif
/* We always EOI on end_irq() even for edge interrupts since that
* should only lower the priority, the MPIC should have properly
* latched another edge interrupt coming in anyway
@@ -408,8 +507,9 @@ static void mpic_end_irq(unsigned int irq)
#ifdef CONFIG_MPIC_BROKEN_U3
if (mpic->flags & MPIC_BROKEN_U3) {
unsigned int src = irq - mpic->irq_offset;
- if (mpic_is_ht_interrupt(mpic, src))
- mpic_apic_end_irq(mpic, src);
+ if (mpic_is_ht_interrupt(mpic, src) &&
+ (irq_desc[irq].status & IRQ_LEVEL))
+ mpic_ht_end_irq(mpic, src);
}
#endif /* CONFIG_MPIC_BROKEN_U3 */
@@ -490,6 +590,8 @@ struct mpic * __init mpic_alloc(unsigned long phys_addr,
mpic->name = name;
mpic->hc_irq.typename = name;
+ mpic->hc_irq.startup = mpic_startup_irq;
+ mpic->hc_irq.shutdown = mpic_shutdown_irq;
mpic->hc_irq.enable = mpic_enable_irq;
mpic->hc_irq.disable = mpic_disable_irq;
mpic->hc_irq.end = mpic_end_irq;
@@ -658,10 +760,10 @@ void __init mpic_init(struct mpic *mpic)
mpic->irq_count = mpic->num_sources;
#ifdef CONFIG_MPIC_BROKEN_U3
- /* Do the ioapic fixups on U3 broken mpic */
+ /* Do the HT PIC fixups on U3 broken mpic */
DBG("MPIC flags: %x\n", mpic->flags);
if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY))
- mpic_scan_ioapics(mpic);
+ mpic_scan_ht_pics(mpic);
#endif /* CONFIG_MPIC_BROKEN_U3 */
for (i = 0; i < mpic->num_sources; i++) {
@@ -848,7 +950,9 @@ void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
BUG_ON(mpic == NULL);
+#ifdef DEBUG_IPI
DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
+#endif
mpic_cpu_write(MPIC_CPU_IPI_DISPATCH_0 + ipi_no * 0x10,
mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
@@ -859,19 +963,28 @@ int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs)
u32 irq;
irq = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK;
+#ifdef DEBUG_LOW
DBG("%s: get_one_irq(): %d\n", mpic->name, irq);
-
+#endif
if (mpic->cascade && irq == mpic->cascade_vec) {
+#ifdef DEBUG_LOW
DBG("%s: cascading ...\n", mpic->name);
+#endif
irq = mpic->cascade(regs, mpic->cascade_data);
mpic_eoi(mpic);
return irq;
}
if (unlikely(irq == MPIC_VEC_SPURRIOUS))
return -1;
- if (irq < MPIC_VEC_IPI_0)
+ if (irq < MPIC_VEC_IPI_0) {
+#ifdef DEBUG_IRQ
+ DBG("%s: irq %d\n", mpic->name, irq + mpic->irq_offset);
+#endif
return irq + mpic->irq_offset;
+ }
+#ifdef DEBUG_IPI
DBG("%s: ipi %d !\n", mpic->name, irq - MPIC_VEC_IPI_0);
+#endif
return irq - MPIC_VEC_IPI_0 + mpic->ipi_offset;
}
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index b20312e5ed2..109d874ecfb 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -3,9 +3,5 @@
ifdef CONFIG_PPC64
EXTRA_CFLAGS += -mno-minimal-toc
endif
-
-obj-$(CONFIG_8xx) += start_8xx.o
-obj-$(CONFIG_6xx) += start_32.o
-obj-$(CONFIG_4xx) += start_32.o
-obj-$(CONFIG_PPC64) += start_64.o
-obj-y += xmon.o ppc-dis.o ppc-opc.o setjmp.o nonstdio.o
+obj-y += xmon.o ppc-dis.o ppc-opc.o setjmp.o start.o \
+ nonstdio.o
diff --git a/arch/powerpc/xmon/start_64.c b/arch/powerpc/xmon/start.c
index 712552c4f24..712552c4f24 100644
--- a/arch/powerpc/xmon/start_64.c
+++ b/arch/powerpc/xmon/start.c
diff --git a/arch/powerpc/xmon/start_32.c b/arch/powerpc/xmon/start_32.c
deleted file mode 100644
index c2464df4217..00000000000
--- a/arch/powerpc/xmon/start_32.c
+++ /dev/null
@@ -1,441 +0,0 @@
-/*
- * Copyright (C) 1996 Paul Mackerras.
- */
-#include <linux/config.h>
-#include <linux/string.h>
-#include <asm/machdep.h>
-#include <asm/io.h>
-#include <asm/page.h>
-#include <linux/adb.h>
-#include <linux/pmu.h>
-#include <linux/cuda.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/bitops.h>
-#include <asm/xmon.h>
-#include <asm/prom.h>
-#include <asm/bootx.h>
-#include <asm/machdep.h>
-#include <asm/errno.h>
-#include <asm/pmac_feature.h>
-#include <asm/processor.h>
-#include <asm/delay.h>
-#include <asm/btext.h>
-#include <asm/time.h>
-#include "nonstdio.h"
-
-static volatile unsigned char __iomem *sccc, *sccd;
-unsigned int TXRDY, RXRDY, DLAB;
-
-static int use_serial;
-static int use_screen;
-static int via_modem;
-static int xmon_use_sccb;
-static struct device_node *channel_node;
-
-void buf_access(void)
-{
- if (DLAB)
- sccd[3] &= ~DLAB; /* reset DLAB */
-}
-
-extern int adb_init(void);
-
-#ifdef CONFIG_PPC_CHRP
-/*
- * This looks in the "ranges" property for the primary PCI host bridge
- * to find the physical address of the start of PCI/ISA I/O space.
- * It is basically a cut-down version of pci_process_bridge_OF_ranges.
- */
-static unsigned long chrp_find_phys_io_base(void)
-{
- struct device_node *node;
- unsigned int *ranges;
- unsigned long base = CHRP_ISA_IO_BASE;
- int rlen = 0;
- int np;
-
- node = find_devices("isa");
- if (node != NULL) {
- node = node->parent;
- if (node == NULL || node->type == NULL
- || strcmp(node->type, "pci") != 0)
- node = NULL;
- }
- if (node == NULL)
- node = find_devices("pci");
- if (node == NULL)
- return base;
-
- ranges = (unsigned int *) get_property(node, "ranges", &rlen);
- np = prom_n_addr_cells(node) + 5;
- while ((rlen -= np * sizeof(unsigned int)) >= 0) {
- if ((ranges[0] >> 24) == 1 && ranges[2] == 0) {
- /* I/O space starting at 0, grab the phys base */
- base = ranges[np - 3];
- break;
- }
- ranges += np;
- }
- return base;
-}
-#endif /* CONFIG_PPC_CHRP */
-
-void xmon_map_scc(void)
-{
-#ifdef CONFIG_PPC_MULTIPLATFORM
- volatile unsigned char __iomem *base;
-
- if (_machine == _MACH_Pmac) {
- struct device_node *np;
- unsigned long addr;
-#ifdef CONFIG_BOOTX_TEXT
- if (!use_screen && !use_serial
- && !machine_is_compatible("iMac")) {
- /* see if there is a keyboard in the device tree
- with a parent of type "adb" */
- for (np = find_devices("keyboard"); np; np = np->next)
- if (np->parent && np->parent->type
- && strcmp(np->parent->type, "adb") == 0)
- break;
-
- /* needs to be hacked if xmon_printk is to be used
- from within find_via_pmu() */
-#ifdef CONFIG_ADB_PMU
- if (np != NULL && boot_text_mapped && find_via_pmu())
- use_screen = 1;
-#endif
-#ifdef CONFIG_ADB_CUDA
- if (np != NULL && boot_text_mapped && find_via_cuda())
- use_screen = 1;
-#endif
- }
- if (!use_screen && (np = find_devices("escc")) != NULL) {
- /*
- * look for the device node for the serial port
- * we're using and see if it says it has a modem
- */
- char *name = xmon_use_sccb? "ch-b": "ch-a";
- char *slots;
- int l;
-
- np = np->child;
- while (np != NULL && strcmp(np->name, name) != 0)
- np = np->sibling;
- if (np != NULL) {
- /* XXX should parse this properly */
- channel_node = np;
- slots = get_property(np, "slot-names", &l);
- if (slots != NULL && l >= 10
- && strcmp(slots+4, "Modem") == 0)
- via_modem = 1;
- }
- }
- btext_drawstring("xmon uses ");
- if (use_screen)
- btext_drawstring("screen and keyboard\n");
- else {
- if (via_modem)
- btext_drawstring("modem on ");
- btext_drawstring(xmon_use_sccb? "printer": "modem");
- btext_drawstring(" port\n");
- }
-
-#endif /* CONFIG_BOOTX_TEXT */
-
-#ifdef CHRP_ESCC
- addr = 0xc1013020;
-#else
- addr = 0xf3013020;
-#endif
- TXRDY = 4;
- RXRDY = 1;
-
- np = find_devices("mac-io");
- if (np && np->n_addrs)
- addr = np->addrs[0].address + 0x13020;
- base = (volatile unsigned char *) ioremap(addr & PAGE_MASK, PAGE_SIZE);
- sccc = base + (addr & ~PAGE_MASK);
- sccd = sccc + 0x10;
-
- } else {
- base = (volatile unsigned char *) isa_io_base;
-
-#ifdef CONFIG_PPC_CHRP
- if (_machine == _MACH_chrp)
- base = (volatile unsigned char __iomem *)
- ioremap(chrp_find_phys_io_base(), 0x1000);
-#endif
-
- sccc = base + 0x3fd;
- sccd = base + 0x3f8;
- if (xmon_use_sccb) {
- sccc -= 0x100;
- sccd -= 0x100;
- }
- TXRDY = 0x20;
- RXRDY = 1;
- DLAB = 0x80;
- }
-#elif defined(CONFIG_GEMINI)
- /* should already be mapped by the kernel boot */
- sccc = (volatile unsigned char __iomem *) 0xffeffb0d;
- sccd = (volatile unsigned char __iomem *) 0xffeffb08;
- TXRDY = 0x20;
- RXRDY = 1;
- DLAB = 0x80;
-#elif defined(CONFIG_405GP)
- sccc = (volatile unsigned char __iomem *)0xef600305;
- sccd = (volatile unsigned char __iomem *)0xef600300;
- TXRDY = 0x20;
- RXRDY = 1;
- DLAB = 0x80;
-#endif /* platform */
-}
-
-static int scc_initialized = 0;
-
-void xmon_init_scc(void);
-extern void cuda_poll(void);
-
-static inline void do_poll_adb(void)
-{
-#ifdef CONFIG_ADB_PMU
- if (sys_ctrler == SYS_CTRLER_PMU)
- pmu_poll_adb();
-#endif /* CONFIG_ADB_PMU */
-#ifdef CONFIG_ADB_CUDA
- if (sys_ctrler == SYS_CTRLER_CUDA)
- cuda_poll();
-#endif /* CONFIG_ADB_CUDA */
-}
-
-int xmon_write(void *ptr, int nb)
-{
- char *p = ptr;
- int i, c, ct;
-
-#ifdef CONFIG_SMP
- static unsigned long xmon_write_lock;
- int lock_wait = 1000000;
- int locked;
-
- while ((locked = test_and_set_bit(0, &xmon_write_lock)) != 0)
- if (--lock_wait == 0)
- break;
-#endif
-
-#ifdef CONFIG_BOOTX_TEXT
- if (use_screen) {
- /* write it on the screen */
- for (i = 0; i < nb; ++i)
- btext_drawchar(*p++);
- goto out;
- }
-#endif
- if (!scc_initialized)
- xmon_init_scc();
- ct = 0;
- for (i = 0; i < nb; ++i) {
- while ((*sccc & TXRDY) == 0)
- do_poll_adb();
- c = p[i];
- if (c == '\n' && !ct) {
- c = '\r';
- ct = 1;
- --i;
- } else {
- ct = 0;
- }
- buf_access();
- *sccd = c;
- eieio();
- }
-
- out:
-#ifdef CONFIG_SMP
- if (!locked)
- clear_bit(0, &xmon_write_lock);
-#endif
- return nb;
-}
-
-int xmon_wants_key;
-int xmon_adb_keycode;
-
-#ifdef CONFIG_BOOTX_TEXT
-static int xmon_adb_shiftstate;
-
-static unsigned char xmon_keytab[128] =
- "asdfhgzxcv\000bqwer" /* 0x00 - 0x0f */
- "yt123465=97-80]o" /* 0x10 - 0x1f */
- "u[ip\rlj'k;\\,/nm." /* 0x20 - 0x2f */
- "\t `\177\0\033\0\0\0\0\0\0\0\0\0\0" /* 0x30 - 0x3f */
- "\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */
- "\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */
-
-static unsigned char xmon_shift_keytab[128] =
- "ASDFHGZXCV\000BQWER" /* 0x00 - 0x0f */
- "YT!@#$^%+(&_*)}O" /* 0x10 - 0x1f */
- "U{IP\rLJ\"K:|<?NM>" /* 0x20 - 0x2f */
- "\t ~\177\0\033\0\0\0\0\0\0\0\0\0\0" /* 0x30 - 0x3f */
- "\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */
- "\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */
-
-static int xmon_get_adb_key(void)
-{
- int k, t, on;
-
- xmon_wants_key = 1;
- for (;;) {
- xmon_adb_keycode = -1;
- t = 0;
- on = 0;
- do {
- if (--t < 0) {
- on = 1 - on;
- btext_drawchar(on? 0xdb: 0x20);
- btext_drawchar('\b');
- t = 200000;
- }
- do_poll_adb();
- } while (xmon_adb_keycode == -1);
- k = xmon_adb_keycode;
- if (on)
- btext_drawstring(" \b");
-
- /* test for shift keys */
- if ((k & 0x7f) == 0x38 || (k & 0x7f) == 0x7b) {
- xmon_adb_shiftstate = (k & 0x80) == 0;
- continue;
- }
- if (k >= 0x80)
- continue; /* ignore up transitions */
- k = (xmon_adb_shiftstate? xmon_shift_keytab: xmon_keytab)[k];
- if (k != 0)
- break;
- }
- xmon_wants_key = 0;
- return k;
-}
-#endif /* CONFIG_BOOTX_TEXT */
-
-int xmon_readchar(void)
-{
-#ifdef CONFIG_BOOTX_TEXT
- if (use_screen)
- return xmon_get_adb_key();
-#endif
- if (!scc_initialized)
- xmon_init_scc();
- while ((*sccc & RXRDY) == 0)
- do_poll_adb();
- buf_access();
- return *sccd;
-}
-
-int xmon_read_poll(void)
-{
- if ((*sccc & RXRDY) == 0) {
- do_poll_adb();
- return -1;
- }
- buf_access();
- return *sccd;
-}
-
-static unsigned char scc_inittab[] = {
- 13, 0, /* set baud rate divisor */
- 12, 1,
- 14, 1, /* baud rate gen enable, src=rtxc */
- 11, 0x50, /* clocks = br gen */
- 5, 0xea, /* tx 8 bits, assert DTR & RTS */
- 4, 0x46, /* x16 clock, 1 stop */
- 3, 0xc1, /* rx enable, 8 bits */
-};
-
-void xmon_init_scc(void)
-{
- if ( _machine == _MACH_chrp )
- {
- sccd[3] = 0x83; eieio(); /* LCR = 8N1 + DLAB */
- sccd[0] = 12; eieio(); /* DLL = 9600 baud */
- sccd[1] = 0; eieio();
- sccd[2] = 0; eieio(); /* FCR = 0 */
- sccd[3] = 3; eieio(); /* LCR = 8N1 */
- sccd[1] = 0; eieio(); /* IER = 0 */
- }
- else if ( _machine == _MACH_Pmac )
- {
- int i, x;
- unsigned long timeout;
-
- if (channel_node != 0)
- pmac_call_feature(
- PMAC_FTR_SCC_ENABLE,
- channel_node,
- PMAC_SCC_ASYNC | PMAC_SCC_FLAG_XMON, 1);
- printk(KERN_INFO "Serial port locked ON by debugger !\n");
- if (via_modem && channel_node != 0) {
- unsigned int t0;
-
- pmac_call_feature(
- PMAC_FTR_MODEM_ENABLE,
- channel_node, 0, 1);
- printk(KERN_INFO "Modem powered up by debugger !\n");
- t0 = get_tbl();
- timeout = 3 * tb_ticks_per_sec;
- if (timeout == 0)
- /* assume 25MHz if tb_ticks_per_sec not set */
- timeout = 75000000;
- while (get_tbl() - t0 < timeout)
- eieio();
- }
- /* use the B channel if requested */
- if (xmon_use_sccb) {
- sccc = (volatile unsigned char *)
- ((unsigned long)sccc & ~0x20);
- sccd = sccc + 0x10;
- }
- for (i = 20000; i != 0; --i) {
- x = *sccc; eieio();
- }
- *sccc = 9; eieio(); /* reset A or B side */
- *sccc = ((unsigned long)sccc & 0x20)? 0x80: 0x40; eieio();
- for (i = 0; i < sizeof(scc_inittab); ++i) {
- *sccc = scc_inittab[i];
- eieio();
- }
- }
- scc_initialized = 1;
- if (via_modem) {
- for (;;) {
- xmon_write("ATE1V1\r", 7);
- if (xmon_expect("OK", 5)) {
- xmon_write("ATA\r", 4);
- if (xmon_expect("CONNECT", 40))
- break;
- }
- xmon_write("+++", 3);
- xmon_expect("OK", 3);
- }
- }
-}
-
-void xmon_enter(void)
-{
-#ifdef CONFIG_ADB_PMU
- if (_machine == _MACH_Pmac) {
- pmu_suspend();
- }
-#endif
-}
-
-void xmon_leave(void)
-{
-#ifdef CONFIG_ADB_PMU
- if (_machine == _MACH_Pmac) {
- pmu_resume();
- }
-#endif
-}
diff --git a/arch/powerpc/xmon/start_8xx.c b/arch/powerpc/xmon/start_8xx.c
deleted file mode 100644
index 4c17b0486ad..00000000000
--- a/arch/powerpc/xmon/start_8xx.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (C) 1996 Paul Mackerras.
- * Copyright (C) 2000 Dan Malek.
- * Quick hack of Paul's code to make XMON work on 8xx processors. Lots
- * of assumptions, like the SMC1 is used, it has been initialized by the
- * loader at some point, and we can just stuff and suck bytes.
- * We rely upon the 8xx uart driver to support us, as the interface
- * changes between boot up and operational phases of the kernel.
- */
-#include <linux/string.h>
-#include <asm/machdep.h>
-#include <asm/io.h>
-#include <asm/page.h>
-#include <linux/kernel.h>
-#include <asm/8xx_immap.h>
-#include <asm/mpc8xx.h>
-#include <asm/commproc.h>
-#include "nonstdio.h"
-
-extern int xmon_8xx_write(char *str, int nb);
-extern int xmon_8xx_read_poll(void);
-extern int xmon_8xx_read_char(void);
-
-void xmon_map_scc(void)
-{
- cpmp = (cpm8xx_t *)&(((immap_t *)IMAP_ADDR)->im_cpm);
-}
-
-void xmon_init_scc(void);
-
-int xmon_write(void *ptr, int nb)
-{
- return(xmon_8xx_write(ptr, nb));
-}
-
-int xmon_readchar(void)
-{
- return xmon_8xx_read_char();
-}
-
-int xmon_read_poll(void)
-{
- return(xmon_8xx_read_poll());
-}
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index c45a6ad5f3b..7d02fa2a899 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -311,7 +311,7 @@ static void release_output_lock(void)
}
#endif
-int xmon_core(struct pt_regs *regs, int fromipi)
+static int xmon_core(struct pt_regs *regs, int fromipi)
{
int cmd = 0;
unsigned long msr;
@@ -450,7 +450,6 @@ int xmon_core(struct pt_regs *regs, int fromipi)
leave:
cpu_clear(cpu, cpus_in_xmon);
xmon_fault_jmp[cpu] = NULL;
-
#else
/* UP is simple... */
if (in_xmon) {
@@ -529,7 +528,7 @@ xmon_irq(int irq, void *d, struct pt_regs *regs)
return IRQ_HANDLED;
}
-int xmon_bpt(struct pt_regs *regs)
+static int xmon_bpt(struct pt_regs *regs)
{
struct bpt *bp;
unsigned long offset;
@@ -555,7 +554,7 @@ int xmon_bpt(struct pt_regs *regs)
return 1;
}
-int xmon_sstep(struct pt_regs *regs)
+static int xmon_sstep(struct pt_regs *regs)
{
if (user_mode(regs))
return 0;
@@ -563,7 +562,7 @@ int xmon_sstep(struct pt_regs *regs)
return 1;
}
-int xmon_dabr_match(struct pt_regs *regs)
+static int xmon_dabr_match(struct pt_regs *regs)
{
if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF))
return 0;
@@ -573,7 +572,7 @@ int xmon_dabr_match(struct pt_regs *regs)
return 1;
}
-int xmon_iabr_match(struct pt_regs *regs)
+static int xmon_iabr_match(struct pt_regs *regs)
{
if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF))
return 0;
@@ -583,7 +582,7 @@ int xmon_iabr_match(struct pt_regs *regs)
return 1;
}
-int xmon_ipi(struct pt_regs *regs)
+static int xmon_ipi(struct pt_regs *regs)
{
#ifdef CONFIG_SMP
if (in_xmon && !cpu_isset(smp_processor_id(), cpus_in_xmon))
@@ -592,7 +591,7 @@ int xmon_ipi(struct pt_regs *regs)
return 0;
}
-int xmon_fault_handler(struct pt_regs *regs)
+static int xmon_fault_handler(struct pt_regs *regs)
{
struct bpt *bp;
unsigned long offset;
@@ -805,7 +804,10 @@ cmds(struct pt_regs *excp)
break;
case 'x':
case 'X':
+ return cmd;
case EOF:
+ printf(" <no input ...>\n");
+ mdelay(2000);
return cmd;
case '?':
printf(help_string);
@@ -1011,7 +1013,7 @@ static long check_bp_loc(unsigned long addr)
unsigned int instr;
addr &= ~3;
- if (addr < KERNELBASE) {
+ if (!is_kernel_addr(addr)) {
printf("Breakpoints may only be placed at kernel addresses\n");
return 0;
}
@@ -1062,7 +1064,7 @@ bpt_cmds(void)
dabr.address = 0;
dabr.enabled = 0;
if (scanhex(&dabr.address)) {
- if (dabr.address < KERNELBASE) {
+ if (!is_kernel_addr(dabr.address)) {
printf(badaddr);
break;
}
diff --git a/arch/ppc/4xx_io/serial_sicc.c b/arch/ppc/4xx_io/serial_sicc.c
index 84d96b857e4..ebc4db8fcc6 100644
--- a/arch/ppc/4xx_io/serial_sicc.c
+++ b/arch/ppc/4xx_io/serial_sicc.c
@@ -47,6 +47,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/init.h>
+#include <linux/capability.h>
#include <linux/circ_buf.h>
#include <linux/serial.h>
#include <linux/console.h>
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index cc3f64c084c..d65810108bc 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -8,9 +8,6 @@ config MMU
bool
default y
-config UID16
- bool
-
config GENERIC_HARDIRQS
bool
default y
@@ -746,6 +743,10 @@ config MPC834x
bool
default y if MPC834x_SYS
+config PPC_83xx
+ bool
+ default y if 83xx
+
config CPM1
bool
depends on 8xx
diff --git a/arch/ppc/Makefile b/arch/ppc/Makefile
index e719a4933af..98e940beeb3 100644
--- a/arch/ppc/Makefile
+++ b/arch/ppc/Makefile
@@ -128,10 +128,9 @@ TOUT := .tmp_gas_check
# Ensure this is binutils 2.12.1 (or 2.12.90.0.7) or later for altivec
# instructions.
# gcc-3.4 and binutils-2.14 are a fatal combination.
-GCC_VERSION := $(call cc-version)
checkbin:
- @if test "$(GCC_VERSION)" = "0304" ; then \
+ @if test "$(call cc-version)" = "0304" ; then \
if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \
echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \
echo 'correctly with gcc-3.4 and your version of binutils.'; \
diff --git a/arch/ppc/amiga/amiints.c b/arch/ppc/amiga/amiints.c
index 91195e2ce38..5f35cf3986f 100644
--- a/arch/ppc/amiga/amiints.c
+++ b/arch/ppc/amiga/amiints.c
@@ -96,8 +96,8 @@ void amiga_init_IRQ(void)
gayle.inten = GAYLE_IRQ_IDE;
/* turn off all interrupts... */
- custom.intena = 0x7fff;
- custom.intreq = 0x7fff;
+ amiga_custom.intena = 0x7fff;
+ amiga_custom.intreq = 0x7fff;
#ifdef CONFIG_APUS
/* Clear any inter-CPU interrupt requests. Circumvents bug in
@@ -110,7 +110,7 @@ void amiga_init_IRQ(void)
APUS_WRITE(APUS_IPL_EMU, IPLEMU_SETRESET | IPLEMU_IPLMASK);
#endif
/* ... and enable the master interrupt bit */
- custom.intena = IF_SETCLR | IF_INTEN;
+ amiga_custom.intena = IF_SETCLR | IF_INTEN;
cia_init_IRQ(&ciaa_base);
cia_init_IRQ(&ciab_base);
@@ -151,7 +151,7 @@ void amiga_enable_irq(unsigned int irq)
}
/* enable the interrupt */
- custom.intena = IF_SETCLR | ami_intena_vals[irq];
+ amiga_custom.intena = IF_SETCLR | ami_intena_vals[irq];
}
void amiga_disable_irq(unsigned int irq)
@@ -177,7 +177,7 @@ void amiga_disable_irq(unsigned int irq)
}
/* disable the interrupt */
- custom.intena = ami_intena_vals[irq];
+ amiga_custom.intena = ami_intena_vals[irq];
}
inline void amiga_do_irq(int irq, struct pt_regs *fp)
@@ -196,7 +196,7 @@ void amiga_do_irq_list(int irq, struct pt_regs *fp)
kstat_cpu(0).irqs[irq]++;
- custom.intreq = ami_intena_vals[irq];
+ amiga_custom.intreq = ami_intena_vals[irq];
for (action = desc->action; action; action = action->next)
action->handler(irq, action->dev_id, fp);
@@ -208,40 +208,40 @@ void amiga_do_irq_list(int irq, struct pt_regs *fp)
static void ami_int1(int irq, void *dev_id, struct pt_regs *fp)
{
- unsigned short ints = custom.intreqr & custom.intenar;
+ unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
/* if serial transmit buffer empty, interrupt */
if (ints & IF_TBE) {
- custom.intreq = IF_TBE;
+ amiga_custom.intreq = IF_TBE;
amiga_do_irq(IRQ_AMIGA_TBE, fp);
}
/* if floppy disk transfer complete, interrupt */
if (ints & IF_DSKBLK) {
- custom.intreq = IF_DSKBLK;
+ amiga_custom.intreq = IF_DSKBLK;
amiga_do_irq(IRQ_AMIGA_DSKBLK, fp);
}
/* if software interrupt set, interrupt */
if (ints & IF_SOFT) {
- custom.intreq = IF_SOFT;
+ amiga_custom.intreq = IF_SOFT;
amiga_do_irq(IRQ_AMIGA_SOFT, fp);
}
}
static void ami_int3(int irq, void *dev_id, struct pt_regs *fp)
{
- unsigned short ints = custom.intreqr & custom.intenar;
+ unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
/* if a blitter interrupt */
if (ints & IF_BLIT) {
- custom.intreq = IF_BLIT;
+ amiga_custom.intreq = IF_BLIT;
amiga_do_irq(IRQ_AMIGA_BLIT, fp);
}
/* if a copper interrupt */
if (ints & IF_COPER) {
- custom.intreq = IF_COPER;
+ amiga_custom.intreq = IF_COPER;
amiga_do_irq(IRQ_AMIGA_COPPER, fp);
}
@@ -252,36 +252,36 @@ static void ami_int3(int irq, void *dev_id, struct pt_regs *fp)
static void ami_int4(int irq, void *dev_id, struct pt_regs *fp)
{
- unsigned short ints = custom.intreqr & custom.intenar;
+ unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
/* if audio 0 interrupt */
if (ints & IF_AUD0) {
- custom.intreq = IF_AUD0;
+ amiga_custom.intreq = IF_AUD0;
amiga_do_irq(IRQ_AMIGA_AUD0, fp);
}
/* if audio 1 interrupt */
if (ints & IF_AUD1) {
- custom.intreq = IF_AUD1;
+ amiga_custom.intreq = IF_AUD1;
amiga_do_irq(IRQ_AMIGA_AUD1, fp);
}
/* if audio 2 interrupt */
if (ints & IF_AUD2) {
- custom.intreq = IF_AUD2;
+ amiga_custom.intreq = IF_AUD2;
amiga_do_irq(IRQ_AMIGA_AUD2, fp);
}
/* if audio 3 interrupt */
if (ints & IF_AUD3) {
- custom.intreq = IF_AUD3;
+ amiga_custom.intreq = IF_AUD3;
amiga_do_irq(IRQ_AMIGA_AUD3, fp);
}
}
static void ami_int5(int irq, void *dev_id, struct pt_regs *fp)
{
- unsigned short ints = custom.intreqr & custom.intenar;
+ unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
/* if serial receive buffer full interrupt */
if (ints & IF_RBF) {
@@ -291,7 +291,7 @@ static void ami_int5(int irq, void *dev_id, struct pt_regs *fp)
/* if a disk sync interrupt */
if (ints & IF_DSKSYN) {
- custom.intreq = IF_DSKSYN;
+ amiga_custom.intreq = IF_DSKSYN;
amiga_do_irq(IRQ_AMIGA_DSKSYN, fp);
}
}
diff --git a/arch/ppc/amiga/cia.c b/arch/ppc/amiga/cia.c
index ad961465b6c..4431c58f611 100644
--- a/arch/ppc/amiga/cia.c
+++ b/arch/ppc/amiga/cia.c
@@ -66,7 +66,7 @@ static unsigned char cia_set_irq_private(struct ciabase *base,
else
base->icr_data &= ~mask;
if (base->icr_data & base->icr_mask)
- custom.intreq = IF_SETCLR | base->int_mask;
+ amiga_custom.intreq = IF_SETCLR | base->int_mask;
return old & base->icr_mask;
}
@@ -114,7 +114,7 @@ static unsigned char cia_able_irq_private(struct ciabase *base,
base->icr_mask &= CIA_ICR_ALL;
if (base->icr_data & base->icr_mask)
- custom.intreq = IF_SETCLR | base->int_mask;
+ amiga_custom.intreq = IF_SETCLR | base->int_mask;
return old;
}
@@ -145,7 +145,7 @@ static void cia_handler(int irq, void *dev_id, struct pt_regs *fp)
irq = base->cia_irq;
desc = irq_desc + irq;
ints = cia_set_irq_private(base, CIA_ICR_ALL);
- custom.intreq = base->int_mask;
+ amiga_custom.intreq = base->int_mask;
for (i = 0; i < CIA_IRQS; i++, irq++) {
if (ints & 1) {
kstat_cpu(0).irqs[irq]++;
@@ -174,5 +174,5 @@ void __init cia_init_IRQ(struct ciabase *base)
action->name = base->name;
setup_irq(base->handler_irq, &amiga_sys_irqaction[base->handler_irq-IRQ_AMIGA_AUTO]);
- custom.intena = IF_SETCLR | base->int_mask;
+ amiga_custom.intena = IF_SETCLR | base->int_mask;
}
diff --git a/arch/ppc/amiga/config.c b/arch/ppc/amiga/config.c
index af881d7454d..60e2da1c92c 100644
--- a/arch/ppc/amiga/config.c
+++ b/arch/ppc/amiga/config.c
@@ -90,9 +90,6 @@ static void a3000_gettod (int *, int *, int *, int *, int *, int *);
static void a2000_gettod (int *, int *, int *, int *, int *, int *);
static int amiga_hwclk (int, struct hwclk_time *);
static int amiga_set_clock_mmss (unsigned long);
-#ifdef CONFIG_AMIGA_FLOPPY
-extern void amiga_floppy_setup(char *, int *);
-#endif
static void amiga_reset (void);
extern void amiga_init_sound(void);
static void amiga_savekmsg_init(void);
@@ -281,7 +278,7 @@ static void __init amiga_identify(void)
case CS_OCS:
case CS_ECS:
case CS_AGA:
- switch (custom.deniseid & 0xf) {
+ switch (amiga_custom.deniseid & 0xf) {
case 0x0c:
AMIGAHW_SET(DENISE_HR);
break;
@@ -294,7 +291,7 @@ static void __init amiga_identify(void)
AMIGAHW_SET(DENISE);
break;
}
- switch ((custom.vposr>>8) & 0x7f) {
+ switch ((amiga_custom.vposr>>8) & 0x7f) {
case 0x00:
AMIGAHW_SET(AGNUS_PAL);
break;
@@ -419,9 +416,6 @@ void __init config_amiga(void)
mach_hwclk = amiga_hwclk;
mach_set_clock_mmss = amiga_set_clock_mmss;
-#ifdef CONFIG_AMIGA_FLOPPY
- mach_floppy_setup = amiga_floppy_setup;
-#endif
mach_reset = amiga_reset;
#ifdef CONFIG_HEARTBEAT
mach_heartbeat = amiga_heartbeat;
@@ -432,9 +426,9 @@ void __init config_amiga(void)
amiga_colorclock = 5*amiga_eclock; /* 3.5 MHz */
/* clear all DMA bits */
- custom.dmacon = DMAF_ALL;
+ amiga_custom.dmacon = DMAF_ALL;
/* ensure that the DMA master bit is set */
- custom.dmacon = DMAF_SETCLR | DMAF_MASTER;
+ amiga_custom.dmacon = DMAF_SETCLR | DMAF_MASTER;
/* request all RAM */
for (i = 0; i < m68k_num_memory; i++) {
@@ -753,9 +747,9 @@ static void amiga_savekmsg_init(void)
static void amiga_serial_putc(char c)
{
- custom.serdat = (unsigned char)c | 0x100;
+ amiga_custom.serdat = (unsigned char)c | 0x100;
mb();
- while (!(custom.serdatr & 0x2000))
+ while (!(amiga_custom.serdatr & 0x2000))
;
}
@@ -785,11 +779,11 @@ int amiga_serial_console_wait_key(struct console *co)
{
int ch;
- while (!(custom.intreqr & IF_RBF))
+ while (!(amiga_custom.intreqr & IF_RBF))
barrier();
- ch = custom.serdatr & 0xff;
+ ch = amiga_custom.serdatr & 0xff;
/* clear the interrupt, so that another character can be read */
- custom.intreq = IF_RBF;
+ amiga_custom.intreq = IF_RBF;
return ch;
}
diff --git a/arch/ppc/boot/common/util.S b/arch/ppc/boot/common/util.S
index c96c9f80521..368ec035e6c 100644
--- a/arch/ppc/boot/common/util.S
+++ b/arch/ppc/boot/common/util.S
@@ -234,7 +234,8 @@ udelay:
* First, flush the data cache in case it was enabled and may be
* holding instructions for copy back.
*/
-_GLOBAL(flush_instruction_cache)
+ .globl flush_instruction_cache
+flush_instruction_cache:
mflr r6
bl flush_data_cache
@@ -279,7 +280,8 @@ _GLOBAL(flush_instruction_cache)
* Flush data cache
* Do this by just reading lots of stuff into the cache.
*/
-_GLOBAL(flush_data_cache)
+ .globl flush_data_cache
+flush_data_cache:
lis r3,cache_flush_buffer@h
ori r3,r3,cache_flush_buffer@l
li r4,NUM_CACHE_LINES
diff --git a/arch/ppc/boot/images/Makefile b/arch/ppc/boot/images/Makefile
index 532e7ef1edb..58415d5718e 100644
--- a/arch/ppc/boot/images/Makefile
+++ b/arch/ppc/boot/images/Makefile
@@ -26,7 +26,7 @@ quiet_cmd_uimage = UIMAGE $@
targets += uImage
$(obj)/uImage: $(obj)/vmlinux.gz
$(Q)rm -f $@
- $(call if_changed,uimage)
+ $(call cmd,uimage)
@echo -n ' Image: $@ '
@if [ -f $@ ]; then echo 'is ready' ; else echo 'not made'; fi
diff --git a/arch/ppc/boot/simple/Makefile b/arch/ppc/boot/simple/Makefile
index f3e9c534aa8..9533f8de238 100644
--- a/arch/ppc/boot/simple/Makefile
+++ b/arch/ppc/boot/simple/Makefile
@@ -190,6 +190,8 @@ boot-$(CONFIG_REDWOOD_5) += embed_config.o
boot-$(CONFIG_REDWOOD_6) += embed_config.o
boot-$(CONFIG_8xx) += embed_config.o
boot-$(CONFIG_8260) += embed_config.o
+boot-$(CONFIG_EP405) += embed_config.o
+boot-$(CONFIG_XILINX_ML300) += embed_config.o
boot-$(CONFIG_BSEIP) += iic.o
boot-$(CONFIG_MBX) += iic.o pci.o qspan_pci.o
boot-$(CONFIG_MV64X60) += misc-mv64x60.o
diff --git a/arch/ppc/configs/TQM8540_defconfig b/arch/ppc/configs/TQM8540_defconfig
new file mode 100644
index 00000000000..99bf3b7a276
--- /dev/null
+++ b/arch/ppc/configs/TQM8540_defconfig
@@ -0,0 +1,973 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.15-rc2
+# Fri Nov 25 17:26:50 2005
+#
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_PPC=y
+CONFIG_PPC32=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+# CONFIG_HOTPLUG is not set
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_EMBEDDED=y
+# CONFIG_KALLSYMS is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+# CONFIG_EPOLL is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+
+#
+# Loadable module support
+#
+# CONFIG_MODULES is not set
+
+#
+# Block layer
+#
+# CONFIG_LBD is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# Processor
+#
+# CONFIG_6xx is not set
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_POWER3 is not set
+# CONFIG_POWER4 is not set
+# CONFIG_8xx is not set
+# CONFIG_E200 is not set
+CONFIG_E500=y
+CONFIG_BOOKE=y
+CONFIG_FSL_BOOKE=y
+# CONFIG_PHYS_64BIT is not set
+CONFIG_SPE=y
+CONFIG_MATH_EMULATION=y
+# CONFIG_KEXEC is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_WANT_EARLY_SERIAL is not set
+CONFIG_PPC_GEN550=y
+CONFIG_85xx=y
+CONFIG_PPC_INDIRECT_PCI_BE=y
+
+#
+# Freescale 85xx options
+#
+# CONFIG_MPC8540_ADS is not set
+# CONFIG_MPC8548_CDS is not set
+# CONFIG_MPC8555_CDS is not set
+# CONFIG_MPC8560_ADS is not set
+# CONFIG_SBC8560 is not set
+# CONFIG_STX_GP3 is not set
+CONFIG_TQM8540=y
+# CONFIG_TQM8541 is not set
+# CONFIG_TQM8555 is not set
+# CONFIG_TQM8560 is not set
+CONFIG_MPC8540=y
+
+#
+# Platform options
+#
+# CONFIG_HIGHMEM is not set
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_PM is not set
+# CONFIG_SOFTWARE_SUSPEND is not set
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_PPC_I8259=y
+CONFIG_PPC_INDIRECT_PCI=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCI_LEGACY_PROC is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+# CONFIG_RAPIDIO is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_HIGHMEM_START=0xfe000000
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_TASK_SIZE=0x80000000
+CONFIG_BOOT_LOAD=0x00800000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_AMDSTD_RETRY=0
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+CONFIG_MTD_TQM85xx=y
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLKMTD is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_BLK_DEV_IDEDISK=y
+# CONFIG_IDEDISK_MULTI_MODE is not set
+# CONFIG_BLK_DEV_IDECD is not set
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDEFLOPPY is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_IDEPCI=y
+CONFIG_IDEPCI_SHARE_IRQ=y
+# CONFIG_BLK_DEV_OFFBOARD is not set
+CONFIG_BLK_DEV_GENERIC=y
+# CONFIG_BLK_DEV_OPTI621 is not set
+# CONFIG_BLK_DEV_SL82C105 is not set
+CONFIG_BLK_DEV_IDEDMA_PCI=y
+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+CONFIG_IDEDMA_PCI_AUTO=y
+# CONFIG_IDEDMA_ONLYDISK is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CY82C693 is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_HPT34X is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+# CONFIG_BLK_DEV_PIIX is not set
+# CONFIG_BLK_DEV_IT821X is not set
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+CONFIG_BLK_DEV_VIA82CXXX=y
+# CONFIG_IDE_ARM is not set
+CONFIG_BLK_DEV_IDEDMA=y
+# CONFIG_IDEDMA_IVB is not set
+CONFIG_IDEDMA_AUTO=y
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Macintosh device drivers
+#
+# CONFIG_WINDFARM is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# PHY device support
+#
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+
+#
+# Tulip family network device support
+#
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+CONFIG_NET_PCI=y
+# CONFIG_PCNET32 is not set
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_B44 is not set
+# CONFIG_FORCEDETH is not set
+# CONFIG_DGRS is not set
+# CONFIG_EEPRO100 is not set
+CONFIG_E100=y
+# CONFIG_FEALNX is not set
+# CONFIG_NATSEMI is not set
+# CONFIG_NE2K_PCI is not set
+# CONFIG_8139CP is not set
+# CONFIG_8139TOO is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_TLAN is not set
+# CONFIG_VIA_RHINE is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+CONFIG_GIANFAR=y
+CONFIG_GFAR_NAPI=y
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_NVRAM is not set
+CONFIG_GEN_RTC=y
+# CONFIG_GEN_RTC_X is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_PIIX4 is not set
+CONFIG_I2C_MPC=y
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
+# CONFIG_SCx200_ACB is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+CONFIG_SENSORS_DS1337=y
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_RTC8564 is not set
+# CONFIG_SENSORS_M41T00 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_RTC_X1205_I2C is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+CONFIG_SENSORS_LM75=y
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+CONFIG_HWMON_DEBUG_CHIP=y
+
+#
+# Misc devices
+#
+
+#
+# Multimedia Capabilities Port drivers
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# InfiniBand support
+#
+# CONFIG_INFINIBAND is not set
+
+#
+# SN Devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_RELAYFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+# CONFIG_NFS_V3 is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+# CONFIG_MSDOS_PARTITION is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+# CONFIG_DEBUG_KERNEL is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_SERIAL_TEXT_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Hardware crypto devices
+#
diff --git a/arch/ppc/configs/TQM8541_defconfig b/arch/ppc/configs/TQM8541_defconfig
new file mode 100644
index 00000000000..0ff56695d34
--- /dev/null
+++ b/arch/ppc/configs/TQM8541_defconfig
@@ -0,0 +1,986 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.15-rc2
+# Wed Nov 30 13:36:28 2005
+#
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_PPC=y
+CONFIG_PPC32=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+# CONFIG_HOTPLUG is not set
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_EMBEDDED=y
+# CONFIG_KALLSYMS is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+# CONFIG_EPOLL is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+
+#
+# Loadable module support
+#
+# CONFIG_MODULES is not set
+
+#
+# Block layer
+#
+# CONFIG_LBD is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# Processor
+#
+# CONFIG_6xx is not set
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_POWER3 is not set
+# CONFIG_POWER4 is not set
+# CONFIG_8xx is not set
+# CONFIG_E200 is not set
+CONFIG_E500=y
+CONFIG_BOOKE=y
+CONFIG_FSL_BOOKE=y
+# CONFIG_PHYS_64BIT is not set
+CONFIG_SPE=y
+CONFIG_MATH_EMULATION=y
+# CONFIG_KEXEC is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_WANT_EARLY_SERIAL is not set
+CONFIG_PPC_GEN550=y
+CONFIG_85xx=y
+CONFIG_PPC_INDIRECT_PCI_BE=y
+
+#
+# Freescale 85xx options
+#
+# CONFIG_MPC8540_ADS is not set
+# CONFIG_MPC8548_CDS is not set
+# CONFIG_MPC8555_CDS is not set
+# CONFIG_MPC8560_ADS is not set
+# CONFIG_SBC8560 is not set
+# CONFIG_STX_GP3 is not set
+# CONFIG_TQM8540 is not set
+CONFIG_TQM8541=y
+# CONFIG_TQM8555 is not set
+# CONFIG_TQM8560 is not set
+CONFIG_MPC8555=y
+
+#
+# Platform options
+#
+CONFIG_CPM2=y
+# CONFIG_PC_KEYBOARD is not set
+# CONFIG_HIGHMEM is not set
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_PM is not set
+# CONFIG_SOFTWARE_SUSPEND is not set
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_PPC_I8259=y
+CONFIG_PPC_INDIRECT_PCI=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCI_LEGACY_PROC is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_HIGHMEM_START=0xfe000000
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_TASK_SIZE=0x80000000
+CONFIG_BOOT_LOAD=0x00800000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_AMDSTD_RETRY=0
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+CONFIG_MTD_TQM85xx=y
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLKMTD is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_BLK_DEV_IDEDISK=y
+# CONFIG_IDEDISK_MULTI_MODE is not set
+# CONFIG_BLK_DEV_IDECD is not set
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDEFLOPPY is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_IDEPCI=y
+CONFIG_IDEPCI_SHARE_IRQ=y
+# CONFIG_BLK_DEV_OFFBOARD is not set
+CONFIG_BLK_DEV_GENERIC=y
+# CONFIG_BLK_DEV_OPTI621 is not set
+# CONFIG_BLK_DEV_SL82C105 is not set
+CONFIG_BLK_DEV_IDEDMA_PCI=y
+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+CONFIG_IDEDMA_PCI_AUTO=y
+# CONFIG_IDEDMA_ONLYDISK is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CY82C693 is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_HPT34X is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+# CONFIG_BLK_DEV_PIIX is not set
+# CONFIG_BLK_DEV_IT821X is not set
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+CONFIG_BLK_DEV_VIA82CXXX=y
+# CONFIG_IDE_ARM is not set
+CONFIG_BLK_DEV_IDEDMA=y
+# CONFIG_IDEDMA_IVB is not set
+CONFIG_IDEDMA_AUTO=y
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Macintosh device drivers
+#
+# CONFIG_WINDFARM is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# PHY device support
+#
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+
+#
+# Tulip family network device support
+#
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+CONFIG_NET_PCI=y
+# CONFIG_PCNET32 is not set
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_B44 is not set
+# CONFIG_FORCEDETH is not set
+# CONFIG_DGRS is not set
+# CONFIG_EEPRO100 is not set
+CONFIG_E100=y
+# CONFIG_FEALNX is not set
+# CONFIG_NATSEMI is not set
+# CONFIG_NE2K_PCI is not set
+# CONFIG_8139CP is not set
+# CONFIG_8139TOO is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_TLAN is not set
+# CONFIG_VIA_RHINE is not set
+# CONFIG_FS_ENET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+CONFIG_GIANFAR=y
+CONFIG_GFAR_NAPI=y
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_CPM is not set
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_NVRAM is not set
+CONFIG_GEN_RTC=y
+# CONFIG_GEN_RTC_X is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_PIIX4 is not set
+CONFIG_I2C_MPC=y
+# CONFIG_I2C_MPC8260 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
+# CONFIG_SCx200_ACB is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+CONFIG_SENSORS_DS1337=y
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_MAX6900 is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCF8563 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_RTC8564 is not set
+# CONFIG_SENSORS_M41T00 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_RTC_X1205_I2C is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+CONFIG_SENSORS_LM75=y
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+CONFIG_HWMON_DEBUG_CHIP=y
+
+#
+# Misc devices
+#
+
+#
+# Multimedia Capabilities Port drivers
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# InfiniBand support
+#
+# CONFIG_INFINIBAND is not set
+
+#
+# SN Devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_RELAYFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+# CONFIG_NFS_V3 is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+# CONFIG_MSDOS_PARTITION is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+# CONFIG_SCC_ENET is not set
+# CONFIG_FEC_ENET is not set
+
+#
+# CPM2 Options
+#
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+# CONFIG_DEBUG_KERNEL is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_KGDB_CONSOLE is not set
+# CONFIG_SERIAL_TEXT_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Hardware crypto devices
+#
diff --git a/arch/ppc/configs/TQM8555_defconfig b/arch/ppc/configs/TQM8555_defconfig
new file mode 100644
index 00000000000..730b3db2e47
--- /dev/null
+++ b/arch/ppc/configs/TQM8555_defconfig
@@ -0,0 +1,983 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.15-rc2
+# Thu Nov 24 17:10:52 2005
+#
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_PPC=y
+CONFIG_PPC32=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+# CONFIG_HOTPLUG is not set
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_EMBEDDED=y
+# CONFIG_KALLSYMS is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+# CONFIG_EPOLL is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+
+#
+# Loadable module support
+#
+# CONFIG_MODULES is not set
+
+#
+# Block layer
+#
+# CONFIG_LBD is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# Processor
+#
+# CONFIG_6xx is not set
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_POWER3 is not set
+# CONFIG_POWER4 is not set
+# CONFIG_8xx is not set
+# CONFIG_E200 is not set
+CONFIG_E500=y
+CONFIG_BOOKE=y
+CONFIG_FSL_BOOKE=y
+# CONFIG_PHYS_64BIT is not set
+CONFIG_SPE=y
+CONFIG_MATH_EMULATION=y
+# CONFIG_KEXEC is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_WANT_EARLY_SERIAL is not set
+CONFIG_PPC_GEN550=y
+CONFIG_85xx=y
+CONFIG_PPC_INDIRECT_PCI_BE=y
+
+#
+# Freescale 85xx options
+#
+# CONFIG_MPC8540_ADS is not set
+# CONFIG_MPC8548_CDS is not set
+# CONFIG_MPC8555_CDS is not set
+# CONFIG_MPC8560_ADS is not set
+# CONFIG_SBC8560 is not set
+# CONFIG_STX_GP3 is not set
+# CONFIG_TQM8540 is not set
+# CONFIG_TQM8541 is not set
+CONFIG_TQM8555=y
+# CONFIG_TQM8560 is not set
+CONFIG_MPC8555=y
+
+#
+# Platform options
+#
+CONFIG_CPM2=y
+# CONFIG_PC_KEYBOARD is not set
+# CONFIG_HIGHMEM is not set
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_PM is not set
+# CONFIG_SOFTWARE_SUSPEND is not set
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_PPC_I8259=y
+CONFIG_PPC_INDIRECT_PCI=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCI_LEGACY_PROC is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_HIGHMEM_START=0xfe000000
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_TASK_SIZE=0x80000000
+CONFIG_BOOT_LOAD=0x00800000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_AMDSTD_RETRY=0
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+CONFIG_MTD_TQM85xx=y
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLKMTD is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_BLK_DEV_IDEDISK=y
+# CONFIG_IDEDISK_MULTI_MODE is not set
+# CONFIG_BLK_DEV_IDECD is not set
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDEFLOPPY is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_IDEPCI=y
+CONFIG_IDEPCI_SHARE_IRQ=y
+# CONFIG_BLK_DEV_OFFBOARD is not set
+CONFIG_BLK_DEV_GENERIC=y
+# CONFIG_BLK_DEV_OPTI621 is not set
+# CONFIG_BLK_DEV_SL82C105 is not set
+CONFIG_BLK_DEV_IDEDMA_PCI=y
+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+CONFIG_IDEDMA_PCI_AUTO=y
+# CONFIG_IDEDMA_ONLYDISK is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CY82C693 is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_HPT34X is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+# CONFIG_BLK_DEV_PIIX is not set
+# CONFIG_BLK_DEV_IT821X is not set
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+CONFIG_BLK_DEV_VIA82CXXX=y
+# CONFIG_IDE_ARM is not set
+CONFIG_BLK_DEV_IDEDMA=y
+# CONFIG_IDEDMA_IVB is not set
+CONFIG_IDEDMA_AUTO=y
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Macintosh device drivers
+#
+# CONFIG_WINDFARM is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# PHY device support
+#
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+
+#
+# Tulip family network device support
+#
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+CONFIG_NET_PCI=y
+# CONFIG_PCNET32 is not set
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_B44 is not set
+# CONFIG_FORCEDETH is not set
+# CONFIG_DGRS is not set
+# CONFIG_EEPRO100 is not set
+CONFIG_E100=y
+# CONFIG_FEALNX is not set
+# CONFIG_NATSEMI is not set
+# CONFIG_NE2K_PCI is not set
+# CONFIG_8139CP is not set
+# CONFIG_8139TOO is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_TLAN is not set
+# CONFIG_VIA_RHINE is not set
+# CONFIG_FS_ENET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+CONFIG_GIANFAR=y
+CONFIG_GFAR_NAPI=y
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_CPM is not set
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_NVRAM is not set
+CONFIG_GEN_RTC=y
+# CONFIG_GEN_RTC_X is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_PIIX4 is not set
+CONFIG_I2C_MPC=y
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
+# CONFIG_SCx200_ACB is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+CONFIG_SENSORS_DS1337=y
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_RTC8564 is not set
+# CONFIG_SENSORS_M41T00 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_RTC_X1205_I2C is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+CONFIG_SENSORS_LM75=y
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+CONFIG_HWMON_DEBUG_CHIP=y
+
+#
+# Misc devices
+#
+
+#
+# Multimedia Capabilities Port drivers
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# InfiniBand support
+#
+# CONFIG_INFINIBAND is not set
+
+#
+# SN Devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_RELAYFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+# CONFIG_NFS_V3 is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+# CONFIG_MSDOS_PARTITION is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+# CONFIG_SCC_ENET is not set
+# CONFIG_FEC_ENET is not set
+
+#
+# CPM2 Options
+#
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+# CONFIG_DEBUG_KERNEL is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_KGDB_CONSOLE is not set
+# CONFIG_SERIAL_TEXT_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Hardware crypto devices
+#
diff --git a/arch/ppc/configs/TQM8560_defconfig b/arch/ppc/configs/TQM8560_defconfig
new file mode 100644
index 00000000000..1d902072825
--- /dev/null
+++ b/arch/ppc/configs/TQM8560_defconfig
@@ -0,0 +1,992 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.15-rc2
+# Wed Nov 30 16:47:53 2005
+#
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_PPC=y
+CONFIG_PPC32=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+# CONFIG_HOTPLUG is not set
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_EMBEDDED=y
+# CONFIG_KALLSYMS is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+# CONFIG_EPOLL is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+
+#
+# Loadable module support
+#
+# CONFIG_MODULES is not set
+
+#
+# Block layer
+#
+# CONFIG_LBD is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# Processor
+#
+# CONFIG_6xx is not set
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_POWER3 is not set
+# CONFIG_POWER4 is not set
+# CONFIG_8xx is not set
+# CONFIG_E200 is not set
+CONFIG_E500=y
+CONFIG_BOOKE=y
+CONFIG_FSL_BOOKE=y
+# CONFIG_PHYS_64BIT is not set
+CONFIG_SPE=y
+CONFIG_MATH_EMULATION=y
+# CONFIG_KEXEC is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_WANT_EARLY_SERIAL is not set
+CONFIG_85xx=y
+CONFIG_PPC_INDIRECT_PCI_BE=y
+
+#
+# Freescale 85xx options
+#
+# CONFIG_MPC8540_ADS is not set
+# CONFIG_MPC8548_CDS is not set
+# CONFIG_MPC8555_CDS is not set
+# CONFIG_MPC8560_ADS is not set
+# CONFIG_SBC8560 is not set
+# CONFIG_STX_GP3 is not set
+# CONFIG_TQM8540 is not set
+# CONFIG_TQM8541 is not set
+# CONFIG_TQM8555 is not set
+CONFIG_TQM8560=y
+CONFIG_MPC8560=y
+
+#
+# Platform options
+#
+CONFIG_CPM2=y
+# CONFIG_PC_KEYBOARD is not set
+# CONFIG_HIGHMEM is not set
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_PM is not set
+# CONFIG_SOFTWARE_SUSPEND is not set
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_PPC_I8259=y
+CONFIG_PPC_INDIRECT_PCI=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCI_LEGACY_PROC is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+# CONFIG_RAPIDIO is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_HIGHMEM_START=0xfe000000
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_TASK_SIZE=0x80000000
+CONFIG_BOOT_LOAD=0x00800000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_AMDSTD_RETRY=0
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+CONFIG_MTD_TQM85xx=y
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLKMTD is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_BLK_DEV_IDEDISK=y
+# CONFIG_IDEDISK_MULTI_MODE is not set
+# CONFIG_BLK_DEV_IDECD is not set
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDEFLOPPY is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_IDEPCI=y
+CONFIG_IDEPCI_SHARE_IRQ=y
+# CONFIG_BLK_DEV_OFFBOARD is not set
+CONFIG_BLK_DEV_GENERIC=y
+# CONFIG_BLK_DEV_OPTI621 is not set
+# CONFIG_BLK_DEV_SL82C105 is not set
+CONFIG_BLK_DEV_IDEDMA_PCI=y
+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+CONFIG_IDEDMA_PCI_AUTO=y
+# CONFIG_IDEDMA_ONLYDISK is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CY82C693 is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_HPT34X is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+# CONFIG_BLK_DEV_PIIX is not set
+# CONFIG_BLK_DEV_IT821X is not set
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+CONFIG_BLK_DEV_VIA82CXXX=y
+# CONFIG_IDE_ARM is not set
+CONFIG_BLK_DEV_IDEDMA=y
+# CONFIG_IDEDMA_IVB is not set
+CONFIG_IDEDMA_AUTO=y
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Macintosh device drivers
+#
+# CONFIG_WINDFARM is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# PHY device support
+#
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+
+#
+# Tulip family network device support
+#
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+CONFIG_NET_PCI=y
+# CONFIG_PCNET32 is not set
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_B44 is not set
+# CONFIG_FORCEDETH is not set
+# CONFIG_DGRS is not set
+# CONFIG_EEPRO100 is not set
+CONFIG_E100=y
+# CONFIG_FEALNX is not set
+# CONFIG_NATSEMI is not set
+# CONFIG_NE2K_PCI is not set
+# CONFIG_8139CP is not set
+# CONFIG_8139TOO is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_TLAN is not set
+# CONFIG_VIA_RHINE is not set
+# CONFIG_FS_ENET is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+CONFIG_GIANFAR=y
+CONFIG_GFAR_NAPI=y
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_CPM=y
+CONFIG_SERIAL_CPM_CONSOLE=y
+CONFIG_SERIAL_CPM_SCC1=y
+# CONFIG_SERIAL_CPM_SCC2 is not set
+# CONFIG_SERIAL_CPM_SCC3 is not set
+# CONFIG_SERIAL_CPM_SCC4 is not set
+# CONFIG_SERIAL_CPM_SMC1 is not set
+# CONFIG_SERIAL_CPM_SMC2 is not set
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_NVRAM is not set
+CONFIG_GEN_RTC=y
+# CONFIG_GEN_RTC_X is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_PIIX4 is not set
+CONFIG_I2C_MPC=y
+# CONFIG_I2C_MPC8260 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
+# CONFIG_SCx200_ACB is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+CONFIG_SENSORS_DS1337=y
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_MAX6900 is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCF8563 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_RTC8564 is not set
+# CONFIG_SENSORS_M41T00 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_RTC_X1205_I2C is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+CONFIG_SENSORS_LM75=y
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+CONFIG_HWMON_DEBUG_CHIP=y
+
+#
+# Misc devices
+#
+
+#
+# Multimedia Capabilities Port drivers
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# InfiniBand support
+#
+# CONFIG_INFINIBAND is not set
+
+#
+# SN Devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_RELAYFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+# CONFIG_NFS_V3 is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+# CONFIG_MSDOS_PARTITION is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+# CONFIG_SCC_ENET is not set
+# CONFIG_FEC_ENET is not set
+
+#
+# CPM2 Options
+#
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+# CONFIG_DEBUG_KERNEL is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_KGDB_CONSOLE is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Hardware crypto devices
+#
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index 0bb23fce429..ca020130086 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -13,7 +13,6 @@ extra-$(CONFIG_POWER4) += idle_power4.o
extra-y += vmlinux.lds
obj-y := entry.o traps.o idle.o time.o misc.o \
- process.o \
setup.o \
ppc_htab.o
obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
@@ -49,5 +48,4 @@ obj-$(CONFIG_TAU) += temp.o
ifndef CONFIG_E200
obj-$(CONFIG_FSL_BOOKE) += perfmon_fsl_booke.o
endif
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
endif
diff --git a/arch/ppc/kernel/asm-offsets.c b/arch/ppc/kernel/asm-offsets.c
index fe0e767fb94..7964bf660e9 100644
--- a/arch/ppc/kernel/asm-offsets.c
+++ b/arch/ppc/kernel/asm-offsets.c
@@ -131,7 +131,7 @@ main(void)
DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
- DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
+ DEFINE(TI_SIGFRAME, offsetof(struct thread_info, nvgprs_frame));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index f044edbb454..a48b950722a 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -200,8 +200,6 @@ _GLOBAL(DoSyscall)
bl do_show_syscall
#endif /* SHOW_SYSCALLS */
rlwinm r10,r1,0,0,18 /* current_thread_info() */
- li r11,0
- stb r11,TI_SC_NOERR(r10)
lwz r11,TI_FLAGS(r10)
andi. r11,r11,_TIF_SYSCALL_T_OR_A
bne- syscall_dotrace
@@ -222,25 +220,21 @@ ret_from_syscall:
bl do_show_syscall_exit
#endif
mr r6,r3
- li r11,-_LAST_ERRNO
- cmplw 0,r3,r11
rlwinm r12,r1,0,0,18 /* current_thread_info() */
- blt+ 30f
- lbz r11,TI_SC_NOERR(r12)
- cmpwi r11,0
- bne 30f
- neg r3,r3
- lwz r10,_CCR(r1) /* Set SO bit in CR */
- oris r10,r10,0x1000
- stw r10,_CCR(r1)
-
/* disable interrupts so current_thread_info()->flags can't change */
-30: LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
+ LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
SYNC
MTMSRD(r10)
lwz r9,TI_FLAGS(r12)
- andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
+ li r8,-_LAST_ERRNO
+ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL)
bne- syscall_exit_work
+ cmplw 0,r3,r8
+ blt+ syscall_exit_cont
+ lwz r11,_CCR(r1) /* Load CR */
+ neg r3,r3
+ oris r11,r11,0x1000 /* Set SO bit in CR */
+ stw r11,_CCR(r1)
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* If the process has its own DBCR0 value, load it up. The single
@@ -292,46 +286,113 @@ syscall_dotrace:
b syscall_dotrace_cont
syscall_exit_work:
- stw r6,RESULT(r1) /* Save result */
+ andi. r0,r9,_TIF_RESTOREALL
+ bne- 2f
+ cmplw 0,r3,r8
+ blt+ 1f
+ andi. r0,r9,_TIF_NOERROR
+ bne- 1f
+ lwz r11,_CCR(r1) /* Load CR */
+ neg r3,r3
+ oris r11,r11,0x1000 /* Set SO bit in CR */
+ stw r11,_CCR(r1)
+
+1: stw r6,RESULT(r1) /* Save result */
stw r3,GPR3(r1) /* Update return value */
- andi. r0,r9,_TIF_SYSCALL_T_OR_A
- beq 5f
- ori r10,r10,MSR_EE
- SYNC
- MTMSRD(r10) /* re-enable interrupts */
+2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
+ beq 4f
+
+ /* Clear per-syscall TIF flags if any are set, but _leave_
+ _TIF_SAVE_NVGPRS set in r9 since we haven't dealt with that
+ yet. */
+
+ li r11,_TIF_PERSYSCALL_MASK
+ addi r12,r12,TI_FLAGS
+3: lwarx r8,0,r12
+ andc r8,r8,r11
+#ifdef CONFIG_IBM405_ERR77
+ dcbt 0,r12
+#endif
+ stwcx. r8,0,r12
+ bne- 3b
+ subi r12,r12,TI_FLAGS
+
+4: /* Anything which requires enabling interrupts? */
+ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SAVE_NVGPRS)
+ beq 7f
+
+ /* Save NVGPRS if they're not saved already */
lwz r4,TRAP(r1)
andi. r4,r4,1
- beq 4f
+ beq 5f
SAVE_NVGPRS(r1)
li r4,0xc00
stw r4,TRAP(r1)
-4:
+
+ /* Re-enable interrupts */
+5: ori r10,r10,MSR_EE
+ SYNC
+ MTMSRD(r10)
+
+ andi. r0,r9,_TIF_SAVE_NVGPRS
+ bne save_user_nvgprs
+
+save_user_nvgprs_cont:
+ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
+ beq 7f
+
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_syscall_trace_leave
REST_NVGPRS(r1)
-2:
- lwz r3,GPR3(r1)
+
+6: lwz r3,GPR3(r1)
LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
SYNC
MTMSRD(r10) /* disable interrupts again */
rlwinm r12,r1,0,0,18 /* current_thread_info() */
lwz r9,TI_FLAGS(r12)
-5:
+7:
andi. r0,r9,_TIF_NEED_RESCHED
- bne 1f
+ bne 8f
lwz r5,_MSR(r1)
andi. r5,r5,MSR_PR
- beq syscall_exit_cont
+ beq ret_from_except
andi. r0,r9,_TIF_SIGPENDING
- beq syscall_exit_cont
+ beq ret_from_except
b do_user_signal
-1:
+8:
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10) /* re-enable interrupts */
bl schedule
- b 2b
+ b 6b
+
+save_user_nvgprs:
+ lwz r8,TI_SIGFRAME(r12)
+
+.macro savewords start, end
+ 1: stw \start,4*(\start)(r8)
+ .section __ex_table,"a"
+ .align 2
+ .long 1b,save_user_nvgprs_fault
+ .previous
+ .if \end - \start
+ savewords "(\start+1)",\end
+ .endif
+.endm
+ savewords 14,31
+ b save_user_nvgprs_cont
+
+
+save_user_nvgprs_fault:
+ li r3,11 /* SIGSEGV */
+ lwz r4,TI_TASK(r12)
+ bl force_sigsegv
+ rlwinm r12,r1,0,0,18 /* current_thread_info() */
+ lwz r9,TI_FLAGS(r12)
+ b save_user_nvgprs_cont
+
#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
@@ -401,28 +462,10 @@ show_syscalls_task:
#endif /* SHOW_SYSCALLS */
/*
- * The sigsuspend and rt_sigsuspend system calls can call do_signal
- * and thus put the process into the stopped state where we might
- * want to examine its user state with ptrace. Therefore we need
- * to save all the nonvolatile registers (r13 - r31) before calling
- * the C code.
+ * The fork/clone functions need to copy the full register set into
+ * the child process. Therefore we need to save all the nonvolatile
+ * registers (r13 - r31) before calling the C code.
*/
- .globl ppc_sigsuspend
-ppc_sigsuspend:
- SAVE_NVGPRS(r1)
- lwz r0,TRAP(r1)
- rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
- stw r0,TRAP(r1) /* register set saved */
- b sys_sigsuspend
-
- .globl ppc_rt_sigsuspend
-ppc_rt_sigsuspend:
- SAVE_NVGPRS(r1)
- lwz r0,TRAP(r1)
- rlwinm r0,r0,0,0,30
- stw r0,TRAP(r1)
- b sys_rt_sigsuspend
-
.globl ppc_fork
ppc_fork:
SAVE_NVGPRS(r1)
@@ -447,14 +490,6 @@ ppc_clone:
stw r0,TRAP(r1) /* register set saved */
b sys_clone
- .globl ppc_swapcontext
-ppc_swapcontext:
- SAVE_NVGPRS(r1)
- lwz r0,TRAP(r1)
- rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
- stw r0,TRAP(r1) /* register set saved */
- b sys_swapcontext
-
/*
* Top-level page fault handling.
* This is in assembler because if do_page_fault tells us that
@@ -626,16 +661,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
.long ret_from_except
#endif
- .globl sigreturn_exit
-sigreturn_exit:
- subi r1,r3,STACK_FRAME_OVERHEAD
- rlwinm r12,r1,0,0,18 /* current_thread_info() */
- lwz r9,TI_FLAGS(r12)
- andi. r0,r9,_TIF_SYSCALL_T_OR_A
- beq+ ret_from_except_full
- bl do_syscall_trace_leave
- /* fall through */
-
.globl ret_from_except_full
ret_from_except_full:
REST_NVGPRS(r1)
@@ -658,7 +683,7 @@ user_exc_return: /* r10 contains MSR_KERNEL here */
/* Check current_thread_info()->flags */
rlwinm r9,r1,0,0,18
lwz r9,TI_FLAGS(r9)
- andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
+ andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL)
bne do_work
restore_user:
diff --git a/arch/ppc/kernel/head_8xx.S b/arch/ppc/kernel/head_8xx.S
index de097874222..3e6ca7f5843 100644
--- a/arch/ppc/kernel/head_8xx.S
+++ b/arch/ppc/kernel/head_8xx.S
@@ -375,6 +375,8 @@ DataStoreTLBMiss:
lis r11, swapper_pg_dir@h
ori r11, r11, swapper_pg_dir@l
rlwimi r10, r11, 0, 2, 19
+ stw r12, 16(r0)
+ b LoadLargeDTLB
3:
lwz r11, 0(r10) /* Get the level 1 entry */
rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
@@ -430,6 +432,81 @@ DataStoreTLBMiss:
InstructionTLBError:
b InstructionAccess
+LoadLargeDTLB:
+ li r12, 0
+ lwz r11, 0(r10) /* Get the level 1 entry */
+ rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
+ beq 3f /* If zero, don't try to find a pte */
+
+ /* We have a pte table, so fetch the pte from the table.
+ */
+ ori r11, r11, 1 /* Set valid bit in physical L2 page */
+ DO_8xx_CPU6(0x3b80, r3)
+ mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
+ mfspr r10, SPRN_MD_TWC /* ....and get the pte address */
+ lwz r10, 0(r10) /* Get the pte */
+
+ /* Insert the Guarded flag into the TWC from the Linux PTE.
+ * It is bit 27 of both the Linux PTE and the TWC (at least
+ * I got that right :-). It will be better when we can put
+ * this into the Linux pgd/pmd and load it in the operation
+ * above.
+ */
+ rlwimi r11, r10, 0, 27, 27
+
+ rlwimi r12, r10, 0, 0, 9 /* extract phys. addr */
+ mfspr r3, SPRN_MD_EPN
+ rlwinm r3, r3, 0, 0, 9 /* extract virtual address */
+ tophys(r3, r3)
+ cmpw r3, r12 /* only use 8M page if it is a direct
+ kernel mapping */
+ bne 1f
+ ori r11, r11, MD_PS8MEG
+ li r12, 1
+ b 2f
+1:
+ li r12, 0 /* can't use 8MB TLB, so zero r12. */
+2:
+ DO_8xx_CPU6(0x3b80, r3)
+ mtspr SPRN_MD_TWC, r11
+
+ /* The Linux PTE won't go exactly into the MMU TLB.
+ * Software indicator bits 21, 22 and 28 must be clear.
+ * Software indicator bits 24, 25, 26, and 27 must be
+ * set. All other Linux PTE bits control the behavior
+ * of the MMU.
+ */
+3: li r11, 0x00f0
+ rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
+ cmpwi r12, 1
+ bne 4f
+ ori r10, r10, 0x8
+
+ mfspr r12, SPRN_MD_EPN
+ lis r3, 0xff80 /* 10-19 must be clear for 8MB TLB */
+ ori r3, r3, 0x0fff
+ and r12, r3, r12
+ DO_8xx_CPU6(0x3780, r3)
+ mtspr SPRN_MD_EPN, r12
+
+ lis r3, 0xff80 /* 10-19 must be clear for 8MB TLB */
+ ori r3, r3, 0x0fff
+ and r10, r3, r10
+4:
+ DO_8xx_CPU6(0x3d80, r3)
+ mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
+
+ mfspr r10, SPRN_M_TW /* Restore registers */
+ lwz r11, 0(r0)
+ mtcr r11
+ lwz r11, 4(r0)
+
+ lwz r12, 16(r0)
+#ifdef CONFIG_8xx_CPU6
+ lwz r3, 8(r0)
+#endif
+ rfi
+
/* This is the data TLB error on the MPC8xx. This could be due to
* many reasons, including a dirty update to a pte. We can catch that
* one here, but anything else is an error. First, we track down the
diff --git a/arch/ppc/kernel/idle.c b/arch/ppc/kernel/idle.c
index 821a75e4560..1be3ca5bae4 100644
--- a/arch/ppc/kernel/idle.c
+++ b/arch/ppc/kernel/idle.c
@@ -37,7 +37,6 @@
void default_idle(void)
{
void (*powersave)(void);
- int cpu = smp_processor_id();
powersave = ppc_md.power_save;
@@ -47,7 +46,8 @@ void default_idle(void)
#ifdef CONFIG_SMP
else {
set_thread_flag(TIF_POLLING_NRFLAG);
- while (!need_resched() && !cpu_is_offline(cpu))
+ while (!need_resched() &&
+ !cpu_is_offline(smp_processor_id()))
barrier();
clear_thread_flag(TIF_POLLING_NRFLAG);
}
diff --git a/arch/ppc/kernel/machine_kexec.c b/arch/ppc/kernel/machine_kexec.c
index a882b0dbe8d..84d65a87191 100644
--- a/arch/ppc/kernel/machine_kexec.c
+++ b/arch/ppc/kernel/machine_kexec.c
@@ -28,12 +28,6 @@ typedef NORET_TYPE void (*relocate_new_kernel_t)(
const extern unsigned char relocate_new_kernel[];
const extern unsigned int relocate_new_kernel_size;
-/*
- * Provide a dummy crash_notes definition while crash dump arrives to ppc.
- * This prevents breakage of crash_notes attribute in kernel/ksysfs.c.
- */
-note_buf_t crash_notes[NR_CPUS];
-
void machine_shutdown(void)
{
if (ppc_md.machine_shutdown)
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index 5e61124581d..fb5658bba28 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -1197,7 +1197,7 @@ _GLOBAL(sys_call_table)
.long sys_ssetmask
.long sys_setreuid /* 70 */
.long sys_setregid
- .long ppc_sigsuspend
+ .long sys_sigsuspend
.long sys_sigpending
.long sys_sethostname
.long sys_setrlimit /* 75 */
@@ -1303,7 +1303,7 @@ _GLOBAL(sys_call_table)
.long sys_rt_sigpending /* 175 */
.long sys_rt_sigtimedwait
.long sys_rt_sigqueueinfo
- .long ppc_rt_sigsuspend
+ .long sys_rt_sigsuspend
.long sys_pread64
.long sys_pwrite64 /* 180 */
.long sys_chown
@@ -1374,7 +1374,7 @@ _GLOBAL(sys_call_table)
.long sys_clock_gettime
.long sys_clock_getres
.long sys_clock_nanosleep
- .long ppc_swapcontext
+ .long sys_swapcontext
.long sys_tgkill /* 250 */
.long sys_utimes
.long sys_statfs64
diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c
index f7fae5f153b..704c846b2b0 100644
--- a/arch/ppc/kernel/pci.c
+++ b/arch/ppc/kernel/pci.c
@@ -503,7 +503,7 @@ pcibios_allocate_resources(int pass)
u16 command;
struct resource *r;
- while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ for_each_pci_dev(dev) {
pci_read_config_word(dev, PCI_COMMAND, &command);
for (idx = 0; idx < 6; idx++) {
r = &dev->resource[idx];
@@ -540,7 +540,7 @@ pcibios_assign_resources(void)
int idx;
struct resource *r;
- while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ for_each_pci_dev(dev) {
int class = dev->class >> 8;
/* Don't touch classless devices and host bridges */
@@ -815,8 +815,7 @@ EXPORT_SYMBOL(pci_device_to_OF_node);
* to set pci_assign_all_buses to 1 and still use RTAS for PCI
* config cycles.
*/
-struct pci_controller*
-pci_find_hose_for_OF_device(struct device_node* node)
+struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
{
if (!have_of)
return NULL;
@@ -868,14 +867,15 @@ pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
*/
if (!pci_to_OF_bus_map)
return 0;
- while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
- if (pci_to_OF_bus_map[dev->bus->number] != *bus)
- continue;
- if (dev->devfn != *devfn)
- continue;
- *bus = dev->bus->number;
- return 0;
- }
+
+ for_each_pci_dev(dev)
+ if (pci_to_OF_bus_map[dev->bus->number] == *bus &&
+ dev->devfn == *devfn) {
+ *bus = dev->bus->number;
+ pci_dev_put(dev);
+ return 0;
+ }
+
return -ENODEV;
}
EXPORT_SYMBOL(pci_device_from_OF_node);
@@ -942,7 +942,7 @@ pci_process_bridge_OF_ranges(struct pci_controller *hose,
while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
res = NULL;
size = ranges[na+4];
- switch (ranges[0] >> 24) {
+ switch ((ranges[0] >> 24) & 0x3) {
case 1: /* I/O space */
if (ranges[2] != 0)
break;
@@ -956,6 +956,8 @@ pci_process_bridge_OF_ranges(struct pci_controller *hose,
res = &hose->io_resource;
res->flags = IORESOURCE_IO;
res->start = ranges[2];
+ DBG("PCI: IO 0x%lx -> 0x%lx\n",
+ res->start, res->start + size - 1);
break;
case 2: /* memory space */
memno = 0;
@@ -973,7 +975,11 @@ pci_process_bridge_OF_ranges(struct pci_controller *hose,
if (memno < 3) {
res = &hose->mem_resources[memno];
res->flags = IORESOURCE_MEM;
+ if(ranges[0] & 0x40000000)
+ res->flags |= IORESOURCE_PREFETCH;
res->start = ranges[na+2];
+ DBG("PCI: MEM[%d] 0x%lx -> 0x%lx\n", memno,
+ res->start, res->start + size - 1);
}
break;
}
@@ -1806,6 +1812,23 @@ void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
+unsigned long pci_address_to_pio(phys_addr_t address)
+{
+ struct pci_controller* hose = hose_head;
+
+ for (; hose; hose = hose->next) {
+ unsigned int size = hose->io_resource.end -
+ hose->io_resource.start + 1;
+ if (address >= hose->io_base_phys &&
+ address < (hose->io_base_phys + size)) {
+ unsigned long base =
+ (unsigned long)hose->io_base_virt - _IO_BASE;
+ return base + (address - hose->io_base_phys);
+ }
+ }
+ return (unsigned int)-1;
+}
+EXPORT_SYMBOL(pci_address_to_pio);
/*
* Null PCI config access functions, for the case when we can't
diff --git a/arch/ppc/kernel/ppc_htab.c b/arch/ppc/kernel/ppc_htab.c
index ca810025993..2f5c7650274 100644
--- a/arch/ppc/kernel/ppc_htab.c
+++ b/arch/ppc/kernel/ppc_htab.c
@@ -16,6 +16,7 @@
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
+#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/threads.h>
#include <linux/smp_lock.h>
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index bb6a5c6a64b..fb0da7c209d 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -82,10 +82,6 @@ EXPORT_SYMBOL(ppc_n_lost_interrupts);
EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
EXPORT_SYMBOL(DMA_MODE_READ);
EXPORT_SYMBOL(DMA_MODE_WRITE);
-#if defined(CONFIG_PPC_PREP)
-EXPORT_SYMBOL(_prep_type);
-EXPORT_SYMBOL(ucSystemType);
-#endif
#if !defined(__INLINE_BITOPS)
EXPORT_SYMBOL(set_bit);
@@ -274,7 +270,6 @@ EXPORT_SYMBOL(__delay);
EXPORT_SYMBOL(timer_interrupt);
EXPORT_SYMBOL(irq_desc);
EXPORT_SYMBOL(tb_ticks_per_jiffy);
-EXPORT_SYMBOL(get_wchan);
EXPORT_SYMBOL(console_drivers);
#ifdef CONFIG_XMON
EXPORT_SYMBOL(xmon);
@@ -311,7 +306,6 @@ EXPORT_SYMBOL(__res);
EXPORT_SYMBOL(next_mmu_context);
EXPORT_SYMBOL(set_context);
-EXPORT_SYMBOL_GPL(__handle_mm_fault); /* For MOL */
EXPORT_SYMBOL(disarm_decr);
#ifdef CONFIG_PPC_STD_MMU
extern long mol_trampoline;
diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c
deleted file mode 100644
index 25cbdc8d294..00000000000
--- a/arch/ppc/kernel/process.c
+++ /dev/null
@@ -1,851 +0,0 @@
-/*
- * arch/ppc/kernel/process.c
- *
- * Derived from "arch/i386/kernel/process.c"
- * Copyright (C) 1995 Linus Torvalds
- *
- * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
- * Paul Mackerras (paulus@cs.anu.edu.au)
- *
- * PowerPC version
- * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/config.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/user.h>
-#include <linux/elf.h>
-#include <linux/init.h>
-#include <linux/prctl.h>
-#include <linux/init_task.h>
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/mqueue.h>
-#include <linux/hardirq.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/processor.h>
-#include <asm/mmu.h>
-#include <asm/prom.h>
-
-extern unsigned long _get_SP(void);
-
-struct task_struct *last_task_used_math = NULL;
-struct task_struct *last_task_used_altivec = NULL;
-struct task_struct *last_task_used_spe = NULL;
-
-static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-EXPORT_SYMBOL(init_mm);
-
-/* this is 8kB-aligned so we can get to the thread_info struct
- at the base of it from the stack pointer with 1 integer instruction. */
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"))) =
-{ INIT_THREAD_INFO(init_task) };
-
-/* initial task structure */
-struct task_struct init_task = INIT_TASK(init_task);
-EXPORT_SYMBOL(init_task);
-
-/* only used to get secondary processor up */
-struct task_struct *current_set[NR_CPUS] = {&init_task, };
-
-#undef SHOW_TASK_SWITCHES
-#undef CHECK_STACK
-
-#if defined(CHECK_STACK)
-unsigned long
-kernel_stack_top(struct task_struct *tsk)
-{
- return ((unsigned long)tsk) + sizeof(union task_union);
-}
-
-unsigned long
-task_top(struct task_struct *tsk)
-{
- return ((unsigned long)tsk) + sizeof(struct thread_info);
-}
-
-/* check to make sure the kernel stack is healthy */
-int check_stack(struct task_struct *tsk)
-{
- unsigned long stack_top = kernel_stack_top(tsk);
- unsigned long tsk_top = task_top(tsk);
- int ret = 0;
-
-#if 0
- /* check thread magic */
- if ( tsk->thread.magic != THREAD_MAGIC )
- {
- ret |= 1;
- printk("thread.magic bad: %08x\n", tsk->thread.magic);
- }
-#endif
-
- if ( !tsk )
- printk("check_stack(): tsk bad tsk %p\n",tsk);
-
- /* check if stored ksp is bad */
- if ( (tsk->thread.ksp > stack_top) || (tsk->thread.ksp < tsk_top) )
- {
- printk("stack out of bounds: %s/%d\n"
- " tsk_top %08lx ksp %08lx stack_top %08lx\n",
- tsk->comm,tsk->pid,
- tsk_top, tsk->thread.ksp, stack_top);
- ret |= 2;
- }
-
- /* check if stack ptr RIGHT NOW is bad */
- if ( (tsk == current) && ((_get_SP() > stack_top ) || (_get_SP() < tsk_top)) )
- {
- printk("current stack ptr out of bounds: %s/%d\n"
- " tsk_top %08lx sp %08lx stack_top %08lx\n",
- current->comm,current->pid,
- tsk_top, _get_SP(), stack_top);
- ret |= 4;
- }
-
-#if 0
- /* check amount of free stack */
- for ( i = (unsigned long *)task_top(tsk) ; i < kernel_stack_top(tsk) ; i++ )
- {
- if ( !i )
- printk("check_stack(): i = %p\n", i);
- if ( *i != 0 )
- {
- /* only notify if it's less than 900 bytes */
- if ( (i - (unsigned long *)task_top(tsk)) < 900 )
- printk("%d bytes free on stack\n",
- i - task_top(tsk));
- break;
- }
- }
-#endif
-
- if (ret)
- {
- panic("bad kernel stack");
- }
- return(ret);
-}
-#endif /* defined(CHECK_STACK) */
-
-/*
- * Make sure the floating-point register state in the
- * the thread_struct is up to date for task tsk.
- */
-void flush_fp_to_thread(struct task_struct *tsk)
-{
- if (tsk->thread.regs) {
- /*
- * We need to disable preemption here because if we didn't,
- * another process could get scheduled after the regs->msr
- * test but before we have finished saving the FP registers
- * to the thread_struct. That process could take over the
- * FPU, and then when we get scheduled again we would store
- * bogus values for the remaining FP registers.
- */
- preempt_disable();
- if (tsk->thread.regs->msr & MSR_FP) {
-#ifdef CONFIG_SMP
- /*
- * This should only ever be called for current or
- * for a stopped child process. Since we save away
- * the FP register state on context switch on SMP,
- * there is something wrong if a stopped child appears
- * to still have its FP state in the CPU registers.
- */
- BUG_ON(tsk != current);
-#endif
- giveup_fpu(current);
- }
- preempt_enable();
- }
-}
-
-void enable_kernel_fp(void)
-{
- WARN_ON(preemptible());
-
-#ifdef CONFIG_SMP
- if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
- giveup_fpu(current);
- else
- giveup_fpu(NULL); /* just enables FP for kernel */
-#else
- giveup_fpu(last_task_used_math);
-#endif /* CONFIG_SMP */
-}
-EXPORT_SYMBOL(enable_kernel_fp);
-
-int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
-{
- preempt_disable();
- if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
- giveup_fpu(tsk);
- preempt_enable();
- memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
- return 1;
-}
-
-#ifdef CONFIG_ALTIVEC
-void enable_kernel_altivec(void)
-{
- WARN_ON(preemptible());
-
-#ifdef CONFIG_SMP
- if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
- giveup_altivec(current);
- else
- giveup_altivec(NULL); /* just enable AltiVec for kernel - force */
-#else
- giveup_altivec(last_task_used_altivec);
-#endif /* __SMP __ */
-}
-EXPORT_SYMBOL(enable_kernel_altivec);
-
-/*
- * Make sure the VMX/Altivec register state in the
- * the thread_struct is up to date for task tsk.
- */
-void flush_altivec_to_thread(struct task_struct *tsk)
-{
- if (tsk->thread.regs) {
- preempt_disable();
- if (tsk->thread.regs->msr & MSR_VEC) {
-#ifdef CONFIG_SMP
- BUG_ON(tsk != current);
-#endif
- giveup_altivec(current);
- }
- preempt_enable();
- }
-}
-
-int dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
-{
- if (regs->msr & MSR_VEC)
- giveup_altivec(current);
- memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
- return 1;
-}
-#endif /* CONFIG_ALTIVEC */
-
-#ifdef CONFIG_SPE
-void
-enable_kernel_spe(void)
-{
- WARN_ON(preemptible());
-
-#ifdef CONFIG_SMP
- if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
- giveup_spe(current);
- else
- giveup_spe(NULL); /* just enable SPE for kernel - force */
-#else
- giveup_spe(last_task_used_spe);
-#endif /* __SMP __ */
-}
-EXPORT_SYMBOL(enable_kernel_spe);
-
-void flush_spe_to_thread(struct task_struct *tsk)
-{
- if (tsk->thread.regs) {
- preempt_disable();
- if (tsk->thread.regs->msr & MSR_SPE) {
-#ifdef CONFIG_SMP
- BUG_ON(tsk != current);
-#endif
- giveup_spe(current);
- }
- preempt_enable();
- }
-}
-
-int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
-{
- if (regs->msr & MSR_SPE)
- giveup_spe(current);
- /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
- memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
- return 1;
-}
-#endif /* CONFIG_SPE */
-
-struct task_struct *__switch_to(struct task_struct *prev,
- struct task_struct *new)
-{
- struct thread_struct *new_thread, *old_thread;
- unsigned long s;
- struct task_struct *last;
-
- local_irq_save(s);
-#ifdef CHECK_STACK
- check_stack(prev);
- check_stack(new);
-#endif
-
-#ifdef CONFIG_SMP
- /* avoid complexity of lazy save/restore of fpu
- * by just saving it every time we switch out if
- * this task used the fpu during the last quantum.
- *
- * If it tries to use the fpu again, it'll trap and
- * reload its fp regs. So we don't have to do a restore
- * every switch, just a save.
- * -- Cort
- */
- if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
- giveup_fpu(prev);
-#ifdef CONFIG_ALTIVEC
- /*
- * If the previous thread used altivec in the last quantum
- * (thus changing altivec regs) then save them.
- * We used to check the VRSAVE register but not all apps
- * set it, so we don't rely on it now (and in fact we need
- * to save & restore VSCR even if VRSAVE == 0). -- paulus
- *
- * On SMP we always save/restore altivec regs just to avoid the
- * complexity of changing processors.
- * -- Cort
- */
- if ((prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)))
- giveup_altivec(prev);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
- /*
- * If the previous thread used spe in the last quantum
- * (thus changing spe regs) then save them.
- *
- * On SMP we always save/restore spe regs just to avoid the
- * complexity of changing processors.
- */
- if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
- giveup_spe(prev);
-#endif /* CONFIG_SPE */
-#endif /* CONFIG_SMP */
-
-#ifdef CONFIG_ALTIVEC
- /* Avoid the trap. On smp this this never happens since
- * we don't set last_task_used_altivec -- Cort
- */
- if (new->thread.regs && last_task_used_altivec == new)
- new->thread.regs->msr |= MSR_VEC;
-#endif
-#ifdef CONFIG_SPE
- /* Avoid the trap. On smp this this never happens since
- * we don't set last_task_used_spe
- */
- if (new->thread.regs && last_task_used_spe == new)
- new->thread.regs->msr |= MSR_SPE;
-#endif /* CONFIG_SPE */
- new_thread = &new->thread;
- old_thread = &current->thread;
- last = _switch(old_thread, new_thread);
- local_irq_restore(s);
- return last;
-}
-
-void show_regs(struct pt_regs * regs)
-{
- int i, trap;
-
- printk("NIP: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx %s\n",
- regs->nip, regs->link, regs->gpr[1], regs, regs->trap,
- print_tainted());
- printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n",
- regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0,
- regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0,
- regs->msr&MSR_IR ? 1 : 0,
- regs->msr&MSR_DR ? 1 : 0);
- trap = TRAP(regs);
- if (trap == 0x300 || trap == 0x600)
- printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr);
- printk("TASK = %p[%d] '%s' THREAD: %p\n",
- current, current->pid, current->comm, current->thread_info);
- printk("Last syscall: %ld ", current->thread.last_syscall);
-
-#ifdef CONFIG_SMP
- printk(" CPU: %d", smp_processor_id());
-#endif /* CONFIG_SMP */
-
- for (i = 0; i < 32; i++) {
- long r;
- if ((i % 8) == 0)
- printk("\n" KERN_INFO "GPR%02d: ", i);
- if (__get_user(r, &regs->gpr[i]))
- break;
- printk("%08lX ", r);
- if (i == 12 && !FULL_REGS(regs))
- break;
- }
- printk("\n");
-#ifdef CONFIG_KALLSYMS
- /*
- * Lookup NIP late so we have the best change of getting the
- * above info out without failing
- */
- printk("NIP [%08lx] ", regs->nip);
- print_symbol("%s\n", regs->nip);
- printk("LR [%08lx] ", regs->link);
- print_symbol("%s\n", regs->link);
-#endif
- show_stack(current, (unsigned long *) regs->gpr[1]);
-}
-
-void exit_thread(void)
-{
- preempt_disable();
- if (last_task_used_math == current)
- last_task_used_math = NULL;
- if (last_task_used_altivec == current)
- last_task_used_altivec = NULL;
-#ifdef CONFIG_SPE
- if (last_task_used_spe == current)
- last_task_used_spe = NULL;
-#endif
- preempt_enable();
-}
-
-void flush_thread(void)
-{
- preempt_disable();
- if (last_task_used_math == current)
- last_task_used_math = NULL;
- if (last_task_used_altivec == current)
- last_task_used_altivec = NULL;
-#ifdef CONFIG_SPE
- if (last_task_used_spe == current)
- last_task_used_spe = NULL;
-#endif
- preempt_enable();
-}
-
-void
-release_thread(struct task_struct *t)
-{
-}
-
-/*
- * This gets called before we allocate a new thread and copy
- * the current task into it.
- */
-void prepare_to_copy(struct task_struct *tsk)
-{
- struct pt_regs *regs = tsk->thread.regs;
-
- if (regs == NULL)
- return;
- preempt_disable();
- if (regs->msr & MSR_FP)
- giveup_fpu(current);
-#ifdef CONFIG_ALTIVEC
- if (regs->msr & MSR_VEC)
- giveup_altivec(current);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
- if (regs->msr & MSR_SPE)
- giveup_spe(current);
-#endif /* CONFIG_SPE */
- preempt_enable();
-}
-
-/*
- * Copy a thread..
- */
-int
-copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
- unsigned long unused,
- struct task_struct *p, struct pt_regs *regs)
-{
- struct pt_regs *childregs, *kregs;
- extern void ret_from_fork(void);
- unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
- unsigned long childframe;
-
- CHECK_FULL_REGS(regs);
- /* Copy registers */
- sp -= sizeof(struct pt_regs);
- childregs = (struct pt_regs *) sp;
- *childregs = *regs;
- if ((childregs->msr & MSR_PR) == 0) {
- /* for kernel thread, set `current' and stackptr in new task */
- childregs->gpr[1] = sp + sizeof(struct pt_regs);
- childregs->gpr[2] = (unsigned long) p;
- p->thread.regs = NULL; /* no user register state */
- } else {
- childregs->gpr[1] = usp;
- p->thread.regs = childregs;
- if (clone_flags & CLONE_SETTLS)
- childregs->gpr[2] = childregs->gpr[6];
- }
- childregs->gpr[3] = 0; /* Result from fork() */
- sp -= STACK_FRAME_OVERHEAD;
- childframe = sp;
-
- /*
- * The way this works is that at some point in the future
- * some task will call _switch to switch to the new task.
- * That will pop off the stack frame created below and start
- * the new task running at ret_from_fork. The new task will
- * do some house keeping and then return from the fork or clone
- * system call, using the stack frame created above.
- */
- sp -= sizeof(struct pt_regs);
- kregs = (struct pt_regs *) sp;
- sp -= STACK_FRAME_OVERHEAD;
- p->thread.ksp = sp;
- kregs->nip = (unsigned long)ret_from_fork;
-
- p->thread.last_syscall = -1;
-
- return 0;
-}
-
-/*
- * Set up a thread for executing a new program
- */
-void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
-{
- set_fs(USER_DS);
- memset(regs->gpr, 0, sizeof(regs->gpr));
- regs->ctr = 0;
- regs->link = 0;
- regs->xer = 0;
- regs->ccr = 0;
- regs->mq = 0;
- regs->nip = nip;
- regs->gpr[1] = sp;
- regs->msr = MSR_USER;
- preempt_disable();
- if (last_task_used_math == current)
- last_task_used_math = NULL;
- if (last_task_used_altivec == current)
- last_task_used_altivec = NULL;
-#ifdef CONFIG_SPE
- if (last_task_used_spe == current)
- last_task_used_spe = NULL;
-#endif
- preempt_enable();
- memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
- current->thread.fpscr.val = 0;
-#ifdef CONFIG_ALTIVEC
- memset(current->thread.vr, 0, sizeof(current->thread.vr));
- memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
- current->thread.vrsave = 0;
- current->thread.used_vr = 0;
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
- memset(current->thread.evr, 0, sizeof(current->thread.evr));
- current->thread.acc = 0;
- current->thread.spefscr = 0;
- current->thread.used_spe = 0;
-#endif /* CONFIG_SPE */
-}
-
-#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
- | PR_FP_EXC_RES | PR_FP_EXC_INV)
-
-int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
-{
- struct pt_regs *regs = tsk->thread.regs;
-
- /* This is a bit hairy. If we are an SPE enabled processor
- * (have embedded fp) we store the IEEE exception enable flags in
- * fpexc_mode. fpexc_mode is also used for setting FP exception
- * mode (asyn, precise, disabled) for 'Classic' FP. */
- if (val & PR_FP_EXC_SW_ENABLE) {
-#ifdef CONFIG_SPE
- tsk->thread.fpexc_mode = val &
- (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
-#else
- return -EINVAL;
-#endif
- } else {
- /* on a CONFIG_SPE this does not hurt us. The bits that
- * __pack_fe01 use do not overlap with bits used for
- * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
- * on CONFIG_SPE implementations are reserved so writing to
- * them does not change anything */
- if (val > PR_FP_EXC_PRECISE)
- return -EINVAL;
- tsk->thread.fpexc_mode = __pack_fe01(val);
- if (regs != NULL && (regs->msr & MSR_FP) != 0)
- regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
- | tsk->thread.fpexc_mode;
- }
- return 0;
-}
-
-int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
-{
- unsigned int val;
-
- if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
-#ifdef CONFIG_SPE
- val = tsk->thread.fpexc_mode;
-#else
- return -EINVAL;
-#endif
- else
- val = __unpack_fe01(tsk->thread.fpexc_mode);
- return put_user(val, (unsigned int __user *) adr);
-}
-
-int sys_clone(unsigned long clone_flags, unsigned long usp,
- int __user *parent_tidp, void __user *child_threadptr,
- int __user *child_tidp, int p6,
- struct pt_regs *regs)
-{
- CHECK_FULL_REGS(regs);
- if (usp == 0)
- usp = regs->gpr[1]; /* stack pointer for child */
- return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
-}
-
-int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
- unsigned long p4, unsigned long p5, unsigned long p6,
- struct pt_regs *regs)
-{
- CHECK_FULL_REGS(regs);
- return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
-}
-
-int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
- unsigned long p4, unsigned long p5, unsigned long p6,
- struct pt_regs *regs)
-{
- CHECK_FULL_REGS(regs);
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
- regs, 0, NULL, NULL);
-}
-
-int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
- unsigned long a3, unsigned long a4, unsigned long a5,
- struct pt_regs *regs)
-{
- int error;
- char * filename;
-
- filename = getname((char __user *) a0);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
- preempt_disable();
- if (regs->msr & MSR_FP)
- giveup_fpu(current);
-#ifdef CONFIG_ALTIVEC
- if (regs->msr & MSR_VEC)
- giveup_altivec(current);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
- if (regs->msr & MSR_SPE)
- giveup_spe(current);
-#endif /* CONFIG_SPE */
- preempt_enable();
- error = do_execve(filename, (char __user *__user *) a1,
- (char __user *__user *) a2, regs);
- if (error == 0) {
- task_lock(current);
- current->ptrace &= ~PT_DTRACE;
- task_unlock(current);
- }
- putname(filename);
-out:
- return error;
-}
-
-void dump_stack(void)
-{
- show_stack(current, NULL);
-}
-
-EXPORT_SYMBOL(dump_stack);
-
-void show_stack(struct task_struct *tsk, unsigned long *stack)
-{
- unsigned long sp, stack_top, prev_sp, ret;
- int count = 0;
- unsigned long next_exc = 0;
- struct pt_regs *regs;
- extern char ret_from_except, ret_from_except_full, ret_from_syscall;
-
- sp = (unsigned long) stack;
- if (tsk == NULL)
- tsk = current;
- if (sp == 0) {
- if (tsk == current)
- asm("mr %0,1" : "=r" (sp));
- else
- sp = tsk->thread.ksp;
- }
-
- prev_sp = (unsigned long) (tsk->thread_info + 1);
- stack_top = (unsigned long) tsk->thread_info + THREAD_SIZE;
- while (count < 16 && sp > prev_sp && sp < stack_top && (sp & 3) == 0) {
- if (count == 0) {
- printk("Call trace:");
-#ifdef CONFIG_KALLSYMS
- printk("\n");
-#endif
- } else {
- if (next_exc) {
- ret = next_exc;
- next_exc = 0;
- } else
- ret = *(unsigned long *)(sp + 4);
- printk(" [%08lx] ", ret);
-#ifdef CONFIG_KALLSYMS
- print_symbol("%s", ret);
- printk("\n");
-#endif
- if (ret == (unsigned long) &ret_from_except
- || ret == (unsigned long) &ret_from_except_full
- || ret == (unsigned long) &ret_from_syscall) {
- /* sp + 16 points to an exception frame */
- regs = (struct pt_regs *) (sp + 16);
- if (sp + 16 + sizeof(*regs) <= stack_top)
- next_exc = regs->nip;
- }
- }
- ++count;
- sp = *(unsigned long *)sp;
- }
-#ifndef CONFIG_KALLSYMS
- if (count > 0)
- printk("\n");
-#endif
-}
-
-#if 0
-/*
- * Low level print for debugging - Cort
- */
-int __init ll_printk(const char *fmt, ...)
-{
- va_list args;
- char buf[256];
- int i;
-
- va_start(args, fmt);
- i=vsprintf(buf,fmt,args);
- ll_puts(buf);
- va_end(args);
- return i;
-}
-
-int lines = 24, cols = 80;
-int orig_x = 0, orig_y = 0;
-
-void puthex(unsigned long val)
-{
- unsigned char buf[10];
- int i;
- for (i = 7; i >= 0; i--)
- {
- buf[i] = "0123456789ABCDEF"[val & 0x0F];
- val >>= 4;
- }
- buf[8] = '\0';
- prom_print(buf);
-}
-
-void __init ll_puts(const char *s)
-{
- int x,y;
- char *vidmem = (char *)/*(_ISA_MEM_BASE + 0xB8000) */0xD00B8000;
- char c;
- extern int mem_init_done;
-
- if ( mem_init_done ) /* assume this means we can printk */
- {
- printk(s);
- return;
- }
-
-#if 0
- if ( have_of )
- {
- prom_print(s);
- return;
- }
-#endif
-
- /*
- * can't ll_puts on chrp without openfirmware yet.
- * vidmem just needs to be setup for it.
- * -- Cort
- */
- if ( _machine != _MACH_prep )
- return;
- x = orig_x;
- y = orig_y;
-
- while ( ( c = *s++ ) != '\0' ) {
- if ( c == '\n' ) {
- x = 0;
- if ( ++y >= lines ) {
- /*scroll();*/
- /*y--;*/
- y = 0;
- }
- } else {
- vidmem [ ( x + cols * y ) * 2 ] = c;
- if ( ++x >= cols ) {
- x = 0;
- if ( ++y >= lines ) {
- /*scroll();*/
- /*y--;*/
- y = 0;
- }
- }
- }
- }
-
- orig_x = x;
- orig_y = y;
-}
-#endif
-
-unsigned long get_wchan(struct task_struct *p)
-{
- unsigned long ip, sp;
- unsigned long stack_page = (unsigned long) p->thread_info;
- int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
- return 0;
- sp = p->thread.ksp;
- do {
- sp = *(unsigned long *)sp;
- if (sp < stack_page || sp >= stack_page + 8188)
- return 0;
- if (count > 0) {
- ip = *(unsigned long *)(sp + 4);
- if (!in_sched_functions(ip))
- return ip;
- }
- } while (count++ < 16);
- return 0;
-}
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 0eb0b7085e6..e707c6f6e61 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -744,6 +744,9 @@ void __init setup_arch(char **cmdline_p)
/* so udelay does something sensible, assume <= 1000 bogomips */
loops_per_jiffy = 500000000 / HZ;
+ if (ppc_md.init_early)
+ ppc_md.init_early();
+
#ifdef CONFIG_PPC_MULTIPLATFORM
/* This could be called "early setup arch", it must be done
* now because xmon need it
diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c
index becbfa39755..e55cdda6149 100644
--- a/arch/ppc/kernel/smp.c
+++ b/arch/ppc/kernel/smp.c
@@ -318,7 +318,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
p = fork_idle(cpu);
if (IS_ERR(p))
panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
- p->thread_info->cpu = cpu;
+ task_thread_info(p)->cpu = cpu;
idle_tasks[cpu] = p;
}
}
@@ -369,7 +369,7 @@ int __cpu_up(unsigned int cpu)
char buf[32];
int c;
- secondary_ti = idle_tasks[cpu]->thread_info;
+ secondary_ti = task_thread_info(idle_tasks[cpu]);
mb();
/*
diff --git a/arch/ppc/platforms/4xx/ibm440gx.c b/arch/ppc/platforms/4xx/ibm440gx.c
index 956f45e4ef9..d24c09ee7b1 100644
--- a/arch/ppc/platforms/4xx/ibm440gx.c
+++ b/arch/ppc/platforms/4xx/ibm440gx.c
@@ -58,7 +58,6 @@ static struct ocp_func_emac_data ibm440gx_emac2_def = {
.wol_irq = 65, /* WOL interrupt number */
.mdio_idx = -1, /* No shared MDIO */
.tah_idx = 0, /* TAH device index */
- .jumbo = 1, /* Jumbo frames supported */
};
static struct ocp_func_emac_data ibm440gx_emac3_def = {
@@ -72,7 +71,6 @@ static struct ocp_func_emac_data ibm440gx_emac3_def = {
.wol_irq = 67, /* WOL interrupt number */
.mdio_idx = -1, /* No shared MDIO */
.tah_idx = 1, /* TAH device index */
- .jumbo = 1, /* Jumbo frames supported */
};
OCP_SYSFS_EMAC_DATA()
diff --git a/arch/ppc/platforms/4xx/ibm440sp.c b/arch/ppc/platforms/4xx/ibm440sp.c
index feb17e41ef6..71a0117d359 100644
--- a/arch/ppc/platforms/4xx/ibm440sp.c
+++ b/arch/ppc/platforms/4xx/ibm440sp.c
@@ -31,7 +31,6 @@ static struct ocp_func_emac_data ibm440sp_emac0_def = {
.wol_irq = 61, /* WOL interrupt number */
.mdio_idx = -1, /* No shared MDIO */
.tah_idx = -1, /* No TAH */
- .jumbo = 1, /* Jumbo frames supported */
};
OCP_SYSFS_EMAC_DATA()
diff --git a/arch/ppc/platforms/83xx/mpc834x_sys.c b/arch/ppc/platforms/83xx/mpc834x_sys.c
index 04bdc39bf47..012e1e652c0 100644
--- a/arch/ppc/platforms/83xx/mpc834x_sys.c
+++ b/arch/ppc/platforms/83xx/mpc834x_sys.c
@@ -51,9 +51,6 @@
#include <syslib/ppc83xx_setup.h>
-static const char *GFAR_PHY_0 = "phy0:0";
-static const char *GFAR_PHY_1 = "phy0:1";
-
#ifndef CONFIG_PCI
unsigned long isa_io_base = 0;
unsigned long isa_mem_base = 0;
@@ -129,20 +126,21 @@ mpc834x_sys_setup_arch(void)
mdata->irq[1] = MPC83xx_IRQ_EXT2;
mdata->irq[2] = -1;
mdata->irq[31] = -1;
- mdata->paddr += binfo->bi_immr_base;
/* setup the board related information for the enet controllers */
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC83xx_TSEC1);
if (pdata) {
pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
- pdata->bus_id = GFAR_PHY_0;
+ pdata->bus_id = 0;
+ pdata->phy_id = 0;
memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
}
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC83xx_TSEC2);
if (pdata) {
pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
- pdata->bus_id = GFAR_PHY_1;
+ pdata->bus_id = 0;
+ pdata->phy_id = 1;
memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
}
diff --git a/arch/ppc/platforms/85xx/Kconfig b/arch/ppc/platforms/85xx/Kconfig
index c5bc2821d99..7ddd331a714 100644
--- a/arch/ppc/platforms/85xx/Kconfig
+++ b/arch/ppc/platforms/85xx/Kconfig
@@ -39,7 +39,7 @@ config MPC8560_ADS
config SBC8560
bool "WindRiver PowerQUICC III SBC8560"
help
- This option enables support for the WindRiver PowerQUICC III
+ This option enables support for the WindRiver PowerQUICC III
SBC8560 board.
config STX_GP3
@@ -48,6 +48,26 @@ config STX_GP3
This option enables support for the Silicon Turnkey Express GP3
board.
+config TQM8540
+ bool "TQ Components TQM8540"
+ help
+ This option enables support for the TQ Components TQM8540 board.
+
+config TQM8541
+ bool "TQ Components TQM8541"
+ help
+ This option enables support for the TQ Components TQM8541 board.
+
+config TQM8555
+ bool "TQ Components TQM8555"
+ help
+ This option enables support for the TQ Components TQM8555 board.
+
+config TQM8560
+ bool "TQ Components TQM8560"
+ help
+ This option enables support for the TQ Components TQM8560 board.
+
endchoice
# It's often necessary to know the specific 85xx processor type.
@@ -55,7 +75,7 @@ endchoice
# don't need to ask more redundant questions.
config MPC8540
bool
- depends on MPC8540_ADS
+ depends on MPC8540_ADS || TQM8540
default y
config MPC8548
@@ -65,12 +85,12 @@ config MPC8548
config MPC8555
bool
- depends on MPC8555_CDS
+ depends on MPC8555_CDS || TQM8541 || TQM8555
default y
config MPC8560
bool
- depends on SBC8560 || MPC8560_ADS || STX_GP3
+ depends on SBC8560 || MPC8560_ADS || STX_GP3 || TQM8560
default y
config 85xx_PCI2
diff --git a/arch/ppc/platforms/85xx/Makefile b/arch/ppc/platforms/85xx/Makefile
index efdf813108f..6c4753c144d 100644
--- a/arch/ppc/platforms/85xx/Makefile
+++ b/arch/ppc/platforms/85xx/Makefile
@@ -7,3 +7,7 @@ obj-$(CONFIG_MPC8555_CDS) += mpc85xx_cds_common.o
obj-$(CONFIG_MPC8560_ADS) += mpc85xx_ads_common.o mpc8560_ads.o
obj-$(CONFIG_SBC8560) += sbc85xx.o sbc8560.o
obj-$(CONFIG_STX_GP3) += stx_gp3.o
+obj-$(CONFIG_TQM8540) += tqm85xx.o
+obj-$(CONFIG_TQM8541) += tqm85xx.o
+obj-$(CONFIG_TQM8555) += tqm85xx.o
+obj-$(CONFIG_TQM8560) += tqm85xx.o
diff --git a/arch/ppc/platforms/85xx/mpc8540_ads.c b/arch/ppc/platforms/85xx/mpc8540_ads.c
index c5cde97c6ef..2eceb1e6f4e 100644
--- a/arch/ppc/platforms/85xx/mpc8540_ads.c
+++ b/arch/ppc/platforms/85xx/mpc8540_ads.c
@@ -52,10 +52,6 @@
#include <syslib/ppc85xx_setup.h>
-static const char *GFAR_PHY_0 = "phy0:0";
-static const char *GFAR_PHY_1 = "phy0:1";
-static const char *GFAR_PHY_3 = "phy0:3";
-
/* ************************************************************************
*
* Setup the architecture
@@ -102,27 +98,29 @@ mpc8540ads_setup_arch(void)
mdata->irq[2] = -1;
mdata->irq[3] = MPC85xx_IRQ_EXT5;
mdata->irq[31] = -1;
- mdata->paddr += binfo->bi_immr_base;
/* setup the board related information for the enet controllers */
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
if (pdata) {
pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
- pdata->bus_id = GFAR_PHY_0;
+ pdata->bus_id = 0;
+ pdata->phy_id = 0;
memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
}
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
if (pdata) {
pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
- pdata->bus_id = GFAR_PHY_1;
+ pdata->bus_id = 0;
+ pdata->phy_id = 1;
memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
}
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_FEC);
if (pdata) {
pdata->board_flags = 0;
- pdata->bus_id = GFAR_PHY_3;
+ pdata->bus_id = 0;
+ pdata->phy_id = 3;
memcpy(pdata->mac_addr, binfo->bi_enet2addr, 6);
}
diff --git a/arch/ppc/platforms/85xx/mpc8560_ads.c b/arch/ppc/platforms/85xx/mpc8560_ads.c
index 8e39a551709..442c7ff195d 100644
--- a/arch/ppc/platforms/85xx/mpc8560_ads.c
+++ b/arch/ppc/platforms/85xx/mpc8560_ads.c
@@ -56,10 +56,6 @@
#include <syslib/ppc85xx_setup.h>
-static const char *GFAR_PHY_0 = "phy0:0";
-static const char *GFAR_PHY_1 = "phy0:1";
-static const char *GFAR_PHY_3 = "phy0:3";
-
/* ************************************************************************
*
* Setup the architecture
@@ -99,20 +95,21 @@ mpc8560ads_setup_arch(void)
mdata->irq[2] = -1;
mdata->irq[3] = MPC85xx_IRQ_EXT5;
mdata->irq[31] = -1;
- mdata->paddr += binfo->bi_immr_base;
/* setup the board related information for the enet controllers */
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
if (pdata) {
pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
- pdata->bus_id = GFAR_PHY_0;
+ pdata->bus_id = 0;
+ pdata->phy_id = 0;
memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
}
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
if (pdata) {
pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
- pdata->bus_id = GFAR_PHY_1;
+ pdata->bus_id = 0;
+ pdata->phy_id = 1;
memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
}
diff --git a/arch/ppc/platforms/85xx/mpc85xx_cds_common.c b/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
index 5e8cc5ec6ab..b332ebae6bd 100644
--- a/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
+++ b/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
@@ -351,10 +351,10 @@ mpc85xx_cds_fixup_via(struct pci_controller *hose)
void __init
mpc85xx_cds_pcibios_fixup(void)
{
- struct pci_dev *dev = NULL;
+ struct pci_dev *dev;
u_char c;
- if ((dev = pci_find_device(PCI_VENDOR_ID_VIA,
+ if ((dev = pci_get_device(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_82C586_1, NULL))) {
/*
* U-Boot does not set the enable bits
@@ -371,30 +371,30 @@ mpc85xx_cds_pcibios_fixup(void)
*/
dev->irq = 14;
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+ pci_dev_put(dev);
}
/*
* Force legacy USB interrupt routing
*/
- if ((dev = pci_find_device(PCI_VENDOR_ID_VIA,
+ if ((dev = pci_get_device(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_82C586_2, NULL))) {
dev->irq = 10;
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 10);
+ pci_dev_put(dev);
}
- if ((dev = pci_find_device(PCI_VENDOR_ID_VIA,
+ if ((dev = pci_get_device(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_82C586_2, dev))) {
dev->irq = 11;
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11);
+ pci_dev_put(dev);
}
}
#endif /* CONFIG_PCI */
TODC_ALLOC();
-static const char *GFAR_PHY_0 = "phy0:0";
-static const char *GFAR_PHY_1 = "phy0:1";
-
/* ************************************************************************
*
* Setup the architecture
@@ -458,34 +458,37 @@ mpc85xx_cds_setup_arch(void)
mdata->irq[2] = -1;
mdata->irq[3] = -1;
mdata->irq[31] = -1;
- mdata->paddr += binfo->bi_immr_base;
/* setup the board related information for the enet controllers */
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
if (pdata) {
pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
- pdata->bus_id = GFAR_PHY_0;
+ pdata->bus_id = 0;
+ pdata->phy_id = 0;
memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
}
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
if (pdata) {
pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
- pdata->bus_id = GFAR_PHY_1;
+ pdata->bus_id = 0;
+ pdata->phy_id = 1;
memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
}
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_eTSEC1);
if (pdata) {
pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
- pdata->bus_id = GFAR_PHY_0;
+ pdata->bus_id = 0;
+ pdata->phy_id = 0;
memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
}
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_eTSEC2);
if (pdata) {
pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
- pdata->bus_id = GFAR_PHY_1;
+ pdata->bus_id = 0;
+ pdata->phy_id = 1;
memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
}
diff --git a/arch/ppc/platforms/85xx/sbc8560.c b/arch/ppc/platforms/85xx/sbc8560.c
index 45a5b81b4ed..e777ba824aa 100644
--- a/arch/ppc/platforms/85xx/sbc8560.c
+++ b/arch/ppc/platforms/85xx/sbc8560.c
@@ -91,9 +91,6 @@ sbc8560_early_serial_map(void)
}
#endif
-static const char *GFAR_PHY_25 = "phy0:25";
-static const char *GFAR_PHY_26 = "phy0:26";
-
/* ************************************************************************
*
* Setup the architecture
@@ -136,20 +133,21 @@ sbc8560_setup_arch(void)
mdata->irq[25] = MPC85xx_IRQ_EXT6;
mdata->irq[26] = MPC85xx_IRQ_EXT7;
mdata->irq[31] = -1;
- mdata->paddr += binfo->bi_immr_base;
/* setup the board related information for the enet controllers */
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
if (pdata) {
pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
- pdata->bus_id = GFAR_PHY_25;
+ pdata->bus_id = 0;
+ pdata->phy_id = 25;
memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
}
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
if (pdata) {
pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
- pdata->bus_id = GFAR_PHY_26;
+ pdata->bus_id = 0;
+ pdata->phy_id = 26;
memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
}
diff --git a/arch/ppc/platforms/85xx/stx_gp3.c b/arch/ppc/platforms/85xx/stx_gp3.c
index 15ce9d07063..061bb7cf2d9 100644
--- a/arch/ppc/platforms/85xx/stx_gp3.c
+++ b/arch/ppc/platforms/85xx/stx_gp3.c
@@ -93,9 +93,6 @@ static u8 gp3_openpic_initsenses[] __initdata = {
0x0, /* External 11: */
};
-static const char *GFAR_PHY_2 = "phy0:2";
-static const char *GFAR_PHY_4 = "phy0:4";
-
/*
* Setup the architecture
*/
@@ -130,20 +127,21 @@ gp3_setup_arch(void)
mdata->irq[2] = MPC85xx_IRQ_EXT5;
mdata->irq[4] = MPC85xx_IRQ_EXT5;
mdata->irq[31] = -1;
- mdata->paddr += binfo->bi_immr_base;
/* setup the board related information for the enet controllers */
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
if (pdata) {
/* pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; */
- pdata->bus_id = GFAR_PHY_2;
+ pdata->bus_id = 0;
+ pdata->phy_id = 2;
memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
}
pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
if (pdata) {
/* pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; */
- pdata->bus_id = GFAR_PHY_4;
+ pdata->bus_id = 0;
+ pdata->phy_id = 4;
memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
}
diff --git a/arch/ppc/platforms/85xx/tqm85xx.c b/arch/ppc/platforms/85xx/tqm85xx.c
new file mode 100644
index 00000000000..b436f4d0a3f
--- /dev/null
+++ b/arch/ppc/platforms/85xx/tqm85xx.c
@@ -0,0 +1,415 @@
+/*
+ * arch/ppc/platforms/85xx/tqm85xx.c
+ *
+ * TQM85xx (40/41/55/60) board specific routines
+ *
+ * Copyright (c) 2005 DENX Software Engineering
+ * Stefan Roese <sr@denx.de>
+ *
+ * Based on original work by
+ * Kumar Gala <galak@kernel.crashing.org>
+ * Copyright 2004 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/serial.h>
+#include <linux/tty.h> /* for linux/serial_core.h */
+#include <linux/serial_core.h>
+#include <linux/initrd.h>
+#include <linux/module.h>
+#include <linux/fsl_devices.h>
+
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/atomic.h>
+#include <asm/time.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/open_pic.h>
+#include <asm/bootinfo.h>
+#include <asm/pci-bridge.h>
+#include <asm/mpc85xx.h>
+#include <asm/irq.h>
+#include <asm/immap_85xx.h>
+#include <asm/kgdb.h>
+#include <asm/ppc_sys.h>
+#include <asm/cpm2.h>
+#include <mm/mmu_decl.h>
+
+#include <syslib/ppc85xx_setup.h>
+#include <syslib/cpm2_pic.h>
+#include <syslib/ppc85xx_common.h>
+#include <syslib/ppc85xx_rio.h>
+
+#ifndef CONFIG_PCI
+unsigned long isa_io_base = 0;
+unsigned long isa_mem_base = 0;
+#endif
+
+
+extern unsigned long total_memory; /* in mm/init */
+
+unsigned char __res[sizeof (bd_t)];
+
+/* Internal interrupts are all Level Sensitive, and Positive Polarity */
+static u_char tqm85xx_openpic_initsenses[] __initdata = {
+ MPC85XX_INTERNAL_IRQ_SENSES,
+ 0x0, /* External 0: */
+ 0x0, /* External 1: */
+#if defined(CONFIG_PCI)
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 2: PCI INTA */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 3: PCI INTB */
+#else
+ 0x0, /* External 2: */
+ 0x0, /* External 3: */
+#endif
+ 0x0, /* External 4: */
+ 0x0, /* External 5: */
+ 0x0, /* External 6: */
+ 0x0, /* External 7: */
+ (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 8: PHY */
+ 0x0, /* External 9: */
+ 0x0, /* External 10: */
+ 0x0, /* External 11: */
+};
+
+/* ************************************************************************
+ *
+ * Setup the architecture
+ *
+ */
+static void __init
+tqm85xx_setup_arch(void)
+{
+ bd_t *binfo = (bd_t *) __res;
+ unsigned int freq;
+ struct gianfar_platform_data *pdata;
+ struct gianfar_mdio_data *mdata;
+
+#ifdef CONFIG_MPC8560
+ cpm2_reset();
+#endif
+
+ /* get the core frequency */
+ freq = binfo->bi_intfreq;
+
+ if (ppc_md.progress)
+ ppc_md.progress("tqm85xx_setup_arch()", 0);
+
+ /* Set loops_per_jiffy to a half-way reasonable value,
+ for use until calibrate_delay gets called. */
+ loops_per_jiffy = freq / HZ;
+
+#ifdef CONFIG_PCI
+ /* setup PCI host bridges */
+ mpc85xx_setup_hose();
+#endif
+
+#ifndef CONFIG_MPC8560
+#if defined(CONFIG_SERIAL_8250)
+ mpc85xx_early_serial_map();
+#endif
+
+#ifdef CONFIG_SERIAL_TEXT_DEBUG
+ /* Invalidate the entry we stole earlier; the serial ports
+ * should be properly mapped */
+ invalidate_tlbcam_entry(num_tlbcam_entries - 1);
+#endif
+#endif /* CONFIG_MPC8560 */
+
+ /* setup the board related info for the MDIO bus */
+ mdata = (struct gianfar_mdio_data *) ppc_sys_get_pdata(MPC85xx_MDIO);
+
+ mdata->irq[0] = MPC85xx_IRQ_EXT8;
+ mdata->irq[1] = MPC85xx_IRQ_EXT8;
+ mdata->irq[2] = -1;
+ mdata->irq[3] = MPC85xx_IRQ_EXT8;
+ mdata->irq[31] = -1;
+
+ /* setup the board related information for the enet controllers */
+ pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
+ if (pdata) {
+ pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
+ pdata->bus_id = 0;
+ pdata->phy_id = 2;
+ memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
+ }
+
+ pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
+ if (pdata) {
+ pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
+ pdata->bus_id = 0;
+ pdata->phy_id = 1;
+ memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
+ }
+
+#ifdef CONFIG_MPC8540
+ pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_FEC);
+ if (pdata) {
+ pdata->board_flags = 0;
+ pdata->bus_id = 0;
+ pdata->phy_id = 3;
+ memcpy(pdata->mac_addr, binfo->bi_enet2addr, 6);
+ }
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start)
+ ROOT_DEV = Root_RAM0;
+ else
+#endif
+#ifdef CONFIG_ROOT_NFS
+ ROOT_DEV = Root_NFS;
+#else
+ ROOT_DEV = Root_HDA1;
+#endif
+}
+
+#ifdef CONFIG_MPC8560
+static irqreturn_t cpm2_cascade(int irq, void *dev_id, struct pt_regs *regs)
+{
+ while ((irq = cpm2_get_irq(regs)) >= 0)
+ __do_IRQ(irq, regs);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction cpm2_irqaction = {
+ .handler = cpm2_cascade,
+ .flags = SA_INTERRUPT,
+ .mask = CPU_MASK_NONE,
+ .name = "cpm2_cascade",
+};
+#endif /* CONFIG_MPC8560 */
+
+void __init
+tqm85xx_init_IRQ(void)
+{
+ bd_t *binfo = (bd_t *) __res;
+
+ /* Determine the Physical Address of the OpenPIC regs */
+ phys_addr_t OpenPIC_PAddr =
+ binfo->bi_immr_base + MPC85xx_OPENPIC_OFFSET;
+ OpenPIC_Addr = ioremap(OpenPIC_PAddr, MPC85xx_OPENPIC_SIZE);
+ OpenPIC_InitSenses = tqm85xx_openpic_initsenses;
+ OpenPIC_NumInitSenses = sizeof (tqm85xx_openpic_initsenses);
+
+ /* Skip reserved space and internal sources */
+ openpic_set_sources(0, 32, OpenPIC_Addr + 0x10200);
+
+ /* Map PIC IRQs 0-11 */
+ openpic_set_sources(48, 12, OpenPIC_Addr + 0x10000);
+
+ /* we let openpic interrupts start from an offset, to
+ * leave space for cascading interrupts underneath.
+ */
+ openpic_init(MPC85xx_OPENPIC_IRQ_OFFSET);
+
+#ifdef CONFIG_MPC8560
+ /* Setup CPM2 PIC */
+ cpm2_init_IRQ();
+
+ setup_irq(MPC85xx_IRQ_CPM, &cpm2_irqaction);
+#endif /* CONFIG_MPC8560 */
+
+ return;
+}
+
+int tqm85xx_show_cpuinfo(struct seq_file *m)
+{
+ uint pvid, svid, phid1;
+ uint memsize = total_memory;
+ bd_t *binfo = (bd_t *) __res;
+ unsigned int freq;
+
+ /* get the core frequency */
+ freq = binfo->bi_intfreq;
+
+ pvid = mfspr(SPRN_PVR);
+ svid = mfspr(SPRN_SVR);
+
+ seq_printf(m, "Vendor\t\t: TQ Components\n");
+ seq_printf(m, "Machine\t\t: TQM%s\n", cur_ppc_sys_spec->ppc_sys_name);
+ seq_printf(m, "clock\t\t: %dMHz\n", freq / 1000000);
+ seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
+ seq_printf(m, "SVR\t\t: 0x%x\n", svid);
+
+ /* Display cpu Pll setting */
+ phid1 = mfspr(SPRN_HID1);
+ seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
+
+ /* Display the amount of memory */
+ seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
+
+ return 0;
+}
+
+#if defined(CONFIG_I2C) && defined(CONFIG_SENSORS_DS1337)
+extern ulong ds1337_get_rtc_time(void);
+extern int ds1337_set_rtc_time(unsigned long nowtime);
+
+static int __init
+tqm85xx_rtc_hookup(void)
+{
+ struct timespec tv;
+
+ ppc_md.set_rtc_time = ds1337_set_rtc_time;
+ ppc_md.get_rtc_time = ds1337_get_rtc_time;
+
+ tv.tv_nsec = 0;
+ tv.tv_sec = (ppc_md.get_rtc_time)();
+ do_settimeofday(&tv);
+
+ return 0;
+}
+late_initcall(tqm85xx_rtc_hookup);
+#endif
+
+#ifdef CONFIG_PCI
+/*
+ * interrupt routing
+ */
+int mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
+{
+ static char pci_irq_table[][4] =
+ /*
+ * PCI IDSEL/INTPIN->INTLINE
+ * A B C D
+ */
+ {
+ {PIRQA, PIRQB, 0, 0},
+ };
+
+ const long min_idsel = 0x1c, max_idsel = 0x1c, irqs_per_slot = 4;
+ return PCI_IRQ_TABLE_LOOKUP;
+}
+
+int mpc85xx_exclude_device(u_char bus, u_char devfn)
+{
+ if (bus == 0 && PCI_SLOT(devfn) == 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ else
+ return PCIBIOS_SUCCESSFUL;
+}
+
+#endif /* CONFIG_PCI */
+
+#ifdef CONFIG_RAPIDIO
+void platform_rio_init(void)
+{
+ /* 512MB RIO LAW at 0xc0000000 */
+ mpc85xx_rio_setup(0xc0000000, 0x20000000);
+}
+#endif /* CONFIG_RAPIDIO */
+
+/* ************************************************************************ */
+void __init
+platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7)
+{
+ /* parse_bootinfo must always be called first */
+ parse_bootinfo(find_bootinfo());
+
+ /*
+ * If we were passed in a board information, copy it into the
+ * residual data area.
+ */
+ if (r3) {
+ memcpy((void *) __res, (void *) (r3 + KERNELBASE),
+ sizeof (bd_t));
+ }
+
+#if defined(CONFIG_SERIAL_TEXT_DEBUG) && !defined(CONFIG_MPC8560)
+ {
+ bd_t *binfo = (bd_t *) __res;
+ struct uart_port p;
+
+ /* Use the last TLB entry to map CCSRBAR to allow access to DUART regs */
+ settlbcam(num_tlbcam_entries - 1, binfo->bi_immr_base,
+ binfo->bi_immr_base, MPC85xx_CCSRBAR_SIZE, _PAGE_IO, 0);
+
+ memset(&p, 0, sizeof (p));
+ p.iotype = SERIAL_IO_MEM;
+ p.membase = (void *) binfo->bi_immr_base + MPC85xx_UART0_OFFSET;
+ p.uartclk = binfo->bi_busfreq;
+
+ gen550_init(0, &p);
+
+ memset(&p, 0, sizeof (p));
+ p.iotype = SERIAL_IO_MEM;
+ p.membase = (void *) binfo->bi_immr_base + MPC85xx_UART1_OFFSET;
+ p.uartclk = binfo->bi_busfreq;
+
+ gen550_init(1, &p);
+ }
+#endif
+
+#if defined(CONFIG_BLK_DEV_INITRD)
+ /*
+ * If the init RAM disk has been configured in, and there's a valid
+ * starting address for it, set it up.
+ */
+ if (r4) {
+ initrd_start = r4 + KERNELBASE;
+ initrd_end = r5 + KERNELBASE;
+ }
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+ /* Copy the kernel command line arguments to a safe place. */
+
+ if (r6) {
+ *(char *) (r7 + KERNELBASE) = 0;
+ strcpy(cmd_line, (char *) (r6 + KERNELBASE));
+ }
+
+ identify_ppc_sys_by_id(mfspr(SPRN_SVR));
+
+ /* setup the PowerPC module struct */
+ ppc_md.setup_arch = tqm85xx_setup_arch;
+ ppc_md.show_cpuinfo = tqm85xx_show_cpuinfo;
+
+ ppc_md.init_IRQ = tqm85xx_init_IRQ;
+ ppc_md.get_irq = openpic_get_irq;
+
+ ppc_md.restart = mpc85xx_restart;
+ ppc_md.power_off = mpc85xx_power_off;
+ ppc_md.halt = mpc85xx_halt;
+
+ ppc_md.find_end_of_memory = mpc85xx_find_end_of_memory;
+
+ ppc_md.time_init = NULL;
+ ppc_md.set_rtc_time = NULL;
+ ppc_md.get_rtc_time = NULL;
+ ppc_md.calibrate_decr = mpc85xx_calibrate_decr;
+
+#ifndef CONFIG_MPC8560
+#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_SERIAL_TEXT_DEBUG)
+ ppc_md.progress = gen550_progress;
+#endif /* CONFIG_SERIAL_8250 && CONFIG_SERIAL_TEXT_DEBUG */
+#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_KGDB)
+ ppc_md.early_serial_map = mpc85xx_early_serial_map;
+#endif /* CONFIG_SERIAL_8250 && CONFIG_KGDB */
+#endif /* CONFIG_MPC8560 */
+
+ if (ppc_md.progress)
+ ppc_md.progress("tqm85xx_init(): exit", 0);
+
+ return;
+}
diff --git a/arch/ppc/platforms/85xx/tqm85xx.h b/arch/ppc/platforms/85xx/tqm85xx.h
new file mode 100644
index 00000000000..3775eb363fd
--- /dev/null
+++ b/arch/ppc/platforms/85xx/tqm85xx.h
@@ -0,0 +1,56 @@
+/*
+ * arch/ppc/platforms/85xx/tqm85xx.h
+ *
+ * TQM85xx (40/41/55/60) board definitions
+ *
+ * Copyright (c) 2005 DENX Software Engineering
+ * Stefan Roese <sr@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MACH_TQM85XX_H
+#define __MACH_TQM85XX_H
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <asm/ppcboot.h>
+
+#define BOARD_CCSRBAR ((uint)0xe0000000)
+#define CCSRBAR_SIZE ((uint)1024*1024)
+
+#define CPM_MAP_ADDR (CCSRBAR + MPC85xx_CPM_OFFSET)
+
+#define PCI_CFG_ADDR_OFFSET (0x8000)
+#define PCI_CFG_DATA_OFFSET (0x8004)
+
+/* PCI interrupt controller */
+#define PIRQA MPC85xx_IRQ_EXT2
+#define PIRQB MPC85xx_IRQ_EXT3
+
+#define MPC85XX_PCI1_LOWER_IO 0x00000000
+#define MPC85XX_PCI1_UPPER_IO 0x00ffffff
+
+#define MPC85XX_PCI1_LOWER_MEM 0x80000000
+#define MPC85XX_PCI1_UPPER_MEM 0x9fffffff
+
+#define MPC85XX_PCI1_IO_BASE 0xe2000000
+#define MPC85XX_PCI1_MEM_OFFSET 0x00000000
+
+#define MPC85XX_PCI1_IO_SIZE 0x01000000
+
+#define BASE_BAUD 115200
+
+extern void mpc85xx_setup_hose(void) __init;
+extern void mpc85xx_restart(char *cmd);
+extern void mpc85xx_power_off(void);
+extern void mpc85xx_halt(void);
+extern void mpc85xx_init_IRQ(void) __init;
+extern unsigned long mpc85xx_find_end_of_memory(void) __init;
+extern void mpc85xx_calibrate_decr(void) __init;
+
+#endif /* __MACH_TQM85XX_H */
diff --git a/arch/ppc/platforms/apus_setup.c b/arch/ppc/platforms/apus_setup.c
index 2f74fde98eb..c42c50073da 100644
--- a/arch/ppc/platforms/apus_setup.c
+++ b/arch/ppc/platforms/apus_setup.c
@@ -55,9 +55,6 @@ int (*mach_hwclk) (int, struct hwclk_time*) = NULL;
int (*mach_set_clock_mmss) (unsigned long) = NULL;
void (*mach_reset)( void );
long mach_max_dma_address = 0x00ffffff; /* default set to the lower 16MB */
-#if defined(CONFIG_AMIGA_FLOPPY)
-void (*mach_floppy_setup) (char *, int *) __initdata = NULL;
-#endif
#ifdef CONFIG_HEARTBEAT
void (*mach_heartbeat) (int) = NULL;
extern void apus_heartbeat (void);
@@ -76,7 +73,6 @@ struct mem_info m68k_memory[NUM_MEMINFO];/* memory description */
struct mem_info ramdisk;
-extern void amiga_floppy_setup(char *, int *);
extern void config_amiga(void);
static int __60nsram = 0;
@@ -305,16 +301,6 @@ void kbd_reset_setup(char *str, int *ints)
{
}
-/*********************************************************** FLOPPY */
-#if defined(CONFIG_AMIGA_FLOPPY)
-__init
-void floppy_setup(char *str, int *ints)
-{
- if (mach_floppy_setup)
- mach_floppy_setup (str, ints);
-}
-#endif
-
/*********************************************************** MEMORY */
#define KMAP_MAX 32
unsigned long kmap_chunks[KMAP_MAX*3];
@@ -574,9 +560,9 @@ static __inline__ void ser_RTSon(void)
int __debug_ser_out( unsigned char c )
{
- custom.serdat = c | 0x100;
+ amiga_custom.serdat = c | 0x100;
mb();
- while (!(custom.serdatr & 0x2000))
+ while (!(amiga_custom.serdatr & 0x2000))
barrier();
return 1;
}
@@ -586,11 +572,11 @@ unsigned char __debug_ser_in( void )
unsigned char c;
/* XXX: is that ok?? derived from amiga_ser.c... */
- while( !(custom.intreqr & IF_RBF) )
+ while( !(amiga_custom.intreqr & IF_RBF) )
barrier();
- c = custom.serdatr;
+ c = amiga_custom.serdatr;
/* clear the interrupt, so that another character can be read */
- custom.intreq = IF_RBF;
+ amiga_custom.intreq = IF_RBF;
return c;
}
@@ -601,10 +587,10 @@ int __debug_serinit( void )
local_irq_save(flags);
/* turn off Rx and Tx interrupts */
- custom.intena = IF_RBF | IF_TBE;
+ amiga_custom.intena = IF_RBF | IF_TBE;
/* clear any pending interrupt */
- custom.intreq = IF_RBF | IF_TBE;
+ amiga_custom.intreq = IF_RBF | IF_TBE;
local_irq_restore(flags);
@@ -617,7 +603,7 @@ int __debug_serinit( void )
#ifdef CONFIG_KGDB
/* turn Rx interrupts on for GDB */
- custom.intena = IF_SETCLR | IF_RBF;
+ amiga_custom.intena = IF_SETCLR | IF_RBF;
ser_RTSon();
#endif
diff --git a/arch/ppc/platforms/chrp_setup.c b/arch/ppc/platforms/chrp_setup.c
index f1b70ab3c6f..056ac2a7b5c 100644
--- a/arch/ppc/platforms/chrp_setup.c
+++ b/arch/ppc/platforms/chrp_setup.c
@@ -404,7 +404,6 @@ static struct irqaction xmon_irqaction = {
void __init chrp_init_IRQ(void)
{
struct device_node *np;
- int i;
unsigned long chrp_int_ack = 0;
unsigned char init_senses[NR_IRQS - NUM_8259_INTERRUPTS];
#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON)
diff --git a/arch/ppc/platforms/lite5200.c b/arch/ppc/platforms/lite5200.c
index d44cc991179..7ed52dc340c 100644
--- a/arch/ppc/platforms/lite5200.c
+++ b/arch/ppc/platforms/lite5200.c
@@ -196,8 +196,10 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
mpc52xx_set_bat();
/* No ISA bus by default */
+#ifdef CONFIG_PCI
isa_io_base = 0;
isa_mem_base = 0;
+#endif
/* Powersave */
/* This is provided as an example on how to do it. But you
diff --git a/arch/ppc/platforms/mpc5200.c b/arch/ppc/platforms/mpc5200.c
deleted file mode 100644
index a58db438c16..00000000000
--- a/arch/ppc/platforms/mpc5200.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * arch/ppc/platforms/mpc5200.c
- *
- * OCP Definitions for the boards based on MPC5200 processor. Contains
- * definitions for every common peripherals. (Mostly all but PSCs)
- *
- * Maintainer : Sylvain Munaut <tnt@246tNt.com>
- *
- * Copyright 2004 Sylvain Munaut <tnt@246tNt.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <asm/ocp.h>
-#include <asm/mpc52xx.h>
-
-
-static struct ocp_fs_i2c_data mpc5200_i2c_def = {
- .flags = FS_I2C_CLOCK_5200,
-};
-
-
-/* Here is the core_ocp struct.
- * With all the devices common to all board. Even if port multiplexing is
- * not setup for them (if the user don't want them, just don't select the
- * config option). The potentially conflicting devices (like PSCs) goes in
- * board specific file.
- */
-struct ocp_def core_ocp[] = {
- {
- .vendor = OCP_VENDOR_FREESCALE,
- .function = OCP_FUNC_IIC,
- .index = 0,
- .paddr = MPC52xx_I2C1,
- .irq = OCP_IRQ_NA, /* MPC52xx_IRQ_I2C1 - Buggy */
- .pm = OCP_CPM_NA,
- .additions = &mpc5200_i2c_def,
- },
- {
- .vendor = OCP_VENDOR_FREESCALE,
- .function = OCP_FUNC_IIC,
- .index = 1,
- .paddr = MPC52xx_I2C2,
- .irq = OCP_IRQ_NA, /* MPC52xx_IRQ_I2C2 - Buggy */
- .pm = OCP_CPM_NA,
- .additions = &mpc5200_i2c_def,
- },
- { /* Terminating entry */
- .vendor = OCP_VENDOR_INVALID
- }
-};
diff --git a/arch/ppc/platforms/prep_setup.c b/arch/ppc/platforms/prep_setup.c
index 4415748071d..d0653580200 100644
--- a/arch/ppc/platforms/prep_setup.c
+++ b/arch/ppc/platforms/prep_setup.c
@@ -72,7 +72,6 @@
TODC_ALLOC();
-unsigned char ucSystemType;
unsigned char ucBoardRev;
unsigned char ucBoardRevMaj, ucBoardRevMin;
@@ -954,7 +953,6 @@ prep_calibrate_decr(void)
static void __init
prep_init_IRQ(void)
{
- int i;
unsigned int pci_viddid, pci_did;
if (OpenPIC_Addr != NULL) {
diff --git a/arch/ppc/syslib/Makefile b/arch/ppc/syslib/Makefile
index 5b7f2b80e56..84ef03018d0 100644
--- a/arch/ppc/syslib/Makefile
+++ b/arch/ppc/syslib/Makefile
@@ -96,7 +96,7 @@ ifeq ($(CONFIG_85xx),y)
obj-$(CONFIG_PCI) += pci_auto.o
endif
obj-$(CONFIG_RAPIDIO) += ppc85xx_rio.o
-obj-$(CONFIG_83xx) += ipic.o ppc83xx_setup.o ppc_sys.o \
+obj-$(CONFIG_83xx) += ppc83xx_setup.o ppc_sys.o \
mpc83xx_sys.o mpc83xx_devices.o
ifeq ($(CONFIG_83xx),y)
obj-$(CONFIG_PCI) += pci_auto.o
diff --git a/arch/ppc/syslib/m8xx_setup.c b/arch/ppc/syslib/m8xx_setup.c
index 1cc3abe6fa4..688616de3cd 100644
--- a/arch/ppc/syslib/m8xx_setup.c
+++ b/arch/ppc/syslib/m8xx_setup.c
@@ -135,6 +135,16 @@ static struct irqaction tbint_irqaction = {
.name = "tbint",
};
+/* per-board overridable init_internal_rtc() function. */
+void __init __attribute__ ((weak))
+init_internal_rtc(void)
+{
+ /* Disable the RTC one second and alarm interrupts. */
+ out_be16(&((immap_t *)IMAP_ADDR)->im_sit.sit_rtcsc, in_be16(&((immap_t *)IMAP_ADDR)->im_sit.sit_rtcsc) & ~(RTCSC_SIE | RTCSC_ALE));
+ /* Enable the RTC */
+ out_be16(&((immap_t *)IMAP_ADDR)->im_sit.sit_rtcsc, in_be16(&((immap_t *)IMAP_ADDR)->im_sit.sit_rtcsc) | (RTCSC_RTF | RTCSC_RTE));
+}
+
/* The decrementer counts at the system (internal) clock frequency divided by
* sixteen, or external oscillator divided by four. We force the processor
* to use system clock divided by sixteen.
@@ -183,10 +193,7 @@ void __init m8xx_calibrate_decr(void)
out_be32(&((immap_t *)IMAP_ADDR)->im_sitk.sitk_rtcsck, KAPWR_KEY);
out_be32(&((immap_t *)IMAP_ADDR)->im_sitk.sitk_tbk, KAPWR_KEY);
- /* Disable the RTC one second and alarm interrupts. */
- out_be16(&((immap_t *)IMAP_ADDR)->im_sit.sit_rtcsc, in_be16(&((immap_t *)IMAP_ADDR)->im_sit.sit_rtcsc) & ~(RTCSC_SIE | RTCSC_ALE));
- /* Enable the RTC */
- out_be16(&((immap_t *)IMAP_ADDR)->im_sit.sit_rtcsc, in_be16(&((immap_t *)IMAP_ADDR)->im_sit.sit_rtcsc) | (RTCSC_RTF | RTCSC_RTE));
+ init_internal_rtc();
/* Enabling the decrementer also enables the timebase interrupts
* (or from the other point of view, to get decrementer interrupts
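The init_internal_rtc() added above is marked __attribute__((weak)), so a board port can provide its own strong definition and the linker will use it instead of this default. A minimal sketch of such an override (hypothetical board file, not part of this patch):

#include <linux/init.h>

/* Hypothetical board-specific override: a non-weak definition replaces
 * the default init_internal_rtc() at link time, e.g. to leave the
 * on-chip RTC untouched because an external RTC is used instead. */
void __init init_internal_rtc(void)
{
	/* intentionally empty */
}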
diff --git a/arch/ppc/syslib/m8xx_wdt.c b/arch/ppc/syslib/m8xx_wdt.c
index a21632d37e5..df6c9557b86 100644
--- a/arch/ppc/syslib/m8xx_wdt.c
+++ b/arch/ppc/syslib/m8xx_wdt.c
@@ -19,6 +19,7 @@
#include <syslib/m8xx_wdt.h>
static int wdt_timeout;
+int m8xx_has_internal_rtc = 0;
static irqreturn_t m8xx_wdt_interrupt(int, void *, struct pt_regs *);
static struct irqaction m8xx_wdt_irqaction = {
@@ -45,35 +46,15 @@ static irqreturn_t m8xx_wdt_interrupt(int irq, void *dev, struct pt_regs *regs)
return IRQ_HANDLED;
}
-void __init m8xx_wdt_handler_install(bd_t * binfo)
+#define SYPCR_SWP 0x1
+#define SYPCR_SWE 0x4
+
+
+void __init m8xx_wdt_install_irq(volatile immap_t *imap, bd_t *binfo)
{
- volatile immap_t *imap = (volatile immap_t *)IMAP_ADDR;
u32 pitc;
- u32 sypcr;
u32 pitrtclk;
- sypcr = in_be32(&imap->im_siu_conf.sc_sypcr);
-
- if (!(sypcr & 0x04)) {
- printk(KERN_NOTICE "m8xx_wdt: wdt disabled (SYPCR: 0x%08X)\n",
- sypcr);
- return;
- }
-
- m8xx_wdt_reset();
-
- printk(KERN_NOTICE
- "m8xx_wdt: active wdt found (SWTC: 0x%04X, SWP: 0x%01X)\n",
- (sypcr >> 16), sypcr & 0x01);
-
- wdt_timeout = (sypcr >> 16) & 0xFFFF;
-
- if (!wdt_timeout)
- wdt_timeout = 0xFFFF;
-
- if (sypcr & 0x01)
- wdt_timeout *= 2048;
-
/*
* Fire trigger if half of the wdt ticked down
*/
@@ -98,6 +79,67 @@ void __init m8xx_wdt_handler_install(bd_t * binfo)
printk(KERN_NOTICE
"m8xx_wdt: keep-alive trigger installed (PITC: 0x%04X)\n", pitc);
+}
+
+static void m8xx_wdt_timer_func(unsigned long data);
+
+static struct timer_list m8xx_wdt_timer =
+ TIMER_INITIALIZER(m8xx_wdt_timer_func, 0, 0);
+
+void m8xx_wdt_stop_timer(void)
+{
+ del_timer(&m8xx_wdt_timer);
+}
+
+void m8xx_wdt_install_timer(void)
+{
+ m8xx_wdt_timer.expires = jiffies + (HZ/2);
+ add_timer(&m8xx_wdt_timer);
+}
+
+static void m8xx_wdt_timer_func(unsigned long data)
+{
+ m8xx_wdt_reset();
+ m8xx_wdt_install_timer();
+}
+
+void __init m8xx_wdt_handler_install(bd_t * binfo)
+{
+ volatile immap_t *imap = (volatile immap_t *)IMAP_ADDR;
+ u32 sypcr;
+
+ sypcr = in_be32(&imap->im_siu_conf.sc_sypcr);
+
+ if (!(sypcr & SYPCR_SWE)) {
+ printk(KERN_NOTICE "m8xx_wdt: wdt disabled (SYPCR: 0x%08X)\n",
+ sypcr);
+ return;
+ }
+
+ m8xx_wdt_reset();
+
+ printk(KERN_NOTICE
+ "m8xx_wdt: active wdt found (SWTC: 0x%04X, SWP: 0x%01X)\n",
+ (sypcr >> 16), sypcr & SYPCR_SWP);
+
+ wdt_timeout = (sypcr >> 16) & 0xFFFF;
+
+ if (!wdt_timeout)
+ wdt_timeout = 0xFFFF;
+
+ if (sypcr & SYPCR_SWP)
+ wdt_timeout *= 2048;
+
+ m8xx_has_internal_rtc = in_be16(&imap->im_sit.sit_rtcsc) & RTCSC_RTE;
+
+ /* if the internal RTC is off use a kernel timer */
+ if (!m8xx_has_internal_rtc) {
+ if (wdt_timeout < (binfo->bi_intfreq/HZ))
+ printk(KERN_ERR "m8xx_wdt: timeout too short for ktimer!\n");
+ m8xx_wdt_install_timer();
+ } else
+ m8xx_wdt_install_irq(imap, binfo);
+
wdt_timeout /= binfo->bi_intfreq;
}
diff --git a/arch/ppc/syslib/m8xx_wdt.h b/arch/ppc/syslib/m8xx_wdt.h
index 0d81a9f8155..e75835f0012 100644
--- a/arch/ppc/syslib/m8xx_wdt.h
+++ b/arch/ppc/syslib/m8xx_wdt.h
@@ -9,8 +9,12 @@
#ifndef _PPC_SYSLIB_M8XX_WDT_H
#define _PPC_SYSLIB_M8XX_WDT_H
+extern int m8xx_has_internal_rtc;
+
extern void m8xx_wdt_handler_install(bd_t * binfo);
extern int m8xx_wdt_get_timeout(void);
extern void m8xx_wdt_reset(void);
+extern void m8xx_wdt_install_timer(void);
+extern void m8xx_wdt_stop_timer(void);
#endif /* _PPC_SYSLIB_M8XX_WDT_H */
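m8xx_wdt_install_timer()/m8xx_wdt_stop_timer() and m8xx_has_internal_rtc are exported so that other code can pause the kernel-timer keep-alive set up above and feed the watchdog itself. A hedged sketch of such a consumer (hypothetical, not part of this patch):

#include <syslib/m8xx_wdt.h>

/* Hypothetical consumer: stop the keep-alive timer while servicing the
 * watchdog directly, then re-arm it. Only relevant when the internal RTC
 * is off and the keep-alive therefore runs from the kernel timer. */
static void example_service_wdt(void)
{
	if (!m8xx_has_internal_rtc)
		m8xx_wdt_stop_timer();
	m8xx_wdt_reset();	/* feed the watchdog directly */
	if (!m8xx_has_internal_rtc)
		m8xx_wdt_install_timer();
}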
diff --git a/arch/ppc/syslib/mpc52xx_pci.c b/arch/ppc/syslib/mpc52xx_pci.c
index 4ac19080eb8..313c96ec7eb 100644
--- a/arch/ppc/syslib/mpc52xx_pci.c
+++ b/arch/ppc/syslib/mpc52xx_pci.c
@@ -24,6 +24,12 @@
#include <asm/machdep.h>
+/* This macro is defined to activate the workaround for bug
+ 435 of the MPC5200 (L25R). With it activated, we don't do any
+ 32-bit configuration accesses during type-1 cycles */
+#define MPC5200_BUG_435_WORKAROUND
+
+
static int
mpc52xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
@@ -40,17 +46,39 @@ mpc52xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
((bus->number - hose->bus_offset) << 16) |
(devfn << 8) |
(offset & 0xfc));
+ mb();
+
+#ifdef MPC5200_BUG_435_WORKAROUND
+ if (bus->number != hose->bus_offset) {
+ switch (len) {
+ case 1:
+ value = in_8(((u8 __iomem *)hose->cfg_data) + (offset & 3));
+ break;
+ case 2:
+ value = in_le16(((u16 __iomem *)hose->cfg_data) + ((offset>>1) & 1));
+ break;
+
+ default:
+ value = in_le16((u16 __iomem *)hose->cfg_data) |
+ (in_le16(((u16 __iomem *)hose->cfg_data) + 1) << 16);
+ break;
+ }
+ }
+ else
+#endif
+ {
+ value = in_le32(hose->cfg_data);
- value = in_le32(hose->cfg_data);
-
- if (len != 4) {
- value >>= ((offset & 0x3) << 3);
- value &= 0xffffffff >> (32 - (len << 3));
+ if (len != 4) {
+ value >>= ((offset & 0x3) << 3);
+ value &= 0xffffffff >> (32 - (len << 3));
+ }
}
*val = value;
out_be32(hose->cfg_addr, 0);
+ mb();
return PCIBIOS_SUCCESSFUL;
}
@@ -71,21 +99,48 @@ mpc52xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
((bus->number - hose->bus_offset) << 16) |
(devfn << 8) |
(offset & 0xfc));
+ mb();
+
+#ifdef MPC5200_BUG_435_WORKAROUND
+ if (bus->number != hose->bus_offset) {
+ switch (len) {
+ case 1:
+ out_8(((u8 __iomem *)hose->cfg_data) +
+ (offset & 3), val);
+ break;
+ case 2:
+ out_le16(((u16 __iomem *)hose->cfg_data) +
+ ((offset>>1) & 1), val);
+ break;
+
+ default:
+ out_le16((u16 __iomem *)hose->cfg_data,
+ (u16)val);
+ out_le16(((u16 __iomem *)hose->cfg_data) + 1,
+ (u16)(val>>16));
+ break;
+ }
+ }
+ else
+#endif
+ {
+ if (len != 4) {
+ value = in_le32(hose->cfg_data);
- if (len != 4) {
- value = in_le32(hose->cfg_data);
+ offset = (offset & 0x3) << 3;
+ mask = (0xffffffff >> (32 - (len << 3)));
+ mask <<= offset;
- offset = (offset & 0x3) << 3;
- mask = (0xffffffff >> (32 - (len << 3)));
- mask <<= offset;
+ value &= ~mask;
+ val = value | ((val << offset) & mask);
+ }
- value &= ~mask;
- val = value | ((val << offset) & mask);
+ out_le32(hose->cfg_data, val);
}
-
- out_le32(hose->cfg_data, val);
+ mb();
out_be32(hose->cfg_addr, 0);
+ mb();
return PCIBIOS_SUCCESSFUL;
}
@@ -99,9 +154,12 @@ static struct pci_ops mpc52xx_pci_ops = {
static void __init
mpc52xx_pci_setup(struct mpc52xx_pci __iomem *pci_regs)
{
+ u32 tmp;
/* Setup control regs */
- /* Nothing to do afaik */
+ tmp = in_be32(&pci_regs->scr);
+ tmp |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
+ out_be32(&pci_regs->scr, tmp);
/* Setup windows */
out_be32(&pci_regs->iw0btar, MPC52xx_PCI_IWBTAR_TRANSLATION(
@@ -142,16 +200,15 @@ mpc52xx_pci_setup(struct mpc52xx_pci __iomem *pci_regs)
/* Not necessary and can be a bad thing if for example the bootloader
is displaying a splash screen or ... Just left here for
documentation purpose if anyone need it */
-#if 0
- u32 tmp;
tmp = in_be32(&pci_regs->gscr);
+#if 0
out_be32(&pci_regs->gscr, tmp | MPC52xx_PCI_GSCR_PR);
udelay(50);
- out_be32(&pci_regs->gscr, tmp);
#endif
+ out_be32(&pci_regs->gscr, tmp & ~MPC52xx_PCI_GSCR_PR);
}
-static void __init
+static void
mpc52xx_pci_fixup_resources(struct pci_dev *dev)
{
int i;
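The bug-435 workaround above never issues 32-bit configuration cycles behind the host bridge (type-1 cycles, i.e. bus->number != hose->bus_offset); it splits them into 8- or 16-bit accesses instead. Purely as an illustration of the reassembly the read path's default case performs (same logic as the patch, not additional code):

#include <asm/io.h>

/* Illustration only: rebuild a 32-bit config value from two
 * little-endian 16-bit reads, as the workaround's default case does. */
static inline u32 cfg_read32_split(u16 __iomem *cfg_data)
{
	return in_le16(cfg_data) | (in_le16(cfg_data + 1) << 16);
}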
diff --git a/arch/ppc/syslib/mpc52xx_setup.c b/arch/ppc/syslib/mpc52xx_setup.c
index bb2374585a7..a4a4b02227d 100644
--- a/arch/ppc/syslib/mpc52xx_setup.c
+++ b/arch/ppc/syslib/mpc52xx_setup.c
@@ -84,9 +84,11 @@ mpc52xx_set_bat(void)
void __init
mpc52xx_map_io(void)
{
- /* Here we only map the MBAR */
+ /* Here we map the MBAR and the whole upper zone. MBAR is only
+ 64k but we can't map only 64k with BATs. Mapping the whole
+ 0xf0000000 range is fine and helps any LocalPlus bus devices placed there */
io_block_mapping(
- MPC52xx_MBAR_VIRT, MPC52xx_MBAR, MPC52xx_MBAR_SIZE, _PAGE_IO);
+ MPC52xx_MBAR_VIRT, MPC52xx_MBAR, 0x10000000, _PAGE_IO);
}
diff --git a/arch/ppc/syslib/mpc83xx_devices.c b/arch/ppc/syslib/mpc83xx_devices.c
index 847df440998..f9b95de70e2 100644
--- a/arch/ppc/syslib/mpc83xx_devices.c
+++ b/arch/ppc/syslib/mpc83xx_devices.c
@@ -28,7 +28,6 @@
*/
struct gianfar_mdio_data mpc83xx_mdio_pdata = {
- .paddr = 0x24520,
};
static struct gianfar_platform_data mpc83xx_tsec1_pdata = {
@@ -226,7 +225,14 @@ struct platform_device ppc_sys_platform_devices[] = {
.name = "fsl-gianfar_mdio",
.id = 0,
.dev.platform_data = &mpc83xx_mdio_pdata,
- .num_resources = 0,
+ .num_resources = 1,
+ .resource = (struct resource[]) {
+ {
+ .start = 0x24520,
+ .end = 0x2453f,
+ .flags = IORESOURCE_MEM,
+ },
+ },
},
};
diff --git a/arch/ppc/syslib/mpc85xx_devices.c b/arch/ppc/syslib/mpc85xx_devices.c
index 69949d25565..00e9b6ff2f6 100644
--- a/arch/ppc/syslib/mpc85xx_devices.c
+++ b/arch/ppc/syslib/mpc85xx_devices.c
@@ -26,7 +26,6 @@
* what CCSRBAR is, will get fixed up by mach_mpc85xx_fixup
*/
struct gianfar_mdio_data mpc85xx_mdio_pdata = {
- .paddr = MPC85xx_MIIM_OFFSET,
};
static struct gianfar_platform_data mpc85xx_tsec1_pdata = {
@@ -720,7 +719,14 @@ struct platform_device ppc_sys_platform_devices[] = {
.name = "fsl-gianfar_mdio",
.id = 0,
.dev.platform_data = &mpc85xx_mdio_pdata,
- .num_resources = 0,
+ .num_resources = 1,
+ .resource = (struct resource[]) {
+ {
+ .start = 0x24520,
+ .end = 0x2453f,
+ .flags = IORESOURCE_MEM,
+ },
+ },
},
};
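Both the 83xx and 85xx hunks above move the MDIO register window from a paddr field in the platform data to a real memory resource on the fsl-gianfar_mdio platform device. A hedged sketch of how a driver probe could then locate the registers generically (illustrative only, not code from this patch):

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <asm/io.h>

/* Illustrative: fetch the MDIO register window from the platform
 * resource instead of a hard-coded physical address. */
static void __iomem *mdio_map_regs(struct platform_device *pdev)
{
	struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!r)
		return NULL;
	return ioremap(r->start, r->end - r->start + 1);
}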
diff --git a/arch/ppc/xmon/xmon.c b/arch/ppc/xmon/xmon.c
index 2b483b4f160..9075a7538e2 100644
--- a/arch/ppc/xmon/xmon.c
+++ b/arch/ppc/xmon/xmon.c
@@ -99,7 +99,7 @@ static void remove_bpts(void);
static void insert_bpts(void);
static struct bpt *at_breakpoint(unsigned pc);
static void bpt_cmds(void);
-static void cacheflush(void);
+void cacheflush(void);
#ifdef CONFIG_SMP
static void cpu_cmd(void);
#endif /* CONFIG_SMP */
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 477ac2758bd..b66602ad7b3 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -23,35 +23,22 @@ config GENERIC_BUST_SPINLOCK
mainmenu "Linux Kernel Configuration"
-config ARCH_S390
+config S390
bool
default y
-config UID16
- bool
- default y
- depends on ARCH_S390X = 'n'
-
source "init/Kconfig"
menu "Base setup"
comment "Processor type and features"
-config ARCH_S390X
+config 64BIT
bool "64 bit kernel"
help
Select this option if you have a 64 bit IBM zSeries machine
and want to use the 64 bit addressing mode.
-config 64BIT
- def_bool ARCH_S390X
-
-config ARCH_S390_31
- bool
- depends on ARCH_S390X = 'n'
- default y
-
config SMP
bool "Symmetric multi-processing support"
---help---
@@ -101,20 +88,15 @@ config MATHEMU
on older S/390 machines. Say Y unless you know your machine doesn't
need this.
-config S390_SUPPORT
+config COMPAT
bool "Kernel support for 31 bit emulation"
- depends on ARCH_S390X
+ depends on 64BIT
help
Select this option if you want to enable your system kernel to
handle system-calls from ELF binaries for 31 bit ESA. This option
(and some other stuff like libraries and such) is needed for
executing 31 bit applications. It is safe to say "Y".
-config COMPAT
- bool
- depends on S390_SUPPORT
- default y
-
config SYSVIPC_COMPAT
bool
depends on COMPAT && SYSVIPC
@@ -122,7 +104,7 @@ config SYSVIPC_COMPAT
config BINFMT_ELF32
tristate "Kernel support for 31 bit ELF binaries"
- depends on S390_SUPPORT
+ depends on COMPAT
help
This allows you to run 32-bit Linux/ELF binaries on your zSeries
in 64 bit mode. Everybody wants this; say Y.
@@ -135,7 +117,7 @@ choice
config MARCH_G5
bool "S/390 model G5 and G6"
- depends on ARCH_S390_31
+ depends on !64BIT
help
Select this to build a 31 bit kernel that works
on all S/390 and zSeries machines.
@@ -240,8 +222,8 @@ config MACHCHK_WARNING
config QDIO
tristate "QDIO support"
---help---
- This driver provides the Queued Direct I/O base support for the
- IBM S/390 (G5 and G6) and eServer zSeries (z800, z890, z900 and z990).
+ This driver provides the Queued Direct I/O base support for
+ IBM mainframes.
For details please refer to the documentation provided by IBM at
<http://www10.software.ibm.com/developerworks/opensource/linux390>
@@ -263,7 +245,8 @@ config QDIO_DEBUG
bool "Extended debugging information"
depends on QDIO
help
- Say Y here to get extended debugging output in /proc/s390dbf/qdio...
+ Say Y here to get extended debugging output in
+ /sys/kernel/debug/s390dbf/qdio...
Warning: this option reduces the performance of the QDIO module.
If unsure, say N.
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 73a09a6ee6c..6c6b197898d 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -13,16 +13,14 @@
# Copyright (C) 1994 by Linus Torvalds
#
-ifdef CONFIG_ARCH_S390_31
+ifndef CONFIG_64BIT
LDFLAGS := -m elf_s390
CFLAGS += -m31
AFLAGS += -m31
UTS_MACHINE := s390
STACK_SIZE := 8192
CHECKFLAGS += -D__s390__
-endif
-
-ifdef CONFIG_ARCH_S390X
+else
LDFLAGS := -m elf64_s390
MODFLAGS += -fpic -D__PIC__
CFLAGS += -m64
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index dee6ab54984..d06a8d71c71 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -40,7 +40,7 @@
#define TOD_MICRO 0x01000 /* nr. of TOD clock units
for 1 microsecond */
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
#define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */
#define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */
@@ -54,13 +54,13 @@
#define APPLDATA_GEN_EVENT_RECORD 0x82
#define APPLDATA_START_CONFIG_REC 0x83
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
/*
* Parameter list for DIAGNOSE X'DC'
*/
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
struct appldata_parameter_list {
u16 diag; /* The DIAGNOSE code X'00DC' */
u8 function; /* The function code for the DIAGNOSE */
@@ -82,7 +82,7 @@ struct appldata_parameter_list {
u64 product_id_addr;
u64 buffer_addr;
};
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
/*
* /proc entries (sysctl)
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index e0a476bf4fd..99ddd3bf2fb 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -141,19 +141,19 @@ static void appldata_get_os_data(void *data)
j = 0;
for_each_online_cpu(i) {
os_data->os_cpu[j].per_cpu_user =
- kstat_cpu(i).cpustat.user;
+ cputime_to_jiffies(kstat_cpu(i).cpustat.user);
os_data->os_cpu[j].per_cpu_nice =
- kstat_cpu(i).cpustat.nice;
+ cputime_to_jiffies(kstat_cpu(i).cpustat.nice);
os_data->os_cpu[j].per_cpu_system =
- kstat_cpu(i).cpustat.system;
+ cputime_to_jiffies(kstat_cpu(i).cpustat.system);
os_data->os_cpu[j].per_cpu_idle =
- kstat_cpu(i).cpustat.idle;
+ cputime_to_jiffies(kstat_cpu(i).cpustat.idle);
os_data->os_cpu[j].per_cpu_irq =
- kstat_cpu(i).cpustat.irq;
+ cputime_to_jiffies(kstat_cpu(i).cpustat.irq);
os_data->os_cpu[j].per_cpu_softirq =
- kstat_cpu(i).cpustat.softirq;
+ cputime_to_jiffies(kstat_cpu(i).cpustat.softirq);
os_data->os_cpu[j].per_cpu_iowait =
- kstat_cpu(i).cpustat.iowait;
+ cputime_to_jiffies(kstat_cpu(i).cpustat.iowait);
j++;
}
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index 96a05e6b51e..bfe2541dc5c 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -2,7 +2,9 @@
# Cryptographic API
#
-obj-$(CONFIG_CRYPTO_SHA1_Z990) += sha1_z990.o
-obj-$(CONFIG_CRYPTO_DES_Z990) += des_z990.o des_check_key.o
+obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o
+obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o
+obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o des_check_key.o
+obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
-obj-$(CONFIG_CRYPTO_TEST) += crypt_z990_query.o
+obj-$(CONFIG_CRYPTO_TEST) += crypt_s390_query.o
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
new file mode 100644
index 00000000000..7a1033d8e00
--- /dev/null
+++ b/arch/s390/crypto/aes_s390.c
@@ -0,0 +1,248 @@
+/*
+ * Cryptographic API.
+ *
+ * s390 implementation of the AES Cipher Algorithm.
+ *
+ * s390 Version:
+ * Copyright (C) 2005 IBM Deutschland GmbH, IBM Corporation
+ * Author(s): Jan Glauber (jang@de.ibm.com)
+ *
+ * Derived from "crypto/aes.c"
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/crypto.h>
+#include "crypt_s390.h"
+
+#define AES_MIN_KEY_SIZE 16
+#define AES_MAX_KEY_SIZE 32
+
+/* data block size for all key lengths */
+#define AES_BLOCK_SIZE 16
+
+int has_aes_128 = 0;
+int has_aes_192 = 0;
+int has_aes_256 = 0;
+
+struct s390_aes_ctx {
+ u8 iv[AES_BLOCK_SIZE];
+ u8 key[AES_MAX_KEY_SIZE];
+ int key_len;
+};
+
+static int aes_set_key(void *ctx, const u8 *in_key, unsigned int key_len,
+ u32 *flags)
+{
+ struct s390_aes_ctx *sctx = ctx;
+
+ switch (key_len) {
+ case 16:
+ if (!has_aes_128)
+ goto fail;
+ break;
+ case 24:
+ if (!has_aes_192)
+ goto fail;
+
+ break;
+ case 32:
+ if (!has_aes_256)
+ goto fail;
+ break;
+ default:
+ /* invalid key length */
+ goto fail;
+ break;
+ }
+
+ sctx->key_len = key_len;
+ memcpy(sctx->key, in_key, key_len);
+ return 0;
+fail:
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+}
+
+static void aes_encrypt(void *ctx, u8 *out, const u8 *in)
+{
+ const struct s390_aes_ctx *sctx = ctx;
+
+ switch (sctx->key_len) {
+ case 16:
+ crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
+ AES_BLOCK_SIZE);
+ break;
+ case 24:
+ crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in,
+ AES_BLOCK_SIZE);
+ break;
+ case 32:
+ crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in,
+ AES_BLOCK_SIZE);
+ break;
+ }
+}
+
+static void aes_decrypt(void *ctx, u8 *out, const u8 *in)
+{
+ const struct s390_aes_ctx *sctx = ctx;
+
+ switch (sctx->key_len) {
+ case 16:
+ crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
+ AES_BLOCK_SIZE);
+ break;
+ case 24:
+ crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in,
+ AES_BLOCK_SIZE);
+ break;
+ case 32:
+ crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in,
+ AES_BLOCK_SIZE);
+ break;
+ }
+}
+
+static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
+ const u8 *in, unsigned int nbytes)
+{
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
+
+ switch (sctx->key_len) {
+ case 16:
+ crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in, nbytes);
+ break;
+ case 24:
+ crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in, nbytes);
+ break;
+ case 32:
+ crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in, nbytes);
+ break;
+ }
+ return nbytes & ~(AES_BLOCK_SIZE - 1);
+}
+
+static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
+ const u8 *in, unsigned int nbytes)
+{
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
+
+ switch (sctx->key_len) {
+ case 16:
+ crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in, nbytes);
+ break;
+ case 24:
+ crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in, nbytes);
+ break;
+ case 32:
+ crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in, nbytes);
+ break;
+ }
+ return nbytes & ~(AES_BLOCK_SIZE - 1);
+}
+
+static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
+ const u8 *in, unsigned int nbytes)
+{
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
+
+ memcpy(&sctx->iv, desc->info, AES_BLOCK_SIZE);
+ switch (sctx->key_len) {
+ case 16:
+ crypt_s390_kmc(KMC_AES_128_ENCRYPT, &sctx->iv, out, in, nbytes);
+ break;
+ case 24:
+ crypt_s390_kmc(KMC_AES_192_ENCRYPT, &sctx->iv, out, in, nbytes);
+ break;
+ case 32:
+ crypt_s390_kmc(KMC_AES_256_ENCRYPT, &sctx->iv, out, in, nbytes);
+ break;
+ }
+ memcpy(desc->info, &sctx->iv, AES_BLOCK_SIZE);
+
+ return nbytes & ~(AES_BLOCK_SIZE - 1);
+}
+
+static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
+ const u8 *in, unsigned int nbytes)
+{
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
+
+ memcpy(&sctx->iv, desc->info, AES_BLOCK_SIZE);
+ switch (sctx->key_len) {
+ case 16:
+ crypt_s390_kmc(KMC_AES_128_DECRYPT, &sctx->iv, out, in, nbytes);
+ break;
+ case 24:
+ crypt_s390_kmc(KMC_AES_192_DECRYPT, &sctx->iv, out, in, nbytes);
+ break;
+ case 32:
+ crypt_s390_kmc(KMC_AES_256_DECRYPT, &sctx->iv, out, in, nbytes);
+ break;
+ }
+ return nbytes & ~(AES_BLOCK_SIZE - 1);
+}
+
+
+static struct crypto_alg aes_alg = {
+ .cra_name = "aes",
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s390_aes_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = AES_MIN_KEY_SIZE,
+ .cia_max_keysize = AES_MAX_KEY_SIZE,
+ .cia_setkey = aes_set_key,
+ .cia_encrypt = aes_encrypt,
+ .cia_decrypt = aes_decrypt,
+ .cia_encrypt_ecb = aes_encrypt_ecb,
+ .cia_decrypt_ecb = aes_decrypt_ecb,
+ .cia_encrypt_cbc = aes_encrypt_cbc,
+ .cia_decrypt_cbc = aes_decrypt_cbc,
+ }
+ }
+};
+
+static int __init aes_init(void)
+{
+ int ret;
+
+ if (crypt_s390_func_available(KM_AES_128_ENCRYPT))
+ has_aes_128 = 1;
+ if (crypt_s390_func_available(KM_AES_192_ENCRYPT))
+ has_aes_192 = 1;
+ if (crypt_s390_func_available(KM_AES_256_ENCRYPT))
+ has_aes_256 = 1;
+
+ if (!has_aes_128 && !has_aes_192 && !has_aes_256)
+ return -ENOSYS;
+
+ ret = crypto_register_alg(&aes_alg);
+ if (ret != 0)
+ printk(KERN_INFO "crypt_s390: aes_s390 couldn't be loaded.\n");
+ return ret;
+}
+
+static void __exit aes_fini(void)
+{
+ crypto_unregister_alg(&aes_alg);
+}
+
+module_init(aes_init);
+module_exit(aes_fini);
+
+MODULE_ALIAS("aes");
+
+MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
+MODULE_LICENSE("GPL");
+
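aes_init() above only registers the algorithm after probing the machine with crypt_s390_func_available(). The other modules added to the Makefile (sha1_s390, sha256_s390, des_s390) presumably follow the same pattern; a minimal hypothetical sketch for SHA-256:

#include <linux/init.h>
#include <linux/errno.h>
#include "crypt_s390.h"

/* Hypothetical: refuse to load on hardware without the KIMD SHA-256
 * function, mirroring the capability check in aes_init() above. */
static int __init sha256_probe_example(void)
{
	if (!crypt_s390_func_available(KIMD_SHA_256))
		return -ENOSYS;
	return 0;
}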
diff --git a/arch/s390/crypto/crypt_z990.h b/arch/s390/crypto/crypt_s390.h
index 4df660b99e5..d1c259a7fe3 100644
--- a/arch/s390/crypto/crypt_z990.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -1,7 +1,7 @@
/*
* Cryptographic API.
*
- * Support for z990 cryptographic instructions.
+ * Support for s390 cryptographic instructions.
*
* Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation
* Author(s): Thomas Spatzier (tspat@de.ibm.com)
@@ -12,84 +12,108 @@
* any later version.
*
*/
-#ifndef _CRYPTO_ARCH_S390_CRYPT_Z990_H
-#define _CRYPTO_ARCH_S390_CRYPT_Z990_H
+#ifndef _CRYPTO_ARCH_S390_CRYPT_S390_H
+#define _CRYPTO_ARCH_S390_CRYPT_S390_H
#include <asm/errno.h>
-#define CRYPT_Z990_OP_MASK 0xFF00
-#define CRYPT_Z990_FUNC_MASK 0x00FF
+#define CRYPT_S390_OP_MASK 0xFF00
+#define CRYPT_S390_FUNC_MASK 0x00FF
-
-/*z990 cryptographic operations*/
-enum crypt_z990_operations {
- CRYPT_Z990_KM = 0x0100,
- CRYPT_Z990_KMC = 0x0200,
- CRYPT_Z990_KIMD = 0x0300,
- CRYPT_Z990_KLMD = 0x0400,
- CRYPT_Z990_KMAC = 0x0500
+/* s390 cryptographic operations */
+enum crypt_s390_operations {
+ CRYPT_S390_KM = 0x0100,
+ CRYPT_S390_KMC = 0x0200,
+ CRYPT_S390_KIMD = 0x0300,
+ CRYPT_S390_KLMD = 0x0400,
+ CRYPT_S390_KMAC = 0x0500
};
-/*function codes for KM (CIPHER MESSAGE) instruction*/
-enum crypt_z990_km_func {
- KM_QUERY = CRYPT_Z990_KM | 0,
- KM_DEA_ENCRYPT = CRYPT_Z990_KM | 1,
- KM_DEA_DECRYPT = CRYPT_Z990_KM | 1 | 0x80, //modifier bit->decipher
- KM_TDEA_128_ENCRYPT = CRYPT_Z990_KM | 2,
- KM_TDEA_128_DECRYPT = CRYPT_Z990_KM | 2 | 0x80,
- KM_TDEA_192_ENCRYPT = CRYPT_Z990_KM | 3,
- KM_TDEA_192_DECRYPT = CRYPT_Z990_KM | 3 | 0x80,
+/* function codes for KM (CIPHER MESSAGE) instruction
+ * 0x80 is the decipher modifier bit
+ */
+enum crypt_s390_km_func {
+ KM_QUERY = CRYPT_S390_KM | 0x0,
+ KM_DEA_ENCRYPT = CRYPT_S390_KM | 0x1,
+ KM_DEA_DECRYPT = CRYPT_S390_KM | 0x1 | 0x80,
+ KM_TDEA_128_ENCRYPT = CRYPT_S390_KM | 0x2,
+ KM_TDEA_128_DECRYPT = CRYPT_S390_KM | 0x2 | 0x80,
+ KM_TDEA_192_ENCRYPT = CRYPT_S390_KM | 0x3,
+ KM_TDEA_192_DECRYPT = CRYPT_S390_KM | 0x3 | 0x80,
+ KM_AES_128_ENCRYPT = CRYPT_S390_KM | 0x12,
+ KM_AES_128_DECRYPT = CRYPT_S390_KM | 0x12 | 0x80,
+ KM_AES_192_ENCRYPT = CRYPT_S390_KM | 0x13,
+ KM_AES_192_DECRYPT = CRYPT_S390_KM | 0x13 | 0x80,
+ KM_AES_256_ENCRYPT = CRYPT_S390_KM | 0x14,
+ KM_AES_256_DECRYPT = CRYPT_S390_KM | 0x14 | 0x80,
};
-/*function codes for KMC (CIPHER MESSAGE WITH CHAINING) instruction*/
-enum crypt_z990_kmc_func {
- KMC_QUERY = CRYPT_Z990_KMC | 0,
- KMC_DEA_ENCRYPT = CRYPT_Z990_KMC | 1,
- KMC_DEA_DECRYPT = CRYPT_Z990_KMC | 1 | 0x80, //modifier bit->decipher
- KMC_TDEA_128_ENCRYPT = CRYPT_Z990_KMC | 2,
- KMC_TDEA_128_DECRYPT = CRYPT_Z990_KMC | 2 | 0x80,
- KMC_TDEA_192_ENCRYPT = CRYPT_Z990_KMC | 3,
- KMC_TDEA_192_DECRYPT = CRYPT_Z990_KMC | 3 | 0x80,
+/* function codes for KMC (CIPHER MESSAGE WITH CHAINING)
+ * instruction
+ */
+enum crypt_s390_kmc_func {
+ KMC_QUERY = CRYPT_S390_KMC | 0x0,
+ KMC_DEA_ENCRYPT = CRYPT_S390_KMC | 0x1,
+ KMC_DEA_DECRYPT = CRYPT_S390_KMC | 0x1 | 0x80,
+ KMC_TDEA_128_ENCRYPT = CRYPT_S390_KMC | 0x2,
+ KMC_TDEA_128_DECRYPT = CRYPT_S390_KMC | 0x2 | 0x80,
+ KMC_TDEA_192_ENCRYPT = CRYPT_S390_KMC | 0x3,
+ KMC_TDEA_192_DECRYPT = CRYPT_S390_KMC | 0x3 | 0x80,
+ KMC_AES_128_ENCRYPT = CRYPT_S390_KMC | 0x12,
+ KMC_AES_128_DECRYPT = CRYPT_S390_KMC | 0x12 | 0x80,
+ KMC_AES_192_ENCRYPT = CRYPT_S390_KMC | 0x13,
+ KMC_AES_192_DECRYPT = CRYPT_S390_KMC | 0x13 | 0x80,
+ KMC_AES_256_ENCRYPT = CRYPT_S390_KMC | 0x14,
+ KMC_AES_256_DECRYPT = CRYPT_S390_KMC | 0x14 | 0x80,
};
-/*function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) instruction*/
-enum crypt_z990_kimd_func {
- KIMD_QUERY = CRYPT_Z990_KIMD | 0,
- KIMD_SHA_1 = CRYPT_Z990_KIMD | 1,
+/* function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
+ * instruction
+ */
+enum crypt_s390_kimd_func {
+ KIMD_QUERY = CRYPT_S390_KIMD | 0,
+ KIMD_SHA_1 = CRYPT_S390_KIMD | 1,
+ KIMD_SHA_256 = CRYPT_S390_KIMD | 2,
};
-/*function codes for KLMD (COMPUTE LAST MESSAGE DIGEST) instruction*/
-enum crypt_z990_klmd_func {
- KLMD_QUERY = CRYPT_Z990_KLMD | 0,
- KLMD_SHA_1 = CRYPT_Z990_KLMD | 1,
+/* function codes for KLMD (COMPUTE LAST MESSAGE DIGEST)
+ * instruction
+ */
+enum crypt_s390_klmd_func {
+ KLMD_QUERY = CRYPT_S390_KLMD | 0,
+ KLMD_SHA_1 = CRYPT_S390_KLMD | 1,
+ KLMD_SHA_256 = CRYPT_S390_KLMD | 2,
};
-/*function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) instruction*/
-enum crypt_z990_kmac_func {
- KMAC_QUERY = CRYPT_Z990_KMAC | 0,
- KMAC_DEA = CRYPT_Z990_KMAC | 1,
- KMAC_TDEA_128 = CRYPT_Z990_KMAC | 2,
- KMAC_TDEA_192 = CRYPT_Z990_KMAC | 3
+/* function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
+ * instruction
+ */
+enum crypt_s390_kmac_func {
+ KMAC_QUERY = CRYPT_S390_KMAC | 0,
+ KMAC_DEA = CRYPT_S390_KMAC | 1,
+ KMAC_TDEA_128 = CRYPT_S390_KMAC | 2,
+ KMAC_TDEA_192 = CRYPT_S390_KMAC | 3
};
-/*status word for z990 crypto instructions' QUERY functions*/
-struct crypt_z990_query_status {
+/* status word for s390 crypto instructions' QUERY functions */
+struct crypt_s390_query_status {
u64 high;
u64 low;
};
/*
- * Standard fixup and ex_table sections for crypt_z990 inline functions.
- * label 0: the z990 crypto operation
- * label 1: just after 1 to catch illegal operation exception on non-z990
+ * Standard fixup and ex_table sections for crypt_s390 inline functions.
+ * label 0: the s390 crypto operation
+ * label 1: just after 1 to catch illegal operation exception
+ * (unsupported model)
* label 6: the return point after fixup
* label 7: set error value if exception _in_ crypto operation
* label 8: set error value if illegal operation exception
* [ret] is the variable to receive the error code
* [ERR] is the error code value
*/
-#ifndef __s390x__
-#define __crypt_z990_fixup \
+#ifndef CONFIG_64BIT
+#define __crypt_s390_fixup \
".section .fixup,\"ax\" \n" \
"7: lhi %0,%h[e1] \n" \
" bras 1,9f \n" \
@@ -105,8 +129,8 @@ struct crypt_z990_query_status {
" .long 0b,7b \n" \
" .long 1b,8b \n" \
".previous"
-#else /* __s390x__ */
-#define __crypt_z990_fixup \
+#else /* CONFIG_64BIT */
+#define __crypt_s390_fixup \
".section .fixup,\"ax\" \n" \
"7: lhi %0,%h[e1] \n" \
" jg 6b \n" \
@@ -118,25 +142,25 @@ struct crypt_z990_query_status {
" .quad 0b,7b \n" \
" .quad 1b,8b \n" \
".previous"
-#endif /* __s390x__ */
+#endif /* CONFIG_64BIT */
/*
- * Standard code for setting the result of z990 crypto instructions.
+ * Standard code for setting the result of s390 crypto instructions.
* %0: the register which will receive the result
* [result]: the register containing the result (e.g. second operand length
* to compute number of processed bytes].
*/
-#ifndef __s390x__
-#define __crypt_z990_set_result \
+#ifndef CONFIG_64BIT
+#define __crypt_s390_set_result \
" lr %0,%[result] \n"
-#else /* __s390x__ */
-#define __crypt_z990_set_result \
+#else /* CONFIG_64BIT */
+#define __crypt_s390_set_result \
" lgr %0,%[result] \n"
#endif
/*
- * Executes the KM (CIPHER MESSAGE) operation of the z990 CPU.
- * @param func: the function code passed to KM; see crypt_z990_km_func
+ * Executes the KM (CIPHER MESSAGE) operation of the CPU.
+ * @param func: the function code passed to KM; see crypt_s390_km_func
* @param param: address of parameter block; see POP for details on each func
* @param dest: address of destination memory area
* @param src: address of source memory area
@@ -145,9 +169,9 @@ struct crypt_z990_query_status {
* for encryption/decryption funcs
*/
static inline int
-crypt_z990_km(long func, void* param, u8* dest, const u8* src, long src_len)
+crypt_s390_km(long func, void* param, u8* dest, const u8* src, long src_len)
{
- register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK;
+ register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
register void* __param asm("1") = param;
register u8* __dest asm("4") = dest;
register const u8* __src asm("2") = src;
@@ -156,26 +180,26 @@ crypt_z990_km(long func, void* param, u8* dest, const u8* src, long src_len)
ret = 0;
__asm__ __volatile__ (
- "0: .insn rre,0xB92E0000,%1,%2 \n" //KM opcode
- "1: brc 1,0b \n" //handle partial completion
- __crypt_z990_set_result
+ "0: .insn rre,0xB92E0000,%1,%2 \n" /* KM opcode */
+ "1: brc 1,0b \n" /* handle partial completion */
+ __crypt_s390_set_result
"6: \n"
- __crypt_z990_fixup
+ __crypt_s390_fixup
: "+d" (ret), "+a" (__dest), "+a" (__src),
[result] "+d" (__src_len)
: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
"a" (__param)
: "cc", "memory"
);
- if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){
+ if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
ret = src_len - ret;
}
return ret;
}
/*
- * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the z990 CPU.
- * @param func: the function code passed to KM; see crypt_z990_kmc_func
+ * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU.
+ * @param func: the function code passed to KM; see crypt_s390_kmc_func
* @param param: address of parameter block; see POP for details on each func
* @param dest: address of destination memory area
* @param src: address of source memory area
@@ -184,9 +208,9 @@ crypt_z990_km(long func, void* param, u8* dest, const u8* src, long src_len)
* for encryption/decryption funcs
*/
static inline int
-crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
+crypt_s390_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
{
- register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK;
+ register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
register void* __param asm("1") = param;
register u8* __dest asm("4") = dest;
register const u8* __src asm("2") = src;
@@ -195,18 +219,18 @@ crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
ret = 0;
__asm__ __volatile__ (
- "0: .insn rre,0xB92F0000,%1,%2 \n" //KMC opcode
- "1: brc 1,0b \n" //handle partial completion
- __crypt_z990_set_result
+ "0: .insn rre,0xB92F0000,%1,%2 \n" /* KMC opcode */
+ "1: brc 1,0b \n" /* handle partial completion */
+ __crypt_s390_set_result
"6: \n"
- __crypt_z990_fixup
+ __crypt_s390_fixup
: "+d" (ret), "+a" (__dest), "+a" (__src),
[result] "+d" (__src_len)
: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
"a" (__param)
: "cc", "memory"
);
- if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){
+ if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
ret = src_len - ret;
}
return ret;
@@ -214,8 +238,8 @@ crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
/*
* Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation
- * of the z990 CPU.
- * @param func: the function code passed to KM; see crypt_z990_kimd_func
+ * of the CPU.
+ * @param func: the function code passed to KM; see crypt_s390_kimd_func
* @param param: address of parameter block; see POP for details on each func
* @param src: address of source memory area
* @param src_len: length of src operand in bytes
@@ -223,9 +247,9 @@ crypt_z990_kmc(long func, void* param, u8* dest, const u8* src, long src_len)
* for digest funcs
*/
static inline int
-crypt_z990_kimd(long func, void* param, const u8* src, long src_len)
+crypt_s390_kimd(long func, void* param, const u8* src, long src_len)
{
- register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK;
+ register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
register void* __param asm("1") = param;
register const u8* __src asm("2") = src;
register long __src_len asm("3") = src_len;
@@ -233,25 +257,25 @@ crypt_z990_kimd(long func, void* param, const u8* src, long src_len)
ret = 0;
__asm__ __volatile__ (
- "0: .insn rre,0xB93E0000,%1,%1 \n" //KIMD opcode
- "1: brc 1,0b \n" /*handle partical completion of kimd*/
- __crypt_z990_set_result
+ "0: .insn rre,0xB93E0000,%1,%1 \n" /* KIMD opcode */
+ "1: brc 1,0b \n" /* handle partical completion */
+ __crypt_s390_set_result
"6: \n"
- __crypt_z990_fixup
+ __crypt_s390_fixup
: "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
"a" (__param)
: "cc", "memory"
);
- if (ret >= 0 && (func & CRYPT_Z990_FUNC_MASK)){
+ if (ret >= 0 && (func & CRYPT_S390_FUNC_MASK)){
ret = src_len - ret;
}
return ret;
}
/*
- * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the z990 CPU.
- * @param func: the function code passed to KM; see crypt_z990_klmd_func
+ * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU.
+ * @param func: the function code passed to KM; see crypt_s390_klmd_func
* @param param: address of parameter block; see POP for details on each func
* @param src: address of source memory area
* @param src_len: length of src operand in bytes
@@ -259,9 +283,9 @@ crypt_z990_kimd(long func, void* param, const u8* src, long src_len)
* for digest funcs
*/
static inline int
-crypt_z990_klmd(long func, void* param, const u8* src, long src_len)
+crypt_s390_klmd(long func, void* param, const u8* src, long src_len)
{
- register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK;
+ register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
register void* __param asm("1") = param;
register const u8* __src asm("2") = src;
register long __src_len asm("3") = src_len;
@@ -269,17 +293,17 @@ crypt_z990_klmd(long func, void* param, const u8* src, long src_len)
ret = 0;
__asm__ __volatile__ (
- "0: .insn rre,0xB93F0000,%1,%1 \n" //KLMD opcode
- "1: brc 1,0b \n" /*handle partical completion of klmd*/
- __crypt_z990_set_result
+ "0: .insn rre,0xB93F0000,%1,%1 \n" /* KLMD opcode */
+ "1: brc 1,0b \n" /* handle partical completion */
+ __crypt_s390_set_result
"6: \n"
- __crypt_z990_fixup
+ __crypt_s390_fixup
: "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
"a" (__param)
: "cc", "memory"
);
- if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){
+ if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
ret = src_len - ret;
}
return ret;
@@ -287,8 +311,8 @@ crypt_z990_klmd(long func, void* param, const u8* src, long src_len)
/*
* Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation
- * of the z990 CPU.
- * @param func: the function code passed to KM; see crypt_z990_klmd_func
+ * of the CPU.
+ * @param func: the function code passed to KM; see crypt_s390_klmd_func
* @param param: address of parameter block; see POP for details on each func
* @param src: address of source memory area
* @param src_len: length of src operand in bytes
@@ -296,9 +320,9 @@ crypt_z990_klmd(long func, void* param, const u8* src, long src_len)
* for digest funcs
*/
static inline int
-crypt_z990_kmac(long func, void* param, const u8* src, long src_len)
+crypt_s390_kmac(long func, void* param, const u8* src, long src_len)
{
- register long __func asm("0") = func & CRYPT_Z990_FUNC_MASK;
+ register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
register void* __param asm("1") = param;
register const u8* __src asm("2") = src;
register long __src_len asm("3") = src_len;
@@ -306,58 +330,58 @@ crypt_z990_kmac(long func, void* param, const u8* src, long src_len)
ret = 0;
__asm__ __volatile__ (
- "0: .insn rre,0xB91E0000,%5,%5 \n" //KMAC opcode
- "1: brc 1,0b \n" /*handle partical completion of klmd*/
- __crypt_z990_set_result
+ "0: .insn rre,0xB91E0000,%5,%5 \n" /* KMAC opcode */
+ "1: brc 1,0b \n" /* handle partical completion */
+ __crypt_s390_set_result
"6: \n"
- __crypt_z990_fixup
+ __crypt_s390_fixup
: "+d" (ret), "+a" (__src), [result] "+d" (__src_len)
: [e1] "K" (-EFAULT), [e2] "K" (-ENOSYS), "d" (__func),
"a" (__param)
: "cc", "memory"
);
- if (ret >= 0 && func & CRYPT_Z990_FUNC_MASK){
+ if (ret >= 0 && func & CRYPT_S390_FUNC_MASK){
ret = src_len - ret;
}
return ret;
}
/**
- * Tests if a specific z990 crypto function is implemented on the machine.
+ * Tests if a specific crypto function is implemented on the machine.
* @param func: the function code of the specific function; 0 if op in general
* @return 1 if func available; 0 if func or op in general not available
*/
static inline int
-crypt_z990_func_available(int func)
+crypt_s390_func_available(int func)
{
int ret;
- struct crypt_z990_query_status status = {
+ struct crypt_s390_query_status status = {
.high = 0,
.low = 0
};
- switch (func & CRYPT_Z990_OP_MASK){
- case CRYPT_Z990_KM:
- ret = crypt_z990_km(KM_QUERY, &status, NULL, NULL, 0);
+ switch (func & CRYPT_S390_OP_MASK){
+ case CRYPT_S390_KM:
+ ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
break;
- case CRYPT_Z990_KMC:
- ret = crypt_z990_kmc(KMC_QUERY, &status, NULL, NULL, 0);
+ case CRYPT_S390_KMC:
+ ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0);
break;
- case CRYPT_Z990_KIMD:
- ret = crypt_z990_kimd(KIMD_QUERY, &status, NULL, 0);
+ case CRYPT_S390_KIMD:
+ ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0);
break;
- case CRYPT_Z990_KLMD:
- ret = crypt_z990_klmd(KLMD_QUERY, &status, NULL, 0);
+ case CRYPT_S390_KLMD:
+ ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0);
break;
- case CRYPT_Z990_KMAC:
- ret = crypt_z990_kmac(KMAC_QUERY, &status, NULL, 0);
+ case CRYPT_S390_KMAC:
+ ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
break;
default:
ret = 0;
return ret;
}
if (ret >= 0){
- func &= CRYPT_Z990_FUNC_MASK;
+ func &= CRYPT_S390_FUNC_MASK;
func &= 0x7f; //mask modifier bit
if (func < 64){
ret = (status.high >> (64 - func - 1)) & 0x1;
@@ -370,5 +394,4 @@ crypt_z990_func_available(int func)
return ret;
}
-
-#endif // _CRYPTO_ARCH_S390_CRYPT_Z990_H
+#endif // _CRYPTO_ARCH_S390_CRYPT_S390_H
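The renamed constants keep the original encoding: the high byte selects the instruction (KM, KMC, KIMD, KLMD, KMAC), the low byte is the function code, and 0x80 is the decipher modifier. A small illustration of how the two masks decompose one of the new AES codes (illustration only):

#include "crypt_s390.h"

/* KM_AES_128_DECRYPT == CRYPT_S390_KM | 0x12 | 0x80 == 0x0192 */
static inline void crypt_s390_code_example(void)
{
	long code = KM_AES_128_DECRYPT;
	long op   = code & CRYPT_S390_OP_MASK;	/* 0x0100 -> CRYPT_S390_KM */
	long func = code & CRYPT_S390_FUNC_MASK;	/* 0x92: func 0x12 plus decipher bit */

	(void)op;
	(void)func;
}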
diff --git a/arch/s390/crypto/crypt_s390_query.c b/arch/s390/crypto/crypt_s390_query.c
new file mode 100644
index 00000000000..def02bdc44a
--- /dev/null
+++ b/arch/s390/crypto/crypt_s390_query.c
@@ -0,0 +1,129 @@
+/*
+ * Cryptographic API.
+ *
+ * Support for s390 cryptographic instructions.
+ * Testing module for querying processor crypto capabilities.
+ *
+ * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Thomas Spatzier (tspat@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/errno.h>
+#include "crypt_s390.h"
+
+static void query_available_functions(void)
+{
+ printk(KERN_INFO "#####################\n");
+
+ /* query available KM functions */
+ printk(KERN_INFO "KM_QUERY: %d\n",
+ crypt_s390_func_available(KM_QUERY));
+ printk(KERN_INFO "KM_DEA: %d\n",
+ crypt_s390_func_available(KM_DEA_ENCRYPT));
+ printk(KERN_INFO "KM_TDEA_128: %d\n",
+ crypt_s390_func_available(KM_TDEA_128_ENCRYPT));
+ printk(KERN_INFO "KM_TDEA_192: %d\n",
+ crypt_s390_func_available(KM_TDEA_192_ENCRYPT));
+ printk(KERN_INFO "KM_AES_128: %d\n",
+ crypt_s390_func_available(KM_AES_128_ENCRYPT));
+ printk(KERN_INFO "KM_AES_192: %d\n",
+ crypt_s390_func_available(KM_AES_192_ENCRYPT));
+ printk(KERN_INFO "KM_AES_256: %d\n",
+ crypt_s390_func_available(KM_AES_256_ENCRYPT));
+
+ /* query available KMC functions */
+ printk(KERN_INFO "KMC_QUERY: %d\n",
+ crypt_s390_func_available(KMC_QUERY));
+ printk(KERN_INFO "KMC_DEA: %d\n",
+ crypt_s390_func_available(KMC_DEA_ENCRYPT));
+ printk(KERN_INFO "KMC_TDEA_128: %d\n",
+ crypt_s390_func_available(KMC_TDEA_128_ENCRYPT));
+ printk(KERN_INFO "KMC_TDEA_192: %d\n",
+ crypt_s390_func_available(KMC_TDEA_192_ENCRYPT));
+ printk(KERN_INFO "KMC_AES_128: %d\n",
+ crypt_s390_func_available(KMC_AES_128_ENCRYPT));
+ printk(KERN_INFO "KMC_AES_192: %d\n",
+ crypt_s390_func_available(KMC_AES_192_ENCRYPT));
+ printk(KERN_INFO "KMC_AES_256: %d\n",
+ crypt_s390_func_available(KMC_AES_256_ENCRYPT));
+
+ /* query available KIMD functions */
+ printk(KERN_INFO "KIMD_QUERY: %d\n",
+ crypt_s390_func_available(KIMD_QUERY));
+ printk(KERN_INFO "KIMD_SHA_1: %d\n",
+ crypt_s390_func_available(KIMD_SHA_1));
+ printk(KERN_INFO "KIMD_SHA_256: %d\n",
+ crypt_s390_func_available(KIMD_SHA_256));
+
+ /* query available KLMD functions */
+ printk(KERN_INFO "KLMD_QUERY: %d\n",
+ crypt_s390_func_available(KLMD_QUERY));
+ printk(KERN_INFO "KLMD_SHA_1: %d\n",
+ crypt_s390_func_available(KLMD_SHA_1));
+ printk(KERN_INFO "KLMD_SHA_256: %d\n",
+ crypt_s390_func_available(KLMD_SHA_256));
+
+ /* query available KMAC functions */
+ printk(KERN_INFO "KMAC_QUERY: %d\n",
+ crypt_s390_func_available(KMAC_QUERY));
+ printk(KERN_INFO "KMAC_DEA: %d\n",
+ crypt_s390_func_available(KMAC_DEA));
+ printk(KERN_INFO "KMAC_TDEA_128: %d\n",
+ crypt_s390_func_available(KMAC_TDEA_128));
+ printk(KERN_INFO "KMAC_TDEA_192: %d\n",
+ crypt_s390_func_available(KMAC_TDEA_192));
+}
+
+static int init(void)
+{
+ struct crypt_s390_query_status status = {
+ .high = 0,
+ .low = 0
+ };
+
+ printk(KERN_INFO "crypt_s390: querying available crypto functions\n");
+ crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
+ printk(KERN_INFO "KM:\t%016llx %016llx\n",
+ (unsigned long long) status.high,
+ (unsigned long long) status.low);
+ status.high = status.low = 0;
+ crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0);
+ printk(KERN_INFO "KMC:\t%016llx %016llx\n",
+ (unsigned long long) status.high,
+ (unsigned long long) status.low);
+ status.high = status.low = 0;
+ crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0);
+ printk(KERN_INFO "KIMD:\t%016llx %016llx\n",
+ (unsigned long long) status.high,
+ (unsigned long long) status.low);
+ status.high = status.low = 0;
+ crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0);
+ printk(KERN_INFO "KLMD:\t%016llx %016llx\n",
+ (unsigned long long) status.high,
+ (unsigned long long) status.low);
+ status.high = status.low = 0;
+ crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
+ printk(KERN_INFO "KMAC:\t%016llx %016llx\n",
+ (unsigned long long) status.high,
+ (unsigned long long) status.low);
+
+ query_available_functions();
+ return -ECANCELED;
+}
+
+static void __exit cleanup(void)
+{
+}
+
+module_init(init);
+module_exit(cleanup);
+
+MODULE_LICENSE("GPL");
diff --git a/arch/s390/crypto/crypt_z990_query.c b/arch/s390/crypto/crypt_z990_query.c
deleted file mode 100644
index 7133983d138..00000000000
--- a/arch/s390/crypto/crypt_z990_query.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Support for z990 cryptographic instructions.
- * Testing module for querying processor crypto capabilities.
- *
- * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Thomas Spatzier (tspat@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <asm/errno.h>
-#include "crypt_z990.h"
-
-static void
-query_available_functions(void)
-{
- printk(KERN_INFO "#####################\n");
- //query available KM functions
- printk(KERN_INFO "KM_QUERY: %d\n",
- crypt_z990_func_available(KM_QUERY));
- printk(KERN_INFO "KM_DEA: %d\n",
- crypt_z990_func_available(KM_DEA_ENCRYPT));
- printk(KERN_INFO "KM_TDEA_128: %d\n",
- crypt_z990_func_available(KM_TDEA_128_ENCRYPT));
- printk(KERN_INFO "KM_TDEA_192: %d\n",
- crypt_z990_func_available(KM_TDEA_192_ENCRYPT));
- //query available KMC functions
- printk(KERN_INFO "KMC_QUERY: %d\n",
- crypt_z990_func_available(KMC_QUERY));
- printk(KERN_INFO "KMC_DEA: %d\n",
- crypt_z990_func_available(KMC_DEA_ENCRYPT));
- printk(KERN_INFO "KMC_TDEA_128: %d\n",
- crypt_z990_func_available(KMC_TDEA_128_ENCRYPT));
- printk(KERN_INFO "KMC_TDEA_192: %d\n",
- crypt_z990_func_available(KMC_TDEA_192_ENCRYPT));
- //query available KIMD fucntions
- printk(KERN_INFO "KIMD_QUERY: %d\n",
- crypt_z990_func_available(KIMD_QUERY));
- printk(KERN_INFO "KIMD_SHA_1: %d\n",
- crypt_z990_func_available(KIMD_SHA_1));
- //query available KLMD functions
- printk(KERN_INFO "KLMD_QUERY: %d\n",
- crypt_z990_func_available(KLMD_QUERY));
- printk(KERN_INFO "KLMD_SHA_1: %d\n",
- crypt_z990_func_available(KLMD_SHA_1));
- //query available KMAC functions
- printk(KERN_INFO "KMAC_QUERY: %d\n",
- crypt_z990_func_available(KMAC_QUERY));
- printk(KERN_INFO "KMAC_DEA: %d\n",
- crypt_z990_func_available(KMAC_DEA));
- printk(KERN_INFO "KMAC_TDEA_128: %d\n",
- crypt_z990_func_available(KMAC_TDEA_128));
- printk(KERN_INFO "KMAC_TDEA_192: %d\n",
- crypt_z990_func_available(KMAC_TDEA_192));
-}
-
-static int
-init(void)
-{
- struct crypt_z990_query_status status = {
- .high = 0,
- .low = 0
- };
-
- printk(KERN_INFO "crypt_z990: querying available crypto functions\n");
- crypt_z990_km(KM_QUERY, &status, NULL, NULL, 0);
- printk(KERN_INFO "KM: %016llx %016llx\n",
- (unsigned long long) status.high,
- (unsigned long long) status.low);
- status.high = status.low = 0;
- crypt_z990_kmc(KMC_QUERY, &status, NULL, NULL, 0);
- printk(KERN_INFO "KMC: %016llx %016llx\n",
- (unsigned long long) status.high,
- (unsigned long long) status.low);
- status.high = status.low = 0;
- crypt_z990_kimd(KIMD_QUERY, &status, NULL, 0);
- printk(KERN_INFO "KIMD: %016llx %016llx\n",
- (unsigned long long) status.high,
- (unsigned long long) status.low);
- status.high = status.low = 0;
- crypt_z990_klmd(KLMD_QUERY, &status, NULL, 0);
- printk(KERN_INFO "KLMD: %016llx %016llx\n",
- (unsigned long long) status.high,
- (unsigned long long) status.low);
- status.high = status.low = 0;
- crypt_z990_kmac(KMAC_QUERY, &status, NULL, 0);
- printk(KERN_INFO "KMAC: %016llx %016llx\n",
- (unsigned long long) status.high,
- (unsigned long long) status.low);
-
- query_available_functions();
- return -1;
-}
-
-static void __exit
-cleanup(void)
-{
-}
-
-module_init(init);
-module_exit(cleanup);
-
-MODULE_LICENSE("GPL");
diff --git a/arch/s390/crypto/des_z990.c b/arch/s390/crypto/des_s390.c
index 813cf37b117..a38bb2a3eef 100644
--- a/arch/s390/crypto/des_z990.c
+++ b/arch/s390/crypto/des_s390.c
@@ -1,7 +1,7 @@
/*
* Cryptographic API.
*
- * z990 implementation of the DES Cipher Algorithm.
+ * s390 implementation of the DES Cipher Algorithm.
*
* Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Thomas Spatzier (tspat@de.ibm.com)
@@ -19,7 +19,7 @@
#include <linux/errno.h>
#include <asm/scatterlist.h>
#include <linux/crypto.h>
-#include "crypt_z990.h"
+#include "crypt_s390.h"
#include "crypto_des.h"
#define DES_BLOCK_SIZE 8
@@ -31,17 +31,17 @@
#define DES3_192_KEY_SIZE (3 * DES_KEY_SIZE)
#define DES3_192_BLOCK_SIZE DES_BLOCK_SIZE
-struct crypt_z990_des_ctx {
+struct crypt_s390_des_ctx {
u8 iv[DES_BLOCK_SIZE];
u8 key[DES_KEY_SIZE];
};
-struct crypt_z990_des3_128_ctx {
+struct crypt_s390_des3_128_ctx {
u8 iv[DES_BLOCK_SIZE];
u8 key[DES3_128_KEY_SIZE];
};
-struct crypt_z990_des3_192_ctx {
+struct crypt_s390_des3_192_ctx {
u8 iv[DES_BLOCK_SIZE];
u8 key[DES3_192_KEY_SIZE];
};
@@ -49,7 +49,7 @@ struct crypt_z990_des3_192_ctx {
static int
des_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
{
- struct crypt_z990_des_ctx *dctx;
+ struct crypt_s390_des_ctx *dctx;
int ret;
dctx = ctx;
@@ -65,26 +65,26 @@ des_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
static void
des_encrypt(void *ctx, u8 *dst, const u8 *src)
{
- struct crypt_z990_des_ctx *dctx;
+ struct crypt_s390_des_ctx *dctx;
dctx = ctx;
- crypt_z990_km(KM_DEA_ENCRYPT, dctx->key, dst, src, DES_BLOCK_SIZE);
+ crypt_s390_km(KM_DEA_ENCRYPT, dctx->key, dst, src, DES_BLOCK_SIZE);
}
static void
des_decrypt(void *ctx, u8 *dst, const u8 *src)
{
- struct crypt_z990_des_ctx *dctx;
+ struct crypt_s390_des_ctx *dctx;
dctx = ctx;
- crypt_z990_km(KM_DEA_DECRYPT, dctx->key, dst, src, DES_BLOCK_SIZE);
+ crypt_s390_km(KM_DEA_DECRYPT, dctx->key, dst, src, DES_BLOCK_SIZE);
}
static struct crypto_alg des_alg = {
.cra_name = "des",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypt_z990_des_ctx),
+ .cra_ctxsize = sizeof(struct crypt_s390_des_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(des_alg.cra_list),
.cra_u = { .cipher = {
@@ -111,7 +111,7 @@ static int
des3_128_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
{
int i, ret;
- struct crypt_z990_des3_128_ctx *dctx;
+ struct crypt_s390_des3_128_ctx *dctx;
const u8* temp_key = key;
dctx = ctx;
@@ -132,20 +132,20 @@ des3_128_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
static void
des3_128_encrypt(void *ctx, u8 *dst, const u8 *src)
{
- struct crypt_z990_des3_128_ctx *dctx;
+ struct crypt_s390_des3_128_ctx *dctx;
dctx = ctx;
- crypt_z990_km(KM_TDEA_128_ENCRYPT, dctx->key, dst, (void*)src,
+ crypt_s390_km(KM_TDEA_128_ENCRYPT, dctx->key, dst, (void*)src,
DES3_128_BLOCK_SIZE);
}
static void
des3_128_decrypt(void *ctx, u8 *dst, const u8 *src)
{
- struct crypt_z990_des3_128_ctx *dctx;
+ struct crypt_s390_des3_128_ctx *dctx;
dctx = ctx;
- crypt_z990_km(KM_TDEA_128_DECRYPT, dctx->key, dst, (void*)src,
+ crypt_s390_km(KM_TDEA_128_DECRYPT, dctx->key, dst, (void*)src,
DES3_128_BLOCK_SIZE);
}
@@ -153,7 +153,7 @@ static struct crypto_alg des3_128_alg = {
.cra_name = "des3_ede128",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES3_128_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypt_z990_des3_128_ctx),
+ .cra_ctxsize = sizeof(struct crypt_s390_des3_128_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(des3_128_alg.cra_list),
.cra_u = { .cipher = {
@@ -181,7 +181,7 @@ static int
des3_192_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
{
int i, ret;
- struct crypt_z990_des3_192_ctx *dctx;
+ struct crypt_s390_des3_192_ctx *dctx;
const u8* temp_key;
dctx = ctx;
@@ -206,20 +206,20 @@ des3_192_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags)
static void
des3_192_encrypt(void *ctx, u8 *dst, const u8 *src)
{
- struct crypt_z990_des3_192_ctx *dctx;
+ struct crypt_s390_des3_192_ctx *dctx;
dctx = ctx;
- crypt_z990_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src,
+ crypt_s390_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src,
DES3_192_BLOCK_SIZE);
}
static void
des3_192_decrypt(void *ctx, u8 *dst, const u8 *src)
{
- struct crypt_z990_des3_192_ctx *dctx;
+ struct crypt_s390_des3_192_ctx *dctx;
dctx = ctx;
- crypt_z990_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src,
+ crypt_s390_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src,
DES3_192_BLOCK_SIZE);
}
@@ -227,7 +227,7 @@ static struct crypto_alg des3_192_alg = {
.cra_name = "des3_ede",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES3_192_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypt_z990_des3_192_ctx),
+ .cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(des3_192_alg.cra_list),
.cra_u = { .cipher = {
@@ -245,9 +245,9 @@ init(void)
{
int ret;
- if (!crypt_z990_func_available(KM_DEA_ENCRYPT) ||
- !crypt_z990_func_available(KM_TDEA_128_ENCRYPT) ||
- !crypt_z990_func_available(KM_TDEA_192_ENCRYPT)){
+ if (!crypt_s390_func_available(KM_DEA_ENCRYPT) ||
+ !crypt_s390_func_available(KM_TDEA_128_ENCRYPT) ||
+ !crypt_s390_func_available(KM_TDEA_192_ENCRYPT)){
return -ENOSYS;
}
@@ -262,7 +262,7 @@ init(void)
return -EEXIST;
}
- printk(KERN_INFO "crypt_z990: des_z990 loaded.\n");
+ printk(KERN_INFO "crypt_s390: des_s390 loaded.\n");
return 0;
}
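
The init() hunk above shows the pattern the renamed drivers share: probe the CPACF function codes first, refuse to load with -ENOSYS when the machine cannot execute them, and only then register the ciphers with the crypto API. A condensed skeleton of that gate follows; it uses only the calls visible in this diff, assumes the surrounding kernel context, and leaves out the per-algorithm error unwinding the real init() performs.

        static int __init probe_and_register(void)
        {
                /* Hardware gate: never register algorithms the CPU
                 * cannot actually execute. */
                if (!crypt_s390_func_available(KM_DEA_ENCRYPT) ||
                    !crypt_s390_func_available(KM_TDEA_128_ENCRYPT) ||
                    !crypt_s390_func_available(KM_TDEA_192_ENCRYPT))
                        return -ENOSYS;

                return crypto_register_alg(&des_alg);  /* des3_128/des3_192 follow */
        }
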
diff --git a/arch/s390/crypto/sha1_z990.c b/arch/s390/crypto/sha1_s390.c
index 298174ddf5b..98c896b86dc 100644
--- a/arch/s390/crypto/sha1_z990.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -1,7 +1,7 @@
/*
* Cryptographic API.
*
- * z990 implementation of the SHA1 Secure Hash Algorithm.
+ * s390 implementation of the SHA1 Secure Hash Algorithm.
*
* Derived from cryptoapi implementation, adapted for in-place
* scatterlist interface. Originally based on the public domain
@@ -28,22 +28,22 @@
#include <linux/crypto.h>
#include <asm/scatterlist.h>
#include <asm/byteorder.h>
-#include "crypt_z990.h"
+#include "crypt_s390.h"
#define SHA1_DIGEST_SIZE 20
#define SHA1_BLOCK_SIZE 64
-struct crypt_z990_sha1_ctx {
- u64 count;
- u32 state[5];
+struct crypt_s390_sha1_ctx {
+ u64 count;
+ u32 state[5];
u32 buf_len;
- u8 buffer[2 * SHA1_BLOCK_SIZE];
+ u8 buffer[2 * SHA1_BLOCK_SIZE];
};
static void
sha1_init(void *ctx)
{
- static const struct crypt_z990_sha1_ctx initstate = {
+ static const struct crypt_s390_sha1_ctx initstate = {
.state = {
0x67452301,
0xEFCDAB89,
@@ -58,7 +58,7 @@ sha1_init(void *ctx)
static void
sha1_update(void *ctx, const u8 *data, unsigned int len)
{
- struct crypt_z990_sha1_ctx *sctx;
+ struct crypt_s390_sha1_ctx *sctx;
long imd_len;
sctx = ctx;
@@ -69,7 +69,7 @@ sha1_update(void *ctx, const u8 *data, unsigned int len)
//complete full block and hash
memcpy(sctx->buffer + sctx->buf_len, data,
SHA1_BLOCK_SIZE - sctx->buf_len);
- crypt_z990_kimd(KIMD_SHA_1, sctx->state, sctx->buffer,
+ crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer,
SHA1_BLOCK_SIZE);
data += SHA1_BLOCK_SIZE - sctx->buf_len;
len -= SHA1_BLOCK_SIZE - sctx->buf_len;
@@ -79,7 +79,7 @@ sha1_update(void *ctx, const u8 *data, unsigned int len)
//rest of data contains full blocks?
imd_len = len & ~0x3ful;
if (imd_len){
- crypt_z990_kimd(KIMD_SHA_1, sctx->state, data, imd_len);
+ crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len);
data += imd_len;
len -= imd_len;
}
@@ -92,7 +92,7 @@ sha1_update(void *ctx, const u8 *data, unsigned int len)
static void
-pad_message(struct crypt_z990_sha1_ctx* sctx)
+pad_message(struct crypt_s390_sha1_ctx* sctx)
{
int index;
@@ -113,11 +113,11 @@ pad_message(struct crypt_z990_sha1_ctx* sctx)
static void
sha1_final(void* ctx, u8 *out)
{
- struct crypt_z990_sha1_ctx *sctx = ctx;
+ struct crypt_s390_sha1_ctx *sctx = ctx;
//must perform manual padding
pad_message(sctx);
- crypt_z990_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len);
+ crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len);
//copy digest to out
memcpy(out, sctx->state, SHA1_DIGEST_SIZE);
/* Wipe context */
@@ -128,7 +128,7 @@ static struct crypto_alg alg = {
.cra_name = "sha1",
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypt_z990_sha1_ctx),
+ .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .digest = {
@@ -143,10 +143,10 @@ init(void)
{
int ret = -ENOSYS;
- if (crypt_z990_func_available(KIMD_SHA_1)){
+ if (crypt_s390_func_available(KIMD_SHA_1)){
ret = crypto_register_alg(&alg);
if (ret == 0){
- printk(KERN_INFO "crypt_z990: sha1_z990 loaded.\n");
+ printk(KERN_INFO "crypt_s390: sha1_s390 loaded.\n");
}
}
return ret;
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
new file mode 100644
index 00000000000..b75bdbd476c
--- /dev/null
+++ b/arch/s390/crypto/sha256_s390.c
@@ -0,0 +1,151 @@
+/*
+ * Cryptographic API.
+ *
+ * s390 implementation of the SHA256 Secure Hash Algorithm.
+ *
+ * s390 Version:
+ * Copyright (C) 2005 IBM Deutschland GmbH, IBM Corporation
+ * Author(s): Jan Glauber (jang@de.ibm.com)
+ *
+ * Derived from "crypto/sha256.c"
+ * and "arch/s390/crypto/sha1_s390.c"
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+
+#include "crypt_s390.h"
+
+#define SHA256_DIGEST_SIZE 32
+#define SHA256_BLOCK_SIZE 64
+
+struct s390_sha256_ctx {
+ u64 count;
+ u32 state[8];
+ u8 buf[2 * SHA256_BLOCK_SIZE];
+};
+
+static void sha256_init(void *ctx)
+{
+ struct s390_sha256_ctx *sctx = ctx;
+
+ sctx->state[0] = 0x6a09e667;
+ sctx->state[1] = 0xbb67ae85;
+ sctx->state[2] = 0x3c6ef372;
+ sctx->state[3] = 0xa54ff53a;
+ sctx->state[4] = 0x510e527f;
+ sctx->state[5] = 0x9b05688c;
+ sctx->state[6] = 0x1f83d9ab;
+ sctx->state[7] = 0x5be0cd19;
+ sctx->count = 0;
+ memset(sctx->buf, 0, sizeof(sctx->buf));
+}
+
+static void sha256_update(void *ctx, const u8 *data, unsigned int len)
+{
+ struct s390_sha256_ctx *sctx = ctx;
+ unsigned int index;
+
+ /* how much is already in the buffer? */
+ index = sctx->count / 8 & 0x3f;
+
+ /* update message bit length */
+ sctx->count += len * 8;
+
+ /* process one block */
+ if ((index + len) >= SHA256_BLOCK_SIZE) {
+ memcpy(sctx->buf + index, data, SHA256_BLOCK_SIZE - index);
+ crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf,
+ SHA256_BLOCK_SIZE);
+ data += SHA256_BLOCK_SIZE - index;
+ len -= SHA256_BLOCK_SIZE - index;
+ }
+
+ /* anything left? */
+ if (len)
+ memcpy(sctx->buf + index , data, len);
+}
+
+static void pad_message(struct s390_sha256_ctx* sctx)
+{
+ int index, end;
+
+ index = sctx->count / 8 & 0x3f;
+ end = index < 56 ? SHA256_BLOCK_SIZE : 2 * SHA256_BLOCK_SIZE;
+
+ /* start pad with 1 */
+ sctx->buf[index] = 0x80;
+
+ /* pad with zeros */
+ index++;
+ memset(sctx->buf + index, 0x00, end - index - 8);
+
+ /* append message length */
+ memcpy(sctx->buf + end - 8, &sctx->count, sizeof sctx->count);
+
+ sctx->count = end * 8;
+}
+
+/* Add padding and return the message digest */
+static void sha256_final(void* ctx, u8 *out)
+{
+ struct s390_sha256_ctx *sctx = ctx;
+
+ /* must perform manual padding */
+ pad_message(sctx);
+
+ crypt_s390_kimd(KIMD_SHA_256, sctx->state, sctx->buf,
+ sctx->count / 8);
+
+ /* copy digest to out */
+ memcpy(out, sctx->state, SHA256_DIGEST_SIZE);
+
+ /* wipe context */
+ memset(sctx, 0, sizeof *sctx);
+}
+
+static struct crypto_alg alg = {
+ .cra_name = "sha256",
+ .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s390_sha256_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(alg.cra_list),
+ .cra_u = { .digest = {
+ .dia_digestsize = SHA256_DIGEST_SIZE,
+ .dia_init = sha256_init,
+ .dia_update = sha256_update,
+ .dia_final = sha256_final } }
+};
+
+static int init(void)
+{
+ int ret;
+
+ if (!crypt_s390_func_available(KIMD_SHA_256))
+ return -ENOSYS;
+
+ ret = crypto_register_alg(&alg);
+ if (ret != 0)
+ printk(KERN_INFO "crypt_s390: sha256_s390 couldn't be loaded.");
+ return ret;
+}
+
+static void __exit fini(void)
+{
+ crypto_unregister_alg(&alg);
+}
+
+module_init(init);
+module_exit(fini);
+
+MODULE_ALIAS("sha256");
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm");
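
pad_message() in the new sha256_s390.c implements standard Merkle-Damgaard padding: a 0x80 byte after the buffered tail, zero fill, and the 64-bit message bit length in the last eight bytes of either one or two blocks, depending on whether the tail leaves room for the length field. The kernel code can memcpy its native big-endian count directly; the standalone sketch below reproduces the same arithmetic in portable C, takes the total length in bytes rather than bits, and serialises the bit count explicitly. It is an illustration, not the kernel helper.

        #include <stdint.h>
        #include <string.h>

        #define BLK 64

        /* Pad the tail of a message into buf (which must hold 2*BLK bytes).
         * 'total' is the number of message bytes hashed so far, including
         * the 'total % 64' tail bytes already at the start of buf.  Returns
         * how many bytes of buf must still go through the compression step. */
        static size_t sha256_pad(uint8_t buf[2 * BLK], uint64_t total)
        {
                size_t index = total & 0x3f;              /* bytes in the partial block */
                size_t end = index < 56 ? BLK : 2 * BLK;  /* room left for the length? */
                uint64_t bits = total * 8;
                int i;

                buf[index] = 0x80;                        /* mandatory 1-bit */
                memset(buf + index + 1, 0, end - index - 1 - 8);
                for (i = 0; i < 8; i++)                   /* big-endian bit count */
                        buf[end - 8 + i] = (uint8_t)(bits >> (56 - 8 * i));
                return end;
        }
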
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 45d44c6bb39..7d23edc6fac 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,12 +1,12 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.14-rc1
-# Wed Sep 14 16:46:19 2005
+# Linux kernel version: 2.6.15-rc2
+# Mon Nov 21 13:51:30 2005
#
CONFIG_MMU=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
-CONFIG_ARCH_S390=y
+CONFIG_S390=y
CONFIG_UID16=y
#
@@ -65,15 +65,31 @@ CONFIG_KMOD=y
CONFIG_STOP_MACHINE=y
#
+# Block layer
+#
+# CONFIG_LBD is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
# Base setup
#
#
# Processor type and features
#
-# CONFIG_ARCH_S390X is not set
# CONFIG_64BIT is not set
-CONFIG_ARCH_S390_31=y
CONFIG_SMP=y
CONFIG_NR_CPUS=32
CONFIG_HOTPLUG_CPU=y
@@ -97,6 +113,7 @@ CONFIG_FLATMEM_MANUAL=y
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
#
# I/O subsystem configuration
@@ -188,10 +205,18 @@ CONFIG_IPV6=y
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CLK_JIFFIES=y
# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
# CONFIG_NET_SCH_CLK_CPU is not set
+
+#
+# Queueing/Scheduling
+#
CONFIG_NET_SCH_CBQ=m
# CONFIG_NET_SCH_HTB is not set
# CONFIG_NET_SCH_HFSC is not set
@@ -204,8 +229,10 @@ CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
# CONFIG_NET_SCH_NETEM is not set
# CONFIG_NET_SCH_INGRESS is not set
-CONFIG_NET_QOS=y
-CONFIG_NET_ESTIMATOR=y
+
+#
+# Classification
+#
CONFIG_NET_CLS=y
# CONFIG_NET_CLS_BASIC is not set
CONFIG_NET_CLS_TCINDEX=m
@@ -214,18 +241,18 @@ CONFIG_NET_CLS_ROUTE=y
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
# CONFIG_CLS_U32_PERF is not set
-# CONFIG_NET_CLS_IND is not set
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
# CONFIG_NET_EMATCH is not set
# CONFIG_NET_CLS_ACT is not set
CONFIG_NET_CLS_POLICE=y
+# CONFIG_NET_CLS_IND is not set
+CONFIG_NET_ESTIMATOR=y
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
-# CONFIG_NETFILTER_NETLINK is not set
# CONFIG_HAMRADIO is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
@@ -276,6 +303,7 @@ CONFIG_SCSI_FC_ATTRS=y
#
# SCSI low-level drivers
#
+# CONFIG_ISCSI_TCP is not set
# CONFIG_SCSI_SATA is not set
# CONFIG_SCSI_DEBUG is not set
CONFIG_ZFCP=y
@@ -292,7 +320,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_LBD is not set
# CONFIG_CDROM_PKTCDVD is not set
#
@@ -305,15 +332,8 @@ CONFIG_DASD_PROFILE=y
CONFIG_DASD_ECKD=y
CONFIG_DASD_FBA=y
CONFIG_DASD_DIAG=y
+CONFIG_DASD_EER=m
# CONFIG_DASD_CMB is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
# CONFIG_ATA_OVER_ETH is not set
#
@@ -378,7 +398,6 @@ CONFIG_S390_TAPE_34XX=m
# CONFIG_VMLOGRDR is not set
# CONFIG_VMCP is not set
# CONFIG_MONREADER is not set
-# CONFIG_DCSS_SHM is not set
#
# Cryptographic devices
@@ -593,6 +612,8 @@ CONFIG_DEBUG_PREEMPT=y
# CONFIG_DEBUG_KOBJECT is not set
# CONFIG_DEBUG_INFO is not set
CONFIG_DEBUG_FS=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_RCU_TORTURE_TEST is not set
#
# Security options
@@ -609,17 +630,19 @@ CONFIG_CRYPTO=y
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_SHA1 is not set
-# CONFIG_CRYPTO_SHA1_Z990 is not set
+# CONFIG_CRYPTO_SHA1_S390 is not set
# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA256_S390 is not set
# CONFIG_CRYPTO_SHA512 is not set
# CONFIG_CRYPTO_WP512 is not set
# CONFIG_CRYPTO_TGR192 is not set
# CONFIG_CRYPTO_DES is not set
-# CONFIG_CRYPTO_DES_Z990 is not set
+# CONFIG_CRYPTO_DES_S390 is not set
# CONFIG_CRYPTO_BLOWFISH is not set
# CONFIG_CRYPTO_TWOFISH is not set
# CONFIG_CRYPTO_SERPENT is not set
# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_AES_S390 is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
# CONFIG_CRYPTO_TEA is not set
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 7434c32bc63..9269b5788fa 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -8,31 +8,25 @@ obj-y := bitmap.o traps.o time.o process.o \
setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
semaphore.o s390_ext.o debug.o profile.o irq.o reipl_diag.o
+obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
+obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
+
extra-y += head.o init_task.o vmlinux.lds
obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_S390_SUPPORT) += compat_linux.o compat_signal.o \
- compat_ioctl.o compat_wrapper.o \
- compat_exec_domain.o
+obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
+ compat_wrapper.o compat_exec_domain.o
obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
-obj-$(CONFIG_ARCH_S390_31) += entry.o reipl.o
-obj-$(CONFIG_ARCH_S390X) += entry64.o reipl64.o
-
obj-$(CONFIG_VIRT_TIMER) += vtime.o
# Kexec part
S390_KEXEC_OBJS := machine_kexec.o crash.o
-ifeq ($(CONFIG_ARCH_S390X),y)
-S390_KEXEC_OBJS += relocate_kernel64.o
-else
-S390_KEXEC_OBJS += relocate_kernel.o
-endif
+S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS)
-
#
# This is just to get the dependencies...
#
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c
index 03ba5893f17..1f451c2cb07 100644
--- a/arch/s390/kernel/binfmt_elf32.c
+++ b/arch/s390/kernel/binfmt_elf32.c
@@ -112,7 +112,7 @@ static inline int dump_regs32(struct pt_regs *ptregs, elf_gregset_t *regs)
static inline int dump_task_regs32(struct task_struct *tsk, elf_gregset_t *regs)
{
- struct pt_regs *ptregs = __KSTK_PTREGS(tsk);
+ struct pt_regs *ptregs = task_pt_regs(tsk);
int i;
memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
diff --git a/arch/s390/kernel/compat_ioctl.c b/arch/s390/kernel/compat_ioctl.c
deleted file mode 100644
index 6504c4e6998..00000000000
--- a/arch/s390/kernel/compat_ioctl.c
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
- *
- * S390 version
- * Copyright (C) 2000-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Gerhard Tonn (ton@de.ibm.com)
- * Arnd Bergmann (arndb@de.ibm.com)
- *
- * Original implementation from 32-bit Sparc compat code which is
- * Copyright (C) 2000 Silicon Graphics, Inc.
- * Written by Ulf Carlsson (ulfc@engr.sgi.com)
- */
-
-#include "compat_linux.h"
-#define INCLUDES
-#define CODE
-#include "../../../fs/compat_ioctl.c"
-#include <asm/dasd.h>
-#include <asm/cmb.h>
-#include <asm/tape390.h>
-#include <asm/ccwdev.h>
-#include "../../../drivers/s390/char/raw3270.h"
-
-static int do_ioctl32_pointer(unsigned int fd, unsigned int cmd,
- unsigned long arg, struct file *f)
-{
- return sys_ioctl(fd, cmd, (unsigned long)compat_ptr(arg));
-}
-
-static int do_ioctl32_ulong(unsigned int fd, unsigned int cmd,
- unsigned long arg, struct file *f)
-{
- return sys_ioctl(fd, cmd, arg);
-}
-
-#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL((cmd),(ioctl_trans_handler_t)do_ioctl32_pointer)
-#define ULONG_IOCTL(cmd) HANDLE_IOCTL((cmd),(ioctl_trans_handler_t)do_ioctl32_ulong)
-#define HANDLE_IOCTL(cmd,handler) { (cmd), (ioctl_trans_handler_t)(handler), NULL },
-
-struct ioctl_trans ioctl_start[] = {
-/* architecture independent ioctls */
-#include <linux/compat_ioctl.h>
-#define DECLARES
-#include "../../../fs/compat_ioctl.c"
-
-/* s390 only ioctls */
-COMPATIBLE_IOCTL(DASDAPIVER)
-COMPATIBLE_IOCTL(BIODASDDISABLE)
-COMPATIBLE_IOCTL(BIODASDENABLE)
-COMPATIBLE_IOCTL(BIODASDRSRV)
-COMPATIBLE_IOCTL(BIODASDRLSE)
-COMPATIBLE_IOCTL(BIODASDSLCK)
-COMPATIBLE_IOCTL(BIODASDINFO)
-COMPATIBLE_IOCTL(BIODASDINFO2)
-COMPATIBLE_IOCTL(BIODASDFMT)
-COMPATIBLE_IOCTL(BIODASDPRRST)
-COMPATIBLE_IOCTL(BIODASDQUIESCE)
-COMPATIBLE_IOCTL(BIODASDRESUME)
-COMPATIBLE_IOCTL(BIODASDPRRD)
-COMPATIBLE_IOCTL(BIODASDPSRD)
-COMPATIBLE_IOCTL(BIODASDGATTR)
-COMPATIBLE_IOCTL(BIODASDSATTR)
-COMPATIBLE_IOCTL(BIODASDCMFENABLE)
-COMPATIBLE_IOCTL(BIODASDCMFDISABLE)
-COMPATIBLE_IOCTL(BIODASDREADALLCMB)
-
-COMPATIBLE_IOCTL(TUBICMD)
-COMPATIBLE_IOCTL(TUBOCMD)
-COMPATIBLE_IOCTL(TUBGETI)
-COMPATIBLE_IOCTL(TUBGETO)
-COMPATIBLE_IOCTL(TUBSETMOD)
-COMPATIBLE_IOCTL(TUBGETMOD)
-
-COMPATIBLE_IOCTL(TAPE390_DISPLAY)
-
-/* s390 doesn't need handlers here */
-COMPATIBLE_IOCTL(TIOCGSERIAL)
-COMPATIBLE_IOCTL(TIOCSSERIAL)
-};
-
-int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index ed877d0f27e..bf9a7a361b3 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -55,6 +55,7 @@
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#include <linux/binfmts.h>
+#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/ptrace.h>
@@ -279,7 +280,7 @@ asmlinkage long sys32_getegid16(void)
static inline long get_tv32(struct timeval *o, struct compat_timeval *i)
{
- return (!access_ok(VERIFY_READ, tv32, sizeof(*tv32)) ||
+ return (!access_ok(VERIFY_READ, o, sizeof(*o)) ||
(__get_user(o->tv_sec, &i->tv_sec) ||
__get_user(o->tv_usec, &i->tv_usec)));
}
@@ -1014,38 +1015,6 @@ asmlinkage long sys32_clone(struct pt_regs regs)
}
/*
- * Wrapper function for sys_timer_create.
- */
-extern asmlinkage long
-sys_timer_create(clockid_t, struct sigevent *, timer_t *);
-
-asmlinkage long
-sys32_timer_create(clockid_t which_clock, struct compat_sigevent *se32,
- timer_t *timer_id)
-{
- struct sigevent se;
- timer_t ktimer_id;
- mm_segment_t old_fs;
- long ret;
-
- if (se32 == NULL)
- return sys_timer_create(which_clock, NULL, timer_id);
-
- if (get_compat_sigevent(&se, se32))
- return -EFAULT;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- ret = sys_timer_create(which_clock, &se, &ktimer_id);
- set_fs(old_fs);
-
- if (!ret)
- ret = put_user (ktimer_id, timer_id);
-
- return ret;
-}
-
-/*
* 31 bit emulation wrapper functions for sys_fadvise64/fadvise64_64.
* These need to rewrite the advise values for POSIX_FADV_{DONTNEED,NOREUSE}
* because the 31 bit values differ from the 64 bit values.
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 4ff6808456e..fa2b3bc22f2 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -467,8 +467,6 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
if (err)
goto badframe;
- /* It is more difficult to avoid calling this function than to
- call it and ignore errors. */
set_fs (KERNEL_DS);
do_sigaltstack((stack_t __user *)&st, NULL, regs->gprs[15]);
set_fs (old_fs);
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 23fe94e5868..cfde1905d07 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1289,7 +1289,7 @@ sys32_timer_create_wrapper:
lgfr %r2,%r2 # timer_t (int)
llgtr %r3,%r3 # struct compat_sigevent *
llgtr %r4,%r4 # timer_t *
- jg sys32_timer_create
+ jg compat_sys_timer_create
.globl sys32_timer_settime_wrapper
sys32_timer_settime_wrapper:
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index d47fecb42cc..4ef44e536b2 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -39,7 +39,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
if (response != NULL && rlen > 0) {
memset(response, 0, rlen);
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
asm volatile ( "lra 2,0(%2)\n"
"lr 4,%3\n"
"o 4,%6\n"
@@ -55,7 +55,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
: "a" (cpcmd_buf), "d" (cmdlen),
"a" (response), "d" (rlen), "m" (mask)
: "cc", "2", "3", "4", "5" );
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
asm volatile ( "lrag 2,0(%2)\n"
"lgr 4,%3\n"
"o 4,%6\n"
@@ -73,11 +73,11 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
: "a" (cpcmd_buf), "d" (cmdlen),
"a" (response), "d" (rlen), "m" (mask)
: "cc", "2", "3", "4", "5" );
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
EBCASC(response, rlen);
} else {
return_len = 0;
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
asm volatile ( "lra 2,0(%1)\n"
"lr 3,%2\n"
"diag 2,3,0x8\n"
@@ -85,7 +85,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
: "=d" (return_code)
: "a" (cpcmd_buf), "d" (cmdlen)
: "2", "3" );
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
asm volatile ( "lrag 2,0(%1)\n"
"lgr 3,%2\n"
"sam31\n"
@@ -95,7 +95,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
: "=d" (return_code)
: "a" (cpcmd_buf), "d" (cmdlen)
: "2", "3" );
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
}
spin_unlock_irqrestore(&cpcmd_lock, flags);
if (response_code != NULL)
@@ -105,7 +105,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
EXPORT_SYMBOL(__cpcmd);
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
char *lowbuf;
@@ -129,4 +129,4 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
}
EXPORT_SYMBOL(cpcmd);
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/crash.c b/arch/s390/kernel/crash.c
index 7bd169c58b0..926cceeae0f 100644
--- a/arch/s390/kernel/crash.c
+++ b/arch/s390/kernel/crash.c
@@ -10,8 +10,6 @@
#include <linux/threads.h>
#include <linux/kexec.h>
-note_buf_t crash_notes[NR_CPUS];
-
void machine_crash_shutdown(struct pt_regs *regs)
{
}
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 4eb71ffcf48..369ab4413ec 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -213,7 +213,7 @@ sysc_nr_ok:
mvc SP_ARGS(8,%r15),SP_R7(%r15)
sysc_do_restart:
larl %r10,sys_call_table
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
tm __TI_flags+5(%r9),(_TIF_31BIT>>16) # running in 31 bit mode ?
jno sysc_noemu
larl %r10,sys_call_table_emu # use 31 bit emulation system calls
@@ -361,7 +361,7 @@ sys_clone_glue:
la %r2,SP_PTREGS(%r15) # load pt_regs
jg sys_clone # branch to sys_clone
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
sys32_clone_glue:
la %r2,SP_PTREGS(%r15) # load pt_regs
jg sys32_clone # branch to sys32_clone
@@ -383,7 +383,7 @@ sys_execve_glue:
bnz 0(%r12) # it did fail -> store result in gpr2
b 6(%r12) # SKIP STG 2,SP_R2(15) in
# system_call/sysc_tracesys
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
sys32_execve_glue:
la %r2,SP_PTREGS(%r15) # load pt_regs
lgr %r12,%r14 # save return address
@@ -398,7 +398,7 @@ sys_sigreturn_glue:
la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
jg sys_sigreturn # branch to sys_sigreturn
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
sys32_sigreturn_glue:
la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
jg sys32_sigreturn # branch to sys32_sigreturn
@@ -408,7 +408,7 @@ sys_rt_sigreturn_glue:
la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
jg sys_rt_sigreturn # branch to sys_sigreturn
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
sys32_rt_sigreturn_glue:
la %r2,SP_PTREGS(%r15) # load pt_regs as parameter
jg sys32_rt_sigreturn # branch to sys32_sigreturn
@@ -429,7 +429,7 @@ sys_sigsuspend_glue:
la %r14,6(%r14) # skip store of return value
jg sys_sigsuspend # branch to sys_sigsuspend
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
sys32_sigsuspend_glue:
llgfr %r4,%r4 # unsigned long
lgr %r5,%r4 # move mask back
@@ -449,7 +449,7 @@ sys_rt_sigsuspend_glue:
la %r14,6(%r14) # skip store of return value
jg sys_rt_sigsuspend # branch to sys_rt_sigsuspend
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
sys32_rt_sigsuspend_glue:
llgfr %r3,%r3 # size_t
lgr %r4,%r3 # move sigsetsize parameter
@@ -464,7 +464,7 @@ sys_sigaltstack_glue:
la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
jg sys_sigaltstack # branch to sys_sigreturn
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
sys32_sigaltstack_glue:
la %r4,SP_PTREGS(%r15) # load pt_regs as parameter
jg sys32_sigaltstack_wrapper # branch to sys_sigreturn
@@ -1009,7 +1009,7 @@ sys_call_table:
#include "syscalls.S"
#undef SYSCALL
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
#define SYSCALL(esa,esame,emu) .long emu
.globl sys_call_table_emu
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index d31a97c89f6..ea88d066bf0 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -30,7 +30,7 @@
#include <asm/thread_info.h>
#include <asm/page.h>
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
#define ARCH_OFFSET 4
#else
#define ARCH_OFFSET 0
@@ -539,7 +539,7 @@ ipl_devno:
.word 0
.endm
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
#include "head64.S"
#else
#include "head31.S"
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 5aa71b05b8a..f0ed5c642c7 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -85,7 +85,7 @@ kexec_halt_all_cpus(void *kernel_image)
pfault_fini();
#endif
- if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
+ if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
signal_processor(smp_processor_id(), sigp_stop);
/* Wait for all other cpus to enter stopped state */
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 607d506689c..c271cdab58e 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -37,11 +37,11 @@
#define DEBUGP(fmt , ...)
#endif
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
#define PLT_ENTRY_SIZE 12
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
#define PLT_ENTRY_SIZE 20
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
void *module_alloc(unsigned long size)
{
@@ -294,17 +294,17 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
unsigned int *ip;
ip = me->module_core + me->arch.plt_offset +
info->plt_offset;
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
ip[1] = 0x100607f1;
ip[2] = val;
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
ip[1] = 0x100a0004;
ip[2] = 0x07f10000;
ip[3] = (unsigned int) (val >> 32);
ip[4] = (unsigned int) val;
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
info->plt_initialized = 1;
}
if (r_type == R_390_PLTOFF16 ||
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 78b64fe5e7c..2ff90a1a105 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -153,7 +153,7 @@ void show_regs(struct pt_regs *regs)
{
struct task_struct *tsk = current;
- printk("CPU: %d %s\n", tsk->thread_info->cpu, print_tainted());
+ printk("CPU: %d %s\n", task_thread_info(tsk)->cpu, print_tainted());
printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
current->comm, current->pid, (void *) tsk,
(void *) tsk->thread.ksp);
@@ -217,8 +217,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
struct pt_regs childregs;
} *frame;
- frame = ((struct fake_frame *)
- (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
+ frame = container_of(task_pt_regs(p), struct fake_frame, childregs);
p->thread.ksp = (unsigned long) frame;
/* Store access registers to kernel stack of new process. */
frame->childregs = *regs;
@@ -235,7 +234,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
/* Save access registers to new thread structure. */
save_access_regs(&p->thread.acrs[0]);
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
/*
* save fprs to current->thread.fp_regs to merge them with
* the emulated registers and then copy the result to the child.
@@ -247,7 +246,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
/* Set a new TLS ? */
if (clone_flags & CLONE_SETTLS)
p->thread.acrs[0] = regs->gprs[6];
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
/* Save the fpu registers to new thread structure. */
save_fp_regs(&p->thread.fp_regs);
p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _REGION_TABLE;
@@ -260,7 +259,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
p->thread.acrs[1] = (unsigned int) regs->gprs[6];
}
}
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
/* start new process with ar4 pointing to the correct address space */
p->thread.mm_segment = get_fs();
/* Don't copy debug registers */
@@ -339,51 +338,29 @@ out:
*/
int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
{
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
/*
* save fprs to current->thread.fp_regs to merge them with
* the emulated registers and then copy the result to the dump.
*/
save_fp_regs(&current->thread.fp_regs);
memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs));
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
save_fp_regs(fpregs);
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
return 1;
}
-/*
- * fill in the user structure for a core dump..
- */
-void dump_thread(struct pt_regs * regs, struct user * dump)
-{
-
-/* changed the size calculations - should hopefully work better. lbt */
- dump->magic = CMAGIC;
- dump->start_code = 0;
- dump->start_stack = regs->gprs[15] & ~(PAGE_SIZE - 1);
- dump->u_tsize = current->mm->end_code >> PAGE_SHIFT;
- dump->u_dsize = (current->mm->brk + PAGE_SIZE - 1) >> PAGE_SHIFT;
- dump->u_dsize -= dump->u_tsize;
- dump->u_ssize = 0;
- if (dump->start_stack < TASK_SIZE)
- dump->u_ssize = (TASK_SIZE - dump->start_stack) >> PAGE_SHIFT;
- memcpy(&dump->regs, regs, sizeof(s390_regs));
- dump_fpu (regs, &dump->regs.fp_regs);
- dump->regs.per_info = current->thread.per_info;
-}
-
unsigned long get_wchan(struct task_struct *p)
{
struct stack_frame *sf, *low, *high;
unsigned long return_address;
int count;
- if (!p || p == current || p->state == TASK_RUNNING || !p->thread_info)
+ if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
return 0;
- low = (struct stack_frame *) p->thread_info;
- high = (struct stack_frame *)
- ((unsigned long) p->thread_info + THREAD_SIZE) - 1;
+ low = task_stack_page(p);
+ high = (struct stack_frame *) task_pt_regs(p);
sf = (struct stack_frame *) (p->thread.ksp & PSW_ADDR_INSN);
if (sf <= low || sf > high)
return 0;
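
A recurring conversion in this patch (copy_thread(), get_wchan(), show_regs(), and the ptrace and ELF-dump hunks that follow) replaces open-coded arithmetic on tsk->thread_info with the task_stack_page()/task_pt_regs() accessors. The removed lines show the underlying idea: the user-mode register frame sits at the top of the task's kernel stack, so it can be derived from the stack base and THREAD_SIZE. The sketch below illustrates that layout calculation only; the exact s390 definition may add further slots (the removed sh code, for instance, subtracts an extra unsigned long), so treat the offsets as illustrative.

        #define THREAD_SIZE 8192               /* kernel stack size, illustrative */

        struct pt_regs { unsigned long gprs[16]; /* ... */ };

        /* Roughly what the removed open-coded expressions computed: the
         * pt_regs frame lives at the top end of the task's stack pages. */
        static struct pt_regs *example_task_pt_regs(void *stack_page)
        {
                return (struct pt_regs *)
                        ((char *)stack_page + THREAD_SIZE - sizeof(struct pt_regs));
        }
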
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 06afa3103ac..37dfe33dab7 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -42,7 +42,7 @@
#include <asm/uaccess.h>
#include <asm/unistd.h>
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"
#endif
@@ -52,14 +52,14 @@ FixPerRegisters(struct task_struct *task)
struct pt_regs *regs;
per_struct *per_info;
- regs = __KSTK_PTREGS(task);
+ regs = task_pt_regs(task);
per_info = (per_struct *) &task->thread.per_info;
per_info->control_regs.bits.em_instruction_fetch =
per_info->single_step | per_info->instruction_fetch;
if (per_info->single_step) {
per_info->control_regs.bits.starting_addr = 0;
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
if (test_thread_flag(TIF_31BIT))
per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
else
@@ -112,7 +112,7 @@ ptrace_disable(struct task_struct *child)
clear_single_step(child);
}
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
# define __ADDR_MASK 3
#else
# define __ADDR_MASK 7
@@ -138,7 +138,7 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
* an alignment of 4. Programmers from hell...
*/
mask = __ADDR_MASK;
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
if (addr >= (addr_t) &dummy->regs.acrs &&
addr < (addr_t) &dummy->regs.orig_gpr2)
mask = 3;
@@ -150,7 +150,7 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
/*
* psw and gprs are stored on the stack
*/
- tmp = *(addr_t *)((addr_t) &__KSTK_PTREGS(child)->psw + addr);
+ tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
if (addr == (addr_t) &dummy->regs.psw.mask)
/* Remove per bit from user psw. */
tmp &= ~PSW_MASK_PER;
@@ -160,7 +160,7 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
* access registers are stored in the thread structure
*/
offset = addr - (addr_t) &dummy->regs.acrs;
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
/*
* Very special case: old & broken 64 bit gdb reading
* from acrs[15]. Result is a 64 bit value. Read the
@@ -176,7 +176,7 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
/*
* orig_gpr2 is stored on the kernel stack
*/
- tmp = (addr_t) __KSTK_PTREGS(child)->orig_gpr2;
+ tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
/*
@@ -218,7 +218,7 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
* an alignment of 4. Programmers from hell indeed...
*/
mask = __ADDR_MASK;
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
if (addr >= (addr_t) &dummy->regs.acrs &&
addr < (addr_t) &dummy->regs.orig_gpr2)
mask = 3;
@@ -231,26 +231,26 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
* psw and gprs are stored on the stack
*/
if (addr == (addr_t) &dummy->regs.psw.mask &&
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
data != PSW_MASK_MERGE(PSW_USER32_BITS, data) &&
#endif
data != PSW_MASK_MERGE(PSW_USER_BITS, data))
/* Invalid psw mask. */
return -EINVAL;
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
if (addr == (addr_t) &dummy->regs.psw.addr)
/* I'd like to reject addresses without the
high order bit but older gdb's rely on it */
data |= PSW_ADDR_AMODE;
#endif
- *(addr_t *)((addr_t) &__KSTK_PTREGS(child)->psw + addr) = data;
+ *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
/*
* access registers are stored in the thread structure
*/
offset = addr - (addr_t) &dummy->regs.acrs;
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
/*
* Very special case: old & broken 64 bit gdb writing
* to acrs[15] with a 64 bit value. Ignore the lower
@@ -267,7 +267,7 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
/*
* orig_gpr2 is stored on the kernel stack
*/
- __KSTK_PTREGS(child)->orig_gpr2 = data;
+ task_pt_regs(child)->orig_gpr2 = data;
} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
/*
@@ -357,7 +357,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
return ptrace_request(child, request, addr, data);
}
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
/*
* Now the fun part starts... a 31 bit program running in the
* 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
@@ -393,15 +393,15 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
*/
if (addr == (addr_t) &dummy32->regs.psw.mask) {
/* Fake a 31 bit psw mask. */
- tmp = (__u32)(__KSTK_PTREGS(child)->psw.mask >> 32);
+ tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp);
} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
/* Fake a 31 bit psw address. */
- tmp = (__u32) __KSTK_PTREGS(child)->psw.addr |
+ tmp = (__u32) task_pt_regs(child)->psw.addr |
PSW32_ADDR_AMODE31;
} else {
/* gpr 0-15 */
- tmp = *(__u32 *)((addr_t) &__KSTK_PTREGS(child)->psw +
+ tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw +
addr*2 + 4);
}
} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
@@ -415,7 +415,7 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
/*
* orig_gpr2 is stored on the kernel stack
*/
- tmp = *(__u32*)((addr_t) &__KSTK_PTREGS(child)->orig_gpr2 + 4);
+ tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
/*
@@ -472,15 +472,15 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp))
/* Invalid psw mask. */
return -EINVAL;
- __KSTK_PTREGS(child)->psw.mask =
+ task_pt_regs(child)->psw.mask =
PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32);
} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
/* Build a 64 bit psw address from 31 bit address. */
- __KSTK_PTREGS(child)->psw.addr =
+ task_pt_regs(child)->psw.addr =
(__u64) tmp & PSW32_ADDR_INSN;
} else {
/* gpr 0-15 */
- *(__u32*)((addr_t) &__KSTK_PTREGS(child)->psw
+ *(__u32*)((addr_t) &task_pt_regs(child)->psw
+ addr*2 + 4) = tmp;
}
} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
@@ -494,7 +494,7 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
/*
* orig_gpr2 is stored on the kernel stack
*/
- *(__u32*)((addr_t) &__KSTK_PTREGS(child)->orig_gpr2 + 4) = tmp;
+ *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
/*
@@ -629,7 +629,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
return peek_user(child, addr, data);
if (request == PTRACE_POKEUSR && addr == PT_IEEE_IP)
return poke_user(child, addr, data);
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
if (request == PTRACE_PEEKUSR &&
addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT))
return peek_user_emu31(child, addr, data);
@@ -695,7 +695,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
/* Do requests that differ for 31/64 bit */
default:
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
if (test_thread_flag(TIF_31BIT))
return do_ptrace_emu31(child, request, addr, data);
#endif
@@ -712,35 +712,18 @@ sys_ptrace(long request, long pid, long addr, long data)
int ret;
lock_kernel();
-
if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- ret = -EPERM;
- if (current->ptrace & PT_PTRACED)
- goto out;
- ret = security_ptrace(current->parent, current);
- if (ret)
- goto out;
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- goto out;
+ ret = ptrace_traceme();
+ goto out;
}
- ret = -EPERM;
- if (pid == 1) /* you may not mess with init */
- goto out;
-
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
+ child = ptrace_get_task_struct(pid);
+ if (IS_ERR(child)) {
+ ret = PTR_ERR(child);
goto out;
+ }
ret = do_ptrace(child, request, addr, data);
-
put_task_struct(child);
out:
unlock_kernel();
diff --git a/arch/s390/kernel/reipl_diag.c b/arch/s390/kernel/reipl_diag.c
index 83cb42bc0b7..1f33951ba43 100644
--- a/arch/s390/kernel/reipl_diag.c
+++ b/arch/s390/kernel/reipl_diag.c
@@ -26,7 +26,7 @@ void reipl_diag(void)
" st %%r4,%0\n"
" st %%r5,%1\n"
".section __ex_table,\"a\"\n"
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
" .align 8\n"
" .quad 0b, 0b\n"
#else
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index bee654abb6d..4176c77670c 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -10,7 +10,6 @@
#include <linux/smp.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>
-#include <linux/ioctl32.h>
#include <asm/checksum.h>
#include <asm/cpcmd.h>
#include <asm/delay.h>
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 31e7b19348b..b03847d100d 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -427,7 +427,7 @@ setup_lowcore(void)
__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
lc->current_task = (unsigned long) init_thread_union.thread_info.task;
lc->thread_info = (unsigned long) &init_thread_union;
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE) {
lc->extended_save_area_addr = (__u32)
__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);
@@ -562,21 +562,21 @@ setup_arch(char **cmdline_p)
/*
* print what head.S has found out about the machine
*/
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
printk((MACHINE_IS_VM) ?
"We are running under VM (31 bit mode)\n" :
"We are running native (31 bit mode)\n");
printk((MACHINE_HAS_IEEE) ?
"This machine has an IEEE fpu\n" :
"This machine has no IEEE fpu\n");
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
printk((MACHINE_IS_VM) ?
"We are running under VM (64 bit mode)\n" :
"We are running native (64 bit mode)\n");
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
ROOT_DEV = Root_RAM0;
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
memory_end = memory_size & ~0x400000UL; /* align memory end to 4MB */
/*
* We need some free virtual space to be able to do vmalloc.
@@ -585,9 +585,9 @@ setup_arch(char **cmdline_p)
*/
if (memory_end > 1920*1024*1024)
memory_end = 1920*1024*1024;
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
memory_end = memory_size & ~0x200000UL; /* detected in head.s */
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
init_mm.start_code = PAGE_OFFSET;
init_mm.end_code = (unsigned long) &_etext;
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 6e0110d7119..6ae4a77270b 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -254,9 +254,9 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
if (restore_sigregs(regs, &frame->uc.uc_mcontext))
goto badframe;
- /* It is more difficult to avoid calling this function than to
- call it and ignore errors. */
- do_sigaltstack(&frame->uc.uc_stack, NULL, regs->gprs[15]);
+ if (do_sigaltstack(&frame->uc.uc_stack, NULL,
+ regs->gprs[15]) == -EFAULT)
+ goto badframe;
return regs->gprs[2];
badframe:
@@ -501,7 +501,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
if (signr > 0) {
/* Whee! Actually deliver the signal. */
-#ifdef CONFIG_S390_SUPPORT
+#ifdef CONFIG_COMPAT
if (test_thread_flag(TIF_31BIT)) {
extern void handle_signal32(unsigned long sig,
struct k_sigaction *ka,
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 5856b3fda6b..cbfcfd02a43 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -263,7 +263,7 @@ static void do_machine_restart(void * __unused)
int cpu;
static atomic_t cpuid = ATOMIC_INIT(-1);
- if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
+ if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
signal_processor(smp_processor_id(), sigp_stop);
/* Wait for all other cpus to enter stopped state */
@@ -313,7 +313,7 @@ static void do_machine_halt(void * __unused)
{
static atomic_t cpuid = ATOMIC_INIT(-1);
- if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
+ if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
smp_send_stop();
if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
cpcmd(vmhalt_cmd, NULL, 0, NULL);
@@ -332,7 +332,7 @@ static void do_machine_power_off(void * __unused)
{
static atomic_t cpuid = ATOMIC_INIT(-1);
- if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
+ if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
smp_send_stop();
if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
cpcmd(vmpoff_cmd, NULL, 0, NULL);
@@ -402,7 +402,7 @@ static void smp_ext_bitcall_others(ec_bit_sig sig)
}
}
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
/*
* this function sends a 'purge tlb' signal to another CPU.
*/
@@ -416,7 +416,7 @@ void smp_ptlb_all(void)
on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
-#endif /* ! CONFIG_ARCH_S390X */
+#endif /* ! CONFIG_64BIT */
/*
* this function sends a 'reschedule' IPI to another CPU.
@@ -657,7 +657,7 @@ __cpu_up(unsigned int cpu)
idle = current_set[cpu];
cpu_lowcore = lowcore_ptr[cpu];
cpu_lowcore->kernel_stack = (unsigned long)
- idle->thread_info + (THREAD_SIZE);
+ task_stack_page(idle) + (THREAD_SIZE);
sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
- sizeof(struct pt_regs)
- sizeof(struct stack_frame));
@@ -783,7 +783,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (stack == 0ULL)
panic("smp_boot_cpus failed to allocate memory\n");
lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE) {
lowcore_ptr[i]->extended_save_area_addr =
(__u32) __get_free_pages(GFP_KERNEL,0);
@@ -793,7 +793,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
#endif
}
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE)
ctl_set_bit(14, 29); /* enable extended save area */
#endif
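
The smp.c and machine_kexec.c hunks replace the old s390-private atomic_compare_and_swap(-1, cpu, &cpuid) with the generic atomic_cmpxchg(&cpuid, -1, cpu), which returns the previous value rather than a success flag: only the first CPU into the shutdown path sees -1 come back and proceeds, every later CPU sees another CPU's id and stops itself with sigp_stop. A userspace analogue of this "first caller wins" election, using C11 atomics instead of the kernel's atomic_t (an assumption made for illustration):

        #include <stdatomic.h>
        #include <stdio.h>

        static atomic_int winner = ATOMIC_VAR_INIT(-1);

        /* Returns 1 for the single caller that wins the election, 0 otherwise. */
        static int i_am_first(int my_id)
        {
                int expected = -1;

                /* Like atomic_cmpxchg(&cpuid, -1, my_id): install my_id only
                 * if the slot still holds -1. */
                return atomic_compare_exchange_strong(&winner, &expected, my_id);
        }

        int main(void)
        {
                printf("%d %d\n", i_am_first(3), i_am_first(7));  /* prints "1 0" */
                return 0;
        }
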
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index efe6b83b53f..6a63553493c 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -26,9 +26,7 @@
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
-#ifdef CONFIG_ARCH_S390X
#include <linux/personality.h>
-#endif /* CONFIG_ARCH_S390X */
#include <asm/uaccess.h>
#include <asm/ipc.h>
@@ -121,7 +119,7 @@ out:
return error;
}
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
struct sel_arg_struct {
unsigned long n;
fd_set *inp, *outp, *exp;
@@ -138,7 +136,7 @@ asmlinkage long old_select(struct sel_arg_struct __user *arg)
return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls..
@@ -211,7 +209,7 @@ asmlinkage long sys_ipc(uint call, int first, unsigned long second,
return -EINVAL;
}
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
asmlinkage long s390x_newuname(struct new_utsname __user *name)
{
int ret = sys_newuname(name);
@@ -235,12 +233,12 @@ asmlinkage long s390x_personality(unsigned long personality)
return ret;
}
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
/*
* Wrapper function for sys_fadvise64/fadvise64_64
*/
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
asmlinkage long
s390_fadvise64(int fd, u32 offset_high, u32 offset_low, size_t len, int advice)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index c36353e8c14..b0d8ca8e5ee 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -282,7 +282,7 @@ static inline void start_hz_timer(void)
{
if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
return;
- account_ticks(__KSTK_PTREGS(current));
+ account_ticks(task_pt_regs(current));
cpu_clear(smp_processor_id(), nohz_cpu_mask);
}
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index c5bd36fae56..5d21e9e6e7b 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -67,13 +67,13 @@ extern pgm_check_handler_t do_monitor_call;
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
/*
* For show_trace we have tree different stack to consider:
@@ -136,8 +136,8 @@ void show_trace(struct task_struct *task, unsigned long * stack)
sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
S390_lowcore.async_stack);
if (task)
- __show_trace(sp, (unsigned long) task->thread_info,
- (unsigned long) task->thread_info + THREAD_SIZE);
+ __show_trace(sp, (unsigned long) task_stack_page(task),
+ (unsigned long) task_stack_page(task) + THREAD_SIZE);
else
__show_trace(sp, S390_lowcore.thread_info,
S390_lowcore.thread_info + THREAD_SIZE);
@@ -240,7 +240,7 @@ char *task_show_regs(struct task_struct *task, char *buffer)
{
struct pt_regs *regs;
- regs = __KSTK_PTREGS(task);
+ regs = task_pt_regs(task);
buffer += sprintf(buffer, "task: %p, ksp: %p\n",
task, (void *)task->thread.ksp);
buffer += sprintf(buffer, "User PSW : %p %p\n",
@@ -702,12 +702,12 @@ void __init trap_init(void)
pgm_check_table[0x11] = &do_dat_exception;
pgm_check_table[0x12] = &translation_exception;
pgm_check_table[0x13] = &special_op_exception;
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
pgm_check_table[0x38] = &do_dat_exception;
pgm_check_table[0x39] = &do_dat_exception;
pgm_check_table[0x3A] = &do_dat_exception;
pgm_check_table[0x3B] = &do_dat_exception;
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
pgm_check_table[0x15] = &operand_exception;
pgm_check_table[0x1C] = &space_switch_exception;
pgm_check_table[0x1D] = &hfp_sqrt_exception;
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 89fdb3808bc..9289face302 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -5,7 +5,7 @@
#include <asm-generic/vmlinux.lds.h>
#include <linux/config.h>
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
OUTPUT_ARCH(s390)
ENTRY(_start)
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index b701efa1f00..d9b97b3c597 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -4,6 +4,5 @@
EXTRA_AFLAGS := -traditional
-lib-y += delay.o string.o
-lib-$(CONFIG_ARCH_S390_31) += uaccess.o spinlock.o
-lib-$(CONFIG_ARCH_S390X) += uaccess64.o spinlock.o
+lib-y += delay.o string.o spinlock.o
+lib-y += $(if $(CONFIG_64BIT),uaccess64.o,uaccess.o)
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 2dc14e9c832..68d79c50208 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -29,7 +29,7 @@ __setup("spin_retry=", spin_retry_setup);
static inline void
_diag44(void)
{
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
if (MACHINE_HAS_DIAG44)
#endif
asm volatile("diag 0,0,0x44");
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 506a33b51e4..a9566bcab68 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -143,7 +143,7 @@ dcss_diag (__u8 func, void *parameter,
rx = (unsigned long) parameter;
ry = (unsigned long) func;
__asm__ __volatile__(
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
" sam31\n" // switch to 31 bit
" diag %0,%1,0x64\n"
" sam64\n" // switch back to 64 bit
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index fb2607c369e..81ade401b07 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -31,17 +31,17 @@
#include <asm/uaccess.h>
#include <asm/pgtable.h>
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK 0x7ffff000
#define __FIXUP_MASK 0x7fffffff
#define __SUBCODE_MASK 0x0200
#define __PF_RES_FIELD 0ULL
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK -4096L
#define __FIXUP_MASK ~0L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
#ifdef CONFIG_SYSCTL
extern int sysctl_userprocess_debug;
@@ -393,11 +393,11 @@ int pfault_init(void)
"2:\n"
".section __ex_table,\"a\"\n"
" .align 4\n"
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
" .long 0b,1b\n"
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
" .quad 0b,1b\n"
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
".previous"
: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc" );
__ctl_set_bit(0, 9);
@@ -417,11 +417,11 @@ void pfault_fini(void)
"0:\n"
".section __ex_table,\"a\"\n"
" .align 4\n"
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
" .long 0b,0b\n"
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
" .quad 0b,0b\n"
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
".previous"
: : "a" (&refbk), "m" (refbk) : "cc" );
}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 6ec5cd981e7..df953383724 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -44,7 +44,7 @@ void diag10(unsigned long addr)
{
if (addr >= 0x7ff00000)
return;
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
asm volatile (
" sam31\n"
" diag %0,%0,0x10\n"
@@ -106,7 +106,7 @@ extern unsigned long __initdata zholes_size[];
* paging_init() sets up the page tables
*/
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
void __init paging_init(void)
{
pgd_t * pg_dir;
@@ -175,7 +175,7 @@ void __init paging_init(void)
return;
}
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
void __init paging_init(void)
{
pgd_t * pg_dir;
@@ -256,7 +256,7 @@ void __init paging_init(void)
return;
}
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
void __init mem_init(void)
{
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index fb187e5a54b..356257c171d 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -50,7 +50,7 @@ static inline unsigned long mmap_base(void)
static inline int mmap_is_legacy(void)
{
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
/*
* Force standard allocation for 64 bit programs.
*/
diff --git a/arch/s390/oprofile/Makefile b/arch/s390/oprofile/Makefile
index ec349276258..537b2d840e6 100644
--- a/arch/s390/oprofile/Makefile
+++ b/arch/s390/oprofile/Makefile
@@ -6,4 +6,4 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
oprofilefs.o oprofile_stats.o \
timer_int.o )
-oprofile-y := $(DRIVER_OBJS) init.o
+oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
diff --git a/arch/s390/oprofile/backtrace.c b/arch/s390/oprofile/backtrace.c
new file mode 100644
index 00000000000..bc4b84a35ca
--- /dev/null
+++ b/arch/s390/oprofile/backtrace.c
@@ -0,0 +1,79 @@
+/**
+ * arch/s390/oprofile/backtrace.c
+ *
+ * S390 Version
+ * Copyright (C) 2005 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ * Author(s): Andreas Krebbel <Andreas.Krebbel@de.ibm.com>
+ */
+
+#include <linux/oprofile.h>
+
+#include <asm/processor.h> /* for struct stack_frame */
+
+static unsigned long
+__show_trace(unsigned int *depth, unsigned long sp,
+ unsigned long low, unsigned long high)
+{
+ struct stack_frame *sf;
+ struct pt_regs *regs;
+
+ while (*depth) {
+ sp = sp & PSW_ADDR_INSN;
+ if (sp < low || sp > high - sizeof(*sf))
+ return sp;
+ sf = (struct stack_frame *) sp;
+ (*depth)--;
+ oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN);
+
+ /* Follow the backchain. */
+ while (*depth) {
+ low = sp;
+ sp = sf->back_chain & PSW_ADDR_INSN;
+ if (!sp)
+ break;
+ if (sp <= low || sp > high - sizeof(*sf))
+ return sp;
+ sf = (struct stack_frame *) sp;
+ (*depth)--;
+ oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN);
+
+ }
+
+ if (*depth == 0)
+ break;
+
+ /* Zero backchain detected, check for interrupt frame. */
+ sp = (unsigned long) (sf + 1);
+ if (sp <= low || sp > high - sizeof(*regs))
+ return sp;
+ regs = (struct pt_regs *) sp;
+ (*depth)--;
+ oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN);
+ low = sp;
+ sp = regs->gprs[15];
+ }
+ return sp;
+}
+
+void s390_backtrace(struct pt_regs * const regs, unsigned int depth)
+{
+ unsigned long head;
+ struct stack_frame* head_sf;
+
+ if (user_mode (regs))
+ return;
+
+ head = regs->gprs[15];
+ head_sf = (struct stack_frame*)head;
+
+ if (!head_sf->back_chain)
+ return;
+
+ head = head_sf->back_chain;
+
+ head = __show_trace(&depth, head, S390_lowcore.async_stack - ASYNC_SIZE,
+ S390_lowcore.async_stack);
+
+ __show_trace(&depth, head, S390_lowcore.thread_info,
+ S390_lowcore.thread_info + THREAD_SIZE);
+}
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index a65ead0e200..7a995113b91 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -12,8 +12,12 @@
#include <linux/init.h>
#include <linux/errno.h>
+
+extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
+
int __init oprofile_arch_init(struct oprofile_operations* ops)
{
+ ops->backtrace = s390_backtrace;
return -ENODEV;
}
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 64f5ae0ff96..8cf6d437a63 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -14,10 +14,6 @@ config SUPERH
gaming console. The SuperH port has a home page at
<http://www.linux-sh.org/>.
-config UID16
- bool
- default y
-
config RWSEM_GENERIC_SPINLOCK
bool
default y
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index fd4f240b833..aac15e42d03 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -191,13 +191,8 @@ void flush_thread(void)
{
#if defined(CONFIG_SH_FPU)
struct task_struct *tsk = current;
- struct pt_regs *regs = (struct pt_regs *)
- ((unsigned long)tsk->thread_info
- + THREAD_SIZE - sizeof(struct pt_regs)
- - sizeof(unsigned long));
-
/* Forget lazy FPU state */
- clear_fpu(tsk, regs);
+ clear_fpu(tsk, task_pt_regs(tsk));
clear_used_math();
#endif
}
@@ -232,13 +227,7 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
struct pt_regs ptregs;
- ptregs = *(struct pt_regs *)
- ((unsigned long)tsk->thread_info + THREAD_SIZE
- - sizeof(struct pt_regs)
-#ifdef CONFIG_SH_DSP
- - sizeof(struct pt_dspregs)
-#endif
- - sizeof(unsigned long));
+ ptregs = *task_pt_regs(tsk);
elf_core_copy_regs(regs, &ptregs);
return 1;
@@ -252,11 +241,7 @@ dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *fpu)
#if defined(CONFIG_SH_FPU)
fpvalid = !!tsk_used_math(tsk);
if (fpvalid) {
- struct pt_regs *regs = (struct pt_regs *)
- ((unsigned long)tsk->thread_info
- + THREAD_SIZE - sizeof(struct pt_regs)
- - sizeof(unsigned long));
- unlazy_fpu(tsk, regs);
+ unlazy_fpu(tsk, task_pt_regs(tsk));
memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
}
#endif
@@ -279,18 +264,13 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
copy_to_stopped_child_used_math(p);
#endif
- childregs = ((struct pt_regs *)
- (THREAD_SIZE + (unsigned long) p->thread_info)
-#ifdef CONFIG_SH_DSP
- - sizeof(struct pt_dspregs)
-#endif
- - sizeof(unsigned long)) - 1;
+ childregs = task_pt_regs(p);
*childregs = *regs;
if (user_mode(regs)) {
childregs->regs[15] = usp;
} else {
- childregs->regs[15] = (unsigned long)p->thread_info + THREAD_SIZE;
+ childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
}
if (clone_flags & CLONE_SETTLS) {
childregs->gbr = childregs->regs[0];
@@ -305,26 +285,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
return 0;
}
-/*
- * fill in the user structure for a core dump..
- */
-void dump_thread(struct pt_regs * regs, struct user * dump)
-{
- dump->magic = CMAGIC;
- dump->start_code = current->mm->start_code;
- dump->start_data = current->mm->start_data;
- dump->start_stack = regs->regs[15] & ~(PAGE_SIZE - 1);
- dump->u_tsize = (current->mm->end_code - dump->start_code) >> PAGE_SHIFT;
- dump->u_dsize = (current->mm->brk + (PAGE_SIZE-1) - dump->start_data) >> PAGE_SHIFT;
- dump->u_ssize = (current->mm->start_stack - dump->start_stack +
- PAGE_SIZE - 1) >> PAGE_SHIFT;
- /* Debug registers will come here. */
-
- dump->regs = *regs;
-
- dump->u_fpvalid = dump_fpu(regs, &dump->fpu);
-}
-
/* Tracing by user break controller. */
static void
ubc_set_tracing(int asid, unsigned long pc)
@@ -353,11 +313,7 @@ ubc_set_tracing(int asid, unsigned long pc)
struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
{
#if defined(CONFIG_SH_FPU)
- struct pt_regs *regs = (struct pt_regs *)
- ((unsigned long)prev->thread_info
- + THREAD_SIZE - sizeof(struct pt_regs)
- - sizeof(unsigned long));
- unlazy_fpu(prev, regs);
+ unlazy_fpu(prev, task_pt_regs(prev));
#endif
#ifdef CONFIG_PREEMPT
@@ -366,13 +322,7 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *ne
struct pt_regs *regs;
local_irq_save(flags);
- regs = (struct pt_regs *)
- ((unsigned long)prev->thread_info
- + THREAD_SIZE - sizeof(struct pt_regs)
-#ifdef CONFIG_SH_DSP
- - sizeof(struct pt_dspregs)
-#endif
- - sizeof(unsigned long));
+ regs = task_pt_regs(prev);
if (user_mode(regs) && regs->regs[15] >= 0xc0000000) {
int offset = (int)regs->regs[15];
@@ -392,7 +342,7 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *ne
*/
asm volatile("ldc %0, r7_bank"
: /* no output */
- : "r" (next->thread_info));
+ : "r" (task_thread_info(next)));
#ifdef CONFIG_MMU
/* If no tasks are using the UBC, we're done */
diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace.c
index 1a8be06519e..3887b4f6feb 100644
--- a/arch/sh/kernel/ptrace.c
+++ b/arch/sh/kernel/ptrace.c
@@ -41,12 +41,7 @@ static inline int get_stack_long(struct task_struct *task, int offset)
{
unsigned char *stack;
- stack = (unsigned char *)
- task->thread_info + THREAD_SIZE - sizeof(struct pt_regs)
-#ifdef CONFIG_SH_DSP
- - sizeof(struct pt_dspregs)
-#endif
- - sizeof(unsigned long);
+ stack = (unsigned char *)task_pt_regs(task);
stack += offset;
return (*((int *)stack));
}
@@ -59,12 +54,7 @@ static inline int put_stack_long(struct task_struct *task, int offset,
{
unsigned char *stack;
- stack = (unsigned char *)
- task->thread_info + THREAD_SIZE - sizeof(struct pt_regs)
-#ifdef CONFIG_SH_DSP
- - sizeof(struct pt_dspregs)
-#endif
- - sizeof(unsigned long);
+ stack = (unsigned char *)task_pt_regs(task);
stack += offset;
*(unsigned long *) stack = data;
return 0;
diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c
index 6954fd62470..1cf94a618be 100644
--- a/arch/sh/kernel/sh_ksyms.c
+++ b/arch/sh/kernel/sh_ksyms.c
@@ -21,14 +21,12 @@
#include <asm/cacheflush.h>
#include <asm/checksum.h>
-extern void dump_thread(struct pt_regs *, struct user *);
extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
extern struct hw_interrupt_type no_irq_type;
EXPORT_SYMBOL(sh_mv);
/* platform dependent support */
-EXPORT_SYMBOL(dump_thread);
EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(enable_irq);
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 59e49b18252..62c7d1c0ad7 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -103,7 +103,7 @@ int __cpu_up(unsigned int cpu)
if (IS_ERR(tsk))
panic("Failed forking idle task for cpu %d\n", cpu);
- tsk->thread_info->cpu = cpu;
+ task_thread_info(tsk)->cpu = cpu;
cpu_set(cpu, cpu_online_map);
diff --git a/arch/sh64/Kconfig b/arch/sh64/Kconfig
index fb35b45dc13..07b172deb87 100644
--- a/arch/sh64/Kconfig
+++ b/arch/sh64/Kconfig
@@ -17,10 +17,6 @@ config MMU
bool
default y
-config UID16
- bool
- default y
-
config RWSEM_GENERIC_SPINLOCK
bool
default y
diff --git a/arch/sh64/kernel/process.c b/arch/sh64/kernel/process.c
index b95d0414185..1da9c61d682 100644
--- a/arch/sh64/kernel/process.c
+++ b/arch/sh64/kernel/process.c
@@ -744,7 +744,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
}
#endif
/* Copy from sh version */
- childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long) p->thread_info )) - 1;
+ childregs = (struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1;
*childregs = *regs;
@@ -752,7 +752,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
childregs->regs[15] = usp;
p->thread.uregs = childregs;
} else {
- childregs->regs[15] = (unsigned long)p->thread_info + THREAD_SIZE;
+ childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
}
childregs->regs[9] = 0; /* Set return value for child */
@@ -775,26 +775,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
return 0;
}
-/*
- * fill in the user structure for a core dump..
- */
-void dump_thread(struct pt_regs * regs, struct user * dump)
-{
- dump->magic = CMAGIC;
- dump->start_code = current->mm->start_code;
- dump->start_data = current->mm->start_data;
- dump->start_stack = regs->regs[15] & ~(PAGE_SIZE - 1);
- dump->u_tsize = (current->mm->end_code - dump->start_code) >> PAGE_SHIFT;
- dump->u_dsize = (current->mm->brk + (PAGE_SIZE-1) - dump->start_data) >> PAGE_SHIFT;
- dump->u_ssize = (current->mm->start_stack - dump->start_stack +
- PAGE_SIZE - 1) >> PAGE_SHIFT;
- /* Debug registers will come here. */
-
- dump->regs = *regs;
-
- dump->u_fpvalid = dump_fpu(regs, &dump->fpu);
-}
-
asmlinkage int sys_fork(unsigned long r2, unsigned long r3,
unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
diff --git a/arch/sh64/kernel/sh_ksyms.c b/arch/sh64/kernel/sh_ksyms.c
index 0b5497d70bd..472b450e61b 100644
--- a/arch/sh64/kernel/sh_ksyms.c
+++ b/arch/sh64/kernel/sh_ksyms.c
@@ -29,7 +29,6 @@
#include <asm/delay.h>
#include <asm/irq.h>
-extern void dump_thread(struct pt_regs *, struct user *);
extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
#if 0
@@ -41,7 +40,6 @@ EXPORT_SYMBOL(drive_info);
#endif
/* platform dependent support */
-EXPORT_SYMBOL(dump_thread);
EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(enable_irq);
diff --git a/arch/sh64/kernel/time.c b/arch/sh64/kernel/time.c
index 870fe5327e0..1195af37ee5 100644
--- a/arch/sh64/kernel/time.c
+++ b/arch/sh64/kernel/time.c
@@ -417,7 +417,7 @@ static __init unsigned int get_cpu_hz(void)
/*
** Regardless the toolchain, force the compiler to use the
** arbitrary register r3 as a clock tick counter.
- ** NOTE: r3 must be in accordance with rtc_interrupt()
+ ** NOTE: r3 must be in accordance with sh64_rtc_interrupt()
*/
register unsigned long long __rtc_irq_flag __asm__ ("r3");
@@ -482,7 +482,8 @@ static __init unsigned int get_cpu_hz(void)
#endif
}
-static irqreturn_t rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t sh64_rtc_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
{
ctrl_outb(0, RCR1); /* Disable Carry Interrupts */
regs->regs[3] = 1; /* Using r3 */
@@ -491,7 +492,7 @@ static irqreturn_t rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
}
static struct irqaction irq0 = { timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL};
-static struct irqaction irq1 = { rtc_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "rtc", NULL, NULL};
+static struct irqaction irq1 = { sh64_rtc_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "rtc", NULL, NULL};
void __init time_init(void)
{
diff --git a/arch/sh64/lib/dbg.c b/arch/sh64/lib/dbg.c
index 526fedae6db..58087331b8a 100644
--- a/arch/sh64/lib/dbg.c
+++ b/arch/sh64/lib/dbg.c
@@ -174,7 +174,7 @@ void evt_debug(int evt, int ret_addr, int event, int tra, struct pt_regs *regs)
struct ring_node *rr;
pid = current->pid;
- stack_bottom = (unsigned long) current->thread_info;
+ stack_bottom = (unsigned long) task_stack_page(current);
asm volatile("ori r15, 0, %0" : "=r" (sp));
rr = event_ring + event_ptr;
rr->evt = evt;
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 56c34e7fd4e..f944b58cdfe 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -9,10 +9,6 @@ config MMU
bool
default y
-config UID16
- bool
- default y
-
config HIGHMEM
bool
default y
diff --git a/arch/sparc/kernel/process.c b/arch/sparc/kernel/process.c
index ea864741146..fbb05a452e5 100644
--- a/arch/sparc/kernel/process.c
+++ b/arch/sparc/kernel/process.c
@@ -302,7 +302,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
int count = 0;
if (tsk != NULL)
- task_base = (unsigned long) tsk->thread_info;
+ task_base = (unsigned long) task_stack_page(tsk);
else
task_base = (unsigned long) current_thread_info();
@@ -337,7 +337,7 @@ EXPORT_SYMBOL(dump_stack);
*/
unsigned long thread_saved_pc(struct task_struct *tsk)
{
- return tsk->thread_info->kpc;
+ return task_thread_info(tsk)->kpc;
}
/*
@@ -392,7 +392,7 @@ void flush_thread(void)
/* We must fixup kregs as well. */
/* XXX This was not fixed for ti for a while, worked. Unused? */
current->thread.kregs = (struct pt_regs *)
- ((char *)current->thread_info + (THREAD_SIZE - TRACEREG_SZ));
+ (task_stack_page(current) + (THREAD_SIZE - TRACEREG_SZ));
}
}
@@ -459,7 +459,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
unsigned long unused,
struct task_struct *p, struct pt_regs *regs)
{
- struct thread_info *ti = p->thread_info;
+ struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs;
char *new_stack;
@@ -482,7 +482,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
* V V (stk.fr.) V (pt_regs) { (stk.fr.) }
* +----- - - - - - ------+===========+============={+==========}+
*/
- new_stack = (char*)ti + THREAD_SIZE;
+ new_stack = task_stack_page(p) + THREAD_SIZE;
if (regs->psr & PSR_PS)
new_stack -= STACKFRAME_SZ;
new_stack -= STACKFRAME_SZ + TRACEREG_SZ;
@@ -724,7 +724,7 @@ unsigned long get_wchan(struct task_struct *task)
task->state == TASK_RUNNING)
goto out;
- fp = task->thread_info->ksp + bias;
+ fp = task_thread_info(task)->ksp + bias;
do {
/* Bogus frame pointer? */
if (fp < (task_base + sizeof(struct thread_info)) ||
diff --git a/arch/sparc/kernel/ptrace.c b/arch/sparc/kernel/ptrace.c
index 475c4c13462..1baf13ed5c3 100644
--- a/arch/sparc/kernel/ptrace.c
+++ b/arch/sparc/kernel/ptrace.c
@@ -75,7 +75,7 @@ static inline void read_sunos_user(struct pt_regs *regs, unsigned long offset,
struct task_struct *tsk, long __user *addr)
{
struct pt_regs *cregs = tsk->thread.kregs;
- struct thread_info *t = tsk->thread_info;
+ struct thread_info *t = task_thread_info(tsk);
int v;
if(offset >= 1024)
@@ -170,7 +170,7 @@ static inline void write_sunos_user(struct pt_regs *regs, unsigned long offset,
struct task_struct *tsk)
{
struct pt_regs *cregs = tsk->thread.kregs;
- struct thread_info *t = tsk->thread_info;
+ struct thread_info *t = task_thread_info(tsk);
unsigned long value = regs->u_regs[UREG_I3];
if(offset >= 1024)
@@ -286,40 +286,17 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
s, (int) request, (int) pid, addr, data, addr2);
}
#endif
- if (request == PTRACE_TRACEME) {
- int my_ret;
-
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED) {
- pt_error_return(regs, EPERM);
- goto out;
- }
- my_ret = security_ptrace(current->parent, current);
- if (my_ret) {
- pt_error_return(regs, -my_ret);
- goto out;
- }
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
+ if (request == PTRACE_TRACEME) {
+ ret = ptrace_traceme();
pt_succ_return(regs, 0);
goto out;
}
-#ifndef ALLOW_INIT_TRACING
- if (pid == 1) {
- /* Can't dork with init. */
- pt_error_return(regs, EPERM);
- goto out;
- }
-#endif
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child) {
- pt_error_return(regs, ESRCH);
+ child = ptrace_get_task_struct(pid);
+ if (IS_ERR(child)) {
+ ret = PTR_ERR(child);
+ pt_error_return(regs, -ret);
goto out;
}
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index 1c8fd0fd930..0b0d492c953 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -82,8 +82,6 @@ extern int __lshrdi3(int, int);
extern int __muldi3(int, int);
extern int __divdi3(int, int);
-extern void dump_thread(struct pt_regs *, struct user *);
-
/* Private functions with odd calling conventions. */
extern void ___atomic24_add(void);
extern void ___atomic24_sub(void);
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index cc1fc898495..40d426cce82 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -200,7 +200,7 @@ void __init smp4d_boot_cpus(void)
/* Cook up an idler for this guy. */
p = fork_idle(i);
cpucount++;
- current_set[i] = p->thread_info;
+ current_set[i] = task_thread_info(p);
for (no = 0; !cpu_find_by_instance(no, NULL, &mid)
&& mid != i; no++) ;
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index f113422a372..a21f27d10e5 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -173,7 +173,7 @@ void __init smp4m_boot_cpus(void)
/* Cook up an idler for this guy. */
p = fork_idle(i);
cpucount++;
- current_set[i] = p->thread_info;
+ current_set[i] = task_thread_info(p);
/* See trampoline.S for details... */
entry += ((i-1) * 3);
diff --git a/arch/sparc/kernel/sys_sunos.c b/arch/sparc/kernel/sys_sunos.c
index d07ae02101a..288de276d9f 100644
--- a/arch/sparc/kernel/sys_sunos.c
+++ b/arch/sparc/kernel/sys_sunos.c
@@ -30,6 +30,7 @@
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
diff --git a/arch/sparc/kernel/traps.c b/arch/sparc/kernel/traps.c
index 3f451ae6648..41d45c298fb 100644
--- a/arch/sparc/kernel/traps.c
+++ b/arch/sparc/kernel/traps.c
@@ -291,7 +291,7 @@ void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
#ifndef CONFIG_SMP
if(!fpt) {
#else
- if(!(fpt->thread_info->flags & _TIF_USEDFPU)) {
+ if(!(task_thread_info(fpt)->flags & _TIF_USEDFPU)) {
#endif
fpsave(&fake_regs[0], &fake_fsr, &fake_queue[0], &fake_depth);
regs->psr &= ~PSR_EF;
@@ -334,7 +334,7 @@ void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
/* nope, better SIGFPE the offending process... */
#ifdef CONFIG_SMP
- fpt->thread_info->flags &= ~_TIF_USEDFPU;
+ task_thread_info(fpt)->flags &= ~_TIF_USEDFPU;
#endif
if(psr & PSR_PS) {
/* The first fsr store/load we tried trapped,
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index c4b7ad70cd7..ab733be9af0 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -179,7 +179,7 @@ config HUGETLB_PAGE_SIZE_512K
bool "512K"
config HUGETLB_PAGE_SIZE_64K
- depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512K
+ depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB
bool "64K"
endchoice
@@ -309,11 +309,6 @@ config COMPAT
depends on SPARC32_COMPAT
default y
-config UID16
- bool
- depends on SPARC32_COMPAT
- default y
-
config BINFMT_ELF32
tristate "Kernel support for 32-bit ELF binaries"
depends on SPARC32_COMPAT
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 46a6ad60a8f..a3fb3376ffa 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,46 +1,67 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.11
-# Sun Mar 6 20:47:29 2005
+# Linux kernel version: 2.6.15
+# Mon Jan 9 14:36:29 2006
#
+CONFIG_SPARC=y
+CONFIG_SPARC64=y
CONFIG_64BIT=y
CONFIG_MMU=y
CONFIG_TIME_INTERPOLATION=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_SPARC64_PAGE_SIZE_8KB=y
+# CONFIG_SPARC64_PAGE_SIZE_64KB is not set
+# CONFIG_SPARC64_PAGE_SIZE_512KB is not set
+# CONFIG_SPARC64_PAGE_SIZE_4MB is not set
+CONFIG_SECCOMP=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
#
# Code maturity level options
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
-CONFIG_LOCK_KERNEL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
#
# General setup
#
CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
-CONFIG_LOG_BUF_SHIFT=15
-CONFIG_HOTPLUG=y
-CONFIG_KOBJECT_UEVENT=y
# CONFIG_IKCONFIG is not set
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_UID16=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SHMEM=y
CONFIG_CC_ALIGN_FUNCTIONS=0
CONFIG_CC_ALIGN_LABELS=0
CONFIG_CC_ALIGN_LOOPS=0
CONFIG_CC_ALIGN_JUMPS=0
+CONFIG_SLAB=y
# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
#
# Loadable module support
@@ -52,20 +73,32 @@ CONFIG_OBSOLETE_MODPARM=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_KMOD=y
-CONFIG_STOP_MACHINE=y
+
+#
+# Block layer
+#
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
CONFIG_SYSVIPC_COMPAT=y
#
# General machine setup
#
-CONFIG_BBC_I2C=m
-CONFIG_VT=y
-CONFIG_VT_CONSOLE=y
-CONFIG_HW_CONSOLE=y
-CONFIG_SMP=y
+# CONFIG_SMP is not set
# CONFIG_PREEMPT is not set
-CONFIG_NR_CPUS=4
CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=m
# CONFIG_CPU_FREQ_DEBUG is not set
CONFIG_CPU_FREQ_STAT=m
CONFIG_CPU_FREQ_STAT_DETAILS=y
@@ -75,15 +108,22 @@ CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=m
CONFIG_CPU_FREQ_GOV_USERSPACE=m
CONFIG_CPU_FREQ_GOV_ONDEMAND=m
-CONFIG_CPU_FREQ_TABLE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
CONFIG_US3_FREQ=m
CONFIG_US2E_FREQ=m
-CONFIG_SPARC64=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_HUGETLB_PAGE_SIZE_4MB=y
# CONFIG_HUGETLB_PAGE_SIZE_512K is not set
# CONFIG_HUGETLB_PAGE_SIZE_64K is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_GENERIC_ISA_DMA=y
CONFIG_SBUS=y
CONFIG_SBUSCHAR=y
@@ -91,130 +131,142 @@ CONFIG_SUN_AUXIO=y
CONFIG_SUN_IO=y
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
-CONFIG_RTC=y
# CONFIG_PCI_LEGACY_PROC is not set
-# CONFIG_PCI_NAMES is not set
+# CONFIG_PCI_DEBUG is not set
CONFIG_SUN_OPENPROMFS=m
CONFIG_SPARC32_COMPAT=y
CONFIG_COMPAT=y
-CONFIG_UID16=y
CONFIG_BINFMT_ELF32=y
# CONFIG_BINFMT_AOUT32 is not set
+
+#
+# Executable file formats
+#
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_MISC=m
-CONFIG_SOLARIS_EMUL=m
+# CONFIG_SOLARIS_EMUL is not set
+# CONFIG_CMDLINE_BOOL is not set
#
-# Parallel port support
+# Networking
#
-CONFIG_PARPORT=m
-CONFIG_PARPORT_PC=m
-CONFIG_PARPORT_PC_FIFO=y
-# CONFIG_PARPORT_PC_SUPERIO is not set
-# CONFIG_PARPORT_SUNBPP is not set
-# CONFIG_PARPORT_OTHER is not set
-CONFIG_PARPORT_1284=y
-CONFIG_PRINTER=m
-CONFIG_ENVCTRL=m
-CONFIG_DISPLAY7SEG=m
-# CONFIG_CMDLINE_BOOL is not set
+CONFIG_NET=y
#
-# Generic Driver Options
+# Networking options
#
-CONFIG_STANDALONE=y
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_FW_LOADER=m
-# CONFIG_DEBUG_DRIVER is not set
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_PNP is not set
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_ARPD=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+CONFIG_TCP_CONG_ADVANCED=y
#
-# Graphics support
+# TCP congestion control
#
-CONFIG_FB=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
-# CONFIG_FB_CIRRUS is not set
-CONFIG_FB_PM2=y
-# CONFIG_FB_PM2_FIFO_DISCONNECT is not set
-# CONFIG_FB_ASILIANT is not set
-# CONFIG_FB_IMSTT is not set
-# CONFIG_FB_BW2 is not set
-# CONFIG_FB_CG3 is not set
-CONFIG_FB_CG6=y
-# CONFIG_FB_RIVA is not set
-# CONFIG_FB_MATROX is not set
-# CONFIG_FB_RADEON_OLD is not set
-# CONFIG_FB_RADEON is not set
-# CONFIG_FB_ATY128 is not set
-CONFIG_FB_ATY=y
-CONFIG_FB_ATY_CT=y
-# CONFIG_FB_ATY_GENERIC_LCD is not set
-# CONFIG_FB_ATY_XL_INIT is not set
-CONFIG_FB_ATY_GX=y
-# CONFIG_FB_SAVAGE is not set
-# CONFIG_FB_SIS is not set
-# CONFIG_FB_NEOMAGIC is not set
-# CONFIG_FB_KYRO is not set
-# CONFIG_FB_3DFX is not set
-# CONFIG_FB_VOODOO1 is not set
-# CONFIG_FB_TRIDENT is not set
-CONFIG_FB_SBUS=y
-CONFIG_FB_FFB=y
-# CONFIG_FB_TCX is not set
-# CONFIG_FB_CG14 is not set
-# CONFIG_FB_P9100 is not set
-# CONFIG_FB_LEO is not set
-# CONFIG_FB_PCI is not set
-# CONFIG_FB_VIRTUAL is not set
+CONFIG_TCP_CONG_BIC=y
+CONFIG_TCP_CONG_CUBIC=m
+CONFIG_TCP_CONG_WESTWOOD=m
+CONFIG_TCP_CONG_HTCP=m
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_VEGAS=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_IPV6=m
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_IPV6_TUNNEL=m
+# CONFIG_NETFILTER is not set
#
-# Console display driver support
+# DCCP Configuration (EXPERIMENTAL)
#
-# CONFIG_PROM_CONSOLE is not set
-CONFIG_DUMMY_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-# CONFIG_FONT_8x8 is not set
-# CONFIG_FONT_8x16 is not set
-# CONFIG_FONT_6x11 is not set
-# CONFIG_FONT_PEARL_8x8 is not set
-# CONFIG_FONT_ACORN_8x8 is not set
-CONFIG_FONT_SUN8x16=y
-# CONFIG_FONT_SUN12x22 is not set
+CONFIG_IP_DCCP=m
+CONFIG_INET_DCCP_DIAG=m
#
-# Logo configuration
+# DCCP CCIDs Configuration (EXPERIMENTAL)
#
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_LOGO_LINUX_CLUT224 is not set
-CONFIG_LOGO_SUN_CLUT224=y
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+CONFIG_IP_DCCP_CCID3=m
+CONFIG_IP_DCCP_TFRC_LIB=m
#
-# Serial drivers
+# DCCP Kernel Hacking
#
+# CONFIG_IP_DCCP_DEBUG is not set
+# CONFIG_IP_DCCP_UNLOAD_HACK is not set
#
-# Non-8250 serial port support
+# SCTP Configuration (EXPERIMENTAL)
#
-CONFIG_SERIAL_SUNCORE=y
-CONFIG_SERIAL_SUNZILOG=y
-CONFIG_SERIAL_SUNZILOG_CONSOLE=y
-CONFIG_SERIAL_SUNSU=y
-CONFIG_SERIAL_SUNSU_CONSOLE=y
-CONFIG_SERIAL_SUNSAB=m
-CONFIG_SERIAL_CORE=y
-CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+CONFIG_VLAN_8021Q=m
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
#
-# Misc Linux/SPARC drivers
+# QoS and/or fair queueing
#
-CONFIG_SUN_OPENPROMIO=m
-CONFIG_SUN_MOSTEK_RTC=y
-CONFIG_OBP_FLASH=m
-# CONFIG_SUN_BPP is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+CONFIG_NET_PKTGEN=m
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_FW_LOADER=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+CONFIG_CONNECTOR=m
#
# Memory Technology Devices (MTD)
@@ -222,10 +274,18 @@ CONFIG_OBP_FLASH=m
# CONFIG_MTD is not set
#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
# Block devices
#
# CONFIG_BLK_DEV_FD is not set
-# CONFIG_PARIDE is not set
# CONFIG_BLK_CPQ_DA is not set
# CONFIG_BLK_CPQ_CISS_DA is not set
# CONFIG_BLK_DEV_DAC960 is not set
@@ -234,22 +294,13 @@ CONFIG_OBP_FLASH=m
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_SX8=m
+# CONFIG_BLK_DEV_SX8 is not set
CONFIG_BLK_DEV_UB=m
# CONFIG_BLK_DEV_RAM is not set
CONFIG_BLK_DEV_RAM_COUNT=16
-CONFIG_INITRAMFS_SOURCE=""
CONFIG_CDROM_PKTCDVD=m
CONFIG_CDROM_PKTCDVD_BUFFERS=8
CONFIG_CDROM_PKTCDVD_WCACHE=y
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
CONFIG_ATA_OVER_ETH=m
#
@@ -265,7 +316,7 @@ CONFIG_BLK_DEV_IDE=y
CONFIG_BLK_DEV_IDEDISK=y
# CONFIG_IDEDISK_MULTI_MODE is not set
CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_IDETAPE=m
+# CONFIG_BLK_DEV_IDETAPE is not set
# CONFIG_BLK_DEV_IDEFLOPPY is not set
# CONFIG_BLK_DEV_IDESCSI is not set
# CONFIG_IDE_TASK_IOCTL is not set
@@ -278,7 +329,7 @@ CONFIG_BLK_DEV_IDEPCI=y
# CONFIG_IDEPCI_SHARE_IRQ is not set
# CONFIG_BLK_DEV_OFFBOARD is not set
# CONFIG_BLK_DEV_GENERIC is not set
-CONFIG_BLK_DEV_OPTI621=m
+# CONFIG_BLK_DEV_OPTI621 is not set
CONFIG_BLK_DEV_IDEDMA_PCI=y
# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
CONFIG_IDEDMA_PCI_AUTO=y
@@ -286,27 +337,25 @@ CONFIG_IDEDMA_ONLYDISK=y
# CONFIG_BLK_DEV_AEC62XX is not set
CONFIG_BLK_DEV_ALI15X3=y
# CONFIG_WDC_ALI15X3 is not set
-CONFIG_BLK_DEV_AMD74XX=m
-CONFIG_BLK_DEV_CMD64X=m
-CONFIG_BLK_DEV_TRIFLEX=m
-CONFIG_BLK_DEV_CY82C693=m
-CONFIG_BLK_DEV_CS5520=m
-CONFIG_BLK_DEV_CS5530=m
-CONFIG_BLK_DEV_HPT34X=m
-# CONFIG_HPT34X_AUTODMA is not set
-CONFIG_BLK_DEV_HPT366=m
-CONFIG_BLK_DEV_SC1200=m
-CONFIG_BLK_DEV_PIIX=m
-CONFIG_BLK_DEV_NS87415=m
-CONFIG_BLK_DEV_PDC202XX_OLD=m
-# CONFIG_PDC202XX_BURST is not set
-CONFIG_BLK_DEV_PDC202XX_NEW=m
-# CONFIG_PDC202XX_FORCE is not set
-CONFIG_BLK_DEV_SVWKS=m
-CONFIG_BLK_DEV_SIIMAGE=m
-CONFIG_BLK_DEV_SLC90E66=m
-CONFIG_BLK_DEV_TRM290=m
-CONFIG_BLK_DEV_VIA82CXXX=m
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CY82C693 is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_HPT34X is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+# CONFIG_BLK_DEV_PIIX is not set
+# CONFIG_BLK_DEV_IT821X is not set
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+# CONFIG_BLK_DEV_VIA82CXXX is not set
# CONFIG_IDE_ARM is not set
CONFIG_BLK_DEV_IDEDMA=y
# CONFIG_IDEDMA_IVB is not set
@@ -316,6 +365,7 @@ CONFIG_IDEDMA_AUTO=y
#
# SCSI device support
#
+CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_SCSI_PROC_FS=y
@@ -323,11 +373,12 @@ CONFIG_SCSI_PROC_FS=y
# SCSI support type (disk, tape, CD-ROM)
#
CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
CONFIG_BLK_DEV_SR=m
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
+# CONFIG_CHR_DEV_SCH is not set
#
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
@@ -340,89 +391,42 @@ CONFIG_SCSI_CONSTANTS=y
# SCSI Transport Attributes
#
CONFIG_SCSI_SPI_ATTRS=y
-CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_ISCSI_ATTRS=m
+# CONFIG_SCSI_SAS_ATTRS is not set
#
# SCSI low-level drivers
#
-CONFIG_BLK_DEV_3W_XXXX_RAID=m
-CONFIG_SCSI_3W_9XXX=m
-CONFIG_SCSI_ACARD=m
-CONFIG_SCSI_AACRAID=m
+CONFIG_ISCSI_TCP=m
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AACRAID is not set
# CONFIG_SCSI_AIC7XXX is not set
# CONFIG_SCSI_AIC7XXX_OLD is not set
-CONFIG_SCSI_AIC79XX=m
-CONFIG_AIC79XX_CMDS_PER_DEVICE=32
-CONFIG_AIC79XX_RESET_DELAY_MS=15000
-# CONFIG_AIC79XX_BUILD_FIRMWARE is not set
-# CONFIG_AIC79XX_ENABLE_RD_STRM is not set
-# CONFIG_AIC79XX_DEBUG_ENABLE is not set
-CONFIG_AIC79XX_DEBUG_MASK=0
-# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
+# CONFIG_SCSI_AIC79XX is not set
# CONFIG_MEGARAID_NEWGEN is not set
# CONFIG_MEGARAID_LEGACY is not set
-CONFIG_SCSI_SATA=y
-CONFIG_SCSI_SATA_AHCI=m
-CONFIG_SCSI_SATA_SVW=m
-CONFIG_SCSI_ATA_PIIX=m
-CONFIG_SCSI_SATA_NV=m
-CONFIG_SCSI_SATA_PROMISE=m
-CONFIG_SCSI_SATA_QSTOR=m
-CONFIG_SCSI_SATA_SX4=m
-CONFIG_SCSI_SATA_SIL=m
-CONFIG_SCSI_SATA_SIS=m
-CONFIG_SCSI_SATA_ULI=m
-CONFIG_SCSI_SATA_VIA=m
-CONFIG_SCSI_SATA_VITESSE=m
-CONFIG_SCSI_DMX3191D=m
-CONFIG_SCSI_EATA_PIO=m
+# CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_FUTURE_DOMAIN is not set
-CONFIG_SCSI_IPS=m
-CONFIG_SCSI_INITIO=m
-CONFIG_SCSI_INIA100=m
-CONFIG_SCSI_PPA=m
-CONFIG_SCSI_IMM=m
-# CONFIG_SCSI_IZIP_EPP16 is not set
-# CONFIG_SCSI_IZIP_SLOW_CTR is not set
-CONFIG_SCSI_SYM53C8XX_2=y
-CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
-CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
-CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
-# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
# CONFIG_SCSI_IPR is not set
-CONFIG_SCSI_QLOGIC_ISP=m
-CONFIG_SCSI_QLOGIC_FC=y
-CONFIG_SCSI_QLOGIC_FC_FIRMWARE=y
+# CONFIG_SCSI_QLOGIC_FC is not set
# CONFIG_SCSI_QLOGIC_1280 is not set
-CONFIG_SCSI_QLOGICPTI=m
+# CONFIG_SCSI_QLOGICPTI is not set
CONFIG_SCSI_QLA2XXX=y
-# CONFIG_SCSI_QLA21XX is not set
-# CONFIG_SCSI_QLA22XX is not set
-# CONFIG_SCSI_QLA2300 is not set
-# CONFIG_SCSI_QLA2322 is not set
-# CONFIG_SCSI_QLA6312 is not set
-CONFIG_SCSI_DC395x=m
+# CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE is not set
+# CONFIG_SCSI_LPFC is not set
+# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
-CONFIG_SCSI_DEBUG=m
-CONFIG_SCSI_SUNESP=y
-
-#
-# Fibre Channel support
-#
-CONFIG_FC4=m
-
-#
-# FC4 drivers
-#
-CONFIG_FC4_SOC=m
-CONFIG_FC4_SOCAL=m
-
-#
-# FC4 targets
-#
-CONFIG_SCSI_PLUTO=m
-CONFIG_SCSI_FCAL=m
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_SUNESP is not set
#
# Multi-device support (RAID and LVM)
@@ -442,427 +446,34 @@ CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
+# CONFIG_DM_MULTIPATH is not set
#
# Fusion MPT device support
#
-CONFIG_FUSION=m
-CONFIG_FUSION_MAX_SGE=40
-CONFIG_FUSION_CTL=m
-CONFIG_FUSION_LAN=m
+# CONFIG_FUSION is not set
+# CONFIG_FUSION_SPI is not set
+# CONFIG_FUSION_FC is not set
+# CONFIG_FUSION_SAS is not set
#
# IEEE 1394 (FireWire) support
#
-CONFIG_IEEE1394=m
-
-#
-# Subsystem Options
-#
-# CONFIG_IEEE1394_VERBOSEDEBUG is not set
-CONFIG_IEEE1394_OUI_DB=y
-CONFIG_IEEE1394_EXTRA_CONFIG_ROMS=y
-CONFIG_IEEE1394_CONFIG_ROM_IP1394=y
-
-#
-# Device Drivers
-#
-CONFIG_IEEE1394_PCILYNX=m
-CONFIG_IEEE1394_OHCI1394=m
-
-#
-# Protocol Drivers
-#
-CONFIG_IEEE1394_VIDEO1394=m
-CONFIG_IEEE1394_SBP2=m
-# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
-CONFIG_IEEE1394_ETH1394=m
-CONFIG_IEEE1394_DV1394=m
-CONFIG_IEEE1394_RAWIO=m
-CONFIG_IEEE1394_CMP=m
-CONFIG_IEEE1394_AMDTP=m
-
-#
-# Networking support
-#
-CONFIG_NET=y
-
-#
-# Networking options
-#
-CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
-CONFIG_NETLINK_DEV=y
-CONFIG_UNIX=y
-CONFIG_NET_KEY=m
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-# CONFIG_IP_ADVANCED_ROUTER is not set
-# CONFIG_IP_PNP is not set
-CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_ARPD=y
-CONFIG_SYN_COOKIES=y
-CONFIG_INET_AH=y
-CONFIG_INET_ESP=y
-CONFIG_INET_IPCOMP=y
-CONFIG_INET_TUNNEL=y
-CONFIG_IP_TCPDIAG=y
-# CONFIG_IP_TCPDIAG_IPV6 is not set
-
-#
-# IP: Virtual Server Configuration
-#
-CONFIG_IP_VS=m
-# CONFIG_IP_VS_DEBUG is not set
-CONFIG_IP_VS_TAB_BITS=12
-
-#
-# IPVS transport protocol load balancing support
-#
-CONFIG_IP_VS_PROTO_TCP=y
-CONFIG_IP_VS_PROTO_UDP=y
-CONFIG_IP_VS_PROTO_ESP=y
-CONFIG_IP_VS_PROTO_AH=y
-
-#
-# IPVS scheduler
-#
-CONFIG_IP_VS_RR=m
-CONFIG_IP_VS_WRR=m
-CONFIG_IP_VS_LC=m
-CONFIG_IP_VS_WLC=m
-CONFIG_IP_VS_LBLC=m
-CONFIG_IP_VS_LBLCR=m
-CONFIG_IP_VS_DH=m
-CONFIG_IP_VS_SH=m
-CONFIG_IP_VS_SED=m
-CONFIG_IP_VS_NQ=m
-
-#
-# IPVS application helper
-#
-CONFIG_IP_VS_FTP=m
-CONFIG_IPV6=m
-CONFIG_IPV6_PRIVACY=y
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_IPCOMP=m
-CONFIG_INET6_TUNNEL=m
-CONFIG_IPV6_TUNNEL=m
-CONFIG_NETFILTER=y
-# CONFIG_NETFILTER_DEBUG is not set
-CONFIG_BRIDGE_NETFILTER=y
-
-#
-# IP: Netfilter Configuration
-#
-CONFIG_IP_NF_CONNTRACK=m
-CONFIG_IP_NF_CT_ACCT=y
-CONFIG_IP_NF_CONNTRACK_MARK=y
-CONFIG_IP_NF_CT_PROTO_SCTP=m
-CONFIG_IP_NF_FTP=m
-CONFIG_IP_NF_IRC=m
-CONFIG_IP_NF_TFTP=m
-CONFIG_IP_NF_AMANDA=m
-CONFIG_IP_NF_QUEUE=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_LIMIT=m
-CONFIG_IP_NF_MATCH_IPRANGE=m
-CONFIG_IP_NF_MATCH_MAC=m
-CONFIG_IP_NF_MATCH_PKTTYPE=m
-CONFIG_IP_NF_MATCH_MARK=m
-CONFIG_IP_NF_MATCH_MULTIPORT=m
-CONFIG_IP_NF_MATCH_TOS=m
-CONFIG_IP_NF_MATCH_RECENT=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_DSCP=m
-CONFIG_IP_NF_MATCH_AH_ESP=m
-CONFIG_IP_NF_MATCH_LENGTH=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_MATCH_TCPMSS=m
-CONFIG_IP_NF_MATCH_HELPER=m
-CONFIG_IP_NF_MATCH_STATE=m
-CONFIG_IP_NF_MATCH_CONNTRACK=m
-CONFIG_IP_NF_MATCH_OWNER=m
-CONFIG_IP_NF_MATCH_PHYSDEV=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
-CONFIG_IP_NF_MATCH_REALM=m
-CONFIG_IP_NF_MATCH_SCTP=m
-CONFIG_IP_NF_MATCH_COMMENT=m
-CONFIG_IP_NF_MATCH_CONNMARK=m
-CONFIG_IP_NF_MATCH_HASHLIMIT=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_IP_NF_TARGET_TCPMSS=m
-CONFIG_IP_NF_NAT=m
-CONFIG_IP_NF_NAT_NEEDED=y
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_SAME=m
-CONFIG_IP_NF_NAT_SNMP_BASIC=m
-CONFIG_IP_NF_NAT_IRC=m
-CONFIG_IP_NF_NAT_FTP=m
-CONFIG_IP_NF_NAT_TFTP=m
-CONFIG_IP_NF_NAT_AMANDA=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_TOS=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_DSCP=m
-CONFIG_IP_NF_TARGET_MARK=m
-CONFIG_IP_NF_TARGET_CLASSIFY=m
-CONFIG_IP_NF_TARGET_CONNMARK=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
-CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_TARGET_NOTRACK=m
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
-
-#
-# IPv6: Netfilter Configuration
-#
-CONFIG_IP6_NF_QUEUE=m
-CONFIG_IP6_NF_IPTABLES=m
-CONFIG_IP6_NF_MATCH_LIMIT=m
-CONFIG_IP6_NF_MATCH_MAC=m
-CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_MATCH_OPTS=m
-CONFIG_IP6_NF_MATCH_FRAG=m
-CONFIG_IP6_NF_MATCH_HL=m
-CONFIG_IP6_NF_MATCH_MULTIPORT=m
-CONFIG_IP6_NF_MATCH_OWNER=m
-CONFIG_IP6_NF_MATCH_MARK=m
-CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_AHESP=m
-CONFIG_IP6_NF_MATCH_LENGTH=m
-CONFIG_IP6_NF_MATCH_EUI64=m
-CONFIG_IP6_NF_MATCH_PHYSDEV=m
-CONFIG_IP6_NF_FILTER=m
-CONFIG_IP6_NF_TARGET_LOG=m
-CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_TARGET_MARK=m
-CONFIG_IP6_NF_RAW=m
-
-#
-# DECnet: Netfilter Configuration
-#
-CONFIG_DECNET_NF_GRABULATOR=m
-
-#
-# Bridge: Netfilter Configuration
-#
-CONFIG_BRIDGE_NF_EBTABLES=m
-CONFIG_BRIDGE_EBT_BROUTE=m
-CONFIG_BRIDGE_EBT_T_FILTER=m
-CONFIG_BRIDGE_EBT_T_NAT=m
-CONFIG_BRIDGE_EBT_802_3=m
-CONFIG_BRIDGE_EBT_AMONG=m
-CONFIG_BRIDGE_EBT_ARP=m
-CONFIG_BRIDGE_EBT_IP=m
-CONFIG_BRIDGE_EBT_LIMIT=m
-CONFIG_BRIDGE_EBT_MARK=m
-CONFIG_BRIDGE_EBT_PKTTYPE=m
-CONFIG_BRIDGE_EBT_STP=m
-CONFIG_BRIDGE_EBT_VLAN=m
-CONFIG_BRIDGE_EBT_ARPREPLY=m
-CONFIG_BRIDGE_EBT_DNAT=m
-CONFIG_BRIDGE_EBT_MARK_T=m
-CONFIG_BRIDGE_EBT_REDIRECT=m
-CONFIG_BRIDGE_EBT_SNAT=m
-CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_ULOG=m
-CONFIG_XFRM=y
-CONFIG_XFRM_USER=m
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
-CONFIG_IP_SCTP=m
-# CONFIG_SCTP_DBG_MSG is not set
-# CONFIG_SCTP_DBG_OBJCNT is not set
-# CONFIG_SCTP_HMAC_NONE is not set
-# CONFIG_SCTP_HMAC_SHA1 is not set
-CONFIG_SCTP_HMAC_MD5=y
-CONFIG_ATM=y
-CONFIG_ATM_CLIP=y
-# CONFIG_ATM_CLIP_NO_ICMP is not set
-CONFIG_ATM_LANE=m
-CONFIG_ATM_MPOA=m
-CONFIG_ATM_BR2684=m
-CONFIG_ATM_BR2684_IPFILTER=y
-CONFIG_BRIDGE=m
-CONFIG_VLAN_8021Q=m
-CONFIG_DECNET=m
-CONFIG_DECNET_ROUTER=y
-CONFIG_DECNET_ROUTE_FWMARK=y
-CONFIG_LLC=m
-CONFIG_LLC2=m
-CONFIG_IPX=m
-# CONFIG_IPX_INTERN is not set
-CONFIG_ATALK=m
-# CONFIG_DEV_APPLETALK is not set
-CONFIG_X25=m
-CONFIG_LAPB=m
-CONFIG_NET_DIVERT=y
-# CONFIG_ECONET is not set
-# CONFIG_WAN_ROUTER is not set
-
-#
-# QoS and/or fair queueing
-#
-CONFIG_NET_SCHED=y
-# CONFIG_NET_SCH_CLK_JIFFIES is not set
-# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
-CONFIG_NET_SCH_CLK_CPU=y
-CONFIG_NET_SCH_CBQ=m
-CONFIG_NET_SCH_HTB=m
-CONFIG_NET_SCH_HFSC=m
-CONFIG_NET_SCH_ATM=y
-CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_RED=m
-CONFIG_NET_SCH_SFQ=m
-CONFIG_NET_SCH_TEQL=m
-CONFIG_NET_SCH_TBF=m
-CONFIG_NET_SCH_GRED=m
-CONFIG_NET_SCH_DSMARK=m
-CONFIG_NET_SCH_NETEM=m
-CONFIG_NET_SCH_INGRESS=m
-CONFIG_NET_QOS=y
-CONFIG_NET_ESTIMATOR=y
-CONFIG_NET_CLS=y
-CONFIG_NET_CLS_BASIC=m
-CONFIG_NET_CLS_TCINDEX=m
-CONFIG_NET_CLS_ROUTE4=m
-CONFIG_NET_CLS_ROUTE=y
-CONFIG_NET_CLS_FW=m
-CONFIG_NET_CLS_U32=m
-CONFIG_CLS_U32_PERF=y
-CONFIG_NET_CLS_IND=y
-CONFIG_CLS_U32_MARK=y
-CONFIG_NET_CLS_RSVP=m
-CONFIG_NET_CLS_RSVP6=m
-CONFIG_NET_EMATCH=y
-CONFIG_NET_EMATCH_STACK=32
-CONFIG_NET_EMATCH_CMP=m
-CONFIG_NET_EMATCH_NBYTE=m
-CONFIG_NET_EMATCH_U32=m
-CONFIG_NET_EMATCH_META=m
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_ACT_POLICE=m
-CONFIG_NET_ACT_GACT=m
-CONFIG_GACT_PROB=y
-CONFIG_NET_ACT_MIRRED=m
-CONFIG_NET_ACT_IPT=m
-CONFIG_NET_ACT_PEDIT=m
-
-#
-# Network testing
-#
-CONFIG_NET_PKTGEN=m
-CONFIG_NETPOLL=y
-# CONFIG_NETPOLL_RX is not set
-# CONFIG_NETPOLL_TRAP is not set
-CONFIG_NET_POLL_CONTROLLER=y
-CONFIG_HAMRADIO=y
-
-#
-# Packet Radio protocols
-#
-CONFIG_AX25=m
-CONFIG_AX25_DAMA_SLAVE=y
-CONFIG_NETROM=m
-CONFIG_ROSE=m
-
-#
-# AX.25 network device drivers
-#
-# CONFIG_BPQETHER is not set
-# CONFIG_BAYCOM_SER_FDX is not set
-# CONFIG_BAYCOM_SER_HDX is not set
-# CONFIG_BAYCOM_PAR is not set
-# CONFIG_YAM is not set
-CONFIG_IRDA=m
-
-#
-# IrDA protocols
-#
-CONFIG_IRLAN=m
-CONFIG_IRNET=m
-CONFIG_IRCOMM=m
-CONFIG_IRDA_ULTRA=y
-
-#
-# IrDA options
-#
-CONFIG_IRDA_CACHE_LAST_LSAP=y
-CONFIG_IRDA_FAST_RR=y
-# CONFIG_IRDA_DEBUG is not set
-
-#
-# Infrared-port device drivers
-#
-
-#
-# SIR device drivers
-#
-# CONFIG_IRTTY_SIR is not set
-
-#
-# Dongle support
-#
-
-#
-# Old SIR device drivers
-#
-
-#
-# Old Serial dongle support
-#
+# CONFIG_IEEE1394 is not set
#
-# FIR device drivers
+# I2O device support
#
-# CONFIG_USB_IRDA is not set
-CONFIG_SIGMATEL_FIR=m
-# CONFIG_VLSI_FIR is not set
-CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
-CONFIG_BT_RFCOMM=m
-CONFIG_BT_RFCOMM_TTY=y
-CONFIG_BT_BNEP=m
-CONFIG_BT_BNEP_MC_FILTER=y
-CONFIG_BT_BNEP_PROTO_FILTER=y
-CONFIG_BT_CMTP=m
-CONFIG_BT_HIDP=m
+# CONFIG_I2O is not set
#
-# Bluetooth device drivers
+# Network device support
#
-CONFIG_BT_HCIUSB=m
-CONFIG_BT_HCIUSB_SCO=y
-CONFIG_BT_HCIUART=m
-CONFIG_BT_HCIUART_H4=y
-CONFIG_BT_HCIUART_BCSP=y
-CONFIG_BT_HCIUART_BCSP_TXCRC=y
-CONFIG_BT_HCIBCM203X=m
-CONFIG_BT_HCIBPA10X=m
-CONFIG_BT_HCIBFUSB=m
-CONFIG_BT_HCIVHCI=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
-CONFIG_BONDING=m
-CONFIG_EQUALIZER=m
-CONFIG_TUN=m
-# CONFIG_ETHERTAP is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
#
# ARCnet devices
@@ -870,86 +481,73 @@ CONFIG_TUN=m
# CONFIG_ARCNET is not set
#
+# PHY device support
+#
+# CONFIG_PHYLIB is not set
+
+#
# Ethernet (10 or 100Mbit)
#
CONFIG_NET_ETHERNET=y
CONFIG_MII=m
-CONFIG_SUNLANCE=y
-CONFIG_HAPPYMEAL=y
-CONFIG_SUNBMAC=m
-CONFIG_SUNQE=m
-CONFIG_SUNGEM=y
-CONFIG_NET_VENDOR_3COM=y
-CONFIG_VORTEX=m
-CONFIG_TYPHOON=m
+# CONFIG_SUNLANCE is not set
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNBMAC is not set
+# CONFIG_SUNQE is not set
+# CONFIG_SUNGEM is not set
+CONFIG_CASSINI=m
+# CONFIG_NET_VENDOR_3COM is not set
#
# Tulip family network device support
#
-CONFIG_NET_TULIP=y
-CONFIG_DE2104X=m
-CONFIG_TULIP=m
-# CONFIG_TULIP_MWI is not set
-# CONFIG_TULIP_MMIO is not set
-CONFIG_TULIP_NAPI=y
-CONFIG_TULIP_NAPI_HW_MITIGATION=y
-CONFIG_DE4X5=m
-CONFIG_WINBOND_840=m
-# CONFIG_DM9102 is not set
+# CONFIG_NET_TULIP is not set
# CONFIG_HP100 is not set
CONFIG_NET_PCI=y
-CONFIG_PCNET32=m
+# CONFIG_PCNET32 is not set
# CONFIG_AMD8111_ETH is not set
-CONFIG_ADAPTEC_STARFIRE=m
-CONFIG_ADAPTEC_STARFIRE_NAPI=y
-CONFIG_B44=m
-CONFIG_FORCEDETH=m
-CONFIG_DGRS=m
-CONFIG_EEPRO100=m
-CONFIG_E100=m
-CONFIG_FEALNX=m
-CONFIG_NATSEMI=m
-CONFIG_NE2K_PCI=m
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_B44 is not set
+# CONFIG_FORCEDETH is not set
+# CONFIG_DGRS is not set
+# CONFIG_EEPRO100 is not set
+# CONFIG_E100 is not set
+# CONFIG_FEALNX is not set
+# CONFIG_NATSEMI is not set
+# CONFIG_NE2K_PCI is not set
# CONFIG_8139CP is not set
-CONFIG_8139TOO=m
-# CONFIG_8139TOO_PIO is not set
-# CONFIG_8139TOO_TUNE_TWISTER is not set
-# CONFIG_8139TOO_8129 is not set
-# CONFIG_8139_OLD_RX_RESET is not set
-CONFIG_SIS900=m
-CONFIG_EPIC100=m
-CONFIG_SUNDANCE=m
-CONFIG_SUNDANCE_MMIO=y
-CONFIG_VIA_RHINE=m
-# CONFIG_VIA_RHINE_MMIO is not set
+# CONFIG_8139TOO is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_VIA_RHINE is not set
#
# Ethernet (1000 Mbit)
#
-CONFIG_ACENIC=m
-# CONFIG_ACENIC_OMIT_TIGON_I is not set
-CONFIG_DL2K=m
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
CONFIG_E1000=m
CONFIG_E1000_NAPI=y
-CONFIG_MYRI_SBUS=m
-CONFIG_NS83820=m
-CONFIG_HAMACHI=m
-CONFIG_YELLOWFIN=m
-CONFIG_R8169=m
-CONFIG_R8169_NAPI=y
-CONFIG_R8169_VLAN=y
-CONFIG_SK98LIN=m
-CONFIG_VIA_VELOCITY=m
+# CONFIG_MYRI_SBUS is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_VIA_VELOCITY is not set
CONFIG_TIGON3=m
+CONFIG_BNX2=m
#
# Ethernet (10000 Mbit)
#
-CONFIG_IXGB=m
-CONFIG_IXGB_NAPI=y
-CONFIG_S2IO=m
-CONFIG_S2IO_NAPI=y
-CONFIG_2BUFF_MODE=y
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
#
# Token Ring devices
@@ -959,140 +557,31 @@ CONFIG_2BUFF_MODE=y
#
# Wireless LAN (non-hamradio)
#
-CONFIG_NET_RADIO=y
-
-#
-# Obsolete Wireless cards support (pre-802.11)
-#
-# CONFIG_STRIP is not set
-
-#
-# Wireless 802.11b ISA/PCI cards support
-#
-CONFIG_HERMES=m
-CONFIG_PLX_HERMES=m
-CONFIG_TMD_HERMES=m
-CONFIG_PCI_HERMES=m
-CONFIG_ATMEL=m
-CONFIG_PCI_ATMEL=m
-
-#
-# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
-#
-CONFIG_PRISM54=m
-CONFIG_NET_WIRELESS=y
+# CONFIG_NET_RADIO is not set
#
# Wan interfaces
#
# CONFIG_WAN is not set
-
-#
-# ATM drivers
-#
-CONFIG_ATM_TCP=m
-# CONFIG_ATM_LANAI is not set
-# CONFIG_ATM_ENI is not set
-# CONFIG_ATM_FIRESTREAM is not set
-# CONFIG_ATM_ZATM is not set
-# CONFIG_ATM_IDT77252 is not set
-# CONFIG_ATM_AMBASSADOR is not set
-# CONFIG_ATM_HORIZON is not set
-CONFIG_ATM_FORE200E_MAYBE=m
-CONFIG_ATM_FORE200E_PCA=y
-CONFIG_ATM_FORE200E_PCA_DEFAULT_FW=y
-CONFIG_ATM_FORE200E_SBA=y
-CONFIG_ATM_FORE200E_SBA_DEFAULT_FW=y
-CONFIG_ATM_FORE200E_USE_TASKLET=y
-CONFIG_ATM_FORE200E_TX_RETRY=16
-CONFIG_ATM_FORE200E_DEBUG=0
-CONFIG_ATM_FORE200E=m
-CONFIG_ATM_HE=m
-CONFIG_ATM_HE_USE_SUNI=y
-CONFIG_FDDI=y
-# CONFIG_DEFXX is not set
-CONFIG_SKFP=m
-CONFIG_HIPPI=y
-# CONFIG_ROADRUNNER is not set
-CONFIG_PLIP=m
-CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPPOE=m
-CONFIG_PPPOATM=m
-CONFIG_SLIP=m
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLIP_SMART=y
-# CONFIG_SLIP_MODE_SLIP6 is not set
-CONFIG_NET_FC=y
-CONFIG_SHAPER=m
-CONFIG_NETCONSOLE=m
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
#
# ISDN subsystem
#
-CONFIG_ISDN=m
-
-#
-# Old ISDN4Linux
-#
-# CONFIG_ISDN_I4L is not set
-
-#
-# CAPI subsystem
-#
-CONFIG_ISDN_CAPI=m
-# CONFIG_ISDN_DRV_AVMB1_VERBOSE_REASON is not set
-# CONFIG_ISDN_CAPI_MIDDLEWARE is not set
-CONFIG_ISDN_CAPI_CAPI20=m
-
-#
-# CAPI hardware drivers
-#
-
-#
-# Active AVM cards
-#
-CONFIG_CAPI_AVM=y
-CONFIG_ISDN_DRV_AVMB1_B1PCI=m
-CONFIG_ISDN_DRV_AVMB1_B1PCIV4=y
-CONFIG_ISDN_DRV_AVMB1_B1PCMCIA=m
-CONFIG_ISDN_DRV_AVMB1_T1PCI=m
-CONFIG_ISDN_DRV_AVMB1_C4=m
-
-#
-# Active Eicon DIVA Server cards
-#
-CONFIG_CAPI_EICON=y
-CONFIG_ISDN_DIVAS=m
-CONFIG_ISDN_DIVAS_BRIPCI=y
-CONFIG_ISDN_DIVAS_PRIPCI=y
-CONFIG_ISDN_DIVAS_DIVACAPI=m
-CONFIG_ISDN_DIVAS_USERIDI=m
-CONFIG_ISDN_DIVAS_MAINT=m
+# CONFIG_ISDN is not set
#
# Telephony Support
#
-CONFIG_PHONE=m
-CONFIG_PHONE_IXJ=m
-
-#
-# Unix98 PTY support
-#
-CONFIG_UNIX98_PTYS=y
-CONFIG_UNIX98_PTY_COUNT=256
-
-#
-# XFree86 DRI support
-#
-CONFIG_DRM=y
-CONFIG_DRM_TDFX=m
-# CONFIG_DRM_R128 is not set
+# CONFIG_PHONE is not set
#
# Input device support
@@ -1112,26 +601,6 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_EVBUG is not set
#
-# Input I/O drivers
-#
-CONFIG_GAMEPORT=m
-CONFIG_SOUND_GAMEPORT=m
-# CONFIG_GAMEPORT_NS558 is not set
-# CONFIG_GAMEPORT_L4 is not set
-# CONFIG_GAMEPORT_EMU10K1 is not set
-# CONFIG_GAMEPORT_VORTEX is not set
-# CONFIG_GAMEPORT_FM801 is not set
-CONFIG_GAMEPORT_CS461X=m
-CONFIG_SERIO=y
-CONFIG_SERIO_I8042=y
-# CONFIG_SERIO_SERPORT is not set
-# CONFIG_SERIO_CT82C710 is not set
-# CONFIG_SERIO_PARKBD is not set
-CONFIG_SERIO_PCIPS2=m
-CONFIG_SERIO_LIBPS2=y
-CONFIG_SERIO_RAW=m
-
-#
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
@@ -1143,7 +612,7 @@ CONFIG_KEYBOARD_LKKBD=m
CONFIG_INPUT_MOUSE=y
CONFIG_MOUSE_PS2=y
CONFIG_MOUSE_SERIAL=y
-CONFIG_MOUSE_VSXXXAA=m
+# CONFIG_MOUSE_VSXXXAA is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
CONFIG_INPUT_MISC=y
@@ -1151,406 +620,250 @@ CONFIG_INPUT_SPARCSPKR=y
# CONFIG_INPUT_UINPUT is not set
#
-# I2C support
+# Hardware I/O ports
#
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=m
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_PCIPS2=m
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=m
+# CONFIG_GAMEPORT is not set
#
-# I2C Algorithms
+# Character devices
#
-CONFIG_I2C_ALGOBIT=y
-CONFIG_I2C_ALGOPCF=m
-CONFIG_I2C_ALGOPCA=m
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
#
-# I2C Hardware Bus support
+# Serial drivers
#
-CONFIG_I2C_ALI1535=m
-CONFIG_I2C_ALI1563=m
-CONFIG_I2C_ALI15X3=m
-CONFIG_I2C_AMD756=m
-# CONFIG_I2C_AMD756_S4882 is not set
-CONFIG_I2C_AMD8111=m
-CONFIG_I2C_I801=m
-CONFIG_I2C_I810=m
-CONFIG_I2C_ISA=m
-CONFIG_I2C_NFORCE2=m
-CONFIG_I2C_PARPORT=m
-CONFIG_I2C_PARPORT_LIGHT=m
-CONFIG_I2C_PIIX4=m
-CONFIG_I2C_PROSAVAGE=m
-CONFIG_I2C_SAVAGE4=m
-CONFIG_SCx200_ACB=m
-CONFIG_I2C_SIS5595=m
-CONFIG_I2C_SIS630=m
-CONFIG_I2C_SIS96X=m
-CONFIG_I2C_STUB=m
-CONFIG_I2C_VIA=m
-CONFIG_I2C_VIAPRO=m
-CONFIG_I2C_VOODOO3=m
-CONFIG_I2C_PCA_ISA=m
-
-#
-# Hardware Sensors Chip support
-#
-CONFIG_I2C_SENSOR=m
-CONFIG_SENSORS_ADM1021=m
-CONFIG_SENSORS_ADM1025=m
-CONFIG_SENSORS_ADM1026=m
-CONFIG_SENSORS_ADM1031=m
-CONFIG_SENSORS_ASB100=m
-CONFIG_SENSORS_DS1621=m
-CONFIG_SENSORS_FSCHER=m
-CONFIG_SENSORS_FSCPOS=m
-CONFIG_SENSORS_GL518SM=m
-CONFIG_SENSORS_GL520SM=m
-CONFIG_SENSORS_IT87=m
-CONFIG_SENSORS_LM63=m
-CONFIG_SENSORS_LM75=m
-CONFIG_SENSORS_LM77=m
-CONFIG_SENSORS_LM78=m
-CONFIG_SENSORS_LM80=m
-CONFIG_SENSORS_LM83=m
-CONFIG_SENSORS_LM85=m
-CONFIG_SENSORS_LM87=m
-CONFIG_SENSORS_LM90=m
-CONFIG_SENSORS_MAX1619=m
-CONFIG_SENSORS_PC87360=m
-CONFIG_SENSORS_SMSC47B397=m
-CONFIG_SENSORS_SIS5595=m
-CONFIG_SENSORS_SMSC47M1=m
-CONFIG_SENSORS_VIA686A=m
-CONFIG_SENSORS_W83781D=m
-CONFIG_SENSORS_W83L785TS=m
-CONFIG_SENSORS_W83627HF=m
-
-#
-# Other I2C Chip support
-#
-CONFIG_SENSORS_EEPROM=m
-CONFIG_SENSORS_PCF8574=m
-CONFIG_SENSORS_PCF8591=m
-CONFIG_SENSORS_RTC8564=m
-# CONFIG_I2C_DEBUG_CORE is not set
-# CONFIG_I2C_DEBUG_ALGO is not set
-# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
#
-# File systems
+# Non-8250 serial port support
#
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_XATTR=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-CONFIG_FS_MBCACHE=y
-# CONFIG_REISERFS_FS is not set
-CONFIG_JFS_FS=m
-CONFIG_JFS_POSIX_ACL=y
-CONFIG_JFS_SECURITY=y
-# CONFIG_JFS_DEBUG is not set
-# CONFIG_JFS_STATISTICS is not set
-CONFIG_FS_POSIX_ACL=y
+CONFIG_SERIAL_SUNCORE=y
+# CONFIG_SERIAL_SUNZILOG is not set
+CONFIG_SERIAL_SUNSU=y
+CONFIG_SERIAL_SUNSU_CONSOLE=y
+CONFIG_SERIAL_SUNSAB=m
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
#
-# XFS support
+# IPMI
#
-CONFIG_XFS_FS=m
-CONFIG_XFS_EXPORT=y
-# CONFIG_XFS_RT is not set
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_SECURITY=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_MINIX_FS=m
-CONFIG_ROMFS_FS=m
-# CONFIG_QUOTA is not set
-CONFIG_QUOTACTL=y
-CONFIG_DNOTIFY=y
-CONFIG_AUTOFS_FS=m
-CONFIG_AUTOFS4_FS=m
+# CONFIG_IPMI_HANDLER is not set
#
-# CD-ROM/DVD Filesystems
+# Watchdog Cards
#
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-# CONFIG_ZISOFS is not set
-CONFIG_UDF_FS=m
-CONFIG_UDF_NLS=y
+# CONFIG_WATCHDOG is not set
+CONFIG_RTC=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
#
-# DOS/FAT/NT Filesystems
+# Ftape, the floppy tape device driver
#
-CONFIG_FAT_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
-# CONFIG_NTFS_FS is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
#
-# Pseudo filesystems
+# TPM devices
#
-CONFIG_PROC_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_SYSFS=y
-CONFIG_DEVFS_FS=y
-CONFIG_DEVFS_MOUNT=y
-# CONFIG_DEVFS_DEBUG is not set
-CONFIG_DEVPTS_FS_XATTR=y
-# CONFIG_DEVPTS_FS_SECURITY is not set
-CONFIG_TMPFS=y
-CONFIG_TMPFS_XATTR=y
-CONFIG_TMPFS_SECURITY=y
-CONFIG_HUGETLBFS=y
-CONFIG_HUGETLB_PAGE=y
-CONFIG_RAMFS=y
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
#
-# Miscellaneous filesystems
+# I2C support
#
-CONFIG_ADFS_FS=m
-# CONFIG_ADFS_FS_RW is not set
-CONFIG_AFFS_FS=m
-CONFIG_HFS_FS=m
-CONFIG_HFSPLUS_FS=m
-CONFIG_BEFS_FS=m
-# CONFIG_BEFS_DEBUG is not set
-CONFIG_BFS_FS=m
-CONFIG_EFS_FS=m
-CONFIG_CRAMFS=m
-CONFIG_VXFS_FS=m
-CONFIG_HPFS_FS=m
-CONFIG_QNX4FS_FS=m
-CONFIG_SYSV_FS=m
-CONFIG_UFS_FS=m
-CONFIG_UFS_FS_WRITE=y
+CONFIG_I2C=y
+# CONFIG_I2C_CHARDEV is not set
#
-# Network File Systems
+# I2C Algorithms
#
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
-CONFIG_NFS_DIRECTIO=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V3=y
-CONFIG_NFSD_V4=y
-CONFIG_NFSD_TCP=y
-CONFIG_LOCKD=m
-CONFIG_LOCKD_V4=y
-CONFIG_EXPORTFS=m
-CONFIG_SUNRPC=m
-CONFIG_SUNRPC_GSS=m
-CONFIG_RPCSEC_GSS_KRB5=m
-CONFIG_RPCSEC_GSS_SPKM3=m
-CONFIG_SMB_FS=m
-# CONFIG_SMB_NLS_DEFAULT is not set
-CONFIG_CIFS=m
-# CONFIG_CIFS_STATS is not set
-# CONFIG_CIFS_XATTR is not set
-# CONFIG_CIFS_EXPERIMENTAL is not set
-CONFIG_NCP_FS=m
-# CONFIG_NCPFS_PACKET_SIGNING is not set
-# CONFIG_NCPFS_IOCTL_LOCKING is not set
-# CONFIG_NCPFS_STRONG is not set
-# CONFIG_NCPFS_NFS_NS is not set
-# CONFIG_NCPFS_OS2_NS is not set
-# CONFIG_NCPFS_SMALLDOS is not set
-# CONFIG_NCPFS_NLS is not set
-# CONFIG_NCPFS_EXTRAS is not set
-CONFIG_CODA_FS=m
-# CONFIG_CODA_FS_OLD_API is not set
-CONFIG_AFS_FS=m
-CONFIG_RXRPC=m
+CONFIG_I2C_ALGOBIT=y
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
#
-# Partition Types
+# I2C Hardware Bus support
#
-# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_MSDOS_PARTITION=y
-CONFIG_SUN_PARTITION=y
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
+# CONFIG_SCx200_ACB is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_RTC8564 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_RTC_X1205_I2C is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
#
-# Native Language Support
+# Dallas's 1-wire bus
#
-CONFIG_NLS=y
-CONFIG_NLS_DEFAULT="iso8859-1"
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=m
+# CONFIG_W1 is not set
#
-# Multimedia devices
+# Hardware Monitoring support
#
-CONFIG_VIDEO_DEV=y
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
#
-# Video For Linux
+# Misc devices
#
#
-# Video Adapters
+# Multimedia Capabilities Port drivers
#
-CONFIG_VIDEO_BT848=m
-CONFIG_VIDEO_BWQCAM=m
-CONFIG_VIDEO_CQCAM=m
-CONFIG_VIDEO_W9966=m
-CONFIG_VIDEO_CPIA=m
-CONFIG_VIDEO_CPIA_PP=m
-CONFIG_VIDEO_CPIA_USB=m
-CONFIG_VIDEO_SAA5246A=m
-CONFIG_VIDEO_SAA5249=m
-CONFIG_TUNER_3036=m
-# CONFIG_VIDEO_STRADIS is not set
-# CONFIG_VIDEO_ZORAN is not set
-# CONFIG_VIDEO_SAA7134 is not set
-CONFIG_VIDEO_MXB=m
-CONFIG_VIDEO_DPC=m
-CONFIG_VIDEO_HEXIUM_ORION=m
-CONFIG_VIDEO_HEXIUM_GEMINI=m
-CONFIG_VIDEO_CX88=m
-CONFIG_VIDEO_OVCAMCHIP=m
#
-# Radio Adapters
+# Multimedia devices
#
-CONFIG_RADIO_GEMTEK_PCI=m
-CONFIG_RADIO_MAXIRADIO=m
-CONFIG_RADIO_MAESTRO=m
+# CONFIG_VIDEO_DEV is not set
#
# Digital Video Broadcasting Devices
#
-CONFIG_DVB=y
-CONFIG_DVB_CORE=m
-
-#
-# Supported SAA7146 based PCI Adapters
-#
-CONFIG_DVB_AV7110=m
-# CONFIG_DVB_AV7110_OSD is not set
-CONFIG_DVB_BUDGET=m
-CONFIG_DVB_BUDGET_CI=m
-CONFIG_DVB_BUDGET_AV=m
-CONFIG_DVB_BUDGET_PATCH=m
-
-#
-# Supported USB Adapters
-#
-# CONFIG_DVB_TTUSB_BUDGET is not set
-CONFIG_DVB_TTUSB_DEC=m
-CONFIG_DVB_DIBUSB=m
-# CONFIG_DVB_DIBUSB_MISDESIGNED_DEVICES is not set
-CONFIG_DVB_DIBCOM_DEBUG=y
-CONFIG_DVB_CINERGYT2=m
-# CONFIG_DVB_CINERGYT2_TUNING is not set
-
-#
-# Supported FlexCopII (B2C2) Adapters
-#
-CONFIG_DVB_B2C2_SKYSTAR=m
-CONFIG_DVB_B2C2_USB=m
+# CONFIG_DVB is not set
#
-# Supported BT878 Adapters
-#
-CONFIG_DVB_BT8XX=m
-
-#
-# Supported DVB Frontends
-#
-
-#
-# Customise DVB Frontends
-#
-
-#
-# DVB-S (satellite) frontends
-#
-CONFIG_DVB_STV0299=m
-CONFIG_DVB_CX24110=m
-CONFIG_DVB_TDA8083=m
-CONFIG_DVB_TDA80XX=m
-CONFIG_DVB_MT312=m
-CONFIG_DVB_VES1X93=m
-
-#
-# DVB-T (terrestrial) frontends
+# Graphics support
#
-CONFIG_DVB_SP8870=m
-CONFIG_DVB_SP887X=m
-CONFIG_DVB_CX22700=m
-CONFIG_DVB_CX22702=m
-CONFIG_DVB_L64781=m
-CONFIG_DVB_TDA1004X=m
-CONFIG_DVB_NXT6000=m
-CONFIG_DVB_MT352=m
-CONFIG_DVB_DIB3000MB=m
-CONFIG_DVB_DIB3000MC=m
+CONFIG_FB=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_MACMODES is not set
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_SBUS is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON_OLD is not set
+CONFIG_FB_RADEON=y
+CONFIG_FB_RADEON_I2C=y
+# CONFIG_FB_RADEON_DEBUG is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_PCI is not set
+# CONFIG_FB_VIRTUAL is not set
#
-# DVB-C (cable) frontends
+# Console display driver support
#
-CONFIG_DVB_ATMEL_AT76C651=m
-CONFIG_DVB_VES1820=m
-CONFIG_DVB_TDA10021=m
-CONFIG_DVB_STV0297=m
+# CONFIG_PROM_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+CONFIG_FONTS=y
+# CONFIG_FONT_8x8 is not set
+# CONFIG_FONT_8x16 is not set
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+CONFIG_FONT_SUN8x16=y
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_10x18 is not set
#
-# ATSC (North American/Korean Terresterial DTV) frontends
+# Logo configuration
#
-CONFIG_DVB_NXT2002=m
-CONFIG_VIDEO_SAA7146=m
-CONFIG_VIDEO_SAA7146_VV=m
-CONFIG_VIDEO_VIDEOBUF=m
-CONFIG_VIDEO_TUNER=m
-CONFIG_VIDEO_BUF=m
-CONFIG_VIDEO_BTCX=m
-CONFIG_VIDEO_IR=m
-CONFIG_VIDEO_TVEEPROM=m
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_LOGO_SUN_CLUT224=y
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
# Sound
@@ -1563,7 +876,6 @@ CONFIG_SOUND=m
CONFIG_SND=m
CONFIG_SND_TIMER=m
CONFIG_SND_PCM=m
-CONFIG_SND_HWDEP=m
CONFIG_SND_RAWMIDI=m
CONFIG_SND_SEQUENCER=m
CONFIG_SND_SEQ_DUMMY=m
@@ -1571,8 +883,9 @@ CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_SEQUENCER_OSS=y
-CONFIG_SND_BIT32_EMUL=m
# CONFIG_SND_RTCTIMER is not set
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
@@ -1580,59 +893,58 @@ CONFIG_SND_BIT32_EMUL=m
# Generic devices
#
CONFIG_SND_MPU401_UART=m
-CONFIG_SND_OPL3_LIB=m
-CONFIG_SND_VX_LIB=m
+CONFIG_SND_AC97_CODEC=m
+CONFIG_SND_AC97_BUS=m
CONFIG_SND_DUMMY=m
CONFIG_SND_VIRMIDI=m
-# CONFIG_SND_MTPAV is not set
+CONFIG_SND_MTPAV=m
# CONFIG_SND_SERIAL_U16550 is not set
# CONFIG_SND_MPU401 is not set
#
# PCI devices
#
-CONFIG_SND_AC97_CODEC=m
+# CONFIG_SND_AD1889 is not set
CONFIG_SND_ALI5451=m
-CONFIG_SND_ATIIXP=m
-CONFIG_SND_ATIIXP_MODEM=m
-CONFIG_SND_AU8810=m
-CONFIG_SND_AU8820=m
-CONFIG_SND_AU8830=m
-CONFIG_SND_AZT3328=m
-CONFIG_SND_BT87X=m
-# CONFIG_SND_BT87X_OVERCLOCK is not set
-CONFIG_SND_CS46XX=m
-# CONFIG_SND_CS46XX_NEW_DSP is not set
-CONFIG_SND_CS4281=m
-CONFIG_SND_EMU10K1=m
-CONFIG_SND_EMU10K1X=m
-CONFIG_SND_CA0106=m
-CONFIG_SND_KORG1212=m
-CONFIG_SND_MIXART=m
-CONFIG_SND_NM256=m
+# CONFIG_SND_ATIIXP is not set
+# CONFIG_SND_ATIIXP_MODEM is not set
+# CONFIG_SND_AU8810 is not set
+# CONFIG_SND_AU8820 is not set
+# CONFIG_SND_AU8830 is not set
+# CONFIG_SND_AZT3328 is not set
+# CONFIG_SND_BT87X is not set
+# CONFIG_SND_CA0106 is not set
+# CONFIG_SND_CMIPCI is not set
+# CONFIG_SND_CS4281 is not set
+# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_EMU10K1 is not set
+# CONFIG_SND_EMU10K1X is not set
+# CONFIG_SND_ENS1370 is not set
+# CONFIG_SND_ENS1371 is not set
+# CONFIG_SND_ES1938 is not set
+# CONFIG_SND_ES1968 is not set
+# CONFIG_SND_FM801 is not set
+# CONFIG_SND_HDA_INTEL is not set
+# CONFIG_SND_HDSP is not set
+# CONFIG_SND_HDSPM is not set
+# CONFIG_SND_ICE1712 is not set
+# CONFIG_SND_ICE1724 is not set
+# CONFIG_SND_INTEL8X0 is not set
+# CONFIG_SND_INTEL8X0M is not set
+# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_MAESTRO3 is not set
+# CONFIG_SND_MIXART is not set
+# CONFIG_SND_NM256 is not set
+# CONFIG_SND_PCXHR is not set
# CONFIG_SND_RME32 is not set
# CONFIG_SND_RME96 is not set
# CONFIG_SND_RME9652 is not set
-# CONFIG_SND_HDSP is not set
-CONFIG_SND_TRIDENT=m
-CONFIG_SND_YMFPCI=m
-CONFIG_SND_ALS4000=m
-CONFIG_SND_CMIPCI=m
-CONFIG_SND_ENS1370=m
-CONFIG_SND_ENS1371=m
-CONFIG_SND_ES1938=m
-CONFIG_SND_ES1968=m
-CONFIG_SND_MAESTRO3=m
-CONFIG_SND_FM801=m
-CONFIG_SND_FM801_TEA575X=m
-CONFIG_SND_ICE1712=m
-# CONFIG_SND_ICE1724 is not set
-CONFIG_SND_INTEL8X0=m
-CONFIG_SND_INTEL8X0M=m
-CONFIG_SND_SONICVIBES=m
+# CONFIG_SND_SONICVIBES is not set
+# CONFIG_SND_TRIDENT is not set
# CONFIG_SND_VIA82XX is not set
-CONFIG_SND_VIA82XX_MODEM=m
-CONFIG_SND_VX222=m
+# CONFIG_SND_VIA82XX_MODEM is not set
+# CONFIG_SND_VX222 is not set
+# CONFIG_SND_YMFPCI is not set
#
# USB devices
@@ -1642,12 +954,20 @@ CONFIG_SND_VX222=m
#
# ALSA Sparc devices
#
-CONFIG_SND_SUN_AMD7930=m
+# CONFIG_SND_SUN_AMD7930 is not set
CONFIG_SND_SUN_CS4231=m
+# CONFIG_SND_SUN_DBRI is not set
+
+#
+# Open Sound System
+#
+# CONFIG_SOUND_PRIME is not set
#
# USB support
#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
CONFIG_USB=y
# CONFIG_USB_DEBUG is not set
@@ -1658,8 +978,6 @@ CONFIG_USB_DEVICEFS=y
# CONFIG_USB_BANDWIDTH is not set
# CONFIG_USB_DYNAMIC_MINORS is not set
# CONFIG_USB_OTG is not set
-CONFIG_USB_ARCH_HAS_HCD=y
-CONFIG_USB_ARCH_HAS_OHCI=y
#
# USB Host Controller Drivers
@@ -1667,36 +985,29 @@ CONFIG_USB_ARCH_HAS_OHCI=y
CONFIG_USB_EHCI_HCD=m
# CONFIG_USB_EHCI_SPLIT_ISO is not set
# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+# CONFIG_USB_ISP116X_HCD is not set
CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
CONFIG_USB_UHCI_HCD=m
-CONFIG_USB_SL811_HCD=m
+# CONFIG_USB_SL811_HCD is not set
#
# USB Device Class drivers
#
-# CONFIG_USB_AUDIO is not set
+# CONFIG_OBSOLETE_OSS_USB_DRIVER is not set
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
#
-# USB Bluetooth TTY can only be used with disabled Bluetooth subsystem
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
#
-# CONFIG_USB_MIDI is not set
-CONFIG_USB_ACM=m
-CONFIG_USB_PRINTER=m
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+# may also be needed; see USB_STORAGE Help for more information
#
-CONFIG_USB_STORAGE=m
-# CONFIG_USB_STORAGE_DEBUG is not set
-CONFIG_USB_STORAGE_RW_DETECT=y
-# CONFIG_USB_STORAGE_DATAFAB is not set
-CONFIG_USB_STORAGE_FREECOM=y
-CONFIG_USB_STORAGE_ISD200=y
-CONFIG_USB_STORAGE_DPCM=y
-CONFIG_USB_STORAGE_HP8200e=y
-CONFIG_USB_STORAGE_SDDR09=y
-CONFIG_USB_STORAGE_SDDR55=y
-# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE is not set
+# CONFIG_USB_LIBUSUAL is not set
#
# USB Input Devices
@@ -1706,135 +1017,75 @@ CONFIG_USB_HIDINPUT=y
# CONFIG_HID_FF is not set
CONFIG_USB_HIDDEV=y
# CONFIG_USB_AIPTEK is not set
-CONFIG_USB_WACOM=m
-CONFIG_USB_KBTAB=m
+# CONFIG_USB_WACOM is not set
+# CONFIG_USB_ACECAD is not set
+# CONFIG_USB_KBTAB is not set
# CONFIG_USB_POWERMATE is not set
-CONFIG_USB_MTOUCH=m
-CONFIG_USB_EGALAX=m
+# CONFIG_USB_MTOUCH is not set
+# CONFIG_USB_ITMTOUCH is not set
+# CONFIG_USB_EGALAX is not set
+# CONFIG_USB_YEALINK is not set
# CONFIG_USB_XPAD is not set
-CONFIG_USB_ATI_REMOTE=m
+# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_ATI_REMOTE2 is not set
+# CONFIG_USB_KEYSPAN_REMOTE is not set
+# CONFIG_USB_APPLETOUCH is not set
#
# USB Imaging devices
#
-CONFIG_USB_MDC800=m
-CONFIG_USB_MICROTEK=m
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
#
# USB Multimedia devices
#
# CONFIG_USB_DABUSB is not set
-# CONFIG_USB_VICAM is not set
-# CONFIG_USB_DSBR is not set
-# CONFIG_USB_IBMCAM is not set
-# CONFIG_USB_KONICAWC is not set
-# CONFIG_USB_OV511 is not set
-# CONFIG_USB_SE401 is not set
-CONFIG_USB_SN9C102=m
-# CONFIG_USB_STV680 is not set
-CONFIG_USB_W9968CF=m
-
-#
-# USB Network Adapters
-#
-CONFIG_USB_CATC=m
-CONFIG_USB_KAWETH=m
-CONFIG_USB_PEGASUS=m
-CONFIG_USB_RTL8150=m
-CONFIG_USB_USBNET=m
-
-#
-# USB Host-to-Host Cables
-#
-CONFIG_USB_ALI_M5632=y
-CONFIG_USB_AN2720=y
-CONFIG_USB_BELKIN=y
-CONFIG_USB_GENESYS=y
-CONFIG_USB_NET1080=y
-CONFIG_USB_PL2301=y
-CONFIG_USB_KC2190=y
#
-# Intelligent USB Devices/Gadgets
+# Video4Linux support is needed for USB Multimedia device support
#
-CONFIG_USB_ARMLINUX=y
-CONFIG_USB_EPSON2888=y
-CONFIG_USB_ZAURUS=y
-CONFIG_USB_CDCETHER=y
#
# USB Network Adapters
#
-CONFIG_USB_AX8817X=y
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_USB_MON is not set
#
# USB port drivers
#
-CONFIG_USB_USS720=m
#
# USB Serial Converter support
#
-CONFIG_USB_SERIAL=m
-CONFIG_USB_SERIAL_GENERIC=y
-CONFIG_USB_SERIAL_BELKIN=m
-CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
-CONFIG_USB_SERIAL_CYPRESS_M8=m
-CONFIG_USB_SERIAL_EMPEG=m
-CONFIG_USB_SERIAL_FTDI_SIO=m
-# CONFIG_USB_SERIAL_VISOR is not set
-CONFIG_USB_SERIAL_IPAQ=m
-# CONFIG_USB_SERIAL_IR is not set
-CONFIG_USB_SERIAL_EDGEPORT=m
-# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
-CONFIG_USB_SERIAL_GARMIN=m
-CONFIG_USB_SERIAL_IPW=m
-CONFIG_USB_SERIAL_KEYSPAN_PDA=m
-CONFIG_USB_SERIAL_KEYSPAN=m
-# CONFIG_USB_SERIAL_KEYSPAN_MPR is not set
-# CONFIG_USB_SERIAL_KEYSPAN_USA28 is not set
-# CONFIG_USB_SERIAL_KEYSPAN_USA28X is not set
-# CONFIG_USB_SERIAL_KEYSPAN_USA28XA is not set
-# CONFIG_USB_SERIAL_KEYSPAN_USA28XB is not set
-# CONFIG_USB_SERIAL_KEYSPAN_USA19 is not set
-# CONFIG_USB_SERIAL_KEYSPAN_USA18X is not set
-# CONFIG_USB_SERIAL_KEYSPAN_USA19W is not set
-# CONFIG_USB_SERIAL_KEYSPAN_USA19QW is not set
-# CONFIG_USB_SERIAL_KEYSPAN_USA19QI is not set
-# CONFIG_USB_SERIAL_KEYSPAN_USA49W is not set
-# CONFIG_USB_SERIAL_KEYSPAN_USA49WLC is not set
-CONFIG_USB_SERIAL_KLSI=m
-# CONFIG_USB_SERIAL_KOBIL_SCT is not set
-CONFIG_USB_SERIAL_MCT_U232=m
-CONFIG_USB_SERIAL_PL2303=m
-# CONFIG_USB_SERIAL_SAFE is not set
-CONFIG_USB_SERIAL_TI=m
-CONFIG_USB_SERIAL_CYBERJACK=m
-CONFIG_USB_SERIAL_XIRCOM=m
-CONFIG_USB_SERIAL_OMNINET=m
-CONFIG_USB_EZUSB=y
+# CONFIG_USB_SERIAL is not set
#
# USB Miscellaneous drivers
#
-CONFIG_USB_EMI62=m
-CONFIG_USB_EMI26=m
-CONFIG_USB_AUERSWALD=m
-CONFIG_USB_RIO500=m
-CONFIG_USB_LEGOTOWER=m
-CONFIG_USB_LCD=m
-CONFIG_USB_LED=m
-CONFIG_USB_CYTHERM=m
-CONFIG_USB_PHIDGETKIT=m
-CONFIG_USB_PHIDGETSERVO=m
-CONFIG_USB_IDMOUSE=m
-CONFIG_USB_TEST=m
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGETKIT is not set
+# CONFIG_USB_PHIDGETSERVO is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TEST is not set
#
-# USB ATM/DSL drivers
+# USB DSL modem support
#
-CONFIG_USB_ATM=m
-CONFIG_USB_SPEEDTOUCH=m
#
# USB Gadget Support
@@ -1842,50 +1093,185 @@ CONFIG_USB_SPEEDTOUCH=m
# CONFIG_USB_GADGET is not set
#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
# InfiniBand support
#
-CONFIG_INFINIBAND=m
-CONFIG_INFINIBAND_MTHCA=m
-# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
-CONFIG_INFINIBAND_IPOIB=m
-# CONFIG_INFINIBAND_IPOIB_DEBUG is not set
+# CONFIG_INFINIBAND is not set
#
-# Watchdog Cards
+# SN Devices
+#
+
+#
+# Misc Linux/SPARC drivers
+#
+CONFIG_SUN_OPENPROMIO=m
+CONFIG_SUN_MOSTEK_RTC=y
+# CONFIG_OBP_FLASH is not set
+# CONFIG_SUN_BPP is not set
+# CONFIG_BBC_I2C is not set
+# CONFIG_ENVCTRL is not set
+# CONFIG_DISPLAY7SEG is not set
+
+#
+# Fibre Channel support
+#
+# CONFIG_FC4 is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
#
-CONFIG_WATCHDOG=y
-# CONFIG_WATCHDOG_NOWAYOUT is not set
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
#
-# Watchdog Device Drivers
+# DOS/FAT/NT Filesystems
#
-CONFIG_SOFT_WATCHDOG=m
-CONFIG_WATCHDOG_CP1XXX=m
-CONFIG_WATCHDOG_RIO=m
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
#
-# PCI-based Watchdog Cards
+# Pseudo filesystems
#
-CONFIG_PCIPCWATCHDOG=m
-CONFIG_WDTPCI=m
-CONFIG_WDT_501_PCI=y
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+CONFIG_RAMFS=y
+CONFIG_RELAYFS_FS=m
+# CONFIG_CONFIGFS_FS is not set
#
-# USB-based Watchdog Cards
+# Miscellaneous filesystems
#
-CONFIG_USBPCWATCHDOG=m
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
#
-# Profiling support
+# Network File Systems
+#
+# CONFIG_NFS_FS is not set
+# CONFIG_NFSD is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_SUN_PARTITION=y
+
+#
+# Native Language Support
+#
+CONFIG_NLS=m
+CONFIG_NLS_DEFAULT="iso8859-1"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Instrumentation Support
#
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
+CONFIG_KPROBES=y
#
# Kernel hacking
#
+CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_KERNEL=y
CONFIG_MAGIC_SYSRQ=y
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_DETECT_SOFTLOCKUP=y
CONFIG_SCHEDSTATS=y
# CONFIG_DEBUG_SLAB is not set
# CONFIG_DEBUG_SPINLOCK is not set
@@ -1894,12 +1280,13 @@ CONFIG_SCHEDSTATS=y
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
CONFIG_DEBUG_FS=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_DEBUG_STACK_USAGE is not set
-CONFIG_KPROBES=y
# CONFIG_DEBUG_DCFLUSH is not set
# CONFIG_STACK_DEBUG is not set
# CONFIG_DEBUG_BOOTMEM is not set
-CONFIG_HAVE_DEC_LOCK=y
+# CONFIG_DEBUG_PAGEALLOC is not set
#
# Security options
@@ -1945,6 +1332,7 @@ CONFIG_CRYPTO_TEST=m
# Library routines
#
CONFIG_CRC_CCITT=m
+CONFIG_CRC16=m
CONFIG_CRC32=y
CONFIG_LIBCRC32C=m
CONFIG_ZLIB_INFLATE=y
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index 6f00ab8b9d2..83d67eb1889 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -16,7 +16,7 @@ obj-y := process.o setup.o cpu.o idprom.o \
obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \
pci_psycho.o pci_sabre.o pci_schizo.o
obj-$(CONFIG_SMP) += smp.o trampoline.o
-obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o ioctl32.o
+obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o
obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
obj-$(CONFIG_BINFMT_AOUT32) += binfmt_aout32.o
obj-$(CONFIG_MODULES) += module.o
@@ -40,5 +40,3 @@ endif
head.o: head.S ttable.S itlb_base.S dtlb_base.S dtlb_backend.S dtlb_prot.S \
etrap.S rtrap.S winfixup.S entry.S
-
-CFLAGS_ioctl32.o += -Ifs/
diff --git a/arch/sparc64/kernel/binfmt_aout32.c b/arch/sparc64/kernel/binfmt_aout32.c
index edf52d06b28..202a80c24b6 100644
--- a/arch/sparc64/kernel/binfmt_aout32.c
+++ b/arch/sparc64/kernel/binfmt_aout32.c
@@ -36,8 +36,6 @@ static int load_aout32_binary(struct linux_binprm *, struct pt_regs * regs);
static int load_aout32_library(struct file*);
static int aout32_core_dump(long signr, struct pt_regs * regs, struct file *file);
-extern void dump_thread(struct pt_regs *, struct user *);
-
static struct linux_binfmt aout32_format = {
NULL, THIS_MODULE, load_aout32_binary, load_aout32_library, aout32_core_dump,
PAGE_SIZE
diff --git a/arch/sparc64/kernel/ebus.c b/arch/sparc64/kernel/ebus.c
index 6ffbeb70194..7991e919d8a 100644
--- a/arch/sparc64/kernel/ebus.c
+++ b/arch/sparc64/kernel/ebus.c
@@ -527,18 +527,12 @@ static struct pci_dev *find_next_ebus(struct pci_dev *start, int *is_rio_p)
{
struct pci_dev *pdev = start;
- do {
- pdev = pci_find_device(PCI_VENDOR_ID_SUN, PCI_ANY_ID, pdev);
- if (pdev &&
- (pdev->device == PCI_DEVICE_ID_SUN_EBUS ||
- pdev->device == PCI_DEVICE_ID_SUN_RIO_EBUS))
+ while ((pdev = pci_get_device(PCI_VENDOR_ID_SUN, PCI_ANY_ID, pdev)))
+ if (pdev->device == PCI_DEVICE_ID_SUN_EBUS ||
+ pdev->device == PCI_DEVICE_ID_SUN_RIO_EBUS)
break;
- } while (pdev != NULL);
- if (pdev && (pdev->device == PCI_DEVICE_ID_SUN_RIO_EBUS))
- *is_rio_p = 1;
- else
- *is_rio_p = 0;
+ *is_rio_p = !!(pdev && (pdev->device == PCI_DEVICE_ID_SUN_RIO_EBUS));
return pdev;
}
@@ -637,6 +631,7 @@ void __init ebus_init(void)
ebus->is_rio = is_rio;
++num_ebus;
}
+ pci_dev_put(pdev); /* XXX for the case, when ebusnd is 0, is it OK? */
#ifdef CONFIG_SUN_AUXIO
auxio_probe();
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 11a848402fb..71000299188 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -1657,13 +1657,10 @@ ret_sys_call:
/* Check if force_successful_syscall_return()
* was invoked.
*/
- ldub [%curptr + TI_SYS_NOERROR], %l0
- brz,pt %l0, 1f
- nop
- ba,pt %xcc, 80f
+ ldub [%curptr + TI_SYS_NOERROR], %l2
+ brnz,a,pn %l2, 80f
stb %g0, [%curptr + TI_SYS_NOERROR]
-1:
cmp %o0, -ERESTART_RESTARTBLOCK
bgeu,pn %xcc, 1f
andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6
diff --git a/arch/sparc64/kernel/ioctl32.c b/arch/sparc64/kernel/ioctl32.c
deleted file mode 100644
index 196b208665a..00000000000
--- a/arch/sparc64/kernel/ioctl32.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/* $Id: ioctl32.c,v 1.136 2002/01/14 09:49:52 davem Exp $
- * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
- *
- * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
- * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
- * Copyright (C) 2003 Pavel Machek (pavel@suse.cz)
- *
- * These routines maintain argument size conversion between 32bit and 64bit
- * ioctls.
- */
-
-#define INCLUDES
-#include "compat_ioctl.c"
-#include <linux/syscalls.h>
-
-#define CODE
-#include "compat_ioctl.c"
-
-#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL((cmd),sys_ioctl)
-#define HANDLE_IOCTL(cmd,handler) { (cmd), (ioctl_trans_handler_t)(handler), NULL },
-#define IOCTL_TABLE_START \
- struct ioctl_trans ioctl_start[] = {
-#define IOCTL_TABLE_END \
- };
-
-IOCTL_TABLE_START
-#include <linux/compat_ioctl.h>
-#define DECLARES
-#include "compat_ioctl.c"
-#if 0
-HANDLE_IOCTL(RTC32_IRQP_READ, do_rtc_ioctl)
-HANDLE_IOCTL(RTC32_IRQP_SET, do_rtc_ioctl)
-HANDLE_IOCTL(RTC32_EPOCH_READ, do_rtc_ioctl)
-HANDLE_IOCTL(RTC32_EPOCH_SET, do_rtc_ioctl)
-#endif
-/* take care of sizeof(sizeof()) breakage */
-IOCTL_TABLE_END
-
-int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index a97b0f0727a..b9a9ce70e55 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -43,14 +43,10 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
- return 0;
-}
-
-void __kprobes arch_copy_kprobe(struct kprobe *p)
-{
p->ainsn.insn[0] = *p->addr;
p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
p->opcode = *p->addr;
+ return 0;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
@@ -65,10 +61,6 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
flushi(p->addr);
}
-void __kprobes arch_remove_kprobe(struct kprobe *p)
-{
-}
-
static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
@@ -143,6 +135,14 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
prepare_singlestep(p, regs, kcb);
return 1;
} else {
+ if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
+ /* The breakpoint instruction was removed by
+ * another cpu right after we hit, no further
+ * handling of this interrupt is appropriate
+ */
+ ret = 1;
+ goto no_kprobe;
+ }
p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs))
goto ss_probe;
diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c
index 9e8362ea310..30bcaf58e3a 100644
--- a/arch/sparc64/kernel/power.c
+++ b/arch/sparc64/kernel/power.c
@@ -14,6 +14,7 @@
#include <linux/signal.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/pm.h>
#include <asm/system.h>
#include <asm/ebus.h>
@@ -70,6 +71,9 @@ void machine_power_off(void)
machine_halt();
}
+void (*pm_power_off)(void) = machine_power_off;
+EXPORT_SYMBOL(pm_power_off);
+
#ifdef CONFIG_PCI
static int powerd(void *__unused)
{
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 02f9dec1d45..1dc3650c5ca 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -390,7 +390,7 @@ void show_regs32(struct pt_regs32 *regs)
unsigned long thread_saved_pc(struct task_struct *tsk)
{
- struct thread_info *ti = tsk->thread_info;
+ struct thread_info *ti = task_thread_info(tsk);
unsigned long ret = 0xdeadbeefUL;
if (ti && ti->ksp) {
@@ -616,11 +616,11 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
unsigned long unused,
struct task_struct *p, struct pt_regs *regs)
{
- struct thread_info *t = p->thread_info;
+ struct thread_info *t = task_thread_info(p);
char *child_trap_frame;
/* Calculate offset to stack_frame & pt_regs */
- child_trap_frame = ((char *)t) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ));
+ child_trap_frame = task_stack_page(p) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ));
memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ));
t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
@@ -845,9 +845,9 @@ unsigned long get_wchan(struct task_struct *task)
task->state == TASK_RUNNING)
goto out;
- thread_info_base = (unsigned long) task->thread_info;
+ thread_info_base = (unsigned long) task_stack_page(task);
bias = STACK_BIAS;
- fp = task->thread_info->ksp + bias;
+ fp = task_thread_info(task)->ksp + bias;
do {
/* Bogus frame pointer? */
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
index 774ecbb8a03..3f9746f856d 100644
--- a/arch/sparc64/kernel/ptrace.c
+++ b/arch/sparc64/kernel/ptrace.c
@@ -198,39 +198,15 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
}
#endif
if (request == PTRACE_TRACEME) {
- int ret;
-
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED) {
- pt_error_return(regs, EPERM);
- goto out;
- }
- ret = security_ptrace(current->parent, current);
- if (ret) {
- pt_error_return(regs, -ret);
- goto out;
- }
-
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
+ ret = ptrace_traceme();
pt_succ_return(regs, 0);
goto out;
}
-#ifndef ALLOW_INIT_TRACING
- if (pid == 1) {
- /* Can't dork with init. */
- pt_error_return(regs, EPERM);
- goto out;
- }
-#endif
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child) {
- pt_error_return(regs, ESRCH);
+ child = ptrace_get_task_struct(pid);
+ if (IS_ERR(child)) {
+ ret = PTR_ERR(child);
+ pt_error_return(regs, -ret);
goto out;
}
@@ -320,7 +296,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
case PTRACE_GETREGS: {
struct pt_regs32 __user *pregs =
(struct pt_regs32 __user *) addr;
- struct pt_regs *cregs = child->thread_info->kregs;
+ struct pt_regs *cregs = task_pt_regs(child);
int rval;
if (__put_user(tstate_to_psr(cregs->tstate), (&pregs->psr)) ||
@@ -344,11 +320,11 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
case PTRACE_GETREGS64: {
struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
- struct pt_regs *cregs = child->thread_info->kregs;
+ struct pt_regs *cregs = task_pt_regs(child);
unsigned long tpc = cregs->tpc;
int rval;
- if ((child->thread_info->flags & _TIF_32BIT) != 0)
+ if ((task_thread_info(child)->flags & _TIF_32BIT) != 0)
tpc &= 0xffffffff;
if (__put_user(cregs->tstate, (&pregs->tstate)) ||
__put_user(tpc, (&pregs->tpc)) ||
@@ -372,7 +348,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
case PTRACE_SETREGS: {
struct pt_regs32 __user *pregs =
(struct pt_regs32 __user *) addr;
- struct pt_regs *cregs = child->thread_info->kregs;
+ struct pt_regs *cregs = task_pt_regs(child);
unsigned int psr, pc, npc, y;
int i;
@@ -405,7 +381,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
case PTRACE_SETREGS64: {
struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
- struct pt_regs *cregs = child->thread_info->kregs;
+ struct pt_regs *cregs = task_pt_regs(child);
unsigned long tstate, tpc, tnpc, y;
int i;
@@ -419,7 +395,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
pt_error_return(regs, EFAULT);
goto out_tsk;
}
- if ((child->thread_info->flags & _TIF_32BIT) != 0) {
+ if ((task_thread_info(child)->flags & _TIF_32BIT) != 0) {
tpc &= 0xffffffff;
tnpc &= 0xffffffff;
}
@@ -454,11 +430,11 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
} fpq[16];
};
struct fps __user *fps = (struct fps __user *) addr;
- unsigned long *fpregs = child->thread_info->fpregs;
+ unsigned long *fpregs = task_thread_info(child)->fpregs;
if (copy_to_user(&fps->regs[0], fpregs,
(32 * sizeof(unsigned int))) ||
- __put_user(child->thread_info->xfsr[0], (&fps->fsr)) ||
+ __put_user(task_thread_info(child)->xfsr[0], (&fps->fsr)) ||
__put_user(0, (&fps->fpqd)) ||
__put_user(0, (&fps->flags)) ||
__put_user(0, (&fps->extra)) ||
@@ -476,11 +452,11 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
unsigned long fsr;
};
struct fps __user *fps = (struct fps __user *) addr;
- unsigned long *fpregs = child->thread_info->fpregs;
+ unsigned long *fpregs = task_thread_info(child)->fpregs;
if (copy_to_user(&fps->regs[0], fpregs,
(64 * sizeof(unsigned int))) ||
- __put_user(child->thread_info->xfsr[0], (&fps->fsr))) {
+ __put_user(task_thread_info(child)->xfsr[0], (&fps->fsr))) {
pt_error_return(regs, EFAULT);
goto out_tsk;
}
@@ -501,7 +477,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
} fpq[16];
};
struct fps __user *fps = (struct fps __user *) addr;
- unsigned long *fpregs = child->thread_info->fpregs;
+ unsigned long *fpregs = task_thread_info(child)->fpregs;
unsigned fsr;
if (copy_from_user(fpregs, &fps->regs[0],
@@ -510,11 +486,11 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
pt_error_return(regs, EFAULT);
goto out_tsk;
}
- child->thread_info->xfsr[0] &= 0xffffffff00000000UL;
- child->thread_info->xfsr[0] |= fsr;
- if (!(child->thread_info->fpsaved[0] & FPRS_FEF))
- child->thread_info->gsr[0] = 0;
- child->thread_info->fpsaved[0] |= (FPRS_FEF | FPRS_DL);
+ task_thread_info(child)->xfsr[0] &= 0xffffffff00000000UL;
+ task_thread_info(child)->xfsr[0] |= fsr;
+ if (!(task_thread_info(child)->fpsaved[0] & FPRS_FEF))
+ task_thread_info(child)->gsr[0] = 0;
+ task_thread_info(child)->fpsaved[0] |= (FPRS_FEF | FPRS_DL);
pt_succ_return(regs, 0);
goto out_tsk;
}
@@ -525,17 +501,17 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
unsigned long fsr;
};
struct fps __user *fps = (struct fps __user *) addr;
- unsigned long *fpregs = child->thread_info->fpregs;
+ unsigned long *fpregs = task_thread_info(child)->fpregs;
if (copy_from_user(fpregs, &fps->regs[0],
(64 * sizeof(unsigned int))) ||
- __get_user(child->thread_info->xfsr[0], (&fps->fsr))) {
+ __get_user(task_thread_info(child)->xfsr[0], (&fps->fsr))) {
pt_error_return(regs, EFAULT);
goto out_tsk;
}
- if (!(child->thread_info->fpsaved[0] & FPRS_FEF))
- child->thread_info->gsr[0] = 0;
- child->thread_info->fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU);
+ if (!(task_thread_info(child)->fpsaved[0] & FPRS_FEF))
+ task_thread_info(child)->gsr[0] = 0;
+ task_thread_info(child)->fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU);
pt_succ_return(regs, 0);
goto out_tsk;
}
@@ -586,8 +562,8 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
#ifdef DEBUG_PTRACE
printk("CONT: %s [%d]: set exit_code = %x %lx %lx\n", child->comm,
child->pid, child->exit_code,
- child->thread_info->kregs->tpc,
- child->thread_info->kregs->tnpc);
+ task_pt_regs(child)->tpc,
+ task_pt_regs(child)->tnpc);
#endif
wake_up_process(child);
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 48180531562..250745896ae 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -520,7 +520,7 @@ void __init setup_arch(char **cmdline_p)
rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif
- init_task.thread_info->kregs = &fake_swapper_regs;
+ task_thread_info(&init_task)->kregs = &fake_swapper_regs;
#ifdef CONFIG_IP_PNP
if (!ic_set_manually) {
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 6efc03df51c..1fb6323e65a 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -335,7 +335,7 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
p = fork_idle(cpu);
callin_flag = 0;
- cpu_new_thread = p->thread_info;
+ cpu_new_thread = task_thread_info(p);
cpu_set(cpu, cpu_callout_map);
cpu_find_by_mid(cpu, &cpu_node);
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index fb7a5370dbf..d177d7e5c9d 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -94,7 +94,6 @@ extern void (*prom_palette)(int);
extern int __ashrdi3(int, int);
-extern void dump_thread(struct pt_regs *, struct user *);
extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
extern unsigned long phys_base;
@@ -241,7 +240,6 @@ EXPORT_SYMBOL(io_remap_pfn_range);
EXPORT_SYMBOL(_sigpause_common);
EXPORT_SYMBOL(verify_compat_iovec);
-EXPORT_SYMBOL(dump_thread);
EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(pte_alloc_one_kernel);
#ifndef CONFIG_SMP
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
index 7f6239ed252..d4b7a100cb8 100644
--- a/arch/sparc64/kernel/sys_sparc32.c
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -11,6 +11,7 @@
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/file.h>
@@ -1120,39 +1121,3 @@ long sys32_lookup_dcookie(unsigned long cookie_high,
return sys_lookup_dcookie((cookie_high << 32) | cookie_low,
buf, len);
}
-
-extern asmlinkage long
-sys_timer_create(clockid_t which_clock,
- struct sigevent __user *timer_event_spec,
- timer_t __user *created_timer_id);
-
-long
-sys32_timer_create(u32 clock, struct compat_sigevent __user *se32,
- timer_t __user *timer_id)
-{
- struct sigevent se;
- mm_segment_t oldfs;
- timer_t t;
- long err;
-
- if (se32 == NULL)
- return sys_timer_create(clock, NULL, timer_id);
-
- if (get_compat_sigevent(&se, se32))
- return -EFAULT;
-
- if (!access_ok(VERIFY_WRITE,timer_id,sizeof(timer_t)))
- return -EFAULT;
-
- oldfs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_timer_create(clock,
- (struct sigevent __user *) &se,
- (timer_t __user *) &t);
- set_fs(oldfs);
-
- if (!err)
- err = __put_user (t, timer_id);
-
- return err;
-}
diff --git a/arch/sparc64/kernel/sys_sunos32.c b/arch/sparc64/kernel/sys_sunos32.c
index bfa4aa68312..ae5b32f817f 100644
--- a/arch/sparc64/kernel/sys_sunos32.c
+++ b/arch/sparc64/kernel/sys_sunos32.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
+#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/mm.h>
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
index 53eaf2345fe..98d24bc0004 100644
--- a/arch/sparc64/kernel/systbls.S
+++ b/arch/sparc64/kernel/systbls.S
@@ -73,7 +73,7 @@ sys_call_table32:
/*250*/ .word sys32_mremap, sys32_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl
.word sys_ni_syscall, sys32_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
/*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
- .word sys_timer_delete, sys32_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
+ .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
/*270*/ .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink
.word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
/*280*/ .word sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl
@@ -98,7 +98,7 @@ sys_call_table:
.word sys_umount, sys_setgid, sys_getgid, sys_signal, sys_geteuid
/*50*/ .word sys_getegid, sys_acct, sys_memory_ordering, sys_nis_syscall, sys_ioctl
.word sys_reboot, sys_nis_syscall, sys_symlink, sys_readlink, sys_execve
-/*60*/ .word sys_umask, sys_chroot, sys_newfstat, sys_stat64, sys_getpagesize
+/*60*/ .word sys_umask, sys_chroot, sys_newfstat, sys_fstat64, sys_getpagesize
.word sys_msync, sys_vfork, sys_pread64, sys_pwrite64, sys_nis_syscall
/*70*/ .word sys_nis_syscall, sys_mmap, sys_nis_syscall, sys64_munmap, sys_mprotect
.word sys_madvise, sys_vhangup, sys_nis_syscall, sys_mincore, sys_getgroups
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 5570e7bb22b..8d44ae5a15e 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -1808,7 +1808,7 @@ static void user_instruction_dump (unsigned int __user *pc)
void show_stack(struct task_struct *tsk, unsigned long *_ksp)
{
unsigned long pc, fp, thread_base, ksp;
- struct thread_info *tp = tsk->thread_info;
+ void *tp = task_stack_page(tsk);
struct reg_window *rw;
int count = 0;
@@ -1862,7 +1862,7 @@ static inline int is_kernel_stack(struct task_struct *task,
return 0;
}
- thread_base = (unsigned long) task->thread_info;
+ thread_base = (unsigned long) task_stack_page(task);
thread_end = thread_base + sizeof(union thread_union);
if (rw_addr >= thread_base &&
rw_addr < thread_end &&
diff --git a/arch/sparc64/solaris/fs.c b/arch/sparc64/solaris/fs.c
index d7c99fa8966..4885ca6cbc7 100644
--- a/arch/sparc64/solaris/fs.c
+++ b/arch/sparc64/solaris/fs.c
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/mm.h>
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 1eb21de9d1b..8ff3bcbce5f 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -22,10 +22,6 @@ config SBUS
config PCI
bool
-config UID16
- bool
- default y
-
config GENERIC_CALIBRATE_DELAY
bool
default y
@@ -39,12 +35,12 @@ menu "UML-specific options"
config MODE_TT
bool "Tracing thread support"
- default y
+ default n
help
This option controls whether tracing thread support is compiled
- into UML. Normally, this should be set to Y. If you intend to
- use only skas mode (and the host has the skas patch applied to it),
- then it is OK to say N here.
+ into UML. This option is largely obsolete, given that skas0 provides
+ skas security and performance without needing to patch the host.
+ It is safe to say 'N' here.
config STATIC_LINK
bool "Force a static link"
@@ -83,7 +79,7 @@ config KERNEL_HALF_GIGS
of physical memory.
config MODE_SKAS
- bool "Separate Kernel Address Space support"
+ bool "Separate Kernel Address Space support" if MODE_TT
default y
help
This option controls whether skas (separate kernel address space)
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 1b12feeba36..322972fd064 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -189,6 +189,12 @@ define filechk_umlconfig
sed 's/ CONFIG/ UML_CONFIG/'
endef
+$(ARCH_DIR)/include/uml-config.h : include/linux/autoconf.h
+ $(call filechk,umlconfig)
+
+$(ARCH_DIR)/user-offsets.s: $(ARCH_DIR)/sys-$(SUBARCH)/user-offsets.c
+ $(CC) $(USER_CFLAGS) -S -o $@ $<
+
define filechk_gen-asm-offsets
(set -e; \
echo "/*"; \
@@ -202,24 +208,13 @@ define filechk_gen-asm-offsets
echo ""; )
endef
-$(ARCH_DIR)/include/uml-config.h : include/linux/autoconf.h
- $(call filechk,umlconfig)
-
-$(ARCH_DIR)/user-offsets.s: $(ARCH_DIR)/sys-$(SUBARCH)/user-offsets.c
- $(CC) $(USER_CFLAGS) -S -o $@ $<
-
$(ARCH_DIR)/include/user_constants.h: $(ARCH_DIR)/user-offsets.s
$(call filechk,gen-asm-offsets)
CLEAN_FILES += $(ARCH_DIR)/user-offsets.s
-$(ARCH_DIR)/kernel-offsets.s: $(ARCH_DIR)/sys-$(SUBARCH)/kernel-offsets.c \
- archprepare
- $(CC) $(CFLAGS) $(NOSTDINC_FLAGS) $(CPPFLAGS) -S -o $@ $<
-
-$(ARCH_DIR)/include/kern_constants.h: $(ARCH_DIR)/kernel-offsets.s
- $(call filechk,gen-asm-offsets)
-
-CLEAN_FILES += $(ARCH_DIR)/kernel-offsets.s
+$(ARCH_DIR)/include/kern_constants.h:
+ @echo ' SYMLINK $@'
+ $(Q) ln -sf ../../../include/asm-um/asm-offsets.h $@
export SUBARCH USER_CFLAGS OS
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index 5b58fad4529..ab0d0b17081 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
@@ -58,7 +58,7 @@ static void *not_configged_init(char *str, int device, struct chan_opts *opts)
{
my_puts("Using a channel type which is configured out of "
"UML\n");
- return(NULL);
+ return NULL;
}
static int not_configged_open(int input, int output, int primary, void *data,
@@ -66,7 +66,7 @@ static int not_configged_open(int input, int output, int primary, void *data,
{
my_puts("Using a channel type which is configured out of "
"UML\n");
- return(-ENODEV);
+ return -ENODEV;
}
static void not_configged_close(int fd, void *data)
@@ -79,21 +79,21 @@ static int not_configged_read(int fd, char *c_out, void *data)
{
my_puts("Using a channel type which is configured out of "
"UML\n");
- return(-EIO);
+ return -EIO;
}
static int not_configged_write(int fd, const char *buf, int len, void *data)
{
my_puts("Using a channel type which is configured out of "
"UML\n");
- return(-EIO);
+ return -EIO;
}
static int not_configged_console_write(int fd, const char *buf, int len)
{
my_puts("Using a channel type which is configured out of "
"UML\n");
- return(-EIO);
+ return -EIO;
}
static int not_configged_window_size(int fd, void *data, unsigned short *rows,
@@ -101,7 +101,7 @@ static int not_configged_window_size(int fd, void *data, unsigned short *rows,
{
my_puts("Using a channel type which is configured out of "
"UML\n");
- return(-ENODEV);
+ return -ENODEV;
}
static void not_configged_free(void *data)
@@ -135,17 +135,17 @@ int generic_read(int fd, char *c_out, void *unused)
n = os_read_file(fd, c_out, sizeof(*c_out));
if(n == -EAGAIN)
- return(0);
+ return 0;
else if(n == 0)
- return(-EIO);
- return(n);
+ return -EIO;
+ return n;
}
/* XXX Trivial wrapper around os_write_file */
int generic_write(int fd, const char *buf, int n, void *unused)
{
- return(os_write_file(fd, buf, n));
+ return os_write_file(fd, buf, n);
}
int generic_window_size(int fd, void *unused, unsigned short *rows_out,
@@ -156,14 +156,14 @@ int generic_window_size(int fd, void *unused, unsigned short *rows_out,
ret = os_window_size(fd, &rows, &cols);
if(ret < 0)
- return(ret);
+ return ret;
ret = ((*rows_out != rows) || (*cols_out != cols));
*rows_out = rows;
*cols_out = cols;
- return(ret);
+ return ret;
}
void generic_free(void *data)
@@ -186,25 +186,26 @@ static void tty_receive_char(struct tty_struct *tty, char ch)
}
}
- if((tty->flip.flag_buf_ptr == NULL) ||
- (tty->flip.char_buf_ptr == NULL))
- return;
tty_insert_flip_char(tty, ch, TTY_NORMAL);
}
-static int open_one_chan(struct chan *chan, int input, int output, int primary)
+static int open_one_chan(struct chan *chan)
{
int fd;
- if(chan->opened) return(0);
- if(chan->ops->open == NULL) fd = 0;
- else fd = (*chan->ops->open)(input, output, primary, chan->data,
- &chan->dev);
- if(fd < 0) return(fd);
+ if(chan->opened)
+ return 0;
+
+ if(chan->ops->open == NULL)
+ fd = 0;
+ else fd = (*chan->ops->open)(chan->input, chan->output, chan->primary,
+ chan->data, &chan->dev);
+ if(fd < 0)
+ return fd;
chan->fd = fd;
chan->opened = 1;
- return(0);
+ return 0;
}
int open_chan(struct list_head *chans)
@@ -215,11 +216,11 @@ int open_chan(struct list_head *chans)
list_for_each(ele, chans){
chan = list_entry(ele, struct chan, list);
- ret = open_one_chan(chan, chan->input, chan->output,
- chan->primary);
- if(chan->primary) err = ret;
+ ret = open_one_chan(chan);
+ if(chan->primary)
+ err = ret;
}
- return(err);
+ return err;
}
void chan_enable_winch(struct list_head *chans, struct tty_struct *tty)
@@ -236,20 +237,65 @@ void chan_enable_winch(struct list_head *chans, struct tty_struct *tty)
}
}
-void enable_chan(struct list_head *chans, struct tty_struct *tty)
+void enable_chan(struct line *line)
{
struct list_head *ele;
struct chan *chan;
- list_for_each(ele, chans){
+ list_for_each(ele, &line->chan_list){
chan = list_entry(ele, struct chan, list);
- if(!chan->opened) continue;
+ if(open_one_chan(chan))
+ continue;
- line_setup_irq(chan->fd, chan->input, chan->output, tty);
+ if(chan->enabled)
+ continue;
+ line_setup_irq(chan->fd, chan->input, chan->output, line,
+ chan);
+ chan->enabled = 1;
}
}
-void close_chan(struct list_head *chans)
+static LIST_HEAD(irqs_to_free);
+
+void free_irqs(void)
+{
+ struct chan *chan;
+
+ while(!list_empty(&irqs_to_free)){
+ chan = list_entry(irqs_to_free.next, struct chan, free_list);
+ list_del(&chan->free_list);
+
+ if(chan->input)
+ free_irq(chan->line->driver->read_irq, chan);
+ if(chan->output)
+ free_irq(chan->line->driver->write_irq, chan);
+ chan->enabled = 0;
+ }
+}
+
+static void close_one_chan(struct chan *chan, int delay_free_irq)
+{
+ if(!chan->opened)
+ return;
+
+ if(delay_free_irq){
+ list_add(&chan->free_list, &irqs_to_free);
+ }
+ else {
+ if(chan->input)
+ free_irq(chan->line->driver->read_irq, chan);
+ if(chan->output)
+ free_irq(chan->line->driver->write_irq, chan);
+ chan->enabled = 0;
+ }
+ if(chan->ops->close != NULL)
+ (*chan->ops->close)(chan->fd, chan->data);
+
+ chan->opened = 0;
+ chan->fd = -1;
+}
+
+void close_chan(struct list_head *chans, int delay_free_irq)
{
struct chan *chan;
@@ -259,15 +305,37 @@ void close_chan(struct list_head *chans)
* so it must be the last closed.
*/
list_for_each_entry_reverse(chan, chans, list) {
- if(!chan->opened) continue;
- if(chan->ops->close != NULL)
- (*chan->ops->close)(chan->fd, chan->data);
- chan->opened = 0;
- chan->fd = -1;
+ close_one_chan(chan, delay_free_irq);
}
}
-int write_chan(struct list_head *chans, const char *buf, int len,
+void deactivate_chan(struct list_head *chans, int irq)
+{
+ struct list_head *ele;
+
+ struct chan *chan;
+ list_for_each(ele, chans) {
+ chan = list_entry(ele, struct chan, list);
+
+ if(chan->enabled && chan->input)
+ deactivate_fd(chan->fd, irq);
+ }
+}
+
+void reactivate_chan(struct list_head *chans, int irq)
+{
+ struct list_head *ele;
+ struct chan *chan;
+
+ list_for_each(ele, chans) {
+ chan = list_entry(ele, struct chan, list);
+
+ if(chan->enabled && chan->input)
+ reactivate_fd(chan->fd, irq);
+ }
+}
+
+int write_chan(struct list_head *chans, const char *buf, int len,
int write_irq)
{
struct list_head *ele;
@@ -285,7 +353,7 @@ int write_chan(struct list_head *chans, const char *buf, int len,
reactivate_fd(chan->fd, write_irq);
}
}
- return(ret);
+ return ret;
}
int console_write_chan(struct list_head *chans, const char *buf, int len)
@@ -301,19 +369,18 @@ int console_write_chan(struct list_head *chans, const char *buf, int len)
n = chan->ops->console_write(chan->fd, buf, len);
if(chan->primary) ret = n;
}
- return(ret);
+ return ret;
}
-int console_open_chan(struct line *line, struct console *co, struct chan_opts *opts)
+int console_open_chan(struct line *line, struct console *co,
+ struct chan_opts *opts)
{
- if (!list_empty(&line->chan_list))
- return 0;
+ int err;
+
+ err = open_chan(&line->chan_list);
+ if(err)
+ return err;
- if (0 != parse_chan_pair(line->init_str, &line->chan_list,
- line->init_pri, co->index, opts))
- return -1;
- if (0 != open_chan(&line->chan_list))
- return -1;
printk("Console initialized on /dev/%s%d\n",co->name,co->index);
return 0;
}
@@ -327,32 +394,36 @@ int chan_window_size(struct list_head *chans, unsigned short *rows_out,
list_for_each(ele, chans){
chan = list_entry(ele, struct chan, list);
if(chan->primary){
- if(chan->ops->window_size == NULL) return(0);
- return(chan->ops->window_size(chan->fd, chan->data,
- rows_out, cols_out));
+ if(chan->ops->window_size == NULL)
+ return 0;
+ return chan->ops->window_size(chan->fd, chan->data,
+ rows_out, cols_out);
}
}
- return(0);
+ return 0;
}
-void free_one_chan(struct chan *chan)
+void free_one_chan(struct chan *chan, int delay_free_irq)
{
list_del(&chan->list);
+
+ close_one_chan(chan, delay_free_irq);
+
if(chan->ops->free != NULL)
(*chan->ops->free)(chan->data);
- free_irq_by_fd(chan->fd);
+
if(chan->primary && chan->output) ignore_sigio_fd(chan->fd);
kfree(chan);
}
-void free_chan(struct list_head *chans)
+void free_chan(struct list_head *chans, int delay_free_irq)
{
struct list_head *ele, *next;
struct chan *chan;
list_for_each_safe(ele, next, chans){
chan = list_entry(ele, struct chan, list);
- free_one_chan(chan);
+ free_one_chan(chan, delay_free_irq);
}
}
@@ -363,23 +434,23 @@ static int one_chan_config_string(struct chan *chan, char *str, int size,
if(chan == NULL){
CONFIG_CHUNK(str, size, n, "none", 1);
- return(n);
+ return n;
}
CONFIG_CHUNK(str, size, n, chan->ops->type, 0);
if(chan->dev == NULL){
CONFIG_CHUNK(str, size, n, "", 1);
- return(n);
+ return n;
}
CONFIG_CHUNK(str, size, n, ":", 0);
CONFIG_CHUNK(str, size, n, chan->dev, 0);
- return(n);
+ return n;
}
-static int chan_pair_config_string(struct chan *in, struct chan *out,
+static int chan_pair_config_string(struct chan *in, struct chan *out,
char *str, int size, char **error_out)
{
int n;
@@ -390,7 +461,7 @@ static int chan_pair_config_string(struct chan *in, struct chan *out,
if(in == out){
CONFIG_CHUNK(str, size, n, "", 1);
- return(n);
+ return n;
}
CONFIG_CHUNK(str, size, n, ",", 1);
@@ -399,10 +470,10 @@ static int chan_pair_config_string(struct chan *in, struct chan *out,
size -= n;
CONFIG_CHUNK(str, size, n, "", 1);
- return(n);
+ return n;
}
-int chan_config_string(struct list_head *chans, char *str, int size,
+int chan_config_string(struct list_head *chans, char *str, int size,
char **error_out)
{
struct list_head *ele;
@@ -418,7 +489,7 @@ int chan_config_string(struct list_head *chans, char *str, int size,
out = chan;
}
- return(chan_pair_config_string(in, out, str, size, error_out));
+ return chan_pair_config_string(in, out, str, size, error_out);
}
struct chan_type {
@@ -462,7 +533,7 @@ struct chan_type chan_table[] = {
#endif
};
-static struct chan *parse_chan(char *str, int pri, int device,
+static struct chan *parse_chan(struct line *line, char *str, int device,
struct chan_opts *opts)
{
struct chan_type *entry;
@@ -484,36 +555,42 @@ static struct chan *parse_chan(char *str, int pri, int device,
if(ops == NULL){
my_printf("parse_chan couldn't parse \"%s\"\n",
str);
- return(NULL);
+ return NULL;
}
- if(ops->init == NULL) return(NULL);
+ if(ops->init == NULL)
+ return NULL;
data = (*ops->init)(str, device, opts);
- if(data == NULL) return(NULL);
+ if(data == NULL)
+ return NULL;
chan = kmalloc(sizeof(*chan), GFP_ATOMIC);
- if(chan == NULL) return(NULL);
+ if(chan == NULL)
+ return NULL;
*chan = ((struct chan) { .list = LIST_HEAD_INIT(chan->list),
+ .free_list =
+ LIST_HEAD_INIT(chan->free_list),
+ .line = line,
.primary = 1,
.input = 0,
.output = 0,
.opened = 0,
+ .enabled = 0,
.fd = -1,
- .pri = pri,
.ops = ops,
.data = data });
- return(chan);
+ return chan;
}
-int parse_chan_pair(char *str, struct list_head *chans, int pri, int device,
+int parse_chan_pair(char *str, struct line *line, int device,
struct chan_opts *opts)
{
+ struct list_head *chans = &line->chan_list;
struct chan *new, *chan;
char *in, *out;
if(!list_empty(chans)){
chan = list_entry(chans->next, struct chan, list);
- if(chan->pri >= pri) return(0);
- free_chan(chans);
+ free_chan(chans, 0);
INIT_LIST_HEAD(chans);
}
@@ -522,24 +599,30 @@ int parse_chan_pair(char *str, struct list_head *chans, int pri, int device,
in = str;
*out = '\0';
out++;
- new = parse_chan(in, pri, device, opts);
- if(new == NULL) return(-1);
+ new = parse_chan(line, in, device, opts);
+ if(new == NULL)
+ return -1;
+
new->input = 1;
list_add(&new->list, chans);
- new = parse_chan(out, pri, device, opts);
- if(new == NULL) return(-1);
+ new = parse_chan(line, out, device, opts);
+ if(new == NULL)
+ return -1;
+
list_add(&new->list, chans);
new->output = 1;
}
else {
- new = parse_chan(str, pri, device, opts);
- if(new == NULL) return(-1);
+ new = parse_chan(line, str, device, opts);
+ if(new == NULL)
+ return -1;
+
list_add(&new->list, chans);
new->input = 1;
new->output = 1;
}
- return(0);
+ return 0;
}
int chan_out_fd(struct list_head *chans)
@@ -550,9 +633,9 @@ int chan_out_fd(struct list_head *chans)
list_for_each(ele, chans){
chan = list_entry(ele, struct chan, list);
if(chan->primary && chan->output)
- return(chan->fd);
+ return chan->fd;
}
- return(-1);
+ return -1;
}
void chan_interrupt(struct list_head *chans, struct work_struct *task,
@@ -567,9 +650,8 @@ void chan_interrupt(struct list_head *chans, struct work_struct *task,
chan = list_entry(ele, struct chan, list);
if(!chan->input || (chan->ops->read == NULL)) continue;
do {
- if((tty != NULL) &&
- (tty->flip.count >= TTY_FLIPBUF_SIZE)){
- schedule_work(task);
+ if (tty && !tty_buffer_request_room(tty, 1)) {
+ schedule_delayed_work(task, 1);
goto out;
}
err = chan->ops->read(chan->fd, &c, chan->data);
@@ -582,29 +664,12 @@ void chan_interrupt(struct list_head *chans, struct work_struct *task,
if(chan->primary){
if(tty != NULL)
tty_hangup(tty);
- line_disable(tty, irq);
- close_chan(chans);
- free_chan(chans);
+ close_chan(chans, 1);
return;
}
- else {
- if(chan->ops->close != NULL)
- chan->ops->close(chan->fd, chan->data);
- free_one_chan(chan);
- }
+ else close_one_chan(chan, 1);
}
}
out:
if(tty) tty_flip_buffer_push(tty);
}
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index e0fdffa2d54..46ceb25a995 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
@@ -23,8 +23,9 @@
static irqreturn_t line_interrupt(int irq, void *data, struct pt_regs *unused)
{
- struct tty_struct *tty = data;
- struct line *line = tty->driver_data;
+ struct chan *chan = data;
+ struct line *line = chan->line;
+ struct tty_struct *tty = line->tty;
if (line)
chan_interrupt(&line->chan_list, &line->task, tty, irq);
@@ -33,10 +34,11 @@ static irqreturn_t line_interrupt(int irq, void *data, struct pt_regs *unused)
static void line_timer_cb(void *arg)
{
- struct tty_struct *tty = arg;
- struct line *line = tty->driver_data;
+ struct line *line = arg;
- line_interrupt(line->driver->read_irq, arg, NULL);
+ if(!line->throttled)
+ chan_interrupt(&line->chan_list, &line->task, line->tty,
+ line->driver->read_irq);
}
/* Returns the free space inside the ring buffer of this line.
@@ -124,7 +126,8 @@ static int buffer_data(struct line *line, const char *buf, int len)
if (len < end){
memcpy(line->tail, buf, len);
line->tail += len;
- } else {
+ }
+ else {
/* The circular buffer is wrapping */
memcpy(line->tail, buf, end);
buf += end;
@@ -170,7 +173,7 @@ static int flush_buffer(struct line *line)
}
count = line->tail - line->head;
- n = write_chan(&line->chan_list, line->head, count,
+ n = write_chan(&line->chan_list, line->head, count,
line->driver->write_irq);
if(n < 0)
@@ -227,7 +230,7 @@ int line_write(struct tty_struct *tty, const unsigned char *buf, int len)
if (err <= 0 && (err != -EAGAIN || !ret))
ret = err;
} else {
- n = write_chan(&line->chan_list, buf, len,
+ n = write_chan(&line->chan_list, buf, len,
line->driver->write_irq);
if (n < 0) {
ret = n;
@@ -338,11 +341,36 @@ int line_ioctl(struct tty_struct *tty, struct file * file,
return ret;
}
+void line_throttle(struct tty_struct *tty)
+{
+ struct line *line = tty->driver_data;
+
+ deactivate_chan(&line->chan_list, line->driver->read_irq);
+ line->throttled = 1;
+}
+
+void line_unthrottle(struct tty_struct *tty)
+{
+ struct line *line = tty->driver_data;
+
+ line->throttled = 0;
+ chan_interrupt(&line->chan_list, &line->task, tty,
+ line->driver->read_irq);
+
+ /* Maybe there is enough stuff pending that calling the interrupt
+ * throttles us again. In this case, line->throttled will be 1
+ * again and we shouldn't turn the interrupt back on.
+ */
+ if(!line->throttled)
+ reactivate_chan(&line->chan_list, line->driver->read_irq);
+}
+
static irqreturn_t line_write_interrupt(int irq, void *data,
struct pt_regs *unused)
{
- struct tty_struct *tty = data;
- struct line *line = tty->driver_data;
+ struct chan *chan = data;
+ struct line *line = chan->line;
+ struct tty_struct *tty = line->tty;
int err;
/* Interrupts are enabled here because we registered the interrupt with
@@ -364,7 +392,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data,
if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) &&
(tty->ldisc.write_wakeup != NULL))
(tty->ldisc.write_wakeup)(tty);
-
+
/* BLOCKING mode
* In blocking mode, everything sleeps on tty->write_wait.
* Sleeping in the console driver would break non-blocking
@@ -376,53 +404,29 @@ static irqreturn_t line_write_interrupt(int irq, void *data,
return IRQ_HANDLED;
}
-int line_setup_irq(int fd, int input, int output, struct tty_struct *tty)
+int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
{
- struct line *line = tty->driver_data;
struct line_driver *driver = line->driver;
int err = 0, flags = SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM;
if (input)
err = um_request_irq(driver->read_irq, fd, IRQ_READ,
- line_interrupt, flags,
- driver->read_irq_name, tty);
+ line_interrupt, flags,
+ driver->read_irq_name, data);
if (err)
return err;
if (output)
err = um_request_irq(driver->write_irq, fd, IRQ_WRITE,
- line_write_interrupt, flags,
- driver->write_irq_name, tty);
+ line_write_interrupt, flags,
+ driver->write_irq_name, data);
line->have_irq = 1;
return err;
}
-void line_disable(struct tty_struct *tty, int current_irq)
-{
- struct line *line = tty->driver_data;
-
- if(!line->have_irq)
- return;
-
- if(line->driver->read_irq == current_irq)
- free_irq_later(line->driver->read_irq, tty);
- else {
- free_irq(line->driver->read_irq, tty);
- }
-
- if(line->driver->write_irq == current_irq)
- free_irq_later(line->driver->write_irq, tty);
- else {
- free_irq(line->driver->write_irq, tty);
- }
-
- line->have_irq = 0;
-}
-
-int line_open(struct line *lines, struct tty_struct *tty,
- struct chan_opts *opts)
+int line_open(struct line *lines, struct tty_struct *tty)
{
struct line *line;
- int err = 0;
+ int err = -ENODEV;
line = &lines[tty->index];
tty->driver_data = line;
@@ -430,31 +434,29 @@ int line_open(struct line *lines, struct tty_struct *tty,
/* The IRQ which takes this lock is not yet enabled and won't be run
* before the end, so we don't need to use spin_lock_irq.*/
spin_lock(&line->lock);
- if (tty->count == 1) {
- if (!line->valid) {
- err = -ENODEV;
- goto out;
- }
- if (list_empty(&line->chan_list)) {
- err = parse_chan_pair(line->init_str, &line->chan_list,
- line->init_pri, tty->index, opts);
- if(err) goto out;
- err = open_chan(&line->chan_list);
- if(err) goto out;
+
+ tty->driver_data = line;
+ line->tty = tty;
+ if(!line->valid)
+ goto out;
+
+ if(tty->count == 1){
+ /* Here the device is opened, if necessary, and interrupt
+ * is registered.
+ */
+ enable_chan(line);
+ INIT_WORK(&line->task, line_timer_cb, line);
+
+ if(!line->sigio){
+ chan_enable_winch(&line->chan_list, tty);
+ line->sigio = 1;
}
- /* Here the interrupt is registered.*/
- enable_chan(&line->chan_list, tty);
- INIT_WORK(&line->task, line_timer_cb, tty);
- }
- if(!line->sigio){
- chan_enable_winch(&line->chan_list, tty);
- line->sigio = 1;
+ chan_window_size(&line->chan_list, &tty->winsize.ws_row,
+ &tty->winsize.ws_col);
}
- chan_window_size(&line->chan_list, &tty->winsize.ws_row,
- &tty->winsize.ws_col);
- line->count++;
+ err = 0;
out:
spin_unlock(&line->lock);
return err;
@@ -474,15 +476,14 @@ void line_close(struct tty_struct *tty, struct file * filp)
/* We ignore the error anyway! */
flush_buffer(line);
- line->count--;
- if (tty->count == 1) {
- line_disable(tty, -1);
+ if(tty->count == 1){
+ line->tty = NULL;
tty->driver_data = NULL;
- }
- if((line->count == 0) && line->sigio){
- unregister_winch(tty);
- line->sigio = 0;
+ if(line->sigio){
+ unregister_winch(tty);
+ line->sigio = 0;
+ }
}
spin_unlock_irq(&line->lock);
@@ -493,17 +494,15 @@ void close_lines(struct line *lines, int nlines)
int i;
for(i = 0; i < nlines; i++)
- close_chan(&lines[i].chan_list);
+ close_chan(&lines[i].chan_list, 0);
}
/* Common setup code for both startup command line and mconsole initialization.
 * @lines contains the array (of size @num) to modify;
* @init is the setup string;
- * @all_allowed is a boolean saying if we can setup the whole @lines
- * at once. For instance, it will be usually true for startup init. (where we
- * can use con=xterm) and false for mconsole.*/
+ */
-int line_setup(struct line *lines, unsigned int num, char *init, int all_allowed)
+int line_setup(struct line *lines, unsigned int num, char *init)
{
int i, n;
char *end;
@@ -512,10 +511,11 @@ int line_setup(struct line *lines, unsigned int num, char *init, int all_allowed
/* We said con=/ssl= instead of con#=, so we are configuring all
* consoles at once.*/
n = -1;
- } else {
+ }
+ else {
n = simple_strtoul(init, &end, 0);
if(*end != '='){
- printk(KERN_ERR "line_setup failed to parse \"%s\"\n",
+ printk(KERN_ERR "line_setup failed to parse \"%s\"\n",
init);
return 0;
}
@@ -527,8 +527,9 @@ int line_setup(struct line *lines, unsigned int num, char *init, int all_allowed
printk("line_setup - %d out of range ((0 ... %d) allowed)\n",
n, num - 1);
return 0;
- } else if (n >= 0){
- if (lines[n].count > 0) {
+ }
+ else if (n >= 0){
+ if (lines[n].tty != NULL) {
printk("line_setup - device %d is open\n", n);
return 0;
}
@@ -539,13 +540,10 @@ int line_setup(struct line *lines, unsigned int num, char *init, int all_allowed
else {
lines[n].init_str = init;
lines[n].valid = 1;
- }
+ }
}
- } else if(!all_allowed){
- printk("line_setup - can't configure all devices from "
- "mconsole\n");
- return 0;
- } else {
+ }
+ else {
for(i = 0; i < num; i++){
if(lines[i].init_pri <= INIT_ALL){
lines[i].init_pri = INIT_ALL;
@@ -557,18 +555,33 @@ int line_setup(struct line *lines, unsigned int num, char *init, int all_allowed
}
}
}
- return 1;
+ return n == -1 ? num : n;
}
-int line_config(struct line *lines, unsigned int num, char *str)
+int line_config(struct line *lines, unsigned int num, char *str,
+ struct chan_opts *opts)
{
- char *new = uml_strdup(str);
+ struct line *line;
+ char *new;
+ int n;
+ if(*str == '='){
+ printk("line_config - can't configure all devices from "
+ "mconsole\n");
+ return 1;
+ }
+
+ new = kstrdup(str, GFP_KERNEL);
if(new == NULL){
- printk("line_config - uml_strdup failed\n");
- return -ENOMEM;
+ printk("line_config - kstrdup failed\n");
+ return 1;
}
- return !line_setup(lines, num, new, 0);
+ n = line_setup(lines, num, new);
+ if(n < 0)
+ return 1;
+
+ line = &lines[n];
+ return parse_chan_pair(line->init_str, line, n, opts);
}
int line_get_config(char *name, struct line *lines, unsigned int num, char *str,
@@ -594,7 +607,7 @@ int line_get_config(char *name, struct line *lines, unsigned int num, char *str,
spin_lock(&line->lock);
if(!line->valid)
CONFIG_CHUNK(str, size, n, "none", 1);
- else if(line->count == 0)
+ else if(line->tty == NULL)
CONFIG_CHUNK(str, size, n, line->init_str, 1);
else n = chan_config_string(&line->chan_list, str, size, error_out);
spin_unlock(&line->lock);
@@ -619,14 +632,18 @@ int line_id(char **str, int *start_out, int *end_out)
int line_remove(struct line *lines, unsigned int num, int n)
{
+ int err;
char config[sizeof("conxxxx=none\0")];
sprintf(config, "%d=none", n);
- return !line_setup(lines, num, config, 0);
+ err = line_setup(lines, num, config);
+ if(err >= 0)
+ err = 0;
+ return err;
}
struct tty_driver *line_register_devfs(struct lines *set,
- struct line_driver *line_driver,
+ struct line_driver *line_driver,
struct tty_operations *ops, struct line *lines,
int nlines)
{
@@ -655,7 +672,7 @@ struct tty_driver *line_register_devfs(struct lines *set,
}
for(i = 0; i < nlines; i++){
- if(!lines[i].valid)
+ if(!lines[i].valid)
tty_unregister_device(driver, i);
}
@@ -663,24 +680,28 @@ struct tty_driver *line_register_devfs(struct lines *set,
return driver;
}
-static spinlock_t winch_handler_lock;
-LIST_HEAD(winch_handlers);
+static DEFINE_SPINLOCK(winch_handler_lock);
+static LIST_HEAD(winch_handlers);
-void lines_init(struct line *lines, int nlines)
+void lines_init(struct line *lines, int nlines, struct chan_opts *opts)
{
struct line *line;
int i;
- spin_lock_init(&winch_handler_lock);
for(i = 0; i < nlines; i++){
line = &lines[i];
INIT_LIST_HEAD(&line->chan_list);
- spin_lock_init(&line->lock);
- if(line->init_str != NULL){
- line->init_str = uml_strdup(line->init_str);
- if(line->init_str == NULL)
- printk("lines_init - uml_strdup returned "
- "NULL\n");
+
+ if(line->init_str == NULL)
+ continue;
+
+ line->init_str = kstrdup(line->init_str, GFP_KERNEL);
+ if(line->init_str == NULL)
+ printk("lines_init - kstrdup returned NULL\n");
+
+ if(parse_chan_pair(line->init_str, line, i, opts)){
+ printk("parse_chan_pair failed for device %d\n", i);
+ line->valid = 0;
}
}
}
@@ -717,8 +738,7 @@ irqreturn_t winch_interrupt(int irq, void *data, struct pt_regs *unused)
tty = winch->tty;
if (tty != NULL) {
line = tty->driver_data;
- chan_window_size(&line->chan_list,
- &tty->winsize.ws_row,
+ chan_window_size(&line->chan_list, &tty->winsize.ws_row,
&tty->winsize.ws_col);
kill_pg(tty->pgrp, SIGWINCH, 1);
}
@@ -749,60 +769,54 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty)
spin_unlock(&winch_handler_lock);
if(um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
- SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
+ SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
"winch", winch) < 0)
printk("register_winch_irq - failed to register IRQ\n");
}
+static void free_winch(struct winch *winch)
+{
+ list_del(&winch->list);
+
+ if(winch->pid != -1)
+ os_kill_process(winch->pid, 1);
+ if(winch->fd != -1)
+ os_close_file(winch->fd);
+
+ free_irq(WINCH_IRQ, winch);
+ kfree(winch);
+}
+
static void unregister_winch(struct tty_struct *tty)
{
struct list_head *ele;
- struct winch *winch, *found = NULL;
+ struct winch *winch;
spin_lock(&winch_handler_lock);
+
list_for_each(ele, &winch_handlers){
winch = list_entry(ele, struct winch, list);
if(winch->tty == tty){
- found = winch;
- break;
+ free_winch(winch);
+ break;
}
}
- if(found == NULL)
- goto err;
-
- list_del(&winch->list);
- spin_unlock(&winch_handler_lock);
-
- if(winch->pid != -1)
- os_kill_process(winch->pid, 1);
-
- free_irq(WINCH_IRQ, winch);
- kfree(winch);
-
- return;
-err:
spin_unlock(&winch_handler_lock);
}
-/* XXX: No lock as it's an exitcall... is this valid? Depending on cleanup
- * order... are we sure that nothing else is done on the list? */
static void winch_cleanup(void)
{
- struct list_head *ele;
+ struct list_head *ele, *next;
struct winch *winch;
- list_for_each(ele, &winch_handlers){
+ spin_lock(&winch_handler_lock);
+
+ list_for_each_safe(ele, next, &winch_handlers){
winch = list_entry(ele, struct winch, list);
- if(winch->fd != -1){
- /* Why is this different from the above free_irq(),
- * which deactivates SIGIO? This searches the FD
- * somewhere else and removes it from the list... */
- deactivate_fd(winch->fd, WINCH_IRQ);
- os_close_file(winch->fd);
- }
- if(winch->pid != -1)
- os_kill_process(winch->pid, 1);
+ free_winch(winch);
}
+
+ spin_unlock(&winch_handler_lock);
}
__uml_exitcall(winch_cleanup);
@@ -811,10 +825,10 @@ char *add_xterm_umid(char *base)
char *umid, *title;
int len;
- umid = get_umid(1);
- if(umid == NULL)
+ umid = get_umid();
+ if(*umid == '\0')
return base;
-
+
len = strlen(base) + strlen(" ()") + strlen(umid) + 1;
title = kmalloc(len, GFP_KERNEL);
if(title == NULL){
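For reference, a minimal userspace sketch (not part of the patch) of the wrap-around copy buffer_data() performs on the line's circular buffer; the buffer size, type names, and the tail of the else branch are assumptions, since the hunk above only shows its start, and the free-space check done by the caller is omitted.

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 16

struct ring {
	char buf[BUF_SIZE];
	char *tail;			/* next free byte */
};

static void ring_write(struct ring *r, const char *data, int len)
{
	int end = r->buf + BUF_SIZE - r->tail;	/* room before the wrap */

	if (len < end) {
		memcpy(r->tail, data, len);
		r->tail += len;
	}
	else {
		/* The circular buffer is wrapping */
		memcpy(r->tail, data, end);
		data += end;
		len -= end;
		memcpy(r->buf, data, len);
		r->tail = r->buf + len;
	}
}

int main(void)
{
	struct ring r;

	r.tail = r.buf;
	ring_write(&r, "hello ", 6);
	ring_write(&r, "wrapping!", 9);		/* 6 + 9 < 16, no wrap yet */
	ring_write(&r, "and more", 8);		/* forces a wrap */
	printf("tail offset after wrap: %ld\n", (long) (r.tail - r.buf));
	return 0;
}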
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 12c95368124..e3d57656717 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -20,6 +20,7 @@
#include "linux/namei.h"
#include "linux/proc_fs.h"
#include "linux/syscalls.h"
+#include "linux/console.h"
#include "asm/irq.h"
#include "asm/uaccess.h"
#include "user_util.h"
@@ -34,7 +35,7 @@
#include "irq_kern.h"
#include "choose-mode.h"
-static int do_unlink_socket(struct notifier_block *notifier,
+static int do_unlink_socket(struct notifier_block *notifier,
unsigned long what, void *data)
{
return(mconsole_unlink_socket());
@@ -46,12 +47,12 @@ static struct notifier_block reboot_notifier = {
.priority = 0,
};
-/* Safe without explicit locking for now. Tasklets provide their own
+/* Safe without explicit locking for now. Tasklets provide their own
* locking, and the interrupt handler is safe because it can't interrupt
* itself and it can only happen on CPU 0.
*/
-LIST_HEAD(mc_requests);
+static LIST_HEAD(mc_requests);
static void mc_work_proc(void *unused)
{
@@ -60,7 +61,7 @@ static void mc_work_proc(void *unused)
while(!list_empty(&mc_requests)){
local_save_flags(flags);
- req = list_entry(mc_requests.next, struct mconsole_entry,
+ req = list_entry(mc_requests.next, struct mconsole_entry,
list);
list_del(&req->list);
local_irq_restore(flags);
@@ -69,7 +70,7 @@ static void mc_work_proc(void *unused)
}
}
-DECLARE_WORK(mconsole_work, mc_work_proc, NULL);
+static DECLARE_WORK(mconsole_work, mc_work_proc, NULL);
static irqreturn_t mconsole_interrupt(int irq, void *dev_id,
struct pt_regs *regs)
@@ -103,8 +104,8 @@ void mconsole_version(struct mc_request *req)
{
char version[256];
- sprintf(version, "%s %s %s %s %s", system_utsname.sysname,
- system_utsname.nodename, system_utsname.release,
+ sprintf(version, "%s %s %s %s %s", system_utsname.sysname,
+ system_utsname.nodename, system_utsname.release,
system_utsname.version, system_utsname.machine);
mconsole_reply(req, version, 0, 0);
}
@@ -348,7 +349,7 @@ static struct mc_device *mconsole_find_dev(char *name)
#define CONFIG_BUF_SIZE 64
-static void mconsole_get_config(int (*get_config)(char *, char *, int,
+static void mconsole_get_config(int (*get_config)(char *, char *, int,
char **),
struct mc_request *req, char *name)
{
@@ -389,7 +390,6 @@ static void mconsole_get_config(int (*get_config)(char *, char *, int,
out:
if(buf != default_buf)
kfree(buf);
-
}
void mconsole_config(struct mc_request *req)
@@ -420,9 +420,9 @@ void mconsole_config(struct mc_request *req)
void mconsole_remove(struct mc_request *req)
{
- struct mc_device *dev;
+ struct mc_device *dev;
char *ptr = req->request.data, *err_msg = "";
- char error[256];
+ char error[256];
int err, start, end, n;
ptr += strlen("remove");
@@ -433,37 +433,112 @@ void mconsole_remove(struct mc_request *req)
return;
}
- ptr = &ptr[strlen(dev->name)];
-
- err = 1;
- n = (*dev->id)(&ptr, &start, &end);
- if(n < 0){
- err_msg = "Couldn't parse device number";
- goto out;
- }
- else if((n < start) || (n > end)){
- sprintf(error, "Invalid device number - must be between "
- "%d and %d", start, end);
- err_msg = error;
- goto out;
- }
+ ptr = &ptr[strlen(dev->name)];
+
+ err = 1;
+ n = (*dev->id)(&ptr, &start, &end);
+ if(n < 0){
+ err_msg = "Couldn't parse device number";
+ goto out;
+ }
+ else if((n < start) || (n > end)){
+ sprintf(error, "Invalid device number - must be between "
+ "%d and %d", start, end);
+ err_msg = error;
+ goto out;
+ }
err = (*dev->remove)(n);
- switch(err){
- case -ENODEV:
- err_msg = "Device doesn't exist";
- break;
- case -EBUSY:
- err_msg = "Device is currently open";
- break;
- default:
- break;
- }
- out:
+ switch(err){
+ case -ENODEV:
+ err_msg = "Device doesn't exist";
+ break;
+ case -EBUSY:
+ err_msg = "Device is currently open";
+ break;
+ default:
+ break;
+ }
+out:
mconsole_reply(req, err_msg, err, 0);
}
+static DEFINE_SPINLOCK(console_lock);
+static LIST_HEAD(clients);
+static char console_buf[MCONSOLE_MAX_DATA];
+static int console_index = 0;
+
+static void console_write(struct console *console, const char *string,
+ unsigned len)
+{
+ struct list_head *ele;
+ int n;
+
+ if(list_empty(&clients))
+ return;
+
+ while(1){
+ n = min(len, ARRAY_SIZE(console_buf) - console_index);
+ strncpy(&console_buf[console_index], string, n);
+ console_index += n;
+ string += n;
+ len -= n;
+ if(len == 0)
+ return;
+
+ list_for_each(ele, &clients){
+ struct mconsole_entry *entry;
+
+ entry = list_entry(ele, struct mconsole_entry, list);
+ mconsole_reply_len(&entry->request, console_buf,
+ console_index, 0, 1);
+ }
+
+ console_index = 0;
+ }
+}
+
+static struct console mc_console = { .name = "mc",
+ .write = console_write,
+ .flags = CON_ENABLED,
+ .index = -1 };
+
+static int mc_add_console(void)
+{
+ register_console(&mc_console);
+ return 0;
+}
+
+late_initcall(mc_add_console);
+
+static void with_console(struct mc_request *req, void (*proc)(void *),
+ void *arg)
+{
+ struct mconsole_entry entry;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&entry.list);
+ entry.request = *req;
+ list_add(&entry.list, &clients);
+ spin_lock_irqsave(&console_lock, flags);
+
+ (*proc)(arg);
+
+ mconsole_reply_len(req, console_buf, console_index, 0, 0);
+ console_index = 0;
+
+ spin_unlock_irqrestore(&console_lock, flags);
+ list_del(&entry.list);
+}
+
#ifdef CONFIG_MAGIC_SYSRQ
+static void sysrq_proc(void *arg)
+{
+ char *op = arg;
+
+ handle_sysrq(*op, &current->thread.regs, NULL);
+}
+
void mconsole_sysrq(struct mc_request *req)
{
char *ptr = req->request.data;
@@ -471,8 +546,13 @@ void mconsole_sysrq(struct mc_request *req)
ptr += strlen("sysrq");
while(isspace(*ptr)) ptr++;
- mconsole_reply(req, "", 0, 0);
- handle_sysrq(*ptr, &current->thread.regs, NULL);
+ /* With 'b', the system will shut down without a chance to reply,
+ * so in this case, we reply first.
+ */
+ if(*ptr == 'b')
+ mconsole_reply(req, "", 0, 0);
+
+ with_console(req, sysrq_proc, ptr);
}
#else
void mconsole_sysrq(struct mc_request *req)
@@ -481,6 +561,14 @@ void mconsole_sysrq(struct mc_request *req)
}
#endif
+static void stack_proc(void *arg)
+{
+ struct task_struct *from = current, *to = arg;
+
+ to->thread.saved_task = from;
+ switch_to(from, to, from);
+}
+
/* Mconsole stack trace
* Added by Allan Graves, Jeff Dike
 * Dumps a stack's registers to the linux console.
@@ -488,37 +576,34 @@ void mconsole_sysrq(struct mc_request *req)
*/
void do_stack(struct mc_request *req)
{
- char *ptr = req->request.data;
- int pid_requested= -1;
- struct task_struct *from = NULL;
+ char *ptr = req->request.data;
+ int pid_requested= -1;
+ struct task_struct *from = NULL;
struct task_struct *to = NULL;
- /* Would be nice:
- * 1) Send showregs output to mconsole.
+ /* Would be nice:
+ * 1) Send showregs output to mconsole.
* 2) Add a way to stack dump all pids.
*/
- ptr += strlen("stack");
- while(isspace(*ptr)) ptr++;
-
- /* Should really check for multiple pids or reject bad args here */
- /* What do the arguments in mconsole_reply mean? */
- if(sscanf(ptr, "%d", &pid_requested) == 0){
- mconsole_reply(req, "Please specify a pid", 1, 0);
- return;
- }
+ ptr += strlen("stack");
+ while(isspace(*ptr)) ptr++;
- from = current;
- to = find_task_by_pid(pid_requested);
+ /* Should really check for multiple pids or reject bad args here */
+ /* What do the arguments in mconsole_reply mean? */
+ if(sscanf(ptr, "%d", &pid_requested) == 0){
+ mconsole_reply(req, "Please specify a pid", 1, 0);
+ return;
+ }
- if((to == NULL) || (pid_requested == 0)) {
- mconsole_reply(req, "Couldn't find that pid", 1, 0);
- return;
- }
- to->thread.saved_task = current;
+ from = current;
- switch_to(from, to, from);
- mconsole_reply(req, "Stack Dumped to console and message log", 0, 0);
+ to = find_task_by_pid(pid_requested);
+ if((to == NULL) || (pid_requested == 0)) {
+ mconsole_reply(req, "Couldn't find that pid", 1, 0);
+ return;
+ }
+ with_console(req, stack_proc, to);
}
void mconsole_stack(struct mc_request *req)
@@ -534,9 +619,9 @@ void mconsole_stack(struct mc_request *req)
/* Changed by mconsole_setup, which is __setup, and called before SMP is
* active.
*/
-static char *notify_socket = NULL;
+static char *notify_socket = NULL;
-int mconsole_init(void)
+static int mconsole_init(void)
{
/* long to avoid size mismatch warnings from gcc */
long sock;
@@ -563,16 +648,16 @@ int mconsole_init(void)
}
if(notify_socket != NULL){
- notify_socket = uml_strdup(notify_socket);
+ notify_socket = kstrdup(notify_socket, GFP_KERNEL);
if(notify_socket != NULL)
mconsole_notify(notify_socket, MCONSOLE_SOCKET,
- mconsole_socket_name,
+ mconsole_socket_name,
strlen(mconsole_socket_name) + 1);
else printk(KERN_ERR "mconsole_setup failed to strdup "
"string\n");
}
- printk("mconsole (version %d) initialized on %s\n",
+ printk("mconsole (version %d) initialized on %s\n",
MCONSOLE_VERSION, mconsole_socket_name);
return(0);
}
@@ -585,7 +670,7 @@ static int write_proc_mconsole(struct file *file, const char __user *buffer,
char *buf;
buf = kmalloc(count + 1, GFP_KERNEL);
- if(buf == NULL)
+ if(buf == NULL)
return(-ENOMEM);
if(copy_from_user(buf, buffer, count)){
@@ -661,7 +746,7 @@ static int notify_panic(struct notifier_block *self, unsigned long unused1,
if(notify_socket == NULL) return(0);
- mconsole_notify(notify_socket, MCONSOLE_PANIC, message,
+ mconsole_notify(notify_socket, MCONSOLE_PANIC, message,
strlen(message) + 1);
return(0);
}
@@ -686,14 +771,3 @@ char *mconsole_notify_socket(void)
}
EXPORT_SYMBOL(mconsole_notify_socket);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
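For reference, a userspace sketch (not part of the patch) of the accumulate-and-flush loop console_write() uses: output is staged in a fixed buffer and only pushed to the clients when the buffer fills, with with_console() flushing whatever is left afterwards. The buffer size and the flush() stand-in are assumptions.

#include <stdio.h>
#include <string.h>

#define BUF_MAX 8

static char buf[BUF_MAX];
static int used;

static void flush(void)
{
	printf("flush: \"%.*s\"\n", used, buf);
	used = 0;
}

static void chunked_write(const char *string, int len)
{
	while (1) {
		int n = len < BUF_MAX - used ? len : BUF_MAX - used;

		memcpy(&buf[used], string, n);
		used += n;
		string += n;
		len -= n;
		if (len == 0)
			return;		/* leftovers stay staged */

		flush();		/* buffer is full - push it out */
	}
}

int main(void)
{
	chunked_write("hello mconsole clients", 22);
	flush();			/* final flush, as with_console() does */
	return 0;
}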
diff --git a/arch/um/drivers/mconsole_user.c b/arch/um/drivers/mconsole_user.c
index 310c1f823f2..4b109fe7fff 100644
--- a/arch/um/drivers/mconsole_user.c
+++ b/arch/um/drivers/mconsole_user.c
@@ -122,12 +122,12 @@ int mconsole_get_request(int fd, struct mc_request *req)
return(1);
}
-int mconsole_reply(struct mc_request *req, char *str, int err, int more)
+int mconsole_reply_len(struct mc_request *req, const char *str, int total,
+ int err, int more)
{
struct mconsole_reply reply;
- int total, len, n;
+ int len, n;
- total = strlen(str);
do {
reply.err = err;
@@ -155,6 +155,12 @@ int mconsole_reply(struct mc_request *req, char *str, int err, int more)
return(0);
}
+int mconsole_reply(struct mc_request *req, const char *str, int err, int more)
+{
+ return mconsole_reply_len(req, str, strlen(str), err, more);
+}
+
+
int mconsole_unlink_socket(void)
{
unlink(mconsole_socket_name);
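A small sketch (not part of the patch) of the wrapper split introduced here: a length-taking worker plus a strlen() convenience front end. reply_len()'s body is only a stand-in for the real chunked send.

#include <stdio.h>
#include <string.h>

static int reply_len(const char *str, int total, int err)
{
	printf("err=%d, %d bytes: %.*s\n", err, total, total, str);
	return 0;
}

static int reply(const char *str, int err)
{
	return reply_len(str, (int) strlen(str), err);
}

int main(void)
{
	reply("OK", 0);
	reply_len("partial console output", 7, 0);	/* sends "partial" */
	return 0;
}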
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 84c73a300ac..fb1f9fb9b87 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -34,7 +34,7 @@
#define DRIVER_NAME "uml-netdev"
static DEFINE_SPINLOCK(opened_lock);
-LIST_HEAD(opened);
+static LIST_HEAD(opened);
static int uml_net_rx(struct net_device *dev)
{
@@ -150,6 +150,7 @@ static int uml_net_close(struct net_device *dev)
if(lp->close != NULL)
(*lp->close)(lp->fd, &lp->user);
lp->fd = -1;
+ list_del(&lp->list);
spin_unlock(&lp->lock);
return 0;
@@ -266,7 +267,7 @@ void uml_net_user_timer_expire(unsigned long _conn)
}
static DEFINE_SPINLOCK(devices_lock);
-static struct list_head devices = LIST_HEAD_INIT(devices);
+static LIST_HEAD(devices);
static struct platform_driver uml_net_driver = {
.driver = {
@@ -586,7 +587,7 @@ static int net_config(char *str)
err = eth_parse(str, &n, &str);
if(err) return(err);
- str = uml_strdup(str);
+ str = kstrdup(str, GFP_KERNEL);
if(str == NULL){
printk(KERN_ERR "net_config failed to strdup string\n");
return(-1);
@@ -715,6 +716,7 @@ static void close_devices(void)
list_for_each(ele, &opened){
lp = list_entry(ele, struct uml_net_private, list);
+ free_irq(lp->dev->irq, lp->dev);
if((lp->close != NULL) && (lp->fd >= 0))
(*lp->close)(lp->fd, &lp->user);
if(lp->remove != NULL) (*lp->remove)(&lp->user);
diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c
index 62e04ecfada..a32ef55cb24 100644
--- a/arch/um/drivers/ssl.c
+++ b/arch/um/drivers/ssl.c
@@ -69,7 +69,7 @@ static struct line_driver driver = {
.name = "ssl",
.config = ssl_config,
.get_config = ssl_get_config,
- .id = line_id,
+ .id = line_id,
.remove = ssl_remove,
},
};
@@ -84,26 +84,23 @@ static struct lines lines = LINES_INIT(NR_PORTS);
static int ssl_config(char *str)
{
- return(line_config(serial_lines,
- sizeof(serial_lines)/sizeof(serial_lines[0]), str));
+ return line_config(serial_lines, ARRAY_SIZE(serial_lines), str, &opts);
}
static int ssl_get_config(char *dev, char *str, int size, char **error_out)
{
- return(line_get_config(dev, serial_lines,
- sizeof(serial_lines)/sizeof(serial_lines[0]),
- str, size, error_out));
+ return line_get_config(dev, serial_lines, ARRAY_SIZE(serial_lines), str,
+ size, error_out);
}
static int ssl_remove(int n)
{
- return line_remove(serial_lines,
- sizeof(serial_lines)/sizeof(serial_lines[0]), n);
+ return line_remove(serial_lines, ARRAY_SIZE(serial_lines), n);
}
int ssl_open(struct tty_struct *tty, struct file *filp)
{
- return line_open(serial_lines, tty, &opts);
+ return line_open(serial_lines, tty);
}
#if 0
@@ -112,16 +109,6 @@ static void ssl_flush_buffer(struct tty_struct *tty)
return;
}
-static void ssl_throttle(struct tty_struct * tty)
-{
- printk(KERN_ERR "Someone should implement ssl_throttle\n");
-}
-
-static void ssl_unthrottle(struct tty_struct * tty)
-{
- printk(KERN_ERR "Someone should implement ssl_unthrottle\n");
-}
-
static void ssl_stop(struct tty_struct *tty)
{
printk(KERN_ERR "Someone should implement ssl_stop\n");
@@ -148,9 +135,9 @@ static struct tty_operations ssl_ops = {
.flush_chars = line_flush_chars,
.set_termios = line_set_termios,
.ioctl = line_ioctl,
+ .throttle = line_throttle,
+ .unthrottle = line_unthrottle,
#if 0
- .throttle = ssl_throttle,
- .unthrottle = ssl_unthrottle,
.stop = ssl_stop,
.start = ssl_start,
.hangup = ssl_hangup,
@@ -183,7 +170,7 @@ static int ssl_console_setup(struct console *co, char *options)
{
struct line *line = &serial_lines[co->index];
- return console_open_chan(line,co,&opts);
+ return console_open_chan(line, co, &opts);
}
static struct console ssl_cons = {
@@ -199,12 +186,13 @@ int ssl_init(void)
{
char *new_title;
- printk(KERN_INFO "Initializing software serial port version %d\n",
+ printk(KERN_INFO "Initializing software serial port version %d\n",
ssl_version);
ssl_driver = line_register_devfs(&lines, &driver, &ssl_ops,
- serial_lines, ARRAY_SIZE(serial_lines));
+ serial_lines,
+ ARRAY_SIZE(serial_lines));
- lines_init(serial_lines, sizeof(serial_lines)/sizeof(serial_lines[0]));
+ lines_init(serial_lines, ARRAY_SIZE(serial_lines), &opts);
new_title = add_xterm_umid(opts.xterm_title);
if (new_title != NULL)
@@ -212,7 +200,7 @@ int ssl_init(void)
ssl_init_done = 1;
register_console(&ssl_cons);
- return(0);
+ return 0;
}
late_initcall(ssl_init);
@@ -220,16 +208,13 @@ static void ssl_exit(void)
{
if (!ssl_init_done)
return;
- close_lines(serial_lines,
- sizeof(serial_lines)/sizeof(serial_lines[0]));
+ close_lines(serial_lines, ARRAY_SIZE(serial_lines));
}
__uml_exitcall(ssl_exit);
static int ssl_chan_setup(char *str)
{
- return(line_setup(serial_lines,
- sizeof(serial_lines)/sizeof(serial_lines[0]),
- str, 1));
+ return line_setup(serial_lines, ARRAY_SIZE(serial_lines), str);
}
__setup("ssl", ssl_chan_setup);
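For reference, the ARRAY_SIZE() idiom these conversions switch to, as a standalone sketch; the macro below mirrors the kernel's basic form and the example array is illustrative.

#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

int main(void)
{
	int lines[16];

	printf("%zu\n", ARRAY_SIZE(lines));	/* prints 16 */
	return 0;
}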
diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c
index 005aa6333b6..61db8b2fc83 100644
--- a/arch/um/drivers/stdio_console.c
+++ b/arch/um/drivers/stdio_console.c
@@ -75,7 +75,7 @@ static struct line_driver driver = {
.name = "con",
.config = con_config,
.get_config = con_get_config,
- .id = line_id,
+ .id = line_id,
.remove = con_remove,
},
};
@@ -86,28 +86,27 @@ static struct lines console_lines = LINES_INIT(MAX_TTYS);
* individual elements are protected by individual semaphores.
*/
struct line vts[MAX_TTYS] = { LINE_INIT(CONFIG_CON_ZERO_CHAN, &driver),
- [ 1 ... MAX_TTYS - 1 ] =
+ [ 1 ... MAX_TTYS - 1 ] =
LINE_INIT(CONFIG_CON_CHAN, &driver) };
static int con_config(char *str)
{
- return(line_config(vts, sizeof(vts)/sizeof(vts[0]), str));
+ return line_config(vts, ARRAY_SIZE(vts), str, &opts);
}
static int con_get_config(char *dev, char *str, int size, char **error_out)
{
- return(line_get_config(dev, vts, sizeof(vts)/sizeof(vts[0]), str,
- size, error_out));
+ return line_get_config(dev, vts, ARRAY_SIZE(vts), str, size, error_out);
}
static int con_remove(int n)
{
- return line_remove(vts, sizeof(vts)/sizeof(vts[0]), n);
+ return line_remove(vts, ARRAY_SIZE(vts), n);
}
static int con_open(struct tty_struct *tty, struct file *filp)
{
- return line_open(vts, tty, &opts);
+ return line_open(vts, tty);
}
static int con_init_done = 0;
@@ -117,16 +116,18 @@ static struct tty_operations console_ops = {
.close = line_close,
.write = line_write,
.put_char = line_put_char,
- .write_room = line_write_room,
+ .write_room = line_write_room,
.chars_in_buffer = line_chars_in_buffer,
.flush_buffer = line_flush_buffer,
.flush_chars = line_flush_chars,
.set_termios = line_set_termios,
.ioctl = line_ioctl,
+ .throttle = line_throttle,
+ .unthrottle = line_unthrottle,
};
static void uml_console_write(struct console *console, const char *string,
- unsigned len)
+ unsigned len)
{
struct line *line = &vts[console->index];
unsigned long flags;
@@ -146,7 +147,7 @@ static int uml_console_setup(struct console *co, char *options)
{
struct line *line = &vts[co->index];
- return console_open_chan(line,co,&opts);
+ return console_open_chan(line, co, &opts);
}
static struct console stdiocons = {
@@ -156,7 +157,7 @@ static struct console stdiocons = {
.setup = uml_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
- .data = &vts,
+ .data = &vts,
};
int stdio_init(void)
@@ -166,11 +167,11 @@ int stdio_init(void)
console_driver = line_register_devfs(&console_lines, &driver,
&console_ops, vts,
ARRAY_SIZE(vts));
- if (NULL == console_driver)
+ if (console_driver == NULL)
return -1;
printk(KERN_INFO "Initialized stdio console driver\n");
- lines_init(vts, sizeof(vts)/sizeof(vts[0]));
+ lines_init(vts, ARRAY_SIZE(vts), &opts);
new_title = add_xterm_umid(opts.xterm_title);
if(new_title != NULL)
@@ -178,7 +179,7 @@ int stdio_init(void)
con_init_done = 1;
register_console(&stdiocons);
- return(0);
+ return 0;
}
late_initcall(stdio_init);
@@ -186,13 +187,13 @@ static void console_exit(void)
{
if (!con_init_done)
return;
- close_lines(vts, sizeof(vts)/sizeof(vts[0]));
+ close_lines(vts, ARRAY_SIZE(vts));
}
__uml_exitcall(console_exit);
static int console_chan_setup(char *str)
{
- return(line_setup(vts, sizeof(vts)/sizeof(vts[0]), str, 1));
+ return line_setup(vts, ARRAY_SIZE(vts), str);
}
__setup("con", console_chan_setup);
__channel_help(console_chan_setup, "con");
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 93898917cbe..7696f8d2d89 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -117,6 +117,7 @@ static int ubd_open(struct inode * inode, struct file * filp);
static int ubd_release(struct inode * inode, struct file * file);
static int ubd_ioctl(struct inode * inode, struct file * file,
unsigned int cmd, unsigned long arg);
+static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo);
#define MAX_DEV (8)
@@ -125,6 +126,7 @@ static struct block_device_operations ubd_blops = {
.open = ubd_open,
.release = ubd_release,
.ioctl = ubd_ioctl,
+ .getgeo = ubd_getgeo,
};
/* Protected by the queue_lock */
@@ -706,7 +708,7 @@ static int ubd_config(char *str)
{
int n, err;
- str = uml_strdup(str);
+ str = kstrdup(str, GFP_KERNEL);
if(str == NULL){
printk(KERN_ERR "ubd_config failed to strdup string\n");
return(1);
@@ -1058,10 +1060,19 @@ static void do_ubd_request(request_queue_t *q)
}
}
+static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct ubd *dev = bdev->bd_disk->private_data;
+
+ geo->heads = 128;
+ geo->sectors = 32;
+ geo->cylinders = dev->size / (128 * 32 * 512);
+ return 0;
+}
+
static int ubd_ioctl(struct inode * inode, struct file * file,
unsigned int cmd, unsigned long arg)
{
- struct hd_geometry __user *loc = (struct hd_geometry __user *) arg;
struct ubd *dev = inode->i_bdev->bd_disk->private_data;
struct hd_driveid ubd_id = {
.cyls = 0,
@@ -1070,16 +1081,7 @@ static int ubd_ioctl(struct inode * inode, struct file * file,
};
switch (cmd) {
- struct hd_geometry g;
struct cdrom_volctrl volume;
- case HDIO_GETGEO:
- if(!loc) return(-EINVAL);
- g.heads = 128;
- g.sectors = 32;
- g.cylinders = dev->size / (128 * 32 * 512);
- g.start = get_start_sect(inode->i_bdev);
- return(copy_to_user(loc, &g, sizeof(g)) ? -EFAULT : 0);
-
case HDIO_GET_IDENTITY:
ubd_id.cyls = dev->size / (128 * 32 * 512);
if(copy_to_user((char __user *) arg, (char *) &ubd_id,
@@ -1387,15 +1389,6 @@ int io_thread(void *arg)
printk("io_thread - write failed, fd = %d, err = %d\n",
kernel_fd, -n);
}
-}
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+ return 0;
+}
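For reference, a standalone sketch (not part of the patch) of the fixed 128-head, 32-sector geometry ubd_getgeo() reports, with cylinders derived from the device size. The example size is an arbitrary assumption, and the partition start sector, which the removed HDIO_GETGEO branch filled in itself, is no longer set by the driver with the new hook.

#include <stdio.h>

int main(void)
{
	unsigned long long size = 512ULL * 1024 * 1024;	/* 512 MB disk image */
	unsigned int heads = 128, sectors = 32;
	unsigned long long cylinders = size / (heads * sectors * 512);

	printf("C/H/S = %llu/%u/%u\n", cylinders, heads, sectors);
	return 0;
}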
diff --git a/arch/um/include/chan_kern.h b/arch/um/include/chan_kern.h
index da9a6717e7a..1bb5e9d9427 100644
--- a/arch/um/include/chan_kern.h
+++ b/arch/um/include/chan_kern.h
@@ -14,21 +14,23 @@
struct chan {
struct list_head list;
+ struct list_head free_list;
+ struct line *line;
char *dev;
unsigned int primary:1;
unsigned int input:1;
unsigned int output:1;
unsigned int opened:1;
+ unsigned int enabled:1;
int fd;
- enum chan_init_pri pri;
struct chan_ops *ops;
void *data;
};
extern void chan_interrupt(struct list_head *chans, struct work_struct *task,
struct tty_struct *tty, int irq);
-extern int parse_chan_pair(char *str, struct list_head *chans, int pri,
- int device, struct chan_opts *opts);
+extern int parse_chan_pair(char *str, struct line *line, int device,
+ struct chan_opts *opts);
extern int open_chan(struct list_head *chans);
extern int write_chan(struct list_head *chans, const char *buf, int len,
int write_irq);
@@ -36,9 +38,11 @@ extern int console_write_chan(struct list_head *chans, const char *buf,
int len);
extern int console_open_chan(struct line *line, struct console *co,
struct chan_opts *opts);
-extern void close_chan(struct list_head *chans);
+extern void deactivate_chan(struct list_head *chans, int irq);
+extern void reactivate_chan(struct list_head *chans, int irq);
extern void chan_enable_winch(struct list_head *chans, struct tty_struct *tty);
-extern void enable_chan(struct list_head *chans, struct tty_struct *tty);
+extern void enable_chan(struct line *line);
+extern void close_chan(struct list_head *chans, int delay_free_irq);
extern int chan_window_size(struct list_head *chans,
unsigned short *rows_out,
unsigned short *cols_out);
@@ -47,14 +51,3 @@ extern int chan_config_string(struct list_head *chans, char *str, int size,
char **error_out);
#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/include/choose-mode.h b/arch/um/include/choose-mode.h
index f25fa83a5da..b87b36a87d9 100644
--- a/arch/um/include/choose-mode.h
+++ b/arch/um/include/choose-mode.h
@@ -23,6 +23,9 @@ static inline void *__choose_mode(void *tt, void *skas) {
#elif defined(UML_CONFIG_MODE_TT)
#define CHOOSE_MODE(tt, skas) (tt)
+
+#else
+#error CONFIG_MODE_SKAS and CONFIG_MODE_TT are both disabled
#endif
#define CHOOSE_MODE_PROC(tt, skas, args...) \
diff --git a/arch/um/include/irq_user.h b/arch/um/include/irq_user.h
index f724b717213..b61deb8b362 100644
--- a/arch/um/include/irq_user.h
+++ b/arch/um/include/irq_user.h
@@ -18,19 +18,8 @@ extern int deactivate_all_fds(void);
extern void forward_interrupts(int pid);
extern void init_irq_signals(int on_sigstack);
extern void forward_ipi(int fd, int pid);
-extern void free_irq_later(int irq, void *dev_id);
extern int activate_ipi(int fd, int pid);
extern unsigned long irq_lock(void);
extern void irq_unlock(unsigned long flags);
-#endif
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+#endif
diff --git a/arch/um/include/kern.h b/arch/um/include/kern.h
index 1e3170768b5..7d223beccbc 100644
--- a/arch/um/include/kern.h
+++ b/arch/um/include/kern.h
@@ -17,7 +17,7 @@ extern int errno;
extern int clone(int (*proc)(void *), void *sp, int flags, void *data);
extern int sleep(int);
-extern int printf(char *fmt, ...);
+extern int printf(const char *fmt, ...);
extern char *strerror(int errnum);
extern char *ptsname(int __fd);
extern int munmap(void *, int);
@@ -35,15 +35,6 @@ extern int read(unsigned int, char *, int);
extern int pipe(int *);
extern int sched_yield(void);
extern int ptrace(int op, int pid, long addr, long data);
+
#endif
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/include/kern_util.h b/arch/um/include/kern_util.h
index e5fec557019..8f4e46d677a 100644
--- a/arch/um/include/kern_util.h
+++ b/arch/um/include/kern_util.h
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
@@ -10,6 +10,19 @@
#include "sysdep/ptrace.h"
#include "sysdep/faultinfo.h"
+typedef void (*kern_hndl)(int, union uml_pt_regs *);
+
+struct kern_handlers {
+ kern_hndl relay_signal;
+ kern_hndl winch;
+ kern_hndl bus_handler;
+ kern_hndl page_fault;
+ kern_hndl sigio_handler;
+ kern_hndl timer_handler;
+};
+
+extern struct kern_handlers handlinfo_kern;
+
extern int ncpus;
extern char *linux_prog;
extern char *gdb_init;
@@ -51,8 +64,6 @@ extern void timer_handler(int sig, union uml_pt_regs *regs);
extern int set_signals(int enable);
extern void force_sigbus(void);
extern int pid_to_processor_id(int pid);
-extern void block_signals(void);
-extern void unblock_signals(void);
extern void deliver_signals(void *t);
extern int next_syscall_index(int max);
extern int next_trap_index(int max);
@@ -111,6 +122,8 @@ extern void arch_switch(void);
extern void free_irq(unsigned int, void *);
extern int um_in_interrupt(void);
extern int cpu(void);
+extern void segv_handler(int sig, union uml_pt_regs *regs);
+extern void sigio_handler(int sig, union uml_pt_regs *regs);
#endif
diff --git a/arch/um/include/line.h b/arch/um/include/line.h
index 5323d22a6ca..6f4d680dc1d 100644
--- a/arch/um/include/line.h
+++ b/arch/um/include/line.h
@@ -32,11 +32,13 @@ struct line_driver {
};
struct line {
+ struct tty_struct *tty;
char *init_str;
int init_pri;
struct list_head chan_list;
int valid;
int count;
+ int throttled;
/*This lock is actually, mostly, local to*/
spinlock_t lock;
@@ -58,14 +60,15 @@ struct line {
#define LINE_INIT(str, d) \
{ init_str : str, \
init_pri : INIT_STATIC, \
- chan_list : { }, \
valid : 1, \
+ throttled : 0, \
+ lock : SPIN_LOCK_UNLOCKED, \
buffer : NULL, \
head : NULL, \
tail : NULL, \
sigio : 0, \
- driver : d, \
- have_irq : 0 }
+ driver : d, \
+ have_irq : 0 }
struct lines {
int num;
@@ -74,11 +77,11 @@ struct lines {
#define LINES_INIT(n) { num : n }
extern void line_close(struct tty_struct *tty, struct file * filp);
-extern int line_open(struct line *lines, struct tty_struct *tty,
- struct chan_opts *opts);
-extern int line_setup(struct line *lines, unsigned int sizeof_lines, char *init,
- int all_allowed);
-extern int line_write(struct tty_struct *tty, const unsigned char *buf, int len);
+extern int line_open(struct line *lines, struct tty_struct *tty);
+extern int line_setup(struct line *lines, unsigned int sizeof_lines,
+ char *init);
+extern int line_write(struct tty_struct *tty, const unsigned char *buf,
+ int len);
extern void line_put_char(struct tty_struct *tty, unsigned char ch);
extern void line_set_termios(struct tty_struct *tty, struct termios * old);
extern int line_chars_in_buffer(struct tty_struct *tty);
@@ -87,23 +90,27 @@ extern void line_flush_chars(struct tty_struct *tty);
extern int line_write_room(struct tty_struct *tty);
extern int line_ioctl(struct tty_struct *tty, struct file * file,
unsigned int cmd, unsigned long arg);
+extern void line_throttle(struct tty_struct *tty);
+extern void line_unthrottle(struct tty_struct *tty);
extern char *add_xterm_umid(char *base);
-extern int line_setup_irq(int fd, int input, int output, struct tty_struct *tty);
+extern int line_setup_irq(int fd, int input, int output, struct line *line,
+ void *data);
extern void line_close_chan(struct line *line);
-extern void line_disable(struct tty_struct *tty, int current_irq);
-extern struct tty_driver * line_register_devfs(struct lines *set,
- struct line_driver *line_driver,
+extern struct tty_driver * line_register_devfs(struct lines *set,
+ struct line_driver *line_driver,
struct tty_operations *driver,
struct line *lines,
int nlines);
-extern void lines_init(struct line *lines, int nlines);
+extern void lines_init(struct line *lines, int nlines, struct chan_opts *opts);
extern void close_lines(struct line *lines, int nlines);
-extern int line_config(struct line *lines, unsigned int sizeof_lines, char *str);
+extern int line_config(struct line *lines, unsigned int sizeof_lines,
+ char *str, struct chan_opts *opts);
extern int line_id(char **str, int *start_out, int *end_out);
extern int line_remove(struct line *lines, unsigned int sizeof_lines, int n);
-extern int line_get_config(char *dev, struct line *lines, unsigned int sizeof_lines, char *str,
+extern int line_get_config(char *dev, struct line *lines,
+ unsigned int sizeof_lines, char *str,
int size, char **error_out);
#endif
diff --git a/arch/um/include/mconsole.h b/arch/um/include/mconsole.h
index b1b512f4703..58f67d39110 100644
--- a/arch/um/include/mconsole.h
+++ b/arch/um/include/mconsole.h
@@ -32,7 +32,7 @@ struct mconsole_reply {
struct mconsole_notify {
u32 magic;
- u32 version;
+ u32 version;
enum { MCONSOLE_SOCKET, MCONSOLE_PANIC, MCONSOLE_HANG,
MCONSOLE_USER_NOTIFY } type;
u32 len;
@@ -66,7 +66,9 @@ struct mc_request
extern char mconsole_socket_name[];
extern int mconsole_unlink_socket(void);
-extern int mconsole_reply(struct mc_request *req, char *reply, int err,
+extern int mconsole_reply_len(struct mc_request *req, const char *reply,
+ int len, int err, int more);
+extern int mconsole_reply(struct mc_request *req, const char *str, int err,
int more);
extern void mconsole_version(struct mc_request *req);
@@ -84,7 +86,7 @@ extern void mconsole_proc(struct mc_request *req);
extern void mconsole_stack(struct mc_request *req);
extern int mconsole_get_request(int fd, struct mc_request *req);
-extern int mconsole_notify(char *sock_name, int type, const void *data,
+extern int mconsole_notify(char *sock_name, int type, const void *data,
int len);
extern char *mconsole_notify_socket(void);
extern void lock_notify(void);
diff --git a/arch/um/include/os.h b/arch/um/include/os.h
index 2cccfa5b8ab..dd72d66cf0e 100644
--- a/arch/um/include/os.h
+++ b/arch/um/include/os.h
@@ -9,6 +9,8 @@
#include "uml-config.h"
#include "asm/types.h"
#include "../os/include/file.h"
+#include "sysdep/ptrace.h"
+#include "kern_util.h"
#define OS_TYPE_FILE 1
#define OS_TYPE_DIR 2
@@ -213,15 +215,24 @@ extern int run_helper_thread(int (*proc)(void *), void *arg,
int stack_order);
extern int helper_wait(int pid);
-#endif
+/* umid.c */
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+extern int umid_file_name(char *name, char *buf, int len);
+extern int set_umid(char *name);
+extern char *get_umid(void);
+
+/* signal.c */
+extern void set_sigstack(void *sig_stack, int size);
+extern void remove_sigstack(void);
+extern void set_handler(int sig, void (*handler)(int), int flags, ...);
+extern int change_sig(int signal, int on);
+extern void block_signals(void);
+extern void unblock_signals(void);
+extern int get_signals(void);
+extern int set_signals(int enable);
+
+/* trap.c */
+extern void os_fill_handlinfo(struct kern_handlers h);
+extern void do_longjmp(void *p, int val);
+
+#endif
diff --git a/arch/um/include/signal_user.h b/arch/um/include/signal_user.h
deleted file mode 100644
index b075e543d86..00000000000
--- a/arch/um/include/signal_user.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2001 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __SIGNAL_USER_H__
-#define __SIGNAL_USER_H__
-
-extern int signal_stack_size;
-
-extern int change_sig(int signal, int on);
-extern void set_sigstack(void *stack, int size);
-extern void set_handler(int sig, void (*handler)(int), int flags, ...);
-extern int set_signals(int enable);
-extern int get_signals(void);
-
-#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/sys-i386/kernel-offsets.c b/arch/um/include/sysdep-i386/kernel-offsets.h
index 35db8505750..82f96c57414 100644
--- a/arch/um/sys-i386/kernel-offsets.c
+++ b/arch/um/include/sysdep-i386/kernel-offsets.h
@@ -1,12 +1,9 @@
-#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/sched.h>
-#include <linux/time.h>
#include <linux/elf.h>
-#include <asm/page.h>
#define DEFINE(sym, val) \
- asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define STR(x) #x
#define DEFINE_STR(sym, val) asm volatile("\n->" #sym " " STR(val) " " #val: : )
diff --git a/arch/um/sys-x86_64/kernel-offsets.c b/arch/um/include/sysdep-x86_64/kernel-offsets.h
index bfcb104b846..5ce93abd0b5 100644
--- a/arch/um/sys-x86_64/kernel-offsets.c
+++ b/arch/um/include/sysdep-x86_64/kernel-offsets.h
@@ -6,7 +6,7 @@
#include <asm/page.h>
#define DEFINE(sym, val) \
- asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define DEFINE_STR1(x) #x
#define DEFINE_STR(sym, val) asm volatile("\n->" #sym " " DEFINE_STR1(val) " " #val: : )
diff --git a/arch/um/include/user_util.h b/arch/um/include/user_util.h
index bb505e01d99..c1dbd77b073 100644
--- a/arch/um/include/user_util.h
+++ b/arch/um/include/user_util.h
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
@@ -23,12 +23,7 @@ struct cpu_task {
extern struct cpu_task cpu_tasks[];
-struct signal_info {
- void (*handler)(int, union uml_pt_regs *);
- int is_irq;
-};
-
-extern struct signal_info sig_info[];
+extern void (*sig_info[])(int, union uml_pt_regs *);
extern unsigned long low_physmem;
extern unsigned long high_physmem;
@@ -64,8 +59,6 @@ extern void setup_machinename(char *machine_out);
extern void setup_hostinfo(void);
extern void do_exec(int old_pid, int new_pid);
extern void tracer_panic(char *msg, ...);
-extern char *get_umid(int only_if_set);
-extern void do_longjmp(void *p, int val);
extern int detach(int pid, int sig);
extern int attach(int pid);
extern void kill_child_dead(int pid);
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 3de9d21e36b..193cc2b7448 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -9,9 +9,9 @@ clean-files :=
obj-y = config.o exec_kern.o exitcode.o \
init_task.o irq.o irq_user.o ksyms.o mem.o physmem.o \
process_kern.o ptrace.o reboot.o resource.o sigio_user.o sigio_kern.o \
- signal_kern.o signal_user.o smp.o syscall_kern.o sysrq.o time.o \
- time_kern.o tlb.o trap_kern.o trap_user.o uaccess.o um_arch.o \
- umid.o user_util.o
+ signal_kern.o smp.o syscall_kern.o sysrq.o time.o \
+ time_kern.o tlb.o trap_kern.o uaccess.o um_arch.o umid.o \
+ user_util.o
obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
obj-$(CONFIG_GPROF) += gprof_syms.o
@@ -24,7 +24,7 @@ obj-$(CONFIG_MODE_SKAS) += skas/
user-objs-$(CONFIG_TTY_LOG) += tty_log.o
-USER_OBJS := $(user-objs-y) config.o time.o tty_log.o umid.o user_util.o
+USER_OBJS := $(user-objs-y) config.o time.o tty_log.o user_util.o
include arch/um/scripts/Makefile.rules
diff --git a/arch/um/kernel/asm-offsets.c b/arch/um/kernel/asm-offsets.c
index c13a64a288f..91ea538e161 100644
--- a/arch/um/kernel/asm-offsets.c
+++ b/arch/um/kernel/asm-offsets.c
@@ -1 +1 @@
-/* Dummy file to make kbuild happy - unused! */
+#include "sysdep/kernel-offsets.h"
diff --git a/arch/um/kernel/irq_user.c b/arch/um/kernel/irq_user.c
index c3ccaf24f3e..0e32f5f4a88 100644
--- a/arch/um/kernel/irq_user.c
+++ b/arch/um/kernel/irq_user.c
@@ -15,7 +15,6 @@
#include "kern_util.h"
#include "user.h"
#include "process.h"
-#include "signal_user.h"
#include "sigio.h"
#include "irq_user.h"
#include "os.h"
@@ -29,7 +28,6 @@ struct irq_fd {
int pid;
int events;
int current_events;
- int freed;
};
static struct irq_fd *active_fds = NULL;
@@ -41,9 +39,11 @@ static int pollfds_size = 0;
extern int io_count, intr_count;
+extern void free_irqs(void);
+
void sigio_handler(int sig, union uml_pt_regs *regs)
{
- struct irq_fd *irq_fd, *next;
+ struct irq_fd *irq_fd;
int i, n;
if(smp_sigio_handler()) return;
@@ -66,29 +66,15 @@ void sigio_handler(int sig, union uml_pt_regs *regs)
irq_fd = irq_fd->next;
}
- for(irq_fd = active_fds; irq_fd != NULL; irq_fd = next){
- next = irq_fd->next;
+ for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
if(irq_fd->current_events != 0){
irq_fd->current_events = 0;
do_IRQ(irq_fd->irq, regs);
-
- /* This is here because the next irq may be
- * freed in the handler. If a console goes
- * away, both the read and write irqs will be
- * freed. After do_IRQ, ->next will point to
- * a good IRQ.
- * Irqs can't be freed inside their handlers,
- * so the next best thing is to have them
- * marked as needing freeing, so that they
- * can be freed here.
- */
- next = irq_fd->next;
- if(irq_fd->freed){
- free_irq(irq_fd->irq, irq_fd->id);
- }
}
}
}
+
+ free_irqs();
}
int activate_ipi(int fd, int pid)
@@ -136,8 +122,7 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
.irq = irq,
.pid = pid,
.events = events,
- .current_events = 0,
- .freed = 0 } );
+ .current_events = 0 } );
/* Critical section - locked by a spinlock because this stuff can
* be changed from interrupt handlers. The stuff above is done
@@ -313,26 +298,6 @@ static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
return(irq);
}
-void free_irq_later(int irq, void *dev_id)
-{
- struct irq_fd *irq_fd;
- unsigned long flags;
-
- flags = irq_lock();
- for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
- if((irq_fd->irq == irq) && (irq_fd->id == dev_id))
- break;
- }
- if(irq_fd == NULL){
- printk("free_irq_later found no irq, irq = %d, "
- "dev_id = 0x%p\n", irq, dev_id);
- goto out;
- }
- irq_fd->freed = 1;
- out:
- irq_unlock(flags);
-}
-
void reactivate_fd(int fd, int irqnum)
{
struct irq_fd *irq;
diff --git a/arch/um/kernel/process_kern.c b/arch/um/kernel/process_kern.c
index 34b54a3e213..7f13b85d265 100644
--- a/arch/um/kernel/process_kern.c
+++ b/arch/um/kernel/process_kern.c
@@ -36,7 +36,6 @@
#include "kern_util.h"
#include "kern.h"
#include "signal_kern.h"
-#include "signal_user.h"
#include "init.h"
#include "irq_user.h"
#include "mem_user.h"
@@ -108,7 +107,7 @@ void set_current(void *t)
{
struct task_struct *task = t;
- cpu_tasks[task->thread_info->cpu] = ((struct cpu_task)
+ cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
{ external_pid(task), task });
}
@@ -324,10 +323,6 @@ int user_context(unsigned long sp)
return(stack != (unsigned long) current_thread);
}
-extern void remove_umid_dir(void);
-
-__uml_exitcall(remove_umid_dir);
-
extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;
void do_uml_exitcalls(void)
diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c
index a637e885c58..6f1a3a28811 100644
--- a/arch/um/kernel/reboot.c
+++ b/arch/um/kernel/reboot.c
@@ -12,6 +12,8 @@
#include "mode.h"
#include "choose-mode.h"
+void (*pm_power_off)(void);
+
#ifdef CONFIG_SMP
static void kill_idlers(int me)
{
diff --git a/arch/um/kernel/sigio_user.c b/arch/um/kernel/sigio_user.c
index 48b1f644b9a..62e5cfdf218 100644
--- a/arch/um/kernel/sigio_user.c
+++ b/arch/um/kernel/sigio_user.c
@@ -216,6 +216,8 @@ static int write_sigio_thread(void *unused)
"err = %d\n", -n);
}
}
+
+ return 0;
}
static int need_poll(int n)
diff --git a/arch/um/kernel/signal_kern.c b/arch/um/kernel/signal_kern.c
index 03618bd13d5..7b0e0e81c16 100644
--- a/arch/um/kernel/signal_kern.c
+++ b/arch/um/kernel/signal_kern.c
@@ -22,7 +22,6 @@
#include "asm/ucontext.h"
#include "kern_util.h"
#include "signal_kern.h"
-#include "signal_user.h"
#include "kern.h"
#include "frame_kern.h"
#include "sigcontext.h"
diff --git a/arch/um/kernel/signal_user.c b/arch/um/kernel/signal_user.c
deleted file mode 100644
index 62f457835fb..00000000000
--- a/arch/um/kernel/signal_user.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#include <stdio.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <signal.h>
-#include <errno.h>
-#include <stdarg.h>
-#include <string.h>
-#include <sys/mman.h>
-#include "user_util.h"
-#include "kern_util.h"
-#include "user.h"
-#include "signal_user.h"
-#include "signal_kern.h"
-#include "sysdep/sigcontext.h"
-#include "sigcontext.h"
-
-void set_sigstack(void *sig_stack, int size)
-{
- stack_t stack = ((stack_t) { .ss_flags = 0,
- .ss_sp = (__ptr_t) sig_stack,
- .ss_size = size - sizeof(void *) });
-
- if(sigaltstack(&stack, NULL) != 0)
- panic("enabling signal stack failed, errno = %d\n", errno);
-}
-
-void set_handler(int sig, void (*handler)(int), int flags, ...)
-{
- struct sigaction action;
- va_list ap;
- int mask;
-
- va_start(ap, flags);
- action.sa_handler = handler;
- sigemptyset(&action.sa_mask);
- while((mask = va_arg(ap, int)) != -1){
- sigaddset(&action.sa_mask, mask);
- }
- va_end(ap);
- action.sa_flags = flags;
- action.sa_restorer = NULL;
- if(sigaction(sig, &action, NULL) < 0)
- panic("sigaction failed");
-}
-
-int change_sig(int signal, int on)
-{
- sigset_t sigset, old;
-
- sigemptyset(&sigset);
- sigaddset(&sigset, signal);
- sigprocmask(on ? SIG_UNBLOCK : SIG_BLOCK, &sigset, &old);
- return(!sigismember(&old, signal));
-}
-
-/* Both here and in set/get_signal we don't touch SIGPROF, because we must not
- * disable profiling; it's safe because the profiling code does not interact
- * with the kernel code at all.*/
-
-static void change_signals(int type)
-{
- sigset_t mask;
-
- sigemptyset(&mask);
- sigaddset(&mask, SIGVTALRM);
- sigaddset(&mask, SIGALRM);
- sigaddset(&mask, SIGIO);
- if(sigprocmask(type, &mask, NULL) < 0)
- panic("Failed to change signal mask - errno = %d", errno);
-}
-
-void block_signals(void)
-{
- change_signals(SIG_BLOCK);
-}
-
-void unblock_signals(void)
-{
- change_signals(SIG_UNBLOCK);
-}
-
-/* These are the asynchronous signals. SIGVTALRM and SIGARLM are handled
- * together under SIGVTALRM_BIT. SIGPROF is excluded because we want to
- * be able to profile all of UML, not just the non-critical sections. If
- * profiling is not thread-safe, then that is not my problem. We can disable
- * profiling when SMP is enabled in that case.
- */
-#define SIGIO_BIT 0
-#define SIGVTALRM_BIT 1
-
-static int enable_mask(sigset_t *mask)
-{
- int sigs;
-
- sigs = sigismember(mask, SIGIO) ? 0 : 1 << SIGIO_BIT;
- sigs |= sigismember(mask, SIGVTALRM) ? 0 : 1 << SIGVTALRM_BIT;
- sigs |= sigismember(mask, SIGALRM) ? 0 : 1 << SIGVTALRM_BIT;
- return(sigs);
-}
-
-int get_signals(void)
-{
- sigset_t mask;
-
- if(sigprocmask(SIG_SETMASK, NULL, &mask) < 0)
- panic("Failed to get signal mask");
- return(enable_mask(&mask));
-}
-
-int set_signals(int enable)
-{
- sigset_t mask;
- int ret;
-
- sigemptyset(&mask);
- if(enable & (1 << SIGIO_BIT))
- sigaddset(&mask, SIGIO);
- if(enable & (1 << SIGVTALRM_BIT)){
- sigaddset(&mask, SIGVTALRM);
- sigaddset(&mask, SIGALRM);
- }
-
- /* This is safe - sigprocmask is guaranteed to copy locally the
- * value of new_set, do his work and then, at the end, write to
- * old_set.
- */
- if(sigprocmask(SIG_UNBLOCK, &mask, &mask) < 0)
- panic("Failed to enable signals");
- ret = enable_mask(&mask);
- sigemptyset(&mask);
- if((enable & (1 << SIGIO_BIT)) == 0)
- sigaddset(&mask, SIGIO);
- if((enable & (1 << SIGVTALRM_BIT)) == 0){
- sigaddset(&mask, SIGVTALRM);
- sigaddset(&mask, SIGALRM);
- }
- if(sigprocmask(SIG_BLOCK, &mask, NULL) < 0)
- panic("Failed to block signals");
-
- return(ret);
-}
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/kernel/skas/Makefile b/arch/um/kernel/skas/Makefile
index 8de471b59c1..7a9fc16d71d 100644
--- a/arch/um/kernel/skas/Makefile
+++ b/arch/um/kernel/skas/Makefile
@@ -4,7 +4,7 @@
#
obj-y := clone.o exec_kern.o mem.o mem_user.o mmu.o process.o process_kern.o \
- syscall.o tlb.o trap_user.o uaccess.o
+ syscall.o tlb.o uaccess.o
USER_OBJS := process.o clone.o
diff --git a/arch/um/kernel/skas/include/skas.h b/arch/um/kernel/skas/include/skas.h
index daa2f85b684..01d489de398 100644
--- a/arch/um/kernel/skas/include/skas.h
+++ b/arch/um/kernel/skas/include/skas.h
@@ -22,7 +22,6 @@ extern int start_idle_thread(void *stack, void *switch_buf_ptr,
extern int user_thread(unsigned long stack, int flags);
extern void userspace(union uml_pt_regs *regs);
extern void new_thread_proc(void *stack, void (*handler)(int sig));
-extern void remove_sigstack(void);
extern void new_thread_handler(int sig);
extern void handle_syscall(union uml_pt_regs *regs);
extern int map(struct mm_id * mm_idp, unsigned long virt,
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 599d679bd4f..3b3955d8440 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -31,7 +31,6 @@
#include "proc_mm.h"
#include "skas_ptrace.h"
#include "chan_user.h"
-#include "signal_user.h"
#include "registers.h"
#include "mem.h"
#include "uml-config.h"
@@ -69,7 +68,7 @@ void wait_stub_done(int pid, int sig, char * fname)
if((n < 0) || !WIFSTOPPED(status) ||
(WSTOPSIG(status) != SIGUSR1 && WSTOPSIG(status) != SIGTRAP)){
- unsigned long regs[FRAME_SIZE];
+ unsigned long regs[HOST_FRAME_SIZE];
if(ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
printk("Failed to get registers from stub, "
"errno = %d\n", errno);
@@ -77,7 +76,7 @@ void wait_stub_done(int pid, int sig, char * fname)
int i;
printk("Stub registers -\n");
- for(i = 0; i < FRAME_SIZE; i++)
+ for(i = 0; i < HOST_FRAME_SIZE; i++)
printk("\t%d - %lx\n", i, regs[i]);
}
panic("%s : failed to wait for SIGUSR1/SIGTRAP, "
@@ -514,16 +513,6 @@ int start_idle_thread(void *stack, void *switch_buf_ptr, void **fork_buf_ptr)
siglongjmp(**switch_buf, 1);
}
-void remove_sigstack(void)
-{
- stack_t stack = ((stack_t) { .ss_flags = SS_DISABLE,
- .ss_sp = NULL,
- .ss_size = 0 });
-
- if(sigaltstack(&stack, NULL) != 0)
- panic("disabling signal stack failed, errno = %d\n", errno);
-}
-
void initial_thread_cb_skas(void (*proc)(void *), void *arg)
{
sigjmp_buf here;
diff --git a/arch/um/kernel/skas/process_kern.c b/arch/um/kernel/skas/process_kern.c
index 9c990253966..dc41c6dc2f3 100644
--- a/arch/um/kernel/skas/process_kern.c
+++ b/arch/um/kernel/skas/process_kern.c
@@ -14,7 +14,6 @@
#include "asm/atomic.h"
#include "kern_util.h"
#include "time_user.h"
-#include "signal_user.h"
#include "skas.h"
#include "os.h"
#include "user_util.h"
@@ -119,7 +118,7 @@ int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
handler = new_thread_handler;
}
- new_thread(p->thread_info, &p->thread.mode.skas.switch_buf,
+ new_thread(task_stack_page(p), &p->thread.mode.skas.switch_buf,
&p->thread.mode.skas.fork_buf, handler);
return(0);
}
@@ -186,7 +185,7 @@ int start_uml_skas(void)
init_task.thread.request.u.thread.proc = start_kernel_proc;
init_task.thread.request.u.thread.arg = NULL;
- return(start_idle_thread(init_task.thread_info,
+ return(start_idle_thread(task_stack_page(&init_task),
&init_task.thread.mode.skas.switch_buf,
&init_task.thread.mode.skas.fork_buf));
}
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index c40b611e3d9..8fa2ae7f302 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -14,9 +14,9 @@
#include "kern_util.h"
#include "user.h"
#include "process.h"
-#include "signal_user.h"
#include "time_user.h"
#include "kern_constants.h"
+#include "os.h"
/* XXX This really needs to be declared and initialized in a kernel file since
* it's in <linux/time.h>
@@ -99,7 +99,8 @@ void uml_idle_timer(void)
set_interval(ITIMER_REAL);
}
-extern int do_posix_clock_monotonic_gettime(struct timespec *tp);
+extern void ktime_get_ts(struct timespec *ts);
+#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
void time_init(void)
{
@@ -114,8 +115,8 @@ void time_init(void)
wall_to_monotonic.tv_nsec = -now.tv_nsec;
}
-/* Declared in linux/time.h, which can't be included here */
-extern void clock_was_set(void);
+/* Defined in linux/ktimer.h, which can't be included here */
+#define clock_was_set() do { } while (0)
void do_gettimeofday(struct timeval *tv)
{
diff --git a/arch/um/kernel/trap_kern.c b/arch/um/kernel/trap_kern.c
index 0d4c10a7360..d56046c2aba 100644
--- a/arch/um/kernel/trap_kern.c
+++ b/arch/um/kernel/trap_kern.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright (C) 2000, 2001 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
@@ -26,9 +26,13 @@
#include "mconsole_kern.h"
#include "mem.h"
#include "mem_kern.h"
+#include "sysdep/sigcontext.h"
+#include "sysdep/ptrace.h"
+#include "os.h"
#ifdef CONFIG_MODE_SKAS
#include "skas.h"
#endif
+#include "os.h"
/* Note this is constrained to return 0, -EFAULT, -EACCESS, -ENOMEM by segv(). */
int handle_page_fault(unsigned long address, unsigned long ip,
@@ -125,6 +129,25 @@ out_of_memory:
goto out;
}
+void segv_handler(int sig, union uml_pt_regs *regs)
+{
+ struct faultinfo * fi = UPT_FAULTINFO(regs);
+
+ if(UPT_IS_USER(regs) && !SEGV_IS_FIXABLE(fi)){
+ bad_segv(*fi, UPT_IP(regs));
+ return;
+ }
+ segv(*fi, UPT_IP(regs), UPT_IS_USER(regs), regs);
+}
+
+struct kern_handlers handlinfo_kern = {
+ .relay_signal = relay_signal,
+ .winch = winch,
+ .bus_handler = relay_signal,
+ .page_fault = segv_handler,
+ .sigio_handler = sigio_handler,
+ .timer_handler = timer_handler
+};
/*
* We give a *copy* of the faultinfo in the regs to segv.
* This must be done, since nesting SEGVs could overwrite
diff --git a/arch/um/kernel/trap_user.c b/arch/um/kernel/trap_user.c
deleted file mode 100644
index e9ccd6b8d3c..00000000000
--- a/arch/um/kernel/trap_user.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#include <stdlib.h>
-#include <errno.h>
-#include <setjmp.h>
-#include <signal.h>
-#include <sys/time.h>
-#include <sys/wait.h>
-#include <asm/page.h>
-#include <asm/unistd.h>
-#include <asm/ptrace.h>
-#include "init.h"
-#include "sysdep/ptrace.h"
-#include "sigcontext.h"
-#include "sysdep/sigcontext.h"
-#include "irq_user.h"
-#include "signal_user.h"
-#include "time_user.h"
-#include "task.h"
-#include "mode.h"
-#include "choose-mode.h"
-#include "kern_util.h"
-#include "user_util.h"
-#include "os.h"
-
-void kill_child_dead(int pid)
-{
- kill(pid, SIGKILL);
- kill(pid, SIGCONT);
- do {
- int n;
- CATCH_EINTR(n = waitpid(pid, NULL, 0));
- if (n > 0)
- kill(pid, SIGCONT);
- else
- break;
- } while(1);
-}
-
-void segv_handler(int sig, union uml_pt_regs *regs)
-{
- struct faultinfo * fi = UPT_FAULTINFO(regs);
-
- if(UPT_IS_USER(regs) && !SEGV_IS_FIXABLE(fi)){
- bad_segv(*fi, UPT_IP(regs));
- return;
- }
- segv(*fi, UPT_IP(regs), UPT_IS_USER(regs), regs);
-}
-
-void usr2_handler(int sig, union uml_pt_regs *regs)
-{
- CHOOSE_MODE(syscall_handler_tt(sig, regs), (void) 0);
-}
-
-struct signal_info sig_info[] = {
- [ SIGTRAP ] { .handler = relay_signal,
- .is_irq = 0 },
- [ SIGFPE ] { .handler = relay_signal,
- .is_irq = 0 },
- [ SIGILL ] { .handler = relay_signal,
- .is_irq = 0 },
- [ SIGWINCH ] { .handler = winch,
- .is_irq = 1 },
- [ SIGBUS ] { .handler = bus_handler,
- .is_irq = 0 },
- [ SIGSEGV] { .handler = segv_handler,
- .is_irq = 0 },
- [ SIGIO ] { .handler = sigio_handler,
- .is_irq = 1 },
- [ SIGVTALRM ] { .handler = timer_handler,
- .is_irq = 1 },
- [ SIGALRM ] { .handler = timer_handler,
- .is_irq = 1 },
- [ SIGUSR2 ] { .handler = usr2_handler,
- .is_irq = 0 },
-};
-
-void do_longjmp(void *b, int val)
-{
- sigjmp_buf *buf = b;
-
- siglongjmp(*buf, val);
-}
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/kernel/tt/exec_kern.c b/arch/um/kernel/tt/exec_kern.c
index 065b504a653..8f40e483873 100644
--- a/arch/um/kernel/tt/exec_kern.c
+++ b/arch/um/kernel/tt/exec_kern.c
@@ -14,7 +14,6 @@
#include "kern_util.h"
#include "irq_user.h"
#include "time_user.h"
-#include "signal_user.h"
#include "mem_user.h"
#include "os.h"
#include "tlb.h"
@@ -40,7 +39,7 @@ void flush_thread_tt(void)
do_exit(SIGKILL);
}
- new_pid = start_fork_tramp(current->thread_info, stack, 0, exec_tramp);
+ new_pid = start_fork_tramp(task_stack_page(current), stack, 0, exec_tramp);
if(new_pid < 0){
printk(KERN_ERR
"flush_thread : new thread failed, errno = %d\n",
diff --git a/arch/um/kernel/tt/process_kern.c b/arch/um/kernel/tt/process_kern.c
index cfaa373a6e7..62535303aa2 100644
--- a/arch/um/kernel/tt/process_kern.c
+++ b/arch/um/kernel/tt/process_kern.c
@@ -13,7 +13,6 @@
#include "asm/ptrace.h"
#include "asm/tlbflush.h"
#include "irq_user.h"
-#include "signal_user.h"
#include "kern_util.h"
#include "user_util.h"
#include "os.h"
@@ -37,7 +36,7 @@ void switch_to_tt(void *prev, void *next)
from = prev;
to = next;
- cpu = from->thread_info->cpu;
+ cpu = task_thread_info(from)->cpu;
if(cpu == 0)
forward_interrupts(to->thread.mode.tt.extern_pid);
#ifdef CONFIG_SMP
@@ -254,7 +253,7 @@ int copy_thread_tt(int nr, unsigned long clone_flags, unsigned long sp,
clone_flags &= CLONE_VM;
p->thread.temp_stack = stack;
- new_pid = start_fork_tramp(p->thread_info, stack, clone_flags, tramp);
+ new_pid = start_fork_tramp(task_stack_page(p), stack, clone_flags, tramp);
if(new_pid < 0){
printk(KERN_ERR "copy_thread : clone failed - errno = %d\n",
-new_pid);
@@ -344,7 +343,7 @@ int do_proc_op(void *t, int proc_id)
pid = thread->request.u.exec.pid;
do_exec(thread->mode.tt.extern_pid, pid);
thread->mode.tt.extern_pid = pid;
- cpu_tasks[task->thread_info->cpu].pid = pid;
+ cpu_tasks[task_thread_info(task)->cpu].pid = pid;
break;
case OP_FORK:
attach_process(thread->request.u.fork.pid);
@@ -426,7 +425,7 @@ int start_uml_tt(void)
int pages;
pages = (1 << CONFIG_KERNEL_STACK_ORDER);
- sp = (void *) ((unsigned long) init_task.thread_info) +
+ sp = task_stack_page(&init_task) +
pages * PAGE_SIZE - sizeof(unsigned long);
return(tracer(start_kernel_proc, sp));
}
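
Several hunks in this file (and in process_kern.c and exec_kern.c above) replace direct task->thread_info dereferences with the task_thread_info() and task_stack_page() accessors. A toy sketch of the accessor idea follows; the structures are deliberately simplified stand-ins, not the kernel's real task_struct/thread_info layout:

    /* Sketch only: hypothetical, simplified types. */
    #include <stdio.h>

    struct thread_info { int cpu; };

    struct task_struct {
            /* In this era the thread_info sits at the base of the task's
             * kernel stack; the accessors hide that layout decision. */
            struct thread_info *stack;
    };

    static struct thread_info *task_thread_info(struct task_struct *t)
    {
            return t->stack;
    }

    static void *task_stack_page(struct task_struct *t)
    {
            /* Stack page and thread_info start at the same address here. */
            return t->stack;
    }

    int main(void)
    {
            struct thread_info ti = { .cpu = 0 };
            struct task_struct task = { .stack = &ti };

            printf("cpu %d, stack %p\n",
                   task_thread_info(&task)->cpu, task_stack_page(&task));
            return 0;
    }

Callers written against the accessors keep working if the stack/thread_info relationship changes, which is what makes conversions like the ones above purely mechanical.
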
diff --git a/arch/um/kernel/tt/tracer.c b/arch/um/kernel/tt/tracer.c
index d11e7399d7a..71daae24e48 100644
--- a/arch/um/kernel/tt/tracer.c
+++ b/arch/um/kernel/tt/tracer.c
@@ -19,7 +19,6 @@
#include "sigcontext.h"
#include "sysdep/sigcontext.h"
#include "os.h"
-#include "signal_user.h"
#include "user_util.h"
#include "mem_user.h"
#include "process.h"
diff --git a/arch/um/kernel/tt/trap_user.c b/arch/um/kernel/tt/trap_user.c
index fc108615bea..a414c529fbc 100644
--- a/arch/um/kernel/tt/trap_user.c
+++ b/arch/um/kernel/tt/trap_user.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
@@ -8,18 +8,18 @@
#include <signal.h>
#include "sysdep/ptrace.h"
#include "sysdep/sigcontext.h"
-#include "signal_user.h"
#include "user_util.h"
#include "kern_util.h"
#include "task.h"
#include "tt.h"
+#include "os.h"
void sig_handler_common_tt(int sig, void *sc_ptr)
{
struct sigcontext *sc = sc_ptr;
struct tt_regs save_regs, *r;
- struct signal_info *info;
int save_errno = errno, is_user;
+ void (*handler)(int, union uml_pt_regs *);
/* This is done because to allow SIGSEGV to be delivered inside a SEGV
* handler. This can happen in copy_user, and if SEGV is disabled,
@@ -40,10 +40,14 @@ void sig_handler_common_tt(int sig, void *sc_ptr)
if(sig != SIGUSR2)
r->syscall = -1;
- info = &sig_info[sig];
- if(!info->is_irq) unblock_signals();
+ handler = sig_info[sig];
+
+ /* unblock SIGALRM, SIGVTALRM, SIGIO if sig isn't IRQ signal */
+ if (sig != SIGIO && sig != SIGWINCH &&
+ sig != SIGVTALRM && sig != SIGALRM)
+ unblock_signals();
- (*info->handler)(sig, (union uml_pt_regs *) r);
+ handler(sig, (union uml_pt_regs *) r);
if(is_user){
interrupt_end();
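
Both this tt-mode handler and its skas counterpart later in the diff drop the old struct signal_info lookup: the handler pointer now comes straight from sig_info[], and whether to unblock signals is decided by an explicit test on the signal number. A minimal sketch of that test as a helper; the is_irq_signal() name is hypothetical (the patch keeps the check inline), and the signal list is the one in the hunk above:

    #include <signal.h>
    #include <stdio.h>

    /* SIGIO, SIGWINCH, SIGVTALRM and SIGALRM are the "IRQ" signals;
     * every other signal may unblock signals before its handler runs. */
    static int is_irq_signal(int sig)
    {
            return sig == SIGIO || sig == SIGWINCH ||
                   sig == SIGVTALRM || sig == SIGALRM;
    }

    int main(void)
    {
            printf("SIGIO:   %s\n", is_irq_signal(SIGIO) ? "IRQ" : "not IRQ");
            printf("SIGSEGV: %s\n", is_irq_signal(SIGSEGV) ? "IRQ" : "not IRQ");
            return 0;
    }
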
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 142a9493912..e2d3ca445ef 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright (C) 2000, 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
@@ -146,8 +146,8 @@ void set_cmdline(char *cmd)
if(CHOOSE_MODE(honeypot, 0)) return;
- umid = get_umid(1);
- if(umid != NULL){
+ umid = get_umid();
+ if(*umid != '\0'){
snprintf(argv1_begin,
(argv1_end - argv1_begin) * sizeof(*ptr),
"(%s) ", umid);
@@ -243,10 +243,6 @@ static int __init mode_tt_setup(char *line, int *add)
return(0);
}
-#else
-
-#error Either CONFIG_MODE_TT or CONFIG_MODE_SKAS must be enabled
-
#endif
#endif
#endif
@@ -363,6 +359,11 @@ int linux_main(int argc, char **argv)
uml_start = CHOOSE_MODE_PROC(set_task_sizes_tt, set_task_sizes_skas, 0,
&host_task_size, &task_size);
+ /*
+ * Setting up handlers to 'sig_info' struct
+ */
+ os_fill_handlinfo(handlinfo_kern);
+
brk_start = (unsigned long) sbrk(0);
CHOOSE_MODE_PROC(before_mem_tt, before_mem_skas, brk_start);
/* Increase physical memory size for exec-shield users
diff --git a/arch/um/kernel/umid.c b/arch/um/kernel/umid.c
index 0b21d59ba0c..4eaee823bfd 100644
--- a/arch/um/kernel/umid.c
+++ b/arch/um/kernel/umid.c
@@ -3,61 +3,30 @@
* Licensed under the GPL
*/
-#include <stdio.h>
-#include <unistd.h>
-#include <errno.h>
-#include <string.h>
-#include <stdlib.h>
-#include <dirent.h>
-#include <signal.h>
-#include <sys/stat.h>
-#include <sys/param.h>
-#include "user.h"
-#include "umid.h"
+#include "asm/errno.h"
#include "init.h"
#include "os.h"
-#include "user_util.h"
-#include "choose-mode.h"
+#include "kern.h"
+#include "linux/kernel.h"
-#define UMID_LEN 64
-#define UML_DIR "~/.uml/"
-
-/* Changed by set_umid and make_umid, which are run early in boot */
-static char umid[UMID_LEN] = { 0 };
-
-/* Changed by set_uml_dir and make_uml_dir, which are run early in boot */
-static char *uml_dir = UML_DIR;
-
-/* Changed by set_umid */
-static int umid_is_random = 1;
+/* Changed by set_umid_arg */
static int umid_inited = 0;
-/* Have we created the files? Should we remove them? */
-static int umid_owned = 0;
-static int make_umid(int (*printer)(const char *fmt, ...));
-
-static int __init set_umid(char *name, int is_random,
- int (*printer)(const char *fmt, ...))
+static int __init set_umid_arg(char *name, int *add)
{
- if(umid_inited){
- (*printer)("Unique machine name can't be set twice\n");
- return(-1);
- }
+ int err;
- if(strlen(name) > UMID_LEN - 1)
- (*printer)("Unique machine name is being truncated to %d "
- "characters\n", UMID_LEN);
- strlcpy(umid, name, sizeof(umid));
+ if(umid_inited)
+ return 0;
- umid_is_random = is_random;
- umid_inited = 1;
- return 0;
-}
-
-static int __init set_umid_arg(char *name, int *add)
-{
*add = 0;
- return(set_umid(name, 0, printf));
+ err = set_umid(name);
+ if(err == -EEXIST)
+ printf("umid '%s' already in use\n", name);
+ else if(!err)
+ umid_inited = 1;
+
+ return 0;
}
__uml_setup("umid=", set_umid_arg,
@@ -66,265 +35,3 @@ __uml_setup("umid=", set_umid_arg,
" is used for naming the pid file and management console socket.\n\n"
);
-int __init umid_file_name(char *name, char *buf, int len)
-{
- int n;
-
- if(!umid_inited && make_umid(printk)) return(-1);
-
- n = strlen(uml_dir) + strlen(umid) + strlen(name) + 1;
- if(n > len){
- printk("umid_file_name : buffer too short\n");
- return(-1);
- }
-
- sprintf(buf, "%s%s/%s", uml_dir, umid, name);
- return(0);
-}
-
-extern int tracing_pid;
-
-static void __init create_pid_file(void)
-{
- char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
- char pid[sizeof("nnnnn\0")];
- int fd, n;
-
- if(umid_file_name("pid", file, sizeof(file)))
- return;
-
- fd = os_open_file(file, of_create(of_excl(of_rdwr(OPENFLAGS()))),
- 0644);
- if(fd < 0){
- printf("Open of machine pid file \"%s\" failed: %s\n",
- file, strerror(-fd));
- return;
- }
-
- sprintf(pid, "%d\n", os_getpid());
- n = os_write_file(fd, pid, strlen(pid));
- if(n != strlen(pid))
- printf("Write of pid file failed - err = %d\n", -n);
- os_close_file(fd);
-}
-
-static int actually_do_remove(char *dir)
-{
- DIR *directory;
- struct dirent *ent;
- int len;
- char file[256];
-
- directory = opendir(dir);
- if(directory == NULL){
- printk("actually_do_remove : couldn't open directory '%s', "
- "errno = %d\n", dir, errno);
- return(1);
- }
- while((ent = readdir(directory)) != NULL){
- if(!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, ".."))
- continue;
- len = strlen(dir) + sizeof("/") + strlen(ent->d_name) + 1;
- if(len > sizeof(file)){
- printk("Not deleting '%s' from '%s' - name too long\n",
- ent->d_name, dir);
- continue;
- }
- sprintf(file, "%s/%s", dir, ent->d_name);
- if(unlink(file) < 0){
- printk("actually_do_remove : couldn't remove '%s' "
- "from '%s', errno = %d\n", ent->d_name, dir,
- errno);
- return(1);
- }
- }
- if(rmdir(dir) < 0){
- printk("actually_do_remove : couldn't rmdir '%s', "
- "errno = %d\n", dir, errno);
- return(1);
- }
- return(0);
-}
-
-void remove_umid_dir(void)
-{
- char dir[strlen(uml_dir) + UMID_LEN + 1];
- if (!umid_owned)
- return;
-
- sprintf(dir, "%s%s", uml_dir, umid);
- actually_do_remove(dir);
-}
-
-char *get_umid(int only_if_set)
-{
- if(only_if_set && umid_is_random)
- return NULL;
- return umid;
-}
-
-static int not_dead_yet(char *dir)
-{
- char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
- char pid[sizeof("nnnnn\0")], *end;
- int dead, fd, p, n;
-
- sprintf(file, "%s/pid", dir);
- dead = 0;
- fd = os_open_file(file, of_read(OPENFLAGS()), 0);
- if(fd < 0){
- if(fd != -ENOENT){
- printk("not_dead_yet : couldn't open pid file '%s', "
- "err = %d\n", file, -fd);
- return(1);
- }
- dead = 1;
- }
- if(fd > 0){
- n = os_read_file(fd, pid, sizeof(pid));
- if(n < 0){
- printk("not_dead_yet : couldn't read pid file '%s', "
- "err = %d\n", file, -n);
- return(1);
- }
- p = strtoul(pid, &end, 0);
- if(end == pid){
- printk("not_dead_yet : couldn't parse pid file '%s', "
- "errno = %d\n", file, errno);
- dead = 1;
- }
- if(((kill(p, 0) < 0) && (errno == ESRCH)) ||
- (p == CHOOSE_MODE(tracing_pid, os_getpid())))
- dead = 1;
- }
- if(!dead)
- return(1);
- return(actually_do_remove(dir));
-}
-
-static int __init set_uml_dir(char *name, int *add)
-{
- if((strlen(name) > 0) && (name[strlen(name) - 1] != '/')){
- uml_dir = malloc(strlen(name) + 2);
- if(uml_dir == NULL){
- printf("Failed to malloc uml_dir - error = %d\n",
- errno);
- uml_dir = name;
- /* Return 0 here because do_initcalls doesn't look at
- * the return value.
- */
- return(0);
- }
- sprintf(uml_dir, "%s/", name);
- }
- else uml_dir = name;
- return(0);
-}
-
-static int __init make_uml_dir(void)
-{
- char dir[MAXPATHLEN + 1] = { '\0' };
- int len;
-
- if(*uml_dir == '~'){
- char *home = getenv("HOME");
-
- if(home == NULL){
- printf("make_uml_dir : no value in environment for "
- "$HOME\n");
- exit(1);
- }
- strlcpy(dir, home, sizeof(dir));
- uml_dir++;
- }
- strlcat(dir, uml_dir, sizeof(dir));
- len = strlen(dir);
- if (len > 0 && dir[len - 1] != '/')
- strlcat(dir, "/", sizeof(dir));
-
- uml_dir = malloc(strlen(dir) + 1);
- if (uml_dir == NULL) {
- printf("make_uml_dir : malloc failed, errno = %d\n", errno);
- exit(1);
- }
- strcpy(uml_dir, dir);
-
- if((mkdir(uml_dir, 0777) < 0) && (errno != EEXIST)){
- printf("Failed to mkdir %s: %s\n", uml_dir, strerror(errno));
- return(-1);
- }
- return 0;
-}
-
-static int __init make_umid(int (*printer)(const char *fmt, ...))
-{
- int fd, err;
- char tmp[strlen(uml_dir) + UMID_LEN + 1];
-
- strlcpy(tmp, uml_dir, sizeof(tmp));
-
- if(!umid_inited){
- strcat(tmp, "XXXXXX");
- fd = mkstemp(tmp);
- if(fd < 0){
- (*printer)("make_umid - mkstemp(%s) failed: %s\n",
- tmp,strerror(errno));
- return(1);
- }
-
- os_close_file(fd);
- /* There's a nice tiny little race between this unlink and
- * the mkdir below. It'd be nice if there were a mkstemp
- * for directories.
- */
- unlink(tmp);
- set_umid(&tmp[strlen(uml_dir)], 1, printer);
- }
-
- sprintf(tmp, "%s%s", uml_dir, umid);
-
- err = mkdir(tmp, 0777);
- if(err < 0){
- if(errno == EEXIST){
- if(not_dead_yet(tmp)){
- (*printer)("umid '%s' is in use\n", umid);
- umid_owned = 0;
- return(-1);
- }
- err = mkdir(tmp, 0777);
- }
- }
- if(err < 0){
- (*printer)("Failed to create %s - errno = %d\n", umid, errno);
- return(-1);
- }
-
- umid_owned = 1;
- return 0;
-}
-
-__uml_setup("uml_dir=", set_uml_dir,
-"uml_dir=<directory>\n"
-" The location to place the pid and umid files.\n\n"
-);
-
-static int __init make_umid_setup(void)
-{
- /* one function with the ordering we need ... */
- make_uml_dir();
- make_umid(printf);
- create_pid_file();
- return 0;
-}
-__uml_postsetup(make_umid_setup);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
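
Both the removed kernel/umid.c above and its os-Linux replacement later in this diff decide whether a stale umid directory can be reclaimed by probing the pid recorded in its pid file: kill(pid, 0) delivers no signal but still performs the existence check, and ESRCH means the old UML is gone. A standalone sketch of just that probe (the program and its messages are illustrative only):

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>

    int main(int argc, char **argv)
    {
            pid_t pid;

            if (argc != 2) {
                    fprintf(stderr, "usage: %s <pid>\n", argv[0]);
                    return 1;
            }
            pid = strtoul(argv[1], NULL, 0);

            if (kill(pid, 0) == 0)
                    printf("%d is alive - leave its directory alone\n", (int) pid);
            else if (errno == ESRCH)
                    printf("%d is gone - the directory can be reclaimed\n", (int) pid);
            else
                    printf("%d exists but can't be signalled (errno %d)\n",
                           (int) pid, errno);
            return 0;
    }
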
diff --git a/arch/um/os-Linux/Makefile b/arch/um/os-Linux/Makefile
index b83ac8e21c3..40c7d6b1df6 100644
--- a/arch/um/os-Linux/Makefile
+++ b/arch/um/os-Linux/Makefile
@@ -4,11 +4,13 @@
#
obj-y = aio.o elf_aux.o file.o helper.o main.o mem.o process.o signal.o \
- start_up.o time.o tt.o tty.o uaccess.o user_syms.o drivers/ \
- sys-$(SUBARCH)/
+ start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o user_syms.o \
+ drivers/ sys-$(SUBARCH)/
+
+obj-$(CONFIG_MODE_SKAS) += skas/
USER_OBJS := aio.o elf_aux.o file.o helper.o main.o mem.o process.o signal.o \
- start_up.o time.o tt.o tty.o uaccess.o
+ start_up.o time.o trap.o tt.o tty.o uaccess.o umid.o
elf_aux.o: $(ARCH_DIR)/kernel-offsets.h
CFLAGS_elf_aux.o += -I$(objtree)/arch/um
diff --git a/arch/um/os-Linux/aio.c b/arch/um/os-Linux/aio.c
index ffa759addd3..f897140cc4a 100644
--- a/arch/um/os-Linux/aio.c
+++ b/arch/um/os-Linux/aio.c
@@ -16,12 +16,12 @@
#include "mode.h"
struct aio_thread_req {
- enum aio_type type;
- int io_fd;
- unsigned long long offset;
- char *buf;
- int len;
- struct aio_context *aio;
+ enum aio_type type;
+ int io_fd;
+ unsigned long long offset;
+ char *buf;
+ int len;
+ struct aio_context *aio;
};
static int aio_req_fd_r = -1;
@@ -38,18 +38,18 @@ static int aio_req_fd_w = -1;
static long io_setup(int n, aio_context_t *ctxp)
{
- return syscall(__NR_io_setup, n, ctxp);
+ return syscall(__NR_io_setup, n, ctxp);
}
static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp)
{
- return syscall(__NR_io_submit, ctx, nr, iocbpp);
+ return syscall(__NR_io_submit, ctx, nr, iocbpp);
}
static long io_getevents(aio_context_t ctx_id, long min_nr, long nr,
- struct io_event *events, struct timespec *timeout)
+ struct io_event *events, struct timespec *timeout)
{
- return syscall(__NR_io_getevents, ctx_id, min_nr, nr, events, timeout);
+ return syscall(__NR_io_getevents, ctx_id, min_nr, nr, events, timeout);
}
#endif
@@ -66,243 +66,245 @@ static long io_getevents(aio_context_t ctx_id, long min_nr, long nr,
*/
static int do_aio(aio_context_t ctx, enum aio_type type, int fd, char *buf,
- int len, unsigned long long offset, struct aio_context *aio)
+ int len, unsigned long long offset, struct aio_context *aio)
{
- struct iocb iocb, *iocbp = &iocb;
- char c;
- int err;
-
- iocb = ((struct iocb) { .aio_data = (unsigned long) aio,
- .aio_reqprio = 0,
- .aio_fildes = fd,
- .aio_buf = (unsigned long) buf,
- .aio_nbytes = len,
- .aio_offset = offset,
- .aio_reserved1 = 0,
- .aio_reserved2 = 0,
- .aio_reserved3 = 0 });
-
- switch(type){
- case AIO_READ:
- iocb.aio_lio_opcode = IOCB_CMD_PREAD;
- err = io_submit(ctx, 1, &iocbp);
- break;
- case AIO_WRITE:
- iocb.aio_lio_opcode = IOCB_CMD_PWRITE;
- err = io_submit(ctx, 1, &iocbp);
- break;
- case AIO_MMAP:
- iocb.aio_lio_opcode = IOCB_CMD_PREAD;
- iocb.aio_buf = (unsigned long) &c;
- iocb.aio_nbytes = sizeof(c);
- err = io_submit(ctx, 1, &iocbp);
- break;
- default:
- printk("Bogus op in do_aio - %d\n", type);
- err = -EINVAL;
- break;
- }
-
- if(err > 0)
- err = 0;
+ struct iocb iocb, *iocbp = &iocb;
+ char c;
+ int err;
+
+ iocb = ((struct iocb) { .aio_data = (unsigned long) aio,
+ .aio_reqprio = 0,
+ .aio_fildes = fd,
+ .aio_buf = (unsigned long) buf,
+ .aio_nbytes = len,
+ .aio_offset = offset,
+ .aio_reserved1 = 0,
+ .aio_reserved2 = 0,
+ .aio_reserved3 = 0 });
+
+ switch(type){
+ case AIO_READ:
+ iocb.aio_lio_opcode = IOCB_CMD_PREAD;
+ err = io_submit(ctx, 1, &iocbp);
+ break;
+ case AIO_WRITE:
+ iocb.aio_lio_opcode = IOCB_CMD_PWRITE;
+ err = io_submit(ctx, 1, &iocbp);
+ break;
+ case AIO_MMAP:
+ iocb.aio_lio_opcode = IOCB_CMD_PREAD;
+ iocb.aio_buf = (unsigned long) &c;
+ iocb.aio_nbytes = sizeof(c);
+ err = io_submit(ctx, 1, &iocbp);
+ break;
+ default:
+ printk("Bogus op in do_aio - %d\n", type);
+ err = -EINVAL;
+ break;
+ }
+
+ if(err > 0)
+ err = 0;
else
err = -errno;
- return err;
+ return err;
}
static aio_context_t ctx = 0;
static int aio_thread(void *arg)
{
- struct aio_thread_reply reply;
- struct io_event event;
- int err, n, reply_fd;
-
- signal(SIGWINCH, SIG_IGN);
-
- while(1){
- n = io_getevents(ctx, 1, 1, &event, NULL);
- if(n < 0){
- if(errno == EINTR)
- continue;
- printk("aio_thread - io_getevents failed, "
- "errno = %d\n", errno);
- }
- else {
- reply = ((struct aio_thread_reply)
- { .data = (void *) (long) event.data,
- .err = event.res });
+ struct aio_thread_reply reply;
+ struct io_event event;
+ int err, n, reply_fd;
+
+ signal(SIGWINCH, SIG_IGN);
+
+ while(1){
+ n = io_getevents(ctx, 1, 1, &event, NULL);
+ if(n < 0){
+ if(errno == EINTR)
+ continue;
+ printk("aio_thread - io_getevents failed, "
+ "errno = %d\n", errno);
+ }
+ else {
+ reply = ((struct aio_thread_reply)
+ { .data = (void *) (long) event.data,
+ .err = event.res });
reply_fd = ((struct aio_context *) reply.data)->reply_fd;
err = os_write_file(reply_fd, &reply, sizeof(reply));
- if(err != sizeof(reply))
+ if(err != sizeof(reply))
printk("aio_thread - write failed, fd = %d, "
- "err = %d\n", aio_req_fd_r, -err);
- }
- }
- return 0;
+ "err = %d\n", aio_req_fd_r, -err);
+ }
+ }
+ return 0;
}
#endif
static int do_not_aio(struct aio_thread_req *req)
{
- char c;
- int err;
-
- switch(req->type){
- case AIO_READ:
- err = os_seek_file(req->io_fd, req->offset);
- if(err)
- goto out;
-
- err = os_read_file(req->io_fd, req->buf, req->len);
- break;
- case AIO_WRITE:
- err = os_seek_file(req->io_fd, req->offset);
- if(err)
- goto out;
-
- err = os_write_file(req->io_fd, req->buf, req->len);
- break;
- case AIO_MMAP:
- err = os_seek_file(req->io_fd, req->offset);
- if(err)
- goto out;
-
- err = os_read_file(req->io_fd, &c, sizeof(c));
- break;
- default:
- printk("do_not_aio - bad request type : %d\n", req->type);
- err = -EINVAL;
- break;
- }
-
- out:
- return err;
+ char c;
+ int err;
+
+ switch(req->type){
+ case AIO_READ:
+ err = os_seek_file(req->io_fd, req->offset);
+ if(err)
+ goto out;
+
+ err = os_read_file(req->io_fd, req->buf, req->len);
+ break;
+ case AIO_WRITE:
+ err = os_seek_file(req->io_fd, req->offset);
+ if(err)
+ goto out;
+
+ err = os_write_file(req->io_fd, req->buf, req->len);
+ break;
+ case AIO_MMAP:
+ err = os_seek_file(req->io_fd, req->offset);
+ if(err)
+ goto out;
+
+ err = os_read_file(req->io_fd, &c, sizeof(c));
+ break;
+ default:
+ printk("do_not_aio - bad request type : %d\n", req->type);
+ err = -EINVAL;
+ break;
+ }
+
+out:
+ return err;
}
static int not_aio_thread(void *arg)
{
- struct aio_thread_req req;
- struct aio_thread_reply reply;
- int err;
-
- signal(SIGWINCH, SIG_IGN);
- while(1){
- err = os_read_file(aio_req_fd_r, &req, sizeof(req));
- if(err != sizeof(req)){
- if(err < 0)
- printk("not_aio_thread - read failed, "
- "fd = %d, err = %d\n", aio_req_fd_r,
- -err);
- else {
- printk("not_aio_thread - short read, fd = %d, "
- "length = %d\n", aio_req_fd_r, err);
- }
- continue;
- }
- err = do_not_aio(&req);
- reply = ((struct aio_thread_reply) { .data = req.aio,
- .err = err });
- err = os_write_file(req.aio->reply_fd, &reply, sizeof(reply));
- if(err != sizeof(reply))
- printk("not_aio_thread - write failed, fd = %d, "
- "err = %d\n", aio_req_fd_r, -err);
- }
+ struct aio_thread_req req;
+ struct aio_thread_reply reply;
+ int err;
+
+ signal(SIGWINCH, SIG_IGN);
+ while(1){
+ err = os_read_file(aio_req_fd_r, &req, sizeof(req));
+ if(err != sizeof(req)){
+ if(err < 0)
+ printk("not_aio_thread - read failed, "
+ "fd = %d, err = %d\n", aio_req_fd_r,
+ -err);
+ else {
+ printk("not_aio_thread - short read, fd = %d, "
+ "length = %d\n", aio_req_fd_r, err);
+ }
+ continue;
+ }
+ err = do_not_aio(&req);
+ reply = ((struct aio_thread_reply) { .data = req.aio,
+ .err = err });
+ err = os_write_file(req.aio->reply_fd, &reply, sizeof(reply));
+ if(err != sizeof(reply))
+ printk("not_aio_thread - write failed, fd = %d, "
+ "err = %d\n", aio_req_fd_r, -err);
+ }
+
+ return 0;
}
static int aio_pid = -1;
static int init_aio_24(void)
{
- unsigned long stack;
- int fds[2], err;
-
- err = os_pipe(fds, 1, 1);
- if(err)
- goto out;
-
- aio_req_fd_w = fds[0];
- aio_req_fd_r = fds[1];
- err = run_helper_thread(not_aio_thread, NULL,
- CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0);
- if(err < 0)
- goto out_close_pipe;
-
- aio_pid = err;
- goto out;
-
- out_close_pipe:
- os_close_file(fds[0]);
- os_close_file(fds[1]);
- aio_req_fd_w = -1;
- aio_req_fd_r = -1;
- out:
+ unsigned long stack;
+ int fds[2], err;
+
+ err = os_pipe(fds, 1, 1);
+ if(err)
+ goto out;
+
+ aio_req_fd_w = fds[0];
+ aio_req_fd_r = fds[1];
+ err = run_helper_thread(not_aio_thread, NULL,
+ CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0);
+ if(err < 0)
+ goto out_close_pipe;
+
+ aio_pid = err;
+ goto out;
+
+out_close_pipe:
+ os_close_file(fds[0]);
+ os_close_file(fds[1]);
+ aio_req_fd_w = -1;
+ aio_req_fd_r = -1;
+out:
#ifndef HAVE_AIO_ABI
printk("/usr/include/linux/aio_abi.h not present during build\n");
#endif
printk("2.6 host AIO support not used - falling back to I/O "
"thread\n");
- return 0;
+ return 0;
}
#ifdef HAVE_AIO_ABI
#define DEFAULT_24_AIO 0
static int init_aio_26(void)
{
- unsigned long stack;
- int err;
+ unsigned long stack;
+ int err;
- if(io_setup(256, &ctx)){
+ if(io_setup(256, &ctx)){
err = -errno;
- printk("aio_thread failed to initialize context, err = %d\n",
- errno);
- return err;
- }
+ printk("aio_thread failed to initialize context, err = %d\n",
+ errno);
+ return err;
+ }
- err = run_helper_thread(aio_thread, NULL,
- CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0);
- if(err < 0)
- return err;
+ err = run_helper_thread(aio_thread, NULL,
+ CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0);
+ if(err < 0)
+ return err;
- aio_pid = err;
+ aio_pid = err;
printk("Using 2.6 host AIO\n");
- return 0;
+ return 0;
}
static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len,
unsigned long long offset, struct aio_context *aio)
{
- struct aio_thread_reply reply;
- int err;
-
- err = do_aio(ctx, type, io_fd, buf, len, offset, aio);
- if(err){
- reply = ((struct aio_thread_reply) { .data = aio,
- .err = err });
- err = os_write_file(aio->reply_fd, &reply, sizeof(reply));
- if(err != sizeof(reply))
- printk("submit_aio_26 - write failed, "
- "fd = %d, err = %d\n", aio->reply_fd, -err);
- else err = 0;
- }
-
- return err;
+ struct aio_thread_reply reply;
+ int err;
+
+ err = do_aio(ctx, type, io_fd, buf, len, offset, aio);
+ if(err){
+ reply = ((struct aio_thread_reply) { .data = aio,
+ .err = err });
+ err = os_write_file(aio->reply_fd, &reply, sizeof(reply));
+ if(err != sizeof(reply))
+ printk("submit_aio_26 - write failed, "
+ "fd = %d, err = %d\n", aio->reply_fd, -err);
+ else err = 0;
+ }
+
+ return err;
}
#else
#define DEFAULT_24_AIO 1
static int init_aio_26(void)
{
- return -ENOSYS;
+ return -ENOSYS;
}
static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len,
unsigned long long offset, struct aio_context *aio)
{
- return -ENOSYS;
+ return -ENOSYS;
}
#endif
@@ -310,8 +312,8 @@ static int aio_24 = DEFAULT_24_AIO;
static int __init set_aio_24(char *name, int *add)
{
- aio_24 = 1;
- return 0;
+ aio_24 = 1;
+ return 0;
}
__uml_setup("aio=2.4", set_aio_24,
@@ -328,28 +330,27 @@ __uml_setup("aio=2.4", set_aio_24,
static int init_aio(void)
{
- int err;
-
- CHOOSE_MODE(({
- if(!aio_24){
- printk("Disabling 2.6 AIO in tt mode\n");
- aio_24 = 1;
- } }), (void) 0);
-
- if(!aio_24){
- err = init_aio_26();
- if(err && (errno == ENOSYS)){
- printk("2.6 AIO not supported on the host - "
- "reverting to 2.4 AIO\n");
- aio_24 = 1;
- }
- else return err;
- }
-
- if(aio_24)
- return init_aio_24();
-
- return 0;
+ int err;
+
+ CHOOSE_MODE(({ if(!aio_24){
+ printk("Disabling 2.6 AIO in tt mode\n");
+ aio_24 = 1;
+ } }), (void) 0);
+
+ if(!aio_24){
+ err = init_aio_26();
+ if(err && (errno == ENOSYS)){
+ printk("2.6 AIO not supported on the host - "
+ "reverting to 2.4 AIO\n");
+ aio_24 = 1;
+ }
+ else return err;
+ }
+
+ if(aio_24)
+ return init_aio_24();
+
+ return 0;
}
/* The reason for the __initcall/__uml_exitcall asymmetry is that init_aio
@@ -362,8 +363,8 @@ __initcall(init_aio);
static void exit_aio(void)
{
- if(aio_pid != -1)
- os_kill_process(aio_pid, 1);
+ if(aio_pid != -1)
+ os_kill_process(aio_pid, 1);
}
__uml_exitcall(exit_aio);
@@ -371,30 +372,30 @@ __uml_exitcall(exit_aio);
static int submit_aio_24(enum aio_type type, int io_fd, char *buf, int len,
unsigned long long offset, struct aio_context *aio)
{
- struct aio_thread_req req = { .type = type,
- .io_fd = io_fd,
- .offset = offset,
- .buf = buf,
- .len = len,
- .aio = aio,
- };
- int err;
-
- err = os_write_file(aio_req_fd_w, &req, sizeof(req));
- if(err == sizeof(req))
- err = 0;
-
- return err;
+ struct aio_thread_req req = { .type = type,
+ .io_fd = io_fd,
+ .offset = offset,
+ .buf = buf,
+ .len = len,
+ .aio = aio,
+ };
+ int err;
+
+ err = os_write_file(aio_req_fd_w, &req, sizeof(req));
+ if(err == sizeof(req))
+ err = 0;
+
+ return err;
}
int submit_aio(enum aio_type type, int io_fd, char *buf, int len,
- unsigned long long offset, int reply_fd,
- struct aio_context *aio)
+ unsigned long long offset, int reply_fd,
+ struct aio_context *aio)
{
- aio->reply_fd = reply_fd;
- if(aio_24)
- return submit_aio_24(type, io_fd, buf, len, offset, aio);
- else {
- return submit_aio_26(type, io_fd, buf, len, offset, aio);
- }
+ aio->reply_fd = reply_fd;
+ if(aio_24)
+ return submit_aio_24(type, io_fd, buf, len, offset, aio);
+ else {
+ return submit_aio_26(type, io_fd, buf, len, offset, aio);
+ }
}
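
The 2.6 path above drives the host's native AIO interface through raw syscall() stubs because glibc provides no wrappers for io_setup(), io_submit() and io_getevents(). A minimal, self-contained sketch of the same pattern - one asynchronous read of a file named on the command line; the buffer size and error handling are kept deliberately small:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/aio_abi.h>

    static long io_setup(unsigned n, aio_context_t *ctx)
    {
            return syscall(__NR_io_setup, n, ctx);
    }

    static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp)
    {
            return syscall(__NR_io_submit, ctx, nr, iocbpp);
    }

    static long io_getevents(aio_context_t ctx, long min_nr, long nr,
                             struct io_event *events, struct timespec *timeout)
    {
            return syscall(__NR_io_getevents, ctx, min_nr, nr, events, timeout);
    }

    int main(int argc, char **argv)
    {
            aio_context_t ctx = 0;
            struct iocb iocb, *iocbp = &iocb;
            struct io_event event;
            char buf[128];
            int fd;

            if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0)
                    return 1;
            if (io_setup(1, &ctx) < 0)
                    return 1;

            /* Same submission shape as do_aio(): one iocb, opcode PREAD. */
            memset(&iocb, 0, sizeof(iocb));
            iocb.aio_fildes = fd;
            iocb.aio_lio_opcode = IOCB_CMD_PREAD;
            iocb.aio_buf = (unsigned long) buf;
            iocb.aio_nbytes = sizeof(buf);
            iocb.aio_offset = 0;

            if (io_submit(ctx, 1, &iocbp) != 1)
                    return 1;
            if (io_getevents(ctx, 1, 1, &event, NULL) != 1)
                    return 1;

            printf("read %lld bytes asynchronously\n", (long long) event.res);
            close(fd);
            return 0;
    }
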
diff --git a/arch/um/os-Linux/main.c b/arch/um/os-Linux/main.c
index 23da27d2256..172c8474453 100644
--- a/arch/um/os-Linux/main.c
+++ b/arch/um/os-Linux/main.c
@@ -16,7 +16,6 @@
#include "user_util.h"
#include "kern_util.h"
#include "mem_user.h"
-#include "signal_user.h"
#include "time_user.h"
#include "irq_user.h"
#include "user.h"
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
index d9c52387c4a..39815c6b5e4 100644
--- a/arch/um/os-Linux/process.c
+++ b/arch/um/os-Linux/process.c
@@ -15,7 +15,6 @@
#include "os.h"
#include "user.h"
#include "user_util.h"
-#include "signal_user.h"
#include "process.h"
#include "irq_user.h"
#include "kern_util.h"
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index c7bfd5ee392..c1f46a0fef1 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -4,9 +4,22 @@
*/
#include <signal.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <string.h>
+#include <sys/mman.h>
+#include "user_util.h"
+#include "kern_util.h"
+#include "user.h"
+#include "signal_kern.h"
+#include "sysdep/sigcontext.h"
+#include "sysdep/signal.h"
+#include "sigcontext.h"
#include "time_user.h"
#include "mode.h"
-#include "sysdep/signal.h"
void sig_handler(ARCH_SIGHDLR_PARAM)
{
@@ -36,13 +49,138 @@ void alarm_handler(ARCH_SIGHDLR_PARAM)
switch_timers(1);
}
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
+void set_sigstack(void *sig_stack, int size)
+{
+ stack_t stack = ((stack_t) { .ss_flags = 0,
+ .ss_sp = (__ptr_t) sig_stack,
+ .ss_size = size - sizeof(void *) });
+
+ if(sigaltstack(&stack, NULL) != 0)
+ panic("enabling signal stack failed, errno = %d\n", errno);
+}
+
+void remove_sigstack(void)
+{
+ stack_t stack = ((stack_t) { .ss_flags = SS_DISABLE,
+ .ss_sp = NULL,
+ .ss_size = 0 });
+
+ if(sigaltstack(&stack, NULL) != 0)
+ panic("disabling signal stack failed, errno = %d\n", errno);
+}
+
+void set_handler(int sig, void (*handler)(int), int flags, ...)
+{
+ struct sigaction action;
+ va_list ap;
+ int mask;
+
+ va_start(ap, flags);
+ action.sa_handler = handler;
+ sigemptyset(&action.sa_mask);
+ while((mask = va_arg(ap, int)) != -1){
+ sigaddset(&action.sa_mask, mask);
+ }
+ va_end(ap);
+ action.sa_flags = flags;
+ action.sa_restorer = NULL;
+ if(sigaction(sig, &action, NULL) < 0)
+ panic("sigaction failed");
+}
+
+int change_sig(int signal, int on)
+{
+ sigset_t sigset, old;
+
+ sigemptyset(&sigset);
+ sigaddset(&sigset, signal);
+ sigprocmask(on ? SIG_UNBLOCK : SIG_BLOCK, &sigset, &old);
+ return(!sigismember(&old, signal));
+}
+
+/* Both here and in set/get_signal we don't touch SIGPROF, because we must not
+ * disable profiling; it's safe because the profiling code does not interact
+ * with the kernel code at all.*/
+
+static void change_signals(int type)
+{
+ sigset_t mask;
+
+ sigemptyset(&mask);
+ sigaddset(&mask, SIGVTALRM);
+ sigaddset(&mask, SIGALRM);
+ sigaddset(&mask, SIGIO);
+ if(sigprocmask(type, &mask, NULL) < 0)
+ panic("Failed to change signal mask - errno = %d", errno);
+}
+
+void block_signals(void)
+{
+ change_signals(SIG_BLOCK);
+}
+
+void unblock_signals(void)
+{
+ change_signals(SIG_UNBLOCK);
+}
+
+/* These are the asynchronous signals. SIGVTALRM and SIGALRM are handled
+ * together under SIGVTALRM_BIT. SIGPROF is excluded because we want to
+ * be able to profile all of UML, not just the non-critical sections. If
+ * profiling is not thread-safe, then that is not my problem. We can disable
+ * profiling when SMP is enabled in that case.
*/
+#define SIGIO_BIT 0
+#define SIGVTALRM_BIT 1
+
+static int enable_mask(sigset_t *mask)
+{
+ int sigs;
+
+ sigs = sigismember(mask, SIGIO) ? 0 : 1 << SIGIO_BIT;
+ sigs |= sigismember(mask, SIGVTALRM) ? 0 : 1 << SIGVTALRM_BIT;
+ sigs |= sigismember(mask, SIGALRM) ? 0 : 1 << SIGVTALRM_BIT;
+ return(sigs);
+}
+
+int get_signals(void)
+{
+ sigset_t mask;
+
+ if(sigprocmask(SIG_SETMASK, NULL, &mask) < 0)
+ panic("Failed to get signal mask");
+ return(enable_mask(&mask));
+}
+
+int set_signals(int enable)
+{
+ sigset_t mask;
+ int ret;
+
+ sigemptyset(&mask);
+ if(enable & (1 << SIGIO_BIT))
+ sigaddset(&mask, SIGIO);
+ if(enable & (1 << SIGVTALRM_BIT)){
+ sigaddset(&mask, SIGVTALRM);
+ sigaddset(&mask, SIGALRM);
+ }
+
+ /* This is safe - sigprocmask is guaranteed to copy locally the
+ * value of new_set, do its work and then, at the end, write to
+ * old_set.
+ */
+ if(sigprocmask(SIG_UNBLOCK, &mask, &mask) < 0)
+ panic("Failed to enable signals");
+ ret = enable_mask(&mask);
+ sigemptyset(&mask);
+ if((enable & (1 << SIGIO_BIT)) == 0)
+ sigaddset(&mask, SIGIO);
+ if((enable & (1 << SIGVTALRM_BIT)) == 0){
+ sigaddset(&mask, SIGVTALRM);
+ sigaddset(&mask, SIGALRM);
+ }
+ if(sigprocmask(SIG_BLOCK, &mask, NULL) < 0)
+ panic("Failed to block signals");
+
+ return(ret);
+}
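
set_signals() and get_signals() above encode the async-signal state as a two-bit value and rely on sigprocmask() reporting the previous mask through its third argument, so the caller gets the prior state back together with the change. A standalone sketch of that old-mask trick; the bit names mirror SIGIO_BIT/SIGVTALRM_BIT above, the rest is illustrative:

    #include <signal.h>
    #include <stdio.h>

    #define SIGIO_BIT      0
    #define SIGVTALRM_BIT  1

    /* Translate a signal mask into the two-bit "enabled" encoding. */
    static int enable_mask(const sigset_t *mask)
    {
            int sigs = 0;

            if (!sigismember(mask, SIGIO))
                    sigs |= 1 << SIGIO_BIT;
            if (!sigismember(mask, SIGVTALRM) || !sigismember(mask, SIGALRM))
                    sigs |= 1 << SIGVTALRM_BIT;
            return sigs;
    }

    int main(void)
    {
            sigset_t set, old;

            sigemptyset(&set);
            sigaddset(&set, SIGIO);

            /* Block SIGIO, then unblock it; the old mask handed back by
             * sigprocmask() is what encodes the previous state. */
            sigprocmask(SIG_BLOCK, &set, NULL);
            sigprocmask(SIG_UNBLOCK, &set, &old);
            printf("state while SIGIO was blocked: %#x\n", enable_mask(&old));

            sigprocmask(SIG_SETMASK, NULL, &old);
            printf("state now:                     %#x\n", enable_mask(&old));
            return 0;
    }
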
diff --git a/arch/um/os-Linux/skas/Makefile b/arch/um/os-Linux/skas/Makefile
new file mode 100644
index 00000000000..eab5386d60a
--- /dev/null
+++ b/arch/um/os-Linux/skas/Makefile
@@ -0,0 +1,10 @@
+#
+# Copyright (C) 2002 - 2004 Jeff Dike (jdike@addtoit.com)
+# Licensed under the GPL
+#
+
+obj-y := trap.o
+
+USER_OBJS := trap.o
+
+include arch/um/scripts/Makefile.rules
diff --git a/arch/um/kernel/skas/trap_user.c b/arch/um/os-Linux/skas/trap.c
index 9950a6716fe..9ad5fbec459 100644
--- a/arch/um/kernel/skas/trap_user.c
+++ b/arch/um/os-Linux/skas/trap.c
@@ -1,11 +1,10 @@
-/*
+/*
* Copyright (C) 2002 - 2003 Jeff Dike (jdike@addtoit.com)
* Licensed under the GPL
*/
#include <signal.h>
#include <errno.h>
-#include "signal_user.h"
#include "user_util.h"
#include "kern_util.h"
#include "task.h"
@@ -14,12 +13,13 @@
#include "ptrace_user.h"
#include "sysdep/ptrace.h"
#include "sysdep/ptrace_user.h"
+#include "os.h"
void sig_handler_common_skas(int sig, void *sc_ptr)
{
struct sigcontext *sc = sc_ptr;
struct skas_regs *r;
- struct signal_info *info;
+ void (*handler)(int, union uml_pt_regs *);
int save_errno = errno;
int save_user;
@@ -34,17 +34,22 @@ void sig_handler_common_skas(int sig, void *sc_ptr)
r = &TASK_REGS(get_current())->skas;
save_user = r->is_user;
r->is_user = 0;
- if ( sig == SIGFPE || sig == SIGSEGV ||
- sig == SIGBUS || sig == SIGILL ||
- sig == SIGTRAP ) {
- GET_FAULTINFO_FROM_SC(r->faultinfo, sc);
- }
+ if ( sig == SIGFPE || sig == SIGSEGV ||
+ sig == SIGBUS || sig == SIGILL ||
+ sig == SIGTRAP ) {
+ GET_FAULTINFO_FROM_SC(r->faultinfo, sc);
+ }
change_sig(SIGUSR1, 1);
- info = &sig_info[sig];
- if(!info->is_irq) unblock_signals();
- (*info->handler)(sig, (union uml_pt_regs *) r);
+ handler = sig_info[sig];
+
+ /* unblock SIGALRM, SIGVTALRM, SIGIO if sig isn't IRQ signal */
+ if (sig != SIGIO && sig != SIGWINCH &&
+ sig != SIGVTALRM && sig != SIGALRM)
+ unblock_signals();
+
+ handler(sig, (union uml_pt_regs *) r);
errno = save_errno;
r->is_user = save_user;
@@ -54,25 +59,15 @@ extern int ptrace_faultinfo;
void user_signal(int sig, union uml_pt_regs *regs, int pid)
{
- struct signal_info *info;
- int segv = ((sig == SIGFPE) || (sig == SIGSEGV) || (sig == SIGBUS) ||
- (sig == SIGILL) || (sig == SIGTRAP));
+ void (*handler)(int, union uml_pt_regs *);
+ int segv = ((sig == SIGFPE) || (sig == SIGSEGV) || (sig == SIGBUS) ||
+ (sig == SIGILL) || (sig == SIGTRAP));
if (segv)
get_skas_faultinfo(pid, &regs->skas.faultinfo);
- info = &sig_info[sig];
- (*info->handler)(sig, regs);
+
+ handler = sig_info[sig];
+ handler(sig, (union uml_pt_regs *) regs);
unblock_signals();
}
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index 29a9e3f4376..b47e5e71d1a 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -24,7 +24,6 @@
#include "kern_util.h"
#include "user.h"
#include "signal_kern.h"
-#include "signal_user.h"
#include "sysdep/ptrace.h"
#include "sysdep/sigcontext.h"
#include "irq_user.h"
diff --git a/arch/um/os-Linux/trap.c b/arch/um/os-Linux/trap.c
new file mode 100644
index 00000000000..321e1c8e227
--- /dev/null
+++ b/arch/um/os-Linux/trap.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#include <stdlib.h>
+#include <signal.h>
+#include <setjmp.h>
+#include "kern_util.h"
+#include "user_util.h"
+#include "os.h"
+#include "mode.h"
+
+void usr2_handler(int sig, union uml_pt_regs *regs)
+{
+ CHOOSE_MODE(syscall_handler_tt(sig, regs), (void) 0);
+}
+
+void (*sig_info[NSIG])(int, union uml_pt_regs *);
+
+void os_fill_handlinfo(struct kern_handlers h)
+{
+ sig_info[SIGTRAP] = h.relay_signal;
+ sig_info[SIGFPE] = h.relay_signal;
+ sig_info[SIGILL] = h.relay_signal;
+ sig_info[SIGWINCH] = h.winch;
+ sig_info[SIGBUS] = h.bus_handler;
+ sig_info[SIGSEGV] = h.page_fault;
+ sig_info[SIGIO] = h.sigio_handler;
+ sig_info[SIGVTALRM] = h.timer_handler;
+ sig_info[SIGALRM] = h.timer_handler;
+ sig_info[SIGUSR2] = usr2_handler;
+}
+
+void do_longjmp(void *b, int val)
+{
+ sigjmp_buf *buf = b;
+
+ siglongjmp(*buf, val);
+}
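
os_fill_handlinfo() above replaces the old per-signal struct table with a flat array of handler pointers filled once at boot from struct kern_handlers (the um_arch.c hunk earlier calls it from linux_main()). A toy sketch of the dispatch-table idea; the handlers and table size here are made up for illustration:

    #include <signal.h>
    #include <stdio.h>

    enum { TABLE_SIZE = 64 };

    typedef void (*sig_fn)(int sig);

    static void toy_timer(int sig) { printf("timer handler, sig %d\n", sig); }
    static void toy_fault(int sig) { printf("fault handler, sig %d\n", sig); }

    /* One slot per signal, filled once during startup. */
    static sig_fn sig_table[TABLE_SIZE];

    static void fill_table(void)
    {
            sig_table[SIGALRM]   = toy_timer;
            sig_table[SIGVTALRM] = toy_timer;
            sig_table[SIGSEGV]   = toy_fault;
    }

    /* Dispatch becomes one indexed call, as in sig_handler_common_*(). */
    static void dispatch(int sig)
    {
            if (sig > 0 && sig < TABLE_SIZE && sig_table[sig] != NULL)
                    sig_table[sig](sig);
    }

    int main(void)
    {
            fill_table();
            dispatch(SIGALRM);
            dispatch(SIGSEGV);
            return 0;
    }
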
diff --git a/arch/um/os-Linux/tt.c b/arch/um/os-Linux/tt.c
index a6db8877931..cb2648b79d0 100644
--- a/arch/um/os-Linux/tt.c
+++ b/arch/um/os-Linux/tt.c
@@ -23,7 +23,6 @@
#include "kern_util.h"
#include "user.h"
#include "signal_kern.h"
-#include "signal_user.h"
#include "sysdep/ptrace.h"
#include "sysdep/sigcontext.h"
#include "irq_user.h"
@@ -50,6 +49,20 @@ int protect_memory(unsigned long addr, unsigned long len, int r, int w, int x,
return(0);
}
+void kill_child_dead(int pid)
+{
+ kill(pid, SIGKILL);
+ kill(pid, SIGCONT);
+ do {
+ int n;
+ CATCH_EINTR(n = waitpid(pid, NULL, 0));
+ if (n > 0)
+ kill(pid, SIGCONT);
+ else
+ break;
+ } while(1);
+}
+
/*
*-------------------------
* only for tt mode (will be deleted in future...)
diff --git a/arch/um/os-Linux/umid.c b/arch/um/os-Linux/umid.c
new file mode 100644
index 00000000000..ecf107ae5ac
--- /dev/null
+++ b/arch/um/os-Linux/umid.c
@@ -0,0 +1,335 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <signal.h>
+#include <dirent.h>
+#include <sys/fcntl.h>
+#include <sys/stat.h>
+#include <sys/param.h>
+#include "init.h"
+#include "os.h"
+#include "user.h"
+#include "mode.h"
+
+#define UML_DIR "~/.uml/"
+
+#define UMID_LEN 64
+
+/* Changed by set_umid, which is run early in boot */
+char umid[UMID_LEN] = { 0 };
+
+/* Changed by set_uml_dir and make_uml_dir, which are run early in boot */
+static char *uml_dir = UML_DIR;
+
+static int __init make_uml_dir(void)
+{
+ char dir[512] = { '\0' };
+ int len, err;
+
+ if(*uml_dir == '~'){
+ char *home = getenv("HOME");
+
+ err = -ENOENT;
+ if(home == NULL){
+ printk("make_uml_dir : no value in environment for "
+ "$HOME\n");
+ goto err;
+ }
+ strlcpy(dir, home, sizeof(dir));
+ uml_dir++;
+ }
+ strlcat(dir, uml_dir, sizeof(dir));
+ len = strlen(dir);
+ if (len > 0 && dir[len - 1] != '/')
+ strlcat(dir, "/", sizeof(dir));
+
+ err = -ENOMEM;
+ uml_dir = malloc(strlen(dir) + 1);
+ if (uml_dir == NULL) {
+ printf("make_uml_dir : malloc failed, errno = %d\n", errno);
+ goto err;
+ }
+ strcpy(uml_dir, dir);
+
+ if((mkdir(uml_dir, 0777) < 0) && (errno != EEXIST)){
+ printf("Failed to mkdir '%s': %s\n", uml_dir, strerror(errno));
+ err = -errno;
+ goto err_free;
+ }
+ return 0;
+
+err_free:
+ free(uml_dir);
+err:
+ uml_dir = NULL;
+ return err;
+}
+
+static int actually_do_remove(char *dir)
+{
+ DIR *directory;
+ struct dirent *ent;
+ int len;
+ char file[256];
+
+ directory = opendir(dir);
+ if(directory == NULL)
+ return -errno;
+
+ while((ent = readdir(directory)) != NULL){
+ if(!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, ".."))
+ continue;
+ len = strlen(dir) + sizeof("/") + strlen(ent->d_name) + 1;
+ if(len > sizeof(file))
+ return -E2BIG;
+
+ sprintf(file, "%s/%s", dir, ent->d_name);
+ if(unlink(file) < 0)
+ return -errno;
+ }
+ if(rmdir(dir) < 0)
+ return -errno;
+
+ return 0;
+}
+
+/* This says that there isn't already a user of the specified directory even if
+ * there are errors during the checking. This is because if these errors
+ * happen, the directory is unusable by the pre-existing UML, so we might as
+ * well take it over. This could happen either by
+ * the existing UML somehow corrupting its umid directory
+ * something other than UML sticking stuff in the directory
+ * this boot racing with a shutdown of the other UML
+ * In any of these cases, the directory isn't useful for anything else.
+ */
+
+static int not_dead_yet(char *dir)
+{
+ char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
+ char pid[sizeof("nnnnn\0")], *end;
+ int dead, fd, p, n, err;
+
+ n = snprintf(file, sizeof(file), "%s/pid", dir);
+ if(n >= sizeof(file)){
+ printk("not_dead_yet - pid filename too long\n");
+ err = -E2BIG;
+ goto out;
+ }
+
+ dead = 0;
+ fd = open(file, O_RDONLY);
+ if(fd < 0){
+ if(fd != -ENOENT){
+ printk("not_dead_yet : couldn't open pid file '%s', "
+ "err = %d\n", file, -fd);
+ }
+ goto out;
+ }
+
+ err = 0;
+ n = read(fd, pid, sizeof(pid));
+ if(n <= 0){
+ printk("not_dead_yet : couldn't read pid file '%s', "
+ "err = %d\n", file, -n);
+ goto out_close;
+ }
+
+ p = strtoul(pid, &end, 0);
+ if(end == pid){
+ printk("not_dead_yet : couldn't parse pid file '%s', "
+ "errno = %d\n", file, errno);
+ goto out_close;
+ }
+
+ if((kill(p, 0) == 0) || (errno != ESRCH))
+ return 1;
+
+ err = actually_do_remove(dir);
+ if(err)
+ printk("not_dead_yet - actually_do_remove failed with "
+ "err = %d\n", err);
+
+ return err;
+
+ out_close:
+ close(fd);
+ out:
+ return 0;
+}
+
+static void __init create_pid_file(void)
+{
+ char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
+ char pid[sizeof("nnnnn\0")];
+ int fd, n;
+
+ if(umid_file_name("pid", file, sizeof(file)))
+ return;
+
+ fd = open(file, O_RDWR | O_CREAT | O_EXCL, 0644);
+ if(fd < 0){
+ printk("Open of machine pid file \"%s\" failed: %s\n",
+ file, strerror(-fd));
+ return;
+ }
+
+ snprintf(pid, sizeof(pid), "%d\n", getpid());
+ n = write(fd, pid, strlen(pid));
+ if(n != strlen(pid))
+ printk("Write of pid file failed - err = %d\n", -n);
+
+ close(fd);
+}
+
+int __init set_umid(char *name)
+{
+ if(strlen(name) > UMID_LEN - 1)
+ return -E2BIG;
+
+ strlcpy(umid, name, sizeof(umid));
+
+ return 0;
+}
+
+static int umid_setup = 0;
+
+int __init make_umid(void)
+{
+ int fd, err;
+ char tmp[256];
+
+ if(umid_setup)
+ return 0;
+
+ make_uml_dir();
+
+ if(*umid == '\0'){
+ strlcpy(tmp, uml_dir, sizeof(tmp));
+ strlcat(tmp, "XXXXXX", sizeof(tmp));
+ fd = mkstemp(tmp);
+ if(fd < 0){
+ printk("make_umid - mkstemp(%s) failed: %s\n",
+ tmp, strerror(errno));
+ err = -errno;
+ goto err;
+ }
+
+ close(fd);
+
+ set_umid(&tmp[strlen(uml_dir)]);
+
+ /* There's a nice tiny little race between this unlink and
+ * the mkdir below. It'd be nice if there were a mkstemp
+ * for directories.
+ */
+ if(unlink(tmp)){
+ err = -errno;
+ goto err;
+ }
+ }
+
+ snprintf(tmp, sizeof(tmp), "%s%s", uml_dir, umid);
+ err = mkdir(tmp, 0777);
+ if(err < 0){
+ err = -errno;
+ if(errno != EEXIST)
+ goto err;
+
+ if(not_dead_yet(tmp) < 0)
+ goto err;
+
+ err = mkdir(tmp, 0777);
+ }
+ if(err < 0){
+ printk("Failed to create '%s' - err = %d\n", umid, err);
+ goto err_rmdir;
+ }
+
+ umid_setup = 1;
+
+ create_pid_file();
+
+ return 0;
+
+ err_rmdir:
+ rmdir(tmp);
+ err:
+ return err;
+}
+
+static int __init make_umid_init(void)
+{
+ make_umid();
+
+ return 0;
+}
+
+__initcall(make_umid_init);
+
+int __init umid_file_name(char *name, char *buf, int len)
+{
+ int n, err;
+
+ err = make_umid();
+ if(err)
+ return err;
+
+ n = snprintf(buf, len, "%s%s/%s", uml_dir, umid, name);
+ if(n >= len){
+ printk("umid_file_name : buffer too short\n");
+ return -E2BIG;
+ }
+
+ return 0;
+}
+
+char *get_umid(void)
+{
+ return umid;
+}
+
+static int __init set_uml_dir(char *name, int *add)
+{
+ if(*name == '\0'){
+ printf("uml_dir can't be an empty string\n");
+ return 0;
+ }
+
+ if(name[strlen(name) - 1] == '/'){
+ uml_dir = name;
+ return 0;
+ }
+
+ uml_dir = malloc(strlen(name) + 2);
+ if(uml_dir == NULL){
+ printf("Failed to malloc uml_dir - error = %d\n", errno);
+
+ /* Return 0 here because do_initcalls doesn't look at
+ * the return value.
+ */
+ return 0;
+ }
+ sprintf(uml_dir, "%s/", name);
+
+ return 0;
+}
+
+__uml_setup("uml_dir=", set_uml_dir,
+"uml_dir=<directory>\n"
+" The location to place the pid and umid files.\n\n"
+);
+
+static void remove_umid_dir(void)
+{
+ char dir[strlen(uml_dir) + UMID_LEN + 1]; int err;
+
+ sprintf(dir, "%s%s", uml_dir, umid);
+ err = actually_do_remove(dir);
+ if(err)
+ printf("remove_umid_dir - actually_do_remove failed with "
+ "err = %d\n", err);
+}
+
+__uml_exitcall(remove_umid_dir);
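
make_umid() above generates a random umid by reserving a name with mkstemp(), unlinking the file, and re-creating the name as a directory - with the small window the in-code comment owns up to. A standalone sketch of both that pattern and the race-free mkdtemp() alternative; the /tmp/uml- prefix is just an example:

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>
    #include <sys/stat.h>

    int main(void)
    {
            char racy[] = "/tmp/uml-XXXXXX";
            char atomic[] = "/tmp/uml-XXXXXX";
            int fd;

            /* make_umid()'s pattern: mkstemp() picks a unique name, then
             * the file is dropped and the name reused for mkdir().  Another
             * process could grab the name in between. */
            fd = mkstemp(racy);
            if (fd < 0)
                    return 1;
            close(fd);
            unlink(racy);
            if (mkdir(racy, 0777) < 0)
                    perror("mkdir after mkstemp");
            else
                    printf("racy variant created %s\n", racy);

            /* mkdtemp() creates the unique directory atomically. */
            if (mkdtemp(atomic) == NULL)
                    perror("mkdtemp");
            else
                    printf("mkdtemp created %s\n", atomic);
            return 0;
    }
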
diff --git a/arch/um/sys-i386/signal.c b/arch/um/sys-i386/signal.c
index 16bc19928b3..7cd1a82dc8c 100644
--- a/arch/um/sys-i386/signal.c
+++ b/arch/um/sys-i386/signal.c
@@ -10,7 +10,6 @@
#include "asm/uaccess.h"
#include "asm/unistd.h"
#include "frame_kern.h"
-#include "signal_user.h"
#include "sigcontext.h"
#include "registers.h"
#include "mode.h"
diff --git a/arch/v850/Kconfig b/arch/v850/Kconfig
index 31086590323..04494638b96 100644
--- a/arch/v850/Kconfig
+++ b/arch/v850/Kconfig
@@ -10,9 +10,6 @@ mainmenu "uClinux/v850 (w/o MMU) Kernel Configuration"
config MMU
bool
default n
-config UID16
- bool
- default n
config RWSEM_GENERIC_SPINLOCK
bool
default y
diff --git a/arch/v850/kernel/process.c b/arch/v850/kernel/process.c
index 39cf247cdae..eb909937958 100644
--- a/arch/v850/kernel/process.c
+++ b/arch/v850/kernel/process.c
@@ -114,7 +114,7 @@ int copy_thread (int nr, unsigned long clone_flags,
struct task_struct *p, struct pt_regs *regs)
{
/* Start pushing stuff from the top of the child's kernel stack. */
- unsigned long orig_ksp = (unsigned long)p->thread_info + THREAD_SIZE;
+ unsigned long orig_ksp = task_tos(p);
unsigned long ksp = orig_ksp;
/* We push two `state save' stack fames (see entry.S) on the new
kernel stack:
@@ -164,30 +164,6 @@ int copy_thread (int nr, unsigned long clone_flags,
}
/*
- * fill in the user structure for a core dump..
- */
-void dump_thread (struct pt_regs *regs, struct user *dump)
-{
-#if 0 /* Later. XXX */
- dump->magic = CMAGIC;
- dump->start_code = 0;
- dump->start_stack = regs->gpr[GPR_SP];
- dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
- dump->u_dsize = ((unsigned long) (current->mm->brk +
- (PAGE_SIZE-1))) >> PAGE_SHIFT;
- dump->u_dsize -= dump->u_tsize;
- dump->u_ssize = 0;
-
- if (dump->start_stack < TASK_SIZE)
- dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
-
- dump->u_ar0 = (struct user_regs_struct *)((int)&dump->regs - (int)dump);
- dump->regs = *regs;
- dump->u_fpvalid = 0;
-#endif
-}
-
-/*
* sys_execve() executes a new program.
*/
int sys_execve (char *name, char **argv, char **envp, struct pt_regs *regs)
diff --git a/arch/v850/kernel/ptrace.c b/arch/v850/kernel/ptrace.c
index 18492d02aaf..67e05750966 100644
--- a/arch/v850/kernel/ptrace.c
+++ b/arch/v850/kernel/ptrace.c
@@ -58,7 +58,7 @@ static v850_reg_t *reg_save_addr (unsigned reg_offs, struct task_struct *t)
regs = thread_saved_regs (t);
else
/* Register saved during kernel entry (or not available). */
- regs = task_regs (t);
+ regs = task_pt_regs (t);
return (v850_reg_t *)((char *)regs + reg_offs);
}
diff --git a/arch/v850/kernel/v850_ksyms.c b/arch/v850/kernel/v850_ksyms.c
index 0ca64900dd9..8ffc29c1c89 100644
--- a/arch/v850/kernel/v850_ksyms.c
+++ b/arch/v850/kernel/v850_ksyms.c
@@ -21,8 +21,6 @@ extern void *trap_table;
EXPORT_SYMBOL (trap_table);
/* platform dependent support */
-extern void dump_thread (struct pt_regs *, struct user *);
-EXPORT_SYMBOL (dump_thread);
EXPORT_SYMBOL (kernel_thread);
EXPORT_SYMBOL (enable_irq);
EXPORT_SYMBOL (disable_irq);
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 6ece645e4db..2efc4be2270 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -69,12 +69,34 @@ config ARCH_MAY_HAVE_PC_FDC
bool
default y
+config DMI
+ bool
+ default y
+
source "init/Kconfig"
menu "Processor type and features"
choice
+ prompt "Subarchitecture Type"
+ default X86_PC
+
+config X86_PC
+ bool "PC-compatible"
+ help
+ Choose this option if your computer is a standard PC or compatible.
+
+config X86_VSMP
+ bool "Support for ScaleMP vSMP"
+ help
+ Support for ScaleMP vSMP systems. Say 'Y' here if this kernel is
+ supposed to run on these EM64T-based machines. Only choose this option
+ if you have one of these machines.
+
+endchoice
+
+choice
prompt "Processor family"
default MK8
@@ -347,32 +369,24 @@ config HPET_EMULATE_RTC
depends on HPET_TIMER && RTC=y
config GART_IOMMU
- bool "IOMMU support"
+ bool "K8 GART IOMMU support"
default y
+ select SWIOTLB
depends on PCI
help
Support the IOMMU. Needed to run systems with more than 3GB of memory
properly with 32-bit PCI devices that do not support DAC (Double Address
Cycle). The IOMMU can be turned off at runtime with the iommu=off parameter.
Normally the kernel will take the right choice by itself.
- This option includes a driver for the AMD Opteron/Athlon64 IOMMU
- and a software emulation used on some other systems.
+ This option includes a driver for the AMD Opteron/Athlon64 northbridge IOMMU
+ and a software emulation used on other systems.
If unsure, say Y.
# need this always enabled with GART_IOMMU for the VIA workaround
config SWIOTLB
- bool
- depends on GART_IOMMU
- default y
-
-config DUMMY_IOMMU
bool
- depends on !GART_IOMMU && !SWIOTLB
default y
- help
- Don't use IOMMU code. This will cause problems when you have more than 4GB
- of memory and any 32-bit devices. Don't turn on unless you know what you
- are doing.
+ depends on GART_IOMMU
config X86_MCE
bool "Machine check support" if EMBEDDED
@@ -399,17 +413,6 @@ config X86_MCE_AMD
Additional support for AMD specific MCE features such as
the DRAM Error Threshold.
-config PHYSICAL_START
- hex "Physical address where the kernel is loaded" if EMBEDDED
- default "0x100000"
- help
- This gives the physical address where the kernel is loaded.
- Primarily used in the case of kexec on panic where the
- fail safe kernel needs to run at a different address than
- the panic-ed kernel.
-
- Don't change this unless you know what you are doing.
-
config KEXEC
bool "kexec system call (EXPERIMENTAL)"
depends on EXPERIMENTAL
@@ -427,6 +430,31 @@ config KEXEC
support. As of this writing the exact hardware interface is
strongly in flux, so no good recommendation can be made.
+config CRASH_DUMP
+ bool "kernel crash dumps (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ Generate crash dump after being started by kexec.
+
+config PHYSICAL_START
+ hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
+ default "0x1000000" if CRASH_DUMP
+ default "0x100000"
+ help
+ This gives the physical address where the kernel is loaded. Normally
+ for regular kernels this value is 0x100000 (1MB). But in the case
+ of kexec on panic, the fail-safe kernel needs to run at a different
+ address than the panicked kernel. This option is used to set the load
+ address for kernels used to capture a crash dump on being kexec'ed
+ after panic. The default value for crash dump kernels is
+ 0x1000000 (16MB). This can also be set based on the "X" value as
+ specified in the "crashkernel=YM@XM" command line boot parameter
+ passed to the panicked kernel. Typically this parameter is set as
+ crashkernel=64M@16M. Please take a look at
+ Documentation/kdump/kdump.txt for more details about crash dumps.
+
+ Don't change this unless you know what you are doing.
+
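(Worked example of the help text above: booting the production kernel with crashkernel=64M@16M reserves 64MB of RAM starting at the 16MB mark; the capture kernel is then built with CONFIG_PHYSICAL_START=0x1000000 so that it loads and runs entirely inside that reserved window.)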
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
depends on PROC_FS
@@ -542,11 +570,6 @@ config SYSVIPC_COMPAT
depends on COMPAT && SYSVIPC
default y
-config UID16
- bool
- depends on IA32_EMULATION
- default y
-
endmenu
source "net/Kconfig"
diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug
index e2c6e64a85e..fcb06a50fdd 100644
--- a/arch/x86_64/Kconfig.debug
+++ b/arch/x86_64/Kconfig.debug
@@ -9,6 +9,16 @@ config INIT_DEBUG
Fill __init and __initdata at the end of boot. This helps debugging
illegal uses of __init and __initdata after initialization.
+config DEBUG_RODATA
+ bool "Write protect kernel read-only data structures"
+ depends on DEBUG_KERNEL
+ help
+ Mark the kernel read-only data as write-protected in the pagetables,
+ in order to catch accidental (and incorrect) writes to such const data.
+ This option may have a slight performance impact because a portion
+ of the kernel code won't be covered by a 2MB TLB anymore.
+ If in doubt, say "N".
+
config IOMMU_DEBUG
depends on GART_IOMMU && DEBUG_KERNEL
bool "Enable IOMMU debugging"
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
index a9cd42e6182..d7fd46479c5 100644
--- a/arch/x86_64/Makefile
+++ b/arch/x86_64/Makefile
@@ -31,6 +31,7 @@ cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
CFLAGS += $(cflags-y)
+CFLAGS += -m64
CFLAGS += -mno-red-zone
CFLAGS += -mcmodel=kernel
CFLAGS += -pipe
@@ -38,8 +39,10 @@ CFLAGS += -pipe
# actually it makes the kernel smaller too.
CFLAGS += -fno-reorder-blocks
CFLAGS += -Wno-sign-compare
-ifneq ($(CONFIG_DEBUG_INFO),y)
+ifneq ($(CONFIG_UNWIND_INFO),y)
CFLAGS += -fno-asynchronous-unwind-tables
+endif
+ifneq ($(CONFIG_DEBUG_INFO),y)
# -fweb shrinks the kernel a bit, but the difference is very small
# it also messes up debugging, so don't use it for now.
#CFLAGS += $(call cc-option,-fweb)
@@ -50,6 +53,8 @@ CFLAGS += $(call cc-option,-funit-at-a-time)
# prevent gcc from generating any FP code by mistake
CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
+AFLAGS += -m64
+
head-y := arch/x86_64/kernel/head.o arch/x86_64/kernel/head64.o arch/x86_64/kernel/init_task.o
libs-y += arch/x86_64/lib/
@@ -80,9 +85,12 @@ bzlilo: vmlinux
bzdisk: vmlinux
$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) zdisk
-install fdimage fdimage144 fdimage288: vmlinux
+fdimage fdimage144 fdimage288: vmlinux
$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
+install:
+ $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
+
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/x86_64/boot/Makefile b/arch/x86_64/boot/Makefile
index 18c6e915d69..29f8396ed15 100644
--- a/arch/x86_64/boot/Makefile
+++ b/arch/x86_64/boot/Makefile
@@ -98,5 +98,5 @@ zlilo: $(BOOTIMAGE)
cp System.map $(INSTALL_PATH)/
if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
-install: $(BOOTIMAGE)
+install:
sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
diff --git a/arch/x86_64/boot/compressed/misc.c b/arch/x86_64/boot/compressed/misc.c
index 0e10fd84c7c..cf4b88c416d 100644
--- a/arch/x86_64/boot/compressed/misc.c
+++ b/arch/x86_64/boot/compressed/misc.c
@@ -9,7 +9,7 @@
* High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
*/
-#include "miscsetup.h"
+#include <linux/screen_info.h>
#include <asm/io.h>
#include <asm/page.h>
diff --git a/arch/x86_64/boot/compressed/miscsetup.h b/arch/x86_64/boot/compressed/miscsetup.h
deleted file mode 100644
index bb162053170..00000000000
--- a/arch/x86_64/boot/compressed/miscsetup.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#define NULL 0
-//typedef unsigned int size_t;
-
-
-struct screen_info {
- unsigned char orig_x; /* 0x00 */
- unsigned char orig_y; /* 0x01 */
- unsigned short dontuse1; /* 0x02 -- EXT_MEM_K sits here */
- unsigned short orig_video_page; /* 0x04 */
- unsigned char orig_video_mode; /* 0x06 */
- unsigned char orig_video_cols; /* 0x07 */
- unsigned short unused2; /* 0x08 */
- unsigned short orig_video_ega_bx; /* 0x0a */
- unsigned short unused3; /* 0x0c */
- unsigned char orig_video_lines; /* 0x0e */
- unsigned char orig_video_isVGA; /* 0x0f */
- unsigned short orig_video_points; /* 0x10 */
-
- /* VESA graphic mode -- linear frame buffer */
- unsigned short lfb_width; /* 0x12 */
- unsigned short lfb_height; /* 0x14 */
- unsigned short lfb_depth; /* 0x16 */
- unsigned long lfb_base; /* 0x18 */
- unsigned long lfb_size; /* 0x1c */
- unsigned short dontuse2, dontuse3; /* 0x20 -- CL_MAGIC and CL_OFFSET here */
- unsigned short lfb_linelength; /* 0x24 */
- unsigned char red_size; /* 0x26 */
- unsigned char red_pos; /* 0x27 */
- unsigned char green_size; /* 0x28 */
- unsigned char green_pos; /* 0x29 */
- unsigned char blue_size; /* 0x2a */
- unsigned char blue_pos; /* 0x2b */
- unsigned char rsvd_size; /* 0x2c */
- unsigned char rsvd_pos; /* 0x2d */
- unsigned short vesapm_seg; /* 0x2e */
- unsigned short vesapm_off; /* 0x30 */
- unsigned short pages; /* 0x32 */
- /* 0x34 -- 0x3f reserved for future expansion */
-};
diff --git a/arch/x86_64/boot/install.sh b/arch/x86_64/boot/install.sh
index 198af15a775..baaa2369bdb 100644
--- a/arch/x86_64/boot/install.sh
+++ b/arch/x86_64/boot/install.sh
@@ -1,40 +1,2 @@
#!/bin/sh
-#
-# arch/x86_64/boot/install.sh
-#
-# This file is subject to the terms and conditions of the GNU General Public
-# License. See the file "COPYING" in the main directory of this archive
-# for more details.
-#
-# Copyright (C) 1995 by Linus Torvalds
-#
-# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
-#
-# "make install" script for i386 architecture
-#
-# Arguments:
-# $1 - kernel version
-# $2 - kernel image file
-# $3 - kernel map file
-# $4 - default install path (blank if root directory)
-#
-
-# User may have a custom install script
-
-if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
-if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}installkernel "$@"; fi
-
-# Default install - same as make zlilo
-
-if [ -f $4/vmlinuz ]; then
- mv $4/vmlinuz $4/vmlinuz.old
-fi
-
-if [ -f $4/System.map ]; then
- mv $4/System.map $4/System.old
-fi
-
-cat $2 > $4/vmlinuz
-cp $3 $4/System.map
-
-if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
+. $srctree/arch/i386/boot/install.sh
diff --git a/arch/x86_64/crypto/aes.c b/arch/x86_64/crypto/aes.c
index acfdaa28791..fb1b961a2e2 100644
--- a/arch/x86_64/crypto/aes.c
+++ b/arch/x86_64/crypto/aes.c
@@ -74,8 +74,6 @@ static inline u8 byte(const u32 x, const unsigned n)
return x >> (n << 3);
}
-#define u32_in(x) le32_to_cpu(*(const __le32 *)(x))
-
struct aes_ctx
{
u32 key_length;
@@ -234,6 +232,7 @@ static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len,
u32 *flags)
{
struct aes_ctx *ctx = ctx_arg;
+ const __le32 *key = (const __le32 *)in_key;
u32 i, j, t, u, v, w;
if (key_len != 16 && key_len != 24 && key_len != 32) {
@@ -243,10 +242,10 @@ static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len,
ctx->key_length = key_len;
- D_KEY[key_len + 24] = E_KEY[0] = u32_in(in_key);
- D_KEY[key_len + 25] = E_KEY[1] = u32_in(in_key + 4);
- D_KEY[key_len + 26] = E_KEY[2] = u32_in(in_key + 8);
- D_KEY[key_len + 27] = E_KEY[3] = u32_in(in_key + 12);
+ D_KEY[key_len + 24] = E_KEY[0] = le32_to_cpu(key[0]);
+ D_KEY[key_len + 25] = E_KEY[1] = le32_to_cpu(key[1]);
+ D_KEY[key_len + 26] = E_KEY[2] = le32_to_cpu(key[2]);
+ D_KEY[key_len + 27] = E_KEY[3] = le32_to_cpu(key[3]);
switch (key_len) {
case 16:
@@ -256,17 +255,17 @@ static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len,
break;
case 24:
- E_KEY[4] = u32_in(in_key + 16);
- t = E_KEY[5] = u32_in(in_key + 20);
+ E_KEY[4] = le32_to_cpu(key[4]);
+ t = E_KEY[5] = le32_to_cpu(key[5]);
for (i = 0; i < 8; ++i)
loop6 (i);
break;
case 32:
- E_KEY[4] = u32_in(in_key + 16);
- E_KEY[5] = u32_in(in_key + 20);
- E_KEY[6] = u32_in(in_key + 24);
- t = E_KEY[7] = u32_in(in_key + 28);
+ E_KEY[4] = le32_to_cpu(key[4]);
+ E_KEY[5] = le32_to_cpu(key[5]);
+ E_KEY[6] = le32_to_cpu(key[6]);
+ t = E_KEY[7] = le32_to_cpu(key[7]);
for (i = 0; i < 7; ++i)
loop8(i);
break;
@@ -290,6 +289,8 @@ extern void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in);
static struct crypto_alg aes_alg = {
.cra_name = "aes",
+ .cra_driver_name = "aes-x86_64",
+ .cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 5d56542fb68..054dcd8a5e9 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.14-git7
-# Sat Nov 5 15:55:50 2005
+# Linux kernel version: 2.6.15-git7
+# Wed Jan 11 11:57:36 2006
#
CONFIG_X86_64=y
CONFIG_64BIT=y
@@ -15,6 +15,7 @@ CONFIG_EARLY_PRINTK=y
CONFIG_GENERIC_ISA_DMA=y
CONFIG_GENERIC_IOMAP=y
CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_DMI=y
#
# Code maturity level options
@@ -35,18 +36,21 @@ CONFIG_POSIX_MQUEUE=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
-CONFIG_HOTPLUG=y
-CONFIG_KOBJECT_UEVENT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_CPUSETS is not set
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_UID16=y
+CONFIG_VM86=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
+CONFIG_ELF_CORE=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
@@ -55,8 +59,10 @@ CONFIG_CC_ALIGN_FUNCTIONS=0
CONFIG_CC_ALIGN_LABELS=0
CONFIG_CC_ALIGN_LOOPS=0
CONFIG_CC_ALIGN_JUMPS=0
+CONFIG_SLAB=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
#
# Loadable module support
@@ -71,8 +77,28 @@ CONFIG_OBSOLETE_MODPARM=y
CONFIG_STOP_MACHINE=y
#
+# Block layer
+#
+CONFIG_LBD=y
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+
+#
# Processor type and features
#
+CONFIG_X86_PC=y
+# CONFIG_X86_VSMP is not set
# CONFIG_MK8 is not set
# CONFIG_MPSC is not set
CONFIG_GENERIC_CPU=y
@@ -89,14 +115,14 @@ CONFIG_X86_LOCAL_APIC=y
CONFIG_MTRR=y
CONFIG_SMP=y
CONFIG_SCHED_SMT=y
-CONFIG_PREEMPT_NONE=y
-# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT_NONE is not set
+CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_PREEMPT is not set
CONFIG_PREEMPT_BKL=y
CONFIG_NUMA=y
CONFIG_K8_NUMA=y
CONFIG_X86_64_ACPI_NUMA=y
-# CONFIG_NUMA_EMU is not set
+CONFIG_NUMA_EMU=y
CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
@@ -109,6 +135,7 @@ CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_NEED_MULTIPLE_NODES=y
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_MIGRATION=y
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
CONFIG_NR_CPUS=32
CONFIG_HOTPLUG_CPU=y
@@ -120,8 +147,9 @@ CONFIG_SWIOTLB=y
CONFIG_X86_MCE=y
CONFIG_X86_MCE_INTEL=y
CONFIG_X86_MCE_AMD=y
-CONFIG_PHYSICAL_START=0x100000
# CONFIG_KEXEC is not set
+# CONFIG_CRASH_DUMP is not set
+CONFIG_PHYSICAL_START=0x100000
CONFIG_SECCOMP=y
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
@@ -136,6 +164,7 @@ CONFIG_GENERIC_PENDING_IRQ=y
# Power management options
#
CONFIG_PM=y
+# CONFIG_PM_LEGACY is not set
# CONFIG_PM_DEBUG is not set
CONFIG_SOFTWARE_SUSPEND=y
CONFIG_PM_STD_PARTITION=""
@@ -152,7 +181,7 @@ CONFIG_ACPI_AC=y
CONFIG_ACPI_BATTERY=y
CONFIG_ACPI_BUTTON=y
# CONFIG_ACPI_VIDEO is not set
-CONFIG_ACPI_HOTKEY=m
+# CONFIG_ACPI_HOTKEY is not set
CONFIG_ACPI_FAN=y
CONFIG_ACPI_PROCESSOR=y
CONFIG_ACPI_HOTPLUG_CPU=y
@@ -205,7 +234,7 @@ CONFIG_PCI=y
CONFIG_PCI_DIRECT=y
CONFIG_PCI_MMCONFIG=y
CONFIG_UNORDERED_IO=y
-# CONFIG_PCIEPORTBUS is not set
+CONFIG_PCIEPORTBUS=y
CONFIG_PCI_MSI=y
# CONFIG_PCI_LEGACY_PROC is not set
# CONFIG_PCI_DEBUG is not set
@@ -229,7 +258,6 @@ CONFIG_IA32_EMULATION=y
CONFIG_IA32_AOUT=y
CONFIG_COMPAT=y
CONFIG_SYSVIPC_COMPAT=y
-CONFIG_UID16=y
#
# Networking
@@ -294,8 +322,11 @@ CONFIG_IPV6=y
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
@@ -315,7 +346,7 @@ CONFIG_IPV6=y
#
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_FW_LOADER is not set
+CONFIG_FW_LOADER=y
# CONFIG_DEBUG_DRIVER is not set
#
@@ -356,21 +387,7 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
CONFIG_BLK_DEV_INITRD=y
-CONFIG_LBD=y
# CONFIG_CDROM_PKTCDVD is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-# CONFIG_IOSCHED_AS is not set
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_DEFAULT_AS is not set
-CONFIG_DEFAULT_DEADLINE=y
-# CONFIG_DEFAULT_CFQ is not set
-# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="cfq"
# CONFIG_ATA_OVER_ETH is not set
#
@@ -410,7 +427,7 @@ CONFIG_IDEDMA_PCI_AUTO=y
# CONFIG_BLK_DEV_AEC62XX is not set
# CONFIG_BLK_DEV_ALI15X3 is not set
CONFIG_BLK_DEV_AMD74XX=y
-# CONFIG_BLK_DEV_ATIIXP is not set
+CONFIG_BLK_DEV_ATIIXP=y
# CONFIG_BLK_DEV_CMD64X is not set
# CONFIG_BLK_DEV_TRIFLEX is not set
# CONFIG_BLK_DEV_CY82C693 is not set
@@ -458,20 +475,21 @@ CONFIG_BLK_DEV_SD=y
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
#
# CONFIG_SCSI_MULTI_LUN is not set
-# CONFIG_SCSI_CONSTANTS is not set
+CONFIG_SCSI_CONSTANTS=y
# CONFIG_SCSI_LOGGING is not set
#
# SCSI Transport Attributes
#
CONFIG_SCSI_SPI_ATTRS=y
-# CONFIG_SCSI_FC_ATTRS is not set
+CONFIG_SCSI_FC_ATTRS=y
# CONFIG_SCSI_ISCSI_ATTRS is not set
# CONFIG_SCSI_SAS_ATTRS is not set
#
# SCSI low-level drivers
#
+# CONFIG_ISCSI_TCP is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_ACARD is not set
@@ -485,11 +503,13 @@ CONFIG_AIC79XX_RESET_DELAY_MS=4000
# CONFIG_AIC79XX_DEBUG_ENABLE is not set
CONFIG_AIC79XX_DEBUG_MASK=0
# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
-# CONFIG_MEGARAID_NEWGEN is not set
+CONFIG_MEGARAID_NEWGEN=y
+CONFIG_MEGARAID_MM=y
+CONFIG_MEGARAID_MAILBOX=y
# CONFIG_MEGARAID_LEGACY is not set
-# CONFIG_MEGARAID_SAS is not set
+CONFIG_MEGARAID_SAS=y
CONFIG_SCSI_SATA=y
-# CONFIG_SCSI_SATA_AHCI is not set
+CONFIG_SCSI_SATA_AHCI=y
# CONFIG_SCSI_SATA_SVW is not set
CONFIG_SCSI_ATA_PIIX=y
# CONFIG_SCSI_SATA_MV is not set
@@ -498,7 +518,7 @@ CONFIG_SCSI_SATA_NV=y
# CONFIG_SCSI_SATA_QSTOR is not set
# CONFIG_SCSI_SATA_PROMISE is not set
# CONFIG_SCSI_SATA_SX4 is not set
-# CONFIG_SCSI_SATA_SIL is not set
+CONFIG_SCSI_SATA_SIL=y
# CONFIG_SCSI_SATA_SIL24 is not set
# CONFIG_SCSI_SATA_SIS is not set
# CONFIG_SCSI_SATA_ULI is not set
@@ -518,12 +538,7 @@ CONFIG_SCSI_SATA_INTEL_COMBINED=y
# CONFIG_SCSI_QLOGIC_FC is not set
# CONFIG_SCSI_QLOGIC_1280 is not set
CONFIG_SCSI_QLA2XXX=y
-# CONFIG_SCSI_QLA21XX is not set
-# CONFIG_SCSI_QLA22XX is not set
-# CONFIG_SCSI_QLA2300 is not set
-# CONFIG_SCSI_QLA2322 is not set
-# CONFIG_SCSI_QLA6312 is not set
-# CONFIG_SCSI_QLA24XX is not set
+# CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE is not set
# CONFIG_SCSI_LPFC is not set
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
@@ -633,6 +648,7 @@ CONFIG_E1000=y
# CONFIG_R8169 is not set
# CONFIG_SIS190 is not set
# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
# CONFIG_SK98LIN is not set
# CONFIG_VIA_VELOCITY is not set
CONFIG_TIGON3=y
@@ -645,7 +661,6 @@ CONFIG_TIGON3=y
# CONFIG_IXGB is not set
CONFIG_S2IO=m
# CONFIG_S2IO_NAPI is not set
-# CONFIG_2BUFF_MODE is not set
#
# Token Ring devices
@@ -744,6 +759,7 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_ACPI is not set
CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
# CONFIG_SERIAL_8250_EXTENDED is not set
#
@@ -751,7 +767,6 @@ CONFIG_SERIAL_8250_NR_UARTS=4
#
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
-# CONFIG_SERIAL_JSM is not set
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
@@ -817,10 +832,10 @@ CONFIG_AGP_INTEL=y
# CONFIG_DRM is not set
# CONFIG_MWAVE is not set
CONFIG_RAW_DRIVER=y
+CONFIG_MAX_RAW_DEVS=256
CONFIG_HPET=y
# CONFIG_HPET_RTC_IRQ is not set
CONFIG_HPET_MMAP=y
-CONFIG_MAX_RAW_DEVS=256
# CONFIG_HANGCHECK_TIMER is not set
#
@@ -892,6 +907,7 @@ CONFIG_SOUND=y
# Open Sound System
#
CONFIG_SOUND_PRIME=y
+CONFIG_OBSOLETE_OSS_DRIVER=y
# CONFIG_SOUND_BT878 is not set
# CONFIG_SOUND_CMPCI is not set
# CONFIG_SOUND_EMU10K1 is not set
@@ -968,7 +984,8 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_STORAGE_SDDR09 is not set
# CONFIG_USB_STORAGE_SDDR55 is not set
# CONFIG_USB_STORAGE_JUMPSHOT is not set
-# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_LIBUSUAL is not set
#
# USB Input Devices
@@ -988,6 +1005,7 @@ CONFIG_USB_HIDINPUT=y
# CONFIG_USB_YEALINK is not set
# CONFIG_USB_XPAD is not set
# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_ATI_REMOTE2 is not set
# CONFIG_USB_KEYSPAN_REMOTE is not set
# CONFIG_USB_APPLETOUCH is not set
@@ -1097,6 +1115,7 @@ CONFIG_REISERFS_FS_POSIX_ACL=y
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set
CONFIG_INOTIFY=y
@@ -1135,6 +1154,7 @@ CONFIG_HUGETLBFS=y
CONFIG_HUGETLB_PAGE=y
CONFIG_RAMFS=y
CONFIG_RELAYFS_FS=y
+# CONFIG_CONFIGFS_FS is not set
#
# Miscellaneous filesystems
@@ -1232,21 +1252,23 @@ CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_UTF8=y
#
-# Profiling support
+# Instrumentation Support
#
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
+CONFIG_KPROBES=y
#
# Kernel hacking
#
# CONFIG_PRINTK_TIME is not set
-CONFIG_DEBUG_KERNEL=y
CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
CONFIG_LOG_BUF_SHIFT=18
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_MUTEXES is not set
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_KOBJECT is not set
@@ -1256,8 +1278,8 @@ CONFIG_DEBUG_FS=y
# CONFIG_FRAME_POINTER is not set
# CONFIG_RCU_TORTURE_TEST is not set
CONFIG_INIT_DEBUG=y
+# CONFIG_DEBUG_RODATA is not set
# CONFIG_IOMMU_DEBUG is not set
-CONFIG_KPROBES=y
#
# Security options
diff --git a/arch/x86_64/ia32/.gitignore b/arch/x86_64/ia32/.gitignore
deleted file mode 100644
index 48ab174fe5f..00000000000
--- a/arch/x86_64/ia32/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-vsyscall*.so
diff --git a/arch/x86_64/ia32/Makefile b/arch/x86_64/ia32/Makefile
index f76217d8f57..051608d5592 100644
--- a/arch/x86_64/ia32/Makefile
+++ b/arch/x86_64/ia32/Makefile
@@ -2,8 +2,7 @@
# Makefile for the ia32 kernel emulation subsystem.
#
-obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_ioctl.o \
- ia32_signal.o tls32.o \
+obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o tls32.o \
ia32_binfmt.o fpu32.o ptrace32.o syscall32.o syscall32_syscall.o
sysv-$(CONFIG_SYSVIPC) := ipc32.o
@@ -29,4 +28,3 @@ $(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
AFLAGS_vsyscall-sysenter.o = -m32
AFLAGS_vsyscall-syscall.o = -m32
-CFLAGS_ia32_ioctl.o += -Ifs/
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index 2b760d0d9ce..029bddab045 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -197,8 +197,7 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *re
static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
{
- struct pt_regs *pp = (struct pt_regs *)(t->thread.rsp0);
- --pp;
+ struct pt_regs *pp = task_pt_regs(t);
ELF_CORE_COPY_REGS((*elfregs), pp);
/* fix wrong segments */
(*elfregs)[7] = t->thread.ds;
@@ -217,7 +216,7 @@ elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpr
if (!tsk_used_math(tsk))
return 0;
if (!regs)
- regs = ((struct pt_regs *)tsk->thread.rsp0) - 1;
+ regs = task_pt_regs(tsk);
if (tsk == current)
unlazy_fpu(tsk);
set_fs(KERNEL_DS);
@@ -233,7 +232,7 @@ elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpr
static inline int
elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
{
- struct pt_regs *regs = ((struct pt_regs *)(t->thread.rsp0))-1;
+ struct pt_regs *regs = task_pt_regs(t);
if (!tsk_used_math(t))
return 0;
if (t == current)
diff --git a/arch/x86_64/ia32/ia32_ioctl.c b/arch/x86_64/ia32/ia32_ioctl.c
deleted file mode 100644
index e335bd0b637..00000000000
--- a/arch/x86_64/ia32/ia32_ioctl.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/* $Id: ia32_ioctl.c,v 1.25 2002/10/11 07:17:06 ak Exp $
- * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
- *
- * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
- * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
- * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs
- *
- * These routines maintain argument size conversion between 32bit and 64bit
- * ioctls.
- */
-
-#define INCLUDES
-#include <linux/syscalls.h>
-#include "compat_ioctl.c"
-#include <asm/ia32.h>
-
-#define CODE
-#include "compat_ioctl.c"
-
-#define RTC_IRQP_READ32 _IOR('p', 0x0b, unsigned int) /* Read IRQ rate */
-#define RTC_IRQP_SET32 _IOW('p', 0x0c, unsigned int) /* Set IRQ rate */
-#define RTC_EPOCH_READ32 _IOR('p', 0x0d, unsigned) /* Read epoch */
-#define RTC_EPOCH_SET32 _IOW('p', 0x0e, unsigned) /* Set epoch */
-
-static int rtc32_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
-{
- unsigned long val;
- mm_segment_t oldfs = get_fs();
- int ret;
-
- switch (cmd) {
- case RTC_IRQP_READ32:
- set_fs(KERNEL_DS);
- ret = sys_ioctl(fd, RTC_IRQP_READ, (unsigned long)&val);
- set_fs(oldfs);
- if (!ret)
- ret = put_user(val, (unsigned int __user *) arg);
- return ret;
-
- case RTC_IRQP_SET32:
- cmd = RTC_IRQP_SET;
- break;
-
- case RTC_EPOCH_READ32:
- set_fs(KERNEL_DS);
- ret = sys_ioctl(fd, RTC_EPOCH_READ, (unsigned long) &val);
- set_fs(oldfs);
- if (!ret)
- ret = put_user(val, (unsigned int __user *) arg);
- return ret;
-
- case RTC_EPOCH_SET32:
- cmd = RTC_EPOCH_SET;
- break;
- }
- return sys_ioctl(fd,cmd,arg);
-}
-
-
-#define HANDLE_IOCTL(cmd,handler) { (cmd), (ioctl_trans_handler_t)(handler) },
-#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL(cmd,sys_ioctl)
-
-struct ioctl_trans ioctl_start[] = {
-#include <linux/compat_ioctl.h>
-#define DECLARES
-#include "compat_ioctl.c"
-
-/* And these ioctls need translation */
-/* realtime device */
-HANDLE_IOCTL(RTC_IRQP_READ, rtc32_ioctl)
-HANDLE_IOCTL(RTC_IRQP_READ32,rtc32_ioctl)
-HANDLE_IOCTL(RTC_IRQP_SET32, rtc32_ioctl)
-HANDLE_IOCTL(RTC_EPOCH_READ32, rtc32_ioctl)
-HANDLE_IOCTL(RTC_EPOCH_SET32, rtc32_ioctl)
-/* take care of sizeof(sizeof()) breakage */
-};
-
-int ioctl_table_size = ARRAY_SIZE(ioctl_start);
-
diff --git a/arch/x86_64/ia32/ia32_signal.c b/arch/x86_64/ia32/ia32_signal.c
index 0903cc1faef..e0a92439f63 100644
--- a/arch/x86_64/ia32/ia32_signal.c
+++ b/arch/x86_64/ia32/ia32_signal.c
@@ -353,7 +353,6 @@ ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, struct _fpstate_ia32 __
struct pt_regs *regs, unsigned int mask)
{
int tmp, err = 0;
- u32 eflags;
tmp = 0;
__asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
@@ -378,10 +377,7 @@ ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, struct _fpstate_ia32 __
err |= __put_user(current->thread.trap_no, &sc->trapno);
err |= __put_user(current->thread.error_code, &sc->err);
err |= __put_user((u32)regs->rip, &sc->eip);
- eflags = regs->eflags;
- if (current->ptrace & PT_PTRACED)
- eflags &= ~TF_MASK;
- err |= __put_user((u32)eflags, &sc->eflags);
+ err |= __put_user((u32)regs->eflags, &sc->eflags);
err |= __put_user((u32)regs->rsp, &sc->esp_at_signal);
tmp = save_i387_ia32(current, fpstate, regs, 0);
@@ -505,13 +501,9 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
regs->ss = __USER32_DS;
set_fs(USER_DS);
- if (regs->eflags & TF_MASK) {
- if (current->ptrace & PT_PTRACED) {
- ptrace_notify(SIGTRAP);
- } else {
- regs->eflags &= ~TF_MASK;
- }
- }
+ regs->eflags &= ~TF_MASK;
+ if (test_thread_flag(TIF_SINGLESTEP))
+ ptrace_notify(SIGTRAP);
#if DEBUG_SIG
printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
@@ -605,13 +597,9 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->ss = __USER32_DS;
set_fs(USER_DS);
- if (regs->eflags & TF_MASK) {
- if (current->ptrace & PT_PTRACED) {
- ptrace_notify(SIGTRAP);
- } else {
- regs->eflags &= ~TF_MASK;
- }
- }
+ regs->eflags &= ~TF_MASK;
+ if (test_thread_flag(TIF_SINGLESTEP))
+ ptrace_notify(SIGTRAP);
#if DEBUG_SIG
printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index e0eb0c712fe..58f5bfb52c6 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -35,6 +35,18 @@
movq %rax,R8(%rsp)
.endm
+ .macro CFI_STARTPROC32 simple
+ CFI_STARTPROC \simple
+ CFI_UNDEFINED r8
+ CFI_UNDEFINED r9
+ CFI_UNDEFINED r10
+ CFI_UNDEFINED r11
+ CFI_UNDEFINED r12
+ CFI_UNDEFINED r13
+ CFI_UNDEFINED r14
+ CFI_UNDEFINED r15
+ .endm
+
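(The CFI_UNDEFINED annotations above presumably exist because the 32-bit entry paths do not preserve r8-r15, so the unwinder is told not to attempt restoring those registers; this is an interpretation, not stated in the patch itself.)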
/*
* 32bit SYSENTER instruction entry.
*
@@ -55,7 +67,7 @@
* with the int 0x80 path.
*/
ENTRY(ia32_sysenter_target)
- CFI_STARTPROC simple
+ CFI_STARTPROC32 simple
CFI_DEF_CFA rsp,0
CFI_REGISTER rsp,rbp
swapgs
@@ -92,6 +104,7 @@ ENTRY(ia32_sysenter_target)
.quad 1b,ia32_badarg
.previous
GET_THREAD_INFO(%r10)
+ orl $TS_COMPAT,threadinfo_status(%r10)
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
CFI_REMEMBER_STATE
jnz sysenter_tracesys
@@ -105,6 +118,7 @@ sysenter_do_call:
cli
testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
jnz int_ret_from_sys_call
+ andl $~TS_COMPAT,threadinfo_status(%r10)
/* clear IF, that popfq doesn't enable interrupts early */
andl $~0x200,EFLAGS-R11(%rsp)
RESTORE_ARGS 1,24,1,1,1,1
@@ -161,7 +175,7 @@ sysenter_tracesys:
* with the int 0x80 path.
*/
ENTRY(ia32_cstar_target)
- CFI_STARTPROC simple
+ CFI_STARTPROC32 simple
CFI_DEF_CFA rsp,0
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
@@ -191,6 +205,7 @@ ENTRY(ia32_cstar_target)
.quad 1b,ia32_badarg
.previous
GET_THREAD_INFO(%r10)
+ orl $TS_COMPAT,threadinfo_status(%r10)
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
CFI_REMEMBER_STATE
jnz cstar_tracesys
@@ -204,6 +219,7 @@ cstar_do_call:
cli
testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
jnz int_ret_from_sys_call
+ andl $~TS_COMPAT,threadinfo_status(%r10)
RESTORE_ARGS 1,-ARG_SKIP,1,1,1
movl RIP-ARGOFFSET(%rsp),%ecx
CFI_REGISTER rip,rcx
@@ -276,6 +292,7 @@ ENTRY(ia32_syscall)
this could be a problem. */
SAVE_ARGS 0,0,1
GET_THREAD_INFO(%r10)
+ orl $TS_COMPAT,threadinfo_status(%r10)
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
jnz ia32_tracesys
ia32_do_syscall:
@@ -318,7 +335,7 @@ quiet_ni_syscall:
jmp ia32_ptregs_common
.endm
- CFI_STARTPROC
+ CFI_STARTPROC32
PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn, %rdi
PTREGSCALL stub32_sigreturn, sys32_sigreturn, %rdi
@@ -333,15 +350,26 @@ quiet_ni_syscall:
ENTRY(ia32_ptregs_common)
popq %r11
- CFI_ADJUST_CFA_OFFSET -8
- CFI_REGISTER rip, r11
+ CFI_ENDPROC
+ CFI_STARTPROC32 simple
+ CFI_DEF_CFA rsp,SS+8-ARGOFFSET
+ CFI_REL_OFFSET rax,RAX-ARGOFFSET
+ CFI_REL_OFFSET rcx,RCX-ARGOFFSET
+ CFI_REL_OFFSET rdx,RDX-ARGOFFSET
+ CFI_REL_OFFSET rsi,RSI-ARGOFFSET
+ CFI_REL_OFFSET rdi,RDI-ARGOFFSET
+ CFI_REL_OFFSET rip,RIP-ARGOFFSET
+/* CFI_REL_OFFSET cs,CS-ARGOFFSET*/
+/* CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
+ CFI_REL_OFFSET rsp,RSP-ARGOFFSET
+/* CFI_REL_OFFSET ss,SS-ARGOFFSET*/
SAVE_REST
call *%rax
RESTORE_REST
jmp ia32_sysret /* misbalances the return cache */
CFI_ENDPROC
- .data
+ .section .rodata,"a"
.align 8
.globl ia32_sys_call_table
ia32_sys_call_table:
@@ -608,7 +636,7 @@ ia32_sys_call_table:
.quad sys_epoll_wait
.quad sys_remap_file_pages
.quad sys_set_tid_address
- .quad sys32_timer_create
+ .quad compat_sys_timer_create
.quad compat_sys_timer_settime /* 260 */
.quad compat_sys_timer_gettime
.quad sys_timer_getoverrun
@@ -643,6 +671,7 @@ ia32_sys_call_table:
.quad sys_inotify_init
.quad sys_inotify_add_watch
.quad sys_inotify_rm_watch
+ .quad sys_migrate_pages
ia32_syscall_end:
.rept IA32_NR_syscalls-(ia32_syscall_end-ia32_sys_call_table)/8
.quad ni_syscall
diff --git a/arch/x86_64/ia32/ptrace32.c b/arch/x86_64/ia32/ptrace32.c
index 2a925e2af39..23a4515a73b 100644
--- a/arch/x86_64/ia32/ptrace32.c
+++ b/arch/x86_64/ia32/ptrace32.c
@@ -28,9 +28,12 @@
#include <asm/i387.h>
#include <asm/fpu32.h>
-/* determines which flags the user has access to. */
-/* 1 = access 0 = no access */
-#define FLAG_MASK 0x44dd5UL
+/*
+ * Determines which flags the user has access to [1 = access, 0 = no access].
+ * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9).
+ * Also masks reserved bits (31-22, 15, 5, 3, 1).
+ */
+#define FLAG_MASK 0x54dd5UL
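For illustration only (the consuming code lies outside this hunk): a mask like this is typically applied when the tracer writes eflags, letting only the FLAG_MASK bits change and preserving everything else, roughly:

	/* Sketch, assuming 'stack' points at the child's saved pt_regs and
	 * 'val' is the 32-bit value written by the tracer.
	 */
	stack[offsetof(struct pt_regs, eflags)/8] =
		(stack[offsetof(struct pt_regs, eflags)/8] & ~FLAG_MASK) |
		(val & FLAG_MASK);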
#define R32(l,q) \
case offsetof(struct user32, regs.l): stack[offsetof(struct pt_regs, q)/8] = val; break
@@ -38,7 +41,7 @@
static int putreg32(struct task_struct *child, unsigned regno, u32 val)
{
int i;
- __u64 *stack = (__u64 *)(child->thread.rsp0 - sizeof(struct pt_regs));
+ __u64 *stack = (__u64 *)task_pt_regs(child);
switch (regno) {
case offsetof(struct user32, regs.fs):
@@ -134,7 +137,7 @@ static int putreg32(struct task_struct *child, unsigned regno, u32 val)
static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
- __u64 *stack = (__u64 *)(child->thread.rsp0 - sizeof(struct pt_regs));
+ __u64 *stack = (__u64 *)task_pt_regs(child);
switch (regno) {
case offsetof(struct user32, regs.fs):
@@ -196,36 +199,6 @@ static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
#undef R32
-static struct task_struct *find_target(int request, int pid, int *err)
-{
- struct task_struct *child;
-
- *err = -EPERM;
- if (pid == 1)
- return NULL;
-
- *err = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (child) {
- *err = -EPERM;
- if (child->pid == 1)
- goto out;
- *err = ptrace_check_attach(child, request == PTRACE_KILL);
- if (*err < 0)
- goto out;
- return child;
- }
- out:
- if (child)
- put_task_struct(child);
- return NULL;
-
-}
-
asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
{
struct task_struct *child;
@@ -254,11 +227,18 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
break;
}
- child = find_target(request, pid, &ret);
- if (!child)
- return ret;
+ if (request == PTRACE_TRACEME)
+ return ptrace_traceme();
+
+ child = ptrace_get_task_struct(pid);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
- childregs = (struct pt_regs *)(child->thread.rsp0 - sizeof(struct pt_regs));
+ ret = ptrace_check_attach(child, request == PTRACE_KILL);
+ if (ret < 0)
+ goto out;
+
+ childregs = task_pt_regs(child);
switch (request) {
case PTRACE_PEEKDATA:
@@ -373,6 +353,7 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
break;
}
+ out:
put_task_struct(child);
return ret;
}
diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c
index 5389df610e7..54481af5344 100644
--- a/arch/x86_64/ia32/sys_ia32.c
+++ b/arch/x86_64/ia32/sys_ia32.c
@@ -969,25 +969,6 @@ long sys32_kill(int pid, int sig)
return sys_kill(pid, sig);
}
-extern asmlinkage long
-sys_timer_create(clockid_t which_clock,
- struct sigevent __user *timer_event_spec,
- timer_t __user * created_timer_id);
-
-long
-sys32_timer_create(u32 clock, struct compat_sigevent __user *se32, timer_t __user *timer_id)
-{
- struct sigevent __user *p = NULL;
- if (se32) {
- struct sigevent se;
- p = compat_alloc_user_space(sizeof(struct sigevent));
- if (get_compat_sigevent(&se, se32) ||
- copy_to_user(p, &se, sizeof(se)))
- return -EFAULT;
- }
- return sys_timer_create(clock, p, timer_id);
-}
-
long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
__u32 len_low, __u32 len_high, int advice)
{
diff --git a/arch/x86_64/ia32/vsyscall-sigreturn.S b/arch/x86_64/ia32/vsyscall-sigreturn.S
index 8b5a4b060bb..d90321fe9bb 100644
--- a/arch/x86_64/ia32/vsyscall-sigreturn.S
+++ b/arch/x86_64/ia32/vsyscall-sigreturn.S
@@ -7,6 +7,7 @@
* by doing ".balign 32" must match in both versions of the page.
*/
+ .code32
.section .text.sigreturn,"ax"
.balign 32
.globl __kernel_sigreturn
diff --git a/arch/x86_64/ia32/vsyscall-syscall.S b/arch/x86_64/ia32/vsyscall-syscall.S
index b024965bb68..cf9ef678de3 100644
--- a/arch/x86_64/ia32/vsyscall-syscall.S
+++ b/arch/x86_64/ia32/vsyscall-syscall.S
@@ -6,6 +6,7 @@
#include <asm/asm-offsets.h>
#include <asm/segment.h>
+ .code32
.text
.section .text.vsyscall,"ax"
.globl __kernel_vsyscall
diff --git a/arch/x86_64/ia32/vsyscall-sysenter.S b/arch/x86_64/ia32/vsyscall-sysenter.S
index 71f3de586b5..ae056e553d1 100644
--- a/arch/x86_64/ia32/vsyscall-sysenter.S
+++ b/arch/x86_64/ia32/vsyscall-sysenter.S
@@ -5,6 +5,7 @@
#include <asm/ia32_unistd.h>
#include <asm/asm-offsets.h>
+ .code32
.text
.section .text.vsyscall,"ax"
.globl __kernel_vsyscall
diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
index fe4cbd1c4b2..72fe60c20d3 100644
--- a/arch/x86_64/kernel/Makefile
+++ b/arch/x86_64/kernel/Makefile
@@ -7,7 +7,8 @@ EXTRA_AFLAGS := -traditional
obj-y := process.o signal.o entry.o traps.o irq.o \
ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_x86_64.o \
x8664_ksyms.o i387.o syscall.o vsyscall.o \
- setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o
+ setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o \
+ dmi_scan.o pci-dma.o pci-nommu.o
obj-$(CONFIG_X86_MCE) += mce.o
obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o
@@ -22,14 +23,16 @@ obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
obj-$(CONFIG_X86_IO_APIC) += io_apic.o mpparse.o \
genapic.o genapic_cluster.o genapic_flat.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_PM) += suspend.o
obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_GART_IOMMU) += pci-gart.o aperture.o
-obj-$(CONFIG_DUMMY_IOMMU) += pci-nommu.o pci-dma.o
+obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_X86_PM_TIMER) += pmtimer.o
+obj-$(CONFIG_X86_VSMP) += vsmp.o
obj-$(CONFIG_MODULES) += module.o
@@ -46,3 +49,5 @@ intel_cacheinfo-y += ../../i386/kernel/cpu/intel_cacheinfo.o
quirks-y += ../../i386/kernel/quirks.o
i8237-y += ../../i386/kernel/i8237.o
msr-$(subst m,y,$(CONFIG_X86_MSR)) += ../../i386/kernel/msr.o
+dmi_scan-y += ../../i386/kernel/dmi_scan.o
+
diff --git a/arch/x86_64/kernel/aperture.c b/arch/x86_64/kernel/aperture.c
index c7f4fdd20f0..e4e2b7d01f8 100644
--- a/arch/x86_64/kernel/aperture.c
+++ b/arch/x86_64/kernel/aperture.c
@@ -23,6 +23,7 @@
#include <asm/io.h>
#include <asm/proto.h>
#include <asm/pci-direct.h>
+#include <asm/dma.h>
int iommu_aperture;
int iommu_aperture_disabled __initdata = 0;
@@ -247,7 +248,7 @@ void __init iommu_hole_init(void)
/* Got the aperture from the AGP bridge */
} else if (swiotlb && !valid_agp) {
/* Do nothing */
- } else if ((!no_iommu && end_pfn >= 0xffffffff>>PAGE_SHIFT) ||
+ } else if ((!no_iommu && end_pfn >= MAX_DMA32_PFN) ||
force_iommu ||
valid_agp ||
fallback_aper_force) {
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 18691ce4c75..8fdd089fd17 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -25,6 +25,7 @@
#include <linux/mc146818rtc.h>
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
+#include <linux/module.h>
#include <asm/atomic.h>
#include <asm/smp.h>
@@ -33,36 +34,36 @@
#include <asm/pgalloc.h>
#include <asm/mach_apic.h>
#include <asm/nmi.h>
+#include <asm/idle.h>
int apic_verbosity;
int disable_apic_timer __initdata;
+/*
+ * cpu_mask that denotes the CPUs that need the timer interrupt delivered as
+ * IPIs in place of their local APIC timers
+ */
+static cpumask_t timer_interrupt_broadcast_ipi_mask;
+
/* Using APIC to generate smp_local_timer_interrupt? */
int using_apic_timer = 0;
-static DEFINE_PER_CPU(int, prof_multiplier) = 1;
-static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
-static DEFINE_PER_CPU(int, prof_counter) = 1;
-
static void apic_pm_activate(void);
void enable_NMI_through_LVT0 (void * dummy)
{
- unsigned int v, ver;
+ unsigned int v;
- ver = apic_read(APIC_LVR);
- ver = GET_APIC_VERSION(ver);
v = APIC_DM_NMI; /* unmask and set to NMI */
- apic_write_around(APIC_LVT0, v);
+ apic_write(APIC_LVT0, v);
}
int get_maxlvt(void)
{
- unsigned int v, ver, maxlvt;
+ unsigned int v, maxlvt;
v = apic_read(APIC_LVR);
- ver = GET_APIC_VERSION(v);
maxlvt = GET_APIC_MAXLVT(v);
return maxlvt;
}
@@ -80,33 +81,33 @@ void clear_local_APIC(void)
*/
if (maxlvt >= 3) {
v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
- apic_write_around(APIC_LVTERR, v | APIC_LVT_MASKED);
+ apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
}
/*
* Careful: we have to set masks only first to deassert
* any level-triggered sources.
*/
v = apic_read(APIC_LVTT);
- apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED);
+ apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
v = apic_read(APIC_LVT0);
- apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
+ apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
v = apic_read(APIC_LVT1);
- apic_write_around(APIC_LVT1, v | APIC_LVT_MASKED);
+ apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
if (maxlvt >= 4) {
v = apic_read(APIC_LVTPC);
- apic_write_around(APIC_LVTPC, v | APIC_LVT_MASKED);
+ apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
}
/*
* Clean APIC state for other OSs:
*/
- apic_write_around(APIC_LVTT, APIC_LVT_MASKED);
- apic_write_around(APIC_LVT0, APIC_LVT_MASKED);
- apic_write_around(APIC_LVT1, APIC_LVT_MASKED);
+ apic_write(APIC_LVTT, APIC_LVT_MASKED);
+ apic_write(APIC_LVT0, APIC_LVT_MASKED);
+ apic_write(APIC_LVT1, APIC_LVT_MASKED);
if (maxlvt >= 3)
- apic_write_around(APIC_LVTERR, APIC_LVT_MASKED);
+ apic_write(APIC_LVTERR, APIC_LVT_MASKED);
if (maxlvt >= 4)
- apic_write_around(APIC_LVTPC, APIC_LVT_MASKED);
+ apic_write(APIC_LVTPC, APIC_LVT_MASKED);
v = GET_APIC_VERSION(apic_read(APIC_LVR));
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
@@ -151,7 +152,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
value &= ~APIC_VECTOR_MASK;
value |= APIC_SPIV_APIC_ENABLED;
value |= 0xf;
- apic_write_around(APIC_SPIV, value);
+ apic_write(APIC_SPIV, value);
if (!virt_wire_setup) {
/* For LVT0 make it edge triggered, active high, external and enabled */
@@ -161,11 +162,11 @@ void disconnect_bsp_APIC(int virt_wire_setup)
APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED );
value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
- apic_write_around(APIC_LVT0, value);
+ apic_write(APIC_LVT0, value);
}
else {
/* Disable LVT0 */
- apic_write_around(APIC_LVT0, APIC_LVT_MASKED);
+ apic_write(APIC_LVT0, APIC_LVT_MASKED);
}
/* For LVT1 make it edge triggered, active high, nmi and enabled */
@@ -176,7 +177,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
- apic_write_around(APIC_LVT1, value);
+ apic_write(APIC_LVT1, value);
}
}
@@ -192,7 +193,7 @@ void disable_local_APIC(void)
*/
value = apic_read(APIC_SPIV);
value &= ~APIC_SPIV_APIC_ENABLED;
- apic_write_around(APIC_SPIV, value);
+ apic_write(APIC_SPIV, value);
}
/*
@@ -269,7 +270,7 @@ void __init sync_Arb_IDs(void)
apic_wait_icr_idle();
apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
- apic_write_around(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
+ apic_write(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
| APIC_DM_INIT);
}
@@ -280,7 +281,7 @@ extern void __error_in_apic_c (void);
*/
void __init init_bsp_APIC(void)
{
- unsigned int value, ver;
+ unsigned int value;
/*
* Don't do the setup now if we have a SMP BIOS as the
@@ -290,7 +291,6 @@ void __init init_bsp_APIC(void)
return;
value = apic_read(APIC_LVR);
- ver = GET_APIC_VERSION(value);
/*
* Do not trust the local APIC being empty at bootup.
@@ -305,22 +305,21 @@ void __init init_bsp_APIC(void)
value |= APIC_SPIV_APIC_ENABLED;
value |= APIC_SPIV_FOCUS_DISABLED;
value |= SPURIOUS_APIC_VECTOR;
- apic_write_around(APIC_SPIV, value);
+ apic_write(APIC_SPIV, value);
/*
* Set up the virtual wire mode.
*/
- apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
+ apic_write(APIC_LVT0, APIC_DM_EXTINT);
value = APIC_DM_NMI;
- apic_write_around(APIC_LVT1, value);
+ apic_write(APIC_LVT1, value);
}
void __cpuinit setup_local_APIC (void)
{
- unsigned int value, ver, maxlvt;
+ unsigned int value, maxlvt;
value = apic_read(APIC_LVR);
- ver = GET_APIC_VERSION(value);
if ((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f)
__error_in_apic_c();
@@ -345,7 +344,7 @@ void __cpuinit setup_local_APIC (void)
*/
value = apic_read(APIC_TASKPRI);
value &= ~APIC_TPRI_MASK;
- apic_write_around(APIC_TASKPRI, value);
+ apic_write(APIC_TASKPRI, value);
/*
* Now that we are all set up, enable the APIC
@@ -387,7 +386,7 @@ void __cpuinit setup_local_APIC (void)
* Set spurious IRQ vector
*/
value |= SPURIOUS_APIC_VECTOR;
- apic_write_around(APIC_SPIV, value);
+ apic_write(APIC_SPIV, value);
/*
* Set up LVT0, LVT1:
@@ -407,7 +406,7 @@ void __cpuinit setup_local_APIC (void)
value = APIC_DM_EXTINT | APIC_LVT_MASKED;
apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", smp_processor_id());
}
- apic_write_around(APIC_LVT0, value);
+ apic_write(APIC_LVT0, value);
/*
* only the BP should see the LINT1 NMI signal, obviously.
@@ -416,14 +415,14 @@ void __cpuinit setup_local_APIC (void)
value = APIC_DM_NMI;
else
value = APIC_DM_NMI | APIC_LVT_MASKED;
- apic_write_around(APIC_LVT1, value);
+ apic_write(APIC_LVT1, value);
{
unsigned oldvalue;
maxlvt = get_maxlvt();
oldvalue = apic_read(APIC_ESR);
value = ERROR_APIC_VECTOR; // enables sending errors
- apic_write_around(APIC_LVTERR, value);
+ apic_write(APIC_LVTERR, value);
/*
* spec says clear errors after enabling vector.
*/
@@ -660,20 +659,25 @@ void __init init_apic_mappings(void)
static void __setup_APIC_LVTT(unsigned int clocks)
{
unsigned int lvtt_value, tmp_value, ver;
+ int cpu = smp_processor_id();
ver = GET_APIC_VERSION(apic_read(APIC_LVR));
lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
- apic_write_around(APIC_LVTT, lvtt_value);
+
+ if (cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask))
+ lvtt_value |= APIC_LVT_MASKED;
+
+ apic_write(APIC_LVTT, lvtt_value);
/*
* Divide PICLK by 16
*/
tmp_value = apic_read(APIC_TDCR);
- apic_write_around(APIC_TDCR, (tmp_value
+ apic_write(APIC_TDCR, (tmp_value
& ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
| APIC_TDR_DIV_16);
- apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR);
+ apic_write(APIC_TMICT, clocks/APIC_DIVISOR);
}
static void setup_APIC_timer(unsigned int clocks)
@@ -682,12 +686,6 @@ static void setup_APIC_timer(unsigned int clocks)
local_irq_save(flags);
- /* For some reasons this doesn't work on Simics, so fake it for now */
- if (!strstr(boot_cpu_data.x86_model_id, "Screwdriver")) {
- __setup_APIC_LVTT(clocks);
- return;
- }
-
/* wait for irq slice */
if (vxtime.hpet_address) {
int trigger = hpet_readl(HPET_T0_CMP);
@@ -700,7 +698,7 @@ static void setup_APIC_timer(unsigned int clocks)
outb_p(0x00, 0x43);
c2 = inb_p(0x40);
c2 |= inb_p(0x40) << 8;
- do {
+ do {
c1 = c2;
outb_p(0x00, 0x43);
c2 = inb_p(0x40);
@@ -785,52 +783,68 @@ void __cpuinit setup_secondary_APIC_clock(void)
local_irq_enable();
}
-void __cpuinit disable_APIC_timer(void)
+void disable_APIC_timer(void)
{
if (using_apic_timer) {
unsigned long v;
v = apic_read(APIC_LVTT);
- apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED);
+ apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
}
}
void enable_APIC_timer(void)
{
- if (using_apic_timer) {
+ int cpu = smp_processor_id();
+
+ if (using_apic_timer &&
+ !cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
unsigned long v;
v = apic_read(APIC_LVTT);
- apic_write_around(APIC_LVTT, v & ~APIC_LVT_MASKED);
+ apic_write(APIC_LVTT, v & ~APIC_LVT_MASKED);
}
}
-/*
- * the frequency of the profiling timer can be changed
- * by writing a multiplier value into /proc/profile.
- */
-int setup_profiling_timer(unsigned int multiplier)
+void switch_APIC_timer_to_ipi(void *cpumask)
{
- int i;
+ cpumask_t mask = *(cpumask_t *)cpumask;
+ int cpu = smp_processor_id();
- /*
- * Sanity check. [at least 500 APIC cycles should be
- * between APIC interrupts as a rule of thumb, to avoid
- * irqs flooding us]
- */
- if ( (!multiplier) || (calibration_result/multiplier < 500))
- return -EINVAL;
-
- /*
- * Set the new multiplier for each CPU. CPUs don't start using the
- * new values until the next timer interrupt in which they do process
- * accounting. At that time they also adjust their APIC timers
- * accordingly.
- */
- for (i = 0; i < NR_CPUS; ++i)
- per_cpu(prof_multiplier, i) = multiplier;
+ if (cpu_isset(cpu, mask) &&
+ !cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
+ disable_APIC_timer();
+ cpu_set(cpu, timer_interrupt_broadcast_ipi_mask);
+ }
+}
+EXPORT_SYMBOL(switch_APIC_timer_to_ipi);
- return 0;
+void smp_send_timer_broadcast_ipi(void)
+{
+ cpumask_t mask;
+
+ cpus_and(mask, cpu_online_map, timer_interrupt_broadcast_ipi_mask);
+ if (!cpus_empty(mask)) {
+ send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+ }
+}
+
+void switch_ipi_to_APIC_timer(void *cpumask)
+{
+ cpumask_t mask = *(cpumask_t *)cpumask;
+ int cpu = smp_processor_id();
+
+ if (cpu_isset(cpu, mask) &&
+ cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
+ cpu_clear(cpu, timer_interrupt_broadcast_ipi_mask);
+ enable_APIC_timer();
+ }
+}
+EXPORT_SYMBOL(switch_ipi_to_APIC_timer);
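The callers of these two helpers are not part of this hunk; a plausible sketch, assuming an idle driver whose deep C-states stop the local APIC timer, would be:

	/* Hypothetical call site: take timer ticks as broadcast IPIs while
	 * the local APIC timer is unreliable, then switch back on wakeup.
	 */
	cpumask_t mask = cpumask_of_cpu(smp_processor_id());

	switch_APIC_timer_to_ipi(&mask);	/* before entering the C-state */
	/* ... enter and leave the deep C-state ... */
	switch_ipi_to_APIC_timer(&mask);	/* after resuming */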
+
+int setup_profiling_timer(unsigned int multiplier)
+{
+ return -EINVAL;
}
#ifdef CONFIG_X86_MCE_AMD
@@ -857,32 +871,10 @@ void setup_threshold_lvt(unsigned long lvt_off)
void smp_local_timer_interrupt(struct pt_regs *regs)
{
- int cpu = smp_processor_id();
-
profile_tick(CPU_PROFILING, regs);
- if (--per_cpu(prof_counter, cpu) <= 0) {
- /*
- * The multiplier may have changed since the last time we got
- * to this point as a result of the user writing to
- * /proc/profile. In this case we need to adjust the APIC
- * timer accordingly.
- *
- * Interrupts are already masked off at this point.
- */
- per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
- if (per_cpu(prof_counter, cpu) !=
- per_cpu(prof_old_multiplier, cpu)) {
- __setup_APIC_LVTT(calibration_result/
- per_cpu(prof_counter, cpu));
- per_cpu(prof_old_multiplier, cpu) =
- per_cpu(prof_counter, cpu);
- }
-
#ifdef CONFIG_SMP
- update_process_times(user_mode(regs));
+ update_process_times(user_mode(regs));
#endif
- }
-
/*
* We take the 'long' return path, and there every subsystem
* grabs the appropriate locks (kernel lock/ irq lock).
@@ -920,6 +912,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
* Besides, if we don't, timer interrupts ignore the global
* interrupt lock, which is the WrongThing (tm) to do.
*/
+ exit_idle();
irq_enter();
smp_local_timer_interrupt(regs);
irq_exit();
@@ -979,6 +972,7 @@ __init int oem_force_hpet_timer(void)
asmlinkage void smp_spurious_interrupt(void)
{
unsigned int v;
+ exit_idle();
irq_enter();
/*
* Check if this really is a spurious interrupt and ACK it
@@ -1014,6 +1008,7 @@ asmlinkage void smp_error_interrupt(void)
{
unsigned int v, v1;
+ exit_idle();
irq_enter();
/* First tickle the hardware, only then report what went on. -- REW */
v = apic_read(APIC_ESR);
@@ -1060,7 +1055,7 @@ int __init APIC_init_uniprocessor (void)
connect_bsp_APIC();
phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
- apic_write_around(APIC_ID, boot_cpu_id);
+ apic_write(APIC_ID, SET_APIC_ID(boot_cpu_id));
setup_local_APIC();
diff --git a/arch/x86_64/kernel/asm-offsets.c b/arch/x86_64/kernel/asm-offsets.c
index aaa6d383351..cfb4f9cebea 100644
--- a/arch/x86_64/kernel/asm-offsets.c
+++ b/arch/x86_64/kernel/asm-offsets.c
@@ -33,6 +33,7 @@ int main(void)
ENTRY(flags);
ENTRY(addr_limit);
ENTRY(preempt_count);
+ ENTRY(status);
BLANK();
#undef ENTRY
#define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry))
@@ -64,5 +65,9 @@ int main(void)
DEFINE(pbe_address, offsetof(struct pbe, address));
DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
DEFINE(pbe_next, offsetof(struct pbe, next));
+ BLANK();
+#if DEBUG_STKSZ > EXCEPTION_STKSZ
+ DEFINE(DEBUG_IST, DEBUG_STACK);
+#endif
return 0;
}
diff --git a/arch/x86_64/kernel/crash.c b/arch/x86_64/kernel/crash.c
index 535e0446607..4e6c3b729e3 100644
--- a/arch/x86_64/kernel/crash.c
+++ b/arch/x86_64/kernel/crash.c
@@ -11,19 +11,156 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
+#include <linux/irq.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
+#include <linux/delay.h>
+#include <linux/elf.h>
+#include <linux/elfcore.h>
#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
+#include <asm/mach_apic.h>
-note_buf_t crash_notes[NR_CPUS];
+/* This keeps track of which cpu is the crashing one. */
+static int crashing_cpu;
+
+static u32 *append_elf_note(u32 *buf, char *name, unsigned type,
+ void *data, size_t data_len)
+{
+ struct elf_note note;
+
+ note.n_namesz = strlen(name) + 1;
+ note.n_descsz = data_len;
+ note.n_type = type;
+ memcpy(buf, &note, sizeof(note));
+ buf += (sizeof(note) +3)/4;
+ memcpy(buf, name, note.n_namesz);
+ buf += (note.n_namesz + 3)/4;
+ memcpy(buf, data, note.n_descsz);
+ buf += (note.n_descsz + 3)/4;
+
+ return buf;
+}
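(Worked example of what this helper emits for the "CORE"/NT_PRSTATUS note written below: a 12-byte header with n_namesz = 5 and n_type = NT_PRSTATUS, then "CORE\0" rounded up to the next 4-byte boundary, then the elf_prstatus payload likewise rounded up — the standard ELF note layout that crash-dump consumers such as the /proc/vmcore reader expect.)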
+
+static void final_note(u32 *buf)
+{
+ struct elf_note note;
+
+ note.n_namesz = 0;
+ note.n_descsz = 0;
+ note.n_type = 0;
+ memcpy(buf, &note, sizeof(note));
+}
+
+static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
+{
+ struct elf_prstatus prstatus;
+ u32 *buf;
+
+ if ((cpu < 0) || (cpu >= NR_CPUS))
+ return;
+
+ /* Using ELF notes here is opportunistic.
+ * I need a well defined structure format
+ * for the data I pass, and I need tags
+ * on the data to indicate what information I have
+ * squirrelled away. ELF notes happen to provide
+ * all of that, so there is no need to invent something new.
+ */
+
+ buf = (u32*)per_cpu_ptr(crash_notes, cpu);
+
+ if (!buf)
+ return;
+
+ memset(&prstatus, 0, sizeof(prstatus));
+ prstatus.pr_pid = current->pid;
+ elf_core_copy_regs(&prstatus.pr_reg, regs);
+ buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
+ sizeof(prstatus));
+ final_note(buf);
+}
+
+static void crash_save_self(struct pt_regs *regs)
+{
+ int cpu;
+
+ cpu = smp_processor_id();
+ crash_save_this_cpu(regs, cpu);
+}
+
+#ifdef CONFIG_SMP
+static atomic_t waiting_for_crash_ipi;
+
+static int crash_nmi_callback(struct pt_regs *regs, int cpu)
+{
+ /*
+ * Don't do anything if this handler is invoked on the crashing cpu.
+ * Otherwise, the system will completely hang. The crashing cpu can get
+ * an NMI if the system was initially booted with the nmi_watchdog parameter.
+ */
+ if (cpu == crashing_cpu)
+ return 1;
+ local_irq_disable();
+
+ crash_save_this_cpu(regs, cpu);
+ disable_local_APIC();
+ atomic_dec(&waiting_for_crash_ipi);
+ /* Assume hlt works */
+ for(;;)
+ asm("hlt");
+
+ return 1;
+}
+
+static void smp_send_nmi_allbutself(void)
+{
+ send_IPI_allbutself(APIC_DM_NMI);
+}
+
+/*
+ * This code is a best effort heuristic to get the
+ * other cpus to stop executing. So races with
+ * cpu hotplug shouldn't matter.
+ */
+
+static void nmi_shootdown_cpus(void)
+{
+ unsigned long msecs;
+
+ atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+ set_nmi_callback(crash_nmi_callback);
+
+ /*
+ * Ensure the new callback function is set before sending
+ * out the NMI
+ */
+ wmb();
+
+ smp_send_nmi_allbutself();
+
+ msecs = 1000; /* Wait at most a second for the other cpus to stop */
+ while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
+ mdelay(1);
+ msecs--;
+ }
+ /* Leave the nmi callback set */
+ disable_local_APIC();
+}
+#else
+static void nmi_shootdown_cpus(void)
+{
+ /* There are no cpus to shoot down */
+}
+#endif
void machine_crash_shutdown(struct pt_regs *regs)
{
- /* This function is only called after the system
+ /*
+ * This function is only called after the system
 * has panicked or is otherwise in a critical state.
* The minimum amount of code to allow a kexec'd kernel
* to run successfully needs to happen here.
@@ -31,4 +168,19 @@ void machine_crash_shutdown(struct pt_regs *regs)
* In practice this means shooting down the other cpus in
* an SMP system.
*/
+ /* The kernel is broken, so disable interrupts */
+ local_irq_disable();
+
+ /* Make a note of the crashing cpu. Will be used in the NMI callback. */
+ crashing_cpu = smp_processor_id();
+ nmi_shootdown_cpus();
+
+ if(cpu_has_apic)
+ disable_local_APIC();
+
+#if defined(CONFIG_X86_IO_APIC)
+ disable_IO_APIC();
+#endif
+
+ crash_save_self(regs);
}
diff --git a/kernel/crash_dump.c b/arch/x86_64/kernel/crash_dump.c
index 334c37f5218..942deac4d43 100644
--- a/kernel/crash_dump.c
+++ b/arch/x86_64/kernel/crash_dump.c
@@ -5,18 +5,11 @@
* Copyright (C) IBM Corporation, 2004. All rights reserved
*/
-#include <linux/smp_lock.h>
#include <linux/errno.h>
-#include <linux/proc_fs.h>
-#include <linux/bootmem.h>
-#include <linux/highmem.h>
#include <linux/crash_dump.h>
-#include <asm/io.h>
#include <asm/uaccess.h>
-
-/* Stores the physical address of elf header of crash image. */
-unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
+#include <asm/io.h>
/**
* copy_oldmem_page - copy one page from "oldmem"
@@ -32,30 +25,23 @@ unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
* in the current kernel. We stitch up a pte, similar to kmap_atomic.
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset, int userbuf)
+ size_t csize, unsigned long offset, int userbuf)
{
- void *page, *vaddr;
+ void *vaddr;
if (!csize)
return 0;
- page = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!page)
- return -ENOMEM;
-
- vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
- copy_page(page, vaddr);
- kunmap_atomic(vaddr, KM_PTE0);
+ vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
if (userbuf) {
- if (copy_to_user(buf, (page + offset), csize)) {
- kfree(page);
+ if (copy_to_user(buf, (vaddr + offset), csize)) {
+ iounmap(vaddr);
return -EFAULT;
}
- } else {
- memcpy(buf, (page + offset), csize);
- }
+ } else
+ memcpy(buf, (vaddr + offset), csize);
- kfree(page);
+ iounmap(vaddr);
return csize;
}
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 17579a1a174..293cd71a266 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -559,6 +559,27 @@ void __init parse_memopt(char *p, char **from)
end_user_pfn >>= PAGE_SHIFT;
}
+void __init parse_memmapopt(char *p, char **from)
+{
+ unsigned long long start_at, mem_size;
+
+ mem_size = memparse(p, from);
+ p = *from;
+ if (*p == '@') {
+ start_at = memparse(p+1, from);
+ add_memory_region(start_at, mem_size, E820_RAM);
+ } else if (*p == '#') {
+ start_at = memparse(p+1, from);
+ add_memory_region(start_at, mem_size, E820_ACPI);
+ } else if (*p == '$') {
+ start_at = memparse(p+1, from);
+ add_memory_region(start_at, mem_size, E820_RESERVED);
+ } else {
+ end_user_pfn = (mem_size >> PAGE_SHIFT);
+ }
+ p = *from;
+}
+
unsigned long pci_mem_start = 0xaeedbabe;
/*
diff --git a/arch/x86_64/kernel/early_printk.c b/arch/x86_64/kernel/early_printk.c
index 9cd968dd0f5..6dffb498ccd 100644
--- a/arch/x86_64/kernel/early_printk.c
+++ b/arch/x86_64/kernel/early_printk.c
@@ -216,7 +216,7 @@ void early_printk(const char *fmt, ...)
va_end(ap);
}
-static int keep_early;
+static int __initdata keep_early;
int __init setup_early_printk(char *opt)
{
@@ -226,8 +226,6 @@ int __init setup_early_printk(char *opt)
if (early_console_initialized)
return -1;
- opt = strchr(opt, '=') + 1;
-
strlcpy(buf,opt,sizeof(buf));
space = strchr(buf, ' ');
if (space)
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index 9ff42041bb6..632fc0f59fc 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -313,6 +313,7 @@ int_with_check:
movl threadinfo_flags(%rcx),%edx
andl %edi,%edx
jnz int_careful
+ andl $~TS_COMPAT,threadinfo_status(%rcx)
jmp retint_swapgs
/* Either reschedule or signal or syscall exit tracking needed. */
@@ -673,7 +674,10 @@ ENTRY(spurious_interrupt)
/* error code is on the stack already */
/* handle NMI like exceptions that can happen everywhere */
- .macro paranoidentry sym
+#ifndef DEBUG_IST
+# define DEBUG_IST 0
+#endif
+ .macro paranoidentry sym, ist=0
SAVE_ALL
cld
movl $1,%ebx
@@ -683,10 +687,20 @@ ENTRY(spurious_interrupt)
js 1f
swapgs
xorl %ebx,%ebx
-1: movq %rsp,%rdi
+1:
+ .if \ist
+ movq %gs:pda_data_offset, %rbp
+ .endif
+ movq %rsp,%rdi
movq ORIG_RAX(%rsp),%rsi
movq $-1,ORIG_RAX(%rsp)
+ .if \ist
+ subq $EXCEPTION_STACK_SIZE, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+ .endif
call \sym
+ .if \ist
+ addq $EXCEPTION_STACK_SIZE, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+ .endif
cli
.endm
@@ -754,7 +768,7 @@ error_exit:
jnz retint_careful
swapgs
RESTORE_ARGS 0,8,0
- iretq
+ jmp iret_label
CFI_ENDPROC
error_kernelspace:
@@ -904,7 +918,7 @@ KPROBE_ENTRY(debug)
INTR_FRAME
pushq $0
CFI_ADJUST_CFA_OFFSET 8
- paranoidentry do_debug
+ paranoidentry do_debug, DEBUG_IST
jmp paranoid_exit
CFI_ENDPROC
.previous .text
@@ -959,7 +973,12 @@ paranoid_schedule:
CFI_ENDPROC
KPROBE_ENTRY(int3)
- zeroentry do_int3
+ INTR_FRAME
+ pushq $0
+ CFI_ADJUST_CFA_OFFSET 8
+ paranoidentry do_int3, DEBUG_IST
+ jmp paranoid_exit
+ CFI_ENDPROC
.previous .text
ENTRY(overflow)
@@ -1021,23 +1040,18 @@ ENTRY(machine_check)
CFI_ENDPROC
#endif
-ENTRY(call_debug)
- zeroentry do_call_debug
-
ENTRY(call_softirq)
CFI_STARTPROC
movq %gs:pda_irqstackptr,%rax
- pushq %r15
- CFI_ADJUST_CFA_OFFSET 8
- movq %rsp,%r15
- CFI_DEF_CFA_REGISTER r15
+ movq %rsp,%rdx
+ CFI_DEF_CFA_REGISTER rdx
incl %gs:pda_irqcount
cmove %rax,%rsp
+ pushq %rdx
+ /* TODO: CFI_DEF_CFA_EXPRESSION ... */
call __do_softirq
- movq %r15,%rsp
+ popq %rsp
CFI_DEF_CFA_REGISTER rsp
decl %gs:pda_irqcount
- popq %r15
- CFI_ADJUST_CFA_OFFSET -8
ret
CFI_ENDPROC
diff --git a/arch/x86_64/kernel/genapic_cluster.c b/arch/x86_64/kernel/genapic_cluster.c
index a472d62f899..43fcf62fef0 100644
--- a/arch/x86_64/kernel/genapic_cluster.c
+++ b/arch/x86_64/kernel/genapic_cluster.c
@@ -72,14 +72,11 @@ static void cluster_send_IPI_mask(cpumask_t mask, int vector)
static void cluster_send_IPI_allbutself(int vector)
{
cpumask_t mask = cpu_online_map;
- int me = get_cpu(); /* Ensure we are not preempted when we clear */
- cpu_clear(me, mask);
+ cpu_clear(smp_processor_id(), mask);
if (!cpus_empty(mask))
cluster_send_IPI_mask(mask, vector);
-
- put_cpu();
}
static void cluster_send_IPI_all(int vector)
diff --git a/arch/x86_64/kernel/genapic_flat.c b/arch/x86_64/kernel/genapic_flat.c
index 9da3edb799e..1a2ab825be9 100644
--- a/arch/x86_64/kernel/genapic_flat.c
+++ b/arch/x86_64/kernel/genapic_flat.c
@@ -83,12 +83,11 @@ static void flat_send_IPI_allbutself(int vector)
__send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
#else
cpumask_t allbutme = cpu_online_map;
- int me = get_cpu(); /* Ensure we are not preempted when we clear */
- cpu_clear(me, allbutme);
+
+ cpu_clear(smp_processor_id(), allbutme);
if (!cpus_empty(allbutme))
flat_send_IPI_mask(allbutme, vector);
- put_cpu();
#endif
}
@@ -149,10 +148,9 @@ static void physflat_send_IPI_mask(cpumask_t cpumask, int vector)
static void physflat_send_IPI_allbutself(int vector)
{
cpumask_t allbutme = cpu_online_map;
- int me = get_cpu();
- cpu_clear(me, allbutme);
+
+ cpu_clear(smp_processor_id(), allbutme);
physflat_send_IPI_mask(allbutme, vector);
- put_cpu();
}
static void physflat_send_IPI_all(int vector)
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index 15290968e49..38fc3d5112e 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -379,14 +379,14 @@ gdt:
* Also sysret mandates a special GDT layout
*/
-.align L1_CACHE_BYTES
+.align PAGE_SIZE
/* The TLS descriptors are currently at a different place compared to i386.
Hopefully nobody expects them at a fixed place (Wine?) */
ENTRY(cpu_gdt_table)
.quad 0x0000000000000000 /* NULL descriptor */
- .quad 0x008f9a000000ffff /* __KERNEL_COMPAT32_CS */
+ .quad 0x0 /* unused */
.quad 0x00af9a000000ffff /* __KERNEL_CS */
.quad 0x00cf92000000ffff /* __KERNEL_DS */
.quad 0x00cffa000000ffff /* __USER32_CS */
@@ -396,15 +396,15 @@ ENTRY(cpu_gdt_table)
.quad 0,0 /* TSS */
.quad 0,0 /* LDT */
.quad 0,0,0 /* three TLS descriptors */
- .quad 0x00009a000000ffff /* __KERNEL16_CS - 16bit PM for S3 wakeup. */
- /* base must be patched for real base address. */
+ .quad 0 /* unused */
gdt_end:
/* asm/segment.h:GDT_ENTRIES must match this */
/* This should be a multiple of the cache line size */
- /* GDTs of other CPUs: */
- .fill (GDT_SIZE * NR_CPUS) - (gdt_end - cpu_gdt_table)
+ /* GDTs of other CPUs are now dynamically allocated */
+
+ /* zero the remaining page */
+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
- .align L1_CACHE_BYTES
ENTRY(idt_table)
.rept 256
.quad 0
diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c
index b675c5add01..cea20a66c15 100644
--- a/arch/x86_64/kernel/head64.c
+++ b/arch/x86_64/kernel/head64.c
@@ -92,6 +92,9 @@ void __init x86_64_start_kernel(char * real_mode_data)
memcpy(init_level4_pgt, boot_level4_pgt, PTRS_PER_PGD*sizeof(pgd_t));
asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
+ for (i = 0; i < NR_CPUS; i++)
+ cpu_pda(i) = &boot_cpu_pda[i];
+
pda_init(0);
copy_bootdata(real_mode_data);
#ifdef CONFIG_SMP
@@ -99,7 +102,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
#endif
s = strstr(saved_command_line, "earlyprintk=");
if (s != NULL)
- setup_early_printk(s);
+ setup_early_printk(strchr(s, '=') + 1);
#ifdef CONFIG_NUMA
s = strstr(saved_command_line, "numa=");
if (s != NULL)
diff --git a/arch/x86_64/kernel/i387.c b/arch/x86_64/kernel/i387.c
index d9b22b633e3..a5d7e16b928 100644
--- a/arch/x86_64/kernel/i387.c
+++ b/arch/x86_64/kernel/i387.c
@@ -95,7 +95,7 @@ int save_i387(struct _fpstate __user *buf)
if (!used_math())
return 0;
clear_used_math(); /* trigger finit */
- if (tsk->thread_info->status & TS_USEDFPU) {
+ if (task_thread_info(tsk)->status & TS_USEDFPU) {
err = save_i387_checking((struct i387_fxsave_struct __user *)buf);
if (err) return err;
stts();
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index 6e5101ad3d1..5ecd34ab8c2 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -133,7 +133,7 @@ static void end_8259A_irq (unsigned int irq)
{
if (irq > 256) {
char var;
- printk("return %p stack %p ti %p\n", __builtin_return_address(0), &var, current->thread_info);
+ printk("return %p stack %p ti %p\n", __builtin_return_address(0), &var, task_thread_info(current));
BUG();
}
@@ -549,10 +549,9 @@ void __init init_IRQ(void)
int vector = FIRST_EXTERNAL_VECTOR + i;
if (i >= NR_IRQS)
break;
- if (vector != IA32_SYSCALL_VECTOR && vector != KDB_VECTOR) {
+ if (vector != IA32_SYSCALL_VECTOR)
set_intr_gate(vector, interrupt[i]);
}
- }
#ifdef CONFIG_SMP
/*
diff --git a/arch/x86_64/kernel/init_task.c b/arch/x86_64/kernel/init_task.c
index e0ba5c1043f..ce31d904d60 100644
--- a/arch/x86_64/kernel/init_task.c
+++ b/arch/x86_64/kernel/init_task.c
@@ -44,6 +44,6 @@ EXPORT_SYMBOL(init_task);
* section. Since TSS's are completely CPU-local, we want them
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
*/
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_maxaligned_in_smp = INIT_TSS;
+DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
#define ALIGN_TO_4K __attribute__((section(".data.init_task")))
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 97154ab058b..e8cf44ef877 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -37,6 +37,7 @@
#include <asm/proto.h>
#include <asm/mach_apic.h>
#include <asm/acpi.h>
+#include <asm/dma.h>
#define __apicdebuginit __init
@@ -46,6 +47,9 @@ static int no_timer_check;
int disable_timer_pin_1 __initdata;
+/* Where, if anywhere, the i8259 is connected in external int mode */
+static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
+
static DEFINE_SPINLOCK(ioapic_lock);
/*
@@ -260,9 +264,6 @@ __setup("apic", enable_ioapic_setup);
void __init check_ioapic(void)
{
int num,slot,func;
- if (ioapic_force)
- return;
-
/* Poor man's PCI discovery */
for (num = 0; num < 32; num++) {
for (slot = 0; slot < 32; slot++) {
@@ -284,7 +285,7 @@ void __init check_ioapic(void)
switch (vendor) {
case PCI_VENDOR_ID_VIA:
#ifdef CONFIG_GART_IOMMU
- if ((end_pfn >= (0xffffffff>>PAGE_SHIFT) ||
+ if ((end_pfn > MAX_DMA32_PFN ||
force_iommu) &&
!iommu_aperture_allowed) {
printk(KERN_INFO
@@ -363,7 +364,7 @@ static int find_irq_entry(int apic, int pin, int type)
/*
* Find the pin to which IRQ[irq] (ISA) is connected
*/
-static int find_isa_irq_pin(int irq, int type)
+static int __init find_isa_irq_pin(int irq, int type)
{
int i;
@@ -381,6 +382,31 @@ static int find_isa_irq_pin(int irq, int type)
return -1;
}
+static int __init find_isa_irq_apic(int irq, int type)
+{
+ int i;
+
+ for (i = 0; i < mp_irq_entries; i++) {
+ int lbus = mp_irqs[i].mpc_srcbus;
+
+ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
+ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
+ mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
+ (mp_irqs[i].mpc_irqtype == type) &&
+ (mp_irqs[i].mpc_srcbusirq == irq))
+ break;
+ }
+ if (i < mp_irq_entries) {
+ int apic;
+ for(apic = 0; apic < nr_ioapics; apic++) {
+ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
+ return apic;
+ }
+ }
+
+ return -1;
+}
+
/*
* Find a specific PCI IRQ entry.
* Not an __init, possibly needed by modules
@@ -874,7 +900,7 @@ static void __init setup_IO_APIC_irqs(void)
* Set up the 8259A-master output pin as broadcast to all
* CPUs.
*/
-static void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector)
+static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
{
struct IO_APIC_route_entry entry;
unsigned long flags;
@@ -884,7 +910,7 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector)
disable_8259A_irq(0);
/* mask LVT0 */
- apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
+ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
/*
* We use logical delivery to get the timer IRQ
@@ -908,8 +934,8 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector)
* Add it to the IO-APIC irq-routing table:
*/
spin_lock_irqsave(&ioapic_lock, flags);
- io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1));
- io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0));
+ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
+ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
spin_unlock_irqrestore(&ioapic_lock, flags);
enable_8259A_irq(0);
@@ -1188,7 +1214,8 @@ void __apicdebuginit print_PIC(void)
static void __init enable_IO_APIC(void)
{
union IO_APIC_reg_01 reg_01;
- int i;
+ int i8259_apic, i8259_pin;
+ int i, apic;
unsigned long flags;
for (i = 0; i < PIN_MAP_SIZE; i++) {
@@ -1202,11 +1229,48 @@ static void __init enable_IO_APIC(void)
/*
* The number of IO-APIC IRQ registers (== #pins):
*/
- for (i = 0; i < nr_ioapics; i++) {
+ for (apic = 0; apic < nr_ioapics; apic++) {
spin_lock_irqsave(&ioapic_lock, flags);
- reg_01.raw = io_apic_read(i, 1);
+ reg_01.raw = io_apic_read(apic, 1);
spin_unlock_irqrestore(&ioapic_lock, flags);
- nr_ioapic_registers[i] = reg_01.bits.entries+1;
+ nr_ioapic_registers[apic] = reg_01.bits.entries+1;
+ }
+ for(apic = 0; apic < nr_ioapics; apic++) {
+ int pin;
+ /* See if any of the pins is in ExtINT mode */
+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+ struct IO_APIC_route_entry entry;
+ spin_lock_irqsave(&ioapic_lock, flags);
+ *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
+ *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+
+ /* If the interrupt line is enabled and in ExtInt mode,
+ * we have found the pin where the i8259 is connected.
+ */
+ if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
+ ioapic_i8259.apic = apic;
+ ioapic_i8259.pin = pin;
+ goto found_i8259;
+ }
+ }
+ }
+ found_i8259:
+ /* Look to see if the MP table has reported the ExtINT */
+ i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
+ i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
+ /* Trust the MP table if nothing is set up in the hardware */
+ if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
+ printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
+ ioapic_i8259.pin = i8259_pin;
+ ioapic_i8259.apic = i8259_apic;
+ }
+ /* Complain if the MP table and the hardware disagree */
+ if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
+ (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
+ {
+ printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
}
/*
@@ -1220,7 +1284,6 @@ static void __init enable_IO_APIC(void)
*/
void disable_IO_APIC(void)
{
- int pin;
/*
* Clear the IO-APIC before rebooting:
*/
@@ -1231,8 +1294,7 @@ void disable_IO_APIC(void)
* Put that IOAPIC in virtual wire mode
* so legacy interrupts can be delivered.
*/
- pin = find_isa_irq_pin(0, mp_ExtINT);
- if (pin != -1) {
+ if (ioapic_i8259.pin != -1) {
struct IO_APIC_route_entry entry;
unsigned long flags;
@@ -1243,21 +1305,23 @@ void disable_IO_APIC(void)
entry.polarity = 0; /* High */
entry.delivery_status = 0;
entry.dest_mode = 0; /* Physical */
- entry.delivery_mode = 7; /* ExtInt */
+ entry.delivery_mode = dest_ExtINT; /* ExtInt */
entry.vector = 0;
- entry.dest.physical.physical_dest = 0;
-
+ entry.dest.physical.physical_dest =
+ GET_APIC_ID(apic_read(APIC_ID));
/*
* Add it to the IO-APIC irq-routing table:
*/
spin_lock_irqsave(&ioapic_lock, flags);
- io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1));
- io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0));
+ io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
+ *(((int *)&entry)+1));
+ io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
+ *(((int *)&entry)+0));
spin_unlock_irqrestore(&ioapic_lock, flags);
}
- disconnect_bsp_APIC(pin != -1);
+ disconnect_bsp_APIC(ioapic_i8259.pin != -1);
}
/*
@@ -1571,7 +1635,7 @@ static void enable_lapic_irq (unsigned int irq)
unsigned long v;
v = apic_read(APIC_LVT0);
- apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
+ apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}
static void disable_lapic_irq (unsigned int irq)
@@ -1579,7 +1643,7 @@ static void disable_lapic_irq (unsigned int irq)
unsigned long v;
v = apic_read(APIC_LVT0);
- apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
+ apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}
static void ack_lapic_irq (unsigned int irq)
@@ -1626,20 +1690,21 @@ static void setup_nmi (void)
*/
static inline void unlock_ExtINT_logic(void)
{
- int pin, i;
+ int apic, pin, i;
struct IO_APIC_route_entry entry0, entry1;
unsigned char save_control, save_freq_select;
unsigned long flags;
- pin = find_isa_irq_pin(8, mp_INT);
+ pin = find_isa_irq_pin(8, mp_INT);
+ apic = find_isa_irq_apic(8, mp_INT);
if (pin == -1)
return;
spin_lock_irqsave(&ioapic_lock, flags);
- *(((int *)&entry0) + 1) = io_apic_read(0, 0x11 + 2 * pin);
- *(((int *)&entry0) + 0) = io_apic_read(0, 0x10 + 2 * pin);
+ *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
+ *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
spin_unlock_irqrestore(&ioapic_lock, flags);
- clear_IO_APIC_pin(0, pin);
+ clear_IO_APIC_pin(apic, pin);
memset(&entry1, 0, sizeof(entry1));
@@ -1652,8 +1717,8 @@ static inline void unlock_ExtINT_logic(void)
entry1.vector = 0;
spin_lock_irqsave(&ioapic_lock, flags);
- io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
- io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
spin_unlock_irqrestore(&ioapic_lock, flags);
save_control = CMOS_READ(RTC_CONTROL);
@@ -1671,11 +1736,11 @@ static inline void unlock_ExtINT_logic(void)
CMOS_WRITE(save_control, RTC_CONTROL);
CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
- clear_IO_APIC_pin(0, pin);
+ clear_IO_APIC_pin(apic, pin);
spin_lock_irqsave(&ioapic_lock, flags);
- io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
- io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
spin_unlock_irqrestore(&ioapic_lock, flags);
}
@@ -1687,7 +1752,7 @@ static inline void unlock_ExtINT_logic(void)
*/
static inline void check_timer(void)
{
- int pin1, pin2;
+ int apic1, pin1, apic2, pin2;
int vector;
/*
@@ -1704,14 +1769,17 @@ static inline void check_timer(void)
* the 8259A which implies the virtual wire has to be
* disabled in the local APIC.
*/
- apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
+ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
init_8259A(1);
enable_8259A_irq(0);
- pin1 = find_isa_irq_pin(0, mp_INT);
- pin2 = find_isa_irq_pin(0, mp_ExtINT);
+ pin1 = find_isa_irq_pin(0, mp_INT);
+ apic1 = find_isa_irq_apic(0, mp_INT);
+ pin2 = ioapic_i8259.pin;
+ apic2 = ioapic_i8259.apic;
- apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X pin1=%d pin2=%d\n", vector, pin1, pin2);
+ apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
+ vector, apic1, pin1, apic2, pin2);
if (pin1 != -1) {
/*
@@ -1729,17 +1797,20 @@ static inline void check_timer(void)
clear_IO_APIC_pin(0, pin1);
return;
}
- clear_IO_APIC_pin(0, pin1);
- apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not connected to IO-APIC\n");
+ clear_IO_APIC_pin(apic1, pin1);
+ apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not "
+ "connected to IO-APIC\n");
}
- apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
+ apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) "
+ "through the 8259A ... ");
if (pin2 != -1) {
- apic_printk(APIC_VERBOSE,"\n..... (found pin %d) ...", pin2);
+ apic_printk(APIC_VERBOSE,"\n..... (found apic %d pin %d) ...",
+ apic2, pin2);
/*
* legacy devices should be connected to IO APIC #0
*/
- setup_ExtINT_IRQ0_pin(pin2, vector);
+ setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
if (timer_irq_works()) {
printk("works.\n");
nmi_watchdog_default();
@@ -1751,7 +1822,7 @@ static inline void check_timer(void)
/*
* Cleanup, just in case ...
*/
- clear_IO_APIC_pin(0, pin2);
+ clear_IO_APIC_pin(apic2, pin2);
}
printk(" failed.\n");
@@ -1764,21 +1835,21 @@ static inline void check_timer(void)
disable_8259A_irq(0);
irq_desc[0].handler = &lapic_irq_type;
- apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
+ apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
enable_8259A_irq(0);
if (timer_irq_works()) {
apic_printk(APIC_QUIET, " works.\n");
return;
}
- apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
+ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
apic_printk(APIC_VERBOSE," failed.\n");
apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");
init_8259A(0);
make_8259A_irq(0);
- apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
+ apic_write(APIC_LVT0, APIC_DM_EXTINT);
unlock_ExtINT_logic();
diff --git a/arch/x86_64/kernel/ioport.c b/arch/x86_64/kernel/ioport.c
index cde0e868efe..b81614970ec 100644
--- a/arch/x86_64/kernel/ioport.c
+++ b/arch/x86_64/kernel/ioport.c
@@ -7,6 +7,7 @@
#include <linux/sched.h>
#include <linux/kernel.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index d6a04a8320a..30d2a1e545f 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -17,6 +17,7 @@
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/io_apic.h>
+#include <asm/idle.h>
atomic_t irq_err_count;
#ifdef CONFIG_X86_IO_APIC
@@ -69,13 +70,13 @@ skip:
seq_printf(p, "NMI: ");
for (j = 0; j < NR_CPUS; j++)
if (cpu_online(j))
- seq_printf(p, "%10u ", cpu_pda[j].__nmi_count);
+ seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
seq_putc(p, '\n');
#ifdef CONFIG_X86_LOCAL_APIC
seq_printf(p, "LOC: ");
for (j = 0; j < NR_CPUS; j++)
if (cpu_online(j))
- seq_printf(p, "%10u ", cpu_pda[j].apic_timer_irqs);
+ seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
seq_putc(p, '\n');
#endif
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
@@ -98,6 +99,7 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
/* high bits used in ret_from_ code */
unsigned irq = regs->orig_rax & 0xff;
+ exit_idle();
irq_enter();
__do_IRQ(irq, regs);
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index afe11f4fbd1..8b866a8572c 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -42,8 +42,8 @@
#include <asm/pgtable.h>
#include <asm/kdebug.h>
-static DECLARE_MUTEX(kprobe_mutex);
void jprobe_return_end(void);
+static void __kprobes arch_copy_kprobe(struct kprobe *p);
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -69,12 +69,11 @@ static inline int is_IF_modifier(kprobe_opcode_t *insn)
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
/* insn: must be on special executable page on x86_64. */
- down(&kprobe_mutex);
p->ainsn.insn = get_insn_slot();
- up(&kprobe_mutex);
if (!p->ainsn.insn) {
return -ENOMEM;
}
+ arch_copy_kprobe(p);
return 0;
}
@@ -181,7 +180,7 @@ static inline s32 *is_riprel(u8 *insn)
return NULL;
}
-void __kprobes arch_copy_kprobe(struct kprobe *p)
+static void __kprobes arch_copy_kprobe(struct kprobe *p)
{
s32 *ripdisp;
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
@@ -335,6 +334,15 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
return 1;
}
} else {
+ if (*addr != BREAKPOINT_INSTRUCTION) {
+ /* The breakpoint instruction was removed by
+ * another cpu right after we hit it; no further
+ * handling of this interrupt is appropriate.
+ */
+ regs->rip = (unsigned long)addr;
+ ret = 1;
+ goto no_kprobe;
+ }
p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
goto ss_probe;
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 183dc610542..13a2eada6c9 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -15,6 +15,7 @@
#include <linux/sysdev.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
+#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
@@ -23,9 +24,10 @@
#include <asm/mce.h>
#include <asm/kdebug.h>
#include <asm/uaccess.h>
+#include <asm/smp.h>
#define MISC_MCELOG_MINOR 227
-#define NR_BANKS 5
+#define NR_BANKS 6
static int mce_dont_init;
@@ -91,6 +93,7 @@ void mce_log(struct mce *mce)
static void print_mce(struct mce *m)
{
printk(KERN_EMERG "\n"
+ KERN_EMERG "HARDWARE ERROR\n"
KERN_EMERG
"CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
m->cpu, m->mcgstatus, m->bank, m->status);
@@ -109,6 +112,9 @@ static void print_mce(struct mce *m)
if (m->misc)
printk("MISC %Lx ", m->misc);
printk("\n");
+ printk(KERN_EMERG "This is not a software problem!\n");
+ printk(KERN_EMERG
+ "Run through mcelog --ascii to decode and contact your hardware vendor\n");
}
static void mce_panic(char *msg, struct mce *backup, unsigned long start)
@@ -168,12 +174,12 @@ void do_machine_check(struct pt_regs * regs, long error_code)
int panicm_found = 0;
if (regs)
- notify_die(DIE_NMI, "machine check", regs, error_code, 255, SIGKILL);
+ notify_die(DIE_NMI, "machine check", regs, error_code, 18, SIGKILL);
if (!banks)
return;
memset(&m, 0, sizeof(struct mce));
- m.cpu = hard_smp_processor_id();
+ m.cpu = safe_smp_processor_id();
rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
if (!(m.mcgstatus & MCG_STATUS_RIPV))
kill_it = 1;
@@ -573,6 +579,10 @@ ACCESSOR(bank1ctl,bank[1],mce_restart())
ACCESSOR(bank2ctl,bank[2],mce_restart())
ACCESSOR(bank3ctl,bank[3],mce_restart())
ACCESSOR(bank4ctl,bank[4],mce_restart())
+ACCESSOR(bank5ctl,bank[5],mce_restart())
+static struct sysdev_attribute * bank_attributes[NR_BANKS] = {
+ &attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
+ &attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl};
ACCESSOR(tolerant,tolerant,)
ACCESSOR(check_interval,check_interval,mce_restart())
@@ -580,6 +590,7 @@ ACCESSOR(check_interval,check_interval,mce_restart())
static __cpuinit int mce_create_device(unsigned int cpu)
{
int err;
+ int i;
if (!mce_available(&cpu_data[cpu]))
return -EIO;
@@ -589,11 +600,9 @@ static __cpuinit int mce_create_device(unsigned int cpu)
err = sysdev_register(&per_cpu(device_mce,cpu));
if (!err) {
- sysdev_create_file(&per_cpu(device_mce,cpu), &attr_bank0ctl);
- sysdev_create_file(&per_cpu(device_mce,cpu), &attr_bank1ctl);
- sysdev_create_file(&per_cpu(device_mce,cpu), &attr_bank2ctl);
- sysdev_create_file(&per_cpu(device_mce,cpu), &attr_bank3ctl);
- sysdev_create_file(&per_cpu(device_mce,cpu), &attr_bank4ctl);
+ for (i = 0; i < banks; i++)
+ sysdev_create_file(&per_cpu(device_mce,cpu),
+ bank_attributes[i]);
sysdev_create_file(&per_cpu(device_mce,cpu), &attr_tolerant);
sysdev_create_file(&per_cpu(device_mce,cpu), &attr_check_interval);
}
@@ -603,11 +612,11 @@ static __cpuinit int mce_create_device(unsigned int cpu)
#ifdef CONFIG_HOTPLUG_CPU
static __cpuinit void mce_remove_device(unsigned int cpu)
{
- sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_bank0ctl);
- sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_bank1ctl);
- sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_bank2ctl);
- sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_bank3ctl);
- sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_bank4ctl);
+ int i;
+
+ for (i = 0; i < banks; i++)
+ sysdev_remove_file(&per_cpu(device_mce,cpu),
+ bank_attributes[i]);
sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_tolerant);
sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_check_interval);
sysdev_unregister(&per_cpu(device_mce,cpu));
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
index 1f76175ace0..d3ad7d81266 100644
--- a/arch/x86_64/kernel/mce_amd.c
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -27,6 +27,7 @@
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/percpu.h>
+#include <asm/idle.h>
#define PFX "mce_threshold: "
#define VERSION "version 1.00.9"
@@ -140,6 +141,7 @@ asmlinkage void mce_threshold_interrupt(void)
struct mce m;
ack_APIC_irq();
+ exit_idle();
irq_enter();
memset(&m, 0, sizeof(m));
@@ -318,7 +320,7 @@ static struct kobj_type threshold_ktype = {
static __cpuinit int threshold_create_bank(unsigned int cpu, int bank)
{
int err = 0;
- struct threshold_bank *b = 0;
+ struct threshold_bank *b = NULL;
#ifdef CONFIG_SMP
if (cpu_core_id[cpu] && shared_bank[bank]) { /* symlink */
@@ -407,7 +409,7 @@ static __cpuinit void threshold_remove_bank(unsigned int cpu, int bank)
if (shared_bank[bank] && atomic_read(&b->kobj.kref.refcount) > 2) {
sprintf(name, "bank%i", bank);
sysfs_remove_link(&per_cpu(device_threshold, cpu).kobj, name);
- per_cpu(threshold_banks, cpu)[bank] = 0;
+ per_cpu(threshold_banks, cpu)[bank] = NULL;
} else {
kobject_unregister(&b->kobj);
kfree(per_cpu(threshold_banks, cpu)[bank]);
diff --git a/arch/x86_64/kernel/mce_intel.c b/arch/x86_64/kernel/mce_intel.c
index 0be0a795981..8f533d2c40c 100644
--- a/arch/x86_64/kernel/mce_intel.c
+++ b/arch/x86_64/kernel/mce_intel.c
@@ -10,6 +10,7 @@
#include <asm/msr.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
+#include <asm/idle.h>
static DEFINE_PER_CPU(unsigned long, next_check);
@@ -19,6 +20,7 @@ asmlinkage void smp_thermal_interrupt(void)
ack_APIC_irq();
+ exit_idle();
irq_enter();
if (time_before(jiffies, __get_cpu_var(next_check)))
goto done;
@@ -78,7 +80,7 @@ static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
h = THERMAL_APIC_VECTOR;
h |= (APIC_DM_FIXED | APIC_LVT_MASKED);
- apic_write_around(APIC_LVTTHMR, h);
+ apic_write(APIC_LVTTHMR, h);
rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h);
@@ -87,7 +89,7 @@ static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
wrmsr(MSR_IA32_MISC_ENABLE, l | (1 << 3), h);
l = apic_read(APIC_LVTTHMR);
- apic_write_around(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
+ apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n",
cpu, tm2 ? "TM2" : "TM1");
return;
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 39d445e16f2..5fae6f0cd99 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -151,23 +151,25 @@ int __init check_nmi_watchdog (void)
printk(KERN_INFO "testing NMI watchdog ... ");
+#ifdef CONFIG_SMP
if (nmi_watchdog == NMI_LOCAL_APIC)
smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
+#endif
for (cpu = 0; cpu < NR_CPUS; cpu++)
- counts[cpu] = cpu_pda[cpu].__nmi_count;
+ counts[cpu] = cpu_pda(cpu)->__nmi_count;
local_irq_enable();
mdelay((10*1000)/nmi_hz); // wait 10 ticks
for (cpu = 0; cpu < NR_CPUS; cpu++) {
if (!cpu_online(cpu))
continue;
- if (cpu_pda[cpu].__nmi_count - counts[cpu] <= 5) {
+ if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
endflag = 1;
printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
cpu,
counts[cpu],
- cpu_pda[cpu].__nmi_count);
+ cpu_pda(cpu)->__nmi_count);
nmi_active = 0;
lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
nmi_perfctr_msr = 0;
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index cab471cf3ed..2f5d8328e2b 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -8,53 +8,259 @@
#include <linux/pci.h>
#include <linux/module.h>
#include <asm/io.h>
+#include <asm/proto.h>
-/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA. This is the scatter-gather version of the
- * above pci_map_single interface. Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length. They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
+int iommu_merge __read_mostly = 0;
+EXPORT_SYMBOL(iommu_merge);
+
+dma_addr_t bad_dma_address __read_mostly;
+EXPORT_SYMBOL(bad_dma_address);
+
+/* This tells the BIO block layer to assume merging. Default to off
+ because we cannot guarantee merging later. */
+int iommu_bio_merge __read_mostly = 0;
+EXPORT_SYMBOL(iommu_bio_merge);
+
+int iommu_sac_force __read_mostly = 0;
+EXPORT_SYMBOL(iommu_sac_force);
+
+int no_iommu __read_mostly;
+#ifdef CONFIG_IOMMU_DEBUG
+int panic_on_overflow __read_mostly = 1;
+int force_iommu __read_mostly = 1;
+#else
+int panic_on_overflow __read_mostly = 0;
+int force_iommu __read_mostly= 0;
+#endif
+
+/* Dummy device used for NULL arguments (normally ISA). A smaller
+ DMA mask would probably be better, but this is bug-to-bug compatible
+ with i386. */
+struct device fallback_dev = {
+ .bus_id = "fallback device",
+ .coherent_dma_mask = 0xffffffff,
+ .dma_mask = &fallback_dev.coherent_dma_mask,
+};
+
+/* Allocate DMA memory on node near device */
+noinline static void *
+dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
+{
+ struct page *page;
+ int node;
+ if (dev->bus == &pci_bus_type)
+ node = pcibus_to_node(to_pci_dev(dev)->bus);
+ else
+ node = numa_node_id();
+ page = alloc_pages_node(node, gfp, order);
+ return page ? page_address(page) : NULL;
+}
+
+/*
+ * Allocate memory for a coherent mapping.
*/
-int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
- int nents, int direction)
+void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp)
{
- int i;
-
- BUG_ON(direction == DMA_NONE);
- for (i = 0; i < nents; i++ ) {
- struct scatterlist *s = &sg[i];
- BUG_ON(!s->page);
- s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
- s->dma_length = s->length;
+ void *memory;
+ unsigned long dma_mask = 0;
+ u64 bus;
+
+ if (!dev)
+ dev = &fallback_dev;
+ dma_mask = dev->coherent_dma_mask;
+ if (dma_mask == 0)
+ dma_mask = 0xffffffff;
+
+ /* Kludge to make it bug-to-bug compatible with i386. i386
+ uses the normal dma_mask for alloc_coherent. */
+ dma_mask &= *dev->dma_mask;
+
+ /* Why <=? Even when the mask is smaller than 4GB it is often
+ larger than 16MB, and in that case we have a chance of
+ finding fitting memory in the next higher zone first. If
+ not, retry with true GFP_DMA. -AK */
+ if (dma_mask <= 0xffffffff)
+ gfp |= GFP_DMA32;
+
+ again:
+ memory = dma_alloc_pages(dev, gfp, get_order(size));
+ if (memory == NULL)
+ return NULL;
+
+ {
+ int high, mmu;
+ bus = virt_to_bus(memory);
+ high = (bus + size) >= dma_mask;
+ mmu = high;
+ if (force_iommu && !(gfp & GFP_DMA))
+ mmu = 1;
+ else if (high) {
+ free_pages((unsigned long)memory,
+ get_order(size));
+
+ /* Don't use the 16MB ZONE_DMA unless absolutely
+ needed. It's better to use remapping first. */
+ if (dma_mask < 0xffffffff && !(gfp & GFP_DMA)) {
+ gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
+ goto again;
+ }
+
+ if (dma_ops->alloc_coherent)
+ return dma_ops->alloc_coherent(dev, size,
+ dma_handle, gfp);
+ return NULL;
+ }
+
+ memset(memory, 0, size);
+ if (!mmu) {
+ *dma_handle = virt_to_bus(memory);
+ return memory;
+ }
+ }
+
+ if (dma_ops->alloc_coherent) {
+ free_pages((unsigned long)memory, get_order(size));
+ gfp &= ~(GFP_DMA|GFP_DMA32);
+ return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
+ }
+
+ if (dma_ops->map_simple) {
+ *dma_handle = dma_ops->map_simple(dev, memory,
+ size,
+ PCI_DMA_BIDIRECTIONAL);
+ if (*dma_handle != bad_dma_address)
+ return memory;
}
- return nents;
-}
-EXPORT_SYMBOL(dma_map_sg);
+ if (panic_on_overflow)
+ panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",size);
+ free_pages((unsigned long)memory, get_order(size));
+ return NULL;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
-/* Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
+/*
+ * Unmap coherent memory.
+ * The caller must ensure that the device has finished accessing the mapping.
*/
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, int dir)
+void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t bus)
+{
+ if (dma_ops->unmap_single)
+ dma_ops->unmap_single(dev, bus, size, 0);
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+EXPORT_SYMBOL(dma_free_coherent);
+
+int dma_supported(struct device *dev, u64 mask)
+{
+ if (dma_ops->dma_supported)
+ return dma_ops->dma_supported(dev, mask);
+
+ /* Copied from i386. Doesn't make much sense, because it will
+ only work for pci_alloc_coherent.
+ The caller just has to use GFP_DMA in this case. */
+ if (mask < 0x00ffffff)
+ return 0;
+
+ /* Tell the device to use SAC when IOMMU force is on. This
+ allows the driver to use cheaper accesses in some cases.
+
+ The problem with this is that if we overflow the IOMMU area and
+ return a DAC address as fallback, the device may not handle it
+ correctly.
+
+ As a special case some controllers have a 39bit address
+ mode that is as efficient as 32bit (aic79xx). Don't force
+ SAC for these. Assume all masks <= 40 bits are of this
+ type. Normally this doesn't make any difference, but gives
+ more gentle handling of IOMMU overflow. */
+ if (iommu_sac_force && (mask >= 0xffffffffffULL)) {
+ printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->bus_id,mask);
+ return 0;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(dma_supported);
+
+int dma_set_mask(struct device *dev, u64 mask)
{
- int i;
- for (i = 0; i < nents; i++) {
- struct scatterlist *s = &sg[i];
- BUG_ON(s->page == NULL);
- BUG_ON(s->dma_address == 0);
- dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
- }
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+ *dev->dma_mask = mask;
+ return 0;
}
+EXPORT_SYMBOL(dma_set_mask);
+
+/* iommu=[size][,noagp][,off][,force][,noforce][,leak][,memaper[=order]][,merge]
+ [,forcesac][,fullflush][,nomerge][,biomerge]
+ size set size of iommu (in bytes)
+ noagp don't initialize the AGP driver and use full aperture.
+ off don't use the IOMMU
+ leak turn on simple iommu leak tracing (only when CONFIG_IOMMU_LEAK is on)
+ memaper[=order] allocate a dedicated aperture over RAM with size 32MB^order.
+ noforce don't force IOMMU usage. Default.
+ force Force IOMMU.
+ merge Do lazy merging. This may improve performance on some block devices.
+ Implies force (experimental)
+ biomerge Do merging at the BIO layer. This is more efficient than merge,
+ but should only be done with very big IOMMUs. Implies merge,force.
+ nomerge Don't do SG merging.
+ forcesac Force SAC mode for masks <40bits (experimental)
+ fullflush Flush IOMMU on each allocation (default)
+ nofullflush Don't use IOMMU fullflush
+ allowed override iommu-off workarounds for specific chipsets.
+ soft Use software bounce buffering (default for Intel machines)
+ noaperture Don't touch the aperture for AGP.
+*/
+__init int iommu_setup(char *p)
+{
+ iommu_merge = 1;
-EXPORT_SYMBOL(dma_unmap_sg);
+ while (*p) {
+ if (!strncmp(p,"off",3))
+ no_iommu = 1;
+ /* gart_parse_options has more force support */
+ if (!strncmp(p,"force",5))
+ force_iommu = 1;
+ if (!strncmp(p,"noforce",7)) {
+ iommu_merge = 0;
+ force_iommu = 0;
+ }
+
+ if (!strncmp(p, "biomerge",8)) {
+ iommu_bio_merge = 4096;
+ iommu_merge = 1;
+ force_iommu = 1;
+ }
+ if (!strncmp(p, "panic",5))
+ panic_on_overflow = 1;
+ if (!strncmp(p, "nopanic",7))
+ panic_on_overflow = 0;
+ if (!strncmp(p, "merge",5)) {
+ iommu_merge = 1;
+ force_iommu = 1;
+ }
+ if (!strncmp(p, "nomerge",7))
+ iommu_merge = 0;
+ if (!strncmp(p, "forcesac",8))
+ iommu_sac_force = 1;
+
+#ifdef CONFIG_SWIOTLB
+ if (!strncmp(p, "soft",4))
+ swiotlb = 1;
+#endif
+
+#ifdef CONFIG_GART_IOMMU
+ gart_parse_options(p);
+#endif
+
+ p += strcspn(p, ",");
+ if (*p == ',')
+ ++p;
+ }
+ return 1;
+}
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index 2e28e855ec3..c37fc7726ba 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -30,8 +30,8 @@
#include <asm/proto.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
-
-dma_addr_t bad_dma_address;
+#include <asm/swiotlb.h>
+#include <asm/dma.h>
unsigned long iommu_bus_base; /* GART remapping area (physical) */
static unsigned long iommu_size; /* size of remapping area bytes */
@@ -39,18 +39,6 @@ static unsigned long iommu_pages; /* .. and in pages */
u32 *iommu_gatt_base; /* Remapping table */
-int no_iommu;
-static int no_agp;
-#ifdef CONFIG_IOMMU_DEBUG
-int panic_on_overflow = 1;
-int force_iommu = 1;
-#else
-int panic_on_overflow = 0;
-int force_iommu = 0;
-#endif
-int iommu_merge = 1;
-int iommu_sac_force = 0;
-
/* If this is disabled the IOMMU will use an optimized flushing strategy
 of only flushing when a mapping is reused. With it true the GART is flushed
for every mapping. Problem is that doing the lazy flush seems to trigger
@@ -58,10 +46,6 @@ int iommu_sac_force = 0;
also seen with Qlogic at least). */
int iommu_fullflush = 1;
-/* This tells the BIO block layer to assume merging. Default to off
- because we cannot guarantee merging later. */
-int iommu_bio_merge = 0;
-
#define MAX_NB 8
/* Allocation bitmap for the remapping area */
@@ -102,16 +86,6 @@ AGPEXTERN __u32 *agp_gatt_table;
static unsigned long next_bit; /* protected by iommu_bitmap_lock */
static int need_flush; /* global flush state. set for each gart wrap */
-static dma_addr_t dma_map_area(struct device *dev, unsigned long phys_mem,
- size_t size, int dir, int do_panic);
-
-/* Dummy device used for NULL arguments (normally ISA). Better would
- be probably a smaller DMA mask, but this is bug-to-bug compatible to i386. */
-static struct device fallback_dev = {
- .bus_id = "fallback device",
- .coherent_dma_mask = 0xffffffff,
- .dma_mask = &fallback_dev.coherent_dma_mask,
-};
static unsigned long alloc_iommu(int size)
{
@@ -185,114 +159,7 @@ static void flush_gart(struct device *dev)
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
-/* Allocate DMA memory on node near device */
-noinline
-static void *dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
-{
- struct page *page;
- int node;
- if (dev->bus == &pci_bus_type)
- node = pcibus_to_node(to_pci_dev(dev)->bus);
- else
- node = numa_node_id();
- page = alloc_pages_node(node, gfp, order);
- return page ? page_address(page) : NULL;
-}
-
-/*
- * Allocate memory for a coherent mapping.
- */
-void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp)
-{
- void *memory;
- unsigned long dma_mask = 0;
- u64 bus;
- if (!dev)
- dev = &fallback_dev;
- dma_mask = dev->coherent_dma_mask;
- if (dma_mask == 0)
- dma_mask = 0xffffffff;
-
- /* Kludge to make it bug-to-bug compatible with i386. i386
- uses the normal dma_mask for alloc_coherent. */
- dma_mask &= *dev->dma_mask;
-
- /* Why <=? Even when the mask is smaller than 4GB it is often larger
- than 16MB and in this case we have a chance of finding fitting memory
- in the next higher zone first. If not retry with true GFP_DMA. -AK */
- if (dma_mask <= 0xffffffff)
- gfp |= GFP_DMA32;
-
- again:
- memory = dma_alloc_pages(dev, gfp, get_order(size));
- if (memory == NULL)
- return NULL;
-
- {
- int high, mmu;
- bus = virt_to_bus(memory);
- high = (bus + size) >= dma_mask;
- mmu = high;
- if (force_iommu && !(gfp & GFP_DMA))
- mmu = 1;
- if (no_iommu || dma_mask < 0xffffffffUL) {
- if (high) {
- free_pages((unsigned long)memory,
- get_order(size));
-
- if (swiotlb) {
- return
- swiotlb_alloc_coherent(dev, size,
- dma_handle,
- gfp);
- }
-
- if (!(gfp & GFP_DMA)) {
- gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
- goto again;
- }
- return NULL;
- }
- mmu = 0;
- }
- memset(memory, 0, size);
- if (!mmu) {
- *dma_handle = virt_to_bus(memory);
- return memory;
- }
- }
-
- *dma_handle = dma_map_area(dev, bus, size, PCI_DMA_BIDIRECTIONAL, 0);
- if (*dma_handle == bad_dma_address)
- goto error;
- flush_gart(dev);
- return memory;
-
-error:
- if (panic_on_overflow)
- panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n", size);
- free_pages((unsigned long)memory, get_order(size));
- return NULL;
-}
-
-/*
- * Unmap coherent memory.
- * The caller must ensure that the device has finished accessing the mapping.
- */
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t bus)
-{
- if (swiotlb) {
- swiotlb_free_coherent(dev, size, vaddr, bus);
- return;
- }
-
- dma_unmap_single(dev, bus, size, 0);
- free_pages((unsigned long)vaddr, get_order(size));
-}
#ifdef CONFIG_IOMMU_LEAK
@@ -326,7 +193,7 @@ void dump_leak(void)
#define CLEAR_LEAK(x)
#endif
-static void iommu_full(struct device *dev, size_t size, int dir, int do_panic)
+static void iommu_full(struct device *dev, size_t size, int dir)
{
/*
* Ran out of IOMMU space for this operation. This is very bad.
@@ -342,11 +209,11 @@ static void iommu_full(struct device *dev, size_t size, int dir, int do_panic)
"PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
size, dev->bus_id);
- if (size > PAGE_SIZE*EMERGENCY_PAGES && do_panic) {
+ if (size > PAGE_SIZE*EMERGENCY_PAGES) {
if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
panic("PCI-DMA: Memory would be corrupted\n");
if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
- panic("PCI-DMA: Random memory would be DMAed\n");
+ panic(KERN_ERR "PCI-DMA: Random memory would be DMAed\n");
}
#ifdef CONFIG_IOMMU_LEAK
@@ -385,8 +252,8 @@ static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t
/* Map a single continuous physical area into the IOMMU.
* Caller needs to check if the iommu is needed and flush.
*/
-static dma_addr_t dma_map_area(struct device *dev, unsigned long phys_mem,
- size_t size, int dir, int do_panic)
+static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
+ size_t size, int dir)
{
unsigned long npages = to_pages(phys_mem, size);
unsigned long iommu_page = alloc_iommu(npages);
@@ -396,7 +263,7 @@ static dma_addr_t dma_map_area(struct device *dev, unsigned long phys_mem,
return phys_mem;
if (panic_on_overflow)
panic("dma_map_area overflow %lu bytes\n", size);
- iommu_full(dev, size, dir, do_panic);
+ iommu_full(dev, size, dir);
return bad_dma_address;
}
@@ -408,15 +275,21 @@ static dma_addr_t dma_map_area(struct device *dev, unsigned long phys_mem,
return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
+static dma_addr_t gart_map_simple(struct device *dev, char *buf,
+ size_t size, int dir)
+{
+ dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);
+ flush_gart(dev);
+ return map;
+}
+
/* Map a single area into the IOMMU */
-dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size, int dir)
+dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
{
unsigned long phys_mem, bus;
BUG_ON(dir == DMA_NONE);
- if (swiotlb)
- return swiotlb_map_single(dev,addr,size,dir);
if (!dev)
dev = &fallback_dev;
@@ -424,10 +297,24 @@ dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size, int dir)
if (!need_iommu(dev, phys_mem, size))
return phys_mem;
- bus = dma_map_area(dev, phys_mem, size, dir, 1);
- flush_gart(dev);
+ bus = gart_map_simple(dev, addr, size, dir);
return bus;
-}
+}
+
+/*
+ * Wrapper for pci_unmap_single working with scatterlists.
+ */
+void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
+{
+ int i;
+
+ for (i = 0; i < nents; i++) {
+ struct scatterlist *s = &sg[i];
+ if (!s->dma_length || !s->length)
+ break;
+ dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
+ }
+}
/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
@@ -443,10 +330,10 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
struct scatterlist *s = &sg[i];
unsigned long addr = page_to_phys(s->page) + s->offset;
if (nonforced_iommu(dev, addr, s->length)) {
- addr = dma_map_area(dev, addr, s->length, dir, 0);
+ addr = dma_map_area(dev, addr, s->length, dir);
if (addr == bad_dma_address) {
if (i > 0)
- dma_unmap_sg(dev, sg, i, dir);
+ gart_unmap_sg(dev, sg, i, dir);
nents = 0;
sg[0].dma_length = 0;
break;
@@ -515,7 +402,7 @@ static inline int dma_map_cont(struct scatterlist *sg, int start, int stopat,
* DMA map all entries in a scatterlist.
* Merge chunks that have page aligned sizes into a continuous mapping.
*/
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
+int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
int i;
int out;
@@ -527,8 +414,6 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
if (nents == 0)
return 0;
- if (swiotlb)
- return swiotlb_map_sg(dev,sg,nents,dir);
if (!dev)
dev = &fallback_dev;
@@ -571,13 +456,13 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
error:
flush_gart(NULL);
- dma_unmap_sg(dev, sg, nents, dir);
+ gart_unmap_sg(dev, sg, nents, dir);
/* When it was forced try again unforced */
if (force_iommu)
return dma_map_sg_nonforce(dev, sg, nents, dir);
if (panic_on_overflow)
panic("dma_map_sg: overflow on %lu pages\n", pages);
- iommu_full(dev, pages << PAGE_SHIFT, dir, 0);
+ iommu_full(dev, pages << PAGE_SHIFT, dir);
for (i = 0; i < nents; i++)
sg[i].dma_address = bad_dma_address;
return 0;
@@ -586,18 +471,13 @@ error:
/*
* Free a DMA mapping.
*/
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size, int direction)
{
unsigned long iommu_page;
int npages;
int i;
- if (swiotlb) {
- swiotlb_unmap_single(dev,dma_addr,size,direction);
- return;
- }
-
if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
dma_addr >= iommu_bus_base + iommu_size)
return;
@@ -610,68 +490,7 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
free_iommu(iommu_page, npages);
}
-/*
- * Wrapper for pci_unmap_single working with scatterlists.
- */
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
-{
- int i;
- if (swiotlb) {
- swiotlb_unmap_sg(dev,sg,nents,dir);
- return;
- }
- for (i = 0; i < nents; i++) {
- struct scatterlist *s = &sg[i];
- if (!s->dma_length || !s->length)
- break;
- dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
- }
-}
-
-int dma_supported(struct device *dev, u64 mask)
-{
- /* Copied from i386. Doesn't make much sense, because it will
- only work for pci_alloc_coherent.
- The caller just has to use GFP_DMA in this case. */
- if (mask < 0x00ffffff)
- return 0;
-
- /* Tell the device to use SAC when IOMMU force is on.
- This allows the driver to use cheaper accesses in some cases.
-
- Problem with this is that if we overflow the IOMMU area
- and return DAC as fallback address the device may not handle it correctly.
-
- As a special case some controllers have a 39bit address mode
- that is as efficient as 32bit (aic79xx). Don't force SAC for these.
- Assume all masks <= 40 bits are of this type. Normally this doesn't
- make any difference, but gives more gentle handling of IOMMU overflow. */
- if (iommu_sac_force && (mask >= 0xffffffffffULL)) {
- printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->bus_id,mask);
- return 0;
- }
-
- return 1;
-}
-
-int dma_get_cache_alignment(void)
-{
- return boot_cpu_data.x86_clflush_size;
-}
-
-EXPORT_SYMBOL(dma_unmap_sg);
-EXPORT_SYMBOL(dma_map_sg);
-EXPORT_SYMBOL(dma_map_single);
-EXPORT_SYMBOL(dma_unmap_single);
-EXPORT_SYMBOL(dma_supported);
-EXPORT_SYMBOL(no_iommu);
-EXPORT_SYMBOL(force_iommu);
-EXPORT_SYMBOL(bad_dma_address);
-EXPORT_SYMBOL(iommu_bio_merge);
-EXPORT_SYMBOL(iommu_sac_force);
-EXPORT_SYMBOL(dma_get_cache_alignment);
-EXPORT_SYMBOL(dma_alloc_coherent);
-EXPORT_SYMBOL(dma_free_coherent);
+static int no_agp;
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
@@ -772,12 +591,27 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
nommu:
/* Should not happen anymore */
printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
- KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.");
+ KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
return -1;
}
extern int agp_amd64_init(void);
+static struct dma_mapping_ops gart_dma_ops = {
+ .mapping_error = NULL,
+ .map_single = gart_map_single,
+ .map_simple = gart_map_simple,
+ .unmap_single = gart_unmap_single,
+ .sync_single_for_cpu = NULL,
+ .sync_single_for_device = NULL,
+ .sync_single_range_for_cpu = NULL,
+ .sync_single_range_for_device = NULL,
+ .sync_sg_for_cpu = NULL,
+ .sync_sg_for_device = NULL,
+ .map_sg = gart_map_sg,
+ .unmap_sg = gart_unmap_sg,
+};
+
static int __init pci_iommu_init(void)
{
struct agp_kern_info info;
@@ -799,16 +633,15 @@ static int __init pci_iommu_init(void)
if (swiotlb) {
no_iommu = 1;
- printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
return -1;
}
if (no_iommu ||
- (!force_iommu && end_pfn < 0xffffffff>>PAGE_SHIFT) ||
+ (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
!iommu_aperture ||
(no_agp && init_k8_gatt(&info) < 0)) {
- printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
no_iommu = 1;
+ no_iommu_init();
return -1;
}
@@ -885,100 +718,50 @@ static int __init pci_iommu_init(void)
flush_gart(NULL);
+ printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
+ dma_ops = &gart_dma_ops;
+
return 0;
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
-/* iommu=[size][,noagp][,off][,force][,noforce][,leak][,memaper[=order]][,merge]
- [,forcesac][,fullflush][,nomerge][,biomerge]
- size set size of iommu (in bytes)
- noagp don't initialize the AGP driver and use full aperture.
- off don't use the IOMMU
- leak turn on simple iommu leak tracing (only when CONFIG_IOMMU_LEAK is on)
- memaper[=order] allocate an own aperture over RAM with size 32MB^order.
- noforce don't force IOMMU usage. Default.
- force Force IOMMU.
- merge Do lazy merging. This may improve performance on some block devices.
- Implies force (experimental)
- biomerge Do merging at the BIO layer. This is more efficient than merge,
- but should be only done with very big IOMMUs. Implies merge,force.
- nomerge Don't do SG merging.
- forcesac For SAC mode for masks <40bits (experimental)
- fullflush Flush IOMMU on each allocation (default)
- nofullflush Don't use IOMMU fullflush
- allowed overwrite iommu off workarounds for specific chipsets.
- soft Use software bounce buffering (default for Intel machines)
- noaperture Don't touch the aperture for AGP.
-*/
-__init int iommu_setup(char *p)
-{
- int arg;
-
- while (*p) {
- if (!strncmp(p,"noagp",5))
- no_agp = 1;
- if (!strncmp(p,"off",3))
- no_iommu = 1;
- if (!strncmp(p,"force",5)) {
- force_iommu = 1;
- iommu_aperture_allowed = 1;
- }
- if (!strncmp(p,"allowed",7))
- iommu_aperture_allowed = 1;
- if (!strncmp(p,"noforce",7)) {
- iommu_merge = 0;
- force_iommu = 0;
- }
- if (!strncmp(p, "memaper", 7)) {
- fallback_aper_force = 1;
- p += 7;
- if (*p == '=') {
- ++p;
- if (get_option(&p, &arg))
- fallback_aper_order = arg;
- }
- }
- if (!strncmp(p, "biomerge",8)) {
- iommu_bio_merge = 4096;
- iommu_merge = 1;
- force_iommu = 1;
- }
- if (!strncmp(p, "panic",5))
- panic_on_overflow = 1;
- if (!strncmp(p, "nopanic",7))
- panic_on_overflow = 0;
- if (!strncmp(p, "merge",5)) {
- iommu_merge = 1;
- force_iommu = 1;
- }
- if (!strncmp(p, "nomerge",7))
- iommu_merge = 0;
- if (!strncmp(p, "forcesac",8))
- iommu_sac_force = 1;
- if (!strncmp(p, "fullflush",8))
- iommu_fullflush = 1;
- if (!strncmp(p, "nofullflush",11))
- iommu_fullflush = 0;
- if (!strncmp(p, "soft",4))
- swiotlb = 1;
- if (!strncmp(p, "noaperture",10))
- fix_aperture = 0;
+void gart_parse_options(char *p)
+{
+ int arg;
+
#ifdef CONFIG_IOMMU_LEAK
- if (!strncmp(p,"leak",4)) {
- leak_trace = 1;
- p += 4;
- if (*p == '=') ++p;
- if (isdigit(*p) && get_option(&p, &arg))
- iommu_leak_pages = arg;
- } else
+ if (!strncmp(p,"leak",4)) {
+ leak_trace = 1;
+ p += 4;
+ if (*p == '=') ++p;
+ if (isdigit(*p) && get_option(&p, &arg))
+ iommu_leak_pages = arg;
+ }
#endif
- if (isdigit(*p) && get_option(&p, &arg))
- iommu_size = arg;
- p += strcspn(p, ",");
- if (*p == ',')
- ++p;
- }
- return 1;
-}
+ if (isdigit(*p) && get_option(&p, &arg))
+ iommu_size = arg;
+ if (!strncmp(p, "fullflush",8))
+ iommu_fullflush = 1;
+ if (!strncmp(p, "nofullflush",11))
+ iommu_fullflush = 0;
+ if (!strncmp(p,"noagp",5))
+ no_agp = 1;
+ if (!strncmp(p, "noaperture",10))
+ fix_aperture = 0;
+ /* duplicated from pci-dma.c */
+ if (!strncmp(p,"force",5))
+ iommu_aperture_allowed = 1;
+ if (!strncmp(p,"allowed",7))
+ iommu_aperture_allowed = 1;
+ if (!strncmp(p, "memaper", 7)) {
+ fallback_aper_force = 1;
+ p += 7;
+ if (*p == '=') {
+ ++p;
+ if (get_option(&p, &arg))
+ fallback_aper_order = arg;
+ }
+ }
+}
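
[Editor's note, illustrative only] The comma walk that used to live in the removed iommu_setup() (strcspn() over the iommu= string) now has to happen in the caller -- presumably the generic iommu= handler, not shown in this hunk -- while gart_parse_options() only inspects the token it is handed. A minimal standalone sketch of that split, reusing the same strcspn() walk as the removed parser; the option string and parse_token() are only examples:

    #include <stdio.h>
    #include <string.h>

    /* stand-in for gart_parse_options(): just report the token */
    static void parse_token(const char *tok, size_t len)
    {
            printf("token: %.*s\n", (int)len, tok);
    }

    int main(void)
    {
            char opts[] = "noagp,fullflush,memaper=2,65536"; /* hypothetical iommu= value */
            const char *p = opts;

            while (*p) {
                    size_t len = strcspn(p, ",");   /* same walk the old iommu_setup() used */
                    parse_token(p, len);
                    p += len;
                    if (*p == ',')
                            ++p;
            }
            return 0;
    }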
diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c
index 5a981dca87f..e4156497519 100644
--- a/arch/x86_64/kernel/pci-nommu.c
+++ b/arch/x86_64/kernel/pci-nommu.c
@@ -6,89 +6,94 @@
#include <linux/string.h>
#include <asm/proto.h>
#include <asm/processor.h>
+#include <asm/dma.h>
-int iommu_merge = 0;
-EXPORT_SYMBOL(iommu_merge);
-
-dma_addr_t bad_dma_address;
-EXPORT_SYMBOL(bad_dma_address);
-
-int iommu_bio_merge = 0;
-EXPORT_SYMBOL(iommu_bio_merge);
-
-int iommu_sac_force = 0;
-EXPORT_SYMBOL(iommu_sac_force);
-
-/*
- * Dummy IO MMU functions
- */
-
-void *dma_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+static int
+check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
- void *ret;
- u64 mask;
- int order = get_order(size);
-
- if (hwdev)
- mask = hwdev->coherent_dma_mask & *hwdev->dma_mask;
- else
- mask = 0xffffffff;
- for (;;) {
- ret = (void *)__get_free_pages(gfp, order);
- if (ret == NULL)
- return NULL;
- *dma_handle = virt_to_bus(ret);
- if ((*dma_handle & ~mask) == 0)
- break;
- free_pages((unsigned long)ret, order);
- if (gfp & GFP_DMA)
- return NULL;
- gfp |= GFP_DMA;
+ if (hwdev && bus + size > *hwdev->dma_mask) {
+ printk(KERN_ERR
+ "nommu_%s: overflow %Lx+%lu of device mask %Lx\n",
+ name, (long long)bus, size, (long long)*hwdev->dma_mask);
+ return 0;
}
+ return 1;
+}
- memset(ret, 0, size);
- return ret;
+static dma_addr_t
+nommu_map_single(struct device *hwdev, void *ptr, size_t size,
+ int direction)
+{
+ dma_addr_t bus = virt_to_bus(ptr);
+ if (!check_addr("map_single", hwdev, bus, size))
+ return bad_dma_address;
+ return bus;
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-void dma_free_coherent(struct device *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+void nommu_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
+ int direction)
{
- free_pages((unsigned long)vaddr, get_order(size));
}
-EXPORT_SYMBOL(dma_free_coherent);
-int dma_supported(struct device *hwdev, u64 mask)
+/* Map a set of buffers described by scatterlist in streaming
+ * mode for DMA. This is the scatter-gather version of the
+ * above pci_map_single interface. Here the scatter gather list
+ * elements are each tagged with the appropriate dma address
+ * and length. They are obtained via sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ * DMA address/length pairs than there are SG table elements.
+ * (for example via virtual mapping capabilities)
+ * The routine returns the number of addr/length pairs actually
+ * used, at most nents.
+ *
+ * Device ownership issues as mentioned above for pci_map_single are
+ * the same here.
+ */
+int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
+ int nents, int direction)
{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- * RED-PEN this won't work for pci_map_single. Caller has to
- * use GFP_DMA in the first place.
- */
- if (mask < 0x00ffffff)
- return 0;
+ int i;
- return 1;
-}
-EXPORT_SYMBOL(dma_supported);
+ BUG_ON(direction == DMA_NONE);
+ for (i = 0; i < nents; i++ ) {
+ struct scatterlist *s = &sg[i];
+ BUG_ON(!s->page);
+ s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
+ if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
+ return 0;
+ s->dma_length = s->length;
+ }
+ return nents;
+}
-int dma_get_cache_alignment(void)
+/* Unmap a set of streaming mode DMA translations.
+ * Again, cpu read rules concerning calls here are the same as for
+ * pci_unmap_single() above.
+ */
+void nommu_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, int dir)
{
- return boot_cpu_data.x86_clflush_size;
}
-EXPORT_SYMBOL(dma_get_cache_alignment);
-static int __init check_ram(void)
-{
- if (end_pfn >= 0xffffffff>>PAGE_SHIFT) {
- printk(
- KERN_ERR "WARNING more than 4GB of memory but IOMMU not compiled in.\n"
- KERN_ERR "WARNING 32bit PCI may malfunction.\n");
- }
- return 0;
-}
-__initcall(check_ram);
+struct dma_mapping_ops nommu_dma_ops = {
+ .map_single = nommu_map_single,
+ .unmap_single = nommu_unmap_single,
+ .map_sg = nommu_map_sg,
+ .unmap_sg = nommu_unmap_sg,
+ .is_phys = 1,
+};
+void __init no_iommu_init(void)
+{
+ if (dma_ops)
+ return;
+ printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
+ dma_ops = &nommu_dma_ops;
+ if (end_pfn > MAX_DMA32_PFN) {
+ printk(KERN_ERR
+ "WARNING more than 4GB of memory but IOMMU not compiled in.\n"
+ KERN_ERR "WARNING 32bit PCI may malfunction.\n"
+ KERN_ERR "You might want to enable CONFIG_GART_IOMMU\n");
+ }
+}
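
[Editor's note, illustrative only] The point of filling in struct dma_mapping_ops here (and in the GART and swiotlb files) is that the generic DMA entry points become thin dispatchers through the dma_ops pointer instead of the per-file EXPORT_SYMBOLs removed above. A rough sketch of the shape such a wrapper takes; this is an assumption about the accompanying asm-x86_64 header, not the literal header from this patch:

    /* hedged sketch only: the real wrappers live in the asm headers */
    extern struct dma_mapping_ops *dma_ops;

    static inline dma_addr_t
    dma_map_single(struct device *hwdev, void *ptr, size_t size, int direction)
    {
            /* nommu, GART or swiotlb -- whichever init routine won at boot */
            return dma_ops->map_single(hwdev, ptr, size, direction);
    }

    static inline int
    dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
               int direction)
    {
            return dma_ops->map_sg(hwdev, sg, nents, direction);
    }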
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
new file mode 100644
index 00000000000..3569a25ad7f
--- /dev/null
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -0,0 +1,42 @@
+/* Glue code to lib/swiotlb.c */
+
+#include <linux/pci.h>
+#include <linux/cache.h>
+#include <linux/module.h>
+#include <asm/dma-mapping.h>
+#include <asm/proto.h>
+#include <asm/swiotlb.h>
+#include <asm/dma.h>
+
+int swiotlb __read_mostly;
+EXPORT_SYMBOL(swiotlb);
+
+struct dma_mapping_ops swiotlb_dma_ops = {
+ .mapping_error = swiotlb_dma_mapping_error,
+ .alloc_coherent = swiotlb_alloc_coherent,
+ .free_coherent = swiotlb_free_coherent,
+ .map_single = swiotlb_map_single,
+ .unmap_single = swiotlb_unmap_single,
+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = swiotlb_sync_single_for_device,
+ .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
+ .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = swiotlb_sync_sg_for_device,
+ .map_sg = swiotlb_map_sg,
+ .unmap_sg = swiotlb_unmap_sg,
+ .dma_supported = NULL,
+};
+
+void pci_swiotlb_init(void)
+{
+ /* don't initialize swiotlb if iommu=off (no_iommu=1) */
+ if (!iommu_aperture && !no_iommu &&
+ (end_pfn > MAX_DMA32_PFN || force_iommu))
+ swiotlb = 1;
+ if (swiotlb) {
+ swiotlb_init();
+ printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
+ dma_ops = &swiotlb_dma_ops;
+ }
+}
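
[Editor's note, illustrative only] The swiotlb trigger (end_pfn > MAX_DMA32_PFN) is the same test the old code spelled as end_pfn < 0xffffffff >> PAGE_SHIFT, i.e. "is there RAM above what a 32-bit DMA mask can reach". A standalone check of the constant, assuming 4KB pages (PAGE_SHIFT == 12):

    #include <stdio.h>

    int main(void)
    {
            unsigned long max_dma32_pfn = 0xffffffffUL >> 12;       /* == 0xfffff */
            /* first byte address beyond the reach of a 32-bit DMA mask */
            unsigned long long limit = (max_dma32_pfn + 1ULL) << 12;

            printf("MAX_DMA32_PFN = %#lx -> %llu MB directly addressable\n",
                   max_dma32_pfn, limit >> 20);     /* prints 4096 MB */
            return 0;
    }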
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 7519fc520eb..8ded407e4a9 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -36,6 +36,7 @@
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/kprobes.h>
+#include <linux/notifier.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -50,13 +51,12 @@
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
+#include <asm/idle.h>
asmlinkage extern void ret_from_fork(void);
unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
-static atomic_t hlt_counter = ATOMIC_INIT(0);
-
unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);
@@ -66,19 +66,49 @@ EXPORT_SYMBOL(boot_option_idle_override);
void (*pm_idle)(void);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
-void disable_hlt(void)
+static struct notifier_block *idle_notifier;
+static DEFINE_SPINLOCK(idle_notifier_lock);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&idle_notifier_lock, flags);
+ notifier_chain_register(&idle_notifier, n);
+ spin_unlock_irqrestore(&idle_notifier_lock, flags);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
{
- atomic_inc(&hlt_counter);
+ unsigned long flags;
+ spin_lock_irqsave(&idle_notifier_lock, flags);
+ notifier_chain_unregister(&idle_notifier, n);
+ spin_unlock_irqrestore(&idle_notifier_lock, flags);
}
+EXPORT_SYMBOL(idle_notifier_unregister);
-EXPORT_SYMBOL(disable_hlt);
+enum idle_state { CPU_IDLE, CPU_NOT_IDLE };
+static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
-void enable_hlt(void)
+void enter_idle(void)
{
- atomic_dec(&hlt_counter);
+ __get_cpu_var(idle_state) = CPU_IDLE;
+ notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}
-EXPORT_SYMBOL(enable_hlt);
+static void __exit_idle(void)
+{
+ __get_cpu_var(idle_state) = CPU_NOT_IDLE;
+ notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+}
+
+/* Called from interrupts to signify idle end */
+void exit_idle(void)
+{
+ if (current->pid | read_pda(irqcount))
+ return;
+ __exit_idle();
+}
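
[Editor's note, illustrative only] With exit_idle() also wired into the interrupt paths (see the smp.c hunk below), other code can watch a CPU entering and leaving the idle loop through the new chain. A hedged sketch of a consumer; IDLE_START/IDLE_END and <asm/idle.h> are assumed from this patch series, and the callback name is made up:

    #include <linux/notifier.h>
    #include <asm/idle.h>       /* assumed: provides IDLE_START/IDLE_END */

    static int example_idle_event(struct notifier_block *nb,
                                  unsigned long action, void *unused)
    {
            if (action == IDLE_START)
                    ;       /* this CPU is about to halt in the idle loop */
            else if (action == IDLE_END)
                    ;       /* an interrupt just pulled it back out */
            return 0;
    }

    static struct notifier_block example_idle_nb = {
            .notifier_call = example_idle_event,
    };

    /* from some driver init path: idle_notifier_register(&example_idle_nb); */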
/*
* We use this if we don't have any better
@@ -88,21 +118,16 @@ void default_idle(void)
{
local_irq_enable();
- if (!atomic_read(&hlt_counter)) {
- clear_thread_flag(TIF_POLLING_NRFLAG);
- smp_mb__after_clear_bit();
- while (!need_resched()) {
- local_irq_disable();
- if (!need_resched())
- safe_halt();
- else
- local_irq_enable();
- }
- set_thread_flag(TIF_POLLING_NRFLAG);
- } else {
- while (!need_resched())
- cpu_relax();
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+ smp_mb__after_clear_bit();
+ while (!need_resched()) {
+ local_irq_disable();
+ if (!need_resched())
+ safe_halt();
+ else
+ local_irq_enable();
}
+ set_thread_flag(TIF_POLLING_NRFLAG);
}
/*
@@ -157,7 +182,7 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
DECLARE_PER_CPU(int, cpu_state);
#include <asm/nmi.h>
-/* We don't actually take CPU down, just spin without interrupts. */
+/* With physical CPU hotplug we actually halt the CPU */
static inline void play_dead(void)
{
idle_task_exit();
@@ -166,8 +191,9 @@ static inline void play_dead(void)
/* Ack it */
__get_cpu_var(cpu_state) = CPU_DEAD;
+ local_irq_disable();
while (1)
- safe_halt();
+ halt();
}
#else
static inline void play_dead(void)
@@ -200,7 +226,9 @@ void cpu_idle (void)
idle = default_idle;
if (cpu_is_offline(smp_processor_id()))
play_dead();
+ enter_idle();
idle();
+ __exit_idle();
}
preempt_enable_no_resched();
@@ -423,7 +451,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
struct task_struct *me = current;
childregs = ((struct pt_regs *)
- (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
+ (THREAD_SIZE + task_stack_page(p))) - 1;
*childregs = *regs;
childregs->rax = 0;
@@ -435,7 +463,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
p->thread.rsp0 = (unsigned long) (childregs+1);
p->thread.userrsp = me->thread.userrsp;
- set_ti_thread_flag(p->thread_info, TIF_FORK);
+ set_tsk_thread_flag(p, TIF_FORK);
p->thread.fs = me->thread.fs;
p->thread.gs = me->thread.gs;
@@ -562,7 +590,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
write_pda(oldrsp, next->userrsp);
write_pda(pcurrent, next_p);
write_pda(kernelstack,
- (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET);
+ task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
/*
* Now maybe reload the debug registers
@@ -676,7 +704,7 @@ unsigned long get_wchan(struct task_struct *p)
if (!p || p == current || p->state==TASK_RUNNING)
return 0;
- stack = (unsigned long)p->thread_info;
+ stack = (unsigned long)task_stack_page(p);
if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
return 0;
fp = *(u64 *)(p->thread.rsp);
@@ -794,8 +822,7 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
struct pt_regs *pp, ptregs;
- pp = (struct pt_regs *)(tsk->thread.rsp0);
- --pp;
+ pp = task_pt_regs(tsk);
ptregs = *pp;
ptregs.cs &= 0xffff;
diff --git a/arch/x86_64/kernel/ptrace.c b/arch/x86_64/kernel/ptrace.c
index a87b6cebe80..53205622351 100644
--- a/arch/x86_64/kernel/ptrace.c
+++ b/arch/x86_64/kernel/ptrace.c
@@ -36,9 +36,12 @@
* in exit.c or in signal.c.
*/
-/* determines which flags the user has access to. */
-/* 1 = access 0 = no access */
-#define FLAG_MASK 0x44dd5UL
+/*
+ * Determines which flags the user has access to [1 = access, 0 = no access].
+ * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9).
+ * Also masks reserved bits (63-22, 15, 5, 3, 1).
+ */
+#define FLAG_MASK 0x54dd5UL
 /* sets the trap flag. */
#define TRAP_FLAG 0x100UL
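
[Editor's note, illustrative only] Spelling the new mask out: 0x54dd5 has bits 0, 2, 4, 6, 7, 8, 10, 11, 14, 16 and 18 set, i.e. CF, PF, AF, ZF, SF, TF, DF, OF, NT, RF and AC; the only difference from the old 0x44dd5 is bit 16 (RF), which a ptrace parent may now write. A standalone check (EFLAGS bit names are standard x86, the mask value is taken from the hunk above):

    #include <stdio.h>

    int main(void)
    {
            static const char *name[19] = {
                    [0] = "CF", [2] = "PF", [4] = "AF", [6] = "ZF", [7] = "SF",
                    [8] = "TF", [10] = "DF", [11] = "OF", [14] = "NT",
                    [16] = "RF", [18] = "AC",
            };
            unsigned long mask = 0x54dd5UL;
            int bit;

            for (bit = 0; bit < 19; bit++)
                    if (mask & (1UL << bit))
                            printf("%s ", name[bit]);   /* every set bit is named */
            printf("\n");   /* prints: CF PF AF ZF SF TF DF OF NT RF AC */
            return 0;
    }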
@@ -64,12 +67,6 @@ static inline unsigned long get_stack_long(struct task_struct *task, int offset)
return (*((unsigned long *)stack));
}
-static inline struct pt_regs *get_child_regs(struct task_struct *task)
-{
- struct pt_regs *regs = (void *)task->thread.rsp0;
- return regs - 1;
-}
-
/*
* this routine will put a word on the processes privileged stack.
* the offset is how far from the base addr as stored in the TSS.
@@ -167,7 +164,7 @@ static int is_at_popf(struct task_struct *child, struct pt_regs *regs)
static void set_singlestep(struct task_struct *child)
{
- struct pt_regs *regs = get_child_regs(child);
+ struct pt_regs *regs = task_pt_regs(child);
/*
* Always set TIF_SINGLESTEP - this guarantees that
@@ -205,7 +202,7 @@ static void clear_singlestep(struct task_struct *child)
/* But touch TF only if it was set by us.. */
if (child->ptrace & PT_DTRACE) {
- struct pt_regs *regs = get_child_regs(child);
+ struct pt_regs *regs = task_pt_regs(child);
regs->eflags &= ~TRAP_FLAG;
child->ptrace &= ~PT_DTRACE;
}
diff --git a/arch/x86_64/kernel/reboot.c b/arch/x86_64/kernel/reboot.c
index 75235ed2b31..57117b8beb2 100644
--- a/arch/x86_64/kernel/reboot.c
+++ b/arch/x86_64/kernel/reboot.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/string.h>
+#include <linux/pm.h>
#include <asm/io.h>
#include <asm/kdebug.h>
#include <asm/delay.h>
@@ -154,10 +155,11 @@ void machine_halt(void)
void machine_power_off(void)
{
- if (!reboot_force) {
- machine_shutdown();
- }
- if (pm_power_off)
+ if (pm_power_off) {
+ if (!reboot_force) {
+ machine_shutdown();
+ }
pm_power_off();
+ }
}
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 64c4534b930..28895c03cb1 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -35,6 +35,7 @@
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
+#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
@@ -43,6 +44,8 @@
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
+#include <linux/dmi.h>
+#include <linux/dma-mapping.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
@@ -61,7 +64,9 @@
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/numa.h>
+#include <asm/swiotlb.h>
#include <asm/sections.h>
+#include <asm/gart-mapping.h>
/*
* Machine setup..
@@ -86,11 +91,6 @@ int bootloader_type;
unsigned long saved_video_mode;
-#ifdef CONFIG_SWIOTLB
-int swiotlb;
-EXPORT_SYMBOL(swiotlb);
-#endif
-
/*
* Setup options
*/
@@ -275,10 +275,7 @@ static __init void parse_cmdline_early (char ** cmdline_p)
{
char c = ' ', *to = command_line, *from = COMMAND_LINE;
int len = 0;
-
- /* Save unparsed command line copy for /proc/cmdline */
- memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
- saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+ int userdef = 0;
for (;;) {
if (c != ' ')
@@ -346,10 +343,14 @@ static __init void parse_cmdline_early (char ** cmdline_p)
!memcmp(from, "disableapic", 11))
disable_apic = 1;
- if (!memcmp(from, "noapic", 6))
+ /* Don't confuse with noapictimer */
+ if (!memcmp(from, "noapic", 6) &&
+ (from[6] == ' ' || from[6] == 0))
skip_ioapic_setup = 1;
- if (!memcmp(from, "apic", 4)) {
+ /* Make sure to not confuse with apic= */
+ if (!memcmp(from, "apic", 4) &&
+ (from[4] == ' ' || from[4] == 0)) {
skip_ioapic_setup = 0;
ioapic_force = 1;
}
@@ -357,16 +358,36 @@ static __init void parse_cmdline_early (char ** cmdline_p)
if (!memcmp(from, "mem=", 4))
parse_memopt(from+4, &from);
+ if (!memcmp(from, "memmap=", 7)) {
+ /* exactmap option is for user defined memory */
+ if (!memcmp(from+7, "exactmap", 8)) {
+#ifdef CONFIG_CRASH_DUMP
+ /* If we are doing a crash dump, we
+ * still need to know the real mem
+ * size before the original memory map is
+ * reset.
+ */
+ saved_max_pfn = e820_end_of_ram();
+#endif
+ from += 8+7;
+ end_pfn_map = 0;
+ e820.nr_map = 0;
+ userdef = 1;
+ }
+ else {
+ parse_memmapopt(from+7, &from);
+ userdef = 1;
+ }
+ }
+
#ifdef CONFIG_NUMA
if (!memcmp(from, "numa=", 5))
numa_setup(from+5);
#endif
-#ifdef CONFIG_GART_IOMMU
if (!memcmp(from,"iommu=",6)) {
iommu_setup(from+6);
}
-#endif
if (!memcmp(from,"oops=panic", 10))
panic_on_oops = 1;
@@ -395,6 +416,14 @@ static __init void parse_cmdline_early (char ** cmdline_p)
}
#endif
+#ifdef CONFIG_PROC_VMCORE
+ /* elfcorehdr= specifies the location of elf core header
+ * stored by the crashed kernel. This option will be passed
+ * by the kexec loader to the capture kernel.
+ */
+ else if(!memcmp(from, "elfcorehdr=", 11))
+ elfcorehdr_addr = memparse(from+11, &from);
+#endif
next_char:
c = *(from++);
if (!c)
@@ -403,6 +432,10 @@ static __init void parse_cmdline_early (char ** cmdline_p)
break;
*(to++) = c;
}
+ if (userdef) {
+ printk(KERN_INFO "user-defined physical RAM map:\n");
+ e820_print_map("user");
+ }
*to = '\0';
*cmdline_p = command_line;
}
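
[Editor's note, illustrative only] As an illustration of the new options (all addresses and sizes here are hypothetical), a kexec-loaded capture kernel might be handed something like:

    memmap=exactmap memmap=640K@0 memmap=32M@16M elfcorehdr=0x1000000

exactmap throws away the firmware e820 map, the following memmap= entries re-add the regions the capture kernel may use, and elfcorehdr= (honoured when CONFIG_PROC_VMCORE is set) points it at the ELF core header the crashed kernel left behind; the "user-defined physical RAM map:" printout added above then shows what was accepted.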
@@ -443,6 +476,8 @@ static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
+extern char __vsyscall_0;
+
/* Replace instructions with better alternatives for this CPU type.
This runs before SMP is initialized to avoid SMP problems with
@@ -454,11 +489,17 @@ void apply_alternatives(void *start, void *end)
struct alt_instr *a;
int diff, i, k;
for (a = start; (void *)a < end; a++) {
+ u8 *instr;
+
if (!boot_cpu_has(a->cpuid))
continue;
BUG_ON(a->replacementlen > a->instrlen);
- __inline_memcpy(a->instr, a->replacement, a->replacementlen);
+ instr = a->instr;
+ /* vsyscall code is not mapped yet. resolve it manually. */
+ if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END)
+ instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
+ __inline_memcpy(instr, a->replacement, a->replacementlen);
diff = a->instrlen - a->replacementlen;
/* Pad the rest with nops */
@@ -466,7 +507,7 @@ void apply_alternatives(void *start, void *end)
k = diff;
if (k > ASM_NOP_MAX)
k = ASM_NOP_MAX;
- __inline_memcpy(a->instr + i, k8_nops[k], k);
+ __inline_memcpy(instr + i, k8_nops[k], k);
}
}
}
@@ -670,6 +711,8 @@ void __init setup_arch(char **cmdline_p)
acpi_boot_init();
#endif
+ init_cpu_to_node();
+
#ifdef CONFIG_X86_LOCAL_APIC
/*
* get boot-time SMP configuration:
@@ -834,7 +877,6 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
static int __init init_amd(struct cpuinfo_x86 *c)
{
int r;
- int level;
#ifdef CONFIG_SMP
unsigned long value;
@@ -857,11 +899,6 @@ static int __init init_amd(struct cpuinfo_x86 *c)
3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
clear_bit(0*32+31, &c->x86_capability);
- /* C-stepping K8? */
- level = cpuid_eax(1);
- if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
- set_bit(X86_FEATURE_K8_C, &c->x86_capability);
-
r = get_model_name(c);
if (!r) {
switch (c->x86) {
@@ -874,6 +911,10 @@ static int __init init_amd(struct cpuinfo_x86 *c)
}
display_cacheinfo(c);
+ /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
+ if (c->x86_power & (1<<8))
+ set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
+
if (c->extended_cpuid_level >= 0x80000008) {
c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
if (c->x86_max_cores & (c->x86_max_cores - 1))
@@ -992,8 +1033,10 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
if (c->x86 == 15)
c->x86_cache_alignment = c->x86_clflush_size * 2;
- if (c->x86 >= 15)
+ if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
+ (c->x86 == 0x6 && c->x86_model >= 0x0e))
set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
+ set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
c->x86_max_cores = intel_num_cpu_cores(c);
srat_detect_node();
@@ -1192,7 +1235,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
- NULL, "fxsr_opt", NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
+ NULL, "fxsr_opt", "rdtscp", NULL, NULL, "lm", "3dnowext", "3dnow",
/* Transmeta-defined */
"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
@@ -1220,7 +1263,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* AMD-defined (#2) */
- "lahf_lm", "cmp_legacy", NULL, NULL, NULL, NULL, NULL, NULL,
+ "lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
@@ -1231,7 +1274,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
"vid", /* voltage id control */
"ttp", /* thermal trip */
"tm",
- "stc"
+ "stc",
+ NULL,
+ /* nothing */ /* constant_tsc - moved to flags */
};
@@ -1311,8 +1356,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
unsigned i;
for (i = 0; i < 32; i++)
if (c->x86_power & (1 << i)) {
- if (i < ARRAY_SIZE(x86_power_flags))
- seq_printf(m, " %s", x86_power_flags[i]);
+ if (i < ARRAY_SIZE(x86_power_flags) &&
+ x86_power_flags[i])
+ seq_printf(m, "%s%s",
+ x86_power_flags[i][0]?" ":"",
+ x86_power_flags[i]);
else
seq_printf(m, " [%d]", i);
}
@@ -1344,3 +1392,11 @@ struct seq_operations cpuinfo_op = {
.stop = c_stop,
.show = show_cpuinfo,
};
+
+static int __init run_dmi_scan(void)
+{
+ dmi_scan_machine();
+ return 0;
+}
+core_initcall(run_dmi_scan);
+
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index 06dc354375c..6eff51e9400 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -30,7 +30,8 @@ char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};
cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
-struct x8664_pda cpu_pda[NR_CPUS] __cacheline_aligned;
+struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
+struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;
struct desc_ptr idt_descr = { 256 * 16, (unsigned long) idt_table };
@@ -110,18 +111,18 @@ void __init setup_per_cpu_areas(void)
}
if (!ptr)
panic("Cannot allocate cpu data for CPU %d\n", i);
- cpu_pda[i].data_offset = ptr - __per_cpu_start;
+ cpu_pda(i)->data_offset = ptr - __per_cpu_start;
memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
}
}
void pda_init(int cpu)
{
- struct x8664_pda *pda = &cpu_pda[cpu];
+ struct x8664_pda *pda = cpu_pda(cpu);
/* Setup up data that may be needed in __get_free_pages early */
asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
- wrmsrl(MSR_GS_BASE, cpu_pda + cpu);
+ wrmsrl(MSR_GS_BASE, pda);
pda->cpunumber = cpu;
pda->irqcount = -1;
@@ -145,7 +146,7 @@ void pda_init(int cpu)
pda->irqstackptr += IRQSTACKSIZE-64;
}
-char boot_exception_stacks[N_EXCEPTION_STACKS * EXCEPTION_STKSZ]
+char boot_exception_stacks[(N_EXCEPTION_STACKS - 2) * EXCEPTION_STKSZ + DEBUG_STKSZ]
__attribute__((section(".bss.page_aligned")));
/* May not be marked __init: used by software suspend */
@@ -213,23 +214,14 @@ void __cpuinit cpu_init (void)
* Initialize the per-CPU GDT with the boot GDT,
* and set up the GDT descriptor:
*/
- if (cpu) {
- memcpy(cpu_gdt_table[cpu], cpu_gdt_table[0], GDT_SIZE);
- }
+ if (cpu)
+ memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
cpu_gdt_descr[cpu].size = GDT_SIZE;
- cpu_gdt_descr[cpu].address = (unsigned long)cpu_gdt_table[cpu];
asm volatile("lgdt %0" :: "m" (cpu_gdt_descr[cpu]));
asm volatile("lidt %0" :: "m" (idt_descr));
- memcpy(me->thread.tls_array, cpu_gdt_table[cpu], GDT_ENTRY_TLS_ENTRIES * 8);
-
- /*
- * Delete NT
- */
-
- asm volatile("pushfq ; popq %%rax ; btr $14,%%rax ; pushq %%rax ; popfq" ::: "eax");
-
+ memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
syscall_init();
wrmsrl(MSR_FS_BASE, 0);
@@ -243,13 +235,27 @@ void __cpuinit cpu_init (void)
*/
for (v = 0; v < N_EXCEPTION_STACKS; v++) {
if (cpu) {
- estacks = (char *)__get_free_pages(GFP_ATOMIC,
- EXCEPTION_STACK_ORDER);
+ static const unsigned int order[N_EXCEPTION_STACKS] = {
+ [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
+ [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
+ };
+
+ estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
if (!estacks)
panic("Cannot allocate exception stack %ld %d\n",
v, cpu);
}
- estacks += EXCEPTION_STKSZ;
+ switch (v + 1) {
+#if DEBUG_STKSZ > EXCEPTION_STKSZ
+ case DEBUG_STACK:
+ cpu_pda[cpu].debugstack = (unsigned long)estacks;
+ estacks += DEBUG_STKSZ;
+ break;
+#endif
+ default:
+ estacks += EXCEPTION_STKSZ;
+ break;
+ }
t->ist[v] = (unsigned long)estacks;
}
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index cfc3d9dccbd..19ef012b1f1 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -27,6 +27,7 @@
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apicdef.h>
+#include <asm/idle.h>
/*
* Smarter SMP flushing macros.
@@ -280,11 +281,6 @@ void flush_tlb_all(void)
on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}
-void smp_kdb_stop(void)
-{
- send_IPI_allbutself(KDB_VECTOR);
-}
-
/*
* this function sends a 'reschedule' IPI to another CPU.
* it goes straight through and wastes no time serializing
@@ -512,6 +508,7 @@ asmlinkage void smp_call_function_interrupt(void)
/*
* At this point the info structure may be out of scope unless wait==1
*/
+ exit_idle();
irq_enter();
(*func)(info);
irq_exit();
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index ecbd7b83acc..a28756ef7ce 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -335,7 +335,13 @@ static __cpuinit void sync_tsc(unsigned int master)
static void __cpuinit tsc_sync_wait(void)
{
- if (notscsync || !cpu_has_tsc)
+ /*
+ * When the CPU has synchronized TSCs assume the BIOS
+ * or the hardware already synced. Otherwise we could
+ * mess up a possible perfect synchronization with a
+ * not-quite-perfect algorithm.
+ */
+ if (notscsync || !cpu_has_tsc || !unsynchronized_tsc())
return;
sync_tsc(0);
}
@@ -646,6 +652,7 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta
send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
} while (send_status && (timeout++ < 1000));
+ mb();
atomic_set(&init_deasserted, 1);
num_starts = 2;
@@ -659,7 +666,6 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta
for (j = 1; j <= num_starts; j++) {
Dprintk("Sending STARTUP #%d.\n",j);
- apic_read_around(APIC_SPIV);
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
Dprintk("After apic_write.\n");
@@ -698,7 +704,6 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta
* Due to the Pentium erratum 3AP.
*/
if (maxlvt > 3) {
- apic_read_around(APIC_SPIV);
apic_write(APIC_ESR, 0);
}
accept_status = (apic_read(APIC_ESR) & 0xEF);
@@ -743,11 +748,35 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
};
DECLARE_WORK(work, do_fork_idle, &c_idle);
+ /* Allocate memory for the GDTs of secondary CPUs; this also covers CPU hotplug */
+ if (!cpu_gdt_descr[cpu].address &&
+ !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
+ printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
+ return -1;
+ }
+
+ /* Allocate node local memory for AP pdas */
+ if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) {
+ struct x8664_pda *newpda, *pda;
+ int node = cpu_to_node(cpu);
+ pda = cpu_pda(cpu);
+ newpda = kmalloc_node(sizeof (struct x8664_pda), GFP_ATOMIC,
+ node);
+ if (newpda) {
+ memcpy(newpda, pda, sizeof (struct x8664_pda));
+ cpu_pda(cpu) = newpda;
+ } else
+ printk(KERN_ERR
+ "Could not allocate node local PDA for CPU %d on node %d\n",
+ cpu, node);
+ }
+
+
c_idle.idle = get_idle_for_cpu(cpu);
if (c_idle.idle) {
c_idle.idle->thread.rsp = (unsigned long) (((struct pt_regs *)
- (THREAD_SIZE + (unsigned long) c_idle.idle->thread_info)) - 1);
+ (THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
init_idle(c_idle.idle, cpu);
goto do_rest;
}
@@ -778,14 +807,14 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
do_rest:
- cpu_pda[cpu].pcurrent = c_idle.idle;
+ cpu_pda(cpu)->pcurrent = c_idle.idle;
start_rip = setup_trampoline();
init_rsp = c_idle.idle->thread.rsp;
per_cpu(init_tss,cpu).rsp0 = init_rsp;
initial_code = start_secondary;
- clear_ti_thread_flag(c_idle.idle->thread_info, TIF_FORK);
+ clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
printk(KERN_INFO "Booting processor %d/%d APIC 0x%x\n", cpu,
cpus_weight(cpu_present_map),
@@ -811,11 +840,8 @@ do_rest:
/*
* Be paranoid about clearing APIC errors.
*/
- if (APIC_INTEGRATED(apic_version[apicid])) {
- apic_read_around(APIC_SPIV);
- apic_write(APIC_ESR, 0);
- apic_read(APIC_ESR);
- }
+ apic_write(APIC_ESR, 0);
+ apic_read(APIC_ESR);
/*
* Status is now clean
@@ -927,8 +953,8 @@ int additional_cpus __initdata = -1;
*
* Three ways to find out the number of additional hotplug CPUs:
* - If the BIOS specified disabled CPUs in ACPI/mptables use that.
- * - otherwise use half of the available CPUs or 2, whatever is more.
* - The user can overwrite it with additional_cpus=NUM
+ * - Otherwise don't reserve additional CPUs.
* We do this because additional CPUs waste a lot of memory.
* -AK
*/
@@ -938,13 +964,10 @@ __init void prefill_possible_map(void)
int possible;
if (additional_cpus == -1) {
- if (disabled_cpus > 0) {
+ if (disabled_cpus > 0)
additional_cpus = disabled_cpus;
- } else {
- additional_cpus = num_processors / 2;
- if (additional_cpus == 0)
- additional_cpus = 2;
- }
+ else
+ additional_cpus = 0;
}
possible = num_processors + additional_cpus;
if (possible > NR_CPUS)
@@ -996,7 +1019,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
/*
* If we couldn't find a local APIC, then get out of here now!
*/
- if (APIC_INTEGRATED(apic_version[boot_cpu_id]) && !cpu_has_apic) {
+ if (!cpu_has_apic) {
printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
boot_cpu_id);
printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
diff --git a/arch/x86_64/kernel/suspend.c b/arch/x86_64/kernel/suspend.c
index fd2bef78088..ecbd34c1093 100644
--- a/arch/x86_64/kernel/suspend.c
+++ b/arch/x86_64/kernel/suspend.c
@@ -120,7 +120,7 @@ void fix_processor_context(void)
 set_tss_desc(cpu,t); /* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has a concept of a busy TSS or some similar stupidity. */
- cpu_gdt_table[cpu][GDT_ENTRY_TSS].type = 9;
+ cpu_gdt(cpu)[GDT_ENTRY_TSS].type = 9;
syscall_init(); /* This sets MSR_*STAR and related */
load_TR_desc(); /* This does ltr */
diff --git a/arch/x86_64/kernel/syscall.c b/arch/x86_64/kernel/syscall.c
index e263685f864..7c176b3edde 100644
--- a/arch/x86_64/kernel/syscall.c
+++ b/arch/x86_64/kernel/syscall.c
@@ -19,7 +19,7 @@ typedef void (*sys_call_ptr_t)(void);
extern void sys_ni_syscall(void);
-sys_call_ptr_t sys_call_table[__NR_syscall_max+1] __cacheline_aligned = {
+const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
 /* Smells like a compiler bug -- it doesn't work when the & below is removed. */
[0 ... __NR_syscall_max] = &sys_ni_syscall,
#include <asm-x86_64/unistd.h>
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 74102796e5c..f8c47c68844 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -71,14 +71,6 @@ unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
struct timespec __xtime __section_xtime;
struct timezone __sys_tz __section_sys_tz;
-static inline void rdtscll_sync(unsigned long *tsc)
-{
-#ifdef CONFIG_SMP
- sync_core();
-#endif
- rdtscll(*tsc);
-}
-
/*
* do_gettimeoffset() returns microseconds since last timer interrupt was
* triggered by hardware. A memory read of HPET is slower than a register read
@@ -93,7 +85,7 @@ static inline unsigned int do_gettimeoffset_tsc(void)
{
unsigned long t;
unsigned long x;
- rdtscll_sync(&t);
+ t = get_cycles_sync();
if (t < vxtime.last_tsc) t = vxtime.last_tsc; /* hack */
x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
return x;
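
[Editor's note, illustrative only] The conversion here is plain 32.32 fixed point: vxtime.tsc_quot is set later in this file to (1000L << 32) / cpu_khz, i.e. microseconds-per-cycle scaled by 2^32, so (delta * tsc_quot) >> 32 is the elapsed time in microseconds. A standalone check with a hypothetical 2 GHz CPU:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long cpu_khz = 2000000;   /* hypothetical 2.0 GHz part */
            unsigned long long tsc_quot = (1000ULL << 32) / cpu_khz;
            unsigned long long delta = 4000000;     /* cycles since the last tick */
            unsigned long long usec = (delta * tsc_quot) >> 32;

            /* ~2000 usec == 2 ms; fixed-point truncation loses a microsecond */
            printf("%llu cycles -> ~%llu usec\n", delta, usec);
            return 0;
    }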
@@ -259,8 +251,8 @@ static void set_rtc_mmss(unsigned long nowtime)
#endif
{
- BIN_TO_BCD(real_seconds);
- BIN_TO_BCD(real_minutes);
+ BIN_TO_BCD(real_seconds);
+ BIN_TO_BCD(real_minutes);
CMOS_WRITE(real_seconds, RTC_SECONDS);
CMOS_WRITE(real_minutes, RTC_MINUTES);
}
@@ -297,25 +289,21 @@ unsigned long long monotonic_clock(void)
last_offset = vxtime.last;
base = monotonic_base;
this_offset = hpet_readl(HPET_COUNTER);
-
} while (read_seqretry(&xtime_lock, seq));
offset = (this_offset - last_offset);
offset *=(NSEC_PER_SEC/HZ)/hpet_tick;
return base + offset;
- }else{
+ } else {
do {
seq = read_seqbegin(&xtime_lock);
last_offset = vxtime.last_tsc;
base = monotonic_base;
} while (read_seqretry(&xtime_lock, seq));
- sync_core();
- rdtscll(this_offset);
+ this_offset = get_cycles_sync();
offset = (this_offset - last_offset)*1000/cpu_khz;
return base + offset;
}
-
-
}
EXPORT_SYMBOL(monotonic_clock);
@@ -391,7 +379,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
delay = LATCH - 1 - delay;
}
- rdtscll_sync(&tsc);
+ tsc = get_cycles_sync();
if (vxtime.mode == VXTIME_HPET) {
if (offset - vxtime.last > hpet_tick) {
@@ -471,6 +459,11 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
write_sequnlock(&xtime_lock);
+#ifdef CONFIG_X86_LOCAL_APIC
+ if (using_apic_timer)
+ smp_send_timer_broadcast_ipi();
+#endif
+
return IRQ_HANDLED;
}
@@ -509,10 +502,10 @@ unsigned long long sched_clock(void)
return cycles_2_ns(a);
}
-unsigned long get_cmos_time(void)
+static unsigned long get_cmos_time(void)
{
- unsigned int timeout, year, mon, day, hour, min, sec;
- unsigned char last, this;
+ unsigned int timeout = 1000000, year, mon, day, hour, min, sec;
+ unsigned char uip = 0, this = 0;
unsigned long flags;
/*
@@ -525,45 +518,41 @@ unsigned long get_cmos_time(void)
spin_lock_irqsave(&rtc_lock, flags);
- timeout = 1000000;
- last = this = 0;
-
- while (timeout && last && !this) {
- last = this;
+ while (timeout && (!uip || this)) {
+ uip |= this;
this = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
timeout--;
}
-/*
- * Here we are safe to assume the registers won't change for a whole second, so
- * we just go ahead and read them.
- */
-
- sec = CMOS_READ(RTC_SECONDS);
- min = CMOS_READ(RTC_MINUTES);
- hour = CMOS_READ(RTC_HOURS);
- day = CMOS_READ(RTC_DAY_OF_MONTH);
- mon = CMOS_READ(RTC_MONTH);
- year = CMOS_READ(RTC_YEAR);
+ /*
+ * Here we are safe to assume the registers won't change for a whole
+ * second, so we just go ahead and read them.
+ */
+ sec = CMOS_READ(RTC_SECONDS);
+ min = CMOS_READ(RTC_MINUTES);
+ hour = CMOS_READ(RTC_HOURS);
+ day = CMOS_READ(RTC_DAY_OF_MONTH);
+ mon = CMOS_READ(RTC_MONTH);
+ year = CMOS_READ(RTC_YEAR);
spin_unlock_irqrestore(&rtc_lock, flags);
-/*
- * We know that x86-64 always uses BCD format, no need to check the config
- * register.
- */
+ /*
+ * We know that x86-64 always uses BCD format, no need to check the
+ * config register.
+ */
- BCD_TO_BIN(sec);
- BCD_TO_BIN(min);
- BCD_TO_BIN(hour);
- BCD_TO_BIN(day);
- BCD_TO_BIN(mon);
- BCD_TO_BIN(year);
+ BCD_TO_BIN(sec);
+ BCD_TO_BIN(min);
+ BCD_TO_BIN(hour);
+ BCD_TO_BIN(day);
+ BCD_TO_BIN(mon);
+ BCD_TO_BIN(year);
-/*
- * x86-64 systems only exists since 2002.
- * This will work up to Dec 31, 2100
- */
+ /*
+ * x86-64 systems have only existed since 2002.
+ * This will work up to Dec 31, 2100
+ */
year += 2000;
return mktime(year, mon, day, hour, min, sec);
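
[Editor's note, illustrative only] The RTC stores each field as BCD, so BCD_TO_BIN is just "low nibble plus ten times the high nibble". A standalone illustration (BCD_TO_BIN itself comes from the RTC headers; this re-implements it only for the example):

    #include <stdio.h>

    static unsigned int bcd_to_bin(unsigned int v)
    {
            return (v & 0x0f) + (v >> 4) * 10;      /* 0x59 -> 59 */
    }

    int main(void)
    {
            unsigned int sec = 0x59, min = 0x07;    /* raw CMOS register values */

            printf("sec=%u min=%u\n", bcd_to_bin(sec), bcd_to_bin(min));    /* 59 7 */
            return 0;
    }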
@@ -695,8 +684,7 @@ static unsigned int __init hpet_calibrate_tsc(void)
do {
local_irq_disable();
hpet_now = hpet_readl(HPET_COUNTER);
- sync_core();
- rdtscl(tsc_now);
+ tsc_now = get_cycles_sync();
local_irq_restore(flags);
} while ((tsc_now - tsc_start) < TICK_COUNT &&
(hpet_now - hpet_start) < TICK_COUNT);
@@ -726,11 +714,9 @@ static unsigned int __init pit_calibrate_tsc(void)
outb(0xb0, 0x43);
outb((PIT_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
outb((PIT_TICK_RATE / (1000 / 50)) >> 8, 0x42);
- rdtscll(start);
- sync_core();
+ start = get_cycles_sync();
while ((inb(0x61) & 0x20) == 0);
- sync_core();
- rdtscll(end);
+ end = get_cycles_sync();
spin_unlock_irqrestore(&i8253_lock, flags);
@@ -744,7 +730,7 @@ static __init int late_hpet_init(void)
unsigned int ntimer;
if (!vxtime.hpet_address)
- return -1;
+ return -1;
memset(&hd, 0, sizeof (hd));
@@ -878,8 +864,6 @@ static struct irqaction irq0 = {
timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL
};
-extern void __init config_acpi_tables(void);
-
void __init time_init(void)
{
char *timename;
@@ -934,7 +918,7 @@ void __init time_init(void)
vxtime.mode = VXTIME_TSC;
vxtime.quot = (1000000L << 32) / vxtime_hz;
vxtime.tsc_quot = (1000L << 32) / cpu_khz;
- rdtscll_sync(&vxtime.last_tsc);
+ vxtime.last_tsc = get_cycles_sync();
setup_irq(0, &irq0);
set_cyc2ns_scale(cpu_khz);
@@ -948,7 +932,7 @@ void __init time_init(void)
* Make an educated guess if the TSC is trustworthy and synchronized
* over all CPUs.
*/
-static __init int unsynchronized_tsc(void)
+__init int unsynchronized_tsc(void)
{
#ifdef CONFIG_SMP
if (oem_force_hpet_timer())
@@ -959,7 +943,7 @@ static __init int unsynchronized_tsc(void)
return 0;
#endif
/* Assume multi socket systems are not synchronized */
- return num_online_cpus() > 1;
+ return num_present_cpus() > 1;
}
/*
@@ -999,6 +983,10 @@ __setup("report_lost_ticks", time_setup);
static long clock_cmos_diff;
static unsigned long sleep_start;
+/*
+ * sysfs support for the timer.
+ */
+
static int timer_suspend(struct sys_device *dev, pm_message_t state)
{
/*
@@ -1041,7 +1029,6 @@ static struct sysdev_class timer_sysclass = {
set_kset_name("timer"),
};
-
/* XXX this driverfs stuff should probably go elsewhere later -john */
static struct sys_device device_timer = {
.id = 0,
@@ -1075,8 +1062,6 @@ device_initcall(time_init_device);
*/
#include <linux/rtc.h>
-extern irqreturn_t rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs);
-
#define DEFAULT_RTC_INT_FREQ 64
#define RTC_NUM_INTS 1
@@ -1285,8 +1270,6 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
}
#endif
-
-
static int __init nohpet_setup(char *s)
{
nohpet = 1;
@@ -1303,5 +1286,3 @@ static int __init notsc_setup(char *s)
}
__setup("notsc", notsc_setup);
-
-
diff --git a/arch/x86_64/kernel/trampoline.S b/arch/x86_64/kernel/trampoline.S
index 6d9c9a8e7d0..23a03eb91fc 100644
--- a/arch/x86_64/kernel/trampoline.S
+++ b/arch/x86_64/kernel/trampoline.S
@@ -42,8 +42,15 @@ r_base = .
movl $0xA5A5A5A5, trampoline_data - r_base
# write marker for master knows we're running
- lidt idt_48 - r_base # load idt with 0, 0
- lgdt gdt_48 - r_base # load gdt with whatever is appropriate
+ /*
+ * With the kernel loaded at a non-default location the GDT can sit
+ * beyond 16MB, and lgdt cannot load such an address because the
+ * default operand size in real mode is 16 bit. Use lgdtl instead to
+ * force a 32-bit operand size.
+ */
+
+ lidtl idt_48 - r_base # load idt with 0, 0
+ lgdtl gdt_48 - r_base # load gdt with whatever is appropriate
xor %ax, %ax
inc %ax # protected mode (PE) bit
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index bf337f49318..8bb0aeda78b 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -70,7 +70,6 @@ asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);
-asmlinkage void call_debug(void);
struct notifier_block *die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);
@@ -121,19 +120,31 @@ int printk_address(unsigned long address)
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
unsigned *usedp, const char **idp)
{
- static const char ids[N_EXCEPTION_STACKS][8] = {
+ static char ids[][8] = {
[DEBUG_STACK - 1] = "#DB",
[NMI_STACK - 1] = "NMI",
[DOUBLEFAULT_STACK - 1] = "#DF",
[STACKFAULT_STACK - 1] = "#SS",
[MCE_STACK - 1] = "#MC",
+#if DEBUG_STKSZ > EXCEPTION_STKSZ
+ [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
+#endif
};
unsigned k;
for (k = 0; k < N_EXCEPTION_STACKS; k++) {
unsigned long end;
- end = per_cpu(init_tss, cpu).ist[k];
+ switch (k + 1) {
+#if DEBUG_STKSZ > EXCEPTION_STKSZ
+ case DEBUG_STACK:
+ end = cpu_pda(cpu)->debugstack + DEBUG_STKSZ;
+ break;
+#endif
+ default:
+ end = per_cpu(init_tss, cpu).ist[k];
+ break;
+ }
if (stack >= end)
continue;
if (stack >= end - EXCEPTION_STKSZ) {
@@ -143,6 +154,22 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
*idp = ids[k];
return (unsigned long *)end;
}
+#if DEBUG_STKSZ > EXCEPTION_STKSZ
+ if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
+ unsigned j = N_EXCEPTION_STACKS - 1;
+
+ do {
+ ++j;
+ end -= EXCEPTION_STKSZ;
+ ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
+ } while (stack < end - EXCEPTION_STKSZ);
+ if (*usedp & (1U << j))
+ break;
+ *usedp |= 1U << j;
+ *idp = ids[j];
+ return (unsigned long *)end;
+ }
+#endif
}
return NULL;
}
@@ -156,9 +183,8 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
void show_trace(unsigned long *stack)
{
- unsigned long addr;
const unsigned cpu = safe_smp_processor_id();
- unsigned long *irqstack_end = (unsigned long *)cpu_pda[cpu].irqstackptr;
+ unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
int i;
unsigned used = 0;
@@ -166,8 +192,14 @@ void show_trace(unsigned long *stack)
#define HANDLE_STACK(cond) \
do while (cond) { \
- addr = *stack++; \
+ unsigned long addr = *stack++; \
if (kernel_text_address(addr)) { \
+ if (i > 50) { \
+ printk("\n "); \
+ i = 0; \
+ } \
+ else \
+ i += printk(" "); \
/* \
* If the address is either in the text segment of the \
* kernel, or in the region which contains vmalloc'ed \
@@ -177,25 +209,19 @@ void show_trace(unsigned long *stack)
* out the call path that was taken. \
*/ \
i += printk_address(addr); \
- if (i > 50) { \
- printk("\n "); \
- i = 0; \
- } \
- else \
- i += printk(" "); \
} \
} while (0)
- for(i = 0; ; ) {
+ for(i = 11; ; ) {
const char *id;
unsigned long *estack_end;
estack_end = in_exception_stack(cpu, (unsigned long)stack,
&used, &id);
if (estack_end) {
- i += printk(" <%s> ", id);
+ i += printk(" <%s>", id);
HANDLE_STACK (stack < estack_end);
- i += printk(" <EOE> ");
+ i += printk(" <EOE>");
stack = (unsigned long *) estack_end[-2];
continue;
}
@@ -205,11 +231,11 @@ void show_trace(unsigned long *stack)
(IRQSTACKSIZE - 64) / sizeof(*irqstack);
if (stack >= irqstack && stack < irqstack_end) {
- i += printk(" <IRQ> ");
+ i += printk(" <IRQ>");
HANDLE_STACK (stack < irqstack_end);
stack = (unsigned long *) (irqstack_end[-1]);
irqstack_end = NULL;
- i += printk(" <EOI> ");
+ i += printk(" <EOI>");
continue;
}
}
@@ -226,8 +252,8 @@ void show_stack(struct task_struct *tsk, unsigned long * rsp)
unsigned long *stack;
int i;
const int cpu = safe_smp_processor_id();
- unsigned long *irqstack_end = (unsigned long *) (cpu_pda[cpu].irqstackptr);
- unsigned long *irqstack = (unsigned long *) (cpu_pda[cpu].irqstackptr - IRQSTACKSIZE);
+ unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
+ unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
// debugging aid: "show_stack(NULL, NULL);" prints the
// back trace for this cpu.
@@ -275,14 +301,14 @@ void show_registers(struct pt_regs *regs)
int in_kernel = !user_mode(regs);
unsigned long rsp;
const int cpu = safe_smp_processor_id();
- struct task_struct *cur = cpu_pda[cpu].pcurrent;
+ struct task_struct *cur = cpu_pda(cpu)->pcurrent;
rsp = regs->rsp;
printk("CPU %d ", cpu);
__show_regs(regs);
printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
- cur->comm, cur->pid, cur->thread_info, cur);
+ cur->comm, cur->pid, task_thread_info(cur), cur);
/*
* When in-kernel, we also print out the stack and code at the
@@ -314,20 +340,26 @@ bad:
void handle_BUG(struct pt_regs *regs)
{
struct bug_frame f;
- char tmp;
+ long len;
+ const char *prefix = "";
if (user_mode(regs))
return;
- if (__copy_from_user(&f, (struct bug_frame *) regs->rip,
+ if (__copy_from_user(&f, (const void __user *) regs->rip,
sizeof(struct bug_frame)))
return;
if (f.filename >= 0 ||
f.ud2[0] != 0x0f || f.ud2[1] != 0x0b)
return;
- if (__get_user(tmp, (char *)(long)f.filename))
+ len = __strnlen_user((char *)(long)f.filename, PATH_MAX) - 1;
+ if (len < 0 || len >= PATH_MAX)
f.filename = (int)(long)"unmapped filename";
+ else if (len > 50) {
+ f.filename += len - 50;
+ prefix = "...";
+ }
printk("----------- [cut here ] --------- [please bite here ] ---------\n");
- printk(KERN_ALERT "Kernel BUG at %.50s:%d\n", (char *)(long)f.filename, f.line);
+ printk(KERN_ALERT "Kernel BUG at %s%.50s:%d\n", prefix, (char *)(long)f.filename, f.line);
}
#ifdef CONFIG_BUG
@@ -382,7 +414,7 @@ void __die(const char * str, struct pt_regs * regs, long err)
printk("DEBUG_PAGEALLOC");
#endif
printk("\n");
- notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
+ notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
show_registers(regs);
/* Executive summary in case the oops scrolled away */
printk(KERN_ALERT "RIP ");
@@ -399,11 +431,6 @@ void die(const char * str, struct pt_regs * regs, long err)
oops_end(flags);
do_exit(SIGSEGV);
}
-static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
-{
- if (!(regs->eflags & VM_MASK) && (regs->cs == __KERNEL_CS))
- die(str, regs, err);
-}
void die_nmi(char *str, struct pt_regs *regs)
{
@@ -426,19 +453,20 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
struct pt_regs * regs, long error_code,
siginfo_t *info)
{
+ struct task_struct *tsk = current;
+
conditional_sti(regs);
- if (user_mode(regs)) {
- struct task_struct *tsk = current;
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = trapnr;
+ if (user_mode(regs)) {
if (exception_trace && unhandled_signal(tsk, signr))
printk(KERN_INFO
"%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
tsk->comm, tsk->pid, str,
regs->rip,regs->rsp,error_code);
- tsk->thread.error_code = error_code;
- tsk->thread.trap_no = trapnr;
if (info)
force_sig_info(signr, info, tsk);
else
@@ -485,7 +513,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->rip)
DO_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_ERROR( 5, SIGSEGV, "bounds", bounds)
-DO_ERROR_INFO( 6, SIGILL, "invalid operand", invalid_op, ILL_ILLOPN, regs->rip)
+DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip)
DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
@@ -493,24 +521,41 @@ DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGSEGV, "reserved", reserved)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
-DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
+
+asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
+{
+ static const char str[] = "double fault";
+ struct task_struct *tsk = current;
+
+ /* Return not checked because a double fault cannot be ignored */
+ notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
+
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 8;
+
+ /* This is always a kernel trap and never fixable (and thus must
+ never return). */
+ for (;;)
+ die(str, regs, error_code);
+}
asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
long error_code)
{
+ struct task_struct *tsk = current;
+
conditional_sti(regs);
- if (user_mode(regs)) {
- struct task_struct *tsk = current;
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 13;
+ if (user_mode(regs)) {
if (exception_trace && unhandled_signal(tsk, SIGSEGV))
printk(KERN_INFO
"%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
tsk->comm, tsk->pid,
regs->rip,regs->rsp,error_code);
- tsk->thread.error_code = error_code;
- tsk->thread.trap_no = 13;
force_sig(SIGSEGV, tsk);
return;
}
@@ -573,7 +618,7 @@ asmlinkage void default_do_nmi(struct pt_regs *regs)
reason = get_nmi_reason();
if (!(reason & 0xc0)) {
- if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
+ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
== NOTIFY_STOP)
return;
#ifdef CONFIG_X86_LOCAL_APIC
@@ -589,7 +634,7 @@ asmlinkage void default_do_nmi(struct pt_regs *regs)
unknown_nmi_error(reason, regs);
return;
}
- if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
+ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
return;
/* AK: following checks seem to be broken on modern chipsets. FIXME */
@@ -600,6 +645,7 @@ asmlinkage void default_do_nmi(struct pt_regs *regs)
io_check_error(reason, regs);
}
+/* runs on IST stack. */
asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
{
if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
@@ -620,7 +666,7 @@ asmlinkage struct pt_regs *sync_regs(struct pt_regs *eregs)
;
/* Exception from user space */
else if (user_mode(eregs))
- regs = ((struct pt_regs *)current->thread.rsp0) - 1;
+ regs = task_pt_regs(current);
/* Exception from kernel and interrupts are enabled. Move to
kernel process stack. */
else if (eregs->eflags & X86_EFLAGS_IF)
@@ -684,11 +730,9 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
- if (!user_mode(regs))
- goto clear_dr7;
+ info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL;
+ force_sig_info(SIGTRAP, &info, tsk);
- info.si_addr = (void __user *)regs->rip;
- force_sig_info(SIGTRAP, &info, tsk);
clear_dr7:
set_debugreg(0UL, 7);
return;
@@ -698,7 +742,7 @@ clear_TF_reenable:
regs->eflags &= ~TF_MASK;
}
-static int kernel_math_error(struct pt_regs *regs, char *str)
+static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->rip);
@@ -706,8 +750,9 @@ static int kernel_math_error(struct pt_regs *regs, char *str)
regs->rip = fixup->fixup;
return 1;
}
- notify_die(DIE_GPF, str, regs, 0, 16, SIGFPE);
+ notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
/* Illegal floating point operation in the kernel */
+ current->thread.trap_no = trapnr;
die(str, regs, 0);
return 0;
}
@@ -726,7 +771,7 @@ asmlinkage void do_coprocessor_error(struct pt_regs *regs)
conditional_sti(regs);
if (!user_mode(regs) &&
- kernel_math_error(regs, "kernel x87 math error"))
+ kernel_math_error(regs, "kernel x87 math error", 16))
return;
/*
@@ -795,7 +840,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
conditional_sti(regs);
if (!user_mode(regs) &&
- kernel_math_error(regs, "kernel simd math error"))
+ kernel_math_error(regs, "kernel simd math error", 19))
return;
/*
@@ -867,12 +912,7 @@ asmlinkage void math_state_restore(void)
if (!used_math())
init_fpu(me);
restore_fpu_checking(&me->thread.i387.fxsave);
- me->thread_info->status |= TS_USEDFPU;
-}
-
-void do_call_debug(struct pt_regs *regs)
-{
- notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
+ task_thread_info(me)->status |= TS_USEDFPU;
}
void __init trap_init(void)
@@ -880,9 +920,9 @@ void __init trap_init(void)
set_intr_gate(0,&divide_error);
set_intr_gate_ist(1,&debug,DEBUG_STACK);
set_intr_gate_ist(2,&nmi,NMI_STACK);
- set_system_gate(3,&int3);
- set_system_gate(4,&overflow); /* int4-5 can be called from all */
- set_system_gate(5,&bounds);
+ set_system_gate_ist(3,&int3,DEBUG_STACK); /* int3 can be called from all */
+ set_system_gate(4,&overflow); /* int4 can be called from all */
+ set_intr_gate(5,&bounds);
set_intr_gate(6,&invalid_op);
set_intr_gate(7,&device_not_available);
set_intr_gate_ist(8,&double_fault, DOUBLEFAULT_STACK);
@@ -904,8 +944,6 @@ void __init trap_init(void)
set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif
- set_intr_gate(KDB_VECTOR, call_debug);
-
/*
* Should be a barrier for any external CPU state.
*/
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index 58b19215b4b..b0eed1faf74 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -8,6 +8,8 @@
#include <asm/page.h>
#include <linux/config.h>
+#undef i386 /* in case the preprocessor is a 32bit one */
+
OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
@@ -189,7 +191,7 @@ SECTIONS
/* Sections to be discarded */
/DISCARD/ : {
*(.exitcall.exit)
-#ifndef CONFIG_DEBUG_INFO
+#ifndef CONFIG_UNWIND_INFO
*(.eh_frame)
#endif
}
diff --git a/arch/x86_64/kernel/vsmp.c b/arch/x86_64/kernel/vsmp.c
new file mode 100644
index 00000000000..92f70c74965
--- /dev/null
+++ b/arch/x86_64/kernel/vsmp.c
@@ -0,0 +1,45 @@
+/*
+ * vSMPowered(tm) systems specific initialization
+ * Copyright (C) 2005 ScaleMP Inc.
+ *
+ * Use of this code is subject to the terms and conditions of the
+ * GNU general public license version 2. See "COPYING" or
+ * http://www.gnu.org/licenses/gpl.html
+ *
+ * Ravikiran Thirumalai <kiran@scalemp.com>,
+ * Shai Fultheim <shai@scalemp.com>
+ */
+
+#include <linux/init.h>
+#include <linux/pci_ids.h>
+#include <linux/pci_regs.h>
+#include <asm/pci-direct.h>
+
+static int __init vsmp_init(void)
+{
+ void *address;
+ unsigned int cap, ctl;
+
+ /* Check if we are running on a ScaleMP vSMP box */
+ if ((read_pci_config_16(0, 0x1f, 0, PCI_VENDOR_ID) != PCI_VENDOR_ID_SCALEMP) ||
+ (read_pci_config_16(0, 0x1f, 0, PCI_DEVICE_ID) != PCI_DEVICE_ID_SCALEMP_VSMP_CTL))
+ return 0;
+
+ /* set vSMP magic bits to indicate vSMP capable kernel */
+ address = ioremap(read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0), 8);
+ cap = readl(address);
+ ctl = readl(address + 4);
+ printk("vSMP CTL: capabilities:0x%08x control:0x%08x\n", cap, ctl);
+ if (cap & ctl & (1 << 4)) {
+ /* Turn on vSMP IRQ fastpath handling (see system.h) */
+ ctl &= ~(1 << 4);
+ writel(ctl, address + 4);
+ ctl = readl(address + 4);
+ printk("vSMP CTL: control set to:0x%08x\n", ctl);
+ }
+
+ iounmap(address);
+ return 0;
+}
+
+core_initcall(vsmp_init);
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index 70a0bd16085..9468fb20b0b 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -35,14 +35,13 @@
#include <asm/io.h>
#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
-#define force_inline __attribute__((always_inline)) inline
int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
#include <asm/unistd.h>
-static force_inline void timeval_normalize(struct timeval * tv)
+static __always_inline void timeval_normalize(struct timeval * tv)
{
time_t __sec;
@@ -53,7 +52,7 @@ static force_inline void timeval_normalize(struct timeval * tv)
}
}
-static force_inline void do_vgettimeofday(struct timeval * tv)
+static __always_inline void do_vgettimeofday(struct timeval * tv)
{
long sequence, t;
unsigned long sec, usec;
@@ -66,8 +65,7 @@ static force_inline void do_vgettimeofday(struct timeval * tv)
(__jiffies - __wall_jiffies) * (1000000 / HZ);
if (__vxtime.mode != VXTIME_HPET) {
- sync_core();
- rdtscll(t);
+ t = get_cycles_sync();
if (t < __vxtime.last_tsc)
t = __vxtime.last_tsc;
usec += ((t - __vxtime.last_tsc) *
@@ -84,12 +82,12 @@ static force_inline void do_vgettimeofday(struct timeval * tv)
}
/* RED-PEN may want to readd seq locking, but then the variable should be write-once. */
-static force_inline void do_get_tz(struct timezone * tz)
+static __always_inline void do_get_tz(struct timezone * tz)
{
*tz = __sys_tz;
}
-static force_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
+static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
{
int ret;
asm volatile("vsysc2: syscall"
@@ -98,7 +96,7 @@ static force_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
return ret;
}
-static force_inline long time_syscall(long *t)
+static __always_inline long time_syscall(long *t)
{
long secs;
asm volatile("vsysc1: syscall"
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index 4a54221e10b..b614d54d2ae 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -13,7 +13,6 @@
#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/tty.h>
-#include <linux/ioctl32.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
@@ -45,22 +44,15 @@ extern struct drive_info_struct drive_info;
EXPORT_SYMBOL(drive_info);
#endif
-extern unsigned long get_cmos_time(void);
-
/* platform dependent support */
EXPORT_SYMBOL(boot_cpu_data);
//EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(ioremap_nocache);
EXPORT_SYMBOL(iounmap);
-EXPORT_SYMBOL(enable_irq);
-EXPORT_SYMBOL(disable_irq);
-EXPORT_SYMBOL(disable_irq_nosync);
-EXPORT_SYMBOL(probe_irq_mask);
EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(pm_idle);
EXPORT_SYMBOL(pm_power_off);
-EXPORT_SYMBOL(get_cmos_time);
EXPORT_SYMBOL(__down_failed);
EXPORT_SYMBOL(__down_failed_interruptible);
@@ -84,9 +76,6 @@ EXPORT_SYMBOL(__put_user_2);
EXPORT_SYMBOL(__put_user_4);
EXPORT_SYMBOL(__put_user_8);
-EXPORT_SYMBOL(strpbrk);
-EXPORT_SYMBOL(strstr);
-
EXPORT_SYMBOL(strncpy_from_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(clear_user);
@@ -98,25 +87,18 @@ EXPORT_SYMBOL(copy_in_user);
EXPORT_SYMBOL(strnlen_user);
#ifdef CONFIG_PCI
-EXPORT_SYMBOL(pci_alloc_consistent);
-EXPORT_SYMBOL(pci_free_consistent);
-#endif
-
-#ifdef CONFIG_PCI
EXPORT_SYMBOL(pci_mem_start);
#endif
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
-EXPORT_SYMBOL(cpu_pda);
+EXPORT_SYMBOL(_cpu_pda);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(cpu_data);
-EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(__write_lock_failed);
EXPORT_SYMBOL(__read_lock_failed);
-EXPORT_SYMBOL(synchronize_irq);
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(cpu_callout_map);
#endif
@@ -137,30 +119,17 @@ EXPORT_SYMBOL_GPL(unset_nmi_callback);
#undef memcpy
#undef memset
#undef memmove
-#undef memchr
#undef strlen
-#undef strncmp
-#undef strncpy
-#undef strchr
extern void * memset(void *,int,__kernel_size_t);
extern size_t strlen(const char *);
extern void * memmove(void * dest,const void *src,size_t count);
-extern void *memchr(const void *s, int c, size_t n);
extern void * memcpy(void *,const void *,__kernel_size_t);
extern void * __memcpy(void *,const void *,__kernel_size_t);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(strncmp);
-EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strncat);
-EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__memcpy);
diff --git a/arch/x86_64/lib/copy_user.S b/arch/x86_64/lib/copy_user.S
index dfa358b05c8..79422b6559c 100644
--- a/arch/x86_64/lib/copy_user.S
+++ b/arch/x86_64/lib/copy_user.S
@@ -4,12 +4,9 @@
* Functions to copy from and to user space.
*/
-#define FIX_ALIGNMENT 1
-
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
- #include <asm/cpufeature.h>
/* Standard copy_to_user with segment limit checking */
.globl copy_to_user
@@ -21,23 +18,7 @@ copy_to_user:
jc bad_to_user
cmpq threadinfo_addr_limit(%rax),%rcx
jae bad_to_user
-2:
- .byte 0xe9 /* 32bit jump */
- .long .Lcug-1f
-1:
-
- .section .altinstr_replacement,"ax"
-3: .byte 0xe9 /* replacement jmp with 8 bit immediate */
- .long copy_user_generic_c-1b /* offset */
- .previous
- .section .altinstructions,"a"
- .align 8
- .quad 2b
- .quad 3b
- .byte X86_FEATURE_K8_C
- .byte 5
- .byte 5
- .previous
+ jmp copy_user_generic
/* Standard copy_from_user with segment limit checking */
.globl copy_from_user
@@ -72,223 +53,44 @@ bad_to_user:
* rsi source
* rdx count
*
+ * Only 4GB of copy is supported. This shouldn't be a problem
+ * because the kernel normally only writes from/to page sized chunks
+ * even if user space passed a longer buffer.
+ * And more would be dangerous because both Intel and AMD have
+ * errata with rep movsq > 4GB. Anyone who feels the need to lift
+ * this limit should take those errata into account.
+ *
* Output:
* eax uncopied bytes or 0 if successful.
*/
- .globl copy_user_generic
- .p2align 4
-copy_user_generic:
- .byte 0x66,0x66,0x90 /* 5 byte nop for replacement jump */
- .byte 0x66,0x90
-1:
- .section .altinstr_replacement,"ax"
-2: .byte 0xe9 /* near jump with 32bit immediate */
- .long copy_user_generic_c-1b /* offset */
- .previous
- .section .altinstructions,"a"
- .align 8
- .quad copy_user_generic
- .quad 2b
- .byte X86_FEATURE_K8_C
- .byte 5
- .byte 5
- .previous
-.Lcug:
- pushq %rbx
- xorl %eax,%eax /*zero for the exception handler */
-
-#ifdef FIX_ALIGNMENT
- /* check for bad alignment of destination */
- movl %edi,%ecx
- andl $7,%ecx
- jnz .Lbad_alignment
-.Lafter_bad_alignment:
-#endif
- movq %rdx,%rcx
-
- movl $64,%ebx
- shrq $6,%rdx
- decq %rdx
- js .Lhandle_tail
-
- .p2align 4
-.Lloop:
-.Ls1: movq (%rsi),%r11
-.Ls2: movq 1*8(%rsi),%r8
-.Ls3: movq 2*8(%rsi),%r9
-.Ls4: movq 3*8(%rsi),%r10
-.Ld1: movq %r11,(%rdi)
-.Ld2: movq %r8,1*8(%rdi)
-.Ld3: movq %r9,2*8(%rdi)
-.Ld4: movq %r10,3*8(%rdi)
-
-.Ls5: movq 4*8(%rsi),%r11
-.Ls6: movq 5*8(%rsi),%r8
-.Ls7: movq 6*8(%rsi),%r9
-.Ls8: movq 7*8(%rsi),%r10
-.Ld5: movq %r11,4*8(%rdi)
-.Ld6: movq %r8,5*8(%rdi)
-.Ld7: movq %r9,6*8(%rdi)
-.Ld8: movq %r10,7*8(%rdi)
-
- decq %rdx
-
- leaq 64(%rsi),%rsi
- leaq 64(%rdi),%rdi
-
- jns .Lloop
-
- .p2align 4
-.Lhandle_tail:
- movl %ecx,%edx
- andl $63,%ecx
- shrl $3,%ecx
- jz .Lhandle_7
- movl $8,%ebx
- .p2align 4
-.Lloop_8:
-.Ls9: movq (%rsi),%r8
-.Ld9: movq %r8,(%rdi)
- decl %ecx
- leaq 8(%rdi),%rdi
- leaq 8(%rsi),%rsi
- jnz .Lloop_8
-
-.Lhandle_7:
- movl %edx,%ecx
- andl $7,%ecx
- jz .Lende
- .p2align 4
-.Lloop_1:
-.Ls10: movb (%rsi),%bl
-.Ld10: movb %bl,(%rdi)
- incq %rdi
- incq %rsi
- decl %ecx
- jnz .Lloop_1
-
-.Lende:
- popq %rbx
- ret
-
-#ifdef FIX_ALIGNMENT
- /* align destination */
- .p2align 4
-.Lbad_alignment:
- movl $8,%r9d
- subl %ecx,%r9d
- movl %r9d,%ecx
- cmpq %r9,%rdx
- jz .Lhandle_7
- js .Lhandle_7
-.Lalign_1:
-.Ls11: movb (%rsi),%bl
-.Ld11: movb %bl,(%rdi)
- incq %rsi
- incq %rdi
- decl %ecx
- jnz .Lalign_1
- subq %r9,%rdx
- jmp .Lafter_bad_alignment
-#endif
-
- /* table sorted by exception address */
- .section __ex_table,"a"
- .align 8
- .quad .Ls1,.Ls1e
- .quad .Ls2,.Ls2e
- .quad .Ls3,.Ls3e
- .quad .Ls4,.Ls4e
- .quad .Ld1,.Ls1e
- .quad .Ld2,.Ls2e
- .quad .Ld3,.Ls3e
- .quad .Ld4,.Ls4e
- .quad .Ls5,.Ls5e
- .quad .Ls6,.Ls6e
- .quad .Ls7,.Ls7e
- .quad .Ls8,.Ls8e
- .quad .Ld5,.Ls5e
- .quad .Ld6,.Ls6e
- .quad .Ld7,.Ls7e
- .quad .Ld8,.Ls8e
- .quad .Ls9,.Le_quad
- .quad .Ld9,.Le_quad
- .quad .Ls10,.Le_byte
- .quad .Ld10,.Le_byte
-#ifdef FIX_ALIGNMENT
- .quad .Ls11,.Lzero_rest
- .quad .Ld11,.Lzero_rest
-#endif
- .quad .Le5,.Le_zero
- .previous
-
- /* compute 64-offset for main loop. 8 bytes accuracy with error on the
- pessimistic side. this is gross. it would be better to fix the
- interface. */
- /* eax: zero, ebx: 64 */
-.Ls1e: addl $8,%eax
-.Ls2e: addl $8,%eax
-.Ls3e: addl $8,%eax
-.Ls4e: addl $8,%eax
-.Ls5e: addl $8,%eax
-.Ls6e: addl $8,%eax
-.Ls7e: addl $8,%eax
-.Ls8e: addl $8,%eax
- addq %rbx,%rdi /* +64 */
- subq %rax,%rdi /* correct destination with computed offset */
-
- shlq $6,%rdx /* loop counter * 64 (stride length) */
- addq %rax,%rdx /* add offset to loopcnt */
- andl $63,%ecx /* remaining bytes */
- addq %rcx,%rdx /* add them */
- jmp .Lzero_rest
-
- /* exception on quad word loop in tail handling */
- /* ecx: loopcnt/8, %edx: length, rdi: correct */
-.Le_quad:
- shll $3,%ecx
- andl $7,%edx
- addl %ecx,%edx
- /* edx: bytes to zero, rdi: dest, eax:zero */
-.Lzero_rest:
- movq %rdx,%rcx
-.Le_byte:
- xorl %eax,%eax
-.Le5: rep
- stosb
- /* when there is another exception while zeroing the rest just return */
-.Le_zero:
- movq %rdx,%rax
- jmp .Lende
-
- /* C stepping K8 run faster using the string copy instructions.
- This is also a lot simpler. Use them when possible.
- Patch in jmps to this code instead of copying it fully
- to avoid unwanted aliasing in the exception tables. */
-
- /* rdi destination
- * rsi source
- * rdx count
- *
- * Output:
- * eax uncopied bytes or 0 if successfull.
- */
-copy_user_generic_c:
+ .globl copy_user_generic
+copy_user_generic:
movl %edx,%ecx
shrl $3,%ecx
andl $7,%edx
+ jz 5f
1: rep
movsq
movl %edx,%ecx
+ xor %eax,%eax
2: rep
movsb
-4: movl %ecx,%eax
ret
+ /* align here? */
+5: xorl %eax,%eax
+6: rep movsq
+ ret
+
+ .section .fixup,"ax"
3: lea (%rdx,%rcx,8),%rax
ret
-
+4: movl %ecx,%eax
+ ret
+ .previous
+
.section __ex_table,"a"
.quad 1b,3b
.quad 2b,4b
+ .quad 6b,4b
.previous
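
The rewritten copy_user_generic above copies count/8 quadwords with rep movsq and the remaining count%8 bytes with rep movsb; the .fixup entries at labels 3 and 4 work out how many bytes were still uncopied when a fault interrupts either loop. The user-space C sketch below only models that accounting; the function names and example values are invented for illustration and are not kernel symbols.

    #include <stdio.h>

    /* Model of the fault fixups in the new copy_user_generic:
     *   label 3: lea (%rdx,%rcx,8),%rax -> tail_bytes + quads_left * 8
     *            (fault during "rep movsq", %rcx quadwords still pending)
     *   label 4: movl %ecx,%eax         -> bytes_left
     *            (fault during "rep movsb", %rcx bytes still pending)
     */
    static unsigned long uncopied_quad_fault(unsigned long quads_left,
                                             unsigned long tail_bytes)
    {
            return tail_bytes + quads_left * 8;
    }

    static unsigned long uncopied_byte_fault(unsigned long bytes_left)
    {
            return bytes_left;
    }

    int main(void)
    {
            /* A 100-byte copy splits into 12 quadwords plus 4 tail bytes;
             * suppose the quadword loop faults with 5 quadwords to go. */
            printf("%lu bytes uncopied\n", uncopied_quad_fault(5, 100 % 8));
            printf("%lu bytes uncopied\n", uncopied_byte_fault(3));
            return 0;
    }
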
diff --git a/arch/x86_64/lib/delay.c b/arch/x86_64/lib/delay.c
index 841bd738a18..03c460cbdd1 100644
--- a/arch/x86_64/lib/delay.c
+++ b/arch/x86_64/lib/delay.c
@@ -39,7 +39,7 @@ void __delay(unsigned long loops)
inline void __const_udelay(unsigned long xloops)
{
- __delay(((xloops * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32) * HZ);
+ __delay((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32);
}
void __udelay(unsigned long usecs)
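
The reordering in __const_udelay matters because the old expression shifted the 32.32 fixed-point product down before multiplying by HZ, truncating the sub-jiffy fraction of the delay. A rough worked example, with a made-up loops_per_jiffy and 64-bit unsigned long assumed as on x86-64:

    #include <stdio.h>

    int main(void)
    {
            unsigned long xloops = 0x10c7;    /* ~1 usec, scaled by 2^32 / 10^6 */
            unsigned long lpj    = 1497600;   /* made-up loops_per_jiffy */
            unsigned long hz     = 1000;

            unsigned long old_loops = ((xloops * lpj) >> 32) * hz;
            unsigned long new_loops = (xloops * hz * lpj) >> 32;

            /* old: 1000 loops, new: 1497 loops; a microsecond at this
             * loops_per_jiffy actually needs about 1497.6 loops. */
            printf("old: %lu  new: %lu\n", old_loops, new_loops);
            return 0;
    }
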
diff --git a/arch/x86_64/lib/usercopy.c b/arch/x86_64/lib/usercopy.c
index db8abba1ad8..9bc2c295818 100644
--- a/arch/x86_64/lib/usercopy.c
+++ b/arch/x86_64/lib/usercopy.c
@@ -109,14 +109,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
* Return 0 on exception, a value greater than N if too long
*/
-long strnlen_user(const char __user *s, long n)
+long __strnlen_user(const char __user *s, long n)
{
long res = 0;
char c;
- if (!access_ok(VERIFY_READ, s, n))
- return 0;
-
while (1) {
if (res>n)
return n+1;
@@ -129,6 +126,13 @@ long strnlen_user(const char __user *s, long n)
}
}
+long strnlen_user(const char __user *s, long n)
+{
+ if (!access_ok(VERIFY_READ, s, n))
+ return 0;
+ return __strnlen_user(s, n);
+}
+
long strlen_user(const char __user *s)
{
long res = 0;
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 3a63707a698..26eac194064 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -35,6 +35,13 @@
#include <asm-generic/sections.h>
#include <asm/kdebug.h>
+/* Page fault error code bits */
+#define PF_PROT (1<<0) /* or no page found */
+#define PF_WRITE (1<<1)
+#define PF_USER (1<<2)
+#define PF_RSVD (1<<3)
+#define PF_INSTR (1<<4)
+
void bust_spinlocks(int yes)
{
int loglevel_save = console_loglevel;
@@ -68,7 +75,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
unsigned char *max_instr;
/* If it was a exec fault ignore */
- if (error_code & (1<<4))
+ if (error_code & PF_INSTR)
return 0;
instr = (unsigned char *)convert_rip_to_linear(current, regs);
@@ -222,17 +229,22 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
unsigned long error_code)
{
unsigned long flags = oops_begin();
+ struct task_struct *tsk;
printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
current->comm, address);
dump_pagetable(address);
+ tsk = current;
+ tsk->thread.cr2 = address;
+ tsk->thread.trap_no = 14;
+ tsk->thread.error_code = error_code;
__die("Bad pagetable", regs, error_code);
oops_end(flags);
do_exit(SIGKILL);
}
/*
- * Handle a fault on the vmalloc or module mapping area
+ * Handle a fault on the vmalloc area
*
* This assumes no large pages in there.
*/
@@ -278,7 +290,6 @@ static int vmalloc_fault(unsigned long address)
that. */
if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
BUG();
- __flush_tlb_all();
return 0;
}
@@ -289,12 +300,6 @@ int exception_trace = 1;
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
- *
- * error_code:
- * bit 0 == 0 means no page found, 1 means protection fault
- * bit 1 == 0 means read, 1 means write
- * bit 2 == 0 means kernel, 1 means user-mode
- * bit 3 == 1 means fault was an instruction fetch
*/
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
unsigned long error_code)
@@ -337,12 +342,16 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
*
* This verifies that the fault happens in kernel space
* (error_code & 4) == 0, and that the fault was not a
- * protection error (error_code & 1) == 0.
+ * protection error (error_code & 9) == 0.
*/
if (unlikely(address >= TASK_SIZE64)) {
- if (!(error_code & 5) &&
- ((address >= VMALLOC_START && address < VMALLOC_END) ||
- (address >= MODULES_VADDR && address < MODULES_END))) {
+ /*
+ * Don't check for the module range here: its PML4
+ * is always initialized because it's shared with the main
+ * kernel text. Only vmalloc may need PML4 syncups.
+ */
+ if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
+ ((address >= VMALLOC_START && address < VMALLOC_END))) {
if (vmalloc_fault(address) < 0)
goto bad_area_nosemaphore;
return;
@@ -354,7 +363,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
goto bad_area_nosemaphore;
}
- if (unlikely(error_code & (1 << 3)))
+ if (unlikely(error_code & PF_RSVD))
pgtable_bad(address, regs, error_code);
/*
@@ -381,7 +390,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
* thus avoiding the deadlock.
*/
if (!down_read_trylock(&mm->mmap_sem)) {
- if ((error_code & 4) == 0 &&
+ if ((error_code & PF_USER) == 0 &&
!search_exception_tables(regs->rip))
goto bad_area_nosemaphore;
down_read(&mm->mmap_sem);
@@ -408,17 +417,17 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
good_area:
info.si_code = SEGV_ACCERR;
write = 0;
- switch (error_code & 3) {
+ switch (error_code & (PF_PROT|PF_WRITE)) {
default: /* 3: write, present */
/* fall through */
- case 2: /* write, not present */
+ case PF_WRITE: /* write, not present */
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
write++;
break;
- case 1: /* read, present */
+ case PF_PROT: /* read, present */
goto bad_area;
- case 0: /* read, not present */
+ case 0: /* read, not present */
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
@@ -453,7 +462,7 @@ bad_area:
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
- if (error_code & 4) {
+ if (error_code & PF_USER) {
if (is_prefetch(regs, address, error_code))
return;
@@ -521,6 +530,9 @@ no_context:
printk_address(regs->rip);
printk("\n");
dump_pagetable(address);
+ tsk->thread.cr2 = address;
+ tsk->thread.trap_no = 14;
+ tsk->thread.error_code = error_code;
__die("Oops", regs, error_code);
/* Executive summary in case the body of the oops scrolled away */
printk(KERN_EMERG "CR2: %016lx\n", address);
@@ -546,7 +558,7 @@ do_sigbus:
up_read(&mm->mmap_sem);
/* Kernel mode? Handle exceptions or die */
- if (!(error_code & 4))
+ if (!(error_code & PF_USER))
goto no_context;
tsk->thread.cr2 = address;
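
The PF_* macros introduced above give names to the page-fault error-code bits that do_page_fault now tests symbolically. As a quick illustration, here is a small stand-alone decoder using the same bit values; decode() and the sample codes are only for demonstration.

    #include <stdio.h>

    #define PF_PROT  (1 << 0)  /* set: protection fault; clear: page not present */
    #define PF_WRITE (1 << 1)  /* write access */
    #define PF_USER  (1 << 2)  /* fault taken in user mode */
    #define PF_RSVD  (1 << 3)  /* reserved bit set in a page table entry */
    #define PF_INSTR (1 << 4)  /* instruction fetch */

    static void decode(unsigned long error_code)
    {
            printf("%#lx: %s %s in %s mode%s%s\n", error_code,
                   error_code & PF_WRITE ? "write" : "read",
                   error_code & PF_PROT  ? "protection fault" : "to a not-present page",
                   error_code & PF_USER  ? "user" : "kernel",
                   error_code & PF_INSTR ? ", instruction fetch" : "",
                   error_code & PF_RSVD  ? ", reserved bit set" : "");
    }

    int main(void)
    {
            decode(PF_USER | PF_WRITE);   /* user write to a not-present page */
            decode(PF_PROT);              /* kernel read hit a protection fault */
            decode(PF_USER | PF_INSTR);   /* user instruction fetch, not present */
            return 0;
    }
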
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index c016dfe8478..eca60125efc 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -23,6 +23,7 @@
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
+#include <linux/dma-mapping.h>
#include <asm/processor.h>
#include <asm/system.h>
@@ -38,11 +39,16 @@
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
+#include <asm/dma-mapping.h>
+#include <asm/swiotlb.h>
#ifndef Dprintk
#define Dprintk(x...)
#endif
+struct dma_mapping_ops* dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
static unsigned long dma_reserve __initdata;
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -249,14 +255,26 @@ static void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned lon
static void __init find_early_table_space(unsigned long end)
{
- unsigned long puds, pmds, tables;
+ unsigned long puds, pmds, tables, start;
puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
- table_start = find_e820_area(0x8000, __pa_symbol(&_text), tables);
+ /* Put page tables beyond the DMA zones if possible.
+ RED-PEN might be better to spread them out more over
+ memory to avoid hotspots */
+ if (end > MAX_DMA32_PFN<<PAGE_SHIFT)
+ start = MAX_DMA32_PFN << PAGE_SHIFT;
+ else if (end > MAX_DMA_PFN << PAGE_SHIFT)
+ start = MAX_DMA_PFN << PAGE_SHIFT;
+ else
+ start = 0x8000;
+
+ table_start = find_e820_area(start, end, tables);
+ if (table_start == -1)
+ table_start = find_e820_area(0x8000, end, tables);
if (table_start == -1UL)
panic("Cannot find space for the kernel page tables");
@@ -423,12 +441,9 @@ void __init mem_init(void)
long codesize, reservedpages, datasize, initsize;
#ifdef CONFIG_SWIOTLB
- if (!iommu_aperture &&
- (end_pfn >= 0xffffffff>>PAGE_SHIFT || force_iommu))
- swiotlb = 1;
- if (swiotlb)
- swiotlb_init();
+ pci_swiotlb_init();
#endif
+ no_iommu_init();
/* How many end-of-memory variables you have, grandma! */
max_low_pfn = end_pfn;
@@ -498,6 +513,29 @@ void free_initmem(void)
printk ("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
}
+#ifdef CONFIG_DEBUG_RODATA
+
+extern char __start_rodata, __end_rodata;
+void mark_rodata_ro(void)
+{
+ unsigned long addr = (unsigned long)&__start_rodata;
+
+ for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
+ change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
+
+ printk ("Write protecting the kernel read-only data: %luk\n",
+ (&__end_rodata - &__start_rodata) >> 10);
+
+ /*
+ * change_page_attr_addr() requires a global_flush_tlb() call after it.
+ * We do this after the printk so that if something went wrong in the
+ * change, the printk gets out at least to give a better debug hint
+ * of who is the culprit.
+ */
+ global_flush_tlb();
+}
+#endif
+
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
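
find_early_table_space now tries to place the early page tables above the DMA and DMA32 zones when the machine has memory up there, and falls back to a search from 0x8000 otherwise. The sketch below shows just that placement decision, with the zone boundaries spelled out as 16 MB and 4 GB rather than the kernel's MAX_DMA_PFN/MAX_DMA32_PFN macros; the helper is illustrative, not kernel code.

    #include <stdio.h>

    #define DMA_LIMIT   (16ULL << 20)   /* 16 MB: end of the ISA DMA zone */
    #define DMA32_LIMIT (4ULL << 30)    /* 4 GB: end of the DMA32 zone */

    /* Pick where the early page tables should start, given the end of RAM. */
    static unsigned long long table_start_for(unsigned long long end_of_ram)
    {
            if (end_of_ram > DMA32_LIMIT)
                    return DMA32_LIMIT;   /* tables land above 4 GB */
            else if (end_of_ram > DMA_LIMIT)
                    return DMA_LIMIT;     /* keep the 16 MB DMA zone free */
            else
                    return 0x8000;        /* small box: old behaviour */
    }

    int main(void)
    {
            printf("%#llx\n", table_start_for(8ULL << 30));   /* 8 GB box */
            printf("%#llx\n", table_start_for(1ULL << 30));   /* 1 GB box */
            printf("%#llx\n", table_start_for(8ULL << 20));   /* 8 MB box */
            return 0;
    }
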
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 15b67d2760c..6ef9f9a7623 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -46,8 +46,8 @@ int numa_off __initdata;
* 0 if memnodmap[] too small (of shift too small)
* -1 if node overlap or lost ram (shift too big)
*/
-static int __init populate_memnodemap(
- const struct node *nodes, int numnodes, int shift)
+static int __init
+populate_memnodemap(const struct node *nodes, int numnodes, int shift)
{
int i;
int res = -1;
@@ -81,7 +81,7 @@ int __init compute_hash_shift(struct node *nodes, int numnodes)
while (populate_memnodemap(nodes, numnodes, shift + 1) >= 0)
shift++;
- printk(KERN_DEBUG "Using %d for the hash shift.\n",
+ printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
shift);
if (populate_memnodemap(nodes, numnodes, shift) != 1) {
@@ -110,7 +110,7 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long en
start = round_up(start, ZONE_ALIGN);
- printk("Bootmem setup node %d %016lx-%016lx\n", nodeid, start, end);
+ printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, start, end);
start_pfn = start >> PAGE_SHIFT;
end_pfn = end >> PAGE_SHIFT;
@@ -156,7 +156,7 @@ void __init setup_node_zones(int nodeid)
start_pfn = node_start_pfn(nodeid);
end_pfn = node_end_pfn(nodeid);
- Dprintk(KERN_INFO "setting up node %d %lx-%lx\n",
+ Dprintk(KERN_INFO "Setting up node %d %lx-%lx\n",
nodeid, start_pfn, end_pfn);
size_zones(zones, holes, start_pfn, end_pfn);
@@ -200,7 +200,7 @@ static int numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
while ((x << 1) < sz)
x <<= 1;
if (x < sz/2)
- printk("Numa emulation unbalanced. Complain to maintainer\n");
+ printk(KERN_ERR "Numa emulation unbalanced. Complain to maintainer\n");
sz = x;
}
@@ -272,7 +272,7 @@ __cpuinit void numa_add_cpu(int cpu)
void __cpuinit numa_set_node(int cpu, int node)
{
- cpu_pda[cpu].nodenumber = node;
+ cpu_pda(cpu)->nodenumber = node;
cpu_to_node[cpu] = node;
}
@@ -330,8 +330,69 @@ __init int numa_setup(char *opt)
return 1;
}
+/*
+ * Setup early cpu_to_node.
+ *
+ * Populate cpu_to_node[] only if x86_cpu_to_apicid[],
+ * and apicid_to_node[] tables have valid entries for a CPU.
+ * This means we skip cpu_to_node[] initialisation for NUMA
+ * emulation and the fake-node case (when running a kernel compiled
+ * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
+ * is already initialized in a round-robin manner in numa_init_array,
+ * prior to this call, and this initialization is good enough
+ * for the fake NUMA cases.
+ */
+void __init init_cpu_to_node(void)
+{
+ int i;
+ for (i = 0; i < NR_CPUS; i++) {
+ u8 apicid = x86_cpu_to_apicid[i];
+ if (apicid == BAD_APICID)
+ continue;
+ if (apicid_to_node[apicid] == NUMA_NO_NODE)
+ continue;
+ cpu_to_node[i] = apicid_to_node[apicid];
+ }
+}
+
EXPORT_SYMBOL(cpu_to_node);
EXPORT_SYMBOL(node_to_cpumask);
EXPORT_SYMBOL(memnode_shift);
EXPORT_SYMBOL(memnodemap);
EXPORT_SYMBOL(node_data);
+
+#ifdef CONFIG_DISCONTIGMEM
+/*
+ * Functions to convert PFNs from/to per node page addresses.
+ * These are out of line because they are quite big.
+ * They could be all tuned by pre caching more state.
+ * Should do that.
+ */
+
+/* Requires pfn_valid(pfn) to be true */
+struct page *pfn_to_page(unsigned long pfn)
+{
+ int nid = phys_to_nid(((unsigned long)(pfn)) << PAGE_SHIFT);
+ return (pfn - node_start_pfn(nid)) + NODE_DATA(nid)->node_mem_map;
+}
+EXPORT_SYMBOL(pfn_to_page);
+
+unsigned long page_to_pfn(struct page *page)
+{
+ return (long)(((page) - page_zone(page)->zone_mem_map) +
+ page_zone(page)->zone_start_pfn);
+}
+EXPORT_SYMBOL(page_to_pfn);
+
+int pfn_valid(unsigned long pfn)
+{
+ unsigned nid;
+ if (pfn >= num_physpages)
+ return 0;
+ nid = pfn_to_nid(pfn);
+ if (nid == 0xff)
+ return 0;
+ return pfn >= node_start_pfn(nid) && (pfn) < node_end_pfn(nid);
+}
+EXPORT_SYMBOL(pfn_valid);
+#endif
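
The out-of-line pfn_to_page()/page_to_pfn() added for CONFIG_DISCONTIGMEM translate a PFN into an offset within the owning node's mem_map and back. The toy model below uses a fixed two-node layout and passes the node id to page_to_pfn() explicitly, which the kernel instead derives from page_zone(); everything here is a stand-in for illustration only.

    #include <stdio.h>

    struct page { int dummy; };

    /* Fake layout: node 0 owns PFNs [0, 1024), node 1 owns [1024, 2048). */
    struct node_map {
            unsigned long start_pfn, end_pfn;
            struct page *mem_map;
    };

    static struct page map0[1024], map1[1024];
    static struct node_map nodes[2] = {
            {    0, 1024, map0 },
            { 1024, 2048, map1 },
    };

    static int pfn_to_nid(unsigned long pfn)
    {
            return pfn < 1024 ? 0 : 1;   /* the kernel uses the memnodemap hash */
    }

    static struct page *pfn_to_page(unsigned long pfn)
    {
            int nid = pfn_to_nid(pfn);
            return nodes[nid].mem_map + (pfn - nodes[nid].start_pfn);
    }

    static unsigned long page_to_pfn(struct page *page, int nid)
    {
            return (page - nodes[nid].mem_map) + nodes[nid].start_pfn;
    }

    int main(void)
    {
            struct page *p = pfn_to_page(1500);   /* lives in node 1's mem_map */
            printf("round trip pfn: %lu\n", page_to_pfn(p, 1));
            return 0;
    }
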
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index b90e8fe9eeb..35f1f1aab06 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -128,6 +128,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
pte_t *kpte;
struct page *kpte_page;
unsigned kpte_flags;
+ pgprot_t ref_prot2;
kpte = lookup_address(address);
if (!kpte) return 0;
kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
@@ -140,10 +141,14 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
* split_large_page will take the reference for this change_page_attr
* on the split page.
*/
- struct page *split = split_large_page(address, prot, ref_prot);
+
+ struct page *split;
+ ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));
+
+ split = split_large_page(address, prot, ref_prot2);
if (!split)
return -ENOMEM;
- set_pte(kpte,mk_pte(split, ref_prot));
+ set_pte(kpte,mk_pte(split, ref_prot2));
kpte_page = split;
}
get_page(kpte_page);
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 33340bd1e32..8b7f85608fa 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -17,21 +17,23 @@
#include <linux/topology.h>
#include <asm/proto.h>
#include <asm/numa.h>
+#include <asm/e820.h>
static struct acpi_table_slit *acpi_slit;
static nodemask_t nodes_parsed __initdata;
static nodemask_t nodes_found __initdata;
static struct node nodes[MAX_NUMNODES] __initdata;
-static __u8 pxm2node[256] = { [0 ... 255] = 0xff };
+static u8 pxm2node[256] = { [0 ... 255] = 0xff };
static int node_to_pxm(int n);
int pxm_to_node(int pxm)
{
if ((unsigned)pxm >= 256)
- return 0;
- return pxm2node[pxm];
+ return -1;
+ /* Extend 0xff to (int)-1 */
+ return (signed char)pxm2node[pxm];
}
static __init int setup_node(int pxm)
@@ -91,9 +93,36 @@ static __init inline int srat_disabled(void)
return numa_off || acpi_numa < 0;
}
+/*
+ * A lot of BIOSes fill in 10 (= no distance) everywhere. This messes
+ * up the NUMA heuristics, which want the local node to have a smaller
+ * distance than the others.
+ * Do some quick checks here and only use the SLIT if it passes them.
+ */
+static __init int slit_valid(struct acpi_table_slit *slit)
+{
+ int i, j;
+ int d = slit->localities;
+ for (i = 0; i < d; i++) {
+ for (j = 0; j < d; j++) {
+ u8 val = slit->entry[d*i + j];
+ if (i == j) {
+ if (val != 10)
+ return 0;
+ } else if (val <= 10)
+ return 0;
+ }
+ }
+ return 1;
+}
+
/* Callback for SLIT parsing */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
+ if (!slit_valid(slit)) {
+ printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
+ return;
+ }
acpi_slit = slit;
}
@@ -168,12 +197,39 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
nd->start, nd->end);
}
+/* Sanity check to catch more bad SRATs (they are amazingly common).
+ Make sure the PXMs cover all memory. */
+static int nodes_cover_memory(void)
+{
+ int i;
+ unsigned long pxmram, e820ram;
+
+ pxmram = 0;
+ for_each_node_mask(i, nodes_parsed) {
+ unsigned long s = nodes[i].start >> PAGE_SHIFT;
+ unsigned long e = nodes[i].end >> PAGE_SHIFT;
+ pxmram += e - s;
+ pxmram -= e820_hole_size(s, e);
+ }
+
+ e820ram = end_pfn - e820_hole_size(0, end_pfn);
+ if (pxmram < e820ram) {
+ printk(KERN_ERR
+ "SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n",
+ (pxmram << PAGE_SHIFT) >> 20,
+ (e820ram << PAGE_SHIFT) >> 20);
+ return 0;
+ }
+ return 1;
+}
+
void __init acpi_numa_arch_fixup(void) {}
/* Use the information discovered above to actually set up the nodes. */
int __init acpi_scan_nodes(unsigned long start, unsigned long end)
{
int i;
+
if (acpi_numa <= 0)
return -1;
@@ -184,6 +240,11 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
node_clear(i, nodes_parsed);
}
+ if (!nodes_cover_memory()) {
+ bad_srat();
+ return -1;
+ }
+
memnode_shift = compute_hash_shift(nodes, nodes_weight(nodes_parsed));
if (memnode_shift < 0) {
printk(KERN_ERR
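
slit_valid() above rejects SLIT tables whose diagonal is not the local distance (10) or whose off-diagonal entries are not strictly larger, which catches the all-10 matrices mentioned in the comment. The same check can be run stand-alone over a flat distance matrix, for example:

    #include <stdio.h>

    /* Returns 1 if a d x d SLIT distance matrix looks plausible:
     * local distances must be exactly 10, remote ones strictly greater. */
    static int slit_valid(const unsigned char *entry, int d)
    {
            int i, j;

            for (i = 0; i < d; i++)
                    for (j = 0; j < d; j++) {
                            unsigned char val = entry[d * i + j];
                            if (i == j) {
                                    if (val != 10)
                                            return 0;
                            } else if (val <= 10)
                                    return 0;
                    }
            return 1;
    }

    int main(void)
    {
            unsigned char good[4] = { 10, 20, 20, 10 };   /* sane two-node table */
            unsigned char bad[4]  = { 10, 10, 10, 10 };   /* "no distance" everywhere */

            printf("good: %d, bad: %d\n", slit_valid(good, 2), slit_valid(bad, 2));
            return 0;
    }
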
diff --git a/arch/x86_64/pci/Makefile-BUS b/arch/x86_64/pci/Makefile-BUS
deleted file mode 100644
index 4f0c05abd40..00000000000
--- a/arch/x86_64/pci/Makefile-BUS
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# Makefile for X86_64 specific PCI routines
-#
-# Reuse the i386 PCI subsystem
-#
-CFLAGS += -I arch/i386/pci
-
-obj-y := i386.o
-obj-$(CONFIG_PCI_DIRECT)+= direct.o
-obj-y += fixup.o
-obj-$(CONFIG_ACPI) += acpi.o
-obj-y += legacy.o irq.o common.o
-# mmconfig has a 64bit special
-obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o
-
-direct-y += ../../i386/pci/direct.o
-acpi-y += ../../i386/pci/acpi.o
-legacy-y += ../../i386/pci/legacy.o
-irq-y += ../../i386/pci/irq.o
-common-y += ../../i386/pci/common.o
-fixup-y += ../../i386/pci/fixup.o
-i386-y += ../../i386/pci/i386.o
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 7e841aa2a4a..7ee4a14ec3b 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -18,10 +18,6 @@ config XTENSA
with reasonable minimum requirements. The Xtensa Linux project has
a home page at <http://xtensa.sourceforge.net/>.
-config UID16
- bool
- default n
-
config RWSEM_XCHGADD_ALGORITHM
bool
default y
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 6a44b54ae81..f1f596644bf 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -145,7 +145,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
int user_mode = user_mode(regs);
/* Set up new TSS. */
- tos = (unsigned long)p->thread_info + THREAD_SIZE;
+ tos = (unsigned long)task_stack_page(p) + THREAD_SIZE;
if (user_mode)
childregs = (struct pt_regs*)(tos - PT_USER_SIZE);
else
@@ -217,7 +217,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
unsigned long get_wchan(struct task_struct *p)
{
unsigned long sp, pc;
- unsigned long stack_page = (unsigned long) p->thread_info;
+ unsigned long stack_page = (unsigned long) task_stack_page(p);
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index ab5c4c65b5c..4cc85285a70 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -72,7 +72,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
struct pt_regs *regs;
unsigned long tmp;
- regs = xtensa_pt_regs(child);
+ regs = task_pt_regs(child);
tmp = 0; /* Default return value. */
switch(addr) {
@@ -149,7 +149,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_POKEUSR:
{
struct pt_regs *regs;
- regs = xtensa_pt_regs(child);
+ regs = task_pt_regs(child);
switch (addr) {
case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
@@ -240,7 +240,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
* elf_gregset_t format. */
xtensa_gregset_t format;
- struct pt_regs *regs = xtensa_pt_regs(child);
+ struct pt_regs *regs = task_pt_regs(child);
do_copy_regs (&format, regs, child);
@@ -257,7 +257,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
* values in the elf_gregset_t format. */
xtensa_gregset_t format;
- struct pt_regs *regs = xtensa_pt_regs(child);
+ struct pt_regs *regs = task_pt_regs(child);
if (copy_from_user(&format,(void *)data,sizeof(elf_gregset_t))){
ret = -EFAULT;
@@ -281,7 +281,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
* elf_fpregset_t format. */
elf_fpregset_t fpregs;
- struct pt_regs *regs = xtensa_pt_regs(child);
+ struct pt_regs *regs = task_pt_regs(child);
do_save_fpregs (&fpregs, regs, child);
@@ -299,7 +299,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
* values in the elf_fpregset_t format.
*/
elf_fpregset_t fpregs;
- struct pt_regs *regs = xtensa_pt_regs(child);
+ struct pt_regs *regs = task_pt_regs(child);
ret = 0;
if (copy_from_user(&fpregs, (void *)data, sizeof(elf_fpregset_t))) {
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index cb6e38ed2b1..937d81f62f4 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -201,7 +201,7 @@ again:
if ((signed long)(get_ccount() - next) > 0)
goto again;
- /* Allow platform to do something usefull (Wdog). */
+ /* Allow platform to do something useful (Wdog). */
platform_heartbeat();
diff --git a/block/Kconfig b/block/Kconfig
index eb48edb80c1..377f6dd20e1 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -5,7 +5,7 @@
#for instance.
config LBD
bool "Support for Large Block Devices"
- depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML
+ depends on X86 || (MIPS && 32BIT) || PPC32 || (S390 && !64BIT) || SUPERH || UML
help
Say Y here if you want to attach large (bigger than 2TB) discs to
your machine, or if you want to have a raid or loopback device
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 43fa2049568..8da3cf66894 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -182,6 +182,9 @@ struct as_rq {
static kmem_cache_t *arq_pool;
+static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq);
+static void as_antic_stop(struct as_data *ad);
+
/*
* IO Context helper functions
*/
@@ -370,7 +373,7 @@ static struct as_rq *as_find_first_arq(struct as_data *ad, int data_dir)
* existing request against the same sector), which can happen when using
* direct IO, then return the alias.
*/
-static struct as_rq *as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
+static struct as_rq *__as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
{
struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node;
struct rb_node *parent = NULL;
@@ -397,6 +400,16 @@ static struct as_rq *as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
return NULL;
}
+static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
+{
+ struct as_rq *alias;
+
+ while ((unlikely(alias = __as_add_arq_rb(ad, arq)))) {
+ as_move_to_dispatch(ad, alias);
+ as_antic_stop(ad);
+ }
+}
+
static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
{
if (!ON_RB(&arq->rb_node)) {
@@ -1133,23 +1146,6 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
/*
* take it off the sort and fifo list, add to dispatch queue
*/
- while (!list_empty(&rq->queuelist)) {
- struct request *__rq = list_entry_rq(rq->queuelist.next);
- struct as_rq *__arq = RQ_DATA(__rq);
-
- list_del(&__rq->queuelist);
-
- elv_dispatch_add_tail(ad->q, __rq);
-
- if (__arq->io_context && __arq->io_context->aic)
- atomic_inc(&__arq->io_context->aic->nr_dispatched);
-
- WARN_ON(__arq->state != AS_RQ_QUEUED);
- __arq->state = AS_RQ_DISPATCHED;
-
- ad->nr_dispatched++;
- }
-
as_remove_queued_request(ad->q, rq);
WARN_ON(arq->state != AS_RQ_QUEUED);
@@ -1326,49 +1322,12 @@ fifo_expired:
}
/*
- * Add arq to a list behind alias
- */
-static inline void
-as_add_aliased_request(struct as_data *ad, struct as_rq *arq,
- struct as_rq *alias)
-{
- struct request *req = arq->request;
- struct list_head *insert = alias->request->queuelist.prev;
-
- /*
- * Transfer list of aliases
- */
- while (!list_empty(&req->queuelist)) {
- struct request *__rq = list_entry_rq(req->queuelist.next);
- struct as_rq *__arq = RQ_DATA(__rq);
-
- list_move_tail(&__rq->queuelist, &alias->request->queuelist);
-
- WARN_ON(__arq->state != AS_RQ_QUEUED);
- }
-
- /*
- * Another request with the same start sector on the rbtree.
- * Link this request to that sector. They are untangled in
- * as_move_to_dispatch
- */
- list_add(&arq->request->queuelist, insert);
-
- /*
- * Don't want to have to handle merges.
- */
- as_del_arq_hash(arq);
- arq->request->flags |= REQ_NOMERGE;
-}
-
-/*
* add arq to rbtree and fifo
*/
static void as_add_request(request_queue_t *q, struct request *rq)
{
struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = RQ_DATA(rq);
- struct as_rq *alias;
int data_dir;
arq->state = AS_RQ_NEW;
@@ -1387,33 +1346,17 @@ static void as_add_request(request_queue_t *q, struct request *rq)
atomic_inc(&arq->io_context->aic->nr_queued);
}
- alias = as_add_arq_rb(ad, arq);
- if (!alias) {
- /*
- * set expire time (only used for reads) and add to fifo list
- */
- arq->expires = jiffies + ad->fifo_expire[data_dir];
- list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
+ as_add_arq_rb(ad, arq);
+ if (rq_mergeable(arq->request))
+ as_add_arq_hash(ad, arq);
- if (rq_mergeable(arq->request))
- as_add_arq_hash(ad, arq);
- as_update_arq(ad, arq); /* keep state machine up to date */
-
- } else {
- as_add_aliased_request(ad, arq, alias);
-
- /*
- * have we been anticipating this request?
- * or does it come from the same process as the one we are
- * anticipating for?
- */
- if (ad->antic_status == ANTIC_WAIT_REQ
- || ad->antic_status == ANTIC_WAIT_NEXT) {
- if (as_can_break_anticipation(ad, arq))
- as_antic_stop(ad);
- }
- }
+ /*
+ * set expire time (only used for reads) and add to fifo list
+ */
+ arq->expires = jiffies + ad->fifo_expire[data_dir];
+ list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
+ as_update_arq(ad, arq); /* keep state machine up to date */
arq->state = AS_RQ_QUEUED;
}
@@ -1536,23 +1479,8 @@ static void as_merged_request(request_queue_t *q, struct request *req)
* if the merge was a front merge, we need to reposition request
*/
if (rq_rb_key(req) != arq->rb_key) {
- struct as_rq *alias, *next_arq = NULL;
-
- if (ad->next_arq[arq->is_sync] == arq)
- next_arq = as_find_next_arq(ad, arq);
-
- /*
- * Note! We should really be moving any old aliased requests
- * off this request and try to insert them into the rbtree. We
- * currently don't bother. Ditto the next function.
- */
as_del_arq_rb(ad, arq);
- if ((alias = as_add_arq_rb(ad, arq))) {
- list_del_init(&arq->fifo);
- as_add_aliased_request(ad, arq, alias);
- if (next_arq)
- ad->next_arq[arq->is_sync] = next_arq;
- }
+ as_add_arq_rb(ad, arq);
/*
* Note! At this stage of this and the next function, our next
* request may not be optimal - eg the request may have "grown"
@@ -1579,18 +1507,8 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
as_add_arq_hash(ad, arq);
if (rq_rb_key(req) != arq->rb_key) {
- struct as_rq *alias, *next_arq = NULL;
-
- if (ad->next_arq[arq->is_sync] == arq)
- next_arq = as_find_next_arq(ad, arq);
-
as_del_arq_rb(ad, arq);
- if ((alias = as_add_arq_rb(ad, arq))) {
- list_del_init(&arq->fifo);
- as_add_aliased_request(ad, arq, alias);
- if (next_arq)
- ad->next_arq[arq->is_sync] = next_arq;
- }
+ as_add_arq_rb(ad, arq);
}
/*
@@ -1610,18 +1528,6 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
}
/*
- * Transfer list of aliases
- */
- while (!list_empty(&next->queuelist)) {
- struct request *__rq = list_entry_rq(next->queuelist.next);
- struct as_rq *__arq = RQ_DATA(__rq);
-
- list_move_tail(&__rq->queuelist, &req->queuelist);
-
- WARN_ON(__arq->state != AS_RQ_QUEUED);
- }
-
- /*
* kill knowledge of next, this one is a goner
*/
as_remove_queued_request(q, next);
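
With the alias lists gone, the new as_add_arq_rb() handles a request that collides with an existing one at the same start sector by dispatching the existing entry and retrying the insert. The toy model below uses a plain array in place of the sector-sorted rbtree, so it only illustrates the retry loop, not the real data structure.

    #include <stdio.h>

    #define NSLOTS 8

    /* Toy stand-in for the sector-sorted rbtree: one slot per sector value. */
    static int tree[NSLOTS];   /* 0 = empty, otherwise a request id */

    /* Returns the id of an aliased request already in the slot, or 0. */
    static int try_insert(int sector, int id)
    {
            if (tree[sector])
                    return tree[sector];
            tree[sector] = id;
            return 0;
    }

    static void dispatch(int sector, int id)
    {
            printf("dispatching request %d at sector %d\n", id, sector);
            tree[sector] = 0;
    }

    /* Mirrors the new as_add_arq_rb(): evict aliases until the insert sticks. */
    static void add_request(int sector, int id)
    {
            int alias;

            while ((alias = try_insert(sector, id)))
                    dispatch(sector, alias);
    }

    int main(void)
    {
            add_request(3, 1);
            add_request(3, 2);   /* request 1 is dispatched, 2 takes its place */
            return 0;
    }
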
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ee0bb41694b..74fae2daf87 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -25,15 +25,15 @@
/*
* tunables
*/
-static int cfq_quantum = 4; /* max queue in one round of service */
-static int cfq_queued = 8; /* minimum rq allocate limit per-queue*/
-static int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
-static int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */
-static int cfq_back_penalty = 2; /* penalty of a backwards seek */
+static const int cfq_quantum = 4; /* max queue in one round of service */
+static const int cfq_queued = 8; /* minimum rq allocate limit per-queue*/
+static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
+static const int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */
+static const int cfq_back_penalty = 2; /* penalty of a backwards seek */
-static int cfq_slice_sync = HZ / 10;
+static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
-static int cfq_slice_async_rq = 2;
+static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 100;
#define CFQ_IDLE_GRACE (HZ / 10)
@@ -45,7 +45,7 @@ static int cfq_slice_idle = HZ / 100;
/*
* disable queueing at the driver/hardware level
*/
-static int cfq_max_depth = 2;
+static const int cfq_max_depth = 2;
/*
* for the hash of cfqq inside the cfqd
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 9cbec09e841..27e494b1bf9 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -19,10 +19,10 @@
/*
* See Documentation/block/deadline-iosched.txt
*/
-static int read_expire = HZ / 2; /* max time before a read is submitted. */
-static int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
-static int writes_starved = 2; /* max times reads can starve a write */
-static int fifo_batch = 16; /* # of sequential requests treated as one
+static const int read_expire = HZ / 2; /* max time before a read is submitted. */
+static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
+static const int writes_starved = 2; /* max times reads can starve a write */
+static const int fifo_batch = 16; /* # of sequential requests treated as one
by the above parameters. For throughput. */
static const int deadline_hash_shift = 5;
diff --git a/block/elevator.c b/block/elevator.c
index 6c3fc8a10bf..1d0759178e4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -64,7 +64,7 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
}
EXPORT_SYMBOL(elv_rq_merge_ok);
-inline int elv_try_merge(struct request *__rq, struct bio *bio)
+static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
int ret = ELEVATOR_NO_MERGE;
@@ -80,7 +80,6 @@ inline int elv_try_merge(struct request *__rq, struct bio *bio)
return ret;
}
-EXPORT_SYMBOL(elv_try_merge);
static struct elevator_type *elevator_find(const char *name)
{
@@ -304,15 +303,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
rq->flags &= ~REQ_STARTED;
- /*
- * if this is the flush, requeue the original instead and drop the flush
- */
- if (rq->flags & REQ_BAR_FLUSH) {
- clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
- rq = rq->end_io_data;
- }
-
- __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+ __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE, 0);
}
static void elv_drain_elevator(request_queue_t *q)
@@ -332,8 +323,19 @@ static void elv_drain_elevator(request_queue_t *q)
void __elv_add_request(request_queue_t *q, struct request *rq, int where,
int plug)
{
+ struct list_head *pos;
+ unsigned ordseq;
+
+ if (q->ordcolor)
+ rq->flags |= REQ_ORDERED_COLOR;
+
if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
/*
+ * toggle ordered color
+ */
+ q->ordcolor ^= 1;
+
+ /*
* barriers implicitly indicate back insertion
*/
if (where == ELEVATOR_INSERT_SORT)
@@ -393,6 +395,30 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
q->elevator->ops->elevator_add_req_fn(q, rq);
break;
+ case ELEVATOR_INSERT_REQUEUE:
+ /*
+ * If ordered flush isn't in progress, we do front
+ * insertion; otherwise, requests should be requeued
+ * in ordseq order.
+ */
+ rq->flags |= REQ_SOFTBARRIER;
+
+ if (q->ordseq == 0) {
+ list_add(&rq->queuelist, &q->queue_head);
+ break;
+ }
+
+ ordseq = blk_ordered_req_seq(rq);
+
+ list_for_each(pos, &q->queue_head) {
+ struct request *pos_rq = list_entry_rq(pos);
+ if (ordseq <= blk_ordered_req_seq(pos_rq))
+ break;
+ }
+
+ list_add_tail(&rq->queuelist, pos);
+ break;
+
default:
printk(KERN_ERR "%s: bad insertion point %d\n",
__FUNCTION__, where);
@@ -422,25 +448,16 @@ static inline struct request *__elv_next_request(request_queue_t *q)
{
struct request *rq;
- if (unlikely(list_empty(&q->queue_head) &&
- !q->elevator->ops->elevator_dispatch_fn(q, 0)))
- return NULL;
-
- rq = list_entry_rq(q->queue_head.next);
-
- /*
- * if this is a barrier write and the device has to issue a
- * flush sequence to support it, check how far we are
- */
- if (blk_fs_request(rq) && blk_barrier_rq(rq)) {
- BUG_ON(q->ordered == QUEUE_ORDERED_NONE);
+ while (1) {
+ while (!list_empty(&q->queue_head)) {
+ rq = list_entry_rq(q->queue_head.next);
+ if (blk_do_ordered(q, &rq))
+ return rq;
+ }
- if (q->ordered == QUEUE_ORDERED_FLUSH &&
- !blk_barrier_preflush(rq))
- rq = blk_start_pre_flush(q, rq);
+ if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
+ return NULL;
}
-
- return rq;
}
struct request *elv_next_request(request_queue_t *q)
@@ -498,7 +515,7 @@ struct request *elv_next_request(request_queue_t *q)
blkdev_dequeue_request(rq);
rq->flags |= REQ_QUIET;
end_that_request_chunk(rq, 0, nr_bytes);
- end_that_request_last(rq);
+ end_that_request_last(rq, 0);
} else {
printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
ret);
@@ -597,6 +614,20 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
e->ops->elevator_completed_req_fn(q, rq);
}
+
+ /*
+ * Check if the queue is waiting for fs requests to be
+ * drained for flush sequence.
+ */
+ if (unlikely(q->ordseq)) {
+ struct request *first_rq = list_entry_rq(q->queue_head.next);
+ if (q->in_flight == 0 &&
+ blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
+ blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
+ blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
+ q->request_fn(q);
+ }
+ }
}
int elv_register_queue(struct request_queue *q)
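
The ELEVATOR_INSERT_REQUEUE case keeps requeued requests ordered by their position in the flush sequence rather than always pushing them to the head. Below is a small model of that insertion rule over an array of sequence numbers, with blk_ordered_req_seq() reduced to the element value itself; the numbers used are arbitrary, not the real QUEUE_ORDSEQ_* constants.

    #include <stdio.h>

    #define MAXQ 16

    static int queue[MAXQ];
    static int qlen;

    /* Insert seq in front of the first element with an equal or larger
     * sequence number, mirroring the list_for_each() walk in the patch. */
    static void requeue(int seq)
    {
            int pos, i;

            for (pos = 0; pos < qlen; pos++)
                    if (seq <= queue[pos])
                            break;

            for (i = qlen; i > pos; i--)
                    queue[i] = queue[i - 1];
            queue[pos] = seq;
            qlen++;
    }

    int main(void)
    {
            int i;

            requeue(4);   /* pretend these are blk_ordered_req_seq() values */
            requeue(2);
            requeue(8);

            for (i = 0; i < qlen; i++)
                    printf("%d ", queue[i]);   /* prints: 2 4 8 */
            printf("\n");
            return 0;
    }
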
diff --git a/block/ioctl.c b/block/ioctl.c
index 6e278474f9a..e1109491c23 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -1,6 +1,7 @@
-#include <linux/sched.h> /* for capable() */
+#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
+#include <linux/hdreg.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/smp_lock.h>
@@ -245,6 +246,27 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
set_device_ro(bdev, n);
unlock_kernel();
return 0;
+ case HDIO_GETGEO: {
+ struct hd_geometry geo;
+
+ if (!arg)
+ return -EINVAL;
+ if (!disk->fops->getgeo)
+ return -ENOTTY;
+
+ /*
+ * We need to set the startsect first, the driver may
+ * want to override it.
+ */
+ geo.start = get_start_sect(bdev);
+ ret = disk->fops->getgeo(bdev, &geo);
+ if (ret)
+ return ret;
+ if (copy_to_user((struct hd_geometry __user *)arg, &geo,
+ sizeof(geo)))
+ return -EFAULT;
+ return 0;
+ }
}
lock_kernel();
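
With HDIO_GETGEO handled in blkdev_ioctl(), a driver only needs a getgeo() method that fills in the heads/sectors/cylinders fields; the generic code pre-sets geo.start and copies the result back to user space. Here is a sketch of what such a method might compute, using invented structures and a made-up capacity rather than the real block-layer types.

    #include <stdio.h>

    /* Minimal stand-ins for struct hd_geometry and a driver's private data. */
    struct hd_geometry {
            unsigned char heads;
            unsigned char sectors;
            unsigned short cylinders;
            unsigned long start;
    };

    struct fake_dev {
            unsigned long long capacity;   /* total size in 512-byte sectors */
    };

    /* A fictitious CHS layout derived from the capacity; the 255/63 split
     * is the conventional choice for large disks. */
    static int fake_getgeo(struct fake_dev *dev, struct hd_geometry *geo)
    {
            geo->heads = 255;
            geo->sectors = 63;
            geo->cylinders = dev->capacity / (geo->heads * geo->sectors);
            /* geo->start was already filled in by the generic ioctl code */
            return 0;
    }

    int main(void)
    {
            struct fake_dev dev = { .capacity = 8ULL << 21 };   /* an 8 GB disk */
            struct hd_geometry geo = { .start = 0 };

            fake_getgeo(&dev, &geo);
            printf("C/H/S = %d/%d/%d\n", geo.cylinders, geo.heads, geo.sectors);
            return 0;
    }
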
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index d4beb9a89ee..8e27d0ab0d7 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -26,7 +26,8 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
-#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
/*
* for max sense size
@@ -36,6 +37,8 @@
static void blk_unplug_work(void *data);
static void blk_unplug_timeout(unsigned long data);
static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
+static void init_request_from_bio(struct request *req, struct bio *bio);
+static int __make_request(request_queue_t *q, struct bio *bio);
/*
* For the allocated request tables
@@ -60,13 +63,15 @@ static wait_queue_head_t congestion_wqh[2] = {
/*
* Controlling structure to kblockd
*/
-static struct workqueue_struct *kblockd_workqueue;
+static struct workqueue_struct *kblockd_workqueue;
unsigned long blk_max_low_pfn, blk_max_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);
EXPORT_SYMBOL(blk_max_pfn);
+static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+
/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME (HZ/50UL)
@@ -205,6 +210,13 @@ void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn)
EXPORT_SYMBOL(blk_queue_merge_bvec);
+void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn)
+{
+ q->softirq_done_fn = fn;
+}
+
+EXPORT_SYMBOL(blk_queue_softirq_done);
+
/**
* blk_queue_make_request - define an alternate make_request function for a device
* @q: the request queue for the device to be affected
@@ -268,6 +280,7 @@ EXPORT_SYMBOL(blk_queue_make_request);
static inline void rq_init(request_queue_t *q, struct request *rq)
{
INIT_LIST_HEAD(&rq->queuelist);
+ INIT_LIST_HEAD(&rq->donelist);
rq->errors = 0;
rq->rq_status = RQ_ACTIVE;
@@ -284,12 +297,13 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
rq->sense = NULL;
rq->end_io = NULL;
rq->end_io_data = NULL;
+ rq->completion_data = NULL;
}
/**
* blk_queue_ordered - does this queue support ordered writes
- * @q: the request queue
- * @flag: see below
+ * @q: the request queue
+ * @ordered: one of QUEUE_ORDERED_*
*
* Description:
* For journalled file systems, doing ordered writes on a commit
@@ -298,28 +312,30 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
* feature should call this function and indicate so.
*
**/
-void blk_queue_ordered(request_queue_t *q, int flag)
-{
- switch (flag) {
- case QUEUE_ORDERED_NONE:
- if (q->flush_rq)
- kmem_cache_free(request_cachep, q->flush_rq);
- q->flush_rq = NULL;
- q->ordered = flag;
- break;
- case QUEUE_ORDERED_TAG:
- q->ordered = flag;
- break;
- case QUEUE_ORDERED_FLUSH:
- q->ordered = flag;
- if (!q->flush_rq)
- q->flush_rq = kmem_cache_alloc(request_cachep,
- GFP_KERNEL);
- break;
- default:
- printk("blk_queue_ordered: bad value %d\n", flag);
- break;
+int blk_queue_ordered(request_queue_t *q, unsigned ordered,
+ prepare_flush_fn *prepare_flush_fn)
+{
+ if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
+ prepare_flush_fn == NULL) {
+ printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
+ return -EINVAL;
}
+
+ if (ordered != QUEUE_ORDERED_NONE &&
+ ordered != QUEUE_ORDERED_DRAIN &&
+ ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
+ ordered != QUEUE_ORDERED_DRAIN_FUA &&
+ ordered != QUEUE_ORDERED_TAG &&
+ ordered != QUEUE_ORDERED_TAG_FLUSH &&
+ ordered != QUEUE_ORDERED_TAG_FUA) {
+ printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
+ return -EINVAL;
+ }
+
+ q->next_ordered = ordered;
+ q->prepare_flush_fn = prepare_flush_fn;
+
+ return 0;
}
EXPORT_SYMBOL(blk_queue_ordered);
@@ -344,167 +360,265 @@ EXPORT_SYMBOL(blk_queue_issue_flush_fn);
/*
* Cache flushing for ordered writes handling
*/
-static void blk_pre_flush_end_io(struct request *flush_rq)
+inline unsigned blk_ordered_cur_seq(request_queue_t *q)
{
- struct request *rq = flush_rq->end_io_data;
- request_queue_t *q = rq->q;
-
- elv_completed_request(q, flush_rq);
-
- rq->flags |= REQ_BAR_PREFLUSH;
-
- if (!flush_rq->errors)
- elv_requeue_request(q, rq);
- else {
- q->end_flush_fn(q, flush_rq);
- clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
- q->request_fn(q);
- }
+ if (!q->ordseq)
+ return 0;
+ return 1 << ffz(q->ordseq);
}
-static void blk_post_flush_end_io(struct request *flush_rq)
+unsigned blk_ordered_req_seq(struct request *rq)
{
- struct request *rq = flush_rq->end_io_data;
request_queue_t *q = rq->q;
- elv_completed_request(q, flush_rq);
+ BUG_ON(q->ordseq == 0);
- rq->flags |= REQ_BAR_POSTFLUSH;
+ if (rq == &q->pre_flush_rq)
+ return QUEUE_ORDSEQ_PREFLUSH;
+ if (rq == &q->bar_rq)
+ return QUEUE_ORDSEQ_BAR;
+ if (rq == &q->post_flush_rq)
+ return QUEUE_ORDSEQ_POSTFLUSH;
- q->end_flush_fn(q, flush_rq);
- clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
- q->request_fn(q);
+ if ((rq->flags & REQ_ORDERED_COLOR) ==
+ (q->orig_bar_rq->flags & REQ_ORDERED_COLOR))
+ return QUEUE_ORDSEQ_DRAIN;
+ else
+ return QUEUE_ORDSEQ_DONE;
}
-struct request *blk_start_pre_flush(request_queue_t *q, struct request *rq)
+void blk_ordered_complete_seq(request_queue_t *q, unsigned seq, int error)
{
- struct request *flush_rq = q->flush_rq;
-
- BUG_ON(!blk_barrier_rq(rq));
+ struct request *rq;
+ int uptodate;
- if (test_and_set_bit(QUEUE_FLAG_FLUSH, &q->queue_flags))
- return NULL;
+ if (error && !q->orderr)
+ q->orderr = error;
- rq_init(q, flush_rq);
- flush_rq->elevator_private = NULL;
- flush_rq->flags = REQ_BAR_FLUSH;
- flush_rq->rq_disk = rq->rq_disk;
- flush_rq->rl = NULL;
+ BUG_ON(q->ordseq & seq);
+ q->ordseq |= seq;
- /*
- * prepare_flush returns 0 if no flush is needed, just mark both
- * pre and post flush as done in that case
- */
- if (!q->prepare_flush_fn(q, flush_rq)) {
- rq->flags |= REQ_BAR_PREFLUSH | REQ_BAR_POSTFLUSH;
- clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
- return rq;
- }
+ if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
+ return;
/*
- * some drivers dequeue requests right away, some only after io
- * completion. make sure the request is dequeued.
+ * Okay, sequence complete.
*/
- if (!list_empty(&rq->queuelist))
- blkdev_dequeue_request(rq);
+ rq = q->orig_bar_rq;
+ uptodate = q->orderr ? q->orderr : 1;
- flush_rq->end_io_data = rq;
- flush_rq->end_io = blk_pre_flush_end_io;
+ q->ordseq = 0;
- __elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0);
- return flush_rq;
+ end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
+ end_that_request_last(rq, uptodate);
}
-static void blk_start_post_flush(request_queue_t *q, struct request *rq)
+static void pre_flush_end_io(struct request *rq, int error)
{
- struct request *flush_rq = q->flush_rq;
+ elv_completed_request(rq->q, rq);
+ blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
+}
- BUG_ON(!blk_barrier_rq(rq));
+static void bar_end_io(struct request *rq, int error)
+{
+ elv_completed_request(rq->q, rq);
+ blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
+}
- rq_init(q, flush_rq);
- flush_rq->elevator_private = NULL;
- flush_rq->flags = REQ_BAR_FLUSH;
- flush_rq->rq_disk = rq->rq_disk;
- flush_rq->rl = NULL;
+static void post_flush_end_io(struct request *rq, int error)
+{
+ elv_completed_request(rq->q, rq);
+ blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
+}
- if (q->prepare_flush_fn(q, flush_rq)) {
- flush_rq->end_io_data = rq;
- flush_rq->end_io = blk_post_flush_end_io;
+static void queue_flush(request_queue_t *q, unsigned which)
+{
+ struct request *rq;
+ rq_end_io_fn *end_io;
- __elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0);
- q->request_fn(q);
+ if (which == QUEUE_ORDERED_PREFLUSH) {
+ rq = &q->pre_flush_rq;
+ end_io = pre_flush_end_io;
+ } else {
+ rq = &q->post_flush_rq;
+ end_io = post_flush_end_io;
}
+
+ rq_init(q, rq);
+ rq->flags = REQ_HARDBARRIER;
+ rq->elevator_private = NULL;
+ rq->rq_disk = q->bar_rq.rq_disk;
+ rq->rl = NULL;
+ rq->end_io = end_io;
+ q->prepare_flush_fn(q, rq);
+
+ __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
}
-static inline int blk_check_end_barrier(request_queue_t *q, struct request *rq,
- int sectors)
+static inline struct request *start_ordered(request_queue_t *q,
+ struct request *rq)
{
- if (sectors > rq->nr_sectors)
- sectors = rq->nr_sectors;
+ q->bi_size = 0;
+ q->orderr = 0;
+ q->ordered = q->next_ordered;
+ q->ordseq |= QUEUE_ORDSEQ_STARTED;
+
+ /*
+ * Prep proxy barrier request.
+ */
+ blkdev_dequeue_request(rq);
+ q->orig_bar_rq = rq;
+ rq = &q->bar_rq;
+ rq_init(q, rq);
+ rq->flags = bio_data_dir(q->orig_bar_rq->bio);
+ rq->flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
+ rq->elevator_private = NULL;
+ rq->rl = NULL;
+ init_request_from_bio(rq, q->orig_bar_rq->bio);
+ rq->end_io = bar_end_io;
- rq->nr_sectors -= sectors;
- return rq->nr_sectors;
+ /*
+ * Queue ordered sequence. As we stack them at the head, we
+ * need to queue in reverse order. Note that we rely on the fact
+ * that no fs request uses ELEVATOR_INSERT_FRONT, and thus no fs
+ * request gets in between the ordered sequence.
+ */
+ if (q->ordered & QUEUE_ORDERED_POSTFLUSH)
+ queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
+ else
+ q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
+
+ __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+
+ if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
+ queue_flush(q, QUEUE_ORDERED_PREFLUSH);
+ rq = &q->pre_flush_rq;
+ } else
+ q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
+
+ if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
+ q->ordseq |= QUEUE_ORDSEQ_DRAIN;
+ else
+ rq = NULL;
+
+ return rq;
}
-static int __blk_complete_barrier_rq(request_queue_t *q, struct request *rq,
- int sectors, int queue_locked)
+int blk_do_ordered(request_queue_t *q, struct request **rqp)
{
- if (q->ordered != QUEUE_ORDERED_FLUSH)
- return 0;
- if (!blk_fs_request(rq) || !blk_barrier_rq(rq))
- return 0;
- if (blk_barrier_postflush(rq))
- return 0;
+ struct request *rq = *rqp, *allowed_rq;
+ int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
- if (!blk_check_end_barrier(q, rq, sectors)) {
- unsigned long flags = 0;
+ if (!q->ordseq) {
+ if (!is_barrier)
+ return 1;
- if (!queue_locked)
- spin_lock_irqsave(q->queue_lock, flags);
+ if (q->next_ordered != QUEUE_ORDERED_NONE) {
+ *rqp = start_ordered(q, rq);
+ return 1;
+ } else {
+ /*
+ * This can happen when the queue switches to
+ * ORDERED_NONE while this request is on it.
+ */
+ blkdev_dequeue_request(rq);
+ end_that_request_first(rq, -EOPNOTSUPP,
+ rq->hard_nr_sectors);
+ end_that_request_last(rq, -EOPNOTSUPP);
+ *rqp = NULL;
+ return 0;
+ }
+ }
- blk_start_post_flush(q, rq);
+ if (q->ordered & QUEUE_ORDERED_TAG) {
+ if (is_barrier && rq != &q->bar_rq)
+ *rqp = NULL;
+ return 1;
+ }
- if (!queue_locked)
- spin_unlock_irqrestore(q->queue_lock, flags);
+ switch (blk_ordered_cur_seq(q)) {
+ case QUEUE_ORDSEQ_PREFLUSH:
+ allowed_rq = &q->pre_flush_rq;
+ break;
+ case QUEUE_ORDSEQ_BAR:
+ allowed_rq = &q->bar_rq;
+ break;
+ case QUEUE_ORDSEQ_POSTFLUSH:
+ allowed_rq = &q->post_flush_rq;
+ break;
+ default:
+ allowed_rq = NULL;
+ break;
}
+ if (rq != allowed_rq &&
+ (blk_fs_request(rq) || rq == &q->pre_flush_rq ||
+ rq == &q->post_flush_rq))
+ *rqp = NULL;
+
return 1;
}
-/**
- * blk_complete_barrier_rq - complete possible barrier request
- * @q: the request queue for the device
- * @rq: the request
- * @sectors: number of sectors to complete
- *
- * Description:
- * Used in driver end_io handling to determine whether to postpone
- * completion of a barrier request until a post flush has been done. This
- * is the unlocked variant, used if the caller doesn't already hold the
- * queue lock.
- **/
-int blk_complete_barrier_rq(request_queue_t *q, struct request *rq, int sectors)
+static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
{
- return __blk_complete_barrier_rq(q, rq, sectors, 0);
+ request_queue_t *q = bio->bi_private;
+ struct bio_vec *bvec;
+ int i;
+
+ /*
+ * This is a dry run; restore bi_sector and bi_size. We'll finish
+ * this request again with the original bi_end_io after an
+ * error occurs or post flush is complete.
+ */
+ q->bi_size += bytes;
+
+ if (bio->bi_size)
+ return 1;
+
+ /* Rewind bvec's */
+ bio->bi_idx = 0;
+ bio_for_each_segment(bvec, bio, i) {
+ bvec->bv_len += bvec->bv_offset;
+ bvec->bv_offset = 0;
+ }
+
+ /* Reset bio */
+ set_bit(BIO_UPTODATE, &bio->bi_flags);
+ bio->bi_size = q->bi_size;
+ bio->bi_sector -= (q->bi_size >> 9);
+ q->bi_size = 0;
+
+ return 0;
}
-EXPORT_SYMBOL(blk_complete_barrier_rq);
-/**
- * blk_complete_barrier_rq_locked - complete possible barrier request
- * @q: the request queue for the device
- * @rq: the request
- * @sectors: number of sectors to complete
- *
- * Description:
- * See blk_complete_barrier_rq(). This variant must be used if the caller
- * holds the queue lock.
- **/
-int blk_complete_barrier_rq_locked(request_queue_t *q, struct request *rq,
- int sectors)
+static inline int ordered_bio_endio(struct request *rq, struct bio *bio,
+ unsigned int nbytes, int error)
{
- return __blk_complete_barrier_rq(q, rq, sectors, 1);
+ request_queue_t *q = rq->q;
+ bio_end_io_t *endio;
+ void *private;
+
+ if (&q->bar_rq != rq)
+ return 0;
+
+ /*
+ * Okay, this is the barrier request in progress, dry finish it.
+ */
+ if (error && !q->orderr)
+ q->orderr = error;
+
+ endio = bio->bi_end_io;
+ private = bio->bi_private;
+ bio->bi_end_io = flush_dry_bio_endio;
+ bio->bi_private = q;
+
+ bio_endio(bio, nbytes, error);
+
+ bio->bi_end_io = endio;
+ bio->bi_private = private;
+
+ return 1;
}
-EXPORT_SYMBOL(blk_complete_barrier_rq_locked);
/**
* blk_queue_bounce_limit - set bounce buffer limit for queue
@@ -1039,12 +1153,13 @@ void blk_queue_invalidate_tags(request_queue_t *q)
EXPORT_SYMBOL(blk_queue_invalidate_tags);
-static char *rq_flags[] = {
+static const char * const rq_flags[] = {
"REQ_RW",
"REQ_FAILFAST",
"REQ_SORTED",
"REQ_SOFTBARRIER",
"REQ_HARDBARRIER",
+ "REQ_FUA",
"REQ_CMD",
"REQ_NOMERGE",
"REQ_STARTED",
@@ -1064,6 +1179,7 @@ static char *rq_flags[] = {
"REQ_PM_SUSPEND",
"REQ_PM_RESUME",
"REQ_PM_SHUTDOWN",
+ "REQ_ORDERED_COLOR",
};
void blk_dump_rq_flags(struct request *rq, char *msg)
@@ -1641,8 +1757,6 @@ void blk_cleanup_queue(request_queue_t * q)
if (q->queue_tags)
__blk_queue_free_tags(q);
- blk_queue_ordered(q, QUEUE_ORDERED_NONE);
-
kmem_cache_free(requestq_cachep, q);
}
@@ -1667,8 +1781,6 @@ static int blk_init_free_list(request_queue_t *q)
return 0;
}
-static int __make_request(request_queue_t *, struct bio *);
-
request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
{
return blk_alloc_queue_node(gfp_mask, -1);
@@ -1908,40 +2020,40 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
{
struct request *rq = NULL;
struct request_list *rl = &q->rq;
- struct io_context *ioc = current_io_context(GFP_ATOMIC);
- int priv;
-
- if (rl->count[rw]+1 >= q->nr_requests) {
- /*
- * The queue will fill after this allocation, so set it as
- * full, and mark this process as "batching". This process
- * will be allowed to complete a batch of requests, others
- * will be blocked.
- */
- if (!blk_queue_full(q, rw)) {
- ioc_set_batching(q, ioc);
- blk_set_queue_full(q, rw);
- }
- }
+ struct io_context *ioc = NULL;
+ int may_queue, priv;
- switch (elv_may_queue(q, rw, bio)) {
- case ELV_MQUEUE_NO:
- goto rq_starved;
- case ELV_MQUEUE_MAY:
- break;
- case ELV_MQUEUE_MUST:
- goto get_rq;
- }
+ may_queue = elv_may_queue(q, rw, bio);
+ if (may_queue == ELV_MQUEUE_NO)
+ goto rq_starved;
- if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) {
- /*
- * The queue is full and the allocating process is not a
- * "batcher", and not exempted by the IO scheduler
- */
- goto out;
+ if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
+ if (rl->count[rw]+1 >= q->nr_requests) {
+ ioc = current_io_context(GFP_ATOMIC);
+ /*
+ * The queue will fill after this allocation, so set
+ * it as full, and mark this process as "batching".
+ * This process will be allowed to complete a batch of
+ * requests, others will be blocked.
+ */
+ if (!blk_queue_full(q, rw)) {
+ ioc_set_batching(q, ioc);
+ blk_set_queue_full(q, rw);
+ } else {
+ if (may_queue != ELV_MQUEUE_MUST
+ && !ioc_batching(q, ioc)) {
+ /*
+ * The queue is full and the allocating
+ * process is not a "batcher", and not
+ * exempted by the IO scheduler
+ */
+ goto out;
+ }
+ }
+ }
+ set_queue_congested(q, rw);
}
-get_rq:
/*
* Only allow batching queuers to allocate up to 50% over the defined
* limit of requests, otherwise we could have thousands of requests
@@ -1952,8 +2064,6 @@ get_rq:
rl->count[rw]++;
rl->starved[rw] = 0;
- if (rl->count[rw] >= queue_congestion_on_threshold(q))
- set_queue_congested(q, rw);
priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
if (priv)
@@ -1962,7 +2072,7 @@ get_rq:
spin_unlock_irq(q->queue_lock);
rq = blk_alloc_request(q, rw, bio, priv, gfp_mask);
- if (!rq) {
+ if (unlikely(!rq)) {
/*
* Allocation failed presumably due to memory. Undo anything
* we might have messed up.
@@ -1987,6 +2097,12 @@ rq_starved:
goto out;
}
+ /*
+ * ioc may be NULL here, and ioc_batching will be false. That's
+ * OK: if the queue is under the request limit then requests need
+ * not count toward the nr_batch_requests limit. There will always
+ * be some limit enforced by BLK_BATCH_TIME.
+ */
if (ioc_batching(q, ioc))
ioc->nr_batch_requests--;
@@ -2313,7 +2429,7 @@ EXPORT_SYMBOL(blk_rq_map_kern);
*/
void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
struct request *rq, int at_head,
- void (*done)(struct request *))
+ rq_end_io_fn *done)
{
int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
@@ -2517,7 +2633,7 @@ EXPORT_SYMBOL(blk_put_request);
* blk_end_sync_rq - executes a completion event on a request
* @rq: request to complete
*/
-void blk_end_sync_rq(struct request *rq)
+void blk_end_sync_rq(struct request *rq, int error)
{
struct completion *waiting = rq->waiting;
@@ -2631,29 +2747,35 @@ static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
return 0;
}
-/**
- * blk_attempt_remerge - attempt to remerge active head with next request
- * @q: The &request_queue_t belonging to the device
- * @rq: The head request (usually)
- *
- * Description:
- * For head-active devices, the queue can easily be unplugged so quickly
- * that proper merging is not done on the front request. This may hurt
- * performance greatly for some devices. The block layer cannot safely
- * do merging on that first request for these queues, but the driver can
- * call this function and make it happen any way. Only the driver knows
- * when it is safe to do so.
- **/
-void blk_attempt_remerge(request_queue_t *q, struct request *rq)
+static void init_request_from_bio(struct request *req, struct bio *bio)
{
- unsigned long flags;
+ req->flags |= REQ_CMD;
- spin_lock_irqsave(q->queue_lock, flags);
- attempt_back_merge(q, rq);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
+ /*
+ * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
+ */
+ if (bio_rw_ahead(bio) || bio_failfast(bio))
+ req->flags |= REQ_FAILFAST;
+
+ /*
+ * REQ_BARRIER implies no merging, but let's make it explicit
+ */
+ if (unlikely(bio_barrier(bio)))
+ req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
-EXPORT_SYMBOL(blk_attempt_remerge);
+ req->errors = 0;
+ req->hard_sector = req->sector = bio->bi_sector;
+ req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
+ req->current_nr_sectors = req->hard_cur_sectors = bio_cur_sectors(bio);
+ req->nr_phys_segments = bio_phys_segments(req->q, bio);
+ req->nr_hw_segments = bio_hw_segments(req->q, bio);
+ req->buffer = bio_data(bio); /* see ->buffer comment above */
+ req->waiting = NULL;
+ req->bio = req->biotail = bio;
+ req->ioprio = bio_prio(bio);
+ req->rq_disk = bio->bi_bdev->bd_disk;
+ req->start_time = jiffies;
+}
static int __make_request(request_queue_t *q, struct bio *bio)
{
@@ -2680,7 +2802,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
spin_lock_prefetch(q->queue_lock);
barrier = bio_barrier(bio);
- if (unlikely(barrier) && (q->ordered == QUEUE_ORDERED_NONE)) {
+ if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
err = -EOPNOTSUPP;
goto end_io;
}
@@ -2750,33 +2872,7 @@ get_rq:
* We don't worry about that case for efficiency. It won't happen
* often, and the elevators are able to handle it.
*/
-
- req->flags |= REQ_CMD;
-
- /*
- * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
- */
- if (bio_rw_ahead(bio) || bio_failfast(bio))
- req->flags |= REQ_FAILFAST;
-
- /*
- * REQ_BARRIER implies no merging, but lets make it explicit
- */
- if (unlikely(barrier))
- req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
-
- req->errors = 0;
- req->hard_sector = req->sector = sector;
- req->hard_nr_sectors = req->nr_sectors = nr_sectors;
- req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
- req->nr_phys_segments = bio_phys_segments(q, bio);
- req->nr_hw_segments = bio_hw_segments(q, bio);
- req->buffer = bio_data(bio); /* see ->buffer comment above */
- req->waiting = NULL;
- req->bio = req->biotail = bio;
- req->ioprio = prio;
- req->rq_disk = bio->bi_bdev->bd_disk;
- req->start_time = jiffies;
+ init_request_from_bio(req, bio);
spin_lock_irq(q->queue_lock);
if (elv_queue_empty(q))
@@ -3067,7 +3163,8 @@ static int __end_that_request_first(struct request *req, int uptodate,
if (nr_bytes >= bio->bi_size) {
req->bio = bio->bi_next;
nbytes = bio->bi_size;
- bio_endio(bio, nbytes, error);
+ if (!ordered_bio_endio(req, bio, nbytes, error))
+ bio_endio(bio, nbytes, error);
next_idx = 0;
bio_nbytes = 0;
} else {
@@ -3122,7 +3219,8 @@ static int __end_that_request_first(struct request *req, int uptodate,
* if the request wasn't completed, update state
*/
if (bio_nbytes) {
- bio_endio(bio, bio_nbytes, error);
+ if (!ordered_bio_endio(req, bio, bio_nbytes, error))
+ bio_endio(bio, bio_nbytes, error);
bio->bi_idx += next_idx;
bio_iovec(bio)->bv_offset += nr_bytes;
bio_iovec(bio)->bv_len -= nr_bytes;
@@ -3177,11 +3275,100 @@ int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
EXPORT_SYMBOL(end_that_request_chunk);
/*
+ * splice the completion data to a local structure and hand off to
+ * the queue's softirq_done_fn() to complete the requests
+ */
+static void blk_done_softirq(struct softirq_action *h)
+{
+ struct list_head *cpu_list;
+ LIST_HEAD(local_list);
+
+ local_irq_disable();
+ cpu_list = &__get_cpu_var(blk_cpu_done);
+ list_splice_init(cpu_list, &local_list);
+ local_irq_enable();
+
+ while (!list_empty(&local_list)) {
+ struct request *rq = list_entry(local_list.next, struct request, donelist);
+
+ list_del_init(&rq->donelist);
+ rq->q->softirq_done_fn(rq);
+ }
+}
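blk_done_softirq() detaches the whole per-CPU completion list with interrupts disabled and then runs the completions from a private list. A small user-space sketch of the same splice-then-process pattern, with a trivial singly linked list standing in for list_head (all names here are illustrative):

#include <stdio.h>

/* Stand-in for the list_head splice pattern: grab everything queued
 * so far in one shot, then run the completions from the local copy. */
struct node { int id; struct node *next; };

static struct node pool[3] = { {1, &pool[1]}, {2, &pool[2]}, {3, NULL} };
static struct node *pending = &pool[0];	/* per-CPU done list stand-in */

int main(void)
{
	/* "splice": detach the whole pending list in one step... */
	struct node *local = pending;
	pending = NULL;

	/* ...then complete the requests without touching the shared list. */
	for (struct node *n = local; n; n = n->next)
		printf("complete request %d\n", n->id);
	return 0;
}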
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
+ void *hcpu)
+{
+ /*
+ * If a CPU goes away, splice its entries to the current CPU
+ * and trigger a run of the softirq
+ */
+ if (action == CPU_DEAD) {
+ int cpu = (unsigned long) hcpu;
+
+ local_irq_disable();
+ list_splice_init(&per_cpu(blk_cpu_done, cpu),
+ &__get_cpu_var(blk_cpu_done));
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ local_irq_enable();
+ }
+
+ return NOTIFY_OK;
+}
+
+
+static struct notifier_block __devinitdata blk_cpu_notifier = {
+ .notifier_call = blk_cpu_notify,
+};
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/**
+ * blk_complete_request - end I/O on a request
+ * @req: the request being processed
+ *
+ * Description:
+ * Ends all I/O on a request. It does not handle partial completions,
+ * unless the driver actually implements this in its completion callback
+ * through requeueing. The actual completion happens out-of-order,
+ * through a softirq handler. The user must have registered a completion
+ * callback through blk_queue_softirq_done().
+ **/
+
+void blk_complete_request(struct request *req)
+{
+ struct list_head *cpu_list;
+ unsigned long flags;
+
+ BUG_ON(!req->q->softirq_done_fn);
+
+ local_irq_save(flags);
+
+ cpu_list = &__get_cpu_var(blk_cpu_done);
+ list_add_tail(&req->donelist, cpu_list);
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+
+ local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL(blk_complete_request);
+
+/*
* queue lock must be held
*/
-void end_that_request_last(struct request *req)
+void end_that_request_last(struct request *req, int uptodate)
{
struct gendisk *disk = req->rq_disk;
+ int error;
+
+ /*
+ * extend the uptodate bool to allow a value < 0 to carry a direct I/O error
+ */
+ error = 0;
+ if (end_io_error(uptodate))
+ error = !uptodate ? -EIO : uptodate;
if (unlikely(laptop_mode) && blk_fs_request(req))
laptop_io_completion();
@@ -3196,7 +3383,7 @@ void end_that_request_last(struct request *req)
disk->in_flight--;
}
if (req->end_io)
- req->end_io(req);
+ req->end_io(req, error);
else
__blk_put_request(req->q, req);
}
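The mapping from the extended uptodate argument to an error code can be sketched in isolation; end_io_error() is assumed here to mean "uptodate <= 0", which is what the computation above relies on (map_uptodate is an illustrative name):

#include <stdio.h>
#include <errno.h>

/* 1 (or any positive value) -> 0, 0 -> -EIO, negative -> passed through */
static int map_uptodate(int uptodate)
{
	int error = 0;

	if (uptodate <= 0)
		error = !uptodate ? -EIO : uptodate;
	return error;
}

int main(void)
{
	printf("%d %d %d\n", map_uptodate(1), map_uptodate(0),
	       map_uptodate(-ENOSPC));	/* 0, -EIO, -ENOSPC passed through */
	return 0;
}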
@@ -3208,7 +3395,7 @@ void end_request(struct request *req, int uptodate)
if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
add_disk_randomness(req->rq_disk);
blkdev_dequeue_request(req);
- end_that_request_last(req);
+ end_that_request_last(req, uptodate);
}
}
@@ -3246,6 +3433,8 @@ EXPORT_SYMBOL(kblockd_flush);
int __init blk_dev_init(void)
{
+ int i;
+
kblockd_workqueue = create_workqueue("kblockd");
if (!kblockd_workqueue)
panic("Failed to create kblockd\n");
@@ -3259,6 +3448,14 @@ int __init blk_dev_init(void)
iocontext_cachep = kmem_cache_create("blkdev_ioc",
sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
+ for (i = 0; i < NR_CPUS; i++)
+ INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+
+ open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
+#ifdef CONFIG_HOTPLUG_CPU
+ register_cpu_notifier(&blk_cpu_notifier);
+#endif
+
blk_max_low_pfn = max_low_pfn;
blk_max_pfn = max_pfn;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 1d8852f7bbf..cc72210687e 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -21,6 +21,7 @@
#include <linux/string.h>
#include <linux/module.h>
#include <linux/blkdev.h>
+#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/cdrom.h>
#include <linux/slab.h>
@@ -46,7 +47,7 @@ EXPORT_SYMBOL(scsi_command_size);
static int sg_get_version(int __user *p)
{
- static int sg_version_num = 30527;
+ static const int sg_version_num = 30527;
return put_user(sg_version_num, p);
}
@@ -190,16 +191,21 @@ static int verify_command(struct file *file, unsigned char *cmd)
safe_for_write(GPCMD_SET_STREAMING),
};
unsigned char type = cmd_type[cmd[0]];
+ int has_write_perm = 0;
/* Anybody who can open the device can do a read-safe command */
if (type & CMD_READ_SAFE)
return 0;
+ /*
+ * file can be NULL from ioctl_by_bdev()...
+ */
+ if (file)
+ has_write_perm = file->f_mode & FMODE_WRITE;
+
/* Write-safe commands just require a writable open.. */
- if (type & CMD_WRITE_SAFE) {
- if (file->f_mode & FMODE_WRITE)
- return 0;
- }
+ if ((type & CMD_WRITE_SAFE) && has_write_perm)
+ return 0;
/* And root can do any command.. */
if (capable(CAP_SYS_RAWIO))
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 89299f4ffe1..c442f2e7ce4 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -40,10 +40,11 @@ config CRYPTO_SHA1
help
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
-config CRYPTO_SHA1_Z990
- tristate "SHA1 digest algorithm for IBM zSeries z990"
- depends on CRYPTO && ARCH_S390
+config CRYPTO_SHA1_S390
+ tristate "SHA1 digest algorithm (s390)"
+ depends on CRYPTO && S390
help
+ This is the s390 hardware accelerated implementation of the
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
config CRYPTO_SHA256
@@ -55,6 +56,16 @@ config CRYPTO_SHA256
This version of SHA implements a 256 bit hash with 128 bits of
security against collision attacks.
+config CRYPTO_SHA256_S390
+ tristate "SHA256 digest algorithm (s390)"
+ depends on CRYPTO && S390
+ help
+ This is the s390 hardware accelerated implementation of the
+ SHA256 secure hash standard (DFIPS 180-2).
+
+ This version of SHA implements a 256 bit hash with 128 bits of
+ security against collision attacks.
+
config CRYPTO_SHA512
tristate "SHA384 and SHA512 digest algorithms"
depends on CRYPTO
@@ -98,9 +109,9 @@ config CRYPTO_DES
help
DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
-config CRYPTO_DES_Z990
- tristate "DES and Triple DES cipher algorithms for IBM zSeries z990"
- depends on CRYPTO && ARCH_S390
+config CRYPTO_DES_S390
+ tristate "DES and Triple DES cipher algorithms (s390)"
+ depends on CRYPTO && S390
help
DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
@@ -146,7 +157,7 @@ config CRYPTO_SERPENT
config CRYPTO_AES
tristate "AES cipher algorithms"
- depends on CRYPTO && !(X86 || UML_X86)
+ depends on CRYPTO
help
AES cipher algorithms (FIPS-197). AES uses the Rijndael
algorithm.
@@ -204,6 +215,26 @@ config CRYPTO_AES_X86_64
See <http://csrc.nist.gov/encryption/aes/> for more information.
+config CRYPTO_AES_S390
+ tristate "AES cipher algorithms (s390)"
+ depends on CRYPTO && S390
+ help
+ This is the s390 hardware accelerated implementation of the
+ AES cipher algorithms (FIPS-197). AES uses the Rijndael
+ algorithm.
+
+ Rijndael appears to be consistently a very good performer in
+ both hardware and software across a wide range of computing
+ environments regardless of its use in feedback or non-feedback
+ modes. Its key setup time is excellent, and its key agility is
+ good. Rijndael's very low memory requirements make it very well
+ suited for restricted-space environments, in which it also
+ demonstrates excellent performance. Rijndael's operations are
+ among the easiest to defend against power and timing attacks.
+
+ On s390 the System z9-109 currently only supports a key size
+ of 128 bits.
+
config CRYPTO_CAST5
tristate "CAST5 (CAST-128) cipher algorithm"
depends on CRYPTO
diff --git a/crypto/aes.c b/crypto/aes.c
index 5df92888ef5..0a6a5c14368 100644
--- a/crypto/aes.c
+++ b/crypto/aes.c
@@ -73,9 +73,6 @@ byte(const u32 x, const unsigned n)
return x >> (n << 3);
}
-#define u32_in(x) le32_to_cpu(*(const u32 *)(x))
-#define u32_out(to, from) (*(u32 *)(to) = cpu_to_le32(from))
-
struct aes_ctx {
int key_length;
u32 E[60];
@@ -256,6 +253,7 @@ static int
aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
{
struct aes_ctx *ctx = ctx_arg;
+ const __le32 *key = (const __le32 *)in_key;
u32 i, t, u, v, w;
if (key_len != 16 && key_len != 24 && key_len != 32) {
@@ -265,10 +263,10 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
ctx->key_length = key_len;
- E_KEY[0] = u32_in (in_key);
- E_KEY[1] = u32_in (in_key + 4);
- E_KEY[2] = u32_in (in_key + 8);
- E_KEY[3] = u32_in (in_key + 12);
+ E_KEY[0] = le32_to_cpu(key[0]);
+ E_KEY[1] = le32_to_cpu(key[1]);
+ E_KEY[2] = le32_to_cpu(key[2]);
+ E_KEY[3] = le32_to_cpu(key[3]);
switch (key_len) {
case 16:
@@ -278,17 +276,17 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
break;
case 24:
- E_KEY[4] = u32_in (in_key + 16);
- t = E_KEY[5] = u32_in (in_key + 20);
+ E_KEY[4] = le32_to_cpu(key[4]);
+ t = E_KEY[5] = le32_to_cpu(key[5]);
for (i = 0; i < 8; ++i)
loop6 (i);
break;
case 32:
- E_KEY[4] = u32_in (in_key + 16);
- E_KEY[5] = u32_in (in_key + 20);
- E_KEY[6] = u32_in (in_key + 24);
- t = E_KEY[7] = u32_in (in_key + 28);
+ E_KEY[4] = le32_to_cpu(key[4]);
+ E_KEY[5] = le32_to_cpu(key[5]);
+ E_KEY[6] = le32_to_cpu(key[6]);
+ t = E_KEY[7] = le32_to_cpu(key[7]);
for (i = 0; i < 7; ++i)
loop8 (i);
break;
@@ -324,13 +322,15 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags)
static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in)
{
const struct aes_ctx *ctx = ctx_arg;
+ const __le32 *src = (const __le32 *)in;
+ __le32 *dst = (__le32 *)out;
u32 b0[4], b1[4];
const u32 *kp = E_KEY + 4;
- b0[0] = u32_in (in) ^ E_KEY[0];
- b0[1] = u32_in (in + 4) ^ E_KEY[1];
- b0[2] = u32_in (in + 8) ^ E_KEY[2];
- b0[3] = u32_in (in + 12) ^ E_KEY[3];
+ b0[0] = le32_to_cpu(src[0]) ^ E_KEY[0];
+ b0[1] = le32_to_cpu(src[1]) ^ E_KEY[1];
+ b0[2] = le32_to_cpu(src[2]) ^ E_KEY[2];
+ b0[3] = le32_to_cpu(src[3]) ^ E_KEY[3];
if (ctx->key_length > 24) {
f_nround (b1, b0, kp);
@@ -353,10 +353,10 @@ static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in)
f_nround (b1, b0, kp);
f_lround (b0, b1, kp);
- u32_out (out, b0[0]);
- u32_out (out + 4, b0[1]);
- u32_out (out + 8, b0[2]);
- u32_out (out + 12, b0[3]);
+ dst[0] = cpu_to_le32(b0[0]);
+ dst[1] = cpu_to_le32(b0[1]);
+ dst[2] = cpu_to_le32(b0[2]);
+ dst[3] = cpu_to_le32(b0[3]);
}
/* decrypt a block of text */
@@ -377,14 +377,16 @@ static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in)
static void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in)
{
const struct aes_ctx *ctx = ctx_arg;
+ const __le32 *src = (const __le32 *)in;
+ __le32 *dst = (__le32 *)out;
u32 b0[4], b1[4];
const int key_len = ctx->key_length;
const u32 *kp = D_KEY + key_len + 20;
- b0[0] = u32_in (in) ^ E_KEY[key_len + 24];
- b0[1] = u32_in (in + 4) ^ E_KEY[key_len + 25];
- b0[2] = u32_in (in + 8) ^ E_KEY[key_len + 26];
- b0[3] = u32_in (in + 12) ^ E_KEY[key_len + 27];
+ b0[0] = le32_to_cpu(src[0]) ^ E_KEY[key_len + 24];
+ b0[1] = le32_to_cpu(src[1]) ^ E_KEY[key_len + 25];
+ b0[2] = le32_to_cpu(src[2]) ^ E_KEY[key_len + 26];
+ b0[3] = le32_to_cpu(src[3]) ^ E_KEY[key_len + 27];
if (key_len > 24) {
i_nround (b1, b0, kp);
@@ -407,18 +409,21 @@ static void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in)
i_nround (b1, b0, kp);
i_lround (b0, b1, kp);
- u32_out (out, b0[0]);
- u32_out (out + 4, b0[1]);
- u32_out (out + 8, b0[2]);
- u32_out (out + 12, b0[3]);
+ dst[0] = cpu_to_le32(b0[0]);
+ dst[1] = cpu_to_le32(b0[1]);
+ dst[2] = cpu_to_le32(b0[2]);
+ dst[3] = cpu_to_le32(b0[3]);
}
static struct crypto_alg aes_alg = {
.cra_name = "aes",
+ .cra_driver_name = "aes-generic",
+ .cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
+ .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
.cra_u = {
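The u32_in()/u32_out() macros are replaced by casts to __le32 * plus le32_to_cpu()/cpu_to_le32(), with the new .cra_alignmask = 3 guaranteeing the 4-byte alignment those loads need. A stand-alone sketch of the little-endian load this performs, written with explicit byte arithmetic (load_le32 is an illustrative helper, not kernel API):

#include <stdio.h>
#include <stdint.h>

/* Assemble a 32-bit value from explicit little-endian byte positions,
 * which is what le32_to_cpu() on a __le32 buffer yields on any host. */
static uint32_t load_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	const uint8_t key[4] = { 0x01, 0x02, 0x03, 0x04 };

	/* 0x04030201 regardless of host endianness */
	printf("0x%08x\n", load_le32(key));
	return 0;
}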
diff --git a/crypto/anubis.c b/crypto/anubis.c
index 3925eb0133c..2c796bdb91a 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -32,8 +32,10 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
+#include <asm/byteorder.h>
#include <asm/scatterlist.h>
#include <linux/crypto.h>
+#include <linux/types.h>
#define ANUBIS_MIN_KEY_SIZE 16
#define ANUBIS_MAX_KEY_SIZE 40
@@ -461,8 +463,8 @@ static const u32 rc[] = {
static int anubis_setkey(void *ctx_arg, const u8 *in_key,
unsigned int key_len, u32 *flags)
{
-
- int N, R, i, pos, r;
+ const __be32 *key = (const __be32 *)in_key;
+ int N, R, i, r;
u32 kappa[ANUBIS_MAX_N];
u32 inter[ANUBIS_MAX_N];
@@ -483,13 +485,8 @@ static int anubis_setkey(void *ctx_arg, const u8 *in_key,
ctx->R = R = 8 + N;
/* * map cipher key to initial key state (mu): */
- for (i = 0, pos = 0; i < N; i++, pos += 4) {
- kappa[i] =
- (in_key[pos ] << 24) ^
- (in_key[pos + 1] << 16) ^
- (in_key[pos + 2] << 8) ^
- (in_key[pos + 3] );
- }
+ for (i = 0; i < N; i++)
+ kappa[i] = be32_to_cpu(key[i]);
/*
* generate R + 1 round keys:
@@ -578,7 +575,9 @@ static int anubis_setkey(void *ctx_arg, const u8 *in_key,
static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
u8 *ciphertext, const u8 *plaintext, const int R)
{
- int i, pos, r;
+ const __be32 *src = (const __be32 *)plaintext;
+ __be32 *dst = (__be32 *)ciphertext;
+ int i, r;
u32 state[4];
u32 inter[4];
@@ -586,14 +585,8 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
* map plaintext block to cipher state (mu)
* and add initial round key (sigma[K^0]):
*/
- for (i = 0, pos = 0; i < 4; i++, pos += 4) {
- state[i] =
- (plaintext[pos ] << 24) ^
- (plaintext[pos + 1] << 16) ^
- (plaintext[pos + 2] << 8) ^
- (plaintext[pos + 3] ) ^
- roundKey[0][i];
- }
+ for (i = 0; i < 4; i++)
+ state[i] = be32_to_cpu(src[i]) ^ roundKey[0][i];
/*
* R - 1 full rounds:
@@ -663,13 +656,8 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
* map cipher state to ciphertext block (mu^{-1}):
*/
- for (i = 0, pos = 0; i < 4; i++, pos += 4) {
- u32 w = inter[i];
- ciphertext[pos ] = (u8)(w >> 24);
- ciphertext[pos + 1] = (u8)(w >> 16);
- ciphertext[pos + 2] = (u8)(w >> 8);
- ciphertext[pos + 3] = (u8)(w );
- }
+ for (i = 0; i < 4; i++)
+ dst[i] = cpu_to_be32(inter[i]);
}
static void anubis_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
@@ -689,6 +677,7 @@ static struct crypto_alg anubis_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = ANUBIS_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct anubis_ctx),
+ .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(anubis_alg.cra_list),
.cra_u = { .cipher = {
diff --git a/crypto/api.c b/crypto/api.c
index 40ae42e9b6a..e26156f7183 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -3,6 +3,7 @@
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright (c) 2002 David S. Miller (davem@redhat.com)
+ * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
*
* Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
* and Nettle, by Niels Möller.
@@ -18,9 +19,11 @@
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include "internal.h"
LIST_HEAD(crypto_alg_list);
@@ -39,6 +42,7 @@ static inline void crypto_alg_put(struct crypto_alg *alg)
static struct crypto_alg *crypto_alg_lookup(const char *name)
{
struct crypto_alg *q, *alg = NULL;
+ int best = -1;
if (!name)
return NULL;
@@ -46,11 +50,23 @@ static struct crypto_alg *crypto_alg_lookup(const char *name)
down_read(&crypto_alg_sem);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
- if (!(strcmp(q->cra_name, name))) {
- if (crypto_alg_get(q))
- alg = q;
+ int exact, fuzzy;
+
+ exact = !strcmp(q->cra_driver_name, name);
+ fuzzy = !strcmp(q->cra_name, name);
+ if (!exact && !(fuzzy && q->cra_priority > best))
+ continue;
+
+ if (unlikely(!crypto_alg_get(q)))
+ continue;
+
+ best = q->cra_priority;
+ if (alg)
+ crypto_alg_put(alg);
+ alg = q;
+
+ if (exact)
break;
- }
}
up_read(&crypto_alg_sem);
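The reworked lookup prefers an exact cra_driver_name match and otherwise keeps the highest-priority entry whose cra_name matches. A user-space model of that selection rule, ignoring reference counting (struct alg and lookup() are illustrative stand-ins):

#include <stdio.h>
#include <string.h>

struct alg { const char *name; const char *driver; int prio; };

static const struct alg *lookup(const struct alg *t, int n, const char *want)
{
	const struct alg *best_alg = NULL;
	int best = -1;

	for (int i = 0; i < n; i++) {
		int exact = !strcmp(t[i].driver, want);
		int fuzzy = !strcmp(t[i].name, want);

		/* Skip unless exact, or a better-priority fuzzy match. */
		if (!exact && !(fuzzy && t[i].prio > best))
			continue;
		best = t[i].prio;
		best_alg = &t[i];
		if (exact)
			break;
	}
	return best_alg;
}

int main(void)
{
	const struct alg tbl[] = {
		{ "aes", "aes-generic", 100 },
		{ "aes", "aes-s390",    300 },
	};

	printf("%s\n", lookup(tbl, 2, "aes")->driver);		/* aes-s390 */
	printf("%s\n", lookup(tbl, 2, "aes-generic")->driver);	/* aes-generic */
	return 0;
}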
@@ -207,9 +223,26 @@ void crypto_free_tfm(struct crypto_tfm *tfm)
kfree(tfm);
}
+static inline int crypto_set_driver_name(struct crypto_alg *alg)
+{
+ static const char suffix[] = "-generic";
+ char *driver_name = (char *)alg->cra_driver_name;
+ int len;
+
+ if (*driver_name)
+ return 0;
+
+ len = strlcpy(driver_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
+ if (len + sizeof(suffix) > CRYPTO_MAX_ALG_NAME)
+ return -ENAMETOOLONG;
+
+ memcpy(driver_name + len, suffix, sizeof(suffix));
+ return 0;
+}
+
int crypto_register_alg(struct crypto_alg *alg)
{
- int ret = 0;
+ int ret;
struct crypto_alg *q;
if (alg->cra_alignmask & (alg->cra_alignmask + 1))
@@ -218,13 +251,20 @@ int crypto_register_alg(struct crypto_alg *alg)
if (alg->cra_alignmask & alg->cra_blocksize)
return -EINVAL;
- if (alg->cra_blocksize > PAGE_SIZE)
+ if (alg->cra_blocksize > PAGE_SIZE / 8)
+ return -EINVAL;
+
+ if (alg->cra_priority < 0)
return -EINVAL;
+ ret = crypto_set_driver_name(alg);
+ if (unlikely(ret))
+ return ret;
+
down_write(&crypto_alg_sem);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
- if (!(strcmp(q->cra_name, alg->cra_name))) {
+ if (!strcmp(q->cra_driver_name, alg->cra_driver_name)) {
ret = -EEXIST;
goto out;
}
diff --git a/crypto/blowfish.c b/crypto/blowfish.c
index a8b29d54e7d..7f710b201f2 100644
--- a/crypto/blowfish.c
+++ b/crypto/blowfish.c
@@ -19,8 +19,10 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
+#include <asm/byteorder.h>
#include <asm/scatterlist.h>
#include <linux/crypto.h>
+#include <linux/types.h>
#define BF_BLOCK_SIZE 8
#define BF_MIN_KEY_SIZE 4
@@ -451,6 +453,7 @@ static struct crypto_alg alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = BF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bf_ctx),
+ .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .cipher = {
diff --git a/crypto/cast5.c b/crypto/cast5.c
index bc42f42b4fe..8834c8580c0 100644
--- a/crypto/cast5.c
+++ b/crypto/cast5.c
@@ -21,11 +21,13 @@
*/
+#include <asm/byteorder.h>
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/string.h>
+#include <linux/types.h>
#define CAST5_BLOCK_SIZE 8
#define CAST5_MIN_KEY_SIZE 5
@@ -578,6 +580,8 @@ static const u32 sb8[256] = {
static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf)
{
struct cast5_ctx *c = (struct cast5_ctx *) ctx;
+ const __be32 *src = (const __be32 *)inbuf;
+ __be32 *dst = (__be32 *)outbuf;
u32 l, r, t;
u32 I; /* used by the Fx macros */
u32 *Km;
@@ -589,8 +593,8 @@ static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf)
/* (L0,R0) <-- (m1...m64). (Split the plaintext into left and
* right 32-bit halves L0 = m1...m32 and R0 = m33...m64.)
*/
- l = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3];
- r = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7];
+ l = be32_to_cpu(src[0]);
+ r = be32_to_cpu(src[1]);
/* (16 rounds) for i from 1 to 16, compute Li and Ri as follows:
* Li = Ri-1;
@@ -634,19 +638,15 @@ static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf)
/* c1...c64 <-- (R16,L16). (Exchange final blocks L16, R16 and
* concatenate to form the ciphertext.) */
- outbuf[0] = (r >> 24) & 0xff;
- outbuf[1] = (r >> 16) & 0xff;
- outbuf[2] = (r >> 8) & 0xff;
- outbuf[3] = r & 0xff;
- outbuf[4] = (l >> 24) & 0xff;
- outbuf[5] = (l >> 16) & 0xff;
- outbuf[6] = (l >> 8) & 0xff;
- outbuf[7] = l & 0xff;
+ dst[0] = cpu_to_be32(r);
+ dst[1] = cpu_to_be32(l);
}
static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf)
{
struct cast5_ctx *c = (struct cast5_ctx *) ctx;
+ const __be32 *src = (const __be32 *)inbuf;
+ __be32 *dst = (__be32 *)outbuf;
u32 l, r, t;
u32 I;
u32 *Km;
@@ -655,8 +655,8 @@ static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf)
Km = c->Km;
Kr = c->Kr;
- l = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3];
- r = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7];
+ l = be32_to_cpu(src[0]);
+ r = be32_to_cpu(src[1]);
if (!(c->rr)) {
t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]);
@@ -690,14 +690,8 @@ static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf)
t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
}
- outbuf[0] = (r >> 24) & 0xff;
- outbuf[1] = (r >> 16) & 0xff;
- outbuf[2] = (r >> 8) & 0xff;
- outbuf[3] = r & 0xff;
- outbuf[4] = (l >> 24) & 0xff;
- outbuf[5] = (l >> 16) & 0xff;
- outbuf[6] = (l >> 8) & 0xff;
- outbuf[7] = l & 0xff;
+ dst[0] = cpu_to_be32(r);
+ dst[1] = cpu_to_be32(l);
}
static void key_schedule(u32 * x, u32 * z, u32 * k)
@@ -782,7 +776,7 @@ cast5_setkey(void *ctx, const u8 * key, unsigned key_len, u32 * flags)
u32 x[4];
u32 z[4];
u32 k[16];
- u8 p_key[16];
+ __be32 p_key[4];
struct cast5_ctx *c = (struct cast5_ctx *) ctx;
if (key_len < 5 || key_len > 16) {
@@ -796,12 +790,10 @@ cast5_setkey(void *ctx, const u8 * key, unsigned key_len, u32 * flags)
memcpy(p_key, key, key_len);
- x[0] = p_key[0] << 24 | p_key[1] << 16 | p_key[2] << 8 | p_key[3];
- x[1] = p_key[4] << 24 | p_key[5] << 16 | p_key[6] << 8 | p_key[7];
- x[2] =
- p_key[8] << 24 | p_key[9] << 16 | p_key[10] << 8 | p_key[11];
- x[3] =
- p_key[12] << 24 | p_key[13] << 16 | p_key[14] << 8 | p_key[15];
+ x[0] = be32_to_cpu(p_key[0]);
+ x[1] = be32_to_cpu(p_key[1]);
+ x[2] = be32_to_cpu(p_key[2]);
+ x[3] = be32_to_cpu(p_key[3]);
key_schedule(x, z, k);
for (i = 0; i < 16; i++)
@@ -817,6 +809,7 @@ static struct crypto_alg alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = CAST5_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast5_ctx),
+ .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = {
diff --git a/crypto/cast6.c b/crypto/cast6.c
index 3eb08107342..9e28740ba77 100644
--- a/crypto/cast6.c
+++ b/crypto/cast6.c
@@ -18,11 +18,13 @@
*/
+#include <asm/byteorder.h>
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/string.h>
+#include <linux/types.h>
#define CAST6_BLOCK_SIZE 16
#define CAST6_MIN_KEY_SIZE 16
@@ -384,7 +386,7 @@ cast6_setkey(void *ctx, const u8 * in_key, unsigned key_len, u32 * flags)
{
int i;
u32 key[8];
- u8 p_key[32]; /* padded key */
+ __be32 p_key[8]; /* padded key */
struct cast6_ctx *c = (struct cast6_ctx *) ctx;
if (key_len < 16 || key_len > 32 || key_len % 4 != 0) {
@@ -395,14 +397,14 @@ cast6_setkey(void *ctx, const u8 * in_key, unsigned key_len, u32 * flags)
memset (p_key, 0, 32);
memcpy (p_key, in_key, key_len);
- key[0] = p_key[0] << 24 | p_key[1] << 16 | p_key[2] << 8 | p_key[3]; /* A */
- key[1] = p_key[4] << 24 | p_key[5] << 16 | p_key[6] << 8 | p_key[7]; /* B */
- key[2] = p_key[8] << 24 | p_key[9] << 16 | p_key[10] << 8 | p_key[11]; /* C */
- key[3] = p_key[12] << 24 | p_key[13] << 16 | p_key[14] << 8 | p_key[15]; /* D */
- key[4] = p_key[16] << 24 | p_key[17] << 16 | p_key[18] << 8 | p_key[19]; /* E */
- key[5] = p_key[20] << 24 | p_key[21] << 16 | p_key[22] << 8 | p_key[23]; /* F */
- key[6] = p_key[24] << 24 | p_key[25] << 16 | p_key[26] << 8 | p_key[27]; /* G */
- key[7] = p_key[28] << 24 | p_key[29] << 16 | p_key[30] << 8 | p_key[31]; /* H */
+ key[0] = be32_to_cpu(p_key[0]); /* A */
+ key[1] = be32_to_cpu(p_key[1]); /* B */
+ key[2] = be32_to_cpu(p_key[2]); /* C */
+ key[3] = be32_to_cpu(p_key[3]); /* D */
+ key[4] = be32_to_cpu(p_key[4]); /* E */
+ key[5] = be32_to_cpu(p_key[5]); /* F */
+ key[6] = be32_to_cpu(p_key[6]); /* G */
+ key[7] = be32_to_cpu(p_key[7]); /* H */
@@ -444,14 +446,16 @@ static inline void QBAR (u32 * block, u8 * Kr, u32 * Km) {
static void cast6_encrypt (void * ctx, u8 * outbuf, const u8 * inbuf) {
struct cast6_ctx * c = (struct cast6_ctx *)ctx;
+ const __be32 *src = (const __be32 *)inbuf;
+ __be32 *dst = (__be32 *)outbuf;
u32 block[4];
u32 * Km;
u8 * Kr;
- block[0] = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3];
- block[1] = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7];
- block[2] = inbuf[8] << 24 | inbuf[9] << 16 | inbuf[10] << 8 | inbuf[11];
- block[3] = inbuf[12] << 24 | inbuf[13] << 16 | inbuf[14] << 8 | inbuf[15];
+ block[0] = be32_to_cpu(src[0]);
+ block[1] = be32_to_cpu(src[1]);
+ block[2] = be32_to_cpu(src[2]);
+ block[3] = be32_to_cpu(src[3]);
Km = c->Km[0]; Kr = c->Kr[0]; Q (block, Kr, Km);
Km = c->Km[1]; Kr = c->Kr[1]; Q (block, Kr, Km);
@@ -465,35 +469,25 @@ static void cast6_encrypt (void * ctx, u8 * outbuf, const u8 * inbuf) {
Km = c->Km[9]; Kr = c->Kr[9]; QBAR (block, Kr, Km);
Km = c->Km[10]; Kr = c->Kr[10]; QBAR (block, Kr, Km);
Km = c->Km[11]; Kr = c->Kr[11]; QBAR (block, Kr, Km);
-
- outbuf[0] = (block[0] >> 24) & 0xff;
- outbuf[1] = (block[0] >> 16) & 0xff;
- outbuf[2] = (block[0] >> 8) & 0xff;
- outbuf[3] = block[0] & 0xff;
- outbuf[4] = (block[1] >> 24) & 0xff;
- outbuf[5] = (block[1] >> 16) & 0xff;
- outbuf[6] = (block[1] >> 8) & 0xff;
- outbuf[7] = block[1] & 0xff;
- outbuf[8] = (block[2] >> 24) & 0xff;
- outbuf[9] = (block[2] >> 16) & 0xff;
- outbuf[10] = (block[2] >> 8) & 0xff;
- outbuf[11] = block[2] & 0xff;
- outbuf[12] = (block[3] >> 24) & 0xff;
- outbuf[13] = (block[3] >> 16) & 0xff;
- outbuf[14] = (block[3] >> 8) & 0xff;
- outbuf[15] = block[3] & 0xff;
+
+ dst[0] = cpu_to_be32(block[0]);
+ dst[1] = cpu_to_be32(block[1]);
+ dst[2] = cpu_to_be32(block[2]);
+ dst[3] = cpu_to_be32(block[3]);
}
static void cast6_decrypt (void * ctx, u8 * outbuf, const u8 * inbuf) {
struct cast6_ctx * c = (struct cast6_ctx *)ctx;
+ const __be32 *src = (const __be32 *)inbuf;
+ __be32 *dst = (__be32 *)outbuf;
u32 block[4];
u32 * Km;
u8 * Kr;
- block[0] = inbuf[0] << 24 | inbuf[1] << 16 | inbuf[2] << 8 | inbuf[3];
- block[1] = inbuf[4] << 24 | inbuf[5] << 16 | inbuf[6] << 8 | inbuf[7];
- block[2] = inbuf[8] << 24 | inbuf[9] << 16 | inbuf[10] << 8 | inbuf[11];
- block[3] = inbuf[12] << 24 | inbuf[13] << 16 | inbuf[14] << 8 | inbuf[15];
+ block[0] = be32_to_cpu(src[0]);
+ block[1] = be32_to_cpu(src[1]);
+ block[2] = be32_to_cpu(src[2]);
+ block[3] = be32_to_cpu(src[3]);
Km = c->Km[11]; Kr = c->Kr[11]; Q (block, Kr, Km);
Km = c->Km[10]; Kr = c->Kr[10]; Q (block, Kr, Km);
@@ -508,22 +502,10 @@ static void cast6_decrypt (void * ctx, u8 * outbuf, const u8 * inbuf) {
Km = c->Km[1]; Kr = c->Kr[1]; QBAR (block, Kr, Km);
Km = c->Km[0]; Kr = c->Kr[0]; QBAR (block, Kr, Km);
- outbuf[0] = (block[0] >> 24) & 0xff;
- outbuf[1] = (block[0] >> 16) & 0xff;
- outbuf[2] = (block[0] >> 8) & 0xff;
- outbuf[3] = block[0] & 0xff;
- outbuf[4] = (block[1] >> 24) & 0xff;
- outbuf[5] = (block[1] >> 16) & 0xff;
- outbuf[6] = (block[1] >> 8) & 0xff;
- outbuf[7] = block[1] & 0xff;
- outbuf[8] = (block[2] >> 24) & 0xff;
- outbuf[9] = (block[2] >> 16) & 0xff;
- outbuf[10] = (block[2] >> 8) & 0xff;
- outbuf[11] = block[2] & 0xff;
- outbuf[12] = (block[3] >> 24) & 0xff;
- outbuf[13] = (block[3] >> 16) & 0xff;
- outbuf[14] = (block[3] >> 8) & 0xff;
- outbuf[15] = block[3] & 0xff;
+ dst[0] = cpu_to_be32(block[0]);
+ dst[1] = cpu_to_be32(block[1]);
+ dst[2] = cpu_to_be32(block[2]);
+ dst[3] = cpu_to_be32(block[3]);
}
static struct crypto_alg alg = {
@@ -531,6 +513,7 @@ static struct crypto_alg alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = CAST6_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast6_ctx),
+ .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = {
diff --git a/crypto/cipher.c b/crypto/cipher.c
index dfd4bcfc597..65bcea0cd17 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -212,9 +212,10 @@ static unsigned int cbc_process_decrypt(const struct cipher_desc *desc,
struct crypto_tfm *tfm = desc->tfm;
void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
int bsize = crypto_tfm_alg_blocksize(tfm);
+ unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);
- u8 stack[src == dst ? bsize : 0];
- u8 *buf = stack;
+ u8 stack[src == dst ? bsize + alignmask : 0];
+ u8 *buf = (u8 *)ALIGN((unsigned long)stack, alignmask + 1);
u8 **dst_p = src == dst ? &buf : &dst;
void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
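The on-stack bounce buffer now reserves bsize + alignmask bytes and rounds the buffer pointer up with ALIGN(). A small sketch of the same round-up arithmetic (ALIGN_UP and the sizes are illustrative):

#include <stdio.h>
#include <stdint.h>

/* Round x up to the next multiple of a (a must be a power of two),
 * mirroring the kernel's ALIGN() macro. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

enum { BSIZE = 16, ALIGNMASK = 7 };

int main(void)
{
	uint8_t stack[BSIZE + ALIGNMASK];	/* over-allocate by alignmask */
	uint8_t *buf = (uint8_t *)ALIGN_UP((uintptr_t)stack, ALIGNMASK + 1);

	printf("raw %p  aligned %p\n", (void *)stack, (void *)buf);
	return 0;
}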
diff --git a/crypto/crc32c.c b/crypto/crc32c.c
index 256956cd937..953362423a5 100644
--- a/crypto/crc32c.c
+++ b/crypto/crc32c.c
@@ -16,6 +16,7 @@
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/crc32c.h>
+#include <linux/types.h>
#include <asm/byteorder.h>
#define CHKSUM_BLOCK_SIZE 32
diff --git a/crypto/des.c b/crypto/des.c
index a3c863dddde..7bb548653dc 100644
--- a/crypto/des.c
+++ b/crypto/des.c
@@ -12,11 +12,13 @@
*
*/
+#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/crypto.h>
+#include <linux/types.h>
#define DES_KEY_SIZE 8
#define DES_EXPKEY_WORDS 32
@@ -947,6 +949,7 @@ static struct crypto_alg des_alg = {
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct des_ctx),
.cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
.cra_list = LIST_HEAD_INIT(des_alg.cra_list),
.cra_u = { .cipher = {
.cia_min_keysize = DES_KEY_SIZE,
diff --git a/crypto/internal.h b/crypto/internal.h
index 37aa652ce5c..959e602909a 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -2,6 +2,7 @@
* Cryptographic API.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -16,10 +17,15 @@
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/init.h>
+#include <linux/list.h>
#include <linux/kernel.h>
+#include <linux/rwsem.h>
#include <linux/slab.h>
#include <asm/kmap_types.h>
+extern struct list_head crypto_alg_list;
+extern struct rw_semaphore crypto_alg_sem;
+
extern enum km_type crypto_km_types[];
static inline enum km_type crypto_kmap_type(int out)
diff --git a/crypto/khazad.c b/crypto/khazad.c
index 738cb0dd1e7..807f2bf4ea2 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -22,8 +22,10 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
+#include <asm/byteorder.h>
#include <asm/scatterlist.h>
#include <linux/crypto.h>
+#include <linux/types.h>
#define KHAZAD_KEY_SIZE 16
#define KHAZAD_BLOCK_SIZE 8
@@ -755,8 +757,8 @@ static const u64 c[KHAZAD_ROUNDS + 1] = {
static int khazad_setkey(void *ctx_arg, const u8 *in_key,
unsigned int key_len, u32 *flags)
{
-
struct khazad_ctx *ctx = ctx_arg;
+ const __be64 *key = (const __be64 *)in_key;
int r;
const u64 *S = T7;
u64 K2, K1;
@@ -767,22 +769,8 @@ static int khazad_setkey(void *ctx_arg, const u8 *in_key,
return -EINVAL;
}
- K2 = ((u64)in_key[ 0] << 56) ^
- ((u64)in_key[ 1] << 48) ^
- ((u64)in_key[ 2] << 40) ^
- ((u64)in_key[ 3] << 32) ^
- ((u64)in_key[ 4] << 24) ^
- ((u64)in_key[ 5] << 16) ^
- ((u64)in_key[ 6] << 8) ^
- ((u64)in_key[ 7] );
- K1 = ((u64)in_key[ 8] << 56) ^
- ((u64)in_key[ 9] << 48) ^
- ((u64)in_key[10] << 40) ^
- ((u64)in_key[11] << 32) ^
- ((u64)in_key[12] << 24) ^
- ((u64)in_key[13] << 16) ^
- ((u64)in_key[14] << 8) ^
- ((u64)in_key[15] );
+ K2 = be64_to_cpu(key[0]);
+ K1 = be64_to_cpu(key[1]);
/* setup the encrypt key */
for (r = 0; r <= KHAZAD_ROUNDS; r++) {
@@ -820,19 +808,12 @@ static int khazad_setkey(void *ctx_arg, const u8 *in_key,
static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
u8 *ciphertext, const u8 *plaintext)
{
-
+ const __be64 *src = (const __be64 *)plaintext;
+ __be64 *dst = (__be64 *)ciphertext;
int r;
u64 state;
- state = ((u64)plaintext[0] << 56) ^
- ((u64)plaintext[1] << 48) ^
- ((u64)plaintext[2] << 40) ^
- ((u64)plaintext[3] << 32) ^
- ((u64)plaintext[4] << 24) ^
- ((u64)plaintext[5] << 16) ^
- ((u64)plaintext[6] << 8) ^
- ((u64)plaintext[7] ) ^
- roundKey[0];
+ state = be64_to_cpu(*src) ^ roundKey[0];
for (r = 1; r < KHAZAD_ROUNDS; r++) {
state = T0[(int)(state >> 56) ] ^
@@ -856,15 +837,7 @@ static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
(T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^
roundKey[KHAZAD_ROUNDS];
- ciphertext[0] = (u8)(state >> 56);
- ciphertext[1] = (u8)(state >> 48);
- ciphertext[2] = (u8)(state >> 40);
- ciphertext[3] = (u8)(state >> 32);
- ciphertext[4] = (u8)(state >> 24);
- ciphertext[5] = (u8)(state >> 16);
- ciphertext[6] = (u8)(state >> 8);
- ciphertext[7] = (u8)(state );
-
+ *dst = cpu_to_be64(state);
}
static void khazad_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
@@ -884,6 +857,7 @@ static struct crypto_alg khazad_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = KHAZAD_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct khazad_ctx),
+ .cra_alignmask = 7,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(khazad_alg.cra_list),
.cra_u = { .cipher = {
diff --git a/crypto/md4.c b/crypto/md4.c
index bef6a9e5ac9..a2d6df5c0f8 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -24,6 +24,7 @@
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/types.h>
#include <asm/byteorder.h>
#define MD4_DIGEST_SIZE 16
diff --git a/crypto/md5.c b/crypto/md5.c
index 1ed45f9c263..7f041aef5da 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/string.h>
#include <linux/crypto.h>
+#include <linux/types.h>
#include <asm/byteorder.h>
#define MD5_DIGEST_SIZE 16
diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
index a470bcb3693..4f6ab23e14a 100644
--- a/crypto/michael_mic.c
+++ b/crypto/michael_mic.c
@@ -10,10 +10,12 @@
* published by the Free Software Foundation.
*/
+#include <asm/byteorder.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/crypto.h>
+#include <linux/types.h>
struct michael_mic_ctx {
@@ -43,21 +45,6 @@ do { \
} while (0)
-static inline u32 get_le32(const u8 *p)
-{
- return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
-}
-
-
-static inline void put_le32(u8 *p, u32 v)
-{
- p[0] = v;
- p[1] = v >> 8;
- p[2] = v >> 16;
- p[3] = v >> 24;
-}
-
-
static void michael_init(void *ctx)
{
struct michael_mic_ctx *mctx = ctx;
@@ -68,6 +55,7 @@ static void michael_init(void *ctx)
static void michael_update(void *ctx, const u8 *data, unsigned int len)
{
struct michael_mic_ctx *mctx = ctx;
+ const __le32 *src;
if (mctx->pending_len) {
int flen = 4 - mctx->pending_len;
@@ -81,21 +69,23 @@ static void michael_update(void *ctx, const u8 *data, unsigned int len)
if (mctx->pending_len < 4)
return;
- mctx->l ^= get_le32(mctx->pending);
+ src = (const __le32 *)mctx->pending;
+ mctx->l ^= le32_to_cpup(src);
michael_block(mctx->l, mctx->r);
mctx->pending_len = 0;
}
+ src = (const __le32 *)data;
+
while (len >= 4) {
- mctx->l ^= get_le32(data);
+ mctx->l ^= le32_to_cpup(src++);
michael_block(mctx->l, mctx->r);
- data += 4;
len -= 4;
}
if (len > 0) {
mctx->pending_len = len;
- memcpy(mctx->pending, data, len);
+ memcpy(mctx->pending, src, len);
}
}
@@ -104,6 +94,7 @@ static void michael_final(void *ctx, u8 *out)
{
struct michael_mic_ctx *mctx = ctx;
u8 *data = mctx->pending;
+ __le32 *dst = (__le32 *)out;
/* Last block and padding (0x5a, 4..7 x 0) */
switch (mctx->pending_len) {
@@ -125,8 +116,8 @@ static void michael_final(void *ctx, u8 *out)
/* l ^= 0; */
michael_block(mctx->l, mctx->r);
- put_le32(out, mctx->l);
- put_le32(out + 4, mctx->r);
+ dst[0] = cpu_to_le32(mctx->l);
+ dst[1] = cpu_to_le32(mctx->r);
}
@@ -134,13 +125,16 @@ static int michael_setkey(void *ctx, const u8 *key, unsigned int keylen,
u32 *flags)
{
struct michael_mic_ctx *mctx = ctx;
+ const __le32 *data = (const __le32 *)key;
+
if (keylen != 8) {
if (flags)
*flags = CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
- mctx->l = get_le32(key);
- mctx->r = get_le32(key + 4);
+
+ mctx->l = le32_to_cpu(data[0]);
+ mctx->r = le32_to_cpu(data[1]);
return 0;
}
diff --git a/crypto/proc.c b/crypto/proc.c
index 630ba91c08f..c0a5dd7ce2c 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -4,6 +4,7 @@
* Procfs information.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -18,9 +19,6 @@
#include <linux/seq_file.h>
#include "internal.h"
-extern struct list_head crypto_alg_list;
-extern struct rw_semaphore crypto_alg_sem;
-
static void *c_start(struct seq_file *m, loff_t *pos)
{
struct list_head *v;
@@ -53,7 +51,9 @@ static int c_show(struct seq_file *m, void *p)
struct crypto_alg *alg = (struct crypto_alg *)p;
seq_printf(m, "name : %s\n", alg->cra_name);
+ seq_printf(m, "driver : %s\n", alg->cra_driver_name);
seq_printf(m, "module : %s\n", module_name(alg->cra_module));
+ seq_printf(m, "priority : %d\n", alg->cra_priority);
switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_CIPHER:
diff --git a/crypto/serpent.c b/crypto/serpent.c
index 3cf2c5067ee..52ad1a49262 100644
--- a/crypto/serpent.c
+++ b/crypto/serpent.c
@@ -20,6 +20,7 @@
#include <linux/errno.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
+#include <linux/types.h>
/* Key is padded to the maximum of 256 bits before round key generation.
* Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
@@ -552,6 +553,7 @@ static struct crypto_alg tnepres_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_ctx),
+ .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(serpent_alg.cra_list),
.cra_u = { .cipher = {
diff --git a/crypto/sha1.c b/crypto/sha1.c
index 4016f3b8ce9..21571ed35b7 100644
--- a/crypto/sha1.c
+++ b/crypto/sha1.c
@@ -21,6 +21,7 @@
#include <linux/mm.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
+#include <linux/types.h>
#include <asm/scatterlist.h>
#include <asm/byteorder.h>
@@ -48,23 +49,33 @@ static void sha1_init(void *ctx)
static void sha1_update(void *ctx, const u8 *data, unsigned int len)
{
struct sha1_ctx *sctx = ctx;
- unsigned int i, j;
- u32 temp[SHA_WORKSPACE_WORDS];
+ unsigned int partial, done;
+ const u8 *src;
- j = (sctx->count >> 3) & 0x3f;
- sctx->count += len << 3;
+ partial = sctx->count & 0x3f;
+ sctx->count += len;
+ done = 0;
+ src = data;
- if ((j + len) > 63) {
- memcpy(&sctx->buffer[j], data, (i = 64-j));
- sha_transform(sctx->state, sctx->buffer, temp);
- for ( ; i + 63 < len; i += 64) {
- sha_transform(sctx->state, &data[i], temp);
+ if ((partial + len) > 63) {
+ u32 temp[SHA_WORKSPACE_WORDS];
+
+ if (partial) {
+ done = -partial;
+ memcpy(sctx->buffer + partial, data, done + 64);
+ src = sctx->buffer;
}
- j = 0;
+
+ do {
+ sha_transform(sctx->state, src, temp);
+ done += 64;
+ src = data + done;
+ } while (done + 63 < len);
+
+ memset(temp, 0, sizeof(temp));
+ partial = 0;
}
- else i = 0;
- memset(temp, 0, sizeof(temp));
- memcpy(&sctx->buffer[j], &data[i], len - i);
+ memcpy(sctx->buffer + partial, src, len - done);
}
@@ -72,37 +83,24 @@ static void sha1_update(void *ctx, const u8 *data, unsigned int len)
static void sha1_final(void* ctx, u8 *out)
{
struct sha1_ctx *sctx = ctx;
- u32 i, j, index, padlen;
- u64 t;
- u8 bits[8] = { 0, };
+ __be32 *dst = (__be32 *)out;
+ u32 i, index, padlen;
+ __be64 bits;
static const u8 padding[64] = { 0x80, };
- t = sctx->count;
- bits[7] = 0xff & t; t>>=8;
- bits[6] = 0xff & t; t>>=8;
- bits[5] = 0xff & t; t>>=8;
- bits[4] = 0xff & t; t>>=8;
- bits[3] = 0xff & t; t>>=8;
- bits[2] = 0xff & t; t>>=8;
- bits[1] = 0xff & t; t>>=8;
- bits[0] = 0xff & t;
+ bits = cpu_to_be64(sctx->count << 3);
/* Pad out to 56 mod 64 */
- index = (sctx->count >> 3) & 0x3f;
+ index = sctx->count & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64+56) - index);
sha1_update(sctx, padding, padlen);
/* Append length */
- sha1_update(sctx, bits, sizeof bits);
+ sha1_update(sctx, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */
- for (i = j = 0; i < 5; i++, j += 4) {
- u32 t2 = sctx->state[i];
- out[j+3] = t2 & 0xff; t2>>=8;
- out[j+2] = t2 & 0xff; t2>>=8;
- out[j+1] = t2 & 0xff; t2>>=8;
- out[j ] = t2 & 0xff;
- }
+ for (i = 0; i < 5; i++)
+ dst[i] = cpu_to_be32(sctx->state[i]);
/* Wipe context */
memset(sctx, 0, sizeof *sctx);
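Since sctx->count is now kept in bytes, the length trailer appended after the padding is the byte count shifted left by 3 (bits) and stored big-endian, which is what cpu_to_be64(sctx->count << 3) produces. A stand-alone sketch of that encoding (store_be64 is an illustrative helper):

#include <stdio.h>
#include <stdint.h>

/* Store a 64-bit value big-endian with explicit byte stores. */
static void store_be64(uint8_t *p, uint64_t v)
{
	for (int i = 7; i >= 0; i--) {
		p[i] = (uint8_t)v;
		v >>= 8;
	}
}

int main(void)
{
	uint8_t trailer[8];
	uint64_t count_bytes = 3;		/* e.g. hashed "abc" */

	store_be64(trailer, count_bytes << 3);	/* 24 bits */
	for (int i = 0; i < 8; i++)
		printf("%02x ", trailer[i]);	/* 00 00 00 00 00 00 00 18 */
	printf("\n");
	return 0;
}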
diff --git a/crypto/sha256.c b/crypto/sha256.c
index c78da50a9b7..9d5ef674d6a 100644
--- a/crypto/sha256.c
+++ b/crypto/sha256.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/crypto.h>
+#include <linux/types.h>
#include <asm/scatterlist.h>
#include <asm/byteorder.h>
@@ -279,22 +280,15 @@ static void sha256_update(void *ctx, const u8 *data, unsigned int len)
static void sha256_final(void* ctx, u8 *out)
{
struct sha256_ctx *sctx = ctx;
- u8 bits[8];
- unsigned int index, pad_len, t;
- int i, j;
+ __be32 *dst = (__be32 *)out;
+ __be32 bits[2];
+ unsigned int index, pad_len;
+ int i;
static const u8 padding[64] = { 0x80, };
/* Save number of bits */
- t = sctx->count[0];
- bits[7] = t; t >>= 8;
- bits[6] = t; t >>= 8;
- bits[5] = t; t >>= 8;
- bits[4] = t;
- t = sctx->count[1];
- bits[3] = t; t >>= 8;
- bits[2] = t; t >>= 8;
- bits[1] = t; t >>= 8;
- bits[0] = t;
+ bits[1] = cpu_to_be32(sctx->count[0]);
+ bits[0] = cpu_to_be32(sctx->count[1]);
/* Pad out to 56 mod 64. */
index = (sctx->count[0] >> 3) & 0x3f;
@@ -302,16 +296,11 @@ static void sha256_final(void* ctx, u8 *out)
sha256_update(sctx, padding, pad_len);
/* Append length (before padding) */
- sha256_update(sctx, bits, 8);
+ sha256_update(sctx, (const u8 *)bits, sizeof(bits));
/* Store state in digest */
- for (i = j = 0; i < 8; i++, j += 4) {
- t = sctx->state[i];
- out[j+3] = t; t >>= 8;
- out[j+2] = t; t >>= 8;
- out[j+1] = t; t >>= 8;
- out[j ] = t;
- }
+ for (i = 0; i < 8; i++)
+ dst[i] = cpu_to_be32(sctx->state[i]);
/* Zeroize sensitive information. */
memset(sctx, 0, sizeof(*sctx));
diff --git a/crypto/sha512.c b/crypto/sha512.c
index c663438322e..3e6e9392310 100644
--- a/crypto/sha512.c
+++ b/crypto/sha512.c
@@ -17,6 +17,7 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/crypto.h>
+#include <linux/types.h>
#include <asm/scatterlist.h>
#include <asm/byteorder.h>
@@ -235,39 +236,17 @@ static void
sha512_final(void *ctx, u8 *hash)
{
struct sha512_ctx *sctx = ctx;
-
static u8 padding[128] = { 0x80, };
-
- u32 t;
- u64 t2;
- u8 bits[128];
+ __be64 *dst = (__be64 *)hash;
+ __be32 bits[4];
unsigned int index, pad_len;
- int i, j;
-
- index = pad_len = t = i = j = 0;
- t2 = 0;
+ int i;
/* Save number of bits */
- t = sctx->count[0];
- bits[15] = t; t>>=8;
- bits[14] = t; t>>=8;
- bits[13] = t; t>>=8;
- bits[12] = t;
- t = sctx->count[1];
- bits[11] = t; t>>=8;
- bits[10] = t; t>>=8;
- bits[9 ] = t; t>>=8;
- bits[8 ] = t;
- t = sctx->count[2];
- bits[7 ] = t; t>>=8;
- bits[6 ] = t; t>>=8;
- bits[5 ] = t; t>>=8;
- bits[4 ] = t;
- t = sctx->count[3];
- bits[3 ] = t; t>>=8;
- bits[2 ] = t; t>>=8;
- bits[1 ] = t; t>>=8;
- bits[0 ] = t;
+ bits[3] = cpu_to_be32(sctx->count[0]);
+ bits[2] = cpu_to_be32(sctx->count[1]);
+ bits[1] = cpu_to_be32(sctx->count[2]);
+ bits[0] = cpu_to_be32(sctx->count[3]);
/* Pad out to 112 mod 128. */
index = (sctx->count[0] >> 3) & 0x7f;
@@ -275,21 +254,12 @@ sha512_final(void *ctx, u8 *hash)
sha512_update(sctx, padding, pad_len);
/* Append length (before padding) */
- sha512_update(sctx, bits, 16);
+ sha512_update(sctx, (const u8 *)bits, sizeof(bits));
/* Store state in digest */
- for (i = j = 0; i < 8; i++, j += 8) {
- t2 = sctx->state[i];
- hash[j+7] = (char)t2 & 0xff; t2>>=8;
- hash[j+6] = (char)t2 & 0xff; t2>>=8;
- hash[j+5] = (char)t2 & 0xff; t2>>=8;
- hash[j+4] = (char)t2 & 0xff; t2>>=8;
- hash[j+3] = (char)t2 & 0xff; t2>>=8;
- hash[j+2] = (char)t2 & 0xff; t2>>=8;
- hash[j+1] = (char)t2 & 0xff; t2>>=8;
- hash[j ] = (char)t2 & 0xff;
- }
-
+ for (i = 0; i < 8; i++)
+ dst[i] = cpu_to_be64(sctx->state[i]);
+
/* Zeroize sensitive information. */
memset(sctx, 0, sizeof(struct sha512_ctx));
}
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 53f4ee804bd..49e344f0080 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -805,6 +805,8 @@ static void do_test(void)
//AES
test_cipher ("aes", MODE_ECB, ENCRYPT, aes_enc_tv_template, AES_ENC_TEST_VECTORS);
test_cipher ("aes", MODE_ECB, DECRYPT, aes_dec_tv_template, AES_DEC_TEST_VECTORS);
+ test_cipher ("aes", MODE_CBC, ENCRYPT, aes_cbc_enc_tv_template, AES_CBC_ENC_TEST_VECTORS);
+ test_cipher ("aes", MODE_CBC, DECRYPT, aes_cbc_dec_tv_template, AES_CBC_DEC_TEST_VECTORS);
//CAST5
test_cipher ("cast5", MODE_ECB, ENCRYPT, cast5_enc_tv_template, CAST5_ENC_TEST_VECTORS);
@@ -910,6 +912,8 @@ static void do_test(void)
case 10:
test_cipher ("aes", MODE_ECB, ENCRYPT, aes_enc_tv_template, AES_ENC_TEST_VECTORS);
test_cipher ("aes", MODE_ECB, DECRYPT, aes_dec_tv_template, AES_DEC_TEST_VECTORS);
+ test_cipher ("aes", MODE_CBC, ENCRYPT, aes_cbc_enc_tv_template, AES_CBC_ENC_TEST_VECTORS);
+ test_cipher ("aes", MODE_CBC, DECRYPT, aes_cbc_dec_tv_template, AES_CBC_DEC_TEST_VECTORS);
break;
case 11:
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 522ffd4b6f4..733d07ed75e 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -1836,6 +1836,8 @@ static struct cipher_testvec cast6_dec_tv_template[] = {
*/
#define AES_ENC_TEST_VECTORS 3
#define AES_DEC_TEST_VECTORS 3
+#define AES_CBC_ENC_TEST_VECTORS 2
+#define AES_CBC_DEC_TEST_VECTORS 2
static struct cipher_testvec aes_enc_tv_template[] = {
{ /* From FIPS-197 */
@@ -1911,6 +1913,68 @@ static struct cipher_testvec aes_dec_tv_template[] = {
},
};
+static struct cipher_testvec aes_cbc_enc_tv_template[] = {
+ { /* From RFC 3602 */
+ .key = { 0x06, 0xa9, 0x21, 0x40, 0x36, 0xb8, 0xa1, 0x5b,
+ 0x51, 0x2e, 0x03, 0xd5, 0x34, 0x12, 0x00, 0x06 },
+ .klen = 16,
+ .iv = { 0x3d, 0xaf, 0xba, 0x42, 0x9d, 0x9e, 0xb4, 0x30,
+ 0xb4, 0x22, 0xda, 0x80, 0x2c, 0x9f, 0xac, 0x41 },
+ .input = { "Single block msg" },
+ .ilen = 16,
+ .result = { 0xe3, 0x53, 0x77, 0x9c, 0x10, 0x79, 0xae, 0xb8,
+ 0x27, 0x08, 0x94, 0x2d, 0xbe, 0x77, 0x18, 0x1a },
+ .rlen = 16,
+ }, {
+ .key = { 0xc2, 0x86, 0x69, 0x6d, 0x88, 0x7c, 0x9a, 0xa0,
+ 0x61, 0x1b, 0xbb, 0x3e, 0x20, 0x25, 0xa4, 0x5a },
+ .klen = 16,
+ .iv = { 0x56, 0x2e, 0x17, 0x99, 0x6d, 0x09, 0x3d, 0x28,
+ 0xdd, 0xb3, 0xba, 0x69, 0x5a, 0x2e, 0x6f, 0x58 },
+ .input = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
+ .ilen = 32,
+ .result = { 0xd2, 0x96, 0xcd, 0x94, 0xc2, 0xcc, 0xcf, 0x8a,
+ 0x3a, 0x86, 0x30, 0x28, 0xb5, 0xe1, 0xdc, 0x0a,
+ 0x75, 0x86, 0x60, 0x2d, 0x25, 0x3c, 0xff, 0xf9,
+ 0x1b, 0x82, 0x66, 0xbe, 0xa6, 0xd6, 0x1a, 0xb1 },
+ .rlen = 32,
+ },
+};
+
+static struct cipher_testvec aes_cbc_dec_tv_template[] = {
+ { /* From RFC 3602 */
+ .key = { 0x06, 0xa9, 0x21, 0x40, 0x36, 0xb8, 0xa1, 0x5b,
+ 0x51, 0x2e, 0x03, 0xd5, 0x34, 0x12, 0x00, 0x06 },
+ .klen = 16,
+ .iv = { 0x3d, 0xaf, 0xba, 0x42, 0x9d, 0x9e, 0xb4, 0x30,
+ 0xb4, 0x22, 0xda, 0x80, 0x2c, 0x9f, 0xac, 0x41 },
+ .input = { 0xe3, 0x53, 0x77, 0x9c, 0x10, 0x79, 0xae, 0xb8,
+ 0x27, 0x08, 0x94, 0x2d, 0xbe, 0x77, 0x18, 0x1a },
+ .ilen = 16,
+ .result = { "Single block msg" },
+ .rlen = 16,
+ }, {
+ .key = { 0xc2, 0x86, 0x69, 0x6d, 0x88, 0x7c, 0x9a, 0xa0,
+ 0x61, 0x1b, 0xbb, 0x3e, 0x20, 0x25, 0xa4, 0x5a },
+ .klen = 16,
+ .iv = { 0x56, 0x2e, 0x17, 0x99, 0x6d, 0x09, 0x3d, 0x28,
+ 0xdd, 0xb3, 0xba, 0x69, 0x5a, 0x2e, 0x6f, 0x58 },
+ .input = { 0xd2, 0x96, 0xcd, 0x94, 0xc2, 0xcc, 0xcf, 0x8a,
+ 0x3a, 0x86, 0x30, 0x28, 0xb5, 0xe1, 0xdc, 0x0a,
+ 0x75, 0x86, 0x60, 0x2d, 0x25, 0x3c, 0xff, 0xf9,
+ 0x1b, 0x82, 0x66, 0xbe, 0xa6, 0xd6, 0x1a, 0xb1 },
+ .ilen = 32,
+ .result = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
+ .rlen = 32,
+ },
+};
+
/* Cast5 test vectors from RFC 2144 */
#define CAST5_ENC_TEST_VECTORS 3
#define CAST5_DEC_TEST_VECTORS 3
diff --git a/crypto/tea.c b/crypto/tea.c
index 5924efdd3a1..a6a02b30e47 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -22,8 +22,10 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
+#include <asm/byteorder.h>
#include <asm/scatterlist.h>
#include <linux/crypto.h>
+#include <linux/types.h>
#define TEA_KEY_SIZE 16
#define TEA_BLOCK_SIZE 8
@@ -35,9 +37,6 @@
#define XTEA_ROUNDS 32
#define XTEA_DELTA 0x9e3779b9
-#define u32_in(x) le32_to_cpu(*(const __le32 *)(x))
-#define u32_out(to, from) (*(__le32 *)(to) = cpu_to_le32(from))
-
struct tea_ctx {
u32 KEY[4];
};
@@ -49,8 +48,8 @@ struct xtea_ctx {
static int tea_setkey(void *ctx_arg, const u8 *in_key,
unsigned int key_len, u32 *flags)
{
-
struct tea_ctx *ctx = ctx_arg;
+ const __le32 *key = (const __le32 *)in_key;
if (key_len != 16)
{
@@ -58,10 +57,10 @@ static int tea_setkey(void *ctx_arg, const u8 *in_key,
return -EINVAL;
}
- ctx->KEY[0] = u32_in (in_key);
- ctx->KEY[1] = u32_in (in_key + 4);
- ctx->KEY[2] = u32_in (in_key + 8);
- ctx->KEY[3] = u32_in (in_key + 12);
+ ctx->KEY[0] = le32_to_cpu(key[0]);
+ ctx->KEY[1] = le32_to_cpu(key[1]);
+ ctx->KEY[2] = le32_to_cpu(key[2]);
+ ctx->KEY[3] = le32_to_cpu(key[3]);
return 0;
@@ -73,9 +72,11 @@ static void tea_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
u32 k0, k1, k2, k3;
struct tea_ctx *ctx = ctx_arg;
+ const __le32 *in = (const __le32 *)src;
+ __le32 *out = (__le32 *)dst;
- y = u32_in (src);
- z = u32_in (src + 4);
+ y = le32_to_cpu(in[0]);
+ z = le32_to_cpu(in[1]);
k0 = ctx->KEY[0];
k1 = ctx->KEY[1];
@@ -90,19 +91,20 @@ static void tea_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3);
}
- u32_out (dst, y);
- u32_out (dst + 4, z);
+ out[0] = cpu_to_le32(y);
+ out[1] = cpu_to_le32(z);
}
static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
{
u32 y, z, n, sum;
u32 k0, k1, k2, k3;
-
struct tea_ctx *ctx = ctx_arg;
+ const __le32 *in = (const __le32 *)src;
+ __le32 *out = (__le32 *)dst;
- y = u32_in (src);
- z = u32_in (src + 4);
+ y = le32_to_cpu(in[0]);
+ z = le32_to_cpu(in[1]);
k0 = ctx->KEY[0];
k1 = ctx->KEY[1];
@@ -119,16 +121,15 @@ static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
sum -= TEA_DELTA;
}
- u32_out (dst, y);
- u32_out (dst + 4, z);
-
+ out[0] = cpu_to_le32(y);
+ out[1] = cpu_to_le32(z);
}
static int xtea_setkey(void *ctx_arg, const u8 *in_key,
unsigned int key_len, u32 *flags)
{
-
struct xtea_ctx *ctx = ctx_arg;
+ const __le32 *key = (const __le32 *)in_key;
if (key_len != 16)
{
@@ -136,10 +137,10 @@ static int xtea_setkey(void *ctx_arg, const u8 *in_key,
return -EINVAL;
}
- ctx->KEY[0] = u32_in (in_key);
- ctx->KEY[1] = u32_in (in_key + 4);
- ctx->KEY[2] = u32_in (in_key + 8);
- ctx->KEY[3] = u32_in (in_key + 12);
+ ctx->KEY[0] = le32_to_cpu(key[0]);
+ ctx->KEY[1] = le32_to_cpu(key[1]);
+ ctx->KEY[2] = le32_to_cpu(key[2]);
+ ctx->KEY[3] = le32_to_cpu(key[3]);
return 0;
@@ -147,14 +148,15 @@ static int xtea_setkey(void *ctx_arg, const u8 *in_key,
static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
{
-
u32 y, z, sum = 0;
u32 limit = XTEA_DELTA * XTEA_ROUNDS;
struct xtea_ctx *ctx = ctx_arg;
+ const __le32 *in = (const __le32 *)src;
+ __le32 *out = (__le32 *)dst;
- y = u32_in (src);
- z = u32_in (src + 4);
+ y = le32_to_cpu(in[0]);
+ z = le32_to_cpu(in[1]);
while (sum != limit) {
y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]);
@@ -162,19 +164,19 @@ static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]);
}
- u32_out (dst, y);
- u32_out (dst + 4, z);
-
+ out[0] = cpu_to_le32(y);
+ out[1] = cpu_to_le32(z);
}
static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
{
-
u32 y, z, sum;
struct tea_ctx *ctx = ctx_arg;
+ const __le32 *in = (const __le32 *)src;
+ __le32 *out = (__le32 *)dst;
- y = u32_in (src);
- z = u32_in (src + 4);
+ y = le32_to_cpu(in[0]);
+ z = le32_to_cpu(in[1]);
sum = XTEA_DELTA * XTEA_ROUNDS;
@@ -184,22 +186,22 @@ static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]);
}
- u32_out (dst, y);
- u32_out (dst + 4, z);
-
+ out[0] = cpu_to_le32(y);
+ out[1] = cpu_to_le32(z);
}
static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
{
-
u32 y, z, sum = 0;
u32 limit = XTEA_DELTA * XTEA_ROUNDS;
struct xtea_ctx *ctx = ctx_arg;
+ const __le32 *in = (const __le32 *)src;
+ __le32 *out = (__le32 *)dst;
- y = u32_in (src);
- z = u32_in (src + 4);
+ y = le32_to_cpu(in[0]);
+ z = le32_to_cpu(in[1]);
while (sum != limit) {
y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3];
@@ -207,19 +209,19 @@ static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src)
z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3];
}
- u32_out (dst, y);
- u32_out (dst + 4, z);
-
+ out[0] = cpu_to_le32(y);
+ out[1] = cpu_to_le32(z);
}
static void xeta_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
{
-
u32 y, z, sum;
struct tea_ctx *ctx = ctx_arg;
+ const __le32 *in = (const __le32 *)src;
+ __le32 *out = (__le32 *)dst;
- y = u32_in (src);
- z = u32_in (src + 4);
+ y = le32_to_cpu(in[0]);
+ z = le32_to_cpu(in[1]);
sum = XTEA_DELTA * XTEA_ROUNDS;
@@ -229,9 +231,8 @@ static void xeta_decrypt(void *ctx_arg, u8 *dst, const u8 *src)
y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3];
}
- u32_out (dst, y);
- u32_out (dst + 4, z);
-
+ out[0] = cpu_to_le32(y);
+ out[1] = cpu_to_le32(z);
}
static struct crypto_alg tea_alg = {
@@ -239,6 +240,7 @@ static struct crypto_alg tea_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct tea_ctx),
+ .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(tea_alg.cra_list),
.cra_u = { .cipher = {
@@ -254,6 +256,7 @@ static struct crypto_alg xtea_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = XTEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct xtea_ctx),
+ .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(xtea_alg.cra_list),
.cra_u = { .cipher = {
@@ -269,6 +272,7 @@ static struct crypto_alg xeta_alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = XTEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct xtea_ctx),
+ .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(xtea_alg.cra_list),
.cra_u = { .cipher = {
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
index f0a45cf716d..2d8e44f6fbe 100644
--- a/crypto/tgr192.c
+++ b/crypto/tgr192.c
@@ -24,8 +24,10 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
+#include <asm/byteorder.h>
#include <asm/scatterlist.h>
#include <linux/crypto.h>
+#include <linux/types.h>
#define TGR192_DIGEST_SIZE 24
#define TGR160_DIGEST_SIZE 20
@@ -467,18 +469,10 @@ static void tgr192_transform(struct tgr192_ctx *tctx, const u8 * data)
u64 a, b, c, aa, bb, cc;
u64 x[8];
int i;
- const u8 *ptr = data;
-
- for (i = 0; i < 8; i++, ptr += 8) {
- x[i] = (((u64)ptr[7] ) << 56) ^
- (((u64)ptr[6] & 0xffL) << 48) ^
- (((u64)ptr[5] & 0xffL) << 40) ^
- (((u64)ptr[4] & 0xffL) << 32) ^
- (((u64)ptr[3] & 0xffL) << 24) ^
- (((u64)ptr[2] & 0xffL) << 16) ^
- (((u64)ptr[1] & 0xffL) << 8) ^
- (((u64)ptr[0] & 0xffL) );
- }
+ const __le64 *ptr = (const __le64 *)data;
+
+ for (i = 0; i < 8; i++)
+ x[i] = le64_to_cpu(ptr[i]);
/* save */
a = aa = tctx->a;
@@ -558,9 +552,10 @@ static void tgr192_update(void *ctx, const u8 * inbuf, unsigned int len)
static void tgr192_final(void *ctx, u8 * out)
{
struct tgr192_ctx *tctx = ctx;
+ __be64 *dst = (__be64 *)out;
+ __be64 *be64p;
+ __le32 *le32p;
u32 t, msb, lsb;
- u8 *p;
- int i, j;
tgr192_update(tctx, NULL, 0); /* flush */ ;
@@ -594,41 +589,16 @@ static void tgr192_final(void *ctx, u8 * out)
memset(tctx->hash, 0, 56); /* fill next block with zeroes */
}
/* append the 64 bit count */
- tctx->hash[56] = lsb;
- tctx->hash[57] = lsb >> 8;
- tctx->hash[58] = lsb >> 16;
- tctx->hash[59] = lsb >> 24;
- tctx->hash[60] = msb;
- tctx->hash[61] = msb >> 8;
- tctx->hash[62] = msb >> 16;
- tctx->hash[63] = msb >> 24;
+ le32p = (__le32 *)&tctx->hash[56];
+ le32p[0] = cpu_to_le32(lsb);
+ le32p[1] = cpu_to_le32(msb);
+
tgr192_transform(tctx, tctx->hash);
- p = tctx->hash;
- *p++ = tctx->a >> 56; *p++ = tctx->a >> 48; *p++ = tctx->a >> 40;
- *p++ = tctx->a >> 32; *p++ = tctx->a >> 24; *p++ = tctx->a >> 16;
- *p++ = tctx->a >> 8; *p++ = tctx->a;\
- *p++ = tctx->b >> 56; *p++ = tctx->b >> 48; *p++ = tctx->b >> 40;
- *p++ = tctx->b >> 32; *p++ = tctx->b >> 24; *p++ = tctx->b >> 16;
- *p++ = tctx->b >> 8; *p++ = tctx->b;
- *p++ = tctx->c >> 56; *p++ = tctx->c >> 48; *p++ = tctx->c >> 40;
- *p++ = tctx->c >> 32; *p++ = tctx->c >> 24; *p++ = tctx->c >> 16;
- *p++ = tctx->c >> 8; *p++ = tctx->c;
-
-
- /* unpack the hash */
- j = 7;
- for (i = 0; i < 8; i++) {
- out[j--] = (tctx->a >> 8 * i) & 0xff;
- }
- j = 15;
- for (i = 0; i < 8; i++) {
- out[j--] = (tctx->b >> 8 * i) & 0xff;
- }
- j = 23;
- for (i = 0; i < 8; i++) {
- out[j--] = (tctx->c >> 8 * i) & 0xff;
- }
+ be64p = (__be64 *)tctx->hash;
+ dst[0] = be64p[0] = cpu_to_be64(tctx->a);
+ dst[1] = be64p[1] = cpu_to_be64(tctx->b);
+ dst[2] = be64p[2] = cpu_to_be64(tctx->c);
}
static void tgr160_final(void *ctx, u8 * out)
diff --git a/crypto/twofish.c b/crypto/twofish.c
index 4efff8cf995..a26d885486f 100644
--- a/crypto/twofish.c
+++ b/crypto/twofish.c
@@ -37,6 +37,8 @@
* Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
* Third Edition.
*/
+
+#include <asm/byteorder.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -621,13 +623,11 @@ static const u8 calc_sb_tbl[512] = {
* whitening subkey number m. */
#define INPACK(n, x, m) \
- x = in[4 * (n)] ^ (in[4 * (n) + 1] << 8) \
- ^ (in[4 * (n) + 2] << 16) ^ (in[4 * (n) + 3] << 24) ^ ctx->w[m]
+ x = le32_to_cpu(src[n]) ^ ctx->w[m]
#define OUTUNPACK(n, x, m) \
x ^= ctx->w[m]; \
- out[4 * (n)] = x; out[4 * (n) + 1] = x >> 8; \
- out[4 * (n) + 2] = x >> 16; out[4 * (n) + 3] = x >> 24
+ dst[n] = cpu_to_le32(x)
#define TF_MIN_KEY_SIZE 16
#define TF_MAX_KEY_SIZE 32
@@ -804,6 +804,8 @@ static int twofish_setkey(void *cx, const u8 *key,
static void twofish_encrypt(void *cx, u8 *out, const u8 *in)
{
struct twofish_ctx *ctx = cx;
+ const __le32 *src = (const __le32 *)in;
+ __le32 *dst = (__le32 *)out;
/* The four 32-bit chunks of the text. */
u32 a, b, c, d;
@@ -839,6 +841,8 @@ static void twofish_encrypt(void *cx, u8 *out, const u8 *in)
static void twofish_decrypt(void *cx, u8 *out, const u8 *in)
{
struct twofish_ctx *ctx = cx;
+ const __le32 *src = (const __le32 *)in;
+ __le32 *dst = (__le32 *)out;
/* The four 32-bit chunks of the text. */
u32 a, b, c, d;
@@ -875,6 +879,7 @@ static struct crypto_alg alg = {
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_ctx),
+ .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = { .cipher = {
diff --git a/crypto/wp512.c b/crypto/wp512.c
index fd6e20e1f29..b226a126cfa 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -22,8 +22,10 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
+#include <asm/byteorder.h>
#include <asm/scatterlist.h>
#include <linux/crypto.h>
+#include <linux/types.h>
#define WP512_DIGEST_SIZE 64
#define WP384_DIGEST_SIZE 48
@@ -778,19 +780,10 @@ static void wp512_process_buffer(struct wp512_ctx *wctx) {
u64 block[8]; /* mu(buffer) */
u64 state[8]; /* the cipher state */
u64 L[8];
- u8 *buffer = wctx->buffer;
+ const __be64 *buffer = (const __be64 *)wctx->buffer;
- for (i = 0; i < 8; i++, buffer += 8) {
- block[i] =
- (((u64)buffer[0] ) << 56) ^
- (((u64)buffer[1] & 0xffL) << 48) ^
- (((u64)buffer[2] & 0xffL) << 40) ^
- (((u64)buffer[3] & 0xffL) << 32) ^
- (((u64)buffer[4] & 0xffL) << 24) ^
- (((u64)buffer[5] & 0xffL) << 16) ^
- (((u64)buffer[6] & 0xffL) << 8) ^
- (((u64)buffer[7] & 0xffL) );
- }
+ for (i = 0; i < 8; i++)
+ block[i] = be64_to_cpu(buffer[i]);
state[0] = block[0] ^ (K[0] = wctx->hash[0]);
state[1] = block[1] ^ (K[1] = wctx->hash[1]);
@@ -1069,7 +1062,7 @@ static void wp512_final(void *ctx, u8 *out)
u8 *bitLength = wctx->bitLength;
int bufferBits = wctx->bufferBits;
int bufferPos = wctx->bufferPos;
- u8 *digest = out;
+ __be64 *digest = (__be64 *)out;
buffer[bufferPos] |= 0x80U >> (bufferBits & 7);
bufferPos++;
@@ -1088,17 +1081,8 @@ static void wp512_final(void *ctx, u8 *out)
memcpy(&buffer[WP512_BLOCK_SIZE - WP512_LENGTHBYTES],
bitLength, WP512_LENGTHBYTES);
wp512_process_buffer(wctx);
- for (i = 0; i < WP512_DIGEST_SIZE/8; i++) {
- digest[0] = (u8)(wctx->hash[i] >> 56);
- digest[1] = (u8)(wctx->hash[i] >> 48);
- digest[2] = (u8)(wctx->hash[i] >> 40);
- digest[3] = (u8)(wctx->hash[i] >> 32);
- digest[4] = (u8)(wctx->hash[i] >> 24);
- digest[5] = (u8)(wctx->hash[i] >> 16);
- digest[6] = (u8)(wctx->hash[i] >> 8);
- digest[7] = (u8)(wctx->hash[i] );
- digest += 8;
- }
+ for (i = 0; i < WP512_DIGEST_SIZE/8; i++)
+ digest[i] = cpu_to_be64(wctx->hash[i]);
wctx->bufferBits = bufferBits;
wctx->bufferPos = bufferPos;
}
diff --git a/drivers/Makefile b/drivers/Makefile
index ea410b6b764..7fc3f0f08b2 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_ACPI) += acpi/
# PnP must come after ACPI since it will eventually need to check if acpi
# was used and do nothing if so
obj-$(CONFIG_PNP) += pnp/
+obj-$(CONFIG_ARM_AMBA) += amba/
# char/ comes before serial/ etc so that the VT console is the boot-time
# default.
diff --git a/drivers/acorn/block/mfmhd.c b/drivers/acorn/block/mfmhd.c
index 4b65f74d66b..ce074f6f336 100644
--- a/drivers/acorn/block/mfmhd.c
+++ b/drivers/acorn/block/mfmhd.c
@@ -129,19 +129,6 @@ static DEFINE_SPINLOCK(mfm_lock);
#define MAJOR_NR MFM_ACORN_MAJOR
#define QUEUE (mfm_queue)
#define CURRENT elv_next_request(mfm_queue)
-/*
- * This sort of stuff should be in a header file shared with ide.c, hd.c, xd.c etc
- */
-#ifndef HDIO_GETGEO
-#define HDIO_GETGEO 0x301
-struct hd_geometry {
- unsigned char heads;
- unsigned char sectors;
- unsigned short cylinders;
- unsigned long start;
-};
-#endif
-
/*
* Configuration section
@@ -1153,22 +1140,13 @@ static int mfm_initdrives(void)
* The 'front' end of the mfm driver follows...
*/
-static int mfm_ioctl(struct inode *inode, struct file *file, u_int cmd, u_long arg)
+static int mfm_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
- struct mfm_info *p = inode->i_bdev->bd_disk->private_data;
- struct hd_geometry *geo = (struct hd_geometry *) arg;
- if (cmd != HDIO_GETGEO)
- return -EINVAL;
- if (!arg)
- return -EINVAL;
- if (put_user (p->heads, &geo->heads))
- return -EFAULT;
- if (put_user (p->sectors, &geo->sectors))
- return -EFAULT;
- if (put_user (p->cylinders, &geo->cylinders))
- return -EFAULT;
- if (put_user (get_start_sect(inode->i_bdev), &geo->start))
- return -EFAULT;
+ struct mfm_info *p = bdev->bd_disk->private_data;
+
+ geo->heads = p->heads;
+ geo->sectors = p->sectors;
+ geo->cylinders = p->cylinders;
return 0;
}
@@ -1219,7 +1197,7 @@ void xd_set_geometry(struct block_device *bdev, unsigned char secsptrack,
static struct block_device_operations mfm_fops =
{
.owner = THIS_MODULE,
- .ioctl = mfm_ioctl,
+ .getgeo = mfm_getgeo,
};
/*
diff --git a/drivers/acorn/char/i2c.c b/drivers/acorn/char/i2c.c
index c22bb9dca1e..c26c08b3682 100644
--- a/drivers/acorn/char/i2c.c
+++ b/drivers/acorn/char/i2c.c
@@ -12,6 +12,7 @@
* On Acorn machines, the following i2c devices are on the bus:
* - PCF8583 real time clock & static RAM
*/
+#include <linux/capability.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/time.h>
diff --git a/drivers/acorn/char/pcf8583.c b/drivers/acorn/char/pcf8583.c
index e26f007a141..9b49f316ae9 100644
--- a/drivers/acorn/char/pcf8583.c
+++ b/drivers/acorn/char/pcf8583.c
@@ -257,9 +257,10 @@ pcf8583_command(struct i2c_client *client, unsigned int cmd, void *arg)
}
static struct i2c_driver pcf8583_driver = {
- .name = "PCF8583",
+ .driver = {
+ .name = "PCF8583",
+ },
.id = I2C_DRIVERID_PCF8583,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = pcf8583_probe,
.detach_client = pcf8583_detach,
.command = pcf8583_command
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index fce21c25752..6d61945260a 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -168,7 +168,6 @@ config ACPI_NUMA
config ACPI_ASUS
tristate "ASUS/Medion Laptop Extras"
depends on X86
- default y
---help---
This driver provides support for extra features of ACPI-compatible
ASUS laptops. As some of Medion laptops are made by ASUS, it may also
@@ -209,7 +208,6 @@ config ACPI_IBM
config ACPI_TOSHIBA
tristate "Toshiba Laptop Extras"
depends on X86
- default y
---help---
This driver adds support for access to certain system settings
on "legacy free" Toshiba laptops. These laptops can be recognized by
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index e3cd0b16031..20c9a37643c 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -204,11 +204,13 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size,
return AE_OK;
}
+EXPORT_SYMBOL_GPL(acpi_os_map_memory);
void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
{
iounmap(virt);
}
+EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
#ifdef ACPI_FUTURE_USAGE
acpi_status
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 09567c2edcf..e567c03b238 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -361,8 +361,7 @@ acpi_pci_irq_derive(struct pci_dev *dev,
if ((bridge->class >> 8) == PCI_CLASS_BRIDGE_CARDBUS) {
/* PC card has the same IRQ as its cardbridge */
- pci_read_config_byte(bridge, PCI_INTERRUPT_PIN,
- &bridge_pin);
+ bridge_pin = bridge->pin;
if (!bridge_pin) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"No interrupt pin configured for device %s\n",
@@ -412,7 +411,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
if (!dev)
return_VALUE(-EINVAL);
- pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ pin = dev->pin;
if (!pin) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"No interrupt pin configured for device %s\n",
@@ -503,7 +502,7 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
if (!dev || !dev->bus)
return_VOID;
- pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ pin = dev->pin;
if (!pin)
return_VOID;
pin--;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 807b0df308f..cc049338e41 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -843,6 +843,15 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
unsigned int i;
unsigned int working = 0;
+#ifdef ARCH_APICTIMER_STOPS_ON_C3
+ struct cpuinfo_x86 *c = cpu_data + pr->id;
+ cpumask_t mask = cpumask_of_cpu(pr->id);
+
+ if (c->x86_vendor == X86_VENDOR_INTEL) {
+ on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
+ }
+#endif
+
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
struct acpi_processor_cx *cx = &pr->power.states[i];
@@ -857,6 +866,12 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
case ACPI_STATE_C3:
acpi_processor_power_verify_c3(pr, cx);
+#ifdef ARCH_APICTIMER_STOPS_ON_C3
+ if (c->x86_vendor == X86_VENDOR_INTEL) {
+ on_each_cpu(switch_APIC_timer_to_ipi,
+ &mask, 1, 1);
+ }
+#endif
break;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 0745d20afb8..3b26a710436 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -475,8 +475,10 @@ static LIST_HEAD(acpi_bus_drivers);
static DECLARE_MUTEX(acpi_bus_drivers_lock);
/**
- * acpi_bus_match
- * --------------
+ * acpi_bus_match - match device IDs to driver's supported IDs
+ * @device: the device that we are trying to match to a driver
+ * @driver: driver whose device id table is being checked
+ *
* Checks the device's hardware (_HID) or compatible (_CID) ids to see if it
* matches the specified driver's criteria.
*/
@@ -489,8 +491,10 @@ acpi_bus_match(struct acpi_device *device, struct acpi_driver *driver)
}
/**
- * acpi_bus_driver_init
- * --------------------
+ * acpi_bus_driver_init - add a device to a driver
+ * @device: the device to add and initialize
+ * @driver: driver for the device
+ *
* Used to initialize a device via its device driver. Called whenever a
* driver is bound to a device. Invokes the driver's add() and start() ops.
*/
@@ -603,8 +607,9 @@ static int acpi_driver_detach(struct acpi_driver *drv)
}
/**
- * acpi_bus_register_driver
- * ------------------------
+ * acpi_bus_register_driver - register a driver with the ACPI bus
+ * @driver: driver being registered
+ *
* Registers a driver with the ACPI bus. Searches the namespace for all
* devices that match the driver's criteria and binds. Returns the
* number of devices that were claimed by the driver, or a negative
@@ -633,8 +638,9 @@ int acpi_bus_register_driver(struct acpi_driver *driver)
EXPORT_SYMBOL(acpi_bus_register_driver);
/**
- * acpi_bus_unregister_driver
- * --------------------------
+ * acpi_bus_unregister_driver - unregisters a driver with the APIC bus
+ * @driver: driver to unregister
+ *
* Unregisters a driver with the ACPI bus. Searches the namespace for all
* devices that match the driver's criteria and unbinds.
*/
@@ -660,8 +666,9 @@ int acpi_bus_unregister_driver(struct acpi_driver *driver)
EXPORT_SYMBOL(acpi_bus_unregister_driver);
/**
- * acpi_bus_find_driver
- * --------------------
+ * acpi_bus_find_driver - check if there is a driver installed for the device
+ * @device: device that we are trying to find a supporting driver for
+ *
* Parses the list of registered drivers looking for a driver applicable for
* the specified device.
*/
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index a2bf25b05e1..31d4f3ffc26 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -37,7 +37,7 @@
#define PREFIX "ACPI: "
-#define ACPI_MAX_TABLES 256
+#define ACPI_MAX_TABLES 128
static char *acpi_table_signatures[ACPI_TABLE_COUNT] = {
[ACPI_TABLE_UNKNOWN] = "????",
@@ -74,7 +74,7 @@ struct acpi_table_sdt {
static unsigned long sdt_pa; /* Physical Address */
static unsigned long sdt_count; /* Table count */
-static struct acpi_table_sdt sdt_entry[ACPI_MAX_TABLES];
+static struct acpi_table_sdt sdt_entry[ACPI_MAX_TABLES] __initdata;
void acpi_table_print(struct acpi_table_header *header, unsigned long phys_addr)
{
diff --git a/drivers/amba/Makefile b/drivers/amba/Makefile
new file mode 100644
index 00000000000..40fe74097be
--- /dev/null
+++ b/drivers/amba/Makefile
@@ -0,0 +1,2 @@
+obj-y += bus.o
+
diff --git a/arch/arm/common/amba.c b/drivers/amba/bus.c
index c95ec9eab99..889855d8d9f 100644
--- a/arch/arm/common/amba.c
+++ b/drivers/amba/bus.c
@@ -12,10 +12,9 @@
#include <linux/device.h>
#include <linux/string.h>
#include <linux/slab.h>
+#include <linux/amba/bus.h>
#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/hardware/amba.h>
#include <asm/sizes.h>
#define to_amba_device(d) container_of(d, struct amba_device, dev)
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index c57e20dcb0f..074abc81ec3 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -2126,8 +2126,7 @@ static void process_rsq(ns_dev *card)
if (!ns_rsqe_valid(card->rsq.next))
return;
- while (ns_rsqe_valid(card->rsq.next))
- {
+ do {
dequeue_rx(card, card->rsq.next);
ns_rsqe_init(card->rsq.next);
previous = card->rsq.next;
@@ -2135,7 +2134,7 @@ static void process_rsq(ns_dev *card)
card->rsq.next = card->rsq.base;
else
card->rsq.next++;
- }
+ } while (ns_rsqe_valid(card->rsq.next));
writel((((u32) previous) - ((u32) card->rsq.base)),
card->membase + RSQH);
}
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 55959e4d1cb..f484747f255 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -669,11 +669,13 @@ printk("NONONONOO!!!!\n");
u32 *put;
int i;
- dsc = (u32 *) kmalloc(uPD98401_TXPD_SIZE*2+
- uPD98401_TXBD_SIZE*ATM_SKB(skb)->iovcnt,GFP_ATOMIC);
+ dsc = kmalloc(uPD98401_TXPD_SIZE * 2 +
+ uPD98401_TXBD_SIZE * ATM_SKB(skb)->iovcnt, GFP_ATOMIC);
if (!dsc) {
- if (vcc->pop) vcc->pop(vcc,skb);
- else dev_kfree_skb_irq(skb);
+ if (vcc->pop)
+ vcc->pop(vcc, skb);
+ else
+ dev_kfree_skb_irq(skb);
return -EAGAIN;
}
/* @@@ should check alignment */
@@ -683,7 +685,7 @@ printk("NONONONOO!!!!\n");
(ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ?
uPD98401_CLPM_1 : uPD98401_CLPM_0));
dsc[1] = 0;
- dsc[2] = ATM_SKB(skb)->iovcnt*uPD98401_TXBD_SIZE;
+ dsc[2] = ATM_SKB(skb)->iovcnt * uPD98401_TXBD_SIZE;
dsc[3] = virt_to_bus(put);
for (i = 0; i < ATM_SKB(skb)->iovcnt; i++) {
*put++ = ((struct iovec *) skb->data)[i].iov_len;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index fd8059920db..6b355bd7816 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -161,8 +161,8 @@ static ssize_t store_uevent(struct device *dev, struct device_attribute *attr,
return count;
}
-/**
- * device_subsys - structure to be registered with kobject core.
+/*
+ * devices_subsys - structure to be registered with kobject core.
*/
decl_subsys(devices, &ktype_device, &device_uevent_ops);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 281d26784d2..07a7f97e1de 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -83,6 +83,31 @@ static inline void register_cpu_control(struct cpu *cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
+#ifdef CONFIG_KEXEC
+#include <linux/kexec.h>
+
+static ssize_t show_crash_notes(struct sys_device *dev, char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, sysdev);
+ ssize_t rc;
+ unsigned long long addr;
+ int cpunum;
+
+ cpunum = cpu->sysdev.id;
+
+ /*
+	 * Might be reading another cpu's data, depending on which cpu the
+	 * reading thread has been scheduled on. But cpu data (memory) is
+	 * allocated once during boot up and does not change thereafter, so
+	 * this operation should be safe. No locking required.
+ */
+ addr = __pa(per_cpu_ptr(crash_notes, cpunum));
+ rc = sprintf(buf, "%Lx\n", addr);
+ return rc;
+}
+static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
+#endif
+
/*
* register_cpu - Setup a driverfs device for a CPU.
* @cpu - Callers can set the cpu->no_control field to 1, to indicate not to
@@ -108,6 +133,11 @@ int __devinit register_cpu(struct cpu *cpu, int num, struct node *root)
register_cpu_control(cpu);
if (!error)
cpu_sys_devices[num] = &cpu->sysdev;
+
+#ifdef CONFIG_KEXEC
+ if (!error)
+ error = sysdev_create_file(&cpu->sysdev, &attr_crash_notes);
+#endif
return error;
}
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 5b3d5e9ddcb..3d384e3d34d 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -7,6 +7,7 @@
*
*/
+#include <linux/capability.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 7e1d077874d..d1a05224627 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -13,8 +13,8 @@
#include <linux/sysdev.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/sched.h> /* capable() */
#include <linux/topology.h>
+#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/kobject.h>
@@ -49,12 +49,12 @@ static struct kset_uevent_ops memory_uevent_ops = {
static struct notifier_block *memory_chain;
-static int register_memory_notifier(struct notifier_block *nb)
+int register_memory_notifier(struct notifier_block *nb)
{
return notifier_chain_register(&memory_chain, nb);
}
-static void unregister_memory_notifier(struct notifier_block *nb)
+void unregister_memory_notifier(struct notifier_block *nb)
{
notifier_chain_unregister(&memory_chain, nb);
}
@@ -62,8 +62,7 @@ static void unregister_memory_notifier(struct notifier_block *nb)
/*
* register_memory - Setup a sysfs device for a memory block
*/
-static int
-register_memory(struct memory_block *memory, struct mem_section *section,
+int register_memory(struct memory_block *memory, struct mem_section *section,
struct node *root)
{
int error;
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 70eaa5c7ac0..6ede1f352c2 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -92,34 +92,28 @@ static int DAC960_open(struct inode *inode, struct file *file)
return 0;
}
-static int DAC960_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int DAC960_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
- struct gendisk *disk = inode->i_bdev->bd_disk;
+ struct gendisk *disk = bdev->bd_disk;
DAC960_Controller_T *p = disk->queue->queuedata;
int drive_nr = (long)disk->private_data;
- struct hd_geometry g;
- struct hd_geometry __user *loc = (struct hd_geometry __user *)arg;
-
- if (cmd != HDIO_GETGEO || !loc)
- return -EINVAL;
if (p->FirmwareType == DAC960_V1_Controller) {
- g.heads = p->V1.GeometryTranslationHeads;
- g.sectors = p->V1.GeometryTranslationSectors;
- g.cylinders = p->V1.LogicalDriveInformation[drive_nr].
- LogicalDriveSize / (g.heads * g.sectors);
+ geo->heads = p->V1.GeometryTranslationHeads;
+ geo->sectors = p->V1.GeometryTranslationSectors;
+ geo->cylinders = p->V1.LogicalDriveInformation[drive_nr].
+ LogicalDriveSize / (geo->heads * geo->sectors);
} else {
DAC960_V2_LogicalDeviceInfo_T *i =
p->V2.LogicalDeviceInformation[drive_nr];
switch (i->DriveGeometry) {
case DAC960_V2_Geometry_128_32:
- g.heads = 128;
- g.sectors = 32;
+ geo->heads = 128;
+ geo->sectors = 32;
break;
case DAC960_V2_Geometry_255_63:
- g.heads = 255;
- g.sectors = 63;
+ geo->heads = 255;
+ geo->sectors = 63;
break;
default:
DAC960_Error("Illegal Logical Device Geometry %d\n",
@@ -127,12 +121,11 @@ static int DAC960_ioctl(struct inode *inode, struct file *file,
return -EINVAL;
}
- g.cylinders = i->ConfigurableDeviceSize / (g.heads * g.sectors);
+ geo->cylinders = i->ConfigurableDeviceSize /
+ (geo->heads * geo->sectors);
}
- g.start = get_start_sect(inode->i_bdev);
-
- return copy_to_user(loc, &g, sizeof g) ? -EFAULT : 0;
+ return 0;
}
static int DAC960_media_changed(struct gendisk *disk)
@@ -157,7 +150,7 @@ static int DAC960_revalidate_disk(struct gendisk *disk)
static struct block_device_operations DAC960_BlockDeviceOperations = {
.owner = THIS_MODULE,
.open = DAC960_open,
- .ioctl = DAC960_ioctl,
+ .getgeo = DAC960_getgeo,
.media_changed = DAC960_media_changed,
.revalidate_disk = DAC960_revalidate_disk,
};
@@ -3471,7 +3464,7 @@ static inline boolean DAC960_ProcessCompletedRequest(DAC960_Command_T *Command,
if (!end_that_request_first(Request, UpToDate, Command->BlockCount)) {
- end_that_request_last(Request);
+ end_that_request_last(Request, UpToDate);
if (Command->Completion) {
complete(Command->Completion);
@@ -3767,7 +3760,7 @@ static void DAC960_V1_ProcessCompletedCommand(DAC960_Command_T *Command)
if (SenseKey == DAC960_SenseKey_VendorSpecific &&
AdditionalSenseCode == 0x80 &&
AdditionalSenseCodeQualifier <
- sizeof(DAC960_EventMessages) / sizeof(char *))
+ ARRAY_SIZE(DAC960_EventMessages))
DAC960_Critical("Physical Device %d:%d %s\n", Controller,
EventLogEntry->Channel,
EventLogEntry->TargetID,
@@ -7186,7 +7179,7 @@ static int DAC960_init_module(void)
{
int ret;
- ret = pci_module_init(&DAC960_pci_driver);
+ ret = pci_register_driver(&DAC960_pci_driver);
#ifdef DAC960_GAM_MINOR
if (!ret)
DAC960_gam_init();
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index c4b9d2adfc0..139cbba7618 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -117,7 +117,7 @@ config BLK_DEV_XD
config PARIDE
tristate "Parallel port IDE device support"
- depends on PARPORT
+ depends on PARPORT_PC
---help---
There are many external CD-ROM and disk devices that connect through
your computer's parallel port. Most of them are actually IDE devices
diff --git a/drivers/block/acsi.c b/drivers/block/acsi.c
index 5d2d649f7e8..196c0ec9cd5 100644
--- a/drivers/block/acsi.c
+++ b/drivers/block/acsi.c
@@ -1079,6 +1079,19 @@ static void redo_acsi_request( void )
*
***********************************************************************/
+static int acsi_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct acsi_info_struct *aip = bdev->bd_disk->private_data;
+
+ /*
+	 * Just fake some geometry here; it's nonsense anyway.
+	 * To make it easy, use Adaptec's usual 64/32 mapping.
+ */
+ geo->heads = 64;
+ geo->sectors = 32;
+ geo->cylinders = aip->size >> 11;
+ return 0;
+}
static int acsi_ioctl( struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg )
@@ -1086,18 +1099,6 @@ static int acsi_ioctl( struct inode *inode, struct file *file,
struct gendisk *disk = inode->i_bdev->bd_disk;
struct acsi_info_struct *aip = disk->private_data;
switch (cmd) {
- case HDIO_GETGEO:
- /* HDIO_GETGEO is supported more for getting the partition's
- * start sector... */
- { struct hd_geometry *geo = (struct hd_geometry *)arg;
- /* just fake some geometry here, it's nonsense anyway; to make it
- * easy, use Adaptec's usual 64/32 mapping */
- put_user( 64, &geo->heads );
- put_user( 32, &geo->sectors );
- put_user( aip->size >> 11, &geo->cylinders );
- put_user(get_start_sect(inode->i_bdev), &geo->start);
- return 0;
- }
case SCSI_IOCTL_GET_IDLUN:
/* SCSI compatible GET_IDLUN call to get target's ID and LUN number */
put_user( aip->target | (aip->lun << 8),
@@ -1592,6 +1593,7 @@ static struct block_device_operations acsi_fops = {
.open = acsi_open,
.release = acsi_release,
.ioctl = acsi_ioctl,
+ .getgeo = acsi_getgeo,
.media_changed = acsi_media_change,
.revalidate_disk= acsi_revalidate,
};
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 0acbfff8ad2..b6e29095621 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -131,7 +131,7 @@ static struct fd_drive_type drive_types[] = {
{ FD_DD_5, "DD 5.25", 40, 2, 14716, 13630, 1, 40, 81, 6, 30, 2},
{ FD_NODRIVE, "No Drive", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
};
-static int num_dr_types = sizeof(drive_types) / sizeof(drive_types[0]);
+static int num_dr_types = ARRAY_SIZE(drive_types);
static int amiga_read(int), dos_read(int);
static void amiga_write(int), dos_write(int);
@@ -194,6 +194,8 @@ static DECLARE_WAIT_QUEUE_HEAD(ms_wait);
*/
#define MAX_ERRORS 12
+#define custom amiga_custom
+
/* Prevent "aliased" accesses. */
static int fd_ref[4] = { 0,0,0,0 };
static int fd_device[4] = { 0, 0, 0, 0 };
@@ -1424,25 +1426,24 @@ static void do_fd_request(request_queue_t * q)
redo_fd_request();
}
+static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ int drive = MINOR(bdev->bd_dev) & 3;
+
+ geo->heads = unit[drive].type->heads;
+ geo->sectors = unit[drive].dtype->sects * unit[drive].type->sect_mult;
+ geo->cylinders = unit[drive].type->tracks;
+ return 0;
+}
+
static int fd_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long param)
{
int drive = iminor(inode) & 3;
static struct floppy_struct getprm;
+ void __user *argp = (void __user *)param;
switch(cmd){
- case HDIO_GETGEO:
- {
- struct hd_geometry loc;
- loc.heads = unit[drive].type->heads;
- loc.sectors = unit[drive].dtype->sects * unit[drive].type->sect_mult;
- loc.cylinders = unit[drive].type->tracks;
- loc.start = 0;
- if (copy_to_user((void *)param, (void *)&loc,
- sizeof(struct hd_geometry)))
- return -EFAULT;
- break;
- }
case FDFMTBEG:
get_fdc(drive);
if (fd_ref[drive] > 1) {
@@ -1486,9 +1487,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
getprm.head=unit[drive].type->heads;
getprm.sect=unit[drive].dtype->sects * unit[drive].type->sect_mult;
getprm.size=unit[drive].blocks;
- if (copy_to_user((void *)param,
- (void *)&getprm,
- sizeof(struct floppy_struct)))
+ if (copy_to_user(argp, &getprm, sizeof(struct floppy_struct)))
return -EFAULT;
break;
case FDSETPRM:
@@ -1500,8 +1499,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
break;
#ifdef RAW_IOCTL
case IOCTL_RAW_TRACK:
- if (copy_to_user((void *)param, raw_buf,
- unit[drive].type->read_size))
+ if (copy_to_user(argp, raw_buf, unit[drive].type->read_size))
return -EFAULT;
else
return unit[drive].type->read_size;
@@ -1652,15 +1650,10 @@ static struct block_device_operations floppy_fops = {
.open = floppy_open,
.release = floppy_release,
.ioctl = fd_ioctl,
+ .getgeo = fd_getgeo,
.media_changed = amiga_floppy_change,
};
-void __init amiga_floppy_setup (char *str, int *ints)
-{
- printk (KERN_INFO "amiflop: Setting default df0 to %x\n", ints[1]);
- fd_def_df0 = ints[1];
-}
-
static int __init fd_probe_drives(void)
{
int drive,drives,nomem;
@@ -1846,4 +1839,18 @@ void cleanup_module(void)
unregister_blkdev(FLOPPY_MAJOR, "fd");
}
#endif
+
+#else
+static int __init amiga_floppy_setup (char *str)
+{
+ int n;
+ if (!MACH_IS_AMIGA)
+ return 0;
+ if (!get_option(&str, &n))
+ return 0;
+ printk (KERN_INFO "amiflop: Setting default df0 to %x\n", n);
+ fd_def_df0 = n;
+}
+
+__setup("floppy=", amiga_floppy_setup);
#endif
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 0e97fcb9f3a..c05ee8bffd9 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -169,38 +169,26 @@ aoeblk_make_request(request_queue_t *q, struct bio *bio)
return 0;
}
-/* This ioctl implementation expects userland to have the device node
- * permissions set so that only priviledged users can open an aoe
- * block device directly.
- */
static int
-aoeblk_ioctl(struct inode *inode, struct file *filp, uint cmd, ulong arg)
+aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
- struct aoedev *d;
-
- if (!arg)
- return -EINVAL;
+ struct aoedev *d = bdev->bd_disk->private_data;
- d = inode->i_bdev->bd_disk->private_data;
if ((d->flags & DEVFL_UP) == 0) {
printk(KERN_ERR "aoe: aoeblk_ioctl: disk not up\n");
return -ENODEV;
}
- if (cmd == HDIO_GETGEO) {
- d->geo.start = get_start_sect(inode->i_bdev);
- if (!copy_to_user((void __user *) arg, &d->geo, sizeof d->geo))
- return 0;
- return -EFAULT;
- }
- printk(KERN_INFO "aoe: aoeblk_ioctl: unknown ioctl %d\n", cmd);
- return -EINVAL;
+ geo->cylinders = d->geo.cylinders;
+ geo->heads = d->geo.heads;
+ geo->sectors = d->geo.sectors;
+ return 0;
}
static struct block_device_operations aoe_bdops = {
.open = aoeblk_open,
.release = aoeblk_release,
- .ioctl = aoeblk_ioctl,
+ .getgeo = aoeblk_getgeo,
.owner = THIS_MODULE,
};
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 22bda05fc69..f8ce235ccfc 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -181,7 +181,7 @@ static struct {
{ 6, TYPE_HD }, /* 31: H1640 <- was H1600 == h1600 for PC */
};
-#define NUM_DISK_MINORS (sizeof(minor2disktype)/sizeof(*minor2disktype))
+#define NUM_DISK_MINORS ARRAY_SIZE(minor2disktype)
/*
* Maximum disk size (in kilobytes). This default is used whenever the
@@ -1361,7 +1361,7 @@ static int floppy_revalidate(struct gendisk *disk)
formats, for 'permanent user-defined' parameter:
restore default_params[] here if flagged valid! */
if (default_params[drive].blocks == 0)
- UDT = 0;
+ UDT = NULL;
else
UDT = &default_params[drive];
}
@@ -1495,6 +1495,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
struct floppy_struct getprm;
int settype;
struct floppy_struct setprm;
+ void __user *argp = (void __user *)param;
switch (cmd) {
case FDGETPRM:
@@ -1521,7 +1522,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
getprm.head = 2;
getprm.track = dtp->blocks/dtp->spt/2;
getprm.stretch = dtp->stretch;
- if (copy_to_user((void *)param, &getprm, sizeof(getprm)))
+ if (copy_to_user(argp, &getprm, sizeof(getprm)))
return -EFAULT;
return 0;
}
@@ -1540,7 +1541,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
/* get the parameters from user space */
if (floppy->ref != 1 && floppy->ref != -1)
return -EBUSY;
- if (copy_from_user(&setprm, (void *) param, sizeof(setprm)))
+ if (copy_from_user(&setprm, argp, sizeof(setprm)))
return -EFAULT;
/*
* first of all: check for floppy change and revalidate,
@@ -1647,7 +1648,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
case FDFMTTRK:
if (floppy->ref != 1 && floppy->ref != -1)
return -EBUSY;
- if (copy_from_user(&fmt_desc, (void *) param, sizeof(fmt_desc)))
+ if (copy_from_user(&fmt_desc, argp, sizeof(fmt_desc)))
return -EFAULT;
return do_format(drive, type, &fmt_desc);
case FDCLRPRM:
@@ -1950,14 +1951,20 @@ Enomem:
return -ENOMEM;
}
-
-void __init atari_floppy_setup( char *str, int *ints )
+#ifndef MODULE
+static int __init atari_floppy_setup(char *str)
{
+ int ints[3 + FD_MAX_UNITS];
int i;
+
+ if (!MACH_IS_ATARI)
+ return 0;
+
+ str = get_options(str, 3 + FD_MAX_UNITS, ints);
if (ints[0] < 1) {
printk(KERN_ERR "ataflop_setup: no arguments!\n" );
- return;
+ return 0;
}
else if (ints[0] > 2+FD_MAX_UNITS) {
printk(KERN_ERR "ataflop_setup: too many arguments\n" );
@@ -1977,9 +1984,13 @@ void __init atari_floppy_setup( char *str, int *ints )
else
UserSteprate[i-3] = ints[i];
}
+ return 1;
}
-static void atari_floppy_exit(void)
+__setup("floppy=", atari_floppy_setup);
+#endif
+
+static void __exit atari_floppy_exit(void)
{
int i;
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index c3441b3f086..12d7b9bdfa9 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1,6 +1,6 @@
/*
* Disk Array driver for HP SA 5xxx and 6xxx Controllers
- * Copyright 2000, 2005 Hewlett-Packard Development Company, L.P.
+ * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -47,12 +47,12 @@
#include <linux/completion.h>
#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
-#define DRIVER_NAME "HP CISS Driver (v 2.6.8)"
-#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,8)
+#define DRIVER_NAME "HP CISS Driver (v 2.6.10)"
+#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,10)
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
-MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.8");
+MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.10");
MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
" SA6i P600 P800 P400 P400i E200 E200i");
MODULE_LICENSE("GPL");
@@ -103,7 +103,7 @@ static const struct pci_device_id cciss_pci_device_id[] = {
};
MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
-#define NR_PRODUCTS (sizeof(products)/sizeof(struct board_type))
+#define NR_PRODUCTS ARRAY_SIZE(products)
/* board_id = Subsystem Device ID & Vendor ID
* product = Marketing Name for the board
@@ -153,6 +153,7 @@ static int cciss_open(struct inode *inode, struct file *filep);
static int cciss_release(struct inode *inode, struct file *filep);
static int cciss_ioctl(struct inode *inode, struct file *filep,
unsigned int cmd, unsigned long arg);
+static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static int revalidate_allvol(ctlr_info_t *host);
static int cciss_revalidate(struct gendisk *disk);
@@ -166,7 +167,7 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
unsigned int block_size, InquiryData_struct *inq_buff,
drive_info_struct *drv);
static void cciss_getgeometry(int cntl_num);
-
+static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *, __u32);
static void start_io( ctlr_info_t *h);
static int sendcmd( __u8 cmd, int ctlr, void *buff, size_t size,
unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
@@ -194,6 +195,7 @@ static struct block_device_operations cciss_fops = {
.open = cciss_open,
.release = cciss_release,
.ioctl = cciss_ioctl,
+ .getgeo = cciss_getgeo,
#ifdef CONFIG_COMPAT
.compat_ioctl = cciss_compat_ioctl,
#endif
@@ -282,7 +284,7 @@ static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
h->product_name,
(unsigned long)h->board_id,
h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3],
- (unsigned int)h->intr,
+ (unsigned int)h->intr[SIMPLE_MODE_INT],
h->num_luns,
h->Qdepth, h->commands_outstanding,
h->maxQsinceinit, h->max_outstanding, h->maxSG);
@@ -633,6 +635,20 @@ static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd, unsigned
return err;
}
#endif
+
+static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ drive_info_struct *drv = get_drv(bdev->bd_disk);
+
+ if (!drv->cylinders)
+ return -ENXIO;
+
+ geo->heads = drv->heads;
+ geo->sectors = drv->sectors;
+ geo->cylinders = drv->cylinders;
+ return 0;
+}
+
/*
* ioctl
*/
@@ -651,21 +667,6 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
#endif /* CCISS_DEBUG */
switch(cmd) {
- case HDIO_GETGEO:
- {
- struct hd_geometry driver_geo;
- if (drv->cylinders) {
- driver_geo.heads = drv->heads;
- driver_geo.sectors = drv->sectors;
- driver_geo.cylinders = drv->cylinders;
- } else
- return -ENXIO;
- driver_geo.start= get_start_sect(inode->i_bdev);
- if (copy_to_user(argp, &driver_geo, sizeof(struct hd_geometry)))
- return -EFAULT;
- return(0);
- }
-
case CCISS_GETPCIINFO:
{
cciss_pci_info_struct pciinfo;
@@ -2177,16 +2178,48 @@ static inline void resend_cciss_cmd( ctlr_info_t *h, CommandList_struct *c)
start_io(h);
}
+
+static void cciss_softirq_done(struct request *rq)
+{
+ CommandList_struct *cmd = rq->completion_data;
+ ctlr_info_t *h = hba[cmd->ctlr];
+ u64bit temp64;
+ int i, ddir;
+
+ if (cmd->Request.Type.Direction == XFER_READ)
+ ddir = PCI_DMA_FROMDEVICE;
+ else
+ ddir = PCI_DMA_TODEVICE;
+
+ /* command did not need to be retried */
+ /* unmap the DMA mapping for all the scatter gather elements */
+ for(i=0; i<cmd->Header.SGList; i++) {
+ temp64.val32.lower = cmd->SG[i].Addr.lower;
+ temp64.val32.upper = cmd->SG[i].Addr.upper;
+ pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
+ }
+
+ complete_buffers(rq->bio, rq->errors);
+
+#ifdef CCISS_DEBUG
+ printk("Done with %p\n", rq);
+#endif /* CCISS_DEBUG */
+
+ spin_lock_irq(&h->lock);
+ end_that_request_last(rq, rq->errors);
+ cmd_free(h, cmd,1);
+ spin_unlock_irq(&h->lock);
+}
+
/* checks the status of the job and calls complete buffers to mark all
- * buffers for the completed job.
+ * buffers for the completed job. Note that this function does not need
+ * to hold the hba/queue lock.
*/
static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
int timeout)
{
int status = 1;
- int i;
int retry_cmd = 0;
- u64bit temp64;
if (timeout)
status = 0;
@@ -2294,24 +2327,10 @@ static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
resend_cciss_cmd(h,cmd);
return;
}
- /* command did not need to be retried */
- /* unmap the DMA mapping for all the scatter gather elements */
- for(i=0; i<cmd->Header.SGList; i++) {
- temp64.val32.lower = cmd->SG[i].Addr.lower;
- temp64.val32.upper = cmd->SG[i].Addr.upper;
- pci_unmap_page(hba[cmd->ctlr]->pdev,
- temp64.val, cmd->SG[i].Len,
- (cmd->Request.Type.Direction == XFER_READ) ?
- PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
- }
- complete_buffers(cmd->rq->bio, status);
-#ifdef CCISS_DEBUG
- printk("Done with %p\n", cmd->rq);
-#endif /* CCISS_DEBUG */
-
- end_that_request_last(cmd->rq);
- cmd_free(h,cmd,1);
+ cmd->rq->completion_data = cmd;
+ cmd->rq->errors = status;
+ blk_complete_request(cmd->rq);
}
/*
@@ -2661,6 +2680,60 @@ static int find_PCI_BAR_index(struct pci_dev *pdev,
return -1;
}
+/* If MSI/MSI-X is supported by the kernel we will try to enable it on
+ * controllers that are capable. If not, we use IO-APIC mode.
+ */
+
+static void __devinit cciss_interrupt_mode(ctlr_info_t *c, struct pci_dev *pdev, __u32 board_id)
+{
+#ifdef CONFIG_PCI_MSI
+ int err;
+ struct msix_entry cciss_msix_entries[4] = {{0,0}, {0,1},
+ {0,2}, {0,3}};
+
+ /* Some boards advertise MSI but don't really support it */
+ if ((board_id == 0x40700E11) ||
+ (board_id == 0x40800E11) ||
+ (board_id == 0x40820E11) ||
+ (board_id == 0x40830E11))
+ goto default_int_mode;
+
+ if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
+ err = pci_enable_msix(pdev, cciss_msix_entries, 4);
+ if (!err) {
+ c->intr[0] = cciss_msix_entries[0].vector;
+ c->intr[1] = cciss_msix_entries[1].vector;
+ c->intr[2] = cciss_msix_entries[2].vector;
+ c->intr[3] = cciss_msix_entries[3].vector;
+ c->msix_vector = 1;
+ return;
+ }
+ if (err > 0) {
+ printk(KERN_WARNING "cciss: only %d MSI-X vectors "
+ "available\n", err);
+ } else {
+ printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
+ err);
+ }
+ }
+ if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
+ if (!pci_enable_msi(pdev)) {
+ c->intr[SIMPLE_MODE_INT] = pdev->irq;
+ c->msi_vector = 1;
+ return;
+ } else {
+ printk(KERN_WARNING "cciss: MSI init failed\n");
+ c->intr[SIMPLE_MODE_INT] = pdev->irq;
+ return;
+ }
+ }
+#endif /* CONFIG_PCI_MSI */
+ /* if we get here we're going to use the default interrupt mode */
+default_int_mode:
+ c->intr[SIMPLE_MODE_INT] = pdev->irq;
+ return;
+}
+
static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
{
ushort subsystem_vendor_id, subsystem_device_id, command;
@@ -2721,7 +2794,10 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
printk("board_id = %x\n", board_id);
#endif /* CCISS_DEBUG */
- c->intr = pdev->irq;
+/* If the kernel supports MSI/MSI-X we will try to enable that functionality,
+ * else we use the IO-APIC interrupt assigned to us by system ROM.
+ */
+ cciss_interrupt_mode(c, pdev, board_id);
/*
* Memory base addr is first addr , the second points to the config
@@ -2775,7 +2851,7 @@ static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
c->board_id = board_id;
#ifdef CCISS_DEBUG
- print_cfg_table(c->cfgtable);
+ print_cfg_table(c->cfgtable);
#endif /* CCISS_DEBUG */
for(i=0; i<NR_PRODUCTS; i++) {
@@ -3060,7 +3136,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
* 8 controller support.
*/
if (i < MAX_CTLR_ORIG)
- hba[i]->major = MAJOR_NR + i;
+ hba[i]->major = COMPAQ_CISS_MAJOR + i;
rc = register_blkdev(hba[i]->major, hba[i]->devname);
if(rc == -EBUSY || rc == -EINVAL) {
printk(KERN_ERR
@@ -3075,11 +3151,11 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
/* make sure the board interrupts are off */
hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
- if( request_irq(hba[i]->intr, do_cciss_intr,
+ if( request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
hba[i]->devname, hba[i])) {
printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
- hba[i]->intr, hba[i]->devname);
+ hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
goto clean2;
}
hba[i]->cmd_pool_bits = kmalloc(((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long), GFP_KERNEL);
@@ -3141,15 +3217,17 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
drv->queue = q;
q->backing_dev_info.ra_pages = READ_AHEAD;
- blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
+ blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
- /* This is a hardware imposed limit. */
- blk_queue_max_hw_segments(q, MAXSGENTRIES);
+ /* This is a hardware imposed limit. */
+ blk_queue_max_hw_segments(q, MAXSGENTRIES);
- /* This is a limit in the driver and could be eliminated. */
- blk_queue_max_phys_segments(q, MAXSGENTRIES);
+ /* This is a limit in the driver and could be eliminated. */
+ blk_queue_max_phys_segments(q, MAXSGENTRIES);
- blk_queue_max_sectors(q, 512);
+ blk_queue_max_sectors(q, 512);
+
+ blk_queue_softirq_done(q, cciss_softirq_done);
q->queuedata = hba[i];
sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
@@ -3185,7 +3263,7 @@ clean4:
NR_CMDS * sizeof( ErrorInfo_struct),
hba[i]->errinfo_pool,
hba[i]->errinfo_pool_dhandle);
- free_irq(hba[i]->intr, hba[i]);
+ free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
clean2:
unregister_blkdev(hba[i]->major, hba[i]->devname);
clean1:
@@ -3226,7 +3304,15 @@ static void __devexit cciss_remove_one (struct pci_dev *pdev)
printk(KERN_WARNING "Error Flushing cache on controller %d\n",
i);
}
- free_irq(hba[i]->intr, hba[i]);
+ free_irq(hba[i]->intr[2], hba[i]);
+
+#ifdef CONFIG_PCI_MSI
+ if (hba[i]->msix_vector)
+ pci_disable_msix(hba[i]->pdev);
+ else if (hba[i]->msi_vector)
+ pci_disable_msi(hba[i]->pdev);
+#endif /* CONFIG_PCI_MSI */
+
pci_set_drvdata(pdev, NULL);
iounmap(hba[i]->vaddr);
cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
@@ -3274,7 +3360,7 @@ static int __init cciss_init(void)
printk(KERN_INFO DRIVER_NAME "\n");
/* Register for our PCI devices */
- return pci_module_init(&cciss_pci_driver);
+ return pci_register_driver(&cciss_pci_driver);
}
static void __exit cciss_cleanup(void)
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 3b0858c8389..b24fc0553cc 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -13,8 +13,6 @@
#define IO_OK 0
#define IO_ERROR 1
-#define MAJOR_NR COMPAQ_CISS_MAJOR
-
struct ctlr_info;
typedef struct ctlr_info ctlr_info_t;
@@ -65,7 +63,6 @@ struct ctlr_info
unsigned long io_mem_addr;
unsigned long io_mem_length;
CfgTable_struct __iomem *cfgtable;
- unsigned int intr;
int interrupts_enabled;
int major;
int max_commands;
@@ -74,6 +71,13 @@ struct ctlr_info
int num_luns;
int highest_lun;
int usage_count; /* number of opens all all minor devices */
+# define DOORBELL_INT 0
+# define PERF_MODE_INT 1
+# define SIMPLE_MODE_INT 2
+# define MEMQ_MODE_INT 3
+ unsigned int intr[4];
+ unsigned int msix_vector;
+ unsigned int msi_vector;
// information about each logical volume
drive_info_struct drv[CISS_MAX_LUN];
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 2942d32280a..9e35de05d5c 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -714,7 +714,7 @@ cciss_scsi_detect(int ctlr)
((struct cciss_scsi_adapter_data_t *)
hba[ctlr]->scsi_ctlr)->scsi_host = (void *) sh;
sh->hostdata[0] = (unsigned long) hba[ctlr];
- sh->irq = hba[ctlr]->intr;
+ sh->irq = hba[ctlr]->intr[SIMPLE_MODE_INT];
sh->unique_id = sh->irq;
error = scsi_add_host(sh, &hba[ctlr]->pdev->dev);
if (error)
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index cf1822a6361..862b9abac0a 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -72,11 +72,11 @@ static ctlr_info_t *hba[MAX_CTLR];
static int eisa[8];
-#define NR_PRODUCTS (sizeof(products)/sizeof(struct board_type))
+#define NR_PRODUCTS ARRAY_SIZE(products)
/* board_id = Subsystem Device ID & Vendor ID
* product = Marketing Name for the board
- * access = Address of the struct of function pointers
+ * access = Address of the struct of function pointers
*/
static struct board_type products[] = {
{ 0x0040110E, "IDA", &smart1_access },
@@ -160,6 +160,7 @@ static int sendcmd(
static int ida_open(struct inode *inode, struct file *filep);
static int ida_release(struct inode *inode, struct file *filep);
static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg);
+static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
static void do_ida_request(request_queue_t *q);
@@ -199,6 +200,7 @@ static struct block_device_operations ida_fops = {
.open = ida_open,
.release = ida_release,
.ioctl = ida_ioctl,
+ .getgeo = ida_getgeo,
.revalidate_disk= ida_revalidate,
};
@@ -1036,7 +1038,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
complete_buffers(cmd->rq->bio, ok);
DBGPX(printk("Done with %p\n", cmd->rq););
- end_that_request_last(cmd->rq);
+ end_that_request_last(cmd->rq, ok ? 1 : -EIO);
}
/*
@@ -1124,6 +1126,23 @@ static void ida_timer(unsigned long tdata)
h->misc_tflags = 0;
}
+static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ drv_info_t *drv = get_drv(bdev->bd_disk);
+
+ if (drv->cylinders) {
+ geo->heads = drv->heads;
+ geo->sectors = drv->sectors;
+ geo->cylinders = drv->cylinders;
+ } else {
+ geo->heads = 0xff;
+ geo->sectors = 0x3f;
+ geo->cylinders = drv->nr_blks / (0xff*0x3f);
+ }
+
+ return 0;
+}
+
/*
* ida_ioctl does some miscellaneous stuff like reporting drive geometry,
* setting readahead and submitting commands from userspace to the controller.
@@ -1133,27 +1152,10 @@ static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
int error;
- int diskinfo[4];
- struct hd_geometry __user *geo = (struct hd_geometry __user *)arg;
ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg;
ida_ioctl_t *my_io;
switch(cmd) {
- case HDIO_GETGEO:
- if (drv->cylinders) {
- diskinfo[0] = drv->heads;
- diskinfo[1] = drv->sectors;
- diskinfo[2] = drv->cylinders;
- } else {
- diskinfo[0] = 0xff;
- diskinfo[1] = 0x3f;
- diskinfo[2] = drv->nr_blks / (0xff*0x3f);
- }
- put_user(diskinfo[0], &geo->heads);
- put_user(diskinfo[1], &geo->sectors);
- put_user(diskinfo[2], &geo->cylinders);
- put_user(get_start_sect(inode->i_bdev), &geo->start);
- return 0;
case IDAGETDRVINFO:
if (copy_to_user(&io->c.drv, drv, sizeof(drv_info_t)))
return -EFAULT;
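
Several drivers in this series repeat the same conversion: the open-coded HDIO_GETGEO case is dropped from the driver's ->ioctl method and replaced by the new ->getgeo hook in block_device_operations, with the block layer filling in geo->start and copying the structure to userspace. A minimal sketch of the pattern, using hypothetical foo_* names rather than any particular driver:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>

/* Hypothetical per-disk data; only the CHS fields used below are assumed. */
struct foo_disk {
	unsigned char heads;
	unsigned char sectors;
	unsigned short cylinders;
};

static int foo_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct foo_disk *d = bdev->bd_disk->private_data;

	/* Report CHS only; geo->start and the copy_to_user() that the old
	 * ioctl code did by hand are handled by the generic HDIO_GETGEO path. */
	geo->heads = d->heads;
	geo->sectors = d->sectors;
	geo->cylinders = d->cylinders;
	return 0;
}

static struct block_device_operations foo_fops = {
	.owner	= THIS_MODULE,
	.getgeo	= foo_getgeo,
};
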
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index f7e765a1d31..374621a512e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -479,7 +479,6 @@ static struct floppy_struct floppy_type[32] = {
{ 3200,20,2,80,0,0x1C,0x00,0xCF,0x2C,"H1600" }, /* 31 1.6MB 3.5" */
};
-#define NUMBER(x) (sizeof(x) / sizeof(*(x)))
#define SECTSIZE (_FD_SECTSIZE(*floppy))
/* Auto-detection: Disk type used until the next media change occurs. */
@@ -2301,7 +2300,7 @@ static void floppy_end_request(struct request *req, int uptodate)
add_disk_randomness(req->rq_disk);
floppy_off((long)req->rq_disk->private_data);
blkdev_dequeue_request(req);
- end_that_request_last(req);
+ end_that_request_last(req, uptodate);
/* We're done with the request */
current_req = NULL;
@@ -3445,6 +3444,23 @@ static int get_floppy_geometry(int drive, int type, struct floppy_struct **g)
return 0;
}
+static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ int drive = (long)bdev->bd_disk->private_data;
+ int type = ITYPE(drive_state[drive].fd_device);
+ struct floppy_struct *g;
+ int ret;
+
+ ret = get_floppy_geometry(drive, type, &g);
+ if (ret)
+ return ret;
+
+ geo->heads = g->head;
+ geo->sectors = g->sect;
+ geo->cylinders = g->track;
+ return 0;
+}
+
static int fd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long param)
{
@@ -3474,23 +3490,6 @@ static int fd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
cmd = FDEJECT;
}
- /* generic block device ioctls */
- switch (cmd) {
- /* the following have been inspired by the corresponding
- * code for other block devices. */
- struct floppy_struct *g;
- case HDIO_GETGEO:
- {
- struct hd_geometry loc;
- ECALL(get_floppy_geometry(drive, type, &g));
- loc.heads = g->head;
- loc.sectors = g->sect;
- loc.cylinders = g->track;
- loc.start = 0;
- return _COPYOUT(loc);
- }
- }
-
/* convert the old style command into a new style command */
if ((cmd & 0xff00) == 0x0200) {
ECALL(normalize_ioctl(&cmd, &size));
@@ -3645,7 +3644,7 @@ static void __init config_types(void)
const char *name = NULL;
static char temparea[32];
- if (type < NUMBER(default_drive_params)) {
+ if (type < ARRAY_SIZE(default_drive_params)) {
params = &default_drive_params[type].params;
if (type) {
name = default_drive_params[type].name;
@@ -3938,6 +3937,7 @@ static struct block_device_operations floppy_fops = {
.open = floppy_open,
.release = floppy_release,
.ioctl = fd_ioctl,
+ .getgeo = fd_getgeo,
.media_changed = check_floppy_change,
.revalidate_disk = floppy_revalidate,
};
@@ -3960,7 +3960,7 @@ static void __init register_devfs_entries(int drive)
{
int base_minor = (drive < 4) ? drive : (124 + drive);
- if (UDP->cmos < NUMBER(default_drive_params)) {
+ if (UDP->cmos < ARRAY_SIZE(default_drive_params)) {
int i = 0;
do {
int minor = base_minor + (table_sup[UDP->cmos][i] << 2);
@@ -4218,7 +4218,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
!(allowed_drive_mask & (1 << drive)) ||
fdc_state[FDC(drive)].version == FDC_NONE)
return NULL;
- if (((*part >> 2) & 0x1f) >= NUMBER(floppy_type))
+ if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type))
return NULL;
*part = 0;
return get_disk(disks[drive]);
@@ -4570,7 +4570,7 @@ static void unregister_devfs_entries(int drive)
{
int i;
- if (UDP->cmos < NUMBER(default_drive_params)) {
+ if (UDP->cmos < ARRAY_SIZE(default_drive_params)) {
i = 0;
do {
devfs_remove("floppy/%d%s", drive,
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 96c664af8d0..864729046e2 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -213,9 +213,9 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
struct address_space_operations *aops = mapping->a_ops;
pgoff_t index;
unsigned offset, bv_offs;
- int len, ret = 0;
+ int len, ret;
- down(&mapping->host->i_sem);
+ mutex_lock(&mapping->host->i_mutex);
index = pos >> PAGE_CACHE_SHIFT;
offset = pos & ((pgoff_t)PAGE_CACHE_SIZE - 1);
bv_offs = bvec->bv_offset;
@@ -232,9 +232,15 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
page = grab_cache_page(mapping, index);
if (unlikely(!page))
goto fail;
- if (unlikely(aops->prepare_write(file, page, offset,
- offset + size)))
+ ret = aops->prepare_write(file, page, offset,
+ offset + size);
+ if (unlikely(ret)) {
+ if (ret == AOP_TRUNCATED_PAGE) {
+ page_cache_release(page);
+ continue;
+ }
goto unlock;
+ }
transfer_result = lo_do_transfer(lo, WRITE, page, offset,
bvec->bv_page, bv_offs, size, IV);
if (unlikely(transfer_result)) {
@@ -251,9 +257,15 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
kunmap_atomic(kaddr, KM_USER0);
}
flush_dcache_page(page);
- if (unlikely(aops->commit_write(file, page, offset,
- offset + size)))
+ ret = aops->commit_write(file, page, offset,
+ offset + size);
+ if (unlikely(ret)) {
+ if (ret == AOP_TRUNCATED_PAGE) {
+ page_cache_release(page);
+ continue;
+ }
goto unlock;
+ }
if (unlikely(transfer_result))
goto unlock;
bv_offs += size;
@@ -264,8 +276,9 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
unlock_page(page);
page_cache_release(page);
}
+ ret = 0;
out:
- up(&mapping->host->i_sem);
+ mutex_unlock(&mapping->host->i_mutex);
return ret;
unlock:
unlock_page(page);
@@ -514,12 +527,12 @@ static int loop_make_request(request_queue_t *q, struct bio *old_bio)
lo->lo_pending++;
loop_add_bio(lo, old_bio);
spin_unlock_irq(&lo->lo_lock);
- up(&lo->lo_bh_mutex);
+ complete(&lo->lo_bh_done);
return 0;
out:
if (lo->lo_pending == 0)
- up(&lo->lo_bh_mutex);
+ complete(&lo->lo_bh_done);
spin_unlock_irq(&lo->lo_lock);
bio_io_error(old_bio, old_bio->bi_size);
return 0;
@@ -580,23 +593,20 @@ static int loop_thread(void *data)
lo->lo_pending = 1;
/*
- * up sem, we are running
+ * complete it, we are running
*/
- up(&lo->lo_sem);
+ complete(&lo->lo_done);
for (;;) {
int pending;
- /*
- * interruptible just to not contribute to load avg
- */
- if (down_interruptible(&lo->lo_bh_mutex))
+ if (wait_for_completion_interruptible(&lo->lo_bh_done))
continue;
spin_lock_irq(&lo->lo_lock);
/*
- * could be upped because of tear-down, not pending work
+ * could be completed because of tear-down, not pending work
*/
if (unlikely(!lo->lo_pending)) {
spin_unlock_irq(&lo->lo_lock);
@@ -619,7 +629,7 @@ static int loop_thread(void *data)
break;
}
- up(&lo->lo_sem);
+ complete(&lo->lo_done);
return 0;
}
@@ -830,7 +840,7 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
set_blocksize(bdev, lo_blocksize);
kernel_thread(loop_thread, lo, CLONE_KERNEL);
- down(&lo->lo_sem);
+ wait_for_completion(&lo->lo_done);
return 0;
out_putf:
@@ -896,10 +906,10 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
lo->lo_state = Lo_rundown;
lo->lo_pending--;
if (!lo->lo_pending)
- up(&lo->lo_bh_mutex);
+ complete(&lo->lo_bh_done);
spin_unlock_irq(&lo->lo_lock);
- down(&lo->lo_sem);
+ wait_for_completion(&lo->lo_done);
lo->lo_backing_file = NULL;
@@ -1276,8 +1286,8 @@ static int __init loop_init(void)
if (!lo->lo_queue)
goto out_mem4;
init_MUTEX(&lo->lo_ctl_mutex);
- init_MUTEX_LOCKED(&lo->lo_sem);
- init_MUTEX_LOCKED(&lo->lo_bh_mutex);
+ init_completion(&lo->lo_done);
+ init_completion(&lo->lo_bh_done);
lo->lo_number = i;
spin_lock_init(&lo->lo_lock);
disk->major = LOOP_MAJOR;
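
The loop changes above swap two semaphores that were only used for one-shot signalling (lo_sem and lo_bh_mutex) for completions, which state that intent directly. A minimal sketch of the start-up handshake alone, with hypothetical names and assuming the 2.6-era kernel_thread() interface that loop.c itself uses:

#include <linux/completion.h>
#include <linux/sched.h>

static struct completion thread_ready;		/* was: init_MUTEX_LOCKED sem */

static int demo_thread(void *data)
{
	complete(&thread_ready);		/* was: up(&sem) */
	/* ... service requests ... */
	return 0;
}

static int demo_start_thread(void)
{
	init_completion(&thread_ready);
	kernel_thread(demo_thread, NULL, CLONE_KERNEL);
	wait_for_completion(&thread_ready);	/* was: down(&sem) */
	return 0;
}
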
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9e268ddedfb..6997d8e6bfb 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -54,11 +54,15 @@
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
+#include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
#include <net/sock.h>
#include <linux/devfs_fs_kernel.h>
#include <asm/uaccess.h>
+#include <asm/system.h>
#include <asm/types.h>
#include <linux/nbd.h>
@@ -136,7 +140,7 @@ static void nbd_end_request(struct request *req)
spin_lock_irqsave(q->queue_lock, flags);
if (!end_that_request_first(req, uptodate, req->nr_sectors)) {
- end_that_request_last(req);
+ end_that_request_last(req, uptodate);
}
spin_unlock_irqrestore(q->queue_lock, flags);
}
@@ -170,7 +174,6 @@ static int sock_xmit(struct socket *sock, int send, void *buf, int size,
msg.msg_namelen = 0;
msg.msg_control = NULL;
msg.msg_controllen = 0;
- msg.msg_namelen = 0;
msg.msg_flags = msg_flags | MSG_NOSIGNAL;
if (send)
@@ -230,14 +233,6 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
request.len = htonl(size);
memcpy(request.handle, &req, sizeof(req));
- down(&lo->tx_lock);
-
- if (!sock || !lo->sock) {
- printk(KERN_ERR "%s: Attempted send on closed socket\n",
- lo->disk->disk_name);
- goto error_out;
- }
-
dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n",
lo->disk->disk_name, req,
nbdcmd_to_ascii(nbd_cmd(req)),
@@ -276,11 +271,9 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
}
}
}
- up(&lo->tx_lock);
return 0;
error_out:
- up(&lo->tx_lock);
return 1;
}
@@ -289,9 +282,14 @@ static struct request *nbd_find_request(struct nbd_device *lo, char *handle)
struct request *req;
struct list_head *tmp;
struct request *xreq;
+ int err;
memcpy(&xreq, handle, sizeof(xreq));
+ err = wait_event_interruptible(lo->active_wq, lo->active_req != xreq);
+ if (unlikely(err))
+ goto out;
+
spin_lock(&lo->queue_lock);
list_for_each(tmp, &lo->queue_head) {
req = list_entry(tmp, struct request, queuelist);
@@ -302,7 +300,11 @@ static struct request *nbd_find_request(struct nbd_device *lo, char *handle)
return req;
}
spin_unlock(&lo->queue_lock);
- return NULL;
+
+ err = -ENOENT;
+
+out:
+ return ERR_PTR(err);
}
static inline int sock_recv_bvec(struct socket *sock, struct bio_vec *bvec)
@@ -331,7 +333,11 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
goto harderror;
}
req = nbd_find_request(lo, reply.handle);
- if (req == NULL) {
+ if (unlikely(IS_ERR(req))) {
+ result = PTR_ERR(req);
+ if (result != -ENOENT)
+ goto harderror;
+
printk(KERN_ERR "%s: Unexpected reply (%p)\n",
lo->disk->disk_name, reply.handle);
result = -EBADR;
@@ -395,19 +401,24 @@ static void nbd_clear_que(struct nbd_device *lo)
BUG_ON(lo->magic != LO_MAGIC);
- do {
- req = NULL;
- spin_lock(&lo->queue_lock);
- if (!list_empty(&lo->queue_head)) {
- req = list_entry(lo->queue_head.next, struct request, queuelist);
- list_del_init(&req->queuelist);
- }
- spin_unlock(&lo->queue_lock);
- if (req) {
- req->errors++;
- nbd_end_request(req);
- }
- } while (req);
+ /*
+ * Because we have set lo->sock to NULL under the tx_lock, all
+ * modifications to the list must have completed by now. For
+ * the same reason, the active_req must be NULL.
+ *
+ * As a consequence, we don't need to take the spin lock while
+ * purging the list here.
+ */
+ BUG_ON(lo->sock);
+ BUG_ON(lo->active_req);
+
+ while (!list_empty(&lo->queue_head)) {
+ req = list_entry(lo->queue_head.next, struct request,
+ queuelist);
+ list_del_init(&req->queuelist);
+ req->errors++;
+ nbd_end_request(req);
+ }
}
/*
@@ -435,11 +446,6 @@ static void do_nbd_request(request_queue_t * q)
BUG_ON(lo->magic != LO_MAGIC);
- if (!lo->file) {
- printk(KERN_ERR "%s: Request when not-ready\n",
- lo->disk->disk_name);
- goto error_out;
- }
nbd_cmd(req) = NBD_CMD_READ;
if (rq_data_dir(req) == WRITE) {
nbd_cmd(req) = NBD_CMD_WRITE;
@@ -453,32 +459,34 @@ static void do_nbd_request(request_queue_t * q)
req->errors = 0;
spin_unlock_irq(q->queue_lock);
- spin_lock(&lo->queue_lock);
-
- if (!lo->file) {
- spin_unlock(&lo->queue_lock);
- printk(KERN_ERR "%s: failed between accept and semaphore, file lost\n",
- lo->disk->disk_name);
+ down(&lo->tx_lock);
+ if (unlikely(!lo->sock)) {
+ up(&lo->tx_lock);
+ printk(KERN_ERR "%s: Attempted send on closed socket\n",
+ lo->disk->disk_name);
req->errors++;
nbd_end_request(req);
spin_lock_irq(q->queue_lock);
continue;
}
- list_add(&req->queuelist, &lo->queue_head);
- spin_unlock(&lo->queue_lock);
+ lo->active_req = req;
if (nbd_send_req(lo, req) != 0) {
printk(KERN_ERR "%s: Request send failed\n",
lo->disk->disk_name);
- if (nbd_find_request(lo, (char *)&req) != NULL) {
- /* we still own req */
- req->errors++;
- nbd_end_request(req);
- } else /* we're racing with nbd_clear_que */
- printk(KERN_DEBUG "nbd: can't find req\n");
+ req->errors++;
+ nbd_end_request(req);
+ } else {
+ spin_lock(&lo->queue_lock);
+ list_add(&req->queuelist, &lo->queue_head);
+ spin_unlock(&lo->queue_lock);
}
+ lo->active_req = NULL;
+ up(&lo->tx_lock);
+ wake_up_all(&lo->active_wq);
+
spin_lock_irq(q->queue_lock);
continue;
@@ -529,17 +537,10 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
down(&lo->tx_lock);
lo->sock = NULL;
up(&lo->tx_lock);
- spin_lock(&lo->queue_lock);
file = lo->file;
lo->file = NULL;
- spin_unlock(&lo->queue_lock);
nbd_clear_que(lo);
- spin_lock(&lo->queue_lock);
- if (!list_empty(&lo->queue_head)) {
- printk(KERN_ERR "nbd: disconnect: some requests are in progress -> please try again.\n");
- error = -EBUSY;
- }
- spin_unlock(&lo->queue_lock);
+ BUG_ON(!list_empty(&lo->queue_head));
if (file)
fput(file);
return error;
@@ -598,24 +599,19 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
lo->sock = NULL;
}
up(&lo->tx_lock);
- spin_lock(&lo->queue_lock);
file = lo->file;
lo->file = NULL;
- spin_unlock(&lo->queue_lock);
nbd_clear_que(lo);
printk(KERN_WARNING "%s: queue cleared\n", lo->disk->disk_name);
if (file)
fput(file);
return lo->harderror;
case NBD_CLEAR_QUE:
- down(&lo->tx_lock);
- if (lo->sock) {
- up(&lo->tx_lock);
- return 0; /* probably should be error, but that would
- * break "nbd-client -d", so just return 0 */
- }
- up(&lo->tx_lock);
- nbd_clear_que(lo);
+ /*
+ * This is for compatibility only. The queue is always cleared
+ * by NBD_DO_IT or NBD_CLEAR_SOCK.
+ */
+ BUG_ON(!lo->sock && !list_empty(&lo->queue_head));
return 0;
case NBD_PRINT_DEBUG:
printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n",
@@ -688,6 +684,7 @@ static int __init nbd_init(void)
spin_lock_init(&nbd_dev[i].queue_lock);
INIT_LIST_HEAD(&nbd_dev[i].queue_head);
init_MUTEX(&nbd_dev[i].tx_lock);
+ init_waitqueue_head(&nbd_dev[i].active_wq);
nbd_dev[i].blksize = 1024;
nbd_dev[i].bytesize = 0x7ffffc00ULL << 10; /* 2TB */
disk->major = NBD_MAJOR;
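
With the nbd changes above, the request queue spinlock no longer covers the socket send path; tx_lock serializes senders, while the active_req/active_wq pair lets nbd_find_request() wait (via wait_event_interruptible()) until the request currently being transmitted has either been queued or failed. Condensing the interleaved hunks, the send path in do_nbd_request() now reads roughly:

	down(&lo->tx_lock);
	if (unlikely(!lo->sock)) {
		/* socket was torn down: fail the request immediately */
		up(&lo->tx_lock);
		req->errors++;
		nbd_end_request(req);
	} else {
		lo->active_req = req;		/* visible to nbd_find_request() */
		if (nbd_send_req(lo, req) == 0) {
			spin_lock(&lo->queue_lock);
			list_add(&req->queuelist, &lo->queue_head);
			spin_unlock(&lo->queue_lock);
		} else {
			req->errors++;
			nbd_end_request(req);
		}
		lo->active_req = NULL;
		up(&lo->tx_lock);
		wake_up_all(&lo->active_wq);	/* let waiters recheck active_req */
	}
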
diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig
index 17ff4056125..c0d2854dd09 100644
--- a/drivers/block/paride/Kconfig
+++ b/drivers/block/paride/Kconfig
@@ -4,11 +4,12 @@
# PARIDE doesn't need PARPORT, but if PARPORT is configured as a module,
# PARIDE must also be a module. The bogus CONFIG_PARIDE_PARPORT option
# controls the choices given to the user ...
+# PARIDE only supports PC style parports. Tough for USB or other parports...
config PARIDE_PARPORT
tristate
depends on PARIDE!=n
- default m if PARPORT=m
- default y if PARPORT!=m
+ default m if PARPORT_PC=m
+ default y if PARPORT_PC!=m
comment "Parallel IDE high-level drivers"
depends on PARIDE
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index fa49d62626b..62d2464c12f 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -747,32 +747,33 @@ static int pd_open(struct inode *inode, struct file *file)
return 0;
}
+static int pd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct pd_unit *disk = bdev->bd_disk->private_data;
+
+ if (disk->alt_geom) {
+ geo->heads = PD_LOG_HEADS;
+ geo->sectors = PD_LOG_SECTS;
+ geo->cylinders = disk->capacity / (geo->heads * geo->sectors);
+ } else {
+ geo->heads = disk->heads;
+ geo->sectors = disk->sectors;
+ geo->cylinders = disk->cylinders;
+ }
+
+ return 0;
+}
+
static int pd_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct pd_unit *disk = inode->i_bdev->bd_disk->private_data;
- struct hd_geometry __user *geo = (struct hd_geometry __user *) arg;
- struct hd_geometry g;
switch (cmd) {
case CDROMEJECT:
if (disk->access == 1)
pd_special_command(disk, pd_eject);
return 0;
- case HDIO_GETGEO:
- if (disk->alt_geom) {
- g.heads = PD_LOG_HEADS;
- g.sectors = PD_LOG_SECTS;
- g.cylinders = disk->capacity / (g.heads * g.sectors);
- } else {
- g.heads = disk->heads;
- g.sectors = disk->sectors;
- g.cylinders = disk->cylinders;
- }
- g.start = get_start_sect(inode->i_bdev);
- if (copy_to_user(geo, &g, sizeof(struct hd_geometry)))
- return -EFAULT;
- return 0;
default:
return -EINVAL;
}
@@ -815,6 +816,7 @@ static struct block_device_operations pd_fops = {
.open = pd_open,
.release = pd_release,
.ioctl = pd_ioctl,
+ .getgeo = pd_getgeo,
.media_changed = pd_check_media,
.revalidate_disk= pd_revalidate
};
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index e9746af29b9..852b564e903 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -205,6 +205,7 @@ static int pf_open(struct inode *inode, struct file *file);
static void do_pf_request(request_queue_t * q);
static int pf_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg);
+static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static int pf_release(struct inode *inode, struct file *file);
@@ -266,6 +267,7 @@ static struct block_device_operations pf_fops = {
.open = pf_open,
.release = pf_release,
.ioctl = pf_ioctl,
+ .getgeo = pf_getgeo,
.media_changed = pf_check_media,
};
@@ -313,34 +315,34 @@ static int pf_open(struct inode *inode, struct file *file)
return 0;
}
-static int pf_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
- struct pf_unit *pf = inode->i_bdev->bd_disk->private_data;
- struct hd_geometry __user *geo = (struct hd_geometry __user *) arg;
- struct hd_geometry g;
- sector_t capacity;
-
- if (cmd == CDROMEJECT) {
- if (pf->access == 1) {
- pf_eject(pf);
- return 0;
- }
- return -EBUSY;
- }
- if (cmd != HDIO_GETGEO)
- return -EINVAL;
- capacity = get_capacity(pf->disk);
+ struct pf_unit *pf = bdev->bd_disk->private_data;
+ sector_t capacity = get_capacity(pf->disk);
+
if (capacity < PF_FD_MAX) {
- g.cylinders = sector_div(capacity, PF_FD_HDS * PF_FD_SPT);
- g.heads = PF_FD_HDS;
- g.sectors = PF_FD_SPT;
+ geo->cylinders = sector_div(capacity, PF_FD_HDS * PF_FD_SPT);
+ geo->heads = PF_FD_HDS;
+ geo->sectors = PF_FD_SPT;
} else {
- g.cylinders = sector_div(capacity, PF_HD_HDS * PF_HD_SPT);
- g.heads = PF_HD_HDS;
- g.sectors = PF_HD_SPT;
+ geo->cylinders = sector_div(capacity, PF_HD_HDS * PF_HD_SPT);
+ geo->heads = PF_HD_HDS;
+ geo->sectors = PF_HD_SPT;
}
- if (copy_to_user(geo, &g, sizeof(g)))
- return -EFAULT;
+
+ return 0;
+}
+
+static int pf_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct pf_unit *pf = inode->i_bdev->bd_disk->private_data;
+
+ if (cmd != CDROMEJECT)
+ return -EINVAL;
+
+ if (pf->access != 1)
+ return -EBUSY;
+ pf_eject(pf);
return 0;
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index c0233efabeb..51b7a5c5b77 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1955,9 +1955,12 @@ static int pkt_open_dev(struct pktcdvd_device *pd, int write)
if ((ret = blkdev_get(pd->bdev, FMODE_READ, O_RDONLY)))
goto out;
+ if ((ret = bd_claim(pd->bdev, pd)))
+ goto out_putdev;
+
if ((ret = pkt_get_last_written(pd, &lba))) {
printk("pktcdvd: pkt_get_last_written failed\n");
- goto out_putdev;
+ goto out_unclaim;
}
set_capacity(pd->disk, lba << 2);
@@ -1967,7 +1970,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, int write)
q = bdev_get_queue(pd->bdev);
if (write) {
if ((ret = pkt_open_write(pd)))
- goto out_putdev;
+ goto out_unclaim;
/*
* Some CDRW drives can not handle writes larger than one packet,
* even if the size is a multiple of the packet size.
@@ -1982,13 +1985,15 @@ static int pkt_open_dev(struct pktcdvd_device *pd, int write)
}
if ((ret = pkt_set_segment_merging(pd, q)))
- goto out_putdev;
+ goto out_unclaim;
if (write)
printk("pktcdvd: %lukB available on disc\n", lba << 1);
return 0;
+out_unclaim:
+ bd_release(pd->bdev);
out_putdev:
blkdev_put(pd->bdev);
out:
@@ -2007,6 +2012,7 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
pkt_lock_door(pd, 0);
pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
+ bd_release(pd->bdev);
blkdev_put(pd->bdev);
}
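
pkt_open_dev() now claims the underlying block device right after opening it, and both the error paths and pkt_release_dev() drop the claim before blkdev_put(). The pairing, reduced to a sketch with hypothetical names (the FMODE_READ/O_RDONLY arguments match the call in the hunk above):

#include <linux/fs.h>

static int demo_attach_bdev(struct block_device *bdev, void *holder)
{
	int ret;

	ret = blkdev_get(bdev, FMODE_READ, O_RDONLY);
	if (ret)
		return ret;
	ret = bd_claim(bdev, holder);	/* mark the bdev busy for this holder */
	if (ret) {
		blkdev_put(bdev);	/* claim failed: undo the get */
		return ret;
	}
	return 0;
}

static void demo_detach_bdev(struct block_device *bdev)
{
	bd_release(bdev);		/* always release the claim first */
	blkdev_put(bdev);
}
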
diff --git a/drivers/block/ps2esdi.c b/drivers/block/ps2esdi.c
index 29d1518be72..43415f69839 100644
--- a/drivers/block/ps2esdi.c
+++ b/drivers/block/ps2esdi.c
@@ -81,8 +81,7 @@ static void (*current_int_handler) (u_int) = NULL;
static void ps2esdi_normal_interrupt_handler(u_int);
static void ps2esdi_initial_reset_int_handler(u_int);
static void ps2esdi_geometry_int_handler(u_int);
-static int ps2esdi_ioctl(struct inode *inode, struct file *file,
- u_int cmd, u_long arg);
+static int ps2esdi_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static int ps2esdi_read_status_words(int num_words, int max_words, u_short * buffer);
@@ -132,7 +131,7 @@ static struct ps2esdi_i_struct ps2esdi_info[MAX_HD] =
static struct block_device_operations ps2esdi_fops =
{
.owner = THIS_MODULE,
- .ioctl = ps2esdi_ioctl,
+ .getgeo = ps2esdi_getgeo,
};
static struct gendisk *ps2esdi_gendisk[2];
@@ -1058,21 +1057,13 @@ static void dump_cmd_complete_status(u_int int_ret_code)
}
-static int ps2esdi_ioctl(struct inode *inode,
- struct file *file, u_int cmd, u_long arg)
+static int ps2esdi_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
- struct ps2esdi_i_struct *p = inode->i_bdev->bd_disk->private_data;
- struct ps2esdi_geometry geom;
-
- if (cmd != HDIO_GETGEO)
- return -EINVAL;
- memset(&geom, 0, sizeof(geom));
- geom.heads = p->head;
- geom.sectors = p->sect;
- geom.cylinders = p->cyl;
- geom.start = get_start_sect(inode->i_bdev);
- if (copy_to_user((void __user *)arg, &geom, sizeof(geom)))
- return -EFAULT;
+ struct ps2esdi_i_struct *p = bdev->bd_disk->private_data;
+
+ geo->heads = p->head;
+ geo->sectors = p->sect;
+ geo->cylinders = p->cyl;
return 0;
}
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index 68c60a5bcda..ffd6abd6d5a 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -154,7 +154,7 @@ static int ramdisk_commit_write(struct file *file, struct page *page,
/*
 * ->writepage to the blockdev's mapping has to redirty the page so that the
- * VM doesn't go and steal it. We return WRITEPAGE_ACTIVATE so that the VM
+ * VM doesn't go and steal it. We return AOP_WRITEPAGE_ACTIVATE so that the VM
* won't try to (pointlessly) write the page again for a while.
*
* Really, these pages should not be on the LRU at all.
@@ -165,7 +165,7 @@ static int ramdisk_writepage(struct page *page, struct writeback_control *wbc)
make_page_uptodate(page);
SetPageDirty(page);
if (wbc->for_reclaim)
- return WRITEPAGE_ACTIVATE;
+ return AOP_WRITEPAGE_ACTIVATE;
unlock_page(page);
return 0;
}
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index af7cb2bfd67..01f042f6f1c 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1083,23 +1083,33 @@ static int swim3_add_device(struct device_node *swim)
{
struct device_node *mediabay;
struct floppy_state *fs = &floppy_states[floppy_count];
+ struct resource res_reg, res_dma;
- if (swim->n_addrs < 2)
- {
- printk(KERN_INFO "swim3: expecting 2 addrs (n_addrs:%d, n_intrs:%d)\n",
- swim->n_addrs, swim->n_intrs);
+ if (of_address_to_resource(swim, 0, &res_reg) ||
+ of_address_to_resource(swim, 1, &res_dma)) {
+ printk(KERN_ERR "swim3: Can't get addresses\n");
return -EINVAL;
}
-
- if (swim->n_intrs < 2)
- {
- printk(KERN_INFO "swim3: expecting 2 intrs (n_addrs:%d, n_intrs:%d)\n",
- swim->n_addrs, swim->n_intrs);
+ if (request_mem_region(res_reg.start, res_reg.end - res_reg.start + 1,
+ " (reg)") == NULL) {
+ printk(KERN_ERR "swim3: Can't request register space\n");
+ return -EINVAL;
+ }
+ if (request_mem_region(res_dma.start, res_dma.end - res_dma.start + 1,
+ " (dma)") == NULL) {
+ release_mem_region(res_reg.start,
+ res_reg.end - res_reg.start + 1);
+ printk(KERN_ERR "swim3: Can't request DMA space\n");
return -EINVAL;
}
- if (!request_OF_resource(swim, 0, NULL)) {
- printk(KERN_INFO "swim3: can't request IO resource !\n");
+ if (swim->n_intrs < 2) {
+ printk(KERN_INFO "swim3: expecting 2 intrs (n_intrs:%d)\n",
+ swim->n_intrs);
+ release_mem_region(res_reg.start,
+ res_reg.end - res_reg.start + 1);
+ release_mem_region(res_dma.start,
+ res_dma.end - res_dma.start + 1);
return -EINVAL;
}
@@ -1110,10 +1120,8 @@ static int swim3_add_device(struct device_node *swim)
memset(fs, 0, sizeof(*fs));
spin_lock_init(&fs->lock);
fs->state = idle;
- fs->swim3 = (struct swim3 __iomem *)
- ioremap(swim->addrs[0].address, 0x200);
- fs->dma = (struct dbdma_regs __iomem *)
- ioremap(swim->addrs[1].address, 0x200);
+ fs->swim3 = (struct swim3 __iomem *)ioremap(res_reg.start, 0x200);
+ fs->dma = (struct dbdma_regs __iomem *)ioremap(res_dma.start, 0x200);
fs->swim3_intr = swim->intrs[0].line;
fs->dma_intr = swim->intrs[1].line;
fs->cur_cyl = -1;
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 1ded3b43345..2ae08b343b9 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -27,8 +27,8 @@
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
+#include <linux/completion.h>
#include <asm/io.h>
-#include <asm/semaphore.h>
#include <asm/uaccess.h>
#if 0
@@ -303,7 +303,7 @@ struct carm_host {
struct work_struct fsm_task;
- struct semaphore probe_sem;
+ struct completion probe_comp;
};
struct carm_response {
@@ -407,8 +407,7 @@ struct carm_array_info {
static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void carm_remove_one (struct pci_dev *pdev);
-static int carm_bdev_ioctl(struct inode *ino, struct file *fil,
- unsigned int cmd, unsigned long arg);
+static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static struct pci_device_id carm_pci_tbl[] = {
{ PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
@@ -426,7 +425,7 @@ static struct pci_driver carm_driver = {
static struct block_device_operations carm_bd_ops = {
.owner = THIS_MODULE,
- .ioctl = carm_bdev_ioctl,
+ .getgeo = carm_bdev_getgeo,
};
static unsigned int carm_host_id;
@@ -434,32 +433,14 @@ static unsigned long carm_major_alloc;
-static int carm_bdev_ioctl(struct inode *ino, struct file *fil,
- unsigned int cmd, unsigned long arg)
+static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
- void __user *usermem = (void __user *) arg;
- struct carm_port *port = ino->i_bdev->bd_disk->private_data;
- struct hd_geometry geom;
+ struct carm_port *port = bdev->bd_disk->private_data;
- switch (cmd) {
- case HDIO_GETGEO:
- if (!usermem)
- return -EINVAL;
-
- geom.heads = (u8) port->dev_geom_head;
- geom.sectors = (u8) port->dev_geom_sect;
- geom.cylinders = port->dev_geom_cyl;
- geom.start = get_start_sect(ino->i_bdev);
-
- if (copy_to_user(usermem, &geom, sizeof(geom)))
- return -EFAULT;
- return 0;
-
- default:
- break;
- }
-
- return -EOPNOTSUPP;
+ geo->heads = (u8) port->dev_geom_head;
+ geo->sectors = (u8) port->dev_geom_sect;
+ geo->cylinders = port->dev_geom_cyl;
+ return 0;
}
static const u32 msg_sizes[] = { 32, 64, 128, CARM_MSG_SIZE };
@@ -770,7 +751,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
rc = end_that_request_first(req, uptodate, req->hard_nr_sectors);
assert(rc == 0);
- end_that_request_last(req);
+ end_that_request_last(req, uptodate);
rc = carm_put_request(host, crq);
assert(rc == 0);
@@ -1365,7 +1346,7 @@ static void carm_fsm_task (void *_data)
}
case HST_PROBE_FINISHED:
- up(&host->probe_sem);
+ complete(&host->probe_comp);
break;
case HST_ERROR:
@@ -1641,7 +1622,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
host->flags = pci_dac ? FL_DAC : 0;
spin_lock_init(&host->lock);
INIT_WORK(&host->fsm_task, carm_fsm_task, host);
- init_MUTEX_LOCKED(&host->probe_sem);
+ init_completion(&host->probe_comp);
for (i = 0; i < ARRAY_SIZE(host->req); i++)
host->req[i].tag = i;
@@ -1710,8 +1691,8 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto err_out_free_irq;
- DPRINTK("waiting for probe_sem\n");
- down(&host->probe_sem);
+ DPRINTK("waiting for probe_comp\n");
+ wait_for_completion(&host->probe_comp);
printk(KERN_INFO "%s: pci %s, ports %d, io %lx, irq %u, major %d\n",
host->name, pci_name(pdev), (int) CARM_MAX_PORTS,
@@ -1774,7 +1755,7 @@ static void carm_remove_one (struct pci_dev *pdev)
static int __init carm_init(void)
{
- return pci_module_init(&carm_driver);
+ return pci_register_driver(&carm_driver);
}
static void __exit carm_exit(void)
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 10740a06508..a05fe5843e6 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -951,7 +951,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
static void ub_end_rq(struct request *rq, int uptodate)
{
end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
- end_that_request_last(rq);
+ end_that_request_last(rq, uptodate);
}
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
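
A pattern running through most of the block drivers in this diff: end_that_request_last() grew a second argument, so the final completion now carries the same uptodate/error value that was passed to end_that_request_first(). A condensed sketch of the idiom, with a hypothetical name and assuming the request has already been dequeued (as in nbd_end_request() above):

#include <linux/blkdev.h>

static void demo_end_request(struct request *req, int uptodate)
{
	request_queue_t *q = req->q;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!end_that_request_first(req, uptodate, req->nr_sectors))
		end_that_request_last(req, uptodate);	/* new second argument */
	spin_unlock_irqrestore(q->queue_lock, flags);
}
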
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 0f48301342d..a3614e6a68d 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -809,34 +809,23 @@ static int mm_revalidate(struct gendisk *disk)
set_capacity(disk, card->mm_size << 1);
return 0;
}
-/*
------------------------------------------------------------------------------------
--- mm_ioctl
------------------------------------------------------------------------------------
-*/
-static int mm_ioctl(struct inode *i, struct file *f, unsigned int cmd, unsigned long arg)
+
+static int mm_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
- if (cmd == HDIO_GETGEO) {
- struct cardinfo *card = i->i_bdev->bd_disk->private_data;
- int size = card->mm_size * (1024 / MM_HARDSECT);
- struct hd_geometry geo;
- /*
- * get geometry: we have to fake one... trim the size to a
- * multiple of 2048 (1M): tell we have 32 sectors, 64 heads,
- * whatever cylinders.
- */
- geo.heads = 64;
- geo.sectors = 32;
- geo.start = get_start_sect(i->i_bdev);
- geo.cylinders = size / (geo.heads * geo.sectors);
-
- if (copy_to_user((void __user *) arg, &geo, sizeof(geo)))
- return -EFAULT;
- return 0;
- }
+ struct cardinfo *card = bdev->bd_disk->private_data;
+ int size = card->mm_size * (1024 / MM_HARDSECT);
- return -EINVAL;
+ /*
+ * get geometry: we have to fake one... trim the size to a
+ * multiple of 2048 (1M): tell we have 32 sectors, 64 heads,
+ * whatever cylinders.
+ */
+ geo->heads = 64;
+ geo->sectors = 32;
+ geo->cylinders = size / (geo->heads * geo->sectors);
+ return 0;
}
+
/*
-----------------------------------------------------------------------------------
-- mm_check_change
@@ -855,7 +844,7 @@ static int mm_check_change(struct gendisk *disk)
*/
static struct block_device_operations mm_fops = {
.owner = THIS_MODULE,
- .ioctl = mm_ioctl,
+ .getgeo = mm_getgeo,
.revalidate_disk= mm_revalidate,
.media_changed = mm_check_change,
};
@@ -1185,7 +1174,7 @@ static int __init mm_init(void)
printk(KERN_INFO DRIVER_VERSION " : " DRIVER_DESC "\n");
- retval = pci_module_init(&mm_pci_driver);
+ retval = pci_register_driver(&mm_pci_driver);
if (retval)
return -ENOMEM;
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index 2d518aa2720..f63e07bd9f9 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -247,43 +247,17 @@ static int viodasd_release(struct inode *ino, struct file *fil)
/* External ioctl entry point.
*/
-static int viodasd_ioctl(struct inode *ino, struct file *fil,
- unsigned int cmd, unsigned long arg)
+static int viodasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
- unsigned char sectors;
- unsigned char heads;
- unsigned short cylinders;
- struct hd_geometry *geo;
- struct gendisk *gendisk;
- struct viodasd_device *d;
+ struct gendisk *disk = bdev->bd_disk;
+ struct viodasd_device *d = disk->private_data;
- switch (cmd) {
- case HDIO_GETGEO:
- geo = (struct hd_geometry *)arg;
- if (geo == NULL)
- return -EINVAL;
- if (!access_ok(VERIFY_WRITE, geo, sizeof(*geo)))
- return -EFAULT;
- gendisk = ino->i_bdev->bd_disk;
- d = gendisk->private_data;
- sectors = d->sectors;
- if (sectors == 0)
- sectors = 32;
- heads = d->tracks;
- if (heads == 0)
- heads = 64;
- cylinders = d->cylinders;
- if (cylinders == 0)
- cylinders = get_capacity(gendisk) / (sectors * heads);
- if (__put_user(sectors, &geo->sectors) ||
- __put_user(heads, &geo->heads) ||
- __put_user(cylinders, &geo->cylinders) ||
- __put_user(get_start_sect(ino->i_bdev), &geo->start))
- return -EFAULT;
- return 0;
- }
+	geo->sectors = d->sectors ? d->sectors : 32;
+	geo->heads = d->tracks ? d->tracks : 64;
+	geo->cylinders = d->cylinders ? d->cylinders :
+		get_capacity(disk) / (geo->sectors * geo->heads);
- return -EINVAL;
+ return 0;
}
/*
@@ -293,7 +267,7 @@ static struct block_device_operations viodasd_fops = {
.owner = THIS_MODULE,
.open = viodasd_open,
.release = viodasd_release,
- .ioctl = viodasd_ioctl,
+ .getgeo = viodasd_getgeo,
};
/*
@@ -305,7 +279,7 @@ static void viodasd_end_request(struct request *req, int uptodate,
if (end_that_request_first(req, uptodate, num_sectors))
return;
add_disk_randomness(req->rq_disk);
- end_that_request_last(req);
+ end_that_request_last(req, uptodate);
}
/*
@@ -319,6 +293,7 @@ static int send_request(struct request *req)
u16 viocmd;
HvLpEvent_Rc hvrc;
struct vioblocklpevent *bevent;
+ struct HvLpEvent *hev;
struct scatterlist sg[VIOMAXBLOCKDMA];
int sgindex;
int statindex;
@@ -373,22 +348,19 @@ static int send_request(struct request *req)
* token so we can match the response up later
*/
memset(bevent, 0, sizeof(struct vioblocklpevent));
- bevent->event.xFlags.xValid = 1;
- bevent->event.xFlags.xFunction = HvLpEvent_Function_Int;
- bevent->event.xFlags.xAckInd = HvLpEvent_AckInd_DoAck;
- bevent->event.xFlags.xAckType = HvLpEvent_AckType_ImmediateAck;
- bevent->event.xType = HvLpEvent_Type_VirtualIo;
- bevent->event.xSubtype = viocmd;
- bevent->event.xSourceLp = HvLpConfig_getLpIndex();
- bevent->event.xTargetLp = viopath_hostLp;
- bevent->event.xSizeMinus1 =
+ hev = &bevent->event;
+ hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK |
+ HV_LP_EVENT_INT;
+ hev->xType = HvLpEvent_Type_VirtualIo;
+ hev->xSubtype = viocmd;
+ hev->xSourceLp = HvLpConfig_getLpIndex();
+ hev->xTargetLp = viopath_hostLp;
+ hev->xSizeMinus1 =
offsetof(struct vioblocklpevent, u.rw_data.dma_info) +
(sizeof(bevent->u.rw_data.dma_info[0]) * nsg) - 1;
- bevent->event.xSourceInstanceId =
- viopath_sourceinst(viopath_hostLp);
- bevent->event.xTargetInstanceId =
- viopath_targetinst(viopath_hostLp);
- bevent->event.xCorrelationToken = (u64)req;
+ hev->xSourceInstanceId = viopath_sourceinst(viopath_hostLp);
+ hev->xTargetInstanceId = viopath_targetinst(viopath_hostLp);
+ hev->xCorrelationToken = (u64)req;
bevent->version = VIOVERSION;
bevent->disk = DEVICE_NO(d);
bevent->u.rw_data.offset = start;
@@ -675,10 +647,10 @@ static void handle_block_event(struct HvLpEvent *event)
/* Notification that a partition went away! */
return;
/* First, we should NEVER get an int here...only acks */
- if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
+ if (hvlpevent_is_int(event)) {
printk(VIOD_KERN_WARNING
"Yikes! got an int in viodasd event handler!\n");
- if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) {
+ if (hvlpevent_need_ack(event)) {
event->xRc = HvLpEvent_Rc_InvalidSubtype;
HvCallEvent_ackLpEvent(event);
}
@@ -721,7 +693,7 @@ static void handle_block_event(struct HvLpEvent *event)
default:
printk(VIOD_KERN_WARNING "invalid subtype!");
- if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) {
+ if (hvlpevent_need_ack(event)) {
event->xRc = HvLpEvent_Rc_InvalidSubtype;
HvCallEvent_ackLpEvent(event);
}
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 68b6d7b154c..cbce7c5e944 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -128,9 +128,12 @@ static DEFINE_SPINLOCK(xd_lock);
static struct gendisk *xd_gendisk[2];
+static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo);
+
static struct block_device_operations xd_fops = {
.owner = THIS_MODULE,
.ioctl = xd_ioctl,
+ .getgeo = xd_getgeo,
};
static DECLARE_WAIT_QUEUE_HEAD(xd_wait_int);
static u_char xd_drives, xd_irq = 5, xd_dma = 3, xd_maxsectors;
@@ -276,11 +279,11 @@ static u_char __init xd_detect (u_char *controller, unsigned int *address)
return(1);
}
- for (i = 0; i < (sizeof(xd_bases) / sizeof(xd_bases[0])); i++) {
+ for (i = 0; i < ARRAY_SIZE(xd_bases); i++) {
void __iomem *p = ioremap(xd_bases[i], 0x2000);
if (!p)
continue;
- for (j = 1; j < (sizeof(xd_sigs) / sizeof(xd_sigs[0])); j++) {
+ for (j = 1; j < ARRAY_SIZE(xd_sigs); j++) {
const char *s = xd_sigs[j].string;
if (check_signature(p + xd_sigs[j].offset, s, strlen(s))) {
*controller = j;
@@ -330,22 +333,20 @@ static void do_xd_request (request_queue_t * q)
}
}
+static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ XD_INFO *p = bdev->bd_disk->private_data;
+
+ geo->heads = p->heads;
+ geo->sectors = p->sectors;
+ geo->cylinders = p->cylinders;
+ return 0;
+}
+
/* xd_ioctl: handle device ioctl's */
static int xd_ioctl (struct inode *inode,struct file *file,u_int cmd,u_long arg)
{
- XD_INFO *p = inode->i_bdev->bd_disk->private_data;
-
switch (cmd) {
- case HDIO_GETGEO:
- {
- struct hd_geometry g;
- struct hd_geometry __user *geom= (void __user *)arg;
- g.heads = p->heads;
- g.sectors = p->sectors;
- g.cylinders = p->cylinders;
- g.start = get_start_sect(inode->i_bdev);
- return copy_to_user(geom, &g, sizeof(g)) ? -EFAULT : 0;
- }
case HDIO_SET_DMA:
if (!capable(CAP_SYS_ADMIN)) return -EACCES;
if (xdc_busy) return -EBUSY;
@@ -1017,7 +1018,7 @@ static void __init do_xd_setup (int *integers)
case 2: if ((integers[2] > 0) && (integers[2] < 16))
xd_irq = integers[2];
case 1: xd_override = 1;
- if ((integers[1] >= 0) && (integers[1] < (sizeof(xd_sigs) / sizeof(xd_sigs[0]))))
+ if ((integers[1] >= 0) && (integers[1] < ARRAY_SIZE(xd_sigs)))
xd_type = integers[1];
case 0: break;
default:printk("xd: too many parameters for xd\n");
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 573ff6c1be5..613673b12fa 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -279,6 +279,7 @@ static int hci_uart_tty_open(struct tty_struct *tty)
tty->disc_data = hu;
hu->tty = tty;
+ tty->receive_room = 65536;
spin_lock_init(&hu->rx_lock);
@@ -348,20 +349,6 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
hci_uart_tx_wakeup(hu);
}
-/* hci_uart_tty_room()
- *
- * Callback function from tty driver. Return the amount of
- * space left in the receiver's buffer to decide if remote
- * transmitter is to be throttled.
- *
- * Arguments: tty pointer to associated tty instance data
- * Return Value: number of bytes left in receive buffer
- */
-static int hci_uart_tty_room (struct tty_struct *tty)
-{
- return 65536;
-}
-
/* hci_uart_tty_receive()
*
* Called by tty low level driver when receive data is
@@ -544,7 +531,6 @@ static int __init hci_uart_init(void)
hci_uart_ldisc.write = hci_uart_tty_write;
hci_uart_ldisc.ioctl = hci_uart_tty_ioctl;
hci_uart_ldisc.poll = hci_uart_tty_poll;
- hci_uart_ldisc.receive_room = hci_uart_tty_room;
hci_uart_ldisc.receive_buf = hci_uart_tty_receive;
hci_uart_ldisc.write_wakeup = hci_uart_tty_wakeup;
hci_uart_ldisc.owner = THIS_MODULE;
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 15396034841..879bbc26ce9 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1131,7 +1131,7 @@ int open_for_data(struct cdrom_device_info * cdi)
This ensures that the drive gets unlocked after a mount fails. This
is a goto to avoid bloating the driver with redundant code. */
clean_up_and_return:
- cdinfo(CD_WARNING, "open failed.\n");
+ cdinfo(CD_OPEN, "open failed.\n");
if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) {
cdo->lock_door(cdi, 0);
cdinfo(CD_OPEN, "door unlocked.\n");
diff --git a/drivers/cdrom/cdu31a.c b/drivers/cdrom/cdu31a.c
index ac96de15d83..378e88d2075 100644
--- a/drivers/cdrom/cdu31a.c
+++ b/drivers/cdrom/cdu31a.c
@@ -1402,7 +1402,7 @@ static void do_cdu31a_request(request_queue_t * q)
if (!end_that_request_first(req, 1, nblock)) {
spin_lock_irq(q->queue_lock);
blkdev_dequeue_request(req);
- end_that_request_last(req);
+ end_that_request_last(req, 1);
spin_unlock_irq(q->queue_lock);
}
continue;
diff --git a/drivers/cdrom/cm206.c b/drivers/cdrom/cm206.c
index 01f03517332..ce127f7ec0f 100644
--- a/drivers/cdrom/cm206.c
+++ b/drivers/cdrom/cm206.c
@@ -32,7 +32,7 @@ History:
18 mrt 1995: 0.24 Working background read-ahead. (still problems)
26 mrt 1995: 0.25 Multi-session ioctl added (kernel v1.2).
Statistics implemented, though separate stats206.h.
- Accessible trough ioctl 0x1000 (just a number).
+ Accessible through ioctl 0x1000 (just a number).
Hard to choose between v1.2 development and 1.1.75.
Bottom-half doesn't work with 1.2...
0.25a: fixed... typo. Still problems...
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index b5191780ecc..193446e6a08 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -542,10 +542,10 @@ static void vio_handle_cd_event(struct HvLpEvent *event)
/* Notification that a partition went away! */
return;
/* First, we should NEVER get an int here...only acks */
- if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
+ if (hvlpevent_is_int(event)) {
printk(VIOCD_KERN_WARNING
"Yikes! got an int in viocd event handler!\n");
- if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) {
+ if (hvlpevent_need_ack(event)) {
event->xRc = HvLpEvent_Rc_InvalidSubtype;
HvCallEvent_ackLpEvent(event);
}
@@ -616,7 +616,7 @@ return_complete:
printk(VIOCD_KERN_WARNING
"message with invalid subtype %0x04X!\n",
event->xSubtype & VIOMINOR_SUBTYPE_MASK);
- if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) {
+ if (hvlpevent_need_ack(event)) {
event->xRc = HvLpEvent_Rc_InvalidSubtype;
HvCallEvent_ackLpEvent(event);
}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 84e68cdd451..d6fcd0a36f9 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -80,7 +80,7 @@ config SERIAL_NONSTANDARD
config COMPUTONE
tristate "Computone IntelliPort Plus serial support"
- depends on SERIAL_NONSTANDARD && BROKEN_ON_SMP
+ depends on SERIAL_NONSTANDARD
---help---
This driver supports the entire family of Intelliport II/Plus
controllers with the exception of the MicroChannel controllers and
@@ -153,7 +153,7 @@ config DIGIEPCA
config ESPSERIAL
tristate "Hayes ESP serial port support"
- depends on SERIAL_NONSTANDARD && ISA && BROKEN_ON_SMP && ISA_DMA_API
+ depends on SERIAL_NONSTANDARD && ISA && ISA_DMA_API
help
This is a driver which supports Hayes ESP serial ports. Both single
port cards and multiport cards are supported. Make sure to read
@@ -166,7 +166,7 @@ config ESPSERIAL
config MOXA_INTELLIO
tristate "Moxa Intellio support"
- depends on SERIAL_NONSTANDARD && BROKEN_ON_SMP
+ depends on SERIAL_NONSTANDARD
help
Say Y here if you have a Moxa Intellio multiport serial card.
@@ -220,6 +220,14 @@ config SYNCLINKMP
The module will be called synclinkmp. If you want to do that, say M
here.
+config SYNCLINK_GT
+ tristate "SyncLink GT/AC support"
+ depends on SERIAL_NONSTANDARD
+ help
+ Support for SyncLink GT and SyncLink AC families of
+ synchronous and asynchronous serial adapters
+ manufactured by Microgate Systems, Ltd. (www.microgate.com)
+
config N_HDLC
tristate "HDLC line discipline support"
depends on SERIAL_NONSTANDARD
@@ -282,7 +290,7 @@ config SX
config RIO
tristate "Specialix RIO system support"
- depends on SERIAL_NONSTANDARD && BROKEN_ON_SMP
+ depends on SERIAL_NONSTANDARD && BROKEN_ON_SMP && !64BIT
help
This is a driver for the Specialix RIO, a smart serial card which
drives an outboard box that can support up to 128 ports. Product
@@ -687,7 +695,7 @@ config NVRAM
config RTC
tristate "Enhanced Real Time Clock Support"
- depends on !PPC32 && !PARISC && !IA64 && !M68K && (!SPARC || PCI)
+ depends on !PPC32 && !PARISC && !IA64 && !M68K && (!SPARC || PCI) && !FRV
---help---
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -735,7 +743,7 @@ config SGI_IP27_RTC
config GEN_RTC
tristate "Generic /dev/rtc emulation"
- depends on RTC!=y && !IA64 && !ARM && !M32R && !SPARC
+ depends on RTC!=y && !IA64 && !ARM && !M32R && !SPARC && !FRV
---help---
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -928,6 +936,15 @@ config SCx200_GPIO
If compiled as a module, it will be called scx200_gpio.
+config CS5535_GPIO
+ tristate "AMD CS5535/CS5536 GPIO (Geode Companion Device)"
+ depends on X86_32
+ help
+ Give userspace access to the GPIO pins on the AMD CS5535 and
+ CS5536 Geode companion devices.
+
+ If compiled as a module, it will be called cs5535_gpio.
+
config GPIO_VR41XX
tristate "NEC VR4100 series General-purpose I/O Unit support"
depends on CPU_VR41XX
@@ -985,7 +1002,7 @@ config HPET_MMAP
config HANGCHECK_TIMER
tristate "Hangcheck timer"
- depends on X86 || IA64 || PPC64 || ARCH_S390
+ depends on X86 || IA64 || PPC64 || S390
help
The hangcheck-timer module detects when the system has gone
out to lunch past a certain margin. It can reboot the system
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 4aeae687e88..503dd901d40 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_RISCOM8) += riscom8.o
obj-$(CONFIG_ISI) += isicom.o
obj-$(CONFIG_SYNCLINK) += synclink.o
obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o
+obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
obj-$(CONFIG_N_HDLC) += n_hdlc.o
obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
obj-$(CONFIG_SX) += sx.o generic_serial.o
@@ -80,6 +81,7 @@ obj-$(CONFIG_PPDEV) += ppdev.o
obj-$(CONFIG_NWBUTTON) += nwbutton.o
obj-$(CONFIG_NWFLASH) += nwflash.o
obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o
+obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio.o
obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
obj-$(CONFIG_TANBAC_TB0219) += tb0219.o
obj-$(CONFIG_TELCLOCK) += tlclk.o
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index 3f8f7fa6b0f..268f78d926d 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
+#include <linux/jiffies.h>
#include <linux/agp_backend.h>
#include "agp.h"
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index a124f8c5d06..667a21c72ed 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -99,6 +99,7 @@ static char *serial_version = "4.30";
#define _INLINE_ inline
#endif
+#define custom amiga_custom
static char *serial_name = "Amiga-builtin serial driver";
static struct tty_driver *serial_driver;
@@ -116,7 +117,7 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout);
static struct serial_state rs_table[1];
-#define NR_PORTS (sizeof(rs_table)/sizeof(struct serial_state))
+#define NR_PORTS ARRAY_SIZE(rs_table)
/*
* tmp_buf is used as a temporary buffer by serial_write. We need to
@@ -265,8 +266,9 @@ static _INLINE_ void receive_chars(struct async_struct *info)
int status;
int serdatr;
struct tty_struct *tty = info->tty;
- unsigned char ch;
+ unsigned char ch, flag;
struct async_icount *icount;
+ int oe = 0;
icount = &info->state->icount;
@@ -282,15 +284,12 @@ static _INLINE_ void receive_chars(struct async_struct *info)
status |= UART_LSR_OE;
ch = serdatr & 0xff;
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- goto ignore_char;
- *tty->flip.char_buf_ptr = ch;
icount->rx++;
#ifdef SERIAL_DEBUG_INTR
printk("DR%02x:%02x...", ch, status);
#endif
- *tty->flip.flag_buf_ptr = 0;
+ flag = TTY_NORMAL;
/*
* We don't handle parity or frame errors - but I have left
@@ -319,7 +318,7 @@ static _INLINE_ void receive_chars(struct async_struct *info)
* should be ignored.
*/
if (status & info->ignore_status_mask)
- goto ignore_char;
+ goto out;
status &= info->read_status_mask;
@@ -327,33 +326,28 @@ static _INLINE_ void receive_chars(struct async_struct *info)
#ifdef SERIAL_DEBUG_INTR
printk("handling break....");
#endif
- *tty->flip.flag_buf_ptr = TTY_BREAK;
+ flag = TTY_BREAK;
if (info->flags & ASYNC_SAK)
do_SAK(tty);
} else if (status & UART_LSR_PE)
- *tty->flip.flag_buf_ptr = TTY_PARITY;
+ flag = TTY_PARITY;
else if (status & UART_LSR_FE)
- *tty->flip.flag_buf_ptr = TTY_FRAME;
+ flag = TTY_FRAME;
if (status & UART_LSR_OE) {
/*
* Overrun is special, since it's
* reported immediately, and doesn't
* affect the current character
*/
- if (tty->flip.count < TTY_FLIPBUF_SIZE) {
- tty->flip.count++;
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- *tty->flip.flag_buf_ptr = TTY_OVERRUN;
- }
+ oe = 1;
}
}
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
- ignore_char:
-
+ tty_insert_flip_char(tty, ch, flag);
+ if (oe == 1)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
tty_flip_buffer_push(tty);
+out:
+ return;
}
static _INLINE_ void transmit_chars(struct async_struct *info)
@@ -1095,7 +1089,7 @@ static void rs_unthrottle(struct tty_struct * tty)
*/
static int get_serial_info(struct async_struct * info,
- struct serial_struct * retinfo)
+ struct serial_struct __user * retinfo)
{
struct serial_struct tmp;
struct serial_state *state = info->state;
@@ -1119,7 +1113,7 @@ static int get_serial_info(struct async_struct * info,
}
static int set_serial_info(struct async_struct * info,
- struct serial_struct * new_info)
+ struct serial_struct __user * new_info)
{
struct serial_struct new_serial;
struct serial_state old_state, *state;
@@ -1200,7 +1194,7 @@ check_and_exit:
* transmit holding register is empty. This functionality
* allows an RS485 driver to be written in user space.
*/
-static int get_lsr_info(struct async_struct * info, unsigned int *value)
+static int get_lsr_info(struct async_struct * info, unsigned int __user *value)
{
unsigned char status;
unsigned int result;
@@ -1291,6 +1285,7 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
struct async_struct * info = (struct async_struct *)tty->driver_data;
struct async_icount cprev, cnow; /* kernel counter temps */
struct serial_icounter_struct icount;
+ void __user *argp = (void __user *)arg;
unsigned long flags;
if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
@@ -1305,19 +1300,17 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
switch (cmd) {
case TIOCGSERIAL:
- return get_serial_info(info,
- (struct serial_struct *) arg);
+ return get_serial_info(info, argp);
case TIOCSSERIAL:
- return set_serial_info(info,
- (struct serial_struct *) arg);
+ return set_serial_info(info, argp);
case TIOCSERCONFIG:
return 0;
case TIOCSERGETLSR: /* Get line status register */
- return get_lsr_info(info, (unsigned int *) arg);
+ return get_lsr_info(info, argp);
case TIOCSERGSTRUCT:
- if (copy_to_user((struct async_struct *) arg,
+ if (copy_to_user(argp,
info, sizeof(struct async_struct)))
return -EFAULT;
return 0;
@@ -1376,7 +1369,7 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
icount.brk = cnow.brk;
icount.buf_overrun = cnow.buf_overrun;
- if (copy_to_user((void *)arg, &icount, sizeof(icount)))
+ if (copy_to_user(argp, &icount, sizeof(icount)))
return -EFAULT;
return 0;
case TIOCSERGWILD:
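
The amiserial receive path above (and the cyclades one below) stops writing into tty->flip buffers by hand and uses the tty_insert_flip_char()/tty_flip_buffer_push() helpers instead, so the TTY_FLIPBUF_SIZE bookkeeping disappears. A minimal sketch of the new idiom for a receive handler, with a hypothetical name and the usual UART_LSR_* status bits:

#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_reg.h>

static void demo_receive_char(struct tty_struct *tty, unsigned char ch,
			      unsigned char status)
{
	char flag = TTY_NORMAL;

	if (status & UART_LSR_BI)
		flag = TTY_BREAK;
	else if (status & UART_LSR_PE)
		flag = TTY_PARITY;
	else if (status & UART_LSR_FE)
		flag = TTY_FRAME;

	/* the helpers grow the flip buffer as needed */
	tty_insert_flip_char(tty, ch, flag);
	if (status & UART_LSR_OE)
		/* overrun is reported as an extra, separate character */
		tty_insert_flip_char(tty, 0, TTY_OVERRUN);
	tty_flip_buffer_push(tty);
}
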
diff --git a/drivers/char/cs5535_gpio.c b/drivers/char/cs5535_gpio.c
new file mode 100644
index 00000000000..5d72f50de1a
--- /dev/null
+++ b/drivers/char/cs5535_gpio.c
@@ -0,0 +1,250 @@
+/*
+ * AMD CS5535/CS5536 GPIO driver.
+ * Allows a user space process to play with the GPIO pins.
+ *
+ * Copyright (c) 2005 Ben Gardner <bgardner@wabtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cdev.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+
+#define NAME "cs5535_gpio"
+
+MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>");
+MODULE_DESCRIPTION("AMD CS5535/CS5536 GPIO Pin Driver");
+MODULE_LICENSE("GPL");
+
+static int major;
+module_param(major, int, 0);
+MODULE_PARM_DESC(major, "Major device number");
+
+static ulong mask;
+module_param(mask, ulong, 0);
+MODULE_PARM_DESC(mask, "GPIO channel mask");
+
+#define MSR_LBAR_GPIO 0x5140000C
+
+static u32 gpio_base;
+
+static struct pci_device_id divil_pci[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
+ { } /* NULL entry */
+};
+
+static struct cdev cs5535_gpio_cdev;
+
+/* reserve 32 entries even though some aren't usable */
+#define CS5535_GPIO_COUNT 32
+
+/* IO block size */
+#define CS5535_GPIO_SIZE 256
+
+struct gpio_regmap {
+ u32 rd_offset;
+ u32 wr_offset;
+ char on;
+ char off;
+};
+static struct gpio_regmap rm[] =
+{
+ { 0x30, 0x00, '1', '0' }, /* GPIOx_READ_BACK / GPIOx_OUT_VAL */
+ { 0x20, 0x20, 'I', 'i' }, /* GPIOx_IN_EN */
+ { 0x04, 0x04, 'O', 'o' }, /* GPIOx_OUT_EN */
+ { 0x08, 0x08, 't', 'T' }, /* GPIOx_OUT_OD_EN */
+ { 0x18, 0x18, 'P', 'p' }, /* GPIOx_OUT_PU_EN */
+ { 0x1c, 0x1c, 'D', 'd' }, /* GPIOx_OUT_PD_EN */
+};
+
+
+/**
+ * Gets the register offset for the GPIO bank.
+ * Low (0-15) starts at 0x00, high (16-31) starts at 0x80
+ */
+static inline u32 cs5535_lowhigh_base(int reg)
+{
+ return (reg & 0x10) << 3;
+}
+
+static ssize_t cs5535_gpio_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ u32 m = iminor(file->f_dentry->d_inode);
+ int i, j;
+ u32 base = gpio_base + cs5535_lowhigh_base(m);
+ u32 m0, m1;
+ char c;
+
+ /**
+ * Creates the mask for atomic bit programming.
+ * The high 16 bits and the low 16 bits are used to set the mask.
+ * For example, GPIO 15 maps to 31,15: 0,1 => On; 1,0=> Off
+ */
+ m1 = 1 << (m & 0x0F);
+ m0 = m1 << 16;
+
+ for (i = 0; i < len; ++i) {
+ if (get_user(c, data+i))
+ return -EFAULT;
+
+ for (j = 0; j < ARRAY_SIZE(rm); j++) {
+ if (c == rm[j].on) {
+ outl(m1, base + rm[j].wr_offset);
+ break;
+ } else if (c == rm[j].off) {
+ outl(m0, base + rm[j].wr_offset);
+ break;
+ }
+ }
+ }
+ *ppos = 0;
+ return len;
+}
+
+static ssize_t cs5535_gpio_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ u32 m = iminor(file->f_dentry->d_inode);
+ u32 base = gpio_base + cs5535_lowhigh_base(m);
+ int rd_bit = 1 << (m & 0x0f);
+ int i;
+ char ch;
+ ssize_t count = 0;
+
+ if (*ppos >= ARRAY_SIZE(rm))
+ return 0;
+
+ for (i = *ppos; (i < (*ppos + len)) && (i < ARRAY_SIZE(rm)); i++) {
+ ch = (inl(base + rm[i].rd_offset) & rd_bit) ?
+ rm[i].on : rm[i].off;
+
+ if (put_user(ch, buf+count))
+ return -EFAULT;
+
+ count++;
+ }
+
+ /* add a line-feed if there is room */
+ if ((i == ARRAY_SIZE(rm)) && (count < len)) {
+ put_user('\n', buf + count);
+ count++;
+ }
+
+ *ppos += count;
+ return count;
+}
+
+static int cs5535_gpio_open(struct inode *inode, struct file *file)
+{
+ u32 m = iminor(inode);
+
+ /* the mask says which pins are usable by this driver */
+ if ((mask & (1 << m)) == 0)
+ return -EINVAL;
+
+ return nonseekable_open(inode, file);
+}
+
+static struct file_operations cs5535_gpio_fops = {
+ .owner = THIS_MODULE,
+ .write = cs5535_gpio_write,
+ .read = cs5535_gpio_read,
+ .open = cs5535_gpio_open
+};
+
+static int __init cs5535_gpio_init(void)
+{
+ dev_t dev_id;
+ u32 low, hi;
+ int retval;
+
+ if (pci_dev_present(divil_pci) == 0) {
+ printk(KERN_WARNING NAME ": DIVIL not found\n");
+ return -ENODEV;
+ }
+
+ /* Grab the GPIO I/O range */
+ rdmsr(MSR_LBAR_GPIO, low, hi);
+
+ /* Check the mask and whether GPIO is enabled (sanity check) */
+ if (hi != 0x0000f001) {
+ printk(KERN_WARNING NAME ": GPIO not enabled\n");
+ return -ENODEV;
+ }
+
+ /* Mask off the IO base address */
+ gpio_base = low & 0x0000ff00;
+
+ /**
+ * Some GPIO pins
+ * 31-29,23 : reserved (always mask out)
+ * 28 : Power Button
+ * 26 : PME#
+ * 22-16 : LPC
+ * 14,15 : SMBus
+ * 9,8 : UART1
+ * 7 : PCI INTB
+ * 3,4 : UART2/DDC
+ * 2 : IDE_IRQ0
+ * 0 : PCI INTA
+ *
+ * If a mask was not specified, be conservative and only allow:
+ * 1,2,5,6,10-13,24,25,27
+ */
+ if (mask != 0)
+ mask &= 0x1f7fffff;
+ else
+ mask = 0x0b003c66;
+
+ if (request_region(gpio_base, CS5535_GPIO_SIZE, NAME) == 0) {
+ printk(KERN_ERR NAME ": can't allocate I/O for GPIO\n");
+ return -ENODEV;
+ }
+
+ if (major) {
+ dev_id = MKDEV(major, 0);
+ retval = register_chrdev_region(dev_id, CS5535_GPIO_COUNT,
+ NAME);
+ } else {
+ retval = alloc_chrdev_region(&dev_id, 0, CS5535_GPIO_COUNT,
+ NAME);
+ major = MAJOR(dev_id);
+ }
+
+ if (retval) {
+ release_region(gpio_base, CS5535_GPIO_SIZE);
+ return -1;
+ }
+
+ printk(KERN_DEBUG NAME ": base=%#x mask=%#lx major=%d\n",
+ gpio_base, mask, major);
+
+ cdev_init(&cs5535_gpio_cdev, &cs5535_gpio_fops);
+ cdev_add(&cs5535_gpio_cdev, dev_id, CS5535_GPIO_COUNT);
+
+ return 0;
+}
+
+static void __exit cs5535_gpio_cleanup(void)
+{
+ dev_t dev_id = MKDEV(major, 0);
+ unregister_chrdev_region(dev_id, CS5535_GPIO_COUNT);
+ if (gpio_base != 0)
+ release_region(gpio_base, CS5535_GPIO_SIZE);
+}
+
+module_init(cs5535_gpio_init);
+module_exit(cs5535_gpio_cleanup);
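
As a usage sketch (not part of the patch): each GPIO pin is exposed as one char-device minor, write() turns single command characters from the regmap above into one outl() per character — the pin's bit in the low half of the bank sets the feature, the same bit in the high half clears it, so no read-modify-write is needed — and read() returns the current state letters plus a newline. Assuming a node for pin 5 has been created by hand against the driver's major number (the driver does not create /dev entries itself):

/*
 * Hypothetical user-space sketch; the node name and minor-to-pin mapping
 * are assumptions, everything else follows the driver code above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char state[8];
	ssize_t n;
	int fd;

	fd = open("/dev/cs5535_gpio5", O_RDWR);	/* minor 5 = GPIO pin 5 */
	if (fd < 0)
		return 1;
	write(fd, "O1", 2);			/* 'O': enable output, '1': drive high */
	n = read(fd, state, sizeof(state) - 1);	/* e.g. "1iOTpd\n" */
	if (n > 0) {
		state[n] = '\0';
		printf("pin 5: %s", state);
	}
	close(fd);
	return 0;
}
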
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index c7f818cd7b0..39c61a71176 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -641,6 +641,7 @@ static char rcsid[] =
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/major.h>
#include <linux/string.h>
@@ -723,7 +724,7 @@ static unsigned int cy_isa_addresses[] = {
0xDE000,
0,0,0,0,0,0,0,0
};
-#define NR_ISA_ADDRS (sizeof(cy_isa_addresses)/sizeof(unsigned char*))
+#define NR_ISA_ADDRS ARRAY_SIZE(cy_isa_addresses)
#ifdef MODULE
static long maddr[NR_CARDS] = { 0, };
@@ -1086,7 +1087,7 @@ cyy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
int had_work;
int mdm_change;
int mdm_status;
-
+ int len;
if((cinfo = (struct cyclades_card *)dev_id) == 0){
#ifdef CY_DEBUG_INTERRUPTS
printk("cyy_interrupt: spurious interrupt %d\n\r", irq);
@@ -1163,63 +1164,43 @@ cyy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
info->icount.rx++;
continue;
}
- if (tty->flip.count < TTY_FLIPBUF_SIZE){
- tty->flip.count++;
+ if (tty_buffer_request_room(tty, 1)) {
if (data & info->read_status_mask){
if(data & CyBREAK){
- *tty->flip.flag_buf_ptr++ =
- TTY_BREAK;
- *tty->flip.char_buf_ptr++ =
- cy_readb(base_addr+(CyRDSR<<index));
+ tty_insert_flip_char(tty, cy_readb(base_addr+(CyRDSR<<index)), TTY_BREAK);
info->icount.rx++;
if (info->flags & ASYNC_SAK){
do_SAK(tty);
}
}else if(data & CyFRAME){
- *tty->flip.flag_buf_ptr++ =
- TTY_FRAME;
- *tty->flip.char_buf_ptr++ =
- cy_readb(base_addr+(CyRDSR<<index));
+ tty_insert_flip_char(tty, cy_readb(base_addr+(CyRDSR<<index)), TTY_FRAME);
info->icount.rx++;
info->idle_stats.frame_errs++;
}else if(data & CyPARITY){
- *tty->flip.flag_buf_ptr++ =
- TTY_PARITY;
- *tty->flip.char_buf_ptr++ =
- cy_readb(base_addr+(CyRDSR<<index));
+ /* Pieces of seven... */
+ tty_insert_flip_char(tty, cy_readb(base_addr+(CyRDSR<<index)), TTY_PARITY);
info->icount.rx++;
info->idle_stats.parity_errs++;
}else if(data & CyOVERRUN){
- *tty->flip.flag_buf_ptr++ =
- TTY_OVERRUN;
- *tty->flip.char_buf_ptr++ = 0;
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
info->icount.rx++;
/* If the flip buffer itself is
overflowing, we still lose
the next incoming character.
*/
- if(tty->flip.count
- < TTY_FLIPBUF_SIZE){
- tty->flip.count++;
- *tty->flip.flag_buf_ptr++ =
- TTY_NORMAL;
- *tty->flip.char_buf_ptr++ =
- cy_readb(base_addr+(CyRDSR<<index));
- info->icount.rx++;
- }
+ tty_insert_flip_char(tty, cy_readb(base_addr+(CyRDSR<<index)), TTY_FRAME);
+ info->icount.rx++;
info->idle_stats.overruns++;
/* These two conditions may imply */
/* a normal read should be done. */
/* }else if(data & CyTIMEOUT){ */
/* }else if(data & CySPECHAR){ */
- }else{
- *tty->flip.flag_buf_ptr++ = 0;
- *tty->flip.char_buf_ptr++ = 0;
- info->icount.rx++;
+ }else {
+ tty_insert_flip_char(tty, 0, TTY_NORMAL);
+ info->icount.rx++;
}
}else{
- *tty->flip.flag_buf_ptr++ = 0;
- *tty->flip.char_buf_ptr++ = 0;
+ tty_insert_flip_char(tty, 0, TTY_NORMAL);
info->icount.rx++;
}
}else{
@@ -1240,14 +1221,10 @@ cyy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
info->mon.char_max = char_count;
info->mon.char_last = char_count;
#endif
- while(char_count--){
- if (tty->flip.count >= TTY_FLIPBUF_SIZE){
- break;
- }
- tty->flip.count++;
+ len = tty_buffer_request_room(tty, char_count);
+ while(len--){
data = cy_readb(base_addr+(CyRDSR<<index));
- *tty->flip.flag_buf_ptr++ = TTY_NORMAL;
- *tty->flip.char_buf_ptr++ = data;
+ tty_insert_flip_char(tty, data, TTY_NORMAL);
info->idle_stats.recv_bytes++;
info->icount.rx++;
#ifdef CY_16Y_HACK
@@ -1256,7 +1233,7 @@ cyy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
}
info->idle_stats.recv_idle = jiffies;
}
- schedule_delayed_work(&tty->flip.work, 1);
+ schedule_delayed_work(&tty->buf.work, 1);
}
/* end of service */
cy_writeb(base_addr+(CyRIR<<index), (save_xir & 0x3f));
@@ -1551,6 +1528,7 @@ cyz_handle_rx(struct cyclades_port *info,
struct cyclades_card *cinfo = &cy_card[info->card];
struct tty_struct *tty = info->tty;
volatile int char_count;
+ int len;
#ifdef BLOCKMOVE
int small_count;
#else
@@ -1606,18 +1584,11 @@ cyz_handle_rx(struct cyclades_port *info,
tty->flip.count += small_count;
}
#else
- while(char_count--){
- if (tty->flip.count >= N_TTY_BUF_SIZE - tty->read_cnt)
- break;
-
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- break;
-
+ len = tty_buffer_request_room(tty, char_count);
+ while(len--){
data = cy_readb(cinfo->base_addr + rx_bufaddr + new_rx_get);
new_rx_get = (new_rx_get + 1) & (rx_bufsize - 1);
- tty->flip.count++;
- *tty->flip.flag_buf_ptr++ = TTY_NORMAL;
- *tty->flip.char_buf_ptr++ = data;
+ tty_insert_flip_char(tty, data, TTY_NORMAL);
info->idle_stats.recv_bytes++;
info->icount.rx++;
}
@@ -1635,7 +1606,7 @@ cyz_handle_rx(struct cyclades_port *info,
}
#endif
info->idle_stats.recv_idle = jiffies;
- schedule_delayed_work(&tty->flip.work, 1);
+ schedule_delayed_work(&tty->buf.work, 1);
}
/* Update rx_get */
cy_writel(&buf_ctrl->rx_get, new_rx_get);
@@ -1763,23 +1734,17 @@ cyz_handle_cmd(struct cyclades_card *cinfo)
switch(cmd) {
case C_CM_PR_ERROR:
- tty->flip.count++;
- *tty->flip.flag_buf_ptr++ = TTY_PARITY;
- *tty->flip.char_buf_ptr++ = 0;
+ tty_insert_flip_char(tty, 0, TTY_PARITY);
info->icount.rx++;
special_count++;
break;
case C_CM_FR_ERROR:
- tty->flip.count++;
- *tty->flip.flag_buf_ptr++ = TTY_FRAME;
- *tty->flip.char_buf_ptr++ = 0;
+ tty_insert_flip_char(tty, 0, TTY_FRAME);
info->icount.rx++;
special_count++;
break;
case C_CM_RXBRK:
- tty->flip.count++;
- *tty->flip.flag_buf_ptr++ = TTY_BREAK;
- *tty->flip.char_buf_ptr++ = 0;
+ tty_insert_flip_char(tty, 0, TTY_BREAK);
info->icount.rx++;
special_count++;
break;
@@ -1844,7 +1809,7 @@ cyz_handle_cmd(struct cyclades_card *cinfo)
if(delta_count)
cy_sched_event(info, Cy_EVENT_DELTA_WAKEUP);
if(special_count)
- schedule_delayed_work(&tty->flip.work, 1);
+ schedule_delayed_work(&tty->buf.work, 1);
}
}
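
The conversion above replaces open-coded pokes at tty->flip (bounds check, count++, flag/char pointer stores) with the tty buffering helpers: reserve space with tty_buffer_request_room(), add characters with tty_insert_flip_char(), then kick tty->buf.work instead of tty->flip.work. The same pattern reduced to a minimal sketch — my_hw_read_char() is a placeholder for the hardware FIFO read:

#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/workqueue.h>

extern unsigned char my_hw_read_char(void);	/* placeholder */

static void example_rx_chars(struct tty_struct *tty, int char_count)
{
	int len = tty_buffer_request_room(tty, char_count);

	while (len--)
		tty_insert_flip_char(tty, my_hw_read_char(), TTY_NORMAL);

	/* hand the buffered characters to the line discipline */
	schedule_delayed_work(&tty->buf.work, 1);
}
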
diff --git a/drivers/char/drm/Makefile b/drivers/char/drm/Makefile
index e41060c7622..9d180c42816 100644
--- a/drivers/char/drm/Makefile
+++ b/drivers/char/drm/Makefile
@@ -3,7 +3,7 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
- drm_drv.o drm_fops.o drm_init.o drm_ioctl.o drm_irq.o \
+ drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
drm_sysfs.o
@@ -18,7 +18,7 @@ radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o
ffb-objs := ffb_drv.o ffb_context.o
sis-objs := sis_drv.o sis_ds.o sis_mm.o
savage-objs := savage_drv.o savage_bci.o savage_state.o
-via-objs := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o
+via-objs := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o via_dmablit.o
ifeq ($(CONFIG_COMPAT),y)
drm-objs += drm_ioc32.o
diff --git a/drivers/char/drm/ati_pcigart.c b/drivers/char/drm/ati_pcigart.c
index efff0eec618..5485382cade 100644
--- a/drivers/char/drm/ati_pcigart.c
+++ b/drivers/char/drm/ati_pcigart.c
@@ -52,7 +52,7 @@
# define ATI_MAX_PCIGART_PAGES 8192 /**< 32 MB aperture, 4K pages */
# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
-static unsigned long drm_ati_alloc_pcigart_table(void)
+static void *drm_ati_alloc_pcigart_table(void)
{
unsigned long address;
struct page *page;
@@ -72,27 +72,26 @@ static unsigned long drm_ati_alloc_pcigart_table(void)
}
DRM_DEBUG("%s: returning 0x%08lx\n", __FUNCTION__, address);
- return address;
+ return (void *)address;
}
-static void drm_ati_free_pcigart_table(unsigned long address)
+static void drm_ati_free_pcigart_table(void *address)
{
struct page *page;
int i;
DRM_DEBUG("%s\n", __FUNCTION__);
- page = virt_to_page(address);
+ page = virt_to_page((unsigned long)address);
for (i = 0; i < ATI_PCIGART_TABLE_PAGES; i++, page++) {
__put_page(page);
ClearPageReserved(page);
}
- free_pages(address, ATI_PCIGART_TABLE_ORDER);
+ free_pages((unsigned long)address, ATI_PCIGART_TABLE_ORDER);
}
-int drm_ati_pcigart_cleanup(drm_device_t * dev,
- drm_ati_pcigart_info * gart_info)
+int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
{
drm_sg_mem_t *entry = dev->sg;
unsigned long pages;
@@ -136,10 +135,10 @@ int drm_ati_pcigart_cleanup(drm_device_t * dev,
EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
-int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info * gart_info)
+int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
{
drm_sg_mem_t *entry = dev->sg;
- unsigned long address = 0;
+ void *address = NULL;
unsigned long pages;
u32 *pci_gart, page_base, bus_address = 0;
int i, j, ret = 0;
@@ -163,7 +162,7 @@ int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info * gart_info)
goto done;
}
- bus_address = pci_map_single(dev->pdev, (void *)address,
+ bus_address = pci_map_single(dev->pdev, address,
ATI_PCIGART_TABLE_PAGES *
PAGE_SIZE, PCI_DMA_TODEVICE);
if (bus_address == 0) {
@@ -176,7 +175,7 @@ int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info * gart_info)
address = gart_info->addr;
bus_address = gart_info->bus_addr;
DRM_DEBUG("PCI: Gart Table: VRAM %08X mapped at %08lX\n",
- bus_address, address);
+ bus_address, (unsigned long)address);
}
pci_gart = (u32 *) address;
@@ -195,7 +194,7 @@ int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info * gart_info)
if (entry->busaddr[i] == 0) {
DRM_ERROR("unable to map PCIGART pages!\n");
drm_ati_pcigart_cleanup(dev, gart_info);
- address = 0;
+ address = NULL;
bus_address = 0;
goto done;
}
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
index 64d6237fdd0..9da0ddb892b 100644
--- a/drivers/char/drm/drm.h
+++ b/drivers/char/drm/drm.h
@@ -90,8 +90,8 @@
#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
-#define _DRM_LOCK_HELD 0x80000000 /**< Hardware lock is held */
-#define _DRM_LOCK_CONT 0x40000000 /**< Hardware lock is contended */
+#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
+#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 3dc3c9d79ae..54b561e6948 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -144,20 +144,6 @@
/** \name Backward compatibility section */
/*@{*/
-#ifndef MODULE_LICENSE
-#define MODULE_LICENSE(x)
-#endif
-
-#ifndef preempt_disable
-#define preempt_disable()
-#define preempt_enable()
-#endif
-
-#ifndef pte_offset_map
-#define pte_offset_map pte_offset
-#define pte_unmap(pte)
-#endif
-
#define DRM_RPR_ARG(vma) vma,
#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
@@ -286,10 +272,13 @@ typedef int drm_ioctl_t(struct inode *inode, struct file *filp,
typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
unsigned long arg);
+#define DRM_AUTH 0x1
+#define DRM_MASTER 0x2
+#define DRM_ROOT_ONLY 0x4
+
typedef struct drm_ioctl_desc {
drm_ioctl_t *func;
- int auth_needed;
- int root_only;
+ int flags;
} drm_ioctl_desc_t;
typedef struct drm_devstate {
@@ -384,6 +373,7 @@ typedef struct drm_buf_entry {
/** File private data */
typedef struct drm_file {
int authenticated;
+ int master;
int minor;
pid_t pid;
uid_t uid;
@@ -532,8 +522,9 @@ typedef struct drm_vbl_sig {
typedef struct ati_pcigart_info {
int gart_table_location;
int is_pcie;
- unsigned long addr;
+ void *addr;
dma_addr_t bus_addr;
+ drm_local_map_t mapping;
} drm_ati_pcigart_info;
/**
@@ -544,16 +535,14 @@ typedef struct ati_pcigart_info {
struct drm_device;
struct drm_driver {
- int (*preinit) (struct drm_device *, unsigned long flags);
- void (*prerelease) (struct drm_device *, struct file * filp);
- void (*pretakedown) (struct drm_device *);
- int (*postcleanup) (struct drm_device *);
- int (*presetup) (struct drm_device *);
- int (*postsetup) (struct drm_device *);
+ int (*load) (struct drm_device *, unsigned long flags);
+ int (*firstopen) (struct drm_device *);
+ int (*open) (struct drm_device *, drm_file_t *);
+ void (*preclose) (struct drm_device *, struct file * filp);
+ void (*postclose) (struct drm_device *, drm_file_t *);
+ void (*lastclose) (struct drm_device *);
+ int (*unload) (struct drm_device *);
int (*dma_ioctl) (DRM_IOCTL_ARGS);
- int (*open_helper) (struct drm_device *, drm_file_t *);
- void (*free_filp_priv) (struct drm_device *, drm_file_t *);
- void (*release) (struct drm_device *, struct file * filp);
void (*dma_ready) (struct drm_device *);
int (*dma_quiescent) (struct drm_device *);
int (*context_ctor) (struct drm_device * dev, int context);
@@ -561,8 +550,9 @@ struct drm_driver {
int (*kernel_context_switch) (struct drm_device * dev, int old,
int new);
void (*kernel_context_switch_unlock) (struct drm_device * dev,
- drm_lock_t * lock);
+ drm_lock_t *lock);
int (*vblank_wait) (struct drm_device * dev, unsigned int *sequence);
+ int (*dri_library_name) (struct drm_device *dev, char *buf);
/**
* Called by \c drm_device_is_agp. Typically used to determine if a
@@ -579,16 +569,24 @@ struct drm_driver {
/* these have to be filled in */
- int (*postinit) (struct drm_device *, unsigned long flags);
- irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
+ irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
void (*irq_preinstall) (struct drm_device * dev);
void (*irq_postinstall) (struct drm_device * dev);
void (*irq_uninstall) (struct drm_device * dev);
void (*reclaim_buffers) (struct drm_device * dev, struct file * filp);
+ void (*reclaim_buffers_locked) (struct drm_device *dev,
+ struct file *filp);
unsigned long (*get_map_ofs) (drm_map_t * map);
unsigned long (*get_reg_ofs) (struct drm_device * dev);
void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
- int (*version) (drm_version_t * version);
+
+ int major;
+ int minor;
+ int patchlevel;
+ char *name;
+ char *desc;
+ char *date;
+
u32 driver_features;
int dev_priv_size;
drm_ioctl_desc_t *ioctls;
@@ -752,19 +750,43 @@ static inline int drm_core_has_MTRR(struct drm_device *dev)
{
return drm_core_check_feature(dev, DRIVER_USE_MTRR);
}
+
+#define DRM_MTRR_WC MTRR_TYPE_WRCOMB
+
+static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
+ unsigned int flags)
+{
+ return mtrr_add(offset, size, flags, 1);
+}
+
+static inline int drm_mtrr_del(int handle, unsigned long offset,
+ unsigned long size, unsigned int flags)
+{
+ return mtrr_del(handle, offset, size);
+}
+
#else
#define drm_core_has_MTRR(dev) (0)
+
+#define DRM_MTRR_WC 0
+
+static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
+ unsigned int flags)
+{
+ return 0;
+}
+
+static inline int drm_mtrr_del(int handle, unsigned long offset,
+ unsigned long size, unsigned int flags)
+{
+ return 0;
+}
#endif
/******************************************************************/
/** \name Internal function definitions */
/*@{*/
- /* Misc. support (drm_init.h) */
-extern int drm_flags;
-extern void drm_parse_options(char *s);
-extern int drm_cpu_valid(void);
-
/* Driver support (drm_drv.h) */
extern int drm_init(struct drm_driver *driver);
extern void drm_exit(struct drm_driver *driver);
@@ -772,12 +794,11 @@ extern int drm_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern long drm_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_takedown(drm_device_t * dev);
+extern int drm_lastclose(drm_device_t *dev);
/* Device support (drm_fops.h) */
extern int drm_open(struct inode *inode, struct file *filp);
extern int drm_stub_open(struct inode *inode, struct file *filp);
-extern int drm_flush(struct file *filp);
extern int drm_fasync(int fd, struct file *filp, int on);
extern int drm_release(struct inode *inode, struct file *filp);
@@ -819,6 +840,8 @@ extern int drm_getstats(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_setversion(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
+extern int drm_noop(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
/* Context IOCTL support (drm_context.h) */
extern int drm_resctx(struct inode *inode, struct file *filp,
@@ -857,10 +880,6 @@ extern int drm_getmagic(struct inode *inode, struct file *filp,
extern int drm_authmagic(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
- /* Placeholder for ioctls past */
-extern int drm_noop(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-
/* Locking IOCTL support (drm_lock.h) */
extern int drm_lock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
@@ -873,6 +892,7 @@ extern int drm_lock_free(drm_device_t * dev,
/* Buffer management support (drm_bufs.h) */
extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request);
extern int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request);
+extern int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request);
extern int drm_addmap(drm_device_t * dev, unsigned int offset,
unsigned int size, drm_map_type_t type,
drm_map_flags_t flags, drm_local_map_t ** map_ptr);
@@ -908,8 +928,8 @@ extern void drm_core_reclaim_buffers(drm_device_t * dev, struct file *filp);
/* IRQ support (drm_irq.h) */
extern int drm_control(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_irq_uninstall(drm_device_t * dev);
extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
+extern int drm_irq_uninstall(drm_device_t * dev);
extern void drm_driver_irq_preinstall(drm_device_t * dev);
extern void drm_driver_irq_postinstall(drm_device_t * dev);
extern void drm_driver_irq_uninstall(drm_device_t * dev);
@@ -933,13 +953,17 @@ extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
extern int drm_agp_info(drm_device_t * dev, drm_agp_info_t * info);
extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_agp_alloc(struct inode *inode, struct file *filp,
+extern int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request);
+extern int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_agp_free(struct inode *inode, struct file *filp,
+extern int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request);
+extern int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_agp_unbind(struct inode *inode, struct file *filp,
+extern int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request);
+extern int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_agp_bind(struct inode *inode, struct file *filp,
+extern int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request);
+extern int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge,
size_t pages, u32 type);
@@ -991,10 +1015,8 @@ extern struct drm_sysfs_class *drm_sysfs_create(struct module *owner,
char *name);
extern void drm_sysfs_destroy(struct drm_sysfs_class *cs);
extern struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs,
- dev_t dev,
- struct device *device,
- const char *fmt, ...);
-extern void drm_sysfs_device_remove(dev_t dev);
+ drm_head_t *head);
+extern void drm_sysfs_device_remove(struct class_device *class_dev);
/* Inline replacements for DRM_IOREMAP macros */
static __inline__ void drm_core_ioremap(struct drm_map *map,
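
Two themes run through the drmP.h hunks above: the driver lifecycle hooks are renamed (preinit/presetup/prerelease/pretakedown/postcleanup become load/firstopen/preclose/lastclose/unload, open_helper becomes open, free_filp_priv becomes postclose), and the per-driver version() callback is replaced by static fields that the core copies out itself in drm_version(). A hedged sketch of the version part, with made-up values:

/* Illustrative only: with the version() hook gone, a driver carries its
 * version statically and drm_version() copies these fields straight out. */
static struct drm_driver example_driver = {
	/* .preinit/.presetup/.prerelease/.pretakedown/.postcleanup map to
	 * .load/.firstopen/.preclose/.lastclose/.unload respectively */
	.major      = 1,
	.minor      = 0,
	.patchlevel = 0,
	.name       = "example",
	.desc       = "Example DRM driver",
	.date       = "20051102",
};
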
diff --git a/drivers/char/drm/drm_agpsupport.c b/drivers/char/drm/drm_agpsupport.c
index 2b6453a9ffc..fabc930c67a 100644
--- a/drivers/char/drm/drm_agpsupport.c
+++ b/drivers/char/drm/drm_agpsupport.c
@@ -1,5 +1,5 @@
/**
- * \file drm_agpsupport.h
+ * \file drm_agpsupport.c
* DRM support for AGP/GART backend
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
@@ -91,7 +91,7 @@ int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
/**
* Acquire the AGP device.
*
- * \param dev DRM device that is to acquire AGP
+ * \param dev DRM device that is to acquire AGP.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device hasn't been acquired before and calls
@@ -134,7 +134,7 @@ int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
/**
* Release the AGP device.
*
- * \param dev DRM device that is to release AGP
+ * \param dev DRM device that is to release AGP.
* \return zero on success or a negative number on failure.
*
* Verifies the AGP device has been acquired and calls \c agp_backend_release.
@@ -147,7 +147,6 @@ int drm_agp_release(drm_device_t * dev)
dev->agp->acquired = 0;
return 0;
}
-
EXPORT_SYMBOL(drm_agp_release);
int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
@@ -208,30 +207,22 @@ int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
* Verifies the AGP device is present and has been acquired, allocates the
* memory via alloc_agp() and creates a drm_agp_mem entry for it.
*/
-int drm_agp_alloc(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_agp_buffer_t request;
drm_agp_mem_t *entry;
DRM_AGP_MEM *memory;
unsigned long pages;
u32 type;
- drm_agp_buffer_t __user *argp = (void __user *)arg;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (copy_from_user(&request, argp, sizeof(request)))
- return -EFAULT;
if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS)))
return -ENOMEM;
memset(entry, 0, sizeof(*entry));
- pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
- type = (u32) request.type;
-
+ pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
+ type = (u32) request->type;
if (!(memory = drm_alloc_agp(dev, pages, type))) {
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
return -ENOMEM;
@@ -247,16 +238,39 @@ int drm_agp_alloc(struct inode *inode, struct file *filp,
dev->agp->memory->prev = entry;
dev->agp->memory = entry;
- request.handle = entry->handle;
- request.physical = memory->physical;
+ request->handle = entry->handle;
+ request->physical = memory->physical;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_agp_alloc);
+
+int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->head->dev;
+ drm_agp_buffer_t request;
+ drm_agp_buffer_t __user *argp = (void __user *)arg;
+ int err;
+
+ if (copy_from_user(&request, argp, sizeof(request)))
+ return -EFAULT;
+
+ err = drm_agp_alloc(dev, &request);
+ if (err)
+ return err;
if (copy_to_user(argp, &request, sizeof(request))) {
+ drm_agp_mem_t *entry = dev->agp->memory;
+
dev->agp->memory = entry->next;
dev->agp->memory->prev = NULL;
- drm_free_agp(memory, pages);
+ drm_free_agp(entry->memory, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
return -EFAULT;
}
+
return 0;
}
@@ -293,21 +307,14 @@ static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev,
* Verifies the AGP device is present and acquired, looks-up the AGP memory
* entry and passes it to the unbind_agp() function.
*/
-int drm_agp_unbind(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_agp_binding_t request;
drm_agp_mem_t *entry;
int ret;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (copy_from_user
- (&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
- return -EFAULT;
- if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
+ if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
return -EINVAL;
if (!entry->bound)
return -EINVAL;
@@ -316,6 +323,21 @@ int drm_agp_unbind(struct inode *inode, struct file *filp,
entry->bound = 0;
return ret;
}
+EXPORT_SYMBOL(drm_agp_unbind);
+
+int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->head->dev;
+ drm_agp_binding_t request;
+
+ if (copy_from_user
+ (&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
+ return -EFAULT;
+
+ return drm_agp_unbind(dev, &request);
+}
/**
* Bind AGP memory into the GATT (ioctl)
@@ -330,26 +352,19 @@ int drm_agp_unbind(struct inode *inode, struct file *filp,
* is currently bound into the GATT. Looks-up the AGP memory entry and passes
* it to bind_agp() function.
*/
-int drm_agp_bind(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_agp_binding_t request;
drm_agp_mem_t *entry;
int retcode;
int page;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (copy_from_user
- (&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
- return -EFAULT;
- if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
+ if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
return -EINVAL;
if (entry->bound)
return -EINVAL;
- page = (request.offset + PAGE_SIZE - 1) / PAGE_SIZE;
+ page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
if ((retcode = drm_bind_agp(entry->memory, page)))
return retcode;
entry->bound = dev->agp->base + (page << PAGE_SHIFT);
@@ -357,6 +372,21 @@ int drm_agp_bind(struct inode *inode, struct file *filp,
dev->agp->base, entry->bound);
return 0;
}
+EXPORT_SYMBOL(drm_agp_bind);
+
+int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->head->dev;
+ drm_agp_binding_t request;
+
+ if (copy_from_user
+ (&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
+ return -EFAULT;
+
+ return drm_agp_bind(dev, &request);
+}
/**
* Free AGP memory (ioctl).
@@ -372,20 +402,13 @@ int drm_agp_bind(struct inode *inode, struct file *filp,
* unbind_agp(). Frees it via free_agp() as well as the entry itself
* and unlinks from the doubly linked list it's inserted in.
*/
-int drm_agp_free(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_agp_buffer_t request;
drm_agp_mem_t *entry;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- if (copy_from_user
- (&request, (drm_agp_buffer_t __user *) arg, sizeof(request)))
- return -EFAULT;
- if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
+ if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
return -EINVAL;
if (entry->bound)
drm_unbind_agp(entry->memory);
@@ -402,12 +425,30 @@ int drm_agp_free(struct inode *inode, struct file *filp,
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
return 0;
}
+EXPORT_SYMBOL(drm_agp_free);
+
+int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->head->dev;
+ drm_agp_buffer_t request;
+
+ if (copy_from_user
+ (&request, (drm_agp_buffer_t __user *) arg, sizeof(request)))
+ return -EFAULT;
+
+ return drm_agp_free(dev, &request);
+}
/**
* Initialize the AGP resources.
*
* \return pointer to a drm_agp_head structure.
*
+ * Gets the drm_agp_t structure which is made available by the agpgart module
+ * via the inter_module_* functions. Creates and initializes a drm_agp_head
+ * structure.
*/
drm_agp_head_t *drm_agp_init(drm_device_t * dev)
{
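
Each AGP ioctl in this file is split into an exported core helper that works on a request struct plus a thin *_ioctl wrapper that only does the copy_from_user()/copy_to_user() dance. The point of the split, sketched with a hypothetical in-kernel caller that no longer has to fake a user-space ioctl:

/* Illustrative caller, not from the patch; drm_agp_bind() and the
 * handle/offset fields are the ones introduced above. */
static int example_bind_gart(drm_device_t *dev, unsigned long handle,
			     unsigned long offset)
{
	drm_agp_binding_t request = {
		.handle = handle,
		.offset = offset,
	};

	return drm_agp_bind(dev, &request);
}
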
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index 319bdea8de8..1db12dcb680 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -36,22 +36,21 @@
#include <linux/vmalloc.h>
#include "drmP.h"
-unsigned long drm_get_resource_start(drm_device_t * dev, unsigned int resource)
+unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
{
return pci_resource_start(dev->pdev, resource);
}
-
EXPORT_SYMBOL(drm_get_resource_start);
-unsigned long drm_get_resource_len(drm_device_t * dev, unsigned int resource)
+unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
{
return pci_resource_len(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_len);
-static drm_map_list_t *drm_find_matching_map(drm_device_t * dev,
- drm_local_map_t * map)
+static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
+ drm_local_map_t *map)
{
struct list_head *list;
@@ -74,7 +73,7 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t * dev,
#ifdef _LP64
static __inline__ unsigned int HandleID(unsigned long lhandle,
- drm_device_t * dev)
+ drm_device_t *dev)
{
static unsigned int map32_handle = START_RANGE;
unsigned int hash;
@@ -155,7 +154,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
- if (map->offset + map->size < map->offset ||
+ if (map->offset + (map->size-1) < map->offset ||
map->offset < virt_to_phys(high_memory)) {
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -EINVAL;
@@ -301,6 +300,9 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
return -EFAULT;
}
+ if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
+ return -EPERM;
+
err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
&maplist);
@@ -332,7 +334,7 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
*
* \sa drm_addmap
*/
-int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map)
+int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
{
struct list_head *list;
drm_map_list_t *r_list = NULL;
@@ -384,10 +386,9 @@ int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map)
return 0;
}
-
EXPORT_SYMBOL(drm_rmmap_locked);
-int drm_rmmap(drm_device_t * dev, drm_local_map_t * map)
+int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
int ret;
@@ -397,7 +398,6 @@ int drm_rmmap(drm_device_t * dev, drm_local_map_t * map)
return ret;
}
-
EXPORT_SYMBOL(drm_rmmap);
/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
@@ -548,7 +548,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
DRM_DEBUG("count: %d\n", count);
DRM_DEBUG("order: %d\n", order);
DRM_DEBUG("size: %d\n", size);
- DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+ DRM_DEBUG("agp_offset: %lx\n", agp_offset);
DRM_DEBUG("alignment: %d\n", alignment);
DRM_DEBUG("page_order: %d\n", page_order);
DRM_DEBUG("total: %d\n", total);
@@ -649,6 +649,8 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
}
dma->buf_count += entry->buf_count;
+ dma->seg_count += entry->seg_count;
+ dma->page_count += byte_count >> PAGE_SHIFT;
dma->byte_count += byte_count;
DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
@@ -664,7 +666,6 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
atomic_dec(&dev->buf_alloc);
return 0;
}
-
EXPORT_SYMBOL(drm_addbufs_agp);
#endif /* __OS_HAS_AGP */
@@ -689,9 +690,13 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
return -EINVAL;
+
if (!dma)
return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
count = request->count;
order = drm_order(request->size);
size = 1 << order;
@@ -882,7 +887,6 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
return 0;
}
-
EXPORT_SYMBOL(drm_addbufs_pci);
static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
@@ -908,6 +912,9 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
if (!dma)
return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
count = request->count;
order = drm_order(request->size);
size = 1 << order;
@@ -1026,6 +1033,8 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
}
dma->buf_count += entry->buf_count;
+ dma->seg_count += entry->seg_count;
+ dma->page_count += byte_count >> PAGE_SHIFT;
dma->byte_count += byte_count;
DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
@@ -1042,7 +1051,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
return 0;
}
-static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
+int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
{
drm_device_dma_t *dma = dev->dma;
drm_buf_entry_t *entry;
@@ -1065,6 +1074,9 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
if (!dma)
return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
count = request->count;
order = drm_order(request->size);
size = 1 << order;
@@ -1181,6 +1193,8 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
}
dma->buf_count += entry->buf_count;
+ dma->seg_count += entry->seg_count;
+ dma->page_count += byte_count >> PAGE_SHIFT;
dma->byte_count += byte_count;
DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
@@ -1196,6 +1210,8 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
atomic_dec(&dev->buf_alloc);
return 0;
}
+EXPORT_SYMBOL(drm_addbufs_fb);
+
/**
* Add buffers for DMA transfers (ioctl).
@@ -1577,5 +1593,6 @@ int drm_order(unsigned long size)
return order;
}
-
EXPORT_SYMBOL(drm_order);
+
+
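
The relaxed wrap-around check in drm_addmap_core() above is easiest to see with concrete 32-bit numbers:

/*
 * offset = 0xfff00000, size = 0x00100000 (a map ending exactly at 4GB):
 *   offset + size        = 0x00000000  -> old test wrongly rejected the map
 *   offset + (size - 1)  = 0xffffffff  -> new test accepts it
 * A range that really wraps (say size = 0x00200000 at the same offset)
 * still fails both forms of the test.
 */
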
diff --git a/drivers/char/drm/drm_context.c b/drivers/char/drm/drm_context.c
index bd958d69a2a..f8425452694 100644
--- a/drivers/char/drm/drm_context.c
+++ b/drivers/char/drm/drm_context.c
@@ -433,7 +433,7 @@ int drm_addctx(struct inode *inode, struct file *filp,
if (ctx.handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_ctor)
if (!dev->driver->context_ctor(dev, ctx.handle)) {
- DRM_DEBUG( "Running out of ctxs or memory.\n");
+ DRM_DEBUG("Running out of ctxs or memory.\n");
return -ENOMEM;
}
}
diff --git a/drivers/char/drm/drm_core.h b/drivers/char/drm/drm_core.h
index cc97bb906dd..f4f9db6c7ed 100644
--- a/drivers/char/drm/drm_core.h
+++ b/drivers/char/drm/drm_core.h
@@ -24,11 +24,11 @@
#define CORE_NAME "drm"
#define CORE_DESC "DRM shared core routines"
-#define CORE_DATE "20040925"
+#define CORE_DATE "20051102"
#define DRM_IF_MAJOR 1
#define DRM_IF_MINOR 2
#define CORE_MAJOR 1
#define CORE_MINOR 0
-#define CORE_PATCHLEVEL 0
+#define CORE_PATCHLEVEL 1
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index 4dff7554eb0..c4fa5a29582 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -56,66 +56,66 @@ static int drm_version(struct inode *inode, struct file *filp,
/** Ioctl table */
static drm_ioctl_desc_t drm_ioctls[] = {
- [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = {drm_irq_by_busid, 0, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = {drm_getmap, 0, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = {drm_getclient, 0, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = {drm_getstats, 0, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = {drm_setversion, 0, 1},
-
- [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = {drm_setunique, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = {drm_noop, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = {drm_noop, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = {drm_authmagic, 1, 1},
-
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = {drm_addmap_ioctl, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = {drm_rmmap_ioctl, 1, 0},
-
- [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, 1, 0},
-
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, 1, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = {drm_newctx, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = {drm_resctx, 1, 0},
-
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = {drm_adddraw, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = {drm_rmdraw, 1, 1},
-
- [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = {drm_lock, 1, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = {drm_unlock, 1, 0},
-
- [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = {drm_noop, 1, 0},
-
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = {drm_addbufs, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = {drm_markbufs, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = {drm_infobufs, 1, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, 1, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = {drm_freebufs, 1, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = {drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = {drm_getmap, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = {drm_getclient, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = {drm_getstats, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = {drm_setversion, DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = {drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = {drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = {drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = {drm_rmmap_ioctl, DRM_AUTH},
+
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, DRM_AUTH},
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = {drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = {drm_resctx, DRM_AUTH},
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = {drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = {drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+
+ [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = {drm_lock, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = {drm_unlock, DRM_AUTH},
+
+ [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = {drm_noop, DRM_AUTH},
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = {drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = {drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = {drm_infobufs, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = {drm_freebufs, DRM_AUTH},
/* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
+ [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = {NULL, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, 1, 1},
+ [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
#if __OS_HAS_AGP
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire_ioctl, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release_ioctl, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable_ioctl, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info_ioctl, 1, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind, 1, 1},
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
#endif
- [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, 1, 1},
- [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, 1, 1},
+ [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0},
};
#define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( drm_ioctls )
@@ -129,7 +129,7 @@ static drm_ioctl_desc_t drm_ioctls[] = {
*
* \sa drm_device
*/
-int drm_takedown(drm_device_t * dev)
+int drm_lastclose(drm_device_t * dev)
{
drm_magic_entry_t *pt, *next;
drm_map_list_t *r_list;
@@ -138,9 +138,9 @@ int drm_takedown(drm_device_t * dev)
DRM_DEBUG("\n");
- if (dev->driver->pretakedown)
- dev->driver->pretakedown(dev);
- DRM_DEBUG("driver pretakedown completed\n");
+ if (dev->driver->lastclose)
+ dev->driver->lastclose(dev);
+ DRM_DEBUG("driver lastclose completed\n");
if (dev->unique) {
drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
@@ -233,7 +233,7 @@ int drm_takedown(drm_device_t * dev)
}
up(&dev->struct_sem);
- DRM_DEBUG("takedown completed\n");
+ DRM_DEBUG("lastclose completed\n");
return 0;
}
@@ -281,7 +281,7 @@ EXPORT_SYMBOL(drm_init);
/**
* Called via cleanup_module() at module unload time.
*
- * Cleans up all DRM device, calling takedown().
+ * Cleans up all DRM devices, calling drm_lastclose().
*
* \sa drm_init
*/
@@ -294,7 +294,7 @@ static void drm_cleanup(drm_device_t * dev)
return;
}
- drm_takedown(dev);
+ drm_lastclose(dev);
if (dev->maplist) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
@@ -317,8 +317,8 @@ static void drm_cleanup(drm_device_t * dev)
dev->agp = NULL;
}
- if (dev->driver->postcleanup)
- dev->driver->postcleanup(dev);
+ if (dev->driver->unload)
+ dev->driver->unload(dev);
drm_put_head(&dev->primary);
if (drm_put_dev(dev))
@@ -342,12 +342,12 @@ void drm_exit(struct drm_driver *driver)
if (head->dev->driver != driver)
continue;
dev = head->dev;
- }
- if (dev) {
- /* release the pci driver */
- if (dev->pdev)
- pci_dev_put(dev->pdev);
- drm_cleanup(dev);
+ if (dev) {
+ /* release the pci driver */
+ if (dev->pdev)
+ pci_dev_put(dev->pdev);
+ drm_cleanup(dev);
+ }
}
DRM_INFO("Module unloaded\n");
}
@@ -432,14 +432,17 @@ static int drm_version(struct inode *inode, struct file *filp,
drm_device_t *dev = priv->head->dev;
drm_version_t __user *argp = (void __user *)arg;
drm_version_t version;
- int ret;
+ int len;
if (copy_from_user(&version, argp, sizeof(version)))
return -EFAULT;
- /* version is a required function to return the personality module version */
- if ((ret = dev->driver->version(&version)))
- return ret;
+ version.version_major = dev->driver->major;
+ version.version_minor = dev->driver->minor;
+ version.version_patchlevel = dev->driver->patchlevel;
+ DRM_COPY(version.name, dev->driver->name);
+ DRM_COPY(version.date, dev->driver->date);
+ DRM_COPY(version.desc, dev->driver->desc);
if (copy_to_user(argp, &version, sizeof(version)))
return -EFAULT;
@@ -493,8 +496,9 @@ int drm_ioctl(struct inode *inode, struct file *filp,
if (!func) {
DRM_DEBUG("no function\n");
retcode = -EINVAL;
- } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN)) ||
- (ioctl->auth_needed && !priv->authenticated)) {
+ } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
+ ((ioctl->flags & DRM_AUTH) && !priv->authenticated) ||
+ ((ioctl->flags & DRM_MASTER) && !priv->master)) {
retcode = -EACCES;
} else {
retcode = func(inode, filp, cmd, arg);
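
With drm_ioctl() now testing DRM_ROOT_ONLY, DRM_AUTH and DRM_MASTER independently (the last against the new priv->master flag set for the first opener in drm_fops.c below), a table entry can require master status without also demanding CAP_SYS_ADMIN. Hypothetical driver-side entries under the new model — the ioctl numbers and handlers are placeholders, the flags are the ones added to drmP.h:

static drm_ioctl_desc_t example_ioctls[] = {
	/* any authenticated client */
	[DRM_IOCTL_NR(DRM_IOCTL_EXAMPLE_GET_PARAM)] = {example_get_param, DRM_AUTH},
	/* master (first opener) only, no CAP_SYS_ADMIN required */
	[DRM_IOCTL_NR(DRM_IOCTL_EXAMPLE_INIT)] = {example_init, DRM_AUTH|DRM_MASTER},
};
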
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index bf0a740122b..403f44a1bf0 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -35,6 +35,7 @@
*/
#include "drmP.h"
+#include "drm_sarea.h"
#include <linux/poll.h>
static int drm_open_helper(struct inode *inode, struct file *filp,
@@ -42,15 +43,21 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
static int drm_setup(drm_device_t * dev)
{
+ drm_local_map_t *map;
int i;
int ret;
- if (dev->driver->presetup) {
- ret = dev->driver->presetup(dev);
+ if (dev->driver->firstopen) {
+ ret = dev->driver->firstopen(dev);
if (ret != 0)
return ret;
}
+ /* prebuild the SAREA */
+ i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
+ if (i != 0)
+ return i;
+
atomic_set(&dev->ioctl_count, 0);
atomic_set(&dev->vma_count, 0);
dev->buf_use = 0;
@@ -109,8 +116,6 @@ static int drm_setup(drm_device_t * dev)
* drm_select_queue fails between the time the interrupt is
* initialized and the time the queues are initialized.
*/
- if (dev->driver->postsetup)
- dev->driver->postsetup(dev);
return 0;
}
@@ -154,10 +159,168 @@ int drm_open(struct inode *inode, struct file *filp)
return retcode;
}
-
EXPORT_SYMBOL(drm_open);
/**
+ * File \c open operation.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ *
+ * Puts the dev->fops corresponding to the device minor number into
+ * \p filp, calls the \c open method, and restores the file operations.
+ */
+int drm_stub_open(struct inode *inode, struct file *filp)
+{
+ drm_device_t *dev = NULL;
+ int minor = iminor(inode);
+ int err = -ENODEV;
+ struct file_operations *old_fops;
+
+ DRM_DEBUG("\n");
+
+ if (!((minor >= 0) && (minor < drm_cards_limit)))
+ return -ENODEV;
+
+ if (!drm_heads[minor])
+ return -ENODEV;
+
+ if (!(dev = drm_heads[minor]->dev))
+ return -ENODEV;
+
+ old_fops = filp->f_op;
+ filp->f_op = fops_get(&dev->driver->fops);
+ if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
+ fops_put(filp->f_op);
+ filp->f_op = fops_get(old_fops);
+ }
+ fops_put(old_fops);
+
+ return err;
+}
+
+/**
+ * Check whether DRI will run on this CPU.
+ *
+ * \return non-zero if the DRI will run on this CPU, or zero otherwise.
+ */
+static int drm_cpu_valid(void)
+{
+#if defined(__i386__)
+ if (boot_cpu_data.x86 == 3)
+ return 0; /* No cmpxchg on a 386 */
+#endif
+#if defined(__sparc__) && !defined(__sparc_v9__)
+ return 0; /* No cmpxchg before v9 sparc. */
+#endif
+ return 1;
+}
+
+/**
+ * Called whenever a process opens /dev/drm.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param dev device.
+ * \return zero on success or a negative number on failure.
+ *
+ * Creates and initializes a drm_file structure for the file private data in \p
+ * filp and add it into the double linked list in \p dev.
+ */
+static int drm_open_helper(struct inode *inode, struct file *filp,
+ drm_device_t * dev)
+{
+ int minor = iminor(inode);
+ drm_file_t *priv;
+ int ret;
+
+ if (filp->f_flags & O_EXCL)
+ return -EBUSY; /* No exclusive opens */
+ if (!drm_cpu_valid())
+ return -EINVAL;
+
+ DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
+
+ priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
+ if (!priv)
+ return -ENOMEM;
+
+ memset(priv, 0, sizeof(*priv));
+ filp->private_data = priv;
+ priv->uid = current->euid;
+ priv->pid = current->pid;
+ priv->minor = minor;
+ priv->head = drm_heads[minor];
+ priv->ioctl_count = 0;
+ /* for compatibility root is always authenticated */
+ priv->authenticated = capable(CAP_SYS_ADMIN);
+ priv->lock_count = 0;
+
+ if (dev->driver->open) {
+ ret = dev->driver->open(dev, priv);
+ if (ret < 0)
+ goto out_free;
+ }
+
+ down(&dev->struct_sem);
+ if (!dev->file_last) {
+ priv->next = NULL;
+ priv->prev = NULL;
+ dev->file_first = priv;
+ dev->file_last = priv;
+ /* first opener automatically becomes master */
+ priv->master = 1;
+ } else {
+ priv->next = NULL;
+ priv->prev = dev->file_last;
+ dev->file_last->next = priv;
+ dev->file_last = priv;
+ }
+ up(&dev->struct_sem);
+
+#ifdef __alpha__
+ /*
+ * Default the hose
+ */
+ if (!dev->hose) {
+ struct pci_dev *pci_dev;
+ pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
+ if (pci_dev) {
+ dev->hose = pci_dev->sysdata;
+ pci_dev_put(pci_dev);
+ }
+ if (!dev->hose) {
+ struct pci_bus *b = pci_bus_b(pci_root_buses.next);
+ if (b)
+ dev->hose = b->sysdata;
+ }
+ }
+#endif
+
+ return 0;
+ out_free:
+ drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
+ filp->private_data = NULL;
+ return ret;
+}
+
+/** No-op. */
+int drm_fasync(int fd, struct file *filp, int on)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->head->dev;
+ int retcode;
+
+ DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
+ (long)old_encode_dev(priv->head->device));
+ retcode = fasync_helper(fd, filp, on, &dev->buf_async);
+ if (retcode < 0)
+ return retcode;
+ return 0;
+}
+EXPORT_SYMBOL(drm_fasync);
+
+/**
* Release file.
*
* \param inode device inode
@@ -167,7 +330,7 @@ EXPORT_SYMBOL(drm_open);
* If the hardware lock is held then free it, and take it again for the kernel
* context since it's necessary to reclaim buffers. Unlink the file private
* data from its list and free it. Decreases the open count and if it reaches
- * zero calls takedown().
+ * zero calls drm_lastclose().
*/
int drm_release(struct inode *inode, struct file *filp)
{
@@ -180,8 +343,8 @@ int drm_release(struct inode *inode, struct file *filp)
DRM_DEBUG("open_count = %d\n", dev->open_count);
- if (dev->driver->prerelease)
- dev->driver->prerelease(dev, filp);
+ if (dev->driver->preclose)
+ dev->driver->preclose(dev, filp);
/* ========================================================
* Begin inline drm_release
@@ -197,8 +360,8 @@ int drm_release(struct inode *inode, struct file *filp)
DRM_DEBUG("File %p released, freeing lock for context %d\n",
filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
- if (dev->driver->release)
- dev->driver->release(dev, filp);
+ if (dev->driver->reclaim_buffers_locked)
+ dev->driver->reclaim_buffers_locked(dev, filp);
drm_lock_free(dev, &dev->lock.hw_lock->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
@@ -207,7 +370,7 @@ int drm_release(struct inode *inode, struct file *filp)
hardware at this point, possibly
processed via a callback to the X
server. */
- } else if (dev->driver->release && priv->lock_count
+ } else if (dev->driver->reclaim_buffers_locked && priv->lock_count
&& dev->lock.hw_lock) {
/* The lock is required to reclaim buffers */
DECLARE_WAITQUEUE(entry, current);
@@ -237,15 +400,14 @@ int drm_release(struct inode *inode, struct file *filp)
__set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->lock.lock_queue, &entry);
if (!retcode) {
- if (dev->driver->release)
- dev->driver->release(dev, filp);
+ dev->driver->reclaim_buffers_locked(dev, filp);
drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT);
}
}
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)
- && !dev->driver->release) {
+ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+ !dev->driver->reclaim_buffers_locked) {
dev->driver->reclaim_buffers(dev, filp);
}
@@ -292,9 +454,8 @@ int drm_release(struct inode *inode, struct file *filp)
}
up(&dev->struct_sem);
- if (dev->driver->free_filp_priv)
- dev->driver->free_filp_priv(dev, priv);
-
+ if (dev->driver->postclose)
+ dev->driver->postclose(dev, priv);
drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
/* ========================================================
@@ -313,7 +474,7 @@ int drm_release(struct inode *inode, struct file *filp)
}
spin_unlock(&dev->count_lock);
unlock_kernel();
- return drm_takedown(dev);
+ return drm_lastclose(dev);
}
spin_unlock(&dev->count_lock);
@@ -321,129 +482,11 @@ int drm_release(struct inode *inode, struct file *filp)
return retcode;
}
-
EXPORT_SYMBOL(drm_release);
-/**
- * Called whenever a process opens /dev/drm.
- *
- * \param inode device inode.
- * \param filp file pointer.
- * \param dev device.
- * \return zero on success or a negative number on failure.
- *
- * Creates and initializes a drm_file structure for the file private data in \p
- * filp and add it into the double linked list in \p dev.
- */
-static int drm_open_helper(struct inode *inode, struct file *filp,
- drm_device_t * dev)
-{
- int minor = iminor(inode);
- drm_file_t *priv;
- int ret;
-
- if (filp->f_flags & O_EXCL)
- return -EBUSY; /* No exclusive opens */
- if (!drm_cpu_valid())
- return -EINVAL;
-
- DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
-
- priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
- if (!priv)
- return -ENOMEM;
-
- memset(priv, 0, sizeof(*priv));
- filp->private_data = priv;
- priv->uid = current->euid;
- priv->pid = current->pid;
- priv->minor = minor;
- priv->head = drm_heads[minor];
- priv->ioctl_count = 0;
- priv->authenticated = capable(CAP_SYS_ADMIN);
- priv->lock_count = 0;
-
- if (dev->driver->open_helper) {
- ret = dev->driver->open_helper(dev, priv);
- if (ret < 0)
- goto out_free;
- }
-
- down(&dev->struct_sem);
- if (!dev->file_last) {
- priv->next = NULL;
- priv->prev = NULL;
- dev->file_first = priv;
- dev->file_last = priv;
- } else {
- priv->next = NULL;
- priv->prev = dev->file_last;
- dev->file_last->next = priv;
- dev->file_last = priv;
- }
- up(&dev->struct_sem);
-
-#ifdef __alpha__
- /*
- * Default the hose
- */
- if (!dev->hose) {
- struct pci_dev *pci_dev;
- pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
- if (pci_dev) {
- dev->hose = pci_dev->sysdata;
- pci_dev_put(pci_dev);
- }
- if (!dev->hose) {
- struct pci_bus *b = pci_bus_b(pci_root_buses.next);
- if (b)
- dev->hose = b->sysdata;
- }
- }
-#endif
-
- return 0;
- out_free:
- drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
- filp->private_data = NULL;
- return ret;
-}
-
-/** No-op. */
-int drm_flush(struct file *filp)
-{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
-
- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
- current->pid, (long)old_encode_dev(priv->head->device),
- dev->open_count);
- return 0;
-}
-
-EXPORT_SYMBOL(drm_flush);
-
-/** No-op. */
-int drm_fasync(int fd, struct file *filp, int on)
-{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- int retcode;
-
- DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
- (long)old_encode_dev(priv->head->device));
- retcode = fasync_helper(fd, filp, on, &dev->buf_async);
- if (retcode < 0)
- return retcode;
- return 0;
-}
-
-EXPORT_SYMBOL(drm_fasync);
-
/** No-op. */
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
{
return 0;
}
-
EXPORT_SYMBOL(drm_poll);
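
The drm_fops.c hunks above rename the per-driver hooks on the file-close path: the old release hook becomes reclaim_buffers_locked (it runs with the hardware lock held), free_filp_priv becomes postclose, and the final teardown call changes from drm_takedown() to drm_lastclose(); drm_open_helper(), drm_flush() and drm_fasync() also leave this file. A minimal sketch of the close ordering the new names imply; the hook and field names come from the hunks, the surrounding control flow is condensed, not copied:

/* Sketch only: close-path hook ordering implied by the drm_fops.c hunks.
 * preclose, reclaim_buffers_locked, postclose and drm_lastclose() are the
 * names introduced by this patch; the flow is paraphrased. */
static void drm_close_path_sketch(drm_device_t *dev, struct file *filp,
                                  drm_file_t *priv)
{
        if (dev->driver->preclose)                      /* was: prerelease */
                dev->driver->preclose(dev, filp);

        if (dev->driver->reclaim_buffers_locked)        /* was: release */
                dev->driver->reclaim_buffers_locked(dev, filp);
        else if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                dev->driver->reclaim_buffers(dev, filp);

        if (dev->driver->postclose)                     /* was: free_filp_priv */
                dev->driver->postclose(dev, priv);
        drm_free(priv, sizeof(*priv), DRM_MEM_FILES);

        if (!--dev->open_count)                         /* last closer tears down */
                drm_lastclose(dev);                     /* was: drm_takedown() */
}
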
diff --git a/drivers/char/drm/drm_init.c b/drivers/char/drm/drm_init.c
deleted file mode 100644
index 754b934715c..00000000000
--- a/drivers/char/drm/drm_init.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * \file drm_init.c
- * Setup/Cleanup for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-/**
- * Check whether DRI will run on this CPU.
- *
- * \return non-zero if the DRI will run on this CPU, or zero otherwise.
- */
-int drm_cpu_valid(void)
-{
-#if defined(__i386__)
- if (boot_cpu_data.x86 == 3)
- return 0; /* No cmpxchg on a 386 */
-#endif
-#if defined(__sparc__) && !defined(__sparc_v9__)
- return 0; /* No cmpxchg before v9 sparc. */
-#endif
- return 1;
-}
diff --git a/drivers/char/drm/drm_ioc32.c b/drivers/char/drm/drm_ioc32.c
index dd91ff6b474..e9e2db18952 100644
--- a/drivers/char/drm/drm_ioc32.c
+++ b/drivers/char/drm/drm_ioc32.c
@@ -28,7 +28,6 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
-#include <linux/ioctl32.h>
#include "drmP.h"
#include "drm_core.h"
diff --git a/drivers/char/drm/drm_ioctl.c b/drivers/char/drm/drm_ioctl.c
index 9b0feba6b06..bcd4e604d3e 100644
--- a/drivers/char/drm/drm_ioctl.c
+++ b/drivers/char/drm/drm_ioctl.c
@@ -137,17 +137,22 @@ int drm_setunique(struct inode *inode, struct file *filp,
static int drm_set_busid(drm_device_t * dev)
{
+ int len;
+
if (dev->unique != NULL)
return EBUSY;
- dev->unique_len = 20;
+ dev->unique_len = 40;
dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER);
if (dev->unique == NULL)
return ENOMEM;
- snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
+ len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func);
+ if (len > dev->unique_len)
+ DRM_ERROR("Unique buffer overflowed\n");
+
dev->devname =
drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len +
2, DRM_MEM_DRIVER);
@@ -239,7 +244,7 @@ int drm_getclient(struct inode *inode, struct file *filp,
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->head->dev;
- drm_client_t __user *argp = (void __user *)arg;
+ drm_client_t __user *argp = (drm_client_t __user *)arg;
drm_client_t client;
drm_file_t *pt;
int idx;
@@ -262,7 +267,7 @@ int drm_getclient(struct inode *inode, struct file *filp,
client.iocs = pt->ioctl_count;
up(&dev->struct_sem);
- if (copy_to_user((drm_client_t __user *) arg, &client, sizeof(client)))
+ if (copy_to_user(argp, &client, sizeof(client)))
return -EFAULT;
return 0;
}
@@ -325,17 +330,13 @@ int drm_setversion(DRM_IOCTL_ARGS)
drm_set_version_t retv;
int if_version;
drm_set_version_t __user *argp = (void __user *)data;
- drm_version_t version;
DRM_COPY_FROM_USER_IOCTL(sv, argp, sizeof(sv));
- memset(&version, 0, sizeof(version));
-
- dev->driver->version(&version);
retv.drm_di_major = DRM_IF_MAJOR;
retv.drm_di_minor = DRM_IF_MINOR;
- retv.drm_dd_major = version.version_major;
- retv.drm_dd_minor = version.version_minor;
+ retv.drm_dd_major = dev->driver->major;
+ retv.drm_dd_minor = dev->driver->minor;
DRM_COPY_TO_USER_IOCTL(argp, retv, sizeof(sv));
@@ -343,7 +344,7 @@ int drm_setversion(DRM_IOCTL_ARGS)
if (sv.drm_di_major != DRM_IF_MAJOR ||
sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR)
return EINVAL;
- if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_dd_minor);
+ if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_di_minor);
dev->if_version = DRM_MAX(if_version, dev->if_version);
if (sv.drm_di_minor >= 1) {
/*
@@ -354,9 +355,9 @@ int drm_setversion(DRM_IOCTL_ARGS)
}
if (sv.drm_dd_major != -1) {
- if (sv.drm_dd_major != version.version_major ||
+ if (sv.drm_dd_major != dev->driver->major ||
sv.drm_dd_minor < 0
- || sv.drm_dd_minor > version.version_minor)
+ || sv.drm_dd_minor > dev->driver->minor)
return EINVAL;
if (dev->driver->set_version)
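
Two changes stand out in drm_ioctl.c above: drm_set_busid() grows the unique bus-id buffer from 20 to 40 bytes and now warns when snprintf() had to truncate, and drm_setversion() reads the driver version straight from dev->driver->major/minor instead of filling a drm_version_t through a per-driver version() callback (the interface version is also computed from drm_di_minor rather than drm_dd_minor). For scale, the "pci:%04x:%02x:%02x.%d" format needs about 16 characters for ordinary domain/bus/slot/function values, so 40 bytes leaves ample room; a stand-alone sketch of the formatting, with the sizes taken from the hunk:

#include <stdio.h>

int main(void)
{
        /* dev->unique is allocated unique_len + 1 bytes in the patch and
         * snprintf() is limited to unique_len; this mirrors that call. */
        char unique[40 + 1];
        int len = snprintf(unique, 40, "pci:%04x:%02x:%02x.%d",
                           0xffff, 0xff, 0x1f, 7);

        printf("%s (formatted length %d)\n", unique, len);
        return 0;
}
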
diff --git a/drivers/char/drm/drm_lock.c b/drivers/char/drm/drm_lock.c
index b48a595d54e..f9e45303498 100644
--- a/drivers/char/drm/drm_lock.c
+++ b/drivers/char/drm/drm_lock.c
@@ -130,7 +130,6 @@ int drm_lock(struct inode *inode, struct file *filp,
/* dev->driver->kernel_context_switch isn't used by any of the x86
* drivers but is used by the Sparc driver.
*/
-
if (dev->driver->kernel_context_switch &&
dev->last_context != lock.context) {
dev->driver->kernel_context_switch(dev, dev->last_context,
diff --git a/drivers/char/drm/drm_memory.c b/drivers/char/drm/drm_memory.c
index abef2acf99f..8074771e348 100644
--- a/drivers/char/drm/drm_memory.c
+++ b/drivers/char/drm/drm_memory.c
@@ -145,30 +145,22 @@ DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type)
return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
}
-EXPORT_SYMBOL(drm_alloc_agp);
-
/** Wrapper around agp_free_memory() */
int drm_free_agp(DRM_AGP_MEM * handle, int pages)
{
return drm_agp_free_memory(handle) ? 0 : -EINVAL;
}
-EXPORT_SYMBOL(drm_free_agp);
-
/** Wrapper around agp_bind_memory() */
int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
{
return drm_agp_bind_memory(handle, start);
}
-EXPORT_SYMBOL(drm_bind_agp);
-
/** Wrapper around agp_unbind_memory() */
int drm_unbind_agp(DRM_AGP_MEM * handle)
{
return drm_agp_unbind_memory(handle);
}
-
-EXPORT_SYMBOL(drm_unbind_agp);
#endif /* agp */
#endif /* debug_memory */
diff --git a/drivers/char/drm/drm_memory_debug.h b/drivers/char/drm/drm_memory_debug.h
index b370aca718d..e84605fc54a 100644
--- a/drivers/char/drm/drm_memory_debug.h
+++ b/drivers/char/drm/drm_memory_debug.h
@@ -1,5 +1,5 @@
/**
- * \file drm_memory.h
+ * \file drm_memory_debug.h
* Memory management wrappers for DRM.
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
@@ -43,42 +43,41 @@ typedef struct drm_mem_stats {
unsigned long bytes_freed;
} drm_mem_stats_t;
-static DEFINE_SPINLOCK(DRM(mem_lock));
-static unsigned long DRM(ram_available) = 0; /* In pages */
-static unsigned long DRM(ram_used) = 0;
-static drm_mem_stats_t DRM(mem_stats)[] =
+static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED;
+static unsigned long drm_ram_available = 0; /* In pages */
+static unsigned long drm_ram_used = 0;
+static drm_mem_stats_t drm_mem_stats[] =
{
- [DRM_MEM_DMA] = {
- "dmabufs"},[DRM_MEM_SAREA] = {
- "sareas"},[DRM_MEM_DRIVER] = {
- "driver"},[DRM_MEM_MAGIC] = {
- "magic"},[DRM_MEM_IOCTLS] = {
- "ioctltab"},[DRM_MEM_MAPS] = {
- "maplist"},[DRM_MEM_VMAS] = {
- "vmalist"},[DRM_MEM_BUFS] = {
- "buflist"},[DRM_MEM_SEGS] = {
- "seglist"},[DRM_MEM_PAGES] = {
- "pagelist"},[DRM_MEM_FILES] = {
- "files"},[DRM_MEM_QUEUES] = {
- "queues"},[DRM_MEM_CMDS] = {
- "commands"},[DRM_MEM_MAPPINGS] = {
- "mappings"},[DRM_MEM_BUFLISTS] = {
- "buflists"},[DRM_MEM_AGPLISTS] = {
- "agplist"},[DRM_MEM_SGLISTS] = {
- "sglist"},[DRM_MEM_TOTALAGP] = {
- "totalagp"},[DRM_MEM_BOUNDAGP] = {
- "boundagp"},[DRM_MEM_CTXBITMAP] = {
- "ctxbitmap"},[DRM_MEM_CTXLIST] = {
- "ctxlist"},[DRM_MEM_STUB] = {
- "stub"}, {
- NULL, 0,} /* Last entry must be null */
+ [DRM_MEM_DMA] = {"dmabufs"},
+ [DRM_MEM_SAREA] = {"sareas"},
+ [DRM_MEM_DRIVER] = {"driver"},
+ [DRM_MEM_MAGIC] = {"magic"},
+ [DRM_MEM_IOCTLS] = {"ioctltab"},
+ [DRM_MEM_MAPS] = {"maplist"},
+ [DRM_MEM_VMAS] = {"vmalist"},
+ [DRM_MEM_BUFS] = {"buflist"},
+ [DRM_MEM_SEGS] = {"seglist"},
+ [DRM_MEM_PAGES] = {"pagelist"},
+ [DRM_MEM_FILES] = {"files"},
+ [DRM_MEM_QUEUES] = {"queues"},
+ [DRM_MEM_CMDS] = {"commands"},
+ [DRM_MEM_MAPPINGS] = {"mappings"},
+ [DRM_MEM_BUFLISTS] = {"buflists"},
+ [DRM_MEM_AGPLISTS] = {"agplist"},
+ [DRM_MEM_SGLISTS] = {"sglist"},
+ [DRM_MEM_TOTALAGP] = {"totalagp"},
+ [DRM_MEM_BOUNDAGP] = {"boundagp"},
+ [DRM_MEM_CTXBITMAP] = {"ctxbitmap"},
+ [DRM_MEM_CTXLIST] = {"ctxlist"},
+ [DRM_MEM_STUB] = {"stub"},
+ {NULL, 0,} /* Last entry must be null */
};
-void DRM(mem_init) (void) {
+void drm_mem_init (void) {
drm_mem_stats_t *mem;
struct sysinfo si;
- for (mem = DRM(mem_stats); mem->name; ++mem) {
+ for (mem = drm_mem_stats; mem->name; ++mem) {
mem->succeed_count = 0;
mem->free_count = 0;
mem->fail_count = 0;
@@ -87,13 +86,13 @@ void DRM(mem_init) (void) {
}
si_meminfo(&si);
- DRM(ram_available) = si.totalram;
- DRM(ram_used) = 0;
+ drm_ram_available = si.totalram;
+ drm_ram_used = 0;
}
/* drm_mem_info is called whenever a process reads /dev/drm/mem. */
-static int DRM(_mem_info) (char *buf, char **start, off_t offset,
+static int drm__mem_info (char *buf, char **start, off_t offset,
int request, int *eof, void *data) {
drm_mem_stats_t *pt;
int len = 0;
@@ -112,11 +111,11 @@ static int DRM(_mem_info) (char *buf, char **start, off_t offset,
" | allocs bytes\n\n");
DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
"system", 0, 0, 0,
- DRM(ram_available) << (PAGE_SHIFT - 10));
+ drm_ram_available << (PAGE_SHIFT - 10));
DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
- "locked", 0, 0, 0, DRM(ram_used) >> 10);
+ "locked", 0, 0, 0, drm_ram_used >> 10);
DRM_PROC_PRINT("\n");
- for (pt = DRM(mem_stats); pt->name; pt++) {
+ for (pt = drm_mem_stats; pt->name; pt++) {
DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
pt->name,
pt->succeed_count,
@@ -135,17 +134,17 @@ static int DRM(_mem_info) (char *buf, char **start, off_t offset,
return len - offset;
}
-int DRM(mem_info) (char *buf, char **start, off_t offset,
+int drm_mem_info (char *buf, char **start, off_t offset,
int len, int *eof, void *data) {
int ret;
- spin_lock(&DRM(mem_lock));
- ret = DRM(_mem_info) (buf, start, offset, len, eof, data);
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ret = drm__mem_info (buf, start, offset, len, eof, data);
+ spin_unlock(&drm_mem_lock);
return ret;
}
-void *DRM(alloc) (size_t size, int area) {
+void *drm_alloc (size_t size, int area) {
void *pt;
if (!size) {
@@ -154,41 +153,41 @@ void *DRM(alloc) (size_t size, int area) {
}
if (!(pt = kmalloc(size, GFP_KERNEL))) {
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[area].fail_count;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[area].fail_count;
+ spin_unlock(&drm_mem_lock);
return NULL;
}
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[area].succeed_count;
- DRM(mem_stats)[area].bytes_allocated += size;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[area].succeed_count;
+ drm_mem_stats[area].bytes_allocated += size;
+ spin_unlock(&drm_mem_lock);
return pt;
}
-void *DRM(calloc) (size_t nmemb, size_t size, int area) {
+void *drm_calloc (size_t nmemb, size_t size, int area) {
void *addr;
- addr = DRM(alloc) (nmemb * size, area);
+ addr = drm_alloc (nmemb * size, area);
if (addr != NULL)
memset((void *)addr, 0, size * nmemb);
return addr;
}
-void *DRM(realloc) (void *oldpt, size_t oldsize, size_t size, int area) {
+void *drm_realloc (void *oldpt, size_t oldsize, size_t size, int area) {
void *pt;
- if (!(pt = DRM(alloc) (size, area)))
+ if (!(pt = drm_alloc (size, area)))
return NULL;
if (oldpt && oldsize) {
memcpy(pt, oldpt, oldsize);
- DRM(free) (oldpt, oldsize, area);
+ drm_free (oldpt, oldsize, area);
}
return pt;
}
-void DRM(free) (void *pt, size_t size, int area) {
+void drm_free (void *pt, size_t size, int area) {
int alloc_count;
int free_count;
@@ -196,43 +195,43 @@ void DRM(free) (void *pt, size_t size, int area) {
DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
else
kfree(pt);
- spin_lock(&DRM(mem_lock));
- DRM(mem_stats)[area].bytes_freed += size;
- free_count = ++DRM(mem_stats)[area].free_count;
- alloc_count = DRM(mem_stats)[area].succeed_count;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ drm_mem_stats[area].bytes_freed += size;
+ free_count = ++drm_mem_stats[area].free_count;
+ alloc_count = drm_mem_stats[area].succeed_count;
+ spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
}
-unsigned long DRM(alloc_pages) (int order, int area) {
+unsigned long drm_alloc_pages (int order, int area) {
unsigned long address;
unsigned long bytes = PAGE_SIZE << order;
unsigned long addr;
unsigned int sz;
- spin_lock(&DRM(mem_lock));
- if ((DRM(ram_used) >> PAGE_SHIFT)
- > (DRM_RAM_PERCENT * DRM(ram_available)) / 100) {
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ if ((drm_ram_used >> PAGE_SHIFT)
+ > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
+ spin_unlock(&drm_mem_lock);
return 0;
}
- spin_unlock(&DRM(mem_lock));
+ spin_unlock(&drm_mem_lock);
address = __get_free_pages(GFP_KERNEL|__GFP_COMP, order);
if (!address) {
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[area].fail_count;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[area].fail_count;
+ spin_unlock(&drm_mem_lock);
return 0;
}
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[area].succeed_count;
- DRM(mem_stats)[area].bytes_allocated += bytes;
- DRM(ram_used) += bytes;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[area].succeed_count;
+ drm_mem_stats[area].bytes_allocated += bytes;
+ drm_ram_used += bytes;
+ spin_unlock(&drm_mem_lock);
/* Zero outside the lock */
memset((void *)address, 0, bytes);
@@ -246,7 +245,7 @@ unsigned long DRM(alloc_pages) (int order, int area) {
return address;
}
-void DRM(free_pages) (unsigned long address, int order, int area) {
+void drm_free_pages (unsigned long address, int order, int area) {
unsigned long bytes = PAGE_SIZE << order;
int alloc_count;
int free_count;
@@ -264,12 +263,12 @@ void DRM(free_pages) (unsigned long address, int order, int area) {
free_pages(address, order);
}
- spin_lock(&DRM(mem_lock));
- free_count = ++DRM(mem_stats)[area].free_count;
- alloc_count = DRM(mem_stats)[area].succeed_count;
- DRM(mem_stats)[area].bytes_freed += bytes;
- DRM(ram_used) -= bytes;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ free_count = ++drm_mem_stats[area].free_count;
+ alloc_count = drm_mem_stats[area].succeed_count;
+ drm_mem_stats[area].bytes_freed += bytes;
+ drm_ram_used -= bytes;
+ spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(area,
"Excess frees: %d frees, %d allocs\n",
@@ -277,7 +276,7 @@ void DRM(free_pages) (unsigned long address, int order, int area) {
}
}
-void *DRM(ioremap) (unsigned long offset, unsigned long size,
+void *drm_ioremap (unsigned long offset, unsigned long size,
drm_device_t * dev) {
void *pt;
@@ -288,19 +287,19 @@ void *DRM(ioremap) (unsigned long offset, unsigned long size,
}
if (!(pt = drm_ioremap(offset, size, dev))) {
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
+ spin_unlock(&drm_mem_lock);
return NULL;
}
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
- DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
+ drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
+ spin_unlock(&drm_mem_lock);
return pt;
}
-void *DRM(ioremap_nocache) (unsigned long offset, unsigned long size,
+void *drm_ioremap_nocache (unsigned long offset, unsigned long size,
drm_device_t * dev) {
void *pt;
@@ -311,19 +310,19 @@ void *DRM(ioremap_nocache) (unsigned long offset, unsigned long size,
}
if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
+ spin_unlock(&drm_mem_lock);
return NULL;
}
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
- DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
+ drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
+ spin_unlock(&drm_mem_lock);
return pt;
}
-void DRM(ioremapfree) (void *pt, unsigned long size, drm_device_t * dev) {
+void drm_ioremapfree (void *pt, unsigned long size, drm_device_t * dev) {
int alloc_count;
int free_count;
@@ -333,11 +332,11 @@ void DRM(ioremapfree) (void *pt, unsigned long size, drm_device_t * dev) {
else
drm_ioremapfree(pt, size, dev);
- spin_lock(&DRM(mem_lock));
- DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size;
- free_count = ++DRM(mem_stats)[DRM_MEM_MAPPINGS].free_count;
- alloc_count = DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
+ free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
+ alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
+ spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Excess frees: %d frees, %d allocs\n",
@@ -347,7 +346,7 @@ void DRM(ioremapfree) (void *pt, unsigned long size, drm_device_t * dev) {
#if __OS_HAS_AGP
-DRM_AGP_MEM *DRM(alloc_agp) (int pages, u32 type) {
+DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) {
DRM_AGP_MEM *handle;
if (!pages) {
@@ -355,21 +354,21 @@ DRM_AGP_MEM *DRM(alloc_agp) (int pages, u32 type) {
return NULL;
}
- if ((handle = DRM(agp_allocate_memory) (pages, type))) {
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
- DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_allocated
+ if ((handle = drm_agp_allocate_memory (pages, type))) {
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
+ drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated
+= pages << PAGE_SHIFT;
- spin_unlock(&DRM(mem_lock));
+ spin_unlock(&drm_mem_lock);
return handle;
}
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[DRM_MEM_TOTALAGP].fail_count;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count;
+ spin_unlock(&drm_mem_lock);
return NULL;
}
-int DRM(free_agp) (DRM_AGP_MEM * handle, int pages) {
+int drm_free_agp (DRM_AGP_MEM * handle, int pages) {
int alloc_count;
int free_count;
int retval = -EINVAL;
@@ -380,13 +379,13 @@ int DRM(free_agp) (DRM_AGP_MEM * handle, int pages) {
return retval;
}
- if (DRM(agp_free_memory) (handle)) {
- spin_lock(&DRM(mem_lock));
- free_count = ++DRM(mem_stats)[DRM_MEM_TOTALAGP].free_count;
- alloc_count = DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
- DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_freed
+ if (drm_agp_free_memory (handle)) {
+ spin_lock(&drm_mem_lock);
+ free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
+ alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
+ drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed
+= pages << PAGE_SHIFT;
- spin_unlock(&DRM(mem_lock));
+ spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
"Excess frees: %d frees, %d allocs\n",
@@ -397,7 +396,7 @@ int DRM(free_agp) (DRM_AGP_MEM * handle, int pages) {
return retval;
}
-int DRM(bind_agp) (DRM_AGP_MEM * handle, unsigned int start) {
+int drm_bind_agp (DRM_AGP_MEM * handle, unsigned int start) {
int retcode = -EINVAL;
if (!handle) {
@@ -406,21 +405,21 @@ int DRM(bind_agp) (DRM_AGP_MEM * handle, unsigned int start) {
return retcode;
}
- if (!(retcode = DRM(agp_bind_memory) (handle, start))) {
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
- DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_allocated
+ if (!(retcode = drm_agp_bind_memory (handle, start))) {
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
+ drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated
+= handle->page_count << PAGE_SHIFT;
- spin_unlock(&DRM(mem_lock));
+ spin_unlock(&drm_mem_lock);
return retcode;
}
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].fail_count;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count;
+ spin_unlock(&drm_mem_lock);
return retcode;
}
-int DRM(unbind_agp) (DRM_AGP_MEM * handle) {
+int drm_unbind_agp (DRM_AGP_MEM * handle) {
int alloc_count;
int free_count;
int retcode = -EINVAL;
@@ -431,14 +430,14 @@ int DRM(unbind_agp) (DRM_AGP_MEM * handle) {
return retcode;
}
- if ((retcode = DRM(agp_unbind_memory) (handle)))
+ if ((retcode = drm_agp_unbind_memory (handle)))
return retcode;
- spin_lock(&DRM(mem_lock));
- free_count = ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].free_count;
- alloc_count = DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
- DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_freed
+ spin_lock(&drm_mem_lock);
+ free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
+ alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
+ drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed
+= handle->page_count << PAGE_SHIFT;
- spin_unlock(&DRM(mem_lock));
+ spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
"Excess frees: %d frees, %d allocs\n",
diff --git a/drivers/char/drm/drm_os_linux.h b/drivers/char/drm/drm_os_linux.h
index d51aeb4966f..695115d7038 100644
--- a/drivers/char/drm/drm_os_linux.h
+++ b/drivers/char/drm/drm_os_linux.h
@@ -13,6 +13,7 @@
#define DRM_ERR(d) -(d)
/** Current process ID */
#define DRM_CURRENTPID current->pid
+#define DRM_SUSER(p) capable(CAP_SYS_ADMIN)
#define DRM_UDELAY(d) udelay(d)
/** Read a byte from a MMIO region */
#define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset))
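
drm_os_linux.h gains DRM_SUSER(p), mapping the BSD-style super-user test used by OS-independent DRM code onto capable(CAP_SYS_ADMIN); the argument is ignored on Linux. A hedged example of the kind of shared code it is meant to serve (the ioctl body is invented):

/* Sketch: portable DRM code can gate privileged paths on DRM_SUSER()
 * instead of calling capable() directly. */
static int privileged_ioctl_sketch(DRM_IOCTL_ARGS)
{
        if (!DRM_SUSER(current))
                return DRM_ERR(EPERM);
        /* ... privileged work would go here ... */
        return 0;
}
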
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index d66dc55e29a..5b1d3a04458 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -46,6 +46,7 @@
{0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
{0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
{0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
+ {0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
{0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
{0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
{0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
@@ -69,6 +70,7 @@
{0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
{0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
{0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+ {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
{0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
{0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \
{0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
@@ -82,10 +84,13 @@
{0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
{0x1002, 0x596A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x596B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
+ {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \
{0x1002, 0x5c62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \
{0x1002, 0x5c64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
+ {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+ {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \
{0, 0, 0}
#define r128_PCI_IDS \
@@ -176,7 +181,7 @@
#define viadrv_PCI_IDS \
{0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
{0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
@@ -196,6 +201,10 @@
{0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
+#define gamma_PCI_IDS \
+ {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0, 0, 0}
+
#define savage_PCI_IDS \
{0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
{0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
@@ -234,3 +243,4 @@
{0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
+
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index 3f452f763f0..6f943e3309e 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -61,16 +61,14 @@ static struct drm_proc_list {
const char *name; /**< file name */
int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/
} drm_proc_list[] = {
- {
- "name", drm_name_info}, {
- "mem", drm_mem_info}, {
- "vm", drm_vm_info}, {
- "clients", drm_clients_info}, {
- "queues", drm_queues_info}, {
- "bufs", drm_bufs_info},
+ {"name", drm_name_info},
+ {"mem", drm_mem_info},
+ {"vm", drm_vm_info},
+ {"clients", drm_clients_info},
+ {"queues", drm_queues_info},
+ {"bufs", drm_bufs_info},
#if DRM_DEBUG_CODE
- {
- "vma", drm_vma_info},
+ {"vma", drm_vma_info},
#endif
};
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 60b6f8e8bf6..42d766359ca 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -93,8 +93,8 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
dev->driver = driver;
- if (dev->driver->preinit)
- if ((retcode = dev->driver->preinit(dev, ent->driver_data)))
+ if (dev->driver->load)
+ if ((retcode = dev->driver->load(dev, ent->driver_data)))
goto error_out_unreg;
if (drm_core_has_AGP(dev)) {
@@ -124,47 +124,10 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
return 0;
error_out_unreg:
- drm_takedown(dev);
+ drm_lastclose(dev);
return retcode;
}
-/**
- * File \c open operation.
- *
- * \param inode device inode.
- * \param filp file pointer.
- *
- * Puts the dev->fops corresponding to the device minor number into
- * \p filp, call the \c open method, and restore the file operations.
- */
-int drm_stub_open(struct inode *inode, struct file *filp)
-{
- drm_device_t *dev = NULL;
- int minor = iminor(inode);
- int err = -ENODEV;
- struct file_operations *old_fops;
-
- DRM_DEBUG("\n");
-
- if (!((minor >= 0) && (minor < drm_cards_limit)))
- return -ENODEV;
-
- if (!drm_heads[minor])
- return -ENODEV;
-
- if (!(dev = drm_heads[minor]->dev))
- return -ENODEV;
-
- old_fops = filp->f_op;
- filp->f_op = fops_get(&dev->driver->fops);
- if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
- fops_put(filp->f_op);
- filp->f_op = fops_get(old_fops);
- }
- fops_put(old_fops);
-
- return err;
-}
/**
* Get a secondary minor number.
@@ -200,11 +163,7 @@ static int drm_get_head(drm_device_t * dev, drm_head_t * head)
goto err_g1;
}
- head->dev_class = drm_sysfs_device_add(drm_class,
- MKDEV(DRM_MAJOR,
- minor),
- &dev->pdev->dev,
- "card%d", minor);
+ head->dev_class = drm_sysfs_device_add(drm_class, head);
if (IS_ERR(head->dev_class)) {
printk(KERN_ERR
"DRM: Error sysfs_device_add.\n");
@@ -258,11 +217,10 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
}
if ((ret = drm_get_head(dev, &dev->primary)))
goto err_g1;
-
- /* postinit is a required function to display the signon banner */
- /* drivers add secondary heads here if needed */
- if ((ret = dev->driver->postinit(dev, ent->driver_data)))
- goto err_g1;
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ driver->name, driver->major, driver->minor, driver->patchlevel,
+ driver->date, dev->primary.minor);
return 0;
@@ -318,10 +276,9 @@ int drm_put_head(drm_head_t * head)
DRM_DEBUG("release secondary minor %d\n", minor);
drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
- drm_sysfs_device_remove(MKDEV(DRM_MAJOR, head->minor));
+ drm_sysfs_device_remove(head->dev_class);
- *head = (drm_head_t) {
- .dev = NULL};
+ *head = (drm_head_t) {.dev = NULL};
drm_heads[minor] = NULL;
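
In drm_stub.c the preinit hook becomes load, and the formerly required postinit/version callbacks disappear: the core now prints the sign-on banner itself from static name/major/minor/patchlevel/date fields in struct drm_driver, and drm_stub_open() leaves this file (presumably folded into the reworked open path elsewhere in the patch). Under the new scheme a driver mostly declares data, roughly as below; the field names match the i810/i830/i915 conversions further down, the values are placeholders:

static int example_driver_load(drm_device_t *dev, unsigned long flags)
{
        /* per-device setup that previously lived in a preinit() hook */
        return 0;
}

static struct drm_driver example_driver_sketch = {
        .load       = example_driver_load,
        .name       = "example",
        .desc       = "Example DRM driver",
        .date       = "20051209",
        .major      = 1,
        .minor      = 0,
        .patchlevel = 0,
};
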
diff --git a/drivers/char/drm/drm_sysfs.c b/drivers/char/drm/drm_sysfs.c
index 6d344976191..68e43ddc16a 100644
--- a/drivers/char/drm/drm_sysfs.c
+++ b/drivers/char/drm/drm_sysfs.c
@@ -15,8 +15,6 @@
#include <linux/device.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/string.h>
#include "drm_core.h"
#include "drmP.h"
@@ -28,15 +26,11 @@ struct drm_sysfs_class {
#define to_drm_sysfs_class(d) container_of(d, struct drm_sysfs_class, class)
struct simple_dev {
- struct list_head node;
dev_t dev;
struct class_device class_dev;
};
#define to_simple_dev(d) container_of(d, struct simple_dev, class_dev)
-static LIST_HEAD(simple_dev_list);
-static DEFINE_SPINLOCK(simple_dev_list_lock);
-
static void release_simple_dev(struct class_device *class_dev)
{
struct simple_dev *s_dev = to_simple_dev(class_dev);
@@ -124,6 +118,18 @@ void drm_sysfs_destroy(struct drm_sysfs_class *cs)
class_unregister(&cs->class);
}
+static ssize_t show_dri(struct class_device *class_device, char *buf)
+{
+ drm_device_t * dev = ((drm_head_t *)class_get_devdata(class_device))->dev;
+ if (dev->driver->dri_library_name)
+ return dev->driver->dri_library_name(dev, buf);
+ return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name);
+}
+
+static struct class_device_attribute class_device_attrs[] = {
+ __ATTR(dri_library_name, S_IRUGO, show_dri, NULL),
+};
+
/**
* drm_sysfs_device_add - adds a class device to sysfs for a character driver
* @cs: pointer to the struct drm_sysfs_class that this device should be registered to.
@@ -138,13 +144,11 @@ void drm_sysfs_destroy(struct drm_sysfs_class *cs)
* Note: the struct drm_sysfs_class passed to this function must have previously been
* created with a call to drm_sysfs_create().
*/
-struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, dev_t dev,
- struct device *device,
- const char *fmt, ...)
+struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs,
+ drm_head_t *head)
{
- va_list args;
struct simple_dev *s_dev = NULL;
- int retval;
+ int i, retval;
if ((cs == NULL) || (IS_ERR(cs))) {
retval = -ENODEV;
@@ -158,26 +162,23 @@ struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, dev_t dev,
}
memset(s_dev, 0x00, sizeof(*s_dev));
- s_dev->dev = dev;
- s_dev->class_dev.dev = device;
+ s_dev->dev = MKDEV(DRM_MAJOR, head->minor);
+ s_dev->class_dev.dev = &(head->dev->pdev)->dev;
s_dev->class_dev.class = &cs->class;
- va_start(args, fmt);
- vsnprintf(s_dev->class_dev.class_id, BUS_ID_SIZE, fmt, args);
- va_end(args);
+ snprintf(s_dev->class_dev.class_id, BUS_ID_SIZE, "card%d", head->minor);
retval = class_device_register(&s_dev->class_dev);
if (retval)
goto error;
class_device_create_file(&s_dev->class_dev, &cs->attr);
+ class_set_devdata(&s_dev->class_dev, head);
- spin_lock(&simple_dev_list_lock);
- list_add(&s_dev->node, &simple_dev_list);
- spin_unlock(&simple_dev_list_lock);
-
+ for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
+ class_device_create_file(&s_dev->class_dev, &class_device_attrs[i]);
return &s_dev->class_dev;
- error:
+error:
kfree(s_dev);
return ERR_PTR(retval);
}
@@ -189,23 +190,12 @@ struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, dev_t dev,
* This call unregisters and cleans up a class device that was created with a
* call to drm_sysfs_device_add()
*/
-void drm_sysfs_device_remove(dev_t dev)
+void drm_sysfs_device_remove(struct class_device *class_dev)
{
- struct simple_dev *s_dev = NULL;
- int found = 0;
-
- spin_lock(&simple_dev_list_lock);
- list_for_each_entry(s_dev, &simple_dev_list, node) {
- if (s_dev->dev == dev) {
- found = 1;
- break;
- }
- }
- if (found) {
- list_del(&s_dev->node);
- spin_unlock(&simple_dev_list_lock);
- class_device_unregister(&s_dev->class_dev);
- } else {
- spin_unlock(&simple_dev_list_lock);
- }
+ struct simple_dev *s_dev = to_simple_dev(class_dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
+ class_device_remove_file(&s_dev->class_dev, &class_device_attrs[i]);
+ class_device_unregister(&s_dev->class_dev);
}
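
The drm_sysfs.c rework keys each class device on its drm_head_t (dropping the private simple_dev list) and adds a read-only dri_library_name attribute: show_dri() calls an optional dev->driver->dri_library_name() hook and otherwise falls back to the PCI driver name, so user space can discover which DRI client library to load. A driver wanting to report a different name could supply the hook along these lines (the function and value are hypothetical):

/* Hypothetical override for the dri_library_name attribute added above;
 * sysfs show() buffers are PAGE_SIZE bytes. */
static int example_dri_library_name(drm_device_t *dev, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "example_dri\n");
}
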
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
index dba502373da..cc1b8908687 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/char/drm/i810_dma.c
@@ -114,7 +114,6 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
static struct file_operations i810_buffer_fops = {
.open = drm_open,
- .flush = drm_flush,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
@@ -1319,12 +1318,24 @@ static int i810_flip_bufs(struct inode *inode, struct file *filp,
return 0;
}
-void i810_driver_pretakedown(drm_device_t * dev)
+int i810_driver_load(drm_device_t *dev, unsigned long flags)
+{
+ /* i810 has 4 more counters */
+ dev->counters += 4;
+ dev->types[6] = _DRM_STAT_IRQ;
+ dev->types[7] = _DRM_STAT_PRIMARY;
+ dev->types[8] = _DRM_STAT_SECONDARY;
+ dev->types[9] = _DRM_STAT_DMA;
+
+ return 0;
+}
+
+void i810_driver_lastclose(drm_device_t * dev)
{
i810_dma_cleanup(dev);
}
-void i810_driver_prerelease(drm_device_t * dev, DRMFILE filp)
+void i810_driver_preclose(drm_device_t * dev, DRMFILE filp)
{
if (dev->dev_private) {
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1334,7 +1345,7 @@ void i810_driver_prerelease(drm_device_t * dev, DRMFILE filp)
}
}
-void i810_driver_release(drm_device_t * dev, struct file *filp)
+void i810_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
{
i810_reclaim_buffers(dev, filp);
}
@@ -1346,21 +1357,21 @@ int i810_driver_dma_quiescent(drm_device_t * dev)
}
drm_ioctl_desc_t i810_ioctls[] = {
- [DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, 1, 1},
- [DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, 1, 0},
- [DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, 1, 0},
- [DRM_IOCTL_NR(DRM_I810_FLUSH)] = {i810_flush_ioctl, 1, 0},
- [DRM_IOCTL_NR(DRM_I810_GETAGE)] = {i810_getage, 1, 0},
- [DRM_IOCTL_NR(DRM_I810_GETBUF)] = {i810_getbuf, 1, 0},
- [DRM_IOCTL_NR(DRM_I810_SWAP)] = {i810_swap_bufs, 1, 0},
- [DRM_IOCTL_NR(DRM_I810_COPY)] = {i810_copybuf, 1, 0},
- [DRM_IOCTL_NR(DRM_I810_DOCOPY)] = {i810_docopy, 1, 0},
- [DRM_IOCTL_NR(DRM_I810_OV0INFO)] = {i810_ov0_info, 1, 0},
- [DRM_IOCTL_NR(DRM_I810_FSTATUS)] = {i810_fstatus, 1, 0},
- [DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = {i810_ov0_flip, 1, 0},
- [DRM_IOCTL_NR(DRM_I810_MC)] = {i810_dma_mc, 1, 1},
- [DRM_IOCTL_NR(DRM_I810_RSTATUS)] = {i810_rstatus, 1, 0},
- [DRM_IOCTL_NR(DRM_I810_FLIP)] = {i810_flip_bufs, 1, 0}
+ [DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I810_FLUSH)] = {i810_flush_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I810_GETAGE)] = {i810_getage, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I810_GETBUF)] = {i810_getbuf, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I810_SWAP)] = {i810_swap_bufs, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I810_COPY)] = {i810_copybuf, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I810_DOCOPY)] = {i810_docopy, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I810_OV0INFO)] = {i810_ov0_info, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I810_FSTATUS)] = {i810_fstatus, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = {i810_ov0_flip, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I810_MC)] = {i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_I810_RSTATUS)] = {i810_rstatus, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I810_FLIP)] = {i810_flip_bufs, DRM_AUTH}
};
int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
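
The i810 ioctl table (and the i830 and i915 tables below) replaces the old positional "auth needed, root only" integer pair with named permission flags that the core dispatcher checks before calling the handler. A hedged reading of the entry layout and of the three flags used in these tables:

/* Paraphrased sketch of the descriptor; the real definition is in drmP.h. */
typedef struct drm_ioctl_desc_sketch {
        drm_ioctl_t *func;
        int flags;              /* bitmask checked by the core before dispatch */
} drm_ioctl_desc_sketch_t;

/* DRM_AUTH       caller must have authenticated against the current master
 * DRM_MASTER     caller must hold the DRM master (typically the X server)
 * DRM_ROOT_ONLY  caller must have CAP_SYS_ADMIN
 *
 * e.g. {i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY} replaces {..., 1, 1}. */
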
diff --git a/drivers/char/drm/i810_drv.c b/drivers/char/drm/i810_drv.c
index 070cef6c2b4..dfe6ad2b6a6 100644
--- a/drivers/char/drm/i810_drv.c
+++ b/drivers/char/drm/i810_drv.c
@@ -38,38 +38,6 @@
#include "drm_pciids.h"
-static int postinit(struct drm_device *dev, unsigned long flags)
-{
- /* i810 has 4 more counters */
- dev->counters += 4;
- dev->types[6] = _DRM_STAT_IRQ;
- dev->types[7] = _DRM_STAT_PRIMARY;
- dev->types[8] = _DRM_STAT_SECONDARY;
- dev->types[9] = _DRM_STAT_DMA;
-
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
- DRIVER_NAME,
- DRIVER_MAJOR,
- DRIVER_MINOR,
- DRIVER_PATCHLEVEL,
- DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
- );
- return 0;
-}
-
-static int version(drm_version_t * version)
-{
- int len;
-
- version->version_major = DRIVER_MAJOR;
- version->version_minor = DRIVER_MINOR;
- version->version_patchlevel = DRIVER_PATCHLEVEL;
- DRM_COPY(version->name, DRIVER_NAME);
- DRM_COPY(version->date, DRIVER_DATE);
- DRM_COPY(version->desc, DRIVER_DESC);
- return 0;
-}
-
static struct pci_device_id pciidlist[] = {
i810_PCI_IDS
};
@@ -79,16 +47,14 @@ static struct drm_driver driver = {
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
.dev_priv_size = sizeof(drm_i810_buf_priv_t),
- .pretakedown = i810_driver_pretakedown,
- .prerelease = i810_driver_prerelease,
+ .load = i810_driver_load,
+ .lastclose = i810_driver_lastclose,
+ .preclose = i810_driver_preclose,
.device_is_agp = i810_driver_device_is_agp,
- .release = i810_driver_release,
+ .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
.dma_quiescent = i810_driver_dma_quiescent,
- .reclaim_buffers = i810_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
- .postinit = postinit,
- .version = version,
.ioctls = i810_ioctls,
.fops = {
.owner = THIS_MODULE,
@@ -98,13 +64,19 @@ static struct drm_driver driver = {
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
- }
- ,
+ },
+
.pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- }
- ,
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ },
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
};
static int __init i810_init(void)
diff --git a/drivers/char/drm/i810_drv.h b/drivers/char/drm/i810_drv.h
index c78f36aaa2f..a18b80d9192 100644
--- a/drivers/char/drm/i810_drv.h
+++ b/drivers/char/drm/i810_drv.h
@@ -116,9 +116,13 @@ typedef struct drm_i810_private {
extern void i810_reclaim_buffers(drm_device_t * dev, struct file *filp);
extern int i810_driver_dma_quiescent(drm_device_t * dev);
-extern void i810_driver_release(drm_device_t * dev, struct file *filp);
-extern void i810_driver_pretakedown(drm_device_t * dev);
-extern void i810_driver_prerelease(drm_device_t * dev, DRMFILE filp);
+extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev,
+ struct file *filp);
+extern int i810_driver_load(struct drm_device *, unsigned long flags);
+extern void i810_driver_lastclose(drm_device_t * dev);
+extern void i810_driver_preclose(drm_device_t * dev, DRMFILE filp);
+extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev,
+ struct file *filp);
extern int i810_driver_device_is_agp(drm_device_t * dev);
extern drm_ioctl_desc_t i810_ioctls[];
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index dc94f191442..4fea32aed6d 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -116,7 +116,6 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
static struct file_operations i830_buffer_fops = {
.open = drm_open,
- .flush = drm_flush,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = i830_mmap_buffers,
@@ -1517,12 +1516,24 @@ static int i830_setparam(struct inode *inode, struct file *filp,
return 0;
}
-void i830_driver_pretakedown(drm_device_t * dev)
+int i830_driver_load(drm_device_t *dev, unsigned long flags)
+{
+ /* i830 has 4 more counters */
+ dev->counters += 4;
+ dev->types[6] = _DRM_STAT_IRQ;
+ dev->types[7] = _DRM_STAT_PRIMARY;
+ dev->types[8] = _DRM_STAT_SECONDARY;
+ dev->types[9] = _DRM_STAT_DMA;
+
+ return 0;
+}
+
+void i830_driver_lastclose(drm_device_t * dev)
{
i830_dma_cleanup(dev);
}
-void i830_driver_prerelease(drm_device_t * dev, DRMFILE filp)
+void i830_driver_preclose(drm_device_t * dev, DRMFILE filp)
{
if (dev->dev_private) {
drm_i830_private_t *dev_priv = dev->dev_private;
@@ -1532,7 +1543,7 @@ void i830_driver_prerelease(drm_device_t * dev, DRMFILE filp)
}
}
-void i830_driver_release(drm_device_t * dev, struct file *filp)
+void i830_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
{
i830_reclaim_buffers(dev, filp);
}
@@ -1544,20 +1555,20 @@ int i830_driver_dma_quiescent(drm_device_t * dev)
}
drm_ioctl_desc_t i830_ioctls[] = {
- [DRM_IOCTL_NR(DRM_I830_INIT)] = {i830_dma_init, 1, 1},
- [DRM_IOCTL_NR(DRM_I830_VERTEX)] = {i830_dma_vertex, 1, 0},
- [DRM_IOCTL_NR(DRM_I830_CLEAR)] = {i830_clear_bufs, 1, 0},
- [DRM_IOCTL_NR(DRM_I830_FLUSH)] = {i830_flush_ioctl, 1, 0},
- [DRM_IOCTL_NR(DRM_I830_GETAGE)] = {i830_getage, 1, 0},
- [DRM_IOCTL_NR(DRM_I830_GETBUF)] = {i830_getbuf, 1, 0},
- [DRM_IOCTL_NR(DRM_I830_SWAP)] = {i830_swap_bufs, 1, 0},
- [DRM_IOCTL_NR(DRM_I830_COPY)] = {i830_copybuf, 1, 0},
- [DRM_IOCTL_NR(DRM_I830_DOCOPY)] = {i830_docopy, 1, 0},
- [DRM_IOCTL_NR(DRM_I830_FLIP)] = {i830_flip_bufs, 1, 0},
- [DRM_IOCTL_NR(DRM_I830_IRQ_EMIT)] = {i830_irq_emit, 1, 0},
- [DRM_IOCTL_NR(DRM_I830_IRQ_WAIT)] = {i830_irq_wait, 1, 0},
- [DRM_IOCTL_NR(DRM_I830_GETPARAM)] = {i830_getparam, 1, 0},
- [DRM_IOCTL_NR(DRM_I830_SETPARAM)] = {i830_setparam, 1, 0}
+ [DRM_IOCTL_NR(DRM_I830_INIT)] = {i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_I830_VERTEX)] = {i830_dma_vertex, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_CLEAR)] = {i830_clear_bufs, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_FLUSH)] = {i830_flush_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_GETAGE)] = {i830_getage, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_GETBUF)] = {i830_getbuf, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_SWAP)] = {i830_swap_bufs, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_COPY)] = {i830_copybuf, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_DOCOPY)] = {i830_docopy, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_FLIP)] = {i830_flip_bufs, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_IRQ_EMIT)] = {i830_irq_emit, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_IRQ_WAIT)] = {i830_irq_wait, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_GETPARAM)] = {i830_getparam, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I830_SETPARAM)] = {i830_setparam, DRM_AUTH}
};
int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
diff --git a/drivers/char/drm/i830_drv.c b/drivers/char/drm/i830_drv.c
index acd821e8fe4..722658188f5 100644
--- a/drivers/char/drm/i830_drv.c
+++ b/drivers/char/drm/i830_drv.c
@@ -40,37 +40,6 @@
#include "drm_pciids.h"
-static int postinit(struct drm_device *dev, unsigned long flags)
-{
- dev->counters += 4;
- dev->types[6] = _DRM_STAT_IRQ;
- dev->types[7] = _DRM_STAT_PRIMARY;
- dev->types[8] = _DRM_STAT_SECONDARY;
- dev->types[9] = _DRM_STAT_DMA;
-
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
- DRIVER_NAME,
- DRIVER_MAJOR,
- DRIVER_MINOR,
- DRIVER_PATCHLEVEL,
- DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
- );
- return 0;
-}
-
-static int version(drm_version_t * version)
-{
- int len;
-
- version->version_major = DRIVER_MAJOR;
- version->version_minor = DRIVER_MINOR;
- version->version_patchlevel = DRIVER_PATCHLEVEL;
- DRM_COPY(version->name, DRIVER_NAME);
- DRM_COPY(version->date, DRIVER_DATE);
- DRM_COPY(version->desc, DRIVER_DESC);
- return 0;
-}
-
static struct pci_device_id pciidlist[] = {
i830_PCI_IDS
};
@@ -83,12 +52,12 @@ static struct drm_driver driver = {
.driver_features |= DRIVER_HAVE_IRQ | DRIVER_SHARED_IRQ,
#endif
.dev_priv_size = sizeof(drm_i830_buf_priv_t),
- .pretakedown = i830_driver_pretakedown,
- .prerelease = i830_driver_prerelease,
+ .load = i830_driver_load,
+ .lastclose = i830_driver_lastclose,
+ .preclose = i830_driver_preclose,
.device_is_agp = i830_driver_device_is_agp,
- .release = i830_driver_release,
+ .reclaim_buffers_locked = i830_driver_reclaim_buffers_locked,
.dma_quiescent = i830_driver_dma_quiescent,
- .reclaim_buffers = i830_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
#if USE_IRQS
@@ -97,8 +66,6 @@ static struct drm_driver driver = {
.irq_uninstall = i830_driver_irq_uninstall,
.irq_handler = i830_driver_irq_handler,
#endif
- .postinit = postinit,
- .version = version,
.ioctls = i830_ioctls,
.fops = {
.owner = THIS_MODULE,
@@ -108,13 +75,19 @@ static struct drm_driver driver = {
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
- }
- ,
+ },
+
.pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- }
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ },
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
};
static int __init i830_init(void)
diff --git a/drivers/char/drm/i830_drv.h b/drivers/char/drm/i830_drv.h
index bc4bd49fb0c..bf9075b576b 100644
--- a/drivers/char/drm/i830_drv.h
+++ b/drivers/char/drm/i830_drv.h
@@ -136,10 +136,12 @@ extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
extern void i830_driver_irq_preinstall(drm_device_t * dev);
extern void i830_driver_irq_postinstall(drm_device_t * dev);
extern void i830_driver_irq_uninstall(drm_device_t * dev);
-extern void i830_driver_pretakedown(drm_device_t * dev);
-extern void i830_driver_release(drm_device_t * dev, struct file *filp);
+extern int i830_driver_load(struct drm_device *, unsigned long flags);
+extern void i830_driver_preclose(drm_device_t * dev, DRMFILE filp);
+extern void i830_driver_lastclose(drm_device_t * dev);
+extern void i830_driver_reclaim_buffers_locked(drm_device_t * dev,
+ struct file *filp);
extern int i830_driver_dma_quiescent(drm_device_t * dev);
-extern void i830_driver_prerelease(drm_device_t * dev, DRMFILE filp);
extern int i830_driver_device_is_agp(drm_device_t * dev);
#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index f3aa0c37012..9140703da1b 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -1,7 +1,6 @@
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
*/
-/**************************************************************************
- *
+/*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
@@ -25,7 +24,7 @@
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
- **************************************************************************/
+ */
#include "drmP.h"
#include "drm.h"
@@ -196,7 +195,7 @@ static int i915_initialize(drm_device_t * dev,
return 0;
}
-static int i915_resume(drm_device_t * dev)
+static int i915_dma_resume(drm_device_t * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -253,7 +252,7 @@ static int i915_dma_init(DRM_IOCTL_ARGS)
retcode = i915_dma_cleanup(dev);
break;
case I915_RESUME_DMA:
- retcode = i915_resume(dev);
+ retcode = i915_dma_resume(dev);
break;
default:
retcode = -EINVAL;
@@ -654,6 +653,9 @@ static int i915_getparam(DRM_IOCTL_ARGS)
case I915_PARAM_ALLOW_BATCHBUFFER:
value = dev_priv->allow_batchbuffer ? 1 : 0;
break;
+ case I915_PARAM_LAST_DISPATCH:
+ value = READ_BREADCRUMB(dev_priv);
+ break;
default:
DRM_ERROR("Unkown parameter %d\n", param.param);
return DRM_ERR(EINVAL);
@@ -699,7 +701,19 @@ static int i915_setparam(DRM_IOCTL_ARGS)
return 0;
}
-void i915_driver_pretakedown(drm_device_t * dev)
+int i915_driver_load(drm_device_t *dev, unsigned long flags)
+{
+ /* i915 has 4 more counters */
+ dev->counters += 4;
+ dev->types[6] = _DRM_STAT_IRQ;
+ dev->types[7] = _DRM_STAT_PRIMARY;
+ dev->types[8] = _DRM_STAT_SECONDARY;
+ dev->types[9] = _DRM_STAT_DMA;
+
+ return 0;
+}
+
+void i915_driver_lastclose(drm_device_t * dev)
{
if (dev->dev_private) {
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -708,7 +722,7 @@ void i915_driver_pretakedown(drm_device_t * dev)
i915_dma_cleanup(dev);
}
-void i915_driver_prerelease(drm_device_t * dev, DRMFILE filp)
+void i915_driver_preclose(drm_device_t * dev, DRMFILE filp)
{
if (dev->dev_private) {
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -717,18 +731,18 @@ void i915_driver_prerelease(drm_device_t * dev, DRMFILE filp)
}
drm_ioctl_desc_t i915_ioctls[] = {
- [DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, 1, 1},
- [DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, 1, 0},
- [DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, 1, 0},
- [DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, 1, 0},
- [DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, 1, 0},
- [DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, 1, 0},
- [DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, 1, 0},
- [DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, 1, 1},
- [DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, 1, 0},
- [DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, 1, 0},
- [DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, 1, 1},
- [DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, 1, 0}
+ [DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH}
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
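
i915_getparam() gains I915_PARAM_LAST_DISPATCH, reporting the last breadcrumb value the hardware wrote to the status page (the READ_BREADCRUMB() macro moves into i915_drv.h later in this patch). From user space the query would look roughly like the sketch below; the ioctl request name is assumed to follow the existing DRM_IOCTL_I915_* convention in i915_drm.h:

#include <sys/ioctl.h>
#include "i915_drm.h"   /* assumed to provide drm_i915_getparam_t and the request number */

/* Hedged sketch: query which breadcrumb the hardware last dispatched. */
static int i915_get_last_dispatch(int drm_fd, int *value)
{
        drm_i915_getparam_t gp = {
                .param = I915_PARAM_LAST_DISPATCH,
                .value = value,
        };

        return ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
}
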
diff --git a/drivers/char/drm/i915_drm.h b/drivers/char/drm/i915_drm.h
index 23e027d2908..77412ddac00 100644
--- a/drivers/char/drm/i915_drm.h
+++ b/drivers/char/drm/i915_drm.h
@@ -1,5 +1,4 @@
-/**************************************************************************
- *
+/*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
@@ -23,7 +22,7 @@
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
- **************************************************************************/
+ */
#ifndef _I915_DRM_H_
#define _I915_DRM_H_
@@ -152,6 +151,7 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_IRQ_ACTIVE 1
#define I915_PARAM_ALLOW_BATCHBUFFER 2
+#define I915_PARAM_LAST_DISPATCH 3
typedef struct drm_i915_getparam {
int param;
diff --git a/drivers/char/drm/i915_drv.c b/drivers/char/drm/i915_drv.c
index 0508240f4e3..8e2e6095c4b 100644
--- a/drivers/char/drm/i915_drv.c
+++ b/drivers/char/drm/i915_drv.c
@@ -1,6 +1,6 @@
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
*/
-/**************************************************************************
+/*
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
@@ -25,7 +25,7 @@
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
- **************************************************************************/
+ */
#include "drmP.h"
#include "drm.h"
@@ -34,48 +34,22 @@
#include "drm_pciids.h"
-static int postinit(struct drm_device *dev, unsigned long flags)
-{
- dev->counters += 4;
- dev->types[6] = _DRM_STAT_IRQ;
- dev->types[7] = _DRM_STAT_PRIMARY;
- dev->types[8] = _DRM_STAT_SECONDARY;
- dev->types[9] = _DRM_STAT_DMA;
-
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
- DRIVER_NAME,
- DRIVER_MAJOR,
- DRIVER_MINOR,
- DRIVER_PATCHLEVEL,
- DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
- );
- return 0;
-}
-
-static int version(drm_version_t * version)
-{
- int len;
-
- version->version_major = DRIVER_MAJOR;
- version->version_minor = DRIVER_MINOR;
- version->version_patchlevel = DRIVER_PATCHLEVEL;
- DRM_COPY(version->name, DRIVER_NAME);
- DRM_COPY(version->date, DRIVER_DATE);
- DRM_COPY(version->desc, DRIVER_DESC);
- return 0;
-}
-
static struct pci_device_id pciidlist[] = {
i915_PCI_IDS
};
static struct drm_driver driver = {
+	/* Don't use MTRRs here; the X server or user-space app should
+	 * deal with them for Intel hardware.
+	 */
.driver_features =
- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
- .pretakedown = i915_driver_pretakedown,
- .prerelease = i915_driver_prerelease,
+ DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
+ .load = i915_driver_load,
+ .lastclose = i915_driver_lastclose,
+ .preclose = i915_driver_preclose,
.device_is_agp = i915_driver_device_is_agp,
+ .vblank_wait = i915_driver_vblank_wait,
.irq_preinstall = i915_driver_irq_preinstall,
.irq_postinstall = i915_driver_irq_postinstall,
.irq_uninstall = i915_driver_irq_uninstall,
@@ -83,8 +57,6 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
- .postinit = postinit,
- .version = version,
.ioctls = i915_ioctls,
.fops = {
.owner = THIS_MODULE,
@@ -97,11 +69,19 @@ static struct drm_driver driver = {
#ifdef CONFIG_COMPAT
.compat_ioctl = i915_compat_ioctl,
#endif
- },
+ },
+
.pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- }
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ },
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
};
static int __init i915_init(void)
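
The i915 registration above stops requesting MTRR management from the core (left to the X server or user space), bumps the interface to 1.3, and advertises DRIVER_IRQ_VBL together with a vblank_wait hook so the core's generic vblank ioctl can block until a requested display refresh. A hedged sketch of what such a hook does, assuming the core's vbl_queue wait queue and vbl_received counter:

/* Sketch only: block until the IRQ handler has counted past the requested
 * vblank sequence, then report the sequence actually reached.
 * dev->vbl_queue, dev->vbl_received and DRM_WAIT_ON() are assumed here. */
static int example_vblank_wait(drm_device_t *dev, unsigned int *sequence)
{
        unsigned int target = *sequence;
        int ret = 0;

        DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
                    atomic_read(&dev->vbl_received) >= target);

        *sequence = atomic_read(&dev->vbl_received);
        return ret;
}
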
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index 17e457c73dc..c6c71b45f10 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -1,6 +1,6 @@
/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
*/
-/**************************************************************************
+/*
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
@@ -25,7 +25,7 @@
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
- **************************************************************************/
+ */
#ifndef _I915_DRV_H_
#define _I915_DRV_H_
@@ -37,21 +37,18 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20040405"
+#define DRIVER_DATE "20051209"
/* Interface history:
*
* 1.1: Original.
+ * 1.2: Add Power Management
+ * 1.3: Add vblank support
*/
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 1
+#define DRIVER_MINOR 3
#define DRIVER_PATCHLEVEL 0
-/* We use our own dma mechanisms, not the drm template code. However,
- * the shared IRQ code is useful to us:
- */
-#define __HAVE_PM 1
-
typedef struct _drm_i915_ring_buffer {
int tail_mask;
unsigned long Start;
@@ -97,6 +94,7 @@ typedef struct drm_i915_private {
int tex_lru_log_granularity;
int allow_batchbuffer;
struct mem_block *agp_heap;
+ unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
} drm_i915_private_t;
extern drm_ioctl_desc_t i915_ioctls[];
@@ -104,14 +102,18 @@ extern int i915_max_ioctl;
/* i915_dma.c */
extern void i915_kernel_lost_context(drm_device_t * dev);
-extern void i915_driver_pretakedown(drm_device_t * dev);
-extern void i915_driver_prerelease(drm_device_t * dev, DRMFILE filp);
+extern int i915_driver_load(struct drm_device *, unsigned long flags);
+extern void i915_driver_lastclose(drm_device_t * dev);
+extern void i915_driver_preclose(drm_device_t * dev, DRMFILE filp);
extern int i915_driver_device_is_agp(drm_device_t * dev);
+extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
/* i915_irq.c */
extern int i915_irq_emit(DRM_IOCTL_ARGS);
extern int i915_irq_wait(DRM_IOCTL_ARGS);
+extern int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
extern void i915_driver_irq_preinstall(drm_device_t * dev);
extern void i915_driver_irq_postinstall(drm_device_t * dev);
@@ -125,13 +127,10 @@ extern void i915_mem_takedown(struct mem_block **heap);
extern void i915_mem_release(drm_device_t * dev,
DRMFILE filp, struct mem_block *heap);
-extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg);
-
-#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
-#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, reg, val)
-#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg)
-#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, reg, val)
+#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
+#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
+#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
+#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
#define I915_VERBOSE 0
@@ -195,6 +194,13 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
#define PPCR 0x61204
#define PPCR_ON (1<<0)
+#define DVOB 0x61140
+#define DVOB_ON (1<<31)
+#define DVOC 0x61160
+#define DVOC_ON (1<<31)
+#define LVDS 0x61180
+#define LVDS_ON (1<<31)
+
#define ADPA 0x61100
#define ADPA_DPMS_MASK (~(3<<10))
#define ADPA_DPMS_ON (0<<10)
@@ -258,4 +264,6 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
+#define READ_BREADCRUMB(dev_priv) (((u32 *)(dev_priv->hw_status_page))[5])
+
#endif
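A small but deliberate detail in the I915_READ()/I915_WRITE() changes above is that every use of the macro parameter is now parenthesized. Without that, an argument containing an operator expands with the wrong precedence. A standalone illustration (the macro and register names here are made up):

	#include <stdio.h>

	#define BASE 0x1000
	/* unsafe: the parameter is substituted textually without parentheses */
	#define OFFSET_BAD(reg)  BASE + reg * 4
	/* safe: every use of the parameter is parenthesized, as in the patch */
	#define OFFSET_GOOD(reg) (BASE + (reg) * 4)

	int main(void)
	{
		/* caller passes an expression as the argument */
		printf("bad:  0x%x\n", OFFSET_BAD(2 + 1));  /* 0x1000 + 2 + 1*4 = 0x1006 */
		printf("good: 0x%x\n", OFFSET_GOOD(2 + 1)); /* 0x1000 + 3*4     = 0x100c */
		return 0;
	}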
diff --git a/drivers/char/drm/i915_ioc32.c b/drivers/char/drm/i915_ioc32.c
index 2218a946ec8..296248cdc76 100644
--- a/drivers/char/drm/i915_ioc32.c
+++ b/drivers/char/drm/i915_ioc32.c
@@ -30,7 +30,6 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
-#include <linux/ioctl32.h>
#include "drmP.h"
#include "drm.h"
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index 4fa448ee846..a1381c61aa6 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -1,7 +1,6 @@
-/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
+/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
*/
-/**************************************************************************
- *
+/*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
@@ -25,16 +24,18 @@
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
- **************************************************************************/
+ */
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
-#define USER_INT_FLAG 0x2
+#define USER_INT_FLAG (1<<1)
+#define VSYNC_PIPEB_FLAG (1<<5)
+#define VSYNC_PIPEA_FLAG (1<<7)
+
#define MAX_NOPID ((u32)~0)
-#define READ_BREADCRUMB(dev_priv) (((u32*)(dev_priv->hw_status_page))[5])
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
@@ -43,7 +44,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
u16 temp;
temp = I915_READ16(I915REG_INT_IDENTITY_R);
- temp &= USER_INT_FLAG;
+ temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG);
DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
@@ -51,7 +52,15 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
return IRQ_NONE;
I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
- DRM_WAKEUP(&dev_priv->irq_queue);
+
+ if (temp & USER_INT_FLAG)
+ DRM_WAKEUP(&dev_priv->irq_queue);
+
+ if (temp & VSYNC_PIPEA_FLAG) {
+ atomic_inc(&dev->vbl_received);
+ DRM_WAKEUP(&dev->vbl_queue);
+ drm_vbl_send_signals(dev);
+ }
return IRQ_HANDLED;
}
@@ -102,6 +111,27 @@ static int i915_wait_irq(drm_device_t * dev, int irq_nr)
return ret;
}
+int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned int cur_vblank;
+ int ret = 0;
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return DRM_ERR(EINVAL);
+ }
+
+ DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+ (((cur_vblank = atomic_read(&dev->vbl_received))
+ - *sequence) <= (1<<23)));
+
+ *sequence = cur_vblank;
+
+ return ret;
+}
+
+
/* Needs the lock as it touches the ring.
*/
int i915_irq_emit(DRM_IOCTL_ARGS)
@@ -165,7 +195,7 @@ void i915_driver_irq_postinstall(drm_device_t * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG);
+ I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | VSYNC_PIPEA_FLAG);
DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
}
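The new i915_driver_vblank_wait() above waits until the free-running vblank counter has passed the requested sequence number, using the unsigned difference (cur_vblank - *sequence) <= (1<<23) so the test keeps working when the 32-bit counter wraps. A tiny standalone illustration of why that comparison is wrap-safe:

	#include <stdio.h>

	/* Wrap-safe "has the counter reached seq yet?" test: unsigned
	 * subtraction ignores wraparound, and anything within half the
	 * counter range (here 2^23) counts as already passed.
	 */
	static int vblank_passed(unsigned int cur, unsigned int seq)
	{
		return (cur - seq) <= (1u << 23);
	}

	int main(void)
	{
		printf("%d\n", vblank_passed(10, 5));           /* 1: already past */
		printf("%d\n", vblank_passed(5, 10));           /* 0: still waiting */
		printf("%d\n", vblank_passed(3, 0xfffffffeu));  /* 1: passed across the wrap */
		return 0;
	}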
diff --git a/drivers/char/drm/i915_mem.c b/drivers/char/drm/i915_mem.c
index 13176d136a9..ba87ff17ff6 100644
--- a/drivers/char/drm/i915_mem.c
+++ b/drivers/char/drm/i915_mem.c
@@ -1,7 +1,6 @@
/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
*/
-/**************************************************************************
- *
+/*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
@@ -25,7 +24,7 @@
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
- **************************************************************************/
+ */
#include "drmP.h"
#include "drm.h"
diff --git a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c
index 70dc7f64b7b..c2a4bac1452 100644
--- a/drivers/char/drm/mga_dma.c
+++ b/drivers/char/drm/mga_dma.c
@@ -44,7 +44,9 @@
#define MGA_DEFAULT_USEC_TIMEOUT 10000
#define MGA_FREELIST_DEBUG 0
-static int mga_do_cleanup_dma(drm_device_t * dev);
+#define MINIMAL_CLEANUP 0
+#define FULL_CLEANUP 1
+static int mga_do_cleanup_dma(drm_device_t *dev, int full_cleanup);
/* ================================================================
* Engine control
@@ -391,7 +393,7 @@ int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf)
* DMA initialization, cleanup
*/
-int mga_driver_preinit(drm_device_t * dev, unsigned long flags)
+int mga_driver_load(drm_device_t * dev, unsigned long flags)
{
drm_mga_private_t *dev_priv;
@@ -405,6 +407,14 @@ int mga_driver_preinit(drm_device_t * dev, unsigned long flags)
dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
dev_priv->chipset = flags;
+ dev_priv->mmio_base = drm_get_resource_start(dev, 1);
+ dev_priv->mmio_size = drm_get_resource_len(dev, 1);
+
+ dev->counters += 3;
+ dev->types[6] = _DRM_STAT_IRQ;
+ dev->types[7] = _DRM_STAT_PRIMARY;
+ dev->types[8] = _DRM_STAT_SECONDARY;
+
return 0;
}
@@ -438,17 +448,19 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
drm_buf_desc_t req;
drm_agp_mode_t mode;
drm_agp_info_t info;
+ drm_agp_buffer_t agp_req;
+ drm_agp_binding_t bind_req;
/* Acquire AGP. */
err = drm_agp_acquire(dev);
if (err) {
- DRM_ERROR("Unable to acquire AGP\n");
+ DRM_ERROR("Unable to acquire AGP: %d\n", err);
return err;
}
err = drm_agp_info(dev, &info);
if (err) {
- DRM_ERROR("Unable to get AGP info\n");
+ DRM_ERROR("Unable to get AGP info: %d\n", err);
return err;
}
@@ -472,18 +484,24 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
}
/* Allocate and bind AGP memory. */
- dev_priv->agp_pages = agp_size / PAGE_SIZE;
- dev_priv->agp_mem = drm_alloc_agp(dev, dev_priv->agp_pages, 0);
- if (dev_priv->agp_mem == NULL) {
- dev_priv->agp_pages = 0;
+ agp_req.size = agp_size;
+ agp_req.type = 0;
+ err = drm_agp_alloc(dev, &agp_req);
+ if (err) {
+ dev_priv->agp_size = 0;
DRM_ERROR("Unable to allocate %uMB AGP memory\n",
dma_bs->agp_size);
- return DRM_ERR(ENOMEM);
+ return err;
}
+
+ dev_priv->agp_size = agp_size;
+ dev_priv->agp_handle = agp_req.handle;
- err = drm_bind_agp(dev_priv->agp_mem, 0);
+ bind_req.handle = agp_req.handle;
+ bind_req.offset = 0;
+ err = drm_agp_bind(dev, &bind_req);
if (err) {
- DRM_ERROR("Unable to bind AGP memory\n");
+ DRM_ERROR("Unable to bind AGP memory: %d\n", err);
return err;
}
@@ -497,7 +515,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
err = drm_addmap(dev, offset, warp_size,
_DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
if (err) {
- DRM_ERROR("Unable to map WARP microcode\n");
+ DRM_ERROR("Unable to map WARP microcode: %d\n", err);
return err;
}
@@ -505,7 +523,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
err = drm_addmap(dev, offset, dma_bs->primary_size,
_DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);
if (err) {
- DRM_ERROR("Unable to map primary DMA region\n");
+ DRM_ERROR("Unable to map primary DMA region: %d\n", err);
return err;
}
@@ -513,7 +531,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
err = drm_addmap(dev, offset, secondary_size,
_DRM_AGP, 0, &dev->agp_buffer_map);
if (err) {
- DRM_ERROR("Unable to map secondary DMA region\n");
+ DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
return err;
}
@@ -525,15 +543,29 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
err = drm_addbufs_agp(dev, &req);
if (err) {
- DRM_ERROR("Unable to add secondary DMA buffers\n");
+ DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
return err;
}
+ {
+ drm_map_list_t *_entry;
+ unsigned long agp_token = 0;
+
+ list_for_each_entry(_entry, &dev->maplist->head, head) {
+ if (_entry->map == dev->agp_buffer_map)
+ agp_token = _entry->user_token;
+ }
+ if (!agp_token)
+ return -EFAULT;
+
+ dev->agp_buffer_token = agp_token;
+ }
+
offset += secondary_size;
err = drm_addmap(dev, offset, agp_size - offset,
_DRM_AGP, 0, &dev_priv->agp_textures);
if (err) {
- DRM_ERROR("Unable to map AGP texture region\n");
+ DRM_ERROR("Unable to map AGP texture region %d\n", err);
return err;
}
@@ -603,7 +635,8 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
_DRM_READ_ONLY, &dev_priv->warp);
if (err != 0) {
- DRM_ERROR("Unable to create mapping for WARP microcode\n");
+ DRM_ERROR("Unable to create mapping for WARP microcode: %d\n",
+ err);
return err;
}
@@ -622,7 +655,7 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
}
if (err != 0) {
- DRM_ERROR("Unable to allocate primary DMA region\n");
+ DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
return DRM_ERR(ENOMEM);
}
@@ -646,7 +679,7 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
}
if (bin_count == 0) {
- DRM_ERROR("Unable to add secondary DMA buffers\n");
+ DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
return err;
}
@@ -682,7 +715,7 @@ static int mga_do_dma_bootstrap(drm_device_t * dev,
err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
_DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
if (err) {
- DRM_ERROR("Unable to map MMIO region\n");
+ DRM_ERROR("Unable to map MMIO region: %d\n", err);
return err;
}
@@ -690,7 +723,7 @@ static int mga_do_dma_bootstrap(drm_device_t * dev,
_DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
&dev_priv->status);
if (err) {
- DRM_ERROR("Unable to map status region\n");
+ DRM_ERROR("Unable to map status region: %d\n", err);
return err;
}
@@ -708,7 +741,7 @@ static int mga_do_dma_bootstrap(drm_device_t * dev,
*/
if (err) {
- mga_do_cleanup_dma(dev);
+ mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);
}
/* Not only do we want to try and initialized PCI cards for PCI DMA,
@@ -731,35 +764,32 @@ int mga_dma_bootstrap(DRM_IOCTL_ARGS)
DRM_DEVICE;
drm_mga_dma_bootstrap_t bootstrap;
int err;
+ static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
+ const drm_mga_private_t *const dev_priv =
+ (drm_mga_private_t *) dev->dev_private;
DRM_COPY_FROM_USER_IOCTL(bootstrap,
(drm_mga_dma_bootstrap_t __user *) data,
sizeof(bootstrap));
err = mga_do_dma_bootstrap(dev, &bootstrap);
- if (!err) {
- static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
- const drm_mga_private_t *const dev_priv =
- (drm_mga_private_t *) dev->dev_private;
-
- if (dev_priv->agp_textures != NULL) {
- bootstrap.texture_handle =
- dev_priv->agp_textures->offset;
- bootstrap.texture_size = dev_priv->agp_textures->size;
- } else {
- bootstrap.texture_handle = 0;
- bootstrap.texture_size = 0;
- }
+ if (err) {
+ mga_do_cleanup_dma(dev, FULL_CLEANUP);
+ return err;
+ }
- bootstrap.agp_mode = modes[bootstrap.agp_mode & 0x07];
- if (DRM_COPY_TO_USER((void __user *)data, &bootstrap,
- sizeof(bootstrap))) {
- err = DRM_ERR(EFAULT);
- }
+ if (dev_priv->agp_textures != NULL) {
+ bootstrap.texture_handle = dev_priv->agp_textures->offset;
+ bootstrap.texture_size = dev_priv->agp_textures->size;
} else {
- mga_do_cleanup_dma(dev);
+ bootstrap.texture_handle = 0;
+ bootstrap.texture_size = 0;
}
+ bootstrap.agp_mode = modes[bootstrap.agp_mode & 0x07];
+ DRM_COPY_TO_USER_IOCTL((drm_mga_dma_bootstrap_t __user *)data,
+ bootstrap, sizeof(bootstrap));
+
return err;
}
@@ -853,13 +883,13 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
ret = mga_warp_install_microcode(dev_priv);
if (ret < 0) {
- DRM_ERROR("failed to install WARP ucode!\n");
+ DRM_ERROR("failed to install WARP ucode!: %d\n", ret);
return ret;
}
ret = mga_warp_init(dev_priv);
if (ret < 0) {
- DRM_ERROR("failed to init WARP engine!\n");
+ DRM_ERROR("failed to init WARP engine!: %d\n", ret);
return ret;
}
@@ -904,7 +934,7 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
return 0;
}
-static int mga_do_cleanup_dma(drm_device_t * dev)
+static int mga_do_cleanup_dma(drm_device_t *dev, int full_cleanup)
{
int err = 0;
DRM_DEBUG("\n");
@@ -932,31 +962,39 @@ static int mga_do_cleanup_dma(drm_device_t * dev)
if (dev_priv->used_new_dma_init) {
#if __OS_HAS_AGP
- if (dev_priv->agp_mem != NULL) {
- dev_priv->agp_textures = NULL;
- drm_unbind_agp(dev_priv->agp_mem);
+ if (dev_priv->agp_handle != 0) {
+ drm_agp_binding_t unbind_req;
+ drm_agp_buffer_t free_req;
+
+ unbind_req.handle = dev_priv->agp_handle;
+ drm_agp_unbind(dev, &unbind_req);
- drm_free_agp(dev_priv->agp_mem,
- dev_priv->agp_pages);
- dev_priv->agp_pages = 0;
- dev_priv->agp_mem = NULL;
+ free_req.handle = dev_priv->agp_handle;
+ drm_agp_free(dev, &free_req);
+
+ dev_priv->agp_textures = NULL;
+ dev_priv->agp_size = 0;
+ dev_priv->agp_handle = 0;
}
if ((dev->agp != NULL) && dev->agp->acquired) {
err = drm_agp_release(dev);
}
#endif
- dev_priv->used_new_dma_init = 0;
}
dev_priv->warp = NULL;
dev_priv->primary = NULL;
- dev_priv->mmio = NULL;
- dev_priv->status = NULL;
dev_priv->sarea = NULL;
dev_priv->sarea_priv = NULL;
dev->agp_buffer_map = NULL;
+ if (full_cleanup) {
+ dev_priv->mmio = NULL;
+ dev_priv->status = NULL;
+ dev_priv->used_new_dma_init = 0;
+ }
+
memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
dev_priv->warp_pipe = 0;
memset(dev_priv->warp_pipe_phys, 0,
@@ -967,7 +1005,7 @@ static int mga_do_cleanup_dma(drm_device_t * dev)
}
}
- return err;
+ return 0;
}
int mga_dma_init(DRM_IOCTL_ARGS)
@@ -985,11 +1023,11 @@ int mga_dma_init(DRM_IOCTL_ARGS)
case MGA_INIT_DMA:
err = mga_do_init_dma(dev, &init);
if (err) {
- (void)mga_do_cleanup_dma(dev);
+ (void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
}
return err;
case MGA_CLEANUP_DMA:
- return mga_do_cleanup_dma(dev);
+ return mga_do_cleanup_dma(dev, FULL_CLEANUP);
}
return DRM_ERR(EINVAL);
@@ -1118,7 +1156,7 @@ int mga_dma_buffers(DRM_IOCTL_ARGS)
/**
* Called just before the module is unloaded.
*/
-int mga_driver_postcleanup(drm_device_t * dev)
+int mga_driver_unload(drm_device_t * dev)
{
drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
dev->dev_private = NULL;
@@ -1129,9 +1167,9 @@ int mga_driver_postcleanup(drm_device_t * dev)
/**
* Called when the last opener of the device is closed.
*/
-void mga_driver_pretakedown(drm_device_t * dev)
+void mga_driver_lastclose(drm_device_t * dev)
{
- mga_do_cleanup_dma(dev);
+ mga_do_cleanup_dma(dev, FULL_CLEANUP);
}
int mga_driver_dma_quiescent(drm_device_t * dev)
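The bootstrap path above now allocates and binds AGP memory through the request-struct interface (drm_agp_alloc()/drm_agp_bind()) and keeps only the returned handle and size, instead of holding a DRM_AGP_MEM pointer. Condensed out of the hunk purely for illustration (the wrapper function is hypothetical; the calls and fields are the ones used in the patch):

	static int example_agp_alloc_and_bind(drm_device_t *dev,
					      drm_mga_private_t *dev_priv,
					      unsigned long agp_size)
	{
		drm_agp_buffer_t  agp_req  = { .size = agp_size, .type = 0 };
		drm_agp_binding_t bind_req;
		int err;

		err = drm_agp_alloc(dev, &agp_req);
		if (err)
			return err;

		dev_priv->agp_size   = agp_size;
		dev_priv->agp_handle = agp_req.handle;

		bind_req.handle = agp_req.handle;
		bind_req.offset = 0;
		return drm_agp_bind(dev, &bind_req);
	}

Keeping only the opaque handle lets the cleanup path in mga_do_cleanup_dma() unbind and free with the same two request structures, while the new MINIMAL_CLEANUP mode skips the maps that must survive a failed bootstrap.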
diff --git a/drivers/char/drm/mga_drv.c b/drivers/char/drm/mga_drv.c
index 1713451a5cc..9f7ed0e0351 100644
--- a/drivers/char/drm/mga_drv.c
+++ b/drivers/char/drm/mga_drv.c
@@ -38,41 +38,6 @@
#include "drm_pciids.h"
static int mga_driver_device_is_agp(drm_device_t * dev);
-static int postinit(struct drm_device *dev, unsigned long flags)
-{
- drm_mga_private_t *const dev_priv =
- (drm_mga_private_t *) dev->dev_private;
-
- dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
- dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
-
- dev->counters += 3;
- dev->types[6] = _DRM_STAT_IRQ;
- dev->types[7] = _DRM_STAT_PRIMARY;
- dev->types[8] = _DRM_STAT_SECONDARY;
-
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
- DRIVER_NAME,
- DRIVER_MAJOR,
- DRIVER_MINOR,
- DRIVER_PATCHLEVEL,
- DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
- );
- return 0;
-}
-
-static int version(drm_version_t * version)
-{
- int len;
-
- version->version_major = DRIVER_MAJOR;
- version->version_minor = DRIVER_MINOR;
- version->version_patchlevel = DRIVER_PATCHLEVEL;
- DRM_COPY(version->name, DRIVER_NAME);
- DRM_COPY(version->date, DRIVER_DATE);
- DRM_COPY(version->desc, DRIVER_DESC);
- return 0;
-}
static struct pci_device_id pciidlist[] = {
mga_PCI_IDS
@@ -80,12 +45,12 @@ static struct pci_device_id pciidlist[] = {
static struct drm_driver driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
+ DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_IRQ_VBL,
- .preinit = mga_driver_preinit,
- .postcleanup = mga_driver_postcleanup,
- .pretakedown = mga_driver_pretakedown,
+ .load = mga_driver_load,
+ .unload = mga_driver_unload,
+ .lastclose = mga_driver_lastclose,
.dma_quiescent = mga_driver_dma_quiescent,
.device_is_agp = mga_driver_device_is_agp,
.vblank_wait = mga_driver_vblank_wait,
@@ -96,8 +61,6 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
- .postinit = postinit,
- .version = version,
.ioctls = mga_ioctls,
.dma_ioctl = mga_dma_buffers,
.fops = {
@@ -113,9 +76,16 @@ static struct drm_driver driver = {
#endif
},
.pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- }
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ },
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
};
static int __init mga_init(void)
diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h
index 461728e6a58..6b0c5319350 100644
--- a/drivers/char/drm/mga_drv.h
+++ b/drivers/char/drm/mga_drv.h
@@ -38,11 +38,11 @@
#define DRIVER_NAME "mga"
#define DRIVER_DESC "Matrox G200/G400"
-#define DRIVER_DATE "20050607"
+#define DRIVER_DATE "20051102"
#define DRIVER_MAJOR 3
#define DRIVER_MINOR 2
-#define DRIVER_PATCHLEVEL 0
+#define DRIVER_PATCHLEVEL 1
typedef struct drm_mga_primary_buffer {
u8 *start;
@@ -144,22 +144,22 @@ typedef struct drm_mga_private {
drm_local_map_t *primary;
drm_local_map_t *agp_textures;
- DRM_AGP_MEM *agp_mem;
- unsigned int agp_pages;
+ unsigned long agp_handle;
+ unsigned int agp_size;
} drm_mga_private_t;
extern drm_ioctl_desc_t mga_ioctls[];
extern int mga_max_ioctl;
/* mga_dma.c */
-extern int mga_driver_preinit(drm_device_t * dev, unsigned long flags);
extern int mga_dma_bootstrap(DRM_IOCTL_ARGS);
extern int mga_dma_init(DRM_IOCTL_ARGS);
extern int mga_dma_flush(DRM_IOCTL_ARGS);
extern int mga_dma_reset(DRM_IOCTL_ARGS);
extern int mga_dma_buffers(DRM_IOCTL_ARGS);
-extern int mga_driver_postcleanup(drm_device_t * dev);
-extern void mga_driver_pretakedown(drm_device_t * dev);
+extern int mga_driver_load(drm_device_t *dev, unsigned long flags);
+extern int mga_driver_unload(drm_device_t * dev);
+extern void mga_driver_lastclose(drm_device_t * dev);
extern int mga_driver_dma_quiescent(drm_device_t * dev);
extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
diff --git a/drivers/char/drm/mga_ioc32.c b/drivers/char/drm/mga_ioc32.c
index 24a9d4e86af..54a18eb2fc0 100644
--- a/drivers/char/drm/mga_ioc32.c
+++ b/drivers/char/drm/mga_ioc32.c
@@ -31,7 +31,6 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
-#include <linux/ioctl32.h>
#include "drmP.h"
#include "drm.h"
diff --git a/drivers/char/drm/mga_state.c b/drivers/char/drm/mga_state.c
index 47f54b5ae95..2837e669183 100644
--- a/drivers/char/drm/mga_state.c
+++ b/drivers/char/drm/mga_state.c
@@ -1127,19 +1127,19 @@ static int mga_wait_fence(DRM_IOCTL_ARGS)
}
drm_ioctl_desc_t mga_ioctls[] = {
- [DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, 1, 1},
- [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, 1, 0},
- [DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, 1, 0},
- [DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, 1, 0},
- [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, 1, 0},
- [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, 1, 0},
- [DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, 1, 0},
- [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, 1, 0},
- [DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, 1, 0},
- [DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, 1, 0},
- [DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, 1, 0},
- [DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, 1, 0},
- [DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, 1, 1},
+ [DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
};
int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
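The ioctl table above (and the r128 table later in this patch) replaces the old positional {func, auth_needed, root_only} flags with named DRM_AUTH, DRM_MASTER and DRM_ROOT_ONLY bits. For reference, an illustrative entry with made-up ioctl and handler names, showing how the flags read:

	/* Illustrative only -- DRM_FOO_INIT, DRM_FOO_FLUSH, foo_init and
	 * foo_flush are made-up names.
	 * DRM_AUTH:      caller must be authenticated against the DRM master
	 * DRM_MASTER:    caller must be the current DRM master (typically the X server)
	 * DRM_ROOT_ONLY: caller must have root privileges
	 */
	[DRM_IOCTL_NR(DRM_FOO_INIT)]  = {foo_init,  DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_FOO_FLUSH)] = {foo_flush, DRM_AUTH},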
diff --git a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c
index 7452753d4d0..db5a60450e6 100644
--- a/drivers/char/drm/r128_cce.c
+++ b/drivers/char/drm/r128_cce.c
@@ -1,6 +1,7 @@
-/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
+/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
* Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
- *
+ */
+/*
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@@ -559,7 +560,8 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
if (dev_priv->is_pci) {
#endif
dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
- dev_priv->gart_info.addr = dev_priv->gart_info.bus_addr = 0;
+ dev_priv->gart_info.addr = NULL;
+ dev_priv->gart_info.bus_addr = 0;
dev_priv->gart_info.is_pcie = 0;
if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
DRM_ERROR("failed to init PCI GART!\n");
@@ -601,15 +603,16 @@ int r128_do_cleanup_cce(drm_device_t * dev)
drm_core_ioremapfree(dev_priv->cce_ring, dev);
if (dev_priv->ring_rptr != NULL)
drm_core_ioremapfree(dev_priv->ring_rptr, dev);
- if (dev->agp_buffer_map != NULL)
+ if (dev->agp_buffer_map != NULL) {
drm_core_ioremapfree(dev->agp_buffer_map, dev);
+ dev->agp_buffer_map = NULL;
+ }
} else
#endif
{
if (dev_priv->gart_info.bus_addr)
if (!drm_ati_pcigart_cleanup(dev,
- &dev_priv->
- gart_info))
+ &dev_priv->gart_info))
DRM_ERROR
("failed to cleanup PCI GART!\n");
}
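Two small correctness fixes in r128_do_cleanup_cce() above: gart_info.addr is now initialised with NULL rather than 0 (it is a pointer), and dev->agp_buffer_map is cleared after drm_core_ioremapfree() so a second cleanup pass cannot unmap it twice. This is the usual free-then-NULL idiom; a standalone illustration:

	#include <stdlib.h>

	struct ctx {
		void *mapping;
	};

	/* Safe to call any number of times: the pointer is cleared as soon
	 * as the resource is released, so a repeated cleanup is a no-op.
	 */
	static void cleanup(struct ctx *c)
	{
		if (c->mapping != NULL) {
			free(c->mapping);
			c->mapping = NULL;
		}
	}

	int main(void)
	{
		struct ctx c = { .mapping = malloc(64) };
		cleanup(&c);
		cleanup(&c);	/* harmless second call */
		return 0;
	}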
diff --git a/drivers/char/drm/r128_drm.h b/drivers/char/drm/r128_drm.h
index 5ddc0320241..5d835b006f5 100644
--- a/drivers/char/drm/r128_drm.h
+++ b/drivers/char/drm/r128_drm.h
@@ -1,7 +1,7 @@
/* r128_drm.h -- Public header for the r128 driver -*- linux-c -*-
* Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
- *
- * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ */
+/* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
diff --git a/drivers/char/drm/r128_drv.c b/drivers/char/drm/r128_drv.c
index 1661e735140..e20450ae220 100644
--- a/drivers/char/drm/r128_drv.c
+++ b/drivers/char/drm/r128_drv.c
@@ -37,31 +37,6 @@
#include "drm_pciids.h"
-static int postinit(struct drm_device *dev, unsigned long flags)
-{
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
- DRIVER_NAME,
- DRIVER_MAJOR,
- DRIVER_MINOR,
- DRIVER_PATCHLEVEL,
- DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
- );
- return 0;
-}
-
-static int version(drm_version_t * version)
-{
- int len;
-
- version->version_major = DRIVER_MAJOR;
- version->version_minor = DRIVER_MINOR;
- version->version_patchlevel = DRIVER_PATCHLEVEL;
- DRM_COPY(version->name, DRIVER_NAME);
- DRM_COPY(version->date, DRIVER_DATE);
- DRM_COPY(version->desc, DRIVER_DESC);
- return 0;
-}
-
static struct pci_device_id pciidlist[] = {
r128_PCI_IDS
};
@@ -72,8 +47,8 @@ static struct drm_driver driver = {
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_IRQ_VBL,
.dev_priv_size = sizeof(drm_r128_buf_priv_t),
- .prerelease = r128_driver_prerelease,
- .pretakedown = r128_driver_pretakedown,
+ .preclose = r128_driver_preclose,
+ .lastclose = r128_driver_lastclose,
.vblank_wait = r128_driver_vblank_wait,
.irq_preinstall = r128_driver_irq_preinstall,
.irq_postinstall = r128_driver_irq_postinstall,
@@ -82,8 +57,6 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
- .postinit = postinit,
- .version = version,
.ioctls = r128_ioctls,
.dma_ioctl = r128_cce_buffers,
.fops = {
@@ -97,12 +70,19 @@ static struct drm_driver driver = {
#ifdef CONFIG_COMPAT
.compat_ioctl = r128_compat_ioctl,
#endif
- }
- ,
+ },
+
.pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- }
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ },
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
};
static int __init r128_init(void)
diff --git a/drivers/char/drm/r128_drv.h b/drivers/char/drm/r128_drv.h
index 5c79e40eb88..94abffb2cca 100644
--- a/drivers/char/drm/r128_drv.h
+++ b/drivers/char/drm/r128_drv.h
@@ -1,7 +1,7 @@
/* r128_drv.h -- Private header for r128 driver -*- linux-c -*-
* Created: Mon Dec 13 09:51:11 1999 by faith@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ */
+/* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
@@ -154,8 +154,8 @@ extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
extern void r128_driver_irq_preinstall(drm_device_t * dev);
extern void r128_driver_irq_postinstall(drm_device_t * dev);
extern void r128_driver_irq_uninstall(drm_device_t * dev);
-extern void r128_driver_pretakedown(drm_device_t * dev);
-extern void r128_driver_prerelease(drm_device_t * dev, DRMFILE filp);
+extern void r128_driver_lastclose(drm_device_t * dev);
+extern void r128_driver_preclose(drm_device_t * dev, DRMFILE filp);
extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
diff --git a/drivers/char/drm/r128_ioc32.c b/drivers/char/drm/r128_ioc32.c
index 1e2e367b8b8..9dd6d4116e4 100644
--- a/drivers/char/drm/r128_ioc32.c
+++ b/drivers/char/drm/r128_ioc32.c
@@ -30,7 +30,6 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
-#include <linux/ioctl32.h>
#include "drmP.h"
#include "drm.h"
diff --git a/drivers/char/drm/r128_irq.c b/drivers/char/drm/r128_irq.c
index 27eb0e31bd3..87f8ca2b068 100644
--- a/drivers/char/drm/r128_irq.c
+++ b/drivers/char/drm/r128_irq.c
@@ -1,5 +1,5 @@
-/* r128_irq.c -- IRQ handling for radeon -*- linux-c -*-
- *
+/* r128_irq.c -- IRQ handling for radeon -*- linux-c -*- */
+/*
* Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
*
* The Weather Channel (TM) funded Tungsten Graphics to develop the
diff --git a/drivers/char/drm/r128_state.c b/drivers/char/drm/r128_state.c
index 14479cc08a5..caeecc2c36d 100644
--- a/drivers/char/drm/r128_state.c
+++ b/drivers/char/drm/r128_state.c
@@ -1,7 +1,7 @@
/* r128_state.c -- State support for r128 -*- linux-c -*-
* Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com
- *
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ */
+/* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -1674,7 +1674,7 @@ static int r128_getparam(DRM_IOCTL_ARGS)
return 0;
}
-void r128_driver_prerelease(drm_device_t * dev, DRMFILE filp)
+void r128_driver_preclose(drm_device_t * dev, DRMFILE filp)
{
if (dev->dev_private) {
drm_r128_private_t *dev_priv = dev->dev_private;
@@ -1684,29 +1684,29 @@ void r128_driver_prerelease(drm_device_t * dev, DRMFILE filp)
}
}
-void r128_driver_pretakedown(drm_device_t * dev)
+void r128_driver_lastclose(drm_device_t * dev)
{
r128_do_cleanup_cce(dev);
}
drm_ioctl_desc_t r128_ioctls[] = {
- [DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, 1, 1},
- [DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, 1, 1},
- [DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, 1, 1},
- [DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = {r128_cce_reset, 1, 1},
- [DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = {r128_cce_idle, 1, 0},
- [DRM_IOCTL_NR(DRM_R128_RESET)] = {r128_engine_reset, 1, 0},
- [DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = {r128_fullscreen, 1, 0},
- [DRM_IOCTL_NR(DRM_R128_SWAP)] = {r128_cce_swap, 1, 0},
- [DRM_IOCTL_NR(DRM_R128_FLIP)] = {r128_cce_flip, 1, 0},
- [DRM_IOCTL_NR(DRM_R128_CLEAR)] = {r128_cce_clear, 1, 0},
- [DRM_IOCTL_NR(DRM_R128_VERTEX)] = {r128_cce_vertex, 1, 0},
- [DRM_IOCTL_NR(DRM_R128_INDICES)] = {r128_cce_indices, 1, 0},
- [DRM_IOCTL_NR(DRM_R128_BLIT)] = {r128_cce_blit, 1, 0},
- [DRM_IOCTL_NR(DRM_R128_DEPTH)] = {r128_cce_depth, 1, 0},
- [DRM_IOCTL_NR(DRM_R128_STIPPLE)] = {r128_cce_stipple, 1, 0},
- [DRM_IOCTL_NR(DRM_R128_INDIRECT)] = {r128_cce_indirect, 1, 1},
- [DRM_IOCTL_NR(DRM_R128_GETPARAM)] = {r128_getparam, 1, 0},
+ [DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = {r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = {r128_cce_idle, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_R128_RESET)] = {r128_engine_reset, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = {r128_fullscreen, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_R128_SWAP)] = {r128_cce_swap, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_R128_FLIP)] = {r128_cce_flip, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_R128_CLEAR)] = {r128_cce_clear, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_R128_VERTEX)] = {r128_cce_vertex, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_R128_INDICES)] = {r128_cce_indices, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_R128_BLIT)] = {r128_cce_blit, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_R128_DEPTH)] = {r128_cce_depth, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_R128_STIPPLE)] = {r128_cce_stipple, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_R128_INDIRECT)] = {r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_R128_GETPARAM)] = {r128_getparam, DRM_AUTH},
};
int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c
index 3a1ac5f78b4..291dbf4c818 100644
--- a/drivers/char/drm/r300_cmdbuf.c
+++ b/drivers/char/drm/r300_cmdbuf.c
@@ -52,8 +52,8 @@ static const int r300_cliprect_cntl[4] = {
* Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
* buffer, starting with index n.
*/
-static int r300_emit_cliprects(drm_radeon_private_t * dev_priv,
- drm_radeon_kcmd_buffer_t * cmdbuf, int n)
+static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf, int n)
{
drm_clip_rect_t box;
int nr;
@@ -216,6 +216,7 @@ void r300_init_reg_flags(void)
ADD_RANGE(R300_TX_UNK1_0, 16);
ADD_RANGE(R300_TX_SIZE_0, 16);
ADD_RANGE(R300_TX_FORMAT_0, 16);
+ ADD_RANGE(R300_TX_PITCH_0, 16);
/* Texture offset is dangerous and needs more checking */
ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
ADD_RANGE(R300_TX_UNK4_0, 16);
@@ -242,7 +243,7 @@ static __inline__ int r300_check_range(unsigned reg, int count)
/* we expect offsets passed to the framebuffer to be either within video memory or
within AGP space */
-static __inline__ int r300_check_offset(drm_radeon_private_t * dev_priv,
+static __inline__ int r300_check_offset(drm_radeon_private_t *dev_priv,
u32 offset)
{
/* we realy want to check against end of video aperture
@@ -317,8 +318,8 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
*
* Note that checks are performed on contents and addresses of the registers
*/
-static __inline__ int r300_emit_packet0(drm_radeon_private_t * dev_priv,
- drm_radeon_kcmd_buffer_t * cmdbuf,
+static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf,
drm_r300_cmd_header_t header)
{
int reg;
@@ -363,8 +364,8 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t * dev_priv,
* the graphics card.
* Called by r300_do_cp_cmdbuf.
*/
-static __inline__ int r300_emit_vpu(drm_radeon_private_t * dev_priv,
- drm_radeon_kcmd_buffer_t * cmdbuf,
+static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf,
drm_r300_cmd_header_t header)
{
int sz;
@@ -400,8 +401,8 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t * dev_priv,
* Emit a clear packet from userspace.
* Called by r300_emit_packet3.
*/
-static __inline__ int r300_emit_clear(drm_radeon_private_t * dev_priv,
- drm_radeon_kcmd_buffer_t * cmdbuf)
+static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
{
RING_LOCALS;
@@ -421,8 +422,8 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t * dev_priv,
return 0;
}
-static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t * dev_priv,
- drm_radeon_kcmd_buffer_t * cmdbuf,
+static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf,
u32 header)
{
int count, i, k;
@@ -489,8 +490,8 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t * dev_priv,
return 0;
}
-static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t * dev_priv,
- drm_radeon_kcmd_buffer_t * cmdbuf)
+static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
{
u32 header;
int count;
@@ -554,8 +555,8 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t * dev_priv,
* Emit a rendering packet3 from userspace.
* Called by r300_do_cp_cmdbuf.
*/
-static __inline__ int r300_emit_packet3(drm_radeon_private_t * dev_priv,
- drm_radeon_kcmd_buffer_t * cmdbuf,
+static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf,
drm_r300_cmd_header_t header)
{
int n;
@@ -623,7 +624,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t * dev_priv,
/**
* Emit the sequence to pacify R300.
*/
-static __inline__ void r300_pacify(drm_radeon_private_t * dev_priv)
+static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
{
RING_LOCALS;
@@ -657,9 +658,10 @@ static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
* commands on the DMA ring buffer.
* Called by the ioctl handler function radeon_cp_cmdbuf.
*/
-int r300_do_cp_cmdbuf(drm_device_t * dev,
+int r300_do_cp_cmdbuf(drm_device_t *dev,
DRMFILE filp,
- drm_file_t * filp_priv, drm_radeon_kcmd_buffer_t * cmdbuf)
+ drm_file_t *filp_priv,
+ drm_radeon_kcmd_buffer_t *cmdbuf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h
index e5b73c00239..a0ed20e2522 100644
--- a/drivers/char/drm/r300_reg.h
+++ b/drivers/char/drm/r300_reg.h
@@ -797,6 +797,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
# define R300_TX_FORMAT_YUV_MODE 0x00800000
+#define R300_TX_PITCH_0 0x4500
#define R300_TX_OFFSET_0 0x4540
/* BEGIN: Guess from R200 */
# define R300_TXO_ENDIAN_NO_SWAP (0 << 0)
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index 342302d4674..915665c7fe7 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -1,5 +1,5 @@
-/* radeon_cp.c -- CP support for Radeon -*- linux-c -*-
- *
+/* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */
+/*
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Fremont, California.
* All Rights Reserved.
@@ -824,7 +824,7 @@ static int RADEON_READ_PLL(drm_device_t * dev, int addr)
return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
}
-static int RADEON_READ_PCIE(drm_radeon_private_t * dev_priv, int addr)
+static int RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
{
RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
return RADEON_READ(RADEON_PCIE_DATA);
@@ -1125,7 +1125,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
| (dev_priv->fb_location >> 16));
#if __OS_HAS_AGP
- if (!dev_priv->is_pci) {
+ if (dev_priv->flags & CHIP_IS_AGP) {
RADEON_WRITE(RADEON_MC_AGP_LOCATION,
(((dev_priv->gart_vm_start - 1 +
dev_priv->gart_size) & 0xffff0000) |
@@ -1152,7 +1152,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
dev_priv->ring.tail = cur_read_ptr;
#if __OS_HAS_AGP
- if (!dev_priv->is_pci) {
+ if (dev_priv->flags & CHIP_IS_AGP) {
/* set RADEON_AGP_BASE here instead of relying on X from user space */
RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
@@ -1278,13 +1278,15 @@ static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
/* Enable or disable PCI GART on the chip */
static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
{
- u32 tmp = RADEON_READ(RADEON_AIC_CNTL);
+ u32 tmp;
if (dev_priv->flags & CHIP_IS_PCIE) {
radeon_set_pciegart(dev_priv, on);
return;
}
+ tmp = RADEON_READ(RADEON_AIC_CNTL);
+
if (on) {
RADEON_WRITE(RADEON_AIC_CNTL,
tmp | RADEON_PCIGART_TRANSLATE_EN);
@@ -1312,13 +1314,17 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
+
DRM_DEBUG("\n");
- dev_priv->is_pci = init->is_pci;
+ if (init->is_pci && (dev_priv->flags & CHIP_IS_AGP))
+ {
+ DRM_DEBUG("Forcing AGP card to PCI mode\n");
+ dev_priv->flags &= ~CHIP_IS_AGP;
+ }
- if (dev_priv->is_pci && !dev->sg) {
+ if ((!(dev_priv->flags & CHIP_IS_AGP)) && !dev->sg) {
DRM_ERROR("PCI GART memory not allocated!\n");
- dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
@@ -1327,12 +1333,11 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
if (dev_priv->usec_timeout < 1 ||
dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
DRM_DEBUG("TIMEOUT problem!\n");
- dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
- switch (init->func) {
+ switch(init->func) {
case RADEON_INIT_R200_CP:
dev_priv->microcode_version = UCODE_R200;
break;
@@ -1353,7 +1358,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
(init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
- dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
@@ -1416,8 +1420,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
DRM_GETSAREA();
- dev_priv->fb_offset = init->fb_offset;
- dev_priv->mmio_offset = init->mmio_offset;
dev_priv->ring_offset = init->ring_offset;
dev_priv->ring_rptr_offset = init->ring_rptr_offset;
dev_priv->buffers_offset = init->buffers_offset;
@@ -1425,29 +1427,19 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
- dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
- dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
- if (!dev_priv->mmio) {
- DRM_ERROR("could not find mmio region!\n");
- dev->dev_private = (void *)dev_priv;
- radeon_do_cleanup_cp(dev);
- return DRM_ERR(EINVAL);
- }
dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
if (!dev_priv->cp_ring) {
DRM_ERROR("could not find cp ring region!\n");
- dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
if (!dev_priv->ring_rptr) {
DRM_ERROR("could not find ring read pointer!\n");
- dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
@@ -1455,7 +1447,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
if (!dev->agp_buffer_map) {
DRM_ERROR("could not find dma buffer region!\n");
- dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
@@ -1465,7 +1456,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
drm_core_findmap(dev, init->gart_textures_offset);
if (!dev_priv->gart_textures) {
DRM_ERROR("could not find GART texture region!\n");
- dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
@@ -1476,7 +1466,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
init->sarea_priv_offset);
#if __OS_HAS_AGP
- if (!dev_priv->is_pci) {
+ if (dev_priv->flags & CHIP_IS_AGP) {
drm_core_ioremap(dev_priv->cp_ring, dev);
drm_core_ioremap(dev_priv->ring_rptr, dev);
drm_core_ioremap(dev->agp_buffer_map, dev);
@@ -1484,7 +1474,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
!dev_priv->ring_rptr->handle ||
!dev->agp_buffer_map->handle) {
DRM_ERROR("could not find ioremap agp regions!\n");
- dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(EINVAL);
}
@@ -1525,7 +1514,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
+ RADEON_READ(RADEON_CONFIG_APER_SIZE);
#if __OS_HAS_AGP
- if (!dev_priv->is_pci)
+ if (dev_priv->flags & CHIP_IS_AGP)
dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
- dev->agp->base
+ dev_priv->gart_vm_start);
@@ -1551,7 +1540,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
#if __OS_HAS_AGP
- if (!dev_priv->is_pci) {
+ if (dev_priv->flags & CHIP_IS_AGP) {
/* Turn off PCI GART */
radeon_set_pcigart(dev_priv, 0);
} else
@@ -1561,25 +1550,28 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
if (dev_priv->pcigart_offset) {
dev_priv->gart_info.bus_addr =
dev_priv->pcigart_offset + dev_priv->fb_location;
+ dev_priv->gart_info.mapping.offset =
+ dev_priv->gart_info.bus_addr;
+ dev_priv->gart_info.mapping.size =
+ RADEON_PCIGART_TABLE_SIZE;
+
+ drm_core_ioremap(&dev_priv->gart_info.mapping, dev);
dev_priv->gart_info.addr =
- (unsigned long)drm_ioremap(dev_priv->gart_info.
- bus_addr,
- RADEON_PCIGART_TABLE_SIZE,
- dev);
+ dev_priv->gart_info.mapping.handle;
dev_priv->gart_info.is_pcie =
!!(dev_priv->flags & CHIP_IS_PCIE);
dev_priv->gart_info.gart_table_location =
DRM_ATI_GART_FB;
- DRM_DEBUG("Setting phys_pci_gart to %08lX %08lX\n",
+ DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
dev_priv->gart_info.addr,
dev_priv->pcigart_offset);
} else {
dev_priv->gart_info.gart_table_location =
DRM_ATI_GART_MAIN;
- dev_priv->gart_info.addr =
- dev_priv->gart_info.bus_addr = 0;
+ dev_priv->gart_info.addr = NULL;
+ dev_priv->gart_info.bus_addr = 0;
if (dev_priv->flags & CHIP_IS_PCIE) {
DRM_ERROR
("Cannot use PCI Express without GART in FB memory\n");
@@ -1590,7 +1582,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
DRM_ERROR("failed to init PCI GART!\n");
- dev->dev_private = (void *)dev_priv;
radeon_do_cleanup_cp(dev);
return DRM_ERR(ENOMEM);
}
@@ -1604,8 +1595,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
dev_priv->last_buf = 0;
- dev->dev_private = (void *)dev_priv;
-
radeon_do_engine_reset(dev);
return 0;
@@ -1624,11 +1613,15 @@ static int radeon_do_cleanup_cp(drm_device_t * dev)
drm_irq_uninstall(dev);
#if __OS_HAS_AGP
- if (!dev_priv->is_pci) {
- if (dev_priv->cp_ring != NULL)
+ if (dev_priv->flags & CHIP_IS_AGP) {
+ if (dev_priv->cp_ring != NULL) {
drm_core_ioremapfree(dev_priv->cp_ring, dev);
- if (dev_priv->ring_rptr != NULL)
+ dev_priv->cp_ring = NULL;
+ }
+ if (dev_priv->ring_rptr != NULL) {
drm_core_ioremapfree(dev_priv->ring_rptr, dev);
+ dev_priv->ring_rptr = NULL;
+ }
if (dev->agp_buffer_map != NULL) {
drm_core_ioremapfree(dev->agp_buffer_map, dev);
dev->agp_buffer_map = NULL;
@@ -1636,17 +1629,20 @@ static int radeon_do_cleanup_cp(drm_device_t * dev)
} else
#endif
{
- if (dev_priv->gart_info.bus_addr)
+
+ if (dev_priv->gart_info.bus_addr) {
+ /* Turn off PCI GART */
+ radeon_set_pcigart(dev_priv, 0);
if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
DRM_ERROR("failed to cleanup PCI GART!\n");
+ }
- if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) {
- drm_ioremapfree((void *)dev_priv->gart_info.addr,
- RADEON_PCIGART_TABLE_SIZE, dev);
+ if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
+ {
+ drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
dev_priv->gart_info.addr = 0;
}
}
-
/* only clear to the start of flags */
memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));
@@ -1672,7 +1668,7 @@ static int radeon_do_resume_cp(drm_device_t * dev)
DRM_DEBUG("Starting radeon_do_resume_cp()\n");
#if __OS_HAS_AGP
- if (!dev_priv->is_pci) {
+ if (dev_priv->flags & CHIP_IS_AGP) {
/* Turn off PCI GART */
radeon_set_pcigart(dev_priv, 0);
} else
@@ -2103,7 +2099,7 @@ int radeon_cp_buffers(DRM_IOCTL_ARGS)
return ret;
}
-int radeon_driver_preinit(struct drm_device *dev, unsigned long flags)
+int radeon_driver_load(struct drm_device *dev, unsigned long flags)
{
drm_radeon_private_t *dev_priv;
int ret = 0;
@@ -2136,11 +2132,14 @@ int radeon_driver_preinit(struct drm_device *dev, unsigned long flags)
dev_priv->flags |= CHIP_IS_PCIE;
DRM_DEBUG("%s card detected\n",
- ((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : "PCI"));
+ ((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : (((dev_priv->flags & CHIP_IS_PCIE) ? "PCIE" : "PCI"))));
return ret;
}
-int radeon_presetup(struct drm_device *dev)
+/* Create mappings for registers and framebuffer so userland doesn't necessarily
+ * have to find them.
+ */
+int radeon_driver_firstopen(struct drm_device *dev)
{
int ret;
drm_local_map_t *map;
@@ -2161,12 +2160,11 @@ int radeon_presetup(struct drm_device *dev)
return 0;
}
-int radeon_driver_postcleanup(struct drm_device *dev)
+int radeon_driver_unload(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
-
drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
dev->dev_private = NULL;
diff --git a/drivers/char/drm/radeon_drm.h b/drivers/char/drm/radeon_drm.h
index 1cd81a671a3..9c177a6b2a4 100644
--- a/drivers/char/drm/radeon_drm.h
+++ b/drivers/char/drm/radeon_drm.h
@@ -624,6 +624,11 @@ typedef struct drm_radeon_indirect {
int discard;
} drm_radeon_indirect_t;
+/* enum for card type parameters */
+#define RADEON_CARD_PCI 0
+#define RADEON_CARD_AGP 1
+#define RADEON_CARD_PCIE 2
+
/* 1.3: An ioctl to get parameters that aren't available to the 3d
* client any other way.
*/
@@ -640,6 +645,7 @@ typedef struct drm_radeon_indirect {
#define RADEON_PARAM_SAREA_HANDLE 9
#define RADEON_PARAM_GART_TEX_HANDLE 10
#define RADEON_PARAM_SCRATCH_OFFSET 11
+#define RADEON_PARAM_CARD_TYPE 12
typedef struct drm_radeon_getparam {
int param;
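Interface 1.21 adds a card-type getparam (RADEON_PARAM_CARD_TYPE) together with the RADEON_CARD_PCI/AGP/PCIE values above. The handler itself is not part of the hunks shown here; as a sketch only, it would presumably map the driver's chip flags (CHIP_IS_AGP / CHIP_IS_PCIE, set in radeon_driver_load() in this patch) onto those constants, roughly like:

	/* Sketch only -- the real RADEON_PARAM_CARD_TYPE handler is not shown
	 * in this patch.  CHIP_IS_AGP/CHIP_IS_PCIE and the RADEON_CARD_*
	 * values are taken from the patch; the helper itself is hypothetical.
	 */
	static int example_card_type(const drm_radeon_private_t *dev_priv)
	{
		if (dev_priv->flags & CHIP_IS_AGP)
			return RADEON_CARD_AGP;
		if (dev_priv->flags & CHIP_IS_PCIE)
			return RADEON_CARD_PCIE;
		return RADEON_CARD_PCI;
	}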
diff --git a/drivers/char/drm/radeon_drv.c b/drivers/char/drm/radeon_drv.c
index ee49670d816..b04ed1b562b 100644
--- a/drivers/char/drm/radeon_drv.c
+++ b/drivers/char/drm/radeon_drv.c
@@ -42,29 +42,15 @@ int radeon_no_wb;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers\n");
module_param_named(no_wb, radeon_no_wb, int, 0444);
-static int postinit(struct drm_device *dev, unsigned long flags)
+static int dri_library_name(struct drm_device *dev, char *buf)
{
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
- DRIVER_NAME,
- DRIVER_MAJOR,
- DRIVER_MINOR,
- DRIVER_PATCHLEVEL,
- DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
- );
- return 0;
-}
-
-static int version(drm_version_t * version)
-{
- int len;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ int family = dev_priv->flags & CHIP_FAMILY_MASK;
- version->version_major = DRIVER_MAJOR;
- version->version_minor = DRIVER_MINOR;
- version->version_patchlevel = DRIVER_PATCHLEVEL;
- DRM_COPY(version->name, DRIVER_NAME);
- DRM_COPY(version->date, DRIVER_DATE);
- DRM_COPY(version->desc, DRIVER_DESC);
- return 0;
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ (family < CHIP_R200) ? "radeon" :
+ ((family < CHIP_R300) ? "r200" :
+ "r300"));
}
static struct pci_device_id pciidlist[] = {
@@ -77,23 +63,22 @@ static struct drm_driver driver = {
DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
DRIVER_IRQ_VBL,
.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
- .preinit = radeon_driver_preinit,
- .presetup = radeon_presetup,
- .postcleanup = radeon_driver_postcleanup,
- .prerelease = radeon_driver_prerelease,
- .pretakedown = radeon_driver_pretakedown,
- .open_helper = radeon_driver_open_helper,
+ .load = radeon_driver_load,
+ .firstopen = radeon_driver_firstopen,
+ .open = radeon_driver_open,
+ .preclose = radeon_driver_preclose,
+ .postclose = radeon_driver_postclose,
+ .lastclose = radeon_driver_lastclose,
+ .unload = radeon_driver_unload,
.vblank_wait = radeon_driver_vblank_wait,
+ .dri_library_name = dri_library_name,
.irq_preinstall = radeon_driver_irq_preinstall,
.irq_postinstall = radeon_driver_irq_postinstall,
.irq_uninstall = radeon_driver_irq_uninstall,
.irq_handler = radeon_driver_irq_handler,
- .free_filp_priv = radeon_driver_free_filp_priv,
.reclaim_buffers = drm_core_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
- .postinit = postinit,
- .version = version,
.ioctls = radeon_ioctls,
.dma_ioctl = radeon_cp_buffers,
.fops = {
@@ -107,12 +92,19 @@ static struct drm_driver driver = {
#ifdef CONFIG_COMPAT
.compat_ioctl = radeon_compat_ioctl,
#endif
- }
- ,
+ },
+
.pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- }
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ },
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
};
static int __init radeon_init(void)
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index d92ccee3e54..498b19b1d64 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -38,7 +38,7 @@
#define DRIVER_NAME "radeon"
#define DRIVER_DESC "ATI Radeon"
-#define DRIVER_DATE "20050911"
+#define DRIVER_DATE "20051229"
/* Interface history:
*
@@ -73,7 +73,7 @@
* 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color
* and GL_EXT_blend_[func|equation]_separate on r200
* 1.12- Add R300 CP microcode support - this just loads the CP on r300
- * (No 3D support yet - just microcode loading)
+ * (No 3D support yet - just microcode loading).
* 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters
* - Add hyperz support, add hyperz flags to clear ioctl.
* 1.14- Add support for color tiling
@@ -88,14 +88,13 @@
* R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR
* (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6)
* 1.19- Add support for gart table in FB memory and PCIE r300
+ * 1.20- Add support for r300 texrect
+ * 1.21- Add support for card type getparam
*/
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 19
+#define DRIVER_MINOR 21
#define DRIVER_PATCHLEVEL 0
-#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 )
-#define SET_RING_HEAD(dev_priv,val) DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
-
/*
* Radeon chip families
*/
@@ -103,8 +102,8 @@ enum radeon_family {
CHIP_R100,
CHIP_RS100,
CHIP_RV100,
- CHIP_R200,
CHIP_RV200,
+ CHIP_R200,
CHIP_RS200,
CHIP_R250,
CHIP_RS250,
@@ -138,6 +137,9 @@ enum radeon_chip_flags {
CHIP_IS_PCIE = 0x00200000UL,
};
+#define GET_RING_HEAD(dev_priv) DRM_READ32( (dev_priv)->ring_rptr, 0 )
+#define SET_RING_HEAD(dev_priv,val) DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
+
typedef struct drm_radeon_freelist {
unsigned int age;
drm_buf_t *buf;
@@ -245,8 +247,6 @@ typedef struct drm_radeon_private {
drm_radeon_depth_clear_t depth_clear;
- unsigned long fb_offset;
- unsigned long mmio_offset;
unsigned long ring_offset;
unsigned long ring_rptr_offset;
unsigned long buffers_offset;
@@ -273,7 +273,6 @@ typedef struct drm_radeon_private {
/* starting from here on, data is preserved accross an open */
uint32_t flags; /* see radeon_chip_flags */
- int is_pci;
} drm_radeon_private_t;
typedef struct drm_radeon_buf_priv {
@@ -330,17 +329,14 @@ extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
extern void radeon_driver_irq_preinstall(drm_device_t * dev);
extern void radeon_driver_irq_postinstall(drm_device_t * dev);
extern void radeon_driver_irq_uninstall(drm_device_t * dev);
-extern void radeon_driver_prerelease(drm_device_t * dev, DRMFILE filp);
-extern void radeon_driver_pretakedown(drm_device_t * dev);
-extern int radeon_driver_open_helper(drm_device_t * dev,
- drm_file_t * filp_priv);
-extern void radeon_driver_free_filp_priv(drm_device_t * dev,
- drm_file_t * filp_priv);
-
-extern int radeon_preinit(struct drm_device *dev, unsigned long flags);
-extern int radeon_postinit(struct drm_device *dev, unsigned long flags);
-extern int radeon_postcleanup(struct drm_device *dev);
+extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
+extern int radeon_driver_unload(struct drm_device *dev);
+extern int radeon_driver_firstopen(struct drm_device *dev);
+extern void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp);
+extern void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp);
+extern void radeon_driver_lastclose(drm_device_t * dev);
+extern int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv);
extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
@@ -364,6 +360,8 @@ extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
*/
#define RADEON_AGP_COMMAND 0x0f60
+#define RADEON_AGP_COMMAND_PCI_CONFIG 0x0060 /* offset in PCI config */
+# define RADEON_AGP_ENABLE (1<<8)
#define RADEON_AUX_SCISSOR_CNTL 0x26f0
# define RADEON_EXCLUSIVE_SCISSOR_0 (1 << 24)
# define RADEON_EXCLUSIVE_SCISSOR_1 (1 << 25)
@@ -651,6 +649,8 @@ extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
#define RADEON_WAIT_UNTIL 0x1720
# define RADEON_WAIT_CRTC_PFLIP (1 << 0)
+# define RADEON_WAIT_2D_IDLE (1 << 14)
+# define RADEON_WAIT_3D_IDLE (1 << 15)
# define RADEON_WAIT_2D_IDLECLEAN (1 << 16)
# define RADEON_WAIT_3D_IDLECLEAN (1 << 17)
# define RADEON_WAIT_HOST_IDLECLEAN (1 << 18)
@@ -1105,7 +1105,6 @@ do { \
write = 0; \
_tab += _i; \
} \
- \
while (_size > 0) { \
*(ring + write) = *_tab++; \
write++; \
diff --git a/drivers/char/drm/radeon_ioc32.c b/drivers/char/drm/radeon_ioc32.c
index fef4a2b84c1..0ccfd3618ff 100644
--- a/drivers/char/drm/radeon_ioc32.c
+++ b/drivers/char/drm/radeon_ioc32.c
@@ -28,7 +28,6 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
-#include <linux/ioctl32.h>
#include "drmP.h"
#include "drm.h"
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c
index 231ac1438c6..7bc27516d42 100644
--- a/drivers/char/drm/radeon_state.c
+++ b/drivers/char/drm/radeon_state.c
@@ -1,5 +1,5 @@
-/* radeon_state.c -- State support for Radeon -*- linux-c -*-
- *
+/* radeon_state.c -- State support for Radeon -*- linux-c -*- */
+/*
* Copyright 2000 VA Linux Systems, Inc., Fremont, California.
* All Rights Reserved.
*
@@ -72,10 +72,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case RADEON_EMIT_PP_MISC:
if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
- &data[(RADEON_RB3D_DEPTHOFFSET
- -
- RADEON_PP_MISC) /
- 4])) {
+ &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
DRM_ERROR("Invalid depth buffer offset\n");
return DRM_ERR(EINVAL);
}
@@ -83,10 +80,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case RADEON_EMIT_PP_CNTL:
if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
- &data[(RADEON_RB3D_COLOROFFSET
- -
- RADEON_PP_CNTL) /
- 4])) {
+ &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
DRM_ERROR("Invalid colour buffer offset\n");
return DRM_ERR(EINVAL);
}
@@ -109,10 +103,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case RADEON_EMIT_PP_TXFILTER_1:
case RADEON_EMIT_PP_TXFILTER_2:
if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
- &data[(RADEON_PP_TXOFFSET_0
- -
- RADEON_PP_TXFILTER_0) /
- 4])) {
+ &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
DRM_ERROR("Invalid R100 texture offset\n");
return DRM_ERR(EINVAL);
}
@@ -126,8 +117,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case R200_EMIT_PP_CUBIC_OFFSETS_5:{
int i;
for (i = 0; i < 5; i++) {
- if (radeon_check_and_fixup_offset
- (dev_priv, filp_priv, &data[i])) {
+ if (radeon_check_and_fixup_offset(dev_priv,
+ filp_priv,
+ &data[i])) {
DRM_ERROR
("Invalid R200 cubic texture offset\n");
return DRM_ERR(EINVAL);
@@ -239,8 +231,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
dev_priv,
- drm_file_t * filp_priv,
- drm_radeon_kcmd_buffer_t *cmdbuf,
+ drm_file_t *filp_priv,
+ drm_radeon_kcmd_buffer_t *
+ cmdbuf,
unsigned int *cmdsz)
{
u32 *cmd = (u32 *) cmdbuf->buf;
@@ -555,7 +548,8 @@ static struct {
{R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"},
{R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"},
{R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"},
- {R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
+ {R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1,
+ "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
{R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"},
{R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"},
{R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"},
@@ -569,7 +563,7 @@ static struct {
{R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
"R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
{R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"}, /* 61 */
- {R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
+ {R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
{R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
{R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"},
{R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"},
@@ -592,7 +586,7 @@ static struct {
{RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"},
{RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"},
{R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"},
- {R200_PP_AFS_0, 32, "R200_PP_AFS_0"}, /* 85 */
+ {R200_PP_AFS_0, 32, "R200_PP_AFS_0"}, /* 85 */
{R200_PP_AFS_1, 32, "R200_PP_AFS_1"},
{R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"},
{R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"},
@@ -985,8 +979,8 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
* rendering a quad into just those buffers. Thus, we have to
* make sure the 3D engine is configured correctly.
*/
- if ((dev_priv->microcode_version == UCODE_R200) &&
- (flags & (RADEON_DEPTH | RADEON_STENCIL))) {
+ else if ((dev_priv->microcode_version == UCODE_R200) &&
+ (flags & (RADEON_DEPTH | RADEON_STENCIL))) {
int tempPP_CNTL;
int tempRE_CNTL;
@@ -1637,6 +1631,14 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
(u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
dwords = size / 4;
+#define RADEON_COPY_MT(_buf, _data, _width) \
+ do { \
+ if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
+ DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
+ return DRM_ERR(EFAULT); \
+ } \
+ } while(0)
+
if (microtile) {
/* texture micro tiling in use, minimum texture width is thus 16 bytes.
however, we cannot use blitter directly for texture width < 64 bytes,
@@ -1648,46 +1650,19 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
from user space. */
if (tex->height == 1) {
if (tex_width >= 64 || tex_width <= 16) {
- if (DRM_COPY_FROM_USER(buffer, data,
- tex_width *
- sizeof(u32))) {
- DRM_ERROR
- ("EFAULT on pad, %d bytes\n",
- tex_width);
- return DRM_ERR(EFAULT);
- }
+ RADEON_COPY_MT(buffer, data,
+ (int)(tex_width * sizeof(u32)));
} else if (tex_width == 32) {
- if (DRM_COPY_FROM_USER
- (buffer, data, 16)) {
- DRM_ERROR
- ("EFAULT on pad, %d bytes\n",
- tex_width);
- return DRM_ERR(EFAULT);
- }
- if (DRM_COPY_FROM_USER
- (buffer + 8, data + 16, 16)) {
- DRM_ERROR
- ("EFAULT on pad, %d bytes\n",
- tex_width);
- return DRM_ERR(EFAULT);
- }
+ RADEON_COPY_MT(buffer, data, 16);
+ RADEON_COPY_MT(buffer + 8,
+ data + 16, 16);
}
} else if (tex_width >= 64 || tex_width == 16) {
- if (DRM_COPY_FROM_USER(buffer, data,
- dwords * sizeof(u32))) {
- DRM_ERROR("EFAULT on data, %d dwords\n",
- dwords);
- return DRM_ERR(EFAULT);
- }
+ RADEON_COPY_MT(buffer, data,
+ (int)(dwords * sizeof(u32)));
} else if (tex_width < 16) {
for (i = 0; i < tex->height; i++) {
- if (DRM_COPY_FROM_USER
- (buffer, data, tex_width)) {
- DRM_ERROR
- ("EFAULT on pad, %d bytes\n",
- tex_width);
- return DRM_ERR(EFAULT);
- }
+ RADEON_COPY_MT(buffer, data, tex_width);
buffer += 4;
data += tex_width;
}
@@ -1695,37 +1670,13 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
/* TODO: make sure this works when not fitting in one buffer
(i.e. 32bytes x 2048...) */
for (i = 0; i < tex->height; i += 2) {
- if (DRM_COPY_FROM_USER
- (buffer, data, 16)) {
- DRM_ERROR
- ("EFAULT on pad, %d bytes\n",
- tex_width);
- return DRM_ERR(EFAULT);
- }
+ RADEON_COPY_MT(buffer, data, 16);
data += 16;
- if (DRM_COPY_FROM_USER
- (buffer + 8, data, 16)) {
- DRM_ERROR
- ("EFAULT on pad, %d bytes\n",
- tex_width);
- return DRM_ERR(EFAULT);
- }
+ RADEON_COPY_MT(buffer + 8, data, 16);
data += 16;
- if (DRM_COPY_FROM_USER
- (buffer + 4, data, 16)) {
- DRM_ERROR
- ("EFAULT on pad, %d bytes\n",
- tex_width);
- return DRM_ERR(EFAULT);
- }
+ RADEON_COPY_MT(buffer + 4, data, 16);
data += 16;
- if (DRM_COPY_FROM_USER
- (buffer + 12, data, 16)) {
- DRM_ERROR
- ("EFAULT on pad, %d bytes\n",
- tex_width);
- return DRM_ERR(EFAULT);
- }
+ RADEON_COPY_MT(buffer + 12, data, 16);
data += 16;
buffer += 16;
}
@@ -1735,31 +1686,22 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
/* Texture image width is larger than the minimum, so we
* can upload it directly.
*/
- if (DRM_COPY_FROM_USER(buffer, data,
- dwords * sizeof(u32))) {
- DRM_ERROR("EFAULT on data, %d dwords\n",
- dwords);
- return DRM_ERR(EFAULT);
- }
+ RADEON_COPY_MT(buffer, data,
+ (int)(dwords * sizeof(u32)));
} else {
/* Texture image width is less than the minimum, so we
* need to pad out each image scanline to the minimum
* width.
*/
for (i = 0; i < tex->height; i++) {
- if (DRM_COPY_FROM_USER
- (buffer, data, tex_width)) {
- DRM_ERROR
- ("EFAULT on pad, %d bytes\n",
- tex_width);
- return DRM_ERR(EFAULT);
- }
+ RADEON_COPY_MT(buffer, data, tex_width);
buffer += 8;
data += tex_width;
}
}
}
+#undef RADEON_COPY_MT
buf->filp = filp;
buf->used = size;
offset = dev_priv->gart_buffers_offset + buf->offset;
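
One caveat worth spelling out about the new helper: RADEON_COPY_MT() hides a return inside its body, so every expansion is an early exit from radeon_cp_dispatch_texture() when the user copy faults. Following the macro definition above, a call site expands roughly to:

        /* RADEON_COPY_MT(buffer, data, tex_width) becomes: */
        if (DRM_COPY_FROM_USER(buffer, data, tex_width)) {
                DRM_ERROR("EFAULT on pad, %d bytes\n", tex_width);
                return DRM_ERR(EFAULT);   /* returns from radeon_cp_dispatch_texture() */
        }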
@@ -1821,7 +1763,7 @@ static void radeon_cp_dispatch_stipple(drm_device_t * dev, u32 * stipple)
}
static void radeon_apply_surface_regs(int surf_index,
- drm_radeon_private_t * dev_priv)
+ drm_radeon_private_t *dev_priv)
{
if (!dev_priv->mmio)
return;
@@ -1847,8 +1789,8 @@ static void radeon_apply_surface_regs(int surf_index,
* freed, we suddenly need two surfaces to store A and C, which might
* not always be available.
*/
-static int alloc_surface(drm_radeon_surface_alloc_t * new,
- drm_radeon_private_t * dev_priv, DRMFILE filp)
+static int alloc_surface(drm_radeon_surface_alloc_t *new,
+ drm_radeon_private_t *dev_priv, DRMFILE filp)
{
struct radeon_virt_surface *s;
int i;
@@ -2158,6 +2100,11 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS)
LOCK_TEST_WITH_RETURN(dev, filp);
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return DRM_ERR(EINVAL);
+ }
+
DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex_t __user *) data,
@@ -2596,9 +2543,9 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
return 0;
}
-static __inline__ int radeon_emit_scalars(drm_radeon_private_t * dev_priv,
+static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
drm_radeon_cmd_header_t header,
- drm_radeon_kcmd_buffer_t * cmdbuf)
+ drm_radeon_kcmd_buffer_t *cmdbuf)
{
int sz = header.scalars.count;
int start = header.scalars.offset;
@@ -2618,9 +2565,9 @@ static __inline__ int radeon_emit_scalars(drm_radeon_private_t * dev_priv,
/* God this is ugly
*/
-static __inline__ int radeon_emit_scalars2(drm_radeon_private_t * dev_priv,
+static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
drm_radeon_cmd_header_t header,
- drm_radeon_kcmd_buffer_t * cmdbuf)
+ drm_radeon_kcmd_buffer_t *cmdbuf)
{
int sz = header.scalars.count;
int start = ((unsigned int)header.scalars.offset) + 0x100;
@@ -2638,9 +2585,9 @@ static __inline__ int radeon_emit_scalars2(drm_radeon_private_t * dev_priv,
return 0;
}
-static __inline__ int radeon_emit_vectors(drm_radeon_private_t * dev_priv,
+static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
drm_radeon_cmd_header_t header,
- drm_radeon_kcmd_buffer_t * cmdbuf)
+ drm_radeon_kcmd_buffer_t *cmdbuf)
{
int sz = header.vectors.count;
int start = header.vectors.offset;
@@ -2685,8 +2632,8 @@ static int radeon_emit_packet3(drm_device_t * dev,
return 0;
}
-static int radeon_emit_packet3_cliprect(drm_device_t * dev,
- drm_file_t * filp_priv,
+static int radeon_emit_packet3_cliprect(drm_device_t *dev,
+ drm_file_t *filp_priv,
drm_radeon_kcmd_buffer_t *cmdbuf,
int orig_nbox)
{
@@ -2818,7 +2765,8 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER);
if (kbuf == NULL)
return DRM_ERR(ENOMEM);
- if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf, cmdbuf.bufsz)) {
+ if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf,
+ cmdbuf.bufsz)) {
drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
return DRM_ERR(EFAULT);
}
@@ -2981,7 +2929,7 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
value = dev_priv->gart_vm_start;
break;
case RADEON_PARAM_REGISTER_HANDLE:
- value = dev_priv->mmio_offset;
+ value = dev_priv->mmio->offset;
break;
case RADEON_PARAM_STATUS_HANDLE:
value = dev_priv->ring_rptr_offset;
@@ -3004,6 +2952,15 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
case RADEON_PARAM_GART_TEX_HANDLE:
value = dev_priv->gart_textures_offset;
break;
+
+ case RADEON_PARAM_CARD_TYPE:
+ if (dev_priv->flags & CHIP_IS_PCIE)
+ value = RADEON_CARD_PCIE;
+ else if (dev_priv->flags & CHIP_IS_AGP)
+ value = RADEON_CARD_AGP;
+ else
+ value = RADEON_CARD_PCI;
+ break;
default:
return DRM_ERR(EINVAL);
}
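
The new RADEON_PARAM_CARD_TYPE query lets user space learn from dev_priv->flags whether the card is plain PCI, AGP or PCI-Express. A hedged user-space sketch of how it might be used; drm_radeon_getparam_t, DRM_IOCTL_RADEON_GETPARAM and the RADEON_CARD_* constants are taken from the radeon_drm.h of this era, so treat the exact plumbing as an assumption rather than a quote of this patch:

#include <sys/ioctl.h>
#include "radeon_drm.h"

static int radeon_card_type(int fd)
{
        drm_radeon_getparam_t gp;
        int value = 0;

        gp.param = RADEON_PARAM_CARD_TYPE;
        gp.value = &value;
        if (ioctl(fd, DRM_IOCTL_RADEON_GETPARAM, &gp) < 0)
                return -1;
        return value;   /* RADEON_CARD_PCI, RADEON_CARD_AGP or RADEON_CARD_PCIE */
}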
@@ -3066,10 +3023,11 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
/* When a client dies:
* - Check for and clean up flipped page state
* - Free any alloced GART memory.
+ * - Free any alloced radeon surfaces.
*
* DRM infrastructure takes care of reclaiming dma buffers.
*/
-void radeon_driver_prerelease(drm_device_t * dev, DRMFILE filp)
+void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp)
{
if (dev->dev_private) {
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -3082,16 +3040,17 @@ void radeon_driver_prerelease(drm_device_t * dev, DRMFILE filp)
}
}
-void radeon_driver_pretakedown(drm_device_t * dev)
+void radeon_driver_lastclose(drm_device_t * dev)
{
radeon_do_release(dev);
}
-int radeon_driver_open_helper(drm_device_t * dev, drm_file_t * filp_priv)
+int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
struct drm_radeon_driver_file_fields *radeon_priv;
+ DRM_DEBUG("\n");
radeon_priv =
(struct drm_radeon_driver_file_fields *)
drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES);
@@ -3100,6 +3059,7 @@ int radeon_driver_open_helper(drm_device_t * dev, drm_file_t * filp_priv)
return -ENOMEM;
filp_priv->driver_priv = radeon_priv;
+
if (dev_priv)
radeon_priv->radeon_fb_delta = dev_priv->fb_location;
else
@@ -3107,7 +3067,7 @@ int radeon_driver_open_helper(drm_device_t * dev, drm_file_t * filp_priv)
return 0;
}
-void radeon_driver_free_filp_priv(drm_device_t * dev, drm_file_t * filp_priv)
+void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp_priv)
{
struct drm_radeon_driver_file_fields *radeon_priv =
filp_priv->driver_priv;
@@ -3116,33 +3076,33 @@ void radeon_driver_free_filp_priv(drm_device_t * dev, drm_file_t * filp_priv)
}
drm_ioctl_desc_t radeon_ioctls[] = {
- [DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] = {radeon_cp_init, 1, 1},
- [DRM_IOCTL_NR(DRM_RADEON_CP_START)] = {radeon_cp_start, 1, 1},
- [DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] = {radeon_cp_stop, 1, 1},
- [DRM_IOCTL_NR(DRM_RADEON_CP_RESET)] = {radeon_cp_reset, 1, 1},
- [DRM_IOCTL_NR(DRM_RADEON_CP_IDLE)] = {radeon_cp_idle, 1, 0},
- [DRM_IOCTL_NR(DRM_RADEON_CP_RESUME)] = {radeon_cp_resume, 1, 0},
- [DRM_IOCTL_NR(DRM_RADEON_RESET)] = {radeon_engine_reset, 1, 0},
- [DRM_IOCTL_NR(DRM_RADEON_FULLSCREEN)] = {radeon_fullscreen, 1, 0},
- [DRM_IOCTL_NR(DRM_RADEON_SWAP)] = {radeon_cp_swap, 1, 0},
- [DRM_IOCTL_NR(DRM_RADEON_CLEAR)] = {radeon_cp_clear, 1, 0},
- [DRM_IOCTL_NR(DRM_RADEON_VERTEX)] = {radeon_cp_vertex, 1, 0},
- [DRM_IOCTL_NR(DRM_RADEON_INDICES)] = {radeon_cp_indices, 1, 0},
- [DRM_IOCTL_NR(DRM_RADEON_TEXTURE)] = {radeon_cp_texture, 1, 0},
- [DRM_IOCTL_NR(DRM_RADEON_STIPPLE)] = {radeon_cp_stipple, 1, 0},
- [DRM_IOCTL_NR(DRM_RADEON_INDIRECT)] = {radeon_cp_indirect, 1, 1},
- [DRM_IOCTL_NR(DRM_RADEON_VERTEX2)] = {radeon_cp_vertex2, 1, 0},
- [DRM_IOCTL_NR(DRM_RADEON_CMDBUF)] = {radeon_cp_cmdbuf, 1, 0},
- [DRM_IOCTL_NR(DRM_RADEON_GETPARAM)] = {radeon_cp_getparam, 1, 0},
- [DRM_IOCTL_NR(DRM_RADEON_FLIP)] = {radeon_cp_flip, 1, 0},
- [DRM_IOCTL_NR(DRM_RADEON_ALLOC)] = {radeon_mem_alloc, 1, 0},
- [DRM_IOCTL_NR(DRM_RADEON_FREE)] = {radeon_mem_free, 1, 0},
- [DRM_IOCTL_NR(DRM_RADEON_INIT_HEAP)] = {radeon_mem_init_heap, 1, 1},
- [DRM_IOCTL_NR(DRM_RADEON_IRQ_EMIT)] = {radeon_irq_emit, 1, 0},
- [DRM_IOCTL_NR(DRM_RADEON_IRQ_WAIT)] = {radeon_irq_wait, 1, 0},
- [DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] = {radeon_cp_setparam, 1, 0},
- [DRM_IOCTL_NR(DRM_RADEON_SURF_ALLOC)] = {radeon_surface_alloc, 1, 0},
- [DRM_IOCTL_NR(DRM_RADEON_SURF_FREE)] = {radeon_surface_free, 1, 0}
+ [DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] = {radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_RADEON_CP_START)] = {radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] = {radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_RADEON_CP_RESET)] = {radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_RADEON_CP_IDLE)] = {radeon_cp_idle, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_CP_RESUME)] = {radeon_cp_resume, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_RESET)] = {radeon_engine_reset, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_FULLSCREEN)] = {radeon_fullscreen, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_SWAP)] = {radeon_cp_swap, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_CLEAR)] = {radeon_cp_clear, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_VERTEX)] = {radeon_cp_vertex, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_INDICES)] = {radeon_cp_indices, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_TEXTURE)] = {radeon_cp_texture, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_STIPPLE)] = {radeon_cp_stipple, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_INDIRECT)] = {radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_RADEON_VERTEX2)] = {radeon_cp_vertex2, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_CMDBUF)] = {radeon_cp_cmdbuf, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_GETPARAM)] = {radeon_cp_getparam, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_FLIP)] = {radeon_cp_flip, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_ALLOC)] = {radeon_mem_alloc, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_FREE)] = {radeon_mem_free, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_INIT_HEAP)] = {radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_RADEON_IRQ_EMIT)] = {radeon_irq_emit, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_IRQ_WAIT)] = {radeon_irq_wait, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] = {radeon_cp_setparam, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_SURF_ALLOC)] = {radeon_surface_alloc, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_RADEON_SURF_FREE)] = {radeon_surface_free, DRM_AUTH}
};
int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
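
The ioctl table above no longer encodes permissions as a bare (auth_needed, root_only) integer pair; each entry now carries the symbolic DRM_AUTH, DRM_MASTER and DRM_ROOT_ONLY flags. A minimal sketch of the kind of gate the DRM core applies to these bits; the helper name and the file-private field names (flags, authenticated, master) are assumptions here, not a quote of drm_ioctl():

static int check_ioctl_flags(const drm_ioctl_desc_t *ioctl,
                             const drm_file_t *priv)
{
        if ((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN))
                return DRM_ERR(EACCES);
        if ((ioctl->flags & DRM_AUTH) && !priv->authenticated)
                return DRM_ERR(EACCES);
        if ((ioctl->flags & DRM_MASTER) && !priv->master)
                return DRM_ERR(EACCES);
        return 0;
}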
diff --git a/drivers/char/drm/savage_bci.c b/drivers/char/drm/savage_bci.c
index 6d10515795c..0d426deeefe 100644
--- a/drivers/char/drm/savage_bci.c
+++ b/drivers/char/drm/savage_bci.c
@@ -533,16 +533,32 @@ static void savage_fake_dma_flush(drm_savage_private_t * dev_priv)
dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}
+int savage_driver_load(drm_device_t *dev, unsigned long chipset)
+{
+ drm_savage_private_t *dev_priv;
+
+ dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
+ if (dev_priv == NULL)
+ return DRM_ERR(ENOMEM);
+
+ memset(dev_priv, 0, sizeof(drm_savage_private_t));
+ dev->dev_private = (void *)dev_priv;
+
+ dev_priv->chipset = (enum savage_family)chipset;
+
+ return 0;
+}
+
+
/*
 * Initialize mappings. On Savage4 and SavageIX the alignment
* and size of the aperture is not suitable for automatic MTRR setup
- * in drm_addmap. Therefore we do it manually before the maps are
- * initialized. We also need to take care of deleting the MTRRs in
- * postcleanup.
+ * in drm_addmap. Therefore we add them manually before the maps are
+ * initialized, and tear them down on last close.
*/
-int savage_preinit(drm_device_t * dev, unsigned long chipset)
+int savage_driver_firstopen(drm_device_t *dev)
{
- drm_savage_private_t *dev_priv;
+ drm_savage_private_t *dev_priv = dev->dev_private;
unsigned long mmio_base, fb_base, fb_size, aperture_base;
/* fb_rsrc and aper_rsrc aren't really used currently, but still exist
* in case we decide we need information on the BAR for BSD in the
@@ -551,14 +567,6 @@ int savage_preinit(drm_device_t * dev, unsigned long chipset)
unsigned int fb_rsrc, aper_rsrc;
int ret = 0;
- dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
- if (dev_priv == NULL)
- return DRM_ERR(ENOMEM);
-
- memset(dev_priv, 0, sizeof(drm_savage_private_t));
- dev->dev_private = (void *)dev_priv;
- dev_priv->chipset = (enum savage_family)chipset;
-
dev_priv->mtrr[0].handle = -1;
dev_priv->mtrr[1].handle = -1;
dev_priv->mtrr[2].handle = -1;
@@ -576,26 +584,24 @@ int savage_preinit(drm_device_t * dev, unsigned long chipset)
dev_priv->mtrr[0].base = fb_base;
dev_priv->mtrr[0].size = 0x01000000;
dev_priv->mtrr[0].handle =
- mtrr_add(dev_priv->mtrr[0].base,
- dev_priv->mtrr[0].size, MTRR_TYPE_WRCOMB,
- 1);
+ drm_mtrr_add(dev_priv->mtrr[0].base,
+ dev_priv->mtrr[0].size, DRM_MTRR_WC);
dev_priv->mtrr[1].base = fb_base + 0x02000000;
dev_priv->mtrr[1].size = 0x02000000;
dev_priv->mtrr[1].handle =
- mtrr_add(dev_priv->mtrr[1].base,
- dev_priv->mtrr[1].size, MTRR_TYPE_WRCOMB,
- 1);
+ drm_mtrr_add(dev_priv->mtrr[1].base,
+ dev_priv->mtrr[1].size, DRM_MTRR_WC);
dev_priv->mtrr[2].base = fb_base + 0x04000000;
dev_priv->mtrr[2].size = 0x04000000;
dev_priv->mtrr[2].handle =
- mtrr_add(dev_priv->mtrr[2].base,
- dev_priv->mtrr[2].size, MTRR_TYPE_WRCOMB,
- 1);
+ drm_mtrr_add(dev_priv->mtrr[2].base,
+ dev_priv->mtrr[2].size, DRM_MTRR_WC);
} else {
DRM_ERROR("strange pci_resource_len %08lx\n",
drm_get_resource_len(dev, 0));
}
- } else if (chipset != S3_SUPERSAVAGE && chipset != S3_SAVAGE2000) {
+ } else if (dev_priv->chipset != S3_SUPERSAVAGE &&
+ dev_priv->chipset != S3_SAVAGE2000) {
mmio_base = drm_get_resource_start(dev, 0);
fb_rsrc = 1;
fb_base = drm_get_resource_start(dev, 1);
@@ -609,9 +615,8 @@ int savage_preinit(drm_device_t * dev, unsigned long chipset)
dev_priv->mtrr[0].base = fb_base;
dev_priv->mtrr[0].size = 0x08000000;
dev_priv->mtrr[0].handle =
- mtrr_add(dev_priv->mtrr[0].base,
- dev_priv->mtrr[0].size, MTRR_TYPE_WRCOMB,
- 1);
+ drm_mtrr_add(dev_priv->mtrr[0].base,
+ dev_priv->mtrr[0].size, DRM_MTRR_WC);
} else {
DRM_ERROR("strange pci_resource_len %08lx\n",
drm_get_resource_len(dev, 1));
@@ -648,16 +653,21 @@ int savage_preinit(drm_device_t * dev, unsigned long chipset)
/*
* Delete MTRRs and free device-private data.
*/
-int savage_postcleanup(drm_device_t * dev)
+void savage_driver_lastclose(drm_device_t *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
int i;
for (i = 0; i < 3; ++i)
if (dev_priv->mtrr[i].handle >= 0)
- mtrr_del(dev_priv->mtrr[i].handle,
+ drm_mtrr_del(dev_priv->mtrr[i].handle,
dev_priv->mtrr[i].base,
- dev_priv->mtrr[i].size);
+ dev_priv->mtrr[i].size, DRM_MTRR_WC);
+}
+
+int savage_driver_unload(drm_device_t *dev)
+{
+ drm_savage_private_t *dev_priv = dev->dev_private;
drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
@@ -994,8 +1004,7 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS)
* DMA buffer management
*/
-static int savage_bci_get_buffers(DRMFILE filp, drm_device_t * dev,
- drm_dma_t * d)
+static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d)
{
drm_buf_t *buf;
int i;
@@ -1057,7 +1066,7 @@ int savage_bci_buffers(DRM_IOCTL_ARGS)
return ret;
}
-void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp)
+void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
{
drm_device_dma_t *dma = dev->dma;
drm_savage_private_t *dev_priv = dev->dev_private;
@@ -1090,10 +1099,10 @@ void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp)
}
drm_ioctl_desc_t savage_ioctls[] = {
- [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, 1, 1},
- [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, 1, 0},
- [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, 1, 0},
- [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, 1, 0},
+ [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, DRM_AUTH},
};
int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
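
Note the pairing introduced above: the raw mtrr_add()/mtrr_del() calls become the portable drm_mtrr_add()/drm_mtrr_del() wrappers, and the teardown moves from postcleanup to lastclose. Reduced to a sketch (hypothetical fb_base/fb_size/handle locals, error handling and surrounding driver state trimmed):

        /* firstopen: request write-combining on the aperture */
        handle = drm_mtrr_add(fb_base, fb_size, DRM_MTRR_WC);

        /* lastclose: release it again, passing back the same base/size/flags */
        if (handle >= 0)
                drm_mtrr_del(handle, fb_base, fb_size, DRM_MTRR_WC);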
diff --git a/drivers/char/drm/savage_drv.c b/drivers/char/drm/savage_drv.c
index 22d799cde41..aa6c0d1a82f 100644
--- a/drivers/char/drm/savage_drv.c
+++ b/drivers/char/drm/savage_drv.c
@@ -30,31 +30,6 @@
#include "drm_pciids.h"
-static int postinit(struct drm_device *dev, unsigned long flags)
-{
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
- DRIVER_NAME,
- DRIVER_MAJOR,
- DRIVER_MINOR,
- DRIVER_PATCHLEVEL,
- DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
- );
- return 0;
-}
-
-static int version(drm_version_t * version)
-{
- int len;
-
- version->version_major = DRIVER_MAJOR;
- version->version_minor = DRIVER_MINOR;
- version->version_patchlevel = DRIVER_PATCHLEVEL;
- DRM_COPY(version->name, DRIVER_NAME);
- DRM_COPY(version->date, DRIVER_DATE);
- DRM_COPY(version->desc, DRIVER_DESC);
- return 0;
-}
-
static struct pci_device_id pciidlist[] = {
savage_PCI_IDS
};
@@ -63,13 +38,13 @@ static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
.dev_priv_size = sizeof(drm_savage_buf_priv_t),
- .preinit = savage_preinit,
- .postinit = postinit,
- .postcleanup = savage_postcleanup,
+ .load = savage_driver_load,
+ .firstopen = savage_driver_firstopen,
+ .lastclose = savage_driver_lastclose,
+ .unload = savage_driver_unload,
.reclaim_buffers = savage_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
- .version = version,
.ioctls = savage_ioctls,
.dma_ioctl = savage_bci_buffers,
.fops = {
@@ -80,12 +55,19 @@ static struct drm_driver driver = {
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
- }
- ,
+ },
+
.pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- }
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ },
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
};
static int __init savage_init(void)
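
With name/desc/date/major/minor/patchlevel embedded in struct drm_driver, the per-driver postinit() and version() callbacks deleted above become redundant: the core can produce the same information generically. A sketch of the equivalent banner, reusing the format of the removed postinit(); how the core actually prints it, and the minor_nr name, are assumptions:

        DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
                 driver.name, driver.major, driver.minor,
                 driver.patchlevel, driver.date, minor_nr);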
diff --git a/drivers/char/drm/savage_drv.h b/drivers/char/drm/savage_drv.h
index a4b0fa998a9..dd46cb85439 100644
--- a/drivers/char/drm/savage_drv.h
+++ b/drivers/char/drm/savage_drv.h
@@ -1,5 +1,5 @@
-/* savage_drv.h -- Private header for the savage driver
- *
+/* savage_drv.h -- Private header for the savage driver */
+/*
* Copyright 2004 Felix Kuehling
* All Rights Reserved.
*
@@ -192,7 +192,7 @@ typedef struct drm_savage_private {
/* Err, there is a macro wait_event in include/linux/wait.h.
* Avoid unwanted macro expansion. */
void (*emit_clip_rect) (struct drm_savage_private * dev_priv,
- drm_clip_rect_t * pbox);
+ const drm_clip_rect_t * pbox);
void (*dma_flush) (struct drm_savage_private * dev_priv);
} drm_savage_private_t;
@@ -208,16 +208,18 @@ extern void savage_dma_reset(drm_savage_private_t * dev_priv);
extern void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page);
extern uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv,
unsigned int n);
-extern int savage_preinit(drm_device_t * dev, unsigned long chipset);
-extern int savage_postcleanup(drm_device_t * dev);
+extern int savage_driver_load(drm_device_t *dev, unsigned long chipset);
+extern int savage_driver_firstopen(drm_device_t *dev);
+extern void savage_driver_lastclose(drm_device_t *dev);
+extern int savage_driver_unload(drm_device_t *dev);
extern int savage_do_cleanup_bci(drm_device_t * dev);
extern void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp);
/* state functions */
extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
- drm_clip_rect_t * pbox);
+ const drm_clip_rect_t * pbox);
extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
- drm_clip_rect_t * pbox);
+ const drm_clip_rect_t * pbox);
#define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */
#define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */
@@ -500,15 +502,6 @@ extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
#define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val)
-#define BCI_COPY_FROM_USER(src,n) do { \
- unsigned int i; \
- for (i = 0; i < n; ++i) { \
- uint32_t val; \
- DRM_GET_USER_UNCHECKED(val, &((uint32_t*)(src))[i]); \
- BCI_WRITE(val); \
- } \
-} while(0)
-
/*
* command DMA support
*/
@@ -534,8 +527,8 @@ extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
#define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val)
-#define DMA_COPY_FROM_USER(src,n) do { \
- DRM_COPY_FROM_USER_UNCHECKED(dma_ptr, (src), (n)*4); \
+#define DMA_COPY(src, n) do { \
+ memcpy(dma_ptr, (src), (n)*4); \
dma_ptr += n; \
} while(0)
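
BCI_COPY_FROM_USER disappears and DMA_COPY_FROM_USER becomes DMA_COPY because savage_bci_cmdbuf() (below) now copies the whole command stream, vertex buffer and clip-rect list into kernel memory before dispatch, so the emit path only ever touches kernel pointers. The call-site pattern in savage_state.c therefore reduces to:

        DMA_SET_REGISTERS(start, n);
        DMA_COPY(regs, n);   /* regs is a kernel pointer now; a plain memcpy is enough */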
diff --git a/drivers/char/drm/savage_state.c b/drivers/char/drm/savage_state.c
index e87a5d59b99..ef2581d1614 100644
--- a/drivers/char/drm/savage_state.c
+++ b/drivers/char/drm/savage_state.c
@@ -27,7 +27,7 @@
#include "savage_drv.h"
void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
- drm_clip_rect_t * pbox)
+ const drm_clip_rect_t * pbox)
{
uint32_t scstart = dev_priv->state.s3d.new_scstart;
uint32_t scend = dev_priv->state.s3d.new_scend;
@@ -53,7 +53,7 @@ void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
}
void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
- drm_clip_rect_t * pbox)
+ const drm_clip_rect_t * pbox)
{
uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
@@ -115,18 +115,19 @@ static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit,
#define SAVE_STATE(reg,where) \
if(start <= reg && start+count > reg) \
- DRM_GET_USER_UNCHECKED(dev_priv->state.where, &regs[reg-start])
+ dev_priv->state.where = regs[reg - start]
#define SAVE_STATE_MASK(reg,where,mask) do { \
if(start <= reg && start+count > reg) { \
uint32_t tmp; \
- DRM_GET_USER_UNCHECKED(tmp, &regs[reg-start]); \
+ tmp = regs[reg - start]; \
dev_priv->state.where = (tmp & (mask)) | \
(dev_priv->state.where & ~(mask)); \
} \
} while (0)
+
static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
unsigned int start, unsigned int count,
- const uint32_t __user * regs)
+ const uint32_t *regs)
{
if (start < SAVAGE_TEXPALADDR_S3D ||
start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
@@ -148,8 +149,7 @@ static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
return savage_verify_texaddr(dev_priv, 0,
- dev_priv->state.s3d.
- texaddr);
+ dev_priv->state.s3d.texaddr);
}
return 0;
@@ -157,7 +157,7 @@ static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
unsigned int start, unsigned int count,
- const uint32_t __user * regs)
+ const uint32_t *regs)
{
int ret = 0;
@@ -174,19 +174,18 @@ static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
~SAVAGE_SCISSOR_MASK_S4);
/* if any texture regs were changed ... */
- if (start <= SAVAGE_TEXDESCR_S4 && start + count > SAVAGE_TEXPALADDR_S4) {
+ if (start <= SAVAGE_TEXDESCR_S4 &&
+ start + count > SAVAGE_TEXPALADDR_S4) {
/* ... check texture state */
SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
- ret |=
- savage_verify_texaddr(dev_priv, 0,
- dev_priv->state.s4.texaddr0);
+ ret |= savage_verify_texaddr(dev_priv, 0,
+ dev_priv->state.s4.texaddr0);
if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
- ret |=
- savage_verify_texaddr(dev_priv, 1,
- dev_priv->state.s4.texaddr1);
+ ret |= savage_verify_texaddr(dev_priv, 1,
+ dev_priv->state.s4.texaddr1);
}
return ret;
@@ -197,7 +196,7 @@ static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
static int savage_dispatch_state(drm_savage_private_t * dev_priv,
const drm_savage_cmd_header_t * cmd_header,
- const uint32_t __user * regs)
+ const uint32_t *regs)
{
unsigned int count = cmd_header->state.count;
unsigned int start = cmd_header->state.start;
@@ -209,9 +208,6 @@ static int savage_dispatch_state(drm_savage_private_t * dev_priv,
if (!count)
return 0;
- if (DRM_VERIFYAREA_READ(regs, count * 4))
- return DRM_ERR(EFAULT);
-
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
ret = savage_verify_state_s3d(dev_priv, start, count, regs);
if (ret != 0)
@@ -236,8 +232,8 @@ static int savage_dispatch_state(drm_savage_private_t * dev_priv,
/* scissor regs are emitted in savage_dispatch_draw */
if (start < SAVAGE_DRAWCTRL0_S4) {
if (start + count > SAVAGE_DRAWCTRL1_S4 + 1)
- count2 =
- count - (SAVAGE_DRAWCTRL1_S4 + 1 - start);
+ count2 = count -
+ (SAVAGE_DRAWCTRL1_S4 + 1 - start);
if (start + count > SAVAGE_DRAWCTRL0_S4)
count = SAVAGE_DRAWCTRL0_S4 - start;
} else if (start <= SAVAGE_DRAWCTRL1_S4) {
@@ -263,7 +259,7 @@ static int savage_dispatch_state(drm_savage_private_t * dev_priv,
while (count > 0) {
unsigned int n = count < 255 ? count : 255;
DMA_SET_REGISTERS(start, n);
- DMA_COPY_FROM_USER(regs, n);
+ DMA_COPY(regs, n);
count -= n;
start += n;
regs += n;
@@ -421,8 +417,8 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
const drm_savage_cmd_header_t * cmd_header,
- const uint32_t __user * vtxbuf,
- unsigned int vb_size, unsigned int vb_stride)
+ const uint32_t *vtxbuf, unsigned int vb_size,
+ unsigned int vb_stride)
{
unsigned char reorder = 0;
unsigned int prim = cmd_header->prim.prim;
@@ -507,8 +503,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
for (i = start; i < start + count; ++i) {
unsigned int j = i + reorder[i % 3];
- DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j],
- vtx_size);
+ DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
}
DMA_COMMIT();
@@ -517,13 +512,12 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
DMA_DRAW_PRIMITIVE(count, prim, skip);
if (vb_stride == vtx_size) {
- DMA_COPY_FROM_USER(&vtxbuf[vb_stride * start],
- vtx_size * count);
+ DMA_COPY(&vtxbuf[vb_stride * start],
+ vtx_size * count);
} else {
for (i = start; i < start + count; ++i) {
- DMA_COPY_FROM_USER(&vtxbuf
- [vb_stride * i],
- vtx_size);
+					DMA_COPY(&vtxbuf[vb_stride * i],
+ vtx_size);
}
}
@@ -541,7 +535,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
const drm_savage_cmd_header_t * cmd_header,
- const uint16_t __user * usr_idx,
+ const uint16_t *idx,
const drm_buf_t * dmabuf)
{
unsigned char reorder = 0;
@@ -628,11 +622,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
while (n != 0) {
/* Can emit up to 255 indices (85 triangles) at once. */
unsigned int count = n > 255 ? 255 : n;
- /* Is it ok to allocate 510 bytes on the stack in an ioctl? */
- uint16_t idx[255];
- /* Copy and check indices */
- DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count * 2);
+ /* check indices */
for (i = 0; i < count; ++i) {
if (idx[i] > dmabuf->total / 32) {
DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
@@ -652,8 +643,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
for (i = 1; i + 1 < count; i += 2)
BCI_WRITE(idx[i + reorder[i % 3]] |
- (idx[i + 1 + reorder[(i + 1) % 3]] <<
- 16));
+ (idx[i + 1 +
+ reorder[(i + 1) % 3]] << 16));
if (i < count)
BCI_WRITE(idx[i + reorder[i % 3]]);
} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
@@ -674,7 +665,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
BCI_WRITE(idx[i]);
}
- usr_idx += count;
+ idx += count;
n -= count;
prim |= BCI_CMD_DRAW_CONT;
@@ -685,8 +676,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
const drm_savage_cmd_header_t * cmd_header,
- const uint16_t __user * usr_idx,
- const uint32_t __user * vtxbuf,
+ const uint16_t *idx,
+ const uint32_t *vtxbuf,
unsigned int vb_size, unsigned int vb_stride)
{
unsigned char reorder = 0;
@@ -751,11 +742,8 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
while (n != 0) {
/* Can emit up to 255 vertices (85 triangles) at once. */
unsigned int count = n > 255 ? 255 : n;
- /* Is it ok to allocate 510 bytes on the stack in an ioctl? */
- uint16_t idx[255];
-
- /* Copy and check indices */
- DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count * 2);
+
+ /* Check indices */
for (i = 0; i < count; ++i) {
if (idx[i] > vb_size / (vb_stride * 4)) {
DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
@@ -775,8 +763,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
for (i = 0; i < count; ++i) {
unsigned int j = idx[i + reorder[i % 3]];
- DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j],
- vtx_size);
+ DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
}
DMA_COMMIT();
@@ -786,14 +773,13 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
for (i = 0; i < count; ++i) {
unsigned int j = idx[i];
- DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j],
- vtx_size);
+ DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
}
DMA_COMMIT();
}
- usr_idx += count;
+ idx += count;
n -= count;
prim |= BCI_CMD_DRAW_CONT;
@@ -804,11 +790,11 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
const drm_savage_cmd_header_t * cmd_header,
- const drm_savage_cmd_header_t __user * data,
+ const drm_savage_cmd_header_t *data,
unsigned int nbox,
- const drm_clip_rect_t __user * usr_boxes)
+ const drm_clip_rect_t *boxes)
{
- unsigned int flags = cmd_header->clear0.flags, mask, value;
+ unsigned int flags = cmd_header->clear0.flags;
unsigned int clear_cmd;
unsigned int i, nbufs;
DMA_LOCALS;
@@ -816,9 +802,6 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
if (nbox == 0)
return 0;
- DRM_GET_USER_UNCHECKED(mask, &data->clear1.mask);
- DRM_GET_USER_UNCHECKED(value, &data->clear1.value);
-
clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
BCI_CMD_SET_ROP(clear_cmd, 0xCC);
@@ -828,21 +811,19 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
if (nbufs == 0)
return 0;
- if (mask != 0xffffffff) {
+ if (data->clear1.mask != 0xffffffff) {
/* set mask */
BEGIN_DMA(2);
DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
- DMA_WRITE(mask);
+ DMA_WRITE(data->clear1.mask);
DMA_COMMIT();
}
for (i = 0; i < nbox; ++i) {
- drm_clip_rect_t box;
unsigned int x, y, w, h;
unsigned int buf;
- DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
- x = box.x1, y = box.y1;
- w = box.x2 - box.x1;
- h = box.y2 - box.y1;
+ x = boxes[i].x1, y = boxes[i].y1;
+ w = boxes[i].x2 - boxes[i].x1;
+ h = boxes[i].y2 - boxes[i].y1;
BEGIN_DMA(nbufs * 6);
for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
if (!(flags & buf))
@@ -862,13 +843,13 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
DMA_WRITE(dev_priv->depth_bd);
break;
}
- DMA_WRITE(value);
+ DMA_WRITE(data->clear1.value);
DMA_WRITE(BCI_X_Y(x, y));
DMA_WRITE(BCI_W_H(w, h));
}
DMA_COMMIT();
}
- if (mask != 0xffffffff) {
+ if (data->clear1.mask != 0xffffffff) {
/* reset mask */
BEGIN_DMA(2);
DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
@@ -880,8 +861,7 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
}
static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
- unsigned int nbox,
- const drm_clip_rect_t __user * usr_boxes)
+ unsigned int nbox, const drm_clip_rect_t *boxes)
{
unsigned int swap_cmd;
unsigned int i;
@@ -895,16 +875,14 @@ static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
BCI_CMD_SET_ROP(swap_cmd, 0xCC);
for (i = 0; i < nbox; ++i) {
- drm_clip_rect_t box;
- DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
-
BEGIN_DMA(6);
DMA_WRITE(swap_cmd);
DMA_WRITE(dev_priv->back_offset);
DMA_WRITE(dev_priv->back_bd);
- DMA_WRITE(BCI_X_Y(box.x1, box.y1));
- DMA_WRITE(BCI_X_Y(box.x1, box.y1));
- DMA_WRITE(BCI_W_H(box.x2 - box.x1, box.y2 - box.y1));
+ DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
+ DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
+ DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1,
+ boxes[i].y2 - boxes[i].y1));
DMA_COMMIT();
}
@@ -912,68 +890,52 @@ static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
}
static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
- const drm_savage_cmd_header_t __user * start,
- const drm_savage_cmd_header_t __user * end,
+ const drm_savage_cmd_header_t *start,
+ const drm_savage_cmd_header_t *end,
const drm_buf_t * dmabuf,
- const unsigned int __user * usr_vtxbuf,
+ const unsigned int *vtxbuf,
unsigned int vb_size, unsigned int vb_stride,
unsigned int nbox,
- const drm_clip_rect_t __user * usr_boxes)
+ const drm_clip_rect_t *boxes)
{
unsigned int i, j;
int ret;
for (i = 0; i < nbox; ++i) {
- drm_clip_rect_t box;
- const drm_savage_cmd_header_t __user *usr_cmdbuf;
- DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
- dev_priv->emit_clip_rect(dev_priv, &box);
+ const drm_savage_cmd_header_t *cmdbuf;
+ dev_priv->emit_clip_rect(dev_priv, &boxes[i]);
- usr_cmdbuf = start;
- while (usr_cmdbuf < end) {
+ cmdbuf = start;
+ while (cmdbuf < end) {
drm_savage_cmd_header_t cmd_header;
- DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf,
- sizeof(cmd_header));
- usr_cmdbuf++;
+ cmd_header = *cmdbuf;
+ cmdbuf++;
switch (cmd_header.cmd.cmd) {
case SAVAGE_CMD_DMA_PRIM:
- ret =
- savage_dispatch_dma_prim(dev_priv,
- &cmd_header,
- dmabuf);
+ ret = savage_dispatch_dma_prim(
+ dev_priv, &cmd_header, dmabuf);
break;
case SAVAGE_CMD_VB_PRIM:
- ret =
- savage_dispatch_vb_prim(dev_priv,
- &cmd_header,
- (const uint32_t
- __user *)
- usr_vtxbuf, vb_size,
- vb_stride);
+ ret = savage_dispatch_vb_prim(
+ dev_priv, &cmd_header,
+ vtxbuf, vb_size, vb_stride);
break;
case SAVAGE_CMD_DMA_IDX:
j = (cmd_header.idx.count + 3) / 4;
			/* j was checked in savage_bci_cmdbuf */
- ret =
- savage_dispatch_dma_idx(dev_priv,
- &cmd_header,
- (const uint16_t
- __user *)
- usr_cmdbuf, dmabuf);
- usr_cmdbuf += j;
+ ret = savage_dispatch_dma_idx(dev_priv,
+ &cmd_header, (const uint16_t *)cmdbuf,
+ dmabuf);
+ cmdbuf += j;
break;
case SAVAGE_CMD_VB_IDX:
j = (cmd_header.idx.count + 3) / 4;
			/* j was checked in savage_bci_cmdbuf */
- ret =
- savage_dispatch_vb_idx(dev_priv,
- &cmd_header,
- (const uint16_t
- __user *)usr_cmdbuf,
- (const uint32_t
- __user *)usr_vtxbuf,
- vb_size, vb_stride);
- usr_cmdbuf += j;
+ ret = savage_dispatch_vb_idx(dev_priv,
+ &cmd_header, (const uint16_t *)cmdbuf,
+ (const uint32_t *)vtxbuf, vb_size,
+ vb_stride);
+ cmdbuf += j;
break;
default:
/* What's the best return code? EFAULT? */
@@ -998,10 +960,10 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
drm_device_dma_t *dma = dev->dma;
drm_buf_t *dmabuf;
drm_savage_cmdbuf_t cmdbuf;
- drm_savage_cmd_header_t __user *usr_cmdbuf;
- drm_savage_cmd_header_t __user *first_draw_cmd;
- unsigned int __user *usr_vtxbuf;
- drm_clip_rect_t __user *usr_boxes;
+ drm_savage_cmd_header_t *kcmd_addr = NULL;
+ drm_savage_cmd_header_t *first_draw_cmd;
+ unsigned int *kvb_addr = NULL;
+ drm_clip_rect_t *kbox_addr = NULL;
unsigned int i, j;
int ret = 0;
@@ -1024,15 +986,53 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
dmabuf = NULL;
}
- usr_cmdbuf = (drm_savage_cmd_header_t __user *) cmdbuf.cmd_addr;
- usr_vtxbuf = (unsigned int __user *)cmdbuf.vb_addr;
- usr_boxes = (drm_clip_rect_t __user *) cmdbuf.box_addr;
- if ((cmdbuf.size && DRM_VERIFYAREA_READ(usr_cmdbuf, cmdbuf.size * 8)) ||
- (cmdbuf.vb_size && DRM_VERIFYAREA_READ(usr_vtxbuf, cmdbuf.vb_size))
- || (cmdbuf.nbox
- && DRM_VERIFYAREA_READ(usr_boxes,
- cmdbuf.nbox * sizeof(drm_clip_rect_t))))
- return DRM_ERR(EFAULT);
+ /* Copy the user buffers into kernel temporary areas. This hasn't been
+ * a performance loss compared to VERIFYAREA_READ/
+ * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct
+ * for locking on FreeBSD.
+ */
+ if (cmdbuf.size) {
+ kcmd_addr = drm_alloc(cmdbuf.size * 8, DRM_MEM_DRIVER);
+ if (kcmd_addr == NULL)
+			return DRM_ERR(ENOMEM);
+
+ if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf.cmd_addr,
+ cmdbuf.size * 8))
+ {
+ drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER);
+ return DRM_ERR(EFAULT);
+ }
+ cmdbuf.cmd_addr = kcmd_addr;
+ }
+ if (cmdbuf.vb_size) {
+ kvb_addr = drm_alloc(cmdbuf.vb_size, DRM_MEM_DRIVER);
+ if (kvb_addr == NULL) {
+ ret = DRM_ERR(ENOMEM);
+ goto done;
+ }
+
+ if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf.vb_addr,
+ cmdbuf.vb_size)) {
+ ret = DRM_ERR(EFAULT);
+ goto done;
+ }
+ cmdbuf.vb_addr = kvb_addr;
+ }
+ if (cmdbuf.nbox) {
+ kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(drm_clip_rect_t),
+ DRM_MEM_DRIVER);
+ if (kbox_addr == NULL) {
+ ret = DRM_ERR(ENOMEM);
+ goto done;
+ }
+
+ if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr,
+ cmdbuf.nbox * sizeof(drm_clip_rect_t))) {
+ ret = DRM_ERR(EFAULT);
+ goto done;
+ }
+ cmdbuf.box_addr = kbox_addr;
+ }
/* Make sure writes to DMA buffers are finished before sending
* DMA commands to the graphics hardware. */
@@ -1046,9 +1046,8 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
first_draw_cmd = NULL;
while (i < cmdbuf.size) {
drm_savage_cmd_header_t cmd_header;
- DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf,
- sizeof(cmd_header));
- usr_cmdbuf++;
+ cmd_header = *(drm_savage_cmd_header_t *)cmdbuf.cmd_addr;
+ cmdbuf.cmd_addr++;
i++;
/* Group drawing commands with same state to minimize
@@ -1068,21 +1067,18 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
case SAVAGE_CMD_DMA_PRIM:
case SAVAGE_CMD_VB_PRIM:
if (!first_draw_cmd)
- first_draw_cmd = usr_cmdbuf - 1;
- usr_cmdbuf += j;
+ first_draw_cmd = cmdbuf.cmd_addr - 1;
+ cmdbuf.cmd_addr += j;
i += j;
break;
default:
if (first_draw_cmd) {
- ret =
- savage_dispatch_draw(dev_priv,
- first_draw_cmd,
- usr_cmdbuf - 1, dmabuf,
- usr_vtxbuf,
- cmdbuf.vb_size,
- cmdbuf.vb_stride,
- cmdbuf.nbox,
- usr_boxes);
+ ret = savage_dispatch_draw(
+ dev_priv, first_draw_cmd,
+ cmdbuf.cmd_addr - 1,
+ dmabuf, cmdbuf.vb_addr, cmdbuf.vb_size,
+ cmdbuf.vb_stride,
+ cmdbuf.nbox, cmdbuf.box_addr);
if (ret != 0)
return ret;
first_draw_cmd = NULL;
@@ -1098,12 +1094,12 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
DRM_ERROR("command SAVAGE_CMD_STATE extends "
"beyond end of command buffer\n");
DMA_FLUSH();
- return DRM_ERR(EINVAL);
+ ret = DRM_ERR(EINVAL);
+ goto done;
}
ret = savage_dispatch_state(dev_priv, &cmd_header,
- (uint32_t __user *)
- usr_cmdbuf);
- usr_cmdbuf += j;
+ (const uint32_t *)cmdbuf.cmd_addr);
+ cmdbuf.cmd_addr += j;
i += j;
break;
case SAVAGE_CMD_CLEAR:
@@ -1111,39 +1107,40 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
"beyond end of command buffer\n");
DMA_FLUSH();
- return DRM_ERR(EINVAL);
+ ret = DRM_ERR(EINVAL);
+ goto done;
}
ret = savage_dispatch_clear(dev_priv, &cmd_header,
- usr_cmdbuf,
- cmdbuf.nbox, usr_boxes);
- usr_cmdbuf++;
+ cmdbuf.cmd_addr,
+ cmdbuf.nbox, cmdbuf.box_addr);
+ cmdbuf.cmd_addr++;
i++;
break;
case SAVAGE_CMD_SWAP:
- ret = savage_dispatch_swap(dev_priv,
- cmdbuf.nbox, usr_boxes);
+ ret = savage_dispatch_swap(dev_priv, cmdbuf.nbox,
+ cmdbuf.box_addr);
break;
default:
DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd);
DMA_FLUSH();
- return DRM_ERR(EINVAL);
+ ret = DRM_ERR(EINVAL);
+ goto done;
}
if (ret != 0) {
DMA_FLUSH();
- return ret;
+ goto done;
}
}
if (first_draw_cmd) {
- ret =
- savage_dispatch_draw(dev_priv, first_draw_cmd, usr_cmdbuf,
- dmabuf, usr_vtxbuf, cmdbuf.vb_size,
- cmdbuf.vb_stride, cmdbuf.nbox,
- usr_boxes);
+		ret = savage_dispatch_draw(
+ dev_priv, first_draw_cmd, cmdbuf.cmd_addr, dmabuf,
+ cmdbuf.vb_addr, cmdbuf.vb_size, cmdbuf.vb_stride,
+ cmdbuf.nbox, cmdbuf.box_addr);
if (ret != 0) {
DMA_FLUSH();
- return ret;
+ goto done;
}
}
@@ -1157,5 +1154,12 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
savage_freelist_put(dev, dmabuf);
}
- return 0;
+done:
+ /* If we didn't need to allocate them, these'll be NULL */
+ drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER);
+ drm_free(kvb_addr, cmdbuf.vb_size, DRM_MEM_DRIVER);
+ drm_free(kbox_addr, cmdbuf.nbox * sizeof(drm_clip_rect_t),
+ DRM_MEM_DRIVER);
+
+ return ret;
}
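
The structural change in savage_bci_cmdbuf() above is worth restating: instead of verifying and reading user memory piecemeal during dispatch (DRM_VERIFYAREA_READ plus the *_UNCHECKED copies), the three user buffers are pulled into kernel allocations once, every error path funnels through the done: label, and the allocations are freed there. A condensed sketch of that copy-in pattern; the user_ptr/user_size names are hypothetical stand-ins for the cmd_addr/vb_addr/box_addr triples handled above:

static int copy_in_and_dispatch(void __user *user_ptr, size_t user_size)
{
        void *kbuf = NULL;
        int ret = 0;

        if (user_size) {
                kbuf = drm_alloc(user_size, DRM_MEM_DRIVER);
                if (kbuf == NULL)
                        return DRM_ERR(ENOMEM);
                if (DRM_COPY_FROM_USER(kbuf, user_ptr, user_size)) {
                        ret = DRM_ERR(EFAULT);
                        goto done;
                }
        }

        /* ... dispatch exclusively from kbuf; no user-memory access past here ... */

done:
        drm_free(kbuf, user_size, DRM_MEM_DRIVER);  /* NULL-safe, as the done: label above relies on */
        return ret;
}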
diff --git a/drivers/char/drm/sis_drm.h b/drivers/char/drm/sis_drm.h
index 8f273da76dd..30f7b382746 100644
--- a/drivers/char/drm/sis_drm.h
+++ b/drivers/char/drm/sis_drm.h
@@ -1,3 +1,28 @@
+/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
+/*
+ * Copyright 2005 Eric Anholt
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
#ifndef __SIS_DRM_H__
#define __SIS_DRM_H__
diff --git a/drivers/char/drm/sis_drv.c b/drivers/char/drm/sis_drv.c
index 3cef10643a8..6f6d7d613ed 100644
--- a/drivers/char/drm/sis_drv.c
+++ b/drivers/char/drm/sis_drv.c
@@ -32,31 +32,6 @@
#include "drm_pciids.h"
-static int postinit(struct drm_device *dev, unsigned long flags)
-{
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
- DRIVER_NAME,
- DRIVER_MAJOR,
- DRIVER_MINOR,
- DRIVER_PATCHLEVEL,
- DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
- );
- return 0;
-}
-
-static int version(drm_version_t * version)
-{
- int len;
-
- version->version_major = DRIVER_MAJOR;
- version->version_minor = DRIVER_MINOR;
- version->version_patchlevel = DRIVER_PATCHLEVEL;
- DRM_COPY(version->name, DRIVER_NAME);
- DRM_COPY(version->date, DRIVER_DATE);
- DRM_COPY(version->desc, DRIVER_DESC);
- return 0;
-}
-
static struct pci_device_id pciidlist[] = {
sisdrv_PCI_IDS
};
@@ -68,8 +43,6 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
- .postinit = postinit,
- .version = version,
.ioctls = sis_ioctls,
.fops = {
.owner = THIS_MODULE,
@@ -79,11 +52,18 @@ static struct drm_driver driver = {
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
- },
+ },
.pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- }
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ },
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
};
static int __init sis_init(void)
diff --git a/drivers/char/drm/sis_drv.h b/drivers/char/drm/sis_drv.h
index b1fddad83a9..e218e526950 100644
--- a/drivers/char/drm/sis_drv.h
+++ b/drivers/char/drm/sis_drv.h
@@ -1,5 +1,5 @@
-/* sis_drv.h -- Private header for sis driver -*- linux-c -*-
- *
+/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
+/*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
diff --git a/drivers/char/drm/sis_ds.h b/drivers/char/drm/sis_ds.h
index da850b4f544..94f2b4728b6 100644
--- a/drivers/char/drm/sis_ds.h
+++ b/drivers/char/drm/sis_ds.h
@@ -1,6 +1,7 @@
-/* sis_ds.h -- Private header for Direct Rendering Manager -*- linux-c -*-
+/* sis_ds.h -- Private header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by sclin@sis.com.tw
- *
+ */
+/*
* Copyright 2000 Silicon Integrated Systems Corp, Inc., HsinChu, Taiwan.
* All rights reserved.
*
@@ -35,7 +36,7 @@
#define SET_SIZE 5000
-typedef unsigned int ITEM_TYPE;
+typedef unsigned long ITEM_TYPE;
typedef struct {
ITEM_TYPE val;
diff --git a/drivers/char/drm/sis_mm.c b/drivers/char/drm/sis_mm.c
index a8529728fa6..6774d2fe345 100644
--- a/drivers/char/drm/sis_mm.c
+++ b/drivers/char/drm/sis_mm.c
@@ -86,7 +86,7 @@ static int sis_fb_alloc(DRM_IOCTL_ARGS)
{
drm_sis_mem_t fb;
struct sis_memreq req;
- drm_sis_mem_t __user *argp = (void __user *)data;
+ drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
int retval = 0;
DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb));
@@ -110,7 +110,7 @@ static int sis_fb_alloc(DRM_IOCTL_ARGS)
DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb));
- DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size, req.offset);
+ DRM_DEBUG("alloc fb, size = %d, offset = %ld\n", fb.size, req.offset);
return retval;
}
@@ -127,9 +127,9 @@ static int sis_fb_free(DRM_IOCTL_ARGS)
if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free))
retval = DRM_ERR(EINVAL);
- sis_free((u32) fb.free);
+ sis_free(fb.free);
- DRM_DEBUG("free fb, offset = %lu\n", fb.free);
+ DRM_DEBUG("free fb, offset = 0x%lx\n", fb.free);
return retval;
}
@@ -176,7 +176,7 @@ static int sis_fb_alloc(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sis_mem_t __user *argp = (void __user *)data;
+ drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
drm_sis_mem_t fb;
PMemBlock block;
int retval = 0;
@@ -267,7 +267,7 @@ static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sis_mem_t __user *argp = (void __user *)data;
+ drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
drm_sis_mem_t agp;
PMemBlock block;
int retval = 0;
@@ -367,7 +367,7 @@ int sis_final_context(struct drm_device *dev, int context)
if (i < MAX_CONTEXT) {
set_t *set;
- unsigned int item;
+ ITEM_TYPE item;
int retval;
DRM_DEBUG("find socket %d, context = %d\n", i, context);
@@ -376,7 +376,7 @@ int sis_final_context(struct drm_device *dev, int context)
set = global_ppriv[i].sets[0];
retval = setFirst(set, &item);
while (retval) {
- DRM_DEBUG("free video memory 0x%x\n", item);
+ DRM_DEBUG("free video memory 0x%lx\n", item);
#if defined(__linux__) && defined(CONFIG_FB_SIS)
sis_free(item);
#else
@@ -390,7 +390,7 @@ int sis_final_context(struct drm_device *dev, int context)
set = global_ppriv[i].sets[1];
retval = setFirst(set, &item);
while (retval) {
- DRM_DEBUG("free agp memory 0x%x\n", item);
+ DRM_DEBUG("free agp memory 0x%lx\n", item);
mmFreeMem((PMemBlock) item);
retval = setNext(set, &item);
}
@@ -403,12 +403,12 @@ int sis_final_context(struct drm_device *dev, int context)
}
drm_ioctl_desc_t sis_ioctls[] = {
- [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, 1, 0},
- [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_fb_free, 1, 0},
- [DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = {sis_ioctl_agp_init, 1, 1},
- [DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, 1, 0},
- [DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_ioctl_agp_free, 1, 0},
- [DRM_IOCTL_NR(DRM_SIS_FB_INIT)] = {sis_fb_init, 1, 1}
+ [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_fb_free, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = {sis_ioctl_agp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ [DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_ioctl_agp_free, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_SIS_FB_INIT)] = {sis_fb_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}
};
int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
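
ITEM_TYPE is widened from unsigned int to unsigned long because the AGP branch of sis_final_context() stores PMemBlock pointers in the set; on a 64-bit kernel an unsigned int would truncate them, which is also why the DRM_DEBUG formats switch to %lx. An illustrative round trip, with block a PMemBlock as in sis_fb_alloc above:

        ITEM_TYPE item = (ITEM_TYPE)block;   /* unsigned long holds a full pointer */
        mmFreeMem((PMemBlock)item);          /* the cast back loses nothing on 64-bit */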
diff --git a/drivers/char/drm/tdfx_drv.c b/drivers/char/drm/tdfx_drv.c
index c275cbb6e9c..baa4416032a 100644
--- a/drivers/char/drm/tdfx_drv.c
+++ b/drivers/char/drm/tdfx_drv.c
@@ -36,31 +36,6 @@
#include "drm_pciids.h"
-static int postinit(struct drm_device *dev, unsigned long flags)
-{
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
- DRIVER_NAME,
- DRIVER_MAJOR,
- DRIVER_MINOR,
- DRIVER_PATCHLEVEL,
- DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
- );
- return 0;
-}
-
-static int version(drm_version_t * version)
-{
- int len;
-
- version->version_major = DRIVER_MAJOR;
- version->version_minor = DRIVER_MINOR;
- version->version_patchlevel = DRIVER_PATCHLEVEL;
- DRM_COPY(version->name, DRIVER_NAME);
- DRM_COPY(version->date, DRIVER_DATE);
- DRM_COPY(version->desc, DRIVER_DESC);
- return 0;
-}
-
static struct pci_device_id pciidlist[] = {
tdfx_PCI_IDS
};
@@ -70,8 +45,6 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
- .postinit = postinit,
- .version = version,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -80,11 +53,18 @@ static struct drm_driver driver = {
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
- },
+ },
.pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- }
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ },
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
};
static int __init tdfx_init(void)
diff --git a/drivers/char/drm/tdfx_drv.h b/drivers/char/drm/tdfx_drv.h
index a582a3db4c7..84204ec1b04 100644
--- a/drivers/char/drm/tdfx_drv.h
+++ b/drivers/char/drm/tdfx_drv.h
@@ -1,6 +1,7 @@
/* tdfx.h -- 3dfx DRM template customization -*- linux-c -*-
* Created: Wed Feb 14 12:32:32 2001 by gareth@valinux.com
- *
+ */
+/*
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
@@ -30,10 +31,6 @@
#ifndef __TDFX_H__
#define __TDFX_H__
-/* This remains constant for all DRM template files.
- */
-#define DRM(x) tdfx_##x
-
/* General customization:
*/
diff --git a/drivers/char/drm/via_dma.c b/drivers/char/drm/via_dma.c
index d4b1766608b..593c0b8f650 100644
--- a/drivers/char/drm/via_dma.c
+++ b/drivers/char/drm/via_dma.c
@@ -213,7 +213,9 @@ static int via_initialize(drm_device_t * dev,
dev_priv->dma_wrap = init->size;
dev_priv->dma_offset = init->offset;
dev_priv->last_pause_ptr = NULL;
- dev_priv->hw_addr_ptr = dev_priv->mmio->handle + init->reg_pause_addr;
+ dev_priv->hw_addr_ptr =
+ (volatile uint32_t *)((char *)dev_priv->mmio->handle +
+ init->reg_pause_addr);
via_cmdbuf_start(dev_priv);
@@ -232,13 +234,13 @@ int via_dma_init(DRM_IOCTL_ARGS)
switch (init.func) {
case VIA_INIT_DMA:
- if (!capable(CAP_SYS_ADMIN))
+ if (!DRM_SUSER(DRM_CURPROC))
retcode = DRM_ERR(EPERM);
else
retcode = via_initialize(dev, dev_priv, &init);
break;
case VIA_CLEANUP_DMA:
- if (!capable(CAP_SYS_ADMIN))
+ if (!DRM_SUSER(DRM_CURPROC))
retcode = DRM_ERR(EPERM);
else
retcode = via_dma_cleanup(dev);
@@ -349,9 +351,6 @@ int via_cmdbuffer(DRM_IOCTL_ARGS)
return 0;
}
-extern int
-via_parse_command_stream(drm_device_t * dev, const uint32_t * buf,
- unsigned int size);
static int via_dispatch_pci_cmdbuffer(drm_device_t * dev,
drm_via_cmdbuffer_t * cmd)
{
@@ -450,9 +449,9 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
if ((count <= 8) && (count >= 0)) {
uint32_t rgtr, ptr;
rgtr = *(dev_priv->hw_addr_ptr);
- ptr = ((char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
- dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4 -
- CMDBUF_ALIGNMENT_SIZE;
+ ptr = ((volatile char *)dev_priv->last_pause_ptr -
+ dev_priv->dma_ptr) + dev_priv->dma_offset +
+ (uint32_t) dev_priv->agpAddr + 4 - CMDBUF_ALIGNMENT_SIZE;
if (rgtr <= ptr) {
DRM_ERROR
("Command regulator\npaused at count %d, address %x, "
@@ -472,7 +471,7 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
&& count--) ;
rgtr = *(dev_priv->hw_addr_ptr);
- ptr = ((char *)paused_at - dev_priv->dma_ptr) +
+ ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
ptr_low = (ptr > 3 * CMDBUF_ALIGNMENT_SIZE) ?
@@ -724,3 +723,22 @@ int via_cmdbuf_size(DRM_IOCTL_ARGS)
sizeof(d_siz));
return ret;
}
+
+drm_ioctl_desc_t via_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, DRM_AUTH|DRM_MASTER},
+ [DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, DRM_AUTH|DRM_MASTER},
+ [DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, DRM_AUTH|DRM_MASTER},
+ [DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_VIA_DMA_BLIT)] = {via_dma_blit, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_VIA_BLIT_SYNC)] = {via_dma_blit_sync, DRM_AUTH}
+};
+
+int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
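
With the table exported from the driver and driver.num_ioctls set from via_max_ioctl at init time, the core is expected to index it by the ioctl number relative to DRM_COMMAND_BASE. A rough sketch of that lookup, purely for illustration (the real dispatch lives in the DRM core, not in this driver):

    /* Illustrative only: map an ioctl nr to an entry in via_ioctls[]. */
    static drm_ioctl_desc_t *via_lookup_ioctl(unsigned int nr)
    {
            if (nr < DRM_COMMAND_BASE ||
                nr >= DRM_COMMAND_BASE + via_max_ioctl)
                    return NULL;            /* not a driver-private ioctl */
            return &via_ioctls[nr - DRM_COMMAND_BASE];
    }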
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
new file mode 100644
index 00000000000..9d5e027dae0
--- /dev/null
+++ b/drivers/char/drm/via_dmablit.c
@@ -0,0 +1,805 @@
+/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
+ *
+ * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Hellstrom.
+ * Partially based on code obtained from Digeo Inc.
+ */
+
+
+/*
+ * Unmaps the DMA mappings.
+ * FIXME: Is this a NoOp on x86? Also
+ * FIXME: What happens if this one is called and a pending blit has previously done
+ * the same DMA mappings?
+ */
+
+#include "drmP.h"
+#include "via_drm.h"
+#include "via_drv.h"
+#include "via_dmablit.h"
+
+#include <linux/pagemap.h>
+
+#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
+#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
+#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)
+
+typedef struct _drm_via_descriptor {
+ uint32_t mem_addr;
+ uint32_t dev_addr;
+ uint32_t size;
+ uint32_t next;
+} drm_via_descriptor_t;
+
+
+/*
+ * Unmap a DMA mapping.
+ */
+
+
+
+static void
+via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
+{
+ int num_desc = vsg->num_desc;
+ unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
+ unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
+ drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
+ descriptor_this_page;
+ dma_addr_t next = vsg->chain_start;
+
+ while(num_desc--) {
+ if (descriptor_this_page-- == 0) {
+ cur_descriptor_page--;
+ descriptor_this_page = vsg->descriptors_per_page - 1;
+ desc_ptr = vsg->desc_pages[cur_descriptor_page] +
+ descriptor_this_page;
+ }
+ dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
+ dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
+ next = (dma_addr_t) desc_ptr->next;
+ desc_ptr--;
+ }
+}
+
+/*
+ * If mode = 0, count how many descriptors are needed.
+ * If mode = 1, map the DMA pages for the device, and also assemble and map the descriptors.
+ * Descriptors are run in reverse order by the hardware because we are not allowed to update the
+ * 'next' field without sync calls once the descriptor has already been mapped.
+ */
+
+static void
+via_map_blit_for_device(struct pci_dev *pdev,
+ const drm_via_dmablit_t *xfer,
+ drm_via_sg_info_t *vsg,
+ int mode)
+{
+ unsigned cur_descriptor_page = 0;
+ unsigned num_descriptors_this_page = 0;
+ unsigned char *mem_addr = xfer->mem_addr;
+ unsigned char *cur_mem;
+ unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
+ uint32_t fb_addr = xfer->fb_addr;
+ uint32_t cur_fb;
+ unsigned long line_len;
+ unsigned remaining_len;
+ int num_desc = 0;
+ int cur_line;
+ dma_addr_t next = 0 | VIA_DMA_DPR_EC;
+ drm_via_descriptor_t *desc_ptr = 0;
+
+ if (mode == 1)
+ desc_ptr = vsg->desc_pages[cur_descriptor_page];
+
+ for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
+
+ line_len = xfer->line_length;
+ cur_fb = fb_addr;
+ cur_mem = mem_addr;
+
+ while (line_len > 0) {
+
+ remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
+ line_len -= remaining_len;
+
+ if (mode == 1) {
+ desc_ptr->mem_addr =
+ dma_map_page(&pdev->dev,
+ vsg->pages[VIA_PFN(cur_mem) -
+ VIA_PFN(first_addr)],
+ VIA_PGOFF(cur_mem), remaining_len,
+ vsg->direction);
+ desc_ptr->dev_addr = cur_fb;
+
+ desc_ptr->size = remaining_len;
+ desc_ptr->next = (uint32_t) next;
+ next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
+ DMA_TO_DEVICE);
+ desc_ptr++;
+ if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
+ num_descriptors_this_page = 0;
+ desc_ptr = vsg->desc_pages[++cur_descriptor_page];
+ }
+ }
+
+ num_desc++;
+ cur_mem += remaining_len;
+ cur_fb += remaining_len;
+ }
+
+ mem_addr += xfer->mem_stride;
+ fb_addr += xfer->fb_stride;
+ }
+
+ if (mode == 1) {
+ vsg->chain_start = next;
+ vsg->state = dr_via_device_mapped;
+ }
+ vsg->num_desc = num_desc;
+}
+
+/*
+ * Function that frees up all resources for a blit. It is usable even if the
+ * blit info has only been partially built, as long as the status enum is consistent
+ * with the actual status of the used resources.
+ */
+
+
+void
+via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
+{
+ struct page *page;
+ int i;
+
+ switch(vsg->state) {
+ case dr_via_device_mapped:
+ via_unmap_blit_from_device(pdev, vsg);
+ case dr_via_desc_pages_alloc:
+ for (i=0; i<vsg->num_desc_pages; ++i) {
+ if (vsg->desc_pages[i] != NULL)
+ free_page((unsigned long)vsg->desc_pages[i]);
+ }
+ kfree(vsg->desc_pages);
+ case dr_via_pages_locked:
+ for (i=0; i<vsg->num_pages; ++i) {
+ if ( NULL != (page = vsg->pages[i])) {
+ if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
+ SetPageDirty(page);
+ page_cache_release(page);
+ }
+ }
+ case dr_via_pages_alloc:
+ vfree(vsg->pages);
+ default:
+ vsg->state = dr_via_sg_init;
+ }
+ if (vsg->bounce_buffer) {
+ vfree(vsg->bounce_buffer);
+ vsg->bounce_buffer = NULL;
+ }
+ vsg->free_on_sequence = 0;
+}
+
+/*
+ * Fire a blit engine.
+ */
+
+static void
+via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+
+ VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
+ VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
+ VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
+ VIA_DMA_CSR_DE);
+ VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
+ VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
+ VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
+ VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
+}
+
+/*
+ * Obtain a page pointer array and lock all pages into system memory. A segmentation violation will
+ * occur here if the calling user does not have access to the submitted address.
+ */
+
+static int
+via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
+{
+ int ret;
+ unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
+ vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
+ first_pfn + 1;
+
+ if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
+ return DRM_ERR(ENOMEM);
+ memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
+ down_read(&current->mm->mmap_sem);
+ ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr,
+ vsg->num_pages, vsg->direction, 0, vsg->pages, NULL);
+
+ up_read(&current->mm->mmap_sem);
+ if (ret != vsg->num_pages) {
+ if (ret < 0)
+ return ret;
+ vsg->state = dr_via_pages_locked;
+ return DRM_ERR(EINVAL);
+ }
+ vsg->state = dr_via_pages_locked;
+ DRM_DEBUG("DMA pages locked\n");
+ return 0;
+}
+
+/*
+ * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
+ * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
+ * quite large for some blits, and the pages don't need to be contiguous.
+ */
+
+static int
+via_alloc_desc_pages(drm_via_sg_info_t *vsg)
+{
+ int i;
+
+ vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
+ vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
+ vsg->descriptors_per_page;
+
+ if (NULL == (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL)))
+ return DRM_ERR(ENOMEM);
+
+ memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages);
+ vsg->state = dr_via_desc_pages_alloc;
+ for (i=0; i<vsg->num_desc_pages; ++i) {
+ if (NULL == (vsg->desc_pages[i] =
+ (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
+ return DRM_ERR(ENOMEM);
+ }
+ DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
+ vsg->num_desc);
+ return 0;
+}
+
+static void
+via_abort_dmablit(drm_device_t *dev, int engine)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+
+ VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
+}
+
+static void
+via_dmablit_engine_off(drm_device_t *dev, int engine)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+
+ VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
+}
+
+
+
+/*
+ * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
+ * The rest, like unmapping and freeing memory for completed blits, is done in a separate workqueue
+ * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
+ * the workqueue task takes care of processing associated with the old blit.
+ */
+
+void
+via_dmablit_handler(drm_device_t *dev, int engine, int from_irq)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+ drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
+ int cur;
+ int done_transfer;
+ unsigned long irqsave=0;
+ uint32_t status = 0;
+
+ DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
+ engine, from_irq, (unsigned long) blitq);
+
+ if (from_irq) {
+ spin_lock(&blitq->blit_lock);
+ } else {
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+ }
+
+ done_transfer = blitq->is_active &&
+ (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
+ done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
+
+ cur = blitq->cur;
+ if (done_transfer) {
+
+ blitq->blits[cur]->aborted = blitq->aborting;
+ blitq->done_blit_handle++;
+ DRM_WAKEUP(blitq->blit_queue + cur);
+
+ cur++;
+ if (cur >= VIA_NUM_BLIT_SLOTS)
+ cur = 0;
+ blitq->cur = cur;
+
+ /*
+ * Clear transfer done flag.
+ */
+
+ VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);
+
+ blitq->is_active = 0;
+ blitq->aborting = 0;
+ schedule_work(&blitq->wq);
+
+ } else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
+
+ /*
+ * Abort transfer after one second.
+ */
+
+ via_abort_dmablit(dev, engine);
+ blitq->aborting = 1;
+ blitq->end = jiffies + DRM_HZ;
+ }
+
+ if (!blitq->is_active) {
+ if (blitq->num_outstanding) {
+ via_fire_dmablit(dev, blitq->blits[cur], engine);
+ blitq->is_active = 1;
+ blitq->cur = cur;
+ blitq->num_outstanding--;
+ blitq->end = jiffies + DRM_HZ;
+ if (!timer_pending(&blitq->poll_timer)) {
+ blitq->poll_timer.expires = jiffies+1;
+ add_timer(&blitq->poll_timer);
+ }
+ } else {
+ if (timer_pending(&blitq->poll_timer)) {
+ del_timer(&blitq->poll_timer);
+ }
+ via_dmablit_engine_off(dev, engine);
+ }
+ }
+
+ if (from_irq) {
+ spin_unlock(&blitq->blit_lock);
+ } else {
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+ }
+}
+
+
+
+/*
+ * Check whether this blit is still active, performing necessary locking.
+ */
+
+static int
+via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
+{
+ unsigned long irqsave;
+ uint32_t slot;
+ int active;
+
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+
+ /*
+ * Allow for handle wraparounds.
+ */
+
+ active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
+ ((blitq->cur_blit_handle - handle) <= (1 << 23));
+
+ if (queue && active) {
+ slot = handle - blitq->done_blit_handle + blitq->cur -1;
+ if (slot >= VIA_NUM_BLIT_SLOTS) {
+ slot -= VIA_NUM_BLIT_SLOTS;
+ }
+ *queue = blitq->blit_queue + slot;
+ }
+
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+ return active;
+}
+
+/*
+ * Sync. Wait for up to three seconds for the blit to complete.
+ */
+
+static int
+via_dmablit_sync(drm_device_t *dev, uint32_t handle, int engine)
+{
+
+ drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+ drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
+ wait_queue_head_t *queue;
+ int ret = 0;
+
+ if (via_dmablit_active(blitq, engine, handle, &queue)) {
+ DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
+ !via_dmablit_active(blitq, engine, handle, NULL));
+ }
+ DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
+ handle, engine, ret);
+
+ return ret;
+}
+
+
+/*
+ * A timer that regularly polls the blit engine in cases where we don't have interrupts:
+ * a) Broken hardware (typically those that don't have any video capture facility).
+ * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
+ * The timer and hardware IRQs can and do work in parallel. If the hardware has
+ * IRQs, they will shorten the latency somewhat.
+ */
+
+
+
+static void
+via_dmablit_timer(unsigned long data)
+{
+ drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
+ drm_device_t *dev = blitq->dev;
+ int engine = (int)
+ (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
+
+ DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
+ (unsigned long) jiffies);
+
+ via_dmablit_handler(dev, engine, 0);
+
+ if (!timer_pending(&blitq->poll_timer)) {
+ blitq->poll_timer.expires = jiffies+1;
+ add_timer(&blitq->poll_timer);
+ }
+ via_dmablit_handler(dev, engine, 0);
+
+}
+
+
+
+
+/*
+ * Workqueue task that frees data and mappings associated with a blit.
+ * Also wakes up waiting processes. Each of these tasks handles one
+ * blit engine only and is not necessarily run on every interrupt.
+ */
+
+
+static void
+via_dmablit_workqueue(void *data)
+{
+ drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
+ drm_device_t *dev = blitq->dev;
+ unsigned long irqsave;
+ drm_via_sg_info_t *cur_sg;
+ int cur_released;
+
+
+ DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
+ (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
+
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+
+ while(blitq->serviced != blitq->cur) {
+
+ cur_released = blitq->serviced++;
+
+ DRM_DEBUG("Releasing blit slot %d\n", cur_released);
+
+ if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
+ blitq->serviced = 0;
+
+ cur_sg = blitq->blits[cur_released];
+ blitq->num_free++;
+
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+ DRM_WAKEUP(&blitq->busy_queue);
+
+ via_free_sg_info(dev->pdev, cur_sg);
+ kfree(cur_sg);
+
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+ }
+
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+}
+
+
+/*
+ * Init all blit engines. Currently we use two, but some hardware has four.
+ */
+
+
+void
+via_init_dmablit(drm_device_t *dev)
+{
+ int i,j;
+ drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+ drm_via_blitq_t *blitq;
+
+ pci_set_master(dev->pdev);
+
+ for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
+ blitq = dev_priv->blit_queues + i;
+ blitq->dev = dev;
+ blitq->cur_blit_handle = 0;
+ blitq->done_blit_handle = 0;
+ blitq->head = 0;
+ blitq->cur = 0;
+ blitq->serviced = 0;
+ blitq->num_free = VIA_NUM_BLIT_SLOTS;
+ blitq->num_outstanding = 0;
+ blitq->is_active = 0;
+ blitq->aborting = 0;
+ blitq->blit_lock = SPIN_LOCK_UNLOCKED;
+ for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) {
+ DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
+ }
+ DRM_INIT_WAITQUEUE(&blitq->busy_queue);
+ INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq);
+ init_timer(&blitq->poll_timer);
+ blitq->poll_timer.function = &via_dmablit_timer;
+ blitq->poll_timer.data = (unsigned long) blitq;
+ }
+}
+
+/*
+ * Build all info and do all mappings required for a blit.
+ */
+
+
+static int
+via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
+{
+ int draw = xfer->to_fb;
+ int ret = 0;
+
+ vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ vsg->bounce_buffer = 0;
+
+ vsg->state = dr_via_sg_init;
+
+ if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
+ DRM_ERROR("Zero size bitblt.\n");
+ return DRM_ERR(EINVAL);
+ }
+
+ /*
+ * The check below is a driver limitation, not a hardware one. We
+ * don't want to lock unused pages, and don't want to incorporate the
+ * extra logic of avoiding them. Make sure there are none.
+ * (Not a big limitation anyway.)
+ */
+
+ if (((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) ||
+ (xfer->mem_stride > 2048*4)) {
+ DRM_ERROR("Too large system memory stride. Stride: %d, "
+ "Length: %d\n", xfer->mem_stride, xfer->line_length);
+ return DRM_ERR(EINVAL);
+ }
+
+ if (xfer->num_lines > 2048) {
+ DRM_ERROR("Too many PCI DMA bitblt lines.\n");
+ return DRM_ERR(EINVAL);
+ }
+
+ /*
+ * We allow a negative fb stride so that images can be flipped during
+ * the transfer.
+ */
+
+ if (xfer->mem_stride < xfer->line_length ||
+ abs(xfer->fb_stride) < xfer->line_length) {
+ DRM_ERROR("Invalid frame-buffer / memory stride.\n");
+ return DRM_ERR(EINVAL);
+ }
+
+ /*
+ * A hardware bug seems to be worked around if system memory addresses start on
+ * 16-byte boundaries. This seems a bit restrictive, however; VIA has been contacted
+ * about this. Meanwhile, impose the following restrictions:
+ */
+
+#ifdef VIA_BUGFREE
+ if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
+ ((xfer->mem_stride & 3) != (xfer->fb_stride & 3))) {
+ DRM_ERROR("Invalid DRM bitblt alignment.\n");
+ return DRM_ERR(EINVAL);
+ }
+#else
+ if ((((unsigned long)xfer->mem_addr & 15) ||
+ ((unsigned long)xfer->fb_addr & 3)) || (xfer->mem_stride & 15) ||
+ (xfer->fb_stride & 3)) {
+ DRM_ERROR("Invalid DRM bitblt alignment.\n");
+ return DRM_ERR(EINVAL);
+ }
+#endif
+
+ if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
+ DRM_ERROR("Could not lock DMA pages.\n");
+ via_free_sg_info(dev->pdev, vsg);
+ return ret;
+ }
+
+ via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
+ if (0 != (ret = via_alloc_desc_pages(vsg))) {
+ DRM_ERROR("Could not allocate DMA descriptor pages.\n");
+ via_free_sg_info(dev->pdev, vsg);
+ return ret;
+ }
+ via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
+
+ return 0;
+}
+
+
+/*
+ * Reserve one free slot in the blit queue. Will wait for one second for one
+ * to become available. Otherwise -EBUSY is returned.
+ */
+
+static int
+via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
+{
+ int ret=0;
+ unsigned long irqsave;
+
+ DRM_DEBUG("Num free is %d\n", blitq->num_free);
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+ while(blitq->num_free == 0) {
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+ DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
+ if (ret) {
+ return (DRM_ERR(EINTR) == ret) ? DRM_ERR(EAGAIN) : ret;
+ }
+
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+ }
+
+ blitq->num_free--;
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+ return 0;
+}
+
+/*
+ * Hand back a free slot if we changed our mind.
+ */
+
+static void
+via_dmablit_release_slot(drm_via_blitq_t *blitq)
+{
+ unsigned long irqsave;
+
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+ blitq->num_free++;
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+ DRM_WAKEUP( &blitq->busy_queue );
+}
+
+/*
+ * Grab a free slot. Build blit info and queue a blit.
+ */
+
+
+static int
+via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+ drm_via_sg_info_t *vsg;
+ drm_via_blitq_t *blitq;
+ int ret;
+ int engine;
+ unsigned long irqsave;
+
+ if (dev_priv == NULL) {
+ DRM_ERROR("Called without initialization.\n");
+ return DRM_ERR(EINVAL);
+ }
+
+ engine = (xfer->to_fb) ? 0 : 1;
+ blitq = dev_priv->blit_queues + engine;
+ if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
+ return ret;
+ }
+ if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
+ via_dmablit_release_slot(blitq);
+ return DRM_ERR(ENOMEM);
+ }
+ if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
+ via_dmablit_release_slot(blitq);
+ kfree(vsg);
+ return ret;
+ }
+ spin_lock_irqsave(&blitq->blit_lock, irqsave);
+
+ blitq->blits[blitq->head++] = vsg;
+ if (blitq->head >= VIA_NUM_BLIT_SLOTS)
+ blitq->head = 0;
+ blitq->num_outstanding++;
+ xfer->sync.sync_handle = ++blitq->cur_blit_handle;
+
+ spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+ xfer->sync.engine = engine;
+
+ via_dmablit_handler(dev, engine, 0);
+
+ return 0;
+}
+
+/*
+ * Sync on a previously submitted blit. Note that the X server uses signals extensively, and
+ * that there is a very high probability that this IOCTL will be interrupted by a signal. In that
+ * case it returns with -EAGAIN for the signal to be delivered.
+ * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
+ */
+
+int
+via_dma_blit_sync( DRM_IOCTL_ARGS )
+{
+ drm_via_blitsync_t sync;
+ int err;
+ DRM_DEVICE;
+
+ DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync));
+
+ if (sync.engine >= VIA_NUM_BLIT_ENGINES)
+ return DRM_ERR(EINVAL);
+
+ err = via_dmablit_sync(dev, sync.sync_handle, sync.engine);
+
+ if (DRM_ERR(EINTR) == err)
+ err = DRM_ERR(EAGAIN);
+
+ return err;
+}
+
+
+/*
+ * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
+ * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
+ * be reissued. See the above IOCTL code.
+ */
+
+int
+via_dma_blit( DRM_IOCTL_ARGS )
+{
+ drm_via_dmablit_t xfer;
+ int err;
+ DRM_DEVICE;
+
+ DRM_COPY_FROM_USER_IOCTL(xfer, (drm_via_dmablit_t __user *)data, sizeof(xfer));
+
+ err = via_dmablit(dev, &xfer);
+
+ DRM_COPY_TO_USER_IOCTL((void __user *)data, xfer, sizeof(xfer));
+
+ return err;
+}
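
The handle test in via_dmablit_active() above depends on unsigned 32-bit wraparound: a blit is active when its handle has already been issued (cur_blit_handle minus the handle stays small) but not yet retired (done_blit_handle minus the handle wraps to a large value). A self-contained check of that arithmetic on bare uint32_t values, separate from the driver structures:

    #include <stdint.h>

    /* Same comparison as via_dmablit_active(), on plain values. */
    static int blit_handle_active(uint32_t done, uint32_t cur, uint32_t handle)
    {
            return ((done - handle) > (1UL << 23)) &&
                   ((cur - handle) <= (1UL << 23));
    }

    /*
     * Example around a counter wrap:
     *   done = 0xFFFFFFFE, cur = 0x00000001, handle = 0xFFFFFFFF
     *   done - handle = 0xFFFFFFFF  (> 2^23)  -> not yet retired
     *   cur  - handle = 0x00000002  (<= 2^23) -> already issued
     * so the blit is still reported active despite the wraparound.
     */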
diff --git a/drivers/char/drm/via_dmablit.h b/drivers/char/drm/via_dmablit.h
new file mode 100644
index 00000000000..f4036cd5d0e
--- /dev/null
+++ b/drivers/char/drm/via_dmablit.h
@@ -0,0 +1,140 @@
+/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
+ *
+ * Copyright 2005 Thomas Hellstrom.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Hellstrom.
+ * Register info from Digeo Inc.
+ */
+
+#ifndef _VIA_DMABLIT_H
+#define _VIA_DMABLIT_H
+
+#include <linux/dma-mapping.h>
+
+#define VIA_NUM_BLIT_ENGINES 2
+#define VIA_NUM_BLIT_SLOTS 8
+
+struct _drm_via_descriptor;
+
+typedef struct _drm_via_sg_info {
+ struct page **pages;
+ unsigned long num_pages;
+ struct _drm_via_descriptor **desc_pages;
+ int num_desc_pages;
+ int num_desc;
+ enum dma_data_direction direction;
+ unsigned char *bounce_buffer;
+ dma_addr_t chain_start;
+ uint32_t free_on_sequence;
+ unsigned int descriptors_per_page;
+ int aborted;
+ enum {
+ dr_via_device_mapped,
+ dr_via_desc_pages_alloc,
+ dr_via_pages_locked,
+ dr_via_pages_alloc,
+ dr_via_sg_init
+ } state;
+} drm_via_sg_info_t;
+
+typedef struct _drm_via_blitq {
+ drm_device_t *dev;
+ uint32_t cur_blit_handle;
+ uint32_t done_blit_handle;
+ unsigned serviced;
+ unsigned head;
+ unsigned cur;
+ unsigned num_free;
+ unsigned num_outstanding;
+ unsigned long end;
+ int aborting;
+ int is_active;
+ drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
+ spinlock_t blit_lock;
+ wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS];
+ wait_queue_head_t busy_queue;
+ struct work_struct wq;
+ struct timer_list poll_timer;
+} drm_via_blitq_t;
+
+
+/*
+ * PCI DMA Registers
+ * Channels 2 & 3 don't seem to be implemented in hardware.
+ */
+
+#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
+#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
+#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
+#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
+
+#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
+#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
+#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
+#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
+
+#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
+#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
+#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
+#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
+
+#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
+#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
+#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
+#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
+
+#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
+#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
+#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
+#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
+
+#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
+#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
+#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
+#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
+
+#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
+
+/* Define for DMA engine */
+/* DPR */
+#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
+#define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
+#define VIA_DMA_DPR_DT (1<<3) /* direction of transfer (RO) */
+
+/* MR */
+#define VIA_DMA_MR_CM (1<<0) /* chaining mode */
+#define VIA_DMA_MR_TDIE (1<<1) /* transfer done interrupt enable */
+#define VIA_DMA_MR_HENDMACMD (1<<7) /* ? */
+
+/* CSR */
+#define VIA_DMA_CSR_DE (1<<0) /* DMA enable */
+#define VIA_DMA_CSR_TS (1<<1) /* transfer start */
+#define VIA_DMA_CSR_TA (1<<2) /* transfer abort */
+#define VIA_DMA_CSR_TD (1<<3) /* transfer done */
+#define VIA_DMA_CSR_DD (1<<4) /* descriptor done */
+#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
+
+
+
+#endif
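
The register map above is why the blit code uses two different strides when addressing a per-engine register: the MAR/DAR/BCR/DPR banks repeat every 0x10 bytes, while the MR and CSR banks repeat every 0x04. The open-coded arithmetic in via_fire_dmablit() could equally be captured by helpers like these (a sketch, not part of the patch):

    /* Per-engine register offsets, mirroring the strides in the map above. */
    #define VIA_DMA_MAR(engine)  (VIA_PCI_DMA_MAR0 + (engine) * 0x10)
    #define VIA_DMA_DAR(engine)  (VIA_PCI_DMA_DAR0 + (engine) * 0x10)
    #define VIA_DMA_BCR(engine)  (VIA_PCI_DMA_BCR0 + (engine) * 0x10)
    #define VIA_DMA_DPR(engine)  (VIA_PCI_DMA_DPR0 + (engine) * 0x10)
    #define VIA_DMA_MR(engine)   (VIA_PCI_DMA_MR0  + (engine) * 0x04)
    #define VIA_DMA_CSR(engine)  (VIA_PCI_DMA_CSR0 + (engine) * 0x04)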
diff --git a/drivers/char/drm/via_drm.h b/drivers/char/drm/via_drm.h
index ebde9206115..47f0b5b2637 100644
--- a/drivers/char/drm/via_drm.h
+++ b/drivers/char/drm/via_drm.h
@@ -75,6 +75,8 @@
#define DRM_VIA_CMDBUF_SIZE 0x0b
#define NOT_USED
#define DRM_VIA_WAIT_IRQ 0x0d
+#define DRM_VIA_DMA_BLIT 0x0e
+#define DRM_VIA_BLIT_SYNC 0x0f
#define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t)
#define DRM_IOCTL_VIA_FREEMEM DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t)
@@ -89,6 +91,8 @@
#define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \
drm_via_cmdbuf_size_t)
#define DRM_IOCTL_VIA_WAIT_IRQ DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t)
+#define DRM_IOCTL_VIA_DMA_BLIT DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_DMA_BLIT, drm_via_dmablit_t)
+#define DRM_IOCTL_VIA_BLIT_SYNC DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_BLIT_SYNC, drm_via_blitsync_t)
/* Indices into buf.Setup where various bits of state are mirrored per
* context and per buffer. These can be fired at the card as a unit,
@@ -103,8 +107,12 @@
#define VIA_BACK 0x2
#define VIA_DEPTH 0x4
#define VIA_STENCIL 0x8
-#define VIDEO 0
-#define AGP 1
+#define VIA_MEM_VIDEO 0 /* matches drm constant */
+#define VIA_MEM_AGP 1 /* matches drm constant */
+#define VIA_MEM_SYSTEM 2
+#define VIA_MEM_MIXED 3
+#define VIA_MEM_UNKNOWN 4
+
typedef struct {
uint32_t offset;
uint32_t size;
@@ -192,6 +200,9 @@ typedef struct _drm_via_sarea {
unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */
+ /* Used by the 3d driver only at this point, for pageflipping:
+ */
+ unsigned int pfCurrentOffset;
} drm_via_sarea_t;
typedef struct _drm_via_cmdbuf_size {
@@ -212,6 +223,16 @@ typedef enum {
#define VIA_IRQ_FLAGS_MASK 0xF0000000
+enum drm_via_irqs {
+ drm_via_irq_hqv0 = 0,
+ drm_via_irq_hqv1,
+ drm_via_irq_dma0_dd,
+ drm_via_irq_dma0_td,
+ drm_via_irq_dma1_dd,
+ drm_via_irq_dma1_td,
+ drm_via_irq_num
+};
+
struct drm_via_wait_irq_request {
unsigned irq;
via_irq_seq_type_t type;
@@ -224,20 +245,25 @@ typedef union drm_via_irqwait {
struct drm_wait_vblank_reply reply;
} drm_via_irqwait_t;
-#ifdef __KERNEL__
-
-int via_fb_init(DRM_IOCTL_ARGS);
-int via_mem_alloc(DRM_IOCTL_ARGS);
-int via_mem_free(DRM_IOCTL_ARGS);
-int via_agp_init(DRM_IOCTL_ARGS);
-int via_map_init(DRM_IOCTL_ARGS);
-int via_decoder_futex(DRM_IOCTL_ARGS);
-int via_dma_init(DRM_IOCTL_ARGS);
-int via_cmdbuffer(DRM_IOCTL_ARGS);
-int via_flush_ioctl(DRM_IOCTL_ARGS);
-int via_pci_cmdbuffer(DRM_IOCTL_ARGS);
-int via_cmdbuf_size(DRM_IOCTL_ARGS);
-int via_wait_irq(DRM_IOCTL_ARGS);
+typedef struct drm_via_blitsync {
+ uint32_t sync_handle;
+ unsigned engine;
+} drm_via_blitsync_t;
+
+typedef struct drm_via_dmablit {
+ uint32_t num_lines;
+ uint32_t line_length;
+
+ uint32_t fb_addr;
+ uint32_t fb_stride;
+
+ unsigned char *mem_addr;
+ uint32_t mem_stride;
+
+ int bounce_buffer;
+ int to_fb;
+
+ drm_via_blitsync_t sync;
+} drm_via_dmablit_t;
-#endif
#endif /* _VIA_DRM_H_ */
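
From userspace the two new ioctls are meant to be used as a pair: queue a blit with DRM_IOCTL_VIA_DMA_BLIT, keep the sync handle the kernel writes into xfer.sync, then wait on it with DRM_IOCTL_VIA_BLIT_SYNC, retrying either call when a signal turns it into -EAGAIN. A hedged userspace sketch under those assumptions (fd is an already-open DRM device node, the via_drm.h definitions above are in scope, and error handling is trimmed):

    /* Sketch only: queue one blit and wait for it, retrying on EAGAIN. */
    #include <errno.h>
    #include <sys/ioctl.h>

    static int do_blit(int fd, drm_via_dmablit_t *xfer)
    {
            int ret;

            do {
                    ret = ioctl(fd, DRM_IOCTL_VIA_DMA_BLIT, xfer);
            } while (ret == -1 && errno == EAGAIN);    /* no free slot yet */
            if (ret == -1)
                    return -errno;

            do {
                    ret = ioctl(fd, DRM_IOCTL_VIA_BLIT_SYNC, &xfer->sync);
            } while (ret == -1 && errno == EAGAIN);    /* interrupted by a signal */

            return (ret == -1) ? -errno : 0;
    }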
diff --git a/drivers/char/drm/via_drv.c b/drivers/char/drm/via_drv.c
index 016665e0c69..3f012255d31 100644
--- a/drivers/char/drm/via_drv.c
+++ b/drivers/char/drm/via_drv.c
@@ -29,54 +29,21 @@
#include "drm_pciids.h"
-static int postinit(struct drm_device *dev, unsigned long flags)
+static int dri_library_name(struct drm_device *dev, char *buf)
{
- DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
- DRIVER_NAME,
- DRIVER_MAJOR,
- DRIVER_MINOR,
- DRIVER_PATCHLEVEL,
- DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
- );
- return 0;
-}
-
-static int version(drm_version_t * version)
-{
- int len;
-
- version->version_major = DRIVER_MAJOR;
- version->version_minor = DRIVER_MINOR;
- version->version_patchlevel = DRIVER_PATCHLEVEL;
- DRM_COPY(version->name, DRIVER_NAME);
- DRM_COPY(version->date, DRIVER_DATE);
- DRM_COPY(version->desc, DRIVER_DESC);
- return 0;
+ return snprintf(buf, PAGE_SIZE, "unichrome");
}
static struct pci_device_id pciidlist[] = {
viadrv_PCI_IDS
};
-static drm_ioctl_desc_t ioctls[] = {
- [DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, 1, 0}
-};
-
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
+ .load = via_driver_load,
+ .unload = via_driver_unload,
.context_ctor = via_init_context,
.context_dtor = via_final_context,
.vblank_wait = via_driver_vblank_wait,
@@ -85,13 +52,11 @@ static struct drm_driver driver = {
.irq_uninstall = via_driver_irq_uninstall,
.irq_handler = via_driver_irq_handler,
.dma_quiescent = via_driver_dma_quiescent,
+ .dri_library_name = dri_library_name,
.reclaim_buffers = drm_core_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
- .postinit = postinit,
- .version = version,
- .ioctls = ioctls,
- .num_ioctls = DRM_ARRAY_SIZE(ioctls),
+ .ioctls = via_ioctls,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -100,15 +65,23 @@ static struct drm_driver driver = {
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
- },
+ },
.pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- }
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ },
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
};
static int __init via_init(void)
{
+ driver.num_ioctls = via_max_ioctl;
via_init_command_verifier();
return drm_init(&driver);
}
diff --git a/drivers/char/drm/via_drv.h b/drivers/char/drm/via_drv.h
index 7d5daf43797..aad4f99f540 100644
--- a/drivers/char/drm/via_drv.h
+++ b/drivers/char/drm/via_drv.h
@@ -24,24 +24,26 @@
#ifndef _VIA_DRV_H_
#define _VIA_DRV_H_
-#define DRIVER_AUTHOR "VIA"
+#define DRIVER_AUTHOR "Various"
#define DRIVER_NAME "via"
#define DRIVER_DESC "VIA Unichrome / Pro"
-#define DRIVER_DATE "20050523"
+#define DRIVER_DATE "20051116"
#define DRIVER_MAJOR 2
-#define DRIVER_MINOR 6
-#define DRIVER_PATCHLEVEL 3
+#define DRIVER_MINOR 7
+#define DRIVER_PATCHLEVEL 4
#include "via_verifier.h"
+#include "via_dmablit.h"
+
#define VIA_PCI_BUF_SIZE 60000
#define VIA_FIRE_BUF_SIZE 1024
-#define VIA_NUM_IRQS 2
+#define VIA_NUM_IRQS 4
typedef struct drm_via_ring_buffer {
- drm_map_t map;
+ drm_local_map_t map;
char *virtual_start;
} drm_via_ring_buffer_t;
@@ -56,9 +58,9 @@ typedef struct drm_via_irq {
typedef struct drm_via_private {
drm_via_sarea_t *sarea_priv;
- drm_map_t *sarea;
- drm_map_t *fb;
- drm_map_t *mmio;
+ drm_local_map_t *sarea;
+ drm_local_map_t *fb;
+ drm_local_map_t *mmio;
unsigned long agpAddr;
wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
char *dma_ptr;
@@ -82,8 +84,15 @@ typedef struct drm_via_private {
maskarray_t *irq_masks;
uint32_t irq_enable_mask;
uint32_t irq_pending_mask;
+ int *irq_map;
+ drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
} drm_via_private_t;
+enum via_family {
+ VIA_OTHER = 0,
+ VIA_PRO_GROUP_A,
+};
+
/* VIA MMIO register access */
#define VIA_BASE ((dev_priv->mmio))
@@ -92,12 +101,31 @@ typedef struct drm_via_private {
#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
#define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val)
+extern drm_ioctl_desc_t via_ioctls[];
+extern int via_max_ioctl;
+
+extern int via_fb_init(DRM_IOCTL_ARGS);
+extern int via_mem_alloc(DRM_IOCTL_ARGS);
+extern int via_mem_free(DRM_IOCTL_ARGS);
+extern int via_agp_init(DRM_IOCTL_ARGS);
+extern int via_map_init(DRM_IOCTL_ARGS);
+extern int via_decoder_futex(DRM_IOCTL_ARGS);
+extern int via_dma_init(DRM_IOCTL_ARGS);
+extern int via_cmdbuffer(DRM_IOCTL_ARGS);
+extern int via_flush_ioctl(DRM_IOCTL_ARGS);
+extern int via_pci_cmdbuffer(DRM_IOCTL_ARGS);
+extern int via_cmdbuf_size(DRM_IOCTL_ARGS);
+extern int via_wait_irq(DRM_IOCTL_ARGS);
+extern int via_dma_blit_sync( DRM_IOCTL_ARGS );
+extern int via_dma_blit( DRM_IOCTL_ARGS );
+
+extern int via_driver_load(drm_device_t *dev, unsigned long chipset);
+extern int via_driver_unload(drm_device_t *dev);
+
extern int via_init_context(drm_device_t * dev, int context);
extern int via_final_context(drm_device_t * dev, int context);
extern int via_do_cleanup_map(drm_device_t * dev);
-extern int via_map_init(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
extern int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
@@ -111,8 +139,10 @@ extern int via_driver_dma_quiescent(drm_device_t * dev);
extern void via_init_futex(drm_via_private_t * dev_priv);
extern void via_cleanup_futex(drm_via_private_t * dev_priv);
extern void via_release_futex(drm_via_private_t * dev_priv, int context);
+extern int via_driver_irq_wait(drm_device_t * dev, unsigned int irq,
+ int force_sequence, unsigned int *sequence);
-extern int via_parse_command_stream(drm_device_t * dev, const uint32_t * buf,
- unsigned int size);
+extern void via_dmablit_handler(drm_device_t *dev, int engine, int from_irq);
+extern void via_init_dmablit(drm_device_t *dev);
#endif
diff --git a/drivers/char/drm/via_ds.c b/drivers/char/drm/via_ds.c
index 5c71e089246..9429736b3b9 100644
--- a/drivers/char/drm/via_ds.c
+++ b/drivers/char/drm/via_ds.c
@@ -22,14 +22,7 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/pci.h>
-#include <asm/io.h>
+#include "drmP.h"
#include "via_ds.h"
extern unsigned int VIA_DEBUG;
diff --git a/drivers/char/drm/via_irq.c b/drivers/char/drm/via_irq.c
index d023add1929..56d7e3daea1 100644
--- a/drivers/char/drm/via_irq.c
+++ b/drivers/char/drm/via_irq.c
@@ -50,6 +50,15 @@
#define VIA_IRQ_HQV1_ENABLE (1 << 25)
#define VIA_IRQ_HQV0_PENDING (1 << 9)
#define VIA_IRQ_HQV1_PENDING (1 << 10)
+#define VIA_IRQ_DMA0_DD_ENABLE (1 << 20)
+#define VIA_IRQ_DMA0_TD_ENABLE (1 << 21)
+#define VIA_IRQ_DMA1_DD_ENABLE (1 << 22)
+#define VIA_IRQ_DMA1_TD_ENABLE (1 << 23)
+#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
+#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
+#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
+#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)
+
/*
* Device-specific IRQs go here. This type might need to be extended with
@@ -61,13 +70,24 @@ static maskarray_t via_pro_group_a_irqs[] = {
{VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
0x00000000},
{VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
- 0x00000000}
+ 0x00000000},
+ {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
+ VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
+ {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
+ VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
};
static int via_num_pro_group_a =
sizeof(via_pro_group_a_irqs) / sizeof(maskarray_t);
+static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
-static maskarray_t via_unichrome_irqs[] = { };
+static maskarray_t via_unichrome_irqs[] = {
+ {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
+ VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
+ {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
+ VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
+};
static int via_num_unichrome = sizeof(via_unichrome_irqs) / sizeof(maskarray_t);
+static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
static unsigned time_diff(struct timeval *now, struct timeval *then)
{
@@ -113,6 +133,11 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
atomic_inc(&cur_irq->irq_received);
DRM_WAKEUP(&cur_irq->irq_queue);
handled = 1;
+ if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
+ via_dmablit_handler(dev, 0, 1);
+ } else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) {
+ via_dmablit_handler(dev, 1, 1);
+ }
}
cur_irq++;
}
@@ -165,7 +190,7 @@ int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
return ret;
}
-static int
+int
via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
unsigned int *sequence)
{
@@ -174,6 +199,7 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
drm_via_irq_t *cur_irq = dev_priv->via_irqs;
int ret = 0;
maskarray_t *masks = dev_priv->irq_masks;
+ int real_irq;
DRM_DEBUG("%s\n", __FUNCTION__);
@@ -182,15 +208,23 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
return DRM_ERR(EINVAL);
}
- if (irq >= dev_priv->num_irqs) {
+ if (irq >= drm_via_irq_num) {
DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
irq);
return DRM_ERR(EINVAL);
}
- cur_irq += irq;
+ real_irq = dev_priv->irq_map[irq];
+
+ if (real_irq < 0) {
+ DRM_ERROR("%s Video IRQ %d not available on this hardware.\n",
+ __FUNCTION__, irq);
+ return DRM_ERR(EINVAL);
+ }
+
+ cur_irq += real_irq;
- if (masks[irq][2] && !force_sequence) {
+ if (masks[real_irq][2] && !force_sequence) {
DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
masks[irq][4]));
@@ -226,6 +260,8 @@ void via_driver_irq_preinstall(drm_device_t * dev)
via_pro_group_a_irqs : via_unichrome_irqs;
dev_priv->num_irqs = (dev_priv->pro_group_a) ?
via_num_pro_group_a : via_num_unichrome;
+ dev_priv->irq_map = (dev_priv->pro_group_a) ?
+ via_irqmap_pro_group_a : via_irqmap_unichrome;
for (i = 0; i < dev_priv->num_irqs; ++i) {
atomic_set(&cur_irq->irq_received, 0);
@@ -241,7 +277,7 @@ void via_driver_irq_preinstall(drm_device_t * dev)
dev_priv->last_vblank_valid = 0;
- // Clear VSync interrupt regs
+ /* Clear VSync interrupt regs */
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status &
~(dev_priv->irq_enable_mask));
@@ -291,8 +327,7 @@ void via_driver_irq_uninstall(drm_device_t * dev)
int via_wait_irq(DRM_IOCTL_ARGS)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ DRM_DEVICE;
drm_via_irqwait_t __user *argp = (void __user *)data;
drm_via_irqwait_t irqwait;
struct timeval now;
diff --git a/drivers/char/drm/via_map.c b/drivers/char/drm/via_map.c
index 6bd6ac52ad1..c6a08e96285 100644
--- a/drivers/char/drm/via_map.c
+++ b/drivers/char/drm/via_map.c
@@ -27,16 +27,10 @@
static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
{
- drm_via_private_t *dev_priv;
+ drm_via_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("%s\n", __FUNCTION__);
- dev_priv = drm_alloc(sizeof(drm_via_private_t), DRM_MEM_DRIVER);
- if (dev_priv == NULL)
- return -ENOMEM;
-
- memset(dev_priv, 0, sizeof(drm_via_private_t));
-
DRM_GETSAREA();
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
@@ -67,7 +61,8 @@ static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
dev_priv->agpAddr = init->agpAddr;
via_init_futex(dev_priv);
- dev_priv->pro_group_a = (dev->pdev->device == 0x3118);
+
+ via_init_dmablit(dev);
dev->dev_private = (void *)dev_priv;
return 0;
@@ -75,15 +70,7 @@ static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
int via_do_cleanup_map(drm_device_t * dev)
{
- if (dev->dev_private) {
-
- drm_via_private_t *dev_priv = dev->dev_private;
-
- via_dma_cleanup(dev);
-
- drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
- dev->dev_private = NULL;
- }
+ via_dma_cleanup(dev);
return 0;
}
@@ -107,3 +94,29 @@ int via_map_init(DRM_IOCTL_ARGS)
return -EINVAL;
}
+
+int via_driver_load(drm_device_t *dev, unsigned long chipset)
+{
+ drm_via_private_t *dev_priv;
+
+ dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
+ if (dev_priv == NULL)
+ return DRM_ERR(ENOMEM);
+
+ dev->dev_private = (void *)dev_priv;
+
+ if (chipset == VIA_PRO_GROUP_A)
+ dev_priv->pro_group_a = 1;
+
+ return 0;
+}
+
+int via_driver_unload(drm_device_t *dev)
+{
+ drm_via_private_t *dev_priv = dev->dev_private;
+
+ drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
+
+ return 0;
+}
+
diff --git a/drivers/char/drm/via_mm.c b/drivers/char/drm/via_mm.c
index 3baddacdff2..33e0cb12e4c 100644
--- a/drivers/char/drm/via_mm.c
+++ b/drivers/char/drm/via_mm.c
@@ -42,7 +42,7 @@ static int via_agp_free(drm_via_mem_t * mem);
static int via_fb_alloc(drm_via_mem_t * mem);
static int via_fb_free(drm_via_mem_t * mem);
-static int add_alloc_set(int context, int type, unsigned int val)
+static int add_alloc_set(int context, int type, unsigned long val)
{
int i, retval = 0;
@@ -56,7 +56,7 @@ static int add_alloc_set(int context, int type, unsigned int val)
return retval;
}
-static int del_alloc_set(int context, int type, unsigned int val)
+static int del_alloc_set(int context, int type, unsigned long val)
{
int i, retval = 0;
@@ -199,13 +199,13 @@ int via_mem_alloc(DRM_IOCTL_ARGS)
sizeof(mem));
switch (mem.type) {
- case VIDEO:
+ case VIA_MEM_VIDEO:
if (via_fb_alloc(&mem) < 0)
return -EFAULT;
DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem,
sizeof(mem));
return 0;
- case AGP:
+ case VIA_MEM_AGP:
if (via_agp_alloc(&mem) < 0)
return -EFAULT;
DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem,
@@ -232,7 +232,7 @@ static int via_fb_alloc(drm_via_mem_t * mem)
if (block) {
fb.offset = block->ofs;
fb.free = (unsigned long)block;
- if (!add_alloc_set(fb.context, VIDEO, fb.free)) {
+ if (!add_alloc_set(fb.context, VIA_MEM_VIDEO, fb.free)) {
DRM_DEBUG("adding to allocation set fails\n");
via_mmFreeMem((PMemBlock) fb.free);
retval = -1;
@@ -269,7 +269,7 @@ static int via_agp_alloc(drm_via_mem_t * mem)
if (block) {
agp.offset = block->ofs;
agp.free = (unsigned long)block;
- if (!add_alloc_set(agp.context, AGP, agp.free)) {
+ if (!add_alloc_set(agp.context, VIA_MEM_AGP, agp.free)) {
DRM_DEBUG("adding to allocation set fails\n");
via_mmFreeMem((PMemBlock) agp.free);
retval = -1;
@@ -297,11 +297,11 @@ int via_mem_free(DRM_IOCTL_ARGS)
switch (mem.type) {
- case VIDEO:
+ case VIA_MEM_VIDEO:
if (via_fb_free(&mem) == 0)
return 0;
break;
- case AGP:
+ case VIA_MEM_AGP:
if (via_agp_free(&mem) == 0)
return 0;
break;
@@ -329,7 +329,7 @@ static int via_fb_free(drm_via_mem_t * mem)
via_mmFreeMem((PMemBlock) fb.free);
- if (!del_alloc_set(fb.context, VIDEO, fb.free)) {
+ if (!del_alloc_set(fb.context, VIA_MEM_VIDEO, fb.free)) {
retval = -1;
}
@@ -352,7 +352,7 @@ static int via_agp_free(drm_via_mem_t * mem)
via_mmFreeMem((PMemBlock) agp.free);
- if (!del_alloc_set(agp.context, AGP, agp.free)) {
+ if (!del_alloc_set(agp.context, VIA_MEM_AGP, agp.free)) {
retval = -1;
}
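
Widening the add_alloc_set()/del_alloc_set() value parameter from unsigned int to unsigned long matters because the tracked value is a kernel pointer cast to an integer (fb.free / agp.free); on 64-bit machines an unsigned int would silently truncate it, and the later via_mmFreeMem() cast would reconstruct the wrong pointer. A standalone fragment showing the truncation (illustrative, not driver code):

    #include <stdio.h>

    int main(void)
    {
            void *block = (void *)0xffff810012345678UL;  /* 64-bit kernel-style address */
            unsigned int  narrow = (unsigned long)block; /* truncates to 0x12345678    */
            unsigned long wide   = (unsigned long)block; /* keeps the full value       */

            printf("narrow=%#x wide=%#lx\n", narrow, wide);
            return 0;
    }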
diff --git a/drivers/char/drm/via_verifier.c b/drivers/char/drm/via_verifier.c
index 4ac495f297f..70c897c8876 100644
--- a/drivers/char/drm/via_verifier.c
+++ b/drivers/char/drm/via_verifier.c
@@ -237,7 +237,7 @@ static hazard_t table3[256];
static __inline__ int
eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
{
- if ((*buf - buf_end) >= num_words) {
+ if ((buf_end - *buf) >= num_words) {
*buf += num_words;
return 0;
}
@@ -249,14 +249,14 @@ eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
* Partially stolen from drm_memory.h
*/
-static __inline__ drm_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq,
+static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
unsigned long offset,
unsigned long size,
drm_device_t * dev)
{
struct list_head *list;
drm_map_list_t *r_list;
- drm_map_t *map = seq->map_cache;
+ drm_local_map_t *map = seq->map_cache;
if (map && map->offset <= offset
&& (offset + size) <= (map->offset + map->size)) {
diff --git a/drivers/char/drm/via_verifier.h b/drivers/char/drm/via_verifier.h
index eb4eda34434..256590fcc22 100644
--- a/drivers/char/drm/via_verifier.h
+++ b/drivers/char/drm/via_verifier.h
@@ -47,7 +47,7 @@ typedef struct {
int agp_texture;
int multitex;
drm_device_t *dev;
- drm_map_t *map_cache;
+ drm_local_map_t *map_cache;
uint32_t vertex_count;
int agp;
const uint32_t *buf_start;
@@ -55,5 +55,7 @@ typedef struct {
extern int via_verify_command_stream(const uint32_t * buf, unsigned int size,
drm_device_t * dev, int agp);
+extern int via_parse_command_stream(drm_device_t *dev, const uint32_t *buf,
+ unsigned int size);
#endif
diff --git a/drivers/char/drm/via_video.c b/drivers/char/drm/via_video.c
index 7fab9fbdf42..300ac61b09e 100644
--- a/drivers/char/drm/via_video.c
+++ b/drivers/char/drm/via_video.c
@@ -50,8 +50,11 @@ void via_release_futex(drm_via_private_t * dev_priv, int context)
unsigned int i;
volatile int *lock;
+ if (!dev_priv->sarea_priv)
+ return;
+
for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
- lock = (int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
+ lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
if (_DRM_LOCK_IS_HELD(*lock)
&& (*lock & _DRM_LOCK_CONT)) {
@@ -79,7 +82,7 @@ int via_decoder_futex(DRM_IOCTL_ARGS)
if (fx.lock > VIA_NR_XVMC_LOCKS)
return -EFAULT;
- lock = (int *)XVMCLOCKPTR(sAPriv, fx.lock);
+ lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx.lock);
switch (fx.func) {
case VIA_FUTEX_WAIT:
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index 8693835cb2d..e233cf280bc 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -165,7 +165,7 @@ static int dsp56k_reset(void)
return 0;
}
-static int dsp56k_upload(u_char *bin, int len)
+static int dsp56k_upload(u_char __user *bin, int len)
{
int i;
u_char *p;
@@ -199,7 +199,7 @@ static int dsp56k_upload(u_char *bin, int len)
return 0;
}
-static ssize_t dsp56k_read(struct file *file, char *buf, size_t count,
+static ssize_t dsp56k_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
struct inode *inode = file->f_dentry->d_inode;
@@ -225,10 +225,10 @@ static ssize_t dsp56k_read(struct file *file, char *buf, size_t count,
}
case 2: /* 16 bit */
{
- short *data;
+ short __user *data;
count /= 2;
- data = (short*) buf;
+ data = (short __user *) buf;
handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE,
put_user(dsp56k_host_interface.data.w[1], data+n++));
return 2*n;
@@ -244,10 +244,10 @@ static ssize_t dsp56k_read(struct file *file, char *buf, size_t count,
}
case 4: /* 32 bit */
{
- long *data;
+ long __user *data;
count /= 4;
- data = (long*) buf;
+ data = (long __user *) buf;
handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE,
put_user(dsp56k_host_interface.data.l, data+n++));
return 4*n;
@@ -262,7 +262,7 @@ static ssize_t dsp56k_read(struct file *file, char *buf, size_t count,
}
}
-static ssize_t dsp56k_write(struct file *file, const char *buf, size_t count,
+static ssize_t dsp56k_write(struct file *file, const char __user *buf, size_t count,
loff_t *ppos)
{
struct inode *inode = file->f_dentry->d_inode;
@@ -287,10 +287,10 @@ static ssize_t dsp56k_write(struct file *file, const char *buf, size_t count,
}
case 2: /* 16 bit */
{
- const short *data;
+ const short __user *data;
count /= 2;
- data = (const short *)buf;
+ data = (const short __user *)buf;
handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
get_user(dsp56k_host_interface.data.w[1], data+n++));
return 2*n;
@@ -306,10 +306,10 @@ static ssize_t dsp56k_write(struct file *file, const char *buf, size_t count,
}
case 4: /* 32 bit */
{
- const long *data;
+ const long __user *data;
count /= 4;
- data = (const long *)buf;
+ data = (const long __user *)buf;
handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
get_user(dsp56k_host_interface.data.l, data+n++));
return 4*n;
@@ -328,6 +328,7 @@ static int dsp56k_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
int dev = iminor(inode) & 0x0f;
+ void __user *argp = (void __user *)arg;
switch(dev)
{
@@ -336,9 +337,9 @@ static int dsp56k_ioctl(struct inode *inode, struct file *file,
switch(cmd) {
case DSP56K_UPLOAD:
{
- char *bin;
+ char __user *bin;
int r, len;
- struct dsp56k_upload *binary = (struct dsp56k_upload *) arg;
+ struct dsp56k_upload __user *binary = argp;
if(get_user(len, &binary->len) < 0)
return -EFAULT;
@@ -372,7 +373,7 @@ static int dsp56k_ioctl(struct inode *inode, struct file *file,
case DSP56K_HOST_FLAGS:
{
int dir, out, status;
- struct dsp56k_host_flags *hf = (struct dsp56k_host_flags*) arg;
+ struct dsp56k_host_flags __user *hf = argp;
if(get_user(dir, &hf->dir) < 0)
return -EFAULT;
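
The dsp56k changes are purely annotation work: user-space pointers get the __user marker so sparse can flag direct dereferences, while the data still has to move through copy_from_user()/get_user(). A minimal sketch of the pattern, independent of this driver:

    #include <linux/fs.h>
    #include <asm/uaccess.h>

    /* Sketch: __user marks the pointer, the accessors do the transfer. */
    static ssize_t example_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
    {
            char kbuf[64];

            if (count > sizeof(kbuf))
                    count = sizeof(kbuf);
            if (copy_from_user(kbuf, buf, count))   /* never dereference buf */
                    return -EFAULT;
            /* ... consume kbuf ... */
            return count;
    }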
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 407708a001e..765c5c108bf 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -1786,9 +1786,7 @@ static void doevent(int crd)
if (tty) { /* Begin if valid tty */
if (event & BREAK_IND) { /* Begin if BREAK_IND */
/* A break has been indicated */
- tty->flip.count++;
- *tty->flip.flag_buf_ptr++ = TTY_BREAK;
- *tty->flip.char_buf_ptr++ = 0;
+ tty_insert_flip_char(tty, 0, TTY_BREAK);
tty_schedule_flip(tty);
} else if (event & LOWTX_IND) { /* Begin LOWTX_IND */
if (ch->statusflags & LOWWAIT)
@@ -2124,7 +2122,6 @@ static void receive_data(struct channel *ch)
int dataToRead, wrapgap, bytesAvailable;
unsigned int tail, head;
unsigned int wrapmask;
- int rc;
/* ---------------------------------------------------------------
This routine is called by doint when a receive data event
@@ -2162,16 +2159,15 @@ static void receive_data(struct channel *ch)
return;
}
- if (tty->flip.count == TTY_FLIPBUF_SIZE)
+ if (tty_buffer_request_room(tty, bytesAvailable + 1) == 0)
return;
if (readb(&bc->orun)) {
writeb(0, &bc->orun);
printk(KERN_WARNING "epca; overrun! DigiBoard device %s\n",tty->name);
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
}
rxwinon(ch);
- rptr = tty->flip.char_buf_ptr;
- rc = tty->flip.count;
while (bytesAvailable > 0) { /* Begin while there is data on the card */
wrapgap = (head >= tail) ? head - tail : ch->rxbufsize - tail;
/* ---------------------------------------------------------------
@@ -2183,8 +2179,7 @@ static void receive_data(struct channel *ch)
/* --------------------------------------------------------------
Make sure we don't overflow the buffer
----------------------------------------------------------------- */
- if ((rc + dataToRead) > TTY_FLIPBUF_SIZE)
- dataToRead = TTY_FLIPBUF_SIZE - rc;
+ dataToRead = tty_prepare_flip_string(tty, &rptr, dataToRead);
if (dataToRead == 0)
break;
/* ---------------------------------------------------------------
@@ -2192,13 +2187,9 @@ static void receive_data(struct channel *ch)
for translation if necessary.
------------------------------------------------------------------ */
memcpy_fromio(rptr, ch->rxptr + tail, dataToRead);
- rc += dataToRead;
- rptr += dataToRead;
tail = (tail + dataToRead) & wrapmask;
bytesAvailable -= dataToRead;
} /* End while there is data on the card */
- tty->flip.count = rc;
- tty->flip.char_buf_ptr = rptr;
globalwinon(ch);
writew(tail, &bc->rout);
/* Must be called with global data */
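
The epca.c conversion above replaces direct pokes at tty->flip with the new tty buffer helpers: tty_buffer_request_room() reserves space, tty_prepare_flip_string() hands back a linear buffer to copy into, tty_insert_flip_char() queues single flagged characters, and tty_flip_buffer_push()/tty_schedule_flip() pass the data to the line discipline. A condensed sketch of a receive path on that API (function and variable names are hypothetical):

	#include <linux/tty.h>
	#include <linux/tty_flip.h>
	#include <linux/string.h>

	static void example_rx(struct tty_struct *tty, const unsigned char *src,
			       int len)
	{
		unsigned char *dst;
		int room;

		/* reserve space, copy into the linear buffer the tty layer
		 * hands back, then push it to the line discipline */
		room = tty_prepare_flip_string(tty, &dst, len);
		if (room > 0) {
			memcpy(dst, src, room);
			tty_flip_buffer_push(tty);
		}
	}
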
diff --git a/drivers/char/esp.c b/drivers/char/esp.c
index 9f53d2fcc36..e469f641c72 100644
--- a/drivers/char/esp.c
+++ b/drivers/char/esp.c
@@ -345,26 +345,22 @@ static inline void receive_chars_pio(struct esp_struct *info, int num_bytes)
for (i = 0; i < num_bytes; i++) {
if (!(err_buf->data[i] & status_mask)) {
- *(tty->flip.char_buf_ptr++) = pio_buf->data[i];
+ int flag = 0;
if (err_buf->data[i] & 0x04) {
- *(tty->flip.flag_buf_ptr++) = TTY_BREAK;
-
+ flag = TTY_BREAK;
if (info->flags & ASYNC_SAK)
do_SAK(tty);
}
else if (err_buf->data[i] & 0x02)
- *(tty->flip.flag_buf_ptr++) = TTY_FRAME;
+ flag = TTY_FRAME;
else if (err_buf->data[i] & 0x01)
- *(tty->flip.flag_buf_ptr++) = TTY_PARITY;
- else
- *(tty->flip.flag_buf_ptr++) = 0;
-
- tty->flip.count++;
+ flag = TTY_PARITY;
+ tty_insert_flip_char(tty, pio_buf->data[i], flag);
}
}
- schedule_delayed_work(&tty->flip.work, 1);
+ schedule_delayed_work(&tty->buf.work, 1);
info->stat_flags &= ~ESP_STAT_RX_TIMEOUT;
release_pio_buffer(pio_buf);
@@ -397,7 +393,6 @@ static inline void receive_chars_dma_done(struct esp_struct *info,
int num_bytes;
unsigned long flags;
-
flags=claim_dma_lock();
disable_dma(dma);
clear_dma_ff(dma);
@@ -408,38 +403,31 @@ static inline void receive_chars_dma_done(struct esp_struct *info,
info->icount.rx += num_bytes;
- memcpy(tty->flip.char_buf_ptr, dma_buffer, num_bytes);
- tty->flip.char_buf_ptr += num_bytes;
- tty->flip.count += num_bytes;
- memset(tty->flip.flag_buf_ptr, 0, num_bytes);
- tty->flip.flag_buf_ptr += num_bytes;
-
if (num_bytes > 0) {
- tty->flip.flag_buf_ptr--;
+ tty_insert_flip_string(tty, dma_buffer, num_bytes - 1);
status &= (0x1c & info->read_status_mask);
+
+ /* Is the status significant or do we throw away the last byte? */
+ if (!(status & info->ignore_status_mask)) {
+ int statflag = 0;
- if (status & info->ignore_status_mask) {
- tty->flip.count--;
- tty->flip.char_buf_ptr--;
- tty->flip.flag_buf_ptr--;
- } else if (status & 0x10) {
- *tty->flip.flag_buf_ptr = TTY_BREAK;
- (info->icount.brk)++;
- if (info->flags & ASYNC_SAK)
- do_SAK(tty);
- } else if (status & 0x08) {
- *tty->flip.flag_buf_ptr = TTY_FRAME;
- (info->icount.frame)++;
- }
- else if (status & 0x04) {
- *tty->flip.flag_buf_ptr = TTY_PARITY;
- (info->icount.parity)++;
+ if (status & 0x10) {
+ statflag = TTY_BREAK;
+ (info->icount.brk)++;
+ if (info->flags & ASYNC_SAK)
+ do_SAK(tty);
+ } else if (status & 0x08) {
+ statflag = TTY_FRAME;
+ (info->icount.frame)++;
+ }
+ else if (status & 0x04) {
+ statflag = TTY_PARITY;
+ (info->icount.parity)++;
+ }
+ tty_insert_flip_char(tty, dma_buffer[num_bytes - 1], statflag);
}
-
- tty->flip.flag_buf_ptr++;
-
- schedule_delayed_work(&tty->flip.work, 1);
+ schedule_delayed_work(&tty->buf.work, 1);
}
if (dma_bytes != num_bytes) {
@@ -693,8 +681,7 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id,
num_bytes = serial_in(info, UART_ESI_STAT1) << 8;
num_bytes |= serial_in(info, UART_ESI_STAT2);
- if (num_bytes > (TTY_FLIPBUF_SIZE - info->tty->flip.count))
- num_bytes = TTY_FLIPBUF_SIZE - info->tty->flip.count;
+ num_bytes = tty_buffer_request_room(info->tty, num_bytes);
if (num_bytes) {
if (dma_bytes ||
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index 66e53dd450f..40a67c86420 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -120,7 +120,7 @@ __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
#if defined(CONFIG_X86)
# define HAVE_MONOTONIC
# define TIMER_FREQ 1000000000ULL
-#elif defined(CONFIG_ARCH_S390)
+#elif defined(CONFIG_S390)
/* FA240000 is 1 Second in the IBM time universe (Page 4-38 Principles of Op for zSeries */
# define TIMER_FREQ 0xFA240000ULL
#elif defined(CONFIG_IA64)
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index f9217763467..1994a92d473 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -597,9 +597,7 @@ static int hvc_poll(struct hvc_struct *hp)
/* Read data if any */
for (;;) {
- int count = N_INBUF;
- if (count > (TTY_FLIPBUF_SIZE - tty->flip.count))
- count = TTY_FLIPBUF_SIZE - tty->flip.count;
+ int count = tty_buffer_request_room(tty, N_INBUF);
/* If flip is full, just reschedule a later read */
if (count == 0) {
@@ -635,7 +633,7 @@ static int hvc_poll(struct hvc_struct *hp)
tty_insert_flip_char(tty, buf[i], 0);
}
- if (tty->flip.count)
+ if (count)
tty_schedule_flip(tty);
/*
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 53dc77c760f..831eb4e8d9d 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -456,12 +456,11 @@ static int hvcs_io(struct hvcs_struct *hvcsd)
/* remove the read masks */
hvcsd->todo_mask &= ~(HVCS_READ_MASK);
- if ((tty->flip.count + HVCS_BUFF_LEN) < TTY_FLIPBUF_SIZE) {
+ if (tty_buffer_request_room(tty, HVCS_BUFF_LEN) >= HVCS_BUFF_LEN) {
got = hvc_get_chars(unit_address,
&buf[0],
HVCS_BUFF_LEN);
- for (i=0;got && i<got;i++)
- tty_insert_flip_char(tty, buf[i], TTY_NORMAL);
+ tty_insert_flip_string(tty, buf, got);
}
/* Give the TTY time to process the data we just sent. */
@@ -469,10 +468,9 @@ static int hvcs_io(struct hvcs_struct *hvcsd)
hvcsd->todo_mask |= HVCS_QUICK_READ;
spin_unlock_irqrestore(&hvcsd->lock, flags);
- if (tty->flip.count) {
- /* This is synch because tty->low_latency == 1 */
+ /* This is synch because tty->low_latency == 1 */
+ if(got)
tty_flip_buffer_push(tty);
- }
if (!got) {
/* Do this _after_ the flip_buffer_push */
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index a22aa940e01..a9522189fc9 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -197,7 +197,7 @@ static inline void print_state(struct hvsi_struct *hp)
};
const char *name = state_names[hp->state];
- if (hp->state > (sizeof(state_names)/sizeof(char*)))
+ if (hp->state > ARRAY_SIZE(state_names))
name = "UNKNOWN";
pr_debug("hvsi%i: state = %s\n", hp->index, name);
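
The hvsi.c hunk swaps an open-coded sizeof division for ARRAY_SIZE(), which expands to sizeof(arr)/sizeof((arr)[0]) and avoids dividing by the wrong element type. A minimal sketch of the idiom (the table contents here are made up):

	#include <linux/kernel.h>	/* ARRAY_SIZE() */

	static const char *state_names[] = { "down", "up", "open" };

	static const char *state_name(unsigned int state)
	{
		/* bound-check the index against the static table */
		if (state >= ARRAY_SIZE(state_names))
			return "UNKNOWN";
		return state_names[state];
	}
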
diff --git a/drivers/char/hw_random.c b/drivers/char/hw_random.c
index 6f673d2de0b..b3bc2e37e61 100644
--- a/drivers/char/hw_random.c
+++ b/drivers/char/hw_random.c
@@ -1,4 +1,9 @@
/*
+ Added support for the AMD Geode LX RNG
+ (c) Copyright 2004-2005 Advanced Micro Devices, Inc.
+
+ derived from
+
Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
(c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
@@ -95,6 +100,11 @@ static unsigned int via_data_present (void);
static u32 via_data_read (void);
#endif
+static int __init geode_init(struct pci_dev *dev);
+static void geode_cleanup(void);
+static unsigned int geode_data_present (void);
+static u32 geode_data_read (void);
+
struct rng_operations {
int (*init) (struct pci_dev *dev);
void (*cleanup) (void);
@@ -122,6 +132,7 @@ enum {
rng_hw_intel,
rng_hw_amd,
rng_hw_via,
+ rng_hw_geode,
};
static struct rng_operations rng_vendor_ops[] = {
@@ -139,6 +150,9 @@ static struct rng_operations rng_vendor_ops[] = {
/* rng_hw_via */
{ via_init, via_cleanup, via_data_present, via_data_read, 1 },
#endif
+
+ /* rng_hw_geode */
+ { geode_init, geode_cleanup, geode_data_present, geode_data_read, 4 }
};
/*
@@ -155,10 +169,14 @@ static struct pci_device_id rng_pci_tbl[] = {
{ 0x8086, 0x2418, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
{ 0x8086, 0x2428, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
+ { 0x8086, 0x2430, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
{ 0x8086, 0x2448, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
{ 0x8086, 0x244e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
{ 0x8086, 0x245e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_intel },
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, rng_hw_geode },
+
{ 0, }, /* terminate list */
};
MODULE_DEVICE_TABLE (pci, rng_pci_tbl);
@@ -460,6 +478,57 @@ static void via_cleanup(void)
}
#endif
+/***********************************************************************
+ *
+ * AMD Geode RNG operations
+ *
+ */
+
+static void __iomem *geode_rng_base = NULL;
+
+#define GEODE_RNG_DATA_REG 0x50
+#define GEODE_RNG_STATUS_REG 0x54
+
+static u32 geode_data_read(void)
+{
+ u32 val;
+
+ assert(geode_rng_base != NULL);
+ val = readl(geode_rng_base + GEODE_RNG_DATA_REG);
+ return val;
+}
+
+static unsigned int geode_data_present(void)
+{
+ u32 val;
+
+ assert(geode_rng_base != NULL);
+ val = readl(geode_rng_base + GEODE_RNG_STATUS_REG);
+ return val;
+}
+
+static void geode_cleanup(void)
+{
+ iounmap(geode_rng_base);
+ geode_rng_base = NULL;
+}
+
+static int geode_init(struct pci_dev *dev)
+{
+ unsigned long rng_base = pci_resource_start(dev, 0);
+
+ if (rng_base == 0)
+ return 1;
+
+ geode_rng_base = ioremap(rng_base, 0x58);
+
+ if (geode_rng_base == NULL) {
+ printk(KERN_ERR PFX "Cannot ioremap RNG memory\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
/***********************************************************************
*
@@ -574,7 +643,7 @@ static int __init rng_init (void)
DPRINTK ("ENTER\n");
- /* Probe for Intel, AMD RNGs */
+ /* Probe for Intel, AMD, Geode RNGs */
for_each_pci_dev(pdev) {
ent = pci_match_id(rng_pci_tbl, pdev);
if (ent) {
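
The Geode support added above follows the usual pattern for a memory-mapped peripheral: map the PCI BAR with ioremap() at init, read registers with readl() at fixed offsets, and iounmap() on cleanup. A condensed sketch of that shape (register offsets copied from the hunk, everything else illustrative):

	#include <linux/pci.h>
	#include <linux/errno.h>
	#include <asm/io.h>

	#define RNG_DATA_REG	0x50
	#define RNG_STATUS_REG	0x54

	static void __iomem *rng_base;

	static int example_init(struct pci_dev *dev)
	{
		/* BAR 0 carries the RNG registers; map enough to cover them */
		rng_base = ioremap(pci_resource_start(dev, 0), 0x58);
		return rng_base ? 0 : -EBUSY;
	}

	static u32 example_read(void)
	{
		return readl(rng_base + RNG_DATA_REG);	/* latest random word */
	}

	static void example_cleanup(void)
	{
		iounmap(rng_base);
		rng_base = NULL;
	}
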
diff --git a/drivers/char/ip2/i2lib.c b/drivers/char/ip2/i2lib.c
index ba85eb1b6ec..fc944d375be 100644
--- a/drivers/char/ip2/i2lib.c
+++ b/drivers/char/ip2/i2lib.c
@@ -854,7 +854,7 @@ i2Input(i2ChanStrPtr pCh)
count += IBUF_SIZE;
}
// Don't give more than can be taken by the line discipline
- amountToMove = pCh->pTTY->ldisc.receive_room( pCh->pTTY );
+ amountToMove = pCh->pTTY->receive_room;
if (count > amountToMove) {
count = amountToMove;
}
diff --git a/drivers/char/ip2main.c b/drivers/char/ip2main.c
index d815d197dc3..56e93a5a1e2 100644
--- a/drivers/char/ip2main.c
+++ b/drivers/char/ip2main.c
@@ -172,7 +172,7 @@ static int Fip_firmware_size;
/* Private (static) functions */
static int ip2_open(PTTY, struct file *);
static void ip2_close(PTTY, struct file *);
-static int ip2_write(PTTY, int, const unsigned char *, int);
+static int ip2_write(PTTY, const unsigned char *, int);
static void ip2_putchar(PTTY, unsigned char);
static void ip2_flush_chars(PTTY);
static int ip2_write_room(PTTY);
@@ -1713,7 +1713,7 @@ ip2_hangup ( PTTY tty )
/* */
/******************************************************************************/
static int
-ip2_write( PTTY tty, int user, const unsigned char *pData, int count)
+ip2_write( PTTY tty, const unsigned char *pData, int count)
{
i2ChanStrPtr pCh = tty->driver_data;
int bytesSent = 0;
@@ -1726,7 +1726,7 @@ ip2_write( PTTY tty, int user, const unsigned char *pData, int count)
/* This is the actual move bit. Make sure it does what we need!!!!! */
WRITE_LOCK_IRQSAVE(&pCh->Pbuf_spinlock,flags);
- bytesSent = i2Output( pCh, pData, count, user );
+ bytesSent = i2Output( pCh, pData, count, 0 );
WRITE_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags);
ip2trace (CHANN, ITRC_WRITE, ITRC_RETURN, 1, bytesSent );
@@ -2001,7 +2001,9 @@ ip2_stop ( PTTY tty )
static int ip2_tiocmget(struct tty_struct *tty, struct file *file)
{
i2ChanStrPtr pCh = DevTable[tty->index];
+#ifdef ENABLE_DSSNOW
wait_queue_t wait;
+#endif
if (pCh == NULL)
return -ENODEV;
@@ -2089,15 +2091,17 @@ ip2_ioctl ( PTTY tty, struct file *pFile, UINT cmd, ULONG arg )
{
wait_queue_t wait;
i2ChanStrPtr pCh = DevTable[tty->index];
+ i2eBordStrPtr pB;
struct async_icount cprev, cnow; /* kernel counter temps */
struct serial_icounter_struct __user *p_cuser;
int rc = 0;
unsigned long flags;
void __user *argp = (void __user *)arg;
- if ( pCh == NULL ) {
+ if ( pCh == NULL )
return -ENODEV;
- }
+
+ pB = pCh->pMyBord;
ip2trace (CHANN, ITRC_IOCTL, ITRC_ENTER, 2, cmd, arg );
@@ -2206,9 +2210,9 @@ ip2_ioctl ( PTTY tty, struct file *pFile, UINT cmd, ULONG arg )
* for masking). Caller should use TIOCGICOUNT to see which one it was
*/
case TIOCMIWAIT:
- save_flags(flags);cli();
+ WRITE_LOCK_IRQSAVE(&pB->read_fifo_spinlock, flags);
cprev = pCh->icount; /* note the counters on entry */
- restore_flags(flags);
+ WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock, flags);
i2QueueCommands(PTYPE_BYPASS, pCh, 100, 4,
CMD_DCD_REP, CMD_CTS_REP, CMD_DSR_REP, CMD_RI_REP);
init_waitqueue_entry(&wait, current);
@@ -2228,9 +2232,9 @@ ip2_ioctl ( PTTY tty, struct file *pFile, UINT cmd, ULONG arg )
rc = -ERESTARTSYS;
break;
}
- save_flags(flags);cli();
+ WRITE_LOCK_IRQSAVE(&pB->read_fifo_spinlock, flags);
cnow = pCh->icount; /* atomic copy */
- restore_flags(flags);
+ WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock, flags);
if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
rc = -EIO; /* no change => rc */
@@ -2268,9 +2272,9 @@ ip2_ioctl ( PTTY tty, struct file *pFile, UINT cmd, ULONG arg )
case TIOCGICOUNT:
ip2trace (CHANN, ITRC_IOCTL, 11, 1, rc );
- save_flags(flags);cli();
+ WRITE_LOCK_IRQSAVE(&pB->read_fifo_spinlock, flags);
cnow = pCh->icount;
- restore_flags(flags);
+ WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock, flags);
p_cuser = argp;
rc = put_user(cnow.cts, &p_cuser->cts);
rc = put_user(cnow.dsr, &p_cuser->dsr);
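
The TIOCMIWAIT/TIOCGICOUNT hunks above replace the long-gone save_flags()/cli() pair with the board's read-FIFO lock so the counter snapshot stays atomic against the interrupt handler. A minimal sketch of the same pattern with a plain spinlock (the driver itself uses its WRITE_LOCK_IRQSAVE rwlock wrappers; names here are hypothetical):

	#include <linux/spinlock.h>

	struct icounts { unsigned long rng, dsr, dcd, cts; };

	static DEFINE_SPINLOCK(fifo_lock);
	static struct icounts icount;		/* updated from the IRQ handler */

	static struct icounts snapshot_icount(void)
	{
		struct icounts now;
		unsigned long flags;

		/* irqsave keeps the updater off this CPU while the structure
		 * is copied out in one piece */
		spin_lock_irqsave(&fifo_lock, flags);
		now = icount;
		spin_unlock_irqrestore(&fifo_lock, flags);
		return now;
	}
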
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 1f56b4cf0f5..0097f06fa67 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -57,6 +57,7 @@ static int initialized = 0;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_ipmi_root = NULL;
+EXPORT_SYMBOL(proc_ipmi_root);
#endif /* CONFIG_PROC_FS */
#define MAX_EVENTS_IN_QUEUE 25
@@ -787,7 +788,6 @@ int ipmi_destroy_user(ipmi_user_t user)
int i;
unsigned long flags;
struct cmd_rcvr *rcvr;
- struct list_head *entry1, *entry2;
struct cmd_rcvr *rcvrs = NULL;
user->valid = 1;
@@ -812,8 +812,7 @@ int ipmi_destroy_user(ipmi_user_t user)
* synchronize_rcu()) then free everything in that list.
*/
down(&intf->cmd_rcvrs_lock);
- list_for_each_safe_rcu(entry1, entry2, &intf->cmd_rcvrs) {
- rcvr = list_entry(entry1, struct cmd_rcvr, link);
+ list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
if (rcvr->user == user) {
list_del_rcu(&rcvr->link);
rcvr->next = rcvrs;
@@ -3297,6 +3296,5 @@ EXPORT_SYMBOL(ipmi_get_my_address);
EXPORT_SYMBOL(ipmi_set_my_LUN);
EXPORT_SYMBOL(ipmi_get_my_LUN);
EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
-EXPORT_SYMBOL(proc_ipmi_root);
EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
EXPORT_SYMBOL(ipmi_free_recv_msg);
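
The ipmi_msghandler hunk drops the removed list_for_each_safe_rcu() helper in favour of list_for_each_entry_rcu(); deleting inside the walk stays safe because list_del_rcu() leaves the victim's forward pointer intact, and the entries are only freed after synchronize_rcu(), once no reader can still see them. A stripped-down sketch of that shape (the types and the kfree() are illustrative):

	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct rcvr {
		struct list_head link;
		struct rcvr *next;	/* private chain of unlinked entries */
		void *user;
	};

	static void purge_user(struct list_head *head, void *user)
	{
		struct rcvr *r, *victims = NULL;

		/* writer side: serialized by the caller's lock, readers walk
		 * the list under rcu_read_lock() */
		list_for_each_entry_rcu(r, head, link) {
			if (r->user != user)
				continue;
			list_del_rcu(&r->link);
			r->next = victims;
			victims = r;
		}

		synchronize_rcu();	/* wait out readers that may hold them */

		while (victims) {
			r = victims;
			victims = victims->next;
			kfree(r);
		}
	}
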
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index e053eade036..49c09ae004b 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -612,11 +612,14 @@ static int ipmi_poweroff_init (void)
#endif
rv = ipmi_smi_watcher_register(&smi_watcher);
+
+#ifdef CONFIG_PROC_FS
if (rv) {
unregister_sysctl_table(ipmi_table_header);
printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv);
goto out_err;
}
+#endif
out_err:
return rv;
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index beea450ee4b..c67ef3e47ad 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1056,7 +1056,7 @@ MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
#define IPMI_MEM_ADDR_SPACE 1
#define IPMI_IO_ADDR_SPACE 2
-#if defined(CONFIG_ACPI) || defined(CONFIG_X86) || defined(CONFIG_PCI)
+#if defined(CONFIG_ACPI) || defined(CONFIG_DMI) || defined(CONFIG_PCI)
static int is_new_interface(int intf, u8 addr_space, unsigned long base_addr)
{
int i;
@@ -1669,7 +1669,7 @@ static int try_init_acpi(int intf_num, struct smi_info **new_info)
}
#endif
-#ifdef CONFIG_X86
+#ifdef CONFIG_DMI
typedef struct dmi_ipmi_data
{
u8 type;
@@ -1829,7 +1829,7 @@ static int try_init_smbios(int intf_num, struct smi_info **new_info)
ipmi_data->slave_addr);
return 0;
}
-#endif /* CONFIG_X86 */
+#endif /* CONFIG_DMI */
#ifdef CONFIG_PCI
@@ -2222,7 +2222,7 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
if (rv && si_trydefaults)
rv = try_init_acpi(intf_num, &new_smi);
#endif
-#ifdef CONFIG_X86
+#ifdef CONFIG_DMI
if (rv && si_trydefaults)
rv = try_init_smbios(intf_num, &new_smi);
#endif
@@ -2433,7 +2433,7 @@ static __init int init_ipmi_si(void)
printk(KERN_INFO "IPMI System Interface driver.\n");
-#ifdef CONFIG_X86
+#ifdef CONFIG_DMI
dmi_find_bmc();
#endif
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 1bbf507adda..e9ebabaf8cb 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -20,7 +20,7 @@
* 3/9/99 sameer Added support for ISI4616 cards.
*
* 16/9/99 sameer We do not force RTS low anymore.
- * This is to prevent the firmware
+ * This is to prevent the firmware
* from getting confused.
*
* 26/10/99 sameer Cosmetic changes:The driver now
@@ -31,28 +31,28 @@
*
* 10/5/00 sameer Fixed isicom_shutdown_board()
* to not lower DTR on all the ports
- * when the last port on the card is
+ * when the last port on the card is
* closed.
*
* 10/5/00 sameer Signal mask setup command added
- * to isicom_setup_port and
+ * to isicom_setup_port and
* isicom_shutdown_port.
*
* 24/5/00 sameer The driver is now SMP aware.
- *
- *
+ *
+ *
* 27/11/00 Vinayak P Risbud Fixed the Driver Crash Problem
- *
- *
+ *
+ *
* 03/01/01 anil .s Added support for resetting the
* internal modems on ISI cards.
*
* 08/02/01 anil .s Upgraded the driver for kernel
* 2.4.x
*
- * 11/04/01 Kevin Fixed firmware load problem with
+ * 11/04/01 Kevin Fixed firmware load problem with
* ISIHP-4X card
- *
+ *
* 30/04/01 anil .s Fixed the remote login through
* ISI port problem. Now the link
* does not go down before password
@@ -62,9 +62,9 @@
* among ISI-PCI cards.
*
* 03/05/01 anil .s Added support to display the version
- * info during insmod as well as module
+ * info during insmod as well as module
* listing by lsmod.
- *
+ *
* 10/05/01 anil .s Done the modifications to the source
* file and Install script so that the
* same installation can be used for
@@ -73,19 +73,19 @@
* 06/06/01 anil .s Now we drop both dtr and rts during
* shutdown_port as well as raise them
* during isicom_config_port.
- *
+ *
* 09/06/01 acme@conectiva.com.br use capable, not suser, do
* restore_flags on failure in
* isicom_send_break, verify put_user
* result
*
- * 11/02/03 ranjeeth Added support for 230 Kbps and 460 Kbps
- * Baud index extended to 21
- *
- * 20/03/03 ranjeeth Made to work for Linux Advanced server.
- * Taken care of license warning.
- *
- * 10/12/03 Ravindra Made to work for Fedora Core 1 of
+ * 11/02/03 ranjeeth Added support for 230 Kbps and 460 Kbps
+ * Baud index extended to 21
+ *
+ * 20/03/03 ranjeeth Made to work for Linux Advanced server.
+ * Taken care of license warning.
+ *
+ * 10/12/03 Ravindra Made to work for Fedora Core 1 of
* Red Hat Distribution
*
* 06/01/05 Alan Cox Merged the ISI and base kernel strands
@@ -93,10 +93,10 @@
*
* ***********************************************************
*
- * To use this driver you also need the support package. You
+ * To use this driver you also need the support package. You
* can find this in RPM format on
* ftp://ftp.linux.org.uk/pub/linux/alan
- *
+ *
* You can find the original tools for this direct from Multitech
* ftp://ftp.multitech.com/ISI-Cards/
*
@@ -107,20 +107,20 @@
* Omit those entries for boards you don't have installed.
*
* TODO
- * Hotplug
* Merge testing
* 64-bit verification
*/
#include <linux/module.h>
+#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/termios.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/serial.h>
#include <linux/mm.h>
-#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/delay.h>
@@ -134,50 +134,62 @@
#include <linux/isicom.h>
+#define InterruptTheCard(base) outw(0, (base) + 0xc)
+#define ClearInterrupt(base) inw((base) + 0x0a)
+
+#ifdef DEBUG
+#define pr_dbg(str...) printk(KERN_DEBUG "ISICOM: " str)
+#define isicom_paranoia_check(a, b, c) __isicom_paranoia_check((a), (b), (c))
+#else
+#define pr_dbg(str...) do { } while (0)
+#define isicom_paranoia_check(a, b, c) 0
+#endif
+
+static int isicom_probe(struct pci_dev *, const struct pci_device_id *);
+static void __devexit isicom_remove(struct pci_dev *);
+
static struct pci_device_id isicom_pci_tbl[] = {
- { VENDOR_ID, 0x2028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { VENDOR_ID, 0x2051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { VENDOR_ID, 0x2052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { VENDOR_ID, 0x2053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { VENDOR_ID, 0x2054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { VENDOR_ID, 0x2055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { VENDOR_ID, 0x2056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { VENDOR_ID, 0x2057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { VENDOR_ID, 0x2058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_DEVICE(VENDOR_ID, 0x2028) },
+ { PCI_DEVICE(VENDOR_ID, 0x2051) },
+ { PCI_DEVICE(VENDOR_ID, 0x2052) },
+ { PCI_DEVICE(VENDOR_ID, 0x2053) },
+ { PCI_DEVICE(VENDOR_ID, 0x2054) },
+ { PCI_DEVICE(VENDOR_ID, 0x2055) },
+ { PCI_DEVICE(VENDOR_ID, 0x2056) },
+ { PCI_DEVICE(VENDOR_ID, 0x2057) },
+ { PCI_DEVICE(VENDOR_ID, 0x2058) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, isicom_pci_tbl);
+static struct pci_driver isicom_driver = {
+ .name = "isicom",
+ .id_table = isicom_pci_tbl,
+ .probe = isicom_probe,
+ .remove = __devexit_p(isicom_remove)
+};
+
static int prev_card = 3; /* start servicing isi_card[0] */
static struct tty_driver *isicom_normal;
static struct timer_list tx;
static char re_schedule = 1;
-#ifdef ISICOM_DEBUG
-static unsigned long tx_count = 0;
-#endif
-
-static int ISILoad_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
static void isicom_tx(unsigned long _data);
-static void isicom_start(struct tty_struct * tty);
-
-static unsigned char * tmp_buf;
-static DECLARE_MUTEX(tmp_buf_sem);
+static void isicom_start(struct tty_struct *tty);
/* baud index mappings from linux defns to isi */
static signed char linuxb_to_isib[] = {
- -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 13, 15, 16, 17,
- 18, 19
+ -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 13, 15, 16, 17, 18, 19
};
struct isi_board {
- unsigned short base;
+ unsigned long base;
unsigned char irq;
unsigned char port_count;
unsigned short status;
- unsigned short port_status; /* each bit represents a single port */
+ unsigned short port_status; /* each bit for each port */
unsigned short shift_count;
struct isi_port * ports;
signed char count;
@@ -192,9 +204,9 @@ struct isi_port {
int count;
int blocked_open;
int close_delay;
- unsigned short channel;
- unsigned short status;
- unsigned short closing_wait;
+ u16 channel;
+ u16 status;
+ u16 closing_wait;
struct isi_board * card;
struct tty_struct * tty;
wait_queue_head_t close_wait;
@@ -215,35 +227,37 @@ static struct isi_port isi_ports[PORT_COUNT];
* the kernel lock for the card and have the card in a position that
* it wants to talk.
*/
-
+
static int lock_card(struct isi_board *card)
{
char retries;
- unsigned short base = card->base;
+ unsigned long base = card->base;
for (retries = 0; retries < 100; retries++) {
spin_lock_irqsave(&card->card_lock, card->flags);
if (inw(base + 0xe) & 0x1) {
- return 1;
+ return 1;
} else {
spin_unlock_irqrestore(&card->card_lock, card->flags);
udelay(1000); /* 1ms */
}
}
- printk(KERN_WARNING "ISICOM: Failed to lock Card (0x%x)\n", card->base);
+ printk(KERN_WARNING "ISICOM: Failed to lock Card (0x%lx)\n",
+ card->base);
+
return 0; /* Failed to aquire the card! */
}
static int lock_card_at_interrupt(struct isi_board *card)
{
unsigned char retries;
- unsigned short base = card->base;
+ unsigned long base = card->base;
for (retries = 0; retries < 200; retries++) {
spin_lock_irqsave(&card->card_lock, card->flags);
if (inw(base + 0xe) & 0x1)
- return 1;
+ return 1;
else
spin_unlock_irqrestore(&card->card_lock, card->flags);
}
@@ -259,373 +273,141 @@ static void unlock_card(struct isi_board *card)
/*
* ISI Card specific ops ...
*/
-
-static void raise_dtr(struct isi_port * port)
+
+static void raise_dtr(struct isi_port *port)
{
- struct isi_board * card = port->card;
- unsigned short base = card->base;
- unsigned char channel = port->channel;
+ struct isi_board *card = port->card;
+ unsigned long base = card->base;
+ u16 channel = port->channel;
if (!lock_card(card))
return;
- outw(0x8000 | (channel << card->shift_count) | 0x02 , base);
+ outw(0x8000 | (channel << card->shift_count) | 0x02, base);
outw(0x0504, base);
InterruptTheCard(base);
port->status |= ISI_DTR;
unlock_card(card);
}
-static inline void drop_dtr(struct isi_port * port)
-{
- struct isi_board * card = port->card;
- unsigned short base = card->base;
- unsigned char channel = port->channel;
+static inline void drop_dtr(struct isi_port *port)
+{
+ struct isi_board *card = port->card;
+ unsigned long base = card->base;
+ u16 channel = port->channel;
if (!lock_card(card))
return;
- outw(0x8000 | (channel << card->shift_count) | 0x02 , base);
+ outw(0x8000 | (channel << card->shift_count) | 0x02, base);
outw(0x0404, base);
- InterruptTheCard(base);
+ InterruptTheCard(base);
port->status &= ~ISI_DTR;
unlock_card(card);
}
-static inline void raise_rts(struct isi_port * port)
+static inline void raise_rts(struct isi_port *port)
{
- struct isi_board * card = port->card;
- unsigned short base = card->base;
- unsigned char channel = port->channel;
+ struct isi_board *card = port->card;
+ unsigned long base = card->base;
+ u16 channel = port->channel;
if (!lock_card(card))
return;
- outw(0x8000 | (channel << card->shift_count) | 0x02 , base);
+ outw(0x8000 | (channel << card->shift_count) | 0x02, base);
outw(0x0a04, base);
- InterruptTheCard(base);
+ InterruptTheCard(base);
port->status |= ISI_RTS;
unlock_card(card);
}
-static inline void drop_rts(struct isi_port * port)
+static inline void drop_rts(struct isi_port *port)
{
- struct isi_board * card = port->card;
- unsigned short base = card->base;
- unsigned char channel = port->channel;
+ struct isi_board *card = port->card;
+ unsigned long base = card->base;
+ u16 channel = port->channel;
if (!lock_card(card))
return;
- outw(0x8000 | (channel << card->shift_count) | 0x02 , base);
+ outw(0x8000 | (channel << card->shift_count) | 0x02, base);
outw(0x0804, base);
- InterruptTheCard(base);
+ InterruptTheCard(base);
port->status &= ~ISI_RTS;
unlock_card(card);
}
-static inline void raise_dtr_rts(struct isi_port * port)
+static inline void raise_dtr_rts(struct isi_port *port)
{
- struct isi_board * card = port->card;
- unsigned short base = card->base;
- unsigned char channel = port->channel;
+ struct isi_board *card = port->card;
+ unsigned long base = card->base;
+ u16 channel = port->channel;
if (!lock_card(card))
return;
- outw(0x8000 | (channel << card->shift_count) | 0x02 , base);
+ outw(0x8000 | (channel << card->shift_count) | 0x02, base);
outw(0x0f04, base);
InterruptTheCard(base);
port->status |= (ISI_DTR | ISI_RTS);
unlock_card(card);
}
-static void drop_dtr_rts(struct isi_port * port)
+static void drop_dtr_rts(struct isi_port *port)
{
- struct isi_board * card = port->card;
- unsigned short base = card->base;
- unsigned char channel = port->channel;
+ struct isi_board *card = port->card;
+ unsigned long base = card->base;
+ u16 channel = port->channel;
if (!lock_card(card))
return;
- outw(0x8000 | (channel << card->shift_count) | 0x02 , base);
+ outw(0x8000 | (channel << card->shift_count) | 0x02, base);
outw(0x0c04, base);
- InterruptTheCard(base);
+ InterruptTheCard(base);
port->status &= ~(ISI_RTS | ISI_DTR);
unlock_card(card);
}
-static inline void kill_queue(struct isi_port * port, short queue)
+static inline void kill_queue(struct isi_port *port, short queue)
{
- struct isi_board * card = port->card;
- unsigned short base = card->base;
- unsigned char channel = port->channel;
+ struct isi_board *card = port->card;
+ unsigned long base = card->base;
+ u16 channel = port->channel;
if (!lock_card(card))
return;
- outw(0x8000 | (channel << card->shift_count) | 0x02 , base);
+ outw(0x8000 | (channel << card->shift_count) | 0x02, base);
outw((queue << 8) | 0x06, base);
- InterruptTheCard(base);
+ InterruptTheCard(base);
unlock_card(card);
}
-
-/*
- * Firmware loader driver specific routines. This needs to mostly die
- * and be replaced with request_firmware.
- */
-
-static struct file_operations ISILoad_fops = {
- .owner = THIS_MODULE,
- .ioctl = ISILoad_ioctl,
-};
-
-static struct miscdevice isiloader_device = {
- ISILOAD_MISC_MINOR, "isictl", &ISILoad_fops
-};
-
-
-static inline int WaitTillCardIsFree(unsigned short base)
-{
- unsigned long count=0;
- while( (!(inw(base+0xe) & 0x1)) && (count++ < 6000000));
- if (inw(base+0xe)&0x1)
- return 0;
- else
- return 1;
-}
-
-static int ISILoad_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- unsigned int card, i, j, signature, status, portcount = 0;
- unsigned long t;
- unsigned short word_count, base;
- bin_frame frame;
- void __user *argp = (void __user *)arg;
- /* exec_record exec_rec; */
-
- if(get_user(card, (int __user *)argp))
- return -EFAULT;
-
- if(card < 0 || card >= BOARD_COUNT)
- return -ENXIO;
-
- base=isi_card[card].base;
-
- if(base==0)
- return -ENXIO; /* disabled or not used */
-
- switch(cmd) {
- case MIOCTL_RESET_CARD:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- printk(KERN_DEBUG "ISILoad:Resetting Card%d at 0x%x ",card+1,base);
-
- inw(base+0x8);
-
- for(t=jiffies+HZ/100;time_before(jiffies, t););
-
- outw(0,base+0x8); /* Reset */
-
- for(j=1;j<=3;j++) {
- for(t=jiffies+HZ;time_before(jiffies, t););
- printk(".");
- }
- signature=(inw(base+0x4)) & 0xff;
- if (isi_card[card].isa) {
-
- if (!(inw(base+0xe) & 0x1) || (inw(base+0x2))) {
-#ifdef ISICOM_DEBUG
- printk("\nbase+0x2=0x%x , base+0xe=0x%x",inw(base+0x2),inw(base+0xe));
-#endif
- printk("\nISILoad:ISA Card%d reset failure (Possible bad I/O Port Address 0x%x).\n",card+1,base);
- return -EIO;
- }
- }
- else {
- portcount = inw(base+0x2);
- if (!(inw(base+0xe) & 0x1) || ((portcount!=0) && (portcount!=4) && (portcount!=8))) {
-#ifdef ISICOM_DEBUG
- printk("\nbase+0x2=0x%x , base+0xe=0x%x",inw(base+0x2),inw(base+0xe));
-#endif
- printk("\nISILoad:PCI Card%d reset failure (Possible bad I/O Port Address 0x%x).\n",card+1,base);
- return -EIO;
- }
- }
- switch(signature) {
- case 0xa5:
- case 0xbb:
- case 0xdd:
- if (isi_card[card].isa)
- isi_card[card].port_count = 8;
- else {
- if (portcount == 4)
- isi_card[card].port_count = 4;
- else
- isi_card[card].port_count = 8;
- }
- isi_card[card].shift_count = 12;
- break;
-
- case 0xcc: isi_card[card].port_count = 16;
- isi_card[card].shift_count = 11;
- break;
-
- default: printk("ISILoad:Card%d reset failure (Possible bad I/O Port Address 0x%x).\n",card+1,base);
-#ifdef ISICOM_DEBUG
- printk("Sig=0x%x\n",signature);
-#endif
- return -EIO;
- }
- printk("-Done\n");
- return put_user(signature,(unsigned __user *)argp);
-
- case MIOCTL_LOAD_FIRMWARE:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if(copy_from_user(&frame, argp, sizeof(bin_frame)))
- return -EFAULT;
-
- if (WaitTillCardIsFree(base))
- return -EIO;
-
- outw(0xf0,base); /* start upload sequence */
- outw(0x00,base);
- outw((frame.addr), base);/* lsb of adderess */
-
- word_count=(frame.count >> 1) + frame.count % 2;
- outw(word_count, base);
- InterruptTheCard(base);
-
- for(i=0;i<=0x2f;i++); /* a wee bit of delay */
-
- if (WaitTillCardIsFree(base))
- return -EIO;
-
- if ((status=inw(base+0x4))!=0) {
- printk(KERN_WARNING "ISILoad:Card%d rejected load header:\nAddress:0x%x \nCount:0x%x \nStatus:0x%x \n",
- card+1, frame.addr, frame.count, status);
- return -EIO;
- }
- outsw(base, (void *) frame.bin_data, word_count);
-
- InterruptTheCard(base);
-
- for(i=0;i<=0x0f;i++); /* another wee bit of delay */
-
- if (WaitTillCardIsFree(base))
- return -EIO;
-
- if ((status=inw(base+0x4))!=0) {
- printk(KERN_ERR "ISILoad:Card%d got out of sync.Card Status:0x%x\n",card+1, status);
- return -EIO;
- }
- return 0;
-
- case MIOCTL_READ_FIRMWARE:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if(copy_from_user(&frame, argp, sizeof(bin_header)))
- return -EFAULT;
-
- if (WaitTillCardIsFree(base))
- return -EIO;
-
- outw(0xf1,base); /* start download sequence */
- outw(0x00,base);
- outw((frame.addr), base);/* lsb of adderess */
-
- word_count=(frame.count >> 1) + frame.count % 2;
- outw(word_count+1, base);
- InterruptTheCard(base);
-
- for(i=0;i<=0xf;i++); /* a wee bit of delay */
-
- if (WaitTillCardIsFree(base))
- return -EIO;
-
- if ((status=inw(base+0x4))!=0) {
- printk(KERN_WARNING "ISILoad:Card%d rejected verify header:\nAddress:0x%x \nCount:0x%x \nStatus:0x%x \n",
- card+1, frame.addr, frame.count, status);
- return -EIO;
- }
-
- inw(base);
- insw(base, frame.bin_data, word_count);
- InterruptTheCard(base);
-
- for(i=0;i<=0x0f;i++); /* another wee bit of delay */
-
- if (WaitTillCardIsFree(base))
- return -EIO;
-
- if ((status=inw(base+0x4))!=0) {
- printk(KERN_ERR "ISILoad:Card%d verify got out of sync.Card Status:0x%x\n",card+1, status);
- return -EIO;
- }
-
- if(copy_to_user(argp, &frame, sizeof(bin_frame)))
- return -EFAULT;
- return 0;
-
- case MIOCTL_XFER_CTRL:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (WaitTillCardIsFree(base))
- return -EIO;
-
- outw(0xf2, base);
- outw(0x800, base);
- outw(0x0, base);
- outw(0x0, base);
- InterruptTheCard(base);
- outw(0x0, base+0x4); /* for ISI4608 cards */
-
- isi_card[card].status |= FIRMWARE_LOADED;
- return 0;
-
- default:
-#ifdef ISICOM_DEBUG
- printk(KERN_DEBUG "ISILoad: Received Ioctl cmd 0x%x.\n", cmd);
-#endif
- return -ENOIOCTLCMD;
-
- }
-
-}
-
-
/*
* ISICOM Driver specific routines ...
*
*/
-
-static inline int isicom_paranoia_check(struct isi_port const * port, char *name,
- const char * routine)
+
+static inline int __isicom_paranoia_check(struct isi_port const *port,
+ char *name, const char *routine)
{
-#ifdef ISICOM_DEBUG
- static const char * badmagic =
- KERN_WARNING "ISICOM: Warning: bad isicom magic for dev %s in %s.\n";
- static const char * badport =
- KERN_WARNING "ISICOM: Warning: NULL isicom port for dev %s in %s.\n";
if (!port) {
- printk(badport, name, routine);
+ printk(KERN_WARNING "ISICOM: Warning: bad isicom magic for "
+ "dev %s in %s.\n", name, routine);
return 1;
}
if (port->magic != ISICOM_MAGIC) {
- printk(badmagic, name, routine);
+ printk(KERN_WARNING "ISICOM: Warning: NULL isicom port for "
+ "dev %s in %s.\n", name, routine);
return 1;
- }
-#endif
+ }
+
return 0;
}
-
+
/*
- * Transmitter.
+ * Transmitter.
*
* We shovel data into the card buffers on a regular basis. The card
* will do the rest of the work for us.
@@ -635,25 +417,21 @@ static void isicom_tx(unsigned long _data)
{
short count = (BOARD_COUNT-1), card, base;
short txcount, wrd, residue, word_count, cnt;
- struct isi_port * port;
- struct tty_struct * tty;
-
-#ifdef ISICOM_DEBUG
- ++tx_count;
-#endif
-
+ struct isi_port *port;
+ struct tty_struct *tty;
+
/* find next active board */
card = (prev_card + 1) & 0x0003;
while(count-- > 0) {
- if (isi_card[card].status & BOARD_ACTIVE)
+ if (isi_card[card].status & BOARD_ACTIVE)
break;
- card = (card + 1) & 0x0003;
+ card = (card + 1) & 0x0003;
}
if (!(isi_card[card].status & BOARD_ACTIVE))
goto sched_again;
-
+
prev_card = card;
-
+
count = isi_card[card].port_count;
port = isi_card[card].ports;
base = isi_card[card].base;
@@ -662,18 +440,18 @@ static void isicom_tx(unsigned long _data)
continue;
/* port not active or tx disabled to force flow control */
if (!(port->flags & ASYNC_INITIALIZED) ||
- !(port->status & ISI_TXOK))
+ !(port->status & ISI_TXOK))
unlock_card(&isi_card[card]);
continue;
-
+
tty = port->tty;
-
-
- if(tty == NULL) {
+
+
+ if (tty == NULL) {
unlock_card(&isi_card[card]);
continue;
}
-
+
txcount = min_t(short, TX_SIZE, port->xmit_cnt);
if (txcount <= 0 || tty->stopped || tty->hw_stopped) {
unlock_card(&isi_card[card]);
@@ -681,44 +459,45 @@ static void isicom_tx(unsigned long _data)
}
if (!(inw(base + 0x02) & (1 << port->channel))) {
unlock_card(&isi_card[card]);
- continue;
+ continue;
}
-#ifdef ISICOM_DEBUG
- printk(KERN_DEBUG "ISICOM: txing %d bytes, port%d.\n",
- txcount, port->channel+1);
-#endif
- outw((port->channel << isi_card[card].shift_count) | txcount
- , base);
+ pr_dbg("txing %d bytes, port%d.\n", txcount,
+ port->channel + 1);
+ outw((port->channel << isi_card[card].shift_count) | txcount,
+ base);
residue = NO;
- wrd = 0;
+ wrd = 0;
while (1) {
- cnt = min_t(int, txcount, (SERIAL_XMIT_SIZE - port->xmit_tail));
+ cnt = min_t(int, txcount, (SERIAL_XMIT_SIZE
+ - port->xmit_tail));
if (residue == YES) {
residue = NO;
if (cnt > 0) {
- wrd |= (port->xmit_buf[port->xmit_tail] << 8);
- port->xmit_tail = (port->xmit_tail + 1) & (SERIAL_XMIT_SIZE - 1);
+ wrd |= (port->xmit_buf[port->xmit_tail]
+ << 8);
+ port->xmit_tail = (port->xmit_tail + 1)
+ & (SERIAL_XMIT_SIZE - 1);
port->xmit_cnt--;
txcount--;
cnt--;
- outw(wrd, base);
- }
- else {
+ outw(wrd, base);
+ } else {
outw(wrd, base);
break;
}
- }
+ }
if (cnt <= 0) break;
word_count = cnt >> 1;
- outsw(base, port->xmit_buf+port->xmit_tail, word_count);
- port->xmit_tail = (port->xmit_tail + (word_count << 1)) &
- (SERIAL_XMIT_SIZE - 1);
+ outsw(base, port->xmit_buf+port->xmit_tail,word_count);
+ port->xmit_tail = (port->xmit_tail
+ + (word_count << 1)) & (SERIAL_XMIT_SIZE - 1);
txcount -= (word_count << 1);
port->xmit_cnt -= (word_count << 1);
if (cnt & 0x0001) {
residue = YES;
wrd = port->xmit_buf[port->xmit_tail];
- port->xmit_tail = (port->xmit_tail + 1) & (SERIAL_XMIT_SIZE - 1);
+ port->xmit_tail = (port->xmit_tail + 1)
+ & (SERIAL_XMIT_SIZE - 1);
port->xmit_cnt--;
txcount--;
}
@@ -730,81 +509,82 @@ static void isicom_tx(unsigned long _data)
if (port->xmit_cnt <= WAKEUP_CHARS)
schedule_work(&port->bh_tqueue);
unlock_card(&isi_card[card]);
- }
+ }
+
+ /* schedule another tx for hopefully in about 10ms */
+sched_again:
+ if (!re_schedule) {
+ re_schedule = 2;
+ return;
+ }
- /* schedule another tx for hopefully in about 10ms */
-sched_again:
- if (!re_schedule)
- return;
init_timer(&tx);
tx.expires = jiffies + HZ/100;
tx.data = 0;
tx.function = isicom_tx;
add_timer(&tx);
-
- return;
-}
-
+
+ return;
+}
+
/* Interrupt handlers */
-
-static void isicom_bottomhalf(void * data)
+
+static void isicom_bottomhalf(void *data)
{
- struct isi_port * port = (struct isi_port *) data;
- struct tty_struct * tty = port->tty;
-
+ struct isi_port *port = (struct isi_port *) data;
+ struct tty_struct *tty = port->tty;
+
if (!tty)
return;
- tty_wakeup(tty);
+ tty_wakeup(tty);
wake_up_interruptible(&tty->write_wait);
-}
-
+}
+
/*
- * Main interrupt handler routine
+ * Main interrupt handler routine
*/
-
-static irqreturn_t isicom_interrupt(int irq, void *dev_id,
- struct pt_regs *regs)
+
+static irqreturn_t isicom_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
- struct isi_board * card;
- struct isi_port * port;
- struct tty_struct * tty;
- unsigned short base, header, word_count, count;
- unsigned char channel;
+ struct isi_board *card = dev_id;
+ struct isi_port *port;
+ struct tty_struct *tty;
+ unsigned long base;
+ u16 header, word_count, count, channel;
short byte_count;
-
- card = (struct isi_board *) dev_id;
+ unsigned char *rp;
if (!card || !(card->status & FIRMWARE_LOADED))
return IRQ_NONE;
-
+
base = card->base;
spin_lock(&card->card_lock);
-
+
if (card->isa == NO) {
/*
- * disable any interrupts from the PCI card and lower the
- * interrupt line
+ * disable any interrupts from the PCI card and lower the
+ * interrupt line
*/
outw(0x8000, base+0x04);
ClearInterrupt(base);
}
-
+
inw(base); /* get the dummy word out */
header = inw(base);
channel = (header & 0x7800) >> card->shift_count;
byte_count = header & 0xff;
if (channel + 1 > card->port_count) {
- printk(KERN_WARNING "ISICOM: isicom_interrupt(0x%x): %d(channel) > port_count.\n",
- base, channel+1);
+ printk(KERN_WARNING "ISICOM: isicom_interrupt(0x%lx): "
+ "%d(channel) > port_count.\n", base, channel+1);
if (card->isa)
ClearInterrupt(base);
else
- outw(0x0000, base+0x04); /* enable interrupts */
+ outw(0x0000, base+0x04); /* enable interrupts */
spin_unlock(&card->card_lock);
- return IRQ_HANDLED;
+ return IRQ_HANDLED;
}
port = card->ports + channel;
if (!(port->flags & ASYNC_INITIALIZED)) {
@@ -813,8 +593,8 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id,
else
outw(0x0000, base+0x04); /* enable interrupts */
return IRQ_HANDLED;
- }
-
+ }
+
tty = port->tty;
if (tty == NULL) {
word_count = byte_count >> 1;
@@ -831,224 +611,204 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id,
spin_unlock(&card->card_lock);
return IRQ_HANDLED;
}
-
+
if (header & 0x8000) { /* Status Packet */
header = inw(base);
switch(header & 0xff) {
- case 0: /* Change in EIA signals */
-
- if (port->flags & ASYNC_CHECK_CD) {
- if (port->status & ISI_DCD) {
- if (!(header & ISI_DCD)) {
- /* Carrier has been lost */
-#ifdef ISICOM_DEBUG
- printk(KERN_DEBUG "ISICOM: interrupt: DCD->low.\n");
-#endif
- port->status &= ~ISI_DCD;
- schedule_work(&port->hangup_tq);
- }
- }
- else {
- if (header & ISI_DCD) {
- /* Carrier has been detected */
-#ifdef ISICOM_DEBUG
- printk(KERN_DEBUG "ISICOM: interrupt: DCD->high.\n");
-#endif
- port->status |= ISI_DCD;
- wake_up_interruptible(&port->open_wait);
- }
- }
- }
- else {
- if (header & ISI_DCD)
- port->status |= ISI_DCD;
- else
+ case 0: /* Change in EIA signals */
+ if (port->flags & ASYNC_CHECK_CD) {
+ if (port->status & ISI_DCD) {
+ if (!(header & ISI_DCD)) {
+ /* Carrier has been lost */
+ pr_dbg("interrupt: DCD->low.\n"
+ );
port->status &= ~ISI_DCD;
- }
-
- if (port->flags & ASYNC_CTS_FLOW) {
- if (port->tty->hw_stopped) {
- if (header & ISI_CTS) {
- port->tty->hw_stopped = 0;
- /* start tx ing */
- port->status |= (ISI_TXOK | ISI_CTS);
- schedule_work(&port->bh_tqueue);
- }
- }
- else {
- if (!(header & ISI_CTS)) {
- port->tty->hw_stopped = 1;
- /* stop tx ing */
- port->status &= ~(ISI_TXOK | ISI_CTS);
- }
+ schedule_work(&port->hangup_tq);
}
+ } else if (header & ISI_DCD) {
+ /* Carrier has been detected */
+ pr_dbg("interrupt: DCD->high.\n");
+ port->status |= ISI_DCD;
+ wake_up_interruptible(&port->open_wait);
}
- else {
- if (header & ISI_CTS)
- port->status |= ISI_CTS;
- else
- port->status &= ~ISI_CTS;
- }
-
- if (header & ISI_DSR)
- port->status |= ISI_DSR;
+ } else {
+ if (header & ISI_DCD)
+ port->status |= ISI_DCD;
else
- port->status &= ~ISI_DSR;
-
- if (header & ISI_RI)
- port->status |= ISI_RI;
+ port->status &= ~ISI_DCD;
+ }
+
+ if (port->flags & ASYNC_CTS_FLOW) {
+ if (port->tty->hw_stopped) {
+ if (header & ISI_CTS) {
+ port->tty->hw_stopped = 0;
+ /* start tx ing */
+ port->status |= (ISI_TXOK
+ | ISI_CTS);
+ schedule_work(&port->bh_tqueue);
+ }
+ } else if (!(header & ISI_CTS)) {
+ port->tty->hw_stopped = 1;
+ /* stop tx ing */
+ port->status &= ~(ISI_TXOK | ISI_CTS);
+ }
+ } else {
+ if (header & ISI_CTS)
+ port->status |= ISI_CTS;
else
- port->status &= ~ISI_RI;
-
- break;
-
- case 1: /* Received Break !!! */
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- break;
- *tty->flip.flag_buf_ptr++ = TTY_BREAK;
- *tty->flip.char_buf_ptr++ = 0;
- tty->flip.count++;
- if (port->flags & ASYNC_SAK)
- do_SAK(tty);
- schedule_delayed_work(&tty->flip.work, 1);
- break;
-
- case 2: /* Statistics */
- printk(KERN_DEBUG "ISICOM: isicom_interrupt: stats!!!.\n");
- break;
-
- default:
- printk(KERN_WARNING "ISICOM: Intr: Unknown code in status packet.\n");
- break;
- }
- }
- else { /* Data Packet */
- count = min_t(unsigned short, byte_count, (TTY_FLIPBUF_SIZE - tty->flip.count));
-#ifdef ISICOM_DEBUG
- printk(KERN_DEBUG "ISICOM: Intr: Can rx %d of %d bytes.\n",
- count, byte_count);
-#endif
+ port->status &= ~ISI_CTS;
+ }
+
+ if (header & ISI_DSR)
+ port->status |= ISI_DSR;
+ else
+ port->status &= ~ISI_DSR;
+
+ if (header & ISI_RI)
+ port->status |= ISI_RI;
+ else
+ port->status &= ~ISI_RI;
+
+ break;
+
+ case 1: /* Received Break !!! */
+ tty_insert_flip_char(tty, 0, TTY_BREAK);
+ if (port->flags & ASYNC_SAK)
+ do_SAK(tty);
+ tty_flip_buffer_push(tty);
+ break;
+
+ case 2: /* Statistics */
+ pr_dbg("isicom_interrupt: stats!!!.\n");
+ break;
+
+ default:
+ pr_dbg("Intr: Unknown code in status packet.\n");
+ break;
+ }
+ } else { /* Data Packet */
+
+ count = tty_prepare_flip_string(tty, &rp, byte_count & ~1);
+ pr_dbg("Intr: Can rx %d of %d bytes.\n", count, byte_count);
word_count = count >> 1;
- insw(base, tty->flip.char_buf_ptr, word_count);
- tty->flip.char_buf_ptr += (word_count << 1);
+ insw(base, rp, word_count);
byte_count -= (word_count << 1);
if (count & 0x0001) {
- *tty->flip.char_buf_ptr++ = (char)(inw(base) & 0xff);
+ tty_insert_flip_char(tty, inw(base) & 0xff,
+ TTY_NORMAL);
byte_count -= 2;
- }
- memset(tty->flip.flag_buf_ptr, 0, count);
- tty->flip.flag_buf_ptr += count;
- tty->flip.count += count;
-
+ }
if (byte_count > 0) {
- printk(KERN_DEBUG "ISICOM: Intr(0x%x:%d): Flip buffer overflow! dropping bytes...\n",
- base, channel+1);
+ pr_dbg("Intr(0x%lx:%d): Flip buffer overflow! dropping "
+ "bytes...\n", base, channel + 1);
while(byte_count > 0) { /* drain out unread xtra data */
inw(base);
byte_count -= 2;
}
}
- schedule_delayed_work(&tty->flip.work, 1);
+ tty_flip_buffer_push(tty);
}
if (card->isa == YES)
ClearInterrupt(base);
else
- outw(0x0000, base+0x04); /* enable interrupts */
+ outw(0x0000, base+0x04); /* enable interrupts */
+
return IRQ_HANDLED;
-}
+}
-static void isicom_config_port(struct isi_port * port)
+static void isicom_config_port(struct isi_port *port)
{
- struct isi_board * card = port->card;
- struct tty_struct * tty;
+ struct isi_board *card = port->card;
+ struct tty_struct *tty;
unsigned long baud;
- unsigned short channel_setup, base = card->base;
- unsigned short channel = port->channel, shift_count = card->shift_count;
+ unsigned long base = card->base;
+ u16 channel_setup, channel = port->channel,
+ shift_count = card->shift_count;
unsigned char flow_ctrl;
-
+
if (!(tty = port->tty) || !tty->termios)
return;
baud = C_BAUD(tty);
if (baud & CBAUDEX) {
baud &= ~CBAUDEX;
-
+
/* if CBAUDEX bit is on and the baud is set to either 50 or 75
* then the card is programmed for 57.6Kbps or 115Kbps
* respectively.
- */
-
+ */
+
if (baud < 1 || baud > 2)
port->tty->termios->c_cflag &= ~CBAUDEX;
else
baud += 15;
- }
+ }
if (baud == 15) {
-
- /* the ASYNC_SPD_HI and ASYNC_SPD_VHI options are set
+
+ /* the ASYNC_SPD_HI and ASYNC_SPD_VHI options are set
* by the set_serial_info ioctl ... this is done by
* the 'setserial' utility.
- */
-
+ */
+
if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
- baud++; /* 57.6 Kbps */
+ baud++; /* 57.6 Kbps */
if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
- baud +=2; /* 115 Kbps */
+ baud +=2; /* 115 Kbps */
}
if (linuxb_to_isib[baud] == -1) {
/* hang up */
- drop_dtr(port);
- return;
- }
- else
+ drop_dtr(port);
+ return;
+ }
+ else
raise_dtr(port);
-
+
if (lock_card(card)) {
outw(0x8000 | (channel << shift_count) |0x03, base);
outw(linuxb_to_isib[baud] << 8 | 0x03, base);
channel_setup = 0;
switch(C_CSIZE(tty)) {
- case CS5:
- channel_setup |= ISICOM_CS5;
- break;
- case CS6:
- channel_setup |= ISICOM_CS6;
- break;
- case CS7:
- channel_setup |= ISICOM_CS7;
- break;
- case CS8:
- channel_setup |= ISICOM_CS8;
- break;
+ case CS5:
+ channel_setup |= ISICOM_CS5;
+ break;
+ case CS6:
+ channel_setup |= ISICOM_CS6;
+ break;
+ case CS7:
+ channel_setup |= ISICOM_CS7;
+ break;
+ case CS8:
+ channel_setup |= ISICOM_CS8;
+ break;
}
-
+
if (C_CSTOPB(tty))
channel_setup |= ISICOM_2SB;
if (C_PARENB(tty)) {
channel_setup |= ISICOM_EVPAR;
if (C_PARODD(tty))
- channel_setup |= ISICOM_ODPAR;
+ channel_setup |= ISICOM_ODPAR;
}
- outw(channel_setup, base);
+ outw(channel_setup, base);
InterruptTheCard(base);
- unlock_card(card);
- }
+ unlock_card(card);
+ }
if (C_CLOCAL(tty))
port->flags &= ~ASYNC_CHECK_CD;
else
- port->flags |= ASYNC_CHECK_CD;
-
+ port->flags |= ASYNC_CHECK_CD;
+
/* flow control settings ...*/
flow_ctrl = 0;
port->flags &= ~ASYNC_CTS_FLOW;
if (C_CRTSCTS(tty)) {
port->flags |= ASYNC_CTS_FLOW;
flow_ctrl |= ISICOM_CTSRTS;
- }
- if (I_IXON(tty))
+ }
+ if (I_IXON(tty))
flow_ctrl |= ISICOM_RESPOND_XONXOFF;
if (I_IXOFF(tty))
- flow_ctrl |= ISICOM_INITIATE_XONXOFF;
-
+ flow_ctrl |= ISICOM_INITIATE_XONXOFF;
+
if (lock_card(card)) {
outw(0x8000 | (channel << shift_count) |0x04, base);
outw(flow_ctrl << 8 | 0x05, base);
@@ -1056,22 +816,22 @@ static void isicom_config_port(struct isi_port * port)
InterruptTheCard(base);
unlock_card(card);
}
-
+
/* rx enabled -> enable port for rx on the card */
if (C_CREAD(tty)) {
card->port_status |= (1 << channel);
outw(card->port_status, base + 0x02);
}
}
-
-/* open et all */
-static inline void isicom_setup_board(struct isi_board * bp)
+/* open et all */
+
+static inline void isicom_setup_board(struct isi_board *bp)
{
int channel;
- struct isi_port * port;
+ struct isi_port *port;
unsigned long flags;
-
+
spin_lock_irqsave(&bp->card_lock, flags);
if (bp->status & BOARD_ACTIVE) {
spin_unlock_irqrestore(&bp->card_lock, flags);
@@ -1080,53 +840,54 @@ static inline void isicom_setup_board(struct isi_board * bp)
port = bp->ports;
bp->status |= BOARD_ACTIVE;
spin_unlock_irqrestore(&bp->card_lock, flags);
- for(channel = 0; channel < bp->port_count; channel++, port++)
+ for (channel = 0; channel < bp->port_count; channel++, port++)
drop_dtr_rts(port);
return;
}
-
-static int isicom_setup_port(struct isi_port * port)
+
+static int isicom_setup_port(struct isi_port *port)
{
- struct isi_board * card = port->card;
+ struct isi_board *card = port->card;
unsigned long flags;
-
+
if (port->flags & ASYNC_INITIALIZED) {
return 0;
}
if (!port->xmit_buf) {
unsigned long page;
-
+
if (!(page = get_zeroed_page(GFP_KERNEL)))
return -ENOMEM;
-
+
if (port->xmit_buf) {
free_page(page);
return -ERESTARTSYS;
}
- port->xmit_buf = (unsigned char *) page;
- }
+ port->xmit_buf = (unsigned char *) page;
+ }
spin_lock_irqsave(&card->card_lock, flags);
if (port->tty)
clear_bit(TTY_IO_ERROR, &port->tty->flags);
if (port->count == 1)
card->count++;
-
+
port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
-
+
/* discard any residual data */
kill_queue(port, ISICOM_KILLTX | ISICOM_KILLRX);
-
+
isicom_config_port(port);
port->flags |= ASYNC_INITIALIZED;
spin_unlock_irqrestore(&card->card_lock, flags);
-
- return 0;
-}
-
-static int block_til_ready(struct tty_struct * tty, struct file * filp, struct isi_port * port)
+
+ return 0;
+}
+
+static int block_til_ready(struct tty_struct *tty, struct file *filp,
+ struct isi_port *port)
{
- struct isi_board * card = port->card;
+ struct isi_board *card = port->card;
int do_clocal = 0, retval;
unsigned long flags;
DECLARE_WAITQUEUE(wait, current);
@@ -1134,30 +895,27 @@ static int block_til_ready(struct tty_struct * tty, struct file * filp, struct i
/* block if port is in the process of being closed */
if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
-#ifdef ISICOM_DEBUG
- printk(KERN_DEBUG "ISICOM: block_til_ready: close in progress.\n");
-#endif
+ pr_dbg("block_til_ready: close in progress.\n");
interruptible_sleep_on(&port->close_wait);
if (port->flags & ASYNC_HUP_NOTIFY)
return -EAGAIN;
else
return -ERESTARTSYS;
}
-
+
/* if non-blocking mode is set ... */
-
- if ((filp->f_flags & O_NONBLOCK) || (tty->flags & (1 << TTY_IO_ERROR))) {
-#ifdef ISICOM_DEBUG
- printk(KERN_DEBUG "ISICOM: block_til_ready: non-block mode.\n");
-#endif
+
+ if ((filp->f_flags & O_NONBLOCK) ||
+ (tty->flags & (1 << TTY_IO_ERROR))) {
+ pr_dbg("block_til_ready: non-block mode.\n");
port->flags |= ASYNC_NORMAL_ACTIVE;
- return 0;
- }
-
+ return 0;
+ }
+
if (C_CLOCAL(tty))
do_clocal = 1;
-
- /* block waiting for DCD to be asserted, and while
+
+ /* block waiting for DCD to be asserted, and while
callout dev is busy */
retval = 0;
add_wait_queue(&port->open_wait, &wait);
@@ -1167,27 +925,27 @@ static int block_til_ready(struct tty_struct * tty, struct file * filp, struct i
port->count--;
port->blocked_open++;
spin_unlock_irqrestore(&card->card_lock, flags);
-
+
while (1) {
raise_dtr_rts(port);
set_current_state(TASK_INTERRUPTIBLE);
- if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)) {
+ if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)) {
if (port->flags & ASYNC_HUP_NOTIFY)
retval = -EAGAIN;
else
retval = -ERESTARTSYS;
break;
- }
+ }
if (!(port->flags & ASYNC_CLOSING) &&
- (do_clocal || (port->status & ISI_DCD))) {
+ (do_clocal || (port->status & ISI_DCD))) {
break;
- }
+ }
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
- schedule();
+ schedule();
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&port->open_wait, &wait);
@@ -1201,11 +959,11 @@ static int block_til_ready(struct tty_struct * tty, struct file * filp, struct i
port->flags |= ASYNC_NORMAL_ACTIVE;
return 0;
}
-
-static int isicom_open(struct tty_struct * tty, struct file * filp)
+
+static int isicom_open(struct tty_struct *tty, struct file *filp)
{
- struct isi_port * port;
- struct isi_board * card;
+ struct isi_port *port;
+ struct isi_board *card;
unsigned int line, board;
int error;
@@ -1214,20 +972,20 @@ static int isicom_open(struct tty_struct * tty, struct file * filp)
return -ENODEV;
board = BOARD(line);
card = &isi_card[board];
-
+
if (!(card->status & FIRMWARE_LOADED))
return -ENODEV;
-
+
/* open on a port greater than the port count for the card !!! */
if (line > ((board * 16) + card->port_count - 1))
return -ENODEV;
- port = &isi_ports[line];
+ port = &isi_ports[line];
if (isicom_paranoia_check(port, tty->name, "isicom_open"))
return -ENODEV;
-
- isicom_setup_board(card);
-
+
+ isicom_setup_board(card);
+
port->count++;
tty->driver_data = port;
port->tty = tty;
@@ -1236,12 +994,12 @@ static int isicom_open(struct tty_struct * tty, struct file * filp)
if ((error = block_til_ready(tty, filp, port))!=0)
return error;
- return 0;
+ return 0;
}
-
+
/* close et all */
-static inline void isicom_shutdown_board(struct isi_board * bp)
+static inline void isicom_shutdown_board(struct isi_board *bp)
{
unsigned long flags;
@@ -1252,15 +1010,15 @@ static inline void isicom_shutdown_board(struct isi_board * bp)
spin_unlock_irqrestore(&bp->card_lock, flags);
}
-static void isicom_shutdown_port(struct isi_port * port)
+static void isicom_shutdown_port(struct isi_port *port)
{
- struct isi_board * card = port->card;
- struct tty_struct * tty;
+ struct isi_board *card = port->card;
+ struct tty_struct *tty;
unsigned long flags;
-
+
tty = port->tty;
- spin_lock_irqsave(&card->card_lock, flags);
+ spin_lock_irqsave(&card->card_lock, flags);
if (!(port->flags & ASYNC_INITIALIZED)) {
spin_unlock_irqrestore(&card->card_lock, flags);
return;
@@ -1268,93 +1026,91 @@ static void isicom_shutdown_port(struct isi_port * port)
if (port->xmit_buf) {
free_page((unsigned long) port->xmit_buf);
port->xmit_buf = NULL;
- }
+ }
port->flags &= ~ASYNC_INITIALIZED;
/* 3rd October 2000 : Vinayak P Risbud */
port->tty = NULL;
spin_unlock_irqrestore(&card->card_lock, flags);
-
+
/*Fix done by Anil .S on 30-04-2001
remote login through isi port has dtr toggle problem
due to which the carrier drops before the password prompt
- appears on the remote end. Now we drop the dtr only if the
+ appears on the remote end. Now we drop the dtr only if the
HUPCL(Hangup on close) flag is set for the tty*/
-
- if (C_HUPCL(tty))
+
+ if (C_HUPCL(tty))
/* drop dtr on this port */
drop_dtr(port);
-
- /* any other port uninits */
+
+ /* any other port uninits */
if (tty)
set_bit(TTY_IO_ERROR, &tty->flags);
-
+
if (--card->count < 0) {
- printk(KERN_DEBUG "ISICOM: isicom_shutdown_port: bad board(0x%x) count %d.\n",
+ pr_dbg("isicom_shutdown_port: bad board(0x%lx) count %d.\n",
card->base, card->count);
- card->count = 0;
+ card->count = 0;
}
-
- /* last port was closed , shutdown that boad too */
- if(C_HUPCL(tty)) {
+
+ /* last port was closed, shut down that board too */
+ if (C_HUPCL(tty)) {
if (!card->count)
isicom_shutdown_board(card);
}
}
-static void isicom_close(struct tty_struct * tty, struct file * filp)
+static void isicom_close(struct tty_struct *tty, struct file *filp)
{
- struct isi_port * port = (struct isi_port *) tty->driver_data;
- struct isi_board * card = port->card;
+ struct isi_port *port = tty->driver_data;
+ struct isi_board *card = port->card;
unsigned long flags;
-
+
if (!port)
return;
if (isicom_paranoia_check(port, tty->name, "isicom_close"))
return;
-
-#ifdef ISICOM_DEBUG
- printk(KERN_DEBUG "ISICOM: Close start!!!.\n");
-#endif
-
+
+ pr_dbg("Close start!!!.\n");
+
spin_lock_irqsave(&card->card_lock, flags);
if (tty_hung_up_p(filp)) {
spin_unlock_irqrestore(&card->card_lock, flags);
return;
}
-
+
if (tty->count == 1 && port->count != 1) {
- printk(KERN_WARNING "ISICOM:(0x%x) isicom_close: bad port count"
- "tty->count = 1 port count = %d.\n",
+ printk(KERN_WARNING "ISICOM:(0x%lx) isicom_close: bad port "
+ "count tty->count = 1 port count = %d.\n",
card->base, port->count);
port->count = 1;
}
if (--port->count < 0) {
- printk(KERN_WARNING "ISICOM:(0x%x) isicom_close: bad port count for"
- "channel%d = %d", card->base, port->channel,
+ printk(KERN_WARNING "ISICOM:(0x%lx) isicom_close: bad port "
+ "count for channel%d = %d", card->base, port->channel,
port->count);
- port->count = 0;
+ port->count = 0;
}
-
+
if (port->count) {
spin_unlock_irqrestore(&card->card_lock, flags);
return;
- }
+ }
port->flags |= ASYNC_CLOSING;
tty->closing = 1;
spin_unlock_irqrestore(&card->card_lock, flags);
-
+
if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
tty_wait_until_sent(tty, port->closing_wait);
- /* indicate to the card that no more data can be received
+ /* indicate to the card that no more data can be received
on this port */
spin_lock_irqsave(&card->card_lock, flags);
- if (port->flags & ASYNC_INITIALIZED) {
+ if (port->flags & ASYNC_INITIALIZED) {
card->port_status &= ~(1 << port->channel);
outw(card->port_status, card->base + 0x02);
- }
+ }
isicom_shutdown_port(port);
spin_unlock_irqrestore(&card->card_lock, flags);
-
+
if (tty->driver->flush_buffer)
tty->driver->flush_buffer(tty);
tty_ldisc_flush(tty);
@@ -1365,65 +1121,65 @@ static void isicom_close(struct tty_struct * tty, struct file * filp)
if (port->blocked_open) {
spin_unlock_irqrestore(&card->card_lock, flags);
if (port->close_delay) {
-#ifdef ISICOM_DEBUG
- printk(KERN_DEBUG "ISICOM: scheduling until time out.\n");
-#endif
- msleep_interruptible(jiffies_to_msecs(port->close_delay));
+ pr_dbg("scheduling until time out.\n");
+ msleep_interruptible(
+ jiffies_to_msecs(port->close_delay));
}
spin_lock_irqsave(&card->card_lock, flags);
wake_up_interruptible(&port->open_wait);
- }
+ }
port->flags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_CLOSING);
wake_up_interruptible(&port->close_wait);
spin_unlock_irqrestore(&card->card_lock, flags);
}
/* write et all */
-static int isicom_write(struct tty_struct * tty,
- const unsigned char * buf, int count)
+static int isicom_write(struct tty_struct *tty, const unsigned char *buf,
+ int count)
{
- struct isi_port * port = (struct isi_port *) tty->driver_data;
- struct isi_board * card = port->card;
+ struct isi_port *port = tty->driver_data;
+ struct isi_board *card = port->card;
unsigned long flags;
int cnt, total = 0;
if (isicom_paranoia_check(port, tty->name, "isicom_write"))
return 0;
-
- if (!tty || !port->xmit_buf || !tmp_buf)
+
+ if (!tty || !port->xmit_buf)
return 0;
-
+
spin_lock_irqsave(&card->card_lock, flags);
-
- while(1) {
- cnt = min_t(int, count, min(SERIAL_XMIT_SIZE - port->xmit_cnt - 1,
- SERIAL_XMIT_SIZE - port->xmit_head));
- if (cnt <= 0)
+
+ while(1) {
+ cnt = min_t(int, count, min(SERIAL_XMIT_SIZE - port->xmit_cnt
+ - 1, SERIAL_XMIT_SIZE - port->xmit_head));
+ if (cnt <= 0)
break;
-
+
memcpy(port->xmit_buf + port->xmit_head, buf, cnt);
- port->xmit_head = (port->xmit_head + cnt) & (SERIAL_XMIT_SIZE - 1);
+ port->xmit_head = (port->xmit_head + cnt) & (SERIAL_XMIT_SIZE
+ - 1);
port->xmit_cnt += cnt;
buf += cnt;
count -= cnt;
total += cnt;
- }
+ }
if (port->xmit_cnt && !tty->stopped && !tty->hw_stopped)
port->status |= ISI_TXOK;
spin_unlock_irqrestore(&card->card_lock, flags);
- return total;
+ return total;
}
/* put_char et all */
-static void isicom_put_char(struct tty_struct * tty, unsigned char ch)
+static void isicom_put_char(struct tty_struct *tty, unsigned char ch)
{
- struct isi_port * port = (struct isi_port *) tty->driver_data;
- struct isi_board * card = port->card;
+ struct isi_port *port = tty->driver_data;
+ struct isi_board *card = port->card;
unsigned long flags;
-
+
if (isicom_paranoia_check(port, tty->name, "isicom_put_char"))
return;
-
+
if (!tty || !port->xmit_buf)
return;
@@ -1432,7 +1188,7 @@ static void isicom_put_char(struct tty_struct * tty, unsigned char ch)
spin_unlock_irqrestore(&card->card_lock, flags);
return;
}
-
+
port->xmit_buf[port->xmit_head++] = ch;
port->xmit_head &= (SERIAL_XMIT_SIZE - 1);
port->xmit_cnt++;
@@ -1440,30 +1196,31 @@ static void isicom_put_char(struct tty_struct * tty, unsigned char ch)
}
/* flush_chars et all */
-static void isicom_flush_chars(struct tty_struct * tty)
+static void isicom_flush_chars(struct tty_struct *tty)
{
- struct isi_port * port = (struct isi_port *) tty->driver_data;
-
+ struct isi_port *port = tty->driver_data;
+
if (isicom_paranoia_check(port, tty->name, "isicom_flush_chars"))
return;
-
- if (port->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped || !port->xmit_buf)
+
+ if (port->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
+ !port->xmit_buf)
return;
-
+
/* this tells the transmitter to consider this port for
data output to the card ... that's the best we can do. */
- port->status |= ISI_TXOK;
+ port->status |= ISI_TXOK;
}
/* write_room et all */
-static int isicom_write_room(struct tty_struct * tty)
+static int isicom_write_room(struct tty_struct *tty)
{
- struct isi_port * port = (struct isi_port *) tty->driver_data;
+ struct isi_port *port = tty->driver_data;
int free;
if (isicom_paranoia_check(port, tty->name, "isicom_write_room"))
return 0;
-
+
free = SERIAL_XMIT_SIZE - port->xmit_cnt - 1;
if (free < 0)
free = 0;
@@ -1471,23 +1228,24 @@ static int isicom_write_room(struct tty_struct * tty)
}
/* chars_in_buffer et all */
-static int isicom_chars_in_buffer(struct tty_struct * tty)
+static int isicom_chars_in_buffer(struct tty_struct *tty)
{
- struct isi_port * port = (struct isi_port *) tty->driver_data;
+ struct isi_port *port = tty->driver_data;
if (isicom_paranoia_check(port, tty->name, "isicom_chars_in_buffer"))
return 0;
return port->xmit_cnt;
}
/* ioctl et all */
-static inline void isicom_send_break(struct isi_port * port, unsigned long length)
+static inline void isicom_send_break(struct isi_port *port,
+ unsigned long length)
{
- struct isi_board * card = port->card;
- unsigned short base = card->base;
-
- if(!lock_card(card))
+ struct isi_board *card = port->card;
+ unsigned long base = card->base;
+
+ if (!lock_card(card))
return;
-
+
outw(0x8000 | ((port->channel) << (card->shift_count)) | 0x3, base);
outw((length & 0xff) << 8 | 0x00, base);
outw((length & 0xff00), base);
@@ -1498,13 +1256,13 @@ static inline void isicom_send_break(struct isi_port * port, unsigned long lengt
static int isicom_tiocmget(struct tty_struct *tty, struct file *file)
{
- struct isi_port * port = (struct isi_port *) tty->driver_data;
+ struct isi_port *port = tty->driver_data;
/* just send the port status */
- unsigned short status = port->status;
+ u16 status = port->status;
if (isicom_paranoia_check(port, tty->name, "isicom_ioctl"))
return -ENODEV;
-
+
return ((status & ISI_RTS) ? TIOCM_RTS : 0) |
((status & ISI_DTR) ? TIOCM_DTR : 0) |
((status & ISI_DCD) ? TIOCM_CAR : 0) |
@@ -1514,13 +1272,13 @@ static int isicom_tiocmget(struct tty_struct *tty, struct file *file)
}
static int isicom_tiocmset(struct tty_struct *tty, struct file *file,
- unsigned int set, unsigned int clear)
+ unsigned int set, unsigned int clear)
{
- struct isi_port * port = (struct isi_port *) tty->driver_data;
-
+ struct isi_port *port = tty->driver_data;
+
if (isicom_paranoia_check(port, tty->name, "isicom_ioctl"))
return -ENODEV;
-
+
if (set & TIOCM_RTS)
raise_rts(port);
if (set & TIOCM_DTR)
@@ -1532,46 +1290,46 @@ static int isicom_tiocmset(struct tty_struct *tty, struct file *file,
drop_dtr(port);
return 0;
-}
+}
-static int isicom_set_serial_info(struct isi_port * port,
- struct serial_struct __user *info)
+static int isicom_set_serial_info(struct isi_port *port,
+ struct serial_struct __user *info)
{
struct serial_struct newinfo;
int reconfig_port;
- if(copy_from_user(&newinfo, info, sizeof(newinfo)))
+ if (copy_from_user(&newinfo, info, sizeof(newinfo)))
return -EFAULT;
-
- reconfig_port = ((port->flags & ASYNC_SPD_MASK) !=
- (newinfo.flags & ASYNC_SPD_MASK));
-
+
+ reconfig_port = ((port->flags & ASYNC_SPD_MASK) !=
+ (newinfo.flags & ASYNC_SPD_MASK));
+
if (!capable(CAP_SYS_ADMIN)) {
if ((newinfo.close_delay != port->close_delay) ||
- (newinfo.closing_wait != port->closing_wait) ||
- ((newinfo.flags & ~ASYNC_USR_MASK) !=
- (port->flags & ~ASYNC_USR_MASK)))
+ (newinfo.closing_wait != port->closing_wait) ||
+ ((newinfo.flags & ~ASYNC_USR_MASK) !=
+ (port->flags & ~ASYNC_USR_MASK)))
return -EPERM;
port->flags = ((port->flags & ~ ASYNC_USR_MASK) |
(newinfo.flags & ASYNC_USR_MASK));
- }
+ }
else {
port->close_delay = newinfo.close_delay;
- port->closing_wait = newinfo.closing_wait;
- port->flags = ((port->flags & ~ASYNC_FLAGS) |
+ port->closing_wait = newinfo.closing_wait;
+ port->flags = ((port->flags & ~ASYNC_FLAGS) |
(newinfo.flags & ASYNC_FLAGS));
}
if (reconfig_port) {
isicom_config_port(port);
}
- return 0;
-}
+ return 0;
+}
-static int isicom_get_serial_info(struct isi_port * port,
- struct serial_struct __user *info)
+static int isicom_get_serial_info(struct isi_port *port,
+ struct serial_struct __user *info)
{
struct serial_struct out_info;
-
+
memset(&out_info, 0, sizeof(out_info));
/* out_info.type = ? */
out_info.line = port - isi_ports;
@@ -1581,15 +1339,15 @@ static int isicom_get_serial_info(struct isi_port * port,
/* out_info.baud_base = ? */
out_info.close_delay = port->close_delay;
out_info.closing_wait = port->closing_wait;
- if(copy_to_user(info, &out_info, sizeof(out_info)))
+ if (copy_to_user(info, &out_info, sizeof(out_info)))
return -EFAULT;
return 0;
-}
+}
-static int isicom_ioctl(struct tty_struct * tty, struct file * filp,
- unsigned int cmd, unsigned long arg)
+static int isicom_ioctl(struct tty_struct *tty, struct file *filp,
+ unsigned int cmd, unsigned long arg)
{
- struct isi_port * port = (struct isi_port *) tty->driver_data;
+ struct isi_port *port = tty->driver_data;
void __user *argp = (void __user *)arg;
int retval;
@@ -1597,139 +1355,141 @@ static int isicom_ioctl(struct tty_struct * tty, struct file * filp,
return -ENODEV;
switch(cmd) {
- case TCSBRK:
- retval = tty_check_change(tty);
- if (retval)
- return retval;
- tty_wait_until_sent(tty, 0);
- if (!arg)
- isicom_send_break(port, HZ/4);
- return 0;
-
- case TCSBRKP:
- retval = tty_check_change(tty);
- if (retval)
- return retval;
- tty_wait_until_sent(tty, 0);
- isicom_send_break(port, arg ? arg * (HZ/10) : HZ/4);
- return 0;
-
- case TIOCGSOFTCAR:
- return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *)argp);
-
- case TIOCSSOFTCAR:
- if(get_user(arg, (unsigned long __user *) argp))
- return -EFAULT;
- tty->termios->c_cflag =
- ((tty->termios->c_cflag & ~CLOCAL) |
- (arg ? CLOCAL : 0));
- return 0;
-
- case TIOCGSERIAL:
- return isicom_get_serial_info(port, argp);
-
- case TIOCSSERIAL:
- return isicom_set_serial_info(port, argp);
-
- default:
- return -ENOIOCTLCMD;
+ case TCSBRK:
+ retval = tty_check_change(tty);
+ if (retval)
+ return retval;
+ tty_wait_until_sent(tty, 0);
+ if (!arg)
+ isicom_send_break(port, HZ/4);
+ return 0;
+
+ case TCSBRKP:
+ retval = tty_check_change(tty);
+ if (retval)
+ return retval;
+ tty_wait_until_sent(tty, 0);
+ isicom_send_break(port, arg ? arg * (HZ/10) : HZ/4);
+ return 0;
+
+ case TIOCGSOFTCAR:
+ return put_user(C_CLOCAL(tty) ? 1 : 0,
+ (unsigned long __user *)argp);
+
+ case TIOCSSOFTCAR:
+ if (get_user(arg, (unsigned long __user *) argp))
+ return -EFAULT;
+ tty->termios->c_cflag =
+ ((tty->termios->c_cflag & ~CLOCAL) |
+ (arg ? CLOCAL : 0));
+ return 0;
+
+ case TIOCGSERIAL:
+ return isicom_get_serial_info(port, argp);
+
+ case TIOCSSERIAL:
+ return isicom_set_serial_info(port, argp);
+
+ default:
+ return -ENOIOCTLCMD;
}
return 0;
}
/* set_termios et all */
-static void isicom_set_termios(struct tty_struct * tty, struct termios * old_termios)
+static void isicom_set_termios(struct tty_struct *tty,
+ struct termios *old_termios)
{
- struct isi_port * port = (struct isi_port *) tty->driver_data;
-
+ struct isi_port *port = tty->driver_data;
+
if (isicom_paranoia_check(port, tty->name, "isicom_set_termios"))
return;
-
+
if (tty->termios->c_cflag == old_termios->c_cflag &&
- tty->termios->c_iflag == old_termios->c_iflag)
+ tty->termios->c_iflag == old_termios->c_iflag)
return;
-
+
isicom_config_port(port);
-
+
if ((old_termios->c_cflag & CRTSCTS) &&
- !(tty->termios->c_cflag & CRTSCTS)) {
+ !(tty->termios->c_cflag & CRTSCTS)) {
tty->hw_stopped = 0;
- isicom_start(tty);
- }
+ isicom_start(tty);
+ }
}
/* throttle et all */
-static void isicom_throttle(struct tty_struct * tty)
+static void isicom_throttle(struct tty_struct *tty)
{
- struct isi_port * port = (struct isi_port *) tty->driver_data;
- struct isi_board * card = port->card;
-
+ struct isi_port *port = tty->driver_data;
+ struct isi_board *card = port->card;
+
if (isicom_paranoia_check(port, tty->name, "isicom_throttle"))
return;
-
+
/* tell the card that this port cannot handle any more data for now */
card->port_status &= ~(1 << port->channel);
outw(card->port_status, card->base + 0x02);
}
/* unthrottle et all */
-static void isicom_unthrottle(struct tty_struct * tty)
+static void isicom_unthrottle(struct tty_struct *tty)
{
- struct isi_port * port = (struct isi_port *) tty->driver_data;
- struct isi_board * card = port->card;
-
+ struct isi_port *port = tty->driver_data;
+ struct isi_board *card = port->card;
+
if (isicom_paranoia_check(port, tty->name, "isicom_unthrottle"))
return;
-
+
/* tell the card that this port is ready to accept more data */
card->port_status |= (1 << port->channel);
outw(card->port_status, card->base + 0x02);
}
/* stop et all */
-static void isicom_stop(struct tty_struct * tty)
+static void isicom_stop(struct tty_struct *tty)
{
- struct isi_port * port = (struct isi_port *) tty->driver_data;
+ struct isi_port *port = tty->driver_data;
if (isicom_paranoia_check(port, tty->name, "isicom_stop"))
return;
-
+
/* this tells the transmitter not to consider this port for
data output to the card. */
port->status &= ~ISI_TXOK;
}
/* start et all */
-static void isicom_start(struct tty_struct * tty)
+static void isicom_start(struct tty_struct *tty)
{
- struct isi_port * port = (struct isi_port *) tty->driver_data;
-
+ struct isi_port *port = tty->driver_data;
+
if (isicom_paranoia_check(port, tty->name, "isicom_start"))
return;
-
+
/* this tells the transmitter to consider this port for
data output to the card. */
port->status |= ISI_TXOK;
}
/* hangup et all */
-static void do_isicom_hangup(void * data)
+static void do_isicom_hangup(void *data)
{
- struct isi_port * port = (struct isi_port *) data;
- struct tty_struct * tty;
-
+ struct isi_port *port = data;
+ struct tty_struct *tty;
+
tty = port->tty;
if (tty)
tty_hangup(tty);
}
-static void isicom_hangup(struct tty_struct * tty)
+static void isicom_hangup(struct tty_struct *tty)
{
- struct isi_port * port = (struct isi_port *) tty->driver_data;
-
+ struct isi_port *port = tty->driver_data;
+
if (isicom_paranoia_check(port, tty->name, "isicom_hangup"))
return;
-
+
isicom_shutdown_port(port);
port->count = 0;
port->flags &= ~ASYNC_NORMAL_ACTIVE;
@@ -1738,342 +1498,540 @@ static void isicom_hangup(struct tty_struct * tty)
}
/* flush_buffer et all */
-static void isicom_flush_buffer(struct tty_struct * tty)
+static void isicom_flush_buffer(struct tty_struct *tty)
{
- struct isi_port * port = (struct isi_port *) tty->driver_data;
- struct isi_board * card = port->card;
+ struct isi_port *port = tty->driver_data;
+ struct isi_board *card = port->card;
unsigned long flags;
-
+
if (isicom_paranoia_check(port, tty->name, "isicom_flush_buffer"))
return;
-
+
spin_lock_irqsave(&card->card_lock, flags);
port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
spin_unlock_irqrestore(&card->card_lock, flags);
-
+
wake_up_interruptible(&tty->write_wait);
tty_wakeup(tty);
}
+/*
+ * Driver init and deinit functions
+ */
-static int __devinit register_ioregion(void)
+static int __devinit isicom_register_ioregion(struct pci_dev *pdev,
+ const unsigned int index)
{
- int count, done=0;
- for (count=0; count < BOARD_COUNT; count++ ) {
- if (isi_card[count].base)
- if (!request_region(isi_card[count].base,16,ISICOM_NAME)) {
- printk(KERN_DEBUG "ISICOM: I/O Region 0x%x-0x%x is busy. Card%d will be disabled.\n",
- isi_card[count].base,isi_card[count].base+15,count+1);
- isi_card[count].base=0;
- done++;
- }
- }
- return done;
+ struct isi_board *board = pci_get_drvdata(pdev);
+
+ if (!board->base)
+ return -EINVAL;
+
+ if (!request_region(board->base, 16, ISICOM_NAME)) {
+ dev_dbg(&pdev->dev, "I/O Region 0x%lx-0x%lx is busy. Card%d "
+ "will be disabled.\n", board->base, board->base + 15,
+ index + 1);
+ return -EBUSY;
+ }
+
+ return 0;
}
-static void unregister_ioregion(void)
+static void isicom_unregister_ioregion(struct pci_dev *pdev)
{
- int count;
- for (count=0; count < BOARD_COUNT; count++ )
- if (isi_card[count].base) {
- release_region(isi_card[count].base,16);
-#ifdef ISICOM_DEBUG
- printk(KERN_DEBUG "ISICOM: I/O Region 0x%x-0x%x released for Card%d.\n",isi_card[count].base,isi_card[count].base+15,count+1);
-#endif
- }
+ struct isi_board *board = pci_get_drvdata(pdev);
+
+ if (!board->base)
+ return;
+
+ release_region(board->base, 16);
+ dev_dbg(&pdev->dev, "I/O Region 0x%lx-0x%lx released.\n",
+ board->base, board->base + 15);
+ board->base = 0;
}
static struct tty_operations isicom_ops = {
- .open = isicom_open,
- .close = isicom_close,
- .write = isicom_write,
- .put_char = isicom_put_char,
- .flush_chars = isicom_flush_chars,
- .write_room = isicom_write_room,
+ .open = isicom_open,
+ .close = isicom_close,
+ .write = isicom_write,
+ .put_char = isicom_put_char,
+ .flush_chars = isicom_flush_chars,
+ .write_room = isicom_write_room,
.chars_in_buffer = isicom_chars_in_buffer,
- .ioctl = isicom_ioctl,
- .set_termios = isicom_set_termios,
- .throttle = isicom_throttle,
- .unthrottle = isicom_unthrottle,
- .stop = isicom_stop,
- .start = isicom_start,
- .hangup = isicom_hangup,
- .flush_buffer = isicom_flush_buffer,
- .tiocmget = isicom_tiocmget,
- .tiocmset = isicom_tiocmset,
+ .ioctl = isicom_ioctl,
+ .set_termios = isicom_set_termios,
+ .throttle = isicom_throttle,
+ .unthrottle = isicom_unthrottle,
+ .stop = isicom_stop,
+ .start = isicom_start,
+ .hangup = isicom_hangup,
+ .flush_buffer = isicom_flush_buffer,
+ .tiocmget = isicom_tiocmget,
+ .tiocmset = isicom_tiocmset,
};
-static int __devinit register_drivers(void)
+static int __devinit isicom_register_tty_driver(void)
{
- int error;
+ int error = -ENOMEM;
/* tty driver structure initialization */
isicom_normal = alloc_tty_driver(PORT_COUNT);
if (!isicom_normal)
- return -ENOMEM;
-
- isicom_normal->owner = THIS_MODULE;
- isicom_normal->name = "ttyM";
- isicom_normal->devfs_name = "isicom/";
- isicom_normal->major = ISICOM_NMAJOR;
- isicom_normal->minor_start = 0;
- isicom_normal->type = TTY_DRIVER_TYPE_SERIAL;
- isicom_normal->subtype = SERIAL_TYPE_NORMAL;
- isicom_normal->init_termios = tty_std_termios;
- isicom_normal->init_termios.c_cflag =
- B9600 | CS8 | CREAD | HUPCL |CLOCAL;
- isicom_normal->flags = TTY_DRIVER_REAL_RAW;
+ goto end;
+
+ isicom_normal->owner = THIS_MODULE;
+ isicom_normal->name = "ttyM";
+ isicom_normal->devfs_name = "isicom/";
+ isicom_normal->major = ISICOM_NMAJOR;
+ isicom_normal->minor_start = 0;
+ isicom_normal->type = TTY_DRIVER_TYPE_SERIAL;
+ isicom_normal->subtype = SERIAL_TYPE_NORMAL;
+ isicom_normal->init_termios = tty_std_termios;
+ isicom_normal->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL |
+ CLOCAL;
+ isicom_normal->flags = TTY_DRIVER_REAL_RAW;
tty_set_operations(isicom_normal, &isicom_ops);
-
- if ((error=tty_register_driver(isicom_normal))!=0) {
- printk(KERN_DEBUG "ISICOM: Couldn't register the dialin driver, error=%d\n",
+
+ if ((error = tty_register_driver(isicom_normal))) {
+ pr_dbg("Couldn't register the dialin driver, error=%d\n",
error);
put_tty_driver(isicom_normal);
- return error;
}
- return 0;
+end:
+ return error;
}
-static void unregister_drivers(void)
+static void isicom_unregister_tty_driver(void)
{
- int error = tty_unregister_driver(isicom_normal);
- if (error)
- printk(KERN_DEBUG "ISICOM: couldn't unregister normal driver error=%d.\n",error);
+ int error;
+
+ if ((error = tty_unregister_driver(isicom_normal)))
+ pr_dbg("couldn't unregister normal driver, error=%d.\n", error);
+
put_tty_driver(isicom_normal);
}
-static int __devinit register_isr(void)
+static int __devinit isicom_register_isr(struct pci_dev *pdev,
+ const unsigned int index)
{
- int count, done=0;
- unsigned long irqflags;
-
- for (count=0; count < BOARD_COUNT; count++ ) {
- if (isi_card[count].base) {
- irqflags = (isi_card[count].isa == YES) ?
- SA_INTERRUPT :
- (SA_INTERRUPT | SA_SHIRQ);
-
- if (request_irq(isi_card[count].irq,
- isicom_interrupt,
- irqflags,
- ISICOM_NAME, &isi_card[count])) {
-
- printk(KERN_WARNING "ISICOM: Could not"
- " install handler at Irq %d."
- " Card%d will be disabled.\n",
- isi_card[count].irq, count+1);
-
- release_region(isi_card[count].base,16);
- isi_card[count].base=0;
- }
- else
- done++;
- }
- }
- return done;
+ struct isi_board *board = pci_get_drvdata(pdev);
+ unsigned long irqflags = SA_INTERRUPT;
+ int retval = -EINVAL;
+
+ if (!board->base)
+ goto end;
+
+ if (board->isa == NO)
+ irqflags |= SA_SHIRQ;
+
+ retval = request_irq(board->irq, isicom_interrupt, irqflags,
+ ISICOM_NAME, board);
+ if (retval < 0)
+ dev_warn(&pdev->dev, "Could not install handler at Irq %d. "
+ "Card%d will be disabled.\n", board->irq, index + 1);
+ else
+ retval = 0;
+end:
+ return retval;
}
-static void __exit unregister_isr(void)
+static int __devinit reset_card(struct pci_dev *pdev,
+ const unsigned int card, unsigned int *signature)
{
- int count;
+ struct isi_board *board = pci_get_drvdata(pdev);
+ unsigned long base = board->base;
+ unsigned int portcount = 0;
+ int retval = 0;
+
+ dev_dbg(&pdev->dev, "ISILoad:Resetting Card%d at 0x%lx\n", card + 1,
+ base);
- for (count=0; count < BOARD_COUNT; count++ ) {
- if (isi_card[count].base)
- free_irq(isi_card[count].irq, &isi_card[count]);
+ inw(base + 0x8);
+
+ mdelay(10);
+
+ outw(0, base + 0x8); /* Reset */
+
+ msleep(3000);
+
+ *signature = inw(base + 0x4) & 0xff;
+
+ if (board->isa == YES) {
+ if (!(inw(base + 0xe) & 0x1) || (inw(base + 0x2))) {
+ dev_dbg(&pdev->dev, "base+0x2=0x%lx, base+0xe=0x%lx\n",
+ inw(base + 0x2), inw(base + 0xe));
+ dev_err(&pdev->dev, "ISILoad:ISA Card%d reset failure "
+ "(Possible bad I/O Port Address 0x%lx).\n",
+ card + 1, base);
+ retval = -EIO;
+ goto end;
+ }
+ } else {
+ portcount = inw(base + 0x2);
+ if (!(inw(base + 0xe) & 0x1) || ((portcount != 0) &&
+ (portcount != 4) && (portcount != 8))) {
+ dev_dbg(&pdev->dev, "base+0x2=0x%lx, base+0xe=0x%lx\n",
+ inw(base + 0x2), inw(base + 0xe));
+ dev_err(&pdev->dev, "ISILoad:PCI Card%d reset failure "
+ "(Possible bad I/O Port Address 0x%lx).\n",
+ card + 1, base);
+ retval = -EIO;
+ goto end;
+ }
}
+
+ switch (*signature) {
+ case 0xa5:
+ case 0xbb:
+ case 0xdd:
+ board->port_count = (board->isa == NO && portcount == 4) ? 4 :
+ 8;
+ board->shift_count = 12;
+ break;
+ case 0xcc:
+ board->port_count = 16;
+ board->shift_count = 11;
+ break;
+ default:
+ dev_warn(&pdev->dev, "ISILoad:Card%d reset failure (Possible "
+ "bad I/O Port Address 0x%lx).\n", card + 1, base);
+ dev_dbg(&pdev->dev, "Sig=0x%lx\n", signature);
+ retval = -EIO;
+ }
+ dev_info(&pdev->dev, "-Done\n");
+
+end:
+ return retval;
}
-static int __devinit isicom_init(void)
+static inline int WaitTillCardIsFree(u16 base)
{
- int card, channel, base;
- struct isi_port * port;
- unsigned long page;
-
- if (!tmp_buf) {
- page = get_zeroed_page(GFP_KERNEL);
- if (!page) {
-#ifdef ISICOM_DEBUG
- printk(KERN_DEBUG "ISICOM: Couldn't allocate page for tmp_buf.\n");
-#else
- printk(KERN_ERR "ISICOM: Not enough memory...\n");
-#endif
- return 0;
- }
- tmp_buf = (unsigned char *) page;
- }
-
- if (!register_ioregion())
- {
- printk(KERN_ERR "ISICOM: All required I/O space found busy.\n");
- free_page((unsigned long)tmp_buf);
- return 0;
- }
- if (register_drivers())
- {
- unregister_ioregion();
- free_page((unsigned long)tmp_buf);
- return 0;
- }
- if (!register_isr())
- {
- unregister_drivers();
- /* ioports already uregistered in register_isr */
- free_page((unsigned long)tmp_buf);
- return 0;
+ unsigned long count = 0;
+
+ while (!(inw(base + 0xe) & 0x1) && count++ < 100)
+ msleep(5);
+
+ return !(inw(base + 0xe) & 0x1);
+}
+
+static int __devinit load_firmware(struct pci_dev *pdev,
+ const unsigned int index, const unsigned int signature)
+{
+ struct isi_board *board = pci_get_drvdata(pdev);
+ const struct firmware *fw;
+ unsigned long base = board->base;
+ unsigned int a;
+ u16 word_count, status;
+ int retval = -EIO;
+ char *name;
+ u8 *data;
+
+ struct stframe {
+ u16 addr;
+ u16 count;
+ u8 data[0];
+ } *frame;
+
+ switch (signature) {
+ case 0xa5:
+ name = "isi608.bin";
+ break;
+ case 0xbb:
+ name = "isi608em.bin";
+ break;
+ case 0xcc:
+ name = "isi616em.bin";
+ break;
+ case 0xdd:
+ name = "isi4608.bin";
+ break;
+ case 0xee:
+ name = "isi4616.bin";
+ break;
+ default:
+ dev_err(&pdev->dev, "Unknown signature.\n");
+ goto end;
+ }
+
+ retval = request_firmware(&fw, name, &pdev->dev);
+ if (retval)
+ goto end;
+
+ for (frame = (struct stframe *)fw->data;
+ frame < (struct stframe *)(fw->data + fw->size);
+ frame++) {
+ if (WaitTillCardIsFree(base))
+ goto errrelfw;
+
+ outw(0xf0, base); /* start upload sequence */
+ outw(0x00, base);
+ outw(frame->addr, base); /* lsb of address */
+
+ word_count = frame->count / 2 + frame->count % 2;
+ outw(word_count, base);
+ InterruptTheCard(base);
+
+ udelay(100); /* 0x2f */
+
+ if (WaitTillCardIsFree(base))
+ goto errrelfw;
+
+ if ((status = inw(base + 0x4)) != 0) {
+ dev_warn(&pdev->dev, "Card%d rejected load header:\n"
+ "Address:0x%x\nCount:0x%x\nStatus:0x%x\n",
+ index + 1, frame->addr, frame->count, status);
+ goto errrelfw;
+ }
+ outsw(base, frame->data, word_count);
+
+ InterruptTheCard(base);
+
+ udelay(50); /* 0x0f */
+
+ if (WaitTillCardIsFree(base))
+ goto errrelfw;
+
+ if ((status = inw(base + 0x4)) != 0) {
+ dev_err(&pdev->dev, "Card%d got out of sync.Card "
+ "Status:0x%x\n", index + 1, status);
+ goto errrelfw;
+ }
+ }
+
+ retval = -EIO;
+
+ if (WaitTillCardIsFree(base))
+ goto errrelfw;
+
+ outw(0xf2, base);
+ outw(0x800, base);
+ outw(0x0, base);
+ outw(0x0, base);
+ InterruptTheCard(base);
+ outw(0x0, base + 0x4); /* for ISI4608 cards */
+
+/* XXX: should we test it by reading it back and comparing with original like
+ * in load firmware package? */
+ for (frame = (struct stframe*)fw->data;
+ frame < (struct stframe*)(fw->data + fw->size);
+ frame++) {
+ if (WaitTillCardIsFree(base))
+ goto errrelfw;
+
+ outw(0xf1, base); /* start download sequence */
+ outw(0x00, base);
+ outw(frame->addr, base); /* lsb of address */
+
+ word_count = (frame->count >> 1) + frame->count % 2;
+ outw(word_count + 1, base);
+ InterruptTheCard(base);
+
+ udelay(50); /* 0xf */
+
+ if (WaitTillCardIsFree(base))
+ goto errrelfw;
+
+ if ((status = inw(base + 0x4)) != 0) {
+ dev_warn(&pdev->dev, "Card%d rejected verify header:\n"
+ "Address:0x%x\nCount:0x%x\nStatus: 0x%x\n",
+ index + 1, frame->addr, frame->count, status);
+ goto errrelfw;
+ }
+
+ data = kmalloc(word_count * 2, GFP_KERNEL);
+ inw(base);
+ insw(base, data, word_count);
+ InterruptTheCard(base);
+
+ for (a = 0; a < frame->count; a++)
+ if (data[a] != frame->data[a]) {
+ kfree(data);
+ dev_err(&pdev->dev, "Card%d, firmware upload "
+ "failed\n", index + 1);
+ goto errrelfw;
+ }
+ kfree(data);
+
+ udelay(50); /* 0xf */
+
+ if (WaitTillCardIsFree(base))
+ goto errrelfw;
+
+ if ((status = inw(base + 0x4)) != 0) {
+ dev_err(&pdev->dev, "Card%d verify got out of sync. "
+ "Card Status:0x%x\n", index + 1, status);
+ goto errrelfw;
+ }
}
-
- memset(isi_ports, 0, sizeof(isi_ports));
- for (card = 0; card < BOARD_COUNT; card++) {
- port = &isi_ports[card * 16];
- isi_card[card].ports = port;
- spin_lock_init(&isi_card[card].card_lock);
- base = isi_card[card].base;
- for (channel = 0; channel < 16; channel++, port++) {
- port->magic = ISICOM_MAGIC;
- port->card = &isi_card[card];
- port->channel = channel;
- port->close_delay = 50 * HZ/100;
- port->closing_wait = 3000 * HZ/100;
- INIT_WORK(&port->hangup_tq, do_isicom_hangup, port);
- INIT_WORK(&port->bh_tqueue, isicom_bottomhalf, port);
- port->status = 0;
- init_waitqueue_head(&port->open_wait);
- init_waitqueue_head(&port->close_wait);
- /* . . . */
- }
- }
-
- return 1;
+
+ board->status |= FIRMWARE_LOADED;
+ retval = 0;
+
+errrelfw:
+ release_firmware(fw);
+end:
+ return retval;
}
/*
* Insmod can set static symbols so keep these static
*/
-
static int io[4];
static int irq[4];
+static int card;
-MODULE_AUTHOR("MultiTech");
-MODULE_DESCRIPTION("Driver for the ISI series of cards by MultiTech");
-MODULE_LICENSE("GPL");
-module_param_array(io, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O ports for the cards");
-module_param_array(irq, int, NULL, 0);
-MODULE_PARM_DESC(irq, "Interrupts for the cards");
+static int __devinit isicom_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ unsigned int ioaddr, signature, index;
+ int retval = -EPERM;
+ u8 pciirq;
+ struct isi_board *board = NULL;
+
+ if (card >= BOARD_COUNT)
+ goto err;
+
+ ioaddr = pci_resource_start(pdev, 3);
+ /* i.e. at offset 0x1c in the PCI configuration register space. */
+ pciirq = pdev->irq;
+ dev_info(&pdev->dev, "ISI PCI Card(Device ID 0x%x)\n", ent->device);
+
+ /* allot the first empty slot in the array */
+ for (index = 0; index < BOARD_COUNT; index++)
+ if (isi_card[index].base == 0) {
+ board = &isi_card[index];
+ break;
+ }
+
+ board->base = ioaddr;
+ board->irq = pciirq;
+ board->isa = NO;
+ card++;
+
+ pci_set_drvdata(pdev, board);
+
+ retval = isicom_register_ioregion(pdev, index);
+ if (retval < 0)
+ goto err;
+
+ retval = isicom_register_isr(pdev, index);
+ if (retval < 0)
+ goto errunrr;
+
+ retval = reset_card(pdev, index, &signature);
+ if (retval < 0)
+ goto errunri;
+
+ retval = load_firmware(pdev, index, signature);
+ if (retval < 0)
+ goto errunri;
+
+ return 0;
+
+errunri:
+ free_irq(board->irq, board);
+errunrr:
+ isicom_unregister_ioregion(pdev);
+err:
+ board->base = 0;
+ return retval;
+}
+
+static void __devexit isicom_remove(struct pci_dev *pdev)
+{
+ struct isi_board *board = pci_get_drvdata(pdev);
+
+ free_irq(board->irq, board);
+ isicom_unregister_ioregion(pdev);
+}
static int __devinit isicom_setup(void)
{
- struct pci_dev *dev = NULL;
- int retval, card, idx, count;
- unsigned char pciirq;
- unsigned int ioaddr;
-
+ int retval, idx, channel;
+ struct isi_port *port;
+
card = 0;
- for(idx=0; idx < BOARD_COUNT; idx++) {
- if (io[idx]) {
- isi_card[idx].base=io[idx];
- isi_card[idx].irq=irq[idx];
- isi_card[idx].isa=YES;
- card++;
- }
- else {
- isi_card[idx].base = 0;
- isi_card[idx].irq = 0;
- }
- }
-
- for (idx=0 ;idx < card; idx++) {
- if (!((isi_card[idx].irq==2)||(isi_card[idx].irq==3)||
- (isi_card[idx].irq==4)||(isi_card[idx].irq==5)||
- (isi_card[idx].irq==7)||(isi_card[idx].irq==10)||
- (isi_card[idx].irq==11)||(isi_card[idx].irq==12)||
- (isi_card[idx].irq==15))) {
-
- if (isi_card[idx].base) {
- printk(KERN_ERR "ISICOM: Irq %d unsupported. Disabling Card%d...\n",
- isi_card[idx].irq, idx+1);
- isi_card[idx].base=0;
- card--;
- }
- }
- }
-
- if (card < BOARD_COUNT) {
- for (idx=0; idx < DEVID_COUNT; idx++) {
- dev = NULL;
- for (;;){
- if (!(dev = pci_find_device(VENDOR_ID, isicom_pci_tbl[idx].device, dev)))
- break;
- if (card >= BOARD_COUNT)
- break;
-
- if (pci_enable_device(dev))
- break;
+ memset(isi_ports, 0, sizeof(isi_ports));
- /* found a PCI ISI card! */
- ioaddr = pci_resource_start (dev, 3); /* i.e at offset 0x1c in the
- * PCI configuration register
- * space.
- */
- pciirq = dev->irq;
- printk(KERN_INFO "ISI PCI Card(Device ID 0x%x)\n", isicom_pci_tbl[idx].device);
- /*
- * allot the first empty slot in the array
- */
- for (count=0; count < BOARD_COUNT; count++) {
- if (isi_card[count].base == 0) {
- isi_card[count].base = ioaddr;
- isi_card[count].irq = pciirq;
- isi_card[count].isa = NO;
- card++;
- break;
- }
- }
- }
- if (card >= BOARD_COUNT) break;
- }
+ for(idx = 0; idx < BOARD_COUNT; idx++) {
+ port = &isi_ports[idx * 16];
+ isi_card[idx].ports = port;
+ spin_lock_init(&isi_card[idx].card_lock);
+ for (channel = 0; channel < 16; channel++, port++) {
+ port->magic = ISICOM_MAGIC;
+ port->card = &isi_card[idx];
+ port->channel = channel;
+ port->close_delay = 50 * HZ/100;
+ port->closing_wait = 3000 * HZ/100;
+ INIT_WORK(&port->hangup_tq, do_isicom_hangup, port);
+ INIT_WORK(&port->bh_tqueue, isicom_bottomhalf, port);
+ port->status = 0;
+ init_waitqueue_head(&port->open_wait);
+ init_waitqueue_head(&port->close_wait);
+ /* . . . */
+ }
+ isi_card[idx].base = 0;
+ isi_card[idx].irq = 0;
+
+ if (!io[idx])
+ continue;
+
+ if (irq[idx] == 2 || irq[idx] == 3 || irq[idx] == 4 ||
+ irq[idx] == 5 || irq[idx] == 7 ||
+ irq[idx] == 10 || irq[idx] == 11 ||
+ irq[idx] == 12 || irq[idx] == 15) {
+ printk(KERN_ERR "ISICOM: ISA not supported yet.\n");
+ retval = -EINVAL;
+ goto error;
+ } else
+ printk(KERN_ERR "ISICOM: Irq %d unsupported. "
+ "Disabling Card%d...\n", irq[idx], idx + 1);
}
-
- if (!(isi_card[0].base || isi_card[1].base || isi_card[2].base || isi_card[3].base)) {
- printk(KERN_ERR "ISICOM: No valid card configuration. Driver cannot be initialized...\n");
- return -EIO;
- }
- retval = misc_register(&isiloader_device);
+ retval = isicom_register_tty_driver();
+ if (retval < 0)
+ goto error;
+
+ retval = pci_register_driver(&isicom_driver);
if (retval < 0) {
- printk(KERN_ERR "ISICOM: Unable to register firmware loader driver.\n");
- return retval;
- }
-
- if (!isicom_init()) {
- if (misc_deregister(&isiloader_device))
- printk(KERN_ERR "ISICOM: Unable to unregister Firmware Loader driver\n");
- return -EIO;
+ printk(KERN_ERR "ISICOM: Unable to register pci driver.\n");
+ goto errtty;
}
-
+
init_timer(&tx);
tx.expires = jiffies + 1;
tx.data = 0;
tx.function = isicom_tx;
re_schedule = 1;
add_timer(&tx);
-
+
return 0;
+errtty:
+ isicom_unregister_tty_driver();
+error:
+ return retval;
}
static void __exit isicom_exit(void)
{
+ unsigned int index = 0;
+
re_schedule = 0;
- /* FIXME */
- msleep(1000);
- unregister_isr();
- unregister_drivers();
- unregister_ioregion();
- if(tmp_buf)
- free_page((unsigned long)tmp_buf);
- if (misc_deregister(&isiloader_device))
- printk(KERN_ERR "ISICOM: Unable to unregister Firmware Loader driver\n");
+
+ while (re_schedule != 2 && index++ < 100)
+ msleep(10);
+
+ pci_unregister_driver(&isicom_driver);
+ isicom_unregister_tty_driver();
}
module_init(isicom_setup);
module_exit(isicom_exit);
+
+MODULE_AUTHOR("MultiTech");
+MODULE_DESCRIPTION("Driver for the ISI series of cards by MultiTech");
+MODULE_LICENSE("GPL");
+module_param_array(io, int, NULL, 0);
+MODULE_PARM_DESC(io, "I/O ports for the cards");
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(irq, "Interrupts for the cards");
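
The isicom conversion above drops the separate firmware-loader misc device and loads the
card image through the in-kernel request_firmware() interface from a proper pci_driver.
A minimal sketch of that request_firmware() pattern follows; "example.bin" and
push_block_to_card() are placeholder names for illustration only, not part of the driver.

	#include <linux/firmware.h>
	#include <linux/pci.h>

	/* Sketch: fetch an image from /lib/firmware and feed it to the card. */
	static int example_load_firmware(struct pci_dev *pdev)
	{
		const struct firmware *fw;
		int retval;

		retval = request_firmware(&fw, "example.bin", &pdev->dev);
		if (retval)
			return retval;	/* image not available */

		/* push_block_to_card() is hypothetical; the real driver streams
		 * the image to the card in stframe chunks as shown above. */
		retval = push_block_to_card(fw->data, fw->size);

		release_firmware(fw);
		return retval;
	}
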
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index ce3bc0d45f1..28c5a3193b8 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -135,7 +135,7 @@ static stlconf_t stli_brdconf[] = {
/*{ BRD_ECP, 0x2a0, 0, 0xcc000, 0, 0 },*/
};
-static int stli_nrbrds = sizeof(stli_brdconf) / sizeof(stlconf_t);
+static int stli_nrbrds = ARRAY_SIZE(stli_brdconf);
/*
* There is some experimental EISA board detection code in this driver.
@@ -406,7 +406,7 @@ static unsigned long stli_eisamemprobeaddrs[] = {
0xff000000, 0xff010000, 0xff020000, 0xff030000,
};
-static int stli_eisamempsize = sizeof(stli_eisamemprobeaddrs) / sizeof(unsigned long);
+static int stli_eisamempsize = ARRAY_SIZE(stli_eisamemprobeaddrs);
/*
* Define the Stallion PCI vendor and device IDs.
@@ -899,15 +899,13 @@ static void stli_argbrds(void)
{
stlconf_t conf;
stlibrd_t *brdp;
- int nrargs, i;
+ int i;
#ifdef DEBUG
printk("stli_argbrds()\n");
#endif
- nrargs = sizeof(stli_brdsp) / sizeof(char **);
-
- for (i = stli_nrbrds; (i < nrargs); i++) {
+ for (i = stli_nrbrds; i < ARRAY_SIZE(stli_brdsp); i++) {
memset(&conf, 0, sizeof(conf));
if (stli_parsebrd(&conf, stli_brdsp[i]) == 0)
continue;
@@ -967,7 +965,7 @@ static unsigned long stli_atol(char *str)
static int stli_parsebrd(stlconf_t *confp, char **argp)
{
char *sp;
- int nrbrdnames, i;
+ int i;
#ifdef DEBUG
printk("stli_parsebrd(confp=%x,argp=%x)\n", (int) confp, (int) argp);
@@ -979,14 +977,13 @@ static int stli_parsebrd(stlconf_t *confp, char **argp)
for (sp = argp[0], i = 0; ((*sp != 0) && (i < 25)); sp++, i++)
*sp = TOLOWER(*sp);
- nrbrdnames = sizeof(stli_brdstr) / sizeof(stlibrdtype_t);
- for (i = 0; (i < nrbrdnames); i++) {
+ for (i = 0; i < ARRAY_SIZE(stli_brdstr); i++) {
if (strcmp(stli_brdstr[i].name, argp[0]) == 0)
break;
}
- if (i >= nrbrdnames) {
+ if (i == ARRAY_SIZE(stli_brdstr)) {
printk("STALLION: unknown board name, %s?\n", argp[0]);
- return(0);
+ return 0;
}
confp->brdtype = stli_brdstr[i].type;
@@ -2714,17 +2711,13 @@ static void stli_read(stlibrd_t *brdp, stliport_t *portp)
stlen = size - tail;
}
- len = MIN(len, (TTY_FLIPBUF_SIZE - tty->flip.count));
+ len = tty_buffer_request_room(tty, len);
+ /* FIXME : iomap ? */
shbuf = (volatile char *) EBRDGETMEMPTR(brdp, portp->rxoffset);
while (len > 0) {
stlen = MIN(len, stlen);
- memcpy(tty->flip.char_buf_ptr, (char *) (shbuf + tail), stlen);
- memset(tty->flip.flag_buf_ptr, 0, stlen);
- tty->flip.char_buf_ptr += stlen;
- tty->flip.flag_buf_ptr += stlen;
- tty->flip.count += stlen;
-
+ tty_insert_flip_string(tty, (char *)(shbuf + tail), stlen);
len -= stlen;
tail += stlen;
if (tail >= size) {
@@ -2909,16 +2902,12 @@ static int stli_hostcmd(stlibrd_t *brdp, stliport_t *portp)
if ((nt.data & DT_RXBREAK) && (portp->rxmarkmsk & BRKINT)) {
if (tty != (struct tty_struct *) NULL) {
- if (tty->flip.count < TTY_FLIPBUF_SIZE) {
- tty->flip.count++;
- *tty->flip.flag_buf_ptr++ = TTY_BREAK;
- *tty->flip.char_buf_ptr++ = 0;
- if (portp->flags & ASYNC_SAK) {
- do_SAK(tty);
- EBRDENABLE(brdp);
- }
- tty_schedule_flip(tty);
+ tty_insert_flip_char(tty, 0, TTY_BREAK);
+ if (portp->flags & ASYNC_SAK) {
+ do_SAK(tty);
+ EBRDENABLE(brdp);
}
+ tty_schedule_flip(tty);
}
}
@@ -4943,7 +4932,7 @@ static int stli_portcmdstats(stliport_t *portp)
if (portp->tty != (struct tty_struct *) NULL) {
if (portp->tty->driver_data == portp) {
stli_comstats.ttystate = portp->tty->flags;
- stli_comstats.rxbuffered = portp->tty->flip.count;
+ stli_comstats.rxbuffered = -1 /*portp->tty->flip.count*/;
if (portp->tty->termios != (struct termios *) NULL) {
stli_comstats.cflags = portp->tty->termios->c_cflag;
stli_comstats.iflags = portp->tty->termios->c_iflag;
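
The istallion hunks above (and the moxa ones further down) stop poking tty->flip directly
and use the tty buffering helpers instead. A minimal sketch of that receive path, assuming
a hypothetical read_from_hw() helper that copies bytes out of the board:

	#include <linux/kernel.h>
	#include <linux/tty.h>
	#include <linux/tty_flip.h>

	static void example_rx(struct tty_struct *tty, int avail)
	{
		unsigned char buf[64];
		int space, n;

		while (avail > 0) {
			/* Ask the tty core how much it can take right now. */
			space = tty_buffer_request_room(tty,
					min(avail, (int)sizeof(buf)));
			if (space <= 0)
				break;
			n = read_from_hw(buf, space);	/* hypothetical helper */
			if (n <= 0)
				break;
			tty_insert_flip_string(tty, buf, n);
			avail -= n;
		}
		tty_schedule_flip(tty);
	}
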
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 91dd669273e..29c41f4418c 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -101,6 +101,11 @@ static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
return 1;
}
+
+static inline int valid_mmap_phys_addr_range(unsigned long addr, size_t *size)
+{
+ return 1;
+}
#endif
/*
@@ -228,26 +233,36 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
return written;
}
+#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
+static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot)
+{
+#ifdef pgprot_noncached
+ unsigned long offset = pfn << PAGE_SHIFT;
+
+ if (uncached_access(file, offset))
+ return pgprot_noncached(vma_prot);
+#endif
+ return vma_prot;
+}
+#endif
+
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
-#if defined(__HAVE_PHYS_MEM_ACCESS_PROT)
+ size_t size = vma->vm_end - vma->vm_start;
+
+ if (!valid_mmap_phys_addr_range(vma->vm_pgoff << PAGE_SHIFT, &size))
+ return -EINVAL;
+
vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
+ size,
vma->vm_page_prot);
-#elif defined(pgprot_noncached)
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- int uncached;
-
- uncached = uncached_access(file, offset);
- if (uncached)
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#endif
/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
if (remap_pfn_range(vma,
vma->vm_start,
vma->vm_pgoff,
- vma->vm_end-vma->vm_start,
+ size,
vma->vm_page_prot))
return -EAGAIN;
return 0;
@@ -519,7 +534,7 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
return virtr + wrote;
}
-#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
static ssize_t read_port(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
@@ -726,7 +741,7 @@ static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
loff_t ret;
- down(&file->f_dentry->d_inode->i_sem);
+ mutex_lock(&file->f_dentry->d_inode->i_mutex);
switch (orig) {
case 0:
file->f_pos = offset;
@@ -741,7 +756,7 @@ static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
default:
ret = -EINVAL;
}
- up(&file->f_dentry->d_inode->i_sem);
+ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
return ret;
}
@@ -780,7 +795,7 @@ static struct file_operations null_fops = {
.write = write_null,
};
-#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
static struct file_operations port_fops = {
.llseek = memory_lseek,
.read = read_port,
@@ -817,7 +832,7 @@ static ssize_t kmsg_write(struct file * file, const char __user * buf,
size_t count, loff_t *ppos)
{
char *tmp;
- int ret;
+ ssize_t ret;
tmp = kmalloc(count + 1, GFP_KERNEL);
if (tmp == NULL)
@@ -826,6 +841,9 @@ static ssize_t kmsg_write(struct file * file, const char __user * buf,
if (!copy_from_user(tmp, buf, count)) {
tmp[count] = 0;
ret = printk("%s", tmp);
+ if (ret > count)
+ /* printk can add a prefix */
+ ret = count;
}
kfree(tmp);
return ret;
@@ -847,7 +865,7 @@ static int memory_open(struct inode * inode, struct file * filp)
case 3:
filp->f_op = &null_fops;
break;
-#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
case 4:
filp->f_op = &port_fops;
break;
@@ -894,7 +912,7 @@ static const struct {
{1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
{2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
{3, "null", S_IRUGO | S_IWUGO, &null_fops},
-#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
{4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
{5, "zero", S_IRUGO | S_IWUGO, &zero_fops},
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index 78c89a3e782..c92378121b4 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -1,11 +1,11 @@
/*
- * Intel Multimedia Timer device implementation for SGI SN platforms.
+ * Timer device implementation for SGI SN platforms.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2001-2006 Silicon Graphics, Inc. All rights reserved.
*
* This driver exports an API that should be supportable by any HPET or IA-PC
* multimedia timer. The code below is currently specific to the SGI Altix
@@ -45,7 +45,7 @@ MODULE_LICENSE("GPL");
/* name of the device, usually in /dev */
#define MMTIMER_NAME "mmtimer"
#define MMTIMER_DESC "SGI Altix RTC Timer"
-#define MMTIMER_VERSION "2.0"
+#define MMTIMER_VERSION "2.1"
#define RTC_BITS 55 /* 55 bits for this implementation */
@@ -227,10 +227,7 @@ typedef struct mmtimer {
struct tasklet_struct tasklet;
} mmtimer_t;
-/*
- * Total number of comparators is comparators/node * MAX nodes/running kernel
- */
-static mmtimer_t timers[NUM_COMPARATORS*MAX_COMPACT_NODES];
+static mmtimer_t ** timers;
/**
* mmtimer_ioctl - ioctl interface for /dev/mmtimer
@@ -441,29 +438,29 @@ static irqreturn_t
mmtimer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
int i;
- mmtimer_t *base = timers + cpu_to_node(smp_processor_id()) *
- NUM_COMPARATORS;
unsigned long expires = 0;
int result = IRQ_NONE;
+ unsigned indx = cpu_to_node(smp_processor_id());
/*
* Do this once for each comparison register
*/
for (i = 0; i < NUM_COMPARATORS; i++) {
+ mmtimer_t *base = timers[indx] + i;
/* Make sure this doesn't get reused before tasklet_sched */
- spin_lock(&base[i].lock);
- if (base[i].cpu == smp_processor_id()) {
- if (base[i].timer)
- expires = base[i].timer->it.mmtimer.expires;
+ spin_lock(&base->lock);
+ if (base->cpu == smp_processor_id()) {
+ if (base->timer)
+ expires = base->timer->it.mmtimer.expires;
/* expires test won't work with shared irqs */
if ((mmtimer_int_pending(i) > 0) ||
(expires && (expires < rtc_time()))) {
mmtimer_clr_int_pending(i);
- tasklet_schedule(&base[i].tasklet);
+ tasklet_schedule(&base->tasklet);
result = IRQ_HANDLED;
}
}
- spin_unlock(&base[i].lock);
+ spin_unlock(&base->lock);
expires = 0;
}
return result;
@@ -523,7 +520,7 @@ static int sgi_timer_del(struct k_itimer *timr)
{
int i = timr->it.mmtimer.clock;
cnodeid_t nodeid = timr->it.mmtimer.node;
- mmtimer_t *t = timers + nodeid * NUM_COMPARATORS +i;
+ mmtimer_t *t = timers[nodeid] + i;
unsigned long irqflags;
if (i != TIMER_OFF) {
@@ -609,11 +606,11 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
preempt_disable();
nodeid = cpu_to_node(smp_processor_id());
- base = timers + nodeid * NUM_COMPARATORS;
retry:
/* Don't use an allocated timer, or a deleted one that's pending */
for(i = 0; i< NUM_COMPARATORS; i++) {
- if (!base[i].timer && !base[i].tasklet.state) {
+ base = timers[nodeid] + i;
+ if (!base->timer && !base->tasklet.state) {
break;
}
}
@@ -623,14 +620,14 @@ retry:
return -EBUSY;
}
- spin_lock_irqsave(&base[i].lock, irqflags);
+ spin_lock_irqsave(&base->lock, irqflags);
- if (base[i].timer || base[i].tasklet.state != 0) {
- spin_unlock_irqrestore(&base[i].lock, irqflags);
+ if (base->timer || base->tasklet.state != 0) {
+ spin_unlock_irqrestore(&base->lock, irqflags);
goto retry;
}
- base[i].timer = timr;
- base[i].cpu = smp_processor_id();
+ base->timer = timr;
+ base->cpu = smp_processor_id();
timr->it.mmtimer.clock = i;
timr->it.mmtimer.node = nodeid;
@@ -645,11 +642,11 @@ retry:
}
} else {
timr->it.mmtimer.expires -= period;
- if (reschedule_periodic_timer(base+i))
+ if (reschedule_periodic_timer(base))
err = -EINVAL;
}
- spin_unlock_irqrestore(&base[i].lock, irqflags);
+ spin_unlock_irqrestore(&base->lock, irqflags);
preempt_enable();
@@ -675,6 +672,7 @@ static struct k_clock sgi_clock = {
static int __init mmtimer_init(void)
{
unsigned i;
+ cnodeid_t node, maxn = -1;
if (!ia64_platform_is("sn2"))
return -1;
@@ -691,14 +689,6 @@ static int __init mmtimer_init(void)
mmtimer_femtoperiod = ((unsigned long)1E15 + sn_rtc_cycles_per_second /
2) / sn_rtc_cycles_per_second;
- for (i=0; i< NUM_COMPARATORS*MAX_COMPACT_NODES; i++) {
- spin_lock_init(&timers[i].lock);
- timers[i].timer = NULL;
- timers[i].cpu = 0;
- timers[i].i = i % NUM_COMPARATORS;
- tasklet_init(&timers[i].tasklet, mmtimer_tasklet, (unsigned long) (timers+i));
- }
-
if (request_irq(SGI_MMTIMER_VECTOR, mmtimer_interrupt, SA_PERCPU_IRQ, MMTIMER_NAME, NULL)) {
printk(KERN_WARNING "%s: unable to allocate interrupt.",
MMTIMER_NAME);
@@ -712,6 +702,40 @@ static int __init mmtimer_init(void)
return -1;
}
+ /* Get max numbered node, calculate slots needed */
+ for_each_online_node(node) {
+ maxn = node;
+ }
+ maxn++;
+
+ /* Allocate list of node ptrs to mmtimer_t's */
+ timers = kmalloc(sizeof(mmtimer_t *)*maxn, GFP_KERNEL);
+ if (timers == NULL) {
+ printk(KERN_ERR "%s: failed to allocate memory for device\n",
+ MMTIMER_NAME);
+ return -1;
+ }
+
+ /* Allocate mmtimer_t's for each online node */
+ for_each_online_node(node) {
+ timers[node] = kmalloc_node(sizeof(mmtimer_t)*NUM_COMPARATORS, GFP_KERNEL, node);
+ if (timers[node] == NULL) {
+ printk(KERN_ERR "%s: failed to allocate memory for device\n",
+ MMTIMER_NAME);
+ return -1;
+ }
+ for (i=0; i< NUM_COMPARATORS; i++) {
+ mmtimer_t * base = timers[node] + i;
+
+ spin_lock_init(&base->lock);
+ base->timer = NULL;
+ base->cpu = 0;
+ base->i = i;
+ tasklet_init(&base->tasklet, mmtimer_tasklet,
+ (unsigned long) (base));
+ }
+ }
+
sgi_clock_period = sgi_clock.res = NSEC_PER_SEC / sn_rtc_cycles_per_second;
register_posix_clock(CLOCK_SGI_CYCLE, &sgi_clock);
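
The mmtimer rework drops the single static timers[] array in favour of per-node allocations
with kmalloc_node(), so each node's comparators live in node-local memory. A stripped-down
sketch of that allocation pattern; struct item and NUM_ITEMS are placeholders, and the
error path is not unwound here:

	#include <linux/errno.h>
	#include <linux/nodemask.h>
	#include <linux/slab.h>

	#define NUM_ITEMS 8			/* placeholder */
	struct item { int id; };		/* placeholder */

	static struct item **per_node;

	static int example_alloc(void)
	{
		int node, maxn = -1;

		/* Size the pointer array by the highest online node number. */
		for_each_online_node(node)
			maxn = node;
		maxn++;

		per_node = kmalloc(sizeof(*per_node) * maxn, GFP_KERNEL);
		if (!per_node)
			return -ENOMEM;

		for_each_online_node(node) {
			per_node[node] = kmalloc_node(sizeof(struct item) * NUM_ITEMS,
						      GFP_KERNEL, node);
			if (!per_node[node])
				return -ENOMEM;
		}
		return 0;
	}
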
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 79e490ef2cf..f43c2e04ead 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -269,7 +269,7 @@ static int MoxaPortDCDChange(int);
static int MoxaPortDCDON(int);
static void MoxaPortFlushData(int, int);
static int MoxaPortWriteData(int, unsigned char *, int);
-static int MoxaPortReadData(int, unsigned char *, int);
+static int MoxaPortReadData(int, struct tty_struct *tty);
static int MoxaPortTxQueue(int);
static int MoxaPortRxQueue(int);
static int MoxaPortTxFree(int);
@@ -301,6 +301,8 @@ static struct tty_operations moxa_ops = {
.tiocmset = moxa_tiocmset,
};
+static spinlock_t moxa_lock = SPIN_LOCK_UNLOCKED;
+
#ifdef CONFIG_PCI
static int moxa_get_PCI_conf(struct pci_dev *p, int board_type, moxa_board_conf * board)
{
@@ -448,7 +450,7 @@ static int __init moxa_init(void)
#ifdef CONFIG_PCI
{
struct pci_dev *p = NULL;
- int n = (sizeof(moxa_pcibrds) / sizeof(moxa_pcibrds[0])) - 1;
+ int n = ARRAY_SIZE(moxa_pcibrds) - 1;
i = 0;
while (i < n) {
while ((p = pci_get_device(moxa_pcibrds[i].vendor, moxa_pcibrds[i].device, p))!=NULL)
@@ -645,10 +647,10 @@ static int moxa_write(struct tty_struct *tty,
if (ch == NULL)
return (0);
port = ch->port;
- save_flags(flags);
- cli();
+
+ spin_lock_irqsave(&moxa_lock, flags);
len = MoxaPortWriteData(port, (unsigned char *) buf, count);
- restore_flags(flags);
+ spin_unlock_irqrestore(&moxa_lock, flags);
/*********************************************
if ( !(ch->statusflags & LOWWAIT) &&
@@ -723,11 +725,10 @@ static void moxa_put_char(struct tty_struct *tty, unsigned char c)
if (ch == NULL)
return;
port = ch->port;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&moxa_lock, flags);
moxaXmitBuff[0] = c;
MoxaPortWriteData(port, moxaXmitBuff, 1);
- restore_flags(flags);
+ spin_unlock_irqrestore(&moxa_lock, flags);
/************************************************
if ( !(ch->statusflags & LOWWAIT) && (MoxaPortTxFree(port) <= 100) )
*************************************************/
@@ -1030,12 +1031,12 @@ static int block_till_ready(struct tty_struct *tty, struct file *filp,
printk("block_til_ready before block: ttys%d, count = %d\n",
ch->line, ch->count);
#endif
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&moxa_lock, flags);
if (!tty_hung_up_p(filp))
ch->count--;
- restore_flags(flags);
ch->blocked_open++;
+ spin_unlock_irqrestore(&moxa_lock, flags);
+
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
if (tty_hung_up_p(filp) ||
@@ -1062,17 +1063,21 @@ static int block_till_ready(struct tty_struct *tty, struct file *filp,
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&ch->open_wait, &wait);
+
+ spin_lock_irqsave(&moxa_lock, flags);
if (!tty_hung_up_p(filp))
ch->count++;
ch->blocked_open--;
+ spin_unlock_irqrestore(&moxa_lock, flags);
#ifdef SERIAL_DEBUG_OPEN
printk("block_til_ready after blocking: ttys%d, count = %d\n",
ch->line, ch->count);
#endif
if (retval)
return (retval);
+ /* FIXME: review to see if we need to use set_bit on these */
ch->asyncflags |= ASYNC_NORMAL_ACTIVE;
- return (0);
+ return 0;
}
static void setup_empty_event(struct tty_struct *tty)
@@ -1080,15 +1085,14 @@ static void setup_empty_event(struct tty_struct *tty)
struct moxa_str *ch = tty->driver_data;
unsigned long flags;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&moxa_lock, flags);
ch->statusflags |= EMPTYWAIT;
moxaEmptyTimer_on[ch->port] = 0;
del_timer(&moxaEmptyTimer[ch->port]);
moxaEmptyTimer[ch->port].expires = jiffies + HZ;
moxaEmptyTimer_on[ch->port] = 1;
add_timer(&moxaEmptyTimer[ch->port]);
- restore_flags(flags);
+ spin_unlock_irqrestore(&moxa_lock, flags);
}
static void check_xmit_empty(unsigned long data)
@@ -1135,8 +1139,6 @@ static void receive_data(struct moxa_str *ch)
{
struct tty_struct *tp;
struct termios *ts;
- int i, count, rc, space;
- unsigned char *charptr, *flagptr;
unsigned long flags;
ts = NULL;
@@ -1150,24 +1152,10 @@ static void receive_data(struct moxa_str *ch)
MoxaPortFlushData(ch->port, 0);
return;
}
- space = TTY_FLIPBUF_SIZE - tp->flip.count;
- if (space <= 0)
- return;
- charptr = tp->flip.char_buf_ptr;
- flagptr = tp->flip.flag_buf_ptr;
- rc = tp->flip.count;
- save_flags(flags);
- cli();
- count = MoxaPortReadData(ch->port, charptr, space);
- restore_flags(flags);
- for (i = 0; i < count; i++)
- *flagptr++ = 0;
- charptr += count;
- rc += count;
- tp->flip.count = rc;
- tp->flip.char_buf_ptr = charptr;
- tp->flip.flag_buf_ptr = flagptr;
- tty_schedule_flip(ch->tty);
+ spin_lock_irqsave(&moxa_lock, flags);
+ MoxaPortReadData(ch->port, tp);
+ spin_unlock_irqrestore(&moxa_lock, flags);
+ tty_schedule_flip(tp);
}
#define Magic_code 0x404
@@ -1661,6 +1649,8 @@ int MoxaDriverIoctl(unsigned int cmd, unsigned long arg, int port)
case MOXA_FIND_BOARD:
case MOXA_LOAD_C320B:
case MOXA_LOAD_CODE:
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
break;
}
@@ -1774,7 +1764,7 @@ int MoxaPortsOfCard(int cardno)
* 14. MoxaPortDCDON(int port); *
* 15. MoxaPortFlushData(int port, int mode); *
* 16. MoxaPortWriteData(int port, unsigned char * buffer, int length); *
- * 17. MoxaPortReadData(int port, unsigned char * buffer, int length); *
+ * 17. MoxaPortReadData(int port, struct tty_struct *tty); *
* 18. MoxaPortTxBufSize(int port); *
* 19. MoxaPortRxBufSize(int port); *
* 20. MoxaPortTxQueue(int port); *
@@ -2003,10 +1993,9 @@ int MoxaPortsOfCard(int cardno)
*
* Function 21: Read data.
* Syntax:
- * int MoxaPortReadData(int port, unsigned char * buffer, int length);
+ * int MoxaPortReadData(int port, struct tty_struct *tty);
* int port : port number (0 - 127)
- * unsigned char * buffer : pointer to read data buffer.
- * int length : read data buffer length
+ * struct tty_struct *tty : tty for data
*
* return: 0 - length : real read data length
*
@@ -2504,7 +2493,7 @@ int MoxaPortWriteData(int port, unsigned char * buffer, int len)
return (total);
}
-int MoxaPortReadData(int port, unsigned char * buffer, int space)
+int MoxaPortReadData(int port, struct tty_struct *tty)
{
register ushort head, pageofs;
int i, count, cnt, len, total, remain;
@@ -2522,9 +2511,9 @@ int MoxaPortReadData(int port, unsigned char * buffer, int space)
count = (tail >= head) ? (tail - head)
: (tail - head + rx_mask + 1);
if (count == 0)
- return (0);
+ return 0;
- total = (space > count) ? count : space;
+ total = count;
remain = count - total;
moxaLog.rxcnt[port] += total;
count = total;
@@ -2539,7 +2528,7 @@ int MoxaPortReadData(int port, unsigned char * buffer, int space)
len = (count > len) ? len : count;
ofs = baseAddr + DynPage_addr + bufhead + head;
for (i = 0; i < len; i++)
- *buffer++ = readb(ofs + i);
+ tty_insert_flip_char(tty, readb(ofs + i), TTY_NORMAL);
head = (head + len) & rx_mask;
count -= len;
}
@@ -2556,7 +2545,7 @@ int MoxaPortReadData(int port, unsigned char * buffer, int space)
writew(pageno, baseAddr + Control_reg);
ofs = baseAddr + DynPage_addr + pageofs;
for (i = 0; i < cnt; i++)
- *buffer++ = readb(ofs + i);
+ tty_insert_flip_char(tty, readb(ofs + i), TTY_NORMAL);
if (count == 0) {
writew((head + len) & rx_mask, ofsAddr + RXrptr);
break;
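
The moxa driver loses its save_flags()/cli() pairs in favour of a driver-wide spinlock
taken with spin_lock_irqsave(). The shape of that conversion on an invented counter;
DEFINE_SPINLOCK is shown as the usual alternative to the SPIN_LOCK_UNLOCKED initialiser
used in the patch:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);
	static int example_count;

	static void example_bump(void)
	{
		unsigned long flags;

		/* was: save_flags(flags); cli(); ... restore_flags(flags); */
		spin_lock_irqsave(&example_lock, flags);
		example_count++;
		spin_unlock_irqrestore(&example_lock, flags);
	}
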
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 26448f17680..ea725a9964e 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -813,7 +813,7 @@ static int mxser_init(void)
/* start finding PCI board here */
#ifdef CONFIG_PCI
- n = (sizeof(mxser_pcibrds) / sizeof(mxser_pcibrds[0])) - 1;
+ n = ARRAY_SIZE(mxser_pcibrds) - 1;
index = 0;
b = 0;
while (b < n) {
@@ -1982,7 +1982,7 @@ static void mxser_receive_chars(struct mxser_struct *info, int *status)
spin_lock_irqsave(&info->slock, flags);
- recv_room = tty->ldisc.receive_room(tty);
+ recv_room = tty->receive_room;
if ((recv_room == 0) && (!info->ldisc_stop_rx)) {
//mxser_throttle(tty);
mxser_stoprx(tty);
diff --git a/drivers/char/n_hdlc.c b/drivers/char/n_hdlc.c
index a133a62f3d5..9f54733f162 100644
--- a/drivers/char/n_hdlc.c
+++ b/drivers/char/n_hdlc.c
@@ -191,7 +191,6 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
poll_table *wait);
static int n_hdlc_tty_open(struct tty_struct *tty);
static void n_hdlc_tty_close(struct tty_struct *tty);
-static int n_hdlc_tty_room(struct tty_struct *tty);
static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *cp,
char *fp, int count);
static void n_hdlc_tty_wakeup(struct tty_struct *tty);
@@ -212,7 +211,6 @@ static struct tty_ldisc n_hdlc_ldisc = {
.ioctl = n_hdlc_tty_ioctl,
.poll = n_hdlc_tty_poll,
.receive_buf = n_hdlc_tty_receive,
- .receive_room = n_hdlc_tty_room,
.write_wakeup = n_hdlc_tty_wakeup,
};
@@ -337,6 +335,7 @@ static int n_hdlc_tty_open (struct tty_struct *tty)
tty->disc_data = n_hdlc;
n_hdlc->tty = tty;
+ tty->receive_room = 65536;
#if defined(TTY_NO_WRITE_SPLIT)
/* change tty_io write() to not split large writes into 8K chunks */
@@ -478,22 +477,6 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty)
} /* end of n_hdlc_tty_wakeup() */
/**
- * n_hdlc_tty_room - Return the amount of space left in the receiver's buffer
- * @tty - pointer to associated tty instance data
- *
- * Callback function from tty driver. Return the amount of space left in the
- * receiver's buffer to decide if remote transmitter is to be throttled.
- */
-static int n_hdlc_tty_room(struct tty_struct *tty)
-{
- if (debuglevel >= DEBUG_LEVEL_INFO)
- printk("%s(%d)n_hdlc_tty_room() called\n",__FILE__,__LINE__);
- /* always return a larger number to prevent */
- /* throttling of remote transmitter. */
- return 65536;
-} /* end of n_hdlc_tty_root() */
-
-/**
* n_hdlc_tty_receive - Called by tty driver when receive data is available
* @tty - pointer to tty instance data
* @data - pointer to received data
diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c
index 853c98cee64..c48de09d68f 100644
--- a/drivers/char/n_r3964.c
+++ b/drivers/char/n_r3964.c
@@ -147,7 +147,6 @@ static unsigned int r3964_poll(struct tty_struct * tty, struct file * file,
struct poll_table_struct *wait);
static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
char *fp, int count);
-static int r3964_receive_room(struct tty_struct *tty);
static struct tty_ldisc tty_ldisc_N_R3964 = {
.owner = THIS_MODULE,
@@ -161,7 +160,6 @@ static struct tty_ldisc tty_ldisc_N_R3964 = {
.set_termios = r3964_set_termios,
.poll = r3964_poll,
.receive_buf = r3964_receive_buf,
- .receive_room = r3964_receive_room,
};
@@ -1119,6 +1117,7 @@ static int r3964_open(struct tty_struct *tty)
pInfo->nRetry = 0;
tty->disc_data = pInfo;
+ tty->receive_room = 65536;
init_timer(&pInfo->tmr);
pInfo->tmr.data = (unsigned long)pInfo;
@@ -1405,12 +1404,5 @@ static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
}
}
-static int r3964_receive_room(struct tty_struct *tty)
-{
- TRACE_L("receive_room");
- return -1;
-}
-
-
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_R3964);
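
Both n_hdlc and n_r3964 follow the same conversion: the per-discipline receive_room() callback is removed and the discipline publishes its capacity once, at open time, through tty->receive_room. A sketch of the resulting shape, with an invented discipline name (only the field assignment and the absence of a .receive_room member reflect the patch):

    static int example_ldisc_open(struct tty_struct *tty)
    {
            /* A discipline that never wants the driver throttled simply
             * advertises a large constant instead of a callback. */
            tty->receive_room = 65536;
            return 0;
    }

    static struct tty_ldisc example_ldisc = {
            .owner = THIS_MODULE,
            .open  = example_ldisc_open,
            /* .receive_room no longer exists in struct tty_ldisc */
    };
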
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index c556f4d3ccd..ccad7ae9454 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -78,7 +78,32 @@ static inline void free_buf(unsigned char *buf)
free_page((unsigned long) buf);
}
-static inline void put_tty_queue_nolock(unsigned char c, struct tty_struct *tty)
+/**
+ * n_tty_set_room - receive space
+ * @tty: terminal
+ *
+ * Called by the driver to find out how much data it is
+ * permitted to feed to the line discipline without any being lost
+ * and thus to manage flow control. Not serialized. Answers for the
+ * "instant".
+ */
+
+static void n_tty_set_room(struct tty_struct *tty)
+{
+ int left = N_TTY_BUF_SIZE - tty->read_cnt - 1;
+
+ /*
+ * If we are doing input canonicalization, and there are no
+ * pending newlines, let characters through without limit, so
+ * that erase characters will be handled. Other excess
+ * characters will be beeped.
+ */
+ if (left <= 0)
+ left = tty->icanon && !tty->canon_data;
+ tty->receive_room = left;
+}
+
+static void put_tty_queue_nolock(unsigned char c, struct tty_struct *tty)
{
if (tty->read_cnt < N_TTY_BUF_SIZE) {
tty->read_buf[tty->read_head] = c;
@@ -87,7 +112,7 @@ static inline void put_tty_queue_nolock(unsigned char c, struct tty_struct *tty)
}
}
-static inline void put_tty_queue(unsigned char c, struct tty_struct *tty)
+static void put_tty_queue(unsigned char c, struct tty_struct *tty)
{
unsigned long flags;
/*
@@ -136,6 +161,7 @@ static void reset_buffer_flags(struct tty_struct *tty)
spin_unlock_irqrestore(&tty->read_lock, flags);
tty->canon_head = tty->canon_data = tty->erasing = 0;
memset(&tty->read_flags, 0, sizeof tty->read_flags);
+ n_tty_set_room(tty);
check_unthrottle(tty);
}
@@ -838,30 +864,6 @@ send_signal:
put_tty_queue(c, tty);
}
-/**
- * n_tty_receive_room - receive space
- * @tty: terminal
- *
- * Called by the driver to find out how much data it is
- * permitted to feed to the line discipline without any being lost
- * and thus to manage flow control. Not serialized. Answers for the
- * "instant".
- */
-
-static int n_tty_receive_room(struct tty_struct *tty)
-{
- int left = N_TTY_BUF_SIZE - tty->read_cnt - 1;
-
- /*
- * If we are doing input canonicalization, and there are no
- * pending newlines, let characters through without limit, so
- * that erase characters will be handled. Other excess
- * characters will be beeped.
- */
- if (left <= 0)
- left = tty->icanon && !tty->canon_data;
- return left;
-}
/**
* n_tty_write_wakeup - asynchronous I/O notifier
@@ -953,6 +955,8 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
tty->driver->flush_chars(tty);
}
+ n_tty_set_room(tty);
+
if (!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) {
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
if (waitqueue_active(&tty->read_wait))
@@ -964,7 +968,7 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
* mode. We don't want to throttle the driver if we're in
* canonical mode and don't have a newline yet!
*/
- if (n_tty_receive_room(tty) < TTY_THRESHOLD_THROTTLE) {
+ if (tty->receive_room < TTY_THRESHOLD_THROTTLE) {
/* check TTY_THROTTLED first so it indicates our state */
if (!test_and_set_bit(TTY_THROTTLED, &tty->flags) &&
tty->driver->throttle)
@@ -999,6 +1003,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct termios * old)
if (test_bit(TTY_HW_COOK_IN, &tty->flags)) {
tty->raw = 1;
tty->real_raw = 1;
+ n_tty_set_room(tty);
return;
}
if (I_ISTRIP(tty) || I_IUCLC(tty) || I_IGNCR(tty) ||
@@ -1051,6 +1056,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct termios * old)
else
tty->real_raw = 0;
}
+ n_tty_set_room(tty);
}
/**
@@ -1130,7 +1136,7 @@ static inline int input_available_p(struct tty_struct *tty, int amt)
*
*/
-static inline int copy_from_read_buf(struct tty_struct *tty,
+static int copy_from_read_buf(struct tty_struct *tty,
unsigned char __user **b,
size_t *nr)
@@ -1308,6 +1314,7 @@ do_it_again:
retval = -ERESTARTSYS;
break;
}
+ n_tty_set_room(tty);
clear_bit(TTY_DONT_FLIP, &tty->flags);
timeout = schedule_timeout(timeout);
set_bit(TTY_DONT_FLIP, &tty->flags);
@@ -1401,6 +1408,8 @@ do_it_again:
} else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
goto do_it_again;
+ n_tty_set_room(tty);
+
return retval;
}
@@ -1553,7 +1562,6 @@ struct tty_ldisc tty_ldisc_N_TTY = {
normal_poll, /* poll */
NULL, /* hangup */
n_tty_receive_buf, /* receive_buf */
- n_tty_receive_room, /* receive_room */
n_tty_write_wakeup /* write_wakeup */
};
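
n_tty_set_room() computes the same value the removed n_tty_receive_room() returned, but caches it in tty->receive_room, so it has to be re-run at every point where read_cnt, icanon or canon_data can change (buffer reset, receive, termios change, read). A worked example of the arithmetic, assuming the usual N_TTY_BUF_SIZE of 4096 (not altered by this patch):

    /*
     * left = N_TTY_BUF_SIZE - read_cnt - 1
     *
     *   read_cnt == 0                        -> receive_room = 4095
     *   read_cnt == 4000                     -> receive_room = 95
     *   read_cnt == 4095, icanon && !canon_data
     *                                        -> left <= 0, receive_room = 1
     *                                           (one character still admitted
     *                                            so an erase char is seen)
     *   read_cnt == 4095, raw mode           -> receive_room = 0; the driver
     *                                           throttles when this drops
     *                                           below TTY_THRESHOLD_THROTTLE
     */
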
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 1af733d0732..3556ccd7757 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -557,13 +557,13 @@ pc_proc_infos(unsigned char *nvram, char *buffer, int *len,
(nvram[6] & 1) ? (nvram[6] >> 6) + 1 : 0);
PRINT_PROC("Floppy 0 type : ");
type = nvram[2] >> 4;
- if (type < sizeof (floppy_types) / sizeof (*floppy_types))
+ if (type < ARRAY_SIZE(floppy_types))
PRINT_PROC("%s\n", floppy_types[type]);
else
PRINT_PROC("%d (unknown)\n", type);
PRINT_PROC("Floppy 1 type : ");
type = nvram[2] & 0x0f;
- if (type < sizeof (floppy_types) / sizeof (*floppy_types))
+ if (type < ARRAY_SIZE(floppy_types))
PRINT_PROC("%s\n", floppy_types[type]);
else
PRINT_PROC("%d (unknown)\n", type);
@@ -843,8 +843,6 @@ static char *colors[] = {
"2", "4", "16", "256", "65536", "??", "??", "??"
};
-#define fieldsize(a) (sizeof(a)/sizeof(*a))
-
static int
atari_proc_infos(unsigned char *nvram, char *buffer, int *len,
off_t *begin, off_t offset, int size)
@@ -856,7 +854,7 @@ atari_proc_infos(unsigned char *nvram, char *buffer, int *len,
PRINT_PROC("Checksum status : %svalid\n", checksum ? "" : "not ");
PRINT_PROC("Boot preference : ");
- for (i = fieldsize(boot_prefs) - 1; i >= 0; --i) {
+ for (i = ARRAY_SIZE(boot_prefs) - 1; i >= 0; --i) {
if (nvram[1] == boot_prefs[i].val) {
PRINT_PROC("%s\n", boot_prefs[i].name);
break;
@@ -878,12 +876,12 @@ atari_proc_infos(unsigned char *nvram, char *buffer, int *len,
return 1;
PRINT_PROC("OS language : ");
- if (nvram[6] < fieldsize(languages))
+ if (nvram[6] < ARRAY_SIZE(languages))
PRINT_PROC("%s\n", languages[nvram[6]]);
else
PRINT_PROC("%u (undefined)\n", nvram[6]);
PRINT_PROC("Keyboard language: ");
- if (nvram[7] < fieldsize(languages))
+ if (nvram[7] < ARRAY_SIZE(languages))
PRINT_PROC("%s\n", languages[nvram[7]]);
else
PRINT_PROC("%u (undefined)\n", nvram[7]);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index cf45b100eff..8a8ca32822b 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -1007,8 +1007,9 @@ static void rx_ready_hdlc(MGSLPC_INFO *info, int eom)
static void rx_ready_async(MGSLPC_INFO *info, int tcd)
{
- unsigned char data, status;
+ unsigned char data, status, flag;
int fifo_count;
+ int work = 0;
struct tty_struct *tty = info->tty;
struct mgsl_icount *icount = &info->icount;
@@ -1023,20 +1024,16 @@ static void rx_ready_async(MGSLPC_INFO *info, int tcd)
fifo_count = 32;
} else
fifo_count = 32;
-
+
+ tty_buffer_request_room(tty, fifo_count);
/* Flush received async data to receive data buffer. */
while (fifo_count) {
data = read_reg(info, CHA + RXFIFO);
status = read_reg(info, CHA + RXFIFO);
fifo_count -= 2;
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- break;
-
- *tty->flip.char_buf_ptr = data;
icount->rx++;
-
- *tty->flip.flag_buf_ptr = 0;
+ flag = TTY_NORMAL;
// if no frameing/crc error then save data
// BIT7:parity error
@@ -1055,26 +1052,23 @@ static void rx_ready_async(MGSLPC_INFO *info, int tcd)
status &= info->read_status_mask;
if (status & BIT7)
- *tty->flip.flag_buf_ptr = TTY_PARITY;
+ flag = TTY_PARITY;
else if (status & BIT6)
- *tty->flip.flag_buf_ptr = TTY_FRAME;
+ flag = TTY_FRAME;
}
-
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
+ work += tty_insert_flip_char(tty, data, flag);
}
issue_command(info, CHA, CMD_RXFIFO);
if (debug_level >= DEBUG_LEVEL_ISR) {
- printk("%s(%d):rx_ready_async count=%d\n",
- __FILE__,__LINE__,tty->flip.count);
+ printk("%s(%d):rx_ready_async",
+ __FILE__,__LINE__);
printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
__FILE__,__LINE__,icount->rx,icount->brk,
icount->parity,icount->frame,icount->overrun);
}
- if (tty->flip.count)
+ if (work)
tty_flip_buffer_push(tty);
}
@@ -4005,7 +3999,7 @@ BOOLEAN register_test(MGSLPC_INFO *info)
{
static unsigned char patterns[] =
{ 0x00, 0xff, 0xaa, 0x55, 0x69, 0x96, 0x0f };
- static unsigned int count = sizeof(patterns) / sizeof(patterns[0]);
+ static unsigned int count = ARRAY_SIZE(patterns);
unsigned int i;
BOOLEAN rc = TRUE;
unsigned long flags;
@@ -4016,7 +4010,7 @@ BOOLEAN register_test(MGSLPC_INFO *info)
for (i = 0; i < count; i++) {
write_reg(info, XAD1, patterns[i]);
write_reg(info, XAD2, patterns[(i + 1) % count]);
- if ((read_reg(info, XAD1) != patterns[i]) ||
+ if ((read_reg(info, XAD1) != patterns[i]) ||
(read_reg(info, XAD2) != patterns[(i + 1) % count])) {
rc = FALSE;
break;
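
rx_ready_async() now reserves flip-buffer space up front with tty_buffer_request_room() and queues each byte with tty_insert_flip_char(), carrying a per-character flag instead of writing into tty->flip directly. A hedged sketch of the generic pattern (example_read_byte() and the flag selection are placeholders; the tty_* calls are the real interface):

    static void example_rx_async(struct tty_struct *tty, int fifo_count)
    {
            int work = 0;

            tty_buffer_request_room(tty, fifo_count);   /* reserve space once */
            while (fifo_count--) {
                    unsigned char data = example_read_byte();  /* placeholder */
                    unsigned char flag = TTY_NORMAL;   /* or TTY_PARITY/TTY_FRAME */

                    /* returns how many characters were actually inserted */
                    work += tty_insert_flip_char(tty, data, flag);
            }
            if (work)
                    tty_flip_buffer_push(tty);   /* hand the data to the ldisc */
    }
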
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index 49f3997fd25..9b5a2c0e700 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -111,7 +111,7 @@ static int pty_write(struct tty_struct * tty, const unsigned char *buf, int coun
if (!to || tty->stopped)
return 0;
- c = to->ldisc.receive_room(to);
+ c = to->receive_room;
if (c > count)
c = count;
to->ldisc.receive_buf(to, buf, NULL, c);
@@ -126,7 +126,7 @@ static int pty_write_room(struct tty_struct *tty)
if (!to || tty->stopped)
return 0;
- return to->ldisc.receive_room(to);
+ return to->receive_room;
}
/*
diff --git a/drivers/char/random.c b/drivers/char/random.c
index bdfdfd28594..86be04b241e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -632,7 +632,7 @@ out:
preempt_enable();
}
-extern void add_input_randomness(unsigned int type, unsigned int code,
+void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value)
{
static unsigned char last_value;
diff --git a/drivers/char/rio/board.h b/drivers/char/rio/board.h
index 0b397e1c8f1..29c98020409 100644
--- a/drivers/char/rio/board.h
+++ b/drivers/char/rio/board.h
@@ -52,63 +52,57 @@ static char *_board_h_sccs_ = "@(#)board.h 1.2";
/*
** The shape of the Host Control area, at offset 0x7C00, Write Only
*/
-struct s_Ctrl
-{
- BYTE DpCtl; /* 7C00 */
- BYTE Dp_Unused2_[127];
- BYTE DpIntSet; /* 7C80 */
- BYTE Dp_Unused3_[127];
- BYTE DpTpuReset; /* 7D00 */
- BYTE Dp_Unused4_[127];
- BYTE DpIntReset; /* 7D80 */
- BYTE Dp_Unused5_[127];
+struct s_Ctrl {
+ BYTE DpCtl; /* 7C00 */
+ BYTE Dp_Unused2_[127];
+ BYTE DpIntSet; /* 7C80 */
+ BYTE Dp_Unused3_[127];
+ BYTE DpTpuReset; /* 7D00 */
+ BYTE Dp_Unused4_[127];
+ BYTE DpIntReset; /* 7D80 */
+ BYTE Dp_Unused5_[127];
};
/*
** The PROM data area on the host (0x7C00), Read Only
*/
-struct s_Prom
-{
- WORD DpSlxCode[2];
- WORD DpRev;
- WORD Dp_Unused6_;
- WORD DpUniq[4];
- WORD DpJahre;
- WORD DpWoche;
- WORD DpHwFeature[5];
- WORD DpOemId;
- WORD DpSiggy[16];
+struct s_Prom {
+ WORD DpSlxCode[2];
+ WORD DpRev;
+ WORD Dp_Unused6_;
+ WORD DpUniq[4];
+ WORD DpJahre;
+ WORD DpWoche;
+ WORD DpHwFeature[5];
+ WORD DpOemId;
+ WORD DpSiggy[16];
};
/*
** Union of the Ctrl and Prom areas
*/
-union u_CtrlProm /* This is the control/PROM area (0x7C00) */
-{
- struct s_Ctrl DpCtrl;
- struct s_Prom DpProm;
+union u_CtrlProm { /* This is the control/PROM area (0x7C00) */
+ struct s_Ctrl DpCtrl;
+ struct s_Prom DpProm;
};
/*
** The top end of memory!
*/
-struct s_ParmMapS /* Area containing Parm Map Pointer */
-{
- BYTE Dp_Unused8_[DP_PARMMAP_ADDR];
- WORD DpParmMapAd;
+struct s_ParmMapS { /* Area containing Parm Map Pointer */
+ BYTE Dp_Unused8_[DP_PARMMAP_ADDR];
+ WORD DpParmMapAd;
};
-struct s_StartUpS
-{
- BYTE Dp_Unused9_[DP_STARTUP_ADDR];
- BYTE Dp_LongJump[0x4];
- BYTE Dp_Unused10_[2];
- BYTE Dp_ShortJump[0x2];
+struct s_StartUpS {
+ BYTE Dp_Unused9_[DP_STARTUP_ADDR];
+ BYTE Dp_LongJump[0x4];
+ BYTE Dp_Unused10_[2];
+ BYTE Dp_ShortJump[0x2];
};
-union u_Sram2ParmMap /* This is the top of memory (0x7E00-0x7FFF) */
-{
- BYTE DpSramMem[DP_SRAM2_SIZE];
+union u_Sram2ParmMap { /* This is the top of memory (0x7E00-0x7FFF) */
+ BYTE DpSramMem[DP_SRAM2_SIZE];
struct s_ParmMapS DpParmMapS;
struct s_StartUpS DpStartUpS;
};
@@ -116,13 +110,12 @@ union u_Sram2ParmMap /* This is the top of memory (0x7E00-0x7FFF) */
/*
** This is the DP RAM overlay.
*/
-struct DpRam
-{
- BYTE DpSram1[DP_SRAM1_SIZE]; /* 0000 - 7BFF */
- union u_CtrlProm DpCtrlProm; /* 7C00 - 7DFF */
- union u_Sram2ParmMap DpSram2ParmMap; /* 7E00 - 7FFF */
- BYTE DpScratch[DP_SCRATCH_SIZE]; /* 8000 - 8FFF */
- BYTE DpSram3[DP_SRAM3_SIZE]; /* 9000 - FFFF */
+struct DpRam {
+ BYTE DpSram1[DP_SRAM1_SIZE]; /* 0000 - 7BFF */
+ union u_CtrlProm DpCtrlProm; /* 7C00 - 7DFF */
+ union u_Sram2ParmMap DpSram2ParmMap; /* 7E00 - 7FFF */
+ BYTE DpScratch[DP_SCRATCH_SIZE]; /* 8000 - 8FFF */
+ BYTE DpSram3[DP_SRAM3_SIZE]; /* 9000 - FFFF */
};
#define DpControl DpCtrlProm.DpCtrl.DpCtl
diff --git a/drivers/char/rio/bootpkt.h b/drivers/char/rio/bootpkt.h
index c329aeb7c87..602266e0c08 100644
--- a/drivers/char/rio/bootpkt.h
+++ b/drivers/char/rio/bootpkt.h
@@ -41,7 +41,7 @@
#ifndef lint
#ifdef SCCS
-static char *_rio_bootpkt_h_sccs = "@(#)bootpkt.h 1.1" ;
+static char *_rio_bootpkt_h_sccs = "@(#)bootpkt.h 1.1";
#endif
#endif
@@ -49,14 +49,13 @@ static char *_rio_bootpkt_h_sccs = "@(#)bootpkt.h 1.1" ;
* Overlayed onto the Data fields of a regular
* Packet
************************************************/
-typedef struct BOOT_PKT BOOT_PKT ;
+typedef struct BOOT_PKT BOOT_PKT;
struct BOOT_PKT {
- short seq_num ;
- char data[10] ;
- } ;
+ short seq_num;
+ char data[10];
+};
#endif
/*********** end of file ***********/
-
diff --git a/drivers/char/rio/brates.h b/drivers/char/rio/brates.h
index bd4fc84ec6c..dd686d58fd6 100644
--- a/drivers/char/rio/brates.h
+++ b/drivers/char/rio/brates.h
@@ -97,11 +97,10 @@
#define MAX_RATE B2000
-struct baud_rate /* Tag for baud rates */
-{
- /* short host_rate,*/ /* As passed by the driver */
- short divisor, /* The divisor */
- prescaler; /* The pre-scaler */
+struct baud_rate { /* Tag for baud rates */
+ /* short host_rate, *//* As passed by the driver */
+ short divisor, /* The divisor */
+ prescaler; /* The pre-scaler */
};
#endif
diff --git a/drivers/char/rio/chan.h b/drivers/char/rio/chan.h
index 5b306543328..af14311f9b6 100644
--- a/drivers/char/rio/chan.h
+++ b/drivers/char/rio/chan.h
@@ -21,7 +21,7 @@
#ifndef lint
#ifdef SCCS
-static char *_rio_chan_h_sccs = "@(#)chan.h 1.1" ;
+static char *_rio_chan_h_sccs = "@(#)chan.h 1.1";
#endif
#endif
diff --git a/drivers/char/rio/cirrus.h b/drivers/char/rio/cirrus.h
index cf056a990f1..217ff09f2fa 100644
--- a/drivers/char/rio/cirrus.h
+++ b/drivers/char/rio/cirrus.h
@@ -73,20 +73,20 @@
#define TIMER_TICK 0x82
#define STOP_BREAK 0x83
#define BASE(a) ((a) < 4 ? (short*)CIRRUS_FIRST : ((a) < 8 ? (short *)CIRRUS_SECOND : ((a) < 12 ? (short*)CIRRUS_THIRD : (short *)CIRRUS_FOURTH)))
-#define txack1 ((short *)0x7104)
-#define rxack1 ((short *)0x7102)
+#define txack1 ((short *)0x7104)
+#define rxack1 ((short *)0x7102)
#define mdack1 ((short *)0x7106)
-#define txack2 ((short *)0x7006)
-#define rxack2 ((short *)0x7004)
-#define mdack2 ((short *)0x7100)
+#define txack2 ((short *)0x7006)
+#define rxack2 ((short *)0x7004)
+#define mdack2 ((short *)0x7100)
#define int_latch ((short *) 0x7800)
-#define int_status ((short *) 0x7c00)
-#define tx1_pending 0x20
-#define rx1_pending 0x10
-#define md1_pending 0x40
-#define tx2_pending 0x02
-#define rx2_pending 0x01
-#define md2_pending 0x40
+#define int_status ((short *) 0x7c00)
+#define tx1_pending 0x20
+#define rx1_pending 0x10
+#define md1_pending 0x40
+#define tx2_pending 0x02
+#define rx2_pending 0x01
+#define md2_pending 0x40
#define module1_bits 0x07
#define module1_modern 0x08
#define module2_bits 0x70
@@ -113,65 +113,65 @@
NB. These registers are relative values on 8 bit boundaries whereas
on the RTA's the CIRRUS registers are on word boundaries. Use pointer
arithmetic (short *) to obtain the real addresses required */
-#define ccr 0x05 /* Channel Command Register */
-#define ier 0x06 /* Interrupt Enable Register */
-#define cor1 0x08 /* Channel Option Register 1 */
-#define cor2 0x09 /* Channel Option Register 2 */
-#define cor3 0x0a /* Channel Option Register 3 */
-#define cor4 0x1e /* Channel Option Register 4 */
-#define cor5 0x1f /* Channel Option Register 5 */
-
-#define ccsr 0x0b /* Channel Control Status Register */
-#define rdcr 0x0e /* Receive Data Count Register */
-#define tdcr 0x12 /* Transmit Data Count Register */
-#define mcor1 0x15 /* Modem Change Option Register 1 */
-#define mcor2 0x16 /* Modem Change Option Regsiter 2 */
-
-#define livr 0x18 /* Local Interrupt Vector Register */
-#define schr1 0x1a /* Special Character Register 1 */
-#define schr2 0x1b /* Special Character Register 2 */
-#define schr3 0x1c /* Special Character Register 3 */
-#define schr4 0x1d /* Special Character Register 4 */
-
-#define rtr 0x20 /* Receive Timer Register */
-#define rtpr 0x21 /* Receive Timeout Period Register */
-#define lnc 0x24 /* Lnext character */
-
-#define rivr 0x43 /* Receive Interrupt Vector Register */
-#define tivr 0x42 /* Transmit Interrupt Vector Register */
-#define mivr 0x41 /* Modem Interrupt Vector Register */
-#define gfrcr 0x40 /* Global Firmware Revision code Reg */
-#define ricr 0x44 /* Receive Interrupting Channel Reg */
-#define ticr 0x45 /* Transmit Interrupting Channel Reg */
-#define micr 0x46 /* Modem Interrupting Channel Register */
-
-#define gcr 0x4b /* Global configuration register*/
-#define misr 0x4c /* Modem interrupt status register */
+#define ccr 0x05 /* Channel Command Register */
+#define ier 0x06 /* Interrupt Enable Register */
+#define cor1 0x08 /* Channel Option Register 1 */
+#define cor2 0x09 /* Channel Option Register 2 */
+#define cor3 0x0a /* Channel Option Register 3 */
+#define cor4 0x1e /* Channel Option Register 4 */
+#define cor5 0x1f /* Channel Option Register 5 */
+
+#define ccsr 0x0b /* Channel Control Status Register */
+#define rdcr 0x0e /* Receive Data Count Register */
+#define tdcr 0x12 /* Transmit Data Count Register */
+#define mcor1 0x15 /* Modem Change Option Register 1 */
+#define mcor2 0x16 /* Modem Change Option Register 2 */
+
+#define livr 0x18 /* Local Interrupt Vector Register */
+#define schr1 0x1a /* Special Character Register 1 */
+#define schr2 0x1b /* Special Character Register 2 */
+#define schr3 0x1c /* Special Character Register 3 */
+#define schr4 0x1d /* Special Character Register 4 */
+
+#define rtr 0x20 /* Receive Timer Register */
+#define rtpr 0x21 /* Receive Timeout Period Register */
+#define lnc 0x24 /* Lnext character */
+
+#define rivr 0x43 /* Receive Interrupt Vector Register */
+#define tivr 0x42 /* Transmit Interrupt Vector Register */
+#define mivr 0x41 /* Modem Interrupt Vector Register */
+#define gfrcr 0x40 /* Global Firmware Revision code Reg */
+#define ricr 0x44 /* Receive Interrupting Channel Reg */
+#define ticr 0x45 /* Transmit Interrupting Channel Reg */
+#define micr 0x46 /* Modem Interrupting Channel Register */
+
+#define gcr 0x4b /* Global configuration register */
+#define misr 0x4c /* Modem interrupt status register */
#define rbusr 0x59
#define tbusr 0x5a
#define mbusr 0x5b
-#define eoir 0x60 /* End Of Interrupt Register */
-#define rdsr 0x62 /* Receive Data / Status Register */
-#define tdr 0x63 /* Transmit Data Register */
-#define svrr 0x67 /* Service Request Register */
+#define eoir 0x60 /* End Of Interrupt Register */
+#define rdsr 0x62 /* Receive Data / Status Register */
+#define tdr 0x63 /* Transmit Data Register */
+#define svrr 0x67 /* Service Request Register */
-#define car 0x68 /* Channel Access Register */
-#define mir 0x69 /* Modem Interrupt Register */
-#define tir 0x6a /* Transmit Interrupt Register */
-#define rir 0x6b /* Receive Interrupt Register */
-#define msvr1 0x6c /* Modem Signal Value Register 1 */
-#define msvr2 0x6d /* Modem Signal Value Register 2*/
-#define psvr 0x6f /* Printer Signal Value Register*/
+#define car 0x68 /* Channel Access Register */
+#define mir 0x69 /* Modem Interrupt Register */
+#define tir 0x6a /* Transmit Interrupt Register */
+#define rir 0x6b /* Receive Interrupt Register */
+#define msvr1 0x6c /* Modem Signal Value Register 1 */
+#define msvr2 0x6d /* Modem Signal Value Register 2 */
+#define psvr 0x6f /* Printer Signal Value Register */
-#define tbpr 0x72 /* Transmit Baud Rate Period Register */
-#define tcor 0x76 /* Transmit Clock Option Register */
+#define tbpr 0x72 /* Transmit Baud Rate Period Register */
+#define tcor 0x76 /* Transmit Clock Option Register */
-#define rbpr 0x78 /* Receive Baud Rate Period Register */
-#define rber 0x7a /* Receive Baud Rate Extension Register */
-#define rcor 0x7c /* Receive Clock Option Register*/
-#define ppr 0x7e /* Prescalar Period Register */
+#define rbpr 0x78 /* Receive Baud Rate Period Register */
+#define rber 0x7a /* Receive Baud Rate Extension Register */
+#define rcor 0x7c /* Receive Clock Option Register */
+#define ppr 0x7e /* Prescalar Period Register */
/* Misc registers used for forcing the 1400 out of its reset woes */
#define airl 0x6d
@@ -192,10 +192,10 @@
/* RDSR - when status read from FIFO */
#define RDSR_BREAK 0x08 /* Break received */
-#define RDSR_TIMEOUT 0x80 /* No new data timeout */
-#define RDSR_SC1 0x10 /* Special char 1 (tx XON) matched */
-#define RDSR_SC2 0x20 /* Special char 2 (tx XOFF) matched */
-#define RDSR_SC12_MASK 0x30 /* Mask for special chars 1 and 2 */
+#define RDSR_TIMEOUT 0x80 /* No new data timeout */
+#define RDSR_SC1 0x10 /* Special char 1 (tx XON) matched */
+#define RDSR_SC2 0x20 /* Special char 2 (tx XOFF) matched */
+#define RDSR_SC12_MASK 0x30 /* Mask for special chars 1 and 2 */
/* PPR */
#define PPR_DEFAULT 0x31 /* Default value - for a 25Mhz clock gives
@@ -244,7 +244,7 @@
#define IER_TIMEOUT 0x01 /* Timeout on no data */
#define IER_DEFAULT 0x94 /* Default values */
-#define IER_PARALLEL 0x84 /* Default for Parallel */
+#define IER_PARALLEL 0x84 /* Default for Parallel */
#define IER_EMPTY 0x92 /* Transmitter empty rather than ready */
/* COR1 - Driver only */
@@ -264,11 +264,11 @@
#define COR1_7BITS 0x02 /* 7 data bits */
#define COR1_8BITS 0x03 /* 8 data bits */
-#define COR1_HOST 0xef /* Safe host bits */
+#define COR1_HOST 0xef /* Safe host bits */
/* RTA only */
-#define COR1_CINPCK 0x00 /* Check parity of received characters */
-#define COR1_CNINPCK 0x10 /* Don't check parity */
+#define COR1_CINPCK 0x00 /* Check parity of received characters */
+#define COR1_CNINPCK 0x10 /* Don't check parity */
/* COR2 bits for both RTA and driver use */
#define COR2_IXANY 0x80 /* IXANY - any character is XON */
@@ -293,9 +293,9 @@
#define COR3_FCT 0x20 /* Flow control transparency */
#define COR3_SCD12 0x10 /* Special character detect for SCHR's 1 + 2 */
#define COR3_FIFO12 0x0c /* 12 chars for receive FIFO threshold */
-#define COR3_FIFO10 0x0a /* 10 chars for receive FIFO threshold */
-#define COR3_FIFO8 0x08 /* 8 chars for receive FIFO threshold */
-#define COR3_FIFO6 0x06 /* 6 chars for receive FIFO threshold */
+#define COR3_FIFO10 0x0a /* 10 chars for receive FIFO threshold */
+#define COR3_FIFO8 0x08 /* 8 chars for receive FIFO threshold */
+#define COR3_FIFO6 0x06 /* 6 chars for receive FIFO threshold */
#define COR3_THRESHOLD COR3_FIFO8 /* MUST BE LESS THAN MCOR_THRESHOLD */
@@ -386,7 +386,7 @@
#define MCOR_THRESHBITS 0x0F /* mask for ANDing out the above */
-#define MCOR_THRESHOLD MCOR_THRESH9 /* MUST BE GREATER THAN COR3_THRESHOLD */
+#define MCOR_THRESHOLD MCOR_THRESH9 /* MUST BE GREATER THAN COR3_THRESHOLD */
/* RTPR */
@@ -429,25 +429,25 @@
#define CONFIG 0x01 /* Configure a port */
#define MOPEN 0x02 /* Modem open (block for DCD) */
#define CLOSE 0x03 /* Close a port */
-#define WFLUSH (0x04 | PRE_EMPTIVE) /* Write flush */
-#define RFLUSH (0x05 | PRE_EMPTIVE) /* Read flush */
-#define RESUME (0x06 | PRE_EMPTIVE) /* Resume if xoffed */
-#define SBREAK 0x07 /* Start break */
+#define WFLUSH (0x04 | PRE_EMPTIVE) /* Write flush */
+#define RFLUSH (0x05 | PRE_EMPTIVE) /* Read flush */
+#define RESUME (0x06 | PRE_EMPTIVE) /* Resume if xoffed */
+#define SBREAK 0x07 /* Start break */
#define EBREAK 0x08 /* End break */
-#define SUSPEND (0x09 | PRE_EMPTIVE) /* Susp op (behave as tho xoffed) */
-#define FCLOSE (0x0a | PRE_EMPTIVE) /* Force close */
-#define XPRINT 0x0b /* Xprint packet */
-#define MBIS (0x0c | PRE_EMPTIVE) /* Set modem lines */
-#define MBIC (0x0d | PRE_EMPTIVE) /* Clear modem lines */
-#define MSET (0x0e | PRE_EMPTIVE) /* Set modem lines */
+#define SUSPEND (0x09 | PRE_EMPTIVE) /* Susp op (behave as tho xoffed) */
+#define FCLOSE (0x0a | PRE_EMPTIVE) /* Force close */
+#define XPRINT 0x0b /* Xprint packet */
+#define MBIS (0x0c | PRE_EMPTIVE) /* Set modem lines */
+#define MBIC (0x0d | PRE_EMPTIVE) /* Clear modem lines */
+#define MSET (0x0e | PRE_EMPTIVE) /* Set modem lines */
#define PCLOSE 0x0f /* Pseudo close - Leaves rx/tx enabled */
-#define MGET (0x10 | PRE_EMPTIVE) /* Force update of modem status */
-#define MEMDUMP (0x11 | PRE_EMPTIVE) /* Send back mem from addr supplied */
-#define READ_REGISTER (0x12 | PRE_EMPTIVE) /* Read CD1400 register (debug) */
+#define MGET (0x10 | PRE_EMPTIVE) /* Force update of modem status */
+#define MEMDUMP (0x11 | PRE_EMPTIVE) /* Send back mem from addr supplied */
+#define READ_REGISTER (0x12 | PRE_EMPTIVE) /* Read CD1400 register (debug) */
/* "Command" packets going from remote to host COMPLETE and MODEM_STATUS
use data[4] / data[3] to indicate current state and modem status respectively
-*/
+*/
#define COMPLETE (0x20 | PRE_EMPTIVE)
/* Command complete */
diff --git a/drivers/char/rio/cmd.h b/drivers/char/rio/cmd.h
index c369edaea2b..797b62400c9 100644
--- a/drivers/char/rio/cmd.h
+++ b/drivers/char/rio/cmd.h
@@ -42,7 +42,7 @@
#ifndef lint
#ifdef SCCS
-static char *_rio_cmd_h_sccs = "@(#)cmd.h 1.1" ;
+static char *_rio_cmd_h_sccs = "@(#)cmd.h 1.1";
#endif
#endif
@@ -52,7 +52,7 @@ static char *_rio_cmd_h_sccs = "@(#)cmd.h 1.1" ;
#define CMD_IGNORE_PKT ( (ushort) 0)
#define CMD_STATUS_REQ ( (ushort) 1)
-#define CMD_UNIT_STATUS_REQ ( (ushort) 2) /* Is this needed ??? */
+#define CMD_UNIT_STATUS_REQ ( (ushort) 2) /* Is this needed ??? */
#define CMD_CONF_PORT ( (ushort) 3)
#define CMD_CONF_UNIT ( (ushort) 4)
#define CMD_ROUTE_MAP_REQ ( (ushort) 5)
@@ -81,4 +81,3 @@ static char *_rio_cmd_h_sccs = "@(#)cmd.h 1.1" ;
#endif
/*********** end of file ***********/
-
diff --git a/drivers/char/rio/cmdblk.h b/drivers/char/rio/cmdblk.h
index 2b8efbdbee1..a9a8c45631d 100644
--- a/drivers/char/rio/cmdblk.h
+++ b/drivers/char/rio/cmdblk.h
@@ -44,16 +44,15 @@ static char *_cmdblk_h_sccs_ = "@(#)cmdblk.h 1.2";
** a rup.
*/
-struct CmdBlk
-{
- struct CmdBlk *NextP; /* Pointer to next command block */
- struct PKT Packet; /* A packet, to copy to the rup */
- /* The func to call to check if OK */
- int (*PreFuncP)(int, struct CmdBlk *);
- int PreArg; /* The arg for the func */
- /* The func to call when completed */
- int (*PostFuncP)(int, struct CmdBlk *);
- int PostArg; /* The arg for the func */
+struct CmdBlk {
+ struct CmdBlk *NextP; /* Pointer to next command block */
+ struct PKT Packet; /* A packet, to copy to the rup */
+ /* The func to call to check if OK */
+ int (*PreFuncP) (int, struct CmdBlk *);
+ int PreArg; /* The arg for the func */
+ /* The func to call when completed */
+ int (*PostFuncP) (int, struct CmdBlk *);
+ int PostArg; /* The arg for the func */
};
#define NUM_RIO_CMD_BLKS (3 * (MAX_RUP * 4 + LINKS_PER_UNIT * 4))
diff --git a/drivers/char/rio/cmdpkt.h b/drivers/char/rio/cmdpkt.h
index 46befd354f2..77cee8df68e 100644
--- a/drivers/char/rio/cmdpkt.h
+++ b/drivers/char/rio/cmdpkt.h
@@ -54,135 +54,112 @@ static char *_cmdpkt_h_sccs_ = "@(#)cmdpkt.h 1.2";
** This structure overlays a PktCmd->CmdData structure, and so starts
** at Data[2] in the actual pkt!
*/
-struct BootSequence
-{
- WORD NumPackets;
- WORD LoadBase;
- WORD CodeSize;
+struct BootSequence {
+ WORD NumPackets;
+ WORD LoadBase;
+ WORD CodeSize;
};
#define BOOT_SEQUENCE_LEN 8
-struct SamTop
-{
- BYTE Unit;
- BYTE Link;
+struct SamTop {
+ BYTE Unit;
+ BYTE Link;
};
-struct CmdHdr
-{
- BYTE PcCommand;
- union
- {
- BYTE PcPhbNum;
- BYTE PcLinkNum;
- BYTE PcIDNum;
- } U0;
+struct CmdHdr {
+ BYTE PcCommand;
+ union {
+ BYTE PcPhbNum;
+ BYTE PcLinkNum;
+ BYTE PcIDNum;
+ } U0;
};
-struct PktCmd
-{
- union
- {
- struct
- {
- struct CmdHdr CmdHdr;
- struct BootSequence PcBootSequence;
- } S1;
- struct
- {
- WORD PcSequence;
- BYTE PcBootData[RTA_BOOT_DATA_SIZE];
- } S2;
- struct
- {
- WORD __crud__;
- BYTE PcUniqNum[4]; /* this is really a uint. */
- BYTE PcModuleTypes; /* what modules are fitted */
- } S3;
- struct
- {
- struct CmdHdr CmdHdr;
- BYTE __undefined__;
- BYTE PcModemStatus;
- BYTE PcPortStatus;
- BYTE PcSubCommand; /* commands like mem or register dump */
- WORD PcSubAddr; /* Address for command */
- BYTE PcSubData[64]; /* Date area for command */
- } S4;
- struct
- {
- struct CmdHdr CmdHdr;
- BYTE PcCommandText[1];
- BYTE __crud__[20];
- BYTE PcIDNum2; /* It had to go somewhere! */
- } S5;
- struct
- {
- struct CmdHdr CmdHdr;
- struct SamTop Topology[LINKS_PER_UNIT];
- } S6;
- } U1;
+struct PktCmd {
+ union {
+ struct {
+ struct CmdHdr CmdHdr;
+ struct BootSequence PcBootSequence;
+ } S1;
+ struct {
+ WORD PcSequence;
+ BYTE PcBootData[RTA_BOOT_DATA_SIZE];
+ } S2;
+ struct {
+ WORD __crud__;
+ BYTE PcUniqNum[4]; /* this is really a uint. */
+ BYTE PcModuleTypes; /* what modules are fitted */
+ } S3;
+ struct {
+ struct CmdHdr CmdHdr;
+ BYTE __undefined__;
+ BYTE PcModemStatus;
+ BYTE PcPortStatus;
+ BYTE PcSubCommand; /* commands like mem or register dump */
+ WORD PcSubAddr; /* Address for command */
+ BYTE PcSubData[64]; /* Date area for command */
+ } S4;
+ struct {
+ struct CmdHdr CmdHdr;
+ BYTE PcCommandText[1];
+ BYTE __crud__[20];
+ BYTE PcIDNum2; /* It had to go somewhere! */
+ } S5;
+ struct {
+ struct CmdHdr CmdHdr;
+ struct SamTop Topology[LINKS_PER_UNIT];
+ } S6;
+ } U1;
};
-struct PktCmd_M
-{
- union
- {
- struct
- {
- struct
- {
- uchar PcCommand;
- union
- {
- uchar PcPhbNum;
- uchar PcLinkNum;
- uchar PcIDNum;
- } U0;
- } CmdHdr;
- struct
- {
- ushort NumPackets;
- ushort LoadBase;
- ushort CodeSize;
- } PcBootSequence;
- } S1;
- struct
- {
- ushort PcSequence;
- uchar PcBootData[RTA_BOOT_DATA_SIZE];
- } S2;
- struct
- {
- ushort __crud__;
- uchar PcUniqNum[4]; /* this is really a uint. */
- uchar PcModuleTypes; /* what modules are fitted */
- } S3;
- struct
- {
- ushort __cmd_hdr__;
- uchar __undefined__;
- uchar PcModemStatus;
- uchar PcPortStatus;
- uchar PcSubCommand;
- ushort PcSubAddr;
- uchar PcSubData[64];
- } S4;
- struct
- {
- ushort __cmd_hdr__;
- uchar PcCommandText[1];
- uchar __crud__[20];
- uchar PcIDNum2; /* Tacked on end */
- } S5;
- struct
- {
- ushort __cmd_hdr__;
- struct Top Topology[LINKS_PER_UNIT];
- } S6;
- } U1;
+struct PktCmd_M {
+ union {
+ struct {
+ struct {
+ uchar PcCommand;
+ union {
+ uchar PcPhbNum;
+ uchar PcLinkNum;
+ uchar PcIDNum;
+ } U0;
+ } CmdHdr;
+ struct {
+ ushort NumPackets;
+ ushort LoadBase;
+ ushort CodeSize;
+ } PcBootSequence;
+ } S1;
+ struct {
+ ushort PcSequence;
+ uchar PcBootData[RTA_BOOT_DATA_SIZE];
+ } S2;
+ struct {
+ ushort __crud__;
+ uchar PcUniqNum[4]; /* this is really a uint. */
+ uchar PcModuleTypes; /* what modules are fitted */
+ } S3;
+ struct {
+ ushort __cmd_hdr__;
+ uchar __undefined__;
+ uchar PcModemStatus;
+ uchar PcPortStatus;
+ uchar PcSubCommand;
+ ushort PcSubAddr;
+ uchar PcSubData[64];
+ } S4;
+ struct {
+ ushort __cmd_hdr__;
+ uchar PcCommandText[1];
+ uchar __crud__[20];
+ uchar PcIDNum2; /* Tacked on end */
+ } S5;
+ struct {
+ ushort __cmd_hdr__;
+ struct Top Topology[LINKS_PER_UNIT];
+ } S6;
+ } U1;
};
#define Command U1.S1.CmdHdr.PcCommand
diff --git a/drivers/char/rio/control.h b/drivers/char/rio/control.h
index 1712f6261dd..6853d03304a 100644
--- a/drivers/char/rio/control.h
+++ b/drivers/char/rio/control.h
@@ -51,12 +51,11 @@
#define UFOAD ( CONTROL + 4 )
#define IWAIT ( CONTROL + 5 )
-#define IFOAD_MAGIC 0xF0AD /* of course */
+#define IFOAD_MAGIC 0xF0AD /* of course */
#define ZOMBIE_MAGIC (~0xDEAD) /* not dead -> zombie */
-#define UFOAD_MAGIC 0xD1E /* kill-your-neighbour */
-#define IWAIT_MAGIC 0xB1DE /* Bide your time */
+#define UFOAD_MAGIC 0xD1E /* kill-your-neighbour */
+#define IWAIT_MAGIC 0xB1DE /* Bide your time */
#endif
/*********** end of file ***********/
-
diff --git a/drivers/char/rio/daemon.h b/drivers/char/rio/daemon.h
index 62dba0e68b3..28a991bd4fe 100644
--- a/drivers/char/rio/daemon.h
+++ b/drivers/char/rio/daemon.h
@@ -44,18 +44,16 @@ static char *_daemon_h_sccs_ = "@(#)daemon.h 1.3";
** structures used on /dev/rio
*/
-struct Error
-{
- uint Error;
- uint Entry;
- uint Other;
+struct Error {
+ uint Error;
+ uint Entry;
+ uint Other;
};
-struct DownLoad
-{
- char *DataP;
- uint Count;
- uint ProductCode;
+struct DownLoad {
+ char *DataP;
+ uint Count;
+ uint ProductCode;
};
/*
@@ -66,46 +64,41 @@ struct DownLoad
#endif
#ifndef MAX_XP_CTRL_LEN
-#define MAX_XP_CTRL_LEN 16 /* ALSO IN PORT.H */
+#define MAX_XP_CTRL_LEN 16 /* ALSO IN PORT.H */
#endif
-struct PortSetup
-{
- uint From; /* Set/Clear XP & IXANY Control from this port.... */
- uint To; /* .... to this port */
- uint XpCps; /* at this speed */
- char XpOn[MAX_XP_CTRL_LEN]; /* this is the start string */
- char XpOff[MAX_XP_CTRL_LEN]; /* this is the stop string */
- uchar IxAny; /* enable/disable IXANY */
- uchar IxOn; /* enable/disable IXON */
- uchar Lock; /* lock port params */
- uchar Store; /* store params across closes */
- uchar Drain; /* close only when drained */
+struct PortSetup {
+ uint From; /* Set/Clear XP & IXANY Control from this port.... */
+ uint To; /* .... to this port */
+ uint XpCps; /* at this speed */
+ char XpOn[MAX_XP_CTRL_LEN]; /* this is the start string */
+ char XpOff[MAX_XP_CTRL_LEN]; /* this is the stop string */
+ uchar IxAny; /* enable/disable IXANY */
+ uchar IxOn; /* enable/disable IXON */
+ uchar Lock; /* lock port params */
+ uchar Store; /* store params across closes */
+ uchar Drain; /* close only when drained */
};
-struct LpbReq
-{
- uint Host;
- uint Link;
- struct LPB *LpbP;
+struct LpbReq {
+ uint Host;
+ uint Link;
+ struct LPB *LpbP;
};
-struct RupReq
-{
- uint HostNum;
- uint RupNum;
- struct RUP *RupP;
+struct RupReq {
+ uint HostNum;
+ uint RupNum;
+ struct RUP *RupP;
};
-struct PortReq
-{
- uint SysPort;
- struct Port *PortP;
+struct PortReq {
+ uint SysPort;
+ struct Port *PortP;
};
-struct StreamInfo
-{
- uint SysPort;
+struct StreamInfo {
+ uint SysPort;
#if 0
queue_t RQueue;
queue_t WQueue;
@@ -115,68 +108,59 @@ struct StreamInfo
#endif
};
-struct HostReq
-{
- uint HostNum;
- struct Host *HostP;
+struct HostReq {
+ uint HostNum;
+ struct Host *HostP;
};
-struct HostDpRam
-{
- uint HostNum;
- struct DpRam *DpRamP;
+struct HostDpRam {
+ uint HostNum;
+ struct DpRam *DpRamP;
};
-struct DebugCtrl
-{
- uint SysPort;
- uint Debug;
- uint Wait;
+struct DebugCtrl {
+ uint SysPort;
+ uint Debug;
+ uint Wait;
};
-struct MapInfo
-{
- uint FirstPort; /* 8 ports, starting from this (tty) number */
- uint RtaUnique; /* reside on this RTA (unique number) */
+struct MapInfo {
+ uint FirstPort; /* 8 ports, starting from this (tty) number */
+ uint RtaUnique; /* reside on this RTA (unique number) */
};
-struct MapIn
-{
- uint NumEntries; /* How many port sets are we mapping? */
- struct MapInfo *MapInfoP; /* Pointer to (user space) info */
+struct MapIn {
+ uint NumEntries; /* How many port sets are we mapping? */
+ struct MapInfo *MapInfoP; /* Pointer to (user space) info */
};
-struct SendPack
-{
- unsigned int PortNum;
- unsigned char Len;
- unsigned char Data[PKT_MAX_DATA_LEN];
+struct SendPack {
+ unsigned int PortNum;
+ unsigned char Len;
+ unsigned char Data[PKT_MAX_DATA_LEN];
};
-struct SpecialRupCmd
-{
- struct PKT Packet;
- unsigned short Host;
- unsigned short RupNum;
+struct SpecialRupCmd {
+ struct PKT Packet;
+ unsigned short Host;
+ unsigned short RupNum;
};
-struct IdentifyRta
-{
- ulong RtaUnique;
- uchar ID;
+struct IdentifyRta {
+ ulong RtaUnique;
+ uchar ID;
};
-struct KillNeighbour
-{
- ulong UniqueNum;
- uchar Link;
+struct KillNeighbour {
+ ulong UniqueNum;
+ uchar Link;
};
struct rioVersion {
- char version[MAX_VERSION_LEN];
- char relid[MAX_VERSION_LEN];
- int buildLevel;
- char buildDate[MAX_VERSION_LEN];
+ char version[MAX_VERSION_LEN];
+ char relid[MAX_VERSION_LEN];
+ int buildLevel;
+ char buildDate[MAX_VERSION_LEN];
};
@@ -316,16 +300,16 @@ struct rioVersion {
#define RIO_SET_XP_CPS rIOCW(155,int)
#define RIO_GET_IXANY rIOCR(156,int) /* ixany allowed? */
#define RIO_SET_IXANY rIOCW(157,int)
-#define RIO_SET_IXANY_ON rIOCN(158) /* allow ixany */
-#define RIO_SET_IXANY_OFF rIOCN(159) /* disallow ixany */
+#define RIO_SET_IXANY_ON rIOCN(158) /* allow ixany */
+#define RIO_SET_IXANY_OFF rIOCN(159) /* disallow ixany */
#define RIO_GET_MODEM rIOCR(160,int) /* port is modem/direct line? */
#define RIO_SET_MODEM rIOCW(161,int)
-#define RIO_SET_MODEM_ON rIOCN(162) /* port is a modem */
-#define RIO_SET_MODEM_OFF rIOCN(163) /* port is direct */
+#define RIO_SET_MODEM_ON rIOCN(162) /* port is a modem */
+#define RIO_SET_MODEM_OFF rIOCN(163) /* port is direct */
#define RIO_GET_IXON rIOCR(164,int) /* ixon allowed? */
#define RIO_SET_IXON rIOCW(165,int)
-#define RIO_SET_IXON_ON rIOCN(166) /* allow ixon */
-#define RIO_SET_IXON_OFF rIOCN(167) /* disallow ixon */
+#define RIO_SET_IXON_ON rIOCN(166) /* allow ixon */
+#define RIO_SET_IXON_OFF rIOCN(167) /* disallow ixon */
#define RIO_GET_SIVIEW ((('s')<<8) | 106) /* backwards compatible with SI */
diff --git a/drivers/char/rio/debug.h b/drivers/char/rio/debug.h
index b6e0d093555..6ae95c00db4 100644
--- a/drivers/char/rio/debug.h
+++ b/drivers/char/rio/debug.h
@@ -33,7 +33,7 @@
#define DBPACKET(pkt, opt, str, chn) debug_packet((pkt), (opt), (str), (chn))
#else
#define DBPACKET(pkt, opt, str, c)
-#endif /* DCIRRUS */
+#endif /* DCIRRUS */
-#endif /* _debug_h_ */
+#endif /* _debug_h_ */
diff --git a/drivers/char/rio/defaults.h b/drivers/char/rio/defaults.h
index 2e7309e2762..5b600c32ac0 100644
--- a/drivers/char/rio/defaults.h
+++ b/drivers/char/rio/defaults.h
@@ -37,13 +37,13 @@
#ifndef lint
#ifdef SCCS
-static char *_rio_defaults_h_sccs = "@(#)defaults.h 1.1" ;
+static char *_rio_defaults_h_sccs = "@(#)defaults.h 1.1";
#endif
#endif
-#define MILLISECOND (int) (1000/64) /* 15.625 low ticks */
-#define SECOND (int) 15625 /* Low priority ticks */
+#define MILLISECOND (int) (1000/64) /* 15.625 low ticks */
+#define SECOND (int) 15625 /* Low priority ticks */
#ifdef RTA
#define RX_LIMIT (ushort) 3
@@ -56,4 +56,3 @@ static char *_rio_defaults_h_sccs = "@(#)defaults.h 1.1" ;
/*********** end of file ***********/
-
diff --git a/drivers/char/rio/eisa.h b/drivers/char/rio/eisa.h
index 59371b0528b..c2abaf0eab0 100644
--- a/drivers/char/rio/eisa.h
+++ b/drivers/char/rio/eisa.h
@@ -60,7 +60,7 @@ static char *_eisa_h_sccs_ = "@(#)eisa.h 1.2";
#define EISA_PRODUCT_IDENT_LO 0xC80 /* where RIO_EISA_IDENT is */
#define EISA_PRODUCT_IDENT_HI 0xC81
-#define EISA_PRODUCT_NUMBER 0xC82 /* where PROD_CODE is */
+#define EISA_PRODUCT_NUMBER 0xC82 /* where PROD_CODE is */
#define EISA_REVISION_NUMBER 0xC83 /* revision (1dp) */
#define EISA_ENABLE 0xC84 /* set LSB to enable card */
#define EISA_UNIQUE_NUM_0 0xC88 /* vomit */
@@ -101,4 +101,4 @@ static char *_eisa_h_sccs_ = "@(#)eisa.h 1.2";
#define INBZ(z,x) inb(((z)<<12) | (x))
#define OUTBZ(z,x,y) outb((((z)<<12) | (x)), y)
-#endif /* __rio_eisa_h__ */
+#endif /* __rio_eisa_h__ */
diff --git a/drivers/char/rio/enable.h b/drivers/char/rio/enable.h
index 8e9a419e15b..e06673fa48c 100644
--- a/drivers/char/rio/enable.h
+++ b/drivers/char/rio/enable.h
@@ -36,7 +36,7 @@
#ifndef lint
#ifdef SCCS
-static char *_rio_enable_h_sccs = "@(#)enable.h 1.1" ;
+static char *_rio_enable_h_sccs = "@(#)enable.h 1.1";
#endif
#endif
@@ -46,5 +46,3 @@ static char *_rio_enable_h_sccs = "@(#)enable.h 1.1" ;
/*********** end of file ***********/
-
-
diff --git a/drivers/char/rio/error.h b/drivers/char/rio/error.h
index 229438e355f..f20f0789db8 100644
--- a/drivers/char/rio/error.h
+++ b/drivers/char/rio/error.h
@@ -80,6 +80,3 @@
/*********** end of file ***********/
-
-
-
diff --git a/drivers/char/rio/errors.h b/drivers/char/rio/errors.h
index f920b9f3e2b..1d0d8914433 100644
--- a/drivers/char/rio/errors.h
+++ b/drivers/char/rio/errors.h
@@ -101,4 +101,4 @@ static char *_errors_h_sccs_ = "@(#)errors.h 1.2";
#define NOT_ENOUGH_CORE_FOR_PCI_COPY 53
-#endif /* __rio_errors_h__ */
+#endif /* __rio_errors_h__ */
diff --git a/drivers/char/rio/formpkt.h b/drivers/char/rio/formpkt.h
index a8b65ae0de9..3c7c91ace3e 100644
--- a/drivers/char/rio/formpkt.h
+++ b/drivers/char/rio/formpkt.h
@@ -41,114 +41,113 @@
#ifndef lint
#ifdef SCCS
-static char *_rio_formpkt_h_sccs = "@(#)formpkt.h 1.1" ;
+static char *_rio_formpkt_h_sccs = "@(#)formpkt.h 1.1";
#endif
#endif
-typedef struct FORM_BOOT_PKT_1 FORM_BOOT_PKT_1 ;
+typedef struct FORM_BOOT_PKT_1 FORM_BOOT_PKT_1;
struct FORM_BOOT_PKT_1 {
- ushort pkt_number ;
- ushort pkt_total ;
- ushort boot_top ;
- } ;
+ ushort pkt_number;
+ ushort pkt_total;
+ ushort boot_top;
+};
-typedef struct FORM_BOOT_PKT_2 FORM_BOOT_PKT_2 ;
+typedef struct FORM_BOOT_PKT_2 FORM_BOOT_PKT_2;
struct FORM_BOOT_PKT_2 {
- ushort pkt_number ;
- char boot_data[10] ;
- } ;
+ ushort pkt_number;
+ char boot_data[10];
+};
-typedef struct FORM_ATTACH_RTA FORM_ATTACH_RTA ;
-struct FORM_ATTACH_RTA {
- char cmd_code ;
- char booter_serial[4] ;
- char booter_link ;
- char bootee_serial[4] ;
- char bootee_link ;
- } ;
+typedef struct FORM_ATTACH_RTA FORM_ATTACH_RTA;
+struct FORM_ATTACH_RTA {
+ char cmd_code;
+ char booter_serial[4];
+ char booter_link;
+ char bootee_serial[4];
+ char bootee_link;
+};
-typedef struct FORM_BOOT_ID FORM_BOOT_ID ;
-struct FORM_BOOT_ID {
- char cmd_code ;
- char bootee_serial[4] ;
- char bootee_prod_id ;
- char bootee_link ;
- } ;
+typedef struct FORM_BOOT_ID FORM_BOOT_ID;
+struct FORM_BOOT_ID {
+ char cmd_code;
+ char bootee_serial[4];
+ char bootee_prod_id;
+ char bootee_link;
+};
-typedef struct FORM_ROUTE_1 FORM_ROUTE_1 ;
+typedef struct FORM_ROUTE_1 FORM_ROUTE_1;
struct FORM_ROUTE_1 {
- char cmd_code ;
- char pkt_number ;
- char total_in_sequence ;
- char unit_id ;
- char host_unit_id ;
- } ;
-
-typedef struct FORM_ROUTE_2 FORM_ROUTE_2 ;
+ char cmd_code;
+ char pkt_number;
+ char total_in_sequence;
+ char unit_id;
+ char host_unit_id;
+};
+
+typedef struct FORM_ROUTE_2 FORM_ROUTE_2;
struct FORM_ROUTE_2 {
- char cmd_code ;
- char pkt_number ;
- char total_in_sequence ;
- char route_data[9] ;
- } ;
+ char cmd_code;
+ char pkt_number;
+ char total_in_sequence;
+ char route_data[9];
+};
-typedef struct FORM_ROUTE_REQ FORM_ROUTE_REQ ;
+typedef struct FORM_ROUTE_REQ FORM_ROUTE_REQ;
struct FORM_ROUTE_REQ {
- char cmd_code ;
- char pkt_number ;
- char total_in_sequence ;
- char route_data[10] ;
- } ;
+ char cmd_code;
+ char pkt_number;
+ char total_in_sequence;
+ char route_data[10];
+};
-typedef struct FORM_ERROR FORM_ERROR ;
+typedef struct FORM_ERROR FORM_ERROR;
struct FORM_ERROR {
- char cmd_code ;
- char error_code ;
+ char cmd_code;
+ char error_code;
- } ;
+};
-typedef struct FORM_STATUS FORM_STATUS ;
+typedef struct FORM_STATUS FORM_STATUS;
struct FORM_STATUS {
- char cmd_code ;
- char status_code ;
- char last_packet_valid ;
- char tx_buffer ;
- char rx_buffer ;
- char port_status ;
- char phb_status ;
- } ;
+ char cmd_code;
+ char status_code;
+ char last_packet_valid;
+ char tx_buffer;
+ char rx_buffer;
+ char port_status;
+ char phb_status;
+};
-typedef struct FORM_LINK_STATUS FORM_LINK_STATUS ;
+typedef struct FORM_LINK_STATUS FORM_LINK_STATUS;
struct FORM_LINK_STATUS {
- char cmd_code ;
- char status_code ;
- char link_number ;
- ushort rx_errors ;
- ushort tx_errors ;
- ushort csum_errors ;
- ushort disconnects ;
- } ;
+ char cmd_code;
+ char status_code;
+ char link_number;
+ ushort rx_errors;
+ ushort tx_errors;
+ ushort csum_errors;
+ ushort disconnects;
+};
-typedef struct FORM_PARTITION FORM_PARTITION ;
+typedef struct FORM_PARTITION FORM_PARTITION;
struct FORM_PARTITION {
- char cmd_code ;
- char status_code ;
- char port_number ;
- char tx_max ;
- char rx_max ;
- char rx_limit ;
- } ;
+ char cmd_code;
+ char status_code;
+ char port_number;
+ char tx_max;
+ char rx_max;
+ char rx_limit;
+};
#endif
/*********** end of file ***********/
-
diff --git a/drivers/char/rio/func.h b/drivers/char/rio/func.h
index 01987c6dc39..b4778410ec6 100644
--- a/drivers/char/rio/func.h
+++ b/drivers/char/rio/func.h
@@ -47,20 +47,19 @@ int RIOBootCodeHOST(struct rio_info *, register struct DownLoad *);
int RIOBootCodeUNKNOWN(struct rio_info *, struct DownLoad *);
void msec_timeout(struct Host *);
int RIOBootRup(struct rio_info *, uint, struct Host *, struct PKT *);
-int RIOBootOk(struct rio_info *,struct Host *, ulong);
-int RIORtaBound(struct rio_info *, uint);
+int RIOBootOk(struct rio_info *, struct Host *, ulong);
+int RIORtaBound(struct rio_info *, uint);
void FillSlot(int, int, uint, struct Host *);
/* riocmd.c */
int RIOFoadRta(struct Host *, struct Map *);
int RIOZombieRta(struct Host *, struct Map *);
-int RIOCommandRta(struct rio_info *, uint, int (* func)( struct Host *,
- struct Map *));
-int RIOIdentifyRta(struct rio_info *, caddr_t);
+int RIOCommandRta(struct rio_info *, uint, int (*func) (struct Host *, struct Map *));
+int RIOIdentifyRta(struct rio_info *, caddr_t);
int RIOKillNeighbour(struct rio_info *, caddr_t);
int RIOSuspendBootRta(struct Host *, int, int);
int RIOFoadWakeup(struct rio_info *);
-struct CmdBlk * RIOGetCmdBlk(void);
+struct CmdBlk *RIOGetCmdBlk(void);
void RIOFreeCmdBlk(struct CmdBlk *);
int RIOQueueCmdBlk(struct Host *, uint, struct CmdBlk *);
void RIOPollHostCommands(struct rio_info *, struct Host *);
@@ -71,13 +70,13 @@ void ShowPacket(uint, struct PKT *);
/* rioctrl.c */
int copyin(int, caddr_t, int);
-int riocontrol(struct rio_info *, dev_t,int,caddr_t,int);
-int RIOPreemptiveCmd(struct rio_info *,struct Port *,uchar);
+int riocontrol(struct rio_info *, dev_t, int, caddr_t, int);
+int RIOPreemptiveCmd(struct rio_info *, struct Port *, uchar);
/* rioinit.c */
void rioinit(struct rio_info *, struct RioHostInfo *);
void RIOInitHosts(struct rio_info *, struct RioHostInfo *);
-void RIOISAinit(struct rio_info *,int);
+void RIOISAinit(struct rio_info *, int);
int RIODoAT(struct rio_info *, int, int);
caddr_t RIOCheckForATCard(int);
int RIOAssignAT(struct rio_info *, int, caddr_t, int);
@@ -85,7 +84,7 @@ int RIOBoardTest(paddr_t, caddr_t, uchar, int);
void RIOAllocDataStructs(struct rio_info *);
void RIOSetupDataStructs(struct rio_info *);
int RIODefaultName(struct rio_info *, struct Host *, uint);
-struct rioVersion * RIOVersid(void);
+struct rioVersion *RIOVersid(void);
int RIOMapin(paddr_t, int, caddr_t *);
void RIOMapout(paddr_t, long, caddr_t);
void RIOHostReset(uint, volatile struct DpRam *, uint);
@@ -108,7 +107,7 @@ void remove_receive(struct Port *);
/* rioroute.c */
int RIORouteRup(struct rio_info *, uint, struct Host *, struct PKT *);
-void RIOFixPhbs(struct rio_info *, struct Host *, uint);
+void RIOFixPhbs(struct rio_info *, struct Host *, uint);
uint GetUnitType(uint);
int RIOSetChange(struct rio_info *);
int RIOFindFreeID(struct rio_info *, struct Host *, uint *, uint *);
@@ -116,9 +115,9 @@ int RIOFindFreeID(struct rio_info *, struct Host *, uint *, uint *);
/* riotty.c */
-int riotopen(struct tty_struct * tty, struct file * filp);
-int riotclose(void *ptr);
-int riotioctl(struct rio_info *, struct tty_struct *, register int, register caddr_t);
+int riotopen(struct tty_struct *tty, struct file *filp);
+int riotclose(void *ptr);
+int riotioctl(struct rio_info *, struct tty_struct *, register int, register caddr_t);
void ttyseth(struct Port *, struct ttystatics *, struct old_sgttyb *sg);
/* riotable.c */
@@ -127,27 +126,27 @@ int RIOApel(struct rio_info *);
int RIODeleteRta(struct rio_info *, struct Map *);
int RIOAssignRta(struct rio_info *, struct Map *);
int RIOReMapPorts(struct rio_info *, struct Host *, struct Map *);
-int RIOChangeName(struct rio_info *, struct Map*);
+int RIOChangeName(struct rio_info *, struct Map *);
#if 0
/* riodrvr.c */
-struct rio_info * rio_install(struct RioHostInfo *);
+struct rio_info *rio_install(struct RioHostInfo *);
int rio_uninstall(register struct rio_info *);
int rio_open(struct rio_info *, int, struct file *);
int rio_close(struct rio_info *, struct file *);
int rio_read(struct rio_info *, struct file *, char *, int);
-int rio_write(struct rio_info *, struct file * f, char *, int);
+int rio_write(struct rio_info *, struct file *f, char *, int);
int rio_ioctl(struct rio_info *, struct file *, int, char *);
-int rio_select(struct rio_info *, struct file * f, int, struct sel *);
-int rio_intr(char *);
-int rio_isr_thread(char *);
-struct rio_info * rio_info_store( int cmd, struct rio_info * p);
+int rio_select(struct rio_info *, struct file *f, int, struct sel *);
+int rio_intr(char *);
+int rio_isr_thread(char *);
+struct rio_info *rio_info_store(int cmd, struct rio_info *p);
#endif
-extern int rio_pcicopy(char *src, char *dst, int n);
-extern int rio_minor (struct tty_struct *tty);
-extern int rio_ismodem (struct tty_struct *tty);
+extern int rio_pcicopy(char *src, char *dst, int n);
+extern int rio_minor(struct tty_struct *tty);
+extern int rio_ismodem(struct tty_struct *tty);
-extern void rio_start_card_running (struct Host * HostP);
+extern void rio_start_card_running(struct Host *HostP);
-#endif /* __func_h_def */
+#endif /* __func_h_def */
diff --git a/drivers/char/rio/host.h b/drivers/char/rio/host.h
index 4c65963870a..f7dfcedf7d4 100644
--- a/drivers/char/rio/host.h
+++ b/drivers/char/rio/host.h
@@ -49,33 +49,32 @@ static char *_host_h_sccs_ = "@(#)host.h 1.2";
** Host data structure. This is used for the software equiv. of
** the host.
*/
-struct Host
-{
- uchar Type; /* RIO_EISA, RIO_MCA, ... */
- uchar Ivec; /* POLLED or ivec number */
- uchar Mode; /* Control stuff */
- uchar Slot; /* Slot */
- volatile caddr_t Caddr; /* KV address of DPRAM */
- volatile struct DpRam *CardP; /* KV address of DPRAM, with overlay */
- paddr_t PaddrP; /* Phys. address of DPRAM */
- char Name[MAX_NAME_LEN]; /* The name of the host */
- uint UniqueNum; /* host unique number */
- spinlock_t HostLock; /* Lock structure for MPX */
- /*struct pci_devinfo PciDevInfo; *//* PCI Bus/Device/Function stuff */
- /*struct lockb HostLock; *//* Lock structure for MPX */
- uint WorkToBeDone; /* set to true each interrupt */
- uint InIntr; /* Being serviced? */
- uint IntSrvDone;/* host's interrupt has been serviced */
- int (*Copy)( caddr_t, caddr_t, int ); /* copy func */
- struct timer_list timer;
- /*
- ** I M P O R T A N T !
- **
- ** The rest of this data structure is cleared to zero after
- ** a RIO_HOST_FOAD command.
- */
-
- ulong Flags; /* Whats going down */
+struct Host {
+ uchar Type; /* RIO_EISA, RIO_MCA, ... */
+ uchar Ivec; /* POLLED or ivec number */
+ uchar Mode; /* Control stuff */
+ uchar Slot; /* Slot */
+ volatile caddr_t Caddr; /* KV address of DPRAM */
+ volatile struct DpRam *CardP; /* KV address of DPRAM, with overlay */
+ paddr_t PaddrP; /* Phys. address of DPRAM */
+ char Name[MAX_NAME_LEN]; /* The name of the host */
+ uint UniqueNum; /* host unique number */
+ spinlock_t HostLock; /* Lock structure for MPX */
+ /*struct pci_devinfo PciDevInfo; *//* PCI Bus/Device/Function stuff */
+ /*struct lockb HostLock; *//* Lock structure for MPX */
+ uint WorkToBeDone; /* set to true each interrupt */
+ uint InIntr; /* Being serviced? */
+ uint IntSrvDone; /* host's interrupt has been serviced */
+ int (*Copy) (caddr_t, caddr_t, int); /* copy func */
+ struct timer_list timer;
+ /*
+ ** I M P O R T A N T !
+ **
+ ** The rest of this data structure is cleared to zero after
+ ** a RIO_HOST_FOAD command.
+ */
+
+ ulong Flags; /* Whats going down */
#define RC_WAITING 0
#define RC_STARTUP 1
#define RC_RUNNING 2
@@ -93,25 +92,25 @@ struct Host
#define RC_BOOT_OWN 0x10 /* Only boot RTAs bound to this system */
#define RC_BOOT_NONE 0x20 /* Don't boot any RTAs (slave mode) */
- struct Top Topology[LINKS_PER_UNIT]; /* one per link */
- struct Map Mapping[MAX_RUP]; /* Mappings for host */
- struct PHB *PhbP; /* Pointer to the PHB array */
- ushort *PhbNumP; /* Ptr to Number of PHB's */
- struct LPB *LinkStrP ; /* Link Structure Array */
- struct RUP *RupP; /* Sixteen real rups here */
- struct PARM_MAP *ParmMapP; /* points to the parmmap */
- uint ExtraUnits[MAX_EXTRA_UNITS]; /* unknown things */
- uint NumExtraBooted; /* how many of the above */
- /*
- ** Twenty logical rups.
- ** The first sixteen are the real Rup entries (above), the last four
- ** are the link RUPs.
- */
- struct UnixRup UnixRups[MAX_RUP+LINKS_PER_UNIT];
- int timeout_id; /* For calling 100 ms delays */
- int timeout_sem;/* For calling 100 ms delays */
- long locks; /* long req'd for set_bit --RR */
- char ____end_marker____;
+ struct Top Topology[LINKS_PER_UNIT]; /* one per link */
+ struct Map Mapping[MAX_RUP]; /* Mappings for host */
+ struct PHB *PhbP; /* Pointer to the PHB array */
+ ushort *PhbNumP; /* Ptr to Number of PHB's */
+ struct LPB *LinkStrP; /* Link Structure Array */
+ struct RUP *RupP; /* Sixteen real rups here */
+ struct PARM_MAP *ParmMapP; /* points to the parmmap */
+ uint ExtraUnits[MAX_EXTRA_UNITS]; /* unknown things */
+ uint NumExtraBooted; /* how many of the above */
+ /*
+ ** Twenty logical rups.
+ ** The first sixteen are the real Rup entries (above), the last four
+ ** are the link RUPs.
+ */
+ struct UnixRup UnixRups[MAX_RUP + LINKS_PER_UNIT];
+ int timeout_id; /* For calling 100 ms delays */
+ int timeout_sem; /* For calling 100 ms delays */
+ long locks; /* long req'd for set_bit --RR */
+ char ____end_marker____;
};
#define Control CardP->DpControl
#define SetInt CardP->DpSetInt
@@ -129,6 +128,6 @@ struct Host
#define Year CardP->DpYear
#define Week CardP->DpWeek
-#define RIO_DUMBPARM 0x0860 /* what not to expect */
+#define RIO_DUMBPARM 0x0860 /* what not to expect */
#endif
diff --git a/drivers/char/rio/hosthw.h b/drivers/char/rio/hosthw.h
index f6f31ece6e3..6281fe47f4e 100644
--- a/drivers/char/rio/hosthw.h
+++ b/drivers/char/rio/hosthw.h
@@ -37,7 +37,7 @@
#ifndef lint
#ifdef SCCS_LABELS
-static char *_rio_hosthw_h_sccs = "@(#)hosthw.h 1.2" ;
+static char *_rio_hosthw_h_sccs = "@(#)hosthw.h 1.2";
#endif
#endif
@@ -53,5 +53,3 @@ static char *_rio_hosthw_h_sccs = "@(#)hosthw.h 1.2" ;
/*********** end of file ***********/
-
-
diff --git a/drivers/char/rio/link.h b/drivers/char/rio/link.h
index 972250348f4..bfba5b0c033 100644
--- a/drivers/char/rio/link.h
+++ b/drivers/char/rio/link.h
@@ -70,27 +70,27 @@
#define DIE_NOW (ushort) 0x0200
/* Boot request stuff */
-#define BOOT_REQUEST ((ushort) 0) /* Request for a boot */
-#define BOOT_ABORT ((ushort) 1) /* Abort a boot */
-#define BOOT_SEQUENCE ((ushort) 2) /* Packet with the number of packets
- and load address */
-#define BOOT_COMPLETED ((ushort) 3) /* Boot completed */
+#define BOOT_REQUEST ((ushort) 0) /* Request for a boot */
+#define BOOT_ABORT ((ushort) 1) /* Abort a boot */
+#define BOOT_SEQUENCE ((ushort) 2) /* Packet with the number of packets
+ and load address */
+#define BOOT_COMPLETED ((ushort) 3) /* Boot completed */
/* States that a link can be in */
-#define LINK_DISCONNECTED ((ushort) 0) /* Disconnected */
-#define LINK_BOOT1 ((ushort) 1) /* Trying to send 1st stage boot */
-#define LINK_BOOT2 ((ushort) 2) /* Trying to send 2nd stage boot */
-#define LINK_BOOT2WAIT ((ushort) 3) /* Waiting for selftest results */
-#define LINK_BOOT3 ((ushort) 4) /* Trying to send 3rd stage boots */
-#define LINK_SYNC ((ushort) 5) /* Syncing */
+#define LINK_DISCONNECTED ((ushort) 0) /* Disconnected */
+#define LINK_BOOT1 ((ushort) 1) /* Trying to send 1st stage boot */
+#define LINK_BOOT2 ((ushort) 2) /* Trying to send 2nd stage boot */
+#define LINK_BOOT2WAIT ((ushort) 3) /* Waiting for selftest results */
+#define LINK_BOOT3 ((ushort) 4) /* Trying to send 3rd stage boots */
+#define LINK_SYNC ((ushort) 5) /* Syncing */
-#define LINK_INTRO ((ushort) 10) /* Introductory packet */
-#define LINK_SUPPLYID ((ushort) 11) /* Trying to supply an ID */
-#define LINK_TOPOLOGY ((ushort) 12) /* Send a topology update */
-#define LINK_REQUESTID ((ushort) 13) /* Waiting for an ID */
-#define LINK_CONNECTED ((ushort) 14) /* Connected */
+#define LINK_INTRO ((ushort) 10) /* Introductory packet */
+#define LINK_SUPPLYID ((ushort) 11) /* Trying to supply an ID */
+#define LINK_TOPOLOGY ((ushort) 12) /* Send a topology update */
+#define LINK_REQUESTID ((ushort) 13) /* Waiting for an ID */
+#define LINK_CONNECTED ((ushort) 14) /* Connected */
-#define LINK_INTERCONNECT ((ushort) 20) /* Subnets interconnected */
+#define LINK_INTERCONNECT ((ushort) 20) /* Subnets interconnected */
#define LINK_SPARE ((ushort) 40)
@@ -103,12 +103,12 @@
** LED stuff
*/
#if defined(RTA)
-#define LED_OFF ((ushort) 0) /* LED off */
-#define LED_RED ((ushort) 1) /* LED Red */
-#define LED_GREEN ((ushort) 2) /* LED Green */
-#define LED_ORANGE ((ushort) 4) /* LED Orange */
-#define LED_1TO8_OPEN ((ushort) 1) /* Port 1->8 LED on */
-#define LED_9TO16_OPEN ((ushort) 2) /* Port 9->16 LED on */
+#define LED_OFF ((ushort) 0) /* LED off */
+#define LED_RED ((ushort) 1) /* LED Red */
+#define LED_GREEN ((ushort) 2) /* LED Green */
+#define LED_ORANGE ((ushort) 4) /* LED Orange */
+#define LED_1TO8_OPEN ((ushort) 1) /* Port 1->8 LED on */
+#define LED_9TO16_OPEN ((ushort) 2) /* Port 9->16 LED on */
#define LED_SET_COLOUR(colour) (link->led = (colour))
#define LED_OR_COLOUR(colour) (link->led |= (colour))
#define LED_TIMEOUT(time) (link->led_timeout = RioTimePlus(RioTime(),(time)))
@@ -116,72 +116,72 @@
#define LED_SET_COLOUR(colour)
#define LED_OR_COLOUR(colour)
#define LED_TIMEOUT(time)
-#endif /* RTA */
+#endif /* RTA */
struct LPB {
- WORD link_number ; /* Link Number */
- Channel_ptr in_ch ; /* Link In Channel */
- Channel_ptr out_ch ; /* Link Out Channel */
+ WORD link_number; /* Link Number */
+ Channel_ptr in_ch; /* Link In Channel */
+ Channel_ptr out_ch; /* Link Out Channel */
#ifdef RTA
- uchar stat_led ; /* Port open leds */
- uchar led ; /* True, light led! */
+ uchar stat_led; /* Port open leds */
+ uchar led; /* True, light led! */
#endif
- BYTE attached_serial[4]; /* Attached serial number */
- BYTE attached_host_serial[4];
- /* Serial number of Host who
- booted the other end */
- WORD descheduled ; /* Currently Descheduled */
- WORD state; /* Current state */
- WORD send_poll ; /* Send a Poll Packet */
- Process_ptr ltt_p ; /* Process Descriptor */
- Process_ptr lrt_p ; /* Process Descriptor */
- WORD lrt_status ; /* Current lrt status */
- WORD ltt_status ; /* Current ltt status */
- WORD timeout ; /* Timeout value */
- WORD topology; /* Topology bits */
- WORD mon_ltt ;
- WORD mon_lrt ;
- WORD WaitNoBoot ; /* Secs to hold off booting */
- PKT_ptr add_packet_list; /* Add packets to here */
- PKT_ptr remove_packet_list; /* Send packets from here */
+ BYTE attached_serial[4]; /* Attached serial number */
+ BYTE attached_host_serial[4];
+ /* Serial number of Host who
+ booted the other end */
+ WORD descheduled; /* Currently Descheduled */
+ WORD state; /* Current state */
+ WORD send_poll; /* Send a Poll Packet */
+ Process_ptr ltt_p; /* Process Descriptor */
+ Process_ptr lrt_p; /* Process Descriptor */
+ WORD lrt_status; /* Current lrt status */
+ WORD ltt_status; /* Current ltt status */
+ WORD timeout; /* Timeout value */
+ WORD topology; /* Topology bits */
+ WORD mon_ltt;
+ WORD mon_lrt;
+ WORD WaitNoBoot; /* Secs to hold off booting */
+ PKT_ptr add_packet_list; /* Add packets to here */
+ PKT_ptr remove_packet_list; /* Send packets from here */
#ifdef RTA
#ifdef DCIRRUS
-#define QBUFS_PER_REDIRECT (4 / PKTS_PER_BUFFER + 1)
+#define QBUFS_PER_REDIRECT (4 / PKTS_PER_BUFFER + 1)
#else
-#define QBUFS_PER_REDIRECT (8 / PKTS_PER_BUFFER + 1)
+#define QBUFS_PER_REDIRECT (8 / PKTS_PER_BUFFER + 1)
#endif
- PKT_ptr_ptr rd_add ; /* Add a new Packet here */
- Q_BUF_ptr rd_add_qb; /* Pointer to the add Q buf */
- PKT_ptr_ptr rd_add_st_qbb ; /* Pointer to start of the Q's buf */
- PKT_ptr_ptr rd_add_end_qbb ; /* Pointer to the end of the Q's buf */
- PKT_ptr_ptr rd_remove ; /* Remove a Packet here */
- Q_BUF_ptr rd_remove_qb ; /* Pointer to the remove Q buf */
- PKT_ptr_ptr rd_remove_st_qbb ; /* Pointer to the start of the Q buf */
- PKT_ptr_ptr rd_remove_end_qbb ; /* Pointer to the end of the Q buf */
- ushort pkts_in_q ; /* Packets in queue */
+ PKT_ptr_ptr rd_add; /* Add a new Packet here */
+ Q_BUF_ptr rd_add_qb; /* Pointer to the add Q buf */
+ PKT_ptr_ptr rd_add_st_qbb; /* Pointer to start of the Q's buf */
+ PKT_ptr_ptr rd_add_end_qbb; /* Pointer to the end of the Q's buf */
+ PKT_ptr_ptr rd_remove; /* Remove a Packet here */
+ Q_BUF_ptr rd_remove_qb; /* Pointer to the remove Q buf */
+ PKT_ptr_ptr rd_remove_st_qbb; /* Pointer to the start of the Q buf */
+ PKT_ptr_ptr rd_remove_end_qbb; /* Pointer to the end of the Q buf */
+ ushort pkts_in_q; /* Packets in queue */
#endif
- Channel_ptr lrt_fail_chan ; /* Lrt's failure channel */
- Channel_ptr ltt_fail_chan ; /* Ltt's failure channel */
+ Channel_ptr lrt_fail_chan; /* Lrt's failure channel */
+ Channel_ptr ltt_fail_chan; /* Ltt's failure channel */
#if defined (HOST) || defined (INKERNEL)
- /* RUP structure for HOST to driver communications */
- struct RUP rup ;
+ /* RUP structure for HOST to driver communications */
+ struct RUP rup;
#endif
- struct RUP link_rup; /* RUP for the link (POLL,
- topology etc.) */
- WORD attached_link ; /* Number of attached link */
- WORD csum_errors ; /* csum errors */
- WORD num_disconnects ; /* number of disconnects */
- WORD num_sync_rcvd ; /* # sync's received */
- WORD num_sync_rqst ; /* # sync requests */
- WORD num_tx ; /* Num pkts sent */
- WORD num_rx ; /* Num pkts received */
- WORD module_attached; /* Module tpyes of attached */
- WORD led_timeout; /* LED timeout */
- WORD first_port; /* First port to service */
- WORD last_port; /* Last port to service */
- } ;
+ struct RUP link_rup; /* RUP for the link (POLL,
+ topology etc.) */
+ WORD attached_link; /* Number of attached link */
+ WORD csum_errors; /* csum errors */
+ WORD num_disconnects; /* number of disconnects */
+ WORD num_sync_rcvd; /* # sync's received */
+ WORD num_sync_rqst; /* # sync requests */
+ WORD num_tx; /* Num pkts sent */
+ WORD num_rx; /* Num pkts received */
+	WORD module_attached;	/* Module types attached */
+ WORD led_timeout; /* LED timeout */
+ WORD first_port; /* First port to service */
+ WORD last_port; /* Last port to service */
+};
#endif
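
The LINK_* values above make up the per-link boot and connection state machine. The helper below is purely illustrative and not in the driver: it turns those states into readable strings for debug output, assuming the definitions above and the driver's ushort typedef are in scope.

static const char *rio_link_state_name(ushort state)
{
	switch (state) {
	case LINK_DISCONNECTED:	return "disconnected";
	case LINK_BOOT1:	return "sending 1st stage boot";
	case LINK_BOOT2:	return "sending 2nd stage boot";
	case LINK_BOOT2WAIT:	return "waiting for selftest results";
	case LINK_BOOT3:	return "sending 3rd stage boot";
	case LINK_SYNC:		return "syncing";
	case LINK_INTRO:	return "introductory packet";
	case LINK_SUPPLYID:	return "supplying an ID";
	case LINK_TOPOLOGY:	return "topology update";
	case LINK_REQUESTID:	return "waiting for an ID";
	case LINK_CONNECTED:	return "connected";
	case LINK_INTERCONNECT:	return "subnets interconnected";
	default:		return "unknown";
	}
}
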
diff --git a/drivers/char/rio/linux_compat.h b/drivers/char/rio/linux_compat.h
index d53843abe02..17a14c4a342 100644
--- a/drivers/char/rio/linux_compat.h
+++ b/drivers/char/rio/linux_compat.h
@@ -41,7 +41,7 @@
#endif
struct ttystatics {
- struct termios tm;
+ struct termios tm;
};
#define bzero(d, n) memset((d), 0, (n))
@@ -97,26 +97,24 @@ the older driver. The older driver includes "brates.h" which shadows
the definitions from Linux, and is incompatible... */
/* RxBaud and TxBaud definitions... */
-#define RIO_B0 0x00 /* RTS / DTR signals dropped */
-#define RIO_B50 0x01 /* 50 baud */
-#define RIO_B75 0x02 /* 75 baud */
-#define RIO_B110 0x03 /* 110 baud */
-#define RIO_B134 0x04 /* 134.5 baud */
-#define RIO_B150 0x05 /* 150 baud */
-#define RIO_B200 0x06 /* 200 baud */
-#define RIO_B300 0x07 /* 300 baud */
-#define RIO_B600 0x08 /* 600 baud */
-#define RIO_B1200 0x09 /* 1200 baud */
-#define RIO_B1800 0x0A /* 1800 baud */
-#define RIO_B2400 0x0B /* 2400 baud */
-#define RIO_B4800 0x0C /* 4800 baud */
-#define RIO_B9600 0x0D /* 9600 baud */
-#define RIO_B19200 0x0E /* 19200 baud */
-#define RIO_B38400 0x0F /* 38400 baud */
-#define RIO_B56000 0x10 /* 56000 baud */
-#define RIO_B57600 0x11 /* 57600 baud */
-#define RIO_B64000 0x12 /* 64000 baud */
-#define RIO_B115200 0x13 /* 115200 baud */
-#define RIO_B2000 0x14 /* 2000 baud */
-
-
+#define RIO_B0 0x00 /* RTS / DTR signals dropped */
+#define RIO_B50 0x01 /* 50 baud */
+#define RIO_B75 0x02 /* 75 baud */
+#define RIO_B110 0x03 /* 110 baud */
+#define RIO_B134 0x04 /* 134.5 baud */
+#define RIO_B150 0x05 /* 150 baud */
+#define RIO_B200 0x06 /* 200 baud */
+#define RIO_B300 0x07 /* 300 baud */
+#define RIO_B600 0x08 /* 600 baud */
+#define RIO_B1200 0x09 /* 1200 baud */
+#define RIO_B1800 0x0A /* 1800 baud */
+#define RIO_B2400 0x0B /* 2400 baud */
+#define RIO_B4800 0x0C /* 4800 baud */
+#define RIO_B9600 0x0D /* 9600 baud */
+#define RIO_B19200 0x0E /* 19200 baud */
+#define RIO_B38400 0x0F /* 38400 baud */
+#define RIO_B56000 0x10 /* 56000 baud */
+#define RIO_B57600 0x11 /* 57600 baud */
+#define RIO_B64000 0x12 /* 64000 baud */
+#define RIO_B115200 0x13 /* 115200 baud */
+#define RIO_B2000 0x14 /* 2000 baud */
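
The RIO_B* values above are the card's own encoding of baud rates. The following sketch is not taken from the driver: it shows how a termios rate could be translated into that encoding, handles only a representative subset of rates, and uses a hypothetical helper name.

#include <linux/termios.h>

static int rio_baud_code(unsigned int cbaud)	/* cbaud = c_cflag & CBAUD */
{
	switch (cbaud) {
	case B0:	return RIO_B0;
	case B50:	return RIO_B50;
	case B75:	return RIO_B75;
	case B110:	return RIO_B110;
	case B300:	return RIO_B300;
	case B1200:	return RIO_B1200;
	case B9600:	return RIO_B9600;
	case B19200:	return RIO_B19200;
	case B38400:	return RIO_B38400;
	case B57600:	return RIO_B57600;
	case B115200:	return RIO_B115200;
	default:	return -1;	/* rate not supported by the card */
	}
}
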
diff --git a/drivers/char/rio/list.h b/drivers/char/rio/list.h
index a4f7f1f5625..36aad4c9cb3 100644
--- a/drivers/char/rio/list.h
+++ b/drivers/char/rio/list.h
@@ -38,7 +38,7 @@
#ifdef SCCS_LABELS
#ifndef lint
-static char *_rio_list_h_sccs = "@(#)list.h 1.9" ;
+static char *_rio_list_h_sccs = "@(#)list.h 1.9";
#endif
#endif
@@ -166,7 +166,7 @@ static char *_rio_list_h_sccs = "@(#)list.h 1.9" ;
#endif
-#else /* !IN_KERNEL */
+#else /* !IN_KERNEL */
#define ZERO_PTR NULL
@@ -192,5 +192,5 @@ static char *_rio_list_h_sccs = "@(#)list.h 1.9" ;
#define splx(oldspl) if ((oldspl) == 0) spl0()
#endif
-#endif /* ifndef _list.h */
+#endif /* ifndef _list.h */
/*********** end of file ***********/
diff --git a/drivers/char/rio/lrt.h b/drivers/char/rio/lrt.h
index bbac8fa18fe..b41764d7a22 100644
--- a/drivers/char/rio/lrt.h
+++ b/drivers/char/rio/lrt.h
@@ -36,7 +36,7 @@
#ifndef lint
#ifdef SCCS_LABELS
-static char *_rio_lrt_h_sccs = "@(#)lrt.h 1.1" ;
+static char *_rio_lrt_h_sccs = "@(#)lrt.h 1.1";
#endif
#endif
@@ -50,6 +50,3 @@ static char *_rio_lrt_h_sccs = "@(#)lrt.h 1.1" ;
/*********** end of file ***********/
-
-
-
diff --git a/drivers/char/rio/ltt.h b/drivers/char/rio/ltt.h
index f27dcecf03c..ab04004d404 100644
--- a/drivers/char/rio/ltt.h
+++ b/drivers/char/rio/ltt.h
@@ -36,7 +36,7 @@
#ifndef lint
#ifdef SCCS_LABELS
-static char *_rio_ltt_h_sccs = "@(#)ltt.h 1.1" ;
+static char *_rio_ltt_h_sccs = "@(#)ltt.h 1.1";
#endif
#endif
@@ -45,11 +45,8 @@ static char *_rio_ltt_h_sccs = "@(#)ltt.h 1.1" ;
#else
#define LTT_STACK (ushort) 200
#endif
-
-/*********** end of file ***********/
-
-
+/*********** end of file ***********/
diff --git a/drivers/char/rio/lttwake.h b/drivers/char/rio/lttwake.h
index fe17d0ee493..fdf0c1f250a 100644
--- a/drivers/char/rio/lttwake.h
+++ b/drivers/char/rio/lttwake.h
@@ -39,7 +39,7 @@
#ifndef lint
#ifdef SCCS_LABELS
-static char *_rio_lttwake_h_sccs = "@(#)lttwake.h 1.1" ;
+static char *_rio_lttwake_h_sccs = "@(#)lttwake.h 1.1";
#endif
#endif
@@ -48,6 +48,3 @@ static char *_rio_lttwake_h_sccs = "@(#)lttwake.h 1.1" ;
/*********** end of file ***********/
-
-
-
diff --git a/drivers/char/rio/map.h b/drivers/char/rio/map.h
index 400645a1ff2..97fe287aab2 100644
--- a/drivers/char/rio/map.h
+++ b/drivers/char/rio/map.h
@@ -46,21 +46,20 @@ static char *_map_h_sccs_ = "@(#)map.h 1.2";
#define TOTAL_MAP_ENTRIES (MAX_MAP_ENTRY*RIO_SLOTS)
#define MAX_NAME_LEN 32
-struct Map
-{
- uint HostUniqueNum; /* Supporting hosts unique number */
- uint RtaUniqueNum; /* Unique number */
+struct Map {
+ uint HostUniqueNum; /* Supporting hosts unique number */
+ uint RtaUniqueNum; /* Unique number */
/*
- ** The next two IDs must be swapped on big-endian architectures
- ** when using a v2.04 /etc/rio/config with a v3.00 driver (when
- ** upgrading for example).
- */
- ushort ID; /* ID used in the subnet */
- ushort ID2; /* ID of 2nd block of 8 for 16 port */
- ulong Flags; /* Booted, ID Given, Disconnected */
- ulong SysPort; /* First tty mapped to this port */
- struct Top Topology[LINKS_PER_UNIT]; /* ID connected to each link */
- char Name[MAX_NAME_LEN]; /* Cute name by which RTA is known */
+ ** The next two IDs must be swapped on big-endian architectures
+ ** when using a v2.04 /etc/rio/config with a v3.00 driver (when
+ ** upgrading for example).
+ */
+ ushort ID; /* ID used in the subnet */
+ ushort ID2; /* ID of 2nd block of 8 for 16 port */
+ ulong Flags; /* Booted, ID Given, Disconnected */
+ ulong SysPort; /* First tty mapped to this port */
+ struct Top Topology[LINKS_PER_UNIT]; /* ID connected to each link */
+ char Name[MAX_NAME_LEN]; /* Cute name by which RTA is known */
};
/*
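
struct Map above is one slot of the Mapping[MAX_RUP] table kept per host (see struct Host earlier in this patch). The lookup below is hypothetical and not part of the driver; it is included only to show how the two structures relate.

#include <linux/stddef.h>	/* NULL */

static struct Map *rio_find_rta(struct Host *hp, uint unique)
{
	int i;

	for (i = 0; i < MAX_RUP; i++)
		if (hp->Mapping[i].RtaUniqueNum == unique)
			return &hp->Mapping[i];

	return NULL;	/* RTA is not mapped on this host */
}
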
diff --git a/drivers/char/rio/mca.h b/drivers/char/rio/mca.h
index 08a327e473a..d01e76be7a1 100644
--- a/drivers/char/rio/mca.h
+++ b/drivers/char/rio/mca.h
@@ -70,4 +70,4 @@ static char *_mca_h_sccs_ = "@(#)mca.h 1.2";
#define RIO_MCA_DEFAULT_MODE SLOW_LINKS
-#endif /* __rio_mca_h__ */
+#endif /* __rio_mca_h__ */
diff --git a/drivers/char/rio/mesg.h b/drivers/char/rio/mesg.h
index 9cf6c0bacea..dd9be586ec6 100644
--- a/drivers/char/rio/mesg.h
+++ b/drivers/char/rio/mesg.h
@@ -38,4 +38,4 @@ static char *_mesg_h_sccs_ = "@(#)mesg.h 1.2";
#endif
-#endif /* __rio_mesg_h__ */
+#endif /* __rio_mesg_h__ */
diff --git a/drivers/char/rio/param.h b/drivers/char/rio/param.h
index 2dc30b9aab3..de7e57180c9 100644
--- a/drivers/char/rio/param.h
+++ b/drivers/char/rio/param.h
@@ -42,20 +42,19 @@ static char *_param_h_sccs_ = "@(#)param.h 1.2";
** the param command block, as used in OPEN and PARAM calls.
*/
-struct phb_param
-{
- BYTE Cmd; /* It is very important that these line up */
- BYTE Cor1; /* with what is expected at the other end. */
- BYTE Cor2; /* to confirm that you've got it right, */
- BYTE Cor4; /* check with cirrus/cirrus.h */
- BYTE Cor5;
- BYTE TxXon; /* Transmit X-On character */
- BYTE TxXoff; /* Transmit X-Off character */
- BYTE RxXon; /* Receive X-On character */
- BYTE RxXoff; /* Receive X-Off character */
- BYTE LNext; /* Literal-next character */
- BYTE TxBaud; /* Transmit baudrate */
- BYTE RxBaud; /* Receive baudrate */
+struct phb_param {
+ BYTE Cmd; /* It is very important that these line up */
+ BYTE Cor1; /* with what is expected at the other end. */
+ BYTE Cor2; /* to confirm that you've got it right, */
+ BYTE Cor4; /* check with cirrus/cirrus.h */
+ BYTE Cor5;
+ BYTE TxXon; /* Transmit X-On character */
+ BYTE TxXoff; /* Transmit X-Off character */
+ BYTE RxXon; /* Receive X-On character */
+ BYTE RxXoff; /* Receive X-Off character */
+ BYTE LNext; /* Literal-next character */
+ BYTE TxBaud; /* Transmit baudrate */
+ BYTE RxBaud; /* Receive baudrate */
};
#endif
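
The comments in struct phb_param stress that its layout has to line up byte for byte with what the card firmware (cirrus/cirrus.h) expects. A cheap compile-time guard is sketched below; the 12-byte figure is an assumption obtained by counting the BYTE fields above, and the helper is not part of the driver.

#include <linux/kernel.h>	/* BUILD_BUG_ON */

static inline void rio_check_phb_param_layout(void)
{
	/* All twelve members are single bytes, so no padding is expected. */
	BUILD_BUG_ON(sizeof(struct phb_param) != 12);
}
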
diff --git a/drivers/char/rio/parmmap.h b/drivers/char/rio/parmmap.h
index 46f99dfdac8..fe4e0056706 100644
--- a/drivers/char/rio/parmmap.h
+++ b/drivers/char/rio/parmmap.h
@@ -44,53 +44,50 @@
#endif
#endif
-typedef struct PARM_MAP PARM_MAP ;
+typedef struct PARM_MAP PARM_MAP;
-struct PARM_MAP
-{
-PHB_ptr phb_ptr ; /* Pointer to the PHB array */
-WORD_ptr phb_num_ptr ; /* Ptr to Number of PHB's */
-FREE_LIST_ptr free_list; /* Free List pointer */
-FREE_LIST_ptr free_list_end; /* Free List End pointer */
-Q_BUF_ptr_ptr q_free_list_ptr ; /* Ptr to Q_BUF variable */
-BYTE_ptr unit_id_ptr ; /* Unit Id */
-LPB_ptr link_str_ptr ; /* Link Structure Array */
-BYTE_ptr bootloader_1 ; /* 1st Stage Boot Loader */
-BYTE_ptr bootloader_2 ; /* 2nd Stage Boot Loader */
-WORD_ptr port_route_map_ptr ; /* Port Route Map */
-ROUTE_STR_ptr route_ptr ; /* Unit Route Map */
-NUMBER_ptr map_present ; /* Route Map present */
-NUMBER pkt_num ; /* Total number of packets */
-NUMBER q_num ; /* Total number of Q packets */
-WORD buffers_per_port ; /* Number of buffers per port */
-WORD heap_size ; /* Initial size of heap */
-WORD heap_left ; /* Current Heap left */
-WORD error ; /* Error code */
-WORD tx_max; /* Max number of tx pkts per phb */
-WORD rx_max; /* Max number of rx pkts per phb */
-WORD rx_limit; /* For high / low watermarks */
-NUMBER links ; /* Links to use */
-NUMBER timer ; /* Interrupts per second */
-RUP_ptr rups ; /* Pointer to the RUPs */
-WORD max_phb ; /* Mostly for debugging */
-WORD living ; /* Just increments!! */
-WORD init_done ; /* Initialisation over */
-WORD booting_link ;
-WORD idle_count ; /* Idle time counter */
-WORD busy_count ; /* Busy counter */
-WORD idle_control ; /* Control Idle Process */
+struct PARM_MAP {
+ PHB_ptr phb_ptr; /* Pointer to the PHB array */
+ WORD_ptr phb_num_ptr; /* Ptr to Number of PHB's */
+ FREE_LIST_ptr free_list; /* Free List pointer */
+ FREE_LIST_ptr free_list_end; /* Free List End pointer */
+ Q_BUF_ptr_ptr q_free_list_ptr; /* Ptr to Q_BUF variable */
+ BYTE_ptr unit_id_ptr; /* Unit Id */
+ LPB_ptr link_str_ptr; /* Link Structure Array */
+ BYTE_ptr bootloader_1; /* 1st Stage Boot Loader */
+ BYTE_ptr bootloader_2; /* 2nd Stage Boot Loader */
+ WORD_ptr port_route_map_ptr; /* Port Route Map */
+ ROUTE_STR_ptr route_ptr; /* Unit Route Map */
+ NUMBER_ptr map_present; /* Route Map present */
+ NUMBER pkt_num; /* Total number of packets */
+ NUMBER q_num; /* Total number of Q packets */
+ WORD buffers_per_port; /* Number of buffers per port */
+ WORD heap_size; /* Initial size of heap */
+ WORD heap_left; /* Current Heap left */
+ WORD error; /* Error code */
+ WORD tx_max; /* Max number of tx pkts per phb */
+ WORD rx_max; /* Max number of rx pkts per phb */
+ WORD rx_limit; /* For high / low watermarks */
+ NUMBER links; /* Links to use */
+ NUMBER timer; /* Interrupts per second */
+ RUP_ptr rups; /* Pointer to the RUPs */
+ WORD max_phb; /* Mostly for debugging */
+ WORD living; /* Just increments!! */
+ WORD init_done; /* Initialisation over */
+ WORD booting_link;
+ WORD idle_count; /* Idle time counter */
+ WORD busy_count; /* Busy counter */
+ WORD idle_control; /* Control Idle Process */
#if defined(HOST) || defined(INKERNEL)
-WORD tx_intr; /* TX interrupt pending */
-WORD rx_intr; /* RX interrupt pending */
-WORD rup_intr; /* RUP interrupt pending */
+ WORD tx_intr; /* TX interrupt pending */
+ WORD rx_intr; /* RX interrupt pending */
+ WORD rup_intr; /* RUP interrupt pending */
#endif
#if defined(RTA)
-WORD dying_count; /* Count of processes dead */
+ WORD dying_count; /* Count of processes dead */
#endif
-} ;
+};
#endif
/*********** end of file ***********/
-
-
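
PARM_MAP above sits in the card's dual-ported RAM and is how the downloaded code reports its state back to the host. The sketch below assumes the structure is reached through an ioremapped pointer and shows the host polling init_done before trusting the rest of the map; the retry count and helper name are arbitrary choices for the example.

#include <linux/delay.h>	/* mdelay */
#include <asm/io.h>		/* readw */

static int rio_wait_card_init(struct PARM_MAP *pm)
{
	int tries;

	for (tries = 0; tries < 100; tries++) {
		if (readw(&pm->init_done))
			return 0;	/* card-side initialisation finished */
		mdelay(10);
	}
	return -1;			/* card never came up */
}
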
diff --git a/drivers/char/rio/pci.h b/drivers/char/rio/pci.h
index dc635bd2519..1eba9118079 100644
--- a/drivers/char/rio/pci.h
+++ b/drivers/char/rio/pci.h
@@ -73,4 +73,4 @@ static char *_pci_h_sccs_ = "@(#)pci.h 1.2";
#define RIO_PCI_DEFAULT_MODE 0x05
-#endif /* __rio_pci_h__ */
+#endif /* __rio_pci_h__ */
diff --git a/drivers/char/rio/phb.h b/drivers/char/rio/phb.h
index e1483a0e30b..3baebf8513a 100644
--- a/drivers/char/rio/phb.h
+++ b/drivers/char/rio/phb.h
@@ -58,37 +58,37 @@
/*************************************************
* Handshake asserted. Deasserted by the LTT(s)
************************************************/
-#define PHB_HANDSHAKE_SET ((ushort) 0x001) /* Set by LRT */
+#define PHB_HANDSHAKE_SET ((ushort) 0x001) /* Set by LRT */
-#define PHB_HANDSHAKE_RESET ((ushort) 0x002) /* Set by ISR / driver */
+#define PHB_HANDSHAKE_RESET ((ushort) 0x002) /* Set by ISR / driver */
#define PHB_HANDSHAKE_FLAGS (PHB_HANDSHAKE_RESET | PHB_HANDSHAKE_SET)
- /* Reset by ltt */
+ /* Reset by ltt */
/*************************************************
* Maximum number of PHB's
************************************************/
#if defined (HOST) || defined (INKERNEL)
-#define MAX_PHB ((ushort) 128) /* range 0-127 */
+#define MAX_PHB ((ushort) 128) /* range 0-127 */
#else
-#define MAX_PHB ((ushort) 8) /* range 0-7 */
+#define MAX_PHB ((ushort) 8) /* range 0-7 */
#endif
/*************************************************
* Defines for the mode fields
************************************************/
-#define TXPKT_INCOMPLETE 0x0001 /* Previous tx packet not completed */
-#define TXINTR_ENABLED 0x0002 /* Tx interrupt is enabled */
-#define TX_TAB3 0x0004 /* TAB3 mode */
-#define TX_OCRNL 0x0008 /* OCRNL mode */
-#define TX_ONLCR 0x0010 /* ONLCR mode */
-#define TX_SENDSPACES 0x0020 /* Send n spaces command needs
- completing */
-#define TX_SENDNULL 0x0040 /* Escaping NULL needs completing */
-#define TX_SENDLF 0x0080 /* LF -> CR LF needs completing */
-#define TX_PARALLELBUG 0x0100 /* CD1400 LF -> CR LF bug on parallel
- port */
+#define TXPKT_INCOMPLETE 0x0001 /* Previous tx packet not completed */
+#define TXINTR_ENABLED 0x0002 /* Tx interrupt is enabled */
+#define TX_TAB3 0x0004 /* TAB3 mode */
+#define TX_OCRNL 0x0008 /* OCRNL mode */
+#define TX_ONLCR 0x0010 /* ONLCR mode */
+#define TX_SENDSPACES 0x0020 /* Send n spaces command needs
+ completing */
+#define TX_SENDNULL 0x0040 /* Escaping NULL needs completing */
+#define TX_SENDLF 0x0080 /* LF -> CR LF needs completing */
+#define TX_PARALLELBUG 0x0100 /* CD1400 LF -> CR LF bug on parallel
+ port */
#define TX_HANGOVER (TX_SENDSPACES | TX_SENDLF | TX_SENDNULL)
#define TX_DTRFLOW 0x0200 /* DTR tx flow control */
#define TX_DTRFLOWED 0x0400 /* DTR is low - don't allow more data
@@ -96,34 +96,34 @@
#define TX_DATAINFIFO 0x0800 /* There is data in the FIFO */
#define TX_BUSY 0x1000 /* Data in FIFO, shift or holding regs */
-#define RX_SPARE 0x0001 /* SPARE */
-#define RXINTR_ENABLED 0x0002 /* Rx interrupt enabled */
-#define RX_ICRNL 0x0008 /* ICRNL mode */
-#define RX_INLCR 0x0010 /* INLCR mode */
-#define RX_IGNCR 0x0020 /* IGNCR mode */
-#define RX_CTSFLOW 0x0040 /* CTSFLOW enabled */
-#define RX_IXOFF 0x0080 /* IXOFF enabled */
-#define RX_CTSFLOWED 0x0100 /* CTSFLOW and CTS dropped */
-#define RX_IXOFFED 0x0200 /* IXOFF and xoff sent */
-#define RX_BUFFERED 0x0400 /* Try and pass on complete packets */
+#define RX_SPARE 0x0001 /* SPARE */
+#define RXINTR_ENABLED 0x0002 /* Rx interrupt enabled */
+#define RX_ICRNL 0x0008 /* ICRNL mode */
+#define RX_INLCR 0x0010 /* INLCR mode */
+#define RX_IGNCR 0x0020 /* IGNCR mode */
+#define RX_CTSFLOW 0x0040 /* CTSFLOW enabled */
+#define RX_IXOFF 0x0080 /* IXOFF enabled */
+#define RX_CTSFLOWED 0x0100 /* CTSFLOW and CTS dropped */
+#define RX_IXOFFED 0x0200 /* IXOFF and xoff sent */
+#define RX_BUFFERED 0x0400 /* Try and pass on complete packets */
-#define PORT_ISOPEN 0x0001 /* Port open? */
-#define PORT_HUPCL 0x0002 /* Hangup on close? */
-#define PORT_MOPENPEND 0x0004 /* Modem open pending */
-#define PORT_ISPARALLEL 0x0008 /* Parallel port */
-#define PORT_BREAK 0x0010 /* Port on break */
-#define PORT_STATUSPEND 0x0020 /* Status packet pending */
-#define PORT_BREAKPEND 0x0040 /* Break packet pending */
-#define PORT_MODEMPEND 0x0080 /* Modem status packet pending */
-#define PORT_PARALLELBUG 0x0100 /* CD1400 LF -> CR LF bug on parallel
- port */
-#define PORT_FULLMODEM 0x0200 /* Full modem signals */
-#define PORT_RJ45 0x0400 /* RJ45 connector - no RI signal */
-#define PORT_RESTRICTED 0x0600 /* Restricted connector - no RI / DTR */
+#define PORT_ISOPEN 0x0001 /* Port open? */
+#define PORT_HUPCL 0x0002 /* Hangup on close? */
+#define PORT_MOPENPEND 0x0004 /* Modem open pending */
+#define PORT_ISPARALLEL 0x0008 /* Parallel port */
+#define PORT_BREAK 0x0010 /* Port on break */
+#define PORT_STATUSPEND 0x0020 /* Status packet pending */
+#define PORT_BREAKPEND 0x0040 /* Break packet pending */
+#define PORT_MODEMPEND 0x0080 /* Modem status packet pending */
+#define PORT_PARALLELBUG 0x0100 /* CD1400 LF -> CR LF bug on parallel
+ port */
+#define PORT_FULLMODEM 0x0200 /* Full modem signals */
+#define PORT_RJ45 0x0400 /* RJ45 connector - no RI signal */
+#define PORT_RESTRICTED 0x0600 /* Restricted connector - no RI / DTR */
-#define PORT_MODEMBITS 0x0600 /* Mask for modem fields */
+#define PORT_MODEMBITS 0x0600 /* Mask for modem fields */
-#define PORT_WCLOSE 0x0800 /* Waiting for close */
+#define PORT_WCLOSE 0x0800 /* Waiting for close */
#define PORT_HANDSHAKEFIX 0x1000 /* Port has H/W flow control fix */
#define PORT_WASPCLOSED 0x2000 /* Port closed with PCLOSE */
#define DUMPMODE 0x4000 /* Dump RTA mem */
@@ -155,139 +155,128 @@
#define rx_end u4.s1.rx_end_ptr_ptr
#define rx_remove u4.s1.rx_remove_ptr_ptr
#endif
-typedef struct PHB PHB ;
+typedef struct PHB PHB;
struct PHB {
#ifdef RTA
- ushort port;
+ ushort port;
#endif
#ifdef INKERNEL
- WORD source;
+ WORD source;
#else
- union
- {
- ushort source; /* Complete source */
- struct
- {
- unsigned char unit; /* Source unit */
- unsigned char port; /* Source port */
- } s2;
- } u2;
+ union {
+ ushort source; /* Complete source */
+ struct {
+ unsigned char unit; /* Source unit */
+ unsigned char port; /* Source port */
+ } s2;
+ } u2;
#endif
- WORD handshake ;
- WORD status ;
- NUMBER timeout ; /* Maximum of 1.9 seconds */
- WORD link ; /* Send down this link */
+ WORD handshake;
+ WORD status;
+ NUMBER timeout; /* Maximum of 1.9 seconds */
+ WORD link; /* Send down this link */
#ifdef INKERNEL
- WORD destination;
+ WORD destination;
#else
- union
- {
- ushort destination; /* Complete destination */
- struct
- {
- unsigned char unit; /* Destination unit */
- unsigned char port; /* Destination port */
- } s1;
- } u1;
+ union {
+ ushort destination; /* Complete destination */
+ struct {
+ unsigned char unit; /* Destination unit */
+ unsigned char port; /* Destination port */
+ } s1;
+ } u1;
#endif
#ifdef RTA
- ushort tx_pkts_added;
- ushort tx_pkts_removed;
- Q_BUF_ptr tx_q_start ; /* Start of the Q list chain */
- short num_tx_q_bufs ; /* Number of Q buffers in the chain */
- PKT_ptr_ptr tx_add ; /* Add a new Packet here */
- Q_BUF_ptr tx_add_qb; /* Pointer to the add Q buf */
- PKT_ptr_ptr tx_add_st_qbb ; /* Pointer to start of the Q's buf */
- PKT_ptr_ptr tx_add_end_qbb ; /* Pointer to the end of the Q's buf */
- PKT_ptr_ptr tx_remove ; /* Remove a Packet here */
- Q_BUF_ptr tx_remove_qb ; /* Pointer to the remove Q buf */
- PKT_ptr_ptr tx_remove_st_qbb ; /* Pointer to the start of the Q buf */
- PKT_ptr_ptr tx_remove_end_qbb ; /* Pointer to the end of the Q buf */
+ ushort tx_pkts_added;
+ ushort tx_pkts_removed;
+ Q_BUF_ptr tx_q_start; /* Start of the Q list chain */
+ short num_tx_q_bufs; /* Number of Q buffers in the chain */
+ PKT_ptr_ptr tx_add; /* Add a new Packet here */
+ Q_BUF_ptr tx_add_qb; /* Pointer to the add Q buf */
+ PKT_ptr_ptr tx_add_st_qbb; /* Pointer to start of the Q's buf */
+ PKT_ptr_ptr tx_add_end_qbb; /* Pointer to the end of the Q's buf */
+ PKT_ptr_ptr tx_remove; /* Remove a Packet here */
+ Q_BUF_ptr tx_remove_qb; /* Pointer to the remove Q buf */
+ PKT_ptr_ptr tx_remove_st_qbb; /* Pointer to the start of the Q buf */
+ PKT_ptr_ptr tx_remove_end_qbb; /* Pointer to the end of the Q buf */
#endif
#ifdef INKERNEL
- PKT_ptr_ptr tx_start ;
- PKT_ptr_ptr tx_end ;
- PKT_ptr_ptr tx_add ;
- PKT_ptr_ptr tx_remove ;
+ PKT_ptr_ptr tx_start;
+ PKT_ptr_ptr tx_end;
+ PKT_ptr_ptr tx_add;
+ PKT_ptr_ptr tx_remove;
#endif
#ifdef HOST
- union
- {
- struct
- {
- PKT_ptr_ptr tx_start_ptr_ptr;
- PKT_ptr_ptr tx_end_ptr_ptr;
- PKT_ptr_ptr tx_add_ptr_ptr;
- PKT_ptr_ptr tx_remove_ptr_ptr;
- } s1;
- struct
- {
- ushort * tx_start_ptr;
- ushort * tx_end_ptr;
- ushort * tx_add_ptr;
- ushort * tx_remove_ptr;
- } s2;
- } u3;
+ union {
+ struct {
+ PKT_ptr_ptr tx_start_ptr_ptr;
+ PKT_ptr_ptr tx_end_ptr_ptr;
+ PKT_ptr_ptr tx_add_ptr_ptr;
+ PKT_ptr_ptr tx_remove_ptr_ptr;
+ } s1;
+ struct {
+ ushort *tx_start_ptr;
+ ushort *tx_end_ptr;
+ ushort *tx_add_ptr;
+ ushort *tx_remove_ptr;
+ } s2;
+ } u3;
#endif
#ifdef RTA
- ushort rx_pkts_added;
- ushort rx_pkts_removed;
- Q_BUF_ptr rx_q_start ; /* Start of the Q list chain */
- short num_rx_q_bufs ; /* Number of Q buffers in the chain */
- PKT_ptr_ptr rx_add ; /* Add a new Packet here */
- Q_BUF_ptr rx_add_qb ; /* Pointer to the add Q buf */
- PKT_ptr_ptr rx_add_st_qbb ; /* Pointer to start of the Q's buf */
- PKT_ptr_ptr rx_add_end_qbb ; /* Pointer to the end of the Q's buf */
- PKT_ptr_ptr rx_remove ; /* Remove a Packet here */
- Q_BUF_ptr rx_remove_qb ; /* Pointer to the remove Q buf */
- PKT_ptr_ptr rx_remove_st_qbb ; /* Pointer to the start of the Q buf */
- PKT_ptr_ptr rx_remove_end_qbb ; /* Pointer to the end of the Q buf */
+ ushort rx_pkts_added;
+ ushort rx_pkts_removed;
+ Q_BUF_ptr rx_q_start; /* Start of the Q list chain */
+ short num_rx_q_bufs; /* Number of Q buffers in the chain */
+ PKT_ptr_ptr rx_add; /* Add a new Packet here */
+ Q_BUF_ptr rx_add_qb; /* Pointer to the add Q buf */
+ PKT_ptr_ptr rx_add_st_qbb; /* Pointer to start of the Q's buf */
+ PKT_ptr_ptr rx_add_end_qbb; /* Pointer to the end of the Q's buf */
+ PKT_ptr_ptr rx_remove; /* Remove a Packet here */
+ Q_BUF_ptr rx_remove_qb; /* Pointer to the remove Q buf */
+ PKT_ptr_ptr rx_remove_st_qbb; /* Pointer to the start of the Q buf */
+ PKT_ptr_ptr rx_remove_end_qbb; /* Pointer to the end of the Q buf */
#endif
#ifdef INKERNEL
- PKT_ptr_ptr rx_start ;
- PKT_ptr_ptr rx_end ;
- PKT_ptr_ptr rx_add ;
- PKT_ptr_ptr rx_remove ;
+ PKT_ptr_ptr rx_start;
+ PKT_ptr_ptr rx_end;
+ PKT_ptr_ptr rx_add;
+ PKT_ptr_ptr rx_remove;
#endif
#ifdef HOST
- union
- {
- struct
- {
- PKT_ptr_ptr rx_start_ptr_ptr;
- PKT_ptr_ptr rx_end_ptr_ptr;
- PKT_ptr_ptr rx_add_ptr_ptr;
- PKT_ptr_ptr rx_remove_ptr_ptr;
- } s1;
- struct
- {
- ushort * rx_start_ptr;
- ushort * rx_end_ptr;
- ushort * rx_add_ptr;
- ushort * rx_remove_ptr;
- } s2;
- } u4;
+ union {
+ struct {
+ PKT_ptr_ptr rx_start_ptr_ptr;
+ PKT_ptr_ptr rx_end_ptr_ptr;
+ PKT_ptr_ptr rx_add_ptr_ptr;
+ PKT_ptr_ptr rx_remove_ptr_ptr;
+ } s1;
+ struct {
+ ushort *rx_start_ptr;
+ ushort *rx_end_ptr;
+ ushort *rx_add_ptr;
+ ushort *rx_remove_ptr;
+ } s2;
+ } u4;
#endif
-#ifdef RTA /* some fields for the remotes */
- ushort flush_count; /* Count of write flushes */
- ushort txmode; /* Modes for tx */
- ushort rxmode; /* Modes for rx */
- ushort portmode; /* Generic modes */
- ushort column; /* TAB3 column count */
- ushort tx_subscript; /* (TX) Subscript into data field */
- ushort rx_subscript; /* (RX) Subscript into data field */
- PKT_ptr rx_incomplete; /* Hold an incomplete packet here */
- ushort modem_bits; /* Modem bits to mask */
- ushort lastModem; /* Modem control lines. */
- ushort addr; /* Address for sub commands */
- ushort MonitorTstate; /* TRUE if monitoring tstop */
+#ifdef RTA /* some fields for the remotes */
+ ushort flush_count; /* Count of write flushes */
+ ushort txmode; /* Modes for tx */
+ ushort rxmode; /* Modes for rx */
+ ushort portmode; /* Generic modes */
+ ushort column; /* TAB3 column count */
+ ushort tx_subscript; /* (TX) Subscript into data field */
+ ushort rx_subscript; /* (RX) Subscript into data field */
+ PKT_ptr rx_incomplete; /* Hold an incomplete packet here */
+ ushort modem_bits; /* Modem bits to mask */
+ ushort lastModem; /* Modem control lines. */
+ ushort addr; /* Address for sub commands */
+ ushort MonitorTstate; /* TRUE if monitoring tstop */
#endif
- } ;
+};
#endif
/*********** end of file ***********/
-
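
The handshake bits near the top of phb.h describe a small protocol: the card's LRT raises PHB_HANDSHAKE_SET, the host-side driver answers with PHB_HANDSHAKE_RESET, and the card's LTT clears both. The fragment below illustrates only the acknowledgement step; the real driver reaches the PHB through the card's dual-ported RAM (readw/writew), which is elided here, and the helper name is invented.

static void rio_ack_handshake(struct PHB *phb)
{
	if (phb->handshake & PHB_HANDSHAKE_SET)
		phb->handshake |= PHB_HANDSHAKE_RESET;
}
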
diff --git a/drivers/char/rio/pkt.h b/drivers/char/rio/pkt.h
index 66bb2ff0f69..882fd429ac2 100644
--- a/drivers/char/rio/pkt.h
+++ b/drivers/char/rio/pkt.h
@@ -69,52 +69,44 @@
#define CONTROL_PKT_TTL_MASK (PKT_TTL_MASK << 8)
#define CONTROL_DATA_WNDW (DATA_WNDW << 8)
-struct PKT {
+struct PKT {
#ifdef INKERNEL
- BYTE dest_unit ; /* Destination Unit Id */
- BYTE dest_port ; /* Destination POrt */
- BYTE src_unit ; /* Source Unit Id */
- BYTE src_port ; /* Source POrt */
+ BYTE dest_unit; /* Destination Unit Id */
+	BYTE dest_port;	/* Destination Port */
+ BYTE src_unit; /* Source Unit Id */
+	BYTE src_port;	/* Source Port */
#else
- union
- {
- ushort destination; /* Complete destination */
- struct
- {
- unsigned char unit; /* Destination unit */
- unsigned char port; /* Destination port */
- } s1;
- } u1;
- union
- {
- ushort source; /* Complete source */
- struct
- {
- unsigned char unit; /* Source unit */
- unsigned char port; /* Source port */
- } s2;
- } u2;
+ union {
+ ushort destination; /* Complete destination */
+ struct {
+ unsigned char unit; /* Destination unit */
+ unsigned char port; /* Destination port */
+ } s1;
+ } u1;
+ union {
+ ushort source; /* Complete source */
+ struct {
+ unsigned char unit; /* Source unit */
+ unsigned char port; /* Source port */
+ } s2;
+ } u2;
#endif
#ifdef INKERNEL
- BYTE len ;
- BYTE control;
+ BYTE len;
+ BYTE control;
#else
- union
- {
- ushort control;
- struct
- {
- unsigned char len;
- unsigned char control;
- } s3;
- } u3;
+ union {
+ ushort control;
+ struct {
+ unsigned char len;
+ unsigned char control;
+ } s3;
+ } u3;
#endif
- BYTE data[PKT_MAX_DATA_LEN] ;
- /* Actual data :-) */
- WORD csum ; /* C-SUM */
- } ;
+ BYTE data[PKT_MAX_DATA_LEN];
+ /* Actual data :-) */
+ WORD csum; /* C-SUM */
+};
#endif
/*********** end of file ***********/
-
-
diff --git a/drivers/char/rio/poll.h b/drivers/char/rio/poll.h
index d9b8e983e17..9616ee4c6cd 100644
--- a/drivers/char/rio/poll.h
+++ b/drivers/char/rio/poll.h
@@ -39,7 +39,7 @@
#ifndef lint
#ifdef SCCS_LABELS
-static char *_rio_poll_h_sccs = "@(#)poll.h 1.2" ;
+static char *_rio_poll_h_sccs = "@(#)poll.h 1.2";
#endif
#endif
@@ -54,23 +54,20 @@ static char *_rio_poll_h_sccs = "@(#)poll.h 1.2" ;
#define POLL_PERIOD (int) SECOND
/* The various poll commands */
-#define POLL_POLL 0 /* We are connected and happy.. */
-#define POLL_INTRO 1 /* Introduction packet */
-#define POLL_TOPOLOGY 2 /* Topology update */
-#define POLL_ASSIGN 3 /* ID assign */
-#define POLL_FOAD 4 /* F*** Off And Die */
-#define POLL_LMD 5 /* Let Me Die */
-#define POLL_DYB 6 /* Die You Ba***** */
+#define POLL_POLL 0 /* We are connected and happy.. */
+#define POLL_INTRO 1 /* Introduction packet */
+#define POLL_TOPOLOGY 2 /* Topology update */
+#define POLL_ASSIGN 3 /* ID assign */
+#define POLL_FOAD 4 /* F*** Off And Die */
+#define POLL_LMD 5 /* Let Me Die */
+#define POLL_DYB 6 /* Die You Ba***** */
/* The way data fields are split up for POLL packets */
-#define POLL_HOST_SERIAL 2 /* Host who booted me */
-#define POLL_MY_SERIAL 6 /* My serial number */
-#define POLL_YOUR_ID 1 /* Your ID number */
-#define POLL_TOPOLOGY_FIELDS 2 /* Topology maps */
+#define POLL_HOST_SERIAL 2 /* Host who booted me */
+#define POLL_MY_SERIAL 6 /* My serial number */
+#define POLL_YOUR_ID 1 /* Your ID number */
+#define POLL_TOPOLOGY_FIELDS 2 /* Topology maps */
#endif
/*********** end of file ***********/
-
-
-
diff --git a/drivers/char/rio/port.h b/drivers/char/rio/port.h
index 8506af06aa9..c99b1e70fdc 100644
--- a/drivers/char/rio/port.h
+++ b/drivers/char/rio/port.h
@@ -46,96 +46,94 @@ static char *_port_h_sccs_ = "@(#)port.h 1.3";
*/
#ifdef STATS
-struct RIOStats
-{
+struct RIOStats {
/*
- ** interrupt statistics
- */
- uint BreakIntCnt;
- uint ModemOffCnt;
- uint ModemOnCnt;
- uint RxIntCnt;
- uint TxIntCnt;
+ ** interrupt statistics
+ */
+ uint BreakIntCnt;
+ uint ModemOffCnt;
+ uint ModemOnCnt;
+ uint RxIntCnt;
+ uint TxIntCnt;
/*
- ** throughput statistics
- */
- uint RxCharCnt;
- uint RxPktCnt;
- uint RxSaveCnt;
- uint TxCharCnt;
- uint TxPktCnt;
+ ** throughput statistics
+ */
+ uint RxCharCnt;
+ uint RxPktCnt;
+ uint RxSaveCnt;
+ uint TxCharCnt;
+ uint TxPktCnt;
/*
- ** driver entry statistics
- */
- uint CloseCnt;
- uint IoctlCnt;
- uint OpenCnt;
- uint ReadCnt;
- uint WriteCnt;
+ ** driver entry statistics
+ */
+ uint CloseCnt;
+ uint IoctlCnt;
+ uint OpenCnt;
+ uint ReadCnt;
+ uint WriteCnt;
/*
- ** proc statistics
- */
- uint BlockCnt;
- uint OutputCnt;
- uint ResumeCnt;
- uint RflushCnt;
- uint SuspendCnt;
- uint TbreakCnt;
- uint TimeoutCnt;
- uint UnblockCnt;
- uint WflushCnt;
- uint WFBodgeCnt;
+ ** proc statistics
+ */
+ uint BlockCnt;
+ uint OutputCnt;
+ uint ResumeCnt;
+ uint RflushCnt;
+ uint SuspendCnt;
+ uint TbreakCnt;
+ uint TimeoutCnt;
+ uint UnblockCnt;
+ uint WflushCnt;
+ uint WFBodgeCnt;
};
#endif
/*
** Port data structure
*/
-struct Port
-{
- struct gs_port gs;
- int PortNum; /* RIO port no., 0-511 */
- struct Host *HostP;
- volatile caddr_t Caddr;
- ushort HostPort; /* Port number on host card */
- uchar RupNum; /* Number of RUP for port */
- uchar ID2; /* Second ID of RTA for port */
- ulong State; /* FLAGS for open & xopen */
-#define RIO_LOPEN 0x00001 /* Local open */
-#define RIO_MOPEN 0x00002 /* Modem open */
-#define RIO_WOPEN 0x00004 /* Waiting for open */
-#define RIO_CLOSING 0x00008 /* The port is being close */
-#define RIO_XPBUSY 0x00010 /* Transparent printer busy */
-#define RIO_BREAKING 0x00020 /* Break in progress */
-#define RIO_DIRECT 0x00040 /* Doing Direct output */
-#define RIO_EXCLUSIVE 0x00080 /* Stream open for exclusive use */
-#define RIO_NDELAY 0x00100 /* Stream is open FNDELAY */
-#define RIO_CARR_ON 0x00200 /* Stream has carrier present */
-#define RIO_XPWANTR 0x00400 /* Stream wanted by Xprint */
-#define RIO_RBLK 0x00800 /* Stream is read-blocked */
-#define RIO_BUSY 0x01000 /* Stream is BUSY for write */
-#define RIO_TIMEOUT 0x02000 /* Stream timeout in progress */
-#define RIO_TXSTOP 0x04000 /* Stream output is stopped */
-#define RIO_WAITFLUSH 0x08000 /* Stream waiting for flush */
-#define RIO_DYNOROD 0x10000 /* Drain failed */
-#define RIO_DELETED 0x20000 /* RTA has been deleted */
-#define RIO_ISSCANCODE 0x40000 /* This line is in scancode mode */
+struct Port {
+ struct gs_port gs;
+ int PortNum; /* RIO port no., 0-511 */
+ struct Host *HostP;
+ volatile caddr_t Caddr;
+ ushort HostPort; /* Port number on host card */
+ uchar RupNum; /* Number of RUP for port */
+ uchar ID2; /* Second ID of RTA for port */
+ ulong State; /* FLAGS for open & xopen */
+#define RIO_LOPEN 0x00001 /* Local open */
+#define RIO_MOPEN 0x00002 /* Modem open */
+#define RIO_WOPEN 0x00004 /* Waiting for open */
+#define RIO_CLOSING 0x00008	/* The port is being closed */
+#define RIO_XPBUSY 0x00010 /* Transparent printer busy */
+#define RIO_BREAKING 0x00020 /* Break in progress */
+#define RIO_DIRECT 0x00040 /* Doing Direct output */
+#define RIO_EXCLUSIVE 0x00080 /* Stream open for exclusive use */
+#define RIO_NDELAY 0x00100 /* Stream is open FNDELAY */
+#define RIO_CARR_ON 0x00200 /* Stream has carrier present */
+#define RIO_XPWANTR 0x00400 /* Stream wanted by Xprint */
+#define RIO_RBLK 0x00800 /* Stream is read-blocked */
+#define RIO_BUSY 0x01000 /* Stream is BUSY for write */
+#define RIO_TIMEOUT 0x02000 /* Stream timeout in progress */
+#define RIO_TXSTOP 0x04000 /* Stream output is stopped */
+#define RIO_WAITFLUSH 0x08000 /* Stream waiting for flush */
+#define RIO_DYNOROD 0x10000 /* Drain failed */
+#define RIO_DELETED 0x20000 /* RTA has been deleted */
+#define RIO_ISSCANCODE 0x40000 /* This line is in scancode mode */
#define RIO_USING_EUC 0x100000 /* Using extended Unix chars */
#define RIO_CAN_COOK 0x200000 /* This line can do cooking */
-#define RIO_TRIAD_MODE 0x400000 /* Enable TRIAD special ops. */
-#define RIO_TRIAD_BLOCK 0x800000 /* Next read will block */
-#define RIO_TRIAD_FUNC 0x1000000 /* Seen a function key coming in */
-#define RIO_THROTTLE_RX 0x2000000 /* RX needs to be throttled. */
+#define RIO_TRIAD_MODE 0x400000 /* Enable TRIAD special ops. */
+#define RIO_TRIAD_BLOCK 0x800000 /* Next read will block */
+#define RIO_TRIAD_FUNC 0x1000000 /* Seen a function key coming in */
+#define RIO_THROTTLE_RX 0x2000000 /* RX needs to be throttled. */
- ulong Config; /* FLAGS for NOREAD.... */
-#define RIO_NOREAD 0x0001 /* Are not allowed to read port */
-#define RIO_NOWRITE 0x0002 /* Are not allowed to write port */
-#define RIO_NOXPRINT 0x0004 /* Are not allowed to xprint port */
-#define RIO_NOMASK 0x0007 /* All not allowed things */
-#define RIO_IXANY 0x0008 /* Port is allowed ixany */
-#define RIO_MODEM 0x0010 /* Stream is a modem device */
-#define RIO_IXON 0x0020 /* Port is allowed ixon */
-#define RIO_WAITDRAIN 0x0040 /* Wait for port to completely drain */
+ ulong Config; /* FLAGS for NOREAD.... */
+#define RIO_NOREAD 0x0001 /* Are not allowed to read port */
+#define RIO_NOWRITE 0x0002 /* Are not allowed to write port */
+#define RIO_NOXPRINT 0x0004 /* Are not allowed to xprint port */
+#define RIO_NOMASK 0x0007 /* All not allowed things */
+#define RIO_IXANY 0x0008 /* Port is allowed ixany */
+#define RIO_MODEM 0x0010 /* Stream is a modem device */
+#define RIO_IXON 0x0020 /* Port is allowed ixon */
+#define RIO_WAITDRAIN 0x0040 /* Wait for port to completely drain */
#define RIO_MAP_50_TO_50 0x0080 /* Map 50 baud to 50 baud */
#define RIO_MAP_110_TO_110 0x0100 /* Map 110 baud to 110 baud */
@@ -144,92 +142,90 @@ struct Port
** As LynxOS does not appear to support Hardware Flow Control .....
** Define our own flow control flags in 'Config'.
*/
-#define RIO_CTSFLOW 0x0200 /* RIO's own CTSFLOW flag */
-#define RIO_RTSFLOW 0x0400 /* RIO's own RTSFLOW flag */
+#define RIO_CTSFLOW 0x0200 /* RIO's own CTSFLOW flag */
+#define RIO_RTSFLOW 0x0400 /* RIO's own RTSFLOW flag */
- struct PHB *PhbP; /* pointer to PHB for port */
- WORD *TxAdd; /* Add packets here */
- WORD *TxStart; /* Start of add array */
- WORD *TxEnd; /* End of add array */
- WORD *RxRemove; /* Remove packets here */
- WORD *RxStart; /* Start of remove array */
- WORD *RxEnd; /* End of remove array */
- uint RtaUniqueNum; /* Unique number of RTA */
- ushort PortState; /* status of port */
- ushort ModemState; /* status of modem lines */
- ulong ModemLines; /* Modem bits sent to RTA */
- uchar CookMode; /* who expands CR/LF? */
- uchar ParamSem; /* Prevent write during param */
- uchar Mapped; /* if port mapped onto host */
- uchar SecondBlock; /* if port belongs to 2nd block
- of 16 port RTA */
- uchar InUse; /* how many pre-emptive cmds */
- uchar Lock; /* if params locked */
- uchar Store; /* if params stored across closes */
- uchar FirstOpen; /* TRUE if first time port opened */
- uchar FlushCmdBodge; /* if doing a (non)flush */
- uchar MagicFlags; /* require intr processing */
+ struct PHB *PhbP; /* pointer to PHB for port */
+ WORD *TxAdd; /* Add packets here */
+ WORD *TxStart; /* Start of add array */
+ WORD *TxEnd; /* End of add array */
+ WORD *RxRemove; /* Remove packets here */
+ WORD *RxStart; /* Start of remove array */
+ WORD *RxEnd; /* End of remove array */
+ uint RtaUniqueNum; /* Unique number of RTA */
+ ushort PortState; /* status of port */
+ ushort ModemState; /* status of modem lines */
+ ulong ModemLines; /* Modem bits sent to RTA */
+ uchar CookMode; /* who expands CR/LF? */
+ uchar ParamSem; /* Prevent write during param */
+ uchar Mapped; /* if port mapped onto host */
+ uchar SecondBlock; /* if port belongs to 2nd block
+ of 16 port RTA */
+ uchar InUse; /* how many pre-emptive cmds */
+ uchar Lock; /* if params locked */
+ uchar Store; /* if params stored across closes */
+ uchar FirstOpen; /* TRUE if first time port opened */
+ uchar FlushCmdBodge; /* if doing a (non)flush */
+ uchar MagicFlags; /* require intr processing */
#define MAGIC_FLUSH 0x01 /* mirror of WflushFlag */
#define MAGIC_REBOOT 0x02 /* RTA re-booted, re-open ports */
#define MORE_OUTPUT_EYGOR 0x04 /* riotproc failed to empty clists */
- uchar WflushFlag; /* 1 How many WFLUSHs active */
+ uchar WflushFlag; /* 1 How many WFLUSHs active */
/*
** Transparent print stuff
*/
- struct Xprint
- {
+ struct Xprint {
#ifndef MAX_XP_CTRL_LEN
-#define MAX_XP_CTRL_LEN 16 /* ALSO IN DAEMON.H */
+#define MAX_XP_CTRL_LEN 16 /* ALSO IN DAEMON.H */
#endif
- uint XpCps;
- char XpOn[MAX_XP_CTRL_LEN];
- char XpOff[MAX_XP_CTRL_LEN];
- ushort XpLen; /* strlen(XpOn)+strlen(XpOff) */
- uchar XpActive;
- uchar XpLastTickOk; /* TRUE if we can process */
+ uint XpCps;
+ char XpOn[MAX_XP_CTRL_LEN];
+ char XpOff[MAX_XP_CTRL_LEN];
+ ushort XpLen; /* strlen(XpOn)+strlen(XpOff) */
+ uchar XpActive;
+ uchar XpLastTickOk; /* TRUE if we can process */
#define XP_OPEN 00001
#define XP_RUNABLE 00002
- struct ttystatics *XttyP;
- } Xprint;
+ struct ttystatics *XttyP;
+ } Xprint;
#ifdef VPIX
- v86_t *StashP;
- uint IntMask;
- struct termss VpixSs;
- uchar ModemStatusReg; /* Modem status register */
+ v86_t *StashP;
+ uint IntMask;
+ struct termss VpixSs;
+ uchar ModemStatusReg; /* Modem status register */
#endif
- uchar RxDataStart;
- uchar Cor2Copy; /* copy of COR2 */
- char *Name; /* points to the Rta's name */
+ uchar RxDataStart;
+ uchar Cor2Copy; /* copy of COR2 */
+ char *Name; /* points to the Rta's name */
#ifdef STATS
- struct RIOStats Stat; /* ports statistics */
+ struct RIOStats Stat; /* ports statistics */
#endif
- char *TxRingBuffer;
- ushort TxBufferIn; /* New data arrives here */
- ushort TxBufferOut; /* Intr removes data here */
- ushort OldTxBufferOut; /* Indicates if draining */
- int TimeoutId; /* Timeout ID */
- uint Debug;
- uchar WaitUntilBooted; /* True if open should block */
- uint statsGather; /* True if gathering stats */
- ulong txchars; /* Chars transmitted */
- ulong rxchars; /* Chars received */
- ulong opens; /* port open count */
- ulong closes; /* port close count */
- ulong ioctls; /* ioctl count */
- uchar LastRxTgl; /* Last state of rx toggle bit */
- spinlock_t portSem; /* Lock using this sem */
- int MonitorTstate; /* Monitoring ? */
- int timeout_id; /* For calling 100 ms delays */
- int timeout_sem;/* For calling 100 ms delays */
- int firstOpen; /* First time open ? */
- char * p; /* save the global struc here .. */
+ char *TxRingBuffer;
+ ushort TxBufferIn; /* New data arrives here */
+ ushort TxBufferOut; /* Intr removes data here */
+ ushort OldTxBufferOut; /* Indicates if draining */
+ int TimeoutId; /* Timeout ID */
+ uint Debug;
+ uchar WaitUntilBooted; /* True if open should block */
+ uint statsGather; /* True if gathering stats */
+ ulong txchars; /* Chars transmitted */
+ ulong rxchars; /* Chars received */
+ ulong opens; /* port open count */
+ ulong closes; /* port close count */
+ ulong ioctls; /* ioctl count */
+ uchar LastRxTgl; /* Last state of rx toggle bit */
+ spinlock_t portSem; /* Lock using this sem */
+ int MonitorTstate; /* Monitoring ? */
+ int timeout_id; /* For calling 100 ms delays */
+ int timeout_sem; /* For calling 100 ms delays */
+ int firstOpen; /* First time open ? */
+	char *p;		/* save the global struct here .. */
};
-struct ModuleInfo
-{
- char *Name;
- uint Flags[4]; /* one per port on a module */
+struct ModuleInfo {
+ char *Name;
+ uint Flags[4]; /* one per port on a module */
};
#endif
@@ -238,8 +234,8 @@ struct ModuleInfo
** runs into problems with differing struct sizes between driver and config.
*/
struct PortParams {
- uint Port;
- ulong Config;
- ulong State;
- struct ttystatics *TtyP;
+ uint Port;
+ ulong Config;
+ ulong State;
+ struct ttystatics *TtyP;
};
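
The State word in struct Port above is a plain flag set. Below is a tiny sketch of the sort of test the open path performs on it; the real driver holds the port's spinlock (portSem) around such checks, which is omitted here, and the helper name is hypothetical.

static int rio_port_openable(struct Port *port)
{
	if (port->State & RIO_DELETED)	/* the RTA has been removed */
		return 0;
	if (port->State & RIO_CLOSING)	/* a close is still in progress */
		return 0;
	return 1;
}
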
diff --git a/drivers/char/rio/proto.h b/drivers/char/rio/proto.h
index ddff0ef84e3..f9a3376333e 100644
--- a/drivers/char/rio/proto.h
+++ b/drivers/char/rio/proto.h
@@ -23,15 +23,15 @@
/*
** boot.c
*/
-void init_boot( char *p, short stage);
+void init_boot(char *p, short stage);
/*
** disconct.c
*/
-void kill_boot ( LPB *link );
-void disconnected( LPB *link );
-short boot_3( LPB *link, PKT *pkt );
-short send_3_pkt( LPB *link, PKT *pkt);
+void kill_boot(LPB * link);
+void disconnected(LPB * link);
+short boot_3(LPB * link, PKT * pkt);
+short send_3_pkt(LPB * link, PKT * pkt);
/*
** error.c
@@ -41,116 +41,116 @@ void du_error(void);
/*
** formpkt.c
*/
-ushort sum_it( PKT *pkt ) ;
-void form_rup_pkt( RUP *form_rup, PKT *pkt );
-void form_poll_pkt ( int type, LPB *link, int node );
-void form_route_pkt ( int type, PKT *pkt, LPB *link );
+ushort sum_it(PKT * pkt);
+void form_rup_pkt(RUP * form_rup, PKT * pkt);
+void form_poll_pkt(int type, LPB * link, int node);
+void form_route_pkt(int type, PKT * pkt, LPB * link);
/*
** idle.c
*/
-void idle( Process *idle_p );
+void idle(Process * idle_p);
/*
** init.c
*/
void general_init(void);
-void mem_halt( int error);
+void mem_halt(int error);
/*
** linkinit.c
*/
-void initlink( u_short number, LPB *link);
-void runlink( LPB *link);
+void initlink(u_short number, LPB * link);
+void runlink(LPB * link);
/*
** list.c
*/
PKT *get_free_start(void);
-void put_free_start( PKT *pkt);
+void put_free_start(PKT * pkt);
#ifdef HOST
-int can_remove_transmit ( PKT **pkt, PKT *pointer );
+int can_remove_transmit(PKT ** pkt, PKT * pointer);
#endif
#ifdef RTA
-int spl7 ( void );
-int spl0 ( void );
-Q_BUF *get_free_q( void );
+int spl7(void);
+int spl0(void);
+Q_BUF *get_free_q(void);
PKT *get_free_end(void);
-int add_end( PKT *pkt, PHB *phb, int type);
-unsigned short free_packets( PHB *phb, int type);
-int can_remove_start( PKT **pkt, PHB *phb, int type);
-int can_add_start( PHB *phb, int type);
-int can_add_end( PHB *phb, int type);
-void put_free_end( PKT *pkt);
-int remove_start( PKT **pkt, PHB *phb, int type);
+int add_end(PKT * pkt, PHB * phb, int type);
+unsigned short free_packets(PHB * phb, int type);
+int can_remove_start(PKT ** pkt, PHB * phb, int type);
+int can_add_start(PHB * phb, int type);
+int can_add_end(PHB * phb, int type);
+void put_free_end(PKT * pkt);
+int remove_start(PKT ** pkt, PHB * phb, int type);
#endif
/*
** Lrt.c
*/
-void lrt( Process *lrt_p, LPB *link );
+void lrt(Process * lrt_p, LPB * link);
#ifdef RTA
-void set_led_red ( LPB *link );
+void set_led_red(LPB * link);
#endif
/*
** ltt.c
*/
-void ltt( Process *ltt_p, LPB *link, PHB *phb_ptr[] );
-void send_poll ( LPB *link );
-void request_id ( LPB *link );
-void send_topology_update ( LPB *link );
-void send_topology ( LPB *link );
-void supply_id ( LPB *link );
+void ltt(Process * ltt_p, LPB * link, PHB * phb_ptr[]);
+void send_poll(LPB * link);
+void request_id(LPB * link);
+void send_topology_update(LPB * link);
+void send_topology(LPB * link);
+void supply_id(LPB * link);
#ifdef RTA
-void redirect_queue ( LPB *link, ushort flush );
-int obtain_rup ( int rup_number, PKT **pkt_address, LPB *link );
+void redirect_queue(LPB * link, ushort flush);
+int obtain_rup(int rup_number, PKT ** pkt_address, LPB * link);
#endif
#ifdef TESTING_PERF
-int consume_cpu( void );
+int consume_cpu(void);
#endif
/*
** lttwake.c
*/
#ifdef HOST
-void ltt_wakeup( Process *ltt_wakeup_p );
+void ltt_wakeup(Process * ltt_wakeup_p);
#endif
/*
** mapgen.c
*/
-void generate_id_map( short mapping, ROUTE_STR route[] );
-void gen_map( int mapping, int looking_at, int come_from, ROUTE_STR route[], int link, int *ttl );
-void adjust_ttl( int mapping, int looking_at, int come_from, ROUTE_STR route[], int link, int *ttl);
+void generate_id_map(short mapping, ROUTE_STR route[]);
+void gen_map(int mapping, int looking_at, int come_from, ROUTE_STR route[], int link, int *ttl);
+void adjust_ttl(int mapping, int looking_at, int come_from, ROUTE_STR route[], int link, int *ttl);
void init_sys_map(void);
/*
** mmu.c
*/
-char *rio_malloc( unsigned int amount);
-char *rio_calloc( unsigned int num, unsigned int size);
-ERROR rio_mmu_init( uint total_mem );
+char *rio_malloc(unsigned int amount);
+char *rio_calloc(unsigned int num, unsigned int size);
+ERROR rio_mmu_init(uint total_mem);
/*
** partn.c
*/
-void partition_tx( struct PHB *phb, u_short tx_size, u_short rx_size, u_short rx_limit);
+void partition_tx(struct PHB *phb, u_short tx_size, u_short rx_size, u_short rx_limit);
/*
** poll.c
*/
-void tx_poll( Process *tx_poll_p);
+void tx_poll(Process * tx_poll_p);
/*
** process.c
*/
-int get_proc_space( Process **pd, int **pws, int wssize);
+int get_proc_space(Process ** pd, int **pws, int wssize);
/*
** readrom.c
@@ -160,85 +160,85 @@ void read_serial_number(char *buf);
/*
** rio.c
*/
-int main( void );
+int main(void);
/*
** route.c
*/
-void route_update ( PKT *pkt, LPB *link);
+void route_update(PKT * pkt, LPB * link);
/*
** rtainit.c
*/
#if defined(RTA)
void rta_init(ushort RtaType);
-#endif /* defined(RTA) */
+#endif /* defined(RTA) */
/*
** rupboot.c
*/
-void rup_boot( PKT *pkt, RUP *this_rup, LPB *link);
+void rup_boot(PKT * pkt, RUP * this_rup, LPB * link);
#ifdef RTA
-void kill_your_neighbour( int link_to_kill );
+void kill_your_neighbour(int link_to_kill);
#endif
/*
** rupcmd.c
*/
-void rup_command( PKT *pkt, struct RUP *this_rup, LPB *link);
+void rup_command(PKT * pkt, struct RUP *this_rup, LPB * link);
/*
** ruperr.c
*/
-void rup_error( PKT *pkt, RUP *this_rup, LPB *link );
-void illegal_cmd( PKT *src_pkt );
+void rup_error(PKT * pkt, RUP * this_rup, LPB * link);
+void illegal_cmd(PKT * src_pkt);
/*
** ruppoll.c
*/
-void rup_poll( PKT *pkt, RUP *this_rup, LPB *link );
+void rup_poll(PKT * pkt, RUP * this_rup, LPB * link);
/*
** ruppower.c
*/
-void rup_power( PKT *pkt, RUP *this_rup, LPB *link );
+void rup_power(PKT * pkt, RUP * this_rup, LPB * link);
/*
** ruprm.c
*/
-void rup_route_map( PKT *pkt, RUP *this_rup, LPB *link);
+void rup_route_map(PKT * pkt, RUP * this_rup, LPB * link);
/*
** rupstat.c
*/
-void rup_status( PKT *pkt, RUP *this_rup, LPB *link);
+void rup_status(PKT * pkt, RUP * this_rup, LPB * link);
/*
** rupsync.c
*/
-void rup_sync( PKT *pkt);
+void rup_sync(PKT * pkt);
/*
** rxpkt.c
*/
-ERROR rx_pkt( PKT_ptr_ptr pkt_address, LPB *link);
+ERROR rx_pkt(PKT_ptr_ptr pkt_address, LPB * link);
/*
** sendsts.c
*/
-void send_status( PKT *requesting_pkt, RUP *this_rup);
+void send_status(PKT * requesting_pkt, RUP * this_rup);
/*
** serial.c
*/
-void assign_serial ( char *ser_in, char *ser_out);
-int cmp_serial ( char *ser_1, char *ser_2);
+void assign_serial(char *ser_in, char *ser_out);
+int cmp_serial(char *ser_1, char *ser_2);
/*
** txpkt.c
*/
-ERROR tx_pkt( PKT *pkt, LPB *link);
-short send_sync( LPB *link);
+ERROR tx_pkt(PKT * pkt, LPB * link);
+short send_sync(LPB * link);
-#endif /* _prototypes_h */
+#endif /* _prototypes_h */
diff --git a/drivers/char/rio/protsts.h b/drivers/char/rio/protsts.h
index 848111ac938..69fc4bc3415 100644
--- a/drivers/char/rio/protsts.h
+++ b/drivers/char/rio/protsts.h
@@ -115,5 +115,3 @@
#endif
/*********** end of file ***********/
-
-
diff --git a/drivers/char/rio/qbuf.h b/drivers/char/rio/qbuf.h
index 1fce02f8fcf..acd9e8e5307 100644
--- a/drivers/char/rio/qbuf.h
+++ b/drivers/char/rio/qbuf.h
@@ -40,7 +40,7 @@
#ifndef lint
#ifdef SCCS_LABELS
-static char *_rio_qbuf_h_sccs = "@(#)qbuf.h 1.1" ;
+static char *_rio_qbuf_h_sccs = "@(#)qbuf.h 1.1";
#endif
#endif
@@ -52,16 +52,15 @@ static char *_rio_qbuf_h_sccs = "@(#)qbuf.h 1.1" ;
#define PKTS_PER_BUFFER (220 / PKT_LENGTH)
#endif
-typedef struct Q_BUF Q_BUF ;
-struct Q_BUF {
- Q_BUF_ptr next ;
- Q_BUF_ptr prev ;
- PKT_ptr buf[PKTS_PER_BUFFER] ;
- } ;
+typedef struct Q_BUF Q_BUF;
+struct Q_BUF {
+ Q_BUF_ptr next;
+ Q_BUF_ptr prev;
+ PKT_ptr buf[PKTS_PER_BUFFER];
+};
#endif
/*********** end of file ***********/
-
diff --git a/drivers/char/rio/rio.h b/drivers/char/rio/rio.h
index 13a9931958b..7f45e1ab533 100644
--- a/drivers/char/rio/rio.h
+++ b/drivers/char/rio/rio.h
@@ -72,8 +72,8 @@ static char *_rio_h_sccs_ = "@(#)rio.h 1.3";
#define RIO_HOSTS 4 /* number of hosts that can be found */
#define PORTS_PER_HOST 128 /* number of ports per host */
#define LINKS_PER_UNIT 4 /* number of links from a host */
-#define RIO_PORTS (PORTS_PER_HOST * RIO_HOSTS) /* max. no. of ports */
-#define RTAS_PER_HOST (MAX_RUP) /* number of RTAs per host */
+#define RIO_PORTS (PORTS_PER_HOST * RIO_HOSTS) /* max. no. of ports */
+#define RTAS_PER_HOST (MAX_RUP) /* number of RTAs per host */
#define PORTS_PER_RTA (PORTS_PER_HOST/RTAS_PER_HOST) /* ports on a rta */
#define PORTS_PER_MODULE 4 /* number of ports on a plug-in module */
/* number of modules on an RTA */
@@ -216,10 +216,9 @@ static char *_rio_h_sccs_ = "@(#)rio.h 1.3";
#define RIO_PRI (PZERO+10)
#define RIO_CLOSE_PRI PZERO-1 /* uninterruptible sleeps for close */
-typedef struct DbInf
-{
- uint Flag;
- char Name[8];
+typedef struct DbInf {
+ uint Flag;
+ char Name[8];
} DbInf;
#ifndef TRUE
@@ -251,7 +250,7 @@ typedef struct DbInf
*((uint *)PK) = PP->PacketInfo; \
}
-#define RIO_LINK_ENABLE 0x80FF /* FF is a hack, mainly for Mips, to */
+#define RIO_LINK_ENABLE 0x80FF /* FF is a hack, mainly for Mips, to */
/* prevent a really stupid race condition. */
#define NOT_INITIALISED 0
@@ -291,4 +290,4 @@ typedef struct DbInf
#define DIST_LINESW_OUTPUT 0x40
#define DIST_LINESW_MDMINT 0x80
-#endif /* __rio_h__ */
+#endif /* __rio_h__ */
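[Editor's aside] For orientation, the port limits in the first rio.h hunk multiply out to the 512-port ceiling mentioned later in rio_linux.c. A trivial check, using only values visible in this header:

	/* 128 ports per host x 4 hosts = 512 ports in total,
	   i.e. what RIO_PORTS expands to above. */
	#define EXAMPLE_RIO_PORTS	(128 * 4)	/* 512 */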
diff --git a/drivers/char/rio/rio_linux.c b/drivers/char/rio/rio_linux.c
index d7d484024e2..8825bd61b7d 100644
--- a/drivers/char/rio/rio_linux.c
+++ b/drivers/char/rio/rio_linux.c
@@ -33,7 +33,7 @@
* */
#include <linux/module.h>
-#include <linux/config.h>
+#include <linux/config.h>
#include <linux/kdev_t.h>
#include <asm/io.h>
#include <linux/kernel.h>
@@ -56,10 +56,6 @@
#include <linux/generic_serial.h>
#include <asm/uaccess.h>
-#if BITS_PER_LONG != 32
-# error FIXME: this driver only works on 32-bit platforms
-#endif
-
#include "linux_compat.h"
#include "typdef.h"
#include "pkt.h"
@@ -116,7 +112,7 @@ more than 512 ports.... */
#define PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8 0x2000
#endif
-#ifndef RIO_WINDOW_LEN
+#ifndef RIO_WINDOW_LEN
#define RIO_WINDOW_LEN 0x10000
#endif
@@ -144,34 +140,51 @@ more than 512 ports.... */
*/
#define RIO_REPORT_FIFO
#define RIO_REPORT_OVERRUN
-#endif
+#endif
/* These constants are derived from SCO Source */
static struct Conf
-RIOConf =
-{
- /* locator */ "RIO Config here",
- /* startuptime */ HZ*2, /* how long to wait for card to run */
- /* slowcook */ 0, /* TRUE -> always use line disc. */
- /* intrpolltime */ 1, /* The frequency of OUR polls */
- /* breakinterval */ 25, /* x10 mS XXX: units seem to be 1ms not 10! -- REW*/
- /* timer */ 10, /* mS */
- /* RtaLoadBase */ 0x7000,
- /* HostLoadBase */ 0x7C00,
- /* XpHz */ 5, /* number of Xprint hits per second */
- /* XpCps */ 120, /* Xprint characters per second */
- /* XpOn */ "\033d#", /* start Xprint for a wyse 60 */
- /* XpOff */ "\024", /* end Xprint for a wyse 60 */
- /* MaxXpCps */ 2000, /* highest Xprint speed */
- /* MinXpCps */ 10, /* slowest Xprint speed */
- /* SpinCmds */ 1, /* non-zero for mega fast boots */
- /* First Addr */ 0x0A0000, /* First address to look at */
- /* Last Addr */ 0xFF0000, /* Last address looked at */
- /* BufferSize */ 1024, /* Bytes per port of buffering */
- /* LowWater */ 256, /* how much data left before wakeup */
- /* LineLength */ 80, /* how wide is the console? */
- /* CmdTimeout */ HZ, /* how long a close command may take */
+ RIOConf = {
+ /* locator */ "RIO Config here",
+ /* startuptime */ HZ * 2,
+ /* how long to wait for card to run */
+ /* slowcook */ 0,
+ /* TRUE -> always use line disc. */
+ /* intrpolltime */ 1,
+ /* The frequency of OUR polls */
+ /* breakinterval */ 25,
+ /* x10 mS XXX: units seem to be 1ms not 10! -- REW */
+ /* timer */ 10,
+ /* mS */
+ /* RtaLoadBase */ 0x7000,
+ /* HostLoadBase */ 0x7C00,
+ /* XpHz */ 5,
+ /* number of Xprint hits per second */
+ /* XpCps */ 120,
+ /* Xprint characters per second */
+ /* XpOn */ "\033d#",
+ /* start Xprint for a wyse 60 */
+ /* XpOff */ "\024",
+ /* end Xprint for a wyse 60 */
+ /* MaxXpCps */ 2000,
+ /* highest Xprint speed */
+ /* MinXpCps */ 10,
+ /* slowest Xprint speed */
+ /* SpinCmds */ 1,
+ /* non-zero for mega fast boots */
+ /* First Addr */ 0x0A0000,
+ /* First address to look at */
+ /* Last Addr */ 0xFF0000,
+ /* Last address looked at */
+ /* BufferSize */ 1024,
+ /* Bytes per port of buffering */
+ /* LowWater */ 256,
+ /* how much data left before wakeup */
+ /* LineLength */ 80,
+ /* how wide is the console? */
+ /* CmdTimeout */ HZ,
+ /* how long a close command may take */
};
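[Editor's aside] The reformatted RIOConf initializer above stays positional, with each label comment now on the line before its longer explanation. Purely as an aside, C99 designated initializers would tie each value to its field name directly; a hypothetical sketch, with member names only inferred from the label comments and not confirmed by this patch:

	#include <linux/param.h>	/* for HZ */

	/* Hypothetical struct and names -- illustration only. */
	struct example_conf {
		char *locator;
		int startuptime;	/* how long to wait for card to run */
		int slowcook;		/* TRUE -> always use line disc. */
		int intrpolltime;	/* the frequency of OUR polls */
	};

	static struct example_conf example_conf = {
		.locator	= "RIO Config here",
		.startuptime	= HZ * 2,
		.slowcook	= 0,
		.intrpolltime	= 1,
	};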
@@ -179,21 +192,20 @@ RIOConf =
/* Function prototypes */
-static void rio_disable_tx_interrupts (void * ptr);
-static void rio_enable_tx_interrupts (void * ptr);
-static void rio_disable_rx_interrupts (void * ptr);
-static void rio_enable_rx_interrupts (void * ptr);
-static int rio_get_CD (void * ptr);
-static void rio_shutdown_port (void * ptr);
-static int rio_set_real_termios (void *ptr);
-static void rio_hungup (void *ptr);
-static void rio_close (void *ptr);
-static int rio_chars_in_buffer (void * ptr);
-static int rio_fw_ioctl (struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
+static void rio_disable_tx_interrupts(void *ptr);
+static void rio_enable_tx_interrupts(void *ptr);
+static void rio_disable_rx_interrupts(void *ptr);
+static void rio_enable_rx_interrupts(void *ptr);
+static int rio_get_CD(void *ptr);
+static void rio_shutdown_port(void *ptr);
+static int rio_set_real_termios(void *ptr);
+static void rio_hungup(void *ptr);
+static void rio_close(void *ptr);
+static int rio_chars_in_buffer(void *ptr);
+static int rio_fw_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
static int rio_init_drivers(void);
-static void my_hd (void *addr, int len);
+static void my_hd(void *addr, int len);
static struct tty_driver *rio_driver, *rio_driver2;
@@ -213,9 +225,9 @@ static int rio_poll = 1;
/* These are the only open spaces in my computer. Yours may have more
or less.... */
-static int rio_probe_addrs[]= {0xc0000, 0xd0000, 0xe0000};
+static int rio_probe_addrs[] = { 0xc0000, 0xd0000, 0xe0000 };
-#define NR_RIO_ADDRS (sizeof(rio_probe_addrs)/sizeof (int))
+#define NR_RIO_ADDRS ARRAY_SIZE(rio_probe_addrs)
/* Set the mask to all-ones. This alas, only supports 32 interrupts.
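[Editor's aside] The NR_RIO_ADDRS hunk just above replaces the open-coded sizeof division with ARRAY_SIZE(), which keeps the count correct even if the element type of rio_probe_addrs ever changes. A standalone sketch of the equivalence, assuming nothing beyond plain C:

	#include <stdio.h>

	/* Same idea as the kernel's ARRAY_SIZE(): element count of a real array object. */
	#define EXAMPLE_ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

	static int example_probe_addrs[] = { 0xc0000, 0xd0000, 0xe0000 };

	int main(void)
	{
		printf("%zu\n", EXAMPLE_ARRAY_SIZE(example_probe_addrs));	/* prints 3 */
		return 0;
	}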
@@ -231,17 +243,17 @@ module_param(rio_debug, int, 0644);
module_param(rio_irqmask, long, 0);
static struct real_driver rio_real_driver = {
- rio_disable_tx_interrupts,
- rio_enable_tx_interrupts,
- rio_disable_rx_interrupts,
- rio_enable_rx_interrupts,
- rio_get_CD,
- rio_shutdown_port,
- rio_set_real_termios,
- rio_chars_in_buffer,
- rio_close,
- rio_hungup,
- NULL
+ rio_disable_tx_interrupts,
+ rio_enable_tx_interrupts,
+ rio_disable_rx_interrupts,
+ rio_enable_rx_interrupts,
+ rio_get_CD,
+ rio_shutdown_port,
+ rio_set_real_termios,
+ rio_chars_in_buffer,
+ rio_close,
+ rio_hungup,
+ NULL
};
/*
@@ -250,8 +262,8 @@ static struct real_driver rio_real_driver = {
*/
static struct file_operations rio_fw_fops = {
- .owner = THIS_MODULE,
- .ioctl = rio_fw_ioctl,
+ .owner = THIS_MODULE,
+ .ioctl = rio_fw_ioctl,
};
static struct miscdevice rio_fw_device = {
@@ -266,25 +278,22 @@ static struct miscdevice rio_fw_device = {
/* This doesn't work. Who's paranoid around here? Not me! */
-static inline int rio_paranoia_check(struct rio_port const * port,
- char *name, const char *routine)
+static inline int rio_paranoia_check(struct rio_port const *port, char *name, const char *routine)
{
- static const char *badmagic =
- KERN_ERR "rio: Warning: bad rio port magic number for device %s in %s\n";
- static const char *badinfo =
- KERN_ERR "rio: Warning: null rio port for device %s in %s\n";
-
- if (!port) {
- printk (badinfo, name, routine);
- return 1;
- }
- if (port->magic != RIO_MAGIC) {
- printk (badmagic, name, routine);
- return 1;
- }
-
- return 0;
+ static const char *badmagic = KERN_ERR "rio: Warning: bad rio port magic number for device %s in %s\n";
+ static const char *badinfo = KERN_ERR "rio: Warning: null rio port for device %s in %s\n";
+
+ if (!port) {
+ printk(badinfo, name, routine);
+ return 1;
+ }
+ if (port->magic != RIO_MAGIC) {
+ printk(badmagic, name, routine);
+ return 1;
+ }
+
+ return 0;
}
#else
#define rio_paranoia_check(a,b,c) 0
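[Editor's aside] rio_paranoia_check() above is the usual magic-number guard: when the debug variant is compiled out it collapses to the constant 0, so call sites cost nothing. A hypothetical example of how such a guard is typically called, assuming the driver's own headers; the routine name and error value are illustrative, not taken from this driver:

	static int example_open_guard(struct rio_port *port, struct tty_struct *tty)
	{
		if (rio_paranoia_check(port, tty->name, "example_open"))
			return -ENODEV;	/* null or stale port pointer */
		return 0;
	}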
@@ -292,53 +301,53 @@ static inline int rio_paranoia_check(struct rio_port const * port,
#ifdef DEBUG
-static void my_hd (void *ad, int len)
+static void my_hd(void *ad, int len)
{
- int i, j, ch;
- unsigned char *addr = ad;
-
- for (i=0;i<len;i+=16) {
- rio_dprintk (RIO_DEBUG_PARAM, "%08x ", (int) addr+i);
- for (j=0;j<16;j++) {
- rio_dprintk (RIO_DEBUG_PARAM, "%02x %s", addr[j+i], (j==7)?" ":"");
- }
- for (j=0;j<16;j++) {
- ch = addr[j+i];
- rio_dprintk (RIO_DEBUG_PARAM, "%c", (ch < 0x20)?'.':((ch > 0x7f)?'.':ch));
- }
- rio_dprintk (RIO_DEBUG_PARAM, "\n");
- }
+ int i, j, ch;
+ unsigned char *addr = ad;
+
+ for (i = 0; i < len; i += 16) {
+ rio_dprintk(RIO_DEBUG_PARAM, "%08x ", (int) addr + i);
+ for (j = 0; j < 16; j++) {
+ rio_dprintk(RIO_DEBUG_PARAM, "%02x %s", addr[j + i], (j == 7) ? " " : "");
+ }
+ for (j = 0; j < 16; j++) {
+ ch = addr[j + i];
+ rio_dprintk(RIO_DEBUG_PARAM, "%c", (ch < 0x20) ? '.' : ((ch > 0x7f) ? '.' : ch));
+ }
+ rio_dprintk(RIO_DEBUG_PARAM, "\n");
+ }
}
#else
#define my_hd(ad,len) do{/* nothing*/ } while (0)
#endif
-/* Delay a number of jiffies, allowing a signal to interrupt */
-int RIODelay (struct Port *PortP, int njiffies)
+/* Delay a number of jiffies, allowing a signal to interrupt */
+int RIODelay(struct Port *PortP, int njiffies)
{
- func_enter ();
+ func_enter();
- rio_dprintk (RIO_DEBUG_DELAY, "delaying %d jiffies\n", njiffies);
- msleep_interruptible(jiffies_to_msecs(njiffies));
- func_exit();
+ rio_dprintk(RIO_DEBUG_DELAY, "delaying %d jiffies\n", njiffies);
+ msleep_interruptible(jiffies_to_msecs(njiffies));
+ func_exit();
- if (signal_pending(current))
- return RIO_FAIL;
- else
- return !RIO_FAIL;
+ if (signal_pending(current))
+ return RIO_FAIL;
+ else
+ return !RIO_FAIL;
}
-/* Delay a number of jiffies, disallowing a signal to interrupt */
-int RIODelay_ni (struct Port *PortP, int njiffies)
+/* Delay a number of jiffies, disallowing a signal to interrupt */
+int RIODelay_ni(struct Port *PortP, int njiffies)
{
- func_enter ();
+ func_enter();
- rio_dprintk (RIO_DEBUG_DELAY, "delaying %d jiffies (ni)\n", njiffies);
- msleep(jiffies_to_msecs(njiffies));
- func_exit();
- return !RIO_FAIL;
+ rio_dprintk(RIO_DEBUG_DELAY, "delaying %d jiffies (ni)\n", njiffies);
+ msleep(jiffies_to_msecs(njiffies));
+ func_exit();
+ return !RIO_FAIL;
}
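[Editor's aside] Both delay helpers above convert a jiffies count to milliseconds and sleep; the interruptible variant returns RIO_FAIL when a pending signal cut the sleep short. A hedged usage sketch, assuming the driver's own headers (the caller name and the 50-jiffy value are illustrative; at HZ=100 that is roughly 500 ms):

	static int example_settle_port(struct Port *PortP)
	{
		if (RIODelay(PortP, 50) == RIO_FAIL)
			return -EINTR;		/* interrupted by a signal */
		return 0;
	}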
@@ -354,126 +363,121 @@ int rio_ismodem(struct tty_struct *tty)
}
-static int rio_set_real_termios (void *ptr)
+static int rio_set_real_termios(void *ptr)
{
- int rv, modem;
- struct tty_struct *tty;
- func_enter();
+ int rv, modem;
+ struct tty_struct *tty;
+ func_enter();
- tty = ((struct Port *)ptr)->gs.tty;
+ tty = ((struct Port *) ptr)->gs.tty;
- modem = rio_ismodem(tty);
+ modem = rio_ismodem(tty);
- rv = RIOParam( (struct Port *) ptr, CONFIG, modem, 1);
+ rv = RIOParam((struct Port *) ptr, CONFIG, modem, 1);
- func_exit ();
+ func_exit();
- return rv;
+ return rv;
}
-static void rio_reset_interrupt (struct Host *HostP)
+static void rio_reset_interrupt(struct Host *HostP)
{
- func_enter();
+ func_enter();
- switch( HostP->Type ) {
- case RIO_AT:
- case RIO_MCA:
- case RIO_PCI:
- WBYTE(HostP->ResetInt , 0xff);
- }
+ switch (HostP->Type) {
+ case RIO_AT:
+ case RIO_MCA:
+ case RIO_PCI:
+ WBYTE(HostP->ResetInt, 0xff);
+ }
- func_exit();
+ func_exit();
}
-static irqreturn_t rio_interrupt (int irq, void *ptr, struct pt_regs *regs)
+static irqreturn_t rio_interrupt(int irq, void *ptr, struct pt_regs *regs)
{
- struct Host *HostP;
- func_enter ();
+ struct Host *HostP;
+ func_enter();
- HostP = (struct Host*)ptr; /* &p->RIOHosts[(long)ptr]; */
- rio_dprintk (RIO_DEBUG_IFLOW, "rio: enter rio_interrupt (%d/%d)\n",
- irq, HostP->Ivec);
+ HostP = (struct Host *) ptr; /* &p->RIOHosts[(long)ptr]; */
+ rio_dprintk(RIO_DEBUG_IFLOW, "rio: enter rio_interrupt (%d/%d)\n", irq, HostP->Ivec);
- /* AAargh! The order in which to do these things is essential and
- not trivial.
-
- - Rate limit goes before "recursive". Otherwise a series of
- recursive calls will hang the machine in the interrupt routine.
+ /* AAargh! The order in which to do these things is essential and
+ not trivial.
- - hardware twiddling goes before "recursive". Otherwise when we
- poll the card, and a recursive interrupt happens, we won't
- ack the card, so it might keep on interrupting us. (especially
- level sensitive interrupt systems like PCI).
+ - Rate limit goes before "recursive". Otherwise a series of
+ recursive calls will hang the machine in the interrupt routine.
- - Rate limit goes before hardware twiddling. Otherwise we won't
- catch a card that has gone bonkers.
+ - hardware twiddling goes before "recursive". Otherwise when we
+ poll the card, and a recursive interrupt happens, we won't
+ ack the card, so it might keep on interrupting us. (especially
+ level sensitive interrupt systems like PCI).
- - The "initialized" test goes after the hardware twiddling. Otherwise
- the card will stick us in the interrupt routine again.
+ - Rate limit goes before hardware twiddling. Otherwise we won't
+ catch a card that has gone bonkers.
- - The initialized test goes before recursive.
- */
+ - The "initialized" test goes after the hardware twiddling. Otherwise
+ the card will stick us in the interrupt routine again.
+
+ - The initialized test goes before recursive.
+ */
#ifdef IRQ_RATE_LIMIT
- /* Aaargh! I'm ashamed. This costs more lines-of-code than the
- actual interrupt routine!. (Well, used to when I wrote that comment) */
- {
- static int lastjif;
- static int nintr=0;
-
- if (lastjif == jiffies) {
- if (++nintr > IRQ_RATE_LIMIT) {
- free_irq (HostP->Ivec, ptr);
- printk (KERN_ERR "rio: Too many interrupts. Turning off interrupt %d.\n",
- HostP->Ivec);
- }
- } else {
- lastjif = jiffies;
- nintr = 0;
- }
- }
+ /* Aaargh! I'm ashamed. This costs more lines-of-code than the
+ actual interrupt routine!. (Well, used to when I wrote that comment) */
+ {
+ static int lastjif;
+ static int nintr = 0;
+
+ if (lastjif == jiffies) {
+ if (++nintr > IRQ_RATE_LIMIT) {
+ free_irq(HostP->Ivec, ptr);
+ printk(KERN_ERR "rio: Too many interrupts. Turning off interrupt %d.\n", HostP->Ivec);
+ }
+ } else {
+ lastjif = jiffies;
+ nintr = 0;
+ }
+ }
#endif
- rio_dprintk (RIO_DEBUG_IFLOW, "rio: We've have noticed the interrupt\n");
- if (HostP->Ivec == irq) {
- /* Tell the card we've noticed the interrupt. */
- rio_reset_interrupt (HostP);
- }
-
- if ((HostP->Flags & RUN_STATE) != RC_RUNNING)
- return IRQ_HANDLED;
-
- if (test_and_set_bit (RIO_BOARD_INTR_LOCK, &HostP->locks)) {
- printk (KERN_ERR "Recursive interrupt! (host %d/irq%d)\n",
- (int) ptr, HostP->Ivec);
- return IRQ_HANDLED;
- }
-
- RIOServiceHost(p, HostP, irq);
-
- rio_dprintk ( RIO_DEBUG_IFLOW, "riointr() doing host %d type %d\n",
- (int) ptr, HostP->Type);
-
- clear_bit (RIO_BOARD_INTR_LOCK, &HostP->locks);
- rio_dprintk (RIO_DEBUG_IFLOW, "rio: exit rio_interrupt (%d/%d)\n",
- irq, HostP->Ivec);
- func_exit ();
- return IRQ_HANDLED;
+ rio_dprintk(RIO_DEBUG_IFLOW, "rio: We've have noticed the interrupt\n");
+ if (HostP->Ivec == irq) {
+ /* Tell the card we've noticed the interrupt. */
+ rio_reset_interrupt(HostP);
+ }
+
+ if ((HostP->Flags & RUN_STATE) != RC_RUNNING)
+ return IRQ_HANDLED;
+
+ if (test_and_set_bit(RIO_BOARD_INTR_LOCK, &HostP->locks)) {
+ printk(KERN_ERR "Recursive interrupt! (host %d/irq%d)\n", (int) ptr, HostP->Ivec);
+ return IRQ_HANDLED;
+ }
+
+ RIOServiceHost(p, HostP, irq);
+
+ rio_dprintk(RIO_DEBUG_IFLOW, "riointr() doing host %d type %d\n", (int) ptr, HostP->Type);
+
+ clear_bit(RIO_BOARD_INTR_LOCK, &HostP->locks);
+ rio_dprintk(RIO_DEBUG_IFLOW, "rio: exit rio_interrupt (%d/%d)\n", irq, HostP->Ivec);
+ func_exit();
+ return IRQ_HANDLED;
}
-static void rio_pollfunc (unsigned long data)
+static void rio_pollfunc(unsigned long data)
{
- func_enter ();
+ func_enter();
- rio_interrupt (0, &p->RIOHosts[data], NULL);
- p->RIOHosts[data].timer.expires = jiffies + rio_poll;
- add_timer (&p->RIOHosts[data].timer);
+ rio_interrupt(0, &p->RIOHosts[data], NULL);
+ p->RIOHosts[data].timer.expires = jiffies + rio_poll;
+ add_timer(&p->RIOHosts[data].timer);
- func_exit ();
+ func_exit();
}
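[Editor's aside] rio_pollfunc() above keeps interrupt-less hosts serviced by re-arming the host's timer from inside its own handler, so rio_interrupt() runs every rio_poll jiffies. A minimal sketch of that re-arming pattern against the timer API of this era (all names are illustrative):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list example_timer;
	static unsigned long example_interval = 1;	/* jiffies between polls */

	static void example_poll(unsigned long data)
	{
		/* ... poll the hardware here ... */

		example_timer.expires = jiffies + example_interval;
		add_timer(&example_timer);	/* re-arm for the next tick */
	}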
@@ -485,106 +489,106 @@ static void rio_pollfunc (unsigned long data)
/* Ehhm. I don't know how to fiddle with interrupts on the Specialix
cards. .... Hmm. Ok I figured it out. You don't. -- REW */
-static void rio_disable_tx_interrupts (void * ptr)
+static void rio_disable_tx_interrupts(void *ptr)
{
- func_enter();
+ func_enter();
- /* port->gs.flags &= ~GS_TX_INTEN; */
+ /* port->gs.flags &= ~GS_TX_INTEN; */
- func_exit();
+ func_exit();
}
-static void rio_enable_tx_interrupts (void * ptr)
+static void rio_enable_tx_interrupts(void *ptr)
{
- struct Port *PortP = ptr;
- /* int hn; */
+ struct Port *PortP = ptr;
+ /* int hn; */
- func_enter();
+ func_enter();
- /* hn = PortP->HostP - p->RIOHosts;
+ /* hn = PortP->HostP - p->RIOHosts;
- rio_dprintk (RIO_DEBUG_TTY, "Pushing host %d\n", hn);
- rio_interrupt (-1,(void *) hn, NULL); */
+ rio_dprintk (RIO_DEBUG_TTY, "Pushing host %d\n", hn);
+ rio_interrupt (-1,(void *) hn, NULL); */
- RIOTxEnable((char *) PortP);
+ RIOTxEnable((char *) PortP);
- /*
- * In general we cannot count on "tx empty" interrupts, although
- * the interrupt routine seems to be able to tell the difference.
- */
- PortP->gs.flags &= ~GS_TX_INTEN;
+ /*
+ * In general we cannot count on "tx empty" interrupts, although
+ * the interrupt routine seems to be able to tell the difference.
+ */
+ PortP->gs.flags &= ~GS_TX_INTEN;
- func_exit();
+ func_exit();
}
-static void rio_disable_rx_interrupts (void * ptr)
+static void rio_disable_rx_interrupts(void *ptr)
{
- func_enter();
- func_exit();
+ func_enter();
+ func_exit();
}
-static void rio_enable_rx_interrupts (void * ptr)
+static void rio_enable_rx_interrupts(void *ptr)
{
- /* struct rio_port *port = ptr; */
- func_enter();
- func_exit();
+ /* struct rio_port *port = ptr; */
+ func_enter();
+ func_exit();
}
/* Jeez. Isn't this simple? */
-static int rio_get_CD (void * ptr)
+static int rio_get_CD(void *ptr)
{
- struct Port *PortP = ptr;
- int rv;
+ struct Port *PortP = ptr;
+ int rv;
- func_enter();
- rv = (PortP->ModemState & MSVR1_CD) != 0;
+ func_enter();
+ rv = (PortP->ModemState & MSVR1_CD) != 0;
+
+ rio_dprintk(RIO_DEBUG_INIT, "Getting CD status: %d\n", rv);
- rio_dprintk (RIO_DEBUG_INIT, "Getting CD status: %d\n", rv);
-
- func_exit();
- return rv;
+ func_exit();
+ return rv;
}
/* Jeez. Isn't this simple? Actually, we can sync with the actual port
by just pushing stuff into the queue going to the port... */
-static int rio_chars_in_buffer (void * ptr)
+static int rio_chars_in_buffer(void *ptr)
{
- func_enter();
+ func_enter();
- func_exit();
- return 0;
+ func_exit();
+ return 0;
}
/* Nothing special here... */
-static void rio_shutdown_port (void * ptr)
+static void rio_shutdown_port(void *ptr)
{
- struct Port *PortP;
+ struct Port *PortP;
- func_enter();
+ func_enter();
- PortP = (struct Port *)ptr;
- PortP->gs.tty = NULL;
+ PortP = (struct Port *) ptr;
+ PortP->gs.tty = NULL;
#if 0
- port->gs.flags &= ~ GS_ACTIVE;
- if (!port->gs.tty) {
- rio_dprintk (RIO_DBUG_TTY, "No tty.\n");
- return;
- }
- if (!port->gs.tty->termios) {
- rio_dprintk (RIO_DEBUG_TTY, "No termios.\n");
- return;
- }
- if (port->gs.tty->termios->c_cflag & HUPCL) {
- rio_setsignals (port, 0, 0);
- }
+ port->gs.flags &= ~GS_ACTIVE;
+ if (!port->gs.tty) {
+ rio_dprintk(RIO_DBUG_TTY, "No tty.\n");
+ return;
+ }
+ if (!port->gs.tty->termios) {
+ rio_dprintk(RIO_DEBUG_TTY, "No termios.\n");
+ return;
+ }
+ if (port->gs.tty->termios->c_cflag & HUPCL) {
+ rio_setsignals(port, 0, 0);
+ }
#endif
- func_exit();
+ func_exit();
}
@@ -595,16 +599,16 @@ static void rio_shutdown_port (void * ptr)
running minicom on a serial port that is driven by a modularized
driver. Have the modem hangup. Then remove the driver module. Then
exit minicom. I expect an "oops". -- REW */
-static void rio_hungup (void *ptr)
+static void rio_hungup(void *ptr)
{
- struct Port *PortP;
+ struct Port *PortP;
- func_enter();
-
- PortP = (struct Port *)ptr;
- PortP->gs.tty = NULL;
+ func_enter();
- func_exit ();
+ PortP = (struct Port *) ptr;
+ PortP->gs.tty = NULL;
+
+ func_exit();
}
@@ -612,146 +616,135 @@ static void rio_hungup (void *ptr)
this.
rs_close (...){save_flags;cli;real_close();dec_use_count;restore_flags;}
*/
-static void rio_close (void *ptr)
+static void rio_close(void *ptr)
{
- struct Port *PortP;
+ struct Port *PortP;
- func_enter ();
+ func_enter();
- PortP = (struct Port *)ptr;
+ PortP = (struct Port *) ptr;
- riotclose (ptr);
+ riotclose(ptr);
- if(PortP->gs.count) {
- printk (KERN_ERR "WARNING port count:%d\n", PortP->gs.count);
- PortP->gs.count = 0;
- }
+ if (PortP->gs.count) {
+ printk(KERN_ERR "WARNING port count:%d\n", PortP->gs.count);
+ PortP->gs.count = 0;
+ }
- PortP->gs.tty = NULL;
- func_exit ();
+ PortP->gs.tty = NULL;
+ func_exit();
}
-static int rio_fw_ioctl (struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static int rio_fw_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
{
- int rc = 0;
- func_enter();
+ int rc = 0;
+ func_enter();
- /* The "dev" argument isn't used. */
- rc = riocontrol (p, 0, cmd, (void *)arg, capable(CAP_SYS_ADMIN));
+ /* The "dev" argument isn't used. */
+ rc = riocontrol(p, 0, cmd, (void *) arg, capable(CAP_SYS_ADMIN));
- func_exit ();
- return rc;
+ func_exit();
+ return rc;
}
-extern int RIOShortCommand(struct rio_info *p, struct Port *PortP,
- int command, int len, int arg);
+extern int RIOShortCommand(struct rio_info *p, struct Port *PortP, int command, int len, int arg);
-static int rio_ioctl (struct tty_struct * tty, struct file * filp,
- unsigned int cmd, unsigned long arg)
+static int rio_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd, unsigned long arg)
{
- int rc;
- struct Port *PortP;
- int ival;
+ int rc;
+ struct Port *PortP;
+ int ival;
- func_enter();
+ func_enter();
- PortP = (struct Port *)tty->driver_data;
+ PortP = (struct Port *) tty->driver_data;
- rc = 0;
- switch (cmd) {
+ rc = 0;
+ switch (cmd) {
#if 0
- case TIOCGSOFTCAR:
- rc = put_user(((tty->termios->c_cflag & CLOCAL) ? 1 : 0),
- (unsigned int *) arg);
- break;
+ case TIOCGSOFTCAR:
+ rc = put_user(((tty->termios->c_cflag & CLOCAL) ? 1 : 0), (unsigned int *) arg);
+ break;
#endif
- case TIOCSSOFTCAR:
- if ((rc = get_user(ival, (unsigned int *) arg)) == 0) {
- tty->termios->c_cflag =
- (tty->termios->c_cflag & ~CLOCAL) |
- (ival ? CLOCAL : 0);
- }
- break;
- case TIOCGSERIAL:
- rc = -EFAULT;
- if (access_ok(VERIFY_WRITE, (void *) arg,
- sizeof(struct serial_struct)))
- rc = gs_getserial(&PortP->gs, (struct serial_struct *) arg);
- break;
- case TCSBRK:
- if ( PortP->State & RIO_DELETED ) {
- rio_dprintk (RIO_DEBUG_TTY, "BREAK on deleted RTA\n");
- rc = -EIO;
- } else {
- if (RIOShortCommand(p, PortP, SBREAK, 2, 250) == RIO_FAIL) {
- rio_dprintk (RIO_DEBUG_INTR, "SBREAK RIOShortCommand failed\n");
- rc = -EIO;
- }
- }
- break;
- case TCSBRKP:
- if ( PortP->State & RIO_DELETED ) {
- rio_dprintk (RIO_DEBUG_TTY, "BREAK on deleted RTA\n");
- rc = -EIO;
- } else {
- int l;
- l = arg?arg*100:250;
- if (l > 255) l = 255;
- if (RIOShortCommand(p, PortP, SBREAK, 2, arg?arg*100:250) == RIO_FAIL) {
- rio_dprintk (RIO_DEBUG_INTR, "SBREAK RIOShortCommand failed\n");
- rc = -EIO;
- }
- }
- break;
- case TIOCSSERIAL:
- rc = -EFAULT;
- if (access_ok(VERIFY_READ, (void *) arg,
- sizeof(struct serial_struct)))
- rc = gs_setserial(&PortP->gs, (struct serial_struct *) arg);
- break;
+ case TIOCSSOFTCAR:
+ if ((rc = get_user(ival, (unsigned int *) arg)) == 0) {
+ tty->termios->c_cflag = (tty->termios->c_cflag & ~CLOCAL) | (ival ? CLOCAL : 0);
+ }
+ break;
+ case TIOCGSERIAL:
+ rc = -EFAULT;
+ if (access_ok(VERIFY_WRITE, (void *) arg, sizeof(struct serial_struct)))
+ rc = gs_getserial(&PortP->gs, (struct serial_struct *) arg);
+ break;
+ case TCSBRK:
+ if (PortP->State & RIO_DELETED) {
+ rio_dprintk(RIO_DEBUG_TTY, "BREAK on deleted RTA\n");
+ rc = -EIO;
+ } else {
+ if (RIOShortCommand(p, PortP, SBREAK, 2, 250) == RIO_FAIL) {
+ rio_dprintk(RIO_DEBUG_INTR, "SBREAK RIOShortCommand failed\n");
+ rc = -EIO;
+ }
+ }
+ break;
+ case TCSBRKP:
+ if (PortP->State & RIO_DELETED) {
+ rio_dprintk(RIO_DEBUG_TTY, "BREAK on deleted RTA\n");
+ rc = -EIO;
+ } else {
+ int l;
+ l = arg ? arg * 100 : 250;
+ if (l > 255)
+ l = 255;
+ if (RIOShortCommand(p, PortP, SBREAK, 2, arg ? arg * 100 : 250) == RIO_FAIL) {
+ rio_dprintk(RIO_DEBUG_INTR, "SBREAK RIOShortCommand failed\n");
+ rc = -EIO;
+ }
+ }
+ break;
+ case TIOCSSERIAL:
+ rc = -EFAULT;
+ if (access_ok(VERIFY_READ, (void *) arg, sizeof(struct serial_struct)))
+ rc = gs_setserial(&PortP->gs, (struct serial_struct *) arg);
+ break;
#if 0
- /*
- * note: these IOCTLs no longer reach here. Use
- * tiocmset/tiocmget driver methods instead. The
- * #if 0 disablement predates this comment.
- */
- case TIOCMGET:
- rc = -EFAULT;
- if (access_ok(VERIFY_WRITE, (void *) arg,
- sizeof(unsigned int))) {
- rc = 0;
- ival = rio_getsignals(port);
- put_user(ival, (unsigned int *) arg);
- }
- break;
- case TIOCMBIS:
- if ((rc = get_user(ival, (unsigned int *) arg)) == 0) {
- rio_setsignals(port, ((ival & TIOCM_DTR) ? 1 : -1),
- ((ival & TIOCM_RTS) ? 1 : -1));
- }
- break;
- case TIOCMBIC:
- if ((rc = get_user(ival, (unsigned int *) arg)) == 0) {
- rio_setsignals(port, ((ival & TIOCM_DTR) ? 0 : -1),
- ((ival & TIOCM_RTS) ? 0 : -1));
- }
- break;
- case TIOCMSET:
- if ((rc = get_user(ival, (unsigned int *) arg)) == 0) {
- rio_setsignals(port, ((ival & TIOCM_DTR) ? 1 : 0),
- ((ival & TIOCM_RTS) ? 1 : 0));
- }
- break;
+ /*
+ * note: these IOCTLs no longer reach here. Use
+ * tiocmset/tiocmget driver methods instead. The
+ * #if 0 disablement predates this comment.
+ */
+ case TIOCMGET:
+ rc = -EFAULT;
+ if (access_ok(VERIFY_WRITE, (void *) arg, sizeof(unsigned int))) {
+ rc = 0;
+ ival = rio_getsignals(port);
+ put_user(ival, (unsigned int *) arg);
+ }
+ break;
+ case TIOCMBIS:
+ if ((rc = get_user(ival, (unsigned int *) arg)) == 0) {
+ rio_setsignals(port, ((ival & TIOCM_DTR) ? 1 : -1), ((ival & TIOCM_RTS) ? 1 : -1));
+ }
+ break;
+ case TIOCMBIC:
+ if ((rc = get_user(ival, (unsigned int *) arg)) == 0) {
+ rio_setsignals(port, ((ival & TIOCM_DTR) ? 0 : -1), ((ival & TIOCM_RTS) ? 0 : -1));
+ }
+ break;
+ case TIOCMSET:
+ if ((rc = get_user(ival, (unsigned int *) arg)) == 0) {
+ rio_setsignals(port, ((ival & TIOCM_DTR) ? 1 : 0), ((ival & TIOCM_RTS) ? 1 : 0));
+ }
+ break;
#endif
- default:
- rc = -ENOIOCTLCMD;
- break;
- }
- func_exit();
- return rc;
+ default:
+ rc = -ENOIOCTLCMD;
+ break;
+ }
+ func_exit();
+ return rc;
}
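[Editor's aside] The ioctl handler above follows the standard pattern of this era: get_user()/put_user() for single values, access_ok() plus the generic_serial helpers for whole structures, and -ENOIOCTLCMD for anything unrecognised. A hedged sketch of the single-value case, equivalent to the TIOCSSOFTCAR branch; the helper name is illustrative and not part of the driver:

	#include <asm/uaccess.h>

	static int example_set_clocal(struct tty_struct *tty, unsigned long arg)
	{
		int ival;

		if (get_user(ival, (unsigned int *) arg))
			return -EFAULT;		/* bad user pointer */
		tty->termios->c_cflag = (tty->termios->c_cflag & ~CLOCAL) | (ival ? CLOCAL : 0);
		return 0;
	}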
@@ -771,37 +764,37 @@ static int rio_ioctl (struct tty_struct * tty, struct file * filp,
* flow control scheme is in use for that port. -- Simon Allen
*/
-static void rio_throttle (struct tty_struct * tty)
+static void rio_throttle(struct tty_struct *tty)
{
- struct Port *port = (struct Port *)tty->driver_data;
-
- func_enter();
- /* If the port is using any type of input flow
- * control then throttle the port.
- */
-
- if((tty->termios->c_cflag & CRTSCTS) || (I_IXOFF(tty)) ) {
- port->State |= RIO_THROTTLE_RX;
- }
-
- func_exit();
+ struct Port *port = (struct Port *) tty->driver_data;
+
+ func_enter();
+ /* If the port is using any type of input flow
+ * control then throttle the port.
+ */
+
+ if ((tty->termios->c_cflag & CRTSCTS) || (I_IXOFF(tty))) {
+ port->State |= RIO_THROTTLE_RX;
+ }
+
+ func_exit();
}
-static void rio_unthrottle (struct tty_struct * tty)
+static void rio_unthrottle(struct tty_struct *tty)
{
- struct Port *port = (struct Port *)tty->driver_data;
+ struct Port *port = (struct Port *) tty->driver_data;
- func_enter();
- /* Always unthrottle even if flow control is not enabled on
- * this port in case we disabled flow control while the port
- * was throttled
- */
+ func_enter();
+ /* Always unthrottle even if flow control is not enabled on
+ * this port in case we disabled flow control while the port
+ * was throttled
+ */
- port->State &= ~RIO_THROTTLE_RX;
+ port->State &= ~RIO_THROTTLE_RX;
- func_exit();
- return;
+ func_exit();
+ return;
}
@@ -813,35 +806,34 @@ static void rio_unthrottle (struct tty_struct * tty)
* ********************************************************************** */
-static struct vpd_prom *get_VPD_PROM (struct Host *hp)
+static struct vpd_prom *get_VPD_PROM(struct Host *hp)
{
- static struct vpd_prom vpdp;
- char *p;
- int i;
+ static struct vpd_prom vpdp;
+ char *p;
+ int i;
+
+ func_enter();
+ rio_dprintk(RIO_DEBUG_PROBE, "Going to verify vpd prom at %p.\n", hp->Caddr + RIO_VPD_ROM);
- func_enter();
- rio_dprintk (RIO_DEBUG_PROBE, "Going to verify vpd prom at %p.\n",
- hp->Caddr + RIO_VPD_ROM);
+ p = (char *) &vpdp;
+ for (i = 0; i < sizeof(struct vpd_prom); i++)
+ *p++ = readb(hp->Caddr + RIO_VPD_ROM + i * 2);
+ /* read_rio_byte (hp, RIO_VPD_ROM + i*2); */
- p = (char *) &vpdp;
- for (i=0;i< sizeof (struct vpd_prom);i++)
- *p++ = readb (hp->Caddr+RIO_VPD_ROM + i*2);
- /* read_rio_byte (hp, RIO_VPD_ROM + i*2); */
+ /* Terminate the identifier string.
+ *** requires one extra byte in struct vpd_prom *** */
+ *p++ = 0;
- /* Terminate the identifier string.
- *** requires one extra byte in struct vpd_prom *** */
- *p++=0;
+ if (rio_debug & RIO_DEBUG_PROBE)
+ my_hd((char *) &vpdp, 0x20);
- if (rio_debug & RIO_DEBUG_PROBE)
- my_hd ((char *)&vpdp, 0x20);
-
- func_exit();
+ func_exit();
- return &vpdp;
+ return &vpdp;
}
static struct tty_operations rio_ops = {
- .open = riotopen,
+ .open = riotopen,
.close = gs_close,
.write = gs_write,
.put_char = gs_put_char,
@@ -893,7 +885,7 @@ static int rio_init_drivers(void)
rio_driver2->flags = TTY_DRIVER_REAL_RAW;
tty_set_operations(rio_driver2, &rio_ops);
- rio_dprintk (RIO_DEBUG_INIT, "set_termios = %p\n", gs_set_termios);
+ rio_dprintk(RIO_DEBUG_INIT, "set_termios = %p\n", gs_set_termios);
if ((error = tty_register_driver(rio_driver)))
goto out2;
@@ -901,116 +893,111 @@ static int rio_init_drivers(void)
goto out3;
func_exit();
return 0;
-out3:
+ out3:
tty_unregister_driver(rio_driver);
-out2:
+ out2:
put_tty_driver(rio_driver2);
-out1:
+ out1:
put_tty_driver(rio_driver);
-out:
- printk(KERN_ERR "rio: Couldn't register a rio driver, error = %d\n",
- error);
+ out:
+ printk(KERN_ERR "rio: Couldn't register a rio driver, error = %d\n", error);
return 1;
}
-static void * ckmalloc (int size)
+static void *ckmalloc(int size)
{
- void *p;
+ void *p;
- p = kmalloc(size, GFP_KERNEL);
- if (p)
- memset(p, 0, size);
- return p;
+ p = kmalloc(size, GFP_KERNEL);
+ if (p)
+ memset(p, 0, size);
+ return p;
}
-static int rio_init_datastructures (void)
+static int rio_init_datastructures(void)
{
- int i;
- struct Port *port;
- func_enter();
-
- /* Many drivers statically allocate the maximum number of ports
- There is no reason not to allocate them dynamically. Is there? -- REW */
- /* However, the RIO driver allows users to configure their first
- RTA as the ports numbered 504-511. We therefore need to allocate
- the whole range. :-( -- REW */
-
+ int i;
+ struct Port *port;
+ func_enter();
+
+ /* Many drivers statically allocate the maximum number of ports
+ There is no reason not to allocate them dynamically. Is there? -- REW */
+ /* However, the RIO driver allows users to configure their first
+ RTA as the ports numbered 504-511. We therefore need to allocate
+ the whole range. :-( -- REW */
+
#define RI_SZ sizeof(struct rio_info)
#define HOST_SZ sizeof(struct Host)
#define PORT_SZ sizeof(struct Port *)
#define TMIO_SZ sizeof(struct termios *)
- rio_dprintk (RIO_DEBUG_INIT, "getting : %d %d %d %d %d bytes\n",
- RI_SZ,
- RIO_HOSTS * HOST_SZ,
- RIO_PORTS * PORT_SZ,
- RIO_PORTS * TMIO_SZ,
- RIO_PORTS * TMIO_SZ);
-
- if (!(p = ckmalloc ( RI_SZ))) goto free0;
- if (!(p->RIOHosts = ckmalloc (RIO_HOSTS * HOST_SZ))) goto free1;
- if (!(p->RIOPortp = ckmalloc (RIO_PORTS * PORT_SZ))) goto free2;
- p->RIOConf = RIOConf;
- rio_dprintk (RIO_DEBUG_INIT, "Got : %p %p %p\n",
- p, p->RIOHosts, p->RIOPortp);
+ rio_dprintk(RIO_DEBUG_INIT, "getting : %d %d %d %d %d bytes\n", RI_SZ, RIO_HOSTS * HOST_SZ, RIO_PORTS * PORT_SZ, RIO_PORTS * TMIO_SZ, RIO_PORTS * TMIO_SZ);
+
+ if (!(p = ckmalloc(RI_SZ)))
+ goto free0;
+ if (!(p->RIOHosts = ckmalloc(RIO_HOSTS * HOST_SZ)))
+ goto free1;
+ if (!(p->RIOPortp = ckmalloc(RIO_PORTS * PORT_SZ)))
+ goto free2;
+ p->RIOConf = RIOConf;
+ rio_dprintk(RIO_DEBUG_INIT, "Got : %p %p %p\n", p, p->RIOHosts, p->RIOPortp);
#if 1
- for (i = 0; i < RIO_PORTS; i++) {
- port = p->RIOPortp[i] = ckmalloc (sizeof (struct Port));
- if (!port) {
- goto free6;
- }
- rio_dprintk (RIO_DEBUG_INIT, "initing port %d (%d)\n", i, port->Mapped);
- port->PortNum = i;
- port->gs.magic = RIO_MAGIC;
- port->gs.close_delay = HZ/2;
- port->gs.closing_wait = 30 * HZ;
- port->gs.rd = &rio_real_driver;
- spin_lock_init(&port->portSem);
- /*
- * Initializing wait queue
- */
- init_waitqueue_head(&port->gs.open_wait);
- init_waitqueue_head(&port->gs.close_wait);
- }
+ for (i = 0; i < RIO_PORTS; i++) {
+ port = p->RIOPortp[i] = ckmalloc(sizeof(struct Port));
+ if (!port) {
+ goto free6;
+ }
+ rio_dprintk(RIO_DEBUG_INIT, "initing port %d (%d)\n", i, port->Mapped);
+ port->PortNum = i;
+ port->gs.magic = RIO_MAGIC;
+ port->gs.close_delay = HZ / 2;
+ port->gs.closing_wait = 30 * HZ;
+ port->gs.rd = &rio_real_driver;
+ spin_lock_init(&port->portSem);
+ /*
+ * Initializing wait queue
+ */
+ init_waitqueue_head(&port->gs.open_wait);
+ init_waitqueue_head(&port->gs.close_wait);
+ }
#else
- /* We could postpone initializing them to when they are configured. */
+ /* We could postpone initializing them to when they are configured. */
#endif
-
- if (rio_debug & RIO_DEBUG_INIT) {
- my_hd (&rio_real_driver, sizeof (rio_real_driver));
- }
-
- func_exit();
- return 0;
+ if (rio_debug & RIO_DEBUG_INIT) {
+ my_hd(&rio_real_driver, sizeof(rio_real_driver));
+ }
+
+
+ func_exit();
+ return 0;
- free6:for (i--;i>=0;i--)
- kfree (p->RIOPortp[i]);
+ free6:for (i--; i >= 0; i--)
+ kfree(p->RIOPortp[i]);
/*free5:
free4:
- free3:*/kfree (p->RIOPortp);
- free2:kfree (p->RIOHosts);
- free1:
- rio_dprintk (RIO_DEBUG_INIT, "Not enough memory! %p %p %p\n",
- p, p->RIOHosts, p->RIOPortp);
- kfree(p);
- free0:
- return -ENOMEM;
+ free3:*/ kfree(p->RIOPortp);
+ free2:kfree(p->RIOHosts);
+ free1:
+ rio_dprintk(RIO_DEBUG_INIT, "Not enough memory! %p %p %p\n", p, p->RIOHosts, p->RIOPortp);
+ kfree(p);
+ free0:
+ return -ENOMEM;
}
-static void __exit rio_release_drivers(void)
+static void __exit rio_release_drivers(void)
{
- func_enter();
- tty_unregister_driver(rio_driver2);
- tty_unregister_driver(rio_driver);
- put_tty_driver(rio_driver2);
- put_tty_driver(rio_driver);
- func_exit();
+ func_enter();
+ tty_unregister_driver(rio_driver2);
+ tty_unregister_driver(rio_driver);
+ put_tty_driver(rio_driver2);
+ put_tty_driver(rio_driver);
+ func_exit();
}
@@ -1021,7 +1008,7 @@ static void __exit rio_release_drivers(void)
There is another bit besides Bit 17. Turning that bit off
(on boards shipped with the fix in the eeprom) results in a
hang on the next access to the card.
- */
+ */
/********************************************************
* Setting bit 17 in the CNTRL register of the PLX 9050 *
@@ -1034,319 +1021,293 @@ static void __exit rio_release_drivers(void)
EEprom. As the bit is read/write for the CPU, we can fix it here,
if we detect that it isn't set correctly. -- REW */
-static void fix_rio_pci (struct pci_dev *pdev)
+static void fix_rio_pci(struct pci_dev *pdev)
{
- unsigned int hwbase;
- unsigned long rebase;
- unsigned int t;
+ unsigned int hwbase;
+ unsigned long rebase;
+ unsigned int t;
#define CNTRL_REG_OFFSET 0x50
#define CNTRL_REG_GOODVALUE 0x18260000
- pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &hwbase);
- hwbase &= PCI_BASE_ADDRESS_MEM_MASK;
- rebase = (ulong) ioremap(hwbase, 0x80);
- t = readl (rebase + CNTRL_REG_OFFSET);
- if (t != CNTRL_REG_GOODVALUE) {
- printk (KERN_DEBUG "rio: performing cntrl reg fix: %08x -> %08x\n",
- t, CNTRL_REG_GOODVALUE);
- writel (CNTRL_REG_GOODVALUE, rebase + CNTRL_REG_OFFSET);
- }
- iounmap((char*) rebase);
+ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &hwbase);
+ hwbase &= PCI_BASE_ADDRESS_MEM_MASK;
+ rebase = (ulong) ioremap(hwbase, 0x80);
+ t = readl(rebase + CNTRL_REG_OFFSET);
+ if (t != CNTRL_REG_GOODVALUE) {
+ printk(KERN_DEBUG "rio: performing cntrl reg fix: %08x -> %08x\n", t, CNTRL_REG_GOODVALUE);
+ writel(CNTRL_REG_GOODVALUE, rebase + CNTRL_REG_OFFSET);
+ }
+ iounmap((char *) rebase);
}
#endif
-static int __init rio_init(void)
+static int __init rio_init(void)
{
- int found = 0;
- int i;
- struct Host *hp;
- int retval;
- struct vpd_prom *vpdp;
- int okboard;
+ int found = 0;
+ int i;
+ struct Host *hp;
+ int retval;
+ struct vpd_prom *vpdp;
+ int okboard;
#ifdef CONFIG_PCI
- struct pci_dev *pdev = NULL;
- unsigned int tint;
- unsigned short tshort;
+ struct pci_dev *pdev = NULL;
+ unsigned int tint;
+ unsigned short tshort;
#endif
- func_enter();
- rio_dprintk (RIO_DEBUG_INIT, "Initing rio module... (rio_debug=%d)\n",
- rio_debug);
-
- if (abs ((long) (&rio_debug) - rio_debug) < 0x10000) {
- printk (KERN_WARNING "rio: rio_debug is an address, instead of a value. "
- "Assuming -1. Was %x/%p.\n", rio_debug, &rio_debug);
- rio_debug=-1;
- }
-
- if (misc_register(&rio_fw_device) < 0) {
- printk(KERN_ERR "RIO: Unable to register firmware loader driver.\n");
- return -EIO;
- }
-
- retval = rio_init_datastructures ();
- if (retval < 0) {
- misc_deregister(&rio_fw_device);
- return retval;
- }
-
+ func_enter();
+ rio_dprintk(RIO_DEBUG_INIT, "Initing rio module... (rio_debug=%d)\n", rio_debug);
+
+ if (abs((long) (&rio_debug) - rio_debug) < 0x10000) {
+ printk(KERN_WARNING "rio: rio_debug is an address, instead of a value. " "Assuming -1. Was %x/%p.\n", rio_debug, &rio_debug);
+ rio_debug = -1;
+ }
+
+ if (misc_register(&rio_fw_device) < 0) {
+ printk(KERN_ERR "RIO: Unable to register firmware loader driver.\n");
+ return -EIO;
+ }
+
+ retval = rio_init_datastructures();
+ if (retval < 0) {
+ misc_deregister(&rio_fw_device);
+ return retval;
+ }
#ifdef CONFIG_PCI
- /* First look for the JET devices: */
- while ((pdev = pci_get_device (PCI_VENDOR_ID_SPECIALIX,
- PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8,
- pdev))) {
- if (pci_enable_device(pdev)) continue;
-
- /* Specialix has a whole bunch of cards with
- 0x2000 as the device ID. They say its because
- the standard requires it. Stupid standard. */
- /* It seems that reading a word doesn't work reliably on 2.0.
- Also, reading a non-aligned dword doesn't work. So we read the
- whole dword at 0x2c and extract the word at 0x2e (SUBSYSTEM_ID)
- ourselves */
- /* I don't know why the define doesn't work, constant 0x2c does --REW */
- pci_read_config_dword (pdev, 0x2c, &tint);
- tshort = (tint >> 16) & 0xffff;
- rio_dprintk (RIO_DEBUG_PROBE, "Got a specialix card: %x.\n", tint);
- if (tshort != 0x0100) {
- rio_dprintk (RIO_DEBUG_PROBE, "But it's not a RIO card (%d)...\n",
- tshort);
- continue;
- }
- rio_dprintk (RIO_DEBUG_PROBE, "cp1\n");
-
- pci_read_config_dword(pdev, PCI_BASE_ADDRESS_2, &tint);
-
- hp = &p->RIOHosts[p->RIONumHosts];
- hp->PaddrP = tint & PCI_BASE_ADDRESS_MEM_MASK;
- hp->Ivec = pdev->irq;
- if (((1 << hp->Ivec) & rio_irqmask) == 0)
- hp->Ivec = 0;
- hp->Caddr = ioremap(p->RIOHosts[p->RIONumHosts].PaddrP, RIO_WINDOW_LEN);
- hp->CardP = (struct DpRam *) hp->Caddr;
- hp->Type = RIO_PCI;
- hp->Copy = rio_pcicopy;
- hp->Mode = RIO_PCI_BOOT_FROM_RAM;
- spin_lock_init(&hp->HostLock);
- rio_reset_interrupt (hp);
- rio_start_card_running (hp);
-
- rio_dprintk (RIO_DEBUG_PROBE, "Going to test it (%p/%p).\n",
- (void *)p->RIOHosts[p->RIONumHosts].PaddrP,
- p->RIOHosts[p->RIONumHosts].Caddr);
- if (RIOBoardTest( p->RIOHosts[p->RIONumHosts].PaddrP,
- p->RIOHosts[p->RIONumHosts].Caddr,
- RIO_PCI, 0 ) == RIO_SUCCESS) {
- rio_dprintk (RIO_DEBUG_INIT, "Done RIOBoardTest\n");
- WBYTE(p->RIOHosts[p->RIONumHosts].ResetInt, 0xff);
- p->RIOHosts[p->RIONumHosts].UniqueNum =
- ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[0]) &0xFF)<< 0)|
- ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[1]) &0xFF)<< 8)|
- ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[2]) &0xFF)<<16)|
- ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[3]) &0xFF)<<24);
- rio_dprintk (RIO_DEBUG_PROBE, "Hmm Tested ok, uniqid = %x.\n",
- p->RIOHosts[p->RIONumHosts].UniqueNum);
-
- fix_rio_pci (pdev);
- p->RIOLastPCISearch = RIO_SUCCESS;
- p->RIONumHosts++;
- found++;
- } else {
- iounmap((char*) (p->RIOHosts[p->RIONumHosts].Caddr));
- }
- }
-
- /* Then look for the older PCI card.... : */
-
- /* These older PCI cards have problems (only byte-mode access is
- supported), which makes them a bit awkward to support.
- They also have problems sharing interrupts. Be careful.
- (The driver now refuses to share interrupts for these
- cards. This should be sufficient).
- */
-
- /* Then look for the older RIO/PCI devices: */
- while ((pdev = pci_get_device (PCI_VENDOR_ID_SPECIALIX,
- PCI_DEVICE_ID_SPECIALIX_RIO,
- pdev))) {
- if (pci_enable_device(pdev)) continue;
+ /* First look for the JET devices: */
+ while ((pdev = pci_get_device(PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8, pdev))) {
+ if (pci_enable_device(pdev))
+ continue;
+
+ /* Specialix has a whole bunch of cards with
+ 0x2000 as the device ID. They say its because
+ the standard requires it. Stupid standard. */
+ /* It seems that reading a word doesn't work reliably on 2.0.
+ Also, reading a non-aligned dword doesn't work. So we read the
+ whole dword at 0x2c and extract the word at 0x2e (SUBSYSTEM_ID)
+ ourselves */
+ /* I don't know why the define doesn't work, constant 0x2c does --REW */
+ pci_read_config_dword(pdev, 0x2c, &tint);
+ tshort = (tint >> 16) & 0xffff;
+ rio_dprintk(RIO_DEBUG_PROBE, "Got a specialix card: %x.\n", tint);
+ if (tshort != 0x0100) {
+ rio_dprintk(RIO_DEBUG_PROBE, "But it's not a RIO card (%d)...\n", tshort);
+ continue;
+ }
+ rio_dprintk(RIO_DEBUG_PROBE, "cp1\n");
+
+ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_2, &tint);
+
+ hp = &p->RIOHosts[p->RIONumHosts];
+ hp->PaddrP = tint & PCI_BASE_ADDRESS_MEM_MASK;
+ hp->Ivec = pdev->irq;
+ if (((1 << hp->Ivec) & rio_irqmask) == 0)
+ hp->Ivec = 0;
+ hp->Caddr = ioremap(p->RIOHosts[p->RIONumHosts].PaddrP, RIO_WINDOW_LEN);
+ hp->CardP = (struct DpRam *) hp->Caddr;
+ hp->Type = RIO_PCI;
+ hp->Copy = rio_pcicopy;
+ hp->Mode = RIO_PCI_BOOT_FROM_RAM;
+ spin_lock_init(&hp->HostLock);
+ rio_reset_interrupt(hp);
+ rio_start_card_running(hp);
+
+ rio_dprintk(RIO_DEBUG_PROBE, "Going to test it (%p/%p).\n", (void *) p->RIOHosts[p->RIONumHosts].PaddrP, p->RIOHosts[p->RIONumHosts].Caddr);
+ if (RIOBoardTest(p->RIOHosts[p->RIONumHosts].PaddrP, p->RIOHosts[p->RIONumHosts].Caddr, RIO_PCI, 0) == RIO_SUCCESS) {
+ rio_dprintk(RIO_DEBUG_INIT, "Done RIOBoardTest\n");
+ WBYTE(p->RIOHosts[p->RIONumHosts].ResetInt, 0xff);
+ p->RIOHosts[p->RIONumHosts].UniqueNum =
+ ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[0]) & 0xFF) << 0) |
+ ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[1]) & 0xFF) << 8) | ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[2]) & 0xFF) << 16) | ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[3]) & 0xFF) << 24);
+ rio_dprintk(RIO_DEBUG_PROBE, "Hmm Tested ok, uniqid = %x.\n", p->RIOHosts[p->RIONumHosts].UniqueNum);
+
+ fix_rio_pci(pdev);
+ p->RIOLastPCISearch = RIO_SUCCESS;
+ p->RIONumHosts++;
+ found++;
+ } else {
+ iounmap((char *) (p->RIOHosts[p->RIONumHosts].Caddr));
+ }
+ }
+
+ /* Then look for the older PCI card.... : */
+
+ /* These older PCI cards have problems (only byte-mode access is
+ supported), which makes them a bit awkward to support.
+ They also have problems sharing interrupts. Be careful.
+ (The driver now refuses to share interrupts for these
+ cards. This should be sufficient).
+ */
+
+ /* Then look for the older RIO/PCI devices: */
+ while ((pdev = pci_get_device(PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_SPECIALIX_RIO, pdev))) {
+ if (pci_enable_device(pdev))
+ continue;
#ifdef CONFIG_RIO_OLDPCI
- pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &tint);
-
- hp = &p->RIOHosts[p->RIONumHosts];
- hp->PaddrP = tint & PCI_BASE_ADDRESS_MEM_MASK;
- hp->Ivec = pdev->irq;
- if (((1 << hp->Ivec) & rio_irqmask) == 0)
- hp->Ivec = 0;
- hp->Ivec |= 0x8000; /* Mark as non-sharable */
- hp->Caddr = ioremap(p->RIOHosts[p->RIONumHosts].PaddrP, RIO_WINDOW_LEN);
- hp->CardP = (struct DpRam *) hp->Caddr;
- hp->Type = RIO_PCI;
- hp->Copy = rio_pcicopy;
- hp->Mode = RIO_PCI_BOOT_FROM_RAM;
- spin_lock_init(&hp->HostLock);
-
- rio_dprintk (RIO_DEBUG_PROBE, "Ivec: %x\n", hp->Ivec);
- rio_dprintk (RIO_DEBUG_PROBE, "Mode: %x\n", hp->Mode);
-
- rio_reset_interrupt (hp);
- rio_start_card_running (hp);
- rio_dprintk (RIO_DEBUG_PROBE, "Going to test it (%p/%p).\n",
- (void *)p->RIOHosts[p->RIONumHosts].PaddrP,
- p->RIOHosts[p->RIONumHosts].Caddr);
- if (RIOBoardTest( p->RIOHosts[p->RIONumHosts].PaddrP,
- p->RIOHosts[p->RIONumHosts].Caddr,
- RIO_PCI, 0 ) == RIO_SUCCESS) {
- WBYTE(p->RIOHosts[p->RIONumHosts].ResetInt, 0xff);
- p->RIOHosts[p->RIONumHosts].UniqueNum =
- ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[0]) &0xFF)<< 0)|
- ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[1]) &0xFF)<< 8)|
- ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[2]) &0xFF)<<16)|
- ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[3]) &0xFF)<<24);
- rio_dprintk (RIO_DEBUG_PROBE, "Hmm Tested ok, uniqid = %x.\n",
- p->RIOHosts[p->RIONumHosts].UniqueNum);
-
- p->RIOLastPCISearch = RIO_SUCCESS;
- p->RIONumHosts++;
- found++;
- } else {
- iounmap((char*) (p->RIOHosts[p->RIONumHosts].Caddr));
- }
+ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &tint);
+
+ hp = &p->RIOHosts[p->RIONumHosts];
+ hp->PaddrP = tint & PCI_BASE_ADDRESS_MEM_MASK;
+ hp->Ivec = pdev->irq;
+ if (((1 << hp->Ivec) & rio_irqmask) == 0)
+ hp->Ivec = 0;
+ hp->Ivec |= 0x8000; /* Mark as non-sharable */
+ hp->Caddr = ioremap(p->RIOHosts[p->RIONumHosts].PaddrP, RIO_WINDOW_LEN);
+ hp->CardP = (struct DpRam *) hp->Caddr;
+ hp->Type = RIO_PCI;
+ hp->Copy = rio_pcicopy;
+ hp->Mode = RIO_PCI_BOOT_FROM_RAM;
+ spin_lock_init(&hp->HostLock);
+
+ rio_dprintk(RIO_DEBUG_PROBE, "Ivec: %x\n", hp->Ivec);
+ rio_dprintk(RIO_DEBUG_PROBE, "Mode: %x\n", hp->Mode);
+
+ rio_reset_interrupt(hp);
+ rio_start_card_running(hp);
+ rio_dprintk(RIO_DEBUG_PROBE, "Going to test it (%p/%p).\n", (void *) p->RIOHosts[p->RIONumHosts].PaddrP, p->RIOHosts[p->RIONumHosts].Caddr);
+ if (RIOBoardTest(p->RIOHosts[p->RIONumHosts].PaddrP, p->RIOHosts[p->RIONumHosts].Caddr, RIO_PCI, 0) == RIO_SUCCESS) {
+ WBYTE(p->RIOHosts[p->RIONumHosts].ResetInt, 0xff);
+ p->RIOHosts[p->RIONumHosts].UniqueNum =
+ ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[0]) & 0xFF) << 0) |
+ ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[1]) & 0xFF) << 8) | ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[2]) & 0xFF) << 16) | ((RBYTE(p->RIOHosts[p->RIONumHosts].Unique[3]) & 0xFF) << 24);
+ rio_dprintk(RIO_DEBUG_PROBE, "Hmm Tested ok, uniqid = %x.\n", p->RIOHosts[p->RIONumHosts].UniqueNum);
+
+ p->RIOLastPCISearch = RIO_SUCCESS;
+ p->RIONumHosts++;
+ found++;
+ } else {
+ iounmap((char *) (p->RIOHosts[p->RIONumHosts].Caddr));
+ }
#else
- printk (KERN_ERR "Found an older RIO PCI card, but the driver is not "
- "compiled to support it.\n");
+ printk(KERN_ERR "Found an older RIO PCI card, but the driver is not " "compiled to support it.\n");
#endif
- }
-#endif /* PCI */
-
- /* Now probe for ISA cards... */
- for (i=0;i<NR_RIO_ADDRS;i++) {
- hp = &p->RIOHosts[p->RIONumHosts];
- hp->PaddrP = rio_probe_addrs[i];
- /* There was something about the IRQs of these cards. 'Forget what.--REW */
- hp->Ivec = 0;
- hp->Caddr = ioremap(p->RIOHosts[p->RIONumHosts].PaddrP, RIO_WINDOW_LEN);
- hp->CardP = (struct DpRam *) hp->Caddr;
- hp->Type = RIO_AT;
- hp->Copy = rio_pcicopy; /* AT card PCI???? - PVDL
- * -- YES! this is now a normal copy. Only the
- * old PCI card uses the special PCI copy.
- * Moreover, the ISA card will work with the
- * special PCI copy anyway. -- REW */
- hp->Mode = 0;
- spin_lock_init(&hp->HostLock);
-
- vpdp = get_VPD_PROM (hp);
- rio_dprintk (RIO_DEBUG_PROBE, "Got VPD ROM\n");
- okboard = 0;
- if ((strncmp (vpdp->identifier, RIO_ISA_IDENT, 16) == 0) ||
- (strncmp (vpdp->identifier, RIO_ISA2_IDENT, 16) == 0) ||
- (strncmp (vpdp->identifier, RIO_ISA3_IDENT, 16) == 0)) {
- /* Board is present... */
- if (RIOBoardTest (hp->PaddrP,
- hp->Caddr, RIO_AT, 0) == RIO_SUCCESS) {
- /* ... and feeling fine!!!! */
- rio_dprintk (RIO_DEBUG_PROBE, "Hmm Tested ok, uniqid = %x.\n",
- p->RIOHosts[p->RIONumHosts].UniqueNum);
- if (RIOAssignAT(p, hp->PaddrP, hp->Caddr, 0)) {
- rio_dprintk (RIO_DEBUG_PROBE, "Hmm Tested ok, host%d uniqid = %x.\n",
- p->RIONumHosts,
- p->RIOHosts[p->RIONumHosts-1].UniqueNum);
- okboard++;
- found++;
- }
- }
-
- if (!okboard)
- iounmap ((char*) (hp->Caddr));
- }
- }
-
-
- for (i=0;i<p->RIONumHosts;i++) {
- hp = &p->RIOHosts[i];
- if (hp->Ivec) {
- int mode = SA_SHIRQ;
- if (hp->Ivec & 0x8000) {mode = 0; hp->Ivec &= 0x7fff;}
- rio_dprintk (RIO_DEBUG_INIT, "Requesting interrupt hp: %p rio_interrupt: %d Mode: %x\n", hp,hp->Ivec, hp->Mode);
- retval = request_irq (hp->Ivec, rio_interrupt, mode, "rio", hp);
- rio_dprintk (RIO_DEBUG_INIT, "Return value from request_irq: %d\n", retval);
- if (retval) {
- printk(KERN_ERR "rio: Cannot allocate irq %d.\n", hp->Ivec);
- hp->Ivec = 0;
- }
- rio_dprintk (RIO_DEBUG_INIT, "Got irq %d.\n", hp->Ivec);
- if (hp->Ivec != 0){
- rio_dprintk (RIO_DEBUG_INIT, "Enabling interrupts on rio card.\n");
- hp->Mode |= RIO_PCI_INT_ENABLE;
- } else
- hp->Mode &= !RIO_PCI_INT_ENABLE;
- rio_dprintk (RIO_DEBUG_INIT, "New Mode: %x\n", hp->Mode);
- rio_start_card_running (hp);
- }
- /* Init the timer "always" to make sure that it can safely be
- deleted when we unload... */
-
- init_timer (&hp->timer);
- if (!hp->Ivec) {
- rio_dprintk (RIO_DEBUG_INIT, "Starting polling at %dj intervals.\n",
- rio_poll);
- hp->timer.data = i;
- hp->timer.function = rio_pollfunc;
- hp->timer.expires = jiffies + rio_poll;
- add_timer (&hp->timer);
- }
- }
-
- if (found) {
- rio_dprintk (RIO_DEBUG_INIT, "rio: total of %d boards detected.\n", found);
- rio_init_drivers ();
- } else {
- /* deregister the misc device we created earlier */
- misc_deregister(&rio_fw_device);
- }
-
- func_exit();
- return found?0:-EIO;
+ }
+#endif /* PCI */
+
+ /* Now probe for ISA cards... */
+ for (i = 0; i < NR_RIO_ADDRS; i++) {
+ hp = &p->RIOHosts[p->RIONumHosts];
+ hp->PaddrP = rio_probe_addrs[i];
+ /* There was something about the IRQs of these cards. 'Forget what.--REW */
+ hp->Ivec = 0;
+ hp->Caddr = ioremap(p->RIOHosts[p->RIONumHosts].PaddrP, RIO_WINDOW_LEN);
+ hp->CardP = (struct DpRam *) hp->Caddr;
+ hp->Type = RIO_AT;
+ hp->Copy = rio_pcicopy; /* AT card PCI???? - PVDL
+ * -- YES! this is now a normal copy. Only the
+ * old PCI card uses the special PCI copy.
+ * Moreover, the ISA card will work with the
+ * special PCI copy anyway. -- REW */
+ hp->Mode = 0;
+ spin_lock_init(&hp->HostLock);
+
+ vpdp = get_VPD_PROM(hp);
+ rio_dprintk(RIO_DEBUG_PROBE, "Got VPD ROM\n");
+ okboard = 0;
+ if ((strncmp(vpdp->identifier, RIO_ISA_IDENT, 16) == 0) || (strncmp(vpdp->identifier, RIO_ISA2_IDENT, 16) == 0) || (strncmp(vpdp->identifier, RIO_ISA3_IDENT, 16) == 0)) {
+ /* Board is present... */
+ if (RIOBoardTest(hp->PaddrP, hp->Caddr, RIO_AT, 0) == RIO_SUCCESS) {
+ /* ... and feeling fine!!!! */
+ rio_dprintk(RIO_DEBUG_PROBE, "Hmm Tested ok, uniqid = %x.\n", p->RIOHosts[p->RIONumHosts].UniqueNum);
+ if (RIOAssignAT(p, hp->PaddrP, hp->Caddr, 0)) {
+ rio_dprintk(RIO_DEBUG_PROBE, "Hmm Tested ok, host%d uniqid = %x.\n", p->RIONumHosts, p->RIOHosts[p->RIONumHosts - 1].UniqueNum);
+ okboard++;
+ found++;
+ }
+ }
+
+ if (!okboard)
+ iounmap((char *) (hp->Caddr));
+ }
+ }
+
+
+ for (i = 0; i < p->RIONumHosts; i++) {
+ hp = &p->RIOHosts[i];
+ if (hp->Ivec) {
+ int mode = SA_SHIRQ;
+ if (hp->Ivec & 0x8000) {
+ mode = 0;
+ hp->Ivec &= 0x7fff;
+ }
+ rio_dprintk(RIO_DEBUG_INIT, "Requesting interrupt hp: %p rio_interrupt: %d Mode: %x\n", hp, hp->Ivec, hp->Mode);
+ retval = request_irq(hp->Ivec, rio_interrupt, mode, "rio", hp);
+ rio_dprintk(RIO_DEBUG_INIT, "Return value from request_irq: %d\n", retval);
+ if (retval) {
+ printk(KERN_ERR "rio: Cannot allocate irq %d.\n", hp->Ivec);
+ hp->Ivec = 0;
+ }
+ rio_dprintk(RIO_DEBUG_INIT, "Got irq %d.\n", hp->Ivec);
+ if (hp->Ivec != 0) {
+ rio_dprintk(RIO_DEBUG_INIT, "Enabling interrupts on rio card.\n");
+ hp->Mode |= RIO_PCI_INT_ENABLE;
+ } else
+ hp->Mode &= !RIO_PCI_INT_ENABLE;
+ rio_dprintk(RIO_DEBUG_INIT, "New Mode: %x\n", hp->Mode);
+ rio_start_card_running(hp);
+ }
+ /* Init the timer "always" to make sure that it can safely be
+ deleted when we unload... */
+
+ init_timer(&hp->timer);
+ if (!hp->Ivec) {
+ rio_dprintk(RIO_DEBUG_INIT, "Starting polling at %dj intervals.\n", rio_poll);
+ hp->timer.data = i;
+ hp->timer.function = rio_pollfunc;
+ hp->timer.expires = jiffies + rio_poll;
+ add_timer(&hp->timer);
+ }
+ }
+
+ if (found) {
+ rio_dprintk(RIO_DEBUG_INIT, "rio: total of %d boards detected.\n", found);
+ rio_init_drivers();
+ } else {
+ /* deregister the misc device we created earlier */
+ misc_deregister(&rio_fw_device);
+ }
+
+ func_exit();
+ return found ? 0 : -EIO;
}
-static void __exit rio_exit (void)
+static void __exit rio_exit(void)
{
- int i;
- struct Host *hp;
-
- func_enter();
+ int i;
+ struct Host *hp;
+
+ func_enter();
- for (i=0,hp=p->RIOHosts;i<p->RIONumHosts;i++, hp++) {
- RIOHostReset (hp->Type, hp->CardP, hp->Slot);
- if (hp->Ivec) {
- free_irq (hp->Ivec, hp);
- rio_dprintk (RIO_DEBUG_INIT, "freed irq %d.\n", hp->Ivec);
- }
- /* It is safe/allowed to del_timer a non-active timer */
- del_timer (&hp->timer);
- }
+ for (i = 0, hp = p->RIOHosts; i < p->RIONumHosts; i++, hp++) {
+ RIOHostReset(hp->Type, hp->CardP, hp->Slot);
+ if (hp->Ivec) {
+ free_irq(hp->Ivec, hp);
+ rio_dprintk(RIO_DEBUG_INIT, "freed irq %d.\n", hp->Ivec);
+ }
+ /* It is safe/allowed to del_timer a non-active timer */
+ del_timer(&hp->timer);
+ }
- if (misc_deregister(&rio_fw_device) < 0) {
- printk (KERN_INFO "rio: couldn't deregister control-device\n");
- }
+ if (misc_deregister(&rio_fw_device) < 0) {
+ printk(KERN_INFO "rio: couldn't deregister control-device\n");
+ }
- rio_dprintk (RIO_DEBUG_CLEANUP, "Cleaning up drivers\n");
+ rio_dprintk(RIO_DEBUG_CLEANUP, "Cleaning up drivers\n");
- rio_release_drivers ();
+ rio_release_drivers();
- /* Release dynamically allocated memory */
- kfree (p->RIOPortp);
- kfree (p->RIOHosts);
- kfree (p);
+ /* Release dynamically allocated memory */
+ kfree(p->RIOPortp);
+ kfree(p->RIOHosts);
+ kfree(p);
- func_exit();
+ func_exit();
}
module_init(rio_init);
@@ -1372,4 +1333,3 @@ module_exit(rio_exit);
* tab-width: 8
* End:
*/
-
diff --git a/drivers/char/rio/rio_linux.h b/drivers/char/rio/rio_linux.h
index 1fba19d5b66..4ce77fb1fae 100644
--- a/drivers/char/rio/rio_linux.h
+++ b/drivers/char/rio/rio_linux.h
@@ -37,15 +37,15 @@
struct vpd_prom {
- unsigned short id;
- char hwrev;
- char hwass;
- int uniqid;
- char myear;
- char mweek;
- char hw_feature[5];
- char oem_id;
- char identifier[16];
+ unsigned short id;
+ char hwrev;
+ char hwass;
+ int uniqid;
+ char myear;
+ char mweek;
+ char hw_feature[5];
+ char oem_id;
+ char identifier[16];
};
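[Editor's aside] get_VPD_PROM() in rio_linux.c copies sizeof(struct vpd_prom) bytes from the card and then stores one extra NUL terminator; its own comment notes that this needs a spare byte beyond the structure shown above. A hypothetical alternative that terminates inside the identifier field, so no extra byte is required (names are illustrative):

	#include <string.h>

	struct example_vpd {
		char identifier[16 + 1];	/* one spare byte for the terminator */
	};

	static void example_set_identifier(struct example_vpd *v,
					   const char *raw, size_t rawlen)
	{
		size_t n = rawlen < 16 ? rawlen : 16;

		memcpy(v->identifier, raw, n);
		v->identifier[n] = '\0';	/* always terminated, always in bounds */
	}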
@@ -75,13 +75,13 @@ struct vpd_prom {
(L_ISIG(tty)))
-#endif /* __KERNEL__ */
+#endif /* __KERNEL__ */
#define RIO_BOARD_INTR_LOCK 1
-#ifndef RIOCTL_MISC_MINOR
+#ifndef RIOCTL_MISC_MINOR
/* Allow others to gather this into "major.h" or something like that */
#define RIOCTL_MISC_MINOR 169
#endif
@@ -121,39 +121,39 @@ struct vpd_prom {
spin_unlock_irqrestore(sem, flags)
#define rio_spin_lock(sem) \
- spin_lock(sem)
+ spin_lock(sem)
#define rio_spin_unlock(sem) \
- spin_unlock(sem)
+ spin_unlock(sem)
#endif
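[Editor's aside] RIOCTL_MISC_MINOR above is the fixed misc-device minor that the firmware loader node (rio_fw_device in rio_linux.c) registers under via misc_register(). A hedged sketch of such a declaration; only the minor number is taken from this header, the device name and fops are illustrative:

	#include <linux/module.h>
	#include <linux/fs.h>
	#include <linux/miscdevice.h>

	static struct file_operations example_fw_fops = {
		.owner = THIS_MODULE,
	};

	static struct miscdevice example_fw_device = {
		.minor = 169,			/* RIOCTL_MISC_MINOR */
		.name  = "rioctl",		/* illustrative name */
		.fops  = &example_fw_fops,
	};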
#ifdef CONFIG_RIO_OLDPCI
-static inline void *rio_memcpy_toio (void *dummy, void *dest, void *source, int n)
+static inline void *rio_memcpy_toio(void *dummy, void *dest, void *source, int n)
{
- char *dst = dest;
- char *src = source;
+ char *dst = dest;
+ char *src = source;
- while (n--) {
- writeb (*src++, dst++);
- (void) readb (dummy);
- }
+ while (n--) {
+ writeb(*src++, dst++);
+ (void) readb(dummy);
+ }
- return dest;
+ return dest;
}
-static inline void *rio_memcpy_fromio (void *dest, void *source, int n)
+static inline void *rio_memcpy_fromio(void *dest, void *source, int n)
{
- char *dst = dest;
- char *src = source;
+ char *dst = dest;
+ char *src = source;
- while (n--)
- *dst++ = readb (src++);
+ while (n--)
+ *dst++ = readb(src++);
- return dest;
+ return dest;
}
#else
@@ -179,9 +179,8 @@ static inline void *rio_memcpy_fromio (void *dest, void *source, int n)
#define func_exit() rio_dprintk (RIO_DEBUG_FLOW, "rio: exit %s\n", __FUNCTION__)
#define func_enter2() rio_dprintk (RIO_DEBUG_FLOW, "rio: enter %s (port %d)\n",__FUNCTION__, port->line)
#else
-#define rio_dprintk(f, str...) /* nothing */
+#define rio_dprintk(f, str...) /* nothing */
#define func_enter()
#define func_exit()
#define func_enter2()
#endif
-
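[Editor's aside] The CONFIG_RIO_OLDPCI copy helpers earlier in this header read back a dummy register after every writeb(): the older cards only cope with byte-wide accesses, and the read-back presumably makes sure each byte has reached the card before the next one is issued. A minimal sketch of that write-then-read-back idea (helper and register names are illustrative):

	#include <asm/io.h>

	static inline void example_write_flushed(void *dst, unsigned char val, void *example_flush_reg)
	{
		writeb(val, dst);			/* posted write towards the card */
		(void) readb(example_flush_reg);	/* read back before the next write */
	}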
diff --git a/drivers/char/rio/rioboard.h b/drivers/char/rio/rioboard.h
index cc6ac6a98f6..822c071a693 100644
--- a/drivers/char/rio/rioboard.h
+++ b/drivers/char/rio/rioboard.h
@@ -35,7 +35,7 @@
*/
-#ifndef _rioboard_h /* If RIOBOARD.H not already defined */
+#ifndef _rioboard_h /* If RIOBOARD.H not already defined */
#define _rioboard_h 1
/*****************************************************************************
@@ -46,7 +46,7 @@
/* Hardware Registers... */
-#define RIO_REG_BASE 0x7C00 /* Base of control registers */
+#define RIO_REG_BASE 0x7C00 /* Base of control registers */
#define RIO_CONFIG RIO_REG_BASE + 0x0000 /* WRITE: Configuration Register */
#define RIO_INTSET RIO_REG_BASE + 0x0080 /* WRITE: Interrupt Set */
@@ -58,30 +58,30 @@
#define RIO_RESETSTAT RIO_REG_BASE + 0x0100 /* READ: Reset Status (Jet boards only) */
/* RIO_VPD_ROM definitions... */
-#define VPD_SLX_ID1 0x00 /* READ: Specialix Identifier #1 */
-#define VPD_SLX_ID2 0x01 /* READ: Specialix Identifier #2 */
-#define VPD_HW_REV 0x02 /* READ: Hardware Revision */
-#define VPD_HW_ASSEM 0x03 /* READ: Hardware Assembly Level */
-#define VPD_UNIQUEID4 0x04 /* READ: Unique Identifier #4 */
-#define VPD_UNIQUEID3 0x05 /* READ: Unique Identifier #3 */
-#define VPD_UNIQUEID2 0x06 /* READ: Unique Identifier #2 */
-#define VPD_UNIQUEID1 0x07 /* READ: Unique Identifier #1 */
-#define VPD_MANU_YEAR 0x08 /* READ: Year Of Manufacture (0 = 1970) */
-#define VPD_MANU_WEEK 0x09 /* READ: Week Of Manufacture (0 = week 1 Jan) */
-#define VPD_HWFEATURE1 0x0A /* READ: Hardware Feature Byte 1 */
-#define VPD_HWFEATURE2 0x0B /* READ: Hardware Feature Byte 2 */
-#define VPD_HWFEATURE3 0x0C /* READ: Hardware Feature Byte 3 */
-#define VPD_HWFEATURE4 0x0D /* READ: Hardware Feature Byte 4 */
-#define VPD_HWFEATURE5 0x0E /* READ: Hardware Feature Byte 5 */
-#define VPD_OEMID 0x0F /* READ: OEM Identifier */
-#define VPD_IDENT 0x10 /* READ: Identifier string (16 bytes) */
+#define VPD_SLX_ID1 0x00 /* READ: Specialix Identifier #1 */
+#define VPD_SLX_ID2 0x01 /* READ: Specialix Identifier #2 */
+#define VPD_HW_REV 0x02 /* READ: Hardware Revision */
+#define VPD_HW_ASSEM 0x03 /* READ: Hardware Assembly Level */
+#define VPD_UNIQUEID4 0x04 /* READ: Unique Identifier #4 */
+#define VPD_UNIQUEID3 0x05 /* READ: Unique Identifier #3 */
+#define VPD_UNIQUEID2 0x06 /* READ: Unique Identifier #2 */
+#define VPD_UNIQUEID1 0x07 /* READ: Unique Identifier #1 */
+#define VPD_MANU_YEAR 0x08 /* READ: Year Of Manufacture (0 = 1970) */
+#define VPD_MANU_WEEK 0x09 /* READ: Week Of Manufacture (0 = week 1 Jan) */
+#define VPD_HWFEATURE1 0x0A /* READ: Hardware Feature Byte 1 */
+#define VPD_HWFEATURE2 0x0B /* READ: Hardware Feature Byte 2 */
+#define VPD_HWFEATURE3 0x0C /* READ: Hardware Feature Byte 3 */
+#define VPD_HWFEATURE4 0x0D /* READ: Hardware Feature Byte 4 */
+#define VPD_HWFEATURE5 0x0E /* READ: Hardware Feature Byte 5 */
+#define VPD_OEMID 0x0F /* READ: OEM Identifier */
+#define VPD_IDENT 0x10 /* READ: Identifier string (16 bytes) */
#define VPD_IDENT_LEN 0x10
/* VPD ROM Definitions... */
#define SLX_ID1 0x4D
#define SLX_ID2 0x98
-#define PRODUCT_ID(a) ((a>>4)&0xF) /* Use to obtain Product ID from VPD_UNIQUEID1 */
+#define PRODUCT_ID(a) ((a>>4)&0xF) /* Use to obtain Product ID from VPD_UNIQUEID1 */
#define ID_SX_ISA 0x2
#define ID_RIO_EISA 0x3
@@ -101,7 +101,7 @@
/* Firmware load position... */
-#define FIRMWARELOADADDR 0x7C00 /* Firmware is loaded _before_ this address */
+#define FIRMWARELOADADDR 0x7C00 /* Firmware is loaded _before_ this address */
/*****************************************************************************
***************************** *****************************
@@ -112,14 +112,14 @@
/* Control Register Definitions... */
#define RIO_ISA_IDENT "JBJGPGGHINSMJPJR"
-#define RIO_ISA_CFG_BOOTRAM 0x01 /* Boot from RAM, else Link */
-#define RIO_ISA_CFG_BUSENABLE 0x02 /* Enable processor bus */
-#define RIO_ISA_CFG_IRQMASK 0x30 /* Interrupt mask */
-#define RIO_ISA_CFG_IRQ12 0x10 /* Interrupt Level 12 */
-#define RIO_ISA_CFG_IRQ11 0x20 /* Interrupt Level 11 */
-#define RIO_ISA_CFG_IRQ9 0x30 /* Interrupt Level 9 */
-#define RIO_ISA_CFG_LINK20 0x40 /* 20Mbps link, else 10Mbps */
-#define RIO_ISA_CFG_WAITSTATE0 0x80 /* 0 waitstates, else 1 */
+#define RIO_ISA_CFG_BOOTRAM 0x01 /* Boot from RAM, else Link */
+#define RIO_ISA_CFG_BUSENABLE 0x02 /* Enable processor bus */
+#define RIO_ISA_CFG_IRQMASK 0x30 /* Interrupt mask */
+#define RIO_ISA_CFG_IRQ12 0x10 /* Interrupt Level 12 */
+#define RIO_ISA_CFG_IRQ11 0x20 /* Interrupt Level 11 */
+#define RIO_ISA_CFG_IRQ9 0x30 /* Interrupt Level 9 */
+#define RIO_ISA_CFG_LINK20 0x40 /* 20Mbps link, else 10Mbps */
+#define RIO_ISA_CFG_WAITSTATE0 0x80 /* 0 waitstates, else 1 */
/*****************************************************************************
***************************** *****************************
@@ -130,17 +130,17 @@
/* Control Register Definitions... */
#define RIO_ISA2_IDENT "JBJGPGGHINSMJPJR"
-#define RIO_ISA2_CFG_BOOTRAM 0x01 /* Boot from RAM, else Link */
-#define RIO_ISA2_CFG_BUSENABLE 0x02 /* Enable processor bus */
-#define RIO_ISA2_CFG_INTENABLE 0x04 /* Interrupt enable, else disable */
-#define RIO_ISA2_CFG_16BIT 0x08 /* 16bit mode, else 8bit */
-#define RIO_ISA2_CFG_IRQMASK 0x30 /* Interrupt mask */
-#define RIO_ISA2_CFG_IRQ15 0x00 /* Interrupt Level 15 */
-#define RIO_ISA2_CFG_IRQ12 0x10 /* Interrupt Level 12 */
-#define RIO_ISA2_CFG_IRQ11 0x20 /* Interrupt Level 11 */
-#define RIO_ISA2_CFG_IRQ9 0x30 /* Interrupt Level 9 */
-#define RIO_ISA2_CFG_LINK20 0x40 /* 20Mbps link, else 10Mbps */
-#define RIO_ISA2_CFG_WAITSTATE0 0x80 /* 0 waitstates, else 1 */
+#define RIO_ISA2_CFG_BOOTRAM 0x01 /* Boot from RAM, else Link */
+#define RIO_ISA2_CFG_BUSENABLE 0x02 /* Enable processor bus */
+#define RIO_ISA2_CFG_INTENABLE 0x04 /* Interrupt enable, else disable */
+#define RIO_ISA2_CFG_16BIT 0x08 /* 16bit mode, else 8bit */
+#define RIO_ISA2_CFG_IRQMASK 0x30 /* Interrupt mask */
+#define RIO_ISA2_CFG_IRQ15 0x00 /* Interrupt Level 15 */
+#define RIO_ISA2_CFG_IRQ12 0x10 /* Interrupt Level 12 */
+#define RIO_ISA2_CFG_IRQ11 0x20 /* Interrupt Level 11 */
+#define RIO_ISA2_CFG_IRQ9 0x30 /* Interrupt Level 9 */
+#define RIO_ISA2_CFG_LINK20 0x40 /* 20Mbps link, else 10Mbps */
+#define RIO_ISA2_CFG_WAITSTATE0 0x80 /* 0 waitstates, else 1 */
/*****************************************************************************
***************************** ******************************
@@ -151,14 +151,14 @@
/* Control Register Definitions... */
#define RIO_ISA3_IDENT "JET HOST BY KEV#"
-#define RIO_ISA3_CFG_BUSENABLE 0x02 /* Enable processor bus */
-#define RIO_ISA3_CFG_INTENABLE 0x04 /* Interrupt enable, else disable */
-#define RIO_ISA32_CFG_IRQMASK 0xF30 /* Interrupt mask */
-#define RIO_ISA3_CFG_IRQ15 0xF0 /* Interrupt Level 15 */
-#define RIO_ISA3_CFG_IRQ12 0xC0 /* Interrupt Level 12 */
-#define RIO_ISA3_CFG_IRQ11 0xB0 /* Interrupt Level 11 */
-#define RIO_ISA3_CFG_IRQ10 0xA0 /* Interrupt Level 10 */
-#define RIO_ISA3_CFG_IRQ9 0x90 /* Interrupt Level 9 */
+#define RIO_ISA3_CFG_BUSENABLE 0x02 /* Enable processor bus */
+#define RIO_ISA3_CFG_INTENABLE 0x04 /* Interrupt enable, else disable */
+#define RIO_ISA32_CFG_IRQMASK 0xF30 /* Interrupt mask */
+#define RIO_ISA3_CFG_IRQ15 0xF0 /* Interrupt Level 15 */
+#define RIO_ISA3_CFG_IRQ12 0xC0 /* Interrupt Level 12 */
+#define RIO_ISA3_CFG_IRQ11 0xB0 /* Interrupt Level 11 */
+#define RIO_ISA3_CFG_IRQ10 0xA0 /* Interrupt Level 10 */
+#define RIO_ISA3_CFG_IRQ9 0x90 /* Interrupt Level 9 */
/*****************************************************************************
********************************* ********************************
@@ -169,9 +169,9 @@
/* Control Register Definitions... */
#define RIO_MCA_IDENT "JBJGPGGHINSMJPJR"
-#define RIO_MCA_CFG_BOOTRAM 0x01 /* Boot from RAM, else Link */
-#define RIO_MCA_CFG_BUSENABLE 0x02 /* Enable processor bus */
-#define RIO_MCA_CFG_LINK20 0x40 /* 20Mbps link, else 10Mbps */
+#define RIO_MCA_CFG_BOOTRAM 0x01 /* Boot from RAM, else Link */
+#define RIO_MCA_CFG_BUSENABLE 0x02 /* Enable processor bus */
+#define RIO_MCA_CFG_LINK20 0x40 /* 20Mbps link, else 10Mbps */
/*****************************************************************************
******************************** ********************************
@@ -185,35 +185,35 @@
#define EISA_PRODUCT_NUMBER 0xC82
#define EISA_REVISION_NUMBER 0xC83
#define EISA_CARD_ENABLE 0xC84
-#define EISA_VPD_UNIQUEID4 0xC88 /* READ: Unique Identifier #4 */
-#define EISA_VPD_UNIQUEID3 0xC8A /* READ: Unique Identifier #3 */
-#define EISA_VPD_UNIQUEID2 0xC90 /* READ: Unique Identifier #2 */
-#define EISA_VPD_UNIQUEID1 0xC92 /* READ: Unique Identifier #1 */
-#define EISA_VPD_MANU_YEAR 0xC98 /* READ: Year Of Manufacture (0 = 1970) */
-#define EISA_VPD_MANU_WEEK 0xC9A /* READ: Week Of Manufacture (0 = week 1 Jan) */
+#define EISA_VPD_UNIQUEID4 0xC88 /* READ: Unique Identifier #4 */
+#define EISA_VPD_UNIQUEID3 0xC8A /* READ: Unique Identifier #3 */
+#define EISA_VPD_UNIQUEID2 0xC90 /* READ: Unique Identifier #2 */
+#define EISA_VPD_UNIQUEID1 0xC92 /* READ: Unique Identifier #1 */
+#define EISA_VPD_MANU_YEAR 0xC98 /* READ: Year Of Manufacture (0 = 1970) */
+#define EISA_VPD_MANU_WEEK 0xC9A /* READ: Week Of Manufacture (0 = week 1 Jan) */
#define EISA_MEM_ADDR_23_16 0xC00
#define EISA_MEM_ADDR_31_24 0xC01
-#define EISA_RIO_CONFIG 0xC02 /* WRITE: Configuration Register */
-#define EISA_RIO_INTSET 0xC03 /* WRITE: Interrupt Set */
-#define EISA_RIO_INTRESET 0xC03 /* READ: Interrupt Reset */
+#define EISA_RIO_CONFIG 0xC02 /* WRITE: Configuration Register */
+#define EISA_RIO_INTSET 0xC03 /* WRITE: Interrupt Set */
+#define EISA_RIO_INTRESET 0xC03 /* READ: Interrupt Reset */
/* Control Register Definitions... */
-#define RIO_EISA_CFG_BOOTRAM 0x01 /* Boot from RAM, else Link */
-#define RIO_EISA_CFG_LINK20 0x02 /* 20Mbps link, else 10Mbps */
-#define RIO_EISA_CFG_BUSENABLE 0x04 /* Enable processor bus */
-#define RIO_EISA_CFG_PROCRUN 0x08 /* Processor running, else reset */
-#define RIO_EISA_CFG_IRQMASK 0xF0 /* Interrupt mask */
-#define RIO_EISA_CFG_IRQ15 0xF0 /* Interrupt Level 15 */
-#define RIO_EISA_CFG_IRQ14 0xE0 /* Interrupt Level 14 */
-#define RIO_EISA_CFG_IRQ12 0xC0 /* Interrupt Level 12 */
-#define RIO_EISA_CFG_IRQ11 0xB0 /* Interrupt Level 11 */
-#define RIO_EISA_CFG_IRQ10 0xA0 /* Interrupt Level 10 */
-#define RIO_EISA_CFG_IRQ9 0x90 /* Interrupt Level 9 */
-#define RIO_EISA_CFG_IRQ7 0x70 /* Interrupt Level 7 */
-#define RIO_EISA_CFG_IRQ6 0x60 /* Interrupt Level 6 */
-#define RIO_EISA_CFG_IRQ5 0x50 /* Interrupt Level 5 */
-#define RIO_EISA_CFG_IRQ4 0x40 /* Interrupt Level 4 */
-#define RIO_EISA_CFG_IRQ3 0x30 /* Interrupt Level 3 */
+#define RIO_EISA_CFG_BOOTRAM 0x01 /* Boot from RAM, else Link */
+#define RIO_EISA_CFG_LINK20 0x02 /* 20Mbps link, else 10Mbps */
+#define RIO_EISA_CFG_BUSENABLE 0x04 /* Enable processor bus */
+#define RIO_EISA_CFG_PROCRUN 0x08 /* Processor running, else reset */
+#define RIO_EISA_CFG_IRQMASK 0xF0 /* Interrupt mask */
+#define RIO_EISA_CFG_IRQ15 0xF0 /* Interrupt Level 15 */
+#define RIO_EISA_CFG_IRQ14 0xE0 /* Interrupt Level 14 */
+#define RIO_EISA_CFG_IRQ12 0xC0 /* Interrupt Level 12 */
+#define RIO_EISA_CFG_IRQ11 0xB0 /* Interrupt Level 11 */
+#define RIO_EISA_CFG_IRQ10 0xA0 /* Interrupt Level 10 */
+#define RIO_EISA_CFG_IRQ9 0x90 /* Interrupt Level 9 */
+#define RIO_EISA_CFG_IRQ7 0x70 /* Interrupt Level 7 */
+#define RIO_EISA_CFG_IRQ6 0x60 /* Interrupt Level 6 */
+#define RIO_EISA_CFG_IRQ5 0x50 /* Interrupt Level 5 */
+#define RIO_EISA_CFG_IRQ4 0x40 /* Interrupt Level 4 */
+#define RIO_EISA_CFG_IRQ3 0x30 /* Interrupt Level 3 */
/*****************************************************************************
******************************** ********************************
@@ -224,20 +224,20 @@
/* Control Register Definitions... */
#define RIO_SBUS_IDENT "JBPGK#\0\0\0\0\0\0\0\0\0\0"
-#define RIO_SBUS_CFG_BOOTRAM 0x01 /* Boot from RAM, else Link */
-#define RIO_SBUS_CFG_BUSENABLE 0x02 /* Enable processor bus */
-#define RIO_SBUS_CFG_INTENABLE 0x04 /* Interrupt enable, else disable */
-#define RIO_SBUS_CFG_IRQMASK 0x38 /* Interrupt mask */
-#define RIO_SBUS_CFG_IRQNONE 0x00 /* No Interrupt */
-#define RIO_SBUS_CFG_IRQ7 0x38 /* Interrupt Level 7 */
-#define RIO_SBUS_CFG_IRQ6 0x30 /* Interrupt Level 6 */
-#define RIO_SBUS_CFG_IRQ5 0x28 /* Interrupt Level 5 */
-#define RIO_SBUS_CFG_IRQ4 0x20 /* Interrupt Level 4 */
-#define RIO_SBUS_CFG_IRQ3 0x18 /* Interrupt Level 3 */
-#define RIO_SBUS_CFG_IRQ2 0x10 /* Interrupt Level 2 */
-#define RIO_SBUS_CFG_IRQ1 0x08 /* Interrupt Level 1 */
-#define RIO_SBUS_CFG_LINK20 0x40 /* 20Mbps link, else 10Mbps */
-#define RIO_SBUS_CFG_PROC25 0x80 /* 25Mhz processor clock, else 20Mhz */
+#define RIO_SBUS_CFG_BOOTRAM 0x01 /* Boot from RAM, else Link */
+#define RIO_SBUS_CFG_BUSENABLE 0x02 /* Enable processor bus */
+#define RIO_SBUS_CFG_INTENABLE 0x04 /* Interrupt enable, else disable */
+#define RIO_SBUS_CFG_IRQMASK 0x38 /* Interrupt mask */
+#define RIO_SBUS_CFG_IRQNONE 0x00 /* No Interrupt */
+#define RIO_SBUS_CFG_IRQ7 0x38 /* Interrupt Level 7 */
+#define RIO_SBUS_CFG_IRQ6 0x30 /* Interrupt Level 6 */
+#define RIO_SBUS_CFG_IRQ5 0x28 /* Interrupt Level 5 */
+#define RIO_SBUS_CFG_IRQ4 0x20 /* Interrupt Level 4 */
+#define RIO_SBUS_CFG_IRQ3 0x18 /* Interrupt Level 3 */
+#define RIO_SBUS_CFG_IRQ2 0x10 /* Interrupt Level 2 */
+#define RIO_SBUS_CFG_IRQ1 0x08 /* Interrupt Level 1 */
+#define RIO_SBUS_CFG_LINK20 0x40 /* 20Mbps link, else 10Mbps */
+#define RIO_SBUS_CFG_PROC25 0x80 /* 25Mhz processor clock, else 20Mhz */
/*****************************************************************************
********************************* ********************************
@@ -248,18 +248,18 @@
/* Control Register Definitions... */
#define RIO_PCI_IDENT "ECDDPGJGJHJRGSK#"
-#define RIO_PCI_CFG_BOOTRAM 0x01 /* Boot from RAM, else Link */
-#define RIO_PCI_CFG_BUSENABLE 0x02 /* Enable processor bus */
-#define RIO_PCI_CFG_INTENABLE 0x04 /* Interrupt enable, else disable */
-#define RIO_PCI_CFG_LINK20 0x40 /* 20Mbps link, else 10Mbps */
-#define RIO_PCI_CFG_PROC25 0x80 /* 25Mhz processor clock, else 20Mhz */
+#define RIO_PCI_CFG_BOOTRAM 0x01 /* Boot from RAM, else Link */
+#define RIO_PCI_CFG_BUSENABLE 0x02 /* Enable processor bus */
+#define RIO_PCI_CFG_INTENABLE 0x04 /* Interrupt enable, else disable */
+#define RIO_PCI_CFG_LINK20 0x40 /* 20Mbps link, else 10Mbps */
+#define RIO_PCI_CFG_PROC25 0x80 /* 25Mhz processor clock, else 20Mhz */
/* PCI Definitions... */
-#define SPX_VENDOR_ID 0x11CB /* Assigned by the PCI SIG */
-#define SPX_DEVICE_ID 0x8000 /* RIO bridge boards */
-#define SPX_PLXDEVICE_ID 0x2000 /* PLX bridge boards */
+#define SPX_VENDOR_ID 0x11CB /* Assigned by the PCI SIG */
+#define SPX_DEVICE_ID 0x8000 /* RIO bridge boards */
+#define SPX_PLXDEVICE_ID 0x2000 /* PLX bridge boards */
#define SPX_SUB_VENDOR_ID SPX_VENDOR_ID /* Same as vendor id */
-#define RIO_SUB_SYS_ID 0x0800 /* RIO PCI board */
+#define RIO_SUB_SYS_ID 0x0800 /* RIO PCI board */
/*****************************************************************************
***************************** ******************************
@@ -270,11 +270,11 @@
/* Control Register Definitions... */
#define RIO_PCI2_IDENT "JET HOST BY KEV#"
-#define RIO_PCI2_CFG_BUSENABLE 0x02 /* Enable processor bus */
-#define RIO_PCI2_CFG_INTENABLE 0x04 /* Interrupt enable, else disable */
+#define RIO_PCI2_CFG_BUSENABLE 0x02 /* Enable processor bus */
+#define RIO_PCI2_CFG_INTENABLE 0x04 /* Interrupt enable, else disable */
/* PCI Definitions... */
-#define RIO2_SUB_SYS_ID 0x0100 /* RIO (Jet) PCI board */
+#define RIO2_SUB_SYS_ID 0x0100 /* RIO (Jet) PCI board */
#endif /*_rioboard_h */
diff --git a/drivers/char/rio/riocmd.c b/drivers/char/rio/riocmd.c
index 533085ec6f1..b97dd9fdb6b 100644
--- a/drivers/char/rio/riocmd.c
+++ b/drivers/char/rio/riocmd.c
@@ -83,102 +83,98 @@ static char *_riocmd_c_sccs_ = "@(#)riocmd.c 1.2";
static struct IdentifyRta IdRta;
static struct KillNeighbour KillUnit;
-int
-RIOFoadRta(struct Host *HostP, struct Map *MapP)
+int RIOFoadRta(struct Host *HostP, struct Map *MapP)
{
struct CmdBlk *CmdBlkP;
- rio_dprintk (RIO_DEBUG_CMD, "FOAD RTA\n");
+ rio_dprintk(RIO_DEBUG_CMD, "FOAD RTA\n");
CmdBlkP = RIOGetCmdBlk();
- if ( !CmdBlkP ) {
- rio_dprintk (RIO_DEBUG_CMD, "FOAD RTA: GetCmdBlk failed\n");
+ if (!CmdBlkP) {
+ rio_dprintk(RIO_DEBUG_CMD, "FOAD RTA: GetCmdBlk failed\n");
return -ENXIO;
}
CmdBlkP->Packet.dest_unit = MapP->ID;
CmdBlkP->Packet.dest_port = BOOT_RUP;
- CmdBlkP->Packet.src_unit = 0;
- CmdBlkP->Packet.src_port = BOOT_RUP;
- CmdBlkP->Packet.len = 0x84;
- CmdBlkP->Packet.data[0] = IFOAD;
- CmdBlkP->Packet.data[1] = 0;
- CmdBlkP->Packet.data[2] = IFOAD_MAGIC & 0xFF;
- CmdBlkP->Packet.data[3] = (IFOAD_MAGIC >> 8) & 0xFF;
-
- if ( RIOQueueCmdBlk( HostP, MapP->ID-1, CmdBlkP) == RIO_FAIL ) {
- rio_dprintk (RIO_DEBUG_CMD, "FOAD RTA: Failed to queue foad command\n");
+ CmdBlkP->Packet.src_unit = 0;
+ CmdBlkP->Packet.src_port = BOOT_RUP;
+ CmdBlkP->Packet.len = 0x84;
+ CmdBlkP->Packet.data[0] = IFOAD;
+ CmdBlkP->Packet.data[1] = 0;
+ CmdBlkP->Packet.data[2] = IFOAD_MAGIC & 0xFF;
+ CmdBlkP->Packet.data[3] = (IFOAD_MAGIC >> 8) & 0xFF;
+
+ if (RIOQueueCmdBlk(HostP, MapP->ID - 1, CmdBlkP) == RIO_FAIL) {
+ rio_dprintk(RIO_DEBUG_CMD, "FOAD RTA: Failed to queue foad command\n");
return -EIO;
}
return 0;
}
-int
-RIOZombieRta(struct Host *HostP, struct Map *MapP)
+int RIOZombieRta(struct Host *HostP, struct Map *MapP)
{
struct CmdBlk *CmdBlkP;
- rio_dprintk (RIO_DEBUG_CMD, "ZOMBIE RTA\n");
+ rio_dprintk(RIO_DEBUG_CMD, "ZOMBIE RTA\n");
CmdBlkP = RIOGetCmdBlk();
- if ( !CmdBlkP ) {
- rio_dprintk (RIO_DEBUG_CMD, "ZOMBIE RTA: GetCmdBlk failed\n");
+ if (!CmdBlkP) {
+ rio_dprintk(RIO_DEBUG_CMD, "ZOMBIE RTA: GetCmdBlk failed\n");
return -ENXIO;
}
CmdBlkP->Packet.dest_unit = MapP->ID;
CmdBlkP->Packet.dest_port = BOOT_RUP;
- CmdBlkP->Packet.src_unit = 0;
- CmdBlkP->Packet.src_port = BOOT_RUP;
- CmdBlkP->Packet.len = 0x84;
- CmdBlkP->Packet.data[0] = ZOMBIE;
- CmdBlkP->Packet.data[1] = 0;
- CmdBlkP->Packet.data[2] = ZOMBIE_MAGIC & 0xFF;
- CmdBlkP->Packet.data[3] = (ZOMBIE_MAGIC >> 8) & 0xFF;
-
- if ( RIOQueueCmdBlk( HostP, MapP->ID-1, CmdBlkP) == RIO_FAIL ) {
- rio_dprintk (RIO_DEBUG_CMD, "ZOMBIE RTA: Failed to queue zombie command\n");
+ CmdBlkP->Packet.src_unit = 0;
+ CmdBlkP->Packet.src_port = BOOT_RUP;
+ CmdBlkP->Packet.len = 0x84;
+ CmdBlkP->Packet.data[0] = ZOMBIE;
+ CmdBlkP->Packet.data[1] = 0;
+ CmdBlkP->Packet.data[2] = ZOMBIE_MAGIC & 0xFF;
+ CmdBlkP->Packet.data[3] = (ZOMBIE_MAGIC >> 8) & 0xFF;
+
+ if (RIOQueueCmdBlk(HostP, MapP->ID - 1, CmdBlkP) == RIO_FAIL) {
+ rio_dprintk(RIO_DEBUG_CMD, "ZOMBIE RTA: Failed to queue zombie command\n");
return -EIO;
}
return 0;
}
-int
-RIOCommandRta(struct rio_info *p, uint RtaUnique,
- int (* func)(struct Host *HostP, struct Map *MapP))
+int RIOCommandRta(struct rio_info *p, uint RtaUnique, int (*func) (struct Host * HostP, struct Map * MapP))
{
uint Host;
- rio_dprintk (RIO_DEBUG_CMD, "Command RTA 0x%x func 0x%x\n", RtaUnique, (int)func);
+ rio_dprintk(RIO_DEBUG_CMD, "Command RTA 0x%x func 0x%x\n", RtaUnique, (int) func);
- if ( !RtaUnique )
- return(0);
+ if (!RtaUnique)
+ return (0);
- for ( Host = 0; Host < p->RIONumHosts; Host++ ) {
+ for (Host = 0; Host < p->RIONumHosts; Host++) {
uint Rta;
struct Host *HostP = &p->RIOHosts[Host];
- for ( Rta = 0; Rta < RTAS_PER_HOST; Rta++ ) {
+ for (Rta = 0; Rta < RTAS_PER_HOST; Rta++) {
struct Map *MapP = &HostP->Mapping[Rta];
- if ( MapP->RtaUniqueNum == RtaUnique ) {
+ if (MapP->RtaUniqueNum == RtaUnique) {
uint Link;
/*
- ** now, lets just check we have a route to it...
- ** IF the routing stuff is working, then one of the
- ** topology entries for this unit will have a legit
- ** route *somewhere*. We care not where - if its got
- ** any connections, we can get to it.
- */
- for ( Link = 0; Link < LINKS_PER_UNIT; Link++ ) {
- if ( MapP->Topology[Link].Unit <= (uchar)MAX_RUP ) {
+ ** now, lets just check we have a route to it...
+ ** IF the routing stuff is working, then one of the
+ ** topology entries for this unit will have a legit
+ ** route *somewhere*. We care not where - if its got
+ ** any connections, we can get to it.
+ */
+ for (Link = 0; Link < LINKS_PER_UNIT; Link++) {
+ if (MapP->Topology[Link].Unit <= (uchar) MAX_RUP) {
/*
- ** Its worth trying the operation...
- */
- return (*func)( HostP, MapP );
+ ** Its worth trying the operation...
+ */
+ return (*func) (HostP, MapP);
}
}
}
@@ -188,60 +184,59 @@ RIOCommandRta(struct rio_info *p, uint RtaUnique,
}
-int
-RIOIdentifyRta(struct rio_info *p, caddr_t arg)
+int RIOIdentifyRta(struct rio_info *p, caddr_t arg)
{
uint Host;
- if ( copyin( (int)arg, (caddr_t)&IdRta, sizeof(IdRta) ) == COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_CMD, "RIO_IDENTIFY_RTA copy failed\n");
+ if (copyin((int) arg, (caddr_t) & IdRta, sizeof(IdRta)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CMD, "RIO_IDENTIFY_RTA copy failed\n");
p->RIOError.Error = COPYIN_FAILED;
return -EFAULT;
}
- for ( Host = 0 ; Host < p->RIONumHosts; Host++ ) {
+ for (Host = 0; Host < p->RIONumHosts; Host++) {
uint Rta;
struct Host *HostP = &p->RIOHosts[Host];
- for ( Rta = 0; Rta < RTAS_PER_HOST; Rta++ ) {
+ for (Rta = 0; Rta < RTAS_PER_HOST; Rta++) {
struct Map *MapP = &HostP->Mapping[Rta];
- if ( MapP->RtaUniqueNum == IdRta.RtaUnique ) {
+ if (MapP->RtaUniqueNum == IdRta.RtaUnique) {
uint Link;
/*
- ** now, lets just check we have a route to it...
- ** IF the routing stuff is working, then one of the
- ** topology entries for this unit will have a legit
- ** route *somewhere*. We care not where - if its got
- ** any connections, we can get to it.
- */
- for ( Link = 0; Link < LINKS_PER_UNIT; Link++ ) {
- if ( MapP->Topology[Link].Unit <= (uchar)MAX_RUP ) {
+ ** now, lets just check we have a route to it...
+ ** IF the routing stuff is working, then one of the
+ ** topology entries for this unit will have a legit
+ ** route *somewhere*. We care not where - if its got
+ ** any connections, we can get to it.
+ */
+ for (Link = 0; Link < LINKS_PER_UNIT; Link++) {
+ if (MapP->Topology[Link].Unit <= (uchar) MAX_RUP) {
/*
- ** Its worth trying the operation...
- */
+ ** Its worth trying the operation...
+ */
struct CmdBlk *CmdBlkP;
- rio_dprintk (RIO_DEBUG_CMD, "IDENTIFY RTA\n");
+ rio_dprintk(RIO_DEBUG_CMD, "IDENTIFY RTA\n");
CmdBlkP = RIOGetCmdBlk();
- if ( !CmdBlkP ) {
- rio_dprintk (RIO_DEBUG_CMD, "IDENTIFY RTA: GetCmdBlk failed\n");
+ if (!CmdBlkP) {
+ rio_dprintk(RIO_DEBUG_CMD, "IDENTIFY RTA: GetCmdBlk failed\n");
return -ENXIO;
}
-
+
CmdBlkP->Packet.dest_unit = MapP->ID;
CmdBlkP->Packet.dest_port = BOOT_RUP;
- CmdBlkP->Packet.src_unit = 0;
- CmdBlkP->Packet.src_port = BOOT_RUP;
- CmdBlkP->Packet.len = 0x84;
- CmdBlkP->Packet.data[0] = IDENTIFY;
- CmdBlkP->Packet.data[1] = 0;
- CmdBlkP->Packet.data[2] = IdRta.ID;
-
- if ( RIOQueueCmdBlk( HostP, MapP->ID-1, CmdBlkP) == RIO_FAIL ) {
- rio_dprintk (RIO_DEBUG_CMD, "IDENTIFY RTA: Failed to queue command\n");
+ CmdBlkP->Packet.src_unit = 0;
+ CmdBlkP->Packet.src_port = BOOT_RUP;
+ CmdBlkP->Packet.len = 0x84;
+ CmdBlkP->Packet.data[0] = IDENTIFY;
+ CmdBlkP->Packet.data[1] = 0;
+ CmdBlkP->Packet.data[2] = IdRta.ID;
+
+ if (RIOQueueCmdBlk(HostP, MapP->ID - 1, CmdBlkP) == RIO_FAIL) {
+ rio_dprintk(RIO_DEBUG_CMD, "IDENTIFY RTA: Failed to queue command\n");
return -EIO;
}
return 0;
@@ -249,114 +244,110 @@ RIOIdentifyRta(struct rio_info *p, caddr_t arg)
}
}
}
- }
+ }
return -ENOENT;
}
-int
-RIOKillNeighbour(struct rio_info *p, caddr_t arg)
+int RIOKillNeighbour(struct rio_info *p, caddr_t arg)
{
uint Host;
uint ID;
struct Host *HostP;
struct CmdBlk *CmdBlkP;
- rio_dprintk (RIO_DEBUG_CMD, "KILL HOST NEIGHBOUR\n");
+ rio_dprintk(RIO_DEBUG_CMD, "KILL HOST NEIGHBOUR\n");
- if ( copyin( (int)arg, (caddr_t)&KillUnit, sizeof(KillUnit) ) == COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_CMD, "RIO_KILL_NEIGHBOUR copy failed\n");
+ if (copyin((int) arg, (caddr_t) & KillUnit, sizeof(KillUnit)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CMD, "RIO_KILL_NEIGHBOUR copy failed\n");
p->RIOError.Error = COPYIN_FAILED;
return -EFAULT;
}
- if ( KillUnit.Link > 3 )
+ if (KillUnit.Link > 3)
return -ENXIO;
-
+
CmdBlkP = RIOGetCmdBlk();
- if ( !CmdBlkP ) {
- rio_dprintk (RIO_DEBUG_CMD, "UFOAD: GetCmdBlk failed\n");
+ if (!CmdBlkP) {
+ rio_dprintk(RIO_DEBUG_CMD, "UFOAD: GetCmdBlk failed\n");
return -ENXIO;
}
CmdBlkP->Packet.dest_unit = 0;
- CmdBlkP->Packet.src_unit = 0;
+ CmdBlkP->Packet.src_unit = 0;
CmdBlkP->Packet.dest_port = BOOT_RUP;
- CmdBlkP->Packet.src_port = BOOT_RUP;
- CmdBlkP->Packet.len = 0x84;
- CmdBlkP->Packet.data[0] = UFOAD;
- CmdBlkP->Packet.data[1] = KillUnit.Link;
- CmdBlkP->Packet.data[2] = UFOAD_MAGIC & 0xFF;
- CmdBlkP->Packet.data[3] = (UFOAD_MAGIC >> 8) & 0xFF;
-
- for ( Host = 0; Host < p->RIONumHosts; Host++ ) {
+ CmdBlkP->Packet.src_port = BOOT_RUP;
+ CmdBlkP->Packet.len = 0x84;
+ CmdBlkP->Packet.data[0] = UFOAD;
+ CmdBlkP->Packet.data[1] = KillUnit.Link;
+ CmdBlkP->Packet.data[2] = UFOAD_MAGIC & 0xFF;
+ CmdBlkP->Packet.data[3] = (UFOAD_MAGIC >> 8) & 0xFF;
+
+ for (Host = 0; Host < p->RIONumHosts; Host++) {
ID = 0;
HostP = &p->RIOHosts[Host];
- if ( HostP->UniqueNum == KillUnit.UniqueNum ) {
- if ( RIOQueueCmdBlk( HostP, RTAS_PER_HOST+KillUnit.Link,
- CmdBlkP) == RIO_FAIL ) {
- rio_dprintk (RIO_DEBUG_CMD, "UFOAD: Failed queue command\n");
+ if (HostP->UniqueNum == KillUnit.UniqueNum) {
+ if (RIOQueueCmdBlk(HostP, RTAS_PER_HOST + KillUnit.Link, CmdBlkP) == RIO_FAIL) {
+ rio_dprintk(RIO_DEBUG_CMD, "UFOAD: Failed queue command\n");
return -EIO;
}
return 0;
}
- for ( ID=0; ID < RTAS_PER_HOST; ID++ ) {
- if ( HostP->Mapping[ID].RtaUniqueNum == KillUnit.UniqueNum ) {
- CmdBlkP->Packet.dest_unit = ID+1;
- if ( RIOQueueCmdBlk( HostP, ID, CmdBlkP) == RIO_FAIL ) {
- rio_dprintk (RIO_DEBUG_CMD, "UFOAD: Failed queue command\n");
+ for (ID = 0; ID < RTAS_PER_HOST; ID++) {
+ if (HostP->Mapping[ID].RtaUniqueNum == KillUnit.UniqueNum) {
+ CmdBlkP->Packet.dest_unit = ID + 1;
+ if (RIOQueueCmdBlk(HostP, ID, CmdBlkP) == RIO_FAIL) {
+ rio_dprintk(RIO_DEBUG_CMD, "UFOAD: Failed queue command\n");
return -EIO;
}
return 0;
}
}
}
- RIOFreeCmdBlk( CmdBlkP );
+ RIOFreeCmdBlk(CmdBlkP);
return -ENXIO;
}
-int
-RIOSuspendBootRta(struct Host *HostP, int ID, int Link)
+int RIOSuspendBootRta(struct Host *HostP, int ID, int Link)
{
struct CmdBlk *CmdBlkP;
- rio_dprintk (RIO_DEBUG_CMD, "SUSPEND BOOT ON RTA ID %d, link %c\n", ID, 'A' + Link);
+ rio_dprintk(RIO_DEBUG_CMD, "SUSPEND BOOT ON RTA ID %d, link %c\n", ID, 'A' + Link);
CmdBlkP = RIOGetCmdBlk();
- if ( !CmdBlkP ) {
- rio_dprintk (RIO_DEBUG_CMD, "SUSPEND BOOT ON RTA: GetCmdBlk failed\n");
+ if (!CmdBlkP) {
+ rio_dprintk(RIO_DEBUG_CMD, "SUSPEND BOOT ON RTA: GetCmdBlk failed\n");
return -ENXIO;
}
CmdBlkP->Packet.dest_unit = ID;
CmdBlkP->Packet.dest_port = BOOT_RUP;
- CmdBlkP->Packet.src_unit = 0;
- CmdBlkP->Packet.src_port = BOOT_RUP;
- CmdBlkP->Packet.len = 0x84;
- CmdBlkP->Packet.data[0] = IWAIT;
- CmdBlkP->Packet.data[1] = Link;
- CmdBlkP->Packet.data[2] = IWAIT_MAGIC & 0xFF;
- CmdBlkP->Packet.data[3] = (IWAIT_MAGIC >> 8) & 0xFF;
-
- if ( RIOQueueCmdBlk( HostP, ID - 1, CmdBlkP) == RIO_FAIL ) {
- rio_dprintk (RIO_DEBUG_CMD, "SUSPEND BOOT ON RTA: Failed to queue iwait command\n");
+ CmdBlkP->Packet.src_unit = 0;
+ CmdBlkP->Packet.src_port = BOOT_RUP;
+ CmdBlkP->Packet.len = 0x84;
+ CmdBlkP->Packet.data[0] = IWAIT;
+ CmdBlkP->Packet.data[1] = Link;
+ CmdBlkP->Packet.data[2] = IWAIT_MAGIC & 0xFF;
+ CmdBlkP->Packet.data[3] = (IWAIT_MAGIC >> 8) & 0xFF;
+
+ if (RIOQueueCmdBlk(HostP, ID - 1, CmdBlkP) == RIO_FAIL) {
+ rio_dprintk(RIO_DEBUG_CMD, "SUSPEND BOOT ON RTA: Failed to queue iwait command\n");
return -EIO;
}
return 0;
}
-int
-RIOFoadWakeup(struct rio_info *p)
+int RIOFoadWakeup(struct rio_info *p)
{
int port;
register struct Port *PortP;
unsigned long flags;
- for ( port=0; port<RIO_PORTS; port++) {
+ for (port = 0; port < RIO_PORTS; port++) {
PortP = p->RIOPortp[port];
rio_spin_lock_irqsave(&PortP->portSem, flags);
@@ -377,16 +368,15 @@ RIOFoadWakeup(struct rio_info *p)
PortP->TxBufferOut = 0;
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
}
- return(0);
+ return (0);
}
/*
** Incoming command on the COMMAND_RUP to be processed.
*/
-static int
-RIOCommandRup(struct rio_info *p, uint Rup, struct Host *HostP, PKT *PacketP)
+static int RIOCommandRup(struct rio_info *p, uint Rup, struct Host *HostP, PKT * PacketP)
{
- struct PktCmd *PktCmdP = (struct PktCmd *)PacketP->data;
+ struct PktCmd *PktCmdP = (struct PktCmd *) PacketP->data;
struct Port *PortP;
struct UnixRup *UnixRupP;
ushort SysPort;
@@ -395,128 +385,114 @@ RIOCommandRup(struct rio_info *p, uint Rup, struct Host *HostP, PKT *PacketP)
ushort subCommand;
unsigned long flags;
- func_enter ();
+ func_enter();
#ifdef CHECK
- CheckHost( Host );
- CheckHostP( HostP );
- CheckPacketP( PacketP );
+ CheckHost(Host);
+ CheckHostP(HostP);
+ CheckPacketP(PacketP);
#endif
/*
- ** 16 port RTA note:
- ** Command rup packets coming from the RTA will have pkt->data[1] (which
- ** translates to PktCmdP->PhbNum) set to the host port number for the
- ** particular unit. To access the correct BaseSysPort for a 16 port RTA,
- ** we can use PhbNum to get the rup number for the appropriate 8 port
- ** block (for the first block, this should be equal to 'Rup').
- */
- rup = RBYTE(PktCmdP->PhbNum) / (ushort)PORTS_PER_RTA;
+ ** 16 port RTA note:
+ ** Command rup packets coming from the RTA will have pkt->data[1] (which
+ ** translates to PktCmdP->PhbNum) set to the host port number for the
+ ** particular unit. To access the correct BaseSysPort for a 16 port RTA,
+ ** we can use PhbNum to get the rup number for the appropriate 8 port
+ ** block (for the first block, this should be equal to 'Rup').
+ */
+ rup = RBYTE(PktCmdP->PhbNum) / (ushort) PORTS_PER_RTA;
UnixRupP = &HostP->UnixRups[rup];
- SysPort = UnixRupP->BaseSysPort +
- (RBYTE(PktCmdP->PhbNum) % (ushort)PORTS_PER_RTA);
- rio_dprintk (RIO_DEBUG_CMD, "Command on rup %d, port %d\n", rup, SysPort);
+ SysPort = UnixRupP->BaseSysPort + (RBYTE(PktCmdP->PhbNum) % (ushort) PORTS_PER_RTA);
+ rio_dprintk(RIO_DEBUG_CMD, "Command on rup %d, port %d\n", rup, SysPort);
#ifdef CHECK
- CheckRup( rup );
- CheckUnixRupP( UnixRupP );
+ CheckRup(rup);
+ CheckUnixRupP(UnixRupP);
#endif
- if ( UnixRupP->BaseSysPort == NO_PORT ) {
- rio_dprintk (RIO_DEBUG_CMD, "OBSCURE ERROR!\n");
- rio_dprintk (RIO_DEBUG_CMD, "Diagnostics follow. Please WRITE THESE DOWN and report them to Specialix Technical Support\n");
- rio_dprintk (RIO_DEBUG_CMD, "CONTROL information: Host number %d, name ``%s''\n",
- HostP-p->RIOHosts, HostP->Name );
- rio_dprintk (RIO_DEBUG_CMD, "CONTROL information: Rup number 0x%x\n", rup);
-
- if ( Rup >= (ushort)MAX_RUP ) {
- rio_dprintk (RIO_DEBUG_CMD, "CONTROL information: This is the RUP for RTA ``%s''\n",
- HostP->Mapping[Rup].Name);
+ if (UnixRupP->BaseSysPort == NO_PORT) {
+ rio_dprintk(RIO_DEBUG_CMD, "OBSCURE ERROR!\n");
+ rio_dprintk(RIO_DEBUG_CMD, "Diagnostics follow. Please WRITE THESE DOWN and report them to Specialix Technical Support\n");
+ rio_dprintk(RIO_DEBUG_CMD, "CONTROL information: Host number %d, name ``%s''\n", HostP - p->RIOHosts, HostP->Name);
+ rio_dprintk(RIO_DEBUG_CMD, "CONTROL information: Rup number 0x%x\n", rup);
+
+ if (Rup >= (ushort) MAX_RUP) {
+ rio_dprintk(RIO_DEBUG_CMD, "CONTROL information: This is the RUP for RTA ``%s''\n", HostP->Mapping[Rup].Name);
} else
- rio_dprintk (RIO_DEBUG_CMD, "CONTROL information: This is the RUP for link ``%c'' of host ``%s''\n",
- ('A' + Rup - MAX_RUP), HostP->Name);
-
- rio_dprintk (RIO_DEBUG_CMD, "PACKET information: Destination 0x%x:0x%x\n",
- PacketP->dest_unit, PacketP->dest_port );
- rio_dprintk (RIO_DEBUG_CMD, "PACKET information: Source 0x%x:0x%x\n",
- PacketP->src_unit, PacketP->src_port );
- rio_dprintk (RIO_DEBUG_CMD, "PACKET information: Length 0x%x (%d)\n", PacketP->len,PacketP->len );
- rio_dprintk (RIO_DEBUG_CMD, "PACKET information: Control 0x%x (%d)\n", PacketP->control, PacketP->control);
- rio_dprintk (RIO_DEBUG_CMD, "PACKET information: Check 0x%x (%d)\n", PacketP->csum, PacketP->csum );
- rio_dprintk (RIO_DEBUG_CMD, "COMMAND information: Host Port Number 0x%x, "
- "Command Code 0x%x\n", PktCmdP->PhbNum, PktCmdP->Command );
+ rio_dprintk(RIO_DEBUG_CMD, "CONTROL information: This is the RUP for link ``%c'' of host ``%s''\n", ('A' + Rup - MAX_RUP), HostP->Name);
+
+ rio_dprintk(RIO_DEBUG_CMD, "PACKET information: Destination 0x%x:0x%x\n", PacketP->dest_unit, PacketP->dest_port);
+ rio_dprintk(RIO_DEBUG_CMD, "PACKET information: Source 0x%x:0x%x\n", PacketP->src_unit, PacketP->src_port);
+ rio_dprintk(RIO_DEBUG_CMD, "PACKET information: Length 0x%x (%d)\n", PacketP->len, PacketP->len);
+ rio_dprintk(RIO_DEBUG_CMD, "PACKET information: Control 0x%x (%d)\n", PacketP->control, PacketP->control);
+ rio_dprintk(RIO_DEBUG_CMD, "PACKET information: Check 0x%x (%d)\n", PacketP->csum, PacketP->csum);
+ rio_dprintk(RIO_DEBUG_CMD, "COMMAND information: Host Port Number 0x%x, " "Command Code 0x%x\n", PktCmdP->PhbNum, PktCmdP->Command);
return TRUE;
}
-
#ifdef CHECK
- CheckSysPort( SysPort );
+ CheckSysPort(SysPort);
#endif
- PortP = p->RIOPortp[ SysPort ];
+ PortP = p->RIOPortp[SysPort];
rio_spin_lock_irqsave(&PortP->portSem, flags);
- switch( RBYTE(PktCmdP->Command) ) {
- case BREAK_RECEIVED:
- rio_dprintk (RIO_DEBUG_CMD, "Received a break!\n");
- /* If the current line disc. is not multi-threading and
- the current processor is not the default, reset rup_intr
- and return FALSE to ensure that the command packet is
- not freed. */
- /* Call tmgr HANGUP HERE */
- /* Fix this later when every thing works !!!! RAMRAJ */
- gs_got_break (&PortP->gs);
+ switch (RBYTE(PktCmdP->Command)) {
+ case BREAK_RECEIVED:
+ rio_dprintk(RIO_DEBUG_CMD, "Received a break!\n");
+ /* If the current line disc. is not multi-threading and
+ the current processor is not the default, reset rup_intr
+ and return FALSE to ensure that the command packet is
+ not freed. */
+ /* Call tmgr HANGUP HERE */
+ /* Fix this later when every thing works !!!! RAMRAJ */
+ gs_got_break(&PortP->gs);
+ break;
+
+ case COMPLETE:
+ rio_dprintk(RIO_DEBUG_CMD, "Command complete on phb %d host %d\n", RBYTE(PktCmdP->PhbNum), HostP - p->RIOHosts);
+ subCommand = 1;
+ switch (RBYTE(PktCmdP->SubCommand)) {
+ case MEMDUMP:
+ rio_dprintk(RIO_DEBUG_CMD, "Memory dump cmd (0x%x) from addr 0x%x\n", RBYTE(PktCmdP->SubCommand), RWORD(PktCmdP->SubAddr));
break;
+ case READ_REGISTER:
+ rio_dprintk(RIO_DEBUG_CMD, "Read register (0x%x)\n", RWORD(PktCmdP->SubAddr));
+ p->CdRegister = (RBYTE(PktCmdP->ModemStatus) & MSVR1_HOST);
+ break;
+ default:
+ subCommand = 0;
+ break;
+ }
+ if (subCommand)
+ break;
+ rio_dprintk(RIO_DEBUG_CMD, "New status is 0x%x was 0x%x\n", RBYTE(PktCmdP->PortStatus), PortP->PortState);
+ if (PortP->PortState != RBYTE(PktCmdP->PortStatus)) {
+ rio_dprintk(RIO_DEBUG_CMD, "Mark status & wakeup\n");
+ PortP->PortState = RBYTE(PktCmdP->PortStatus);
+ /* What should we do here ...
+ wakeup( &PortP->PortState );
+ */
+ } else
+ rio_dprintk(RIO_DEBUG_CMD, "No change\n");
- case COMPLETE:
- rio_dprintk (RIO_DEBUG_CMD, "Command complete on phb %d host %d\n",
- RBYTE(PktCmdP->PhbNum), HostP-p->RIOHosts);
- subCommand = 1;
- switch (RBYTE(PktCmdP->SubCommand)) {
- case MEMDUMP :
- rio_dprintk (RIO_DEBUG_CMD, "Memory dump cmd (0x%x) from addr 0x%x\n",
- RBYTE(PktCmdP->SubCommand), RWORD(PktCmdP->SubAddr));
- break;
- case READ_REGISTER :
- rio_dprintk (RIO_DEBUG_CMD, "Read register (0x%x)\n", RWORD(PktCmdP->SubAddr));
- p->CdRegister = (RBYTE(PktCmdP->ModemStatus) & MSVR1_HOST);
- break;
- default :
- subCommand = 0;
- break;
- }
- if (subCommand)
- break;
- rio_dprintk (RIO_DEBUG_CMD, "New status is 0x%x was 0x%x\n",
- RBYTE(PktCmdP->PortStatus),PortP->PortState);
- if (PortP->PortState != RBYTE(PktCmdP->PortStatus)) {
- rio_dprintk (RIO_DEBUG_CMD, "Mark status & wakeup\n");
- PortP->PortState = RBYTE(PktCmdP->PortStatus);
- /* What should we do here ...
- wakeup( &PortP->PortState );
- */
- } else
- rio_dprintk (RIO_DEBUG_CMD, "No change\n");
-
- /* FALLTHROUGH */
- case MODEM_STATUS:
+ /* FALLTHROUGH */
+ case MODEM_STATUS:
+ /*
+ ** Knock out the tbusy and tstop bits, as these are not relevant
+ ** to the check for modem status change (they're just there because
+ ** it's a convenient place to put them!).
+ */
+ ReportedModemStatus = RBYTE(PktCmdP->ModemStatus);
+ if ((PortP->ModemState & MSVR1_HOST) == (ReportedModemStatus & MSVR1_HOST)) {
+ rio_dprintk(RIO_DEBUG_CMD, "Modem status unchanged 0x%x\n", PortP->ModemState);
/*
- ** Knock out the tbusy and tstop bits, as these are not relevant
- ** to the check for modem status change (they're just there because
- ** it's a convenient place to put them!).
- */
- ReportedModemStatus = RBYTE(PktCmdP->ModemStatus);
- if ((PortP->ModemState & MSVR1_HOST) ==
- (ReportedModemStatus & MSVR1_HOST)) {
- rio_dprintk (RIO_DEBUG_CMD, "Modem status unchanged 0x%x\n", PortP->ModemState);
- /*
- ** Update ModemState just in case tbusy or tstop states have
- ** changed.
- */
- PortP->ModemState = ReportedModemStatus;
- }
- else {
- rio_dprintk (RIO_DEBUG_CMD, "Modem status change from 0x%x to 0x%x\n",
- PortP->ModemState, ReportedModemStatus);
- PortP->ModemState = ReportedModemStatus;
+ ** Update ModemState just in case tbusy or tstop states have
+ ** changed.
+ */
+ PortP->ModemState = ReportedModemStatus;
+ } else {
+ rio_dprintk(RIO_DEBUG_CMD, "Modem status change from 0x%x to 0x%x\n", PortP->ModemState, ReportedModemStatus);
+ PortP->ModemState = ReportedModemStatus;
#ifdef MODEM_SUPPORT
- if ( PortP->Mapped ) {
+ if (PortP->Mapped) {
/***********************************************************\
*************************************************************
*** ***
@@ -525,68 +501,67 @@ RIOCommandRup(struct rio_info *p, uint Rup, struct Host *HostP, PKT *PacketP)
*************************************************************
\***********************************************************/
/*
- ** If the device is a modem, then check the modem
- ** carrier.
- */
+ ** If the device is a modem, then check the modem
+ ** carrier.
+ */
if (PortP->gs.tty == NULL)
break;
if (PortP->gs.tty->termios == NULL)
break;
-
- if (!(PortP->gs.tty->termios->c_cflag & CLOCAL) &&
- ((PortP->State & (RIO_MOPEN|RIO_WOPEN)))) {
- rio_dprintk (RIO_DEBUG_CMD, "Is there a Carrier?\n");
- /*
- ** Is there a carrier?
- */
- if ( PortP->ModemState & MSVR1_CD ) {
- /*
- ** Has carrier just appeared?
- */
+ if (!(PortP->gs.tty->termios->c_cflag & CLOCAL) && ((PortP->State & (RIO_MOPEN | RIO_WOPEN)))) {
+
+ rio_dprintk(RIO_DEBUG_CMD, "Is there a Carrier?\n");
+ /*
+ ** Is there a carrier?
+ */
+ if (PortP->ModemState & MSVR1_CD) {
+ /*
+ ** Has carrier just appeared?
+ */
if (!(PortP->State & RIO_CARR_ON)) {
- rio_dprintk (RIO_DEBUG_CMD, "Carrier just came up.\n");
+ rio_dprintk(RIO_DEBUG_CMD, "Carrier just came up.\n");
PortP->State |= RIO_CARR_ON;
- /*
- ** wakeup anyone in WOPEN
- */
- if (PortP->State & (PORT_ISOPEN | RIO_WOPEN) )
- wake_up_interruptible (&PortP->gs.open_wait);
+ /*
+ ** wakeup anyone in WOPEN
+ */
+ if (PortP->State & (PORT_ISOPEN | RIO_WOPEN))
+ wake_up_interruptible(&PortP->gs.open_wait);
#ifdef STATS
- PortP->Stat.ModemOnCnt++;
+ PortP->Stat.ModemOnCnt++;
#endif
- }
+ }
} else {
- /*
- ** Has carrier just dropped?
- */
+ /*
+ ** Has carrier just dropped?
+ */
if (PortP->State & RIO_CARR_ON) {
- if (PortP->State & (PORT_ISOPEN|RIO_WOPEN|RIO_MOPEN))
- tty_hangup (PortP->gs.tty);
+ if (PortP->State & (PORT_ISOPEN | RIO_WOPEN | RIO_MOPEN))
+ tty_hangup(PortP->gs.tty);
PortP->State &= ~RIO_CARR_ON;
- rio_dprintk (RIO_DEBUG_CMD, "Carrirer just went down\n");
+							rio_dprintk(RIO_DEBUG_CMD, "Carrier just went down\n");
#ifdef STATS
- PortP->Stat.ModemOffCnt++;
+ PortP->Stat.ModemOffCnt++;
#endif
+ }
+ }
+ }
}
- }
- }
- }
#endif
- }
- break;
+ }
+ break;
- default:
- rio_dprintk (RIO_DEBUG_CMD, "Unknown command %d on CMD_RUP of host %d\n",
- RBYTE(PktCmdP->Command),HostP-p->RIOHosts);
- break;
+ default:
+ rio_dprintk(RIO_DEBUG_CMD, "Unknown command %d on CMD_RUP of host %d\n", RBYTE(PktCmdP->Command), HostP - p->RIOHosts);
+ break;
}
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- func_exit ();
+ func_exit();
return TRUE;
}
+
/*
** The command mechanism:
** Each rup has a chain of commands associated with it.
@@ -600,12 +575,11 @@ RIOCommandRup(struct rio_info *p, uint Rup, struct Host *HostP, PKT *PacketP)
/*
** Allocate an empty command block.
*/
-struct CmdBlk *
-RIOGetCmdBlk(void)
+struct CmdBlk *RIOGetCmdBlk(void)
{
struct CmdBlk *CmdBlkP;
- CmdBlkP = (struct CmdBlk *)sysbrk(sizeof(struct CmdBlk));
+ CmdBlkP = (struct CmdBlk *) sysbrk(sizeof(struct CmdBlk));
if (CmdBlkP)
bzero(CmdBlkP, sizeof(struct CmdBlk));
@@ -615,31 +589,29 @@ RIOGetCmdBlk(void)
/*
** Return a block to the head of the free list.
*/
-void
-RIOFreeCmdBlk(struct CmdBlk *CmdBlkP)
+void RIOFreeCmdBlk(struct CmdBlk *CmdBlkP)
{
- sysfree((void *)CmdBlkP, sizeof(struct CmdBlk));
+ sysfree((void *) CmdBlkP, sizeof(struct CmdBlk));
}
/*
** attach a command block to the list of commands to be performed for
** a given rup.
*/
-int
-RIOQueueCmdBlk(struct Host *HostP, uint Rup, struct CmdBlk *CmdBlkP)
+int RIOQueueCmdBlk(struct Host *HostP, uint Rup, struct CmdBlk *CmdBlkP)
{
struct CmdBlk **Base;
struct UnixRup *UnixRupP;
unsigned long flags;
#ifdef CHECK
- CheckHostP( HostP );
- CheckRup( Rup );
- CheckCmdBlkP( CmdBlkP );
+ CheckHostP(HostP);
+ CheckRup(Rup);
+ CheckCmdBlkP(CmdBlkP);
#endif
- if ( Rup >= (ushort)(MAX_RUP+LINKS_PER_UNIT) ) {
- rio_dprintk (RIO_DEBUG_CMD, "Illegal rup number %d in RIOQueueCmdBlk\n",Rup);
- RIOFreeCmdBlk( CmdBlkP );
+ if (Rup >= (ushort) (MAX_RUP + LINKS_PER_UNIT)) {
+ rio_dprintk(RIO_DEBUG_CMD, "Illegal rup number %d in RIOQueueCmdBlk\n", Rup);
+ RIOFreeCmdBlk(CmdBlkP);
return RIO_FAIL;
}
@@ -648,57 +620,52 @@ RIOQueueCmdBlk(struct Host *HostP, uint Rup, struct CmdBlk *CmdBlkP)
rio_spin_lock_irqsave(&UnixRupP->RupLock, flags);
/*
- ** If the RUP is currently inactive, then put the request
- ** straight on the RUP....
- */
- if ( (UnixRupP->CmdsWaitingP == NULL) && (UnixRupP->CmdPendingP == NULL) &&
- (RWORD(UnixRupP->RupP->txcontrol) == TX_RUP_INACTIVE ) &&
- (CmdBlkP->PreFuncP ? (*CmdBlkP->PreFuncP)(CmdBlkP->PreArg,CmdBlkP)
- :TRUE)) {
- rio_dprintk (RIO_DEBUG_CMD, "RUP inactive-placing command straight on. Cmd byte is 0x%x\n",
- CmdBlkP->Packet.data[0]);
+ ** If the RUP is currently inactive, then put the request
+ ** straight on the RUP....
+ */
+ if ((UnixRupP->CmdsWaitingP == NULL) && (UnixRupP->CmdPendingP == NULL) && (RWORD(UnixRupP->RupP->txcontrol) == TX_RUP_INACTIVE) && (CmdBlkP->PreFuncP ? (*CmdBlkP->PreFuncP) (CmdBlkP->PreArg, CmdBlkP)
+ : TRUE)) {
+ rio_dprintk(RIO_DEBUG_CMD, "RUP inactive-placing command straight on. Cmd byte is 0x%x\n", CmdBlkP->Packet.data[0]);
/*
- ** Whammy! blat that pack!
- */
- HostP->Copy( (caddr_t)&CmdBlkP->Packet,
- RIO_PTR(HostP->Caddr, UnixRupP->RupP->txpkt ), sizeof(PKT) );
+ ** Whammy! blat that pack!
+ */
+ HostP->Copy((caddr_t) & CmdBlkP->Packet, RIO_PTR(HostP->Caddr, UnixRupP->RupP->txpkt), sizeof(PKT));
/*
- ** place command packet on the pending position.
- */
+ ** place command packet on the pending position.
+ */
UnixRupP->CmdPendingP = CmdBlkP;
/*
- ** set the command register
- */
- WWORD(UnixRupP->RupP->txcontrol , TX_PACKET_READY);
+ ** set the command register
+ */
+ WWORD(UnixRupP->RupP->txcontrol, TX_PACKET_READY);
rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
return RIO_SUCCESS;
}
- rio_dprintk (RIO_DEBUG_CMD, "RUP active - en-queing\n");
+	rio_dprintk(RIO_DEBUG_CMD, "RUP active - enqueueing\n");
- if ( UnixRupP->CmdsWaitingP != NULL)
- rio_dprintk (RIO_DEBUG_CMD, "Rup active - command waiting\n");
- if ( UnixRupP->CmdPendingP != NULL )
- rio_dprintk (RIO_DEBUG_CMD, "Rup active - command pending\n");
- if ( RWORD(UnixRupP->RupP->txcontrol) != TX_RUP_INACTIVE )
- rio_dprintk (RIO_DEBUG_CMD, "Rup active - command rup not ready\n");
+ if (UnixRupP->CmdsWaitingP != NULL)
+ rio_dprintk(RIO_DEBUG_CMD, "Rup active - command waiting\n");
+ if (UnixRupP->CmdPendingP != NULL)
+ rio_dprintk(RIO_DEBUG_CMD, "Rup active - command pending\n");
+ if (RWORD(UnixRupP->RupP->txcontrol) != TX_RUP_INACTIVE)
+ rio_dprintk(RIO_DEBUG_CMD, "Rup active - command rup not ready\n");
Base = &UnixRupP->CmdsWaitingP;
- rio_dprintk (RIO_DEBUG_CMD, "First try to queue cmdblk 0x%x at 0x%x\n", (int)CmdBlkP,(int)Base);
+ rio_dprintk(RIO_DEBUG_CMD, "First try to queue cmdblk 0x%x at 0x%x\n", (int) CmdBlkP, (int) Base);
- while ( *Base ) {
- rio_dprintk (RIO_DEBUG_CMD, "Command cmdblk 0x%x here\n", (int)(*Base));
+ while (*Base) {
+ rio_dprintk(RIO_DEBUG_CMD, "Command cmdblk 0x%x here\n", (int) (*Base));
Base = &((*Base)->NextP);
- rio_dprintk (RIO_DEBUG_CMD, "Now try to queue cmd cmdblk 0x%x at 0x%x\n",
- (int)CmdBlkP,(int)Base);
+ rio_dprintk(RIO_DEBUG_CMD, "Now try to queue cmd cmdblk 0x%x at 0x%x\n", (int) CmdBlkP, (int) Base);
}
- rio_dprintk (RIO_DEBUG_CMD, "Will queue cmdblk 0x%x at 0x%x\n",(int)CmdBlkP,(int)Base);
+ rio_dprintk(RIO_DEBUG_CMD, "Will queue cmdblk 0x%x at 0x%x\n", (int) CmdBlkP, (int) Base);
*Base = CmdBlkP;
@@ -713,8 +680,7 @@ RIOQueueCmdBlk(struct Host *HostP, uint Rup, struct CmdBlk *CmdBlkP)
** Here we go - if there is an empty rup, fill it!
** must be called at splrio() or higher.
*/
-void
-RIOPollHostCommands(struct rio_info *p, struct Host *HostP)
+void RIOPollHostCommands(struct rio_info *p, struct Host *HostP)
{
register struct CmdBlk *CmdBlkP;
register struct UnixRup *UnixRupP;
@@ -723,262 +689,246 @@ RIOPollHostCommands(struct rio_info *p, struct Host *HostP)
unsigned long flags;
- Rup = MAX_RUP+LINKS_PER_UNIT;
+ Rup = MAX_RUP + LINKS_PER_UNIT;
- do { /* do this loop for each RUP */
+ do { /* do this loop for each RUP */
/*
- ** locate the rup we are processing & lock it
- */
+ ** locate the rup we are processing & lock it
+ */
UnixRupP = &HostP->UnixRups[--Rup];
spin_lock_irqsave(&UnixRupP->RupLock, flags);
/*
- ** First check for incoming commands:
- */
- if ( RWORD(UnixRupP->RupP->rxcontrol) != RX_RUP_INACTIVE ) {
+ ** First check for incoming commands:
+ */
+ if (RWORD(UnixRupP->RupP->rxcontrol) != RX_RUP_INACTIVE) {
int FreeMe;
- PacketP =(PKT *)RIO_PTR(HostP->Caddr,RWORD(UnixRupP->RupP->rxpkt));
+ PacketP = (PKT *) RIO_PTR(HostP->Caddr, RWORD(UnixRupP->RupP->rxpkt));
- ShowPacket( DBG_CMD, PacketP );
+ ShowPacket(DBG_CMD, PacketP);
- switch ( RBYTE(PacketP->dest_port) ) {
- case BOOT_RUP:
- rio_dprintk (RIO_DEBUG_CMD, "Incoming Boot %s packet '%x'\n",
- RBYTE(PacketP->len) & 0x80 ? "Command":"Data",
- RBYTE(PacketP->data[0]));
- rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
- FreeMe= RIOBootRup(p, Rup,HostP,PacketP);
- rio_spin_lock_irqsave(&UnixRupP->RupLock, flags);
- break;
+ switch (RBYTE(PacketP->dest_port)) {
+ case BOOT_RUP:
+ rio_dprintk(RIO_DEBUG_CMD, "Incoming Boot %s packet '%x'\n", RBYTE(PacketP->len) & 0x80 ? "Command" : "Data", RBYTE(PacketP->data[0]));
+ rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
+ FreeMe = RIOBootRup(p, Rup, HostP, PacketP);
+ rio_spin_lock_irqsave(&UnixRupP->RupLock, flags);
+ break;
- case COMMAND_RUP:
- /*
- ** Free the RUP lock as loss of carrier causes a
- ** ttyflush which will (eventually) call another
- ** routine that uses the RUP lock.
- */
- rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
- FreeMe= RIOCommandRup(p, Rup,HostP,PacketP);
- if (PacketP->data[5] == MEMDUMP) {
- rio_dprintk (RIO_DEBUG_CMD, "Memdump from 0x%x complete\n",
- *(ushort *) &(PacketP->data[6]));
- HostP->Copy( (caddr_t)&(PacketP->data[8]),
- (caddr_t)p->RIOMemDump, 32 );
- }
- rio_spin_lock_irqsave(&UnixRupP->RupLock, flags);
- break;
+ case COMMAND_RUP:
+ /*
+ ** Free the RUP lock as loss of carrier causes a
+ ** ttyflush which will (eventually) call another
+ ** routine that uses the RUP lock.
+ */
+ rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
+ FreeMe = RIOCommandRup(p, Rup, HostP, PacketP);
+ if (PacketP->data[5] == MEMDUMP) {
+ rio_dprintk(RIO_DEBUG_CMD, "Memdump from 0x%x complete\n", *(ushort *) & (PacketP->data[6]));
+ HostP->Copy((caddr_t) & (PacketP->data[8]), (caddr_t) p->RIOMemDump, 32);
+ }
+ rio_spin_lock_irqsave(&UnixRupP->RupLock, flags);
+ break;
- case ROUTE_RUP:
- rio_spin_unlock_irqrestore( &UnixRupP->RupLock, flags);
- FreeMe = RIORouteRup(p, Rup, HostP, PacketP );
- rio_spin_lock_irqsave( &UnixRupP->RupLock, flags );
- break;
+ case ROUTE_RUP:
+ rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
+ FreeMe = RIORouteRup(p, Rup, HostP, PacketP);
+ rio_spin_lock_irqsave(&UnixRupP->RupLock, flags);
+ break;
- default:
- rio_dprintk (RIO_DEBUG_CMD, "Unknown RUP %d\n", RBYTE(PacketP->dest_port));
- FreeMe = 1;
- break;
+ default:
+ rio_dprintk(RIO_DEBUG_CMD, "Unknown RUP %d\n", RBYTE(PacketP->dest_port));
+ FreeMe = 1;
+ break;
}
- if ( FreeMe ) {
- rio_dprintk (RIO_DEBUG_CMD, "Free processed incoming command packet\n");
- put_free_end(HostP,PacketP);
+ if (FreeMe) {
+ rio_dprintk(RIO_DEBUG_CMD, "Free processed incoming command packet\n");
+ put_free_end(HostP, PacketP);
- WWORD(UnixRupP->RupP->rxcontrol , RX_RUP_INACTIVE);
+ WWORD(UnixRupP->RupP->rxcontrol, RX_RUP_INACTIVE);
- if ( RWORD(UnixRupP->RupP->handshake)==PHB_HANDSHAKE_SET ) {
- rio_dprintk (RIO_DEBUG_CMD, "Handshake rup %d\n",Rup);
- WWORD(UnixRupP->RupP->handshake,
- PHB_HANDSHAKE_SET|PHB_HANDSHAKE_RESET);
+ if (RWORD(UnixRupP->RupP->handshake) == PHB_HANDSHAKE_SET) {
+ rio_dprintk(RIO_DEBUG_CMD, "Handshake rup %d\n", Rup);
+ WWORD(UnixRupP->RupP->handshake, PHB_HANDSHAKE_SET | PHB_HANDSHAKE_RESET);
}
}
}
/*
- ** IF a command was running on the port,
- ** and it has completed, then tidy it up.
- */
- if ( (CmdBlkP = UnixRupP->CmdPendingP) && /* ASSIGN! */
- (RWORD(UnixRupP->RupP->txcontrol) == TX_RUP_INACTIVE)) {
+ ** IF a command was running on the port,
+ ** and it has completed, then tidy it up.
+ */
+ if ((CmdBlkP = UnixRupP->CmdPendingP) && /* ASSIGN! */
+ (RWORD(UnixRupP->RupP->txcontrol) == TX_RUP_INACTIVE)) {
/*
- ** we are idle.
- ** there is a command in pending.
- ** Therefore, this command has finished.
- ** So, wakeup whoever is waiting for it (and tell them
- ** what happened).
- */
- if ( CmdBlkP->Packet.dest_port == BOOT_RUP )
- rio_dprintk (RIO_DEBUG_CMD, "Free Boot %s Command Block '%x'\n",
- CmdBlkP->Packet.len & 0x80 ? "Command":"Data",
- CmdBlkP->Packet.data[0]);
-
- rio_dprintk (RIO_DEBUG_CMD, "Command 0x%x completed\n",(int)CmdBlkP);
+ ** we are idle.
+ ** there is a command in pending.
+ ** Therefore, this command has finished.
+ ** So, wakeup whoever is waiting for it (and tell them
+ ** what happened).
+ */
+ if (CmdBlkP->Packet.dest_port == BOOT_RUP)
+ rio_dprintk(RIO_DEBUG_CMD, "Free Boot %s Command Block '%x'\n", CmdBlkP->Packet.len & 0x80 ? "Command" : "Data", CmdBlkP->Packet.data[0]);
+
+ rio_dprintk(RIO_DEBUG_CMD, "Command 0x%x completed\n", (int) CmdBlkP);
/*
- ** Clear the Rup lock to prevent mutual exclusion.
- */
- if ( CmdBlkP->PostFuncP ) {
+ ** Clear the Rup lock to prevent mutual exclusion.
+ */
+ if (CmdBlkP->PostFuncP) {
rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
- (*CmdBlkP->PostFuncP) (CmdBlkP->PostArg,CmdBlkP);
+ (*CmdBlkP->PostFuncP) (CmdBlkP->PostArg, CmdBlkP);
rio_spin_lock_irqsave(&UnixRupP->RupLock, flags);
}
/*
- ** ....clear the pending flag....
- */
+ ** ....clear the pending flag....
+ */
UnixRupP->CmdPendingP = NULL;
/*
- ** ....and return the command block to the freelist.
- */
- RIOFreeCmdBlk( CmdBlkP );
+ ** ....and return the command block to the freelist.
+ */
+ RIOFreeCmdBlk(CmdBlkP);
}
/*
- ** If there is a command for this rup, and the rup
- ** is idle, then process the command
- */
- if ( (CmdBlkP = UnixRupP->CmdsWaitingP) && /* ASSIGN! */
- (UnixRupP->CmdPendingP == NULL) &&
- (RWORD(UnixRupP->RupP->txcontrol) == TX_RUP_INACTIVE)) {
+ ** If there is a command for this rup, and the rup
+ ** is idle, then process the command
+ */
+ if ((CmdBlkP = UnixRupP->CmdsWaitingP) && /* ASSIGN! */
+ (UnixRupP->CmdPendingP == NULL) && (RWORD(UnixRupP->RupP->txcontrol) == TX_RUP_INACTIVE)) {
/*
- ** if the pre-function is non-zero, call it.
- ** If it returns RIO_FAIL then don't
- ** send this command yet!
- */
+ ** if the pre-function is non-zero, call it.
+ ** If it returns RIO_FAIL then don't
+ ** send this command yet!
+ */
#ifdef CHECK
- CheckCmdBlkP (CmdBlkP);
+ CheckCmdBlkP(CmdBlkP);
#endif
- if ( !(CmdBlkP->PreFuncP ?
- (*CmdBlkP->PreFuncP)(CmdBlkP->PreArg, CmdBlkP) : TRUE)) {
- rio_dprintk (RIO_DEBUG_CMD, "Not ready to start command 0x%x\n",(int)CmdBlkP);
- }
- else {
- rio_dprintk (RIO_DEBUG_CMD, "Start new command 0x%x Cmd byte is 0x%x\n",
- (int)CmdBlkP, CmdBlkP->Packet.data[0]);
+ if (!(CmdBlkP->PreFuncP ? (*CmdBlkP->PreFuncP) (CmdBlkP->PreArg, CmdBlkP) : TRUE)) {
+ rio_dprintk(RIO_DEBUG_CMD, "Not ready to start command 0x%x\n", (int) CmdBlkP);
+ } else {
+ rio_dprintk(RIO_DEBUG_CMD, "Start new command 0x%x Cmd byte is 0x%x\n", (int) CmdBlkP, CmdBlkP->Packet.data[0]);
/*
- ** Whammy! blat that pack!
- */
+ ** Whammy! blat that pack!
+ */
#ifdef CHECK
- CheckPacketP ((PKT *)RIO_PTR(HostP->Caddr, UnixRupP->RupP->txpkt));
+ CheckPacketP((PKT *) RIO_PTR(HostP->Caddr, UnixRupP->RupP->txpkt));
#endif
- HostP->Copy( (caddr_t)&CmdBlkP->Packet,
- RIO_PTR(HostP->Caddr, UnixRupP->RupP->txpkt), sizeof(PKT));
+ HostP->Copy((caddr_t) & CmdBlkP->Packet, RIO_PTR(HostP->Caddr, UnixRupP->RupP->txpkt), sizeof(PKT));
/*
- ** remove the command from the rup command queue...
- */
+ ** remove the command from the rup command queue...
+ */
UnixRupP->CmdsWaitingP = CmdBlkP->NextP;
/*
- ** ...and place it on the pending position.
- */
+ ** ...and place it on the pending position.
+ */
UnixRupP->CmdPendingP = CmdBlkP;
/*
- ** set the command register
- */
- WWORD(UnixRupP->RupP->txcontrol,TX_PACKET_READY);
+ ** set the command register
+ */
+ WWORD(UnixRupP->RupP->txcontrol, TX_PACKET_READY);
/*
- ** the command block will be freed
- ** when the command has been processed.
- */
+ ** the command block will be freed
+ ** when the command has been processed.
+ */
}
}
spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
- } while ( Rup );
+ } while (Rup);
}
-int
-RIOWFlushMark(int iPortP, struct CmdBlk *CmdBlkP)
+int RIOWFlushMark(int iPortP, struct CmdBlk *CmdBlkP)
{
- struct Port * PortP = (struct Port *)iPortP;
+ struct Port *PortP = (struct Port *) iPortP;
unsigned long flags;
rio_spin_lock_irqsave(&PortP->portSem, flags);
#ifdef CHECK
- CheckPortP( PortP );
+ CheckPortP(PortP);
#endif
PortP->WflushFlag++;
PortP->MagicFlags |= MAGIC_FLUSH;
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return RIOUnUse( iPortP, CmdBlkP );
+ return RIOUnUse(iPortP, CmdBlkP);
}
-int
-RIORFlushEnable(int iPortP, struct CmdBlk *CmdBlkP)
+int RIORFlushEnable(int iPortP, struct CmdBlk *CmdBlkP)
{
- struct Port * PortP = (struct Port *)iPortP;
+ struct Port *PortP = (struct Port *) iPortP;
PKT *PacketP;
unsigned long flags;
rio_spin_lock_irqsave(&PortP->portSem, flags);
- while ( can_remove_receive(&PacketP, PortP) ) {
+ while (can_remove_receive(&PacketP, PortP)) {
remove_receive(PortP);
- ShowPacket(DBG_PROC, PacketP );
- put_free_end( PortP->HostP, PacketP );
+ ShowPacket(DBG_PROC, PacketP);
+ put_free_end(PortP->HostP, PacketP);
}
- if ( RWORD(PortP->PhbP->handshake)==PHB_HANDSHAKE_SET ) {
+ if (RWORD(PortP->PhbP->handshake) == PHB_HANDSHAKE_SET) {
/*
- ** MAGIC! (Basically, handshake the RX buffer, so that
- ** the RTAs upstream can be re-enabled.)
- */
- rio_dprintk (RIO_DEBUG_CMD, "Util: Set RX handshake bit\n");
- WWORD(PortP->PhbP->handshake, PHB_HANDSHAKE_SET|PHB_HANDSHAKE_RESET);
+ ** MAGIC! (Basically, handshake the RX buffer, so that
+ ** the RTAs upstream can be re-enabled.)
+ */
+ rio_dprintk(RIO_DEBUG_CMD, "Util: Set RX handshake bit\n");
+ WWORD(PortP->PhbP->handshake, PHB_HANDSHAKE_SET | PHB_HANDSHAKE_RESET);
}
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return RIOUnUse( iPortP, CmdBlkP );
+ return RIOUnUse(iPortP, CmdBlkP);
}
-int
-RIOUnUse(int iPortP, struct CmdBlk *CmdBlkP)
+int RIOUnUse(int iPortP, struct CmdBlk *CmdBlkP)
{
- struct Port * PortP = (struct Port *)iPortP;
+ struct Port *PortP = (struct Port *) iPortP;
unsigned long flags;
rio_spin_lock_irqsave(&PortP->portSem, flags);
#ifdef CHECK
- CheckPortP( PortP );
+ CheckPortP(PortP);
#endif
- rio_dprintk (RIO_DEBUG_CMD, "Decrement in use count for port\n");
+ rio_dprintk(RIO_DEBUG_CMD, "Decrement in use count for port\n");
if (PortP->InUse) {
- if ( --PortP->InUse != NOT_INUSE ) {
+ if (--PortP->InUse != NOT_INUSE) {
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
return 0;
}
}
/*
- ** While PortP->InUse is set (i.e. a preemptive command has been sent to
- ** the RTA and is awaiting completion), any transmit data is prevented from
- ** being transferred from the write queue into the transmit packets
- ** (add_transmit) and no furthur transmit interrupt will be sent for that
- ** data. The next interrupt will occur up to 500ms later (RIOIntr is called
- ** twice a second as a saftey measure). This was the case when kermit was
- ** used to send data into a RIO port. After each packet was sent, TCFLSH
- ** was called to flush the read queue preemptively. PortP->InUse was
- ** incremented, thereby blocking the 6 byte acknowledgement packet
- ** transmitted back. This acknowledgment hung around for 500ms before
- ** being sent, thus reducing input performance substantially!.
- ** When PortP->InUse becomes NOT_INUSE, we must ensure that any data
- ** hanging around in the transmit buffer is sent immediately.
- */
+ ** While PortP->InUse is set (i.e. a preemptive command has been sent to
+ ** the RTA and is awaiting completion), any transmit data is prevented from
+ ** being transferred from the write queue into the transmit packets
+	 ** (add_transmit) and no further transmit interrupt will be sent for that
+ ** data. The next interrupt will occur up to 500ms later (RIOIntr is called
+	 ** twice a second as a safety measure). This was the case when kermit was
+ ** used to send data into a RIO port. After each packet was sent, TCFLSH
+ ** was called to flush the read queue preemptively. PortP->InUse was
+ ** incremented, thereby blocking the 6 byte acknowledgement packet
+ ** transmitted back. This acknowledgment hung around for 500ms before
+	 ** being sent, thus reducing input performance substantially!
+ ** When PortP->InUse becomes NOT_INUSE, we must ensure that any data
+ ** hanging around in the transmit buffer is sent immediately.
+ */
WWORD(PortP->HostP->ParmMapP->tx_intr, 1);
/* What to do here ..
- wakeup( (caddr_t)&(PortP->InUse) );
- */
+ wakeup( (caddr_t)&(PortP->InUse) );
+ */
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
return 0;
}
-void
-ShowPacket(uint Flags, struct PKT *PacketP)
+void ShowPacket(uint Flags, struct PKT *PacketP)
{
}
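
The RIOUnUse() comment above explains why a transmit interrupt must be forced as soon as the port's in-use count drops back to NOT_INUSE, rather than waiting up to 500ms for the next periodic poll. The following is a minimal userspace sketch of that gating pattern only; a pthread mutex stands in for portSem, a plain flag stands in for the host's tx_intr register, and every name here is illustrative rather than the driver's API.

#include <pthread.h>
#include <stdio.h>

#define NOT_INUSE 0	/* assumed value, mirroring the driver's comparison */

struct fake_port {
	pthread_mutex_t sem;	/* stand-in for PortP->portSem */
	int in_use;		/* stand-in for PortP->InUse */
	int tx_kick;		/* stand-in for ParmMapP->tx_intr */
};

/* Decrement the in-use count under the lock; once it reaches NOT_INUSE,
 * kick the transmitter immediately so queued data is not left waiting
 * for the next poll (the 500ms kermit case described above). */
static int fake_unuse(struct fake_port *p)
{
	pthread_mutex_lock(&p->sem);
	if (p->in_use && --p->in_use != NOT_INUSE) {
		pthread_mutex_unlock(&p->sem);
		return 0;	/* still busy: do not kick yet */
	}
	p->tx_kick = 1;		/* analogue of WWORD(...->tx_intr, 1) */
	pthread_mutex_unlock(&p->sem);
	return 0;
}

int main(void)
{
	static struct fake_port port = { PTHREAD_MUTEX_INITIALIZER, 2, 0 };

	fake_unuse(&port);	/* 2 -> 1: no kick */
	fake_unuse(&port);	/* 1 -> 0: transmitter kicked */
	printf("tx_kick = %d\n", port.tx_kick);
	return 0;
}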
diff --git a/drivers/char/rio/rioctrl.c b/drivers/char/rio/rioctrl.c
index b4d1a23e27e..0b7700d2f04 100644
--- a/drivers/char/rio/rioctrl.c
+++ b/drivers/char/rio/rioctrl.c
@@ -82,16 +82,16 @@ static char *_rioctrl_c_sccs_ = "@(#)rioctrl.c 1.3";
#include "rioioctl.h"
-static struct LpbReq LpbReq;
-static struct RupReq RupReq;
-static struct PortReq PortReq;
-static struct HostReq HostReq;
+static struct LpbReq LpbReq;
+static struct RupReq RupReq;
+static struct PortReq PortReq;
+static struct HostReq HostReq;
static struct HostDpRam HostDpRam;
static struct DebugCtrl DebugCtrl;
-static struct Map MapEnt;
+static struct Map MapEnt;
static struct PortSetup PortSetup;
-static struct DownLoad DownLoad;
-static struct SendPack SendPack;
+static struct DownLoad DownLoad;
+static struct SendPack SendPack;
/* static struct StreamInfo StreamInfo; */
/* static char modemtable[RIO_PORTS]; */
static struct SpecialRupCmd SpecialRupCmd;
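
The hunks that follow reindent riocontrol() and its copyin()/copyout() helpers, which wrap copy_from_user()/copy_to_user() and collapse any failure into the single COPYFAIL sentinel that each ioctl case checks before mapping it to -EFAULT. Below is a small userspace sketch of that return convention; memcpy stands in for the user-copy primitives, the COPYFAIL value is assumed, and the names are purely illustrative.

#include <stdio.h>
#include <string.h>

#define COPYFAIL ((int)-1)	/* assumed sentinel checked by every caller */

/* Userspace stand-in for the driver's copyin(): report either success (0)
 * or the single COPYFAIL value, never a partial byte count. */
static int fake_copyin(const void *user_src, void *dst, size_t siz)
{
	if (!user_src || !dst)
		return COPYFAIL;	/* analogue of copy_from_user() failing */
	memcpy(dst, user_src, siz);
	return 0;
}

int main(void)
{
	int arg = 42, val = 0;

	if (fake_copyin(&arg, &val, sizeof(val)) == COPYFAIL) {
		fprintf(stderr, "copyin failed\n");	/* caller would return -EFAULT */
		return 1;
	}
	printf("copied %d\n", val);
	return 0;
}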
@@ -99,18 +99,18 @@ static struct PortParams PortParams;
static struct portStats portStats;
static struct SubCmdStruct {
- ushort Host;
- ushort Rup;
- ushort Port;
- ushort Addr;
+ ushort Host;
+ ushort Rup;
+ ushort Port;
+ ushort Addr;
} SubCmd;
struct PortTty {
- uint port;
- struct ttystatics Tty;
+ uint port;
+ struct ttystatics Tty;
};
-static struct PortTty PortTty;
+static struct PortTty PortTty;
typedef struct ttystatics TERMIO;
/*
@@ -121,267 +121,264 @@ typedef struct ttystatics TERMIO;
** The RIOBootCodeUNKNOWN entry is there to politely tell the calling
** process to bog off.
*/
-static int
-(*RIOBootTable[MAX_PRODUCT])(struct rio_info *, struct DownLoad *) =
-{
-/* 0 */ RIOBootCodeHOST, /* Host Card */
-/* 1 */ RIOBootCodeRTA, /* RTA */
+static int
+ (*RIOBootTable[MAX_PRODUCT]) (struct rio_info *, struct DownLoad *) = {
+ /* 0 */ RIOBootCodeHOST,
+ /* Host Card */
+ /* 1 */ RIOBootCodeRTA,
+ /* RTA */
};
#define drv_makedev(maj, min) ((((uint) maj & 0xff) << 8) | ((uint) min & 0xff))
-int copyin (int arg, caddr_t dp, int siz)
+int copyin(int arg, caddr_t dp, int siz)
{
- int rv;
+ int rv;
- rio_dprintk (RIO_DEBUG_CTRL, "Copying %d bytes from user %p to %p.\n", siz, (void *)arg, dp);
- rv = copy_from_user (dp, (void *)arg, siz);
- if (rv) return COPYFAIL;
- else return rv;
+ rio_dprintk(RIO_DEBUG_CTRL, "Copying %d bytes from user %p to %p.\n", siz, (void *) arg, dp);
+ rv = copy_from_user(dp, (void *) arg, siz);
+ if (rv)
+ return COPYFAIL;
+ else
+ return rv;
}
-static int copyout (caddr_t dp, int arg, int siz)
+static int copyout(caddr_t dp, int arg, int siz)
{
- int rv;
+ int rv;
- rio_dprintk (RIO_DEBUG_CTRL, "Copying %d bytes to user %p from %p.\n", siz, (void *)arg, dp);
- rv = copy_to_user ((void *)arg, dp, siz);
- if (rv) return COPYFAIL;
- else return rv;
+ rio_dprintk(RIO_DEBUG_CTRL, "Copying %d bytes to user %p from %p.\n", siz, (void *) arg, dp);
+ rv = copy_to_user((void *) arg, dp, siz);
+ if (rv)
+ return COPYFAIL;
+ else
+ return rv;
}
-int
-riocontrol(p, dev, cmd, arg, su)
-struct rio_info * p;
-dev_t dev;
-int cmd;
-caddr_t arg;
-int su;
+int riocontrol(p, dev, cmd, arg, su)
+struct rio_info *p;
+dev_t dev;
+int cmd;
+caddr_t arg;
+int su;
{
- uint Host; /* leave me unsigned! */
- uint port; /* and me! */
- struct Host *HostP;
- ushort loop;
- int Entry;
- struct Port *PortP;
- PKT *PacketP;
- int retval = 0;
+ uint Host; /* leave me unsigned! */
+ uint port; /* and me! */
+ struct Host *HostP;
+ ushort loop;
+ int Entry;
+ struct Port *PortP;
+ PKT *PacketP;
+ int retval = 0;
unsigned long flags;
-
- func_enter ();
-
+
+ func_enter();
+
/* Confuse the compiler to think that we've initialized these */
- Host=0;
+ Host = 0;
PortP = NULL;
- rio_dprintk (RIO_DEBUG_CTRL, "control ioctl cmd: 0x%x arg: 0x%x\n", cmd, (int)arg);
+ rio_dprintk(RIO_DEBUG_CTRL, "control ioctl cmd: 0x%x arg: 0x%x\n", cmd, (int) arg);
switch (cmd) {
/*
- ** RIO_SET_TIMER
- **
- ** Change the value of the host card interrupt timer.
- ** If the host card number is -1 then all host cards are changed
- ** otherwise just the specified host card will be changed.
- */
- case RIO_SET_TIMER:
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_SET_TIMER to %dms\n", (uint)arg);
- {
- int host, value;
- host = (uint)arg >> 16;
- value = (uint)arg & 0x0000ffff;
- if (host == -1) {
- for (host = 0; host < p->RIONumHosts; host++) {
- if (p->RIOHosts[host].Flags == RC_RUNNING) {
- WWORD(p->RIOHosts[host].ParmMapP->timer , value);
- }
- }
- } else if (host >= p->RIONumHosts) {
- return -EINVAL;
- } else {
- if ( p->RIOHosts[host].Flags == RC_RUNNING ) {
- WWORD(p->RIOHosts[host].ParmMapP->timer , value);
+ ** RIO_SET_TIMER
+ **
+ ** Change the value of the host card interrupt timer.
+ ** If the host card number is -1 then all host cards are changed
+ ** otherwise just the specified host card will be changed.
+ */
+ case RIO_SET_TIMER:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_SET_TIMER to %dms\n", (uint) arg);
+ {
+ int host, value;
+ host = (uint) arg >> 16;
+ value = (uint) arg & 0x0000ffff;
+ if (host == -1) {
+ for (host = 0; host < p->RIONumHosts; host++) {
+ if (p->RIOHosts[host].Flags == RC_RUNNING) {
+ WWORD(p->RIOHosts[host].ParmMapP->timer, value);
}
}
- }
- return 0;
-
- case RIO_IDENTIFY_DRIVER:
- /*
- ** 15.10.1998 ARG - ESIL 0760 part fix
- ** Added driver ident string output.
- **
-#ifndef __THIS_RELEASE__
-#warning Driver Version string not defined !
-#endif
- cprintf("%s %s %s %s\n",
- RIO_DRV_STR,
- __THIS_RELEASE__,
- __DATE__, __TIME__ );
-
- return 0;
-
- case RIO_DISPLAY_HOST_CFG:
- **
- ** 15.10.1998 ARG - ESIL 0760 part fix
- ** Added driver host card ident string output.
- **
- ** Note that the only types currently supported
- ** are ISA and PCI. Also this driver does not
- ** (yet) distinguish between the Old PCI card
- ** and the Jet PCI card. In fact I think this
- ** driver only supports JET PCI !
- **
-
- for (Host = 0; Host < p->RIONumHosts; Host++)
- {
- HostP = &(p->RIOHosts[Host]);
-
- switch ( HostP->Type )
- {
- case RIO_AT :
- strcpy( host_type, RIO_AT_HOST_STR );
- break;
-
- case RIO_PCI :
- strcpy( host_type, RIO_PCI_HOST_STR );
- break;
-
- default :
- strcpy( host_type, "Unknown" );
- break;
+ } else if (host >= p->RIONumHosts) {
+ return -EINVAL;
+ } else {
+ if (p->RIOHosts[host].Flags == RC_RUNNING) {
+ WWORD(p->RIOHosts[host].ParmMapP->timer, value);
}
+ }
+ }
+ return 0;
- cprintf(
- "RIO Host %d - Type:%s Addr:%X IRQ:%d\n",
- Host, host_type,
- (uint)HostP->PaddrP,
- (int)HostP->Ivec - 32 );
+ case RIO_IDENTIFY_DRIVER:
+ /*
+ ** 15.10.1998 ARG - ESIL 0760 part fix
+ ** Added driver ident string output.
+ **
+ #ifndef __THIS_RELEASE__
+ #warning Driver Version string not defined !
+ #endif
+ cprintf("%s %s %s %s\n",
+ RIO_DRV_STR,
+ __THIS_RELEASE__,
+ __DATE__, __TIME__ );
+
+ return 0;
+
+ case RIO_DISPLAY_HOST_CFG:
+ **
+ ** 15.10.1998 ARG - ESIL 0760 part fix
+ ** Added driver host card ident string output.
+ **
+ ** Note that the only types currently supported
+ ** are ISA and PCI. Also this driver does not
+ ** (yet) distinguish between the Old PCI card
+ ** and the Jet PCI card. In fact I think this
+ ** driver only supports JET PCI !
+ **
+
+ for (Host = 0; Host < p->RIONumHosts; Host++)
+ {
+ HostP = &(p->RIOHosts[Host]);
+
+ switch ( HostP->Type )
+ {
+ case RIO_AT :
+ strcpy( host_type, RIO_AT_HOST_STR );
+ break;
+
+ case RIO_PCI :
+ strcpy( host_type, RIO_PCI_HOST_STR );
+ break;
+
+ default :
+ strcpy( host_type, "Unknown" );
+ break;
+ }
+
+ cprintf(
+ "RIO Host %d - Type:%s Addr:%X IRQ:%d\n",
+ Host, host_type,
+ (uint)HostP->PaddrP,
+ (int)HostP->Ivec - 32 );
+ }
+ return 0;
+ **
+ */
+
+ case RIO_FOAD_RTA:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_FOAD_RTA\n");
+ return RIOCommandRta(p, (uint) arg, RIOFoadRta);
+
+ case RIO_ZOMBIE_RTA:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_ZOMBIE_RTA\n");
+ return RIOCommandRta(p, (uint) arg, RIOZombieRta);
+
+ case RIO_IDENTIFY_RTA:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_IDENTIFY_RTA\n");
+ return RIOIdentifyRta(p, arg);
+
+ case RIO_KILL_NEIGHBOUR:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_KILL_NEIGHBOUR\n");
+ return RIOKillNeighbour(p, arg);
+
+ case SPECIAL_RUP_CMD:
+ {
+ struct CmdBlk *CmdBlkP;
+
+ rio_dprintk(RIO_DEBUG_CTRL, "SPECIAL_RUP_CMD\n");
+ if (copyin((int) arg, (caddr_t) & SpecialRupCmd, sizeof(SpecialRupCmd)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "SPECIAL_RUP_CMD copy failed\n");
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ CmdBlkP = RIOGetCmdBlk();
+ if (!CmdBlkP) {
+ rio_dprintk(RIO_DEBUG_CTRL, "SPECIAL_RUP_CMD GetCmdBlk failed\n");
+ return -ENXIO;
+ }
+ CmdBlkP->Packet = SpecialRupCmd.Packet;
+ if (SpecialRupCmd.Host >= p->RIONumHosts)
+ SpecialRupCmd.Host = 0;
+ rio_dprintk(RIO_DEBUG_CTRL, "Queue special rup command for host %d rup %d\n", SpecialRupCmd.Host, SpecialRupCmd.RupNum);
+ if (RIOQueueCmdBlk(&p->RIOHosts[SpecialRupCmd.Host], SpecialRupCmd.RupNum, CmdBlkP) == RIO_FAIL) {
+ cprintf("FAILED TO QUEUE SPECIAL RUP COMMAND\n");
}
return 0;
- **
- */
-
- case RIO_FOAD_RTA:
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_FOAD_RTA\n");
- return RIOCommandRta(p, (uint)arg, RIOFoadRta);
-
- case RIO_ZOMBIE_RTA:
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_ZOMBIE_RTA\n");
- return RIOCommandRta(p, (uint)arg, RIOZombieRta);
-
- case RIO_IDENTIFY_RTA:
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_IDENTIFY_RTA\n");
- return RIOIdentifyRta(p, arg);
-
- case RIO_KILL_NEIGHBOUR:
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_KILL_NEIGHBOUR\n");
- return RIOKillNeighbour(p, arg);
-
- case SPECIAL_RUP_CMD:
- {
- struct CmdBlk *CmdBlkP;
-
- rio_dprintk (RIO_DEBUG_CTRL, "SPECIAL_RUP_CMD\n");
- if (copyin((int)arg, (caddr_t)&SpecialRupCmd,
- sizeof(SpecialRupCmd)) == COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_CTRL, "SPECIAL_RUP_CMD copy failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- CmdBlkP = RIOGetCmdBlk();
- if ( !CmdBlkP ) {
- rio_dprintk (RIO_DEBUG_CTRL, "SPECIAL_RUP_CMD GetCmdBlk failed\n");
- return -ENXIO;
- }
- CmdBlkP->Packet = SpecialRupCmd.Packet;
- if ( SpecialRupCmd.Host >= p->RIONumHosts )
- SpecialRupCmd.Host = 0;
- rio_dprintk (RIO_DEBUG_CTRL, "Queue special rup command for host %d rup %d\n",
- SpecialRupCmd.Host, SpecialRupCmd.RupNum);
- if (RIOQueueCmdBlk(&p->RIOHosts[SpecialRupCmd.Host],
- SpecialRupCmd.RupNum, CmdBlkP) == RIO_FAIL) {
- cprintf("FAILED TO QUEUE SPECIAL RUP COMMAND\n");
- }
- return 0;
- }
+ }
- case RIO_DEBUG_MEM:
+ case RIO_DEBUG_MEM:
#ifdef DEBUG_MEM_SUPPORT
-RIO_DEBUG_CTRL, if (su)
- return rio_RIODebugMemory(RIO_DEBUG_CTRL, arg);
- else
+ RIO_DEBUG_CTRL, if (su)
+ return rio_RIODebugMemory(RIO_DEBUG_CTRL, arg);
+ else
#endif
- return -EPERM;
-
- case RIO_ALL_MODEM:
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_ALL_MODEM\n");
- p->RIOError.Error = IOCTL_COMMAND_UNKNOWN;
- return -EINVAL;
-
- case RIO_GET_TABLE:
- /*
- ** Read the routing table from the device driver to user space
- */
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_GET_TABLE\n");
+ return -EPERM;
- if ((retval = RIOApel(p)) != 0)
- return retval;
+ case RIO_ALL_MODEM:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_ALL_MODEM\n");
+ p->RIOError.Error = IOCTL_COMMAND_UNKNOWN;
+ return -EINVAL;
- if (copyout((caddr_t)p->RIOConnectTable, (int)arg,
- TOTAL_MAP_ENTRIES*sizeof(struct Map)) == COPYFAIL) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_GET_TABLE copy failed\n");
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
+ case RIO_GET_TABLE:
+ /*
+ ** Read the routing table from the device driver to user space
+ */
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_GET_TABLE\n");
+
+ if ((retval = RIOApel(p)) != 0)
+ return retval;
+
+ if (copyout((caddr_t) p->RIOConnectTable, (int) arg, TOTAL_MAP_ENTRIES * sizeof(struct Map)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_GET_TABLE copy failed\n");
+ p->RIOError.Error = COPYOUT_FAILED;
+ return -EFAULT;
+ }
+
+ {
+ int entry;
+ rio_dprintk(RIO_DEBUG_CTRL, "*****\nMAP ENTRIES\n");
+ for (entry = 0; entry < TOTAL_MAP_ENTRIES; entry++) {
+ if ((p->RIOConnectTable[entry].ID == 0) && (p->RIOConnectTable[entry].HostUniqueNum == 0) && (p->RIOConnectTable[entry].RtaUniqueNum == 0))
+ continue;
+
+ rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.HostUniqueNum = 0x%x\n", entry, p->RIOConnectTable[entry].HostUniqueNum);
+ rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.RtaUniqueNum = 0x%x\n", entry, p->RIOConnectTable[entry].RtaUniqueNum);
+ rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.ID = 0x%x\n", entry, p->RIOConnectTable[entry].ID);
+ rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.ID2 = 0x%x\n", entry, p->RIOConnectTable[entry].ID2);
+ rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.Flags = 0x%x\n", entry, (int) p->RIOConnectTable[entry].Flags);
+ rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.SysPort = 0x%x\n", entry, (int) p->RIOConnectTable[entry].SysPort);
+ rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.Top[0].Unit = %x\n", entry, p->RIOConnectTable[entry].Topology[0].Unit);
+ rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.Top[0].Link = %x\n", entry, p->RIOConnectTable[entry].Topology[0].Link);
+ rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.Top[1].Unit = %x\n", entry, p->RIOConnectTable[entry].Topology[1].Unit);
+ rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.Top[1].Link = %x\n", entry, p->RIOConnectTable[entry].Topology[1].Link);
+ rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.Top[2].Unit = %x\n", entry, p->RIOConnectTable[entry].Topology[2].Unit);
+ rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.Top[2].Link = %x\n", entry, p->RIOConnectTable[entry].Topology[2].Link);
+ rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.Top[3].Unit = %x\n", entry, p->RIOConnectTable[entry].Topology[3].Unit);
+				rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.Top[3].Link = %x\n", entry, p->RIOConnectTable[entry].Topology[3].Link);
+ rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.Name = %s\n", entry, p->RIOConnectTable[entry].Name);
+ }
+ rio_dprintk(RIO_DEBUG_CTRL, "*****\nEND MAP ENTRIES\n");
+ }
+ p->RIOQuickCheck = NOT_CHANGED; /* a table has been gotten */
+ return 0;
- {
- int entry;
- rio_dprintk (RIO_DEBUG_CTRL, "*****\nMAP ENTRIES\n");
- for ( entry=0; entry<TOTAL_MAP_ENTRIES; entry++ )
- {
- if ((p->RIOConnectTable[entry].ID == 0) &&
- (p->RIOConnectTable[entry].HostUniqueNum == 0) &&
- (p->RIOConnectTable[entry].RtaUniqueNum == 0)) continue;
-
- rio_dprintk (RIO_DEBUG_CTRL, "Map entry %d.HostUniqueNum = 0x%x\n", entry, p->RIOConnectTable[entry].HostUniqueNum );
- rio_dprintk (RIO_DEBUG_CTRL, "Map entry %d.RtaUniqueNum = 0x%x\n", entry, p->RIOConnectTable[entry].RtaUniqueNum );
- rio_dprintk (RIO_DEBUG_CTRL, "Map entry %d.ID = 0x%x\n", entry, p->RIOConnectTable[entry].ID );
- rio_dprintk (RIO_DEBUG_CTRL, "Map entry %d.ID2 = 0x%x\n", entry, p->RIOConnectTable[entry].ID2 );
- rio_dprintk (RIO_DEBUG_CTRL, "Map entry %d.Flags = 0x%x\n", entry, (int)p->RIOConnectTable[entry].Flags );
- rio_dprintk (RIO_DEBUG_CTRL, "Map entry %d.SysPort = 0x%x\n", entry, (int)p->RIOConnectTable[entry].SysPort );
- rio_dprintk (RIO_DEBUG_CTRL, "Map entry %d.Top[0].Unit = %x\n", entry, p->RIOConnectTable[entry].Topology[0].Unit );
- rio_dprintk (RIO_DEBUG_CTRL, "Map entry %d.Top[0].Link = %x\n", entry, p->RIOConnectTable[entry].Topology[0].Link );
- rio_dprintk (RIO_DEBUG_CTRL, "Map entry %d.Top[1].Unit = %x\n", entry, p->RIOConnectTable[entry].Topology[1].Unit );
- rio_dprintk (RIO_DEBUG_CTRL, "Map entry %d.Top[1].Link = %x\n", entry, p->RIOConnectTable[entry].Topology[1].Link );
- rio_dprintk (RIO_DEBUG_CTRL, "Map entry %d.Top[2].Unit = %x\n", entry, p->RIOConnectTable[entry].Topology[2].Unit );
- rio_dprintk (RIO_DEBUG_CTRL, "Map entry %d.Top[2].Link = %x\n", entry, p->RIOConnectTable[entry].Topology[2].Link );
- rio_dprintk (RIO_DEBUG_CTRL, "Map entry %d.Top[3].Unit = %x\n", entry, p->RIOConnectTable[entry].Topology[3].Unit );
- rio_dprintk (RIO_DEBUG_CTRL, "Map entry %d.Top[4].Link = %x\n", entry, p->RIOConnectTable[entry].Topology[3].Link );
- rio_dprintk (RIO_DEBUG_CTRL, "Map entry %d.Name = %s\n", entry, p->RIOConnectTable[entry].Name );
- }
- rio_dprintk (RIO_DEBUG_CTRL, "*****\nEND MAP ENTRIES\n");
- }
- p->RIOQuickCheck = NOT_CHANGED; /* a table has been gotten */
- return 0;
-
- case RIO_PUT_TABLE:
- /*
- ** Write the routing table to the device driver from user space
- */
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_PUT_TABLE\n");
-
- if ( !su ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_PUT_TABLE !Root\n");
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- if ( copyin((int)arg, (caddr_t)&p->RIOConnectTable[0],
- TOTAL_MAP_ENTRIES*sizeof(struct Map) ) == COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_PUT_TABLE copy failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
+ case RIO_PUT_TABLE:
+ /*
+ ** Write the routing table to the device driver from user space
+ */
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_PUT_TABLE\n");
+
+ if (!su) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_PUT_TABLE !Root\n");
+ p->RIOError.Error = NOT_SUPER_USER;
+ return -EPERM;
+ }
+ if (copyin((int) arg, (caddr_t) & p->RIOConnectTable[0], TOTAL_MAP_ENTRIES * sizeof(struct Map)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_PUT_TABLE copy failed\n");
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
/*
***********************************
{
@@ -409,1353 +406,1244 @@ RIO_DEBUG_CTRL, if (su)
}
***********************************
*/
- return RIONewTable(p);
-
- case RIO_GET_BINDINGS :
- /*
- ** Send bindings table, containing unique numbers of RTAs owned
- ** by this system to user space
- */
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_GET_BINDINGS\n");
+ return RIONewTable(p);
- if ( !su )
- {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_GET_BINDINGS !Root\n");
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- if (copyout((caddr_t) p->RIOBindTab, (int)arg,
- (sizeof(ulong) * MAX_RTA_BINDINGS)) == COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_GET_BINDINGS copy failed\n");
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return 0;
-
- case RIO_PUT_BINDINGS :
+ case RIO_GET_BINDINGS:
+ /*
+ ** Send bindings table, containing unique numbers of RTAs owned
+ ** by this system to user space
+ */
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_GET_BINDINGS\n");
+
+ if (!su) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_GET_BINDINGS !Root\n");
+ p->RIOError.Error = NOT_SUPER_USER;
+ return -EPERM;
+ }
+ if (copyout((caddr_t) p->RIOBindTab, (int) arg, (sizeof(ulong) * MAX_RTA_BINDINGS)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_GET_BINDINGS copy failed\n");
+ p->RIOError.Error = COPYOUT_FAILED;
+ return -EFAULT;
+ }
+ return 0;
+
+ case RIO_PUT_BINDINGS:
+ /*
+ ** Receive a bindings table, containing unique numbers of RTAs owned
+ ** by this system
+ */
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_PUT_BINDINGS\n");
+
+ if (!su) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_PUT_BINDINGS !Root\n");
+ p->RIOError.Error = NOT_SUPER_USER;
+ return -EPERM;
+ }
+ if (copyin((int) arg, (caddr_t) & p->RIOBindTab[0], (sizeof(ulong) * MAX_RTA_BINDINGS)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_PUT_BINDINGS copy failed\n");
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ return 0;
+
+ case RIO_BIND_RTA:
+ {
+ int EmptySlot = -1;
/*
- ** Receive a bindings table, containing unique numbers of RTAs owned
- ** by this system
- */
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_PUT_BINDINGS\n");
-
- if ( !su )
- {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_PUT_BINDINGS !Root\n");
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- if (copyin((int)arg, (caddr_t)&p->RIOBindTab[0],
- (sizeof(ulong) * MAX_RTA_BINDINGS))==COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_PUT_BINDINGS copy failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- return 0;
-
- case RIO_BIND_RTA :
- {
- int EmptySlot = -1;
- /*
- ** Bind this RTA to host, so that it will be booted by
- ** host in 'boot owned RTAs' mode.
- */
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_BIND_RTA\n");
-
- if ( !su ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_BIND_RTA !Root\n");
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- for (Entry = 0; Entry < MAX_RTA_BINDINGS; Entry++) {
- if ((EmptySlot == -1) && (p->RIOBindTab[Entry] == 0L))
- EmptySlot = Entry;
- else if (p->RIOBindTab[Entry] == (int) arg) {
- /*
- ** Already exists - delete
- */
- p->RIOBindTab[Entry] = 0L;
- rio_dprintk (RIO_DEBUG_CTRL, "Removing Rta %x from p->RIOBindTab\n",
- (int) arg);
- return 0;
- }
- }
+ ** Bind this RTA to host, so that it will be booted by
+ ** host in 'boot owned RTAs' mode.
+ */
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_BIND_RTA\n");
+
+ if (!su) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_BIND_RTA !Root\n");
+ p->RIOError.Error = NOT_SUPER_USER;
+ return -EPERM;
+ }
+ for (Entry = 0; Entry < MAX_RTA_BINDINGS; Entry++) {
+ if ((EmptySlot == -1) && (p->RIOBindTab[Entry] == 0L))
+ EmptySlot = Entry;
+ else if (p->RIOBindTab[Entry] == (int) arg) {
/*
- ** Dosen't exist - add
- */
- if (EmptySlot != -1) {
- p->RIOBindTab[EmptySlot] = (int) arg;
- rio_dprintk (RIO_DEBUG_CTRL, "Adding Rta %x to p->RIOBindTab\n",
- (int) arg);
- }
- else {
- rio_dprintk (RIO_DEBUG_CTRL, "p->RIOBindTab full! - Rta %x not added\n",
- (int) arg);
- return -ENOMEM;
- }
+ ** Already exists - delete
+ */
+ p->RIOBindTab[Entry] = 0L;
+ rio_dprintk(RIO_DEBUG_CTRL, "Removing Rta %x from p->RIOBindTab\n", (int) arg);
return 0;
}
-
- case RIO_RESUME :
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_RESUME\n");
- port = (uint) arg;
- if ((port < 0) || (port > 511)) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_RESUME: Bad port number %d\n", port);
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
- PortP = p->RIOPortp[port];
- if (!PortP->Mapped) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_RESUME: Port %d not mapped\n", port);
- p->RIOError.Error = PORT_NOT_MAPPED_INTO_SYSTEM;
- return -EINVAL;
- }
- if (!(PortP->State & (RIO_LOPEN | RIO_MOPEN))) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_RESUME: Port %d not open\n", port);
- return -EINVAL;
- }
-
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- if (RIOPreemptiveCmd(p, (p->RIOPortp[port]), RESUME) ==
- RIO_FAIL) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_RESUME failed\n");
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return -EBUSY;
- }
- else {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_RESUME: Port %d resumed\n", port);
- PortP->State |= RIO_BUSY;
- }
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return retval;
-
- case RIO_ASSIGN_RTA:
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_ASSIGN_RTA\n");
- if ( !su ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_ASSIGN_RTA !Root\n");
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- if (copyin((int)arg, (caddr_t)&MapEnt, sizeof(MapEnt))
- == COPYFAIL) {
- rio_dprintk (RIO_DEBUG_CTRL, "Copy from user space failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- return RIOAssignRta(p, &MapEnt);
-
- case RIO_CHANGE_NAME:
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_CHANGE_NAME\n");
- if ( !su ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_CHANGE_NAME !Root\n");
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- if (copyin((int)arg, (caddr_t)&MapEnt, sizeof(MapEnt))
- == COPYFAIL) {
- rio_dprintk (RIO_DEBUG_CTRL, "Copy from user space failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- return RIOChangeName(p, &MapEnt);
-
- case RIO_DELETE_RTA:
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_DELETE_RTA\n");
- if ( !su ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_DELETE_RTA !Root\n");
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- if (copyin((int)arg, (caddr_t)&MapEnt, sizeof(MapEnt))
- == COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_CTRL, "Copy from data space failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- return RIODeleteRta(p, &MapEnt);
-
- case RIO_QUICK_CHECK:
- /*
- ** 09.12.1998 ARG - ESIL 0776 part fix
- ** A customer was using this to get the RTAs
- ** connect/disconnect status.
- ** RIOConCon() had been botched use RIOHalted
- ** to keep track of RTA connections and
- ** disconnections. That has been changed and
- ** RIORtaDisCons in the rio_info struct now
- ** does the job. So we need to return the value
- ** of RIORtaCons instead of RIOHalted.
- **
- if (copyout((caddr_t)&p->RIOHalted,(int)arg,
- sizeof(uint))==COPYFAIL) {
- **
- */
-
- if (copyout((caddr_t)&p->RIORtaDisCons,(int)arg,
- sizeof(uint))==COPYFAIL) {
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return 0;
-
- case RIO_LAST_ERROR:
- if (copyout((caddr_t)&p->RIOError, (int)arg,
- sizeof(struct Error)) ==COPYFAIL )
- return -EFAULT;
- return 0;
-
- case RIO_GET_LOG:
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_GET_LOG\n");
+ }
+ /*
+			 ** Doesn't exist - add
+ */
+ if (EmptySlot != -1) {
+ p->RIOBindTab[EmptySlot] = (int) arg;
+ rio_dprintk(RIO_DEBUG_CTRL, "Adding Rta %x to p->RIOBindTab\n", (int) arg);
+ } else {
+ rio_dprintk(RIO_DEBUG_CTRL, "p->RIOBindTab full! - Rta %x not added\n", (int) arg);
+ return -ENOMEM;
+ }
+ return 0;
+ }
+
+ case RIO_RESUME:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_RESUME\n");
+ port = (uint) arg;
+ if ((port < 0) || (port > 511)) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_RESUME: Bad port number %d\n", port);
+ p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
+ return -EINVAL;
+ }
+ PortP = p->RIOPortp[port];
+ if (!PortP->Mapped) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_RESUME: Port %d not mapped\n", port);
+ p->RIOError.Error = PORT_NOT_MAPPED_INTO_SYSTEM;
+ return -EINVAL;
+ }
+ if (!(PortP->State & (RIO_LOPEN | RIO_MOPEN))) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_RESUME: Port %d not open\n", port);
+ return -EINVAL;
+ }
+
+ rio_spin_lock_irqsave(&PortP->portSem, flags);
+ if (RIOPreemptiveCmd(p, (p->RIOPortp[port]), RESUME) == RIO_FAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_RESUME failed\n");
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ return -EBUSY;
+ } else {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_RESUME: Port %d resumed\n", port);
+ PortP->State |= RIO_BUSY;
+ }
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ return retval;
+
+ case RIO_ASSIGN_RTA:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_ASSIGN_RTA\n");
+ if (!su) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_ASSIGN_RTA !Root\n");
+ p->RIOError.Error = NOT_SUPER_USER;
+ return -EPERM;
+ }
+ if (copyin((int) arg, (caddr_t) & MapEnt, sizeof(MapEnt))
+ == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "Copy from user space failed\n");
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ return RIOAssignRta(p, &MapEnt);
+
+ case RIO_CHANGE_NAME:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_CHANGE_NAME\n");
+ if (!su) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_CHANGE_NAME !Root\n");
+ p->RIOError.Error = NOT_SUPER_USER;
+ return -EPERM;
+ }
+ if (copyin((int) arg, (caddr_t) & MapEnt, sizeof(MapEnt))
+ == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "Copy from user space failed\n");
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ return RIOChangeName(p, &MapEnt);
+
+ case RIO_DELETE_RTA:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_DELETE_RTA\n");
+ if (!su) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_DELETE_RTA !Root\n");
+ p->RIOError.Error = NOT_SUPER_USER;
+ return -EPERM;
+ }
+ if (copyin((int) arg, (caddr_t) & MapEnt, sizeof(MapEnt))
+ == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "Copy from data space failed\n");
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ return RIODeleteRta(p, &MapEnt);
+
+ case RIO_QUICK_CHECK:
+ /*
+ ** 09.12.1998 ARG - ESIL 0776 part fix
+ ** A customer was using this to get the RTAs
+ ** connect/disconnect status.
+		 ** RIOConCon() had been botched to use RIOHalted
+ ** to keep track of RTA connections and
+ ** disconnections. That has been changed and
+ ** RIORtaDisCons in the rio_info struct now
+ ** does the job. So we need to return the value
+		 ** of RIORtaDisCons instead of RIOHalted.
+ **
+ if (copyout((caddr_t)&p->RIOHalted,(int)arg,
+ sizeof(uint))==COPYFAIL) {
+ **
+ */
+
+ if (copyout((caddr_t) & p->RIORtaDisCons, (int) arg, sizeof(uint)) == COPYFAIL) {
+ p->RIOError.Error = COPYOUT_FAILED;
+ return -EFAULT;
+ }
+ return 0;
+
+ case RIO_LAST_ERROR:
+ if (copyout((caddr_t) & p->RIOError, (int) arg, sizeof(struct Error)) == COPYFAIL)
+ return -EFAULT;
+ return 0;
+
+ case RIO_GET_LOG:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_GET_LOG\n");
#ifdef LOGGING
- RIOGetLog(arg);
- return 0;
+ RIOGetLog(arg);
+ return 0;
#else
- return -EINVAL;
+ return -EINVAL;
#endif
- case RIO_GET_MODTYPE:
- if ( copyin( (int)arg, (caddr_t)&port,
- sizeof(uint)) == COPYFAIL )
- {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- rio_dprintk (RIO_DEBUG_CTRL, "Get module type for port %d\n", port);
- if ( port < 0 || port > 511 )
- {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_GET_MODTYPE: Bad port number %d\n", port);
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
- PortP = (p->RIOPortp[port]);
- if (!PortP->Mapped)
- {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_GET_MODTYPE: Port %d not mapped\n", port);
- p->RIOError.Error = PORT_NOT_MAPPED_INTO_SYSTEM;
- return -EINVAL;
- }
- /*
- ** Return module type of port
- */
- port = PortP->HostP->UnixRups[PortP->RupNum].ModTypes;
- if (copyout((caddr_t)&port, (int)arg,
- sizeof(uint)) == COPYFAIL) {
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return(0);
- /*
- ** 02.03.1999 ARG - ESIL 0820 fix
- ** We are no longer using "Boot Mode", so these ioctls
- ** are not required :
- **
- case RIO_GET_BOOT_MODE :
- rio_dprint(RIO_DEBUG_CTRL, ("Get boot mode - %x\n", p->RIOBootMode));
- **
- ** Return boot state of system - BOOT_ALL, BOOT_OWN or BOOT_NONE
- **
- if (copyout((caddr_t)&p->RIOBootMode, (int)arg,
- sizeof(p->RIOBootMode)) == COPYFAIL) {
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return(0);
-
- case RIO_SET_BOOT_MODE :
- p->RIOBootMode = (uint) arg;
- rio_dprint(RIO_DEBUG_CTRL, ("Set boot mode to 0x%x\n", p->RIOBootMode));
- return(0);
- **
- ** End ESIL 0820 fix
- */
-
- case RIO_BLOCK_OPENS:
- rio_dprintk (RIO_DEBUG_CTRL, "Opens block until booted\n");
- for ( Entry=0; Entry < RIO_PORTS; Entry++ ) {
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- p->RIOPortp[Entry]->WaitUntilBooted = 1;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- }
- return 0;
-
- case RIO_SETUP_PORTS:
- rio_dprintk (RIO_DEBUG_CTRL, "Setup ports\n");
- if (copyin((int)arg, (caddr_t)&PortSetup, sizeof(PortSetup))
- == COPYFAIL ) {
- p->RIOError.Error = COPYIN_FAILED;
- rio_dprintk (RIO_DEBUG_CTRL, "EFAULT");
- return -EFAULT;
- }
- if ( PortSetup.From > PortSetup.To ||
- PortSetup.To >= RIO_PORTS ) {
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- rio_dprintk (RIO_DEBUG_CTRL, "ENXIO");
- return -ENXIO;
- }
- if ( PortSetup.XpCps > p->RIOConf.MaxXpCps ||
- PortSetup.XpCps < p->RIOConf.MinXpCps ) {
- p->RIOError.Error = XPRINT_CPS_OUT_OF_RANGE;
- rio_dprintk (RIO_DEBUG_CTRL, "EINVAL");
- return -EINVAL;
- }
- if ( !p->RIOPortp ) {
- cprintf("No p->RIOPortp array!\n");
- rio_dprintk (RIO_DEBUG_CTRL, "No p->RIOPortp array!\n");
- return -EIO;
- }
- rio_dprintk (RIO_DEBUG_CTRL, "entering loop (%d %d)!\n", PortSetup.From, PortSetup.To);
- for (loop=PortSetup.From; loop<=PortSetup.To; loop++) {
- rio_dprintk (RIO_DEBUG_CTRL, "in loop (%d)!\n", loop);
+ case RIO_GET_MODTYPE:
+ if (copyin((int) arg, (caddr_t) & port, sizeof(uint)) == COPYFAIL) {
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ rio_dprintk(RIO_DEBUG_CTRL, "Get module type for port %d\n", port);
+ if (port < 0 || port > 511) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_GET_MODTYPE: Bad port number %d\n", port);
+ p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
+ return -EINVAL;
+ }
+ PortP = (p->RIOPortp[port]);
+ if (!PortP->Mapped) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_GET_MODTYPE: Port %d not mapped\n", port);
+ p->RIOError.Error = PORT_NOT_MAPPED_INTO_SYSTEM;
+ return -EINVAL;
+ }
+ /*
+ ** Return module type of port
+ */
+ port = PortP->HostP->UnixRups[PortP->RupNum].ModTypes;
+ if (copyout((caddr_t) & port, (int) arg, sizeof(uint)) == COPYFAIL) {
+ p->RIOError.Error = COPYOUT_FAILED;
+ return -EFAULT;
+ }
+ return (0);
+ /*
+ ** 02.03.1999 ARG - ESIL 0820 fix
+ ** We are no longer using "Boot Mode", so these ioctls
+ ** are not required :
+ **
+ case RIO_GET_BOOT_MODE :
+ rio_dprint(RIO_DEBUG_CTRL, ("Get boot mode - %x\n", p->RIOBootMode));
+ **
+ ** Return boot state of system - BOOT_ALL, BOOT_OWN or BOOT_NONE
+ **
+ if (copyout((caddr_t)&p->RIOBootMode, (int)arg,
+ sizeof(p->RIOBootMode)) == COPYFAIL) {
+ p->RIOError.Error = COPYOUT_FAILED;
+ return -EFAULT;
+ }
+ return(0);
+
+ case RIO_SET_BOOT_MODE :
+ p->RIOBootMode = (uint) arg;
+ rio_dprint(RIO_DEBUG_CTRL, ("Set boot mode to 0x%x\n", p->RIOBootMode));
+ return(0);
+ **
+ ** End ESIL 0820 fix
+ */
+
+ case RIO_BLOCK_OPENS:
+ rio_dprintk(RIO_DEBUG_CTRL, "Opens block until booted\n");
+ for (Entry = 0; Entry < RIO_PORTS; Entry++) {
+ rio_spin_lock_irqsave(&PortP->portSem, flags);
+ p->RIOPortp[Entry]->WaitUntilBooted = 1;
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ }
+ return 0;
+
+ case RIO_SETUP_PORTS:
+ rio_dprintk(RIO_DEBUG_CTRL, "Setup ports\n");
+ if (copyin((int) arg, (caddr_t) & PortSetup, sizeof(PortSetup))
+ == COPYFAIL) {
+ p->RIOError.Error = COPYIN_FAILED;
+ rio_dprintk(RIO_DEBUG_CTRL, "EFAULT");
+ return -EFAULT;
+ }
+ if (PortSetup.From > PortSetup.To || PortSetup.To >= RIO_PORTS) {
+ p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
+ rio_dprintk(RIO_DEBUG_CTRL, "ENXIO");
+ return -ENXIO;
+ }
+ if (PortSetup.XpCps > p->RIOConf.MaxXpCps || PortSetup.XpCps < p->RIOConf.MinXpCps) {
+ p->RIOError.Error = XPRINT_CPS_OUT_OF_RANGE;
+ rio_dprintk(RIO_DEBUG_CTRL, "EINVAL");
+ return -EINVAL;
+ }
+ if (!p->RIOPortp) {
+ cprintf("No p->RIOPortp array!\n");
+ rio_dprintk(RIO_DEBUG_CTRL, "No p->RIOPortp array!\n");
+ return -EIO;
+ }
+ rio_dprintk(RIO_DEBUG_CTRL, "entering loop (%d %d)!\n", PortSetup.From, PortSetup.To);
+ for (loop = PortSetup.From; loop <= PortSetup.To; loop++) {
+ rio_dprintk(RIO_DEBUG_CTRL, "in loop (%d)!\n", loop);
#if 0
- PortP = p->RIOPortp[loop];
- if ( !PortP->TtyP )
- PortP->TtyP = &p->channel[loop];
-
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- if ( PortSetup.IxAny )
- PortP->Config |= RIO_IXANY;
- else
- PortP->Config &= ~RIO_IXANY;
- if ( PortSetup.IxOn )
- PortP->Config |= RIO_IXON;
- else
- PortP->Config &= ~RIO_IXON;
-
- /*
- ** If the port needs to wait for all a processes output
- ** to drain before closing then this flag will be set.
- */
- if (PortSetup.Drain) {
- PortP->Config |= RIO_WAITDRAIN;
- } else {
- PortP->Config &= ~RIO_WAITDRAIN;
- }
- /*
- ** Store settings if locking or unlocking port or if the
- ** port is not locked, when setting the store option.
- */
- if (PortP->Mapped &&
- ((PortSetup.Lock && !PortP->Lock) ||
- (!PortP->Lock &&
- (PortSetup.Store && !PortP->Store)))) {
- PortP->StoredTty.iflag = PortP->TtyP->tm.c_iflag;
- PortP->StoredTty.oflag = PortP->TtyP->tm.c_oflag;
- PortP->StoredTty.cflag = PortP->TtyP->tm.c_cflag;
- PortP->StoredTty.lflag = PortP->TtyP->tm.c_lflag;
- PortP->StoredTty.line = PortP->TtyP->tm.c_line;
- bcopy(PortP->TtyP->tm.c_cc, PortP->StoredTty.cc,
- NCC + 5);
- }
- PortP->Lock = PortSetup.Lock;
- PortP->Store = PortSetup.Store;
- PortP->Xprint.XpCps = PortSetup.XpCps;
- bcopy(PortSetup.XpOn,PortP->Xprint.XpOn,MAX_XP_CTRL_LEN);
- bcopy(PortSetup.XpOff,PortP->Xprint.XpOff,MAX_XP_CTRL_LEN);
- PortP->Xprint.XpOn[MAX_XP_CTRL_LEN-1] = '\0';
- PortP->Xprint.XpOff[MAX_XP_CTRL_LEN-1] = '\0';
- PortP->Xprint.XpLen = RIOStrlen(PortP->Xprint.XpOn)+
- RIOStrlen(PortP->Xprint.XpOff);
- rio_spin_unlock_irqrestore( &PortP->portSem , flags);
-#endif
- }
- rio_dprintk (RIO_DEBUG_CTRL, "after loop (%d)!\n", loop);
- rio_dprintk (RIO_DEBUG_CTRL, "Retval:%x\n", retval);
- return retval;
-
- case RIO_GET_PORT_SETUP :
- rio_dprintk (RIO_DEBUG_CTRL, "Get port setup\n");
- if (copyin((int)arg, (caddr_t)&PortSetup, sizeof(PortSetup))
- == COPYFAIL ) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if ( PortSetup.From >= RIO_PORTS ) {
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
+ PortP = p->RIOPortp[loop];
+ if (!PortP->TtyP)
+ PortP->TtyP = &p->channel[loop];
+
+ rio_spin_lock_irqsave(&PortP->portSem, flags);
+ if (PortSetup.IxAny)
+ PortP->Config |= RIO_IXANY;
+ else
+ PortP->Config &= ~RIO_IXANY;
+ if (PortSetup.IxOn)
+ PortP->Config |= RIO_IXON;
+ else
+ PortP->Config &= ~RIO_IXON;
- port = PortSetup.To = PortSetup.From;
- PortSetup.IxAny = (p->RIOPortp[port]->Config & RIO_IXANY) ?
- 1 : 0;
- PortSetup.IxOn = (p->RIOPortp[port]->Config & RIO_IXON) ?
- 1 : 0;
- PortSetup.Drain = (p->RIOPortp[port]->Config & RIO_WAITDRAIN) ?
- 1 : 0;
- PortSetup.Store = p->RIOPortp[port]->Store;
- PortSetup.Lock = p->RIOPortp[port]->Lock;
- PortSetup.XpCps = p->RIOPortp[port]->Xprint.XpCps;
- bcopy(p->RIOPortp[port]->Xprint.XpOn, PortSetup.XpOn,
- MAX_XP_CTRL_LEN);
- bcopy(p->RIOPortp[port]->Xprint.XpOff, PortSetup.XpOff,
- MAX_XP_CTRL_LEN);
- PortSetup.XpOn[MAX_XP_CTRL_LEN-1] = '\0';
- PortSetup.XpOff[MAX_XP_CTRL_LEN-1] = '\0';
-
- if ( copyout((caddr_t)&PortSetup,(int)arg,sizeof(PortSetup))
- ==COPYFAIL ) {
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return retval;
-
- case RIO_GET_PORT_PARAMS :
- rio_dprintk (RIO_DEBUG_CTRL, "Get port params\n");
- if (copyin( (int)arg, (caddr_t)&PortParams,
- sizeof(struct PortParams)) == COPYFAIL) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if (PortParams.Port >= RIO_PORTS) {
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
- PortP = (p->RIOPortp[PortParams.Port]);
- PortParams.Config = PortP->Config;
- PortParams.State = PortP->State;
- rio_dprintk (RIO_DEBUG_CTRL, "Port %d\n", PortParams.Port);
-
- if (copyout((caddr_t)&PortParams, (int)arg,
- sizeof(struct PortParams)) == COPYFAIL ) {
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return retval;
-
- case RIO_GET_PORT_TTY :
- rio_dprintk (RIO_DEBUG_CTRL, "Get port tty\n");
- if (copyin((int)arg, (caddr_t)&PortTty, sizeof(struct PortTty))
- == COPYFAIL) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if ( PortTty.port >= RIO_PORTS ) {
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
-
- rio_dprintk (RIO_DEBUG_CTRL, "Port %d\n", PortTty.port);
- PortP = (p->RIOPortp[PortTty.port]);
+ /*
+			 ** If the port needs to wait for all of a process's output
+ ** to drain before closing then this flag will be set.
+ */
+ if (PortSetup.Drain) {
+ PortP->Config |= RIO_WAITDRAIN;
+ } else {
+ PortP->Config &= ~RIO_WAITDRAIN;
+ }
+ /*
+ ** Store settings if locking or unlocking port or if the
+ ** port is not locked, when setting the store option.
+ */
+ if (PortP->Mapped && ((PortSetup.Lock && !PortP->Lock) || (!PortP->Lock && (PortSetup.Store && !PortP->Store)))) {
+ PortP->StoredTty.iflag = PortP->TtyP->tm.c_iflag;
+ PortP->StoredTty.oflag = PortP->TtyP->tm.c_oflag;
+ PortP->StoredTty.cflag = PortP->TtyP->tm.c_cflag;
+ PortP->StoredTty.lflag = PortP->TtyP->tm.c_lflag;
+ PortP->StoredTty.line = PortP->TtyP->tm.c_line;
+ bcopy(PortP->TtyP->tm.c_cc, PortP->StoredTty.cc, NCC + 5);
+ }
+ PortP->Lock = PortSetup.Lock;
+ PortP->Store = PortSetup.Store;
+ PortP->Xprint.XpCps = PortSetup.XpCps;
+ bcopy(PortSetup.XpOn, PortP->Xprint.XpOn, MAX_XP_CTRL_LEN);
+ bcopy(PortSetup.XpOff, PortP->Xprint.XpOff, MAX_XP_CTRL_LEN);
+ PortP->Xprint.XpOn[MAX_XP_CTRL_LEN - 1] = '\0';
+ PortP->Xprint.XpOff[MAX_XP_CTRL_LEN - 1] = '\0';
+ PortP->Xprint.XpLen = RIOStrlen(PortP->Xprint.XpOn) + RIOStrlen(PortP->Xprint.XpOff);
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+#endif
+ }
+ rio_dprintk(RIO_DEBUG_CTRL, "after loop (%d)!\n", loop);
+ rio_dprintk(RIO_DEBUG_CTRL, "Retval:%x\n", retval);
+ return retval;
+
+ case RIO_GET_PORT_SETUP:
+ rio_dprintk(RIO_DEBUG_CTRL, "Get port setup\n");
+ if (copyin((int) arg, (caddr_t) & PortSetup, sizeof(PortSetup))
+ == COPYFAIL) {
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ if (PortSetup.From >= RIO_PORTS) {
+ p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
+ return -ENXIO;
+ }
+
+ port = PortSetup.To = PortSetup.From;
+ PortSetup.IxAny = (p->RIOPortp[port]->Config & RIO_IXANY) ? 1 : 0;
+ PortSetup.IxOn = (p->RIOPortp[port]->Config & RIO_IXON) ? 1 : 0;
+ PortSetup.Drain = (p->RIOPortp[port]->Config & RIO_WAITDRAIN) ? 1 : 0;
+ PortSetup.Store = p->RIOPortp[port]->Store;
+ PortSetup.Lock = p->RIOPortp[port]->Lock;
+ PortSetup.XpCps = p->RIOPortp[port]->Xprint.XpCps;
+ bcopy(p->RIOPortp[port]->Xprint.XpOn, PortSetup.XpOn, MAX_XP_CTRL_LEN);
+ bcopy(p->RIOPortp[port]->Xprint.XpOff, PortSetup.XpOff, MAX_XP_CTRL_LEN);
+ PortSetup.XpOn[MAX_XP_CTRL_LEN - 1] = '\0';
+ PortSetup.XpOff[MAX_XP_CTRL_LEN - 1] = '\0';
+
+ if (copyout((caddr_t) & PortSetup, (int) arg, sizeof(PortSetup))
+ == COPYFAIL) {
+ p->RIOError.Error = COPYOUT_FAILED;
+ return -EFAULT;
+ }
+ return retval;
+
+ case RIO_GET_PORT_PARAMS:
+ rio_dprintk(RIO_DEBUG_CTRL, "Get port params\n");
+ if (copyin((int) arg, (caddr_t) & PortParams, sizeof(struct PortParams)) == COPYFAIL) {
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ if (PortParams.Port >= RIO_PORTS) {
+ p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
+ return -ENXIO;
+ }
+ PortP = (p->RIOPortp[PortParams.Port]);
+ PortParams.Config = PortP->Config;
+ PortParams.State = PortP->State;
+ rio_dprintk(RIO_DEBUG_CTRL, "Port %d\n", PortParams.Port);
+
+ if (copyout((caddr_t) & PortParams, (int) arg, sizeof(struct PortParams)) == COPYFAIL) {
+ p->RIOError.Error = COPYOUT_FAILED;
+ return -EFAULT;
+ }
+ return retval;
+
+ case RIO_GET_PORT_TTY:
+ rio_dprintk(RIO_DEBUG_CTRL, "Get port tty\n");
+ if (copyin((int) arg, (caddr_t) & PortTty, sizeof(struct PortTty))
+ == COPYFAIL) {
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ if (PortTty.port >= RIO_PORTS) {
+ p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
+ return -ENXIO;
+ }
+
+ rio_dprintk(RIO_DEBUG_CTRL, "Port %d\n", PortTty.port);
+ PortP = (p->RIOPortp[PortTty.port]);
#if 0
- PortTty.Tty.tm.c_iflag = PortP->TtyP->tm.c_iflag;
- PortTty.Tty.tm.c_oflag = PortP->TtyP->tm.c_oflag;
- PortTty.Tty.tm.c_cflag = PortP->TtyP->tm.c_cflag;
- PortTty.Tty.tm.c_lflag = PortP->TtyP->tm.c_lflag;
+ PortTty.Tty.tm.c_iflag = PortP->TtyP->tm.c_iflag;
+ PortTty.Tty.tm.c_oflag = PortP->TtyP->tm.c_oflag;
+ PortTty.Tty.tm.c_cflag = PortP->TtyP->tm.c_cflag;
+ PortTty.Tty.tm.c_lflag = PortP->TtyP->tm.c_lflag;
#endif
- if (copyout((caddr_t)&PortTty, (int)arg,
- sizeof(struct PortTty)) == COPYFAIL) {
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return retval;
-
- case RIO_SET_PORT_TTY :
- if (copyin((int)arg, (caddr_t)&PortTty,
- sizeof(struct PortTty)) == COPYFAIL) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- rio_dprintk (RIO_DEBUG_CTRL, "Set port %d tty\n", PortTty.port);
- if (PortTty.port >= (ushort) RIO_PORTS) {
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
- PortP = (p->RIOPortp[PortTty.port]);
+ if (copyout((caddr_t) & PortTty, (int) arg, sizeof(struct PortTty)) == COPYFAIL) {
+ p->RIOError.Error = COPYOUT_FAILED;
+ return -EFAULT;
+ }
+ return retval;
+
+ case RIO_SET_PORT_TTY:
+ if (copyin((int) arg, (caddr_t) & PortTty, sizeof(struct PortTty)) == COPYFAIL) {
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ rio_dprintk(RIO_DEBUG_CTRL, "Set port %d tty\n", PortTty.port);
+ if (PortTty.port >= (ushort) RIO_PORTS) {
+ p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
+ return -ENXIO;
+ }
+ PortP = (p->RIOPortp[PortTty.port]);
#if 0
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- PortP->TtyP->tm.c_iflag = PortTty.Tty.tm.c_iflag;
- PortP->TtyP->tm.c_oflag = PortTty.Tty.tm.c_oflag;
- PortP->TtyP->tm.c_cflag = PortTty.Tty.tm.c_cflag;
- PortP->TtyP->tm.c_lflag = PortTty.Tty.tm.c_lflag;
- rio_spin_unlock_irqrestore( &PortP->portSem , flags);
+ rio_spin_lock_irqsave(&PortP->portSem, flags);
+ PortP->TtyP->tm.c_iflag = PortTty.Tty.tm.c_iflag;
+ PortP->TtyP->tm.c_oflag = PortTty.Tty.tm.c_oflag;
+ PortP->TtyP->tm.c_cflag = PortTty.Tty.tm.c_cflag;
+ PortP->TtyP->tm.c_lflag = PortTty.Tty.tm.c_lflag;
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
#endif
- RIOParam(PortP, CONFIG, PortP->State & RIO_MODEM, OK_TO_SLEEP);
- return retval;
-
- case RIO_SET_PORT_PARAMS :
- rio_dprintk (RIO_DEBUG_CTRL, "Set port params\n");
- if ( copyin((int)arg, (caddr_t)&PortParams, sizeof(PortParams))
- == COPYFAIL ) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if (PortParams.Port >= (ushort) RIO_PORTS) {
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
- PortP = (p->RIOPortp[PortParams.Port]);
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- PortP->Config = PortParams.Config;
- rio_spin_unlock_irqrestore( &PortP->portSem , flags);
- return retval;
-
- case RIO_GET_PORT_STATS :
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_GET_PORT_STATS\n");
- if ( copyin((int)arg, (caddr_t)&portStats,
- sizeof(struct portStats)) == COPYFAIL ) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if ( portStats.port >= RIO_PORTS ) {
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
- PortP = (p->RIOPortp[portStats.port]);
- portStats.gather = PortP->statsGather;
- portStats.txchars = PortP->txchars;
- portStats.rxchars = PortP->rxchars;
- portStats.opens = PortP->opens;
- portStats.closes = PortP->closes;
- portStats.ioctls = PortP->ioctls;
- if ( copyout((caddr_t)&portStats, (int)arg,
- sizeof(struct portStats)) == COPYFAIL ) {
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return retval;
-
- case RIO_RESET_PORT_STATS :
- port = (uint) arg;
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_RESET_PORT_STATS\n");
- if ( port >= RIO_PORTS ) {
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
- PortP = (p->RIOPortp[port]);
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- PortP->txchars = 0;
- PortP->rxchars = 0;
- PortP->opens = 0;
- PortP->closes = 0;
- PortP->ioctls = 0;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return retval;
-
- case RIO_GATHER_PORT_STATS :
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_GATHER_PORT_STATS\n");
- if ( copyin( (int)arg, (caddr_t)&portStats,
- sizeof(struct portStats)) == COPYFAIL ) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if ( portStats.port >= RIO_PORTS ) {
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
- PortP = (p->RIOPortp[portStats.port]);
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- PortP->statsGather = portStats.gather;
- rio_spin_unlock_irqrestore( &PortP->portSem , flags);
- return retval;
+ RIOParam(PortP, CONFIG, PortP->State & RIO_MODEM, OK_TO_SLEEP);
+ return retval;
+
+ case RIO_SET_PORT_PARAMS:
+ rio_dprintk(RIO_DEBUG_CTRL, "Set port params\n");
+ if (copyin((int) arg, (caddr_t) & PortParams, sizeof(PortParams))
+ == COPYFAIL) {
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ if (PortParams.Port >= (ushort) RIO_PORTS) {
+ p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
+ return -ENXIO;
+ }
+ PortP = (p->RIOPortp[PortParams.Port]);
+ rio_spin_lock_irqsave(&PortP->portSem, flags);
+ PortP->Config = PortParams.Config;
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ return retval;
+
+ case RIO_GET_PORT_STATS:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_GET_PORT_STATS\n");
+ if (copyin((int) arg, (caddr_t) & portStats, sizeof(struct portStats)) == COPYFAIL) {
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ if (portStats.port >= RIO_PORTS) {
+ p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
+ return -ENXIO;
+ }
+ PortP = (p->RIOPortp[portStats.port]);
+ portStats.gather = PortP->statsGather;
+ portStats.txchars = PortP->txchars;
+ portStats.rxchars = PortP->rxchars;
+ portStats.opens = PortP->opens;
+ portStats.closes = PortP->closes;
+ portStats.ioctls = PortP->ioctls;
+ if (copyout((caddr_t) & portStats, (int) arg, sizeof(struct portStats)) == COPYFAIL) {
+ p->RIOError.Error = COPYOUT_FAILED;
+ return -EFAULT;
+ }
+ return retval;
+
+ case RIO_RESET_PORT_STATS:
+ port = (uint) arg;
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_RESET_PORT_STATS\n");
+ if (port >= RIO_PORTS) {
+ p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
+ return -ENXIO;
+ }
+ PortP = (p->RIOPortp[port]);
+ rio_spin_lock_irqsave(&PortP->portSem, flags);
+ PortP->txchars = 0;
+ PortP->rxchars = 0;
+ PortP->opens = 0;
+ PortP->closes = 0;
+ PortP->ioctls = 0;
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ return retval;
+
+ case RIO_GATHER_PORT_STATS:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_GATHER_PORT_STATS\n");
+ if (copyin((int) arg, (caddr_t) & portStats, sizeof(struct portStats)) == COPYFAIL) {
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ if (portStats.port >= RIO_PORTS) {
+ p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
+ return -ENXIO;
+ }
+ PortP = (p->RIOPortp[portStats.port]);
+ rio_spin_lock_irqsave(&PortP->portSem, flags);
+ PortP->statsGather = portStats.gather;
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ return retval;
#ifdef DEBUG_SUPPORTED
- case RIO_READ_LEVELS:
- {
- int num;
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_READ_LEVELS\n");
- for ( num=0; RIODbInf[num].Flag; num++ ) ;
- rio_dprintk (RIO_DEBUG_CTRL, "%d levels to copy\n",num);
- if (copyout((caddr_t)RIODbInf,(int)arg,
- sizeof(struct DbInf)*(num+1))==COPYFAIL) {
- rio_dprintk (RIO_DEBUG_CTRL, "ReadLevels Copy failed\n");
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- rio_dprintk (RIO_DEBUG_CTRL, "%d levels to copied\n",num);
- return retval;
- }
+ case RIO_READ_LEVELS:
+ {
+ int num;
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_READ_LEVELS\n");
+ for (num = 0; RIODbInf[num].Flag; num++);
+ rio_dprintk(RIO_DEBUG_CTRL, "%d levels to copy\n", num);
+ if (copyout((caddr_t) RIODbInf, (int) arg, sizeof(struct DbInf) * (num + 1)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "ReadLevels Copy failed\n");
+ p->RIOError.Error = COPYOUT_FAILED;
+ return -EFAULT;
+ }
+			rio_dprintk(RIO_DEBUG_CTRL, "%d levels copied\n", num);
+ return retval;
+ }
#endif
- case RIO_READ_CONFIG:
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_READ_CONFIG\n");
- if (copyout((caddr_t)&p->RIOConf, (int)arg,
- sizeof(struct Conf)) ==COPYFAIL ) {
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return retval;
-
- case RIO_SET_CONFIG:
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_SET_CONFIG\n");
- if ( !su ) {
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
+ case RIO_READ_CONFIG:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_READ_CONFIG\n");
+ if (copyout((caddr_t) & p->RIOConf, (int) arg, sizeof(struct Conf)) == COPYFAIL) {
+ p->RIOError.Error = COPYOUT_FAILED;
+ return -EFAULT;
+ }
+ return retval;
+
+ case RIO_SET_CONFIG:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_SET_CONFIG\n");
+ if (!su) {
+ p->RIOError.Error = NOT_SUPER_USER;
+ return -EPERM;
+ }
+ if (copyin((int) arg, (caddr_t) & p->RIOConf, sizeof(struct Conf))
+ == COPYFAIL) {
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ /*
+		 ** move a few values around
+ */
+ for (Host = 0; Host < p->RIONumHosts; Host++)
+ if ((p->RIOHosts[Host].Flags & RUN_STATE) == RC_RUNNING)
+ WWORD(p->RIOHosts[Host].ParmMapP->timer, p->RIOConf.Timer);
+ return retval;
+
+ case RIO_START_POLLER:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_START_POLLER\n");
+ return -EINVAL;
+
+ case RIO_STOP_POLLER:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_STOP_POLLER\n");
+ if (!su) {
+ p->RIOError.Error = NOT_SUPER_USER;
+ return -EPERM;
+ }
+ p->RIOPolling = NOT_POLLING;
+ return retval;
+
+ case RIO_SETDEBUG:
+ case RIO_GETDEBUG:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_SETDEBUG/RIO_GETDEBUG\n");
+ if (copyin((int) arg, (caddr_t) & DebugCtrl, sizeof(DebugCtrl))
+ == COPYFAIL) {
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ if (DebugCtrl.SysPort == NO_PORT) {
+ if (cmd == RIO_SETDEBUG) {
+ if (!su) {
+ p->RIOError.Error = NOT_SUPER_USER;
+ return -EPERM;
}
- if ( copyin((int)arg, (caddr_t)&p->RIOConf, sizeof(struct Conf) )
- ==COPYFAIL ) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
+ p->rio_debug = DebugCtrl.Debug;
+ p->RIODebugWait = DebugCtrl.Wait;
+ rio_dprintk(RIO_DEBUG_CTRL, "Set global debug to 0x%x set wait to 0x%x\n", p->rio_debug, p->RIODebugWait);
+ } else {
+ rio_dprintk(RIO_DEBUG_CTRL, "Get global debug 0x%x wait 0x%x\n", p->rio_debug, p->RIODebugWait);
+ DebugCtrl.Debug = p->rio_debug;
+ DebugCtrl.Wait = p->RIODebugWait;
+ if (copyout((caddr_t) & DebugCtrl, (int) arg, sizeof(DebugCtrl)) == COPYFAIL) {
+					rio_dprintk(RIO_DEBUG_CTRL, "RIO_SET/GET DEBUG: Bad copy to user space (port %d)\n", DebugCtrl.SysPort);
+ p->RIOError.Error = COPYOUT_FAILED;
+ return -EFAULT;
}
- /*
- ** move a few value around
- */
- for (Host=0; Host < p->RIONumHosts; Host++)
- if ( (p->RIOHosts[Host].Flags & RUN_STATE) == RC_RUNNING )
- WWORD(p->RIOHosts[Host].ParmMapP->timer ,
- p->RIOConf.Timer);
- return retval;
-
- case RIO_START_POLLER:
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_START_POLLER\n");
- return -EINVAL;
+ }
+ } else if (DebugCtrl.SysPort >= RIO_PORTS && DebugCtrl.SysPort != NO_PORT) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_SET/GET DEBUG: bad port number %d\n", DebugCtrl.SysPort);
+ p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
+ return -ENXIO;
+ } else if (cmd == RIO_SETDEBUG) {
+ if (!su) {
+ p->RIOError.Error = NOT_SUPER_USER;
+ return -EPERM;
+ }
+ rio_spin_lock_irqsave(&PortP->portSem, flags);
+ p->RIOPortp[DebugCtrl.SysPort]->Debug = DebugCtrl.Debug;
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_SETDEBUG 0x%x\n", p->RIOPortp[DebugCtrl.SysPort]->Debug);
+ } else {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_GETDEBUG 0x%x\n", p->RIOPortp[DebugCtrl.SysPort]->Debug);
+ DebugCtrl.Debug = p->RIOPortp[DebugCtrl.SysPort]->Debug;
+ if (copyout((caddr_t) & DebugCtrl, (int) arg, sizeof(DebugCtrl)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_GETDEBUG: Bad copy to user space\n");
+ p->RIOError.Error = COPYOUT_FAILED;
+ return -EFAULT;
+ }
+ }
+ return retval;
- case RIO_STOP_POLLER:
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_STOP_POLLER\n");
- if ( !su ) {
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- p->RIOPolling = NOT_POLLING;
- return retval;
-
- case RIO_SETDEBUG:
- case RIO_GETDEBUG:
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_SETDEBUG/RIO_GETDEBUG\n");
- if ( copyin( (int)arg, (caddr_t)&DebugCtrl, sizeof(DebugCtrl) )
- ==COPYFAIL ) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if ( DebugCtrl.SysPort == NO_PORT ) {
- if ( cmd == RIO_SETDEBUG ) {
- if ( !su ) {
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- p->rio_debug = DebugCtrl.Debug;
- p->RIODebugWait = DebugCtrl.Wait;
- rio_dprintk (RIO_DEBUG_CTRL, "Set global debug to 0x%x set wait to 0x%x\n",
- p->rio_debug,p->RIODebugWait);
- }
- else {
- rio_dprintk (RIO_DEBUG_CTRL, "Get global debug 0x%x wait 0x%x\n",
- p->rio_debug,p->RIODebugWait);
- DebugCtrl.Debug = p->rio_debug;
- DebugCtrl.Wait = p->RIODebugWait;
- if ( copyout((caddr_t)&DebugCtrl,(int)arg,
- sizeof(DebugCtrl)) == COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_SET/GET DEBUG: bad port number %d\n",
- DebugCtrl.SysPort);
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- }
- }
- else if ( DebugCtrl.SysPort >= RIO_PORTS &&
- DebugCtrl.SysPort != NO_PORT ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_SET/GET DEBUG: bad port number %d\n",
- DebugCtrl.SysPort);
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
- else if ( cmd == RIO_SETDEBUG ) {
- if ( !su ) {
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- p->RIOPortp[DebugCtrl.SysPort]->Debug = DebugCtrl.Debug;
- rio_spin_unlock_irqrestore( &PortP->portSem , flags);
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_SETDEBUG 0x%x\n",
- p->RIOPortp[DebugCtrl.SysPort]->Debug);
- }
- else {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_GETDEBUG 0x%x\n",
- p->RIOPortp[DebugCtrl.SysPort]->Debug);
- DebugCtrl.Debug = p->RIOPortp[DebugCtrl.SysPort]->Debug;
- if ( copyout((caddr_t)&DebugCtrl,(int)arg,
- sizeof(DebugCtrl))==COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_GETDEBUG: Bad copy to user space\n");
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- }
- return retval;
-
- case RIO_VERSID:
- /*
- ** Enquire about the release and version.
- ** We return MAX_VERSION_LEN bytes, being a
- ** textual null terminated string.
- */
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_VERSID\n");
- if ( copyout( (caddr_t)RIOVersid(),
- (int)arg,
- sizeof(struct rioVersion) ) == COPYFAIL )
- {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_VERSID: Bad copy to user space (host=%d)\n", Host);
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return retval;
+ case RIO_VERSID:
+ /*
+ ** Enquire about the release and version.
+ ** We return MAX_VERSION_LEN bytes, being a
+ ** textual null terminated string.
+ */
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_VERSID\n");
+ if (copyout((caddr_t) RIOVersid(), (int) arg, sizeof(struct rioVersion)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_VERSID: Bad copy to user space (host=%d)\n", Host);
+ p->RIOError.Error = COPYOUT_FAILED;
+ return -EFAULT;
+ }
+ return retval;
- /*
- ** !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- ** !! commented out previous 'RIO_VERSID' functionality !!
- ** !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- **
- case RIO_VERSID:
- **
- ** Enquire about the release and version.
- ** We return MAX_VERSION_LEN bytes, being a textual null
- ** terminated string.
- **
- rio_dprint(RIO_DEBUG_CTRL, ("RIO_VERSID\n"));
- if (copyout((caddr_t)RIOVersid(),
- (int)arg, MAX_VERSION_LEN ) == COPYFAIL ) {
- rio_dprint(RIO_DEBUG_CTRL, ("RIO_VERSID: Bad copy to user space\n",Host));
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return retval;
- **
- ** !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- */
-
- case RIO_NUM_HOSTS:
- /*
- ** Enquire as to the number of hosts located
- ** at init time.
- */
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_NUM_HOSTS\n");
- if (copyout((caddr_t)&p->RIONumHosts, (int)arg,
- sizeof(p->RIONumHosts) )==COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_NUM_HOSTS: Bad copy to user space\n");
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return retval;
-
- case RIO_HOST_FOAD:
- /*
- ** Kill host. This may not be in the final version...
- */
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_FOAD %d\n", (int)arg);
- if ( !su ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_FOAD: Not super user\n");
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- p->RIOHalted = 1;
- p->RIOSystemUp = 0;
-
- for ( Host=0; Host<p->RIONumHosts; Host++ ) {
- (void)RIOBoardTest( p->RIOHosts[Host].PaddrP,
- p->RIOHosts[Host].Caddr, p->RIOHosts[Host].Type,
- p->RIOHosts[Host].Slot );
- bzero( (caddr_t)&p->RIOHosts[Host].Flags,
- ((int)&p->RIOHosts[Host].____end_marker____) -
- ((int)&p->RIOHosts[Host].Flags) );
- p->RIOHosts[Host].Flags = RC_WAITING;
+ /*
+ ** !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ ** !! commented out previous 'RIO_VERSID' functionality !!
+ ** !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ **
+ case RIO_VERSID:
+ **
+ ** Enquire about the release and version.
+ ** We return MAX_VERSION_LEN bytes, being a textual null
+ ** terminated string.
+ **
+ rio_dprint(RIO_DEBUG_CTRL, ("RIO_VERSID\n"));
+ if (copyout((caddr_t)RIOVersid(),
+ (int)arg, MAX_VERSION_LEN ) == COPYFAIL ) {
+ rio_dprint(RIO_DEBUG_CTRL, ("RIO_VERSID: Bad copy to user space\n",Host));
+ p->RIOError.Error = COPYOUT_FAILED;
+ return -EFAULT;
+ }
+ return retval;
+ **
+ ** !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ */
+
+ case RIO_NUM_HOSTS:
+ /*
+ ** Enquire as to the number of hosts located
+ ** at init time.
+ */
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_NUM_HOSTS\n");
+ if (copyout((caddr_t) & p->RIONumHosts, (int) arg, sizeof(p->RIONumHosts)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_NUM_HOSTS: Bad copy to user space\n");
+ p->RIOError.Error = COPYOUT_FAILED;
+ return -EFAULT;
+ }
+ return retval;
+
+ case RIO_HOST_FOAD:
+ /*
+ ** Kill host. This may not be in the final version...
+ */
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_FOAD %d\n", (int) arg);
+ if (!su) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_FOAD: Not super user\n");
+ p->RIOError.Error = NOT_SUPER_USER;
+ return -EPERM;
+ }
+ p->RIOHalted = 1;
+ p->RIOSystemUp = 0;
+
+ for (Host = 0; Host < p->RIONumHosts; Host++) {
+ (void) RIOBoardTest(p->RIOHosts[Host].PaddrP, p->RIOHosts[Host].Caddr, p->RIOHosts[Host].Type, p->RIOHosts[Host].Slot);
+ bzero((caddr_t) & p->RIOHosts[Host].Flags, ((int) &p->RIOHosts[Host].____end_marker____) - ((int) &p->RIOHosts[Host].Flags));
+ p->RIOHosts[Host].Flags = RC_WAITING;
#if 0
- RIOSetupDataStructs(p);
+ RIOSetupDataStructs(p);
#endif
- }
- RIOFoadWakeup(p);
- p->RIONumBootPkts = 0;
- p->RIOBooting = 0;
+ }
+ RIOFoadWakeup(p);
+ p->RIONumBootPkts = 0;
+ p->RIOBooting = 0;
#ifdef RINGBUFFER_SUPPORT
- for( loop=0; loop<RIO_PORTS; loop++ )
- if ( p->RIOPortp[loop]->TxRingBuffer )
- sysfree((void *)p->RIOPortp[loop]->TxRingBuffer,
- RIOBufferSize );
+ for (loop = 0; loop < RIO_PORTS; loop++)
+ if (p->RIOPortp[loop]->TxRingBuffer)
+ sysfree((void *) p->RIOPortp[loop]->TxRingBuffer, RIOBufferSize);
#endif
#if 0
- bzero((caddr_t)&p->RIOPortp[0],RIO_PORTS*sizeof(struct Port));
+ bzero((caddr_t) & p->RIOPortp[0], RIO_PORTS * sizeof(struct Port));
#else
- printk ("HEEEEELP!\n");
+ printk("HEEEEELP!\n");
#endif
- for( loop=0; loop<RIO_PORTS; loop++ ) {
+ for (loop = 0; loop < RIO_PORTS; loop++) {
#if 0
- p->RIOPortp[loop]->TtyP = &p->channel[loop];
+ p->RIOPortp[loop]->TtyP = &p->channel[loop];
#endif
-
- spin_lock_init(&p->RIOPortp[loop]->portSem);
- p->RIOPortp[loop]->InUse = NOT_INUSE;
- }
-
- p->RIOSystemUp = 0;
- return retval;
-
- case RIO_DOWNLOAD:
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_DOWNLOAD\n");
- if ( !su ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_DOWNLOAD: Not super user\n");
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- if ( copyin((int)arg, (caddr_t)&DownLoad,
- sizeof(DownLoad) )==COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_DOWNLOAD: Copy in from user space failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- rio_dprintk (RIO_DEBUG_CTRL, "Copied in download code for product code 0x%x\n",
- DownLoad.ProductCode);
-
- /*
- ** It is important that the product code is an unsigned object!
- */
- if ( DownLoad.ProductCode > MAX_PRODUCT ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_DOWNLOAD: Bad product code %d passed\n",
- DownLoad.ProductCode);
- p->RIOError.Error = NO_SUCH_PRODUCT;
- return -ENXIO;
- }
- /*
- ** do something!
- */
- retval = (*(RIOBootTable[DownLoad.ProductCode]))(p, &DownLoad);
- /* <-- Panic */
- p->RIOHalted = 0;
- /*
- ** and go back, content with a job well completed.
- */
- return retval;
-
- case RIO_PARMS:
- {
- uint host;
-
- if (copyin((int)arg, (caddr_t)&host,
- sizeof(host) ) == COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_CTRL,
- "RIO_HOST_REQ: Copy in from user space failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- /*
- ** Fetch the parmmap
- */
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_PARMS\n");
- if ( copyout( (caddr_t)p->RIOHosts[host].ParmMapP,
- (int)arg, sizeof(PARM_MAP) )==COPYFAIL ) {
- p->RIOError.Error = COPYOUT_FAILED;
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_PARMS: Copy out to user space failed\n");
- return -EFAULT;
- }
- }
- return retval;
-
- case RIO_HOST_REQ:
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_REQ\n");
- if (copyin((int)arg, (caddr_t)&HostReq,
- sizeof(HostReq) )==COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_REQ: Copy in from user space failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if ( HostReq.HostNum >= p->RIONumHosts ) {
- p->RIOError.Error = HOST_NUMBER_OUT_OF_RANGE;
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_REQ: Illegal host number %d\n",
- HostReq.HostNum);
- return -ENXIO;
- }
- rio_dprintk (RIO_DEBUG_CTRL, "Request for host %d\n", HostReq.HostNum);
- if (copyout((caddr_t)&p->RIOHosts[HostReq.HostNum],
- (int)HostReq.HostP,sizeof(struct Host) ) == COPYFAIL) {
- p->RIOError.Error = COPYOUT_FAILED;
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_REQ: Bad copy to user space\n");
- return -EFAULT;
- }
- return retval;
-
- case RIO_HOST_DPRAM:
- rio_dprintk (RIO_DEBUG_CTRL, "Request for DPRAM\n");
- if ( copyin( (int)arg, (caddr_t)&HostDpRam,
- sizeof(HostDpRam) )==COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_DPRAM: Copy in from user space failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if ( HostDpRam.HostNum >= p->RIONumHosts ) {
- p->RIOError.Error = HOST_NUMBER_OUT_OF_RANGE;
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_DPRAM: Illegal host number %d\n",
- HostDpRam.HostNum);
- return -ENXIO;
- }
- rio_dprintk (RIO_DEBUG_CTRL, "Request for host %d\n", HostDpRam.HostNum);
-
- if (p->RIOHosts[HostDpRam.HostNum].Type == RIO_PCI) {
- int off;
- /* It's hardware like this that really gets on my tits. */
- static unsigned char copy[sizeof(struct DpRam)];
- for ( off=0; off<sizeof(struct DpRam); off++ )
- copy[off] = p->RIOHosts[HostDpRam.HostNum].Caddr[off];
- if ( copyout( (caddr_t)copy, (int)HostDpRam.DpRamP,
- sizeof(struct DpRam) ) == COPYFAIL ) {
- p->RIOError.Error = COPYOUT_FAILED;
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_DPRAM: Bad copy to user space\n");
- return -EFAULT;
- }
- }
- else if (copyout((caddr_t)p->RIOHosts[HostDpRam.HostNum].Caddr,
- (int)HostDpRam.DpRamP,
- sizeof(struct DpRam) ) == COPYFAIL ) {
- p->RIOError.Error = COPYOUT_FAILED;
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_DPRAM: Bad copy to user space\n");
- return -EFAULT;
- }
- return retval;
-
- case RIO_SET_BUSY:
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_SET_BUSY\n");
- if ( (int)arg < 0 || (int)arg > 511 ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_SET_BUSY: Bad port number %d\n",(int)arg);
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- p->RIOPortp[(int)arg]->State |= RIO_BUSY;
- rio_spin_unlock_irqrestore( &PortP->portSem , flags);
- return retval;
-
- case RIO_HOST_PORT:
- /*
- ** The daemon want port information
- ** (probably for debug reasons)
- */
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_PORT\n");
- if ( copyin((int)arg, (caddr_t)&PortReq,
- sizeof(PortReq) )==COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_PORT: Copy in from user space failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
-
- if (PortReq.SysPort >= RIO_PORTS) { /* SysPort is unsigned */
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_PORT: Illegal port number %d\n",
- PortReq.SysPort);
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
- rio_dprintk (RIO_DEBUG_CTRL, "Request for port %d\n", PortReq.SysPort);
- if (copyout((caddr_t)p->RIOPortp[PortReq.SysPort],
- (int)PortReq.PortP,
- sizeof(struct Port) ) == COPYFAIL) {
- p->RIOError.Error = COPYOUT_FAILED;
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_PORT: Bad copy to user space\n");
- return -EFAULT;
- }
- return retval;
-
- case RIO_HOST_RUP:
- /*
- ** The daemon want rup information
- ** (probably for debug reasons)
- */
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_RUP\n");
- if (copyin((int)arg, (caddr_t)&RupReq,
- sizeof(RupReq) )==COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_RUP: Copy in from user space failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if (RupReq.HostNum >= p->RIONumHosts) { /* host is unsigned */
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_RUP: Illegal host number %d\n",
- RupReq.HostNum);
- p->RIOError.Error = HOST_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
- if ( RupReq.RupNum >= MAX_RUP+LINKS_PER_UNIT ) { /* eek! */
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_RUP: Illegal rup number %d\n",
- RupReq.RupNum);
- p->RIOError.Error = RUP_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
- HostP = &p->RIOHosts[RupReq.HostNum];
-
- if ((HostP->Flags & RUN_STATE) != RC_RUNNING) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_RUP: Host %d not running\n",
- RupReq.HostNum);
- p->RIOError.Error = HOST_NOT_RUNNING;
- return -EIO;
- }
- rio_dprintk (RIO_DEBUG_CTRL, "Request for rup %d from host %d\n",
- RupReq.RupNum,RupReq.HostNum);
-
- if (copyout((caddr_t)HostP->UnixRups[RupReq.RupNum].RupP,
- (int)RupReq.RupP,sizeof(struct RUP) ) == COPYFAIL) {
- p->RIOError.Error = COPYOUT_FAILED;
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_RUP: Bad copy to user space\n");
- return -EFAULT;
- }
- return retval;
-
- case RIO_HOST_LPB:
- /*
- ** The daemon want lpb information
- ** (probably for debug reasons)
- */
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_LPB\n");
- if (copyin((int)arg, (caddr_t)&LpbReq,
- sizeof(LpbReq) )==COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_LPB: Bad copy from user space\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if (LpbReq.Host >= p->RIONumHosts) { /* host is unsigned */
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_LPB: Illegal host number %d\n",
- LpbReq.Host);
- p->RIOError.Error = HOST_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
- if ( LpbReq.Link >= LINKS_PER_UNIT ) { /* eek! */
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_LPB: Illegal link number %d\n",
- LpbReq.Link);
- p->RIOError.Error = LINK_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
- HostP = &p->RIOHosts[LpbReq.Host];
-
- if ( (HostP->Flags & RUN_STATE) != RC_RUNNING ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_LPB: Host %d not running\n",
- LpbReq.Host );
- p->RIOError.Error = HOST_NOT_RUNNING;
- return -EIO;
- }
- rio_dprintk (RIO_DEBUG_CTRL, "Request for lpb %d from host %d\n",
- LpbReq.Link, LpbReq.Host);
-
- if (copyout((caddr_t)&HostP->LinkStrP[LpbReq.Link],
- (int)LpbReq.LpbP,sizeof(struct LPB) ) == COPYFAIL) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_HOST_LPB: Bad copy to user space\n");
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return retval;
-
- /*
- ** Here 3 IOCTL's that allow us to change the way in which
- ** rio logs errors. send them just to syslog or send them
- ** to both syslog and console or send them to just the console.
- **
- ** See RioStrBuf() in util.c for the other half.
- */
- case RIO_SYSLOG_ONLY:
- p->RIOPrintLogState = PRINT_TO_LOG; /* Just syslog */
- return 0;
-
- case RIO_SYSLOG_CONS:
- p->RIOPrintLogState = PRINT_TO_LOG_CONS;/* syslog and console */
- return 0;
-
- case RIO_CONS_ONLY:
- p->RIOPrintLogState = PRINT_TO_CONS; /* Just console */
- return 0;
-
- case RIO_SIGNALS_ON:
- if ( p->RIOSignalProcess ) {
- p->RIOError.Error = SIGNALS_ALREADY_SET;
- return -EBUSY;
- }
- p->RIOSignalProcess = getpid();
- p->RIOPrintDisabled = DONT_PRINT;
- return retval;
-
- case RIO_SIGNALS_OFF:
- if ( p->RIOSignalProcess != getpid() ) {
- p->RIOError.Error = NOT_RECEIVING_PROCESS;
- return -EPERM;
- }
- rio_dprintk (RIO_DEBUG_CTRL, "Clear signal process to zero\n");
- p->RIOSignalProcess = 0;
- return retval;
-
- case RIO_SET_BYTE_MODE:
- for ( Host=0; Host<p->RIONumHosts; Host++ )
- if ( p->RIOHosts[Host].Type == RIO_AT )
- p->RIOHosts[Host].Mode &= ~WORD_OPERATION;
- return retval;
-
- case RIO_SET_WORD_MODE:
- for ( Host=0; Host<p->RIONumHosts; Host++ )
- if ( p->RIOHosts[Host].Type == RIO_AT )
- p->RIOHosts[Host].Mode |= WORD_OPERATION;
- return retval;
-
- case RIO_SET_FAST_BUS:
- for ( Host=0; Host<p->RIONumHosts; Host++ )
- if ( p->RIOHosts[Host].Type == RIO_AT )
- p->RIOHosts[Host].Mode |= FAST_AT_BUS;
- return retval;
-
- case RIO_SET_SLOW_BUS:
- for ( Host=0; Host<p->RIONumHosts; Host++ )
- if ( p->RIOHosts[Host].Type == RIO_AT )
- p->RIOHosts[Host].Mode &= ~FAST_AT_BUS;
- return retval;
-
- case RIO_MAP_B50_TO_50:
- case RIO_MAP_B50_TO_57600:
- case RIO_MAP_B110_TO_110:
- case RIO_MAP_B110_TO_115200:
- rio_dprintk (RIO_DEBUG_CTRL, "Baud rate mapping\n");
- port = (uint) arg;
- if ( port < 0 || port > 511 ) {
- rio_dprintk (RIO_DEBUG_CTRL, "Baud rate mapping: Bad port number %d\n", port);
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- switch( cmd )
- {
- case RIO_MAP_B50_TO_50 :
- p->RIOPortp[port]->Config |= RIO_MAP_50_TO_50;
- break;
- case RIO_MAP_B50_TO_57600 :
- p->RIOPortp[port]->Config &= ~RIO_MAP_50_TO_50;
- break;
- case RIO_MAP_B110_TO_110 :
- p->RIOPortp[port]->Config |= RIO_MAP_110_TO_110;
- break;
- case RIO_MAP_B110_TO_115200 :
- p->RIOPortp[port]->Config &= ~RIO_MAP_110_TO_110;
- break;
- }
- rio_spin_unlock_irqrestore( &PortP->portSem , flags);
- return retval;
-
- case RIO_STREAM_INFO:
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_STREAM_INFO\n");
- return -EINVAL;
+ spin_lock_init(&p->RIOPortp[loop]->portSem);
+ p->RIOPortp[loop]->InUse = NOT_INUSE;
+ }
+
+ p->RIOSystemUp = 0;
+ return retval;
+
+ case RIO_DOWNLOAD:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_DOWNLOAD\n");
+ if (!su) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_DOWNLOAD: Not super user\n");
+ p->RIOError.Error = NOT_SUPER_USER;
+ return -EPERM;
+ }
+ if (copyin((int) arg, (caddr_t) & DownLoad, sizeof(DownLoad)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_DOWNLOAD: Copy in from user space failed\n");
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ rio_dprintk(RIO_DEBUG_CTRL, "Copied in download code for product code 0x%x\n", DownLoad.ProductCode);
- case RIO_SEND_PACKET:
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_SEND_PACKET\n");
- if ( copyin( (int)arg, (caddr_t)&SendPack,
- sizeof(SendPack) )==COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_SEND_PACKET: Bad copy from user space\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if ( SendPack.PortNum >= 128 ) {
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
-
- PortP = p->RIOPortp[SendPack.PortNum];
- rio_spin_lock_irqsave(&PortP->portSem, flags);
-
- if ( !can_add_transmit(&PacketP,PortP) ) {
- p->RIOError.Error = UNIT_IS_IN_USE;
- rio_spin_unlock_irqrestore( &PortP->portSem , flags);
- return -ENOSPC;
- }
-
- for ( loop=0; loop<(ushort)(SendPack.Len & 127); loop++ )
- WBYTE(PacketP->data[loop], SendPack.Data[loop] );
-
- WBYTE(PacketP->len, SendPack.Len);
-
- add_transmit( PortP );
- /*
- ** Count characters transmitted for port statistics reporting
- */
- if (PortP->statsGather)
- PortP->txchars += (SendPack.Len & 127);
- rio_spin_unlock_irqrestore( &PortP->portSem , flags);
- return retval;
-
- case RIO_NO_MESG:
- if ( su )
- p->RIONoMessage = 1;
- return su ? 0 : -EPERM;
-
- case RIO_MESG:
- if ( su )
- p->RIONoMessage = 0;
- return su ? 0 : -EPERM;
-
- case RIO_WHAT_MESG:
- if ( copyout( (caddr_t)&p->RIONoMessage, (int)arg,
- sizeof(p->RIONoMessage) )==COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_WHAT_MESG: Bad copy to user space\n");
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return 0;
-
- case RIO_MEM_DUMP :
- if (copyin((int)arg, (caddr_t)&SubCmd,
- sizeof(struct SubCmdStruct)) == COPYFAIL) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_MEM_DUMP host %d rup %d addr %x\n",
- SubCmd.Host, SubCmd.Rup, SubCmd.Addr);
-
- if (SubCmd.Rup >= MAX_RUP+LINKS_PER_UNIT ) {
- p->RIOError.Error = RUP_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
-
- if (SubCmd.Host >= p->RIONumHosts ) {
- p->RIOError.Error = HOST_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
-
- port = p->RIOHosts[SubCmd.Host].
- UnixRups[SubCmd.Rup].BaseSysPort;
-
- PortP = p->RIOPortp[port];
-
- rio_spin_lock_irqsave(&PortP->portSem, flags);
-
- if ( RIOPreemptiveCmd(p, PortP, MEMDUMP ) == RIO_FAIL ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_MEM_DUMP failed\n");
- rio_spin_unlock_irqrestore( &PortP->portSem , flags);
- return -EBUSY;
- }
- else
- PortP->State |= RIO_BUSY;
-
- rio_spin_unlock_irqrestore( &PortP->portSem , flags);
- if ( copyout( (caddr_t)p->RIOMemDump, (int)arg,
- MEMDUMP_SIZE) == COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_MEM_DUMP copy failed\n");
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return 0;
-
- case RIO_TICK:
- if ((int)arg < 0 || (int)arg >= p->RIONumHosts)
- return -EINVAL;
- rio_dprintk (RIO_DEBUG_CTRL, "Set interrupt for host %d\n", (int)arg);
- WBYTE(p->RIOHosts[(int)arg].SetInt , 0xff);
- return 0;
-
- case RIO_TOCK:
- if ((int)arg < 0 || (int)arg >= p->RIONumHosts)
- return -EINVAL;
- rio_dprintk (RIO_DEBUG_CTRL, "Clear interrupt for host %d\n", (int)arg);
- WBYTE((p->RIOHosts[(int)arg].ResetInt) , 0xff);
- return 0;
-
- case RIO_READ_CHECK:
- /* Check reads for pkts with data[0] the same */
- p->RIOReadCheck = !p->RIOReadCheck;
- if (copyout((caddr_t)&p->RIOReadCheck,(int)arg,
- sizeof(uint))== COPYFAIL) {
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return 0;
-
- case RIO_READ_REGISTER :
- if (copyin((int)arg, (caddr_t)&SubCmd,
- sizeof(struct SubCmdStruct)) == COPYFAIL) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_READ_REGISTER host %d rup %d port %d reg %x\n",
- SubCmd.Host, SubCmd.Rup, SubCmd.Port, SubCmd.Addr);
-
- if (SubCmd.Port > 511) {
- rio_dprintk (RIO_DEBUG_CTRL, "Baud rate mapping: Bad port number %d\n",
- SubCmd.Port);
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
-
- if (SubCmd.Rup >= MAX_RUP+LINKS_PER_UNIT ) {
- p->RIOError.Error = RUP_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
-
- if (SubCmd.Host >= p->RIONumHosts ) {
- p->RIOError.Error = HOST_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
-
- port = p->RIOHosts[SubCmd.Host].
- UnixRups[SubCmd.Rup].BaseSysPort + SubCmd.Port;
- PortP = p->RIOPortp[port];
-
- rio_spin_lock_irqsave(&PortP->portSem, flags);
-
- if (RIOPreemptiveCmd(p, PortP, READ_REGISTER) == RIO_FAIL) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_READ_REGISTER failed\n");
- rio_spin_unlock_irqrestore( &PortP->portSem , flags);
- return -EBUSY;
- }
- else
- PortP->State |= RIO_BUSY;
-
- rio_spin_unlock_irqrestore( &PortP->portSem , flags);
- if (copyout((caddr_t)&p->CdRegister, (int)arg,
- sizeof(uint)) == COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_CTRL, "RIO_READ_REGISTER copy failed\n");
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return 0;
- /*
- ** rio_make_dev: given port number (0-511) ORed with port type
- ** (RIO_DEV_DIRECT, RIO_DEV_MODEM, RIO_DEV_XPRINT) return dev_t
- ** value to pass to mknod to create the correct device node.
- */
- case RIO_MAKE_DEV:
- {
- uint port = (uint)arg & RIO_MODEM_MASK;
-
- switch ( (uint)arg & RIO_DEV_MASK ) {
- case RIO_DEV_DIRECT:
- arg = (caddr_t)drv_makedev(MAJOR(dev), port);
- rio_dprintk (RIO_DEBUG_CTRL, "Makedev direct 0x%x is 0x%x\n",port, (int)arg);
- return (int)arg;
- case RIO_DEV_MODEM:
- arg = (caddr_t)drv_makedev(MAJOR(dev), (port|RIO_MODEM_BIT) );
- rio_dprintk (RIO_DEBUG_CTRL, "Makedev modem 0x%x is 0x%x\n",port, (int)arg);
- return (int)arg;
- case RIO_DEV_XPRINT:
- arg = (caddr_t)drv_makedev(MAJOR(dev), port);
- rio_dprintk (RIO_DEBUG_CTRL, "Makedev printer 0x%x is 0x%x\n",port, (int)arg);
- return (int)arg;
- }
- rio_dprintk (RIO_DEBUG_CTRL, "MAKE Device is called\n");
- return -EINVAL;
- }
- /*
- ** rio_minor: given a dev_t from a stat() call, return
- ** the port number (0-511) ORed with the port type
- ** ( RIO_DEV_DIRECT, RIO_DEV_MODEM, RIO_DEV_XPRINT )
- */
- case RIO_MINOR:
- {
- dev_t dv;
- int mino;
-
- dv = (dev_t)((int)arg);
- mino = RIO_UNMODEM(dv);
+ /*
+ ** It is important that the product code is an unsigned object!
+ */
+ if (DownLoad.ProductCode > MAX_PRODUCT) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_DOWNLOAD: Bad product code %d passed\n", DownLoad.ProductCode);
+ p->RIOError.Error = NO_SUCH_PRODUCT;
+ return -ENXIO;
+ }
+ /*
+ ** do something!
+ */
+ retval = (*(RIOBootTable[DownLoad.ProductCode])) (p, &DownLoad);
+ /* <-- Panic */
+ p->RIOHalted = 0;
+ /*
+ ** and go back, content with a job well completed.
+ */
+ return retval;
+
+ case RIO_PARMS:
+ {
+ uint host;
+
+ if (copyin((int) arg, (caddr_t) & host, sizeof(host)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_REQ: Copy in from user space failed\n");
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ /*
+ ** Fetch the parmmap
+ */
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_PARMS\n");
+ if (copyout((caddr_t) p->RIOHosts[host].ParmMapP, (int) arg, sizeof(PARM_MAP)) == COPYFAIL) {
+ p->RIOError.Error = COPYOUT_FAILED;
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_PARMS: Copy out to user space failed\n");
+ return -EFAULT;
+ }
+ }
+ return retval;
+
+ case RIO_HOST_REQ:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_REQ\n");
+ if (copyin((int) arg, (caddr_t) & HostReq, sizeof(HostReq)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_REQ: Copy in from user space failed\n");
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ if (HostReq.HostNum >= p->RIONumHosts) {
+ p->RIOError.Error = HOST_NUMBER_OUT_OF_RANGE;
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_REQ: Illegal host number %d\n", HostReq.HostNum);
+ return -ENXIO;
+ }
+ rio_dprintk(RIO_DEBUG_CTRL, "Request for host %d\n", HostReq.HostNum);
+
+ if (copyout((caddr_t) & p->RIOHosts[HostReq.HostNum], (int) HostReq.HostP, sizeof(struct Host)) == COPYFAIL) {
+ p->RIOError.Error = COPYOUT_FAILED;
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_REQ: Bad copy to user space\n");
+ return -EFAULT;
+ }
+ return retval;
+
+ case RIO_HOST_DPRAM:
+ rio_dprintk(RIO_DEBUG_CTRL, "Request for DPRAM\n");
+ if (copyin((int) arg, (caddr_t) & HostDpRam, sizeof(HostDpRam)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_DPRAM: Copy in from user space failed\n");
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ if (HostDpRam.HostNum >= p->RIONumHosts) {
+ p->RIOError.Error = HOST_NUMBER_OUT_OF_RANGE;
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_DPRAM: Illegal host number %d\n", HostDpRam.HostNum);
+ return -ENXIO;
+ }
+ rio_dprintk(RIO_DEBUG_CTRL, "Request for host %d\n", HostDpRam.HostNum);
+
+ if (p->RIOHosts[HostDpRam.HostNum].Type == RIO_PCI) {
+ int off;
+ /* It's hardware like this that really gets on my tits. */
+ static unsigned char copy[sizeof(struct DpRam)];
+ for (off = 0; off < sizeof(struct DpRam); off++)
+ copy[off] = p->RIOHosts[HostDpRam.HostNum].Caddr[off];
+ if (copyout((caddr_t) copy, (int) HostDpRam.DpRamP, sizeof(struct DpRam)) == COPYFAIL) {
+ p->RIOError.Error = COPYOUT_FAILED;
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_DPRAM: Bad copy to user space\n");
+ return -EFAULT;
+ }
+ } else if (copyout((caddr_t) p->RIOHosts[HostDpRam.HostNum].Caddr, (int) HostDpRam.DpRamP, sizeof(struct DpRam)) == COPYFAIL) {
+ p->RIOError.Error = COPYOUT_FAILED;
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_DPRAM: Bad copy to user space\n");
+ return -EFAULT;
+ }
+ return retval;
+
+ case RIO_SET_BUSY:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_SET_BUSY\n");
+ if ((int) arg < 0 || (int) arg > 511) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_SET_BUSY: Bad port number %d\n", (int) arg);
+ p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
+ return -EINVAL;
+ }
+ rio_spin_lock_irqsave(&PortP->portSem, flags);
+ p->RIOPortp[(int) arg]->State |= RIO_BUSY;
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ return retval;
+
+ case RIO_HOST_PORT:
+ /*
+ ** The daemon want port information
+ ** (probably for debug reasons)
+ */
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_PORT\n");
+ if (copyin((int) arg, (caddr_t) & PortReq, sizeof(PortReq)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_PORT: Copy in from user space failed\n");
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+
+ if (PortReq.SysPort >= RIO_PORTS) { /* SysPort is unsigned */
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_PORT: Illegal port number %d\n", PortReq.SysPort);
+ p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
+ return -ENXIO;
+ }
+ rio_dprintk(RIO_DEBUG_CTRL, "Request for port %d\n", PortReq.SysPort);
+ if (copyout((caddr_t) p->RIOPortp[PortReq.SysPort], (int) PortReq.PortP, sizeof(struct Port)) == COPYFAIL) {
+ p->RIOError.Error = COPYOUT_FAILED;
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_PORT: Bad copy to user space\n");
+ return -EFAULT;
+ }
+ return retval;
+
+ case RIO_HOST_RUP:
+ /*
+ ** The daemon want rup information
+ ** (probably for debug reasons)
+ */
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_RUP\n");
+ if (copyin((int) arg, (caddr_t) & RupReq, sizeof(RupReq)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_RUP: Copy in from user space failed\n");
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ if (RupReq.HostNum >= p->RIONumHosts) { /* host is unsigned */
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_RUP: Illegal host number %d\n", RupReq.HostNum);
+ p->RIOError.Error = HOST_NUMBER_OUT_OF_RANGE;
+ return -ENXIO;
+ }
+ if (RupReq.RupNum >= MAX_RUP + LINKS_PER_UNIT) { /* eek! */
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_RUP: Illegal rup number %d\n", RupReq.RupNum);
+ p->RIOError.Error = RUP_NUMBER_OUT_OF_RANGE;
+ return -EINVAL;
+ }
+ HostP = &p->RIOHosts[RupReq.HostNum];
+
+ if ((HostP->Flags & RUN_STATE) != RC_RUNNING) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_RUP: Host %d not running\n", RupReq.HostNum);
+ p->RIOError.Error = HOST_NOT_RUNNING;
+ return -EIO;
+ }
+ rio_dprintk(RIO_DEBUG_CTRL, "Request for rup %d from host %d\n", RupReq.RupNum, RupReq.HostNum);
+
+ if (copyout((caddr_t) HostP->UnixRups[RupReq.RupNum].RupP, (int) RupReq.RupP, sizeof(struct RUP)) == COPYFAIL) {
+ p->RIOError.Error = COPYOUT_FAILED;
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_RUP: Bad copy to user space\n");
+ return -EFAULT;
+ }
+ return retval;
+
+ case RIO_HOST_LPB:
+ /*
+ ** The daemon want lpb information
+ ** (probably for debug reasons)
+ */
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_LPB\n");
+ if (copyin((int) arg, (caddr_t) & LpbReq, sizeof(LpbReq)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_LPB: Bad copy from user space\n");
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ if (LpbReq.Host >= p->RIONumHosts) { /* host is unsigned */
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_LPB: Illegal host number %d\n", LpbReq.Host);
+ p->RIOError.Error = HOST_NUMBER_OUT_OF_RANGE;
+ return -ENXIO;
+ }
+ if (LpbReq.Link >= LINKS_PER_UNIT) { /* eek! */
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_LPB: Illegal link number %d\n", LpbReq.Link);
+ p->RIOError.Error = LINK_NUMBER_OUT_OF_RANGE;
+ return -EINVAL;
+ }
+ HostP = &p->RIOHosts[LpbReq.Host];
+
+ if ((HostP->Flags & RUN_STATE) != RC_RUNNING) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_LPB: Host %d not running\n", LpbReq.Host);
+ p->RIOError.Error = HOST_NOT_RUNNING;
+ return -EIO;
+ }
+ rio_dprintk(RIO_DEBUG_CTRL, "Request for lpb %d from host %d\n", LpbReq.Link, LpbReq.Host);
+
+ if (copyout((caddr_t) & HostP->LinkStrP[LpbReq.Link], (int) LpbReq.LpbP, sizeof(struct LPB)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_LPB: Bad copy to user space\n");
+ p->RIOError.Error = COPYOUT_FAILED;
+ return -EFAULT;
+ }
+ return retval;
- if ( RIO_ISMODEM(dv) ) {
- rio_dprintk (RIO_DEBUG_CTRL, "Minor for device 0x%x: modem %d\n", dv, mino);
- arg = (caddr_t)(mino | RIO_DEV_MODEM);
- }
- else {
- rio_dprintk (RIO_DEBUG_CTRL, "Minor for device 0x%x: direct %d\n", dv, mino);
- arg = (caddr_t)(mino | RIO_DEV_DIRECT);
- }
- return (int)arg;
- }
+ /*
+ ** Here 3 IOCTL's that allow us to change the way in which
+ ** rio logs errors. send them just to syslog or send them
+ ** to both syslog and console or send them to just the console.
+ **
+ ** See RioStrBuf() in util.c for the other half.
+ */
+ case RIO_SYSLOG_ONLY:
+ p->RIOPrintLogState = PRINT_TO_LOG; /* Just syslog */
+ return 0;
+
+ case RIO_SYSLOG_CONS:
+ p->RIOPrintLogState = PRINT_TO_LOG_CONS; /* syslog and console */
+ return 0;
+
+ case RIO_CONS_ONLY:
+ p->RIOPrintLogState = PRINT_TO_CONS; /* Just console */
+ return 0;
+
+ case RIO_SIGNALS_ON:
+ if (p->RIOSignalProcess) {
+ p->RIOError.Error = SIGNALS_ALREADY_SET;
+ return -EBUSY;
+ }
+ p->RIOSignalProcess = getpid();
+ p->RIOPrintDisabled = DONT_PRINT;
+ return retval;
+
+ case RIO_SIGNALS_OFF:
+ if (p->RIOSignalProcess != getpid()) {
+ p->RIOError.Error = NOT_RECEIVING_PROCESS;
+ return -EPERM;
+ }
+ rio_dprintk(RIO_DEBUG_CTRL, "Clear signal process to zero\n");
+ p->RIOSignalProcess = 0;
+ return retval;
+
+ case RIO_SET_BYTE_MODE:
+ for (Host = 0; Host < p->RIONumHosts; Host++)
+ if (p->RIOHosts[Host].Type == RIO_AT)
+ p->RIOHosts[Host].Mode &= ~WORD_OPERATION;
+ return retval;
+
+ case RIO_SET_WORD_MODE:
+ for (Host = 0; Host < p->RIONumHosts; Host++)
+ if (p->RIOHosts[Host].Type == RIO_AT)
+ p->RIOHosts[Host].Mode |= WORD_OPERATION;
+ return retval;
+
+ case RIO_SET_FAST_BUS:
+ for (Host = 0; Host < p->RIONumHosts; Host++)
+ if (p->RIOHosts[Host].Type == RIO_AT)
+ p->RIOHosts[Host].Mode |= FAST_AT_BUS;
+ return retval;
+
+ case RIO_SET_SLOW_BUS:
+ for (Host = 0; Host < p->RIONumHosts; Host++)
+ if (p->RIOHosts[Host].Type == RIO_AT)
+ p->RIOHosts[Host].Mode &= ~FAST_AT_BUS;
+ return retval;
+
+ case RIO_MAP_B50_TO_50:
+ case RIO_MAP_B50_TO_57600:
+ case RIO_MAP_B110_TO_110:
+ case RIO_MAP_B110_TO_115200:
+ rio_dprintk(RIO_DEBUG_CTRL, "Baud rate mapping\n");
+ port = (uint) arg;
+ if (port < 0 || port > 511) {
+ rio_dprintk(RIO_DEBUG_CTRL, "Baud rate mapping: Bad port number %d\n", port);
+ p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
+ return -EINVAL;
+ }
+ rio_spin_lock_irqsave(&PortP->portSem, flags);
+ switch (cmd) {
+ case RIO_MAP_B50_TO_50:
+ p->RIOPortp[port]->Config |= RIO_MAP_50_TO_50;
+ break;
+ case RIO_MAP_B50_TO_57600:
+ p->RIOPortp[port]->Config &= ~RIO_MAP_50_TO_50;
+ break;
+ case RIO_MAP_B110_TO_110:
+ p->RIOPortp[port]->Config |= RIO_MAP_110_TO_110;
+ break;
+ case RIO_MAP_B110_TO_115200:
+ p->RIOPortp[port]->Config &= ~RIO_MAP_110_TO_110;
+ break;
+ }
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ return retval;
+
+ case RIO_STREAM_INFO:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_STREAM_INFO\n");
+ return -EINVAL;
+
+ case RIO_SEND_PACKET:
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_SEND_PACKET\n");
+ if (copyin((int) arg, (caddr_t) & SendPack, sizeof(SendPack)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_SEND_PACKET: Bad copy from user space\n");
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ if (SendPack.PortNum >= 128) {
+ p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
+ return -ENXIO;
+ }
+
+ PortP = p->RIOPortp[SendPack.PortNum];
+ rio_spin_lock_irqsave(&PortP->portSem, flags);
+
+ if (!can_add_transmit(&PacketP, PortP)) {
+ p->RIOError.Error = UNIT_IS_IN_USE;
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ return -ENOSPC;
+ }
+
+ for (loop = 0; loop < (ushort) (SendPack.Len & 127); loop++)
+ WBYTE(PacketP->data[loop], SendPack.Data[loop]);
+
+ WBYTE(PacketP->len, SendPack.Len);
+
+ add_transmit(PortP);
+ /*
+ ** Count characters transmitted for port statistics reporting
+ */
+ if (PortP->statsGather)
+ PortP->txchars += (SendPack.Len & 127);
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ return retval;
+
+ case RIO_NO_MESG:
+ if (su)
+ p->RIONoMessage = 1;
+ return su ? 0 : -EPERM;
+
+ case RIO_MESG:
+ if (su)
+ p->RIONoMessage = 0;
+ return su ? 0 : -EPERM;
+
+ case RIO_WHAT_MESG:
+ if (copyout((caddr_t) & p->RIONoMessage, (int) arg, sizeof(p->RIONoMessage)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_WHAT_MESG: Bad copy to user space\n");
+ p->RIOError.Error = COPYOUT_FAILED;
+ return -EFAULT;
+ }
+ return 0;
+
+ case RIO_MEM_DUMP:
+ if (copyin((int) arg, (caddr_t) & SubCmd, sizeof(struct SubCmdStruct)) == COPYFAIL) {
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_MEM_DUMP host %d rup %d addr %x\n", SubCmd.Host, SubCmd.Rup, SubCmd.Addr);
+
+ if (SubCmd.Rup >= MAX_RUP + LINKS_PER_UNIT) {
+ p->RIOError.Error = RUP_NUMBER_OUT_OF_RANGE;
+ return -EINVAL;
+ }
+
+ if (SubCmd.Host >= p->RIONumHosts) {
+ p->RIOError.Error = HOST_NUMBER_OUT_OF_RANGE;
+ return -EINVAL;
+ }
+
+ port = p->RIOHosts[SubCmd.Host].UnixRups[SubCmd.Rup].BaseSysPort;
+
+ PortP = p->RIOPortp[port];
+
+ rio_spin_lock_irqsave(&PortP->portSem, flags);
+
+ if (RIOPreemptiveCmd(p, PortP, MEMDUMP) == RIO_FAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_MEM_DUMP failed\n");
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ return -EBUSY;
+ } else
+ PortP->State |= RIO_BUSY;
+
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ if (copyout((caddr_t) p->RIOMemDump, (int) arg, MEMDUMP_SIZE) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_MEM_DUMP copy failed\n");
+ p->RIOError.Error = COPYOUT_FAILED;
+ return -EFAULT;
+ }
+ return 0;
+
+ case RIO_TICK:
+ if ((int) arg < 0 || (int) arg >= p->RIONumHosts)
+ return -EINVAL;
+ rio_dprintk(RIO_DEBUG_CTRL, "Set interrupt for host %d\n", (int) arg);
+ WBYTE(p->RIOHosts[(int) arg].SetInt, 0xff);
+ return 0;
+
+ case RIO_TOCK:
+ if ((int) arg < 0 || (int) arg >= p->RIONumHosts)
+ return -EINVAL;
+ rio_dprintk(RIO_DEBUG_CTRL, "Clear interrupt for host %d\n", (int) arg);
+ WBYTE((p->RIOHosts[(int) arg].ResetInt), 0xff);
+ return 0;
+
+ case RIO_READ_CHECK:
+ /* Check reads for pkts with data[0] the same */
+ p->RIOReadCheck = !p->RIOReadCheck;
+ if (copyout((caddr_t) & p->RIOReadCheck, (int) arg, sizeof(uint)) == COPYFAIL) {
+ p->RIOError.Error = COPYOUT_FAILED;
+ return -EFAULT;
+ }
+ return 0;
+
+ case RIO_READ_REGISTER:
+ if (copyin((int) arg, (caddr_t) & SubCmd, sizeof(struct SubCmdStruct)) == COPYFAIL) {
+ p->RIOError.Error = COPYIN_FAILED;
+ return -EFAULT;
+ }
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_READ_REGISTER host %d rup %d port %d reg %x\n", SubCmd.Host, SubCmd.Rup, SubCmd.Port, SubCmd.Addr);
+
+ if (SubCmd.Port > 511) {
+ rio_dprintk(RIO_DEBUG_CTRL, "Baud rate mapping: Bad port number %d\n", SubCmd.Port);
+ p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
+ return -EINVAL;
+ }
+
+ if (SubCmd.Rup >= MAX_RUP + LINKS_PER_UNIT) {
+ p->RIOError.Error = RUP_NUMBER_OUT_OF_RANGE;
+ return -EINVAL;
+ }
+
+ if (SubCmd.Host >= p->RIONumHosts) {
+ p->RIOError.Error = HOST_NUMBER_OUT_OF_RANGE;
+ return -EINVAL;
+ }
+
+ port = p->RIOHosts[SubCmd.Host].UnixRups[SubCmd.Rup].BaseSysPort + SubCmd.Port;
+ PortP = p->RIOPortp[port];
+
+ rio_spin_lock_irqsave(&PortP->portSem, flags);
+
+ if (RIOPreemptiveCmd(p, PortP, READ_REGISTER) == RIO_FAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_READ_REGISTER failed\n");
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ return -EBUSY;
+ } else
+ PortP->State |= RIO_BUSY;
+
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ if (copyout((caddr_t) & p->CdRegister, (int) arg, sizeof(uint)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_CTRL, "RIO_READ_REGISTER copy failed\n");
+ p->RIOError.Error = COPYOUT_FAILED;
+ return -EFAULT;
+ }
+ return 0;
+ /*
+ ** rio_make_dev: given port number (0-511) ORed with port type
+ ** (RIO_DEV_DIRECT, RIO_DEV_MODEM, RIO_DEV_XPRINT) return dev_t
+ ** value to pass to mknod to create the correct device node.
+ */
+ case RIO_MAKE_DEV:
+ {
+ uint port = (uint) arg & RIO_MODEM_MASK;
+
+ switch ((uint) arg & RIO_DEV_MASK) {
+ case RIO_DEV_DIRECT:
+ arg = (caddr_t) drv_makedev(MAJOR(dev), port);
+ rio_dprintk(RIO_DEBUG_CTRL, "Makedev direct 0x%x is 0x%x\n", port, (int) arg);
+ return (int) arg;
+ case RIO_DEV_MODEM:
+ arg = (caddr_t) drv_makedev(MAJOR(dev), (port | RIO_MODEM_BIT));
+ rio_dprintk(RIO_DEBUG_CTRL, "Makedev modem 0x%x is 0x%x\n", port, (int) arg);
+ return (int) arg;
+ case RIO_DEV_XPRINT:
+ arg = (caddr_t) drv_makedev(MAJOR(dev), port);
+ rio_dprintk(RIO_DEBUG_CTRL, "Makedev printer 0x%x is 0x%x\n", port, (int) arg);
+ return (int) arg;
+ }
+ rio_dprintk(RIO_DEBUG_CTRL, "MAKE Device is called\n");
+ return -EINVAL;
+ }
+ /*
+ ** rio_minor: given a dev_t from a stat() call, return
+ ** the port number (0-511) ORed with the port type
+ ** ( RIO_DEV_DIRECT, RIO_DEV_MODEM, RIO_DEV_XPRINT )
+ */
+ case RIO_MINOR:
+ {
+ dev_t dv;
+ int mino;
+
+ dv = (dev_t) ((int) arg);
+ mino = RIO_UNMODEM(dv);
+
+ if (RIO_ISMODEM(dv)) {
+ rio_dprintk(RIO_DEBUG_CTRL, "Minor for device 0x%x: modem %d\n", dv, mino);
+ arg = (caddr_t) (mino | RIO_DEV_MODEM);
+ } else {
+ rio_dprintk(RIO_DEBUG_CTRL, "Minor for device 0x%x: direct %d\n", dv, mino);
+ arg = (caddr_t) (mino | RIO_DEV_DIRECT);
+ }
+ return (int) arg;
+ }
}
- rio_dprintk (RIO_DEBUG_CTRL, "INVALID DAEMON IOCTL 0x%x\n",cmd);
+ rio_dprintk(RIO_DEBUG_CTRL, "INVALID DAEMON IOCTL 0x%x\n", cmd);
p->RIOError.Error = IOCTL_COMMAND_UNKNOWN;
- func_exit ();
+ func_exit();
return -EINVAL;
}
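
As an aside on the RIO_MAKE_DEV and RIO_MINOR cases above: the minor number is simply the port index (0-511) in the low bits with the port type ORed into the bits above it, which is the value the daemon passes to mknod. The sketch below restates that packing and unpacking outside the driver; the EX_* masks and bit positions are placeholders invented for the example, and the real values live in the RIO headers (RIO_MODEM_MASK, RIO_DEV_MASK, RIO_MODEM_BIT and friends).

/* Hedged sketch of the minor-number encoding described in the
 * RIO_MAKE_DEV / RIO_MINOR comments above.  The EX_* values are
 * placeholders chosen only so that this compiles stand-alone. */
#include <stdio.h>

#define EX_MODEM_MASK  0x1ff	/* low bits carry the port number 0-511 */
#define EX_DEV_MASK    0x600	/* bits above carry the port type */
#define EX_DEV_DIRECT  0x000
#define EX_DEV_MODEM   0x200
#define EX_DEV_XPRINT  0x400

static unsigned int ex_make_minor(unsigned int port, unsigned int type)
{
	return (port & EX_MODEM_MASK) | (type & EX_DEV_MASK);
}

int main(void)
{
	unsigned int minor = ex_make_minor(42, EX_DEV_MODEM);

	/* Unpack again, as the RIO_MINOR case does via RIO_UNMODEM()
	 * and RIO_ISMODEM() on the dev_t obtained from stat(). */
	printf("port %u, %s device\n", minor & EX_MODEM_MASK,
	       (minor & EX_DEV_MODEM) ? "modem" : "direct");
	return 0;
}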
/*
** Pre-emptive commands go on RUPs and are only one byte long.
*/
-int
-RIOPreemptiveCmd(p, PortP, Cmd)
-struct rio_info * p;
+int RIOPreemptiveCmd(p, PortP, Cmd)
+struct rio_info *p;
struct Port *PortP;
uchar Cmd;
{
@@ -1766,104 +1654,99 @@ uchar Cmd;
int port;
#ifdef CHECK
- CheckPortP( PortP );
+ CheckPortP(PortP);
#endif
- if ( PortP->State & RIO_DELETED ) {
- rio_dprintk (RIO_DEBUG_CTRL, "Preemptive command to deleted RTA ignored\n");
+ if (PortP->State & RIO_DELETED) {
+ rio_dprintk(RIO_DEBUG_CTRL, "Preemptive command to deleted RTA ignored\n");
return RIO_FAIL;
}
- if (((int)((char)PortP->InUse) == -1) || ! (CmdBlkP = RIOGetCmdBlk()) ) {
- rio_dprintk (RIO_DEBUG_CTRL, "Cannot allocate command block for command %d on port %d\n",
- Cmd, PortP->PortNum);
+ if (((int) ((char) PortP->InUse) == -1) || !(CmdBlkP = RIOGetCmdBlk())) {
+ rio_dprintk(RIO_DEBUG_CTRL, "Cannot allocate command block for command %d on port %d\n", Cmd, PortP->PortNum);
return RIO_FAIL;
}
- rio_dprintk (RIO_DEBUG_CTRL, "Command blk 0x%x - InUse now %d\n",
- (int)CmdBlkP,PortP->InUse);
+ rio_dprintk(RIO_DEBUG_CTRL, "Command blk 0x%x - InUse now %d\n", (int) CmdBlkP, PortP->InUse);
- PktCmdP = (struct PktCmd_M *)&CmdBlkP->Packet.data[0];
+ PktCmdP = (struct PktCmd_M *) &CmdBlkP->Packet.data[0];
- CmdBlkP->Packet.src_unit = 0;
+ CmdBlkP->Packet.src_unit = 0;
if (PortP->SecondBlock)
rup = PortP->ID2;
else
rup = PortP->RupNum;
CmdBlkP->Packet.dest_unit = rup;
- CmdBlkP->Packet.src_port = COMMAND_RUP;
+ CmdBlkP->Packet.src_port = COMMAND_RUP;
CmdBlkP->Packet.dest_port = COMMAND_RUP;
- CmdBlkP->Packet.len = PKT_CMD_BIT | 2;
- CmdBlkP->PostFuncP = RIOUnUse;
- CmdBlkP->PostArg = (int)PortP;
- PktCmdP->Command = Cmd;
- port = PortP->HostPort % (ushort)PORTS_PER_RTA;
+ CmdBlkP->Packet.len = PKT_CMD_BIT | 2;
+ CmdBlkP->PostFuncP = RIOUnUse;
+ CmdBlkP->PostArg = (int) PortP;
+ PktCmdP->Command = Cmd;
+ port = PortP->HostPort % (ushort) PORTS_PER_RTA;
/*
- ** Index ports 8-15 for 2nd block of 16 port RTA.
- */
+ ** Index ports 8-15 for 2nd block of 16 port RTA.
+ */
if (PortP->SecondBlock)
port += (ushort) PORTS_PER_RTA;
- PktCmdP->PhbNum = port;
-
- switch ( Cmd ) {
- case MEMDUMP:
- rio_dprintk (RIO_DEBUG_CTRL, "Queue MEMDUMP command blk 0x%x (addr 0x%x)\n",
- (int)CmdBlkP, (int)SubCmd.Addr);
- PktCmdP->SubCommand = MEMDUMP;
- PktCmdP->SubAddr = SubCmd.Addr;
- break;
- case FCLOSE:
- rio_dprintk (RIO_DEBUG_CTRL, "Queue FCLOSE command blk 0x%x\n",(int)CmdBlkP);
- break;
- case READ_REGISTER:
- rio_dprintk (RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) command blk 0x%x\n",
- (int)SubCmd.Addr, (int)CmdBlkP);
- PktCmdP->SubCommand = READ_REGISTER;
- PktCmdP->SubAddr = SubCmd.Addr;
- break;
- case RESUME:
- rio_dprintk (RIO_DEBUG_CTRL, "Queue RESUME command blk 0x%x\n",(int)CmdBlkP);
- break;
- case RFLUSH:
- rio_dprintk (RIO_DEBUG_CTRL, "Queue RFLUSH command blk 0x%x\n",(int)CmdBlkP);
- CmdBlkP->PostFuncP = RIORFlushEnable;
- break;
- case SUSPEND:
- rio_dprintk (RIO_DEBUG_CTRL, "Queue SUSPEND command blk 0x%x\n",(int)CmdBlkP);
- break;
-
- case MGET :
- rio_dprintk (RIO_DEBUG_CTRL, "Queue MGET command blk 0x%x\n", (int)CmdBlkP);
- break;
-
- case MSET :
- case MBIC :
- case MBIS :
- CmdBlkP->Packet.data[4] = (char) PortP->ModemLines;
- rio_dprintk (RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command blk 0x%x\n", (int)CmdBlkP);
- break;
-
- case WFLUSH:
- /*
- ** If we have queued up the maximum number of Write flushes
- ** allowed then we should not bother sending any more to the
- ** RTA.
- */
- if ((int)((char)PortP->WflushFlag) == (int)-1) {
- rio_dprintk (RIO_DEBUG_CTRL, "Trashed WFLUSH, WflushFlag about to wrap!");
- RIOFreeCmdBlk(CmdBlkP);
- return(RIO_FAIL);
- } else {
- rio_dprintk (RIO_DEBUG_CTRL, "Queue WFLUSH command blk 0x%x\n",
- (int)CmdBlkP);
- CmdBlkP->PostFuncP = RIOWFlushMark;
- }
- break;
+ PktCmdP->PhbNum = port;
+
+ switch (Cmd) {
+ case MEMDUMP:
+ rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk 0x%x (addr 0x%x)\n", (int) CmdBlkP, (int) SubCmd.Addr);
+ PktCmdP->SubCommand = MEMDUMP;
+ PktCmdP->SubAddr = SubCmd.Addr;
+ break;
+ case FCLOSE:
+ rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk 0x%x\n", (int) CmdBlkP);
+ break;
+ case READ_REGISTER:
+ rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) command blk 0x%x\n", (int) SubCmd.Addr, (int) CmdBlkP);
+ PktCmdP->SubCommand = READ_REGISTER;
+ PktCmdP->SubAddr = SubCmd.Addr;
+ break;
+ case RESUME:
+ rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk 0x%x\n", (int) CmdBlkP);
+ break;
+ case RFLUSH:
+ rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk 0x%x\n", (int) CmdBlkP);
+ CmdBlkP->PostFuncP = RIORFlushEnable;
+ break;
+ case SUSPEND:
+ rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk 0x%x\n", (int) CmdBlkP);
+ break;
+
+ case MGET:
+ rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk 0x%x\n", (int) CmdBlkP);
+ break;
+
+ case MSET:
+ case MBIC:
+ case MBIS:
+ CmdBlkP->Packet.data[4] = (char) PortP->ModemLines;
+ rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command blk 0x%x\n", (int) CmdBlkP);
+ break;
+
+ case WFLUSH:
+ /*
+ ** If we have queued up the maximum number of Write flushes
+ ** allowed then we should not bother sending any more to the
+ ** RTA.
+ */
+ if ((int) ((char) PortP->WflushFlag) == (int) -1) {
+ rio_dprintk(RIO_DEBUG_CTRL, "Trashed WFLUSH, WflushFlag about to wrap!");
+ RIOFreeCmdBlk(CmdBlkP);
+ return (RIO_FAIL);
+ } else {
+ rio_dprintk(RIO_DEBUG_CTRL, "Queue WFLUSH command blk 0x%x\n", (int) CmdBlkP);
+ CmdBlkP->PostFuncP = RIOWFlushMark;
+ }
+ break;
}
PortP->InUse++;
- Ret = RIOQueueCmdBlk( PortP->HostP, rup, CmdBlkP );
+ Ret = RIOQueueCmdBlk(PortP->HostP, rup, CmdBlkP);
return Ret;
}
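
The comment above notes that pre-emptive commands ride on the RUPs as single-byte commands; the ioctl cases earlier (RIO_MEM_DUMP, RIO_READ_REGISTER) all drive RIOPreemptiveCmd() the same way. The fragment below is only a condensed restatement of that calling pattern, not additional driver code: take the port lock, queue the command, and leave the port flagged busy until the RTA's reply clears it.

/* Condensed restatement of how the ioctl cases above call
 * RIOPreemptiveCmd(): lock the port, queue the one-byte command on its
 * RUP, and keep RIO_BUSY set until the reply comes back. */
static int ex_issue_preemptive(struct rio_info *p, struct Port *PortP, uchar cmd)
{
	unsigned long flags;

	rio_spin_lock_irqsave(&PortP->portSem, flags);
	if (RIOPreemptiveCmd(p, PortP, cmd) == RIO_FAIL) {
		rio_spin_unlock_irqrestore(&PortP->portSem, flags);
		return -EBUSY;
	}
	PortP->State |= RIO_BUSY;
	rio_spin_unlock_irqrestore(&PortP->portSem, flags);
	return 0;
}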
diff --git a/drivers/char/rio/riodrvr.h b/drivers/char/rio/riodrvr.h
index bc38ac5dfbd..663ee0914ed 100644
--- a/drivers/char/rio/riodrvr.h
+++ b/drivers/char/rio/riodrvr.h
@@ -33,7 +33,7 @@
#ifndef __riodrvr_h
#define __riodrvr_h
-#include <asm/param.h> /* for HZ */
+#include <asm/param.h> /* for HZ */
#ifdef SCCS_LABELS
static char *_riodrvr_h_sccs_ = "@(#)riodrvr.h 1.3";
@@ -44,15 +44,15 @@ static char *_riodrvr_h_sccs_ = "@(#)riodrvr.h 1.3";
struct rio_info {
- int mode; /* Intr or polled, word/byte */
- spinlock_t RIOIntrSem; /* Interrupt thread sem */
- int current_chan; /* current channel */
- int RIOFailed; /* Not initialised ? */
- int RIOInstallAttempts; /* no. of rio-install() calls */
- int RIOLastPCISearch; /* status of last search */
- int RIONumHosts; /* Number of RIO Hosts */
- struct Host * RIOHosts; /* RIO Host values */
- struct Port **RIOPortp; /* RIO port values */
+ int mode; /* Intr or polled, word/byte */
+ spinlock_t RIOIntrSem; /* Interrupt thread sem */
+ int current_chan; /* current channel */
+ int RIOFailed; /* Not initialised ? */
+ int RIOInstallAttempts; /* no. of rio-install() calls */
+ int RIOLastPCISearch; /* status of last search */
+ int RIONumHosts; /* Number of RIO Hosts */
+ struct Host *RIOHosts; /* RIO Host values */
+ struct Port **RIOPortp; /* RIO port values */
/*
** 02.03.1999 ARG - ESIL 0820 fix
** We no longer use RIOBootMode
@@ -60,9 +60,9 @@ struct rio_info {
int RIOBootMode; * RIO boot mode *
**
*/
- int RIOPrintDisabled; /* RIO printing disabled ? */
- int RIOPrintLogState; /* RIO printing state ? */
- int RIOPolling; /* Polling ? */
+ int RIOPrintDisabled; /* RIO printing disabled ? */
+ int RIOPrintLogState; /* RIO printing state ? */
+ int RIOPolling; /* Polling ? */
/*
** 09.12.1998 ARG - ESIL 0776 part fix
** The 'RIO_QUICK_CHECK' ioctl was using RIOHalted.
@@ -70,61 +70,61 @@ struct rio_info {
** updated in RIOConCon() - to keep track of RTA connections/disconnections.
** 'RIO_QUICK_CHECK' now returns the value of RIORtaDisCons.
*/
- int RIOHalted; /* halted ? */
- int RIORtaDisCons; /* RTA connections/disconnections */
- uint RIOReadCheck; /* Rio read check */
- uint RIONoMessage; /* To display message or not */
- uint RIONumBootPkts; /* how many packets for an RTA */
- uint RIOBootCount; /* size of RTA code */
- uint RIOBooting; /* count of outstanding boots */
- uint RIOSystemUp; /* Booted ?? */
- uint RIOCounting; /* for counting interrupts */
- uint RIOIntCount; /* # of intr since last check */
- uint RIOTxCount; /* number of xmit intrs */
- uint RIORxCount; /* number of rx intrs */
- uint RIORupCount; /* number of rup intrs */
- int RIXTimer;
- int RIOBufferSize; /* Buffersize */
- int RIOBufferMask; /* Buffersize */
-
- int RIOFirstMajor; /* First host card's major no */
-
- uint RIOLastPortsMapped; /* highest port number known */
- uint RIOFirstPortsMapped; /* lowest port number known */
-
- uint RIOLastPortsBooted; /* highest port number running */
- uint RIOFirstPortsBooted; /* lowest port number running */
-
- uint RIOLastPortsOpened; /* highest port number running */
- uint RIOFirstPortsOpened; /* lowest port number running */
+ int RIOHalted; /* halted ? */
+ int RIORtaDisCons; /* RTA connections/disconnections */
+ uint RIOReadCheck; /* Rio read check */
+ uint RIONoMessage; /* To display message or not */
+ uint RIONumBootPkts; /* how many packets for an RTA */
+ uint RIOBootCount; /* size of RTA code */
+ uint RIOBooting; /* count of outstanding boots */
+ uint RIOSystemUp; /* Booted ?? */
+ uint RIOCounting; /* for counting interrupts */
+ uint RIOIntCount; /* # of intr since last check */
+ uint RIOTxCount; /* number of xmit intrs */
+ uint RIORxCount; /* number of rx intrs */
+ uint RIORupCount; /* number of rup intrs */
+ int RIXTimer;
+ int RIOBufferSize; /* Buffersize */
+ int RIOBufferMask; /* Buffersize */
+
+ int RIOFirstMajor; /* First host card's major no */
+
+ uint RIOLastPortsMapped; /* highest port number known */
+ uint RIOFirstPortsMapped; /* lowest port number known */
+
+ uint RIOLastPortsBooted; /* highest port number running */
+ uint RIOFirstPortsBooted; /* lowest port number running */
+
+ uint RIOLastPortsOpened; /* highest port number running */
+ uint RIOFirstPortsOpened; /* lowest port number running */
/* Flag to say that the topology information has been changed. */
- uint RIOQuickCheck;
- uint CdRegister; /* ??? */
- int RIOSignalProcess; /* Signalling process */
- int rio_debug; /* To debug ... */
- int RIODebugWait; /* For what ??? */
- int tpri; /* Thread prio */
- int tid; /* Thread id */
- uint _RIO_Polled; /* Counter for polling */
- uint _RIO_Interrupted; /* Counter for interrupt */
- int intr_tid; /* iointset return value */
- int TxEnSem; /* TxEnable Semaphore */
-
-
- struct Error RIOError; /* to Identify what went wrong */
- struct Conf RIOConf; /* Configuration ??? */
- struct ttystatics channel[RIO_PORTS]; /* channel information */
- char RIOBootPackets[1+(SIXTY_FOUR_K/RTA_BOOT_DATA_SIZE)]
- [RTA_BOOT_DATA_SIZE];
- struct Map RIOConnectTable[TOTAL_MAP_ENTRIES];
- struct Map RIOSavedTable[TOTAL_MAP_ENTRIES];
+ uint RIOQuickCheck;
+ uint CdRegister; /* ??? */
+ int RIOSignalProcess; /* Signalling process */
+ int rio_debug; /* To debug ... */
+ int RIODebugWait; /* For what ??? */
+ int tpri; /* Thread prio */
+ int tid; /* Thread id */
+ uint _RIO_Polled; /* Counter for polling */
+ uint _RIO_Interrupted; /* Counter for interrupt */
+ int intr_tid; /* iointset return value */
+ int TxEnSem; /* TxEnable Semaphore */
+
+
+ struct Error RIOError; /* to Identify what went wrong */
+ struct Conf RIOConf; /* Configuration ??? */
+ struct ttystatics channel[RIO_PORTS]; /* channel information */
+ char RIOBootPackets[1 + (SIXTY_FOUR_K / RTA_BOOT_DATA_SIZE)]
+ [RTA_BOOT_DATA_SIZE];
+ struct Map RIOConnectTable[TOTAL_MAP_ENTRIES];
+ struct Map RIOSavedTable[TOTAL_MAP_ENTRIES];
/* RTA to host binding table for master/slave operation */
- ulong RIOBindTab[MAX_RTA_BINDINGS];
+ ulong RIOBindTab[MAX_RTA_BINDINGS];
/* RTA memory dump variable */
- uchar RIOMemDump[MEMDUMP_SIZE];
- struct ModuleInfo RIOModuleTypes[MAX_MODULE_TYPES];
+ uchar RIOMemDump[MEMDUMP_SIZE];
+ struct ModuleInfo RIOModuleTypes[MAX_MODULE_TYPES];
};
@@ -141,4 +141,4 @@ struct rio_info {
#define WRBYTE(x,y) *(volatile unsigned char *)((x)) = \
(unsigned char)(y)
-#endif /* __riodrvr.h */
+#endif /* __riodrvr.h */
diff --git a/drivers/char/rio/rioinfo.h b/drivers/char/rio/rioinfo.h
index e08421c9558..8de7966e603 100644
--- a/drivers/char/rio/rioinfo.h
+++ b/drivers/char/rio/rioinfo.h
@@ -41,29 +41,29 @@ static char *_rioinfo_h_sccs_ = "@(#)rioinfo.h 1.2";
** Host card data structure
*/
struct RioHostInfo {
- long location; /* RIO Card Base I/O address */
- long vector; /* RIO Card IRQ vector */
- int bus; /* ISA/EISA/MCA/PCI */
- int mode; /* pointer to host mode - INTERRUPT / POLLED */
+ long location; /* RIO Card Base I/O address */
+ long vector; /* RIO Card IRQ vector */
+ int bus; /* ISA/EISA/MCA/PCI */
+ int mode; /* pointer to host mode - INTERRUPT / POLLED */
struct old_sgttyb
- * Sg; /* pointer to default term characteristics */
+ *Sg; /* pointer to default term characteristics */
};
/* Mode in rio device info */
-#define INTERRUPTED_MODE 0x01 /* Interrupt is generated */
-#define POLLED_MODE 0x02 /* No interrupt */
-#define AUTO_MODE 0x03 /* Auto mode */
+#define INTERRUPTED_MODE 0x01 /* Interrupt is generated */
+#define POLLED_MODE 0x02 /* No interrupt */
+#define AUTO_MODE 0x03 /* Auto mode */
-#define WORD_ACCESS_MODE 0x10 /* Word Access Mode */
-#define BYTE_ACCESS_MODE 0x20 /* Byte Access Mode */
+#define WORD_ACCESS_MODE 0x10 /* Word Access Mode */
+#define BYTE_ACCESS_MODE 0x20 /* Byte Access Mode */
/* Bus type that RIO supports */
-#define ISA_BUS 0x01 /* The card is ISA */
-#define EISA_BUS 0x02 /* The card is EISA */
-#define MCA_BUS 0x04 /* The card is MCA */
-#define PCI_BUS 0x08 /* The card is PCI */
+#define ISA_BUS 0x01 /* The card is ISA */
+#define EISA_BUS 0x02 /* The card is EISA */
+#define MCA_BUS 0x04 /* The card is MCA */
+#define PCI_BUS 0x08 /* The card is PCI */
/*
** 11.11.1998 ARG - ESIL ???? part fix
@@ -93,4 +93,4 @@ struct RioHostInfo {
'V' - '@' /* literal next char */ \
}
-#endif /* __rioinfo_h */
+#endif /* __rioinfo_h */
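
For orientation, the flag values above combine in the obvious way: bus takes one of the *_BUS bits, and mode can OR an interrupt/polled choice with an access width. A hedged sketch of describing a polled ISA card with struct RioHostInfo follows; the base address and IRQ are invented purely for illustration, and the particular flag combination is an assumption rather than a value taken from the driver.

/* Hedged example of filling struct RioHostInfo with the flags above.
 * The I/O base and IRQ vector are invented for illustration only. */
static struct RioHostInfo example_host = {
	.location = 0x280,			/* hypothetical ISA base I/O address */
	.vector = 11,				/* hypothetical IRQ */
	.bus = ISA_BUS,
	.mode = POLLED_MODE | BYTE_ACCESS_MODE,	/* assumed: no interrupts, byte-wide access */
	.Sg = 0,				/* default terminal characteristics */
};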
diff --git a/drivers/char/rio/riointr.c b/drivers/char/rio/riointr.c
index e42e7b50bf6..34d8787557a 100644
--- a/drivers/char/rio/riointr.c
+++ b/drivers/char/rio/riointr.c
@@ -38,6 +38,7 @@ static char *_riointr_c_sccs_ = "@(#)riointr.c 1.2";
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/string.h>
@@ -87,99 +88,93 @@ static char *_riointr_c_sccs_ = "@(#)riointr.c 1.2";
static void RIOReceive(struct rio_info *, struct Port *);
-static char *firstchars (char *p, int nch)
+static char *firstchars(char *p, int nch)
{
- static char buf[2][128];
- static int t=0;
- t = ! t;
- memcpy (buf[t], p, nch);
- buf[t][nch] = 0;
- return buf[t];
+ static char buf[2][128];
+ static int t = 0;
+ t = !t;
+ memcpy(buf[t], p, nch);
+ buf[t][nch] = 0;
+ return buf[t];
}
#define INCR( P, I ) ((P) = (((P)+(I)) & p->RIOBufferMask))
/* Enable and start the transmission of packets */
-void
-RIOTxEnable(en)
-char * en;
+void RIOTxEnable(en)
+char *en;
{
- struct Port * PortP;
- struct rio_info *p;
- struct tty_struct* tty;
- int c;
- struct PKT * PacketP;
- unsigned long flags;
-
- PortP = (struct Port *)en;
- p = (struct rio_info *)PortP->p;
- tty = PortP->gs.tty;
-
-
- rio_dprintk (RIO_DEBUG_INTR, "tx port %d: %d chars queued.\n",
- PortP->PortNum, PortP->gs.xmit_cnt);
-
- if (!PortP->gs.xmit_cnt) return;
-
-
- /* This routine is an order of magnitude simpler than the specialix
- version. One of the disadvantages is that this version will send
- an incomplete packet (usually 64 bytes instead of 72) once for
- every 4k worth of data. Let's just say that this won't influence
- performance significantly..... */
-
- rio_spin_lock_irqsave(&PortP->portSem, flags);
-
- while (can_add_transmit( &PacketP, PortP )) {
- c = PortP->gs.xmit_cnt;
- if (c > PKT_MAX_DATA_LEN) c = PKT_MAX_DATA_LEN;
-
- /* Don't copy past the end of the source buffer */
- if (c > SERIAL_XMIT_SIZE - PortP->gs.xmit_tail)
- c = SERIAL_XMIT_SIZE - PortP->gs.xmit_tail;
-
- { int t;
- t = (c > 10)?10:c;
-
- rio_dprintk (RIO_DEBUG_INTR, "rio: tx port %d: copying %d chars: %s - %s\n",
- PortP->PortNum, c,
- firstchars (PortP->gs.xmit_buf + PortP->gs.xmit_tail , t),
- firstchars (PortP->gs.xmit_buf + PortP->gs.xmit_tail + c-t, t));
- }
- /* If for one reason or another, we can't copy more data,
- we're done! */
- if (c == 0) break;
-
- rio_memcpy_toio (PortP->HostP->Caddr, (caddr_t)PacketP->data,
- PortP->gs.xmit_buf + PortP->gs.xmit_tail, c);
- /* udelay (1); */
-
- writeb (c, &(PacketP->len));
- if (!( PortP->State & RIO_DELETED ) ) {
- add_transmit ( PortP );
- /*
- ** Count chars tx'd for port statistics reporting
- */
- if ( PortP->statsGather )
- PortP->txchars += c;
- }
- PortP->gs.xmit_tail = (PortP->gs.xmit_tail + c) & (SERIAL_XMIT_SIZE-1);
- PortP->gs.xmit_cnt -= c;
- }
-
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
-
- if (PortP->gs.xmit_cnt <= (PortP->gs.wakeup_chars + 2*PKT_MAX_DATA_LEN)) {
- rio_dprintk (RIO_DEBUG_INTR, "Waking up.... ldisc:%d (%d/%d)....",
- (int)(PortP->gs.tty->flags & (1 << TTY_DO_WRITE_WAKEUP)),
- PortP->gs.wakeup_chars, PortP->gs.xmit_cnt);
- if ((PortP->gs.tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
- PortP->gs.tty->ldisc.write_wakeup)
- (PortP->gs.tty->ldisc.write_wakeup)(PortP->gs.tty);
- rio_dprintk (RIO_DEBUG_INTR, "(%d/%d)\n",
- PortP->gs.wakeup_chars, PortP->gs.xmit_cnt);
- wake_up_interruptible(&PortP->gs.tty->write_wait);
- }
+ struct Port *PortP;
+ struct rio_info *p;
+ struct tty_struct *tty;
+ int c;
+ struct PKT *PacketP;
+ unsigned long flags;
+
+ PortP = (struct Port *) en;
+ p = (struct rio_info *) PortP->p;
+ tty = PortP->gs.tty;
+
+
+ rio_dprintk(RIO_DEBUG_INTR, "tx port %d: %d chars queued.\n", PortP->PortNum, PortP->gs.xmit_cnt);
+
+ if (!PortP->gs.xmit_cnt)
+ return;
+
+
+ /* This routine is an order of magnitude simpler than the specialix
+ version. One of the disadvantages is that this version will send
+ an incomplete packet (usually 64 bytes instead of 72) once for
+ every 4k worth of data. Let's just say that this won't influence
+ performance significantly..... */
+
+ rio_spin_lock_irqsave(&PortP->portSem, flags);
+
+ while (can_add_transmit(&PacketP, PortP)) {
+ c = PortP->gs.xmit_cnt;
+ if (c > PKT_MAX_DATA_LEN)
+ c = PKT_MAX_DATA_LEN;
+
+ /* Don't copy past the end of the source buffer */
+ if (c > SERIAL_XMIT_SIZE - PortP->gs.xmit_tail)
+ c = SERIAL_XMIT_SIZE - PortP->gs.xmit_tail;
+
+ {
+ int t;
+ t = (c > 10) ? 10 : c;
+
+ rio_dprintk(RIO_DEBUG_INTR, "rio: tx port %d: copying %d chars: %s - %s\n", PortP->PortNum, c, firstchars(PortP->gs.xmit_buf + PortP->gs.xmit_tail, t), firstchars(PortP->gs.xmit_buf + PortP->gs.xmit_tail + c - t, t));
+ }
+ /* If for one reason or another, we can't copy more data,
+ we're done! */
+ if (c == 0)
+ break;
+
+ rio_memcpy_toio(PortP->HostP->Caddr, (caddr_t) PacketP->data, PortP->gs.xmit_buf + PortP->gs.xmit_tail, c);
+ /* udelay (1); */
+
+ writeb(c, &(PacketP->len));
+ if (!(PortP->State & RIO_DELETED)) {
+ add_transmit(PortP);
+ /*
+ ** Count chars tx'd for port statistics reporting
+ */
+ if (PortP->statsGather)
+ PortP->txchars += c;
+ }
+ PortP->gs.xmit_tail = (PortP->gs.xmit_tail + c) & (SERIAL_XMIT_SIZE - 1);
+ PortP->gs.xmit_cnt -= c;
+ }
+
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+
+ if (PortP->gs.xmit_cnt <= (PortP->gs.wakeup_chars + 2 * PKT_MAX_DATA_LEN)) {
+ rio_dprintk(RIO_DEBUG_INTR, "Waking up.... ldisc:%d (%d/%d)....", (int) (PortP->gs.tty->flags & (1 << TTY_DO_WRITE_WAKEUP)), PortP->gs.wakeup_chars, PortP->gs.xmit_cnt);
+ if ((PortP->gs.tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && PortP->gs.tty->ldisc.write_wakeup)
+ (PortP->gs.tty->ldisc.write_wakeup) (PortP->gs.tty);
+ rio_dprintk(RIO_DEBUG_INTR, "(%d/%d)\n", PortP->gs.wakeup_chars, PortP->gs.xmit_cnt);
+ wake_up_interruptible(&PortP->gs.tty->write_wait);
+ }
}
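The transmit loop above copies one contiguous run from the circular xmit buffer per packet, which is why its comment warns about the occasional short packet when the run hits the end of the buffer. A minimal sketch of that chunking arithmetic, with buf_size and pkt_max standing in for SERIAL_XMIT_SIZE and PKT_MAX_DATA_LEN:

	/*
	 * Illustrative only -- not part of the patch.  One pass of the
	 * RIOTxEnable() loop clamps the copy length first to the packet
	 * payload size, then to the distance to the end of the circular
	 * buffer, so the copy never wraps; a run ending exactly at the
	 * buffer end is what produces the "incomplete packet" noted above.
	 */
	static int tx_chunk_len(int queued, int tail, int buf_size, int pkt_max)
	{
		int c = queued;

		if (c > pkt_max)
			c = pkt_max;		/* fit one packet */
		if (c > buf_size - tail)
			c = buf_size - tail;	/* stop at the buffer end */
		return c;			/* 0 means nothing left to send */
	}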
@@ -188,361 +183,349 @@ char * en;
** RIO Host Service routine. Does all the work traditionally associated with an
** interrupt.
*/
-static int RupIntr;
-static int RxIntr;
-static int TxIntr;
-void
-RIOServiceHost(p, HostP, From)
-struct rio_info * p;
+static int RupIntr;
+static int RxIntr;
+static int TxIntr;
+void RIOServiceHost(p, HostP, From)
+struct rio_info *p;
struct Host *HostP;
-int From;
+int From;
{
- rio_spin_lock (&HostP->HostLock);
- if ( (HostP->Flags & RUN_STATE) != RC_RUNNING ) {
- static int t =0;
- rio_spin_unlock (&HostP->HostLock);
- if ((t++ % 200) == 0)
- rio_dprintk (RIO_DEBUG_INTR, "Interrupt but host not running. flags=%x.\n", (int)HostP->Flags);
- return;
- }
- rio_spin_unlock (&HostP->HostLock);
-
- if ( RWORD( HostP->ParmMapP->rup_intr ) ) {
- WWORD( HostP->ParmMapP->rup_intr , 0 );
- p->RIORupCount++;
- RupIntr++;
- rio_dprintk (RIO_DEBUG_INTR, "rio: RUP interrupt on host %d\n", HostP-p->RIOHosts);
- RIOPollHostCommands(p, HostP );
- }
-
- if ( RWORD( HostP->ParmMapP->rx_intr ) ) {
- int port;
-
- WWORD( HostP->ParmMapP->rx_intr , 0 );
- p->RIORxCount++;
- RxIntr++;
-
- rio_dprintk (RIO_DEBUG_INTR, "rio: RX interrupt on host %d\n", HostP-p->RIOHosts);
- /*
- ** Loop through every port. If the port is mapped into
- ** the system ( i.e. has /dev/ttyXXXX associated ) then it is
- ** worth checking. If the port isn't open, grab any packets
- ** hanging on its receive queue and stuff them on the free
- ** list; check for commands on the way.
- */
- for ( port=p->RIOFirstPortsBooted;
- port<p->RIOLastPortsBooted+PORTS_PER_RTA; port++ ) {
- struct Port *PortP = p->RIOPortp[port];
- struct tty_struct *ttyP;
- struct PKT *PacketP;
-
- /*
- ** not mapped in - most of the RIOPortp[] information
- ** has not been set up!
- ** Optimise: ports come in bundles of eight.
- */
- if ( !PortP->Mapped ) {
- port += 7;
- continue; /* with the next port */
- }
-
- /*
- ** If the host board isn't THIS host board, check the next one.
- ** optimise: ports come in bundles of eight.
- */
- if ( PortP->HostP != HostP ) {
- port += 7;
- continue;
- }
-
- /*
- ** Let us see - is the port open? If not, then don't service it.
- */
- if ( !( PortP->PortState & PORT_ISOPEN ) ) {
- continue;
- }
-
- /*
- ** find corresponding tty structure. The process of mapping
- ** the ports puts these here.
- */
- ttyP = PortP->gs.tty;
-
- /*
- ** Lock the port before we begin working on it.
- */
- rio_spin_lock(&PortP->portSem);
-
- /*
- ** Process received data if there is any.
- */
- if ( can_remove_receive( &PacketP, PortP ) )
- RIOReceive(p, PortP);
-
- /*
- ** If there is no data left to be read from the port, and
- ** it's handshake bit is set, then we must clear the handshake,
- ** so that that downstream RTA is re-enabled.
- */
- if ( !can_remove_receive( &PacketP, PortP ) &&
- ( RWORD( PortP->PhbP->handshake )==PHB_HANDSHAKE_SET ) ) {
+ rio_spin_lock(&HostP->HostLock);
+ if ((HostP->Flags & RUN_STATE) != RC_RUNNING) {
+ static int t = 0;
+ rio_spin_unlock(&HostP->HostLock);
+ if ((t++ % 200) == 0)
+ rio_dprintk(RIO_DEBUG_INTR, "Interrupt but host not running. flags=%x.\n", (int) HostP->Flags);
+ return;
+ }
+ rio_spin_unlock(&HostP->HostLock);
+
+ if (RWORD(HostP->ParmMapP->rup_intr)) {
+ WWORD(HostP->ParmMapP->rup_intr, 0);
+ p->RIORupCount++;
+ RupIntr++;
+ rio_dprintk(RIO_DEBUG_INTR, "rio: RUP interrupt on host %d\n", HostP - p->RIOHosts);
+ RIOPollHostCommands(p, HostP);
+ }
+
+ if (RWORD(HostP->ParmMapP->rx_intr)) {
+ int port;
+
+ WWORD(HostP->ParmMapP->rx_intr, 0);
+ p->RIORxCount++;
+ RxIntr++;
+
+ rio_dprintk(RIO_DEBUG_INTR, "rio: RX interrupt on host %d\n", HostP - p->RIOHosts);
+ /*
+ ** Loop through every port. If the port is mapped into
+ ** the system ( i.e. has /dev/ttyXXXX associated ) then it is
+ ** worth checking. If the port isn't open, grab any packets
+ ** hanging on its receive queue and stuff them on the free
+ ** list; check for commands on the way.
+ */
+ for (port = p->RIOFirstPortsBooted; port < p->RIOLastPortsBooted + PORTS_PER_RTA; port++) {
+ struct Port *PortP = p->RIOPortp[port];
+ struct tty_struct *ttyP;
+ struct PKT *PacketP;
+
+ /*
+ ** not mapped in - most of the RIOPortp[] information
+ ** has not been set up!
+ ** Optimise: ports come in bundles of eight.
+ */
+ if (!PortP->Mapped) {
+ port += 7;
+ continue; /* with the next port */
+ }
+
+ /*
+ ** If the host board isn't THIS host board, check the next one.
+ ** optimise: ports come in bundles of eight.
+ */
+ if (PortP->HostP != HostP) {
+ port += 7;
+ continue;
+ }
+
+ /*
+ ** Let us see - is the port open? If not, then don't service it.
+ */
+ if (!(PortP->PortState & PORT_ISOPEN)) {
+ continue;
+ }
+
+ /*
+ ** find corresponding tty structure. The process of mapping
+ ** the ports puts these here.
+ */
+ ttyP = PortP->gs.tty;
+
+ /*
+ ** Lock the port before we begin working on it.
+ */
+ rio_spin_lock(&PortP->portSem);
+
+ /*
+ ** Process received data if there is any.
+ */
+ if (can_remove_receive(&PacketP, PortP))
+ RIOReceive(p, PortP);
+
+ /*
+ ** If there is no data left to be read from the port, and
+ ** its handshake bit is set, then we must clear the handshake,
+ ** so that the downstream RTA is re-enabled.
+ */
+ if (!can_remove_receive(&PacketP, PortP) && (RWORD(PortP->PhbP->handshake) == PHB_HANDSHAKE_SET)) {
/*
- ** MAGIC! ( Basically, handshake the RX buffer, so that
- ** the RTAs upstream can be re-enabled. )
- */
- rio_dprintk (RIO_DEBUG_INTR, "Set RX handshake bit\n");
- WWORD( PortP->PhbP->handshake,
- PHB_HANDSHAKE_SET|PHB_HANDSHAKE_RESET );
- }
- rio_spin_unlock(&PortP->portSem);
- }
- }
-
- if ( RWORD( HostP->ParmMapP->tx_intr ) ) {
- int port;
-
- WWORD( HostP->ParmMapP->tx_intr , 0);
-
- p->RIOTxCount++;
- TxIntr++;
- rio_dprintk (RIO_DEBUG_INTR, "rio: TX interrupt on host %d\n", HostP-p->RIOHosts);
-
- /*
- ** Loop through every port.
- ** If the port is mapped into the system ( i.e. has /dev/ttyXXXX
- ** associated ) then it is worth checking.
- */
- for ( port=p->RIOFirstPortsBooted;
- port<p->RIOLastPortsBooted+PORTS_PER_RTA; port++ ) {
- struct Port *PortP = p->RIOPortp[port];
- struct tty_struct *ttyP;
- struct PKT *PacketP;
-
- /*
- ** not mapped in - most of the RIOPortp[] information
- ** has not been set up!
- */
- if ( !PortP->Mapped ) {
- port += 7;
- continue; /* with the next port */
- }
-
- /*
- ** If the host board isn't running, then its data structures
- ** are no use to us - continue quietly.
- */
- if ( PortP->HostP != HostP ) {
- port += 7;
- continue; /* with the next port */
- }
-
- /*
- ** Let us see - is the port open? If not, then don't service it.
- */
- if ( !( PortP->PortState & PORT_ISOPEN ) ) {
- continue;
- }
-
- rio_dprintk (RIO_DEBUG_INTR, "rio: Looking into port %d.\n", port);
- /*
- ** Lock the port before we begin working on it.
- */
- rio_spin_lock(&PortP->portSem);
-
- /*
- ** If we can't add anything to the transmit queue, then
- ** we need do none of this processing.
- */
- if ( !can_add_transmit( &PacketP, PortP ) ) {
- rio_dprintk (RIO_DEBUG_INTR, "Can't add to port, so skipping.\n");
- rio_spin_unlock(&PortP->portSem);
- continue;
- }
-
- /*
- ** find corresponding tty structure. The process of mapping
- ** the ports puts these here.
- */
- ttyP = PortP->gs.tty;
- /* If ttyP is NULL, the port is getting closed. Forget about it. */
- if (!ttyP) {
- rio_dprintk (RIO_DEBUG_INTR, "no tty, so skipping.\n");
- rio_spin_unlock(&PortP->portSem);
- continue;
- }
- /*
- ** If there is more room available we start up the transmit
- ** data process again. This can be direct I/O, if the cookmode
- ** is set to COOK_RAW or COOK_MEDIUM, or will be a call to the
- ** riotproc( T_OUTPUT ) if we are in COOK_WELL mode, to fetch
- ** characters via the line discipline. We must always call
- ** the line discipline,
- ** so that user input characters can be echoed correctly.
- **
- ** ++++ Update +++++
- ** With the advent of double buffering, we now see if
- ** TxBufferOut-In is non-zero. If so, then we copy a packet
- ** to the output place, and set it going. If this empties
- ** the buffer, then we must issue a wakeup( ) on OUT.
- ** If it frees space in the buffer then we must issue
- ** a wakeup( ) on IN.
- **
- ** ++++ Extra! Extra! If PortP->WflushFlag is set, then we
- ** have to send a WFLUSH command down the PHB, to mark the
- ** end point of a WFLUSH. We also need to clear out any
- ** data from the double buffer! ( note that WflushFlag is a
- ** *count* of the number of WFLUSH commands outstanding! )
- **
- ** ++++ And there's more!
- ** If an RTA is powered off, then on again, and rebooted,
- ** whilst it has ports open, then we need to re-open the ports.
- ** ( reasonable enough ). We can't do this when we spot the
- ** re-boot, in interrupt time, because the queue is probably
- ** full. So, when we come in here, we need to test if any
- ** ports are in this condition, and re-open the port before
- ** we try to send any more data to it. Now, the re-booted
- ** RTA will be discarding packets from the PHB until it
- ** receives this open packet, but don't worry tooo much
- ** about that. The one thing that is interesting is the
- ** combination of this effect and the WFLUSH effect!
- */
- /* For now don't handle RTA reboots. -- REW.
- Reenabled. Otherwise RTA reboots didn't work. Duh. -- REW */
- if ( PortP->MagicFlags ) {
-#if 1
- if ( PortP->MagicFlags & MAGIC_REBOOT ) {
- /*
- ** well, the RTA has been rebooted, and there is room
- ** on its queue to add the open packet that is required.
- **
- ** The messy part of this line is trying to decide if
- ** we need to call the Param function as a tty or as
- ** a modem.
- ** DONT USE CLOCAL AS A TEST FOR THIS!
- **
- ** If we can't param the port, then move on to the
- ** next port.
- */
- PortP->InUse = NOT_INUSE;
-
- rio_spin_unlock(&PortP->portSem);
- if ( RIOParam(PortP, OPEN, ((PortP->Cor2Copy &
- (COR2_RTSFLOW|COR2_CTSFLOW ) )==
- (COR2_RTSFLOW|COR2_CTSFLOW ) ) ?
- TRUE : FALSE, DONT_SLEEP ) == RIO_FAIL ) {
- continue; /* with next port */
- }
- rio_spin_lock(&PortP->portSem);
- PortP->MagicFlags &= ~MAGIC_REBOOT;
+ ** MAGIC! ( Basically, handshake the RX buffer, so that
+ ** the RTAs upstream can be re-enabled. )
+ */
+ rio_dprintk(RIO_DEBUG_INTR, "Set RX handshake bit\n");
+ WWORD(PortP->PhbP->handshake, PHB_HANDSHAKE_SET | PHB_HANDSHAKE_RESET);
+ }
+ rio_spin_unlock(&PortP->portSem);
+ }
}
+
+ if (RWORD(HostP->ParmMapP->tx_intr)) {
+ int port;
+
+ WWORD(HostP->ParmMapP->tx_intr, 0);
+
+ p->RIOTxCount++;
+ TxIntr++;
+ rio_dprintk(RIO_DEBUG_INTR, "rio: TX interrupt on host %d\n", HostP - p->RIOHosts);
+
+ /*
+ ** Loop through every port.
+ ** If the port is mapped into the system ( i.e. has /dev/ttyXXXX
+ ** associated ) then it is worth checking.
+ */
+ for (port = p->RIOFirstPortsBooted; port < p->RIOLastPortsBooted + PORTS_PER_RTA; port++) {
+ struct Port *PortP = p->RIOPortp[port];
+ struct tty_struct *ttyP;
+ struct PKT *PacketP;
+
+ /*
+ ** not mapped in - most of the RIOPortp[] information
+ ** has not been set up!
+ */
+ if (!PortP->Mapped) {
+ port += 7;
+ continue; /* with the next port */
+ }
+
+ /*
+ ** If the host board isn't running, then its data structures
+ ** are no use to us - continue quietly.
+ */
+ if (PortP->HostP != HostP) {
+ port += 7;
+ continue; /* with the next port */
+ }
+
+ /*
+ ** Let us see - is the port open? If not, then don't service it.
+ */
+ if (!(PortP->PortState & PORT_ISOPEN)) {
+ continue;
+ }
+
+ rio_dprintk(RIO_DEBUG_INTR, "rio: Looking into port %d.\n", port);
+ /*
+ ** Lock the port before we begin working on it.
+ */
+ rio_spin_lock(&PortP->portSem);
+
+ /*
+ ** If we can't add anything to the transmit queue, then
+ ** we need do none of this processing.
+ */
+ if (!can_add_transmit(&PacketP, PortP)) {
+ rio_dprintk(RIO_DEBUG_INTR, "Can't add to port, so skipping.\n");
+ rio_spin_unlock(&PortP->portSem);
+ continue;
+ }
+
+ /*
+ ** find corresponding tty structure. The process of mapping
+ ** the ports puts these here.
+ */
+ ttyP = PortP->gs.tty;
+ /* If ttyP is NULL, the port is getting closed. Forget about it. */
+ if (!ttyP) {
+ rio_dprintk(RIO_DEBUG_INTR, "no tty, so skipping.\n");
+ rio_spin_unlock(&PortP->portSem);
+ continue;
+ }
+ /*
+ ** If there is more room available we start up the transmit
+ ** data process again. This can be direct I/O, if the cookmode
+ ** is set to COOK_RAW or COOK_MEDIUM, or will be a call to the
+ ** riotproc( T_OUTPUT ) if we are in COOK_WELL mode, to fetch
+ ** characters via the line discipline. We must always call
+ ** the line discipline,
+ ** so that user input characters can be echoed correctly.
+ **
+ ** ++++ Update +++++
+ ** With the advent of double buffering, we now see if
+ ** TxBufferOut-In is non-zero. If so, then we copy a packet
+ ** to the output place, and set it going. If this empties
+ ** the buffer, then we must issue a wakeup( ) on OUT.
+ ** If it frees space in the buffer then we must issue
+ ** a wakeup( ) on IN.
+ **
+ ** ++++ Extra! Extra! If PortP->WflushFlag is set, then we
+ ** have to send a WFLUSH command down the PHB, to mark the
+ ** end point of a WFLUSH. We also need to clear out any
+ ** data from the double buffer! ( note that WflushFlag is a
+ ** *count* of the number of WFLUSH commands outstanding! )
+ **
+ ** ++++ And there's more!
+ ** If an RTA is powered off, then on again, and rebooted,
+ ** whilst it has ports open, then we need to re-open the ports.
+ ** ( reasonable enough ). We can't do this when we spot the
+ ** re-boot, in interrupt time, because the queue is probably
+ ** full. So, when we come in here, we need to test if any
+ ** ports are in this condition, and re-open the port before
+ ** we try to send any more data to it. Now, the re-booted
+ ** RTA will be discarding packets from the PHB until it
+ ** receives this open packet, but don't worry too much
+ ** about that. The one thing that is interesting is the
+ ** combination of this effect and the WFLUSH effect!
+ */
+ /* For now don't handle RTA reboots. -- REW.
+ Reenabled. Otherwise RTA reboots didn't work. Duh. -- REW */
+ if (PortP->MagicFlags) {
+#if 1
+ if (PortP->MagicFlags & MAGIC_REBOOT) {
+ /*
+ ** well, the RTA has been rebooted, and there is room
+ ** on its queue to add the open packet that is required.
+ **
+ ** The messy part of this line is trying to decide if
+ ** we need to call the Param function as a tty or as
+ ** a modem.
+ ** DONT USE CLOCAL AS A TEST FOR THIS!
+ **
+ ** If we can't param the port, then move on to the
+ ** next port.
+ */
+ PortP->InUse = NOT_INUSE;
+
+ rio_spin_unlock(&PortP->portSem);
+ if (RIOParam(PortP, OPEN, ((PortP->Cor2Copy & (COR2_RTSFLOW | COR2_CTSFLOW)) == (COR2_RTSFLOW | COR2_CTSFLOW)) ? TRUE : FALSE, DONT_SLEEP) == RIO_FAIL) {
+ continue; /* with next port */
+ }
+ rio_spin_lock(&PortP->portSem);
+ PortP->MagicFlags &= ~MAGIC_REBOOT;
+ }
#endif
- /*
- ** As mentioned above, this is a tacky hack to cope
- ** with WFLUSH
- */
- if ( PortP->WflushFlag ) {
- rio_dprintk (RIO_DEBUG_INTR, "Want to WFLUSH mark this port\n");
-
- if ( PortP->InUse )
- rio_dprintk (RIO_DEBUG_INTR, "FAILS - PORT IS IN USE\n");
- }
-
- while ( PortP->WflushFlag &&
- can_add_transmit( &PacketP, PortP ) &&
- ( PortP->InUse == NOT_INUSE ) ) {
- int p;
- struct PktCmd *PktCmdP;
-
- rio_dprintk (RIO_DEBUG_INTR, "Add WFLUSH marker to data queue\n");
- /*
- ** make it look just like a WFLUSH command
- */
- PktCmdP = ( struct PktCmd * )&PacketP->data[0];
-
- WBYTE( PktCmdP->Command , WFLUSH );
-
- p = PortP->HostPort % ( ushort )PORTS_PER_RTA;
-
- /*
- ** If second block of ports for 16 port RTA, add 8
- ** to index 8-15.
- */
- if ( PortP->SecondBlock )
- p += PORTS_PER_RTA;
-
- WBYTE( PktCmdP->PhbNum, p );
-
- /*
- ** to make debuggery easier
- */
- WBYTE( PacketP->data[ 2], 'W' );
- WBYTE( PacketP->data[ 3], 'F' );
- WBYTE( PacketP->data[ 4], 'L' );
- WBYTE( PacketP->data[ 5], 'U' );
- WBYTE( PacketP->data[ 6], 'S' );
- WBYTE( PacketP->data[ 7], 'H' );
- WBYTE( PacketP->data[ 8], ' ' );
- WBYTE( PacketP->data[ 9], '0'+PortP->WflushFlag );
- WBYTE( PacketP->data[10], ' ' );
- WBYTE( PacketP->data[11], ' ' );
- WBYTE( PacketP->data[12], '\0' );
-
- /*
- ** its two bytes long!
- */
- WBYTE( PacketP->len , PKT_CMD_BIT | 2 );
-
- /*
- ** queue it!
- */
- if ( !( PortP->State & RIO_DELETED ) ) {
- add_transmit( PortP );
- /*
- ** Count chars tx'd for port statistics reporting
- */
- if ( PortP->statsGather )
- PortP->txchars += 2;
- }
-
- if ( --( PortP->WflushFlag ) == 0 ) {
- PortP->MagicFlags &= ~MAGIC_FLUSH;
- }
-
- rio_dprintk (RIO_DEBUG_INTR, "Wflush count now stands at %d\n",
- PortP->WflushFlag);
- }
- if ( PortP->MagicFlags & MORE_OUTPUT_EYGOR ) {
- if ( PortP->MagicFlags & MAGIC_FLUSH ) {
- PortP->MagicFlags |= MORE_OUTPUT_EYGOR;
- }
- else {
- if ( !can_add_transmit( &PacketP, PortP ) ) {
- rio_spin_unlock(&PortP->portSem);
- continue;
- }
- rio_spin_unlock(&PortP->portSem);
- RIOTxEnable((char *)PortP);
- rio_spin_lock(&PortP->portSem);
- PortP->MagicFlags &= ~MORE_OUTPUT_EYGOR;
- }
+ /*
+ ** As mentioned above, this is a tacky hack to cope
+ ** with WFLUSH
+ */
+ if (PortP->WflushFlag) {
+ rio_dprintk(RIO_DEBUG_INTR, "Want to WFLUSH mark this port\n");
+
+ if (PortP->InUse)
+ rio_dprintk(RIO_DEBUG_INTR, "FAILS - PORT IS IN USE\n");
+ }
+
+ while (PortP->WflushFlag && can_add_transmit(&PacketP, PortP) && (PortP->InUse == NOT_INUSE)) {
+ int p;
+ struct PktCmd *PktCmdP;
+
+ rio_dprintk(RIO_DEBUG_INTR, "Add WFLUSH marker to data queue\n");
+ /*
+ ** make it look just like a WFLUSH command
+ */
+ PktCmdP = (struct PktCmd *) &PacketP->data[0];
+
+ WBYTE(PktCmdP->Command, WFLUSH);
+
+ p = PortP->HostPort % (ushort) PORTS_PER_RTA;
+
+ /*
+ ** If second block of ports for 16 port RTA, add 8
+ ** to index 8-15.
+ */
+ if (PortP->SecondBlock)
+ p += PORTS_PER_RTA;
+
+ WBYTE(PktCmdP->PhbNum, p);
+
+ /*
+ ** to make debuggery easier
+ */
+ WBYTE(PacketP->data[2], 'W');
+ WBYTE(PacketP->data[3], 'F');
+ WBYTE(PacketP->data[4], 'L');
+ WBYTE(PacketP->data[5], 'U');
+ WBYTE(PacketP->data[6], 'S');
+ WBYTE(PacketP->data[7], 'H');
+ WBYTE(PacketP->data[8], ' ');
+ WBYTE(PacketP->data[9], '0' + PortP->WflushFlag);
+ WBYTE(PacketP->data[10], ' ');
+ WBYTE(PacketP->data[11], ' ');
+ WBYTE(PacketP->data[12], '\0');
+
+ /*
+ ** it's two bytes long!
+ */
+ WBYTE(PacketP->len, PKT_CMD_BIT | 2);
+
+ /*
+ ** queue it!
+ */
+ if (!(PortP->State & RIO_DELETED)) {
+ add_transmit(PortP);
+ /*
+ ** Count chars tx'd for port statistics reporting
+ */
+ if (PortP->statsGather)
+ PortP->txchars += 2;
+ }
+
+ if (--(PortP->WflushFlag) == 0) {
+ PortP->MagicFlags &= ~MAGIC_FLUSH;
+ }
+
+ rio_dprintk(RIO_DEBUG_INTR, "Wflush count now stands at %d\n", PortP->WflushFlag);
+ }
+ if (PortP->MagicFlags & MORE_OUTPUT_EYGOR) {
+ if (PortP->MagicFlags & MAGIC_FLUSH) {
+ PortP->MagicFlags |= MORE_OUTPUT_EYGOR;
+ } else {
+ if (!can_add_transmit(&PacketP, PortP)) {
+ rio_spin_unlock(&PortP->portSem);
+ continue;
+ }
+ rio_spin_unlock(&PortP->portSem);
+ RIOTxEnable((char *) PortP);
+ rio_spin_lock(&PortP->portSem);
+ PortP->MagicFlags &= ~MORE_OUTPUT_EYGOR;
+ }
+ }
+ }
+
+
+ /*
+ ** If we can't add anything to the transmit queue, then
+ ** we need do none of the remaining processing.
+ */
+ if (!can_add_transmit(&PacketP, PortP)) {
+ rio_spin_unlock(&PortP->portSem);
+ continue;
+ }
+
+ rio_spin_unlock(&PortP->portSem);
+ RIOTxEnable((char *) PortP);
+ }
}
- }
-
-
- /*
- ** If we can't add anything to the transmit queue, then
- ** we need do none of the remaining processing.
- */
- if (!can_add_transmit( &PacketP, PortP ) ) {
- rio_spin_unlock(&PortP->portSem);
- continue;
- }
-
- rio_spin_unlock(&PortP->portSem);
- RIOTxEnable((char *)PortP);
- }
- }
}
/*
@@ -550,180 +533,162 @@ int From;
** NB: Called with the tty locked. The spl from the lockb( ) is passed.
** we return the ttySpl level that we re-locked at.
*/
-static void
-RIOReceive(p, PortP)
-struct rio_info * p;
-struct Port * PortP;
+static void RIOReceive(p, PortP)
+struct rio_info *p;
+struct Port *PortP;
{
- struct tty_struct *TtyP;
- register ushort transCount;
- struct PKT *PacketP;
- register uint DataCnt;
- uchar * ptr;
- int copied =0;
-
- static int intCount, RxIntCnt;
-
- /*
- ** The receive data process is to remove packets from the
- ** PHB until there aren't any more or the current cblock
- ** is full. When this occurs, there will be some left over
- ** data in the packet, that we must do something with.
- ** As we haven't unhooked the packet from the read list
- ** yet, we can just leave the packet there, having first
- ** made a note of how far we got. This means that we need
- ** a pointer per port saying where we start taking the
- ** data from - this will normally be zero, but when we
- ** run out of space it will be set to the offset of the
- ** next byte to copy from the packet data area. The packet
- ** length field is decremented by the number of bytes that
- ** we succesfully removed from the packet. When this reaches
- ** zero, we reset the offset pointer to be zero, and free
- ** the packet from the front of the queue.
- */
-
- intCount++;
-
- TtyP = PortP->gs.tty;
- if (!TtyP) {
- rio_dprintk (RIO_DEBUG_INTR, "RIOReceive: tty is null. \n");
- return;
- }
-
- if (PortP->State & RIO_THROTTLE_RX) {
- rio_dprintk (RIO_DEBUG_INTR, "RIOReceive: Throttled. Can't handle more input.\n");
- return;
- }
-
- if ( PortP->State & RIO_DELETED )
- {
- while ( can_remove_receive( &PacketP, PortP ) )
- {
- remove_receive( PortP );
- put_free_end( PortP->HostP, PacketP );
+ struct tty_struct *TtyP;
+ register ushort transCount;
+ struct PKT *PacketP;
+ register uint DataCnt;
+ uchar *ptr;
+ unsigned char *buf;
+ int copied = 0;
+
+ static int intCount, RxIntCnt;
+
+ /*
+ ** The receive data process is to remove packets from the
+ ** PHB until there aren't any more or the current cblock
+ ** is full. When this occurs, there will be some left over
+ ** data in the packet, that we must do something with.
+ ** As we haven't unhooked the packet from the read list
+ ** yet, we can just leave the packet there, having first
+ ** made a note of how far we got. This means that we need
+ ** a pointer per port saying where we start taking the
+ ** data from - this will normally be zero, but when we
+ ** run out of space it will be set to the offset of the
+ ** next byte to copy from the packet data area. The packet
+ ** length field is decremented by the number of bytes that
+ ** we successfully removed from the packet. When this reaches
+ ** zero, we reset the offset pointer to be zero, and free
+ ** the packet from the front of the queue.
+ */
+
+ intCount++;
+
+ TtyP = PortP->gs.tty;
+ if (!TtyP) {
+ rio_dprintk(RIO_DEBUG_INTR, "RIOReceive: tty is null. \n");
+ return;
}
- }
- else
- {
- /*
- ** loop, just so long as:
- ** i ) there's some data ( i.e. can_remove_receive )
- ** ii ) we haven't been blocked
- ** iii ) there's somewhere to put the data
- ** iv ) we haven't outstayed our welcome
- */
- transCount = 1;
- while ( can_remove_receive(&PacketP, PortP)
- && transCount)
- {
+
+ if (PortP->State & RIO_THROTTLE_RX) {
+ rio_dprintk(RIO_DEBUG_INTR, "RIOReceive: Throttled. Can't handle more input.\n");
+ return;
+ }
+
+ if (PortP->State & RIO_DELETED) {
+ while (can_remove_receive(&PacketP, PortP)) {
+ remove_receive(PortP);
+ put_free_end(PortP->HostP, PacketP);
+ }
+ } else {
+ /*
+ ** loop, just so long as:
+ ** i ) there's some data ( i.e. can_remove_receive )
+ ** ii ) we haven't been blocked
+ ** iii ) there's somewhere to put the data
+ ** iv ) we haven't outstayed our welcome
+ */
+ transCount = 1;
+ while (can_remove_receive(&PacketP, PortP)
+ && transCount) {
#ifdef STATS
- PortP->Stat.RxIntCnt++;
-#endif /* STATS */
- RxIntCnt++;
-
- /*
- ** check that it is not a command!
- */
- if ( PacketP->len & PKT_CMD_BIT ) {
- rio_dprintk (RIO_DEBUG_INTR, "RIO: unexpected command packet received on PHB\n");
- /* rio_dprint(RIO_DEBUG_INTR, (" sysport = %d\n", p->RIOPortp->PortNum)); */
- rio_dprintk (RIO_DEBUG_INTR, " dest_unit = %d\n", PacketP->dest_unit);
- rio_dprintk (RIO_DEBUG_INTR, " dest_port = %d\n", PacketP->dest_port);
- rio_dprintk (RIO_DEBUG_INTR, " src_unit = %d\n", PacketP->src_unit);
- rio_dprintk (RIO_DEBUG_INTR, " src_port = %d\n", PacketP->src_port);
- rio_dprintk (RIO_DEBUG_INTR, " len = %d\n", PacketP->len);
- rio_dprintk (RIO_DEBUG_INTR, " control = %d\n", PacketP->control);
- rio_dprintk (RIO_DEBUG_INTR, " csum = %d\n", PacketP->csum);
- rio_dprintk (RIO_DEBUG_INTR, " data bytes: ");
- for ( DataCnt=0; DataCnt<PKT_MAX_DATA_LEN; DataCnt++ )
- rio_dprintk (RIO_DEBUG_INTR, "%d\n", PacketP->data[DataCnt]);
- remove_receive( PortP );
- put_free_end( PortP->HostP, PacketP );
- continue; /* with next packet */
- }
-
- /*
- ** How many characters can we move 'upstream' ?
- **
- ** Determine the minimum of the amount of data
- ** available and the amount of space in which to
- ** put it.
- **
- ** 1. Get the packet length by masking 'len'
- ** for only the length bits.
- ** 2. Available space is [buffer size] - [space used]
- **
- ** Transfer count is the minimum of packet length
- ** and available space.
- */
-
- transCount = min_t(unsigned int, PacketP->len & PKT_LEN_MASK,
- TTY_FLIPBUF_SIZE - TtyP->flip.count);
- rio_dprintk (RIO_DEBUG_REC, "port %d: Copy %d bytes\n",
- PortP->PortNum, transCount);
- /*
- ** To use the following 'kkprintfs' for debugging - change the '#undef'
- ** to '#define', (this is the only place ___DEBUG_IT___ occurs in the
- ** driver).
- */
+ PortP->Stat.RxIntCnt++;
+#endif /* STATS */
+ RxIntCnt++;
+
+ /*
+ ** check that it is not a command!
+ */
+ if (PacketP->len & PKT_CMD_BIT) {
+ rio_dprintk(RIO_DEBUG_INTR, "RIO: unexpected command packet received on PHB\n");
+ /* rio_dprint(RIO_DEBUG_INTR, (" sysport = %d\n", p->RIOPortp->PortNum)); */
+ rio_dprintk(RIO_DEBUG_INTR, " dest_unit = %d\n", PacketP->dest_unit);
+ rio_dprintk(RIO_DEBUG_INTR, " dest_port = %d\n", PacketP->dest_port);
+ rio_dprintk(RIO_DEBUG_INTR, " src_unit = %d\n", PacketP->src_unit);
+ rio_dprintk(RIO_DEBUG_INTR, " src_port = %d\n", PacketP->src_port);
+ rio_dprintk(RIO_DEBUG_INTR, " len = %d\n", PacketP->len);
+ rio_dprintk(RIO_DEBUG_INTR, " control = %d\n", PacketP->control);
+ rio_dprintk(RIO_DEBUG_INTR, " csum = %d\n", PacketP->csum);
+ rio_dprintk(RIO_DEBUG_INTR, " data bytes: ");
+ for (DataCnt = 0; DataCnt < PKT_MAX_DATA_LEN; DataCnt++)
+ rio_dprintk(RIO_DEBUG_INTR, "%d\n", PacketP->data[DataCnt]);
+ remove_receive(PortP);
+ put_free_end(PortP->HostP, PacketP);
+ continue; /* with next packet */
+ }
+
+ /*
+ ** How many characters can we move 'upstream' ?
+ **
+ ** Determine the minimum of the amount of data
+ ** available and the amount of space in which to
+ ** put it.
+ **
+ ** 1. Get the packet length by masking 'len'
+ ** for only the length bits.
+ ** 2. Available space is [buffer size] - [space used]
+ **
+ ** Transfer count is the minimum of packet length
+ ** and available space.
+ */
+
+ transCount = tty_buffer_request_room(TtyP, PacketP->len & PKT_LEN_MASK);
+ rio_dprintk(RIO_DEBUG_REC, "port %d: Copy %d bytes\n", PortP->PortNum, transCount);
+ /*
+ ** To use the following 'kkprintfs' for debugging - change the '#undef'
+ ** to '#define', (this is the only place ___DEBUG_IT___ occurs in the
+ ** driver).
+ */
#undef ___DEBUG_IT___
#ifdef ___DEBUG_IT___
- kkprintf("I:%d R:%d P:%d Q:%d C:%d F:%x ",
- intCount,
- RxIntCnt,
- PortP->PortNum,
- TtyP->rxqueue.count,
- transCount,
- TtyP->flags );
+ kkprintf("I:%d R:%d P:%d Q:%d C:%d F:%x ", intCount, RxIntCnt, PortP->PortNum, TtyP->rxqueue.count, transCount, TtyP->flags);
#endif
- ptr = (uchar *) PacketP->data + PortP->RxDataStart;
-
- rio_memcpy_fromio (TtyP->flip.char_buf_ptr, ptr, transCount);
- memset(TtyP->flip.flag_buf_ptr, TTY_NORMAL, transCount);
+ ptr = (uchar *) PacketP->data + PortP->RxDataStart;
+ tty_prepare_flip_string(TtyP, &buf, transCount);
+ rio_memcpy_fromio(buf, ptr, transCount);
#ifdef STATS
- /*
- ** keep a count for statistical purposes
- */
- PortP->Stat.RxCharCnt += transCount;
+ /*
+ ** keep a count for statistical purposes
+ */
+ PortP->Stat.RxCharCnt += transCount;
#endif
- PortP->RxDataStart += transCount;
- PacketP->len -= transCount;
- copied += transCount;
- TtyP->flip.count += transCount;
- TtyP->flip.char_buf_ptr += transCount;
- TtyP->flip.flag_buf_ptr += transCount;
+ PortP->RxDataStart += transCount;
+ PacketP->len -= transCount;
+ copied += transCount;
#ifdef ___DEBUG_IT___
- kkprintf("T:%d L:%d\n", DataCnt, PacketP->len );
+ kkprintf("T:%d L:%d\n", DataCnt, PacketP->len);
#endif
- if ( PacketP->len == 0 )
- {
+ if (PacketP->len == 0) {
/*
- ** If we have emptied the packet, then we can
- ** free it, and reset the start pointer for
- ** the next packet.
- */
- remove_receive( PortP );
- put_free_end( PortP->HostP, PacketP );
- PortP->RxDataStart = 0;
+ ** If we have emptied the packet, then we can
+ ** free it, and reset the start pointer for
+ ** the next packet.
+ */
+ remove_receive(PortP);
+ put_free_end(PortP->HostP, PacketP);
+ PortP->RxDataStart = 0;
#ifdef STATS
/*
- ** more lies ( oops, I mean statistics )
- */
- PortP->Stat.RxPktCnt++;
-#endif /* STATS */
- }
+ ** more lies ( oops, I mean statistics )
+ */
+ PortP->Stat.RxPktCnt++;
+#endif /* STATS */
+ }
+ }
+ }
+ if (copied) {
+ rio_dprintk(RIO_DEBUG_REC, "port %d: pushing tty flip buffer: %d total bytes copied.\n", PortP->PortNum, copied);
+ tty_flip_buffer_push(TtyP);
}
- }
- if (copied) {
- rio_dprintk (RIO_DEBUG_REC, "port %d: pushing tty flip buffer: %d total bytes copied.\n", PortP->PortNum, copied);
- tty_flip_buffer_push (TtyP);
- }
- return;
+ return;
}
#ifdef FUTURE_RELEASE
@@ -731,221 +696,210 @@ struct Port * PortP;
** The proc routine called by the line discipline to do the work for it.
** The proc routine works hand in hand with the interrupt routine.
*/
-int
-riotproc(p, tp, cmd, port)
-struct rio_info * p;
+int riotproc(p, tp, cmd, port)
+struct rio_info *p;
register struct ttystatics *tp;
int cmd;
-int port;
+int port;
{
register struct Port *PortP;
int SysPort;
struct PKT *PacketP;
- SysPort = port; /* Believe me, it works. */
+ SysPort = port; /* Believe me, it works. */
- if ( SysPort < 0 || SysPort >= RIO_PORTS ) {
- rio_dprintk (RIO_DEBUG_INTR, "Illegal port %d derived from TTY in riotproc()\n",SysPort);
+ if (SysPort < 0 || SysPort >= RIO_PORTS) {
+ rio_dprintk(RIO_DEBUG_INTR, "Illegal port %d derived from TTY in riotproc()\n", SysPort);
return 0;
}
PortP = p->RIOPortp[SysPort];
- if ((uint)PortP->PhbP < (uint)PortP->Caddr ||
- (uint)PortP->PhbP >= (uint)PortP->Caddr+SIXTY_FOUR_K ) {
- rio_dprintk (RIO_DEBUG_INTR, "RIO: NULL or BAD PhbP on sys port %d in proc routine\n",
- SysPort);
- rio_dprintk (RIO_DEBUG_INTR, " PortP = 0x%x\n",PortP);
- rio_dprintk (RIO_DEBUG_INTR, " PortP->PhbP = 0x%x\n",PortP->PhbP);
- rio_dprintk (RIO_DEBUG_INTR, " PortP->Caddr = 0x%x\n",PortP->PhbP);
- rio_dprintk (RIO_DEBUG_INTR, " PortP->HostPort = 0x%x\n",PortP->HostPort);
+ if ((uint) PortP->PhbP < (uint) PortP->Caddr || (uint) PortP->PhbP >= (uint) PortP->Caddr + SIXTY_FOUR_K) {
+ rio_dprintk(RIO_DEBUG_INTR, "RIO: NULL or BAD PhbP on sys port %d in proc routine\n", SysPort);
+ rio_dprintk(RIO_DEBUG_INTR, " PortP = 0x%x\n", PortP);
+ rio_dprintk(RIO_DEBUG_INTR, " PortP->PhbP = 0x%x\n", PortP->PhbP);
+ rio_dprintk(RIO_DEBUG_INTR, " PortP->Caddr = 0x%x\n", PortP->PhbP);
+ rio_dprintk(RIO_DEBUG_INTR, " PortP->HostPort = 0x%x\n", PortP->HostPort);
return 0;
}
- switch(cmd) {
- case T_WFLUSH:
- rio_dprintk (RIO_DEBUG_INTR, "T_WFLUSH\n");
+ switch (cmd) {
+ case T_WFLUSH:
+ rio_dprintk(RIO_DEBUG_INTR, "T_WFLUSH\n");
+ /*
+ ** Because of the spooky way the RIO works, we don't need
+ ** to issue a flush command on any of the SET*F commands,
+ ** as that causes trouble with getty and login, which issue
+ ** these commands to incur a READ flush, and rely on the fact
+ ** that the line discipline does a wait for drain for them.
+ ** As the rio doesn't wait for drain, the write flush would
+ ** destroy the Password: prompt. This isn't very friendly, so
+ ** here we only issue a WFLUSH command if we are in the interrupt
+ ** routine, or we aren't executing a SET*F command.
+ */
+ if (PortP->HostP->InIntr || !PortP->FlushCmdBodge) {
/*
- ** Because of the spooky way the RIO works, we don't need
- ** to issue a flush command on any of the SET*F commands,
- ** as that causes trouble with getty and login, which issue
- ** these commands to incur a READ flush, and rely on the fact
- ** that the line discipline does a wait for drain for them.
- ** As the rio doesn't wait for drain, the write flush would
- ** destroy the Password: prompt. This isn't very friendly, so
- ** here we only issue a WFLUSH command if we are in the interrupt
- ** routine, or we aren't executing a SET*F command.
- */
- if ( PortP->HostP->InIntr || !PortP->FlushCmdBodge ) {
- /*
- ** form a wflush packet - 1 byte long, no data
- */
- if ( PortP->State & RIO_DELETED ) {
- rio_dprintk (RIO_DEBUG_INTR, "WFLUSH on deleted RTA\n");
- }
- else {
- if ( RIOPreemptiveCmd(p, PortP, WFLUSH ) == RIO_FAIL ) {
- rio_dprintk (RIO_DEBUG_INTR, "T_WFLUSH Command failed\n");
- }
- else
- rio_dprintk (RIO_DEBUG_INTR, "T_WFLUSH Command\n");
- }
- /*
- ** WFLUSH operation - flush the data!
- */
- PortP->TxBufferIn = PortP->TxBufferOut = 0;
- }
- else {
- rio_dprintk (RIO_DEBUG_INTR, "T_WFLUSH Command ignored\n");
+ ** form a wflush packet - 1 byte long, no data
+ */
+ if (PortP->State & RIO_DELETED) {
+ rio_dprintk(RIO_DEBUG_INTR, "WFLUSH on deleted RTA\n");
+ } else {
+ if (RIOPreemptiveCmd(p, PortP, WFLUSH) == RIO_FAIL) {
+ rio_dprintk(RIO_DEBUG_INTR, "T_WFLUSH Command failed\n");
+ } else
+ rio_dprintk(RIO_DEBUG_INTR, "T_WFLUSH Command\n");
}
/*
- ** sort out the line discipline
- */
- if (PortP->CookMode == COOK_WELL)
- goto start;
- break;
-
- case T_RESUME:
- rio_dprintk (RIO_DEBUG_INTR, "T_RESUME\n");
- /*
- ** send pre-emptive resume packet
- */
- if ( PortP->State & RIO_DELETED ) {
- rio_dprintk (RIO_DEBUG_INTR, "RESUME on deleted RTA\n");
+ ** WFLUSH operation - flush the data!
+ */
+ PortP->TxBufferIn = PortP->TxBufferOut = 0;
+ } else {
+ rio_dprintk(RIO_DEBUG_INTR, "T_WFLUSH Command ignored\n");
+ }
+ /*
+ ** sort out the line discipline
+ */
+ if (PortP->CookMode == COOK_WELL)
+ goto start;
+ break;
+
+ case T_RESUME:
+ rio_dprintk(RIO_DEBUG_INTR, "T_RESUME\n");
+ /*
+ ** send pre-emptive resume packet
+ */
+ if (PortP->State & RIO_DELETED) {
+ rio_dprintk(RIO_DEBUG_INTR, "RESUME on deleted RTA\n");
+ } else {
+ if (RIOPreemptiveCmd(p, PortP, RESUME) == RIO_FAIL) {
+ rio_dprintk(RIO_DEBUG_INTR, "T_RESUME Command failed\n");
}
- else {
- if ( RIOPreemptiveCmd(p, PortP, RESUME ) == RIO_FAIL ) {
- rio_dprintk (RIO_DEBUG_INTR, "T_RESUME Command failed\n");
- }
+ }
+ /*
+ ** and re-start the sender software!
+ */
+ if (PortP->CookMode == COOK_WELL)
+ goto start;
+ break;
+
+ case T_TIME:
+ rio_dprintk(RIO_DEBUG_INTR, "T_TIME\n");
+ /*
+ ** T_TIME is called when xDLY is set in oflags and
+ ** the line discipline timeout has expired. Its
+ ** function in life is to clear the TIMEOUT flag
+ ** and to re-start output to the port.
+ */
+ /*
+ ** Fall through and re-start output
+ */
+ case T_OUTPUT:
+ start:
+ if (PortP->MagicFlags & MAGIC_FLUSH) {
+ PortP->MagicFlags |= MORE_OUTPUT_EYGOR;
+ return 0;
+ }
+ RIOTxEnable((char *) PortP);
+ PortP->MagicFlags &= ~MORE_OUTPUT_EYGOR;
+ /*rio_dprint(RIO_DEBUG_INTR, PortP,DBG_PROC,"T_OUTPUT finished\n"); */
+ break;
+
+ case T_SUSPEND:
+ rio_dprintk(RIO_DEBUG_INTR, "T_SUSPEND\n");
+ /*
+ ** send a suspend pre-emptive packet.
+ */
+ if (PortP->State & RIO_DELETED) {
+ rio_dprintk(RIO_DEBUG_INTR, "SUSPEND deleted RTA\n");
+ } else {
+ if (RIOPreemptiveCmd(p, PortP, SUSPEND) == RIO_FAIL) {
+ rio_dprintk(RIO_DEBUG_INTR, "T_SUSPEND Command failed\n");
}
- /*
- ** and re-start the sender software!
- */
- if (PortP->CookMode == COOK_WELL)
- goto start;
- break;
-
- case T_TIME:
- rio_dprintk (RIO_DEBUG_INTR, "T_TIME\n");
- /*
- ** T_TIME is called when xDLY is set in oflags and
- ** the line discipline timeout has expired. It's
- ** function in life is to clear the TIMEOUT flag
- ** and to re-start output to the port.
- */
- /*
- ** Fall through and re-start output
- */
- case T_OUTPUT:
-start:
- if ( PortP->MagicFlags & MAGIC_FLUSH ) {
- PortP->MagicFlags |= MORE_OUTPUT_EYGOR;
+ }
+ /*
+ ** done!
+ */
+ break;
+
+ case T_BLOCK:
+ rio_dprintk(RIO_DEBUG_INTR, "T_BLOCK\n");
+ break;
+
+ case T_RFLUSH:
+ rio_dprintk(RIO_DEBUG_INTR, "T_RFLUSH\n");
+ if (PortP->State & RIO_DELETED) {
+ rio_dprintk(RIO_DEBUG_INTR, "RFLUSH on deleted RTA\n");
+ PortP->RxDataStart = 0;
+ } else {
+ if (RIOPreemptiveCmd(p, PortP, RFLUSH) == RIO_FAIL) {
+ rio_dprintk(RIO_DEBUG_INTR, "T_RFLUSH Command failed\n");
return 0;
}
- RIOTxEnable((char *)PortP);
- PortP->MagicFlags &= ~MORE_OUTPUT_EYGOR;
- /*rio_dprint(RIO_DEBUG_INTR, PortP,DBG_PROC,"T_OUTPUT finished\n");*/
- break;
-
- case T_SUSPEND:
- rio_dprintk (RIO_DEBUG_INTR, "T_SUSPEND\n");
- /*
- ** send a suspend pre-emptive packet.
- */
- if ( PortP->State & RIO_DELETED ) {
- rio_dprintk (RIO_DEBUG_INTR, "SUSPEND deleted RTA\n");
- }
- else {
- if ( RIOPreemptiveCmd(p, PortP, SUSPEND ) == RIO_FAIL ) {
- rio_dprintk (RIO_DEBUG_INTR, "T_SUSPEND Command failed\n");
- }
+ PortP->RxDataStart = 0;
+ while (can_remove_receive(&PacketP, PortP)) {
+ remove_receive(PortP);
+ ShowPacket(DBG_PROC, PacketP);
+ put_free_end(PortP->HostP, PacketP);
}
- /*
- ** done!
- */
- break;
-
- case T_BLOCK:
- rio_dprintk (RIO_DEBUG_INTR, "T_BLOCK\n");
- break;
-
- case T_RFLUSH:
- rio_dprintk (RIO_DEBUG_INTR, "T_RFLUSH\n");
- if ( PortP->State & RIO_DELETED ) {
- rio_dprintk (RIO_DEBUG_INTR, "RFLUSH on deleted RTA\n");
- PortP->RxDataStart = 0;
- }
- else {
- if ( RIOPreemptiveCmd( p, PortP, RFLUSH ) == RIO_FAIL ) {
- rio_dprintk (RIO_DEBUG_INTR, "T_RFLUSH Command failed\n");
- return 0;
- }
- PortP->RxDataStart = 0;
- while ( can_remove_receive(&PacketP, PortP) ) {
- remove_receive(PortP);
- ShowPacket(DBG_PROC, PacketP );
- put_free_end(PortP->HostP, PacketP );
- }
- if ( PortP->PhbP->handshake == PHB_HANDSHAKE_SET ) {
- /*
- ** MAGIC!
- */
- rio_dprintk (RIO_DEBUG_INTR, "Set receive handshake bit\n");
- PortP->PhbP->handshake |= PHB_HANDSHAKE_RESET;
- }
- }
- break;
- /* FALLTHROUGH */
- case T_UNBLOCK:
- rio_dprintk (RIO_DEBUG_INTR, "T_UNBLOCK\n");
- /*
- ** If there is any data to receive set a timeout to service it.
- */
- RIOReceive(p, PortP);
- break;
-
- case T_BREAK:
- rio_dprintk (RIO_DEBUG_INTR, "T_BREAK\n");
- /*
- ** Send a break command. For Sys V
- ** this is a timed break, so we
- ** send a SBREAK[time] packet
- */
- /*
- ** Build a BREAK command
- */
- if ( PortP->State & RIO_DELETED ) {
- rio_dprintk (RIO_DEBUG_INTR, "BREAK on deleted RTA\n");
+ if (PortP->PhbP->handshake == PHB_HANDSHAKE_SET) {
+ /*
+ ** MAGIC!
+ */
+ rio_dprintk(RIO_DEBUG_INTR, "Set receive handshake bit\n");
+ PortP->PhbP->handshake |= PHB_HANDSHAKE_RESET;
}
- else {
- if (RIOShortCommand(PortP,SBREAK,2,
- p->RIOConf.BreakInterval)==RIO_FAIL) {
- rio_dprintk (RIO_DEBUG_INTR, "SBREAK RIOShortCommand failed\n");
- }
+ }
+ break;
+ /* FALLTHROUGH */
+ case T_UNBLOCK:
+ rio_dprintk(RIO_DEBUG_INTR, "T_UNBLOCK\n");
+ /*
+ ** If there is any data to receive set a timeout to service it.
+ */
+ RIOReceive(p, PortP);
+ break;
+
+ case T_BREAK:
+ rio_dprintk(RIO_DEBUG_INTR, "T_BREAK\n");
+ /*
+ ** Send a break command. For Sys V
+ ** this is a timed break, so we
+ ** send a SBREAK[time] packet
+ */
+ /*
+ ** Build a BREAK command
+ */
+ if (PortP->State & RIO_DELETED) {
+ rio_dprintk(RIO_DEBUG_INTR, "BREAK on deleted RTA\n");
+ } else {
+ if (RIOShortCommand(PortP, SBREAK, 2, p->RIOConf.BreakInterval) == RIO_FAIL) {
+ rio_dprintk(RIO_DEBUG_INTR, "SBREAK RIOShortCommand failed\n");
}
-
- /*
- ** done!
- */
- break;
-
- case T_INPUT:
- rio_dprintk (RIO_DEBUG_INTR, "Proc T_INPUT called - I don't know what to do!\n");
- break;
- case T_PARM:
- rio_dprintk (RIO_DEBUG_INTR, "Proc T_PARM called - I don't know what to do!\n");
- break;
-
- case T_SWTCH:
- rio_dprintk (RIO_DEBUG_INTR, "Proc T_SWTCH called - I don't know what to do!\n");
- break;
-
- default:
- rio_dprintk (RIO_DEBUG_INTR, "Proc UNKNOWN command %d\n",cmd);
+ }
+
+ /*
+ ** done!
+ */
+ break;
+
+ case T_INPUT:
+ rio_dprintk(RIO_DEBUG_INTR, "Proc T_INPUT called - I don't know what to do!\n");
+ break;
+ case T_PARM:
+ rio_dprintk(RIO_DEBUG_INTR, "Proc T_PARM called - I don't know what to do!\n");
+ break;
+
+ case T_SWTCH:
+ rio_dprintk(RIO_DEBUG_INTR, "Proc T_SWTCH called - I don't know what to do!\n");
+ break;
+
+ default:
+ rio_dprintk(RIO_DEBUG_INTR, "Proc UNKNOWN command %d\n", cmd);
}
/*
- ** T_OUTPUT returns without passing through this point!
- */
- /*rio_dprint(RIO_DEBUG_INTR, PortP,DBG_PROC,"riotproc done\n");*/
- return(0);
+ ** T_OUTPUT returns without passing through this point!
+ */
+ /*rio_dprint(RIO_DEBUG_INTR, PortP,DBG_PROC,"riotproc done\n"); */
+ return (0);
}
#endif
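The functional change in riointr.c above is the receive path: instead of writing into tty->flip.char_buf_ptr / flag_buf_ptr and bumping flip.count by hand, RIOReceive() now asks the tty core for space and lets it manage the flag bytes, using the helpers pulled in by the new <linux/tty_flip.h> include. A minimal sketch of that pattern as the hunk uses it; copy_from_card-style I/O is represented by memcpy_fromio(), standing in for the driver's rio_memcpy_fromio():

	/*
	 * Illustrative only -- not part of the patch.  Flip-buffer receive
	 * path: reserve room, get a buffer inside the flip buffer (flag
	 * bytes default to TTY_NORMAL), copy the card data, then push it
	 * to the line discipline.
	 */
	#include <linux/tty.h>
	#include <linux/tty_flip.h>
	#include <asm/io.h>

	static void example_rx(struct tty_struct *tty,
			       const unsigned char __iomem *src, int avail)
	{
		unsigned char *buf;
		int count;

		count = tty_buffer_request_room(tty, avail);
		count = tty_prepare_flip_string(tty, &buf, count);
		if (count <= 0)
			return;
		memcpy_fromio(buf, src, count);	/* stand-in for rio_memcpy_fromio() */
		tty_flip_buffer_push(tty);
	}

Because tty_prepare_flip_string() advances the flip buffer itself, the patch can also drop the old manual flip.count / char_buf_ptr / flag_buf_ptr bookkeeping.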
diff --git a/drivers/char/rio/rioioctl.h b/drivers/char/rio/rioioctl.h
index c3d679733c9..14b83fae75c 100644
--- a/drivers/char/rio/rioioctl.h
+++ b/drivers/char/rio/rioioctl.h
@@ -42,14 +42,14 @@ static char *_rioioctl_h_sccs_ = "@(#)rioioctl.h 1.2";
*/
struct portStats {
- int port;
- int gather;
- ulong txchars;
- ulong rxchars;
- ulong opens;
- ulong closes;
- ulong ioctls;
-};
+ int port;
+ int gather;
+ ulong txchars;
+ ulong rxchars;
+ ulong opens;
+ ulong closes;
+ ulong ioctls;
+};
#define rIOC ('r'<<8)
@@ -100,4 +100,4 @@ struct portStats {
#define RIO_RESET_PORT_STATS (RIOC | 194)
#define RIO_GET_PORT_STATS (RIOC | 195)
-#endif /* __rioioctl_h__ */
+#endif /* __rioioctl_h__ */
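The re-indented struct portStats above is the payload for the RIO_GET_PORT_STATS and RIO_RESET_PORT_STATS ioctls defined just before the #endif. A hedged user-space sketch; only the ioctl numbers and the struct come from this header, while the convention of filling .port and letting the driver return the counters is an assumption for the sake of the example:

	/*
	 * Illustrative only.  The "set .port, driver fills the counters"
	 * calling convention is assumed, not confirmed by this hunk.
	 */
	#include <sys/ioctl.h>
	#include "rioioctl.h"	/* struct portStats, RIO_GET_PORT_STATS */

	/* Returns 0 on success and leaves the counters in *st. */
	static int get_port_stats(int fd, int port, struct portStats *st)
	{
		st->port = port;	/* which port to report on (assumed) */
		return ioctl(fd, RIO_GET_PORT_STATS, st);
	}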
diff --git a/drivers/char/rio/rioparam.c b/drivers/char/rio/rioparam.c
index f10916326ec..4cc7f4942bf 100644
--- a/drivers/char/rio/rioparam.c
+++ b/drivers/char/rio/rioparam.c
@@ -157,46 +157,44 @@ static char *_rioparam_c_sccs_ = "@(#)rioparam.c 1.3";
** NB. for MPX
** tty lock must NOT have been previously acquired.
*/
-int
-RIOParam(PortP, cmd, Modem, SleepFlag)
+int RIOParam(PortP, cmd, Modem, SleepFlag)
struct Port *PortP;
int cmd;
int Modem;
-int SleepFlag;
+int SleepFlag;
{
register struct tty_struct *TtyP;
- int retval;
+ int retval;
register struct phb_param *phb_param_ptr;
PKT *PacketP;
int res;
- uchar Cor1=0, Cor2=0, Cor4=0, Cor5=0;
- uchar TxXon=0, TxXoff=0, RxXon=0, RxXoff=0;
- uchar LNext=0, TxBaud=0, RxBaud=0;
- int retries = 0xff;
+ uchar Cor1 = 0, Cor2 = 0, Cor4 = 0, Cor5 = 0;
+ uchar TxXon = 0, TxXoff = 0, RxXon = 0, RxXoff = 0;
+ uchar LNext = 0, TxBaud = 0, RxBaud = 0;
+ int retries = 0xff;
unsigned long flags;
- func_enter ();
+ func_enter();
TtyP = PortP->gs.tty;
- rio_dprintk (RIO_DEBUG_PARAM, "RIOParam: Port:%d cmd:%d Modem:%d SleepFlag:%d Mapped: %d, tty=%p\n",
- PortP->PortNum, cmd, Modem, SleepFlag, PortP->Mapped, TtyP);
+ rio_dprintk(RIO_DEBUG_PARAM, "RIOParam: Port:%d cmd:%d Modem:%d SleepFlag:%d Mapped: %d, tty=%p\n", PortP->PortNum, cmd, Modem, SleepFlag, PortP->Mapped, TtyP);
if (!TtyP) {
- rio_dprintk (RIO_DEBUG_PARAM, "Can't call rioparam with null tty.\n");
+ rio_dprintk(RIO_DEBUG_PARAM, "Can't call rioparam with null tty.\n");
- func_exit ();
+ func_exit();
- return RIO_FAIL;
+ return RIO_FAIL;
}
- rio_spin_lock_irqsave(&PortP->portSem, flags );
+ rio_spin_lock_irqsave(&PortP->portSem, flags);
if (cmd == OPEN) {
/*
- ** If the port is set to store or lock the parameters, and it is
- ** paramed with OPEN, we want to restore the saved port termio, but
- ** only if StoredTermio has been saved, i.e. NOT 1st open after reboot.
- */
+ ** If the port is set to store or lock the parameters, and it is
+ ** paramed with OPEN, we want to restore the saved port termio, but
+ ** only if StoredTermio has been saved, i.e. NOT 1st open after reboot.
+ */
#if 0
if (PortP->FirstOpen) {
PortP->StoredTty.iflag = TtyP->tm.c_iflag;
@@ -207,9 +205,8 @@ int SleepFlag;
for (i = 0; i < NCC + 5; i++)
PortP->StoredTty.cc[i] = TtyP->tm.c_cc[i];
PortP->FirstOpen = 0;
- }
- else if (PortP->Store || PortP->Lock) {
- rio_dprintk (RIO_DEBUG_PARAM, "OPEN: Restoring stored/locked params\n");
+ } else if (PortP->Store || PortP->Lock) {
+ rio_dprintk(RIO_DEBUG_PARAM, "OPEN: Restoring stored/locked params\n");
TtyP->tm.c_iflag = PortP->StoredTty.iflag;
TtyP->tm.c_oflag = PortP->StoredTty.oflag;
TtyP->tm.c_cflag = PortP->StoredTty.cflag;
@@ -222,230 +219,222 @@ int SleepFlag;
}
/*
- ** wait for space
- */
- while ( !(res=can_add_transmit(&PacketP,PortP)) ||
- (PortP->InUse != NOT_INUSE) ) {
- if (retries -- <= 0) {
+ ** wait for space
+ */
+ while (!(res = can_add_transmit(&PacketP, PortP)) || (PortP->InUse != NOT_INUSE)) {
+ if (retries-- <= 0) {
break;
}
- if ( PortP->InUse != NOT_INUSE ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Port IN_USE for pre-emptive command\n");
+ if (PortP->InUse != NOT_INUSE) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Port IN_USE for pre-emptive command\n");
}
- if ( !res ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Port has no space on transmit queue\n");
+ if (!res) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Port has no space on transmit queue\n");
}
- if ( SleepFlag != OK_TO_SLEEP ) {
- rio_spin_unlock_irqrestore( &PortP->portSem, flags);
+ if (SleepFlag != OK_TO_SLEEP) {
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
func_exit();
-
+
return RIO_FAIL;
}
- rio_dprintk (RIO_DEBUG_PARAM, "wait for can_add_transmit\n");
- rio_spin_unlock_irqrestore( &PortP->portSem, flags);
+ rio_dprintk(RIO_DEBUG_PARAM, "wait for can_add_transmit\n");
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
retval = RIODelay(PortP, HUNDRED_MS);
- rio_spin_lock_irqsave( &PortP->portSem, flags);
+ rio_spin_lock_irqsave(&PortP->portSem, flags);
if (retval == RIO_FAIL) {
- rio_dprintk (RIO_DEBUG_PARAM, "wait for can_add_transmit broken by signal\n");
- rio_spin_unlock_irqrestore( &PortP->portSem, flags);
+ rio_dprintk(RIO_DEBUG_PARAM, "wait for can_add_transmit broken by signal\n");
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
pseterr(EINTR);
func_exit();
return RIO_FAIL;
}
- if ( PortP->State & RIO_DELETED ) {
- rio_spin_unlock_irqrestore( &PortP->portSem, flags);
- func_exit ();
+ if (PortP->State & RIO_DELETED) {
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ func_exit();
return RIO_SUCCESS;
}
}
if (!res) {
- rio_spin_unlock_irqrestore( &PortP->portSem, flags);
- func_exit ();
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ func_exit();
return RIO_FAIL;
}
- rio_dprintk (RIO_DEBUG_PARAM, "can_add_transmit() returns %x\n",res);
- rio_dprintk (RIO_DEBUG_PARAM, "Packet is 0x%x\n",(int) PacketP);
+ rio_dprintk(RIO_DEBUG_PARAM, "can_add_transmit() returns %x\n", res);
+ rio_dprintk(RIO_DEBUG_PARAM, "Packet is 0x%x\n", (int) PacketP);
- phb_param_ptr = (struct phb_param *)PacketP->data;
+ phb_param_ptr = (struct phb_param *) PacketP->data;
#if 0
/*
- ** COR 1
- */
- if ( TtyP->tm.c_iflag & INPCK ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Parity checking on input enabled\n");
+ ** COR 1
+ */
+ if (TtyP->tm.c_iflag & INPCK) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Parity checking on input enabled\n");
Cor1 |= COR1_INPCK;
}
#endif
- switch ( TtyP->termios->c_cflag & CSIZE ) {
- case CS5:
+ switch (TtyP->termios->c_cflag & CSIZE) {
+ case CS5:
{
- rio_dprintk (RIO_DEBUG_PARAM, "5 bit data\n");
+ rio_dprintk(RIO_DEBUG_PARAM, "5 bit data\n");
Cor1 |= COR1_5BITS;
break;
}
- case CS6:
+ case CS6:
{
- rio_dprintk (RIO_DEBUG_PARAM, "6 bit data\n");
+ rio_dprintk(RIO_DEBUG_PARAM, "6 bit data\n");
Cor1 |= COR1_6BITS;
break;
}
- case CS7:
+ case CS7:
{
- rio_dprintk (RIO_DEBUG_PARAM, "7 bit data\n");
+ rio_dprintk(RIO_DEBUG_PARAM, "7 bit data\n");
Cor1 |= COR1_7BITS;
break;
}
- case CS8:
+ case CS8:
{
- rio_dprintk (RIO_DEBUG_PARAM, "8 bit data\n");
+ rio_dprintk(RIO_DEBUG_PARAM, "8 bit data\n");
Cor1 |= COR1_8BITS;
break;
}
}
- if ( TtyP->termios->c_cflag & CSTOPB ) {
- rio_dprintk (RIO_DEBUG_PARAM, "2 stop bits\n");
+ if (TtyP->termios->c_cflag & CSTOPB) {
+ rio_dprintk(RIO_DEBUG_PARAM, "2 stop bits\n");
Cor1 |= COR1_2STOP;
- }
- else {
- rio_dprintk (RIO_DEBUG_PARAM, "1 stop bit\n");
+ } else {
+ rio_dprintk(RIO_DEBUG_PARAM, "1 stop bit\n");
Cor1 |= COR1_1STOP;
}
- if ( TtyP->termios->c_cflag & PARENB ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Enable parity\n");
+ if (TtyP->termios->c_cflag & PARENB) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Enable parity\n");
Cor1 |= COR1_NORMAL;
- }
- else {
- rio_dprintk (RIO_DEBUG_PARAM, "Disable parity\n");
+ } else {
+ rio_dprintk(RIO_DEBUG_PARAM, "Disable parity\n");
Cor1 |= COR1_NOP;
}
- if ( TtyP->termios->c_cflag & PARODD ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Odd parity\n");
+ if (TtyP->termios->c_cflag & PARODD) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Odd parity\n");
Cor1 |= COR1_ODD;
- }
- else {
- rio_dprintk (RIO_DEBUG_PARAM, "Even parity\n");
- Cor1 |= COR1_EVEN;
+ } else {
+ rio_dprintk(RIO_DEBUG_PARAM, "Even parity\n");
+ Cor1 |= COR1_EVEN;
}
/*
- ** COR 2
- */
- if ( TtyP->termios->c_iflag & IXON ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Enable start/stop output control\n");
+ ** COR 2
+ */
+ if (TtyP->termios->c_iflag & IXON) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Enable start/stop output control\n");
Cor2 |= COR2_IXON;
- }
- else {
- if ( PortP->Config & RIO_IXON ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Force enable start/stop output control\n");
+ } else {
+ if (PortP->Config & RIO_IXON) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Force enable start/stop output control\n");
Cor2 |= COR2_IXON;
- }
- else
- rio_dprintk (RIO_DEBUG_PARAM, "IXON has been disabled.\n");
+ } else
+ rio_dprintk(RIO_DEBUG_PARAM, "IXON has been disabled.\n");
}
if (TtyP->termios->c_iflag & IXANY) {
- if ( PortP->Config & RIO_IXANY ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Enable any key to restart output\n");
+ if (PortP->Config & RIO_IXANY) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Enable any key to restart output\n");
Cor2 |= COR2_IXANY;
- }
- else
- rio_dprintk (RIO_DEBUG_PARAM, "IXANY has been disabled due to sanity reasons.\n");
+ } else
+ rio_dprintk(RIO_DEBUG_PARAM, "IXANY has been disabled due to sanity reasons.\n");
}
- if ( TtyP->termios->c_iflag & IXOFF ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Enable start/stop input control 2\n");
+ if (TtyP->termios->c_iflag & IXOFF) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Enable start/stop input control 2\n");
Cor2 |= COR2_IXOFF;
}
- if ( TtyP->termios->c_cflag & HUPCL ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Hangup on last close\n");
+ if (TtyP->termios->c_cflag & HUPCL) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Hangup on last close\n");
Cor2 |= COR2_HUPCL;
}
- if ( C_CRTSCTS (TtyP)) {
- rio_dprintk (RIO_DEBUG_PARAM, "Rx hardware flow control enabled\n");
+ if (C_CRTSCTS(TtyP)) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Rx hardware flow control enabled\n");
Cor2 |= COR2_CTSFLOW;
Cor2 |= COR2_RTSFLOW;
} else {
- rio_dprintk (RIO_DEBUG_PARAM, "Rx hardware flow control disabled\n");
+ rio_dprintk(RIO_DEBUG_PARAM, "Rx hardware flow control disabled\n");
Cor2 &= ~COR2_CTSFLOW;
Cor2 &= ~COR2_RTSFLOW;
}
- if ( TtyP->termios->c_cflag & CLOCAL ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Local line\n");
- }
- else {
- rio_dprintk (RIO_DEBUG_PARAM, "Possible Modem line\n");
+ if (TtyP->termios->c_cflag & CLOCAL) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Local line\n");
+ } else {
+ rio_dprintk(RIO_DEBUG_PARAM, "Possible Modem line\n");
}
/*
- ** COR 4 (there is no COR 3)
- */
- if ( TtyP->termios->c_iflag & IGNBRK ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Ignore break condition\n");
+ ** COR 4 (there is no COR 3)
+ */
+ if (TtyP->termios->c_iflag & IGNBRK) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Ignore break condition\n");
Cor4 |= COR4_IGNBRK;
}
- if ( !(TtyP->termios->c_iflag & BRKINT) ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Break generates NULL condition\n");
+ if (!(TtyP->termios->c_iflag & BRKINT)) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Break generates NULL condition\n");
Cor4 |= COR4_NBRKINT;
} else {
- rio_dprintk (RIO_DEBUG_PARAM, "Interrupt on break condition\n");
+ rio_dprintk(RIO_DEBUG_PARAM, "Interrupt on break condition\n");
}
- if ( TtyP->termios->c_iflag & INLCR ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Map newline to carriage return on input\n");
+ if (TtyP->termios->c_iflag & INLCR) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Map newline to carriage return on input\n");
Cor4 |= COR4_INLCR;
}
- if ( TtyP->termios->c_iflag & IGNCR ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Ignore carriage return on input\n");
+ if (TtyP->termios->c_iflag & IGNCR) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Ignore carriage return on input\n");
Cor4 |= COR4_IGNCR;
}
- if ( TtyP->termios->c_iflag & ICRNL ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Map carriage return to newline on input\n");
+ if (TtyP->termios->c_iflag & ICRNL) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Map carriage return to newline on input\n");
Cor4 |= COR4_ICRNL;
}
- if ( TtyP->termios->c_iflag & IGNPAR ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Ignore characters with parity errors\n");
+ if (TtyP->termios->c_iflag & IGNPAR) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Ignore characters with parity errors\n");
Cor4 |= COR4_IGNPAR;
}
- if ( TtyP->termios->c_iflag & PARMRK ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Mark parity errors\n");
+ if (TtyP->termios->c_iflag & PARMRK) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Mark parity errors\n");
Cor4 |= COR4_PARMRK;
}
/*
- ** Set the RAISEMOD flag to ensure that the modem lines are raised
- ** on reception of a config packet.
- ** The download code handles the zero baud condition.
- */
+ ** Set the RAISEMOD flag to ensure that the modem lines are raised
+ ** on reception of a config packet.
+ ** The download code handles the zero baud condition.
+ */
Cor4 |= COR4_RAISEMOD;
/*
- ** COR 5
- */
+ ** COR 5
+ */
Cor5 = COR5_CMOE;
/*
- ** Set to monitor tbusy/tstop (or not).
- */
+ ** Set to monitor tbusy/tstop (or not).
+ */
if (PortP->MonitorTstate)
Cor5 |= COR5_TSTATE_ON;
@@ -453,182 +442,195 @@ int SleepFlag;
Cor5 |= COR5_TSTATE_OFF;
/*
- ** Could set LNE here if you wanted LNext processing. SVR4 will use it.
- */
- if ( TtyP->termios->c_iflag & ISTRIP ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Strip input characters\n");
- if (! (PortP->State & RIO_TRIAD_MODE)) {
+ ** Could set LNE here if you wanted LNext processing. SVR4 will use it.
+ */
+ if (TtyP->termios->c_iflag & ISTRIP) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Strip input characters\n");
+ if (!(PortP->State & RIO_TRIAD_MODE)) {
Cor5 |= COR5_ISTRIP;
}
}
- if ( TtyP->termios->c_oflag & ONLCR ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Map newline to carriage-return, newline on output\n");
- if ( PortP->CookMode == COOK_MEDIUM )
+ if (TtyP->termios->c_oflag & ONLCR) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Map newline to carriage-return, newline on output\n");
+ if (PortP->CookMode == COOK_MEDIUM)
Cor5 |= COR5_ONLCR;
}
- if ( TtyP->termios->c_oflag & OCRNL ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Map carriage return to newline on output\n");
- if ( PortP->CookMode == COOK_MEDIUM )
+ if (TtyP->termios->c_oflag & OCRNL) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Map carriage return to newline on output\n");
+ if (PortP->CookMode == COOK_MEDIUM)
Cor5 |= COR5_OCRNL;
}
- if ( ( TtyP->termios->c_oflag & TABDLY) == TAB3 ) {
- rio_dprintk (RIO_DEBUG_PARAM, "Tab delay 3 set\n");
- if ( PortP->CookMode == COOK_MEDIUM )
+ if ((TtyP->termios->c_oflag & TABDLY) == TAB3) {
+ rio_dprintk(RIO_DEBUG_PARAM, "Tab delay 3 set\n");
+ if (PortP->CookMode == COOK_MEDIUM)
Cor5 |= COR5_TAB3;
}
/*
- ** Flow control bytes.
- */
+ ** Flow control bytes.
+ */
TxXon = TtyP->termios->c_cc[VSTART];
TxXoff = TtyP->termios->c_cc[VSTOP];
RxXon = TtyP->termios->c_cc[VSTART];
RxXoff = TtyP->termios->c_cc[VSTOP];
/*
- ** LNEXT byte
- */
+ ** LNEXT byte
+ */
LNext = 0;
/*
- ** Baud rate bytes
- */
- rio_dprintk (RIO_DEBUG_PARAM, "Mapping of rx/tx baud %x (%x)\n",
- TtyP->termios->c_cflag, CBAUD);
+ ** Baud rate bytes
+ */
+ rio_dprintk(RIO_DEBUG_PARAM, "Mapping of rx/tx baud %x (%x)\n", TtyP->termios->c_cflag, CBAUD);
switch (TtyP->termios->c_cflag & CBAUD) {
#define e(b) case B ## b : RxBaud = TxBaud = RIO_B ## b ;break
- e(50);e(75);e(110);e(134);e(150);e(200);e(300);e(600);e(1200);
- e(1800);e(2400);e(4800);e(9600);e(19200);e(38400);e(57600);
- e(115200); /* e(230400);e(460800); e(921600); */
+ e(50);
+ e(75);
+ e(110);
+ e(134);
+ e(150);
+ e(200);
+ e(300);
+ e(600);
+ e(1200);
+ e(1800);
+ e(2400);
+ e(4800);
+ e(9600);
+ e(19200);
+ e(38400);
+ e(57600);
+ e(115200); /* e(230400);e(460800); e(921600); */
}
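The e() macro above leans on preprocessor token pasting: one literal such as 9600 produces both the termios case label B9600 and the firmware speed code RIO_B9600. A minimal stand-alone sketch of the same trick, using made-up constant values rather than the driver's real ones:

#include <stdio.h>

enum { B300 = 7, B9600 = 13 };          /* hypothetical termios speed codes  */
enum { RIO_B300 = 3, RIO_B9600 = 9 };   /* hypothetical firmware speed codes */

static int map_baud(int cbaud)
{
	int baud = -1;
	switch (cbaud) {
#define e(b) case B ## b: baud = RIO_B ## b; break
	e(300);
	e(9600);
#undef e
	}
	return baud;
}

int main(void)
{
	printf("B9600 maps to firmware code %d\n", map_baud(B9600));
	return 0;
}

The pay-off of the macro is that adding a new speed is a one-token change, at the cost of the unusual-looking case list the reformatted hunk above now spreads over one line per speed.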
 /* XXX Missing conversion table. XXX */
- /* (TtyP->termios->c_cflag & V_CBAUD); */
+ /* (TtyP->termios->c_cflag & V_CBAUD); */
- rio_dprintk (RIO_DEBUG_PARAM, "tx baud 0x%x, rx baud 0x%x\n", TxBaud, RxBaud);
+ rio_dprintk(RIO_DEBUG_PARAM, "tx baud 0x%x, rx baud 0x%x\n", TxBaud, RxBaud);
/*
- ** Leftovers
- */
- if ( TtyP->termios->c_cflag & CREAD )
- rio_dprintk (RIO_DEBUG_PARAM, "Enable receiver\n");
+ ** Leftovers
+ */
+ if (TtyP->termios->c_cflag & CREAD)
+ rio_dprintk(RIO_DEBUG_PARAM, "Enable receiver\n");
#ifdef RCV1EN
- if ( TtyP->termios->c_cflag & RCV1EN )
- rio_dprintk (RIO_DEBUG_PARAM, "RCV1EN (?)\n");
+ if (TtyP->termios->c_cflag & RCV1EN)
+ rio_dprintk(RIO_DEBUG_PARAM, "RCV1EN (?)\n");
#endif
#ifdef XMT1EN
- if ( TtyP->termios->c_cflag & XMT1EN )
- rio_dprintk (RIO_DEBUG_PARAM, "XMT1EN (?)\n");
+ if (TtyP->termios->c_cflag & XMT1EN)
+ rio_dprintk(RIO_DEBUG_PARAM, "XMT1EN (?)\n");
#endif
#if 0
- if ( TtyP->termios->c_cflag & LOBLK )
- rio_dprintk (RIO_DEBUG_PARAM, "LOBLK - JCL output blocks when not current\n");
+ if (TtyP->termios->c_cflag & LOBLK)
+ rio_dprintk(RIO_DEBUG_PARAM, "LOBLK - JCL output blocks when not current\n");
#endif
- if ( TtyP->termios->c_lflag & ISIG )
- rio_dprintk (RIO_DEBUG_PARAM, "Input character signal generating enabled\n");
- if ( TtyP->termios->c_lflag & ICANON )
- rio_dprintk (RIO_DEBUG_PARAM, "Canonical input: erase and kill enabled\n");
- if ( TtyP->termios->c_lflag & XCASE )
- rio_dprintk (RIO_DEBUG_PARAM, "Canonical upper/lower presentation\n");
- if ( TtyP->termios->c_lflag & ECHO )
- rio_dprintk (RIO_DEBUG_PARAM, "Enable input echo\n");
- if ( TtyP->termios->c_lflag & ECHOE )
- rio_dprintk (RIO_DEBUG_PARAM, "Enable echo erase\n");
- if ( TtyP->termios->c_lflag & ECHOK )
- rio_dprintk (RIO_DEBUG_PARAM, "Enable echo kill\n");
- if ( TtyP->termios->c_lflag & ECHONL )
- rio_dprintk (RIO_DEBUG_PARAM, "Enable echo newline\n");
- if ( TtyP->termios->c_lflag & NOFLSH )
- rio_dprintk (RIO_DEBUG_PARAM, "Disable flush after interrupt or quit\n");
+ if (TtyP->termios->c_lflag & ISIG)
+ rio_dprintk(RIO_DEBUG_PARAM, "Input character signal generating enabled\n");
+ if (TtyP->termios->c_lflag & ICANON)
+ rio_dprintk(RIO_DEBUG_PARAM, "Canonical input: erase and kill enabled\n");
+ if (TtyP->termios->c_lflag & XCASE)
+ rio_dprintk(RIO_DEBUG_PARAM, "Canonical upper/lower presentation\n");
+ if (TtyP->termios->c_lflag & ECHO)
+ rio_dprintk(RIO_DEBUG_PARAM, "Enable input echo\n");
+ if (TtyP->termios->c_lflag & ECHOE)
+ rio_dprintk(RIO_DEBUG_PARAM, "Enable echo erase\n");
+ if (TtyP->termios->c_lflag & ECHOK)
+ rio_dprintk(RIO_DEBUG_PARAM, "Enable echo kill\n");
+ if (TtyP->termios->c_lflag & ECHONL)
+ rio_dprintk(RIO_DEBUG_PARAM, "Enable echo newline\n");
+ if (TtyP->termios->c_lflag & NOFLSH)
+ rio_dprintk(RIO_DEBUG_PARAM, "Disable flush after interrupt or quit\n");
#ifdef TOSTOP
- if ( TtyP->termios->c_lflag & TOSTOP )
- rio_dprintk (RIO_DEBUG_PARAM, "Send SIGTTOU for background output\n");
+ if (TtyP->termios->c_lflag & TOSTOP)
+ rio_dprintk(RIO_DEBUG_PARAM, "Send SIGTTOU for background output\n");
#endif
#ifdef XCLUDE
- if ( TtyP->termios->c_lflag & XCLUDE )
- rio_dprintk (RIO_DEBUG_PARAM, "Exclusive use of this line\n");
+ if (TtyP->termios->c_lflag & XCLUDE)
+ rio_dprintk(RIO_DEBUG_PARAM, "Exclusive use of this line\n");
#endif
- if ( TtyP->termios->c_iflag & IUCLC )
- rio_dprintk (RIO_DEBUG_PARAM, "Map uppercase to lowercase on input\n");
- if ( TtyP->termios->c_oflag & OPOST )
- rio_dprintk (RIO_DEBUG_PARAM, "Enable output post-processing\n");
- if ( TtyP->termios->c_oflag & OLCUC )
- rio_dprintk (RIO_DEBUG_PARAM, "Map lowercase to uppercase on output\n");
- if ( TtyP->termios->c_oflag & ONOCR )
- rio_dprintk (RIO_DEBUG_PARAM, "No carriage return output at column 0\n");
- if ( TtyP->termios->c_oflag & ONLRET )
- rio_dprintk (RIO_DEBUG_PARAM, "Newline performs carriage return function\n");
- if ( TtyP->termios->c_oflag & OFILL )
- rio_dprintk (RIO_DEBUG_PARAM, "Use fill characters for delay\n");
- if ( TtyP->termios->c_oflag & OFDEL )
- rio_dprintk (RIO_DEBUG_PARAM, "Fill character is DEL\n");
- if ( TtyP->termios->c_oflag & NLDLY )
- rio_dprintk (RIO_DEBUG_PARAM, "Newline delay set\n");
- if ( TtyP->termios->c_oflag & CRDLY )
- rio_dprintk (RIO_DEBUG_PARAM, "Carriage return delay set\n");
- if ( TtyP->termios->c_oflag & TABDLY )
- rio_dprintk (RIO_DEBUG_PARAM, "Tab delay set\n");
+ if (TtyP->termios->c_iflag & IUCLC)
+ rio_dprintk(RIO_DEBUG_PARAM, "Map uppercase to lowercase on input\n");
+ if (TtyP->termios->c_oflag & OPOST)
+ rio_dprintk(RIO_DEBUG_PARAM, "Enable output post-processing\n");
+ if (TtyP->termios->c_oflag & OLCUC)
+ rio_dprintk(RIO_DEBUG_PARAM, "Map lowercase to uppercase on output\n");
+ if (TtyP->termios->c_oflag & ONOCR)
+ rio_dprintk(RIO_DEBUG_PARAM, "No carriage return output at column 0\n");
+ if (TtyP->termios->c_oflag & ONLRET)
+ rio_dprintk(RIO_DEBUG_PARAM, "Newline performs carriage return function\n");
+ if (TtyP->termios->c_oflag & OFILL)
+ rio_dprintk(RIO_DEBUG_PARAM, "Use fill characters for delay\n");
+ if (TtyP->termios->c_oflag & OFDEL)
+ rio_dprintk(RIO_DEBUG_PARAM, "Fill character is DEL\n");
+ if (TtyP->termios->c_oflag & NLDLY)
+ rio_dprintk(RIO_DEBUG_PARAM, "Newline delay set\n");
+ if (TtyP->termios->c_oflag & CRDLY)
+ rio_dprintk(RIO_DEBUG_PARAM, "Carriage return delay set\n");
+ if (TtyP->termios->c_oflag & TABDLY)
+ rio_dprintk(RIO_DEBUG_PARAM, "Tab delay set\n");
#if 0
- if ( TtyP->termios->c_oflag & BSDLY )
- rio_dprintk (RIO_DEBUG_PARAM, "Back-space delay set\n");
- if ( TtyP->termios->c_oflag & VTDLY )
- rio_dprintk (RIO_DEBUG_PARAM, "Vertical tab delay set\n");
- if ( TtyP->termios->c_oflag & FFDLY )
- rio_dprintk (RIO_DEBUG_PARAM, "Form-feed delay set\n");
+ if (TtyP->termios->c_oflag & BSDLY)
+ rio_dprintk(RIO_DEBUG_PARAM, "Back-space delay set\n");
+ if (TtyP->termios->c_oflag & VTDLY)
+ rio_dprintk(RIO_DEBUG_PARAM, "Vertical tab delay set\n");
+ if (TtyP->termios->c_oflag & FFDLY)
+ rio_dprintk(RIO_DEBUG_PARAM, "Form-feed delay set\n");
#endif
/*
- ** These things are kind of useful in a later life!
- */
+ ** These things are kind of useful in a later life!
+ */
PortP->Cor2Copy = Cor2;
- if ( PortP->State & RIO_DELETED ) {
- rio_spin_unlock_irqrestore( &PortP->portSem, flags);
- func_exit ();
+ if (PortP->State & RIO_DELETED) {
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ func_exit();
return RIO_FAIL;
}
/*
- ** Actually write the info into the packet to be sent
- */
- WBYTE(phb_param_ptr->Cmd, cmd);
- WBYTE(phb_param_ptr->Cor1, Cor1);
- WBYTE(phb_param_ptr->Cor2, Cor2);
- WBYTE(phb_param_ptr->Cor4, Cor4);
- WBYTE(phb_param_ptr->Cor5, Cor5);
- WBYTE(phb_param_ptr->TxXon, TxXon);
- WBYTE(phb_param_ptr->RxXon, RxXon);
+ ** Actually write the info into the packet to be sent
+ */
+ WBYTE(phb_param_ptr->Cmd, cmd);
+ WBYTE(phb_param_ptr->Cor1, Cor1);
+ WBYTE(phb_param_ptr->Cor2, Cor2);
+ WBYTE(phb_param_ptr->Cor4, Cor4);
+ WBYTE(phb_param_ptr->Cor5, Cor5);
+ WBYTE(phb_param_ptr->TxXon, TxXon);
+ WBYTE(phb_param_ptr->RxXon, RxXon);
WBYTE(phb_param_ptr->TxXoff, TxXoff);
WBYTE(phb_param_ptr->RxXoff, RxXoff);
- WBYTE(phb_param_ptr->LNext, LNext);
+ WBYTE(phb_param_ptr->LNext, LNext);
WBYTE(phb_param_ptr->TxBaud, TxBaud);
WBYTE(phb_param_ptr->RxBaud, RxBaud);
/*
- ** Set the length/command field
- */
- WBYTE(PacketP->len , 12 | PKT_CMD_BIT);
+ ** Set the length/command field
+ */
+ WBYTE(PacketP->len, 12 | PKT_CMD_BIT);
/*
- ** The packet is formed - now, whack it off
- ** to its final destination:
- */
+ ** The packet is formed - now, whack it off
+ ** to its final destination:
+ */
add_transmit(PortP);
/*
- ** Count characters transmitted for port statistics reporting
- */
+ ** Count characters transmitted for port statistics reporting
+ */
if (PortP->statsGather)
PortP->txchars += 12;
- rio_spin_unlock_irqrestore( &PortP->portSem, flags);
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- rio_dprintk (RIO_DEBUG_PARAM, "add_transmit returned.\n");
+ rio_dprintk(RIO_DEBUG_PARAM, "add_transmit returned.\n");
/*
- ** job done.
- */
- func_exit ();
+ ** job done.
+ */
+ func_exit();
return RIO_SUCCESS;
}
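The tail of RIOParam() writes twelve one-byte fields (the command, the COR registers, the XON/XOFF and LNEXT characters, and the two baud codes) into the shared phb_param area and then stamps the header length as 12 with the command bit set. A simplified, self-contained sketch of that packing; the struct layout and the PKT_CMD_BIT value here are illustrative, not the driver's real definitions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PKT_CMD_BIT 0x80                /* assumed value, for illustration only  */

struct phb_param_sketch {               /* abridged stand-in for the real layout */
	uint8_t cmd;
	uint8_t cor1, cor2, cor4, cor5;
	uint8_t txxon, rxxon, txxoff, rxxoff;
	uint8_t lnext;
	uint8_t txbaud, rxbaud;
};

/* Fill the twelve parameter bytes and return the length/command byte that
 * would go into the packet header, i.e. 12 | PKT_CMD_BIT. */
static uint8_t pack_param(struct phb_param_sketch *p, uint8_t cmd,
			  uint8_t cor2, uint8_t baud)
{
	memset(p, 0, sizeof(*p));
	p->cmd = cmd;
	p->cor2 = cor2;
	p->txbaud = p->rxbaud = baud;
	return (uint8_t)(sizeof(*p) | PKT_CMD_BIT);
}

int main(void)
{
	struct phb_param_sketch pp;
	printf("len byte = 0x%02x\n", pack_param(&pp, 1, 0, 9));
	return 0;
}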
@@ -638,16 +640,15 @@ int SleepFlag;
** We can add another packet to a transmit queue if the packet pointer pointed
** to by the TxAdd pointer has PKT_IN_USE clear in its address.
*/
-int
-can_add_transmit(PktP, PortP)
+int can_add_transmit(PktP, PortP)
PKT **PktP;
-struct Port *PortP;
+struct Port *PortP;
{
register PKT *tp;
- *PktP = tp = (PKT *)RIO_PTR(PortP->Caddr,RWORD(*PortP->TxAdd));
+ *PktP = tp = (PKT *) RIO_PTR(PortP->Caddr, RWORD(*PortP->TxAdd));
- return !((uint)tp & PKT_IN_USE);
+ return !((uint) tp & PKT_IN_USE);
}
/*
@@ -655,25 +656,22 @@ struct Port *PortP;
** and then move the TxAdd pointer along one position to point to the next
** packet pointer. You must wrap the pointer from the end back to the start.
*/
-void
-add_transmit(PortP)
-struct Port *PortP;
+void add_transmit(PortP)
+struct Port *PortP;
{
- if (RWORD(*PortP->TxAdd) & PKT_IN_USE) {
- rio_dprintk (RIO_DEBUG_PARAM, "add_transmit: Packet has been stolen!");
- }
- WWORD( *(ushort *)PortP->TxAdd, RWORD(*PortP->TxAdd) | PKT_IN_USE);
- PortP->TxAdd = (PortP->TxAdd == PortP->TxEnd) ? PortP->TxStart :
- PortP->TxAdd + 1;
- WWORD( PortP->PhbP->tx_add , RIO_OFF(PortP->Caddr,PortP->TxAdd) );
+ if (RWORD(*PortP->TxAdd) & PKT_IN_USE) {
+ rio_dprintk(RIO_DEBUG_PARAM, "add_transmit: Packet has been stolen!");
+ }
+ WWORD(*(ushort *) PortP->TxAdd, RWORD(*PortP->TxAdd) | PKT_IN_USE);
+ PortP->TxAdd = (PortP->TxAdd == PortP->TxEnd) ? PortP->TxStart : PortP->TxAdd + 1;
+ WWORD(PortP->PhbP->tx_add, RIO_OFF(PortP->Caddr, PortP->TxAdd));
}
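can_add_transmit() and add_transmit() together implement a small circular queue in card memory: each slot holds a packet offset whose low bit (PKT_IN_USE) says whether the card still owns it, and the add pointer wraps from TxEnd back to TxStart. A host-side sketch of the same scheme; the slot count and flag value are illustrative:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PKT_IN_USE 0x1                  /* assumed flag value: low bit = busy */
#define RING_SLOTS 8                    /* illustrative ring size             */

struct tx_ring_sketch {
	uint16_t slot[RING_SLOTS];      /* packet offsets carrying the busy bit */
	size_t   add;                   /* next slot to hand to the card        */
};

/* Mirror of can_add_transmit(): the slot is usable while the busy bit is clear. */
static bool ring_can_add(const struct tx_ring_sketch *r)
{
	return !(r->slot[r->add] & PKT_IN_USE);
}

/* Mirror of add_transmit(): mark the slot busy, then advance and wrap. */
static void ring_add(struct tx_ring_sketch *r)
{
	r->slot[r->add] |= PKT_IN_USE;
	r->add = (r->add + 1 == RING_SLOTS) ? 0 : r->add + 1;
}

Keeping the ownership flag in the pointer word itself is what lets the host test availability with a single masked read of card memory.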
/****************************************
* Put a packet onto the end of the
* free list
****************************************/
-void
-put_free_end(HostP, PktP)
+void put_free_end(HostP, PktP)
struct Host *HostP;
PKT *PktP;
{
@@ -688,24 +686,23 @@ PKT *PktP;
*
************************************************/
- rio_dprintk (RIO_DEBUG_PFE, "put_free_end(PktP=%x)\n",(int)PktP);
+ rio_dprintk(RIO_DEBUG_PFE, "put_free_end(PktP=%x)\n", (int) PktP);
- if ((old_end=RWORD(HostP->ParmMapP->free_list_end)) != TPNULL) {
- new_end = RIO_OFF(HostP->Caddr,PktP);
- tmp_pointer = (FREE_LIST *)RIO_PTR(HostP->Caddr,old_end);
- WWORD(tmp_pointer->next , new_end );
- WWORD(((FREE_LIST *)PktP)->prev , old_end);
- WWORD(((FREE_LIST *)PktP)->next , TPNULL);
+ if ((old_end = RWORD(HostP->ParmMapP->free_list_end)) != TPNULL) {
+ new_end = RIO_OFF(HostP->Caddr, PktP);
+ tmp_pointer = (FREE_LIST *) RIO_PTR(HostP->Caddr, old_end);
+ WWORD(tmp_pointer->next, new_end);
+ WWORD(((FREE_LIST *) PktP)->prev, old_end);
+ WWORD(((FREE_LIST *) PktP)->next, TPNULL);
WWORD(HostP->ParmMapP->free_list_end, new_end);
- }
- else { /* First packet on the free list this should never happen! */
- rio_dprintk (RIO_DEBUG_PFE, "put_free_end(): This should never happen\n");
- WWORD(HostP->ParmMapP->free_list_end , RIO_OFF(HostP->Caddr,PktP));
- tmp_pointer = (FREE_LIST *)PktP;
- WWORD(tmp_pointer->prev , TPNULL);
- WWORD(tmp_pointer->next , TPNULL);
- }
- rio_dprintk (RIO_DEBUG_CMD, "Before unlock: %p\n", &HostP->HostLock);
+ } else { /* First packet on the free list; this should never happen! */
+ rio_dprintk(RIO_DEBUG_PFE, "put_free_end(): This should never happen\n");
+ WWORD(HostP->ParmMapP->free_list_end, RIO_OFF(HostP->Caddr, PktP));
+ tmp_pointer = (FREE_LIST *) PktP;
+ WWORD(tmp_pointer->prev, TPNULL);
+ WWORD(tmp_pointer->next, TPNULL);
+ }
+ rio_dprintk(RIO_DEBUG_CMD, "Before unlock: %p\n", &HostP->HostLock);
rio_spin_unlock_irqrestore(&HostP->HostLock, flags);
}
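put_free_end() appends the packet to a doubly linked free list kept in card memory, where links are stored as 16-bit offsets from the card base rather than host pointers and TPNULL marks the end of the chain. A host-side sketch of the same append, with the nodes in an ordinary array indexed by offset; the TPNULL value is an assumption:

#include <stdint.h>

#define TPNULL 0xffff                   /* assumed "null offset" marker */

struct free_node_sketch {
	uint16_t prev, next;            /* card-relative offsets         */
};

/* Append node 'new_off' behind the current tail, as put_free_end() does
 * with its WWORD() writes; 'nodes' stands in for the card memory window. */
static void free_list_append(struct free_node_sketch *nodes,
			     uint16_t *tail_off, uint16_t new_off)
{
	if (*tail_off != TPNULL) {
		nodes[*tail_off].next = new_off;
		nodes[new_off].prev   = *tail_off;
	} else {
		/* empty list - the driver treats this as "should never happen" */
		nodes[new_off].prev = TPNULL;
	}
	nodes[new_off].next = TPNULL;
	*tail_off = new_off;
}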
@@ -715,14 +712,12 @@ PKT *PktP;
** relevant packet, [having cleared the PKT_IN_USE bit]. If PKT_IN_USE is clear,
** then can_remove_receive() returns 0.
*/
-int
-can_remove_receive(PktP, PortP)
+int can_remove_receive(PktP, PortP)
PKT **PktP;
struct Port *PortP;
{
- if ( RWORD(*PortP->RxRemove) & PKT_IN_USE) {
- *PktP = (PKT *)RIO_PTR(PortP->Caddr,
- RWORD(*PortP->RxRemove) & ~PKT_IN_USE);
+ if (RWORD(*PortP->RxRemove) & PKT_IN_USE) {
+ *PktP = (PKT *) RIO_PTR(PortP->Caddr, RWORD(*PortP->RxRemove) & ~PKT_IN_USE);
return 1;
}
return 0;
@@ -733,12 +728,10 @@ struct Port *PortP;
** and then bump the pointers. Once the pointers get to the end, they must
** be wrapped back to the start.
*/
-void
-remove_receive(PortP)
-struct Port *PortP;
+void remove_receive(PortP)
+struct Port *PortP;
{
- WWORD( *PortP->RxRemove, RWORD(*PortP->RxRemove) & ~PKT_IN_USE );
- PortP->RxRemove = (PortP->RxRemove == PortP->RxEnd) ? PortP->RxStart :
- PortP->RxRemove + 1;
- WWORD( PortP->PhbP->rx_remove , RIO_OFF(PortP->Caddr, PortP->RxRemove) );
+ WWORD(*PortP->RxRemove, RWORD(*PortP->RxRemove) & ~PKT_IN_USE);
+ PortP->RxRemove = (PortP->RxRemove == PortP->RxEnd) ? PortP->RxStart : PortP->RxRemove + 1;
+ WWORD(PortP->PhbP->rx_remove, RIO_OFF(PortP->Caddr, PortP->RxRemove));
}
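The receive side is the mirror image of the transmit ring: can_remove_receive() reports a packet as ready when the busy bit is set and hands back the offset with the flag masked off, and remove_receive() clears the bit and advances the remove pointer with the same wrap-around. A sketch under the same assumptions as the transmit ring above:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PKT_IN_USE 0x1                  /* assumed flag value             */
#define RX_SLOTS   8                    /* illustrative ring size         */

struct rx_ring_sketch {
	uint16_t slot[RX_SLOTS];        /* packet offsets, low bit = full */
	size_t   remove;                /* next slot to drain             */
};

/* Mirror of can_remove_receive(): ready when the busy bit is set; the
 * returned offset has the flag bit stripped. */
static bool ring_peek(const struct rx_ring_sketch *r, uint16_t *pkt_off)
{
	if (r->slot[r->remove] & PKT_IN_USE) {
		*pkt_off = r->slot[r->remove] & (uint16_t)~PKT_IN_USE;
		return true;
	}
	return false;
}

/* Mirror of remove_receive(): give the slot back and advance with wrap. */
static void ring_pop(struct rx_ring_sketch *r)
{
	r->slot[r->remove] &= (uint16_t)~PKT_IN_USE;
	r->remove = (r->remove + 1 == RX_SLOTS) ? 0 : r->remove + 1;
}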
diff --git a/drivers/char/rio/riopcicopy.c b/drivers/char/rio/riopcicopy.c
index 2ea99a60aa3..535afaa51ca 100644
--- a/drivers/char/rio/riopcicopy.c
+++ b/drivers/char/rio/riopcicopy.c
@@ -1,8 +1,8 @@
/* Yeah. We have copyright on this one. Sure. */
-void rio_pcicopy( char *from, char *to, int amount)
+void rio_pcicopy(char *from, char *to, int amount)
{
- while ( amount-- )
- *to++ = *from++;
+ while (amount--)
+ *to++ = *from++;
}
diff --git a/drivers/char/rio/rioroute.c b/drivers/char/rio/rioroute.c
index e9564c9fb37..0f4cd33ba64 100644
--- a/drivers/char/rio/rioroute.c
+++ b/drivers/char/rio/rioroute.c
@@ -93,625 +93,517 @@ static void RIOConCon(struct rio_info *, struct Host *, uint, uint, uint, uint,
** Incoming on the ROUTE_RUP
** I wrote this while I was tired. Forgive me.
*/
-int RIORouteRup( struct rio_info *p, uint Rup, struct Host *HostP, PKT *PacketP )
+int RIORouteRup(struct rio_info *p, uint Rup, struct Host *HostP, PKT * PacketP)
{
- struct PktCmd *PktCmdP = (struct PktCmd *)PacketP->data;
- struct PktCmd_M *PktReplyP;
- struct CmdBlk *CmdBlkP;
- struct Port *PortP;
- struct Map *MapP;
- struct Top *TopP;
- int ThisLink, ThisLinkMin, ThisLinkMax;
- int port;
- int Mod, Mod1, Mod2;
- ushort RtaType;
- uint RtaUniq;
- uint ThisUnit, ThisUnit2; /* 2 ids to accommodate 16 port RTA */
- uint OldUnit, NewUnit, OldLink, NewLink;
- char *MyType, *MyName;
- int Lies;
- unsigned long flags;
+ struct PktCmd *PktCmdP = (struct PktCmd *) PacketP->data;
+ struct PktCmd_M *PktReplyP;
+ struct CmdBlk *CmdBlkP;
+ struct Port *PortP;
+ struct Map *MapP;
+ struct Top *TopP;
+ int ThisLink, ThisLinkMin, ThisLinkMax;
+ int port;
+ int Mod, Mod1, Mod2;
+ ushort RtaType;
+ uint RtaUniq;
+ uint ThisUnit, ThisUnit2; /* 2 ids to accommodate 16 port RTA */
+ uint OldUnit, NewUnit, OldLink, NewLink;
+ char *MyType, *MyName;
+ int Lies;
+ unsigned long flags;
#ifdef STACK
- RIOStackCheck("RIORouteRup");
+ RIOStackCheck("RIORouteRup");
#endif
#ifdef CHECK
- CheckPacketP(PacketP);
- CheckHostP(HostP);
- CheckRup(Rup);
- CheckHost(Host);
+ CheckPacketP(PacketP);
+ CheckHostP(HostP);
+ CheckRup(Rup);
+ CheckHost(Host);
#endif
- /*
- ** Is this unit telling us it's current link topology?
- */
- if ( RBYTE(PktCmdP->Command) == ROUTE_TOPOLOGY )
- {
- MapP = HostP->Mapping;
-
- /*
- ** The packet can be sent either by the host or by an RTA.
- ** If it comes from the host, then we need to fill in the
- ** Topology array in the host structure. If it came in
- ** from an RTA then we need to fill in the Mapping structure's
- ** Topology array for the unit.
- */
- if ( Rup >= (ushort)MAX_RUP )
- {
- ThisUnit = HOST_ID;
- TopP = HostP->Topology;
- MyType = "Host";
- MyName = HostP->Name;
- ThisLinkMin = ThisLinkMax = Rup - MAX_RUP;
- }
- else
- {
- ThisUnit = Rup+1;
- TopP = HostP->Mapping[Rup].Topology;
- MyType = "RTA";
- MyName = HostP->Mapping[Rup].Name;
- ThisLinkMin = 0;
- ThisLinkMax = LINKS_PER_UNIT - 1;
- }
-
- /*
- ** Lies will not be tolerated.
- ** If any pair of links claim to be connected to the same
- ** place, then ignore this packet completely.
- */
- Lies = 0;
- for ( ThisLink=ThisLinkMin + 1; ThisLink <= ThisLinkMax; ThisLink++)
- {
- /*
- ** it won't lie about network interconnect, total disconnects
- ** and no-IDs. (or at least, it doesn't *matter* if it does)
- */
- if ( RBYTE(PktCmdP->RouteTopology[ThisLink].Unit) > (ushort)MAX_RUP )
- continue;
-
- for ( NewLink=ThisLinkMin; NewLink < ThisLink; NewLink++ )
- {
- if ( (RBYTE(PktCmdP->RouteTopology[ThisLink].Unit) ==
- RBYTE(PktCmdP->RouteTopology[NewLink].Unit)) &&
- (RBYTE(PktCmdP->RouteTopology[ThisLink].Link) ==
- RBYTE(PktCmdP->RouteTopology[NewLink].Link)) )
- {
- Lies++;
- }
- }
- }
-
- if ( Lies )
- {
- rio_dprintk (RIO_DEBUG_ROUTE, "LIES! DAMN LIES! %d LIES!\n",Lies);
- rio_dprintk (RIO_DEBUG_ROUTE, "%d:%c %d:%c %d:%c %d:%c\n",
- RBYTE(PktCmdP->RouteTopology[0].Unit),
- 'A'+RBYTE(PktCmdP->RouteTopology[0].Link),
- RBYTE(PktCmdP->RouteTopology[1].Unit),
- 'A'+RBYTE(PktCmdP->RouteTopology[1].Link),
- RBYTE(PktCmdP->RouteTopology[2].Unit),
- 'A'+RBYTE(PktCmdP->RouteTopology[2].Link),
- RBYTE(PktCmdP->RouteTopology[3].Unit),
- 'A'+RBYTE(PktCmdP->RouteTopology[3].Link));
- return TRUE;
- }
-
- /*
- ** now, process each link.
- */
- for ( ThisLink=ThisLinkMin; ThisLink <= ThisLinkMax; ThisLink++)
- {
- /*
- ** this is what it was connected to
- */
- OldUnit = TopP[ThisLink].Unit;
- OldLink = TopP[ThisLink].Link;
-
- /*
- ** this is what it is now connected to
- */
- NewUnit = RBYTE(PktCmdP->RouteTopology[ThisLink].Unit);
- NewLink = RBYTE(PktCmdP->RouteTopology[ThisLink].Link);
-
- if ( OldUnit != NewUnit || OldLink != NewLink )
- {
/*
- ** something has changed!
- */
-
- if ( NewUnit > MAX_RUP &&
- NewUnit != ROUTE_DISCONNECT &&
- NewUnit != ROUTE_NO_ID &&
- NewUnit != ROUTE_INTERCONNECT )
- {
- rio_dprintk (RIO_DEBUG_ROUTE, "I have a link from %s %s to unit %d:%d - I don't like it.\n",
- MyType,
- MyName,
- NewUnit,
- NewLink);
- }
- else
- {
- /*
- ** put the new values in
- */
- TopP[ThisLink].Unit = NewUnit;
- TopP[ThisLink].Link = NewLink;
-
- RIOSetChange(p);
-
- if ( OldUnit <= MAX_RUP )
- {
- /*
- ** If something has become bust, then re-enable them messages
- */
- if (! p->RIONoMessage)
- RIOConCon(p,HostP,ThisUnit,ThisLink,OldUnit,OldLink,DISCONNECT);
- }
-
- if ( ( NewUnit <= MAX_RUP ) && !p->RIONoMessage )
- RIOConCon(p,HostP,ThisUnit,ThisLink,NewUnit,NewLink,CONNECT);
-
- if ( NewUnit == ROUTE_NO_ID )
- rio_dprintk (RIO_DEBUG_ROUTE, "%s %s (%c) is connected to an unconfigured unit.\n",
- MyType,MyName,'A'+ThisLink);
-
- if ( NewUnit == ROUTE_INTERCONNECT )
- {
- if (! p->RIONoMessage)
- cprintf("%s '%s' (%c) is connected to another network.\n", MyType,MyName,'A'+ThisLink);
- }
-
- /*
- ** perform an update for 'the other end', so that these messages
- ** only appears once. Only disconnect the other end if it is pointing
- ** at us!
- */
- if ( OldUnit == HOST_ID )
- {
- if ( HostP->Topology[OldLink].Unit == ThisUnit &&
- HostP->Topology[OldLink].Link == ThisLink )
- {
- rio_dprintk (RIO_DEBUG_ROUTE, "SETTING HOST (%c) TO DISCONNECTED!\n", OldLink+'A');
- HostP->Topology[OldLink].Unit = ROUTE_DISCONNECT;
- HostP->Topology[OldLink].Link = NO_LINK;
- }
- else
- {
- rio_dprintk (RIO_DEBUG_ROUTE, "HOST(%c) WAS NOT CONNECTED TO %s (%c)!\n",
- OldLink+'A',HostP->Mapping[ThisUnit-1].Name,ThisLink+'A');
- }
- }
- else if ( OldUnit <= MAX_RUP )
- {
- if ( HostP->Mapping[OldUnit-1].Topology[OldLink].Unit == ThisUnit &&
- HostP->Mapping[OldUnit-1].Topology[OldLink].Link == ThisLink )
- {
- rio_dprintk (RIO_DEBUG_ROUTE, "SETTING RTA %s (%c) TO DISCONNECTED!\n",
- HostP->Mapping[OldUnit-1].Name,OldLink+'A');
- HostP->Mapping[OldUnit-1].Topology[OldLink].Unit=ROUTE_DISCONNECT;
- HostP->Mapping[OldUnit-1].Topology[OldLink].Link=NO_LINK;
- }
- else
- {
- rio_dprintk (RIO_DEBUG_ROUTE, "RTA %s (%c) WAS NOT CONNECTED TO %s (%c)\n",
- HostP->Mapping[OldUnit-1].Name,OldLink+'A',
- HostP->Mapping[ThisUnit-1].Name,ThisLink+'A');
- }
- }
- if ( NewUnit == HOST_ID )
- {
- rio_dprintk (RIO_DEBUG_ROUTE, "MARKING HOST (%c) CONNECTED TO %s (%c)\n",
- NewLink+'A',MyName,ThisLink+'A');
- HostP->Topology[NewLink].Unit = ThisUnit;
- HostP->Topology[NewLink].Link = ThisLink;
- }
- else if ( NewUnit <= MAX_RUP )
- {
- rio_dprintk (RIO_DEBUG_ROUTE, "MARKING RTA %s (%c) CONNECTED TO %s (%c)\n",
- HostP->Mapping[NewUnit-1].Name,NewLink+'A',MyName,ThisLink+'A');
- HostP->Mapping[NewUnit-1].Topology[NewLink].Unit=ThisUnit;
- HostP->Mapping[NewUnit-1].Topology[NewLink].Link=ThisLink;
- }
+ ** Is this unit telling us its current link topology?
+ */
+ if (RBYTE(PktCmdP->Command) == ROUTE_TOPOLOGY) {
+ MapP = HostP->Mapping;
+
+ /*
+ ** The packet can be sent either by the host or by an RTA.
+ ** If it comes from the host, then we need to fill in the
+ ** Topology array in the host structure. If it came in
+ ** from an RTA then we need to fill in the Mapping structure's
+ ** Topology array for the unit.
+ */
+ if (Rup >= (ushort) MAX_RUP) {
+ ThisUnit = HOST_ID;
+ TopP = HostP->Topology;
+ MyType = "Host";
+ MyName = HostP->Name;
+ ThisLinkMin = ThisLinkMax = Rup - MAX_RUP;
+ } else {
+ ThisUnit = Rup + 1;
+ TopP = HostP->Mapping[Rup].Topology;
+ MyType = "RTA";
+ MyName = HostP->Mapping[Rup].Name;
+ ThisLinkMin = 0;
+ ThisLinkMax = LINKS_PER_UNIT - 1;
+ }
+
+ /*
+ ** Lies will not be tolerated.
+ ** If any pair of links claim to be connected to the same
+ ** place, then ignore this packet completely.
+ */
+ Lies = 0;
+ for (ThisLink = ThisLinkMin + 1; ThisLink <= ThisLinkMax; ThisLink++) {
+ /*
+ ** it won't lie about network interconnect, total disconnects
+ ** and no-IDs. (or at least, it doesn't *matter* if it does)
+ */
+ if (RBYTE(PktCmdP->RouteTopology[ThisLink].Unit) > (ushort) MAX_RUP)
+ continue;
+
+ for (NewLink = ThisLinkMin; NewLink < ThisLink; NewLink++) {
+ if ((RBYTE(PktCmdP->RouteTopology[ThisLink].Unit) == RBYTE(PktCmdP->RouteTopology[NewLink].Unit)) && (RBYTE(PktCmdP->RouteTopology[ThisLink].Link) == RBYTE(PktCmdP->RouteTopology[NewLink].Link))) {
+ Lies++;
+ }
+ }
+ }
+
+ if (Lies) {
+ rio_dprintk(RIO_DEBUG_ROUTE, "LIES! DAMN LIES! %d LIES!\n", Lies);
+ rio_dprintk(RIO_DEBUG_ROUTE, "%d:%c %d:%c %d:%c %d:%c\n",
+ RBYTE(PktCmdP->RouteTopology[0].Unit),
+ 'A' + RBYTE(PktCmdP->RouteTopology[0].Link),
+ RBYTE(PktCmdP->RouteTopology[1].Unit),
+ 'A' + RBYTE(PktCmdP->RouteTopology[1].Link), RBYTE(PktCmdP->RouteTopology[2].Unit), 'A' + RBYTE(PktCmdP->RouteTopology[2].Link), RBYTE(PktCmdP->RouteTopology[3].Unit), 'A' + RBYTE(PktCmdP->RouteTopology[3].Link));
+ return TRUE;
+ }
+
+ /*
+ ** now, process each link.
+ */
+ for (ThisLink = ThisLinkMin; ThisLink <= ThisLinkMax; ThisLink++) {
+ /*
+ ** this is what it was connected to
+ */
+ OldUnit = TopP[ThisLink].Unit;
+ OldLink = TopP[ThisLink].Link;
+
+ /*
+ ** this is what it is now connected to
+ */
+ NewUnit = RBYTE(PktCmdP->RouteTopology[ThisLink].Unit);
+ NewLink = RBYTE(PktCmdP->RouteTopology[ThisLink].Link);
+
+ if (OldUnit != NewUnit || OldLink != NewLink) {
+ /*
+ ** something has changed!
+ */
+
+ if (NewUnit > MAX_RUP && NewUnit != ROUTE_DISCONNECT && NewUnit != ROUTE_NO_ID && NewUnit != ROUTE_INTERCONNECT) {
+ rio_dprintk(RIO_DEBUG_ROUTE, "I have a link from %s %s to unit %d:%d - I don't like it.\n", MyType, MyName, NewUnit, NewLink);
+ } else {
+ /*
+ ** put the new values in
+ */
+ TopP[ThisLink].Unit = NewUnit;
+ TopP[ThisLink].Link = NewLink;
+
+ RIOSetChange(p);
+
+ if (OldUnit <= MAX_RUP) {
+ /*
+ ** If something has become bust, then re-enable the messages
+ */
+ if (!p->RIONoMessage)
+ RIOConCon(p, HostP, ThisUnit, ThisLink, OldUnit, OldLink, DISCONNECT);
+ }
+
+ if ((NewUnit <= MAX_RUP) && !p->RIONoMessage)
+ RIOConCon(p, HostP, ThisUnit, ThisLink, NewUnit, NewLink, CONNECT);
+
+ if (NewUnit == ROUTE_NO_ID)
+ rio_dprintk(RIO_DEBUG_ROUTE, "%s %s (%c) is connected to an unconfigured unit.\n", MyType, MyName, 'A' + ThisLink);
+
+ if (NewUnit == ROUTE_INTERCONNECT) {
+ if (!p->RIONoMessage)
+ cprintf("%s '%s' (%c) is connected to another network.\n", MyType, MyName, 'A' + ThisLink);
+ }
+
+ /*
+ ** perform an update for 'the other end', so that these messages
+ ** only appear once. Only disconnect the other end if it is pointing
+ ** at us!
+ */
+ if (OldUnit == HOST_ID) {
+ if (HostP->Topology[OldLink].Unit == ThisUnit && HostP->Topology[OldLink].Link == ThisLink) {
+ rio_dprintk(RIO_DEBUG_ROUTE, "SETTING HOST (%c) TO DISCONNECTED!\n", OldLink + 'A');
+ HostP->Topology[OldLink].Unit = ROUTE_DISCONNECT;
+ HostP->Topology[OldLink].Link = NO_LINK;
+ } else {
+ rio_dprintk(RIO_DEBUG_ROUTE, "HOST(%c) WAS NOT CONNECTED TO %s (%c)!\n", OldLink + 'A', HostP->Mapping[ThisUnit - 1].Name, ThisLink + 'A');
+ }
+ } else if (OldUnit <= MAX_RUP) {
+ if (HostP->Mapping[OldUnit - 1].Topology[OldLink].Unit == ThisUnit && HostP->Mapping[OldUnit - 1].Topology[OldLink].Link == ThisLink) {
+ rio_dprintk(RIO_DEBUG_ROUTE, "SETTING RTA %s (%c) TO DISCONNECTED!\n", HostP->Mapping[OldUnit - 1].Name, OldLink + 'A');
+ HostP->Mapping[OldUnit - 1].Topology[OldLink].Unit = ROUTE_DISCONNECT;
+ HostP->Mapping[OldUnit - 1].Topology[OldLink].Link = NO_LINK;
+ } else {
+ rio_dprintk(RIO_DEBUG_ROUTE, "RTA %s (%c) WAS NOT CONNECTED TO %s (%c)\n", HostP->Mapping[OldUnit - 1].Name, OldLink + 'A', HostP->Mapping[ThisUnit - 1].Name, ThisLink + 'A');
+ }
+ }
+ if (NewUnit == HOST_ID) {
+ rio_dprintk(RIO_DEBUG_ROUTE, "MARKING HOST (%c) CONNECTED TO %s (%c)\n", NewLink + 'A', MyName, ThisLink + 'A');
+ HostP->Topology[NewLink].Unit = ThisUnit;
+ HostP->Topology[NewLink].Link = ThisLink;
+ } else if (NewUnit <= MAX_RUP) {
+ rio_dprintk(RIO_DEBUG_ROUTE, "MARKING RTA %s (%c) CONNECTED TO %s (%c)\n", HostP->Mapping[NewUnit - 1].Name, NewLink + 'A', MyName, ThisLink + 'A');
+ HostP->Mapping[NewUnit - 1].Topology[NewLink].Unit = ThisUnit;
+ HostP->Mapping[NewUnit - 1].Topology[NewLink].Link = ThisLink;
+ }
+ }
+ RIOSetChange(p);
+ RIOCheckIsolated(p, HostP, OldUnit);
+ }
+ }
+ return TRUE;
}
- RIOSetChange(p);
- RIOCheckIsolated(p, HostP, OldUnit );
- }
- }
- return TRUE;
- }
-
- /*
- ** The only other command we recognise is a route_request command
- */
- if ( RBYTE(PktCmdP->Command) != ROUTE_REQUEST )
- {
- rio_dprintk (RIO_DEBUG_ROUTE, "Unknown command %d received on rup %d host %d ROUTE_RUP\n",
- RBYTE(PktCmdP->Command),Rup,(int)HostP);
- return TRUE;
- }
-
- RtaUniq = (RBYTE(PktCmdP->UniqNum[0])) +
- (RBYTE(PktCmdP->UniqNum[1]) << 8) +
- (RBYTE(PktCmdP->UniqNum[2]) << 16) +
- (RBYTE(PktCmdP->UniqNum[3]) << 24);
-
- /*
- ** Determine if 8 or 16 port RTA
- */
- RtaType = GetUnitType(RtaUniq);
-
- rio_dprintk (RIO_DEBUG_ROUTE, "Received a request for an ID for serial number %x\n", RtaUniq);
-
- Mod = RBYTE(PktCmdP->ModuleTypes);
- Mod1 = LONYBLE(Mod);
- if (RtaType == TYPE_RTA16)
- {
- /*
- ** Only one ident is set for a 16 port RTA. To make compatible
- ** with 8 port, set 2nd ident in Mod2 to the same as Mod1.
- */
- Mod2 = Mod1;
- rio_dprintk (RIO_DEBUG_ROUTE, "Backplane type is %s (all ports)\n",
- p->RIOModuleTypes[Mod1].Name);
- }
- else
- {
- Mod2 = HINYBLE(Mod);
- rio_dprintk (RIO_DEBUG_ROUTE, "Module types are %s (ports 0-3) and %s (ports 4-7)\n",
- p->RIOModuleTypes[Mod1].Name, p->RIOModuleTypes[Mod2].Name);
- }
-
- if ( RtaUniq == 0xffffffff )
- {
- ShowPacket( DBG_SPECIAL, PacketP );
- }
-
- /*
- ** try to unhook a command block from the command free list.
- */
- if ( !(CmdBlkP = RIOGetCmdBlk()) )
- {
- rio_dprintk (RIO_DEBUG_ROUTE, "No command blocks to route RTA! come back later.\n");
- return 0;
- }
-
- /*
- ** Fill in the default info on the command block
- */
- CmdBlkP->Packet.dest_unit = Rup;
- CmdBlkP->Packet.dest_port = ROUTE_RUP;
- CmdBlkP->Packet.src_unit = HOST_ID;
- CmdBlkP->Packet.src_port = ROUTE_RUP;
- CmdBlkP->Packet.len = PKT_CMD_BIT | 1;
- CmdBlkP->PreFuncP = CmdBlkP->PostFuncP = NULL;
- PktReplyP = (struct PktCmd_M *)CmdBlkP->Packet.data;
-
- if (! RIOBootOk(p, HostP, RtaUniq))
- {
- rio_dprintk (RIO_DEBUG_ROUTE, "RTA %x tried to get an ID, but does not belong - FOAD it!\n",
- RtaUniq);
- PktReplyP->Command = ROUTE_FOAD;
- HostP->Copy("RT_FOAD", PktReplyP->CommandText, 7);
- RIOQueueCmdBlk(HostP, Rup, CmdBlkP);
- return TRUE;
- }
-
- /*
- ** Check to see if the RTA is configured for this host
- */
- for ( ThisUnit=0; ThisUnit<MAX_RUP; ThisUnit++ )
- {
- rio_dprintk (RIO_DEBUG_ROUTE, "Entry %d Flags=%s %s UniqueNum=0x%x\n",
- ThisUnit,
- HostP->Mapping[ThisUnit].Flags & SLOT_IN_USE ?
- "Slot-In-Use":"Not In Use",
- HostP->Mapping[ThisUnit].Flags & SLOT_TENTATIVE ?
- "Slot-Tentative":"Not Tentative",
- HostP->Mapping[ThisUnit].RtaUniqueNum);
-
- /*
- ** We have an entry for it.
- */
- if ( (HostP->Mapping[ThisUnit].Flags & (SLOT_IN_USE | SLOT_TENTATIVE)) &&
- (HostP->Mapping[ThisUnit].RtaUniqueNum == RtaUniq) )
- {
- if (RtaType == TYPE_RTA16)
- {
- ThisUnit2 = HostP->Mapping[ThisUnit].ID2 - 1;
- rio_dprintk (RIO_DEBUG_ROUTE, "Found unit 0x%x at slots %d+%d\n",
- RtaUniq,ThisUnit,ThisUnit2);
- }
- else
- rio_dprintk (RIO_DEBUG_ROUTE, "Found unit 0x%x at slot %d\n",
- RtaUniq,ThisUnit);
- /*
- ** If we have no knowledge of booting it, then the host has
- ** been re-booted, and so we must kill the RTA, so that it
- ** will be booted again (potentially with new bins)
- ** and it will then re-ask for an ID, which we will service.
- */
- if ( (HostP->Mapping[ThisUnit].Flags & SLOT_IN_USE) &&
- !(HostP->Mapping[ThisUnit].Flags & RTA_BOOTED) )
- {
- if ( !(HostP->Mapping[ThisUnit].Flags & MSG_DONE) )
- {
- if ( !p->RIONoMessage )
- cprintf("RTA '%s' is being updated.\n",HostP->Mapping[ThisUnit].Name);
- HostP->Mapping[ThisUnit].Flags |= MSG_DONE;
+
+ /*
+ ** The only other command we recognise is a route_request command
+ */
+ if (RBYTE(PktCmdP->Command) != ROUTE_REQUEST) {
+ rio_dprintk(RIO_DEBUG_ROUTE, "Unknown command %d received on rup %d host %d ROUTE_RUP\n", RBYTE(PktCmdP->Command), Rup, (int) HostP);
+ return TRUE;
}
- PktReplyP->Command = ROUTE_FOAD;
- HostP->Copy("RT_FOAD",PktReplyP->CommandText,7);
- RIOQueueCmdBlk(HostP, Rup, CmdBlkP);
- return TRUE;
- }
-
- /*
- ** Send the ID (entry) to this RTA. The ID number is implicit as
- ** the offset into the table. It is worth noting at this stage
- ** that offset zero in the table contains the entries for the
- ** RTA with ID 1!!!!
- */
- PktReplyP->Command = ROUTE_ALLOCATE;
- PktReplyP->IDNum = ThisUnit+1;
- if (RtaType == TYPE_RTA16)
- {
- if (HostP->Mapping[ThisUnit].Flags & SLOT_IN_USE)
- /*
- ** Adjust the phb and tx pkt dest_units for 2nd block of 8
- ** only if the RTA has ports associated (SLOT_IN_USE)
- */
- RIOFixPhbs(p, HostP, ThisUnit2);
- PktReplyP->IDNum2 = ThisUnit2+1;
- rio_dprintk (RIO_DEBUG_ROUTE, "RTA '%s' has been allocated IDs %d+%d\n",
- HostP->Mapping[ThisUnit].Name, PktReplyP->IDNum, PktReplyP->IDNum2);
- }
- else
- {
- PktReplyP->IDNum2 = ROUTE_NO_ID;
- rio_dprintk (RIO_DEBUG_ROUTE, "RTA '%s' has been allocated ID %d\n",
- HostP->Mapping[ThisUnit].Name,PktReplyP->IDNum);
- }
- HostP->Copy("RT_ALLOCAT",PktReplyP->CommandText,10);
-
- RIOQueueCmdBlk( HostP, Rup, CmdBlkP);
-
- /*
- ** If this is a freshly booted RTA, then we need to re-open
- ** the ports, if any where open, so that data may once more
- ** flow around the system!
- */
- if ( (HostP->Mapping[ThisUnit].Flags & RTA_NEWBOOT) &&
- (HostP->Mapping[ThisUnit].SysPort != NO_PORT) )
- {
+
+ RtaUniq = (RBYTE(PktCmdP->UniqNum[0])) + (RBYTE(PktCmdP->UniqNum[1]) << 8) + (RBYTE(PktCmdP->UniqNum[2]) << 16) + (RBYTE(PktCmdP->UniqNum[3]) << 24);
+
/*
- ** look at the ports associated with this beast and
- ** see if any where open. If they was, then re-open
- ** them, using the info from the tty flags.
- */
- for ( port=0; port<PORTS_PER_RTA; port++ )
- {
- PortP = p->RIOPortp[port+HostP->Mapping[ThisUnit].SysPort];
- if ( PortP->State & (RIO_MOPEN|RIO_LOPEN) )
- {
- rio_dprintk (RIO_DEBUG_ROUTE, "Re-opened this port\n");
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- PortP->MagicFlags |= MAGIC_REBOOT;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- }
+ ** Determine if 8 or 16 port RTA
+ */
+ RtaType = GetUnitType(RtaUniq);
+
+ rio_dprintk(RIO_DEBUG_ROUTE, "Received a request for an ID for serial number %x\n", RtaUniq);
+
+ Mod = RBYTE(PktCmdP->ModuleTypes);
+ Mod1 = LONYBLE(Mod);
+ if (RtaType == TYPE_RTA16) {
+ /*
+ ** Only one ident is set for a 16 port RTA. To make compatible
+ ** with 8 port, set 2nd ident in Mod2 to the same as Mod1.
+ */
+ Mod2 = Mod1;
+ rio_dprintk(RIO_DEBUG_ROUTE, "Backplane type is %s (all ports)\n", p->RIOModuleTypes[Mod1].Name);
+ } else {
+ Mod2 = HINYBLE(Mod);
+ rio_dprintk(RIO_DEBUG_ROUTE, "Module types are %s (ports 0-3) and %s (ports 4-7)\n", p->RIOModuleTypes[Mod1].Name, p->RIOModuleTypes[Mod2].Name);
}
- if (RtaType == TYPE_RTA16)
- {
- for ( port=0; port<PORTS_PER_RTA; port++ )
- {
- PortP = p->RIOPortp[port+HostP->Mapping[ThisUnit2].SysPort];
- if ( PortP->State & (RIO_MOPEN|RIO_LOPEN) )
- {
- rio_dprintk (RIO_DEBUG_ROUTE, "Re-opened this port\n");
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- PortP->MagicFlags |= MAGIC_REBOOT;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- }
- }
+
+ if (RtaUniq == 0xffffffff) {
+ ShowPacket(DBG_SPECIAL, PacketP);
}
- }
-
- /*
- ** keep a copy of the module types!
- */
- HostP->UnixRups[ThisUnit].ModTypes = Mod;
- if (RtaType == TYPE_RTA16)
- HostP->UnixRups[ThisUnit2].ModTypes = Mod;
-
- /*
- ** If either of the modules on this unit is read-only or write-only
- ** or none-xprint, then we need to transfer that info over to the
- ** relevant ports.
- */
- if ( HostP->Mapping[ThisUnit].SysPort != NO_PORT )
- {
- for ( port=0; port<PORTS_PER_MODULE; port++ )
- {
- p->RIOPortp[port+HostP->Mapping[ThisUnit].SysPort]->Config &= ~RIO_NOMASK;
- p->RIOPortp[port+HostP->Mapping[ThisUnit].SysPort]->Config |=
- p->RIOModuleTypes[Mod1].Flags[port];
- p->RIOPortp[port+PORTS_PER_MODULE+HostP->Mapping[ThisUnit].SysPort]->Config &= ~RIO_NOMASK;
- p->RIOPortp[port+PORTS_PER_MODULE+HostP->Mapping[ThisUnit].SysPort]->Config |= p->RIOModuleTypes[Mod2].Flags[port];
+
+ /*
+ ** try to unhook a command block from the command free list.
+ */
+ if (!(CmdBlkP = RIOGetCmdBlk())) {
+ rio_dprintk(RIO_DEBUG_ROUTE, "No command blocks to route RTA! come back later.\n");
+ return 0;
}
- if (RtaType == TYPE_RTA16)
- {
- for ( port=0; port<PORTS_PER_MODULE; port++ )
- {
- p->RIOPortp[port+HostP->Mapping[ThisUnit2].SysPort]->Config &= ~RIO_NOMASK;
- p->RIOPortp[port+HostP->Mapping[ThisUnit2].SysPort]->Config |= p->RIOModuleTypes[Mod1].Flags[port];
- p->RIOPortp[port+PORTS_PER_MODULE+HostP->Mapping[ThisUnit2].SysPort]->Config &= ~RIO_NOMASK;
- p->RIOPortp[port+PORTS_PER_MODULE+HostP->Mapping[ThisUnit2].SysPort]->Config |= p->RIOModuleTypes[Mod2].Flags[port];
- }
+
+ /*
+ ** Fill in the default info on the command block
+ */
+ CmdBlkP->Packet.dest_unit = Rup;
+ CmdBlkP->Packet.dest_port = ROUTE_RUP;
+ CmdBlkP->Packet.src_unit = HOST_ID;
+ CmdBlkP->Packet.src_port = ROUTE_RUP;
+ CmdBlkP->Packet.len = PKT_CMD_BIT | 1;
+ CmdBlkP->PreFuncP = CmdBlkP->PostFuncP = NULL;
+ PktReplyP = (struct PktCmd_M *) CmdBlkP->Packet.data;
+
+ if (!RIOBootOk(p, HostP, RtaUniq)) {
+ rio_dprintk(RIO_DEBUG_ROUTE, "RTA %x tried to get an ID, but does not belong - FOAD it!\n", RtaUniq);
+ PktReplyP->Command = ROUTE_FOAD;
+ HostP->Copy("RT_FOAD", PktReplyP->CommandText, 7);
+ RIOQueueCmdBlk(HostP, Rup, CmdBlkP);
+ return TRUE;
}
- }
-
- /*
- ** Job done, get on with the interrupts!
- */
- return TRUE;
- }
- }
- /*
- ** There is no table entry for this RTA at all.
- **
- ** Lets check to see if we actually booted this unit - if not,
- ** then we reset it and it will go round the loop of being booted
- ** we can then worry about trying to fit it into the table.
- */
- for ( ThisUnit=0; ThisUnit<HostP->NumExtraBooted; ThisUnit++ )
- if ( HostP->ExtraUnits[ThisUnit] == RtaUniq )
- break;
- if ( ThisUnit == HostP->NumExtraBooted && ThisUnit != MAX_EXTRA_UNITS )
- {
- /*
- ** if the unit wasn't in the table, and the table wasn't full, then
- ** we reset the unit, because we didn't boot it.
- ** However, if the table is full, it could be that we did boot
- ** this unit, and so we won't reboot it, because it isn't really
- ** all that disasterous to keep the old bins in most cases. This
- ** is a rather tacky feature, but we are on the edge of reallity
- ** here, because the implication is that someone has connected
- ** 16+MAX_EXTRA_UNITS onto one host.
- */
- static int UnknownMesgDone = 0;
-
- if ( !UnknownMesgDone )
- {
- if (! p->RIONoMessage)
- cprintf("One or more unknown RTAs are being updated.\n");
- UnknownMesgDone = 1;
- }
-
- PktReplyP->Command = ROUTE_FOAD;
- HostP->Copy("RT_FOAD",PktReplyP->CommandText,7);
- }
- else
- {
- /*
- ** we did boot it (as an extra), and there may now be a table
- ** slot free (because of a delete), so we will try to make
- ** a tentative entry for it, so that the configurator can see it
- ** and fill in the details for us.
- */
- if (RtaType == TYPE_RTA16)
- {
- if (RIOFindFreeID(p, HostP, &ThisUnit, &ThisUnit2) == 0)
- {
- RIODefaultName(p, HostP, ThisUnit);
- FillSlot(ThisUnit, ThisUnit2, RtaUniq, HostP);
+
+ /*
+ ** Check to see if the RTA is configured for this host
+ */
+ for (ThisUnit = 0; ThisUnit < MAX_RUP; ThisUnit++) {
+ rio_dprintk(RIO_DEBUG_ROUTE, "Entry %d Flags=%s %s UniqueNum=0x%x\n",
+ ThisUnit, HostP->Mapping[ThisUnit].Flags & SLOT_IN_USE ? "Slot-In-Use" : "Not In Use", HostP->Mapping[ThisUnit].Flags & SLOT_TENTATIVE ? "Slot-Tentative" : "Not Tentative", HostP->Mapping[ThisUnit].RtaUniqueNum);
+
+ /*
+ ** We have an entry for it.
+ */
+ if ((HostP->Mapping[ThisUnit].Flags & (SLOT_IN_USE | SLOT_TENTATIVE)) && (HostP->Mapping[ThisUnit].RtaUniqueNum == RtaUniq)) {
+ if (RtaType == TYPE_RTA16) {
+ ThisUnit2 = HostP->Mapping[ThisUnit].ID2 - 1;
+ rio_dprintk(RIO_DEBUG_ROUTE, "Found unit 0x%x at slots %d+%d\n", RtaUniq, ThisUnit, ThisUnit2);
+ } else
+ rio_dprintk(RIO_DEBUG_ROUTE, "Found unit 0x%x at slot %d\n", RtaUniq, ThisUnit);
+ /*
+ ** If we have no knowledge of booting it, then the host has
+ ** been re-booted, and so we must kill the RTA, so that it
+ ** will be booted again (potentially with new bins)
+ ** and it will then re-ask for an ID, which we will service.
+ */
+ if ((HostP->Mapping[ThisUnit].Flags & SLOT_IN_USE) && !(HostP->Mapping[ThisUnit].Flags & RTA_BOOTED)) {
+ if (!(HostP->Mapping[ThisUnit].Flags & MSG_DONE)) {
+ if (!p->RIONoMessage)
+ cprintf("RTA '%s' is being updated.\n", HostP->Mapping[ThisUnit].Name);
+ HostP->Mapping[ThisUnit].Flags |= MSG_DONE;
+ }
+ PktReplyP->Command = ROUTE_FOAD;
+ HostP->Copy("RT_FOAD", PktReplyP->CommandText, 7);
+ RIOQueueCmdBlk(HostP, Rup, CmdBlkP);
+ return TRUE;
+ }
+
+ /*
+ ** Send the ID (entry) to this RTA. The ID number is implicit as
+ ** the offset into the table. It is worth noting at this stage
+ ** that offset zero in the table contains the entries for the
+ ** RTA with ID 1!!!!
+ */
+ PktReplyP->Command = ROUTE_ALLOCATE;
+ PktReplyP->IDNum = ThisUnit + 1;
+ if (RtaType == TYPE_RTA16) {
+ if (HostP->Mapping[ThisUnit].Flags & SLOT_IN_USE)
+ /*
+ ** Adjust the phb and tx pkt dest_units for 2nd block of 8
+ ** only if the RTA has ports associated (SLOT_IN_USE)
+ */
+ RIOFixPhbs(p, HostP, ThisUnit2);
+ PktReplyP->IDNum2 = ThisUnit2 + 1;
+ rio_dprintk(RIO_DEBUG_ROUTE, "RTA '%s' has been allocated IDs %d+%d\n", HostP->Mapping[ThisUnit].Name, PktReplyP->IDNum, PktReplyP->IDNum2);
+ } else {
+ PktReplyP->IDNum2 = ROUTE_NO_ID;
+ rio_dprintk(RIO_DEBUG_ROUTE, "RTA '%s' has been allocated ID %d\n", HostP->Mapping[ThisUnit].Name, PktReplyP->IDNum);
+ }
+ HostP->Copy("RT_ALLOCAT", PktReplyP->CommandText, 10);
+
+ RIOQueueCmdBlk(HostP, Rup, CmdBlkP);
+
+ /*
+ ** If this is a freshly booted RTA, then we need to re-open
+ ** the ports, if any were open, so that data may once more
+ ** flow around the system!
+ */
+ if ((HostP->Mapping[ThisUnit].Flags & RTA_NEWBOOT) && (HostP->Mapping[ThisUnit].SysPort != NO_PORT)) {
+ /*
+ ** look at the ports associated with this beast and
+ ** see if any were open. If they were, then re-open
+ ** them, using the info from the tty flags.
+ */
+ for (port = 0; port < PORTS_PER_RTA; port++) {
+ PortP = p->RIOPortp[port + HostP->Mapping[ThisUnit].SysPort];
+ if (PortP->State & (RIO_MOPEN | RIO_LOPEN)) {
+ rio_dprintk(RIO_DEBUG_ROUTE, "Re-opened this port\n");
+ rio_spin_lock_irqsave(&PortP->portSem, flags);
+ PortP->MagicFlags |= MAGIC_REBOOT;
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ }
+ }
+ if (RtaType == TYPE_RTA16) {
+ for (port = 0; port < PORTS_PER_RTA; port++) {
+ PortP = p->RIOPortp[port + HostP->Mapping[ThisUnit2].SysPort];
+ if (PortP->State & (RIO_MOPEN | RIO_LOPEN)) {
+ rio_dprintk(RIO_DEBUG_ROUTE, "Re-opened this port\n");
+ rio_spin_lock_irqsave(&PortP->portSem, flags);
+ PortP->MagicFlags |= MAGIC_REBOOT;
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ }
+ }
+ }
+ }
+
+ /*
+ ** keep a copy of the module types!
+ */
+ HostP->UnixRups[ThisUnit].ModTypes = Mod;
+ if (RtaType == TYPE_RTA16)
+ HostP->UnixRups[ThisUnit2].ModTypes = Mod;
+
+ /*
+ ** If either of the modules on this unit is read-only or write-only
+ ** or none-xprint, then we need to transfer that info over to the
+ ** relevant ports.
+ */
+ if (HostP->Mapping[ThisUnit].SysPort != NO_PORT) {
+ for (port = 0; port < PORTS_PER_MODULE; port++) {
+ p->RIOPortp[port + HostP->Mapping[ThisUnit].SysPort]->Config &= ~RIO_NOMASK;
+ p->RIOPortp[port + HostP->Mapping[ThisUnit].SysPort]->Config |= p->RIOModuleTypes[Mod1].Flags[port];
+ p->RIOPortp[port + PORTS_PER_MODULE + HostP->Mapping[ThisUnit].SysPort]->Config &= ~RIO_NOMASK;
+ p->RIOPortp[port + PORTS_PER_MODULE + HostP->Mapping[ThisUnit].SysPort]->Config |= p->RIOModuleTypes[Mod2].Flags[port];
+ }
+ if (RtaType == TYPE_RTA16) {
+ for (port = 0; port < PORTS_PER_MODULE; port++) {
+ p->RIOPortp[port + HostP->Mapping[ThisUnit2].SysPort]->Config &= ~RIO_NOMASK;
+ p->RIOPortp[port + HostP->Mapping[ThisUnit2].SysPort]->Config |= p->RIOModuleTypes[Mod1].Flags[port];
+ p->RIOPortp[port + PORTS_PER_MODULE + HostP->Mapping[ThisUnit2].SysPort]->Config &= ~RIO_NOMASK;
+ p->RIOPortp[port + PORTS_PER_MODULE + HostP->Mapping[ThisUnit2].SysPort]->Config |= p->RIOModuleTypes[Mod2].Flags[port];
+ }
+ }
+ }
+
+ /*
+ ** Job done, get on with the interrupts!
+ */
+ return TRUE;
+ }
}
- }
- else
- {
- if (RIOFindFreeID(p, HostP, &ThisUnit, NULL) == 0)
- {
- RIODefaultName(p, HostP, ThisUnit);
- FillSlot(ThisUnit, 0, RtaUniq, HostP);
+ /*
+ ** There is no table entry for this RTA at all.
+ **
+ ** Let's check to see if we actually booted this unit - if not,
+ ** then we reset it and it will go round the loop of being booted;
+ ** we can then worry about trying to fit it into the table.
+ */
+ for (ThisUnit = 0; ThisUnit < HostP->NumExtraBooted; ThisUnit++)
+ if (HostP->ExtraUnits[ThisUnit] == RtaUniq)
+ break;
+ if (ThisUnit == HostP->NumExtraBooted && ThisUnit != MAX_EXTRA_UNITS) {
+ /*
+ ** if the unit wasn't in the table, and the table wasn't full, then
+ ** we reset the unit, because we didn't boot it.
+ ** However, if the table is full, it could be that we did boot
+ ** this unit, and so we won't reboot it, because it isn't really
+ ** all that disastrous to keep the old bins in most cases. This
+ ** is a rather tacky feature, but we are on the edge of reality
+ ** here, because the implication is that someone has connected
+ ** 16+MAX_EXTRA_UNITS onto one host.
+ */
+ static int UnknownMesgDone = 0;
+
+ if (!UnknownMesgDone) {
+ if (!p->RIONoMessage)
+ cprintf("One or more unknown RTAs are being updated.\n");
+ UnknownMesgDone = 1;
+ }
+
+ PktReplyP->Command = ROUTE_FOAD;
+ HostP->Copy("RT_FOAD", PktReplyP->CommandText, 7);
+ } else {
+ /*
+ ** we did boot it (as an extra), and there may now be a table
+ ** slot free (because of a delete), so we will try to make
+ ** a tentative entry for it, so that the configurator can see it
+ ** and fill in the details for us.
+ */
+ if (RtaType == TYPE_RTA16) {
+ if (RIOFindFreeID(p, HostP, &ThisUnit, &ThisUnit2) == 0) {
+ RIODefaultName(p, HostP, ThisUnit);
+ FillSlot(ThisUnit, ThisUnit2, RtaUniq, HostP);
+ }
+ } else {
+ if (RIOFindFreeID(p, HostP, &ThisUnit, NULL) == 0) {
+ RIODefaultName(p, HostP, ThisUnit);
+ FillSlot(ThisUnit, 0, RtaUniq, HostP);
+ }
+ }
+ PktReplyP->Command = ROUTE_USED;
+ HostP->Copy("RT_USED", PktReplyP->CommandText, 7);
}
- }
- PktReplyP->Command = ROUTE_USED;
- HostP->Copy("RT_USED",PktReplyP->CommandText,7);
- }
- RIOQueueCmdBlk( HostP, Rup, CmdBlkP);
- return TRUE;
+ RIOQueueCmdBlk(HostP, Rup, CmdBlkP);
+ return TRUE;
}
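Stripped of the bookkeeping, the ID-request half of RIORouteRup() picks one of three replies: a unit already in the mapping table gets ROUTE_ALLOCATE (or ROUTE_FOAD if the host rebooted and must re-download it), a unit the host never booted gets ROUTE_FOAD, and a booted-but-unconfigured unit is parked with ROUTE_USED until the configurator claims it. A simplified decision skeleton, with hypothetical stand-in values for the opcodes:

#include <stdio.h>

enum route_reply { ROUTE_ALLOCATE, ROUTE_FOAD, ROUTE_USED };    /* stand-ins */

/* in_table:         unit has a SLOT_IN_USE/SLOT_TENTATIVE mapping entry
 * needs_redownload: entry is in use but we have no record of booting it
 * we_booted_it:     unit appears in the host's ExtraUnits list            */
static enum route_reply pick_reply(int in_table, int needs_redownload,
				   int we_booted_it)
{
	if (in_table)
		return needs_redownload ? ROUTE_FOAD : ROUTE_ALLOCATE;
	return we_booted_it ? ROUTE_USED : ROUTE_FOAD;
}

int main(void)
{
	printf("unknown but booted unit -> reply %d\n", pick_reply(0, 0, 1));
	return 0;
}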
-void
-RIOFixPhbs(p, HostP, unit)
+void RIOFixPhbs(p, HostP, unit)
struct rio_info *p;
struct Host *HostP;
uint unit;
{
- ushort link, port;
- struct Port *PortP;
+ ushort link, port;
+ struct Port *PortP;
unsigned long flags;
int PortN = HostP->Mapping[unit].SysPort;
- rio_dprintk (RIO_DEBUG_ROUTE, "RIOFixPhbs unit %d sysport %d\n", unit, PortN);
+ rio_dprintk(RIO_DEBUG_ROUTE, "RIOFixPhbs unit %d sysport %d\n", unit, PortN);
if (PortN != -1) {
- ushort dest_unit = HostP->Mapping[unit].ID2;
+ ushort dest_unit = HostP->Mapping[unit].ID2;
/*
- ** Get the link number used for the 1st 8 phbs on this unit.
- */
+ ** Get the link number used for the 1st 8 phbs on this unit.
+ */
PortP = p->RIOPortp[HostP->Mapping[dest_unit - 1].SysPort];
link = RWORD(PortP->PhbP->link);
for (port = 0; port < PORTS_PER_RTA; port++, PortN++) {
- ushort dest_port = port + 8;
+ ushort dest_port = port + 8;
#if 0
- uint PktInt;
+ uint PktInt;
#endif
- WORD *TxPktP;
- PKT *Pkt;
+ WORD *TxPktP;
+ PKT *Pkt;
PortP = p->RIOPortp[PortN];
rio_spin_lock_irqsave(&PortP->portSem, flags);
/*
- ** If RTA is not powered on, the tx packets will be
- ** unset, so go no further.
- */
+ ** If RTA is not powered on, the tx packets will be
+ ** unset, so go no further.
+ */
if (PortP->TxStart == 0) {
- rio_dprintk (RIO_DEBUG_ROUTE, "Tx pkts not set up yet\n");
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- break;
+ rio_dprintk(RIO_DEBUG_ROUTE, "Tx pkts not set up yet\n");
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ break;
}
/*
- ** For the second slot of a 16 port RTA, the driver needs to
- ** sort out the phb to port mappings. The dest_unit for this
- ** group of 8 phbs is set to the dest_unit of the accompanying
- ** 8 port block. The dest_port of the second unit is set to
- ** be in the range 8-15 (i.e. 8 is added). Thus, for a 16 port
- ** RTA with IDs 5 and 6, traffic bound for port 6 of unit 6
- ** (being the second map ID) will be sent to dest_unit 5, port
- ** 14. When this RTA is deleted, dest_unit for ID 6 will be
- ** restored, and the dest_port will be reduced by 8.
- ** Transmit packets also have a destination field which needs
- ** adjusting in the same manner.
- ** Note that the unit/port bytes in 'dest' are swapped.
- ** We also need to adjust the phb and rup link numbers for the
- ** second block of 8 ttys.
- */
+ ** For the second slot of a 16 port RTA, the driver needs to
+ ** sort out the phb to port mappings. The dest_unit for this
+ ** group of 8 phbs is set to the dest_unit of the accompanying
+ ** 8 port block. The dest_port of the second unit is set to
+ ** be in the range 8-15 (i.e. 8 is added). Thus, for a 16 port
+ ** RTA with IDs 5 and 6, traffic bound for port 6 of unit 6
+ ** (being the second map ID) will be sent to dest_unit 5, port
+ ** 14. When this RTA is deleted, dest_unit for ID 6 will be
+ ** restored, and the dest_port will be reduced by 8.
+ ** Transmit packets also have a destination field which needs
+ ** adjusting in the same manner.
+ ** Note that the unit/port bytes in 'dest' are swapped.
+ ** We also need to adjust the phb and rup link numbers for the
+ ** second block of 8 ttys.
+ */
for (TxPktP = PortP->TxStart; TxPktP <= PortP->TxEnd; TxPktP++) {
/*
- ** *TxPktP is the pointer to the transmit packet on the host
- ** card. This needs to be translated into a 32 bit pointer
- ** so it can be accessed from the driver.
- */
- Pkt = (PKT *) RIO_PTR(HostP->Caddr,RINDW(TxPktP));
+ ** *TxPktP is the pointer to the transmit packet on the host
+ ** card. This needs to be translated into a 32 bit pointer
+ ** so it can be accessed from the driver.
+ */
+ Pkt = (PKT *) RIO_PTR(HostP->Caddr, RINDW(TxPktP));
/*
- ** If the packet is used, reset it.
- */
- Pkt = (PKT *)((uint)Pkt & ~PKT_IN_USE);
+ ** If the packet is used, reset it.
+ */
+ Pkt = (PKT *) ((uint) Pkt & ~PKT_IN_USE);
WBYTE(Pkt->dest_unit, dest_unit);
WBYTE(Pkt->dest_port, dest_port);
}
- rio_dprintk (RIO_DEBUG_ROUTE, "phb dest: Old %x:%x New %x:%x\n",
- RWORD(PortP->PhbP->destination) & 0xff,
- (RWORD(PortP->PhbP->destination) >> 8) & 0xff,
- dest_unit, dest_port);
+ rio_dprintk(RIO_DEBUG_ROUTE, "phb dest: Old %x:%x New %x:%x\n", RWORD(PortP->PhbP->destination) & 0xff, (RWORD(PortP->PhbP->destination) >> 8) & 0xff, dest_unit, dest_port);
WWORD(PortP->PhbP->destination, dest_unit + (dest_port << 8));
WWORD(PortP->PhbP->link, link);
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
}
/*
- ** Now make sure the range of ports to be serviced includes
- ** the 2nd 8 on this 16 port RTA.
- */
- if (link > 3) return;
+ ** Now make sure the range of ports to be serviced includes
+ ** the 2nd 8 on this 16 port RTA.
+ */
+ if (link > 3)
+ return;
if (((unit * 8) + 7) > RWORD(HostP->LinkStrP[link].last_port)) {
- rio_dprintk (RIO_DEBUG_ROUTE, "last port on host link %d: %d\n", link, (unit * 8) + 7);
+ rio_dprintk(RIO_DEBUG_ROUTE, "last port on host link %d: %d\n", link, (unit * 8) + 7);
WWORD(HostP->LinkStrP[link].last_port, (unit * 8) + 7);
}
}
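The key write in RIOFixPhbs() is the 16-bit destination word: the unit id goes in the low byte and the port in the high byte, and for the second half of a 16-port RTA the port is the original port plus 8 while the unit becomes the first map ID. A small sketch of that encoding:

#include <stdint.h>
#include <stdio.h>

/* Build the phb destination word the way RIOFixPhbs() writes it:
 * unit in the low byte, port in the high byte. */
static uint16_t phb_destination(uint8_t dest_unit, uint8_t dest_port)
{
	return (uint16_t)(dest_unit + (dest_port << 8));
}

int main(void)
{
	/* A 16-port RTA mapped as IDs 5 and 6: traffic for port 6 of the
	 * second ID is redirected to unit 5, port 6 + 8 = 14. */
	printf("dest word = 0x%04x\n", phb_destination(5, 6 + 8));
	return 0;
}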
@@ -723,9 +615,8 @@ uint unit;
** the world about it. This is done to ensure that the configurator
** only gets up-to-date information about what is going on.
*/
-static int
-RIOCheckIsolated(p, HostP, UnitId)
-struct rio_info * p;
+static int RIOCheckIsolated(p, HostP, UnitId)
+struct rio_info *p;
struct Host *HostP;
uint UnitId;
{
@@ -733,16 +624,16 @@ uint UnitId;
rio_spin_lock_irqsave(&HostP->HostLock, flags);
#ifdef CHECK
- CheckHostP( HostP );
- CheckUnitId( UnitId );
+ CheckHostP(HostP);
+ CheckUnitId(UnitId);
#endif
- if ( RIOCheck( HostP, UnitId ) ) {
- rio_dprintk (RIO_DEBUG_ROUTE, "Unit %d is NOT isolated\n", UnitId);
+ if (RIOCheck(HostP, UnitId)) {
+ rio_dprintk(RIO_DEBUG_ROUTE, "Unit %d is NOT isolated\n", UnitId);
rio_spin_unlock_irqrestore(&HostP->HostLock, flags);
- return(0);
+ return (0);
}
- RIOIsolate(p, HostP, UnitId );
+ RIOIsolate(p, HostP, UnitId);
RIOSetChange(p);
rio_spin_unlock_irqrestore(&HostP->HostLock, flags);
return 1;
@@ -753,85 +644,83 @@ uint UnitId;
** all the units attached to it. This will mean that the entire
** subnet will re-introduce itself.
*/
-static int
-RIOIsolate(p, HostP, UnitId)
-struct rio_info * p;
-struct Host * HostP;
-uint UnitId;
+static int RIOIsolate(p, HostP, UnitId)
+struct rio_info *p;
+struct Host *HostP;
+uint UnitId;
{
uint link, unit;
#ifdef CHECK
- CheckHostP( HostP );
- CheckUnitId( UnitId );
+ CheckHostP(HostP);
+ CheckUnitId(UnitId);
#endif
UnitId--; /* this trick relies on the Unit Id being UNSIGNED! */
- if ( UnitId >= MAX_RUP ) /* dontcha just lurv unsigned maths! */
- return(0);
+ if (UnitId >= MAX_RUP) /* dontcha just lurv unsigned maths! */
+ return (0);
- if ( HostP->Mapping[UnitId].Flags & BEEN_HERE )
- return(0);
+ if (HostP->Mapping[UnitId].Flags & BEEN_HERE)
+ return (0);
HostP->Mapping[UnitId].Flags |= BEEN_HERE;
- if ( p->RIOPrintDisabled == DO_PRINT )
- rio_dprintk (RIO_DEBUG_ROUTE, "RIOMesgIsolated %s", HostP->Mapping[UnitId].Name);
+ if (p->RIOPrintDisabled == DO_PRINT)
+ rio_dprintk(RIO_DEBUG_ROUTE, "RIOMesgIsolated %s", HostP->Mapping[UnitId].Name);
- for ( link=0; link<LINKS_PER_UNIT; link++) {
+ for (link = 0; link < LINKS_PER_UNIT; link++) {
unit = HostP->Mapping[UnitId].Topology[link].Unit;
HostP->Mapping[UnitId].Topology[link].Unit = ROUTE_DISCONNECT;
HostP->Mapping[UnitId].Topology[link].Link = NO_LINK;
- RIOIsolate(p, HostP, unit );
+ RIOIsolate(p, HostP, unit);
}
HostP->Mapping[UnitId].Flags &= ~BEEN_HERE;
return 1;
}
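RIOIsolate() is a depth-first walk over the unit topology: every link of the starting unit is cut and the walk recurses into whatever was on the other end, with the BEEN_HERE flag guarding against loops in the wiring. A self-contained sketch of the same walk over a plain array; the sizes and the disconnect marker are illustrative:

#include <stdbool.h>

#define MAX_UNITS 16                   /* illustrative table size          */
#define LINKS     4                    /* LINKS_PER_UNIT in the driver     */
#define NO_UNIT   0xff                 /* stands in for ROUTE_DISCONNECT   */

struct unit_sketch {
	unsigned char link_to[LINKS];  /* unit id reached by each link     */
	bool visited;                  /* plays the role of BEEN_HERE      */
};

/* Cut every link of 'id' and of everything reachable from it; the visited
 * flag breaks cycles, and out-of-range ids (including NO_UNIT) are ignored,
 * much as RIOIsolate() ignores ids above MAX_RUP. */
static void isolate(struct unit_sketch *map, unsigned char id)
{
	int l;

	if (id >= MAX_UNITS || map[id].visited)
		return;
	map[id].visited = true;
	for (l = 0; l < LINKS; l++) {
		unsigned char next = map[id].link_to[l];
		map[id].link_to[l] = NO_UNIT;
		isolate(map, next);
	}
	map[id].visited = false;       /* the driver clears BEEN_HERE as well */
}

RIOCheck() below uses the same visited-flag trick in the other direction, asking whether any chain of links still leads back to the host.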
-static int
-RIOCheck(HostP, UnitId)
+static int RIOCheck(HostP, UnitId)
struct Host *HostP;
uint UnitId;
{
- unsigned char link;
+ unsigned char link;
#ifdef CHECK
- CheckHostP( HostP );
- CheckUnitId( UnitId );
+ CheckHostP(HostP);
+ CheckUnitId(UnitId);
#endif
/* rio_dprint(RIO_DEBUG_ROUTE, ("Check to see if unit %d has a route to the host\n",UnitId)); */
- rio_dprintk (RIO_DEBUG_ROUTE, "RIOCheck : UnitID = %d\n", UnitId);
+ rio_dprintk(RIO_DEBUG_ROUTE, "RIOCheck : UnitID = %d\n", UnitId);
- if ( UnitId == HOST_ID ) {
+ if (UnitId == HOST_ID) {
/* rio_dprint(RIO_DEBUG_ROUTE, ("Unit %d is NOT isolated - it IS the host!\n", UnitId)); */
return 1;
}
UnitId--;
- if ( UnitId >= MAX_RUP ) {
+ if (UnitId >= MAX_RUP) {
/* rio_dprint(RIO_DEBUG_ROUTE, ("Unit %d - ignored.\n", UnitId)); */
return 0;
}
- for ( link=0; link<LINKS_PER_UNIT; link++ ) {
- if ( HostP->Mapping[UnitId].Topology[link].Unit==HOST_ID ) {
+ for (link = 0; link < LINKS_PER_UNIT; link++) {
+ if (HostP->Mapping[UnitId].Topology[link].Unit == HOST_ID) {
/* rio_dprint(RIO_DEBUG_ROUTE, ("Unit %d is connected directly to host via link (%c).\n",
- UnitId, 'A'+link)); */
+ UnitId, 'A'+link)); */
return 1;
}
}
- if ( HostP->Mapping[UnitId].Flags & BEEN_HERE ) {
+ if (HostP->Mapping[UnitId].Flags & BEEN_HERE) {
/* rio_dprint(RIO_DEBUG_ROUTE, ("Been to Unit %d before - ignoring\n", UnitId)); */
return 0;
}
HostP->Mapping[UnitId].Flags |= BEEN_HERE;
- for ( link=0; link < LINKS_PER_UNIT; link++ ) {
+ for (link = 0; link < LINKS_PER_UNIT; link++) {
/* rio_dprint(RIO_DEBUG_ROUTE, ("Unit %d check link (%c)\n", UnitId,'A'+link)); */
- if ( RIOCheck( HostP, HostP->Mapping[UnitId].Topology[link].Unit ) ) {
+ if (RIOCheck(HostP, HostP->Mapping[UnitId].Topology[link].Unit)) {
/* rio_dprint(RIO_DEBUG_ROUTE, ("Unit %d is connected to something that knows the host via link (%c)\n", UnitId,link+'A')); */
HostP->Mapping[UnitId].Flags &= ~BEEN_HERE;
return 1;
@@ -841,7 +730,7 @@ uint UnitId;
HostP->Mapping[UnitId].Flags &= ~BEEN_HERE;
/* rio_dprint(RIO_DEBUG_ROUTE, ("Unit %d DOESNT KNOW THE HOST!\n", UnitId)); */
-
+
return 0;
}
@@ -849,61 +738,57 @@ uint UnitId;
** Returns the type of unit (host, 16/8 port RTA)
*/
-uint
-GetUnitType(Uniq)
+uint GetUnitType(Uniq)
uint Uniq;
{
- switch ( (Uniq >> 28) & 0xf)
- {
- case RIO_AT:
- case RIO_MCA:
- case RIO_EISA:
- case RIO_PCI:
- rio_dprintk (RIO_DEBUG_ROUTE, "Unit type: Host\n");
- return(TYPE_HOST);
- case RIO_RTA_16:
- rio_dprintk (RIO_DEBUG_ROUTE, "Unit type: 16 port RTA\n");
- return(TYPE_RTA16);
- case RIO_RTA:
- rio_dprintk (RIO_DEBUG_ROUTE, "Unit type: 8 port RTA\n");
- return(TYPE_RTA8);
- default :
- rio_dprintk (RIO_DEBUG_ROUTE, "Unit type: Unrecognised\n");
- return(99);
+ switch ((Uniq >> 28) & 0xf) {
+ case RIO_AT:
+ case RIO_MCA:
+ case RIO_EISA:
+ case RIO_PCI:
+ rio_dprintk(RIO_DEBUG_ROUTE, "Unit type: Host\n");
+ return (TYPE_HOST);
+ case RIO_RTA_16:
+ rio_dprintk(RIO_DEBUG_ROUTE, "Unit type: 16 port RTA\n");
+ return (TYPE_RTA16);
+ case RIO_RTA:
+ rio_dprintk(RIO_DEBUG_ROUTE, "Unit type: 8 port RTA\n");
+ return (TYPE_RTA8);
+ default:
+ rio_dprintk(RIO_DEBUG_ROUTE, "Unit type: Unrecognised\n");
+ return (99);
}
}
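GetUnitType() classifies a unit purely from the top nibble of its 32 bit unique number, so the same decode can be reused anywhere the unique number is to hand. An illustrative helper (the helper name is made up; RIO_EISA and RIO_RTA_16 are the product codes defined in riospace.h further down this diff):

	/* Illustrative only: the product code occupies bits 31..28 of the
	 * unique number, e.g. 0x9xxxxxxx decodes as RIO_RTA_16. */
	static inline unsigned int rio_product_code(unsigned int uniq)
	{
		return (uniq >> 28) & 0xf;
	}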
-int
-RIOSetChange(p)
-struct rio_info * p;
+int RIOSetChange(p)
+struct rio_info *p;
{
- if ( p->RIOQuickCheck != NOT_CHANGED )
- return(0);
+ if (p->RIOQuickCheck != NOT_CHANGED)
+ return (0);
p->RIOQuickCheck = CHANGED;
- if ( p->RIOSignalProcess ) {
- rio_dprintk (RIO_DEBUG_ROUTE, "Send SIG-HUP");
+ if (p->RIOSignalProcess) {
+ rio_dprintk(RIO_DEBUG_ROUTE, "Send SIG-HUP");
/*
- psignal( RIOSignalProcess, SIGHUP );
- */
+ psignal( RIOSignalProcess, SIGHUP );
+ */
}
- return(0);
+ return (0);
}
-static void
-RIOConCon(p, HostP, FromId, FromLink, ToId, ToLink, Change)
-struct rio_info * p;
+static void RIOConCon(p, HostP, FromId, FromLink, ToId, ToLink, Change)
+struct rio_info *p;
struct Host *HostP;
uint FromId;
uint FromLink;
uint ToId;
uint ToLink;
-int Change;
+int Change;
{
- char *FromName;
- char *FromType;
- char *ToName;
- char *ToType;
- unsigned int tp;
+ char *FromName;
+ char *FromType;
+ char *ToName;
+ char *ToType;
+ unsigned int tp;
/*
** 15.10.1998 ARG - ESIL 0759
@@ -932,38 +817,32 @@ int Change;
** rio_info struct - RIORtaDisCons (RIO RTA connections) keeps track of RTA
** connections and disconnections.
*/
- if (Change == CONNECT) {
- if (p->RIORtaDisCons) p->RIORtaDisCons--;
- }
- else {
+ if (Change == CONNECT) {
+ if (p->RIORtaDisCons)
+ p->RIORtaDisCons--;
+ } else {
p->RIORtaDisCons++;
- }
+ }
- if ( p->RIOPrintDisabled == DONT_PRINT )
+ if (p->RIOPrintDisabled == DONT_PRINT)
return;
- if ( FromId > ToId ) {
+ if (FromId > ToId) {
tp = FromId;
FromId = ToId;
ToId = tp;
tp = FromLink;
FromLink = ToLink;
ToLink = tp;
- }
-
- FromName = FromId ? HostP->Mapping[FromId-1].Name : HostP->Name;
- FromType = FromId ? "RTA" : "HOST";
- ToName = ToId ? HostP->Mapping[ToId-1].Name : HostP->Name;
- ToType = ToId ? "RTA" : "HOST";
-
- rio_dprintk (RIO_DEBUG_ROUTE, "Link between %s '%s' (%c) and %s '%s' (%c) %s.\n",
- FromType, FromName, 'A'+FromLink,
- ToType, ToName, 'A'+ToLink,
- (Change==CONNECT) ? "established" : "disconnected");
- cprintf("Link between %s '%s' (%c) and %s '%s' (%c) %s.\n",
- FromType, FromName, 'A'+FromLink,
- ToType, ToName, 'A'+ToLink,
- (Change==CONNECT) ? "established" : "disconnected");
+ }
+
+ FromName = FromId ? HostP->Mapping[FromId - 1].Name : HostP->Name;
+ FromType = FromId ? "RTA" : "HOST";
+ ToName = ToId ? HostP->Mapping[ToId - 1].Name : HostP->Name;
+ ToType = ToId ? "RTA" : "HOST";
+
+ rio_dprintk(RIO_DEBUG_ROUTE, "Link between %s '%s' (%c) and %s '%s' (%c) %s.\n", FromType, FromName, 'A' + FromLink, ToType, ToName, 'A' + ToLink, (Change == CONNECT) ? "established" : "disconnected");
+ cprintf("Link between %s '%s' (%c) and %s '%s' (%c) %s.\n", FromType, FromName, 'A' + FromLink, ToType, ToName, 'A' + ToLink, (Change == CONNECT) ? "established" : "disconnected");
}
/*
@@ -972,24 +851,21 @@ int Change;
** Delete an RTA entry from the saved table given to us
** by the configuration program.
*/
-static int
-RIORemoveFromSavedTable(struct rio_info *p, struct Map *pMap)
+static int RIORemoveFromSavedTable(struct rio_info *p, struct Map *pMap)
{
- int entry;
-
- /*
- ** We loop for all entries even after finding an entry and
- ** zeroing it because we may have two entries to delete if
- ** it's a 16 port RTA.
- */
- for (entry = 0; entry < TOTAL_MAP_ENTRIES; entry++)
- {
- if (p->RIOSavedTable[entry].RtaUniqueNum == pMap->RtaUniqueNum)
- {
- bzero((caddr_t)&p->RIOSavedTable[entry], sizeof(struct Map));
+ int entry;
+
+ /*
+ ** We loop for all entries even after finding an entry and
+ ** zeroing it because we may have two entries to delete if
+ ** it's a 16 port RTA.
+ */
+ for (entry = 0; entry < TOTAL_MAP_ENTRIES; entry++) {
+ if (p->RIOSavedTable[entry].RtaUniqueNum == pMap->RtaUniqueNum) {
+ bzero((caddr_t) & p->RIOSavedTable[entry], sizeof(struct Map));
+ }
}
- }
- return 0;
+ return 0;
}
@@ -999,64 +875,58 @@ RIORemoveFromSavedTable(struct rio_info *p, struct Map *pMap)
** Scan the unit's links and return zero if the unit is completely
** disconnected.
*/
-static int
-RIOFreeDisconnected(struct rio_info *p, struct Host *HostP, int unit)
+static int RIOFreeDisconnected(struct rio_info *p, struct Host *HostP, int unit)
{
- int link;
-
-
- rio_dprintk (RIO_DEBUG_ROUTE, "RIOFreeDisconnect unit %d\n", unit);
- /*
- ** If the slot is tentative and does not belong to the
- ** second half of a 16 port RTA then scan to see if
- ** is disconnected.
- */
- for (link = 0; link < LINKS_PER_UNIT; link++)
- {
- if (HostP->Mapping[unit].Topology[link].Unit != ROUTE_DISCONNECT)
- break;
- }
-
- /*
- ** If not all links are disconnected then we can forget about it.
- */
- if (link < LINKS_PER_UNIT)
- return 1;
+ int link;
+
+
+ rio_dprintk(RIO_DEBUG_ROUTE, "RIOFreeDisconnect unit %d\n", unit);
+ /*
+ ** If the slot is tentative and does not belong to the
+ ** second half of a 16 port RTA then scan to see if
+ ** is disconnected.
+ */
+ for (link = 0; link < LINKS_PER_UNIT; link++) {
+ if (HostP->Mapping[unit].Topology[link].Unit != ROUTE_DISCONNECT)
+ break;
+ }
+
+ /*
+ ** If not all links are disconnected then we can forget about it.
+ */
+ if (link < LINKS_PER_UNIT)
+ return 1;
#ifdef NEED_TO_FIX_THIS
- /* Ok so all the links are disconnected. But we may have only just
- ** made this slot tentative and not yet received a topology update.
- ** Lets check how long ago we made it tentative.
- */
- rio_dprintk (RIO_DEBUG_ROUTE, "Just about to check LBOLT on entry %d\n", unit);
- if (drv_getparm(LBOLT, (ulong_t *) &current_time))
- rio_dprintk (RIO_DEBUG_ROUTE, "drv_getparm(LBOLT,....) Failed.\n");
-
- elapse_time = current_time - TentTime[unit];
- rio_dprintk (RIO_DEBUG_ROUTE, "elapse %d = current %d - tent %d (%d usec)\n",
- elapse_time, current_time, TentTime[unit], drv_hztousec(elapse_time));
- if (drv_hztousec(elapse_time) < WAIT_TO_FINISH)
- {
- rio_dprintk (RIO_DEBUG_ROUTE, "Skipping slot %d, not timed out yet %d\n",
- unit, drv_hztousec(elapse_time));
- return 1;
- }
+ /* Ok so all the links are disconnected. But we may have only just
+ ** made this slot tentative and not yet received a topology update.
+ ** Let's check how long ago we made it tentative.
+ */
+ rio_dprintk(RIO_DEBUG_ROUTE, "Just about to check LBOLT on entry %d\n", unit);
+ if (drv_getparm(LBOLT, (ulong_t *) & current_time))
+ rio_dprintk(RIO_DEBUG_ROUTE, "drv_getparm(LBOLT,....) Failed.\n");
+
+ elapse_time = current_time - TentTime[unit];
+ rio_dprintk(RIO_DEBUG_ROUTE, "elapse %d = current %d - tent %d (%d usec)\n", elapse_time, current_time, TentTime[unit], drv_hztousec(elapse_time));
+ if (drv_hztousec(elapse_time) < WAIT_TO_FINISH) {
+ rio_dprintk(RIO_DEBUG_ROUTE, "Skipping slot %d, not timed out yet %d\n", unit, drv_hztousec(elapse_time));
+ return 1;
+ }
#endif
- /*
- ** We have found an usable slot.
- ** If it is half of a 16 port RTA then delete the other half.
- */
- if (HostP->Mapping[unit].ID2 != 0)
- {
- int nOther = (HostP->Mapping[unit].ID2) -1;
-
- rio_dprintk (RIO_DEBUG_ROUTE, "RioFreedis second slot %d.\n", nOther);
- bzero((caddr_t)&HostP->Mapping[nOther], sizeof(struct Map));
- }
- RIORemoveFromSavedTable(p, &HostP->Mapping[unit]);
+ /*
+ ** We have found a usable slot.
+ ** If it is half of a 16 port RTA then delete the other half.
+ */
+ if (HostP->Mapping[unit].ID2 != 0) {
+ int nOther = (HostP->Mapping[unit].ID2) - 1;
+
+ rio_dprintk(RIO_DEBUG_ROUTE, "RioFreedis second slot %d.\n", nOther);
+ bzero((caddr_t) & HostP->Mapping[nOther], sizeof(struct Map));
+ }
+ RIORemoveFromSavedTable(p, &HostP->Mapping[unit]);
- return 0;
+ return 0;
}
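A tentative slot may only be reclaimed once every one of its links reads ROUTE_DISCONNECT; that is what the scan-then-test idiom at the top of RIOFreeDisconnected() establishes before the saved table entry is zeroed. The test on its own, as a sketch against the driver's struct Map (constants as defined in the rio headers):

	/* Non-zero when no link of this map entry is still cabled. */
	static int all_links_disconnected(const struct Map *m)
	{
		int link;

		for (link = 0; link < LINKS_PER_UNIT; link++)
			if (m->Topology[link].Unit != ROUTE_DISCONNECT)
				return 0;	/* at least one live link remains */
		return 1;
	}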
@@ -1066,150 +936,134 @@ RIOFreeDisconnected(struct rio_info *p, struct Host *HostP, int unit)
** This function scans the given host table for either one
** or two free unit ID's.
*/
-int
-RIOFindFreeID(struct rio_info *p, struct Host *HostP, uint *pID1, uint *pID2)
+int RIOFindFreeID(struct rio_info *p, struct Host *HostP, uint * pID1, uint * pID2)
{
- int unit,tempID;
-
- /*
- ** Initialise the ID's to MAX_RUP.
- ** We do this to make the loop for setting the ID's as simple as
- ** possible.
- */
- *pID1 = MAX_RUP;
- if (pID2 != NULL)
- *pID2 = MAX_RUP;
-
- /*
- ** Scan all entries of the host mapping table for free slots.
- ** We scan for free slots first and then if that is not successful
- ** we start all over again looking for tentative slots we can re-use.
- */
- for (unit = 0; unit < MAX_RUP; unit++)
- {
- rio_dprintk (RIO_DEBUG_ROUTE, "Scanning unit %d\n",unit);
+ int unit, tempID;
+
/*
- ** If the flags are zero then the slot is empty.
- */
- if (HostP->Mapping[unit].Flags == 0)
- {
- rio_dprintk (RIO_DEBUG_ROUTE, " This slot is empty.\n");
- /*
- ** If we haven't allocated the first ID then do it now.
- */
- if (*pID1 == MAX_RUP)
- {
- rio_dprintk (RIO_DEBUG_ROUTE, "Make tentative entry for first unit %d\n", unit);
- *pID1 = unit;
+ ** Initialise the ID's to MAX_RUP.
+ ** We do this to make the loop for setting the ID's as simple as
+ ** possible.
+ */
+ *pID1 = MAX_RUP;
+ if (pID2 != NULL)
+ *pID2 = MAX_RUP;
+ /*
+ ** Scan all entries of the host mapping table for free slots.
+ ** We scan for free slots first and then if that is not successful
+ ** we start all over again looking for tentative slots we can re-use.
+ */
+ for (unit = 0; unit < MAX_RUP; unit++) {
+ rio_dprintk(RIO_DEBUG_ROUTE, "Scanning unit %d\n", unit);
/*
- ** If the second ID is not needed then we can return
- ** now.
- */
- if (pID2 == NULL)
- return 0;
- }
- else
- {
- /*
- ** Allocate the second slot and return.
- */
- rio_dprintk (RIO_DEBUG_ROUTE, "Make tentative entry for second unit %d\n", unit);
- *pID2 = unit;
- return 0;
- }
+ ** If the flags are zero then the slot is empty.
+ */
+ if (HostP->Mapping[unit].Flags == 0) {
+ rio_dprintk(RIO_DEBUG_ROUTE, " This slot is empty.\n");
+ /*
+ ** If we haven't allocated the first ID then do it now.
+ */
+ if (*pID1 == MAX_RUP) {
+ rio_dprintk(RIO_DEBUG_ROUTE, "Make tentative entry for first unit %d\n", unit);
+ *pID1 = unit;
+
+ /*
+ ** If the second ID is not needed then we can return
+ ** now.
+ */
+ if (pID2 == NULL)
+ return 0;
+ } else {
+ /*
+ ** Allocate the second slot and return.
+ */
+ rio_dprintk(RIO_DEBUG_ROUTE, "Make tentative entry for second unit %d\n", unit);
+ *pID2 = unit;
+ return 0;
+ }
+ }
}
- }
-
- /*
- ** If we manage to come out of the free slot loop then we
- ** need to start all over again looking for tentative slots
- ** that we can re-use.
- */
- rio_dprintk (RIO_DEBUG_ROUTE, "Starting to scan for tentative slots\n");
- for (unit = 0; unit < MAX_RUP; unit++)
- {
- if (((HostP->Mapping[unit].Flags & SLOT_TENTATIVE) ||
- (HostP->Mapping[unit].Flags == 0)) && !
- (HostP->Mapping[unit].Flags & RTA16_SECOND_SLOT ))
- {
- rio_dprintk (RIO_DEBUG_ROUTE, " Slot %d looks promising.\n",unit);
-
- if(unit == *pID1)
- {
- rio_dprintk (RIO_DEBUG_ROUTE, " No it isn't, its the 1st half\n");
- continue;
- }
-
- /*
- ** Slot is Tentative or Empty, but not a tentative second
- ** slot of a 16 porter.
- ** Attempt to free up this slot (and its parnter if
- ** it is a 16 port slot. The second slot will become
- ** empty after a call to RIOFreeDisconnected so thats why
- ** we look for empty slots above as well).
- */
- if (HostP->Mapping[unit].Flags != 0)
- if (RIOFreeDisconnected(p, HostP, unit) != 0)
- continue;
- /*
- ** If we haven't allocated the first ID then do it now.
- */
- if (*pID1 == MAX_RUP)
- {
- rio_dprintk (RIO_DEBUG_ROUTE, "Grab tentative entry for first unit %d\n", unit);
- *pID1 = unit;
- /*
- ** Clear out this slot now that we intend to use it.
- */
- bzero(&HostP->Mapping[unit], sizeof(struct Map));
+ /*
+ ** If we manage to come out of the free slot loop then we
+ ** need to start all over again looking for tentative slots
+ ** that we can re-use.
+ */
+ rio_dprintk(RIO_DEBUG_ROUTE, "Starting to scan for tentative slots\n");
+ for (unit = 0; unit < MAX_RUP; unit++) {
+ if (((HostP->Mapping[unit].Flags & SLOT_TENTATIVE) || (HostP->Mapping[unit].Flags == 0)) && !(HostP->Mapping[unit].Flags & RTA16_SECOND_SLOT)) {
+ rio_dprintk(RIO_DEBUG_ROUTE, " Slot %d looks promising.\n", unit);
+
+ if (unit == *pID1) {
+ rio_dprintk(RIO_DEBUG_ROUTE, " No it isn't, its the 1st half\n");
+ continue;
+ }
- /*
- ** If the second ID is not needed then we can return
- ** now.
- */
- if (pID2 == NULL)
- return 0;
- }
- else
- {
- /*
- ** Allocate the second slot and return.
- */
- rio_dprintk (RIO_DEBUG_ROUTE, "Grab tentative/empty entry for second unit %d\n",
- unit);
- *pID2 = unit;
+ /*
+ ** Slot is Tentative or Empty, but not a tentative second
+ ** slot of a 16 porter.
+ ** Attempt to free up this slot (and its partner if
+ ** it is a 16 port slot. The second slot will become
+ ** empty after a call to RIOFreeDisconnected so that's why
+ ** we look for empty slots above as well).
+ */
+ if (HostP->Mapping[unit].Flags != 0)
+ if (RIOFreeDisconnected(p, HostP, unit) != 0)
+ continue;
+ /*
+ ** If we haven't allocated the first ID then do it now.
+ */
+ if (*pID1 == MAX_RUP) {
+ rio_dprintk(RIO_DEBUG_ROUTE, "Grab tentative entry for first unit %d\n", unit);
+ *pID1 = unit;
- /*
- ** Clear out this slot now that we intend to use it.
- */
- bzero(&HostP->Mapping[unit], sizeof(struct Map));
-
- /* At this point under the right(wrong?) conditions
- ** we may have a first unit ID being higher than the
- ** second unit ID. This is a bad idea if we are about
- ** to fill the slots with a 16 port RTA.
- ** Better check and swap them over.
- */
-
- if (*pID1 > *pID2)
- {
- rio_dprintk (RIO_DEBUG_ROUTE, "Swapping IDS %d %d\n", *pID1, *pID2);
- tempID = *pID1;
- *pID1 = *pID2;
- *pID2 = tempID;
+ /*
+ ** Clear out this slot now that we intend to use it.
+ */
+ bzero(&HostP->Mapping[unit], sizeof(struct Map));
+
+ /*
+ ** If the second ID is not needed then we can return
+ ** now.
+ */
+ if (pID2 == NULL)
+ return 0;
+ } else {
+ /*
+ ** Allocate the second slot and return.
+ */
+ rio_dprintk(RIO_DEBUG_ROUTE, "Grab tentative/empty entry for second unit %d\n", unit);
+ *pID2 = unit;
+
+ /*
+ ** Clear out this slot now that we intend to use it.
+ */
+ bzero(&HostP->Mapping[unit], sizeof(struct Map));
+
+ /* At this point under the right(wrong?) conditions
+ ** we may have a first unit ID being higher than the
+ ** second unit ID. This is a bad idea if we are about
+ ** to fill the slots with a 16 port RTA.
+ ** Better check and swap them over.
+ */
+
+ if (*pID1 > *pID2) {
+ rio_dprintk(RIO_DEBUG_ROUTE, "Swapping IDS %d %d\n", *pID1, *pID2);
+ tempID = *pID1;
+ *pID1 = *pID2;
+ *pID2 = tempID;
+ }
+ return 0;
+ }
}
- return 0;
- }
}
- }
- /*
- ** If we manage to get to the end of the second loop then we
- ** can give up and return a failure.
- */
- return 1;
+ /*
+ ** If we manage to get to the end of the second loop then we
+ ** can give up and return a failure.
+ */
+ return 1;
}
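RIOFindFreeID() is a two pass allocator: the first sweep takes completely empty slots (Flags == 0), the second is allowed to evict tentative slots whose links are all disconnected, and if the pair ends up out of order it is swapped so a 16 port RTA always gets its lower ID first. A hedged sketch of just the first pass and the ordering rule, with made-up names and a plain flags array standing in for HostP->Mapping[]:

	/* Return 0 and fill id1/id2 with the two lowest free slots, or
	 * non-zero if fewer than two are free; n stands in for MAX_RUP. */
	static int find_two_free(const unsigned int *flags, unsigned int n,
				 unsigned int *id1, unsigned int *id2)
	{
		unsigned int i, tmp;

		*id1 = *id2 = n;
		for (i = 0; i < n && *id2 == n; i++) {
			if (flags[i] != 0)
				continue;
			if (*id1 == n)
				*id1 = i;
			else
				*id2 = i;
		}
		if (*id1 > *id2) {	/* cannot happen in this pass; kept to
					   mirror the swap in the reuse pass */
			tmp = *id1;
			*id1 = *id2;
			*id2 = tmp;
		}
		return *id2 == n;
	}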
diff --git a/drivers/char/rio/riospace.h b/drivers/char/rio/riospace.h
index 32b09b0f23a..534f1f5b9f5 100644
--- a/drivers/char/rio/riospace.h
+++ b/drivers/char/rio/riospace.h
@@ -47,9 +47,8 @@ static char *_riospace_h_sccs_ = "@(#)riospace.h 1.2";
** In particular, it won't be able to see changes to RIO_SLOTS
*/
-struct Conf
-{
- char Locator[24];
+struct Conf {
+ char Locator[24];
unsigned int StartupTime;
unsigned int SlowCook;
unsigned int IntrPollTime;
@@ -59,8 +58,8 @@ struct Conf
unsigned int HostLoadBase;
unsigned int XpHz;
unsigned int XpCps;
- char *XpOn;
- char *XpOff;
+ char *XpOn;
+ char *XpOff;
unsigned int MaxXpCps;
unsigned int MinXpCps;
unsigned int SpinCmds;
@@ -74,7 +73,7 @@ struct Conf
/*
** Board types - these MUST correspond to product codes!
-*/
+*/
#define RIO_EMPTY 0x0
#define RIO_EISA 0x3
#define RIO_RTA_16 0x9
@@ -86,18 +85,16 @@ struct Conf
/*
** Board data structure. This is used for configuration info
*/
-struct Brd
-{
- unsigned char Type; /* RIO_EISA, RIO_MCA, RIO_AT, RIO_EMPTY... */
- unsigned char Ivec; /* POLLED or ivec number */
- unsigned char Mode; /* Control stuff, see below */
+struct Brd {
+ unsigned char Type; /* RIO_EISA, RIO_MCA, RIO_AT, RIO_EMPTY... */
+ unsigned char Ivec; /* POLLED or ivec number */
+ unsigned char Mode; /* Control stuff, see below */
};
-struct Board
-{
- char Locator[RIO_LOCATOR_LEN];
- int NumSlots;
- struct Brd Boards[MAX_RIO_BOARDS];
+struct Board {
+ char Locator[RIO_LOCATOR_LEN];
+ int NumSlots;
+ struct Brd Boards[MAX_RIO_BOARDS];
};
#define BOOT_FROM_LINK 0x00
@@ -158,4 +155,4 @@ struct Board
#define DBG_ALWAYS 0x80000000
-#endif /* __rio_riospace_h__ */
+#endif /* __rio_riospace_h__ */
diff --git a/drivers/char/rio/riotable.c b/drivers/char/rio/riotable.c
index e45bc275907..42c3dffcbbb 100644
--- a/drivers/char/rio/riotable.c
+++ b/drivers/char/rio/riotable.c
@@ -91,9 +91,8 @@ static char *_riotable_c_sccs_ = "@(#)riotable.c 1.2";
** A configuration table has been loaded. It is now up to us
** to sort it out and use the information contained therein.
*/
-int
-RIONewTable(p)
-struct rio_info * p;
+int RIONewTable(p)
+struct rio_info *p;
{
int Host, Host1, Host2, NameIsUnique, Entry, SubEnt;
struct Map *MapP;
@@ -103,26 +102,26 @@ struct rio_info * p;
char *cptr;
/*
- ** We have been sent a new table to install. We need to break
- ** it down into little bits and spread it around a bit to see
- ** what we have got.
- */
+ ** We have been sent a new table to install. We need to break
+ ** it down into little bits and spread it around a bit to see
+ ** what we have got.
+ */
/*
- ** Things to check:
- ** (things marked 'xx' aren't checked any more!)
- ** (1) That there are no booted Hosts/RTAs out there.
- ** (2) That the names are properly formed
- ** (3) That blank entries really are.
- ** xx (4) That hosts mentioned in the table actually exist. xx
- ** (5) That the IDs are unique (per host).
- ** (6) That host IDs are zero
- ** (7) That port numbers are valid
- ** (8) That port numbers aren't duplicated
- ** (9) That names aren't duplicated
- ** xx (10) That hosts that actually exist are mentioned in the table. xx
- */
- rio_dprintk (RIO_DEBUG_TABLE, "RIONewTable: entering(1)\n");
- if ( p->RIOSystemUp ) { /* (1) */
+ ** Things to check:
+ ** (things marked 'xx' aren't checked any more!)
+ ** (1) That there are no booted Hosts/RTAs out there.
+ ** (2) That the names are properly formed
+ ** (3) That blank entries really are.
+ ** xx (4) That hosts mentioned in the table actually exist. xx
+ ** (5) That the IDs are unique (per host).
+ ** (6) That host IDs are zero
+ ** (7) That port numbers are valid
+ ** (8) That port numbers aren't duplicated
+ ** (9) That names aren't duplicated
+ ** xx (10) That hosts that actually exist are mentioned in the table. xx
+ */
+ rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(1)\n");
+ if (p->RIOSystemUp) { /* (1) */
p->RIOError.Error = HOST_HAS_ALREADY_BEEN_BOOTED;
return -EBUSY;
}
@@ -131,19 +130,19 @@ struct rio_info * p;
p->RIOError.Entry = -1;
p->RIOError.Other = -1;
- for ( Entry=0; Entry<TOTAL_MAP_ENTRIES; Entry++ ) {
+ for (Entry = 0; Entry < TOTAL_MAP_ENTRIES; Entry++) {
MapP = &p->RIOConnectTable[Entry];
if ((MapP->Flags & RTA16_SECOND_SLOT) == 0) {
- rio_dprintk (RIO_DEBUG_TABLE, "RIONewTable: entering(2)\n");
- cptr = MapP->Name; /* (2) */
- cptr[MAX_NAME_LEN-1]='\0';
- if ( cptr[0]=='\0' ) {
- bcopy(MapP->RtaUniqueNum?"RTA NN":"HOST NN",MapP->Name,8);
- MapP->Name[5] = '0'+Entry/10;
- MapP->Name[6] = '0'+Entry%10;
+ rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(2)\n");
+ cptr = MapP->Name; /* (2) */
+ cptr[MAX_NAME_LEN - 1] = '\0';
+ if (cptr[0] == '\0') {
+ bcopy(MapP->RtaUniqueNum ? "RTA NN" : "HOST NN", MapP->Name, 8);
+ MapP->Name[5] = '0' + Entry / 10;
+ MapP->Name[6] = '0' + Entry % 10;
}
- while ( *cptr ) {
- if ( *cptr<' ' || *cptr>'~' ) {
+ while (*cptr) {
+ if (*cptr < ' ' || *cptr > '~') {
p->RIOError.Error = BAD_CHARACTER_IN_NAME;
p->RIOError.Entry = Entry;
return -ENXIO;
@@ -153,133 +152,119 @@ struct rio_info * p;
}
/*
- ** If the entry saved was a tentative entry then just forget
- ** about it.
- */
- if ( MapP->Flags & SLOT_TENTATIVE ) {
+ ** If the entry saved was a tentative entry then just forget
+ ** about it.
+ */
+ if (MapP->Flags & SLOT_TENTATIVE) {
MapP->HostUniqueNum = 0;
MapP->RtaUniqueNum = 0;
continue;
}
- rio_dprintk (RIO_DEBUG_TABLE, "RIONewTable: entering(3)\n");
- if ( !MapP->RtaUniqueNum && !MapP->HostUniqueNum ) { /* (3) */
- if ( MapP->ID || MapP->SysPort || MapP->Flags ) {
- rio_dprintk (RIO_DEBUG_TABLE, "%s pretending to be empty but isn't\n",MapP->Name);
+ rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(3)\n");
+ if (!MapP->RtaUniqueNum && !MapP->HostUniqueNum) { /* (3) */
+ if (MapP->ID || MapP->SysPort || MapP->Flags) {
+ rio_dprintk(RIO_DEBUG_TABLE, "%s pretending to be empty but isn't\n", MapP->Name);
p->RIOError.Error = TABLE_ENTRY_ISNT_PROPERLY_NULL;
p->RIOError.Entry = Entry;
return -ENXIO;
}
- rio_dprintk (RIO_DEBUG_TABLE, "!RIO: Daemon: test (3) passes\n");
+ rio_dprintk(RIO_DEBUG_TABLE, "!RIO: Daemon: test (3) passes\n");
continue;
}
- rio_dprintk (RIO_DEBUG_TABLE, "RIONewTable: entering(4)\n");
- for ( Host=0; Host<p->RIONumHosts; Host++ ) { /* (4) */
- if ( p->RIOHosts[Host].UniqueNum==MapP->HostUniqueNum ) {
+ rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(4)\n");
+ for (Host = 0; Host < p->RIONumHosts; Host++) { /* (4) */
+ if (p->RIOHosts[Host].UniqueNum == MapP->HostUniqueNum) {
HostP = &p->RIOHosts[Host];
/*
- ** having done the lookup, we don't really want to do
- ** it again, so hang the host number in a safe place
- */
+ ** having done the lookup, we don't really want to do
+ ** it again, so hang the host number in a safe place
+ */
MapP->Topology[0].Unit = Host;
break;
}
}
- if ( Host >= p->RIONumHosts ) {
- rio_dprintk (RIO_DEBUG_TABLE, "RTA %s has unknown host unique number 0x%x\n",
- MapP->Name, MapP->HostUniqueNum);
+ if (Host >= p->RIONumHosts) {
+ rio_dprintk(RIO_DEBUG_TABLE, "RTA %s has unknown host unique number 0x%x\n", MapP->Name, MapP->HostUniqueNum);
MapP->HostUniqueNum = 0;
- /* MapP->RtaUniqueNum = 0; */
- /* MapP->ID = 0; */
- /* MapP->Flags = 0; */
- /* MapP->SysPort = 0; */
- /* MapP->Name[0] = 0; */
+ /* MapP->RtaUniqueNum = 0; */
+ /* MapP->ID = 0; */
+ /* MapP->Flags = 0; */
+ /* MapP->SysPort = 0; */
+ /* MapP->Name[0] = 0; */
continue;
}
- rio_dprintk (RIO_DEBUG_TABLE, "RIONewTable: entering(5)\n");
- if ( MapP->RtaUniqueNum ) { /* (5) */
- if ( !MapP->ID ) {
- rio_dprintk (RIO_DEBUG_TABLE, "RIO: RTA %s has been allocated an ID of zero!\n",
- MapP->Name);
- p->RIOError.Error = ZERO_RTA_ID;
+ rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(5)\n");
+ if (MapP->RtaUniqueNum) { /* (5) */
+ if (!MapP->ID) {
+ rio_dprintk(RIO_DEBUG_TABLE, "RIO: RTA %s has been allocated an ID of zero!\n", MapP->Name);
+ p->RIOError.Error = ZERO_RTA_ID;
p->RIOError.Entry = Entry;
return -ENXIO;
}
- if ( MapP->ID > MAX_RUP ) {
- rio_dprintk (RIO_DEBUG_TABLE, "RIO: RTA %s has been allocated an invalid ID %d\n",
- MapP->Name, MapP->ID);
+ if (MapP->ID > MAX_RUP) {
+ rio_dprintk(RIO_DEBUG_TABLE, "RIO: RTA %s has been allocated an invalid ID %d\n", MapP->Name, MapP->ID);
p->RIOError.Error = ID_NUMBER_OUT_OF_RANGE;
p->RIOError.Entry = Entry;
return -ENXIO;
}
- for ( SubEnt=0; SubEnt<Entry; SubEnt++ ) {
- if ( MapP->HostUniqueNum ==
- p->RIOConnectTable[SubEnt].HostUniqueNum &&
- MapP->ID == p->RIOConnectTable[SubEnt].ID ) {
- rio_dprintk (RIO_DEBUG_TABLE, "Dupl. ID number allocated to RTA %s and RTA %s\n",
- MapP->Name, p->RIOConnectTable[SubEnt].Name);
+ for (SubEnt = 0; SubEnt < Entry; SubEnt++) {
+ if (MapP->HostUniqueNum == p->RIOConnectTable[SubEnt].HostUniqueNum && MapP->ID == p->RIOConnectTable[SubEnt].ID) {
+ rio_dprintk(RIO_DEBUG_TABLE, "Dupl. ID number allocated to RTA %s and RTA %s\n", MapP->Name, p->RIOConnectTable[SubEnt].Name);
p->RIOError.Error = DUPLICATED_RTA_ID;
p->RIOError.Entry = Entry;
p->RIOError.Other = SubEnt;
return -ENXIO;
}
/*
- ** If the RtaUniqueNum is the same, it may be looking at both
- ** entries for a 16 port RTA, so check the ids
- */
- if ((MapP->RtaUniqueNum ==
- p->RIOConnectTable[SubEnt].RtaUniqueNum)
- && (MapP->ID2 != p->RIOConnectTable[SubEnt].ID)) {
- rio_dprintk (RIO_DEBUG_TABLE, "RTA %s has duplicate unique number\n",MapP->Name);
- rio_dprintk (RIO_DEBUG_TABLE, "RTA %s has duplicate unique number\n",
- p->RIOConnectTable[SubEnt].Name);
+ ** If the RtaUniqueNum is the same, it may be looking at both
+ ** entries for a 16 port RTA, so check the ids
+ */
+ if ((MapP->RtaUniqueNum == p->RIOConnectTable[SubEnt].RtaUniqueNum)
+ && (MapP->ID2 != p->RIOConnectTable[SubEnt].ID)) {
+ rio_dprintk(RIO_DEBUG_TABLE, "RTA %s has duplicate unique number\n", MapP->Name);
+ rio_dprintk(RIO_DEBUG_TABLE, "RTA %s has duplicate unique number\n", p->RIOConnectTable[SubEnt].Name);
p->RIOError.Error = DUPLICATE_UNIQUE_NUMBER;
p->RIOError.Entry = Entry;
p->RIOError.Other = SubEnt;
return -ENXIO;
}
}
- rio_dprintk (RIO_DEBUG_TABLE, "RIONewTable: entering(7a)\n");
+ rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(7a)\n");
/* (7a) */
- if ((MapP->SysPort != NO_PORT)&&(MapP->SysPort % PORTS_PER_RTA)) {
- rio_dprintk (RIO_DEBUG_TABLE, "TTY Port number %d-RTA %s is not a multiple of %d!\n",
- (int)MapP->SysPort,MapP->Name, PORTS_PER_RTA);
+ if ((MapP->SysPort != NO_PORT) && (MapP->SysPort % PORTS_PER_RTA)) {
+ rio_dprintk(RIO_DEBUG_TABLE, "TTY Port number %d-RTA %s is not a multiple of %d!\n", (int) MapP->SysPort, MapP->Name, PORTS_PER_RTA);
p->RIOError.Error = TTY_NUMBER_OUT_OF_RANGE;
p->RIOError.Entry = Entry;
return -ENXIO;
}
- rio_dprintk (RIO_DEBUG_TABLE, "RIONewTable: entering(7b)\n");
+ rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(7b)\n");
/* (7b) */
- if ((MapP->SysPort != NO_PORT)&&(MapP->SysPort >= RIO_PORTS)) {
- rio_dprintk (RIO_DEBUG_TABLE, "TTY Port number %d for RTA %s is too big\n",
- (int)MapP->SysPort, MapP->Name);
+ if ((MapP->SysPort != NO_PORT) && (MapP->SysPort >= RIO_PORTS)) {
+ rio_dprintk(RIO_DEBUG_TABLE, "TTY Port number %d for RTA %s is too big\n", (int) MapP->SysPort, MapP->Name);
p->RIOError.Error = TTY_NUMBER_OUT_OF_RANGE;
p->RIOError.Entry = Entry;
return -ENXIO;
}
- for ( SubEnt=0; SubEnt<Entry; SubEnt++ ) {
- if ( p->RIOConnectTable[SubEnt].Flags & RTA16_SECOND_SLOT )
- continue;
- if ( p->RIOConnectTable[SubEnt].RtaUniqueNum ) {
- rio_dprintk (RIO_DEBUG_TABLE, "RIONewTable: entering(8)\n");
+ for (SubEnt = 0; SubEnt < Entry; SubEnt++) {
+ if (p->RIOConnectTable[SubEnt].Flags & RTA16_SECOND_SLOT)
+ continue;
+ if (p->RIOConnectTable[SubEnt].RtaUniqueNum) {
+ rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(8)\n");
/* (8) */
- if ( (MapP->SysPort != NO_PORT) && (MapP->SysPort ==
- p->RIOConnectTable[SubEnt].SysPort) ) {
- rio_dprintk (RIO_DEBUG_TABLE, "RTA %s:same TTY port # as RTA %s (%d)\n",
- MapP->Name, p->RIOConnectTable[SubEnt].Name,
- (int)MapP->SysPort);
+ if ((MapP->SysPort != NO_PORT) && (MapP->SysPort == p->RIOConnectTable[SubEnt].SysPort)) {
+ rio_dprintk(RIO_DEBUG_TABLE, "RTA %s:same TTY port # as RTA %s (%d)\n", MapP->Name, p->RIOConnectTable[SubEnt].Name, (int) MapP->SysPort);
p->RIOError.Error = TTY_NUMBER_IN_USE;
p->RIOError.Entry = Entry;
p->RIOError.Other = SubEnt;
return -ENXIO;
}
- rio_dprintk (RIO_DEBUG_TABLE, "RIONewTable: entering(9)\n");
- if (strcmp(MapP->Name,
- p->RIOConnectTable[SubEnt].Name)==0 && !(MapP->Flags & RTA16_SECOND_SLOT)) { /* (9) */
- rio_dprintk (RIO_DEBUG_TABLE, "RTA name %s used twice\n", MapP->Name);
+ rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(9)\n");
+ if (strcmp(MapP->Name, p->RIOConnectTable[SubEnt].Name) == 0 && !(MapP->Flags & RTA16_SECOND_SLOT)) { /* (9) */
+ rio_dprintk(RIO_DEBUG_TABLE, "RTA name %s used twice\n", MapP->Name);
p->RIOError.Error = NAME_USED_TWICE;
p->RIOError.Entry = Entry;
p->RIOError.Other = SubEnt;
@@ -287,19 +272,16 @@ struct rio_info * p;
}
}
}
- }
- else { /* (6) */
- rio_dprintk (RIO_DEBUG_TABLE, "RIONewTable: entering(6)\n");
- if ( MapP->ID ) {
- rio_dprintk (RIO_DEBUG_TABLE, "RIO:HOST %s has been allocated ID that isn't zero!\n",
- MapP->Name);
+ } else { /* (6) */
+ rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(6)\n");
+ if (MapP->ID) {
+ rio_dprintk(RIO_DEBUG_TABLE, "RIO:HOST %s has been allocated ID that isn't zero!\n", MapP->Name);
p->RIOError.Error = HOST_ID_NOT_ZERO;
p->RIOError.Entry = Entry;
return -ENXIO;
}
- if ( MapP->SysPort != NO_PORT ) {
- rio_dprintk (RIO_DEBUG_TABLE, "RIO: HOST %s has been allocated port numbers!\n",
- MapP->Name);
+ if (MapP->SysPort != NO_PORT) {
+ rio_dprintk(RIO_DEBUG_TABLE, "RIO: HOST %s has been allocated port numbers!\n", MapP->Name);
p->RIOError.Error = HOST_SYSPORT_BAD;
p->RIOError.Entry = Entry;
return -ENXIO;
@@ -308,106 +290,101 @@ struct rio_info * p;
}
/*
- ** wow! if we get here then it's a goody!
- */
+ ** wow! if we get here then it's a goody!
+ */
/*
- ** Zero the (old) entries for each host...
- */
- for ( Host=0; Host<RIO_HOSTS; Host++ ) {
- for ( Entry=0; Entry<MAX_RUP; Entry++ ) {
- bzero((caddr_t)&p->RIOHosts[Host].Mapping[Entry],
- sizeof(struct Map));
+ ** Zero the (old) entries for each host...
+ */
+ for (Host = 0; Host < RIO_HOSTS; Host++) {
+ for (Entry = 0; Entry < MAX_RUP; Entry++) {
+ bzero((caddr_t) & p->RIOHosts[Host].Mapping[Entry], sizeof(struct Map));
}
- bzero((caddr_t)&p->RIOHosts[Host].Name[0],
- sizeof(p->RIOHosts[Host].Name) );
+ bzero((caddr_t) & p->RIOHosts[Host].Name[0], sizeof(p->RIOHosts[Host].Name));
}
/*
- ** Copy in the new table entries
- */
- for ( Entry=0; Entry< TOTAL_MAP_ENTRIES; Entry++ ) {
- rio_dprintk (RIO_DEBUG_TABLE, "RIONewTable: Copy table for Host entry %d\n", Entry);
+ ** Copy in the new table entries
+ */
+ for (Entry = 0; Entry < TOTAL_MAP_ENTRIES; Entry++) {
+ rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: Copy table for Host entry %d\n", Entry);
MapP = &p->RIOConnectTable[Entry];
/*
- ** Now, if it is an empty slot ignore it!
- */
- if ( MapP->HostUniqueNum==0 )
+ ** Now, if it is an empty slot ignore it!
+ */
+ if (MapP->HostUniqueNum == 0)
continue;
/*
- ** we saved the host number earlier, so grab it back
- */
+ ** we saved the host number earlier, so grab it back
+ */
HostP = &p->RIOHosts[MapP->Topology[0].Unit];
/*
- ** If it is a host, then we only need to fill in the name field.
- */
- if ( MapP->ID==0 ) {
- rio_dprintk (RIO_DEBUG_TABLE, "Host entry found. Name %s\n", MapP->Name);
- bcopy(MapP->Name,HostP->Name,MAX_NAME_LEN);
+ ** If it is a host, then we only need to fill in the name field.
+ */
+ if (MapP->ID == 0) {
+ rio_dprintk(RIO_DEBUG_TABLE, "Host entry found. Name %s\n", MapP->Name);
+ bcopy(MapP->Name, HostP->Name, MAX_NAME_LEN);
continue;
}
/*
- ** Its an RTA entry, so fill in the host mapping entries for it
- ** and the port mapping entries. Notice that entry zero is for
- ** ID one.
- */
- HostMapP = &HostP->Mapping[MapP->ID-1];
+ ** It's an RTA entry, so fill in the host mapping entries for it
+ ** and the port mapping entries. Notice that entry zero is for
+ ** ID one.
+ */
+ HostMapP = &HostP->Mapping[MapP->ID - 1];
if (MapP->Flags & SLOT_IN_USE) {
- rio_dprintk (RIO_DEBUG_TABLE, "Rta entry found. Name %s\n", MapP->Name);
+ rio_dprintk(RIO_DEBUG_TABLE, "Rta entry found. Name %s\n", MapP->Name);
/*
- ** structure assign, then sort out the bits we shouldn't have done
- */
+ ** structure assign, then sort out the bits we shouldn't have done
+ */
*HostMapP = *MapP;
HostMapP->Flags = SLOT_IN_USE;
if (MapP->Flags & RTA16_SECOND_SLOT)
HostMapP->Flags |= RTA16_SECOND_SLOT;
- RIOReMapPorts(p, HostP, HostMapP );
- }
- else {
- rio_dprintk (RIO_DEBUG_TABLE, "TENTATIVE Rta entry found. Name %s\n", MapP->Name);
+ RIOReMapPorts(p, HostP, HostMapP);
+ } else {
+ rio_dprintk(RIO_DEBUG_TABLE, "TENTATIVE Rta entry found. Name %s\n", MapP->Name);
}
}
- for ( Entry=0; Entry< TOTAL_MAP_ENTRIES; Entry++ ) {
+ for (Entry = 0; Entry < TOTAL_MAP_ENTRIES; Entry++) {
p->RIOSavedTable[Entry] = p->RIOConnectTable[Entry];
}
- for ( Host=0; Host<p->RIONumHosts; Host++ ) {
- for ( SubEnt=0; SubEnt<LINKS_PER_UNIT; SubEnt++ ) {
+ for (Host = 0; Host < p->RIONumHosts; Host++) {
+ for (SubEnt = 0; SubEnt < LINKS_PER_UNIT; SubEnt++) {
p->RIOHosts[Host].Topology[SubEnt].Unit = ROUTE_DISCONNECT;
p->RIOHosts[Host].Topology[SubEnt].Link = NO_LINK;
}
- for ( Entry=0; Entry<MAX_RUP; Entry++ ) {
- for ( SubEnt=0; SubEnt<LINKS_PER_UNIT; SubEnt++ ) {
- p->RIOHosts[Host].Mapping[Entry].Topology[SubEnt].Unit =
- ROUTE_DISCONNECT;
- p->RIOHosts[Host].Mapping[Entry].Topology[SubEnt].Link =
- NO_LINK;
+ for (Entry = 0; Entry < MAX_RUP; Entry++) {
+ for (SubEnt = 0; SubEnt < LINKS_PER_UNIT; SubEnt++) {
+ p->RIOHosts[Host].Mapping[Entry].Topology[SubEnt].Unit = ROUTE_DISCONNECT;
+ p->RIOHosts[Host].Mapping[Entry].Topology[SubEnt].Link = NO_LINK;
}
}
- if ( !p->RIOHosts[Host].Name[0] ) {
- bcopy("HOST 1",p->RIOHosts[Host].Name,7);
+ if (!p->RIOHosts[Host].Name[0]) {
+ bcopy("HOST 1", p->RIOHosts[Host].Name, 7);
p->RIOHosts[Host].Name[5] += Host;
}
/*
- ** Check that default name assigned is unique.
- */
+ ** Check that default name assigned is unique.
+ */
Host1 = Host;
NameIsUnique = 0;
while (!NameIsUnique) {
NameIsUnique = 1;
- for ( Host2=0; Host2<p->RIONumHosts; Host2++ ) {
+ for (Host2 = 0; Host2 < p->RIONumHosts; Host2++) {
if (Host2 == Host)
continue;
if (strcmp(p->RIOHosts[Host].Name, p->RIOHosts[Host2].Name)
- == 0) {
+ == 0) {
NameIsUnique = 0;
Host1++;
if (Host1 >= p->RIONumHosts)
@@ -417,15 +394,14 @@ struct rio_info * p;
}
}
/*
- ** Rename host if name already used.
- */
- if (Host1 != Host)
- {
- rio_dprintk (RIO_DEBUG_TABLE, "Default name %s already used\n", p->RIOHosts[Host].Name);
- bcopy("HOST 1",p->RIOHosts[Host].Name,7);
+ ** Rename host if name already used.
+ */
+ if (Host1 != Host) {
+ rio_dprintk(RIO_DEBUG_TABLE, "Default name %s already used\n", p->RIOHosts[Host].Name);
+ bcopy("HOST 1", p->RIOHosts[Host].Name, 7);
p->RIOHosts[Host].Name[5] += Host1;
}
- rio_dprintk (RIO_DEBUG_TABLE, "Assigning default name %s\n", p->RIOHosts[Host].Name);
+ rio_dprintk(RIO_DEBUG_TABLE, "Assigning default name %s\n", p->RIOHosts[Host].Name);
}
return 0;
}
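Most of RIONewTable() is the checklist spelled out in its opening comment; check (2), for example, forces every name to be NUL terminated and made of printable ASCII before anything else is trusted. That check lifted out as a standalone sketch (helper name invented, MAX_NAME_LEN as in the rio headers):

	/* Non-zero when the name is printable ASCII; also forces NUL
	 * termination, mirroring check (2) above. */
	static int rio_name_ok(char *name)
	{
		char *cp;

		name[MAX_NAME_LEN - 1] = '\0';
		for (cp = name; *cp; cp++)
			if (*cp < ' ' || *cp > '~')
				return 0;
		return 1;
	}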
@@ -434,9 +410,8 @@ struct rio_info * p;
** User process needs the config table - build it from first
** principles.
*/
-int
-RIOApel(p)
-struct rio_info * p;
+int RIOApel(p)
+struct rio_info *p;
{
int Host;
int link;
@@ -446,35 +421,34 @@ struct rio_info * p;
struct Host *HostP;
long oldspl;
- disable(oldspl); /* strange but true! */
-
- rio_dprintk (RIO_DEBUG_TABLE, "Generating a table to return to config.rio\n");
+ disable(oldspl); /* strange but true! */
+
+ rio_dprintk(RIO_DEBUG_TABLE, "Generating a table to return to config.rio\n");
- bzero((caddr_t)&p->RIOConnectTable[0],
- sizeof(struct Map) * TOTAL_MAP_ENTRIES );
+ bzero((caddr_t) & p->RIOConnectTable[0], sizeof(struct Map) * TOTAL_MAP_ENTRIES);
- for ( Host=0; Host<RIO_HOSTS; Host++ ) {
- rio_dprintk (RIO_DEBUG_TABLE, "Processing host %d\n", Host);
+ for (Host = 0; Host < RIO_HOSTS; Host++) {
+ rio_dprintk(RIO_DEBUG_TABLE, "Processing host %d\n", Host);
HostP = &p->RIOHosts[Host];
MapP = &p->RIOConnectTable[Next++];
MapP->HostUniqueNum = HostP->UniqueNum;
- if ( (HostP->Flags & RUN_STATE) != RC_RUNNING )
+ if ((HostP->Flags & RUN_STATE) != RC_RUNNING)
continue;
MapP->RtaUniqueNum = 0;
MapP->ID = 0;
MapP->Flags = SLOT_IN_USE;
MapP->SysPort = NO_PORT;
- for ( link=0; link<LINKS_PER_UNIT; link++ )
+ for (link = 0; link < LINKS_PER_UNIT; link++)
MapP->Topology[link] = HostP->Topology[link];
- bcopy(HostP->Name,MapP->Name,MAX_NAME_LEN);
- for ( Rup=0; Rup<MAX_RUP; Rup++ ) {
- if ( HostP->Mapping[Rup].Flags & (SLOT_IN_USE|SLOT_TENTATIVE) ) {
+ bcopy(HostP->Name, MapP->Name, MAX_NAME_LEN);
+ for (Rup = 0; Rup < MAX_RUP; Rup++) {
+ if (HostP->Mapping[Rup].Flags & (SLOT_IN_USE | SLOT_TENTATIVE)) {
p->RIOConnectTable[Next] = HostP->Mapping[Rup];
- if ( HostP->Mapping[Rup].Flags & SLOT_IN_USE)
+ if (HostP->Mapping[Rup].Flags & SLOT_IN_USE)
p->RIOConnectTable[Next].Flags |= SLOT_IN_USE;
- if ( HostP->Mapping[Rup].Flags & SLOT_TENTATIVE)
+ if (HostP->Mapping[Rup].Flags & SLOT_TENTATIVE)
p->RIOConnectTable[Next].Flags |= SLOT_TENTATIVE;
- if ( HostP->Mapping[Rup].Flags & RTA16_SECOND_SLOT )
+ if (HostP->Mapping[Rup].Flags & RTA16_SECOND_SLOT)
p->RIOConnectTable[Next].Flags |= RTA16_SECOND_SLOT;
Next++;
}
@@ -489,8 +463,7 @@ struct rio_info * p;
** if the entry is suitably inactive, then we can gob on it and remove
** it from the table.
*/
-int
-RIODeleteRta(p, MapP)
+int RIODeleteRta(p, MapP)
struct rio_info *p;
struct Map *MapP;
{
@@ -502,110 +475,98 @@ struct Map *MapP;
int work_done = 0;
unsigned long lock_flags, sem_flags;
- rio_dprintk (RIO_DEBUG_TABLE, "Delete entry on host %x, rta %x\n",
- MapP->HostUniqueNum, MapP->RtaUniqueNum);
+ rio_dprintk(RIO_DEBUG_TABLE, "Delete entry on host %x, rta %x\n", MapP->HostUniqueNum, MapP->RtaUniqueNum);
- for ( host=0; host < p->RIONumHosts; host++ ) {
+ for (host = 0; host < p->RIONumHosts; host++) {
HostP = &p->RIOHosts[host];
- rio_spin_lock_irqsave( &HostP->HostLock, lock_flags );
+ rio_spin_lock_irqsave(&HostP->HostLock, lock_flags);
- if ( (HostP->Flags & RUN_STATE) != RC_RUNNING ) {
+ if ((HostP->Flags & RUN_STATE) != RC_RUNNING) {
rio_spin_unlock_irqrestore(&HostP->HostLock, lock_flags);
continue;
}
- for ( entry=0; entry<MAX_RUP; entry++ ) {
- if ( MapP->RtaUniqueNum == HostP->Mapping[entry].RtaUniqueNum ) {
+ for (entry = 0; entry < MAX_RUP; entry++) {
+ if (MapP->RtaUniqueNum == HostP->Mapping[entry].RtaUniqueNum) {
HostMapP = &HostP->Mapping[entry];
- rio_dprintk (RIO_DEBUG_TABLE, "Found entry offset %d on host %s\n",
- entry, HostP->Name);
+ rio_dprintk(RIO_DEBUG_TABLE, "Found entry offset %d on host %s\n", entry, HostP->Name);
/*
- ** Check all four links of the unit are disconnected
- */
- for ( link=0; link< LINKS_PER_UNIT; link++ ) {
- if ( HostMapP->Topology[link].Unit != ROUTE_DISCONNECT ) {
- rio_dprintk (RIO_DEBUG_TABLE, "Entry is in use and cannot be deleted!\n");
+ ** Check all four links of the unit are disconnected
+ */
+ for (link = 0; link < LINKS_PER_UNIT; link++) {
+ if (HostMapP->Topology[link].Unit != ROUTE_DISCONNECT) {
+ rio_dprintk(RIO_DEBUG_TABLE, "Entry is in use and cannot be deleted!\n");
p->RIOError.Error = UNIT_IS_IN_USE;
- rio_spin_unlock_irqrestore( &HostP->HostLock, lock_flags);
+ rio_spin_unlock_irqrestore(&HostP->HostLock, lock_flags);
return -EBUSY;
}
}
/*
- ** Slot has been allocated, BUT not booted/routed/
- ** connected/selected or anything else-ed
- */
+ ** Slot has been allocated, BUT not booted/routed/
+ ** connected/selected or anything else-ed
+ */
SysPort = HostMapP->SysPort;
- if ( SysPort != NO_PORT ) {
- for (port=SysPort; port < SysPort+PORTS_PER_RTA; port++) {
+ if (SysPort != NO_PORT) {
+ for (port = SysPort; port < SysPort + PORTS_PER_RTA; port++) {
PortP = p->RIOPortp[port];
- rio_dprintk (RIO_DEBUG_TABLE, "Unmap port\n");
+ rio_dprintk(RIO_DEBUG_TABLE, "Unmap port\n");
- rio_spin_lock_irqsave( &PortP->portSem, sem_flags );
+ rio_spin_lock_irqsave(&PortP->portSem, sem_flags);
PortP->Mapped = 0;
- if ( PortP->State & (RIO_MOPEN|RIO_LOPEN) ) {
+ if (PortP->State & (RIO_MOPEN | RIO_LOPEN)) {
- rio_dprintk (RIO_DEBUG_TABLE, "Gob on port\n");
+ rio_dprintk(RIO_DEBUG_TABLE, "Gob on port\n");
PortP->TxBufferIn = PortP->TxBufferOut = 0;
/* What should I do
- wakeup( &PortP->TxBufferIn );
- wakeup( &PortP->TxBufferOut);
- */
+ wakeup( &PortP->TxBufferIn );
+ wakeup( &PortP->TxBufferOut);
+ */
PortP->InUse = NOT_INUSE;
/* What should I do
- wakeup( &PortP->InUse );
- signal(PortP->TtyP->t_pgrp,SIGKILL);
- ttyflush(PortP->TtyP,(FREAD|FWRITE));
- */
+ wakeup( &PortP->InUse );
+ signal(PortP->TtyP->t_pgrp,SIGKILL);
+ ttyflush(PortP->TtyP,(FREAD|FWRITE));
+ */
PortP->State |= RIO_CLOSING | RIO_DELETED;
}
/*
- ** For the second slot of a 16 port RTA, the
- ** driver needs to reset the changes made to
- ** the phb to port mappings in RIORouteRup.
- */
+ ** For the second slot of a 16 port RTA, the
+ ** driver needs to reset the changes made to
+ ** the phb to port mappings in RIORouteRup.
+ */
if (PortP->SecondBlock) {
ushort dest_unit = HostMapP->ID;
ushort dest_port = port - SysPort;
- WORD *TxPktP;
- PKT *Pkt;
+ WORD *TxPktP;
+ PKT *Pkt;
- for (TxPktP = PortP->TxStart;
- TxPktP <= PortP->TxEnd; TxPktP++) {
+ for (TxPktP = PortP->TxStart; TxPktP <= PortP->TxEnd; TxPktP++) {
/*
- ** *TxPktP is the pointer to the
- ** transmit packet on the host card.
- ** This needs to be translated into
- ** a 32 bit pointer so it can be
- ** accessed from the driver.
- */
- Pkt = (PKT *) RIO_PTR(HostP->Caddr,
- RWORD(*TxPktP));
- rio_dprintk (RIO_DEBUG_TABLE,
- "Tx packet (%x) destination: Old %x:%x New %x:%x\n",
- *TxPktP, Pkt->dest_unit,
- Pkt->dest_port, dest_unit, dest_port);
+ ** *TxPktP is the pointer to the
+ ** transmit packet on the host card.
+ ** This needs to be translated into
+ ** a 32 bit pointer so it can be
+ ** accessed from the driver.
+ */
+ Pkt = (PKT *) RIO_PTR(HostP->Caddr, RWORD(*TxPktP));
+ rio_dprintk(RIO_DEBUG_TABLE, "Tx packet (%x) destination: Old %x:%x New %x:%x\n", *TxPktP, Pkt->dest_unit, Pkt->dest_port, dest_unit, dest_port);
WWORD(Pkt->dest_unit, dest_unit);
WWORD(Pkt->dest_port, dest_port);
}
- rio_dprintk (RIO_DEBUG_TABLE,
- "Port %d phb destination: Old %x:%x New %x:%x\n",
- port, PortP->PhbP->destination & 0xff,
- (PortP->PhbP->destination >> 8) & 0xff,
- dest_unit, dest_port);
- WWORD(PortP->PhbP->destination,
- dest_unit + (dest_port << 8));
+ rio_dprintk(RIO_DEBUG_TABLE, "Port %d phb destination: Old %x:%x New %x:%x\n", port, PortP->PhbP->destination & 0xff, (PortP->PhbP->destination >> 8) & 0xff, dest_unit, dest_port);
+ WWORD(PortP->PhbP->destination, dest_unit + (dest_port << 8));
}
rio_spin_unlock_irqrestore(&PortP->portSem, sem_flags);
}
}
- rio_dprintk (RIO_DEBUG_TABLE, "Entry nulled.\n");
- bzero((char *)HostMapP,sizeof(struct Map));
+ rio_dprintk(RIO_DEBUG_TABLE, "Entry nulled.\n");
+ bzero((char *) HostMapP, sizeof(struct Map));
work_done++;
}
}
@@ -613,203 +574,178 @@ struct Map *MapP;
}
/* XXXXX lock me up */
- for ( entry=0; entry< TOTAL_MAP_ENTRIES; entry++ ) {
- if ( p->RIOSavedTable[entry].RtaUniqueNum == MapP->RtaUniqueNum ) {
- bzero((char *)&p->RIOSavedTable[entry],sizeof(struct Map));
+ for (entry = 0; entry < TOTAL_MAP_ENTRIES; entry++) {
+ if (p->RIOSavedTable[entry].RtaUniqueNum == MapP->RtaUniqueNum) {
+ bzero((char *) &p->RIOSavedTable[entry], sizeof(struct Map));
work_done++;
}
- if ( p->RIOConnectTable[entry].RtaUniqueNum == MapP->RtaUniqueNum ) {
- bzero((char *)&p->RIOConnectTable[entry],sizeof(struct Map));
+ if (p->RIOConnectTable[entry].RtaUniqueNum == MapP->RtaUniqueNum) {
+ bzero((char *) &p->RIOConnectTable[entry], sizeof(struct Map));
work_done++;
}
}
- if ( work_done )
+ if (work_done)
return 0;
- rio_dprintk (RIO_DEBUG_TABLE, "Couldn't find entry to be deleted\n");
+ rio_dprintk(RIO_DEBUG_TABLE, "Couldn't find entry to be deleted\n");
p->RIOError.Error = COULDNT_FIND_ENTRY;
return -ENXIO;
}
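When the second slot of a 16 port RTA is torn down, RIODeleteRta() walks every queued transmit packet and the phb itself and rewrites their destinations; the destination word simply packs the unit in the low byte and the port in the high byte, as the WWORD() calls above show. Illustrative pack/unpack helpers (not driver API):

	static inline unsigned short phb_dest_pack(unsigned char unit, unsigned char port)
	{
		return unit + (port << 8);	/* unit in low byte, port in high byte */
	}

	static inline unsigned char phb_dest_unit(unsigned short dest)
	{
		return dest & 0xff;
	}

	static inline unsigned char phb_dest_port(unsigned short dest)
	{
		return (dest >> 8) & 0xff;
	}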
-int RIOAssignRta( struct rio_info *p, struct Map *MapP )
+int RIOAssignRta(struct rio_info *p, struct Map *MapP)
{
- int host;
- struct Map *HostMapP;
- char *sptr;
- int link;
-
-
- rio_dprintk (RIO_DEBUG_TABLE, "Assign entry on host %x, rta %x, ID %d, Sysport %d\n",
- MapP->HostUniqueNum,MapP->RtaUniqueNum,
- MapP->ID, (int)MapP->SysPort);
-
- if ((MapP->ID != (ushort)-1) &&
- ((int)MapP->ID < (int)1 || (int)MapP->ID > MAX_RUP ))
- {
- rio_dprintk (RIO_DEBUG_TABLE, "Bad ID in map entry!\n");
- p->RIOError.Error = ID_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
- if (MapP->RtaUniqueNum == 0)
- {
- rio_dprintk (RIO_DEBUG_TABLE, "Rta Unique number zero!\n");
- p->RIOError.Error = RTA_UNIQUE_NUMBER_ZERO;
- return -EINVAL;
- }
- if ( (MapP->SysPort != NO_PORT) && (MapP->SysPort % PORTS_PER_RTA) )
- {
- rio_dprintk (RIO_DEBUG_TABLE, "Port %d not multiple of %d!\n",(int)MapP->SysPort,PORTS_PER_RTA);
- p->RIOError.Error = TTY_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
- if ( (MapP->SysPort != NO_PORT) && (MapP->SysPort >= RIO_PORTS) )
- {
- rio_dprintk (RIO_DEBUG_TABLE, "Port %d not valid!\n",(int)MapP->SysPort);
- p->RIOError.Error = TTY_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
-
- /*
- ** Copy the name across to the map entry.
- */
- MapP->Name[MAX_NAME_LEN-1] = '\0';
- sptr = MapP->Name;
- while ( *sptr )
- {
- if ( *sptr<' ' || *sptr>'~' )
- {
- rio_dprintk (RIO_DEBUG_TABLE, "Name entry contains non-printing characters!\n");
- p->RIOError.Error = BAD_CHARACTER_IN_NAME;
- return -EINVAL;
- }
- sptr++;
- }
-
- for ( host=0; host < p->RIONumHosts; host++ )
- {
- if ( MapP->HostUniqueNum == p->RIOHosts[host].UniqueNum )
- {
- if ( (p->RIOHosts[host].Flags & RUN_STATE) != RC_RUNNING )
- {
- p->RIOError.Error = HOST_NOT_RUNNING;
- return -ENXIO;
- }
-
- /*
- ** Now we have a host we need to allocate an ID
- ** if the entry does not already have one.
- */
- if (MapP->ID == (ushort)-1)
- {
- int nNewID;
-
- rio_dprintk (RIO_DEBUG_TABLE, "Attempting to get a new ID for rta \"%s\"\n",
- MapP->Name);
- /*
- ** The idea here is to allow RTA's to be assigned
- ** before they actually appear on the network.
- ** This allows the addition of RTA's without having
- ** to plug them in.
- ** What we do is:
- ** - Find a free ID and allocate it to the RTA.
- ** - If this map entry is the second half of a
- ** 16 port entry then find the other half and
- ** make sure the 2 cross reference each other.
- */
- if (RIOFindFreeID(p, &p->RIOHosts[host], &nNewID, NULL) != 0)
- {
- p->RIOError.Error = COULDNT_FIND_ENTRY;
- return -EBUSY;
- }
- MapP->ID = (ushort)nNewID + 1;
- rio_dprintk (RIO_DEBUG_TABLE, "Allocated ID %d for this new RTA.\n", MapP->ID);
- HostMapP = &p->RIOHosts[host].Mapping[nNewID];
- HostMapP->RtaUniqueNum = MapP->RtaUniqueNum;
- HostMapP->HostUniqueNum = MapP->HostUniqueNum;
- HostMapP->ID = MapP->ID;
- for (link = 0; link < LINKS_PER_UNIT; link++)
- {
- HostMapP->Topology[link].Unit = ROUTE_DISCONNECT;
- HostMapP->Topology[link].Link = NO_LINK;
- }
- if (MapP->Flags & RTA16_SECOND_SLOT)
- {
- int unit;
-
- for (unit = 0; unit < MAX_RUP; unit++)
- if (p->RIOHosts[host].Mapping[unit].RtaUniqueNum ==
- MapP->RtaUniqueNum)
- break;
- if (unit == MAX_RUP)
- {
- p->RIOError.Error = COULDNT_FIND_ENTRY;
- return -EBUSY;
- }
- HostMapP->Flags |= RTA16_SECOND_SLOT;
- HostMapP->ID2 = MapP->ID2 = p->RIOHosts[host].Mapping[unit].ID;
- p->RIOHosts[host].Mapping[unit].ID2 = MapP->ID;
- rio_dprintk (RIO_DEBUG_TABLE, "Cross referenced id %d to ID %d.\n",
- MapP->ID,
- p->RIOHosts[host].Mapping[unit].ID);
+ int host;
+ struct Map *HostMapP;
+ char *sptr;
+ int link;
+
+
+ rio_dprintk(RIO_DEBUG_TABLE, "Assign entry on host %x, rta %x, ID %d, Sysport %d\n", MapP->HostUniqueNum, MapP->RtaUniqueNum, MapP->ID, (int) MapP->SysPort);
+
+ if ((MapP->ID != (ushort) - 1) && ((int) MapP->ID < (int) 1 || (int) MapP->ID > MAX_RUP)) {
+ rio_dprintk(RIO_DEBUG_TABLE, "Bad ID in map entry!\n");
+ p->RIOError.Error = ID_NUMBER_OUT_OF_RANGE;
+ return -EINVAL;
+ }
+ if (MapP->RtaUniqueNum == 0) {
+ rio_dprintk(RIO_DEBUG_TABLE, "Rta Unique number zero!\n");
+ p->RIOError.Error = RTA_UNIQUE_NUMBER_ZERO;
+ return -EINVAL;
+ }
+ if ((MapP->SysPort != NO_PORT) && (MapP->SysPort % PORTS_PER_RTA)) {
+ rio_dprintk(RIO_DEBUG_TABLE, "Port %d not multiple of %d!\n", (int) MapP->SysPort, PORTS_PER_RTA);
+ p->RIOError.Error = TTY_NUMBER_OUT_OF_RANGE;
+ return -EINVAL;
+ }
+ if ((MapP->SysPort != NO_PORT) && (MapP->SysPort >= RIO_PORTS)) {
+ rio_dprintk(RIO_DEBUG_TABLE, "Port %d not valid!\n", (int) MapP->SysPort);
+ p->RIOError.Error = TTY_NUMBER_OUT_OF_RANGE;
+ return -EINVAL;
+ }
+
+ /*
+ ** Copy the name across to the map entry.
+ */
+ MapP->Name[MAX_NAME_LEN - 1] = '\0';
+ sptr = MapP->Name;
+ while (*sptr) {
+ if (*sptr < ' ' || *sptr > '~') {
+ rio_dprintk(RIO_DEBUG_TABLE, "Name entry contains non-printing characters!\n");
+ p->RIOError.Error = BAD_CHARACTER_IN_NAME;
+ return -EINVAL;
}
- }
+ sptr++;
+ }
+
+ for (host = 0; host < p->RIONumHosts; host++) {
+ if (MapP->HostUniqueNum == p->RIOHosts[host].UniqueNum) {
+ if ((p->RIOHosts[host].Flags & RUN_STATE) != RC_RUNNING) {
+ p->RIOError.Error = HOST_NOT_RUNNING;
+ return -ENXIO;
+ }
- HostMapP = &p->RIOHosts[host].Mapping[MapP->ID-1];
+ /*
+ ** Now we have a host we need to allocate an ID
+ ** if the entry does not already have one.
+ */
+ if (MapP->ID == (ushort) - 1) {
+ int nNewID;
- if ( HostMapP->Flags & SLOT_IN_USE )
- {
- rio_dprintk (RIO_DEBUG_TABLE, "Map table slot for ID %d is already in use.\n", MapP->ID);
- p->RIOError.Error = ID_ALREADY_IN_USE;
- return -EBUSY;
- }
-
- /*
- ** Assign the sys ports and the name, and mark the slot as
- ** being in use.
- */
- HostMapP->SysPort = MapP->SysPort;
- if ((MapP->Flags & RTA16_SECOND_SLOT) == 0)
- CCOPY( MapP->Name, HostMapP->Name, MAX_NAME_LEN );
- HostMapP->Flags = SLOT_IN_USE | RTA_BOOTED;
+ rio_dprintk(RIO_DEBUG_TABLE, "Attempting to get a new ID for rta \"%s\"\n", MapP->Name);
+ /*
+ ** The idea here is to allow RTA's to be assigned
+ ** before they actually appear on the network.
+ ** This allows the addition of RTA's without having
+ ** to plug them in.
+ ** What we do is:
+ ** - Find a free ID and allocate it to the RTA.
+ ** - If this map entry is the second half of a
+ ** 16 port entry then find the other half and
+ ** make sure the 2 cross reference each other.
+ */
+ if (RIOFindFreeID(p, &p->RIOHosts[host], &nNewID, NULL) != 0) {
+ p->RIOError.Error = COULDNT_FIND_ENTRY;
+ return -EBUSY;
+ }
+ MapP->ID = (ushort) nNewID + 1;
+ rio_dprintk(RIO_DEBUG_TABLE, "Allocated ID %d for this new RTA.\n", MapP->ID);
+ HostMapP = &p->RIOHosts[host].Mapping[nNewID];
+ HostMapP->RtaUniqueNum = MapP->RtaUniqueNum;
+ HostMapP->HostUniqueNum = MapP->HostUniqueNum;
+ HostMapP->ID = MapP->ID;
+ for (link = 0; link < LINKS_PER_UNIT; link++) {
+ HostMapP->Topology[link].Unit = ROUTE_DISCONNECT;
+ HostMapP->Topology[link].Link = NO_LINK;
+ }
+ if (MapP->Flags & RTA16_SECOND_SLOT) {
+ int unit;
+
+ for (unit = 0; unit < MAX_RUP; unit++)
+ if (p->RIOHosts[host].Mapping[unit].RtaUniqueNum == MapP->RtaUniqueNum)
+ break;
+ if (unit == MAX_RUP) {
+ p->RIOError.Error = COULDNT_FIND_ENTRY;
+ return -EBUSY;
+ }
+ HostMapP->Flags |= RTA16_SECOND_SLOT;
+ HostMapP->ID2 = MapP->ID2 = p->RIOHosts[host].Mapping[unit].ID;
+ p->RIOHosts[host].Mapping[unit].ID2 = MapP->ID;
+ rio_dprintk(RIO_DEBUG_TABLE, "Cross referenced id %d to ID %d.\n", MapP->ID, p->RIOHosts[host].Mapping[unit].ID);
+ }
+ }
+
+ HostMapP = &p->RIOHosts[host].Mapping[MapP->ID - 1];
+
+ if (HostMapP->Flags & SLOT_IN_USE) {
+ rio_dprintk(RIO_DEBUG_TABLE, "Map table slot for ID %d is already in use.\n", MapP->ID);
+ p->RIOError.Error = ID_ALREADY_IN_USE;
+ return -EBUSY;
+ }
+
+ /*
+ ** Assign the sys ports and the name, and mark the slot as
+ ** being in use.
+ */
+ HostMapP->SysPort = MapP->SysPort;
+ if ((MapP->Flags & RTA16_SECOND_SLOT) == 0)
+ CCOPY(MapP->Name, HostMapP->Name, MAX_NAME_LEN);
+ HostMapP->Flags = SLOT_IN_USE | RTA_BOOTED;
#ifdef NEED_TO_FIX
- RIO_SV_BROADCAST(p->RIOHosts[host].svFlags[MapP->ID-1]);
+ RIO_SV_BROADCAST(p->RIOHosts[host].svFlags[MapP->ID - 1]);
#endif
- if (MapP->Flags & RTA16_SECOND_SLOT)
- HostMapP->Flags |= RTA16_SECOND_SLOT;
-
- RIOReMapPorts( p, &p->RIOHosts[host], HostMapP );
- /*
- ** Adjust 2nd block of 8 phbs
- */
- if (MapP->Flags & RTA16_SECOND_SLOT)
- RIOFixPhbs(p, &p->RIOHosts[host], HostMapP->ID - 1);
-
- if ( HostMapP->SysPort != NO_PORT )
- {
- if ( HostMapP->SysPort < p->RIOFirstPortsBooted )
- p->RIOFirstPortsBooted = HostMapP->SysPort;
- if ( HostMapP->SysPort > p->RIOLastPortsBooted )
- p->RIOLastPortsBooted = HostMapP->SysPort;
- }
- if (MapP->Flags & RTA16_SECOND_SLOT)
- rio_dprintk (RIO_DEBUG_TABLE, "Second map of RTA %s added to configuration\n",
- p->RIOHosts[host].Mapping[MapP->ID2 - 1].Name);
- else
- rio_dprintk (RIO_DEBUG_TABLE, "RTA %s added to configuration\n", MapP->Name);
- return 0;
+ if (MapP->Flags & RTA16_SECOND_SLOT)
+ HostMapP->Flags |= RTA16_SECOND_SLOT;
+
+ RIOReMapPorts(p, &p->RIOHosts[host], HostMapP);
+ /*
+ ** Adjust 2nd block of 8 phbs
+ */
+ if (MapP->Flags & RTA16_SECOND_SLOT)
+ RIOFixPhbs(p, &p->RIOHosts[host], HostMapP->ID - 1);
+
+ if (HostMapP->SysPort != NO_PORT) {
+ if (HostMapP->SysPort < p->RIOFirstPortsBooted)
+ p->RIOFirstPortsBooted = HostMapP->SysPort;
+ if (HostMapP->SysPort > p->RIOLastPortsBooted)
+ p->RIOLastPortsBooted = HostMapP->SysPort;
+ }
+ if (MapP->Flags & RTA16_SECOND_SLOT)
+ rio_dprintk(RIO_DEBUG_TABLE, "Second map of RTA %s added to configuration\n", p->RIOHosts[host].Mapping[MapP->ID2 - 1].Name);
+ else
+ rio_dprintk(RIO_DEBUG_TABLE, "RTA %s added to configuration\n", MapP->Name);
+ return 0;
+ }
}
- }
- p->RIOError.Error = UNKNOWN_HOST_NUMBER;
- rio_dprintk (RIO_DEBUG_TABLE, "Unknown host %x\n", MapP->HostUniqueNum);
- return -ENXIO;
+ p->RIOError.Error = UNKNOWN_HOST_NUMBER;
+ rio_dprintk(RIO_DEBUG_TABLE, "Unknown host %x\n", MapP->HostUniqueNum);
+ return -ENXIO;
}
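RIOAssignRta() lets an RTA be configured before it is ever plugged in: an entry arriving with ID == (ushort)-1 gets a slot from RIOFindFreeID(), the stored ID becomes slot + 1 (IDs are 1 based, slot 0 is ID 1), and the two halves of a 16 port RTA are cross referenced through ID2. A small sketch of just the ID convention, using ushort as the driver does and a hypothetical sentinel name:

	#define RIO_NO_ID	((ushort)-1)	/* "allocate an ID for me" */

	/* Illustrative: convert between the 1 based ID handed to config.rio
	 * and the 0 based index into HostP->Mapping[]. */
	static inline ushort slot_to_id(unsigned int slot)
	{
		return (ushort)(slot + 1);
	}

	static inline unsigned int id_to_slot(ushort id)
	{
		return (unsigned int)id - 1;
	}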
-int
-RIOReMapPorts(p, HostP, HostMapP)
-struct rio_info * p;
+int RIOReMapPorts(p, HostP, HostMapP)
+struct rio_info *p;
struct Host *HostP;
-struct Map *HostMapP;
+struct Map *HostMapP;
{
register struct Port *PortP;
uint SubEnt;
@@ -819,135 +755,127 @@ struct Map *HostMapP;
unsigned long flags;
#ifdef CHECK
- CheckHostP( HostP );
- CheckHostMapP( HostMapP );
+ CheckHostP(HostP);
+ CheckHostMapP(HostMapP);
#endif
- rio_dprintk (RIO_DEBUG_TABLE, "Mapping sysport %d to id %d\n", (int)HostMapP->SysPort, HostMapP->ID);
+ rio_dprintk(RIO_DEBUG_TABLE, "Mapping sysport %d to id %d\n", (int) HostMapP->SysPort, HostMapP->ID);
/*
- ** We need to tell the UnixRups which sysport the rup corresponds to
- */
- HostP->UnixRups[HostMapP->ID-1].BaseSysPort = HostMapP->SysPort;
+ ** We need to tell the UnixRups which sysport the rup corresponds to
+ */
+ HostP->UnixRups[HostMapP->ID - 1].BaseSysPort = HostMapP->SysPort;
- if ( HostMapP->SysPort == NO_PORT )
- return(0);
+ if (HostMapP->SysPort == NO_PORT)
+ return (0);
RtaType = GetUnitType(HostMapP->RtaUniqueNum);
- rio_dprintk (RIO_DEBUG_TABLE, "Mapping sysport %d-%d\n",
- (int)HostMapP->SysPort, (int)HostMapP->SysPort+PORTS_PER_RTA-1);
+ rio_dprintk(RIO_DEBUG_TABLE, "Mapping sysport %d-%d\n", (int) HostMapP->SysPort, (int) HostMapP->SysPort + PORTS_PER_RTA - 1);
/*
- ** now map each of its eight ports
- */
- for ( SubEnt=0; SubEnt<PORTS_PER_RTA; SubEnt++) {
- rio_dprintk (RIO_DEBUG_TABLE, "subent = %d, HostMapP->SysPort = %d\n",
- SubEnt, (int)HostMapP->SysPort);
- SysPort = HostMapP->SysPort+SubEnt; /* portnumber within system */
- /* portnumber on host */
-
- HostPort = (HostMapP->ID-1)*PORTS_PER_RTA+SubEnt;
-
- rio_dprintk (RIO_DEBUG_TABLE, "c1 p = %p, p->rioPortp = %p\n", p, p->RIOPortp);
+ ** now map each of its eight ports
+ */
+ for (SubEnt = 0; SubEnt < PORTS_PER_RTA; SubEnt++) {
+ rio_dprintk(RIO_DEBUG_TABLE, "subent = %d, HostMapP->SysPort = %d\n", SubEnt, (int) HostMapP->SysPort);
+ SysPort = HostMapP->SysPort + SubEnt; /* portnumber within system */
+ /* portnumber on host */
+
+ HostPort = (HostMapP->ID - 1) * PORTS_PER_RTA + SubEnt;
+
+ rio_dprintk(RIO_DEBUG_TABLE, "c1 p = %p, p->rioPortp = %p\n", p, p->RIOPortp);
PortP = p->RIOPortp[SysPort];
#if 0
- PortP->TtyP = &p->channel[SysPort];
+ PortP->TtyP = &p->channel[SysPort];
#endif
- rio_dprintk (RIO_DEBUG_TABLE, "Map port\n");
+ rio_dprintk(RIO_DEBUG_TABLE, "Map port\n");
/*
- ** Point at all the real neat data structures
- */
+ ** Point at all the real neat data structures
+ */
rio_spin_lock_irqsave(&PortP->portSem, flags);
PortP->HostP = HostP;
PortP->Caddr = HostP->Caddr;
/*
- ** The PhbP cannot be filled in yet
- ** unless the host has been booted
- */
+ ** The PhbP cannot be filled in yet
+ ** unless the host has been booted
+ */
if ((HostP->Flags & RUN_STATE) == RC_RUNNING) {
struct PHB *PhbP = PortP->PhbP = &HostP->PhbP[HostPort];
- PortP->TxAdd =(WORD *)RIO_PTR(HostP->Caddr,RWORD(PhbP->tx_add));
- PortP->TxStart =(WORD *)RIO_PTR(HostP->Caddr,RWORD(PhbP->tx_start));
- PortP->TxEnd =(WORD *)RIO_PTR(HostP->Caddr,RWORD(PhbP->tx_end));
- PortP->RxRemove=(WORD *)RIO_PTR(HostP->Caddr,
- RWORD(PhbP->rx_remove));
- PortP->RxStart =(WORD *)RIO_PTR(HostP->Caddr,RWORD(PhbP->rx_start));
- PortP->RxEnd =(WORD *)RIO_PTR(HostP->Caddr,RWORD(PhbP->rx_end));
- }
- else
+ PortP->TxAdd = (WORD *) RIO_PTR(HostP->Caddr, RWORD(PhbP->tx_add));
+ PortP->TxStart = (WORD *) RIO_PTR(HostP->Caddr, RWORD(PhbP->tx_start));
+ PortP->TxEnd = (WORD *) RIO_PTR(HostP->Caddr, RWORD(PhbP->tx_end));
+ PortP->RxRemove = (WORD *) RIO_PTR(HostP->Caddr, RWORD(PhbP->rx_remove));
+ PortP->RxStart = (WORD *) RIO_PTR(HostP->Caddr, RWORD(PhbP->rx_start));
+ PortP->RxEnd = (WORD *) RIO_PTR(HostP->Caddr, RWORD(PhbP->rx_end));
+ } else
PortP->PhbP = NULL;
/*
- ** port related flags
- */
- PortP->HostPort = HostPort;
+ ** port related flags
+ */
+ PortP->HostPort = HostPort;
/*
- ** For each part of a 16 port RTA, RupNum is ID - 1.
- */
+ ** For each part of a 16 port RTA, RupNum is ID - 1.
+ */
PortP->RupNum = HostMapP->ID - 1;
if (HostMapP->Flags & RTA16_SECOND_SLOT) {
- PortP->ID2 = HostMapP->ID2 - 1;
- PortP->SecondBlock = TRUE;
- }
- else {
- PortP->ID2 = 0;
- PortP->SecondBlock = FALSE;
+ PortP->ID2 = HostMapP->ID2 - 1;
+ PortP->SecondBlock = TRUE;
+ } else {
+ PortP->ID2 = 0;
+ PortP->SecondBlock = FALSE;
}
- PortP->RtaUniqueNum = HostMapP->RtaUniqueNum;
+ PortP->RtaUniqueNum = HostMapP->RtaUniqueNum;
/*
- ** If the port was already mapped then thats all we need to do.
- */
+	 ** If the port was already mapped then that's all we need to do.
+ */
if (PortP->Mapped) {
- rio_spin_unlock_irqrestore( &PortP->portSem, flags);
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
continue;
- }
- else HostMapP->Flags &= ~RTA_NEWBOOT;
+ } else
+ HostMapP->Flags &= ~RTA_NEWBOOT;
- PortP->State = 0;
- PortP->Config = 0;
+ PortP->State = 0;
+ PortP->Config = 0;
/*
- ** Check out the module type - if it is special (read only etc.)
- ** then we need to set flags in the PortP->Config.
- ** Note: For 16 port RTA, all ports are of the same type.
- */
+ ** Check out the module type - if it is special (read only etc.)
+ ** then we need to set flags in the PortP->Config.
+ ** Note: For 16 port RTA, all ports are of the same type.
+ */
if (RtaType == TYPE_RTA16) {
- PortP->Config |= p->RIOModuleTypes[HostP->UnixRups
- [HostMapP->ID-1].ModTypes].Flags[SubEnt % PORTS_PER_MODULE];
+ PortP->Config |= p->RIOModuleTypes[HostP->UnixRups[HostMapP->ID - 1].ModTypes].Flags[SubEnt % PORTS_PER_MODULE];
} else {
- if ( SubEnt < PORTS_PER_MODULE )
- PortP->Config |= p->RIOModuleTypes[LONYBLE(HostP->UnixRups
- [HostMapP->ID-1].ModTypes)].Flags[SubEnt % PORTS_PER_MODULE];
+ if (SubEnt < PORTS_PER_MODULE)
+ PortP->Config |= p->RIOModuleTypes[LONYBLE(HostP->UnixRups[HostMapP->ID - 1].ModTypes)].Flags[SubEnt % PORTS_PER_MODULE];
else
- PortP->Config |= p->RIOModuleTypes[HINYBLE(HostP->UnixRups
- [HostMapP->ID-1].ModTypes)].Flags[SubEnt % PORTS_PER_MODULE];
+ PortP->Config |= p->RIOModuleTypes[HINYBLE(HostP->UnixRups[HostMapP->ID - 1].ModTypes)].Flags[SubEnt % PORTS_PER_MODULE];
}
/*
- ** more port related flags
- */
- PortP->PortState = 0;
- PortP->ModemLines = 0;
- PortP->ModemState = 0;
- PortP->CookMode = COOK_WELL;
- PortP->ParamSem = 0;
- PortP->FlushCmdBodge= 0;
- PortP->WflushFlag = 0;
- PortP->MagicFlags = 0;
- PortP->Lock = 0;
- PortP->Store = 0;
- PortP->FirstOpen = 1;
+ ** more port related flags
+ */
+ PortP->PortState = 0;
+ PortP->ModemLines = 0;
+ PortP->ModemState = 0;
+ PortP->CookMode = COOK_WELL;
+ PortP->ParamSem = 0;
+ PortP->FlushCmdBodge = 0;
+ PortP->WflushFlag = 0;
+ PortP->MagicFlags = 0;
+ PortP->Lock = 0;
+ PortP->Store = 0;
+ PortP->FirstOpen = 1;
/*
- ** Buffers 'n things
- */
- PortP->RxDataStart = 0;
- PortP->Cor2Copy = 0;
- PortP->Name = &HostMapP->Name[0];
+ ** Buffers 'n things
+ */
+ PortP->RxDataStart = 0;
+ PortP->Cor2Copy = 0;
+ PortP->Name = &HostMapP->Name[0];
#ifdef STATS
- bzero( (caddr_t)&PortP->Stat, sizeof(struct RIOStats) );
+ bzero((caddr_t) & PortP->Stat, sizeof(struct RIOStats));
#endif
PortP->statsGather = 0;
PortP->txchars = 0;
@@ -955,90 +883,87 @@ struct Map *HostMapP;
PortP->opens = 0;
PortP->closes = 0;
PortP->ioctls = 0;
- if ( PortP->TxRingBuffer )
- bzero( PortP->TxRingBuffer, p->RIOBufferSize );
- else if ( p->RIOBufferSize ) {
+ if (PortP->TxRingBuffer)
+ bzero(PortP->TxRingBuffer, p->RIOBufferSize);
+ else if (p->RIOBufferSize) {
PortP->TxRingBuffer = sysbrk(p->RIOBufferSize);
- bzero( PortP->TxRingBuffer, p->RIOBufferSize );
+ bzero(PortP->TxRingBuffer, p->RIOBufferSize);
}
- PortP->TxBufferOut = 0;
- PortP->TxBufferIn = 0;
- PortP->Debug = 0;
+ PortP->TxBufferOut = 0;
+ PortP->TxBufferIn = 0;
+ PortP->Debug = 0;
/*
- ** LastRxTgl stores the state of the rx toggle bit for this
- ** port, to be compared with the state of the next pkt received.
- ** If the same, we have received the same rx pkt from the RTA
- ** twice. Initialise to a value not equal to PHB_RX_TGL or 0.
- */
- PortP->LastRxTgl = ~(uchar)PHB_RX_TGL;
+ ** LastRxTgl stores the state of the rx toggle bit for this
+ ** port, to be compared with the state of the next pkt received.
+ ** If the same, we have received the same rx pkt from the RTA
+ ** twice. Initialise to a value not equal to PHB_RX_TGL or 0.
+ */
+ PortP->LastRxTgl = ~(uchar) PHB_RX_TGL;
/*
- ** and mark the port as usable
- */
+ ** and mark the port as usable
+ */
PortP->Mapped = 1;
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
}
- if ( HostMapP->SysPort < p->RIOFirstPortsMapped )
+ if (HostMapP->SysPort < p->RIOFirstPortsMapped)
p->RIOFirstPortsMapped = HostMapP->SysPort;
- if ( HostMapP->SysPort > p->RIOLastPortsMapped )
+ if (HostMapP->SysPort > p->RIOLastPortsMapped)
p->RIOLastPortsMapped = HostMapP->SysPort;
return 0;
}
-int
-RIOChangeName(p, MapP)
+int RIOChangeName(p, MapP)
struct rio_info *p;
-struct Map* MapP;
+struct Map *MapP;
{
int host;
struct Map *HostMapP;
char *sptr;
- rio_dprintk (RIO_DEBUG_TABLE, "Change name entry on host %x, rta %x, ID %d, Sysport %d\n",
- MapP->HostUniqueNum,MapP->RtaUniqueNum,
- MapP->ID, (int)MapP->SysPort);
+ rio_dprintk(RIO_DEBUG_TABLE, "Change name entry on host %x, rta %x, ID %d, Sysport %d\n", MapP->HostUniqueNum, MapP->RtaUniqueNum, MapP->ID, (int) MapP->SysPort);
- if ( MapP->ID > MAX_RUP ) {
- rio_dprintk (RIO_DEBUG_TABLE, "Bad ID in map entry!\n");
+ if (MapP->ID > MAX_RUP) {
+ rio_dprintk(RIO_DEBUG_TABLE, "Bad ID in map entry!\n");
p->RIOError.Error = ID_NUMBER_OUT_OF_RANGE;
return -EINVAL;
}
- MapP->Name[MAX_NAME_LEN-1] = '\0';
+ MapP->Name[MAX_NAME_LEN - 1] = '\0';
sptr = MapP->Name;
- while ( *sptr ) {
- if ( *sptr<' ' || *sptr>'~' ) {
- rio_dprintk (RIO_DEBUG_TABLE, "Name entry contains non-printing characters!\n");
+ while (*sptr) {
+ if (*sptr < ' ' || *sptr > '~') {
+ rio_dprintk(RIO_DEBUG_TABLE, "Name entry contains non-printing characters!\n");
p->RIOError.Error = BAD_CHARACTER_IN_NAME;
return -EINVAL;
}
sptr++;
}
- for ( host=0; host < p->RIONumHosts; host++ ) {
- if ( MapP->HostUniqueNum == p->RIOHosts[host].UniqueNum ) {
- if ( (p->RIOHosts[host].Flags & RUN_STATE) != RC_RUNNING ) {
+ for (host = 0; host < p->RIONumHosts; host++) {
+ if (MapP->HostUniqueNum == p->RIOHosts[host].UniqueNum) {
+ if ((p->RIOHosts[host].Flags & RUN_STATE) != RC_RUNNING) {
p->RIOError.Error = HOST_NOT_RUNNING;
return -ENXIO;
}
- if ( MapP->ID==0 ) {
- CCOPY( MapP->Name, p->RIOHosts[host].Name, MAX_NAME_LEN );
+ if (MapP->ID == 0) {
+ CCOPY(MapP->Name, p->RIOHosts[host].Name, MAX_NAME_LEN);
return 0;
}
- HostMapP = &p->RIOHosts[host].Mapping[MapP->ID-1];
+ HostMapP = &p->RIOHosts[host].Mapping[MapP->ID - 1];
- if ( HostMapP->RtaUniqueNum != MapP->RtaUniqueNum ) {
+ if (HostMapP->RtaUniqueNum != MapP->RtaUniqueNum) {
p->RIOError.Error = RTA_NUMBER_WRONG;
return -ENXIO;
}
- CCOPY( MapP->Name, HostMapP->Name, MAX_NAME_LEN );
+ CCOPY(MapP->Name, HostMapP->Name, MAX_NAME_LEN);
return 0;
}
}
p->RIOError.Error = UNKNOWN_HOST_NUMBER;
- rio_dprintk (RIO_DEBUG_TABLE, "Unknown host %x\n", MapP->HostUniqueNum);
+ rio_dprintk(RIO_DEBUG_TABLE, "Unknown host %x\n", MapP->HostUniqueNum);
return -ENXIO;
}
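
The RIOChangeName() hunk above enforces two rules on a map name before copying it: the string is forcibly NUL-terminated at MAX_NAME_LEN - 1, and every remaining character must be printable ASCII (' ' through '~'), otherwise the ioctl fails with BAD_CHARACTER_IN_NAME. A minimal user-space sketch of that check follows; it is not part of the patch, and the MAX_NAME_LEN value used here is an assumption for illustration only.

#include <stdio.h>

#define MAX_NAME_LEN 32		/* assumed here; the driver has its own definition */

/* Same rule as RIOChangeName(): force termination, then accept the
 * name only if every character is printable ASCII (' ' .. '~'). */
static int rio_name_ok(char *name)
{
	char *sptr;

	name[MAX_NAME_LEN - 1] = '\0';
	for (sptr = name; *sptr; sptr++)
		if (*sptr < ' ' || *sptr > '~')
			return 0;
	return 1;
}

int main(void)
{
	char good[MAX_NAME_LEN] = "rta-console-1";
	char bad[MAX_NAME_LEN] = "bad\tname";	/* tab falls outside ' '..'~' */

	printf("%d %d\n", rio_name_ok(good), rio_name_ok(bad));
	return 0;
}
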
diff --git a/drivers/char/rio/riotime.h b/drivers/char/rio/riotime.h
index 66d52bc0549..35e01cd103d 100644
--- a/drivers/char/rio/riotime.h
+++ b/drivers/char/rio/riotime.h
@@ -40,7 +40,7 @@
#ifndef lint
#ifdef SCCS
-static char *_rio_riotime_h_sccs = "@(#)riotime.h 1.1" ;
+static char *_rio_riotime_h_sccs = "@(#)riotime.h 1.1";
#endif
#endif
@@ -52,7 +52,7 @@ static char *_rio_riotime_h_sccs = "@(#)riotime.h 1.1" ;
/**************************************
* Convert a RIO tick (1/10th second)
* into transputer low priority ticks
- *************************************/
+ *************************************/
#define RioTimeToLow(time) (time*(100000 / 64))
#define RioLowToTime(time) ((time*64)/100000)
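
For the riotime.h hunk above: a RIO tick is one tenth of a second (100000 microseconds), and the 100000/64 factor suggests the transputer low-priority timer ticks every 64 microseconds; both conversions are plain integer arithmetic, so round trips lose precision. A small stand-alone sketch (not part of the patch) that exercises the two macros exactly as defined above:

#include <stdio.h>

/* Macros as defined in riotime.h above: RIO ticks (1/10 s) to and from
 * transputer low-priority timer ticks (assumed to be 64 us each). */
#define RioTimeToLow(time) (time*(100000 / 64))
#define RioLowToTime(time) ((time*64)/100000)

int main(void)
{
	int rio = 10;			/* ten RIO ticks = one second */
	int low = RioTimeToLow(rio);	/* 10 * (100000/64) = 15620 */

	printf("%d RIO ticks -> %d low-priority ticks -> %d RIO ticks\n",
	       rio, low, RioLowToTime(low));
	return 0;
}
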
diff --git a/drivers/char/rio/riotty.c b/drivers/char/rio/riotty.c
index 78a321afdf4..5894a25b011 100644
--- a/drivers/char/rio/riotty.c
+++ b/drivers/char/rio/riotty.c
@@ -90,21 +90,19 @@ static char *_riotty_c_sccs_ = "@(#)riotty.c 1.3";
#include "sam.h"
#if 0
-static void ttyseth_pv(struct Port *, struct ttystatics *,
- struct termios *sg, int);
+static void ttyseth_pv(struct Port *, struct ttystatics *, struct termios *sg, int);
#endif
static void RIOClearUp(struct Port *PortP);
-int RIOShortCommand(struct rio_info *p, struct Port *PortP,
- int command, int len, int arg);
+int RIOShortCommand(struct rio_info *p, struct Port *PortP, int command, int len, int arg);
#if 0
static int RIOCookMode(struct ttystatics *);
#endif
-extern int conv_vb[]; /* now defined in ttymgr.c */
-extern int conv_bv[]; /* now defined in ttymgr.c */
-
+extern int conv_vb[]; /* now defined in ttymgr.c */
+extern int conv_bv[]; /* now defined in ttymgr.c */
+
/*
** 16.09.1998 ARG - Fix to build riotty.k.o for Modular Kernel Support
**
@@ -117,27 +115,25 @@ extern int conv_bv[]; /* now defined in ttymgr.c */
#endif
#ifdef NEED_THIS2
-static struct old_sgttyb
-default_sg =
-{
- B19200, B19200, /* input and output speed */
- 'H' - '@', /* erase char */
- -1, /* 2nd erase char */
- 'U' - '@', /* kill char */
- ECHO | CRMOD, /* mode */
- 'C' - '@', /* interrupt character */
- '\\' - '@', /* quit char */
- 'Q' - '@', /* start char */
- 'S' - '@', /* stop char */
- 'D' - '@', /* EOF */
- -1, /* brk */
- (LCRTBS | LCRTERA | LCRTKIL | LCTLECH), /* local mode word */
- 'Z' - '@', /* process stop */
- 'Y' - '@', /* delayed stop */
- 'R' - '@', /* reprint line */
- 'O' - '@', /* flush output */
- 'W' - '@', /* word erase */
- 'V' - '@' /* literal next char */
+static struct old_sgttyb default_sg = {
+ B19200, B19200, /* input and output speed */
+ 'H' - '@', /* erase char */
+ -1, /* 2nd erase char */
+ 'U' - '@', /* kill char */
+ ECHO | CRMOD, /* mode */
+ 'C' - '@', /* interrupt character */
+ '\\' - '@', /* quit char */
+ 'Q' - '@', /* start char */
+ 'S' - '@', /* stop char */
+ 'D' - '@', /* EOF */
+ -1, /* brk */
+ (LCRTBS | LCRTERA | LCRTKIL | LCTLECH), /* local mode word */
+ 'Z' - '@', /* process stop */
+ 'Y' - '@', /* delayed stop */
+ 'R' - '@', /* reprint line */
+ 'O' - '@', /* flush output */
+ 'W' - '@', /* word erase */
+ 'V' - '@' /* literal next char */
};
#endif
@@ -145,62 +141,59 @@ default_sg =
extern struct rio_info *p;
-int
-riotopen(struct tty_struct * tty, struct file * filp)
+int riotopen(struct tty_struct *tty, struct file *filp)
{
register uint SysPort;
int Modem;
int repeat_this = 250;
- struct Port *PortP; /* pointer to the port structure */
+ struct Port *PortP; /* pointer to the port structure */
unsigned long flags;
int retval = 0;
- func_enter ();
+ func_enter();
/* Make sure driver_data is NULL in case the rio isn't booted jet. Else gs_close
is going to oops.
- */
+ */
tty->driver_data = NULL;
-
+
SysPort = rio_minor(tty);
- Modem = rio_ismodem(tty);
+ Modem = rio_ismodem(tty);
- if ( p->RIOFailed ) {
- rio_dprintk (RIO_DEBUG_TTY, "System initialisation failed\n");
+ if (p->RIOFailed) {
+ rio_dprintk(RIO_DEBUG_TTY, "System initialisation failed\n");
pseterr(ENXIO);
- func_exit ();
+ func_exit();
return -ENXIO;
}
- rio_dprintk (RIO_DEBUG_TTY, "port open SysPort %d (%s) (mapped:%d)\n",
- SysPort, Modem ? "Modem" : "tty",
- p->RIOPortp[SysPort]->Mapped);
+ rio_dprintk(RIO_DEBUG_TTY, "port open SysPort %d (%s) (mapped:%d)\n", SysPort, Modem ? "Modem" : "tty", p->RIOPortp[SysPort]->Mapped);
/*
- ** Validate that we have received a legitimate request.
- ** Currently, just check that we are opening a port on
- ** a host card that actually exists, and that the port
- ** has been mapped onto a host.
- */
+ ** Validate that we have received a legitimate request.
+ ** Currently, just check that we are opening a port on
+ ** a host card that actually exists, and that the port
+ ** has been mapped onto a host.
+ */
if (SysPort >= RIO_PORTS) { /* out of range ? */
- rio_dprintk (RIO_DEBUG_TTY, "Illegal port number %d\n",SysPort);
+ rio_dprintk(RIO_DEBUG_TTY, "Illegal port number %d\n", SysPort);
pseterr(ENXIO);
func_exit();
return -ENXIO;
}
/*
- ** Grab pointer to the port stucture
- */
+	 ** Grab pointer to the port structure
+ */
PortP = p->RIOPortp[SysPort]; /* Get control struc */
- rio_dprintk (RIO_DEBUG_TTY, "PortP: %p\n", PortP);
- if ( !PortP->Mapped ) { /* we aren't mapped yet! */
+ rio_dprintk(RIO_DEBUG_TTY, "PortP: %p\n", PortP);
+ if (!PortP->Mapped) { /* we aren't mapped yet! */
/*
- ** The system doesn't know which RTA this port
- ** corresponds to.
- */
- rio_dprintk (RIO_DEBUG_TTY, "port not mapped into system\n");
- func_exit ();
+ ** The system doesn't know which RTA this port
+ ** corresponds to.
+ */
+ rio_dprintk(RIO_DEBUG_TTY, "port not mapped into system\n");
+ func_exit();
pseterr(ENXIO);
return -ENXIO;
}
@@ -210,132 +203,131 @@ riotopen(struct tty_struct * tty, struct file * filp)
PortP->gs.tty = tty;
PortP->gs.count++;
- rio_dprintk (RIO_DEBUG_TTY, "%d bytes in tx buffer\n",
- PortP->gs.xmit_cnt);
+ rio_dprintk(RIO_DEBUG_TTY, "%d bytes in tx buffer\n", PortP->gs.xmit_cnt);
- retval = gs_init_port (&PortP->gs);
+ retval = gs_init_port(&PortP->gs);
if (retval) {
PortP->gs.count--;
return -ENXIO;
}
/*
- ** If the host hasn't been booted yet, then
- ** fail
- */
- if ( (PortP->HostP->Flags & RUN_STATE) != RC_RUNNING ) {
- rio_dprintk (RIO_DEBUG_TTY, "Host not running\n");
+ ** If the host hasn't been booted yet, then
+ ** fail
+ */
+ if ((PortP->HostP->Flags & RUN_STATE) != RC_RUNNING) {
+ rio_dprintk(RIO_DEBUG_TTY, "Host not running\n");
pseterr(ENXIO);
- func_exit ();
+ func_exit();
return -ENXIO;
}
/*
- ** If the RTA has not booted yet and the user has choosen to block
- ** until the RTA is present then we must spin here waiting for
- ** the RTA to boot.
- */
+	 ** If the RTA has not booted yet and the user has chosen to block
+ ** until the RTA is present then we must spin here waiting for
+ ** the RTA to boot.
+ */
#if 0
if (!(PortP->HostP->Mapping[PortP->RupNum].Flags & RTA_BOOTED)) {
if (PortP->WaitUntilBooted) {
- rio_dprintk (RIO_DEBUG_TTY, "Waiting for RTA to boot\n");
+ rio_dprintk(RIO_DEBUG_TTY, "Waiting for RTA to boot\n");
do {
if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) {
- rio_dprintk (RIO_DEBUG_TTY, "RTA EINTR in delay \n");
- func_exit ();
+ rio_dprintk(RIO_DEBUG_TTY, "RTA EINTR in delay \n");
+ func_exit();
return -EINTR;
}
- if (repeat_this -- <= 0) {
- rio_dprintk (RIO_DEBUG_TTY, "Waiting for RTA to boot timeout\n");
- RIOPreemptiveCmd(p, PortP, FCLOSE );
+ if (repeat_this-- <= 0) {
+ rio_dprintk(RIO_DEBUG_TTY, "Waiting for RTA to boot timeout\n");
+ RIOPreemptiveCmd(p, PortP, FCLOSE);
pseterr(EINTR);
- func_exit ();
+ func_exit();
return -EIO;
}
- } while(!(PortP->HostP->Mapping[PortP->RupNum].Flags & RTA_BOOTED));
- rio_dprintk (RIO_DEBUG_TTY, "RTA has been booted\n");
+ } while (!(PortP->HostP->Mapping[PortP->RupNum].Flags & RTA_BOOTED));
+ rio_dprintk(RIO_DEBUG_TTY, "RTA has been booted\n");
} else {
- rio_dprintk (RIO_DEBUG_TTY, "RTA never booted\n");
+ rio_dprintk(RIO_DEBUG_TTY, "RTA never booted\n");
pseterr(ENXIO);
- func_exit ();
+ func_exit();
return 0;
}
}
#else
/* I find the above code a bit hairy. I find the below code
- easier to read and shorter. Now, if it works too that would
+ easier to read and shorter. Now, if it works too that would
be great... -- REW
- */
- rio_dprintk (RIO_DEBUG_TTY, "Checking if RTA has booted... \n");
+ */
+ rio_dprintk(RIO_DEBUG_TTY, "Checking if RTA has booted... \n");
while (!(PortP->HostP->Mapping[PortP->RupNum].Flags & RTA_BOOTED)) {
- if (!PortP->WaitUntilBooted) {
- rio_dprintk (RIO_DEBUG_TTY, "RTA never booted\n");
- func_exit ();
- return -ENXIO;
- }
-
- /* Under Linux you'd normally use a wait instead of this
- busy-waiting. I'll stick with the old implementation for
- now. --REW
- */
- if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) {
- rio_dprintk (RIO_DEBUG_TTY, "RTA_wait_for_boot: EINTR in delay \n");
- func_exit ();
- return -EINTR;
- }
- if (repeat_this -- <= 0) {
- rio_dprintk (RIO_DEBUG_TTY, "Waiting for RTA to boot timeout\n");
- func_exit ();
- return -EIO;
- }
+ if (!PortP->WaitUntilBooted) {
+ rio_dprintk(RIO_DEBUG_TTY, "RTA never booted\n");
+ func_exit();
+ return -ENXIO;
+ }
+
+ /* Under Linux you'd normally use a wait instead of this
+ busy-waiting. I'll stick with the old implementation for
+ now. --REW
+ */
+ if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) {
+ rio_dprintk(RIO_DEBUG_TTY, "RTA_wait_for_boot: EINTR in delay \n");
+ func_exit();
+ return -EINTR;
+ }
+ if (repeat_this-- <= 0) {
+ rio_dprintk(RIO_DEBUG_TTY, "Waiting for RTA to boot timeout\n");
+ func_exit();
+ return -EIO;
+ }
}
- rio_dprintk (RIO_DEBUG_TTY, "RTA has been booted\n");
+ rio_dprintk(RIO_DEBUG_TTY, "RTA has been booted\n");
#endif
#if 0
- tp = PortP->TtyP; /* get tty struct */
+ tp = PortP->TtyP; /* get tty struct */
#endif
rio_spin_lock_irqsave(&PortP->portSem, flags);
- if ( p->RIOHalted ) {
+ if (p->RIOHalted) {
goto bombout;
}
#if 0
retval = gs_init_port(&PortP->gs);
- if (retval){
- func_exit ();
+ if (retval) {
+ func_exit();
return retval;
}
#endif
/*
- ** If the port is in the final throws of being closed,
- ** we should wait here (politely), waiting
- ** for it to finish, so that it doesn't close us!
- */
- while ( (PortP->State & RIO_CLOSING) && !p->RIOHalted ) {
- rio_dprintk (RIO_DEBUG_TTY, "Waiting for RIO_CLOSING to go away\n");
- if (repeat_this -- <= 0) {
- rio_dprintk (RIO_DEBUG_TTY, "Waiting for not idle closed broken by signal\n");
- RIOPreemptiveCmd(p, PortP, FCLOSE );
+	 ** If the port is in the final throes of being closed,
+ ** we should wait here (politely), waiting
+ ** for it to finish, so that it doesn't close us!
+ */
+ while ((PortP->State & RIO_CLOSING) && !p->RIOHalted) {
+ rio_dprintk(RIO_DEBUG_TTY, "Waiting for RIO_CLOSING to go away\n");
+ if (repeat_this-- <= 0) {
+ rio_dprintk(RIO_DEBUG_TTY, "Waiting for not idle closed broken by signal\n");
+ RIOPreemptiveCmd(p, PortP, FCLOSE);
retval = -EINTR;
goto bombout;
}
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) {
- rio_spin_lock_irqsave(&PortP->portSem, flags);
+ rio_spin_lock_irqsave(&PortP->portSem, flags);
retval = -EINTR;
goto bombout;
}
- rio_spin_lock_irqsave(&PortP->portSem, flags);
+ rio_spin_lock_irqsave(&PortP->portSem, flags);
}
- if ( !PortP->Mapped ) {
- rio_dprintk (RIO_DEBUG_TTY, "Port unmapped while closing!\n");
+ if (!PortP->Mapped) {
+ rio_dprintk(RIO_DEBUG_TTY, "Port unmapped while closing!\n");
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
retval = -ENXIO;
- func_exit ();
+ func_exit();
return retval;
}
- if ( p->RIOHalted ) {
+ if (p->RIOHalted) {
goto bombout;
}
@@ -346,16 +338,16 @@ riotopen(struct tty_struct * tty, struct file * filp)
*/
/* Uh? Suppose I turn these on and then another process opens
the port again? The flags get cleared! Not good. -- REW */
- if ( !(PortP->State & (RIO_LOPEN | RIO_MOPEN)) ) {
- PortP->Config &= ~(RIO_CTSFLOW|RIO_RTSFLOW);
+ if (!(PortP->State & (RIO_LOPEN | RIO_MOPEN))) {
+ PortP->Config &= ~(RIO_CTSFLOW | RIO_RTSFLOW);
}
if (!(PortP->firstOpen)) { /* First time ? */
- rio_dprintk (RIO_DEBUG_TTY, "First open for this port\n");
-
+ rio_dprintk(RIO_DEBUG_TTY, "First open for this port\n");
+
PortP->firstOpen++;
- PortP->CookMode = 0; /* XXX RIOCookMode(tp); */
+ PortP->CookMode = 0; /* XXX RIOCookMode(tp); */
PortP->InUse = NOT_INUSE;
/* Tentative fix for bug PR27. Didn't work. */
@@ -363,26 +355,26 @@ riotopen(struct tty_struct * tty, struct file * filp)
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
#ifdef NEED_THIS
- ttyseth(PortP, tp, (struct old_sgttyb *)&default_sg);
+ ttyseth(PortP, tp, (struct old_sgttyb *) &default_sg);
#endif
/* Someone explain to me why this delay/config is
- here. If I read the docs correctly the "open"
- command piggybacks the parameters immediately.
+ here. If I read the docs correctly the "open"
+ command piggybacks the parameters immediately.
-- REW */
- RIOParam(PortP,OPEN,Modem,OK_TO_SLEEP); /* Open the port */
+ RIOParam(PortP, OPEN, Modem, OK_TO_SLEEP); /* Open the port */
#if 0
/* This delay of 1 second was annoying. I removed it. -- REW */
- RIODelay(PortP, HUNDRED_MS*10);
- RIOParam(PortP,CONFIG,Modem,OK_TO_SLEEP); /* Config the port */
+ RIODelay(PortP, HUNDRED_MS * 10);
+ RIOParam(PortP, CONFIG, Modem, OK_TO_SLEEP); /* Config the port */
#endif
rio_spin_lock_irqsave(&PortP->portSem, flags);
/*
- ** wait for the port to be not closed.
- */
- while ( !(PortP->PortState & PORT_ISOPEN) && !p->RIOHalted ) {
- rio_dprintk (RIO_DEBUG_TTY, "Waiting for PORT_ISOPEN-currently %x\n",PortP->PortState);
+ ** wait for the port to be not closed.
+ */
+ while (!(PortP->PortState & PORT_ISOPEN) && !p->RIOHalted) {
+ rio_dprintk(RIO_DEBUG_TTY, "Waiting for PORT_ISOPEN-currently %x\n", PortP->PortState);
/*
** 15.10.1998 ARG - ESIL 0759
** (Part) fix for port being trashed when opened whilst RTA "disconnected"
@@ -399,115 +391,109 @@ riotopen(struct tty_struct * tty, struct file * filp)
*/
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) {
- rio_dprintk (RIO_DEBUG_TTY, "Waiting for open to finish broken by signal\n");
- RIOPreemptiveCmd(p, PortP, FCLOSE );
- func_exit ();
+ rio_dprintk(RIO_DEBUG_TTY, "Waiting for open to finish broken by signal\n");
+ RIOPreemptiveCmd(p, PortP, FCLOSE);
+ func_exit();
return -EINTR;
}
rio_spin_lock_irqsave(&PortP->portSem, flags);
}
- if ( p->RIOHalted ) {
- retval = -EIO;
-bombout:
- /* RIOClearUp( PortP ); */
+ if (p->RIOHalted) {
+ retval = -EIO;
+ bombout:
+ /* RIOClearUp( PortP ); */
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
return retval;
}
- rio_dprintk (RIO_DEBUG_TTY, "PORT_ISOPEN found\n");
+ rio_dprintk(RIO_DEBUG_TTY, "PORT_ISOPEN found\n");
}
-
-#ifdef MODEM_SUPPORT
+#ifdef MODEM_SUPPORT
if (Modem) {
- rio_dprintk (RIO_DEBUG_TTY, "Modem - test for carrier\n");
+ rio_dprintk(RIO_DEBUG_TTY, "Modem - test for carrier\n");
/*
- ** ACTION
- ** insert test for carrier here. -- ???
- ** I already see that test here. What's the deal? -- REW
- */
- if ((PortP->gs.tty->termios->c_cflag & CLOCAL) || (PortP->ModemState & MSVR1_CD))
- {
- rio_dprintk (RIO_DEBUG_TTY, "open(%d) Modem carr on\n", SysPort);
+ ** ACTION
+ ** insert test for carrier here. -- ???
+ ** I already see that test here. What's the deal? -- REW
+ */
+ if ((PortP->gs.tty->termios->c_cflag & CLOCAL) || (PortP->ModemState & MSVR1_CD)) {
+ rio_dprintk(RIO_DEBUG_TTY, "open(%d) Modem carr on\n", SysPort);
/*
- tp->tm.c_state |= CARR_ON;
- wakeup((caddr_t) &tp->tm.c_canq);
- */
+ tp->tm.c_state |= CARR_ON;
+ wakeup((caddr_t) &tp->tm.c_canq);
+ */
PortP->State |= RIO_CARR_ON;
- wake_up_interruptible (&PortP->gs.open_wait);
- }
- else /* no carrier - wait for DCD */
- {
- /*
- while (!(PortP->gs.tty->termios->c_state & CARR_ON) &&
- !(filp->f_flags & O_NONBLOCK) && !p->RIOHalted )
- */
- while (!(PortP->State & RIO_CARR_ON) &&
- !(filp->f_flags & O_NONBLOCK) && !p->RIOHalted ) {
-
- rio_dprintk (RIO_DEBUG_TTY, "open(%d) sleeping for carr on\n",SysPort);
+ wake_up_interruptible(&PortP->gs.open_wait);
+ } else { /* no carrier - wait for DCD */
+
+ /*
+ while (!(PortP->gs.tty->termios->c_state & CARR_ON) &&
+ !(filp->f_flags & O_NONBLOCK) && !p->RIOHalted )
+ */
+ while (!(PortP->State & RIO_CARR_ON) && !(filp->f_flags & O_NONBLOCK) && !p->RIOHalted) {
+
+ rio_dprintk(RIO_DEBUG_TTY, "open(%d) sleeping for carr on\n", SysPort);
/*
- PortP->gs.tty->termios->c_state |= WOPEN;
- */
+ PortP->gs.tty->termios->c_state |= WOPEN;
+ */
PortP->State |= RIO_WOPEN;
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- if (RIODelay (PortP, HUNDRED_MS) == RIO_FAIL)
+ if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL)
#if 0
- if ( sleep((caddr_t)&tp->tm.c_canqo, TTIPRI|PCATCH))
+ if (sleep((caddr_t) & tp->tm.c_canqo, TTIPRI | PCATCH))
#endif
- {
- /*
- ** ACTION: verify that this is a good thing
- ** to do here. -- ???
- ** I think it's OK. -- REW
- */
- rio_dprintk (RIO_DEBUG_TTY, "open(%d) sleeping for carr broken by signal\n",
- SysPort);
- RIOPreemptiveCmd( p, PortP, FCLOSE );
- /*
- tp->tm.c_state &= ~WOPEN;
- */
- PortP->State &= ~RIO_WOPEN;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- func_exit ();
- return -EINTR;
- }
+ {
+ /*
+ ** ACTION: verify that this is a good thing
+ ** to do here. -- ???
+ ** I think it's OK. -- REW
+ */
+ rio_dprintk(RIO_DEBUG_TTY, "open(%d) sleeping for carr broken by signal\n", SysPort);
+ RIOPreemptiveCmd(p, PortP, FCLOSE);
+ /*
+ tp->tm.c_state &= ~WOPEN;
+ */
+ PortP->State &= ~RIO_WOPEN;
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ func_exit();
+ return -EINTR;
+ }
}
PortP->State &= ~RIO_WOPEN;
}
- if ( p->RIOHalted )
+ if (p->RIOHalted)
goto bombout;
- rio_dprintk (RIO_DEBUG_TTY, "Setting RIO_MOPEN\n");
+ rio_dprintk(RIO_DEBUG_TTY, "Setting RIO_MOPEN\n");
PortP->State |= RIO_MOPEN;
- }
- else
+ } else
#endif
{
/*
- ** ACTION
- ** Direct line open - force carrier (will probably mean
- ** that sleeping Modem line fubar)
- */
+ ** ACTION
+ ** Direct line open - force carrier (will probably mean
+ ** that sleeping Modem line fubar)
+ */
PortP->State |= RIO_LOPEN;
}
- if ( p->RIOHalted ) {
+ if (p->RIOHalted) {
goto bombout;
}
- rio_dprintk (RIO_DEBUG_TTY, "high level open done\n");
+ rio_dprintk(RIO_DEBUG_TTY, "high level open done\n");
#ifdef STATS
PortP->Stat.OpenCnt++;
#endif
/*
- ** Count opens for port statistics reporting
- */
+ ** Count opens for port statistics reporting
+ */
if (PortP->statsGather)
PortP->opens++;
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- rio_dprintk (RIO_DEBUG_TTY, "Returning from open\n");
- func_exit ();
+ rio_dprintk(RIO_DEBUG_TTY, "Returning from open\n");
+ func_exit();
return 0;
}
@@ -516,36 +502,35 @@ bombout:
** The operating system thinks that this is last close for the device.
** As there are two interfaces to the port (Modem and tty), we need to
** check that both are closed before we close the device.
-*/
-int
-riotclose(void *ptr)
+*/
+int riotclose(void *ptr)
{
#if 0
register uint SysPort = dev;
- struct ttystatics *tp; /* pointer to our ttystruct */
+ struct ttystatics *tp; /* pointer to our ttystruct */
#endif
struct Port *PortP = ptr; /* pointer to the port structure */
int deleted = 0;
- int try = -1; /* Disable the timeouts by setting them to -1 */
- int repeat_this = -1; /* Congrats to those having 15 years of
- uptime! (You get to break the driver.) */
+ int try = -1; /* Disable the timeouts by setting them to -1 */
+ int repeat_this = -1; /* Congrats to those having 15 years of
+ uptime! (You get to break the driver.) */
unsigned long end_time;
- struct tty_struct * tty;
+ struct tty_struct *tty;
unsigned long flags;
int Modem;
int rv = 0;
-
- rio_dprintk (RIO_DEBUG_TTY, "port close SysPort %d\n",PortP->PortNum);
+
+ rio_dprintk(RIO_DEBUG_TTY, "port close SysPort %d\n", PortP->PortNum);
/* PortP = p->RIOPortp[SysPort]; */
- rio_dprintk (RIO_DEBUG_TTY, "Port is at address 0x%x\n",(int)PortP);
- /* tp = PortP->TtyP;*/ /* Get tty */
+ rio_dprintk(RIO_DEBUG_TTY, "Port is at address 0x%x\n", (int) PortP);
+ /* tp = PortP->TtyP; *//* Get tty */
tty = PortP->gs.tty;
- rio_dprintk (RIO_DEBUG_TTY, "TTY is at address 0x%x\n",(int)tty);
+ rio_dprintk(RIO_DEBUG_TTY, "TTY is at address 0x%x\n", (int) tty);
- if (PortP->gs.closing_wait)
+ if (PortP->gs.closing_wait)
end_time = jiffies + PortP->gs.closing_wait;
- else
+ else
end_time = jiffies + MAX_SCHEDULE_TIMEOUT;
Modem = rio_ismodem(tty);
@@ -553,48 +538,48 @@ riotclose(void *ptr)
/* What F.CKING cache? Even then, a higly idle multiprocessor,
system with large caches this won't work . Better find out when
this doesn't work asap, and fix the cause. -- REW */
-
- RIODelay(PortP, HUNDRED_MS*10); /* To flush the cache */
+
+ RIODelay(PortP, HUNDRED_MS * 10); /* To flush the cache */
#endif
rio_spin_lock_irqsave(&PortP->portSem, flags);
/*
- ** Setting this flag will make any process trying to open
- ** this port block until we are complete closing it.
- */
+ ** Setting this flag will make any process trying to open
+	 ** this port block until we have finished closing it.
+ */
PortP->State |= RIO_CLOSING;
- if ( (PortP->State & RIO_DELETED) ) {
- rio_dprintk (RIO_DEBUG_TTY, "Close on deleted RTA\n");
+ if ((PortP->State & RIO_DELETED)) {
+ rio_dprintk(RIO_DEBUG_TTY, "Close on deleted RTA\n");
deleted = 1;
}
-
- if ( p->RIOHalted ) {
- RIOClearUp( PortP );
+
+ if (p->RIOHalted) {
+ RIOClearUp(PortP);
rv = -EIO;
goto close_end;
}
- rio_dprintk (RIO_DEBUG_TTY, "Clear bits\n");
+ rio_dprintk(RIO_DEBUG_TTY, "Clear bits\n");
/*
- ** clear the open bits for this device
- */
+ ** clear the open bits for this device
+ */
PortP->State &= (Modem ? ~RIO_MOPEN : ~RIO_LOPEN);
PortP->State &= ~RIO_CARR_ON;
PortP->ModemState &= ~MSVR1_CD;
/*
- ** If the device was open as both a Modem and a tty line
- ** then we need to wimp out here, as the port has not really
- ** been finally closed (gee, whizz!) The test here uses the
- ** bit for the OTHER mode of operation, to see if THAT is
- ** still active!
- */
- if ( (PortP->State & (RIO_LOPEN|RIO_MOPEN)) ) {
+ ** If the device was open as both a Modem and a tty line
+ ** then we need to wimp out here, as the port has not really
+ ** been finally closed (gee, whizz!) The test here uses the
+ ** bit for the OTHER mode of operation, to see if THAT is
+ ** still active!
+ */
+ if ((PortP->State & (RIO_LOPEN | RIO_MOPEN))) {
/*
- ** The port is still open for the other task -
- ** return, pretending that we are still active.
- */
- rio_dprintk (RIO_DEBUG_TTY, "Channel %d still open !\n",PortP->PortNum);
+ ** The port is still open for the other task -
+ ** return, pretending that we are still active.
+ */
+ rio_dprintk(RIO_DEBUG_TTY, "Channel %d still open !\n", PortP->PortNum);
PortP->State &= ~RIO_CLOSING;
if (PortP->firstOpen)
PortP->firstOpen--;
@@ -602,48 +587,47 @@ riotclose(void *ptr)
return -EIO;
}
- rio_dprintk (RIO_DEBUG_TTY, "Closing down - everything must go!\n");
+ rio_dprintk(RIO_DEBUG_TTY, "Closing down - everything must go!\n");
PortP->State &= ~RIO_DYNOROD;
/*
- ** This is where we wait for the port
- ** to drain down before closing. Bye-bye....
- ** (We never meant to do this)
- */
- rio_dprintk (RIO_DEBUG_TTY, "Timeout 1 starts\n");
+ ** This is where we wait for the port
+ ** to drain down before closing. Bye-bye....
+ ** (We never meant to do this)
+ */
+ rio_dprintk(RIO_DEBUG_TTY, "Timeout 1 starts\n");
if (!deleted)
- while ( (PortP->InUse != NOT_INUSE) && !p->RIOHalted &&
- (PortP->TxBufferIn != PortP->TxBufferOut) ) {
- cprintf("Need to flush the ttyport\n");
- if (repeat_this -- <= 0) {
- rv = -EINTR;
- rio_dprintk (RIO_DEBUG_TTY, "Waiting for not idle closed broken by signal\n");
- RIOPreemptiveCmd(p, PortP, FCLOSE);
- goto close_end;
- }
- rio_dprintk (RIO_DEBUG_TTY, "Calling timeout to flush in closing\n");
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- if (RIODelay_ni(PortP, HUNDRED_MS*10) == RIO_FAIL) {
- rio_dprintk (RIO_DEBUG_TTY, "RTA EINTR in delay \n");
- rv = -EINTR;
+ while ((PortP->InUse != NOT_INUSE) && !p->RIOHalted && (PortP->TxBufferIn != PortP->TxBufferOut)) {
+ cprintf("Need to flush the ttyport\n");
+ if (repeat_this-- <= 0) {
+ rv = -EINTR;
+ rio_dprintk(RIO_DEBUG_TTY, "Waiting for not idle closed broken by signal\n");
+ RIOPreemptiveCmd(p, PortP, FCLOSE);
+ goto close_end;
+ }
+ rio_dprintk(RIO_DEBUG_TTY, "Calling timeout to flush in closing\n");
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ if (RIODelay_ni(PortP, HUNDRED_MS * 10) == RIO_FAIL) {
+ rio_dprintk(RIO_DEBUG_TTY, "RTA EINTR in delay \n");
+ rv = -EINTR;
+ rio_spin_lock_irqsave(&PortP->portSem, flags);
+ goto close_end;
+ }
rio_spin_lock_irqsave(&PortP->portSem, flags);
- goto close_end;
}
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- }
PortP->TxBufferIn = PortP->TxBufferOut = 0;
repeat_this = 0xff;
PortP->InUse = 0;
- if ( (PortP->State & (RIO_LOPEN|RIO_MOPEN)) ) {
+ if ((PortP->State & (RIO_LOPEN | RIO_MOPEN))) {
/*
- ** The port has been re-opened for the other task -
- ** return, pretending that we are still active.
- */
- rio_dprintk (RIO_DEBUG_TTY, "Channel %d re-open!\n", PortP->PortNum);
+ ** The port has been re-opened for the other task -
+ ** return, pretending that we are still active.
+ */
+ rio_dprintk(RIO_DEBUG_TTY, "Channel %d re-open!\n", PortP->PortNum);
PortP->State &= ~RIO_CLOSING;
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
if (PortP->firstOpen)
@@ -651,8 +635,8 @@ riotclose(void *ptr)
return -EIO;
}
- if ( p->RIOHalted ) {
- RIOClearUp( PortP );
+ if (p->RIOHalted) {
+ RIOClearUp(PortP);
goto close_end;
}
@@ -665,57 +649,56 @@ riotclose(void *ptr)
}
if (!deleted)
- while (try && (PortP->PortState & PORT_ISOPEN)) {
- try--;
- if (time_after (jiffies, end_time)) {
- rio_dprintk (RIO_DEBUG_TTY, "Run out of tries - force the bugger shut!\n" );
- RIOPreemptiveCmd(p, PortP,FCLOSE);
- break;
- }
- rio_dprintk (RIO_DEBUG_TTY, "Close: PortState:ISOPEN is %d\n",
- PortP->PortState & PORT_ISOPEN);
+ while (try && (PortP->PortState & PORT_ISOPEN)) {
+ try--;
+ if (time_after(jiffies, end_time)) {
+ rio_dprintk(RIO_DEBUG_TTY, "Run out of tries - force the bugger shut!\n");
+ RIOPreemptiveCmd(p, PortP, FCLOSE);
+ break;
+ }
+ rio_dprintk(RIO_DEBUG_TTY, "Close: PortState:ISOPEN is %d\n", PortP->PortState & PORT_ISOPEN);
- if ( p->RIOHalted ) {
- RIOClearUp( PortP );
- goto close_end;
- }
- if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) {
- rio_dprintk (RIO_DEBUG_TTY, "RTA EINTR in delay \n");
- RIOPreemptiveCmd(p, PortP,FCLOSE);
- break;
+ if (p->RIOHalted) {
+ RIOClearUp(PortP);
+ goto close_end;
+ }
+ if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) {
+ rio_dprintk(RIO_DEBUG_TTY, "RTA EINTR in delay \n");
+ RIOPreemptiveCmd(p, PortP, FCLOSE);
+ break;
+ }
}
- }
rio_spin_lock_irqsave(&PortP->portSem, flags);
- rio_dprintk (RIO_DEBUG_TTY, "Close: try was %d on completion\n", try );
-
+ rio_dprintk(RIO_DEBUG_TTY, "Close: try was %d on completion\n", try);
+
/* RIOPreemptiveCmd(p, PortP, FCLOSE); */
/*
** 15.10.1998 ARG - ESIL 0761 part fix
** RIO has it's own CTSFLOW and RTSFLOW flags in 'Config' in the port structure,** we need to make sure that the flags are clear when the port is opened.
*/
- PortP->Config &= ~(RIO_CTSFLOW|RIO_RTSFLOW);
+ PortP->Config &= ~(RIO_CTSFLOW | RIO_RTSFLOW);
#ifdef STATS
PortP->Stat.CloseCnt++;
#endif
/*
- ** Count opens for port statistics reporting
- */
+	 ** Count closes for port statistics reporting
+ */
if (PortP->statsGather)
PortP->closes++;
-close_end:
+ close_end:
/* XXX: Why would a "DELETED" flag be reset here? I'd have
thought that a "deleted" flag means that the port was
permanently gone, but here we can make it reappear by it
being in close during the "deletion".
- */
- PortP->State &= ~(RIO_CLOSING|RIO_DELETED);
+ */
+ PortP->State &= ~(RIO_CLOSING | RIO_DELETED);
if (PortP->firstOpen)
PortP->firstOpen--;
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- rio_dprintk (RIO_DEBUG_TTY, "Return from close\n");
+ rio_dprintk(RIO_DEBUG_TTY, "Return from close\n");
return rv;
}
@@ -728,52 +711,50 @@ close_end:
** COOK_MEDIUM if the card can do all the processing necessary.
*/
#if 0
-static int
-RIOCookMode(struct ttystatics *tp)
+static int RIOCookMode(struct ttystatics *tp)
{
/*
- ** We can't handle tm.c_mstate != 0 on SCO
- ** We can't handle mapping
- ** We can't handle non-ttwrite line disc.
- ** We can't handle lflag XCASE
- ** We can handle oflag OPOST & (OCRNL, ONLCR, TAB3)
- */
+ ** We can't handle tm.c_mstate != 0 on SCO
+ ** We can't handle mapping
+ ** We can't handle non-ttwrite line disc.
+ ** We can't handle lflag XCASE
+ ** We can handle oflag OPOST & (OCRNL, ONLCR, TAB3)
+ */
#ifdef CHECK
- CheckTtyP( tp );
+ CheckTtyP(tp);
#endif
if (!(tp->tm.c_oflag & OPOST)) /* No post processing */
return COOK_RAW; /* Raw mode o/p */
- if ( tp->tm.c_lflag & XCASE )
+ if (tp->tm.c_lflag & XCASE)
return COOK_WELL; /* Use line disc */
- if (tp->tm.c_oflag & ~(OPOST | ONLCR | OCRNL | TAB3 ) )
+ if (tp->tm.c_oflag & ~(OPOST | ONLCR | OCRNL | TAB3))
return COOK_WELL; /* Use line disc for strange modes */
- if ( tp->tm.c_oflag == OPOST ) /* If only OPOST is set, do RAW */
+ if (tp->tm.c_oflag == OPOST) /* If only OPOST is set, do RAW */
return COOK_RAW;
/*
- ** So, we need to output process!
- */
+ ** So, we need to output process!
+ */
return COOK_MEDIUM;
}
#endif
-static void
-RIOClearUp(PortP)
+static void RIOClearUp(PortP)
struct Port *PortP;
{
- rio_dprintk (RIO_DEBUG_TTY, "RIOHalted set\n");
- PortP->Config = 0; /* Direct semaphore */
+ rio_dprintk(RIO_DEBUG_TTY, "RIOHalted set\n");
+ PortP->Config = 0; /* Direct semaphore */
PortP->PortState = 0;
PortP->firstOpen = 0;
PortP->FlushCmdBodge = 0;
PortP->ModemState = PortP->CookMode = 0;
PortP->Mapped = 0;
PortP->WflushFlag = 0;
- PortP->MagicFlags = 0;
+ PortP->MagicFlags = 0;
PortP->RxDataStart = 0;
PortP->TxBufferIn = 0;
PortP->TxBufferOut = 0;
@@ -788,33 +769,31 @@ struct Port *PortP;
** Other values of len aren't allowed, and will cause
** a panic.
*/
-int RIOShortCommand(struct rio_info *p, struct Port *PortP,
- int command, int len, int arg)
+int RIOShortCommand(struct rio_info *p, struct Port *PortP, int command, int len, int arg)
{
PKT *PacketP;
- int retries = 20; /* at 10 per second -> 2 seconds */
+ int retries = 20; /* at 10 per second -> 2 seconds */
unsigned long flags;
- rio_dprintk (RIO_DEBUG_TTY, "entering shortcommand.\n");
+ rio_dprintk(RIO_DEBUG_TTY, "entering shortcommand.\n");
#ifdef CHECK
- CheckPortP( PortP );
- if ( len < 1 || len > 2 )
- cprintf(("STUPID LENGTH %d\n",len));
+ CheckPortP(PortP);
+ if (len < 1 || len > 2)
+ cprintf(("STUPID LENGTH %d\n", len));
#endif
- if ( PortP->State & RIO_DELETED ) {
- rio_dprintk (RIO_DEBUG_TTY, "Short command to deleted RTA ignored\n");
+ if (PortP->State & RIO_DELETED) {
+ rio_dprintk(RIO_DEBUG_TTY, "Short command to deleted RTA ignored\n");
return RIO_FAIL;
}
rio_spin_lock_irqsave(&PortP->portSem, flags);
/*
- ** If the port is in use for pre-emptive command, then wait for it to
- ** be free again.
- */
- while ( (PortP->InUse != NOT_INUSE) && !p->RIOHalted ) {
- rio_dprintk (RIO_DEBUG_TTY, "Waiting for not in use (%d)\n",
- retries);
+ ** If the port is in use for pre-emptive command, then wait for it to
+ ** be free again.
+ */
+ while ((PortP->InUse != NOT_INUSE) && !p->RIOHalted) {
+ rio_dprintk(RIO_DEBUG_TTY, "Waiting for not in use (%d)\n", retries);
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
if (retries-- <= 0) {
return RIO_FAIL;
@@ -824,47 +803,47 @@ int RIOShortCommand(struct rio_info *p, struct Port *PortP,
}
rio_spin_lock_irqsave(&PortP->portSem, flags);
}
- if ( PortP->State & RIO_DELETED ) {
- rio_dprintk (RIO_DEBUG_TTY, "Short command to deleted RTA ignored\n");
+ if (PortP->State & RIO_DELETED) {
+ rio_dprintk(RIO_DEBUG_TTY, "Short command to deleted RTA ignored\n");
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
return RIO_FAIL;
}
- while ( !can_add_transmit(&PacketP,PortP) && !p->RIOHalted ) {
- rio_dprintk (RIO_DEBUG_TTY, "Waiting to add short command to queue (%d)\n", retries);
+ while (!can_add_transmit(&PacketP, PortP) && !p->RIOHalted) {
+ rio_dprintk(RIO_DEBUG_TTY, "Waiting to add short command to queue (%d)\n", retries);
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
if (retries-- <= 0) {
- rio_dprintk (RIO_DEBUG_TTY, "out of tries. Failing\n");
+ rio_dprintk(RIO_DEBUG_TTY, "out of tries. Failing\n");
return RIO_FAIL;
}
- if ( RIODelay_ni(PortP, HUNDRED_MS)==RIO_FAIL ) {
+ if (RIODelay_ni(PortP, HUNDRED_MS) == RIO_FAIL) {
return RIO_FAIL;
}
rio_spin_lock_irqsave(&PortP->portSem, flags);
}
- if ( p->RIOHalted ) {
+ if (p->RIOHalted) {
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
return RIO_FAIL;
}
/*
- ** set the command byte and the argument byte
- */
- WBYTE(PacketP->data[0] , command);
+ ** set the command byte and the argument byte
+ */
+ WBYTE(PacketP->data[0], command);
- if ( len==2 )
- WBYTE(PacketP->data[1] , arg);
+ if (len == 2)
+ WBYTE(PacketP->data[1], arg);
/*
- ** set the length of the packet and set the command bit.
- */
- WBYTE(PacketP->len , PKT_CMD_BIT | len);
+ ** set the length of the packet and set the command bit.
+ */
+ WBYTE(PacketP->len, PKT_CMD_BIT | len);
add_transmit(PortP);
/*
- ** Count characters transmitted for port statistics reporting
- */
+ ** Count characters transmitted for port statistics reporting
+ */
if (PortP->statsGather)
PortP->txchars += len;
@@ -878,28 +857,26 @@ int RIOShortCommand(struct rio_info *p, struct Port *PortP,
** This is an ioctl interface. This is the twentieth century. You know what
** its all about.
*/
-int
-riotioctl(struct rio_info *p, struct tty_struct *tty, int cmd, caddr_t arg)
+int riotioctl(struct rio_info *p, struct tty_struct *tty, int cmd, caddr_t arg)
{
- register struct Port *PortP;
- register struct ttystatics *tp;
- int current;
- int ParamSemIncremented = 0;
- int old_oflag, old_cflag, old_iflag, changed, oldcook;
- int i;
- unsigned char sio_regs[5]; /* Here be magic */
- short vpix_cflag;
- short divisor;
- int baud;
- uint SysPort = rio_minor(tty);
- int Modem = rio_ismodem(tty);
- int ioctl_processed;
-
- rio_dprintk (RIO_DEBUG_TTY, "port ioctl SysPort %d command 0x%x argument 0x%x %s\n",
- SysPort, cmd, arg, Modem?"Modem":"tty") ;
-
- if ( SysPort >= RIO_PORTS ) {
- rio_dprintk (RIO_DEBUG_TTY, "Bad port number %d\n", SysPort);
+ register struct Port *PortP;
+ register struct ttystatics *tp;
+ int current;
+ int ParamSemIncremented = 0;
+ int old_oflag, old_cflag, old_iflag, changed, oldcook;
+ int i;
+ unsigned char sio_regs[5]; /* Here be magic */
+ short vpix_cflag;
+ short divisor;
+ int baud;
+ uint SysPort = rio_minor(tty);
+ int Modem = rio_ismodem(tty);
+ int ioctl_processed;
+
+ rio_dprintk(RIO_DEBUG_TTY, "port ioctl SysPort %d command 0x%x argument 0x%x %s\n", SysPort, cmd, arg, Modem ? "Modem" : "tty");
+
+ if (SysPort >= RIO_PORTS) {
+ rio_dprintk(RIO_DEBUG_TTY, "Bad port number %d\n", SysPort);
return -ENXIO;
}
@@ -912,205 +889,195 @@ riotioctl(struct rio_info *p, struct tty_struct *tty, int cmd, caddr_t arg)
PortP->Stat.IoctlCnt++;
#endif
- if ( PortP->State & RIO_DELETED ) {
+ if (PortP->State & RIO_DELETED) {
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
return -EIO;
}
- if ( p->RIOHalted ) {
- RIOClearUp( PortP );
+ if (p->RIOHalted) {
+ RIOClearUp(PortP);
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
return -EIO;
}
/*
- ** Count ioctls for port statistics reporting
- */
+ ** Count ioctls for port statistics reporting
+ */
if (PortP->statsGather)
PortP->ioctls++;
/*
- ** Specialix RIO Ioctl calls
- */
+ ** Specialix RIO Ioctl calls
+ */
switch (cmd) {
- case TCRIOTRIAD:
- if ( arg )
- PortP->State |= RIO_TRIAD_MODE;
- else
- PortP->State &= ~RIO_TRIAD_MODE;
- /*
- ** Normally, when istrip is set on a port, a config is
- ** sent to the RTA instructing the CD1400 to do the
- ** stripping. In TRIAD mode, the interrupt receive routine
- ** must do the stripping instead, since it has to detect
- ** an 8 bit function key sequence. If istrip is set with
- ** TRIAD mode on(off), and 8 bit data is being read by
- ** the port, the user then turns TRIAD mode off(on), the RTA
- ** must be reconfigured (not) to do the stripping.
- ** Hence we call RIOParam here.
- */
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- RIOParam(PortP,CONFIG,Modem,OK_TO_SLEEP);
- return 0;
+ case TCRIOTRIAD:
+ if (arg)
+ PortP->State |= RIO_TRIAD_MODE;
+ else
+ PortP->State &= ~RIO_TRIAD_MODE;
+ /*
+ ** Normally, when istrip is set on a port, a config is
+ ** sent to the RTA instructing the CD1400 to do the
+ ** stripping. In TRIAD mode, the interrupt receive routine
+ ** must do the stripping instead, since it has to detect
+ ** an 8 bit function key sequence. If istrip is set with
+ ** TRIAD mode on(off), and 8 bit data is being read by
+ ** the port, the user then turns TRIAD mode off(on), the RTA
+ ** must be reconfigured (not) to do the stripping.
+ ** Hence we call RIOParam here.
+ */
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ RIOParam(PortP, CONFIG, Modem, OK_TO_SLEEP);
+ return 0;
- case TCRIOTSTATE:
- rio_dprintk (RIO_DEBUG_TTY, "tbusy/tstop monitoring %sabled\n",
- arg ? "en" : "dis");
- /* MonitorTstate = 0 ;*/
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- RIOParam(PortP, CONFIG, Modem, OK_TO_SLEEP);
- return 0;
+ case TCRIOTSTATE:
+ rio_dprintk(RIO_DEBUG_TTY, "tbusy/tstop monitoring %sabled\n", arg ? "en" : "dis");
+ /* MonitorTstate = 0 ; */
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ RIOParam(PortP, CONFIG, Modem, OK_TO_SLEEP);
+ return 0;
- case TCRIOSTATE: /* current state of Modem input pins */
- rio_dprintk (RIO_DEBUG_TTY, "TCRIOSTATE\n");
- if (RIOPreemptiveCmd(p, PortP, MGET) == RIO_FAIL)
- rio_dprintk (RIO_DEBUG_TTY, "TCRIOSTATE command failed\n");
- PortP->State |= RIO_BUSY;
- current = PortP->ModemState;
- if ( copyout((caddr_t)&current, (int)arg,
- sizeof(current))==COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_TTY, "Copyout failed\n");
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- pseterr(EFAULT);
- }
+ case TCRIOSTATE: /* current state of Modem input pins */
+ rio_dprintk(RIO_DEBUG_TTY, "TCRIOSTATE\n");
+ if (RIOPreemptiveCmd(p, PortP, MGET) == RIO_FAIL)
+ rio_dprintk(RIO_DEBUG_TTY, "TCRIOSTATE command failed\n");
+ PortP->State |= RIO_BUSY;
+ current = PortP->ModemState;
+ if (copyout((caddr_t) & current, (int) arg, sizeof(current)) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_TTY, "Copyout failed\n");
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return 0;
+ pseterr(EFAULT);
+ }
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ return 0;
- case TCRIOMBIS: /* Set modem lines */
- case TCRIOMBIC: /* Clear modem lines */
- rio_dprintk (RIO_DEBUG_TTY, "TCRIOMBIS/TCRIOMBIC\n");
- if (cmd == TCRIOMBIS) {
- uint state;
- state = (uint)arg;
- PortP->ModemState |= (ushort)state;
- PortP->ModemLines = (ulong) arg;
- if (RIOPreemptiveCmd(p, PortP, MBIS) == RIO_FAIL)
- rio_dprintk (RIO_DEBUG_TTY,
- "TCRIOMBIS command failed\n");
- }
- else {
- uint state;
-
- state = (uint)arg;
- PortP->ModemState &= ~(ushort)state;
- PortP->ModemLines = (ulong) arg;
- if (RIOPreemptiveCmd(p, PortP, MBIC) == RIO_FAIL)
- rio_dprintk (RIO_DEBUG_TTY, "TCRIOMBIC command failed\n");
- }
- PortP->State |= RIO_BUSY;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return 0;
+ case TCRIOMBIS: /* Set modem lines */
+ case TCRIOMBIC: /* Clear modem lines */
+ rio_dprintk(RIO_DEBUG_TTY, "TCRIOMBIS/TCRIOMBIC\n");
+ if (cmd == TCRIOMBIS) {
+ uint state;
+ state = (uint) arg;
+ PortP->ModemState |= (ushort) state;
+ PortP->ModemLines = (ulong) arg;
+ if (RIOPreemptiveCmd(p, PortP, MBIS) == RIO_FAIL)
+ rio_dprintk(RIO_DEBUG_TTY, "TCRIOMBIS command failed\n");
+ } else {
+ uint state;
- case TCRIOXPON: /* set Xprint ON string */
- rio_dprintk (RIO_DEBUG_TTY, "TCRIOXPON\n");
- if ( copyin((int)arg, (caddr_t)PortP->Xprint.XpOn,
- MAX_XP_CTRL_LEN)==COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_TTY, "Copyin failed\n");
- PortP->Xprint.XpOn[0] = '\0';
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- pseterr(EFAULT);
- }
- PortP->Xprint.XpOn[MAX_XP_CTRL_LEN-1] = '\0';
- PortP->Xprint.XpLen = strlen(PortP->Xprint.XpOn)+
- strlen(PortP->Xprint.XpOff);
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return 0;
+ state = (uint) arg;
+ PortP->ModemState &= ~(ushort) state;
+ PortP->ModemLines = (ulong) arg;
+ if (RIOPreemptiveCmd(p, PortP, MBIC) == RIO_FAIL)
+ rio_dprintk(RIO_DEBUG_TTY, "TCRIOMBIC command failed\n");
+ }
+ PortP->State |= RIO_BUSY;
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ return 0;
- case TCRIOXPOFF: /* set Xprint OFF string */
- rio_dprintk (RIO_DEBUG_TTY, "TCRIOXPOFF\n");
- if ( copyin( (int)arg, (caddr_t)PortP->Xprint.XpOff,
- MAX_XP_CTRL_LEN)==COPYFAIL ) {
- rio_dprintk (RIO_DEBUG_TTY, "Copyin failed\n");
- PortP->Xprint.XpOff[0] = '\0';
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- pseterr(EFAULT);
- }
- PortP->Xprint.XpOff[MAX_XP_CTRL_LEN-1] = '\0';
- PortP->Xprint.XpLen = strlen(PortP->Xprint.XpOn)+
- strlen(PortP->Xprint.XpOff);
+ case TCRIOXPON: /* set Xprint ON string */
+ rio_dprintk(RIO_DEBUG_TTY, "TCRIOXPON\n");
+ if (copyin((int) arg, (caddr_t) PortP->Xprint.XpOn, MAX_XP_CTRL_LEN) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_TTY, "Copyin failed\n");
+ PortP->Xprint.XpOn[0] = '\0';
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return 0;
+ pseterr(EFAULT);
+ }
+ PortP->Xprint.XpOn[MAX_XP_CTRL_LEN - 1] = '\0';
+ PortP->Xprint.XpLen = strlen(PortP->Xprint.XpOn) + strlen(PortP->Xprint.XpOff);
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ return 0;
- case TCRIOXPCPS: /* set Xprint CPS string */
- rio_dprintk (RIO_DEBUG_TTY, "TCRIOXPCPS\n");
- if ( (uint)arg > p->RIOConf.MaxXpCps ||
- (uint)arg < p->RIOConf.MinXpCps ) {
- rio_dprintk (RIO_DEBUG_TTY, "%d CPS out of range\n",arg);
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- pseterr(EINVAL);
- return 0;
- }
- PortP->Xprint.XpCps = (uint)arg;
+ case TCRIOXPOFF: /* set Xprint OFF string */
+ rio_dprintk(RIO_DEBUG_TTY, "TCRIOXPOFF\n");
+ if (copyin((int) arg, (caddr_t) PortP->Xprint.XpOff, MAX_XP_CTRL_LEN) == COPYFAIL) {
+ rio_dprintk(RIO_DEBUG_TTY, "Copyin failed\n");
+ PortP->Xprint.XpOff[0] = '\0';
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return 0;
+ pseterr(EFAULT);
+ }
+ PortP->Xprint.XpOff[MAX_XP_CTRL_LEN - 1] = '\0';
+ PortP->Xprint.XpLen = strlen(PortP->Xprint.XpOn) + strlen(PortP->Xprint.XpOff);
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ return 0;
- case TCRIOXPRINT:
- rio_dprintk (RIO_DEBUG_TTY, "TCRIOXPRINT\n");
- if ( copyout((caddr_t)&PortP->Xprint, (int)arg,
- sizeof(struct Xprint))==COPYFAIL ) {
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- pseterr(EFAULT);
- }
+ case TCRIOXPCPS: /* set Xprint CPS string */
+ rio_dprintk(RIO_DEBUG_TTY, "TCRIOXPCPS\n");
+ if ((uint) arg > p->RIOConf.MaxXpCps || (uint) arg < p->RIOConf.MinXpCps) {
+ rio_dprintk(RIO_DEBUG_TTY, "%d CPS out of range\n", arg);
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ pseterr(EINVAL);
return 0;
+ }
+ PortP->Xprint.XpCps = (uint) arg;
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ return 0;
- case TCRIOIXANYON:
- rio_dprintk (RIO_DEBUG_TTY, "TCRIOIXANYON\n");
- PortP->Config |= RIO_IXANY;
+ case TCRIOXPRINT:
+ rio_dprintk(RIO_DEBUG_TTY, "TCRIOXPRINT\n");
+ if (copyout((caddr_t) & PortP->Xprint, (int) arg, sizeof(struct Xprint)) == COPYFAIL) {
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return 0;
+ pseterr(EFAULT);
+ }
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ return 0;
- case TCRIOIXANYOFF:
- rio_dprintk (RIO_DEBUG_TTY, "TCRIOIXANYOFF\n");
- PortP->Config &= ~RIO_IXANY;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return 0;
+ case TCRIOIXANYON:
+ rio_dprintk(RIO_DEBUG_TTY, "TCRIOIXANYON\n");
+ PortP->Config |= RIO_IXANY;
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ return 0;
- case TCRIOIXONON:
- rio_dprintk (RIO_DEBUG_TTY, "TCRIOIXONON\n");
- PortP->Config |= RIO_IXON;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return 0;
+ case TCRIOIXANYOFF:
+ rio_dprintk(RIO_DEBUG_TTY, "TCRIOIXANYOFF\n");
+ PortP->Config &= ~RIO_IXANY;
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ return 0;
- case TCRIOIXONOFF:
- rio_dprintk (RIO_DEBUG_TTY, "TCRIOIXONOFF\n");
- PortP->Config &= ~RIO_IXON;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return 0;
+ case TCRIOIXONON:
+ rio_dprintk(RIO_DEBUG_TTY, "TCRIOIXONON\n");
+ PortP->Config |= RIO_IXON;
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ return 0;
+
+ case TCRIOIXONOFF:
+ rio_dprintk(RIO_DEBUG_TTY, "TCRIOIXONOFF\n");
+ PortP->Config &= ~RIO_IXON;
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ return 0;
/*
** 15.10.1998 ARG - ESIL 0761 part fix
** Added support for CTS and RTS flow control ioctls :
*/
- case TCRIOCTSFLOWEN:
- rio_dprintk (RIO_DEBUG_TTY, "TCRIOCTSFLOWEN\n");
- PortP->Config |= RIO_CTSFLOW;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- RIOParam(PortP,CONFIG,Modem,OK_TO_SLEEP);
- return 0;
+ case TCRIOCTSFLOWEN:
+ rio_dprintk(RIO_DEBUG_TTY, "TCRIOCTSFLOWEN\n");
+ PortP->Config |= RIO_CTSFLOW;
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ RIOParam(PortP, CONFIG, Modem, OK_TO_SLEEP);
+ return 0;
- case TCRIOCTSFLOWDIS:
- rio_dprintk (RIO_DEBUG_TTY, "TCRIOCTSFLOWDIS\n");
- PortP->Config &= ~RIO_CTSFLOW;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- RIOParam(PortP,CONFIG,Modem,OK_TO_SLEEP);
- return 0;
+ case TCRIOCTSFLOWDIS:
+ rio_dprintk(RIO_DEBUG_TTY, "TCRIOCTSFLOWDIS\n");
+ PortP->Config &= ~RIO_CTSFLOW;
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ RIOParam(PortP, CONFIG, Modem, OK_TO_SLEEP);
+ return 0;
- case TCRIORTSFLOWEN:
- rio_dprintk (RIO_DEBUG_TTY, "TCRIORTSFLOWEN\n");
- PortP->Config |= RIO_RTSFLOW;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- RIOParam(PortP,CONFIG,Modem,OK_TO_SLEEP);
- return 0;
+ case TCRIORTSFLOWEN:
+ rio_dprintk(RIO_DEBUG_TTY, "TCRIORTSFLOWEN\n");
+ PortP->Config |= RIO_RTSFLOW;
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ RIOParam(PortP, CONFIG, Modem, OK_TO_SLEEP);
+ return 0;
- case TCRIORTSFLOWDIS:
- rio_dprintk (RIO_DEBUG_TTY, "TCRIORTSFLOWDIS\n");
- PortP->Config &= ~RIO_RTSFLOW;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- RIOParam(PortP,CONFIG,Modem,OK_TO_SLEEP);
- return 0;
+ case TCRIORTSFLOWDIS:
+ rio_dprintk(RIO_DEBUG_TTY, "TCRIORTSFLOWDIS\n");
+ PortP->Config &= ~RIO_RTSFLOW;
+ rio_spin_unlock_irqrestore(&PortP->portSem, flags);
+ RIOParam(PortP, CONFIG, Modem, OK_TO_SLEEP);
+ return 0;
/* end ESIL 0761 part fix */
@@ -1119,35 +1086,35 @@ riotioctl(struct rio_info *p, struct tty_struct *tty, int cmd, caddr_t arg)
/* Lynx IOCTLS */
switch (cmd) {
- case TIOCSETP:
- case TIOCSETN:
- case OTIOCSETP:
- case OTIOCSETN:
- ioctl_processed++;
- ttyseth(PortP, tp, (struct old_sgttyb *)arg);
- break;
- case TCSETA:
- case TCSETAW:
- case TCSETAF:
- ioctl_processed++;
- rio_dprintk (RIO_DEBUG_TTY, "NON POSIX ioctl\n");
- ttyseth_pv(PortP, tp, (struct termios *)arg, 0);
- break;
- case TCSETAP: /* posix tcsetattr() */
- case TCSETAWP: /* posix tcsetattr() */
- case TCSETAFP: /* posix tcsetattr() */
- rio_dprintk (RIO_DEBUG_TTY, "NON POSIX SYSV ioctl\n");
- ttyseth_pv(PortP, tp, (struct termios *)arg, 1);
- ioctl_processed++;
- break;
+ case TIOCSETP:
+ case TIOCSETN:
+ case OTIOCSETP:
+ case OTIOCSETN:
+ ioctl_processed++;
+ ttyseth(PortP, tp, (struct old_sgttyb *) arg);
+ break;
+ case TCSETA:
+ case TCSETAW:
+ case TCSETAF:
+ ioctl_processed++;
+ rio_dprintk(RIO_DEBUG_TTY, "NON POSIX ioctl\n");
+ ttyseth_pv(PortP, tp, (struct termios *) arg, 0);
+ break;
+ case TCSETAP: /* posix tcsetattr() */
+ case TCSETAWP: /* posix tcsetattr() */
+ case TCSETAFP: /* posix tcsetattr() */
+ rio_dprintk(RIO_DEBUG_TTY, "NON POSIX SYSV ioctl\n");
+ ttyseth_pv(PortP, tp, (struct termios *) arg, 1);
+ ioctl_processed++;
+ break;
}
/*
- ** If its any of the commands that require the port to be in the
- ** non-busy state wait until all output has drained
- */
+	 ** If it's any of the commands that require the port to be in the
+	 ** non-busy state, wait until all output has drained
+ */
if (!ioctl_processed)
- switch(cmd) {
+ switch (cmd) {
case TCSETAW:
case TCSETAF:
case TCSETA:
@@ -1171,29 +1138,29 @@ riotioctl(struct rio_info *p, struct tty_struct *tty, int cmd, caddr_t arg)
#endif
case TIOCSETD:
case TIOCSETN:
- rio_dprintk (RIO_DEBUG_TTY, "wait for non-BUSY, semaphore set\n");
+ rio_dprintk(RIO_DEBUG_TTY, "wait for non-BUSY, semaphore set\n");
/*
- ** Wait for drain here, at least as far as the double buffer
- ** being empty.
- */
+ ** Wait for drain here, at least as far as the double buffer
+ ** being empty.
+ */
/* XXX Does the above comment mean that this has
still to be implemented? -- REW */
/* XXX Is the locking OK together with locking
- in txenable? (Deadlock?) -- REW */
-
- RIOTxEnable((char *)PortP);
+ in txenable? (Deadlock?) -- REW */
+
+ RIOTxEnable((char *) PortP);
break;
default:
break;
- }
+ }
old_cflag = tp->tm.c_cflag;
old_iflag = tp->tm.c_iflag;
old_oflag = tp->tm.c_oflag;
oldcook = PortP->CookMode;
- if ( p->RIOHalted ) {
- RIOClearUp( PortP );
+ if (p->RIOHalted) {
+ RIOClearUp(PortP);
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
pseterr(EIO);
return 0;
@@ -1202,10 +1169,10 @@ riotioctl(struct rio_info *p, struct tty_struct *tty, int cmd, caddr_t arg)
PortP->FlushCmdBodge = 0;
/*
- ** If the port is locked, and it is reconfigured, we want
- ** to restore the state of the tty structure so the change is NOT
- ** made.
- */
+ ** If the port is locked, and it is reconfigured, we want
+ ** to restore the state of the tty structure so the change is NOT
+ ** made.
+ */
if (PortP->Lock) {
tp->tm.c_iflag = PortP->StoredTty.iflag;
tp->tm.c_oflag = PortP->StoredTty.oflag;
@@ -1214,13 +1181,12 @@ riotioctl(struct rio_info *p, struct tty_struct *tty, int cmd, caddr_t arg)
tp->tm.c_line = PortP->StoredTty.line;
for (i = 0; i < NCC + 1; i++)
tp->tm.c_cc[i] = PortP->StoredTty.cc[i];
- }
- else {
+ } else {
/*
- ** If the port is set to store the parameters, and it is
- ** reconfigured, we want to save the current tty struct so it
- ** may be restored on the next open.
- */
+ ** If the port is set to store the parameters, and it is
+ ** reconfigured, we want to save the current tty struct so it
+ ** may be restored on the next open.
+ */
if (PortP->Store) {
PortP->StoredTty.iflag = tp->tm.c_iflag;
PortP->StoredTty.oflag = tp->tm.c_oflag;
@@ -1232,44 +1198,41 @@ riotioctl(struct rio_info *p, struct tty_struct *tty, int cmd, caddr_t arg)
}
}
- changed = (tp->tm.c_cflag != old_cflag) ||
- (tp->tm.c_iflag != old_iflag) ||
- (tp->tm.c_oflag != old_oflag);
+ changed = (tp->tm.c_cflag != old_cflag) || (tp->tm.c_iflag != old_iflag) || (tp->tm.c_oflag != old_oflag);
PortP->CookMode = RIOCookMode(tp); /* Set new cooking mode */
- rio_dprintk (RIO_DEBUG_TTY, "RIOIoctl changed %d newcook %d oldcook %d\n",
- changed,PortP->CookMode,oldcook);
+ rio_dprintk(RIO_DEBUG_TTY, "RIOIoctl changed %d newcook %d oldcook %d\n", changed, PortP->CookMode, oldcook);
#ifdef MODEM_SUPPORT
/*
- ** kludge to force CARR_ON if CLOCAL set
- */
- if ((tp->tm.c_cflag & CLOCAL) || (PortP->ModemState & MSVR1_CD)) {
+ ** kludge to force CARR_ON if CLOCAL set
+ */
+ if ((tp->tm.c_cflag & CLOCAL) || (PortP->ModemState & MSVR1_CD)) {
tp->tm.c_state |= CARR_ON;
- wakeup ((caddr_t)&tp->tm.c_canq);
+		wakeup((caddr_t) &tp->tm.c_canq);
}
#endif
- if ( p->RIOHalted ) {
- RIOClearUp( PortP );
+ if (p->RIOHalted) {
+ RIOClearUp(PortP);
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
pseterr(EIO);
return 0;
}
/*
- ** Re-configure if modes or cooking have changed
- */
+ ** Re-configure if modes or cooking have changed
+ */
if (changed || oldcook != PortP->CookMode || (ioctl_processed)) {
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- rio_dprintk (RIO_DEBUG_TTY, "Ioctl changing the PORT settings\n");
- RIOParam(PortP,CONFIG,Modem,OK_TO_SLEEP);
+ rio_dprintk(RIO_DEBUG_TTY, "Ioctl changing the PORT settings\n");
+ RIOParam(PortP, CONFIG, Modem, OK_TO_SLEEP);
rio_spin_lock_irqsave(&PortP->portSem, flags);
}
if (p->RIOHalted) {
rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- RIOClearUp( PortP );
+ RIOClearUp(PortP);
pseterr(EIO);
return 0;
}
@@ -1280,36 +1243,32 @@ riotioctl(struct rio_info *p, struct tty_struct *tty, int cmd, caddr_t arg)
/*
ttyseth -- set hardware dependent tty settings
*/
-void
-ttyseth(PortP, s, sg)
-struct Port * PortP;
-struct ttystatics * s;
+void ttyseth(PortP, s, sg)
+struct Port *PortP;
+struct ttystatics *s;
struct old_sgttyb *sg;
{
- struct old_sgttyb * tsg;
+ struct old_sgttyb *tsg;
struct termios *tp = &s->tm;
tsg = &s->sg;
- if (sg->sg_flags & (EVENP|ODDP)) {
+ if (sg->sg_flags & (EVENP | ODDP)) {
tp->c_cflag &= PARENB;
if (sg->sg_flags & EVENP) {
if (sg->sg_flags & ODDP) {
tp->c_cflag &= V_CS7;
tp->c_cflag &= ~PARENB;
- }
- else {
+ } else {
tp->c_cflag &= V_CS7;
tp->c_cflag &= PARENB;
tp->c_cflag &= PARODD;
}
- }
- else if (sg->sg_flags & ODDP) {
+ } else if (sg->sg_flags & ODDP) {
tp->c_cflag &= V_CS7;
tp->c_cflag &= PARENB;
tp->c_cflag &= PARODD;
- }
- else {
+ } else {
tp->c_cflag &= V_CS7;
tp->c_cflag &= PARENB;
}
@@ -1320,16 +1279,16 @@ struct old_sgttyb *sg;
* I will have to use separate sets of flags to store them in the
* Port structure.
*/
- if ( !sg->sg_ospeed )
+ if (!sg->sg_ospeed)
sg->sg_ospeed = sg->sg_ispeed;
else
sg->sg_ispeed = sg->sg_ospeed;
- if (sg->sg_ispeed > V_EXTB )
+ if (sg->sg_ispeed > V_EXTB)
sg->sg_ispeed = V_EXTB;
if (sg->sg_ispeed < V_B0)
sg->sg_ispeed = V_B0;
*tsg = *sg;
- tp->c_cflag = (tp->c_cflag & ~V_CBAUD) | conv_bv[(int)sg->sg_ispeed];
+ tp->c_cflag = (tp->c_cflag & ~V_CBAUD) | conv_bv[(int) sg->sg_ispeed];
}
/*
@@ -1338,36 +1297,33 @@ struct old_sgttyb *sg;
sysv = 0 => (POSIX): struct termios *sg
sysv != 0 => (System V): struct termio *sg
*/
-static void
-ttyseth_pv(PortP, s, sg, sysv)
+static void ttyseth_pv(PortP, s, sg, sysv)
struct Port *PortP;
struct ttystatics *s;
struct termios *sg;
int sysv;
{
- int speed;
- unsigned char csize;
- unsigned char cread;
- unsigned int lcr_flags;
- int ps;
-
- if (sysv) {
- /* sg points to a System V termio structure */
- csize = ((struct termio *)sg)->c_cflag & CSIZE;
- cread = ((struct termio *)sg)->c_cflag & CREAD;
- speed = conv_vb[((struct termio *)sg)->c_cflag & V_CBAUD];
- }
- else {
- /* sg points to a POSIX termios structure */
- csize = sg->c_cflag & CSIZE;
- cread = sg->c_cflag & CREAD;
- speed = conv_vb[sg->c_cflag & V_CBAUD];
- }
- if (s->sg.sg_ispeed != speed || s->sg.sg_ospeed != speed) {
- s->sg.sg_ispeed = speed;
- s->sg.sg_ospeed = speed;
- s->tm.c_cflag = (s->tm.c_cflag & ~V_CBAUD) |
- conv_bv[(int)s->sg.sg_ispeed];
- }
+ int speed;
+ unsigned char csize;
+ unsigned char cread;
+ unsigned int lcr_flags;
+ int ps;
+
+ if (sysv) {
+ /* sg points to a System V termio structure */
+ csize = ((struct termio *) sg)->c_cflag & CSIZE;
+ cread = ((struct termio *) sg)->c_cflag & CREAD;
+ speed = conv_vb[((struct termio *) sg)->c_cflag & V_CBAUD];
+ } else {
+ /* sg points to a POSIX termios structure */
+ csize = sg->c_cflag & CSIZE;
+ cread = sg->c_cflag & CREAD;
+ speed = conv_vb[sg->c_cflag & V_CBAUD];
+ }
+ if (s->sg.sg_ispeed != speed || s->sg.sg_ospeed != speed) {
+ s->sg.sg_ispeed = speed;
+ s->sg.sg_ospeed = speed;
+ s->tm.c_cflag = (s->tm.c_cflag & ~V_CBAUD) | conv_bv[(int) s->sg.sg_ispeed];
+ }
}
#endif
diff --git a/drivers/char/rio/riotypes.h b/drivers/char/rio/riotypes.h
index 1c7c42c638f..9b67e2468be 100644
--- a/drivers/char/rio/riotypes.h
+++ b/drivers/char/rio/riotypes.h
@@ -89,47 +89,46 @@ typedef RIO_POINTER u_short_ptr;
typedef RIO_POINTER ushort_ptr;
#endif
-#else /* not INKERNEL */
-typedef unsigned char BYTE;
-typedef unsigned short WORD;
-typedef unsigned long DWORD;
-typedef short NUMBER;
-typedef short *NUMBER_ptr;
-typedef unsigned short *WORD_ptr;
-typedef unsigned char *BYTE_ptr;
-typedef unsigned char uchar ;
-typedef unsigned short ushort ;
-typedef unsigned int uint ;
-typedef unsigned long ulong ;
-typedef unsigned char u_char ;
-typedef unsigned short u_short ;
-typedef unsigned int u_int ;
-typedef unsigned long u_long ;
-typedef unsigned short ERROR ;
-typedef unsigned long ID ;
-typedef char *char_ptr;
-typedef Channel *Channel_ptr;
+#else /* not INKERNEL */
+typedef unsigned char BYTE;
+typedef unsigned short WORD;
+typedef unsigned long DWORD;
+typedef short NUMBER;
+typedef short *NUMBER_ptr;
+typedef unsigned short *WORD_ptr;
+typedef unsigned char *BYTE_ptr;
+typedef unsigned char uchar;
+typedef unsigned short ushort;
+typedef unsigned int uint;
+typedef unsigned long ulong;
+typedef unsigned char u_char;
+typedef unsigned short u_short;
+typedef unsigned int u_int;
+typedef unsigned long u_long;
+typedef unsigned short ERROR;
+typedef unsigned long ID;
+typedef char *char_ptr;
+typedef Channel *Channel_ptr;
typedef struct FREE_LIST *FREE_LIST_ptr;
typedef struct FREE_LIST **FREE_LIST_ptr_ptr;
-typedef struct LPB *LPB_ptr;
-typedef struct Process *Process_ptr;
-typedef struct PHB *PHB_ptr;
-typedef struct PKT *PKT_ptr;
-typedef struct PKT **PKT_ptr_ptr;
-typedef struct Q_BUF *Q_BUF_ptr;
-typedef struct Q_BUF **Q_BUF_ptr_ptr;
+typedef struct LPB *LPB_ptr;
+typedef struct Process *Process_ptr;
+typedef struct PHB *PHB_ptr;
+typedef struct PKT *PKT_ptr;
+typedef struct PKT **PKT_ptr_ptr;
+typedef struct Q_BUF *Q_BUF_ptr;
+typedef struct Q_BUF **Q_BUF_ptr_ptr;
typedef struct ROUTE_STR *ROUTE_STR_ptr;
-typedef struct RUP *RUP_ptr;
-typedef short *short_ptr;
-typedef u_short *u_short_ptr;
-typedef ushort *ushort_ptr;
-typedef struct PKT PKT;
-typedef struct LPB LPB;
-typedef struct RUP RUP;
+typedef struct RUP *RUP_ptr;
+typedef short *short_ptr;
+typedef u_short *u_short_ptr;
+typedef ushort *ushort_ptr;
+typedef struct PKT PKT;
+typedef struct LPB LPB;
+typedef struct RUP RUP;
#endif
-#endif /* __riotypes__ */
+#endif /* __riotypes__ */
/*********** end of file ***********/
-
diff --git a/drivers/char/rio/riowinif.h b/drivers/char/rio/riowinif.h
index 18a4f147edc..f802d7593b8 100644
--- a/drivers/char/rio/riowinif.h
+++ b/drivers/char/rio/riowinif.h
@@ -40,7 +40,7 @@
*/
-#ifndef _riowinif_h /* If RIOWINDIF.H not already defined */
+#ifndef _riowinif_h		/* If RIOWINIF.H not already defined */
#define _riowinif_h 1
/*****************************************************************************
@@ -60,42 +60,41 @@
/* The PARM_MAP structure defines global values relating to the Host Card / RTA
and is the main structure from which all other structures are referenced. */
-typedef struct _PARM_MAP
-{
- _u16 phb_ptr; /* 0x00 Pointer to the PHB array */
- _u16 phb_num_ptr; /* 0x02 Ptr to Number of PHB's */
- _u16 free_list; /* 0x04 Free List pointer */
- _u16 free_list_end; /* 0x06 Free List End pointer */
- _u16 q_free_list_ptr; /* 0x08 Ptr to Q_BUF variable */
- _u16 unit_id_ptr; /* 0x0A Unit Id */
- _u16 link_str_ptr; /* 0x0C Link Structure Array */
- _u16 bootloader_1; /* 0x0E 1st Stage Boot Loader */
- _u16 bootloader_2; /* 0x10 2nd Stage Boot Loader */
- _u16 port_route_map_ptr; /* 0x12 Port Route Map */
- _u16 route_ptr; /* 0x14 Route Map */
- _u16 map_present; /* 0x16 Route Map present */
- _u16 pkt_num; /* 0x18 Total number of packets */
- _u16 q_num; /* 0x1A Total number of Q packets */
- _u16 buffers_per_port; /* 0x1C Number of buffers per port */
- _u16 heap_size; /* 0x1E Initial size of heap */
- _u16 heap_left; /* 0x20 Current Heap left */
- _u16 error; /* 0x22 Error code */
- _u16 tx_max; /* 0x24 Max number of tx pkts per phb */
- _u16 rx_max; /* 0x26 Max number of rx pkts per phb */
- _u16 rx_limit; /* 0x28 For high / low watermarks */
- _u16 links; /* 0x2A Links to use */
- _u16 timer; /* 0x2C Interrupts per second */
- _u16 rups; /* 0x2E Pointer to the RUPs */
- _u16 max_phb; /* 0x30 Mostly for debugging */
- _u16 living; /* 0x32 Just increments!! */
- _u16 init_done; /* 0x34 Initialisation over */
- _u16 booting_link; /* 0x36 */
- _u16 idle_count; /* 0x38 Idle time counter */
- _u16 busy_count; /* 0x3A Busy counter */
- _u16 idle_control; /* 0x3C Control Idle Process */
- _u16 tx_intr; /* 0x3E TX interrupt pending */
- _u16 rx_intr; /* 0x40 RX interrupt pending */
- _u16 rup_intr; /* 0x42 RUP interrupt pending */
+typedef struct _PARM_MAP {
+ _u16 phb_ptr; /* 0x00 Pointer to the PHB array */
+ _u16 phb_num_ptr; /* 0x02 Ptr to Number of PHB's */
+ _u16 free_list; /* 0x04 Free List pointer */
+ _u16 free_list_end; /* 0x06 Free List End pointer */
+ _u16 q_free_list_ptr; /* 0x08 Ptr to Q_BUF variable */
+ _u16 unit_id_ptr; /* 0x0A Unit Id */
+ _u16 link_str_ptr; /* 0x0C Link Structure Array */
+ _u16 bootloader_1; /* 0x0E 1st Stage Boot Loader */
+ _u16 bootloader_2; /* 0x10 2nd Stage Boot Loader */
+ _u16 port_route_map_ptr; /* 0x12 Port Route Map */
+ _u16 route_ptr; /* 0x14 Route Map */
+ _u16 map_present; /* 0x16 Route Map present */
+ _u16 pkt_num; /* 0x18 Total number of packets */
+ _u16 q_num; /* 0x1A Total number of Q packets */
+ _u16 buffers_per_port; /* 0x1C Number of buffers per port */
+ _u16 heap_size; /* 0x1E Initial size of heap */
+ _u16 heap_left; /* 0x20 Current Heap left */
+ _u16 error; /* 0x22 Error code */
+ _u16 tx_max; /* 0x24 Max number of tx pkts per phb */
+ _u16 rx_max; /* 0x26 Max number of rx pkts per phb */
+ _u16 rx_limit; /* 0x28 For high / low watermarks */
+ _u16 links; /* 0x2A Links to use */
+ _u16 timer; /* 0x2C Interrupts per second */
+ _u16 rups; /* 0x2E Pointer to the RUPs */
+ _u16 max_phb; /* 0x30 Mostly for debugging */
+ _u16 living; /* 0x32 Just increments!! */
+ _u16 init_done; /* 0x34 Initialisation over */
+ _u16 booting_link; /* 0x36 */
+ _u16 idle_count; /* 0x38 Idle time counter */
+ _u16 busy_count; /* 0x3A Busy counter */
+ _u16 idle_control; /* 0x3C Control Idle Process */
+ _u16 tx_intr; /* 0x3E TX interrupt pending */
+ _u16 rx_intr; /* 0x40 RX interrupt pending */
+ _u16 rup_intr; /* 0x42 RUP interrupt pending */
} PARM_MAP;
@@ -184,45 +183,44 @@ typedef struct _PARM_MAP
attached to the system and there is normally an array of MAX_RUPS (=16) structures
in a host card, defined by PARM_MAP->rup. */
-typedef struct _RUP
-{
- _u16 txpkt; /* 0x00 Outgoing packet */
- _u16 rxpkt; /* 0x02 ncoming packet */
- _u16 link; /* 0x04 Which link to send packet down ? */
- _u8 rup_dest_unit[2]; /* 0x06 Destination Unit */
- _u16 handshake; /* 0x08 Handshaking */
- _u16 timeout; /* 0x0A Timeout */
- _u16 status; /* 0x0C Status */
- _u16 txcontrol; /* 0x0E Transmit control */
- _u16 rxcontrol; /* 0x10 Receive control */
+typedef struct _RUP {
+ _u16 txpkt; /* 0x00 Outgoing packet */
+	_u16 rxpkt;		/* 0x02 Incoming packet */
+ _u16 link; /* 0x04 Which link to send packet down ? */
+ _u8 rup_dest_unit[2]; /* 0x06 Destination Unit */
+ _u16 handshake; /* 0x08 Handshaking */
+ _u16 timeout; /* 0x0A Timeout */
+ _u16 status; /* 0x0C Status */
+ _u16 txcontrol; /* 0x0E Transmit control */
+ _u16 rxcontrol; /* 0x10 Receive control */
} RUP;
/* Same thing again, but defined as offsets... */
-#define RUP_txpkt 0x00 /* 0x00 Outgoing packet */
-#define RUP_rxpkt 0x02 /* 0x02 Incoming packet */
-#define RUP_link 0x04 /* 0x04 Which link to send packet down ? */
-#define RUP_rup_dest_unit 0x06 /* 0x06 Destination Unit */
-#define RUP_handshake 0x08 /* 0x08 Handshaking */
-#define RUP_timeout 0x0A /* 0x0A Timeout */
-#define RUP_status 0x0C /* 0x0C Status */
-#define RUP_txcontrol 0x0E /* 0x0E Transmit control */
-#define RUP_rxcontrol 0x10 /* 0x10 Receive control */
-#define sizeof_RUP 0x12 /* structure size = 0x12 */
+#define RUP_txpkt 0x00 /* 0x00 Outgoing packet */
+#define RUP_rxpkt 0x02 /* 0x02 Incoming packet */
+#define RUP_link 0x04 /* 0x04 Which link to send packet down ? */
+#define RUP_rup_dest_unit 0x06 /* 0x06 Destination Unit */
+#define RUP_handshake 0x08 /* 0x08 Handshaking */
+#define RUP_timeout 0x0A /* 0x0A Timeout */
+#define RUP_status 0x0C /* 0x0C Status */
+#define RUP_txcontrol 0x0E /* 0x0E Transmit control */
+#define RUP_rxcontrol 0x10 /* 0x10 Receive control */
+#define sizeof_RUP 0x12 /* structure size = 0x12 */
#define MAX_RUP 16
/* RUP.txcontrol definitions... */
-#define TX_RUP_INACTIVE 0 /* Nothing to transmit */
-#define TX_PACKET_READY 1 /* Transmit packet ready */
-#define TX_LOCK_RUP 2 /* Transmit side locked */
+#define TX_RUP_INACTIVE 0 /* Nothing to transmit */
+#define TX_PACKET_READY 1 /* Transmit packet ready */
+#define TX_LOCK_RUP 2 /* Transmit side locked */
/* RUP.txcontrol definitions... */
-#define RX_RUP_INACTIVE 0 /* Nothing received */
-#define RX_PACKET_READY 1 /* Packet received */
+#define RX_RUP_INACTIVE 0 /* Nothing received */
+#define RX_PACKET_READY 1 /* Packet received */
-#define RUP_NO_OWNER 0xFF /* RUP not owned by any process */
+#define RUP_NO_OWNER 0xFF /* RUP not owned by any process */
/*****************************************************************************
********************************** ***********************************
@@ -234,52 +232,51 @@ typedef struct _RUP
to the system and there is normally an array of MAX_PHBS (=128) structures
in a host card, defined by PARM_MAP->phb_ptr and PARM_MAP->phb_num_ptr. */
-typedef struct _PHB
-{
- _u16 source; /* 0x00 Location of the PHB in the host card */
- _u16 handshake; /* 0x02 Used to manage receive packet flow control */
- _u16 status; /* 0x04 Internal port transmit/receive status */
- _u16 timeout; /* 0x06 Time period to wait for an ACK */
- _u16 link; /* 0x08 The host link associated with the PHB */
- _u16 destination; /* 0x0A Location of the remote port on the network */
-
- _u16 tx_start; /* 0x0C first entry in the packet array for transmit packets */
- _u16 tx_end; /* 0x0E last entry in the packet array for transmit packets */
- _u16 tx_add; /* 0x10 position in the packet array for new transmit packets */
- _u16 tx_remove; /* 0x12 current position in the packet pointer array */
-
- _u16 rx_start; /* 0x14 first entry in the packet array for receive packets */
- _u16 rx_end; /* 0x16 last entry in the packet array for receive packets */
- _u16 rx_add; /* 0x18 position in the packet array for new receive packets */
- _u16 rx_remove; /* 0x1A current position in the packet pointer array */
+typedef struct _PHB {
+ _u16 source; /* 0x00 Location of the PHB in the host card */
+ _u16 handshake; /* 0x02 Used to manage receive packet flow control */
+ _u16 status; /* 0x04 Internal port transmit/receive status */
+ _u16 timeout; /* 0x06 Time period to wait for an ACK */
+ _u16 link; /* 0x08 The host link associated with the PHB */
+ _u16 destination; /* 0x0A Location of the remote port on the network */
+
+ _u16 tx_start; /* 0x0C first entry in the packet array for transmit packets */
+ _u16 tx_end; /* 0x0E last entry in the packet array for transmit packets */
+ _u16 tx_add; /* 0x10 position in the packet array for new transmit packets */
+ _u16 tx_remove; /* 0x12 current position in the packet pointer array */
+
+ _u16 rx_start; /* 0x14 first entry in the packet array for receive packets */
+ _u16 rx_end; /* 0x16 last entry in the packet array for receive packets */
+ _u16 rx_add; /* 0x18 position in the packet array for new receive packets */
+ _u16 rx_remove; /* 0x1A current position in the packet pointer array */
} PHB;
/* Same thing again, but defined as offsets... */
-#define PHB_source 0x00 /* 0x00 Location of the PHB in the host card */
-#define PHB_handshake 0x02 /* 0x02 Used to manage receive packet flow control */
-#define PHB_status 0x04 /* 0x04 Internal port transmit/receive status */
-#define PHB_timeout 0x06 /* 0x06 Time period to wait for an ACK */
-#define PHB_link 0x08 /* 0x08 The host link associated with the PHB */
-#define PHB_destination 0x0A /* 0x0A Location of the remote port on the network */
-#define PHB_tx_start 0x0C /* 0x0C first entry in the packet array for transmit packets */
-#define PHB_tx_end 0x0E /* 0x0E last entry in the packet array for transmit packets */
-#define PHB_tx_add 0x10 /* 0x10 position in the packet array for new transmit packets */
-#define PHB_tx_remove 0x12 /* 0x12 current position in the packet pointer array */
-#define PHB_rx_start 0x14 /* 0x14 first entry in the packet array for receive packets */
-#define PHB_rx_end 0x16 /* 0x16 last entry in the packet array for receive packets */
-#define PHB_rx_add 0x18 /* 0x18 position in the packet array for new receive packets */
-#define PHB_rx_remove 0x1A /* 0x1A current position in the packet pointer array */
-#define sizeof_PHB 0x1C /* structure size = 0x1C */
+#define PHB_source 0x00 /* 0x00 Location of the PHB in the host card */
+#define PHB_handshake 0x02 /* 0x02 Used to manage receive packet flow control */
+#define PHB_status 0x04 /* 0x04 Internal port transmit/receive status */
+#define PHB_timeout 0x06 /* 0x06 Time period to wait for an ACK */
+#define PHB_link 0x08 /* 0x08 The host link associated with the PHB */
+#define PHB_destination 0x0A /* 0x0A Location of the remote port on the network */
+#define PHB_tx_start 0x0C /* 0x0C first entry in the packet array for transmit packets */
+#define PHB_tx_end 0x0E /* 0x0E last entry in the packet array for transmit packets */
+#define PHB_tx_add 0x10 /* 0x10 position in the packet array for new transmit packets */
+#define PHB_tx_remove 0x12 /* 0x12 current position in the packet pointer array */
+#define PHB_rx_start 0x14 /* 0x14 first entry in the packet array for receive packets */
+#define PHB_rx_end 0x16 /* 0x16 last entry in the packet array for receive packets */
+#define PHB_rx_add 0x18 /* 0x18 position in the packet array for new receive packets */
+#define PHB_rx_remove 0x1A /* 0x1A current position in the packet pointer array */
+#define sizeof_PHB 0x1C /* structure size = 0x1C */
/* PHB.handshake definitions... */
-#define PHB_HANDSHAKE_SET 0x0001 /* Set by LRT */
-#define PHB_HANDSHAKE_RESET 0x0002 /* Set by ISR / driver */
+#define PHB_HANDSHAKE_SET 0x0001 /* Set by LRT */
+#define PHB_HANDSHAKE_RESET 0x0002 /* Set by ISR / driver */
#define PHB_HANDSHAKE_FLAGS (PHB_HANDSHAKE_RESET|PHB_HANDSHAKE_SET)
/* Reset by ltt */
-#define MAX_PHB 128 /* range 0-127 */
+#define MAX_PHB 128 /* range 0-127 */
/*****************************************************************************
********************************** ***********************************
@@ -291,86 +288,85 @@ typedef struct _PHB
and there is normally an array of MAX_LINKS (=4) structures in a host card,
defined by PARM_MAP->link_str_ptr. */
-typedef struct _LPB
-{
- _u16 link_number; /* 0x00 Link Number */
- _u16 in_ch; /* 0x02 Link In Channel */
- _u16 out_ch; /* 0x04 Link Out Channel */
- _u8 attached_serial[4]; /* 0x06 Attached serial number */
- _u8 attached_host_serial[4];/* 0x0A Serial number of Host who booted other end */
- _u16 descheduled; /* 0x0E Currently Descheduled */
- _u16 state; /* 0x10 Current state */
- _u16 send_poll; /* 0x12 Send a Poll Packet */
- _u16 ltt_p; /* 0x14 Process Descriptor */
- _u16 lrt_p; /* 0x16 Process Descriptor */
- _u16 lrt_status; /* 0x18 Current lrt status */
- _u16 ltt_status; /* 0x1A Current ltt status */
- _u16 timeout; /* 0x1C Timeout value */
- _u16 topology; /* 0x1E Topology bits */
- _u16 mon_ltt; /* 0x20 */
- _u16 mon_lrt; /* 0x22 */
- _u16 num_pkts; /* 0x24 */
- _u16 add_packet_list; /* 0x26 Add packets to here */
- _u16 remove_packet_list; /* 0x28 Send packets from here */
-
- _u16 lrt_fail_chan; /* 0x2A Lrt's failure channel */
- _u16 ltt_fail_chan; /* 0x2C Ltt's failure channel */
-
- RUP rup; /* 0x2E RUP structure for HOST to driver comms */
- RUP link_rup; /* 0x40 RUP for the link (POLL, topology etc.) */
- _u16 attached_link; /* 0x52 Number of attached link */
- _u16 csum_errors; /* 0x54 csum errors */
- _u16 num_disconnects; /* 0x56 number of disconnects */
- _u16 num_sync_rcvd; /* 0x58 # sync's received */
- _u16 num_sync_rqst; /* 0x5A # sync requests */
- _u16 num_tx; /* 0x5C Num pkts sent */
- _u16 num_rx; /* 0x5E Num pkts received */
- _u16 module_attached; /* 0x60 Module tpyes of attached */
- _u16 led_timeout; /* 0x62 LED timeout */
- _u16 first_port; /* 0x64 First port to service */
- _u16 last_port; /* 0x66 Last port to service */
+typedef struct _LPB {
+ _u16 link_number; /* 0x00 Link Number */
+ _u16 in_ch; /* 0x02 Link In Channel */
+ _u16 out_ch; /* 0x04 Link Out Channel */
+ _u8 attached_serial[4]; /* 0x06 Attached serial number */
+ _u8 attached_host_serial[4]; /* 0x0A Serial number of Host who booted other end */
+ _u16 descheduled; /* 0x0E Currently Descheduled */
+ _u16 state; /* 0x10 Current state */
+ _u16 send_poll; /* 0x12 Send a Poll Packet */
+ _u16 ltt_p; /* 0x14 Process Descriptor */
+ _u16 lrt_p; /* 0x16 Process Descriptor */
+ _u16 lrt_status; /* 0x18 Current lrt status */
+ _u16 ltt_status; /* 0x1A Current ltt status */
+ _u16 timeout; /* 0x1C Timeout value */
+ _u16 topology; /* 0x1E Topology bits */
+ _u16 mon_ltt; /* 0x20 */
+ _u16 mon_lrt; /* 0x22 */
+ _u16 num_pkts; /* 0x24 */
+ _u16 add_packet_list; /* 0x26 Add packets to here */
+ _u16 remove_packet_list; /* 0x28 Send packets from here */
+
+ _u16 lrt_fail_chan; /* 0x2A Lrt's failure channel */
+ _u16 ltt_fail_chan; /* 0x2C Ltt's failure channel */
+
+ RUP rup; /* 0x2E RUP structure for HOST to driver comms */
+ RUP link_rup; /* 0x40 RUP for the link (POLL, topology etc.) */
+ _u16 attached_link; /* 0x52 Number of attached link */
+ _u16 csum_errors; /* 0x54 csum errors */
+ _u16 num_disconnects; /* 0x56 number of disconnects */
+	_u16 num_sync_rcvd;	/* 0x58 # syncs received */
+ _u16 num_sync_rqst; /* 0x5A # sync requests */
+ _u16 num_tx; /* 0x5C Num pkts sent */
+ _u16 num_rx; /* 0x5E Num pkts received */
+	_u16 module_attached;	/* 0x60 Module types attached */
+ _u16 led_timeout; /* 0x62 LED timeout */
+ _u16 first_port; /* 0x64 First port to service */
+ _u16 last_port; /* 0x66 Last port to service */
} LPB;
/* Same thing again, but defined as offsets... */
-#define LPB_link_number 0x00 /* 0x00 Link Number */
-#define LPB_in_ch 0x02 /* 0x02 Link In Channel */
-#define LPB_out_ch 0x04 /* 0x04 Link Out Channel */
-#define LPB_attached_serial 0x06 /* 0x06 Attached serial number */
-#define LPB_attached_host_serial 0x0A /* 0x0A Serial number of Host who booted other end */
-#define LPB_descheduled 0x0E /* 0x0E Currently Descheduled */
-#define LPB_state 0x10 /* 0x10 Current state */
-#define LPB_send_poll 0x12 /* 0x12 Send a Poll Packet */
-#define LPB_ltt_p 0x14 /* 0x14 Process Descriptor */
-#define LPB_lrt_p 0x16 /* 0x16 Process Descriptor */
-#define LPB_lrt_status 0x18 /* 0x18 Current lrt status */
-#define LPB_ltt_status 0x1A /* 0x1A Current ltt status */
-#define LPB_timeout 0x1C /* 0x1C Timeout value */
-#define LPB_topology 0x1E /* 0x1E Topology bits */
-#define LPB_mon_ltt 0x20 /* 0x20 */
-#define LPB_mon_lrt 0x22 /* 0x22 */
-#define LPB_num_pkts 0x24 /* 0x24 */
-#define LPB_add_packet_list 0x26 /* 0x26 Add packets to here */
-#define LPB_remove_packet_list 0x28 /* 0x28 Send packets from here */
-#define LPB_lrt_fail_chan 0x2A /* 0x2A Lrt's failure channel */
-#define LPB_ltt_fail_chan 0x2C /* 0x2C Ltt's failure channel */
-#define LPB_rup 0x2E /* 0x2E RUP structure for HOST to driver comms */
-#define LPB_link_rup 0x40 /* 0x40 RUP for the link (POLL, topology etc.) */
-#define LPB_attached_link 0x52 /* 0x52 Number of attached link */
-#define LPB_csum_errors 0x54 /* 0x54 csum errors */
-#define LPB_num_disconnects 0x56 /* 0x56 number of disconnects */
-#define LPB_num_sync_rcvd 0x58 /* 0x58 # sync's received */
-#define LPB_num_sync_rqst 0x5A /* 0x5A # sync requests */
-#define LPB_num_tx 0x5C /* 0x5C Num pkts sent */
-#define LPB_num_rx 0x5E /* 0x5E Num pkts received */
-#define LPB_module_attached 0x60 /* 0x60 Module tpyes of attached */
-#define LPB_led_timeout 0x62 /* 0x62 LED timeout */
-#define LPB_first_port 0x64 /* 0x64 First port to service */
-#define LPB_last_port 0x66 /* 0x66 Last port to service */
-#define sizeof_LPB 0x68 /* structure size = 0x68 */
-
-#define LINKS_PER_UNIT 4 /* number of links from a host */
+#define LPB_link_number 0x00 /* 0x00 Link Number */
+#define LPB_in_ch 0x02 /* 0x02 Link In Channel */
+#define LPB_out_ch 0x04 /* 0x04 Link Out Channel */
+#define LPB_attached_serial 0x06 /* 0x06 Attached serial number */
+#define LPB_attached_host_serial 0x0A /* 0x0A Serial number of Host who booted other end */
+#define LPB_descheduled 0x0E /* 0x0E Currently Descheduled */
+#define LPB_state 0x10 /* 0x10 Current state */
+#define LPB_send_poll 0x12 /* 0x12 Send a Poll Packet */
+#define LPB_ltt_p 0x14 /* 0x14 Process Descriptor */
+#define LPB_lrt_p 0x16 /* 0x16 Process Descriptor */
+#define LPB_lrt_status 0x18 /* 0x18 Current lrt status */
+#define LPB_ltt_status 0x1A /* 0x1A Current ltt status */
+#define LPB_timeout 0x1C /* 0x1C Timeout value */
+#define LPB_topology 0x1E /* 0x1E Topology bits */
+#define LPB_mon_ltt 0x20 /* 0x20 */
+#define LPB_mon_lrt 0x22 /* 0x22 */
+#define LPB_num_pkts 0x24 /* 0x24 */
+#define LPB_add_packet_list 0x26 /* 0x26 Add packets to here */
+#define LPB_remove_packet_list 0x28 /* 0x28 Send packets from here */
+#define LPB_lrt_fail_chan 0x2A /* 0x2A Lrt's failure channel */
+#define LPB_ltt_fail_chan 0x2C /* 0x2C Ltt's failure channel */
+#define LPB_rup 0x2E /* 0x2E RUP structure for HOST to driver comms */
+#define LPB_link_rup 0x40 /* 0x40 RUP for the link (POLL, topology etc.) */
+#define LPB_attached_link 0x52 /* 0x52 Number of attached link */
+#define LPB_csum_errors 0x54 /* 0x54 csum errors */
+#define LPB_num_disconnects 0x56 /* 0x56 number of disconnects */
+#define LPB_num_sync_rcvd	0x58	/* 0x58 # syncs received */
+#define LPB_num_sync_rqst 0x5A /* 0x5A # sync requests */
+#define LPB_num_tx 0x5C /* 0x5C Num pkts sent */
+#define LPB_num_rx 0x5E /* 0x5E Num pkts received */
+#define LPB_module_attached	0x60	/* 0x60 Module types attached */
+#define LPB_led_timeout 0x62 /* 0x62 LED timeout */
+#define LPB_first_port 0x64 /* 0x64 First port to service */
+#define LPB_last_port 0x66 /* 0x66 Last port to service */
+#define sizeof_LPB 0x68 /* structure size = 0x68 */
+
+#define LINKS_PER_UNIT 4 /* number of links from a host */
/*****************************************************************************
******************************** *******************************
@@ -380,17 +376,16 @@ typedef struct _LPB
/* Used to overlay packet headers when allocating/freeing packets from the free list */
-typedef struct _FREE_LIST
-{
- _u16 next; /* 0x00 offset of next list item */
- _u16 prev; /* 0x02 offset of previous list item */
+typedef struct _FREE_LIST {
+ _u16 next; /* 0x00 offset of next list item */
+ _u16 prev; /* 0x02 offset of previous list item */
} FREE_LIST;
/* Same thing again, but defined as offsets... */
-#define FL_next 0x00 /* 0x00 offset of next list item */
-#define FL_prev 0x02 /* 0x02 offset of previous list item */
+#define FL_next 0x00 /* 0x00 offset of next list item */
+#define FL_prev 0x02 /* 0x02 offset of previous list item */
/*****************************************************************************
********************************** ***********************************
@@ -401,32 +396,31 @@ typedef struct _FREE_LIST
/* The PKT is the main unit of communication between Host Cards and RTAs across
the RIO network. */
-#define PKT_MAX_DATA_LEN 72 /* Size of packet data */
+#define PKT_MAX_DATA_LEN 72 /* Size of packet data */
-typedef struct _PKT
-{
- _u8 dest_unit; /* 0x00 Destination Unit Id */
- _u8 dest_port; /* 0x01 Destination Port */
- _u8 src_unit; /* 0x02 Source Unit Id */
- _u8 src_port; /* 0x03 Source Port */
- _u8 len; /* 0x04 Length (in bytes) of data field */
- _u8 control; /* 0x05 */
- _u8 data[PKT_MAX_DATA_LEN]; /* 0x06 Actual data */
- _u16 csum; /* 0x4E C-SUM */
+typedef struct _PKT {
+ _u8 dest_unit; /* 0x00 Destination Unit Id */
+ _u8 dest_port; /* 0x01 Destination Port */
+ _u8 src_unit; /* 0x02 Source Unit Id */
+ _u8 src_port; /* 0x03 Source Port */
+ _u8 len; /* 0x04 Length (in bytes) of data field */
+ _u8 control; /* 0x05 */
+ _u8 data[PKT_MAX_DATA_LEN]; /* 0x06 Actual data */
+ _u16 csum; /* 0x4E C-SUM */
} PKT;
/* Same thing again, but defined as offsets... */
-#define PKT_dest_unit 0x00 /* 0x00 Destination Unit Id */
-#define PKT_dest_port 0x01 /* 0x01 Destination Port */
-#define PKT_src_unit 0x02 /* 0x02 Source Unit Id */
-#define PKT_src_port 0x03 /* 0x03 Source Port */
-#define PKT_len 0x04 /* 0x04 Length (in bytes) of data field */
-#define PKT_control 0x05 /* 0x05 */
-#define PKT_data 0x06 /* 0x06 Actual data */
-#define PKT_csum 0x4E /* 0x4E C-SUM */
-#define sizeof_PKT 0x50 /* structure size = 0x50 */
+#define PKT_dest_unit 0x00 /* 0x00 Destination Unit Id */
+#define PKT_dest_port 0x01 /* 0x01 Destination Port */
+#define PKT_src_unit 0x02 /* 0x02 Source Unit Id */
+#define PKT_src_port 0x03 /* 0x03 Source Port */
+#define PKT_len 0x04 /* 0x04 Length (in bytes) of data field */
+#define PKT_control 0x05 /* 0x05 */
+#define PKT_data 0x06 /* 0x06 Actual data */
+#define PKT_csum 0x4E /* 0x4E C-SUM */
+#define sizeof_PKT 0x50 /* structure size = 0x50 */
/* PKT.len definitions... */
#define PKT_CMD_BIT 0x80
@@ -449,38 +443,38 @@ typedef struct _PKT
/* The following definitions and structures define the control packets sent
between the driver and RIO Ports, RTAs and Host Cards. */
-#define PRE_EMPTIVE 0x80 /* Pre-emptive command (sent via port's RUP) */
+#define PRE_EMPTIVE 0x80 /* Pre-emptive command (sent via port's RUP) */
/* "in-band" and "pre-emptive" port commands... */
-#define OPEN 0x00 /* Driver->RIO Open a port */
-#define CONFIG 0x01 /* Driver->RIO Configure a port */
-#define MOPEN 0x02 /* Driver->RIO Modem open (wait for DCD) */
-#define CLOSE 0x03 /* Driver->RIO Close a port */
+#define OPEN 0x00 /* Driver->RIO Open a port */
+#define CONFIG 0x01 /* Driver->RIO Configure a port */
+#define MOPEN 0x02 /* Driver->RIO Modem open (wait for DCD) */
+#define CLOSE 0x03 /* Driver->RIO Close a port */
#define WFLUSH (0x04|PRE_EMPTIVE) /* Driver->RIO Write flush */
#define RFLUSH (0x05|PRE_EMPTIVE) /* Driver->RIO Read flush */
#define RESUME (0x06|PRE_EMPTIVE) /* Driver->RIO Behave as if XON received */
-#define SBREAK 0x07 /* Driver->RIO Start break */
-#define EBREAK 0x08 /* Driver->RIO End break */
+#define SBREAK 0x07 /* Driver->RIO Start break */
+#define EBREAK 0x08 /* Driver->RIO End break */
#define SUSPEND (0x09|PRE_EMPTIVE) /* Driver->RIO Behave as if XOFF received */
#define FCLOSE (0x0A|PRE_EMPTIVE) /* Driver->RIO Force close */
-#define XPRINT 0x0B /* Driver->RIO Xprint packet */
+#define XPRINT 0x0B /* Driver->RIO Xprint packet */
#define MBIS (0x0C|PRE_EMPTIVE) /* Driver->RIO Set modem lines */
#define MBIC (0x0D|PRE_EMPTIVE) /* Driver->RIO Clear modem lines */
#define MSET (0x0E|PRE_EMPTIVE) /* Driver->RIO Set modem lines */
-#define PCLOSE 0x0F /* Driver->RIO Pseudo close */
+#define PCLOSE 0x0F /* Driver->RIO Pseudo close */
#define MGET (0x10|PRE_EMPTIVE) /* Driver->RIO Force update of modem status */
#define MEMDUMP (0x11|PRE_EMPTIVE) /* Driver->RIO DEBUG request for RTA memory */
#define READ_REGISTER (0x12|PRE_EMPTIVE) /* Driver->RIO DEBUG read CD1400 register */
/* Remote Unit Port (RUP) packet definitions... (specified in PKT.dest_unit and PKT.src_unit) */
-#define SYNC_RUP 0xFF /* Download internal */
-#define COMMAND_RUP 0xFE /* Command ack/status */
-#define ERROR_RUP 0xFD /* Download internal */
-#define POLL_RUP 0xFC /* Download internal */
-#define BOOT_RUP 0xFB /* Used to boot RTAs */
-#define ROUTE_RUP 0xFA /* Used to specify routing/topology */
-#define STATUS_RUP 0xF9 /* Not used */
-#define POWER_RUP 0xF8 /* Download internal */
+#define SYNC_RUP 0xFF /* Download internal */
+#define COMMAND_RUP 0xFE /* Command ack/status */
+#define ERROR_RUP 0xFD /* Download internal */
+#define POLL_RUP 0xFC /* Download internal */
+#define BOOT_RUP 0xFB /* Used to boot RTAs */
+#define ROUTE_RUP 0xFA /* Used to specify routing/topology */
+#define STATUS_RUP 0xF9 /* Not used */
+#define POWER_RUP 0xF8 /* Download internal */
/* COMMAND_RUP definitions... */
#define COMPLETE (0x20|PRE_EMPTIVE) /* RIO->Driver Command complete */
@@ -488,24 +482,24 @@ typedef struct _PKT
#define MODEM_STATUS (0x22|PRE_EMPTIVE) /* RIO->Driver Modem status change */
/* BOOT_RUP definitions... */
-#define BOOT_REQUEST 0x00 /* RIO->Driver Request for boot */
-#define BOOT_ABORT 0x01 /* Driver->RIO Abort a boot */
-#define BOOT_SEQUENCE 0x02 /* Driver->RIO Packet with firmware details */
-#define BOOT_COMPLETED 0x03 /* RIO->Driver Boot completed */
-#define IFOAD 0x2F /* Driver->RIO Shutdown/Reboot RTA (Fall Over And Die) */
-#define IDENTIFY 0x30 /* Driver->RIO Identify RTA */
-#define ZOMBIE 0x31 /* Driver->RIO Shutdown/Flash LEDs */
-#define UFOAD 0x32 /* Driver->RIO Shutdown/Reboot neighbouring RTA */
-#define IWAIT 0x33 /* Driver->RIO Pause booting process */
+#define BOOT_REQUEST 0x00 /* RIO->Driver Request for boot */
+#define BOOT_ABORT 0x01 /* Driver->RIO Abort a boot */
+#define BOOT_SEQUENCE 0x02 /* Driver->RIO Packet with firmware details */
+#define BOOT_COMPLETED 0x03 /* RIO->Driver Boot completed */
+#define IFOAD 0x2F /* Driver->RIO Shutdown/Reboot RTA (Fall Over And Die) */
+#define IDENTIFY 0x30 /* Driver->RIO Identify RTA */
+#define ZOMBIE 0x31 /* Driver->RIO Shutdown/Flash LEDs */
+#define UFOAD 0x32 /* Driver->RIO Shutdown/Reboot neighbouring RTA */
+#define IWAIT 0x33 /* Driver->RIO Pause booting process */
/* ROUTE_RUP definitions... */
-#define ROUTE_REQUEST 0x00 /* RIO->Driver Request an ID */
-#define ROUTE_FOAD 0x01 /* Driver->RIO Shutdown/reboot RTA */
-#define ROUTE_ALREADY 0x02 /* Driver->RIO Not used */
-#define ROUTE_USED 0x03 /* Driver->RIO Not used */
-#define ROUTE_ALLOCATE 0x04 /* Driver->RIO Allocate RTA RUP numbers */
-#define ROUTE_REQ_TOP 0x05 /* Driver->RIO Not used */
-#define ROUTE_TOPOLOGY 0x06 /* RIO->Driver Route/Topology status */
+#define ROUTE_REQUEST 0x00 /* RIO->Driver Request an ID */
+#define ROUTE_FOAD 0x01 /* Driver->RIO Shutdown/reboot RTA */
+#define ROUTE_ALREADY 0x02 /* Driver->RIO Not used */
+#define ROUTE_USED 0x03 /* Driver->RIO Not used */
+#define ROUTE_ALLOCATE 0x04 /* Driver->RIO Allocate RTA RUP numbers */
+#define ROUTE_REQ_TOP 0x05 /* Driver->RIO Not used */
+#define ROUTE_TOPOLOGY 0x06 /* RIO->Driver Route/Topology status */
/*****************************************************************************
********************************** **********************************
@@ -518,89 +512,89 @@ typedef struct _PKT
Sent to open a port.
Structure of configuration info used with OPEN, CONFIG and MOPEN packets... */
-#define PKT_Cmd (PKT_Data+0) /* Command code */
-#define PKT_Cor1 (PKT_Data+1) /* Channel Option Register 1 */
-#define PKT_Cor2 (PKT_Data+2) /* Channel Option Register 2 */
-#define PKT_Cor4 (PKT_Data+3) /* Channel Option Register 4 */
-#define PKT_Cor5 (PKT_Data+4) /* Channel Option Register 5 */
-#define PKT_TxXon (PKT_Data+5) /* Transmit XON character */
-#define PKT_TxXoff (PKT_Data+6) /* Transmit XOFF character */
-#define PKT_RxXon (PKT_Data+7) /* Receive XON character */
-#define PKT_RxXoff (PKT_Data+8) /* Receive XOFF character */
-#define PKT_Lnext (PKT_Data+9) /* Lnext character */
-#define PKT_TxBaud (PKT_Data+10) /* Transmit baud rate */
-#define PKT_RxBaud (PKT_Data+11) /* Receive baud rate */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_Cor1 (PKT_Data+1) /* Channel Option Register 1 */
+#define PKT_Cor2 (PKT_Data+2) /* Channel Option Register 2 */
+#define PKT_Cor4 (PKT_Data+3) /* Channel Option Register 4 */
+#define PKT_Cor5 (PKT_Data+4) /* Channel Option Register 5 */
+#define PKT_TxXon (PKT_Data+5) /* Transmit XON character */
+#define PKT_TxXoff (PKT_Data+6) /* Transmit XOFF character */
+#define PKT_RxXon (PKT_Data+7) /* Receive XON character */
+#define PKT_RxXoff (PKT_Data+8) /* Receive XOFF character */
+#define PKT_Lnext (PKT_Data+9) /* Lnext character */
+#define PKT_TxBaud (PKT_Data+10) /* Transmit baud rate */
+#define PKT_RxBaud (PKT_Data+11) /* Receive baud rate */
/* COR1 definitions... */
-#define COR1_PARITY 0xE0 /* Parity mask */
-#define COR1_NONE 0x00 /* No parity */
-#define COR1_SPACE 0x20 /* Space parity */
-#define COR1_EVEN 0x40 /* Even parity */
-#define COR1_MARK 0xA0 /* Mark parity */
-#define COR1_ODD 0xC0 /* Odd parity */
-
-#define COR1_STOPBITS 0x0C /* Stop bits mask */
-#define COR1_STOP1 0x00 /* 1 stop bit */
-#define COR1_STOP1_5 0x04 /* 1.5 stop bits */
-#define COR1_STOP2 0x08 /* 2 stop bits */
-
-#define COR1_DATABITS 0x03 /* Data bits mask */
-#define COR1_DATA5 0x00 /* 5 data bits */
-#define COR1_DATA6 0x01 /* 6 data bits */
-#define COR1_DATA7 0x02 /* 7 data bits */
-#define COR1_DATA8 0x03 /* 8 data bits */
+#define COR1_PARITY 0xE0 /* Parity mask */
+#define COR1_NONE 0x00 /* No parity */
+#define COR1_SPACE 0x20 /* Space parity */
+#define COR1_EVEN 0x40 /* Even parity */
+#define COR1_MARK 0xA0 /* Mark parity */
+#define COR1_ODD 0xC0 /* Odd parity */
+
+#define COR1_STOPBITS 0x0C /* Stop bits mask */
+#define COR1_STOP1 0x00 /* 1 stop bit */
+#define COR1_STOP1_5 0x04 /* 1.5 stop bits */
+#define COR1_STOP2 0x08 /* 2 stop bits */
+
+#define COR1_DATABITS 0x03 /* Data bits mask */
+#define COR1_DATA5 0x00 /* 5 data bits */
+#define COR1_DATA6 0x01 /* 6 data bits */
+#define COR1_DATA7 0x02 /* 7 data bits */
+#define COR1_DATA8 0x03 /* 8 data bits */
/* COR2 definitions... */
-#define COR2_XON_TXFLOW 0x40 /* XON/XOFF Transmit Flow */
-#define COR2_XANY_TXFLOW 0xC0 /* XON/XANY Transmit Flow */
-#define COR2_HUPCL 0x20 /* Hang Up On Close */
-#define COR2_DSR_TXFLOW 0x08 /* DSR Transmit Flow Control */
-#define COR2_RTS_RXFLOW 0x04 /* RTS Receive Flow Control */
-#define COR2_CTS_TXFLOW 0x02 /* CTS Transmit Flow Control */
-#define COR2_XON_RXFLOW 0x01 /* XON/XOFF Receive Flow */
+#define COR2_XON_TXFLOW 0x40 /* XON/XOFF Transmit Flow */
+#define COR2_XANY_TXFLOW 0xC0 /* XON/XANY Transmit Flow */
+#define COR2_HUPCL 0x20 /* Hang Up On Close */
+#define COR2_DSR_TXFLOW 0x08 /* DSR Transmit Flow Control */
+#define COR2_RTS_RXFLOW 0x04 /* RTS Receive Flow Control */
+#define COR2_CTS_TXFLOW 0x02 /* CTS Transmit Flow Control */
+#define COR2_XON_RXFLOW 0x01 /* XON/XOFF Receive Flow */
/* COR4 definition... */
-#define COR4_IGNCR 0x80 /* Discard received CR */
-#define COR4_ICRNL 0x40 /* Map received CR -> NL */
-#define COR4_INLCR 0x20 /* Map received NL -> CR */
-#define COR4_IGNBRK 0x10 /* Ignore Received Break */
-#define COR4_NBRKINT 0x08 /* No interrupt on rx Break */
-#define COR4_IGNPAR 0x04 /* ignore rx parity error chars */
-#define COR4_PARMRK 0x02 /* Mark rx parity error chars */
-#define COR4_RAISEMOD 0x01 /* Raise modem lines on !0 baud */
+#define COR4_IGNCR 0x80 /* Discard received CR */
+#define COR4_ICRNL 0x40 /* Map received CR -> NL */
+#define COR4_INLCR 0x20 /* Map received NL -> CR */
+#define COR4_IGNBRK 0x10 /* Ignore Received Break */
+#define COR4_NBRKINT 0x08 /* No interrupt on rx Break */
+#define COR4_IGNPAR 0x04 /* ignore rx parity error chars */
+#define COR4_PARMRK 0x02 /* Mark rx parity error chars */
+#define COR4_RAISEMOD 0x01 /* Raise modem lines on !0 baud */
/* COR5 definitions... */
-#define COR5_ISTRIP 0x80 /* Strip input chars to 7 bits */
-#define COR5_LNE 0x40 /* Enable LNEXT processing */
-#define COR5_CMOE 0x20 /* Match good & error characters */
-#define COR5_TAB3 0x10 /* TAB3 mode */
-#define COR5_TSTATE_ON 0x08 /* Enable tbusy/tstop monitoring */
-#define COR5_TSTATE_OFF 0x04 /* Disable tbusy/tstop monitoring */
-#define COR5_ONLCR 0x02 /* NL -> CR NL on output */
-#define COR5_OCRNL 0x01 /* CR -> NL on output */
+#define COR5_ISTRIP 0x80 /* Strip input chars to 7 bits */
+#define COR5_LNE 0x40 /* Enable LNEXT processing */
+#define COR5_CMOE 0x20 /* Match good & error characters */
+#define COR5_TAB3 0x10 /* TAB3 mode */
+#define COR5_TSTATE_ON 0x08 /* Enable tbusy/tstop monitoring */
+#define COR5_TSTATE_OFF 0x04 /* Disable tbusy/tstop monitoring */
+#define COR5_ONLCR 0x02 /* NL -> CR NL on output */
+#define COR5_OCRNL 0x01 /* CR -> NL on output */
/* RxBaud and TxBaud definitions... */
-#define RIO_B0 0x00 /* RTS / DTR signals dropped */
-#define RIO_B50 0x01 /* 50 baud */
-#define RIO_B75 0x02 /* 75 baud */
-#define RIO_B110 0x03 /* 110 baud */
-#define RIO_B134 0x04 /* 134.5 baud */
-#define RIO_B150 0x05 /* 150 baud */
-#define RIO_B200 0x06 /* 200 baud */
-#define RIO_B300 0x07 /* 300 baud */
-#define RIO_B600 0x08 /* 600 baud */
-#define RIO_B1200 0x09 /* 1200 baud */
-#define RIO_B1800 0x0A /* 1800 baud */
-#define RIO_B2400 0x0B /* 2400 baud */
-#define RIO_B4800 0x0C /* 4800 baud */
-#define RIO_B9600 0x0D /* 9600 baud */
-#define RIO_B19200 0x0E /* 19200 baud */
-#define RIO_B38400 0x0F /* 38400 baud */
-#define RIO_B56000 0x10 /* 56000 baud */
-#define RIO_B57600 0x11 /* 57600 baud */
-#define RIO_B64000 0x12 /* 64000 baud */
-#define RIO_B115200 0x13 /* 115200 baud */
-#define RIO_B2000 0x14 /* 2000 baud */
+#define RIO_B0 0x00 /* RTS / DTR signals dropped */
+#define RIO_B50 0x01 /* 50 baud */
+#define RIO_B75 0x02 /* 75 baud */
+#define RIO_B110 0x03 /* 110 baud */
+#define RIO_B134 0x04 /* 134.5 baud */
+#define RIO_B150 0x05 /* 150 baud */
+#define RIO_B200 0x06 /* 200 baud */
+#define RIO_B300 0x07 /* 300 baud */
+#define RIO_B600 0x08 /* 600 baud */
+#define RIO_B1200 0x09 /* 1200 baud */
+#define RIO_B1800 0x0A /* 1800 baud */
+#define RIO_B2400 0x0B /* 2400 baud */
+#define RIO_B4800 0x0C /* 4800 baud */
+#define RIO_B9600 0x0D /* 9600 baud */
+#define RIO_B19200 0x0E /* 19200 baud */
+#define RIO_B38400 0x0F /* 38400 baud */
+#define RIO_B56000 0x10 /* 56000 baud */
+#define RIO_B57600 0x11 /* 57600 baud */
+#define RIO_B64000 0x12 /* 64000 baud */
+#define RIO_B115200 0x13 /* 115200 baud */
+#define RIO_B2000 0x14 /* 2000 baud */
/*****************************************************************************
********************************* *********************************
@@ -636,7 +630,7 @@ typedef struct _PKT
No parameters.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
#endif
/*****************************************************************************
********************************* *********************************
@@ -653,9 +647,9 @@ typedef struct _PKT
write flushing previously started by a pre-emptive WFLUSH packet. (in-band)
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
#endif
-#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
+#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
/*****************************************************************************
********************************* *********************************
@@ -669,8 +663,8 @@ typedef struct _PKT
packets of a port.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
-#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
#endif
/*****************************************************************************
@@ -685,8 +679,8 @@ typedef struct _PKT
transmission of data if blocked by XOFF. (as if XON had been received)
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
-#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
#endif
/*****************************************************************************
@@ -707,9 +701,9 @@ typedef struct _PKT
specified number of mS.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
#endif
-#define PKT_BreakDelay (PKT_Data+1) /* Break delay in mS */
+#define PKT_BreakDelay (PKT_Data+1) /* Break delay in mS */
/*****************************************************************************
********************************* *********************************
@@ -736,8 +730,8 @@ typedef struct _PKT
transmission of data. (as if XOFF had been received)
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
-#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
#endif
/*****************************************************************************
@@ -753,8 +747,8 @@ typedef struct _PKT
modem signals if the COR5_HUPCL (Hang Up On Close) flag is set.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
-#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
#endif
/*****************************************************************************
@@ -774,8 +768,8 @@ typedef struct _PKT
- Transparent Print Stop Sequence.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
-#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
#endif
/*****************************************************************************
@@ -789,14 +783,14 @@ typedef struct _PKT
MBIS is sent pre-emptively from the driver to set a port's modem signals.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
-#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
#endif
-#define PKT_ModemSet (PKT_Data+4) /* Modem set signals mask */
+#define PKT_ModemSet (PKT_Data+4) /* Modem set signals mask */
/* ModemSet definitions... */
-#define MBIS_RTS 0x01 /* RTS modem signal */
-#define MBIS_DTR 0x02 /* DTR modem signal */
+#define MBIS_RTS 0x01 /* RTS modem signal */
+#define MBIS_DTR 0x02 /* DTR modem signal */
/*****************************************************************************
********************************** **********************************
@@ -808,16 +802,16 @@ typedef struct _PKT
MBIC is sent pre-emptively from the driver to clear a port's modem signals.
*/
-#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
-#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
+#if 0
+#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
#endif
-#define PKT_ModemClear (PKT_Data+4) /* Modem clear signals mask */
+#define PKT_ModemClear (PKT_Data+4) /* Modem clear signals mask */
/* ModemClear definitions... */
-#define MBIC_RTS 0x01 /* RTS modem signal */
-#define MBIC_DTR 0x02 /* DTR modem signal */
+#define MBIC_RTS 0x01 /* RTS modem signal */
+#define MBIC_DTR 0x02 /* DTR modem signal */
/*****************************************************************************
********************************** **********************************
@@ -829,15 +823,15 @@ typedef struct _PKT
MSET is sent pre-emptively from the driver to set/clear a port's modem signals. */
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
-#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
#endif
-#define PKT_ModemSet (PKT_Data+4) /* Modem set signals mask */
+#define PKT_ModemSet (PKT_Data+4) /* Modem set signals mask */
/* ModemSet definitions... */
-#define MSET_RTS 0x01 /* RTS modem signal */
-#define MSET_DTR 0x02 /* DTR modem signal */
+#define MSET_RTS 0x01 /* RTS modem signal */
+#define MSET_DTR 0x02 /* DTR modem signal */
/*****************************************************************************
********************************* *********************************
@@ -853,7 +847,7 @@ typedef struct _PKT
port's transmit / receive and modem signals will be left enabled and the
port marked internally as Pseudo Closed. */
-#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
/*****************************************************************************
********************************** **********************************
@@ -865,8 +859,8 @@ typedef struct _PKT
MGET is sent pre-emptively from the driver to request the port's current modem signals. */
-#define PKT_Cmd (PKT_Data+0) /* Command code */
-#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
/*****************************************************************************
********************************* ********************************
@@ -880,11 +874,11 @@ typedef struct _PKT
of the specified port's RTA address space.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
#endif
-#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
-#define PKT_SubCmd (PKT_Data+5) /* Sub Command */
-#define PKT_Address (PKT_Data+6) /* Requested address */
+#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
+#define PKT_SubCmd (PKT_Data+5) /* Sub Command */
+#define PKT_Address (PKT_Data+6) /* Requested address */
/*****************************************************************************
****************************** *****************************
@@ -898,11 +892,11 @@ typedef struct _PKT
of the CD1400 register specified in address.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
#endif
-#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
-#define PKT_SubCmd (PKT_Data+5) /* Sub Command */
-#define PKT_Address (PKT_Data+6) /* Requested address */
+#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
+#define PKT_SubCmd (PKT_Data+5) /* Sub Command */
+#define PKT_Address (PKT_Data+6) /* Requested address */
/*****************************************************************************
************************ **************************
@@ -916,33 +910,33 @@ typedef struct _PKT
packets, except MEMDUMP and READ_REGISTER.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
#endif
-#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
-#define PKT_Cmd2 (PKT_Data+2) /* Command code copy */
-#define PKT_ModemStatus (PKT_Data+3) /* Modem signal status */
-#define PKT_PortStatus (PKT_Data+4) /* Port signal status */
-#define PKT_SubCmd (PKT_Data+5) /* Sub Command */
+#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
+#define PKT_Cmd2 (PKT_Data+2) /* Command code copy */
+#define PKT_ModemStatus (PKT_Data+3) /* Modem signal status */
+#define PKT_PortStatus (PKT_Data+4) /* Port signal status */
+#define PKT_SubCmd (PKT_Data+5) /* Sub Command */
/* ModemStatus definitions... */
-#define MODEM_DSR 0x80 /* Data Set Ready modem state */
-#define MODEM_CTS 0x40 /* Clear To Send modem state */
-#define MODEM_RI 0x20 /* Ring Indicate modem state */
-#define MODEM_CD 0x10 /* Carrier Detect modem state */
-#define MODEM_TSTOP 0x08 /* Transmit Stopped state */
-#define MODEM_TEMPTY 0x04 /* Transmit Empty state */
-#define MODEM_DTR 0x02 /* DTR modem output state */
-#define MODEM_RTS 0x01 /* RTS modem output state */
+#define MODEM_DSR 0x80 /* Data Set Ready modem state */
+#define MODEM_CTS 0x40 /* Clear To Send modem state */
+#define MODEM_RI 0x20 /* Ring Indicate modem state */
+#define MODEM_CD 0x10 /* Carrier Detect modem state */
+#define MODEM_TSTOP 0x08 /* Transmit Stopped state */
+#define MODEM_TEMPTY 0x04 /* Transmit Empty state */
+#define MODEM_DTR 0x02 /* DTR modem output state */
+#define MODEM_RTS 0x01 /* RTS modem output state */
/* PortStatus definitions... */
-#define PORT_ISOPEN 0x01 /* Port open ? */
-#define PORT_HUPCL 0x02 /* Hangup on close? */
-#define PORT_MOPENPEND 0x04 /* Modem open pending */
-#define PORT_ISPARALLEL 0x08 /* Parallel port */
-#define PORT_BREAK 0x10 /* Port on break */
-#define PORT_STATUSPEND 0020 /* Status packet pending */
-#define PORT_BREAKPEND 0x40 /* Break packet pending */
-#define PORT_MODEMPEND 0x80 /* Modem status packet pending */
+#define PORT_ISOPEN 0x01 /* Port open ? */
+#define PORT_HUPCL 0x02 /* Hangup on close? */
+#define PORT_MOPENPEND 0x04 /* Modem open pending */
+#define PORT_ISPARALLEL 0x08 /* Parallel port */
+#define PORT_BREAK 0x10 /* Port on break */
+#define PORT_STATUSPEND 0020 /* Status packet pending */
+#define PORT_BREAKPEND 0x40 /* Break packet pending */
+#define PORT_MODEMPEND 0x80 /* Modem status packet pending */
/*****************************************************************************
************************ **************************
@@ -956,35 +950,35 @@ typedef struct _PKT
packets, except MEMDUMP and READ_REGISTER.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
-#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
-#define PKT_Cmd2 (PKT_Data+2) /* Command code copy */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
+#define PKT_Cmd2 (PKT_Data+2) /* Command code copy */
#endif
-#define PKT_ModemStatus (PKT_Data+3) /* Modem signal status */
-#define PKT_PortStatus (PKT_Data+4) /* Port signal status */
+#define PKT_ModemStatus (PKT_Data+3) /* Modem signal status */
+#define PKT_PortStatus (PKT_Data+4) /* Port signal status */
#if 0
-#define PKT_SubCmd (PKT_Data+5) /* Sub Command */
+#define PKT_SubCmd (PKT_Data+5) /* Sub Command */
#endif
/* ModemStatus definitions... */
-#define MODEM_DSR 0x80 /* Data Set Ready modem state */
-#define MODEM_CTS 0x40 /* Clear To Send modem state */
-#define MODEM_RI 0x20 /* Ring Indicate modem state */
-#define MODEM_CD 0x10 /* Carrier Detect modem state */
-#define MODEM_TSTOP 0x08 /* Transmit Stopped state */
-#define MODEM_TEMPTY 0x04 /* Transmit Empty state */
-#define MODEM_DTR 0x02 /* DTR modem output state */
-#define MODEM_RTS 0x01 /* RTS modem output state */
+#define MODEM_DSR 0x80 /* Data Set Ready modem state */
+#define MODEM_CTS 0x40 /* Clear To Send modem state */
+#define MODEM_RI 0x20 /* Ring Indicate modem state */
+#define MODEM_CD 0x10 /* Carrier Detect modem state */
+#define MODEM_TSTOP 0x08 /* Transmit Stopped state */
+#define MODEM_TEMPTY 0x04 /* Transmit Empty state */
+#define MODEM_DTR 0x02 /* DTR modem output state */
+#define MODEM_RTS 0x01 /* RTS modem output state */
/* PortStatus definitions... */
-#define PORT_ISOPEN 0x01 /* Port open ? */
-#define PORT_HUPCL 0x02 /* Hangup on close? */
-#define PORT_MOPENPEND 0x04 /* Modem open pending */
-#define PORT_ISPARALLEL 0x08 /* Parallel port */
-#define PORT_BREAK 0x10 /* Port on break */
-#define PORT_STATUSPEND 0020 /* Status packet pending */
-#define PORT_BREAKPEND 0x40 /* Break packet pending */
-#define PORT_MODEMPEND 0x80 /* Modem status packet pending */
+#define PORT_ISOPEN 0x01 /* Port open ? */
+#define PORT_HUPCL 0x02 /* Hangup on close? */
+#define PORT_MOPENPEND 0x04 /* Modem open pending */
+#define PORT_ISPARALLEL 0x08 /* Parallel port */
+#define PORT_BREAK 0x10 /* Port on break */
+#define PORT_STATUSPEND 0020 /* Status packet pending */
+#define PORT_BREAKPEND 0x40 /* Break packet pending */
+#define PORT_MODEMPEND 0x80 /* Modem status packet pending */
/*****************************************************************************
******************** ********************
@@ -998,15 +992,15 @@ typedef struct _PKT
port I/O control command packet.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
-#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
-#define PKT_Cmd2 (PKT_Data+2) /* Command code copy */
-#define PKT_ModemStatus (PKT_Data+3) /* Modem signal status */
-#define PKT_PortStatus (PKT_Data+4) /* Port signal status */
-#define PKT_SubCmd (PKT_Data+5) /* Sub Command */
-#define PKT_Address (PKT_Data+6) /* Requested address */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
+#define PKT_Cmd2 (PKT_Data+2) /* Command code copy */
+#define PKT_ModemStatus (PKT_Data+3) /* Modem signal status */
+#define PKT_PortStatus (PKT_Data+4) /* Port signal status */
+#define PKT_SubCmd (PKT_Data+5) /* Sub Command */
+#define PKT_Address (PKT_Data+6) /* Requested address */
#endif
-#define PKT_Dump (PKT_Data+8) /* 32bytes of requested dump data */
+#define PKT_Dump (PKT_Data+8) /* 32bytes of requested dump data */
/*****************************************************************************
***************** *****************
@@ -1020,14 +1014,14 @@ typedef struct _PKT
READ_REGISTER port I/O control command packet.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /*Command code */
-#define PKT_PhbNum (PKT_Data+1) /*Port number wrt RTA */
-#define PKT_Cmd2 (PKT_Data+2) /* Command code copy */
+#define PKT_Cmd (PKT_Data+0) /*Command code */
+#define PKT_PhbNum (PKT_Data+1) /*Port number wrt RTA */
+#define PKT_Cmd2 (PKT_Data+2) /* Command code copy */
#endif
-#define PKT_RegisterValue (PKT_Data+3) /* Modem signal status */
+#define PKT_RegisterValue (PKT_Data+3) /* Modem signal status */
#if 0
-#define PKT_PortStatus (PKT_Data+4) /* Port signal status */
-#define PKT_SubCmd (PKT_Data+5) /* Sub Command */
+#define PKT_PortStatus (PKT_Data+4) /* Port signal status */
+#define PKT_SubCmd (PKT_Data+5) /* Sub Command */
#endif
/*****************************************************************************
@@ -1041,9 +1035,9 @@ typedef struct _PKT
COMMAND_RUP - BREAK_RECEIVED packets are sent when the port detects a receive BREAK signal.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
-#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
-#define PKT_Cmd2 (PKT_Data+2) /* Command code copy */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
+#define PKT_Cmd2 (PKT_Data+2) /* Command code copy */
#endif
/*****************************************************************************
@@ -1059,10 +1053,10 @@ typedef struct _PKT
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
-#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
-#define PKT_Cmd2 (PKT_Data+2) /* Command code copy */
-#define PKT_ModemStatus (PKT_Data+3) /* Modem signal status */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_PhbNum (PKT_Data+1) /* Port number wrt RTA */
+#define PKT_Cmd2 (PKT_Data+2) /* Command code copy */
+#define PKT_ModemStatus (PKT_Data+3) /* Modem signal status */
#endif
/*****************************************************************************
@@ -1077,7 +1071,7 @@ typedef struct _PKT
firmware code to load onto attached RTAs.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
#endif
/*****************************************************************************
@@ -1092,12 +1086,12 @@ typedef struct _PKT
to a BOOT_RUP - BOOT_REQUEST packet.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
#endif
-#define PKT_NumPackets (PKT_Data+2) /* Packets required to load firmware */
-#define PKT_LoadBase (PKT_Data+4) /* RTA firmware load address */
-#define PKT_CodeSize (PKT_Data+6) /* Size of firmware in bytes */
-#define PKT_CmdString (PKT_Data+8) /* Command string */
+#define PKT_NumPackets (PKT_Data+2) /* Packets required to load firmware */
+#define PKT_LoadBase (PKT_Data+4) /* RTA firmware load address */
+#define PKT_CodeSize (PKT_Data+6) /* Size of firmware in bytes */
+#define PKT_CmdString (PKT_Data+8) /* Command string */
/*****************************************************************************
************************ ***********************
@@ -1111,10 +1105,10 @@ typedef struct _PKT
RTA firmware has completed.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
#endif
-#define PKT_LinkNumber (PKT_Data+1) /* Link number RTA booted on */
-#define PKT_SerialNumber (PKT_Data+2) /* 4 byte serial number */
+#define PKT_LinkNumber (PKT_Data+1) /* Link number RTA booted on */
+#define PKT_SerialNumber (PKT_Data+2) /* 4 byte serial number */
/*****************************************************************************
************************ ***********************
@@ -1127,7 +1121,7 @@ typedef struct _PKT
BOOT_RUP packet without the PKT_CMD_BIT set in the PKT->len field is sent
from RIO to the Driver as a request for a firmware boot packet. */
-#define PKT_SequenceNumber (PKT_Data+0) /* Packet sequence number */
+#define PKT_SequenceNumber (PKT_Data+0) /* Packet sequence number */
/*****************************************************************************
*********************** ***********************
@@ -1141,9 +1135,9 @@ typedef struct _PKT
packet with the 70 bytes of the requested sequence.
*/
#if 0
-#define PKT_SequenceNumber (PKT_Data+0) /* Packet sequence number */
+#define PKT_SequenceNumber (PKT_Data+0) /* Packet sequence number */
#endif
-#define PKT_FirmwarePacket (PKT_Data+2) /* Firmware packet */
+#define PKT_FirmwarePacket (PKT_Data+2) /* Firmware packet */
/*****************************************************************************
**************************** ****************************
@@ -1157,10 +1151,10 @@ typedef struct _PKT
RTA to shut down and reboot.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
#endif
-#define PKT_IfoadId1 (PKT_Data+2) /* IFOAD Id 1 */
-#define PKT_IfoadId2 (PKT_Data+3) /* IFOAD Id 2 */
+#define PKT_IfoadId1 (PKT_Data+2) /* IFOAD Id 1 */
+#define PKT_IfoadId2 (PKT_Data+3) /* IFOAD Id 2 */
#define IFOADID1 0xAD
#define IFOADID2 0xF0
@@ -1177,9 +1171,9 @@ typedef struct _PKT
RTA to flash its LEDs for a period of time.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
#endif
-#define PKT_IdentifyId (PKT_Data+2) /* defines pattern to flash */
+#define PKT_IdentifyId (PKT_Data+2) /* defines pattern to flash */
/*****************************************************************************
**************************** ***************************
@@ -1193,10 +1187,10 @@ typedef struct _PKT
RTA to shut down and flash it's LEDs.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
#endif
-#define PKT_ZombieId1 (PKT_Data+2) /* ZOMBIE Id 1 */
-#define PKT_ZombieId2 (PKT_Data+3) /* ZOMBIE Id 2 */
+#define PKT_ZombieId1 (PKT_Data+2) /* ZOMBIE Id 1 */
+#define PKT_ZombieId2 (PKT_Data+3) /* ZOMBIE Id 2 */
#define ZOMBIEID1 0x52
#define ZOMBIEID2 0x21
@@ -1213,11 +1207,11 @@ typedef struct _PKT
to ask it's neighbouring RTA to shut down and reboot.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
-#define PKT_LinkNumber (PKT_Data+1) /* Link number of RTA to UFOAD */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_LinkNumber (PKT_Data+1) /* Link number of RTA to UFOAD */
#endif
-#define PKT_UfoadId1 (PKT_Data+2) /* UFOAD Id 1 */
-#define PKT_UfoadId2 (PKT_Data+3) /* UFOAD Id 2 */
+#define PKT_UfoadId1 (PKT_Data+2) /* UFOAD Id 1 */
+#define PKT_UfoadId2 (PKT_Data+3) /* UFOAD Id 2 */
#define UFOADID1 0x1E
#define UFOADID2 0x0D
@@ -1234,11 +1228,11 @@ typedef struct _PKT
to pause booting on the specified link for 30 seconds.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
-#define PKT_LinkNumber (PKT_Data+1) /* Link number of RTA to UFOAD */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_LinkNumber (PKT_Data+1) /* Link number of RTA to UFOAD */
#endif
-#define PKT_IwaitId1 (PKT_Data+2) /* IWAIT Id 1 */
-#define PKT_IwaitId2 (PKT_Data+3) /* IWAIT Id 2 */
+#define PKT_IwaitId1 (PKT_Data+2) /* IWAIT Id 1 */
+#define PKT_IwaitId2 (PKT_Data+3) /* IWAIT Id 2 */
#define IWAITID1 0xDE
#define IWAITID2 0xB1
@@ -1255,20 +1249,20 @@ typedef struct _PKT
RTA to a Driver to request an ID (RUP or unit number).
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
#endif
-#define PKT_SerialNumber (PKT_Data+2) /* 4 byte serial number */
-#define PKT_ModuleTypes (PKT_Data+6) /* RTA Module types */
+#define PKT_SerialNumber (PKT_Data+2) /* 4 byte serial number */
+#define PKT_ModuleTypes (PKT_Data+6) /* RTA Module types */
/* ModuleTypes definitions... */
-#define MOD_BLANK 0x0F /* Blank plate attached */
-#define MOD_RS232DB25 0x00 /* RS232 DB25 connector */
-#define MOD_RS232RJ45 0x01 /* RS232 RJ45 connector */
-#define MOD_RS422DB25 0x02 /* RS422 DB25 connector */
-#define MOD_RS485DB25 0x03 /* RS485 DB25 connector */
-#define MOD_PARALLEL 0x04 /* Centronics parallel */
+#define MOD_BLANK 0x0F /* Blank plate attached */
+#define MOD_RS232DB25 0x00 /* RS232 DB25 connector */
+#define MOD_RS232RJ45 0x01 /* RS232 RJ45 connector */
+#define MOD_RS422DB25 0x02 /* RS422 DB25 connector */
+#define MOD_RS485DB25 0x03 /* RS485 DB25 connector */
+#define MOD_PARALLEL 0x04 /* Centronics parallel */
-#define MOD2 0x08 /* Set to indicate Rev2 module */
+#define MOD2 0x08 /* Set to indicate Rev2 module */
/*****************************************************************************
************************* *************************
@@ -1282,9 +1276,9 @@ typedef struct _PKT
packet to cause the RTA to "Fall Over And Die"., i.e. shutdown and reboot.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
#endif
-#define PKT_RouteCmdString (PKT_Data+2) /* Command string */
+#define PKT_RouteCmdString (PKT_Data+2) /* Command string */
/*****************************************************************************
*********************** ***********************
@@ -1298,13 +1292,13 @@ typedef struct _PKT
packet to allocate the RTA's Id number (RUP number 1..16)
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
#endif
-#define PKT_IdNum (PKT_Data+1) /* RUP number for ports 1..8 */
+#define PKT_IdNum (PKT_Data+1) /* RUP number for ports 1..8 */
#if 0
-#define PKT_RouteCmdString (PKT_Data+2) /* Command string */
+#define PKT_RouteCmdString (PKT_Data+2) /* Command string */
#endif
-#define PKT_IdNum2 (PKT_Data+0x17) /* RUP number for ports 9..16 */
+#define PKT_IdNum2 (PKT_Data+0x17) /* RUP number for ports 9..16 */
/*****************************************************************************
*********************** ***********************
@@ -1318,18 +1312,18 @@ typedef struct _PKT
current link status.
*/
#if 0
-#define PKT_Cmd (PKT_Data+0) /* Command code */
+#define PKT_Cmd (PKT_Data+0) /* Command code */
#endif
-#define PKT_Link1Rup (PKT_Data+2) /* Link 1 RUP number */
-#define PKT_Link1Link (PKT_Data+3) /* Link 1 link number */
-#define PKT_Link2Rup (PKT_Data+4) /* Link 2 RUP number */
-#define PKT_Link2Link (PKT_Data+5) /* Link 2 link number */
-#define PKT_Link3Rup (PKT_Data+6) /* Link 3 RUP number */
-#define PKT_Link3Link (PKT_Data+7) /* Link 3 link number */
-#define PKT_Link4Rup (PKT_Data+8) /* Link 4 RUP number */
-#define PKT_Link4Link (PKT_Data+9) /* Link 4 link number */
-#define PKT_RtaVpdProm (PKT_Data+10) /* 32 bytes of RTA VPD PROM Contents */
-
-#endif /* _sxwinif_h */
+#define PKT_Link1Rup (PKT_Data+2) /* Link 1 RUP number */
+#define PKT_Link1Link (PKT_Data+3) /* Link 1 link number */
+#define PKT_Link2Rup (PKT_Data+4) /* Link 2 RUP number */
+#define PKT_Link2Link (PKT_Data+5) /* Link 2 link number */
+#define PKT_Link3Rup (PKT_Data+6) /* Link 3 RUP number */
+#define PKT_Link3Link (PKT_Data+7) /* Link 3 link number */
+#define PKT_Link4Rup (PKT_Data+8) /* Link 4 RUP number */
+#define PKT_Link4Link (PKT_Data+9) /* Link 4 link number */
+#define PKT_RtaVpdProm (PKT_Data+10) /* 32 bytes of RTA VPD PROM Contents */
+
+#endif /* _sxwinif_h */
/* End of RIOWINIF.H */
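
The RIOWINIF.H hunks above are whitespace-only: they re-align the comment columns of the packet field offsets (PKT_Cmd, PKT_PhbNum, PKT_ModemStatus and so on), which address bytes relative to PKT_Data inside a received packet. As a rough usage sketch only (PKT_Data and the real packet layout are defined elsewhere in this header, so the offset of 0 below is an assumption, not the driver's value), a field accessor over a raw byte buffer could look like this:

/* Illustrative sketch only: flat byte buffer, assumed PKT_Data offset;
 * not the driver's real packet structures. */
#include <stdio.h>

#define PKT_Data        0                    /* assumed data offset  */
#define PKT_Cmd         (PKT_Data + 0)       /* Command code         */
#define PKT_PhbNum      (PKT_Data + 1)       /* Port number wrt RTA  */
#define PKT_ModemStatus (PKT_Data + 3)       /* Modem signal status  */

#define MODEM_CD        0x10                 /* Carrier Detect       */

static int carrier_present(const unsigned char *pkt)
{
        /* The field macros simply index into the packet data area. */
        return (pkt[PKT_ModemStatus] & MODEM_CD) != 0;
}

int main(void)
{
        unsigned char pkt[8] = { 0 };

        pkt[PKT_ModemStatus] = MODEM_CD;
        printf("cmd=%d port=%d carrier=%d\n",
               pkt[PKT_Cmd], pkt[PKT_PhbNum], carrier_present(pkt));
        return 0;
}
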
diff --git a/drivers/char/rio/riscos.h b/drivers/char/rio/riscos.h
index 7685cc1d9e7..60d66d0056a 100644
--- a/drivers/char/rio/riscos.h
+++ b/drivers/char/rio/riscos.h
@@ -60,4 +60,4 @@ static char *_riscos_h_sccs_ = "@(#)riscos.h 1.2";
#define RINDW(A) (*(ushort *)(A))
#define WINDW(A,V) (*(ushort *)(A)=(ushort)(V))
-#endif /* __rio_riscos_h__ */
+#endif /* __rio_riscos_h__ */
diff --git a/drivers/char/rio/rom.h b/drivers/char/rio/rom.h
index ee79b8e5b97..58a7843625f 100644
--- a/drivers/char/rio/rom.h
+++ b/drivers/char/rio/rom.h
@@ -39,19 +39,19 @@
#ifndef lint
#ifdef SCCS
-static char *_rio_rom_h_sccs = "@(#)rom.h 1.1" ;
+static char *_rio_rom_h_sccs = "@(#)rom.h 1.1";
#endif
#endif
-typedef struct ROM ROM ;
-struct ROM {
- u_short slx ;
- char pcb_letter_rev ;
- char pcb_number_rev ;
- char serial[4] ;
- char year ;
- char week ;
- } ;
+typedef struct ROM ROM;
+struct ROM {
+ u_short slx;
+ char pcb_letter_rev;
+ char pcb_number_rev;
+ char serial[4];
+ char year;
+ char week;
+};
#endif
@@ -60,5 +60,3 @@ struct ROM {
#define ROM_LENGTH 0x20
/*********** end of file ***********/
-
-
diff --git a/drivers/char/rio/route.h b/drivers/char/rio/route.h
index c42dbb97171..769744e575a 100644
--- a/drivers/char/rio/route.h
+++ b/drivers/char/rio/route.h
@@ -44,26 +44,26 @@
#endif
#define MAX_LINKS 4
-#define MAX_NODES 17 /* Maximum nodes in a subnet */
-#define NODE_BYTES ((MAX_NODES / 8) + 1) /* Number of bytes needed for
- 1 bit per node */
-#define ROUTE_DATA_SIZE (NODE_BYTES + 2) /* Number of bytes for complete
- info about cost etc. */
+#define MAX_NODES 17 /* Maximum nodes in a subnet */
+#define NODE_BYTES ((MAX_NODES / 8) + 1) /* Number of bytes needed for
+ 1 bit per node */
+#define ROUTE_DATA_SIZE (NODE_BYTES + 2) /* Number of bytes for complete
+ info about cost etc. */
#define ROUTES_PER_PACKET ((PKT_MAX_DATA_LEN -2)/ ROUTE_DATA_SIZE)
- /* Number of nodes we can squeeze
- into one packet */
+ /* Number of nodes we can squeeze
+ into one packet */
#define MAX_TOPOLOGY_PACKETS (MAX_NODES / ROUTES_PER_PACKET + 1)
/************************************************
* Define the types of command for the ROUTE RUP.
************************************************/
-#define ROUTE_REQUEST 0 /* Request an ID */
-#define ROUTE_FOAD 1 /* Kill the RTA */
-#define ROUTE_ALREADY 2 /* ID given already */
-#define ROUTE_USED 3 /* All ID's used */
-#define ROUTE_ALLOCATE 4 /* Here it is */
-#define ROUTE_REQ_TOP 5 /* I bet you didn't expect....
- the Topological Inquisition */
-#define ROUTE_TOPOLOGY 6 /* Topology request answered FD */
+#define ROUTE_REQUEST 0 /* Request an ID */
+#define ROUTE_FOAD 1 /* Kill the RTA */
+#define ROUTE_ALREADY 2 /* ID given already */
+#define ROUTE_USED 3 /* All ID's used */
+#define ROUTE_ALLOCATE 4 /* Here it is */
+#define ROUTE_REQ_TOP 5 /* I bet you didn't expect....
+ the Topological Inquisition */
+#define ROUTE_TOPOLOGY 6 /* Topology request answered FD */
/*******************************************************************
* Define the Route Map Structure
*
@@ -72,22 +72,22 @@
******************************************************************/
typedef struct COST_ROUTE COST_ROUTE;
struct COST_ROUTE {
- unsigned char cost; /* Cost down this link */
- unsigned char route[NODE_BYTES]; /* Nodes thorough this route */
- } ;
+ unsigned char cost; /* Cost down this link */
+ unsigned char route[NODE_BYTES]; /* Nodes thorough this route */
+};
-typedef struct ROUTE_STR ROUTE_STR ;
-struct ROUTE_STR {
- COST_ROUTE cost_route[MAX_LINKS];
- /* cost / route for this link */
- ushort favoured; /* favoured link */
- } ;
+typedef struct ROUTE_STR ROUTE_STR;
+struct ROUTE_STR {
+ COST_ROUTE cost_route[MAX_LINKS];
+ /* cost / route for this link */
+ ushort favoured; /* favoured link */
+};
-#define NO_LINK (short) 5 /* Link unattached */
-#define ROUTE_NO_ID (short) 100 /* No Id */
-#define ROUTE_DISCONNECT (ushort) 0xff /* Not connected */
-#define ROUTE_INTERCONNECT (ushort) 0x40 /* Sub-net interconnect */
+#define NO_LINK (short) 5 /* Link unattached */
+#define ROUTE_NO_ID (short) 100 /* No Id */
+#define ROUTE_DISCONNECT (ushort) 0xff /* Not connected */
+#define ROUTE_INTERCONNECT (ushort) 0x40 /* Sub-net interconnect */
#define SYNC_RUP (ushort) 255
@@ -99,10 +99,9 @@ struct ROUTE_STR {
#define STATUS_RUP (ushort) 249
#define POWER_RUP (ushort) 248
-#define HIGHEST_RUP (ushort) 255 /* Set to Top one */
-#define LOWEST_RUP (ushort) 248 /* Set to bottom one */
+#define HIGHEST_RUP (ushort) 255 /* Set to Top one */
+#define LOWEST_RUP (ushort) 248 /* Set to bottom one */
#endif
/*********** end of file ***********/
-
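
route.h sizes the per-link route map at one bit per node: NODE_BYTES is ((MAX_NODES / 8) + 1) bytes, and each COST_ROUTE carries that many bytes per link. A minimal standalone sketch of the same bit-per-node bookkeeping, with helper names invented for illustration rather than taken from the driver:

/* Bit-per-node route map sketch; mirrors the NODE_BYTES sizing above. */
#include <stdio.h>

#define MAX_NODES  17
#define NODE_BYTES ((MAX_NODES / 8) + 1)   /* 3 bytes cover 17 nodes */

static void route_set_node(unsigned char *map, int node)
{
        map[node / 8] |= (unsigned char)(1u << (node % 8));
}

static int route_has_node(const unsigned char *map, int node)
{
        return (map[node / 8] >> (node % 8)) & 1;
}

int main(void)
{
        unsigned char map[NODE_BYTES] = { 0 };

        route_set_node(map, 16);           /* highest valid node */
        printf("node 16: %d, node 3: %d\n",
               route_has_node(map, 16), route_has_node(map, 3));
        return 0;
}
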
diff --git a/drivers/char/rio/rtahw.h b/drivers/char/rio/rtahw.h
index 06860118cbc..e6c2cdfd3a1 100644
--- a/drivers/char/rio/rtahw.h
+++ b/drivers/char/rio/rtahw.h
@@ -37,7 +37,7 @@
#ifndef lint
#ifdef SCCS_LABELS
-static char *_rio_rtahw_h_sccs = "@(#)rtahw.h 1.5" ;
+static char *_rio_rtahw_h_sccs = "@(#)rtahw.h 1.5";
#endif
#endif
@@ -58,12 +58,12 @@ static char *_rio_rtahw_h_sccs = "@(#)rtahw.h 1.5" ;
** Define the different types of modules we can have
*/
enum module {
- MOD_BLANK = 0x0f, /* Blank plate attached */
- MOD_RS232DB25 = 0x00, /* RS232 DB25 connector */
- MOD_RS232RJ45 = 0x01, /* RS232 RJ45 connector */
- MOD_RS422DB25 = 0x02, /* RS422 DB25 connector */
- MOD_RS485DB25 = 0x03, /* RS485 DB25 connector */
- MOD_PARALLEL = 0x04 /* Centronics parallel */
+ MOD_BLANK = 0x0f, /* Blank plate attached */
+ MOD_RS232DB25 = 0x00, /* RS232 DB25 connector */
+ MOD_RS232RJ45 = 0x01, /* RS232 RJ45 connector */
+ MOD_RS422DB25 = 0x02, /* RS422 DB25 connector */
+ MOD_RS485DB25 = 0x03, /* RS485 DB25 connector */
+ MOD_PARALLEL = 0x04 /* Centronics parallel */
};
#define TYPE_HOST 0
diff --git a/drivers/char/rio/rup.h b/drivers/char/rio/rup.h
index b9d2bc03d14..8d44fec91dd 100644
--- a/drivers/char/rio/rup.h
+++ b/drivers/char/rio/rup.h
@@ -44,39 +44,38 @@
#endif
#if defined( HOST ) || defined( INKERNEL )
-#define MAX_RUP ((short) 16)
+#define MAX_RUP ((short) 16)
#endif
#ifdef RTA
#define MAX_RUP ((short) 1)
#endif
-#define PKTS_PER_RUP ((short) 2) /* They are always used in pairs */
+#define PKTS_PER_RUP ((short) 2) /* They are always used in pairs */
/*************************************************
* Define all the packet request stuff
************************************************/
-#define TX_RUP_INACTIVE 0 /* Nothing to transmit */
-#define TX_PACKET_READY 1 /* Transmit packet ready */
-#define TX_LOCK_RUP 2 /* Transmit side locked */
+#define TX_RUP_INACTIVE 0 /* Nothing to transmit */
+#define TX_PACKET_READY 1 /* Transmit packet ready */
+#define TX_LOCK_RUP 2 /* Transmit side locked */
-#define RX_RUP_INACTIVE 0 /* Nothing received */
-#define RX_PACKET_READY 1 /* Packet received */
+#define RX_RUP_INACTIVE 0 /* Nothing received */
+#define RX_PACKET_READY 1 /* Packet received */
-#define RUP_NO_OWNER 0xff /* RUP not owned by any process */
+#define RUP_NO_OWNER 0xff /* RUP not owned by any process */
struct RUP {
- PKT_ptr txpkt; /* Outgoing packet */
- PKT_ptr rxpkt; /* Incoming packet */
- WORD link; /* Which link to send down? */
- BYTE rup_dest_unit[2]; /* Destination unit */
- WORD handshake; /* For handshaking */
- WORD timeout; /* Timeout */
- WORD status; /* Status */
- WORD txcontrol; /* Transmit control */
- WORD rxcontrol; /* Receive control */
- };
-
+ PKT_ptr txpkt; /* Outgoing packet */
+ PKT_ptr rxpkt; /* Incoming packet */
+ WORD link; /* Which link to send down? */
+ BYTE rup_dest_unit[2]; /* Destination unit */
+ WORD handshake; /* For handshaking */
+ WORD timeout; /* Timeout */
+ WORD status; /* Status */
+ WORD txcontrol; /* Transmit control */
+ WORD rxcontrol; /* Receive control */
+};
+
#endif
/*********** end of file ***********/
-
diff --git a/drivers/char/rio/rupstat.h b/drivers/char/rio/rupstat.h
index b4aafaff0d7..56d828c63d2 100644
--- a/drivers/char/rio/rupstat.h
+++ b/drivers/char/rio/rupstat.h
@@ -39,7 +39,7 @@
#ifndef lint
#ifdef SCCS_LABELS
-static char *_rio_rupstat_h_sccs = "@(#)rupstat.h 1.1" ;
+static char *_rio_rupstat_h_sccs = "@(#)rupstat.h 1.1";
#endif
#endif
@@ -48,4 +48,3 @@ static char *_rio_rupstat_h_sccs = "@(#)rupstat.h 1.1" ;
#define STATUS_TOPOLOGY 2
#endif
-
diff --git a/drivers/char/rio/sam.h b/drivers/char/rio/sam.h
index c1accb8cb62..31494054b21 100644
--- a/drivers/char/rio/sam.h
+++ b/drivers/char/rio/sam.h
@@ -60,15 +60,12 @@
#define RX FALSE
-typedef struct FREE_LIST FREE_LIST ;
-struct FREE_LIST {
- FREE_LIST_ptr next ;
- FREE_LIST_ptr prev ;
- } ;
+typedef struct FREE_LIST FREE_LIST;
+struct FREE_LIST {
+ FREE_LIST_ptr next;
+ FREE_LIST_ptr prev;
+};
#endif
/*********** end of file ***********/
-
-
-
diff --git a/drivers/char/rio/selftest.h b/drivers/char/rio/selftest.h
index deae48722a2..7a3dba35232 100644
--- a/drivers/char/rio/selftest.h
+++ b/drivers/char/rio/selftest.h
@@ -38,36 +38,36 @@
** selftest on a booting RTA.
*/
typedef struct {
- short magic; /* Identifies packet type */
- int test; /* Test number, see below */
- unsigned int result; /* Result value */
- unsigned int dataIn;
- unsigned int dataOut;
-}selftestStruct;
+ short magic; /* Identifies packet type */
+ int test; /* Test number, see below */
+ unsigned int result; /* Result value */
+ unsigned int dataIn;
+ unsigned int dataOut;
+} selftestStruct;
/*
** The different tests are identified by the following data values.
*/
enum test {
- TESTS_COMPLETE = 0x00,
- MEMTEST_ADDR = 0x01,
- MEMTEST_BIT = 0x02,
- MEMTEST_FILL = 0x03,
- MEMTEST_DATABUS = 0x04,
- MEMTEST_ADDRBUS = 0x05,
- CD1400_INIT = 0x10,
- CD1400_LOOP = 0x11,
- CD1400_INTERRUPT = 0x12
+ TESTS_COMPLETE = 0x00,
+ MEMTEST_ADDR = 0x01,
+ MEMTEST_BIT = 0x02,
+ MEMTEST_FILL = 0x03,
+ MEMTEST_DATABUS = 0x04,
+ MEMTEST_ADDRBUS = 0x05,
+ CD1400_INIT = 0x10,
+ CD1400_LOOP = 0x11,
+ CD1400_INTERRUPT = 0x12
};
enum result {
- E_PORT = 0x10,
- E_TX = 0x11,
- E_RX = 0x12,
- E_EXCEPT = 0x13,
- E_COMPARE = 0x14,
- E_MODEM = 0x15,
- E_TIMEOUT = 0x16,
- E_INTERRUPT = 0x17
+ E_PORT = 0x10,
+ E_TX = 0x11,
+ E_RX = 0x12,
+ E_EXCEPT = 0x13,
+ E_COMPARE = 0x14,
+ E_MODEM = 0x15,
+ E_TIMEOUT = 0x16,
+ E_INTERRUPT = 0x17
};
-#endif /* _selftests_h_ */
+#endif /* _selftests_h_ */
diff --git a/drivers/char/rio/space.h b/drivers/char/rio/space.h
index 72398d34205..1f12690f9d1 100644
--- a/drivers/char/rio/space.h
+++ b/drivers/char/rio/space.h
@@ -42,4 +42,4 @@ extern int rio_bases[];
extern int rio_limits[];
extern int rio_vects[];
-#endif /* __rio_space_h__ */
+#endif /* __rio_space_h__ */
diff --git a/drivers/char/rio/sysmap.h b/drivers/char/rio/sysmap.h
index fdc73139357..e1c6f1160df 100644
--- a/drivers/char/rio/sysmap.h
+++ b/drivers/char/rio/sysmap.h
@@ -37,27 +37,26 @@
#ifndef lint
#ifdef SCCS_LABELS
-static char *_rio_sysmap_h_sccs = "@(#)sysmap.h 1.1" ;
+static char *_rio_sysmap_h_sccs = "@(#)sysmap.h 1.1";
#endif
#endif
-#define SYSTEM_MAP_LEN 64 /* Len of System Map array */
+#define SYSTEM_MAP_LEN 64 /* Len of System Map array */
-typedef struct SYS_MAP SYS_MAP ;
-typedef struct SYS_MAP_LINK SYS_MAP_LINK ;
+typedef struct SYS_MAP SYS_MAP;
+typedef struct SYS_MAP_LINK SYS_MAP_LINK;
struct SYS_MAP_LINK {
- short id ; /* Unit Id */
- short link ; /* Id's Link */
- short been_here ; /* Used by map_gen */
- } ;
+ short id; /* Unit Id */
+ short link; /* Id's Link */
+ short been_here; /* Used by map_gen */
+};
struct SYS_MAP {
- char serial_num[4] ;
- SYS_MAP_LINK link[4] ;
- } ;
+ char serial_num[4];
+ SYS_MAP_LINK link[4];
+};
/*********** end of file ***********/
-
diff --git a/drivers/char/rio/timeouts.h b/drivers/char/rio/timeouts.h
index 11b31330c90..a8b5be3ca9b 100644
--- a/drivers/char/rio/timeouts.h
+++ b/drivers/char/rio/timeouts.h
@@ -37,15 +37,14 @@
#ifndef lint
#ifdef SCCS_LABELS
-static char *_rio_defaults_h_sccs = "@(#)timeouts.h 1.3" ;
+static char *_rio_defaults_h_sccs = "@(#)timeouts.h 1.3";
#endif
#endif
-#define MILLISECOND (int) (1000/64) /* 15.625 low ticks */
-#define SECOND (int) 15625 /* Low priority ticks */
+#define MILLISECOND (int) (1000/64) /* 15.625 low ticks */
+#define SECOND (int) 15625 /* Low priority ticks */
#define TX_TIMEOUT (int) (200 * MILLISECOND)
/*********** end of file ***********/
-
diff --git a/drivers/char/rio/top.h b/drivers/char/rio/top.h
index 255c40d463a..d15a11dc4f7 100644
--- a/drivers/char/rio/top.h
+++ b/drivers/char/rio/top.h
@@ -40,10 +40,9 @@ static char *_top_h_sccs_ = "@(#)top.h 1.2";
/*
** Topology information
*/
-struct Top
-{
- uchar Unit;
- uchar Link;
+struct Top {
+ uchar Unit;
+ uchar Link;
};
-#endif /* __rio_top_h__ */
+#endif /* __rio_top_h__ */
diff --git a/drivers/char/rio/typdef.h b/drivers/char/rio/typdef.h
index 2cb9dd693fa..185b889e151 100644
--- a/drivers/char/rio/typdef.h
+++ b/drivers/char/rio/typdef.h
@@ -45,11 +45,11 @@ static char *_typdef_h_sccs_ = "@(#)typdef.h 1.2";
** These types are ONLY to be used for refering to data structures
** on the RIO Host card!
*/
-typedef volatile unsigned char BYTE;
-typedef volatile unsigned short WORD;
-typedef volatile unsigned int DWORD;
-typedef volatile unsigned short RIOP;
-typedef volatile short NUMBER;
+typedef volatile unsigned char BYTE;
+typedef volatile unsigned short WORD;
+typedef volatile unsigned int DWORD;
+typedef volatile unsigned short RIOP;
+typedef volatile short NUMBER;
/*
@@ -59,13 +59,13 @@ typedef volatile short NUMBER;
** are here only to make the source compile.
*/
/* typedef unsigned int uint; */
-typedef unsigned long ulong_t;
-typedef unsigned short ushort_t;
-typedef unsigned char uchar_t;
-typedef unsigned char queue_t;
-typedef unsigned char mblk_t;
-typedef unsigned int paddr_t;
-typedef unsigned char uchar;
+typedef unsigned long ulong_t;
+typedef unsigned short ushort_t;
+typedef unsigned char uchar_t;
+typedef unsigned char queue_t;
+typedef unsigned char mblk_t;
+typedef unsigned int paddr_t;
+typedef unsigned char uchar;
#define TPNULL ((ushort)(0x8000))
@@ -73,10 +73,10 @@ typedef unsigned char uchar;
/*
** RIO structures defined in other include files.
*/
-typedef struct PKT PKT;
-typedef struct LPB LPB;
-typedef struct RUP RUP;
-typedef struct Port Port;
-typedef struct DpRam DpRam;
+typedef struct PKT PKT;
+typedef struct LPB LPB;
+typedef struct RUP RUP;
+typedef struct Port Port;
+typedef struct DpRam DpRam;
-#endif /* __rio_typdef_h__ */
+#endif /* __rio_typdef_h__ */
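
typdef.h keeps the host-card access types (BYTE, WORD, DWORD and friends) volatile so that every reference to the RIO card's dual-port RAM is a real bus access the compiler cannot cache or hoist. A small standalone illustration of why that matters for a polling loop; plain C, with fake_reg standing in for card memory:

/* Why the card-access typedefs are volatile: the compiler must not
 * cache reads of memory that the hardware can change underneath us. */
#include <stdio.h>

typedef volatile unsigned short WORD;

static int wait_for_handshake(WORD *reg, unsigned short want)
{
        long spins = 1000000;

        /* Each pass re-reads *reg because WORD is volatile; with a
         * plain unsigned short the loop could be optimised into a
         * single read and never observe the card's update. */
        while (*reg != want && --spins > 0)
                ;
        return spins > 0;
}

int main(void)
{
        WORD fake_reg = 0x55AA;            /* stands in for card memory */

        printf("handshake seen: %d\n", wait_for_handshake(&fake_reg, 0x55AA));
        return 0;
}
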
diff --git a/drivers/char/rio/unixrup.h b/drivers/char/rio/unixrup.h
index eddf86278ab..a126c7cabac 100644
--- a/drivers/char/rio/unixrup.h
+++ b/drivers/char/rio/unixrup.h
@@ -41,16 +41,15 @@ static char *_unixrup_h_sccs_ = "@(#)unixrup.h 1.2";
** UnixRup data structure. This contains pointers to actual RUPs on the
** host card, and all the command/boot control stuff.
*/
-struct UnixRup
-{
- struct CmdBlk *CmdsWaitingP; /* Commands waiting to be done */
- struct CmdBlk *CmdPendingP; /* The command currently being sent */
- struct RUP *RupP; /* the Rup to send it to */
- uint Id; /* Id number */
- uint BaseSysPort; /* SysPort of first tty on this RTA */
- uint ModTypes; /* Modules on this RTA */
- spinlock_t RupLock; /* Lock structure for MPX */
-/* struct lockb RupLock; */ /* Lock structure for MPX */
+struct UnixRup {
+ struct CmdBlk *CmdsWaitingP; /* Commands waiting to be done */
+ struct CmdBlk *CmdPendingP; /* The command currently being sent */
+ struct RUP *RupP; /* the Rup to send it to */
+ uint Id; /* Id number */
+ uint BaseSysPort; /* SysPort of first tty on this RTA */
+ uint ModTypes; /* Modules on this RTA */
+ spinlock_t RupLock; /* Lock structure for MPX */
+ /* struct lockb RupLock; *//* Lock structure for MPX */
};
-#endif /* __rio_unixrup_h__ */
+#endif /* __rio_unixrup_h__ */
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c
index 55a3a0188ed..050e70ee592 100644
--- a/drivers/char/riscom8.c
+++ b/drivers/char/riscom8.c
@@ -46,6 +46,7 @@
#include <linux/major.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/tty_flip.h>
#include <asm/uaccess.h>
@@ -107,15 +108,15 @@ static struct riscom_port rc_port[RC_NBOARD * RC_NPORT];
/* RISCom/8 I/O ports addresses (without address translation) */
static unsigned short rc_ioport[] = {
-#if 1
+#if 1
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x09, 0x0a, 0x0b, 0x0c,
-#else
+#else
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x09, 0x0a, 0x0b, 0x0c, 0x10,
0x11, 0x12, 0x18, 0x28, 0x31, 0x32, 0x39, 0x3a, 0x40, 0x41, 0x61, 0x62,
0x63, 0x64, 0x6b, 0x70, 0x71, 0x78, 0x7a, 0x7b, 0x7f, 0x100, 0x101
-#endif
+#endif
};
-#define RC_NIOPORT (sizeof(rc_ioport) / sizeof(rc_ioport[0]))
+#define RC_NIOPORT ARRAY_SIZE(rc_ioport)
static inline int rc_paranoia_check(struct riscom_port const * port,
@@ -354,28 +355,17 @@ static inline void rc_receive_exc(struct riscom_board const * bp)
struct riscom_port *port;
struct tty_struct *tty;
unsigned char status;
- unsigned char ch;
+ unsigned char ch, flag;
if (!(port = rc_get_port(bp, "Receive")))
return;
tty = port->tty;
- if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
- printk(KERN_WARNING "rc%d: port %d: Working around flip "
- "buffer overflow.\n",
- board_No(bp), port_No(port));
- return;
- }
#ifdef RC_REPORT_OVERRUN
status = rc_in(bp, CD180_RCSR);
- if (status & RCSR_OE) {
+ if (status & RCSR_OE)
port->overrun++;
-#if 0
- printk(KERN_ERR "rc%d: port %d: Overrun. Total %ld overruns\n",
- board_No(bp), port_No(port), port->overrun);
-#endif
- }
status &= port->mark_mask;
#else
status = rc_in(bp, CD180_RCSR) & port->mark_mask;
@@ -393,25 +383,24 @@ static inline void rc_receive_exc(struct riscom_board const * bp)
} else if (status & RCSR_BREAK) {
printk(KERN_INFO "rc%d: port %d: Handling break...\n",
board_No(bp), port_No(port));
- *tty->flip.flag_buf_ptr++ = TTY_BREAK;
+ flag = TTY_BREAK;
if (port->flags & ASYNC_SAK)
do_SAK(tty);
} else if (status & RCSR_PE)
- *tty->flip.flag_buf_ptr++ = TTY_PARITY;
+ flag = TTY_PARITY;
else if (status & RCSR_FE)
- *tty->flip.flag_buf_ptr++ = TTY_FRAME;
+ flag = TTY_FRAME;
else if (status & RCSR_OE)
- *tty->flip.flag_buf_ptr++ = TTY_OVERRUN;
+ flag = TTY_OVERRUN;
else
- *tty->flip.flag_buf_ptr++ = 0;
+ flag = TTY_NORMAL;
- *tty->flip.char_buf_ptr++ = ch;
- tty->flip.count++;
- schedule_delayed_work(&tty->flip.work, 1);
+ tty_insert_flip_char(tty, ch, flag);
+ tty_flip_buffer_push(tty);
}
static inline void rc_receive(struct riscom_board const * bp)
@@ -432,17 +421,15 @@ static inline void rc_receive(struct riscom_board const * bp)
#endif
while (count--) {
- if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
+ if (tty_buffer_request_room(tty, 1) == 0) {
printk(KERN_WARNING "rc%d: port %d: Working around "
"flip buffer overflow.\n",
board_No(bp), port_No(port));
break;
}
- *tty->flip.char_buf_ptr++ = rc_in(bp, CD180_RDR);
- *tty->flip.flag_buf_ptr++ = 0;
- tty->flip.count++;
+ tty_insert_flip_char(tty, rc_in(bp, CD180_RDR), TTY_NORMAL);
}
- schedule_delayed_work(&tty->flip.work, 1);
+ tty_flip_buffer_push(tty);
}
static inline void rc_transmit(struct riscom_board const * bp)
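
The riscom8.c hunks drop the open-coded writes to tty->flip.char_buf_ptr / flag_buf_ptr and the manual TTY_FLIPBUF_SIZE check in favour of tty_insert_flip_char() and tty_flip_buffer_push(), which keep the bounds checking and push scheduling inside the tty layer. A user-space mock of that before/after shape, where flip_insert_char() and flip_push() are invented stand-ins for the kernel helpers:

/* Mock sketch of the flip-buffer conversion; flip_insert_char() and
 * flip_push() are illustrative stand-ins, not the kernel API. */
#include <stdio.h>
#include <string.h>

#define FLIPBUF_SIZE 16

struct flip_buf {
        unsigned char chars[FLIPBUF_SIZE];
        unsigned char flags[FLIPBUF_SIZE];
        int count;
};

/* New style: one helper owns the bounds check and the bookkeeping. */
static int flip_insert_char(struct flip_buf *fb, unsigned char ch,
                            unsigned char flag)
{
        if (fb->count >= FLIPBUF_SIZE)
                return 0;                  /* full: character dropped */
        fb->chars[fb->count] = ch;
        fb->flags[fb->count] = flag;
        fb->count++;
        return 1;
}

static void flip_push(struct flip_buf *fb)
{
        printf("pushed %d chars to the line discipline\n", fb->count);
        fb->count = 0;
}

int main(void)
{
        struct flip_buf fb;
        const char *rx = "hello";
        size_t i;

        memset(&fb, 0, sizeof(fb));
        for (i = 0; i < strlen(rx); i++)
                flip_insert_char(&fb, (unsigned char)rx[i], 0 /* normal */);
        flip_push(&fb);
        return 0;
}
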
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index d3bc731fbb2..0949dcef069 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -325,19 +325,16 @@ static void rp_do_receive(struct r_port *info,
{
unsigned int CharNStat;
int ToRecv, wRecv, space = 0, count;
- unsigned char *cbuf;
- char *fbuf;
+ unsigned char *cbuf, *chead;
+ char *fbuf, *fhead;
struct tty_ldisc *ld;
ld = tty_ldisc_ref(tty);
ToRecv = sGetRxCnt(cp);
- if (ld)
- space = ld->receive_room(tty);
+ space = tty->receive_room;
if (space > 2 * TTY_FLIPBUF_SIZE)
space = 2 * TTY_FLIPBUF_SIZE;
- cbuf = tty->flip.char_buf;
- fbuf = tty->flip.flag_buf;
count = 0;
#ifdef ROCKET_DEBUG_INTR
printk(KERN_INFO "rp_do_receive(%d, %d)...", ToRecv, space);
@@ -350,9 +347,13 @@ static void rp_do_receive(struct r_port *info,
if (ToRecv > space)
ToRecv = space;
+ ToRecv = tty_prepare_flip_string_flags(tty, &chead, &fhead, ToRecv);
if (ToRecv <= 0)
goto done;
+ cbuf = chead;
+ fbuf = fhead;
+
/*
* if status indicates there are errored characters in the
* FIFO, then enter status mode (a word in FIFO holds
@@ -399,7 +400,7 @@ static void rp_do_receive(struct r_port *info,
else if (CharNStat & STMRCVROVRH)
*fbuf++ = TTY_OVERRUN;
else
- *fbuf++ = 0;
+ *fbuf++ = TTY_NORMAL;
*cbuf++ = CharNStat & 0xff;
count++;
ToRecv--;
@@ -426,13 +427,13 @@ static void rp_do_receive(struct r_port *info,
sInStrW(sGetTxRxDataIO(cp), (unsigned short *) cbuf, wRecv);
if (ToRecv & 1)
cbuf[ToRecv - 1] = sInB(sGetTxRxDataIO(cp));
- memset(fbuf, 0, ToRecv);
+ memset(fbuf, TTY_NORMAL, ToRecv);
cbuf += ToRecv;
fbuf += ToRecv;
count += ToRecv;
}
/* Push the data up to the tty layer */
- ld->receive_buf(tty, tty->flip.char_buf, tty->flip.flag_buf, count);
+ ld->receive_buf(tty, cbuf, fbuf, count);
done:
tty_ldisc_deref(ld);
}
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index a7f099fb7df..7cac6d05d72 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -46,10 +46,10 @@
* 1.11a Daniele Bellucci: Audit create_proc_read_entry in rtc_init
* 1.12 Venkatesh Pallipadi: Hooks for emulating rtc on HPET base-timer
* CONFIG_HPET_EMULATE_RTC
- *
+ * 1.12ac Alan Cox: Allow read access to the day of week register
*/
-#define RTC_VERSION "1.12"
+#define RTC_VERSION "1.12ac"
#define RTC_IO_EXTENT 0x8
@@ -1250,9 +1250,9 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
/*
* Only the values that we read from the RTC are set. We leave
- * tm_wday, tm_yday and tm_isdst untouched. Even though the
- * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
- * by the RTC when initially set to a non-zero value.
+ * tm_wday, tm_yday and tm_isdst untouched. Note that while the
+ * RTC has RTC_DAY_OF_WEEK, we should usually ignore it, as it is
+ * only updated by the RTC when initially set to a non-zero value.
*/
spin_lock_irq(&rtc_lock);
rtc_tm->tm_sec = CMOS_READ(RTC_SECONDS);
@@ -1261,6 +1261,9 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
rtc_tm->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
rtc_tm->tm_mon = CMOS_READ(RTC_MONTH);
rtc_tm->tm_year = CMOS_READ(RTC_YEAR);
+ /* Only set from 2.6.16 onwards */
+ rtc_tm->tm_wday = CMOS_READ(RTC_DAY_OF_WEEK);
+
#ifdef CONFIG_MACH_DECSTATION
real_year = CMOS_READ(RTC_DEC_YEAR);
#endif
@@ -1275,6 +1278,7 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
BCD_TO_BIN(rtc_tm->tm_mday);
BCD_TO_BIN(rtc_tm->tm_mon);
BCD_TO_BIN(rtc_tm->tm_year);
+ BCD_TO_BIN(rtc_tm->tm_wday);
}
#ifdef CONFIG_MACH_DECSTATION
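
Besides the comment update, rtc.c now reads RTC_DAY_OF_WEEK into tm_wday and, when the clock stores BCD, converts it with BCD_TO_BIN like the other fields. A standalone sketch of that conversion; the helper below spells out the usual BCD decoding and is not copied from the kernel headers:

/* BCD-to-binary sketch: a CMOS register storing "25" as 0x25 becomes 25. */
#include <stdio.h>

static unsigned int bcd_to_bin(unsigned int val)
{
        return (val & 0x0f) + (val >> 4) * 10;
}

int main(void)
{
        unsigned int wday_reg = 0x06;      /* day-of-week, BCD-encoded */
        unsigned int year_reg = 0x99;      /* two-digit year, BCD "99" */

        printf("wday=%u year=%u\n", bcd_to_bin(wday_reg), bcd_to_bin(year_reg));
        return 0;
}
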
diff --git a/drivers/char/s3c2410-rtc.c b/drivers/char/s3c2410-rtc.c
index 3df7a574267..2e308657f6f 100644
--- a/drivers/char/s3c2410-rtc.c
+++ b/drivers/char/s3c2410-rtc.c
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
+#include <linux/clk.h>
#include <asm/hardware.h>
#include <asm/uaccess.h>
@@ -33,7 +34,6 @@
#include <asm/mach/time.h>
-#include <asm/hardware/clock.h>
#include <asm/arch/regs-rtc.h>
/* need this for the RTC_AF definitions */
diff --git a/drivers/char/scc.h b/drivers/char/scc.h
index 51810f72f1a..93998f5baff 100644
--- a/drivers/char/scc.h
+++ b/drivers/char/scc.h
@@ -399,7 +399,7 @@ struct scc_port {
__asm__ __volatile__ ( "tstb %0" : : "g" (*_scc_del) : "cc" );\
} while (0)
-extern unsigned char scc_shadow[2][16];
+static unsigned char scc_shadow[2][16];
/* The following functions should relax the somehow complicated
* register access of the SCC. _SCCwrite() stores all written values
diff --git a/drivers/char/selection.c b/drivers/char/selection.c
index 5b187c895c1..71093a9fc46 100644
--- a/drivers/char/selection.c
+++ b/drivers/char/selection.c
@@ -275,7 +275,8 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t
int paste_selection(struct tty_struct *tty)
{
struct vc_data *vc = (struct vc_data *)tty->driver_data;
- int pasted = 0, count;
+ int pasted = 0;
+ unsigned int count;
struct tty_ldisc *ld;
DECLARE_WAITQUEUE(wait, current);
@@ -293,7 +294,7 @@ int paste_selection(struct tty_struct *tty)
continue;
}
count = sel_buffer_lth - pasted;
- count = min(count, tty->ldisc.receive_room(tty));
+ count = min(count, tty->receive_room);
tty->ldisc.receive_buf(tty, sel_buffer + pasted, NULL, count);
pasted += count;
}
diff --git a/drivers/char/ser_a2232.c b/drivers/char/ser_a2232.c
index dda30e42ec7..80a5b840e22 100644
--- a/drivers/char/ser_a2232.c
+++ b/drivers/char/ser_a2232.c
@@ -194,11 +194,6 @@ static inline void a2232_receive_char(struct a2232_port *port, int ch, int err)
*/
struct tty_struct *tty = port->gs.tty;
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- return;
-
- tty->flip.count++;
-
#if 0
switch(err) {
case TTY_BREAK:
@@ -212,8 +207,7 @@ static inline void a2232_receive_char(struct a2232_port *port, int ch, int err)
}
#endif
- *tty->flip.flag_buf_ptr++ = err;
- *tty->flip.char_buf_ptr++ = ch;
+ tty_insert_flip_char(tty, ch, err);
tty_flip_buffer_push(tty);
}
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index c2deac968bd..f36342ae8e7 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -117,7 +117,7 @@ struct cyclades_port cy_port[] = {
{-1 }, /* ttyS2 */
{-1 }, /* ttyS3 */
};
-#define NR_PORTS (sizeof(cy_port)/sizeof(struct cyclades_port))
+#define NR_PORTS ARRAY_SIZE(cy_port)
/*
* tmp_buf is used as a temporary buffer by serial_write. We need to
@@ -422,45 +422,35 @@ cd2401_rxerr_interrupt(int irq, void *dev_id, struct pt_regs *fp)
base_addr[CyREOIR] = rfoc ? 0 : CyNOTRANS;
return IRQ_HANDLED;
}
- if (tty->flip.count < TTY_FLIPBUF_SIZE){
- tty->flip.count++;
+ if (tty_buffer_request_room(tty, 1) != 0){
if (err & info->read_status_mask){
if(err & CyBREAK){
- *tty->flip.flag_buf_ptr++ = TTY_BREAK;
- *tty->flip.char_buf_ptr++ = data;
+ tty_insert_flip_char(tty, data, TTY_BREAK);
if (info->flags & ASYNC_SAK){
do_SAK(tty);
}
}else if(err & CyFRAME){
- *tty->flip.flag_buf_ptr++ = TTY_FRAME;
- *tty->flip.char_buf_ptr++ = data;
+ tty_insert_flip_char(tty, data, TTY_FRAME);
}else if(err & CyPARITY){
- *tty->flip.flag_buf_ptr++ = TTY_PARITY;
- *tty->flip.char_buf_ptr++ = data;
+ tty_insert_flip_char(tty, data, TTY_PARITY);
}else if(err & CyOVERRUN){
- *tty->flip.flag_buf_ptr++ = TTY_OVERRUN;
- *tty->flip.char_buf_ptr++ = 0;
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
/*
If the flip buffer itself is
overflowing, we still loose
the next incoming character.
*/
- if(tty->flip.count < TTY_FLIPBUF_SIZE){
- tty->flip.count++;
- *tty->flip.flag_buf_ptr++ = TTY_NORMAL;
- *tty->flip.char_buf_ptr++ = data;
- }
+ tty_insert_flip_char(tty, data, TTY_NORMAL);
+ }
/* These two conditions may imply */
/* a normal read should be done. */
/* else if(data & CyTIMEOUT) */
/* else if(data & CySPECHAR) */
}else{
- *tty->flip.flag_buf_ptr++ = 0;
- *tty->flip.char_buf_ptr++ = 0;
+ tty_insert_flip_char(tty, 0, TTY_NORMAL);
}
}else{
- *tty->flip.flag_buf_ptr++ = 0;
- *tty->flip.char_buf_ptr++ = 0;
+ tty_insert_flip_char(tty, data, TTY_NORMAL);
}
}else{
/* there was a software buffer overrun
@@ -692,12 +682,7 @@ cd2401_rx_interrupt(int irq, void *dev_id, struct pt_regs *fp)
#endif
while(char_count--){
data = base_addr[CyRDR];
- if (tty->flip.count >= TTY_FLIPBUF_SIZE){
- continue;
- }
- tty->flip.count++;
- *tty->flip.flag_buf_ptr++ = TTY_NORMAL;
- *tty->flip.char_buf_ptr++ = data;
+ tty_insert_flip_char(tty, data, TTY_NORMAL);
#ifdef CYCLOM_16Y_HACK
udelay(10L);
#endif
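
Several of these drivers (riscom8.c, serial167.c, stallion.c) also swap hand-written sizeof(arr)/sizeof(arr[0]) element counts for the ARRAY_SIZE() macro. A standalone sketch of the idiom; the macro is written out here, while in the kernel it comes from a shared header:

/* ARRAY_SIZE sketch: counts elements of a true array at compile time. */
#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static const unsigned short rc_ioport[] = {
        0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x09, 0x0a, 0x0b, 0x0c,
};

int main(void)
{
        /* One expression instead of repeating the element type, so the
         * count stays right if the type or the initialiser changes. */
        printf("%zu ports\n", ARRAY_SIZE(rc_ioport));
        return 0;
}
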
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 51a07370e63..f8dd8527c6a 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -471,7 +471,6 @@ struct sonypi_keypress {
static struct sonypi_device {
struct pci_dev *dev;
- struct platform_device *pdev;
u16 irq;
u16 bits;
u16 ioport1;
@@ -511,6 +510,11 @@ static struct sonypi_device {
#define SONYPI_ACPI_ACTIVE 0
#endif /* CONFIG_ACPI */
+#ifdef CONFIG_ACPI
+static struct acpi_device *sonypi_acpi_device;
+static int acpi_enabled;
+#endif
+
static int sonypi_ec_write(u8 addr, u8 value)
{
#ifdef CONFIG_ACPI_EC
@@ -864,6 +868,11 @@ found:
if (useinput)
sonypi_report_input_event(event);
+#ifdef CONFIG_ACPI
+ if (acpi_enabled)
+ acpi_bus_generate_event(sonypi_acpi_device, 1, event);
+#endif
+
kfifo_put(sonypi_device.fifo, (unsigned char *)&event, sizeof(event));
kill_fasync(&sonypi_device.fifo_async, SIGIO, POLL_IN);
wake_up_interruptible(&sonypi_device.fifo_proc_list);
@@ -1165,45 +1174,38 @@ static int sonypi_disable(void)
return 0;
}
-#ifdef CONFIG_PM
-static int old_camera_power;
-
-static int sonypi_suspend(struct platform_device *dev, pm_message_t state)
+#ifdef CONFIG_ACPI
+static int sonypi_acpi_add(struct acpi_device *device)
{
- old_camera_power = sonypi_device.camera_power;
- sonypi_disable();
-
+ sonypi_acpi_device = device;
+ strcpy(acpi_device_name(device), "Sony laptop hotkeys");
+ strcpy(acpi_device_class(device), "sony/hotkey");
return 0;
}
-static int sonypi_resume(struct platform_device *dev)
+static int sonypi_acpi_remove(struct acpi_device *device, int type)
{
- sonypi_enable(old_camera_power);
+ sonypi_acpi_device = NULL;
return 0;
}
-#endif
-
-static void sonypi_shutdown(struct platform_device *dev)
-{
- sonypi_disable();
-}
-static struct platform_driver sonypi_driver = {
-#ifdef CONFIG_PM
- .suspend = sonypi_suspend,
- .resume = sonypi_resume,
-#endif
- .shutdown = sonypi_shutdown,
- .driver = {
- .name = "sonypi",
+static struct acpi_driver sonypi_acpi_driver = {
+ .name = "sonypi",
+ .class = "hkey",
+ .ids = "SNY6001",
+ .ops = {
+ .add = sonypi_acpi_add,
+ .remove = sonypi_acpi_remove,
},
};
+#endif
static int __devinit sonypi_create_input_devices(void)
{
struct input_dev *jog_dev;
struct input_dev *key_dev;
int i;
+ int error;
sonypi_device.input_jog_dev = jog_dev = input_allocate_device();
if (!jog_dev)
@@ -1219,9 +1221,8 @@ static int __devinit sonypi_create_input_devices(void)
sonypi_device.input_key_dev = key_dev = input_allocate_device();
if (!key_dev) {
- input_free_device(jog_dev);
- sonypi_device.input_jog_dev = NULL;
- return -ENOMEM;
+ error = -ENOMEM;
+ goto err_free_jogdev;
}
key_dev->name = "Sony Vaio Keys";
@@ -1234,56 +1235,122 @@ static int __devinit sonypi_create_input_devices(void)
if (sonypi_inputkeys[i].inputev)
set_bit(sonypi_inputkeys[i].inputev, key_dev->keybit);
- input_register_device(jog_dev);
- input_register_device(key_dev);
+ error = input_register_device(jog_dev);
+ if (error)
+ goto err_free_keydev;
+
+ error = input_register_device(key_dev);
+ if (error)
+ goto err_unregister_jogdev;
return 0;
+
+ err_unregister_jogdev:
+ input_unregister_device(jog_dev);
+ /* Set to NULL so we don't free it again below */
+ jog_dev = NULL;
+ err_free_keydev:
+ input_free_device(key_dev);
+ sonypi_device.input_key_dev = NULL;
+ err_free_jogdev:
+ input_free_device(jog_dev);
+ sonypi_device.input_jog_dev = NULL;
+
+ return error;
}
-static int __devinit sonypi_probe(void)
+static int __devinit sonypi_setup_ioports(struct sonypi_device *dev,
+ const struct sonypi_ioport_list *ioport_list)
{
- int i, ret;
- struct sonypi_ioport_list *ioport_list;
- struct sonypi_irq_list *irq_list;
- struct pci_dev *pcidev;
+ while (ioport_list->port1) {
- if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_82371AB_3, NULL)))
- sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE1;
- else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_ICH6_1, NULL)))
- sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3;
- else
- sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE2;
+ if (request_region(ioport_list->port1,
+ sonypi_device.region_size,
+ "Sony Programable I/O Device")) {
+ dev->ioport1 = ioport_list->port1;
+ dev->ioport2 = ioport_list->port2;
+ return 0;
+ }
+ ioport_list++;
+ }
- sonypi_device.dev = pcidev;
+ return -EBUSY;
+}
+
+static int __devinit sonypi_setup_irq(struct sonypi_device *dev,
+ const struct sonypi_irq_list *irq_list)
+{
+ while (irq_list->irq) {
+
+ if (!request_irq(irq_list->irq, sonypi_irq,
+ SA_SHIRQ, "sonypi", sonypi_irq)) {
+ dev->irq = irq_list->irq;
+ dev->bits = irq_list->bits;
+ return 0;
+ }
+ irq_list++;
+ }
+
+ return -EBUSY;
+}
+
+static void __devinit sonypi_display_info(void)
+{
+ printk(KERN_INFO "sonypi: detected type%d model, "
+ "verbose = %d, fnkeyinit = %s, camera = %s, "
+ "compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n",
+ sonypi_device.model,
+ verbose,
+ fnkeyinit ? "on" : "off",
+ camera ? "on" : "off",
+ compat ? "on" : "off",
+ mask,
+ useinput ? "on" : "off",
+ SONYPI_ACPI_ACTIVE ? "on" : "off");
+ printk(KERN_INFO "sonypi: enabled at irq=%d, port1=0x%x, port2=0x%x\n",
+ sonypi_device.irq,
+ sonypi_device.ioport1, sonypi_device.ioport2);
+
+ if (minor == -1)
+ printk(KERN_INFO "sonypi: device allocated minor is %d\n",
+ sonypi_misc_device.minor);
+}
+
+static int __devinit sonypi_probe(struct platform_device *dev)
+{
+ const struct sonypi_ioport_list *ioport_list;
+ const struct sonypi_irq_list *irq_list;
+ struct pci_dev *pcidev;
+ int error;
spin_lock_init(&sonypi_device.fifo_lock);
sonypi_device.fifo = kfifo_alloc(SONYPI_BUF_SIZE, GFP_KERNEL,
&sonypi_device.fifo_lock);
if (IS_ERR(sonypi_device.fifo)) {
printk(KERN_ERR "sonypi: kfifo_alloc failed\n");
- ret = PTR_ERR(sonypi_device.fifo);
- goto out_fifo;
+ return PTR_ERR(sonypi_device.fifo);
}
init_waitqueue_head(&sonypi_device.fifo_proc_list);
init_MUTEX(&sonypi_device.lock);
sonypi_device.bluetooth_power = -1;
+ if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82371AB_3, NULL)))
+ sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE1;
+ else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_ICH6_1, NULL)))
+ sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3;
+ else
+ sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE2;
+
if (pcidev && pci_enable_device(pcidev)) {
printk(KERN_ERR "sonypi: pci_enable_device failed\n");
- ret = -EIO;
- goto out_pcienable;
- }
-
- if (minor != -1)
- sonypi_misc_device.minor = minor;
- if ((ret = misc_register(&sonypi_misc_device))) {
- printk(KERN_ERR "sonypi: misc_register failed\n");
- goto out_miscreg;
+ error = -EIO;
+ goto err_put_pcidev;
}
+ sonypi_device.dev = pcidev;
if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE1) {
ioport_list = sonypi_type1_ioport_list;
@@ -1302,43 +1369,36 @@ static int __devinit sonypi_probe(void)
irq_list = sonypi_type3_irq_list;
}
- for (i = 0; ioport_list[i].port1; i++) {
- if (request_region(ioport_list[i].port1,
- sonypi_device.region_size,
- "Sony Programable I/O Device")) {
- /* get the ioport */
- sonypi_device.ioport1 = ioport_list[i].port1;
- sonypi_device.ioport2 = ioport_list[i].port2;
- break;
- }
- }
- if (!sonypi_device.ioport1) {
- printk(KERN_ERR "sonypi: request_region failed\n");
- ret = -ENODEV;
- goto out_reqreg;
+ error = sonypi_setup_ioports(&sonypi_device, ioport_list);
+ if (error) {
+ printk(KERN_ERR "sonypi: failed to request ioports\n");
+ goto err_disable_pcidev;
}
- for (i = 0; irq_list[i].irq; i++) {
-
- sonypi_device.irq = irq_list[i].irq;
- sonypi_device.bits = irq_list[i].bits;
-
- if (!request_irq(sonypi_device.irq, sonypi_irq,
- SA_SHIRQ, "sonypi", sonypi_irq))
- break;
+ error = sonypi_setup_irq(&sonypi_device, irq_list);
+ if (error) {
+ printk(KERN_ERR "sonypi: request_irq failed\n");
+ goto err_free_ioports;
}
- if (!irq_list[i].irq) {
- printk(KERN_ERR "sonypi: request_irq failed\n");
- ret = -ENODEV;
- goto out_reqirq;
+ if (minor != -1)
+ sonypi_misc_device.minor = minor;
+ error = misc_register(&sonypi_misc_device);
+ if (error) {
+ printk(KERN_ERR "sonypi: misc_register failed\n");
+ goto err_free_irq;
}
+ sonypi_display_info();
+
if (useinput) {
- ret = sonypi_create_input_devices();
- if (ret)
- goto out_inputdevices;
+ error = sonypi_create_input_devices();
+ if (error) {
+ printk(KERN_ERR
+ "sonypi: failed to create input devices\n");
+ goto err_miscdev_unregister;
+ }
spin_lock_init(&sonypi_device.input_fifo_lock);
sonypi_device.input_fifo =
@@ -1346,91 +1406,104 @@ static int __devinit sonypi_probe(void)
&sonypi_device.input_fifo_lock);
if (IS_ERR(sonypi_device.input_fifo)) {
printk(KERN_ERR "sonypi: kfifo_alloc failed\n");
- ret = PTR_ERR(sonypi_device.input_fifo);
- goto out_infifo;
+ error = PTR_ERR(sonypi_device.input_fifo);
+ goto err_inpdev_unregister;
}
INIT_WORK(&sonypi_device.input_work, input_keyrelease, NULL);
}
- sonypi_device.pdev = platform_device_register_simple("sonypi", -1,
- NULL, 0);
- if (IS_ERR(sonypi_device.pdev)) {
- ret = PTR_ERR(sonypi_device.pdev);
- goto out_platformdev;
- }
-
sonypi_enable(0);
- printk(KERN_INFO "sonypi: Sony Programmable I/O Controller Driver"
- "v%s.\n", SONYPI_DRIVER_VERSION);
- printk(KERN_INFO "sonypi: detected type%d model, "
- "verbose = %d, fnkeyinit = %s, camera = %s, "
- "compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n",
- sonypi_device.model,
- verbose,
- fnkeyinit ? "on" : "off",
- camera ? "on" : "off",
- compat ? "on" : "off",
- mask,
- useinput ? "on" : "off",
- SONYPI_ACPI_ACTIVE ? "on" : "off");
- printk(KERN_INFO "sonypi: enabled at irq=%d, port1=0x%x, port2=0x%x\n",
- sonypi_device.irq,
- sonypi_device.ioport1, sonypi_device.ioport2);
-
- if (minor == -1)
- printk(KERN_INFO "sonypi: device allocated minor is %d\n",
- sonypi_misc_device.minor);
-
return 0;
-out_platformdev:
- kfifo_free(sonypi_device.input_fifo);
-out_infifo:
+ err_inpdev_unregister:
input_unregister_device(sonypi_device.input_key_dev);
input_unregister_device(sonypi_device.input_jog_dev);
-out_inputdevices:
+ err_miscdev_unregister:
+ misc_deregister(&sonypi_misc_device);
+ err_free_irq:
free_irq(sonypi_device.irq, sonypi_irq);
-out_reqirq:
+ err_free_ioports:
release_region(sonypi_device.ioport1, sonypi_device.region_size);
-out_reqreg:
- misc_deregister(&sonypi_misc_device);
-out_miscreg:
+ err_disable_pcidev:
if (pcidev)
pci_disable_device(pcidev);
-out_pcienable:
+ err_put_pcidev:
+ pci_dev_put(pcidev);
kfifo_free(sonypi_device.fifo);
-out_fifo:
- pci_dev_put(sonypi_device.dev);
- return ret;
+
+ return error;
}
-static void __devexit sonypi_remove(void)
+static int __devexit sonypi_remove(struct platform_device *dev)
{
sonypi_disable();
synchronize_sched(); /* Allow sonypi interrupt to complete. */
flush_scheduled_work();
- platform_device_unregister(sonypi_device.pdev);
-
if (useinput) {
input_unregister_device(sonypi_device.input_key_dev);
input_unregister_device(sonypi_device.input_jog_dev);
kfifo_free(sonypi_device.input_fifo);
}
+ misc_deregister(&sonypi_misc_device);
+
free_irq(sonypi_device.irq, sonypi_irq);
release_region(sonypi_device.ioport1, sonypi_device.region_size);
- misc_deregister(&sonypi_misc_device);
- if (sonypi_device.dev)
+
+ if (sonypi_device.dev) {
pci_disable_device(sonypi_device.dev);
+ pci_dev_put(sonypi_device.dev);
+ }
+
kfifo_free(sonypi_device.fifo);
- pci_dev_put(sonypi_device.dev);
- printk(KERN_INFO "sonypi: removed.\n");
+
+ return 0;
}
+#ifdef CONFIG_PM
+static int old_camera_power;
+
+static int sonypi_suspend(struct platform_device *dev, pm_message_t state)
+{
+ old_camera_power = sonypi_device.camera_power;
+ sonypi_disable();
+
+ return 0;
+}
+
+static int sonypi_resume(struct platform_device *dev)
+{
+ sonypi_enable(old_camera_power);
+ return 0;
+}
+#else
+#define sonypi_suspend NULL
+#define sonypi_resume NULL
+#endif
+
+static void sonypi_shutdown(struct platform_device *dev)
+{
+ sonypi_disable();
+}
+
+static struct platform_driver sonypi_driver = {
+ .driver = {
+ .name = "sonypi",
+ .owner = THIS_MODULE,
+ },
+ .probe = sonypi_probe,
+ .remove = __devexit_p(sonypi_remove),
+ .shutdown = sonypi_shutdown,
+ .suspend = sonypi_suspend,
+ .resume = sonypi_resume,
+};
+
+static struct platform_device *sonypi_platform_device;
+
static struct dmi_system_id __initdata sonypi_dmi_table[] = {
{
.ident = "Sony Vaio",
@@ -1451,26 +1524,52 @@ static struct dmi_system_id __initdata sonypi_dmi_table[] = {
static int __init sonypi_init(void)
{
- int ret;
+ int error;
+
+ printk(KERN_INFO
+ "sonypi: Sony Programmable I/O Controller Driver v%s.\n",
+ SONYPI_DRIVER_VERSION);
if (!dmi_check_system(sonypi_dmi_table))
return -ENODEV;
- ret = platform_driver_register(&sonypi_driver);
- if (ret)
- return ret;
+ error = platform_driver_register(&sonypi_driver);
+ if (error)
+ return error;
- ret = sonypi_probe();
- if (ret)
- platform_driver_unregister(&sonypi_driver);
+ sonypi_platform_device = platform_device_alloc("sonypi", -1);
+ if (!sonypi_platform_device) {
+ error = -ENOMEM;
+ goto err_driver_unregister;
+ }
- return ret;
+ error = platform_device_add(sonypi_platform_device);
+ if (error)
+ goto err_free_device;
+
+#ifdef CONFIG_ACPI
+ if (acpi_bus_register_driver(&sonypi_acpi_driver) > 0)
+ acpi_enabled = 1;
+#endif
+
+ return 0;
+
+ err_free_device:
+ platform_device_put(sonypi_platform_device);
+ err_driver_unregister:
+ platform_driver_unregister(&sonypi_driver);
+ return error;
}
static void __exit sonypi_exit(void)
{
+#ifdef CONFIG_ACPI
+ if (acpi_enabled)
+ acpi_bus_unregister_driver(&sonypi_acpi_driver);
+#endif
+ platform_device_unregister(sonypi_platform_device);
platform_driver_unregister(&sonypi_driver);
- sonypi_remove();
+ printk(KERN_INFO "sonypi: removed.\n");
}
module_init(sonypi_init);
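
The reworked sonypi_probe() and sonypi_init() follow the usual kernel error-unwinding idiom: each acquired resource gets a matching label, and a failure jumps to the label that releases everything obtained so far, in reverse order. A standalone mock of that shape, with acquire_a/b/c and release_a/b invented purely for illustration:

/* Mock of the goto-unwind pattern used in the reworked probe path.
 * The acquire/release functions stand in for request_region(),
 * request_irq(), misc_register() and their cleanup counterparts. */
#include <stdio.h>

static int acquire_a(void)  { puts("A acquired"); return 0; }
static int acquire_b(void)  { puts("B acquired"); return 0; }
static int acquire_c(void)  { puts("C failed");   return -1; }
static void release_a(void) { puts("A released"); }
static void release_b(void) { puts("B released"); }

static int probe(void)
{
        int error;

        error = acquire_a();
        if (error)
                return error;

        error = acquire_b();
        if (error)
                goto err_release_a;

        error = acquire_c();
        if (error)
                goto err_release_b;

        return 0;                          /* fully set up */

 err_release_b:
        release_b();                       /* unwind in reverse order */
 err_release_a:
        release_a();
        return error;
}

int main(void)
{
        printf("probe() = %d\n", probe());
        return 0;
}
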
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 0bbfce43031..0a574bdbce3 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -85,6 +85,7 @@
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/mm.h>
#include <linux/serial.h>
#include <linux/fcntl.h>
@@ -665,7 +666,7 @@ static inline void sx_receive_exc(struct specialix_board * bp)
struct specialix_port *port;
struct tty_struct *tty;
unsigned char status;
- unsigned char ch;
+ unsigned char ch, flag;
func_enter();
@@ -676,8 +677,6 @@ static inline void sx_receive_exc(struct specialix_board * bp)
return;
}
tty = port->tty;
- dprintk (SX_DEBUG_RX, "port: %p count: %d BUFF_SIZE: %d\n",
- port, tty->flip.count, TTY_FLIPBUF_SIZE);
status = sx_in(bp, CD186x_RCSR);
@@ -691,7 +690,7 @@ static inline void sx_receive_exc(struct specialix_board * bp)
/* This flip buffer check needs to be below the reading of the
status register to reset the chip's IRQ.... */
- if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
+ if (tty_buffer_request_room(tty, 1) == 0) {
dprintk(SX_DEBUG_FIFO, "sx%d: port %d: Working around flip buffer overflow.\n",
board_No(bp), port_No(port));
func_exit();
@@ -712,26 +711,24 @@ static inline void sx_receive_exc(struct specialix_board * bp)
} else if (status & RCSR_BREAK) {
dprintk(SX_DEBUG_RX, "sx%d: port %d: Handling break...\n",
board_No(bp), port_No(port));
- *tty->flip.flag_buf_ptr++ = TTY_BREAK;
+ flag = TTY_BREAK;
if (port->flags & ASYNC_SAK)
do_SAK(tty);
} else if (status & RCSR_PE)
- *tty->flip.flag_buf_ptr++ = TTY_PARITY;
+ flag = TTY_PARITY;
else if (status & RCSR_FE)
- *tty->flip.flag_buf_ptr++ = TTY_FRAME;
+ flag = TTY_FRAME;
else if (status & RCSR_OE)
- *tty->flip.flag_buf_ptr++ = TTY_OVERRUN;
+ flag = TTY_OVERRUN;
else
- *tty->flip.flag_buf_ptr++ = 0;
-
- *tty->flip.char_buf_ptr++ = ch;
- tty->flip.count++;
- schedule_delayed_work(&tty->flip.work, 1);
+ flag = TTY_NORMAL;
+ if(tty_insert_flip_char(tty, ch, flag))
+ tty_flip_buffer_push(tty);
func_exit();
}
@@ -755,18 +752,11 @@ static inline void sx_receive(struct specialix_board * bp)
dprintk (SX_DEBUG_RX, "port: %p: count: %d\n", port, count);
port->hits[count > 8 ? 9 : count]++;
- while (count--) {
- if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
- printk(KERN_INFO "sx%d: port %d: Working around flip buffer overflow.\n",
- board_No(bp), port_No(port));
- break;
- }
- *tty->flip.char_buf_ptr++ = sx_in(bp, CD186x_RDR);
- *tty->flip.flag_buf_ptr++ = 0;
- tty->flip.count++;
- }
- schedule_delayed_work(&tty->flip.work, 1);
+ tty_buffer_request_room(tty, count);
+ while (count--)
+ tty_insert_flip_char(tty, sx_in(bp, CD186x_RDR), TTY_NORMAL);
+ tty_flip_buffer_push(tty);
func_exit();
}
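For reference, a minimal sketch (not part of the patch) of the per-character receive pattern that the specialix conversion above moves to, using the flip-buffer helpers from <linux/tty_flip.h>; the function and its arguments are invented for illustration.

/* Illustrative sketch only: per-character receive with the flip-buffer helpers. */
static void example_rx_chars(struct tty_struct *tty,
			     const unsigned char *data, int count)
{
	/* Reserve space up front; the return value is how much room the
	 * tty buffer layer could actually provide. */
	count = tty_buffer_request_room(tty, count);

	while (count--)
		tty_insert_flip_char(tty, *data++, TTY_NORMAL);

	/* Hand the accumulated characters to the line discipline. */
	tty_flip_buffer_push(tty);
}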
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 95af2a94159..0e20780d4a2 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -103,7 +103,7 @@ static stlconf_t stl_brdconf[] = {
/*{ BRD_EASYIO, 0x2a0, 0, 0, 10, 0 },*/
};
-static int stl_nrbrds = sizeof(stl_brdconf) / sizeof(stlconf_t);
+static int stl_nrbrds = ARRAY_SIZE(stl_brdconf);
/*****************************************************************************/
@@ -424,7 +424,7 @@ static stlpcibrd_t stl_pcibrds[] = {
{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87410, BRD_ECHPCI },
};
-static int stl_nrpcibrds = sizeof(stl_pcibrds) / sizeof(stlpcibrd_t);
+static int stl_nrpcibrds = ARRAY_SIZE(stl_pcibrds);
#endif
@@ -704,7 +704,7 @@ static unsigned int sc26198_baudtable[] = {
230400, 460800, 921600
};
-#define SC26198_NRBAUDS (sizeof(sc26198_baudtable) / sizeof(unsigned int))
+#define SC26198_NRBAUDS ARRAY_SIZE(sc26198_baudtable)
/*****************************************************************************/
@@ -901,7 +901,7 @@ static unsigned long stl_atol(char *str)
static int stl_parsebrd(stlconf_t *confp, char **argp)
{
char *sp;
- int nrbrdnames, i;
+ int i;
#ifdef DEBUG
printk("stl_parsebrd(confp=%x,argp=%x)\n", (int) confp, (int) argp);
@@ -913,14 +913,13 @@ static int stl_parsebrd(stlconf_t *confp, char **argp)
for (sp = argp[0], i = 0; ((*sp != 0) && (i < 25)); sp++, i++)
*sp = TOLOWER(*sp);
- nrbrdnames = sizeof(stl_brdstr) / sizeof(stlbrdtype_t);
- for (i = 0; (i < nrbrdnames); i++) {
+ for (i = 0; i < ARRAY_SIZE(stl_brdstr); i++) {
if (strcmp(stl_brdstr[i].name, argp[0]) == 0)
break;
}
- if (i >= nrbrdnames) {
+ if (i == ARRAY_SIZE(stl_brdstr)) {
printk("STALLION: unknown board name, %s?\n", argp[0]);
- return(0);
+ return 0;
}
confp->brdtype = stl_brdstr[i].type;
@@ -2902,7 +2901,8 @@ static int stl_getportstats(stlport_t *portp, comstats_t __user *cp)
if (portp->tty != (struct tty_struct *) NULL) {
if (portp->tty->driver_data == portp) {
portp->stats.ttystate = portp->tty->flags;
- portp->stats.rxbuffered = portp->tty->flip.count;
+ /* No longer available as a statistic */
+ portp->stats.rxbuffered = 1; /*portp->tty->flip.count; */
if (portp->tty->termios != (struct termios *) NULL) {
portp->stats.cflags = portp->tty->termios->c_cflag;
portp->stats.iflags = portp->tty->termios->c_iflag;
@@ -4046,9 +4046,7 @@ static void stl_cd1400rxisr(stlpanel_t *panelp, int ioaddr)
if ((ioack & ACK_TYPMASK) == ACK_TYPRXGOOD) {
outb((RDCR + portp->uartaddr), ioaddr);
len = inb(ioaddr + EREG_DATA);
- if ((tty == (struct tty_struct *) NULL) ||
- (tty->flip.char_buf_ptr == (char *) NULL) ||
- ((buflen = TTY_FLIPBUF_SIZE - tty->flip.count) == 0)) {
+ if (tty == NULL || (buflen = tty_buffer_request_room(tty, len)) == 0) {
len = MIN(len, sizeof(stl_unwanted));
outb((RDSR + portp->uartaddr), ioaddr);
insb((ioaddr + EREG_DATA), &stl_unwanted[0], len);
@@ -4057,12 +4055,10 @@ static void stl_cd1400rxisr(stlpanel_t *panelp, int ioaddr)
} else {
len = MIN(len, buflen);
if (len > 0) {
+ unsigned char *ptr;
outb((RDSR + portp->uartaddr), ioaddr);
- insb((ioaddr + EREG_DATA), tty->flip.char_buf_ptr, len);
- memset(tty->flip.flag_buf_ptr, 0, len);
- tty->flip.flag_buf_ptr += len;
- tty->flip.char_buf_ptr += len;
- tty->flip.count += len;
+ tty_prepare_flip_string(tty, &ptr, len);
+ insb((ioaddr + EREG_DATA), ptr, len);
tty_schedule_flip(tty);
portp->stats.rxtotal += len;
}
@@ -4086,8 +4082,7 @@ static void stl_cd1400rxisr(stlpanel_t *panelp, int ioaddr)
portp->stats.txxoff++;
goto stl_rxalldone;
}
- if ((tty != (struct tty_struct *) NULL) &&
- ((portp->rxignoremsk & status) == 0)) {
+ if (tty != NULL && (portp->rxignoremsk & status) == 0) {
if (portp->rxmarkmsk & status) {
if (status & ST_BREAK) {
status = TTY_BREAK;
@@ -4107,14 +4102,8 @@ static void stl_cd1400rxisr(stlpanel_t *panelp, int ioaddr)
} else {
status = 0;
}
- if (tty->flip.char_buf_ptr != (char *) NULL) {
- if (tty->flip.count < TTY_FLIPBUF_SIZE) {
- *tty->flip.flag_buf_ptr++ = status;
- *tty->flip.char_buf_ptr++ = ch;
- tty->flip.count++;
- }
- tty_schedule_flip(tty);
- }
+ tty_insert_flip_char(tty, ch, status);
+ tty_schedule_flip(tty);
}
} else {
printk("STALLION: bad RX interrupt ack value=%x\n", ioack);
@@ -5013,9 +5002,7 @@ static void stl_sc26198rxisr(stlport_t *portp, unsigned int iack)
len = inb(ioaddr + XP_DATA) + 1;
if ((iack & IVR_TYPEMASK) == IVR_RXDATA) {
- if ((tty == (struct tty_struct *) NULL) ||
- (tty->flip.char_buf_ptr == (char *) NULL) ||
- ((buflen = TTY_FLIPBUF_SIZE - tty->flip.count) == 0)) {
+ if (tty == NULL || (buflen = tty_buffer_request_room(tty, len)) == 0) {
len = MIN(len, sizeof(stl_unwanted));
outb(GRXFIFO, (ioaddr + XP_ADDR));
insb((ioaddr + XP_DATA), &stl_unwanted[0], len);
@@ -5024,12 +5011,10 @@ static void stl_sc26198rxisr(stlport_t *portp, unsigned int iack)
} else {
len = MIN(len, buflen);
if (len > 0) {
+ unsigned char *ptr;
outb(GRXFIFO, (ioaddr + XP_ADDR));
- insb((ioaddr + XP_DATA), tty->flip.char_buf_ptr, len);
- memset(tty->flip.flag_buf_ptr, 0, len);
- tty->flip.flag_buf_ptr += len;
- tty->flip.char_buf_ptr += len;
- tty->flip.count += len;
+ tty_prepare_flip_string(tty, &ptr, len);
+ insb((ioaddr + XP_DATA), ptr, len);
tty_schedule_flip(tty);
portp->stats.rxtotal += len;
}
@@ -5097,14 +5082,8 @@ static inline void stl_sc26198rxbadch(stlport_t *portp, unsigned char status, ch
status = 0;
}
- if (tty->flip.char_buf_ptr != (char *) NULL) {
- if (tty->flip.count < TTY_FLIPBUF_SIZE) {
- *tty->flip.flag_buf_ptr++ = status;
- *tty->flip.char_buf_ptr++ = ch;
- tty->flip.count++;
- }
- tty_schedule_flip(tty);
- }
+ tty_insert_flip_char(tty, ch, status);
+ tty_schedule_flip(tty);
if (status == 0)
portp->stats.rxtotal++;
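For reference, a minimal sketch (not part of the patch) of the bulk-receive variant used in the stallion ISRs above, where tty_prepare_flip_string() hands back a writable region of the flip buffer instead of inserting one character at a time; fill_from_fifo() is a stand-in for the insb() calls in the real driver.

/* Illustrative sketch only: bulk receive into the flip buffer. */
static void example_rx_block(struct tty_struct *tty, int len)
{
	unsigned char *ptr;

	/* Returns how many bytes were actually reserved and points ptr
	 * at the writable region inside the flip buffer. */
	len = tty_prepare_flip_string(tty, &ptr, len);
	if (len <= 0)
		return;

	fill_from_fifo(ptr, len);	/* hypothetical; insb()/memcpy_fromio() in the drivers */

	/* Queue the data; tty_schedule_flip() defers the push to a work item. */
	tty_schedule_flip(tty);
}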
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index 3ad758a9a1d..64bf89cb574 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -345,9 +345,9 @@ static int si_probe_addrs[]= {0xc0000, 0xd0000, 0xe0000,
0xc8000, 0xd8000, 0xe8000, 0xa0000};
static int si1_probe_addrs[]= { 0xd0000};
-#define NR_SX_ADDRS (sizeof(sx_probe_addrs)/sizeof (int))
-#define NR_SI_ADDRS (sizeof(si_probe_addrs)/sizeof (int))
-#define NR_SI1_ADDRS (sizeof(si1_probe_addrs)/sizeof (int))
+#define NR_SX_ADDRS ARRAY_SIZE(sx_probe_addrs)
+#define NR_SI_ADDRS ARRAY_SIZE(si_probe_addrs)
+#define NR_SI1_ADDRS ARRAY_SIZE(si1_probe_addrs)
/* Set the mask to all-ones. This alas, only supports 32 interrupts.
@@ -1085,6 +1085,7 @@ static inline void sx_receive_chars (struct sx_port *port)
int rx_op;
struct tty_struct *tty;
int copied=0;
+ unsigned char *rp;
func_enter2 ();
tty = port->gs.tty;
@@ -1095,8 +1096,8 @@ static inline void sx_receive_chars (struct sx_port *port)
sx_dprintk (SX_DEBUG_RECEIVE, "rxop=%d, c = %d.\n", rx_op, c);
/* Don't copy more bytes than there is room for in the buffer */
- if (tty->flip.count + c > TTY_FLIPBUF_SIZE)
- c = TTY_FLIPBUF_SIZE - tty->flip.count;
+
+ c = tty_prepare_flip_string(tty, &rp, c);
sx_dprintk (SX_DEBUG_RECEIVE, "c = %d.\n", c);
@@ -1111,14 +1112,8 @@ static inline void sx_receive_chars (struct sx_port *port)
sx_dprintk (SX_DEBUG_RECEIVE , "Copying over %d chars. First is %d at %lx\n", c,
read_sx_byte (port->board, CHAN_OFFSET(port,hi_rxbuf) + rx_op),
CHAN_OFFSET(port, hi_rxbuf));
- memcpy_fromio (tty->flip.char_buf_ptr,
+ memcpy_fromio (rp,
port->board->base + CHAN_OFFSET(port,hi_rxbuf) + rx_op, c);
- memset(tty->flip.flag_buf_ptr, TTY_NORMAL, c);
-
- /* Update the kernel buffer end */
- tty->flip.count += c;
- tty->flip.char_buf_ptr += c;
- tty->flip.flag_buf_ptr += c;
/* This one last. ( Not essential.)
It allows the card to start putting more data into the buffer!
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index 62aa0e534a6..9f1b466c4f8 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -1467,6 +1467,7 @@ static void mgsl_isr_receive_data( struct mgsl_struct *info )
{
int Fifocount;
u16 status;
+ int work = 0;
unsigned char DataByte;
struct tty_struct *tty = info->tty;
struct mgsl_icount *icount = &info->icount;
@@ -1487,6 +1488,8 @@ static void mgsl_isr_receive_data( struct mgsl_struct *info )
/* flush the receive FIFO */
while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
+ int flag;
+
/* read one byte from RxFIFO */
outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
info->io_base + CCAR );
@@ -1498,13 +1501,9 @@ static void mgsl_isr_receive_data( struct mgsl_struct *info )
RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) )
usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- continue;
-
- *tty->flip.char_buf_ptr = DataByte;
icount->rx++;
- *tty->flip.flag_buf_ptr = 0;
+ flag = 0;
if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) {
printk("rxerr=%04X\n",status);
@@ -1530,41 +1529,31 @@ static void mgsl_isr_receive_data( struct mgsl_struct *info )
status &= info->read_status_mask;
if (status & RXSTATUS_BREAK_RECEIVED) {
- *tty->flip.flag_buf_ptr = TTY_BREAK;
+ flag = TTY_BREAK;
if (info->flags & ASYNC_SAK)
do_SAK(tty);
} else if (status & RXSTATUS_PARITY_ERROR)
- *tty->flip.flag_buf_ptr = TTY_PARITY;
+ flag = TTY_PARITY;
else if (status & RXSTATUS_FRAMING_ERROR)
- *tty->flip.flag_buf_ptr = TTY_FRAME;
- if (status & RXSTATUS_OVERRUN) {
- /* Overrun is special, since it's
- * reported immediately, and doesn't
- * affect the current character
- */
- if (tty->flip.count < TTY_FLIPBUF_SIZE) {
- tty->flip.count++;
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- *tty->flip.flag_buf_ptr = TTY_OVERRUN;
- }
- }
+ flag = TTY_FRAME;
} /* end of if (error) */
-
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
+ tty_insert_flip_char(tty, DataByte, flag);
+ if (status & RXSTATUS_OVERRUN) {
+ /* Overrun is special, since it's
+ * reported immediately, and doesn't
+ * affect the current character
+ */
+ work += tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ }
}
if ( debug_level >= DEBUG_LEVEL_ISR ) {
- printk("%s(%d):mgsl_isr_receive_data flip count=%d\n",
- __FILE__,__LINE__,tty->flip.count);
printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
__FILE__,__LINE__,icount->rx,icount->brk,
icount->parity,icount->frame,icount->overrun);
}
- if ( tty->flip.count )
+ if(work)
tty_flip_buffer_push(tty);
}
@@ -7058,7 +7047,7 @@ static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
{
static unsigned short BitPatterns[] =
{ 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
- static unsigned int Patterncount = sizeof(BitPatterns)/sizeof(unsigned short);
+ static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
unsigned int i;
BOOLEAN rc = TRUE;
unsigned long flags;
@@ -7501,9 +7490,9 @@ static int mgsl_adapter_test( struct mgsl_struct *info )
*/
static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
{
- static unsigned long BitPatterns[] = { 0x0, 0x55555555, 0xaaaaaaaa,
- 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
- unsigned long Patterncount = sizeof(BitPatterns)/sizeof(unsigned long);
+ static unsigned long BitPatterns[] =
+ { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
+ unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
unsigned long i;
unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
unsigned long * TestAddr;
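For reference (not part of the patch): the stallion, sx and synclink hunks above replace open-coded sizeof divisions with ARRAY_SIZE(), which the kernel defines in <linux/kernel.h> along these lines. A small usage sketch with an invented table:

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

/* Illustrative use: iterate a fixed table without repeating its element type. */
static const unsigned int example_baudtable[] = { 9600, 19200, 38400, 115200 };

static int example_lookup(unsigned int baud)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_baudtable); i++)
		if (example_baudtable[i] == baud)
			return i;
	return -1;
}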
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
new file mode 100644
index 00000000000..07c9be6a6bb
--- /dev/null
+++ b/drivers/char/synclink_gt.c
@@ -0,0 +1,4491 @@
+/*
+ * $Id: synclink_gt.c,v 4.22 2006/01/09 20:16:06 paulkf Exp $
+ *
+ * Device driver for Microgate SyncLink GT serial adapters.
+ *
+ * written by Paul Fulghum for Microgate Corporation
+ * paulkf@microgate.com
+ *
+ * Microgate and SyncLink are trademarks of Microgate Corporation
+ *
+ * This code is released under the GNU General Public License (GPL)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * DEBUG OUTPUT DEFINITIONS
+ *
+ * uncomment lines below to enable specific types of debug output
+ *
+ * DBGINFO information - most verbose output
+ * DBGERR serious errors
+ * DBGBH bottom half service routine debugging
+ * DBGISR interrupt service routine debugging
+ * DBGDATA output receive and transmit data
+ * DBGTBUF output transmit DMA buffers and registers
+ * DBGRBUF output receive DMA buffers and registers
+ */
+
+#define DBGINFO(fmt) if (debug_level >= DEBUG_LEVEL_INFO) printk fmt
+#define DBGERR(fmt) if (debug_level >= DEBUG_LEVEL_ERROR) printk fmt
+#define DBGBH(fmt) if (debug_level >= DEBUG_LEVEL_BH) printk fmt
+#define DBGISR(fmt) if (debug_level >= DEBUG_LEVEL_ISR) printk fmt
+#define DBGDATA(info, buf, size, label) if (debug_level >= DEBUG_LEVEL_DATA) trace_block((info), (buf), (size), (label))
+//#define DBGTBUF(info) dump_tbufs(info)
+//#define DBGRBUF(info) dump_rbufs(info)
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/ioctl.h>
+#include <linux/termios.h>
+#include <linux/bitops.h>
+#include <linux/workqueue.h>
+#include <linux/hdlc.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <asm/types.h>
+#include <asm/uaccess.h>
+
+#include "linux/synclink.h"
+
+#ifdef CONFIG_HDLC_MODULE
+#define CONFIG_HDLC 1
+#endif
+
+/*
+ * module identification
+ */
+static char *driver_name = "SyncLink GT";
+static char *driver_version = "$Revision: 4.22 $";
+static char *tty_driver_name = "synclink_gt";
+static char *tty_dev_prefix = "ttySLG";
+MODULE_LICENSE("GPL");
+#define MGSL_MAGIC 0x5401
+#define MAX_DEVICES 12
+
+static struct pci_device_id pci_table[] = {
+ {PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT4_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PCI_VENDOR_ID_MICROGATE, SYNCLINK_AC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {0,}, /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, pci_table);
+
+static int init_one(struct pci_dev *dev,const struct pci_device_id *ent);
+static void remove_one(struct pci_dev *dev);
+static struct pci_driver pci_driver = {
+ .name = "synclink_gt",
+ .id_table = pci_table,
+ .probe = init_one,
+ .remove = __devexit_p(remove_one),
+};
+
+static int pci_registered;
+
+/*
+ * module configuration and status
+ */
+static struct slgt_info *slgt_device_list;
+static int slgt_device_count;
+
+static int ttymajor;
+static int debug_level;
+static int maxframe[MAX_DEVICES];
+static int dosyncppp[MAX_DEVICES];
+
+module_param(ttymajor, int, 0);
+module_param(debug_level, int, 0);
+module_param_array(maxframe, int, NULL, 0);
+module_param_array(dosyncppp, int, NULL, 0);
+
+MODULE_PARM_DESC(ttymajor, "TTY major device number override: 0=auto assigned");
+MODULE_PARM_DESC(debug_level, "Debug syslog output: 0=disabled, 1 to 5=increasing detail");
+MODULE_PARM_DESC(maxframe, "Maximum frame size used by device (4096 to 65535)");
+MODULE_PARM_DESC(dosyncppp, "Enable synchronous net device, 0=disable 1=enable");
+
+/*
+ * tty support and callbacks
+ */
+#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
+
+static struct tty_driver *serial_driver;
+
+static int open(struct tty_struct *tty, struct file * filp);
+static void close(struct tty_struct *tty, struct file * filp);
+static void hangup(struct tty_struct *tty);
+static void set_termios(struct tty_struct *tty, struct termios *old_termios);
+
+static int write(struct tty_struct *tty, const unsigned char *buf, int count);
+static void put_char(struct tty_struct *tty, unsigned char ch);
+static void send_xchar(struct tty_struct *tty, char ch);
+static void wait_until_sent(struct tty_struct *tty, int timeout);
+static int write_room(struct tty_struct *tty);
+static void flush_chars(struct tty_struct *tty);
+static void flush_buffer(struct tty_struct *tty);
+static void tx_hold(struct tty_struct *tty);
+static void tx_release(struct tty_struct *tty);
+
+static int ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg);
+static int read_proc(char *page, char **start, off_t off, int count,int *eof, void *data);
+static int chars_in_buffer(struct tty_struct *tty);
+static void throttle(struct tty_struct * tty);
+static void unthrottle(struct tty_struct * tty);
+static void set_break(struct tty_struct *tty, int break_state);
+
+/*
+ * generic HDLC support and callbacks
+ */
+#ifdef CONFIG_HDLC
+#define dev_to_port(D) (dev_to_hdlc(D)->priv)
+static void hdlcdev_tx_done(struct slgt_info *info);
+static void hdlcdev_rx(struct slgt_info *info, char *buf, int size);
+static int hdlcdev_init(struct slgt_info *info);
+static void hdlcdev_exit(struct slgt_info *info);
+#endif
+
+
+/*
+ * device specific structures, macros and functions
+ */
+
+#define SLGT_MAX_PORTS 4
+#define SLGT_REG_SIZE 256
+
+/*
+ * DMA buffer descriptor and access macros
+ */
+struct slgt_desc
+{
+ unsigned short count;
+ unsigned short status;
+ unsigned int pbuf; /* physical address of data buffer */
+ unsigned int next; /* physical address of next descriptor */
+
+ /* driver bookkeeping */
+ char *buf; /* virtual address of data buffer */
+ unsigned int pdesc; /* physical address of this descriptor */
+ dma_addr_t buf_dma_addr;
+};
+
+#define set_desc_buffer(a,b) (a).pbuf = cpu_to_le32((unsigned int)(b))
+#define set_desc_next(a,b) (a).next = cpu_to_le32((unsigned int)(b))
+#define set_desc_count(a,b) (a).count = cpu_to_le16((unsigned short)(b))
+#define set_desc_eof(a,b) (a).status = cpu_to_le16((b) ? (le16_to_cpu((a).status) | BIT0) : (le16_to_cpu((a).status) & ~BIT0))
+#define desc_count(a) (le16_to_cpu((a).count))
+#define desc_status(a) (le16_to_cpu((a).status))
+#define desc_complete(a) (le16_to_cpu((a).status) & BIT15)
+#define desc_eof(a) (le16_to_cpu((a).status) & BIT2)
+#define desc_crc_error(a) (le16_to_cpu((a).status) & BIT1)
+#define desc_abort(a) (le16_to_cpu((a).status) & BIT0)
+#define desc_residue(a) ((le16_to_cpu((a).status) & 0x38) >> 3)
+
+struct _input_signal_events {
+ int ri_up;
+ int ri_down;
+ int dsr_up;
+ int dsr_down;
+ int dcd_up;
+ int dcd_down;
+ int cts_up;
+ int cts_down;
+};
+
+/*
+ * device instance data structure
+ */
+struct slgt_info {
+ void *if_ptr; /* General purpose pointer (used by SPPP) */
+
+ struct slgt_info *next_device; /* device list link */
+
+ int magic;
+ int flags;
+
+ char device_name[25];
+ struct pci_dev *pdev;
+
+ int port_count; /* count of ports on adapter */
+ int adapter_num; /* adapter instance number */
+ int port_num; /* port instance number */
+
+ /* array of pointers to port contexts on this adapter */
+ struct slgt_info *port_array[SLGT_MAX_PORTS];
+
+ int count; /* count of opens */
+ int line; /* tty line instance number */
+ unsigned short close_delay;
+ unsigned short closing_wait; /* time to wait before closing */
+
+ struct mgsl_icount icount;
+
+ struct tty_struct *tty;
+ int timeout;
+ int x_char; /* xon/xoff character */
+ int blocked_open; /* # of blocked opens */
+ unsigned int read_status_mask;
+ unsigned int ignore_status_mask;
+
+ wait_queue_head_t open_wait;
+ wait_queue_head_t close_wait;
+
+ wait_queue_head_t status_event_wait_q;
+ wait_queue_head_t event_wait_q;
+ struct timer_list tx_timer;
+ struct timer_list rx_timer;
+
+ spinlock_t lock; /* spinlock for synchronizing with ISR */
+
+ struct work_struct task;
+ u32 pending_bh;
+ int bh_requested;
+ int bh_running;
+
+ int isr_overflow;
+ int irq_requested; /* nonzero if IRQ requested */
+ int irq_occurred; /* for diagnostics use */
+
+ /* device configuration */
+
+ unsigned int bus_type;
+ unsigned int irq_level;
+ unsigned long irq_flags;
+
+ unsigned char __iomem * reg_addr; /* memory mapped registers address */
+ u32 phys_reg_addr;
+ int reg_addr_requested;
+
+ MGSL_PARAMS params; /* communications parameters */
+ u32 idle_mode;
+ u32 max_frame_size; /* as set by device config */
+
+ unsigned int raw_rx_size;
+ unsigned int if_mode;
+
+ /* device status */
+
+ int rx_enabled;
+ int rx_restart;
+
+ int tx_enabled;
+ int tx_active;
+
+ unsigned char signals; /* serial signal states */
+ unsigned int init_error; /* initialization error */
+
+ unsigned char *tx_buf;
+ int tx_count;
+
+ char flag_buf[MAX_ASYNC_BUFFER_SIZE];
+ char char_buf[MAX_ASYNC_BUFFER_SIZE];
+ BOOLEAN drop_rts_on_tx_done;
+ struct _input_signal_events input_signal_events;
+
+ int dcd_chkcount; /* check counts to prevent */
+ int cts_chkcount; /* too many IRQs if a signal */
+ int dsr_chkcount; /* is floating */
+ int ri_chkcount;
+
+ char *bufs; /* virtual address of DMA buffer lists */
+ dma_addr_t bufs_dma_addr; /* physical address of buffer descriptors */
+
+ unsigned int rbuf_count;
+ struct slgt_desc *rbufs;
+ unsigned int rbuf_current;
+ unsigned int rbuf_index;
+
+ unsigned int tbuf_count;
+ struct slgt_desc *tbufs;
+ unsigned int tbuf_current;
+ unsigned int tbuf_start;
+
+ unsigned char *tmp_rbuf;
+ unsigned int tmp_rbuf_count;
+
+ /* SPPP/Cisco HDLC device parts */
+
+ int netcount;
+ int dosyncppp;
+ spinlock_t netlock;
+#ifdef CONFIG_HDLC
+ struct net_device *netdev;
+#endif
+
+};
+
+static MGSL_PARAMS default_params = {
+ .mode = MGSL_MODE_HDLC,
+ .loopback = 0,
+ .flags = HDLC_FLAG_UNDERRUN_ABORT15,
+ .encoding = HDLC_ENCODING_NRZI_SPACE,
+ .clock_speed = 0,
+ .addr_filter = 0xff,
+ .crc_type = HDLC_CRC_16_CCITT,
+ .preamble_length = HDLC_PREAMBLE_LENGTH_8BITS,
+ .preamble = HDLC_PREAMBLE_PATTERN_NONE,
+ .data_rate = 9600,
+ .data_bits = 8,
+ .stop_bits = 1,
+ .parity = ASYNC_PARITY_NONE
+};
+
+
+#define BH_RECEIVE 1
+#define BH_TRANSMIT 2
+#define BH_STATUS 4
+#define IO_PIN_SHUTDOWN_LIMIT 100
+
+#define DMABUFSIZE 256
+#define DESC_LIST_SIZE 4096
+
+#define MASK_PARITY BIT1
+#define MASK_FRAMING BIT2
+#define MASK_BREAK BIT3
+#define MASK_OVERRUN BIT4
+
+#define GSR 0x00 /* global status */
+#define TDR 0x80 /* tx data */
+#define RDR 0x80 /* rx data */
+#define TCR 0x82 /* tx control */
+#define TIR 0x84 /* tx idle */
+#define TPR 0x85 /* tx preamble */
+#define RCR 0x86 /* rx control */
+#define VCR 0x88 /* V.24 control */
+#define CCR 0x89 /* clock control */
+#define BDR 0x8a /* baud divisor */
+#define SCR 0x8c /* serial control */
+#define SSR 0x8e /* serial status */
+#define RDCSR 0x90 /* rx DMA control/status */
+#define TDCSR 0x94 /* tx DMA control/status */
+#define RDDAR 0x98 /* rx DMA descriptor address */
+#define TDDAR 0x9c /* tx DMA descriptor address */
+
+#define RXIDLE BIT14
+#define RXBREAK BIT14
+#define IRQ_TXDATA BIT13
+#define IRQ_TXIDLE BIT12
+#define IRQ_TXUNDER BIT11 /* HDLC */
+#define IRQ_RXDATA BIT10
+#define IRQ_RXIDLE BIT9 /* HDLC */
+#define IRQ_RXBREAK BIT9 /* async */
+#define IRQ_RXOVER BIT8
+#define IRQ_DSR BIT7
+#define IRQ_CTS BIT6
+#define IRQ_DCD BIT5
+#define IRQ_RI BIT4
+#define IRQ_ALL 0x3ff0
+#define IRQ_MASTER BIT0
+
+#define slgt_irq_on(info, mask) \
+ wr_reg16((info), SCR, (unsigned short)(rd_reg16((info), SCR) | (mask)))
+#define slgt_irq_off(info, mask) \
+ wr_reg16((info), SCR, (unsigned short)(rd_reg16((info), SCR) & ~(mask)))
+
+static __u8 rd_reg8(struct slgt_info *info, unsigned int addr);
+static void wr_reg8(struct slgt_info *info, unsigned int addr, __u8 value);
+static __u16 rd_reg16(struct slgt_info *info, unsigned int addr);
+static void wr_reg16(struct slgt_info *info, unsigned int addr, __u16 value);
+static __u32 rd_reg32(struct slgt_info *info, unsigned int addr);
+static void wr_reg32(struct slgt_info *info, unsigned int addr, __u32 value);
+
+static void msc_set_vcr(struct slgt_info *info);
+
+static int startup(struct slgt_info *info);
+static int block_til_ready(struct tty_struct *tty, struct file * filp,struct slgt_info *info);
+static void shutdown(struct slgt_info *info);
+static void program_hw(struct slgt_info *info);
+static void change_params(struct slgt_info *info);
+
+static int register_test(struct slgt_info *info);
+static int irq_test(struct slgt_info *info);
+static int loopback_test(struct slgt_info *info);
+static int adapter_test(struct slgt_info *info);
+
+static void reset_adapter(struct slgt_info *info);
+static void reset_port(struct slgt_info *info);
+static void async_mode(struct slgt_info *info);
+static void hdlc_mode(struct slgt_info *info);
+
+static void rx_stop(struct slgt_info *info);
+static void rx_start(struct slgt_info *info);
+static void reset_rbufs(struct slgt_info *info);
+static void free_rbufs(struct slgt_info *info, unsigned int first, unsigned int last);
+static void rdma_reset(struct slgt_info *info);
+static int rx_get_frame(struct slgt_info *info);
+static int rx_get_buf(struct slgt_info *info);
+
+static void tx_start(struct slgt_info *info);
+static void tx_stop(struct slgt_info *info);
+static void tx_set_idle(struct slgt_info *info);
+static unsigned int free_tbuf_count(struct slgt_info *info);
+static void reset_tbufs(struct slgt_info *info);
+static void tdma_reset(struct slgt_info *info);
+static void tx_load(struct slgt_info *info, const char *buf, unsigned int count);
+
+static void get_signals(struct slgt_info *info);
+static void set_signals(struct slgt_info *info);
+static void enable_loopback(struct slgt_info *info);
+static void set_rate(struct slgt_info *info, u32 data_rate);
+
+static int bh_action(struct slgt_info *info);
+static void bh_handler(void* context);
+static void bh_transmit(struct slgt_info *info);
+static void isr_serial(struct slgt_info *info);
+static void isr_rdma(struct slgt_info *info);
+static void isr_txeom(struct slgt_info *info, unsigned short status);
+static void isr_tdma(struct slgt_info *info);
+static irqreturn_t slgt_interrupt(int irq, void *dev_id, struct pt_regs * regs);
+
+static int alloc_dma_bufs(struct slgt_info *info);
+static void free_dma_bufs(struct slgt_info *info);
+static int alloc_desc(struct slgt_info *info);
+static void free_desc(struct slgt_info *info);
+static int alloc_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count);
+static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count);
+
+static int alloc_tmp_rbuf(struct slgt_info *info);
+static void free_tmp_rbuf(struct slgt_info *info);
+
+static void tx_timeout(unsigned long context);
+static void rx_timeout(unsigned long context);
+
+/*
+ * ioctl handlers
+ */
+static int get_stats(struct slgt_info *info, struct mgsl_icount __user *user_icount);
+static int get_params(struct slgt_info *info, MGSL_PARAMS __user *params);
+static int set_params(struct slgt_info *info, MGSL_PARAMS __user *params);
+static int get_txidle(struct slgt_info *info, int __user *idle_mode);
+static int set_txidle(struct slgt_info *info, int idle_mode);
+static int tx_enable(struct slgt_info *info, int enable);
+static int tx_abort(struct slgt_info *info);
+static int rx_enable(struct slgt_info *info, int enable);
+static int modem_input_wait(struct slgt_info *info,int arg);
+static int wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr);
+static int tiocmget(struct tty_struct *tty, struct file *file);
+static int tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear);
+static void set_break(struct tty_struct *tty, int break_state);
+static int get_interface(struct slgt_info *info, int __user *if_mode);
+static int set_interface(struct slgt_info *info, int if_mode);
+
+/*
+ * driver functions
+ */
+static void add_device(struct slgt_info *info);
+static void device_init(int adapter_num, struct pci_dev *pdev);
+static int claim_resources(struct slgt_info *info);
+static void release_resources(struct slgt_info *info);
+
+/*
+ * DEBUG OUTPUT CODE
+ */
+#ifndef DBGINFO
+#define DBGINFO(fmt)
+#endif
+#ifndef DBGERR
+#define DBGERR(fmt)
+#endif
+#ifndef DBGBH
+#define DBGBH(fmt)
+#endif
+#ifndef DBGISR
+#define DBGISR(fmt)
+#endif
+
+#ifdef DBGDATA
+static void trace_block(struct slgt_info *info, const char *data, int count, const char *label)
+{
+ int i;
+ int linecount;
+ printk("%s %s data:\n",info->device_name, label);
+ while(count) {
+ linecount = (count > 16) ? 16 : count;
+ for(i=0; i < linecount; i++)
+ printk("%02X ",(unsigned char)data[i]);
+ for(;i<17;i++)
+ printk(" ");
+ for(i=0;i<linecount;i++) {
+ if (data[i]>=040 && data[i]<=0176)
+ printk("%c",data[i]);
+ else
+ printk(".");
+ }
+ printk("\n");
+ data += linecount;
+ count -= linecount;
+ }
+}
+#else
+#define DBGDATA(info, buf, size, label)
+#endif
+
+#ifdef DBGTBUF
+static void dump_tbufs(struct slgt_info *info)
+{
+ int i;
+ printk("tbuf_current=%d\n", info->tbuf_current);
+ for (i=0 ; i < info->tbuf_count ; i++) {
+ printk("%d: count=%04X status=%04X\n",
+ i, le16_to_cpu(info->tbufs[i].count), le16_to_cpu(info->tbufs[i].status));
+ }
+}
+#else
+#define DBGTBUF(info)
+#endif
+
+#ifdef DBGRBUF
+static void dump_rbufs(struct slgt_info *info)
+{
+ int i;
+ printk("rbuf_current=%d\n", info->rbuf_current);
+ for (i=0 ; i < info->rbuf_count ; i++) {
+ printk("%d: count=%04X status=%04X\n",
+ i, le16_to_cpu(info->rbufs[i].count), le16_to_cpu(info->rbufs[i].status));
+ }
+}
+#else
+#define DBGRBUF(info)
+#endif
+
+static inline int sanity_check(struct slgt_info *info, char *devname, const char *name)
+{
+#ifdef SANITY_CHECK
+ if (!info) {
+ printk("null struct slgt_info for (%s) in %s\n", devname, name);
+ return 1;
+ }
+ if (info->magic != MGSL_MAGIC) {
+ printk("bad magic number struct slgt_info (%s) in %s\n", devname, name);
+ return 1;
+ }
+#else
+ if (!info)
+ return 1;
+#endif
+ return 0;
+}
+
+/**
+ * line discipline callback wrappers
+ *
+ * The wrappers maintain line discipline references
+ * while calling into the line discipline.
+ *
+ * ldisc_receive_buf - pass receive data to line discipline
+ */
+static void ldisc_receive_buf(struct tty_struct *tty,
+ const __u8 *data, char *flags, int count)
+{
+ struct tty_ldisc *ld;
+ if (!tty)
+ return;
+ ld = tty_ldisc_ref(tty);
+ if (ld) {
+ if (ld->receive_buf)
+ ld->receive_buf(tty, data, flags, count);
+ tty_ldisc_deref(ld);
+ }
+}
+
+/* tty callbacks */
+
+static int open(struct tty_struct *tty, struct file *filp)
+{
+ struct slgt_info *info;
+ int retval, line;
+ unsigned long flags;
+
+ line = tty->index;
+ if ((line < 0) || (line >= slgt_device_count)) {
+ DBGERR(("%s: open with invalid line #%d.\n", driver_name, line));
+ return -ENODEV;
+ }
+
+ info = slgt_device_list;
+ while(info && info->line != line)
+ info = info->next_device;
+ if (sanity_check(info, tty->name, "open"))
+ return -ENODEV;
+ if (info->init_error) {
+ DBGERR(("%s init error=%d\n", info->device_name, info->init_error));
+ return -ENODEV;
+ }
+
+ tty->driver_data = info;
+ info->tty = tty;
+
+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->count));
+
+ /* If port is closing, signal caller to try again */
+ if (tty_hung_up_p(filp) || info->flags & ASYNC_CLOSING){
+ if (info->flags & ASYNC_CLOSING)
+ interruptible_sleep_on(&info->close_wait);
+ retval = ((info->flags & ASYNC_HUP_NOTIFY) ?
+ -EAGAIN : -ERESTARTSYS);
+ goto cleanup;
+ }
+
+ info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+
+ spin_lock_irqsave(&info->netlock, flags);
+ if (info->netcount) {
+ retval = -EBUSY;
+ spin_unlock_irqrestore(&info->netlock, flags);
+ goto cleanup;
+ }
+ info->count++;
+ spin_unlock_irqrestore(&info->netlock, flags);
+
+ if (info->count == 1) {
+ /* 1st open on this device, init hardware */
+ retval = startup(info);
+ if (retval < 0)
+ goto cleanup;
+ }
+
+ retval = block_til_ready(tty, filp, info);
+ if (retval) {
+ DBGINFO(("%s block_til_ready rc=%d\n", info->device_name, retval));
+ goto cleanup;
+ }
+
+ retval = 0;
+
+cleanup:
+ if (retval) {
+ if (tty->count == 1)
+ info->tty = NULL; /* tty layer will release tty struct */
+ if(info->count)
+ info->count--;
+ }
+
+ DBGINFO(("%s open rc=%d\n", info->device_name, retval));
+ return retval;
+}
+
+static void close(struct tty_struct *tty, struct file *filp)
+{
+ struct slgt_info *info = tty->driver_data;
+
+ if (sanity_check(info, tty->name, "close"))
+ return;
+ DBGINFO(("%s close entry, count=%d\n", info->device_name, info->count));
+
+ if (!info->count)
+ return;
+
+ if (tty_hung_up_p(filp))
+ goto cleanup;
+
+ if ((tty->count == 1) && (info->count != 1)) {
+ /*
+ * tty->count is 1 and the tty structure will be freed.
+ * info->count should be one in this case.
+ * if it's not, correct it so that the port is shutdown.
+ */
+ DBGERR(("%s close: bad refcount; tty->count=1, "
+ "info->count=%d\n", info->device_name, info->count));
+ info->count = 1;
+ }
+
+ info->count--;
+
+ /* if at least one open remaining, leave hardware active */
+ if (info->count)
+ goto cleanup;
+
+ info->flags |= ASYNC_CLOSING;
+
+ /* set tty->closing to notify line discipline to
+ * only process XON/XOFF characters. Only the N_TTY
+ * discipline appears to use this (ppp does not).
+ */
+ tty->closing = 1;
+
+ /* wait for transmit data to clear all layers */
+
+ if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE) {
+ DBGINFO(("%s call tty_wait_until_sent\n", info->device_name));
+ tty_wait_until_sent(tty, info->closing_wait);
+ }
+
+ if (info->flags & ASYNC_INITIALIZED)
+ wait_until_sent(tty, info->timeout);
+ if (tty->driver->flush_buffer)
+ tty->driver->flush_buffer(tty);
+ tty_ldisc_flush(tty);
+
+ shutdown(info);
+
+ tty->closing = 0;
+ info->tty = NULL;
+
+ if (info->blocked_open) {
+ if (info->close_delay) {
+ msleep_interruptible(jiffies_to_msecs(info->close_delay));
+ }
+ wake_up_interruptible(&info->open_wait);
+ }
+
+ info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
+
+ wake_up_interruptible(&info->close_wait);
+
+cleanup:
+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->count));
+}
+
+static void hangup(struct tty_struct *tty)
+{
+ struct slgt_info *info = tty->driver_data;
+
+ if (sanity_check(info, tty->name, "hangup"))
+ return;
+ DBGINFO(("%s hangup\n", info->device_name));
+
+ flush_buffer(tty);
+ shutdown(info);
+
+ info->count = 0;
+ info->flags &= ~ASYNC_NORMAL_ACTIVE;
+ info->tty = NULL;
+
+ wake_up_interruptible(&info->open_wait);
+}
+
+static void set_termios(struct tty_struct *tty, struct termios *old_termios)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ DBGINFO(("%s set_termios\n", tty->driver->name));
+
+ /* just return if nothing has changed */
+ if ((tty->termios->c_cflag == old_termios->c_cflag)
+ && (RELEVANT_IFLAG(tty->termios->c_iflag)
+ == RELEVANT_IFLAG(old_termios->c_iflag)))
+ return;
+
+ change_params(info);
+
+ /* Handle transition to B0 status */
+ if (old_termios->c_cflag & CBAUD &&
+ !(tty->termios->c_cflag & CBAUD)) {
+ info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+ spin_lock_irqsave(&info->lock,flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+
+ /* Handle transition away from B0 status */
+ if (!(old_termios->c_cflag & CBAUD) &&
+ tty->termios->c_cflag & CBAUD) {
+ info->signals |= SerialSignal_DTR;
+ if (!(tty->termios->c_cflag & CRTSCTS) ||
+ !test_bit(TTY_THROTTLED, &tty->flags)) {
+ info->signals |= SerialSignal_RTS;
+ }
+ spin_lock_irqsave(&info->lock,flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+
+ /* Handle turning off CRTSCTS */
+ if (old_termios->c_cflag & CRTSCTS &&
+ !(tty->termios->c_cflag & CRTSCTS)) {
+ tty->hw_stopped = 0;
+ tx_release(tty);
+ }
+}
+
+static int write(struct tty_struct *tty,
+ const unsigned char *buf, int count)
+{
+ int ret = 0;
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "write"))
+ goto cleanup;
+ DBGINFO(("%s write count=%d\n", info->device_name, count));
+
+ if (!tty || !info->tx_buf)
+ goto cleanup;
+
+ if (count > info->max_frame_size) {
+ ret = -EIO;
+ goto cleanup;
+ }
+
+ if (!count)
+ goto cleanup;
+
+ if (info->params.mode == MGSL_MODE_RAW) {
+ unsigned int bufs_needed = (count/DMABUFSIZE);
+ unsigned int bufs_free = free_tbuf_count(info);
+ if (count % DMABUFSIZE)
+ ++bufs_needed;
+ if (bufs_needed > bufs_free)
+ goto cleanup;
+ } else {
+ if (info->tx_active)
+ goto cleanup;
+ if (info->tx_count) {
+ /* send accumulated data from send_char() calls */
+ /* as frame and wait before accepting more data. */
+ tx_load(info, info->tx_buf, info->tx_count);
+ goto start;
+ }
+ }
+
+ ret = info->tx_count = count;
+ tx_load(info, buf, count);
+ goto start;
+
+start:
+ if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
+ spin_lock_irqsave(&info->lock,flags);
+ if (!info->tx_active)
+ tx_start(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+
+cleanup:
+ DBGINFO(("%s write rc=%d\n", info->device_name, ret));
+ return ret;
+}
+
+static void put_char(struct tty_struct *tty, unsigned char ch)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "put_char"))
+ return;
+ DBGINFO(("%s put_char(%d)\n", info->device_name, ch));
+ if (!tty || !info->tx_buf)
+ return;
+ spin_lock_irqsave(&info->lock,flags);
+ if (!info->tx_active && (info->tx_count < info->max_frame_size))
+ info->tx_buf[info->tx_count++] = ch;
+ spin_unlock_irqrestore(&info->lock,flags);
+}
+
+static void send_xchar(struct tty_struct *tty, char ch)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "send_xchar"))
+ return;
+ DBGINFO(("%s send_xchar(%d)\n", info->device_name, ch));
+ info->x_char = ch;
+ if (ch) {
+ spin_lock_irqsave(&info->lock,flags);
+ if (!info->tx_enabled)
+ tx_start(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+}
+
+static void wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long orig_jiffies, char_time;
+
+ if (!info )
+ return;
+ if (sanity_check(info, tty->name, "wait_until_sent"))
+ return;
+ DBGINFO(("%s wait_until_sent entry\n", info->device_name));
+ if (!(info->flags & ASYNC_INITIALIZED))
+ goto exit;
+
+ orig_jiffies = jiffies;
+
+ /* Set check interval to 1/5 of estimated time to
+ * send a character, and make it at least 1. The check
+ * interval should also be less than the timeout.
+ * Note: use tight timings here to satisfy the NIST-PCTS.
+ */
+
+ if (info->params.data_rate) {
+ char_time = info->timeout/(32 * 5);
+ if (!char_time)
+ char_time++;
+ } else
+ char_time = 1;
+
+ if (timeout)
+ char_time = min_t(unsigned long, char_time, timeout);
+
+ while (info->tx_active) {
+ msleep_interruptible(jiffies_to_msecs(char_time));
+ if (signal_pending(current))
+ break;
+ if (timeout && time_after(jiffies, orig_jiffies + timeout))
+ break;
+ }
+
+exit:
+ DBGINFO(("%s wait_until_sent exit\n", info->device_name));
+}
+
+static int write_room(struct tty_struct *tty)
+{
+ struct slgt_info *info = tty->driver_data;
+ int ret;
+
+ if (sanity_check(info, tty->name, "write_room"))
+ return 0;
+ ret = (info->tx_active) ? 0 : HDLC_MAX_FRAME_SIZE;
+ DBGINFO(("%s write_room=%d\n", info->device_name, ret));
+ return ret;
+}
+
+static void flush_chars(struct tty_struct *tty)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "flush_chars"))
+ return;
+ DBGINFO(("%s flush_chars entry tx_count=%d\n", info->device_name, info->tx_count));
+
+ if (info->tx_count <= 0 || tty->stopped ||
+ tty->hw_stopped || !info->tx_buf)
+ return;
+
+ DBGINFO(("%s flush_chars start transmit\n", info->device_name));
+
+ spin_lock_irqsave(&info->lock,flags);
+ if (!info->tx_active && info->tx_count) {
+ tx_load(info, info->tx_buf,info->tx_count);
+ tx_start(info);
+ }
+ spin_unlock_irqrestore(&info->lock,flags);
+}
+
+static void flush_buffer(struct tty_struct *tty)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "flush_buffer"))
+ return;
+ DBGINFO(("%s flush_buffer\n", info->device_name));
+
+ spin_lock_irqsave(&info->lock,flags);
+ if (!info->tx_active)
+ info->tx_count = 0;
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ wake_up_interruptible(&tty->write_wait);
+ tty_wakeup(tty);
+}
+
+/*
+ * throttle (stop) transmitter
+ */
+static void tx_hold(struct tty_struct *tty)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "tx_hold"))
+ return;
+ DBGINFO(("%s tx_hold\n", info->device_name));
+ spin_lock_irqsave(&info->lock,flags);
+ if (info->tx_enabled && info->params.mode == MGSL_MODE_ASYNC)
+ tx_stop(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+}
+
+/*
+ * release (start) transmitter
+ */
+static void tx_release(struct tty_struct *tty)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "tx_release"))
+ return;
+ DBGINFO(("%s tx_release\n", info->device_name));
+ spin_lock_irqsave(&info->lock,flags);
+ if (!info->tx_active && info->tx_count) {
+ tx_load(info, info->tx_buf, info->tx_count);
+ tx_start(info);
+ }
+ spin_unlock_irqrestore(&info->lock,flags);
+}
+
+/*
+ * Service an IOCTL request
+ *
+ * Arguments
+ *
+ * tty pointer to tty instance data
+ * file pointer to associated file object for device
+ * cmd IOCTL command code
+ * arg command argument/context
+ *
+ * Return 0 if success, otherwise error code
+ */
+static int ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct slgt_info *info = tty->driver_data;
+ struct mgsl_icount cnow; /* kernel counter temps */
+ struct serial_icounter_struct __user *p_cuser; /* user space */
+ unsigned long flags;
+ void __user *argp = (void __user *)arg;
+
+ if (sanity_check(info, tty->name, "ioctl"))
+ return -ENODEV;
+ DBGINFO(("%s ioctl() cmd=%08X\n", info->device_name, cmd));
+
+ if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
+ (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ return -EIO;
+ }
+
+ switch (cmd) {
+ case MGSL_IOCGPARAMS:
+ return get_params(info, argp);
+ case MGSL_IOCSPARAMS:
+ return set_params(info, argp);
+ case MGSL_IOCGTXIDLE:
+ return get_txidle(info, argp);
+ case MGSL_IOCSTXIDLE:
+ return set_txidle(info, (int)arg);
+ case MGSL_IOCTXENABLE:
+ return tx_enable(info, (int)arg);
+ case MGSL_IOCRXENABLE:
+ return rx_enable(info, (int)arg);
+ case MGSL_IOCTXABORT:
+ return tx_abort(info);
+ case MGSL_IOCGSTATS:
+ return get_stats(info, argp);
+ case MGSL_IOCWAITEVENT:
+ return wait_mgsl_event(info, argp);
+ case TIOCMIWAIT:
+ return modem_input_wait(info,(int)arg);
+ case MGSL_IOCGIF:
+ return get_interface(info, argp);
+ case MGSL_IOCSIF:
+ return set_interface(info,(int)arg);
+ case TIOCGICOUNT:
+ spin_lock_irqsave(&info->lock,flags);
+ cnow = info->icount;
+ spin_unlock_irqrestore(&info->lock,flags);
+ p_cuser = argp;
+ if (put_user(cnow.cts, &p_cuser->cts) ||
+ put_user(cnow.dsr, &p_cuser->dsr) ||
+ put_user(cnow.rng, &p_cuser->rng) ||
+ put_user(cnow.dcd, &p_cuser->dcd) ||
+ put_user(cnow.rx, &p_cuser->rx) ||
+ put_user(cnow.tx, &p_cuser->tx) ||
+ put_user(cnow.frame, &p_cuser->frame) ||
+ put_user(cnow.overrun, &p_cuser->overrun) ||
+ put_user(cnow.parity, &p_cuser->parity) ||
+ put_user(cnow.brk, &p_cuser->brk) ||
+ put_user(cnow.buf_overrun, &p_cuser->buf_overrun))
+ return -EFAULT;
+ return 0;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+/*
+ * proc fs support
+ */
+static inline int line_info(char *buf, struct slgt_info *info)
+{
+ char stat_buf[30];
+ int ret;
+ unsigned long flags;
+
+ ret = sprintf(buf, "%s: IO=%08X IRQ=%d MaxFrameSize=%u\n",
+ info->device_name, info->phys_reg_addr,
+ info->irq_level, info->max_frame_size);
+
+ /* output current serial signal states */
+ spin_lock_irqsave(&info->lock,flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ stat_buf[0] = 0;
+ stat_buf[1] = 0;
+ if (info->signals & SerialSignal_RTS)
+ strcat(stat_buf, "|RTS");
+ if (info->signals & SerialSignal_CTS)
+ strcat(stat_buf, "|CTS");
+ if (info->signals & SerialSignal_DTR)
+ strcat(stat_buf, "|DTR");
+ if (info->signals & SerialSignal_DSR)
+ strcat(stat_buf, "|DSR");
+ if (info->signals & SerialSignal_DCD)
+ strcat(stat_buf, "|CD");
+ if (info->signals & SerialSignal_RI)
+ strcat(stat_buf, "|RI");
+
+ if (info->params.mode != MGSL_MODE_ASYNC) {
+ ret += sprintf(buf+ret, "\tHDLC txok:%d rxok:%d",
+ info->icount.txok, info->icount.rxok);
+ if (info->icount.txunder)
+ ret += sprintf(buf+ret, " txunder:%d", info->icount.txunder);
+ if (info->icount.txabort)
+ ret += sprintf(buf+ret, " txabort:%d", info->icount.txabort);
+ if (info->icount.rxshort)
+ ret += sprintf(buf+ret, " rxshort:%d", info->icount.rxshort);
+ if (info->icount.rxlong)
+ ret += sprintf(buf+ret, " rxlong:%d", info->icount.rxlong);
+ if (info->icount.rxover)
+ ret += sprintf(buf+ret, " rxover:%d", info->icount.rxover);
+ if (info->icount.rxcrc)
+ ret += sprintf(buf+ret, " rxcrc:%d", info->icount.rxcrc);
+ } else {
+ ret += sprintf(buf+ret, "\tASYNC tx:%d rx:%d",
+ info->icount.tx, info->icount.rx);
+ if (info->icount.frame)
+ ret += sprintf(buf+ret, " fe:%d", info->icount.frame);
+ if (info->icount.parity)
+ ret += sprintf(buf+ret, " pe:%d", info->icount.parity);
+ if (info->icount.brk)
+ ret += sprintf(buf+ret, " brk:%d", info->icount.brk);
+ if (info->icount.overrun)
+ ret += sprintf(buf+ret, " oe:%d", info->icount.overrun);
+ }
+
+ /* Append serial signal status to end */
+ ret += sprintf(buf+ret, " %s\n", stat_buf+1);
+
+ ret += sprintf(buf+ret, "\ttxactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
+ info->tx_active,info->bh_requested,info->bh_running,
+ info->pending_bh);
+
+ return ret;
+}
+
+/* Called to print information about devices
+ */
+static int read_proc(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ int len = 0, l;
+ off_t begin = 0;
+ struct slgt_info *info;
+
+ len += sprintf(page, "synclink_gt driver:%s\n", driver_version);
+
+ info = slgt_device_list;
+ while( info ) {
+ l = line_info(page + len, info);
+ len += l;
+ if (len+begin > off+count)
+ goto done;
+ if (len+begin < off) {
+ begin += len;
+ len = 0;
+ }
+ info = info->next_device;
+ }
+
+ *eof = 1;
+done:
+ if (off >= len+begin)
+ return 0;
+ *start = page + (off-begin);
+ return ((count < begin+len-off) ? count : begin+len-off);
+}
+
+/*
+ * return count of bytes in transmit buffer
+ */
+static int chars_in_buffer(struct tty_struct *tty)
+{
+ struct slgt_info *info = tty->driver_data;
+ if (sanity_check(info, tty->name, "chars_in_buffer"))
+ return 0;
+ DBGINFO(("%s chars_in_buffer()=%d\n", info->device_name, info->tx_count));
+ return info->tx_count;
+}
+
+/*
+ * signal remote device to throttle send data (our receive data)
+ */
+static void throttle(struct tty_struct * tty)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "throttle"))
+ return;
+ DBGINFO(("%s throttle\n", info->device_name));
+ if (I_IXOFF(tty))
+ send_xchar(tty, STOP_CHAR(tty));
+ if (tty->termios->c_cflag & CRTSCTS) {
+ spin_lock_irqsave(&info->lock,flags);
+ info->signals &= ~SerialSignal_RTS;
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+}
+
+/*
+ * signal remote device to stop throttling send data (our receive data)
+ */
+static void unthrottle(struct tty_struct * tty)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "unthrottle"))
+ return;
+ DBGINFO(("%s unthrottle\n", info->device_name));
+ if (I_IXOFF(tty)) {
+ if (info->x_char)
+ info->x_char = 0;
+ else
+ send_xchar(tty, START_CHAR(tty));
+ }
+ if (tty->termios->c_cflag & CRTSCTS) {
+ spin_lock_irqsave(&info->lock,flags);
+ info->signals |= SerialSignal_RTS;
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+}
+
+/*
+ * set or clear transmit break condition
+ * break_state -1=set break condition, 0=clear
+ */
+static void set_break(struct tty_struct *tty, int break_state)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned short value;
+ unsigned long flags;
+
+ if (sanity_check(info, tty->name, "set_break"))
+ return;
+ DBGINFO(("%s set_break(%d)\n", info->device_name, break_state));
+
+ spin_lock_irqsave(&info->lock,flags);
+ value = rd_reg16(info, TCR);
+ if (break_state == -1)
+ value |= BIT6;
+ else
+ value &= ~BIT6;
+ wr_reg16(info, TCR, value);
+ spin_unlock_irqrestore(&info->lock,flags);
+}
+
+#ifdef CONFIG_HDLC
+
+/**
+ * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
+ * set encoding and frame check sequence (FCS) options
+ *
+ * dev pointer to network device structure
+ * encoding serial encoding setting
+ * parity FCS setting
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
+ unsigned short parity)
+{
+ struct slgt_info *info = dev_to_port(dev);
+ unsigned char new_encoding;
+ unsigned short new_crctype;
+
+ /* return error if TTY interface open */
+ if (info->count)
+ return -EBUSY;
+
+ DBGINFO(("%s hdlcdev_attach\n", info->device_name));
+
+ switch (encoding)
+ {
+ case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
+ case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
+ case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
+ case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
+ case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
+ default: return -EINVAL;
+ }
+
+ switch (parity)
+ {
+ case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
+ case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
+ case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
+ default: return -EINVAL;
+ }
+
+ info->params.encoding = new_encoding;
+ info->params.crc_type = new_crctype;
+
+ /* if network interface up, reprogram hardware */
+ if (info->netcount)
+ program_hw(info);
+
+ return 0;
+}
+
+/**
+ * called by generic HDLC layer to send frame
+ *
+ * skb socket buffer containing HDLC frame
+ * dev pointer to network device structure
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct slgt_info *info = dev_to_port(dev);
+ struct net_device_stats *stats = hdlc_stats(dev);
+ unsigned long flags;
+
+ DBGINFO(("%s hdlc_xmit\n", dev->name));
+
+ /* stop sending until this frame completes */
+ netif_stop_queue(dev);
+
+ /* copy data to device buffers */
+ info->tx_count = skb->len;
+ tx_load(info, skb->data, skb->len);
+
+ /* update network statistics */
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+
+ /* done with socket buffer, so free it */
+ dev_kfree_skb(skb);
+
+ /* save start time for transmit timeout detection */
+ dev->trans_start = jiffies;
+
+ /* start hardware transmitter if necessary */
+ spin_lock_irqsave(&info->lock,flags);
+ if (!info->tx_active)
+ tx_start(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ return 0;
+}
+
+/**
+ * called by network layer when interface enabled
+ * claim resources and initialize hardware
+ *
+ * dev pointer to network device structure
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_open(struct net_device *dev)
+{
+ struct slgt_info *info = dev_to_port(dev);
+ int rc;
+ unsigned long flags;
+
+ DBGINFO(("%s hdlcdev_open\n", dev->name));
+
+ /* generic HDLC layer open processing */
+ if ((rc = hdlc_open(dev)))
+ return rc;
+
+ /* arbitrate between network and tty opens */
+ spin_lock_irqsave(&info->netlock, flags);
+ if (info->count != 0 || info->netcount != 0) {
+ DBGINFO(("%s hdlc_open busy\n", dev->name));
+ spin_unlock_irqrestore(&info->netlock, flags);
+ return -EBUSY;
+ }
+ info->netcount=1;
+ spin_unlock_irqrestore(&info->netlock, flags);
+
+ /* claim resources and init adapter */
+ if ((rc = startup(info)) != 0) {
+ spin_lock_irqsave(&info->netlock, flags);
+ info->netcount=0;
+ spin_unlock_irqrestore(&info->netlock, flags);
+ return rc;
+ }
+
+ /* assert DTR and RTS, apply hardware settings */
+ info->signals |= SerialSignal_RTS + SerialSignal_DTR;
+ program_hw(info);
+
+ /* enable network layer transmit */
+ dev->trans_start = jiffies;
+ netif_start_queue(dev);
+
+ /* inform generic HDLC layer of current DCD status */
+ spin_lock_irqsave(&info->lock, flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock, flags);
+ hdlc_set_carrier(info->signals & SerialSignal_DCD, dev);
+
+ return 0;
+}
+
+/**
+ * called by network layer when interface is disabled
+ * shutdown hardware and release resources
+ *
+ * dev pointer to network device structure
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_close(struct net_device *dev)
+{
+ struct slgt_info *info = dev_to_port(dev);
+ unsigned long flags;
+
+ DBGINFO(("%s hdlcdev_close\n", dev->name));
+
+ netif_stop_queue(dev);
+
+ /* shutdown adapter and release resources */
+ shutdown(info);
+
+ hdlc_close(dev);
+
+ spin_lock_irqsave(&info->netlock, flags);
+ info->netcount=0;
+ spin_unlock_irqrestore(&info->netlock, flags);
+
+ return 0;
+}
+
+/**
+ * called by network layer to process IOCTL call to network device
+ *
+ * dev pointer to network device structure
+ * ifr pointer to network interface request structure
+ * cmd IOCTL command code
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ const size_t size = sizeof(sync_serial_settings);
+ sync_serial_settings new_line;
+ sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
+ struct slgt_info *info = dev_to_port(dev);
+ unsigned int flags;
+
+ DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
+
+ /* return error if TTY interface open */
+ if (info->count)
+ return -EBUSY;
+
+ if (cmd != SIOCWANDEV)
+ return hdlc_ioctl(dev, ifr, cmd);
+
+ switch(ifr->ifr_settings.type) {
+ case IF_GET_IFACE: /* return current sync_serial_settings */
+
+ ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
+ if (ifr->ifr_settings.size < size) {
+ ifr->ifr_settings.size = size; /* data size wanted */
+ return -ENOBUFS;
+ }
+
+ flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
+ HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
+ HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
+ HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
+
+ switch (flags){
+ case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
+ case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
+ case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
+ case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
+ default: new_line.clock_type = CLOCK_DEFAULT;
+ }
+
+ new_line.clock_rate = info->params.clock_speed;
+ new_line.loopback = info->params.loopback ? 1:0;
+
+ if (copy_to_user(line, &new_line, size))
+ return -EFAULT;
+ return 0;
+
+ case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
+
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (copy_from_user(&new_line, line, size))
+ return -EFAULT;
+
+ switch (new_line.clock_type)
+ {
+ case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
+ case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
+ case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
+ case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
+ case CLOCK_DEFAULT: flags = info->params.flags &
+ (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
+ HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
+ HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
+ HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
+ default: return -EINVAL;
+ }
+
+ if (new_line.loopback != 0 && new_line.loopback != 1)
+ return -EINVAL;
+
+ info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
+ HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
+ HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
+ HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
+ info->params.flags |= flags;
+
+ info->params.loopback = new_line.loopback;
+
+ if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
+ info->params.clock_speed = new_line.clock_rate;
+ else
+ info->params.clock_speed = 0;
+
+ /* if network interface up, reprogram hardware */
+ if (info->netcount)
+ program_hw(info);
+ return 0;
+
+ default:
+ return hdlc_ioctl(dev, ifr, cmd);
+ }
+}
+
+/**
+ * called by network layer when transmit timeout is detected
+ *
+ * dev pointer to network device structure
+ */
+static void hdlcdev_tx_timeout(struct net_device *dev)
+{
+ struct slgt_info *info = dev_to_port(dev);
+ struct net_device_stats *stats = hdlc_stats(dev);
+ unsigned long flags;
+
+ DBGINFO(("%s hdlcdev_tx_timeout\n", dev->name));
+
+ stats->tx_errors++;
+ stats->tx_aborted_errors++;
+
+ spin_lock_irqsave(&info->lock,flags);
+ tx_stop(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ netif_wake_queue(dev);
+}
+
+/**
+ * called by device driver when transmit completes
+ * reenable network layer transmit if stopped
+ *
+ * info pointer to device instance information
+ */
+static void hdlcdev_tx_done(struct slgt_info *info)
+{
+ if (netif_queue_stopped(info->netdev))
+ netif_wake_queue(info->netdev);
+}
+
+/**
+ * called by device driver when frame received
+ * pass frame to network layer
+ *
+ * info pointer to device instance information
+ * buf pointer to buffer containing frame data
+ * size count of data bytes in buf
+ */
+static void hdlcdev_rx(struct slgt_info *info, char *buf, int size)
+{
+ struct sk_buff *skb = dev_alloc_skb(size);
+ struct net_device *dev = info->netdev;
+ struct net_device_stats *stats = hdlc_stats(dev);
+
+ DBGINFO(("%s hdlcdev_rx\n", dev->name));
+
+ if (skb == NULL) {
+ DBGERR(("%s: can't alloc skb, drop packet\n", dev->name));
+ stats->rx_dropped++;
+ return;
+ }
+
+ memcpy(skb_put(skb, size),buf,size);
+
+ skb->protocol = hdlc_type_trans(skb, info->netdev);
+
+ stats->rx_packets++;
+ stats->rx_bytes += size;
+
+ netif_rx(skb);
+
+ info->netdev->last_rx = jiffies;
+}
+
+/**
+ * called by device driver when adding device instance
+ * do generic HDLC initialization
+ *
+ * info pointer to device instance information
+ *
+ * returns 0 if success, otherwise error code
+ */
+static int hdlcdev_init(struct slgt_info *info)
+{
+ int rc;
+ struct net_device *dev;
+ hdlc_device *hdlc;
+
+ /* allocate and initialize network and HDLC layer objects */
+
+ if (!(dev = alloc_hdlcdev(info))) {
+ printk(KERN_ERR "%s hdlc device alloc failure\n", info->device_name);
+ return -ENOMEM;
+ }
+
+ /* for network layer reporting purposes only */
+ dev->mem_start = info->phys_reg_addr;
+ dev->mem_end = info->phys_reg_addr + SLGT_REG_SIZE - 1;
+ dev->irq = info->irq_level;
+
+ /* network layer callbacks and settings */
+ dev->do_ioctl = hdlcdev_ioctl;
+ dev->open = hdlcdev_open;
+ dev->stop = hdlcdev_close;
+ dev->tx_timeout = hdlcdev_tx_timeout;
+ dev->watchdog_timeo = 10*HZ;
+ dev->tx_queue_len = 50;
+
+ /* generic HDLC layer callbacks and settings */
+ hdlc = dev_to_hdlc(dev);
+ hdlc->attach = hdlcdev_attach;
+ hdlc->xmit = hdlcdev_xmit;
+
+ /* register objects with HDLC layer */
+ if ((rc = register_hdlc_device(dev))) {
+ printk(KERN_WARNING "%s: unable to register hdlc device\n", __FILE__);
+ free_netdev(dev);
+ return rc;
+ }
+
+ info->netdev = dev;
+ return 0;
+}
+
+/**
+ * called by device driver when removing device instance
+ * do generic HDLC cleanup
+ *
+ * info pointer to device instance information
+ */
+static void hdlcdev_exit(struct slgt_info *info)
+{
+ unregister_hdlc_device(info->netdev);
+ free_netdev(info->netdev);
+ info->netdev = NULL;
+}
+
+#endif /* ifdef CONFIG_HDLC */
+
+/*
+ * get async data from rx DMA buffers
+ */
+static void rx_async(struct slgt_info *info)
+{
+ struct tty_struct *tty = info->tty;
+ struct mgsl_icount *icount = &info->icount;
+ unsigned int start, end;
+ unsigned char *p;
+ unsigned char status;
+ struct slgt_desc *bufs = info->rbufs;
+ int i, count;
+ int chars = 0;
+ int stat;
+ unsigned char ch;
+
+ start = end = info->rbuf_current;
+
+ while(desc_complete(bufs[end])) {
+ count = desc_count(bufs[end]) - info->rbuf_index;
+ p = bufs[end].buf + info->rbuf_index;
+
+ DBGISR(("%s rx_async count=%d\n", info->device_name, count));
+ DBGDATA(info, p, count, "rx");
+
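+ /* in async mode each received character occupies two buffer bytes:
+ * a data byte followed by a status byte (BIT9=parity error,
+ * BIT8=framing error), enabled by RDCSR bit 06 in rx_start()
+ */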
+ for(i=0 ; i < count; i+=2, p+=2) {
+ if (tty && chars) {
+ tty_flip_buffer_push(tty);
+ chars = 0;
+ }
+ ch = *p;
+ icount->rx++;
+
+ stat = 0;
+
+ if ((status = *(p+1) & (BIT9 + BIT8))) {
+ if (status & BIT9)
+ icount->parity++;
+ else if (status & BIT8)
+ icount->frame++;
+ /* discard char if tty control flags say so */
+ if (status & info->ignore_status_mask)
+ continue;
+ if (status & BIT9)
+ stat = TTY_PARITY;
+ else if (status & BIT8)
+ stat = TTY_FRAME;
+ }
+ if (tty) {
+ tty_insert_flip_char(tty, ch, stat);
+ chars++;
+ }
+ }
+
+ if (i < count) {
+ /* receive buffer not completed */
+ info->rbuf_index += i;
+ info->rx_timer.expires = jiffies + 1;
+ add_timer(&info->rx_timer);
+ break;
+ }
+
+ info->rbuf_index = 0;
+ free_rbufs(info, end, end);
+
+ if (++end == info->rbuf_count)
+ end = 0;
+
+ /* if entire list searched then no more data available */
+ if (end == start)
+ break;
+ }
+
+ if (tty && chars)
+ tty_flip_buffer_push(tty);
+}
+
+/*
+ * return next bottom half action to perform
+ */
+static int bh_action(struct slgt_info *info)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&info->lock,flags);
+
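+ /* check pending work in priority order: receive, transmit, then status */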
+ if (info->pending_bh & BH_RECEIVE) {
+ info->pending_bh &= ~BH_RECEIVE;
+ rc = BH_RECEIVE;
+ } else if (info->pending_bh & BH_TRANSMIT) {
+ info->pending_bh &= ~BH_TRANSMIT;
+ rc = BH_TRANSMIT;
+ } else if (info->pending_bh & BH_STATUS) {
+ info->pending_bh &= ~BH_STATUS;
+ rc = BH_STATUS;
+ } else {
+ /* Mark BH routine as complete */
+ info->bh_running = 0;
+ info->bh_requested = 0;
+ rc = 0;
+ }
+
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ return rc;
+}
+
+/*
+ * perform bottom half processing
+ */
+static void bh_handler(void* context)
+{
+ struct slgt_info *info = context;
+ int action;
+
+ if (!info)
+ return;
+ info->bh_running = 1;
+
+ while((action = bh_action(info))) {
+ switch (action) {
+ case BH_RECEIVE:
+ DBGBH(("%s bh receive\n", info->device_name));
+ switch(info->params.mode) {
+ case MGSL_MODE_ASYNC:
+ rx_async(info);
+ break;
+ case MGSL_MODE_HDLC:
+ while(rx_get_frame(info));
+ break;
+ case MGSL_MODE_RAW:
+ while(rx_get_buf(info));
+ break;
+ }
+ /* restart receiver if rx DMA buffers exhausted */
+ if (info->rx_restart)
+ rx_start(info);
+ break;
+ case BH_TRANSMIT:
+ bh_transmit(info);
+ break;
+ case BH_STATUS:
+ DBGBH(("%s bh status\n", info->device_name));
+ info->ri_chkcount = 0;
+ info->dsr_chkcount = 0;
+ info->dcd_chkcount = 0;
+ info->cts_chkcount = 0;
+ break;
+ default:
+ DBGBH(("%s unknown action\n", info->device_name));
+ break;
+ }
+ }
+ DBGBH(("%s bh_handler exit\n", info->device_name));
+}
+
+static void bh_transmit(struct slgt_info *info)
+{
+ struct tty_struct *tty = info->tty;
+
+ DBGBH(("%s bh_transmit\n", info->device_name));
+ if (tty) {
+ tty_wakeup(tty);
+ wake_up_interruptible(&tty->write_wait);
+ }
+}
+
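+/* Modem input change handlers below share a pattern: if a signal changes
+ * state IO_PIN_SHUTDOWN_LIMIT times before bottom half status processing
+ * resets the check count, the associated interrupt is disabled
+ * (presumably to guard against a floating or noisy input line).
+ */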
+static void dsr_change(struct slgt_info *info)
+{
+ get_signals(info);
+ DBGISR(("dsr_change %s signals=%04X\n", info->device_name, info->signals));
+ if ((info->dsr_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
+ slgt_irq_off(info, IRQ_DSR);
+ return;
+ }
+ info->icount.dsr++;
+ if (info->signals & SerialSignal_DSR)
+ info->input_signal_events.dsr_up++;
+ else
+ info->input_signal_events.dsr_down++;
+ wake_up_interruptible(&info->status_event_wait_q);
+ wake_up_interruptible(&info->event_wait_q);
+ info->pending_bh |= BH_STATUS;
+}
+
+static void cts_change(struct slgt_info *info)
+{
+ get_signals(info);
+ DBGISR(("cts_change %s signals=%04X\n", info->device_name, info->signals));
+ if ((info->cts_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
+ slgt_irq_off(info, IRQ_CTS);
+ return;
+ }
+ info->icount.cts++;
+ if (info->signals & SerialSignal_CTS)
+ info->input_signal_events.cts_up++;
+ else
+ info->input_signal_events.cts_down++;
+ wake_up_interruptible(&info->status_event_wait_q);
+ wake_up_interruptible(&info->event_wait_q);
+ info->pending_bh |= BH_STATUS;
+
+ if (info->flags & ASYNC_CTS_FLOW) {
+ if (info->tty) {
+ if (info->tty->hw_stopped) {
+ if (info->signals & SerialSignal_CTS) {
+ info->tty->hw_stopped = 0;
+ info->pending_bh |= BH_TRANSMIT;
+ return;
+ }
+ } else {
+ if (!(info->signals & SerialSignal_CTS))
+ info->tty->hw_stopped = 1;
+ }
+ }
+ }
+}
+
+static void dcd_change(struct slgt_info *info)
+{
+ get_signals(info);
+ DBGISR(("dcd_change %s signals=%04X\n", info->device_name, info->signals));
+ if ((info->dcd_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
+ slgt_irq_off(info, IRQ_DCD);
+ return;
+ }
+ info->icount.dcd++;
+ if (info->signals & SerialSignal_DCD) {
+ info->input_signal_events.dcd_up++;
+ } else {
+ info->input_signal_events.dcd_down++;
+ }
+#ifdef CONFIG_HDLC
+ if (info->netcount)
+ hdlc_set_carrier(info->signals & SerialSignal_DCD, info->netdev);
+#endif
+ wake_up_interruptible(&info->status_event_wait_q);
+ wake_up_interruptible(&info->event_wait_q);
+ info->pending_bh |= BH_STATUS;
+
+ if (info->flags & ASYNC_CHECK_CD) {
+ if (info->signals & SerialSignal_DCD)
+ wake_up_interruptible(&info->open_wait);
+ else {
+ if (info->tty)
+ tty_hangup(info->tty);
+ }
+ }
+}
+
+static void ri_change(struct slgt_info *info)
+{
+ get_signals(info);
+ DBGISR(("ri_change %s signals=%04X\n", info->device_name, info->signals));
+ if ((info->ri_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
+ slgt_irq_off(info, IRQ_RI);
+ return;
+ }
+ info->icount.rng++;
+ if (info->signals & SerialSignal_RI) {
+ info->input_signal_events.ri_up++;
+ } else {
+ info->input_signal_events.ri_down++;
+ }
+ wake_up_interruptible(&info->status_event_wait_q);
+ wake_up_interruptible(&info->event_wait_q);
+ info->pending_bh |= BH_STATUS;
+}
+
+static void isr_serial(struct slgt_info *info)
+{
+ unsigned short status = rd_reg16(info, SSR);
+
+ DBGISR(("%s isr_serial status=%04X\n", info->device_name, status));
+
+ wr_reg16(info, SSR, status); /* clear pending */
+
+ info->irq_occurred = 1;
+
+ if (info->params.mode == MGSL_MODE_ASYNC) {
+ if (status & IRQ_TXIDLE) {
+ if (info->tx_count)
+ isr_txeom(info, status);
+ }
+ if ((status & IRQ_RXBREAK) && (status & RXBREAK)) {
+ info->icount.brk++;
+ /* process break detection if tty control allows */
+ if (info->tty) {
+ if (!(status & info->ignore_status_mask)) {
+ if (info->read_status_mask & MASK_BREAK) {
+ tty_insert_flip_char(info->tty, 0, TTY_BREAK);
+ if (info->flags & ASYNC_SAK)
+ do_SAK(info->tty);
+ }
+ }
+ }
+ }
+ } else {
+ if (status & (IRQ_TXIDLE + IRQ_TXUNDER))
+ isr_txeom(info, status);
+
+ if (status & IRQ_RXIDLE) {
+ if (status & RXIDLE)
+ info->icount.rxidle++;
+ else
+ info->icount.exithunt++;
+ wake_up_interruptible(&info->event_wait_q);
+ }
+
+ if (status & IRQ_RXOVER)
+ rx_start(info);
+ }
+
+ if (status & IRQ_DSR)
+ dsr_change(info);
+ if (status & IRQ_CTS)
+ cts_change(info);
+ if (status & IRQ_DCD)
+ dcd_change(info);
+ if (status & IRQ_RI)
+ ri_change(info);
+}
+
+static void isr_rdma(struct slgt_info *info)
+{
+ unsigned int status = rd_reg32(info, RDCSR);
+
+ DBGISR(("%s isr_rdma status=%08x\n", info->device_name, status));
+
+ /* RDCSR (rx DMA control/status)
+ *
+ * 31..07 reserved
+ * 06 save status byte to DMA buffer
+ * 05 error
+ * 04 eol (end of list)
+ * 03 eob (end of buffer)
+ * 02 IRQ enable
+ * 01 reset
+ * 00 enable
+ */
+ wr_reg32(info, RDCSR, status); /* clear pending */
+
+ if (status & (BIT5 + BIT4)) {
+ DBGISR(("%s isr_rdma rx_restart=1\n", info->device_name));
+ info->rx_restart = 1;
+ }
+ info->pending_bh |= BH_RECEIVE;
+}
+
+static void isr_tdma(struct slgt_info *info)
+{
+ unsigned int status = rd_reg32(info, TDCSR);
+
+ DBGISR(("%s isr_tdma status=%08x\n", info->device_name, status));
+
+ /* TDCSR (tx DMA control/status)
+ *
+ * 31..06 reserved
+ * 05 error
+ * 04 eol (end of list)
+ * 03 eob (end of buffer)
+ * 02 IRQ enable
+ * 01 reset
+ * 00 enable
+ */
+ wr_reg32(info, TDCSR, status); /* clear pending */
+
+ if (status & (BIT5 + BIT4 + BIT3)) {
+ // another transmit buffer has completed
+ // run bottom half to get more send data from user
+ info->pending_bh |= BH_TRANSMIT;
+ }
+}
+
+static void isr_txeom(struct slgt_info *info, unsigned short status)
+{
+ DBGISR(("%s txeom status=%04x\n", info->device_name, status));
+
+ slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);
+ tdma_reset(info);
+ reset_tbufs(info);
+ if (status & IRQ_TXUNDER) {
+ unsigned short val = rd_reg16(info, TCR);
+ wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */
+ wr_reg16(info, TCR, val); /* clear reset bit */
+ }
+
+ if (info->tx_active) {
+ if (info->params.mode != MGSL_MODE_ASYNC) {
+ if (status & IRQ_TXUNDER)
+ info->icount.txunder++;
+ else if (status & IRQ_TXIDLE)
+ info->icount.txok++;
+ }
+
+ info->tx_active = 0;
+ info->tx_count = 0;
+
+ del_timer(&info->tx_timer);
+
+ if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done) {
+ info->signals &= ~SerialSignal_RTS;
+ info->drop_rts_on_tx_done = 0;
+ set_signals(info);
+ }
+
+#ifdef CONFIG_HDLC
+ if (info->netcount)
+ hdlcdev_tx_done(info);
+ else
+#endif
+ {
+ if (info->tty && (info->tty->stopped || info->tty->hw_stopped)) {
+ tx_stop(info);
+ return;
+ }
+ info->pending_bh |= BH_TRANSMIT;
+ }
+ }
+}
+
+/* interrupt service routine
+ *
+ * irq interrupt number
+ * dev_id device ID supplied during interrupt registration
+ * regs interrupted processor context
+ */
+static irqreturn_t slgt_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ struct slgt_info *info;
+ unsigned int gsr;
+ unsigned int i;
+
+ DBGISR(("slgt_interrupt irq=%d entry\n", irq));
+
+ info = dev_id;
+ if (!info)
+ return IRQ_NONE;
+
+ spin_lock(&info->lock);
+
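+ /* GSR (global status) interrupt pending bits for port n:
+ * 8+n serial
+ * 16+2n rx DMA
+ * 17+2n tx DMA
+ */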
+ while((gsr = rd_reg32(info, GSR) & 0xffffff00)) {
+ DBGISR(("%s gsr=%08x\n", info->device_name, gsr));
+ info->irq_occurred = 1;
+ for(i=0; i < info->port_count ; i++) {
+ if (info->port_array[i] == NULL)
+ continue;
+ if (gsr & (BIT8 << i))
+ isr_serial(info->port_array[i]);
+ if (gsr & (BIT16 << (i*2)))
+ isr_rdma(info->port_array[i]);
+ if (gsr & (BIT17 << (i*2)))
+ isr_tdma(info->port_array[i]);
+ }
+ }
+
+ for(i=0; i < info->port_count ; i++) {
+ struct slgt_info *port = info->port_array[i];
+
+ if (port && (port->count || port->netcount) &&
+ port->pending_bh && !port->bh_running &&
+ !port->bh_requested) {
+ DBGISR(("%s bh queued\n", port->device_name));
+ schedule_work(&port->task);
+ port->bh_requested = 1;
+ }
+ }
+
+ spin_unlock(&info->lock);
+
+ DBGISR(("slgt_interrupt irq=%d exit\n", irq));
+ return IRQ_HANDLED;
+}
+
+static int startup(struct slgt_info *info)
+{
+ DBGINFO(("%s startup\n", info->device_name));
+
+ if (info->flags & ASYNC_INITIALIZED)
+ return 0;
+
+ if (!info->tx_buf) {
+ info->tx_buf = kmalloc(info->max_frame_size, GFP_KERNEL);
+ if (!info->tx_buf) {
+ DBGERR(("%s can't allocate tx buffer\n", info->device_name));
+ return -ENOMEM;
+ }
+ }
+
+ info->pending_bh = 0;
+
+ memset(&info->icount, 0, sizeof(info->icount));
+
+ /* program hardware for current parameters */
+ change_params(info);
+
+ if (info->tty)
+ clear_bit(TTY_IO_ERROR, &info->tty->flags);
+
+ info->flags |= ASYNC_INITIALIZED;
+
+ return 0;
+}
+
+/*
+ * called by close() and hangup() to shutdown hardware
+ */
+static void shutdown(struct slgt_info *info)
+{
+ unsigned long flags;
+
+ if (!(info->flags & ASYNC_INITIALIZED))
+ return;
+
+ DBGINFO(("%s shutdown\n", info->device_name));
+
+ /* clear status wait queue because status changes */
+ /* can't happen after shutting down the hardware */
+ wake_up_interruptible(&info->status_event_wait_q);
+ wake_up_interruptible(&info->event_wait_q);
+
+ del_timer_sync(&info->tx_timer);
+ del_timer_sync(&info->rx_timer);
+
+ kfree(info->tx_buf);
+ info->tx_buf = NULL;
+
+ spin_lock_irqsave(&info->lock,flags);
+
+ tx_stop(info);
+ rx_stop(info);
+
+ slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
+
+ if (!info->tty || info->tty->termios->c_cflag & HUPCL) {
+ info->signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+ set_signals(info);
+ }
+
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ if (info->tty)
+ set_bit(TTY_IO_ERROR, &info->tty->flags);
+
+ info->flags &= ~ASYNC_INITIALIZED;
+}
+
+static void program_hw(struct slgt_info *info)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock,flags);
+
+ rx_stop(info);
+ tx_stop(info);
+
+ if (info->params.mode == MGSL_MODE_HDLC ||
+ info->params.mode == MGSL_MODE_RAW ||
+ info->netcount)
+ hdlc_mode(info);
+ else
+ async_mode(info);
+
+ set_signals(info);
+
+ info->dcd_chkcount = 0;
+ info->cts_chkcount = 0;
+ info->ri_chkcount = 0;
+ info->dsr_chkcount = 0;
+
+ slgt_irq_on(info, IRQ_DCD | IRQ_CTS | IRQ_DSR);
+ get_signals(info);
+
+ if (info->netcount ||
+ (info->tty && info->tty->termios->c_cflag & CREAD))
+ rx_start(info);
+
+ spin_unlock_irqrestore(&info->lock,flags);
+}
+
+/*
+ * reconfigure adapter based on new parameters
+ */
+static void change_params(struct slgt_info *info)
+{
+ unsigned cflag;
+ int bits_per_char;
+
+ if (!info->tty || !info->tty->termios)
+ return;
+ DBGINFO(("%s change_params\n", info->device_name));
+
+ cflag = info->tty->termios->c_cflag;
+
+ /* if B0 rate (hangup) specified then negate DTR and RTS */
+ /* otherwise assert DTR and RTS */
+ if (cflag & CBAUD)
+ info->signals |= SerialSignal_RTS + SerialSignal_DTR;
+ else
+ info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
+
+ /* byte size and parity */
+
+ switch (cflag & CSIZE) {
+ case CS5: info->params.data_bits = 5; break;
+ case CS6: info->params.data_bits = 6; break;
+ case CS7: info->params.data_bits = 7; break;
+ case CS8: info->params.data_bits = 8; break;
+ default: info->params.data_bits = 7; break;
+ }
+
+ info->params.stop_bits = (cflag & CSTOPB) ? 2 : 1;
+
+ if (cflag & PARENB)
+ info->params.parity = (cflag & PARODD) ? ASYNC_PARITY_ODD : ASYNC_PARITY_EVEN;
+ else
+ info->params.parity = ASYNC_PARITY_NONE;
+
+ /* calculate number of jiffies to transmit a full
+ * FIFO (32 bytes) at specified data rate
+ */
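+ /* e.g. 9600 bps with 8 data bits and 1 stop bit gives
+ * bits_per_char = 10 (start + data + stop), so
+ * timeout = (32*HZ*10)/9600, about 33ms, plus HZ/50 (20ms) of slop
+ */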
+ bits_per_char = info->params.data_bits +
+ info->params.stop_bits + 1;
+
+ info->params.data_rate = tty_get_baud_rate(info->tty);
+
+ if (info->params.data_rate) {
+ info->timeout = (32*HZ*bits_per_char) /
+ info->params.data_rate;
+ }
+ info->timeout += HZ/50; /* Add .02 seconds of slop */
+
+ if (cflag & CRTSCTS)
+ info->flags |= ASYNC_CTS_FLOW;
+ else
+ info->flags &= ~ASYNC_CTS_FLOW;
+
+ if (cflag & CLOCAL)
+ info->flags &= ~ASYNC_CHECK_CD;
+ else
+ info->flags |= ASYNC_CHECK_CD;
+
+ /* process tty input control flags */
+
+ info->read_status_mask = IRQ_RXOVER;
+ if (I_INPCK(info->tty))
+ info->read_status_mask |= MASK_PARITY | MASK_FRAMING;
+ if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
+ info->read_status_mask |= MASK_BREAK;
+ if (I_IGNPAR(info->tty))
+ info->ignore_status_mask |= MASK_PARITY | MASK_FRAMING;
+ if (I_IGNBRK(info->tty)) {
+ info->ignore_status_mask |= MASK_BREAK;
+ /* If ignoring parity and break indicators, ignore
+ * overruns too. (For real raw support).
+ */
+ if (I_IGNPAR(info->tty))
+ info->ignore_status_mask |= MASK_OVERRUN;
+ }
+
+ program_hw(info);
+}
+
+static int get_stats(struct slgt_info *info, struct mgsl_icount __user *user_icount)
+{
+ DBGINFO(("%s get_stats\n", info->device_name));
+ if (!user_icount) {
+ memset(&info->icount, 0, sizeof(info->icount));
+ } else {
+ if (copy_to_user(user_icount, &info->icount, sizeof(struct mgsl_icount)))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int get_params(struct slgt_info *info, MGSL_PARAMS __user *user_params)
+{
+ DBGINFO(("%s get_params\n", info->device_name));
+ if (copy_to_user(user_params, &info->params, sizeof(MGSL_PARAMS)))
+ return -EFAULT;
+ return 0;
+}
+
+static int set_params(struct slgt_info *info, MGSL_PARAMS __user *new_params)
+{
+ unsigned long flags;
+ MGSL_PARAMS tmp_params;
+
+ DBGINFO(("%s set_params\n", info->device_name));
+ if (copy_from_user(&tmp_params, new_params, sizeof(MGSL_PARAMS)))
+ return -EFAULT;
+
+ spin_lock_irqsave(&info->lock, flags);
+ memcpy(&info->params, &tmp_params, sizeof(MGSL_PARAMS));
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ change_params(info);
+
+ return 0;
+}
+
+static int get_txidle(struct slgt_info *info, int __user *idle_mode)
+{
+ DBGINFO(("%s get_txidle=%d\n", info->device_name, info->idle_mode));
+ if (put_user(info->idle_mode, idle_mode))
+ return -EFAULT;
+ return 0;
+}
+
+static int set_txidle(struct slgt_info *info, int idle_mode)
+{
+ unsigned long flags;
+ DBGINFO(("%s set_txidle(%d)\n", info->device_name, idle_mode));
+ spin_lock_irqsave(&info->lock,flags);
+ info->idle_mode = idle_mode;
+ tx_set_idle(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+static int tx_enable(struct slgt_info *info, int enable)
+{
+ unsigned long flags;
+ DBGINFO(("%s tx_enable(%d)\n", info->device_name, enable));
+ spin_lock_irqsave(&info->lock,flags);
+ if (enable) {
+ if (!info->tx_enabled)
+ tx_start(info);
+ } else {
+ if (info->tx_enabled)
+ tx_stop(info);
+ }
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+/*
+ * abort transmit HDLC frame
+ */
+static int tx_abort(struct slgt_info *info)
+{
+ unsigned long flags;
+ DBGINFO(("%s tx_abort\n", info->device_name));
+ spin_lock_irqsave(&info->lock,flags);
+ tdma_reset(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+static int rx_enable(struct slgt_info *info, int enable)
+{
+ unsigned long flags;
+ DBGINFO(("%s rx_enable(%d)\n", info->device_name, enable));
+ spin_lock_irqsave(&info->lock,flags);
+ if (enable) {
+ if (!info->rx_enabled)
+ rx_start(info);
+ } else {
+ if (info->rx_enabled)
+ rx_stop(info);
+ }
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+/*
+ * wait for specified event to occur
+ */
+static int wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr)
+{
+ unsigned long flags;
+ int s;
+ int rc=0;
+ struct mgsl_icount cprev, cnow;
+ int events;
+ int mask;
+ struct _input_signal_events oldsigs, newsigs;
+ DECLARE_WAITQUEUE(wait, current);
+
+ if (get_user(mask, mask_ptr))
+ return -EFAULT;
+
+ DBGINFO(("%s wait_mgsl_event(%d)\n", info->device_name, mask));
+
+ spin_lock_irqsave(&info->lock,flags);
+
+ /* return immediately if state matches requested events */
+ get_signals(info);
+ s = info->signals;
+
+ events = mask &
+ ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
+ ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
+ ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
+ ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
+ if (events) {
+ spin_unlock_irqrestore(&info->lock,flags);
+ goto exit;
+ }
+
+ /* save current irq counts */
+ cprev = info->icount;
+ oldsigs = info->input_signal_events;
+
+ /* enable hunt and idle irqs if needed */
+ if (mask & (MgslEvent_ExitHuntMode+MgslEvent_IdleReceived)) {
+ unsigned short val = rd_reg16(info, SCR);
+ if (!(val & IRQ_RXIDLE))
+ wr_reg16(info, SCR, (unsigned short)(val | IRQ_RXIDLE));
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&info->event_wait_q, &wait);
+
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ for(;;) {
+ schedule();
+ if (signal_pending(current)) {
+ rc = -ERESTARTSYS;
+ break;
+ }
+
+ /* get current irq counts */
+ spin_lock_irqsave(&info->lock,flags);
+ cnow = info->icount;
+ newsigs = info->input_signal_events;
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ /* if no change, wait aborted for some reason */
+ if (newsigs.dsr_up == oldsigs.dsr_up &&
+ newsigs.dsr_down == oldsigs.dsr_down &&
+ newsigs.dcd_up == oldsigs.dcd_up &&
+ newsigs.dcd_down == oldsigs.dcd_down &&
+ newsigs.cts_up == oldsigs.cts_up &&
+ newsigs.cts_down == oldsigs.cts_down &&
+ newsigs.ri_up == oldsigs.ri_up &&
+ newsigs.ri_down == oldsigs.ri_down &&
+ cnow.exithunt == cprev.exithunt &&
+ cnow.rxidle == cprev.rxidle) {
+ rc = -EIO;
+ break;
+ }
+
+ events = mask &
+ ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
+ (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
+ (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
+ (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
+ (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
+ (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
+ (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
+ (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
+ (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
+ (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
+ if (events)
+ break;
+
+ cprev = cnow;
+ oldsigs = newsigs;
+ }
+
+ remove_wait_queue(&info->event_wait_q, &wait);
+ set_current_state(TASK_RUNNING);
+
+
+ if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
+ spin_lock_irqsave(&info->lock,flags);
+ if (!waitqueue_active(&info->event_wait_q)) {
+ /* disable exit hunt mode/idle rcvd IRQs */
+ wr_reg16(info, SCR,
+ (unsigned short)(rd_reg16(info, SCR) & ~IRQ_RXIDLE));
+ }
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+exit:
+ if (rc == 0)
+ rc = put_user(events, mask_ptr);
+ return rc;
+}
+
+static int get_interface(struct slgt_info *info, int __user *if_mode)
+{
+ DBGINFO(("%s get_interface=%x\n", info->device_name, info->if_mode));
+ if (put_user(info->if_mode, if_mode))
+ return -EFAULT;
+ return 0;
+}
+
+static int set_interface(struct slgt_info *info, int if_mode)
+{
+ unsigned long flags;
+ unsigned char val;
+
+ DBGINFO(("%s set_interface=%x)\n", info->device_name, if_mode));
+ spin_lock_irqsave(&info->lock,flags);
+ info->if_mode = if_mode;
+
+ msc_set_vcr(info);
+
+ /* TCR (tx control) 07 1=RTS driver control */
+ val = rd_reg16(info, TCR);
+ if (info->if_mode & MGSL_INTERFACE_RTS_EN)
+ val |= BIT7;
+ else
+ val &= ~BIT7;
+ wr_reg16(info, TCR, val);
+
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+static int modem_input_wait(struct slgt_info *info,int arg)
+{
+ unsigned long flags;
+ int rc;
+ struct mgsl_icount cprev, cnow;
+ DECLARE_WAITQUEUE(wait, current);
+
+ /* save current irq counts */
+ spin_lock_irqsave(&info->lock,flags);
+ cprev = info->icount;
+ add_wait_queue(&info->status_event_wait_q, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ for(;;) {
+ schedule();
+ if (signal_pending(current)) {
+ rc = -ERESTARTSYS;
+ break;
+ }
+
+ /* get new irq counts */
+ spin_lock_irqsave(&info->lock,flags);
+ cnow = info->icount;
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ /* if no change, wait aborted for some reason */
+ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
+ cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
+ rc = -EIO;
+ break;
+ }
+
+ /* check for change in caller specified modem input */
+ if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
+ (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
+ (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
+ (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
+ rc = 0;
+ break;
+ }
+
+ cprev = cnow;
+ }
+ remove_wait_queue(&info->status_event_wait_q, &wait);
+ set_current_state(TASK_RUNNING);
+ return rc;
+}
+
+/*
+ * return state of serial control and status signals
+ */
+static int tiocmget(struct tty_struct *tty, struct file *file)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned int result;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock,flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ result = ((info->signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
+ ((info->signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
+ ((info->signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
+ ((info->signals & SerialSignal_RI) ? TIOCM_RNG:0) +
+ ((info->signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
+ ((info->signals & SerialSignal_CTS) ? TIOCM_CTS:0);
+
+ DBGINFO(("%s tiocmget value=%08X\n", info->device_name, result));
+ return result;
+}
+
+/*
+ * set modem control signals (DTR/RTS)
+ *
+ * cmd signal command: TIOCMBIS = set bit TIOCMBIC = clear bit
+ * TIOCMSET = set/clear signal values
+ * value bit mask for command
+ */
+static int tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear)
+{
+ struct slgt_info *info = tty->driver_data;
+ unsigned long flags;
+
+ DBGINFO(("%s tiocmset(%x,%x)\n", info->device_name, set, clear));
+
+ if (set & TIOCM_RTS)
+ info->signals |= SerialSignal_RTS;
+ if (set & TIOCM_DTR)
+ info->signals |= SerialSignal_DTR;
+ if (clear & TIOCM_RTS)
+ info->signals &= ~SerialSignal_RTS;
+ if (clear & TIOCM_DTR)
+ info->signals &= ~SerialSignal_DTR;
+
+ spin_lock_irqsave(&info->lock,flags);
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ return 0;
+}
+
+/*
+ * block current process until the device is ready to open
+ */
+static int block_til_ready(struct tty_struct *tty, struct file *filp,
+ struct slgt_info *info)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int retval;
+ int do_clocal = 0, extra_count = 0;
+ unsigned long flags;
+
+ DBGINFO(("%s block_til_ready\n", tty->driver->name));
+
+ if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
+ /* nonblock mode is set or port is not enabled */
+ info->flags |= ASYNC_NORMAL_ACTIVE;
+ return 0;
+ }
+
+ if (tty->termios->c_cflag & CLOCAL)
+ do_clocal = 1;
+
+ /* Wait for carrier detect and the line to become
+ * free (i.e., not in use by the callout). While we are in
+ * this loop, info->count is dropped by one, so that
+ * close() knows when to free things. We restore it upon
+ * exit, either normal or abnormal.
+ */
+
+ retval = 0;
+ add_wait_queue(&info->open_wait, &wait);
+
+ spin_lock_irqsave(&info->lock, flags);
+ if (!tty_hung_up_p(filp)) {
+ extra_count = 1;
+ info->count--;
+ }
+ spin_unlock_irqrestore(&info->lock, flags);
+ info->blocked_open++;
+
+ while (1) {
+ if ((tty->termios->c_cflag & CBAUD)) {
+ spin_lock_irqsave(&info->lock,flags);
+ info->signals |= SerialSignal_RTS + SerialSignal_DTR;
+ set_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (tty_hung_up_p(filp) || !(info->flags & ASYNC_INITIALIZED)){
+ retval = (info->flags & ASYNC_HUP_NOTIFY) ?
+ -EAGAIN : -ERESTARTSYS;
+ break;
+ }
+
+ spin_lock_irqsave(&info->lock,flags);
+ get_signals(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ if (!(info->flags & ASYNC_CLOSING) &&
+ (do_clocal || (info->signals & SerialSignal_DCD)) ) {
+ break;
+ }
+
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+
+ DBGINFO(("%s block_til_ready wait\n", tty->driver->name));
+ schedule();
+ }
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&info->open_wait, &wait);
+
+ if (extra_count)
+ info->count++;
+ info->blocked_open--;
+
+ if (!retval)
+ info->flags |= ASYNC_NORMAL_ACTIVE;
+
+ DBGINFO(("%s block_til_ready ready, rc=%d\n", tty->driver->name, retval));
+ return retval;
+}
+
+static int alloc_tmp_rbuf(struct slgt_info *info)
+{
+ info->tmp_rbuf = kmalloc(info->max_frame_size, GFP_KERNEL);
+ if (info->tmp_rbuf == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+static void free_tmp_rbuf(struct slgt_info *info)
+{
+ kfree(info->tmp_rbuf);
+ info->tmp_rbuf = NULL;
+}
+
+/*
+ * allocate DMA descriptor lists.
+ */
+static int alloc_desc(struct slgt_info *info)
+{
+ unsigned int i;
+ unsigned int pbufs;
+
+ /* allocate memory to hold descriptor lists */
+ info->bufs = pci_alloc_consistent(info->pdev, DESC_LIST_SIZE, &info->bufs_dma_addr);
+ if (info->bufs == NULL)
+ return -ENOMEM;
+
+ memset(info->bufs, 0, DESC_LIST_SIZE);
+
+ info->rbufs = (struct slgt_desc*)info->bufs;
+ info->tbufs = ((struct slgt_desc*)info->bufs) + info->rbuf_count;
+
+ pbufs = (unsigned int)info->bufs_dma_addr;
+
+ /*
+ * Build circular lists of descriptors
+ */
+
+ for (i=0; i < info->rbuf_count; i++) {
+ /* physical address of this descriptor */
+ info->rbufs[i].pdesc = pbufs + (i * sizeof(struct slgt_desc));
+
+ /* physical address of next descriptor */
+ if (i == info->rbuf_count - 1)
+ info->rbufs[i].next = cpu_to_le32(pbufs);
+ else
+ info->rbufs[i].next = cpu_to_le32(pbufs + ((i+1) * sizeof(struct slgt_desc)));
+ set_desc_count(info->rbufs[i], DMABUFSIZE);
+ }
+
+ for (i=0; i < info->tbuf_count; i++) {
+ /* physical address of this descriptor */
+ info->tbufs[i].pdesc = pbufs + ((info->rbuf_count + i) * sizeof(struct slgt_desc));
+
+ /* physical address of next descriptor */
+ if (i == info->tbuf_count - 1)
+ info->tbufs[i].next = cpu_to_le32(pbufs + info->rbuf_count * sizeof(struct slgt_desc));
+ else
+ info->tbufs[i].next = cpu_to_le32(pbufs + ((info->rbuf_count + i + 1) * sizeof(struct slgt_desc)));
+ }
+
+ return 0;
+}
+
+static void free_desc(struct slgt_info *info)
+{
+ if (info->bufs != NULL) {
+ pci_free_consistent(info->pdev, DESC_LIST_SIZE, info->bufs, info->bufs_dma_addr);
+ info->bufs = NULL;
+ info->rbufs = NULL;
+ info->tbufs = NULL;
+ }
+}
+
+static int alloc_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
+{
+ int i;
+ for (i=0; i < count; i++) {
+ if ((bufs[i].buf = pci_alloc_consistent(info->pdev, DMABUFSIZE, &bufs[i].buf_dma_addr)) == NULL)
+ return -ENOMEM;
+ bufs[i].pbuf = cpu_to_le32((unsigned int)bufs[i].buf_dma_addr);
+ }
+ return 0;
+}
+
+static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
+{
+ int i;
+ for (i=0; i < count; i++) {
+ if (bufs[i].buf == NULL)
+ continue;
+ pci_free_consistent(info->pdev, DMABUFSIZE, bufs[i].buf, bufs[i].buf_dma_addr);
+ bufs[i].buf = NULL;
+ }
+}
+
+static int alloc_dma_bufs(struct slgt_info *info)
+{
+ info->rbuf_count = 32;
+ info->tbuf_count = 32;
+
+ if (alloc_desc(info) < 0 ||
+ alloc_bufs(info, info->rbufs, info->rbuf_count) < 0 ||
+ alloc_bufs(info, info->tbufs, info->tbuf_count) < 0 ||
+ alloc_tmp_rbuf(info) < 0) {
+ DBGERR(("%s DMA buffer alloc fail\n", info->device_name));
+ return -ENOMEM;
+ }
+ reset_rbufs(info);
+ return 0;
+}
+
+static void free_dma_bufs(struct slgt_info *info)
+{
+ if (info->bufs) {
+ free_bufs(info, info->rbufs, info->rbuf_count);
+ free_bufs(info, info->tbufs, info->tbuf_count);
+ free_desc(info);
+ }
+ free_tmp_rbuf(info);
+}
+
+static int claim_resources(struct slgt_info *info)
+{
+ if (request_mem_region(info->phys_reg_addr, SLGT_REG_SIZE, "synclink_gt") == NULL) {
+ DBGERR(("%s reg addr conflict, addr=%08X\n",
+ info->device_name, info->phys_reg_addr));
+ info->init_error = DiagStatus_AddressConflict;
+ goto errout;
+ }
+ else
+ info->reg_addr_requested = 1;
+
+ info->reg_addr = ioremap(info->phys_reg_addr, SLGT_REG_SIZE);
+ if (!info->reg_addr) {
+ DBGERR(("%s cant map device registers, addr=%08X\n",
+ info->device_name, info->phys_reg_addr));
+ info->init_error = DiagStatus_CantAssignPciResources;
+ goto errout;
+ }
+ return 0;
+
+errout:
+ release_resources(info);
+ return -ENODEV;
+}
+
+static void release_resources(struct slgt_info *info)
+{
+ if (info->irq_requested) {
+ free_irq(info->irq_level, info);
+ info->irq_requested = 0;
+ }
+
+ if (info->reg_addr_requested) {
+ release_mem_region(info->phys_reg_addr, SLGT_REG_SIZE);
+ info->reg_addr_requested = 0;
+ }
+
+ if (info->reg_addr) {
+ iounmap(info->reg_addr);
+ info->reg_addr = NULL;
+ }
+}
+
+/* Add the specified device instance data structure to the
+ * global linked list of devices and increment the device count.
+ */
+static void add_device(struct slgt_info *info)
+{
+ char *devstr;
+
+ info->next_device = NULL;
+ info->line = slgt_device_count;
+ sprintf(info->device_name, "%s%d", tty_dev_prefix, info->line);
+
+ if (info->line < MAX_DEVICES) {
+ if (maxframe[info->line])
+ info->max_frame_size = maxframe[info->line];
+ info->dosyncppp = dosyncppp[info->line];
+ }
+
+ slgt_device_count++;
+
+ if (!slgt_device_list)
+ slgt_device_list = info;
+ else {
+ struct slgt_info *current_dev = slgt_device_list;
+ while(current_dev->next_device)
+ current_dev = current_dev->next_device;
+ current_dev->next_device = info;
+ }
+
+ if (info->max_frame_size < 4096)
+ info->max_frame_size = 4096;
+ else if (info->max_frame_size > 65535)
+ info->max_frame_size = 65535;
+
+ switch(info->pdev->device) {
+ case SYNCLINK_GT_DEVICE_ID:
+ devstr = "GT";
+ break;
+ case SYNCLINK_GT4_DEVICE_ID:
+ devstr = "GT4";
+ break;
+ case SYNCLINK_AC_DEVICE_ID:
+ devstr = "AC";
+ info->params.mode = MGSL_MODE_ASYNC;
+ break;
+ default:
+ devstr = "(unknown model)";
+ }
+ printk("SyncLink %s %s IO=%08x IRQ=%d MaxFrameSize=%u\n",
+ devstr, info->device_name, info->phys_reg_addr,
+ info->irq_level, info->max_frame_size);
+
+#ifdef CONFIG_HDLC
+ hdlcdev_init(info);
+#endif
+}
+
+/*
+ * allocate device instance structure, return NULL on failure
+ */
+static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
+{
+ struct slgt_info *info;
+
+ info = kmalloc(sizeof(struct slgt_info), GFP_KERNEL);
+
+ if (!info) {
+ DBGERR(("%s device alloc failed adapter=%d port=%d\n",
+ driver_name, adapter_num, port_num));
+ } else {
+ memset(info, 0, sizeof(struct slgt_info));
+ info->magic = MGSL_MAGIC;
+ INIT_WORK(&info->task, bh_handler, info);
+ info->max_frame_size = 4096;
+ info->raw_rx_size = DMABUFSIZE;
+ info->close_delay = 5*HZ/10;
+ info->closing_wait = 30*HZ;
+ init_waitqueue_head(&info->open_wait);
+ init_waitqueue_head(&info->close_wait);
+ init_waitqueue_head(&info->status_event_wait_q);
+ init_waitqueue_head(&info->event_wait_q);
+ spin_lock_init(&info->netlock);
+ memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
+ info->idle_mode = HDLC_TXIDLE_FLAGS;
+ info->adapter_num = adapter_num;
+ info->port_num = port_num;
+
+ init_timer(&info->tx_timer);
+ info->tx_timer.data = (unsigned long)info;
+ info->tx_timer.function = tx_timeout;
+
+ init_timer(&info->rx_timer);
+ info->rx_timer.data = (unsigned long)info;
+ info->rx_timer.function = rx_timeout;
+
+ /* Copy configuration info to device instance data */
+ info->pdev = pdev;
+ info->irq_level = pdev->irq;
+ info->phys_reg_addr = pci_resource_start(pdev,0);
+
+ info->bus_type = MGSL_BUS_TYPE_PCI;
+ info->irq_flags = SA_SHIRQ;
+
+ info->init_error = -1; /* assume error, set to 0 on successful init */
+ }
+
+ return info;
+}
+
+static void device_init(int adapter_num, struct pci_dev *pdev)
+{
+ struct slgt_info *port_array[SLGT_MAX_PORTS];
+ int i;
+ int port_count = 1;
+
+ if (pdev->device == SYNCLINK_GT4_DEVICE_ID)
+ port_count = 4;
+
+ /* allocate device instances for all ports */
+ for (i=0; i < port_count; ++i) {
+ port_array[i] = alloc_dev(adapter_num, i, pdev);
+ if (port_array[i] == NULL) {
+ for (--i; i >= 0; --i)
+ kfree(port_array[i]);
+ return;
+ }
+ }
+
+ /* give copy of port_array to all ports and add to device list */
+ for (i=0; i < port_count; ++i) {
+ memcpy(port_array[i]->port_array, port_array, sizeof(port_array));
+ add_device(port_array[i]);
+ port_array[i]->port_count = port_count;
+ spin_lock_init(&port_array[i]->lock);
+ }
+
+ /* Allocate and claim adapter resources */
+ if (!claim_resources(port_array[0])) {
+
+ alloc_dma_bufs(port_array[0]);
+
+ /* copy resource information from first port to others */
+ for (i = 1; i < port_count; ++i) {
+ port_array[i]->lock = port_array[0]->lock;
+ port_array[i]->irq_level = port_array[0]->irq_level;
+ port_array[i]->reg_addr = port_array[0]->reg_addr;
+ alloc_dma_bufs(port_array[i]);
+ }
+
+ if (request_irq(port_array[0]->irq_level,
+ slgt_interrupt,
+ port_array[0]->irq_flags,
+ port_array[0]->device_name,
+ port_array[0]) < 0) {
+ DBGERR(("%s request_irq failed IRQ=%d\n",
+ port_array[0]->device_name,
+ port_array[0]->irq_level));
+ } else {
+ port_array[0]->irq_requested = 1;
+ adapter_test(port_array[0]);
+ for (i=1 ; i < port_count ; i++)
+ port_array[i]->init_error = port_array[0]->init_error;
+ }
+ }
+}
+
+static int __devinit init_one(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ if (pci_enable_device(dev)) {
+ printk("error enabling pci device %p\n", dev);
+ return -EIO;
+ }
+ pci_set_master(dev);
+ device_init(slgt_device_count, dev);
+ return 0;
+}
+
+static void __devexit remove_one(struct pci_dev *dev)
+{
+}
+
+static struct tty_operations ops = {
+ .open = open,
+ .close = close,
+ .write = write,
+ .put_char = put_char,
+ .flush_chars = flush_chars,
+ .write_room = write_room,
+ .chars_in_buffer = chars_in_buffer,
+ .flush_buffer = flush_buffer,
+ .ioctl = ioctl,
+ .throttle = throttle,
+ .unthrottle = unthrottle,
+ .send_xchar = send_xchar,
+ .break_ctl = set_break,
+ .wait_until_sent = wait_until_sent,
+ .read_proc = read_proc,
+ .set_termios = set_termios,
+ .stop = tx_hold,
+ .start = tx_release,
+ .hangup = hangup,
+ .tiocmget = tiocmget,
+ .tiocmset = tiocmset,
+};
+
+static void slgt_cleanup(void)
+{
+ int rc;
+ struct slgt_info *info;
+ struct slgt_info *tmp;
+
+ printk("unload %s %s\n", driver_name, driver_version);
+
+ if (serial_driver) {
+ if ((rc = tty_unregister_driver(serial_driver)))
+ DBGERR(("tty_unregister_driver error=%d\n", rc));
+ put_tty_driver(serial_driver);
+ }
+
+ /* reset devices */
+ info = slgt_device_list;
+ while(info) {
+ reset_port(info);
+ info = info->next_device;
+ }
+
+ /* release devices */
+ info = slgt_device_list;
+ while(info) {
+#ifdef CONFIG_HDLC
+ hdlcdev_exit(info);
+#endif
+ free_dma_bufs(info);
+ free_tmp_rbuf(info);
+ if (info->port_num == 0)
+ release_resources(info);
+ tmp = info;
+ info = info->next_device;
+ kfree(tmp);
+ }
+
+ if (pci_registered)
+ pci_unregister_driver(&pci_driver);
+}
+
+/*
+ * Driver initialization entry point.
+ */
+static int __init slgt_init(void)
+{
+ int rc;
+
+ printk("%s %s\n", driver_name, driver_version);
+
+ slgt_device_count = 0;
+ if ((rc = pci_register_driver(&pci_driver)) < 0) {
+ printk("%s pci_register_driver error=%d\n", driver_name, rc);
+ return rc;
+ }
+ pci_registered = 1;
+
+ if (!slgt_device_list) {
+ printk("%s no devices found\n",driver_name);
+ return -ENODEV;
+ }
+
+ serial_driver = alloc_tty_driver(MAX_DEVICES);
+ if (!serial_driver) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ /* Initialize the tty_driver structure */
+
+ serial_driver->owner = THIS_MODULE;
+ serial_driver->driver_name = tty_driver_name;
+ serial_driver->name = tty_dev_prefix;
+ serial_driver->major = ttymajor;
+ serial_driver->minor_start = 64;
+ serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ serial_driver->subtype = SERIAL_TYPE_NORMAL;
+ serial_driver->init_termios = tty_std_termios;
+ serial_driver->init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ serial_driver->flags = TTY_DRIVER_REAL_RAW;
+ tty_set_operations(serial_driver, &ops);
+ if ((rc = tty_register_driver(serial_driver)) < 0) {
+ DBGERR(("%s can't register serial driver\n", driver_name));
+ put_tty_driver(serial_driver);
+ serial_driver = NULL;
+ goto error;
+ }
+
+ printk("%s %s, tty major#%d\n",
+ driver_name, driver_version,
+ serial_driver->major);
+
+ return 0;
+
+error:
+ slgt_cleanup();
+ return rc;
+}
+
+static void __exit slgt_exit(void)
+{
+ slgt_cleanup();
+}
+
+module_init(slgt_init);
+module_exit(slgt_exit);
+
+/*
+ * register access routines
+ */
+
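+/* Registers at offsets below 0x80 are common to all ports on an adapter.
+ * Registers at 0x80 and above are per port, with each port's copy offset
+ * by (port_num * 32) bytes from the base address.
+ */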
+#define CALC_REGADDR() \
+ unsigned long reg_addr = ((unsigned long)info->reg_addr) + addr; \
+ if (addr >= 0x80) \
+ reg_addr += (info->port_num) * 32;
+
+static __u8 rd_reg8(struct slgt_info *info, unsigned int addr)
+{
+ CALC_REGADDR();
+ return readb((void __iomem *)reg_addr);
+}
+
+static void wr_reg8(struct slgt_info *info, unsigned int addr, __u8 value)
+{
+ CALC_REGADDR();
+ writeb(value, (void __iomem *)reg_addr);
+}
+
+static __u16 rd_reg16(struct slgt_info *info, unsigned int addr)
+{
+ CALC_REGADDR();
+ return readw((void __iomem *)reg_addr);
+}
+
+static void wr_reg16(struct slgt_info *info, unsigned int addr, __u16 value)
+{
+ CALC_REGADDR();
+ writew(value, (void __iomem *)reg_addr);
+}
+
+static __u32 rd_reg32(struct slgt_info *info, unsigned int addr)
+{
+ CALC_REGADDR();
+ return readl((void __iomem *)reg_addr);
+}
+
+static void wr_reg32(struct slgt_info *info, unsigned int addr, __u32 value)
+{
+ CALC_REGADDR();
+ writel(value, (void __iomem *)reg_addr);
+}
+
+static void rdma_reset(struct slgt_info *info)
+{
+ unsigned int i;
+
+ /* set reset bit */
+ wr_reg32(info, RDCSR, BIT1);
+
+ /* wait for enable bit cleared */
+ for(i=0 ; i < 1000 ; i++)
+ if (!(rd_reg32(info, RDCSR) & BIT0))
+ break;
+}
+
+static void tdma_reset(struct slgt_info *info)
+{
+ unsigned int i;
+
+ /* set reset bit */
+ wr_reg32(info, TDCSR, BIT1);
+
+ /* wait for enable bit cleared */
+ for(i=0 ; i < 1000 ; i++)
+ if (!(rd_reg32(info, TDCSR) & BIT0))
+ break;
+}
+
+/*
+ * enable internal loopback
+ * TxCLK and RxCLK are generated from BRG
+ * and TxD is looped back to RxD internally.
+ */
+static void enable_loopback(struct slgt_info *info)
+{
+ /* SCR (serial control) BIT2=loopback enable */
+ wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) | BIT2));
+
+ if (info->params.mode != MGSL_MODE_ASYNC) {
+ /* CCR (clock control)
+ * 07..05 tx clock source (010 = BRG)
+ * 04..02 rx clock source (010 = BRG)
+ * 01 auxclk enable (0 = disable)
+ * 00 BRG enable (1 = enable)
+ *
+ * 0100 1001
+ */
+ wr_reg8(info, CCR, 0x49);
+
+ /* set speed if available, otherwise use default */
+ if (info->params.clock_speed)
+ set_rate(info, info->params.clock_speed);
+ else
+ set_rate(info, 3686400);
+ }
+}
+
+/*
+ * set baud rate generator to specified rate
+ */
+static void set_rate(struct slgt_info *info, u32 rate)
+{
+ unsigned int div;
+ static unsigned int osc = 14745600;
+
+ /* div = osc/rate - 1
+ *
+ * Round div up if osc/rate is not integer to
+ * force to next slowest rate.
+ */
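+ /* e.g. rate=9600: osc/rate = 1536 exactly, so div = 1535 is programmed.
+ * A rate that does not evenly divide osc keeps the truncated quotient,
+ * selecting the next slower output rate.
+ */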
+
+ if (rate) {
+ div = osc/rate;
+ if (!(osc % rate) && div)
+ div--;
+ wr_reg16(info, BDR, (unsigned short)div);
+ }
+}
+
+static void rx_stop(struct slgt_info *info)
+{
+ unsigned short val;
+
+ /* disable and reset receiver */
+ val = rd_reg16(info, RCR) & ~BIT1; /* clear enable bit */
+ wr_reg16(info, RCR, (unsigned short)(val | BIT2)); /* set reset bit */
+ wr_reg16(info, RCR, val); /* clear reset bit */
+
+ slgt_irq_off(info, IRQ_RXOVER + IRQ_RXDATA + IRQ_RXIDLE);
+
+ /* clear pending rx interrupts */
+ wr_reg16(info, SSR, IRQ_RXIDLE + IRQ_RXOVER);
+
+ rdma_reset(info);
+
+ info->rx_enabled = 0;
+ info->rx_restart = 0;
+}
+
+static void rx_start(struct slgt_info *info)
+{
+ unsigned short val;
+
+ slgt_irq_off(info, IRQ_RXOVER + IRQ_RXDATA);
+
+ /* clear pending rx overrun IRQ */
+ wr_reg16(info, SSR, IRQ_RXOVER);
+
+ /* reset and disable receiver */
+ val = rd_reg16(info, RCR) & ~BIT1; /* clear enable bit */
+ wr_reg16(info, RCR, (unsigned short)(val | BIT2)); /* set reset bit */
+ wr_reg16(info, RCR, val); /* clear reset bit */
+
+ rdma_reset(info);
+ reset_rbufs(info);
+
+ /* set 1st descriptor address */
+ wr_reg32(info, RDDAR, info->rbufs[0].pdesc);
+
+ if (info->params.mode != MGSL_MODE_ASYNC) {
+ /* enable rx DMA and DMA interrupt */
+ wr_reg32(info, RDCSR, (BIT2 + BIT0));
+ } else {
+ /* enable saving of rx status, rx DMA and DMA interrupt */
+ wr_reg32(info, RDCSR, (BIT6 + BIT2 + BIT0));
+ }
+
+ slgt_irq_on(info, IRQ_RXOVER);
+
+ /* enable receiver */
+ wr_reg16(info, RCR, (unsigned short)(rd_reg16(info, RCR) | BIT1));
+
+ info->rx_restart = 0;
+ info->rx_enabled = 1;
+}
+
+static void tx_start(struct slgt_info *info)
+{
+ if (!info->tx_enabled) {
+ wr_reg16(info, TCR,
+ (unsigned short)(rd_reg16(info, TCR) | BIT1));
+ info->tx_enabled = 1;
+ }
+
+ if (info->tx_count) {
+ info->drop_rts_on_tx_done = 0;
+
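+ /* in sync modes with auto RTS, assert RTS for the duration of the
+ * frame if not already asserted; isr_txeom() drops it again when
+ * the transmit completes (drop_rts_on_tx_done)
+ */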
+ if (info->params.mode != MGSL_MODE_ASYNC) {
+ if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
+ get_signals(info);
+ if (!(info->signals & SerialSignal_RTS)) {
+ info->signals |= SerialSignal_RTS;
+ set_signals(info);
+ info->drop_rts_on_tx_done = 1;
+ }
+ }
+
+ slgt_irq_off(info, IRQ_TXDATA);
+ slgt_irq_on(info, IRQ_TXUNDER + IRQ_TXIDLE);
+ /* clear tx idle and underrun status bits */
+ wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER));
+
+ if (!(rd_reg32(info, TDCSR) & BIT0)) {
+ /* tx DMA stopped, restart tx DMA */
+ tdma_reset(info);
+ /* set 1st descriptor address */
+ wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc);
+ if (info->params.mode == MGSL_MODE_RAW)
+ wr_reg32(info, TDCSR, BIT2 + BIT0); /* IRQ + DMA enable */
+ else
+ wr_reg32(info, TDCSR, BIT0); /* DMA enable */
+ }
+
+ if (info->params.mode != MGSL_MODE_RAW) {
+ info->tx_timer.expires = jiffies + msecs_to_jiffies(5000);
+ add_timer(&info->tx_timer);
+ }
+ } else {
+ tdma_reset(info);
+ /* set 1st descriptor address */
+ wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc);
+
+ slgt_irq_off(info, IRQ_TXDATA);
+ slgt_irq_on(info, IRQ_TXIDLE);
+ /* clear tx idle status bit */
+ wr_reg16(info, SSR, IRQ_TXIDLE);
+
+ /* enable tx DMA */
+ wr_reg32(info, TDCSR, BIT0);
+ }
+
+ info->tx_active = 1;
+ }
+}
+
+static void tx_stop(struct slgt_info *info)
+{
+ unsigned short val;
+
+ del_timer(&info->tx_timer);
+
+ tdma_reset(info);
+
+ /* reset and disable transmitter */
+ val = rd_reg16(info, TCR) & ~BIT1; /* clear enable bit */
+ wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */
+ wr_reg16(info, TCR, val); /* clear reset */
+
+ slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);
+
+ /* clear tx idle and underrun status bit */
+ wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER));
+
+ reset_tbufs(info);
+
+ info->tx_enabled = 0;
+ info->tx_active = 0;
+}
+
+static void reset_port(struct slgt_info *info)
+{
+ if (!info->reg_addr)
+ return;
+
+ tx_stop(info);
+ rx_stop(info);
+
+ info->signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
+ set_signals(info);
+
+ slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
+}
+
+static void reset_adapter(struct slgt_info *info)
+{
+ int i;
+ for (i=0; i < info->port_count; ++i) {
+ if (info->port_array[i])
+ reset_port(info->port_array[i]);
+ }
+}
+
+static void async_mode(struct slgt_info *info)
+{
+ unsigned short val;
+
+ slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
+ tx_stop(info);
+ rx_stop(info);
+
+ /* TCR (tx control)
+ *
+ * 15..13 mode, 010=async
+ * 12..10 encoding, 000=NRZ
+ * 09 parity enable
+ * 08 1=odd parity, 0=even parity
+ * 07 1=RTS driver control
+ * 06 1=break enable
+ * 05..04 character length
+ * 00=5 bits
+ * 01=6 bits
+ * 10=7 bits
+ * 11=8 bits
+ * 03 0=1 stop bit, 1=2 stop bits
+ * 02 reset
+ * 01 enable
+ * 00 auto-CTS enable
+ */
+ val = 0x4000;
+
+ if (info->if_mode & MGSL_INTERFACE_RTS_EN)
+ val |= BIT7;
+
+ if (info->params.parity != ASYNC_PARITY_NONE) {
+ val |= BIT9;
+ if (info->params.parity == ASYNC_PARITY_ODD)
+ val |= BIT8;
+ }
+
+ switch (info->params.data_bits)
+ {
+ case 6: val |= BIT4; break;
+ case 7: val |= BIT5; break;
+ case 8: val |= BIT5 + BIT4; break;
+ }
+
+ if (info->params.stop_bits != 1)
+ val |= BIT3;
+
+ if (info->params.flags & HDLC_FLAG_AUTO_CTS)
+ val |= BIT0;
+
+ wr_reg16(info, TCR, val);
+
+ /* RCR (rx control)
+ *
+ * 15..13 mode, 010=async
+ * 12..10 encoding, 000=NRZ
+ * 09 parity enable
+ * 08 1=odd parity, 0=even parity
+ * 07..06 reserved, must be 0
+ * 05..04 character length
+ * 00=5 bits
+ * 01=6 bits
+ * 10=7 bits
+ * 11=8 bits
+ * 03 reserved, must be zero
+ * 02 reset
+ * 01 enable
+ * 00 auto-DCD enable
+ */
+ val = 0x4000;
+
+ if (info->params.parity != ASYNC_PARITY_NONE) {
+ val |= BIT9;
+ if (info->params.parity == ASYNC_PARITY_ODD)
+ val |= BIT8;
+ }
+
+ switch (info->params.data_bits)
+ {
+ case 6: val |= BIT4; break;
+ case 7: val |= BIT5; break;
+ case 8: val |= BIT5 + BIT4; break;
+ }
+
+ if (info->params.flags & HDLC_FLAG_AUTO_DCD)
+ val |= BIT0;
+
+ wr_reg16(info, RCR, val);
+
+ /* CCR (clock control)
+ *
+ * 07..05 011 = tx clock source is BRG/16
+ * 04..02 010 = rx clock source is BRG
+ * 01 0 = auxclk disabled
+ * 00 1 = BRG enabled
+ *
+ * 0110 1001
+ */
+ wr_reg8(info, CCR, 0x69);
+
+ msc_set_vcr(info);
+
+ tx_set_idle(info);
+
+ /* SCR (serial control)
+ *
+ * 15 1=tx req on FIFO half empty
+ * 14 1=rx req on FIFO half full
+ * 13 tx data IRQ enable
+ * 12 tx idle IRQ enable
+ * 11 rx break on IRQ enable
+ * 10 rx data IRQ enable
+ * 09 rx break off IRQ enable
+ * 08 overrun IRQ enable
+ * 07 DSR IRQ enable
+ * 06 CTS IRQ enable
+ * 05 DCD IRQ enable
+ * 04 RI IRQ enable
+ * 03 reserved, must be zero
+ * 02 1=txd->rxd internal loopback enable
+ * 01 reserved, must be zero
+ * 00 1=master IRQ enable
+ */
+ val = BIT15 + BIT14 + BIT0;
+ wr_reg16(info, SCR, val);
+
+ slgt_irq_on(info, IRQ_RXBREAK | IRQ_RXOVER);
+
+ set_rate(info, info->params.data_rate * 16);
+
+ if (info->params.loopback)
+ enable_loopback(info);
+}
+
+static void hdlc_mode(struct slgt_info *info)
+{
+ unsigned short val;
+
+ slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
+ tx_stop(info);
+ rx_stop(info);
+
+ /* TCR (tx control)
+ *
+ * 15..13 mode, 000=HDLC 001=raw sync
+ * 12..10 encoding
+ * 09 CRC enable
+ * 08 CRC32
+ * 07 1=RTS driver control
+ * 06 preamble enable
+ * 05..04 preamble length
+ * 03 share open/close flag
+ * 02 reset
+ * 01 enable
+ * 00 auto-CTS enable
+ */
+ val = 0;
+
+ if (info->params.mode == MGSL_MODE_RAW)
+ val |= BIT13;
+ if (info->if_mode & MGSL_INTERFACE_RTS_EN)
+ val |= BIT7;
+
+ switch(info->params.encoding)
+ {
+ case HDLC_ENCODING_NRZB: val |= BIT10; break;
+ case HDLC_ENCODING_NRZI_MARK: val |= BIT11; break;
+ case HDLC_ENCODING_NRZI: val |= BIT11 + BIT10; break;
+ case HDLC_ENCODING_BIPHASE_MARK: val |= BIT12; break;
+ case HDLC_ENCODING_BIPHASE_SPACE: val |= BIT12 + BIT10; break;
+ case HDLC_ENCODING_BIPHASE_LEVEL: val |= BIT12 + BIT11; break;
+ case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break;
+ }
+
+ switch (info->params.crc_type)
+ {
+ case HDLC_CRC_16_CCITT: val |= BIT9; break;
+ case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break;
+ }
+
+ if (info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE)
+ val |= BIT6;
+
+ switch (info->params.preamble_length)
+ {
+ case HDLC_PREAMBLE_LENGTH_16BITS: val |= BIT5; break;
+ case HDLC_PREAMBLE_LENGTH_32BITS: val |= BIT4; break;
+ case HDLC_PREAMBLE_LENGTH_64BITS: val |= BIT5 + BIT4; break;
+ }
+
+ if (info->params.flags & HDLC_FLAG_AUTO_CTS)
+ val |= BIT0;
+
+ wr_reg16(info, TCR, val);
+
+ /* TPR (transmit preamble) */
+
+ switch (info->params.preamble)
+ {
+ case HDLC_PREAMBLE_PATTERN_FLAGS: val = 0x7e; break;
+ case HDLC_PREAMBLE_PATTERN_ONES: val = 0xff; break;
+ case HDLC_PREAMBLE_PATTERN_ZEROS: val = 0x00; break;
+ case HDLC_PREAMBLE_PATTERN_10: val = 0x55; break;
+ case HDLC_PREAMBLE_PATTERN_01: val = 0xaa; break;
+ default: val = 0x7e; break;
+ }
+ wr_reg8(info, TPR, (unsigned char)val);
+
+ /* RCR (rx control)
+ *
+ * 15..13 mode, 000=HDLC 001=raw sync
+ * 12..10 encoding
+ * 09 CRC enable
+ * 08 CRC32
+ * 07..03 reserved, must be 0
+ * 02 reset
+ * 01 enable
+ * 00 auto-DCD enable
+ */
+ val = 0;
+
+ if (info->params.mode == MGSL_MODE_RAW)
+ val |= BIT13;
+
+ switch(info->params.encoding)
+ {
+ case HDLC_ENCODING_NRZB: val |= BIT10; break;
+ case HDLC_ENCODING_NRZI_MARK: val |= BIT11; break;
+ case HDLC_ENCODING_NRZI: val |= BIT11 + BIT10; break;
+ case HDLC_ENCODING_BIPHASE_MARK: val |= BIT12; break;
+ case HDLC_ENCODING_BIPHASE_SPACE: val |= BIT12 + BIT10; break;
+ case HDLC_ENCODING_BIPHASE_LEVEL: val |= BIT12 + BIT11; break;
+ case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break;
+ }
+
+ switch (info->params.crc_type)
+ {
+ case HDLC_CRC_16_CCITT: val |= BIT9; break;
+ case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break;
+ }
+
+ if (info->params.flags & HDLC_FLAG_AUTO_DCD)
+ val |= BIT0;
+
+ wr_reg16(info, RCR, val);
+
+ /* CCR (clock control)
+ *
+ * 07..05 tx clock source
+ * 04..02 rx clock source
+ * 01 auxclk enable
+ * 00 BRG enable
+ */
+ val = 0;
+
+ if (info->params.flags & HDLC_FLAG_TXC_BRG)
+ {
+ // when RxC source is DPLL, BRG generates 16X DPLL
+ // reference clock, so take TxC from BRG/16 to get
+ // transmit clock at actual data rate
+ if (info->params.flags & HDLC_FLAG_RXC_DPLL)
+ val |= BIT6 + BIT5; /* 011, txclk = BRG/16 */
+ else
+ val |= BIT6; /* 010, txclk = BRG */
+ }
+ else if (info->params.flags & HDLC_FLAG_TXC_DPLL)
+ val |= BIT7; /* 100, txclk = DPLL Input */
+ else if (info->params.flags & HDLC_FLAG_TXC_RXCPIN)
+ val |= BIT5; /* 001, txclk = RXC Input */
+
+ if (info->params.flags & HDLC_FLAG_RXC_BRG)
+ val |= BIT3; /* 010, rxclk = BRG */
+ else if (info->params.flags & HDLC_FLAG_RXC_DPLL)
+ val |= BIT4; /* 100, rxclk = DPLL */
+ else if (info->params.flags & HDLC_FLAG_RXC_TXCPIN)
+ val |= BIT2; /* 001, rxclk = TXC Input */
+
+ if (info->params.clock_speed)
+ val |= BIT1 + BIT0;
+
+ wr_reg8(info, CCR, (unsigned char)val);
+
+ if (info->params.flags & (HDLC_FLAG_TXC_DPLL + HDLC_FLAG_RXC_DPLL))
+ {
+ // program DPLL mode
+ switch(info->params.encoding)
+ {
+ case HDLC_ENCODING_BIPHASE_MARK:
+ case HDLC_ENCODING_BIPHASE_SPACE:
+ val = BIT7; break;
+ case HDLC_ENCODING_BIPHASE_LEVEL:
+ case HDLC_ENCODING_DIFF_BIPHASE_LEVEL:
+ val = BIT7 + BIT6; break;
+ default: val = BIT6; // NRZ encodings
+ }
+ wr_reg16(info, RCR, (unsigned short)(rd_reg16(info, RCR) | val));
+
+ // DPLL requires a 16X reference clock from BRG
+ set_rate(info, info->params.clock_speed * 16);
+ }
+ else
+ set_rate(info, info->params.clock_speed);
+
+ tx_set_idle(info);
+
+ msc_set_vcr(info);
+
+ /* SCR (serial control)
+ *
+ * 15 1=tx req on FIFO half empty
+ * 14 1=rx req on FIFO half full
+ * 13 tx data IRQ enable
+ * 12 tx idle IRQ enable
+ * 11 underrun IRQ enable
+ * 10 rx data IRQ enable
+ * 09 rx idle IRQ enable
+ * 08 overrun IRQ enable
+ * 07 DSR IRQ enable
+ * 06 CTS IRQ enable
+ * 05 DCD IRQ enable
+ * 04 RI IRQ enable
+ * 03 reserved, must be zero
+ * 02 1=txd->rxd internal loopback enable
+ * 01 reserved, must be zero
+ * 00 1=master IRQ enable
+ */
+ wr_reg16(info, SCR, BIT15 + BIT14 + BIT0);
+
+ if (info->params.loopback)
+ enable_loopback(info);
+}
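
For readers following the TCR/RCR/CCR bit maps in the comments above, here is a small stand-alone sketch (not part of the driver) that composes a TCR value for one hypothetical configuration: HDLC mode, NRZI encoding, CRC-16 CCITT and auto-CTS. It assumes the driver's BITn macros are plain (1 << n) masks, which matches how they are combined above.

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned short tcr = 0;

	/* 15..13 mode stays 000 for HDLC */
	tcr |= BIT(11) | BIT(10);      /* 12..10 encoding = 011 (NRZI) */
	tcr |= BIT(9);                 /* 09 CRC enable; 08 (CRC32) stays clear, so CRC-16 */
	tcr |= BIT(0);                 /* 00 auto-CTS enable */

	printf("TCR = 0x%04x\n", tcr); /* prints TCR = 0x0e01 */
	return 0;
}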
+
+/*
+ * set transmit idle mode
+ */
+static void tx_set_idle(struct slgt_info *info)
+{
+ unsigned char val = 0xff;
+
+ switch(info->idle_mode)
+ {
+ case HDLC_TXIDLE_FLAGS: val = 0x7e; break;
+ case HDLC_TXIDLE_ALT_ZEROS_ONES: val = 0xaa; break;
+ case HDLC_TXIDLE_ZEROS: val = 0x00; break;
+ case HDLC_TXIDLE_ONES: val = 0xff; break;
+ case HDLC_TXIDLE_ALT_MARK_SPACE: val = 0xaa; break;
+ case HDLC_TXIDLE_SPACE: val = 0x00; break;
+ case HDLC_TXIDLE_MARK: val = 0xff; break;
+ }
+
+ wr_reg8(info, TIR, val);
+}
+
+/*
+ * get state of V24 status (input) signals
+ */
+static void get_signals(struct slgt_info *info)
+{
+ unsigned short status = rd_reg16(info, SSR);
+
+ /* clear all serial signals except DTR and RTS */
+ info->signals &= SerialSignal_DTR + SerialSignal_RTS;
+
+ if (status & BIT3)
+ info->signals |= SerialSignal_DSR;
+ if (status & BIT2)
+ info->signals |= SerialSignal_CTS;
+ if (status & BIT1)
+ info->signals |= SerialSignal_DCD;
+ if (status & BIT0)
+ info->signals |= SerialSignal_RI;
+}
+
+/*
+ * set V.24 Control Register based on current configuration
+ */
+static void msc_set_vcr(struct slgt_info *info)
+{
+ unsigned char val = 0;
+
+ /* VCR (V.24 control)
+ *
+ * 07..04 serial IF select
+ * 03 DTR
+ * 02 RTS
+ * 01 LL
+ * 00 RL
+ */
+
+ switch(info->if_mode & MGSL_INTERFACE_MASK)
+ {
+ case MGSL_INTERFACE_RS232:
+ val |= BIT5; /* 0010 */
+ break;
+ case MGSL_INTERFACE_V35:
+ val |= BIT7 + BIT6 + BIT5; /* 1110 */
+ break;
+ case MGSL_INTERFACE_RS422:
+ val |= BIT6; /* 0100 */
+ break;
+ }
+
+ if (info->signals & SerialSignal_DTR)
+ val |= BIT3;
+ if (info->signals & SerialSignal_RTS)
+ val |= BIT2;
+ if (info->if_mode & MGSL_INTERFACE_LL)
+ val |= BIT1;
+ if (info->if_mode & MGSL_INTERFACE_RL)
+ val |= BIT0;
+ wr_reg8(info, VCR, val);
+}
+
+/*
+ * set state of V24 control (output) signals
+ */
+static void set_signals(struct slgt_info *info)
+{
+ unsigned char val = rd_reg8(info, VCR);
+ if (info->signals & SerialSignal_DTR)
+ val |= BIT3;
+ else
+ val &= ~BIT3;
+ if (info->signals & SerialSignal_RTS)
+ val |= BIT2;
+ else
+ val &= ~BIT2;
+ wr_reg8(info, VCR, val);
+}
+
+/*
+ * free range of receive DMA buffers (i to last)
+ */
+static void free_rbufs(struct slgt_info *info, unsigned int i, unsigned int last)
+{
+ int done = 0;
+
+ while(!done) {
+ /* reset current buffer for reuse */
+ info->rbufs[i].status = 0;
+ if (info->params.mode == MGSL_MODE_RAW)
+ set_desc_count(info->rbufs[i], info->raw_rx_size);
+ else
+ set_desc_count(info->rbufs[i], DMABUFSIZE);
+
+ if (i == last)
+ done = 1;
+ if (++i == info->rbuf_count)
+ i = 0;
+ }
+ info->rbuf_current = i;
+}
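
Note that the range handed to free_rbufs() wraps around the descriptor ring, so last may be numerically smaller than i. A minimal stand-alone model of that traversal (a plain flag array stands in for the DMA descriptors, and the ring size of 8 is made up for the example) shows which entries are reset and where rbuf_current lands:

#include <stdio.h>

#define RING_SIZE 8

int main(void)
{
	int reset[RING_SIZE] = {0};
	unsigned int i = 6, last = 1;   /* range wraps past the end of the ring */
	int done = 0;

	while (!done) {
		reset[i] = 1;           /* stands in for clearing one descriptor */
		if (i == last)
			done = 1;
		if (++i == RING_SIZE)
			i = 0;
	}

	printf("reset:");               /* prints: reset: 0 1 6 7, next current = 2 */
	for (int n = 0; n < RING_SIZE; n++)
		if (reset[n])
			printf(" %d", n);
	printf(", next current = %u\n", i);
	return 0;
}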
+
+/*
+ * mark all receive DMA buffers as free
+ */
+static void reset_rbufs(struct slgt_info *info)
+{
+ free_rbufs(info, 0, info->rbuf_count - 1);
+}
+
+/*
+ * pass receive HDLC frame to upper layer
+ *
+ * return 1 if frame available, otherwise 0
+ */
+static int rx_get_frame(struct slgt_info *info)
+{
+ unsigned int start, end;
+ unsigned short status;
+ unsigned int framesize = 0;
+ int rc = 0;
+ unsigned long flags;
+ struct tty_struct *tty = info->tty;
+ unsigned char addr_field = 0xff;
+
+check_again:
+
+ framesize = 0;
+ addr_field = 0xff;
+ start = end = info->rbuf_current;
+
+ for (;;) {
+ if (!desc_complete(info->rbufs[end]))
+ goto cleanup;
+
+ if (framesize == 0 && info->params.addr_filter != 0xff)
+ addr_field = info->rbufs[end].buf[0];
+
+ framesize += desc_count(info->rbufs[end]);
+
+ if (desc_eof(info->rbufs[end]))
+ break;
+
+ if (++end == info->rbuf_count)
+ end = 0;
+
+ if (end == info->rbuf_current) {
+ if (info->rx_enabled){
+ spin_lock_irqsave(&info->lock,flags);
+ rx_start(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+ }
+ goto cleanup;
+ }
+ }
+
+ /* status
+ *
+ * 15 buffer complete
+ * 14..06 reserved
+ * 05..04 residue
+ * 02 eof (end of frame)
+ * 01 CRC error
+ * 00 abort
+ */
+ status = desc_status(info->rbufs[end]);
+
+ /* ignore CRC bit if not using CRC (bit is undefined) */
+ if (info->params.crc_type == HDLC_CRC_NONE)
+ status &= ~BIT1;
+
+ if (framesize == 0 ||
+ (addr_field != 0xff && addr_field != info->params.addr_filter)) {
+ free_rbufs(info, start, end);
+ goto check_again;
+ }
+
+ if (framesize < 2 || status & (BIT1+BIT0)) {
+ if (framesize < 2 || (status & BIT0))
+ info->icount.rxshort++;
+ else
+ info->icount.rxcrc++;
+ framesize = 0;
+
+#ifdef CONFIG_HDLC
+ {
+ struct net_device_stats *stats = hdlc_stats(info->netdev);
+ stats->rx_errors++;
+ stats->rx_frame_errors++;
+ }
+#endif
+ } else {
+ /* adjust frame size for CRC, if any */
+ if (info->params.crc_type == HDLC_CRC_16_CCITT)
+ framesize -= 2;
+ else if (info->params.crc_type == HDLC_CRC_32_CCITT)
+ framesize -= 4;
+ }
+
+ DBGBH(("%s rx frame status=%04X size=%d\n",
+ info->device_name, status, framesize));
+ DBGDATA(info, info->rbufs[start].buf, min_t(int, framesize, DMABUFSIZE), "rx");
+
+ if (framesize) {
+ if (framesize > info->max_frame_size)
+ info->icount.rxlong++;
+ else {
+ /* copy dma buffer(s) to contiguous temp buffer */
+ int copy_count = framesize;
+ int i = start;
+ unsigned char *p = info->tmp_rbuf;
+ info->tmp_rbuf_count = framesize;
+
+ info->icount.rxok++;
+
+ while(copy_count) {
+ int partial_count = min(copy_count, DMABUFSIZE);
+ memcpy(p, info->rbufs[i].buf, partial_count);
+ p += partial_count;
+ copy_count -= partial_count;
+ if (++i == info->rbuf_count)
+ i = 0;
+ }
+
+#ifdef CONFIG_HDLC
+ if (info->netcount)
+ hdlcdev_rx(info,info->tmp_rbuf, framesize);
+ else
+#endif
+ ldisc_receive_buf(tty, info->tmp_rbuf, info->flag_buf, framesize);
+ }
+ }
+ free_rbufs(info, start, end);
+ rc = 1;
+
+cleanup:
+ return rc;
+}
+
+/*
+ * pass receive buffer (RAW synchronous mode) to tty layer
+ * return 1 if buffer available, otherwise 0
+ */
+static int rx_get_buf(struct slgt_info *info)
+{
+ unsigned int i = info->rbuf_current;
+
+ if (!desc_complete(info->rbufs[i]))
+ return 0;
+ DBGDATA(info, info->rbufs[i].buf, desc_count(info->rbufs[i]), "rx");
+ DBGINFO(("rx_get_buf size=%d\n", desc_count(info->rbufs[i])));
+ ldisc_receive_buf(info->tty, info->rbufs[i].buf,
+ info->flag_buf, desc_count(info->rbufs[i]));
+ free_rbufs(info, i, i);
+ return 1;
+}
+
+static void reset_tbufs(struct slgt_info *info)
+{
+ unsigned int i;
+ info->tbuf_current = 0;
+ for (i=0 ; i < info->tbuf_count ; i++) {
+ info->tbufs[i].status = 0;
+ info->tbufs[i].count = 0;
+ }
+}
+
+/*
+ * return number of free transmit DMA buffers
+ */
+static unsigned int free_tbuf_count(struct slgt_info *info)
+{
+ unsigned int count = 0;
+ unsigned int i = info->tbuf_current;
+
+ do
+ {
+ if (desc_count(info->tbufs[i]))
+ break; /* buffer in use */
+ ++count;
+ if (++i == info->tbuf_count)
+ i=0;
+ } while (i != info->tbuf_current);
+
+ /* last buffer with zero count may be in use, assume it is */
+ if (count)
+ --count;
+
+ return count;
+}
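
The final decrement above is the part that is easy to miss: the descriptor at the end of the zero-count run may have had its count cleared while the DMA engine is still draining it, so one apparently free buffer is always withheld. A stand-alone model of the same logic (ring size and counts are hypothetical) makes the effect visible:

#include <stdio.h>

#define TBUF_COUNT 4

static unsigned int model_free_count(const unsigned short *counts, unsigned int current)
{
	unsigned int count = 0;
	unsigned int i = current;

	do {
		if (counts[i])
			break;          /* buffer still loaded with data */
		++count;
		if (++i == TBUF_COUNT)
			i = 0;
	} while (i != current);

	if (count)
		--count;                /* hold back the possibly-active buffer */
	return count;
}

int main(void)
{
	unsigned short counts[TBUF_COUNT] = { 0, 0, 0, 128 };

	/* three descriptors show a zero count, but only two are reported free */
	printf("free = %u\n", model_free_count(counts, 0));   /* prints free = 2 */
	return 0;
}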
+
+/*
+ * load transmit DMA buffer(s) with data
+ */
+static void tx_load(struct slgt_info *info, const char *buf, unsigned int size)
+{
+ unsigned short count;
+ unsigned int i;
+ struct slgt_desc *d;
+
+ if (size == 0)
+ return;
+
+ DBGDATA(info, buf, size, "tx");
+
+ info->tbuf_start = i = info->tbuf_current;
+
+ while (size) {
+ d = &info->tbufs[i];
+ if (++i == info->tbuf_count)
+ i = 0;
+
+ count = (unsigned short)((size > DMABUFSIZE) ? DMABUFSIZE : size);
+ memcpy(d->buf, buf, count);
+
+ size -= count;
+ buf += count;
+
+ if (!size && info->params.mode != MGSL_MODE_RAW)
+ set_desc_eof(*d, 1); /* HDLC: set EOF of last desc */
+ else
+ set_desc_eof(*d, 0);
+
+ set_desc_count(*d, count);
+ }
+
+ info->tbuf_current = i;
+}
+
+static int register_test(struct slgt_info *info)
+{
+ static unsigned short patterns[] =
+ {0x0000, 0xffff, 0xaaaa, 0x5555, 0x6969, 0x9696};
+ static unsigned int count = sizeof(patterns)/sizeof(patterns[0]);
+ unsigned int i;
+ int rc = 0;
+
+ for (i=0 ; i < count ; i++) {
+ wr_reg16(info, TIR, patterns[i]);
+ wr_reg16(info, BDR, patterns[(i+1)%count]);
+ if ((rd_reg16(info, TIR) != patterns[i]) ||
+ (rd_reg16(info, BDR) != patterns[(i+1)%count])) {
+ rc = -ENODEV;
+ break;
+ }
+ }
+
+ info->init_error = rc ? DiagStatus_AddressFailure : 0;
+ return rc;
+}
+
+static int irq_test(struct slgt_info *info)
+{
+ unsigned long timeout;
+ unsigned long flags;
+ struct tty_struct *oldtty = info->tty;
+ u32 speed = info->params.data_rate;
+
+ info->params.data_rate = 921600;
+ info->tty = NULL;
+
+ spin_lock_irqsave(&info->lock, flags);
+ async_mode(info);
+ slgt_irq_on(info, IRQ_TXIDLE);
+
+ /* enable transmitter */
+ wr_reg16(info, TCR,
+ (unsigned short)(rd_reg16(info, TCR) | BIT1));
+
+ /* write one byte and wait for tx idle */
+ wr_reg16(info, TDR, 0);
+
+ /* assume failure */
+ info->init_error = DiagStatus_IrqFailure;
+ info->irq_occurred = FALSE;
+
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ timeout=100;
+ while(timeout-- && !info->irq_occurred)
+ msleep_interruptible(10);
+
+ spin_lock_irqsave(&info->lock,flags);
+ reset_port(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ info->params.data_rate = speed;
+ info->tty = oldtty;
+
+ info->init_error = info->irq_occurred ? 0 : DiagStatus_IrqFailure;
+ return info->irq_occurred ? 0 : -ENODEV;
+}
+
+static int loopback_test_rx(struct slgt_info *info)
+{
+ unsigned char *src, *dest;
+ int count;
+
+ if (desc_complete(info->rbufs[0])) {
+ count = desc_count(info->rbufs[0]);
+ src = info->rbufs[0].buf;
+ dest = info->tmp_rbuf;
+
+ for( ; count ; count-=2, src+=2) {
+ /* src=data byte (src+1)=status byte */
+ if (!(*(src+1) & (BIT9 + BIT8))) {
+ *dest = *src;
+ dest++;
+ info->tmp_rbuf_count++;
+ }
+ }
+ DBGDATA(info, info->tmp_rbuf, info->tmp_rbuf_count, "rx");
+ return 1;
+ }
+ return 0;
+}
+
+static int loopback_test(struct slgt_info *info)
+{
+#define TESTFRAMESIZE 20
+
+ unsigned long timeout;
+ u16 count = TESTFRAMESIZE;
+ unsigned char buf[TESTFRAMESIZE];
+ int rc = -ENODEV;
+ unsigned long flags;
+
+ struct tty_struct *oldtty = info->tty;
+ MGSL_PARAMS params;
+
+ memcpy(&params, &info->params, sizeof(params));
+
+ info->params.mode = MGSL_MODE_ASYNC;
+ info->params.data_rate = 921600;
+ info->params.loopback = 1;
+ info->tty = NULL;
+
+ /* build and send transmit frame */
+ for (count = 0; count < TESTFRAMESIZE; ++count)
+ buf[count] = (unsigned char)count;
+
+ info->tmp_rbuf_count = 0;
+ memset(info->tmp_rbuf, 0, TESTFRAMESIZE);
+
+ /* program hardware for async mode and enable receiver */
+ spin_lock_irqsave(&info->lock,flags);
+ async_mode(info);
+ rx_start(info);
+ info->tx_count = count;
+ tx_load(info, buf, count);
+ tx_start(info);
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ /* wait for receive complete */
+ for (timeout = 100; timeout; --timeout) {
+ msleep_interruptible(10);
+ if (loopback_test_rx(info)) {
+ rc = 0;
+ break;
+ }
+ }
+
+ /* verify received frame length and contents */
+ if (!rc && (info->tmp_rbuf_count != count ||
+ memcmp(buf, info->tmp_rbuf, count))) {
+ rc = -ENODEV;
+ }
+
+ spin_lock_irqsave(&info->lock,flags);
+ reset_adapter(info);
+ spin_unlock_irqrestore(&info->lock,flags);
+
+ memcpy(&info->params, &params, sizeof(info->params));
+ info->tty = oldtty;
+
+ info->init_error = rc ? DiagStatus_DmaFailure : 0;
+ return rc;
+}
+
+static int adapter_test(struct slgt_info *info)
+{
+ DBGINFO(("testing %s\n", info->device_name));
+ if ((info->init_error = register_test(info)) < 0) {
+ printk("register test failure %s addr=%08X\n",
+ info->device_name, info->phys_reg_addr);
+ } else if ((info->init_error = irq_test(info)) < 0) {
+ printk("IRQ test failure %s IRQ=%d\n",
+ info->device_name, info->irq_level);
+ } else if ((info->init_error = loopback_test(info)) < 0) {
+ printk("loopback test failure %s\n", info->device_name);
+ }
+ return info->init_error;
+}
+
+/*
+ * transmit timeout handler
+ */
+static void tx_timeout(unsigned long context)
+{
+ struct slgt_info *info = (struct slgt_info*)context;
+ unsigned long flags;
+
+ DBGINFO(("%s tx_timeout\n", info->device_name));
+ if(info->tx_active && info->params.mode == MGSL_MODE_HDLC) {
+ info->icount.txtimeout++;
+ }
+ spin_lock_irqsave(&info->lock,flags);
+ info->tx_active = 0;
+ info->tx_count = 0;
+ spin_unlock_irqrestore(&info->lock,flags);
+
+#ifdef CONFIG_HDLC
+ if (info->netcount)
+ hdlcdev_tx_done(info);
+ else
+#endif
+ bh_transmit(info);
+}
+
+/*
+ * receive buffer polling timer
+ */
+static void rx_timeout(unsigned long context)
+{
+ struct slgt_info *info = (struct slgt_info*)context;
+ unsigned long flags;
+
+ DBGINFO(("%s rx_timeout\n", info->device_name));
+ spin_lock_irqsave(&info->lock, flags);
+ info->pending_bh |= BH_RECEIVE;
+ spin_unlock_irqrestore(&info->lock, flags);
+ bh_handler(info);
+}
+
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index ee5a40be9f9..960adb256fb 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -2196,7 +2196,7 @@ void isr_rxint(SLMP_INFO * info)
if ( tty ) {
if (!(status & info->ignore_status_mask1)) {
if (info->read_status_mask1 & BRKD) {
- *tty->flip.flag_buf_ptr = TTY_BREAK;
+ tty_insert_flip_char(tty, 0, TTY_BREAK);
if (info->flags & ASYNC_SAK)
do_SAK(tty);
}
@@ -2240,16 +2240,10 @@ void isr_rxrdy(SLMP_INFO * info)
while((status = read_reg(info,CST0)) & BIT0)
{
+ int flag = 0;
+ int over = 0;
DataByte = read_reg(info,TRB);
- if ( tty ) {
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- continue;
-
- *tty->flip.char_buf_ptr = DataByte;
- *tty->flip.flag_buf_ptr = 0;
- }
-
icount->rx++;
if ( status & (PE + FRME + OVRN) ) {
@@ -2272,42 +2266,34 @@ void isr_rxrdy(SLMP_INFO * info)
if ( tty ) {
if (status & PE)
- *tty->flip.flag_buf_ptr = TTY_PARITY;
+ flag = TTY_PARITY;
else if (status & FRME)
- *tty->flip.flag_buf_ptr = TTY_FRAME;
+ flag = TTY_FRAME;
if (status & OVRN) {
/* Overrun is special, since it's
* reported immediately, and doesn't
* affect the current character
*/
- if (tty->flip.count < TTY_FLIPBUF_SIZE) {
- tty->flip.count++;
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- *tty->flip.flag_buf_ptr = TTY_OVERRUN;
- }
+ over = 1;
}
}
} /* end of if (error) */
if ( tty ) {
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
+ tty_insert_flip_char(tty, DataByte, flag);
+ if (over)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
}
}
if ( debug_level >= DEBUG_LEVEL_ISR ) {
- printk("%s(%d):%s isr_rxrdy() flip count=%d\n",
- __FILE__,__LINE__,info->device_name,
- tty ? tty->flip.count : 0);
printk("%s(%d):%s rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
__FILE__,__LINE__,info->device_name,
icount->rx,icount->brk,icount->parity,
icount->frame,icount->overrun);
}
- if ( tty && tty->flip.count )
+ if ( tty )
tty_flip_buffer_push(tty);
}
@@ -5104,7 +5090,7 @@ void tx_load_dma_buffer(SLMP_INFO *info, const char *buf, unsigned int count)
int register_test(SLMP_INFO *info)
{
static unsigned char testval[] = {0x00, 0xff, 0xaa, 0x55, 0x69, 0x96};
- static unsigned int count = sizeof(testval)/sizeof(unsigned char);
+ static unsigned int count = ARRAY_SIZE(testval);
unsigned int i;
int rc = TRUE;
unsigned long flags;
@@ -5422,7 +5408,7 @@ int memory_test(SLMP_INFO *info)
{
static unsigned long testval[] = { 0x0, 0x55555555, 0xaaaaaaaa,
0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
- unsigned long count = sizeof(testval)/sizeof(unsigned long);
+ unsigned long count = ARRAY_SIZE(testval);
unsigned long i;
unsigned long limit = SCA_MEM_SIZE/sizeof(unsigned long);
unsigned long * addr = (unsigned long *)info->memory_base;
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 145275ebdd7..5765f672e85 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -153,6 +153,21 @@ static struct sysrq_key_op sysrq_mountro_op = {
/* END SYNC SYSRQ HANDLERS BLOCK */
+#ifdef CONFIG_DEBUG_MUTEXES
+
+static void
+sysrq_handle_showlocks(int key, struct pt_regs *pt_regs, struct tty_struct *tty)
+{
+ mutex_debug_show_all_locks();
+}
+
+static struct sysrq_key_op sysrq_showlocks_op = {
+ .handler = sysrq_handle_showlocks,
+ .help_msg = "show-all-locks(D)",
+ .action_msg = "Show Locks Held",
+};
+
+#endif
/* SHOW SYSRQ HANDLERS BLOCK */
@@ -294,7 +309,11 @@ static struct sysrq_key_op *sysrq_key_table[SYSRQ_KEY_TABLE_LENGTH] = {
#else
/* c */ NULL,
#endif
+#ifdef CONFIG_DEBUG_MUTEXES
+/* d */ &sysrq_showlocks_op,
+#else
/* d */ NULL,
+#endif
/* e */ &sysrq_term_op,
/* f */ &sysrq_moom_op,
/* g */ NULL,
diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c
index b3d411a756f..ac2a297ce37 100644
--- a/drivers/char/tb0219.c
+++ b/drivers/char/tb0219.c
@@ -1,7 +1,7 @@
/*
* Driver for TANBAC TB0219 base board.
*
- * Copyright (C) 2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -27,7 +27,7 @@
#include <asm/vr41xx/giu.h>
#include <asm/vr41xx/tb0219.h>
-MODULE_AUTHOR("Yoichi Yuasa <yuasa@hh.iij4u.or.jp>");
+MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>");
MODULE_DESCRIPTION("TANBAC TB0219 base board driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 12167c04fa4..bc56df8a347 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -211,7 +211,7 @@ static int tlclk_open(struct inode *inode, struct file *filp)
result = request_irq(telclk_interrupt, &tlclk_interrupt,
SA_INTERRUPT, "telco_clock", tlclk_interrupt);
if (result == -EBUSY) {
- printk(KERN_ERR "telco_clock: Interrupt can't be reserved!\n");
+ printk(KERN_ERR "tlclk: Interrupt can't be reserved.\n");
return -EBUSY;
}
inb(TLCLK_REG6); /* Clear interrupt events */
@@ -741,7 +741,7 @@ static int __init tlclk_init(void)
ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops);
if (ret < 0) {
- printk(KERN_ERR "telco_clock: can't get major! %d\n", tlclk_major);
+ printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
return ret;
}
alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
@@ -750,7 +750,7 @@ static int __init tlclk_init(void)
/* Read telecom clock IRQ number (Set by BIOS) */
if (!request_region(TLCLK_BASE, 8, "telco_clock")) {
- printk(KERN_ERR "tlclk: request_region failed! 0x%X\n",
+ printk(KERN_ERR "tlclk: request_region 0x%X failed.\n",
TLCLK_BASE);
ret = -EBUSY;
goto out2;
@@ -758,7 +758,7 @@ static int __init tlclk_init(void)
telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? */
- printk(KERN_ERR "telclk_interrup = 0x%x non-mcpbl0010 hw\n",
+ printk(KERN_ERR "telclk_interrup = 0x%x non-mcpbl0010 hw.\n",
telclk_interrupt);
ret = -ENXIO;
goto out3;
@@ -768,7 +768,7 @@ static int __init tlclk_init(void)
ret = misc_register(&tlclk_miscdev);
if (ret < 0) {
- printk(KERN_ERR " misc_register retruns %d\n", ret);
+ printk(KERN_ERR "tlclk: misc_register returns %d.\n", ret);
ret = -EBUSY;
goto out3;
}
@@ -776,8 +776,7 @@ static int __init tlclk_init(void)
tlclk_device = platform_device_register_simple("telco_clock",
-1, NULL, 0);
if (!tlclk_device) {
- printk(KERN_ERR " platform_device_register retruns 0x%X\n",
- (unsigned int) tlclk_device);
+ printk(KERN_ERR "tlclk: platform_device_register failed.\n");
ret = -EBUSY;
goto out4;
}
@@ -785,7 +784,7 @@ static int __init tlclk_init(void)
ret = sysfs_create_group(&tlclk_device->dev.kobj,
&tlclk_attribute_group);
if (ret) {
- printk(KERN_ERR "failed to create sysfs device attributes\n");
+ printk(KERN_ERR "tlclk: failed to create sysfs device attributes.\n");
sysfs_remove_group(&tlclk_device->dev.kobj,
&tlclk_attribute_group);
goto out5;
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 2392e404e8d..ba4582d160f 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -2,6 +2,9 @@
# Makefile for the kernel tpm device drivers.
#
obj-$(CONFIG_TCG_TPM) += tpm.o
+ifdef CONFIG_ACPI
+ obj-$(CONFIG_TCG_TPM) += tpm_bios.o
+endif
obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index a9be0e8eaea..5a3870477ef 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -466,6 +466,7 @@ void tpm_remove_hardware(struct device *dev)
kfree(chip->vendor->miscdev.name);
sysfs_remove_group(&dev->kobj, chip->vendor->attr_group);
+ tpm_bios_log_teardown(chip->bios_dir);
dev_mask[chip->dev_num / TPM_NUM_MASK_ENTRIES ] &=
~(1 << (chip->dev_num % TPM_NUM_MASK_ENTRIES));
@@ -593,6 +594,8 @@ dev_num_search_complete:
sysfs_create_group(&dev->kobj, chip->vendor->attr_group);
+ chip->bios_dir = tpm_bios_log_setup(devname);
+
return 0;
}
EXPORT_SYMBOL_GPL(tpm_register_hardware);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 159882ca69d..fd3a4beaa53 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -82,6 +82,8 @@ struct tpm_chip {
struct tpm_vendor_specific *vendor;
+ struct dentry **bios_dir;
+
struct list_head list;
};
@@ -107,3 +109,16 @@ extern ssize_t tpm_read(struct file *, char __user *, size_t, loff_t *);
extern void tpm_remove_hardware(struct device *);
extern int tpm_pm_suspend(struct device *, pm_message_t);
extern int tpm_pm_resume(struct device *);
+
+#ifdef CONFIG_ACPI
+extern struct dentry ** tpm_bios_log_setup(char *);
+extern void tpm_bios_log_teardown(struct dentry **);
+#else
+static inline struct dentry **tpm_bios_log_setup(char *name)
+{
+ return NULL;
+}
+static inline void tpm_bios_log_teardown(struct dentry **dir)
+{
+}
+#endif
diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
new file mode 100644
index 00000000000..aedf7a8e6da
--- /dev/null
+++ b/drivers/char/tpm/tpm_bios.c
@@ -0,0 +1,540 @@
+/*
+ * Copyright (C) 2005 IBM Corporation
+ *
+ * Authors:
+ * Seiji Munetoh <munetoh@jp.ibm.com>
+ * Stefan Berger <stefanb@us.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * Access to the event log extended by the TCG BIOS of PC platforms
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/module.h>
+#include <acpi/acpi.h>
+#include <acpi/actypes.h>
+#include <acpi/actbl.h>
+#include "tpm.h"
+
+#define TCG_EVENT_NAME_LEN_MAX 255
+#define MAX_TEXT_EVENT 1000 /* Max event string length */
+#define ACPI_TCPA_SIG "TCPA" /* 0x41504354 = 'TCPA' */
+
+struct tpm_bios_log {
+ void *bios_event_log;
+ void *bios_event_log_end;
+};
+
+struct acpi_tcpa {
+ struct acpi_table_header hdr;
+ u16 reserved;
+ u32 log_max_len __attribute__ ((packed));
+ u32 log_start_addr __attribute__ ((packed));
+};
+
+struct tcpa_event {
+ u32 pcr_index;
+ u32 event_type;
+ u8 pcr_value[20]; /* SHA1 */
+ u32 event_size;
+ u8 event_data[0];
+};
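
Each log entry is this fixed header immediately followed by event_size bytes of event data, so walking the log means advancing by sizeof(struct tcpa_event) + event_size until an all-zero entry or the end of the area is reached, as the seq_file iterator below does. A hedged user-space sketch of the same walk over an in-memory copy of the log (the struct name and the fabricated sample entry are illustrations only, and the log's little-endian fields are assumed to match the host):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct event_hdr {                      /* mirrors struct tcpa_event */
	uint32_t pcr_index;
	uint32_t event_type;
	uint8_t  pcr_value[20];
	uint32_t event_size;
	/* event_size bytes of event data follow */
};

static void walk_log(const uint8_t *log, size_t len)
{
	const uint8_t *p = log;

	while (p + sizeof(struct event_hdr) <= log + len) {
		struct event_hdr hdr;

		memcpy(&hdr, p, sizeof(hdr));           /* avoid unaligned reads */
		if (hdr.event_type == 0 && hdr.event_size == 0)
			break;                          /* empty entry ends the log */
		printf("PCR %u, type %u, %u data bytes\n",
		       (unsigned)hdr.pcr_index, (unsigned)hdr.event_type,
		       (unsigned)hdr.event_size);
		p += sizeof(struct event_hdr) + hdr.event_size;
	}
}

int main(void)
{
	/* one fabricated entry: PCR 0, POST_CODE, four data bytes */
	uint8_t log[sizeof(struct event_hdr) + 4] = {0};
	struct event_hdr hdr = { .pcr_index = 0, .event_type = 1, .event_size = 4 };

	memcpy(log, &hdr, sizeof(hdr));
	walk_log(log, sizeof(log));
	return 0;
}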
+
+enum tcpa_event_types {
+ PREBOOT = 0,
+ POST_CODE,
+ UNUSED,
+ NO_ACTION,
+ SEPARATOR,
+ ACTION,
+ EVENT_TAG,
+ SCRTM_CONTENTS,
+ SCRTM_VERSION,
+ CPU_MICROCODE,
+ PLATFORM_CONFIG_FLAGS,
+ TABLE_OF_DEVICES,
+ COMPACT_HASH,
+ IPL,
+ IPL_PARTITION_DATA,
+ NONHOST_CODE,
+ NONHOST_CONFIG,
+ NONHOST_INFO,
+};
+
+static const char* tcpa_event_type_strings[] = {
+ "PREBOOT",
+ "POST CODE",
+ "",
+ "NO ACTION",
+ "SEPARATOR",
+ "ACTION",
+ "EVENT TAG",
+ "S-CRTM Contents",
+ "S-CRTM Version",
+ "CPU Microcode",
+ "Platform Config Flags",
+ "Table of Devices",
+ "Compact Hash",
+ "IPL",
+ "IPL Partition Data",
+ "Non-Host Code",
+ "Non-Host Config",
+ "Non-Host Info"
+};
+
+enum tcpa_pc_event_ids {
+ SMBIOS = 1,
+ BIS_CERT,
+ POST_BIOS_ROM,
+ ESCD,
+ CMOS,
+ NVRAM,
+ OPTION_ROM_EXEC,
+ OPTION_ROM_CONFIG,
+ OPTION_ROM_MICROCODE,
+ S_CRTM_VERSION,
+ S_CRTM_CONTENTS,
+ POST_CONTENTS,
+};
+
+static const char* tcpa_pc_event_id_strings[] = {
+ ""
+ "SMBIOS",
+ "BIS Certificate",
+ "POST BIOS ",
+ "ESCD ",
+ "CMOS",
+ "NVRAM",
+ "Option ROM",
+ "Option ROM config",
+ "Option ROM microcode",
+ "S-CRTM Version",
+ "S-CRTM Contents",
+ "S-CRTM POST Contents",
+};
+
+/* return pointer to the entry at position *pos in the TCG log */
+static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t i;
+ struct tpm_bios_log *log = m->private;
+ void *addr = log->bios_event_log;
+ void *limit = log->bios_event_log_end;
+ struct tcpa_event *event;
+
+ /* read over *pos measurements */
+ for (i = 0; i < *pos; i++) {
+ event = addr;
+
+ if ((addr + sizeof(struct tcpa_event)) < limit) {
+ if (event->event_type == 0 && event->event_size == 0)
+ return NULL;
+ addr += sizeof(struct tcpa_event) + event->event_size;
+ }
+ }
+
+ /* now check if current entry is valid */
+ if ((addr + sizeof(struct tcpa_event)) >= limit)
+ return NULL;
+
+ event = addr;
+
+ if ((event->event_type == 0 && event->event_size == 0) ||
+ ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
+ return NULL;
+
+ return addr;
+}
+
+static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
+ loff_t *pos)
+{
+ struct tcpa_event *event = v;
+ struct tpm_bios_log *log = m->private;
+ void *limit = log->bios_event_log_end;
+
+ v += sizeof(struct tcpa_event) + event->event_size;
+
+ /* now check if current entry is valid */
+ if ((v + sizeof(struct tcpa_event)) >= limit)
+ return NULL;
+
+ event = v;
+
+ if (event->event_type == 0 && event->event_size == 0)
+ return NULL;
+
+ if ((event->event_type == 0 && event->event_size == 0) ||
+ ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
+ return NULL;
+
+ (*pos)++;
+ return v;
+}
+
+static void tpm_bios_measurements_stop(struct seq_file *m, void *v)
+{
+}
+
+static int get_event_name(char *dest, struct tcpa_event *event,
+ unsigned char * event_entry)
+{
+ const char *name = "";
+ char data[41] = ""; /* room for 20 bytes as hex plus NUL */
+ int i, n_len = 0, d_len = 0;
+ u32 event_id, event_data_size;
+
+ switch(event->event_type) {
+ case PREBOOT:
+ case POST_CODE:
+ case UNUSED:
+ case NO_ACTION:
+ case SCRTM_CONTENTS:
+ case SCRTM_VERSION:
+ case CPU_MICROCODE:
+ case PLATFORM_CONFIG_FLAGS:
+ case TABLE_OF_DEVICES:
+ case COMPACT_HASH:
+ case IPL:
+ case IPL_PARTITION_DATA:
+ case NONHOST_CODE:
+ case NONHOST_CONFIG:
+ case NONHOST_INFO:
+ name = tcpa_event_type_strings[event->event_type];
+ n_len = strlen(name);
+ break;
+ case SEPARATOR:
+ case ACTION:
+ if (MAX_TEXT_EVENT > event->event_size) {
+ name = event_entry;
+ n_len = event->event_size;
+ }
+ break;
+ case EVENT_TAG:
+ event_id = be32_to_cpu(*((u32 *)event_entry));
+ event_data_size = be32_to_cpu(*((u32 *)&event_entry[4]));
+
+ /* TODO: raw data -> Base64 */
+
+ switch (event_id) {
+ case SMBIOS:
+ case BIS_CERT:
+ case CMOS:
+ case NVRAM:
+ case OPTION_ROM_EXEC:
+ case OPTION_ROM_CONFIG:
+ case OPTION_ROM_MICROCODE:
+ case S_CRTM_VERSION:
+ case S_CRTM_CONTENTS:
+ case POST_CONTENTS:
+ name = tcpa_pc_event_id_strings[event_id];
+ n_len = strlen(name);
+ break;
+ case POST_BIOS_ROM:
+ case ESCD:
+ name = tcpa_pc_event_id_strings[event_id];
+ n_len = strlen(name);
+ for (i = 0; i < 20; i++)
+ d_len += sprintf(&data[d_len], "%02x",
+ event_entry[8 + i]);
+ break;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+ return snprintf(dest, MAX_TEXT_EVENT, "[%.*s%.*s]",
+ n_len, name, d_len, data);
+
+}
+
+static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
+{
+
+ char *eventname;
+ char data[4];
+ u32 help;
+ int i, len;
+ struct tcpa_event *event = (struct tcpa_event *) v;
+ unsigned char *event_entry =
+ (unsigned char *) (v + sizeof(struct tcpa_event));
+
+ eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL);
+ if (!eventname) {
+ printk(KERN_ERR "%s: ERROR - No Memory for event name\n ",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* 1st: PCR used is in little-endian format (4 bytes) */
+ help = le32_to_cpu(event->pcr_index);
+ memcpy(data, &help, 4);
+ for (i = 0; i < 4; i++)
+ seq_putc(m, data[i]);
+
+ /* 2nd: SHA1 (20 bytes) */
+ for (i = 0; i < 20; i++)
+ seq_putc(m, event->pcr_value[i]);
+
+ /* 3rd: event type identifier (4 bytes) */
+ help = le32_to_cpu(event->event_type);
+ memcpy(data, &help, 4);
+ for (i = 0; i < 4; i++)
+ seq_putc(m, data[i]);
+
+ len = 0;
+
+ len += get_event_name(eventname, event, event_entry);
+
+ /* 4th: event name (<= 255 bytes) + '\0' delimiter */
+ if (len > TCG_EVENT_NAME_LEN_MAX)
+ len = TCG_EVENT_NAME_LEN_MAX;
+
+ for (i = 0; i < len; i++)
+ seq_putc(m, eventname[i]);
+
+ /* 5th: delimiter */
+ seq_putc(m, '\0');
+
+ return 0;
+}
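
Each record produced by the binary show routine above is therefore: a 4-byte little-endian PCR index, the 20-byte SHA1 digest, a 4-byte little-endian event type, the bracketed event name (capped at TCG_EVENT_NAME_LEN_MAX bytes) and a single '\0' delimiter. The following user-space sketch parses that stream; the securityfs path is an assumption (securityfs mounted on /sys/kernel/security with a device registered as tpm0), and a little-endian host is assumed for the memcpy decoding.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int read_record(FILE *f)
{
	uint8_t hdr[28];                /* 4 PCR + 20 SHA1 + 4 event type */
	uint32_t pcr, type;
	char name[256];
	size_t n = 0;
	int c;

	if (fread(hdr, 1, sizeof(hdr), f) != sizeof(hdr))
		return -1;
	memcpy(&pcr, hdr, 4);           /* little-endian field, host assumed LE */
	memcpy(&type, hdr + 24, 4);

	/* the event name is terminated by a single '\0' delimiter */
	while ((c = fgetc(f)) != EOF && c != '\0' && n < sizeof(name) - 1)
		name[n++] = (char)c;
	name[n] = '\0';

	printf("PCR %u type %u sha1 %02x%02x... name %s\n",
	       (unsigned)pcr, (unsigned)type, hdr[4], hdr[5], name);
	return 0;
}

int main(void)
{
	FILE *f = fopen("/sys/kernel/security/tpm0/binary_bios_measurements", "rb");

	if (!f)
		return 1;
	while (read_record(f) == 0)
		;
	fclose(f);
	return 0;
}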
+
+static int tpm_bios_measurements_release(struct inode *inode,
+ struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct tpm_bios_log *log = seq->private;
+
+ if (log) {
+ kfree(log->bios_event_log);
+ kfree(log);
+ }
+
+ return seq_release(inode, file);
+}
+
+static int tpm_ascii_bios_measurements_show(struct seq_file *m, void *v)
+{
+ int len = 0;
+ int i;
+ char *eventname;
+ struct tcpa_event *event = v;
+ unsigned char *event_entry =
+ (unsigned char *) (v + sizeof(struct tcpa_event));
+
+ eventname = kmalloc(MAX_TEXT_EVENT, GFP_KERNEL);
+ if (!eventname) {
+ printk(KERN_ERR "%s: ERROR - No Memory for event name\n ",
+ __func__);
+ return -ENOMEM;
+ }
+
+ seq_printf(m, "%2d ", event->pcr_index);
+
+ /* 2nd: SHA1 */
+ for (i = 0; i < 20; i++)
+ seq_printf(m, "%02x", event->pcr_value[i]);
+
+ /* 3rd: event type identifier */
+ seq_printf(m, " %02x", event->event_type);
+
+ len += get_event_name(eventname, event, event_entry);
+
+ /* 4th: event name */
+ seq_printf(m, " %s\n", eventname);
+
+ return 0;
+}
+
+static struct seq_operations tpm_ascii_b_measurments_seqops = {
+ .start = tpm_bios_measurements_start,
+ .next = tpm_bios_measurements_next,
+ .stop = tpm_bios_measurements_stop,
+ .show = tpm_ascii_bios_measurements_show,
+};
+
+static struct seq_operations tpm_binary_b_measurments_seqops = {
+ .start = tpm_bios_measurements_start,
+ .next = tpm_bios_measurements_next,
+ .stop = tpm_bios_measurements_stop,
+ .show = tpm_binary_bios_measurements_show,
+};
+
+/* read binary bios log */
+static int read_log(struct tpm_bios_log *log)
+{
+ struct acpi_tcpa *buff;
+ acpi_status status;
+ void *virt;
+
+ if (log->bios_event_log != NULL) {
+ printk(KERN_ERR
+ "%s: ERROR - Eventlog already initialized\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
+ status = acpi_get_firmware_table(ACPI_TCPA_SIG, 1,
+ ACPI_LOGICAL_ADDRESSING,
+ (struct acpi_table_header **)
+ &buff);
+
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR "%s: ERROR - Could not get TCPA table\n",
+ __func__);
+ return -EIO;
+ }
+
+ if (buff->log_max_len == 0) {
+ printk(KERN_ERR "%s: ERROR - TCPA log area empty\n", __func__);
+ return -EIO;
+ }
+
+ /* malloc EventLog space */
+ log->bios_event_log = kmalloc(buff->log_max_len, GFP_KERNEL);
+ if (!log->bios_event_log) {
+ printk
+ ("%s: ERROR - Not enough Memory for BIOS measurements\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ log->bios_event_log_end = log->bios_event_log + buff->log_max_len;
+
+ acpi_os_map_memory(buff->log_start_addr, buff->log_max_len, &virt);
+
+ memcpy(log->bios_event_log, virt, buff->log_max_len);
+
+ acpi_os_unmap_memory(virt, buff->log_max_len);
+ return 0;
+}
+
+static int tpm_ascii_bios_measurements_open(struct inode *inode,
+ struct file *file)
+{
+ int err;
+ struct tpm_bios_log *log;
+ struct seq_file *seq;
+
+ log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
+ if (!log)
+ return -ENOMEM;
+
+ if ((err = read_log(log))) {
+ kfree(log);
+ return err;
+ }
+
+ /* now register seq file */
+ err = seq_open(file, &tpm_ascii_b_measurments_seqops);
+ if (!err) {
+ seq = file->private_data;
+ seq->private = log;
+ } else {
+ kfree(log->bios_event_log);
+ kfree(log);
+ }
+ return err;
+}
+
+struct file_operations tpm_ascii_bios_measurements_ops = {
+ .open = tpm_ascii_bios_measurements_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = tpm_bios_measurements_release,
+};
+
+static int tpm_binary_bios_measurements_open(struct inode *inode,
+ struct file *file)
+{
+ int err;
+ struct tpm_bios_log *log;
+ struct seq_file *seq;
+
+ log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
+ if (!log)
+ return -ENOMEM;
+
+ if ((err = read_log(log))) {
+ kfree(log);
+ return err;
+ }
+
+ /* now register seq file */
+ err = seq_open(file, &tpm_binary_b_measurments_seqops);
+ if (!err) {
+ seq = file->private_data;
+ seq->private = log;
+ } else {
+ kfree(log->bios_event_log);
+ kfree(log);
+ }
+ return err;
+}
+
+struct file_operations tpm_binary_bios_measurements_ops = {
+ .open = tpm_binary_bios_measurements_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = tpm_bios_measurements_release,
+};
+
+struct dentry **tpm_bios_log_setup(char *name)
+{
+ struct dentry **ret = NULL, *tpm_dir, *bin_file, *ascii_file;
+
+ tpm_dir = securityfs_create_dir(name, NULL);
+ if (!tpm_dir)
+ goto out;
+
+ bin_file =
+ securityfs_create_file("binary_bios_measurements",
+ S_IRUSR | S_IRGRP, tpm_dir, NULL,
+ &tpm_binary_bios_measurements_ops);
+ if (!bin_file)
+ goto out_tpm;
+
+ ascii_file =
+ securityfs_create_file("ascii_bios_measurements",
+ S_IRUSR | S_IRGRP, tpm_dir, NULL,
+ &tpm_ascii_bios_measurements_ops);
+ if (!ascii_file)
+ goto out_bin;
+
+ ret = kmalloc(3 * sizeof(struct dentry *), GFP_KERNEL);
+ if (!ret)
+ goto out_ascii;
+
+ ret[0] = ascii_file;
+ ret[1] = bin_file;
+ ret[2] = tpm_dir;
+
+ return ret;
+
+out_ascii:
+ securityfs_remove(ascii_file);
+out_bin:
+ securityfs_remove(bin_file);
+out_tpm:
+ securityfs_remove(tpm_dir);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(tpm_bios_log_setup);
+
+void tpm_bios_log_teardown(struct dentry **lst)
+{
+ int i;
+
+ for (i = 0; i < 3; i++)
+ securityfs_remove(lst[i]);
+}
+EXPORT_SYMBOL_GPL(tpm_bios_log_teardown);
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 4b1eef51ec5..eb8b5be4e24 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -166,9 +166,12 @@ static struct tty_struct *alloc_tty_struct(void)
return tty;
}
+static void tty_buffer_free_all(struct tty_struct *);
+
static inline void free_tty_struct(struct tty_struct *tty)
{
kfree(tty->write_buf);
+ tty_buffer_free_all(tty);
kfree(tty);
}
@@ -231,6 +234,200 @@ static int check_tty_count(struct tty_struct *tty, const char *routine)
}
/*
+ * Tty buffer allocation management
+ */
+
+static void tty_buffer_free_all(struct tty_struct *tty)
+{
+ struct tty_buffer *thead;
+ while((thead = tty->buf.head) != NULL) {
+ tty->buf.head = thead->next;
+ kfree(thead);
+ }
+ while((thead = tty->buf.free) != NULL) {
+ tty->buf.free = thead->next;
+ kfree(thead);
+ }
+ tty->buf.tail = NULL;
+}
+
+static void tty_buffer_init(struct tty_struct *tty)
+{
+ tty->buf.head = NULL;
+ tty->buf.tail = NULL;
+ tty->buf.free = NULL;
+}
+
+static struct tty_buffer *tty_buffer_alloc(size_t size)
+{
+ struct tty_buffer *p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
+ if(p == NULL)
+ return NULL;
+ p->used = 0;
+ p->size = size;
+ p->next = NULL;
+ p->char_buf_ptr = (char *)(p->data);
+ p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
+/* printk("Flip create %p\n", p); */
+ return p;
+}
+
+/* Must be called with the tty_read lock held. This needs a strategy for
+ deciding whether to kfree or relink a given expired buffer */
+
+static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b)
+{
+ /* Dumb strategy for now - should keep some stats */
+/* printk("Flip dispose %p\n", b); */
+ if(b->size >= 512)
+ kfree(b);
+ else {
+ b->next = tty->buf.free;
+ tty->buf.free = b;
+ }
+}
+
+static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
+{
+ struct tty_buffer **tbh = &tty->buf.free;
+ while((*tbh) != NULL) {
+ struct tty_buffer *t = *tbh;
+ if(t->size >= size) {
+ *tbh = t->next;
+ t->next = NULL;
+ t->used = 0;
+ /* DEBUG ONLY */
+ memset(t->data, '*', size);
+/* printk("Flip recycle %p\n", t); */
+ return t;
+ }
+ tbh = &((*tbh)->next);
+ }
+ /* Round the buffer size out */
+ size = (size + 0xFF) & ~ 0xFF;
+ return tty_buffer_alloc(size);
+ /* Should possibly check if this fails for the largest buffer we
+ have queued and recycle that ? */
+}
+
+int tty_buffer_request_room(struct tty_struct *tty, size_t size)
+{
+ struct tty_buffer *b = tty->buf.tail, *n;
+ int left = 0;
+
+ /* OPTIMISATION: We could keep a per tty "zero" sized buffer to
+ remove this conditional if it's worth it. This would be invisible
+ to the callers */
+ if(b != NULL)
+ left = b->size - b->used;
+ if(left >= size)
+ return size;
+ /* This is the slow path - looking for new buffers to use */
+ n = tty_buffer_find(tty, size);
+ if(n == NULL)
+ return left;
+ if(b != NULL)
+ b->next = n;
+ else
+ tty->buf.head = n;
+ tty->buf.tail = n;
+ return size;
+}
+
+EXPORT_SYMBOL_GPL(tty_buffer_request_room);
+
+int tty_insert_flip_string(struct tty_struct *tty, unsigned char *chars, size_t size)
+{
+ int copied = 0;
+ do {
+ int space = tty_buffer_request_room(tty, size - copied);
+ struct tty_buffer *tb = tty->buf.tail;
+ /* If there is no space then tb may be NULL */
+ if(unlikely(space == 0))
+ break;
+ memcpy(tb->char_buf_ptr + tb->used, chars, space);
+ memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
+ tb->used += space;
+ copied += space;
+ chars += space;
+/* printk("Flip insert %d.\n", space); */
+ }
+ /* There is a small chance that we need to split the data over
+ several buffers. If this is the case we must loop */
+ while (unlikely(size > copied));
+ return copied;
+}
+
+EXPORT_SYMBOL_GPL(tty_insert_flip_string);
+
+int tty_insert_flip_string_flags(struct tty_struct *tty, unsigned char *chars, char *flags, size_t size)
+{
+ int copied = 0;
+ do {
+ int space = tty_buffer_request_room(tty, size - copied);
+ struct tty_buffer *tb = tty->buf.tail;
+ /* If there is no space then tb may be NULL */
+ if(unlikely(space == 0))
+ break;
+ memcpy(tb->char_buf_ptr + tb->used, chars, space);
+ memcpy(tb->flag_buf_ptr + tb->used, flags, space);
+ tb->used += space;
+ copied += space;
+ chars += space;
+ flags += space;
+ }
+ /* There is a small chance that we need to split the data over
+ several buffers. If this is the case we must loop */
+ while (unlikely(size > copied));
+ return copied;
+}
+
+EXPORT_SYMBOL_GPL(tty_insert_flip_string_flags);
+
+
+/*
+ * Prepare a block of space in the buffer for data. Returns the length
+ * available and buffer pointer to the space which is now allocated and
+ * accounted for as ready for normal characters. This is used for drivers
+ * that need their own block copy routines into the buffer. There is no
+ * guarantee the buffer is a DMA target!
+ */
+
+int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size)
+{
+ int space = tty_buffer_request_room(tty, size);
+ struct tty_buffer *tb = tty->buf.tail;
+ /* If no space was found then tb may be NULL */
+ if (likely(space)) {
+ *chars = tb->char_buf_ptr + tb->used;
+ memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
+ tb->used += space;
+ }
+ return space;
+}
+
+EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
+
+/*
+ * Prepare a block of space in the buffer for data. Returns the length
+ * available and buffer pointer to the space which is now allocated and
+ * accounted for as ready for characters. This is used for drivers
+ * that need their own block copy routines into the buffer. There is no
+ * guarantee the buffer is a DMA target!
+ */
+
+int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size)
+{
+ int space = tty_buffer_request_room(tty, size);
+ struct tty_buffer *tb = tty->buf.tail;
+ /* If no space was found then tb may be NULL */
+ if (likely(space)) {
+ *chars = tb->char_buf_ptr + tb->used;
+ *flags = tb->flag_buf_ptr + tb->used;
+ tb->used += space;
+ }
+ return space;
+}
+
+EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
+
+
+
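
The driver conversions elsewhere in this patch (synclinkmp, viocons, vme_scc) all reduce to the same pattern against this new buffer layer: queue each received character with tty_insert_flip_char() and then hand the queued data to the line discipline with tty_flip_buffer_push(). A hedged sketch of a receive path in that style follows; hw_rx_ready(), hw_rx_byte() and hw_rx_error() are placeholder device accessors, not functions defined by this patch.

#include <linux/tty.h>
#include <linux/tty_flip.h>

/* placeholder device accessors, assumed to be provided by the driver */
extern int hw_rx_ready(void);
extern unsigned char hw_rx_byte(void);
extern int hw_rx_error(void);

static void example_rx_isr(struct tty_struct *tty)
{
	unsigned char ch;
	char flag;

	while (hw_rx_ready()) {
		ch = hw_rx_byte();
		flag = hw_rx_error() ? TTY_PARITY : TTY_NORMAL;

		/* buffers are allocated on demand now, so there is no
		   TTY_FLIPBUF_SIZE limit to check against */
		tty_insert_flip_char(tty, ch, flag);
	}

	/* pass everything queued so far to the line discipline */
	tty_flip_buffer_push(tty);
}

For bulk transfers the same pattern uses tty_prepare_flip_string() to reserve space and then copies directly into the returned pointer before pushing.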
+/*
* This is probably overkill for real world processors but
* they are not on hot paths so a little discipline won't do
* any harm.
@@ -492,6 +689,17 @@ restart:
if (ld == NULL)
return -EINVAL;
+ /*
+ * No more input please, we are switching. The new ldisc
+ * will update this value in the ldisc open function
+ */
+
+ tty->receive_room = 0;
+
+ /*
+ * Problem: What do we do if this blocks ?
+ */
+
tty_wait_until_sent(tty, 0);
if (tty->ldisc.num == ldisc) {
@@ -560,9 +768,9 @@ restart:
* we say so later on.
*/
- work = cancel_delayed_work(&tty->flip.work);
+ work = cancel_delayed_work(&tty->buf.work);
/*
- * Wait for ->hangup_work and ->flip.work handlers to terminate
+ * Wait for ->hangup_work and ->buf.work handlers to terminate
*/
flush_scheduled_work();
@@ -616,7 +824,7 @@ restart:
/* Restart it in case no characters kick it off. Safe if
already running */
if (work)
- schedule_delayed_work(&tty->flip.work, 1);
+ schedule_delayed_work(&tty->buf.work, 1);
return retval;
}
@@ -1721,10 +1929,10 @@ static void release_dev(struct file * filp)
*/
clear_bit(TTY_LDISC, &tty->flags);
clear_bit(TTY_DONT_FLIP, &tty->flags);
- cancel_delayed_work(&tty->flip.work);
+ cancel_delayed_work(&tty->buf.work);
/*
- * Wait for ->hangup_work and ->flip.work handlers to terminate
+ * Wait for ->hangup_work and ->buf.work handlers to terminate
*/
flush_scheduled_work();
@@ -2518,17 +2726,15 @@ EXPORT_SYMBOL(do_SAK);
/*
* This routine is called out of the software interrupt to flush data
- * from the flip buffer to the line discipline.
+ * from the buffer chain to the line discipline.
*/
static void flush_to_ldisc(void *private_)
{
struct tty_struct *tty = (struct tty_struct *) private_;
- unsigned char *cp;
- char *fp;
- int count;
unsigned long flags;
struct tty_ldisc *disc;
+ struct tty_buffer *tbuf;
disc = tty_ldisc_ref(tty);
if (disc == NULL) /* !TTY_LDISC */
@@ -2538,28 +2744,23 @@ static void flush_to_ldisc(void *private_)
/*
* Do it after the next timer tick:
*/
- schedule_delayed_work(&tty->flip.work, 1);
+ schedule_delayed_work(&tty->buf.work, 1);
goto out;
}
spin_lock_irqsave(&tty->read_lock, flags);
- if (tty->flip.buf_num) {
- cp = tty->flip.char_buf + TTY_FLIPBUF_SIZE;
- fp = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
- tty->flip.buf_num = 0;
- tty->flip.char_buf_ptr = tty->flip.char_buf;
- tty->flip.flag_buf_ptr = tty->flip.flag_buf;
- } else {
- cp = tty->flip.char_buf;
- fp = tty->flip.flag_buf;
- tty->flip.buf_num = 1;
- tty->flip.char_buf_ptr = tty->flip.char_buf + TTY_FLIPBUF_SIZE;
- tty->flip.flag_buf_ptr = tty->flip.flag_buf + TTY_FLIPBUF_SIZE;
- }
- count = tty->flip.count;
- tty->flip.count = 0;
+ while((tbuf = tty->buf.head) != NULL) {
+ tty->buf.head = tbuf->next;
+ if (tty->buf.head == NULL)
+ tty->buf.tail = NULL;
+ spin_unlock_irqrestore(&tty->read_lock, flags);
+ /* printk("Process buffer %p for %d\n", tbuf, tbuf->used); */
+ disc->receive_buf(tty, tbuf->char_buf_ptr,
+ tbuf->flag_buf_ptr,
+ tbuf->used);
+ spin_lock_irqsave(&tty->read_lock, flags);
+ tty_buffer_free(tty, tbuf);
+ }
spin_unlock_irqrestore(&tty->read_lock, flags);
-
- disc->receive_buf(tty, cp, fp, count);
out:
tty_ldisc_deref(disc);
}
@@ -2654,11 +2855,12 @@ void tty_flip_buffer_push(struct tty_struct *tty)
if (tty->low_latency)
flush_to_ldisc((void *) tty);
else
- schedule_delayed_work(&tty->flip.work, 1);
+ schedule_delayed_work(&tty->buf.work, 1);
}
EXPORT_SYMBOL(tty_flip_buffer_push);
+
/*
* This subroutine initializes a tty structure.
*/
@@ -2669,10 +2871,10 @@ static void initialize_tty_struct(struct tty_struct *tty)
tty_ldisc_assign(tty, tty_ldisc_get(N_TTY));
tty->pgrp = -1;
tty->overrun_time = jiffies;
- tty->flip.char_buf_ptr = tty->flip.char_buf;
- tty->flip.flag_buf_ptr = tty->flip.flag_buf;
- INIT_WORK(&tty->flip.work, flush_to_ldisc, tty);
- init_MUTEX(&tty->flip.pty_sem);
+ tty->buf.head = tty->buf.tail = NULL;
+ tty_buffer_init(tty);
+ INIT_WORK(&tty->buf.work, flush_to_ldisc, tty);
+ init_MUTEX(&tty->buf.pty_sem);
init_MUTEX(&tty->termios_sem);
init_waitqueue_head(&tty->write_wait);
init_waitqueue_head(&tty->read_wait);
diff --git a/drivers/char/viocons.c b/drivers/char/viocons.c
index 4d75c261f98..4e536038874 100644
--- a/drivers/char/viocons.c
+++ b/drivers/char/viocons.c
@@ -131,7 +131,7 @@ static void initDataEvent(struct viocharlpevent *viochar, HvLpIndex lp);
static struct tty_driver *viotty_driver;
-void hvlog(char *fmt, ...)
+static void hvlog(char *fmt, ...)
{
int i;
unsigned long flags;
@@ -147,7 +147,7 @@ void hvlog(char *fmt, ...)
spin_unlock_irqrestore(&consoleloglock, flags);
}
-void hvlogOutput(const char *buf, int count)
+static void hvlogOutput(const char *buf, int count)
{
unsigned long flags;
int begin;
@@ -476,19 +476,19 @@ static struct port_info *get_port_data(struct tty_struct *tty)
*/
static void initDataEvent(struct viocharlpevent *viochar, HvLpIndex lp)
{
+ struct HvLpEvent *hev = &viochar->event;
+
memset(viochar, 0, sizeof(struct viocharlpevent));
- viochar->event.xFlags.xValid = 1;
- viochar->event.xFlags.xFunction = HvLpEvent_Function_Int;
- viochar->event.xFlags.xAckInd = HvLpEvent_AckInd_NoAck;
- viochar->event.xFlags.xAckType = HvLpEvent_AckType_DeferredAck;
- viochar->event.xType = HvLpEvent_Type_VirtualIo;
- viochar->event.xSubtype = viomajorsubtype_chario | viochardata;
- viochar->event.xSourceLp = HvLpConfig_getLpIndex();
- viochar->event.xTargetLp = lp;
- viochar->event.xSizeMinus1 = sizeof(struct viocharlpevent);
- viochar->event.xSourceInstanceId = viopath_sourceinst(lp);
- viochar->event.xTargetInstanceId = viopath_targetinst(lp);
+ hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DEFERRED_ACK |
+ HV_LP_EVENT_INT;
+ hev->xType = HvLpEvent_Type_VirtualIo;
+ hev->xSubtype = viomajorsubtype_chario | viochardata;
+ hev->xSourceLp = HvLpConfig_getLpIndex();
+ hev->xTargetLp = lp;
+ hev->xSizeMinus1 = sizeof(struct viocharlpevent);
+ hev->xSourceInstanceId = viopath_sourceinst(lp);
+ hev->xTargetInstanceId = viopath_targetinst(lp);
}
/*
@@ -752,7 +752,7 @@ static void vioHandleOpenEvent(struct HvLpEvent *event)
struct port_info *pi;
int reject = 0;
- if (event->xFlags.xFunction == HvLpEvent_Function_Ack) {
+ if (hvlpevent_is_ack(event)) {
if (port >= VTTY_PORTS)
return;
@@ -788,7 +788,7 @@ static void vioHandleOpenEvent(struct HvLpEvent *event)
}
/* This had better require an ack, otherwise complain */
- if (event->xFlags.xAckInd != HvLpEvent_AckInd_DoAck) {
+ if (!hvlpevent_need_ack(event)) {
printk(VIOCONS_KERN_WARN "viocharopen without ack bit!\n");
return;
}
@@ -856,7 +856,7 @@ static void vioHandleCloseEvent(struct HvLpEvent *event)
struct viocharlpevent *cevent = (struct viocharlpevent *)event;
u8 port = cevent->virtual_device;
- if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
+ if (hvlpevent_is_int(event)) {
if (port >= VTTY_PORTS) {
printk(VIOCONS_KERN_WARN
"close message from invalid virtual device.\n");
@@ -904,6 +904,7 @@ static void vioHandleData(struct HvLpEvent *event)
struct viocharlpevent *cevent = (struct viocharlpevent *)event;
struct port_info *pi;
int index;
+ int num_pushed;
u8 port = cevent->virtual_device;
if (port >= VTTY_PORTS) {
@@ -964,6 +965,7 @@ static void vioHandleData(struct HvLpEvent *event)
* functionality will only work if built into the kernel and
* then only if sysrq is enabled through the proc filesystem.
*/
+ num_pushed = 0;
for (index = 0; index < cevent->len; index++) {
#ifdef CONFIG_MAGIC_SYSRQ
if (sysrq_enabled) {
@@ -993,16 +995,14 @@ static void vioHandleData(struct HvLpEvent *event)
* Don't attempt to copy more data into the buffer than we
* have room for because it would fail without indication.
*/
- if ((tty->flip.count + 1) > TTY_FLIPBUF_SIZE) {
+ if(tty_insert_flip_char(tty, cevent->data[index], TTY_NORMAL) == 0) {
printk(VIOCONS_KERN_WARN "input buffer overflow!\n");
break;
}
- tty_insert_flip_char(tty, cevent->data[index], TTY_NORMAL);
+ num_pushed++;
}
- /* if cevent->len == 0 then no data was added to the buffer and flip.count == 0 */
- if (tty->flip.count)
- /* The next call resets flip.count when the data is flushed. */
+ if (num_pushed)
tty_flip_buffer_push(tty);
}
@@ -1056,8 +1056,7 @@ static void vioHandleCharEvent(struct HvLpEvent *event)
vioHandleConfig(event);
break;
default:
- if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
- (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
+ if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) {
event->xRc = HvLpEvent_Rc_InvalidSubtype;
HvCallEvent_ackLpEvent(event);
}
diff --git a/drivers/char/vme_scc.c b/drivers/char/vme_scc.c
index 19ba83635dd..d9325281e48 100644
--- a/drivers/char/vme_scc.c
+++ b/drivers/char/vme_scc.c
@@ -434,13 +434,7 @@ static irqreturn_t scc_rx_int(int irq, void *data, struct pt_regs *fp)
SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET);
return IRQ_HANDLED;
}
- if (tty->flip.count < TTY_FLIPBUF_SIZE) {
- *tty->flip.char_buf_ptr = ch;
- *tty->flip.flag_buf_ptr = 0;
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
- }
+ tty_insert_flip_char(tty, ch, 0);
/* Check if another character is already ready; in that case, the
* spcond_int() function must be used, because this character may have an
@@ -487,13 +481,7 @@ static irqreturn_t scc_spcond_int(int irq, void *data, struct pt_regs *fp)
else
err = 0;
- if (tty->flip.count < TTY_FLIPBUF_SIZE) {
- *tty->flip.char_buf_ptr = ch;
- *tty->flip.flag_buf_ptr = err;
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
- }
+ tty_insert_flip_char(tty, ch, err);
/* ++TeSche: *All* errors have to be cleared manually,
* else the condition persists for the next chars
@@ -875,13 +863,13 @@ static int scc_open (struct tty_struct * tty, struct file * filp)
local_irq_save(flags);
#if defined(CONFIG_MVME147_SCC) || defined(CONFIG_MVME162_SCC)
if (MACH_IS_MVME147 || MACH_IS_MVME16x) {
- for (i=0; i<sizeof(mvme_init_tab)/sizeof(*mvme_init_tab); ++i)
+ for (i = 0; i < ARRAY_SIZE(mvme_init_tab); ++i)
SCCwrite(mvme_init_tab[i].reg, mvme_init_tab[i].val);
}
#endif
#if defined(CONFIG_BVME6000_SCC)
if (MACH_IS_BVME6000) {
- for (i=0; i<sizeof(bvme_init_tab)/sizeof(*bvme_init_tab); ++i)
+ for (i = 0; i < ARRAY_SIZE(bvme_init_tab); ++i)
SCCwrite(bvme_init_tab[i].reg, bvme_init_tab[i].val);
}
#endif
diff --git a/drivers/char/vr41xx_giu.c b/drivers/char/vr41xx_giu.c
index 9ac6d43437b..2267c7b8179 100644
--- a/drivers/char/vr41xx_giu.c
+++ b/drivers/char/vr41xx_giu.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2002 MontaVista Software Inc.
* Author: Yoichi Yuasa <yyuasa@mvista.com or source@mvista.com>
- * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -35,7 +35,7 @@
#include <asm/vr41xx/giu.h>
#include <asm/vr41xx/vr41xx.h>
-MODULE_AUTHOR("Yoichi Yuasa <yuasa@hh.iij4u.or.jp>");
+MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>");
MODULE_DESCRIPTION("NEC VR4100 series General-purpose I/O Unit driver");
MODULE_LICENSE("GPL");
@@ -718,7 +718,7 @@ static struct platform_driver giu_device_driver = {
},
};
-static int __devinit vr41xx_giu_init(void)
+static int __init vr41xx_giu_init(void)
{
int retval;
@@ -733,7 +733,7 @@ static int __devinit vr41xx_giu_init(void)
return retval;
}
-static void __devexit vr41xx_giu_exit(void)
+static void __exit vr41xx_giu_exit(void)
{
platform_driver_unregister(&giu_device_driver);
diff --git a/drivers/char/vr41xx_rtc.c b/drivers/char/vr41xx_rtc.c
index 435b30748e2..bc1b4a15212 100644
--- a/drivers/char/vr41xx_rtc.c
+++ b/drivers/char/vr41xx_rtc.c
@@ -1,7 +1,7 @@
/*
* Driver for NEC VR4100 series Real Time Clock unit.
*
- * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -37,7 +37,7 @@
#include <asm/uaccess.h>
#include <asm/vr41xx/vr41xx.h>
-MODULE_AUTHOR("Yoichi Yuasa <yuasa@hh.iij4u.or.jp>");
+MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>");
MODULE_DESCRIPTION("NEC VR4100 series RTC driver");
MODULE_LICENSE("GPL");
@@ -127,8 +127,6 @@ struct resource rtc_resource[2] = {
.flags = IORESOURCE_MEM, },
};
-#define RTC_NUM_RESOURCES sizeof(rtc_resource) / sizeof(struct resource)
-
static inline unsigned long read_elapsed_second(void)
{
unsigned long first_low, first_mid, first_high;
@@ -686,7 +684,8 @@ static int __devinit vr41xx_rtc_init(void)
break;
}
- rtc_platform_device = platform_device_register_simple("RTC", -1, rtc_resource, RTC_NUM_RESOURCES);
+ rtc_platform_device = platform_device_register_simple("RTC", -1,
+ rtc_resource, ARRAY_SIZE(rtc_resource));
if (IS_ERR(rtc_platform_device))
return PTR_ERR(rtc_platform_device);
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index e91268e8683..f1d9cb7feae 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -2758,29 +2758,6 @@ static void set_vesa_blanking(char __user *p)
vesa_blank_mode = (mode < 4) ? mode : 0;
}
-/*
- * This is called by a timer handler
- */
-static void vesa_powerdown(void)
-{
- struct vc_data *c = vc_cons[fg_console].d;
- /*
- * Power down if currently suspended (1 or 2),
- * suspend if currently blanked (0),
- * else do nothing (i.e. already powered down (3)).
- * Called only if powerdown features are allowed.
- */
- switch (vesa_blank_mode) {
- case VESA_NO_BLANKING:
- c->vc_sw->con_blank(c, VESA_VSYNC_SUSPEND+1, 0);
- break;
- case VESA_VSYNC_SUSPEND:
- case VESA_HSYNC_SUSPEND:
- c->vc_sw->con_blank(c, VESA_POWERDOWN+1, 0);
- break;
- }
-}
-
void do_blank_screen(int entering_gfx)
{
struct vc_data *vc = vc_cons[fg_console].d;
@@ -2791,8 +2768,7 @@ void do_blank_screen(int entering_gfx)
if (console_blanked) {
if (blank_state == blank_vesa_wait) {
blank_state = blank_off;
- vesa_powerdown();
-
+ vc->vc_sw->con_blank(vc, vesa_blank_mode + 1, 0);
}
return;
}
@@ -2822,7 +2798,7 @@ void do_blank_screen(int entering_gfx)
save_screen(vc);
/* In case we need to reset origin, blanking hook returns 1 */
- i = vc->vc_sw->con_blank(vc, 1, 0);
+ i = vc->vc_sw->con_blank(vc, vesa_off_interval ? 1 : (vesa_blank_mode + 1), 0);
console_blanked = fg_console + 1;
if (i)
set_origin(vc);
@@ -2830,13 +2806,10 @@ void do_blank_screen(int entering_gfx)
if (console_blank_hook && console_blank_hook(1))
return;
- if (vesa_off_interval) {
+ if (vesa_off_interval && vesa_blank_mode) {
blank_state = blank_vesa_wait;
mod_timer(&console_timer, jiffies + vesa_off_interval);
}
-
- if (vesa_blank_mode)
- vc->vc_sw->con_blank(vc, vesa_blank_mode + 1, 0);
}
EXPORT_SYMBOL(do_blank_screen);
diff --git a/drivers/char/watchdog/Kconfig b/drivers/char/watchdog/Kconfig
index 344001b45af..a6544790af6 100644
--- a/drivers/char/watchdog/Kconfig
+++ b/drivers/char/watchdog/Kconfig
@@ -438,7 +438,7 @@ config INDYDOG
config ZVM_WATCHDOG
tristate "z/VM Watchdog Timer"
- depends on WATCHDOG && ARCH_S390
+ depends on WATCHDOG && S390
help
IBM s/390 and zSeries machines running under z/VM 5.1 or later
provide a virtual watchdog timer to their guest that cause a
diff --git a/drivers/char/watchdog/cpu5wdt.c b/drivers/char/watchdog/cpu5wdt.c
index e75045fe264..3e8410b5a65 100644
--- a/drivers/char/watchdog/cpu5wdt.c
+++ b/drivers/char/watchdog/cpu5wdt.c
@@ -28,6 +28,7 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/timer.h>
+#include <linux/completion.h>
#include <linux/jiffies.h>
#include <asm/io.h>
#include <asm/uaccess.h>
@@ -57,7 +58,7 @@ static int ticks = 10000;
/* some device data */
static struct {
- struct semaphore stop;
+ struct completion stop;
volatile int running;
struct timer_list timer;
volatile int queue;
@@ -85,7 +86,7 @@ static void cpu5wdt_trigger(unsigned long unused)
}
else {
/* ticks doesn't matter anyway */
- up(&cpu5wdt_device.stop);
+ complete(&cpu5wdt_device.stop);
}
}
@@ -239,7 +240,7 @@ static int __devinit cpu5wdt_init(void)
if ( !val )
printk(KERN_INFO PFX "sorry, was my fault\n");
- init_MUTEX_LOCKED(&cpu5wdt_device.stop);
+ init_completion(&cpu5wdt_device.stop);
cpu5wdt_device.queue = 0;
clear_bit(0, &cpu5wdt_device.inuse);
@@ -269,7 +270,7 @@ static void __devexit cpu5wdt_exit(void)
{
if ( cpu5wdt_device.queue ) {
cpu5wdt_device.queue = 0;
- down(&cpu5wdt_device.stop);
+ wait_for_completion(&cpu5wdt_device.stop);
}
misc_deregister(&cpu5wdt_misc);
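The locked-semaphore handshake becomes a completion; a minimal sketch of the same three calls, using a hypothetical demo structure in place of cpu5wdt_device:

#include <linux/completion.h>

static struct {
	struct completion stop;
} demo;

static void demo_init(void)
{
	init_completion(&demo.stop);		/* replaces init_MUTEX_LOCKED() */
}

static void demo_timer_tick(void)
{
	complete(&demo.stop);			/* replaces up() in the timer handler */
}

static void demo_exit(void)
{
	wait_for_completion(&demo.stop);	/* replaces down() on module exit */
}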
diff --git a/drivers/char/watchdog/mpc8xx_wdt.c b/drivers/char/watchdog/mpc8xx_wdt.c
index 56d62ba7c6c..b2fc71e2085 100644
--- a/drivers/char/watchdog/mpc8xx_wdt.c
+++ b/drivers/char/watchdog/mpc8xx_wdt.c
@@ -18,6 +18,7 @@
#include <linux/watchdog.h>
#include <asm/8xx_immap.h>
#include <asm/uaccess.h>
+#include <asm/io.h>
#include <syslib/m8xx_wdt.h>
static unsigned long wdt_opened;
@@ -25,18 +26,26 @@ static int wdt_status;
static void mpc8xx_wdt_handler_disable(void)
{
- volatile immap_t *imap = (volatile immap_t *)IMAP_ADDR;
+ volatile uint __iomem *piscr;
+ piscr = (uint *)&((immap_t*)IMAP_ADDR)->im_sit.sit_piscr;
- imap->im_sit.sit_piscr &= ~(PISCR_PIE | PISCR_PTE);
+ if (!m8xx_has_internal_rtc)
+ m8xx_wdt_stop_timer();
+ else
+ out_be32(piscr, in_be32(piscr) & ~(PISCR_PIE | PISCR_PTE));
printk(KERN_NOTICE "mpc8xx_wdt: keep-alive handler deactivated\n");
}
static void mpc8xx_wdt_handler_enable(void)
{
- volatile immap_t *imap = (volatile immap_t *)IMAP_ADDR;
+ volatile uint __iomem *piscr;
+ piscr = (uint *)&((immap_t*)IMAP_ADDR)->im_sit.sit_piscr;
- imap->im_sit.sit_piscr |= PISCR_PIE | PISCR_PTE;
+ if (!m8xx_has_internal_rtc)
+ m8xx_wdt_install_timer();
+ else
+ out_be32(piscr, in_be32(piscr) | PISCR_PIE | PISCR_PTE);
printk(KERN_NOTICE "mpc8xx_wdt: keep-alive handler activated\n");
}
@@ -68,9 +77,6 @@ static int mpc8xx_wdt_release(struct inode *inode, struct file *file)
static ssize_t mpc8xx_wdt_write(struct file *file, const char *data, size_t len,
loff_t * ppos)
{
- if (ppos != &file->f_pos)
- return -ESPIPE;
-
if (len)
m8xx_wdt_reset();
diff --git a/drivers/char/watchdog/s3c2410_wdt.c b/drivers/char/watchdog/s3c2410_wdt.c
index eb667daee19..9dc54736e4e 100644
--- a/drivers/char/watchdog/s3c2410_wdt.c
+++ b/drivers/char/watchdog/s3c2410_wdt.c
@@ -46,12 +46,12 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
+#include <linux/clk.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/arch/map.h>
-#include <asm/hardware/clock.h>
#undef S3C24XX_VA_WATCHDOG
#define S3C24XX_VA_WATCHDOG (0)
@@ -397,7 +397,6 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
return -ENOENT;
}
- clk_use(wdt_clock);
clk_enable(wdt_clock);
/* see if we can actually set the requested timer margin, and if
@@ -444,7 +443,6 @@ static int s3c2410wdt_remove(struct platform_device *dev)
if (wdt_clock != NULL) {
clk_disable(wdt_clock);
- clk_unuse(wdt_clock);
clk_put(wdt_clock);
wdt_clock = NULL;
}
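With <asm/hardware/clock.h> gone, the <linux/clk.h> API has no separate use count, so clk_use()/clk_unuse() simply drop out. A minimal sketch of the remaining sequence; the "watchdog" clock name and the demo_* names are assumptions, since the clk_get() call sits outside this hunk:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static struct clk *demo_clock;

static int demo_probe(struct platform_device *pdev)
{
	demo_clock = clk_get(&pdev->dev, "watchdog");	/* name is an assumption */
	if (IS_ERR(demo_clock))
		return PTR_ERR(demo_clock);
	clk_enable(demo_clock);		/* no separate clk_use() step any more */
	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	clk_disable(demo_clock);	/* no clk_unuse() step any more */
	clk_put(demo_clock);
	return 0;
}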
diff --git a/drivers/char/watchdog/wdt977.c b/drivers/char/watchdog/wdt977.c
index 44d49dfacbb..3843900e94c 100644
--- a/drivers/char/watchdog/wdt977.c
+++ b/drivers/char/watchdog/wdt977.c
@@ -1,5 +1,5 @@
/*
- * Wdt977 0.03: A Watchdog Device for Netwinder W83977AF chip
+ * Wdt977 0.04: A Watchdog Device for Netwinder W83977AF chip
*
* (c) Copyright 1998 Rebel.com (Woody Suwalski <woody@netwinder.org>)
*
@@ -18,6 +18,8 @@
* from minutes to seconds.
* 07-Jul-2003 Daniele Bellucci: Audit return code of misc_register in
* nwwatchdog_init.
+ * 25-Oct-2005 Woody Suwalski: Convert addresses to #defs, add spinlocks
+ * remove the limitation of being usable on Netwinders only
*/
#include <linux/module.h>
@@ -28,6 +30,7 @@
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/init.h>
+#include <linux/ioport.h>
#include <linux/watchdog.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
@@ -37,8 +40,18 @@
#include <asm/mach-types.h>
#include <asm/uaccess.h>
-#define PFX "Wdt977: "
-#define WATCHDOG_MINOR 130
+#define WATCHDOG_VERSION "0.04"
+#define WATCHDOG_NAME "Wdt977"
+#define PFX WATCHDOG_NAME ": "
+#define DRIVER_VERSION WATCHDOG_NAME " driver, v" WATCHDOG_VERSION "\n"
+
+#define IO_INDEX_PORT 0x370 /* on some systems it can be 0x3F0 */
+#define IO_DATA_PORT (IO_INDEX_PORT+1)
+
+#define UNLOCK_DATA 0x87
+#define LOCK_DATA 0xAA
+#define DEVICE_REGISTER 0x07
+
#define DEFAULT_TIMEOUT 60 /* default timeout in seconds */
@@ -47,6 +60,7 @@ static int timeoutM; /* timeout in minutes */
static unsigned long timer_alive;
static int testmode;
static char expect_close;
+static spinlock_t spinlock;
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,"Watchdog timeout in seconds (60..15300), default=" __MODULE_STRING(DEFAULT_TIMEOUT) ")");
@@ -63,9 +77,13 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CON
static int wdt977_start(void)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&spinlock, flags);
+
/* unlock the SuperIO chip */
- outb(0x87,0x370);
- outb(0x87,0x370);
+ outb_p(UNLOCK_DATA, IO_INDEX_PORT);
+ outb_p(UNLOCK_DATA, IO_INDEX_PORT);
/* select device Aux2 (device=8) and set watchdog regs F2, F3 and F4
* F2 has the timeout in minutes
@@ -73,28 +91,29 @@ static int wdt977_start(void)
* at timeout, and to reset timer on kbd/mouse activity (not impl.)
* F4 is used to just clear the TIMEOUT'ed state (bit 0)
*/
- outb(0x07,0x370);
- outb(0x08,0x371);
- outb(0xF2,0x370);
- outb(timeoutM,0x371);
- outb(0xF3,0x370);
- outb(0x00,0x371); /* another setting is 0E for kbd/mouse/LED */
- outb(0xF4,0x370);
- outb(0x00,0x371);
+ outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
+ outb_p(0x08, IO_DATA_PORT);
+ outb_p(0xF2, IO_INDEX_PORT);
+ outb_p(timeoutM, IO_DATA_PORT);
+ outb_p(0xF3, IO_INDEX_PORT);
+ outb_p(0x00, IO_DATA_PORT); /* another setting is 0E for kbd/mouse/LED */
+ outb_p(0xF4, IO_INDEX_PORT);
+ outb_p(0x00, IO_DATA_PORT);
/* at last select device Aux1 (dev=7) and set GP16 as a watchdog output */
/* in test mode watch the bit 1 on F4 to indicate "triggered" */
if (!testmode)
{
- outb(0x07,0x370);
- outb(0x07,0x371);
- outb(0xE6,0x370);
- outb(0x08,0x371);
+ outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
+ outb_p(0x07, IO_DATA_PORT);
+ outb_p(0xE6, IO_INDEX_PORT);
+ outb_p(0x08, IO_DATA_PORT);
}
/* lock the SuperIO chip */
- outb(0xAA,0x370);
+ outb_p(LOCK_DATA, IO_INDEX_PORT);
+ spin_unlock_irqrestore(&spinlock, flags);
printk(KERN_INFO PFX "activated.\n");
return 0;
@@ -106,35 +125,39 @@ static int wdt977_start(void)
static int wdt977_stop(void)
{
+ unsigned long flags;
+ spin_lock_irqsave(&spinlock, flags);
+
/* unlock the SuperIO chip */
- outb(0x87,0x370);
- outb(0x87,0x370);
+ outb_p(UNLOCK_DATA, IO_INDEX_PORT);
+ outb_p(UNLOCK_DATA, IO_INDEX_PORT);
/* select device Aux2 (device=8) and set watchdog regs F2,F3 and F4
* F3 is reset to its default state
* F4 can clear the TIMEOUT'ed state (bit 0) - back to default
* We can not use GP17 as a PowerLed, as it is already used as a RedLed
*/
- outb(0x07,0x370);
- outb(0x08,0x371);
- outb(0xF2,0x370);
- outb(0xFF,0x371);
- outb(0xF3,0x370);
- outb(0x00,0x371);
- outb(0xF4,0x370);
- outb(0x00,0x371);
- outb(0xF2,0x370);
- outb(0x00,0x371);
+ outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
+ outb_p(0x08, IO_DATA_PORT);
+ outb_p(0xF2, IO_INDEX_PORT);
+ outb_p(0xFF, IO_DATA_PORT);
+ outb_p(0xF3, IO_INDEX_PORT);
+ outb_p(0x00, IO_DATA_PORT);
+ outb_p(0xF4, IO_INDEX_PORT);
+ outb_p(0x00, IO_DATA_PORT);
+ outb_p(0xF2, IO_INDEX_PORT);
+ outb_p(0x00, IO_DATA_PORT);
/* at last select device Aux1 (dev=7) and set GP16 as a watchdog output */
- outb(0x07,0x370);
- outb(0x07,0x371);
- outb(0xE6,0x370);
- outb(0x08,0x371);
+ outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
+ outb_p(0x07, IO_DATA_PORT);
+ outb_p(0xE6, IO_INDEX_PORT);
+ outb_p(0x08, IO_DATA_PORT);
/* lock the SuperIO chip */
- outb(0xAA,0x370);
+ outb_p(LOCK_DATA, IO_INDEX_PORT);
+ spin_unlock_irqrestore(&spinlock, flags);
printk(KERN_INFO PFX "shutdown.\n");
return 0;
@@ -147,19 +170,23 @@ static int wdt977_stop(void)
static int wdt977_keepalive(void)
{
+ unsigned long flags;
+ spin_lock_irqsave(&spinlock, flags);
+
/* unlock the SuperIO chip */
- outb(0x87,0x370);
- outb(0x87,0x370);
+ outb_p(UNLOCK_DATA, IO_INDEX_PORT);
+ outb_p(UNLOCK_DATA, IO_INDEX_PORT);
/* select device Aux2 (device=8) and kicks watchdog reg F2 */
/* F2 has the timeout in minutes */
- outb(0x07,0x370);
- outb(0x08,0x371);
- outb(0xF2,0x370);
- outb(timeoutM,0x371);
+ outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
+ outb_p(0x08, IO_DATA_PORT);
+ outb_p(0xF2, IO_INDEX_PORT);
+ outb_p(timeoutM, IO_DATA_PORT);
/* lock the SuperIO chip */
- outb(0xAA,0x370);
+ outb_p(LOCK_DATA, IO_INDEX_PORT);
+ spin_unlock_irqrestore(&spinlock, flags);
return 0;
}
@@ -198,22 +225,26 @@ static int wdt977_set_timeout(int t)
static int wdt977_get_status(int *status)
{
int new_status;
+ unsigned long flags;
- *status=0;
+ spin_lock_irqsave(&spinlock, flags);
/* unlock the SuperIO chip */
- outb(0x87,0x370);
- outb(0x87,0x370);
+ outb_p(UNLOCK_DATA, IO_INDEX_PORT);
+ outb_p(UNLOCK_DATA, IO_INDEX_PORT);
/* select device Aux2 (device=8) and read watchdog reg F4 */
- outb(0x07,0x370);
- outb(0x08,0x371);
- outb(0xF4,0x370);
- new_status = inb(0x371);
+ outb_p(DEVICE_REGISTER, IO_INDEX_PORT);
+ outb_p(0x08, IO_DATA_PORT);
+ outb_p(0xF4, IO_INDEX_PORT);
+ new_status = inb_p(IO_DATA_PORT);
/* lock the SuperIO chip */
- outb(0xAA,0x370);
+ outb_p(LOCK_DATA, IO_INDEX_PORT);
+ spin_unlock_irqrestore(&spinlock, flags);
+
+ *status=0;
if (new_status & 1)
*status |= WDIOF_CARDRESET;
@@ -249,8 +280,8 @@ static int wdt977_release(struct inode *inode, struct file *file)
wdt977_stop();
clear_bit(0,&timer_alive);
} else {
- printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n");
wdt977_keepalive();
+ printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n");
}
expect_close = 0;
return 0;
@@ -271,14 +302,17 @@ static int wdt977_release(struct inode *inode, struct file *file)
static ssize_t wdt977_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- if (count) {
- if (!nowayout) {
+ if (count)
+ {
+ if (!nowayout)
+ {
size_t i;
/* In case it was set long ago */
expect_close = 0;
- for (i = 0; i != count; i++) {
+ for (i = 0; i != count; i++)
+ {
char c;
if (get_user(c, buf + i))
return -EFAULT;
@@ -287,6 +321,7 @@ static ssize_t wdt977_write(struct file *file, const char __user *buf,
}
}
+ /* someone wrote to us, we should restart timer */
wdt977_keepalive();
}
return count;
@@ -308,7 +343,7 @@ static struct watchdog_info ident = {
WDIOF_MAGICCLOSE |
WDIOF_KEEPALIVEPING,
.firmware_version = 1,
- .identity = "Winbond 83977",
+ .identity = WATCHDOG_NAME,
};
static int wdt977_ioctl(struct inode *inode, struct file *file,
@@ -405,50 +440,81 @@ static struct notifier_block wdt977_notifier = {
.notifier_call = wdt977_notify_sys,
};
-static int __init nwwatchdog_init(void)
+static int __init wd977_init(void)
{
- int retval;
- if (!machine_is_netwinder())
- return -ENODEV;
+ int rc;
+
+ //if (!machine_is_netwinder())
+ // return -ENODEV;
+
+ printk(KERN_INFO PFX DRIVER_VERSION);
+
+ spin_lock_init(&spinlock);
/* Check that the timeout value is within its range; if not, reset to the default */
- if (wdt977_set_timeout(timeout)) {
+ if (wdt977_set_timeout(timeout))
+ {
wdt977_set_timeout(DEFAULT_TIMEOUT);
printk(KERN_INFO PFX "timeout value must be 60<timeout<15300, using %d\n",
DEFAULT_TIMEOUT);
}
- retval = register_reboot_notifier(&wdt977_notifier);
- if (retval) {
- printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n",
- retval);
- return retval;
+ /* on Netwinder the IOports are already reserved by
+ * arch/arm/mach-footbridge/netwinder-hw.c
+ */
+ if (!machine_is_netwinder())
+ {
+ if (!request_region(IO_INDEX_PORT, 2, WATCHDOG_NAME))
+ {
+ printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
+ IO_INDEX_PORT);
+ rc = -EIO;
+ goto err_out;
+ }
}
- retval = misc_register(&wdt977_miscdev);
- if (retval) {
+ rc = misc_register(&wdt977_miscdev);
+ if (rc)
+ {
printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n",
- WATCHDOG_MINOR, retval);
- unregister_reboot_notifier(&wdt977_notifier);
- return retval;
+ wdt977_miscdev.minor, rc);
+ goto err_out_region;
+ }
+
+ rc = register_reboot_notifier(&wdt977_notifier);
+ if (rc)
+ {
+ printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n",
+ rc);
+ goto err_out_miscdev;
}
- printk(KERN_INFO PFX "initialized. timeout=%d sec (nowayout=%d, testmode = %i)\n",
+ printk(KERN_INFO PFX "initialized. timeout=%d sec (nowayout=%d, testmode=%i)\n",
timeout, nowayout, testmode);
return 0;
+
+err_out_miscdev:
+ misc_deregister(&wdt977_miscdev);
+err_out_region:
+ if (!machine_is_netwinder())
+ release_region(IO_INDEX_PORT,2);
+err_out:
+ return rc;
}
-static void __exit nwwatchdog_exit(void)
+static void __exit wd977_exit(void)
{
+ wdt977_stop();
misc_deregister(&wdt977_miscdev);
unregister_reboot_notifier(&wdt977_notifier);
+ release_region(IO_INDEX_PORT,2);
}
-module_init(nwwatchdog_init);
-module_exit(nwwatchdog_exit);
+module_init(wd977_init);
+module_exit(wd977_exit);
-MODULE_AUTHOR("Woody Suwalski <woody@netwinder.org>");
+MODULE_AUTHOR("Woody Suwalski <woodys@xandros.com>");
MODULE_DESCRIPTION("W83977AF Watchdog driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
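Every register access in this driver now follows the same locked SuperIO index/data sequence; a minimal sketch of the pattern, mirroring wdt977_keepalive() and using the driver's own defines and spinlock from above:

static void demo_kick(int minutes)
{
	unsigned long flags;

	spin_lock_irqsave(&spinlock, flags);

	outb_p(UNLOCK_DATA, IO_INDEX_PORT);	/* 0x87 written twice unlocks the chip */
	outb_p(UNLOCK_DATA, IO_INDEX_PORT);

	outb_p(DEVICE_REGISTER, IO_INDEX_PORT);	/* select logical device ... */
	outb_p(0x08, IO_DATA_PORT);		/* ... Aux2, which holds the watchdog regs */

	outb_p(0xF2, IO_INDEX_PORT);		/* F2 = timeout in minutes */
	outb_p(minutes, IO_DATA_PORT);

	outb_p(LOCK_DATA, IO_INDEX_PORT);	/* 0xAA locks the chip again */

	spin_unlock_irqrestore(&spinlock, flags);
}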
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 969d2b4aaec..4b4d7db1ff7 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/init.h>
#include <asm/atomic.h>
@@ -34,14 +35,14 @@
static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
-/* proc_counts is used as the sequence number of the netlink message */
+/* proc_event_counts is used as the sequence number of the netlink message */
static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
static inline void get_seq(__u32 *ts, int *cpu)
{
*ts = get_cpu_var(proc_event_counts)++;
*cpu = smp_processor_id();
- put_cpu_var(proc_counts);
+ put_cpu_var(proc_event_counts);
}
void proc_fork_connector(struct task_struct *task)
@@ -56,7 +57,7 @@ void proc_fork_connector(struct task_struct *task)
msg = (struct cn_msg*)buffer;
ev = (struct proc_event*)msg->data;
get_seq(&msg->seq, &ev->cpu);
- getnstimestamp(&ev->timestamp);
+ ktime_get_ts(&ev->timestamp); /* get high res monotonic timestamp */
ev->what = PROC_EVENT_FORK;
ev->event_data.fork.parent_pid = task->real_parent->pid;
ev->event_data.fork.parent_tgid = task->real_parent->tgid;
@@ -82,7 +83,7 @@ void proc_exec_connector(struct task_struct *task)
msg = (struct cn_msg*)buffer;
ev = (struct proc_event*)msg->data;
get_seq(&msg->seq, &ev->cpu);
- getnstimestamp(&ev->timestamp);
+ ktime_get_ts(&ev->timestamp);
ev->what = PROC_EVENT_EXEC;
ev->event_data.exec.process_pid = task->pid;
ev->event_data.exec.process_tgid = task->tgid;
@@ -116,7 +117,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
} else
return;
get_seq(&msg->seq, &ev->cpu);
- getnstimestamp(&ev->timestamp);
+ ktime_get_ts(&ev->timestamp);
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
@@ -136,7 +137,7 @@ void proc_exit_connector(struct task_struct *task)
msg = (struct cn_msg*)buffer;
ev = (struct proc_event*)msg->data;
get_seq(&msg->seq, &ev->cpu);
- getnstimestamp(&ev->timestamp);
+ ktime_get_ts(&ev->timestamp);
ev->what = PROC_EVENT_EXIT;
ev->event_data.exit.process_pid = task->pid;
ev->event_data.exit.process_tgid = task->tgid;
@@ -169,7 +170,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
msg = (struct cn_msg*)buffer;
ev = (struct proc_event*)msg->data;
msg->seq = rcvd_seq;
- getnstimestamp(&ev->timestamp);
+ ktime_get_ts(&ev->timestamp);
ev->cpu = -1;
ev->what = PROC_EVENT_NONE;
ev->event_data.ack.err = err;
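ktime_get_ts() fills a struct timespec from the monotonic clock, as noted in the added comment above; a minimal usage sketch:

#include <linux/ktime.h>
#include <linux/time.h>

static void demo_stamp(struct timespec *ts)
{
	ktime_get_ts(ts);	/* high-resolution monotonic timestamp */
}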
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 71407c578af..64819aa7cac 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -99,9 +99,6 @@ byte(const uint32_t x, const unsigned n)
return x >> (n << 3);
}
-#define uint32_t_in(x) le32_to_cpu(*(const uint32_t *)(x))
-#define uint32_t_out(to, from) (*(uint32_t *)(to) = cpu_to_le32(from))
-
#define E_KEY ctx->E
#define D_KEY ctx->D
@@ -294,6 +291,7 @@ static int
aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t *flags)
{
struct aes_ctx *ctx = aes_ctx(ctx_arg);
+ const __le32 *key = (const __le32 *)in_key;
uint32_t i, t, u, v, w;
uint32_t P[AES_EXTENDED_KEY_SIZE];
uint32_t rounds;
@@ -313,10 +311,10 @@ aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t
ctx->E = ctx->e_data;
ctx->D = ctx->e_data;
- E_KEY[0] = uint32_t_in (in_key);
- E_KEY[1] = uint32_t_in (in_key + 4);
- E_KEY[2] = uint32_t_in (in_key + 8);
- E_KEY[3] = uint32_t_in (in_key + 12);
+ E_KEY[0] = le32_to_cpu(key[0]);
+ E_KEY[1] = le32_to_cpu(key[1]);
+ E_KEY[2] = le32_to_cpu(key[2]);
+ E_KEY[3] = le32_to_cpu(key[3]);
/* Prepare control words. */
memset(&ctx->cword, 0, sizeof(ctx->cword));
@@ -343,17 +341,17 @@ aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t
break;
case 24:
- E_KEY[4] = uint32_t_in (in_key + 16);
- t = E_KEY[5] = uint32_t_in (in_key + 20);
+ E_KEY[4] = le32_to_cpu(key[4]);
+ t = E_KEY[5] = le32_to_cpu(key[5]);
for (i = 0; i < 8; ++i)
loop6 (i);
break;
case 32:
- E_KEY[4] = uint32_t_in (in_key + 16);
- E_KEY[5] = uint32_t_in (in_key + 20);
- E_KEY[6] = uint32_t_in (in_key + 24);
- t = E_KEY[7] = uint32_t_in (in_key + 28);
+ E_KEY[4] = le32_to_cpu(key[4]);
+ E_KEY[5] = le32_to_cpu(key[5]);
+ E_KEY[6] = le32_to_cpu(key[6]);
+ t = E_KEY[7] = le32_to_cpu(key[7]);
for (i = 0; i < 7; ++i)
loop8 (i);
break;
@@ -468,6 +466,8 @@ static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
static struct crypto_alg aes_alg = {
.cra_name = "aes",
+ .cra_driver_name = "aes-padlock",
+ .cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
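The removed uint32_t_in()/uint32_t_out() macros cast through an unannotated uint32_t pointer; the replacement goes through a typed __le32 pointer and the standard byte-order helper, matching all three key sizes. A minimal sketch, assuming in_key points at the raw key bytes:

#include <linux/types.h>
#include <asm/byteorder.h>

static void demo_load_key_words(const u8 *in_key, u32 *e_key)
{
	const __le32 *key = (const __le32 *)in_key;

	e_key[0] = le32_to_cpu(key[0]);	/* key material is stored little-endian */
	e_key[1] = le32_to_cpu(key[1]);
	e_key[2] = le32_to_cpu(key[2]);
	e_key[3] = le32_to_cpu(key[3]);
}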
diff --git a/drivers/crypto/padlock.h b/drivers/crypto/padlock.h
index 3cf2b7a1234..b78489bc298 100644
--- a/drivers/crypto/padlock.h
+++ b/drivers/crypto/padlock.h
@@ -17,7 +17,7 @@
/* Control word. */
struct cword {
- int __attribute__ ((__packed__))
+ unsigned int __attribute__ ((__packed__))
rounds:4,
algo:3,
keygen:1,
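Bit-fields declared plain int have implementation-defined signedness, and a signed field this narrow cannot hold the intended values (a signed 1-bit field represents only 0 and -1), hence the switch to unsigned int; a minimal illustration:

struct demo_flags {
	int		s:1;	/* if treated as signed, assigning 1 reads back as -1 */
	unsigned int	u:1;	/* always reads back as 0 or 1 */
};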
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index bda5bce681b..343379f23a5 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -65,11 +65,11 @@
* v0.01 release to linux-ia64@linuxia64.org
*/
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/sched.h> /* for capable() */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index db358cfa7cb..c5829591436 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -350,6 +350,18 @@ config SENSORS_VIA686A
This driver can also be built as a module. If so, the module
will be called via686a.
+config SENSORS_VT8231
+ tristate "VT8231"
+ depends on HWMON && I2C && PCI && EXPERIMENTAL
+ select HWMON_VID
+ select I2C_ISA
+ help
+ If you say yes here then you get support for the integrated sensors
+ in the VIA VT8231 device.
+
+ This driver can also be built as a module. If so, the module
+ will be called vt8231.
+
config SENSORS_W83781D
tristate "Winbond W83781D, W83782D, W83783S, W83627HF, Asus AS99127F"
depends on HWMON && I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index f7d6a2f61ee..06d4a1d1410 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o
obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o
obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o
obj-$(CONFIG_SENSORS_VIA686A) += via686a.o
+obj-$(CONFIG_SENSORS_VT8231) += vt8231.o
obj-$(CONFIG_SENSORS_W83627EHF) += w83627ehf.o
obj-$(CONFIG_SENSORS_W83L785TS) += w83l785ts.o
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 8102876c7c3..665612729cb 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -126,10 +126,10 @@ static int read_only;
/* This is the driver that will be inserted */
static struct i2c_driver adm1021_driver = {
- .owner = THIS_MODULE,
- .name = "adm1021",
+ .driver = {
+ .name = "adm1021",
+ },
.id = I2C_DRIVERID_ADM1021,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = adm1021_attach_adapter,
.detach_client = adm1021_detach_client,
};
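The same mechanical conversion is applied to every hwmon client driver below: the per-driver .owner and I2C_DF_NOTIFY flag are no longer set here, and the name moves into the embedded struct device_driver. A minimal sketch of the resulting shape, with hypothetical callbacks:

static int demo_attach_adapter(struct i2c_adapter *adapter);
static int demo_detach_client(struct i2c_client *client);

static struct i2c_driver demo_driver = {
	.driver = {
		.name	= "demo",	/* was the top-level .name field */
	},
	.attach_adapter	= demo_attach_adapter,
	.detach_client	= demo_detach_client,
};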
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index 3ec12421694..9331c56d2ba 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -118,10 +118,10 @@ static struct adm1025_data *adm1025_update_device(struct device *dev);
*/
static struct i2c_driver adm1025_driver = {
- .owner = THIS_MODULE,
- .name = "adm1025",
+ .driver = {
+ .name = "adm1025",
+ },
.id = I2C_DRIVERID_ADM1025,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = adm1025_attach_adapter,
.detach_client = adm1025_detach_client,
};
@@ -287,8 +287,6 @@ static ssize_t show_vid(struct device *dev, struct device_attribute *attr, char
struct adm1025_data *data = adm1025_update_device(dev);
return sprintf(buf, "%u\n", vid_from_reg(data->vid, data->vrm));
}
-/* in1_ref is deprecated in favour of cpu0_vid, remove after 2005-11-11 */
-static DEVICE_ATTR(in1_ref, S_IRUGO, show_vid, NULL);
static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
static ssize_t show_vrm(struct device *dev, struct device_attribute *attr, char *buf)
@@ -444,8 +442,6 @@ static int adm1025_detect(struct i2c_adapter *adapter, int address, int kind)
device_create_file(&new_client->dev, &dev_attr_temp1_max);
device_create_file(&new_client->dev, &dev_attr_temp2_max);
device_create_file(&new_client->dev, &dev_attr_alarms);
- /* in1_ref is deprecated, remove after 2005-11-11 */
- device_create_file(&new_client->dev, &dev_attr_in1_ref);
device_create_file(&new_client->dev, &dev_attr_cpu0_vid);
device_create_file(&new_client->dev, &dev_attr_vrm);
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index e0f56549d1d..fefe6e74fd0 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -308,9 +308,9 @@ static void adm1026_init_client(struct i2c_client *client);
static struct i2c_driver adm1026_driver = {
- .owner = THIS_MODULE,
- .name = "adm1026",
- .flags = I2C_DF_NOTIFY,
+ .driver = {
+ .name = "adm1026",
+ },
.attach_adapter = adm1026_attach_adapter,
.detach_client = adm1026_detach_client,
};
@@ -1227,8 +1227,6 @@ static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr, c
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf,"%d\n", vid_from_reg(data->vid & 0x3f, data->vrm));
}
-/* vid deprecated in favour of cpu0_vid, remove after 2005-11-11 */
-static DEVICE_ATTR(vid, S_IRUGO, show_vid_reg, NULL);
static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
static ssize_t show_vrm_reg(struct device *dev, struct device_attribute *attr, char *buf)
@@ -1673,8 +1671,6 @@ static int adm1026_detect(struct i2c_adapter *adapter, int address,
device_create_file(&new_client->dev, &dev_attr_temp1_crit_enable);
device_create_file(&new_client->dev, &dev_attr_temp2_crit_enable);
device_create_file(&new_client->dev, &dev_attr_temp3_crit_enable);
- /* vid deprecated in favour of cpu0_vid, remove after 2005-11-11 */
- device_create_file(&new_client->dev, &dev_attr_vid);
device_create_file(&new_client->dev, &dev_attr_cpu0_vid);
device_create_file(&new_client->dev, &dev_attr_vrm);
device_create_file(&new_client->dev, &dev_attr_alarms);
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 7c545d5eee4..d0639796608 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -105,9 +105,9 @@ static struct adm1031_data *adm1031_update_device(struct device *dev);
/* This is the driver that will be inserted */
static struct i2c_driver adm1031_driver = {
- .owner = THIS_MODULE,
- .name = "adm1031",
- .flags = I2C_DF_NOTIFY,
+ .driver = {
+ .name = "adm1031",
+ },
.attach_adapter = adm1031_attach_adapter,
.detach_client = adm1031_detach_client,
};
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 11dc95f8a17..5ddc22fea4a 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -137,10 +137,10 @@ static struct adm9240_data *adm9240_update_device(struct device *dev);
/* driver data */
static struct i2c_driver adm9240_driver = {
- .owner = THIS_MODULE,
- .name = "adm9240",
+ .driver = {
+ .name = "adm9240",
+ },
.id = I2C_DRIVERID_ADM9240,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = adm9240_attach_adapter,
.detach_client = adm9240_detach_client,
};
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index 52c469722a6..ae9de63cf2e 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -217,10 +217,10 @@ static struct asb100_data *asb100_update_device(struct device *dev);
static void asb100_init_client(struct i2c_client *client);
static struct i2c_driver asb100_driver = {
- .owner = THIS_MODULE,
- .name = "asb100",
+ .driver = {
+ .name = "asb100",
+ },
.id = I2C_DRIVERID_ASB100,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = asb100_attach_adapter,
.detach_client = asb100_detach_client,
};
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index 53324f56404..b0c490073c8 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -50,9 +50,9 @@ static struct atxp1_data * atxp1_update_device(struct device *dev);
static int atxp1_detect(struct i2c_adapter *adapter, int address, int kind);
static struct i2c_driver atxp1_driver = {
- .owner = THIS_MODULE,
- .name = "atxp1",
- .flags = I2C_DF_NOTIFY,
+ .driver = {
+ .name = "atxp1",
+ },
.attach_adapter = atxp1_attach_adapter,
.detach_client = atxp1_detach_client,
};
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index 34f71b7c7f3..203f9c7abb2 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -89,10 +89,10 @@ static struct ds1621_data *ds1621_update_client(struct device *dev);
/* This is the driver that will be inserted */
static struct i2c_driver ds1621_driver = {
- .owner = THIS_MODULE,
- .name = "ds1621",
+ .driver = {
+ .name = "ds1621",
+ },
.id = I2C_DRIVERID_DS1621,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = ds1621_attach_adapter,
.detach_client = ds1621_detach_client,
};
diff --git a/drivers/hwmon/fscher.c b/drivers/hwmon/fscher.c
index a02e1c34c75..25409181d1e 100644
--- a/drivers/hwmon/fscher.c
+++ b/drivers/hwmon/fscher.c
@@ -118,10 +118,10 @@ static int fscher_write_value(struct i2c_client *client, u8 reg, u8 value);
*/
static struct i2c_driver fscher_driver = {
- .owner = THIS_MODULE,
- .name = "fscher",
+ .driver = {
+ .name = "fscher",
+ },
.id = I2C_DRIVERID_FSCHER,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = fscher_attach_adapter,
.detach_client = fscher_detach_client,
};
diff --git a/drivers/hwmon/fscpos.c b/drivers/hwmon/fscpos.c
index 64e4edc64f8..6d0146b5702 100644
--- a/drivers/hwmon/fscpos.c
+++ b/drivers/hwmon/fscpos.c
@@ -100,10 +100,10 @@ static void reset_fan_alarm(struct i2c_client *client, int nr);
* Driver data (common to all clients)
*/
static struct i2c_driver fscpos_driver = {
- .owner = THIS_MODULE,
- .name = "fscpos",
+ .driver = {
+ .name = "fscpos",
+ },
.id = I2C_DRIVERID_FSCPOS,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = fscpos_attach_adapter,
.detach_client = fscpos_detach_client,
};
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index 2f178dbe3d8..9e685e3a3bc 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -151,10 +151,10 @@ static struct gl518_data *gl518_update_device(struct device *dev);
/* This is the driver that will be inserted */
static struct i2c_driver gl518_driver = {
- .owner = THIS_MODULE,
- .name = "gl518sm",
+ .driver = {
+ .name = "gl518sm",
+ },
.id = I2C_DRIVERID_GL518,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = gl518_attach_adapter,
.detach_client = gl518_detach_client,
};
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index c39ba123942..baee60e44b5 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -109,10 +109,10 @@ static struct gl520_data *gl520_update_device(struct device *dev);
/* Driver data */
static struct i2c_driver gl520_driver = {
- .owner = THIS_MODULE,
- .name = "gl520sm",
+ .driver = {
+ .name = "gl520sm",
+ },
.id = I2C_DRIVERID_GL520,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = gl520_attach_adapter,
.detach_client = gl520_detach_client,
};
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 312769ad4da..e497274916c 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -49,20 +49,22 @@
. . . .
11110 = 0.800 V
11111 = 0.000 V (off)
+
+ The 17 specification is in fact Intel Mobile Voltage Positioning -
+ (IMVP-II). You can find more information in the datasheet of Max1718
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/2452
+
*/
/* vrm is the VRM/VRD document version multiplied by 10.
val is the 4-, 5- or 6-bit VID code.
Returned value is in mV to avoid floating point in the kernel. */
-int vid_from_reg(int val, int vrm)
+int vid_from_reg(int val, u8 vrm)
{
int vid;
switch(vrm) {
- case 0:
- return 0;
-
case 100: /* VRD 10.0 */
if((val & 0x1f) == 0x1f)
return 0;
@@ -91,10 +93,16 @@ int vid_from_reg(int val, int vrm)
case 84: /* VRM 8.4 */
val &= 0x0f;
/* fall through */
- default: /* VRM 8.2 */
+ case 82: /* VRM 8.2 */
return(val == 0x1f ? 0 :
val & 0x10 ? 5100 - (val) * 100 :
2050 - (val) * 50);
+ case 17: /* Intel IMVP-II */
+ return(val & 0x10 ? 975 - (val & 0xF) * 25 :
+ 1750 - val * 50);
+ default: /* report 0 for unknown */
+ printk(KERN_INFO "hwmon-vid: requested unknown VRM version\n");
+ return 0;
}
}
@@ -108,30 +116,36 @@ struct vrm_model {
u8 vendor;
u8 eff_family;
u8 eff_model;
- int vrm_type;
+ u8 eff_stepping;
+ u8 vrm_type;
};
#define ANY 0xFF
#ifdef CONFIG_X86
+/* the stepping parameter is the highest acceptable stepping for the current line */
+
static struct vrm_model vrm_models[] = {
- {X86_VENDOR_AMD, 0x6, ANY, 90}, /* Athlon Duron etc */
- {X86_VENDOR_AMD, 0xF, ANY, 24}, /* Athlon 64, Opteron */
- {X86_VENDOR_INTEL, 0x6, 0x9, 85}, /* 0.13um too */
- {X86_VENDOR_INTEL, 0x6, 0xB, 85}, /* Tualatin */
- {X86_VENDOR_INTEL, 0x6, ANY, 82}, /* any P6 */
- {X86_VENDOR_INTEL, 0x7, ANY, 0}, /* Itanium */
- {X86_VENDOR_INTEL, 0xF, 0x0, 90}, /* P4 */
- {X86_VENDOR_INTEL, 0xF, 0x1, 90}, /* P4 Willamette */
- {X86_VENDOR_INTEL, 0xF, 0x2, 90}, /* P4 Northwood */
- {X86_VENDOR_INTEL, 0xF, 0x3, 100}, /* P4 Prescott */
- {X86_VENDOR_INTEL, 0xF, 0x4, 100}, /* P4 Prescott */
- {X86_VENDOR_INTEL, 0x10,ANY, 0}, /* Itanium 2 */
- {X86_VENDOR_UNKNOWN, ANY, ANY, 0} /* stop here */
+ {X86_VENDOR_AMD, 0x6, ANY, ANY, 90}, /* Athlon Duron etc */
+ {X86_VENDOR_AMD, 0xF, ANY, ANY, 24}, /* Athlon 64, Opteron and above VRM 24 */
+ {X86_VENDOR_INTEL, 0x6, 0x9, ANY, 85}, /* 0.13um too */
+ {X86_VENDOR_INTEL, 0x6, 0xB, ANY, 85}, /* Tualatin */
+ {X86_VENDOR_INTEL, 0x6, ANY, ANY, 82}, /* any P6 */
+ {X86_VENDOR_INTEL, 0x7, ANY, ANY, 0}, /* Itanium */
+ {X86_VENDOR_INTEL, 0xF, 0x0, ANY, 90}, /* P4 */
+ {X86_VENDOR_INTEL, 0xF, 0x1, ANY, 90}, /* P4 Willamette */
+ {X86_VENDOR_INTEL, 0xF, 0x2, ANY, 90}, /* P4 Northwood */
+ {X86_VENDOR_INTEL, 0xF, ANY, ANY, 100}, /* Prescott and above assume VRD 10 */
+ {X86_VENDOR_INTEL, 0x10, ANY, ANY, 0}, /* Itanium 2 */
+ {X86_VENDOR_CENTAUR, 0x6, 0x7, ANY, 85}, /* Eden ESP/Ezra */
+ {X86_VENDOR_CENTAUR, 0x6, 0x8, 0x7, 85}, /* Ezra T */
+ {X86_VENDOR_CENTAUR, 0x6, 0x9, 0x7, 85}, /* Nehemiah */
+ {X86_VENDOR_CENTAUR, 0x6, 0x9, ANY, 17}, /* C3-M */
+ {X86_VENDOR_UNKNOWN, ANY, ANY, ANY, 0} /* stop here */
};
-static int find_vrm(u8 eff_family, u8 eff_model, u8 vendor)
+static u8 find_vrm(u8 eff_family, u8 eff_model, u8 eff_stepping, u8 vendor)
{
int i = 0;
@@ -139,7 +153,8 @@ static int find_vrm(u8 eff_family, u8 eff_model, u8 vendor)
if (vrm_models[i].vendor==vendor)
if ((vrm_models[i].eff_family==eff_family)
&& ((vrm_models[i].eff_model==eff_model) ||
- (vrm_models[i].eff_model==ANY)))
+ (vrm_models[i].eff_model==ANY)) &&
+ (eff_stepping <= vrm_models[i].eff_stepping))
return vrm_models[i].vrm_type;
i++;
}
@@ -147,12 +162,11 @@ static int find_vrm(u8 eff_family, u8 eff_model, u8 vendor)
return 0;
}
-int vid_which_vrm(void)
+u8 vid_which_vrm(void)
{
struct cpuinfo_x86 *c = cpu_data;
u32 eax;
- u8 eff_family, eff_model;
- int vrm_ret;
+ u8 eff_family, eff_model, eff_stepping, vrm_ret;
if (c->x86 < 6) /* Any CPU with family lower than 6 */
return 0; /* doesn't have VID and/or CPUID */
@@ -160,20 +174,21 @@ int vid_which_vrm(void)
eax = cpuid_eax(1);
eff_family = ((eax & 0x00000F00)>>8);
eff_model = ((eax & 0x000000F0)>>4);
+ eff_stepping = eax & 0xF;
if (eff_family == 0xF) { /* use extended model & family */
eff_family += ((eax & 0x00F00000)>>20);
eff_model += ((eax & 0x000F0000)>>16)<<4;
}
- vrm_ret = find_vrm(eff_family,eff_model,c->x86_vendor);
+ vrm_ret = find_vrm(eff_family, eff_model, eff_stepping, c->x86_vendor);
if (vrm_ret == 0)
printk(KERN_INFO "hwmon-vid: Unknown VRM version of your "
"x86 CPU\n");
return vrm_ret;
}
-/* and now something completely different for the non-x86 world */
+/* and now for something completely different for the non-x86 world */
#else
-int vid_which_vrm(void)
+u8 vid_which_vrm(void)
{
printk(KERN_INFO "hwmon-vid: Unknown VRM version of your CPU\n");
return 0;
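As a quick check of the new IMVP-II (vrm == 17) branch: bit 4 of the VID code selects the low-voltage range, so for example:

/*   val = 0x05: bit 4 clear -> 1750 - 5 * 50            = 1500 (mV)
 *   val = 0x12: bit 4 set   ->  975 - (0x12 & 0xF) * 25 =  925 (mV)
 * vid_from_reg() reports millivolts to avoid floating point in the kernel. */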
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index a61f5d00f10..0da7c9c508c 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -213,7 +213,7 @@ struct it87_data {
u8 sensor; /* Register value */
u8 fan_div[3]; /* Register encoding, shifted right */
u8 vid; /* Register encoding, combined */
- int vrm;
+ u8 vrm;
u32 alarms; /* Register encoding, combined */
u8 fan_main_ctrl; /* Register value */
u8 manual_pwm_ctl[3]; /* manual PWM value set by user */
@@ -234,17 +234,18 @@ static void it87_init_client(struct i2c_client *client, struct it87_data *data);
static struct i2c_driver it87_driver = {
- .owner = THIS_MODULE,
- .name = "it87",
+ .driver = {
+ .name = "it87",
+ },
.id = I2C_DRIVERID_IT87,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = it87_attach_adapter,
.detach_client = it87_detach_client,
};
static struct i2c_driver it87_isa_driver = {
- .owner = THIS_MODULE,
- .name = "it87-isa",
+ .driver = {
+ .name = "it87-isa",
+ },
.attach_adapter = it87_isa_attach_adapter,
.detach_client = it87_detach_client,
};
@@ -668,7 +669,7 @@ static ssize_t
show_vrm_reg(struct device *dev, struct device_attribute *attr, char *buf)
{
struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%ld\n", (long) data->vrm);
+ return sprintf(buf, "%u\n", data->vrm);
}
static ssize_t
store_vrm_reg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
@@ -761,7 +762,8 @@ static int it87_detect(struct i2c_adapter *adapter, int address, int kind)
/* Reserve the ISA region */
if (is_isa)
- if (!request_region(address, IT87_EXTENT, it87_isa_driver.name))
+ if (!request_region(address, IT87_EXTENT,
+ it87_isa_driver.driver.name))
goto ERROR0;
/* For now, we presume we have a valid client. We create the
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 954ec249724..6b1aa7ef552 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -139,9 +139,9 @@ static void lm63_init_client(struct i2c_client *client);
*/
static struct i2c_driver lm63_driver = {
- .owner = THIS_MODULE,
- .name = "lm63",
- .flags = I2C_DF_NOTIFY,
+ .driver = {
+ .name = "lm63",
+ },
.attach_adapter = lm63_attach_adapter,
.detach_client = lm63_detach_client,
};
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index d70f4c8fc1e..74ca2c8c61c 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -66,10 +66,10 @@ static struct lm75_data *lm75_update_device(struct device *dev);
/* This is the driver that will be inserted */
static struct i2c_driver lm75_driver = {
- .owner = THIS_MODULE,
- .name = "lm75",
+ .driver = {
+ .name = "lm75",
+ },
.id = I2C_DRIVERID_LM75,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = lm75_attach_adapter,
.detach_client = lm75_detach_client,
};
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index 9380fda7dcd..a2f420d01fb 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -74,9 +74,9 @@ static struct lm77_data *lm77_update_device(struct device *dev);
/* This is the driver that will be inserted */
static struct i2c_driver lm77_driver = {
- .owner = THIS_MODULE,
- .name = "lm77",
- .flags = I2C_DF_NOTIFY,
+ .driver = {
+ .name = "lm77",
+ },
.attach_adapter = lm77_attach_adapter,
.detach_client = lm77_detach_client,
};
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 78cdd506439..e404001e20d 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -164,17 +164,18 @@ static void lm78_init_client(struct i2c_client *client);
static struct i2c_driver lm78_driver = {
- .owner = THIS_MODULE,
- .name = "lm78",
+ .driver = {
+ .name = "lm78",
+ },
.id = I2C_DRIVERID_LM78,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = lm78_attach_adapter,
.detach_client = lm78_detach_client,
};
static struct i2c_driver lm78_isa_driver = {
- .owner = THIS_MODULE,
- .name = "lm78-isa",
+ .driver = {
+ .name = "lm78-isa",
+ },
.attach_adapter = lm78_isa_attach_adapter,
.detach_client = lm78_detach_client,
};
@@ -497,7 +498,7 @@ static int lm78_detect(struct i2c_adapter *adapter, int address, int kind)
/* Reserve the ISA region */
if (is_isa)
if (!request_region(address, LM78_EXTENT,
- lm78_isa_driver.name)) {
+ lm78_isa_driver.driver.name)) {
err = -EBUSY;
goto ERROR0;
}
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index c359fdea211..c9a7cdea7bd 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -143,10 +143,10 @@ static int lm80_write_value(struct i2c_client *client, u8 reg, u8 value);
*/
static struct i2c_driver lm80_driver = {
- .owner = THIS_MODULE,
- .name = "lm80",
+ .driver = {
+ .name = "lm80",
+ },
.id = I2C_DRIVERID_LM80,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = lm80_attach_adapter,
.detach_client = lm80_detach_client,
};
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index 9a70611a9f6..26dfa9e216c 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -124,10 +124,10 @@ static struct lm83_data *lm83_update_device(struct device *dev);
*/
static struct i2c_driver lm83_driver = {
- .owner = THIS_MODULE,
- .name = "lm83",
+ .driver = {
+ .name = "lm83",
+ },
.id = I2C_DRIVERID_LM83,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = lm83_attach_adapter,
.detach_client = lm83_detach_client,
};
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index d1070ed2bee..7389a012754 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -380,10 +380,10 @@ static void lm85_init_client(struct i2c_client *client);
static struct i2c_driver lm85_driver = {
- .owner = THIS_MODULE,
- .name = "lm85",
+ .driver = {
+ .name = "lm85",
+ },
.id = I2C_DRIVERID_LM85,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = lm85_attach_adapter,
.detach_client = lm85_detach_client,
};
@@ -443,7 +443,17 @@ show_fan_offset(4);
static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr, char *buf)
{
struct lm85_data *data = lm85_update_device(dev);
- return sprintf(buf, "%ld\n", (long) vid_from_reg(data->vid, data->vrm));
+ int vid;
+
+ if (data->type == adt7463 && (data->vid & 0x80)) {
+ /* 6-pin VID (VRM 10) */
+ vid = vid_from_reg(data->vid & 0x3f, data->vrm);
+ } else {
+ /* 5-pin VID (VRM 9) */
+ vid = vid_from_reg(data->vid & 0x1f, data->vrm);
+ }
+
+ return sprintf(buf, "%d\n", vid);
}
static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
@@ -1176,17 +1186,14 @@ static int lm85_detect(struct i2c_adapter *adapter, int address,
device_create_file(&new_client->dev, &dev_attr_in1_input);
device_create_file(&new_client->dev, &dev_attr_in2_input);
device_create_file(&new_client->dev, &dev_attr_in3_input);
- device_create_file(&new_client->dev, &dev_attr_in4_input);
device_create_file(&new_client->dev, &dev_attr_in0_min);
device_create_file(&new_client->dev, &dev_attr_in1_min);
device_create_file(&new_client->dev, &dev_attr_in2_min);
device_create_file(&new_client->dev, &dev_attr_in3_min);
- device_create_file(&new_client->dev, &dev_attr_in4_min);
device_create_file(&new_client->dev, &dev_attr_in0_max);
device_create_file(&new_client->dev, &dev_attr_in1_max);
device_create_file(&new_client->dev, &dev_attr_in2_max);
device_create_file(&new_client->dev, &dev_attr_in3_max);
- device_create_file(&new_client->dev, &dev_attr_in4_max);
device_create_file(&new_client->dev, &dev_attr_temp1_input);
device_create_file(&new_client->dev, &dev_attr_temp2_input);
device_create_file(&new_client->dev, &dev_attr_temp3_input);
@@ -1224,6 +1231,15 @@ static int lm85_detect(struct i2c_adapter *adapter, int address,
device_create_file(&new_client->dev, &dev_attr_temp2_auto_temp_crit);
device_create_file(&new_client->dev, &dev_attr_temp3_auto_temp_crit);
+ /* The ADT7463 has an optional VRM 10 mode where pin 21 is used
+ as a sixth digital VID input rather than an analog input. */
+ data->vid = lm85_read_value(new_client, LM85_REG_VID);
+ if (!(kind == adt7463 && (data->vid & 0x80))) {
+ device_create_file(&new_client->dev, &dev_attr_in4_input);
+ device_create_file(&new_client->dev, &dev_attr_in4_min);
+ device_create_file(&new_client->dev, &dev_attr_in4_max);
+ }
+
return 0;
/* Error out and cleanup code */
@@ -1382,11 +1398,18 @@ static struct lm85_data *lm85_update_device(struct device *dev)
irrelevant. So it is left in 4*/
data->adc_scale = (data->type == emc6d102 ) ? 16 : 4;
- for (i = 0; i <= 4; ++i) {
+ data->vid = lm85_read_value(client, LM85_REG_VID);
+
+ for (i = 0; i <= 3; ++i) {
data->in[i] =
lm85_read_value(client, LM85_REG_IN(i));
}
+ if (!(data->type == adt7463 && (data->vid & 0x80))) {
+ data->in[4] = lm85_read_value(client,
+ LM85_REG_IN(4));
+ }
+
for (i = 0; i <= 3; ++i) {
data->fan[i] =
lm85_read_value(client, LM85_REG_FAN(i));
@@ -1450,13 +1473,20 @@ static struct lm85_data *lm85_update_device(struct device *dev)
/* Things that don't change often */
dev_dbg(&client->dev, "Reading config values\n");
- for (i = 0; i <= 4; ++i) {
+ for (i = 0; i <= 3; ++i) {
data->in_min[i] =
lm85_read_value(client, LM85_REG_IN_MIN(i));
data->in_max[i] =
lm85_read_value(client, LM85_REG_IN_MAX(i));
}
+ if (!(data->type == adt7463 && (data->vid & 0x80))) {
+ data->in_min[4] = lm85_read_value(client,
+ LM85_REG_IN_MIN(4));
+ data->in_max[4] = lm85_read_value(client,
+ LM85_REG_IN_MAX(4));
+ }
+
if ( data->type == emc6d100 ) {
for (i = 5; i <= 7; ++i) {
data->in_min[i] =
@@ -1478,8 +1508,6 @@ static struct lm85_data *lm85_update_device(struct device *dev)
lm85_read_value(client, LM85_REG_TEMP_MAX(i));
}
- data->vid = lm85_read_value(client, LM85_REG_VID);
-
for (i = 0; i <= 2; ++i) {
int val ;
data->autofan[i].config =
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index eeec1817786..6ba34c302d8 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -161,10 +161,10 @@ static struct lm87_data *lm87_update_device(struct device *dev);
*/
static struct i2c_driver lm87_driver = {
- .owner = THIS_MODULE,
- .name = "lm87",
+ .driver = {
+ .name = "lm87",
+ },
.id = I2C_DRIVERID_LM87,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = lm87_attach_adapter,
.detach_client = lm87_detach_client,
};
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 83cf2e1b09f..5679464447c 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -186,10 +186,10 @@ static struct lm90_data *lm90_update_device(struct device *dev);
*/
static struct i2c_driver lm90_driver = {
- .owner = THIS_MODULE,
- .name = "lm90",
+ .driver = {
+ .name = "lm90",
+ },
.id = I2C_DRIVERID_LM90,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = lm90_attach_adapter,
.detach_client = lm90_detach_client,
};
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index 7a4b3701ed1..b0c4cb730a7 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -410,10 +410,10 @@ static int lm92_detach_client(struct i2c_client *client)
*/
static struct i2c_driver lm92_driver = {
- .owner = THIS_MODULE,
- .name = "lm92",
+ .driver = {
+ .name = "lm92",
+ },
.id = I2C_DRIVERID_LM92,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = lm92_attach_adapter,
.detach_client = lm92_detach_client,
};
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index 69e7e125683..3abe330b22c 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -90,9 +90,9 @@ static struct max1619_data *max1619_update_device(struct device *dev);
*/
static struct i2c_driver max1619_driver = {
- .owner = THIS_MODULE,
- .name = "max1619",
- .flags = I2C_DF_NOTIFY,
+ .driver = {
+ .name = "max1619",
+ },
.attach_adapter = max1619_attach_adapter,
.detach_client = max1619_detach_client,
};
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 17f745a23d0..f161e88e3bb 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -236,8 +236,9 @@ static struct pc87360_data *pc87360_update_device(struct device *dev);
*/
static struct i2c_driver pc87360_driver = {
- .owner = THIS_MODULE,
- .name = "pc87360",
+ .driver = {
+ .name = "pc87360",
+ },
.attach_adapter = pc87360_detect,
.detach_client = pc87360_detach_client,
};
@@ -798,7 +799,7 @@ static int pc87360_detect(struct i2c_adapter *adapter)
for (i = 0; i < 3; i++) {
if (((data->address[i] = extra_isa[i]))
&& !request_region(extra_isa[i], PC87360_EXTENT,
- pc87360_driver.name)) {
+ pc87360_driver.driver.name)) {
dev_err(&new_client->dev, "Region 0x%x-0x%x already "
"in use!\n", extra_isa[i],
extra_isa[i]+PC87360_EXTENT-1);
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 9c6cadec108..8be5189d9bd 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -198,8 +198,9 @@ static struct sis5595_data *sis5595_update_device(struct device *dev);
static void sis5595_init_client(struct i2c_client *client);
static struct i2c_driver sis5595_driver = {
- .owner = THIS_MODULE,
- .name = "sis5595",
+ .driver = {
+ .name = "sis5595",
+ },
.attach_adapter = sis5595_detect,
.detach_client = sis5595_detach_client,
};
@@ -484,7 +485,8 @@ static int sis5595_detect(struct i2c_adapter *adapter)
if (force_addr)
address = force_addr & ~(SIS5595_EXTENT - 1);
/* Reserve the ISA region */
- if (!request_region(address, SIS5595_EXTENT, sis5595_driver.name)) {
+ if (!request_region(address, SIS5595_EXTENT,
+ sis5595_driver.driver.name)) {
err = -EBUSY;
goto exit;
}
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index 2a3e21b5b6b..8663bbbe97f 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -226,8 +226,9 @@ static int smsc47b397_detach_client(struct i2c_client *client)
static int smsc47b397_detect(struct i2c_adapter *adapter);
static struct i2c_driver smsc47b397_driver = {
- .owner = THIS_MODULE,
- .name = "smsc47b397",
+ .driver = {
+ .name = "smsc47b397",
+ },
.attach_adapter = smsc47b397_detect,
.detach_client = smsc47b397_detach_client,
};
@@ -238,7 +239,8 @@ static int smsc47b397_detect(struct i2c_adapter *adapter)
struct smsc47b397_data *data;
int err = 0;
- if (!request_region(address, SMSC_EXTENT, smsc47b397_driver.name)) {
+ if (!request_region(address, SMSC_EXTENT,
+ smsc47b397_driver.driver.name)) {
dev_err(&adapter->dev, "Region 0x%x already in use!\n",
address);
return -EBUSY;
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 5905c1af88f..d1e3ec0fe4d 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -126,8 +126,9 @@ static struct smsc47m1_data *smsc47m1_update_device(struct device *dev,
static struct i2c_driver smsc47m1_driver = {
- .owner = THIS_MODULE,
- .name = "smsc47m1",
+ .driver = {
+ .name = "smsc47m1",
+ },
.attach_adapter = smsc47m1_detect,
.detach_client = smsc47m1_detach_client,
};
@@ -394,7 +395,7 @@ static int smsc47m1_detect(struct i2c_adapter *adapter)
int err = 0;
int fan1, fan2, pwm1, pwm2;
- if (!request_region(address, SMSC_EXTENT, smsc47m1_driver.name)) {
+ if (!request_region(address, SMSC_EXTENT, smsc47m1_driver.driver.name)) {
dev_err(&adapter->dev, "Region 0x%x already in use!\n", address);
return -EBUSY;
}
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index 6f696f89717..cb01848729b 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -572,8 +572,9 @@ static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
/* The driver. I choose to use type i2c_driver, as it is identical to both
smbus_driver and isa_driver, and clients could be of either kind */
static struct i2c_driver via686a_driver = {
- .owner = THIS_MODULE,
- .name = "via686a",
+ .driver = {
+ .name = "via686a",
+ },
.attach_adapter = via686a_detect,
.detach_client = via686a_detach_client,
};
@@ -615,7 +616,8 @@ static int via686a_detect(struct i2c_adapter *adapter)
}
/* Reserve the ISA region */
- if (!request_region(address, VIA686A_EXTENT, via686a_driver.name)) {
+ if (!request_region(address, VIA686A_EXTENT,
+ via686a_driver.driver.name)) {
dev_err(&adapter->dev, "region 0x%x already in use!\n",
address);
return -ENODEV;
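With the top-level .name gone, every ISA region reservation in these drivers is tagged with the name from the embedded device_driver instead; a minimal sketch of the pattern, with hypothetical demo_driver and DEMO_EXTENT stand-ins:

#include <linux/ioport.h>
#include <linux/i2c.h>

#define DEMO_EXTENT 8			/* hypothetical region size */

static struct i2c_driver demo_driver = {
	.driver = { .name = "demo" },
};

static int demo_reserve_region(unsigned short address)
{
	if (!request_region(address, DEMO_EXTENT, demo_driver.driver.name))
		return -EBUSY;		/* region already claimed elsewhere */
	return 0;
}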
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
new file mode 100644
index 00000000000..3eb08f004c0
--- /dev/null
+++ b/drivers/hwmon/vt8231.c
@@ -0,0 +1,862 @@
+/*
+ vt8231.c - Part of lm_sensors, Linux kernel modules
+ for hardware monitoring
+
+ Copyright (c) 2005 Roger Lucas <roger@planbit.co.uk>
+ Copyright (c) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
+ Aaron M. Marsh <amarsh@sdf.lonestar.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+/* Supports VIA VT8231 South Bridge embedded sensors
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/i2c-isa.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon-vid.h>
+#include <linux/err.h>
+#include <asm/io.h>
+
+static int force_addr;
+module_param(force_addr, int, 0);
+MODULE_PARM_DESC(force_addr, "Initialize the base address of the sensors");
+
+/* Device address
+ Note that we can't determine the ISA address until we have initialized
+ our module */
+static unsigned short isa_address;
+
+#define VT8231_EXTENT 0x80
+#define VT8231_BASE_REG 0x70
+#define VT8231_ENABLE_REG 0x74
+
+/* The VT8231 registers
+
+ The reset value for the input channel configuration is used (Reg 0x4A=0x07)
+ which sets the selected inputs marked with '*' below if multiple options are
+ possible:
+
+ Voltage Mode Temperature Mode
+ Sensor Linux Id Linux Id VIA Id
+ -------- -------- -------- ------
+ CPU Diode N/A temp1 0
+ UIC1 in0 temp2 * 1
+ UIC2 in1 * temp3 2
+ UIC3 in2 * temp4 3
+ UIC4 in3 * temp5 4
+ UIC5 in4 * temp6 5
+ 3.3V in5 N/A
+
+ Note that the BIOS may set the configuration register to a different value
+ to match the motherboard configuration.
+*/
+
+/* fans numbered 0-1 */
+#define VT8231_REG_FAN_MIN(nr) (0x3b + (nr))
+#define VT8231_REG_FAN(nr) (0x29 + (nr))
+
+/* Voltage inputs numbered 0-5 */
+
+static const u8 regvolt[] = { 0x21, 0x22, 0x23, 0x24, 0x25, 0x26 };
+static const u8 regvoltmax[] = { 0x3d, 0x2b, 0x2d, 0x2f, 0x31, 0x33 };
+static const u8 regvoltmin[] = { 0x3e, 0x2c, 0x2e, 0x30, 0x32, 0x34 };
+
+/* Temperatures are numbered 1-6 according to the Linux kernel specification.
+**
+** In the VIA datasheet, however, the temperatures are numbered from zero.
+** Since it is important that this driver can easily be compared to the VIA
+** datasheet, we will use the VIA numbering within this driver and map the
+** kernel sysfs device name to the VIA number in the sysfs callback.
+*/
+
+#define VT8231_REG_TEMP_LOW01 0x49
+#define VT8231_REG_TEMP_LOW25 0x4d
+
+static const u8 regtemp[] = { 0x1f, 0x21, 0x22, 0x23, 0x24, 0x25 };
+static const u8 regtempmax[] = { 0x39, 0x3d, 0x2b, 0x2d, 0x2f, 0x31 };
+static const u8 regtempmin[] = { 0x3a, 0x3e, 0x2c, 0x2e, 0x30, 0x32 };
+
+#define TEMP_FROM_REG(reg) (((253 * 4 - (reg)) * 550 + 105) / 210)
+#define TEMP_MAXMIN_FROM_REG(reg) (((253 - (reg)) * 2200 + 105) / 210)
+#define TEMP_MAXMIN_TO_REG(val) (253 - ((val) * 210 + 1100) / 2200)
+
+#define VT8231_REG_CONFIG 0x40
+#define VT8231_REG_ALARM1 0x41
+#define VT8231_REG_ALARM2 0x42
+#define VT8231_REG_FANDIV 0x47
+#define VT8231_REG_UCH_CONFIG 0x4a
+#define VT8231_REG_TEMP1_CONFIG 0x4b
+#define VT8231_REG_TEMP2_CONFIG 0x4c
+
+/* temps 0-5 as numbered in VIA datasheet - see later for mapping to Linux
+** numbering
+*/
+#define ISTEMP(i, ch_config) ((i) == 0 ? 1 : \
+ ((ch_config) >> ((i)+1)) & 0x01)
+/* voltages 0-5 */
+#define ISVOLT(i, ch_config) ((i) == 5 ? 1 : \
+ !(((ch_config) >> ((i)+2)) & 0x01))
+
+#define DIV_FROM_REG(val) (1 << (val))
+
+/* NB The values returned here are NOT temperatures. The calibration curves
+** for the thermistor curves are board-specific and must go in the
+** sensors.conf file. Temperature sensors are actually ten bits, but the
+** VIA datasheet only considers the 8 MSBs obtained from the regtemp[]
+** register. The temperature value returned should have a magnitude of 3,
+** so we use the VIA scaling as the "true" scaling and use the remaining 2
+** LSBs as fractional precision.
+**
+** All the on-chip hardware temperature comparisons for the alarms are only
+** 8-bits wide, and compare against the 8 MSBs of the temperature. The bits
+** in the registers VT8231_REG_TEMP_LOW01 and VT8231_REG_TEMP_LOW25 are
+** ignored.
+*/
+
+/******** FAN RPM CONVERSIONS ********
+** This chip saturates back at 0, not at 255 like many of the other chips.
+** So, 0 means 0 RPM
+*/
+static inline u8 FAN_TO_REG(long rpm, int div)
+{
+ if (rpm == 0)
+ return 0;
+ return SENSORS_LIMIT(1310720 / (rpm * div), 1, 255);
+}
+
+#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : 1310720 / ((val) * (div)))
+
+struct vt8231_data {
+ struct i2c_client client;
+ struct semaphore update_lock;
+ struct class_device *class_dev;
+ char valid; /* !=0 if following fields are valid */
+ unsigned long last_updated; /* In jiffies */
+
+ u8 in[6]; /* Register value */
+ u8 in_max[6]; /* Register value */
+ u8 in_min[6]; /* Register value */
+ u16 temp[6]; /* Register value 10 bit, right aligned */
+ u8 temp_max[6]; /* Register value */
+ u8 temp_min[6]; /* Register value */
+ u8 fan[2]; /* Register value */
+ u8 fan_min[2]; /* Register value */
+ u8 fan_div[2]; /* Register encoding, shifted right */
+ u16 alarms; /* Register encoding */
+ u8 uch_config;
+};
+
+static struct pci_dev *s_bridge;
+static int vt8231_detect(struct i2c_adapter *adapter);
+static int vt8231_detach_client(struct i2c_client *client);
+static struct vt8231_data *vt8231_update_device(struct device *dev);
+static void vt8231_init_client(struct i2c_client *client);
+
+static inline int vt8231_read_value(struct i2c_client *client, u8 reg)
+{
+ return inb_p(client->addr + reg);
+}
+
+static inline void vt8231_write_value(struct i2c_client *client, u8 reg,
+ u8 value)
+{
+ outb_p(value, client->addr + reg);
+}
+
+/* following are the sysfs callback functions */
+static ssize_t show_in(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ struct vt8231_data *data = vt8231_update_device(dev);
+
+ return sprintf(buf, "%d\n", ((data->in[nr] - 3) * 10000) / 958);
+}
+
+static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ struct vt8231_data *data = vt8231_update_device(dev);
+
+ return sprintf(buf, "%d\n", ((data->in_min[nr] - 3) * 10000) / 958);
+}
+
+static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ struct vt8231_data *data = vt8231_update_device(dev);
+
+ return sprintf(buf, "%d\n", (((data->in_max[nr] - 3) * 10000) / 958));
+}
+
+static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct vt8231_data *data = i2c_get_clientdata(client);
+ unsigned long val = simple_strtoul(buf, NULL, 10);
+
+ down(&data->update_lock);
+ data->in_min[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255);
+ vt8231_write_value(client, regvoltmin[nr], data->in_min[nr]);
+ up(&data->update_lock);
+ return count;
+}
+
+static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct vt8231_data *data = i2c_get_clientdata(client);
+ unsigned long val = simple_strtoul(buf, NULL, 10);
+
+ down(&data->update_lock);
+ data->in_max[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255);
+ vt8231_write_value(client, regvoltmax[nr], data->in_max[nr]);
+ up(&data->update_lock);
+ return count;
+}
+
+/* Special case for input 5 as this has 3.3V scaling built into the chip */
+static ssize_t show_in5(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct vt8231_data *data = vt8231_update_device(dev);
+
+ return sprintf(buf, "%d\n",
+ (((data->in[5] - 3) * 10000 * 54) / (958 * 34)));
+}
+
+static ssize_t show_in5_min(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct vt8231_data *data = vt8231_update_device(dev);
+
+ return sprintf(buf, "%d\n",
+ (((data->in_min[5] - 3) * 10000 * 54) / (958 * 34)));
+}
+
+static ssize_t show_in5_max(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct vt8231_data *data = vt8231_update_device(dev);
+
+ return sprintf(buf, "%d\n",
+ (((data->in_max[5] - 3) * 10000 * 54) / (958 * 34)));
+}
+
+static ssize_t set_in5_min(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct vt8231_data *data = i2c_get_clientdata(client);
+ unsigned long val = simple_strtoul(buf, NULL, 10);
+
+ down(&data->update_lock);
+ data->in_min[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3,
+ 0, 255);
+ vt8231_write_value(client, regvoltmin[5], data->in_min[5]);
+ up(&data->update_lock);
+ return count;
+}
+
+static ssize_t set_in5_max(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct vt8231_data *data = i2c_get_clientdata(client);
+ unsigned long val = simple_strtoul(buf, NULL, 10);
+
+ down(&data->update_lock);
+ data->in_max[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3,
+ 0, 255);
+ vt8231_write_value(client, regvoltmax[5], data->in_max[5]);
+ up(&data->update_lock);
+ return count;
+}
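To make the scaling concrete, a small sketch with an invented register value of 200; it mirrors show_in()/show_in5() and the inverse used by the set functions, including the extra 54/34 factor that undoes in5's built-in divider:

    /* Sketch with an invented register value. */
    #include <stdio.h>

    int main(void)
    {
        int reg  = 200;
        int mv   = ((reg - 3) * 10000) / 958;              /* 2056 mV           */
        int mv5  = ((reg - 3) * 10000 * 54) / (958 * 34);  /* 3265 mV on in5    */
        int back = ((mv * 958) / 10000) + 3;               /* 199: truncation
                                                              loses one step    */
        printf("%d mV, in5 %d mV, reg back %d\n", mv, mv5, back);
        return 0;
    }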
+
+#define define_voltage_sysfs(offset) \
+static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
+ show_in, NULL, offset); \
+static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
+ show_in_min, set_in_min, offset); \
+static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
+ show_in_max, set_in_max, offset)
+
+define_voltage_sysfs(0);
+define_voltage_sysfs(1);
+define_voltage_sysfs(2);
+define_voltage_sysfs(3);
+define_voltage_sysfs(4);
+
+static DEVICE_ATTR(in5_input, S_IRUGO, show_in5, NULL);
+static DEVICE_ATTR(in5_min, S_IRUGO | S_IWUSR, show_in5_min, set_in5_min);
+static DEVICE_ATTR(in5_max, S_IRUGO | S_IWUSR, show_in5_max, set_in5_max);
+
+/* Temperatures */
+static ssize_t show_temp0(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct vt8231_data *data = vt8231_update_device(dev);
+ return sprintf(buf, "%d\n", data->temp[0] * 250);
+}
+
+static ssize_t show_temp0_max(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct vt8231_data *data = vt8231_update_device(dev);
+ return sprintf(buf, "%d\n", data->temp_max[0] * 1000);
+}
+
+static ssize_t show_temp0_min(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct vt8231_data *data = vt8231_update_device(dev);
+ return sprintf(buf, "%d\n", data->temp_min[0] * 1000);
+}
+
+static ssize_t set_temp0_max(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct vt8231_data *data = i2c_get_clientdata(client);
+ int val = simple_strtol(buf, NULL, 10);
+
+ down(&data->update_lock);
+ data->temp_max[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255);
+ vt8231_write_value(client, regtempmax[0], data->temp_max[0]);
+ up(&data->update_lock);
+ return count;
+}
+static ssize_t set_temp0_min(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct vt8231_data *data = i2c_get_clientdata(client);
+ int val = simple_strtol(buf, NULL, 10);
+
+ down(&data->update_lock);
+ data->temp_min[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255);
+ vt8231_write_value(client, regtempmin[0], data->temp_min[0]);
+ up(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ struct vt8231_data *data = vt8231_update_device(dev);
+ return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
+}
+
+static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ struct vt8231_data *data = vt8231_update_device(dev);
+ return sprintf(buf, "%d\n", TEMP_MAXMIN_FROM_REG(data->temp_max[nr]));
+}
+
+static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ struct vt8231_data *data = vt8231_update_device(dev);
+ return sprintf(buf, "%d\n", TEMP_MAXMIN_FROM_REG(data->temp_min[nr]));
+}
+
+static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct vt8231_data *data = i2c_get_clientdata(client);
+ int val = simple_strtol(buf, NULL, 10);
+
+ down(&data->update_lock);
+ data->temp_max[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255);
+ vt8231_write_value(client, regtempmax[nr], data->temp_max[nr]);
+ up(&data->update_lock);
+ return count;
+}
+static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct vt8231_data *data = i2c_get_clientdata(client);
+ int val = simple_strtol(buf, NULL, 10);
+
+ down(&data->update_lock);
+ data->temp_min[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255);
+ vt8231_write_value(client, regtempmin[nr], data->temp_min[nr]);
+ up(&data->update_lock);
+ return count;
+}
+
+/* Note that these map the Linux temperature sensor numbering (1-6) to the VIA
+** temperature sensor numbering (0-5)
+*/
+#define define_temperature_sysfs(offset) \
+static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
+ show_temp, NULL, offset - 1); \
+static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
+ show_temp_max, set_temp_max, offset - 1); \
+static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
+ show_temp_min, set_temp_min, offset - 1)
+
+static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp0, NULL);
+static DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp0_max, set_temp0_max);
+static DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR, show_temp0_min, set_temp0_min);
+
+define_temperature_sysfs(2);
+define_temperature_sysfs(3);
+define_temperature_sysfs(4);
+define_temperature_sysfs(5);
+define_temperature_sysfs(6);
+
+#define CFG_INFO_TEMP(id) { &sensor_dev_attr_temp##id##_input.dev_attr, \
+ &sensor_dev_attr_temp##id##_min.dev_attr, \
+ &sensor_dev_attr_temp##id##_max.dev_attr }
+#define CFG_INFO_VOLT(id) { &sensor_dev_attr_in##id##_input.dev_attr, \
+ &sensor_dev_attr_in##id##_min.dev_attr, \
+ &sensor_dev_attr_in##id##_max.dev_attr }
+
+struct str_device_attr_table {
+ struct device_attribute *input;
+ struct device_attribute *min;
+ struct device_attribute *max;
+};
+
+static struct str_device_attr_table cfg_info_temp[] = {
+ { &dev_attr_temp1_input, &dev_attr_temp1_min, &dev_attr_temp1_max },
+ CFG_INFO_TEMP(2),
+ CFG_INFO_TEMP(3),
+ CFG_INFO_TEMP(4),
+ CFG_INFO_TEMP(5),
+ CFG_INFO_TEMP(6)
+};
+
+static struct str_device_attr_table cfg_info_volt[] = {
+ CFG_INFO_VOLT(0),
+ CFG_INFO_VOLT(1),
+ CFG_INFO_VOLT(2),
+ CFG_INFO_VOLT(3),
+ CFG_INFO_VOLT(4),
+ { &dev_attr_in5_input, &dev_attr_in5_min, &dev_attr_in5_max }
+};
+
+/* Fans */
+static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ struct vt8231_data *data = vt8231_update_device(dev);
+ return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
+ DIV_FROM_REG(data->fan_div[nr])));
+}
+
+static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ struct vt8231_data *data = vt8231_update_device(dev);
+ return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr],
+ DIV_FROM_REG(data->fan_div[nr])));
+}
+
+static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ struct vt8231_data *data = vt8231_update_device(dev);
+ return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr]));
+}
+
+static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct vt8231_data *data = i2c_get_clientdata(client);
+ int val = simple_strtoul(buf, NULL, 10);
+
+ down(&data->update_lock);
+ data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
+ vt8231_write_value(client, VT8231_REG_FAN_MIN(nr), data->fan_min[nr]);
+ up(&data->update_lock);
+ return count;
+}
+
+static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct vt8231_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ unsigned long val = simple_strtoul(buf, NULL, 10);
+ int nr = sensor_attr->index;
+ int old = vt8231_read_value(client, VT8231_REG_FANDIV);
+ long min = FAN_FROM_REG(data->fan_min[nr],
+ DIV_FROM_REG(data->fan_div[nr]));
+
+ down(&data->update_lock);
+ switch (val) {
+ case 1: data->fan_div[nr] = 0; break;
+ case 2: data->fan_div[nr] = 1; break;
+ case 4: data->fan_div[nr] = 2; break;
+ case 8: data->fan_div[nr] = 3; break;
+ default:
+ dev_err(&client->dev, "fan_div value %ld not supported. "
+ "Choose one of 1, 2, 4 or 8!\n", val);
+ up(&data->update_lock);
+ return -EINVAL;
+ }
+
+ /* Correct the fan minimum speed */
+ data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr]));
+ vt8231_write_value(client, VT8231_REG_FAN_MIN(nr), data->fan_min[nr]);
+
+ old = (old & 0x0f) | (data->fan_div[1] << 6) | (data->fan_div[0] << 4);
+ vt8231_write_value(client, VT8231_REG_FANDIV, old);
+ up(&data->update_lock);
+ return count;
+}
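The rescaling of fan_min above keeps the RPM alarm threshold stable across a divisor change; a sketch with invented numbers, reusing the conversion shown in the earlier fan example:

    /* Invented numbers; same arithmetic as FAN_TO_REG/FAN_FROM_REG. */
    #include <stdio.h>

    int main(void)
    {
        long old_reg = 128, old_div = 2;
        long min_rpm = 1310720 / (old_reg * old_div);   /* 5120 RPM        */
        long new_div = 4;
        long new_reg = 1310720 / (min_rpm * new_div);   /* rewritten as 64 */
        printf("threshold stays at %ld RPM\n", 1310720 / (new_reg * new_div));
        return 0;
    }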
+
+
+#define define_fan_sysfs(offset) \
+static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
+ show_fan, NULL, offset - 1); \
+static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
+ show_fan_div, set_fan_div, offset - 1); \
+static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
+ show_fan_min, set_fan_min, offset - 1)
+
+define_fan_sysfs(1);
+define_fan_sysfs(2);
+
+/* Alarms */
+static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct vt8231_data *data = vt8231_update_device(dev);
+ return sprintf(buf, "%d\n", data->alarms);
+}
+
+static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+
+static struct i2c_driver vt8231_driver = {
+ .driver = {
+ .name = "vt8231",
+ },
+ .attach_adapter = vt8231_detect,
+ .detach_client = vt8231_detach_client,
+};
+
+static struct pci_device_id vt8231_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, vt8231_pci_ids);
+
+static int __devinit vt8231_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id);
+
+static struct pci_driver vt8231_pci_driver = {
+ .name = "vt8231",
+ .id_table = vt8231_pci_ids,
+ .probe = vt8231_pci_probe,
+};
+
+int vt8231_detect(struct i2c_adapter *adapter)
+{
+ struct i2c_client *client;
+ struct vt8231_data *data;
+ int err = 0, i;
+ u16 val;
+
+ /* 8231 requires multiple of 256 */
+ if (force_addr) {
+ isa_address = force_addr & 0xFF00;
+ dev_warn(&adapter->dev, "forcing ISA address 0x%04X\n",
+ isa_address);
+ if (PCIBIOS_SUCCESSFUL != pci_write_config_word(s_bridge,
+ VT8231_BASE_REG, isa_address))
+ return -ENODEV;
+ }
+
+ if (PCIBIOS_SUCCESSFUL !=
+ pci_read_config_word(s_bridge, VT8231_ENABLE_REG, &val))
+ return -ENODEV;
+
+ if (!(val & 0x0001)) {
+ dev_warn(&adapter->dev, "enabling sensors\n");
+ if (PCIBIOS_SUCCESSFUL !=
+ pci_write_config_word(s_bridge, VT8231_ENABLE_REG,
+ val | 0x0001))
+ return -ENODEV;
+ }
+
+ /* Reserve the ISA region */
+ if (!request_region(isa_address, VT8231_EXTENT,
+ vt8231_pci_driver.name)) {
+ dev_err(&adapter->dev, "region 0x%x already in use!\n",
+ isa_address);
+ return -ENODEV;
+ }
+
+ if (!(data = kzalloc(sizeof(struct vt8231_data), GFP_KERNEL))) {
+ err = -ENOMEM;
+ goto exit_release;
+ }
+
+ client = &data->client;
+ i2c_set_clientdata(client, data);
+ client->addr = isa_address;
+ client->adapter = adapter;
+ client->driver = &vt8231_driver;
+ client->dev.parent = &adapter->dev;
+
+ /* Fill in the remaining client fields and put into the global list */
+ strlcpy(client->name, "vt8231", I2C_NAME_SIZE);
+
+ init_MUTEX(&data->update_lock);
+
+ /* Tell the I2C layer a new client has arrived */
+ if ((err = i2c_attach_client(client)))
+ goto exit_free;
+
+ vt8231_init_client(client);
+
+ /* Register sysfs hooks */
+ data->class_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->class_dev)) {
+ err = PTR_ERR(data->class_dev);
+ goto exit_detach;
+ }
+
+ /* Read the channel configuration so we know which sysfs files to create */
+ data->uch_config = vt8231_read_value(client, VT8231_REG_UCH_CONFIG);
+
+ for (i = 0; i < ARRAY_SIZE(cfg_info_temp); i++) {
+ if (ISTEMP(i, data->uch_config)) {
+ device_create_file(&client->dev,
+ cfg_info_temp[i].input);
+ device_create_file(&client->dev, cfg_info_temp[i].max);
+ device_create_file(&client->dev, cfg_info_temp[i].min);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cfg_info_volt); i++) {
+ if (ISVOLT(i, data->uch_config)) {
+ device_create_file(&client->dev,
+ cfg_info_volt[i].input);
+ device_create_file(&client->dev, cfg_info_volt[i].max);
+ device_create_file(&client->dev, cfg_info_volt[i].min);
+ }
+ }
+
+ device_create_file(&client->dev, &sensor_dev_attr_fan1_input.dev_attr);
+ device_create_file(&client->dev, &sensor_dev_attr_fan2_input.dev_attr);
+ device_create_file(&client->dev, &sensor_dev_attr_fan1_min.dev_attr);
+ device_create_file(&client->dev, &sensor_dev_attr_fan2_min.dev_attr);
+ device_create_file(&client->dev, &sensor_dev_attr_fan1_div.dev_attr);
+ device_create_file(&client->dev, &sensor_dev_attr_fan2_div.dev_attr);
+
+ device_create_file(&client->dev, &dev_attr_alarms);
+ return 0;
+
+exit_detach:
+ i2c_detach_client(client);
+exit_free:
+ kfree(data);
+exit_release:
+ release_region(isa_address, VT8231_EXTENT);
+ return err;
+}
+
+static int vt8231_detach_client(struct i2c_client *client)
+{
+ struct vt8231_data *data = i2c_get_clientdata(client);
+ int err;
+
+ hwmon_device_unregister(data->class_dev);
+
+ if ((err = i2c_detach_client(client))) {
+ return err;
+ }
+
+ release_region(client->addr, VT8231_EXTENT);
+ kfree(data);
+
+ return 0;
+}
+
+static void vt8231_init_client(struct i2c_client *client)
+{
+ vt8231_write_value(client, VT8231_REG_TEMP1_CONFIG, 0);
+ vt8231_write_value(client, VT8231_REG_TEMP2_CONFIG, 0);
+}
+
+static struct vt8231_data *vt8231_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct vt8231_data *data = i2c_get_clientdata(client);
+ int i;
+ u16 low;
+
+ down(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
+ || !data->valid) {
+ for (i = 0; i < 6; i++) {
+ if (ISVOLT(i, data->uch_config)) {
+ data->in[i] = vt8231_read_value(client,
+ regvolt[i]);
+ data->in_min[i] = vt8231_read_value(client,
+ regvoltmin[i]);
+ data->in_max[i] = vt8231_read_value(client,
+ regvoltmax[i]);
+ }
+ }
+ for (i = 0; i < 2; i++) {
+ data->fan[i] = vt8231_read_value(client,
+ VT8231_REG_FAN(i));
+ data->fan_min[i] = vt8231_read_value(client,
+ VT8231_REG_FAN_MIN(i));
+ }
+
+ low = vt8231_read_value(client, VT8231_REG_TEMP_LOW01);
+ low = (low >> 6) | ((low & 0x30) >> 2)
+ | (vt8231_read_value(client, VT8231_REG_TEMP_LOW25) << 4);
+ for (i = 0; i < 6; i++) {
+ if (ISTEMP(i, data->uch_config)) {
+ data->temp[i] = (vt8231_read_value(client,
+ regtemp[i]) << 2)
+ | ((low >> (2 * i)) & 0x03);
+ data->temp_max[i] = vt8231_read_value(client,
+ regtempmax[i]);
+ data->temp_min[i] = vt8231_read_value(client,
+ regtempmin[i]);
+ }
+ }
+
+ i = vt8231_read_value(client, VT8231_REG_FANDIV);
+ data->fan_div[0] = (i >> 4) & 0x03;
+ data->fan_div[1] = i >> 6;
+ data->alarms = vt8231_read_value(client, VT8231_REG_ALARM1) |
+ (vt8231_read_value(client, VT8231_REG_ALARM2) << 8);
+
+ /* Set alarm flags correctly */
+ if (!data->fan[0] && data->fan_min[0]) {
+ data->alarms |= 0x40;
+ } else if (data->fan[0] && !data->fan_min[0]) {
+ data->alarms &= ~0x40;
+ }
+
+ if (!data->fan[1] && data->fan_min[1]) {
+ data->alarms |= 0x80;
+ } else if (data->fan[1] && !data->fan_min[1]) {
+ data->alarms &= ~0x80;
+ }
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+
+ up(&data->update_lock);
+
+ return data;
+}
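A sketch of the low-bit shuffle above, with invented register contents: TEMP_LOW01 carries the two fraction bits of sensors 0 and 1 in bits 7:6 and 5:4, TEMP_LOW25 carries sensors 2-5 two bits each, and after the shuffle sensor i's fraction sits at bits 2i+1:2i of low.

    /* Invented register contents, mirroring vt8231_update_device(). */
    #include <stdio.h>

    int main(void)
    {
        unsigned low01 = 0xB0;   /* temp0 LSBs = 2 (bits 7:6), temp1 LSBs = 3 */
        unsigned low25 = 0x1E;   /* temp2 LSBs = 2, temp3 = 3, temp4 = 1, temp5 = 0 */
        unsigned low   = (low01 >> 6) | ((low01 & 0x30) >> 2) | (low25 << 4);

        for (int i = 0; i < 6; i++)
            printf("temp%d fraction bits: %u\n", i, (low >> (2 * i)) & 0x03);
        return 0;
    }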
+
+static int __devinit vt8231_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ u16 val;
+
+ if (PCIBIOS_SUCCESSFUL != pci_read_config_word(dev, VT8231_BASE_REG,
+ &val))
+ return -ENODEV;
+
+ isa_address = val & ~(VT8231_EXTENT - 1);
+ if (isa_address == 0 && force_addr == 0) {
+ dev_err(&dev->dev, "base address not set - "
+ "upgrade BIOS or use force_addr=0xaddr\n");
+ return -ENODEV;
+ }
+
+ s_bridge = pci_dev_get(dev);
+
+ if (i2c_isa_add_driver(&vt8231_driver)) {
+ pci_dev_put(s_bridge);
+ s_bridge = NULL;
+ }
+
+ /* Always return failure here. This is to allow other drivers to bind
+ * to this pci device. We don't really want to have control over the
+ * pci device, we only wanted to read a few register values from it.
+ */
+ return -ENODEV;
+}
+
+static int __init sm_vt8231_init(void)
+{
+ return pci_register_driver(&vt8231_pci_driver);
+}
+
+static void __exit sm_vt8231_exit(void)
+{
+ pci_unregister_driver(&vt8231_pci_driver);
+ if (s_bridge != NULL) {
+ i2c_isa_del_driver(&vt8231_driver);
+ pci_dev_put(s_bridge);
+ s_bridge = NULL;
+ }
+}
+
+MODULE_AUTHOR("Roger Lucas <roger@planbit.co.uk>");
+MODULE_DESCRIPTION("VT8231 sensors");
+MODULE_LICENSE("GPL");
+
+module_init(sm_vt8231_init);
+module_exit(sm_vt8231_exit);
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index eee22a57e92..12d79f5e490 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -676,7 +676,7 @@ static int w83627ehf_detect(struct i2c_adapter *adapter)
int i, err = 0;
if (!request_region(address + REGION_OFFSET, REGION_LENGTH,
- w83627ehf_driver.name)) {
+ w83627ehf_driver.driver.name)) {
err = -EBUSY;
goto exit;
}
@@ -785,8 +785,9 @@ static int w83627ehf_detach_client(struct i2c_client *client)
}
static struct i2c_driver w83627ehf_driver = {
- .owner = THIS_MODULE,
- .name = "w83627ehf",
+ .driver = {
+ .name = "w83627ehf",
+ },
.attach_adapter = w83627ehf_detect,
.detach_client = w83627ehf_detach_client,
};
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index bbb3dcde146..7ea441d4da6 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -332,8 +332,9 @@ static struct w83627hf_data *w83627hf_update_device(struct device *dev);
static void w83627hf_init_client(struct i2c_client *client);
static struct i2c_driver w83627hf_driver = {
- .owner = THIS_MODULE,
- .name = "w83627hf",
+ .driver = {
+ .name = "w83627hf",
+ },
.attach_adapter = w83627hf_detect,
.detach_client = w83627hf_detach_client,
};
@@ -1009,7 +1010,7 @@ static int w83627hf_detect(struct i2c_adapter *adapter)
address = force_addr & WINB_ALIGNMENT;
if (!request_region(address + WINB_REGION_OFFSET, WINB_REGION_SIZE,
- w83627hf_driver.name)) {
+ w83627hf_driver.driver.name)) {
err = -EBUSY;
goto ERROR0;
}
@@ -1122,11 +1123,10 @@ static int w83627hf_detect(struct i2c_adapter *adapter)
if (kind != w83697hf)
device_create_file_temp(new_client, 3);
- if (kind != w83697hf)
+ if (kind != w83697hf && data->vid != 0xff) {
device_create_file_vid(new_client);
-
- if (kind != w83697hf)
device_create_file_vrm(new_client);
+ }
device_create_file_fan_div(new_client, 1);
device_create_file_fan_div(new_client, 2);
@@ -1232,7 +1232,7 @@ static int w83627thf_read_gpio5(struct i2c_client *client)
/* Make sure the pins are configured for input
There must be at least five (VRM 9), and possibly 6 (VRM 10) */
- sel = superio_inb(W83627THF_GPIO5_IOSR);
+ sel = superio_inb(W83627THF_GPIO5_IOSR) & 0x3f;
if ((sel & 0x1f) != 0x1f) {
dev_dbg(&client->dev, "GPIO5 not configured for VID "
"function\n");
@@ -1323,19 +1323,18 @@ static void w83627hf_init_client(struct i2c_client *client)
int hi = w83627hf_read_value(client, W83781D_REG_CHIPID);
data->vid = (lo & 0x0f) | ((hi & 0x01) << 4);
} else if (w83627thf == data->type) {
- data->vid = w83627thf_read_gpio5(client) & 0x3f;
+ data->vid = w83627thf_read_gpio5(client);
}
/* Read VRM & OVT Config only once */
if (w83627thf == data->type || w83637hf == data->type) {
data->vrm_ovt =
w83627hf_read_value(client, W83627THF_REG_VRM_OVT_CFG);
- data->vrm = (data->vrm_ovt & 0x01) ? 90 : 82;
- } else {
- /* Convert VID to voltage based on default VRM */
- data->vrm = vid_which_vrm();
}
+ /* Convert VID to voltage based on VRM */
+ data->vrm = vid_which_vrm();
+
tmp = w83627hf_read_value(client, W83781D_REG_SCFG1);
for (i = 1; i <= 3; i++) {
if (!(tmp & BIT_SCFG1[i - 1])) {
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index ffdb3a03e2b..557114872f3 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -269,17 +269,18 @@ static struct w83781d_data *w83781d_update_device(struct device *dev);
static void w83781d_init_client(struct i2c_client *client);
static struct i2c_driver w83781d_driver = {
- .owner = THIS_MODULE,
- .name = "w83781d",
+ .driver = {
+ .name = "w83781d",
+ },
.id = I2C_DRIVERID_W83781D,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = w83781d_attach_adapter,
.detach_client = w83781d_detach_client,
};
static struct i2c_driver w83781d_isa_driver = {
- .owner = THIS_MODULE,
- .name = "w83781d-isa",
+ .driver = {
+ .name = "w83781d-isa",
+ },
.attach_adapter = w83781d_isa_attach_adapter,
.detach_client = w83781d_detach_client,
};
@@ -1012,7 +1013,7 @@ w83781d_detect(struct i2c_adapter *adapter, int address, int kind)
if (is_isa)
if (!request_region(address, W83781D_EXTENT,
- w83781d_isa_driver.name)) {
+ w83781d_isa_driver.driver.name)) {
dev_dbg(&adapter->dev, "Request of region "
"0x%x-0x%x for w83781d failed\n", address,
address + W83781D_EXTENT - 1);
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 1ba07263036..b176bf0c4c7 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -269,7 +269,6 @@ DIV_TO_REG(long val)
struct w83792d_data {
struct i2c_client client;
struct class_device *class_dev;
- struct semaphore lock;
enum chips type;
struct semaphore update_lock;
@@ -282,7 +281,7 @@ struct w83792d_data {
u8 in[9]; /* Register value */
u8 in_max[9]; /* Register value */
u8 in_min[9]; /* Register value */
- u8 low_bits[2]; /* Additional resolution to voltage in0-6 */
+ u16 low_bits; /* Additional resolution to voltage in6-0 */
u8 fan[7]; /* Register value */
u8 fan_min[7]; /* Register value */
u8 temp1[3]; /* current, over, thyst */
@@ -317,45 +316,17 @@ static void w83792d_print_debug(struct w83792d_data *data, struct device *dev);
static void w83792d_init_client(struct i2c_client *client);
static struct i2c_driver w83792d_driver = {
- .owner = THIS_MODULE,
- .name = "w83792d",
- .flags = I2C_DF_NOTIFY,
+ .driver = {
+ .name = "w83792d",
+ },
.attach_adapter = w83792d_attach_adapter,
.detach_client = w83792d_detach_client,
};
-static long in_count_from_reg(int nr, struct w83792d_data *data)
+static inline long in_count_from_reg(int nr, struct w83792d_data *data)
{
- u16 vol_count = data->in[nr];
- u16 low_bits = 0;
- vol_count = (vol_count << 2);
- switch (nr)
- {
- case 0: /* vin0 */
- low_bits = (data->low_bits[0]) & 0x03;
- break;
- case 1: /* vin1 */
- low_bits = ((data->low_bits[0]) & 0x0c) >> 2;
- break;
- case 2: /* vin2 */
- low_bits = ((data->low_bits[0]) & 0x30) >> 4;
- break;
- case 3: /* vin3 */
- low_bits = ((data->low_bits[0]) & 0xc0) >> 6;
- break;
- case 4: /* vin4 */
- low_bits = (data->low_bits[1]) & 0x03;
- break;
- case 5: /* vin5 */
- low_bits = ((data->low_bits[1]) & 0x0c) >> 2;
- break;
- case 6: /* vin6 */
- low_bits = ((data->low_bits[1]) & 0x30) >> 4;
- default:
- break;
- }
- vol_count = vol_count | low_bits;
- return vol_count;
+ /* in7 and in8 do not have low bits, but the formula still works */
+ return ((data->in[nr] << 2) | ((data->low_bits >> (2 * nr)) & 0x03));
}
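A quick equivalence check of the rewrite (register bytes invented): with LOW_BITS2 packed into the high byte, the shift-by-2*nr formula reproduces what the old per-channel switch computed, shown here for vin4.

    /* Invented register bytes; new formula vs. old switch for vin4. */
    #include <stdio.h>

    int main(void)
    {
        unsigned in4       = 0x8C;                      /* 8 MSBs of vin4 */
        unsigned low_bits1 = 0x5A, low_bits2 = 0x03;
        unsigned low_bits  = low_bits1 + (low_bits2 << 8);

        unsigned old_way = (in4 << 2) | (low_bits2 & 0x03);
        unsigned new_way = (in4 << 2) | ((low_bits >> (2 * 4)) & 0x03);
        printf("old %u new %u\n", old_way, new_way);    /* both 563 */
        return 0;
    }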
/* following are the sysfs callback functions */
@@ -1192,7 +1163,6 @@ w83792d_detect(struct i2c_adapter *adapter, int address, int kind)
new_client = &data->client;
i2c_set_clientdata(new_client, data);
new_client->addr = address;
- init_MUTEX(&data->lock);
new_client->adapter = adapter;
new_client->driver = &w83792d_driver;
new_client->flags = 0;
@@ -1243,7 +1213,7 @@ w83792d_detect(struct i2c_adapter *adapter, int address, int kind)
goto ERROR1;
}
val1 = w83792d_read_value(new_client, W83792D_REG_WCHIPID);
- if (val1 == 0x7a && address >= 0x2c) {
+ if (val1 == 0x7a) {
kind = w83792d;
} else {
if (kind == 0)
@@ -1416,26 +1386,17 @@ w83792d_detach_client(struct i2c_client *client)
return 0;
}
-/* The SMBus locks itself, usually, but nothing may access the Winbond between
- bank switches. ISA access must always be locked explicitly!
- We ignore the W83792D BUSY flag at this moment - it could lead to deadlocks,
- would slow down the W83792D access and should not be necessary.
- There are some ugly typecasts here, but the good news is - they should
- nowhere else be necessary! */
-static int
-w83792d_read_value(struct i2c_client *client, u8 reg)
+/* The SMBus locks itself. The Winbond W83792D chip has a bank register,
+ but the driver only accesses registers in bank 0, so we don't have
+ to switch banks and lock access between switches. */
+static int w83792d_read_value(struct i2c_client *client, u8 reg)
{
- int res=0;
- res = i2c_smbus_read_byte_data(client, reg);
-
- return res;
+ return i2c_smbus_read_byte_data(client, reg);
}
-static int
-w83792d_write_value(struct i2c_client *client, u8 reg, u8 value)
+static int w83792d_write_value(struct i2c_client *client, u8 reg, u8 value)
{
- i2c_smbus_write_byte_data(client, reg, value);
- return 0;
+ return i2c_smbus_write_byte_data(client, reg, value);
}
static void
@@ -1492,10 +1453,10 @@ static struct w83792d_data *w83792d_update_device(struct device *dev)
data->in_min[i] = w83792d_read_value(client,
W83792D_REG_IN_MIN[i]);
}
- data->low_bits[0] = w83792d_read_value(client,
- W83792D_REG_LOW_BITS1);
- data->low_bits[1] = w83792d_read_value(client,
- W83792D_REG_LOW_BITS2);
+ data->low_bits = w83792d_read_value(client,
+ W83792D_REG_LOW_BITS1) +
+ (w83792d_read_value(client,
+ W83792D_REG_LOW_BITS2) << 8);
for (i = 0; i < 7; i++) {
/* Update the Fan measured value and limits */
data->fan[i] = w83792d_read_value(client,
@@ -1506,7 +1467,7 @@ static struct w83792d_data *w83792d_update_device(struct device *dev)
pwm_array_tmp[i] = w83792d_read_value(client,
W83792D_REG_PWM[i]);
data->pwm[i] = pwm_array_tmp[i] & 0x0f;
- data->pwm_mode[i] = (pwm_array_tmp[i] >> 7) & 0x01;
+ data->pwm_mode[i] = pwm_array_tmp[i] >> 7;
}
reg_tmp = w83792d_read_value(client, W83792D_REG_FAN_CFG);
@@ -1607,8 +1568,8 @@ static void w83792d_print_debug(struct w83792d_data *data, struct device *dev)
dev_dbg(dev, "vin[%d] max is: 0x%x\n", i, data->in_max[i]);
dev_dbg(dev, "vin[%d] min is: 0x%x\n", i, data->in_min[i]);
}
- dev_dbg(dev, "Low Bit1 is: 0x%x\n", data->low_bits[0]);
- dev_dbg(dev, "Low Bit2 is: 0x%x\n", data->low_bits[1]);
+ dev_dbg(dev, "Low Bit1 is: 0x%x\n", data->low_bits & 0xff);
+ dev_dbg(dev, "Low Bit2 is: 0x%x\n", data->low_bits >> 8);
dev_dbg(dev, "7 set of Fan Counts and Duty Cycles: =====>\n");
for (i=0; i<7; i++) {
dev_dbg(dev, "fan[%d] is: 0x%x\n", i, data->fan[i]);
diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c
index f495b637866..f66c0cfdeda 100644
--- a/drivers/hwmon/w83l785ts.c
+++ b/drivers/hwmon/w83l785ts.c
@@ -92,10 +92,10 @@ static struct w83l785ts_data *w83l785ts_update_device(struct device *dev);
*/
static struct i2c_driver w83l785ts_driver = {
- .owner = THIS_MODULE,
- .name = "w83l785ts",
+ .driver = {
+ .name = "w83l785ts",
+ },
.id = I2C_DRIVERID_W83L785TS,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = w83l785ts_attach_adapter,
.detach_client = w83l785ts_detach_client,
};
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 4010fe92e72..08d5b8fed2d 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -236,27 +236,17 @@ config I2C_IXP2000
This support is also available as a module. If so, the module
will be called i2c-ixp2000.
-config I2C_KEYWEST
- tristate "Powermac Keywest I2C interface"
+config I2C_POWERMAC
+ tristate "Powermac I2C interface"
depends on I2C && PPC_PMAC
+ default y
help
- This supports the use of the I2C interface in the combo-I/O
- chip on recent Apple machines. Say Y if you have such a machine.
-
- This support is also available as a module. If so, the module
- will be called i2c-keywest.
-
-config I2C_PMAC_SMU
- tristate "Powermac SMU I2C interface"
- depends on I2C && PMAC_SMU
- help
- This supports the use of the I2C interface in the SMU
- chip on recent Apple machines like the iMac G5. It is used
- among others by the thermal control driver for those machines.
- Say Y if you have such a machine.
+ This exposes the various PowerMac i2c interfaces to the Linux i2c
+ layer and to userland. It is used by various drivers on the powermac
+ platform, and thus should generally be enabled.
This support is also available as a module. If so, the module
- will be called i2c-pmac-smu.
+ will be called i2c-powermac.
config I2C_MPC
tristate "MPC107/824x/85xx/52xx"
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index f1df00f66c6..b44831dff68 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -19,8 +19,7 @@ obj-$(CONFIG_I2C_ISA) += i2c-isa.o
obj-$(CONFIG_I2C_ITE) += i2c-ite.o
obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o
obj-$(CONFIG_I2C_IXP4XX) += i2c-ixp4xx.o
-obj-$(CONFIG_I2C_KEYWEST) += i2c-keywest.o
-obj-$(CONFIG_I2C_PMAC_SMU) += i2c-pmac-smu.o
+obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o
obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index ac3eafa8aac..1c752ddc10e 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -468,8 +468,7 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr,
return -1;
}
- if (hwpec)
- outb_p(1, SMBAUXCTL); /* enable hardware PEC */
+ outb_p(hwpec, SMBAUXCTL); /* enable/disable hardware PEC */
if(block)
ret = i801_block_transaction(data, read_write, size, hwpec);
@@ -478,9 +477,6 @@ static s32 i801_access(struct i2c_adapter * adap, u16 addr,
ret = i801_transaction();
}
- if (hwpec)
- outb_p(0, SMBAUXCTL); /* disable hardware PEC */
-
if(block)
return ret;
if(ret)
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 1a587253d71..87fae937e66 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -725,6 +725,7 @@ static int __devinit iic_probe(struct ocp_device *ocp){
strcpy(adap->name, "IBM IIC");
i2c_set_adapdata(adap, dev);
adap->id = I2C_HW_OCP;
+ adap->class = I2C_CLASS_HWMON;
adap->algo = &iic_algo;
adap->client_register = NULL;
adap->client_unregister = NULL;
diff --git a/drivers/i2c/busses/i2c-isa.c b/drivers/i2c/busses/i2c-isa.c
index 03672c9ca40..9f2ffef4d81 100644
--- a/drivers/i2c/busses/i2c-isa.c
+++ b/drivers/i2c/busses/i2c-isa.c
@@ -92,15 +92,13 @@ int i2c_isa_add_driver(struct i2c_driver *driver)
int res;
/* Add the driver to the list of i2c drivers in the driver core */
- driver->driver.name = driver->name;
- driver->driver.owner = driver->owner;
driver->driver.bus = &i2c_bus_type;
driver->driver.probe = i2c_isa_device_probe;
driver->driver.remove = i2c_isa_device_remove;
res = driver_register(&driver->driver);
if (res)
return res;
- dev_dbg(&isa_adapter.dev, "Driver %s registered\n", driver->name);
+ dev_dbg(&isa_adapter.dev, "Driver %s registered\n", driver->driver.name);
/* Now look for clients */
driver->attach_adapter(&isa_adapter);
@@ -124,14 +122,14 @@ int i2c_isa_del_driver(struct i2c_driver *driver)
if ((res = driver->detach_client(client))) {
dev_err(&isa_adapter.dev, "Failed, driver "
"%s not unregistered!\n",
- driver->name);
+ driver->driver.name);
return res;
}
}
/* Get the driver off the core list */
driver_unregister(&driver->driver);
- dev_dbg(&isa_adapter.dev, "Driver %s unregistered\n", driver->name);
+ dev_dbg(&isa_adapter.dev, "Driver %s unregistered\n", driver->driver.name);
return 0;
}
@@ -176,7 +174,7 @@ static void __exit i2c_isa_exit(void)
list_for_each_safe(item, _n, &isa_adapter.clients) {
client = list_entry(item, struct i2c_client, list);
dev_err(&isa_adapter.dev, "Driver %s still has an active "
- "ISA client at 0x%x\n", client->driver->name,
+ "ISA client at 0x%x\n", client->driver->driver.name,
client->addr);
}
if (client != NULL)
diff --git a/drivers/i2c/busses/i2c-keywest.c b/drivers/i2c/busses/i2c-keywest.c
deleted file mode 100644
index d61f748278f..00000000000
--- a/drivers/i2c/busses/i2c-keywest.c
+++ /dev/null
@@ -1,751 +0,0 @@
-/*
- i2c Support for Apple Keywest I2C Bus Controller
-
- Copyright (c) 2001 Benjamin Herrenschmidt <benh@kernel.crashing.org>
-
- Original work by
-
- Copyright (c) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
- Changes:
-
- 2001/12/13 BenH New implementation
- 2001/12/15 BenH Add support for "byte" and "quick"
- transfers. Add i2c_xfer routine.
- 2003/09/21 BenH Rework state machine with Paulus help
- 2004/01/21 BenH Merge in Greg KH changes, polled mode is back
- 2004/02/05 BenH Merge 64 bits fixes from the g5 ppc64 tree
-
- My understanding of the various modes supported by keywest are:
-
- - Dumb mode : not implemented, probably direct tweaking of lines
- - Standard mode : simple i2c transaction of type
- S Addr R/W A Data A Data ... T
- - Standard sub mode : combined 8 bit subaddr write with data read
- S Addr R/W A SubAddr A Data A Data ... T
- - Combined mode : Subaddress and Data sequences appended with no stop
- S Addr R/W A SubAddr S Addr R/W A Data A Data ... T
-
- Currently, this driver uses only Standard mode for i2c xfer, and
- smbus byte & quick transfers ; and uses StandardSub mode for
- other smbus transfers instead of combined as we need that for the
- sound driver to be happy
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/timer.h>
-#include <linux/spinlock.h>
-#include <linux/completion.h>
-#include <linux/interrupt.h>
-
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/machdep.h>
-#include <asm/pmac_feature.h>
-#include <asm/pmac_low_i2c.h>
-
-#include "i2c-keywest.h"
-
-#undef POLLED_MODE
-
-/* Some debug macros */
-#define WRONG_STATE(name) do {\
- pr_debug("KW: wrong state. Got %s, state: %s (isr: %02x)\n", \
- name, __kw_state_names[iface->state], isr); \
- } while(0)
-
-#ifdef DEBUG
-static const char *__kw_state_names[] = {
- "state_idle",
- "state_addr",
- "state_read",
- "state_write",
- "state_stop",
- "state_dead"
-};
-#endif /* DEBUG */
-
-MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
-MODULE_DESCRIPTION("I2C driver for Apple's Keywest");
-MODULE_LICENSE("GPL");
-
-#ifdef POLLED_MODE
-/* Don't schedule, the g5 fan controller is too
- * timing sensitive
- */
-static u8
-wait_interrupt(struct keywest_iface* iface)
-{
- int i;
- u8 isr;
-
- for (i = 0; i < 200000; i++) {
- isr = read_reg(reg_isr) & KW_I2C_IRQ_MASK;
- if (isr != 0)
- return isr;
- udelay(10);
- }
- return isr;
-}
-#endif /* POLLED_MODE */
-
-static void
-do_stop(struct keywest_iface* iface, int result)
-{
- write_reg(reg_control, KW_I2C_CTL_STOP);
- iface->state = state_stop;
- iface->result = result;
-}
-
-/* Main state machine for standard & standard sub mode */
-static void
-handle_interrupt(struct keywest_iface *iface, u8 isr)
-{
- int ack;
-
- if (isr == 0) {
- if (iface->state != state_stop) {
- pr_debug("KW: Timeout !\n");
- do_stop(iface, -EIO);
- }
- if (iface->state == state_stop) {
- ack = read_reg(reg_status);
- if (!(ack & KW_I2C_STAT_BUSY)) {
- iface->state = state_idle;
- write_reg(reg_ier, 0x00);
-#ifndef POLLED_MODE
- complete(&iface->complete);
-#endif /* POLLED_MODE */
- }
- }
- return;
- }
-
- if (isr & KW_I2C_IRQ_ADDR) {
- ack = read_reg(reg_status);
- if (iface->state != state_addr) {
- write_reg(reg_isr, KW_I2C_IRQ_ADDR);
- WRONG_STATE("KW_I2C_IRQ_ADDR");
- do_stop(iface, -EIO);
- return;
- }
- if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
- iface->state = state_stop;
- iface->result = -ENODEV;
- pr_debug("KW: NAK on address\n");
- } else {
- /* Handle rw "quick" mode */
- if (iface->datalen == 0) {
- do_stop(iface, 0);
- } else if (iface->read_write == I2C_SMBUS_READ) {
- iface->state = state_read;
- if (iface->datalen > 1)
- write_reg(reg_control, KW_I2C_CTL_AAK);
- } else {
- iface->state = state_write;
- write_reg(reg_data, *(iface->data++));
- iface->datalen--;
- }
- }
- write_reg(reg_isr, KW_I2C_IRQ_ADDR);
- }
-
- if (isr & KW_I2C_IRQ_DATA) {
- if (iface->state == state_read) {
- *(iface->data++) = read_reg(reg_data);
- write_reg(reg_isr, KW_I2C_IRQ_DATA);
- iface->datalen--;
- if (iface->datalen == 0)
- iface->state = state_stop;
- else if (iface->datalen == 1)
- write_reg(reg_control, 0);
- } else if (iface->state == state_write) {
- /* Check ack status */
- ack = read_reg(reg_status);
- if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
- pr_debug("KW: nack on data write (%x): %x\n",
- iface->data[-1], ack);
- do_stop(iface, -EIO);
- } else if (iface->datalen) {
- write_reg(reg_data, *(iface->data++));
- iface->datalen--;
- } else {
- write_reg(reg_control, KW_I2C_CTL_STOP);
- iface->state = state_stop;
- iface->result = 0;
- }
- write_reg(reg_isr, KW_I2C_IRQ_DATA);
- } else {
- write_reg(reg_isr, KW_I2C_IRQ_DATA);
- WRONG_STATE("KW_I2C_IRQ_DATA");
- if (iface->state != state_stop)
- do_stop(iface, -EIO);
- }
- }
-
- if (isr & KW_I2C_IRQ_STOP) {
- write_reg(reg_isr, KW_I2C_IRQ_STOP);
- if (iface->state != state_stop) {
- WRONG_STATE("KW_I2C_IRQ_STOP");
- iface->result = -EIO;
- }
- iface->state = state_idle;
- write_reg(reg_ier, 0x00);
-#ifndef POLLED_MODE
- complete(&iface->complete);
-#endif /* POLLED_MODE */
- }
-
- if (isr & KW_I2C_IRQ_START)
- write_reg(reg_isr, KW_I2C_IRQ_START);
-}
-
-#ifndef POLLED_MODE
-
-/* Interrupt handler */
-static irqreturn_t
-keywest_irq(int irq, void *dev_id, struct pt_regs *regs)
-{
- struct keywest_iface *iface = (struct keywest_iface *)dev_id;
- unsigned long flags;
-
- spin_lock_irqsave(&iface->lock, flags);
- del_timer(&iface->timeout_timer);
- handle_interrupt(iface, read_reg(reg_isr));
- if (iface->state != state_idle) {
- iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
- add_timer(&iface->timeout_timer);
- }
- spin_unlock_irqrestore(&iface->lock, flags);
- return IRQ_HANDLED;
-}
-
-static void
-keywest_timeout(unsigned long data)
-{
- struct keywest_iface *iface = (struct keywest_iface *)data;
- unsigned long flags;
-
- pr_debug("timeout !\n");
- spin_lock_irqsave(&iface->lock, flags);
- handle_interrupt(iface, read_reg(reg_isr));
- if (iface->state != state_idle) {
- iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
- add_timer(&iface->timeout_timer);
- }
- spin_unlock_irqrestore(&iface->lock, flags);
-}
-
-#endif /* POLLED_MODE */
-
-/*
- * SMBUS-type transfer entrypoint
- */
-static s32
-keywest_smbus_xfer( struct i2c_adapter* adap,
- u16 addr,
- unsigned short flags,
- char read_write,
- u8 command,
- int size,
- union i2c_smbus_data* data)
-{
- struct keywest_chan* chan = i2c_get_adapdata(adap);
- struct keywest_iface* iface = chan->iface;
- int len;
- u8* buffer;
- u16 cur_word;
- int rc = 0;
-
- if (iface->state == state_dead)
- return -ENXIO;
-
- /* Prepare datas & select mode */
- iface->cur_mode &= ~KW_I2C_MODE_MODE_MASK;
- switch (size) {
- case I2C_SMBUS_QUICK:
- len = 0;
- buffer = NULL;
- iface->cur_mode |= KW_I2C_MODE_STANDARD;
- break;
- case I2C_SMBUS_BYTE:
- len = 1;
- buffer = &data->byte;
- iface->cur_mode |= KW_I2C_MODE_STANDARD;
- break;
- case I2C_SMBUS_BYTE_DATA:
- len = 1;
- buffer = &data->byte;
- iface->cur_mode |= KW_I2C_MODE_STANDARDSUB;
- break;
- case I2C_SMBUS_WORD_DATA:
- len = 2;
- cur_word = cpu_to_le16(data->word);
- buffer = (u8 *)&cur_word;
- iface->cur_mode |= KW_I2C_MODE_STANDARDSUB;
- break;
- case I2C_SMBUS_BLOCK_DATA:
- len = data->block[0];
- buffer = &data->block[1];
- iface->cur_mode |= KW_I2C_MODE_STANDARDSUB;
- break;
- default:
- return -1;
- }
-
- /* Turn a standardsub read into a combined mode access */
- if (read_write == I2C_SMBUS_READ
- && (iface->cur_mode & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_STANDARDSUB) {
- iface->cur_mode &= ~KW_I2C_MODE_MODE_MASK;
- iface->cur_mode |= KW_I2C_MODE_COMBINED;
- }
-
- /* Original driver had this limitation */
- if (len > 32)
- len = 32;
-
- if (pmac_low_i2c_lock(iface->node))
- return -ENXIO;
-
- pr_debug("chan: %d, addr: 0x%x, transfer len: %d, read: %d\n",
- chan->chan_no, addr, len, read_write == I2C_SMBUS_READ);
-
- iface->data = buffer;
- iface->datalen = len;
- iface->state = state_addr;
- iface->result = 0;
- iface->read_write = read_write;
-
- /* Setup channel & clear pending irqs */
- write_reg(reg_isr, read_reg(reg_isr));
- write_reg(reg_mode, iface->cur_mode | (chan->chan_no << 4));
- write_reg(reg_status, 0);
-
- /* Set up address and r/w bit */
- write_reg(reg_addr,
- (addr << 1) | ((read_write == I2C_SMBUS_READ) ? 0x01 : 0x00));
-
- /* Set up the sub address */
- if ((iface->cur_mode & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_STANDARDSUB
- || (iface->cur_mode & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_COMBINED)
- write_reg(reg_subaddr, command);
-
-#ifndef POLLED_MODE
- /* Arm timeout */
- iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
- add_timer(&iface->timeout_timer);
-#endif
-
- /* Start sending address & enable interrupt*/
- write_reg(reg_control, KW_I2C_CTL_XADDR);
- write_reg(reg_ier, KW_I2C_IRQ_MASK);
-
-#ifdef POLLED_MODE
- pr_debug("using polled mode...\n");
- /* State machine, to turn into an interrupt handler */
- while(iface->state != state_idle) {
- unsigned long flags;
-
- u8 isr = wait_interrupt(iface);
- spin_lock_irqsave(&iface->lock, flags);
- handle_interrupt(iface, isr);
- spin_unlock_irqrestore(&iface->lock, flags);
- }
-#else /* POLLED_MODE */
- pr_debug("using interrupt mode...\n");
- wait_for_completion(&iface->complete);
-#endif /* POLLED_MODE */
-
- rc = iface->result;
- pr_debug("transfer done, result: %d\n", rc);
-
- if (rc == 0 && size == I2C_SMBUS_WORD_DATA && read_write == I2C_SMBUS_READ)
- data->word = le16_to_cpu(cur_word);
-
- /* Release sem */
- pmac_low_i2c_unlock(iface->node);
-
- return rc;
-}
-
-/*
- * Generic i2c master transfer entrypoint
- */
-static int
-keywest_xfer( struct i2c_adapter *adap,
- struct i2c_msg *msgs,
- int num)
-{
- struct keywest_chan* chan = i2c_get_adapdata(adap);
- struct keywest_iface* iface = chan->iface;
- struct i2c_msg *pmsg;
- int i, completed;
- int rc = 0;
-
- if (iface->state == state_dead)
- return -ENXIO;
-
- if (pmac_low_i2c_lock(iface->node))
- return -ENXIO;
-
- /* Set adapter to standard mode */
- iface->cur_mode &= ~KW_I2C_MODE_MODE_MASK;
- iface->cur_mode |= KW_I2C_MODE_STANDARD;
-
- completed = 0;
- for (i = 0; rc >= 0 && i < num;) {
- u8 addr;
-
- pmsg = &msgs[i++];
- addr = pmsg->addr;
- if (pmsg->flags & I2C_M_TEN) {
- printk(KERN_ERR "i2c-keywest: 10 bits addr not supported !\n");
- rc = -EINVAL;
- break;
- }
- pr_debug("xfer: chan: %d, doing %s %d bytes to 0x%02x - %d of %d messages\n",
- chan->chan_no,
- pmsg->flags & I2C_M_RD ? "read" : "write",
- pmsg->len, addr, i, num);
-
- /* Setup channel & clear pending irqs */
- write_reg(reg_mode, iface->cur_mode | (chan->chan_no << 4));
- write_reg(reg_isr, read_reg(reg_isr));
- write_reg(reg_status, 0);
-
- iface->data = pmsg->buf;
- iface->datalen = pmsg->len;
- iface->state = state_addr;
- iface->result = 0;
- if (pmsg->flags & I2C_M_RD)
- iface->read_write = I2C_SMBUS_READ;
- else
- iface->read_write = I2C_SMBUS_WRITE;
-
- /* Set up address and r/w bit */
- if (pmsg->flags & I2C_M_REV_DIR_ADDR)
- addr ^= 1;
- write_reg(reg_addr,
- (addr << 1) |
- ((iface->read_write == I2C_SMBUS_READ) ? 0x01 : 0x00));
-
-#ifndef POLLED_MODE
- /* Arm timeout */
- iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
- add_timer(&iface->timeout_timer);
-#endif
-
- /* Start sending address & enable interrupt*/
- write_reg(reg_ier, KW_I2C_IRQ_MASK);
- write_reg(reg_control, KW_I2C_CTL_XADDR);
-
-#ifdef POLLED_MODE
- pr_debug("using polled mode...\n");
- /* State machine, to turn into an interrupt handler */
- while(iface->state != state_idle) {
- u8 isr = wait_interrupt(iface);
- handle_interrupt(iface, isr);
- }
-#else /* POLLED_MODE */
- pr_debug("using interrupt mode...\n");
- wait_for_completion(&iface->complete);
-#endif /* POLLED_MODE */
-
- rc = iface->result;
- if (rc == 0)
- completed++;
- pr_debug("transfer done, result: %d\n", rc);
- }
-
- /* Release sem */
- pmac_low_i2c_unlock(iface->node);
-
- return completed;
-}
-
-static u32
-keywest_func(struct i2c_adapter * adapter)
-{
- return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
- I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
- I2C_FUNC_SMBUS_BLOCK_DATA;
-}
-
-/* For now, we only handle combined mode (smbus) */
-static struct i2c_algorithm keywest_algorithm = {
- .smbus_xfer = keywest_smbus_xfer,
- .master_xfer = keywest_xfer,
- .functionality = keywest_func,
-};
-
-
-static int
-create_iface(struct device_node *np, struct device *dev)
-{
- unsigned long steps;
- unsigned bsteps, tsize, i, nchan, addroffset;
- struct keywest_iface* iface;
- u32 *psteps, *prate;
- int rc;
-
- if (np->n_intrs < 1 || np->n_addrs < 1) {
- printk(KERN_ERR "%s: Missing interrupt or address !\n",
- np->full_name);
- return -ENODEV;
- }
- if (pmac_low_i2c_lock(np))
- return -ENODEV;
-
- psteps = (u32 *)get_property(np, "AAPL,address-step", NULL);
- steps = psteps ? (*psteps) : 0x10;
-
- /* Hrm... maybe we can be smarter here */
- for (bsteps = 0; (steps & 0x01) == 0; bsteps++)
- steps >>= 1;
-
- if (np->parent->name[0] == 'u') {
- nchan = 2;
- addroffset = 3;
- } else {
- addroffset = 0;
- nchan = 1;
- }
-
- tsize = sizeof(struct keywest_iface) +
- (sizeof(struct keywest_chan) + 4) * nchan;
- iface = kzalloc(tsize, GFP_KERNEL);
- if (iface == NULL) {
- printk(KERN_ERR "i2c-keywest: can't allocate inteface !\n");
- pmac_low_i2c_unlock(np);
- return -ENOMEM;
- }
- spin_lock_init(&iface->lock);
- init_completion(&iface->complete);
- iface->node = of_node_get(np);
- iface->bsteps = bsteps;
- iface->chan_count = nchan;
- iface->state = state_idle;
- iface->irq = np->intrs[0].line;
- iface->channels = (struct keywest_chan *)
- (((unsigned long)(iface + 1) + 3UL) & ~3UL);
- iface->base = ioremap(np->addrs[0].address + addroffset,
- np->addrs[0].size);
- if (!iface->base) {
- printk(KERN_ERR "i2c-keywest: can't map inteface !\n");
- kfree(iface);
- pmac_low_i2c_unlock(np);
- return -ENOMEM;
- }
-
-#ifndef POLLED_MODE
- init_timer(&iface->timeout_timer);
- iface->timeout_timer.function = keywest_timeout;
- iface->timeout_timer.data = (unsigned long)iface;
-#endif
-
- /* Select interface rate */
- iface->cur_mode = KW_I2C_MODE_100KHZ;
- prate = (u32 *)get_property(np, "AAPL,i2c-rate", NULL);
- if (prate) switch(*prate) {
- case 100:
- iface->cur_mode = KW_I2C_MODE_100KHZ;
- break;
- case 50:
- iface->cur_mode = KW_I2C_MODE_50KHZ;
- break;
- case 25:
- iface->cur_mode = KW_I2C_MODE_25KHZ;
- break;
- default:
- printk(KERN_WARNING "i2c-keywest: unknown rate %ldKhz, using 100KHz\n",
- (long)*prate);
- }
-
- /* Select standard mode by default */
- iface->cur_mode |= KW_I2C_MODE_STANDARD;
-
- /* Write mode */
- write_reg(reg_mode, iface->cur_mode);
-
- /* Switch interrupts off & clear them*/
- write_reg(reg_ier, 0x00);
- write_reg(reg_isr, KW_I2C_IRQ_MASK);
-
-#ifndef POLLED_MODE
- /* Request chip interrupt */
- rc = request_irq(iface->irq, keywest_irq, SA_INTERRUPT, "keywest i2c", iface);
- if (rc) {
- printk(KERN_ERR "i2c-keywest: can't get IRQ %d !\n", iface->irq);
- iounmap(iface->base);
- kfree(iface);
- pmac_low_i2c_unlock(np);
- return -ENODEV;
- }
-#endif /* POLLED_MODE */
-
- pmac_low_i2c_unlock(np);
- dev_set_drvdata(dev, iface);
-
- for (i=0; i<nchan; i++) {
- struct keywest_chan* chan = &iface->channels[i];
-
- sprintf(chan->adapter.name, "%s %d", np->parent->name, i);
- chan->iface = iface;
- chan->chan_no = i;
- chan->adapter.algo = &keywest_algorithm;
- chan->adapter.algo_data = NULL;
- chan->adapter.client_register = NULL;
- chan->adapter.client_unregister = NULL;
- i2c_set_adapdata(&chan->adapter, chan);
- chan->adapter.dev.parent = dev;
-
- rc = i2c_add_adapter(&chan->adapter);
- if (rc) {
- printk("i2c-keywest.c: Adapter %s registration failed\n",
- chan->adapter.name);
- i2c_set_adapdata(&chan->adapter, NULL);
- }
- }
-
- printk(KERN_INFO "Found KeyWest i2c on \"%s\", %d channel%s, stepping: %d bits\n",
- np->parent->name, nchan, nchan > 1 ? "s" : "", bsteps);
-
- return 0;
-}
-
-static int
-dispose_iface(struct device *dev)
-{
- struct keywest_iface *iface = dev_get_drvdata(dev);
- int i, rc;
-
- /* Make sure we stop all activity */
- if (pmac_low_i2c_lock(iface->node))
- return -ENODEV;
-
-#ifndef POLLED_MODE
- spin_lock_irq(&iface->lock);
- while (iface->state != state_idle) {
- spin_unlock_irq(&iface->lock);
- msleep(100);
- spin_lock_irq(&iface->lock);
- }
-#endif /* POLLED_MODE */
- iface->state = state_dead;
-#ifndef POLLED_MODE
- spin_unlock_irq(&iface->lock);
- free_irq(iface->irq, iface);
-#endif /* POLLED_MODE */
-
- pmac_low_i2c_unlock(iface->node);
-
- /* Release all channels */
- for (i=0; i<iface->chan_count; i++) {
- struct keywest_chan* chan = &iface->channels[i];
- if (i2c_get_adapdata(&chan->adapter) == NULL)
- continue;
- rc = i2c_del_adapter(&chan->adapter);
- i2c_set_adapdata(&chan->adapter, NULL);
- /* We aren't that prepared to deal with this... */
- if (rc)
- printk("i2c-keywest.c: i2c_del_adapter failed, that's bad !\n");
- }
- iounmap(iface->base);
- dev_set_drvdata(dev, NULL);
- of_node_put(iface->node);
- kfree(iface);
-
- return 0;
-}
-
-static int
-create_iface_macio(struct macio_dev* dev, const struct of_device_id *match)
-{
- return create_iface(dev->ofdev.node, &dev->ofdev.dev);
-}
-
-static int
-dispose_iface_macio(struct macio_dev* dev)
-{
- return dispose_iface(&dev->ofdev.dev);
-}
-
-static int
-create_iface_of_platform(struct of_device* dev, const struct of_device_id *match)
-{
- return create_iface(dev->node, &dev->dev);
-}
-
-static int
-dispose_iface_of_platform(struct of_device* dev)
-{
- return dispose_iface(&dev->dev);
-}
-
-static struct of_device_id i2c_keywest_match[] =
-{
- {
- .type = "i2c",
- .compatible = "keywest"
- },
- {},
-};
-
-static struct macio_driver i2c_keywest_macio_driver =
-{
- .owner = THIS_MODULE,
- .name = "i2c-keywest",
- .match_table = i2c_keywest_match,
- .probe = create_iface_macio,
- .remove = dispose_iface_macio
-};
-
-static struct of_platform_driver i2c_keywest_of_platform_driver =
-{
- .owner = THIS_MODULE,
- .name = "i2c-keywest",
- .match_table = i2c_keywest_match,
- .probe = create_iface_of_platform,
- .remove = dispose_iface_of_platform
-};
-
-static int __init
-i2c_keywest_init(void)
-{
- of_register_driver(&i2c_keywest_of_platform_driver);
- macio_register_driver(&i2c_keywest_macio_driver);
-
- return 0;
-}
-
-static void __exit
-i2c_keywest_cleanup(void)
-{
- of_unregister_driver(&i2c_keywest_of_platform_driver);
- macio_unregister_driver(&i2c_keywest_macio_driver);
-}
-
-module_init(i2c_keywest_init);
-module_exit(i2c_keywest_cleanup);
diff --git a/drivers/i2c/busses/i2c-keywest.h b/drivers/i2c/busses/i2c-keywest.h
deleted file mode 100644
index c5022e1ca6f..00000000000
--- a/drivers/i2c/busses/i2c-keywest.h
+++ /dev/null
@@ -1,108 +0,0 @@
-#ifndef __I2C_KEYWEST_H__
-#define __I2C_KEYWEST_H__
-
-/* The Tumbler audio equalizer can be really slow sometimes */
-#define POLL_TIMEOUT (2*HZ)
-
-/* Register indices */
-typedef enum {
- reg_mode = 0,
- reg_control,
- reg_status,
- reg_isr,
- reg_ier,
- reg_addr,
- reg_subaddr,
- reg_data
-} reg_t;
-
-
-/* Mode register */
-#define KW_I2C_MODE_100KHZ 0x00
-#define KW_I2C_MODE_50KHZ 0x01
-#define KW_I2C_MODE_25KHZ 0x02
-#define KW_I2C_MODE_DUMB 0x00
-#define KW_I2C_MODE_STANDARD 0x04
-#define KW_I2C_MODE_STANDARDSUB 0x08
-#define KW_I2C_MODE_COMBINED 0x0C
-#define KW_I2C_MODE_MODE_MASK 0x0C
-#define KW_I2C_MODE_CHAN_MASK 0xF0
-
-/* Control register */
-#define KW_I2C_CTL_AAK 0x01
-#define KW_I2C_CTL_XADDR 0x02
-#define KW_I2C_CTL_STOP 0x04
-#define KW_I2C_CTL_START 0x08
-
-/* Status register */
-#define KW_I2C_STAT_BUSY 0x01
-#define KW_I2C_STAT_LAST_AAK 0x02
-#define KW_I2C_STAT_LAST_RW 0x04
-#define KW_I2C_STAT_SDA 0x08
-#define KW_I2C_STAT_SCL 0x10
-
-/* IER & ISR registers */
-#define KW_I2C_IRQ_DATA 0x01
-#define KW_I2C_IRQ_ADDR 0x02
-#define KW_I2C_IRQ_STOP 0x04
-#define KW_I2C_IRQ_START 0x08
-#define KW_I2C_IRQ_MASK 0x0F
-
-/* Physical interface */
-struct keywest_iface
-{
- struct device_node *node;
- void __iomem * base;
- unsigned bsteps;
- int irq;
- spinlock_t lock;
- struct keywest_chan *channels;
- unsigned chan_count;
- u8 cur_mode;
- char read_write;
- u8 *data;
- unsigned datalen;
- int state;
- int result;
- struct timer_list timeout_timer;
- struct completion complete;
-};
-
-enum {
- state_idle,
- state_addr,
- state_read,
- state_write,
- state_stop,
- state_dead
-};
-
-/* Channel on an interface */
-struct keywest_chan
-{
- struct i2c_adapter adapter;
- struct keywest_iface* iface;
- unsigned chan_no;
-};
-
-/* Register access */
-
-static inline u8 __read_reg(struct keywest_iface *iface, reg_t reg)
-{
- return in_8(iface->base
- + (((unsigned)reg) << iface->bsteps));
-}
-
-static inline void __write_reg(struct keywest_iface *iface, reg_t reg, u8 val)
-{
- out_8(iface->base
- + (((unsigned)reg) << iface->bsteps), val);
- (void)__read_reg(iface, reg_subaddr);
-}
-
-#define write_reg(reg, val) __write_reg(iface, reg, val)
-#define read_reg(reg) __read_reg(iface, reg)
-
-
-
-#endif /* __I2C_KEYWEST_H__ */
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 81031eb5105..22781d84f79 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -1,6 +1,4 @@
/*
- * drivers/i2c/busses/i2c-mv64xxx.c
- *
* Driver for the i2c controller on the Marvell line of host bridges for MIPS
* and PPC (e.g, gt642[46]0, mv643[46]0, mv644[46]0).
*
@@ -65,7 +63,6 @@ enum {
MV64XXX_I2C_STATE_WAITING_FOR_ADDR_2_ACK,
MV64XXX_I2C_STATE_WAITING_FOR_SLAVE_ACK,
MV64XXX_I2C_STATE_WAITING_FOR_SLAVE_DATA,
- MV64XXX_I2C_STATE_ABORTING,
};
/* Driver actions */
@@ -85,6 +82,7 @@ struct mv64xxx_i2c_data {
int irq;
u32 state;
u32 action;
+ u32 aborting;
u32 cntl_bits;
void __iomem *reg_base;
u32 reg_base_p;
@@ -122,12 +120,6 @@ mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status)
return;
}
- if (drv_data->state == MV64XXX_I2C_STATE_ABORTING) {
- drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP;
- drv_data->state = MV64XXX_I2C_STATE_IDLE;
- return;
- }
-
/* The status from the ctlr [mostly] tells us what to do next */
switch (status) {
/* Start condition interrupt */
@@ -148,14 +140,16 @@ mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status)
/* FALLTHRU */
case MV64XXX_I2C_STATUS_MAST_WR_ADDR_2_ACK: /* 0xd0 */
case MV64XXX_I2C_STATUS_MAST_WR_ACK: /* 0x28 */
- if (drv_data->bytes_left > 0) {
+ if ((drv_data->bytes_left == 0)
+ || (drv_data->aborting
+ && (drv_data->byte_posn != 0))) {
+ drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP;
+ drv_data->state = MV64XXX_I2C_STATE_IDLE;
+ } else {
drv_data->action = MV64XXX_I2C_ACTION_SEND_DATA;
drv_data->state =
MV64XXX_I2C_STATE_WAITING_FOR_SLAVE_ACK;
drv_data->bytes_left--;
- } else {
- drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP;
- drv_data->state = MV64XXX_I2C_STATE_IDLE;
}
break;
@@ -184,7 +178,7 @@ mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status)
}
drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_SLAVE_DATA;
- if (drv_data->bytes_left == 1)
+ if ((drv_data->bytes_left == 1) || drv_data->aborting)
drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_ACK;
break;
@@ -320,6 +314,7 @@ mv64xxx_i2c_prepare_for_io(struct mv64xxx_i2c_data *drv_data,
drv_data->msg = msg;
drv_data->byte_posn = 0;
drv_data->bytes_left = msg->len;
+ drv_data->aborting = 0;
drv_data->rc = 0;
drv_data->cntl_bits = MV64XXX_I2C_REG_CONTROL_ACK |
MV64XXX_I2C_REG_CONTROL_INTEN | MV64XXX_I2C_REG_CONTROL_TWSIEN;
@@ -359,17 +354,19 @@ mv64xxx_i2c_wait_for_completion(struct mv64xxx_i2c_data *drv_data)
}
if (abort && drv_data->block) {
- drv_data->state = MV64XXX_I2C_STATE_ABORTING;
+ drv_data->aborting = 1;
spin_unlock_irqrestore(&drv_data->lock, flags);
time_left = wait_event_timeout(drv_data->waitq,
!drv_data->block,
msecs_to_jiffies(drv_data->adapter.timeout));
- if (time_left <= 0) {
+ if ((time_left <= 0) && drv_data->block) {
drv_data->state = MV64XXX_I2C_STATE_IDLE;
dev_err(&drv_data->adapter.dev,
- "mv64xxx: I2C bus locked\n");
+ "mv64xxx: I2C bus locked, block: %d, "
+ "time_left: %d\n", drv_data->block,
+ (int)time_left);
}
} else
spin_unlock_irqrestore(&drv_data->lock, flags);
@@ -510,7 +507,7 @@ mv64xxx_i2c_probe(struct platform_device *pd)
goto exit_kfree;
}
- strncpy(drv_data->adapter.name, MV64XXX_I2C_CTLR_NAME " adapter",
+ strlcpy(drv_data->adapter.name, MV64XXX_I2C_CTLR_NAME " adapter",
I2C_NAME_SIZE);
init_waitqueue_head(&drv_data->waitq);
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 4d18e6e5f15..2d80eb26f68 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -30,6 +30,7 @@
nForce3 Pro150 MCP 00D4
nForce3 250Gb MCP 00E4
nForce4 MCP 0052
+ nForce4 MCP-04 0034
This driver supports the 2 SMBuses that are included in the MCP of the
nForce2/3/4 chipsets.
@@ -257,6 +258,7 @@ static struct pci_device_id nforce2_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE4_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SMBUS) },
{ 0 }
};
diff --git a/drivers/i2c/busses/i2c-parport.h b/drivers/i2c/busses/i2c-parport.h
index f63a5377928..d702e5e0388 100644
--- a/drivers/i2c/busses/i2c-parport.h
+++ b/drivers/i2c/busses/i2c-parport.h
@@ -80,6 +80,14 @@ static struct adapter_parm adapter_parm[] = {
.setscl = { 0x01, DATA, 1 },
.getsda = { 0x10, STAT, 1 },
},
+ /* type 6: Barco LPT->DVI (K5800236) adapter */
+ {
+ .setsda = { 0x02, DATA, 1 },
+ .setscl = { 0x01, DATA, 1 },
+ .getsda = { 0x20, STAT, 0 },
+ .getscl = { 0x40, STAT, 0 },
+ .init = { 0xfc, DATA, 0 },
+ },
};
static int type;
@@ -91,4 +99,6 @@ MODULE_PARM_DESC(type,
" 2 = Velleman K8000 adapter\n"
" 3 = ELV adapter\n"
" 4 = ADM1032 evaluation board\n"
- " 5 = ADM1025, ADM1030 and ADM1031 evaluation boards\n");
+ " 5 = ADM1025, ADM1030 and ADM1031 evaluation boards\n"
+ " 6 = Barco LPT->DVI (K5800236) adapter\n"
+);
diff --git a/drivers/i2c/busses/i2c-pmac-smu.c b/drivers/i2c/busses/i2c-pmac-smu.c
deleted file mode 100644
index bfefe7f7a53..00000000000
--- a/drivers/i2c/busses/i2c-pmac-smu.c
+++ /dev/null
@@ -1,315 +0,0 @@
-/*
- i2c Support for Apple SMU Controller
-
- Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp.
- <benh@kernel.crashing.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/i2c.h>
-#include <linux/init.h>
-#include <linux/completion.h>
-#include <linux/device.h>
-#include <asm/prom.h>
-#include <asm/of_device.h>
-#include <asm/smu.h>
-
-static int probe;
-
-MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
-MODULE_DESCRIPTION("I2C driver for Apple's SMU");
-MODULE_LICENSE("GPL");
-module_param(probe, bool, 0);
-
-
-/* Physical interface */
-struct smu_iface
-{
- struct i2c_adapter adapter;
- struct completion complete;
- u32 busid;
-};
-
-static void smu_i2c_done(struct smu_i2c_cmd *cmd, void *misc)
-{
- struct smu_iface *iface = misc;
- complete(&iface->complete);
-}
-
-/*
- * SMBUS-type transfer entrypoint
- */
-static s32 smu_smbus_xfer( struct i2c_adapter* adap,
- u16 addr,
- unsigned short flags,
- char read_write,
- u8 command,
- int size,
- union i2c_smbus_data* data)
-{
- struct smu_iface *iface = i2c_get_adapdata(adap);
- struct smu_i2c_cmd cmd;
- int rc = 0;
- int read = (read_write == I2C_SMBUS_READ);
-
- cmd.info.bus = iface->busid;
- cmd.info.devaddr = (addr << 1) | (read ? 0x01 : 0x00);
-
- /* Prepare datas & select mode */
- switch (size) {
- case I2C_SMBUS_QUICK:
- cmd.info.type = SMU_I2C_TRANSFER_SIMPLE;
- cmd.info.datalen = 0;
- break;
- case I2C_SMBUS_BYTE:
- cmd.info.type = SMU_I2C_TRANSFER_SIMPLE;
- cmd.info.datalen = 1;
- if (!read)
- cmd.info.data[0] = data->byte;
- break;
- case I2C_SMBUS_BYTE_DATA:
- cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
- cmd.info.datalen = 1;
- cmd.info.sublen = 1;
- cmd.info.subaddr[0] = command;
- cmd.info.subaddr[1] = 0;
- cmd.info.subaddr[2] = 0;
- if (!read)
- cmd.info.data[0] = data->byte;
- break;
- case I2C_SMBUS_WORD_DATA:
- cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
- cmd.info.datalen = 2;
- cmd.info.sublen = 1;
- cmd.info.subaddr[0] = command;
- cmd.info.subaddr[1] = 0;
- cmd.info.subaddr[2] = 0;
- if (!read) {
- cmd.info.data[0] = data->byte & 0xff;
- cmd.info.data[1] = (data->byte >> 8) & 0xff;
- }
- break;
- /* Note that these are broken vs. the expected smbus API where
- * on reads, the lenght is actually returned from the function,
- * but I think the current API makes no sense and I don't want
- * any driver that I haven't verified for correctness to go
- * anywhere near a pmac i2c bus anyway ...
- */
- case I2C_SMBUS_BLOCK_DATA:
- cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
- cmd.info.datalen = data->block[0] + 1;
- if (cmd.info.datalen > 6)
- return -EINVAL;
- if (!read)
- memcpy(cmd.info.data, data->block, cmd.info.datalen);
- cmd.info.sublen = 1;
- cmd.info.subaddr[0] = command;
- cmd.info.subaddr[1] = 0;
- cmd.info.subaddr[2] = 0;
- break;
- case I2C_SMBUS_I2C_BLOCK_DATA:
- cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
- cmd.info.datalen = data->block[0];
- if (cmd.info.datalen > 7)
- return -EINVAL;
- if (!read)
- memcpy(cmd.info.data, &data->block[1],
- cmd.info.datalen);
- cmd.info.sublen = 1;
- cmd.info.subaddr[0] = command;
- cmd.info.subaddr[1] = 0;
- cmd.info.subaddr[2] = 0;
- break;
-
- default:
- return -EINVAL;
- }
-
- /* Turn a standardsub read into a combined mode access */
- if (read_write == I2C_SMBUS_READ &&
- cmd.info.type == SMU_I2C_TRANSFER_STDSUB)
- cmd.info.type = SMU_I2C_TRANSFER_COMBINED;
-
- /* Finish filling command and submit it */
- cmd.done = smu_i2c_done;
- cmd.misc = iface;
- rc = smu_queue_i2c(&cmd);
- if (rc < 0)
- return rc;
- wait_for_completion(&iface->complete);
- rc = cmd.status;
-
- if (!read || rc < 0)
- return rc;
-
- switch (size) {
- case I2C_SMBUS_BYTE:
- case I2C_SMBUS_BYTE_DATA:
- data->byte = cmd.info.data[0];
- break;
- case I2C_SMBUS_WORD_DATA:
- data->word = ((u16)cmd.info.data[1]) << 8;
- data->word |= cmd.info.data[0];
- break;
- /* Note that these are broken vs. the expected smbus API where
- * on reads, the lenght is actually returned from the function,
- * but I think the current API makes no sense and I don't want
- * any driver that I haven't verified for correctness to go
- * anywhere near a pmac i2c bus anyway ...
- */
- case I2C_SMBUS_BLOCK_DATA:
- case I2C_SMBUS_I2C_BLOCK_DATA:
- memcpy(&data->block[0], cmd.info.data, cmd.info.datalen);
- break;
- }
-
- return rc;
-}
-
-static u32
-smu_smbus_func(struct i2c_adapter * adapter)
-{
- return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
- I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
- I2C_FUNC_SMBUS_BLOCK_DATA;
-}
-
-/* For now, we only handle combined mode (smbus) */
-static struct i2c_algorithm smu_algorithm = {
- .smbus_xfer = smu_smbus_xfer,
- .functionality = smu_smbus_func,
-};
-
-static int create_iface(struct device_node *np, struct device *dev)
-{
- struct smu_iface* iface;
- u32 *reg, busid;
- int rc;
-
- reg = (u32 *)get_property(np, "reg", NULL);
- if (reg == NULL) {
- printk(KERN_ERR "i2c-pmac-smu: can't find bus number !\n");
- return -ENXIO;
- }
- busid = *reg;
-
- iface = kzalloc(sizeof(struct smu_iface), GFP_KERNEL);
- if (iface == NULL) {
- printk(KERN_ERR "i2c-pmac-smu: can't allocate inteface !\n");
- return -ENOMEM;
- }
- init_completion(&iface->complete);
- iface->busid = busid;
-
- dev_set_drvdata(dev, iface);
-
- sprintf(iface->adapter.name, "smu-i2c-%02x", busid);
- iface->adapter.algo = &smu_algorithm;
- iface->adapter.algo_data = NULL;
- iface->adapter.client_register = NULL;
- iface->adapter.client_unregister = NULL;
- i2c_set_adapdata(&iface->adapter, iface);
- iface->adapter.dev.parent = dev;
-
- rc = i2c_add_adapter(&iface->adapter);
- if (rc) {
- printk(KERN_ERR "i2c-pamc-smu.c: Adapter %s registration "
- "failed\n", iface->adapter.name);
- i2c_set_adapdata(&iface->adapter, NULL);
- }
-
- if (probe) {
- unsigned char addr;
- printk("Probe: ");
- for (addr = 0x00; addr <= 0x7f; addr++) {
- if (i2c_smbus_xfer(&iface->adapter,addr,
- 0,0,0,I2C_SMBUS_QUICK,NULL) >= 0)
- printk("%02x ", addr);
- }
- printk("\n");
- }
-
- printk(KERN_INFO "SMU i2c bus %x registered\n", busid);
-
- return 0;
-}
-
-static int dispose_iface(struct device *dev)
-{
- struct smu_iface *iface = dev_get_drvdata(dev);
- int rc;
-
- rc = i2c_del_adapter(&iface->adapter);
- i2c_set_adapdata(&iface->adapter, NULL);
- /* We aren't that prepared to deal with this... */
- if (rc)
- printk("i2c-pmac-smu.c: Failed to remove bus %s !\n",
- iface->adapter.name);
- dev_set_drvdata(dev, NULL);
- kfree(iface);
-
- return 0;
-}
-
-
-static int create_iface_of_platform(struct of_device* dev,
- const struct of_device_id *match)
-{
- return create_iface(dev->node, &dev->dev);
-}
-
-
-static int dispose_iface_of_platform(struct of_device* dev)
-{
- return dispose_iface(&dev->dev);
-}
-
-
-static struct of_device_id i2c_smu_match[] =
-{
- {
- .compatible = "smu-i2c",
- },
- {},
-};
-static struct of_platform_driver i2c_smu_of_platform_driver =
-{
- .name = "i2c-smu",
- .match_table = i2c_smu_match,
- .probe = create_iface_of_platform,
- .remove = dispose_iface_of_platform
-};
-
-
-static int __init i2c_pmac_smu_init(void)
-{
- of_register_driver(&i2c_smu_of_platform_driver);
- return 0;
-}
-
-
-static void __exit i2c_pmac_smu_cleanup(void)
-{
- of_unregister_driver(&i2c_smu_of_platform_driver);
-}
-
-module_init(i2c_pmac_smu_init);
-module_exit(i2c_pmac_smu_cleanup);
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
new file mode 100644
index 00000000000..df786eb5529
--- /dev/null
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -0,0 +1,290 @@
+/*
+ i2c Support for Apple SMU Controller
+
+ Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp.
+ <benh@kernel.crashing.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <asm/prom.h>
+#include <asm/pmac_low_i2c.h>
+
+MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
+MODULE_DESCRIPTION("I2C driver for Apple PowerMac");
+MODULE_LICENSE("GPL");
+
+/*
+ * SMBUS-type transfer entrypoint
+ */
+static s32 i2c_powermac_smbus_xfer( struct i2c_adapter* adap,
+ u16 addr,
+ unsigned short flags,
+ char read_write,
+ u8 command,
+ int size,
+ union i2c_smbus_data* data)
+{
+ struct pmac_i2c_bus *bus = i2c_get_adapdata(adap);
+ int rc = 0;
+ int read = (read_write == I2C_SMBUS_READ);
+ int addrdir = (addr << 1) | read;
+ u8 local[2];
+
+ rc = pmac_i2c_open(bus, 0);
+ if (rc)
+ return rc;
+
+ switch (size) {
+ case I2C_SMBUS_QUICK:
+ rc = pmac_i2c_setmode(bus, pmac_i2c_mode_std);
+ if (rc)
+ goto bail;
+ rc = pmac_i2c_xfer(bus, addrdir, 0, 0, NULL, 0);
+ break;
+ case I2C_SMBUS_BYTE:
+ rc = pmac_i2c_setmode(bus, pmac_i2c_mode_std);
+ if (rc)
+ goto bail;
+ rc = pmac_i2c_xfer(bus, addrdir, 0, 0, &data->byte, 1);
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ rc = pmac_i2c_setmode(bus, read ?
+ pmac_i2c_mode_combined :
+ pmac_i2c_mode_stdsub);
+ if (rc)
+ goto bail;
+ rc = pmac_i2c_xfer(bus, addrdir, 1, command, &data->byte, 1);
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ rc = pmac_i2c_setmode(bus, read ?
+ pmac_i2c_mode_combined :
+ pmac_i2c_mode_stdsub);
+ if (rc)
+ goto bail;
+ if (!read) {
+ local[0] = data->word & 0xff;
+ local[1] = (data->word >> 8) & 0xff;
+ }
+ rc = pmac_i2c_xfer(bus, addrdir, 1, command, local, 2);
+ if (rc == 0 && read) {
+ data->word = ((u16)local[1]) << 8;
+ data->word |= local[0];
+ }
+ break;
+
+ /* Note that these are broken vs. the expected smbus API where
+ * on reads, the length is actually returned from the function,
+ * but I think the current API makes no sense and I don't want
+ * any driver that I haven't verified for correctness to go
+ * anywhere near a pmac i2c bus anyway ...
+ *
+ * I'm also not completely sure what kind of phases to do between
+ * the actual command and the data (that is, what I am _supposed_
+ * to do). For now, I assume writes are a single stream and reads
+ * have a repeat start/addr phase (but no stop in between).
+ */
+ case I2C_SMBUS_BLOCK_DATA:
+ rc = pmac_i2c_setmode(bus, read ?
+ pmac_i2c_mode_combined :
+ pmac_i2c_mode_stdsub);
+ if (rc)
+ goto bail;
+ rc = pmac_i2c_xfer(bus, addrdir, 1, command, data->block,
+ data->block[0] + 1);
+
+ break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ rc = pmac_i2c_setmode(bus, read ?
+ pmac_i2c_mode_combined :
+ pmac_i2c_mode_stdsub);
+ if (rc)
+ goto bail;
+ rc = pmac_i2c_xfer(bus, addrdir, 1, command,
+ read ? data->block : &data->block[1],
+ data->block[0]);
+ break;
+
+ default:
+ rc = -EINVAL;
+ }
+ bail:
+ pmac_i2c_close(bus);
+ return rc;
+}
+
+/*
+ * Generic i2c master transfer entrypoint. This driver only supports single
+ * messages (for "lame i2c" transfers). Anything else should use the smbus
+ * entry point.
+ */
+static int i2c_powermac_master_xfer( struct i2c_adapter *adap,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct pmac_i2c_bus *bus = i2c_get_adapdata(adap);
+ int rc = 0;
+ int read;
+ int addrdir;
+
+ if (num != 1)
+ return -EINVAL;
+ if (msgs->flags & I2C_M_TEN)
+ return -EINVAL;
+ read = (msgs->flags & I2C_M_RD) != 0;
+ addrdir = (msgs->addr << 1) | read;
+ if (msgs->flags & I2C_M_REV_DIR_ADDR)
+ addrdir ^= 1;
+
+ rc = pmac_i2c_open(bus, 0);
+ if (rc)
+ return rc;
+ rc = pmac_i2c_setmode(bus, pmac_i2c_mode_std);
+ if (rc)
+ goto bail;
+ rc = pmac_i2c_xfer(bus, addrdir, 0, 0, msgs->buf, msgs->len);
+ bail:
+ pmac_i2c_close(bus);
+ return rc < 0 ? rc : msgs->len;
+}
+
+static u32 i2c_powermac_func(struct i2c_adapter * adapter)
+{
+ return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_I2C;
+}
+
+/* For now, we only handle smbus */
+static struct i2c_algorithm i2c_powermac_algorithm = {
+ .smbus_xfer = i2c_powermac_smbus_xfer,
+ .master_xfer = i2c_powermac_master_xfer,
+ .functionality = i2c_powermac_func,
+};
+
+
+static int i2c_powermac_remove(struct device *dev)
+{
+ struct i2c_adapter *adapter = dev_get_drvdata(dev);
+ struct pmac_i2c_bus *bus = i2c_get_adapdata(adapter);
+ int rc;
+
+ rc = i2c_del_adapter(adapter);
+ pmac_i2c_detach_adapter(bus, adapter);
+ i2c_set_adapdata(adapter, NULL);
+ /* We aren't that prepared to deal with this... */
+ if (rc)
+ printk("i2c-powermac.c: Failed to remove bus %s !\n",
+ adapter->name);
+ dev_set_drvdata(dev, NULL);
+ kfree(adapter);
+
+ return 0;
+}
+
+
+static int i2c_powermac_probe(struct device *dev)
+{
+ struct pmac_i2c_bus *bus = dev->platform_data;
+ struct device_node *parent = NULL;
+ struct i2c_adapter *adapter;
+ char name[32], *basename;
+ int rc;
+
+ if (bus == NULL)
+ return -EINVAL;
+
+ /* Ok, now we need to make up a name for the interface that will
+ * match what we used to do in the past, that is, basically the
+ * controller's parent device node for keywest. PMU didn't have a
+ * naming convention and SMU has a different one.
+ */
+ switch(pmac_i2c_get_type(bus)) {
+ case pmac_i2c_bus_keywest:
+ parent = of_get_parent(pmac_i2c_get_controller(bus));
+ if (parent == NULL)
+ return -EINVAL;
+ basename = parent->name;
+ break;
+ case pmac_i2c_bus_pmu:
+ basename = "pmu";
+ break;
+ case pmac_i2c_bus_smu:
+ /* This is not what we used to do but I'm fixing drivers at
+ * the same time as this change
+ */
+ basename = "smu";
+ break;
+ default:
+ return -EINVAL;
+ }
+ snprintf(name, 32, "%s %d", basename, pmac_i2c_get_channel(bus));
+ of_node_put(parent);
+
+ adapter = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL);
+ if (adapter == NULL) {
+ printk(KERN_ERR "i2c-powermac: can't allocate inteface !\n");
+ return -ENOMEM;
+ }
+ dev_set_drvdata(dev, adapter);
+ strcpy(adapter->name, name);
+ adapter->algo = &i2c_powermac_algorithm;
+ i2c_set_adapdata(adapter, bus);
+ adapter->dev.parent = dev;
+ pmac_i2c_attach_adapter(bus, adapter);
+ rc = i2c_add_adapter(adapter);
+ if (rc) {
+ printk(KERN_ERR "i2c-powermac: Adapter %s registration "
+ "failed\n", name);
+ i2c_set_adapdata(adapter, NULL);
+ pmac_i2c_detach_adapter(bus, adapter);
+ }
+
+ printk(KERN_INFO "PowerMac i2c bus %s registered\n", name);
+ return rc;
+}
+
+
+static struct device_driver i2c_powermac_driver = {
+ .name = "i2c-powermac",
+ .bus = &platform_bus_type,
+ .probe = i2c_powermac_probe,
+ .remove = i2c_powermac_remove,
+};
+
+static int __init i2c_powermac_init(void)
+{
+ driver_register(&i2c_powermac_driver);
+ return 0;
+}
+
+
+static void __exit i2c_powermac_cleanup(void)
+{
+ driver_unregister(&i2c_powermac_driver);
+}
+
+module_init(i2c_powermac_init);
+module_exit(i2c_powermac_cleanup);
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 70f7ab829d3..86e2234faf8 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -899,6 +899,12 @@ static int i2c_pxa_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num
struct pxa_i2c *i2c = adap->algo_data;
int ret, i;
+ /* If the I2C controller is disabled we need to reset it (probably due
+ to a suspend/resume destroying state). We do this here as we can then
+ avoid worrying about resuming the controller before its users. */
+ if (!(ICR & ICR_IUE))
+ i2c_pxa_reset(i2c);
+
for (i = adap->retries; i >= 0; i--) {
ret = i2c_pxa_do_xfer(i2c, msgs, num);
if (ret != I2C_RETRY)
@@ -939,7 +945,9 @@ static struct pxa_i2c i2c_pxa = {
static int i2c_pxa_probe(struct platform_device *dev)
{
struct pxa_i2c *i2c = &i2c_pxa;
+#ifdef CONFIG_I2C_PXA_SLAVE
struct i2c_pxa_platform_data *plat = dev->dev.platform_data;
+#endif
int ret;
#ifdef CONFIG_PXA27x
@@ -1024,5 +1032,7 @@ static void i2c_adap_pxa_exit(void)
return platform_driver_unregister(&i2c_pxa_driver);
}
+MODULE_LICENSE("GPL");
+
module_init(i2c_adap_pxa_init);
module_exit(i2c_adap_pxa_exit);
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 58cfd3111ef..f7d40f8e5f5 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -34,12 +34,12 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/clk.h>
#include <asm/hardware.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <asm/hardware/clock.h>
#include <asm/arch/regs-gpio.h>
#include <asm/arch/regs-iic.h>
#include <asm/arch/iic.h>
@@ -738,7 +738,6 @@ static void s3c24xx_i2c_free(struct s3c24xx_i2c *i2c)
{
if (i2c->clk != NULL && !IS_ERR(i2c->clk)) {
clk_disable(i2c->clk);
- clk_unuse(i2c->clk);
clk_put(i2c->clk);
i2c->clk = NULL;
}
@@ -778,7 +777,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "clock source %p\n", i2c->clk);
- clk_use(i2c->clk);
clk_enable(i2c->clk);
/* map the registers */
diff --git a/drivers/i2c/chips/ds1337.c b/drivers/i2c/chips/ds1337.c
index 02682fb794c..93d483b8b77 100644
--- a/drivers/i2c/chips/ds1337.c
+++ b/drivers/i2c/chips/ds1337.c
@@ -52,9 +52,9 @@ static int ds1337_command(struct i2c_client *client, unsigned int cmd,
* Driver data (common to all clients)
*/
static struct i2c_driver ds1337_driver = {
- .owner = THIS_MODULE,
- .name = "ds1337",
- .flags = I2C_DF_NOTIFY,
+ .driver = {
+ .name = "ds1337",
+ },
.attach_adapter = ds1337_attach_adapter,
.detach_client = ds1337_detach_client,
.command = ds1337_command,
@@ -337,13 +337,38 @@ exit:
static void ds1337_init_client(struct i2c_client *client)
{
- s32 val;
+ u8 status, control;
- /* Ensure that device is set in 24-hour mode */
- val = i2c_smbus_read_byte_data(client, DS1337_REG_HOUR);
- if ((val >= 0) && (val & (1 << 6)))
- i2c_smbus_write_byte_data(client, DS1337_REG_HOUR,
- val & 0x3f);
+ /* On some boards, the RTC isn't configured by boot firmware.
+ * Handle that case by starting/configuring the RTC now.
+ */
+ status = i2c_smbus_read_byte_data(client, DS1337_REG_STATUS);
+ control = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
+
+ if ((status & 0x80) || (control & 0x80)) {
+ /* RTC not running */
+ u8 buf[16];
+ struct i2c_msg msg[1];
+
+ dev_dbg(&client->dev, "%s: RTC not running!\n", __FUNCTION__);
+
+ /* Initialize all, including STATUS and CONTROL to zero */
+ memset(buf, 0, sizeof(buf));
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = sizeof(buf);
+ msg[0].buf = &buf[0];
+
+ i2c_transfer(client->adapter, msg, 1);
+ } else {
+ /* Running: ensure that device is set in 24-hour mode */
+ s32 val;
+
+ val = i2c_smbus_read_byte_data(client, DS1337_REG_HOUR);
+ if ((val >= 0) && (val & (1 << 6)))
+ i2c_smbus_write_byte_data(client, DS1337_REG_HOUR,
+ val & 0x3f);
+ }
}
static int ds1337_detach_client(struct i2c_client *client)
diff --git a/drivers/i2c/chips/ds1374.c b/drivers/i2c/chips/ds1374.c
index da488b735ab..0710b9da9d5 100644
--- a/drivers/i2c/chips/ds1374.c
+++ b/drivers/i2c/chips/ds1374.c
@@ -232,10 +232,10 @@ static int ds1374_detach(struct i2c_client *client)
}
static struct i2c_driver ds1374_driver = {
- .owner = THIS_MODULE,
- .name = DS1374_DRV_NAME,
+ .driver = {
+ .name = DS1374_DRV_NAME,
+ },
.id = I2C_DRIVERID_DS1374,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = ds1374_attach,
.detach_client = ds1374_detach,
};
diff --git a/drivers/i2c/chips/eeprom.c b/drivers/i2c/chips/eeprom.c
index 4baf573fa04..41116b7947f 100644
--- a/drivers/i2c/chips/eeprom.c
+++ b/drivers/i2c/chips/eeprom.c
@@ -68,10 +68,10 @@ static int eeprom_detach_client(struct i2c_client *client);
/* This is the driver that will be inserted */
static struct i2c_driver eeprom_driver = {
- .owner = THIS_MODULE,
- .name = "eeprom",
+ .driver = {
+ .name = "eeprom",
+ },
.id = I2C_DRIVERID_EEPROM,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = eeprom_attach_adapter,
.detach_client = eeprom_detach_client,
};
diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c
index d2a100d7783..1251c7fc18d 100644
--- a/drivers/i2c/chips/isp1301_omap.c
+++ b/drivers/i2c/chips/isp1301_omap.c
@@ -1632,11 +1632,11 @@ static int isp1301_scan_bus(struct i2c_adapter *bus)
}
static struct i2c_driver isp1301_driver = {
- .owner = THIS_MODULE,
- .name = "isp1301_omap",
+ .driver = {
+ .name = "isp1301_omap",
+ },
.id = 1301, /* FIXME "official", i2c-ids.h */
.class = I2C_CLASS_HWMON,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = isp1301_scan_bus,
.detach_client = isp1301_detach_client,
};
diff --git a/drivers/i2c/chips/m41t00.c b/drivers/i2c/chips/m41t00.c
index 3df309ae44a..2dc3d48375f 100644
--- a/drivers/i2c/chips/m41t00.c
+++ b/drivers/i2c/chips/m41t00.c
@@ -211,10 +211,10 @@ m41t00_detach(struct i2c_client *client)
}
static struct i2c_driver m41t00_driver = {
- .owner = THIS_MODULE,
- .name = M41T00_DRV_NAME,
+ .driver = {
+ .name = M41T00_DRV_NAME,
+ },
.id = I2C_DRIVERID_STM41T00,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = m41t00_attach,
.detach_client = m41t00_detach,
};
diff --git a/drivers/i2c/chips/max6875.c b/drivers/i2c/chips/max6875.c
index b376a006883..6d3ff584155 100644
--- a/drivers/i2c/chips/max6875.c
+++ b/drivers/i2c/chips/max6875.c
@@ -67,9 +67,9 @@ static int max6875_detach_client(struct i2c_client *client);
/* This is the driver that will be inserted */
static struct i2c_driver max6875_driver = {
- .owner = THIS_MODULE,
- .name = "max6875",
- .flags = I2C_DF_NOTIFY,
+ .driver = {
+ .name = "max6875",
+ },
.attach_adapter = max6875_attach_adapter,
.detach_client = max6875_detach_client,
};
diff --git a/drivers/i2c/chips/pca9539.c b/drivers/i2c/chips/pca9539.c
index 59a93034622..54b6e6a4bee 100644
--- a/drivers/i2c/chips/pca9539.c
+++ b/drivers/i2c/chips/pca9539.c
@@ -38,9 +38,9 @@ static int pca9539_detach_client(struct i2c_client *client);
/* This is the driver that will be inserted */
static struct i2c_driver pca9539_driver = {
- .owner = THIS_MODULE,
- .name = "pca9539",
- .flags = I2C_DF_NOTIFY,
+ .driver = {
+ .name = "pca9539",
+ },
.attach_adapter = pca9539_attach_adapter,
.detach_client = pca9539_detach_client,
};
diff --git a/drivers/i2c/chips/pcf8574.c b/drivers/i2c/chips/pcf8574.c
index c323c2de236..c3e6449c448 100644
--- a/drivers/i2c/chips/pcf8574.c
+++ b/drivers/i2c/chips/pcf8574.c
@@ -65,10 +65,10 @@ static void pcf8574_init_client(struct i2c_client *client);
/* This is the driver that will be inserted */
static struct i2c_driver pcf8574_driver = {
- .owner = THIS_MODULE,
- .name = "pcf8574",
+ .driver = {
+ .name = "pcf8574",
+ },
.id = I2C_DRIVERID_PCF8574,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = pcf8574_attach_adapter,
.detach_client = pcf8574_detach_client,
};
diff --git a/drivers/i2c/chips/pcf8591.c b/drivers/i2c/chips/pcf8591.c
index ce420a67560..36cff09c678 100644
--- a/drivers/i2c/chips/pcf8591.c
+++ b/drivers/i2c/chips/pcf8591.c
@@ -88,10 +88,10 @@ static int pcf8591_read_channel(struct device *dev, int channel);
/* This is the driver that will be inserted */
static struct i2c_driver pcf8591_driver = {
- .owner = THIS_MODULE,
- .name = "pcf8591",
+ .driver = {
+ .name = "pcf8591",
+ },
.id = I2C_DRIVERID_PCF8591,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = pcf8591_attach_adapter,
.detach_client = pcf8591_detach_client,
};
diff --git a/drivers/i2c/chips/rtc8564.c b/drivers/i2c/chips/rtc8564.c
index 916cdc1af23..ceaa6b0bdfd 100644
--- a/drivers/i2c/chips/rtc8564.c
+++ b/drivers/i2c/chips/rtc8564.c
@@ -14,6 +14,7 @@
*/
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/bcd.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -52,9 +53,6 @@ static inline u8 _rtc8564_ctrl2(struct i2c_client *client)
#define CTRL1(c) _rtc8564_ctrl1(c)
#define CTRL2(c) _rtc8564_ctrl2(c)
-#define BCD_TO_BIN(val) (((val)&15) + ((val)>>4)*10)
-#define BIN_TO_BCD(val) ((((val)/10)<<4) + (val)%10)
-
static int debug;;
module_param(debug, int, S_IRUGO | S_IWUSR);
@@ -157,7 +155,6 @@ static int rtc8564_attach(struct i2c_adapter *adap, int addr, int kind)
strlcpy(new_client->name, "RTC8564", I2C_NAME_SIZE);
i2c_set_clientdata(new_client, d);
- new_client->flags = I2C_CLIENT_ALLOW_USE;
new_client->addr = addr;
new_client->adapter = adap;
new_client->driver = &rtc8564_driver;
@@ -224,16 +221,16 @@ static int rtc8564_get_datetime(struct i2c_client *client, struct rtc_tm *dt)
return ret;
/* century stored in minute alarm reg */
- dt->year = BCD_TO_BIN(buf[RTC8564_REG_YEAR]);
- dt->year += 100 * BCD_TO_BIN(buf[RTC8564_REG_AL_MIN] & 0x3f);
- dt->mday = BCD_TO_BIN(buf[RTC8564_REG_DAY] & 0x3f);
- dt->wday = BCD_TO_BIN(buf[RTC8564_REG_WDAY] & 7);
- dt->mon = BCD_TO_BIN(buf[RTC8564_REG_MON_CENT] & 0x1f);
+ dt->year = BCD2BIN(buf[RTC8564_REG_YEAR]);
+ dt->year += 100 * BCD2BIN(buf[RTC8564_REG_AL_MIN] & 0x3f);
+ dt->mday = BCD2BIN(buf[RTC8564_REG_DAY] & 0x3f);
+ dt->wday = BCD2BIN(buf[RTC8564_REG_WDAY] & 7);
+ dt->mon = BCD2BIN(buf[RTC8564_REG_MON_CENT] & 0x1f);
- dt->secs = BCD_TO_BIN(buf[RTC8564_REG_SEC] & 0x7f);
+ dt->secs = BCD2BIN(buf[RTC8564_REG_SEC] & 0x7f);
dt->vl = (buf[RTC8564_REG_SEC] & 0x80) == 0x80;
- dt->mins = BCD_TO_BIN(buf[RTC8564_REG_MIN] & 0x7f);
- dt->hours = BCD_TO_BIN(buf[RTC8564_REG_HR] & 0x3f);
+ dt->mins = BCD2BIN(buf[RTC8564_REG_MIN] & 0x7f);
+ dt->hours = BCD2BIN(buf[RTC8564_REG_HR] & 0x3f);
_DBGRTCTM(2, *dt);
@@ -255,18 +252,18 @@ rtc8564_set_datetime(struct i2c_client *client, struct rtc_tm *dt, int datetoo)
buf[RTC8564_REG_CTRL1] = CTRL1(client) | RTC8564_CTRL1_STOP;
buf[RTC8564_REG_CTRL2] = CTRL2(client);
- buf[RTC8564_REG_SEC] = BIN_TO_BCD(dt->secs);
- buf[RTC8564_REG_MIN] = BIN_TO_BCD(dt->mins);
- buf[RTC8564_REG_HR] = BIN_TO_BCD(dt->hours);
+ buf[RTC8564_REG_SEC] = BIN2BCD(dt->secs);
+ buf[RTC8564_REG_MIN] = BIN2BCD(dt->mins);
+ buf[RTC8564_REG_HR] = BIN2BCD(dt->hours);
if (datetoo) {
len += 5;
- buf[RTC8564_REG_DAY] = BIN_TO_BCD(dt->mday);
- buf[RTC8564_REG_WDAY] = BIN_TO_BCD(dt->wday);
- buf[RTC8564_REG_MON_CENT] = BIN_TO_BCD(dt->mon) & 0x1f;
+ buf[RTC8564_REG_DAY] = BIN2BCD(dt->mday);
+ buf[RTC8564_REG_WDAY] = BIN2BCD(dt->wday);
+ buf[RTC8564_REG_MON_CENT] = BIN2BCD(dt->mon) & 0x1f;
/* century stored in minute alarm reg */
- buf[RTC8564_REG_YEAR] = BIN_TO_BCD(dt->year % 100);
- buf[RTC8564_REG_AL_MIN] = BIN_TO_BCD(dt->year / 100);
+ buf[RTC8564_REG_YEAR] = BIN2BCD(dt->year % 100);
+ buf[RTC8564_REG_AL_MIN] = BIN2BCD(dt->year / 100);
}
ret = rtc8564_write(client, 0, buf, len);
@@ -361,10 +358,10 @@ rtc8564_command(struct i2c_client *client, unsigned int cmd, void *arg)
}
static struct i2c_driver rtc8564_driver = {
- .owner = THIS_MODULE,
- .name = "RTC8564",
+ .driver = {
+ .name = "RTC8564",
+ },
.id = I2C_DRIVERID_RTC8564,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = rtc8564_probe,
.detach_client = rtc8564_detach,
.command = rtc8564_command
diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c
index 280dd7a45db..1af3dfbb808 100644
--- a/drivers/i2c/chips/tps65010.c
+++ b/drivers/i2c/chips/tps65010.c
@@ -494,6 +494,7 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind)
{
struct tps65010 *tps;
int status;
+ unsigned long irqflags;
if (the_tps) {
dev_dbg(&bus->dev, "only one %s for now\n", DRIVER_NAME);
@@ -520,13 +521,14 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind)
}
#ifdef CONFIG_ARM
+ irqflags = SA_SAMPLE_RANDOM | SA_TRIGGER_LOW;
if (machine_is_omap_h2()) {
tps->model = TPS65010;
omap_cfg_reg(W4_GPIO58);
tps->irq = OMAP_GPIO_IRQ(58);
omap_request_gpio(58);
omap_set_gpio_direction(58, 1);
- set_irq_type(tps->irq, IRQT_FALLING);
+ irqflags |= SA_TRIGGER_FALLING;
}
if (machine_is_omap_osk()) {
tps->model = TPS65010;
@@ -534,7 +536,7 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind)
tps->irq = OMAP_GPIO_IRQ(OMAP_MPUIO(1));
omap_request_gpio(OMAP_MPUIO(1));
omap_set_gpio_direction(OMAP_MPUIO(1), 1);
- set_irq_type(tps->irq, IRQT_FALLING);
+ irqflags |= SA_TRIGGER_FALLING;
}
if (machine_is_omap_h3()) {
tps->model = TPS65013;
@@ -542,13 +544,12 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind)
// FIXME set up this board's IRQ ...
}
#else
-#define set_irq_type(num,trigger) do{}while(0)
+ irqflags = SA_SAMPLE_RANDOM;
#endif
if (tps->irq > 0) {
- set_irq_type(tps->irq, IRQT_LOW);
status = request_irq(tps->irq, tps65010_irq,
- SA_SAMPLE_RANDOM, DRIVER_NAME, tps);
+ irqflags, DRIVER_NAME, tps);
if (status < 0) {
dev_dbg(&tps->client.dev, "can't get IRQ %d, err %d\n",
tps->irq, status);
@@ -637,9 +638,9 @@ static int __init tps65010_scan_bus(struct i2c_adapter *bus)
}
static struct i2c_driver tps65010_driver = {
- .owner = THIS_MODULE,
- .name = "tps65010",
- .flags = I2C_DF_NOTIFY,
+ .driver = {
+ .name = "tps65010",
+ },
.attach_adapter = tps65010_scan_bus,
.detach_client = __exit_p(tps65010_detach_client),
};
diff --git a/drivers/i2c/chips/x1205.c b/drivers/i2c/chips/x1205.c
index 7da366cdc18..245fffa92db 100644
--- a/drivers/i2c/chips/x1205.c
+++ b/drivers/i2c/chips/x1205.c
@@ -105,9 +105,9 @@ static int x1205_command(struct i2c_client *client, unsigned int cmd,
void *arg);
static struct i2c_driver x1205_driver = {
- .owner = THIS_MODULE,
- .name = "x1205",
- .flags = I2C_DF_NOTIFY,
+ .driver = {
+ .name = "x1205",
+ },
.attach_adapter = &x1205_attach,
.detach_client = &x1205_detach,
};
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 82ea1b7ec91..52b77477df5 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -197,7 +197,7 @@ int i2c_add_adapter(struct i2c_adapter *adap)
/* inform drivers of new adapters */
list_for_each(item,&drivers) {
driver = list_entry(item, struct i2c_driver, list);
- if (driver->flags & I2C_DF_NOTIFY)
+ if (driver->attach_adapter)
/* We ignore the return code; if it fails, too bad */
driver->attach_adapter(adap);
}
@@ -235,7 +235,8 @@ int i2c_del_adapter(struct i2c_adapter *adap)
if (driver->detach_adapter)
if ((res = driver->detach_adapter(adap))) {
dev_err(&adap->dev, "detach_adapter failed "
- "for driver [%s]\n", driver->name);
+ "for driver [%s]\n",
+ driver->driver.name);
goto out_unlock;
}
}
@@ -245,10 +246,6 @@ int i2c_del_adapter(struct i2c_adapter *adap)
list_for_each_safe(item, _n, &adap->clients) {
client = list_entry(item, struct i2c_client, list);
- /* detaching devices is unconditional of the set notify
- * flag, as _all_ clients that reside on the adapter
- * must be deleted, as this would cause invalid states.
- */
if ((res=client->driver->detach_client(client))) {
dev_err(&adap->dev, "detach_client failed for client "
"[%s] at address 0x%02x\n", client->name,
@@ -286,7 +283,7 @@ int i2c_del_adapter(struct i2c_adapter *adap)
* chips.
*/
-int i2c_add_driver(struct i2c_driver *driver)
+int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
{
struct list_head *item;
struct i2c_adapter *adapter;
@@ -295,8 +292,7 @@ int i2c_add_driver(struct i2c_driver *driver)
down(&core_lists);
/* add the driver to the list of i2c drivers in the driver core */
- driver->driver.owner = driver->owner;
- driver->driver.name = driver->name;
+ driver->driver.owner = owner;
driver->driver.bus = &i2c_bus_type;
driver->driver.probe = i2c_device_probe;
driver->driver.remove = i2c_device_remove;
@@ -306,10 +302,10 @@ int i2c_add_driver(struct i2c_driver *driver)
goto out_unlock;
list_add_tail(&driver->list,&drivers);
- pr_debug("i2c-core: driver [%s] registered\n", driver->name);
+ pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
/* now look for instances of driver on our adapters */
- if (driver->flags & I2C_DF_NOTIFY) {
+ if (driver->attach_adapter) {
list_for_each(item,&adapters) {
adapter = list_entry(item, struct i2c_adapter, list);
driver->attach_adapter(adapter);
@@ -320,6 +316,7 @@ int i2c_add_driver(struct i2c_driver *driver)
up(&core_lists);
return res;
}
+EXPORT_SYMBOL(i2c_register_driver);
int i2c_del_driver(struct i2c_driver *driver)
{
@@ -334,17 +331,14 @@ int i2c_del_driver(struct i2c_driver *driver)
/* Have a look at each adapter, if clients of this driver are still
* attached. If so, detach them to be able to kill the driver
* afterwards.
- *
- * Removing clients does not depend on the notify flag, else
- * invalid operation might (will!) result, when using stale client
- * pointers.
*/
list_for_each(item1,&adapters) {
adap = list_entry(item1, struct i2c_adapter, list);
if (driver->detach_adapter) {
if ((res = driver->detach_adapter(adap))) {
dev_err(&adap->dev, "detach_adapter failed "
- "for driver [%s]\n", driver->name);
+ "for driver [%s]\n",
+ driver->driver.name);
goto out_unlock;
}
} else {
@@ -368,7 +362,7 @@ int i2c_del_driver(struct i2c_driver *driver)
driver_unregister(&driver->driver);
list_del(&driver->list);
- pr_debug("i2c-core: driver [%s] unregistered\n", driver->name);
+ pr_debug("i2c-core: driver [%s] unregistered\n", driver->driver.name);
out_unlock:
up(&core_lists);
@@ -419,8 +413,7 @@ int i2c_attach_client(struct i2c_client *client)
}
}
- if (client->flags & I2C_CLIENT_ALLOW_USE)
- client->usage_count = 0;
+ client->usage_count = 0;
client->dev.parent = &client->adapter->dev;
client->dev.driver = &client->driver->driver;
@@ -443,8 +436,7 @@ int i2c_detach_client(struct i2c_client *client)
struct i2c_adapter *adapter = client->adapter;
int res = 0;
- if ((client->flags & I2C_CLIENT_ALLOW_USE)
- && (client->usage_count > 0)) {
+ if (client->usage_count > 0) {
dev_warn(&client->dev, "Client [%s] still busy, "
"can't detach\n", client->name);
return -EBUSY;
@@ -475,10 +467,10 @@ int i2c_detach_client(struct i2c_client *client)
static int i2c_inc_use_client(struct i2c_client *client)
{
- if (!try_module_get(client->driver->owner))
+ if (!try_module_get(client->driver->driver.owner))
return -ENODEV;
if (!try_module_get(client->adapter->owner)) {
- module_put(client->driver->owner);
+ module_put(client->driver->driver.owner);
return -ENODEV;
}
@@ -487,7 +479,7 @@ static int i2c_inc_use_client(struct i2c_client *client)
static void i2c_dec_use_client(struct i2c_client *client)
{
- module_put(client->driver->owner);
+ module_put(client->driver->driver.owner);
module_put(client->adapter->owner);
}
@@ -499,33 +491,20 @@ int i2c_use_client(struct i2c_client *client)
if (ret)
return ret;
- if (client->flags & I2C_CLIENT_ALLOW_USE) {
- if (client->flags & I2C_CLIENT_ALLOW_MULTIPLE_USE)
- client->usage_count++;
- else if (client->usage_count > 0)
- goto busy;
- else
- client->usage_count++;
- }
+ client->usage_count++;
return 0;
- busy:
- i2c_dec_use_client(client);
- return -EBUSY;
}
int i2c_release_client(struct i2c_client *client)
{
- if(client->flags & I2C_CLIENT_ALLOW_USE) {
- if(client->usage_count>0)
- client->usage_count--;
- else {
- pr_debug("i2c-core: %s used one too many times\n",
- __FUNCTION__);
- return -EPERM;
- }
+ if (!client->usage_count) {
+ pr_debug("i2c-core: %s used one too many times\n",
+ __FUNCTION__);
+ return -EPERM;
}
+ client->usage_count--;
i2c_dec_use_client(client);
return 0;
@@ -539,14 +518,14 @@ void i2c_clients_command(struct i2c_adapter *adap, unsigned int cmd, void *arg)
down(&adap->clist_lock);
list_for_each(item,&adap->clients) {
client = list_entry(item, struct i2c_client, list);
- if (!try_module_get(client->driver->owner))
+ if (!try_module_get(client->driver->driver.owner))
continue;
if (NULL != client->driver->command) {
up(&adap->clist_lock);
client->driver->command(client,cmd,arg);
down(&adap->clist_lock);
}
- module_put(client->driver->owner);
+ module_put(client->driver->driver.owner);
}
up(&adap->clist_lock);
}
@@ -1147,7 +1126,6 @@ EXPORT_SYMBOL_GPL(i2c_bus_type);
EXPORT_SYMBOL(i2c_add_adapter);
EXPORT_SYMBOL(i2c_del_adapter);
-EXPORT_SYMBOL(i2c_add_driver);
EXPORT_SYMBOL(i2c_del_driver);
EXPORT_SYMBOL(i2c_attach_client);
EXPORT_SYMBOL(i2c_detach_client);
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 8af0bd1424d..ed7eed388ba 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -42,8 +42,7 @@ static struct i2c_client i2cdev_client_template;
struct i2c_dev {
int minor;
struct i2c_adapter *adap;
- struct class_device class_dev;
- struct completion released; /* FIXME, we need a class_device_unregister() */
+ struct class_device *class_dev;
};
#define to_i2c_dev(d) container_of(d, struct i2c_dev, class_dev)
@@ -105,7 +104,10 @@ static void return_i2c_dev(struct i2c_dev *i2c_dev)
static ssize_t show_adapter_name(struct class_device *class_dev, char *buf)
{
- struct i2c_dev *i2c_dev = to_i2c_dev(class_dev);
+ struct i2c_dev *i2c_dev = i2c_dev_get_by_minor(MINOR(class_dev->devt));
+
+ if (!i2c_dev)
+ return -ENODEV;
return sprintf(buf, "%s\n", i2c_dev->adap->name);
}
static CLASS_DEVICE_ATTR(name, S_IRUGO, show_adapter_name, NULL);
@@ -408,21 +410,12 @@ static struct file_operations i2cdev_fops = {
.release = i2cdev_release,
};
-static void release_i2c_dev(struct class_device *dev)
-{
- struct i2c_dev *i2c_dev = to_i2c_dev(dev);
- complete(&i2c_dev->released);
-}
-
-static struct class i2c_dev_class = {
- .name = "i2c-dev",
- .release = &release_i2c_dev,
-};
+static struct class *i2c_dev_class;
static int i2cdev_attach_adapter(struct i2c_adapter *adap)
{
struct i2c_dev *i2c_dev;
- int retval;
+ struct device *dev;
i2c_dev = get_free_i2c_dev(adap);
if (IS_ERR(i2c_dev))
@@ -434,21 +427,20 @@ static int i2cdev_attach_adapter(struct i2c_adapter *adap)
/* register this i2c device with the driver core */
i2c_dev->adap = adap;
if (adap->dev.parent == &platform_bus)
- i2c_dev->class_dev.dev = &adap->dev;
+ dev = &adap->dev;
else
- i2c_dev->class_dev.dev = adap->dev.parent;
- i2c_dev->class_dev.class = &i2c_dev_class;
- i2c_dev->class_dev.devt = MKDEV(I2C_MAJOR, i2c_dev->minor);
- snprintf(i2c_dev->class_dev.class_id, BUS_ID_SIZE, "i2c-%d", i2c_dev->minor);
- retval = class_device_register(&i2c_dev->class_dev);
- if (retval)
+ dev = adap->dev.parent;
+ i2c_dev->class_dev = class_device_create(i2c_dev_class, NULL,
+ MKDEV(I2C_MAJOR, i2c_dev->minor),
+ dev, "i2c-%d", i2c_dev->minor);
+ if (!i2c_dev->class_dev)
goto error;
- class_device_create_file(&i2c_dev->class_dev, &class_device_attr_name);
+ class_device_create_file(i2c_dev->class_dev, &class_device_attr_name);
return 0;
error:
return_i2c_dev(i2c_dev);
kfree(i2c_dev);
- return retval;
+ return -ENODEV;
}
static int i2cdev_detach_adapter(struct i2c_adapter *adap)
@@ -459,10 +451,8 @@ static int i2cdev_detach_adapter(struct i2c_adapter *adap)
if (!i2c_dev)
return -ENODEV;
- init_completion(&i2c_dev->released);
return_i2c_dev(i2c_dev);
- class_device_unregister(&i2c_dev->class_dev);
- wait_for_completion(&i2c_dev->released);
+ class_device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, i2c_dev->minor));
kfree(i2c_dev);
pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name);
@@ -474,21 +464,14 @@ static int i2cdev_detach_client(struct i2c_client *client)
return 0;
}
-static int i2cdev_command(struct i2c_client *client, unsigned int cmd,
- void *arg)
-{
- return -1;
-}
-
static struct i2c_driver i2cdev_driver = {
- .owner = THIS_MODULE,
- .name = "dev_driver",
+ .driver = {
+ .name = "dev_driver",
+ },
.id = I2C_DRIVERID_I2CDEV,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = i2cdev_attach_adapter,
.detach_adapter = i2cdev_detach_adapter,
.detach_client = i2cdev_detach_client,
- .command = i2cdev_command,
};
static struct i2c_client i2cdev_client_template = {
@@ -507,8 +490,8 @@ static int __init i2c_dev_init(void)
if (res)
goto out;
- res = class_register(&i2c_dev_class);
- if (res)
+ i2c_dev_class = class_create(THIS_MODULE, "i2c-dev");
+ if (IS_ERR(i2c_dev_class))
goto out_unreg_chrdev;
res = i2c_add_driver(&i2cdev_driver);
@@ -518,7 +501,7 @@ static int __init i2c_dev_init(void)
return 0;
out_unreg_class:
- class_unregister(&i2c_dev_class);
+ class_destroy(i2c_dev_class);
out_unreg_chrdev:
unregister_chrdev(I2C_MAJOR, "i2c");
out:
@@ -529,7 +512,7 @@ out:
static void __exit i2c_dev_exit(void)
{
i2c_del_driver(&i2cdev_driver);
- class_unregister(&i2c_dev_class);
+ class_destroy(i2c_dev_class);
unregister_chrdev(I2C_MAJOR,"i2c");
}
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 70aeb3a6012..9b2ebd219ad 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -614,7 +614,7 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
*/
spin_lock_irqsave(&ide_lock, flags);
end_that_request_chunk(failed, 0, failed->data_len);
- end_that_request_last(failed);
+ end_that_request_last(failed, 0);
spin_unlock_irqrestore(&ide_lock, flags);
}
@@ -1332,8 +1332,6 @@ static ide_startstop_t cdrom_start_read (ide_drive_t *drive, unsigned int block)
if (cdrom_read_from_buffer(drive))
return ide_stopped;
- blk_attempt_remerge(drive->queue, rq);
-
/* Clear the local sector buffer. */
info->nsectors_buffered = 0;
@@ -1735,7 +1733,7 @@ end_request:
spin_lock_irqsave(&ide_lock, flags);
blkdev_dequeue_request(rq);
- end_that_request_last(rq);
+ end_that_request_last(rq, 1);
HWGROUP(drive)->rq = NULL;
spin_unlock_irqrestore(&ide_lock, flags);
return ide_stopped;
@@ -1874,14 +1872,6 @@ static ide_startstop_t cdrom_start_write(ide_drive_t *drive, struct request *rq)
return ide_stopped;
}
- /*
- * for dvd-ram and such media, it's a really big deal to get
- * big writes all the time. so scour the queue and attempt to
- * remerge requests, often the plugging will not have had time
- * to do this properly
- */
- blk_attempt_remerge(drive->queue, rq);
-
info->nsectors_buffered = 0;
/* use dma, if possible. we don't need to check more, since we
@@ -2905,6 +2895,8 @@ static int ide_cdrom_register (ide_drive_t *drive, int nslots)
devinfo->mask |= CDC_CLOSE_TRAY;
if (!CDROM_CONFIG_FLAGS(drive)->mo_drive)
devinfo->mask |= CDC_MO_DRIVE;
+ if (!CDROM_CONFIG_FLAGS(drive)->ram)
+ devinfo->mask |= CDC_RAM;
devinfo->disk = info->disk;
return register_cdrom(devinfo);
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 4e5767968d7..cab362ea033 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -681,50 +681,9 @@ static ide_proc_entry_t idedisk_proc[] = {
#endif /* CONFIG_PROC_FS */
-static void idedisk_end_flush(request_queue_t *q, struct request *flush_rq)
+static void idedisk_prepare_flush(request_queue_t *q, struct request *rq)
{
ide_drive_t *drive = q->queuedata;
- struct request *rq = flush_rq->end_io_data;
- int good_sectors = rq->hard_nr_sectors;
- int bad_sectors;
- sector_t sector;
-
- if (flush_rq->errors & ABRT_ERR) {
- printk(KERN_ERR "%s: barrier support doesn't work\n", drive->name);
- blk_queue_ordered(drive->queue, QUEUE_ORDERED_NONE);
- blk_queue_issue_flush_fn(drive->queue, NULL);
- good_sectors = 0;
- } else if (flush_rq->errors) {
- good_sectors = 0;
- if (blk_barrier_preflush(rq)) {
- sector = ide_get_error_location(drive,flush_rq->buffer);
- if ((sector >= rq->hard_sector) &&
- (sector < rq->hard_sector + rq->hard_nr_sectors))
- good_sectors = sector - rq->hard_sector;
- }
- }
-
- if (flush_rq->errors)
- printk(KERN_ERR "%s: failed barrier write: "
- "sector=%Lx(good=%d/bad=%d)\n",
- drive->name, (unsigned long long)rq->sector,
- good_sectors,
- (int) (rq->hard_nr_sectors-good_sectors));
-
- bad_sectors = rq->hard_nr_sectors - good_sectors;
-
- if (good_sectors)
- __ide_end_request(drive, rq, 1, good_sectors);
- if (bad_sectors)
- __ide_end_request(drive, rq, 0, bad_sectors);
-}
-
-static int idedisk_prepare_flush(request_queue_t *q, struct request *rq)
-{
- ide_drive_t *drive = q->queuedata;
-
- if (!drive->wcache)
- return 0;
memset(rq->cmd, 0, sizeof(rq->cmd));
@@ -735,9 +694,8 @@ static int idedisk_prepare_flush(request_queue_t *q, struct request *rq)
rq->cmd[0] = WIN_FLUSH_CACHE;
- rq->flags |= REQ_DRIVE_TASK | REQ_SOFTBARRIER;
+ rq->flags |= REQ_DRIVE_TASK;
rq->buffer = rq->cmd;
- return 1;
}
static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk,
@@ -794,27 +752,64 @@ static int set_nowerr(ide_drive_t *drive, int arg)
return 0;
}
+static void update_ordered(ide_drive_t *drive)
+{
+ struct hd_driveid *id = drive->id;
+ unsigned ordered = QUEUE_ORDERED_NONE;
+ prepare_flush_fn *prep_fn = NULL;
+ issue_flush_fn *issue_fn = NULL;
+
+ if (drive->wcache) {
+ unsigned long long capacity;
+ int barrier;
+ /*
+ * We must avoid issuing commands a drive does not
+ * understand or we may crash it. We check flush cache
+ * is supported. We also check we have the LBA48 flush
+ * cache if the drive capacity is too large. By this
+ * time we have trimmed the drive capacity if LBA48 is
+ * not available so we don't need to recheck that.
+ */
+ capacity = idedisk_capacity(drive);
+ barrier = ide_id_has_flush_cache(id) &&
+ (drive->addressing == 0 || capacity <= (1ULL << 28) ||
+ ide_id_has_flush_cache_ext(id));
+
+ printk(KERN_INFO "%s: cache flushes %ssupported\n",
+ drive->name, barrier ? "" : "not ");
+
+ if (barrier) {
+ ordered = QUEUE_ORDERED_DRAIN_FLUSH;
+ prep_fn = idedisk_prepare_flush;
+ issue_fn = idedisk_issue_flush;
+ }
+ } else
+ ordered = QUEUE_ORDERED_DRAIN;
+
+ blk_queue_ordered(drive->queue, ordered, prep_fn);
+ blk_queue_issue_flush_fn(drive->queue, issue_fn);
+}
+
static int write_cache(ide_drive_t *drive, int arg)
{
ide_task_t args;
- int err;
+ int err = 1;
- if (!ide_id_has_flush_cache(drive->id))
- return 1;
-
- memset(&args, 0, sizeof(ide_task_t));
- args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ?
+ if (ide_id_has_flush_cache(drive->id)) {
+ memset(&args, 0, sizeof(ide_task_t));
+ args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ?
SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE;
- args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
- args.command_type = IDE_DRIVE_TASK_NO_DATA;
- args.handler = &task_no_data_intr;
+ args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
+ args.command_type = IDE_DRIVE_TASK_NO_DATA;
+ args.handler = &task_no_data_intr;
+ err = ide_raw_taskfile(drive, &args, NULL);
+ if (err == 0)
+ drive->wcache = arg;
+ }
- err = ide_raw_taskfile(drive, &args, NULL);
- if (err)
- return err;
+ update_ordered(drive);
- drive->wcache = arg;
- return 0;
+ return err;
}
static int do_idedisk_flushcache (ide_drive_t *drive)
@@ -888,7 +883,6 @@ static void idedisk_setup (ide_drive_t *drive)
{
struct hd_driveid *id = drive->id;
unsigned long long capacity;
- int barrier;
idedisk_add_settings(drive);
@@ -992,31 +986,6 @@ static void idedisk_setup (ide_drive_t *drive)
drive->wcache = 1;
write_cache(drive, 1);
-
- /*
- * We must avoid issuing commands a drive does not understand
- * or we may crash it. We check flush cache is supported. We also
- * check we have the LBA48 flush cache if the drive capacity is
- * too large. By this time we have trimmed the drive capacity if
- * LBA48 is not available so we don't need to recheck that.
- */
- barrier = 0;
- if (ide_id_has_flush_cache(id))
- barrier = 1;
- if (drive->addressing == 1) {
- /* Can't issue the correct flush ? */
- if (capacity > (1ULL << 28) && !ide_id_has_flush_cache_ext(id))
- barrier = 0;
- }
-
- printk(KERN_INFO "%s: cache flushes %ssupported\n",
- drive->name, barrier ? "" : "not ");
- if (barrier) {
- blk_queue_ordered(drive->queue, QUEUE_ORDERED_FLUSH);
- drive->queue->prepare_flush_fn = idedisk_prepare_flush;
- drive->queue->end_flush_fn = idedisk_end_flush;
- blk_queue_issue_flush_fn(drive->queue, idedisk_issue_flush);
- }
}
static void ide_cacheflush_p(ide_drive_t *drive)
@@ -1161,6 +1130,17 @@ static int idedisk_release(struct inode *inode, struct file *filp)
return 0;
}
+static int idedisk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct ide_disk_obj *idkp = ide_disk_g(bdev->bd_disk);
+ ide_drive_t *drive = idkp->drive;
+
+ geo->heads = drive->bios_head;
+ geo->sectors = drive->bios_sect;
+ geo->cylinders = (u16)drive->bios_cyl; /* truncate */
+ return 0;
+}
+
static int idedisk_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -1195,6 +1175,7 @@ static struct block_device_operations idedisk_ops = {
.open = idedisk_open,
.release = idedisk_release,
.ioctl = idedisk_ioctl,
+ .getgeo = idedisk_getgeo,
.media_changed = idedisk_media_changed,
.revalidate_disk= idedisk_revalidate_disk
};
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index fba3fffc2d6..5945f551aaa 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -2031,6 +2031,17 @@ static int idefloppy_release(struct inode *inode, struct file *filp)
return 0;
}
+static int idefloppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct ide_floppy_obj *floppy = ide_floppy_g(bdev->bd_disk);
+ ide_drive_t *drive = floppy->drive;
+
+ geo->heads = drive->bios_head;
+ geo->sectors = drive->bios_sect;
+ geo->cylinders = (u16)drive->bios_cyl; /* truncate */
+ return 0;
+}
+
static int idefloppy_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -2120,6 +2131,7 @@ static struct block_device_operations idefloppy_ops = {
.open = idefloppy_open,
.release = idefloppy_release,
.ioctl = idefloppy_ioctl,
+ .getgeo = idefloppy_getgeo,
.media_changed = idefloppy_media_changed,
.revalidate_disk= idefloppy_revalidate_disk
};
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index ecfafcdafea..8d50df4526a 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -83,15 +83,12 @@ int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,
if (!end_that_request_first(rq, uptodate, nr_sectors)) {
add_disk_randomness(rq->rq_disk);
-
- if (blk_rq_tagged(rq))
- blk_queue_end_tag(drive->queue, rq);
-
blkdev_dequeue_request(rq);
HWGROUP(drive)->rq = NULL;
- end_that_request_last(rq);
+ end_that_request_last(rq, uptodate);
ret = 0;
}
+
return ret;
}
EXPORT_SYMBOL(__ide_end_request);
@@ -113,16 +110,17 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
unsigned long flags;
int ret = 1;
+ /*
+ * room for locking improvements here, the calls below don't
+ * need the queue lock held at all
+ */
spin_lock_irqsave(&ide_lock, flags);
rq = HWGROUP(drive)->rq;
if (!nr_sectors)
nr_sectors = rq->hard_cur_sectors;
- if (blk_complete_barrier_rq_locked(drive->queue, rq, nr_sectors))
- ret = rq->nr_sectors != 0;
- else
- ret = __ide_end_request(drive, rq, uptodate, nr_sectors);
+ ret = __ide_end_request(drive, rq, uptodate, nr_sectors);
spin_unlock_irqrestore(&ide_lock, flags);
return ret;
@@ -247,7 +245,7 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
}
blkdev_dequeue_request(rq);
HWGROUP(drive)->rq = NULL;
- end_that_request_last(rq);
+ end_that_request_last(rq, 1);
spin_unlock_irqrestore(&ide_lock, flags);
}
@@ -379,7 +377,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
blkdev_dequeue_request(rq);
HWGROUP(drive)->rq = NULL;
rq->errors = err;
- end_that_request_last(rq);
+ end_that_request_last(rq, !rq->errors);
spin_unlock_irqrestore(&ide_lock, flags);
}
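The ide-io.c hunks track an API change in the block layer: end_that_request_last() now takes the uptodate status as a second argument, and the barrier/tag special-casing is dropped. The completion sequence a driver is left with is roughly the sketch below (locking elided; ide_end_request above still wraps it in ide_lock):

/* Rough sketch of the post-change request completion order. */
#include <linux/blkdev.h>
#include <linux/genhd.h>

static int example_end_request(struct request *rq, int uptodate, int nr_sectors)
{
	/* account the transferred sectors; nonzero means segments remain */
	if (end_that_request_first(rq, uptodate, nr_sectors))
		return 1;

	add_disk_randomness(rq->rq_disk);
	blkdev_dequeue_request(rq);		/* caller holds the queue lock */
	end_that_request_last(rq, uptodate);	/* uptodate is the new 2nd argument */
	return 0;
}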
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 02167a5b751..e7425546b4b 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -655,7 +655,7 @@ static void hwif_release_dev (struct device *dev)
{
ide_hwif_t *hwif = container_of(dev, ide_hwif_t, gendev);
- up(&hwif->gendev_rel_sem);
+ complete(&hwif->gendev_rel_comp);
}
static void hwif_register (ide_hwif_t *hwif)
@@ -1325,7 +1325,7 @@ static void drive_release_dev (struct device *dev)
drive->queue = NULL;
spin_unlock_irq(&ide_lock);
- up(&drive->gendev_rel_sem);
+ complete(&drive->gendev_rel_comp);
}
/*
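Here and in ide.c below, the gendev_rel_sem semaphore (initialised to 0 and used purely for one-shot signalling) becomes a struct completion, which expresses the same wait-for-release handshake directly. A bare sketch of the pairing, reusing the field name from the patch:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/completion.h>

struct example_hwif {				/* stand-in for ide_hwif_t */
	struct device gendev;
	struct completion gendev_rel_comp;	/* field name as in this patch */
};

static void example_init(struct example_hwif *hwif)
{
	init_completion(&hwif->gendev_rel_comp);	/* once, at setup time */
}

static void example_release_dev(struct device *dev)	/* gendev release callback */
{
	struct example_hwif *hwif = container_of(dev, struct example_hwif, gendev);

	complete(&hwif->gendev_rel_comp);	/* tell the unregister path we are gone */
}

static void example_unregister(struct example_hwif *hwif)
{
	device_unregister(&hwif->gendev);
	wait_for_completion(&hwif->gendev_rel_comp);	/* block until release ran */
}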
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 4b524f6b3ec..ec5a4cb173b 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -222,7 +222,7 @@ static void init_hwif_data(ide_hwif_t *hwif, unsigned int index)
hwif->mwdma_mask = 0x80; /* disable all mwdma */
hwif->swdma_mask = 0x80; /* disable all swdma */
- sema_init(&hwif->gendev_rel_sem, 0);
+ init_completion(&hwif->gendev_rel_comp);
default_hwif_iops(hwif);
default_hwif_transport(hwif);
@@ -245,7 +245,7 @@ static void init_hwif_data(ide_hwif_t *hwif, unsigned int index)
drive->is_flash = 0;
drive->vdma = 0;
INIT_LIST_HEAD(&drive->list);
- sema_init(&drive->gendev_rel_sem, 0);
+ init_completion(&drive->gendev_rel_comp);
}
}
@@ -602,7 +602,7 @@ void ide_unregister(unsigned int index)
}
spin_unlock_irq(&ide_lock);
device_unregister(&drive->gendev);
- down(&drive->gendev_rel_sem);
+ wait_for_completion(&drive->gendev_rel_comp);
spin_lock_irq(&ide_lock);
}
hwif->present = 0;
@@ -662,7 +662,7 @@ void ide_unregister(unsigned int index)
/* More messed up locking ... */
spin_unlock_irq(&ide_lock);
device_unregister(&hwif->gendev);
- down(&hwif->gendev_rel_sem);
+ wait_for_completion(&hwif->gendev_rel_comp);
/*
* Remove us from the kernel's knowledge
@@ -1278,19 +1278,6 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device
up(&ide_setting_sem);
switch (cmd) {
- case HDIO_GETGEO:
- {
- struct hd_geometry geom;
- if (!p || (drive->media != ide_disk && drive->media != ide_floppy)) return -EINVAL;
- geom.heads = drive->bios_head;
- geom.sectors = drive->bios_sect;
- geom.cylinders = (u16)drive->bios_cyl; /* truncate */
- geom.start = get_start_sect(bdev);
- if (copy_to_user(p, &geom, sizeof(struct hd_geometry)))
- return -EFAULT;
- return 0;
- }
-
case HDIO_OBSOLETE_IDENTITY:
case HDIO_GET_IDENTITY:
if (bdev != bdev->bd_contains)
diff --git a/drivers/ide/legacy/hd.c b/drivers/ide/legacy/hd.c
index 242029c9c0c..6439dec6688 100644
--- a/drivers/ide/legacy/hd.c
+++ b/drivers/ide/legacy/hd.c
@@ -658,22 +658,14 @@ static void do_hd_request (request_queue_t * q)
enable_irq(HD_IRQ);
}
-static int hd_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd, unsigned long arg)
+static int hd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
- struct hd_i_struct *disk = inode->i_bdev->bd_disk->private_data;
- struct hd_geometry __user *loc = (struct hd_geometry __user *) arg;
- struct hd_geometry g;
-
- if (cmd != HDIO_GETGEO)
- return -EINVAL;
- if (!loc)
- return -EINVAL;
- g.heads = disk->head;
- g.sectors = disk->sect;
- g.cylinders = disk->cyl;
- g.start = get_start_sect(inode->i_bdev);
- return copy_to_user(loc, &g, sizeof g) ? -EFAULT : 0;
+ struct hd_i_struct *disk = bdev->bd_disk->private_data;
+
+ geo->heads = disk->head;
+ geo->sectors = disk->sect;
+ geo->cylinders = disk->cyl;
+ return 0;
}
/*
@@ -695,7 +687,7 @@ static irqreturn_t hd_interrupt(int irq, void *dev_id, struct pt_regs *regs)
}
static struct block_device_operations hd_fops = {
- .ioctl = hd_ioctl,
+ .getgeo = hd_getgeo,
};
/*
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index 211641a5439..fe06ebb0e5b 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -39,7 +39,7 @@
#define PDC202_DEBUG_CABLE 0
-const static char *pdc_quirk_drives[] = {
+static const char *pdc_quirk_drives[] = {
"QUANTUM FIREBALLlct08 08",
"QUANTUM FIREBALLP KA6.4",
"QUANTUM FIREBALLP KA9.1",
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
index ff2e217a8c8..0d3073f4eab 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/pci/serverworks.c
@@ -69,7 +69,7 @@ static int check_in_drive_lists (ide_drive_t *drive, const char **list)
static u8 svwks_ratemask (ide_drive_t *drive)
{
struct pci_dev *dev = HWIF(drive)->pci_dev;
- u8 mode;
+ u8 mode = 0;
if (!svwks_revision)
pci_read_config_byte(dev, PCI_REVISION_ID, &svwks_revision);
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index 86fb1e0286d..c85b87cb59d 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -439,7 +439,7 @@ static void __devinit init_hwif_via82cxxx(ide_hwif_t *hwif)
hwif->speedproc = &via_set_drive;
-#if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_PPC32)
+#if defined(CONFIG_PPC_CHRP) && defined(CONFIG_PPC32)
if(_machine == _MACH_chrp && _chrp_type == _CHRP_Pegasos) {
hwif->irq = hwif->channel ? 15 : 14;
}
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 16b28357885..5013b1285e2 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -1271,7 +1271,7 @@ static int
pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
{
struct device_node *np = pmif->node;
- int *bidp, i;
+ int *bidp;
pmif->cable_80 = 0;
pmif->broken_dma = pmif->broken_dma_warn = 0;
@@ -1430,7 +1430,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
pmif = &pmac_ide[i];
hwif = &ide_hwifs[i];
- if (mdev->ofdev.node->n_addrs == 0) {
+ if (macio_resource_count(mdev) == 0) {
printk(KERN_WARNING "ide%d: no address for %s\n",
i, mdev->ofdev.node->full_name);
return -ENXIO;
@@ -1686,7 +1686,7 @@ pmac_ide_probe(void)
#else
macio_register_driver(&pmac_ide_macio_driver);
pci_register_driver(&pmac_ide_pci_driver);
-#endif
+#endif
}
#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index 25103a0ef9b..39142e2f804 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -169,27 +169,4 @@ config IEEE1394_RAWIO
To compile this driver as a module, say M here: the
module will be called raw1394.
-config IEEE1394_CMP
- tristate "IEC61883-1 Plug support"
- depends on IEEE1394
- help
- This option enables the Connection Management Procedures
- (IEC61883-1) driver, which implements input and output plugs.
-
- To compile this driver as a module, say M here: the
- module will be called cmp.
-
-config IEEE1394_AMDTP
- tristate "IEC61883-6 (Audio transmission) support"
- depends on IEEE1394 && IEEE1394_OHCI1394 && IEEE1394_CMP
- help
- This option enables the Audio & Music Data Transmission Protocol
- (IEC61883-6) driver, which implements audio transmission over
- IEEE1394.
-
- The userspace interface is documented in amdtp.h.
-
- To compile this driver as a module, say M here: the
- module will be called amdtp.
-
endmenu
diff --git a/drivers/ieee1394/Makefile b/drivers/ieee1394/Makefile
index e8b4d48d376..6f53611fe25 100644
--- a/drivers/ieee1394/Makefile
+++ b/drivers/ieee1394/Makefile
@@ -14,8 +14,6 @@ obj-$(CONFIG_IEEE1394_RAWIO) += raw1394.o
obj-$(CONFIG_IEEE1394_SBP2) += sbp2.o
obj-$(CONFIG_IEEE1394_DV1394) += dv1394.o
obj-$(CONFIG_IEEE1394_ETH1394) += eth1394.o
-obj-$(CONFIG_IEEE1394_AMDTP) += amdtp.o
-obj-$(CONFIG_IEEE1394_CMP) += cmp.o
quiet_cmd_oui2c = OUI2C $@
cmd_oui2c = $(CONFIG_SHELL) $(srctree)/$(src)/oui2c.sh < $< > $@
diff --git a/drivers/ieee1394/amdtp.c b/drivers/ieee1394/amdtp.c
index 75897509c40..17390d762cf 100644
--- a/drivers/ieee1394/amdtp.c
+++ b/drivers/ieee1394/amdtp.c
@@ -80,7 +80,6 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
-#include <linux/ioctl32.h>
#include <linux/compat.h>
#include <linux/cdev.h>
#include <asm/uaccess.h>
diff --git a/drivers/ieee1394/csr1212.c b/drivers/ieee1394/csr1212.c
index 61ddd5d37ef..15773544234 100644
--- a/drivers/ieee1394/csr1212.c
+++ b/drivers/ieee1394/csr1212.c
@@ -1261,7 +1261,7 @@ static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
return CSR1212_EINVAL;
#endif
- cr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
+ cr = CSR1212_MALLOC(sizeof(*cr));
if (!cr)
return CSR1212_ENOMEM;
@@ -1393,8 +1393,7 @@ int csr1212_parse_keyval(struct csr1212_keyval *kv,
case CSR1212_KV_TYPE_LEAF:
if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
kv->value.leaf.data = CSR1212_MALLOC(quads_to_bytes(kvi_len));
- if (!kv->value.leaf.data)
- {
+ if (!kv->value.leaf.data) {
ret = CSR1212_ENOMEM;
goto fail;
}
@@ -1462,7 +1461,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
cache->next = NULL;
csr->cache_tail = cache;
cache->filled_head =
- CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
+ CSR1212_MALLOC(sizeof(*cache->filled_head));
if (!cache->filled_head) {
return CSR1212_ENOMEM;
}
@@ -1484,7 +1483,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
/* Now search read portions of the cache to see if it is there. */
for (cr = cache->filled_head; cr; cr = cr->next) {
if (cache_index < cr->offset_start) {
- newcr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
+ newcr = CSR1212_MALLOC(sizeof(*newcr));
if (!newcr)
return CSR1212_ENOMEM;
@@ -1508,7 +1507,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
if (!cr) {
cr = cache->filled_tail;
- newcr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
+ newcr = CSR1212_MALLOC(sizeof(*newcr));
if (!newcr)
return CSR1212_ENOMEM;
@@ -1611,15 +1610,17 @@ int csr1212_parse_csr(struct csr1212_csr *csr)
csr->root_kv->valid = 0;
csr->root_kv->next = csr->root_kv;
csr->root_kv->prev = csr->root_kv;
- csr1212_get_keyval(csr, csr->root_kv);
+ ret = _csr1212_read_keyval(csr, csr->root_kv);
+ if (ret != CSR1212_SUCCESS)
+ return ret;
/* Scan through the Root directory finding all extended ROM regions
* and make cache regions for them */
for (dentry = csr->root_kv->value.directory.dentries_head;
dentry; dentry = dentry->next) {
- if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
- csr1212_get_keyval(csr, dentry->kv);
-
+ if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM &&
+ !dentry->kv->valid) {
+ ret = _csr1212_read_keyval(csr, dentry->kv);
if (ret != CSR1212_SUCCESS)
return ret;
}
diff --git a/drivers/ieee1394/csr1212.h b/drivers/ieee1394/csr1212.h
index 28c5f4b726e..cecd5871f2d 100644
--- a/drivers/ieee1394/csr1212.h
+++ b/drivers/ieee1394/csr1212.h
@@ -646,7 +646,7 @@ static inline struct csr1212_csr_rom_cache *csr1212_rom_cache_malloc(u_int32_t o
{
struct csr1212_csr_rom_cache *cache;
- cache = CSR1212_MALLOC(sizeof(struct csr1212_csr_rom_cache) + size);
+ cache = CSR1212_MALLOC(sizeof(*cache) + size);
if (!cache)
return NULL;
diff --git a/drivers/ieee1394/dma.c b/drivers/ieee1394/dma.c
index b79ddb43e74..9fb2769d9ab 100644
--- a/drivers/ieee1394/dma.c
+++ b/drivers/ieee1394/dma.c
@@ -23,7 +23,8 @@ void dma_prog_region_init(struct dma_prog_region *prog)
prog->bus_addr = 0;
}
-int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, struct pci_dev *dev)
+int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
+ struct pci_dev *dev)
{
/* round up to page size */
n_bytes = PAGE_ALIGN(n_bytes);
@@ -32,7 +33,8 @@ int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
prog->kvirt = pci_alloc_consistent(dev, n_bytes, &prog->bus_addr);
if (!prog->kvirt) {
- printk(KERN_ERR "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
+ printk(KERN_ERR
+ "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
dma_prog_region_free(prog);
return -ENOMEM;
}
@@ -45,7 +47,8 @@ int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
void dma_prog_region_free(struct dma_prog_region *prog)
{
if (prog->kvirt) {
- pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT, prog->kvirt, prog->bus_addr);
+ pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT,
+ prog->kvirt, prog->bus_addr);
}
prog->kvirt = NULL;
@@ -65,7 +68,8 @@ void dma_region_init(struct dma_region *dma)
dma->sglist = NULL;
}
-int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_dev *dev, int direction)
+int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
+ struct pci_dev *dev, int direction)
{
unsigned int i;
@@ -95,14 +99,16 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_d
/* fill scatter/gather list with pages */
for (i = 0; i < dma->n_pages; i++) {
- unsigned long va = (unsigned long) dma->kvirt + (i << PAGE_SHIFT);
+ unsigned long va =
+ (unsigned long)dma->kvirt + (i << PAGE_SHIFT);
dma->sglist[i].page = vmalloc_to_page((void *)va);
dma->sglist[i].length = PAGE_SIZE;
}
/* map sglist to the IOMMU */
- dma->n_dma_pages = pci_map_sg(dev, dma->sglist, dma->n_pages, direction);
+ dma->n_dma_pages =
+ pci_map_sg(dev, dma->sglist, dma->n_pages, direction);
if (dma->n_dma_pages == 0) {
printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
@@ -114,7 +120,7 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_d
return 0;
-err:
+ err:
dma_region_free(dma);
return -ENOMEM;
}
@@ -122,7 +128,8 @@ err:
void dma_region_free(struct dma_region *dma)
{
if (dma->n_dma_pages) {
- pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages, dma->direction);
+ pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages,
+ dma->direction);
dma->n_dma_pages = 0;
dma->dev = NULL;
}
@@ -137,7 +144,8 @@ void dma_region_free(struct dma_region *dma)
/* find the scatterlist index and remaining offset corresponding to a
given offset from the beginning of the buffer */
-static inline int dma_region_find(struct dma_region *dma, unsigned long offset, unsigned long *rem)
+static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
+ unsigned long *rem)
{
int i;
unsigned long off = offset;
@@ -156,15 +164,18 @@ static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
return i;
}
-dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, unsigned long offset)
+dma_addr_t dma_region_offset_to_bus(struct dma_region * dma,
+ unsigned long offset)
{
unsigned long rem = 0;
- struct scatterlist *sg = &dma->sglist[dma_region_find(dma, offset, &rem)];
+ struct scatterlist *sg =
+ &dma->sglist[dma_region_find(dma, offset, &rem)];
return sg_dma_address(sg) + rem;
}
-void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, unsigned long len)
+void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
+ unsigned long len)
{
int first, last;
unsigned long rem;
@@ -175,10 +186,12 @@ void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, unsig
first = dma_region_find(dma, offset, &rem);
last = dma_region_find(dma, offset + len - 1, &rem);
- pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1, dma->direction);
+ pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1,
+ dma->direction);
}
-void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, unsigned long len)
+void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
+ unsigned long len)
{
int first, last;
unsigned long rem;
@@ -189,44 +202,47 @@ void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, un
first = dma_region_find(dma, offset, &rem);
last = dma_region_find(dma, offset + len - 1, &rem);
- pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first], last - first + 1, dma->direction);
+ pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first],
+ last - first + 1, dma->direction);
}
#ifdef CONFIG_MMU
/* nopage() handler for mmap access */
-static struct page*
-dma_region_pagefault(struct vm_area_struct *area, unsigned long address, int *type)
+static struct page *dma_region_pagefault(struct vm_area_struct *area,
+ unsigned long address, int *type)
{
unsigned long offset;
unsigned long kernel_virt_addr;
struct page *ret = NOPAGE_SIGBUS;
- struct dma_region *dma = (struct dma_region*) area->vm_private_data;
+ struct dma_region *dma = (struct dma_region *)area->vm_private_data;
if (!dma->kvirt)
goto out;
- if ( (address < (unsigned long) area->vm_start) ||
- (address > (unsigned long) area->vm_start + (dma->n_pages << PAGE_SHIFT)) )
+ if ((address < (unsigned long)area->vm_start) ||
+ (address >
+ (unsigned long)area->vm_start + (dma->n_pages << PAGE_SHIFT)))
goto out;
if (type)
*type = VM_FAULT_MINOR;
offset = address - area->vm_start;
- kernel_virt_addr = (unsigned long) dma->kvirt + offset;
- ret = vmalloc_to_page((void*) kernel_virt_addr);
+ kernel_virt_addr = (unsigned long)dma->kvirt + offset;
+ ret = vmalloc_to_page((void *)kernel_virt_addr);
get_page(ret);
-out:
+ out:
return ret;
}
static struct vm_operations_struct dma_region_vm_ops = {
- .nopage = dma_region_pagefault,
+ .nopage = dma_region_pagefault,
};
-int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma)
+int dma_region_mmap(struct dma_region *dma, struct file *file,
+ struct vm_area_struct *vma)
{
unsigned long size;
@@ -250,11 +266,12 @@ int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_st
return 0;
}
-#else /* CONFIG_MMU */
+#else /* CONFIG_MMU */
-int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma)
+int dma_region_mmap(struct dma_region *dma, struct file *file,
+ struct vm_area_struct *vma)
{
return -EINVAL;
}
-#endif /* CONFIG_MMU */
+#endif /* CONFIG_MMU */
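The dma.c changes are whitespace and line-wrapping only, but the reformatted signatures spell out the helper API: a dma_region is a vmalloc'd buffer backed by a scatter/gather list mapped through the IOMMU, with offset-to-bus translation and sync wrappers on top. A hypothetical caller, restricted to the functions visible in these hunks, might look like:

/* Hypothetical user of the dma_region helpers above. */
#include <linux/pci.h>
#include "dma.h"

static int example_setup(struct pci_dev *pdev, struct dma_region *region,
			 unsigned long bytes)
{
	dma_region_init(region);
	return dma_region_alloc(region, bytes, pdev, PCI_DMA_BIDIRECTIONAL);
}

static void example_transfer(struct dma_region *region, unsigned long offset,
			     unsigned long len)
{
	dma_addr_t bus = dma_region_offset_to_bus(region, offset);

	dma_region_sync_for_device(region, offset, len);	/* CPU writes visible to device */
	/* ... hand 'bus' to the controller and wait for it to finish ... */
	dma_region_sync_for_cpu(region, offset, len);		/* device writes visible to CPU */
}

static void example_teardown(struct dma_region *region)
{
	dma_region_free(region);	/* unmaps the sglist and releases the pages */
}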
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index cbbbe14b884..efeaa944bd0 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -108,7 +108,6 @@
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
-#include <linux/ioctl32.h>
#include <linux/compat.h>
#include <linux/cdev.h>
@@ -123,15 +122,6 @@
#include "ohci1394.h"
-#ifndef virt_to_page
-#define virt_to_page(x) MAP_NR(x)
-#endif
-
-#ifndef vmalloc_32
-#define vmalloc_32(x) vmalloc(x)
-#endif
-
-
/* DEBUG LEVELS:
0 - no debugging messages
1 - some debugging messages, but none during DMA frame transmission
@@ -2218,14 +2208,12 @@ static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes
unsigned long flags;
int i;
- video = kmalloc(sizeof(struct video_card), GFP_KERNEL);
+ video = kzalloc(sizeof(*video), GFP_KERNEL);
if (!video) {
printk(KERN_ERR "dv1394: cannot allocate video_card\n");
goto err;
}
- memset(video, 0, sizeof(struct video_card));
-
video->ohci = ohci;
/* lower 2 bits of id indicate which of four "plugs"
per host */
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index c9e92d85c89..30fa0d43a43 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -88,9 +88,6 @@
printk(KERN_ERR "%s:%s[%d]: " fmt "\n", driver_name, __FUNCTION__, __LINE__, ## args)
#define TRACE() printk(KERN_ERR "%s:%s[%d] ---- TRACE\n", driver_name, __FUNCTION__, __LINE__)
-static char version[] __devinitdata =
- "$Rev: 1312 $ Ben Collins <bcollins@debian.org>";
-
struct fragment_info {
struct list_head list;
int offset;
@@ -355,12 +352,12 @@ static int eth1394_probe(struct device *dev)
if (!hi)
return -ENOENT;
- new_node = kmalloc(sizeof(struct eth1394_node_ref),
+ new_node = kmalloc(sizeof(*new_node),
in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
if (!new_node)
return -ENOMEM;
- node_info = kmalloc(sizeof(struct eth1394_node_info),
+ node_info = kmalloc(sizeof(*node_info),
in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
if (!node_info) {
kfree(new_node);
@@ -436,12 +433,12 @@ static int eth1394_update(struct unit_directory *ud)
node = eth1394_find_node(&priv->ip_node_list, ud);
if (!node) {
- node = kmalloc(sizeof(struct eth1394_node_ref),
+ node = kmalloc(sizeof(*node),
in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
if (!node)
return -ENOMEM;
- node_info = kmalloc(sizeof(struct eth1394_node_info),
+ node_info = kmalloc(sizeof(*node_info),
in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
if (!node_info) {
kfree(node);
@@ -566,7 +563,6 @@ static void ether1394_add_host (struct hpsb_host *host)
struct eth1394_host_info *hi = NULL;
struct net_device *dev = NULL;
struct eth1394_priv *priv;
- static int version_printed = 0;
u64 fifo_addr;
if (!(host->config_roms & HPSB_CONFIG_ROM_ENTRY_IP1394))
@@ -581,9 +577,6 @@ static void ether1394_add_host (struct hpsb_host *host)
if (fifo_addr == ~0ULL)
goto out;
- if (version_printed++ == 0)
- ETH1394_PRINT_G (KERN_INFO, "%s\n", version);
-
/* We should really have our own alloc_hpsbdev() function in
* net_init.c instead of calling the one for ethernet then hijacking
* it for ourselves. That way we'd be a real networking device. */
@@ -1021,7 +1014,7 @@ static inline int new_fragment(struct list_head *frag_info, int offset, int len)
}
}
- new = kmalloc(sizeof(struct fragment_info), GFP_ATOMIC);
+ new = kmalloc(sizeof(*new), GFP_ATOMIC);
if (!new)
return -ENOMEM;
@@ -1040,7 +1033,7 @@ static inline int new_partial_datagram(struct net_device *dev,
{
struct partial_datagram *new;
- new = kmalloc(sizeof(struct partial_datagram), GFP_ATOMIC);
+ new = kmalloc(sizeof(*new), GFP_ATOMIC);
if (!new)
return -ENOMEM;
@@ -1768,7 +1761,6 @@ fail:
static void ether1394_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strcpy (info->driver, driver_name);
- strcpy (info->version, "$Rev: 1312 $");
/* FIXME XXX provide sane businfo */
strcpy (info->bus_info, "ieee1394");
}
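The allocation changes in dv1394.c and eth1394.c above are two recurring cleanups in this series: sizeof(struct foo) becomes sizeof(*ptr), so the size tracks the variable's type, and kmalloc()+memset() collapses into kzalloc() where zeroing was wanted (the eth1394 hunks keep plain kmalloc and only change the sizeof). In isolation, with a hypothetical struct, the idiom looks like:

/* Illustrative fragment of the sizeof(*ptr)/kzalloc idiom; not taken from any one hunk. */
#include <linux/slab.h>

struct example_node {		/* hypothetical */
	int offset;
	int len;
};

static struct example_node *example_alloc(void)
{
	struct example_node *n;

	n = kzalloc(sizeof(*n), GFP_ATOMIC);	/* allocated and zeroed in one call */
	return n;				/* NULL on failure */
}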
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
index 997e1bf6297..734b121a055 100644
--- a/drivers/ieee1394/highlevel.c
+++ b/drivers/ieee1394/highlevel.c
@@ -101,12 +101,10 @@ void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
return NULL;
}
- hi = kmalloc(sizeof(*hi) + data_size, GFP_ATOMIC);
+ hi = kzalloc(sizeof(*hi) + data_size, GFP_ATOMIC);
if (!hi)
return NULL;
- memset(hi, 0, sizeof(*hi) + data_size);
-
if (data_size) {
data = hi->data = hi + 1;
hi->size = data_size;
@@ -326,11 +324,9 @@ u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
return retval;
}
- as = (struct hpsb_address_serve *)
- kmalloc(sizeof(struct hpsb_address_serve), GFP_KERNEL);
- if (as == NULL) {
+ as = kmalloc(sizeof(*as), GFP_KERNEL);
+ if (!as)
return retval;
- }
INIT_LIST_HEAD(&as->host_list);
INIT_LIST_HEAD(&as->hl_list);
@@ -383,11 +379,9 @@ int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
return 0;
}
- as = (struct hpsb_address_serve *)
- kmalloc(sizeof(struct hpsb_address_serve), GFP_ATOMIC);
- if (as == NULL) {
- return 0;
- }
+ as = kmalloc(sizeof(*as), GFP_ATOMIC);
+ if (!as)
+ return 0;
INIT_LIST_HEAD(&as->host_list);
INIT_LIST_HEAD(&as->hl_list);
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index aeeaeb670d0..ba09741fc82 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -61,12 +61,12 @@ static void delayed_reset_bus(void * __reset_info)
static int dummy_transmit_packet(struct hpsb_host *h, struct hpsb_packet *p)
{
- return 0;
+ return 0;
}
static int dummy_devctl(struct hpsb_host *h, enum devctl_cmd c, int arg)
{
- return -1;
+ return -1;
}
static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg)
@@ -75,9 +75,9 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command, unsigned
}
static struct hpsb_host_driver dummy_driver = {
- .transmit_packet = dummy_transmit_packet,
- .devctl = dummy_devctl,
- .isoctl = dummy_isoctl
+ .transmit_packet = dummy_transmit_packet,
+ .devctl = dummy_devctl,
+ .isoctl = dummy_isoctl
};
static int alloc_hostnum_cb(struct hpsb_host *host, void *__data)
@@ -110,13 +110,13 @@ static DECLARE_MUTEX(host_num_alloc);
struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
struct device *dev)
{
- struct hpsb_host *h;
+ struct hpsb_host *h;
int i;
int hostnum = 0;
- h = kmalloc(sizeof(struct hpsb_host) + extra, SLAB_KERNEL);
- if (!h) return NULL;
- memset(h, 0, sizeof(struct hpsb_host) + extra);
+ h = kzalloc(sizeof(*h) + extra, SLAB_KERNEL);
+ if (!h)
+ return NULL;
h->csr.rom = csr1212_create_csr(&csr_bus_ops, CSR_BUS_INFO_SIZE, h);
if (!h->csr.rom) {
@@ -125,7 +125,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
}
h->hostdata = h + 1;
- h->driver = drv;
+ h->driver = drv;
skb_queue_head_init(&h->pending_packet_queue);
INIT_LIST_HEAD(&h->addr_space);
@@ -145,8 +145,8 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
h->timeout.function = abort_timedouts;
h->timeout_interval = HZ / 20; // 50ms by default
- h->topology_map = h->csr.topology_map + 3;
- h->speed_map = (u8 *)(h->csr.speed_map + 2);
+ h->topology_map = h->csr.topology_map + 3;
+ h->speed_map = (u8 *)(h->csr.speed_map + 2);
down(&host_num_alloc);
@@ -186,14 +186,14 @@ int hpsb_add_host(struct hpsb_host *host)
void hpsb_remove_host(struct hpsb_host *host)
{
- host->is_shutdown = 1;
+ host->is_shutdown = 1;
cancel_delayed_work(&host->delayed_reset);
flush_scheduled_work();
- host->driver = &dummy_driver;
+ host->driver = &dummy_driver;
- highlevel_remove_host(host);
+ highlevel_remove_host(host);
hpsb_remove_extra_config_roms(host);
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
index ae9b02cc013..07d188ca849 100644
--- a/drivers/ieee1394/hosts.h
+++ b/drivers/ieee1394/hosts.h
@@ -17,47 +17,47 @@ struct hpsb_packet;
struct hpsb_iso;
struct hpsb_host {
- struct list_head host_list;
+ struct list_head host_list;
- void *hostdata;
+ void *hostdata;
- atomic_t generation;
+ atomic_t generation;
struct sk_buff_head pending_packet_queue;
struct timer_list timeout;
unsigned long timeout_interval;
- unsigned char iso_listen_count[64];
+ unsigned char iso_listen_count[64];
- int node_count; /* number of identified nodes on this bus */
- int selfid_count; /* total number of SelfIDs received */
+ int node_count; /* number of identified nodes on this bus */
+ int selfid_count; /* total number of SelfIDs received */
int nodes_active; /* number of nodes that are actually active */
- nodeid_t node_id; /* node ID of this host */
- nodeid_t irm_id; /* ID of this bus' isochronous resource manager */
- nodeid_t busmgr_id; /* ID of this bus' bus manager */
+ nodeid_t node_id; /* node ID of this host */
+ nodeid_t irm_id; /* ID of this bus' isochronous resource manager */
+ nodeid_t busmgr_id; /* ID of this bus' bus manager */
- /* this nodes state */
- unsigned in_bus_reset:1;
- unsigned is_shutdown:1;
+ /* this nodes state */
+ unsigned in_bus_reset:1;
+ unsigned is_shutdown:1;
unsigned resume_packet_sent:1;
- /* this nodes' duties on the bus */
- unsigned is_root:1;
- unsigned is_cycmst:1;
- unsigned is_irm:1;
- unsigned is_busmgr:1;
+ /* this nodes' duties on the bus */
+ unsigned is_root:1;
+ unsigned is_cycmst:1;
+ unsigned is_irm:1;
+ unsigned is_busmgr:1;
- int reset_retries;
- quadlet_t *topology_map;
- u8 *speed_map;
- struct csr_control csr;
+ int reset_retries;
+ quadlet_t *topology_map;
+ u8 *speed_map;
+ struct csr_control csr;
/* Per node tlabel pool allocation */
struct hpsb_tlabel_pool tpool[64];
- struct hpsb_host_driver *driver;
+ struct hpsb_host_driver *driver;
struct pci_dev *pdev;
@@ -77,34 +77,34 @@ struct hpsb_host {
enum devctl_cmd {
- /* Host is requested to reset its bus and cancel all outstanding async
- * requests. If arg == 1, it shall also attempt to become root on the
- * bus. Return void. */
- RESET_BUS,
-
- /* Arg is void, return value is the hardware cycle counter value. */
- GET_CYCLE_COUNTER,
-
- /* Set the hardware cycle counter to the value in arg, return void.
- * FIXME - setting is probably not required. */
- SET_CYCLE_COUNTER,
-
- /* Configure hardware for new bus ID in arg, return void. */
- SET_BUS_ID,
-
- /* If arg true, start sending cycle start packets, stop if arg == 0.
- * Return void. */
- ACT_CYCLE_MASTER,
-
- /* Cancel all outstanding async requests without resetting the bus.
- * Return void. */
- CANCEL_REQUESTS,
-
- /* Start or stop receiving isochronous channel in arg. Return void.
- * This acts as an optimization hint, hosts are not required not to
- * listen on unrequested channels. */
- ISO_LISTEN_CHANNEL,
- ISO_UNLISTEN_CHANNEL
+ /* Host is requested to reset its bus and cancel all outstanding async
+ * requests. If arg == 1, it shall also attempt to become root on the
+ * bus. Return void. */
+ RESET_BUS,
+
+ /* Arg is void, return value is the hardware cycle counter value. */
+ GET_CYCLE_COUNTER,
+
+ /* Set the hardware cycle counter to the value in arg, return void.
+ * FIXME - setting is probably not required. */
+ SET_CYCLE_COUNTER,
+
+ /* Configure hardware for new bus ID in arg, return void. */
+ SET_BUS_ID,
+
+ /* If arg true, start sending cycle start packets, stop if arg == 0.
+ * Return void. */
+ ACT_CYCLE_MASTER,
+
+ /* Cancel all outstanding async requests without resetting the bus.
+ * Return void. */
+ CANCEL_REQUESTS,
+
+ /* Start or stop receiving isochronous channel in arg. Return void.
+ * This acts as an optimization hint, hosts are not required not to
+ * listen on unrequested channels. */
+ ISO_LISTEN_CHANNEL,
+ ISO_UNLISTEN_CHANNEL
};
enum isoctl_cmd {
@@ -135,13 +135,13 @@ enum isoctl_cmd {
};
enum reset_types {
- /* 166 microsecond reset -- only type of reset available on
- non-1394a capable controllers */
- LONG_RESET,
+ /* 166 microsecond reset -- only type of reset available on
+ non-1394a capable controllers */
+ LONG_RESET,
- /* Short (arbitrated) reset -- only available on 1394a capable
- controllers */
- SHORT_RESET,
+ /* Short (arbitrated) reset -- only available on 1394a capable
+ controllers */
+ SHORT_RESET,
/* Variants that set force_root before issuing the bus reset */
LONG_RESET_FORCE_ROOT, SHORT_RESET_FORCE_ROOT,
@@ -159,22 +159,22 @@ struct hpsb_host_driver {
* reads to the ConfigROM on its own. */
void (*set_hw_config_rom) (struct hpsb_host *host, quadlet_t *config_rom);
- /* This function shall implement packet transmission based on
- * packet->type. It shall CRC both parts of the packet (unless
- * packet->type == raw) and do byte-swapping as necessary or instruct
- * the hardware to do so. It can return immediately after the packet
- * was queued for sending. After sending, hpsb_sent_packet() has to be
- * called. Return 0 on success, negative errno on failure.
- * NOTE: The function must be callable in interrupt context.
- */
- int (*transmit_packet) (struct hpsb_host *host,
- struct hpsb_packet *packet);
-
- /* This function requests miscellanous services from the driver, see
- * above for command codes and expected actions. Return -1 for unknown
- * command, though that should never happen.
- */
- int (*devctl) (struct hpsb_host *host, enum devctl_cmd command, int arg);
+ /* This function shall implement packet transmission based on
+ * packet->type. It shall CRC both parts of the packet (unless
+ * packet->type == raw) and do byte-swapping as necessary or instruct
+ * the hardware to do so. It can return immediately after the packet
+ * was queued for sending. After sending, hpsb_sent_packet() has to be
+ * called. Return 0 on success, negative errno on failure.
+ * NOTE: The function must be callable in interrupt context.
+ */
+ int (*transmit_packet) (struct hpsb_host *host,
+ struct hpsb_packet *packet);
+
+ /* This function requests miscellanous services from the driver, see
+ * above for command codes and expected actions. Return -1 for unknown
+ * command, though that should never happen.
+ */
+ int (*devctl) (struct hpsb_host *host, enum devctl_cmd command, int arg);
/* ISO transmission/reception functions. Return 0 on success, -1
* (or -EXXX errno code) on failure. If the low-level driver does not
@@ -182,15 +182,15 @@ struct hpsb_host_driver {
*/
int (*isoctl) (struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg);
- /* This function is mainly to redirect local CSR reads/locks to the iso
- * management registers (bus manager id, bandwidth available, channels
- * available) to the hardware registers in OHCI. reg is 0,1,2,3 for bus
- * mgr, bwdth avail, ch avail hi, ch avail lo respectively (the same ids
- * as OHCI uses). data and compare are the new data and expected data
- * respectively, return value is the old value.
- */
- quadlet_t (*hw_csr_reg) (struct hpsb_host *host, int reg,
- quadlet_t data, quadlet_t compare);
+ /* This function is mainly to redirect local CSR reads/locks to the iso
+ * management registers (bus manager id, bandwidth available, channels
+ * available) to the hardware registers in OHCI. reg is 0,1,2,3 for bus
+ * mgr, bwdth avail, ch avail hi, ch avail lo respectively (the same ids
+ * as OHCI uses). data and compare are the new data and expected data
+ * respectively, return value is the old value.
+ */
+ quadlet_t (*hw_csr_reg) (struct hpsb_host *host, int reg,
+ quadlet_t data, quadlet_t compare);
};
diff --git a/drivers/ieee1394/ieee1394-ioctl.h b/drivers/ieee1394/ieee1394-ioctl.h
index f92b566363d..15670398634 100644
--- a/drivers/ieee1394/ieee1394-ioctl.h
+++ b/drivers/ieee1394/ieee1394-ioctl.h
@@ -7,14 +7,6 @@
#include <linux/ioctl.h>
#include <linux/types.h>
-
-/* AMDTP Gets 6 */
-#define AMDTP_IOC_CHANNEL _IOW('#', 0x00, struct amdtp_ioctl)
-#define AMDTP_IOC_PLUG _IOW('#', 0x01, struct amdtp_ioctl)
-#define AMDTP_IOC_PING _IOW('#', 0x02, struct amdtp_ioctl)
-#define AMDTP_IOC_ZAP _IO ('#', 0x03)
-
-
/* DV1394 Gets 10 */
/* Get the driver ready to transmit video. pass a struct dv1394_init* as
diff --git a/drivers/ieee1394/ieee1394.h b/drivers/ieee1394/ieee1394.h
index b634a9bb365..936d776de00 100644
--- a/drivers/ieee1394/ieee1394.h
+++ b/drivers/ieee1394/ieee1394.h
@@ -62,6 +62,7 @@
extern const char *hpsb_speedto_str[];
+/* 1394a cable PHY packets */
#define SELFID_PWRCL_NO_POWER 0x0
#define SELFID_PWRCL_PROVIDE_15W 0x1
#define SELFID_PWRCL_PROVIDE_30W 0x2
@@ -76,8 +77,24 @@ extern const char *hpsb_speedto_str[];
#define SELFID_PORT_NCONN 0x1
#define SELFID_PORT_NONE 0x0
+#define PHYPACKET_LINKON 0x40000000
+#define PHYPACKET_PHYCONFIG_R 0x00800000
+#define PHYPACKET_PHYCONFIG_T 0x00400000
+#define EXTPHYPACKET_TYPE_PING 0x00000000
+#define EXTPHYPACKET_TYPE_REMOTEACCESS_BASE 0x00040000
+#define EXTPHYPACKET_TYPE_REMOTEACCESS_PAGED 0x00140000
+#define EXTPHYPACKET_TYPE_REMOTEREPLY_BASE 0x000C0000
+#define EXTPHYPACKET_TYPE_REMOTEREPLY_PAGED 0x001C0000
+#define EXTPHYPACKET_TYPE_REMOTECOMMAND 0x00200000
+#define EXTPHYPACKET_TYPE_REMOTECONFIRMATION 0x00280000
+#define EXTPHYPACKET_TYPE_RESUME 0x003C0000
-/* 1394a PHY bitmasks */
+#define EXTPHYPACKET_TYPEMASK 0xC0FC0000
+
+#define PHYPACKET_PORT_SHIFT 24
+#define PHYPACKET_GAPCOUNT_SHIFT 16
+
+/* 1394a PHY register map bitmasks */
#define PHY_00_PHYSICAL_ID 0xFC
#define PHY_00_R 0x02 /* Root */
#define PHY_00_PS 0x01 /* Power Status*/
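The new PHYPACKET_* and EXTPHYPACKET_* constants name the bit layout of 1394a cable PHY packets, so callers such as hpsb_send_phy_config() (reworked in ieee1394_core.c below) can build the configuration quadlet symbolically instead of with magic shifts. A sketch of that construction, written for the ieee1394 core where quadlet_t and hpsb_make_phypacket() are in scope; the assumption that hpsb_make_phypacket() supplies the inverted second quadlet the old open-coded version set by hand is not shown in this diff.

/* Sketch: building a PHY configuration packet from the new constants. */
static struct hpsb_packet *example_phy_config(struct hpsb_host *host,
					      int rootid, int gapcnt)
{
	quadlet_t d = 0;

	if (rootid != -1)	/* R bit: force 'rootid' to become root */
		d |= PHYPACKET_PHYCONFIG_R | rootid << PHYPACKET_PORT_SHIFT;
	if (gapcnt != -1)	/* T bit: the gap count field is valid */
		d |= PHYPACKET_PHYCONFIG_T | gapcnt << PHYPACKET_GAPCOUNT_SHIFT;

	return hpsb_make_phypacket(host, d);	/* as used by the hunk below */
}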
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index 32a1e016c85..25ef5a86f5f 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -179,34 +179,34 @@ void hpsb_free_packet(struct hpsb_packet *packet)
int hpsb_reset_bus(struct hpsb_host *host, int type)
{
- if (!host->in_bus_reset) {
- host->driver->devctl(host, RESET_BUS, type);
- return 0;
- } else {
- return 1;
- }
+ if (!host->in_bus_reset) {
+ host->driver->devctl(host, RESET_BUS, type);
+ return 0;
+ } else {
+ return 1;
+ }
}
int hpsb_bus_reset(struct hpsb_host *host)
{
- if (host->in_bus_reset) {
- HPSB_NOTICE("%s called while bus reset already in progress",
+ if (host->in_bus_reset) {
+ HPSB_NOTICE("%s called while bus reset already in progress",
__FUNCTION__);
- return 1;
- }
+ return 1;
+ }
- abort_requests(host);
- host->in_bus_reset = 1;
- host->irm_id = -1;
+ abort_requests(host);
+ host->in_bus_reset = 1;
+ host->irm_id = -1;
host->is_irm = 0;
- host->busmgr_id = -1;
+ host->busmgr_id = -1;
host->is_busmgr = 0;
host->is_cycmst = 0;
- host->node_count = 0;
- host->selfid_count = 0;
+ host->node_count = 0;
+ host->selfid_count = 0;
- return 0;
+ return 0;
}
@@ -216,150 +216,156 @@ int hpsb_bus_reset(struct hpsb_host *host)
*/
static int check_selfids(struct hpsb_host *host)
{
- int nodeid = -1;
- int rest_of_selfids = host->selfid_count;
- struct selfid *sid = (struct selfid *)host->topology_map;
- struct ext_selfid *esid;
- int esid_seq = 23;
+ int nodeid = -1;
+ int rest_of_selfids = host->selfid_count;
+ struct selfid *sid = (struct selfid *)host->topology_map;
+ struct ext_selfid *esid;
+ int esid_seq = 23;
host->nodes_active = 0;
- while (rest_of_selfids--) {
- if (!sid->extended) {
- nodeid++;
- esid_seq = 0;
+ while (rest_of_selfids--) {
+ if (!sid->extended) {
+ nodeid++;
+ esid_seq = 0;
- if (sid->phy_id != nodeid) {
- HPSB_INFO("SelfIDs failed monotony check with "
- "%d", sid->phy_id);
- return 0;
- }
+ if (sid->phy_id != nodeid) {
+ HPSB_INFO("SelfIDs failed monotony check with "
+ "%d", sid->phy_id);
+ return 0;
+ }
if (sid->link_active) {
host->nodes_active++;
if (sid->contender)
host->irm_id = LOCAL_BUS | sid->phy_id;
}
- } else {
- esid = (struct ext_selfid *)sid;
-
- if ((esid->phy_id != nodeid)
- || (esid->seq_nr != esid_seq)) {
- HPSB_INFO("SelfIDs failed monotony check with "
- "%d/%d", esid->phy_id, esid->seq_nr);
- return 0;
- }
- esid_seq++;
- }
- sid++;
- }
-
- esid = (struct ext_selfid *)(sid - 1);
- while (esid->extended) {
- if ((esid->porta == 0x2) || (esid->portb == 0x2)
- || (esid->portc == 0x2) || (esid->portd == 0x2)
- || (esid->porte == 0x2) || (esid->portf == 0x2)
- || (esid->portg == 0x2) || (esid->porth == 0x2)) {
+ } else {
+ esid = (struct ext_selfid *)sid;
+
+ if ((esid->phy_id != nodeid)
+ || (esid->seq_nr != esid_seq)) {
+ HPSB_INFO("SelfIDs failed monotony check with "
+ "%d/%d", esid->phy_id, esid->seq_nr);
+ return 0;
+ }
+ esid_seq++;
+ }
+ sid++;
+ }
+
+ esid = (struct ext_selfid *)(sid - 1);
+ while (esid->extended) {
+ if ((esid->porta == SELFID_PORT_PARENT) ||
+ (esid->portb == SELFID_PORT_PARENT) ||
+ (esid->portc == SELFID_PORT_PARENT) ||
+ (esid->portd == SELFID_PORT_PARENT) ||
+ (esid->porte == SELFID_PORT_PARENT) ||
+ (esid->portf == SELFID_PORT_PARENT) ||
+ (esid->portg == SELFID_PORT_PARENT) ||
+ (esid->porth == SELFID_PORT_PARENT)) {
HPSB_INFO("SelfIDs failed root check on "
"extended SelfID");
return 0;
- }
- esid--;
- }
+ }
+ esid--;
+ }
- sid = (struct selfid *)esid;
- if ((sid->port0 == 0x2) || (sid->port1 == 0x2) || (sid->port2 == 0x2)) {
+ sid = (struct selfid *)esid;
+ if ((sid->port0 == SELFID_PORT_PARENT) ||
+ (sid->port1 == SELFID_PORT_PARENT) ||
+ (sid->port2 == SELFID_PORT_PARENT)) {
HPSB_INFO("SelfIDs failed root check");
return 0;
- }
+ }
host->node_count = nodeid + 1;
- return 1;
+ return 1;
}
static void build_speed_map(struct hpsb_host *host, int nodecount)
{
u8 speedcap[nodecount];
u8 cldcnt[nodecount];
- u8 *map = host->speed_map;
- struct selfid *sid;
- struct ext_selfid *esid;
- int i, j, n;
-
- for (i = 0; i < (nodecount * 64); i += 64) {
- for (j = 0; j < nodecount; j++) {
- map[i+j] = IEEE1394_SPEED_MAX;
- }
- }
-
- for (i = 0; i < nodecount; i++) {
- cldcnt[i] = 0;
- }
-
- /* find direct children count and speed */
- for (sid = (struct selfid *)&host->topology_map[host->selfid_count-1],
- n = nodecount - 1;
- (void *)sid >= (void *)host->topology_map; sid--) {
- if (sid->extended) {
- esid = (struct ext_selfid *)sid;
-
- if (esid->porta == 0x3) cldcnt[n]++;
- if (esid->portb == 0x3) cldcnt[n]++;
- if (esid->portc == 0x3) cldcnt[n]++;
- if (esid->portd == 0x3) cldcnt[n]++;
- if (esid->porte == 0x3) cldcnt[n]++;
- if (esid->portf == 0x3) cldcnt[n]++;
- if (esid->portg == 0x3) cldcnt[n]++;
- if (esid->porth == 0x3) cldcnt[n]++;
+ u8 *map = host->speed_map;
+ struct selfid *sid;
+ struct ext_selfid *esid;
+ int i, j, n;
+
+ for (i = 0; i < (nodecount * 64); i += 64) {
+ for (j = 0; j < nodecount; j++) {
+ map[i+j] = IEEE1394_SPEED_MAX;
+ }
+ }
+
+ for (i = 0; i < nodecount; i++) {
+ cldcnt[i] = 0;
+ }
+
+ /* find direct children count and speed */
+ for (sid = (struct selfid *)&host->topology_map[host->selfid_count-1],
+ n = nodecount - 1;
+ (void *)sid >= (void *)host->topology_map; sid--) {
+ if (sid->extended) {
+ esid = (struct ext_selfid *)sid;
+
+ if (esid->porta == SELFID_PORT_CHILD) cldcnt[n]++;
+ if (esid->portb == SELFID_PORT_CHILD) cldcnt[n]++;
+ if (esid->portc == SELFID_PORT_CHILD) cldcnt[n]++;
+ if (esid->portd == SELFID_PORT_CHILD) cldcnt[n]++;
+ if (esid->porte == SELFID_PORT_CHILD) cldcnt[n]++;
+ if (esid->portf == SELFID_PORT_CHILD) cldcnt[n]++;
+ if (esid->portg == SELFID_PORT_CHILD) cldcnt[n]++;
+ if (esid->porth == SELFID_PORT_CHILD) cldcnt[n]++;
} else {
- if (sid->port0 == 0x3) cldcnt[n]++;
- if (sid->port1 == 0x3) cldcnt[n]++;
- if (sid->port2 == 0x3) cldcnt[n]++;
-
- speedcap[n] = sid->speed;
- n--;
- }
- }
-
- /* set self mapping */
- for (i = 0; i < nodecount; i++) {
- map[64*i + i] = speedcap[i];
- }
-
- /* fix up direct children count to total children count;
- * also fix up speedcaps for sibling and parent communication */
- for (i = 1; i < nodecount; i++) {
- for (j = cldcnt[i], n = i - 1; j > 0; j--) {
- cldcnt[i] += cldcnt[n];
- speedcap[n] = min(speedcap[n], speedcap[i]);
- n -= cldcnt[n] + 1;
- }
- }
-
- for (n = 0; n < nodecount; n++) {
- for (i = n - cldcnt[n]; i <= n; i++) {
- for (j = 0; j < (n - cldcnt[n]); j++) {
- map[j*64 + i] = map[i*64 + j] =
- min(map[i*64 + j], speedcap[n]);
- }
- for (j = n + 1; j < nodecount; j++) {
- map[j*64 + i] = map[i*64 + j] =
- min(map[i*64 + j], speedcap[n]);
- }
- }
- }
+ if (sid->port0 == SELFID_PORT_CHILD) cldcnt[n]++;
+ if (sid->port1 == SELFID_PORT_CHILD) cldcnt[n]++;
+ if (sid->port2 == SELFID_PORT_CHILD) cldcnt[n]++;
+
+ speedcap[n] = sid->speed;
+ n--;
+ }
+ }
+
+ /* set self mapping */
+ for (i = 0; i < nodecount; i++) {
+ map[64*i + i] = speedcap[i];
+ }
+
+ /* fix up direct children count to total children count;
+ * also fix up speedcaps for sibling and parent communication */
+ for (i = 1; i < nodecount; i++) {
+ for (j = cldcnt[i], n = i - 1; j > 0; j--) {
+ cldcnt[i] += cldcnt[n];
+ speedcap[n] = min(speedcap[n], speedcap[i]);
+ n -= cldcnt[n] + 1;
+ }
+ }
+
+ for (n = 0; n < nodecount; n++) {
+ for (i = n - cldcnt[n]; i <= n; i++) {
+ for (j = 0; j < (n - cldcnt[n]); j++) {
+ map[j*64 + i] = map[i*64 + j] =
+ min(map[i*64 + j], speedcap[n]);
+ }
+ for (j = n + 1; j < nodecount; j++) {
+ map[j*64 + i] = map[i*64 + j] =
+ min(map[i*64 + j], speedcap[n]);
+ }
+ }
+ }
}
void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid)
{
- if (host->in_bus_reset) {
- HPSB_VERBOSE("Including SelfID 0x%x", sid);
- host->topology_map[host->selfid_count++] = sid;
- } else {
- HPSB_NOTICE("Spurious SelfID packet (0x%08x) received from bus %d",
+ if (host->in_bus_reset) {
+ HPSB_VERBOSE("Including SelfID 0x%x", sid);
+ host->topology_map[host->selfid_count++] = sid;
+ } else {
+ HPSB_NOTICE("Spurious SelfID packet (0x%08x) received from bus %d",
sid, NODEID_TO_BUS(host->node_id));
- }
+ }
}
void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
@@ -367,50 +373,50 @@ void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
if (!host->in_bus_reset)
HPSB_NOTICE("SelfID completion called outside of bus reset!");
- host->node_id = LOCAL_BUS | phyid;
- host->is_root = isroot;
+ host->node_id = LOCAL_BUS | phyid;
+ host->is_root = isroot;
- if (!check_selfids(host)) {
- if (host->reset_retries++ < 20) {
- /* selfid stage did not complete without error */
- HPSB_NOTICE("Error in SelfID stage, resetting");
+ if (!check_selfids(host)) {
+ if (host->reset_retries++ < 20) {
+ /* selfid stage did not complete without error */
+ HPSB_NOTICE("Error in SelfID stage, resetting");
host->in_bus_reset = 0;
/* this should work from ohci1394 now... */
- hpsb_reset_bus(host, LONG_RESET);
- return;
- } else {
- HPSB_NOTICE("Stopping out-of-control reset loop");
- HPSB_NOTICE("Warning - topology map and speed map will not be valid");
+ hpsb_reset_bus(host, LONG_RESET);
+ return;
+ } else {
+ HPSB_NOTICE("Stopping out-of-control reset loop");
+ HPSB_NOTICE("Warning - topology map and speed map will not be valid");
host->reset_retries = 0;
- }
- } else {
+ }
+ } else {
host->reset_retries = 0;
- build_speed_map(host, host->node_count);
- }
+ build_speed_map(host, host->node_count);
+ }
HPSB_VERBOSE("selfid_complete called with successful SelfID stage "
"... irm_id: 0x%X node_id: 0x%X",host->irm_id,host->node_id);
- /* irm_id is kept up to date by check_selfids() */
- if (host->irm_id == host->node_id) {
- host->is_irm = 1;
- } else {
- host->is_busmgr = 0;
- host->is_irm = 0;
- }
+ /* irm_id is kept up to date by check_selfids() */
+ if (host->irm_id == host->node_id) {
+ host->is_irm = 1;
+ } else {
+ host->is_busmgr = 0;
+ host->is_irm = 0;
+ }
- if (isroot) {
+ if (isroot) {
host->driver->devctl(host, ACT_CYCLE_MASTER, 1);
host->is_cycmst = 1;
}
atomic_inc(&host->generation);
host->in_bus_reset = 0;
- highlevel_host_reset(host);
+ highlevel_host_reset(host);
}
void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
- int ackcode)
+ int ackcode)
{
unsigned long flags;
@@ -457,6 +463,7 @@ void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt)
{
struct hpsb_packet *packet;
+ quadlet_t d = 0;
int retval = 0;
if (rootid >= ALL_NODES || rootid < -1 || gapcnt > 0x3f || gapcnt < -1 ||
@@ -466,26 +473,16 @@ int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt)
return -EINVAL;
}
- packet = hpsb_alloc_packet(0);
- if (!packet)
- return -ENOMEM;
-
- packet->host = host;
- packet->header_size = 8;
- packet->data_size = 0;
- packet->expect_response = 0;
- packet->no_waiter = 0;
- packet->type = hpsb_raw;
- packet->header[0] = 0;
if (rootid != -1)
- packet->header[0] |= rootid << 24 | 1 << 23;
+ d |= PHYPACKET_PHYCONFIG_R | rootid << PHYPACKET_PORT_SHIFT;
if (gapcnt != -1)
- packet->header[0] |= gapcnt << 16 | 1 << 22;
+ d |= PHYPACKET_PHYCONFIG_T | gapcnt << PHYPACKET_GAPCOUNT_SHIFT;
- packet->header[1] = ~packet->header[0];
+ packet = hpsb_make_phypacket(host, d);
+ if (!packet)
+ return -ENOMEM;
packet->generation = get_hpsb_generation(host);
-
retval = hpsb_send_packet_and_wait(packet);
hpsb_free_packet(packet);
@@ -510,13 +507,13 @@ int hpsb_send_packet(struct hpsb_packet *packet)
{
struct hpsb_host *host = packet->host;
- if (host->is_shutdown)
+ if (host->is_shutdown)
return -EINVAL;
if (host->in_bus_reset ||
(packet->generation != get_hpsb_generation(host)))
- return -EAGAIN;
+ return -EAGAIN;
- packet->state = hpsb_queued;
+ packet->state = hpsb_queued;
/* This just seems silly to me */
WARN_ON(packet->no_waiter && packet->expect_response);
@@ -530,42 +527,42 @@ int hpsb_send_packet(struct hpsb_packet *packet)
skb_queue_tail(&host->pending_packet_queue, packet->skb);
}
- if (packet->node_id == host->node_id) {
+ if (packet->node_id == host->node_id) {
/* it is a local request, so handle it locally */
- quadlet_t *data;
- size_t size = packet->data_size + packet->header_size;
+ quadlet_t *data;
+ size_t size = packet->data_size + packet->header_size;
- data = kmalloc(size, GFP_ATOMIC);
- if (!data) {
- HPSB_ERR("unable to allocate memory for concatenating header and data");
- return -ENOMEM;
- }
+ data = kmalloc(size, GFP_ATOMIC);
+ if (!data) {
+ HPSB_ERR("unable to allocate memory for concatenating header and data");
+ return -ENOMEM;
+ }
- memcpy(data, packet->header, packet->header_size);
+ memcpy(data, packet->header, packet->header_size);
- if (packet->data_size)
+ if (packet->data_size)
memcpy(((u8*)data) + packet->header_size, packet->data, packet->data_size);
- dump_packet("send packet local", packet->header, packet->header_size, -1);
+ dump_packet("send packet local", packet->header, packet->header_size, -1);
- hpsb_packet_sent(host, packet, packet->expect_response ? ACK_PENDING : ACK_COMPLETE);
- hpsb_packet_received(host, data, size, 0);
+ hpsb_packet_sent(host, packet, packet->expect_response ? ACK_PENDING : ACK_COMPLETE);
+ hpsb_packet_received(host, data, size, 0);
- kfree(data);
+ kfree(data);
- return 0;
- }
+ return 0;
+ }
- if (packet->type == hpsb_async && packet->node_id != ALL_NODES) {
- packet->speed_code =
- host->speed_map[NODEID_TO_NODE(host->node_id) * 64
- + NODEID_TO_NODE(packet->node_id)];
- }
+ if (packet->type == hpsb_async && packet->node_id != ALL_NODES) {
+ packet->speed_code =
+ host->speed_map[NODEID_TO_NODE(host->node_id) * 64
+ + NODEID_TO_NODE(packet->node_id)];
+ }
- dump_packet("send packet", packet->header, packet->header_size, packet->speed_code);
+ dump_packet("send packet", packet->header, packet->header_size, packet->speed_code);
- return host->driver->transmit_packet(host, packet);
+ return host->driver->transmit_packet(host, packet);
}
/* We could just use complete() directly as the packet complete
@@ -593,81 +590,81 @@ int hpsb_send_packet_and_wait(struct hpsb_packet *packet)
static void send_packet_nocare(struct hpsb_packet *packet)
{
- if (hpsb_send_packet(packet) < 0) {
- hpsb_free_packet(packet);
- }
+ if (hpsb_send_packet(packet) < 0) {
+ hpsb_free_packet(packet);
+ }
}
static void handle_packet_response(struct hpsb_host *host, int tcode,
quadlet_t *data, size_t size)
{
- struct hpsb_packet *packet = NULL;
+ struct hpsb_packet *packet = NULL;
struct sk_buff *skb;
- int tcode_match = 0;
- int tlabel;
- unsigned long flags;
+ int tcode_match = 0;
+ int tlabel;
+ unsigned long flags;
- tlabel = (data[0] >> 10) & 0x3f;
+ tlabel = (data[0] >> 10) & 0x3f;
spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
skb_queue_walk(&host->pending_packet_queue, skb) {
packet = (struct hpsb_packet *)skb->data;
- if ((packet->tlabel == tlabel)
- && (packet->node_id == (data[1] >> 16))){
- break;
- }
+ if ((packet->tlabel == tlabel)
+ && (packet->node_id == (data[1] >> 16))){
+ break;
+ }
packet = NULL;
- }
+ }
if (packet == NULL) {
- HPSB_DEBUG("unsolicited response packet received - no tlabel match");
- dump_packet("contents", data, 16, -1);
+ HPSB_DEBUG("unsolicited response packet received - no tlabel match");
+ dump_packet("contents", data, 16, -1);
spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
- return;
- }
+ return;
+ }
- switch (packet->tcode) {
- case TCODE_WRITEQ:
- case TCODE_WRITEB:
- if (tcode != TCODE_WRITE_RESPONSE)
+ switch (packet->tcode) {
+ case TCODE_WRITEQ:
+ case TCODE_WRITEB:
+ if (tcode != TCODE_WRITE_RESPONSE)
break;
tcode_match = 1;
memcpy(packet->header, data, 12);
- break;
- case TCODE_READQ:
- if (tcode != TCODE_READQ_RESPONSE)
+ break;
+ case TCODE_READQ:
+ if (tcode != TCODE_READQ_RESPONSE)
break;
tcode_match = 1;
memcpy(packet->header, data, 16);
- break;
- case TCODE_READB:
- if (tcode != TCODE_READB_RESPONSE)
+ break;
+ case TCODE_READB:
+ if (tcode != TCODE_READB_RESPONSE)
break;
tcode_match = 1;
BUG_ON(packet->skb->len - sizeof(*packet) < size - 16);
memcpy(packet->header, data, 16);
memcpy(packet->data, data + 4, size - 16);
- break;
- case TCODE_LOCK_REQUEST:
- if (tcode != TCODE_LOCK_RESPONSE)
+ break;
+ case TCODE_LOCK_REQUEST:
+ if (tcode != TCODE_LOCK_RESPONSE)
break;
tcode_match = 1;
size = min((size - 16), (size_t)8);
BUG_ON(packet->skb->len - sizeof(*packet) < size);
memcpy(packet->header, data, 16);
memcpy(packet->data, data + 4, size);
- break;
- }
+ break;
+ }
- if (!tcode_match) {
+ if (!tcode_match) {
spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
- HPSB_INFO("unsolicited response packet received - tcode mismatch");
- dump_packet("contents", data, 16, -1);
- return;
- }
+ HPSB_INFO("unsolicited response packet received - tcode mismatch");
+ dump_packet("contents", data, 16, -1);
+ return;
+ }
__skb_unlink(skb, &host->pending_packet_queue);
@@ -686,27 +683,27 @@ static void handle_packet_response(struct hpsb_host *host, int tcode,
static struct hpsb_packet *create_reply_packet(struct hpsb_host *host,
quadlet_t *data, size_t dsize)
{
- struct hpsb_packet *p;
+ struct hpsb_packet *p;
- p = hpsb_alloc_packet(dsize);
- if (unlikely(p == NULL)) {
- /* FIXME - send data_error response */
- return NULL;
- }
+ p = hpsb_alloc_packet(dsize);
+ if (unlikely(p == NULL)) {
+ /* FIXME - send data_error response */
+ return NULL;
+ }
- p->type = hpsb_async;
- p->state = hpsb_unused;
- p->host = host;
- p->node_id = data[1] >> 16;
- p->tlabel = (data[0] >> 10) & 0x3f;
- p->no_waiter = 1;
+ p->type = hpsb_async;
+ p->state = hpsb_unused;
+ p->host = host;
+ p->node_id = data[1] >> 16;
+ p->tlabel = (data[0] >> 10) & 0x3f;
+ p->no_waiter = 1;
p->generation = get_hpsb_generation(host);
if (dsize % 4)
p->data[dsize / 4] = 0;
- return p;
+ return p;
}
#define PREP_ASYNC_HEAD_RCODE(tc) \
@@ -717,7 +714,7 @@ static struct hpsb_packet *create_reply_packet(struct hpsb_host *host,
packet->header[2] = 0
static void fill_async_readquad_resp(struct hpsb_packet *packet, int rcode,
- quadlet_t data)
+ quadlet_t data)
{
PREP_ASYNC_HEAD_RCODE(TCODE_READQ_RESPONSE);
packet->header[3] = data;
@@ -726,7 +723,7 @@ static void fill_async_readquad_resp(struct hpsb_packet *packet, int rcode,
}
static void fill_async_readblock_resp(struct hpsb_packet *packet, int rcode,
- int length)
+ int length)
{
if (rcode != RCODE_COMPLETE)
length = 0;
@@ -746,7 +743,7 @@ static void fill_async_write_resp(struct hpsb_packet *packet, int rcode)
}
static void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extcode,
- int length)
+ int length)
{
if (rcode != RCODE_COMPLETE)
length = 0;
@@ -758,184 +755,184 @@ static void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extc
}
#define PREP_REPLY_PACKET(length) \
- packet = create_reply_packet(host, data, length); \
- if (packet == NULL) break
+ packet = create_reply_packet(host, data, length); \
+ if (packet == NULL) break
static void handle_incoming_packet(struct hpsb_host *host, int tcode,
quadlet_t *data, size_t size, int write_acked)
{
- struct hpsb_packet *packet;
- int length, rcode, extcode;
- quadlet_t buffer;
- nodeid_t source = data[1] >> 16;
- nodeid_t dest = data[0] >> 16;
- u16 flags = (u16) data[0];
- u64 addr;
-
- /* big FIXME - no error checking is done for an out of bounds length */
-
- switch (tcode) {
- case TCODE_WRITEQ:
- addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
- rcode = highlevel_write(host, source, dest, data+3,
+ struct hpsb_packet *packet;
+ int length, rcode, extcode;
+ quadlet_t buffer;
+ nodeid_t source = data[1] >> 16;
+ nodeid_t dest = data[0] >> 16;
+ u16 flags = (u16) data[0];
+ u64 addr;
+
+ /* big FIXME - no error checking is done for an out of bounds length */
+
+ switch (tcode) {
+ case TCODE_WRITEQ:
+ addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
+ rcode = highlevel_write(host, source, dest, data+3,
addr, 4, flags);
- if (!write_acked
- && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK)
- && (rcode >= 0)) {
- /* not a broadcast write, reply */
- PREP_REPLY_PACKET(0);
- fill_async_write_resp(packet, rcode);
- send_packet_nocare(packet);
- }
- break;
-
- case TCODE_WRITEB:
- addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
- rcode = highlevel_write(host, source, dest, data+4,
+ if (!write_acked
+ && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK)
+ && (rcode >= 0)) {
+ /* not a broadcast write, reply */
+ PREP_REPLY_PACKET(0);
+ fill_async_write_resp(packet, rcode);
+ send_packet_nocare(packet);
+ }
+ break;
+
+ case TCODE_WRITEB:
+ addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
+ rcode = highlevel_write(host, source, dest, data+4,
addr, data[3]>>16, flags);
- if (!write_acked
- && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK)
- && (rcode >= 0)) {
- /* not a broadcast write, reply */
- PREP_REPLY_PACKET(0);
- fill_async_write_resp(packet, rcode);
- send_packet_nocare(packet);
- }
- break;
-
- case TCODE_READQ:
- addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
- rcode = highlevel_read(host, source, &buffer, addr, 4, flags);
-
- if (rcode >= 0) {
- PREP_REPLY_PACKET(0);
- fill_async_readquad_resp(packet, rcode, buffer);
- send_packet_nocare(packet);
- }
- break;
-
- case TCODE_READB:
- length = data[3] >> 16;
- PREP_REPLY_PACKET(length);
-
- addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
- rcode = highlevel_read(host, source, packet->data, addr,
- length, flags);
-
- if (rcode >= 0) {
- fill_async_readblock_resp(packet, rcode, length);
- send_packet_nocare(packet);
- } else {
- hpsb_free_packet(packet);
- }
- break;
-
- case TCODE_LOCK_REQUEST:
- length = data[3] >> 16;
- extcode = data[3] & 0xffff;
- addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
-
- PREP_REPLY_PACKET(8);
-
- if ((extcode == 0) || (extcode >= 7)) {
- /* let switch default handle error */
- length = 0;
- }
-
- switch (length) {
- case 4:
- rcode = highlevel_lock(host, source, packet->data, addr,
- data[4], 0, extcode,flags);
- fill_async_lock_resp(packet, rcode, extcode, 4);
- break;
- case 8:
- if ((extcode != EXTCODE_FETCH_ADD)
- && (extcode != EXTCODE_LITTLE_ADD)) {
- rcode = highlevel_lock(host, source,
- packet->data, addr,
- data[5], data[4],
- extcode, flags);
- fill_async_lock_resp(packet, rcode, extcode, 4);
- } else {
- rcode = highlevel_lock64(host, source,
- (octlet_t *)packet->data, addr,
- *(octlet_t *)(data + 4), 0ULL,
- extcode, flags);
- fill_async_lock_resp(packet, rcode, extcode, 8);
- }
- break;
- case 16:
- rcode = highlevel_lock64(host, source,
- (octlet_t *)packet->data, addr,
- *(octlet_t *)(data + 6),
- *(octlet_t *)(data + 4),
- extcode, flags);
- fill_async_lock_resp(packet, rcode, extcode, 8);
- break;
- default:
- rcode = RCODE_TYPE_ERROR;
- fill_async_lock_resp(packet, rcode,
- extcode, 0);
- }
-
- if (rcode >= 0) {
- send_packet_nocare(packet);
- } else {
- hpsb_free_packet(packet);
- }
- break;
- }
+ if (!write_acked
+ && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK)
+ && (rcode >= 0)) {
+ /* not a broadcast write, reply */
+ PREP_REPLY_PACKET(0);
+ fill_async_write_resp(packet, rcode);
+ send_packet_nocare(packet);
+ }
+ break;
+
+ case TCODE_READQ:
+ addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
+ rcode = highlevel_read(host, source, &buffer, addr, 4, flags);
+
+ if (rcode >= 0) {
+ PREP_REPLY_PACKET(0);
+ fill_async_readquad_resp(packet, rcode, buffer);
+ send_packet_nocare(packet);
+ }
+ break;
+
+ case TCODE_READB:
+ length = data[3] >> 16;
+ PREP_REPLY_PACKET(length);
+
+ addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
+ rcode = highlevel_read(host, source, packet->data, addr,
+ length, flags);
+
+ if (rcode >= 0) {
+ fill_async_readblock_resp(packet, rcode, length);
+ send_packet_nocare(packet);
+ } else {
+ hpsb_free_packet(packet);
+ }
+ break;
+
+ case TCODE_LOCK_REQUEST:
+ length = data[3] >> 16;
+ extcode = data[3] & 0xffff;
+ addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
+
+ PREP_REPLY_PACKET(8);
+
+ if ((extcode == 0) || (extcode >= 7)) {
+ /* let switch default handle error */
+ length = 0;
+ }
+
+ switch (length) {
+ case 4:
+ rcode = highlevel_lock(host, source, packet->data, addr,
+ data[4], 0, extcode,flags);
+ fill_async_lock_resp(packet, rcode, extcode, 4);
+ break;
+ case 8:
+ if ((extcode != EXTCODE_FETCH_ADD)
+ && (extcode != EXTCODE_LITTLE_ADD)) {
+ rcode = highlevel_lock(host, source,
+ packet->data, addr,
+ data[5], data[4],
+ extcode, flags);
+ fill_async_lock_resp(packet, rcode, extcode, 4);
+ } else {
+ rcode = highlevel_lock64(host, source,
+ (octlet_t *)packet->data, addr,
+ *(octlet_t *)(data + 4), 0ULL,
+ extcode, flags);
+ fill_async_lock_resp(packet, rcode, extcode, 8);
+ }
+ break;
+ case 16:
+ rcode = highlevel_lock64(host, source,
+ (octlet_t *)packet->data, addr,
+ *(octlet_t *)(data + 6),
+ *(octlet_t *)(data + 4),
+ extcode, flags);
+ fill_async_lock_resp(packet, rcode, extcode, 8);
+ break;
+ default:
+ rcode = RCODE_TYPE_ERROR;
+ fill_async_lock_resp(packet, rcode,
+ extcode, 0);
+ }
+
+ if (rcode >= 0) {
+ send_packet_nocare(packet);
+ } else {
+ hpsb_free_packet(packet);
+ }
+ break;
+ }
}
#undef PREP_REPLY_PACKET
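The reindented handle_incoming_packet() above is where the core cracks an asynchronous request apart: the transaction code sits in bits 7..4 of the first header quadlet, the destination and source node IDs occupy the top halves of quadlets 0 and 1, and the 48-bit destination offset is split across the low 16 bits of quadlet 1 and all of quadlet 2. A minimal stand-alone sketch of that decoding (plain stdint types instead of the kernel's quadlet_t/nodeid_t, and hypothetical struct/function names):

#include <stdint.h>

struct async_req {
        unsigned int tcode;       /* bits 7..4 of quadlet 0 */
        uint16_t dest;            /* top half of quadlet 0 */
        uint16_t source;          /* top half of quadlet 1 */
        uint64_t addr;            /* 48-bit destination offset */
};

/* Decode the first three header quadlets of an incoming async request,
 * mirroring the field extraction in handle_incoming_packet(). */
static void decode_async_header(const uint32_t *data, struct async_req *r)
{
        r->tcode  = (data[0] >> 4) & 0xf;
        r->dest   = data[0] >> 16;
        r->source = data[1] >> 16;
        r->addr   = (((uint64_t)(data[1] & 0xffff)) << 32) | data[2];
}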
void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
- int write_acked)
+ int write_acked)
{
- int tcode;
-
- if (host->in_bus_reset) {
- HPSB_INFO("received packet during reset; ignoring");
- return;
- }
-
- dump_packet("received packet", data, size, -1);
-
- tcode = (data[0] >> 4) & 0xf;
-
- switch (tcode) {
- case TCODE_WRITE_RESPONSE:
- case TCODE_READQ_RESPONSE:
- case TCODE_READB_RESPONSE:
- case TCODE_LOCK_RESPONSE:
- handle_packet_response(host, tcode, data, size);
- break;
-
- case TCODE_WRITEQ:
- case TCODE_WRITEB:
- case TCODE_READQ:
- case TCODE_READB:
- case TCODE_LOCK_REQUEST:
- handle_incoming_packet(host, tcode, data, size, write_acked);
- break;
-
-
- case TCODE_ISO_DATA:
- highlevel_iso_receive(host, data, size);
- break;
-
- case TCODE_CYCLE_START:
- /* simply ignore this packet if it is passed on */
- break;
-
- default:
- HPSB_NOTICE("received packet with bogus transaction code %d",
- tcode);
- break;
- }
+ int tcode;
+
+ if (host->in_bus_reset) {
+ HPSB_INFO("received packet during reset; ignoring");
+ return;
+ }
+
+ dump_packet("received packet", data, size, -1);
+
+ tcode = (data[0] >> 4) & 0xf;
+
+ switch (tcode) {
+ case TCODE_WRITE_RESPONSE:
+ case TCODE_READQ_RESPONSE:
+ case TCODE_READB_RESPONSE:
+ case TCODE_LOCK_RESPONSE:
+ handle_packet_response(host, tcode, data, size);
+ break;
+
+ case TCODE_WRITEQ:
+ case TCODE_WRITEB:
+ case TCODE_READQ:
+ case TCODE_READB:
+ case TCODE_LOCK_REQUEST:
+ handle_incoming_packet(host, tcode, data, size, write_acked);
+ break;
+
+
+ case TCODE_ISO_DATA:
+ highlevel_iso_receive(host, data, size);
+ break;
+
+ case TCODE_CYCLE_START:
+ /* simply ignore this packet if it is passed on */
+ break;
+
+ default:
+ HPSB_NOTICE("received packet with bogus transaction code %d",
+ tcode);
+ break;
+ }
}
@@ -1030,10 +1027,10 @@ static int hpsbpkt_thread(void *__hi)
daemonize("khpsbpkt");
+ current->flags |= PF_NOFREEZE;
+
while (1) {
if (down_interruptible(&khpsbpkt_sig)) {
- if (try_to_freeze())
- continue;
printk("khpsbpkt: received unexpected signal?!\n" );
break;
}
@@ -1129,7 +1126,7 @@ static int __init ieee1394_init(void)
nodemgr implements functionality required of ieee1394a-2000
IRMs */
hpsb_disable_irm = 1;
-
+
return 0;
}
diff --git a/drivers/ieee1394/ieee1394_core.h b/drivers/ieee1394/ieee1394_core.h
index 0b31429d0a6..b35466023f0 100644
--- a/drivers/ieee1394/ieee1394_core.h
+++ b/drivers/ieee1394/ieee1394_core.h
@@ -10,8 +10,8 @@
struct hpsb_packet {
- /* This struct is basically read-only for hosts with the exception of
- * the data buffer contents and xnext - see below. */
+ /* This struct is basically read-only for hosts with the exception of
+ * the data buffer contents and xnext - see below. */
/* This can be used for host driver internal linking.
*
@@ -21,47 +21,47 @@ struct hpsb_packet {
* driver_list when free'ing it. */
struct list_head driver_list;
- nodeid_t node_id;
+ nodeid_t node_id;
- /* Async and Iso types should be clear, raw means send-as-is, do not
- * CRC! Byte swapping shall still be done in this case. */
- enum { hpsb_async, hpsb_iso, hpsb_raw } __attribute__((packed)) type;
+ /* Async and Iso types should be clear, raw means send-as-is, do not
+ * CRC! Byte swapping shall still be done in this case. */
+ enum { hpsb_async, hpsb_iso, hpsb_raw } __attribute__((packed)) type;
- /* Okay, this is core internal and a no care for hosts.
- * queued = queued for sending
- * pending = sent, waiting for response
- * complete = processing completed, successful or not
- */
- enum {
- hpsb_unused, hpsb_queued, hpsb_pending, hpsb_complete
- } __attribute__((packed)) state;
+ /* Okay, this is core internal and a no care for hosts.
+ * queued = queued for sending
+ * pending = sent, waiting for response
+ * complete = processing completed, successful or not
+ */
+ enum {
+ hpsb_unused, hpsb_queued, hpsb_pending, hpsb_complete
+ } __attribute__((packed)) state;
- /* These are core internal. */
- signed char tlabel;
+ /* These are core internal. */
+ signed char tlabel;
signed char ack_code;
unsigned char tcode;
- unsigned expect_response:1;
- unsigned no_waiter:1;
+ unsigned expect_response:1;
+ unsigned no_waiter:1;
- /* Speed to transmit with: 0 = 100Mbps, 1 = 200Mbps, 2 = 400Mbps */
- unsigned speed_code:2;
+ /* Speed to transmit with: 0 = 100Mbps, 1 = 200Mbps, 2 = 400Mbps */
+ unsigned speed_code:2;
- /*
- * *header and *data are guaranteed to be 32-bit DMAable and may be
- * overwritten to allow in-place byte swapping. Neither of these is
- * CRCed (the sizes also don't include CRC), but contain space for at
- * least one additional quadlet to allow in-place CRCing. The memory is
- * also guaranteed to be DMA mappable.
- */
- quadlet_t *header;
- quadlet_t *data;
- size_t header_size;
- size_t data_size;
+ /*
+ * *header and *data are guaranteed to be 32-bit DMAable and may be
+ * overwritten to allow in-place byte swapping. Neither of these is
+ * CRCed (the sizes also don't include CRC), but contain space for at
+ * least one additional quadlet to allow in-place CRCing. The memory is
+ * also guaranteed to be DMA mappable.
+ */
+ quadlet_t *header;
+ quadlet_t *data;
+ size_t header_size;
+ size_t data_size;
- struct hpsb_host *host;
- unsigned int generation;
+ struct hpsb_host *host;
+ unsigned int generation;
atomic_t refcnt;
@@ -73,10 +73,10 @@ struct hpsb_packet {
/* XXX This is just a hack at the moment */
struct sk_buff *skb;
- /* Store jiffies for implementing bus timeouts. */
- unsigned long sendtime;
+ /* Store jiffies for implementing bus timeouts. */
+ unsigned long sendtime;
- quadlet_t embedded_header[5];
+ quadlet_t embedded_header[5];
};
/* Set a task for when a packet completes */
@@ -102,7 +102,7 @@ void hpsb_free_packet(struct hpsb_packet *packet);
*/
static inline unsigned int get_hpsb_generation(struct hpsb_host *host)
{
- return atomic_read(&host->generation);
+ return atomic_read(&host->generation);
}
/*
@@ -157,7 +157,7 @@ void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot);
* from within a transmit packet routine.
*/
void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
- int ackcode);
+ int ackcode);
/*
* Hand over received packet to the core. The contents of data are expected to
@@ -171,7 +171,7 @@ void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
* packet type.
*/
void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
- int write_acked);
+ int write_acked);
/*
@@ -197,20 +197,20 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
* Block 15 (240-255) reserved for drivers under development, etc.
*/
-#define IEEE1394_MAJOR 171
+#define IEEE1394_MAJOR 171
-#define IEEE1394_MINOR_BLOCK_RAW1394 0
-#define IEEE1394_MINOR_BLOCK_VIDEO1394 1
-#define IEEE1394_MINOR_BLOCK_DV1394 2
-#define IEEE1394_MINOR_BLOCK_AMDTP 3
+#define IEEE1394_MINOR_BLOCK_RAW1394 0
+#define IEEE1394_MINOR_BLOCK_VIDEO1394 1
+#define IEEE1394_MINOR_BLOCK_DV1394 2
+#define IEEE1394_MINOR_BLOCK_AMDTP 3
#define IEEE1394_MINOR_BLOCK_EXPERIMENTAL 15
-#define IEEE1394_CORE_DEV MKDEV(IEEE1394_MAJOR, 0)
-#define IEEE1394_RAW1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16)
-#define IEEE1394_VIDEO1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_VIDEO1394 * 16)
-#define IEEE1394_DV1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16)
-#define IEEE1394_AMDTP_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_AMDTP * 16)
-#define IEEE1394_EXPERIMENTAL_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_EXPERIMENTAL * 16)
+#define IEEE1394_CORE_DEV MKDEV(IEEE1394_MAJOR, 0)
+#define IEEE1394_RAW1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16)
+#define IEEE1394_VIDEO1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_VIDEO1394 * 16)
+#define IEEE1394_DV1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16)
+#define IEEE1394_AMDTP_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_AMDTP * 16)
+#define IEEE1394_EXPERIMENTAL_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_EXPERIMENTAL * 16)
/* return the index (within a minor number block) of a file */
static inline unsigned char ieee1394_file_to_instance(struct file *file)
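The reindented defines above carve major 171 into blocks of 16 minors, one block per protocol driver, and MKDEV(IEEE1394_MAJOR, block * 16) names the first node of each block (block 15 covers 240-255, as the comment notes). A short sketch of how block number and per-block instance map to and from a minor number; the helper names and the MINORS_PER_BLOCK constant are illustrative, not kernel API:

#define IEEE1394_MINORS_PER_BLOCK 16    /* derived from the "* 16" above */

/* First minor of a block, as in the IEEE1394_*_DEV defines. */
static inline unsigned int ieee1394_block_to_minor(unsigned int block)
{
        return block * IEEE1394_MINORS_PER_BLOCK;
}

/* Split a minor back into block and instance; the instance is what
 * ieee1394_file_to_instance() is documented to return for a file. */
static inline unsigned int ieee1394_minor_to_block(unsigned int minor)
{
        return minor / IEEE1394_MINORS_PER_BLOCK;
}

static inline unsigned int ieee1394_minor_to_instance(unsigned int minor)
{
        return minor % IEEE1394_MINORS_PER_BLOCK;
}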
diff --git a/drivers/ieee1394/ieee1394_transactions.c b/drivers/ieee1394/ieee1394_transactions.c
index 0aa876360f9..3fe2f6c4a25 100644
--- a/drivers/ieee1394/ieee1394_transactions.c
+++ b/drivers/ieee1394/ieee1394_transactions.c
@@ -22,7 +22,7 @@
#include "ieee1394_core.h"
#include "highlevel.h"
#include "nodemgr.h"
-
+#include "ieee1394_transactions.h"
#define PREP_ASYNC_HEAD_ADDRESS(tc) \
packet->tcode = tc; \
@@ -31,80 +31,82 @@
packet->header[1] = (packet->host->node_id << 16) | (addr >> 32); \
packet->header[2] = addr & 0xffffffff
-
static void fill_async_readquad(struct hpsb_packet *packet, u64 addr)
{
- PREP_ASYNC_HEAD_ADDRESS(TCODE_READQ);
- packet->header_size = 12;
- packet->data_size = 0;
- packet->expect_response = 1;
+ PREP_ASYNC_HEAD_ADDRESS(TCODE_READQ);
+ packet->header_size = 12;
+ packet->data_size = 0;
+ packet->expect_response = 1;
}
-static void fill_async_readblock(struct hpsb_packet *packet, u64 addr, int length)
+static void fill_async_readblock(struct hpsb_packet *packet, u64 addr,
+ int length)
{
- PREP_ASYNC_HEAD_ADDRESS(TCODE_READB);
- packet->header[3] = length << 16;
- packet->header_size = 16;
- packet->data_size = 0;
- packet->expect_response = 1;
+ PREP_ASYNC_HEAD_ADDRESS(TCODE_READB);
+ packet->header[3] = length << 16;
+ packet->header_size = 16;
+ packet->data_size = 0;
+ packet->expect_response = 1;
}
-static void fill_async_writequad(struct hpsb_packet *packet, u64 addr, quadlet_t data)
+static void fill_async_writequad(struct hpsb_packet *packet, u64 addr,
+ quadlet_t data)
{
- PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEQ);
- packet->header[3] = data;
- packet->header_size = 16;
- packet->data_size = 0;
- packet->expect_response = 1;
+ PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEQ);
+ packet->header[3] = data;
+ packet->header_size = 16;
+ packet->data_size = 0;
+ packet->expect_response = 1;
}
-static void fill_async_writeblock(struct hpsb_packet *packet, u64 addr, int length)
+static void fill_async_writeblock(struct hpsb_packet *packet, u64 addr,
+ int length)
{
- PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEB);
- packet->header[3] = length << 16;
- packet->header_size = 16;
- packet->expect_response = 1;
- packet->data_size = length + (length % 4 ? 4 - (length % 4) : 0);
+ PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEB);
+ packet->header[3] = length << 16;
+ packet->header_size = 16;
+ packet->expect_response = 1;
+ packet->data_size = length + (length % 4 ? 4 - (length % 4) : 0);
}
static void fill_async_lock(struct hpsb_packet *packet, u64 addr, int extcode,
- int length)
+ int length)
{
- PREP_ASYNC_HEAD_ADDRESS(TCODE_LOCK_REQUEST);
- packet->header[3] = (length << 16) | extcode;
- packet->header_size = 16;
- packet->data_size = length;
- packet->expect_response = 1;
+ PREP_ASYNC_HEAD_ADDRESS(TCODE_LOCK_REQUEST);
+ packet->header[3] = (length << 16) | extcode;
+ packet->header_size = 16;
+ packet->data_size = length;
+ packet->expect_response = 1;
}
static void fill_iso_packet(struct hpsb_packet *packet, int length, int channel,
- int tag, int sync)
+ int tag, int sync)
{
- packet->header[0] = (length << 16) | (tag << 14) | (channel << 8)
- | (TCODE_ISO_DATA << 4) | sync;
+ packet->header[0] = (length << 16) | (tag << 14) | (channel << 8)
+ | (TCODE_ISO_DATA << 4) | sync;
- packet->header_size = 4;
- packet->data_size = length;
- packet->type = hpsb_iso;
- packet->tcode = TCODE_ISO_DATA;
+ packet->header_size = 4;
+ packet->data_size = length;
+ packet->type = hpsb_iso;
+ packet->tcode = TCODE_ISO_DATA;
}
static void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data)
{
- packet->header[0] = data;
- packet->header[1] = ~data;
- packet->header_size = 8;
- packet->data_size = 0;
- packet->expect_response = 0;
- packet->type = hpsb_raw; /* No CRC added */
- packet->speed_code = IEEE1394_SPEED_100; /* Force speed to be 100Mbps */
+ packet->header[0] = data;
+ packet->header[1] = ~data;
+ packet->header_size = 8;
+ packet->data_size = 0;
+ packet->expect_response = 0;
+ packet->type = hpsb_raw; /* No CRC added */
+ packet->speed_code = IEEE1394_SPEED_100; /* Force speed to be 100Mbps */
}
static void fill_async_stream_packet(struct hpsb_packet *packet, int length,
int channel, int tag, int sync)
{
packet->header[0] = (length << 16) | (tag << 14) | (channel << 8)
- | (TCODE_STREAM_DATA << 4) | sync;
+ | (TCODE_STREAM_DATA << 4) | sync;
packet->header_size = 4;
packet->data_size = length;
@@ -171,99 +173,96 @@ int hpsb_get_tlabel(struct hpsb_packet *packet)
*/
void hpsb_free_tlabel(struct hpsb_packet *packet)
{
- unsigned long flags;
+ unsigned long flags;
struct hpsb_tlabel_pool *tp;
tp = &packet->host->tpool[packet->node_id & NODE_MASK];
BUG_ON(packet->tlabel > 63 || packet->tlabel < 0);
- spin_lock_irqsave(&tp->lock, flags);
+ spin_lock_irqsave(&tp->lock, flags);
BUG_ON(!test_and_clear_bit(packet->tlabel, tp->pool));
- spin_unlock_irqrestore(&tp->lock, flags);
+ spin_unlock_irqrestore(&tp->lock, flags);
up(&tp->count);
}
-
-
int hpsb_packet_success(struct hpsb_packet *packet)
{
- switch (packet->ack_code) {
- case ACK_PENDING:
- switch ((packet->header[1] >> 12) & 0xf) {
- case RCODE_COMPLETE:
- return 0;
- case RCODE_CONFLICT_ERROR:
- return -EAGAIN;
- case RCODE_DATA_ERROR:
- return -EREMOTEIO;
- case RCODE_TYPE_ERROR:
- return -EACCES;
- case RCODE_ADDRESS_ERROR:
- return -EINVAL;
- default:
- HPSB_ERR("received reserved rcode %d from node %d",
- (packet->header[1] >> 12) & 0xf,
- packet->node_id);
- return -EAGAIN;
- }
- HPSB_PANIC("reached unreachable code 1 in %s", __FUNCTION__);
-
- case ACK_BUSY_X:
- case ACK_BUSY_A:
- case ACK_BUSY_B:
- return -EBUSY;
-
- case ACK_TYPE_ERROR:
- return -EACCES;
-
- case ACK_COMPLETE:
- if (packet->tcode == TCODE_WRITEQ
- || packet->tcode == TCODE_WRITEB) {
- return 0;
- } else {
- HPSB_ERR("impossible ack_complete from node %d "
- "(tcode %d)", packet->node_id, packet->tcode);
- return -EAGAIN;
- }
-
-
- case ACK_DATA_ERROR:
- if (packet->tcode == TCODE_WRITEB
- || packet->tcode == TCODE_LOCK_REQUEST) {
- return -EAGAIN;
- } else {
- HPSB_ERR("impossible ack_data_error from node %d "
- "(tcode %d)", packet->node_id, packet->tcode);
- return -EAGAIN;
- }
-
- case ACK_ADDRESS_ERROR:
- return -EINVAL;
-
- case ACK_TARDY:
- case ACK_CONFLICT_ERROR:
- case ACKX_NONE:
- case ACKX_SEND_ERROR:
- case ACKX_ABORTED:
- case ACKX_TIMEOUT:
- /* error while sending */
- return -EAGAIN;
-
- default:
- HPSB_ERR("got invalid ack %d from node %d (tcode %d)",
- packet->ack_code, packet->node_id, packet->tcode);
- return -EAGAIN;
- }
-
- HPSB_PANIC("reached unreachable code 2 in %s", __FUNCTION__);
+ switch (packet->ack_code) {
+ case ACK_PENDING:
+ switch ((packet->header[1] >> 12) & 0xf) {
+ case RCODE_COMPLETE:
+ return 0;
+ case RCODE_CONFLICT_ERROR:
+ return -EAGAIN;
+ case RCODE_DATA_ERROR:
+ return -EREMOTEIO;
+ case RCODE_TYPE_ERROR:
+ return -EACCES;
+ case RCODE_ADDRESS_ERROR:
+ return -EINVAL;
+ default:
+ HPSB_ERR("received reserved rcode %d from node %d",
+ (packet->header[1] >> 12) & 0xf,
+ packet->node_id);
+ return -EAGAIN;
+ }
+ HPSB_PANIC("reached unreachable code 1 in %s", __FUNCTION__);
+
+ case ACK_BUSY_X:
+ case ACK_BUSY_A:
+ case ACK_BUSY_B:
+ return -EBUSY;
+
+ case ACK_TYPE_ERROR:
+ return -EACCES;
+
+ case ACK_COMPLETE:
+ if (packet->tcode == TCODE_WRITEQ
+ || packet->tcode == TCODE_WRITEB) {
+ return 0;
+ } else {
+ HPSB_ERR("impossible ack_complete from node %d "
+ "(tcode %d)", packet->node_id, packet->tcode);
+ return -EAGAIN;
+ }
+
+ case ACK_DATA_ERROR:
+ if (packet->tcode == TCODE_WRITEB
+ || packet->tcode == TCODE_LOCK_REQUEST) {
+ return -EAGAIN;
+ } else {
+ HPSB_ERR("impossible ack_data_error from node %d "
+ "(tcode %d)", packet->node_id, packet->tcode);
+ return -EAGAIN;
+ }
+
+ case ACK_ADDRESS_ERROR:
+ return -EINVAL;
+
+ case ACK_TARDY:
+ case ACK_CONFLICT_ERROR:
+ case ACKX_NONE:
+ case ACKX_SEND_ERROR:
+ case ACKX_ABORTED:
+ case ACKX_TIMEOUT:
+ /* error while sending */
+ return -EAGAIN;
+
+ default:
+ HPSB_ERR("got invalid ack %d from node %d (tcode %d)",
+ packet->ack_code, packet->node_id, packet->tcode);
+ return -EAGAIN;
+ }
+
+ HPSB_PANIC("reached unreachable code 2 in %s", __FUNCTION__);
}
struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node,
u64 addr, size_t length)
{
- struct hpsb_packet *packet;
+ struct hpsb_packet *packet;
if (length == 0)
return NULL;
@@ -288,8 +287,9 @@ struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node,
return packet;
}
-struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node,
- u64 addr, quadlet_t *buffer, size_t length)
+struct hpsb_packet *hpsb_make_writepacket(struct hpsb_host *host, nodeid_t node,
+ u64 addr, quadlet_t * buffer,
+ size_t length)
{
struct hpsb_packet *packet;
@@ -300,7 +300,7 @@ struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node
if (!packet)
return NULL;
- if (length % 4) { /* zero padding bytes */
+ if (length % 4) { /* zero padding bytes */
packet->data[length >> 2] = 0;
}
packet->host = host;
@@ -322,8 +322,9 @@ struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node
return packet;
}
-struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer, int length,
- int channel, int tag, int sync)
+struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 * buffer,
+ int length, int channel, int tag,
+ int sync)
{
struct hpsb_packet *packet;
@@ -334,7 +335,7 @@ struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer, i
if (!packet)
return NULL;
- if (length % 4) { /* zero padding bytes */
+ if (length % 4) { /* zero padding bytes */
packet->data[length >> 2] = 0;
}
packet->host = host;
@@ -352,14 +353,15 @@ struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer, i
}
struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node,
- u64 addr, int extcode, quadlet_t *data,
- quadlet_t arg)
+ u64 addr, int extcode,
+ quadlet_t * data, quadlet_t arg)
{
struct hpsb_packet *p;
u32 length;
p = hpsb_alloc_packet(8);
- if (!p) return NULL;
+ if (!p)
+ return NULL;
p->host = host;
p->node_id = node;
@@ -388,15 +390,16 @@ struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node,
return p;
}
-struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host, nodeid_t node,
- u64 addr, int extcode, octlet_t *data,
- octlet_t arg)
+struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host,
+ nodeid_t node, u64 addr, int extcode,
+ octlet_t * data, octlet_t arg)
{
struct hpsb_packet *p;
u32 length;
p = hpsb_alloc_packet(16);
- if (!p) return NULL;
+ if (!p)
+ return NULL;
p->host = host;
p->node_id = node;
@@ -429,18 +432,18 @@ struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host, nodeid_t node
return p;
}
-struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host,
- quadlet_t data)
+struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host, quadlet_t data)
{
- struct hpsb_packet *p;
+ struct hpsb_packet *p;
- p = hpsb_alloc_packet(0);
- if (!p) return NULL;
+ p = hpsb_alloc_packet(0);
+ if (!p)
+ return NULL;
- p->host = host;
- fill_phy_packet(p, data);
+ p->host = host;
+ fill_phy_packet(p, data);
- return p;
+ return p;
}
struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host,
@@ -450,7 +453,8 @@ struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host,
struct hpsb_packet *p;
p = hpsb_alloc_packet(length);
- if (!p) return NULL;
+ if (!p)
+ return NULL;
p->host = host;
fill_iso_packet(p, length, channel, tag, sync);
@@ -466,47 +470,46 @@ struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host,
*/
int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
- u64 addr, quadlet_t *buffer, size_t length)
+ u64 addr, quadlet_t * buffer, size_t length)
{
- struct hpsb_packet *packet;
- int retval = 0;
+ struct hpsb_packet *packet;
+ int retval = 0;
- if (length == 0)
- return -EINVAL;
+ if (length == 0)
+ return -EINVAL;
- BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
+ BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
packet = hpsb_make_readpacket(host, node, addr, length);
- if (!packet) {
- return -ENOMEM;
- }
+ if (!packet) {
+ return -ENOMEM;
+ }
packet->generation = generation;
- retval = hpsb_send_packet_and_wait(packet);
+ retval = hpsb_send_packet_and_wait(packet);
if (retval < 0)
goto hpsb_read_fail;
- retval = hpsb_packet_success(packet);
+ retval = hpsb_packet_success(packet);
- if (retval == 0) {
- if (length == 4) {
- *buffer = packet->header[3];
- } else {
- memcpy(buffer, packet->data, length);
- }
- }
+ if (retval == 0) {
+ if (length == 4) {
+ *buffer = packet->header[3];
+ } else {
+ memcpy(buffer, packet->data, length);
+ }
+ }
-hpsb_read_fail:
- hpsb_free_tlabel(packet);
- hpsb_free_packet(packet);
+ hpsb_read_fail:
+ hpsb_free_tlabel(packet);
+ hpsb_free_packet(packet);
- return retval;
+ return retval;
}
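hpsb_read() above wraps the whole request/response cycle: build a read packet, send it and wait, map the ack and response codes through hpsb_packet_success(), and for a 4-byte read copy the quadlet out of header[3]. It sleeps (note the BUG_ON(in_interrupt())), so callers run in process context. A minimal caller sketch, assuming the ieee1394 headers are included; the retry policy and the helper name are illustrative, and the address would be a real CSR or unit offset in practice:

/* Read one quadlet from a node, retrying a few times on -EAGAIN/-EBUSY.
 * The generation is refreshed on each attempt so stale reads are dropped
 * across bus resets. */
static int read_quadlet_retry(struct hpsb_host *host, nodeid_t node,
                              u64 addr, quadlet_t *value)
{
        int tries, ret = -EAGAIN;

        for (tries = 0; tries < 3 && (ret == -EAGAIN || ret == -EBUSY); tries++)
                ret = hpsb_read(host, node, get_hpsb_generation(host),
                                addr, value, 4);
        return ret;
}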
-
int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
- u64 addr, quadlet_t *buffer, size_t length)
+ u64 addr, quadlet_t * buffer, size_t length)
{
struct hpsb_packet *packet;
int retval;
@@ -514,62 +517,61 @@ int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
if (length == 0)
return -EINVAL;
- BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
+ BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
- packet = hpsb_make_writepacket (host, node, addr, buffer, length);
+ packet = hpsb_make_writepacket(host, node, addr, buffer, length);
if (!packet)
return -ENOMEM;
packet->generation = generation;
- retval = hpsb_send_packet_and_wait(packet);
+ retval = hpsb_send_packet_and_wait(packet);
if (retval < 0)
goto hpsb_write_fail;
- retval = hpsb_packet_success(packet);
+ retval = hpsb_packet_success(packet);
-hpsb_write_fail:
- hpsb_free_tlabel(packet);
- hpsb_free_packet(packet);
+ hpsb_write_fail:
+ hpsb_free_tlabel(packet);
+ hpsb_free_packet(packet);
- return retval;
+ return retval;
}
#if 0
int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
- u64 addr, int extcode, quadlet_t *data, quadlet_t arg)
+ u64 addr, int extcode, quadlet_t * data, quadlet_t arg)
{
- struct hpsb_packet *packet;
- int retval = 0;
+ struct hpsb_packet *packet;
+ int retval = 0;
- BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
+ BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
packet = hpsb_make_lockpacket(host, node, addr, extcode, data, arg);
- if (!packet)
- return -ENOMEM;
+ if (!packet)
+ return -ENOMEM;
packet->generation = generation;
- retval = hpsb_send_packet_and_wait(packet);
+ retval = hpsb_send_packet_and_wait(packet);
if (retval < 0)
goto hpsb_lock_fail;
- retval = hpsb_packet_success(packet);
+ retval = hpsb_packet_success(packet);
- if (retval == 0) {
- *data = packet->data[0];
- }
+ if (retval == 0) {
+ *data = packet->data[0];
+ }
-hpsb_lock_fail:
- hpsb_free_tlabel(packet);
- hpsb_free_packet(packet);
+ hpsb_lock_fail:
+ hpsb_free_tlabel(packet);
+ hpsb_free_packet(packet);
- return retval;
+ return retval;
}
-
int hpsb_send_gasp(struct hpsb_host *host, int channel, unsigned int generation,
- quadlet_t *buffer, size_t length, u32 specifier_id,
+ quadlet_t * buffer, size_t length, u32 specifier_id,
unsigned int version)
{
struct hpsb_packet *packet;
@@ -586,7 +588,8 @@ int hpsb_send_gasp(struct hpsb_host *host, int channel, unsigned int generation,
return -ENOMEM;
packet->data[0] = cpu_to_be32((host->node_id << 16) | specifier_id_hi);
- packet->data[1] = cpu_to_be32((specifier_id_lo << 24) | (version & 0x00ffffff));
+ packet->data[1] =
+ cpu_to_be32((specifier_id_lo << 24) | (version & 0x00ffffff));
memcpy(&(packet->data[2]), buffer, length - 8);
@@ -601,4 +604,4 @@ int hpsb_send_gasp(struct hpsb_host *host, int channel, unsigned int generation,
return retval;
}
-#endif /* 0 */
+#endif /* 0 */
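The reflowed hpsb_send_gasp() hunk just above shows the GASP header layout: the first payload quadlet carries the sender's node ID in its top half with the high bits of the 24-bit specifier ID below it, the second quadlet carries the remaining specifier bits above the 24-bit version, and the caller's data follows from data[2] on (hence the 8 reserved bytes). A sketch of that packing; the 16/8 split of specifier_id into specifier_id_hi/lo is an assumption, since those locals are defined outside the hunk shown:

#include <stdint.h>

/* Pack the two GASP header quadlets in host byte order; hpsb_send_gasp()
 * then converts them with cpu_to_be32() as seen above. */
static void pack_gasp_header(uint16_t node_id, uint32_t specifier_id,
                             uint32_t version, uint32_t hdr[2])
{
        uint32_t spec_hi = (specifier_id >> 8) & 0xffff;  /* assumed split */
        uint32_t spec_lo = specifier_id & 0xff;

        hdr[0] = ((uint32_t)node_id << 16) | spec_hi;
        hdr[1] = (spec_lo << 24) | (version & 0x00ffffff);
}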
diff --git a/drivers/ieee1394/iso.c b/drivers/ieee1394/iso.c
index 615541b8b90..f26680ebef7 100644
--- a/drivers/ieee1394/iso.c
+++ b/drivers/ieee1394/iso.c
@@ -36,20 +36,22 @@ void hpsb_iso_shutdown(struct hpsb_iso *iso)
kfree(iso);
}
-static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_iso_type type,
+static struct hpsb_iso *hpsb_iso_common_init(struct hpsb_host *host,
+ enum hpsb_iso_type type,
unsigned int data_buf_size,
unsigned int buf_packets,
- int channel,
- int dma_mode,
+ int channel, int dma_mode,
int irq_interval,
- void (*callback)(struct hpsb_iso*))
+ void (*callback) (struct hpsb_iso
+ *))
{
struct hpsb_iso *iso;
int dma_direction;
/* make sure driver supports the ISO API */
if (!host->driver->isoctl) {
- printk(KERN_INFO "ieee1394: host driver '%s' does not support the rawiso API\n",
+ printk(KERN_INFO
+ "ieee1394: host driver '%s' does not support the rawiso API\n",
host->driver->name);
return NULL;
}
@@ -59,12 +61,13 @@ static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_i
if (buf_packets < 2)
buf_packets = 2;
- if ((dma_mode < HPSB_ISO_DMA_DEFAULT) || (dma_mode > HPSB_ISO_DMA_PACKET_PER_BUFFER))
- dma_mode=HPSB_ISO_DMA_DEFAULT;
+ if ((dma_mode < HPSB_ISO_DMA_DEFAULT)
+ || (dma_mode > HPSB_ISO_DMA_PACKET_PER_BUFFER))
+ dma_mode = HPSB_ISO_DMA_DEFAULT;
if ((irq_interval < 0) || (irq_interval > buf_packets / 4))
- irq_interval = buf_packets / 4;
- if (irq_interval == 0) /* really interrupt for each packet*/
+ irq_interval = buf_packets / 4;
+ if (irq_interval == 0) /* really interrupt for each packet */
irq_interval = 1;
if (channel < -1 || channel >= 64)
@@ -76,7 +79,10 @@ static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_i
/* allocate and write the struct hpsb_iso */
- iso = kmalloc(sizeof(*iso) + buf_packets * sizeof(struct hpsb_iso_packet_info), GFP_KERNEL);
+ iso =
+ kmalloc(sizeof(*iso) +
+ buf_packets * sizeof(struct hpsb_iso_packet_info),
+ GFP_KERNEL);
if (!iso)
return NULL;
@@ -111,17 +117,18 @@ static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_i
iso->prebuffer = 0;
/* allocate the packet buffer */
- if (dma_region_alloc(&iso->data_buf, iso->buf_size, host->pdev, dma_direction))
+ if (dma_region_alloc
+ (&iso->data_buf, iso->buf_size, host->pdev, dma_direction))
goto err;
return iso;
-err:
+ err:
hpsb_iso_shutdown(iso);
return NULL;
}
-int hpsb_iso_n_ready(struct hpsb_iso* iso)
+int hpsb_iso_n_ready(struct hpsb_iso *iso)
{
unsigned long flags;
int val;
@@ -133,18 +140,19 @@ int hpsb_iso_n_ready(struct hpsb_iso* iso)
return val;
}
-
-struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
+struct hpsb_iso *hpsb_iso_xmit_init(struct hpsb_host *host,
unsigned int data_buf_size,
unsigned int buf_packets,
int channel,
int speed,
int irq_interval,
- void (*callback)(struct hpsb_iso*))
+ void (*callback) (struct hpsb_iso *))
{
struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_XMIT,
data_buf_size, buf_packets,
- channel, HPSB_ISO_DMA_DEFAULT, irq_interval, callback);
+ channel,
+ HPSB_ISO_DMA_DEFAULT,
+ irq_interval, callback);
if (!iso)
return NULL;
@@ -157,22 +165,23 @@ struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
iso->flags |= HPSB_ISO_DRIVER_INIT;
return iso;
-err:
+ err:
hpsb_iso_shutdown(iso);
return NULL;
}
-struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
+struct hpsb_iso *hpsb_iso_recv_init(struct hpsb_host *host,
unsigned int data_buf_size,
unsigned int buf_packets,
int channel,
int dma_mode,
int irq_interval,
- void (*callback)(struct hpsb_iso*))
+ void (*callback) (struct hpsb_iso *))
{
struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_RECV,
data_buf_size, buf_packets,
- channel, dma_mode, irq_interval, callback);
+ channel, dma_mode,
+ irq_interval, callback);
if (!iso)
return NULL;
@@ -183,7 +192,7 @@ struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
iso->flags |= HPSB_ISO_DRIVER_INIT;
return iso;
-err:
+ err:
hpsb_iso_shutdown(iso);
return NULL;
}
@@ -197,16 +206,17 @@ int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel)
int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel)
{
- if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
- return -EINVAL;
- return iso->host->driver->isoctl(iso, RECV_UNLISTEN_CHANNEL, channel);
+ if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
+ return -EINVAL;
+ return iso->host->driver->isoctl(iso, RECV_UNLISTEN_CHANNEL, channel);
}
int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
{
if (iso->type != HPSB_ISO_RECV || iso->channel != -1)
return -EINVAL;
- return iso->host->driver->isoctl(iso, RECV_SET_CHANNEL_MASK, (unsigned long) &mask);
+ return iso->host->driver->isoctl(iso, RECV_SET_CHANNEL_MASK,
+ (unsigned long)&mask);
}
int hpsb_iso_recv_flush(struct hpsb_iso *iso)
@@ -283,7 +293,9 @@ int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
isoctl_args[2] = sync;
- retval = iso->host->driver->isoctl(iso, RECV_START, (unsigned long) &isoctl_args[0]);
+ retval =
+ iso->host->driver->isoctl(iso, RECV_START,
+ (unsigned long)&isoctl_args[0]);
if (retval)
return retval;
@@ -296,7 +308,8 @@ int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
static int hpsb_iso_check_offset_len(struct hpsb_iso *iso,
unsigned int offset, unsigned short len,
- unsigned int *out_offset, unsigned short *out_len)
+ unsigned int *out_offset,
+ unsigned short *out_len)
{
if (offset >= iso->buf_size)
return -EFAULT;
@@ -316,8 +329,8 @@ static int hpsb_iso_check_offset_len(struct hpsb_iso *iso,
return 0;
}
-
-int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag, u8 sy)
+int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len,
+ u8 tag, u8 sy)
{
struct hpsb_iso_packet_info *info;
unsigned long flags;
@@ -334,7 +347,8 @@ int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag
info = &iso->infos[iso->first_packet];
/* check for bogus offset/length */
- if (hpsb_iso_check_offset_len(iso, offset, len, &info->offset, &info->len))
+ if (hpsb_iso_check_offset_len
+ (iso, offset, len, &info->offset, &info->len))
return -EFAULT;
info->tag = tag;
@@ -342,13 +356,13 @@ int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag
spin_lock_irqsave(&iso->lock, flags);
- rv = iso->host->driver->isoctl(iso, XMIT_QUEUE, (unsigned long) info);
+ rv = iso->host->driver->isoctl(iso, XMIT_QUEUE, (unsigned long)info);
if (rv)
goto out;
/* increment cursors */
- iso->first_packet = (iso->first_packet+1) % iso->buf_packets;
- iso->xmit_cycle = (iso->xmit_cycle+1) % 8000;
+ iso->first_packet = (iso->first_packet + 1) % iso->buf_packets;
+ iso->xmit_cycle = (iso->xmit_cycle + 1) % 8000;
iso->n_ready_packets--;
if (iso->prebuffer != 0) {
@@ -359,7 +373,7 @@ int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag
}
}
-out:
+ out:
spin_unlock_irqrestore(&iso->lock, flags);
return rv;
}
@@ -369,7 +383,9 @@ int hpsb_iso_xmit_sync(struct hpsb_iso *iso)
if (iso->type != HPSB_ISO_XMIT)
return -EINVAL;
- return wait_event_interruptible(iso->waitq, hpsb_iso_n_ready(iso) == iso->buf_packets);
+ return wait_event_interruptible(iso->waitq,
+ hpsb_iso_n_ready(iso) ==
+ iso->buf_packets);
}
void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error)
@@ -396,7 +412,8 @@ void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error)
}
void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
- u16 total_len, u16 cycle, u8 channel, u8 tag, u8 sy)
+ u16 total_len, u16 cycle, u8 channel, u8 tag,
+ u8 sy)
{
unsigned long flags;
spin_lock_irqsave(&iso->lock, flags);
@@ -416,7 +433,7 @@ void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
info->tag = tag;
info->sy = sy;
- iso->pkt_dma = (iso->pkt_dma+1) % iso->buf_packets;
+ iso->pkt_dma = (iso->pkt_dma + 1) % iso->buf_packets;
iso->n_ready_packets++;
}
@@ -435,20 +452,21 @@ int hpsb_iso_recv_release_packets(struct hpsb_iso *iso, unsigned int n_packets)
spin_lock_irqsave(&iso->lock, flags);
for (i = 0; i < n_packets; i++) {
rv = iso->host->driver->isoctl(iso, RECV_RELEASE,
- (unsigned long) &iso->infos[iso->first_packet]);
+ (unsigned long)&iso->infos[iso->
+ first_packet]);
if (rv)
break;
- iso->first_packet = (iso->first_packet+1) % iso->buf_packets;
+ iso->first_packet = (iso->first_packet + 1) % iso->buf_packets;
iso->n_ready_packets--;
/* release memory from packets discarded when queue was full */
- if (iso->n_ready_packets == 0) { /* Release only after all prior packets handled */
+ if (iso->n_ready_packets == 0) { /* Release only after all prior packets handled */
if (iso->bytes_discarded != 0) {
struct hpsb_iso_packet_info inf;
inf.total_len = iso->bytes_discarded;
iso->host->driver->isoctl(iso, RECV_RELEASE,
- (unsigned long) &inf);
+ (unsigned long)&inf);
iso->bytes_discarded = 0;
}
}
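Several of the reindented iso.c hunks are just the ring-buffer bookkeeping the rawiso layer does per packet: the first_packet and pkt_dma cursors advance modulo buf_packets, the transmit cycle counter wraps modulo 8000 (the bus delivers 8000 isochronous cycles per second), and n_ready_packets counts the slots the application currently owns. A reduced sketch of the transmit-side bookkeeping; the struct and function names are illustrative, not the driver's:

struct iso_ring {
        unsigned int buf_packets;       /* ring size in packets */
        unsigned int first_packet;      /* next slot the application fills */
        unsigned int xmit_cycle;        /* 1394 cycle number, 0..7999 */
        int n_ready_packets;            /* slots currently owned by the app */
};

/* Advance the cursors after queueing one transmit packet, mirroring
 * hpsb_iso_xmit_queue_packet() above. */
static void iso_ring_advance(struct iso_ring *r)
{
        r->first_packet = (r->first_packet + 1) % r->buf_packets;
        r->xmit_cycle = (r->xmit_cycle + 1) % 8000;
        r->n_ready_packets--;
}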
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index f2453668acf..082c7fd239f 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -743,21 +743,20 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr
unsigned int generation)
{
struct hpsb_host *host = hi->host;
- struct node_entry *ne;
-
- ne = kmalloc(sizeof(struct node_entry), GFP_KERNEL);
- if (!ne) return NULL;
+ struct node_entry *ne;
- memset(ne, 0, sizeof(struct node_entry));
+ ne = kzalloc(sizeof(*ne), GFP_KERNEL);
+ if (!ne)
+ return NULL;
ne->tpool = &host->tpool[nodeid & NODE_MASK];
- ne->host = host;
- ne->nodeid = nodeid;
+ ne->host = host;
+ ne->nodeid = nodeid;
ne->generation = generation;
ne->needs_probe = 1;
- ne->guid = guid;
+ ne->guid = guid;
ne->guid_vendor_id = (guid >> 40) & 0xffffff;
ne->guid_vendor_oui = nodemgr_find_oui_name(ne->guid_vendor_id);
ne->csr = csr;
@@ -787,7 +786,7 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr
(host->node_id == nodeid) ? "Host" : "Node",
NODE_BUS_ARGS(host, nodeid), (unsigned long long)guid);
- return ne;
+ return ne;
}
@@ -872,12 +871,10 @@ static struct unit_directory *nodemgr_process_unit_directory
struct csr1212_keyval *kv;
u8 last_key_id = 0;
- ud = kmalloc(sizeof(struct unit_directory), GFP_KERNEL);
+ ud = kzalloc(sizeof(*ud), GFP_KERNEL);
if (!ud)
goto unit_directory_error;
- memset (ud, 0, sizeof(struct unit_directory));
-
ud->ne = ne;
ud->ignore_driver = ignore_drivers;
ud->address = ud_kv->offset + CSR1212_CONFIG_ROM_SPACE_BASE;
@@ -937,10 +934,10 @@ static struct unit_directory *nodemgr_process_unit_directory
/* Logical Unit Number */
if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
if (ud->flags & UNIT_DIRECTORY_HAS_LUN) {
- ud_child = kmalloc(sizeof(struct unit_directory), GFP_KERNEL);
+ ud_child = kmalloc(sizeof(*ud_child), GFP_KERNEL);
if (!ud_child)
goto unit_directory_error;
- memcpy(ud_child, ud, sizeof(struct unit_directory));
+ memcpy(ud_child, ud, sizeof(*ud_child));
nodemgr_register_device(ne, ud_child, &ne->device);
ud_child = NULL;
@@ -1200,7 +1197,7 @@ static void nodemgr_node_scan_one(struct host_info *hi,
struct csr1212_csr *csr;
struct nodemgr_csr_info *ci;
- ci = kmalloc(sizeof(struct nodemgr_csr_info), GFP_KERNEL);
+ ci = kmalloc(sizeof(*ci), GFP_KERNEL);
if (!ci)
return;
@@ -1410,14 +1407,28 @@ static void nodemgr_node_probe(struct host_info *hi, int generation)
struct hpsb_host *host = hi->host;
struct class *class = &nodemgr_ne_class;
struct class_device *cdev;
+ struct node_entry *ne;
/* Do some processing of the nodes we've probed. This pulls them
* into the sysfs layer if needed, and can result in processing of
* unit-directories, or just updating the node and it's
- * unit-directories. */
+ * unit-directories.
+ *
+ * Run updates before probes. Usually, updates are time-critical
+ * while probes are time-consuming. (Well, those probes need some
+ * improvement...) */
+
down_read(&class->subsys.rwsem);
- list_for_each_entry(cdev, &class->children, node)
- nodemgr_probe_ne(hi, container_of(cdev, struct node_entry, class_dev), generation);
+ list_for_each_entry(cdev, &class->children, node) {
+ ne = container_of(cdev, struct node_entry, class_dev);
+ if (!ne->needs_probe)
+ nodemgr_probe_ne(hi, ne, generation);
+ }
+ list_for_each_entry(cdev, &class->children, node) {
+ ne = container_of(cdev, struct node_entry, class_dev);
+ if (ne->needs_probe)
+ nodemgr_probe_ne(hi, ne, generation);
+ }
up_read(&class->subsys.rwsem);
@@ -1448,7 +1459,8 @@ static int nodemgr_send_resume_packet(struct hpsb_host *host)
int ret = 1;
packet = hpsb_make_phypacket(host,
- 0x003c0000 | NODEID_TO_NODE(host->node_id) << 24);
+ EXTPHYPACKET_TYPE_RESUME |
+ NODEID_TO_NODE(host->node_id) << PHYPACKET_PORT_SHIFT);
if (packet) {
packet->no_waiter = 1;
packet->generation = get_hpsb_generation(host);
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h
index 3a2f0c02fd0..0b26616e16c 100644
--- a/drivers/ieee1394/nodemgr.h
+++ b/drivers/ieee1394/nodemgr.h
@@ -151,24 +151,6 @@ static inline int hpsb_node_entry_valid(struct node_entry *ne)
}
/*
- * Returns a node entry (which has its reference count incremented) or NULL if
- * the GUID in question is not known. Getting a valid entry does not mean that
- * the node with this GUID is currently accessible (might be powered down).
- */
-struct node_entry *hpsb_guid_get_entry(u64 guid);
-
-/* Same as above, but use the nodeid to get an node entry. This is not
- * fool-proof by itself, since the nodeid can change. */
-struct node_entry *hpsb_nodeid_get_entry(struct hpsb_host *host, nodeid_t nodeid);
-
-/*
- * If the entry refers to a local host, this function will return the pointer
- * to the hpsb_host structure. It will return NULL otherwise. Once you have
- * established it is a local host, you can use that knowledge from then on (the
- * GUID won't wander to an external node). */
-struct hpsb_host *hpsb_get_host_by_ne(struct node_entry *ne);
-
-/*
* This will fill in the given, pre-initialised hpsb_packet with the current
* information from the node entry (host, node ID, generation number). It will
* return false if the node owning the GUID is not accessible (and not modify the
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 4cf9b8f3e33..b6b96fa04d6 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -161,9 +161,6 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
#define PRINT(level, fmt, args...) \
printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
-static char version[] __devinitdata =
- "$Rev: 1313 $ Ben Collins <bcollins@debian.org>";
-
/* Module Parameters */
static int phys_dma = 1;
module_param(phys_dma, int, 0644);
@@ -587,12 +584,13 @@ static void ohci_initialize(struct ti_ohci *ohci)
sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
#endif
PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s] "
- "MMIO=[%lx-%lx] Max Packet=[%d]",
+ "MMIO=[%lx-%lx] Max Packet=[%d] IR/IT contexts=[%d/%d]",
((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
pci_resource_start(ohci->dev, 0),
pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
- ohci->max_packet_size);
+ ohci->max_packet_size,
+ ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);
/* Check all of our ports to make sure that if anything is
* connected, we enable that port. */
@@ -2960,28 +2958,23 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
d->ctrlClear = 0;
d->cmdPtr = 0;
- d->buf_cpu = kmalloc(d->num_desc * sizeof(quadlet_t*), GFP_ATOMIC);
- d->buf_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC);
+ d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
+ d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
if (d->buf_cpu == NULL || d->buf_bus == NULL) {
PRINT(KERN_ERR, "Failed to allocate dma buffer");
free_dma_rcv_ctx(d);
return -ENOMEM;
}
- memset(d->buf_cpu, 0, d->num_desc * sizeof(quadlet_t*));
- memset(d->buf_bus, 0, d->num_desc * sizeof(dma_addr_t));
- d->prg_cpu = kmalloc(d->num_desc * sizeof(struct dma_cmd*),
- GFP_ATOMIC);
- d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC);
+ d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
+ d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
if (d->prg_cpu == NULL || d->prg_bus == NULL) {
PRINT(KERN_ERR, "Failed to allocate dma prg");
free_dma_rcv_ctx(d);
return -ENOMEM;
}
- memset(d->prg_cpu, 0, d->num_desc * sizeof(struct dma_cmd*));
- memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
@@ -3093,17 +3086,14 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
d->ctrlClear = 0;
d->cmdPtr = 0;
- d->prg_cpu = kmalloc(d->num_desc * sizeof(struct at_dma_prg*),
- GFP_KERNEL);
- d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
+ d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
+ d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
if (d->prg_cpu == NULL || d->prg_bus == NULL) {
PRINT(KERN_ERR, "Failed to allocate at dma prg");
free_dma_trm_ctx(d);
return -ENOMEM;
}
- memset(d->prg_cpu, 0, d->num_desc * sizeof(struct at_dma_prg*));
- memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
len = sprintf(pool_name, "ohci1394_trm_prg");
sprintf(pool_name+len, "%d", num_allocs);
@@ -3201,8 +3191,6 @@ static struct hpsb_host_driver ohci1394_driver = {
.hw_csr_reg = ohci_hw_csr_reg,
};
-
-
/***********************************
* PCI Driver Interface functions *
***********************************/
@@ -3217,15 +3205,10 @@ do { \
static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
- static int version_printed = 0;
-
struct hpsb_host *host;
struct ti_ohci *ohci; /* shortcut to currently handled device */
unsigned long ohci_base;
- if (version_printed++ == 0)
- PRINT_G(KERN_INFO, "%s", version);
-
if (pci_enable_device(dev))
FAIL(-ENXIO, "Failed to enable OHCI hardware");
pci_set_master(dev);
@@ -3369,13 +3352,8 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
/* Determine the number of available IR and IT contexts. */
ohci->nb_iso_rcv_ctx =
get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
- DBGMSG("%d iso receive contexts available",
- ohci->nb_iso_rcv_ctx);
-
ohci->nb_iso_xmit_ctx =
get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
- DBGMSG("%d iso transmit contexts available",
- ohci->nb_iso_xmit_ctx);
/* Set the usage bits for non-existent contexts so they can't
* be allocated */
@@ -3606,8 +3584,6 @@ static struct pci_driver ohci1394_pci_driver = {
.suspend = ohci1394_pci_suspend,
};
-
-
/***********************************
* OHCI1394 Video Interface *
***********************************/
@@ -3714,7 +3690,6 @@ EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
-
/***********************************
* General module initialization *
***********************************/
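The allocation hunks above (and the matching ones in nodemgr.c above and raw1394.c below) are all the same conversion: kmalloc() followed by memset(..., 0, ...) collapses into kzalloc(), and sizeof(type) becomes sizeof(*ptr) so the size stays right if the pointer's type ever changes. A minimal before/after sketch of the pattern with a hypothetical struct:

#include <linux/slab.h>
#include <linux/string.h>

struct item {
        int v;
};

/* Before: two calls, size spelled out by type name. */
static struct item *alloc_items_old(size_t n)
{
        struct item *p = kmalloc(n * sizeof(struct item), GFP_KERNEL);

        if (p)
                memset(p, 0, n * sizeof(struct item));
        return p;
}

/* After: one zeroing allocation, sized from the pointer itself. */
static struct item *alloc_items_new(size_t n)
{
        struct item *p = kzalloc(n * sizeof(*p), GFP_KERNEL);

        return p;
}

kcalloc(n, sizeof(*p), GFP_KERNEL) would do the same while also checking the multiplication for overflow.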
diff --git a/drivers/ieee1394/ohci1394.h b/drivers/ieee1394/ohci1394.h
index cc66c1cae25..7df0962144e 100644
--- a/drivers/ieee1394/ohci1394.h
+++ b/drivers/ieee1394/ohci1394.h
@@ -219,8 +219,8 @@ struct ti_ohci {
int self_id_errors;
- /* Tasklets for iso receive and transmit, used by video1394,
- * amdtp and dv1394 */
+ /* Tasklets for iso receive and transmit, used by video1394
+ * and dv1394 */
struct list_head iso_tasklet_list;
spinlock_t iso_tasklet_list_lock;
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
index 6b1ab875333..e2edc41e1b6 100644
--- a/drivers/ieee1394/pcilynx.c
+++ b/drivers/ieee1394/pcilynx.c
@@ -1435,7 +1435,7 @@ static int __devinit add_card(struct pci_dev *dev,
struct i2c_algo_bit_data i2c_adapter_data;
error = -ENOMEM;
- i2c_ad = kmalloc(sizeof(struct i2c_adapter), SLAB_KERNEL);
+ i2c_ad = kmalloc(sizeof(*i2c_ad), SLAB_KERNEL);
if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter));
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 24411e666b2..b0523563991 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -102,12 +102,9 @@ static struct pending_request *__alloc_pending_request(gfp_t flags)
{
struct pending_request *req;
- req = (struct pending_request *)kmalloc(sizeof(struct pending_request),
- flags);
- if (req != NULL) {
- memset(req, 0, sizeof(struct pending_request));
+ req = kzalloc(sizeof(*req), flags);
+ if (req)
INIT_LIST_HEAD(&req->list);
- }
return req;
}
@@ -192,9 +189,9 @@ static void add_host(struct hpsb_host *host)
struct host_info *hi;
unsigned long flags;
- hi = (struct host_info *)kmalloc(sizeof(struct host_info), GFP_KERNEL);
+ hi = kmalloc(sizeof(*hi), GFP_KERNEL);
- if (hi != NULL) {
+ if (hi) {
INIT_LIST_HEAD(&hi->list);
hi->host = host;
INIT_LIST_HEAD(&hi->file_info_list);
@@ -315,8 +312,8 @@ static void iso_receive(struct hpsb_host *host, int channel, quadlet_t * data,
break;
if (!ibs) {
- ibs = kmalloc(sizeof(struct iso_block_store)
- + length, SLAB_ATOMIC);
+ ibs = kmalloc(sizeof(*ibs) + length,
+ SLAB_ATOMIC);
if (!ibs) {
kfree(req);
break;
@@ -376,8 +373,8 @@ static void fcp_request(struct hpsb_host *host, int nodeid, int direction,
break;
if (!ibs) {
- ibs = kmalloc(sizeof(struct iso_block_store)
- + length, SLAB_ATOMIC);
+ ibs = kmalloc(sizeof(*ibs) + length,
+ SLAB_ATOMIC);
if (!ibs) {
kfree(req);
break;
@@ -502,10 +499,9 @@ static int state_initialized(struct file_info *fi, struct pending_request *req)
switch (req->req.type) {
case RAW1394_REQ_LIST_CARDS:
spin_lock_irqsave(&host_info_lock, flags);
- khl = kmalloc(sizeof(struct raw1394_khost_list) * host_count,
- SLAB_ATOMIC);
+ khl = kmalloc(sizeof(*khl) * host_count, SLAB_ATOMIC);
- if (khl != NULL) {
+ if (khl) {
req->req.misc = host_count;
req->data = (quadlet_t *) khl;
@@ -517,7 +513,7 @@ static int state_initialized(struct file_info *fi, struct pending_request *req)
}
spin_unlock_irqrestore(&host_info_lock, flags);
- if (khl != NULL) {
+ if (khl) {
req->req.error = RAW1394_ERROR_NONE;
req->req.length = min(req->req.length,
(u32) (sizeof
@@ -1647,13 +1643,13 @@ static int arm_register(struct file_info *fi, struct pending_request *req)
return (-EINVAL);
}
/* addr-list-entry for fileinfo */
- addr = (struct arm_addr *)kmalloc(sizeof(struct arm_addr), SLAB_KERNEL);
+ addr = kmalloc(sizeof(*addr), SLAB_KERNEL);
if (!addr) {
req->req.length = 0;
return (-ENOMEM);
}
/* allocation of addr_space_buffer */
- addr->addr_space_buffer = (u8 *) vmalloc(req->req.length);
+ addr->addr_space_buffer = vmalloc(req->req.length);
if (!(addr->addr_space_buffer)) {
kfree(addr);
req->req.length = 0;
@@ -2122,8 +2118,7 @@ static int modify_config_rom(struct file_info *fi, struct pending_request *req)
return -ENOMEM;
}
- cache->filled_head =
- kmalloc(sizeof(struct csr1212_cache_region), GFP_KERNEL);
+ cache->filled_head = kmalloc(sizeof(*cache->filled_head), GFP_KERNEL);
if (!cache->filled_head) {
csr1212_release_keyval(fi->csr1212_dirs[dr]);
fi->csr1212_dirs[dr] = NULL;
@@ -2136,7 +2131,6 @@ static int modify_config_rom(struct file_info *fi, struct pending_request *req)
req->req.length)) {
csr1212_release_keyval(fi->csr1212_dirs[dr]);
fi->csr1212_dirs[dr] = NULL;
- CSR1212_FREE(cache);
ret = -EFAULT;
} else {
cache->len = req->req.length;
@@ -2172,7 +2166,7 @@ static int modify_config_rom(struct file_info *fi, struct pending_request *req)
}
}
kfree(cache->filled_head);
- kfree(cache);
+ CSR1212_FREE(cache);
if (ret >= 0) {
/* we have to free the request, because we queue no response,
@@ -2488,8 +2482,8 @@ static int raw1394_iso_recv_packets(struct file_info *fi, void __user * uaddr)
/* ensure user-supplied buffer is accessible and big enough */
if (!access_ok(VERIFY_WRITE, upackets.infos,
- upackets.n_packets *
- sizeof(struct raw1394_iso_packet_info)))
+ upackets.n_packets *
+ sizeof(struct raw1394_iso_packet_info)))
return -EFAULT;
/* copy the packet_infos out */
@@ -2522,8 +2516,8 @@ static int raw1394_iso_send_packets(struct file_info *fi, void __user * uaddr)
/* ensure user-supplied buffer is accessible and big enough */
if (!access_ok(VERIFY_READ, upackets.infos,
- upackets.n_packets *
- sizeof(struct raw1394_iso_packet_info)))
+ upackets.n_packets *
+ sizeof(struct raw1394_iso_packet_info)))
return -EFAULT;
/* copy the infos structs in and queue the packets */
@@ -2684,11 +2678,10 @@ static int raw1394_open(struct inode *inode, struct file *file)
{
struct file_info *fi;
- fi = kmalloc(sizeof(struct file_info), SLAB_KERNEL);
- if (fi == NULL)
+ fi = kzalloc(sizeof(*fi), SLAB_KERNEL);
+ if (!fi)
return -ENOMEM;
- memset(fi, 0, sizeof(struct file_info));
fi->notification = (u8) RAW1394_NOTIFY_ON; /* busreset notification */
INIT_LIST_HEAD(&fi->list);
@@ -2748,8 +2741,7 @@ static int raw1394_release(struct inode *inode, struct file *file)
list) {
entry = fi_hlp->addr_list.next;
while (entry != &(fi_hlp->addr_list)) {
- arm_addr = list_entry(entry,
- struct
+ arm_addr = list_entry(entry, struct
arm_addr,
addr_list);
if (arm_addr->start ==
@@ -2912,16 +2904,17 @@ static int __init init_raw1394(void)
hpsb_register_highlevel(&raw1394_highlevel);
- if (IS_ERR(class_device_create(hpsb_protocol_class, NULL, MKDEV(
- IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16),
- NULL, RAW1394_DEVICE_NAME))) {
+ if (IS_ERR
+ (class_device_create
+ (hpsb_protocol_class, NULL,
+ MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16), NULL,
+ RAW1394_DEVICE_NAME))) {
ret = -EFAULT;
goto out_unreg;
}
-
- devfs_mk_cdev(MKDEV(
- IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16),
- S_IFCHR | S_IRUSR | S_IWUSR, RAW1394_DEVICE_NAME);
+
+ devfs_mk_cdev(MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16),
+ S_IFCHR | S_IRUSR | S_IWUSR, RAW1394_DEVICE_NAME);
cdev_init(&raw1394_cdev, &raw1394_fops);
raw1394_cdev.owner = THIS_MODULE;
@@ -2943,20 +2936,22 @@ static int __init init_raw1394(void)
goto out;
-out_dev:
+ out_dev:
devfs_remove(RAW1394_DEVICE_NAME);
class_device_destroy(hpsb_protocol_class,
- MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16));
-out_unreg:
+ MKDEV(IEEE1394_MAJOR,
+ IEEE1394_MINOR_BLOCK_RAW1394 * 16));
+ out_unreg:
hpsb_unregister_highlevel(&raw1394_highlevel);
-out:
+ out:
return ret;
}
static void __exit cleanup_raw1394(void)
{
class_device_destroy(hpsb_protocol_class,
- MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16));
+ MKDEV(IEEE1394_MAJOR,
+ IEEE1394_MINOR_BLOCK_RAW1394 * 16));
cdev_del(&raw1394_cdev);
devfs_remove(RAW1394_DEVICE_NAME);
hpsb_unregister_highlevel(&raw1394_highlevel);
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index f7e18ccc5c0..18d7eda3885 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -80,9 +80,6 @@
#include "ieee1394_transactions.h"
#include "sbp2.h"
-static char version[] __devinitdata =
- "$Rev: 1306 $ Ben Collins <bcollins@debian.org>";
-
/*
* Module load parameter definitions
*/
@@ -151,18 +148,15 @@ static int force_inquiry_hack;
module_param(force_inquiry_hack, int, 0444);
MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)");
-
/*
* Export information about protocols/devices supported by this driver.
*/
static struct ieee1394_device_id sbp2_id_table[] = {
{
- .match_flags =IEEE1394_MATCH_SPECIFIER_ID |
- IEEE1394_MATCH_VERSION,
- .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
- .version = SBP2_SW_VERSION_ENTRY & 0xffffff
- },
- { }
+ .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
+ .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
+ .version = SBP2_SW_VERSION_ENTRY & 0xffffff},
+ {}
};
MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
@@ -221,7 +215,6 @@ static u32 global_outstanding_dmas = 0;
#define SBP2_ERR(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
-
/*
* Globals
*/
@@ -254,8 +247,8 @@ static struct hpsb_address_ops sbp2_ops = {
#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
static struct hpsb_address_ops sbp2_physdma_ops = {
- .read = sbp2_handle_physdma_read,
- .write = sbp2_handle_physdma_write,
+ .read = sbp2_handle_physdma_read,
+ .write = sbp2_handle_physdma_write,
};
#endif
@@ -287,7 +280,6 @@ static u32 sbp2_broken_inquiry_list[] = {
* General utility functions
**************************************/
-
#ifndef __BIG_ENDIAN
/*
* Converts a buffer from be32 to cpu byte ordering. Length is in bytes.
@@ -324,7 +316,8 @@ static __inline__ void sbp2util_cpu_to_be32_buffer(void *buffer, int length)
/*
* Debug packet dump routine. Length is in bytes.
*/
-static void sbp2util_packet_dump(void *buffer, int length, char *dump_name, u32 dump_phys_addr)
+static void sbp2util_packet_dump(void *buffer, int length, char *dump_name,
+ u32 dump_phys_addr)
{
int i;
unsigned char *dump = buffer;
@@ -345,7 +338,7 @@ static void sbp2util_packet_dump(void *buffer, int length, char *dump_name, u32
printk(" ");
if ((i & 0xf) == 0)
printk("\n ");
- printk("%02x ", (int) dump[i]);
+ printk("%02x ", (int)dump[i]);
}
printk("\n");
@@ -364,9 +357,9 @@ static int sbp2util_down_timeout(atomic_t *done, int timeout)
for (i = timeout; (i > 0 && atomic_read(done) == 0); i-= HZ/10) {
if (msleep_interruptible(100)) /* 100ms */
- return(1);
+ return 1;
}
- return ((i > 0) ? 0:1);
+ return (i > 0) ? 0 : 1;
}
/* Free's an allocated packet */
@@ -380,21 +373,22 @@ static void sbp2_free_packet(struct hpsb_packet *packet)
* subaction and returns immediately. Can be used from interrupts.
*/
static int sbp2util_node_write_no_wait(struct node_entry *ne, u64 addr,
- quadlet_t *buffer, size_t length)
+ quadlet_t *buffer, size_t length)
{
struct hpsb_packet *packet;
packet = hpsb_make_writepacket(ne->host, ne->nodeid,
addr, buffer, length);
- if (!packet)
- return -ENOMEM;
+ if (!packet)
+ return -ENOMEM;
- hpsb_set_packet_complete_task(packet, (void (*)(void*))sbp2_free_packet,
+ hpsb_set_packet_complete_task(packet,
+ (void (*)(void *))sbp2_free_packet,
packet);
hpsb_node_fill_packet(ne, packet);
- if (hpsb_send_packet(packet) < 0) {
+ if (hpsb_send_packet(packet) < 0) {
sbp2_free_packet(packet);
return -EIO;
}
@@ -417,22 +411,22 @@ static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_i
spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
for (i = 0; i < orbs; i++) {
- command = (struct sbp2_command_info *)
- kmalloc(sizeof(struct sbp2_command_info), GFP_ATOMIC);
+ command = kzalloc(sizeof(*command), GFP_ATOMIC);
if (!command) {
- spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
- return(-ENOMEM);
+ spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock,
+ flags);
+ return -ENOMEM;
}
- memset(command, '\0', sizeof(struct sbp2_command_info));
command->command_orb_dma =
- pci_map_single (hi->host->pdev, &command->command_orb,
- sizeof(struct sbp2_command_orb),
- PCI_DMA_BIDIRECTIONAL);
+ pci_map_single(hi->host->pdev, &command->command_orb,
+ sizeof(struct sbp2_command_orb),
+ PCI_DMA_BIDIRECTIONAL);
SBP2_DMA_ALLOC("single command orb DMA");
command->sge_dma =
- pci_map_single (hi->host->pdev, &command->scatter_gather_element,
- sizeof(command->scatter_gather_element),
- PCI_DMA_BIDIRECTIONAL);
+ pci_map_single(hi->host->pdev,
+ &command->scatter_gather_element,
+ sizeof(command->scatter_gather_element),
+ PCI_DMA_BIDIRECTIONAL);
SBP2_DMA_ALLOC("scatter_gather_element");
INIT_LIST_HEAD(&command->list);
list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
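The allocation change in this hunk is the recurring kmalloc-plus-memset to kzalloc conversion seen throughout the patch. A minimal before/after sketch with a hypothetical structure name:

        struct foo *p;

        /* before: two calls, and the memset size must be kept in sync */
        p = kmalloc(sizeof(struct foo), GFP_ATOMIC);
        if (!p)
                return -ENOMEM;
        memset(p, 0, sizeof(struct foo));

        /* after: one call that returns zeroed memory, or NULL on failure */
        p = kzalloc(sizeof(*p), GFP_ATOMIC);
        if (!p)
                return -ENOMEM;

Using sizeof(*p) instead of sizeof(struct foo) also keeps the size correct if the pointer's type is ever changed.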
@@ -488,7 +482,7 @@ static struct sbp2_command_info *sbp2util_find_command_for_orb(
list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) {
if (command->command_orb_dma == orb) {
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
- return (command);
+ return command;
}
}
}
@@ -496,7 +490,7 @@ static struct sbp2_command_info *sbp2util_find_command_for_orb(
SBP2_ORB_DEBUG("could not match command orb %x", (unsigned int)orb);
- return(NULL);
+ return NULL;
}
/*
@@ -513,12 +507,12 @@ static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(struct scsi_id_
list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) {
if (command->Current_SCpnt == SCpnt) {
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
- return (command);
+ return command;
}
}
}
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
- return(NULL);
+ return NULL;
}
/*
@@ -545,7 +539,7 @@ static struct sbp2_command_info *sbp2util_allocate_command_orb(
SBP2_ERR("sbp2util_allocate_command_orb - No orbs available!");
}
spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
- return (command);
+ return command;
}
/* Free our DMAs */
@@ -587,7 +581,8 @@ static void sbp2util_free_command_dma(struct sbp2_command_info *command)
/*
* This function moves a command to the completed orb list.
*/
-static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_id, struct sbp2_command_info *command)
+static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_id,
+ struct sbp2_command_info *command)
{
unsigned long flags;
@@ -606,8 +601,6 @@ static inline int sbp2util_node_is_available(struct scsi_id_instance_data *scsi_
return scsi_id && scsi_id->ne && !scsi_id->ne->in_limbo;
}
-
-
/*********************************************
* IEEE-1394 core driver stack related section
*********************************************/
@@ -627,14 +620,14 @@ static int sbp2_probe(struct device *dev)
if (ud->flags & UNIT_DIRECTORY_HAS_LUN_DIRECTORY)
return -ENODEV;
- scsi_id = sbp2_alloc_device(ud);
+ scsi_id = sbp2_alloc_device(ud);
- if (!scsi_id)
- return -ENOMEM;
+ if (!scsi_id)
+ return -ENOMEM;
- sbp2_parse_unit_directory(scsi_id, ud);
+ sbp2_parse_unit_directory(scsi_id, ud);
- return sbp2_start_device(scsi_id);
+ return sbp2_start_device(scsi_id);
}
static int sbp2_remove(struct device *dev)
@@ -719,12 +712,11 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
SBP2_DEBUG("sbp2_alloc_device");
- scsi_id = kmalloc(sizeof(*scsi_id), GFP_KERNEL);
+ scsi_id = kzalloc(sizeof(*scsi_id), GFP_KERNEL);
if (!scsi_id) {
SBP2_ERR("failed to create scsi_id");
goto failed_alloc;
}
- memset(scsi_id, 0, sizeof(*scsi_id));
scsi_id->ne = ud->ne;
scsi_id->ud = ud;
@@ -735,7 +727,7 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_completed);
INIT_LIST_HEAD(&scsi_id->scsi_list);
spin_lock_init(&scsi_id->sbp2_command_orb_lock);
- scsi_id->sbp2_device_type_and_lun = SBP2_DEVICE_TYPE_LUN_UNINITIALIZED;
+ scsi_id->sbp2_lun = 0;
ud->device.driver_data = scsi_id;
@@ -769,7 +761,7 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
/* Register our host with the SCSI stack. */
scsi_host = scsi_host_alloc(&scsi_driver_template,
- sizeof (unsigned long));
+ sizeof(unsigned long));
if (!scsi_host) {
SBP2_ERR("failed to register scsi host");
goto failed_alloc;
@@ -790,7 +782,6 @@ failed_alloc:
return NULL;
}
-
static void sbp2_host_reset(struct hpsb_host *host)
{
struct sbp2scsi_host_info *hi;
@@ -804,7 +795,6 @@ static void sbp2_host_reset(struct hpsb_host *host)
}
}
-
/*
* This function is where we first pull the node unique ids, and then
* allocate memory and register a SBP-2 device.
@@ -818,7 +808,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
/* Login FIFO DMA */
scsi_id->login_response =
- pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_login_response),
+ pci_alloc_consistent(hi->host->pdev,
+ sizeof(struct sbp2_login_response),
&scsi_id->login_response_dma);
if (!scsi_id->login_response)
goto alloc_fail;
@@ -826,7 +817,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
/* Query logins ORB DMA */
scsi_id->query_logins_orb =
- pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_query_logins_orb),
+ pci_alloc_consistent(hi->host->pdev,
+ sizeof(struct sbp2_query_logins_orb),
&scsi_id->query_logins_orb_dma);
if (!scsi_id->query_logins_orb)
goto alloc_fail;
@@ -834,7 +826,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
/* Query logins response DMA */
scsi_id->query_logins_response =
- pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_query_logins_response),
+ pci_alloc_consistent(hi->host->pdev,
+ sizeof(struct sbp2_query_logins_response),
&scsi_id->query_logins_response_dma);
if (!scsi_id->query_logins_response)
goto alloc_fail;
@@ -842,7 +835,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
/* Reconnect ORB DMA */
scsi_id->reconnect_orb =
- pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_reconnect_orb),
+ pci_alloc_consistent(hi->host->pdev,
+ sizeof(struct sbp2_reconnect_orb),
&scsi_id->reconnect_orb_dma);
if (!scsi_id->reconnect_orb)
goto alloc_fail;
@@ -850,7 +844,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
/* Logout ORB DMA */
scsi_id->logout_orb =
- pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_logout_orb),
+ pci_alloc_consistent(hi->host->pdev,
+ sizeof(struct sbp2_logout_orb),
&scsi_id->logout_orb_dma);
if (!scsi_id->logout_orb)
goto alloc_fail;
@@ -858,58 +853,11 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
/* Login ORB DMA */
scsi_id->login_orb =
- pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_login_orb),
+ pci_alloc_consistent(hi->host->pdev,
+ sizeof(struct sbp2_login_orb),
&scsi_id->login_orb_dma);
- if (!scsi_id->login_orb) {
-alloc_fail:
- if (scsi_id->query_logins_response) {
- pci_free_consistent(hi->host->pdev,
- sizeof(struct sbp2_query_logins_response),
- scsi_id->query_logins_response,
- scsi_id->query_logins_response_dma);
- SBP2_DMA_FREE("query logins response DMA");
- }
-
- if (scsi_id->query_logins_orb) {
- pci_free_consistent(hi->host->pdev,
- sizeof(struct sbp2_query_logins_orb),
- scsi_id->query_logins_orb,
- scsi_id->query_logins_orb_dma);
- SBP2_DMA_FREE("query logins ORB DMA");
- }
-
- if (scsi_id->logout_orb) {
- pci_free_consistent(hi->host->pdev,
- sizeof(struct sbp2_logout_orb),
- scsi_id->logout_orb,
- scsi_id->logout_orb_dma);
- SBP2_DMA_FREE("logout ORB DMA");
- }
-
- if (scsi_id->reconnect_orb) {
- pci_free_consistent(hi->host->pdev,
- sizeof(struct sbp2_reconnect_orb),
- scsi_id->reconnect_orb,
- scsi_id->reconnect_orb_dma);
- SBP2_DMA_FREE("reconnect ORB DMA");
- }
-
- if (scsi_id->login_response) {
- pci_free_consistent(hi->host->pdev,
- sizeof(struct sbp2_login_response),
- scsi_id->login_response,
- scsi_id->login_response_dma);
- SBP2_DMA_FREE("login FIFO DMA");
- }
-
- list_del(&scsi_id->scsi_list);
-
- kfree(scsi_id);
-
- SBP2_ERR ("Could not allocate memory for scsi_id");
-
- return -ENOMEM;
- }
+ if (!scsi_id->login_orb)
+ goto alloc_fail;
SBP2_DMA_ALLOC("consistent DMA region for login ORB");
SBP2_DEBUG("New SBP-2 device inserted, SCSI ID = %x", scsi_id->ud->id);
@@ -935,7 +883,7 @@ alloc_fail:
sbp2_remove_device(scsi_id);
return -EINTR;
}
-
+
/*
* Login to the sbp-2 device
*/
@@ -964,10 +912,17 @@ alloc_fail:
error = scsi_add_device(scsi_id->scsi_host, 0, scsi_id->ud->id, 0);
if (error) {
SBP2_ERR("scsi_add_device failed");
+ sbp2_logout_device(scsi_id);
+ sbp2_remove_device(scsi_id);
return error;
}
return 0;
+
+alloc_fail:
+ SBP2_ERR("Could not allocate memory for scsi_id");
+ sbp2_remove_device(scsi_id);
+ return -ENOMEM;
}
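Taken together, the last two hunks replace the long inline cleanup with a single alloc_fail label at the end of sbp2_start_device(): every failed pci_alloc_consistent() now just jumps there, and the shared teardown is expected to cope with partially allocated state. A minimal sketch of that shape, with hypothetical allocation and teardown helpers:

        static int start_device(struct example_dev *d)
        {
                d->first = alloc_first(d);
                if (!d->first)
                        goto alloc_fail;

                d->second = alloc_second(d);
                if (!d->second)
                        goto alloc_fail;

                return 0;

        alloc_fail:
                /* the teardown helper checks each member before freeing */
                remove_device(d);
                return -ENOMEM;
        }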
/*
@@ -1054,51 +1009,44 @@ static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id)
* This function deals with physical dma write requests (for adapters that do not support
* physical dma in hardware). Mostly just here for debugging...
*/
-static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid, int destid, quadlet_t *data,
- u64 addr, size_t length, u16 flags)
+static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid,
+ int destid, quadlet_t *data, u64 addr,
+ size_t length, u16 flags)
{
- /*
- * Manually put the data in the right place.
- */
- memcpy(bus_to_virt((u32)addr), data, length);
- sbp2util_packet_dump(data, length, "sbp2 phys dma write by device", (u32)addr);
- return(RCODE_COMPLETE);
+ /*
+ * Manually put the data in the right place.
+ */
+ memcpy(bus_to_virt((u32) addr), data, length);
+ sbp2util_packet_dump(data, length, "sbp2 phys dma write by device",
+ (u32) addr);
+ return RCODE_COMPLETE;
}
/*
* This function deals with physical dma read requests (for adapters that do not support
* physical dma in hardware). Mostly just here for debugging...
*/
-static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid, quadlet_t *data,
- u64 addr, size_t length, u16 flags)
+static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid,
+ quadlet_t *data, u64 addr, size_t length,
+ u16 flags)
{
- /*
- * Grab data from memory and send a read response.
- */
- memcpy(data, bus_to_virt((u32)addr), length);
- sbp2util_packet_dump(data, length, "sbp2 phys dma read by device", (u32)addr);
- return(RCODE_COMPLETE);
+ /*
+ * Grab data from memory and send a read response.
+ */
+ memcpy(data, bus_to_virt((u32) addr), length);
+ sbp2util_packet_dump(data, length, "sbp2 phys dma read by device",
+ (u32) addr);
+ return RCODE_COMPLETE;
}
#endif
-
/**************************************
* SBP-2 protocol related section
**************************************/
/*
- * This function determines if we should convert scsi commands for a particular sbp2 device type
- */
-static __inline__ int sbp2_command_conversion_device_type(u8 device_type)
-{
- return (((device_type == TYPE_DISK) ||
- (device_type == TYPE_RBC) ||
- (device_type == TYPE_ROM)) ? 1:0);
-}
-
-/*
* This function queries the device for the maximum concurrent logins it
* supports.
*/
@@ -1120,11 +1068,7 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
scsi_id->query_logins_orb->lun_misc = ORB_SET_FUNCTION(SBP2_QUERY_LOGINS_REQUEST);
scsi_id->query_logins_orb->lun_misc |= ORB_SET_NOTIFY(1);
- if (scsi_id->sbp2_device_type_and_lun != SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) {
- scsi_id->query_logins_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun);
- SBP2_DEBUG("sbp2_query_logins: set lun to %d",
- ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun));
- }
+ scsi_id->query_logins_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_lun);
SBP2_DEBUG("sbp2_query_logins: lun_misc initialized");
scsi_id->query_logins_orb->reserved_resp_length =
@@ -1161,12 +1105,12 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 2*HZ)) {
SBP2_INFO("Error querying logins to SBP-2 device - timed out");
- return(-EIO);
+ return -EIO;
}
if (scsi_id->status_block.ORB_offset_lo != scsi_id->query_logins_orb_dma) {
SBP2_INFO("Error querying logins to SBP-2 device - timed out");
- return(-EIO);
+ return -EIO;
}
if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) ||
@@ -1174,7 +1118,7 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
SBP2_INFO("Error querying logins to SBP-2 device - timed out");
- return(-EIO);
+ return -EIO;
}
sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_response, sizeof(struct sbp2_query_logins_response));
@@ -1191,7 +1135,7 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
SBP2_DEBUG("Number of active logins: %d", active_logins);
if (active_logins >= max_logins) {
- return(-EIO);
+ return -EIO;
}
return 0;
@@ -1210,13 +1154,13 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
if (!scsi_id->login_orb) {
SBP2_DEBUG("sbp2_login_device: login_orb not alloc'd!");
- return(-EIO);
+ return -EIO;
}
if (!exclusive_login) {
if (sbp2_query_logins(scsi_id)) {
SBP2_INFO("Device does not support any more concurrent logins");
- return(-EIO);
+ return -EIO;
}
}
@@ -1233,12 +1177,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
scsi_id->login_orb->lun_misc |= ORB_SET_RECONNECT(0); /* One second reconnect time */
scsi_id->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(exclusive_login); /* Exclusive access to device */
scsi_id->login_orb->lun_misc |= ORB_SET_NOTIFY(1); /* Notify us of login complete */
- /* Set the lun if we were able to pull it from the device's unit directory */
- if (scsi_id->sbp2_device_type_and_lun != SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) {
- scsi_id->login_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun);
- SBP2_DEBUG("sbp2_query_logins: set lun to %d",
- ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun));
- }
+ scsi_id->login_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_lun);
SBP2_DEBUG("sbp2_login_device: lun_misc initialized");
scsi_id->login_orb->passwd_resp_lengths =
@@ -1288,7 +1227,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
*/
if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 20*HZ)) {
SBP2_ERR("Error logging into SBP-2 device - login timed-out");
- return(-EIO);
+ return -EIO;
}
/*
@@ -1296,7 +1235,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
*/
if (scsi_id->status_block.ORB_offset_lo != scsi_id->login_orb_dma) {
SBP2_ERR("Error logging into SBP-2 device - login timed-out");
- return(-EIO);
+ return -EIO;
}
/*
@@ -1307,7 +1246,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
SBP2_ERR("Error logging into SBP-2 device - login failed");
- return(-EIO);
+ return -EIO;
}
/*
@@ -1331,7 +1270,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
SBP2_INFO("Logged into SBP-2 device");
- return(0);
+ return 0;
}
@@ -1385,8 +1324,7 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
atomic_set(&scsi_id->sbp2_login_complete, 0);
error = hpsb_node_write(scsi_id->ne,
- scsi_id->sbp2_management_agent_addr,
- data, 8);
+ scsi_id->sbp2_management_agent_addr, data, 8);
if (error)
return error;
@@ -1396,7 +1334,7 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
SBP2_INFO("Logged out of SBP-2 device");
- return(0);
+ return 0;
}
@@ -1456,8 +1394,7 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
atomic_set(&scsi_id->sbp2_login_complete, 0);
error = hpsb_node_write(scsi_id->ne,
- scsi_id->sbp2_management_agent_addr,
- data, 8);
+ scsi_id->sbp2_management_agent_addr, data, 8);
if (error)
return error;
@@ -1466,7 +1403,7 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
*/
if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, HZ)) {
SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out");
- return(-EIO);
+ return -EIO;
}
/*
@@ -1474,7 +1411,7 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
*/
if (scsi_id->status_block.ORB_offset_lo != scsi_id->reconnect_orb_dma) {
SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out");
- return(-EIO);
+ return -EIO;
}
/*
@@ -1485,12 +1422,12 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
SBP2_ERR("Error reconnecting to SBP-2 device - reconnect failed");
- return(-EIO);
+ return -EIO;
}
HPSB_DEBUG("Reconnected to SBP-2 device");
- return(0);
+ return 0;
}
@@ -1513,10 +1450,9 @@ static int sbp2_set_busy_timeout(struct scsi_id_instance_data *scsi_id)
SBP2_ERR("sbp2_set_busy_timeout error");
}
- return(0);
+ return 0;
}
-
/*
* This function is called to parse sbp2 device's config rom unit
* directory. Used to determine things like sbp2 management agent offset,
@@ -1529,7 +1465,7 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
struct csr1212_dentry *dentry;
u64 management_agent_addr;
u32 command_set_spec_id, command_set, unit_characteristics,
- firmware_revision, workarounds;
+ firmware_revision, workarounds;
int i;
SBP2_DEBUG("sbp2_parse_unit_directory");
@@ -1547,13 +1483,14 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
if (kv->key.type == CSR1212_KV_TYPE_CSR_OFFSET) {
/* Save off the management agent address */
management_agent_addr =
- CSR1212_REGISTER_SPACE_BASE +
- (kv->value.csr_offset << 2);
+ CSR1212_REGISTER_SPACE_BASE +
+ (kv->value.csr_offset << 2);
SBP2_DEBUG("sbp2_management_agent_addr = %x",
- (unsigned int) management_agent_addr);
+ (unsigned int)management_agent_addr);
} else if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
- scsi_id->sbp2_device_type_and_lun = kv->value.immediate;
+ scsi_id->sbp2_lun =
+ ORB_SET_LUN(kv->value.immediate);
}
break;
@@ -1561,14 +1498,14 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
/* Command spec organization */
command_set_spec_id = kv->value.immediate;
SBP2_DEBUG("sbp2_command_set_spec_id = %x",
- (unsigned int) command_set_spec_id);
+ (unsigned int)command_set_spec_id);
break;
case SBP2_COMMAND_SET_KEY:
/* Command set used by sbp2 device */
command_set = kv->value.immediate;
SBP2_DEBUG("sbp2_command_set = %x",
- (unsigned int) command_set);
+ (unsigned int)command_set);
break;
case SBP2_UNIT_CHARACTERISTICS_KEY:
@@ -1578,7 +1515,7 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
*/
unit_characteristics = kv->value.immediate;
SBP2_DEBUG("sbp2_unit_characteristics = %x",
- (unsigned int) unit_characteristics);
+ (unsigned int)unit_characteristics);
break;
case SBP2_FIRMWARE_REVISION_KEY:
@@ -1586,9 +1523,10 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
firmware_revision = kv->value.immediate;
if (force_inquiry_hack)
SBP2_INFO("sbp2_firmware_revision = %x",
- (unsigned int) firmware_revision);
- else SBP2_DEBUG("sbp2_firmware_revision = %x",
- (unsigned int) firmware_revision);
+ (unsigned int)firmware_revision);
+ else
+ SBP2_DEBUG("sbp2_firmware_revision = %x",
+ (unsigned int)firmware_revision);
break;
default:
@@ -1646,7 +1584,7 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
scsi_id->sbp2_firmware_revision = firmware_revision;
scsi_id->workarounds = workarounds;
if (ud->flags & UNIT_DIRECTORY_HAS_LUN)
- scsi_id->sbp2_device_type_and_lun = ud->lun;
+ scsi_id->sbp2_lun = ORB_SET_LUN(ud->lun);
}
}
@@ -1666,8 +1604,9 @@ static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id)
SBP2_DEBUG("sbp2_max_speed_and_size");
/* Initial setting comes from the hosts speed map */
- scsi_id->speed_code = hi->host->speed_map[NODEID_TO_NODE(hi->host->node_id) * 64
- + NODEID_TO_NODE(scsi_id->ne->nodeid)];
+ scsi_id->speed_code =
+ hi->host->speed_map[NODEID_TO_NODE(hi->host->node_id) * 64 +
+ NODEID_TO_NODE(scsi_id->ne->nodeid)];
/* Bump down our speed if the user requested it */
if (scsi_id->speed_code > max_speed) {
@@ -1678,15 +1617,16 @@ static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id)
/* Payload size is the lesser of what our speed supports and what
* our host supports. */
- scsi_id->max_payload_size = min(sbp2_speedto_max_payload[scsi_id->speed_code],
- (u8)(hi->host->csr.max_rec - 1));
+ scsi_id->max_payload_size =
+ min(sbp2_speedto_max_payload[scsi_id->speed_code],
+ (u8) (hi->host->csr.max_rec - 1));
HPSB_DEBUG("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]",
NODE_BUS_ARGS(hi->host, scsi_id->ne->nodeid),
hpsb_speedto_str[scsi_id->speed_code],
- 1 << ((u32)scsi_id->max_payload_size + 2));
+ 1 << ((u32) scsi_id->max_payload_size + 2));
- return(0);
+ return 0;
}
/*
@@ -1721,30 +1661,187 @@ static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait)
*/
scsi_id->last_orb = NULL;
- return(0);
+ return 0;
+}
+
+static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
+ struct sbp2scsi_host_info *hi,
+ struct sbp2_command_info *command,
+ unsigned int scsi_use_sg,
+ struct scatterlist *sgpnt,
+ u32 orb_direction,
+ enum dma_data_direction dma_dir)
+{
+ command->dma_dir = dma_dir;
+ orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
+ orb->misc |= ORB_SET_DIRECTION(orb_direction);
+
+ /* Special case if only one element (and less than 64KB in size) */
+ if ((scsi_use_sg == 1) &&
+ (sgpnt[0].length <= SBP2_MAX_SG_ELEMENT_LENGTH)) {
+
+ SBP2_DEBUG("Only one s/g element");
+ command->dma_size = sgpnt[0].length;
+ command->dma_type = CMD_DMA_PAGE;
+ command->cmd_dma = pci_map_page(hi->host->pdev,
+ sgpnt[0].page,
+ sgpnt[0].offset,
+ command->dma_size,
+ command->dma_dir);
+ SBP2_DMA_ALLOC("single page scatter element");
+
+ orb->data_descriptor_lo = command->cmd_dma;
+ orb->misc |= ORB_SET_DATA_SIZE(command->dma_size);
+
+ } else {
+ struct sbp2_unrestricted_page_table *sg_element =
+ &command->scatter_gather_element[0];
+ u32 sg_count, sg_len;
+ dma_addr_t sg_addr;
+ int i, count = pci_map_sg(hi->host->pdev, sgpnt, scsi_use_sg,
+ dma_dir);
+
+ SBP2_DMA_ALLOC("scatter list");
+
+ command->dma_size = scsi_use_sg;
+ command->sge_buffer = sgpnt;
+
+ /* use page tables (s/g) */
+ orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
+ orb->data_descriptor_lo = command->sge_dma;
+
+ /*
+ * Loop through and fill out our sbp-2 page tables
+ * (and split up anything too large)
+ */
+ for (i = 0, sg_count = 0 ; i < count; i++, sgpnt++) {
+ sg_len = sg_dma_len(sgpnt);
+ sg_addr = sg_dma_address(sgpnt);
+ while (sg_len) {
+ sg_element[sg_count].segment_base_lo = sg_addr;
+ if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
+ sg_element[sg_count].length_segment_base_hi =
+ PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
+ sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
+ sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
+ } else {
+ sg_element[sg_count].length_segment_base_hi =
+ PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
+ sg_len = 0;
+ }
+ sg_count++;
+ }
+ }
+
+ /* Number of page table (s/g) elements */
+ orb->misc |= ORB_SET_DATA_SIZE(sg_count);
+
+ sbp2util_packet_dump(sg_element,
+ (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
+ "sbp2 s/g list", command->sge_dma);
+
+ /* Byte swap page tables if necessary */
+ sbp2util_cpu_to_be32_buffer(sg_element,
+ (sizeof(struct sbp2_unrestricted_page_table)) *
+ sg_count);
+ }
+}
+
+static void sbp2_prep_command_orb_no_sg(struct sbp2_command_orb *orb,
+ struct sbp2scsi_host_info *hi,
+ struct sbp2_command_info *command,
+ struct scatterlist *sgpnt,
+ u32 orb_direction,
+ unsigned int scsi_request_bufflen,
+ void *scsi_request_buffer,
+ enum dma_data_direction dma_dir)
+{
+ command->dma_dir = dma_dir;
+ command->dma_size = scsi_request_bufflen;
+ command->dma_type = CMD_DMA_SINGLE;
+ command->cmd_dma = pci_map_single(hi->host->pdev, scsi_request_buffer,
+ command->dma_size, command->dma_dir);
+ orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
+ orb->misc |= ORB_SET_DIRECTION(orb_direction);
+
+ SBP2_DMA_ALLOC("single bulk");
+
+ /*
+ * Handle case where we get a command w/o s/g enabled (but
+ * check for transfers larger than 64K)
+ */
+ if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) {
+
+ orb->data_descriptor_lo = command->cmd_dma;
+ orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen);
+
+ } else {
+ struct sbp2_unrestricted_page_table *sg_element =
+ &command->scatter_gather_element[0];
+ u32 sg_count, sg_len;
+ dma_addr_t sg_addr;
+
+ /*
+ * Need to turn this into page tables, since the
+ * buffer is too large.
+ */
+ orb->data_descriptor_lo = command->sge_dma;
+
+ /* Use page tables (s/g) */
+ orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
+
+ /*
+ * fill out our sbp-2 page tables (and split up
+ * the large buffer)
+ */
+ sg_count = 0;
+ sg_len = scsi_request_bufflen;
+ sg_addr = command->cmd_dma;
+ while (sg_len) {
+ sg_element[sg_count].segment_base_lo = sg_addr;
+ if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
+ sg_element[sg_count].length_segment_base_hi =
+ PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
+ sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
+ sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
+ } else {
+ sg_element[sg_count].length_segment_base_hi =
+ PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
+ sg_len = 0;
+ }
+ sg_count++;
+ }
+
+ /* Number of page table (s/g) elements */
+ orb->misc |= ORB_SET_DATA_SIZE(sg_count);
+
+ sbp2util_packet_dump(sg_element,
+ (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
+ "sbp2 s/g list", command->sge_dma);
+
+ /* Byte swap page tables if necessary */
+ sbp2util_cpu_to_be32_buffer(sg_element,
+ (sizeof(struct sbp2_unrestricted_page_table)) *
+ sg_count);
+ }
}
/*
* This function is called to create the actual command orb and s/g list
* out of the scsi command itself.
*/
-static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
- struct sbp2_command_info *command,
- unchar *scsi_cmd,
- unsigned int scsi_use_sg,
- unsigned int scsi_request_bufflen,
- void *scsi_request_buffer,
- enum dma_data_direction dma_dir)
-
+static void sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
+ struct sbp2_command_info *command,
+ unchar *scsi_cmd,
+ unsigned int scsi_use_sg,
+ unsigned int scsi_request_bufflen,
+ void *scsi_request_buffer,
+ enum dma_data_direction dma_dir)
{
struct sbp2scsi_host_info *hi = scsi_id->hi;
- struct scatterlist *sgpnt = (struct scatterlist *) scsi_request_buffer;
+ struct scatterlist *sgpnt = (struct scatterlist *)scsi_request_buffer;
struct sbp2_command_orb *command_orb = &command->command_orb;
- struct sbp2_unrestricted_page_table *scatter_gather_element =
- &command->scatter_gather_element[0];
- u32 sg_count, sg_len, orb_direction;
- dma_addr_t sg_addr;
- int i;
+ u32 orb_direction;
/*
* Set-up our command ORB..
@@ -1758,222 +1855,42 @@ static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
command_orb->next_ORB_lo = 0x0;
command_orb->misc = ORB_SET_MAX_PAYLOAD(scsi_id->max_payload_size);
command_orb->misc |= ORB_SET_SPEED(scsi_id->speed_code);
- command_orb->misc |= ORB_SET_NOTIFY(1); /* Notify us when complete */
+ command_orb->misc |= ORB_SET_NOTIFY(1); /* Notify us when complete */
- /*
- * Get the direction of the transfer. If the direction is unknown, then use our
- * goofy table as a back-up.
- */
- switch (dma_dir) {
- case DMA_NONE:
- orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
- break;
- case DMA_TO_DEVICE:
- orb_direction = ORB_DIRECTION_WRITE_TO_MEDIA;
- break;
- case DMA_FROM_DEVICE:
- orb_direction = ORB_DIRECTION_READ_FROM_MEDIA;
- break;
- case DMA_BIDIRECTIONAL:
- default:
- SBP2_ERR("SCSI data transfer direction not specified. "
- "Update the SBP2 direction table in sbp2.h if "
- "necessary for your application");
- __scsi_print_command(scsi_cmd);
- orb_direction = sbp2scsi_direction_table[*scsi_cmd];
- break;
+ if (dma_dir == DMA_NONE)
+ orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
+ else if (dma_dir == DMA_TO_DEVICE && scsi_request_bufflen)
+ orb_direction = ORB_DIRECTION_WRITE_TO_MEDIA;
+ else if (dma_dir == DMA_FROM_DEVICE && scsi_request_bufflen)
+ orb_direction = ORB_DIRECTION_READ_FROM_MEDIA;
+ else {
+ SBP2_WARN("Falling back to DMA_NONE");
+ orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
}
- /*
- * Set-up our pagetable stuff... unfortunately, this has become
- * messier than I'd like. Need to clean this up a bit. ;-)
- */
+ /* Set-up our pagetable stuff */
if (orb_direction == ORB_DIRECTION_NO_DATA_TRANSFER) {
-
SBP2_DEBUG("No data transfer");
-
- /*
- * Handle no data transfer
- */
command_orb->data_descriptor_hi = 0x0;
command_orb->data_descriptor_lo = 0x0;
command_orb->misc |= ORB_SET_DIRECTION(1);
-
} else if (scsi_use_sg) {
-
SBP2_DEBUG("Use scatter/gather");
-
- /*
- * Special case if only one element (and less than 64KB in size)
- */
- if ((scsi_use_sg == 1) && (sgpnt[0].length <= SBP2_MAX_SG_ELEMENT_LENGTH)) {
-
- SBP2_DEBUG("Only one s/g element");
- command->dma_dir = dma_dir;
- command->dma_size = sgpnt[0].length;
- command->dma_type = CMD_DMA_PAGE;
- command->cmd_dma = pci_map_page(hi->host->pdev,
- sgpnt[0].page,
- sgpnt[0].offset,
- command->dma_size,
- command->dma_dir);
- SBP2_DMA_ALLOC("single page scatter element");
-
- command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
- command_orb->data_descriptor_lo = command->cmd_dma;
- command_orb->misc |= ORB_SET_DATA_SIZE(command->dma_size);
- command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
-
- } else {
- int count = pci_map_sg(hi->host->pdev, sgpnt, scsi_use_sg, dma_dir);
- SBP2_DMA_ALLOC("scatter list");
-
- command->dma_size = scsi_use_sg;
- command->dma_dir = dma_dir;
- command->sge_buffer = sgpnt;
-
- /* use page tables (s/g) */
- command_orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
- command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
- command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
- command_orb->data_descriptor_lo = command->sge_dma;
-
- /*
- * Loop through and fill out our sbp-2 page tables
- * (and split up anything too large)
- */
- for (i = 0, sg_count = 0 ; i < count; i++, sgpnt++) {
- sg_len = sg_dma_len(sgpnt);
- sg_addr = sg_dma_address(sgpnt);
- while (sg_len) {
- scatter_gather_element[sg_count].segment_base_lo = sg_addr;
- if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
- scatter_gather_element[sg_count].length_segment_base_hi =
- PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
- sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
- sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
- } else {
- scatter_gather_element[sg_count].length_segment_base_hi =
- PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
- sg_len = 0;
- }
- sg_count++;
- }
- }
-
- /* Number of page table (s/g) elements */
- command_orb->misc |= ORB_SET_DATA_SIZE(sg_count);
-
- sbp2util_packet_dump(scatter_gather_element,
- (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
- "sbp2 s/g list", command->sge_dma);
-
- /*
- * Byte swap page tables if necessary
- */
- sbp2util_cpu_to_be32_buffer(scatter_gather_element,
- (sizeof(struct sbp2_unrestricted_page_table)) *
- sg_count);
-
- }
-
+ sbp2_prep_command_orb_sg(command_orb, hi, command, scsi_use_sg,
+ sgpnt, orb_direction, dma_dir);
} else {
-
SBP2_DEBUG("No scatter/gather");
-
- command->dma_dir = dma_dir;
- command->dma_size = scsi_request_bufflen;
- command->dma_type = CMD_DMA_SINGLE;
- command->cmd_dma = pci_map_single (hi->host->pdev, scsi_request_buffer,
- command->dma_size,
- command->dma_dir);
- SBP2_DMA_ALLOC("single bulk");
-
- /*
- * Handle case where we get a command w/o s/g enabled (but
- * check for transfers larger than 64K)
- */
- if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) {
-
- command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
- command_orb->data_descriptor_lo = command->cmd_dma;
- command_orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen);
- command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
-
- /*
- * Sanity, in case our direction table is not
- * up-to-date
- */
- if (!scsi_request_bufflen) {
- command_orb->data_descriptor_hi = 0x0;
- command_orb->data_descriptor_lo = 0x0;
- command_orb->misc |= ORB_SET_DIRECTION(1);
- }
-
- } else {
- /*
- * Need to turn this into page tables, since the
- * buffer is too large.
- */
- command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
- command_orb->data_descriptor_lo = command->sge_dma;
-
- /* Use page tables (s/g) */
- command_orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
- command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
-
- /*
- * fill out our sbp-2 page tables (and split up
- * the large buffer)
- */
- sg_count = 0;
- sg_len = scsi_request_bufflen;
- sg_addr = command->cmd_dma;
- while (sg_len) {
- scatter_gather_element[sg_count].segment_base_lo = sg_addr;
- if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
- scatter_gather_element[sg_count].length_segment_base_hi =
- PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
- sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
- sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
- } else {
- scatter_gather_element[sg_count].length_segment_base_hi =
- PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
- sg_len = 0;
- }
- sg_count++;
- }
-
- /* Number of page table (s/g) elements */
- command_orb->misc |= ORB_SET_DATA_SIZE(sg_count);
-
- sbp2util_packet_dump(scatter_gather_element,
- (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
- "sbp2 s/g list", command->sge_dma);
-
- /*
- * Byte swap page tables if necessary
- */
- sbp2util_cpu_to_be32_buffer(scatter_gather_element,
- (sizeof(struct sbp2_unrestricted_page_table)) *
- sg_count);
-
- }
-
+ sbp2_prep_command_orb_no_sg(command_orb, hi, command, sgpnt,
+ orb_direction, scsi_request_bufflen,
+ scsi_request_buffer, dma_dir);
}
- /*
- * Byte swap command ORB if necessary
- */
+ /* Byte swap command ORB if necessary */
sbp2util_cpu_to_be32_buffer(command_orb, sizeof(struct sbp2_command_orb));
- /*
- * Put our scsi command in the command ORB
- */
+ /* Put our scsi command in the command ORB */
memset(command_orb->cdb, 0, 12);
memcpy(command_orb->cdb, scsi_cmd, COMMAND_SIZE(*scsi_cmd));
-
- return(0);
}
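The direction handling above no longer falls back to a per-opcode lookup table (the table itself is removed from sbp2.h later in this diff); the ORB direction comes only from the dma_data_direction passed down by the SCSI layer, and DMA_BIDIRECTIONAL is rejected up front in sbp2scsi_queuecommand() in a later hunk. Condensed, the new mapping is:

        if (dma_dir == DMA_TO_DEVICE && scsi_request_bufflen)
                orb_direction = ORB_DIRECTION_WRITE_TO_MEDIA;
        else if (dma_dir == DMA_FROM_DEVICE && scsi_request_bufflen)
                orb_direction = ORB_DIRECTION_READ_FROM_MEDIA;
        else    /* DMA_NONE, a zero-length buffer, or anything unexpected */
                orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;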
/*
@@ -1989,7 +1906,7 @@ static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
outstanding_orb_incr;
SBP2_ORB_DEBUG("sending command orb %p, total orbs = %x",
- command_orb, global_outstanding_command_orbs);
+ command_orb, global_outstanding_command_orbs);
pci_dma_sync_single_for_device(hi->host->pdev, command->command_orb_dma,
sizeof(struct sbp2_command_orb),
@@ -2034,10 +1951,11 @@ static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
* both by the sbp2 device and us.
*/
scsi_id->last_orb->next_ORB_lo =
- cpu_to_be32(command->command_orb_dma);
+ cpu_to_be32(command->command_orb_dma);
/* Tells hardware that this pointer is valid */
scsi_id->last_orb->next_ORB_hi = 0x0;
- pci_dma_sync_single_for_device(hi->host->pdev, scsi_id->last_orb_dma,
+ pci_dma_sync_single_for_device(hi->host->pdev,
+ scsi_id->last_orb_dma,
sizeof(struct sbp2_command_orb),
PCI_DMA_BIDIRECTIONAL);
@@ -2051,14 +1969,14 @@ static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
if (sbp2util_node_write_no_wait(ne, addr, &data, 4) < 0) {
SBP2_ERR("sbp2util_node_write_no_wait failed");
- return(-EIO);
+ return -EIO;
}
scsi_id->last_orb = command_orb;
scsi_id->last_orb_dma = command->command_orb_dma;
}
- return(0);
+ return 0;
}
/*
@@ -2085,7 +2003,7 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
*/
command = sbp2util_allocate_command_orb(scsi_id, SCpnt, done);
if (!command) {
- return(-EIO);
+ return -EIO;
}
/*
@@ -2106,11 +2024,6 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
sbp2_create_command_orb(scsi_id, command, cmd, SCpnt->use_sg,
request_bufflen, SCpnt->request_buffer,
SCpnt->sc_data_direction);
- /*
- * Update our cdb if necessary (to handle sbp2 RBC command set
- * differences). This is where the command set hacks go! =)
- */
- sbp2_check_sbp2_command(scsi_id, command->command_orb.cdb);
sbp2util_packet_dump(&command->command_orb, sizeof(struct sbp2_command_orb),
"sbp2 command orb", command->command_orb_dma);
@@ -2125,112 +2038,7 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
*/
sbp2_link_orb_command(scsi_id, command);
- return(0);
-}
-
-
-/*
- * This function deals with command set differences between Linux scsi
- * command set and sbp2 RBC command set.
- */
-static void sbp2_check_sbp2_command(struct scsi_id_instance_data *scsi_id, unchar *cmd)
-{
- unchar new_cmd[16];
- u8 device_type = SBP2_DEVICE_TYPE (scsi_id->sbp2_device_type_and_lun);
-
- SBP2_DEBUG("sbp2_check_sbp2_command");
-
- switch (*cmd) {
-
- case READ_6:
-
- if (sbp2_command_conversion_device_type(device_type)) {
-
- SBP2_DEBUG("Convert READ_6 to READ_10");
-
- /*
- * Need to turn read_6 into read_10
- */
- new_cmd[0] = 0x28;
- new_cmd[1] = (cmd[1] & 0xe0);
- new_cmd[2] = 0x0;
- new_cmd[3] = (cmd[1] & 0x1f);
- new_cmd[4] = cmd[2];
- new_cmd[5] = cmd[3];
- new_cmd[6] = 0x0;
- new_cmd[7] = 0x0;
- new_cmd[8] = cmd[4];
- new_cmd[9] = cmd[5];
-
- memcpy(cmd, new_cmd, 10);
-
- }
-
- break;
-
- case WRITE_6:
-
- if (sbp2_command_conversion_device_type(device_type)) {
-
- SBP2_DEBUG("Convert WRITE_6 to WRITE_10");
-
- /*
- * Need to turn write_6 into write_10
- */
- new_cmd[0] = 0x2a;
- new_cmd[1] = (cmd[1] & 0xe0);
- new_cmd[2] = 0x0;
- new_cmd[3] = (cmd[1] & 0x1f);
- new_cmd[4] = cmd[2];
- new_cmd[5] = cmd[3];
- new_cmd[6] = 0x0;
- new_cmd[7] = 0x0;
- new_cmd[8] = cmd[4];
- new_cmd[9] = cmd[5];
-
- memcpy(cmd, new_cmd, 10);
-
- }
-
- break;
-
- case MODE_SENSE:
-
- if (sbp2_command_conversion_device_type(device_type)) {
-
- SBP2_DEBUG("Convert MODE_SENSE_6 to MODE_SENSE_10");
-
- /*
- * Need to turn mode_sense_6 into mode_sense_10
- */
- new_cmd[0] = 0x5a;
- new_cmd[1] = cmd[1];
- new_cmd[2] = cmd[2];
- new_cmd[3] = 0x0;
- new_cmd[4] = 0x0;
- new_cmd[5] = 0x0;
- new_cmd[6] = 0x0;
- new_cmd[7] = 0x0;
- new_cmd[8] = cmd[4];
- new_cmd[9] = cmd[5];
-
- memcpy(cmd, new_cmd, 10);
-
- }
-
- break;
-
- case MODE_SELECT:
-
- /*
- * TODO. Probably need to change mode select to 10 byte version
- */
-
- default:
- break;
- }
-
- return;
+ return 0;
}
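The removed sbp2_check_sbp2_command() rewrote READ(6), WRITE(6) and MODE SENSE(6) CDBs into their 10-byte forms by hand. The patch instead asks the SCSI midlayer to issue 10-byte commands in the first place, via the two flags added to sbp2scsi_slave_configure() further down in this diff:

        static int sbp2scsi_slave_configure(struct scsi_device *sdev)
        {
                blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
                /* midlayer builds 10-byte READ/WRITE and MODE SENSE/SELECT,
                 * so the driver no longer rewrites 6-byte CDBs itself */
                sdev->use_10_for_rw = 1;
                sdev->use_10_for_ms = 1;
                return 0;
        }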
/*
@@ -2260,80 +2068,40 @@ static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense
sense_data[14] = sbp2_status[20];
sense_data[15] = sbp2_status[21];
- return(sbp2_status[8] & 0x3f); /* return scsi status */
+ return sbp2_status[8] & 0x3f; /* return scsi status */
}
/*
* This function is called after a command is completed, in order to do any necessary SBP-2
* response data translations for the SCSI stack
*/
-static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
+static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
struct scsi_cmnd *SCpnt)
{
u8 *scsi_buf = SCpnt->request_buffer;
- u8 device_type = SBP2_DEVICE_TYPE (scsi_id->sbp2_device_type_and_lun);
SBP2_DEBUG("sbp2_check_sbp2_response");
switch (SCpnt->cmnd[0]) {
- case INQUIRY:
-
- /*
- * If scsi_id->sbp2_device_type_and_lun is uninitialized, then fill
- * this information in from the inquiry response data. Lun is set to zero.
- */
- if (scsi_id->sbp2_device_type_and_lun == SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) {
- SBP2_DEBUG("Creating sbp2_device_type_and_lun from scsi inquiry data");
- scsi_id->sbp2_device_type_and_lun = (scsi_buf[0] & 0x1f) << 16;
- }
-
- /*
- * Make sure data length is ok. Minimum length is 36 bytes
- */
- if (scsi_buf[4] == 0) {
- scsi_buf[4] = 36 - 5;
- }
-
- /*
- * Check for Simple Direct Access Device and change it to TYPE_DISK
- */
- if ((scsi_buf[0] & 0x1f) == TYPE_RBC) {
- SBP2_DEBUG("Changing TYPE_RBC to TYPE_DISK");
- scsi_buf[0] &= 0xe0;
- }
-
- /*
- * Fix ansi revision and response data format
- */
- scsi_buf[2] |= 2;
- scsi_buf[3] = (scsi_buf[3] & 0xf0) | 2;
-
- break;
-
- case MODE_SENSE:
-
- if (sbp2_command_conversion_device_type(device_type)) {
-
- SBP2_DEBUG("Modify mode sense response (10 byte version)");
-
- scsi_buf[0] = scsi_buf[1]; /* Mode data length */
- scsi_buf[1] = scsi_buf[2]; /* Medium type */
- scsi_buf[2] = scsi_buf[3]; /* Device specific parameter */
- scsi_buf[3] = scsi_buf[7]; /* Block descriptor length */
- memcpy(scsi_buf + 4, scsi_buf + 8, scsi_buf[0]);
- }
-
- break;
+ case INQUIRY:
+ /*
+ * Make sure data length is ok. Minimum length is 36 bytes
+ */
+ if (scsi_buf[4] == 0) {
+ scsi_buf[4] = 36 - 5;
+ }
- case MODE_SELECT:
+ /*
+ * Fix ansi revision and response data format
+ */
+ scsi_buf[2] |= 2;
+ scsi_buf[3] = (scsi_buf[3] & 0xf0) | 2;
- /*
- * TODO. Probably need to change mode select to 10 byte version
- */
+ break;
- default:
- break;
+ default:
+ break;
}
return;
}
@@ -2358,14 +2126,14 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
if (!host) {
SBP2_ERR("host is NULL - this is bad!");
- return(RCODE_ADDRESS_ERROR);
+ return RCODE_ADDRESS_ERROR;
}
hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
if (!hi) {
SBP2_ERR("host info is NULL - this is bad!");
- return(RCODE_ADDRESS_ERROR);
+ return RCODE_ADDRESS_ERROR;
}
/*
@@ -2382,7 +2150,7 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
if (!scsi_id) {
SBP2_ERR("scsi_id is NULL - device is gone?");
- return(RCODE_ADDRESS_ERROR);
+ return RCODE_ADDRESS_ERROR;
}
/*
@@ -2480,10 +2248,9 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
SBP2_ORB_DEBUG("command orb completed");
}
- return(RCODE_COMPLETE);
+ return RCODE_COMPLETE;
}
-
/**************************************
* SCSI interface related section
**************************************/
@@ -2541,6 +2308,16 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
}
/*
+ * Bidirectional commands are not yet implemented,
+ * and unknown transfer direction not handled.
+ */
+ if (SCpnt->sc_data_direction == DMA_BIDIRECTIONAL) {
+ SBP2_ERR("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
+ result = DID_ERROR << 16;
+ goto done;
+ }
+
+ /*
* Try and send our SCSI command
*/
if (sbp2_send_command(scsi_id, SCpnt, done)) {
@@ -2616,55 +2393,56 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
* complete the command, just let it get retried at the end of the
* bus reset.
*/
- if (!hpsb_node_entry_valid(scsi_id->ne) && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
+ if (!hpsb_node_entry_valid(scsi_id->ne)
+ && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
SBP2_ERR("Bus reset in progress - retry command later");
return;
}
-
+
/*
* Switch on scsi status
*/
switch (scsi_status) {
- case SBP2_SCSI_STATUS_GOOD:
- SCpnt->result = DID_OK;
- break;
+ case SBP2_SCSI_STATUS_GOOD:
+ SCpnt->result = DID_OK;
+ break;
- case SBP2_SCSI_STATUS_BUSY:
- SBP2_ERR("SBP2_SCSI_STATUS_BUSY");
- SCpnt->result = DID_BUS_BUSY << 16;
- break;
+ case SBP2_SCSI_STATUS_BUSY:
+ SBP2_ERR("SBP2_SCSI_STATUS_BUSY");
+ SCpnt->result = DID_BUS_BUSY << 16;
+ break;
- case SBP2_SCSI_STATUS_CHECK_CONDITION:
- SBP2_DEBUG("SBP2_SCSI_STATUS_CHECK_CONDITION");
- SCpnt->result = CHECK_CONDITION << 1;
+ case SBP2_SCSI_STATUS_CHECK_CONDITION:
+ SBP2_DEBUG("SBP2_SCSI_STATUS_CHECK_CONDITION");
+ SCpnt->result = CHECK_CONDITION << 1;
- /*
- * Debug stuff
- */
+ /*
+ * Debug stuff
+ */
#if CONFIG_IEEE1394_SBP2_DEBUG >= 1
- scsi_print_command(SCpnt);
- scsi_print_sense("bh", SCpnt);
+ scsi_print_command(SCpnt);
+ scsi_print_sense("bh", SCpnt);
#endif
- break;
+ break;
- case SBP2_SCSI_STATUS_SELECTION_TIMEOUT:
- SBP2_ERR("SBP2_SCSI_STATUS_SELECTION_TIMEOUT");
- SCpnt->result = DID_NO_CONNECT << 16;
- scsi_print_command(SCpnt);
- break;
+ case SBP2_SCSI_STATUS_SELECTION_TIMEOUT:
+ SBP2_ERR("SBP2_SCSI_STATUS_SELECTION_TIMEOUT");
+ SCpnt->result = DID_NO_CONNECT << 16;
+ scsi_print_command(SCpnt);
+ break;
- case SBP2_SCSI_STATUS_CONDITION_MET:
- case SBP2_SCSI_STATUS_RESERVATION_CONFLICT:
- case SBP2_SCSI_STATUS_COMMAND_TERMINATED:
- SBP2_ERR("Bad SCSI status = %x", scsi_status);
- SCpnt->result = DID_ERROR << 16;
- scsi_print_command(SCpnt);
- break;
+ case SBP2_SCSI_STATUS_CONDITION_MET:
+ case SBP2_SCSI_STATUS_RESERVATION_CONFLICT:
+ case SBP2_SCSI_STATUS_COMMAND_TERMINATED:
+ SBP2_ERR("Bad SCSI status = %x", scsi_status);
+ SCpnt->result = DID_ERROR << 16;
+ scsi_print_command(SCpnt);
+ break;
- default:
- SBP2_ERR("Unsupported SCSI status = %x", scsi_status);
- SCpnt->result = DID_ERROR << 16;
+ default:
+ SBP2_ERR("Unsupported SCSI status = %x", scsi_status);
+ SCpnt->result = DID_ERROR << 16;
}
/*
@@ -2678,7 +2456,8 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
* If a bus reset is in progress and there was an error, complete
* the command as busy so that it will get retried.
*/
- if (!hpsb_node_entry_valid(scsi_id->ne) && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
+ if (!hpsb_node_entry_valid(scsi_id->ne)
+ && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
SBP2_ERR("Completing command with busy (bus reset)");
SCpnt->result = DID_BUS_BUSY << 16;
}
@@ -2699,31 +2478,29 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
/*
* Tell scsi stack that we're done with this command
*/
- done (SCpnt);
+ done(SCpnt);
}
-
static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
{
((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = sdev;
return 0;
}
-
static int sbp2scsi_slave_configure(struct scsi_device *sdev)
{
blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
+ sdev->use_10_for_rw = 1;
+ sdev->use_10_for_ms = 1;
return 0;
}
-
static void sbp2scsi_slave_destroy(struct scsi_device *sdev)
{
((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = NULL;
return;
}
-
/*
* Called by scsi stack when something has really gone wrong. Usually
* called when a command has timed-out for some reason.
@@ -2769,7 +2546,7 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY);
}
- return(SUCCESS);
+ return SUCCESS;
}
/*
@@ -2779,28 +2556,20 @@ static int sbp2scsi_reset(struct scsi_cmnd *SCpnt)
{
struct scsi_id_instance_data *scsi_id =
(struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
- unsigned long flags;
SBP2_ERR("reset requested");
- spin_lock_irqsave(SCpnt->device->host->host_lock, flags);
-
if (sbp2util_node_is_available(scsi_id)) {
SBP2_ERR("Generating sbp2 fetch agent reset");
sbp2_agent_reset(scsi_id, 0);
}
- spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags);
-
return SUCCESS;
}
-static const char *sbp2scsi_info (struct Scsi_Host *host)
-{
- return "SCSI emulation for IEEE-1394 SBP-2 Devices";
-}
-
-static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct scsi_device *sdev;
struct scsi_id_instance_data *scsi_id;
@@ -2812,10 +2581,7 @@ static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_att
if (!(scsi_id = (struct scsi_id_instance_data *)sdev->host->hostdata[0]))
return 0;
- if (scsi_id->sbp2_device_type_and_lun == SBP2_DEVICE_TYPE_LUN_UNINITIALIZED)
- lun = 0;
- else
- lun = ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun);
+ lun = ORB_SET_LUN(scsi_id->sbp2_lun);
return sprintf(buf, "%016Lx:%d:%d\n", (unsigned long long)scsi_id->ne->guid,
scsi_id->ud->id, lun);
@@ -2837,12 +2603,9 @@ static struct scsi_host_template scsi_driver_template = {
.module = THIS_MODULE,
.name = "SBP-2 IEEE-1394",
.proc_name = SBP2_DEVICE_NAME,
- .info = sbp2scsi_info,
.queuecommand = sbp2scsi_queuecommand,
.eh_abort_handler = sbp2scsi_abort,
.eh_device_reset_handler = sbp2scsi_reset,
- .eh_bus_reset_handler = sbp2scsi_reset,
- .eh_host_reset_handler = sbp2scsi_reset,
.slave_alloc = sbp2scsi_slave_alloc,
.slave_configure = sbp2scsi_slave_configure,
.slave_destroy = sbp2scsi_slave_destroy,
@@ -2861,8 +2624,6 @@ static int sbp2_module_init(void)
SBP2_DEBUG("sbp2_module_init");
- printk(KERN_INFO "sbp2: %s\n", version);
-
/* Module load debug option to force one command at a time (serializing I/O) */
if (serialize_io) {
SBP2_INFO("Driver forced to serialize I/O (serialize_io=1)");
@@ -2874,7 +2635,6 @@ static int sbp2_module_init(void)
/* Set max sectors (module load option). Default is 255 sectors. */
scsi_driver_template.max_sectors = max_sectors;
-
/* Register our high level driver with 1394 stack */
hpsb_register_highlevel(&sbp2_highlevel);
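Both of the new ORB helpers split any DMA segment larger than SBP2_MAX_SG_ELEMENT_LENGTH (0xf000 bytes) across several SBP-2 page-table entries. A condensed sketch of that splitting loop, with hypothetical pt/n names standing in for the scatter_gather_element array and its counter:

        while (sg_len) {
                u32 chunk = min_t(u32, sg_len, SBP2_MAX_SG_ELEMENT_LENGTH);

                pt[n].segment_base_lo = sg_addr;
                pt[n].length_segment_base_hi =
                        PAGE_TABLE_SET_SEGMENT_LENGTH(chunk);
                sg_addr += chunk;
                sg_len -= chunk;
                n++;
        }

The final entry count goes into ORB_SET_DATA_SIZE() and the table is byte-swapped with sbp2util_cpu_to_be32_buffer() before the ORB is sent.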
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index cd425be7484..900ea1d25e7 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -119,8 +119,8 @@ struct sbp2_query_logins_response {
struct sbp2_reconnect_orb {
u32 reserved1;
u32 reserved2;
- u32 reserved3;
- u32 reserved4;
+ u32 reserved3;
+ u32 reserved4;
u32 login_ID_misc;
u32 reserved5;
u32 status_FIFO_hi;
@@ -130,8 +130,8 @@ struct sbp2_reconnect_orb {
struct sbp2_logout_orb {
u32 reserved1;
u32 reserved2;
- u32 reserved3;
- u32 reserved4;
+ u32 reserved3;
+ u32 reserved4;
u32 login_ID_misc;
u32 reserved5;
u32 status_FIFO_hi;
@@ -188,7 +188,7 @@ struct sbp2_unrestricted_page_table {
struct sbp2_status_block {
u32 ORB_offset_hi_misc;
u32 ORB_offset_lo;
- u8 command_set_dependent[24];
+ u8 command_set_dependent[24];
};
/*
@@ -211,7 +211,7 @@ struct sbp2_status_block {
* specified for write posting, where the ohci controller will
* automatically send an ack_complete when the status is written by the
* sbp2 device... saving a split transaction. =)
- */
+ */
#define SBP2_STATUS_FIFO_ADDRESS 0xfffe00000000ULL
#define SBP2_STATUS_FIFO_ADDRESS_HI 0xfffe
#define SBP2_STATUS_FIFO_ADDRESS_LO 0x0
@@ -229,9 +229,6 @@ struct sbp2_status_block {
#define SBP2_DEVICE_TYPE_AND_LUN_KEY 0x14
#define SBP2_FIRMWARE_REVISION_KEY 0x3c
-#define SBP2_DEVICE_TYPE(q) (((q) >> 16) & 0x1f)
-#define SBP2_DEVICE_LUN(q) ((q) & 0xffff)
-
#define SBP2_AGENT_STATE_OFFSET 0x00ULL
#define SBP2_AGENT_RESET_OFFSET 0x04ULL
#define SBP2_ORB_POINTER_OFFSET 0x08ULL
@@ -256,8 +253,6 @@ struct sbp2_status_block {
*/
#define SBP2_128KB_BROKEN_FIRMWARE 0xa0b800
-#define SBP2_DEVICE_TYPE_LUN_UNINITIALIZED 0xffffffff
-
/*
* SCSI specific stuff
*/
@@ -265,45 +260,7 @@ struct sbp2_status_block {
#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
#define SBP2_MAX_UDS_PER_NODE 16 /* Maximum scsi devices per node */
#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
-
-/*
- * SCSI direction table...
- * (now used as a back-up in case the direction passed down from above is "unknown")
- *
- * DIN = IN data direction
- * DOU = OUT data direction
- * DNO = No data transfer
- * DUN = Unknown data direction
- *
- * Opcode 0xec (Teac specific "opc execute") possibly should be DNO,
- * but we'll change it when somebody reports a problem with this.
- */
-#define DIN ORB_DIRECTION_READ_FROM_MEDIA
-#define DOU ORB_DIRECTION_WRITE_TO_MEDIA
-#define DNO ORB_DIRECTION_NO_DATA_TRANSFER
-#define DUN DIN
-
-static unchar sbp2scsi_direction_table[0x100] = {
- DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN,
- DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN,
- DIN,DUN,DIN,DIN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU,
- DOU,DOU,DOU,DNO,DIN,DNO,DNO,DIN,DOU,DOU,DOU,DOU,DIN,DOU,DIN,DOU,
- DOU,DOU,DIN,DIN,DIN,DNO,DIN,DNO,DNO,DNO,DUN,DNO,DOU,DIN,DNO,DUN,
- DUN,DIN,DIN,DNO,DNO,DOU,DUN,DUN,DNO,DIN,DIN,DNO,DIN,DOU,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
- DUN,DNO,DOU,DOU,DIN,DNO,DNO,DNO,DIN,DNO,DOU,DUN,DNO,DIN,DOU,DOU,
- DOU,DOU,DOU,DNO,DUN,DIN,DOU,DIN,DIN,DIN,DNO,DNO,DNO,DIN,DIN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN
-};
-
-/* This should be safe */
-#define SBP2_MAX_CMDS 8
+#define SBP2_MAX_CMDS 8 /* This should be safe */
/* This is the two dma types we use for cmd_dma below */
enum cmd_dma_types {
@@ -338,10 +295,8 @@ struct sbp2_command_info {
#define SBP2_BREAKAGE_128K_MAX_TRANSFER 0x1
#define SBP2_BREAKAGE_INQUIRY_HACK 0x2
-
struct sbp2scsi_host_info;
-
/*
* Information needed on a per scsi id basis (one for each sbp2 device)
*/
@@ -379,7 +334,7 @@ struct scsi_id_instance_data {
u32 sbp2_command_set_spec_id;
u32 sbp2_command_set;
u32 sbp2_unit_characteristics;
- u32 sbp2_device_type_and_lun;
+ u32 sbp2_lun;
u32 sbp2_firmware_revision;
/*
@@ -411,7 +366,6 @@ struct scsi_id_instance_data {
u32 workarounds;
};
-
/* Sbp2 host data structure (one per IEEE1394 host) */
struct sbp2scsi_host_info {
struct hpsb_host *host; /* IEEE1394 host */
@@ -456,20 +410,12 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id);
static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid,
quadlet_t *data, u64 addr, size_t length, u16 flags);
static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait);
-static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
- struct sbp2_command_info *command,
- unchar *scsi_cmd,
- unsigned int scsi_use_sg,
- unsigned int scsi_request_bufflen,
- void *scsi_request_buffer,
- enum dma_data_direction dma_dir);
static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
struct sbp2_command_info *command);
static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
struct scsi_cmnd *SCpnt,
void (*done)(struct scsi_cmnd *));
static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense_data);
-static void sbp2_check_sbp2_command(struct scsi_id_instance_data *scsi_id, unchar *cmd);
static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
struct scsi_cmnd *SCpnt);
static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 23911da5015..39fb88309e8 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -19,12 +19,6 @@
*
* NOTES:
*
- * jds -- add private data to file to keep track of iso contexts associated
- * with each open -- so release won't kill all iso transfers.
- *
- * Damien Douxchamps: Fix failure when the number of DMA pages per frame is
- * one.
- *
* ioctl return codes:
* EFAULT is only for invalid address for the argp
* EINVAL for out of range values
@@ -34,12 +28,6 @@
* ENOTTY for unsupported ioctl request
*
*/
-
-/* Markus Tavenrath <speedygoo@speedygoo.de> :
- - fixed checks for valid buffer-numbers in video1394_icotl
- - changed the ways the dma prg's are used, now it's possible to use
- even a single dma buffer
-*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -60,7 +48,6 @@
#include <linux/vmalloc.h>
#include <linux/timex.h>
#include <linux/mm.h>
-#include <linux/ioctl32.h>
#include <linux/compat.h>
#include <linux/cdev.h>
@@ -77,14 +64,6 @@
#define ISO_CHANNELS 64
-#ifndef virt_to_page
-#define virt_to_page(x) MAP_NR(x)
-#endif
-
-#ifndef vmalloc_32
-#define vmalloc_32(x) vmalloc(x)
-#endif
-
struct it_dma_prg {
struct dma_cmd begin;
quadlet_t data[4];
@@ -206,14 +185,12 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
struct dma_iso_ctx *d;
int i;
- d = kmalloc(sizeof(struct dma_iso_ctx), GFP_KERNEL);
- if (d == NULL) {
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma_iso_ctx");
return NULL;
}
- memset(d, 0, sizeof *d);
-
d->ohci = ohci;
d->type = type;
d->channel = channel;
@@ -251,9 +228,8 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
}
d->ctx = d->iso_tasklet.context;
- d->prg_reg = kmalloc(d->num_desc * sizeof(struct dma_prog_region),
- GFP_KERNEL);
- if (d->prg_reg == NULL) {
+ d->prg_reg = kmalloc(d->num_desc * sizeof(*d->prg_reg), GFP_KERNEL);
+ if (!d->prg_reg) {
PRINT(KERN_ERR, ohci->host->id, "Failed to allocate ir prg regs");
free_dma_iso_ctx(d);
return NULL;
@@ -268,15 +244,14 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
d->cmdPtr = OHCI1394_IsoRcvCommandPtr+32*d->ctx;
d->ctxMatch = OHCI1394_IsoRcvContextMatch+32*d->ctx;
- d->ir_prg = kmalloc(d->num_desc * sizeof(struct dma_cmd *),
+ d->ir_prg = kzalloc(d->num_desc * sizeof(*d->ir_prg),
GFP_KERNEL);
- if (d->ir_prg == NULL) {
+ if (!d->ir_prg) {
PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma ir prg");
free_dma_iso_ctx(d);
return NULL;
}
- memset(d->ir_prg, 0, d->num_desc * sizeof(struct dma_cmd *));
d->nb_cmd = d->buf_size / PAGE_SIZE + 1;
d->left_size = (d->frame_size % PAGE_SIZE) ?
@@ -297,16 +272,15 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
d->ctrlClear = OHCI1394_IsoXmitContextControlClear+16*d->ctx;
d->cmdPtr = OHCI1394_IsoXmitCommandPtr+16*d->ctx;
- d->it_prg = kmalloc(d->num_desc * sizeof(struct it_dma_prg *),
+ d->it_prg = kzalloc(d->num_desc * sizeof(*d->it_prg),
GFP_KERNEL);
- if (d->it_prg == NULL) {
+ if (!d->it_prg) {
PRINT(KERN_ERR, ohci->host->id,
"Failed to allocate dma it prg");
free_dma_iso_ctx(d);
return NULL;
}
- memset(d->it_prg, 0, d->num_desc*sizeof(struct it_dma_prg *));
d->packet_size = packet_size;
@@ -337,47 +311,24 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
}
}
- d->buffer_status = kmalloc(d->num_desc * sizeof(unsigned int),
- GFP_KERNEL);
- d->buffer_prg_assignment = kmalloc(d->num_desc * sizeof(unsigned int),
- GFP_KERNEL);
- d->buffer_time = kmalloc(d->num_desc * sizeof(struct timeval),
- GFP_KERNEL);
- d->last_used_cmd = kmalloc(d->num_desc * sizeof(unsigned int),
- GFP_KERNEL);
- d->next_buffer = kmalloc(d->num_desc * sizeof(int),
- GFP_KERNEL);
-
- if (d->buffer_status == NULL) {
- PRINT(KERN_ERR, ohci->host->id, "Failed to allocate buffer_status");
- free_dma_iso_ctx(d);
- return NULL;
- }
- if (d->buffer_prg_assignment == NULL) {
- PRINT(KERN_ERR, ohci->host->id, "Failed to allocate buffer_prg_assignment");
- free_dma_iso_ctx(d);
- return NULL;
- }
- if (d->buffer_time == NULL) {
- PRINT(KERN_ERR, ohci->host->id, "Failed to allocate buffer_time");
- free_dma_iso_ctx(d);
- return NULL;
- }
- if (d->last_used_cmd == NULL) {
- PRINT(KERN_ERR, ohci->host->id, "Failed to allocate last_used_cmd");
- free_dma_iso_ctx(d);
- return NULL;
- }
- if (d->next_buffer == NULL) {
- PRINT(KERN_ERR, ohci->host->id, "Failed to allocate next_buffer");
+ d->buffer_status =
+ kzalloc(d->num_desc * sizeof(*d->buffer_status), GFP_KERNEL);
+ d->buffer_prg_assignment =
+ kzalloc(d->num_desc * sizeof(*d->buffer_prg_assignment), GFP_KERNEL);
+ d->buffer_time =
+ kzalloc(d->num_desc * sizeof(*d->buffer_time), GFP_KERNEL);
+ d->last_used_cmd =
+ kzalloc(d->num_desc * sizeof(*d->last_used_cmd), GFP_KERNEL);
+ d->next_buffer =
+ kzalloc(d->num_desc * sizeof(*d->next_buffer), GFP_KERNEL);
+
+ if (!d->buffer_status || !d->buffer_prg_assignment || !d->buffer_time ||
+ !d->last_used_cmd || !d->next_buffer) {
+ PRINT(KERN_ERR, ohci->host->id,
+ "Failed to allocate dma_iso_ctx member");
free_dma_iso_ctx(d);
return NULL;
}
- memset(d->buffer_status, 0, d->num_desc * sizeof(unsigned int));
- memset(d->buffer_prg_assignment, 0, d->num_desc * sizeof(unsigned int));
- memset(d->buffer_time, 0, d->num_desc * sizeof(struct timeval));
- memset(d->last_used_cmd, 0, d->num_desc * sizeof(unsigned int));
- memset(d->next_buffer, -1, d->num_desc * sizeof(int));
spin_lock_init(&d->lock);
@@ -539,7 +490,7 @@ static void wakeup_dma_ir_ctx(unsigned long l)
if (d->ir_prg[i][d->nb_cmd-1].status & cpu_to_le32(0xFFFF0000)) {
reset_ir_status(d, i);
d->buffer_status[d->buffer_prg_assignment[i]] = VIDEO1394_BUFFER_READY;
- do_gettimeofday(&d->buffer_time[i]);
+ do_gettimeofday(&d->buffer_time[d->buffer_prg_assignment[i]]);
}
}
@@ -1046,7 +997,6 @@ static int __video1394_ioctl(struct file *file,
/* set time of buffer */
v.filltime = d->buffer_time[v.buffer];
-// printk("Buffer %d time %d\n", v.buffer, (d->buffer_time[v.buffer]).tv_usec);
/*
* Look ahead to see how many more buffers have been received
@@ -1085,7 +1035,7 @@ static int __video1394_ioctl(struct file *file,
}
if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) {
- int buf_size = d->nb_cmd * sizeof(unsigned int);
+ int buf_size = d->nb_cmd * sizeof(*psizes);
struct video1394_queue_variable __user *p = argp;
unsigned int __user *qv;
@@ -1104,7 +1054,7 @@ static int __video1394_ioctl(struct file *file,
spin_lock_irqsave(&d->lock,flags);
- // last_buffer is last_prg
+ /* last_buffer is last_prg */
next_prg = (d->last_buffer + 1) % d->num_desc;
if (d->buffer_status[v.buffer]!=VIDEO1394_BUFFER_FREE) {
PRINT(KERN_ERR, ohci->host->id,
@@ -1251,13 +1201,12 @@ static int video1394_open(struct inode *inode, struct file *file)
if (ohci == NULL)
return -EIO;
- ctx = kmalloc(sizeof(struct file_ctx), GFP_KERNEL);
- if (ctx == NULL) {
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
PRINT(KERN_ERR, ohci->host->id, "Cannot malloc file_ctx");
return -ENOMEM;
}
- memset(ctx, 0, sizeof(struct file_ctx));
ctx->ohci = ohci;
INIT_LIST_HEAD(&ctx->context_list);
ctx->current_ctx = NULL;
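The video1394.c hunks above repeatedly replace kmalloc() followed by memset() with kzalloc(), and fold the per-array "Failed to allocate ..." checks into one combined test. A minimal kernel-style sketch of that pattern, using made-up struct and field names rather than the driver's own:

	#include <linux/slab.h>
	#include <linux/errno.h>

	struct iso_bufs {
		unsigned int *status;	/* zero-initialised by kzalloc() */
		int *next;
	};

	static int iso_bufs_alloc(struct iso_bufs *b, int n)
	{
		/* kzalloc() == kmalloc() + memset(.., 0, ..), so the explicit memsets go away */
		b->status = kzalloc(n * sizeof(*b->status), GFP_KERNEL);
		b->next   = kzalloc(n * sizeof(*b->next), GFP_KERNEL);

		/* one combined failure check replaces one message per allocation */
		if (!b->status || !b->next) {
			kfree(b->status);	/* kfree(NULL) is a no-op */
			kfree(b->next);
			return -ENOMEM;
		}
		return 0;
	}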
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 02110e00d14..c06b18102b6 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -308,10 +308,11 @@ static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
unsigned long flags;
int ret;
+ static int next_id;
do {
spin_lock_irqsave(&cm.lock, flags);
- ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1,
+ ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, next_id++,
(__force int *) &cm_id_priv->id.local_id);
spin_unlock_irqrestore(&cm.lock, flags);
} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
@@ -684,6 +685,13 @@ retest:
cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
break;
case IB_CM_REQ_SENT:
+ ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
+ &cm_id_priv->av.port->cm_dev->ca_guid,
+ sizeof cm_id_priv->av.port->cm_dev->ca_guid,
+ NULL, 0);
+ break;
case IB_CM_MRA_REQ_RCVD:
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
@@ -694,10 +702,8 @@ retest:
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
- &cm_id_priv->av.port->cm_dev->ca_guid,
- sizeof cm_id_priv->av.port->cm_dev->ca_guid,
- NULL, 0);
+ ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
+ NULL, 0, NULL, 0);
break;
case IB_CM_ESTABLISHED:
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -3157,22 +3163,6 @@ int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
-static __be64 cm_get_ca_guid(struct ib_device *device)
-{
- struct ib_device_attr *device_attr;
- __be64 guid;
- int ret;
-
- device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
- if (!device_attr)
- return 0;
-
- ret = ib_query_device(device, device_attr);
- guid = ret ? 0 : device_attr->node_guid;
- kfree(device_attr);
- return guid;
-}
-
static void cm_add_one(struct ib_device *device)
{
struct cm_device *cm_dev;
@@ -3194,9 +3184,7 @@ static void cm_add_one(struct ib_device *device)
return;
cm_dev->device = device;
- cm_dev->ca_guid = cm_get_ca_guid(device);
- if (!cm_dev->ca_guid)
- goto error1;
+ cm_dev->ca_guid = device->node_guid;
set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
for (i = 1; i <= device->phys_port_cnt; i++) {
@@ -3211,11 +3199,11 @@ static void cm_add_one(struct ib_device *device)
cm_recv_handler,
port);
if (IS_ERR(port->mad_agent))
- goto error2;
+ goto error1;
ret = ib_modify_port(device, i, 0, &port_modify);
if (ret)
- goto error3;
+ goto error2;
}
ib_set_client_data(device, &cm_client, cm_dev);
@@ -3224,9 +3212,9 @@ static void cm_add_one(struct ib_device *device)
write_unlock_irqrestore(&cm.device_lock, flags);
return;
-error3:
- ib_unregister_mad_agent(port->mad_agent);
error2:
+ ib_unregister_mad_agent(port->mad_agent);
+error1:
port_modify.set_port_cap_mask = 0;
port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
while (--i) {
@@ -3234,7 +3222,6 @@ error2:
ib_modify_port(device, port->port_num, 0, &port_modify);
ib_unregister_mad_agent(port->mad_agent);
}
-error1:
kfree(cm_dev);
}
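The cm_alloc_id() change above seeds idr_get_new_above() with a static counter instead of a fixed starting value of 1, presumably so a just-freed local ID is not handed out again immediately. A sketch of that era's idr allocation loop with illustrative names (the lock and table here are stand-ins, not the CM's own):

	#include <linux/idr.h>
	#include <linux/spinlock.h>
	#include <linux/errno.h>
	#include <linux/slab.h>

	static DEFINE_SPINLOCK(id_lock);
	static DEFINE_IDR(id_table);

	static int alloc_id(void *ptr, int *id)
	{
		static int next_id;
		int ret;

		do {
			spin_lock_irq(&id_lock);
			/* start searching at next_id so IDs rotate through the space */
			ret = idr_get_new_above(&id_table, ptr, next_id++, id);
			spin_unlock_irq(&id_lock);
		} while (ret == -EAGAIN && idr_pre_get(&id_table, GFP_KERNEL));

		return ret;
	}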
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index e169e798354..b2f3cb91d9b 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -38,8 +38,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
-
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#include "core_priv.h"
@@ -57,13 +56,13 @@ static LIST_HEAD(device_list);
static LIST_HEAD(client_list);
/*
- * device_sem protects access to both device_list and client_list.
+ * device_mutex protects access to both device_list and client_list.
* There's no real point to using multiple locks or something fancier
* like an rwsem: we always access both lists, and we're always
* modifying one list or the other list. In any case this is not a
* hot path so there's no point in trying to optimize.
*/
-static DECLARE_MUTEX(device_sem);
+static DEFINE_MUTEX(device_mutex);
static int ib_device_check_mandatory(struct ib_device *device)
{
@@ -221,7 +220,7 @@ int ib_register_device(struct ib_device *device)
{
int ret;
- down(&device_sem);
+ mutex_lock(&device_mutex);
if (strchr(device->name, '%')) {
ret = alloc_name(device->name);
@@ -259,7 +258,7 @@ int ib_register_device(struct ib_device *device)
}
out:
- up(&device_sem);
+ mutex_unlock(&device_mutex);
return ret;
}
EXPORT_SYMBOL(ib_register_device);
@@ -276,7 +275,7 @@ void ib_unregister_device(struct ib_device *device)
struct ib_client_data *context, *tmp;
unsigned long flags;
- down(&device_sem);
+ mutex_lock(&device_mutex);
list_for_each_entry_reverse(client, &client_list, list)
if (client->remove)
@@ -284,7 +283,7 @@ void ib_unregister_device(struct ib_device *device)
list_del(&device->core_list);
- up(&device_sem);
+ mutex_unlock(&device_mutex);
spin_lock_irqsave(&device->client_data_lock, flags);
list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
@@ -312,14 +311,14 @@ int ib_register_client(struct ib_client *client)
{
struct ib_device *device;
- down(&device_sem);
+ mutex_lock(&device_mutex);
list_add_tail(&client->list, &client_list);
list_for_each_entry(device, &device_list, core_list)
if (client->add && !add_client_context(device, client))
client->add(device);
- up(&device_sem);
+ mutex_unlock(&device_mutex);
return 0;
}
@@ -339,7 +338,7 @@ void ib_unregister_client(struct ib_client *client)
struct ib_device *device;
unsigned long flags;
- down(&device_sem);
+ mutex_lock(&device_mutex);
list_for_each_entry(device, &device_list, core_list) {
if (client->remove)
@@ -355,7 +354,7 @@ void ib_unregister_client(struct ib_client *client)
}
list_del(&client->list);
- up(&device_sem);
+ mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);
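device.c above (and ucm.c and the uverbs files further down) follows the same mechanical conversion from the old semaphore-as-mutex idiom to the dedicated mutex API: DECLARE_MUTEX becomes DEFINE_MUTEX, down()/up() become mutex_lock()/mutex_unlock(), and init_MUTEX() becomes mutex_init(). A minimal sketch of the pattern with an illustrative list and lock name:

	#include <linux/mutex.h>
	#include <linux/list.h>

	static LIST_HEAD(client_list);
	/* was: static DECLARE_MUTEX(client_sem); */
	static DEFINE_MUTEX(client_mutex);

	static void add_client(struct list_head *entry)
	{
		mutex_lock(&client_mutex);	/* was down(&client_sem) */
		list_add_tail(entry, &client_list);
		mutex_unlock(&client_mutex);	/* was up(&client_sem) */
	}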
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 1f1743c5c9a..5982d687a00 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -445,13 +445,7 @@ static int ib_device_uevent(struct class_device *cdev, char **envp,
return -ENOMEM;
/*
- * It might be nice to pass the node GUID with the event, but
- * right now the only way to get it is to query the device
- * provider, and this can crash during device removal because
- * we are will be running after driver removal has started.
- * We could add a node_guid field to struct ib_device, or we
- * could just let userspace read the node GUID from sysfs when
- * devices are added.
+ * It would be nice to pass the node GUID with the event...
*/
envp[i] = NULL;
@@ -623,21 +617,15 @@ static ssize_t show_sys_image_guid(struct class_device *cdev, char *buf)
static ssize_t show_node_guid(struct class_device *cdev, char *buf)
{
struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
- struct ib_device_attr attr;
- ssize_t ret;
if (!ibdev_is_alive(dev))
return -ENODEV;
- ret = ib_query_device(dev, &attr);
- if (ret)
- return ret;
-
return sprintf(buf, "%04x:%04x:%04x:%04x\n",
- be16_to_cpu(((__be16 *) &attr.node_guid)[0]),
- be16_to_cpu(((__be16 *) &attr.node_guid)[1]),
- be16_to_cpu(((__be16 *) &attr.node_guid)[2]),
- be16_to_cpu(((__be16 *) &attr.node_guid)[3]));
+ be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
+ be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
+ be16_to_cpu(((__be16 *) &dev->node_guid)[2]),
+ be16_to_cpu(((__be16 *) &dev->node_guid)[3]));
}
static CLASS_DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 6e15787d1de..e95c4293a49 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -42,6 +42,7 @@
#include <linux/mount.h>
#include <linux/cdev.h>
#include <linux/idr.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
@@ -113,7 +114,7 @@ static struct ib_client ucm_client = {
.remove = ib_ucm_remove_one
};
-static DECLARE_MUTEX(ctx_id_mutex);
+static DEFINE_MUTEX(ctx_id_mutex);
static DEFINE_IDR(ctx_id_table);
static DECLARE_BITMAP(dev_map, IB_UCM_MAX_DEVICES);
@@ -121,7 +122,7 @@ static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
{
struct ib_ucm_context *ctx;
- down(&ctx_id_mutex);
+ mutex_lock(&ctx_id_mutex);
ctx = idr_find(&ctx_id_table, id);
if (!ctx)
ctx = ERR_PTR(-ENOENT);
@@ -129,7 +130,7 @@ static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
ctx = ERR_PTR(-EINVAL);
else
atomic_inc(&ctx->ref);
- up(&ctx_id_mutex);
+ mutex_unlock(&ctx_id_mutex);
return ctx;
}
@@ -186,9 +187,9 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
if (!result)
goto error;
- down(&ctx_id_mutex);
+ mutex_lock(&ctx_id_mutex);
result = idr_get_new(&ctx_id_table, ctx, &ctx->id);
- up(&ctx_id_mutex);
+ mutex_unlock(&ctx_id_mutex);
} while (result == -EAGAIN);
if (result)
@@ -550,9 +551,9 @@ static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
err2:
ib_destroy_cm_id(ctx->cm_id);
err1:
- down(&ctx_id_mutex);
+ mutex_lock(&ctx_id_mutex);
idr_remove(&ctx_id_table, ctx->id);
- up(&ctx_id_mutex);
+ mutex_unlock(&ctx_id_mutex);
kfree(ctx);
return result;
}
@@ -572,7 +573,7 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- down(&ctx_id_mutex);
+ mutex_lock(&ctx_id_mutex);
ctx = idr_find(&ctx_id_table, cmd.id);
if (!ctx)
ctx = ERR_PTR(-ENOENT);
@@ -580,7 +581,7 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
ctx = ERR_PTR(-EINVAL);
else
idr_remove(&ctx_id_table, ctx->id);
- up(&ctx_id_mutex);
+ mutex_unlock(&ctx_id_mutex);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
@@ -1280,9 +1281,9 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
struct ib_ucm_context, file_list);
up(&file->mutex);
- down(&ctx_id_mutex);
+ mutex_lock(&ctx_id_mutex);
idr_remove(&ctx_id_table, ctx->id);
- up(&ctx_id_mutex);
+ mutex_unlock(&ctx_id_mutex);
ib_destroy_cm_id(ctx->cm_id);
ib_ucm_cleanup_events(ctx);
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index eb7f52537cc..c908de8db5a 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -197,8 +197,8 @@ static void send_handler(struct ib_mad_agent *agent,
memcpy(timeout->mad.data, packet->mad.data,
sizeof (struct ib_mad_hdr));
- if (!queue_packet(file, agent, timeout))
- return;
+ if (queue_packet(file, agent, timeout))
+ kfree(timeout);
}
out:
kfree(packet);
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 7114e3fbab0..f7eecbc6af6 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -41,6 +41,7 @@
#include <linux/kref.h>
#include <linux/idr.h>
+#include <linux/mutex.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
@@ -88,7 +89,7 @@ struct ib_uverbs_event_file {
struct ib_uverbs_file {
struct kref ref;
- struct semaphore mutex;
+ struct mutex mutex;
struct ib_uverbs_device *device;
struct ib_ucontext *ucontext;
struct ib_event_handler event_handler;
@@ -131,7 +132,7 @@ struct ib_ucq_object {
u32 async_events_reported;
};
-extern struct semaphore ib_uverbs_idr_mutex;
+extern struct mutex ib_uverbs_idr_mutex;
extern struct idr ib_uverbs_pd_idr;
extern struct idr ib_uverbs_mr_idr;
extern struct idr ib_uverbs_mw_idr;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index a57d021d435..407b6284d7d 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -67,7 +67,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- down(&file->mutex);
+ mutex_lock(&file->mutex);
if (file->ucontext) {
ret = -EINVAL;
@@ -119,7 +119,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
fd_install(resp.async_fd, filp);
- up(&file->mutex);
+ mutex_unlock(&file->mutex);
return in_len;
@@ -131,7 +131,7 @@ err_free:
ibdev->dealloc_ucontext(ucontext);
err:
- up(&file->mutex);
+ mutex_unlock(&file->mutex);
return ret;
}
@@ -157,7 +157,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
memset(&resp, 0, sizeof resp);
resp.fw_ver = attr.fw_ver;
- resp.node_guid = attr.node_guid;
+ resp.node_guid = file->device->ib_dev->node_guid;
resp.sys_image_guid = attr.sys_image_guid;
resp.max_mr_size = attr.max_mr_size;
resp.page_size_cap = attr.page_size_cap;
@@ -290,7 +290,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
pd->uobject = uobj;
atomic_set(&pd->usecnt, 0);
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
retry:
if (!idr_pre_get(&ib_uverbs_pd_idr, GFP_KERNEL)) {
@@ -314,11 +314,11 @@ retry:
goto err_idr;
}
- down(&file->mutex);
+ mutex_lock(&file->mutex);
list_add_tail(&uobj->list, &file->ucontext->pd_list);
- up(&file->mutex);
+ mutex_unlock(&file->mutex);
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
return in_len;
@@ -326,7 +326,7 @@ err_idr:
idr_remove(&ib_uverbs_pd_idr, uobj->id);
err_up:
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
ib_dealloc_pd(pd);
err:
@@ -346,7 +346,7 @@ ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
if (!pd || pd->uobject->context != file->ucontext)
@@ -360,14 +360,14 @@ ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle);
- down(&file->mutex);
+ mutex_lock(&file->mutex);
list_del(&uobj->list);
- up(&file->mutex);
+ mutex_unlock(&file->mutex);
kfree(uobj);
out:
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
return ret ? ret : in_len;
}
@@ -426,7 +426,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
obj->umem.virt_base = cmd.hca_va;
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
if (!pd || pd->uobject->context != file->ucontext) {
@@ -476,11 +476,11 @@ retry:
goto err_idr;
}
- down(&file->mutex);
+ mutex_lock(&file->mutex);
list_add_tail(&obj->uobject.list, &file->ucontext->mr_list);
- up(&file->mutex);
+ mutex_unlock(&file->mutex);
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
return in_len;
@@ -489,9 +489,10 @@ err_idr:
err_unreg:
ib_dereg_mr(mr);
+ atomic_dec(&pd->usecnt);
err_up:
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
ib_umem_release(file->device->ib_dev, &obj->umem);
@@ -512,7 +513,7 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
mr = idr_find(&ib_uverbs_mr_idr, cmd.mr_handle);
if (!mr || mr->uobject->context != file->ucontext)
@@ -526,15 +527,15 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle);
- down(&file->mutex);
+ mutex_lock(&file->mutex);
list_del(&memobj->uobject.list);
- up(&file->mutex);
+ mutex_unlock(&file->mutex);
ib_umem_release(file->device->ib_dev, &memobj->umem);
kfree(memobj);
out:
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
return ret ? ret : in_len;
}
@@ -593,13 +594,18 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
if (cmd.comp_vector >= file->device->num_comp_vectors)
return -EINVAL;
- if (cmd.comp_channel >= 0)
- ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
-
uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
if (!uobj)
return -ENOMEM;
+ if (cmd.comp_channel >= 0) {
+ ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
+ if (!ev_file) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
uobj->uobject.user_handle = cmd.user_handle;
uobj->uobject.context = file->ucontext;
uobj->uverbs_file = file;
@@ -622,7 +628,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
cq->cq_context = ev_file;
atomic_set(&cq->usecnt, 0);
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
retry:
if (!idr_pre_get(&ib_uverbs_cq_idr, GFP_KERNEL)) {
@@ -647,11 +653,11 @@ retry:
goto err_idr;
}
- down(&file->mutex);
+ mutex_lock(&file->mutex);
list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list);
- up(&file->mutex);
+ mutex_unlock(&file->mutex);
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
return in_len;
@@ -659,10 +665,12 @@ err_idr:
idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id);
err_up:
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
ib_destroy_cq(cq);
err:
+ if (ev_file)
+ ib_uverbs_release_ucq(file, ev_file, uobj);
kfree(uobj);
return ret;
}
@@ -693,7 +701,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
goto out_wc;
}
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
if (!cq || cq->uobject->context != file->ucontext) {
ret = -EINVAL;
@@ -723,7 +731,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
ret = -EFAULT;
out:
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
kfree(resp);
out_wc:
@@ -742,14 +750,14 @@ ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
if (cq && cq->uobject->context == file->ucontext) {
ib_req_notify_cq(cq, cmd.solicited_only ?
IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
ret = in_len;
}
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
return ret;
}
@@ -771,7 +779,7 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
memset(&resp, 0, sizeof resp);
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
if (!cq || cq->uobject->context != file->ucontext)
@@ -787,9 +795,9 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle);
- down(&file->mutex);
+ mutex_lock(&file->mutex);
list_del(&uobj->uobject.list);
- up(&file->mutex);
+ mutex_unlock(&file->mutex);
ib_uverbs_release_ucq(file, ev_file, uobj);
@@ -803,7 +811,7 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
ret = -EFAULT;
out:
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
return ret ? ret : in_len;
}
@@ -837,7 +845,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
if (!uobj)
return -ENOMEM;
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle);
@@ -922,11 +930,11 @@ retry:
goto err_idr;
}
- down(&file->mutex);
+ mutex_lock(&file->mutex);
list_add_tail(&uobj->uevent.uobject.list, &file->ucontext->qp_list);
- up(&file->mutex);
+ mutex_unlock(&file->mutex);
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
return in_len;
@@ -935,9 +943,14 @@ err_idr:
err_destroy:
ib_destroy_qp(qp);
+ atomic_dec(&pd->usecnt);
+ atomic_dec(&attr.send_cq->usecnt);
+ atomic_dec(&attr.recv_cq->usecnt);
+ if (attr.srq)
+ atomic_dec(&attr.srq->usecnt);
err_up:
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
kfree(uobj);
return ret;
@@ -959,7 +972,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
if (!attr)
return -ENOMEM;
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
if (!qp || qp->uobject->context != file->ucontext) {
@@ -1020,7 +1033,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
ret = in_len;
out:
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
kfree(attr);
return ret;
@@ -1041,7 +1054,7 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
memset(&resp, 0, sizeof resp);
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
if (!qp || qp->uobject->context != file->ucontext)
@@ -1060,9 +1073,9 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);
- down(&file->mutex);
+ mutex_lock(&file->mutex);
list_del(&uobj->uevent.uobject.list);
- up(&file->mutex);
+ mutex_unlock(&file->mutex);
ib_uverbs_release_uevent(file, &uobj->uevent);
@@ -1075,7 +1088,7 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
ret = -EFAULT;
out:
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
return ret ? ret : in_len;
}
@@ -1106,7 +1119,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
if (!user_wr)
return -ENOMEM;
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
if (!qp || qp->uobject->context != file->ucontext)
@@ -1211,7 +1224,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
ret = -EFAULT;
out:
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
while (wr) {
next = wr->next;
@@ -1328,7 +1341,7 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
if (IS_ERR(wr))
return PTR_ERR(wr);
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
if (!qp || qp->uobject->context != file->ucontext)
@@ -1349,7 +1362,7 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
ret = -EFAULT;
out:
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
while (wr) {
next = wr->next;
@@ -1379,7 +1392,7 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
if (IS_ERR(wr))
return PTR_ERR(wr);
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
if (!srq || srq->uobject->context != file->ucontext)
@@ -1400,7 +1413,7 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
ret = -EFAULT;
out:
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
while (wr) {
next = wr->next;
@@ -1433,7 +1446,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
if (!uobj)
return -ENOMEM;
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
if (!pd || pd->uobject->context != file->ucontext) {
@@ -1448,6 +1461,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
attr.sl = cmd.attr.sl;
attr.src_path_bits = cmd.attr.src_path_bits;
attr.static_rate = cmd.attr.static_rate;
+ attr.ah_flags = cmd.attr.is_global ? IB_AH_GRH : 0;
attr.port_num = cmd.attr.port_num;
attr.grh.flow_label = cmd.attr.grh.flow_label;
attr.grh.sgid_index = cmd.attr.grh.sgid_index;
@@ -1484,11 +1498,11 @@ retry:
goto err_idr;
}
- down(&file->mutex);
+ mutex_lock(&file->mutex);
list_add_tail(&uobj->list, &file->ucontext->ah_list);
- up(&file->mutex);
+ mutex_unlock(&file->mutex);
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
return in_len;
@@ -1499,7 +1513,7 @@ err_destroy:
ib_destroy_ah(ah);
err_up:
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
kfree(uobj);
return ret;
@@ -1516,7 +1530,7 @@ ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
ah = idr_find(&ib_uverbs_ah_idr, cmd.ah_handle);
if (!ah || ah->uobject->context != file->ucontext)
@@ -1530,14 +1544,14 @@ ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
idr_remove(&ib_uverbs_ah_idr, cmd.ah_handle);
- down(&file->mutex);
+ mutex_lock(&file->mutex);
list_del(&uobj->list);
- up(&file->mutex);
+ mutex_unlock(&file->mutex);
kfree(uobj);
out:
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
return ret ? ret : in_len;
}
@@ -1555,7 +1569,7 @@ ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
if (!qp || qp->uobject->context != file->ucontext)
@@ -1588,7 +1602,7 @@ ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
kfree(mcast);
out:
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
return ret ? ret : in_len;
}
@@ -1606,7 +1620,7 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
if (!qp || qp->uobject->context != file->ucontext)
@@ -1627,7 +1641,7 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
}
out:
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
return ret ? ret : in_len;
}
@@ -1659,7 +1673,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
if (!uobj)
return -ENOMEM;
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
@@ -1716,11 +1730,11 @@ retry:
goto err_idr;
}
- down(&file->mutex);
+ mutex_lock(&file->mutex);
list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list);
- up(&file->mutex);
+ mutex_unlock(&file->mutex);
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
return in_len;
@@ -1729,9 +1743,10 @@ err_idr:
err_destroy:
ib_destroy_srq(srq);
+ atomic_dec(&pd->usecnt);
err_up:
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
kfree(uobj);
return ret;
@@ -1749,7 +1764,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
if (!srq || srq->uobject->context != file->ucontext) {
@@ -1763,7 +1778,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
ret = ib_modify_srq(srq, &attr, cmd.attr_mask);
out:
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
return ret ? ret : in_len;
}
@@ -1781,7 +1796,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
memset(&resp, 0, sizeof resp);
@@ -1797,9 +1812,9 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);
- down(&file->mutex);
+ mutex_lock(&file->mutex);
list_del(&uobj->uobject.list);
- up(&file->mutex);
+ mutex_unlock(&file->mutex);
ib_uverbs_release_uevent(file, uobj);
@@ -1812,7 +1827,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
ret = -EFAULT;
out:
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
return ret ? ret : in_len;
}
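Several uverbs_cmd.c error paths above gain an atomic_dec() so the use count taken when an object is created is rolled back if a later step fails. A generic sketch of that pairing (the structure and the failing step are hypothetical; only the inc/dec symmetry is the point):

	#include <asm/atomic.h>
	#include <linux/errno.h>

	struct parent_obj {
		atomic_t usecnt;
	};

	static int risky_setup(void)
	{
		return 0;	/* hypothetical step that may fail in real code */
	}

	static int create_child(struct parent_obj *pd)
	{
		atomic_inc(&pd->usecnt);	/* reference taken at creation */

		if (risky_setup()) {
			atomic_dec(&pd->usecnt);	/* undo it on the error path */
			return -EINVAL;
		}
		return 0;
	}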
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 81737bd6fae..96ea79b63df 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -66,7 +66,7 @@ enum {
static struct class *uverbs_class;
-DECLARE_MUTEX(ib_uverbs_idr_mutex);
+DEFINE_MUTEX(ib_uverbs_idr_mutex);
DEFINE_IDR(ib_uverbs_pd_idr);
DEFINE_IDR(ib_uverbs_mr_idr);
DEFINE_IDR(ib_uverbs_mw_idr);
@@ -180,7 +180,7 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
if (!context)
return 0;
- down(&ib_uverbs_idr_mutex);
+ mutex_lock(&ib_uverbs_idr_mutex);
list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) {
struct ib_ah *ah = idr_find(&ib_uverbs_ah_idr, uobj->id);
@@ -250,7 +250,7 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
kfree(uobj);
}
- up(&ib_uverbs_idr_mutex);
+ mutex_unlock(&ib_uverbs_idr_mutex);
return context->device->dealloc_ucontext(context);
}
@@ -653,7 +653,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
file->ucontext = NULL;
file->async_file = NULL;
kref_init(&file->ref);
- init_MUTEX(&file->mutex);
+ mutex_init(&file->mutex);
filp->private_data = file;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 4c15e112736..c857361be44 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -107,9 +107,9 @@ struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
if (wc->wc_flags & IB_WC_GRH) {
ah_attr.ah_flags = IB_AH_GRH;
- ah_attr.grh.dgid = grh->dgid;
+ ah_attr.grh.dgid = grh->sgid;
- ret = ib_find_cached_gid(pd->device, &grh->sgid, &port_num,
+ ret = ib_find_cached_gid(pd->device, &grh->dgid, &port_num,
&gid_index);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index 22fdc446f25..a14eed08a0f 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -163,6 +163,11 @@ int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah)
return 0;
}
+int mthca_ah_grh_present(struct mthca_ah *ah)
+{
+ return !!(ah->av->g_slid & 0x80);
+}
+
int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
struct ib_ud_header *header)
{
@@ -172,8 +177,7 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
header->lrh.service_level = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28;
header->lrh.destination_lid = ah->av->dlid;
header->lrh.source_lid = cpu_to_be16(ah->av->g_slid & 0x7f);
- if (ah->av->g_slid & 0x80) {
- header->grh_present = 1;
+ if (mthca_ah_grh_present(ah)) {
header->grh.traffic_class =
(be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 20) & 0xff;
header->grh.flow_label =
@@ -184,8 +188,6 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
&header->grh.source_gid);
memcpy(header->grh.destination_gid.raw,
ah->av->dgid, 16);
- } else {
- header->grh_present = 0;
}
return 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 9ed34587fc5..be1791be627 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -606,7 +606,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
err = -EINVAL;
goto out;
}
- for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i) {
+ for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) {
if (virt != -1) {
pages[nent * 2] = cpu_to_be64(virt);
virt += 1 << lg;
@@ -727,8 +727,8 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
* system pages needed.
*/
dev->fw.arbel.fw_pages =
- (dev->fw.arbel.fw_pages + (1 << (PAGE_SHIFT - 12)) - 1) >>
- (PAGE_SHIFT - 12);
+ ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE >> 12) >>
+ (PAGE_SHIFT - 12);
mthca_dbg(dev, "Clear int @ %llx, EQ arm @ %llx, EQ set CI @ %llx\n",
(unsigned long long) dev->fw.arbel.clr_int_base,
@@ -937,10 +937,6 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
if (err)
goto out;
- MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
- dev_lim->max_srq_sz = (1 << field) - 1;
- MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
- dev_lim->max_qp_sz = (1 << field) - 1;
MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_QP_OFFSET);
dev_lim->reserved_qps = 1 << (field & 0xf);
MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_OFFSET);
@@ -1056,6 +1052,10 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags);
if (mthca_is_memfree(dev)) {
+ MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
+ dev_lim->max_srq_sz = 1 << field;
+ MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
+ dev_lim->max_qp_sz = 1 << field;
MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSZ_SRQ_OFFSET);
dev_lim->hca.arbel.resize_srq = field & 1;
MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET);
@@ -1087,6 +1087,10 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
mthca_dbg(dev, "Max ICM size %lld MB\n",
(unsigned long long) dev_lim->hca.arbel.max_icm_sz >> 20);
} else {
+ MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
+ dev_lim->max_srq_sz = (1 << field) - 1;
+ MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
+ dev_lim->max_qp_sz = (1 << field) - 1;
MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_AV_OFFSET);
dev_lim->hca.tavor.max_avs = 1 << (field & 0x3f);
dev_lim->mpt_entry_sz = MTHCA_MPT_ENTRY_SIZE;
@@ -1441,6 +1445,7 @@ int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
* pages needed.
*/
*aux_pages = (*aux_pages + (1 << (PAGE_SHIFT - 12)) - 1) >> (PAGE_SHIFT - 12);
+ *aux_pages = ALIGN(*aux_pages, PAGE_SIZE >> 12) >> (PAGE_SHIFT - 12);
return 0;
}
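The mthca_cmd.c hunks above swap the open-coded "(x + (1 << (PAGE_SHIFT - 12)) - 1) >> (PAGE_SHIFT - 12)" round-up for ALIGN(): the firmware reports page counts in 4K units, and the driver needs them in PAGE_SIZE units, rounded up. A small sketch of the arithmetic, assuming the standard ALIGN() macro:

	#include <linux/kernel.h>	/* ALIGN() */
	#include <asm/page.h>		/* PAGE_SIZE, PAGE_SHIFT */

	/* Convert a count of 4K chunks into a count of system pages, rounding up. */
	static unsigned long fw_pages_to_sys_pages(unsigned long fw_pages_4k)
	{
		/* PAGE_SIZE >> 12 == number of 4K chunks per system page */
		return ALIGN(fw_pages_4k, PAGE_SIZE >> 12) >> (PAGE_SHIFT - 12);
	}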
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 4a8adcef207..96f1a86bf04 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -128,12 +128,12 @@ struct mthca_err_cqe {
__be32 my_qpn;
u32 reserved1[3];
u8 syndrome;
- u8 reserved2;
+ u8 vendor_err;
__be16 db_cnt;
- u32 reserved3;
+ u32 reserved2;
__be32 wqe;
u8 opcode;
- u8 reserved4[2];
+ u8 reserved3[2];
u8 owner;
};
@@ -253,6 +253,15 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
wake_up(&cq->wait);
}
+static inline int is_recv_cqe(struct mthca_cqe *cqe)
+{
+ if ((cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
+ MTHCA_ERROR_CQE_OPCODE_MASK)
+ return !(cqe->opcode & 0x01);
+ else
+ return !(cqe->is_send & 0x80);
+}
+
void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
struct mthca_srq *srq)
{
@@ -296,7 +305,7 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
while ((int) --prod_index - (int) cq->cons_index >= 0) {
cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
if (cqe->my_qpn == cpu_to_be32(qpn)) {
- if (srq)
+ if (srq && is_recv_cqe(cqe))
mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
++nfreed;
} else if (nfreed)
@@ -333,8 +342,8 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
}
/*
- * For completions in error, only work request ID, status (and
- * freed resource count for RD) have to be set.
+ * For completions in error, only work request ID, status, vendor error
+ * (and freed resource count for RD) have to be set.
*/
switch (cqe->syndrome) {
case SYNDROME_LOCAL_LENGTH_ERR:
@@ -396,6 +405,8 @@ static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
break;
}
+ entry->vendor_err = cqe->vendor_err;
+
/*
* Mem-free HCAs always generate one CQE per WQE, even in the
* error case, so we don't have to check the doorbell count, etc.
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 497ff794ef6..a104ab041ea 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -43,6 +43,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
+#include <linux/timer.h>
#include <asm/semaphore.h>
#include "mthca_provider.h"
@@ -519,6 +520,7 @@ int mthca_create_ah(struct mthca_dev *dev,
int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah);
int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
struct ib_ud_header *header);
+int mthca_ah_grh_present(struct mthca_ah *ah);
int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 34d68e5a72d..2eabb27804c 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -45,6 +45,7 @@
enum {
MTHCA_NUM_ASYNC_EQE = 0x80,
MTHCA_NUM_CMD_EQE = 0x80,
+ MTHCA_NUM_SPARE_EQE = 0x80,
MTHCA_EQ_ENTRY_SIZE = 0x20
};
@@ -277,11 +278,10 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
{
struct mthca_eqe *eqe;
int disarm_cqn;
- int eqes_found = 0;
+ int eqes_found = 0;
+ int set_ci = 0;
while ((eqe = next_eqe_sw(eq))) {
- int set_ci = 0;
-
/*
* Make sure we read EQ entry contents after we've
* checked the ownership bit.
@@ -345,12 +345,6 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
be16_to_cpu(eqe->event.cmd.token),
eqe->event.cmd.status,
be64_to_cpu(eqe->event.cmd.out_param));
- /*
- * cmd_event() may add more commands.
- * The card will think the queue has overflowed if
- * we don't tell it we've been processing events.
- */
- set_ci = 1;
break;
case MTHCA_EVENT_TYPE_PORT_CHANGE:
@@ -385,8 +379,16 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
set_eqe_hw(eqe);
++eq->cons_index;
eqes_found = 1;
+ ++set_ci;
- if (unlikely(set_ci)) {
+ /*
+ * The HCA will think the queue has overflowed if we
+ * don't tell it we've been processing events. We
+ * create our EQs with MTHCA_NUM_SPARE_EQE extra
+ * entries, so we must update our consumer index at
+ * least that often.
+ */
+ if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {
/*
* Conditional on hca_type is OK here because
* this is a rare case, not the fast path.
@@ -484,8 +486,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
u8 intr,
struct mthca_eq *eq)
{
- int npages = (nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
- PAGE_SIZE;
+ int npages;
u64 *dma_list = NULL;
dma_addr_t t;
struct mthca_mailbox *mailbox;
@@ -496,6 +497,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
eq->dev = dev;
eq->nent = roundup_pow_of_two(max(nent, 2));
+ npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;
eq->page_list = kmalloc(npages * sizeof *eq->page_list,
GFP_KERNEL);
@@ -862,19 +864,19 @@ int __devinit mthca_init_eq_table(struct mthca_dev *dev)
intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ?
128 : dev->eq_table.inta_pin;
- err = mthca_create_eq(dev, dev->limits.num_cqs,
+ err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
(dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
&dev->eq_table.eq[MTHCA_EQ_COMP]);
if (err)
goto err_out_unmap;
- err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE,
+ err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
(dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
&dev->eq_table.eq[MTHCA_EQ_ASYNC]);
if (err)
goto err_out_comp;
- err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE,
+ err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
(dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
&dev->eq_table.eq[MTHCA_EQ_CMD]);
if (err)
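mthca_eq.c above rounds the requested EQ size up to a power of two, derives the page count from the rounded size rather than the raw request, and adds MTHCA_NUM_SPARE_EQE headroom so the consumer index only needs to be written back every so many events. A sketch of just the sizing arithmetic, reusing the constants visible in the hunks (roundup_pow_of_two() lives in <linux/log2.h> in current kernels):

	#include <linux/kernel.h>
	#include <linux/log2.h>
	#include <asm/page.h>

	#define EQ_ENTRY_SIZE	0x20	/* MTHCA_EQ_ENTRY_SIZE above */

	/* Round the entry count to a power of two, then size the buffer in pages. */
	static int eq_buffer_pages(int nent)
	{
		nent = roundup_pow_of_two(max(nent, 2));
		return ALIGN(nent * EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;
	}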
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 6f94b25f3ac..8b00d9a0f6f 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -261,6 +261,10 @@ static int __devinit mthca_init_tavor(struct mthca_dev *mdev)
}
err = mthca_dev_lim(mdev, &dev_lim);
+ if (err) {
+ mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
+ goto err_disable;
+ }
profile = default_profile;
profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c
index 2fc449da418..77bc6c746f4 100644
--- a/drivers/infiniband/hw/mthca/mthca_mcg.c
+++ b/drivers/infiniband/hw/mthca/mthca_mcg.c
@@ -111,7 +111,8 @@ static int find_mgm(struct mthca_dev *dev,
goto out;
if (status) {
mthca_err(dev, "READ_MGM returned status %02x\n", status);
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
if (!memcmp(mgm->gid, zero_gid, 16)) {
@@ -126,7 +127,7 @@ static int find_mgm(struct mthca_dev *dev,
goto out;
*prev = *index;
- *index = be32_to_cpu(mgm->next_gid_index) >> 5;
+ *index = be32_to_cpu(mgm->next_gid_index) >> 6;
} while (*index);
*index = -1;
@@ -153,8 +154,10 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
return PTR_ERR(mailbox);
mgm = mailbox->buf;
- if (down_interruptible(&dev->mcg_table.sem))
- return -EINTR;
+ if (down_interruptible(&dev->mcg_table.sem)) {
+ err = -EINTR;
+ goto err_sem;
+ }
err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
if (err)
@@ -181,9 +184,8 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
err = -EINVAL;
goto out;
}
-
+ memset(mgm, 0, sizeof *mgm);
memcpy(mgm->gid, gid->raw, 16);
- mgm->next_gid_index = 0;
}
for (i = 0; i < MTHCA_QP_PER_MGM; ++i)
@@ -209,6 +211,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
if (status) {
mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
err = -EINVAL;
+ goto out;
}
if (!link)
@@ -223,7 +226,7 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
goto out;
}
- mgm->next_gid_index = cpu_to_be32(index << 5);
+ mgm->next_gid_index = cpu_to_be32(index << 6);
err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
if (err)
@@ -234,7 +237,12 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
}
out:
+ if (err && link && index != -1) {
+ BUG_ON(index < dev->limits.num_mgms);
+ mthca_free(&dev->mcg_table.alloc, index);
+ }
up(&dev->mcg_table.sem);
+ err_sem:
mthca_free_mailbox(dev, mailbox);
return err;
}
@@ -255,8 +263,10 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
return PTR_ERR(mailbox);
mgm = mailbox->buf;
- if (down_interruptible(&dev->mcg_table.sem))
- return -EINTR;
+ if (down_interruptible(&dev->mcg_table.sem)) {
+ err = -EINTR;
+ goto err_sem;
+ }
err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index);
if (err)
@@ -305,13 +315,11 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
if (i != 1)
goto out;
- goto out;
-
if (prev == -1) {
/* Remove entry from MGM */
- if (be32_to_cpu(mgm->next_gid_index) >> 5) {
- err = mthca_READ_MGM(dev,
- be32_to_cpu(mgm->next_gid_index) >> 5,
+ int amgm_index_to_free = be32_to_cpu(mgm->next_gid_index) >> 6;
+ if (amgm_index_to_free) {
+ err = mthca_READ_MGM(dev, amgm_index_to_free,
mailbox, &status);
if (err)
goto out;
@@ -332,9 +340,13 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
err = -EINVAL;
goto out;
}
+ if (amgm_index_to_free) {
+ BUG_ON(amgm_index_to_free < dev->limits.num_mgms);
+ mthca_free(&dev->mcg_table.alloc, amgm_index_to_free);
+ }
} else {
/* Remove entry from AMGM */
- index = be32_to_cpu(mgm->next_gid_index) >> 5;
+ int curr_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
err = mthca_READ_MGM(dev, prev, mailbox, &status);
if (err)
goto out;
@@ -344,7 +356,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
goto out;
}
- mgm->next_gid_index = cpu_to_be32(index << 5);
+ mgm->next_gid_index = cpu_to_be32(curr_next_index << 6);
err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
if (err)
@@ -354,10 +366,13 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
err = -EINVAL;
goto out;
}
+ BUG_ON(index < dev->limits.num_mgms);
+ mthca_free(&dev->mcg_table.alloc, index);
}
out:
up(&dev->mcg_table.sem);
+ err_sem:
mthca_free_mailbox(dev, mailbox);
return err;
}
@@ -365,11 +380,12 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
int __devinit mthca_init_mcg_table(struct mthca_dev *dev)
{
int err;
+ int table_size = dev->limits.num_mgms + dev->limits.num_amgms;
err = mthca_alloc_init(&dev->mcg_table.alloc,
- dev->limits.num_amgms,
- dev->limits.num_amgms - 1,
- 0);
+ table_size,
+ table_size - 1,
+ dev->limits.num_mgms);
if (err)
return err;
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index d72fe95cba0..9fb985a016e 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -233,7 +233,7 @@ void *mthca_table_find(struct mthca_icm_table *table, int obj)
for (i = 0; i < chunk->npages; ++i) {
if (chunk->mem[i].length >= offset) {
page = chunk->mem[i].page;
- break;
+ goto out;
}
offset -= chunk->mem[i].length;
}
@@ -485,6 +485,8 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
put_page(db_tab->page[i].mem.page);
}
}
+
+ kfree(db_tab);
}
int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 4cc7e2846df..484a7e6b7f8 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -33,7 +33,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- * $Id: mthca_provider.c 1397 2004-12-28 05:09:00Z roland $
+ * $Id: mthca_provider.c 4859 2006-01-09 21:55:10Z roland $
*/
#include <rdma/ib_smi.h>
@@ -45,6 +45,14 @@
#include "mthca_user.h"
#include "mthca_memfree.h"
+static void init_query_mad(struct ib_smp *mad)
+{
+ mad->base_version = 1;
+ mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+ mad->class_version = 1;
+ mad->method = IB_MGMT_METHOD_GET;
+}
+
static int mthca_query_device(struct ib_device *ibdev,
struct ib_device_attr *props)
{
@@ -55,7 +63,7 @@ static int mthca_query_device(struct ib_device *ibdev,
u8 status;
- in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
+ in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
@@ -64,12 +72,8 @@ static int mthca_query_device(struct ib_device *ibdev,
props->fw_ver = mdev->fw_ver;
- memset(in_mad, 0, sizeof *in_mad);
- in_mad->base_version = 1;
- in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- in_mad->class_version = 1;
- in_mad->method = IB_MGMT_METHOD_GET;
- in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mthca_MAD_IFC(mdev, 1, 1,
1, NULL, NULL, in_mad, out_mad,
@@ -87,7 +91,6 @@ static int mthca_query_device(struct ib_device *ibdev,
props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
- memcpy(&props->node_guid, out_mad->data + 12, 8);
props->max_mr_size = ~0ull;
props->page_size_cap = mdev->limits.page_size_cap;
@@ -128,20 +131,16 @@ static int mthca_query_port(struct ib_device *ibdev,
int err = -ENOMEM;
u8 status;
- in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
+ in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
memset(props, 0, sizeof *props);
- memset(in_mad, 0, sizeof *in_mad);
- in_mad->base_version = 1;
- in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- in_mad->class_version = 1;
- in_mad->method = IB_MGMT_METHOD_GET;
- in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
- in_mad->attr_mod = cpu_to_be32(port);
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
+ in_mad->attr_mod = cpu_to_be32(port);
err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
port, NULL, NULL, in_mad, out_mad,
@@ -220,18 +219,14 @@ static int mthca_query_pkey(struct ib_device *ibdev,
int err = -ENOMEM;
u8 status;
- in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
+ in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
- memset(in_mad, 0, sizeof *in_mad);
- in_mad->base_version = 1;
- in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- in_mad->class_version = 1;
- in_mad->method = IB_MGMT_METHOD_GET;
- in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
- in_mad->attr_mod = cpu_to_be32(index / 32);
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
+ in_mad->attr_mod = cpu_to_be32(index / 32);
err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
port, NULL, NULL, in_mad, out_mad,
@@ -259,18 +254,14 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
int err = -ENOMEM;
u8 status;
- in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
+ in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
- memset(in_mad, 0, sizeof *in_mad);
- in_mad->base_version = 1;
- in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- in_mad->class_version = 1;
- in_mad->method = IB_MGMT_METHOD_GET;
- in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
- in_mad->attr_mod = cpu_to_be32(port);
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
+ in_mad->attr_mod = cpu_to_be32(port);
err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
port, NULL, NULL, in_mad, out_mad,
@@ -284,13 +275,9 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
memcpy(gid->raw, out_mad->data + 8, 8);
- memset(in_mad, 0, sizeof *in_mad);
- in_mad->base_version = 1;
- in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- in_mad->class_version = 1;
- in_mad->method = IB_MGMT_METHOD_GET;
- in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
- in_mad->attr_mod = cpu_to_be32(index / 8);
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
+ in_mad->attr_mod = cpu_to_be32(index / 8);
err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
port, NULL, NULL, in_mad, out_mad,
@@ -458,8 +445,10 @@ static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
if (pd->uobject) {
context = to_mucontext(pd->uobject->context);
- if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
- return ERR_PTR(-EFAULT);
+ if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
+ err = -EFAULT;
+ goto err_free;
+ }
err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
context->db_tab, ucmd.db_index,
@@ -535,8 +524,10 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
if (pd->uobject) {
context = to_mucontext(pd->uobject->context);
- if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
+ if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
+ kfree(qp);
return ERR_PTR(-EFAULT);
+ }
err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
context->db_tab,
@@ -783,24 +774,20 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK))
return ERR_PTR(-EINVAL);
- if (num_phys_buf > 1 &&
- ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK))
- return ERR_PTR(-EINVAL);
-
mask = 0;
total_size = 0;
for (i = 0; i < num_phys_buf; ++i) {
- if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
- return ERR_PTR(-EINVAL);
- if (i != 0 && i != num_phys_buf - 1 &&
- (buffer_list[i].size & ~PAGE_MASK))
- return ERR_PTR(-EINVAL);
+ if (i != 0)
+ mask |= buffer_list[i].addr;
+ if (i != num_phys_buf - 1)
+ mask |= buffer_list[i].addr + buffer_list[i].size;
total_size += buffer_list[i].size;
- if (i > 0)
- mask |= buffer_list[i].addr;
}
+ if (mask & ~PAGE_MASK)
+ return ERR_PTR(-EINVAL);
+
/* Find largest page shift we can use to cover buffers */
for (shift = PAGE_SHIFT; shift < 31; ++shift)
if (num_phys_buf > 1) {
@@ -1070,11 +1057,48 @@ static struct class_device_attribute *mthca_class_attributes[] = {
&class_device_attr_board_id
};
+static int mthca_init_node_data(struct mthca_dev *dev)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+ u8 status;
+
+ in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
+ out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+
+ err = mthca_MAD_IFC(dev, 1, 1,
+ 1, NULL, NULL, in_mad, out_mad,
+ &status);
+ if (err)
+ goto out;
+ if (status) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
int mthca_register_device(struct mthca_dev *dev)
{
int ret;
int i;
+ ret = mthca_init_node_data(dev);
+ if (ret)
+ return ret;
+
strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
dev->ib_dev.owner = THIS_MODULE;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 7450550db73..fba608ed7df 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -383,12 +383,10 @@ static const struct {
[UC] = (IB_QP_CUR_STATE |
IB_QP_ALT_PATH |
IB_QP_ACCESS_FLAGS |
- IB_QP_PKEY_INDEX |
IB_QP_PATH_MIG_STATE),
[RC] = (IB_QP_CUR_STATE |
IB_QP_ALT_PATH |
IB_QP_ACCESS_FLAGS |
- IB_QP_PKEY_INDEX |
IB_QP_MIN_RNR_TIMER |
IB_QP_PATH_MIG_STATE),
[MLX] = (IB_QP_CUR_STATE |
@@ -476,9 +474,8 @@ static const struct {
.opt_param = {
[UD] = (IB_QP_CUR_STATE |
IB_QP_QKEY),
- [UC] = IB_QP_CUR_STATE,
- [RC] = (IB_QP_CUR_STATE |
- IB_QP_MIN_RNR_TIMER),
+ [UC] = (IB_QP_CUR_STATE |
+ IB_QP_ACCESS_FLAGS),
[MLX] = (IB_QP_CUR_STATE |
IB_QP_QKEY),
}
@@ -522,6 +519,55 @@ static void init_port(struct mthca_dev *dev, int port)
mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}
+static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask)
+{
+ u8 dest_rd_atomic;
+ u32 access_flags;
+ u32 hw_access_flags = 0;
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ dest_rd_atomic = attr->max_dest_rd_atomic;
+ else
+ dest_rd_atomic = qp->resp_depth;
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ access_flags = attr->qp_access_flags;
+ else
+ access_flags = qp->atomic_rd_en;
+
+ if (!dest_rd_atomic)
+ access_flags &= IB_ACCESS_REMOTE_WRITE;
+
+ if (access_flags & IB_ACCESS_REMOTE_READ)
+ hw_access_flags |= MTHCA_QP_BIT_RRE;
+ if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
+ hw_access_flags |= MTHCA_QP_BIT_RAE;
+ if (access_flags & IB_ACCESS_REMOTE_WRITE)
+ hw_access_flags |= MTHCA_QP_BIT_RWE;
+
+ return cpu_to_be32(hw_access_flags);
+}
+
+static void mthca_path_set(struct ib_ah_attr *ah, struct mthca_qp_path *path)
+{
+ path->g_mylmc = ah->src_path_bits & 0x7f;
+ path->rlid = cpu_to_be16(ah->dlid);
+ path->static_rate = !!ah->static_rate;
+
+ if (ah->ah_flags & IB_AH_GRH) {
+ path->g_mylmc |= 1 << 7;
+ path->mgid_index = ah->grh.sgid_index;
+ path->hop_limit = ah->grh.hop_limit;
+ path->sl_tclass_flowlabel =
+ cpu_to_be32((ah->sl << 28) |
+ (ah->grh.traffic_class << 20) |
+ (ah->grh.flow_label));
+ memcpy(path->rgid, ah->grh.dgid.raw, 16);
+ } else
+ path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);
+}
+
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
{
struct mthca_dev *dev = to_mdev(ibqp->device);
@@ -591,6 +637,26 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
return -EINVAL;
}
+ if ((attr_mask & IB_QP_PORT) &&
+ (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
+ mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
+ return -EINVAL;
+ }
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+ attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
+ mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
+ attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
+ return -EINVAL;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+ attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
+ mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
+ attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
+ return -EINVAL;
+ }
+
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
@@ -665,28 +731,14 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
}
if (attr_mask & IB_QP_RNR_RETRY) {
- qp_context->pri_path.rnr_retry = attr->rnr_retry << 5;
- qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY);
+ qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
+ attr->rnr_retry << 5;
+ qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
+ MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
}
if (attr_mask & IB_QP_AV) {
- qp_context->pri_path.g_mylmc = attr->ah_attr.src_path_bits & 0x7f;
- qp_context->pri_path.rlid = cpu_to_be16(attr->ah_attr.dlid);
- qp_context->pri_path.static_rate = !!attr->ah_attr.static_rate;
- if (attr->ah_attr.ah_flags & IB_AH_GRH) {
- qp_context->pri_path.g_mylmc |= 1 << 7;
- qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index;
- qp_context->pri_path.hop_limit = attr->ah_attr.grh.hop_limit;
- qp_context->pri_path.sl_tclass_flowlabel =
- cpu_to_be32((attr->ah_attr.sl << 28) |
- (attr->ah_attr.grh.traffic_class << 20) |
- (attr->ah_attr.grh.flow_label));
- memcpy(qp_context->pri_path.rgid,
- attr->ah_attr.grh.dgid.raw, 16);
- } else {
- qp_context->pri_path.sl_tclass_flowlabel =
- cpu_to_be32(attr->ah_attr.sl << 28);
- }
+ mthca_path_set(&attr->ah_attr, &qp_context->pri_path);
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
}
@@ -695,7 +747,19 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
}
- /* XXX alt_path */
+ if (attr_mask & IB_QP_ALT_PATH) {
+ if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
+ mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
+ attr->alt_port_num);
+ return -EINVAL;
+ }
+
+ mthca_path_set(&attr->alt_ah_attr, &qp_context->alt_path);
+ qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
+ attr->alt_port_num << 24);
+ qp_context->alt_path.ackto = attr->alt_timeout << 3;
+ qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
+ }
/* leave rdd as 0 */
qp_context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
@@ -703,9 +767,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey);
qp_context->params1 = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
(MTHCA_FLIGHT_LIMIT << 24) |
- MTHCA_QP_BIT_SRE |
- MTHCA_QP_BIT_SWE |
- MTHCA_QP_BIT_SAE);
+ MTHCA_QP_BIT_SWE);
if (qp->sq_policy == IB_SIGNAL_ALL_WR)
qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
if (attr_mask & IB_QP_RETRY_CNT) {
@@ -714,9 +776,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
}
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
- qp_context->params1 |= cpu_to_be32(min(attr->max_rd_atomic ?
- ffs(attr->max_rd_atomic) - 1 : 0,
- 7) << 21);
+ if (attr->max_rd_atomic) {
+ qp_context->params1 |=
+ cpu_to_be32(MTHCA_QP_BIT_SRE |
+ MTHCA_QP_BIT_SAE);
+ qp_context->params1 |=
+ cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
+ }
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
}
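The initiator-depth encoding above changes from min(ffs(max_rd_atomic) - 1, 7) to fls(max_rd_atomic - 1): the field now holds the requested depth rounded up to the next power of two, where the old ffs()-based expression was only correct for exact powers of two, and SRE/SAE are set only when the depth is non-zero. A small user-space illustration of the rounding; fls_sw() stands in for the kernel's fls() and is not part of the patch:

#include <stdio.h>

/* Software stand-in for the kernel's fls(): index of the highest set bit. */
static int fls_sw(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int n;

	/* fls(n - 1) == ceil(log2(n)) for n >= 1: a requested depth of 5
	 * is encoded as 3 (allows 8), where ffs(5) - 1 gave 0 (allows 1). */
	for (n = 1; n <= 8; n++)
		printf("max_rd_atomic=%u -> field=%d\n", n, fls_sw(n - 1));
	return 0;
}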
@@ -729,71 +795,19 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index);
}
- if (attr_mask & IB_QP_ACCESS_FLAGS) {
- qp_context->params2 |=
- cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
- MTHCA_QP_BIT_RWE : 0);
-
- /*
- * Only enable RDMA reads and atomics if we have
- * responder resources set to a non-zero value.
- */
- if (qp->resp_depth) {
- qp_context->params2 |=
- cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ?
- MTHCA_QP_BIT_RRE : 0);
- qp_context->params2 |=
- cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC ?
- MTHCA_QP_BIT_RAE : 0);
- }
-
- qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
- MTHCA_QP_OPTPAR_RRE |
- MTHCA_QP_OPTPAR_RAE);
-
- qp->atomic_rd_en = attr->qp_access_flags;
- }
-
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
- u8 rra_max;
-
- if (qp->resp_depth && !attr->max_dest_rd_atomic) {
- /*
- * Lowering our responder resources to zero.
- * Turn off reads RDMA and atomics as responder.
- * (RRE/RAE in params2 already zero)
- */
- qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRE |
- MTHCA_QP_OPTPAR_RAE);
- }
-
- if (!qp->resp_depth && attr->max_dest_rd_atomic) {
- /*
- * Increasing our responder resources from
- * zero. Turn on RDMA reads and atomics as
- * appropriate.
- */
+ if (attr->max_dest_rd_atomic)
qp_context->params2 |=
- cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ?
- MTHCA_QP_BIT_RRE : 0);
- qp_context->params2 |=
- cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ?
- MTHCA_QP_BIT_RAE : 0);
-
- qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRE |
- MTHCA_QP_OPTPAR_RAE);
- }
+ cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
- for (rra_max = 0;
- 1 << rra_max < attr->max_dest_rd_atomic &&
- rra_max < dev->qp_table.rdb_shift;
- ++rra_max)
- ; /* nothing */
-
- qp_context->params2 |= cpu_to_be32(rra_max << 21);
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
+ }
- qp->resp_depth = attr->max_dest_rd_atomic;
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
+ qp_context->params2 |= get_hw_access_flags(qp, attr, attr_mask);
+ qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
+ MTHCA_QP_OPTPAR_RRE |
+ MTHCA_QP_OPTPAR_RAE);
}
qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);
@@ -835,8 +849,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
err = -EINVAL;
}
- if (!err)
+ if (!err) {
qp->state = new_state;
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ qp->atomic_rd_en = attr->qp_access_flags;
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ qp->resp_depth = attr->max_dest_rd_atomic;
+ }
mthca_free_mailbox(dev, mailbox);
@@ -885,18 +904,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
return err;
}
-static void mthca_adjust_qp_caps(struct mthca_dev *dev,
- struct mthca_pd *pd,
- struct mthca_qp *qp)
+static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
- int max_data_size;
-
/*
* Calculate the maximum size of WQE s/g segments, excluding
* the next segment and other non-data segments.
*/
- max_data_size = min(dev->limits.max_desc_sz, 1 << qp->sq.wqe_shift) -
- sizeof (struct mthca_next_seg);
+ int max_data_size = desc_sz - sizeof (struct mthca_next_seg);
switch (qp->transport) {
case MLX:
@@ -915,11 +929,24 @@ static void mthca_adjust_qp_caps(struct mthca_dev *dev,
break;
}
+ return max_data_size;
+}
+
+static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
+{
/* We don't support inline data for kernel QPs (yet). */
- if (!pd->ibpd.uobject)
- qp->max_inline_data = 0;
- else
- qp->max_inline_data = max_data_size - MTHCA_INLINE_HEADER_SIZE;
+ return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
+}
+
+static void mthca_adjust_qp_caps(struct mthca_dev *dev,
+ struct mthca_pd *pd,
+ struct mthca_qp *qp)
+{
+ int max_data_size = mthca_max_data_size(dev, qp,
+ min(dev->limits.max_desc_sz,
+ 1 << qp->sq.wqe_shift));
+
+ qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);
qp->sq.max_gs = min_t(int, dev->limits.max_sg,
max_data_size / sizeof (struct mthca_data_seg));
@@ -1186,13 +1213,23 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
}
static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
- struct mthca_qp *qp)
+ struct mthca_pd *pd, struct mthca_qp *qp)
{
+ int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);
+
/* Sanity check QP size before proceeding */
- if (cap->max_send_wr > dev->limits.max_wqes ||
- cap->max_recv_wr > dev->limits.max_wqes ||
- cap->max_send_sge > dev->limits.max_sg ||
- cap->max_recv_sge > dev->limits.max_sg)
+ if (cap->max_send_wr > dev->limits.max_wqes ||
+ cap->max_recv_wr > dev->limits.max_wqes ||
+ cap->max_send_sge > dev->limits.max_sg ||
+ cap->max_recv_sge > dev->limits.max_sg ||
+ cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
+ return -EINVAL;
+
+ /*
+ * For MLX transport we need 2 extra S/G entries:
+ * one for the header and one for the checksum at the end
+ */
+ if (qp->transport == MLX && cap->max_recv_sge + 2 > dev->limits.max_sg)
return -EINVAL;
if (mthca_is_memfree(dev)) {
@@ -1211,14 +1248,6 @@ static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
MTHCA_INLINE_CHUNK_SIZE) /
sizeof (struct mthca_data_seg));
- /*
- * For MLX transport we need 2 extra S/G entries:
- * one for the header and one for the checksum at the end
- */
- if ((qp->transport == MLX && qp->sq.max_gs + 2 > dev->limits.max_sg) ||
- qp->sq.max_gs > dev->limits.max_sg || qp->rq.max_gs > dev->limits.max_sg)
- return -EINVAL;
-
return 0;
}
@@ -1233,7 +1262,7 @@ int mthca_alloc_qp(struct mthca_dev *dev,
{
int err;
- err = mthca_set_qp_size(dev, cap, qp);
+ err = mthca_set_qp_size(dev, cap, pd, qp);
if (err)
return err;
@@ -1276,7 +1305,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
int err;
- err = mthca_set_qp_size(dev, cap, &sqp->qp);
+ err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
if (err)
return err;
@@ -1405,7 +1434,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
u16 pkey;
ib_ud_header_init(256, /* assume a MAD */
- sqp->ud_header.grh_present,
+ mthca_ah_grh_present(to_mah(wr->wr.ud.ah)),
&sqp->ud_header);
err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index f7d234295ef..e7e153d9c4c 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -201,7 +201,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
if (mthca_is_memfree(dev))
srq->max = roundup_pow_of_two(srq->max + 1);
- ds = min(64UL,
+ ds = max(64UL,
roundup_pow_of_two(sizeof (struct mthca_next_seg) +
srq->max_gs * sizeof (struct mthca_data_seg)));
srq->wqe_shift = long_log2(ds);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 9923a15a999..e0a5412b7e6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -45,11 +45,11 @@
#include <linux/config.h>
#include <linux/kref.h>
#include <linux/if_infiniband.h>
+#include <linux/mutex.h>
#include <net/neighbour.h>
#include <asm/atomic.h>
-#include <asm/semaphore.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
@@ -123,8 +123,8 @@ struct ipoib_dev_priv {
unsigned long flags;
- struct semaphore mcast_mutex;
- struct semaphore vlan_mutex;
+ struct mutex mcast_mutex;
+ struct mutex vlan_mutex;
struct rb_root path_tree;
struct list_head path_list;
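The locking changes below and in the other ipoib files are a mechanical conversion from semaphores used as binary mutexes to the mutex API added in this kernel: DECLARE_MUTEX() becomes DEFINE_MUTEX(), per-object semaphores set up with init_MUTEX() become struct mutex set up with mutex_init(), and down()/up() become mutex_lock()/mutex_unlock(). The general pattern, as a standalone sketch rather than a hunk of this patch (the example_* names are placeholders):

#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);	/* was: static DECLARE_MUTEX(example_sem); */

static void example_critical_section(void)
{
	mutex_lock(&example_mutex);	/* was: down(&example_sem); */
	/* ... work that must not run concurrently ... */
	mutex_unlock(&example_mutex);	/* was: up(&example_sem);   */
}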
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 23885801b6d..86bcdd72a10 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(data_debug_level,
#define IPOIB_OP_RECV (1ul << 31)
-static DECLARE_MUTEX(pkey_sem);
+static DEFINE_MUTEX(pkey_mutex);
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
struct ib_pd *pd, struct ib_ah_attr *attr)
@@ -445,25 +445,16 @@ int ipoib_ib_dev_down(struct net_device *dev)
/* Shutdown the P_Key thread if still active */
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
- down(&pkey_sem);
+ mutex_lock(&pkey_mutex);
set_bit(IPOIB_PKEY_STOP, &priv->flags);
cancel_delayed_work(&priv->pkey_task);
- up(&pkey_sem);
+ mutex_unlock(&pkey_mutex);
flush_workqueue(ipoib_workqueue);
}
ipoib_mcast_stop_thread(dev, 1);
-
- /*
- * Flush the multicast groups first so we stop any multicast joins. The
- * completion thread may have already died and we may deadlock waiting
- * for the completion thread to finish some multicast joins.
- */
ipoib_mcast_dev_flush(dev);
- /* Delete broadcast and local addresses since they will be recreated */
- ipoib_mcast_dev_down(dev);
-
ipoib_flush_paths(dev);
return 0;
@@ -608,13 +599,13 @@ void ipoib_ib_dev_flush(void *_dev)
if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
ipoib_ib_dev_up(dev);
- down(&priv->vlan_mutex);
+ mutex_lock(&priv->vlan_mutex);
/* Flush any child interfaces too */
list_for_each_entry(cpriv, &priv->child_intfs, list)
ipoib_ib_dev_flush(&cpriv->dev);
- up(&priv->vlan_mutex);
+ mutex_unlock(&priv->vlan_mutex);
}
void ipoib_ib_dev_cleanup(struct net_device *dev)
@@ -624,9 +615,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
ipoib_dbg(priv, "cleaning up ib_dev\n");
ipoib_mcast_stop_thread(dev, 1);
-
- /* Delete the broadcast address and the local address */
- ipoib_mcast_dev_down(dev);
+ ipoib_mcast_dev_flush(dev);
ipoib_transport_dev_cleanup(dev);
}
@@ -662,12 +651,12 @@ void ipoib_pkey_poll(void *dev_ptr)
if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
ipoib_open(dev);
else {
- down(&pkey_sem);
+ mutex_lock(&pkey_mutex);
if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
queue_delayed_work(ipoib_workqueue,
&priv->pkey_task,
HZ);
- up(&pkey_sem);
+ mutex_unlock(&pkey_mutex);
}
}
@@ -681,12 +670,12 @@ int ipoib_pkey_dev_delay_open(struct net_device *dev)
/* P_Key value not assigned yet - start polling */
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
- down(&pkey_sem);
+ mutex_lock(&pkey_mutex);
clear_bit(IPOIB_PKEY_STOP, &priv->flags);
queue_delayed_work(ipoib_workqueue,
&priv->pkey_task,
HZ);
- up(&pkey_sem);
+ mutex_unlock(&pkey_mutex);
return 1;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 780009c7eaa..fd3f5c862a5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -105,7 +105,7 @@ int ipoib_open(struct net_device *dev)
struct ipoib_dev_priv *cpriv;
/* Bring up any child interfaces too */
- down(&priv->vlan_mutex);
+ mutex_lock(&priv->vlan_mutex);
list_for_each_entry(cpriv, &priv->child_intfs, list) {
int flags;
@@ -115,7 +115,7 @@ int ipoib_open(struct net_device *dev)
dev_change_flags(cpriv->dev, flags | IFF_UP);
}
- up(&priv->vlan_mutex);
+ mutex_unlock(&priv->vlan_mutex);
}
netif_start_queue(dev);
@@ -140,7 +140,7 @@ static int ipoib_stop(struct net_device *dev)
struct ipoib_dev_priv *cpriv;
/* Bring down any child interfaces too */
- down(&priv->vlan_mutex);
+ mutex_lock(&priv->vlan_mutex);
list_for_each_entry(cpriv, &priv->child_intfs, list) {
int flags;
@@ -150,7 +150,7 @@ static int ipoib_stop(struct net_device *dev)
dev_change_flags(cpriv->dev, flags & ~IFF_UP);
}
- up(&priv->vlan_mutex);
+ mutex_unlock(&priv->vlan_mutex);
}
return 0;
@@ -892,8 +892,8 @@ static void ipoib_setup(struct net_device *dev)
spin_lock_init(&priv->lock);
spin_lock_init(&priv->tx_lock);
- init_MUTEX(&priv->mcast_mutex);
- init_MUTEX(&priv->vlan_mutex);
+ mutex_init(&priv->mcast_mutex);
+ mutex_init(&priv->vlan_mutex);
INIT_LIST_HEAD(&priv->path_list);
INIT_LIST_HEAD(&priv->child_intfs);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index ed0c2ead8bc..98039da0caf 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -55,7 +55,7 @@ MODULE_PARM_DESC(mcast_debug_level,
"Enable multicast debug tracing if > 0");
#endif
-static DECLARE_MUTEX(mcast_mutex);
+static DEFINE_MUTEX(mcast_mutex);
/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
struct ipoib_mcast {
@@ -97,8 +97,6 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_neigh *neigh, *tmp;
unsigned long flags;
- LIST_HEAD(ah_list);
- struct ipoib_ah *ah, *tah;
ipoib_dbg_mcast(netdev_priv(dev),
"deleting multicast group " IPOIB_GID_FMT "\n",
@@ -107,8 +105,14 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
spin_lock_irqsave(&priv->lock, flags);
list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
+ /*
+ * It's safe to call ipoib_put_ah() inside priv->lock
+ * here, because we know that mcast->ah will always
+ * hold one more reference, so ipoib_put_ah() will
+ * never do more than decrement the ref count.
+ */
if (neigh->ah)
- list_add_tail(&neigh->ah->list, &ah_list);
+ ipoib_put_ah(neigh->ah);
*to_ipoib_neigh(neigh->neighbour) = NULL;
neigh->neighbour->ops->destructor = NULL;
kfree(neigh);
@@ -116,9 +120,6 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
spin_unlock_irqrestore(&priv->lock, flags);
- list_for_each_entry_safe(ah, tah, &ah_list, list)
- ipoib_put_ah(ah);
-
if (mcast->ah)
ipoib_put_ah(mcast->ah);
@@ -384,10 +385,10 @@ static void ipoib_mcast_join_complete(int status,
if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) {
mcast->backoff = 1;
- down(&mcast_mutex);
+ mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
queue_work(ipoib_workqueue, &priv->mcast_task);
- up(&mcast_mutex);
+ mutex_unlock(&mcast_mutex);
complete(&mcast->done);
return;
}
@@ -417,7 +418,7 @@ static void ipoib_mcast_join_complete(int status,
mcast->query = NULL;
- down(&mcast_mutex);
+ mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
if (status == -ETIMEDOUT)
queue_work(ipoib_workqueue, &priv->mcast_task);
@@ -426,7 +427,7 @@ static void ipoib_mcast_join_complete(int status,
mcast->backoff * HZ);
} else
complete(&mcast->done);
- up(&mcast_mutex);
+ mutex_unlock(&mcast_mutex);
return;
}
@@ -481,12 +482,12 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
- down(&mcast_mutex);
+ mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
queue_delayed_work(ipoib_workqueue,
&priv->mcast_task,
mcast->backoff * HZ);
- up(&mcast_mutex);
+ mutex_unlock(&mcast_mutex);
} else
mcast->query_id = ret;
}
@@ -519,11 +520,11 @@ void ipoib_mcast_join_task(void *dev_ptr)
priv->broadcast = ipoib_mcast_alloc(dev, 1);
if (!priv->broadcast) {
ipoib_warn(priv, "failed to allocate broadcast group\n");
- down(&mcast_mutex);
+ mutex_lock(&mcast_mutex);
if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
queue_delayed_work(ipoib_workqueue,
&priv->mcast_task, HZ);
- up(&mcast_mutex);
+ mutex_unlock(&mcast_mutex);
return;
}
@@ -579,10 +580,10 @@ int ipoib_mcast_start_thread(struct net_device *dev)
ipoib_dbg_mcast(priv, "starting multicast thread\n");
- down(&mcast_mutex);
+ mutex_lock(&mcast_mutex);
if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
queue_work(ipoib_workqueue, &priv->mcast_task);
- up(&mcast_mutex);
+ mutex_unlock(&mcast_mutex);
return 0;
}
@@ -594,10 +595,10 @@ int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
ipoib_dbg_mcast(priv, "stopping multicast thread\n");
- down(&mcast_mutex);
+ mutex_lock(&mcast_mutex);
clear_bit(IPOIB_MCAST_RUN, &priv->flags);
cancel_delayed_work(&priv->mcast_task);
- up(&mcast_mutex);
+ mutex_unlock(&mcast_mutex);
if (flush)
flush_workqueue(ipoib_workqueue);
@@ -741,48 +742,23 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
LIST_HEAD(remove_list);
- struct ipoib_mcast *mcast, *tmcast, *nmcast;
+ struct ipoib_mcast *mcast, *tmcast;
unsigned long flags;
ipoib_dbg_mcast(priv, "flushing multicast list\n");
spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
- nmcast = ipoib_mcast_alloc(dev, 0);
- if (nmcast) {
- nmcast->flags =
- mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY);
- nmcast->mcmember.mgid = mcast->mcmember.mgid;
-
- /* Add the new group in before the to-be-destroyed group */
- list_add_tail(&nmcast->list, &mcast->list);
- list_del_init(&mcast->list);
-
- rb_replace_node(&mcast->rb_node, &nmcast->rb_node,
- &priv->multicast_tree);
-
- list_add_tail(&mcast->list, &remove_list);
- } else {
- ipoib_warn(priv, "could not reallocate multicast group "
- IPOIB_GID_FMT "\n",
- IPOIB_GID_ARG(mcast->mcmember.mgid));
- }
+ list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
+ list_del(&mcast->list);
+ rb_erase(&mcast->rb_node, &priv->multicast_tree);
+ list_add_tail(&mcast->list, &remove_list);
}
if (priv->broadcast) {
- nmcast = ipoib_mcast_alloc(dev, 0);
- if (nmcast) {
- nmcast->mcmember.mgid = priv->broadcast->mcmember.mgid;
-
- rb_replace_node(&priv->broadcast->rb_node,
- &nmcast->rb_node,
- &priv->multicast_tree);
-
- list_add_tail(&priv->broadcast->list, &remove_list);
- }
-
- priv->broadcast = nmcast;
+ rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
+ list_add_tail(&priv->broadcast->list, &remove_list);
+ priv->broadcast = NULL;
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -793,24 +769,6 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
}
}
-void ipoib_mcast_dev_down(struct net_device *dev)
-{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- unsigned long flags;
-
- /* Delete broadcast since it will be recreated */
- if (priv->broadcast) {
- ipoib_dbg_mcast(priv, "deleting broadcast group\n");
-
- spin_lock_irqsave(&priv->lock, flags);
- rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
- spin_unlock_irqrestore(&priv->lock, flags);
- ipoib_mcast_leave(dev, priv->broadcast);
- ipoib_mcast_free(priv->broadcast);
- priv->broadcast = NULL;
- }
-}
-
void ipoib_mcast_restart_task(void *dev_ptr)
{
struct net_device *dev = dev_ptr;
@@ -824,7 +782,8 @@ void ipoib_mcast_restart_task(void *dev_ptr)
ipoib_mcast_stop_thread(dev, 0);
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&dev->xmit_lock, flags);
+ spin_lock(&priv->lock);
/*
* Unfortunately, the networking core only gives us a list of all of
@@ -896,7 +855,9 @@ void ipoib_mcast_restart_task(void *dev_ptr)
list_add_tail(&mcast->list, &remove_list);
}
}
- spin_unlock_irqrestore(&priv->lock, flags);
+
+ spin_unlock(&priv->lock);
+ spin_unlock_irqrestore(&dev->xmit_lock, flags);
/* We have to cancel outside of the spinlock */
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index e829e10400e..faaf10e5fc7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -65,9 +65,9 @@ int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
}
/* attach QP to multicast group */
- down(&priv->mcast_mutex);
+ mutex_lock(&priv->mcast_mutex);
ret = ib_attach_mcast(priv->qp, mgid, mlid);
- up(&priv->mcast_mutex);
+ mutex_unlock(&priv->mcast_mutex);
if (ret)
ipoib_warn(priv, "failed to attach to multicast group, ret = %d\n", ret);
@@ -81,9 +81,9 @@ int ipoib_mcast_detach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
struct ipoib_dev_priv *priv = netdev_priv(dev);
int ret;
- down(&priv->mcast_mutex);
+ mutex_lock(&priv->mcast_mutex);
ret = ib_detach_mcast(priv->qp, mgid, mlid);
- up(&priv->mcast_mutex);
+ mutex_unlock(&priv->mcast_mutex);
if (ret)
ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index d280b341a37..4ca175553f9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -63,7 +63,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
ppriv = netdev_priv(pdev);
- down(&ppriv->vlan_mutex);
+ mutex_lock(&ppriv->vlan_mutex);
/*
* First ensure this isn't a duplicate. We check the parent device and
@@ -124,7 +124,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
list_add_tail(&priv->list, &ppriv->child_intfs);
- up(&ppriv->vlan_mutex);
+ mutex_unlock(&ppriv->vlan_mutex);
return 0;
@@ -139,7 +139,7 @@ device_init_failed:
free_netdev(priv->dev);
err:
- up(&ppriv->vlan_mutex);
+ mutex_unlock(&ppriv->vlan_mutex);
return result;
}
@@ -153,7 +153,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
ppriv = netdev_priv(pdev);
- down(&ppriv->vlan_mutex);
+ mutex_lock(&ppriv->vlan_mutex);
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey) {
unregister_netdev(priv->dev);
@@ -167,7 +167,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
break;
}
}
- up(&ppriv->vlan_mutex);
+ mutex_unlock(&ppriv->vlan_mutex);
return ret;
}
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index ee9fe226ae9..31207e66414 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -39,6 +39,7 @@
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
+#include <linux/jiffies.h>
#include <asm/atomic.h>
@@ -1515,8 +1516,7 @@ static ssize_t show_port(struct class_device *class_dev, char *buf)
static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
-static struct srp_host *srp_add_port(struct ib_device *device,
- __be64 node_guid, u8 port)
+static struct srp_host *srp_add_port(struct ib_device *device, u8 port)
{
struct srp_host *host;
@@ -1531,7 +1531,7 @@ static struct srp_host *srp_add_port(struct ib_device *device,
host->port = port;
host->initiator_port_id[7] = port;
- memcpy(host->initiator_port_id + 8, &node_guid, 8);
+ memcpy(host->initiator_port_id + 8, &device->node_guid, 8);
host->pd = ib_alloc_pd(device);
if (IS_ERR(host->pd))
@@ -1579,22 +1579,11 @@ static void srp_add_one(struct ib_device *device)
{
struct list_head *dev_list;
struct srp_host *host;
- struct ib_device_attr *dev_attr;
int s, e, p;
- dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
- if (!dev_attr)
- return;
-
- if (ib_query_device(device, dev_attr)) {
- printk(KERN_WARNING PFX "Couldn't query node GUID for %s.\n",
- device->name);
- goto out;
- }
-
dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
if (!dev_list)
- goto out;
+ return;
INIT_LIST_HEAD(dev_list);
@@ -1607,15 +1596,12 @@ static void srp_add_one(struct ib_device *device)
}
for (p = s; p <= e; ++p) {
- host = srp_add_port(device, dev_attr->node_guid, p);
+ host = srp_add_port(device, p);
if (host)
list_add_tail(&host->list, dev_list);
}
ib_set_client_data(device, &srp_client, dev_list);
-
-out:
- kfree(dev_attr);
}
static void srp_remove_one(struct ib_device *device)
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 0270d1ec942..745979f33dc 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -154,11 +154,13 @@ struct input_event_compat {
__s32 value;
};
+/* Note to the author of this code: did it ever occur to
+ you why the ifdefs are needed? Think about it again. -AK */
#ifdef CONFIG_X86_64
-# define COMPAT_TEST test_thread_flag(TIF_IA32)
+# define COMPAT_TEST is_compat_task()
#elif defined(CONFIG_IA64)
-# define COMPAT_TEST IS_IA32_PROCESS(ia64_task_regs(current))
-#elif defined(CONFIG_ARCH_S390)
+# define COMPAT_TEST IS_IA32_PROCESS(task_pt_regs(current))
+#elif defined(CONFIG_S390)
# define COMPAT_TEST test_thread_flag(TIF_31BIT)
#elif defined(CONFIG_MIPS)
# define COMPAT_TEST (current->thread.mflags & MF_32BIT_ADDR)
diff --git a/drivers/input/joystick/amijoy.c b/drivers/input/joystick/amijoy.c
index 8558a99f663..ec55a29fc86 100644
--- a/drivers/input/joystick/amijoy.c
+++ b/drivers/input/joystick/amijoy.c
@@ -64,8 +64,8 @@ static irqreturn_t amijoy_interrupt(int irq, void *dummy, struct pt_regs *fp)
if (amijoy[i]) {
switch (i) {
- case 0: data = ~custom.joy0dat; button = (~ciaa.pra >> 6) & 1; break;
- case 1: data = ~custom.joy1dat; button = (~ciaa.pra >> 7) & 1; break;
+ case 0: data = ~amiga_custom.joy0dat; button = (~ciaa.pra >> 6) & 1; break;
+ case 1: data = ~amiga_custom.joy1dat; button = (~ciaa.pra >> 7) & 1; break;
}
input_regs(amijoy_dev[i], fp);
diff --git a/drivers/input/keyboard/corgikbd.c b/drivers/input/keyboard/corgikbd.c
index 64672d49122..e301ee4ca26 100644
--- a/drivers/input/keyboard/corgikbd.c
+++ b/drivers/input/keyboard/corgikbd.c
@@ -19,7 +19,6 @@
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <asm/irq.h>
#include <asm/arch/corgi.h>
#include <asm/arch/hardware.h>
@@ -343,10 +342,9 @@ static int __init corgikbd_probe(struct platform_device *pdev)
for (i = 0; i < CORGI_KEY_SENSE_NUM; i++) {
pxa_gpio_mode(CORGI_GPIO_KEY_SENSE(i) | GPIO_IN);
if (request_irq(CORGI_IRQ_GPIO_KEY_SENSE(i), corgikbd_interrupt,
- SA_INTERRUPT, "corgikbd", corgikbd))
+ SA_INTERRUPT | SA_TRIGGER_RISING,
+ "corgikbd", corgikbd))
printk(KERN_WARNING "corgikbd: Can't get IRQ: %d!\n", i);
- else
- set_irq_type(CORGI_IRQ_GPIO_KEY_SENSE(i),IRQT_RISING);
}
/* Set Strobe lines as outputs - set high */
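corgikbd (above) and spitzkbd (below) switch from calling set_irq_type() after a successful request_irq() to passing the edge configuration as SA_TRIGGER_* flags in the request itself. A hedged sketch of that pattern; EXAMPLE_IRQ, example_interrupt and example_dev are placeholders, not symbols from this patch:

static int example_request_rising_irq(void)
{
	int err;

	/* Trigger selection travels with the request, so there is no
	 * window where the IRQ is requested but not yet configured. */
	err = request_irq(EXAMPLE_IRQ, example_interrupt,
			  SA_INTERRUPT | SA_TRIGGER_RISING,
			  "example", example_dev);
	if (err)
		printk(KERN_WARNING "example: can't get IRQ %d\n", EXAMPLE_IRQ);
	return err;
}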
diff --git a/drivers/input/keyboard/spitzkbd.c b/drivers/input/keyboard/spitzkbd.c
index 6a15fe3bc52..83999d58312 100644
--- a/drivers/input/keyboard/spitzkbd.c
+++ b/drivers/input/keyboard/spitzkbd.c
@@ -19,7 +19,6 @@
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <asm/irq.h>
#include <asm/arch/spitz.h>
#include <asm/arch/hardware.h>
@@ -407,10 +406,9 @@ static int __init spitzkbd_probe(struct platform_device *dev)
for (i = 0; i < SPITZ_KEY_SENSE_NUM; i++) {
pxa_gpio_mode(spitz_senses[i] | GPIO_IN);
if (request_irq(IRQ_GPIO(spitz_senses[i]), spitzkbd_interrupt,
- SA_INTERRUPT, "Spitzkbd Sense", spitzkbd))
+ SA_INTERRUPT|SA_TRIGGER_RISING,
+ "Spitzkbd Sense", spitzkbd))
printk(KERN_WARNING "spitzkbd: Can't get Sense IRQ: %d!\n", i);
- else
- set_irq_type(IRQ_GPIO(spitz_senses[i]),IRQT_RISING);
}
/* Set Strobe lines as outputs - set high */
@@ -422,15 +420,18 @@ static int __init spitzkbd_probe(struct platform_device *dev)
pxa_gpio_mode(SPITZ_GPIO_SWA | GPIO_IN);
pxa_gpio_mode(SPITZ_GPIO_SWB | GPIO_IN);
- request_irq(SPITZ_IRQ_GPIO_SYNC, spitzkbd_interrupt, SA_INTERRUPT, "Spitzkbd Sync", spitzkbd);
- request_irq(SPITZ_IRQ_GPIO_ON_KEY, spitzkbd_interrupt, SA_INTERRUPT, "Spitzkbd PwrOn", spitzkbd);
- request_irq(SPITZ_IRQ_GPIO_SWA, spitzkbd_hinge_isr, SA_INTERRUPT, "Spitzkbd SWA", spitzkbd);
- request_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd_hinge_isr, SA_INTERRUPT, "Spitzkbd SWB", spitzkbd);
-
- set_irq_type(SPITZ_IRQ_GPIO_SYNC, IRQT_BOTHEDGE);
- set_irq_type(SPITZ_IRQ_GPIO_ON_KEY, IRQT_BOTHEDGE);
- set_irq_type(SPITZ_IRQ_GPIO_SWA, IRQT_BOTHEDGE);
- set_irq_type(SPITZ_IRQ_GPIO_SWB, IRQT_BOTHEDGE);
+ request_irq(SPITZ_IRQ_GPIO_SYNC, spitzkbd_interrupt,
+ SA_INTERRUPT | SA_TRIGGER_RISING | SA_TRIGGER_FALLING,
+ "Spitzkbd Sync", spitzkbd);
+ request_irq(SPITZ_IRQ_GPIO_ON_KEY, spitzkbd_interrupt,
+ SA_INTERRUPT | SA_TRIGGER_RISING | SA_TRIGGER_FALLING,
+ "Spitzkbd PwrOn", spitzkbd);
+ request_irq(SPITZ_IRQ_GPIO_SWA, spitzkbd_hinge_isr,
+ SA_INTERRUPT | SA_TRIGGER_RISING | SA_TRIGGER_FALLING,
+ "Spitzkbd SWA", spitzkbd);
+ request_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd_hinge_isr,
+ SA_INTERRUPT | SA_TRIGGER_RISING | SA_TRIGGER_FALLING,
+ "Spitzkbd SWB", spitzkbd);
printk(KERN_INFO "input: Spitz Keyboard Registered\n");
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index 1cd7657f7e4..1be963961c1 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -60,8 +60,6 @@ static struct fasync_struct *hp_sdc_rtc_async_queue;
static DECLARE_WAIT_QUEUE_HEAD(hp_sdc_rtc_wait);
-static loff_t hp_sdc_rtc_llseek(struct file *file, loff_t offset, int origin);
-
static ssize_t hp_sdc_rtc_read(struct file *file, char *buf,
size_t count, loff_t *ppos);
@@ -387,11 +385,6 @@ static int hp_sdc_rtc_set_i8042timer (struct timeval *setto, uint8_t setcmd)
return 0;
}
-static loff_t hp_sdc_rtc_llseek(struct file *file, loff_t offset, int origin)
-{
- return -ESPIPE;
-}
-
static ssize_t hp_sdc_rtc_read(struct file *file, char *buf,
size_t count, loff_t *ppos) {
ssize_t retval;
@@ -679,7 +672,7 @@ static int hp_sdc_rtc_ioctl(struct inode *inode, struct file *file,
static struct file_operations hp_sdc_rtc_fops = {
.owner = THIS_MODULE,
- .llseek = hp_sdc_rtc_llseek,
+ .llseek = no_llseek,
.read = hp_sdc_rtc_read,
.poll = hp_sdc_rtc_poll,
.ioctl = hp_sdc_rtc_ioctl,
diff --git a/drivers/input/mouse/amimouse.c b/drivers/input/mouse/amimouse.c
index d13d4c8fe3c..c8b2cc9f184 100644
--- a/drivers/input/mouse/amimouse.c
+++ b/drivers/input/mouse/amimouse.c
@@ -41,7 +41,7 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy, struct pt_regs *fp)
unsigned short joy0dat, potgor;
int nx, ny, dx, dy;
- joy0dat = custom.joy0dat;
+ joy0dat = amiga_custom.joy0dat;
nx = joy0dat & 0xff;
ny = joy0dat >> 8;
@@ -57,7 +57,7 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy, struct pt_regs *fp)
amimouse_lastx = nx;
amimouse_lasty = ny;
- potgor = custom.potgor;
+ potgor = amiga_custom.potgor;
input_regs(amimouse_dev, fp);
@@ -77,7 +77,7 @@ static int amimouse_open(struct input_dev *dev)
{
unsigned short joy0dat;
- joy0dat = custom.joy0dat;
+ joy0dat = amiga_custom.joy0dat;
amimouse_lastx = joy0dat & 0xff;
amimouse_lasty = joy0dat >> 8;
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 9b1ab5e7a98..3df5eedf8f3 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -19,12 +19,12 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/kmi.h>
+#include <linux/clk.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/hardware/amba.h>
-#include <asm/hardware/amba_kmi.h>
-#include <asm/hardware/clock.h>
#define KMI_BASE (kmi->base)
@@ -72,13 +72,9 @@ static int amba_kmi_open(struct serio *io)
unsigned int divisor;
int ret;
- ret = clk_use(kmi->clk);
- if (ret)
- goto out;
-
ret = clk_enable(kmi->clk);
if (ret)
- goto clk_unuse;
+ goto out;
divisor = clk_get_rate(kmi->clk) / 8000000 - 1;
writeb(divisor, KMICLKDIV);
@@ -97,8 +93,6 @@ static int amba_kmi_open(struct serio *io)
clk_disable:
clk_disable(kmi->clk);
- clk_unuse:
- clk_unuse(kmi->clk);
out:
return ret;
}
@@ -111,7 +105,6 @@ static void amba_kmi_close(struct serio *io)
free_irq(kmi->irq, kmi);
clk_disable(kmi->clk);
- clk_unuse(kmi->clk);
}
static int amba_kmi_probe(struct amba_device *dev, void *id)
diff --git a/drivers/input/serio/sa1111ps2.c b/drivers/input/serio/sa1111ps2.c
index 3f0df3330fb..ebd9976fc81 100644
--- a/drivers/input/serio/sa1111ps2.c
+++ b/drivers/input/serio/sa1111ps2.c
@@ -20,7 +20,6 @@
#include <linux/spinlock.h>
#include <asm/io.h>
-#include <asm/irq.h>
#include <asm/system.h>
#include <asm/hardware/sa1111.h>
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index 1bd88fca054..54a680cc704 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -96,6 +96,7 @@ static int serport_ldisc_open(struct tty_struct *tty)
init_waitqueue_head(&serport->wait);
tty->disc_data = serport;
+ tty->receive_room = 256;
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
return 0;
@@ -140,17 +141,6 @@ out:
}
/*
- * serport_ldisc_room() reports how much room we do have for receiving data.
- * Although we in fact have infinite room, we need to specify some value
- * here, and 256 seems to be reasonable.
- */
-
-static int serport_ldisc_room(struct tty_struct *tty)
-{
- return 256;
-}
-
-/*
* serport_ldisc_read() just waits indefinitely if everything goes well.
* However, when the serio driver closes the serio port, it finishes,
* returning 0 characters.
@@ -237,7 +227,6 @@ static struct tty_ldisc serport_ldisc = {
.read = serport_ldisc_read,
.ioctl = serport_ldisc_ioctl,
.receive_buf = serport_ldisc_receive,
- .receive_room = serport_ldisc_room,
.write_wakeup = serport_ldisc_write_wakeup
};
diff --git a/drivers/isdn/act2000/act2000.h b/drivers/isdn/act2000/act2000.h
index b091d1a5412..d4c50512a1f 100644
--- a/drivers/isdn/act2000/act2000.h
+++ b/drivers/isdn/act2000/act2000.h
@@ -181,17 +181,17 @@ typedef struct act2000_card {
char regname[35]; /* Name used for request_region */
} act2000_card;
-extern __inline__ void act2000_schedule_tx(act2000_card *card)
+static inline void act2000_schedule_tx(act2000_card *card)
{
schedule_work(&card->snd_tq);
}
-extern __inline__ void act2000_schedule_rx(act2000_card *card)
+static inline void act2000_schedule_rx(act2000_card *card)
{
schedule_work(&card->rcv_tq);
}
-extern __inline__ void act2000_schedule_poll(act2000_card *card)
+static inline void act2000_schedule_poll(act2000_card *card)
{
schedule_work(&card->poll_tq);
}
diff --git a/drivers/isdn/act2000/capi.h b/drivers/isdn/act2000/capi.h
index f6d5f530b86..49f453c53c6 100644
--- a/drivers/isdn/act2000/capi.h
+++ b/drivers/isdn/act2000/capi.h
@@ -78,29 +78,29 @@ typedef union actcapi_infoel { /* info element */
typedef struct actcapi_msn {
__u8 eaz;
__u8 len; /* Length of MSN */
- __u8 msn[15] __attribute__ ((packed));
-} actcapi_msn;
+ __u8 msn[15];
+} __attribute__((packed)) actcapi_msn;
typedef struct actcapi_dlpd {
__u8 len; /* Length of structure */
- __u16 dlen __attribute__ ((packed)); /* Data Length */
- __u8 laa __attribute__ ((packed)); /* Link Address A */
+ __u16 dlen; /* Data Length */
+ __u8 laa; /* Link Address A */
__u8 lab; /* Link Address B */
__u8 modulo; /* Modulo Mode */
__u8 win; /* Window size */
__u8 xid[100]; /* XID Information */
-} actcapi_dlpd;
+} __attribute__((packed)) actcapi_dlpd;
typedef struct actcapi_ncpd {
__u8 len; /* Length of structure */
- __u16 lic __attribute__ ((packed));
- __u16 hic __attribute__ ((packed));
- __u16 ltc __attribute__ ((packed));
- __u16 htc __attribute__ ((packed));
- __u16 loc __attribute__ ((packed));
- __u16 hoc __attribute__ ((packed));
- __u8 modulo __attribute__ ((packed));
-} actcapi_ncpd;
+ __u16 lic;
+ __u16 hic;
+ __u16 ltc;
+ __u16 htc;
+ __u16 loc;
+ __u16 hoc;
+ __u8 modulo;
+} __attribute__((packed)) actcapi_ncpd;
#define actcapi_ncpi actcapi_ncpd
/*
@@ -168,19 +168,19 @@ typedef struct actcapi_msg {
__u16 manuf_msg;
__u16 controller;
actcapi_msn msnmap;
- } manufacturer_req_msn;
+ } __attribute__ ((packed)) manufacturer_req_msn;
/* TODO: TraceInit-req/conf/ind/resp and
* TraceDump-req/conf/ind/resp
*/
struct connect_req {
__u8 controller;
__u8 bchan;
- __u32 infomask __attribute__ ((packed));
+ __u32 infomask;
__u8 si1;
__u8 si2;
__u8 eaz;
actcapi_addr addr;
- } connect_req;
+ } __attribute__ ((packed)) connect_req;
struct connect_conf {
__u16 plci;
__u16 info;
@@ -192,7 +192,7 @@ typedef struct actcapi_msg {
__u8 si2;
__u8 eaz;
actcapi_addr addr;
- } connect_ind;
+ } __attribute__ ((packed)) connect_ind;
struct connect_resp {
__u16 plci;
__u8 rejectcause;
@@ -200,14 +200,14 @@ typedef struct actcapi_msg {
struct connect_active_ind {
__u16 plci;
actcapi_addr addr;
- } connect_active_ind;
+ } __attribute__ ((packed)) connect_active_ind;
struct connect_active_resp {
__u16 plci;
} connect_active_resp;
struct connect_b3_req {
__u16 plci;
actcapi_ncpi ncpi;
- } connect_b3_req;
+ } __attribute__ ((packed)) connect_b3_req;
struct connect_b3_conf {
__u16 plci;
__u16 ncci;
@@ -217,12 +217,12 @@ typedef struct actcapi_msg {
__u16 ncci;
__u16 plci;
actcapi_ncpi ncpi;
- } connect_b3_ind;
+ } __attribute__ ((packed)) connect_b3_ind;
struct connect_b3_resp {
__u16 ncci;
__u8 rejectcause;
- actcapi_ncpi ncpi __attribute__ ((packed));
- } connect_b3_resp;
+ actcapi_ncpi ncpi;
+ } __attribute__ ((packed)) connect_b3_resp;
struct disconnect_req {
__u16 plci;
__u8 cause;
@@ -241,14 +241,14 @@ typedef struct actcapi_msg {
struct connect_b3_active_ind {
__u16 ncci;
actcapi_ncpi ncpi;
- } connect_b3_active_ind;
+ } __attribute__ ((packed)) connect_b3_active_ind;
struct connect_b3_active_resp {
__u16 ncci;
} connect_b3_active_resp;
struct disconnect_b3_req {
__u16 ncci;
actcapi_ncpi ncpi;
- } disconnect_b3_req;
+ } __attribute__ ((packed)) disconnect_b3_req;
struct disconnect_b3_conf {
__u16 ncci;
__u16 info;
@@ -257,7 +257,7 @@ typedef struct actcapi_msg {
__u16 ncci;
__u16 info;
actcapi_ncpi ncpi;
- } disconnect_b3_ind;
+ } __attribute__ ((packed)) disconnect_b3_ind;
struct disconnect_b3_resp {
__u16 ncci;
} disconnect_b3_resp;
@@ -265,7 +265,7 @@ typedef struct actcapi_msg {
__u16 plci;
actcapi_infonr nr;
actcapi_infoel el;
- } info_ind;
+ } __attribute__ ((packed)) info_ind;
struct info_resp {
__u16 plci;
} info_resp;
@@ -279,8 +279,8 @@ typedef struct actcapi_msg {
struct select_b2_protocol_req {
__u16 plci;
__u8 protocol;
- actcapi_dlpd dlpd __attribute__ ((packed));
- } select_b2_protocol_req;
+ actcapi_dlpd dlpd;
+ } __attribute__ ((packed)) select_b2_protocol_req;
struct select_b2_protocol_conf {
__u16 plci;
__u16 info;
@@ -288,49 +288,49 @@ typedef struct actcapi_msg {
struct select_b3_protocol_req {
__u16 plci;
__u8 protocol;
- actcapi_ncpd ncpd __attribute__ ((packed));
- } select_b3_protocol_req;
+ actcapi_ncpd ncpd;
+ } __attribute__ ((packed)) select_b3_protocol_req;
struct select_b3_protocol_conf {
__u16 plci;
__u16 info;
} select_b3_protocol_conf;
struct listen_req {
__u8 controller;
- __u32 infomask __attribute__ ((packed));
- __u16 eazmask __attribute__ ((packed));
- __u16 simask __attribute__ ((packed));
- } listen_req;
+ __u32 infomask;
+ __u16 eazmask;
+ __u16 simask;
+ } __attribute__ ((packed)) listen_req;
struct listen_conf {
__u8 controller;
- __u16 info __attribute__ ((packed));
- } listen_conf;
+ __u16 info;
+ } __attribute__ ((packed)) listen_conf;
struct data_b3_req {
__u16 fakencci;
__u16 datalen;
__u32 unused;
__u8 blocknr;
- __u16 flags __attribute__ ((packed));
- } data_b3_req;
+ __u16 flags;
+ } __attribute__ ((packed)) data_b3_req;
struct data_b3_ind {
__u16 fakencci;
__u16 datalen;
__u32 unused;
__u8 blocknr;
- __u16 flags __attribute__ ((packed));
- } data_b3_ind;
+ __u16 flags;
+ } __attribute__ ((packed)) data_b3_ind;
struct data_b3_resp {
__u16 ncci;
__u8 blocknr;
- } data_b3_resp;
+ } __attribute__ ((packed)) data_b3_resp;
struct data_b3_conf {
__u16 ncci;
__u8 blocknr;
- __u16 info __attribute__ ((packed));
- } data_b3_conf;
+ __u16 info;
+ } __attribute__ ((packed)) data_b3_conf;
} msg;
-} actcapi_msg;
+} __attribute__ ((packed)) actcapi_msg;
-extern __inline__ unsigned short
+static inline unsigned short
actcapi_nextsmsg(act2000_card *card)
{
unsigned long flags;
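The act2000 CAPI structures above drop the packed attribute from individual members and attach it to each struct as a whole, which asks the compiler to lay out every member without padding. A small user-space illustration of the effect, not part of the patch:

#include <stdio.h>
#include <stdint.h>

struct example_packed {
	uint8_t  len;
	uint16_t dlen;	/* at offset 1: no padding, the whole struct is packed */
	uint8_t  laa;
} __attribute__((packed));

int main(void)
{
	/* Prints 4; without the attribute most ABIs would pad this to 6. */
	printf("sizeof(struct example_packed) = %zu\n",
	       sizeof(struct example_packed));
	return 0;
}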
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 11ae0fddea0..623adbb0d13 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -463,8 +463,7 @@ static int handle_recv_skb(struct capiminor *mp, struct sk_buff *skb)
#endif
goto bad;
}
- if (ld->receive_room &&
- ld->receive_room(mp->tty) < datalen) {
+ if (mp->tty->receive_room < datalen) {
#if defined(_DEBUG_DATAFLOW) || defined(_DEBUG_TTYFUNCS)
printk(KERN_DEBUG "capi: no room in tty\n");
#endif
diff --git a/drivers/isdn/capi/capifs.c b/drivers/isdn/capi/capifs.c
index 7b564c0dd99..0a37aded4b5 100644
--- a/drivers/isdn/capi/capifs.c
+++ b/drivers/isdn/capi/capifs.c
@@ -17,6 +17,8 @@
#include <linux/ctype.h>
#include <linux/sched.h> /* current */
+#include "capifs.h"
+
MODULE_DESCRIPTION("CAPI4Linux: /dev/capi/ filesystem");
MODULE_AUTHOR("Carsten Paeth");
MODULE_LICENSE("GPL");
@@ -136,7 +138,7 @@ static struct dentry *get_node(int num)
{
char s[10];
struct dentry *root = capifs_root;
- down(&root->d_inode->i_sem);
+ mutex_lock(&root->d_inode->i_mutex);
return lookup_one_len(s, root, sprintf(s, "%d", num));
}
@@ -157,7 +159,7 @@ void capifs_new_ncci(unsigned int number, dev_t device)
dentry = get_node(number);
if (!IS_ERR(dentry) && !dentry->d_inode)
d_instantiate(dentry, inode);
- up(&capifs_root->d_inode->i_sem);
+ mutex_unlock(&capifs_root->d_inode->i_mutex);
}
void capifs_free_ncci(unsigned int number)
@@ -173,7 +175,7 @@ void capifs_free_ncci(unsigned int number)
}
dput(dentry);
}
- up(&capifs_root->d_inode->i_sem);
+ mutex_unlock(&capifs_root->d_inode->i_mutex);
}
static int __init capifs_init(void)
diff --git a/drivers/isdn/hardware/eicon/os_4bri.c b/drivers/isdn/hardware/eicon/os_4bri.c
index cccfabc1117..11e6f937c1e 100644
--- a/drivers/isdn/hardware/eicon/os_4bri.c
+++ b/drivers/isdn/hardware/eicon/os_4bri.c
@@ -16,6 +16,7 @@
#include "diva_pci.h"
#include "mi_pc.h"
#include "dsrv4bri.h"
+#include "helpers.h"
static void *diva_xdiLoadFileFile = NULL;
static dword diva_xdiLoadFileLength = 0;
@@ -815,7 +816,7 @@ diva_4bri_cmd_card_proc(struct _diva_os_xdi_adapter *a,
return (ret);
}
-void *xdiLoadFile(char *FileName, unsigned long *FileLength,
+void *xdiLoadFile(char *FileName, dword *FileLength,
unsigned long lim)
{
void *ret = diva_xdiLoadFileFile;
diff --git a/drivers/isdn/hardware/eicon/os_bri.c b/drivers/isdn/hardware/eicon/os_bri.c
index 4cc44a5dd1d..f31bba5b16f 100644
--- a/drivers/isdn/hardware/eicon/os_bri.c
+++ b/drivers/isdn/hardware/eicon/os_bri.c
@@ -16,6 +16,7 @@
#include "diva_pci.h"
#include "mi_pc.h"
#include "pc_maint.h"
+#include "dsrv_bri.h"
/*
** IMPORTS
diff --git a/drivers/isdn/hardware/eicon/os_pri.c b/drivers/isdn/hardware/eicon/os_pri.c
index 8ac207f75e5..a296a846f29 100644
--- a/drivers/isdn/hardware/eicon/os_pri.c
+++ b/drivers/isdn/hardware/eicon/os_pri.c
@@ -18,6 +18,7 @@
#include "pc_maint.h"
#include "dsp_tst.h"
#include "diva_dma.h"
+#include "dsrv_pri.h"
/* --------------------------------------------------------------------------
OS Dependent part of XDI driver for DIVA PRI Adapter
diff --git a/drivers/isdn/hisax/Kconfig b/drivers/isdn/hisax/Kconfig
index c82105920d7..0ef560144be 100644
--- a/drivers/isdn/hisax/Kconfig
+++ b/drivers/isdn/hisax/Kconfig
@@ -110,7 +110,7 @@ config HISAX_16_3
config HISAX_TELESPCI
bool "Teles PCI"
- depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K))
+ depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || FRV))
help
This enables HiSax support for the Teles PCI.
See <file:Documentation/isdn/README.HiSax> on how to configure it.
@@ -238,7 +238,7 @@ config HISAX_MIC
config HISAX_NETJET
bool "NETjet card"
- depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K))
+ depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || FRV))
help
This enables HiSax support for the NetJet from Traverse
Technologies.
@@ -249,7 +249,7 @@ config HISAX_NETJET
config HISAX_NETJET_U
bool "NETspider U card"
- depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K))
+ depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || FRV))
help
This enables HiSax support for the Netspider U interface ISDN card
from Traverse Technologies.
@@ -317,7 +317,7 @@ config HISAX_GAZEL
config HISAX_HFC_PCI
bool "HFC PCI-Bus cards"
- depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K))
+ depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || FRV))
help
This enables HiSax support for the HFC-S PCI 2BDS0 based cards.
@@ -344,7 +344,7 @@ config HISAX_HFC_SX
config HISAX_ENTERNOW_PCI
bool "Formula-n enter:now PCI card"
- depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K))
+ depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || FRV))
help
This enables HiSax support for the Formula-n enter:now PCI
ISDN card.
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h
index 26c545fa223..1b85ce166af 100644
--- a/drivers/isdn/hisax/hisax.h
+++ b/drivers/isdn/hisax/hisax.h
@@ -396,17 +396,17 @@ struct isar_hw {
struct hdlc_stat_reg {
#ifdef __BIG_ENDIAN
- u_char fill __attribute__((packed));
- u_char mode __attribute__((packed));
- u_char xml __attribute__((packed));
- u_char cmd __attribute__((packed));
+ u_char fill;
+ u_char mode;
+ u_char xml;
+ u_char cmd;
#else
- u_char cmd __attribute__((packed));
- u_char xml __attribute__((packed));
- u_char mode __attribute__((packed));
- u_char fill __attribute__((packed));
+ u_char cmd;
+ u_char xml;
+ u_char mode;
+ u_char fill;
#endif
-};
+} __attribute__((packed));
struct hdlc_hw {
union {
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.h b/drivers/isdn/hisax/hisax_fcpcipnp.h
index bd8a22e4d6a..21fbcedf3a9 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.h
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.h
@@ -12,17 +12,17 @@ enum {
struct hdlc_stat_reg {
#ifdef __BIG_ENDIAN
- u_char fill __attribute__((packed));
- u_char mode __attribute__((packed));
- u_char xml __attribute__((packed));
- u_char cmd __attribute__((packed));
+ u_char fill;
+ u_char mode;
+ u_char xml;
+ u_char cmd;
#else
- u_char cmd __attribute__((packed));
- u_char xml __attribute__((packed));
- u_char mode __attribute__((packed));
- u_char fill __attribute__((packed));
+ u_char cmd;
+ u_char xml;
+ u_char mode;
+ u_char fill;
#endif
-};
+} __attribute__((packed));
struct fritz_bcs {
struct hisax_b_if b_if;
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 4643df097bf..22759c01746 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -857,6 +857,118 @@ isdn_readbchan(int di, int channel, u_char * buf, u_char * fp, int len, wait_que
return count;
}
+/*
+ * isdn_readbchan_tty() tries to get data from the read-queue.
+ * It MUST be called with interrupts off.
+ *
+ * Be aware that this is not an atomic operation when sleep != 0, even though
+ * interrupts are turned off! Well, like that we are currently only called
+ * on behalf of a read system call on raw device files (which are documented
+ * to be dangerous and for for debugging purpose only). The inode semaphore
+ * takes care that this is not called for the same minor device number while
+ * we are sleeping, but access is not serialized against simultaneous read()
+ * from the corresponding ttyI device. Can other ugly events, like changes
+ * of the mapping (di,ch)<->minor, happen during the sleep? --he
+ */
+int
+isdn_readbchan_tty(int di, int channel, struct tty_struct *tty, int cisco_hack)
+{
+ int count;
+ int count_pull;
+ int count_put;
+ int dflag;
+ struct sk_buff *skb;
+ char last = 0;
+ int len;
+
+ if (!dev->drv[di])
+ return 0;
+ if (skb_queue_empty(&dev->drv[di]->rpqueue[channel]))
+ return 0;
+
+ len = tty_buffer_request_room(tty, dev->drv[di]->rcvcount[channel]);
+ if(len == 0)
+ return len;
+
+ count = 0;
+ while (len) {
+ if (!(skb = skb_peek(&dev->drv[di]->rpqueue[channel])))
+ break;
+#ifdef CONFIG_ISDN_AUDIO
+ if (ISDN_AUDIO_SKB_LOCK(skb))
+ break;
+ ISDN_AUDIO_SKB_LOCK(skb) = 1;
+ if ((ISDN_AUDIO_SKB_DLECOUNT(skb)) || (dev->drv[di]->DLEflag & (1 << channel))) {
+ char *p = skb->data;
+ unsigned long DLEmask = (1 << channel);
+
+ dflag = 0;
+ count_pull = count_put = 0;
+ while ((count_pull < skb->len) && (len > 0)) {
+ len--;
+ if (dev->drv[di]->DLEflag & DLEmask) {
+ last = DLE;
+ dev->drv[di]->DLEflag &= ~DLEmask;
+ } else {
+ last = *p;
+ if (last == DLE) {
+ dev->drv[di]->DLEflag |= DLEmask;
+ (ISDN_AUDIO_SKB_DLECOUNT(skb))--;
+ }
+ p++;
+ count_pull++;
+ }
+ count_put++;
+ }
+ if (count_pull >= skb->len)
+ dflag = 1;
+ } else {
+#endif
+ /* No DLE's in buff, so simply copy it */
+ dflag = 1;
+ if ((count_pull = skb->len) > len) {
+ count_pull = len;
+ dflag = 0;
+ }
+ count_put = count_pull;
+ if(count_put > 1)
+ tty_insert_flip_string(tty, skb->data, count_put - 1);
+ last = skb->data[count_put - 1];
+ len -= count_put;
+#ifdef CONFIG_ISDN_AUDIO
+ }
+#endif
+ count += count_put;
+ if (dflag) {
+ /* We got all the data in this buff.
+ * Now we can dequeue it.
+ */
+ if(cisco_hack)
+ tty_insert_flip_char(tty, last, 0xFF);
+ else
+ tty_insert_flip_char(tty, last, TTY_NORMAL);
+#ifdef CONFIG_ISDN_AUDIO
+ ISDN_AUDIO_SKB_LOCK(skb) = 0;
+#endif
+ skb = skb_dequeue(&dev->drv[di]->rpqueue[channel]);
+ dev_kfree_skb(skb);
+ } else {
+ tty_insert_flip_char(tty, last, TTY_NORMAL);
+ /* Not yet emptied this buff, so it
+ * must stay in the queue, for further calls
+ * but we pull off the data we got until now.
+ */
+ skb_pull(skb, count_pull);
+#ifdef CONFIG_ISDN_AUDIO
+ ISDN_AUDIO_SKB_LOCK(skb) = 0;
+#endif
+ }
+ dev->drv[di]->rcvcount[channel] -= count_put;
+ }
+ return count;
+}
+
+
static __inline int
isdn_minor2drv(int minor)
{
diff --git a/drivers/isdn/i4l/isdn_common.h b/drivers/isdn/i4l/isdn_common.h
index e27e9c3a81e..082735dbb41 100644
--- a/drivers/isdn/i4l/isdn_common.h
+++ b/drivers/isdn/i4l/isdn_common.h
@@ -37,6 +37,7 @@ extern void isdn_timer_ctrl(int tf, int onoff);
extern void isdn_unexclusive_channel(int di, int ch);
extern int isdn_getnum(char **);
extern int isdn_readbchan(int, int, u_char *, u_char *, int, wait_queue_head_t *);
+extern int isdn_readbchan_tty(int, int, struct tty_struct *, int);
extern int isdn_get_free_channel(int, int, int, int, int, char *);
extern int isdn_writebuf_skb_stub(int, int, int, struct sk_buff *);
extern int register_isdn(isdn_if * i);
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 8c404b4e248..f190a99604f 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -64,37 +64,42 @@ isdn_tty_try_read(modem_info * info, struct sk_buff *skb)
int c;
int len;
struct tty_struct *tty;
+ char last;
if (info->online) {
if ((tty = info->tty)) {
if (info->mcr & UART_MCR_RTS) {
- c = TTY_FLIPBUF_SIZE - tty->flip.count;
len = skb->len
#ifdef CONFIG_ISDN_AUDIO
+ ISDN_AUDIO_SKB_DLECOUNT(skb)
#endif
;
+
+ c = tty_buffer_request_room(tty, len);
if (c >= len) {
#ifdef CONFIG_ISDN_AUDIO
- if (ISDN_AUDIO_SKB_DLECOUNT(skb))
- while (skb->len--) {
+ if (ISDN_AUDIO_SKB_DLECOUNT(skb)) {
+ int l = skb->len;
+ unsigned char *dp = skb->data;
+ while (--l) {
if (*skb->data == DLE)
tty_insert_flip_char(tty, DLE, 0);
- tty_insert_flip_char(tty, *skb->data++, 0);
+ tty_insert_flip_char(tty, *dp++, 0);
+ }
+ last = *dp;
} else {
#endif
- memcpy(tty->flip.char_buf_ptr,
- skb->data, len);
- tty->flip.count += len;
- tty->flip.char_buf_ptr += len;
- memset(tty->flip.flag_buf_ptr, 0, len);
- tty->flip.flag_buf_ptr += len;
+ if(len > 1)
+ tty_insert_flip_string(tty, skb->data, len - 1);
+ last = skb->data[len - 1];
#ifdef CONFIG_ISDN_AUDIO
}
#endif
if (info->emu.mdmreg[REG_CPPP] & BIT_CPPP)
- tty->flip.flag_buf_ptr[len - 1] = 0xff;
- schedule_delayed_work(&tty->flip.work, 1);
+ tty_insert_flip_char(tty, last, 0xFF);
+ else
+ tty_insert_flip_char(tty, last, TTY_NORMAL);
+ tty_flip_buffer_push(tty);
kfree_skb(skb);
return 1;
}
@@ -114,7 +119,6 @@ isdn_tty_readmodem(void)
int resched = 0;
int midx;
int i;
- int c;
int r;
struct tty_struct *tty;
modem_info *info;
@@ -131,20 +135,13 @@ isdn_tty_readmodem(void)
#endif
if ((tty = info->tty)) {
if (info->mcr & UART_MCR_RTS) {
- c = TTY_FLIPBUF_SIZE - tty->flip.count;
- if (c > 0) {
- r = isdn_readbchan(info->isdn_driver, info->isdn_channel,
- tty->flip.char_buf_ptr,
- tty->flip.flag_buf_ptr, c, NULL);
- /* CISCO AsyncPPP Hack */
- if (!(info->emu.mdmreg[REG_CPPP] & BIT_CPPP))
- memset(tty->flip.flag_buf_ptr, 0, r);
- tty->flip.count += r;
- tty->flip.flag_buf_ptr += r;
- tty->flip.char_buf_ptr += r;
- if (r)
- schedule_delayed_work(&tty->flip.work, 1);
- }
+ /* CISCO AsyncPPP Hack */
+ if (!(info->emu.mdmreg[REG_CPPP] & BIT_CPPP))
+ r = isdn_readbchan_tty(info->isdn_driver, info->isdn_channel, tty, 0);
+ else
+ r = isdn_readbchan_tty(info->isdn_driver, info->isdn_channel, tty, 1);
+ if (r)
+ tty_flip_buffer_push(tty);
} else
r = 1;
} else
@@ -249,7 +246,7 @@ isdn_tty_rcv_skb(int i, int di, int channel, struct sk_buff *skb)
}
#endif
#endif
- /* Try to deliver directly via tty-flip-buf if queue is empty */
+ /* Try to deliver directly via tty-buf if queue is empty */
spin_lock_irqsave(&info->readlock, flags);
if (skb_queue_empty(&dev->drv[di]->rpqueue[channel]))
if (isdn_tty_try_read(info, skb)) {
@@ -534,7 +531,7 @@ isdn_tty_senddown(modem_info * info)
/* The next routine is called once from within timer-interrupt
* triggered within isdn_tty_modem_ncarrier(). It calls
* isdn_tty_modem_result() to stuff a "NO CARRIER" Message
- * into the tty's flip-buffer.
+ * into the tty's buffer.
*/
static void
isdn_tty_modem_do_ncarrier(unsigned long data)
@@ -2347,6 +2344,7 @@ isdn_tty_at_cout(char *msg, modem_info * info)
u_long flags;
struct sk_buff *skb = NULL;
char *sp = NULL;
+ int l = strlen(msg);
if (!msg) {
printk(KERN_WARNING "isdn_tty: Null-Message in isdn_tty_at_cout\n");
@@ -2359,16 +2357,16 @@ isdn_tty_at_cout(char *msg, modem_info * info)
return;
}
- /* use queue instead of direct flip, if online and */
- /* data is in queue or flip buffer is full */
- if ((info->online) && (((tty->flip.count + strlen(msg)) >= TTY_FLIPBUF_SIZE) ||
- (!skb_queue_empty(&dev->drv[info->isdn_driver]->rpqueue[info->isdn_channel])))) {
- skb = alloc_skb(strlen(msg), GFP_ATOMIC);
+ /* use queue instead of direct, if online and */
+ /* data is in queue or buffer is full */
+ if ((info->online && tty_buffer_request_room(tty, l) < l) ||
+ (!skb_queue_empty(&dev->drv[info->isdn_driver]->rpqueue[info->isdn_channel]))) {
+ skb = alloc_skb(l, GFP_ATOMIC);
if (!skb) {
spin_unlock_irqrestore(&info->readlock, flags);
return;
}
- sp = skb_put(skb, strlen(msg));
+ sp = skb_put(skb, l);
#ifdef CONFIG_ISDN_AUDIO
ISDN_AUDIO_SKB_DLECOUNT(skb) = 0;
ISDN_AUDIO_SKB_LOCK(skb) = 0;
@@ -2392,9 +2390,8 @@ isdn_tty_at_cout(char *msg, modem_info * info)
if (skb) {
*sp++ = c;
} else {
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
+ if(tty_insert_flip_char(tty, c, TTY_NORMAL) == 0)
break;
- tty_insert_flip_char(tty, c, 0);
}
}
if (skb) {
@@ -2402,12 +2399,12 @@ isdn_tty_at_cout(char *msg, modem_info * info)
dev->drv[info->isdn_driver]->rcvcount[info->isdn_channel] += skb->len;
spin_unlock_irqrestore(&info->readlock, flags);
/* Schedule dequeuing */
- if ((dev->modempoll) && (info->rcvsched))
+ if (dev->modempoll && info->rcvsched)
isdn_timer_ctrl(ISDN_TIMER_MODEMREAD, 1);
} else {
spin_unlock_irqrestore(&info->readlock, flags);
- schedule_delayed_work(&tty->flip.work, 1);
+ tty_flip_buffer_push(tty);
}
}
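Both converted receive paths use the same pattern with the new tty buffering helpers: reserve room, bulk-insert everything but the final byte, insert the final byte with its flag (0xFF for the CISCO AsyncPPP hack, TTY_NORMAL otherwise), then push the flip buffer. A hedged sketch against the 2.6.16-era API; demo_rx() is a hypothetical helper, not code from this patch, and it simply truncates where the real driver keeps unconsumed data queued:

#include <linux/tty.h>
#include <linux/tty_flip.h>

static void demo_rx(struct tty_struct *tty, const unsigned char *buf,
		    int len, int mark_last)
{
	int room = tty_buffer_request_room(tty, len);

	if (room < len)
		len = room;		/* drop what does not fit (sketch only) */
	if (len > 1)
		tty_insert_flip_string(tty, buf, len - 1);
	if (len > 0)
		/* last byte may carry a special flag (CISCO hack) */
		tty_insert_flip_char(tty, buf[len - 1],
				     mark_last ? 0xFF : TTY_NORMAL);
	tty_flip_buffer_push(tty);	/* hand data to the line discipline */
}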
diff --git a/drivers/isdn/sc/command.c b/drivers/isdn/sc/command.c
index 19f2fcf0ae4..b4b24335f71 100644
--- a/drivers/isdn/sc/command.c
+++ b/drivers/isdn/sc/command.c
@@ -43,7 +43,6 @@ extern int send_and_receive(int, unsigned int, unsigned char, unsigned char,
RspMessage *, int);
extern int sendmessage(int, unsigned int, unsigned int, unsigned int,
unsigned int, unsigned int, unsigned int, unsigned int *);
-extern inline void pullphone(char *, char *);
#ifdef DEBUG
/*
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index a0ea44c3e8b..7d4a0ac28c0 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -149,14 +149,14 @@ config MAC_EMUMOUSEBTN
config THERM_WINDTUNNEL
tristate "Support for thermal management on Windtunnel G4s"
- depends on I2C && I2C_KEYWEST && PPC_PMAC && !PPC_PMAC64
+ depends on I2C && I2C_POWERMAC && PPC_PMAC && !PPC_PMAC64
help
This driver provides some thermostat and fan control for the desktop
G4 "Windtunnel"
config THERM_ADT746X
tristate "Support for thermal mgmnt on laptops with ADT 746x chipset"
- depends on I2C && I2C_KEYWEST && PPC_PMAC && !PPC_PMAC64
+ depends on I2C && I2C_POWERMAC && PPC_PMAC && !PPC_PMAC64
help
This driver provides some thermostat and fan control for the
 iBook G4, and the ATI based aluminium PowerBooks, allowing slightly
@@ -164,7 +164,7 @@ config THERM_ADT746X
config THERM_PM72
tristate "Support for thermal management on PowerMac G5"
- depends on I2C && I2C_KEYWEST && PPC_PMAC64
+ depends on I2C && I2C_POWERMAC && PPC_PMAC64
help
This driver provides thermostat and fan control for the desktop
G5 machines.
@@ -175,14 +175,14 @@ config WINDFARM
config WINDFARM_PM81
tristate "Support for thermal management on iMac G5"
depends on WINDFARM && I2C && CPU_FREQ_PMAC64 && PMAC_SMU
- select I2C_PMAC_SMU
+ select I2C_POWERMAC
help
This driver provides thermal control for the iMacG5
config WINDFARM_PM91
tristate "Support for thermal management on PowerMac9,1"
depends on WINDFARM && I2C && CPU_FREQ_PMAC64 && PMAC_SMU
- select I2C_PMAC_SMU
+ select I2C_POWERMAC
help
This driver provides thermal control for the PowerMac9,1
which is the recent (SMU based) single CPU desktop G5
diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c
index 71aeb912ec6..d56d400b6aa 100644
--- a/drivers/macintosh/adb-iop.c
+++ b/drivers/macintosh/adb-iop.c
@@ -239,7 +239,7 @@ static int adb_iop_write(struct adb_request *req)
local_irq_save(flags);
- req->next = 0;
+ req->next = NULL;
req->sent = 0;
req->complete = 0;
req->reply_len = 0;
diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c
index cf6a6f2248a..314fc0830d9 100644
--- a/drivers/macintosh/macio-adb.c
+++ b/drivers/macintosh/macio-adb.c
@@ -17,6 +17,7 @@
#include <asm/irq.h>
#include <asm/system.h>
#include <linux/init.h>
+#include <linux/ioport.h>
struct preg {
unsigned char r;
@@ -88,24 +89,26 @@ int macio_probe(void)
int macio_init(void)
{
struct device_node *adbs;
+ struct resource r;
adbs = find_compatible_devices("adb", "chrp,adb0");
if (adbs == 0)
return -ENXIO;
#if 0
- { int i;
+ { int i = 0;
printk("macio_adb_init: node = %p, addrs =", adbs->node);
- for (i = 0; i < adbs->n_addrs; ++i)
- printk(" %x(%x)", adbs->addrs[i].address, adbs->addrs[i].size);
+ while (!of_address_to_resource(adbs, i++, &r))
+ printk(" %x(%x)", r.start, r.end - r.start);
printk(", intrs =");
for (i = 0; i < adbs->n_intrs; ++i)
printk(" %x", adbs->intrs[i].line);
printk("\n"); }
#endif
-
- adb = ioremap(adbs->addrs->address, sizeof(struct adb_regs));
+ if (of_address_to_resource(adbs, 0, &r))
+ return -ENXIO;
+ adb = ioremap(r.start, sizeof(struct adb_regs));
out_8(&adb->ctrl.r, 0);
out_8(&adb->intr.r, 0);
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 228e1852a83..2a545ceb523 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -3,6 +3,13 @@
* a MacIO ASIC. Interface to new driver model mostly
* stolen from the PCI version.
*
+ * Copyright (C) 2005 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
* TODO:
*
* - Don't probe below media bay by default, but instead provide
@@ -218,12 +225,14 @@ postcore_initcall(macio_bus_driver_init);
/**
- * macio_release_dev - free a macio device structure when all users of it are finished.
+ * macio_release_dev - free a macio device structure when all users of it are
+ * finished.
* @dev: device that's been disconnected
*
- * Will be called only by the device core when all users of this macio device are
- * done. This currently means never as we don't hot remove any macio device yet,
- * though that will happen with mediabay based devices in a later implementation.
+ * Will be called only by the device core when all users of this macio device
+ * are done. This currently means never as we don't hot remove any macio
+ * device yet, though that will happen with mediabay based devices in a later
+ * implementation.
*/
static void macio_release_dev(struct device *dev)
{
@@ -242,49 +251,114 @@ static void macio_release_dev(struct device *dev)
* If this routine returns non-null, then the resource is completely
* skipped.
*/
-static int macio_resource_quirks(struct device_node *np, struct resource *res, int index)
+static int macio_resource_quirks(struct device_node *np, struct resource *res,
+ int index)
{
if (res->flags & IORESOURCE_MEM) {
/* Grand Central has too large resource 0 on some machines */
- if (index == 0 && !strcmp(np->name, "gc")) {
- np->addrs[0].size = 0x20000;
+ if (index == 0 && !strcmp(np->name, "gc"))
res->end = res->start + 0x1ffff;
- }
+
/* Airport has bogus resource 2 */
if (index >= 2 && !strcmp(np->name, "radio"))
return 1;
+
+#ifndef CONFIG_PPC64
/* DBDMAs may have bogus sizes */
- if ((res->start & 0x0001f000) == 0x00008000) {
- np->addrs[index].size = 0x100;
+ if ((res->start & 0x0001f000) == 0x00008000)
res->end = res->start + 0xff;
- }
- /* ESCC parent eats child resources. We could have added a level of hierarchy,
- * but I don't really feel the need for it */
+#endif /* CONFIG_PPC64 */
+
+ /* ESCC parent eats child resources. We could have added a
+ * level of hierarchy, but I don't really feel the need
+ * for it
+ */
if (!strcmp(np->name, "escc"))
return 1;
+
/* ESCC has bogus resources >= 3 */
- if (index >= 3 && !(strcmp(np->name, "ch-a") && strcmp(np->name, "ch-b")))
+ if (index >= 3 && !(strcmp(np->name, "ch-a") &&
+ strcmp(np->name, "ch-b")))
return 1;
+
/* Media bay has too many resources, keep only first one */
if (index > 0 && !strcmp(np->name, "media-bay"))
return 1;
+
/* Some older IDE resources have bogus sizes */
if (!(strcmp(np->name, "IDE") && strcmp(np->name, "ATA") &&
strcmp(np->type, "ide") && strcmp(np->type, "ata"))) {
- if (index == 0 && np->addrs[0].size > 0x1000) {
- np->addrs[0].size = 0x1000;
+ if (index == 0 && (res->end - res->start) > 0xfff)
res->end = res->start + 0xfff;
- }
- if (index == 1 && np->addrs[1].size > 0x100) {
- np->addrs[1].size = 0x100;
+ if (index == 1 && (res->end - res->start) > 0xff)
res->end = res->start + 0xff;
- }
}
}
return 0;
}
+static void macio_setup_interrupts(struct macio_dev *dev)
+{
+ struct device_node *np = dev->ofdev.node;
+ int i,j;
+
+ /* For now, we use pre-parsed entries in the device-tree for
+ * interrupt routing and addresses, but we should change that
+ * to dynamically parsed entries and so get rid of most of the
+ * clutter in struct device_node
+ */
+ for (i = j = 0; i < np->n_intrs; i++) {
+ struct resource *res = &dev->interrupt[j];
+
+ if (j >= MACIO_DEV_COUNT_IRQS)
+ break;
+ res->start = np->intrs[i].line;
+ res->flags = IORESOURCE_IO;
+ if (np->intrs[i].sense)
+ res->flags |= IORESOURCE_IRQ_LOWLEVEL;
+ else
+ res->flags |= IORESOURCE_IRQ_HIGHEDGE;
+ res->name = dev->ofdev.dev.bus_id;
+ if (macio_resource_quirks(np, res, i))
+ memset(res, 0, sizeof(struct resource));
+ else
+ j++;
+ }
+ dev->n_interrupts = j;
+}
+
+static void macio_setup_resources(struct macio_dev *dev,
+ struct resource *parent_res)
+{
+ struct device_node *np = dev->ofdev.node;
+ struct resource r;
+ int index;
+
+ for (index = 0; of_address_to_resource(np, index, &r) == 0; index++) {
+ struct resource *res = &dev->resource[index];
+ if (index >= MACIO_DEV_COUNT_RESOURCES)
+ break;
+ *res = r;
+ res->name = dev->ofdev.dev.bus_id;
+
+ if (macio_resource_quirks(np, res, index)) {
+ memset(res, 0, sizeof(struct resource));
+ continue;
+ }
+ /* Currently, we consider failure as harmless, this may
+ * change in the future, once I've found all the device
+ * tree bugs in older machines & worked around them
+ */
+ if (insert_resource(parent_res, res)) {
+ printk(KERN_WARNING "Can't request resource "
+ "%d for MacIO device %s\n",
+ index, dev->ofdev.dev.bus_id);
+ }
+ }
+ dev->n_resources = index;
+}
+
/**
* macio_add_one_device - Add one device from OF node to the device tree
* @chip: pointer to the macio_chip holding the device
@@ -294,12 +368,13 @@ static int macio_resource_quirks(struct device_node *np, struct resource *res, i
* When media-bay is changed to hotswap drivers, this function will
* be exposed to the bay driver some way...
*/
-static struct macio_dev * macio_add_one_device(struct macio_chip *chip, struct device *parent,
- struct device_node *np, struct macio_dev *in_bay,
+static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
+ struct device *parent,
+ struct device_node *np,
+ struct macio_dev *in_bay,
struct resource *parent_res)
{
struct macio_dev *dev;
- int i, j;
u32 *reg;
if (np == NULL)
@@ -326,7 +401,8 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, struct d
/* MacIO itself has a different reg, we use it's PCI base */
if (np == chip->of_node) {
- sprintf(dev->ofdev.dev.bus_id, "%1d.%08lx:%.*s", chip->lbus.index,
+ sprintf(dev->ofdev.dev.bus_id, "%1d.%08lx:%.*s",
+ chip->lbus.index,
#ifdef CONFIG_PCI
pci_resource_start(chip->lbus.pdev, 0),
#else
@@ -335,57 +411,16 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, struct d
MAX_NODE_NAME_SIZE, np->name);
} else {
reg = (u32 *)get_property(np, "reg", NULL);
- sprintf(dev->ofdev.dev.bus_id, "%1d.%08x:%.*s", chip->lbus.index,
+ sprintf(dev->ofdev.dev.bus_id, "%1d.%08x:%.*s",
+ chip->lbus.index,
reg ? *reg : 0, MAX_NODE_NAME_SIZE, np->name);
}
- /* For now, we use pre-parsed entries in the device-tree for
- * interrupt routing and addresses, but we should change that
- * to dynamically parsed entries and so get rid of most of the
- * clutter in struct device_node
- */
- for (i = j = 0; i < np->n_intrs; i++) {
- struct resource *res = &dev->interrupt[j];
-
- if (j >= MACIO_DEV_COUNT_IRQS)
- break;
- res->start = np->intrs[i].line;
- res->flags = IORESOURCE_IO;
- if (np->intrs[j].sense)
- res->flags |= IORESOURCE_IRQ_LOWLEVEL;
- else
- res->flags |= IORESOURCE_IRQ_HIGHEDGE;
- res->name = dev->ofdev.dev.bus_id;
- if (macio_resource_quirks(np, res, i))
- memset(res, 0, sizeof(struct resource));
- else
- j++;
- }
- dev->n_interrupts = j;
- for (i = j = 0; i < np->n_addrs; i++) {
- struct resource *res = &dev->resource[j];
-
- if (j >= MACIO_DEV_COUNT_RESOURCES)
- break;
- res->start = np->addrs[i].address;
- res->end = np->addrs[i].address + np->addrs[i].size - 1;
- res->flags = IORESOURCE_MEM;
- res->name = dev->ofdev.dev.bus_id;
- if (macio_resource_quirks(np, res, i))
- memset(res, 0, sizeof(struct resource));
- else {
- j++;
- /* Currently, we consider failure as harmless, this may
- * change in the future, once I've found all the device
- * tree bugs in older machines & worked around them
- */
- if (insert_resource(parent_res, res))
- printk(KERN_WARNING "Can't request resource %d for MacIO"
- " device %s\n", i, dev->ofdev.dev.bus_id);
- }
- }
- dev->n_resources = j;
+ /* Setup interrupts & resources */
+ macio_setup_interrupts(dev);
+ macio_setup_resources(dev, parent_res);
+ /* Register with core */
if (of_device_register(&dev->ofdev) != 0) {
printk(KERN_DEBUG"macio: device registration error for %s!\n",
dev->ofdev.dev.bus_id);
@@ -442,36 +477,42 @@ static void macio_pci_add_devices(struct macio_chip *chip)
/* First scan 1st level */
for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) {
- if (!macio_skip_device(np)) {
- of_node_get(np);
- mdev = macio_add_one_device(chip, &rdev->ofdev.dev, np, NULL, root_res);
- if (mdev == NULL)
- of_node_put(np);
- else if (strncmp(np->name, "media-bay", 9) == 0)
- mbdev = mdev;
- else if (strncmp(np->name, "escc", 4) == 0)
- sdev = mdev;
- }
+ if (macio_skip_device(np))
+ continue;
+ of_node_get(np);
+ mdev = macio_add_one_device(chip, &rdev->ofdev.dev, np, NULL,
+ root_res);
+ if (mdev == NULL)
+ of_node_put(np);
+ else if (strncmp(np->name, "media-bay", 9) == 0)
+ mbdev = mdev;
+ else if (strncmp(np->name, "escc", 4) == 0)
+ sdev = mdev;
}
/* Add media bay devices if any */
if (mbdev)
- for (np = NULL; (np = of_get_next_child(mbdev->ofdev.node, np)) != NULL;)
- if (!macio_skip_device(np)) {
- of_node_get(np);
- if (macio_add_one_device(chip, &mbdev->ofdev.dev, np, mbdev,
- root_res) == NULL)
- of_node_put(np);
- }
+ for (np = NULL; (np = of_get_next_child(mbdev->ofdev.node, np))
+ != NULL;) {
+ if (macio_skip_device(np))
+ continue;
+ of_node_get(np);
+ if (macio_add_one_device(chip, &mbdev->ofdev.dev, np,
+ mbdev, root_res) == NULL)
+ of_node_put(np);
+ }
+
/* Add serial ports if any */
if (sdev) {
- for (np = NULL; (np = of_get_next_child(sdev->ofdev.node, np)) != NULL;)
- if (!macio_skip_device(np)) {
- of_node_get(np);
- if (macio_add_one_device(chip, &sdev->ofdev.dev, np, NULL,
- root_res) == NULL)
- of_node_put(np);
- }
+ for (np = NULL; (np = of_get_next_child(sdev->ofdev.node, np))
+ != NULL;) {
+ if (macio_skip_device(np))
+ continue;
+ of_node_get(np);
+ if (macio_add_one_device(chip, &sdev->ofdev.dev, np,
+ NULL, root_res) == NULL)
+ of_node_put(np);
+ }
}
}
@@ -519,7 +560,8 @@ void macio_unregister_driver(struct macio_driver *drv)
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
*/
-int macio_request_resource(struct macio_dev *dev, int resource_no, const char *name)
+int macio_request_resource(struct macio_dev *dev, int resource_no,
+ const char *name)
{
if (macio_resource_len(dev, resource_no) == 0)
return 0;
@@ -606,20 +648,20 @@ static int __devinit macio_pci_probe(struct pci_dev *pdev, const struct pci_devi
if (ent->vendor != PCI_VENDOR_ID_APPLE)
return -ENODEV;
- /* Note regarding refcounting: We assume pci_device_to_OF_node() is ported
- * to new OF APIs and returns a node with refcount incremented. This isn't
- * the case today, but on the other hand ppc32 doesn't do refcounting. This
- * will have to be fixed when going to ppc64. --BenH.
+ /* Note regarding refcounting: We assume pci_device_to_OF_node() is
+ * ported to new OF APIs and returns a node with refcount incremented.
*/
np = pci_device_to_OF_node(pdev);
if (np == NULL)
return -ENODEV;
- /* This assumption is wrong, fix that here for now until I fix the arch */
+ /* The above assumption is wrong !!!
+ * fix that here for now until I fix the arch code
+ */
of_node_get(np);
- /* We also assume that pmac_feature will have done a get() on nodes stored
- * in the macio chips array
+ /* We also assume that pmac_feature will have done a get() on nodes
+ * stored in the macio chips array
*/
chip = macio_find(np, macio_unknown);
of_node_put(np);
@@ -639,9 +681,9 @@ static int __devinit macio_pci_probe(struct pci_dev *pdev, const struct pci_devi
/*
* HACK ALERT: The WallStreet PowerBook and some OHare based machines
- * have 2 macio ASICs. I must probe the "main" one first or IDE ordering
- * will be incorrect. So I put on "hold" the second one since it seem to
- * appear first on PCI
+ * have 2 macio ASICs. I must probe the "main" one first or IDE
+ * ordering will be incorrect. So I put on "hold" the second one since
+ * it seems to appear first on PCI
*/
if (chip->type == macio_gatwick || chip->type == macio_ohareII)
if (macio_chips[0].lbus.pdev == NULL) {
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index b856bb67169..8dbf2852bae 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -647,6 +647,7 @@ static int __devinit media_bay_attach(struct macio_dev *mdev, const struct of_de
struct media_bay_info* bay;
u32 __iomem *regbase;
struct device_node *ofnode;
+ unsigned long base;
int i;
ofnode = mdev->ofdev.node;
@@ -656,10 +657,11 @@ static int __devinit media_bay_attach(struct macio_dev *mdev, const struct of_de
if (macio_request_resources(mdev, "media-bay"))
return -EBUSY;
/* Media bay registers are located at the beginning of the
- * mac-io chip, we get the parent address for now (hrm...)
+ * mac-io chip, so for now we cheat and align the first
+ * resource passed in down to a 64k boundary
*/
- regbase = (u32 __iomem *)
- ioremap(ofnode->parent->addrs[0].address, 0x100);
+ base = macio_resource_start(mdev, 0) & 0xffff0000u;
+ regbase = (u32 __iomem *)ioremap(base, 0x100);
if (regbase == NULL) {
macio_release_resources(mdev);
return -ENOMEM;
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index e8378274d71..db2ae71d07e 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -53,7 +53,7 @@
#undef DEBUG_SMU
#ifdef DEBUG_SMU
-#define DPRINTK(fmt, args...) do { udbg_printf(KERN_DEBUG fmt , ##args); } while (0)
+#define DPRINTK(fmt, args...) do { printk(KERN_DEBUG fmt , ##args); } while (0)
#else
#define DPRINTK(fmt, args...) do { } while (0)
#endif
@@ -94,6 +94,8 @@ struct smu_device {
static struct smu_device *smu;
static DECLARE_MUTEX(smu_part_access);
+static void smu_i2c_retry(unsigned long data);
+
/*
* SMU driver low level stuff
*/
@@ -469,7 +471,6 @@ int __init smu_init (void)
smu->of_node = np;
smu->db_irq = NO_IRQ;
smu->msg_irq = NO_IRQ;
- init_timer(&smu->i2c_timer);
/* smu_cmdbuf_abs is in the low 2G of RAM, can be converted to a
* 32 bits value safely
@@ -544,6 +545,10 @@ static int smu_late_init(void)
if (!smu)
return 0;
+ init_timer(&smu->i2c_timer);
+ smu->i2c_timer.function = smu_i2c_retry;
+ smu->i2c_timer.data = (unsigned long)smu;
+
/*
* Try to request the interrupts
*/
@@ -570,7 +575,10 @@ static int smu_late_init(void)
return 0;
}
-arch_initcall(smu_late_init);
+/* This has to be before arch_initcall as the low i2c stuff relies on the
+ * above having been done before we reach arch_initcalls
+ */
+core_initcall(smu_late_init);
/*
* sysfs visibility
@@ -580,20 +588,10 @@ static void smu_expose_childs(void *unused)
{
struct device_node *np;
- for (np = NULL; (np = of_get_next_child(smu->of_node, np)) != NULL;) {
- if (device_is_compatible(np, "smu-i2c")) {
- char name[32];
- u32 *reg = (u32 *)get_property(np, "reg", NULL);
-
- if (reg == NULL)
- continue;
- sprintf(name, "smu-i2c-%02x", *reg);
- of_platform_device_create(np, name, &smu->of_dev->dev);
- }
+ for (np = NULL; (np = of_get_next_child(smu->of_node, np)) != NULL;)
if (device_is_compatible(np, "smu-sensors"))
- of_platform_device_create(np, "smu-sensors", &smu->of_dev->dev);
- }
-
+ of_platform_device_create(np, "smu-sensors",
+ &smu->of_dev->dev);
}
static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs, NULL);
@@ -712,13 +710,13 @@ static void smu_i2c_complete_command(struct smu_i2c_cmd *cmd, int fail)
static void smu_i2c_retry(unsigned long data)
{
- struct smu_i2c_cmd *cmd = (struct smu_i2c_cmd *)data;
+ struct smu_i2c_cmd *cmd = smu->cmd_i2c_cur;
DPRINTK("SMU: i2c failure, requeuing...\n");
/* requeue command simply by resetting reply_len */
cmd->pdata[0] = 0xff;
- cmd->scmd.reply_len = 0x10;
+ cmd->scmd.reply_len = sizeof(cmd->pdata);
smu_queue_cmd(&cmd->scmd);
}
@@ -747,10 +745,8 @@ static void smu_i2c_low_completion(struct smu_cmd *scmd, void *misc)
*/
if (fail && --cmd->retries > 0) {
DPRINTK("SMU: i2c failure, starting timer...\n");
- smu->i2c_timer.function = smu_i2c_retry;
- smu->i2c_timer.data = (unsigned long)cmd;
- smu->i2c_timer.expires = jiffies + msecs_to_jiffies(5);
- add_timer(&smu->i2c_timer);
+ BUG_ON(cmd != smu->cmd_i2c_cur);
+ mod_timer(&smu->i2c_timer, jiffies + msecs_to_jiffies(5));
return;
}
@@ -764,7 +760,7 @@ static void smu_i2c_low_completion(struct smu_cmd *scmd, void *misc)
/* Ok, initial command complete, now poll status */
scmd->reply_buf = cmd->pdata;
- scmd->reply_len = 0x10;
+ scmd->reply_len = sizeof(cmd->pdata);
scmd->data_buf = cmd->pdata;
scmd->data_len = 1;
cmd->pdata[0] = 0;
@@ -786,7 +782,7 @@ int smu_queue_i2c(struct smu_i2c_cmd *cmd)
cmd->scmd.done = smu_i2c_low_completion;
cmd->scmd.misc = cmd;
cmd->scmd.reply_buf = cmd->pdata;
- cmd->scmd.reply_len = 0x10;
+ cmd->scmd.reply_len = sizeof(cmd->pdata);
cmd->scmd.data_buf = (u8 *)(char *)&cmd->info;
cmd->scmd.status = 1;
cmd->stage = 0;
@@ -909,10 +905,13 @@ static struct smu_sdbp_header *smu_create_sdb_partition(int id)
struct property *prop;
/* First query the partition info */
+ DPRINTK("SMU: Query partition infos ... (irq=%d)\n", smu->db_irq);
smu_queue_simple(&cmd, SMU_CMD_PARTITION_COMMAND, 2,
smu_done_complete, &comp,
SMU_CMD_PARTITION_LATEST, id);
wait_for_completion(&comp);
+ DPRINTK("SMU: done, status: %d, reply_len: %d\n",
+ cmd.cmd.status, cmd.cmd.reply_len);
/* Partition doesn't exist (or other error) */
if (cmd.cmd.status != 0 || cmd.cmd.reply_len != 6)
@@ -975,6 +974,8 @@ struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size,
sprintf(pname, "sdb-partition-%02x", id);
+ DPRINTK("smu_get_sdb_partition(%02x)\n", id);
+
if (interruptible) {
int rc;
rc = down_interruptible(&smu_part_access);
@@ -986,6 +987,7 @@ struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size,
part = (struct smu_sdbp_header *)get_property(smu->of_node,
pname, size);
if (part == NULL) {
+ DPRINTK("trying to extract from SMU ...\n");
part = smu_create_sdb_partition(id);
if (part != NULL && size)
*size = part->len << 2;
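The i2c retry change above replaces an add_timer() armed ad hoc in the completion path with a single timer initialised once at setup time and re-armed with mod_timer(), which cannot double-add the timer. A minimal sketch of that pattern with hypothetical names:

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_dev {
	struct timer_list retry_timer;
};

static void demo_retry(unsigned long data)
{
	struct demo_dev *dev = (struct demo_dev *)data;

	/* re-submit the failed command for 'dev' here */
	printk(KERN_DEBUG "demo: retrying command for %p\n", dev);
}

static void demo_setup(struct demo_dev *dev)
{
	/* one-time initialisation, done before any command can fail */
	init_timer(&dev->retry_timer);
	dev->retry_timer.function = demo_retry;
	dev->retry_timer.data = (unsigned long)dev;
}

static void demo_arm(struct demo_dev *dev)
{
	/* (re-)arm the timer 5 ms from now; safe even if already pending */
	mod_timer(&dev->retry_timer, jiffies + msecs_to_jiffies(5));
}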
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index f38696622eb..5ebfd1d138d 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -52,6 +52,7 @@ static char *sensor_location[3] = {NULL, NULL, NULL};
static int limit_adjust = 0;
static int fan_speed = -1;
+static int verbose = 0;
MODULE_AUTHOR("Colin Leroy <colin@colino.net>");
MODULE_DESCRIPTION("Driver for ADT746x thermostat in iBook G4 and "
@@ -66,6 +67,10 @@ module_param(fan_speed, int, 0644);
MODULE_PARM_DESC(fan_speed,"Specify starting fan speed (0-255) "
"(default 64)");
+module_param(verbose, bool, 0);
+MODULE_PARM_DESC(verbose,"Verbose log operations "
+ "(default 0)");
+
struct thermostat {
struct i2c_client clt;
u8 temps[3];
@@ -149,13 +154,13 @@ detach_thermostat(struct i2c_adapter *adapter)
if (thread_therm != NULL) {
kthread_stop(thread_therm);
}
-
+
printk(KERN_INFO "adt746x: Putting max temperatures back from "
"%d, %d, %d to %d, %d, %d\n",
th->limits[0], th->limits[1], th->limits[2],
th->initial_limits[0], th->initial_limits[1],
th->initial_limits[2]);
-
+
for (i = 0; i < 3; i++)
write_reg(th, LIMIT_REG[i], th->initial_limits[i]);
@@ -171,9 +176,9 @@ detach_thermostat(struct i2c_adapter *adapter)
}
static struct i2c_driver thermostat_driver = {
- .owner = THIS_MODULE,
- .name = "therm_adt746x",
- .flags = I2C_DF_NOTIFY,
+ .driver = {
+ .name = "therm_adt746x",
+ },
.attach_adapter = attach_thermostat,
.detach_adapter = detach_thermostat,
};
@@ -212,12 +217,14 @@ static void write_fan_speed(struct thermostat *th, int speed, int fan)
return;
if (th->last_speed[fan] != speed) {
- if (speed == -1)
- printk(KERN_DEBUG "adt746x: Setting speed to automatic "
- "for %s fan.\n", sensor_location[fan+1]);
- else
- printk(KERN_DEBUG "adt746x: Setting speed to %d "
- "for %s fan.\n", speed, sensor_location[fan+1]);
+ if (verbose) {
+ if (speed == -1)
+ printk(KERN_DEBUG "adt746x: Setting speed to automatic "
+ "for %s fan.\n", sensor_location[fan+1]);
+ else
+ printk(KERN_DEBUG "adt746x: Setting speed to %d "
+ "for %s fan.\n", speed, sensor_location[fan+1]);
+ }
} else
return;
@@ -298,10 +305,11 @@ static void update_fans_speed (struct thermostat *th)
if (new_speed > 255)
new_speed = 255;
- printk(KERN_DEBUG "adt746x: setting fans speed to %d "
- "(limit exceeded by %d on %s) \n",
- new_speed, var,
- sensor_location[fan_number+1]);
+ if (verbose)
+ printk(KERN_DEBUG "adt746x: Setting fans speed to %d "
+ "(limit exceeded by %d on %s) \n",
+ new_speed, var,
+ sensor_location[fan_number+1]);
write_both_fan_speed(th, new_speed);
th->last_var[fan_number] = var;
} else if (var < -2) {
@@ -309,8 +317,9 @@ static void update_fans_speed (struct thermostat *th)
* so cold (lastvar >= -1) */
if (i == 2 && lastvar < -1) {
if (th->last_speed[fan_number] != 0)
- printk(KERN_DEBUG "adt746x: Stopping "
- "fans.\n");
+ if (verbose)
+ printk(KERN_DEBUG "adt746x: Stopping "
+ "fans.\n");
write_both_fan_speed(th, 0);
}
}
@@ -406,7 +415,7 @@ static int attach_one_thermostat(struct i2c_adapter *adapter, int addr,
th->initial_limits[i] = read_reg(th, LIMIT_REG[i]);
set_limit(th, i);
}
-
+
printk(KERN_INFO "adt746x: Lowering max temperatures from %d, %d, %d"
" to %d, %d, %d\n",
th->initial_limits[0], th->initial_limits[1],
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 190878eef99..4f50ee5767a 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -283,9 +283,9 @@ static int therm_pm72_detach(struct i2c_adapter *adapter);
static struct i2c_driver therm_pm72_driver =
{
- .owner = THIS_MODULE,
- .name = "therm_pm72",
- .flags = I2C_DF_NOTIFY,
+ .driver = {
+ .name = "therm_pm72",
+ },
.attach_adapter = therm_pm72_attach,
.detach_adapter = therm_pm72_detach,
};
@@ -630,12 +630,12 @@ static int read_eeprom(int cpu, struct mpu_data *out)
sprintf(nodename, "/u3@0,f8000000/i2c@f8001000/cpuid@a%d", cpu ? 2 : 0);
np = of_find_node_by_path(nodename);
if (np == NULL) {
- printk(KERN_ERR "therm_pm72: Failed to retreive cpuid node from device-tree\n");
+ printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid node from device-tree\n");
return -ENODEV;
}
data = (u8 *)get_property(np, "cpuid", &len);
if (data == NULL) {
- printk(KERN_ERR "therm_pm72: Failed to retreive cpuid property from device-tree\n");
+ printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid property from device-tree\n");
of_node_put(np);
return -ENODEV;
}
@@ -1988,18 +1988,13 @@ static void fcu_lookup_fans(struct device_node *fcu_node)
static int fcu_of_probe(struct of_device* dev, const struct of_device_id *match)
{
- int rc;
-
state = state_detached;
/* Lookup the fans in the device tree */
fcu_lookup_fans(dev->node);
/* Add the driver */
- rc = i2c_add_driver(&therm_pm72_driver);
- if (rc < 0)
- return rc;
- return 0;
+ return i2c_add_driver(&therm_pm72_driver);
}
static int fcu_of_remove(struct of_device* dev)
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 6aaa1df1a64..3d9dd2e166a 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -354,10 +354,10 @@ do_detach( struct i2c_client *client )
}
static struct i2c_driver g4fan_driver = {
- .owner = THIS_MODULE,
- .name = "therm_windtunnel",
+ .driver = {
+ .name = "therm_windtunnel",
+ },
.id = I2C_DRIVERID_G4FAN,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = do_attach,
.detach_client = do_detach,
};
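The therm_* and windfarm hunks all perform the same mechanical conversion of struct i2c_driver: the name moves into the embedded device_driver, and the .owner / I2C_DF_NOTIFY fields go away. A sketch of the resulting layout under the legacy attach/detach binding model; the demo_* names are placeholders, not drivers from this patch:

#include <linux/i2c.h>

static int demo_attach(struct i2c_adapter *adapter)
{
	return 0;	/* would scan the adapter for known chips */
}

static int demo_detach(struct i2c_client *client)
{
	return 0;	/* would free per-client state */
}

static struct i2c_driver demo_driver = {
	.driver = {
		.name	= "demo_i2c",
	},
	.attach_adapter	= demo_attach,
	.detach_client	= demo_detach,
};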
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index d843a6c9c6d..2d9d7915040 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -127,39 +127,34 @@ struct adb_driver via_cuda_driver = {
#endif /* CONFIG_ADB */
#ifdef CONFIG_PPC
-int __init
-find_via_cuda(void)
+int __init find_via_cuda(void)
{
- int err;
struct adb_request req;
+ phys_addr_t taddr;
+ u32 *reg;
+ int err;
if (vias != 0)
return 1;
- vias = find_devices("via-cuda");
+ vias = of_find_node_by_name(NULL, "via-cuda");
if (vias == 0)
return 0;
- if (vias->next != 0)
- printk(KERN_WARNING "Warning: only using 1st via-cuda\n");
-
-#if 0
- { int i;
-
- printk("find_via_cuda: node = %p, addrs =", vias->node);
- for (i = 0; i < vias->n_addrs; ++i)
- printk(" %x(%x)", vias->addrs[i].address, vias->addrs[i].size);
- printk(", intrs =");
- for (i = 0; i < vias->n_intrs; ++i)
- printk(" %x", vias->intrs[i].line);
- printk("\n"); }
-#endif
- if (vias->n_addrs != 1 || vias->n_intrs != 1) {
- printk(KERN_ERR "via-cuda: expecting 1 address (%d) and 1 interrupt (%d)\n",
- vias->n_addrs, vias->n_intrs);
- if (vias->n_addrs < 1 || vias->n_intrs < 1)
- return 0;
+ reg = (u32 *)get_property(vias, "reg", NULL);
+ if (reg == NULL) {
+ printk(KERN_ERR "via-cuda: No \"reg\" property !\n");
+ goto fail;
+ }
+ taddr = of_translate_address(vias, reg);
+ if (taddr == 0) {
+ printk(KERN_ERR "via-cuda: Can't translate address !\n");
+ goto fail;
+ }
+ via = ioremap(taddr, 0x2000);
+ if (via == NULL) {
+ printk(KERN_ERR "via-cuda: Can't map address !\n");
+ goto fail;
}
- via = ioremap(vias->addrs->address, 0x2000);
cuda_state = idle;
sys_ctrler = SYS_CTRLER_CUDA;
@@ -185,6 +180,11 @@ find_via_cuda(void)
cuda_poll();
return 1;
+
+ fail:
+ of_node_put(vias);
+ vias = NULL;
+ return 0;
}
#endif /* CONFIG_PPC */
@@ -193,10 +193,6 @@ static int __init via_cuda_start(void)
if (via == NULL)
return -ENODEV;
-#ifdef CONFIG_PPC
- request_OF_resource(vias, 0, NULL);
-#endif
-
if (request_irq(CUDA_IRQ, cuda_interrupt, 0, "ADB", cuda_interrupt)) {
printk(KERN_ERR "cuda_init: can't get irq %d\n", CUDA_IRQ);
return -EAGAIN;
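find_via_cuda() and find_via_pmu() now share the same probe idiom: look the node up by name, read its "reg" property, translate it to a CPU physical address, ioremap it, and drop the node reference on any failure. A hedged sketch of that sequence, assuming the 2.6.16 powerpc prom helpers; demo_map_by_name() is illustrative only:

#include <linux/types.h>
#include <linux/init.h>
#include <asm/prom.h>
#include <asm/io.h>

static void __iomem *demo_map_by_name(const char *name, unsigned long size)
{
	struct device_node *np;
	void __iomem *base = NULL;
	u64 taddr;
	u32 *reg;

	np = of_find_node_by_name(NULL, name);
	if (np == NULL)
		return NULL;
	reg = (u32 *)get_property(np, "reg", NULL);
	if (reg == NULL)
		goto out;		/* no "reg" property */
	taddr = of_translate_address(np, reg);
	if (taddr == OF_BAD_ADDR)
		goto out;		/* not translatable on this machine */
	base = ioremap(taddr, size);
 out:
	of_node_put(np);
	return base;
}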
diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
index e9a159ad302..2a2ffe06016 100644
--- a/drivers/macintosh/via-macii.c
+++ b/drivers/macintosh/via-macii.c
@@ -260,7 +260,7 @@ static int macii_write(struct adb_request *req)
return -EINVAL;
}
- req->next = 0;
+ req->next = NULL;
req->sent = 0;
req->complete = 0;
req->reply_len = 0;
@@ -295,7 +295,7 @@ static void macii_poll(void)
unsigned long flags;
local_irq_save(flags);
- if (via[IFR] & SR_INT) macii_interrupt(0, 0, 0);
+ if (via[IFR] & SR_INT) macii_interrupt(0, NULL, NULL);
local_irq_restore(flags);
}
diff --git a/drivers/macintosh/via-maciisi.c b/drivers/macintosh/via-maciisi.c
index a1966975d58..0129fcc3b18 100644
--- a/drivers/macintosh/via-maciisi.c
+++ b/drivers/macintosh/via-maciisi.c
@@ -294,6 +294,24 @@ static void maciisi_sync(struct adb_request *req)
printk(KERN_ERR "maciisi_send_request: poll timed out!\n");
}
+int
+maciisi_request(struct adb_request *req, void (*done)(struct adb_request *),
+ int nbytes, ...)
+{
+ va_list list;
+ int i;
+
+ req->nbytes = nbytes;
+ req->done = done;
+ req->reply_expected = 0;
+ va_start(list, nbytes);
+ for (i = 0; i < nbytes; i++)
+ req->data[i] = va_arg(list, int);
+ va_end(list);
+
+ return maciisi_send_request(req, 1);
+}
+
/* Enqueue a request, and run the queue if possible */
static int
maciisi_write(struct adb_request* req)
@@ -308,7 +326,7 @@ maciisi_write(struct adb_request* req)
req->complete = 1;
return -EINVAL;
}
- req->next = 0;
+ req->next = NULL;
req->sent = 0;
req->complete = 0;
req->reply_len = 0;
@@ -403,7 +421,7 @@ maciisi_poll(void)
local_irq_save(flags);
if (via[IFR] & SR_INT) {
- maciisi_interrupt(0, 0, 0);
+ maciisi_interrupt(0, NULL, NULL);
}
else /* avoid calling this function too quickly in a loop */
udelay(ADB_DELAY);
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 56404350856..6eb93e45fcd 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -55,6 +55,8 @@
#include <asm/sections.h>
#include <asm/irq.h>
#include <asm/pmac_feature.h>
+#include <asm/pmac_pfunc.h>
+#include <asm/pmac_low_i2c.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cputable.h>
@@ -147,6 +149,7 @@ static struct device_node *vias;
static int pmu_kind = PMU_UNKNOWN;
static int pmu_fully_inited = 0;
static int pmu_has_adb;
+static struct device_node *gpio_node;
static unsigned char __iomem *gpio_reg = NULL;
static int gpio_irq = -1;
static int gpio_irq_enabled = -1;
@@ -157,8 +160,8 @@ static int pmu_version;
static int drop_interrupts;
#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
static int option_lid_wakeup = 1;
-static int sleep_in_progress;
#endif /* CONFIG_PM && CONFIG_PPC32 */
+static int sleep_in_progress;
static unsigned long async_req_locks;
static unsigned int pmu_irq_stats[11];
@@ -196,7 +199,6 @@ static int pmu_adb_reset_bus(void);
#endif /* CONFIG_ADB */
static int init_pmu(void);
-static int pmu_queue_request(struct adb_request *req);
static void pmu_start(void);
static irqreturn_t via_pmu_interrupt(int irq, void *arg, struct pt_regs *regs);
static irqreturn_t gpio1_interrupt(int irq, void *arg, struct pt_regs *regs);
@@ -295,22 +297,26 @@ static struct backlight_controller pmu_backlight_controller = {
};
#endif /* CONFIG_PMAC_BACKLIGHT */
-int
-find_via_pmu(void)
+int __init find_via_pmu(void)
{
+ u64 taddr;
+ u32 *reg;
+
if (via != 0)
return 1;
- vias = find_devices("via-pmu");
- if (vias == 0)
+ vias = of_find_node_by_name(NULL, "via-pmu");
+ if (vias == NULL)
return 0;
- if (vias->next != 0)
- printk(KERN_WARNING "Warning: only using 1st via-pmu\n");
- if (vias->n_addrs < 1 || vias->n_intrs < 1) {
- printk(KERN_ERR "via-pmu: %d addresses, %d interrupts!\n",
- vias->n_addrs, vias->n_intrs);
- if (vias->n_addrs < 1 || vias->n_intrs < 1)
- return 0;
+ reg = (u32 *)get_property(vias, "reg", NULL);
+ if (reg == NULL) {
+ printk(KERN_ERR "via-pmu: No \"reg\" property !\n");
+ goto fail;
+ }
+ taddr = of_translate_address(vias, reg);
+ if (taddr == OF_BAD_ADDR) {
+ printk(KERN_ERR "via-pmu: Can't translate address !\n");
+ goto fail;
}
spin_lock_init(&pmu_lock);
@@ -331,7 +337,8 @@ find_via_pmu(void)
pmu_kind = PMU_HEATHROW_BASED;
else if (device_is_compatible(vias->parent, "Keylargo")
|| device_is_compatible(vias->parent, "K2-Keylargo")) {
- struct device_node *gpio, *gpiop;
+ struct device_node *gpiop;
+ u64 gaddr = OF_BAD_ADDR;
pmu_kind = PMU_KEYLARGO_BASED;
pmu_has_adb = (find_type_devices("adb") != NULL);
@@ -341,19 +348,24 @@ find_via_pmu(void)
PMU_INT_TICK |
PMU_INT_ENVIRONMENT;
- gpiop = find_devices("gpio");
- if (gpiop && gpiop->n_addrs) {
- gpio_reg = ioremap(gpiop->addrs->address, 0x10);
- gpio = find_devices("extint-gpio1");
- if (gpio == NULL)
- gpio = find_devices("pmu-interrupt");
- if (gpio && gpio->parent == gpiop && gpio->n_intrs)
- gpio_irq = gpio->intrs[0].line;
+ gpiop = of_find_node_by_name(NULL, "gpio");
+ if (gpiop) {
+ reg = (u32 *)get_property(gpiop, "reg", NULL);
+ if (reg)
+ gaddr = of_translate_address(gpiop, reg);
+ if (gaddr != OF_BAD_ADDR)
+ gpio_reg = ioremap(gaddr, 0x10);
}
+ if (gpio_reg == NULL)
+ printk(KERN_ERR "via-pmu: Can't find GPIO reg !\n");
} else
pmu_kind = PMU_UNKNOWN;
- via = ioremap(vias->addrs->address, 0x2000);
+ via = ioremap(taddr, 0x2000);
+ if (via == NULL) {
+ printk(KERN_ERR "via-pmu: Can't map address !\n");
+ goto fail;
+ }
out_8(&via[IER], IER_CLR | 0x7f); /* disable all intrs */
out_8(&via[IFR], 0x7f); /* clear IFR */
@@ -365,23 +377,25 @@ find_via_pmu(void)
return 0;
}
- printk(KERN_INFO "PMU driver %d initialized for %s, firmware: %02x\n",
+ printk(KERN_INFO "PMU driver v%d initialized for %s, firmware: %02x\n",
PMU_DRIVER_VERSION, pbook_type[pmu_kind], pmu_version);
sys_ctrler = SYS_CTRLER_PMU;
return 1;
+ fail:
+ of_node_put(vias);
+ vias = NULL;
+ return 0;
}
#ifdef CONFIG_ADB
-static int
-pmu_probe(void)
+static int pmu_probe(void)
{
return vias == NULL? -ENODEV: 0;
}
-static int __init
-pmu_init(void)
+static int __init pmu_init(void)
{
if (vias == NULL)
return -ENODEV;
@@ -405,7 +419,7 @@ static int __init via_pmu_start(void)
bright_req_2.complete = 1;
batt_req.complete = 1;
-#if defined(CONFIG_PPC32) && !defined(CONFIG_PPC_MERGE)
+#ifndef CONFIG_PPC_MERGE
if (pmu_kind == PMU_KEYLARGO_BASED)
openpic_set_irq_priority(vias->intrs[0].line,
OPENPIC_PRIORITY_DEFAULT + 1);
@@ -418,10 +432,22 @@ static int __init via_pmu_start(void)
return -EAGAIN;
}
- if (pmu_kind == PMU_KEYLARGO_BASED && gpio_irq != -1) {
- if (request_irq(gpio_irq, gpio1_interrupt, 0, "GPIO1 ADB", (void *)0))
- printk(KERN_ERR "pmu: can't get irq %d (GPIO1)\n", gpio_irq);
- gpio_irq_enabled = 1;
+ if (pmu_kind == PMU_KEYLARGO_BASED) {
+ gpio_node = of_find_node_by_name(NULL, "extint-gpio1");
+ if (gpio_node == NULL)
+ gpio_node = of_find_node_by_name(NULL,
+ "pmu-interrupt");
+ if (gpio_node && gpio_node->n_intrs > 0)
+ gpio_irq = gpio_node->intrs[0].line;
+
+ if (gpio_irq != -1) {
+ if (request_irq(gpio_irq, gpio1_interrupt, 0,
+ "GPIO1 ADB", (void *)0))
+ printk(KERN_ERR "pmu: can't get irq %d"
+ " (GPIO1)\n", gpio_irq);
+ else
+ gpio_irq_enabled = 1;
+ }
}
/* Enable interrupts */
@@ -454,9 +480,6 @@ static int __init via_pmu_dev_init(void)
if (vias == NULL)
return -ENODEV;
-#ifndef CONFIG_PPC64
- request_OF_resource(vias, 0, NULL);
-#endif
#ifdef CONFIG_PMAC_BACKLIGHT
/* Enable backlight */
register_backlight_controller(&pmu_backlight_controller, NULL, "pmu");
@@ -1371,7 +1394,6 @@ next:
}
pmu_done(req);
} else {
-#if defined(CONFIG_XMON) && !defined(CONFIG_PPC64)
if (len == 4 && data[1] == 0x2c) {
extern int xmon_wants_key, xmon_adb_keycode;
if (xmon_wants_key) {
@@ -1379,7 +1401,6 @@ next:
return;
}
}
-#endif /* defined(CONFIG_XMON) && !defined(CONFIG_PPC64) */
#ifdef CONFIG_ADB
/*
* XXX On the [23]400 the PMU gives us an up
@@ -1782,258 +1803,6 @@ pmu_present(void)
return via != 0;
}
-struct pmu_i2c_hdr {
- u8 bus;
- u8 mode;
- u8 bus2;
- u8 address;
- u8 sub_addr;
- u8 comb_addr;
- u8 count;
-};
-
-int
-pmu_i2c_combined_read(int bus, int addr, int subaddr, u8* data, int len)
-{
- struct adb_request req;
- struct pmu_i2c_hdr *hdr = (struct pmu_i2c_hdr *)&req.data[1];
- int retry;
- int rc;
-
- for (retry=0; retry<16; retry++) {
- memset(&req, 0, sizeof(req));
-
- hdr->bus = bus;
- hdr->address = addr & 0xfe;
- hdr->mode = PMU_I2C_MODE_COMBINED;
- hdr->bus2 = 0;
- hdr->sub_addr = subaddr;
- hdr->comb_addr = addr | 1;
- hdr->count = len;
-
- req.nbytes = sizeof(struct pmu_i2c_hdr) + 1;
- req.reply_expected = 0;
- req.reply_len = 0;
- req.data[0] = PMU_I2C_CMD;
- req.reply[0] = 0xff;
- rc = pmu_queue_request(&req);
- if (rc)
- return rc;
- while(!req.complete)
- pmu_poll();
- if (req.reply[0] == PMU_I2C_STATUS_OK)
- break;
- mdelay(15);
- }
- if (req.reply[0] != PMU_I2C_STATUS_OK)
- return -1;
-
- for (retry=0; retry<16; retry++) {
- memset(&req, 0, sizeof(req));
-
- mdelay(15);
-
- hdr->bus = PMU_I2C_BUS_STATUS;
- req.reply[0] = 0xff;
-
- req.nbytes = 2;
- req.reply_expected = 0;
- req.reply_len = 0;
- req.data[0] = PMU_I2C_CMD;
- rc = pmu_queue_request(&req);
- if (rc)
- return rc;
- while(!req.complete)
- pmu_poll();
- if (req.reply[0] == PMU_I2C_STATUS_DATAREAD) {
- memcpy(data, &req.reply[1], req.reply_len - 1);
- return req.reply_len - 1;
- }
- }
- return -1;
-}
-
-int
-pmu_i2c_stdsub_write(int bus, int addr, int subaddr, u8* data, int len)
-{
- struct adb_request req;
- struct pmu_i2c_hdr *hdr = (struct pmu_i2c_hdr *)&req.data[1];
- int retry;
- int rc;
-
- for (retry=0; retry<16; retry++) {
- memset(&req, 0, sizeof(req));
-
- hdr->bus = bus;
- hdr->address = addr & 0xfe;
- hdr->mode = PMU_I2C_MODE_STDSUB;
- hdr->bus2 = 0;
- hdr->sub_addr = subaddr;
- hdr->comb_addr = addr & 0xfe;
- hdr->count = len;
-
- req.data[0] = PMU_I2C_CMD;
- memcpy(&req.data[sizeof(struct pmu_i2c_hdr) + 1], data, len);
- req.nbytes = sizeof(struct pmu_i2c_hdr) + len + 1;
- req.reply_expected = 0;
- req.reply_len = 0;
- req.reply[0] = 0xff;
- rc = pmu_queue_request(&req);
- if (rc)
- return rc;
- while(!req.complete)
- pmu_poll();
- if (req.reply[0] == PMU_I2C_STATUS_OK)
- break;
- mdelay(15);
- }
- if (req.reply[0] != PMU_I2C_STATUS_OK)
- return -1;
-
- for (retry=0; retry<16; retry++) {
- memset(&req, 0, sizeof(req));
-
- mdelay(15);
-
- hdr->bus = PMU_I2C_BUS_STATUS;
- req.reply[0] = 0xff;
-
- req.nbytes = 2;
- req.reply_expected = 0;
- req.reply_len = 0;
- req.data[0] = PMU_I2C_CMD;
- rc = pmu_queue_request(&req);
- if (rc)
- return rc;
- while(!req.complete)
- pmu_poll();
- if (req.reply[0] == PMU_I2C_STATUS_OK)
- return len;
- }
- return -1;
-}
-
-int
-pmu_i2c_simple_read(int bus, int addr, u8* data, int len)
-{
- struct adb_request req;
- struct pmu_i2c_hdr *hdr = (struct pmu_i2c_hdr *)&req.data[1];
- int retry;
- int rc;
-
- for (retry=0; retry<16; retry++) {
- memset(&req, 0, sizeof(req));
-
- hdr->bus = bus;
- hdr->address = addr | 1;
- hdr->mode = PMU_I2C_MODE_SIMPLE;
- hdr->bus2 = 0;
- hdr->sub_addr = 0;
- hdr->comb_addr = 0;
- hdr->count = len;
-
- req.data[0] = PMU_I2C_CMD;
- req.nbytes = sizeof(struct pmu_i2c_hdr) + 1;
- req.reply_expected = 0;
- req.reply_len = 0;
- req.reply[0] = 0xff;
- rc = pmu_queue_request(&req);
- if (rc)
- return rc;
- while(!req.complete)
- pmu_poll();
- if (req.reply[0] == PMU_I2C_STATUS_OK)
- break;
- mdelay(15);
- }
- if (req.reply[0] != PMU_I2C_STATUS_OK)
- return -1;
-
- for (retry=0; retry<16; retry++) {
- memset(&req, 0, sizeof(req));
-
- mdelay(15);
-
- hdr->bus = PMU_I2C_BUS_STATUS;
- req.reply[0] = 0xff;
-
- req.nbytes = 2;
- req.reply_expected = 0;
- req.reply_len = 0;
- req.data[0] = PMU_I2C_CMD;
- rc = pmu_queue_request(&req);
- if (rc)
- return rc;
- while(!req.complete)
- pmu_poll();
- if (req.reply[0] == PMU_I2C_STATUS_DATAREAD) {
- memcpy(data, &req.reply[1], req.reply_len - 1);
- return req.reply_len - 1;
- }
- }
- return -1;
-}
-
-int
-pmu_i2c_simple_write(int bus, int addr, u8* data, int len)
-{
- struct adb_request req;
- struct pmu_i2c_hdr *hdr = (struct pmu_i2c_hdr *)&req.data[1];
- int retry;
- int rc;
-
- for (retry=0; retry<16; retry++) {
- memset(&req, 0, sizeof(req));
-
- hdr->bus = bus;
- hdr->address = addr & 0xfe;
- hdr->mode = PMU_I2C_MODE_SIMPLE;
- hdr->bus2 = 0;
- hdr->sub_addr = 0;
- hdr->comb_addr = 0;
- hdr->count = len;
-
- req.data[0] = PMU_I2C_CMD;
- memcpy(&req.data[sizeof(struct pmu_i2c_hdr) + 1], data, len);
- req.nbytes = sizeof(struct pmu_i2c_hdr) + len + 1;
- req.reply_expected = 0;
- req.reply_len = 0;
- req.reply[0] = 0xff;
- rc = pmu_queue_request(&req);
- if (rc)
- return rc;
- while(!req.complete)
- pmu_poll();
- if (req.reply[0] == PMU_I2C_STATUS_OK)
- break;
- mdelay(15);
- }
- if (req.reply[0] != PMU_I2C_STATUS_OK)
- return -1;
-
- for (retry=0; retry<16; retry++) {
- memset(&req, 0, sizeof(req));
-
- mdelay(15);
-
- hdr->bus = PMU_I2C_BUS_STATUS;
- req.reply[0] = 0xff;
-
- req.nbytes = 2;
- req.reply_expected = 0;
- req.reply_len = 0;
- req.data[0] = PMU_I2C_CMD;
- rc = pmu_queue_request(&req);
- if (rc)
- return rc;
- while(!req.complete)
- pmu_poll();
- if (req.reply[0] == PMU_I2C_STATUS_OK)
- return len;
- }
- return -1;
-}
-
#ifdef CONFIG_PM
static LIST_HEAD(sleep_notifiers);
@@ -2338,8 +2107,9 @@ pmac_suspend_devices(void)
return -EBUSY;
}
- /* Disable clock spreading on some machines */
- pmac_tweak_clock_spreading(0);
+ /* Call platform functions marked "on sleep" */
+ pmac_pfunc_i2c_suspend();
+ pmac_pfunc_base_suspend();
/* Stop preemption */
preempt_disable();
@@ -2411,8 +2181,9 @@ pmac_wakeup_devices(void)
mdelay(10);
preempt_enable();
- /* Re-enable clock spreading on some machines */
- pmac_tweak_clock_spreading(1);
+ /* Call platform functions marked "on wake" */
+ pmac_pfunc_base_resume();
+ pmac_pfunc_i2c_resume();
/* Resume devices */
device_resume();
@@ -3130,16 +2901,13 @@ static int __init init_pmu_sysfs(void)
subsys_initcall(init_pmu_sysfs);
EXPORT_SYMBOL(pmu_request);
+EXPORT_SYMBOL(pmu_queue_request);
EXPORT_SYMBOL(pmu_poll);
EXPORT_SYMBOL(pmu_poll_adb);
EXPORT_SYMBOL(pmu_wait_complete);
EXPORT_SYMBOL(pmu_suspend);
EXPORT_SYMBOL(pmu_resume);
EXPORT_SYMBOL(pmu_unlock);
-EXPORT_SYMBOL(pmu_i2c_combined_read);
-EXPORT_SYMBOL(pmu_i2c_stdsub_write);
-EXPORT_SYMBOL(pmu_i2c_simple_read);
-EXPORT_SYMBOL(pmu_i2c_simple_write);
#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
EXPORT_SYMBOL(pmu_enable_irled);
EXPORT_SYMBOL(pmu_battery_count);
diff --git a/drivers/macintosh/via-pmu68k.c b/drivers/macintosh/via-pmu68k.c
index 6f80d76ac17..f08e52f2107 100644
--- a/drivers/macintosh/via-pmu68k.c
+++ b/drivers/macintosh/via-pmu68k.c
@@ -493,7 +493,7 @@ pmu_queue_request(struct adb_request *req)
return -EINVAL;
}
- req->next = 0;
+ req->next = NULL;
req->sent = 0;
req->complete = 0;
local_irq_save(flags);
@@ -717,7 +717,7 @@ pmu_handle_data(unsigned char *data, int len, struct pt_regs *regs)
printk(KERN_ERR "PMU: extra ADB reply\n");
return;
}
- req_awaiting_reply = 0;
+ req_awaiting_reply = NULL;
if (len <= 2)
req->reply_len = 0;
else {
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index a0a41ad0f2b..906d3ecae6e 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -21,6 +21,7 @@
#include <asm/io.h>
#include <asm/system.h>
#include <asm/sections.h>
+#include <asm/pmac_low_i2c.h>
#include "windfarm.h"
@@ -47,9 +48,9 @@ static int wf_lm75_attach(struct i2c_adapter *adapter);
static int wf_lm75_detach(struct i2c_client *client);
static struct i2c_driver wf_lm75_driver = {
- .owner = THIS_MODULE,
- .name = "wf_lm75",
- .flags = I2C_DF_NOTIFY,
+ .driver = {
+ .name = "wf_lm75",
+ },
.attach_adapter = wf_lm75_attach,
.detach_client = wf_lm75_detach,
};
@@ -157,53 +158,21 @@ static struct wf_lm75_sensor *wf_lm75_create(struct i2c_adapter *adapter,
static int wf_lm75_attach(struct i2c_adapter *adapter)
{
- u8 bus_id;
- struct device_node *smu, *bus, *dev;
-
- /* We currently only deal with LM75's hanging off the SMU
- * i2c busses. If we extend that driver to other/older
- * machines, we should split this function into SMU-i2c,
- * keywest-i2c, PMU-i2c, ...
- */
+ struct device_node *busnode, *dev;
+ struct pmac_i2c_bus *bus;
DBG("wf_lm75: adapter %s detected\n", adapter->name);
- if (strncmp(adapter->name, "smu-i2c-", 8) != 0)
- return 0;
- smu = of_find_node_by_type(NULL, "smu");
- if (smu == NULL)
- return 0;
-
- /* Look for the bus in the device-tree */
- bus_id = (u8)simple_strtoul(adapter->name + 8, NULL, 16);
-
- DBG("wf_lm75: bus ID is %x\n", bus_id);
-
- /* Look for sensors subdir */
- for (bus = NULL;
- (bus = of_get_next_child(smu, bus)) != NULL;) {
- u32 *reg;
-
- if (strcmp(bus->name, "i2c"))
- continue;
- reg = (u32 *)get_property(bus, "reg", NULL);
- if (reg == NULL)
- continue;
- if (bus_id == *reg)
- break;
- }
- of_node_put(smu);
- if (bus == NULL) {
- printk(KERN_WARNING "windfarm: SMU i2c bus 0x%x not found"
- " in device-tree !\n", bus_id);
- return 0;
- }
+ bus = pmac_i2c_adapter_to_bus(adapter);
+ if (bus == NULL)
+ return -ENODEV;
+ busnode = pmac_i2c_get_bus_node(bus);
DBG("wf_lm75: bus found, looking for device...\n");
/* Now look for lm75(s) in there */
for (dev = NULL;
- (dev = of_get_next_child(bus, dev)) != NULL;) {
+ (dev = of_get_next_child(busnode, dev)) != NULL;) {
const char *loc =
get_property(dev, "hwsensor-location", NULL);
u32 *reg = (u32 *)get_property(dev, "reg", NULL);
@@ -217,9 +186,6 @@ static int wf_lm75_attach(struct i2c_adapter *adapter)
else if (device_is_compatible(dev, "ds1775"))
wf_lm75_create(adapter, *reg, 1, loc);
}
-
- of_node_put(bus);
-
return 0;
}
@@ -240,12 +206,7 @@ static int wf_lm75_detach(struct i2c_client *client)
static int __init wf_lm75_sensor_init(void)
{
- int rc;
-
- rc = i2c_add_driver(&wf_lm75_driver);
- if (rc < 0)
- return rc;
- return 0;
+ return i2c_add_driver(&wf_lm75_driver);
}
static void __exit wf_lm75_sensor_exit(void)
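wf_lm75_attach() no longer parses the adapter name to locate the SMU bus; it asks the pmac low-i2c layer for the bus object and its device-tree node directly. A short sketch of that lookup, where demo_adapter_to_node() is a hypothetical wrapper around the 2.6.16 <asm/pmac_low_i2c.h> helpers:

#include <linux/i2c.h>
#include <asm/prom.h>
#include <asm/pmac_low_i2c.h>

static struct device_node *demo_adapter_to_node(struct i2c_adapter *adapter)
{
	struct pmac_i2c_bus *bus = pmac_i2c_adapter_to_bus(adapter);

	if (bus == NULL)
		return NULL;	/* not a PowerMac i2c bus */
	return pmac_i2c_get_bus_node(bus);
}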
diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c
index 80ddf9776bd..eb69a601e76 100644
--- a/drivers/macintosh/windfarm_pm81.c
+++ b/drivers/macintosh/windfarm_pm81.c
@@ -26,7 +26,7 @@
* (typically the drive fan)
* - the main control (first control) gets the target value scaled with
* the first pair of factors, and is then modified as below
- * - the value of the target of the CPU Fan control loop is retreived,
+ * - the value of the target of the CPU Fan control loop is retrieved,
* scaled with the second pair of factors, and the max of that and
* the scaled target is applied to the main control.
*
diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c
index 2c3158c81ff..4d811600bda 100644
--- a/drivers/macintosh/windfarm_smu_controls.c
+++ b/drivers/macintosh/windfarm_smu_controls.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/wait.h>
+#include <linux/completion.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/io.h>
diff --git a/drivers/macintosh/windfarm_smu_sensors.c b/drivers/macintosh/windfarm_smu_sensors.c
index b558cc209d4..1a00d9c75a2 100644
--- a/drivers/macintosh/windfarm_smu_sensors.c
+++ b/drivers/macintosh/windfarm_smu_sensors.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/wait.h>
+#include <linux/completion.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/io.h>
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 252d55df964..76a189ceb52 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -315,6 +315,8 @@ static int write_page(struct bitmap *bitmap, struct page *page, int wait)
if (bitmap->file == NULL)
return write_sb_page(bitmap->mddev, bitmap->offset, page, wait);
+ flush_dcache_page(page); /* make sure visible to anyone reading the file */
+
if (wait)
lock_page(page);
else {
@@ -341,7 +343,7 @@ static int write_page(struct bitmap *bitmap, struct page *page, int wait)
/* add to list to be waited for by daemon */
struct page_list *item = mempool_alloc(bitmap->write_pool, GFP_NOIO);
item->page = page;
- page_cache_get(page);
+ get_page(page);
spin_lock(&bitmap->write_lock);
list_add(&item->list, &bitmap->complete_pages);
spin_unlock(&bitmap->write_lock);
@@ -357,10 +359,10 @@ static struct page *read_page(struct file *file, unsigned long index,
struct inode *inode = file->f_mapping->host;
struct page *page = NULL;
loff_t isize = i_size_read(inode);
- unsigned long end_index = isize >> PAGE_CACHE_SHIFT;
+ unsigned long end_index = isize >> PAGE_SHIFT;
- PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_CACHE_SIZE,
- (unsigned long long)index << PAGE_CACHE_SHIFT);
+ PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_SIZE,
+ (unsigned long long)index << PAGE_SHIFT);
page = read_cache_page(inode->i_mapping, index,
(filler_t *)inode->i_mapping->a_ops->readpage, file);
@@ -368,7 +370,7 @@ static struct page *read_page(struct file *file, unsigned long index,
goto out;
wait_on_page_locked(page);
if (!PageUptodate(page) || PageError(page)) {
- page_cache_release(page);
+ put_page(page);
page = ERR_PTR(-EIO);
goto out;
}
@@ -376,14 +378,14 @@ static struct page *read_page(struct file *file, unsigned long index,
if (index > end_index) /* we have read beyond EOF */
*bytes_read = 0;
else if (index == end_index) /* possible short read */
- *bytes_read = isize & ~PAGE_CACHE_MASK;
+ *bytes_read = isize & ~PAGE_MASK;
else
- *bytes_read = PAGE_CACHE_SIZE; /* got a full page */
+ *bytes_read = PAGE_SIZE; /* got a full page */
out:
if (IS_ERR(page))
printk(KERN_ALERT "md: bitmap read error: (%dB @ %Lu): %ld\n",
- (int)PAGE_CACHE_SIZE,
- (unsigned long long)index << PAGE_CACHE_SHIFT,
+ (int)PAGE_SIZE,
+ (unsigned long long)index << PAGE_SHIFT,
PTR_ERR(page));
return page;
}
@@ -406,11 +408,11 @@ int bitmap_update_sb(struct bitmap *bitmap)
return 0;
}
spin_unlock_irqrestore(&bitmap->lock, flags);
- sb = (bitmap_super_t *)kmap(bitmap->sb_page);
+ sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
sb->events = cpu_to_le64(bitmap->mddev->events);
if (!bitmap->mddev->degraded)
sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
- kunmap(bitmap->sb_page);
+ kunmap_atomic(sb, KM_USER0);
return write_page(bitmap, bitmap->sb_page, 1);
}
@@ -421,7 +423,7 @@ void bitmap_print_sb(struct bitmap *bitmap)
if (!bitmap || !bitmap->sb_page)
return;
- sb = (bitmap_super_t *)kmap(bitmap->sb_page);
+ sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic));
printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version));
@@ -440,7 +442,7 @@ void bitmap_print_sb(struct bitmap *bitmap)
printk(KERN_DEBUG " sync size: %llu KB\n",
(unsigned long long)le64_to_cpu(sb->sync_size)/2);
printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
- kunmap(bitmap->sb_page);
+ kunmap_atomic(sb, KM_USER0);
}
/* read the superblock from the bitmap file and initialize some bitmap fields */
@@ -466,7 +468,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
return err;
}
- sb = (bitmap_super_t *)kmap(bitmap->sb_page);
+ sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
if (bytes_read < sizeof(*sb)) { /* short read */
printk(KERN_INFO "%s: bitmap file superblock truncated\n",
@@ -485,12 +487,12 @@ static int bitmap_read_sb(struct bitmap *bitmap)
else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
le32_to_cpu(sb->version) > BITMAP_MAJOR_HI)
reason = "unrecognized superblock version";
- else if (chunksize < 512 || chunksize > (1024 * 1024 * 4))
- reason = "bitmap chunksize out of range (512B - 4MB)";
+ else if (chunksize < PAGE_SIZE)
+ reason = "bitmap chunksize too small";
else if ((1 << ffz(~chunksize)) != chunksize)
reason = "bitmap chunksize not a power of 2";
- else if (daemon_sleep < 1 || daemon_sleep > 15)
- reason = "daemon sleep period out of range (1-15s)";
+ else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT / HZ)
+ reason = "daemon sleep period out of range";
else if (write_behind > COUNTER_MAX)
reason = "write-behind limit out of range (0 - 16383)";
if (reason) {
@@ -535,7 +537,7 @@ success:
bitmap->events_cleared = bitmap->mddev->events;
err = 0;
out:
- kunmap(bitmap->sb_page);
+ kunmap_atomic(sb, KM_USER0);
if (err)
bitmap_print_sb(bitmap);
return err;
@@ -558,9 +560,9 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
spin_unlock_irqrestore(&bitmap->lock, flags);
return;
}
- page_cache_get(bitmap->sb_page);
+ get_page(bitmap->sb_page);
spin_unlock_irqrestore(&bitmap->lock, flags);
- sb = (bitmap_super_t *)kmap(bitmap->sb_page);
+ sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
switch (op) {
case MASK_SET: sb->state |= bits;
break;
@@ -568,8 +570,8 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
break;
default: BUG();
}
- kunmap(bitmap->sb_page);
- page_cache_release(bitmap->sb_page);
+ kunmap_atomic(sb, KM_USER0);
+ put_page(bitmap->sb_page);
}
/*
@@ -622,12 +624,11 @@ static void bitmap_file_unmap(struct bitmap *bitmap)
while (pages--)
if (map[pages]->index != 0) /* 0 is sb_page, release it below */
- page_cache_release(map[pages]);
+ put_page(map[pages]);
kfree(map);
kfree(attr);
- if (sb_page)
- page_cache_release(sb_page);
+ safe_put_page(sb_page);
}
static void bitmap_stop_daemon(struct bitmap *bitmap);
@@ -654,7 +655,7 @@ static void drain_write_queues(struct bitmap *bitmap)
while ((item = dequeue_page(bitmap))) {
/* don't bother to wait */
- page_cache_release(item->page);
+ put_page(item->page);
mempool_free(item, bitmap->write_pool);
}
@@ -763,7 +764,7 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
/* make sure the page stays cached until it gets written out */
if (! (get_page_attr(bitmap, page) & BITMAP_PAGE_DIRTY))
- page_cache_get(page);
+ get_page(page);
/* set the bit */
kaddr = kmap_atomic(page, KM_USER0);
@@ -854,6 +855,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
unsigned long bytes, offset, dummy;
int outofdate;
int ret = -ENOSPC;
+ void *paddr;
chunks = bitmap->chunks;
file = bitmap->file;
@@ -887,12 +889,10 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
if (!bitmap->filemap)
goto out;
- bitmap->filemap_attr = kmalloc(sizeof(long) * num_pages, GFP_KERNEL);
+ bitmap->filemap_attr = kzalloc(sizeof(long) * num_pages, GFP_KERNEL);
if (!bitmap->filemap_attr)
goto out;
- memset(bitmap->filemap_attr, 0, sizeof(long) * num_pages);
-
oldindex = ~0L;
for (i = 0; i < chunks; i++) {
@@ -901,8 +901,6 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
bit = file_page_offset(i);
if (index != oldindex) { /* this is a new page, read it in */
/* unmap the old page, we're done with it */
- if (oldpage != NULL)
- kunmap(oldpage);
if (index == 0) {
/*
* if we're here then the superblock page
@@ -925,30 +923,32 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
oldindex = index;
oldpage = page;
- kmap(page);
if (outofdate) {
/*
* if bitmap is out of date, dirty the
* whole page and write it out
*/
- memset(page_address(page) + offset, 0xff,
+ paddr = kmap_atomic(page, KM_USER0);
+ memset(paddr + offset, 0xff,
PAGE_SIZE - offset);
+ kunmap_atomic(paddr, KM_USER0);
ret = write_page(bitmap, page, 1);
if (ret) {
- kunmap(page);
/* release, page not in filemap yet */
- page_cache_release(page);
+ put_page(page);
goto out;
}
}
bitmap->filemap[bitmap->file_pages++] = page;
}
+ paddr = kmap_atomic(page, KM_USER0);
if (bitmap->flags & BITMAP_HOSTENDIAN)
- b = test_bit(bit, page_address(page));
+ b = test_bit(bit, paddr);
else
- b = ext2_test_bit(bit, page_address(page));
+ b = ext2_test_bit(bit, paddr);
+ kunmap_atomic(paddr, KM_USER0);
if (b) {
/* if the disk bit is set, set the memory bit */
bitmap_set_memory_bits(bitmap, i << CHUNK_BLOCK_SHIFT(bitmap),
@@ -963,9 +963,6 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
ret = 0;
bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET);
- if (page) /* unmap the last page */
- kunmap(page);
-
if (bit_cnt) { /* Kick recovery if any bits were set */
set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
md_wakeup_thread(bitmap->mddev->thread);
@@ -1021,6 +1018,7 @@ int bitmap_daemon_work(struct bitmap *bitmap)
int err = 0;
int blocks;
int attr;
+ void *paddr;
if (bitmap == NULL)
return 0;
@@ -1043,7 +1041,7 @@ int bitmap_daemon_work(struct bitmap *bitmap)
/* skip this page unless it's marked as needing cleaning */
if (!((attr=get_page_attr(bitmap, page)) & BITMAP_PAGE_CLEAN)) {
if (attr & BITMAP_PAGE_NEEDWRITE) {
- page_cache_get(page);
+ get_page(page);
clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
}
spin_unlock_irqrestore(&bitmap->lock, flags);
@@ -1057,13 +1055,13 @@ int bitmap_daemon_work(struct bitmap *bitmap)
default:
bitmap_file_kick(bitmap);
}
- page_cache_release(page);
+ put_page(page);
}
continue;
}
/* grab the new page, sync and release the old */
- page_cache_get(page);
+ get_page(page);
if (lastpage != NULL) {
if (get_page_attr(bitmap, lastpage) & BITMAP_PAGE_NEEDWRITE) {
clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
@@ -1077,14 +1075,12 @@ int bitmap_daemon_work(struct bitmap *bitmap)
set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
}
- kunmap(lastpage);
- page_cache_release(lastpage);
+ put_page(lastpage);
if (err)
bitmap_file_kick(bitmap);
} else
spin_unlock_irqrestore(&bitmap->lock, flags);
lastpage = page;
- kmap(page);
/*
printk("bitmap clean at page %lu\n", j);
*/
@@ -1107,10 +1103,12 @@ int bitmap_daemon_work(struct bitmap *bitmap)
-1);
/* clear the bit */
+ paddr = kmap_atomic(page, KM_USER0);
if (bitmap->flags & BITMAP_HOSTENDIAN)
- clear_bit(file_page_offset(j), page_address(page));
+ clear_bit(file_page_offset(j), paddr);
else
- ext2_clear_bit(file_page_offset(j), page_address(page));
+ ext2_clear_bit(file_page_offset(j), paddr);
+ kunmap_atomic(paddr, KM_USER0);
}
}
spin_unlock_irqrestore(&bitmap->lock, flags);
@@ -1118,7 +1116,6 @@ int bitmap_daemon_work(struct bitmap *bitmap)
/* now sync the final page */
if (lastpage != NULL) {
- kunmap(lastpage);
spin_lock_irqsave(&bitmap->lock, flags);
if (get_page_attr(bitmap, lastpage) &BITMAP_PAGE_NEEDWRITE) {
clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
@@ -1133,7 +1130,7 @@ int bitmap_daemon_work(struct bitmap *bitmap)
spin_unlock_irqrestore(&bitmap->lock, flags);
}
- page_cache_release(lastpage);
+ put_page(lastpage);
}
return err;
@@ -1184,7 +1181,7 @@ static void bitmap_writeback_daemon(mddev_t *mddev)
PRINTK("finished page writeback: %p\n", page);
err = PageError(page);
- page_cache_release(page);
+ put_page(page);
if (err) {
printk(KERN_WARNING "%s: bitmap file writeback "
"failed (page %lu): %d\n",
@@ -1530,6 +1527,8 @@ void bitmap_destroy(mddev_t *mddev)
return;
mddev->bitmap = NULL; /* disconnect from the md device */
+ if (mddev->thread)
+ mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
bitmap_free(bitmap);
}
@@ -1555,12 +1554,10 @@ int bitmap_create(mddev_t *mddev)
BUG_ON(file && mddev->bitmap_offset);
- bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL);
+ bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
if (!bitmap)
return -ENOMEM;
- memset(bitmap, 0, sizeof(*bitmap));
-
spin_lock_init(&bitmap->lock);
bitmap->mddev = mddev;
@@ -1601,12 +1598,11 @@ int bitmap_create(mddev_t *mddev)
#ifdef INJECT_FATAL_FAULT_1
bitmap->bp = NULL;
#else
- bitmap->bp = kmalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL);
+ bitmap->bp = kzalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL);
#endif
err = -ENOMEM;
if (!bitmap->bp)
goto error;
- memset(bitmap->bp, 0, pages * sizeof(*bitmap->bp));
bitmap->flags |= BITMAP_ACTIVE;
@@ -1636,6 +1632,8 @@ int bitmap_create(mddev_t *mddev)
if (IS_ERR(bitmap->writeback_daemon))
return PTR_ERR(bitmap->writeback_daemon);
+ mddev->thread->timeout = bitmap->daemon_sleep * HZ;
+
return bitmap_update_sb(bitmap);
error:
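The bitmap.c hunks above replace the sleeping kmap()/kunmap() pairs with kmap_atomic()/kunmap_atomic() and the page-cache reference helpers with plain get_page()/put_page(). A minimal sketch of the resulting mapping pattern, assuming the two-argument KM_USER0 interface this tree uses (the function name is hypothetical):

/* Sketch only: touch one byte of a struct page without sleeping. */
#include <linux/highmem.h>
#include <linux/mm.h>

static void example_poke_page(struct page *page, unsigned int offset, u8 val)
{
	void *vaddr;

	get_page(page);				/* keep the page around */
	vaddr = kmap_atomic(page, KM_USER0);	/* must not sleep until unmapped */
	((u8 *)vaddr)[offset] = val;
	kunmap_atomic(vaddr, KM_USER0);		/* takes the mapped address back */
	put_page(page);				/* drop the extra reference */
}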
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index cf663105668..a601a427885 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -690,6 +690,8 @@ bad3:
bad2:
crypto_free_tfm(tfm);
bad1:
+ /* Must zero key material before freeing */
+ memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
kfree(cc);
return -EINVAL;
}
@@ -706,6 +708,9 @@ static void crypt_dtr(struct dm_target *ti)
cc->iv_gen_ops->dtr(cc);
crypto_free_tfm(cc->tfm);
dm_put_device(ti, cc->dev);
+
+ /* Must zero key material before freeing */
+ memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
kfree(cc);
}
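The dm-crypt hunks clear the inline key bytes before kfree() on both the constructor error path and in crypt_dtr(), so freed slab memory does not keep a copy of the key. The same idea in isolation (struct and function names are hypothetical; later kernels gained memzero_explicit() for this, but a plain memset() is what is available here):

/* Sketch only: free a structure that carries key material inline,
 * wiping the key first. */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_cfg {
	unsigned int key_size;
	u8 key[0];			/* key bytes allocated inline */
};

static void example_cfg_free(struct example_cfg *cfg)
{
	/* Must zero key material before freeing */
	memset(cfg, 0, sizeof(*cfg) + cfg->key_size);
	kfree(cfg);
}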
diff --git a/drivers/md/dm-io.h b/drivers/md/dm-io.h
index 1a77f326570..f9035bfd1a9 100644
--- a/drivers/md/dm-io.h
+++ b/drivers/md/dm-io.h
@@ -9,9 +9,6 @@
#include "dm.h"
-/* FIXME make this configurable */
-#define DM_MAX_IO_REGIONS 8
-
struct io_region {
struct block_device *bdev;
sector_t sector;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 07d44e19536..561bda5011e 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -270,6 +270,7 @@ static int dm_hash_rename(const char *old, const char *new)
{
char *new_name, *old_name;
struct hash_cell *hc;
+ struct dm_table *table;
/*
* duplicate new.
@@ -317,6 +318,15 @@ static int dm_hash_rename(const char *old, const char *new)
/* rename the device node in devfs */
register_with_devfs(hc);
+ /*
+ * Wake up any dm event waiters.
+ */
+ table = dm_get_table(hc->md);
+ if (table) {
+ dm_table_event(table);
+ dm_table_put(table);
+ }
+
up_write(&_hash_lock);
kfree(old_name);
return 0;
@@ -683,14 +693,18 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
static int do_suspend(struct dm_ioctl *param)
{
int r = 0;
+ int do_lockfs = 1;
struct mapped_device *md;
md = find_device(param);
if (!md)
return -ENXIO;
+ if (param->flags & DM_SKIP_LOCKFS_FLAG)
+ do_lockfs = 0;
+
if (!dm_suspended(md))
- r = dm_suspend(md);
+ r = dm_suspend(md, do_lockfs);
if (!r)
r = __dev_status(md, param);
@@ -702,6 +716,7 @@ static int do_suspend(struct dm_ioctl *param)
static int do_resume(struct dm_ioctl *param)
{
int r = 0;
+ int do_lockfs = 1;
struct hash_cell *hc;
struct mapped_device *md;
struct dm_table *new_map;
@@ -727,8 +742,10 @@ static int do_resume(struct dm_ioctl *param)
/* Do we need to load a new map ? */
if (new_map) {
/* Suspend if it isn't already suspended */
+ if (param->flags & DM_SKIP_LOCKFS_FLAG)
+ do_lockfs = 0;
if (!dm_suspended(md))
- dm_suspend(md);
+ dm_suspend(md, do_lockfs);
r = dm_swap_table(md, new_map);
if (r) {
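do_suspend() and do_resume() now honour DM_SKIP_LOCKFS_FLAG, letting callers skip the freeze_bdev() step. A rough user-space sketch of setting that flag on a DM_DEV_SUSPEND ioctl; real callers go through libdevmapper, and the minimal set of dm_ioctl fields shown here is an assumption:

/* Sketch only: suspend a mapped device without freezing the filesystem
 * on top of it.  Error handling and most dm_ioctl bookkeeping omitted. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dm-ioctl.h>

static int suspend_skip_lockfs(const char *name)
{
	struct dm_ioctl io;
	int fd, ret;

	fd = open("/dev/mapper/control", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&io, 0, sizeof(io));
	io.version[0] = DM_VERSION_MAJOR;
	io.version[1] = DM_VERSION_MINOR;
	io.version[2] = DM_VERSION_PATCHLEVEL;
	io.data_size = sizeof(io);
	io.flags = DM_SUSPEND_FLAG | DM_SKIP_LOCKFS_FLAG;
	strncpy(io.name, name, sizeof(io.name) - 1);

	ret = ioctl(fd, DM_DEV_SUSPEND, &io);
	close(fd);
	return ret;
}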
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index a76349cb10a..efe4adf7853 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -573,7 +573,7 @@ static int core_get_resync_work(struct dirty_log *log, region_t *region)
lc->sync_search);
lc->sync_search = *region + 1;
- if (*region == lc->region_count)
+ if (*region >= lc->region_count)
return 0;
} while (log_test_bit(lc->recovering_bits, *region));
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 6b0fc167092..6cfa8d435d5 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -562,6 +562,8 @@ struct mirror_set {
region_t nr_regions;
int in_sync;
+ struct mirror *default_mirror; /* Default mirror */
+
unsigned int nr_mirrors;
struct mirror mirror[0];
};
@@ -611,7 +613,7 @@ static int recover(struct mirror_set *ms, struct region *reg)
unsigned long flags = 0;
/* fill in the source */
- m = ms->mirror + DEFAULT_MIRROR;
+ m = ms->default_mirror;
from.bdev = m->dev->bdev;
from.sector = m->offset + region_to_sector(reg->rh, reg->key);
if (reg->key == (ms->nr_regions - 1)) {
@@ -627,7 +629,7 @@ static int recover(struct mirror_set *ms, struct region *reg)
/* fill in the destinations */
for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
- if (i == DEFAULT_MIRROR)
+ if (&ms->mirror[i] == ms->default_mirror)
continue;
m = ms->mirror + i;
@@ -682,7 +684,7 @@ static void do_recovery(struct mirror_set *ms)
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
/* FIXME: add read balancing */
- return ms->mirror + DEFAULT_MIRROR;
+ return ms->default_mirror;
}
/*
@@ -709,7 +711,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
if (rh_in_sync(&ms->rh, region, 0))
m = choose_mirror(ms, bio->bi_sector);
else
- m = ms->mirror + DEFAULT_MIRROR;
+ m = ms->default_mirror;
map_bio(ms, m, bio);
generic_make_request(bio);
@@ -833,7 +835,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
rh_delay(&ms->rh, bio);
while ((bio = bio_list_pop(&nosync))) {
- map_bio(ms, ms->mirror + DEFAULT_MIRROR, bio);
+ map_bio(ms, ms->default_mirror, bio);
generic_make_request(bio);
}
}
@@ -900,6 +902,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
ms->nr_mirrors = nr_mirrors;
ms->nr_regions = dm_sector_div_up(ti->len, region_size);
ms->in_sync = 0;
+ ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];
if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
ti->error = "dm-mirror: Error creating dirty region hash";
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index ab54f99b7c3..4b9dd8fb1e5 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -371,6 +371,20 @@ static inline ulong round_up(ulong n, ulong size)
return (n + size) & ~size;
}
+static void read_snapshot_metadata(struct dm_snapshot *s)
+{
+ if (s->have_metadata)
+ return;
+
+ if (s->store.read_metadata(&s->store)) {
+ down_write(&s->lock);
+ s->valid = 0;
+ up_write(&s->lock);
+ }
+
+ s->have_metadata = 1;
+}
+
/*
* Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
*/
@@ -848,16 +862,7 @@ static void snapshot_resume(struct dm_target *ti)
{
struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
- if (s->have_metadata)
- return;
-
- if (s->store.read_metadata(&s->store)) {
- down_write(&s->lock);
- s->valid = 0;
- up_write(&s->lock);
- }
-
- s->have_metadata = 1;
+ read_snapshot_metadata(s);
}
static int snapshot_status(struct dm_target *ti, status_type_t type,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 930b9fc2795..097d1e54009 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -55,6 +55,7 @@ union map_info *dm_get_mapinfo(struct bio *bio)
*/
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
+#define DMF_FROZEN 2
struct mapped_device {
struct rw_semaphore io_lock;
@@ -97,7 +98,7 @@ struct mapped_device {
* freeze/thaw support require holding onto a super block
*/
struct super_block *frozen_sb;
- struct block_device *frozen_bdev;
+ struct block_device *suspended_bdev;
};
#define MIN_IOS 256
@@ -767,6 +768,7 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
md->queue->backing_dev_info.congested_fn = dm_any_congested;
md->queue->backing_dev_info.congested_data = md;
blk_queue_make_request(md->queue, dm_request);
+ blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
md->queue->unplug_fn = dm_unplug_all;
md->queue->issue_flush_fn = dm_flush_all;
@@ -836,9 +838,9 @@ static void __set_size(struct mapped_device *md, sector_t size)
{
set_capacity(md->disk, size);
- down(&md->frozen_bdev->bd_inode->i_sem);
- i_size_write(md->frozen_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
- up(&md->frozen_bdev->bd_inode->i_sem);
+ mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
+ i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
+ mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}
static int __bind(struct mapped_device *md, struct dm_table *t)
@@ -902,10 +904,9 @@ int dm_create_with_minor(unsigned int minor, struct mapped_device **result)
return create_aux(minor, 1, result);
}
-void *dm_get_mdptr(dev_t dev)
+static struct mapped_device *dm_find_md(dev_t dev)
{
struct mapped_device *md;
- void *mdptr = NULL;
unsigned minor = MINOR(dev);
if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
@@ -914,12 +915,32 @@ void *dm_get_mdptr(dev_t dev)
down(&_minor_lock);
md = idr_find(&_minor_idr, minor);
-
- if (md && (dm_disk(md)->first_minor == minor))
- mdptr = md->interface_ptr;
+ if (!md || (dm_disk(md)->first_minor != minor))
+ md = NULL;
up(&_minor_lock);
+ return md;
+}
+
+struct mapped_device *dm_get_md(dev_t dev)
+{
+ struct mapped_device *md = dm_find_md(dev);
+
+ if (md)
+ dm_get(md);
+
+ return md;
+}
+
+void *dm_get_mdptr(dev_t dev)
+{
+ struct mapped_device *md;
+ void *mdptr = NULL;
+
+ md = dm_find_md(dev);
+ if (md)
+ mdptr = md->interface_ptr;
return mdptr;
}
@@ -991,43 +1012,33 @@ out:
*/
static int lock_fs(struct mapped_device *md)
{
- int r = -ENOMEM;
-
- md->frozen_bdev = bdget_disk(md->disk, 0);
- if (!md->frozen_bdev) {
- DMWARN("bdget failed in lock_fs");
- goto out;
- }
+ int r;
WARN_ON(md->frozen_sb);
- md->frozen_sb = freeze_bdev(md->frozen_bdev);
+ md->frozen_sb = freeze_bdev(md->suspended_bdev);
if (IS_ERR(md->frozen_sb)) {
r = PTR_ERR(md->frozen_sb);
- goto out_bdput;
+ md->frozen_sb = NULL;
+ return r;
}
+ set_bit(DMF_FROZEN, &md->flags);
+
/* don't bdput right now, we don't want the bdev
- * to go away while it is locked. We'll bdput
- * in unlock_fs
+ * to go away while it is locked.
*/
return 0;
-
-out_bdput:
- bdput(md->frozen_bdev);
- md->frozen_sb = NULL;
- md->frozen_bdev = NULL;
-out:
- return r;
}
static void unlock_fs(struct mapped_device *md)
{
- thaw_bdev(md->frozen_bdev, md->frozen_sb);
- bdput(md->frozen_bdev);
+ if (!test_bit(DMF_FROZEN, &md->flags))
+ return;
+ thaw_bdev(md->suspended_bdev, md->frozen_sb);
md->frozen_sb = NULL;
- md->frozen_bdev = NULL;
+ clear_bit(DMF_FROZEN, &md->flags);
}
/*
@@ -1037,7 +1048,7 @@ static void unlock_fs(struct mapped_device *md)
* dm_bind_table, dm_suspend must be called to flush any in
* flight bios and ensure that any further io gets deferred.
*/
-int dm_suspend(struct mapped_device *md)
+int dm_suspend(struct mapped_device *md, int do_lockfs)
{
struct dm_table *map = NULL;
DECLARE_WAITQUEUE(wait, current);
@@ -1053,10 +1064,19 @@ int dm_suspend(struct mapped_device *md)
/* This does not get reverted if there's an error later. */
dm_table_presuspend_targets(map);
- /* Flush I/O to the device. */
- r = lock_fs(md);
- if (r)
+ md->suspended_bdev = bdget_disk(md->disk, 0);
+ if (!md->suspended_bdev) {
+ DMWARN("bdget failed in dm_suspend");
+ r = -ENOMEM;
goto out;
+ }
+
+ /* Flush I/O to the device. */
+ if (do_lockfs) {
+ r = lock_fs(md);
+ if (r)
+ goto out;
+ }
/*
* First we set the BLOCK_IO flag so no more ios will be mapped.
@@ -1105,6 +1125,11 @@ int dm_suspend(struct mapped_device *md)
r = 0;
out:
+ if (r && md->suspended_bdev) {
+ bdput(md->suspended_bdev);
+ md->suspended_bdev = NULL;
+ }
+
dm_table_put(map);
up(&md->suspend_lock);
return r;
@@ -1135,6 +1160,9 @@ int dm_resume(struct mapped_device *md)
unlock_fs(md);
+ bdput(md->suspended_bdev);
+ md->suspended_bdev = NULL;
+
clear_bit(DMF_SUSPENDED, &md->flags);
dm_table_unplug_all(map);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index e38c3fc1a1d..4eaf075da21 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -28,7 +28,7 @@
* in types.h.
*/
#ifdef CONFIG_LBD
-#define SECTOR_FORMAT "%Lu"
+#define SECTOR_FORMAT "%llu"
#else
#define SECTOR_FORMAT "%lu"
#endif
@@ -58,6 +58,7 @@ int dm_create(struct mapped_device **md);
int dm_create_with_minor(unsigned int minor, struct mapped_device **md);
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(dev_t dev);
+struct mapped_device *dm_get_md(dev_t dev);
/*
* Reference counting for md.
@@ -68,7 +69,7 @@ void dm_put(struct mapped_device *md);
/*
* A device can still be used while suspended, but I/O is deferred.
*/
-int dm_suspend(struct mapped_device *md);
+int dm_suspend(struct mapped_device *md, int with_lockfs);
int dm_resume(struct mapped_device *md);
/*
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 0248f8e7eac..a7a5ab55433 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -316,9 +316,10 @@ static int stop(mddev_t *mddev)
return 0;
}
-static mdk_personality_t faulty_personality =
+static struct mdk_personality faulty_personality =
{
.name = "faulty",
+ .level = LEVEL_FAULTY,
.owner = THIS_MODULE,
.make_request = make_request,
.run = run,
@@ -329,15 +330,17 @@ static mdk_personality_t faulty_personality =
static int __init raid_init(void)
{
- return register_md_personality(FAULTY, &faulty_personality);
+ return register_md_personality(&faulty_personality);
}
static void raid_exit(void)
{
- unregister_md_personality(FAULTY);
+ unregister_md_personality(&faulty_personality);
}
module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-10"); /* faulty */
+MODULE_ALIAS("md-faulty");
+MODULE_ALIAS("md-level--5");
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index eb703648597..ca99979c868 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -561,11 +561,13 @@ int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
* Cancels a kcopyd job, eg. someone might be deactivating a
* mirror.
*/
+#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
/* FIXME: finish */
return -1;
}
+#endif /* 0 */
/*-----------------------------------------------------------------
* Unit setup
@@ -684,4 +686,3 @@ void kcopyd_client_destroy(struct kcopyd_client *kc)
EXPORT_SYMBOL(kcopyd_client_create);
EXPORT_SYMBOL(kcopyd_client_destroy);
EXPORT_SYMBOL(kcopyd_copy);
-EXPORT_SYMBOL(kcopyd_cancel);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 946efef3a8f..777585458c8 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -121,11 +121,10 @@ static int linear_run (mddev_t *mddev)
sector_t curr_offset;
struct list_head *tmp;
- conf = kmalloc (sizeof (*conf) + mddev->raid_disks*sizeof(dev_info_t),
+ conf = kzalloc (sizeof (*conf) + mddev->raid_disks*sizeof(dev_info_t),
GFP_KERNEL);
if (!conf)
goto out;
- memset(conf, 0, sizeof(*conf) + mddev->raid_disks*sizeof(dev_info_t));
mddev->private = conf;
cnt = 0;
@@ -352,9 +351,10 @@ static void linear_status (struct seq_file *seq, mddev_t *mddev)
}
-static mdk_personality_t linear_personality=
+static struct mdk_personality linear_personality =
{
.name = "linear",
+ .level = LEVEL_LINEAR,
.owner = THIS_MODULE,
.make_request = linear_make_request,
.run = linear_run,
@@ -364,16 +364,18 @@ static mdk_personality_t linear_personality=
static int __init linear_init (void)
{
- return register_md_personality (LINEAR, &linear_personality);
+ return register_md_personality (&linear_personality);
}
static void linear_exit (void)
{
- unregister_md_personality (LINEAR);
+ unregister_md_personality (&linear_personality);
}
module_init(linear_init);
module_exit(linear_exit);
MODULE_LICENSE("GPL");
-MODULE_ALIAS("md-personality-1"); /* LINEAR */
+MODULE_ALIAS("md-personality-1"); /* LINEAR - deprecated*/
+MODULE_ALIAS("md-linear");
+MODULE_ALIAS("md-level--1");
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8175a2a222d..1778104e106 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -42,6 +42,7 @@
#include <linux/devfs_fs_kernel.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/suspend.h>
+#include <linux/poll.h>
#include <linux/init.h>
@@ -67,7 +68,7 @@
static void autostart_arrays (int part);
#endif
-static mdk_personality_t *pers[MAX_PERSONALITY];
+static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);
/*
@@ -80,10 +81,22 @@ static DEFINE_SPINLOCK(pers_lock);
* idle IO detection.
*
* you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
+ * or /sys/block/mdX/md/sync_speed_{min,max}
*/
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
+static inline int speed_min(mddev_t *mddev)
+{
+ return mddev->sync_speed_min ?
+ mddev->sync_speed_min : sysctl_speed_limit_min;
+}
+
+static inline int speed_max(mddev_t *mddev)
+{
+ return mddev->sync_speed_max ?
+ mddev->sync_speed_max : sysctl_speed_limit_max;
+}
static struct ctl_table_header *raid_table_header;
@@ -134,6 +147,24 @@ static struct block_device_operations md_fops;
static int start_readonly;
/*
+ * We have a system wide 'event count' that is incremented
+ * on any 'interesting' event, and readers of /proc/mdstat
+ * can use 'poll' or 'select' to find out when the event
+ * count increases.
+ *
+ * Events are:
+ * start array, stop array, error, add device, remove device,
+ * start build, activate spare
+ */
+static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
+static atomic_t md_event_count;
+static void md_new_event(mddev_t *mddev)
+{
+ atomic_inc(&md_event_count);
+ wake_up(&md_event_waiters);
+}
+
+/*
* Enables to iterate over all existing md arrays
* all_mddevs_lock protects this list.
*/
@@ -209,12 +240,10 @@ static mddev_t * mddev_find(dev_t unit)
}
spin_unlock(&all_mddevs_lock);
- new = (mddev_t *) kmalloc(sizeof(*new), GFP_KERNEL);
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return NULL;
- memset(new, 0, sizeof(*new));
-
new->unit = unit;
if (MAJOR(unit) == MD_MAJOR)
new->md_minor = MINOR(unit);
@@ -262,7 +291,7 @@ static inline void mddev_unlock(mddev_t * mddev)
md_wakeup_thread(mddev->thread);
}
-mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
+static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
mdk_rdev_t * rdev;
struct list_head *tmp;
@@ -286,6 +315,18 @@ static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
return NULL;
}
+static struct mdk_personality *find_pers(int level, char *clevel)
+{
+ struct mdk_personality *pers;
+ list_for_each_entry(pers, &pers_list, list) {
+ if (level != LEVEL_NONE && pers->level == level)
+ return pers;
+ if (strcmp(pers->name, clevel)==0)
+ return pers;
+ }
+ return NULL;
+}
+
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
@@ -320,7 +361,7 @@ static int alloc_disk_sb(mdk_rdev_t * rdev)
static void free_disk_sb(mdk_rdev_t * rdev)
{
if (rdev->sb_page) {
- page_cache_release(rdev->sb_page);
+ put_page(rdev->sb_page);
rdev->sb_loaded = 0;
rdev->sb_page = NULL;
rdev->sb_offset = 0;
@@ -461,6 +502,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size,
bio_put(bio);
return ret;
}
+EXPORT_SYMBOL_GPL(sync_page_io);
static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
@@ -665,6 +707,10 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
}
rdev->size = calc_dev_size(rdev, sb->chunk_size);
+ if (rdev->size < sb->size && sb->level > 1)
+ /* "this cannot possibly happen" ... */
+ ret = -EINVAL;
+
abort:
return ret;
}
@@ -688,6 +734,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->ctime = sb->ctime;
mddev->utime = sb->utime;
mddev->level = sb->level;
+ mddev->clevel[0] = 0;
mddev->layout = sb->layout;
mddev->raid_disks = sb->raid_disks;
mddev->size = sb->size;
@@ -714,9 +761,10 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
mddev->bitmap_file == NULL) {
- if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6) {
+ if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
+ && mddev->level != 10) {
/* FIXME use a better test */
- printk(KERN_WARNING "md: bitmaps only support for raid1\n");
+ printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
return -EINVAL;
}
mddev->bitmap_offset = mddev->default_bitmap_offset;
@@ -968,6 +1016,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
}
rdev->preferred_minor = 0xffff;
rdev->data_offset = le64_to_cpu(sb->data_offset);
+ atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
@@ -1006,6 +1055,9 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
rdev->size = le64_to_cpu(sb->data_size)/2;
if (le32_to_cpu(sb->chunksize))
rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
+
+ if (le32_to_cpu(sb->size) > rdev->size*2)
+ return -EINVAL;
return 0;
}
@@ -1023,6 +1075,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
mddev->level = le32_to_cpu(sb->level);
+ mddev->clevel[0] = 0;
mddev->layout = le32_to_cpu(sb->layout);
mddev->raid_disks = le32_to_cpu(sb->raid_disks);
mddev->size = le64_to_cpu(sb->size)/2;
@@ -1037,8 +1090,9 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
mddev->bitmap_file == NULL ) {
- if (mddev->level != 1) {
- printk(KERN_WARNING "md: bitmaps only supported for raid1\n");
+ if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
+ && mddev->level != 10) {
+ printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
return -EINVAL;
}
mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
@@ -1105,6 +1159,8 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
else
sb->resync_offset = cpu_to_le64(0);
+ sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors);
+
if (mddev->bitmap && mddev->bitmap_file == NULL) {
sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
@@ -1182,11 +1238,20 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
mdk_rdev_t *same_pdev;
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
struct kobject *ko;
+ char *s;
if (rdev->mddev) {
MD_BUG();
return -EINVAL;
}
+ /* make sure rdev->size exceeds mddev->size */
+ if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
+ if (mddev->pers)
+ /* Cannot change size, so fail */
+ return -ENOSPC;
+ else
+ mddev->size = rdev->size;
+ }
same_pdev = match_dev_unit(mddev, rdev);
if (same_pdev)
printk(KERN_WARNING
@@ -1213,6 +1278,8 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
bdevname(rdev->bdev,b);
if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
return -ENOMEM;
+ while ( (s=strchr(rdev->kobj.k_name, '/')) != NULL)
+ *s = '!';
list_add(&rdev->same_set, &mddev->disks);
rdev->mddev = mddev;
@@ -1496,6 +1563,26 @@ repeat:
}
+/* words written to sysfs files may, or may not, be \n terminated.
+ * We want to accept either case. For this we use cmd_match.
+ */
+static int cmd_match(const char *cmd, const char *str)
+{
+ /* See if cmd, written into a sysfs file, matches
+ * str. They must either be the same, or cmd can
+ * have a trailing newline
+ */
+ while (*cmd && *str && *cmd == *str) {
+ cmd++;
+ str++;
+ }
+ if (*cmd == '\n')
+ cmd++;
+ if (*str || *cmd)
+ return 0;
+ return 1;
+}
+
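cmd_match() accepts a sysfs command with or without one trailing newline and rejects prefixes. A quick user-space copy of the function with a few checks to illustrate exactly what it accepts:

/* Sketch only: user-space copy of cmd_match() above, plus sanity checks. */
#include <assert.h>

static int cmd_match(const char *cmd, const char *str)
{
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (*str || *cmd)
		return 0;
	return 1;
}

int main(void)
{
	assert(cmd_match("check\n", "check"));		/* trailing newline accepted */
	assert(cmd_match("check", "check"));		/* exact match accepted */
	assert(!cmd_match("check", "checkpoint"));	/* a prefix is not a match */
	assert(!cmd_match("checkpoint", "check"));
	return 0;
}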
struct rdev_sysfs_entry {
struct attribute attr;
ssize_t (*show)(mdk_rdev_t *, char *);
@@ -1538,9 +1625,113 @@ super_show(mdk_rdev_t *rdev, char *page)
}
static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);
+static ssize_t
+errors_show(mdk_rdev_t *rdev, char *page)
+{
+ return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
+}
+
+static ssize_t
+errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
+{
+ char *e;
+ unsigned long n = simple_strtoul(buf, &e, 10);
+ if (*buf && (*e == 0 || *e == '\n')) {
+ atomic_set(&rdev->corrected_errors, n);
+ return len;
+ }
+ return -EINVAL;
+}
+static struct rdev_sysfs_entry rdev_errors =
+__ATTR(errors, 0644, errors_show, errors_store);
+
+static ssize_t
+slot_show(mdk_rdev_t *rdev, char *page)
+{
+ if (rdev->raid_disk < 0)
+ return sprintf(page, "none\n");
+ else
+ return sprintf(page, "%d\n", rdev->raid_disk);
+}
+
+static ssize_t
+slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
+{
+ char *e;
+ int slot = simple_strtoul(buf, &e, 10);
+ if (strncmp(buf, "none", 4)==0)
+ slot = -1;
+ else if (e==buf || (*e && *e!= '\n'))
+ return -EINVAL;
+ if (rdev->mddev->pers)
+ /* Cannot set slot in active array (yet) */
+ return -EBUSY;
+ if (slot >= rdev->mddev->raid_disks)
+ return -ENOSPC;
+ rdev->raid_disk = slot;
+ /* assume it is working */
+ rdev->flags = 0;
+ set_bit(In_sync, &rdev->flags);
+ return len;
+}
+
+
+static struct rdev_sysfs_entry rdev_slot =
+__ATTR(slot, 0644, slot_show, slot_store);
+
+static ssize_t
+offset_show(mdk_rdev_t *rdev, char *page)
+{
+ return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
+}
+
+static ssize_t
+offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
+{
+ char *e;
+ unsigned long long offset = simple_strtoull(buf, &e, 10);
+ if (e==buf || (*e && *e != '\n'))
+ return -EINVAL;
+ if (rdev->mddev->pers)
+ return -EBUSY;
+ rdev->data_offset = offset;
+ return len;
+}
+
+static struct rdev_sysfs_entry rdev_offset =
+__ATTR(offset, 0644, offset_show, offset_store);
+
+static ssize_t
+rdev_size_show(mdk_rdev_t *rdev, char *page)
+{
+ return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
+}
+
+static ssize_t
+rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
+{
+ char *e;
+ unsigned long long size = simple_strtoull(buf, &e, 10);
+ if (e==buf || (*e && *e != '\n'))
+ return -EINVAL;
+ if (rdev->mddev->pers)
+ return -EBUSY;
+ rdev->size = size;
+ if (size < rdev->mddev->size || rdev->mddev->size == 0)
+ rdev->mddev->size = size;
+ return len;
+}
+
+static struct rdev_sysfs_entry rdev_size =
+__ATTR(size, 0644, rdev_size_show, rdev_size_store);
+
static struct attribute *rdev_default_attrs[] = {
&rdev_state.attr,
&rdev_super.attr,
+ &rdev_errors.attr,
+ &rdev_slot.attr,
+ &rdev_offset.attr,
+ &rdev_size.attr,
NULL,
};
static ssize_t
@@ -1598,12 +1789,11 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
mdk_rdev_t *rdev;
sector_t size;
- rdev = (mdk_rdev_t *) kmalloc(sizeof(*rdev), GFP_KERNEL);
+ rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
if (!rdev) {
printk(KERN_ERR "md: could not alloc mem for new device!\n");
return ERR_PTR(-ENOMEM);
}
- memset(rdev, 0, sizeof(*rdev));
if ((err = alloc_disk_sb(rdev)))
goto abort_free;
@@ -1621,6 +1811,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
rdev->data_offset = 0;
atomic_set(&rdev->nr_pending, 0);
atomic_set(&rdev->read_errors, 0);
+ atomic_set(&rdev->corrected_errors, 0);
size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
if (!size) {
@@ -1725,16 +1916,37 @@ static void analyze_sbs(mddev_t * mddev)
static ssize_t
level_show(mddev_t *mddev, char *page)
{
- mdk_personality_t *p = mddev->pers;
- if (p == NULL && mddev->raid_disks == 0)
- return 0;
- if (mddev->level >= 0)
- return sprintf(page, "raid%d\n", mddev->level);
- else
+ struct mdk_personality *p = mddev->pers;
+ if (p)
return sprintf(page, "%s\n", p->name);
+ else if (mddev->clevel[0])
+ return sprintf(page, "%s\n", mddev->clevel);
+ else if (mddev->level != LEVEL_NONE)
+ return sprintf(page, "%d\n", mddev->level);
+ else
+ return 0;
}
-static struct md_sysfs_entry md_level = __ATTR_RO(level);
+static ssize_t
+level_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ int rv = len;
+ if (mddev->pers)
+ return -EBUSY;
+ if (len == 0)
+ return 0;
+ if (len >= sizeof(mddev->clevel))
+ return -ENOSPC;
+ strncpy(mddev->clevel, buf, len);
+ if (mddev->clevel[len-1] == '\n')
+ len--;
+ mddev->clevel[len] = 0;
+ mddev->level = LEVEL_NONE;
+ return rv;
+}
+
+static struct md_sysfs_entry md_level =
+__ATTR(level, 0644, level_show, level_store);
static ssize_t
raid_disks_show(mddev_t *mddev, char *page)
@@ -1744,7 +1956,197 @@ raid_disks_show(mddev_t *mddev, char *page)
return sprintf(page, "%d\n", mddev->raid_disks);
}
-static struct md_sysfs_entry md_raid_disks = __ATTR_RO(raid_disks);
+static int update_raid_disks(mddev_t *mddev, int raid_disks);
+
+static ssize_t
+raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ /* can only set raid_disks if array is not yet active */
+ char *e;
+ int rv = 0;
+ unsigned long n = simple_strtoul(buf, &e, 10);
+
+ if (!*buf || (*e && *e != '\n'))
+ return -EINVAL;
+
+ if (mddev->pers)
+ rv = update_raid_disks(mddev, n);
+ else
+ mddev->raid_disks = n;
+ return rv ? rv : len;
+}
+static struct md_sysfs_entry md_raid_disks =
+__ATTR(raid_disks, 0644, raid_disks_show, raid_disks_store);
+
+static ssize_t
+chunk_size_show(mddev_t *mddev, char *page)
+{
+ return sprintf(page, "%d\n", mddev->chunk_size);
+}
+
+static ssize_t
+chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ /* can only set chunk_size if array is not yet active */
+ char *e;
+ unsigned long n = simple_strtoul(buf, &e, 10);
+
+ if (mddev->pers)
+ return -EBUSY;
+ if (!*buf || (*e && *e != '\n'))
+ return -EINVAL;
+
+ mddev->chunk_size = n;
+ return len;
+}
+static struct md_sysfs_entry md_chunk_size =
+__ATTR(chunk_size, 0644, chunk_size_show, chunk_size_store);
+
+static ssize_t
+null_show(mddev_t *mddev, char *page)
+{
+ return -EINVAL;
+}
+
+static ssize_t
+new_dev_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ /* buf must be %d:%d\n? giving major and minor numbers */
+ /* The new device is added to the array.
+ * If the array has a persistent superblock, we read the
+ * superblock to initialise info and check validity.
+ * Otherwise, the only checking done is that in bind_rdev_to_array,
+ * which mainly checks size.
+ */
+ char *e;
+ int major = simple_strtoul(buf, &e, 10);
+ int minor;
+ dev_t dev;
+ mdk_rdev_t *rdev;
+ int err;
+
+ if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
+ return -EINVAL;
+ minor = simple_strtoul(e+1, &e, 10);
+ if (*e && *e != '\n')
+ return -EINVAL;
+ dev = MKDEV(major, minor);
+ if (major != MAJOR(dev) ||
+ minor != MINOR(dev))
+ return -EOVERFLOW;
+
+
+ if (mddev->persistent) {
+ rdev = md_import_device(dev, mddev->major_version,
+ mddev->minor_version);
+ if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
+ mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
+ mdk_rdev_t, same_set);
+ err = super_types[mddev->major_version]
+ .load_super(rdev, rdev0, mddev->minor_version);
+ if (err < 0)
+ goto out;
+ }
+ } else
+ rdev = md_import_device(dev, -1, -1);
+
+ if (IS_ERR(rdev))
+ return PTR_ERR(rdev);
+ err = bind_rdev_to_array(rdev, mddev);
+ out:
+ if (err)
+ export_rdev(rdev);
+ return err ? err : len;
+}
+
+static struct md_sysfs_entry md_new_device =
+__ATTR(new_dev, 0200, null_show, new_dev_store);
+
+static ssize_t
+size_show(mddev_t *mddev, char *page)
+{
+ return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
+}
+
+static int update_size(mddev_t *mddev, unsigned long size);
+
+static ssize_t
+size_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ /* If array is inactive, we can reduce the component size, but
+ * not increase it (except from 0).
+ * If array is active, we can try an on-line resize
+ */
+ char *e;
+ int err = 0;
+ unsigned long long size = simple_strtoull(buf, &e, 10);
+ if (!*buf || *buf == '\n' ||
+ (*e && *e != '\n'))
+ return -EINVAL;
+
+ if (mddev->pers) {
+ err = update_size(mddev, size);
+ md_update_sb(mddev);
+ } else {
+ if (mddev->size == 0 ||
+ mddev->size > size)
+ mddev->size = size;
+ else
+ err = -ENOSPC;
+ }
+ return err ? err : len;
+}
+
+static struct md_sysfs_entry md_size =
+__ATTR(component_size, 0644, size_show, size_store);
+
+
+/* Metadata version.
+ * This is either 'none' for arrays with externally managed metadata,
+ * or N.M for internally known formats
+ */
+static ssize_t
+metadata_show(mddev_t *mddev, char *page)
+{
+ if (mddev->persistent)
+ return sprintf(page, "%d.%d\n",
+ mddev->major_version, mddev->minor_version);
+ else
+ return sprintf(page, "none\n");
+}
+
+static ssize_t
+metadata_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ int major, minor;
+ char *e;
+ if (!list_empty(&mddev->disks))
+ return -EBUSY;
+
+ if (cmd_match(buf, "none")) {
+ mddev->persistent = 0;
+ mddev->major_version = 0;
+ mddev->minor_version = 90;
+ return len;
+ }
+ major = simple_strtoul(buf, &e, 10);
+ if (e==buf || *e != '.')
+ return -EINVAL;
+ buf = e+1;
+ minor = simple_strtoul(buf, &e, 10);
+ if (e==buf || *e != '\n')
+ return -EINVAL;
+ if (major >= sizeof(super_types)/sizeof(super_types[0]) ||
+ super_types[major].name == NULL)
+ return -ENOENT;
+ mddev->major_version = major;
+ mddev->minor_version = minor;
+ mddev->persistent = 1;
+ return len;
+}
+
+static struct md_sysfs_entry md_metadata =
+__ATTR(metadata_version, 0644, metadata_show, metadata_store);
static ssize_t
action_show(mddev_t *mddev, char *page)
@@ -1771,31 +2173,27 @@ action_store(mddev_t *mddev, const char *page, size_t len)
if (!mddev->pers || !mddev->pers->sync_request)
return -EINVAL;
- if (strcmp(page, "idle")==0 || strcmp(page, "idle\n")==0) {
+ if (cmd_match(page, "idle")) {
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_unregister_thread(mddev->sync_thread);
mddev->sync_thread = NULL;
mddev->recovery = 0;
}
- return len;
- }
-
- if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
- test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+ } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
return -EBUSY;
- if (strcmp(page, "resync")==0 || strcmp(page, "resync\n")==0 ||
- strcmp(page, "recover")==0 || strcmp(page, "recover\n")==0)
+ else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
else {
- if (strcmp(page, "check")==0 || strcmp(page, "check\n")==0)
+ if (cmd_match(page, "check"))
set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
- else if (strcmp(page, "repair")!=0 && strcmp(page, "repair\n")!=0)
+ else if (cmd_match(page, "repair"))
return -EINVAL;
set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
return len;
}
@@ -1814,15 +2212,107 @@ md_scan_mode = __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
static struct md_sysfs_entry
md_mismatches = __ATTR_RO(mismatch_cnt);
+static ssize_t
+sync_min_show(mddev_t *mddev, char *page)
+{
+ return sprintf(page, "%d (%s)\n", speed_min(mddev),
+ mddev->sync_speed_min ? "local": "system");
+}
+
+static ssize_t
+sync_min_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ int min;
+ char *e;
+ if (strncmp(buf, "system", 6)==0) {
+ mddev->sync_speed_min = 0;
+ return len;
+ }
+ min = simple_strtoul(buf, &e, 10);
+ if (buf == e || (*e && *e != '\n') || min <= 0)
+ return -EINVAL;
+ mddev->sync_speed_min = min;
+ return len;
+}
+
+static struct md_sysfs_entry md_sync_min =
+__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
+
+static ssize_t
+sync_max_show(mddev_t *mddev, char *page)
+{
+ return sprintf(page, "%d (%s)\n", speed_max(mddev),
+ mddev->sync_speed_max ? "local": "system");
+}
+
+static ssize_t
+sync_max_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ int max;
+ char *e;
+ if (strncmp(buf, "system", 6)==0) {
+ mddev->sync_speed_max = 0;
+ return len;
+ }
+ max = simple_strtoul(buf, &e, 10);
+ if (buf == e || (*e && *e != '\n') || max <= 0)
+ return -EINVAL;
+ mddev->sync_speed_max = max;
+ return len;
+}
+
+static struct md_sysfs_entry md_sync_max =
+__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
+
+
+static ssize_t
+sync_speed_show(mddev_t *mddev, char *page)
+{
+ unsigned long resync, dt, db;
+ resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
+ dt = ((jiffies - mddev->resync_mark) / HZ);
+ if (!dt) dt++;
+ db = resync - (mddev->resync_mark_cnt);
+ return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
+}
+
+static struct md_sysfs_entry
+md_sync_speed = __ATTR_RO(sync_speed);
+
+static ssize_t
+sync_completed_show(mddev_t *mddev, char *page)
+{
+ unsigned long max_blocks, resync;
+
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
+ max_blocks = mddev->resync_max_sectors;
+ else
+ max_blocks = mddev->size << 1;
+
+ resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
+ return sprintf(page, "%lu / %lu\n", resync, max_blocks);
+}
+
+static struct md_sysfs_entry
+md_sync_completed = __ATTR_RO(sync_completed);
+
static struct attribute *md_default_attrs[] = {
&md_level.attr,
&md_raid_disks.attr,
+ &md_chunk_size.attr,
+ &md_size.attr,
+ &md_metadata.attr,
+ &md_new_device.attr,
NULL,
};
static struct attribute *md_redundancy_attrs[] = {
&md_scan_mode.attr,
&md_mismatches.attr,
+ &md_sync_min.attr,
+ &md_sync_max.attr,
+ &md_sync_speed.attr,
+ &md_sync_completed.attr,
NULL,
};
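Together, md_default_attrs and md_redundancy_attrs expose level, raid_disks, chunk_size, component_size, metadata_version, new_dev and the sync_speed_* knobs under /sys/block/mdX/md/. A small user-space sketch of driving them; the device name and values are only illustrative, and several of the stores (level, chunk_size, ...) are rejected once the array is running:

/* Sketch only: write to and read from the md sysfs attributes added above. */
#include <stdio.h>

static int sysfs_write(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);		/* a trailing newline is accepted, see cmd_match() */
	return fclose(f);
}

int main(void)
{
	char speed[64] = "";
	FILE *f;

	sysfs_write("/sys/block/md0/md/level", "raid5\n");
	sysfs_write("/sys/block/md0/md/sync_speed_min", "5000\n");

	f = fopen("/sys/block/md0/md/sync_speed", "r");
	if (f && fgets(speed, sizeof(speed), f))
		printf("resync speed: %s", speed);
	if (f)
		fclose(f);
	return 0;
}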
static struct attribute_group md_redundancy_group = {
@@ -1937,14 +2427,16 @@ static void md_safemode_timeout(unsigned long data)
md_wakeup_thread(mddev->thread);
}
+static int start_dirty_degraded;
static int do_md_run(mddev_t * mddev)
{
- int pnum, err;
+ int err;
int chunk_size;
struct list_head *tmp;
mdk_rdev_t *rdev;
struct gendisk *disk;
+ struct mdk_personality *pers;
char b[BDEVNAME_SIZE];
if (list_empty(&mddev->disks))
@@ -1961,20 +2453,8 @@ static int do_md_run(mddev_t * mddev)
analyze_sbs(mddev);
chunk_size = mddev->chunk_size;
- pnum = level_to_pers(mddev->level);
- if ((pnum != MULTIPATH) && (pnum != RAID1)) {
- if (!chunk_size) {
- /*
- * 'default chunksize' in the old md code used to
- * be PAGE_SIZE, baaad.
- * we abort here to be on the safe side. We don't
- * want to continue the bad practice.
- */
- printk(KERN_ERR
- "no chunksize specified, see 'man raidtab'\n");
- return -EINVAL;
- }
+ if (chunk_size) {
if (chunk_size > MAX_CHUNK_SIZE) {
printk(KERN_ERR "too big chunk_size: %d > %d\n",
chunk_size, MAX_CHUNK_SIZE);
@@ -2010,10 +2490,10 @@ static int do_md_run(mddev_t * mddev)
}
#ifdef CONFIG_KMOD
- if (!pers[pnum])
- {
- request_module("md-personality-%d", pnum);
- }
+ if (mddev->level != LEVEL_NONE)
+ request_module("md-level-%d", mddev->level);
+ else if (mddev->clevel[0])
+ request_module("md-%s", mddev->clevel);
#endif
/*
@@ -2035,30 +2515,39 @@ static int do_md_run(mddev_t * mddev)
return -ENOMEM;
spin_lock(&pers_lock);
- if (!pers[pnum] || !try_module_get(pers[pnum]->owner)) {
+ pers = find_pers(mddev->level, mddev->clevel);
+ if (!pers || !try_module_get(pers->owner)) {
spin_unlock(&pers_lock);
- printk(KERN_WARNING "md: personality %d is not loaded!\n",
- pnum);
+ if (mddev->level != LEVEL_NONE)
+ printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
+ mddev->level);
+ else
+ printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
+ mddev->clevel);
return -EINVAL;
}
-
- mddev->pers = pers[pnum];
+ mddev->pers = pers;
spin_unlock(&pers_lock);
+ mddev->level = pers->level;
+ strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
mddev->recovery = 0;
mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
mddev->barriers_work = 1;
+ mddev->ok_start_degraded = start_dirty_degraded;
if (start_readonly)
mddev->ro = 2; /* read-only, but switch on first write */
- /* before we start the array running, initialise the bitmap */
- err = bitmap_create(mddev);
- if (err)
- printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
- mdname(mddev), err);
- else
- err = mddev->pers->run(mddev);
+ err = mddev->pers->run(mddev);
+ if (!err && mddev->pers->sync_request) {
+ err = bitmap_create(mddev);
+ if (err) {
+ printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
+ mdname(mddev), err);
+ mddev->pers->stop(mddev);
+ }
+ }
if (err) {
printk(KERN_ERR "md: pers->run() failed ...\n");
module_put(mddev->pers->owner);
@@ -2104,6 +2593,7 @@ static int do_md_run(mddev_t * mddev)
mddev->queue->make_request_fn = mddev->pers->make_request;
mddev->changed = 1;
+ md_new_event(mddev);
return 0;
}
@@ -2231,6 +2721,7 @@ static int do_md_stop(mddev_t * mddev, int ro)
printk(KERN_INFO "md: %s switched to read-only mode.\n",
mdname(mddev));
err = 0;
+ md_new_event(mddev);
out:
return err;
}
@@ -2668,12 +3159,6 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
if (info->state & (1<<MD_DISK_WRITEMOSTLY))
set_bit(WriteMostly, &rdev->flags);
- err = bind_rdev_to_array(rdev, mddev);
- if (err) {
- export_rdev(rdev);
- return err;
- }
-
if (!mddev->persistent) {
printk(KERN_INFO "md: nonpersistent superblock ...\n");
rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
@@ -2681,8 +3166,11 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
rdev->size = calc_dev_size(rdev, mddev->chunk_size);
- if (!mddev->size || (mddev->size > rdev->size))
- mddev->size = rdev->size;
+ err = bind_rdev_to_array(rdev, mddev);
+ if (err) {
+ export_rdev(rdev);
+ return err;
+ }
}
return 0;
@@ -2705,6 +3193,7 @@ static int hot_remove_disk(mddev_t * mddev, dev_t dev)
kick_rdev_from_array(rdev);
md_update_sb(mddev);
+ md_new_event(mddev);
return 0;
busy:
@@ -2753,15 +3242,6 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
size = calc_dev_size(rdev, mddev->chunk_size);
rdev->size = size;
- if (size < mddev->size) {
- printk(KERN_WARNING
- "%s: disk size %llu blocks < array size %llu\n",
- mdname(mddev), (unsigned long long)size,
- (unsigned long long)mddev->size);
- err = -ENOSPC;
- goto abort_export;
- }
-
if (test_bit(Faulty, &rdev->flags)) {
printk(KERN_WARNING
"md: can not hot-add faulty %s disk to %s!\n",
@@ -2771,7 +3251,9 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
}
clear_bit(In_sync, &rdev->flags);
rdev->desc_nr = -1;
- bind_rdev_to_array(rdev, mddev);
+ err = bind_rdev_to_array(rdev, mddev);
+ if (err)
+ goto abort_export;
/*
* The rest should better be atomic, we can have disk failures
@@ -2795,7 +3277,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
*/
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
-
+ md_new_event(mddev);
return 0;
abort_unbind_export:
@@ -2942,6 +3424,81 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
return 0;
}
+static int update_size(mddev_t *mddev, unsigned long size)
+{
+ mdk_rdev_t * rdev;
+ int rv;
+ struct list_head *tmp;
+
+ if (mddev->pers->resize == NULL)
+ return -EINVAL;
+ /* The "size" is the amount of each device that is used.
+ * This can only make sense for arrays with redundancy.
+ * linear and raid0 always use whatever space is available
+ * We can only consider changing the size if no resync
+ * or reconstruction is happening, and if the new size
+ * is acceptable. It must fit before the sb_offset or,
+ * if that is <data_offset, it must fit before the
+ * size of each device.
+ * If size is zero, we find the largest size that fits.
+ */
+ if (mddev->sync_thread)
+ return -EBUSY;
+ ITERATE_RDEV(mddev,rdev,tmp) {
+ sector_t avail;
+ int fit = (size == 0);
+ if (rdev->sb_offset > rdev->data_offset)
+ avail = (rdev->sb_offset*2) - rdev->data_offset;
+ else
+ avail = get_capacity(rdev->bdev->bd_disk)
+ - rdev->data_offset;
+ if (fit && (size == 0 || size > avail/2))
+ size = avail/2;
+ if (avail < ((sector_t)size << 1))
+ return -ENOSPC;
+ }
+ rv = mddev->pers->resize(mddev, (sector_t)size *2);
+ if (!rv) {
+ struct block_device *bdev;
+
+ bdev = bdget_disk(mddev->gendisk, 0);
+ if (bdev) {
+ mutex_lock(&bdev->bd_inode->i_mutex);
+ i_size_write(bdev->bd_inode, mddev->array_size << 10);
+ mutex_unlock(&bdev->bd_inode->i_mutex);
+ bdput(bdev);
+ }
+ }
+ return rv;
+}
+
+static int update_raid_disks(mddev_t *mddev, int raid_disks)
+{
+ int rv;
+ /* change the number of raid disks */
+ if (mddev->pers->reshape == NULL)
+ return -EINVAL;
+ if (raid_disks <= 0 ||
+ raid_disks >= mddev->max_disks)
+ return -EINVAL;
+ if (mddev->sync_thread)
+ return -EBUSY;
+ rv = mddev->pers->reshape(mddev, raid_disks);
+ if (!rv) {
+ struct block_device *bdev;
+
+ bdev = bdget_disk(mddev->gendisk, 0);
+ if (bdev) {
+ mutex_lock(&bdev->bd_inode->i_mutex);
+ i_size_write(bdev->bd_inode, mddev->array_size << 10);
+ mutex_unlock(&bdev->bd_inode->i_mutex);
+ bdput(bdev);
+ }
+ }
+ return rv;
+}
+
+
/*
* update_array_info is used to change the configuration of an
* on-line array.
@@ -2990,71 +3547,12 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
else
return mddev->pers->reconfig(mddev, info->layout, -1);
}
- if (mddev->size != info->size) {
- mdk_rdev_t * rdev;
- struct list_head *tmp;
- if (mddev->pers->resize == NULL)
- return -EINVAL;
- /* The "size" is the amount of each device that is used.
- * This can only make sense for arrays with redundancy.
- * linear and raid0 always use whatever space is available
- * We can only consider changing the size if no resync
- * or reconstruction is happening, and if the new size
- * is acceptable. It must fit before the sb_offset or,
- * if that is <data_offset, it must fit before the
- * size of each device.
- * If size is zero, we find the largest size that fits.
- */
- if (mddev->sync_thread)
- return -EBUSY;
- ITERATE_RDEV(mddev,rdev,tmp) {
- sector_t avail;
- int fit = (info->size == 0);
- if (rdev->sb_offset > rdev->data_offset)
- avail = (rdev->sb_offset*2) - rdev->data_offset;
- else
- avail = get_capacity(rdev->bdev->bd_disk)
- - rdev->data_offset;
- if (fit && (info->size == 0 || info->size > avail/2))
- info->size = avail/2;
- if (avail < ((sector_t)info->size << 1))
- return -ENOSPC;
- }
- rv = mddev->pers->resize(mddev, (sector_t)info->size *2);
- if (!rv) {
- struct block_device *bdev;
-
- bdev = bdget_disk(mddev->gendisk, 0);
- if (bdev) {
- down(&bdev->bd_inode->i_sem);
- i_size_write(bdev->bd_inode, mddev->array_size << 10);
- up(&bdev->bd_inode->i_sem);
- bdput(bdev);
- }
- }
- }
- if (mddev->raid_disks != info->raid_disks) {
- /* change the number of raid disks */
- if (mddev->pers->reshape == NULL)
- return -EINVAL;
- if (info->raid_disks <= 0 ||
- info->raid_disks >= mddev->max_disks)
- return -EINVAL;
- if (mddev->sync_thread)
- return -EBUSY;
- rv = mddev->pers->reshape(mddev, info->raid_disks);
- if (!rv) {
- struct block_device *bdev;
-
- bdev = bdget_disk(mddev->gendisk, 0);
- if (bdev) {
- down(&bdev->bd_inode->i_sem);
- i_size_write(bdev->bd_inode, mddev->array_size << 10);
- up(&bdev->bd_inode->i_sem);
- bdput(bdev);
- }
- }
- }
+ if (mddev->size != info->size)
+ rv = update_size(mddev, info->size);
+
+ if (mddev->raid_disks != info->raid_disks)
+ rv = update_raid_disks(mddev, info->raid_disks);
+
if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
if (mddev->pers->quiesce == NULL)
return -EINVAL;
@@ -3103,12 +3601,21 @@ static int set_disk_faulty(mddev_t *mddev, dev_t dev)
return 0;
}
+static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ mddev_t *mddev = bdev->bd_disk->private_data;
+
+ geo->heads = 2;
+ geo->sectors = 4;
+ geo->cylinders = get_capacity(mddev->gendisk) / 8;
+ return 0;
+}
+
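HDIO_GETGEO is dropped from md_ioctl() in favour of the new .getgeo method; the block layer's generic HDIO_GETGEO handler fills hd_geometry.start and calls md_getgeo() for the synthetic heads/sectors/cylinders. User space is unchanged; a minimal caller (device path illustrative):

/* Sketch only: query the fake geometry md reports (2 heads, 4 sectors). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	struct hd_geometry geo;
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0 || ioctl(fd, HDIO_GETGEO, &geo) != 0) {
		perror("HDIO_GETGEO");
		return 1;
	}
	printf("heads=%u sectors=%u cylinders=%u start=%lu\n",
	       geo.heads, geo.sectors, geo.cylinders, geo.start);
	close(fd);
	return 0;
}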
static int md_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
int err = 0;
void __user *argp = (void __user *)arg;
- struct hd_geometry __user *loc = argp;
mddev_t *mddev = NULL;
if (!capable(CAP_SYS_ADMIN))
@@ -3270,24 +3777,6 @@ static int md_ioctl(struct inode *inode, struct file *file,
* 4 sectors (with a BIG number of cylinders...). This drives
* dosfs just mad... ;-)
*/
- case HDIO_GETGEO:
- if (!loc) {
- err = -EINVAL;
- goto abort_unlock;
- }
- err = put_user (2, (char __user *) &loc->heads);
- if (err)
- goto abort_unlock;
- err = put_user (4, (char __user *) &loc->sectors);
- if (err)
- goto abort_unlock;
- err = put_user(get_capacity(mddev->gendisk)/8,
- (short __user *) &loc->cylinders);
- if (err)
- goto abort_unlock;
- err = put_user (get_start_sect(inode->i_bdev),
- (long __user *) &loc->start);
- goto done_unlock;
}
/*
@@ -3416,6 +3905,7 @@ static struct block_device_operations md_fops =
.open = md_open,
.release = md_release,
.ioctl = md_ioctl,
+ .getgeo = md_getgeo,
.media_changed = md_media_changed,
.revalidate_disk= md_revalidate,
};
@@ -3476,11 +3966,10 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
{
mdk_thread_t *thread;
- thread = kmalloc(sizeof(mdk_thread_t), GFP_KERNEL);
+ thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
if (!thread)
return NULL;
- memset(thread, 0, sizeof(mdk_thread_t));
init_waitqueue_head(&thread->wqueue);
thread->run = run;
@@ -3524,6 +4013,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
+ md_new_event(mddev);
}
/* seq_file implementation /proc/mdstat */
@@ -3664,24 +4154,29 @@ static void md_seq_stop(struct seq_file *seq, void *v)
mddev_put(mddev);
}
+struct mdstat_info {
+ int event;
+};
+
static int md_seq_show(struct seq_file *seq, void *v)
{
mddev_t *mddev = v;
sector_t size;
struct list_head *tmp2;
mdk_rdev_t *rdev;
- int i;
+ struct mdstat_info *mi = seq->private;
struct bitmap *bitmap;
if (v == (void*)1) {
+ struct mdk_personality *pers;
seq_printf(seq, "Personalities : ");
spin_lock(&pers_lock);
- for (i = 0; i < MAX_PERSONALITY; i++)
- if (pers[i])
- seq_printf(seq, "[%s] ", pers[i]->name);
+ list_for_each_entry(pers, &pers_list, list)
+ seq_printf(seq, "[%s] ", pers->name);
spin_unlock(&pers_lock);
seq_printf(seq, "\n");
+ mi->event = atomic_read(&md_event_count);
return 0;
}
if (v == (void*)2) {
@@ -3790,47 +4285,68 @@ static struct seq_operations md_seq_ops = {
static int md_seq_open(struct inode *inode, struct file *file)
{
int error;
+ struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
+ if (mi == NULL)
+ return -ENOMEM;
error = seq_open(file, &md_seq_ops);
+ if (error)
+ kfree(mi);
+ else {
+ struct seq_file *p = file->private_data;
+ p->private = mi;
+ mi->event = atomic_read(&md_event_count);
+ }
return error;
}
+static int md_seq_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *m = file->private_data;
+ struct mdstat_info *mi = m->private;
+ m->private = NULL;
+ kfree(mi);
+ return seq_release(inode, file);
+}
+
+static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
+{
+ struct seq_file *m = filp->private_data;
+ struct mdstat_info *mi = m->private;
+ int mask;
+
+ poll_wait(filp, &md_event_waiters, wait);
+
+ /* always allow read */
+ mask = POLLIN | POLLRDNORM;
+
+ if (mi->event != atomic_read(&md_event_count))
+ mask |= POLLERR | POLLPRI;
+ return mask;
+}
+
static struct file_operations md_seq_fops = {
.open = md_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = md_seq_release,
+ .poll = mdstat_poll,
};
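With mdstat_poll() wired into md_seq_fops, a monitor can sleep in poll() until md_event_count changes instead of re-reading /proc/mdstat on a timer. A minimal userspace sketch, illustrative only (a real monitor such as mdadm handles re-arming and parsing more carefully):

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		int fd = open("/proc/mdstat", O_RDONLY);
		struct pollfd pfd = { .fd = fd, .events = POLLPRI };

		if (fd < 0)
			return 1;
		read(fd, buf, sizeof(buf));		/* note current state; records the event count */
		while (poll(&pfd, 1, -1) > 0) {
			if (pfd.revents & POLLPRI) {
				printf("md event\n");
				lseek(fd, 0, SEEK_SET);	/* re-read from the start to re-arm */
				read(fd, buf, sizeof(buf));
			}
		}
		close(fd);
		return 0;
	}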
-int register_md_personality(int pnum, mdk_personality_t *p)
+int register_md_personality(struct mdk_personality *p)
{
- if (pnum >= MAX_PERSONALITY) {
- printk(KERN_ERR
- "md: tried to install personality %s as nr %d, but max is %lu\n",
- p->name, pnum, MAX_PERSONALITY-1);
- return -EINVAL;
- }
-
spin_lock(&pers_lock);
- if (pers[pnum]) {
- spin_unlock(&pers_lock);
- return -EBUSY;
- }
-
- pers[pnum] = p;
- printk(KERN_INFO "md: %s personality registered as nr %d\n", p->name, pnum);
+ list_add_tail(&p->list, &pers_list);
+ printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
spin_unlock(&pers_lock);
return 0;
}
-int unregister_md_personality(int pnum)
+int unregister_md_personality(struct mdk_personality *p)
{
- if (pnum >= MAX_PERSONALITY)
- return -EINVAL;
-
- printk(KERN_INFO "md: %s personality unregistered\n", pers[pnum]->name);
+ printk(KERN_INFO "md: %s personality unregistered\n", p->name);
spin_lock(&pers_lock);
- pers[pnum] = NULL;
+ list_del_init(&p->list);
spin_unlock(&pers_lock);
return 0;
}
@@ -4012,10 +4528,10 @@ static void md_do_sync(mddev_t *mddev)
printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
- " %d KB/sec/disc.\n", sysctl_speed_limit_min);
+ " %d KB/sec/disc.\n", speed_min(mddev));
printk(KERN_INFO "md: using maximum available idle IO bandwidth "
"(but not more than %d KB/sec) for reconstruction.\n",
- sysctl_speed_limit_max);
+ speed_max(mddev));
is_mddev_idle(mddev); /* this also initializes IO event counters */
/* we don't use the checkpoint if there's a bitmap */
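speed_min() and speed_max() replace the global sysctls throughout md_do_sync(); they are defined earlier in md.c, outside this excerpt. Their assumed shape — a per-array limit that falls back to the system-wide value when unset — is roughly:

	/* assumed helpers, defined earlier in md.c and not shown in this hunk */
	static inline int speed_min(mddev_t *mddev)
	{
		return mddev->sync_speed_min ?
			mddev->sync_speed_min : sysctl_speed_limit_min;
	}

	static inline int speed_max(mddev_t *mddev)
	{
		return mddev->sync_speed_max ?
			mddev->sync_speed_max : sysctl_speed_limit_max;
	}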
@@ -4056,7 +4572,7 @@ static void md_do_sync(mddev_t *mddev)
skipped = 0;
sectors = mddev->pers->sync_request(mddev, j, &skipped,
- currspeed < sysctl_speed_limit_min);
+ currspeed < speed_min(mddev));
if (sectors == 0) {
set_bit(MD_RECOVERY_ERR, &mddev->recovery);
goto out;
@@ -4069,7 +4585,11 @@ static void md_do_sync(mddev_t *mddev)
j += sectors;
if (j>1) mddev->curr_resync = j;
-
+ if (last_check == 0)
+ /* this is the earliest that rebuild will be
+ * visible in /proc/mdstat
+ */
+ md_new_event(mddev);
if (last_check + window > io_sectors || j == max_sectors)
continue;
@@ -4117,8 +4637,8 @@ static void md_do_sync(mddev_t *mddev)
currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
/((jiffies-mddev->resync_mark)/HZ +1) +1;
- if (currspeed > sysctl_speed_limit_min) {
- if ((currspeed > sysctl_speed_limit_max) ||
+ if (currspeed > speed_min(mddev)) {
+ if ((currspeed > speed_max(mddev)) ||
!is_mddev_idle(mddev)) {
msleep(500);
goto repeat;
@@ -4255,6 +4775,7 @@ void md_check_recovery(mddev_t *mddev)
mddev->recovery = 0;
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_new_event(mddev);
goto unlock;
}
/* Clear some bits that don't mean anything, but
@@ -4292,6 +4813,7 @@ void md_check_recovery(mddev_t *mddev)
sprintf(nm, "rd%d", rdev->raid_disk);
sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
spares++;
+ md_new_event(mddev);
} else
break;
}
@@ -4324,9 +4846,9 @@ void md_check_recovery(mddev_t *mddev)
mdname(mddev));
/* leave the spares where they are, it shouldn't hurt */
mddev->recovery = 0;
- } else {
+ } else
md_wakeup_thread(mddev->sync_thread);
- }
+ md_new_event(mddev);
}
unlock:
mddev_unlock(mddev);
@@ -4503,12 +5025,14 @@ static int set_ro(const char *val, struct kernel_param *kp)
int num = simple_strtoul(val, &e, 10);
if (*val && (*e == '\0' || *e == '\n')) {
start_readonly = num;
- return 0;;
+ return 0;
}
return -EINVAL;
}
module_param_call(start_ro, set_ro, get_ro, NULL, 0600);
+module_param(start_dirty_degraded, int, 0644);
+
EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 145cdc5ad00..96f7af4ae40 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -35,15 +35,10 @@
#define NR_RESERVED_BUFS 32
-static mdk_personality_t multipath_personality;
-
-
static void *mp_pool_alloc(gfp_t gfp_flags, void *data)
{
struct multipath_bh *mpb;
- mpb = kmalloc(sizeof(*mpb), gfp_flags);
- if (mpb)
- memset(mpb, 0, sizeof(*mpb));
+ mpb = kzalloc(sizeof(*mpb), gfp_flags);
return mpb;
}
@@ -308,6 +303,7 @@ static void print_multipath_conf (multipath_conf_t *conf)
static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
multipath_conf_t *conf = mddev->private;
+ struct request_queue *q;
int found = 0;
int path;
struct multipath_info *p;
@@ -316,8 +312,8 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
for (path=0; path<mddev->raid_disks; path++)
if ((p=conf->multipaths+path)->rdev == NULL) {
- blk_queue_stack_limits(mddev->queue,
- rdev->bdev->bd_disk->queue);
+ q = rdev->bdev->bd_disk->queue;
+ blk_queue_stack_limits(mddev->queue, q);
/* as we don't honour merge_bvec_fn, we must never risk
* violating it, so limit ->max_sector to one PAGE, as
@@ -325,7 +321,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
* (Note: it is very unlikely that a device with
* merge_bvec_fn will be involved in multipath.)
*/
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
+ if (q->merge_bvec_fn &&
mddev->queue->max_sectors > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
@@ -444,7 +440,7 @@ static int multipath_run (mddev_t *mddev)
* should be freed in multipath_stop()]
*/
- conf = kmalloc(sizeof(multipath_conf_t), GFP_KERNEL);
+ conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
mddev->private = conf;
if (!conf) {
printk(KERN_ERR
@@ -452,9 +448,8 @@ static int multipath_run (mddev_t *mddev)
mdname(mddev));
goto out;
}
- memset(conf, 0, sizeof(*conf));
- conf->multipaths = kmalloc(sizeof(struct multipath_info)*mddev->raid_disks,
+ conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks,
GFP_KERNEL);
if (!conf->multipaths) {
printk(KERN_ERR
@@ -462,7 +457,6 @@ static int multipath_run (mddev_t *mddev)
mdname(mddev));
goto out_free_conf;
}
- memset(conf->multipaths, 0, sizeof(struct multipath_info)*mddev->raid_disks);
conf->working_disks = 0;
ITERATE_RDEV(mddev,rdev,tmp) {
@@ -557,9 +551,10 @@ static int multipath_stop (mddev_t *mddev)
return 0;
}
-static mdk_personality_t multipath_personality=
+static struct mdk_personality multipath_personality =
{
.name = "multipath",
+ .level = LEVEL_MULTIPATH,
.owner = THIS_MODULE,
.make_request = multipath_make_request,
.run = multipath_run,
@@ -572,15 +567,17 @@ static mdk_personality_t multipath_personality=
static int __init multipath_init (void)
{
- return register_md_personality (MULTIPATH, &multipath_personality);
+ return register_md_personality (&multipath_personality);
}
static void __exit multipath_exit (void)
{
- unregister_md_personality (MULTIPATH);
+ unregister_md_personality (&multipath_personality);
}
module_init(multipath_init);
module_exit(multipath_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-7"); /* MULTIPATH */
+MODULE_ALIAS("md-multipath");
+MODULE_ALIAS("md-level--4");
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index fece3277c2a..d03f99cf4b7 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -113,21 +113,16 @@ static int create_strip_zones (mddev_t *mddev)
}
printk("raid0: FINAL %d zones\n", conf->nr_strip_zones);
- conf->strip_zone = kmalloc(sizeof(struct strip_zone)*
+ conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
conf->nr_strip_zones, GFP_KERNEL);
if (!conf->strip_zone)
return 1;
- conf->devlist = kmalloc(sizeof(mdk_rdev_t*)*
+ conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
conf->nr_strip_zones*mddev->raid_disks,
GFP_KERNEL);
if (!conf->devlist)
return 1;
- memset(conf->strip_zone, 0,sizeof(struct strip_zone)*
- conf->nr_strip_zones);
- memset(conf->devlist, 0,
- sizeof(mdk_rdev_t*) * conf->nr_strip_zones * mddev->raid_disks);
-
/* The first zone must contain all devices, so here we check that
* there is a proper alignment of slots to devices and find them all
*/
@@ -280,7 +275,11 @@ static int raid0_run (mddev_t *mddev)
mdk_rdev_t *rdev;
struct list_head *tmp;
- printk("%s: setting max_sectors to %d, segment boundary to %d\n",
+ if (mddev->chunk_size == 0) {
+ printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
+ return -EINVAL;
+ }
+ printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n",
mdname(mddev),
mddev->chunk_size >> 9,
(mddev->chunk_size>>1)-1);
@@ -307,9 +306,6 @@ static int raid0_run (mddev_t *mddev)
printk("raid0 : conf->hash_spacing is %llu blocks.\n",
(unsigned long long)conf->hash_spacing);
{
-#if __GNUC__ < 3
- volatile
-#endif
sector_t s = mddev->array_size;
sector_t space = conf->hash_spacing;
int round;
@@ -361,7 +357,7 @@ static int raid0_run (mddev_t *mddev)
* chunksize should be used in that case.
*/
{
- int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_CACHE_SIZE;
+ int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
mddev->queue->backing_dev_info.ra_pages = 2* stripe;
}
@@ -440,9 +436,6 @@ static int raid0_make_request (request_queue_t *q, struct bio *bio)
{
-#if __GNUC__ < 3
- volatile
-#endif
sector_t x = block >> conf->preshift;
sector_div(x, (u32)conf->hash_spacing);
zone = conf->hash_table[x];
@@ -512,9 +505,10 @@ static void raid0_status (struct seq_file *seq, mddev_t *mddev)
return;
}
-static mdk_personality_t raid0_personality=
+static struct mdk_personality raid0_personality=
{
.name = "raid0",
+ .level = 0,
.owner = THIS_MODULE,
.make_request = raid0_make_request,
.run = raid0_run,
@@ -524,15 +518,17 @@ static mdk_personality_t raid0_personality=
static int __init raid0_init (void)
{
- return register_md_personality (RAID0, &raid0_personality);
+ return register_md_personality (&raid0_personality);
}
static void raid0_exit (void)
{
- unregister_md_personality (RAID0);
+ unregister_md_personality (&raid0_personality);
}
module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
+MODULE_ALIAS("md-raid0");
+MODULE_ALIAS("md-level-0");
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 229d7b20429..a06ff91f27e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -47,10 +47,11 @@
*/
#define NR_RAID1_BIOS 256
-static mdk_personality_t raid1_personality;
static void unplug_slaves(mddev_t *mddev);
+static void allow_barrier(conf_t *conf);
+static void lower_barrier(conf_t *conf);
static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
@@ -59,10 +60,8 @@ static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
int size = offsetof(r1bio_t, bios[pi->raid_disks]);
/* allocate a r1bio with room for raid_disks entries in the bios array */
- r1_bio = kmalloc(size, gfp_flags);
- if (r1_bio)
- memset(r1_bio, 0, size);
- else
+ r1_bio = kzalloc(size, gfp_flags);
+ if (!r1_bio)
unplug_slaves(pi->mddev);
return r1_bio;
@@ -104,15 +103,30 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
}
/*
* Allocate RESYNC_PAGES data pages and attach them to
- * the first bio;
+ * the first bio.
+ * If this is a user-requested check/repair, allocate
+ * RESYNC_PAGES for each bio.
*/
- bio = r1_bio->bios[0];
- for (i = 0; i < RESYNC_PAGES; i++) {
- page = alloc_page(gfp_flags);
- if (unlikely(!page))
- goto out_free_pages;
-
- bio->bi_io_vec[i].bv_page = page;
+ if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
+ j = pi->raid_disks;
+ else
+ j = 1;
+ while(j--) {
+ bio = r1_bio->bios[j];
+ for (i = 0; i < RESYNC_PAGES; i++) {
+ page = alloc_page(gfp_flags);
+ if (unlikely(!page))
+ goto out_free_pages;
+
+ bio->bi_io_vec[i].bv_page = page;
+ }
+ }
+ /* If not user-requested, copy the page pointers to all bios */
+ if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
+ for (i=0; i<RESYNC_PAGES ; i++)
+ for (j=1; j<pi->raid_disks; j++)
+ r1_bio->bios[j]->bi_io_vec[i].bv_page =
+ r1_bio->bios[0]->bi_io_vec[i].bv_page;
}
r1_bio->master_bio = NULL;
@@ -120,8 +134,10 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
return r1_bio;
out_free_pages:
- for ( ; i > 0 ; i--)
- __free_page(bio->bi_io_vec[i-1].bv_page);
+ for (i=0; i < RESYNC_PAGES ; i++)
+ for (j=0 ; j < pi->raid_disks; j++)
+ safe_put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
+ j = -1;
out_free_bio:
while ( ++j < pi->raid_disks )
bio_put(r1_bio->bios[j]);
@@ -132,14 +148,16 @@ out_free_bio:
static void r1buf_pool_free(void *__r1_bio, void *data)
{
struct pool_info *pi = data;
- int i;
+ int i,j;
r1bio_t *r1bio = __r1_bio;
- struct bio *bio = r1bio->bios[0];
- for (i = 0; i < RESYNC_PAGES; i++) {
- __free_page(bio->bi_io_vec[i].bv_page);
- bio->bi_io_vec[i].bv_page = NULL;
- }
+ for (i = 0; i < RESYNC_PAGES; i++)
+ for (j = pi->raid_disks; j-- ;) {
+ if (j == 0 ||
+ r1bio->bios[j]->bi_io_vec[i].bv_page !=
+ r1bio->bios[0]->bi_io_vec[i].bv_page)
+ safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
+ }
for (i=0 ; i < pi->raid_disks; i++)
bio_put(r1bio->bios[i]);
@@ -152,7 +170,7 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
for (i = 0; i < conf->raid_disks; i++) {
struct bio **bio = r1_bio->bios + i;
- if (*bio)
+ if (*bio && *bio != IO_BLOCKED)
bio_put(*bio);
*bio = NULL;
}
@@ -160,20 +178,13 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
static inline void free_r1bio(r1bio_t *r1_bio)
{
- unsigned long flags;
-
conf_t *conf = mddev_to_conf(r1_bio->mddev);
/*
* Wake up any possible resync thread that waits for the device
* to go idle.
*/
- spin_lock_irqsave(&conf->resync_lock, flags);
- if (!--conf->nr_pending) {
- wake_up(&conf->wait_idle);
- wake_up(&conf->wait_resume);
- }
- spin_unlock_irqrestore(&conf->resync_lock, flags);
+ allow_barrier(conf);
put_all_bios(conf, r1_bio);
mempool_free(r1_bio, conf->r1bio_pool);
@@ -182,22 +193,17 @@ static inline void free_r1bio(r1bio_t *r1_bio)
static inline void put_buf(r1bio_t *r1_bio)
{
conf_t *conf = mddev_to_conf(r1_bio->mddev);
- unsigned long flags;
+ int i;
- mempool_free(r1_bio, conf->r1buf_pool);
+ for (i=0; i<conf->raid_disks; i++) {
+ struct bio *bio = r1_bio->bios[i];
+ if (bio->bi_end_io)
+ rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
+ }
- spin_lock_irqsave(&conf->resync_lock, flags);
- if (!conf->barrier)
- BUG();
- --conf->barrier;
- wake_up(&conf->wait_resume);
- wake_up(&conf->wait_idle);
+ mempool_free(r1_bio, conf->r1buf_pool);
- if (!--conf->nr_pending) {
- wake_up(&conf->wait_idle);
- wake_up(&conf->wait_resume);
- }
- spin_unlock_irqrestore(&conf->resync_lock, flags);
+ lower_barrier(conf);
}
static void reschedule_retry(r1bio_t *r1_bio)
@@ -208,8 +214,10 @@ static void reschedule_retry(r1bio_t *r1_bio)
spin_lock_irqsave(&conf->device_lock, flags);
list_add(&r1_bio->retry_list, &conf->retry_list);
+ conf->nr_queued ++;
spin_unlock_irqrestore(&conf->device_lock, flags);
+ wake_up(&conf->wait_barrier);
md_wakeup_thread(mddev->thread);
}
@@ -261,9 +269,9 @@ static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int
/*
* this branch is our 'one mirror IO has finished' event handler:
*/
- if (!uptodate)
- md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
- else
+ update_head_pos(mirror, r1_bio);
+
+ if (uptodate || conf->working_disks <= 1) {
/*
* Set R1BIO_Uptodate in our master bio, so that
* we will return a good error code to the higher
@@ -273,16 +281,11 @@ static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int
* user-side. So if something waits for IO, then it will
* wait for the 'master' bio.
*/
- set_bit(R1BIO_Uptodate, &r1_bio->state);
-
- update_head_pos(mirror, r1_bio);
+ if (uptodate)
+ set_bit(R1BIO_Uptodate, &r1_bio->state);
- /*
- * we have only one bio on the read side
- */
- if (uptodate)
raid_end_bio_io(r1_bio);
- else {
+ } else {
/*
* oops, read error:
*/
@@ -378,7 +381,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
/* free extra copy of the data pages */
int i = bio->bi_vcnt;
while (i--)
- __free_page(bio->bi_io_vec[i].bv_page);
+ safe_put_page(bio->bi_io_vec[i].bv_page);
}
/* clear the bitmap if all writes complete successfully */
bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
@@ -433,11 +436,13 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
new_disk = 0;
for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
+ r1_bio->bios[new_disk] == IO_BLOCKED ||
!rdev || !test_bit(In_sync, &rdev->flags)
|| test_bit(WriteMostly, &rdev->flags);
rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) {
- if (rdev && test_bit(In_sync, &rdev->flags))
+ if (rdev && test_bit(In_sync, &rdev->flags) &&
+ r1_bio->bios[new_disk] != IO_BLOCKED)
wonly_disk = new_disk;
if (new_disk == conf->raid_disks - 1) {
@@ -451,11 +456,13 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
/* make sure the disk is operational */
for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
+ r1_bio->bios[new_disk] == IO_BLOCKED ||
!rdev || !test_bit(In_sync, &rdev->flags) ||
test_bit(WriteMostly, &rdev->flags);
rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) {
- if (rdev && test_bit(In_sync, &rdev->flags))
+ if (rdev && test_bit(In_sync, &rdev->flags) &&
+ r1_bio->bios[new_disk] != IO_BLOCKED)
wonly_disk = new_disk;
if (new_disk <= 0)
@@ -492,7 +499,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
rdev = rcu_dereference(conf->mirrors[disk].rdev);
- if (!rdev ||
+ if (!rdev || r1_bio->bios[disk] == IO_BLOCKED ||
!test_bit(In_sync, &rdev->flags) ||
test_bit(WriteMostly, &rdev->flags))
continue;
@@ -520,7 +527,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
/* cannot risk returning a device that failed
* before we inc'ed nr_pending
*/
- atomic_dec(&rdev->nr_pending);
+ rdev_dec_pending(rdev, conf->mddev);
goto retry;
}
conf->next_seq_sect = this_sector + sectors;
@@ -593,42 +600,119 @@ static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
return ret;
}
-/*
- * Throttle resync depth, so that we can both get proper overlapping of
- * requests, but are still able to handle normal requests quickly.
+/* Barriers....
+ * Sometimes we need to suspend IO while we do something else,
+ * either some resync/recovery, or reconfigure the array.
+ * To do this we raise a 'barrier'.
+ * The 'barrier' is a counter that can be raised multiple times
+ * to count how many activities are happening which preclude
+ * normal IO.
+ * We can only raise the barrier if there is no pending IO.
+ * i.e. if nr_pending == 0.
+ * We choose only to raise the barrier if no-one is waiting for the
+ * barrier to go down. This means that as soon as an IO request
+ * is ready, no other operations which require a barrier will start
+ * until the IO request has had a chance.
+ *
+ * So: regular IO calls 'wait_barrier'. When that returns there
+ * is no background IO happening. It must arrange to call
+ * allow_barrier when it has finished its IO.
+ * Background IO calls must call raise_barrier. Once that returns
+ * there is no normal IO happening. It must arrange to call
+ * lower_barrier when the particular background IO completes.
*/
#define RESYNC_DEPTH 32
-static void device_barrier(conf_t *conf, sector_t sect)
+static void raise_barrier(conf_t *conf)
{
spin_lock_irq(&conf->resync_lock);
- wait_event_lock_irq(conf->wait_idle, !waitqueue_active(&conf->wait_resume),
- conf->resync_lock, raid1_unplug(conf->mddev->queue));
-
- if (!conf->barrier++) {
- wait_event_lock_irq(conf->wait_idle, !conf->nr_pending,
- conf->resync_lock, raid1_unplug(conf->mddev->queue));
- if (conf->nr_pending)
- BUG();
+
+ /* Wait until no block IO is waiting */
+ wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
+ conf->resync_lock,
+ raid1_unplug(conf->mddev->queue));
+
+ /* block any new IO from starting */
+ conf->barrier++;
+
+ /* Now wait for all pending IO to complete */
+ wait_event_lock_irq(conf->wait_barrier,
+ !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
+ conf->resync_lock,
+ raid1_unplug(conf->mddev->queue));
+
+ spin_unlock_irq(&conf->resync_lock);
+}
+
+static void lower_barrier(conf_t *conf)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&conf->resync_lock, flags);
+ conf->barrier--;
+ spin_unlock_irqrestore(&conf->resync_lock, flags);
+ wake_up(&conf->wait_barrier);
+}
+
+static void wait_barrier(conf_t *conf)
+{
+ spin_lock_irq(&conf->resync_lock);
+ if (conf->barrier) {
+ conf->nr_waiting++;
+ wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
+ conf->resync_lock,
+ raid1_unplug(conf->mddev->queue));
+ conf->nr_waiting--;
}
- wait_event_lock_irq(conf->wait_resume, conf->barrier < RESYNC_DEPTH,
- conf->resync_lock, raid1_unplug(conf->mddev->queue));
- conf->next_resync = sect;
+ conf->nr_pending++;
+ spin_unlock_irq(&conf->resync_lock);
+}
+
+static void allow_barrier(conf_t *conf)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&conf->resync_lock, flags);
+ conf->nr_pending--;
+ spin_unlock_irqrestore(&conf->resync_lock, flags);
+ wake_up(&conf->wait_barrier);
+}
+
+static void freeze_array(conf_t *conf)
+{
+ /* stop syncio and normal IO and wait for everything to
+ * go quiet.
+ * We increment barrier and nr_waiting, and then
+ * wait until barrier+nr_pending match nr_queued+2
+ */
+ spin_lock_irq(&conf->resync_lock);
+ conf->barrier++;
+ conf->nr_waiting++;
+ wait_event_lock_irq(conf->wait_barrier,
+ conf->barrier+conf->nr_pending == conf->nr_queued+2,
+ conf->resync_lock,
+ raid1_unplug(conf->mddev->queue));
+ spin_unlock_irq(&conf->resync_lock);
+}
+static void unfreeze_array(conf_t *conf)
+{
+ /* reverse the effect of the freeze */
+ spin_lock_irq(&conf->resync_lock);
+ conf->barrier--;
+ conf->nr_waiting--;
+ wake_up(&conf->wait_barrier);
spin_unlock_irq(&conf->resync_lock);
}
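The four primitives above pair up across two code paths. Condensed into one illustrative sketch (the real calls sit in make_request()/free_r1bio() and sync_request()/put_buf() respectively):

	/* illustrative pairing only; not an actual md code path */
	static void normal_io_sketch(conf_t *conf)
	{
		wait_barrier(conf);	/* blocks while resync holds the barrier,
					 * then counts this request in nr_pending */
		/* ... build and submit the r1bio (make_request) ... */
		allow_barrier(conf);	/* in reality called later, from free_r1bio(),
					 * once the last mirror bio has completed */
	}

	static void resync_io_sketch(conf_t *conf)
	{
		raise_barrier(conf);	/* waits for nr_pending to drain, blocks new IO */
		/* ... issue one window of resync reads/writes (sync_request) ... */
		lower_barrier(conf);	/* in reality called from put_buf() on completion */
	}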
+
/* duplicate the data pages for behind I/O */
static struct page **alloc_behind_pages(struct bio *bio)
{
int i;
struct bio_vec *bvec;
- struct page **pages = kmalloc(bio->bi_vcnt * sizeof(struct page *),
+ struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page *),
GFP_NOIO);
if (unlikely(!pages))
goto do_sync_io;
- memset(pages, 0, bio->bi_vcnt * sizeof(struct page *));
-
bio_for_each_segment(bvec, bio, i) {
pages[i] = alloc_page(GFP_NOIO);
if (unlikely(!pages[i]))
@@ -644,7 +728,7 @@ static struct page **alloc_behind_pages(struct bio *bio)
do_sync_io:
if (pages)
for (i = 0; i < bio->bi_vcnt && pages[i]; i++)
- __free_page(pages[i]);
+ put_page(pages[i]);
kfree(pages);
PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
return NULL;
@@ -678,10 +762,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
*/
md_write_start(mddev, bio); /* wait on superblock update early */
- spin_lock_irq(&conf->resync_lock);
- wait_event_lock_irq(conf->wait_resume, !conf->barrier, conf->resync_lock, );
- conf->nr_pending++;
- spin_unlock_irq(&conf->resync_lock);
+ wait_barrier(conf);
disk_stat_inc(mddev->gendisk, ios[rw]);
disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
@@ -749,7 +830,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
!test_bit(Faulty, &rdev->flags)) {
atomic_inc(&rdev->nr_pending);
if (test_bit(Faulty, &rdev->flags)) {
- atomic_dec(&rdev->nr_pending);
+ rdev_dec_pending(rdev, mddev);
r1_bio->bios[i] = NULL;
} else
r1_bio->bios[i] = bio;
@@ -909,13 +990,8 @@ static void print_conf(conf_t *conf)
static void close_sync(conf_t *conf)
{
- spin_lock_irq(&conf->resync_lock);
- wait_event_lock_irq(conf->wait_resume, !conf->barrier,
- conf->resync_lock, raid1_unplug(conf->mddev->queue));
- spin_unlock_irq(&conf->resync_lock);
-
- if (conf->barrier) BUG();
- if (waitqueue_active(&conf->wait_idle)) BUG();
+ wait_barrier(conf);
+ allow_barrier(conf);
mempool_destroy(conf->r1buf_pool);
conf->r1buf_pool = NULL;
@@ -1015,28 +1091,27 @@ abort:
static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
- conf_t *conf = mddev_to_conf(r1_bio->mddev);
+ int i;
if (bio->bi_size)
return 1;
- if (r1_bio->bios[r1_bio->read_disk] != bio)
- BUG();
- update_head_pos(r1_bio->read_disk, r1_bio);
+ for (i=r1_bio->mddev->raid_disks; i--; )
+ if (r1_bio->bios[i] == bio)
+ break;
+ BUG_ON(i < 0);
+ update_head_pos(i, r1_bio);
/*
* we have read a block, now it needs to be re-written,
* or re-read if the read failed.
* We don't do much here, just schedule handling by raid1d
*/
- if (!uptodate) {
- md_error(r1_bio->mddev,
- conf->mirrors[r1_bio->read_disk].rdev);
- } else
+ if (test_bit(BIO_UPTODATE, &bio->bi_flags))
set_bit(R1BIO_Uptodate, &r1_bio->state);
- rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
- reschedule_retry(r1_bio);
+
+ if (atomic_dec_and_test(&r1_bio->remaining))
+ reschedule_retry(r1_bio);
return 0;
}
@@ -1066,7 +1141,6 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
md_done_sync(mddev, r1_bio->sectors, uptodate);
put_buf(r1_bio);
}
- rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);
return 0;
}
@@ -1079,34 +1153,173 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
bio = r1_bio->bios[r1_bio->read_disk];
-/*
- if (r1_bio->sector == 0) printk("First sync write startss\n");
-*/
- /*
- * schedule writes
- */
+
+ if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+ /* We have read all readable devices. If we haven't
+ * got the block, then there is no hope left.
+ * If we have, then we want to do a comparison
+ * and skip the write if everything is the same.
+ * If any blocks failed to read, then we need to
+ * attempt an over-write
+ */
+ int primary;
+ if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
+ for (i=0; i<mddev->raid_disks; i++)
+ if (r1_bio->bios[i]->bi_end_io == end_sync_read)
+ md_error(mddev, conf->mirrors[i].rdev);
+
+ md_done_sync(mddev, r1_bio->sectors, 1);
+ put_buf(r1_bio);
+ return;
+ }
+ for (primary=0; primary<mddev->raid_disks; primary++)
+ if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
+ test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
+ r1_bio->bios[primary]->bi_end_io = NULL;
+ rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
+ break;
+ }
+ r1_bio->read_disk = primary;
+ for (i=0; i<mddev->raid_disks; i++)
+ if (r1_bio->bios[i]->bi_end_io == end_sync_read &&
+ test_bit(BIO_UPTODATE, &r1_bio->bios[i]->bi_flags)) {
+ int j;
+ int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
+ struct bio *pbio = r1_bio->bios[primary];
+ struct bio *sbio = r1_bio->bios[i];
+ for (j = vcnt; j-- ; )
+ if (memcmp(page_address(pbio->bi_io_vec[j].bv_page),
+ page_address(sbio->bi_io_vec[j].bv_page),
+ PAGE_SIZE))
+ break;
+ if (j >= 0)
+ mddev->resync_mismatches += r1_bio->sectors;
+ if (j < 0 || test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
+ sbio->bi_end_io = NULL;
+ rdev_dec_pending(conf->mirrors[i].rdev, mddev);
+ } else {
+ /* fixup the bio for reuse */
+ sbio->bi_vcnt = vcnt;
+ sbio->bi_size = r1_bio->sectors << 9;
+ sbio->bi_idx = 0;
+ sbio->bi_phys_segments = 0;
+ sbio->bi_hw_segments = 0;
+ sbio->bi_hw_front_size = 0;
+ sbio->bi_hw_back_size = 0;
+ sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
+ sbio->bi_flags |= 1 << BIO_UPTODATE;
+ sbio->bi_next = NULL;
+ sbio->bi_sector = r1_bio->sector +
+ conf->mirrors[i].rdev->data_offset;
+ sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
+ }
+ }
+ }
if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
- /*
- * There is no point trying a read-for-reconstruct as
- * reconstruct is about to be aborted
+ /* ouch - failed to read all of that.
+ * Try some synchronous reads of other devices to get
+ * good data, much like with normal read errors. Only
+ * read into the pages we already have so that we don't
+ * need to re-issue the read request.
+ * We don't need to freeze the array, because being in an
+ * active sync request, there is no normal IO, and
+ * no overlapping syncs.
*/
- char b[BDEVNAME_SIZE];
- printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
- " for block %llu\n",
- bdevname(bio->bi_bdev,b),
- (unsigned long long)r1_bio->sector);
- md_done_sync(mddev, r1_bio->sectors, 0);
- put_buf(r1_bio);
- return;
+ sector_t sect = r1_bio->sector;
+ int sectors = r1_bio->sectors;
+ int idx = 0;
+
+ while(sectors) {
+ int s = sectors;
+ int d = r1_bio->read_disk;
+ int success = 0;
+ mdk_rdev_t *rdev;
+
+ if (s > (PAGE_SIZE>>9))
+ s = PAGE_SIZE >> 9;
+ do {
+ if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
+ rdev = conf->mirrors[d].rdev;
+ if (sync_page_io(rdev->bdev,
+ sect + rdev->data_offset,
+ s<<9,
+ bio->bi_io_vec[idx].bv_page,
+ READ)) {
+ success = 1;
+ break;
+ }
+ }
+ d++;
+ if (d == conf->raid_disks)
+ d = 0;
+ } while (!success && d != r1_bio->read_disk);
+
+ if (success) {
+ int start = d;
+ /* write it back and re-read */
+ set_bit(R1BIO_Uptodate, &r1_bio->state);
+ while (d != r1_bio->read_disk) {
+ if (d == 0)
+ d = conf->raid_disks;
+ d--;
+ if (r1_bio->bios[d]->bi_end_io != end_sync_read)
+ continue;
+ rdev = conf->mirrors[d].rdev;
+ atomic_add(s, &rdev->corrected_errors);
+ if (sync_page_io(rdev->bdev,
+ sect + rdev->data_offset,
+ s<<9,
+ bio->bi_io_vec[idx].bv_page,
+ WRITE) == 0)
+ md_error(mddev, rdev);
+ }
+ d = start;
+ while (d != r1_bio->read_disk) {
+ if (d == 0)
+ d = conf->raid_disks;
+ d--;
+ if (r1_bio->bios[d]->bi_end_io != end_sync_read)
+ continue;
+ rdev = conf->mirrors[d].rdev;
+ if (sync_page_io(rdev->bdev,
+ sect + rdev->data_offset,
+ s<<9,
+ bio->bi_io_vec[idx].bv_page,
+ READ) == 0)
+ md_error(mddev, rdev);
+ }
+ } else {
+ char b[BDEVNAME_SIZE];
+ /* Cannot read from anywhere, array is toast */
+ md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
+ printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
+ " for block %llu\n",
+ bdevname(bio->bi_bdev,b),
+ (unsigned long long)r1_bio->sector);
+ md_done_sync(mddev, r1_bio->sectors, 0);
+ put_buf(r1_bio);
+ return;
+ }
+ sectors -= s;
+ sect += s;
+ idx ++;
+ }
}
+ /*
+ * schedule writes
+ */
atomic_set(&r1_bio->remaining, 1);
for (i = 0; i < disks ; i++) {
wbio = r1_bio->bios[i];
- if (wbio->bi_end_io != end_sync_write)
+ if (wbio->bi_end_io == NULL ||
+ (wbio->bi_end_io == end_sync_read &&
+ (i == r1_bio->read_disk ||
+ !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
continue;
- atomic_inc(&conf->mirrors[i].rdev->nr_pending);
+ wbio->bi_rw = WRITE;
+ wbio->bi_end_io = end_sync_write;
atomic_inc(&r1_bio->remaining);
md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
@@ -1167,6 +1380,7 @@ static void raid1d(mddev_t *mddev)
break;
r1_bio = list_entry(head->prev, r1bio_t, retry_list);
list_del(head->prev);
+ conf->nr_queued--;
spin_unlock_irqrestore(&conf->device_lock, flags);
mddev = r1_bio->mddev;
@@ -1206,6 +1420,86 @@ static void raid1d(mddev_t *mddev)
}
} else {
int disk;
+
+ /* we got a read error. Maybe the drive is bad. Maybe just
+ * the block and we can fix it.
+ * We freeze all other IO, and try reading the block from
+ * other devices. When we find one, we re-write
+ * and check whether that fixes the read error.
+ * This is all done synchronously while the array is
+ * frozen
+ */
+ sector_t sect = r1_bio->sector;
+ int sectors = r1_bio->sectors;
+ freeze_array(conf);
+ if (mddev->ro == 0) while(sectors) {
+ int s = sectors;
+ int d = r1_bio->read_disk;
+ int success = 0;
+
+ if (s > (PAGE_SIZE>>9))
+ s = PAGE_SIZE >> 9;
+
+ do {
+ rdev = conf->mirrors[d].rdev;
+ if (rdev &&
+ test_bit(In_sync, &rdev->flags) &&
+ sync_page_io(rdev->bdev,
+ sect + rdev->data_offset,
+ s<<9,
+ conf->tmppage, READ))
+ success = 1;
+ else {
+ d++;
+ if (d == conf->raid_disks)
+ d = 0;
+ }
+ } while (!success && d != r1_bio->read_disk);
+
+ if (success) {
+ /* write it back and re-read */
+ int start = d;
+ while (d != r1_bio->read_disk) {
+ if (d==0)
+ d = conf->raid_disks;
+ d--;
+ rdev = conf->mirrors[d].rdev;
+ atomic_add(s, &rdev->corrected_errors);
+ if (rdev &&
+ test_bit(In_sync, &rdev->flags)) {
+ if (sync_page_io(rdev->bdev,
+ sect + rdev->data_offset,
+ s<<9, conf->tmppage, WRITE) == 0)
+ /* Well, this device is dead */
+ md_error(mddev, rdev);
+ }
+ }
+ d = start;
+ while (d != r1_bio->read_disk) {
+ if (d==0)
+ d = conf->raid_disks;
+ d--;
+ rdev = conf->mirrors[d].rdev;
+ if (rdev &&
+ test_bit(In_sync, &rdev->flags)) {
+ if (sync_page_io(rdev->bdev,
+ sect + rdev->data_offset,
+ s<<9, conf->tmppage, READ) == 0)
+ /* Well, this device is dead */
+ md_error(mddev, rdev);
+ }
+ }
+ } else {
+ /* Cannot read from anywhere -- bye bye array */
+ md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
+ break;
+ }
+ sectors -= s;
+ sect += s;
+ }
+
+ unfreeze_array(conf);
+
bio = r1_bio->bios[r1_bio->read_disk];
if ((disk=read_balance(conf, r1_bio)) == -1) {
printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
@@ -1214,7 +1508,8 @@ static void raid1d(mddev_t *mddev)
(unsigned long long)r1_bio->sector);
raid_end_bio_io(r1_bio);
} else {
- r1_bio->bios[r1_bio->read_disk] = NULL;
+ r1_bio->bios[r1_bio->read_disk] =
+ mddev->ro ? IO_BLOCKED : NULL;
r1_bio->read_disk = disk;
bio_put(bio);
bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
@@ -1269,14 +1564,13 @@ static int init_resync(conf_t *conf)
static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
conf_t *conf = mddev_to_conf(mddev);
- mirror_info_t *mirror;
r1bio_t *r1_bio;
struct bio *bio;
sector_t max_sector, nr_sectors;
- int disk;
+ int disk = -1;
int i;
- int wonly;
- int write_targets = 0;
+ int wonly = -1;
+ int write_targets = 0, read_targets = 0;
int sync_blocks;
int still_degraded = 0;
@@ -1317,55 +1611,35 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
return sync_blocks;
}
/*
- * If there is non-resync activity waiting for us then
- * put in a delay to throttle resync.
+ * If there is non-resync activity waiting for a turn,
+ * and resync is going fast enough,
+ * then let it through before starting on this new sync request.
*/
- if (!go_faster && waitqueue_active(&conf->wait_resume))
+ if (!go_faster && conf->nr_waiting)
msleep_interruptible(1000);
- device_barrier(conf, sector_nr + RESYNC_SECTORS);
-
- /*
- * If reconstructing, and >1 working disc,
- * could dedicate one to rebuild and others to
- * service read requests ..
- */
- disk = conf->last_used;
- /* make sure disk is operational */
- wonly = disk;
- while (conf->mirrors[disk].rdev == NULL ||
- !test_bit(In_sync, &conf->mirrors[disk].rdev->flags) ||
- test_bit(WriteMostly, &conf->mirrors[disk].rdev->flags)
- ) {
- if (conf->mirrors[disk].rdev &&
- test_bit(In_sync, &conf->mirrors[disk].rdev->flags))
- wonly = disk;
- if (disk <= 0)
- disk = conf->raid_disks;
- disk--;
- if (disk == conf->last_used) {
- disk = wonly;
- break;
- }
- }
- conf->last_used = disk;
- atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
+ raise_barrier(conf);
- mirror = conf->mirrors + disk;
+ conf->next_resync = sector_nr;
r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
-
- spin_lock_irq(&conf->resync_lock);
- conf->nr_pending++;
- spin_unlock_irq(&conf->resync_lock);
+ rcu_read_lock();
+ /*
+ * If we get a correctably read error during resync or recovery,
+ * we might want to read from a different device. So we
+ * flag all drives that could conceivably be read from for READ,
+ * and any others (which will be non-In_sync devices) for WRITE.
+ * If a read fails, we try reading from something else for which READ
+ * is OK.
+ */
r1_bio->mddev = mddev;
r1_bio->sector = sector_nr;
r1_bio->state = 0;
set_bit(R1BIO_IsSync, &r1_bio->state);
- r1_bio->read_disk = disk;
for (i=0; i < conf->raid_disks; i++) {
+ mdk_rdev_t *rdev;
bio = r1_bio->bios[i];
/* take from bio_init */
@@ -1380,35 +1654,49 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
bio->bi_end_io = NULL;
bio->bi_private = NULL;
- if (i == disk) {
- bio->bi_rw = READ;
- bio->bi_end_io = end_sync_read;
- } else if (conf->mirrors[i].rdev == NULL ||
- test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
+ rdev = rcu_dereference(conf->mirrors[i].rdev);
+ if (rdev == NULL ||
+ test_bit(Faulty, &rdev->flags)) {
still_degraded = 1;
continue;
- } else if (!test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
- sector_nr + RESYNC_SECTORS > mddev->recovery_cp ||
- test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+ } else if (!test_bit(In_sync, &rdev->flags)) {
bio->bi_rw = WRITE;
bio->bi_end_io = end_sync_write;
write_targets ++;
- } else
- /* no need to read or write here */
- continue;
- bio->bi_sector = sector_nr + conf->mirrors[i].rdev->data_offset;
- bio->bi_bdev = conf->mirrors[i].rdev->bdev;
+ } else {
+ /* may need to read from here */
+ bio->bi_rw = READ;
+ bio->bi_end_io = end_sync_read;
+ if (test_bit(WriteMostly, &rdev->flags)) {
+ if (wonly < 0)
+ wonly = i;
+ } else {
+ if (disk < 0)
+ disk = i;
+ }
+ read_targets++;
+ }
+ atomic_inc(&rdev->nr_pending);
+ bio->bi_sector = sector_nr + rdev->data_offset;
+ bio->bi_bdev = rdev->bdev;
bio->bi_private = r1_bio;
}
+ rcu_read_unlock();
+ if (disk < 0)
+ disk = wonly;
+ r1_bio->read_disk = disk;
+
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
+ /* extra read targets are also write targets */
+ write_targets += read_targets-1;
- if (write_targets == 0) {
+ if (write_targets == 0 || read_targets == 0) {
/* There is nowhere to write, so all non-sync
* drives must be failed - so we are finished
*/
sector_t rv = max_sector - sector_nr;
*skipped = 1;
put_buf(r1_bio);
- rdev_dec_pending(conf->mirrors[disk].rdev, mddev);
return rv;
}
@@ -1436,10 +1724,10 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
for (i=0 ; i < conf->raid_disks; i++) {
bio = r1_bio->bios[i];
if (bio->bi_end_io) {
- page = r1_bio->bios[0]->bi_io_vec[bio->bi_vcnt].bv_page;
+ page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
if (bio_add_page(bio, page, len, 0) == 0) {
/* stop here */
- r1_bio->bios[0]->bi_io_vec[bio->bi_vcnt].bv_page = page;
+ bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
while (i > 0) {
i--;
bio = r1_bio->bios[i];
@@ -1459,12 +1747,28 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
sync_blocks -= (len>>9);
} while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
bio_full:
- bio = r1_bio->bios[disk];
r1_bio->sectors = nr_sectors;
- md_sync_acct(mirror->rdev->bdev, nr_sectors);
+ /* For a user-requested sync, we read all readable devices and do a
+ * compare
+ */
+ if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+ atomic_set(&r1_bio->remaining, read_targets);
+ for (i=0; i<conf->raid_disks; i++) {
+ bio = r1_bio->bios[i];
+ if (bio->bi_end_io == end_sync_read) {
+ md_sync_acct(conf->mirrors[i].rdev->bdev, nr_sectors);
+ generic_make_request(bio);
+ }
+ }
+ } else {
+ atomic_set(&r1_bio->remaining, 1);
+ bio = r1_bio->bios[r1_bio->read_disk];
+ md_sync_acct(conf->mirrors[r1_bio->read_disk].rdev->bdev,
+ nr_sectors);
+ generic_make_request(bio);
- generic_make_request(bio);
+ }
return nr_sectors;
}
@@ -1487,18 +1791,19 @@ static int run(mddev_t *mddev)
* bookkeeping area. [whatever we allocate in run(),
* should be freed in stop()]
*/
- conf = kmalloc(sizeof(conf_t), GFP_KERNEL);
+ conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
mddev->private = conf;
if (!conf)
goto out_no_mem;
- memset(conf, 0, sizeof(*conf));
- conf->mirrors = kmalloc(sizeof(struct mirror_info)*mddev->raid_disks,
+ conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
GFP_KERNEL);
if (!conf->mirrors)
goto out_no_mem;
- memset(conf->mirrors, 0, sizeof(struct mirror_info)*mddev->raid_disks);
+ conf->tmppage = alloc_page(GFP_KERNEL);
+ if (!conf->tmppage)
+ goto out_no_mem;
conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
if (!conf->poolinfo)
@@ -1542,8 +1847,7 @@ static int run(mddev_t *mddev)
mddev->recovery_cp = MaxSector;
spin_lock_init(&conf->resync_lock);
- init_waitqueue_head(&conf->wait_idle);
- init_waitqueue_head(&conf->wait_resume);
+ init_waitqueue_head(&conf->wait_barrier);
bio_list_init(&conf->pending_bio_list);
bio_list_init(&conf->flushing_bio_list);
@@ -1583,7 +1887,6 @@ static int run(mddev_t *mddev)
mdname(mddev));
goto out_free_conf;
}
- if (mddev->bitmap) mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
printk(KERN_INFO
"raid1: raid set %s active with %d out of %d mirrors\n",
@@ -1608,6 +1911,7 @@ out_free_conf:
if (conf->r1bio_pool)
mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
+ safe_put_page(conf->tmppage);
kfree(conf->poolinfo);
kfree(conf);
mddev->private = NULL;
@@ -1706,19 +2010,14 @@ static int raid1_reshape(mddev_t *mddev, int raid_disks)
kfree(newpoolinfo);
return -ENOMEM;
}
- newmirrors = kmalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL);
+ newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL);
if (!newmirrors) {
kfree(newpoolinfo);
mempool_destroy(newpool);
return -ENOMEM;
}
- memset(newmirrors, 0, sizeof(struct mirror_info)*raid_disks);
- spin_lock_irq(&conf->resync_lock);
- conf->barrier++;
- wait_event_lock_irq(conf->wait_idle, !conf->nr_pending,
- conf->resync_lock, raid1_unplug(mddev->queue));
- spin_unlock_irq(&conf->resync_lock);
+ raise_barrier(conf);
/* ok, everything is stopped */
oldpool = conf->r1bio_pool;
@@ -1738,12 +2037,7 @@ static int raid1_reshape(mddev_t *mddev, int raid_disks)
conf->raid_disks = mddev->raid_disks = raid_disks;
conf->last_used = 0; /* just make sure it is in-range */
- spin_lock_irq(&conf->resync_lock);
- conf->barrier--;
- spin_unlock_irq(&conf->resync_lock);
- wake_up(&conf->wait_resume);
- wake_up(&conf->wait_idle);
-
+ lower_barrier(conf);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
@@ -1758,33 +2052,19 @@ static void raid1_quiesce(mddev_t *mddev, int state)
switch(state) {
case 1:
- spin_lock_irq(&conf->resync_lock);
- conf->barrier++;
- wait_event_lock_irq(conf->wait_idle, !conf->nr_pending,
- conf->resync_lock, raid1_unplug(mddev->queue));
- spin_unlock_irq(&conf->resync_lock);
+ raise_barrier(conf);
break;
case 0:
- spin_lock_irq(&conf->resync_lock);
- conf->barrier--;
- spin_unlock_irq(&conf->resync_lock);
- wake_up(&conf->wait_resume);
- wake_up(&conf->wait_idle);
+ lower_barrier(conf);
break;
}
- if (mddev->thread) {
- if (mddev->bitmap)
- mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
- else
- mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
- md_wakeup_thread(mddev->thread);
- }
}
-static mdk_personality_t raid1_personality =
+static struct mdk_personality raid1_personality =
{
.name = "raid1",
+ .level = 1,
.owner = THIS_MODULE,
.make_request = make_request,
.run = run,
@@ -1802,15 +2082,17 @@ static mdk_personality_t raid1_personality =
static int __init raid_init(void)
{
- return register_md_personality(RAID1, &raid1_personality);
+ return register_md_personality(&raid1_personality);
}
static void raid_exit(void)
{
- unregister_md_personality(RAID1);
+ unregister_md_personality(&raid1_personality);
}
module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
+MODULE_ALIAS("md-raid1");
+MODULE_ALIAS("md-level-1");
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 713dc9c2c73..9e658e519a2 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -18,7 +18,9 @@
* Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include "dm-bio-list.h"
#include <linux/raid/raid10.h>
+#include <linux/raid/bitmap.h>
/*
* RAID10 provides a combination of RAID0 and RAID1 functionality.
@@ -47,6 +49,9 @@
static void unplug_slaves(mddev_t *mddev);
+static void allow_barrier(conf_t *conf);
+static void lower_barrier(conf_t *conf);
+
static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
conf_t *conf = data;
@@ -54,10 +59,8 @@ static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
int size = offsetof(struct r10bio_s, devs[conf->copies]);
/* allocate a r10bio with room for raid_disks entries in the bios array */
- r10_bio = kmalloc(size, gfp_flags);
- if (r10_bio)
- memset(r10_bio, 0, size);
- else
+ r10_bio = kzalloc(size, gfp_flags);
+ if (!r10_bio)
unplug_slaves(conf->mddev);
return r10_bio;
@@ -129,10 +132,10 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
out_free_pages:
for ( ; i > 0 ; i--)
- __free_page(bio->bi_io_vec[i-1].bv_page);
+ safe_put_page(bio->bi_io_vec[i-1].bv_page);
while (j--)
for (i = 0; i < RESYNC_PAGES ; i++)
- __free_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
+ safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
j = -1;
out_free_bio:
while ( ++j < nalloc )
@@ -152,7 +155,7 @@ static void r10buf_pool_free(void *__r10_bio, void *data)
struct bio *bio = r10bio->devs[j].bio;
if (bio) {
for (i = 0; i < RESYNC_PAGES; i++) {
- __free_page(bio->bi_io_vec[i].bv_page);
+ safe_put_page(bio->bi_io_vec[i].bv_page);
bio->bi_io_vec[i].bv_page = NULL;
}
bio_put(bio);
@@ -167,7 +170,7 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
for (i = 0; i < conf->copies; i++) {
struct bio **bio = & r10_bio->devs[i].bio;
- if (*bio)
+ if (*bio && *bio != IO_BLOCKED)
bio_put(*bio);
*bio = NULL;
}
@@ -175,20 +178,13 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
static inline void free_r10bio(r10bio_t *r10_bio)
{
- unsigned long flags;
-
conf_t *conf = mddev_to_conf(r10_bio->mddev);
/*
* Wake up any possible resync thread that waits for the device
* to go idle.
*/
- spin_lock_irqsave(&conf->resync_lock, flags);
- if (!--conf->nr_pending) {
- wake_up(&conf->wait_idle);
- wake_up(&conf->wait_resume);
- }
- spin_unlock_irqrestore(&conf->resync_lock, flags);
+ allow_barrier(conf);
put_all_bios(conf, r10_bio);
mempool_free(r10_bio, conf->r10bio_pool);
@@ -197,22 +193,10 @@ static inline void free_r10bio(r10bio_t *r10_bio)
static inline void put_buf(r10bio_t *r10_bio)
{
conf_t *conf = mddev_to_conf(r10_bio->mddev);
- unsigned long flags;
mempool_free(r10_bio, conf->r10buf_pool);
- spin_lock_irqsave(&conf->resync_lock, flags);
- if (!conf->barrier)
- BUG();
- --conf->barrier;
- wake_up(&conf->wait_resume);
- wake_up(&conf->wait_idle);
-
- if (!--conf->nr_pending) {
- wake_up(&conf->wait_idle);
- wake_up(&conf->wait_resume);
- }
- spin_unlock_irqrestore(&conf->resync_lock, flags);
+ lower_barrier(conf);
}
static void reschedule_retry(r10bio_t *r10_bio)
@@ -223,6 +207,7 @@ static void reschedule_retry(r10bio_t *r10_bio)
spin_lock_irqsave(&conf->device_lock, flags);
list_add(&r10_bio->retry_list, &conf->retry_list);
+ conf->nr_queued ++;
spin_unlock_irqrestore(&conf->device_lock, flags);
md_wakeup_thread(mddev->thread);
@@ -268,9 +253,9 @@ static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int
/*
* this branch is our 'one mirror IO has finished' event handler:
*/
- if (!uptodate)
- md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
- else
+ update_head_pos(slot, r10_bio);
+
+ if (uptodate) {
/*
* Set R10BIO_Uptodate in our master bio, so that
* we will return a good error code to the higher
@@ -281,15 +266,8 @@ static int raid10_end_read_request(struct bio *bio, unsigned int bytes_done, int
* wait for the 'master' bio.
*/
set_bit(R10BIO_Uptodate, &r10_bio->state);
-
- update_head_pos(slot, r10_bio);
-
- /*
- * we have only one bio on the read side
- */
- if (uptodate)
raid_end_bio_io(r10_bio);
- else {
+ } else {
/*
* oops, read error:
*/
@@ -322,9 +300,11 @@ static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, in
/*
* this branch is our 'one mirror IO has finished' event handler:
*/
- if (!uptodate)
+ if (!uptodate) {
md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
- else
+ /* an I/O failed, we can't clear the bitmap */
+ set_bit(R10BIO_Degraded, &r10_bio->state);
+ } else
/*
* Set R10BIO_Uptodate in our master bio, so that
* we will return a good error code to the higher
@@ -344,6 +324,11 @@ static int raid10_end_write_request(struct bio *bio, unsigned int bytes_done, in
* already.
*/
if (atomic_dec_and_test(&r10_bio->remaining)) {
+ /* clear the bitmap if all writes complete successfully */
+ bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
+ r10_bio->sectors,
+ !test_bit(R10BIO_Degraded, &r10_bio->state),
+ 0);
md_write_end(r10_bio->mddev);
raid_end_bio_io(r10_bio);
}
@@ -502,8 +487,9 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
rcu_read_lock();
/*
* Check if we can balance. We can balance on the whole
- * device if no resync is going on, or below the resync window.
- * We take the first readable disk when above the resync window.
+ * device if no resync is going on (recovery is ok), or below
+ * the resync window. We take the first readable disk when
+ * above the resync window.
*/
if (conf->mddev->recovery_cp < MaxSector
&& (this_sector + sectors >= conf->next_resync)) {
@@ -512,6 +498,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
disk = r10_bio->devs[slot].devnum;
while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
+ r10_bio->devs[slot].bio == IO_BLOCKED ||
!test_bit(In_sync, &rdev->flags)) {
slot++;
if (slot == conf->copies) {
@@ -529,6 +516,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
slot = 0;
disk = r10_bio->devs[slot].devnum;
while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
+ r10_bio->devs[slot].bio == IO_BLOCKED ||
!test_bit(In_sync, &rdev->flags)) {
slot ++;
if (slot == conf->copies) {
@@ -549,6 +537,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL ||
+ r10_bio->devs[nslot].bio == IO_BLOCKED ||
!test_bit(In_sync, &rdev->flags))
continue;
@@ -607,7 +596,10 @@ static void unplug_slaves(mddev_t *mddev)
static void raid10_unplug(request_queue_t *q)
{
+ mddev_t *mddev = q->queuedata;
+
unplug_slaves(q->queuedata);
+ md_wakeup_thread(mddev->thread);
}
static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
@@ -640,27 +632,107 @@ static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
return ret;
}
-/*
- * Throttle resync depth, so that we can both get proper overlapping of
- * requests, but are still able to handle normal requests quickly.
+/* Barriers....
+ * Sometimes we need to suspend IO while we do something else,
+ * either some resync/recovery, or reconfigure the array.
+ * To do this we raise a 'barrier'.
+ * The 'barrier' is a counter that can be raised multiple times
+ * to count how many activities are happening which preclude
+ * normal IO.
+ * We can only raise the barrier if there is no pending IO.
+ * i.e. if nr_pending == 0.
+ * We choose only to raise the barrier if no-one is waiting for the
+ * barrier to go down. This means that as soon as an IO request
+ * is ready, no other operations which require a barrier will start
+ * until the IO request has had a chance.
+ *
+ * So: regular IO calls 'wait_barrier'. When that returns there
+ * is no background IO happening. It must arrange to call
+ * allow_barrier when it has finished its IO.
+ * Background IO calls must call raise_barrier. Once that returns
+ * there is no normal IO happening. It must arrange to call
+ * lower_barrier when the particular background IO completes.
*/
#define RESYNC_DEPTH 32
-static void device_barrier(conf_t *conf, sector_t sect)
+static void raise_barrier(conf_t *conf, int force)
+{
+ BUG_ON(force && !conf->barrier);
+ spin_lock_irq(&conf->resync_lock);
+
+ /* Wait until no block IO is waiting (unless 'force') */
+ wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
+ conf->resync_lock,
+ raid10_unplug(conf->mddev->queue));
+
+ /* block any new IO from starting */
+ conf->barrier++;
+
+ /* Now wait for all pending IO to complete */
+ wait_event_lock_irq(conf->wait_barrier,
+ !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
+ conf->resync_lock,
+ raid10_unplug(conf->mddev->queue));
+
+ spin_unlock_irq(&conf->resync_lock);
+}
+
+static void lower_barrier(conf_t *conf)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&conf->resync_lock, flags);
+ conf->barrier--;
+ spin_unlock_irqrestore(&conf->resync_lock, flags);
+ wake_up(&conf->wait_barrier);
+}
+
+static void wait_barrier(conf_t *conf)
{
spin_lock_irq(&conf->resync_lock);
- wait_event_lock_irq(conf->wait_idle, !waitqueue_active(&conf->wait_resume),
- conf->resync_lock, unplug_slaves(conf->mddev));
-
- if (!conf->barrier++) {
- wait_event_lock_irq(conf->wait_idle, !conf->nr_pending,
- conf->resync_lock, unplug_slaves(conf->mddev));
- if (conf->nr_pending)
- BUG();
+ if (conf->barrier) {
+ conf->nr_waiting++;
+ wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
+ conf->resync_lock,
+ raid10_unplug(conf->mddev->queue));
+ conf->nr_waiting--;
}
- wait_event_lock_irq(conf->wait_resume, conf->barrier < RESYNC_DEPTH,
- conf->resync_lock, unplug_slaves(conf->mddev));
- conf->next_resync = sect;
+ conf->nr_pending++;
+ spin_unlock_irq(&conf->resync_lock);
+}
+
+static void allow_barrier(conf_t *conf)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&conf->resync_lock, flags);
+ conf->nr_pending--;
+ spin_unlock_irqrestore(&conf->resync_lock, flags);
+ wake_up(&conf->wait_barrier);
+}
+
+static void freeze_array(conf_t *conf)
+{
+ /* stop syncio and normal IO and wait for everything to
+ * go quiet.
+ * We increment barrier and nr_waiting, and then
+ * wait until barrier+nr_pending match nr_queued+2
+ */
+ spin_lock_irq(&conf->resync_lock);
+ conf->barrier++;
+ conf->nr_waiting++;
+ wait_event_lock_irq(conf->wait_barrier,
+ conf->barrier+conf->nr_pending == conf->nr_queued+2,
+ conf->resync_lock,
+ raid10_unplug(conf->mddev->queue));
+ spin_unlock_irq(&conf->resync_lock);
+}
+
+static void unfreeze_array(conf_t *conf)
+{
+ /* reverse the effect of the freeze */
+ spin_lock_irq(&conf->resync_lock);
+ conf->barrier--;
+ conf->nr_waiting--;
+ wake_up(&conf->wait_barrier);
spin_unlock_irq(&conf->resync_lock);
}
@@ -674,6 +746,8 @@ static int make_request(request_queue_t *q, struct bio * bio)
int i;
int chunk_sects = conf->chunk_mask + 1;
const int rw = bio_data_dir(bio);
+ struct bio_list bl;
+ unsigned long flags;
if (unlikely(bio_barrier(bio))) {
bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
@@ -719,10 +793,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
* thread has put up a bar for new requests.
* Continue immediately if no resync is active currently.
*/
- spin_lock_irq(&conf->resync_lock);
- wait_event_lock_irq(conf->wait_resume, !conf->barrier, conf->resync_lock, );
- conf->nr_pending++;
- spin_unlock_irq(&conf->resync_lock);
+ wait_barrier(conf);
disk_stat_inc(mddev->gendisk, ios[rw]);
disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
@@ -734,6 +805,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
r10_bio->mddev = mddev;
r10_bio->sector = bio->bi_sector;
+ r10_bio->state = 0;
if (rw == READ) {
/*
@@ -778,13 +850,16 @@ static int make_request(request_queue_t *q, struct bio * bio)
!test_bit(Faulty, &rdev->flags)) {
atomic_inc(&rdev->nr_pending);
r10_bio->devs[i].bio = bio;
- } else
+ } else {
r10_bio->devs[i].bio = NULL;
+ set_bit(R10BIO_Degraded, &r10_bio->state);
+ }
}
rcu_read_unlock();
- atomic_set(&r10_bio->remaining, 1);
+ atomic_set(&r10_bio->remaining, 0);
+ bio_list_init(&bl);
for (i = 0; i < conf->copies; i++) {
struct bio *mbio;
int d = r10_bio->devs[i].devnum;
@@ -802,13 +877,14 @@ static int make_request(request_queue_t *q, struct bio * bio)
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
- generic_make_request(mbio);
+ bio_list_add(&bl, mbio);
}
- if (atomic_dec_and_test(&r10_bio->remaining)) {
- md_write_end(mddev);
- raid_end_bio_io(r10_bio);
- }
+ bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
+ spin_lock_irqsave(&conf->device_lock, flags);
+ bio_list_merge(&conf->pending_bio_list, &bl);
+ blk_plug_device(mddev->queue);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
return 0;
}
@@ -897,13 +973,8 @@ static void print_conf(conf_t *conf)
static void close_sync(conf_t *conf)
{
- spin_lock_irq(&conf->resync_lock);
- wait_event_lock_irq(conf->wait_resume, !conf->barrier,
- conf->resync_lock, unplug_slaves(conf->mddev));
- spin_unlock_irq(&conf->resync_lock);
-
- if (conf->barrier) BUG();
- if (waitqueue_active(&conf->wait_idle)) BUG();
+ wait_barrier(conf);
+ allow_barrier(conf);
mempool_destroy(conf->r10buf_pool);
conf->r10buf_pool = NULL;
@@ -971,7 +1042,12 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
if (!enough(conf))
return 0;
- for (mirror=0; mirror < mddev->raid_disks; mirror++)
+ if (rdev->saved_raid_disk >= 0 &&
+ conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
+ mirror = rdev->saved_raid_disk;
+ else
+ mirror = 0;
+ for ( ; mirror < mddev->raid_disks; mirror++)
if ( !(p=conf->mirrors+mirror)->rdev) {
blk_queue_stack_limits(mddev->queue,
@@ -987,6 +1063,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
p->head_position = 0;
rdev->raid_disk = mirror;
found = 1;
+ if (rdev->saved_raid_disk != mirror)
+ conf->fullsync = 1;
rcu_assign_pointer(p->rdev, rdev);
break;
}
@@ -1027,7 +1105,6 @@ abort:
static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
conf_t *conf = mddev_to_conf(r10_bio->mddev);
int i,d;
@@ -1042,9 +1119,16 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
BUG();
update_head_pos(i, r10_bio);
d = r10_bio->devs[i].devnum;
- if (!uptodate)
- md_error(r10_bio->mddev,
- conf->mirrors[d].rdev);
+
+ if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+ set_bit(R10BIO_Uptodate, &r10_bio->state);
+ else {
+ atomic_add(r10_bio->sectors,
+ &conf->mirrors[d].rdev->corrected_errors);
+ if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
+ md_error(r10_bio->mddev,
+ conf->mirrors[d].rdev);
+ }
/* for reconstruct, we always reschedule after a read.
* for resync, only after all reads
@@ -1132,23 +1216,32 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
fbio = r10_bio->devs[i].bio;
/* now find blocks with errors */
- for (i=first+1 ; i < conf->copies ; i++) {
- int vcnt, j, d;
+ for (i=0 ; i < conf->copies ; i++) {
+ int j, d;
+ int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
- if (!test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
- continue;
- /* We know that the bi_io_vec layout is the same for
- * both 'first' and 'i', so we just compare them.
- * All vec entries are PAGE_SIZE;
- */
tbio = r10_bio->devs[i].bio;
- vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
- for (j = 0; j < vcnt; j++)
- if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
- page_address(tbio->bi_io_vec[j].bv_page),
- PAGE_SIZE))
- break;
- if (j == vcnt)
+
+ if (tbio->bi_end_io != end_sync_read)
+ continue;
+ if (i == first)
+ continue;
+ if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
+ /* We know that the bi_io_vec layout is the same for
+ * both 'first' and 'i', so we just compare them.
+ * All vec entries are PAGE_SIZE;
+ */
+ for (j = 0; j < vcnt; j++)
+ if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
+ page_address(tbio->bi_io_vec[j].bv_page),
+ PAGE_SIZE))
+ break;
+ if (j == vcnt)
+ continue;
+ mddev->resync_mismatches += r10_bio->sectors;
+ }
+ if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
+ /* Don't fix anything. */
continue;
/* Ok, we need to write this bio
* First we need to fixup bv_offset, bv_len and
@@ -1227,7 +1320,10 @@ static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
- generic_make_request(wbio);
+ if (test_bit(R10BIO_Uptodate, &r10_bio->state))
+ generic_make_request(wbio);
+ else
+ bio_endio(wbio, wbio->bi_size, -EIO);
}
@@ -1254,10 +1350,31 @@ static void raid10d(mddev_t *mddev)
for (;;) {
char b[BDEVNAME_SIZE];
spin_lock_irqsave(&conf->device_lock, flags);
+
+ if (conf->pending_bio_list.head) {
+ bio = bio_list_get(&conf->pending_bio_list);
+ blk_remove_plug(mddev->queue);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ /* flush any pending bitmap writes to disk before proceeding w/ I/O */
+ if (bitmap_unplug(mddev->bitmap) != 0)
+ printk("%s: bitmap file write failed!\n", mdname(mddev));
+
+ while (bio) { /* submit pending writes */
+ struct bio *next = bio->bi_next;
+ bio->bi_next = NULL;
+ generic_make_request(bio);
+ bio = next;
+ }
+ unplug = 1;
+
+ continue;
+ }
+
if (list_empty(head))
break;
r10_bio = list_entry(head->prev, r10bio_t, retry_list);
list_del(head->prev);
+ conf->nr_queued--;
spin_unlock_irqrestore(&conf->device_lock, flags);
mddev = r10_bio->mddev;
@@ -1270,8 +1387,96 @@ static void raid10d(mddev_t *mddev)
unplug = 1;
} else {
int mirror;
+ /* we got a read error. Maybe the drive is bad, or maybe just
+ * this block is bad and we can fix it.
+ * We freeze all other IO, and try reading the block from
+ * other devices. When we find one, we re-write
+ * and check whether that fixes the read error.
+ * This is all done synchronously while the array is
+ * frozen.
+ */
+ int sect = 0; /* Offset from r10_bio->sector */
+ int sectors = r10_bio->sectors;
+ freeze_array(conf);
+ if (mddev->ro == 0) while(sectors) {
+ int s = sectors;
+ int sl = r10_bio->read_slot;
+ int success = 0;
+
+ if (s > (PAGE_SIZE>>9))
+ s = PAGE_SIZE >> 9;
+
+ do {
+ int d = r10_bio->devs[sl].devnum;
+ rdev = conf->mirrors[d].rdev;
+ if (rdev &&
+ test_bit(In_sync, &rdev->flags) &&
+ sync_page_io(rdev->bdev,
+ r10_bio->devs[sl].addr +
+ sect + rdev->data_offset,
+ s<<9,
+ conf->tmppage, READ))
+ success = 1;
+ else {
+ sl++;
+ if (sl == conf->copies)
+ sl = 0;
+ }
+ } while (!success && sl != r10_bio->read_slot);
+
+ if (success) {
+ int start = sl;
+ /* write it back and re-read */
+ while (sl != r10_bio->read_slot) {
+ int d;
+ if (sl==0)
+ sl = conf->copies;
+ sl--;
+ d = r10_bio->devs[sl].devnum;
+ rdev = conf->mirrors[d].rdev;
+ if (rdev &&
+ test_bit(In_sync, &rdev->flags)) {
+ atomic_add(s, &rdev->corrected_errors);
+ if (sync_page_io(rdev->bdev,
+ r10_bio->devs[sl].addr +
+ sect + rdev->data_offset,
+ s<<9, conf->tmppage, WRITE) == 0)
+ /* Well, this device is dead */
+ md_error(mddev, rdev);
+ }
+ }
+ sl = start;
+ while (sl != r10_bio->read_slot) {
+ int d;
+ if (sl==0)
+ sl = conf->copies;
+ sl--;
+ d = r10_bio->devs[sl].devnum;
+ rdev = conf->mirrors[d].rdev;
+ if (rdev &&
+ test_bit(In_sync, &rdev->flags)) {
+ if (sync_page_io(rdev->bdev,
+ r10_bio->devs[sl].addr +
+ sect + rdev->data_offset,
+ s<<9, conf->tmppage, READ) == 0)
+ /* Well, this device is dead */
+ md_error(mddev, rdev);
+ }
+ }
+ } else {
+ /* Cannot read from anywhere -- bye bye array */
+ md_error(mddev, conf->mirrors[r10_bio->devs[r10_bio->read_slot].devnum].rdev);
+ break;
+ }
+ sectors -= s;
+ sect += s;
+ }
+
+ unfreeze_array(conf);
+
bio = r10_bio->devs[r10_bio->read_slot].bio;
- r10_bio->devs[r10_bio->read_slot].bio = NULL;
+ r10_bio->devs[r10_bio->read_slot].bio =
+ mddev->ro ? IO_BLOCKED : NULL;
bio_put(bio);
mirror = read_balance(conf, r10_bio);
if (mirror == -1) {
@@ -1360,6 +1565,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
sector_t max_sector, nr_sectors;
int disk;
int i;
+ int max_sync;
+ int sync_blocks;
sector_t sectors_skipped = 0;
int chunks_skipped = 0;
@@ -1373,6 +1580,29 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
max_sector = mddev->resync_max_sectors;
if (sector_nr >= max_sector) {
+ /* If we aborted, we need to abort the
+ * sync on the 'current' bitmap chunks (there can
+ * be several when recovering multiple devices),
+ * as we may have started syncing them but not finished.
+ * We can find the current address in
+ * mddev->curr_resync, but for recovery,
+ * we need to convert that to several
+ * virtual addresses.
+ */
+ if (mddev->curr_resync < max_sector) { /* aborted */
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
+ bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
+ &sync_blocks, 1);
+ else for (i=0; i<conf->raid_disks; i++) {
+ sector_t sect =
+ raid10_find_virt(conf, mddev->curr_resync, i);
+ bitmap_end_sync(mddev->bitmap, sect,
+ &sync_blocks, 1);
+ }
+ } else /* completed sync */
+ conf->fullsync = 0;
+
+ bitmap_close_sync(mddev->bitmap);
close_sync(conf);
*skipped = 1;
return sectors_skipped;
@@ -1395,9 +1625,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
* If there is non-resync activity waiting for us then
* put in a delay to throttle resync.
*/
- if (!go_faster && waitqueue_active(&conf->wait_resume))
+ if (!go_faster && conf->nr_waiting)
msleep_interruptible(1000);
- device_barrier(conf, sector_nr + RESYNC_SECTORS);
/* Again, very different code for resync and recovery.
* Both must result in an r10bio with a list of bios that
@@ -1414,6 +1643,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
* end_sync_write if we will want to write.
*/
+ max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
/* recovery... the complicated one */
int i, j, k;
@@ -1422,14 +1652,29 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
for (i=0 ; i<conf->raid_disks; i++)
if (conf->mirrors[i].rdev &&
!test_bit(In_sync, &conf->mirrors[i].rdev->flags)) {
+ int still_degraded = 0;
/* want to reconstruct this device */
r10bio_t *rb2 = r10_bio;
+ sector_t sect = raid10_find_virt(conf, sector_nr, i);
+ int must_sync;
+ /* Unless we are doing a full sync, we only need
+ * to recover the block if it is set in the bitmap
+ */
+ must_sync = bitmap_start_sync(mddev->bitmap, sect,
+ &sync_blocks, 1);
+ if (sync_blocks < max_sync)
+ max_sync = sync_blocks;
+ if (!must_sync &&
+ !conf->fullsync) {
+ /* yep, skip the sync_blocks here, but don't assume
+ * that there will never be anything to do here
+ */
+ chunks_skipped = -1;
+ continue;
+ }
r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
- spin_lock_irq(&conf->resync_lock);
- conf->nr_pending++;
- if (rb2) conf->barrier++;
- spin_unlock_irq(&conf->resync_lock);
+ raise_barrier(conf, rb2 != NULL);
atomic_set(&r10_bio->remaining, 0);
r10_bio->master_bio = (struct bio*)rb2;
@@ -1437,8 +1682,23 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
atomic_inc(&rb2->remaining);
r10_bio->mddev = mddev;
set_bit(R10BIO_IsRecover, &r10_bio->state);
- r10_bio->sector = raid10_find_virt(conf, sector_nr, i);
+ r10_bio->sector = sect;
+
raid10_find_phys(conf, r10_bio);
+ /* Need to check if this section will still be
+ * degraded
+ */
+ for (j=0; j<conf->copies;j++) {
+ int d = r10_bio->devs[j].devnum;
+ if (conf->mirrors[d].rdev == NULL ||
+ test_bit(Faulty, &conf->mirrors[d].rdev->flags)) {
+ still_degraded = 1;
+ break;
+ }
+ }
+ must_sync = bitmap_start_sync(mddev->bitmap, sect,
+ &sync_blocks, still_degraded);
+
for (j=0; j<conf->copies;j++) {
int d = r10_bio->devs[j].devnum;
if (conf->mirrors[d].rdev &&
@@ -1498,14 +1758,22 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
} else {
/* resync. Schedule a read for every block at this virt offset */
int count = 0;
- r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
- spin_lock_irq(&conf->resync_lock);
- conf->nr_pending++;
- spin_unlock_irq(&conf->resync_lock);
+ if (!bitmap_start_sync(mddev->bitmap, sector_nr,
+ &sync_blocks, mddev->degraded) &&
+ !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+ /* We can skip this block */
+ *skipped = 1;
+ return sync_blocks + sectors_skipped;
+ }
+ if (sync_blocks < max_sync)
+ max_sync = sync_blocks;
+ r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
r10_bio->mddev = mddev;
atomic_set(&r10_bio->remaining, 0);
+ raise_barrier(conf, 0);
+ conf->next_resync = sector_nr;
r10_bio->master_bio = NULL;
r10_bio->sector = sector_nr;
@@ -1558,6 +1826,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
}
nr_sectors = 0;
+ if (sector_nr + max_sync < max_sector)
+ max_sector = sector_nr + max_sync;
do {
struct page *page;
int len = PAGE_SIZE;
@@ -1632,11 +1902,11 @@ static int run(mddev_t *mddev)
int nc, fc;
sector_t stride, size;
- if (mddev->level != 10) {
- printk(KERN_ERR "raid10: %s: raid level not set correctly... (%d)\n",
- mdname(mddev), mddev->level);
- goto out;
+ if (mddev->chunk_size == 0) {
+ printk(KERN_ERR "md/raid10: non-zero chunk size required.\n");
+ return -EINVAL;
}
+
nc = mddev->layout & 255;
fc = (mddev->layout >> 8) & 255;
if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
@@ -1650,22 +1920,24 @@ static int run(mddev_t *mddev)
* bookkeeping area. [whatever we allocate in run(),
* should be freed in stop()]
*/
- conf = kmalloc(sizeof(conf_t), GFP_KERNEL);
+ conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
mddev->private = conf;
if (!conf) {
printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
mdname(mddev));
goto out;
}
- memset(conf, 0, sizeof(*conf));
- conf->mirrors = kmalloc(sizeof(struct mirror_info)*mddev->raid_disks,
+ conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
GFP_KERNEL);
if (!conf->mirrors) {
printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
mdname(mddev));
goto out_free_conf;
}
- memset(conf->mirrors, 0, sizeof(struct mirror_info)*mddev->raid_disks);
+
+ conf->tmppage = alloc_page(GFP_KERNEL);
+ if (!conf->tmppage)
+ goto out_free_conf;
conf->near_copies = nc;
conf->far_copies = fc;
@@ -1713,8 +1985,7 @@ static int run(mddev_t *mddev)
INIT_LIST_HEAD(&conf->retry_list);
spin_lock_init(&conf->resync_lock);
- init_waitqueue_head(&conf->wait_idle);
- init_waitqueue_head(&conf->wait_resume);
+ init_waitqueue_head(&conf->wait_barrier);
/* need to check that every block has at least one working mirror */
if (!enough(conf)) {
@@ -1763,7 +2034,7 @@ static int run(mddev_t *mddev)
* maybe...
*/
{
- int stripe = conf->raid_disks * mddev->chunk_size / PAGE_CACHE_SIZE;
+ int stripe = conf->raid_disks * mddev->chunk_size / PAGE_SIZE;
stripe /= conf->near_copies;
if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
mddev->queue->backing_dev_info.ra_pages = 2* stripe;
@@ -1776,6 +2047,7 @@ static int run(mddev_t *mddev)
out_free_conf:
if (conf->r10bio_pool)
mempool_destroy(conf->r10bio_pool);
+ safe_put_page(conf->tmppage);
kfree(conf->mirrors);
kfree(conf);
mddev->private = NULL;
@@ -1798,10 +2070,31 @@ static int stop(mddev_t *mddev)
return 0;
}
+static void raid10_quiesce(mddev_t *mddev, int state)
+{
+ conf_t *conf = mddev_to_conf(mddev);
+
+ switch(state) {
+ case 1:
+ raise_barrier(conf, 0);
+ break;
+ case 0:
+ lower_barrier(conf);
+ break;
+ }
+ if (mddev->thread) {
+ if (mddev->bitmap)
+ mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
+ else
+ mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
+ md_wakeup_thread(mddev->thread);
+ }
+}
-static mdk_personality_t raid10_personality =
+static struct mdk_personality raid10_personality =
{
.name = "raid10",
+ .level = 10,
.owner = THIS_MODULE,
.make_request = make_request,
.run = run,
@@ -1812,19 +2105,22 @@ static mdk_personality_t raid10_personality =
.hot_remove_disk= raid10_remove_disk,
.spare_active = raid10_spare_active,
.sync_request = sync_request,
+ .quiesce = raid10_quiesce,
};
static int __init raid_init(void)
{
- return register_md_personality(RAID10, &raid10_personality);
+ return register_md_personality(&raid10_personality);
}
static void raid_exit(void)
{
- unregister_md_personality(RAID10);
+ unregister_md_personality(&raid10_personality);
}
module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-9"); /* RAID10 */
+MODULE_ALIAS("md-raid10");
+MODULE_ALIAS("md-level-10");
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index fafc4bc045f..54f4a9847e3 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -35,12 +35,10 @@
#define STRIPE_SHIFT (PAGE_SHIFT - 9)
#define STRIPE_SECTORS (STRIPE_SIZE>>9)
#define IO_THRESHOLD 1
-#define HASH_PAGES 1
-#define HASH_PAGES_ORDER 0
-#define NR_HASH (HASH_PAGES * PAGE_SIZE / sizeof(struct stripe_head *))
+#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK (NR_HASH - 1)
-#define stripe_hash(conf, sect) ((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])
+#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
* order without overlap. There may be several bio's per stripe+device, and
@@ -113,29 +111,21 @@ static void release_stripe(struct stripe_head *sh)
spin_unlock_irqrestore(&conf->device_lock, flags);
}
-static void remove_hash(struct stripe_head *sh)
+static inline void remove_hash(struct stripe_head *sh)
{
PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);
- if (sh->hash_pprev) {
- if (sh->hash_next)
- sh->hash_next->hash_pprev = sh->hash_pprev;
- *sh->hash_pprev = sh->hash_next;
- sh->hash_pprev = NULL;
- }
+ hlist_del_init(&sh->hash);
}
-static __inline__ void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
+static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
- struct stripe_head **shp = &stripe_hash(conf, sh->sector);
+ struct hlist_head *hp = stripe_hash(conf, sh->sector);
PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);
CHECK_DEVLOCK();
- if ((sh->hash_next = *shp) != NULL)
- (*shp)->hash_pprev = &sh->hash_next;
- *shp = sh;
- sh->hash_pprev = shp;
+ hlist_add_head(&sh->hash, hp);
}
@@ -167,7 +157,7 @@ static void shrink_buffers(struct stripe_head *sh, int num)
if (!p)
continue;
sh->dev[i].page = NULL;
- page_cache_release(p);
+ put_page(p);
}
}
@@ -228,10 +218,11 @@ static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_i
static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector)
{
struct stripe_head *sh;
+ struct hlist_node *hn;
CHECK_DEVLOCK();
PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
- for (sh = stripe_hash(conf, sector); sh; sh = sh->hash_next)
+ hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
if (sh->sector == sector)
return sh;
PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
@@ -417,7 +408,7 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
set_bit(R5_UPTODATE, &sh->dev[i].flags);
#endif
if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
- printk("R5: read error corrected!!\n");
+ printk(KERN_INFO "raid5: read error corrected!!\n");
clear_bit(R5_ReadError, &sh->dev[i].flags);
clear_bit(R5_ReWrite, &sh->dev[i].flags);
}
@@ -428,13 +419,14 @@ static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
clear_bit(R5_UPTODATE, &sh->dev[i].flags);
atomic_inc(&conf->disks[i].rdev->read_errors);
if (conf->mddev->degraded)
- printk("R5: read error not correctable.\n");
+ printk(KERN_WARNING "raid5: read error not correctable.\n");
else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
/* Oh, no!!! */
- printk("R5: read error NOT corrected!!\n");
+ printk(KERN_WARNING "raid5: read error NOT corrected!!\n");
else if (atomic_read(&conf->disks[i].rdev->read_errors)
> conf->max_nr_stripes)
- printk("raid5: Too many read errors, failing device.\n");
+ printk(KERN_WARNING
+ "raid5: Too many read errors, failing device.\n");
else
retry = 1;
if (retry)
@@ -604,7 +596,7 @@ static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
*dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
break;
default:
- printk("raid5: unsupported algorithm %d\n",
+ printk(KERN_ERR "raid5: unsupported algorithm %d\n",
conf->algorithm);
}
@@ -645,7 +637,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
i -= (sh->pd_idx + 1);
break;
default:
- printk("raid5: unsupported algorithm %d\n",
+ printk(KERN_ERR "raid5: unsupported algorithm %d\n",
conf->algorithm);
}
@@ -654,7 +646,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i)
check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
- printk("compute_blocknr: map not correct\n");
+ printk(KERN_ERR "compute_blocknr: map not correct\n");
return 0;
}
return r_sector;
@@ -737,7 +729,7 @@ static void compute_block(struct stripe_head *sh, int dd_idx)
if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
ptr[count++] = p;
else
- printk("compute_block() %d, stripe %llu, %d"
+ printk(KERN_ERR "compute_block() %d, stripe %llu, %d"
" not present\n", dd_idx,
(unsigned long long)sh->sector, i);
@@ -960,11 +952,11 @@ static void handle_stripe(struct stripe_head *sh)
syncing = test_bit(STRIPE_SYNCING, &sh->state);
/* Now to look around and see what can be done */
+ rcu_read_lock();
for (i=disks; i--; ) {
mdk_rdev_t *rdev;
dev = &sh->dev[i];
clear_bit(R5_Insync, &dev->flags);
- clear_bit(R5_Syncio, &dev->flags);
PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
i, dev->flags, dev->toread, dev->towrite, dev->written);
@@ -1003,9 +995,9 @@ static void handle_stripe(struct stripe_head *sh)
non_overwrite++;
}
if (dev->written) written++;
- rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */
+ rdev = rcu_dereference(conf->disks[i].rdev);
if (!rdev || !test_bit(In_sync, &rdev->flags)) {
- /* The ReadError flag wil just be confusing now */
+ /* The ReadError flag will just be confusing now */
clear_bit(R5_ReadError, &dev->flags);
clear_bit(R5_ReWrite, &dev->flags);
}
@@ -1016,6 +1008,7 @@ static void handle_stripe(struct stripe_head *sh)
} else
set_bit(R5_Insync, &dev->flags);
}
+ rcu_read_unlock();
PRINTK("locked=%d uptodate=%d to_read=%d"
" to_write=%d failed=%d failed_num=%d\n",
locked, uptodate, to_read, to_write, failed, failed_num);
@@ -1027,10 +1020,13 @@ static void handle_stripe(struct stripe_head *sh)
int bitmap_end = 0;
if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
- mdk_rdev_t *rdev = conf->disks[i].rdev;
+ mdk_rdev_t *rdev;
+ rcu_read_lock();
+ rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && test_bit(In_sync, &rdev->flags))
/* multiple read failures in one stripe */
md_error(conf->mddev, rdev);
+ rcu_read_unlock();
}
spin_lock_irq(&conf->device_lock);
@@ -1179,9 +1175,6 @@ static void handle_stripe(struct stripe_head *sh)
locked++;
PRINTK("Reading block %d (sync=%d)\n",
i, syncing);
- if (syncing)
- md_sync_acct(conf->disks[i].rdev->bdev,
- STRIPE_SECTORS);
}
}
}
@@ -1288,7 +1281,7 @@ static void handle_stripe(struct stripe_head *sh)
* is available
*/
if (syncing && locked == 0 &&
- !test_bit(STRIPE_INSYNC, &sh->state) && failed <= 1) {
+ !test_bit(STRIPE_INSYNC, &sh->state)) {
set_bit(STRIPE_HANDLE, &sh->state);
if (failed == 0) {
char *pagea;
@@ -1306,27 +1299,25 @@ static void handle_stripe(struct stripe_head *sh)
if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
/* don't try to repair!! */
set_bit(STRIPE_INSYNC, &sh->state);
+ else {
+ compute_block(sh, sh->pd_idx);
+ uptodate++;
+ }
}
}
if (!test_bit(STRIPE_INSYNC, &sh->state)) {
+ /* either failed parity check, or recovery is happening */
if (failed==0)
failed_num = sh->pd_idx;
- /* should be able to compute the missing block and write it to spare */
- if (!test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)) {
- if (uptodate+1 != disks)
- BUG();
- compute_block(sh, failed_num);
- uptodate++;
- }
- if (uptodate != disks)
- BUG();
dev = &sh->dev[failed_num];
+ BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
+ BUG_ON(uptodate != disks);
+
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantwrite, &dev->flags);
clear_bit(STRIPE_DEGRADED, &sh->state);
locked++;
set_bit(STRIPE_INSYNC, &sh->state);
- set_bit(R5_Syncio, &dev->flags);
}
}
if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
@@ -1392,7 +1383,7 @@ static void handle_stripe(struct stripe_head *sh)
rcu_read_unlock();
if (rdev) {
- if (test_bit(R5_Syncio, &sh->dev[i].flags))
+ if (syncing)
md_sync_acct(rdev->bdev, STRIPE_SECTORS);
bi->bi_bdev = rdev->bdev;
@@ -1409,6 +1400,9 @@ static void handle_stripe(struct stripe_head *sh)
bi->bi_io_vec[0].bv_offset = 0;
bi->bi_size = STRIPE_SIZE;
bi->bi_next = NULL;
+ if (rw == WRITE &&
+ test_bit(R5_ReWrite, &sh->dev[i].flags))
+ atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
generic_make_request(bi);
} else {
if (rw == 1)
@@ -1822,21 +1816,21 @@ static int run(mddev_t *mddev)
struct list_head *tmp;
if (mddev->level != 5 && mddev->level != 4) {
- printk("raid5: %s: raid level not set to 4/5 (%d)\n", mdname(mddev), mddev->level);
+ printk(KERN_ERR "raid5: %s: raid level not set to 4/5 (%d)\n",
+ mdname(mddev), mddev->level);
return -EIO;
}
- mddev->private = kmalloc (sizeof (raid5_conf_t)
- + mddev->raid_disks * sizeof(struct disk_info),
- GFP_KERNEL);
+ mddev->private = kzalloc(sizeof (raid5_conf_t)
+ + mddev->raid_disks * sizeof(struct disk_info),
+ GFP_KERNEL);
if ((conf = mddev->private) == NULL)
goto abort;
- memset (conf, 0, sizeof (*conf) + mddev->raid_disks * sizeof(struct disk_info) );
+
conf->mddev = mddev;
- if ((conf->stripe_hashtbl = (struct stripe_head **) __get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER)) == NULL)
+ if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
goto abort;
- memset(conf->stripe_hashtbl, 0, HASH_PAGES * PAGE_SIZE);
spin_lock_init(&conf->device_lock);
init_waitqueue_head(&conf->wait_for_stripe);
@@ -1903,10 +1897,17 @@ static int run(mddev_t *mddev)
if (mddev->degraded == 1 &&
mddev->recovery_cp != MaxSector) {
- printk(KERN_ERR
- "raid5: cannot start dirty degraded array for %s\n",
- mdname(mddev));
- goto abort;
+ if (mddev->ok_start_degraded)
+ printk(KERN_WARNING
+ "raid5: starting dirty degraded array: %s"
+ "- data corruption possible.\n",
+ mdname(mddev));
+ else {
+ printk(KERN_ERR
+ "raid5: cannot start dirty degraded array for %s\n",
+ mdname(mddev));
+ goto abort;
+ }
}
{
@@ -1948,7 +1949,7 @@ static int run(mddev_t *mddev)
*/
{
int stripe = (mddev->raid_disks-1) * mddev->chunk_size
- / PAGE_CACHE_SIZE;
+ / PAGE_SIZE;
if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
}
@@ -1956,9 +1957,6 @@ static int run(mddev_t *mddev)
/* Ok, everything is just fine now */
sysfs_create_group(&mddev->kobj, &raid5_attrs_group);
- if (mddev->bitmap)
- mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
-
mddev->queue->unplug_fn = raid5_unplug_device;
mddev->queue->issue_flush_fn = raid5_issue_flush;
@@ -1967,9 +1965,7 @@ static int run(mddev_t *mddev)
abort:
if (conf) {
print_raid5_conf(conf);
- if (conf->stripe_hashtbl)
- free_pages((unsigned long) conf->stripe_hashtbl,
- HASH_PAGES_ORDER);
+ kfree(conf->stripe_hashtbl);
kfree(conf);
}
mddev->private = NULL;
@@ -1986,7 +1982,7 @@ static int stop(mddev_t *mddev)
md_unregister_thread(mddev->thread);
mddev->thread = NULL;
shrink_stripes(conf);
- free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER);
+ kfree(conf->stripe_hashtbl);
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
kfree(conf);
@@ -2014,12 +2010,12 @@ static void print_sh (struct stripe_head *sh)
static void printall (raid5_conf_t *conf)
{
struct stripe_head *sh;
+ struct hlist_node *hn;
int i;
spin_lock_irq(&conf->device_lock);
for (i = 0; i < NR_HASH; i++) {
- sh = conf->stripe_hashtbl[i];
- for (; sh; sh = sh->hash_next) {
+ hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
if (sh->raid_conf != conf)
continue;
print_sh(sh);
@@ -2192,17 +2188,12 @@ static void raid5_quiesce(mddev_t *mddev, int state)
spin_unlock_irq(&conf->device_lock);
break;
}
- if (mddev->thread) {
- if (mddev->bitmap)
- mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
- else
- mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
- md_wakeup_thread(mddev->thread);
- }
}
-static mdk_personality_t raid5_personality=
+
+static struct mdk_personality raid5_personality =
{
.name = "raid5",
+ .level = 5,
.owner = THIS_MODULE,
.make_request = make_request,
.run = run,
@@ -2217,17 +2208,42 @@ static mdk_personality_t raid5_personality=
.quiesce = raid5_quiesce,
};
-static int __init raid5_init (void)
+static struct mdk_personality raid4_personality =
{
- return register_md_personality (RAID5, &raid5_personality);
+ .name = "raid4",
+ .level = 4,
+ .owner = THIS_MODULE,
+ .make_request = make_request,
+ .run = run,
+ .stop = stop,
+ .status = status,
+ .error_handler = error,
+ .hot_add_disk = raid5_add_disk,
+ .hot_remove_disk= raid5_remove_disk,
+ .spare_active = raid5_spare_active,
+ .sync_request = sync_request,
+ .resize = raid5_resize,
+ .quiesce = raid5_quiesce,
+};
+
+static int __init raid5_init(void)
+{
+ register_md_personality(&raid5_personality);
+ register_md_personality(&raid4_personality);
+ return 0;
}
-static void raid5_exit (void)
+static void raid5_exit(void)
{
- unregister_md_personality (RAID5);
+ unregister_md_personality(&raid5_personality);
+ unregister_md_personality(&raid4_personality);
}
module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
+MODULE_ALIAS("md-raid5");
+MODULE_ALIAS("md-raid4");
+MODULE_ALIAS("md-level-5");
+MODULE_ALIAS("md-level-4");
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index 0000d162d19..8c823d686a6 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -40,12 +40,10 @@
#define STRIPE_SHIFT (PAGE_SHIFT - 9)
#define STRIPE_SECTORS (STRIPE_SIZE>>9)
#define IO_THRESHOLD 1
-#define HASH_PAGES 1
-#define HASH_PAGES_ORDER 0
-#define NR_HASH (HASH_PAGES * PAGE_SIZE / sizeof(struct stripe_head *))
+#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK (NR_HASH - 1)
-#define stripe_hash(conf, sect) ((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])
+#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
* order without overlap. There may be several bio's per stripe+device, and
@@ -132,29 +130,21 @@ static void release_stripe(struct stripe_head *sh)
spin_unlock_irqrestore(&conf->device_lock, flags);
}
-static void remove_hash(struct stripe_head *sh)
+static inline void remove_hash(struct stripe_head *sh)
{
PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);
- if (sh->hash_pprev) {
- if (sh->hash_next)
- sh->hash_next->hash_pprev = sh->hash_pprev;
- *sh->hash_pprev = sh->hash_next;
- sh->hash_pprev = NULL;
- }
+ hlist_del_init(&sh->hash);
}
-static __inline__ void insert_hash(raid6_conf_t *conf, struct stripe_head *sh)
+static inline void insert_hash(raid6_conf_t *conf, struct stripe_head *sh)
{
- struct stripe_head **shp = &stripe_hash(conf, sh->sector);
+ struct hlist_head *hp = stripe_hash(conf, sh->sector);
PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);
CHECK_DEVLOCK();
- if ((sh->hash_next = *shp) != NULL)
- (*shp)->hash_pprev = &sh->hash_next;
- *shp = sh;
- sh->hash_pprev = shp;
+ hlist_add_head(&sh->hash, hp);
}
@@ -186,7 +176,7 @@ static void shrink_buffers(struct stripe_head *sh, int num)
if (!p)
continue;
sh->dev[i].page = NULL;
- page_cache_release(p);
+ put_page(p);
}
}
@@ -247,10 +237,11 @@ static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_i
static struct stripe_head *__find_stripe(raid6_conf_t *conf, sector_t sector)
{
struct stripe_head *sh;
+ struct hlist_node *hn;
CHECK_DEVLOCK();
PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
- for (sh = stripe_hash(conf, sector); sh; sh = sh->hash_next)
+ hlist_for_each_entry (sh, hn, stripe_hash(conf, sector), hash)
if (sh->sector == sector)
return sh;
PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
@@ -367,8 +358,8 @@ static void shrink_stripes(raid6_conf_t *conf)
conf->slab_cache = NULL;
}
-static int raid6_end_read_request (struct bio * bi, unsigned int bytes_done,
- int error)
+static int raid6_end_read_request(struct bio * bi, unsigned int bytes_done,
+ int error)
{
struct stripe_head *sh = bi->bi_private;
raid6_conf_t *conf = sh->raid_conf;
@@ -420,9 +411,35 @@ static int raid6_end_read_request (struct bio * bi, unsigned int bytes_done,
#else
set_bit(R5_UPTODATE, &sh->dev[i].flags);
#endif
+ if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
+ printk(KERN_INFO "raid6: read error corrected!!\n");
+ clear_bit(R5_ReadError, &sh->dev[i].flags);
+ clear_bit(R5_ReWrite, &sh->dev[i].flags);
+ }
+ if (atomic_read(&conf->disks[i].rdev->read_errors))
+ atomic_set(&conf->disks[i].rdev->read_errors, 0);
} else {
- md_error(conf->mddev, conf->disks[i].rdev);
+ int retry = 0;
clear_bit(R5_UPTODATE, &sh->dev[i].flags);
+ atomic_inc(&conf->disks[i].rdev->read_errors);
+ if (conf->mddev->degraded)
+ printk(KERN_WARNING "raid6: read error not correctable.\n");
+ else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
+ /* Oh, no!!! */
+ printk(KERN_WARNING "raid6: read error NOT corrected!!\n");
+ else if (atomic_read(&conf->disks[i].rdev->read_errors)
+ > conf->max_nr_stripes)
+ printk(KERN_WARNING
+ "raid6: Too many read errors, failing device.\n");
+ else
+ retry = 1;
+ if (retry)
+ set_bit(R5_ReadError, &sh->dev[i].flags);
+ else {
+ clear_bit(R5_ReadError, &sh->dev[i].flags);
+ clear_bit(R5_ReWrite, &sh->dev[i].flags);
+ md_error(conf->mddev, conf->disks[i].rdev);
+ }
}
rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
#if 0
@@ -805,7 +822,7 @@ static void compute_parity(struct stripe_head *sh, int method)
}
/* Compute one missing block */
-static void compute_block_1(struct stripe_head *sh, int dd_idx)
+static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero)
{
raid6_conf_t *conf = sh->raid_conf;
int i, count, disks = conf->raid_disks;
@@ -821,7 +838,7 @@ static void compute_block_1(struct stripe_head *sh, int dd_idx)
compute_parity(sh, UPDATE_PARITY);
} else {
ptr[0] = page_address(sh->dev[dd_idx].page);
- memset(ptr[0], 0, STRIPE_SIZE);
+ if (!nozero) memset(ptr[0], 0, STRIPE_SIZE);
count = 1;
for (i = disks ; i--; ) {
if (i == dd_idx || i == qd_idx)
@@ -838,7 +855,8 @@ static void compute_block_1(struct stripe_head *sh, int dd_idx)
}
if (count != 1)
xor_block(count, STRIPE_SIZE, ptr);
- set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
+ if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
+ else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
}
}
@@ -871,7 +889,7 @@ static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
return;
} else {
/* We're missing D+Q; recompute D from P */
- compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1);
+ compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0);
compute_parity(sh, UPDATE_PARITY); /* Is this necessary? */
return;
}
@@ -982,6 +1000,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
}
+static int page_is_zero(struct page *p)
+{
+ char *a = page_address(p);
+ return ((*(u32*)a) == 0 &&
+ memcmp(a, a+4, STRIPE_SIZE-4)==0);
+}
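The page_is_zero() helper just added avoids a second zero buffer: if the first 32-bit word of the page is zero and the page compares equal to itself shifted by four bytes, every byte must be zero by induction. A userspace restatement of the same trick, using a plain buffer and the hypothetical name buffer_is_zero(), assuming len >= 4 and 4-byte alignment just as a struct page guarantees:

    #include <string.h>

    /* buf[i] == buf[i+4] for all i, together with buf[0..3] == 0, forces
     * every byte to zero: each byte equals the byte four positions earlier,
     * so the leading zero word propagates through the whole buffer. */
    static int buffer_is_zero(const char *buf, size_t len)
    {
            return *(const unsigned int *)buf == 0 &&
                   memcmp(buf, buf + 4, len - 4) == 0;
    }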
/*
* handle_stripe - do things to a stripe.
*
@@ -1000,7 +1024,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
*
*/
-static void handle_stripe(struct stripe_head *sh)
+static void handle_stripe(struct stripe_head *sh, struct page *tmp_page)
{
raid6_conf_t *conf = sh->raid_conf;
int disks = conf->raid_disks;
@@ -1027,11 +1051,11 @@ static void handle_stripe(struct stripe_head *sh)
syncing = test_bit(STRIPE_SYNCING, &sh->state);
/* Now to look around and see what can be done */
+ rcu_read_lock();
for (i=disks; i--; ) {
mdk_rdev_t *rdev;
dev = &sh->dev[i];
clear_bit(R5_Insync, &dev->flags);
- clear_bit(R5_Syncio, &dev->flags);
PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
i, dev->flags, dev->toread, dev->towrite, dev->written);
@@ -1070,14 +1094,21 @@ static void handle_stripe(struct stripe_head *sh)
non_overwrite++;
}
if (dev->written) written++;
- rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */
+ rdev = rcu_dereference(conf->disks[i].rdev);
if (!rdev || !test_bit(In_sync, &rdev->flags)) {
+ /* The ReadError flag will just be confusing now */
+ clear_bit(R5_ReadError, &dev->flags);
+ clear_bit(R5_ReWrite, &dev->flags);
+ }
+ if (!rdev || !test_bit(In_sync, &rdev->flags)
+ || test_bit(R5_ReadError, &dev->flags)) {
if ( failed < 2 )
failed_num[failed] = i;
failed++;
} else
set_bit(R5_Insync, &dev->flags);
}
+ rcu_read_unlock();
PRINTK("locked=%d uptodate=%d to_read=%d"
" to_write=%d failed=%d failed_num=%d,%d\n",
locked, uptodate, to_read, to_write, failed,
@@ -1088,6 +1119,17 @@ static void handle_stripe(struct stripe_head *sh)
if (failed > 2 && to_read+to_write+written) {
for (i=disks; i--; ) {
int bitmap_end = 0;
+
+ if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
+ mdk_rdev_t *rdev;
+ rcu_read_lock();
+ rdev = rcu_dereference(conf->disks[i].rdev);
+ if (rdev && test_bit(In_sync, &rdev->flags))
+ /* multiple read failures in one stripe */
+ md_error(conf->mddev, rdev);
+ rcu_read_unlock();
+ }
+
spin_lock_irq(&conf->device_lock);
/* fail all writes first */
bi = sh->dev[i].towrite;
@@ -1123,7 +1165,8 @@ static void handle_stripe(struct stripe_head *sh)
}
/* fail any reads if this device is non-operational */
- if (!test_bit(R5_Insync, &sh->dev[i].flags)) {
+ if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
+ test_bit(R5_ReadError, &sh->dev[i].flags)) {
bi = sh->dev[i].toread;
sh->dev[i].toread = NULL;
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
@@ -1228,7 +1271,7 @@ static void handle_stripe(struct stripe_head *sh)
if (uptodate == disks-1) {
PRINTK("Computing stripe %llu block %d\n",
(unsigned long long)sh->sector, i);
- compute_block_1(sh, i);
+ compute_block_1(sh, i, 0);
uptodate++;
} else if ( uptodate == disks-2 && failed >= 2 ) {
/* Computing 2-failure is *very* expensive; only do it if failed >= 2 */
@@ -1259,9 +1302,6 @@ static void handle_stripe(struct stripe_head *sh)
locked++;
PRINTK("Reading block %d (sync=%d)\n",
i, syncing);
- if (syncing)
- md_sync_acct(conf->disks[i].rdev->bdev,
- STRIPE_SECTORS);
}
}
}
@@ -1323,7 +1363,7 @@ static void handle_stripe(struct stripe_head *sh)
/* We have failed blocks and need to compute them */
switch ( failed ) {
case 0: BUG();
- case 1: compute_block_1(sh, failed_num[0]); break;
+ case 1: compute_block_1(sh, failed_num[0], 0); break;
case 2: compute_block_2(sh, failed_num[0], failed_num[1]); break;
default: BUG(); /* This request should have been failed? */
}
@@ -1338,12 +1378,10 @@ static void handle_stripe(struct stripe_head *sh)
(unsigned long long)sh->sector, i);
locked++;
set_bit(R5_Wantwrite, &sh->dev[i].flags);
-#if 0 /**** FIX: I don't understand the logic here... ****/
- if (!test_bit(R5_Insync, &sh->dev[i].flags)
- || ((i==pd_idx || i==qd_idx) && failed == 0)) /* FIX? */
- set_bit(STRIPE_INSYNC, &sh->state);
-#endif
}
+ /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */
+ set_bit(STRIPE_INSYNC, &sh->state);
+
if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
atomic_dec(&conf->preread_active_stripes);
if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
@@ -1356,84 +1394,119 @@ static void handle_stripe(struct stripe_head *sh)
* Any reads will already have been scheduled, so we just see if enough data
* is available
*/
- if (syncing && locked == 0 &&
- !test_bit(STRIPE_INSYNC, &sh->state) && failed <= 2) {
- set_bit(STRIPE_HANDLE, &sh->state);
-#if 0 /* RAID-6: Don't support CHECK PARITY yet */
- if (failed == 0) {
- char *pagea;
- if (uptodate != disks)
- BUG();
- compute_parity(sh, CHECK_PARITY);
- uptodate--;
- pagea = page_address(sh->dev[pd_idx].page);
- if ((*(u32*)pagea) == 0 &&
- !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) {
- /* parity is correct (on disc, not in buffer any more) */
- set_bit(STRIPE_INSYNC, &sh->state);
- }
- }
-#endif
- if (!test_bit(STRIPE_INSYNC, &sh->state)) {
- int failed_needupdate[2];
- struct r5dev *adev, *bdev;
-
- if ( failed < 1 )
- failed_num[0] = pd_idx;
- if ( failed < 2 )
- failed_num[1] = (failed_num[0] == qd_idx) ? pd_idx : qd_idx;
+ if (syncing && locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) {
+ int update_p = 0, update_q = 0;
+ struct r5dev *dev;
- failed_needupdate[0] = !test_bit(R5_UPTODATE, &sh->dev[failed_num[0]].flags);
- failed_needupdate[1] = !test_bit(R5_UPTODATE, &sh->dev[failed_num[1]].flags);
+ set_bit(STRIPE_HANDLE, &sh->state);
- PRINTK("sync: failed=%d num=%d,%d fnu=%u%u\n",
- failed, failed_num[0], failed_num[1], failed_needupdate[0], failed_needupdate[1]);
+ BUG_ON(failed>2);
+ BUG_ON(uptodate < disks);
+ /* Want to check and possibly repair P and Q.
+ * However there could be one 'failed' device, in which
+ * case we can only check one of them, possibly using the
+ * other to generate missing data
+ */
-#if 0 /* RAID-6: This code seems to require that CHECK_PARITY destroys the uptodateness of the parity */
- /* should be able to compute the missing block(s) and write to spare */
- if ( failed_needupdate[0] ^ failed_needupdate[1] ) {
- if (uptodate+1 != disks)
- BUG();
- compute_block_1(sh, failed_needupdate[0] ? failed_num[0] : failed_num[1]);
- uptodate++;
- } else if ( failed_needupdate[0] & failed_needupdate[1] ) {
- if (uptodate+2 != disks)
- BUG();
- compute_block_2(sh, failed_num[0], failed_num[1]);
- uptodate += 2;
+ /* If !tmp_page, we cannot do the calculations,
+ * but as we have set STRIPE_HANDLE, we will soon be called
+ * by handle_stripe with a tmp_page - just wait until then.
+ */
+ if (tmp_page) {
+ if (failed == q_failed) {
+ /* The only possible failed device holds 'Q', so it makes
+ * sense to check P (If anything else were failed, we would
+ * have used P to recreate it).
+ */
+ compute_block_1(sh, pd_idx, 1);
+ if (!page_is_zero(sh->dev[pd_idx].page)) {
+ compute_block_1(sh,pd_idx,0);
+ update_p = 1;
+ }
+ }
+ if (!q_failed && failed < 2) {
+ /* q is not failed, and we didn't use it to generate
+ * anything, so it makes sense to check it
+ */
+ memcpy(page_address(tmp_page),
+ page_address(sh->dev[qd_idx].page),
+ STRIPE_SIZE);
+ compute_parity(sh, UPDATE_PARITY);
+ if (memcmp(page_address(tmp_page),
+ page_address(sh->dev[qd_idx].page),
+ STRIPE_SIZE)!= 0) {
+ clear_bit(STRIPE_INSYNC, &sh->state);
+ update_q = 1;
+ }
+ }
+ if (update_p || update_q) {
+ conf->mddev->resync_mismatches += STRIPE_SECTORS;
+ if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
+ /* don't try to repair!! */
+ update_p = update_q = 0;
}
-#else
- compute_block_2(sh, failed_num[0], failed_num[1]);
- uptodate += failed_needupdate[0] + failed_needupdate[1];
-#endif
- if (uptodate != disks)
- BUG();
+ /* now write out any block on a failed drive,
+ * or P or Q if they need it
+ */
- PRINTK("Marking for sync stripe %llu blocks %d,%d\n",
- (unsigned long long)sh->sector, failed_num[0], failed_num[1]);
+ if (failed == 2) {
+ dev = &sh->dev[failed_num[1]];
+ locked++;
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantwrite, &dev->flags);
+ }
+ if (failed >= 1) {
+ dev = &sh->dev[failed_num[0]];
+ locked++;
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantwrite, &dev->flags);
+ }
- /**** FIX: Should we really do both of these unconditionally? ****/
- adev = &sh->dev[failed_num[0]];
- locked += !test_bit(R5_LOCKED, &adev->flags);
- set_bit(R5_LOCKED, &adev->flags);
- set_bit(R5_Wantwrite, &adev->flags);
- bdev = &sh->dev[failed_num[1]];
- locked += !test_bit(R5_LOCKED, &bdev->flags);
- set_bit(R5_LOCKED, &bdev->flags);
+ if (update_p) {
+ dev = &sh->dev[pd_idx];
+ locked ++;
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantwrite, &dev->flags);
+ }
+ if (update_q) {
+ dev = &sh->dev[qd_idx];
+ locked++;
+ set_bit(R5_LOCKED, &dev->flags);
+ set_bit(R5_Wantwrite, &dev->flags);
+ }
clear_bit(STRIPE_DEGRADED, &sh->state);
- set_bit(R5_Wantwrite, &bdev->flags);
set_bit(STRIPE_INSYNC, &sh->state);
- set_bit(R5_Syncio, &adev->flags);
- set_bit(R5_Syncio, &bdev->flags);
}
}
+
if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
md_done_sync(conf->mddev, STRIPE_SECTORS,1);
clear_bit(STRIPE_SYNCING, &sh->state);
}
+ /* If the failed drives are just a ReadError, then we might need
+ * to progress the repair/check process
+ */
+ if (failed <= 2 && ! conf->mddev->ro)
+ for (i=0; i<failed;i++) {
+ dev = &sh->dev[failed_num[i]];
+ if (test_bit(R5_ReadError, &dev->flags)
+ && !test_bit(R5_LOCKED, &dev->flags)
+ && test_bit(R5_UPTODATE, &dev->flags)
+ ) {
+ if (!test_bit(R5_ReWrite, &dev->flags)) {
+ set_bit(R5_Wantwrite, &dev->flags);
+ set_bit(R5_ReWrite, &dev->flags);
+ set_bit(R5_LOCKED, &dev->flags);
+ } else {
+ /* let's read it back */
+ set_bit(R5_Wantread, &dev->flags);
+ set_bit(R5_LOCKED, &dev->flags);
+ }
+ }
+ }
spin_unlock(&sh->lock);
while ((bi=return_bi)) {
@@ -1472,7 +1545,7 @@ static void handle_stripe(struct stripe_head *sh)
rcu_read_unlock();
if (rdev) {
- if (test_bit(R5_Syncio, &sh->dev[i].flags))
+ if (syncing)
md_sync_acct(rdev->bdev, STRIPE_SECTORS);
bi->bi_bdev = rdev->bdev;
@@ -1489,6 +1562,9 @@ static void handle_stripe(struct stripe_head *sh)
bi->bi_io_vec[0].bv_offset = 0;
bi->bi_size = STRIPE_SIZE;
bi->bi_next = NULL;
+ if (rw == WRITE &&
+ test_bit(R5_ReWrite, &sh->dev[i].flags))
+ atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
generic_make_request(bi);
} else {
if (rw == 1)
@@ -1664,7 +1740,7 @@ static int make_request (request_queue_t *q, struct bio * bi)
}
finish_wait(&conf->wait_for_overlap, &w);
raid6_plug_device(conf);
- handle_stripe(sh);
+ handle_stripe(sh, NULL);
release_stripe(sh);
} else {
/* cannot get stripe for read-ahead, just give-up */
@@ -1728,6 +1804,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
return rv;
}
if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
+ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
!conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
/* we can skip this block, and probably more */
sync_blocks /= STRIPE_SECTORS;
@@ -1765,7 +1842,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
clear_bit(STRIPE_INSYNC, &sh->state);
spin_unlock(&sh->lock);
- handle_stripe(sh);
+ handle_stripe(sh, NULL);
release_stripe(sh);
return STRIPE_SECTORS;
@@ -1821,7 +1898,7 @@ static void raid6d (mddev_t *mddev)
spin_unlock_irq(&conf->device_lock);
handled++;
- handle_stripe(sh);
+ handle_stripe(sh, conf->spare_page);
release_stripe(sh);
spin_lock_irq(&conf->device_lock);
@@ -1848,17 +1925,19 @@ static int run(mddev_t *mddev)
return -EIO;
}
- mddev->private = kmalloc (sizeof (raid6_conf_t)
- + mddev->raid_disks * sizeof(struct disk_info),
- GFP_KERNEL);
+ mddev->private = kzalloc(sizeof (raid6_conf_t)
+ + mddev->raid_disks * sizeof(struct disk_info),
+ GFP_KERNEL);
if ((conf = mddev->private) == NULL)
goto abort;
- memset (conf, 0, sizeof (*conf) + mddev->raid_disks * sizeof(struct disk_info) );
conf->mddev = mddev;
- if ((conf->stripe_hashtbl = (struct stripe_head **) __get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER)) == NULL)
+ if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
+ goto abort;
+
+ conf->spare_page = alloc_page(GFP_KERNEL);
+ if (!conf->spare_page)
goto abort;
- memset(conf->stripe_hashtbl, 0, HASH_PAGES * PAGE_SIZE);
spin_lock_init(&conf->device_lock);
init_waitqueue_head(&conf->wait_for_stripe);
@@ -1929,13 +2008,18 @@ static int run(mddev_t *mddev)
goto abort;
}
-#if 0 /* FIX: For now */
if (mddev->degraded > 0 &&
mddev->recovery_cp != MaxSector) {
- printk(KERN_ERR "raid6: cannot start dirty degraded array for %s\n", mdname(mddev));
- goto abort;
+ if (mddev->ok_start_degraded)
+ printk(KERN_WARNING "raid6: starting dirty degraded array:%s"
+ "- data corruption possible.\n",
+ mdname(mddev));
+ else {
+ printk(KERN_ERR "raid6: cannot start dirty degraded array"
+ " for %s\n", mdname(mddev));
+ goto abort;
+ }
}
-#endif
{
mddev->thread = md_register_thread(raid6d, mddev, "%s_raid6");
@@ -1977,7 +2061,7 @@ static int run(mddev_t *mddev)
*/
{
int stripe = (mddev->raid_disks-2) * mddev->chunk_size
- / PAGE_CACHE_SIZE;
+ / PAGE_SIZE;
if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
}
@@ -1985,18 +2069,14 @@ static int run(mddev_t *mddev)
/* Ok, everything is just fine now */
mddev->array_size = mddev->size * (mddev->raid_disks - 2);
- if (mddev->bitmap)
- mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
-
mddev->queue->unplug_fn = raid6_unplug_device;
mddev->queue->issue_flush_fn = raid6_issue_flush;
return 0;
abort:
if (conf) {
print_raid6_conf(conf);
- if (conf->stripe_hashtbl)
- free_pages((unsigned long) conf->stripe_hashtbl,
- HASH_PAGES_ORDER);
+ safe_put_page(conf->spare_page);
+ kfree(conf->stripe_hashtbl);
kfree(conf);
}
mddev->private = NULL;
@@ -2013,7 +2093,7 @@ static int stop (mddev_t *mddev)
md_unregister_thread(mddev->thread);
mddev->thread = NULL;
shrink_stripes(conf);
- free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER);
+ kfree(conf->stripe_hashtbl);
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
kfree(conf);
mddev->private = NULL;
@@ -2040,12 +2120,13 @@ static void print_sh (struct seq_file *seq, struct stripe_head *sh)
static void printall (struct seq_file *seq, raid6_conf_t *conf)
{
struct stripe_head *sh;
+ struct hlist_node *hn;
int i;
spin_lock_irq(&conf->device_lock);
for (i = 0; i < NR_HASH; i++) {
sh = conf->stripe_hashtbl[i];
- for (; sh; sh = sh->hash_next) {
+ hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
if (sh->raid_conf != conf)
continue;
print_sh(seq, sh);
@@ -2223,17 +2304,12 @@ static void raid6_quiesce(mddev_t *mddev, int state)
spin_unlock_irq(&conf->device_lock);
break;
}
- if (mddev->thread) {
- if (mddev->bitmap)
- mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
- else
- mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
- md_wakeup_thread(mddev->thread);
- }
}
-static mdk_personality_t raid6_personality=
+
+static struct mdk_personality raid6_personality =
{
.name = "raid6",
+ .level = 6,
.owner = THIS_MODULE,
.make_request = make_request,
.run = run,
@@ -2248,7 +2324,7 @@ static mdk_personality_t raid6_personality=
.quiesce = raid6_quiesce,
};
-static int __init raid6_init (void)
+static int __init raid6_init(void)
{
int e;
@@ -2256,15 +2332,17 @@ static int __init raid6_init (void)
if ( e )
return e;
- return register_md_personality (RAID6, &raid6_personality);
+ return register_md_personality(&raid6_personality);
}
static void raid6_exit (void)
{
- unregister_md_personality (RAID6);
+ unregister_md_personality(&raid6_personality);
}
module_init(raid6_init);
module_exit(raid6_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
+MODULE_ALIAS("md-raid6");
+MODULE_ALIAS("md-level-6");
diff --git a/drivers/media/common/saa7146_core.c b/drivers/media/common/saa7146_core.c
index 2899d34e5f7..04c1938b9c9 100644
--- a/drivers/media/common/saa7146_core.c
+++ b/drivers/media/common/saa7146_core.c
@@ -109,10 +109,9 @@ static struct scatterlist* vmalloc_to_sg(unsigned char *virt, int nr_pages)
struct page *pg;
int i;
- sglist = kmalloc(sizeof(struct scatterlist)*nr_pages, GFP_KERNEL);
+ sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
if (NULL == sglist)
return NULL;
- memset(sglist,0,sizeof(struct scatterlist)*nr_pages);
for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
pg = vmalloc_to_page(virt);
if (NULL == pg)
@@ -306,15 +305,13 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent
struct saa7146_dev *dev;
int err = -ENOMEM;
- dev = kmalloc(sizeof(struct saa7146_dev), GFP_KERNEL);
+ /* clear out mem for sure */
+ dev = kzalloc(sizeof(struct saa7146_dev), GFP_KERNEL);
if (!dev) {
ERR(("out of memory.\n"));
goto out;
}
- /* clear out mem for sure */
- memset(dev, 0x0, sizeof(struct saa7146_dev));
-
DEB_EE(("pci:%p\n",pci));
err = pci_enable_device(pci);
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index 09ec964dec5..f8cf73ed49a 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -239,13 +239,12 @@ static int fops_open(struct inode *inode, struct file *file)
}
/* allocate per open data */
- fh = kmalloc(sizeof(*fh),GFP_KERNEL);
+ fh = kzalloc(sizeof(*fh),GFP_KERNEL);
if (NULL == fh) {
DEB_S(("cannot allocate memory for per open data.\n"));
result = -ENOMEM;
goto out;
}
- memset(fh,0,sizeof(*fh));
file->private_data = fh;
fh->dev = dev;
@@ -253,7 +252,10 @@ static int fops_open(struct inode *inode, struct file *file)
if( fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
DEB_S(("initializing vbi...\n"));
- result = saa7146_vbi_uops.open(dev,file);
+ if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
+ result = saa7146_vbi_uops.open(dev,file);
+ if (dev->ext_vv_data->vbi_fops.open)
+ dev->ext_vv_data->vbi_fops.open(inode, file);
} else {
DEB_S(("initializing video...\n"));
result = saa7146_video_uops.open(dev,file);
@@ -289,7 +291,10 @@ static int fops_release(struct inode *inode, struct file *file)
return -ERESTARTSYS;
if( fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
- saa7146_vbi_uops.release(dev,file);
+ if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
+ saa7146_vbi_uops.release(dev,file);
+ if (dev->ext_vv_data->vbi_fops.release)
+ dev->ext_vv_data->vbi_fops.release(inode, file);
} else {
saa7146_video_uops.release(dev,file);
}
@@ -332,6 +337,7 @@ static int fops_mmap(struct file *file, struct vm_area_struct * vma)
BUG();
return 0;
}
+
return videobuf_mmap_mapper(q,vma);
}
@@ -381,7 +387,10 @@ static ssize_t fops_read(struct file *file, char __user *data, size_t count, lof
}
case V4L2_BUF_TYPE_VBI_CAPTURE: {
// DEB_EE(("V4L2_BUF_TYPE_VBI_CAPTURE: file:%p, data:%p, count:%lu\n", file, data, (unsigned long)count));
- return saa7146_vbi_uops.read(file,data,count,ppos);
+ if (fh->dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
+ return saa7146_vbi_uops.read(file,data,count,ppos);
+ else
+ return -EINVAL;
}
break;
default:
@@ -390,12 +399,31 @@ static ssize_t fops_read(struct file *file, char __user *data, size_t count, lof
}
}
+static ssize_t fops_write(struct file *file, const char __user *data, size_t count, loff_t *ppos)
+{
+ struct saa7146_fh *fh = file->private_data;
+
+ switch (fh->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return -EINVAL;
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ if (fh->dev->ext_vv_data->vbi_fops.write)
+ return fh->dev->ext_vv_data->vbi_fops.write(file, data, count, ppos);
+ else
+ return -EINVAL;
+ default:
+ BUG();
+ return -EINVAL;
+ }
+}
+
static struct file_operations video_fops =
{
.owner = THIS_MODULE,
.open = fops_open,
.release = fops_release,
.read = fops_read,
+ .write = fops_write,
.poll = fops_poll,
.mmap = fops_mmap,
.ioctl = fops_ioctl,
@@ -435,12 +463,11 @@ static struct video_device device_template =
int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
{
- struct saa7146_vv *vv = kmalloc (sizeof(struct saa7146_vv),GFP_KERNEL);
+ struct saa7146_vv *vv = kzalloc (sizeof(struct saa7146_vv),GFP_KERNEL);
if( NULL == vv ) {
ERR(("out of memory. aborting.\n"));
return -1;
}
- memset(vv, 0x0, sizeof(*vv));
DEB_EE(("dev:%p\n",dev));
@@ -467,7 +494,8 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
memset(vv->d_clipping.cpu_addr, 0x0, SAA7146_CLIPPING_MEM);
saa7146_video_uops.init(dev,vv);
- saa7146_vbi_uops.init(dev,vv);
+ if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
+ saa7146_vbi_uops.init(dev,vv);
dev->vv_data = vv;
dev->vv_callback = &vv_callback;
diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
index ec52dff8cb6..33bec8a6843 100644
--- a/drivers/media/common/saa7146_hlp.c
+++ b/drivers/media/common/saa7146_hlp.c
@@ -562,19 +562,26 @@ static void saa7146_set_position(struct saa7146_dev *dev, int w_x, int w_y, int
int b_depth = vv->ov_fmt->depth;
int b_bpl = vv->ov_fb.fmt.bytesperline;
- u32 base = (u32)vv->ov_fb.base;
+ /* The unsigned long cast is to remove a 64-bit compile warning since
+ it looks like a 64-bit address is cast to a 32-bit value, even
+ though the base pointer is really a 32-bit physical address that
+ goes into a 32-bit DMA register.
+ FIXME: might not work on some 64-bit platforms, but see the FIXME
+ in struct v4l2_framebuffer (videodev2.h) for that.
+ */
+ u32 base = (u32)(unsigned long)vv->ov_fb.base;
struct saa7146_video_dma vdma1;
/* calculate memory offsets for picture, look if we shall top-down-flip */
vdma1.pitch = 2*b_bpl;
if ( 0 == vv->vflip ) {
- vdma1.base_even = (u32)base + (w_y * (vdma1.pitch/2)) + (w_x * (b_depth / 8));
+ vdma1.base_even = base + (w_y * (vdma1.pitch/2)) + (w_x * (b_depth / 8));
vdma1.base_odd = vdma1.base_even + (vdma1.pitch / 2);
vdma1.prot_addr = vdma1.base_even + (w_height * (vdma1.pitch / 2));
}
else {
- vdma1.base_even = (u32)base + ((w_y+w_height) * (vdma1.pitch/2)) + (w_x * (b_depth / 8));
+ vdma1.base_even = base + ((w_y+w_height) * (vdma1.pitch/2)) + (w_x * (b_depth / 8));
vdma1.base_odd = vdma1.base_even - (vdma1.pitch / 2);
vdma1.prot_addr = vdma1.base_odd - (w_height * (vdma1.pitch / 2));
}
diff --git a/drivers/media/common/saa7146_vbi.c b/drivers/media/common/saa7146_vbi.c
index 063986ec16b..468d3c95907 100644
--- a/drivers/media/common/saa7146_vbi.c
+++ b/drivers/media/common/saa7146_vbi.c
@@ -500,9 +500,9 @@ static ssize_t vbi_read(struct file *file, char __user *data, size_t count, loff
}
struct saa7146_use_ops saa7146_vbi_uops = {
- .init = vbi_init,
- .open = vbi_open,
+ .init = vbi_init,
+ .open = vbi_open,
.release = vbi_close,
.irq_done = vbi_irq_done,
- .read = vbi_read,
+ .read = vbi_read,
};
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index 1d961023b83..7ebac7949df 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -151,8 +151,8 @@ static int try_win(struct saa7146_dev *dev, struct v4l2_window *win)
if (V4L2_FIELD_ANY == field) {
field = (win->w.height > maxh/2)
- ? V4L2_FIELD_INTERLACED
- : V4L2_FIELD_TOP;
+ ? V4L2_FIELD_INTERLACED
+ : V4L2_FIELD_TOP;
}
switch (field) {
case V4L2_FIELD_TOP:
@@ -1114,10 +1114,6 @@ int saa7146_video_do_ioctl(struct inode *inode, struct file *file, unsigned int
return 0;
}
case VIDIOC_OVERLAY:
-
-
-
-
{
int on = *(int *)arg;
int err = 0;
@@ -1359,7 +1355,6 @@ static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
saa7146_buffer_queue(fh->dev,&vv->video_q,buf);
}
-
static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
struct file *file = q->priv_data;
diff --git a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
index 21a9045b3ef..0b940e152b7 100644
--- a/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
+++ b/drivers/media/dvb/b2c2/flexcop-fe-tuner.c
@@ -298,7 +298,7 @@ static int flexcop_fe_request_firmware(struct dvb_frontend* fe, const struct fir
}
static int lgdt3303_pll_set(struct dvb_frontend* fe,
- struct dvb_frontend_parameters* params)
+ struct dvb_frontend_parameters* params)
{
struct flexcop_device *fc = fe->dvb->priv;
u8 buf[4];
@@ -485,12 +485,16 @@ static struct stv0297_config alps_tdee4_stv0297_config = {
/* try to figure out the frontend, each card/box can have one of the following */
int flexcop_frontend_init(struct flexcop_device *fc)
{
+ struct dvb_frontend_ops *ops;
+
/* try the sky v2.6 (stv0299/Samsung tbmu24112(sl1935)) */
if ((fc->fe = stv0299_attach(&samsung_tbmu24112_config, &fc->i2c_adap)) != NULL) {
- fc->fe->ops->set_voltage = flexcop_set_voltage;
+ ops = fc->fe->ops;
+
+ ops->set_voltage = flexcop_set_voltage;
- fc->fe_sleep = fc->fe->ops->sleep;
- fc->fe->ops->sleep = flexcop_sleep;
+ fc->fe_sleep = ops->sleep;
+ ops->sleep = flexcop_sleep;
fc->dev_type = FC_SKY;
info("found the stv0299 at i2c address: 0x%02x",samsung_tbmu24112_config.demod_address);
@@ -522,15 +526,17 @@ int flexcop_frontend_init(struct flexcop_device *fc)
} else
/* try the sky v2.3 (vp310/Samsung tbdu18132(tsa5059)) */
if ((fc->fe = vp310_attach(&skystar23_samsung_tbdu18132_config, &fc->i2c_adap)) != NULL) {
- fc->fe->ops->diseqc_send_master_cmd = flexcop_diseqc_send_master_cmd;
- fc->fe->ops->diseqc_send_burst = flexcop_diseqc_send_burst;
- fc->fe->ops->set_tone = flexcop_set_tone;
- fc->fe->ops->set_voltage = flexcop_set_voltage;
+ ops = fc->fe->ops;
+
+ ops->diseqc_send_master_cmd = flexcop_diseqc_send_master_cmd;
+ ops->diseqc_send_burst = flexcop_diseqc_send_burst;
+ ops->set_tone = flexcop_set_tone;
+ ops->set_voltage = flexcop_set_voltage;
- fc->fe_sleep = fc->fe->ops->sleep;
- fc->fe->ops->sleep = flexcop_sleep;
+ fc->fe_sleep = ops->sleep;
+ ops->sleep = flexcop_sleep;
- fc->dev_type = FC_SKY_OLD;
+ fc->dev_type = FC_SKY_OLD;
info("found the vp310 (aka mt312) at i2c address: 0x%02x",skystar23_samsung_tbdu18132_config.demod_address);
}
@@ -540,8 +546,9 @@ int flexcop_frontend_init(struct flexcop_device *fc)
} else {
if (dvb_register_frontend(&fc->dvb_adapter, fc->fe)) {
err("frontend registration failed!");
- if (fc->fe->ops->release != NULL)
- fc->fe->ops->release(fc->fe);
+ ops = fc->fe->ops;
+ if (ops->release != NULL)
+ ops->release(fc->fe);
fc->fe = NULL;
return -EINVAL;
}
diff --git a/drivers/media/dvb/b2c2/flexcop-reg.h b/drivers/media/dvb/b2c2/flexcop-reg.h
index 23cc6431e2b..3153f9513c6 100644
--- a/drivers/media/dvb/b2c2/flexcop-reg.h
+++ b/drivers/media/dvb/b2c2/flexcop-reg.h
@@ -39,11 +39,13 @@ extern const char *flexcop_device_names[];
/* FlexCop IBI Registers */
#if defined(__LITTLE_ENDIAN)
#include "flexcop_ibi_value_le.h"
-#elif defined(__BIG_ENDIAN)
+#else
+#if defined(__BIG_ENDIAN)
#include "flexcop_ibi_value_be.h"
#else
#error no endian defined
#endif
+#endif
#define fc_data_Tag_ID_DVB 0x3e
#define fc_data_Tag_ID_ATSC 0x3f
diff --git a/drivers/media/dvb/b2c2/flexcop.c b/drivers/media/dvb/b2c2/flexcop.c
index 123ed96f6fa..56ba5247067 100644
--- a/drivers/media/dvb/b2c2/flexcop.c
+++ b/drivers/media/dvb/b2c2/flexcop.c
@@ -220,20 +220,18 @@ EXPORT_SYMBOL(flexcop_reset_block_300);
struct flexcop_device *flexcop_device_kmalloc(size_t bus_specific_len)
{
void *bus;
- struct flexcop_device *fc = kmalloc(sizeof(struct flexcop_device), GFP_KERNEL);
+ struct flexcop_device *fc = kzalloc(sizeof(struct flexcop_device), GFP_KERNEL);
if (!fc) {
err("no memory");
return NULL;
}
- memset(fc, 0, sizeof(struct flexcop_device));
- bus = kmalloc(bus_specific_len, GFP_KERNEL);
+ bus = kzalloc(bus_specific_len, GFP_KERNEL);
if (!bus) {
err("no memory");
kfree(fc);
return NULL;
}
- memset(bus, 0, bus_specific_len);
fc->bus_specific = bus;
diff --git a/drivers/media/dvb/bt8xx/dst.c b/drivers/media/dvb/bt8xx/dst.c
index 8977c7a313d..3a2ff1cc24b 100644
--- a/drivers/media/dvb/bt8xx/dst.c
+++ b/drivers/media/dvb/bt8xx/dst.c
@@ -1341,30 +1341,40 @@ static int dst_read_snr(struct dvb_frontend *fe, u16 *snr)
return 0;
}
-static int dst_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p)
+static int dst_set_frontend(struct dvb_frontend* fe,
+ struct dvb_frontend_parameters* p,
+ unsigned int mode_flags,
+ int *delay,
+ fe_status_t *status)
{
struct dst_state *state = fe->demodulator_priv;
- dst_set_freq(state, p->frequency);
- dprintk(verbose, DST_DEBUG, 1, "Set Frequency=[%d]", p->frequency);
+ if (p != NULL) {
+ dst_set_freq(state, p->frequency);
+ dprintk(verbose, DST_DEBUG, 1, "Set Frequency=[%d]", p->frequency);
- if (state->dst_type == DST_TYPE_IS_SAT) {
- if (state->type_flags & DST_TYPE_HAS_OBS_REGS)
- dst_set_inversion(state, p->inversion);
- dst_set_fec(state, p->u.qpsk.fec_inner);
- dst_set_symbolrate(state, p->u.qpsk.symbol_rate);
- dst_set_polarization(state);
- dprintk(verbose, DST_DEBUG, 1, "Set Symbolrate=[%d]", p->u.qpsk.symbol_rate);
-
- } else if (state->dst_type == DST_TYPE_IS_TERR)
- dst_set_bandwidth(state, p->u.ofdm.bandwidth);
- else if (state->dst_type == DST_TYPE_IS_CABLE) {
- dst_set_fec(state, p->u.qam.fec_inner);
- dst_set_symbolrate(state, p->u.qam.symbol_rate);
- dst_set_modulation(state, p->u.qam.modulation);
+ if (state->dst_type == DST_TYPE_IS_SAT) {
+ if (state->type_flags & DST_TYPE_HAS_OBS_REGS)
+ dst_set_inversion(state, p->inversion);
+ dst_set_fec(state, p->u.qpsk.fec_inner);
+ dst_set_symbolrate(state, p->u.qpsk.symbol_rate);
+ dst_set_polarization(state);
+ dprintk(verbose, DST_DEBUG, 1, "Set Symbolrate=[%d]", p->u.qpsk.symbol_rate);
+
+ } else if (state->dst_type == DST_TYPE_IS_TERR)
+ dst_set_bandwidth(state, p->u.ofdm.bandwidth);
+ else if (state->dst_type == DST_TYPE_IS_CABLE) {
+ dst_set_fec(state, p->u.qam.fec_inner);
+ dst_set_symbolrate(state, p->u.qam.symbol_rate);
+ dst_set_modulation(state, p->u.qam.modulation);
+ }
+ dst_write_tuna(fe);
}
- dst_write_tuna(fe);
+ if (!(mode_flags & FE_TUNE_MODE_ONESHOT))
+ dst_read_status(fe, status);
+
+ *delay = HZ/10;
return 0;
}
@@ -1445,7 +1455,7 @@ static struct dvb_frontend_ops dst_dvbt_ops = {
.release = dst_release,
.init = dst_init,
- .set_frontend = dst_set_frontend,
+ .tune = dst_set_frontend,
.get_frontend = dst_get_frontend,
.read_status = dst_read_status,
.read_signal_strength = dst_read_signal_strength,
@@ -1469,7 +1479,7 @@ static struct dvb_frontend_ops dst_dvbs_ops = {
.release = dst_release,
.init = dst_init,
- .set_frontend = dst_set_frontend,
+ .tune = dst_set_frontend,
.get_frontend = dst_get_frontend,
.read_status = dst_read_status,
.read_signal_strength = dst_read_signal_strength,
@@ -1496,7 +1506,7 @@ static struct dvb_frontend_ops dst_dvbc_ops = {
.release = dst_release,
.init = dst_init,
- .set_frontend = dst_set_frontend,
+ .tune = dst_set_frontend,
.get_frontend = dst_get_frontend,
.read_status = dst_read_status,
.read_signal_strength = dst_read_signal_strength,
diff --git a/drivers/media/dvb/bt8xx/dst_ca.c b/drivers/media/dvb/bt8xx/dst_ca.c
index 2239651969c..c650b4bf7f5 100644
--- a/drivers/media/dvb/bt8xx/dst_ca.c
+++ b/drivers/media/dvb/bt8xx/dst_ca.c
@@ -283,16 +283,17 @@ static int handle_dst_tag(struct dst_state *state, struct ca_msg *p_ca_message,
hw_buffer->msg[4] = 0x03;
hw_buffer->msg[5] = length & 0xff;
hw_buffer->msg[6] = 0x00;
+
/*
* Need to compute length for EN50221 section 8.3.2, for the time being
* assuming 8.3.2 is not applicable
*/
memcpy(&hw_buffer->msg[7], &p_ca_message->msg[4], length);
}
+
return 0;
}
-
static int write_to_8820(struct dst_state *state, struct ca_msg *hw_buffer, u8 length, u8 reply)
{
if ((dst_put_ci(state, hw_buffer->msg, length, hw_buffer->msg, reply)) < 0) {
diff --git a/drivers/media/dvb/bt8xx/dvb-bt8xx.c b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
index 77977e9c013..f65f64b00ff 100644
--- a/drivers/media/dvb/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
@@ -600,7 +600,6 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
struct dst_state* state = NULL;
switch(type) {
-#ifdef BTTV_BOARD_DVICO_DVBT_LITE
case BTTV_BOARD_DVICO_DVBT_LITE:
card->fe = mt352_attach(&thomson_dtt7579_config, card->i2c_adapter);
if (card->fe != NULL) {
@@ -608,22 +607,15 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
card->fe->ops->info.frequency_max = 862000000;
}
break;
-#endif
-#ifdef BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE
case BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE:
lgdt330x_reset(card);
card->fe = lgdt330x_attach(&tdvs_tua6034_config, card->i2c_adapter);
if (card->fe != NULL)
dprintk ("dvb_bt8xx: lgdt330x detected\n");
break;
-#endif
-#ifdef BTTV_BOARD_TWINHAN_VP3021
- case BTTV_BOARD_TWINHAN_VP3021:
-#else
case BTTV_BOARD_NEBULA_DIGITV:
-#endif
/*
* It is possible to determine the correct frontend using the I2C bus (see the Nebula SDK);
* this would be a cleaner solution than trying each frontend in turn.
@@ -794,10 +786,9 @@ static int dvb_bt8xx_probe(struct device *dev)
struct pci_dev* bttv_pci_dev;
int ret;
- if (!(card = kmalloc(sizeof(struct dvb_bt8xx_card), GFP_KERNEL)))
+ if (!(card = kzalloc(sizeof(struct dvb_bt8xx_card), GFP_KERNEL)))
return -ENOMEM;
- memset(card, 0, sizeof(*card));
init_MUTEX(&card->lock);
card->bttv_nr = sub->core->nr;
strncpy(card->card_name, sub->core->name, sizeof(sub->core->name));
@@ -812,9 +803,7 @@ static int dvb_bt8xx_probe(struct device *dev)
card->irq_err_ignore = 0;
break;
-#ifdef BTTV_BOARD_DVICO_DVBT_LITE
case BTTV_BOARD_DVICO_DVBT_LITE:
-#endif
card->gpio_mode = 0x0400C060;
card->op_sync_orin = 0;
card->irq_err_ignore = 0;
@@ -823,19 +812,13 @@ static int dvb_bt8xx_probe(struct device *dev)
* DA_APP(parallel) */
break;
-#ifdef BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE
case BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE:
-#endif
card->gpio_mode = 0x0400c060;
card->op_sync_orin = BT878_RISC_SYNC_MASK;
card->irq_err_ignore = BT878_AFBUS | BT878_AFDSR;
break;
-#ifdef BTTV_BOARD_TWINHAN_VP3021
- case BTTV_BOARD_TWINHAN_VP3021:
-#else
case BTTV_BOARD_NEBULA_DIGITV:
-#endif
case BTTV_BOARD_AVDVBT_761:
card->gpio_mode = (1 << 26) | (1 << 14) | (1 << 5);
card->op_sync_orin = 0;
diff --git a/drivers/media/dvb/cinergyT2/Kconfig b/drivers/media/dvb/cinergyT2/Kconfig
index 7cf4c4a888e..6018fcdba1e 100644
--- a/drivers/media/dvb/cinergyT2/Kconfig
+++ b/drivers/media/dvb/cinergyT2/Kconfig
@@ -21,35 +21,35 @@ config DVB_CINERGYT2_TUNING
config DVB_CINERGYT2_STREAM_URB_COUNT
int "Number of queued USB Request Blocks for Highspeed Stream Transfers"
depends on DVB_CINERGYT2_TUNING
- default "32"
+ default "32"
help
USB Request Blocks for Highspeed Stream transfers are scheduled in
a queue for the Host Controller.
Usually the default value is a safe choice.
- You may increase this number if you are using this device in a
- Server Environment with many high-traffic USB Highspeed devices
+ You may increase this number if you are using this device in a
+ Server Environment with many high-traffic USB Highspeed devices
sharing the same USB bus.
config DVB_CINERGYT2_STREAM_BUF_SIZE
int "Size of URB Stream Buffers for Highspeed Transfers"
depends on DVB_CINERGYT2_TUNING
- default "512"
+ default "512"
help
Should be a multiple of native buffer size of 512 bytes.
Default value is a safe choice.
- You may increase this number if you are using this device in a
- Server Environment with many high-traffic USB Highspeed devices
+ You may increase this number if you are using this device in a
+ Server Environment with many high-traffic USB Highspeed devices
sharing the same USB bus.
config DVB_CINERGYT2_QUERY_INTERVAL
int "Status update interval [milliseconds]"
depends on DVB_CINERGYT2_TUNING
- default "250"
+ default "250"
help
This is the interval for status readouts from the demodulator.
You may try lower values if you need more responsive signal quality
@@ -64,9 +64,9 @@ config DVB_CINERGYT2_QUERY_INTERVAL
config DVB_CINERGYT2_ENABLE_RC_INPUT_DEVICE
bool "Register the onboard IR Remote Control Receiver as Input Device"
depends on DVB_CINERGYT2_TUNING
- default "yes"
+ default "yes"
help
- Enable this option if you want to use the onboard Infrared Remote
+ Enable this option if you want to use the onboard Infrared Remote
Control Receiver as Linux-Input device.
Right now only the keycode table for the default Remote Control
@@ -77,7 +77,7 @@ config DVB_CINERGYT2_ENABLE_RC_INPUT_DEVICE
config DVB_CINERGYT2_RC_QUERY_INTERVAL
int "Infrared Remote Controller update interval [milliseconds]"
depends on DVB_CINERGYT2_TUNING && DVB_CINERGYT2_ENABLE_RC_INPUT_DEVICE
- default "50"
+ default "50"
help
If you have a very fast-repeating remote control you can try lower
values, for normal consumer receivers the default value should be
diff --git a/drivers/media/dvb/cinergyT2/cinergyT2.c b/drivers/media/dvb/cinergyT2/cinergyT2.c
index b996fb59b7e..c4b4c5b6b7c 100644
--- a/drivers/media/dvb/cinergyT2/cinergyT2.c
+++ b/drivers/media/dvb/cinergyT2/cinergyT2.c
@@ -60,7 +60,7 @@ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
#define dprintk(level, args...) \
do { \
if ((debug & level)) { \
- printk("%s: %s(): ", __stringify(KBUILD_MODNAME), \
+ printk("%s: %s(): ", KBUILD_MODNAME, \
__FUNCTION__); \
printk(args); } \
} while (0)
@@ -131,6 +131,8 @@ struct cinergyt2 {
wait_queue_head_t poll_wq;
int pending_fe_events;
+ int disconnect_pending;
+ atomic_t inuse;
void *streambuf;
dma_addr_t streambuf_dmahandle;
@@ -343,7 +345,7 @@ static int cinergyt2_start_feed(struct dvb_demux_feed *dvbdmxfeed)
struct dvb_demux *demux = dvbdmxfeed->demux;
struct cinergyt2 *cinergyt2 = demux->priv;
- if (down_interruptible(&cinergyt2->sem))
+ if (cinergyt2->disconnect_pending || down_interruptible(&cinergyt2->sem))
return -ERESTARTSYS;
if (cinergyt2->streaming == 0)
@@ -359,7 +361,7 @@ static int cinergyt2_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
struct dvb_demux *demux = dvbdmxfeed->demux;
struct cinergyt2 *cinergyt2 = demux->priv;
- if (down_interruptible(&cinergyt2->sem))
+ if (cinergyt2->disconnect_pending || down_interruptible(&cinergyt2->sem))
return -ERESTARTSYS;
if (--cinergyt2->streaming == 0)
@@ -479,23 +481,37 @@ static int cinergyt2_open (struct inode *inode, struct file *file)
{
struct dvb_device *dvbdev = file->private_data;
struct cinergyt2 *cinergyt2 = dvbdev->priv;
- int err;
+ int err = -ERESTARTSYS;
- if ((err = dvb_generic_open(inode, file)))
+ if (cinergyt2->disconnect_pending || down_interruptible(&cinergyt2->sem))
+ return -ERESTARTSYS;
+
+ if ((err = dvb_generic_open(inode, file))) {
+ up(&cinergyt2->sem);
return err;
+ }
- if (down_interruptible(&cinergyt2->sem))
- return -ERESTARTSYS;
if ((file->f_flags & O_ACCMODE) != O_RDONLY) {
cinergyt2_sleep(cinergyt2, 0);
schedule_delayed_work(&cinergyt2->query_work, HZ/2);
}
+ atomic_inc(&cinergyt2->inuse);
+
up(&cinergyt2->sem);
return 0;
}
+static void cinergyt2_unregister(struct cinergyt2 *cinergyt2)
+{
+ dvb_unregister_device(cinergyt2->fedev);
+ dvb_unregister_adapter(&cinergyt2->adapter);
+
+ cinergyt2_free_stream_urbs(cinergyt2);
+ kfree(cinergyt2);
+}
+
static int cinergyt2_release (struct inode *inode, struct file *file)
{
struct dvb_device *dvbdev = file->private_data;
@@ -504,7 +520,7 @@ static int cinergyt2_release (struct inode *inode, struct file *file)
if (down_interruptible(&cinergyt2->sem))
return -ERESTARTSYS;
- if ((file->f_flags & O_ACCMODE) != O_RDONLY) {
+ if (!cinergyt2->disconnect_pending && (file->f_flags & O_ACCMODE) != O_RDONLY) {
cancel_delayed_work(&cinergyt2->query_work);
flush_scheduled_work();
cinergyt2_sleep(cinergyt2, 1);
@@ -512,6 +528,11 @@ static int cinergyt2_release (struct inode *inode, struct file *file)
up(&cinergyt2->sem);
+ if (atomic_dec_and_test(&cinergyt2->inuse) && cinergyt2->disconnect_pending) {
+ warn("delayed unregister in release");
+ cinergyt2_unregister(cinergyt2);
+ }
+
return dvb_generic_release(inode, file);
}
@@ -519,7 +540,14 @@ static unsigned int cinergyt2_poll (struct file *file, struct poll_table_struct
{
struct dvb_device *dvbdev = file->private_data;
struct cinergyt2 *cinergyt2 = dvbdev->priv;
+
+ if (cinergyt2->disconnect_pending || down_interruptible(&cinergyt2->sem))
+ return -ERESTARTSYS;
+
poll_wait(file, &cinergyt2->poll_wq, wait);
+
+ up(&cinergyt2->sem);
+
return (POLLIN | POLLRDNORM | POLLPRI);
}
@@ -564,10 +592,15 @@ static int cinergyt2_ioctl (struct inode *inode, struct file *file,
(__u16 __user *) arg);
case FE_READ_UNCORRECTED_BLOCKS:
- /* UNC are already converted to host byte order... */
- return put_user(stat->uncorrected_block_count,
- (__u32 __user *) arg);
+ {
+ uint32_t unc_count;
+
+ unc_count = stat->uncorrected_block_count;
+ stat->uncorrected_block_count = 0;
+ /* UNC are already converted to host byte order... */
+ return put_user(unc_count,(__u32 __user *) arg);
+ }
case FE_SET_FRONTEND:
{
struct dvbt_set_parameters_msg *param = &cinergyt2->param;
@@ -580,7 +613,7 @@ static int cinergyt2_ioctl (struct inode *inode, struct file *file,
if (copy_from_user(&p, (void __user*) arg, sizeof(p)))
return -EFAULT;
- if (down_interruptible(&cinergyt2->sem))
+ if (cinergyt2->disconnect_pending || down_interruptible(&cinergyt2->sem))
return -ERESTARTSYS;
param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
@@ -691,7 +724,7 @@ static void cinergyt2_query_rc (void *data)
struct cinergyt2_rc_event rc_events[12];
int n, len, i;
- if (down_interruptible(&cinergyt2->sem))
+ if (cinergyt2->disconnect_pending || down_interruptible(&cinergyt2->sem))
return;
len = cinergyt2_command(cinergyt2, buf, sizeof(buf),
@@ -786,7 +819,6 @@ static int cinergyt2_register_rc(struct cinergyt2 *cinergyt2)
static void cinergyt2_unregister_rc(struct cinergyt2 *cinergyt2)
{
cancel_delayed_work(&cinergyt2->rc_query_work);
- flush_scheduled_work();
input_unregister_device(cinergyt2->rc_input_dev);
}
@@ -817,7 +849,7 @@ static void cinergyt2_query (void *data)
uint8_t lock_bits;
uint32_t unc;
- if (down_interruptible(&cinergyt2->sem))
+ if (cinergyt2->disconnect_pending || down_interruptible(&cinergyt2->sem))
return;
unc = s->uncorrected_block_count;
@@ -917,28 +949,25 @@ static void cinergyt2_disconnect (struct usb_interface *intf)
{
struct cinergyt2 *cinergyt2 = usb_get_intfdata (intf);
- if (down_interruptible(&cinergyt2->sem))
- return;
+ flush_scheduled_work();
cinergyt2_unregister_rc(cinergyt2);
+ cancel_delayed_work(&cinergyt2->query_work);
+ wake_up_interruptible(&cinergyt2->poll_wq);
+
cinergyt2->demux.dmx.close(&cinergyt2->demux.dmx);
- dvb_net_release(&cinergyt2->dvbnet);
- dvb_dmxdev_release(&cinergyt2->dmxdev);
- dvb_dmx_release(&cinergyt2->demux);
- dvb_unregister_device(cinergyt2->fedev);
- dvb_unregister_adapter(&cinergyt2->adapter);
+ cinergyt2->disconnect_pending = 1;
- cinergyt2_free_stream_urbs(cinergyt2);
- up(&cinergyt2->sem);
- kfree(cinergyt2);
+ if (!atomic_read(&cinergyt2->inuse))
+ cinergyt2_unregister(cinergyt2);
}
static int cinergyt2_suspend (struct usb_interface *intf, pm_message_t state)
{
struct cinergyt2 *cinergyt2 = usb_get_intfdata (intf);
- if (down_interruptible(&cinergyt2->sem))
+ if (cinergyt2->disconnect_pending || down_interruptible(&cinergyt2->sem))
return -ERESTARTSYS;
if (state.event > PM_EVENT_ON) {
@@ -961,7 +990,7 @@ static int cinergyt2_resume (struct usb_interface *intf)
struct cinergyt2 *cinergyt2 = usb_get_intfdata (intf);
struct dvbt_set_parameters_msg *param = &cinergyt2->param;
- if (down_interruptible(&cinergyt2->sem))
+ if (cinergyt2->disconnect_pending || down_interruptible(&cinergyt2->sem))
return -ERESTARTSYS;
if (!cinergyt2->sleeping) {
@@ -1014,4 +1043,3 @@ module_exit (cinergyt2_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Holger Waechtler, Daniel Mack");
-
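The disconnect handling above no longer frees the device while a file handle
may still be open: disconnect just sets disconnect_pending, and whoever drops
the last reference performs the deferred unregister. Stripped of the DVB
specifics, the pattern is roughly the following sketch (the any_* helper names
are made up for illustration):

        /* sketch: refcounted teardown so USB unplug never frees state that an
         * open file descriptor still uses */
        static int any_open(struct cinergyt2 *dev)
        {
                if (dev->disconnect_pending)        /* refuse new users after unplug */
                        return -ERESTARTSYS;
                atomic_inc(&dev->inuse);
                return 0;
        }

        static void any_release(struct cinergyt2 *dev)
        {
                if (atomic_dec_and_test(&dev->inuse) && dev->disconnect_pending)
                        cinergyt2_unregister(dev);  /* we were the last user */
        }

        static void any_disconnect(struct cinergyt2 *dev)
        {
                dev->disconnect_pending = 1;
                if (!atomic_read(&dev->inuse))      /* no open file handles */
                        cinergyt2_unregister(dev);
        }

The same disconnect_pending flag also short-circuits the ioctl, poll, feed and
suspend paths so they fail fast instead of touching hardware that is gone.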
diff --git a/drivers/media/dvb/dvb-core/Kconfig b/drivers/media/dvb/dvb-core/Kconfig
index a9a7b342104..12ee912a570 100644
--- a/drivers/media/dvb/dvb-core/Kconfig
+++ b/drivers/media/dvb/dvb-core/Kconfig
@@ -5,7 +5,7 @@ config DVB_CORE
help
DVB core utility functions for device handling, software fallbacks etc.
  Say Y when you have a DVB card and want to use it. Say Y if you want
- to build your drivers outside the kernel, but need the DVB core. All
+ to build your drivers outside the kernel, but need the DVB core. All
in-kernel drivers will select this automatically if needed.
If unsure say N.
diff --git a/drivers/media/dvb/dvb-core/Makefile b/drivers/media/dvb/dvb-core/Makefile
index c6baac20f52..7adb50c1e8e 100644
--- a/drivers/media/dvb/dvb-core/Makefile
+++ b/drivers/media/dvb/dvb-core/Makefile
@@ -3,7 +3,7 @@
#
dvb-core-objs = dvbdev.o dmxdev.o dvb_demux.o dvb_filter.o \
- dvb_ca_en50221.o dvb_frontend.o \
+ dvb_ca_en50221.o dvb_frontend.o \
dvb_net.o dvb_ringbuffer.o
obj-$(CONFIG_DVB_CORE) += dvb-core.o
diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
index 5956c35d34a..00347a75068 100644
--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
@@ -1649,21 +1649,17 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
return -EINVAL;
/* initialise the system data */
- if ((ca =
- (struct dvb_ca_private *) kmalloc(sizeof(struct dvb_ca_private),
- GFP_KERNEL)) == NULL) {
+ if ((ca = kzalloc(sizeof(struct dvb_ca_private), GFP_KERNEL)) == NULL) {
ret = -ENOMEM;
goto error;
}
- memset(ca, 0, sizeof(struct dvb_ca_private));
ca->pub = pubca;
ca->flags = flags;
ca->slot_count = slot_count;
- if ((ca->slot_info = kmalloc(sizeof(struct dvb_ca_slot) * slot_count, GFP_KERNEL)) == NULL) {
+ if ((ca->slot_info = kcalloc(slot_count, sizeof(struct dvb_ca_slot), GFP_KERNEL)) == NULL) {
ret = -ENOMEM;
goto error;
}
- memset(ca->slot_info, 0, sizeof(struct dvb_ca_slot) * slot_count);
init_waitqueue_head(&ca->wait_queue);
ca->thread_pid = 0;
init_waitqueue_head(&ca->thread_queue);
@@ -1745,9 +1741,7 @@ void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca)
for (i = 0; i < ca->slot_count; i++) {
dvb_ca_en50221_slot_shutdown(ca, i);
- if (ca->slot_info[i].rx_buffer.data != NULL) {
- vfree(ca->slot_info[i].rx_buffer.data);
- }
+ vfree(ca->slot_info[i].rx_buffer.data);
}
kfree(ca->slot_info);
dvb_unregister_device(ca->dvbdev);
diff --git a/drivers/media/dvb/dvb-core/dvb_filter.c b/drivers/media/dvb/dvb-core/dvb_filter.c
index c49fd0bd718..772003fb182 100644
--- a/drivers/media/dvb/dvb-core/dvb_filter.c
+++ b/drivers/media/dvb/dvb-core/dvb_filter.c
@@ -409,16 +409,16 @@ static u8 *skip_pes_header(u8 **bufp)
if ((inbuf[6] & 0xc0) == 0x80){ /* mpeg2 */
if (buf[7] & PTS_ONLY)
- pts = buf+9;
+ pts = buf+9;
else pts = NULL;
buf = inbuf + 9 + inbuf[8];
} else { /* mpeg1 */
for (buf = inbuf + 6; *buf == 0xff; buf++)
- if (buf == inbuf + 6 + 16) {
- break;
- }
+ if (buf == inbuf + 6 + 16) {
+ break;
+ }
if ((*buf & 0xc0) == 0x40)
- buf += 2;
+ buf += 2;
skip = mpeg1_skip_table [*buf >> 4];
if (skip == 5 || skip == 10) pts = buf;
else pts = NULL;
@@ -529,9 +529,9 @@ static void init_mpg_picture( struct mpg_picture *pic, int chan, int32_t field_t
pic->picture_header = 0;
pic->sequence_header_data
= ( INIT_HORIZONTAL_SIZE << 20 )
- | ( INIT_VERTICAL_SIZE << 8 )
- | ( INIT_ASPECT_RATIO << 4 )
- | ( INIT_FRAME_RATE );
+ | ( INIT_VERTICAL_SIZE << 8 )
+ | ( INIT_ASPECT_RATIO << 4 )
+ | ( INIT_FRAME_RATE );
pic->mpeg1_flag = 0;
pic->vinfo.horizontal_size
= INIT_DISP_HORIZONTAL_SIZE;
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 95ea5095e07..771f32d889e 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -92,6 +92,7 @@ static DECLARE_MUTEX(frontend_mutex);
struct dvb_frontend_private {
+ /* thread/frontend values */
struct dvb_device *dvbdev;
struct dvb_frontend_parameters parameters;
struct dvb_fe_events events;
@@ -100,20 +101,25 @@ struct dvb_frontend_private {
wait_queue_head_t wait_queue;
pid_t thread_pid;
unsigned long release_jiffies;
- int state;
- int bending;
- int lnb_drift;
- int inversion;
- int auto_step;
- int auto_sub_step;
- int started_auto_step;
- int min_delay;
- int max_drift;
- int step_size;
- int exit;
- int wakeup;
+ unsigned int exit;
+ unsigned int wakeup;
fe_status_t status;
- fe_sec_tone_mode_t tone;
+ unsigned long tune_mode_flags;
+ unsigned int delay;
+
+ /* swzigzag values */
+ unsigned int state;
+ unsigned int bending;
+ int lnb_drift;
+ unsigned int inversion;
+ unsigned int auto_step;
+ unsigned int auto_sub_step;
+ unsigned int started_auto_step;
+ unsigned int min_delay;
+ unsigned int max_drift;
+ unsigned int step_size;
+ int quality;
+ unsigned int check_wrapped;
};
@@ -208,21 +214,21 @@ static void dvb_frontend_init(struct dvb_frontend *fe)
fe->ops->init(fe);
}
-static void update_delay(int *quality, int *delay, int min_delay, int locked)
+static void dvb_frontend_swzigzag_update_delay(struct dvb_frontend_private *fepriv, int locked)
{
- int q2;
+ int q2;
- dprintk ("%s\n", __FUNCTION__);
+ dprintk ("%s\n", __FUNCTION__);
- if (locked)
- (*quality) = (*quality * 220 + 36*256) / 256;
- else
- (*quality) = (*quality * 220 + 0) / 256;
+ if (locked)
+ (fepriv->quality) = (fepriv->quality * 220 + 36*256) / 256;
+ else
+ (fepriv->quality) = (fepriv->quality * 220 + 0) / 256;
- q2 = *quality - 128;
- q2 *= q2;
+ q2 = fepriv->quality - 128;
+ q2 *= q2;
- *delay = min_delay + q2 * HZ / (128*128);
+ fepriv->delay = fepriv->min_delay + q2 * HZ / (128*128);
}
/**
@@ -232,7 +238,7 @@ static void update_delay(int *quality, int *delay, int min_delay, int locked)
* @param check_wrapped Checks if an iteration has completed. DO NOT SET ON THE FIRST ATTEMPT
* @returns Number of complete iterations that have been performed.
*/
-static int dvb_frontend_autotune(struct dvb_frontend *fe, int check_wrapped)
+static int dvb_frontend_swzigzag_autotune(struct dvb_frontend *fe, int check_wrapped)
{
int autoinversion;
int ready = 0;
@@ -321,6 +327,129 @@ static int dvb_frontend_autotune(struct dvb_frontend *fe, int check_wrapped)
return 0;
}
+static void dvb_frontend_swzigzag(struct dvb_frontend *fe)
+{
+ fe_status_t s;
+ struct dvb_frontend_private *fepriv = fe->frontend_priv;
+
+ /* if we've got no parameters, just keep idling */
+ if (fepriv->state & FESTATE_IDLE) {
+ fepriv->delay = 3*HZ;
+ fepriv->quality = 0;
+ return;
+ }
+
+ /* in SCAN mode, we just set the frontend when asked and leave it alone */
+ if (fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT) {
+ if (fepriv->state & FESTATE_RETUNE) {
+ if (fe->ops->set_frontend)
+ fe->ops->set_frontend(fe, &fepriv->parameters);
+ fepriv->state = FESTATE_TUNED;
+ }
+ fepriv->delay = 3*HZ;
+ fepriv->quality = 0;
+ return;
+ }
+
+ /* get the frontend status */
+ if (fepriv->state & FESTATE_RETUNE) {
+ s = 0;
+ } else {
+ if (fe->ops->read_status)
+ fe->ops->read_status(fe, &s);
+ if (s != fepriv->status) {
+ dvb_frontend_add_event(fe, s);
+ fepriv->status = s;
+ }
+ }
+
+ /* if we're not tuned, and we have a lock, move to the TUNED state */
+ if ((fepriv->state & FESTATE_WAITFORLOCK) && (s & FE_HAS_LOCK)) {
+ dvb_frontend_swzigzag_update_delay(fepriv, s & FE_HAS_LOCK);
+ fepriv->state = FESTATE_TUNED;
+
+ /* if we're tuned, then we have determined the correct inversion */
+ if ((!(fe->ops->info.caps & FE_CAN_INVERSION_AUTO)) &&
+ (fepriv->parameters.inversion == INVERSION_AUTO)) {
+ fepriv->parameters.inversion = fepriv->inversion;
+ }
+ return;
+ }
+
+ /* if we are tuned already, check we're still locked */
+ if (fepriv->state & FESTATE_TUNED) {
+ dvb_frontend_swzigzag_update_delay(fepriv, s & FE_HAS_LOCK);
+
+ /* we're tuned, and the lock is still good... */
+ if (s & FE_HAS_LOCK) {
+ return;
+ } else { /* if we _WERE_ tuned, but now don't have a lock */
+ fepriv->state = FESTATE_ZIGZAG_FAST;
+ fepriv->started_auto_step = fepriv->auto_step;
+ fepriv->check_wrapped = 0;
+ }
+ }
+
+ /* don't actually do anything if we're in the LOSTLOCK state,
+ * the frontend is set to FE_CAN_RECOVER, and the max_drift is 0 */
+ if ((fepriv->state & FESTATE_LOSTLOCK) &&
+ (fe->ops->info.caps & FE_CAN_RECOVER) && (fepriv->max_drift == 0)) {
+ dvb_frontend_swzigzag_update_delay(fepriv, s & FE_HAS_LOCK);
+ return;
+ }
+
+ /* don't do anything if we're in the DISEQC state, since this
+ * might be someone with a motorized dish controlled by DISEQC.
+	 * If it's actually a re-tune, there will be a SET_FRONTEND soon enough. */
+ if (fepriv->state & FESTATE_DISEQC) {
+ dvb_frontend_swzigzag_update_delay(fepriv, s & FE_HAS_LOCK);
+ return;
+ }
+
+ /* if we're in the RETUNE state, set everything up for a brand
+ * new scan, keeping the current inversion setting, as the next
+ * tune is _very_ likely to require the same */
+ if (fepriv->state & FESTATE_RETUNE) {
+ fepriv->lnb_drift = 0;
+ fepriv->auto_step = 0;
+ fepriv->auto_sub_step = 0;
+ fepriv->started_auto_step = 0;
+ fepriv->check_wrapped = 0;
+ }
+
+ /* fast zigzag. */
+ if ((fepriv->state & FESTATE_SEARCHING_FAST) || (fepriv->state & FESTATE_RETUNE)) {
+ fepriv->delay = fepriv->min_delay;
+
+		/* perform a tune */
+ if (dvb_frontend_swzigzag_autotune(fe, fepriv->check_wrapped)) {
+ /* OK, if we've run out of trials at the fast speed.
+ * Drop back to slow for the _next_ attempt */
+ fepriv->state = FESTATE_SEARCHING_SLOW;
+ fepriv->started_auto_step = fepriv->auto_step;
+ return;
+ }
+ fepriv->check_wrapped = 1;
+
+ /* if we've just retuned, enter the ZIGZAG_FAST state.
+ * This ensures we cannot return from an
+ * FE_SET_FRONTEND ioctl before the first frontend tune
+ * occurs */
+ if (fepriv->state & FESTATE_RETUNE) {
+ fepriv->state = FESTATE_TUNING_FAST;
+ }
+ }
+
+ /* slow zigzag */
+ if (fepriv->state & FESTATE_SEARCHING_SLOW) {
+ dvb_frontend_swzigzag_update_delay(fepriv, s & FE_HAS_LOCK);
+
+ /* Note: don't bother checking for wrapping; we stay in this
+ * state until we get a lock */
+ dvb_frontend_swzigzag_autotune(fe, 0);
+ }
+}
+
static int dvb_frontend_is_exiting(struct dvb_frontend *fe)
{
struct dvb_frontend_private *fepriv = fe->frontend_priv;
@@ -330,7 +459,7 @@ static int dvb_frontend_is_exiting(struct dvb_frontend *fe)
if (fepriv->dvbdev->writers == 1)
if (time_after(jiffies, fepriv->release_jiffies +
- dvb_shutdown_timeout * HZ))
+ dvb_shutdown_timeout * HZ))
return 1;
return 0;
@@ -355,18 +484,14 @@ static void dvb_frontend_wakeup(struct dvb_frontend *fe)
wake_up_interruptible(&fepriv->wait_queue);
}
-/*
- * FIXME: use linux/kthread.h
- */
static int dvb_frontend_thread(void *data)
{
struct dvb_frontend *fe = data;
struct dvb_frontend_private *fepriv = fe->frontend_priv;
unsigned long timeout;
char name [15];
- int quality = 0, delay = 3*HZ;
fe_status_t s;
- int check_wrapped = 0;
+ struct dvb_frontend_parameters *params;
dprintk("%s\n", __FUNCTION__);
@@ -377,6 +502,9 @@ static int dvb_frontend_thread(void *data)
sigfillset(&current->blocked);
unlock_kernel();
+ fepriv->check_wrapped = 0;
+ fepriv->quality = 0;
+ fepriv->delay = 3*HZ;
fepriv->status = 0;
dvb_frontend_init(fe);
fepriv->wakeup = 0;
@@ -386,7 +514,7 @@ static int dvb_frontend_thread(void *data)
timeout = wait_event_interruptible_timeout(fepriv->wait_queue,
dvb_frontend_should_wakeup(fe),
- delay);
+ fepriv->delay);
if (0 != dvb_frontend_is_exiting(fe)) {
/* got signal or quitting */
break;
@@ -397,108 +525,22 @@ static int dvb_frontend_thread(void *data)
if (down_interruptible(&fepriv->sem))
break;
- /* if we've got no parameters, just keep idling */
- if (fepriv->state & FESTATE_IDLE) {
- delay = 3*HZ;
- quality = 0;
- continue;
- }
+ /* do an iteration of the tuning loop */
+ if (fe->ops->tune) {
+ /* have we been asked to retune? */
+ params = NULL;
+ if (fepriv->state & FESTATE_RETUNE) {
+ params = &fepriv->parameters;
+ fepriv->state = FESTATE_TUNED;
+ }
- /* get the frontend status */
- if (fepriv->state & FESTATE_RETUNE) {
- s = 0;
- } else {
- if (fe->ops->read_status)
- fe->ops->read_status(fe, &s);
+ fe->ops->tune(fe, params, fepriv->tune_mode_flags, &fepriv->delay, &s);
if (s != fepriv->status) {
dvb_frontend_add_event(fe, s);
fepriv->status = s;
}
- }
- /* if we're not tuned, and we have a lock, move to the TUNED state */
- if ((fepriv->state & FESTATE_WAITFORLOCK) && (s & FE_HAS_LOCK)) {
- update_delay(&quality, &delay, fepriv->min_delay, s & FE_HAS_LOCK);
- fepriv->state = FESTATE_TUNED;
-
- /* if we're tuned, then we have determined the correct inversion */
- if ((!(fe->ops->info.caps & FE_CAN_INVERSION_AUTO)) &&
- (fepriv->parameters.inversion == INVERSION_AUTO)) {
- fepriv->parameters.inversion = fepriv->inversion;
- }
- continue;
- }
-
- /* if we are tuned already, check we're still locked */
- if (fepriv->state & FESTATE_TUNED) {
- update_delay(&quality, &delay, fepriv->min_delay, s & FE_HAS_LOCK);
-
- /* we're tuned, and the lock is still good... */
- if (s & FE_HAS_LOCK)
- continue;
- else { /* if we _WERE_ tuned, but now don't have a lock */
- fepriv->state = FESTATE_ZIGZAG_FAST;
- fepriv->started_auto_step = fepriv->auto_step;
- check_wrapped = 0;
- }
- }
-
- /* don't actually do anything if we're in the LOSTLOCK state,
- * the frontend is set to FE_CAN_RECOVER, and the max_drift is 0 */
- if ((fepriv->state & FESTATE_LOSTLOCK) &&
- (fe->ops->info.caps & FE_CAN_RECOVER) && (fepriv->max_drift == 0)) {
- update_delay(&quality, &delay, fepriv->min_delay, s & FE_HAS_LOCK);
- continue;
- }
-
- /* don't do anything if we're in the DISEQC state, since this
- * might be someone with a motorized dish controlled by DISEQC.
- * If its actually a re-tune, there will be a SET_FRONTEND soon enough. */
- if (fepriv->state & FESTATE_DISEQC) {
- update_delay(&quality, &delay, fepriv->min_delay, s & FE_HAS_LOCK);
- continue;
- }
-
- /* if we're in the RETUNE state, set everything up for a brand
- * new scan, keeping the current inversion setting, as the next
- * tune is _very_ likely to require the same */
- if (fepriv->state & FESTATE_RETUNE) {
- fepriv->lnb_drift = 0;
- fepriv->auto_step = 0;
- fepriv->auto_sub_step = 0;
- fepriv->started_auto_step = 0;
- check_wrapped = 0;
- }
-
- /* fast zigzag. */
- if ((fepriv->state & FESTATE_SEARCHING_FAST) || (fepriv->state & FESTATE_RETUNE)) {
- delay = fepriv->min_delay;
-
- /* peform a tune */
- if (dvb_frontend_autotune(fe, check_wrapped)) {
- /* OK, if we've run out of trials at the fast speed.
- * Drop back to slow for the _next_ attempt */
- fepriv->state = FESTATE_SEARCHING_SLOW;
- fepriv->started_auto_step = fepriv->auto_step;
- continue;
- }
- check_wrapped = 1;
-
- /* if we've just retuned, enter the ZIGZAG_FAST state.
- * This ensures we cannot return from an
- * FE_SET_FRONTEND ioctl before the first frontend tune
- * occurs */
- if (fepriv->state & FESTATE_RETUNE) {
- fepriv->state = FESTATE_TUNING_FAST;
- }
- }
-
- /* slow zigzag */
- if (fepriv->state & FESTATE_SEARCHING_SLOW) {
- update_delay(&quality, &delay, fepriv->min_delay, s & FE_HAS_LOCK);
-
- /* Note: don't bother checking for wrapping; we stay in this
- * state until we get a lock */
- dvb_frontend_autotune(fe, 0);
+ } else {
+ dvb_frontend_swzigzag(fe);
}
}
@@ -733,7 +775,6 @@ static int dvb_frontend_ioctl(struct inode *inode, struct file *file,
err = fe->ops->set_tone(fe, (fe_sec_tone_mode_t) parg);
fepriv->state = FESTATE_DISEQC;
fepriv->status = 0;
- fepriv->tone = (fe_sec_tone_mode_t) parg;
}
break;
@@ -747,7 +788,7 @@ static int dvb_frontend_ioctl(struct inode *inode, struct file *file,
case FE_DISHNETWORK_SEND_LEGACY_CMD:
if (fe->ops->dishnetwork_send_legacy_command) {
- err = fe->ops->dishnetwork_send_legacy_command(fe, (unsigned int) parg);
+ err = fe->ops->dishnetwork_send_legacy_command(fe, (unsigned long) parg);
fepriv->state = FESTATE_DISEQC;
fepriv->status = 0;
} else if (fe->ops->set_voltage) {
@@ -767,13 +808,13 @@ static int dvb_frontend_ioctl(struct inode *inode, struct file *file,
* initialization, so parg is 8 bits and does not
* include the initialization or start bit
*/
- unsigned int cmd = ((unsigned int) parg) << 1;
+ unsigned long cmd = ((unsigned long) parg) << 1;
struct timeval nexttime;
struct timeval tv[10];
int i;
u8 last = 1;
if (dvb_frontend_debug)
- printk("%s switch command: 0x%04x\n", __FUNCTION__, cmd);
+ printk("%s switch command: 0x%04lx\n", __FUNCTION__, cmd);
do_gettimeofday(&nexttime);
if (dvb_frontend_debug)
memcpy(&tv[0], &nexttime, sizeof(struct timeval));
@@ -814,7 +855,7 @@ static int dvb_frontend_ioctl(struct inode *inode, struct file *file,
case FE_ENABLE_HIGH_LNB_VOLTAGE:
if (fe->ops->enable_high_lnb_voltage)
- err = fe->ops->enable_high_lnb_voltage(fe, (int) parg);
+ err = fe->ops->enable_high_lnb_voltage(fe, (long) parg);
break;
case FE_SET_FRONTEND: {
@@ -891,6 +932,10 @@ static int dvb_frontend_ioctl(struct inode *inode, struct file *file,
err = fe->ops->get_frontend(fe, (struct dvb_frontend_parameters*) parg);
}
break;
+
+ case FE_SET_FRONTEND_TUNE_MODE:
+ fepriv->tune_mode_flags = (unsigned long) parg;
+ break;
};
up (&fepriv->sem);
@@ -932,6 +977,9 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
/* empty event queue */
fepriv->events.eventr = fepriv->events.eventw = 0;
+
+ /* normal tune mode when opened R/W */
+ fepriv->tune_mode_flags &= ~FE_TUNE_MODE_ONESHOT;
}
return ret;
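The new FE_SET_FRONTEND_TUNE_MODE ioctl lets an application such as a channel
scanner put the frontend into one-shot mode; since opening the device
read/write resets the mode, the flag has to be set after open(). A minimal
userspace sketch (device path and missing error handling are illustrative):

        #include <fcntl.h>
        #include <sys/ioctl.h>
        #include <linux/dvb/frontend.h>

        int main(void)
        {
                int fd = open("/dev/dvb/adapter0/frontend0", O_RDWR);

                /* request one-shot tuning: FE_SET_FRONTEND will program the
                 * parameters once and the kernel thread will not zigzag-retune */
                ioctl(fd, FE_SET_FRONTEND_TUNE_MODE, FE_TUNE_MODE_ONESHOT);

                return 0;
        }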
@@ -976,13 +1024,12 @@ int dvb_register_frontend(struct dvb_adapter* dvb,
if (down_interruptible (&frontend_mutex))
return -ERESTARTSYS;
- fe->frontend_priv = kmalloc(sizeof(struct dvb_frontend_private), GFP_KERNEL);
+ fe->frontend_priv = kzalloc(sizeof(struct dvb_frontend_private), GFP_KERNEL);
if (fe->frontend_priv == NULL) {
up(&frontend_mutex);
return -ENOMEM;
}
fepriv = fe->frontend_priv;
- memset(fe->frontend_priv, 0, sizeof(struct dvb_frontend_private));
init_MUTEX (&fepriv->sem);
init_waitqueue_head (&fepriv->wait_queue);
@@ -990,7 +1037,6 @@ int dvb_register_frontend(struct dvb_adapter* dvb,
init_MUTEX (&fepriv->events.sem);
fe->dvb = dvb;
fepriv->inversion = INVERSION_OFF;
- fepriv->tone = SEC_TONE_OFF;
printk ("DVB: registering frontend %i (%s)...\n",
fe->dvb->num,
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.h b/drivers/media/dvb/dvb-core/dvb_frontend.h
index 1e0840d02f1..70a6d14efda 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.h
@@ -58,10 +58,19 @@ struct dvb_frontend_ops {
int (*init)(struct dvb_frontend* fe);
int (*sleep)(struct dvb_frontend* fe);
+ /* if this is set, it overrides the default swzigzag */
+ int (*tune)(struct dvb_frontend* fe,
+ struct dvb_frontend_parameters* params,
+ unsigned int mode_flags,
+ int *delay,
+ fe_status_t *status);
+
+ /* these two are only used for the swzigzag code */
int (*set_frontend)(struct dvb_frontend* fe, struct dvb_frontend_parameters* params);
- int (*get_frontend)(struct dvb_frontend* fe, struct dvb_frontend_parameters* params);
int (*get_tune_settings)(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* settings);
+ int (*get_frontend)(struct dvb_frontend* fe, struct dvb_frontend_parameters* params);
+
int (*read_status)(struct dvb_frontend* fe, fe_status_t* status);
int (*read_ber)(struct dvb_frontend* fe, u32* ber);
int (*read_signal_strength)(struct dvb_frontend* fe, u16* strength);
@@ -74,8 +83,9 @@ struct dvb_frontend_ops {
int (*diseqc_send_burst)(struct dvb_frontend* fe, fe_sec_mini_cmd_t minicmd);
int (*set_tone)(struct dvb_frontend* fe, fe_sec_tone_mode_t tone);
int (*set_voltage)(struct dvb_frontend* fe, fe_sec_voltage_t voltage);
- int (*enable_high_lnb_voltage)(struct dvb_frontend* fe, int arg);
- int (*dishnetwork_send_legacy_command)(struct dvb_frontend* fe, unsigned int cmd);
+ int (*enable_high_lnb_voltage)(struct dvb_frontend* fe, long arg);
+ int (*dishnetwork_send_legacy_command)(struct dvb_frontend* fe, unsigned long cmd);
+ int (*i2c_gate_ctrl)(struct dvb_frontend* fe, int enable);
};
#define MAX_EVENT 8
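A frontend driver that fills in the new tune callback takes over the tuning
loop completely: the frontend thread calls it once per wakeup, passes the new
parameters only on a retune (NULL otherwise), and expects the callback to
report the current status and the delay until the next call. A minimal sketch
of such a callback, assuming the demodulator locks on its own; the mydemod_*
helpers are placeholders:

        static int mydemod_tune(struct dvb_frontend *fe,
                                struct dvb_frontend_parameters *params,
                                unsigned int mode_flags,
                                int *delay, fe_status_t *status)
        {
                struct mydemod_state *state = fe->demodulator_priv;

                if (params != NULL)                 /* non-NULL only on a retune */
                        mydemod_set_params(state, params);

                if (!(mode_flags & FE_TUNE_MODE_ONESHOT))
                        mydemod_read_status(state, status);

                *delay = HZ / 10;                   /* poll again in 100 ms */
                return 0;
        }

This is the same shape as the dst_set_frontend() conversion earlier in the
series; frontends that leave tune unset keep using the software zigzag.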
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 86bba81e851..6711eb6a058 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -1222,7 +1222,7 @@ static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype)
return if_num;
}
-static int dvb_net_remove_if(struct dvb_net *dvbnet, unsigned int num)
+static int dvb_net_remove_if(struct dvb_net *dvbnet, unsigned long num)
{
struct net_device *net = dvbnet->device[num];
struct dvb_net_priv *priv;
@@ -1296,9 +1296,9 @@ static int dvb_net_do_ioctl(struct inode *inode, struct file *file,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if ((unsigned int) parg >= DVB_NET_DEVICES_MAX)
+ if ((unsigned long) parg >= DVB_NET_DEVICES_MAX)
return -EINVAL;
- ret = dvb_net_remove_if(dvbnet, (unsigned int) parg);
+ ret = dvb_net_remove_if(dvbnet, (unsigned long) parg);
if (!ret)
module_put(dvbdev->adapter->module);
return ret;
diff --git a/drivers/media/dvb/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb/dvb-core/dvb_ringbuffer.c
index 283c6e9339a..77ad2410f4d 100644
--- a/drivers/media/dvb/dvb-core/dvb_ringbuffer.c
+++ b/drivers/media/dvb/dvb-core/dvb_ringbuffer.c
@@ -112,10 +112,10 @@ ssize_t dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len, in
split = (rbuf->pread + len > rbuf->size) ? rbuf->size - rbuf->pread : 0;
if (split > 0) {
if (!usermem)
- memcpy(buf, rbuf->data+rbuf->pread, split);
+ memcpy(buf, rbuf->data+rbuf->pread, split);
else
- if (copy_to_user(buf, rbuf->data+rbuf->pread, split))
- return -EFAULT;
+ if (copy_to_user(buf, rbuf->data+rbuf->pread, split))
+ return -EFAULT;
buf += split;
todo -= split;
rbuf->pread = 0;
@@ -124,7 +124,7 @@ ssize_t dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len, in
memcpy(buf, rbuf->data+rbuf->pread, todo);
else
if (copy_to_user(buf, rbuf->data+rbuf->pread, todo))
- return -EFAULT;
+ return -EFAULT;
rbuf->pread = (rbuf->pread + todo) % rbuf->size;
@@ -167,7 +167,7 @@ ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8* buf, size_t le
}
ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx,
- int offset, u8* buf, size_t len, int usermem)
+ int offset, u8* buf, size_t len, int usermem)
{
size_t todo;
size_t split;
@@ -183,10 +183,10 @@ ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx,
split = ((idx + len) > rbuf->size) ? rbuf->size - idx : 0;
if (split > 0) {
if (!usermem)
- memcpy(buf, rbuf->data+idx, split);
+ memcpy(buf, rbuf->data+idx, split);
else
- if (copy_to_user(buf, rbuf->data+idx, split))
- return -EFAULT;
+ if (copy_to_user(buf, rbuf->data+idx, split))
+ return -EFAULT;
buf += split;
todo -= split;
idx = 0;
@@ -195,7 +195,7 @@ ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx,
memcpy(buf, rbuf->data+idx, todo);
else
if (copy_to_user(buf, rbuf->data+idx, todo))
- return -EFAULT;
+ return -EFAULT;
return len;
}
@@ -209,12 +209,12 @@ void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx)
// clean up disposed packets
while(dvb_ringbuffer_avail(rbuf) > DVB_RINGBUFFER_PKTHDRSIZE) {
if (DVB_RINGBUFFER_PEEK(rbuf, 2) == PKT_DISPOSED) {
- pktlen = DVB_RINGBUFFER_PEEK(rbuf, 0) << 8;
- pktlen |= DVB_RINGBUFFER_PEEK(rbuf, 1);
- DVB_RINGBUFFER_SKIP(rbuf, pktlen + DVB_RINGBUFFER_PKTHDRSIZE);
+ pktlen = DVB_RINGBUFFER_PEEK(rbuf, 0) << 8;
+ pktlen |= DVB_RINGBUFFER_PEEK(rbuf, 1);
+ DVB_RINGBUFFER_SKIP(rbuf, pktlen + DVB_RINGBUFFER_PKTHDRSIZE);
} else {
- // first packet is not disposed, so we stop cleaning now
- break;
+ // first packet is not disposed, so we stop cleaning now
+ break;
}
}
}
@@ -242,8 +242,8 @@ ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t*
curpktstatus = rbuf->data[(idx + 2) % rbuf->size];
if (curpktstatus == PKT_READY) {
- *pktlen = curpktlen;
- return idx;
+ *pktlen = curpktlen;
+ return idx;
}
consumed += curpktlen + DVB_RINGBUFFER_PKTHDRSIZE;
diff --git a/drivers/media/dvb/dvb-core/dvb_ringbuffer.h b/drivers/media/dvb/dvb-core/dvb_ringbuffer.h
index fa476f662f8..6d256097277 100644
--- a/drivers/media/dvb/dvb-core/dvb_ringbuffer.h
+++ b/drivers/media/dvb/dvb-core/dvb_ringbuffer.h
@@ -106,7 +106,7 @@ extern void dvb_ringbuffer_flush_spinlock_wakeup(struct dvb_ringbuffer *rbuf);
** returns number of bytes transferred or -EFAULT
*/
extern ssize_t dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf,
- size_t len, int usermem);
+ size_t len, int usermem);
/* write routines & macros */
@@ -121,7 +121,7 @@ extern ssize_t dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf,
** returns number of bytes transferred or -EFAULT
*/
extern ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf,
- size_t len);
+ size_t len);
/**
@@ -133,7 +133,7 @@ extern ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf,
 * returns Number of bytes written, or -EFAULT, -ENOMEM, -EINVAL.
*/
extern ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8* buf,
- size_t len);
+ size_t len);
/**
* Read from a packet in the ringbuffer. Note: unlike dvb_ringbuffer_read(), this
@@ -149,7 +149,7 @@ extern ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8* buf,
* returns Number of bytes read, or -EFAULT.
*/
extern ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx,
- int offset, u8* buf, size_t len, int usermem);
+ int offset, u8* buf, size_t len, int usermem);
/**
* Dispose of a packet in the ring buffer.
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index a4aee866585..06b696e9acb 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -92,10 +92,10 @@ static int dvb_device_open(struct inode *inode, struct file *file)
old_fops = file->f_op;
file->f_op = fops_get(dvbdev->fops);
if(file->f_op->open)
- err = file->f_op->open(inode,file);
+ err = file->f_op->open(inode,file);
if (err) {
- fops_put(file->f_op);
- file->f_op = fops_get(old_fops);
+ fops_put(file->f_op);
+ file->f_op = fops_get(old_fops);
}
fops_put(old_fops);
return err;
@@ -356,18 +356,18 @@ int dvb_usercopy(struct inode *inode, struct file *file,
case _IOC_WRITE:
case (_IOC_WRITE | _IOC_READ):
if (_IOC_SIZE(cmd) <= sizeof(sbuf)) {
- parg = sbuf;
+ parg = sbuf;
} else {
- /* too big to allocate from stack */
- mbuf = kmalloc(_IOC_SIZE(cmd),GFP_KERNEL);
- if (NULL == mbuf)
- return -ENOMEM;
- parg = mbuf;
+ /* too big to allocate from stack */
+ mbuf = kmalloc(_IOC_SIZE(cmd),GFP_KERNEL);
+ if (NULL == mbuf)
+ return -ENOMEM;
+ parg = mbuf;
}
err = -EFAULT;
if (copy_from_user(parg, (void __user *)arg, _IOC_SIZE(cmd)))
- goto out;
+ goto out;
break;
}
@@ -384,7 +384,7 @@ int dvb_usercopy(struct inode *inode, struct file *file,
case _IOC_READ:
case (_IOC_WRITE | _IOC_READ):
if (copy_to_user((void __user *)arg, parg, _IOC_SIZE(cmd)))
- err = -EFAULT;
+ err = -EFAULT;
break;
}
diff --git a/drivers/media/dvb/dvb-core/dvbdev.h b/drivers/media/dvb/dvb-core/dvbdev.h
index 0cc6e4a0e27..74ed5853f0f 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.h
+++ b/drivers/media/dvb/dvb-core/dvbdev.h
@@ -97,7 +97,7 @@ we simply define our own dvb_usercopy(), which will hopefully become
generic_usercopy() someday... */
extern int dvb_usercopy(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg,
+ unsigned int cmd, unsigned long arg,
int (*func)(struct inode *inode, struct file *file,
unsigned int cmd, void *arg));
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 54e2b29076b..90a69d343b7 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -37,16 +37,16 @@ config DVB_USB_DIBUSB_MB
DiBcom (<http://www.dibcom.fr>) equipped with a DiB3000M-B demodulator.
Devices supported by this driver:
- TwinhanDTV USB-Ter (VP7041)
- TwinhanDTV Magic Box (VP7041e)
- KWorld/JetWay/ADSTech V-Stream XPERT DTV - DVB-T USB1.1 and USB2.0
- Hama DVB-T USB1.1-Box
- DiBcom USB1.1 reference devices (non-public)
- Ultima Electronic/Artec T1 USB TVBOX
+ Artec T1 USB1.1 boxes
+ Avermedia AverTV DVBT USB1.1
Compro Videomate DVB-U2000 - DVB-T USB
+ DiBcom USB1.1 reference devices (non-public)
Grandtec DVB-T USB
- Avermedia AverTV DVBT USB1.1
- Artec T1 USB1.1 boxes
+ Hama DVB-T USB1.1-Box
+ KWorld/JetWay/ADSTech V-Stream XPERT DTV - DVB-T USB1.1 and USB2.0
+ TwinhanDTV Magic Box (VP7041e)
+ TwinhanDTV USB-Ter (VP7041)
+ Ultima Electronic/Artec T1 USB TVBOX
The VP7041 seems to be identical to "CTS Portable" (Chinese
Television System).
@@ -54,6 +54,12 @@ config DVB_USB_DIBUSB_MB
Say Y if you own such a device and want to use it. You should build it as
a module.
+config DVB_USB_DIBUSB_MB_FAULTY
+ bool "Support faulty USB IDs"
+ depends on DVB_USB_DIBUSB_MB
+ help
+ Support for faulty USB IDs due to an invalid EEPROM on some Artec devices.
+
config DVB_USB_DIBUSB_MC
tristate "DiBcom USB DVB-T devices (based on the DiB3000M-C/P) (see help for device list)"
depends on DVB_USB
@@ -63,8 +69,8 @@ config DVB_USB_DIBUSB_MC
DiBcom (<http://www.dibcom.fr>) equipped with a DiB3000M-C/P demodulator.
Devices supported by this driver:
- DiBcom USB2.0 reference devices (non-public)
Artec T1 USB2.0 boxes
+ DiBcom USB2.0 reference devices (non-public)
Say Y if you own such a device and want to use it. You should build it as
a module.
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index d05fab01ccc..18d169836c9 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -11,10 +11,11 @@
* design, so it can be reused for the "analogue-only" device (if it will
* appear at all).
*
- * TODO: check if the cx25840-driver (from ivtv) can be used for the analogue
- * part
+ * TODO: Use the cx25840-driver for the analogue part
*
* Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2005 Michael Krufky (mkrufky@m1k.net)
+ * Copyright (C) 2006 Chris Pascoe (c.pascoe@itee.uq.edu.au)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -25,6 +26,9 @@
#include "cxusb.h"
#include "cx22702.h"
+#include "lgdt330x.h"
+#include "mt352.h"
+#include "mt352_priv.h"
/* debug */
int dvb_usb_cxusb_debug;
@@ -156,6 +160,119 @@ static int cxusb_streaming_ctrl(struct dvb_usb_device *d, int onoff)
return 0;
}
+static int cxusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
+{
+ struct dvb_usb_rc_key *keymap = d->props.rc_key_map;
+ u8 ircode[4];
+ int i;
+
+ cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4);
+
+ *event = 0;
+ *state = REMOTE_NO_KEY_PRESSED;
+
+ for (i = 0; i < d->props.rc_key_map_size; i++) {
+ if (keymap[i].custom == ircode[2] &&
+ keymap[i].data == ircode[3]) {
+ *event = keymap[i].event;
+ *state = REMOTE_KEY_PRESSED;
+
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+struct dvb_usb_rc_key dvico_mce_rc_keys[] = {
+ { 0xfe, 0x02, KEY_TV },
+ { 0xfe, 0x0e, KEY_MP3 },
+ { 0xfe, 0x1a, KEY_DVD },
+ { 0xfe, 0x1e, KEY_FAVORITES },
+ { 0xfe, 0x16, KEY_SETUP },
+ { 0xfe, 0x46, KEY_POWER2 },
+ { 0xfe, 0x0a, KEY_EPG },
+ { 0xfe, 0x49, KEY_BACK },
+ { 0xfe, 0x4d, KEY_MENU },
+ { 0xfe, 0x51, KEY_UP },
+ { 0xfe, 0x5b, KEY_LEFT },
+ { 0xfe, 0x5f, KEY_RIGHT },
+ { 0xfe, 0x53, KEY_DOWN },
+ { 0xfe, 0x5e, KEY_OK },
+ { 0xfe, 0x59, KEY_INFO },
+ { 0xfe, 0x55, KEY_TAB },
+ { 0xfe, 0x0f, KEY_PREVIOUSSONG },/* Replay */
+ { 0xfe, 0x12, KEY_NEXTSONG }, /* Skip */
+ { 0xfe, 0x42, KEY_ENTER }, /* Windows/Start */
+ { 0xfe, 0x15, KEY_VOLUMEUP },
+ { 0xfe, 0x05, KEY_VOLUMEDOWN },
+ { 0xfe, 0x11, KEY_CHANNELUP },
+ { 0xfe, 0x09, KEY_CHANNELDOWN },
+ { 0xfe, 0x52, KEY_CAMERA },
+ { 0xfe, 0x5a, KEY_TUNER }, /* Live */
+ { 0xfe, 0x19, KEY_OPEN },
+ { 0xfe, 0x0b, KEY_1 },
+ { 0xfe, 0x17, KEY_2 },
+ { 0xfe, 0x1b, KEY_3 },
+ { 0xfe, 0x07, KEY_4 },
+ { 0xfe, 0x50, KEY_5 },
+ { 0xfe, 0x54, KEY_6 },
+ { 0xfe, 0x48, KEY_7 },
+ { 0xfe, 0x4c, KEY_8 },
+ { 0xfe, 0x58, KEY_9 },
+ { 0xfe, 0x13, KEY_ANGLE }, /* Aspect */
+ { 0xfe, 0x03, KEY_0 },
+ { 0xfe, 0x1f, KEY_ZOOM },
+ { 0xfe, 0x43, KEY_REWIND },
+ { 0xfe, 0x47, KEY_PLAYPAUSE },
+ { 0xfe, 0x4f, KEY_FASTFORWARD },
+ { 0xfe, 0x57, KEY_MUTE },
+ { 0xfe, 0x0d, KEY_STOP },
+ { 0xfe, 0x01, KEY_RECORD },
+ { 0xfe, 0x4e, KEY_POWER },
+};
+
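The key table and cxusb_rc_query() only take effect once a device's
dvb_usb_properties points at them; the bluebird property blocks added further
down presumably wire them up roughly as below (the interval value is an
illustrative assumption):

        static struct dvb_usb_properties example_rc_props = {
                /* remote control polling hookup */
                .rc_interval     = 150,             /* poll every 150 ms */
                .rc_key_map      = dvico_mce_rc_keys,
                .rc_key_map_size = ARRAY_SIZE(dvico_mce_rc_keys),
                .rc_query        = cxusb_rc_query,
        };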
+static int cxusb_dee1601_demod_init(struct dvb_frontend* fe)
+{
+ static u8 clock_config [] = { CLOCK_CTL, 0x38, 0x38 };
+ static u8 reset [] = { RESET, 0x80 };
+ static u8 adc_ctl_1_cfg [] = { ADC_CTL_1, 0x40 };
+ static u8 agc_cfg [] = { AGC_TARGET, 0x28, 0x20 };
+ static u8 gpp_ctl_cfg [] = { GPP_CTL, 0x33 };
+ static u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
+
+ mt352_write(fe, clock_config, sizeof(clock_config));
+ udelay(200);
+ mt352_write(fe, reset, sizeof(reset));
+ mt352_write(fe, adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg));
+
+ mt352_write(fe, agc_cfg, sizeof(agc_cfg));
+ mt352_write(fe, gpp_ctl_cfg, sizeof(gpp_ctl_cfg));
+ mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg));
+
+ return 0;
+}
+
+static int cxusb_mt352_demod_init(struct dvb_frontend* fe)
+{ /* used in both lgz201 and th7579 */
+ static u8 clock_config [] = { CLOCK_CTL, 0x38, 0x39 };
+ static u8 reset [] = { RESET, 0x80 };
+ static u8 adc_ctl_1_cfg [] = { ADC_CTL_1, 0x40 };
+ static u8 agc_cfg [] = { AGC_TARGET, 0x24, 0x20 };
+ static u8 gpp_ctl_cfg [] = { GPP_CTL, 0x33 };
+ static u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
+
+ mt352_write(fe, clock_config, sizeof(clock_config));
+ udelay(200);
+ mt352_write(fe, reset, sizeof(reset));
+ mt352_write(fe, adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg));
+
+ mt352_write(fe, agc_cfg, sizeof(agc_cfg));
+ mt352_write(fe, gpp_ctl_cfg, sizeof(gpp_ctl_cfg));
+ mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg));
+ return 0;
+}
+
struct cx22702_config cxusb_cx22702_config = {
.demod_address = 0x63,
@@ -165,17 +282,68 @@ struct cx22702_config cxusb_cx22702_config = {
.pll_set = dvb_usb_pll_set_i2c,
};
+struct lgdt330x_config cxusb_lgdt330x_config = {
+ .demod_address = 0x0e,
+ .demod_chip = LGDT3303,
+ .pll_set = dvb_usb_pll_set_i2c,
+};
+
+struct mt352_config cxusb_dee1601_config = {
+ .demod_address = 0x0f,
+ .demod_init = cxusb_dee1601_demod_init,
+ .pll_set = dvb_usb_pll_set,
+};
+
+struct mt352_config cxusb_mt352_config = {
+ /* used in both lgz201 and th7579 */
+ .demod_address = 0x0f,
+ .demod_init = cxusb_mt352_demod_init,
+ .pll_set = dvb_usb_pll_set,
+};
+
/* Callbacks for DVB USB */
-static int cxusb_tuner_attach(struct dvb_usb_device *d)
+static int cxusb_fmd1216me_tuner_attach(struct dvb_usb_device *d)
{
u8 bpll[4] = { 0x0b, 0xdc, 0x9c, 0xa0 };
d->pll_addr = 0x61;
- memcpy(d->pll_init,bpll,4);
+ memcpy(d->pll_init, bpll, 4);
d->pll_desc = &dvb_pll_fmd1216me;
return 0;
}
-static int cxusb_frontend_attach(struct dvb_usb_device *d)
+static int cxusb_lgh064f_tuner_attach(struct dvb_usb_device *d)
+{
+ u8 bpll[4] = { 0x00, 0x00, 0x18, 0x50 };
+ /* bpll[2] : unset bit 3, set bits 4&5
+ bpll[3] : 0x50 - digital, 0x20 - analog */
+ d->pll_addr = 0x61;
+ memcpy(d->pll_init, bpll, 4);
+ d->pll_desc = &dvb_pll_tdvs_tua6034;
+ return 0;
+}
+
+static int cxusb_dee1601_tuner_attach(struct dvb_usb_device *d)
+{
+ d->pll_addr = 0x61;
+ d->pll_desc = &dvb_pll_thomson_dtt7579;
+ return 0;
+}
+
+static int cxusb_lgz201_tuner_attach(struct dvb_usb_device *d)
+{
+ d->pll_addr = 0x61;
+ d->pll_desc = &dvb_pll_lg_z201;
+ return 0;
+}
+
+static int cxusb_dtt7579_tuner_attach(struct dvb_usb_device *d)
+{
+ d->pll_addr = 0x60;
+ d->pll_desc = &dvb_pll_thomson_dtt7579;
+ return 0;
+}
+
+static int cxusb_cx22702_frontend_attach(struct dvb_usb_device *d)
{
u8 b;
if (usb_set_interface(d->udev,0,6) < 0)
@@ -189,22 +357,107 @@ static int cxusb_frontend_attach(struct dvb_usb_device *d)
return -EIO;
}
+static int cxusb_lgdt330x_frontend_attach(struct dvb_usb_device *d)
+{
+ if (usb_set_interface(d->udev,0,7) < 0)
+ err("set interface failed");
+
+ cxusb_ctrl_msg(d,CMD_DIGITAL, NULL, 0, NULL, 0);
+
+ if ((d->fe = lgdt330x_attach(&cxusb_lgdt330x_config, &d->i2c_adap)) != NULL)
+ return 0;
+
+ return -EIO;
+}
+
+static int cxusb_mt352_frontend_attach(struct dvb_usb_device *d)
+{ /* used in both lgz201 and th7579 */
+ if (usb_set_interface(d->udev,0,0) < 0)
+ err("set interface failed");
+
+ cxusb_ctrl_msg(d,CMD_DIGITAL, NULL, 0, NULL, 0);
+
+ if ((d->fe = mt352_attach(&cxusb_mt352_config, &d->i2c_adap)) != NULL)
+ return 0;
+
+ return -EIO;
+}
+
+static int cxusb_dee1601_frontend_attach(struct dvb_usb_device *d)
+{
+ if (usb_set_interface(d->udev,0,0) < 0)
+ err("set interface failed");
+
+ cxusb_ctrl_msg(d,CMD_DIGITAL, NULL, 0, NULL, 0);
+
+ if ((d->fe = mt352_attach(&cxusb_dee1601_config, &d->i2c_adap)) != NULL)
+ return 0;
+
+ return -EIO;
+}
+
+/*
+ * DViCO bluebird firmware needs the "warm" product ID to be patched into the
+ * firmware file before download.
+ */
+
+#define BLUEBIRD_01_ID_OFFSET 6638
+static int bluebird_patch_dvico_firmware_download(struct usb_device *udev, const struct firmware *fw)
+{
+ if (fw->size < BLUEBIRD_01_ID_OFFSET + 4)
+ return -EINVAL;
+
+ if (fw->data[BLUEBIRD_01_ID_OFFSET] == (USB_VID_DVICO & 0xff) &&
+ fw->data[BLUEBIRD_01_ID_OFFSET + 1] == USB_VID_DVICO >> 8) {
+
+ /* FIXME: are we allowed to change the fw-data ? */
+ fw->data[BLUEBIRD_01_ID_OFFSET + 2] = udev->descriptor.idProduct + 1;
+ fw->data[BLUEBIRD_01_ID_OFFSET + 3] = udev->descriptor.idProduct >> 8;
+
+ return usb_cypress_load_firmware(udev,fw,CYPRESS_FX2);
+ }
+
+ return -EINVAL;
+}
+
/* DVB USB Driver stuff */
-static struct dvb_usb_properties cxusb_properties;
+static struct dvb_usb_properties cxusb_medion_properties;
+static struct dvb_usb_properties cxusb_bluebird_lgh064f_properties;
+static struct dvb_usb_properties cxusb_bluebird_dee1601_properties;
+static struct dvb_usb_properties cxusb_bluebird_lgz201_properties;
+static struct dvb_usb_properties cxusb_bluebird_dtt7579_properties;
static int cxusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- return dvb_usb_device_init(intf,&cxusb_properties,THIS_MODULE,NULL);
+ if (dvb_usb_device_init(intf,&cxusb_medion_properties,THIS_MODULE,NULL) == 0 ||
+ dvb_usb_device_init(intf,&cxusb_bluebird_lgh064f_properties,THIS_MODULE,NULL) == 0 ||
+ dvb_usb_device_init(intf,&cxusb_bluebird_dee1601_properties,THIS_MODULE,NULL) == 0 ||
+ dvb_usb_device_init(intf,&cxusb_bluebird_lgz201_properties,THIS_MODULE,NULL) == 0 ||
+ dvb_usb_device_init(intf,&cxusb_bluebird_dtt7579_properties,THIS_MODULE,NULL) == 0) {
+ return 0;
+ }
+
+ return -EINVAL;
}
static struct usb_device_id cxusb_table [] = {
{ USB_DEVICE(USB_VID_MEDION, USB_PID_MEDION_MD95700) },
+ { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_LG064F_COLD) },
+ { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_LG064F_WARM) },
+ { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DEE1601_COLD) },
+ { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_DEE1601_WARM) },
+ { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_LGZ201_COLD) },
+ { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_LGZ201_WARM) },
+ { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_TH7579_COLD) },
+ { USB_DEVICE(USB_VID_DVICO, USB_PID_DVICO_BLUEBIRD_TH7579_WARM) },
+ { USB_DEVICE(USB_VID_DVICO, USB_PID_DIGITALNOW_BLUEBIRD_DEE1601_COLD) },
+ { USB_DEVICE(USB_VID_DVICO, USB_PID_DIGITALNOW_BLUEBIRD_DEE1601_WARM) },
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, cxusb_table);
-static struct dvb_usb_properties cxusb_properties = {
+static struct dvb_usb_properties cxusb_medion_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = CYPRESS_FX2,
@@ -213,8 +466,8 @@ static struct dvb_usb_properties cxusb_properties = {
.streaming_ctrl = cxusb_streaming_ctrl,
.power_ctrl = cxusb_power_ctrl,
- .frontend_attach = cxusb_frontend_attach,
- .tuner_attach = cxusb_tuner_attach,
+ .frontend_attach = cxusb_cx22702_frontend_attach,
+ .tuner_attach = cxusb_fmd1216me_tuner_attach,
.i2c_algo = &cxusb_i2c_algo,
@@ -240,6 +493,175 @@ static struct dvb_usb_properties cxusb_properties = {
}
};
+static struct dvb_usb_properties cxusb_bluebird_lgh064f_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .firmware = "dvb-usb-bluebird-01.fw",
+ .download_firmware = bluebird_patch_dvico_firmware_download,
+ /* use usb alt setting 0 for EP4 transfer (dvb-t),
+ use usb alt setting 7 for EP2 transfer (atsc) */
+
+ .size_of_priv = sizeof(struct cxusb_state),
+
+ .streaming_ctrl = cxusb_streaming_ctrl,
+ .power_ctrl = cxusb_power_ctrl,
+ .frontend_attach = cxusb_lgdt330x_frontend_attach,
+ .tuner_attach = cxusb_lgh064f_tuner_attach,
+
+ .i2c_algo = &cxusb_i2c_algo,
+
+ .generic_bulk_ctrl_endpoint = 0x01,
+ /* parameter for the MPEG2-data transfer */
+ .urb = {
+ .type = DVB_USB_BULK,
+ .count = 5,
+ .endpoint = 0x02,
+ .u = {
+ .bulk = {
+ .buffersize = 8192,
+ }
+ }
+ },
+
+ .num_device_descs = 1,
+ .devices = {
+ { "DViCO FusionHDTV5 USB Gold",
+ { &cxusb_table[1], NULL },
+ { &cxusb_table[2], NULL },
+ },
+ }
+};
+
+static struct dvb_usb_properties cxusb_bluebird_dee1601_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .firmware = "dvb-usb-bluebird-01.fw",
+ .download_firmware = bluebird_patch_dvico_firmware_download,
+ /* use usb alt setting 0 for EP4 transfer (dvb-t),
+ use usb alt setting 7 for EP2 transfer (atsc) */
+
+ .size_of_priv = sizeof(struct cxusb_state),
+
+ .streaming_ctrl = cxusb_streaming_ctrl,
+ .power_ctrl = cxusb_power_ctrl,
+ .frontend_attach = cxusb_dee1601_frontend_attach,
+ .tuner_attach = cxusb_dee1601_tuner_attach,
+
+ .i2c_algo = &cxusb_i2c_algo,
+
+ .rc_interval = 150,
+ .rc_key_map = dvico_mce_rc_keys,
+ .rc_key_map_size = ARRAY_SIZE(dvico_mce_rc_keys),
+ .rc_query = cxusb_rc_query,
+
+ .generic_bulk_ctrl_endpoint = 0x01,
+ /* parameter for the MPEG2-data transfer */
+ .urb = {
+ .type = DVB_USB_BULK,
+ .count = 5,
+ .endpoint = 0x04,
+ .u = {
+ .bulk = {
+ .buffersize = 8192,
+ }
+ }
+ },
+
+ .num_device_descs = 2,
+ .devices = {
+ { "DViCO FusionHDTV DVB-T Dual USB",
+ { &cxusb_table[3], NULL },
+ { &cxusb_table[4], NULL },
+ },
+ { "DigitalNow DVB-T Dual USB",
+ { &cxusb_table[9], NULL },
+ { &cxusb_table[10], NULL },
+ },
+ }
+};
+
+static struct dvb_usb_properties cxusb_bluebird_lgz201_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .firmware = "dvb-usb-bluebird-01.fw",
+ .download_firmware = bluebird_patch_dvico_firmware_download,
+ /* use usb alt setting 0 for EP4 transfer (dvb-t),
+ use usb alt setting 7 for EP2 transfer (atsc) */
+
+ .size_of_priv = sizeof(struct cxusb_state),
+
+ .streaming_ctrl = cxusb_streaming_ctrl,
+ .power_ctrl = cxusb_power_ctrl,
+ .frontend_attach = cxusb_mt352_frontend_attach,
+ .tuner_attach = cxusb_lgz201_tuner_attach,
+
+ .i2c_algo = &cxusb_i2c_algo,
+
+ .generic_bulk_ctrl_endpoint = 0x01,
+ /* parameter for the MPEG2-data transfer */
+ .urb = {
+ .type = DVB_USB_BULK,
+ .count = 5,
+ .endpoint = 0x04,
+ .u = {
+ .bulk = {
+ .buffersize = 8192,
+ }
+ }
+ },
+
+ .num_device_descs = 1,
+ .devices = {
+ { "DViCO FusionHDTV DVB-T USB (LGZ201)",
+ { &cxusb_table[5], NULL },
+ { &cxusb_table[6], NULL },
+ },
+ }
+};
+
+static struct dvb_usb_properties cxusb_bluebird_dtt7579_properties = {
+ .caps = DVB_USB_IS_AN_I2C_ADAPTER,
+
+ .usb_ctrl = DEVICE_SPECIFIC,
+ .firmware = "dvb-usb-bluebird-01.fw",
+ .download_firmware = bluebird_patch_dvico_firmware_download,
+ /* use usb alt setting 0 for EP4 transfer (dvb-t),
+ use usb alt setting 7 for EP2 transfer (atsc) */
+
+ .size_of_priv = sizeof(struct cxusb_state),
+
+ .streaming_ctrl = cxusb_streaming_ctrl,
+ .power_ctrl = cxusb_power_ctrl,
+ .frontend_attach = cxusb_mt352_frontend_attach,
+ .tuner_attach = cxusb_dtt7579_tuner_attach,
+
+ .i2c_algo = &cxusb_i2c_algo,
+
+ .generic_bulk_ctrl_endpoint = 0x01,
+ /* parameter for the MPEG2-data transfer */
+ .urb = {
+ .type = DVB_USB_BULK,
+ .count = 5,
+ .endpoint = 0x04,
+ .u = {
+ .bulk = {
+ .buffersize = 8192,
+ }
+ }
+ },
+
+ .num_device_descs = 1,
+ .devices = {
+ { "DViCO FusionHDTV DVB-T USB (TH7579)",
+ { &cxusb_table[7], NULL },
+ { &cxusb_table[8], NULL },
+ },
+ }
+};
+
static struct usb_driver cxusb_driver = {
.name = "dvb_usb_cxusb",
.probe = cxusb_probe,
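
The chained dvb_usb_device_init() calls in cxusb_probe() above work because the helper returns -ENODEV when the interface's vendor/product ID is not listed in the given properties' device table, so only the matching table ends up initializing the device. A minimal sketch of the same logic written as a loop (the cxusb_all_props array name and the sketch function are made up for illustration):

	static struct dvb_usb_properties *cxusb_all_props[] = {
		&cxusb_medion_properties,
		&cxusb_bluebird_lgh064f_properties,
		&cxusb_bluebird_dee1601_properties,
		&cxusb_bluebird_lgz201_properties,
		&cxusb_bluebird_dtt7579_properties,
	};

	static int cxusb_probe_sketch(struct usb_interface *intf,
				      const struct usb_device_id *id)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(cxusb_all_props); i++)
			if (dvb_usb_device_init(intf, cxusb_all_props[i],
						THIS_MODULE, NULL) == 0)
				return 0; /* device found in this table */
		return -EINVAL;
	}
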
diff --git a/drivers/media/dvb/dvb-usb/cxusb.h b/drivers/media/dvb/dvb-usb/cxusb.h
index 135c2a81f58..087c9942785 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.h
+++ b/drivers/media/dvb/dvb-usb/cxusb.h
@@ -21,6 +21,8 @@ extern int dvb_usb_cxusb_debug;
#define CMD_STREAMING_ON 0x36
#define CMD_STREAMING_OFF 0x37
+#define CMD_GET_IR_CODE 0x47
+
#define CMD_ANALOG 0x50
#define CMD_DIGITAL 0x51
diff --git a/drivers/media/dvb/dvb-usb/dibusb-mb.c b/drivers/media/dvb/dvb-usb/dibusb-mb.c
index 52ac3e5adf5..dd5a1319588 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-mb.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-mb.c
@@ -65,11 +65,11 @@ static int dibusb_tuner_probe_and_attach(struct dvb_usb_device *d)
d->tuner_pass_ctrl(d->fe,0,msg[0].addr);
if (b2[0] == 0xfe) {
- info("this device has the Thomson Cable onboard. Which is default.");
+ info("This device has the Thomson Cable onboard. Which is default.");
dibusb_thomson_tuner_attach(d);
} else {
u8 bpll[4] = { 0x0b, 0xf5, 0x85, 0xab };
- info("this device has the Panasonic ENV77H11D5 onboard.");
+ info("This device has the Panasonic ENV77H11D5 onboard.");
d->pll_addr = 0x60;
memcpy(d->pll_init,bpll,4);
d->pll_desc = &dvb_pll_tda665x;
@@ -98,15 +98,15 @@ static int dibusb_probe(struct usb_interface *intf,
/* do not change the order of the ID table */
static struct usb_device_id dibusb_dib3000mb_table [] = {
-/* 00 */ { USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_AVERMEDIA_DVBT_USB_COLD)},
-/* 01 */ { USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_AVERMEDIA_DVBT_USB_WARM)},
+/* 00 */ { USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_AVERMEDIA_DVBT_USB_COLD) },
+/* 01 */ { USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_AVERMEDIA_DVBT_USB_WARM) },
/* 02 */ { USB_DEVICE(USB_VID_COMPRO, USB_PID_COMPRO_DVBU2000_COLD) },
/* 03 */ { USB_DEVICE(USB_VID_COMPRO, USB_PID_COMPRO_DVBU2000_WARM) },
/* 04 */ { USB_DEVICE(USB_VID_COMPRO_UNK, USB_PID_COMPRO_DVBU2000_UNK_COLD) },
/* 05 */ { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_MOD3000_COLD) },
/* 06 */ { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_MOD3000_WARM) },
-/* 07 */ { USB_DEVICE(USB_VID_EMPIA, USB_PID_KWORLD_VSTREAM_COLD) },
-/* 08 */ { USB_DEVICE(USB_VID_EMPIA, USB_PID_KWORLD_VSTREAM_WARM) },
+/* 07 */ { USB_DEVICE(USB_VID_EMPIA, USB_PID_KWORLD_VSTREAM_COLD) },
+/* 08 */ { USB_DEVICE(USB_VID_EMPIA, USB_PID_KWORLD_VSTREAM_WARM) },
/* 09 */ { USB_DEVICE(USB_VID_GRANDTEC, USB_PID_GRANDTEC_DVBT_USB_COLD) },
/* 10 */ { USB_DEVICE(USB_VID_GRANDTEC, USB_PID_GRANDTEC_DVBT_USB_WARM) },
/* 11 */ { USB_DEVICE(USB_VID_GRANDTEC, USB_PID_DIBCOM_MOD3000_COLD) },
@@ -117,27 +117,34 @@ static struct usb_device_id dibusb_dib3000mb_table [] = {
/* 16 */ { USB_DEVICE(USB_VID_VISIONPLUS, USB_PID_TWINHAN_VP7041_WARM) },
/* 17 */ { USB_DEVICE(USB_VID_TWINHAN, USB_PID_TWINHAN_VP7041_COLD) },
/* 18 */ { USB_DEVICE(USB_VID_TWINHAN, USB_PID_TWINHAN_VP7041_WARM) },
-/* 19 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ULTIMA_TVBOX_COLD) },
-/* 20 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ULTIMA_TVBOX_WARM) },
-/* 21 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ULTIMA_TVBOX_AN2235_COLD) },
-/* 22 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ULTIMA_TVBOX_AN2235_WARM) },
+/* 19 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ULTIMA_TVBOX_COLD) },
+/* 20 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ULTIMA_TVBOX_WARM) },
+/* 21 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ULTIMA_TVBOX_AN2235_COLD) },
+/* 22 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ULTIMA_TVBOX_AN2235_WARM) },
/* 23 */ { USB_DEVICE(USB_VID_ADSTECH, USB_PID_ADSTECH_USB2_COLD) },
/* device ID with default DIBUSB2_0-firmware and with the hacked firmware */
/* 24 */ { USB_DEVICE(USB_VID_ADSTECH, USB_PID_ADSTECH_USB2_WARM) },
-/* 25 */ { USB_DEVICE(USB_VID_KYE, USB_PID_KYE_DVB_T_COLD) },
-/* 26 */ { USB_DEVICE(USB_VID_KYE, USB_PID_KYE_DVB_T_WARM) },
+/* 25 */ { USB_DEVICE(USB_VID_KYE, USB_PID_KYE_DVB_T_COLD) },
+/* 26 */ { USB_DEVICE(USB_VID_KYE, USB_PID_KYE_DVB_T_WARM) },
/* 27 */ { USB_DEVICE(USB_VID_KWORLD, USB_PID_KWORLD_VSTREAM_COLD) },
-/* 28 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ULTIMA_TVBOX_USB2_COLD) },
-/* 29 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ULTIMA_TVBOX_USB2_WARM) },
+/* 28 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ULTIMA_TVBOX_USB2_COLD) },
+/* 29 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ULTIMA_TVBOX_USB2_WARM) },
-// #define DVB_USB_DIBUSB_MB_FAULTY_USB_IDs
+/*
+ * XXX: As Artec just 'forgot' to program the EEPROM on some Artec T1 devices,
+ * we don't catch these faulty IDs (namely the 'Cypress FX1 USB controller'
+ * IDs) that have been left on the device. If you have an Artec device that is
+ * supposed to work with this driver but is not detected by it, feel free to
+ * enable CONFIG_DVB_USB_DIBUSB_MB_FAULTY via your kernel config.
+ */
-#ifdef DVB_USB_DIBUSB_MB_FAULTY_USB_IDs
+#ifdef CONFIG_DVB_USB_DIBUSB_MB_FAULTY
/* 30 */ { USB_DEVICE(USB_VID_ANCHOR, USB_PID_ULTIMA_TVBOX_ANCHOR_COLD) },
#endif
+
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, dibusb_dib3000mb_table);
@@ -257,7 +264,7 @@ static struct dvb_usb_properties dibusb1_1_an2235_properties = {
}
},
-#ifdef DVB_USB_DIBUSB_MB_FAULTY_USB_IDs
+#ifdef CONFIG_DVB_USB_DIBUSB_MB_FAULTY
.num_device_descs = 2,
#else
.num_device_descs = 1,
@@ -267,11 +274,12 @@ static struct dvb_usb_properties dibusb1_1_an2235_properties = {
{ &dibusb_dib3000mb_table[20], NULL },
{ &dibusb_dib3000mb_table[21], NULL },
},
-#ifdef DVB_USB_DIBUSB_MB_FAULTY_USB_IDs
+#ifdef CONFIG_DVB_USB_DIBUSB_MB_FAULTY
{ "Artec T1 USB1.1 TVBOX with AN2235 (faulty USB IDs)",
{ &dibusb_dib3000mb_table[30], NULL },
{ NULL },
},
+ { NULL },
#endif
}
};
@@ -323,6 +331,7 @@ static struct dvb_usb_properties dibusb2_0b_properties = {
{ &dibusb_dib3000mb_table[27], NULL },
{ NULL }
},
+ { NULL },
}
};
@@ -369,6 +378,7 @@ static struct dvb_usb_properties artec_t1_usb2_properties = {
{ &dibusb_dib3000mb_table[28], NULL },
{ &dibusb_dib3000mb_table[29], NULL },
},
+ { NULL },
}
};
diff --git a/drivers/media/dvb/dvb-usb/digitv.c b/drivers/media/dvb/dvb-usb/digitv.c
index 450417a9e64..e6c55c9c941 100644
--- a/drivers/media/dvb/dvb-usb/digitv.c
+++ b/drivers/media/dvb/dvb-usb/digitv.c
@@ -32,7 +32,7 @@ static int digitv_ctrl_msg(struct dvb_usb_device *d,
sndbuf[1] = vv;
sndbuf[2] = wo ? wlen : rlen;
- if (!wo) {
+ if (wo) {
memcpy(&sndbuf[3],wbuf,wlen);
dvb_usb_generic_write(d,sndbuf,7);
} else {
diff --git a/drivers/media/dvb/dvb-usb/dtt200u-fe.c b/drivers/media/dvb/dvb-usb/dtt200u-fe.c
index 0a94ec22aeb..cd21ddbfd05 100644
--- a/drivers/media/dvb/dvb-usb/dtt200u-fe.c
+++ b/drivers/media/dvb/dvb-usb/dtt200u-fe.c
@@ -156,10 +156,9 @@ struct dvb_frontend* dtt200u_fe_attach(struct dvb_usb_device *d)
struct dtt200u_fe_state* state = NULL;
/* allocate memory for the internal state */
- state = (struct dtt200u_fe_state*) kmalloc(sizeof(struct dtt200u_fe_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct dtt200u_fe_state), GFP_KERNEL);
if (state == NULL)
goto error;
- memset(state,0,sizeof(struct dtt200u_fe_state));
deb_info("attaching frontend dtt200u\n");
diff --git a/drivers/media/dvb/dvb-usb/dtt200u.c b/drivers/media/dvb/dvb-usb/dtt200u.c
index 6e2bac87344..130ea7f21f5 100644
--- a/drivers/media/dvb/dvb-usb/dtt200u.c
+++ b/drivers/media/dvb/dvb-usb/dtt200u.c
@@ -151,7 +151,7 @@ static struct dvb_usb_properties dtt200u_properties = {
.cold_ids = { &dtt200u_usb_table[0], NULL },
.warm_ids = { &dtt200u_usb_table[1], NULL },
},
- { NULL },
+ { 0 },
}
};
@@ -160,7 +160,7 @@ static struct dvb_usb_properties wt220u_properties = {
.pid_filter_count = 15,
.usb_ctrl = CYPRESS_FX2,
- .firmware = "dvb-usb-wt220u-01.fw",
+ .firmware = "dvb-usb-wt220u-02.fw",
.power_ctrl = dtt200u_power_ctrl,
.streaming_ctrl = dtt200u_streaming_ctrl,
@@ -192,7 +192,7 @@ static struct dvb_usb_properties wt220u_properties = {
.cold_ids = { &dtt200u_usb_table[2], NULL },
.warm_ids = { &dtt200u_usb_table[3], NULL },
},
- { NULL },
+ { 0 },
}
};
diff --git a/drivers/media/dvb/dvb-usb/dtt200u.h b/drivers/media/dvb/dvb-usb/dtt200u.h
index 6f1f3042e21..005b0a7df35 100644
--- a/drivers/media/dvb/dvb-usb/dtt200u.h
+++ b/drivers/media/dvb/dvb-usb/dtt200u.h
@@ -13,6 +13,7 @@
#define _DVB_USB_DTT200U_H_
#define DVB_USB_LOG_PREFIX "dtt200u"
+
#include "dvb-usb.h"
extern int dvb_usb_dtt200u_debug;
@@ -25,15 +26,15 @@ extern int dvb_usb_dtt200u_debug;
* 88 - locking 2 bytes (0x80 0x40 == no signal, 0x89 0x20 == nice signal)
*/
-#define GET_SPEED 0x00
-#define GET_TUNE_STATUS 0x81
-#define GET_RC_CODE 0x84
-#define GET_CONFIGURATION 0x88
-#define GET_AGC 0x89
-#define GET_SNR 0x8a
-#define GET_VIT_ERR_CNT 0x8c
-#define GET_RS_ERR_CNT 0x8d
-#define GET_RS_UNCOR_BLK_CNT 0x8e
+#define GET_SPEED 0x00
+#define GET_TUNE_STATUS 0x81
+#define GET_RC_CODE 0x84
+#define GET_CONFIGURATION 0x88
+#define GET_AGC 0x89
+#define GET_SNR 0x8a
+#define GET_VIT_ERR_CNT 0x8c
+#define GET_RS_ERR_CNT 0x8d
+#define GET_RS_UNCOR_BLK_CNT 0x8e
/* write
* 01 - init
@@ -44,12 +45,12 @@ extern int dvb_usb_dtt200u_debug;
* 08 - transfer switch
*/
-#define SET_INIT 0x01
-#define SET_RF_FREQ 0x02
-#define SET_BANDWIDTH 0x03
-#define SET_PID_FILTER 0x04
-#define RESET_PID_FILTER 0x05
-#define SET_STREAMING 0x08
+#define SET_INIT 0x01
+#define SET_RF_FREQ 0x02
+#define SET_BANDWIDTH 0x03
+#define SET_PID_FILTER 0x04
+#define RESET_PID_FILTER 0x05
+#define SET_STREAMING 0x08
extern struct dvb_frontend * dtt200u_fe_attach(struct dvb_usb_device *d);
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-common.h b/drivers/media/dvb/dvb-usb/dvb-usb-common.h
index 7300489d3e2..a3460bf2d9f 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-common.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-common.h
@@ -24,7 +24,7 @@ extern int dvb_usb_disable_rc_polling;
#define deb_mem(args...) dprintk(dvb_usb_debug,0x80,args)
/* commonly used methods */
-extern int usb_cypress_load_firmware(struct usb_device *, const char *, int);
+extern int dvb_usb_download_firmware(struct usb_device *, struct dvb_usb_properties *);
extern int dvb_usb_urb_submit(struct dvb_usb_device *);
extern int dvb_usb_urb_kill(struct dvb_usb_device *);
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-firmware.c b/drivers/media/dvb/dvb-usb/dvb-usb-firmware.c
index 5244e39770a..8535895819f 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-firmware.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-firmware.c
@@ -9,7 +9,6 @@
*/
#include "dvb-usb-common.h"
-#include <linux/firmware.h>
#include <linux/usb.h>
struct usb_cypress_controller {
@@ -19,9 +18,10 @@ struct usb_cypress_controller {
};
static struct usb_cypress_controller cypress[] = {
- { .id = CYPRESS_AN2135, .name = "Cypress AN2135", .cpu_cs_register = 0x7f92 },
- { .id = CYPRESS_AN2235, .name = "Cypress AN2235", .cpu_cs_register = 0x7f92 },
- { .id = CYPRESS_FX2, .name = "Cypress FX2", .cpu_cs_register = 0xe600 },
+ { .id = DEVICE_SPECIFIC, .name = "Device specific", .cpu_cs_register = 0 },
+ { .id = CYPRESS_AN2135, .name = "Cypress AN2135", .cpu_cs_register = 0x7f92 },
+ { .id = CYPRESS_AN2235, .name = "Cypress AN2235", .cpu_cs_register = 0x7f92 },
+ { .id = CYPRESS_FX2, .name = "Cypress FX2", .cpu_cs_register = 0xe600 },
};
/*
@@ -30,71 +30,117 @@ static struct usb_cypress_controller cypress[] = {
static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 len)
{
return usb_control_msg(udev, usb_sndctrlpipe(udev,0),
- 0xa0, USB_TYPE_VENDOR, addr, 0x00, data, len, 5*HZ);
+ 0xa0, USB_TYPE_VENDOR, addr, 0x00, data, len, 5000);
}
-int usb_cypress_load_firmware(struct usb_device *udev, const char *filename, int type)
+int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
{
- const struct firmware *fw = NULL;
- u16 addr;
- u8 *b,*p;
- int ret = 0,i;
+ struct hexline hx;
+ u8 reset;
+ int ret,pos=0;
- if ((ret = request_firmware(&fw, filename, &udev->dev)) != 0) {
- err("did not find the firmware file. (%s) "
- "Please see linux/Documentation/dvb/ for more details on firmware-problems.",
- filename);
+ /* stop the CPU */
+ reset = 1;
+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
+ err("could not stop the USB controller CPU.");
+
+ while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
+ ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
+
+ if (ret != hx.len) {
+ err("error while transferring firmware "
+ "(transferred size: %d, block size: %d)",
+ ret,hx.len);
+ ret = -EINVAL;
+ break;
+ }
+ }
+ if (ret < 0) {
+ err("firmware download failed at %d with %d",pos,ret);
return ret;
}
- info("downloading firmware from file '%s' to the '%s'",filename,cypress[type].name);
-
- p = kmalloc(fw->size,GFP_KERNEL);
- if (p != NULL) {
- u8 reset;
- /*
- * you cannot use the fw->data as buffer for
- * usb_control_msg, a new buffer has to be
- * created
- */
- memcpy(p,fw->data,fw->size);
-
- /* stop the CPU */
- reset = 1;
- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
- err("could not stop the USB controller CPU.");
- for(i = 0; p[i+3] == 0 && i < fw->size; ) {
- b = (u8 *) &p[i];
- addr = cpu_to_le16( *((u16 *) &b[1]) );
-
- deb_fw("writing to address 0x%04x (buffer: 0x%02x%02x)\n",addr,b[1],b[2]);
-
- ret = usb_cypress_writemem(udev,addr,&b[4],b[0]);
-
- if (ret != b[0]) {
- err("error while transferring firmware "
- "(transferred size: %d, block size: %d)",
- ret,b[0]);
- ret = -EINVAL;
- break;
- }
- i += 5 + b[0];
- }
- /* length in ret */
- if (ret > 0)
- ret = 0;
+ if (ret == 0) {
/* restart the CPU */
reset = 0;
if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
err("could not restart the USB controller CPU.");
ret = -EINVAL;
}
+ } else
+ ret = -EIO;
- kfree(p);
- } else {
- ret = -ENOMEM;
+ return ret;
+}
+EXPORT_SYMBOL(usb_cypress_load_firmware);
+
+int dvb_usb_download_firmware(struct usb_device *udev, struct dvb_usb_properties *props)
+{
+ int ret;
+ const struct firmware *fw = NULL;
+
+ if ((ret = request_firmware(&fw, props->firmware, &udev->dev)) != 0) {
+ err("did not find the firmware file. (%s) "
+ "Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)",
+ props->firmware,ret);
+ return ret;
}
- release_firmware(fw);
+ info("downloading firmware from file '%s'",props->firmware);
+
+ switch (props->usb_ctrl) {
+ case CYPRESS_AN2135:
+ case CYPRESS_AN2235:
+ case CYPRESS_FX2:
+ ret = usb_cypress_load_firmware(udev, fw, props->usb_ctrl);
+ break;
+ case DEVICE_SPECIFIC:
+ if (props->download_firmware)
+ ret = props->download_firmware(udev,fw);
+ else {
+ err("BUG: driver didn't specified a download_firmware-callback, although it claims to have a DEVICE_SPECIFIC one.");
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ release_firmware(fw);
return ret;
}
+
+int dvb_usb_get_hexline(const struct firmware *fw, struct hexline *hx, int *pos)
+{
+ u8 *b = (u8 *) &fw->data[*pos];
+ int data_offs = 4;
+ if (*pos >= fw->size)
+ return 0;
+
+ memset(hx,0,sizeof(struct hexline));
+
+ hx->len = b[0];
+
+ if ((*pos + hx->len + 4) >= fw->size)
+ return -EINVAL;
+
+ hx->addr = le16_to_cpu( *((u16 *) &b[1]) );
+ hx->type = b[3];
+
+ if (hx->type == 0x04) {
+ /* b[4] and b[5] are the Extended linear address record data field */
+ hx->addr |= (b[4] << 24) | (b[5] << 16);
+/* hx->len -= 2;
+ data_offs += 2; */
+ }
+ memcpy(hx->data,&b[data_offs],hx->len);
+ hx->chk = b[hx->len + data_offs];
+
+ *pos += hx->len + 5;
+
+ return *pos;
+}
+EXPORT_SYMBOL(dvb_usb_get_hexline);
+
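The new dvb_usb_get_hexline() walks the firmware image one record at a time: byte 0 is the payload length, bytes 1-2 the little-endian load address, byte 3 the record type, followed by the payload and a trailing checksum byte (hence the *pos += hx.len + 5 advance). A minimal consumer sketch, mirroring the loop in usb_cypress_load_firmware() above (write_block() is hypothetical):

	struct hexline hx;
	int pos = 0, ret;

	while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
		/* hx.addr is the load address, hx.data holds hx.len bytes */
		if (write_block(hx.addr, hx.data, hx.len) != hx.len)
			return -EIO;
	}
	if (ret < 0)
		return ret; /* truncated or malformed record */
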
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c b/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c
index da970947dfc..9b254532af4 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-i2c.c
@@ -52,9 +52,8 @@ int dvb_usb_pll_init_i2c(struct dvb_frontend *fe)
struct i2c_msg msg = { .addr = d->pll_addr, .flags = 0, .buf = d->pll_init, .len = 4 };
int ret = 0;
- /* if there is nothing to initialize */
- if (d->pll_init[0] == 0x00 && d->pll_init[1] == 0x00 &&
- d->pll_init[2] == 0x00 && d->pll_init[3] == 0x00)
+ /* if pll_desc is not used */
+ if (d->pll_desc == NULL)
return 0;
if (d->tuner_pass_ctrl)
@@ -80,6 +79,9 @@ int dvb_usb_pll_set(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep
{
struct dvb_usb_device *d = fe->dvb->priv;
+ if (d->pll_desc == NULL)
+ return 0;
+
deb_pll("pll addr: %x, freq: %d %p\n",d->pll_addr,fep->frequency,d->pll_desc);
b[0] = d->pll_addr << 1;
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index 6be99e537e1..4a1b9e77e33 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -86,11 +86,17 @@
#define USB_PID_WINTV_NOVA_T_USB2_COLD 0x9300
#define USB_PID_WINTV_NOVA_T_USB2_WARM 0x9301
#define USB_PID_NEBULA_DIGITV 0x0201
-#define USB_PID_DVICO_BLUEBIRD_LGZ201 0xdb00
-#define USB_PID_DVICO_BLUEBIRD_TH7579 0xdb10
#define USB_PID_DVICO_BLUEBIRD_LGDT 0xd820
-#define USB_PID_DVICO_BLUEBIRD_LGZ201_1 0xdb01
-#define USB_PID_DVICO_BLUEBIRD_TH7579_2 0xdb11
+#define USB_PID_DVICO_BLUEBIRD_LG064F_COLD 0xd500
+#define USB_PID_DVICO_BLUEBIRD_LG064F_WARM 0xd501
+#define USB_PID_DVICO_BLUEBIRD_LGZ201_COLD 0xdb00
+#define USB_PID_DVICO_BLUEBIRD_LGZ201_WARM 0xdb01
+#define USB_PID_DVICO_BLUEBIRD_TH7579_COLD 0xdb10
+#define USB_PID_DVICO_BLUEBIRD_TH7579_WARM 0xdb11
+#define USB_PID_DVICO_BLUEBIRD_DEE1601_COLD 0xdb50
+#define USB_PID_DVICO_BLUEBIRD_DEE1601_WARM 0xdb51
+#define USB_PID_DIGITALNOW_BLUEBIRD_DEE1601_COLD 0xdb54
+#define USB_PID_DIGITALNOW_BLUEBIRD_DEE1601_WARM 0xdb55
#define USB_PID_MEDION_MD95700 0x0932
#define USB_PID_KYE_DVB_T_COLD 0x701e
#define USB_PID_KYE_DVB_T_WARM 0x701f
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-init.c b/drivers/media/dvb/dvb-usb/dvb-usb-init.c
index dd8e0b94edb..716f8bf528c 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-init.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-init.c
@@ -138,6 +138,9 @@ int dvb_usb_device_init(struct usb_interface *intf, struct dvb_usb_properties
int ret = -ENOMEM,cold=0;
+ if (du != NULL)
+ *du = NULL;
+
if ((desc = dvb_usb_find_device(udev,props,&cold)) == NULL) {
deb_err("something went very wrong, device was not found in current device list - let's see what comes next.\n");
return -ENODEV;
@@ -145,38 +148,38 @@ int dvb_usb_device_init(struct usb_interface *intf, struct dvb_usb_properties
if (cold) {
info("found a '%s' in cold state, will try to load a firmware",desc->name);
- ret = usb_cypress_load_firmware(udev,props->firmware,props->usb_ctrl);
- } else {
- info("found a '%s' in warm state.",desc->name);
- d = kmalloc(sizeof(struct dvb_usb_device),GFP_KERNEL);
- if (d == NULL) {
- err("no memory for 'struct dvb_usb_device'");
+ ret = dvb_usb_download_firmware(udev,props);
+ if (!props->no_reconnect)
return ret;
+ }
+
+ info("found a '%s' in warm state.",desc->name);
+ d = kzalloc(sizeof(struct dvb_usb_device),GFP_KERNEL);
+ if (d == NULL) {
+ err("no memory for 'struct dvb_usb_device'");
+ return ret;
+ }
+
+ d->udev = udev;
+ memcpy(&d->props,props,sizeof(struct dvb_usb_properties));
+ d->desc = desc;
+ d->owner = owner;
+
+ if (d->props.size_of_priv > 0) {
+ d->priv = kzalloc(d->props.size_of_priv,GFP_KERNEL);
+ if (d->priv == NULL) {
+ err("no memory for priv in 'struct dvb_usb_device'");
+ kfree(d);
+ return -ENOMEM;
}
- memset(d,0,sizeof(struct dvb_usb_device));
-
- d->udev = udev;
- memcpy(&d->props,props,sizeof(struct dvb_usb_properties));
- d->desc = desc;
- d->owner = owner;
-
- if (d->props.size_of_priv > 0) {
- d->priv = kmalloc(d->props.size_of_priv,GFP_KERNEL);
- if (d->priv == NULL) {
- err("no memory for priv in 'struct dvb_usb_device'");
- kfree(d);
- return -ENOMEM;
- }
- memset(d->priv,0,d->props.size_of_priv);
- }
+ }
- usb_set_intfdata(intf, d);
+ usb_set_intfdata(intf, d);
- if (du != NULL)
- *du = d;
+ if (du != NULL)
+ *du = d;
- ret = dvb_usb_init(d);
- }
+ ret = dvb_usb_init(d);
if (ret == 0)
info("%s successfully initialized and connected.",desc->name);
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-urb.c b/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
index 36b7048c02d..ee821974dc6 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-urb.c
@@ -175,15 +175,13 @@ static int dvb_usb_allocate_stream_buffers(struct dvb_usb_device *d, int num, un
deb_mem("all in all I will use %lu bytes for streaming\n",num*size);
- if ((d->buf_list = kmalloc(num*sizeof(u8 *), GFP_ATOMIC)) == NULL)
+ if ((d->buf_list = kcalloc(num, sizeof(u8 *), GFP_ATOMIC)) == NULL)
return -ENOMEM;
- if ((d->dma_addr = kmalloc(num*sizeof(dma_addr_t), GFP_ATOMIC)) == NULL) {
+ if ((d->dma_addr = kcalloc(num, sizeof(dma_addr_t), GFP_ATOMIC)) == NULL) {
kfree(d->buf_list);
return -ENOMEM;
}
- memset(d->buf_list,0,num*sizeof(u8 *));
- memset(d->dma_addr,0,num*sizeof(dma_addr_t));
d->state |= DVB_USB_STATE_URB_BUF;
@@ -285,10 +283,9 @@ int dvb_usb_urb_init(struct dvb_usb_device *d)
usb_clear_halt(d->udev,usb_rcvbulkpipe(d->udev,d->props.urb.endpoint));
/* allocate the array for the data transfer URBs */
- d->urb_list = kmalloc(d->props.urb.count * sizeof(struct urb *),GFP_KERNEL);
+ d->urb_list = kzalloc(d->props.urb.count * sizeof(struct urb *),GFP_KERNEL);
if (d->urb_list == NULL)
return -ENOMEM;
- memset(d->urb_list,0,d->props.urb.count * sizeof(struct urb *));
d->state |= DVB_USB_STATE_URB_LIST;
switch (d->props.urb.type) {
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index b4a1a98006c..dd568396e59 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -10,8 +10,8 @@
#include <linux/config.h>
#include <linux/input.h>
-#include <linux/module.h>
#include <linux/usb.h>
+#include <linux/firmware.h>
#include "dvb_frontend.h"
#include "dvb_demux.h"
@@ -94,7 +94,11 @@ struct dvb_usb_device;
* @usb_ctrl: which USB device-side controller is in use. Needed for firmware
* download.
* @firmware: name of the firmware file.
- *
+ * @download_firmware: called to download the firmware when the usb_ctrl is
+ * DEVICE_SPECIFIC.
+ * @no_reconnect: device doesn't do a reconnect after downloading the firmware,
+ * so do the warm initialization right after it.
+ *
* @size_of_priv: how many bytes shall be allocated for the private field
* of struct dvb_usb_device.
*
@@ -142,11 +146,14 @@ struct dvb_usb_properties {
int caps;
int pid_filter_count;
-#define CYPRESS_AN2135 0
-#define CYPRESS_AN2235 1
-#define CYPRESS_FX2 2
+#define DEVICE_SPECIFIC 0
+#define CYPRESS_AN2135 1
+#define CYPRESS_AN2235 2
+#define CYPRESS_FX2 3
int usb_ctrl;
- const char *firmware;
+ const char firmware[FIRMWARE_NAME_MAX];
+ int (*download_firmware) (struct usb_device *, const struct firmware *);
+ int no_reconnect;
int size_of_priv;
@@ -326,5 +333,15 @@ extern int dvb_usb_pll_init_i2c(struct dvb_frontend *);
extern int dvb_usb_pll_set(struct dvb_frontend *, struct dvb_frontend_parameters *, u8[]);
extern int dvb_usb_pll_set_i2c(struct dvb_frontend *, struct dvb_frontend_parameters *);
+/* commonly used firmware download types and function */
+struct hexline {
+ u8 len;
+ u32 addr;
+ u8 type;
+ u8 data[255];
+ u8 chk;
+};
+extern int dvb_usb_get_hexline(const struct firmware *, struct hexline *, int *);
+extern int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type);
#endif
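
With the new DEVICE_SPECIFIC usb_ctrl value the core no longer knows how to load the firmware itself; the driver must supply a download_firmware callback (this is what the bluebird entries in cxusb.c above use to patch the warm product ID). A minimal sketch of a driver filling in the new fields; the names and the firmware file are made up:

	static int example_download_firmware(struct usb_device *udev,
					     const struct firmware *fw)
	{
		/* inspect or patch fw->data here, then hand off to the
		 * common Cypress FX2 loader */
		return usb_cypress_load_firmware(udev, fw, CYPRESS_FX2);
	}

	static struct dvb_usb_properties example_properties = {
		.usb_ctrl          = DEVICE_SPECIFIC,
		.firmware          = "dvb-usb-example-01.fw",
		.download_firmware = example_download_firmware,
		.no_reconnect      = 0, /* device renumerates after the download */
	};
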
diff --git a/drivers/media/dvb/dvb-usb/nova-t-usb2.c b/drivers/media/dvb/dvb-usb/nova-t-usb2.c
index fac48fc7a4a..412039d8dba 100644
--- a/drivers/media/dvb/dvb-usb/nova-t-usb2.c
+++ b/drivers/media/dvb/dvb-usb/nova-t-usb2.c
@@ -129,10 +129,6 @@ static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6])
dibusb_read_eeprom_byte(d,i, &b);
mac[5 - (i - 136)] = b;
-
-/* deb_ee("%02x ",b);
- if ((i+1) % 16 == 0)
- deb_ee("\n");*/
}
return 0;
@@ -153,7 +149,7 @@ static struct usb_device_id nova_t_table [] = {
/* 01 */ { USB_DEVICE(USB_VID_HAUPPAUGE, USB_PID_WINTV_NOVA_T_USB2_WARM) },
{ } /* Terminating entry */
};
-MODULE_DEVICE_TABLE (usb, nova_t_table);
+MODULE_DEVICE_TABLE(usb, nova_t_table);
static struct dvb_usb_properties nova_t_properties = {
.caps = DVB_USB_HAS_PID_FILTER | DVB_USB_PID_FILTER_CAN_BE_TURNED_OFF | DVB_USB_IS_AN_I2C_ADAPTER,
@@ -198,6 +194,7 @@ static struct dvb_usb_properties nova_t_properties = {
{ &nova_t_table[0], NULL },
{ &nova_t_table[1], NULL },
},
+ { NULL },
}
};
diff --git a/drivers/media/dvb/dvb-usb/vp702x-fe.c b/drivers/media/dvb/dvb-usb/vp702x-fe.c
index 104b5d016c7..b6d95e1c9c5 100644
--- a/drivers/media/dvb/dvb-usb/vp702x-fe.c
+++ b/drivers/media/dvb/dvb-usb/vp702x-fe.c
@@ -190,7 +190,7 @@ static int vp702x_fe_get_frontend(struct dvb_frontend* fe,
}
static int vp702x_fe_send_diseqc_msg (struct dvb_frontend* fe,
- struct dvb_diseqc_master_cmd *m)
+ struct dvb_diseqc_master_cmd *m)
{
struct vp702x_fe_state *st = fe->demodulator_priv;
u8 cmd[8],ibuf[10];
@@ -281,10 +281,9 @@ static struct dvb_frontend_ops vp702x_fe_ops;
struct dvb_frontend * vp702x_fe_attach(struct dvb_usb_device *d)
{
- struct vp702x_fe_state *s = kmalloc(sizeof(struct vp702x_fe_state), GFP_KERNEL);
+ struct vp702x_fe_state *s = kzalloc(sizeof(struct vp702x_fe_state), GFP_KERNEL);
if (s == NULL)
goto error;
- memset(s,0,sizeof(struct vp702x_fe_state));
s->d = d;
s->fe.ops = &vp702x_fe_ops;
diff --git a/drivers/media/dvb/dvb-usb/vp702x.h b/drivers/media/dvb/dvb-usb/vp702x.h
index 4a3e8c7eca2..a808d48e7bf 100644
--- a/drivers/media/dvb/dvb-usb/vp702x.h
+++ b/drivers/media/dvb/dvb-usb/vp702x.h
@@ -13,47 +13,47 @@ extern int dvb_usb_vp702x_debug;
/* commands are read and written with USB control messages */
/* consecutive read/write operation */
-#define REQUEST_OUT 0xB2
-#define REQUEST_IN 0xB3
+#define REQUEST_OUT 0xB2
+#define REQUEST_IN 0xB3
/* the out-buffer of these consecutive operations contains sub-commands when b[0] = 0
* request: 0xB2; i: 0; v: 0; b[0] = 0, b[1] = subcmd, additional buffer
* the returning buffer looks as follows
* request: 0xB3; i: 0; v: 0; b[0] = 0xB3, additional buffer */
-#define GET_TUNER_STATUS 0x05
+#define GET_TUNER_STATUS 0x05
/* additional in buffer:
* 0 1 2 3 4 5 6 7 8
* N/A N/A 0x05 signal-quality N/A N/A signal-strength lock==0 N/A */
-#define GET_SYSTEM_STRING 0x06
+#define GET_SYSTEM_STRING 0x06
/* additional in buffer:
* 0 1 2 3 4 5 6 7 8
* N/A 'U' 'S' 'B' '7' '0' '2' 'X' N/A */
-#define SET_DISEQC_CMD 0x08
+#define SET_DISEQC_CMD 0x08
/* additional out buffer:
* 0 1 2 3 4
* len X1 X2 X3 X4
* additional in buffer:
* 0 1 2
- * N/A 0 0 b[1] == b[2] == 0 -> success otherwise not */
+ * N/A 0 0 b[1] == b[2] == 0 -> success, failure otherwise */
-#define SET_LNB_POWER 0x09
+#define SET_LNB_POWER 0x09
/* additional out buffer:
* 0 1 2
* 0x00 0xff 1 = on, 0 = off
* additional in buffer:
* 0 1 2
- * N/A 0 0 b[1] == b[2] == 0 -> success otherwise not */
+ * N/A 0 0 b[1] == b[2] == 0 -> success, failure otherwise */
-#define GET_MAC_ADDRESS 0x0A
+#define GET_MAC_ADDRESS 0x0A
/* #define GET_MAC_ADDRESS 0x0B */
/* additional in buffer:
* 0 1 2 3 4 5 6 7 8
* N/A N/A 0x0A or 0x0B MAC0 MAC1 MAC2 MAC3 MAC4 MAC5 */
-#define SET_PID_FILTER 0x11
+#define SET_PID_FILTER 0x11
/* additional in buffer:
* 0 1 ... 14 15 16
* PID0_MSB PID0_LSB ... PID7_MSB PID7_LSB PID_active (bits) */
@@ -64,39 +64,38 @@ extern int dvb_usb_vp702x_debug;
* freq0 freq1 divstep srate0 srate1 srate2 flag chksum
*/
-
/* one direction requests */
-#define READ_REMOTE_REQ 0xB4
+#define READ_REMOTE_REQ 0xB4
/* IN i: 0; v: 0; b[0] == request, b[1] == key */
-#define READ_PID_NUMBER_REQ 0xB5
+#define READ_PID_NUMBER_REQ 0xB5
/* IN i: 0; v: 0; b[0] == request, b[1] == 0, b[2] = pid number */
-#define WRITE_EEPROM_REQ 0xB6
+#define WRITE_EEPROM_REQ 0xB6
/* OUT i: offset; v: value to write; no extra buffer */
-#define READ_EEPROM_REQ 0xB7
+#define READ_EEPROM_REQ 0xB7
/* IN i: bufferlen; v: offset; buffer with bufferlen bytes */
-#define READ_STATUS 0xB8
+#define READ_STATUS 0xB8
/* IN i: 0; v: 0; bufferlen 10 */
-#define READ_TUNER_REG_REQ 0xB9
+#define READ_TUNER_REG_REQ 0xB9
/* IN i: 0; v: register; b[0] = value */
-#define READ_FX2_REG_REQ 0xBA
+#define READ_FX2_REG_REQ 0xBA
/* IN i: offset; v: 0; b[0] = value */
-#define WRITE_FX2_REG_REQ 0xBB
+#define WRITE_FX2_REG_REQ 0xBB
/* OUT i: offset; v: value to write; 1 byte extra buffer */
-#define SET_TUNER_POWER_REQ 0xBC
+#define SET_TUNER_POWER_REQ 0xBC
/* IN i: 0 = power off, 1 = power on */
-#define WRITE_TUNER_REG_REQ 0xBD
+#define WRITE_TUNER_REG_REQ 0xBD
/* IN i: register, v: value to write, no extra buffer */
-#define RESET_TUNER 0xBE
+#define RESET_TUNER 0xBE
/* IN i: 0, v: 0, no extra buffer */
extern struct dvb_frontend * vp702x_fe_attach(struct dvb_usb_device *d);
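
The request documentation above maps directly onto usb_control_msg(): the 'v' and 'i' columns are the control request's value and index fields. A sketch for READ_EEPROM_REQ ("IN i: bufferlen; v: offset"); the vendor-IN request type and the timeout are assumptions, since the header does not spell them out:

	u8 buf[16];
	u16 offset = 0;
	int ret;

	ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
			      READ_EEPROM_REQ,
			      USB_TYPE_VENDOR | USB_DIR_IN, /* assumed */
			      offset,      /* v: EEPROM offset */
			      sizeof(buf), /* i: number of bytes to read */
			      buf, sizeof(buf), 2000);
	if (ret < 0)
		err("EEPROM read failed: %d", ret);
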
diff --git a/drivers/media/dvb/dvb-usb/vp7045-fe.c b/drivers/media/dvb/dvb-usb/vp7045-fe.c
index 83f1de1e7e5..5242cca5db4 100644
--- a/drivers/media/dvb/dvb-usb/vp7045-fe.c
+++ b/drivers/media/dvb/dvb-usb/vp7045-fe.c
@@ -145,10 +145,9 @@ static struct dvb_frontend_ops vp7045_fe_ops;
struct dvb_frontend * vp7045_fe_attach(struct dvb_usb_device *d)
{
- struct vp7045_fe_state *s = kmalloc(sizeof(struct vp7045_fe_state), GFP_KERNEL);
+ struct vp7045_fe_state *s = kzalloc(sizeof(struct vp7045_fe_state), GFP_KERNEL);
if (s == NULL)
goto error;
- memset(s,0,sizeof(struct vp7045_fe_state));
s->d = d;
s->fe.ops = &vp7045_fe_ops;
diff --git a/drivers/media/dvb/dvb-usb/vp7045.c b/drivers/media/dvb/dvb-usb/vp7045.c
index 3835235b68d..028204956bb 100644
--- a/drivers/media/dvb/dvb-usb/vp7045.c
+++ b/drivers/media/dvb/dvb-usb/vp7045.c
@@ -247,7 +247,7 @@ static struct dvb_usb_properties vp7045_properties = {
.cold_ids = { &vp7045_usb_table[2], NULL },
.warm_ids = { &vp7045_usb_table[3], NULL },
},
- { NULL },
+ { 0 },
}
};
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 8e269e1c1f9..db3a8b40031 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -16,6 +16,12 @@ config DVB_CX24110
help
A DVB-S tuner module. Say Y when you want to support this frontend.
+config DVB_CX24123
+ tristate "Conexant CX24123 based"
+ depends on DVB_CORE
+ help
+ A DVB-S tuner module. Say Y when you want to support this frontend.
+
config DVB_TDA8083
tristate "Philips TDA8083 based"
depends on DVB_CORE
@@ -50,18 +56,19 @@ comment "DVB-T (terrestrial) frontends"
depends on DVB_CORE
config DVB_SP8870
- tristate "Spase sp8870 based"
+ tristate "Spase sp8870 based"
depends on DVB_CORE
select FW_LOADER
help
- A DVB-T tuner module. Say Y when you want to support this frontend.
+ A DVB-T tuner module. Say Y when you want to support this frontend.
This driver needs external firmware. Please use the command
"<kerneldir>/Documentation/dvb/get_dvb_firmware sp8870" to
- download/extract it, and then copy it to /usr/lib/hotplug/firmware.
+ download/extract it, and then copy it to /usr/lib/hotplug/firmware
+ or /lib/firmware (depending on configuration of firmware hotplug).
config DVB_SP887X
- tristate "Spase sp887x based"
+ tristate "Spase sp887x based"
depends on DVB_CORE
select FW_LOADER
help
@@ -69,7 +76,8 @@ config DVB_SP887X
This driver needs external firmware. Please use the command
"<kerneldir>/Documentation/dvb/get_dvb_firmware sp887x" to
- download/extract it, and then copy it to /usr/lib/hotplug/firmware.
+ download/extract it, and then copy it to /usr/lib/hotplug/firmware
+ or /lib/firmware (depending on configuration of firmware hotplug).
config DVB_CX22700
tristate "Conexant CX22700 based"
@@ -78,10 +86,10 @@ config DVB_CX22700
A DVB-T tuner module. Say Y when you want to support this frontend.
config DVB_CX22702
- tristate "Conexant cx22702 demodulator (OFDM)"
- depends on DVB_CORE
- help
- A DVB-T tuner module. Say Y when you want to support this frontend.
+ tristate "Conexant cx22702 demodulator (OFDM)"
+ depends on DVB_CORE
+ help
+ A DVB-T tuner module. Say Y when you want to support this frontend.
config DVB_L64781
tristate "LSI L64781"
@@ -98,8 +106,9 @@ config DVB_TDA1004X
This driver needs external firmware. Please use the commands
"<kerneldir>/Documentation/dvb/get_dvb_firmware tda10045",
- "<kerneldir>/Documentation/dvb/get_dvb_firmware tda10046" to
- download/extract them, and then copy them to /usr/lib/hotplug/firmware.
+ "<kerneldir>/Documentation/dvb/get_dvb_firmware tda10046" to
+ download/extract them, and then copy them to /usr/lib/hotplug/firmware
+ or /lib/firmware (depending on configuration of firmware hotplug).
config DVB_NXT6000
tristate "NxtWave Communications NXT6000 based"
@@ -140,13 +149,13 @@ config DVB_VES1820
tristate "VLSI VES1820 based"
depends on DVB_CORE
help
- A DVB-C tuner module. Say Y when you want to support this frontend.
+ A DVB-C tuner module. Say Y when you want to support this frontend.
config DVB_TDA10021
tristate "Philips TDA10021 based"
depends on DVB_CORE
help
- A DVB-C tuner module. Say Y when you want to support this frontend.
+ A DVB-C tuner module. Say Y when you want to support this frontend.
config DVB_STV0297
tristate "ST STV0297 based"
@@ -164,6 +173,11 @@ config DVB_NXT2002
help
An ATSC 8VSB tuner module. Say Y when you want to support this frontend.
+ This driver needs external firmware. Please use the command
+ "<kerneldir>/Documentation/dvb/get_dvb_firmware nxt2002" to
+ download/extract it, and then copy it to /usr/lib/hotplug/firmware
+ or /lib/firmware (depending on configuration of firmware hotplug).
+
config DVB_NXT200X
tristate "Nextwave NXT2002/NXT2004 based"
depends on DVB_CORE
@@ -172,6 +186,12 @@ config DVB_NXT200X
An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
to support this frontend.
+ This driver needs external firmware. Please use the commands
+ "<kerneldir>/Documentation/dvb/get_dvb_firmware nxt2002" and
+ "<kerneldir>/Documentation/dvb/get_dvb_firmware nxt2004" to
+ download/extract them, and then copy them to /usr/lib/hotplug/firmware
+ or /lib/firmware (depending on configuration of firmware hotplug).
+
config DVB_OR51211
tristate "or51211 based (pcHDTV HD2000 card)"
depends on DVB_CORE
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index a98760fe08a..615ec830e1c 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -32,3 +32,4 @@ obj-$(CONFIG_DVB_OR51132) += or51132.o
obj-$(CONFIG_DVB_BCM3510) += bcm3510.o
obj-$(CONFIG_DVB_S5H1420) += s5h1420.o
obj-$(CONFIG_DVB_LGDT330X) += lgdt330x.o
+obj-$(CONFIG_DVB_CX24123) += cx24123.o
diff --git a/drivers/media/dvb/frontends/bcm3510.c b/drivers/media/dvb/frontends/bcm3510.c
index 8ceb9a33c7a..caaee893ca7 100644
--- a/drivers/media/dvb/frontends/bcm3510.c
+++ b/drivers/media/dvb/frontends/bcm3510.c
@@ -255,7 +255,7 @@ static int bcm3510_bert_reset(struct bcm3510_state *st)
bcm3510_register_value b;
int ret;
- if ((ret < bcm3510_readB(st,0xfa,&b)) < 0)
+ if ((ret = bcm3510_readB(st,0xfa,&b)) < 0)
return ret;
b.BERCTL_fa.RESYNC = 0; bcm3510_writeB(st,0xfa,b);
@@ -623,13 +623,13 @@ static int bcm3510_download_firmware(struct dvb_frontend* fe)
err("could not load firmware (%s): %d",BCM3510_DEFAULT_FIRMWARE,ret);
return ret;
}
- deb_info("got firmware: %d\n",fw->size);
+ deb_info("got firmware: %zd\n",fw->size);
b = fw->data;
for (i = 0; i < fw->size;) {
addr = le16_to_cpu( *( (u16 *)&b[i] ) );
len = le16_to_cpu( *( (u16 *)&b[i+2] ) );
- deb_info("firmware chunk, addr: 0x%04x, len: 0x%04x, total length: 0x%04x\n",addr,len,fw->size);
+ deb_info("firmware chunk, addr: 0x%04x, len: 0x%04x, total length: 0x%04zx\n",addr,len,fw->size);
if ((ret = bcm3510_write_ram(st,addr,&b[i+4],len)) < 0) {
err("firmware download failed: %d\n",ret);
return ret;
@@ -782,10 +782,9 @@ struct dvb_frontend* bcm3510_attach(const struct bcm3510_config *config,
bcm3510_register_value v;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct bcm3510_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct bcm3510_state), GFP_KERNEL);
if (state == NULL)
goto error;
- memset(state,0,sizeof(struct bcm3510_state));
/* setup the state */
diff --git a/drivers/media/dvb/frontends/cx22702.c b/drivers/media/dvb/frontends/cx22702.c
index 5de0e6d350b..0fc899f81c5 100644
--- a/drivers/media/dvb/frontends/cx22702.c
+++ b/drivers/media/dvb/frontends/cx22702.c
@@ -195,6 +195,16 @@ static int cx22702_get_tps (struct cx22702_state *state, struct dvb_ofdm_paramet
return 0;
}
+static int cx22702_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
+{
+ struct cx22702_state* state = fe->demodulator_priv;
+ dprintk ("%s(%d)\n", __FUNCTION__, enable);
+ if (enable)
+ return cx22702_writereg (state, 0x0D, cx22702_readreg(state, 0x0D) & 0xfe);
+ else
+ return cx22702_writereg (state, 0x0D, cx22702_readreg(state, 0x0D) | 1);
+}
+
/* Talk to the demod, set the FEC, GUARD, QAM settings etc */
static int cx22702_set_tps (struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
{
@@ -202,7 +212,7 @@ static int cx22702_set_tps (struct dvb_frontend* fe, struct dvb_frontend_paramet
struct cx22702_state* state = fe->demodulator_priv;
/* set PLL */
- cx22702_writereg (state, 0x0D, cx22702_readreg(state,0x0D) &0xfe);
+ cx22702_i2c_gate_ctrl(fe, 1);
if (state->config->pll_set) {
state->config->pll_set(fe, p);
} else if (state->config->pll_desc) {
@@ -216,7 +226,7 @@ static int cx22702_set_tps (struct dvb_frontend* fe, struct dvb_frontend_paramet
} else {
BUG();
}
- cx22702_writereg (state, 0x0D, cx22702_readreg(state,0x0D) | 1);
+ cx22702_i2c_gate_ctrl(fe, 0);
/* set inversion */
cx22702_set_inversion (state, p->inversion);
@@ -349,11 +359,10 @@ static int cx22702_init (struct dvb_frontend* fe)
cx22702_writereg (state, 0xf8, (state->config->output_mode << 1) & 0x02);
/* init PLL */
- if (state->config->pll_init) {
- cx22702_writereg (state, 0x0D, cx22702_readreg(state,0x0D) & 0xfe);
+ if (state->config->pll_init)
state->config->pll_init(fe);
- cx22702_writereg (state, 0x0D, cx22702_readreg(state,0x0D) | 1);
- }
+
+ cx22702_i2c_gate_ctrl(fe, 0);
return 0;
}
@@ -531,6 +540,7 @@ static struct dvb_frontend_ops cx22702_ops = {
.read_signal_strength = cx22702_read_signal_strength,
.read_snr = cx22702_read_snr,
.read_ucblocks = cx22702_read_ucblocks,
+ .i2c_gate_ctrl = cx22702_i2c_gate_ctrl,
};
module_param(debug, int, 0644);
diff --git a/drivers/media/dvb/frontends/cx24110.c b/drivers/media/dvb/frontends/cx24110.c
index 0c4db80ec33..d15d32c51dc 100644
--- a/drivers/media/dvb/frontends/cx24110.c
+++ b/drivers/media/dvb/frontends/cx24110.c
@@ -27,7 +27,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
-#include <linux/jiffies.h>
#include "dvb_frontend.h"
#include "cx24110.h"
@@ -56,7 +55,7 @@ static int debug;
static struct {u8 reg; u8 data;} cx24110_regdata[]=
/* Comments beginning with @ denote this value should
- be the default */
+ be the default */
{{0x09,0x01}, /* SoftResetAll */
{0x09,0x00}, /* release reset */
{0x01,0xe8}, /* MSB of code rate 27.5MS/s */
@@ -67,26 +66,26 @@ static struct {u8 reg; u8 data;} cx24110_regdata[]=
{0x07,0x01}, /* @ Fclk, i.e. sampling clock, 60MHz */
{0x0a,0x00}, /* @ partial chip disables, do not set */
{0x0b,0x01}, /* set output clock in gapped mode, start signal low
- active for first byte */
+ active for first byte */
{0x0c,0x11}, /* no parity bytes, large hold time, serial data out */
{0x0d,0x6f}, /* @ RS Sync/Unsync thresholds */
{0x10,0x40}, /* chip doc is misleading here: write bit 6 as 1
- to avoid starting the BER counter. Reset the
- CRC test bit. Finite counting selected */
+ to avoid starting the BER counter. Reset the
+ CRC test bit. Finite counting selected */
{0x15,0xff}, /* @ size of the limited time window for RS BER
- estimation. It is <value>*256 RS blocks, this
- gives approx. 2.6 sec at 27.5MS/s, rate 3/4 */
+ estimation. It is <value>*256 RS blocks, this
+ gives approx. 2.6 sec at 27.5MS/s, rate 3/4 */
{0x16,0x00}, /* @ enable all RS output ports */
{0x17,0x04}, /* @ time window allowed for the RS to sync */
{0x18,0xae}, /* @ allow all standard DVB code rates to be scanned
- for automatically */
+ for automatically */
/* leave the current code rate and normalization
- registers as they are after reset... */
+ registers as they are after reset... */
{0x21,0x10}, /* @ during AutoAcq, search each viterbi setting
- only once */
+ only once */
{0x23,0x18}, /* @ size of the limited time window for Viterbi BER
- estimation. It is <value>*65536 channel bits, i.e.
- approx. 38ms at 27.5MS/s, rate 3/4 */
+ estimation. It is <value>*65536 channel bits, i.e.
+ approx. 38ms at 27.5MS/s, rate 3/4 */
{0x24,0x24}, /* do not trigger Viterbi CRC test. Finite count window */
/* leave front-end AGC parameters at default values */
/* leave decimation AGC parameters at default values */
diff --git a/drivers/media/dvb/frontends/cx24123.c b/drivers/media/dvb/frontends/cx24123.c
new file mode 100644
index 00000000000..d661c6f9cbe
--- /dev/null
+++ b/drivers/media/dvb/frontends/cx24123.c
@@ -0,0 +1,889 @@
+/*
+ Conexant cx24123/cx24109 - DVB QPSK Satellite demod/tuner driver
+
+ Copyright (C) 2005 Steven Toth <stoth@hauppauge.com>
+
+ Support for KWorld DVB-S 100 by Vadim Catana <skystar@moldova.cc>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+
+#include "dvb_frontend.h"
+#include "cx24123.h"
+
+static int debug;
+#define dprintk(args...) \
+ do { \
+ if (debug) printk (KERN_DEBUG "cx24123: " args); \
+ } while (0)
+
+struct cx24123_state
+{
+ struct i2c_adapter* i2c;
+ struct dvb_frontend_ops ops;
+ const struct cx24123_config* config;
+
+ struct dvb_frontend frontend;
+
+ u32 lastber;
+ u16 snr;
+ u8 lnbreg;
+
+ /* Some PLL specifics for tuning */
+ u32 VCAarg;
+ u32 VGAarg;
+ u32 bandselectarg;
+ u32 pllarg;
+
+ /* The Demod/Tuner can't easily provide these, we cache them */
+ u32 currentfreq;
+ u32 currentsymbolrate;
+};
+
+/* Various tuner defaults need to be established for a given symbol rate (in Sps) */
+static struct
+{
+ u32 symbolrate_low;
+ u32 symbolrate_high;
+ u32 VCAslope;
+ u32 VCAoffset;
+ u32 VGA1offset;
+ u32 VGA2offset;
+ u32 VCAprogdata;
+ u32 VGAprogdata;
+} cx24123_AGC_vals[] =
+{
+ {
+ .symbolrate_low = 1000000,
+ .symbolrate_high = 4999999,
+ .VCAslope = 0x07,
+ .VCAoffset = 0x0f,
+ .VGA1offset = 0x1f8,
+ .VGA2offset = 0x1f8,
+ .VGAprogdata = (2 << 18) | (0x1f8 << 9) | 0x1f8,
+ .VCAprogdata = (4 << 18) | (0x07 << 9) | 0x07,
+ },
+ {
+ .symbolrate_low = 5000000,
+ .symbolrate_high = 14999999,
+ .VCAslope = 0x1f,
+ .VCAoffset = 0x1f,
+ .VGA1offset = 0x1e0,
+ .VGA2offset = 0x180,
+ .VGAprogdata = (2 << 18) | (0x180 << 9) | 0x1e0,
+ .VCAprogdata = (4 << 18) | (0x07 << 9) | 0x1f,
+ },
+ {
+ .symbolrate_low = 15000000,
+ .symbolrate_high = 45000000,
+ .VCAslope = 0x3f,
+ .VCAoffset = 0x3f,
+ .VGA1offset = 0x180,
+ .VGA2offset = 0x100,
+ .VGAprogdata = (2 << 18) | (0x100 << 9) | 0x180,
+ .VCAprogdata = (4 << 18) | (0x07 << 9) | 0x3f,
+ },
+};
+
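+/*
+ * Illustration of the packing used by the prog words above (derived from the
+ * initializers, not from a datasheet): bits 20:18 hold an opcode (2 for the
+ * VGA words, 4 for the VCA words), while bits 17:9 and 8:0 carry the two
+ * 9-bit payload fields (VGA2offset/VGA1offset, or a fixed 0x07 plus the VCA
+ * slope). For example the 5-15 MSps VGA entry is
+ *   (2 << 18) | (0x180 << 9) | 0x1e0 == 0x0b01e0.
+ */
+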
+/*
+ * Various tuner defaults need to be established for a given frequency (in kHz).
+ * fixme: The bounds on the bands do not match the doc in real life.
+ * fixme: Some of them have been moved, others might need adjustment.
+ */
+static struct
+{
+ u32 freq_low;
+ u32 freq_high;
+ u32 bandselect;
+ u32 VCOdivider;
+ u32 VCOnumber;
+ u32 progdata;
+} cx24123_bandselect_vals[] =
+{
+ {
+ .freq_low = 950000,
+ .freq_high = 1018999,
+ .bandselect = 0x40,
+ .VCOdivider = 4,
+ .VCOnumber = 7,
+ .progdata = (0 << 18) | (0 << 9) | 0x40,
+ },
+ {
+ .freq_low = 1019000,
+ .freq_high = 1074999,
+ .bandselect = 0x80,
+ .VCOdivider = 4,
+ .VCOnumber = 8,
+ .progdata = (0 << 18) | (0 << 9) | 0x80,
+ },
+ {
+ .freq_low = 1075000,
+ .freq_high = 1227999,
+ .bandselect = 0x01,
+ .VCOdivider = 2,
+ .VCOnumber = 1,
+ .progdata = (0 << 18) | (1 << 9) | 0x01,
+ },
+ {
+ .freq_low = 1228000,
+ .freq_high = 1349999,
+ .bandselect = 0x02,
+ .VCOdivider = 2,
+ .VCOnumber = 2,
+ .progdata = (0 << 18) | (1 << 9) | 0x02,
+ },
+ {
+ .freq_low = 1350000,
+ .freq_high = 1481999,
+ .bandselect = 0x04,
+ .VCOdivider = 2,
+ .VCOnumber = 3,
+ .progdata = (0 << 18) | (1 << 9) | 0x04,
+ },
+ {
+ .freq_low = 1482000,
+ .freq_high = 1595999,
+ .bandselect = 0x08,
+ .VCOdivider = 2,
+ .VCOnumber = 4,
+ .progdata = (0 << 18) | (1 << 9) | 0x08,
+ },
+ {
+ .freq_low = 1596000,
+ .freq_high = 1717999,
+ .bandselect = 0x10,
+ .VCOdivider = 2,
+ .VCOnumber = 5,
+ .progdata = (0 << 18) | (1 << 9) | 0x10,
+ },
+ {
+ .freq_low = 1718000,
+ .freq_high = 1855999,
+ .bandselect = 0x20,
+ .VCOdivider = 2,
+ .VCOnumber = 6,
+ .progdata = (0 << 18) | (1 << 9) | 0x20,
+ },
+ {
+ .freq_low = 1856000,
+ .freq_high = 2035999,
+ .bandselect = 0x40,
+ .VCOdivider = 2,
+ .VCOnumber = 7,
+ .progdata = (0 << 18) | (1 << 9) | 0x40,
+ },
+ {
+ .freq_low = 2036000,
+ .freq_high = 2149999,
+ .bandselect = 0x80,
+ .VCOdivider = 2,
+ .VCOnumber = 8,
+ .progdata = (0 << 18) | (1 << 9) | 0x80,
+ },
+};
+
+static struct {
+ u8 reg;
+ u8 data;
+} cx24123_regdata[] =
+{
+ {0x00, 0x03}, /* Reset system */
+ {0x00, 0x00}, /* Clear reset */
+ {0x01, 0x3b}, /* Apply sensible defaults, from an i2c sniffer */
+ {0x03, 0x07},
+ {0x04, 0x10},
+ {0x05, 0x04},
+ {0x06, 0x31},
+ {0x0d, 0x02},
+ {0x0e, 0x03},
+ {0x0f, 0xfe},
+ {0x10, 0x01},
+ {0x14, 0x01},
+ {0x15, 0x98},
+ {0x16, 0x00},
+ {0x17, 0x01},
+ {0x1b, 0x05},
+ {0x1c, 0x80},
+ {0x1d, 0x00},
+ {0x1e, 0x00},
+ {0x20, 0x41},
+ {0x21, 0x15},
+ {0x27, 0x14},
+ {0x28, 0x46},
+ {0x29, 0x00},
+ {0x2a, 0xb0},
+ {0x2b, 0x73},
+ {0x2c, 0x00},
+ {0x2d, 0x00},
+ {0x2e, 0x00},
+ {0x2f, 0x00},
+ {0x30, 0x00},
+ {0x31, 0x00},
+ {0x32, 0x8c},
+ {0x33, 0x00},
+ {0x34, 0x00},
+ {0x35, 0x03},
+ {0x36, 0x02},
+ {0x37, 0x3a},
+ {0x3a, 0x00}, /* Enable AGC accumulator */
+ {0x44, 0x00},
+ {0x45, 0x00},
+ {0x46, 0x05},
+ {0x56, 0x41},
+ {0x57, 0xff},
+ {0x67, 0x83},
+};
+
+static int cx24123_writereg(struct cx24123_state* state, int reg, int data)
+{
+ u8 buf[] = { reg, data };
+ struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 };
+ int err;
+
+ if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1) {
+ printk("%s: writereg error(err == %i, reg == 0x%02x,"
+ " data == 0x%02x)\n", __FUNCTION__, err, reg, data);
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static int cx24123_writelnbreg(struct cx24123_state* state, int reg, int data)
+{
+ u8 buf[] = { reg, data };
+ /* fixme: put the Intersil address in the config */
+ struct i2c_msg msg = { .addr = 0x08, .flags = 0, .buf = buf, .len = 2 };
+ int err;
+
+ if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1) {
+ printk("%s: writelnbreg error (err == %i, reg == 0x%02x,"
+ " data == 0x%02x)\n", __FUNCTION__, err, reg, data);
+ return -EREMOTEIO;
+ }
+
+ /* cache the write, no way to read back */
+ state->lnbreg = data;
+
+ return 0;
+}
+
+static int cx24123_readreg(struct cx24123_state* state, u8 reg)
+{
+ int ret;
+ u8 b0[] = { reg };
+ u8 b1[] = { 0 };
+ struct i2c_msg msg[] = {
+ { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 1 },
+ { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 }
+ };
+
+ ret = i2c_transfer(state->i2c, msg, 2);
+
+ if (ret != 2) {
+ printk("%s: reg=0x%x (error=%d)\n", __FUNCTION__, reg, ret);
+ return ret;
+ }
+
+ return b1[0];
+}
+
+static int cx24123_readlnbreg(struct cx24123_state* state, u8 reg)
+{
+ return state->lnbreg;
+}
+
+static int cx24123_set_inversion(struct cx24123_state* state, fe_spectral_inversion_t inversion)
+{
+ switch (inversion) {
+ case INVERSION_OFF:
+ cx24123_writereg(state, 0x0e, cx24123_readreg(state, 0x0e) & 0x7f);
+ cx24123_writereg(state, 0x10, cx24123_readreg(state, 0x10) | 0x80);
+ break;
+ case INVERSION_ON:
+ cx24123_writereg(state, 0x0e, cx24123_readreg(state, 0x0e) | 0x80);
+ cx24123_writereg(state, 0x10, cx24123_readreg(state, 0x10) | 0x80);
+ break;
+ case INVERSION_AUTO:
+ cx24123_writereg(state, 0x10, cx24123_readreg(state, 0x10) & 0x7f);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cx24123_get_inversion(struct cx24123_state* state, fe_spectral_inversion_t *inversion)
+{
+ u8 val;
+
+ val = cx24123_readreg(state, 0x1b) >> 7;
+
+ if (val == 0)
+ *inversion = INVERSION_OFF;
+ else
+ *inversion = INVERSION_ON;
+
+ return 0;
+}
+
+static int cx24123_set_fec(struct cx24123_state* state, fe_code_rate_t fec)
+{
+ if ( (fec < FEC_NONE) || (fec > FEC_AUTO) )
+ fec = FEC_AUTO;
+
+ /* The hardware also supports 5/11 and 3/5, but they are never used */
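+ /* Note: the FEC_AUTO value 0xae below is simply the OR of the individual rate bits 0x02|0x04|0x08|0x20|0x80 */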
+ switch (fec) {
+ case FEC_NONE:
+ return cx24123_writereg(state, 0x0f, 0x01);
+ case FEC_1_2:
+ return cx24123_writereg(state, 0x0f, 0x02);
+ case FEC_2_3:
+ return cx24123_writereg(state, 0x0f, 0x04);
+ case FEC_3_4:
+ return cx24123_writereg(state, 0x0f, 0x08);
+ case FEC_5_6:
+ return cx24123_writereg(state, 0x0f, 0x20);
+ case FEC_7_8:
+ return cx24123_writereg(state, 0x0f, 0x80);
+ case FEC_AUTO:
+ return cx24123_writereg(state, 0x0f, 0xae);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int cx24123_get_fec(struct cx24123_state* state, fe_code_rate_t *fec)
+{
+ int ret;
+ u8 val;
+
+ ret = cx24123_readreg (state, 0x1b);
+ if (ret < 0)
+ return ret;
+ val = ret & 0x07;
+ switch (val) {
+ case 1:
+ *fec = FEC_1_2;
+ break;
+ case 3:
+ *fec = FEC_2_3;
+ break;
+ case 4:
+ *fec = FEC_3_4;
+ break;
+ case 5:
+ *fec = FEC_4_5;
+ break;
+ case 6:
+ *fec = FEC_5_6;
+ break;
+ case 7:
+ *fec = FEC_7_8;
+ break;
+ case 2: /* *fec = FEC_3_5; break; */
+ case 0: /* *fec = FEC_5_11; break; */
+ *fec = FEC_AUTO;
+ break;
+ default:
+ *fec = FEC_NONE; // can't happen
+ }
+
+ return 0;
+}
+
+/* fixme: Symbol rates < 3MSps may not work because of precision loss */
+static int cx24123_set_symbolrate(struct cx24123_state* state, u32 srate)
+{
+ u32 val;
+
+ val = (srate / 1185) * 100;
+
+ /* Compensate for the scaling above by removing 17 symbols per 1 Msps */
+ val = val - (17 * (srate / 1000000));
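+ /* Worked example (illustrative only, not from the original source): for srate = 27500000, */
+ /* val = (27500000/1185)*100 - 17*27 = 2320141 = 0x23670d, so regs 0x08/0x09/0x0a get 0x23/0x67/0x0d */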
+
+ cx24123_writereg(state, 0x08, (val >> 16) & 0xff );
+ cx24123_writereg(state, 0x09, (val >> 8) & 0xff );
+ cx24123_writereg(state, 0x0a, (val ) & 0xff );
+
+ return 0;
+}
+
+/*
+ * Based on the requested frequency and symbol rate, the tuner AGC has to be configured
+ * and the correct band selected. Calculate those values here.
+ */
+static int cx24123_pll_calculate(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+{
+ struct cx24123_state *state = fe->demodulator_priv;
+ u32 ndiv = 0, adiv = 0, vco_div = 0;
+ int i = 0;
+
+ /* Defaults for low freq, low rate */
+ state->VCAarg = cx24123_AGC_vals[0].VCAprogdata;
+ state->VGAarg = cx24123_AGC_vals[0].VGAprogdata;
+ state->bandselectarg = cx24123_bandselect_vals[0].progdata;
+ vco_div = cx24123_bandselect_vals[0].VCOdivider;
+
+ /* For the given symbol rate, determine the VCA and VGA programming bits */
+ for (i = 0; i < sizeof(cx24123_AGC_vals) / sizeof(cx24123_AGC_vals[0]); i++)
+ {
+ if ((cx24123_AGC_vals[i].symbolrate_low <= p->u.qpsk.symbol_rate) &&
+ (cx24123_AGC_vals[i].symbolrate_high >= p->u.qpsk.symbol_rate) ) {
+ state->VCAarg = cx24123_AGC_vals[i].VCAprogdata;
+ state->VGAarg = cx24123_AGC_vals[i].VGAprogdata;
+ }
+ }
+
+ /* For the given frequency, determine the bandselect programming bits */
+ for (i = 0; i < sizeof(cx24123_bandselect_vals) / sizeof(cx24123_bandselect_vals[0]); i++)
+ {
+ if ((cx24123_bandselect_vals[i].freq_low <= p->frequency) &&
+ (cx24123_bandselect_vals[i].freq_high >= p->frequency) ) {
+ state->bandselectarg = cx24123_bandselect_vals[i].progdata;
+ vco_div = cx24123_bandselect_vals[i].VCOdivider;
+ }
+ }
+
+ /* Determine the N and A dividers for the requested L-band frequency (in kHz). */
+ /* Note: 10111 (kHz) is the crystal frequency, divided down by 10. */
+ ndiv = ( ((p->frequency * vco_div) / (10111 / 10) / 2) / 32) & 0x1ff;
+ adiv = ( ((p->frequency * vco_div) / (10111 / 10) / 2) % 32) & 0x1f;
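+ /* Worked example (illustrative only, not from the original source): for frequency = 1550000 kHz */
+ /* the table above gives vco_div = 2, so (1550000*2)/1011/2 = 1533, hence ndiv = 1533/32 = 47 and adiv = 1533%32 = 29 */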
+
+ if (adiv == 0)
+ adiv++;
+
+ /* Determine the correct PLL frequency values. */
+ /* Command = 11, refdiv = 11, cpump polarity = 1, cpump current = 3mA (10). */
+ state->pllarg = (3 << 19) | (3 << 17) | (1 << 16) | (2 << 14);
+ state->pllarg |= (ndiv << 5) | adiv;
+
+ return 0;
+}
+
+/*
+ * Tuner data is 21 bits long, must be left-aligned in data.
+ * Tuner cx24109 is written through a dedicated 3-wire interface on the demod chip.
+ */
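+/*
+ * The word is shifted left by 3 to fill a 24 bit field and is then clocked out
+ * as three bytes through register 0x22, polling register 0x20 for completion
+ * of each byte before the demod is finally triggered to program the tuner.
+ */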
+static int cx24123_pll_writereg(struct dvb_frontend* fe, struct dvb_frontend_parameters *p, u32 data)
+{
+ struct cx24123_state *state = fe->demodulator_priv;
+ unsigned long timeout;
+
+ /* left-align the 21 bits to the bit-23 boundary */
+ data = data << 3;
+
+ /* Reset the demod PLL word length to 21 bits (0x15) */
+ cx24123_writereg(state, 0x21, 0x15);
+
+ /* write the most significant 8 bits, wait for the send to be completed */
+ timeout = jiffies + msecs_to_jiffies(40);
+ cx24123_writereg(state, 0x22, (data >> 16) & 0xff);
+ while ((cx24123_readreg(state, 0x20) & 0x40) == 0) {
+ if (time_after(jiffies, timeout)) {
+ printk("%s: demodulator is not responding, possibly hung, aborting.\n", __FUNCTION__);
+ return -EREMOTEIO;
+ }
+ msleep(10);
+ }
+
+ /* send the next 8 bits, wait for the send to be completed */
+ timeout = jiffies + msecs_to_jiffies(40);
+ cx24123_writereg(state, 0x22, (data>>8) & 0xff );
+ while ((cx24123_readreg(state, 0x20) & 0x40) == 0) {
+ if (time_after(jiffies, timeout)) {
+ printk("%s: demodulator is not responding, possibly hung, aborting.\n", __FUNCTION__);
+ return -EREMOTEIO;
+ }
+ msleep(10);
+ }
+
+ /* send the final byte: the lowest 5 data bits padded with 3 trailing zero bits, wait for the send to be completed */
+ timeout = jiffies + msecs_to_jiffies(40);
+ cx24123_writereg(state, 0x22, (data) & 0xff );
+ while ((cx24123_readreg(state, 0x20) & 0x80)) {
+ if (time_after(jiffies, timeout)) {
+ printk("%s: demodulator is not responding, possibly hung, aborting.\n", __FUNCTION__);
+ return -EREMOTEIO;
+ }
+ msleep(10);
+ }
+
+ /* Trigger the demod to configure the tuner */
+ cx24123_writereg(state, 0x20, cx24123_readreg(state, 0x20) | 2);
+ cx24123_writereg(state, 0x20, cx24123_readreg(state, 0x20) & 0xfd);
+
+ return 0;
+}
+
+static int cx24123_pll_tune(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+{
+ struct cx24123_state *state = fe->demodulator_priv;
+
+ if (cx24123_pll_calculate(fe, p) != 0) {
+ printk("%s: cx24123_pll_calcutate failed\n",__FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* Write the new VCA/VGA values */
+ cx24123_pll_writereg(fe, p, state->VCAarg);
+ cx24123_pll_writereg(fe, p, state->VGAarg);
+
+ /* Write the new bandselect and pll args */
+ cx24123_pll_writereg(fe, p, state->bandselectarg);
+ cx24123_pll_writereg(fe, p, state->pllarg);
+
+ return 0;
+}
+
+static int cx24123_initfe(struct dvb_frontend* fe)
+{
+ struct cx24123_state *state = fe->demodulator_priv;
+ int i;
+
+ /* Configure the demod to a good set of defaults */
+ for (i = 0; i < sizeof(cx24123_regdata) / sizeof(cx24123_regdata[0]); i++)
+ cx24123_writereg(state, cx24123_regdata[i].reg, cx24123_regdata[i].data);
+
+ if (state->config->pll_init)
+ state->config->pll_init(fe);
+
+ /* Configure the LNB for 14V */
+ if (state->config->use_isl6421)
+ cx24123_writelnbreg(state, 0x0, 0x2a);
+
+ return 0;
+}
+
+static int cx24123_set_voltage(struct dvb_frontend* fe, fe_sec_voltage_t voltage)
+{
+ struct cx24123_state *state = fe->demodulator_priv;
+ u8 val;
+
+ switch (state->config->use_isl6421) {
+
+ case 1:
+
+ val = cx24123_readlnbreg(state, 0x0);
+
+ switch (voltage) {
+ case SEC_VOLTAGE_13:
+ return cx24123_writelnbreg(state, 0x0, val & 0x32); /* V 13v */
+ case SEC_VOLTAGE_18:
+ return cx24123_writelnbreg(state, 0x0, val | 0x04); /* H 18v */
+ case SEC_VOLTAGE_OFF:
+ return cx24123_writelnbreg(state, 0x0, val & 0x30);
+ default:
+ return -EINVAL;
+ }
+
+ case 0:
+
+ val = cx24123_readreg(state, 0x29);
+
+ switch (voltage) {
+ case SEC_VOLTAGE_13:
+ dprintk("%s: setting voltage 13V\n", __FUNCTION__);
+ if (state->config->enable_lnb_voltage)
+ state->config->enable_lnb_voltage(fe, 1);
+ return cx24123_writereg(state, 0x29, val | 0x80);
+ case SEC_VOLTAGE_18:
+ dprintk("%s: setting voltage 18V\n", __FUNCTION__);
+ if (state->config->enable_lnb_voltage)
+ state->config->enable_lnb_voltage(fe, 1);
+ return cx24123_writereg(state, 0x29, val & 0x7f);
+ case SEC_VOLTAGE_OFF:
+ dprintk("%s: setting voltage off\n", __FUNCTION__);
+ if (state->config->enable_lnb_voltage)
+ state->config->enable_lnb_voltage(fe, 0);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int cx24123_send_diseqc_msg(struct dvb_frontend* fe,
+ struct dvb_diseqc_master_cmd *cmd)
+{
+ /* fixme: implement DiSEqC */
+ printk("%s: No support yet\n",__FUNCTION__);
+
+ return -ENOTSUPP;
+}
+
+static int cx24123_read_status(struct dvb_frontend* fe, fe_status_t* status)
+{
+ struct cx24123_state *state = fe->demodulator_priv;
+
+ int sync = cx24123_readreg(state, 0x14);
+ int lock = cx24123_readreg(state, 0x20);
+
+ *status = 0;
+ if (lock & 0x01)
+ *status |= FE_HAS_CARRIER | FE_HAS_SIGNAL;
+ if (sync & 0x04)
+ *status |= FE_HAS_VITERBI;
+ if (sync & 0x08)
+ *status |= FE_HAS_CARRIER;
+ if (sync & 0x80)
+ *status |= FE_HAS_SYNC | FE_HAS_LOCK;
+
+ return 0;
+}
+
+/*
+ * Configured to return the block error count, because no true UCBLOCKS value is
+ * available; the same figure therefore doubles up to satisfy both the BER and
+ * UCBLOCKS measurements
+ */
+static int cx24123_read_ber(struct dvb_frontend* fe, u32* ber)
+{
+ struct cx24123_state *state = fe->demodulator_priv;
+
+ state->lastber =
+ ((cx24123_readreg(state, 0x1c) & 0x3f) << 16) |
+ (cx24123_readreg(state, 0x1d) << 8 |
+ cx24123_readreg(state, 0x1e));
+
+ /* Derive the signal quality figure here; it is based on the BER. */
+ /* Map the 24-bit BER reading onto a 16-bit SNR value, where higher = better */
+ if (state->lastber < 5000)
+ state->snr = 655*100;
+ else if ( (state->lastber >= 5000) && (state->lastber < 55000) )
+ state->snr = 655*90;
+ else if ( (state->lastber >= 55000) && (state->lastber < 150000) )
+ state->snr = 655*80;
+ else if ( (state->lastber >= 150000) && (state->lastber < 250000) )
+ state->snr = 655*70;
+ else if ( (state->lastber >= 250000) && (state->lastber < 450000) )
+ state->snr = 655*65;
+ else
+ state->snr = 0;
+
+ *ber = state->lastber;
+
+ return 0;
+}
+
+static int cx24123_read_signal_strength(struct dvb_frontend* fe, u16* signal_strength)
+{
+ struct cx24123_state *state = fe->demodulator_priv;
+ *signal_strength = cx24123_readreg(state, 0x3b) << 8; /* larger = better */
+
+ return 0;
+}
+
+static int cx24123_read_snr(struct dvb_frontend* fe, u16* snr)
+{
+ struct cx24123_state *state = fe->demodulator_priv;
+ *snr = state->snr;
+
+ return 0;
+}
+
+static int cx24123_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
+{
+ struct cx24123_state *state = fe->demodulator_priv;
+ *ucblocks = state->lastber;
+
+ return 0;
+}
+
+static int cx24123_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+{
+ struct cx24123_state *state = fe->demodulator_priv;
+
+ if (state->config->set_ts_params)
+ state->config->set_ts_params(fe, 0);
+
+ state->currentfreq=p->frequency;
+ state->currentsymbolrate = p->u.qpsk.symbol_rate;
+
+ cx24123_set_inversion(state, p->inversion);
+ cx24123_set_fec(state, p->u.qpsk.fec_inner);
+ cx24123_set_symbolrate(state, p->u.qpsk.symbol_rate);
+ cx24123_pll_tune(fe, p);
+
+ /* Enable automatic acquisition and reset cycle */
+ cx24123_writereg(state, 0x03, (cx24123_readreg(state, 0x03) | 0x07));
+ cx24123_writereg(state, 0x00, 0x10);
+ cx24123_writereg(state, 0x00, 0);
+
+ return 0;
+}
+
+static int cx24123_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
+{
+ struct cx24123_state *state = fe->demodulator_priv;
+
+ if (cx24123_get_inversion(state, &p->inversion) != 0) {
+ printk("%s: Failed to get inversion status\n",__FUNCTION__);
+ return -EREMOTEIO;
+ }
+ if (cx24123_get_fec(state, &p->u.qpsk.fec_inner) != 0) {
+ printk("%s: Failed to get fec status\n",__FUNCTION__);
+ return -EREMOTEIO;
+ }
+ p->frequency = state->currentfreq;
+ p->u.qpsk.symbol_rate = state->currentsymbolrate;
+
+ return 0;
+}
+
+static int cx24123_set_tone(struct dvb_frontend* fe, fe_sec_tone_mode_t tone)
+{
+ struct cx24123_state *state = fe->demodulator_priv;
+ u8 val;
+
+ switch (state->config->use_isl6421) {
+ case 1:
+
+ val = cx24123_readlnbreg(state, 0x0);
+
+ switch (tone) {
+ case SEC_TONE_ON:
+ return cx24123_writelnbreg(state, 0x0, val | 0x10);
+ case SEC_TONE_OFF:
+ return cx24123_writelnbreg(state, 0x0, val & 0x2f);
+ default:
+ printk("%s: CASE reached default with tone=%d\n", __FUNCTION__, tone);
+ return -EINVAL;
+ }
+
+ case 0:
+
+ val = cx24123_readreg(state, 0x29);
+
+ switch (tone) {
+ case SEC_TONE_ON:
+ dprintk("%s: setting tone on\n", __FUNCTION__);
+ return cx24123_writereg(state, 0x29, val | 0x10);
+ case SEC_TONE_OFF:
+ dprintk("%s: setting tone off\n",__FUNCTION__);
+ return cx24123_writereg(state, 0x29, val & 0xef);
+ default:
+ printk("%s: CASE reached default with tone=%d\n", __FUNCTION__, tone);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void cx24123_release(struct dvb_frontend* fe)
+{
+ struct cx24123_state* state = fe->demodulator_priv;
+ dprintk("%s\n",__FUNCTION__);
+ kfree(state);
+}
+
+static struct dvb_frontend_ops cx24123_ops;
+
+struct dvb_frontend* cx24123_attach(const struct cx24123_config* config,
+ struct i2c_adapter* i2c)
+{
+ struct cx24123_state* state = NULL;
+ int ret;
+
+ dprintk("%s\n",__FUNCTION__);
+
+ /* allocate memory for the internal state */
+ state = kmalloc(sizeof(struct cx24123_state), GFP_KERNEL);
+ if (state == NULL) {
+ printk("Unable to kmalloc\n");
+ goto error;
+ }
+
+ /* setup the state */
+ state->config = config;
+ state->i2c = i2c;
+ memcpy(&state->ops, &cx24123_ops, sizeof(struct dvb_frontend_ops));
+ state->lastber = 0;
+ state->snr = 0;
+ state->lnbreg = 0;
+ state->VCAarg = 0;
+ state->VGAarg = 0;
+ state->bandselectarg = 0;
+ state->pllarg = 0;
+ state->currentfreq = 0;
+ state->currentsymbolrate = 0;
+
+ /* check if the demod is there */
+ ret = cx24123_readreg(state, 0x00);
+ if ((ret != 0xd1) && (ret != 0xe1)) {
+ printk("Version != d1 or e1\n");
+ goto error;
+ }
+
+ /* create dvb_frontend */
+ state->frontend.ops = &state->ops;
+ state->frontend.demodulator_priv = state;
+ return &state->frontend;
+
+error:
+ kfree(state);
+
+ return NULL;
+}
+
+static struct dvb_frontend_ops cx24123_ops = {
+
+ .info = {
+ .name = "Conexant CX24123/CX24109",
+ .type = FE_QPSK,
+ .frequency_min = 950000,
+ .frequency_max = 2150000,
+ .frequency_stepsize = 1011, /* kHz for QPSK frontends */
+ .frequency_tolerance = 29500,
+ .symbol_rate_min = 1000000,
+ .symbol_rate_max = 45000000,
+ .caps = FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK | FE_CAN_RECOVER
+ },
+
+ .release = cx24123_release,
+
+ .init = cx24123_initfe,
+ .set_frontend = cx24123_set_frontend,
+ .get_frontend = cx24123_get_frontend,
+ .read_status = cx24123_read_status,
+ .read_ber = cx24123_read_ber,
+ .read_signal_strength = cx24123_read_signal_strength,
+ .read_snr = cx24123_read_snr,
+ .read_ucblocks = cx24123_read_ucblocks,
+ .diseqc_send_master_cmd = cx24123_send_diseqc_msg,
+ .set_tone = cx24123_set_tone,
+ .set_voltage = cx24123_set_voltage,
+};
+
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+
+MODULE_DESCRIPTION("DVB Frontend module for Conexant cx24123/cx24109 hardware");
+MODULE_AUTHOR("Steven Toth");
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL(cx24123_attach);
diff --git a/drivers/media/dvb/frontends/cx24123.h b/drivers/media/dvb/frontends/cx24123.h
new file mode 100644
index 00000000000..0c922b5e926
--- /dev/null
+++ b/drivers/media/dvb/frontends/cx24123.h
@@ -0,0 +1,51 @@
+/*
+ Conexant cx24123/cx24109 - DVB QPSK Satellite demod/tuner driver
+
+ Copyright (C) 2005 Steven Toth <stoth@hauppauge.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef CX24123_H
+#define CX24123_H
+
+#include <linux/dvb/frontend.h>
+
+struct cx24123_config
+{
+ /* the demodulator's i2c address */
+ u8 demod_address;
+
+ /*
+ Cards like the Hauppauge Nova-S Plus/Nova-SE2 use an Intersil ISL6421 chip
+ for LNB control, while the KWorld DVB-S 100 uses the LNBDC and LNBTone bits
+ from register 0x29 of the CX24123 demodulator.
+ */
+ int use_isl6421;
+
+ /* PLL maintenance */
+ int (*pll_init)(struct dvb_frontend* fe);
+ int (*pll_set)(struct dvb_frontend* fe, struct dvb_frontend_parameters* params);
+
+ /* Need to set device param for start_dma */
+ int (*set_ts_params)(struct dvb_frontend* fe, int is_punctured);
+
+ void (*enable_lnb_voltage)(struct dvb_frontend* fe, int on);
+};
+
+extern struct dvb_frontend* cx24123_attach(const struct cx24123_config* config,
+ struct i2c_adapter* i2c);
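+
+/*
+ * Minimal usage sketch (hypothetical card driver; the i2c address and adapter
+ * below are illustrative only, not taken from a real board profile):
+ *
+ *    static struct cx24123_config my_cx24123_config = {
+ *        .demod_address = 0x55,
+ *        .use_isl6421   = 1,
+ *    };
+ *
+ *    fe = cx24123_attach(&my_cx24123_config, &dev->i2c_adap);
+ */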
+
+#endif /* CX24123_H */
diff --git a/drivers/media/dvb/frontends/dib3000mb.c b/drivers/media/dvb/frontends/dib3000mb.c
index 6b055360861..ae589adb1c0 100644
--- a/drivers/media/dvb/frontends/dib3000mb.c
+++ b/drivers/media/dvb/frontends/dib3000mb.c
@@ -700,10 +700,9 @@ struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
struct dib3000_state* state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct dib3000_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct dib3000_state), GFP_KERNEL);
if (state == NULL)
goto error;
- memset(state,0,sizeof(struct dib3000_state));
/* setup the state */
state->i2c = i2c;
diff --git a/drivers/media/dvb/frontends/dib3000mc.c b/drivers/media/dvb/frontends/dib3000mc.c
index c024fad1733..3b303dbb615 100644
--- a/drivers/media/dvb/frontends/dib3000mc.c
+++ b/drivers/media/dvb/frontends/dib3000mc.c
@@ -832,10 +832,9 @@ struct dvb_frontend* dib3000mc_attach(const struct dib3000_config* config,
u16 devid;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct dib3000_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct dib3000_state), GFP_KERNEL);
if (state == NULL)
goto error;
- memset(state,0,sizeof(struct dib3000_state));
/* setup the state */
state->i2c = i2c;
diff --git a/drivers/media/dvb/frontends/dvb-pll.c b/drivers/media/dvb/frontends/dvb-pll.c
index f857b869616..757075f007c 100644
--- a/drivers/media/dvb/frontends/dvb-pll.c
+++ b/drivers/media/dvb/frontends/dvb-pll.c
@@ -107,18 +107,19 @@ struct dvb_pll_desc dvb_pll_microtune_4042 = {
};
EXPORT_SYMBOL(dvb_pll_microtune_4042);
-struct dvb_pll_desc dvb_pll_thomson_dtt7611 = {
- .name = "Thomson dtt7611",
- .min = 44000000,
- .max = 958000000,
+struct dvb_pll_desc dvb_pll_thomson_dtt761x = {
+ /* DTT 7611 7611A 7612 7613 7613A 7614 7615 7615A */
+ .name = "Thomson dtt761x",
+ .min = 57000000,
+ .max = 863000000,
.count = 3,
.entries = {
- { 157250000, 44000000, 62500, 0x8e, 0x39 },
- { 454000000, 44000000, 62500, 0x8e, 0x3a },
+ { 147000000, 44000000, 62500, 0x8e, 0x39 },
+ { 417000000, 44000000, 62500, 0x8e, 0x3a },
{ 999999999, 44000000, 62500, 0x8e, 0x3c },
},
};
-EXPORT_SYMBOL(dvb_pll_thomson_dtt7611);
+EXPORT_SYMBOL(dvb_pll_thomson_dtt761x);
struct dvb_pll_desc dvb_pll_unknown_1 = {
.name = "unknown 1", /* used by dntv live dvb-t */
@@ -344,6 +345,23 @@ struct dvb_pll_desc dvb_pll_tbmv30111in = {
};
EXPORT_SYMBOL(dvb_pll_tbmv30111in);
+/*
+ * Philips SD1878 Tuner.
+ */
+struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = {
+ .name = "Philips SD1878",
+ .min = 950000,
+ .max = 2150000,
+ .count = 4,
+ .entries = {
+ { 1250000, 499, 500, 0xc4, 0x00},
+ { 1550000, 499, 500, 0xc4, 0x40},
+ { 2050000, 499, 500, 0xc4, 0x80},
+ { 2150000, 499, 500, 0xc4, 0xc0},
+ },
+};
+EXPORT_SYMBOL(dvb_pll_philips_sd1878_tda8261);
+
/* ----------------------------------------------------------- */
/* code */
diff --git a/drivers/media/dvb/frontends/dvb-pll.h b/drivers/media/dvb/frontends/dvb-pll.h
index 497d31dcf41..f682c09189b 100644
--- a/drivers/media/dvb/frontends/dvb-pll.h
+++ b/drivers/media/dvb/frontends/dvb-pll.h
@@ -25,7 +25,7 @@ extern struct dvb_pll_desc dvb_pll_thomson_dtt759x;
extern struct dvb_pll_desc dvb_pll_thomson_dtt7610;
extern struct dvb_pll_desc dvb_pll_lg_z201;
extern struct dvb_pll_desc dvb_pll_microtune_4042;
-extern struct dvb_pll_desc dvb_pll_thomson_dtt7611;
+extern struct dvb_pll_desc dvb_pll_thomson_dtt761x;
extern struct dvb_pll_desc dvb_pll_unknown_1;
extern struct dvb_pll_desc dvb_pll_tua6010xs;
@@ -39,6 +39,7 @@ extern struct dvb_pll_desc dvb_pll_tded4;
extern struct dvb_pll_desc dvb_pll_tuv1236d;
extern struct dvb_pll_desc dvb_pll_tdhu2;
extern struct dvb_pll_desc dvb_pll_tbmv30111in;
+extern struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261;
int dvb_pll_configure(struct dvb_pll_desc *desc, u8 *buf,
u32 freq, int bandwidth);
diff --git a/drivers/media/dvb/frontends/lgdt330x.c b/drivers/media/dvb/frontends/lgdt330x.c
index cb5301865d0..4691ac54bc1 100644
--- a/drivers/media/dvb/frontends/lgdt330x.c
+++ b/drivers/media/dvb/frontends/lgdt330x.c
@@ -27,6 +27,7 @@
* DViCO FusionHDTV 3 Gold-T
* DViCO FusionHDTV 5 Gold
* DViCO FusionHDTV 5 Lite
+ * DViCO FusionHDTV 5 USB Gold
* Air2PC/AirStar 2 ATSC 3rd generation (HD5000)
*
* TODO:
@@ -402,6 +403,8 @@ static int lgdt330x_set_parameters(struct dvb_frontend* fe,
state->config->pll_set(fe, param);
/* Keep track of the new frequency */
+ /* FIXME this is the wrong way to do this... */
+ /* The tuner is shared with the video4linux analog API */
state->current_frequency = param->frequency;
lgdt330x_SwReset(state);
@@ -711,10 +714,9 @@ struct dvb_frontend* lgdt330x_attach(const struct lgdt330x_config* config,
u8 buf[1];
/* Allocate memory for the internal state */
- state = (struct lgdt330x_state*) kmalloc(sizeof(struct lgdt330x_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct lgdt330x_state), GFP_KERNEL);
if (state == NULL)
goto error;
- memset(state,0,sizeof(*state));
/* Setup the state */
state->config = config;
diff --git a/drivers/media/dvb/frontends/mt312.c b/drivers/media/dvb/frontends/mt312.c
index 8d672283c93..ec4e641acc6 100644
--- a/drivers/media/dvb/frontends/mt312.c
+++ b/drivers/media/dvb/frontends/mt312.c
@@ -501,7 +501,8 @@ static int mt312_set_frontend(struct dvb_frontend* fe,
case ID_VP310:
// For now we will do this only for the VP310.
// It should be better for the mt312 as well, but tunning will be slower. ACCJr 09/29/03
- if ((ret = mt312_readreg(state, CONFIG, &config_val) < 0))
+ ret = mt312_readreg(state, CONFIG, &config_val);
+ if (ret < 0)
return ret;
if (p->u.qpsk.symbol_rate >= 30000000) //Note that 30MS/s should use 90MHz
{
diff --git a/drivers/media/dvb/frontends/mt352.c b/drivers/media/dvb/frontends/mt352.c
index f0c610f2c2d..aaaec909ddf 100644
--- a/drivers/media/dvb/frontends/mt352.c
+++ b/drivers/media/dvb/frontends/mt352.c
@@ -535,9 +535,8 @@ struct dvb_frontend* mt352_attach(const struct mt352_config* config,
struct mt352_state* state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct mt352_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct mt352_state), GFP_KERNEL);
if (state == NULL) goto error;
- memset(state,0,sizeof(*state));
/* setup the state */
state->i2c = i2c;
diff --git a/drivers/media/dvb/frontends/nxt2002.c b/drivers/media/dvb/frontends/nxt2002.c
index 52c416043a6..4f263e65ba1 100644
--- a/drivers/media/dvb/frontends/nxt2002.c
+++ b/drivers/media/dvb/frontends/nxt2002.c
@@ -22,7 +22,8 @@
/*
* This driver needs external firmware. Please use the command
* "<kerneldir>/Documentation/dvb/get_dvb_firmware nxt2002" to
- * download/extract it, and then copy it to /usr/lib/hotplug/firmware.
+ * download/extract it, and then copy it to /usr/lib/hotplug/firmware
+ * or /lib/firmware (depending on configuration of firmware hotplug).
*/
#define NXT2002_DEFAULT_FIRMWARE "dvb-fe-nxt2002.fw"
#define CRC_CCIT_MASK 0x1021
diff --git a/drivers/media/dvb/frontends/nxt200x.c b/drivers/media/dvb/frontends/nxt200x.c
index aeafef46e3e..78d2b93d35b 100644
--- a/drivers/media/dvb/frontends/nxt200x.c
+++ b/drivers/media/dvb/frontends/nxt200x.c
@@ -1110,10 +1110,9 @@ struct dvb_frontend* nxt200x_attach(const struct nxt200x_config* config,
u8 buf [] = {0,0,0,0,0};
/* allocate memory for the internal state */
- state = (struct nxt200x_state*) kmalloc(sizeof(struct nxt200x_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct nxt200x_state), GFP_KERNEL);
if (state == NULL)
goto error;
- memset(state,0,sizeof(*state));
/* setup the state */
state->config = config;
diff --git a/drivers/media/dvb/frontends/nxt6000.c b/drivers/media/dvb/frontends/nxt6000.c
index a458a3bfff7..a16eeba0020 100644
--- a/drivers/media/dvb/frontends/nxt6000.c
+++ b/drivers/media/dvb/frontends/nxt6000.c
@@ -574,11 +574,11 @@ static struct dvb_frontend_ops nxt6000_ops = {
.symbol_rate_max = 9360000, /* FIXME */
.symbol_rate_tolerance = 4000,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
- FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 |
- FE_CAN_FEC_7_8 | FE_CAN_FEC_8_9 | FE_CAN_FEC_AUTO |
- FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
- FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO |
- FE_CAN_HIERARCHY_AUTO,
+ FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 |
+ FE_CAN_FEC_7_8 | FE_CAN_FEC_8_9 | FE_CAN_FEC_AUTO |
+ FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_HIERARCHY_AUTO,
},
.release = nxt6000_release,
diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
index 531f76246e5..7c3aed1f546 100644
--- a/drivers/media/dvb/frontends/or51211.c
+++ b/drivers/media/dvb/frontends/or51211.c
@@ -25,7 +25,8 @@
/*
* This driver needs external firmware. Please use the command
* "<kerneldir>/Documentation/dvb/get_dvb_firmware or51211" to
- * download/extract it, and then copy it to /usr/lib/hotplug/firmware.
+ * download/extract it, and then copy it to /usr/lib/hotplug/firmware
+ * or /lib/firmware (depending on configuration of firmware hotplug).
*/
#define OR51211_DEFAULT_FIRMWARE "dvb-fe-or51211.fw"
@@ -112,7 +113,7 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
u8 tudata[585];
int i;
- dprintk("Firmware is %d bytes\n",fw->size);
+ dprintk("Firmware is %zd bytes\n",fw->size);
/* Get eprom data */
tudata[0] = 17;
diff --git a/drivers/media/dvb/frontends/s5h1420.c b/drivers/media/dvb/frontends/s5h1420.c
index 18715091aed..d6947759692 100644
--- a/drivers/media/dvb/frontends/s5h1420.c
+++ b/drivers/media/dvb/frontends/s5h1420.c
@@ -521,8 +521,8 @@ static void s5h1420_setfec_inversion(struct s5h1420_state* state,
case FEC_3_4:
s5h1420_writereg(state, 0x30, 0x04);
- s5h1420_writereg(state, 0x31, 0x12 | inversion);
- break;
+ s5h1420_writereg(state, 0x31, 0x12 | inversion);
+ break;
case FEC_5_6:
s5h1420_writereg(state, 0x30, 0x08);
diff --git a/drivers/media/dvb/frontends/sp8870.c b/drivers/media/dvb/frontends/sp8870.c
index fc06cd6b46c..73829e647e5 100644
--- a/drivers/media/dvb/frontends/sp8870.c
+++ b/drivers/media/dvb/frontends/sp8870.c
@@ -22,7 +22,8 @@
/*
* This driver needs external firmware. Please use the command
* "<kerneldir>/Documentation/dvb/get_dvb_firmware alps_tdlb7" to
- * download/extract it, and then copy it to /usr/lib/hotplug/firmware.
+ * download/extract it, and then copy it to /usr/lib/hotplug/firmware
+ * or /lib/firmware (depending on configuration of firmware hotplug).
*/
#define SP8870_DEFAULT_FIRMWARE "dvb-fe-sp8870.fw"
diff --git a/drivers/media/dvb/frontends/sp887x.c b/drivers/media/dvb/frontends/sp887x.c
index e3b66578224..eb8a602198c 100644
--- a/drivers/media/dvb/frontends/sp887x.c
+++ b/drivers/media/dvb/frontends/sp887x.c
@@ -5,7 +5,8 @@
/*
* This driver needs external firmware. Please use the command
* "<kerneldir>/Documentation/dvb/get_dvb_firmware sp887x" to
- * download/extract it, and then copy it to /usr/lib/hotplug/firmware.
+ * download/extract it, and then copy it to /usr/lib/hotplug/firmware
+ * or /lib/firmware (depending on configuration of firmware hotplug).
*/
#define SP887X_DEFAULT_FIRMWARE "dvb-fe-sp887x.fw"
@@ -581,7 +582,7 @@ static struct dvb_frontend_ops sp887x_ops = {
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
- FE_CAN_RECOVER
+ FE_CAN_RECOVER
},
.release = sp887x_release,
diff --git a/drivers/media/dvb/frontends/stv0299.c b/drivers/media/dvb/frontends/stv0299.c
index 177d71d56b6..5bcd00f792e 100644
--- a/drivers/media/dvb/frontends/stv0299.c
+++ b/drivers/media/dvb/frontends/stv0299.c
@@ -131,6 +131,13 @@ static int stv0299_readregs (struct stv0299_state* state, u8 reg1, u8 *b, u8 len
return ret == 2 ? 0 : ret;
}
+int stv0299_enable_plli2c (struct dvb_frontend* fe)
+{
+ struct stv0299_state* state = fe->demodulator_priv;
+
+ return stv0299_writeregI(state, 0x05, 0xb5); /* enable i2c repeater on stv0299 */
+}
+
static int stv0299_set_FEC (struct stv0299_state* state, fe_code_rate_t fec)
{
dprintk ("%s\n", __FUNCTION__);
@@ -387,7 +394,7 @@ static int stv0299_set_voltage (struct dvb_frontend* fe, fe_sec_voltage_t voltag
};
}
-static int stv0299_send_legacy_dish_cmd (struct dvb_frontend* fe, u32 cmd)
+static int stv0299_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long cmd)
{
struct stv0299_state* state = fe->demodulator_priv;
u8 reg0x08;
@@ -407,7 +414,7 @@ static int stv0299_send_legacy_dish_cmd (struct dvb_frontend* fe, u32 cmd)
cmd = cmd << 1;
if (debug_legacy_dish_switch)
- printk ("%s switch command: 0x%04x\n",__FUNCTION__, cmd);
+ printk ("%s switch command: 0x%04lx\n",__FUNCTION__, cmd);
do_gettimeofday (&nexttime);
if (debug_legacy_dish_switch)
@@ -717,5 +724,6 @@ MODULE_AUTHOR("Ralph Metzler, Holger Waechtler, Peter Schildmann, Felix Domke, "
"Andreas Oberritter, Andrew de Quincey, Kenneth Aafløy");
MODULE_LICENSE("GPL");
+EXPORT_SYMBOL(stv0299_enable_plli2c);
EXPORT_SYMBOL(stv0299_writereg);
EXPORT_SYMBOL(stv0299_attach);
diff --git a/drivers/media/dvb/frontends/stv0299.h b/drivers/media/dvb/frontends/stv0299.h
index 9af3d71c89d..32c87b4c2f1 100644
--- a/drivers/media/dvb/frontends/stv0299.h
+++ b/drivers/media/dvb/frontends/stv0299.h
@@ -94,6 +94,7 @@ struct stv0299_config
};
extern int stv0299_writereg (struct dvb_frontend* fe, u8 reg, u8 data);
+extern int stv0299_enable_plli2c (struct dvb_frontend* fe);
extern struct dvb_frontend* stv0299_attach(const struct stv0299_config* config,
struct i2c_adapter* i2c);
diff --git a/drivers/media/dvb/frontends/tda10021.c b/drivers/media/dvb/frontends/tda10021.c
index 425cd19136f..21255cac979 100644
--- a/drivers/media/dvb/frontends/tda10021.c
+++ b/drivers/media/dvb/frontends/tda10021.c
@@ -95,7 +95,7 @@ static u8 tda10021_readreg (struct tda10021_state* state, u8 reg)
u8 b0 [] = { reg };
u8 b1 [] = { 0 };
struct i2c_msg msg [] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 1 },
- { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } };
+ { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } };
int ret;
ret = i2c_transfer (state->i2c, msg, 2);
@@ -434,7 +434,7 @@ static struct dvb_frontend_ops tda10021_ops = {
.frequency_max = 858000000,
.symbol_rate_min = (XIN/2)/64, /* SACLK/64 == (XIN/2)/64 */
.symbol_rate_max = (XIN/2)/4, /* SACLK/4 */
- #if 0
+#if 0
.frequency_tolerance = ???,
.symbol_rate_tolerance = ???, /* ppm */ /* == 8% (spec p. 5) */
#endif
diff --git a/drivers/media/dvb/frontends/tda1004x.c b/drivers/media/dvb/frontends/tda1004x.c
index dd02aff467f..c63e9a5084e 100644
--- a/drivers/media/dvb/frontends/tda1004x.c
+++ b/drivers/media/dvb/frontends/tda1004x.c
@@ -23,7 +23,8 @@
* This driver needs external firmware. Please use the commands
* "<kerneldir>/Documentation/dvb/get_dvb_firmware tda10045",
* "<kerneldir>/Documentation/dvb/get_dvb_firmware tda10046" to
- * download/extract them, and then copy them to /usr/lib/hotplug/firmware.
+ * download/extract them, and then copy them to /usr/lib/hotplug/firmware
+ * or /lib/firmware (depending on configuration of firmware hotplug).
*/
#define TDA10045_DEFAULT_FIRMWARE "dvb-fe-tda10045.fw"
#define TDA10046_DEFAULT_FIRMWARE "dvb-fe-tda10046.fw"
@@ -271,32 +272,57 @@ static int tda10045h_set_bandwidth(struct tda1004x_state *state,
static int tda10046h_set_bandwidth(struct tda1004x_state *state,
fe_bandwidth_t bandwidth)
{
- static u8 bandwidth_6mhz[] = { 0x80, 0x15, 0xfe, 0xab, 0x8e };
- static u8 bandwidth_7mhz[] = { 0x6e, 0x02, 0x53, 0xc8, 0x25 };
- static u8 bandwidth_8mhz[] = { 0x60, 0x12, 0xa8, 0xe4, 0xbd };
-
+ static u8 bandwidth_6mhz_53M[] = { 0x7b, 0x2e, 0x11, 0xf0, 0xd2 };
+ static u8 bandwidth_7mhz_53M[] = { 0x6a, 0x02, 0x6a, 0x43, 0x9f };
+ static u8 bandwidth_8mhz_53M[] = { 0x5c, 0x32, 0xc2, 0x96, 0x6d };
+
+ static u8 bandwidth_6mhz_48M[] = { 0x70, 0x02, 0x49, 0x24, 0x92 };
+ static u8 bandwidth_7mhz_48M[] = { 0x60, 0x02, 0xaa, 0xaa, 0xab };
+ static u8 bandwidth_8mhz_48M[] = { 0x54, 0x03, 0x0c, 0x30, 0xc3 };
+ int tda10046_clk53m;
+
+ if ((state->config->if_freq == TDA10046_FREQ_045) ||
+ (state->config->if_freq == TDA10046_FREQ_052))
+ tda10046_clk53m = 0;
+ else
+ tda10046_clk53m = 1;
switch (bandwidth) {
case BANDWIDTH_6_MHZ:
- tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_6mhz, sizeof(bandwidth_6mhz));
+ if (tda10046_clk53m)
+ tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_6mhz_53M,
+ sizeof(bandwidth_6mhz_53M));
+ else
+ tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_6mhz_48M,
+ sizeof(bandwidth_6mhz_48M));
if (state->config->if_freq == TDA10046_FREQ_045) {
- tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x09);
- tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x4f);
+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0a);
+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0xab);
}
break;
case BANDWIDTH_7_MHZ:
- tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_7mhz, sizeof(bandwidth_7mhz));
+ if (tda10046_clk53m)
+ tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_7mhz_53M,
+ sizeof(bandwidth_7mhz_53M));
+ else
+ tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_7mhz_48M,
+ sizeof(bandwidth_7mhz_48M));
if (state->config->if_freq == TDA10046_FREQ_045) {
- tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0a);
- tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x79);
+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0c);
+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x00);
}
break;
case BANDWIDTH_8_MHZ:
- tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_8mhz, sizeof(bandwidth_8mhz));
+ if (tda10046_clk53m)
+ tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_8mhz_53M,
+ sizeof(bandwidth_8mhz_53M));
+ else
+ tda1004x_write_buf(state, TDA10046H_TIME_WREF1, bandwidth_8mhz_48M,
+ sizeof(bandwidth_8mhz_48M));
if (state->config->if_freq == TDA10046_FREQ_045) {
- tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0b);
- tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0xa3);
+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0d);
+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x55);
}
break;
@@ -418,9 +444,22 @@ static int tda10045_fwupload(struct dvb_frontend* fe)
static void tda10046_init_plls(struct dvb_frontend* fe)
{
struct tda1004x_state* state = fe->demodulator_priv;
+ int tda10046_clk53m;
+
+ if ((state->config->if_freq == TDA10046_FREQ_045) ||
+ (state->config->if_freq == TDA10046_FREQ_052))
+ tda10046_clk53m = 0;
+ else
+ tda10046_clk53m = 1;
tda1004x_write_byteI(state, TDA10046H_CONFPLL1, 0xf0);
- tda1004x_write_byteI(state, TDA10046H_CONFPLL2, 0x0a); // PLL M = 10
+ if(tda10046_clk53m) {
+ printk(KERN_INFO "tda1004x: setting up plls for 53MHz sampling clock\n");
+ tda1004x_write_byteI(state, TDA10046H_CONFPLL2, 0x08); // PLL M = 8
+ } else {
+ printk(KERN_INFO "tda1004x: setting up plls for 48MHz sampling clock\n");
+ tda1004x_write_byteI(state, TDA10046H_CONFPLL2, 0x03); // PLL M = 3
+ }
if (state->config->xtal_freq == TDA10046_XTAL_4M ) {
dprintk("%s: setting up PLLs for a 4 MHz Xtal\n", __FUNCTION__);
tda1004x_write_byteI(state, TDA10046H_CONFPLL3, 0); // PLL P = N = 0
@@ -428,26 +467,32 @@ static void tda10046_init_plls(struct dvb_frontend* fe)
dprintk("%s: setting up PLLs for a 16 MHz Xtal\n", __FUNCTION__);
tda1004x_write_byteI(state, TDA10046H_CONFPLL3, 3); // PLL P = 0, N = 3
}
- tda1004x_write_byteI(state, TDA10046H_FREQ_OFFSET, 99);
+ if(tda10046_clk53m)
+ tda1004x_write_byteI(state, TDA10046H_FREQ_OFFSET, 0x67);
+ else
+ tda1004x_write_byteI(state, TDA10046H_FREQ_OFFSET, 0x72);
+ /* Note clock frequency is handled implicitly */
switch (state->config->if_freq) {
- case TDA10046_FREQ_3617:
- tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0xd4);
- tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x2c);
- break;
- case TDA10046_FREQ_3613:
- tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0xd4);
- tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x13);
- break;
case TDA10046_FREQ_045:
- tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0b);
- tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0xa3);
+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0c);
+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x00);
break;
case TDA10046_FREQ_052:
- tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0c);
- tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x06);
+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0x0d);
+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0xc7);
+ break;
+ case TDA10046_FREQ_3617:
+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0xd7);
+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x59);
+ break;
+ case TDA10046_FREQ_3613:
+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_MSB, 0xd7);
+ tda1004x_write_byteI(state, TDA10046H_FREQ_PHY2_LSB, 0x3f);
break;
}
tda10046h_set_bandwidth(state, BANDWIDTH_8_MHZ); // default bandwidth 8 MHz
+ /* let the PLLs settle */
+ msleep(120);
}
static int tda10046_fwupload(struct dvb_frontend* fe)
@@ -462,13 +507,13 @@ static int tda10046_fwupload(struct dvb_frontend* fe)
/* let the clocks recover from sleep */
msleep(5);
+ /* The PLLs need to be reprogrammed after sleep */
+ tda10046_init_plls(fe);
+
/* don't re-upload unless necessary */
if (tda1004x_check_upload_ok(state) == 0)
return 0;
- /* set parameters */
- tda10046_init_plls(fe);
-
if (state->config->request_firmware != NULL) {
/* request the firmware, this will block until someone uploads it */
printk(KERN_INFO "tda1004x: waiting for firmware upload...\n");
@@ -484,7 +529,6 @@ static int tda10046_fwupload(struct dvb_frontend* fe)
return ret;
} else {
/* boot from firmware eeprom */
- /* Hac Note: we might need to do some GPIO Magic here */
printk(KERN_INFO "tda1004x: booting from eeprom\n");
tda1004x_write_mask(state, TDA1004X_CONFC4, 4, 4);
msleep(300);
@@ -606,10 +650,9 @@ static int tda10046_init(struct dvb_frontend* fe)
// tda setup
tda1004x_write_mask(state, TDA1004X_CONFC4, 0x20, 0); // disable DSP watchdog timer
- tda1004x_write_byteI(state, TDA1004X_AUTO, 7); // select HP stream
- tda1004x_write_byteI(state, TDA1004X_CONFC1, 8); // disable pulse killer
+ tda1004x_write_byteI(state, TDA1004X_AUTO, 0x87); // 100 ppm crystal, select HP stream
+ tda1004x_write_byteI(state, TDA1004X_CONFC1, 8); // disable pulse killer
- tda10046_init_plls(fe);
switch (state->config->agc_config) {
case TDA10046_AGC_DEFAULT:
tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x00); // AGC setup
@@ -626,25 +669,22 @@ static int tda10046_init(struct dvb_frontend* fe)
case TDA10046_AGC_TDA827X:
tda1004x_write_byteI(state, TDA10046H_AGC_CONF, 0x02); // AGC setup
tda1004x_write_byteI(state, TDA10046H_AGC_THR, 0x70); // AGC Threshold
- tda1004x_write_byteI(state, TDA10046H_AGC_RENORM, 0x0E); // Gain Renormalize
- tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x60); // set AGC polarities
+ tda1004x_write_byteI(state, TDA10046H_AGC_RENORM, 0x08); // Gain Renormalize
+ tda1004x_write_byteI(state, TDA10046H_CONF_POLARITY, 0x6a); // set AGC polarities
break;
}
+ tda1004x_write_byteI(state, TDA1004X_CONFADC2, 0x38);
tda1004x_write_byteI(state, TDA10046H_CONF_TRISTATE1, 0x61); // Turn both AGC outputs on
tda1004x_write_byteI(state, TDA10046H_AGC_TUN_MIN, 0); // }
tda1004x_write_byteI(state, TDA10046H_AGC_TUN_MAX, 0xff); // } AGC min/max values
tda1004x_write_byteI(state, TDA10046H_AGC_IF_MIN, 0); // }
tda1004x_write_byteI(state, TDA10046H_AGC_IF_MAX, 0xff); // }
- tda1004x_write_byteI(state, TDA10046H_AGC_GAINS, 1); // IF gain 2, TUN gain 1
+ tda1004x_write_byteI(state, TDA10046H_AGC_GAINS, 0x12); // IF gain 2, TUN gain 1
tda1004x_write_byteI(state, TDA10046H_CVBER_CTRL, 0x1a); // 10^6 VBER measurement bits
tda1004x_write_byteI(state, TDA1004X_CONF_TS1, 7); // MPEG2 interface config
tda1004x_write_byteI(state, TDA1004X_CONF_TS2, 0xc0); // MPEG2 interface config
tda1004x_write_mask(state, 0x3a, 0x80, state->config->invert_oclk << 7);
- tda1004x_write_byteI(state, TDA10046H_CONF_TRISTATE2, 0xe1); // tristate setup
- tda1004x_write_byteI(state, TDA10046H_GPIO_OUT_SEL, 0xcc); // GPIO output config
- tda1004x_write_byteI(state, TDA10046H_GPIO_SELECT, 8); // GPIO select
-
state->initialised = 1;
return 0;
}
@@ -686,9 +726,9 @@ static int tda1004x_set_fe(struct dvb_frontend* fe,
// Set standard params.. or put them to auto
if ((fe_params->u.ofdm.code_rate_HP == FEC_AUTO) ||
- (fe_params->u.ofdm.code_rate_LP == FEC_AUTO) ||
- (fe_params->u.ofdm.constellation == QAM_AUTO) ||
- (fe_params->u.ofdm.hierarchy_information == HIERARCHY_AUTO)) {
+ (fe_params->u.ofdm.code_rate_LP == FEC_AUTO) ||
+ (fe_params->u.ofdm.constellation == QAM_AUTO) ||
+ (fe_params->u.ofdm.hierarchy_information == HIERARCHY_AUTO)) {
tda1004x_write_mask(state, TDA1004X_AUTO, 1, 1); // enable auto
tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x03, 0); // turn off constellation bits
tda1004x_write_mask(state, TDA1004X_IN_CONF1, 0x60, 0); // turn off hierarchy bits
@@ -851,6 +891,7 @@ static int tda1004x_set_fe(struct dvb_frontend* fe,
static int tda1004x_get_fe(struct dvb_frontend* fe, struct dvb_frontend_parameters *fe_params)
{
struct tda1004x_state* state = fe->demodulator_priv;
+
dprintk("%s\n", __FUNCTION__);
// inversion status
@@ -875,16 +916,18 @@ static int tda1004x_get_fe(struct dvb_frontend* fe, struct dvb_frontend_paramete
break;
}
break;
-
case TDA1004X_DEMOD_TDA10046:
switch (tda1004x_read_byte(state, TDA10046H_TIME_WREF1)) {
- case 0x60:
+ case 0x5c:
+ case 0x54:
fe_params->u.ofdm.bandwidth = BANDWIDTH_8_MHZ;
break;
- case 0x6e:
+ case 0x6a:
+ case 0x60:
fe_params->u.ofdm.bandwidth = BANDWIDTH_7_MHZ;
break;
- case 0x80:
+ case 0x7b:
+ case 0x70:
fe_params->u.ofdm.bandwidth = BANDWIDTH_6_MHZ;
break;
}
diff --git a/drivers/media/dvb/pluto2/Kconfig b/drivers/media/dvb/pluto2/Kconfig
index f02842be0d6..84f8f9f5286 100644
--- a/drivers/media/dvb/pluto2/Kconfig
+++ b/drivers/media/dvb/pluto2/Kconfig
@@ -8,7 +8,7 @@ config DVB_PLUTO2
Support for PCI cards based on the Pluto2 FPGA like the Satelco
Easywatch Mobile Terrestrial DVB-T Receiver.
- Since these cards have no MPEG decoder onboard, they transmit
+ Since these cards have no MPEG decoder onboard, they transmit
only compressed MPEG data over the PCI bus, so you need
an external software decoder to watch TV on your computer.
diff --git a/drivers/media/dvb/pluto2/pluto2.c b/drivers/media/dvb/pluto2/pluto2.c
index bbebd1c4cac..1c5316e209e 100644
--- a/drivers/media/dvb/pluto2/pluto2.c
+++ b/drivers/media/dvb/pluto2/pluto2.c
@@ -584,11 +584,10 @@ static int __devinit pluto2_probe(struct pci_dev *pdev,
struct dmx_demux *dmx;
int ret = -ENOMEM;
- pluto = kmalloc(sizeof(struct pluto), GFP_KERNEL);
+ pluto = kzalloc(sizeof(struct pluto), GFP_KERNEL);
if (!pluto)
goto out;
- memset(pluto, 0, sizeof(struct pluto));
pluto->pdev = pdev;
ret = pci_enable_device(pdev);
diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig
index fa5034a9ecf..5b2aadb8385 100644
--- a/drivers/media/dvb/ttpci/Kconfig
+++ b/drivers/media/dvb/ttpci/Kconfig
@@ -18,9 +18,10 @@ config DVB_AV7110
This driver only supports the fullfeatured cards with
onboard MPEG2 decoder.
- This driver needs an external firmware. Please use the script
- "<kerneldir>/Documentation/dvb/get_dvb_firmware av7110" to
- download/extract it, and then copy it to /usr/lib/hotplug/firmware.
+ This driver needs an external firmware. Please use the script
+ "<kerneldir>/Documentation/dvb/get_dvb_firmware av7110" to
+ download/extract it, and then copy it to /usr/lib/hotplug/firmware
+ or /lib/firmware (depending on configuration of firmware hotplug).
Say Y if you own such a card and want to use it.
diff --git a/drivers/media/dvb/ttpci/Makefile b/drivers/media/dvb/ttpci/Makefile
index 825ab1c38a4..a690730ac39 100644
--- a/drivers/media/dvb/ttpci/Makefile
+++ b/drivers/media/dvb/ttpci/Makefile
@@ -16,7 +16,7 @@ EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
hostprogs-y := fdump
ifdef CONFIG_DVB_AV7110_FIRMWARE
-$(obj)/av7110.o: $(obj)/fdump $(obj)/av7110_firm.h
+$(obj)/av7110.o: $(obj)/fdump $(obj)/av7110_firm.h
$(obj)/av7110_firm.h:
$(obj)/fdump $(CONFIG_DVB_AV7110_FIRMWARE_FILE) dvb_ttpci_fw $@
diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c
index 7dae91e5863..327a8089193 100644
--- a/drivers/media/dvb/ttpci/av7110.c
+++ b/drivers/media/dvb/ttpci/av7110.c
@@ -133,7 +133,13 @@ static void init_av7110_av(struct av7110 *av7110)
/* remaining inits according to card and frontend type */
av7110->analog_tuner_flags = 0;
av7110->current_input = 0;
- if (i2c_writereg(av7110, 0x20, 0x00, 0x00) == 1) {
+ if (dev->pci->subsystem_vendor == 0x13c2 && dev->pci->subsystem_device == 0x000a) {
+ printk("dvb-ttpci: MSP3415 audio DAC @ card %d\n",
+ av7110->dvb_adapter.num);
+ av7110->adac_type = DVB_ADAC_MSP34x5;
+ av7110_fw_cmd(av7110, COMTYPE_AUDIODAC, ADSwitch, 1, 0); // SPDIF on
+ }
+ else if (i2c_writereg(av7110, 0x20, 0x00, 0x00) == 1) {
printk ("dvb-ttpci: Crystal audio DAC @ card %d detected\n",
av7110->dvb_adapter.num);
av7110->adac_type = DVB_ADAC_CRYSTAL;
@@ -156,10 +162,10 @@ static void init_av7110_av(struct av7110 *av7110)
else {
av7110->adac_type = adac;
printk("dvb-ttpci: adac type set to %d @ card %d\n",
- av7110->dvb_adapter.num, av7110->adac_type);
+ av7110->adac_type, av7110->dvb_adapter.num);
}
- if (av7110->adac_type == DVB_ADAC_NONE || av7110->adac_type == DVB_ADAC_MSP) {
+ if (av7110->adac_type == DVB_ADAC_NONE || av7110->adac_type == DVB_ADAC_MSP34x0) {
// switch DVB SCART on
ret = av7110_fw_cmd(av7110, COMTYPE_AUDIODAC, MainSwitch, 1, 0);
if (ret < 0)
@@ -190,17 +196,15 @@ static void recover_arm(struct av7110 *av7110)
av7110_bootarm(av7110);
msleep(100);
- restart_feeds(av7110);
- av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, SetIR, 1, av7110->ir_config);
-}
-static void arm_error(struct av7110 *av7110)
-{
- dprintk(4, "%p\n",av7110);
+ init_av7110_av(av7110);
+
+ /* card-specific recovery */
+ if (av7110->recover)
+ av7110->recover(av7110);
- av7110->arm_errors++;
- av7110->arm_ready = 0;
- recover_arm(av7110);
+ restart_feeds(av7110);
+ av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, SetIR, 1, av7110->ir_config);
}
static void av7110_arm_sync(struct av7110 *av7110)
@@ -240,26 +244,22 @@ static int arm_thread(void *data)
if (down_interruptible(&av7110->dcomlock))
break;
-
newloops = rdebi(av7110, DEBINOSWAP, STATUS_LOOPS, 0, 2);
up(&av7110->dcomlock);
- if (newloops == av7110->arm_loops) {
+ if (newloops == av7110->arm_loops || av7110->arm_errors > 3) {
printk(KERN_ERR "dvb-ttpci: ARM crashed @ card %d\n",
av7110->dvb_adapter.num);
- arm_error(av7110);
- av7710_set_video_mode(av7110, vidmode);
-
- init_av7110_av(av7110);
+ recover_arm(av7110);
if (down_interruptible(&av7110->dcomlock))
break;
-
newloops = rdebi(av7110, DEBINOSWAP, STATUS_LOOPS, 0, 2) - 1;
up(&av7110->dcomlock);
}
av7110->arm_loops = newloops;
+ av7110->arm_errors = 0;
}
av7110->arm_thread = NULL;
@@ -510,10 +510,6 @@ static void gpioirq(unsigned long data)
iwdebi(av7110, DEBINOSWAP, RX_BUFF, 0, 2);
av7110->video_size.h = h_ar & 0xfff;
- dprintk(8, "GPIO0 irq: DATA_MPEG_VIDEO_EVENT: w/h/ar = %u/%u/%u\n",
- av7110->video_size.w,
- av7110->video_size.h,
- av7110->video_size.aspect_ratio);
event.type = VIDEO_EVENT_SIZE_CHANGED;
event.u.size.w = av7110->video_size.w;
@@ -535,6 +531,11 @@ static void gpioirq(unsigned long data)
event.u.size.aspect_ratio = VIDEO_FORMAT_4_3;
av7110->videostate.video_format = VIDEO_FORMAT_4_3;
}
+
+ dprintk(8, "GPIO0 irq: DATA_MPEG_VIDEO_EVENT: w/h/ar = %u/%u/%u\n",
+ av7110->video_size.w, av7110->video_size.h,
+ av7110->video_size.aspect_ratio);
+
dvb_video_add_event(av7110, &event);
break;
}
@@ -714,6 +715,8 @@ static struct dvb_device dvbdev_osd = {
static inline int SetPIDs(struct av7110 *av7110, u16 vpid, u16 apid, u16 ttpid,
u16 subpid, u16 pcrpid)
{
+ u16 aflags = 0;
+
dprintk(4, "%p\n", av7110);
if (vpid == 0x1fff || apid == 0x1fff ||
@@ -725,8 +728,11 @@ static inline int SetPIDs(struct av7110 *av7110, u16 vpid, u16 apid, u16 ttpid,
av7110->pids[DMX_PES_PCR] = 0;
}
- return av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, MultiPID, 5,
- pcrpid, vpid, apid, ttpid, subpid);
+ if (av7110->audiostate.bypass_mode)
+ aflags |= 0x8000;
+
+ return av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, MultiPID, 6,
+ pcrpid, vpid, apid, ttpid, subpid, aflags);
}
int ChangePIDs(struct av7110 *av7110, u16 vpid, u16 apid, u16 ttpid,
@@ -1043,7 +1049,7 @@ static void restart_feeds(struct av7110 *av7110)
struct dvb_demux *dvbdmx = &av7110->demux;
struct dvb_demux_feed *feed;
int mode;
- int i;
+ int i, j;
dprintk(4, "%p\n", av7110);
@@ -1051,10 +1057,21 @@ static void restart_feeds(struct av7110 *av7110)
av7110->playing = 0;
av7110->rec_mode = 0;
- for (i = 0; i < dvbdmx->filternum; i++) {
+ for (i = 0; i < dvbdmx->feednum; i++) {
feed = &dvbdmx->feed[i];
- if (feed->state == DMX_STATE_GO)
+ if (feed->state == DMX_STATE_GO) {
+ if (feed->type == DMX_TYPE_SEC) {
+ for (j = 0; j < dvbdmx->filternum; j++) {
+ if (dvbdmx->filter[j].type != DMX_TYPE_SEC)
+ continue;
+ if (dvbdmx->filter[j].filter.parent != &feed->feed.sec)
+ continue;
+ if (dvbdmx->filter[j].state == DMX_STATE_GO)
+ dvbdmx->filter[j].state = DMX_STATE_READY;
+ }
+ }
av7110_start_feed(feed);
+ }
}
if (mode)
@@ -1483,9 +1500,9 @@ static int get_firmware(struct av7110* av7110)
if (ret == -ENOENT) {
printk(KERN_ERR "dvb-ttpci: could not load firmware,"
" file not found: dvb-ttpci-01.fw\n");
- printk(KERN_ERR "dvb-ttpci: usually this should be in"
- " /usr/lib/hotplug/firmware\n");
- printk(KERN_ERR "dvb-ttpci: and can be downloaded here"
+ printk(KERN_ERR "dvb-ttpci: usually this should be in "
+ "/usr/lib/hotplug/firmware or /lib/firmware\n");
+ printk(KERN_ERR "dvb-ttpci: and can be downloaded from"
" http://www.linuxtv.org/download/dvb/firmware/\n");
} else
printk(KERN_ERR "dvb-ttpci: cannot request firmware"
@@ -2110,8 +2127,10 @@ static int av7110_fe_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_p
struct av7110* av7110 = fe->dvb->priv;
int ret = av7110_fe_lock_fix(av7110, 0);
- if (!ret)
+ if (!ret) {
+ av7110->saved_fe_params = *params;
ret = av7110->fe_set_frontend(fe, params);
+ }
return ret;
}
@@ -2153,8 +2172,10 @@ static int av7110_fe_diseqc_send_master_cmd(struct dvb_frontend* fe,
struct av7110* av7110 = fe->dvb->priv;
int ret = av7110_fe_lock_fix(av7110, 0);
- if (!ret)
+ if (!ret) {
+ av7110->saved_master_cmd = *cmd;
ret = av7110->fe_diseqc_send_master_cmd(fe, cmd);
+ }
return ret;
}
@@ -2163,8 +2184,10 @@ static int av7110_fe_diseqc_send_burst(struct dvb_frontend* fe, fe_sec_mini_cmd_
struct av7110* av7110 = fe->dvb->priv;
int ret = av7110_fe_lock_fix(av7110, 0);
- if (!ret)
+ if (!ret) {
+ av7110->saved_minicmd = minicmd;
ret = av7110->fe_diseqc_send_burst(fe, minicmd);
+ }
return ret;
}
@@ -2173,8 +2196,10 @@ static int av7110_fe_set_tone(struct dvb_frontend* fe, fe_sec_tone_mode_t tone)
struct av7110* av7110 = fe->dvb->priv;
int ret = av7110_fe_lock_fix(av7110, 0);
- if (!ret)
+ if (!ret) {
+ av7110->saved_tone = tone;
ret = av7110->fe_set_tone(fe, tone);
+ }
return ret;
}
@@ -2183,12 +2208,14 @@ static int av7110_fe_set_voltage(struct dvb_frontend* fe, fe_sec_voltage_t volta
struct av7110* av7110 = fe->dvb->priv;
int ret = av7110_fe_lock_fix(av7110, 0);
- if (!ret)
+ if (!ret) {
+ av7110->saved_voltage = voltage;
ret = av7110->fe_set_voltage(fe, voltage);
+ }
return ret;
}
-static int av7110_fe_dishnetwork_send_legacy_command(struct dvb_frontend* fe, unsigned int cmd)
+static int av7110_fe_dishnetwork_send_legacy_command(struct dvb_frontend* fe, unsigned long cmd)
{
struct av7110* av7110 = fe->dvb->priv;
@@ -2198,6 +2225,23 @@ static int av7110_fe_dishnetwork_send_legacy_command(struct dvb_frontend* fe, un
return ret;
}
+static void dvb_s_recover(struct av7110* av7110)
+{
+ av7110_fe_init(av7110->fe);
+
+ av7110_fe_set_voltage(av7110->fe, av7110->saved_voltage);
+ if (av7110->saved_master_cmd.msg_len) {
+ msleep(20);
+ av7110_fe_diseqc_send_master_cmd(av7110->fe, &av7110->saved_master_cmd);
+ }
+ msleep(20);
+ av7110_fe_diseqc_send_burst(av7110->fe, av7110->saved_minicmd);
+ msleep(20);
+ av7110_fe_set_tone(av7110->fe, av7110->saved_tone);
+
+ av7110_fe_set_frontend(av7110->fe, &av7110->saved_fe_params);
+}
+
static u8 read_pwm(struct av7110* av7110)
{
u8 b = 0xff;
@@ -2235,6 +2279,7 @@ static int frontend_init(struct av7110 *av7110)
av7110->fe->ops->diseqc_send_master_cmd = av7110_diseqc_send_master_cmd;
av7110->fe->ops->diseqc_send_burst = av7110_diseqc_send_burst;
av7110->fe->ops->set_tone = av7110_set_tone;
+ av7110->recover = dvb_s_recover;
break;
}
@@ -2244,15 +2289,17 @@ static int frontend_init(struct av7110 *av7110)
av7110->fe->ops->diseqc_send_master_cmd = av7110_diseqc_send_master_cmd;
av7110->fe->ops->diseqc_send_burst = av7110_diseqc_send_burst;
av7110->fe->ops->set_tone = av7110_set_tone;
+ av7110->recover = dvb_s_recover;
break;
}
// Try the grundig 29504-451
- av7110->fe = tda8083_attach(&grundig_29504_451_config, &av7110->i2c_adap);
+ av7110->fe = tda8083_attach(&grundig_29504_451_config, &av7110->i2c_adap);
if (av7110->fe) {
av7110->fe->ops->diseqc_send_master_cmd = av7110_diseqc_send_master_cmd;
av7110->fe->ops->diseqc_send_burst = av7110_diseqc_send_burst;
av7110->fe->ops->set_tone = av7110_set_tone;
+ av7110->recover = dvb_s_recover;
break;
}
@@ -2274,12 +2321,12 @@ static int frontend_init(struct av7110 *av7110)
case 0x0001: // Hauppauge/TT Nexus-T premium rev1.X
// ALPS TDLB7
- av7110->fe = sp8870_attach(&alps_tdlb7_config, &av7110->i2c_adap);
+ av7110->fe = sp8870_attach(&alps_tdlb7_config, &av7110->i2c_adap);
break;
case 0x0002: // Hauppauge/TT DVB-C premium rev2.X
- av7110->fe = ves1820_attach(&alps_tdbe2_config, &av7110->i2c_adap, read_pwm(av7110));
+ av7110->fe = ves1820_attach(&alps_tdbe2_config, &av7110->i2c_adap, read_pwm(av7110));
break;
case 0x0006: /* Fujitsu-Siemens DVB-S rev 1.6 */
@@ -2289,6 +2336,7 @@ static int frontend_init(struct av7110 *av7110)
av7110->fe->ops->diseqc_send_master_cmd = av7110_diseqc_send_master_cmd;
av7110->fe->ops->diseqc_send_burst = av7110_diseqc_send_burst;
av7110->fe->ops->set_tone = av7110_set_tone;
+ av7110->recover = dvb_s_recover;
}
break;
@@ -2314,8 +2362,11 @@ static int frontend_init(struct av7110 *av7110)
case 0x000E: /* Hauppauge/TT Nexus-S rev 2.3 */
/* ALPS BSBE1 */
av7110->fe = stv0299_attach(&alps_bsbe1_config, &av7110->i2c_adap);
- if (av7110->fe)
+ if (av7110->fe) {
av7110->fe->ops->set_voltage = lnbp21_set_voltage;
+ av7110->fe->ops->dishnetwork_send_legacy_command = NULL;
+ av7110->recover = dvb_s_recover;
+ }
break;
}
}
@@ -2514,14 +2565,12 @@ static int av7110_attach(struct saa7146_dev* dev, struct saa7146_pci_extension_d
}
/* prepare the av7110 device struct */
- av7110 = kmalloc(sizeof(struct av7110), GFP_KERNEL);
+ av7110 = kzalloc(sizeof(struct av7110), GFP_KERNEL);
if (!av7110) {
dprintk(1, "out of memory\n");
return -ENOMEM;
}
- memset(av7110, 0, sizeof(struct av7110));
-
av7110->card_name = (char*) pci_ext->ext_priv;
av7110->dev = dev;
dev->ext_priv = av7110;
diff --git a/drivers/media/dvb/ttpci/av7110.h b/drivers/media/dvb/ttpci/av7110.h
index cce00ef293e..6ea30df2e82 100644
--- a/drivers/media/dvb/ttpci/av7110.h
+++ b/drivers/media/dvb/ttpci/av7110.h
@@ -98,7 +98,8 @@ struct av7110 {
int adac_type; /* audio DAC type */
#define DVB_ADAC_TI 0
#define DVB_ADAC_CRYSTAL 1
-#define DVB_ADAC_MSP 2
+#define DVB_ADAC_MSP34x0 2
+#define DVB_ADAC_MSP34x5 3
#define DVB_ADAC_NONE -1
@@ -228,6 +229,9 @@ struct av7110 {
struct dvb_video_events video_events;
video_size_t video_size;
+ u16 wssMode;
+ u16 wssData;
+
u32 ir_config;
u32 ir_command;
void (*ir_handler)(struct av7110 *av7110, u32 ircom);
@@ -245,6 +249,15 @@ struct av7110 {
struct dvb_frontend* fe;
fe_status_t fe_status;
+
+ /* crash recovery */
+ void (*recover)(struct av7110* av7110);
+ struct dvb_frontend_parameters saved_fe_params;
+ fe_sec_voltage_t saved_voltage;
+ fe_sec_tone_mode_t saved_tone;
+ struct dvb_diseqc_master_cmd saved_master_cmd;
+ fe_sec_mini_cmd_t saved_minicmd;
+
int (*fe_init)(struct dvb_frontend* fe);
int (*fe_read_status)(struct dvb_frontend* fe, fe_status_t* status);
int (*fe_diseqc_reset_overload)(struct dvb_frontend* fe);
@@ -252,7 +265,7 @@ struct av7110 {
int (*fe_diseqc_send_burst)(struct dvb_frontend* fe, fe_sec_mini_cmd_t minicmd);
int (*fe_set_tone)(struct dvb_frontend* fe, fe_sec_tone_mode_t tone);
int (*fe_set_voltage)(struct dvb_frontend* fe, fe_sec_voltage_t voltage);
- int (*fe_dishnetwork_send_legacy_command)(struct dvb_frontend* fe, unsigned int cmd);
+ int (*fe_dishnetwork_send_legacy_command)(struct dvb_frontend* fe, unsigned long cmd);
int (*fe_set_frontend)(struct dvb_frontend* fe, struct dvb_frontend_parameters* params);
};
diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c
index 0696a5a4f85..400facec740 100644
--- a/drivers/media/dvb/ttpci/av7110_av.c
+++ b/drivers/media/dvb/ttpci/av7110_av.c
@@ -309,7 +309,7 @@ int av7110_set_volume(struct av7110 *av7110, int volleft, int volright)
i2c_writereg(av7110, 0x20, 0x04, volright);
return 0;
- case DVB_ADAC_MSP:
+ case DVB_ADAC_MSP34x0:
vol = (volleft > volright) ? volleft : volright;
val = (vol * 0x73 / 255) << 8;
if (vol > 0)
@@ -1256,7 +1256,9 @@ static int dvb_audio_ioctl(struct inode *inode, struct file *file,
break;
case AUDIO_SET_BYPASS_MODE:
- ret = -EINVAL;
+ if (FW_VERSION(av7110->arm_app) < 0x2621)
+ ret = -EINVAL;
+ av7110->audiostate.bypass_mode = (int)arg;
break;
case AUDIO_CHANNEL_SELECT:
@@ -1295,7 +1297,11 @@ static int dvb_audio_ioctl(struct inode *inode, struct file *file,
break;
case AUDIO_GET_CAPABILITIES:
- *(int *)parg = AUDIO_CAP_LPCM | AUDIO_CAP_MP1 | AUDIO_CAP_MP2;
+ if (FW_VERSION(av7110->arm_app) < 0x2621)
+ *(unsigned int *)parg = AUDIO_CAP_LPCM | AUDIO_CAP_MP1 | AUDIO_CAP_MP2;
+ else
+ *(unsigned int *)parg = AUDIO_CAP_LPCM | AUDIO_CAP_DTS | AUDIO_CAP_AC3 |
+ AUDIO_CAP_MP1 | AUDIO_CAP_MP2;
break;
case AUDIO_CLEAR_BUFFER:
diff --git a/drivers/media/dvb/ttpci/av7110_hw.c b/drivers/media/dvb/ttpci/av7110_hw.c
index 87106e8bf35..cb377452b57 100644
--- a/drivers/media/dvb/ttpci/av7110_hw.c
+++ b/drivers/media/dvb/ttpci/av7110_hw.c
@@ -230,6 +230,8 @@ int av7110_bootarm(struct av7110 *av7110)
dprintk(4, "%p\n", av7110);
+ av7110->arm_ready = 0;
+
saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTLO);
/* Disable DEBI and GPIO irq */
@@ -361,6 +363,7 @@ static int __av7110_send_fw_cmd(struct av7110 *av7110, u16* buf, int length)
break;
if (err) {
printk(KERN_ERR "dvb-ttpci: %s(): timeout waiting for COMMAND idle\n", __FUNCTION__);
+ av7110->arm_errors++;
return -ETIMEDOUT;
}
msleep(1);
@@ -1206,9 +1209,9 @@ int av7110_osd_capability(struct av7110 *av7110, osd_cap_t *cap)
switch (cap->cmd) {
case OSD_CAP_MEMSIZE:
if (FW_4M_SDRAM(av7110->arm_app))
- cap->val = 1000000;
+ cap->val = 1000000;
else
- cap->val = 92000;
+ cap->val = 92000;
return 0;
default:
return -EINVAL;
diff --git a/drivers/media/dvb/ttpci/av7110_hw.h b/drivers/media/dvb/ttpci/av7110_hw.h
index 2a5e87ba105..84b83299b8b 100644
--- a/drivers/media/dvb/ttpci/av7110_hw.h
+++ b/drivers/media/dvb/ttpci/av7110_hw.h
@@ -167,7 +167,8 @@ enum av7110_encoder_command {
LoadVidCode,
SetMonitorType,
SetPanScanType,
- SetFreezeMode
+ SetFreezeMode,
+ SetWSSConfig
};
enum av7110_rec_play_state {
diff --git a/drivers/media/dvb/ttpci/av7110_ir.c b/drivers/media/dvb/ttpci/av7110_ir.c
index f5e59fc924a..9138132ad25 100644
--- a/drivers/media/dvb/ttpci/av7110_ir.c
+++ b/drivers/media/dvb/ttpci/av7110_ir.c
@@ -17,6 +17,8 @@ static int av_cnt;
static struct av7110 *av_list[4];
static struct input_dev *input_dev;
+static u8 delay_timer_finished;
+
static u16 key_map [256] = {
KEY_0, KEY_1, KEY_2, KEY_3, KEY_4, KEY_5, KEY_6, KEY_7,
KEY_8, KEY_9, KEY_BACK, 0, KEY_POWER, KEY_MUTE, 0, KEY_INFO,
@@ -112,13 +114,16 @@ static void av7110_emit_key(unsigned long parm)
if (timer_pending(&keyup_timer)) {
del_timer(&keyup_timer);
if (keyup_timer.data != keycode || new_toggle != old_toggle) {
+ delay_timer_finished = 0;
input_event(input_dev, EV_KEY, keyup_timer.data, !!0);
input_event(input_dev, EV_KEY, keycode, !0);
} else
- input_event(input_dev, EV_KEY, keycode, 2);
-
- } else
+ if (delay_timer_finished)
+ input_event(input_dev, EV_KEY, keycode, 2);
+ } else {
+ delay_timer_finished = 0;
input_event(input_dev, EV_KEY, keycode, !0);
+ }
keyup_timer.expires = jiffies + UP_TIMEOUT;
keyup_timer.data = keycode;
@@ -145,7 +150,8 @@ static void input_register_keys(void)
static void input_repeat_key(unsigned long data)
{
- /* dummy routine to disable autorepeat in the input driver */
+ /* called by the input driver after rep[REP_DELAY] ms */
+ delay_timer_finished = 1;
}
diff --git a/drivers/media/dvb/ttpci/av7110_v4l.c b/drivers/media/dvb/ttpci/av7110_v4l.c
index b5aea4129fa..94cf38c7e8a 100644
--- a/drivers/media/dvb/ttpci/av7110_v4l.c
+++ b/drivers/media/dvb/ttpci/av7110_v4l.c
@@ -490,6 +490,58 @@ static int av7110_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
dprintk(2, "VIDIOC_S_AUDIO: %d\n", a->index);
break;
}
+ case VIDIOC_G_SLICED_VBI_CAP:
+ {
+ struct v4l2_sliced_vbi_cap *cap = arg;
+ dprintk(2, "VIDIOC_G_SLICED_VBI_CAP\n");
+ memset(cap, 0, sizeof *cap);
+ if (FW_VERSION(av7110->arm_app) >= 0x2623) {
+ cap->service_set = V4L2_SLICED_WSS_625;
+ cap->service_lines[0][23] = V4L2_SLICED_WSS_625;
+ }
+ break;
+ }
+ case VIDIOC_G_FMT:
+ {
+ struct v4l2_format *f = arg;
+ dprintk(2, "VIDIOC_G_FMT:\n");
+ if (f->type != V4L2_BUF_TYPE_SLICED_VBI_OUTPUT ||
+ FW_VERSION(av7110->arm_app) < 0x2623)
+ return -EAGAIN; /* handled by core driver */
+ memset(&f->fmt.sliced, 0, sizeof f->fmt.sliced);
+ if (av7110->wssMode) {
+ f->fmt.sliced.service_set = V4L2_SLICED_WSS_625;
+ f->fmt.sliced.service_lines[0][23] = V4L2_SLICED_WSS_625;
+ f->fmt.sliced.io_size = sizeof (struct v4l2_sliced_vbi_data);
+ }
+ break;
+ }
+ case VIDIOC_S_FMT:
+ {
+ struct v4l2_format *f = arg;
+ dprintk(2, "VIDIOC_S_FMT\n");
+ if (f->type != V4L2_BUF_TYPE_SLICED_VBI_OUTPUT ||
+ FW_VERSION(av7110->arm_app) < 0x2623)
+ return -EAGAIN; /* handled by core driver */
+ if (f->fmt.sliced.service_set != V4L2_SLICED_WSS_625 &&
+ f->fmt.sliced.service_lines[0][23] != V4L2_SLICED_WSS_625) {
+ memset(&f->fmt.sliced, 0, sizeof f->fmt.sliced);
+ /* WSS controlled by firmware */
+ av7110->wssMode = 0;
+ av7110->wssData = 0;
+ return av7110_fw_cmd(av7110, COMTYPE_ENCODER,
+ SetWSSConfig, 1, 0);
+ } else {
+ memset(&f->fmt.sliced, 0, sizeof f->fmt.sliced);
+ f->fmt.sliced.service_set = V4L2_SLICED_WSS_625;
+ f->fmt.sliced.service_lines[0][23] = V4L2_SLICED_WSS_625;
+ f->fmt.sliced.io_size = sizeof (struct v4l2_sliced_vbi_data);
+ /* WSS controlled by userspace */
+ av7110->wssMode = 1;
+ av7110->wssData = 0;
+ }
+ break;
+ }
default:
printk("no such ioctl\n");
return -ENOIOCTLCMD;
@@ -497,6 +549,46 @@ static int av7110_ioctl(struct saa7146_fh *fh, unsigned int cmd, void *arg)
return 0;
}
+static int av7110_vbi_reset(struct inode *inode, struct file *file)
+{
+ struct saa7146_fh *fh = file->private_data;
+ struct saa7146_dev *dev = fh->dev;
+ struct av7110 *av7110 = (struct av7110*) dev->ext_priv;
+
+ dprintk(2, "%s\n", __FUNCTION__);
+ av7110->wssMode = 0;
+ av7110->wssData = 0;
+ if (FW_VERSION(av7110->arm_app) < 0x2623)
+ return 0;
+ else
+ return av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 1, 0);
+}
+
+static ssize_t av7110_vbi_write(struct file *file, const char __user *data, size_t count, loff_t *ppos)
+{
+ struct saa7146_fh *fh = file->private_data;
+ struct saa7146_dev *dev = fh->dev;
+ struct av7110 *av7110 = (struct av7110*) dev->ext_priv;
+ struct v4l2_sliced_vbi_data d;
+ int rc;
+
+ dprintk(2, "%s\n", __FUNCTION__);
+ if (FW_VERSION(av7110->arm_app) < 0x2623 || !av7110->wssMode || count != sizeof d)
+ return -EINVAL;
+ if (copy_from_user(&d, data, count))
+ return -EFAULT;
+ if ((d.id != 0 && d.id != V4L2_SLICED_WSS_625) || d.field != 0 || d.line != 23)
+ return -EINVAL;
+ if (d.id) {
+ av7110->wssData = ((d.data[1] << 8) & 0x3f00) | d.data[0];
+ rc = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig,
+ 2, 1, av7110->wssData);
+ } else {
+ av7110->wssData = 0;
+ rc = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 1, 0);
+ }
+ return (rc < 0) ? rc : count;
+}
/****************************************************************************
* INITIALIZATION
@@ -512,6 +604,9 @@ static struct saa7146_extension_ioctls ioctls[] = {
{ VIDIOC_S_TUNER, SAA7146_EXCLUSIVE },
{ VIDIOC_G_AUDIO, SAA7146_EXCLUSIVE },
{ VIDIOC_S_AUDIO, SAA7146_EXCLUSIVE },
+ { VIDIOC_G_SLICED_VBI_CAP, SAA7146_EXCLUSIVE },
+ { VIDIOC_G_FMT, SAA7146_BEFORE },
+ { VIDIOC_S_FMT, SAA7146_BEFORE },
{ 0, 0 }
};
@@ -587,7 +682,7 @@ int av7110_init_analog_module(struct av7110 *av7110)
printk("dvb-ttpci: DVB-C analog module @ card %d detected, initializing MSP3400\n",
av7110->dvb_adapter.num);
- av7110->adac_type = DVB_ADAC_MSP;
+ av7110->adac_type = DVB_ADAC_MSP34x0;
msleep(100); // the probing above resets the msp...
msp_readreg(av7110, MSP_RD_DSP, 0x001e, &version1);
msp_readreg(av7110, MSP_RD_DSP, 0x001f, &version2);
@@ -692,12 +787,11 @@ int av7110_init_v4l(struct av7110 *av7110)
saa7146_vv_release(dev);
return -ENODEV;
}
- if (av7110->analog_tuner_flags) {
- if (saa7146_register_device(&av7110->vbi_dev, dev, "av7110", VFL_TYPE_VBI)) {
- ERR(("cannot register vbi v4l2 device. skipping.\n"));
- } else {
+ if (saa7146_register_device(&av7110->vbi_dev, dev, "av7110", VFL_TYPE_VBI)) {
+ ERR(("cannot register vbi v4l2 device. skipping.\n"));
+ } else {
+ if (av7110->analog_tuner_flags)
av7110->analog_tuner_flags |= ANALOG_TUNER_VBI;
- }
}
return 0;
}
@@ -778,7 +872,7 @@ static int std_callback(struct saa7146_dev* dev, struct saa7146_standard *std)
static struct saa7146_ext_vv av7110_vv_data_st = {
.inputs = 1,
.audios = 1,
- .capabilities = 0,
+ .capabilities = V4L2_CAP_SLICED_VBI_OUTPUT,
.flags = 0,
.stds = &standard[0],
@@ -787,12 +881,16 @@ static struct saa7146_ext_vv av7110_vv_data_st = {
.ioctls = &ioctls[0],
.ioctl = av7110_ioctl,
+
+ .vbi_fops.open = av7110_vbi_reset,
+ .vbi_fops.release = av7110_vbi_reset,
+ .vbi_fops.write = av7110_vbi_write,
};
static struct saa7146_ext_vv av7110_vv_data_c = {
.inputs = 1,
.audios = 1,
- .capabilities = V4L2_CAP_TUNER | V4L2_CAP_VBI_CAPTURE,
+ .capabilities = V4L2_CAP_TUNER | V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_OUTPUT,
.flags = SAA7146_USE_PORT_B_FOR_VBI,
.stds = &standard[0],
@@ -801,5 +899,9 @@ static struct saa7146_ext_vv av7110_vv_data_c = {
.ioctls = &ioctls[0],
.ioctl = av7110_ioctl,
+
+ .vbi_fops.open = av7110_vbi_reset,
+ .vbi_fops.release = av7110_vbi_reset,
+ .vbi_fops.write = av7110_vbi_write,
};
diff --git a/drivers/media/dvb/ttpci/budget-av.c b/drivers/media/dvb/ttpci/budget-av.c
index 9f51bae7194..1465c04e49a 100644
--- a/drivers/media/dvb/ttpci/budget-av.c
+++ b/drivers/media/dvb/ttpci/budget-av.c
@@ -37,6 +37,7 @@
#include "stv0299.h"
#include "tda10021.h"
#include "tda1004x.h"
+#include "dvb-pll.h"
#include <media/saa7146_vv.h>
#include <linux/module.h>
#include <linux/errno.h>
@@ -127,7 +128,7 @@ static int ciintf_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int ad
saa7146_setgpio(budget_av->budget.dev, 1, SAA7146_GPIO_OUTHI);
udelay(1);
- result = ttpci_budget_debiread(&budget_av->budget, DEBICICAM, address & 0xfff, 1, 0, 0);
+ result = ttpci_budget_debiread(&budget_av->budget, DEBICICAM, address & 0xfff, 1, 0, 1);
if (result == -ETIMEDOUT)
budget_av->slot_status = 0;
@@ -145,7 +146,7 @@ static int ciintf_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int a
saa7146_setgpio(budget_av->budget.dev, 1, SAA7146_GPIO_OUTHI);
udelay(1);
- result = ttpci_budget_debiwrite(&budget_av->budget, DEBICICAM, address & 0xfff, 1, value, 0, 0);
+ result = ttpci_budget_debiwrite(&budget_av->budget, DEBICICAM, address & 0xfff, 1, value, 0, 1);
if (result == -ETIMEDOUT)
budget_av->slot_status = 0;
@@ -192,7 +193,7 @@ static int ciintf_slot_reset(struct dvb_ca_en50221 *ca, int slot)
{
struct budget_av *budget_av = (struct budget_av *) ca->data;
struct saa7146_dev *saa = budget_av->budget.dev;
- int timeout = 500; // 5 seconds (4.4.6 Ready)
+ int timeout = 50; // 5 seconds (4.4.6 Ready)
if (slot != 0)
return -EINVAL;
@@ -256,19 +257,37 @@ static int ciintf_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open
{
struct budget_av *budget_av = (struct budget_av *) ca->data;
struct saa7146_dev *saa = budget_av->budget.dev;
+ int cam_present = 0;
if (slot != 0)
return -EINVAL;
- if (!budget_av->slot_status) {
+ if (!budget_av->slot_status)
+ {
+ // first of all test the card detect line
saa7146_setgpio(saa, 3, SAA7146_GPIO_INPUT);
udelay(1);
if (saa7146_read(saa, PSR) & MASK_06)
{
+ cam_present = 1;
+ }
+ saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTLO);
+
+ // that is unreliable, however, so try to read from IO memory
+ if (!cam_present)
+ {
+ saa7146_setgpio(budget_av->budget.dev, 1, SAA7146_GPIO_OUTLO);
+ if (ttpci_budget_debiread(&budget_av->budget, DEBICICAM, 0, 1, 0, 1) != -ETIMEDOUT)
+ {
+ cam_present = 1;
+ }
+ }
+
+ // did we find something?
+ if (cam_present) {
printk(KERN_INFO "budget-av: cam inserted\n");
budget_av->slot_status = 1;
}
- saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTLO);
} else if (!open) {
saa7146_setgpio(budget_av->budget.dev, 1, SAA7146_GPIO_OUTLO);
if (ttpci_budget_debiread(&budget_av->budget, DEBICICAM, 0, 1, 0, 1) == -ETIMEDOUT)
@@ -484,6 +503,140 @@ static int philips_su1278_ty_ci_pll_set(struct dvb_frontend *fe,
return 0;
}
+#define MIN2(a,b) ((a) < (b) ? (a) : (b))
+#define MIN3(a,b,c) MIN2(MIN2(a,b),c)
+
+static int philips_su1278sh2_tua6100_pll_set(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ struct dvb_frontend_parameters *params)
+{
+ u8 reg0 [2] = { 0x00, 0x00 };
+ u8 reg1 [4] = { 0x01, 0x00, 0x00, 0x00 };
+ u8 reg2 [3] = { 0x02, 0x00, 0x00 };
+ int _fband;
+ int first_ZF;
+ int R, A, N, P, M;
+ struct i2c_msg msg = {.addr = 0x60,.flags = 0,.buf = NULL,.len = 0 };
+ int freq = params->frequency;
+
+ first_ZF = (freq) / 1000;
+
+ if (abs(MIN2(abs(first_ZF-1190),abs(first_ZF-1790))) <
+ abs(MIN3(abs(first_ZF-1202),abs(first_ZF-1542),abs(first_ZF-1890))))
+ _fband = 2;
+ else
+ _fband = 3;
+
+ if (_fband == 2) {
+ if (((first_ZF >= 950) && (first_ZF < 1350)) ||
+ ((first_ZF >= 1430) && (first_ZF < 1950)))
+ reg0[1] = 0x07;
+ else if (((first_ZF >= 1350) && (first_ZF < 1430)) ||
+ ((first_ZF >= 1950) && (first_ZF < 2150)))
+ reg0[1] = 0x0B;
+ }
+
+ if(_fband == 3) {
+ if (((first_ZF >= 950) && (first_ZF < 1350)) ||
+ ((first_ZF >= 1455) && (first_ZF < 1950)))
+ reg0[1] = 0x07;
+ else if (((first_ZF >= 1350) && (first_ZF < 1420)) ||
+ ((first_ZF >= 1950) && (first_ZF < 2150)))
+ reg0[1] = 0x0B;
+ else if ((first_ZF >= 1420) && (first_ZF < 1455))
+ reg0[1] = 0x0F;
+ }
+
+ if (first_ZF > 1525)
+ reg1[1] |= 0x80;
+ else
+ reg1[1] &= 0x7F;
+
+ if (_fband == 2) {
+ if (first_ZF > 1430) { /* 1430MHZ */
+ reg1[1] &= 0xCF; /* N2 */
+ reg2[1] &= 0xCF; /* R2 */
+ reg2[1] |= 0x10;
+ } else {
+ reg1[1] &= 0xCF; /* N2 */
+ reg1[1] |= 0x20;
+ reg2[1] &= 0xCF; /* R2 */
+ reg2[1] |= 0x10;
+ }
+ }
+
+ if (_fband == 3) {
+ if ((first_ZF >= 1455) &&
+ (first_ZF < 1630)) {
+ reg1[1] &= 0xCF; /* N2 */
+ reg1[1] |= 0x20;
+ reg2[1] &= 0xCF; /* R2 */
+ } else {
+ if (first_ZF < 1455) {
+ reg1[1] &= 0xCF; /* N2 */
+ reg1[1] |= 0x20;
+ reg2[1] &= 0xCF; /* R2 */
+ reg2[1] |= 0x10;
+ } else {
+ if (first_ZF >= 1630) {
+ reg1[1] &= 0xCF; /* N2 */
+ reg2[1] &= 0xCF; /* R2 */
+ reg2[1] |= 0x10;
+ }
+ }
+ }
+ }
+
+ /* set ports, enable P0 for symbol rates > 4Ms/s */
+ if (params->u.qpsk.symbol_rate >= 4000000)
+ reg1[1] |= 0x0c;
+ else
+ reg1[1] |= 0x04;
+
+ reg2[1] |= 0x0c;
+
+ R = 64;
+ A = 64;
+ P = 64; //32
+
+ M = (freq * R) / 4; /* in Mhz */
+ N = (M - A * 1000) / (P * 1000);
+
+ reg1[1] |= (N >> 9) & 0x03;
+ reg1[2] = (N >> 1) & 0xff;
+ reg1[3] = (N << 7) & 0x80;
+
+ reg2[1] |= (R >> 8) & 0x03;
+ reg2[2] = R & 0xFF; /* R */
+
+ reg1[3] |= A & 0x7f; /* A */
+
+ if (P == 64)
+ reg1[1] |= 0x40; /* Prescaler 64/65 */
+
+ reg0[1] |= 0x03;
+
+ /* already enabled - do not reenable i2c repeater or TX fails */
+ msg.buf = reg0;
+ msg.len = sizeof(reg0);
+ if (i2c_transfer(i2c, &msg, 1) != 1)
+ return -EIO;
+
+ stv0299_enable_plli2c(fe);
+ msg.buf = reg1;
+ msg.len = sizeof(reg1);
+ if (i2c_transfer(i2c, &msg, 1) != 1)
+ return -EIO;
+
+ stv0299_enable_plli2c(fe);
+ msg.buf = reg2;
+ msg.len = sizeof(reg2);
+ if (i2c_transfer(i2c, &msg, 1) != 1)
+ return -EIO;
+
+ return 0;
+}
+
static u8 typhoon_cinergy1200s_inittab[] = {
0x01, 0x15,
0x02, 0x30,
@@ -553,6 +706,18 @@ static struct stv0299_config cinergy_1200s_config = {
.pll_set = philips_su1278_ty_ci_pll_set,
};
+static struct stv0299_config cinergy_1200s_1894_0010_config = {
+ .demod_address = 0x68,
+ .inittab = typhoon_cinergy1200s_inittab,
+ .mclk = 88000000UL,
+ .invert = 1,
+ .skip_reinit = 0,
+ .lock_output = STV0229_LOCKOUTPUT_1,
+ .volt13_op0_op1 = STV0299_VOLT13_OP0,
+ .min_delay_ms = 100,
+ .set_symbol_rate = philips_su1278_ty_ci_set_symbol_rate,
+ .pll_set = philips_su1278sh2_tua6100_pll_set,
+};
static int philips_cu1216_pll_set(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
{
@@ -700,8 +865,117 @@ static struct tda1004x_config philips_tu1216_config = {
.request_firmware = philips_tu1216_request_firmware,
};
+static u8 philips_sd1878_inittab[] = {
+ 0x01, 0x15,
+ 0x02, 0x30,
+ 0x03, 0x00,
+ 0x04, 0x7d,
+ 0x05, 0x35,
+ 0x06, 0x40,
+ 0x07, 0x00,
+ 0x08, 0x43,
+ 0x09, 0x02,
+ 0x0C, 0x51,
+ 0x0D, 0x82,
+ 0x0E, 0x23,
+ 0x10, 0x3f,
+ 0x11, 0x84,
+ 0x12, 0xb9,
+ 0x15, 0xc9,
+ 0x16, 0x19,
+ 0x17, 0x8c,
+ 0x18, 0x59,
+ 0x19, 0xf8,
+ 0x1a, 0xfe,
+ 0x1c, 0x7f,
+ 0x1d, 0x00,
+ 0x1e, 0x00,
+ 0x1f, 0x50,
+ 0x20, 0x00,
+ 0x21, 0x00,
+ 0x22, 0x00,
+ 0x23, 0x00,
+ 0x28, 0x00,
+ 0x29, 0x28,
+ 0x2a, 0x14,
+ 0x2b, 0x0f,
+ 0x2c, 0x09,
+ 0x2d, 0x09,
+ 0x31, 0x1f,
+ 0x32, 0x19,
+ 0x33, 0xfc,
+ 0x34, 0x93,
+ 0xff, 0xff
+};
+
+static int philips_sd1878_tda8261_pll_set(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ struct dvb_frontend_parameters *params)
+{
+ u8 buf[4];
+ int rc;
+ struct i2c_msg tuner_msg = {.addr=0x60,.flags=0,.buf=buf,.len=sizeof(buf)};
+
+ if((params->frequency < 950000) || (params->frequency > 2150000))
+ return -EINVAL;
+
+ rc=dvb_pll_configure(&dvb_pll_philips_sd1878_tda8261, buf,
+ params->frequency, 0);
+ if(rc < 0) return rc;
+
+ if(i2c_transfer(i2c, &tuner_msg, 1) != 1)
+ return -EIO;
+
+ return 0;
+}
+static int philips_sd1878_ci_set_symbol_rate(struct dvb_frontend *fe,
+ u32 srate, u32 ratio)
+{
+ u8 aclk = 0;
+ u8 bclk = 0;
+ u8 m1;
+ aclk = 0xb5;
+ if (srate < 2000000)
+ bclk = 0x86;
+ else if (srate < 5000000)
+ bclk = 0x89;
+ else if (srate < 15000000)
+ bclk = 0x8f;
+ else if (srate < 45000000)
+ bclk = 0x95;
+
+ m1 = 0x14;
+ if (srate < 4000000)
+ m1 = 0x10;
+
+ stv0299_writereg(fe, 0x0e, 0x23);
+ stv0299_writereg(fe, 0x0f, 0x94);
+ stv0299_writereg(fe, 0x10, 0x39);
+ stv0299_writereg(fe, 0x13, aclk);
+ stv0299_writereg(fe, 0x14, bclk);
+ stv0299_writereg(fe, 0x15, 0xc9);
+ stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff);
+ stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff);
+ stv0299_writereg(fe, 0x21, (ratio) & 0xf0);
+ stv0299_writereg(fe, 0x0f, 0x80 | m1);
+
+ return 0;
+}
+
+static struct stv0299_config philips_sd1878_config = {
+ .demod_address = 0x68,
+ .inittab = philips_sd1878_inittab,
+ .mclk = 88000000UL,
+ .invert = 0,
+ .skip_reinit = 0,
+ .lock_output = STV0229_LOCKOUTPUT_1,
+ .volt13_op0_op1 = STV0299_VOLT13_OP0,
+ .min_delay_ms = 100,
+ .set_symbol_rate = philips_sd1878_ci_set_symbol_rate,
+ .pll_set = philips_sd1878_tda8261_pll_set,
+};
static u8 read_pwm(struct budget_av *budget_av)
{
@@ -722,7 +996,10 @@ static u8 read_pwm(struct budget_av *budget_av)
#define SUBID_DVBS_KNC1_PLUS 0x0011
#define SUBID_DVBS_TYPHOON 0x4f56
#define SUBID_DVBS_CINERGY1200 0x1154
+#define SUBID_DVBS_CYNERGY1200N 0x1155
+#define SUBID_DVBS_TV_STAR 0x0014
+#define SUBID_DVBS_TV_STAR_CI 0x0016
#define SUBID_DVBC_KNC1 0x0020
#define SUBID_DVBC_KNC1_PLUS 0x0021
#define SUBID_DVBC_CINERGY1200 0x1156
@@ -749,6 +1026,22 @@ static void frontend_init(struct budget_av *budget_av)
switch (saa->pci->subsystem_device) {
case SUBID_DVBS_KNC1:
+ if (saa->pci->subsystem_vendor == 0x1894) {
+ fe = stv0299_attach(&cinergy_1200s_1894_0010_config,
+ &budget_av->budget.i2c_adap);
+ } else {
+ fe = stv0299_attach(&typhoon_config,
+ &budget_av->budget.i2c_adap);
+ }
+ break;
+
+ case SUBID_DVBS_TV_STAR:
+ case SUBID_DVBS_TV_STAR_CI:
+ case SUBID_DVBS_CYNERGY1200N:
+ fe = stv0299_attach(&philips_sd1878_config,
+ &budget_av->budget.i2c_adap);
+ break;
+
case SUBID_DVBS_KNC1_PLUS:
case SUBID_DVBS_TYPHOON:
fe = stv0299_attach(&typhoon_config,
@@ -854,11 +1147,9 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
dprintk(2, "dev: %p\n", dev);
- if (!(budget_av = kmalloc(sizeof(struct budget_av), GFP_KERNEL)))
+ if (!(budget_av = kzalloc(sizeof(struct budget_av), GFP_KERNEL)))
return -ENOMEM;
- memset(budget_av, 0, sizeof(struct budget_av));
-
budget_av->has_saa7113 = 0;
budget_av->budget.ci_present = 0;
@@ -993,22 +1284,28 @@ static struct saa7146_extension budget_extension;
MAKE_BUDGET_INFO(knc1s, "KNC1 DVB-S", BUDGET_KNC1S);
MAKE_BUDGET_INFO(knc1c, "KNC1 DVB-C", BUDGET_KNC1C);
MAKE_BUDGET_INFO(knc1t, "KNC1 DVB-T", BUDGET_KNC1T);
+MAKE_BUDGET_INFO(kncxs, "KNC TV STAR DVB-S", BUDGET_TVSTAR);
MAKE_BUDGET_INFO(knc1sp, "KNC1 DVB-S Plus", BUDGET_KNC1SP);
MAKE_BUDGET_INFO(knc1cp, "KNC1 DVB-C Plus", BUDGET_KNC1CP);
MAKE_BUDGET_INFO(knc1tp, "KNC1 DVB-T Plus", BUDGET_KNC1TP);
MAKE_BUDGET_INFO(cin1200s, "TerraTec Cinergy 1200 DVB-S", BUDGET_CIN1200S);
+MAKE_BUDGET_INFO(cin1200sn, "TerraTec Cinergy 1200 DVB-S", BUDGET_CIN1200S);
MAKE_BUDGET_INFO(cin1200c, "Terratec Cinergy 1200 DVB-C", BUDGET_CIN1200C);
MAKE_BUDGET_INFO(cin1200t, "Terratec Cinergy 1200 DVB-T", BUDGET_CIN1200T);
static struct pci_device_id pci_tbl[] = {
MAKE_EXTENSION_PCI(knc1s, 0x1131, 0x4f56),
MAKE_EXTENSION_PCI(knc1s, 0x1131, 0x0010),
+ MAKE_EXTENSION_PCI(knc1s, 0x1894, 0x0010),
MAKE_EXTENSION_PCI(knc1sp, 0x1131, 0x0011),
+ MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0014),
+ MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0016),
MAKE_EXTENSION_PCI(knc1c, 0x1894, 0x0020),
MAKE_EXTENSION_PCI(knc1cp, 0x1894, 0x0021),
MAKE_EXTENSION_PCI(knc1t, 0x1894, 0x0030),
MAKE_EXTENSION_PCI(knc1tp, 0x1894, 0x0031),
MAKE_EXTENSION_PCI(cin1200s, 0x153b, 0x1154),
+ MAKE_EXTENSION_PCI(cin1200sn, 0x153b, 0x1155),
MAKE_EXTENSION_PCI(cin1200c, 0x153b, 0x1156),
MAKE_EXTENSION_PCI(cin1200t, 0x153b, 0x1157),
{
diff --git a/drivers/media/dvb/ttpci/budget-core.c b/drivers/media/dvb/ttpci/budget-core.c
index 017fcbccb8c..633e68c341c 100644
--- a/drivers/media/dvb/ttpci/budget-core.c
+++ b/drivers/media/dvb/ttpci/budget-core.c
@@ -404,9 +404,7 @@ int ttpci_budget_init(struct budget *budget, struct saa7146_dev *dev,
tasklet_init(&budget->vpe_tasklet, vpeirq, (unsigned long) budget);
/* frontend power on */
- if (bi->type == BUDGET_FS_ACTIVY)
- saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI);
- else
+ if (bi->type != BUDGET_FS_ACTIVY)
saa7146_setgpio(dev, 2, SAA7146_GPIO_OUTHI);
if (budget_register(budget) == 0) {
diff --git a/drivers/media/dvb/ttpci/budget.c b/drivers/media/dvb/ttpci/budget.c
index fafe6407b3d..238c77b52f8 100644
--- a/drivers/media/dvb/ttpci/budget.c
+++ b/drivers/media/dvb/ttpci/budget.c
@@ -112,6 +112,7 @@ static int SendDiSEqCMsg (struct budget *budget, int len, u8 *msg, unsigned long
* Routines for the Fujitsu Siemens Activy budget card
* 22 kHz tone and DiSEqC are handled by the frontend.
* Voltage must be set here.
+ * GPIO 1: LNBP EN, GPIO 2: LNBP VSEL
*/
static int SetVoltage_Activy (struct budget *budget, fe_sec_voltage_t voltage)
{
@@ -121,11 +122,16 @@ static int SetVoltage_Activy (struct budget *budget, fe_sec_voltage_t voltage)
switch (voltage) {
case SEC_VOLTAGE_13:
+ saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI);
saa7146_setgpio(dev, 2, SAA7146_GPIO_OUTLO);
break;
case SEC_VOLTAGE_18:
+ saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI);
saa7146_setgpio(dev, 2, SAA7146_GPIO_OUTHI);
break;
+ case SEC_VOLTAGE_OFF:
+ saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTLO);
+ break;
default:
return -EINVAL;
}
@@ -206,7 +212,7 @@ static int lnbp21_set_voltage(struct dvb_frontend* fe, fe_sec_voltage_t voltage)
return 0;
}
-static int lnbp21_enable_high_lnb_voltage(struct dvb_frontend* fe, int arg)
+static int lnbp21_enable_high_lnb_voltage(struct dvb_frontend* fe, long arg)
{
struct budget* budget = (struct budget*) fe->dvb->priv;
u8 buf;
@@ -580,6 +586,7 @@ static void frontend_init(struct budget *budget)
if (budget->dvb_frontend) {
budget->dvb_frontend->ops->set_voltage = lnbp21_set_voltage;
budget->dvb_frontend->ops->enable_high_lnb_voltage = lnbp21_enable_high_lnb_voltage;
+ budget->dvb_frontend->ops->dishnetwork_send_legacy_command = NULL;
if (lnbp21_init(budget)) {
printk("%s: No LNBP21 found!\n", __FUNCTION__);
goto error_out;
@@ -624,7 +631,7 @@ static void frontend_init(struct budget *budget)
budget->dvb_frontend = stv0299_attach(&alps_bsru6_config, &budget->i2c_adap);
if (budget->dvb_frontend) {
budget->dvb_frontend->ops->set_voltage = siemens_budget_set_voltage;
- break;
+ budget->dvb_frontend->ops->dishnetwork_send_legacy_command = NULL;
}
break;
@@ -632,7 +639,7 @@ static void frontend_init(struct budget *budget)
budget->dvb_frontend = tda8083_attach(&grundig_29504_451_config, &budget->i2c_adap);
if (budget->dvb_frontend) {
budget->dvb_frontend->ops->set_voltage = siemens_budget_set_voltage;
- break;
+ budget->dvb_frontend->ops->dishnetwork_send_legacy_command = NULL;
}
break;
diff --git a/drivers/media/dvb/ttpci/budget.h b/drivers/media/dvb/ttpci/budget.h
index fdaa3318ad3..c7bb63c4d98 100644
--- a/drivers/media/dvb/ttpci/budget.h
+++ b/drivers/media/dvb/ttpci/budget.h
@@ -19,7 +19,7 @@ extern int budget_debug;
#endif
#define dprintk(level,args...) \
- do { if ((budget_debug & level)) { printk("%s: %s(): ",__stringify(KBUILD_MODNAME), __FUNCTION__); printk(args); } } while (0)
+ do { if ((budget_debug & level)) { printk("%s: %s(): ", KBUILD_MODNAME, __FUNCTION__); printk(args); } } while (0)
struct budget_info {
char *name;
@@ -95,6 +95,7 @@ static struct saa7146_pci_extension_data x_var = { \
#define BUDGET_KNC1SP 11
#define BUDGET_KNC1CP 12
#define BUDGET_KNC1TP 13
+#define BUDGET_TVSTAR 14
#define BUDGET_VIDEO_PORTA 0
#define BUDGET_VIDEO_PORTB 1
diff --git a/drivers/media/dvb/ttpci/ttpci-eeprom.c b/drivers/media/dvb/ttpci/ttpci-eeprom.c
index 18aa22b5478..1f31e91195b 100644
--- a/drivers/media/dvb/ttpci/ttpci-eeprom.c
+++ b/drivers/media/dvb/ttpci/ttpci-eeprom.c
@@ -13,7 +13,7 @@
Holger Waechtler Convergence
Copyright (C) 2002-2003 Ralph Metzler <rjkm@metzlerbros.de>
- Metzler Brothers Systementwicklung GbR
+ Metzler Brothers Systementwicklung GbR
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/dvb/ttusb-budget/Kconfig b/drivers/media/dvb/ttusb-budget/Kconfig
index c6c1d41a2ef..914587d52b5 100644
--- a/drivers/media/dvb/ttusb-budget/Kconfig
+++ b/drivers/media/dvb/ttusb-budget/Kconfig
@@ -10,7 +10,7 @@ config DVB_TTUSB_BUDGET
Support for external USB adapters designed by Technotrend and
produced by Hauppauge, shipped under the brand name 'Nova-USB'.
- These devices don't have a MPEG decoder built in, so you need
+ These devices don't have an MPEG decoder built in, so you need
an external software decoder to watch TV.
Say Y if you own such a device and want to use it.
diff --git a/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
index 104df610dbe..5a13c4744f6 100644
--- a/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
+++ b/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
@@ -1489,11 +1489,9 @@ static int ttusb_probe(struct usb_interface *intf, const struct usb_device_id *i
if (intf->altsetting->desc.bInterfaceNumber != 1) return -ENODEV;
- if (!(ttusb = kmalloc(sizeof(struct ttusb), GFP_KERNEL)))
+ if (!(ttusb = kzalloc(sizeof(struct ttusb), GFP_KERNEL)))
return -ENOMEM;
- memset(ttusb, 0, sizeof(struct ttusb));
-
ttusb->dev = udev;
ttusb->c = 0;
ttusb->mux_state = 0;
diff --git a/drivers/media/dvb/ttusb-dec/Kconfig b/drivers/media/dvb/ttusb-dec/Kconfig
index c334526af66..83611012ef3 100644
--- a/drivers/media/dvb/ttusb-dec/Kconfig
+++ b/drivers/media/dvb/ttusb-dec/Kconfig
@@ -8,14 +8,15 @@ config DVB_TTUSB_DEC
produced by Hauppauge, shipped under the brand name 'DEC2000-t'
and 'DEC3000-s'.
- Even if these devices have a MPEG decoder built in, they transmit
+ Even though these devices have an MPEG decoder built in, they transmit
only compressed MPEG data over the USB bus, so you need
an external software decoder to watch TV on your computer.
- This driver needs external firmware. Please use the commands
- "<kerneldir>/Documentation/dvb/get_dvb_firmware dec2000t",
- "<kerneldir>/Documentation/dvb/get_dvb_firmware dec2540t",
- "<kerneldir>/Documentation/dvb/get_dvb_firmware dec3000s",
- download/extract them, and then copy them to /usr/lib/hotplug/firmware.
+ This driver needs external firmware. Please use the commands
+ "<kerneldir>/Documentation/dvb/get_dvb_firmware dec2000t",
+ "<kerneldir>/Documentation/dvb/get_dvb_firmware dec2540t",
+ "<kerneldir>/Documentation/dvb/get_dvb_firmware dec3000s",
+ download/extract them, and then copy them to /usr/lib/hotplug/firmware
+ or /lib/firmware (depending on configuration of firmware hotplug).
Say Y if you own such a device and want to use it.
diff --git a/drivers/media/dvb/ttusb-dec/ttusb_dec.c b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
index 8abc2189012..df831171e03 100644
--- a/drivers/media/dvb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
@@ -369,7 +369,7 @@ static int ttusb_dec_get_stb_state (struct ttusb_dec *dec, unsigned int *mode,
static int ttusb_dec_audio_pes2ts_cb(void *priv, unsigned char *data)
{
- struct ttusb_dec *dec = (struct ttusb_dec *)priv;
+ struct ttusb_dec *dec = priv;
dec->audio_filter->feed->cb.ts(data, 188, NULL, 0,
&dec->audio_filter->feed->feed.ts,
@@ -380,7 +380,7 @@ static int ttusb_dec_audio_pes2ts_cb(void *priv, unsigned char *data)
static int ttusb_dec_video_pes2ts_cb(void *priv, unsigned char *data)
{
- struct ttusb_dec *dec = (struct ttusb_dec *)priv;
+ struct ttusb_dec *dec = priv;
dec->video_filter->feed->cb.ts(data, 188, NULL, 0,
&dec->video_filter->feed->feed.ts,
@@ -965,8 +965,8 @@ static int ttusb_dec_start_ts_feed(struct dvb_demux_feed *dvbdmxfeed)
case DMX_TS_PES_TELETEXT:
dec->pid[DMX_PES_TELETEXT] = dvbdmxfeed->pid;
- dprintk(" pes_type: DMX_TS_PES_TELETEXT\n");
- break;
+ dprintk(" pes_type: DMX_TS_PES_TELETEXT(not supported)\n");
+ return -ENOSYS;
case DMX_TS_PES_PCR:
dprintk(" pes_type: DMX_TS_PES_PCR\n");
@@ -975,8 +975,8 @@ static int ttusb_dec_start_ts_feed(struct dvb_demux_feed *dvbdmxfeed)
break;
case DMX_TS_PES_OTHER:
- dprintk(" pes_type: DMX_TS_PES_OTHER\n");
- break;
+ dprintk(" pes_type: DMX_TS_PES_OTHER(not supported)\n");
+ return -ENOSYS;
default:
dprintk(" pes_type: unknown (%d)\n", dvbdmxfeed->pes_type);
@@ -1182,7 +1182,7 @@ static void ttusb_dec_init_tasklet(struct ttusb_dec *dec)
(unsigned long)dec);
}
-static int ttusb_init_rc(struct ttusb_dec *dec)
+static int ttusb_init_rc( struct ttusb_dec *dec)
{
struct input_dev *input_dev;
u8 b[] = { 0x00, 0x01 };
@@ -1203,13 +1203,12 @@ static int ttusb_init_rc(struct ttusb_dec *dec)
input_dev->keycode = rc_keys;
for (i = 0; i < ARRAY_SIZE(rc_keys); i++)
- set_bit(rc_keys[i], input_dev->keybit);
+ set_bit(rc_keys[i], input_dev->keybit);
input_register_device(input_dev);
if (usb_submit_urb(dec->irq_urb, GFP_KERNEL))
printk("%s: usb_submit_urb failed\n",__FUNCTION__);
-
/* enable irq pipe */
ttusb_dec_send_command(dec,0xb0,sizeof(b),b,NULL,NULL);
@@ -1395,6 +1394,7 @@ static int ttusb_dec_init_stb(struct ttusb_dec *dec)
/* We can't trust the USB IDs that some firmwares
give the box */
switch (model) {
+ case 0x00070001:
case 0x00070008:
case 0x0007000c:
ttusb_dec_set_model(dec, TTUSB_DEC3000S);
@@ -1588,7 +1588,7 @@ static int fe_send_command(struct dvb_frontend* fe, const u8 command,
int param_length, const u8 params[],
int *result_length, u8 cmd_result[])
{
- struct ttusb_dec* dec = (struct ttusb_dec*) fe->dvb->priv;
+ struct ttusb_dec* dec = fe->dvb->priv;
return ttusb_dec_send_command(dec, command, param_length, params, result_length, cmd_result);
}
@@ -1606,15 +1606,13 @@ static int ttusb_dec_probe(struct usb_interface *intf,
udev = interface_to_usbdev(intf);
- if (!(dec = kmalloc(sizeof(struct ttusb_dec), GFP_KERNEL))) {
+ if (!(dec = kzalloc(sizeof(struct ttusb_dec), GFP_KERNEL))) {
printk("%s: couldn't allocate memory.\n", __FUNCTION__);
return -ENOMEM;
}
usb_set_intfdata(intf, (void *)dec);
- memset(dec, 0, sizeof(struct ttusb_dec));
-
switch (le16_to_cpu(id->idProduct)) {
case 0x1006:
ttusb_dec_set_model(dec, TTUSB_DEC3000S);
diff --git a/drivers/media/dvb/ttusb-dec/ttusbdecfe.c b/drivers/media/dvb/ttusb-dec/ttusbdecfe.c
index 725af3af5b2..a5a46175fa0 100644
--- a/drivers/media/dvb/ttusb-dec/ttusbdecfe.c
+++ b/drivers/media/dvb/ttusb-dec/ttusbdecfe.c
@@ -42,8 +42,39 @@ struct ttusbdecfe_state {
static int ttusbdecfe_read_status(struct dvb_frontend* fe, fe_status_t* status)
{
- *status = FE_HAS_SIGNAL | FE_HAS_VITERBI |
- FE_HAS_SYNC | FE_HAS_CARRIER | FE_HAS_LOCK;
+ struct ttusbdecfe_state* state = fe->demodulator_priv;
+ u8 b[] = { 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00 };
+ u8 result[4];
+ int len, ret;
+
+ *status=0;
+
+ ret=state->config->send_command(fe, 0x73, sizeof(b), b, &len, result);
+ if(ret)
+ return ret;
+
+ if(len != 4) {
+ printk(KERN_ERR "%s: unexpected reply\n", __FUNCTION__);
+ return -EIO;
+ }
+
+ switch(result[3]) {
+ case 1: /* not tuned yet */
+ case 2: /* no signal/no lock*/
+ break;
+ case 3: /* signal found and locked*/
+ *status = FE_HAS_SIGNAL | FE_HAS_VITERBI |
+ FE_HAS_SYNC | FE_HAS_CARRIER | FE_HAS_LOCK;
+ break;
+ case 4:
+ *status = FE_TIMEDOUT;
+ break;
+ default:
+ pr_info("%s: returned unknown value: %d\n",
+ __FUNCTION__, result[3]);
+ return -EIO;
+ }
return 0;
}
@@ -64,6 +95,16 @@ static int ttusbdecfe_dvbt_set_frontend(struct dvb_frontend* fe, struct dvb_fron
return 0;
}
+static int ttusbdecfe_dvbt_get_tune_settings(struct dvb_frontend* fe,
+ struct dvb_frontend_tune_settings* fesettings)
+{
+ fesettings->min_delay_ms = 1500;
+ /* Drift compensation makes no sense for DVB-T */
+ fesettings->step_size = 0;
+ fesettings->max_drift = 0;
+ return 0;
+}
+
static int ttusbdecfe_dvbs_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
{
struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv;
@@ -212,6 +253,8 @@ static struct dvb_frontend_ops ttusbdecfe_dvbt_ops = {
.set_frontend = ttusbdecfe_dvbt_set_frontend,
+ .get_tune_settings = ttusbdecfe_dvbt_get_tune_settings,
+
.read_status = ttusbdecfe_read_status,
};
@@ -223,11 +266,11 @@ static struct dvb_frontend_ops ttusbdecfe_dvbs_ops = {
.frequency_min = 950000,
.frequency_max = 2150000,
.frequency_stepsize = 125,
+ .symbol_rate_min = 1000000, /* guessed */
+ .symbol_rate_max = 45000000, /* guessed */
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
- FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
- FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO |
- FE_CAN_HIERARCHY_AUTO,
+ FE_CAN_QPSK
},
.release = ttusbdecfe_release,
diff --git a/drivers/media/radio/miropcm20-radio.c b/drivers/media/radio/miropcm20-radio.c
index c2ebe8754a9..dc292da2605 100644
--- a/drivers/media/radio/miropcm20-radio.c
+++ b/drivers/media/radio/miropcm20-radio.c
@@ -220,6 +220,7 @@ static struct file_operations pcm20_fops = {
.open = video_exclusive_open,
.release = video_exclusive_release,
.ioctl = pcm20_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 877c770558e..914deab4e04 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -299,6 +299,7 @@ static struct file_operations rtrack_fops = {
.open = video_exclusive_open,
.release = video_exclusive_release,
.ioctl = rt_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c
index 5319a9c9a97..523be820f9c 100644
--- a/drivers/media/radio/radio-aztech.c
+++ b/drivers/media/radio/radio-aztech.c
@@ -256,6 +256,7 @@ static struct file_operations aztech_fops = {
.open = video_exclusive_open,
.release = video_exclusive_release,
.ioctl = az_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 9b0406318f2..f1b5ac81e9d 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -490,6 +490,7 @@ static struct file_operations cadet_fops = {
.release = cadet_release,
.read = cadet_read,
.ioctl = cadet_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
diff --git a/drivers/media/radio/radio-gemtek-pci.c b/drivers/media/radio/radio-gemtek-pci.c
index 630cc786d0a..8e499b8f64c 100644
--- a/drivers/media/radio/radio-gemtek-pci.c
+++ b/drivers/media/radio/radio-gemtek-pci.c
@@ -301,6 +301,7 @@ static struct file_operations gemtek_pci_fops = {
.open = video_exclusive_open,
.release = video_exclusive_release,
.ioctl = gemtek_pci_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
@@ -317,11 +318,10 @@ static int __devinit gemtek_pci_probe( struct pci_dev *pci_dev, const struct pci
struct gemtek_pci_card *card;
struct video_device *devradio;
- if ( (card = kmalloc( sizeof( struct gemtek_pci_card ), GFP_KERNEL )) == NULL ) {
+ if ( (card = kzalloc( sizeof( struct gemtek_pci_card ), GFP_KERNEL )) == NULL ) {
printk( KERN_ERR "gemtek_pci: out of memory\n" );
return -ENOMEM;
}
- memset( card, 0, sizeof( struct gemtek_pci_card ) );
if ( pci_enable_device( pci_dev ) )
goto err_pci;
@@ -394,7 +394,7 @@ static struct pci_driver gemtek_pci_driver =
static int __init gemtek_pci_init_module( void )
{
- return pci_module_init( &gemtek_pci_driver );
+ return pci_register_driver( &gemtek_pci_driver );
}
static void __exit gemtek_pci_cleanup_module( void )
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index 6418f03b9ce..47173be97b9 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -233,6 +233,7 @@ static struct file_operations gemtek_fops = {
.open = video_exclusive_open,
.release = video_exclusive_release,
.ioctl = gemtek_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
diff --git a/drivers/media/radio/radio-maestro.c b/drivers/media/radio/radio-maestro.c
index e5e2021a731..36c9f5bf8cd 100644
--- a/drivers/media/radio/radio-maestro.c
+++ b/drivers/media/radio/radio-maestro.c
@@ -27,34 +27,30 @@
#include <linux/pci.h>
#include <linux/videodev.h>
-#define DRIVER_VERSION "0.04"
+#define DRIVER_VERSION "0.05"
-#define PCI_VENDOR_ESS 0x125D
-#define PCI_DEVICE_ID_ESS_ESS1968 0x1968 /* Maestro 2 */
-#define PCI_DEVICE_ID_ESS_ESS1978 0x1978 /* Maestro 2E */
-
-#define GPIO_DATA 0x60 /* port offset from ESS_IO_BASE */
+#define GPIO_DATA 0x60 /* port offset from ESS_IO_BASE */
#define IO_MASK 4 /* mask register offset from GPIO_DATA
bits 1=unmask write to given bit */
#define IO_DIR 8 /* direction register offset from GPIO_DATA
bits 0/1=read/write direction */
-#define GPIO6 0x0040 /* mask bits for GPIO lines */
-#define GPIO7 0x0080
-#define GPIO8 0x0100
-#define GPIO9 0x0200
+#define GPIO6 0x0040 /* mask bits for GPIO lines */
+#define GPIO7 0x0080
+#define GPIO8 0x0100
+#define GPIO9 0x0200
-#define STR_DATA GPIO6 /* radio TEA5757 pins and GPIO bits */
-#define STR_CLK GPIO7
-#define STR_WREN GPIO8
-#define STR_MOST GPIO9
+#define STR_DATA GPIO6 /* radio TEA5757 pins and GPIO bits */
+#define STR_CLK GPIO7
+#define STR_WREN GPIO8
+#define STR_MOST GPIO9
#define FREQ_LO 50*16000
#define FREQ_HI 150*16000
-#define FREQ_IF 171200 /* 10.7*16000 */
-#define FREQ_STEP 200 /* 12.5*16 */
+#define FREQ_IF 171200 /* 10.7*16000 */
+#define FREQ_STEP 200 /* 12.5*16 */
#define FREQ2BITS(x) ((((unsigned int)(x)+FREQ_IF+(FREQ_STEP<<1))\
/(FREQ_STEP<<2))<<2) /* (x==fmhz*16*1000) -> bits */
@@ -65,39 +61,58 @@ static int radio_nr = -1;
module_param(radio_nr, int, 0);
static int radio_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg);
+ unsigned int cmd, unsigned long arg);
+static int maestro_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void maestro_remove(struct pci_dev *pdev);
+
+static struct pci_device_id maestro_r_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_ESS1968),
+ .class = PCI_CLASS_MULTIMEDIA_AUDIO << 8,
+ .class_mask = 0xffff00 },
+ { PCI_DEVICE(PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_ESS1978),
+ .class = PCI_CLASS_MULTIMEDIA_AUDIO << 8,
+ .class_mask = 0xffff00 },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, maestro_r_pci_tbl);
+
+static struct pci_driver maestro_r_driver = {
+ .name = "maestro_radio",
+ .id_table = maestro_r_pci_tbl,
+ .probe = maestro_probe,
+ .remove = __devexit_p(maestro_remove),
+};
static struct file_operations maestro_fops = {
.owner = THIS_MODULE,
.open = video_exclusive_open,
.release = video_exclusive_release,
.ioctl = radio_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
-static struct video_device maestro_radio=
-{
- .owner = THIS_MODULE,
+static struct video_device maestro_radio = {
.name = "Maestro radio",
.type = VID_TYPE_TUNER,
.hardware = VID_HARDWARE_SF16MI,
- .fops = &maestro_fops,
+ .fops = &maestro_fops,
};
-static struct radio_device
-{
- __u16 io, /* base of Maestro card radio io (GPIO_DATA)*/
+struct radio_device {
+ u16 io, /* base of Maestro card radio io (GPIO_DATA)*/
muted, /* VIDEO_AUDIO_MUTE */
stereo, /* VIDEO_TUNER_STEREO_ON */
tuned; /* signal strength (0 or 0xffff) */
- struct semaphore lock;
-} radio_unit = {0, 0, 0, 0, };
+ struct semaphore lock;
+};
-static __u32 radio_bits_get(struct radio_device *dev)
+static u32 radio_bits_get(struct radio_device *dev)
{
- register __u16 io=dev->io, l, rdata;
- register __u32 data=0;
- __u16 omask;
+ register u16 io=dev->io, l, rdata;
+ register u32 data=0;
+ u16 omask;
+
omask = inw(io + IO_MASK);
outw(~(STR_CLK | STR_WREN), io + IO_MASK);
outw(0, io);
@@ -120,17 +135,21 @@ static __u32 radio_bits_get(struct radio_device *dev)
data++;
udelay(2);
}
+
if(dev->muted)
outw(STR_WREN, io);
+
udelay(4);
outw(omask, io + IO_MASK);
+
return data & 0x3ffe;
}
-static void radio_bits_set(struct radio_device *dev, __u32 data)
+static void radio_bits_set(struct radio_device *dev, u32 data)
{
- register __u16 io=dev->io, l, bits;
- __u16 omask, odir;
+ register u16 io=dev->io, l, bits;
+ u16 omask, odir;
+
omask = inw(io + IO_MASK);
odir = (inw(io + IO_DIR) & ~STR_DATA) | (STR_CLK | STR_WREN);
outw(odir | STR_DATA, io + IO_DIR);
@@ -146,8 +165,10 @@ static void radio_bits_set(struct radio_device *dev, __u32 data)
outw(bits, io); /* LO level */
udelay(4);
}
+
if(!dev->muted)
outw(0, io);
+
udelay(4);
outw(omask, io + IO_MASK);
outw(odir, io + IO_DIR);
@@ -155,141 +176,103 @@ static void radio_bits_set(struct radio_device *dev, __u32 data)
}
static inline int radio_function(struct inode *inode, struct file *file,
- unsigned int cmd, void *arg)
+ unsigned int cmd, void *arg)
{
struct video_device *dev = video_devdata(file);
- struct radio_device *card=dev->priv;
-
- switch(cmd) {
- case VIDIOCGCAP: {
- struct video_capability *v = arg;
- memset(v,0,sizeof(*v));
- strcpy(v->name, "Maestro radio");
- v->type=VID_TYPE_TUNER;
- v->channels=v->audios=1;
- return 0;
- }
- case VIDIOCGTUNER: {
- struct video_tuner *v = arg;
- if(v->tuner)
- return -EINVAL;
- (void)radio_bits_get(card);
- v->flags = VIDEO_TUNER_LOW | card->stereo;
- v->signal = card->tuned;
- strcpy(v->name, "FM");
- v->rangelow = FREQ_LO;
- v->rangehigh = FREQ_HI;
- v->mode = VIDEO_MODE_AUTO;
- return 0;
- }
- case VIDIOCSTUNER: {
- struct video_tuner *v = arg;
- if(v->tuner!=0)
- return -EINVAL;
- return 0;
- }
- case VIDIOCGFREQ: {
- unsigned long *freq = arg;
- *freq = BITS2FREQ(radio_bits_get(card));
- return 0;
- }
- case VIDIOCSFREQ: {
- unsigned long *freq = arg;
- if (*freq<FREQ_LO || *freq>FREQ_HI )
- return -EINVAL;
- radio_bits_set(card, FREQ2BITS(*freq));
+ struct radio_device *card = video_get_drvdata(dev);
+
+ switch (cmd) {
+ case VIDIOCGCAP: {
+ struct video_capability *v = arg;
+ memset(v, 0, sizeof(*v));
+ strcpy(v->name, "Maestro radio");
+ v->type = VID_TYPE_TUNER;
+ v->channels = v->audios = 1;
+ return 0;
+ } case VIDIOCGTUNER: {
+ struct video_tuner *v = arg;
+ if (v->tuner)
+ return -EINVAL;
+ (void)radio_bits_get(card);
+ v->flags = VIDEO_TUNER_LOW | card->stereo;
+ v->signal = card->tuned;
+ strcpy(v->name, "FM");
+ v->rangelow = FREQ_LO;
+ v->rangehigh = FREQ_HI;
+ v->mode = VIDEO_MODE_AUTO;
+ return 0;
+ } case VIDIOCSTUNER: {
+ struct video_tuner *v = arg;
+ if (v->tuner != 0)
+ return -EINVAL;
+ return 0;
+ } case VIDIOCGFREQ: {
+ unsigned long *freq = arg;
+ *freq = BITS2FREQ(radio_bits_get(card));
+ return 0;
+ } case VIDIOCSFREQ: {
+ unsigned long *freq = arg;
+ if (*freq < FREQ_LO || *freq > FREQ_HI)
+ return -EINVAL;
+ radio_bits_set(card, FREQ2BITS(*freq));
+ return 0;
+ } case VIDIOCGAUDIO: {
+ struct video_audio *v = arg;
+ memset(v, 0, sizeof(*v));
+ strcpy(v->name, "Radio");
+ v->flags = VIDEO_AUDIO_MUTABLE | card->muted;
+ v->mode = VIDEO_SOUND_STEREO;
+ return 0;
+ } case VIDIOCSAUDIO: {
+ struct video_audio *v = arg;
+ if (v->audio)
+ return -EINVAL;
+ {
+ register u16 io = card->io;
+ register u16 omask = inw(io + IO_MASK);
+ outw(~STR_WREN, io + IO_MASK);
+ outw((card->muted = v->flags & VIDEO_AUDIO_MUTE) ?
+ STR_WREN : 0, io);
+ udelay(4);
+ outw(omask, io + IO_MASK);
+ msleep(125);
return 0;
}
- case VIDIOCGAUDIO: {
- struct video_audio *v = arg;
- memset(v,0,sizeof(*v));
- strcpy(v->name, "Radio");
- v->flags=VIDEO_AUDIO_MUTABLE | card->muted;
- v->mode=VIDEO_SOUND_STEREO;
- return 0;
- }
- case VIDIOCSAUDIO: {
- struct video_audio *v = arg;
- if(v->audio)
- return -EINVAL;
- {
- register __u16 io=card->io;
- register __u16 omask = inw(io + IO_MASK);
- outw(~STR_WREN, io + IO_MASK);
- outw((card->muted = v->flags & VIDEO_AUDIO_MUTE)
- ? STR_WREN : 0, io);
- udelay(4);
- outw(omask, io + IO_MASK);
- msleep(125);
- return 0;
- }
- }
- case VIDIOCGUNIT: {
- struct video_unit *v = arg;
- v->video=VIDEO_NO_UNIT;
- v->vbi=VIDEO_NO_UNIT;
- v->radio=dev->minor;
- v->audio=0;
- v->teletext=VIDEO_NO_UNIT;
- return 0;
- }
- default: return -ENOIOCTLCMD;
+ } case VIDIOCGUNIT: {
+ struct video_unit *v = arg;
+ v->video = VIDEO_NO_UNIT;
+ v->vbi = VIDEO_NO_UNIT;
+ v->radio = dev->minor;
+ v->audio = 0;
+ v->teletext = VIDEO_NO_UNIT;
+ return 0;
+ } default:
+ return -ENOIOCTLCMD;
}
}
static int radio_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long arg)
{
struct video_device *dev = video_devdata(file);
- struct radio_device *card=dev->priv;
+ struct radio_device *card = video_get_drvdata(dev);
int ret;
down(&card->lock);
ret = video_usercopy(inode, file, cmd, arg, radio_function);
up(&card->lock);
- return ret;
-}
-static __u16 radio_install(struct pci_dev *pcidev);
-
-MODULE_AUTHOR("Adam Tlalka, atlka@pg.gda.pl");
-MODULE_DESCRIPTION("Radio driver for the Maestro PCI sound card radio.");
-MODULE_LICENSE("GPL");
-
-static void __exit maestro_radio_exit(void)
-{
- video_unregister_device(&maestro_radio);
+ return ret;
}
-static int __init maestro_radio_init(void)
+static u16 __devinit radio_power_on(struct radio_device *dev)
{
- register __u16 found=0;
- struct pci_dev *pcidev = NULL;
- while(!found && (pcidev = pci_find_device(PCI_VENDOR_ESS,
- PCI_DEVICE_ID_ESS_ESS1968,
- pcidev)))
- found |= radio_install(pcidev);
- while(!found && (pcidev = pci_find_device(PCI_VENDOR_ESS,
- PCI_DEVICE_ID_ESS_ESS1978,
- pcidev)))
- found |= radio_install(pcidev);
- if(!found) {
- printk(KERN_INFO "radio-maestro: no devices found.\n");
- return -ENODEV;
- }
- return 0;
-}
+ register u16 io = dev->io;
+ register u32 ofreq;
+ u16 omask, odir;
-module_init(maestro_radio_init);
-module_exit(maestro_radio_exit);
-
-static inline __u16 radio_power_on(struct radio_device *dev)
-{
- register __u16 io=dev->io;
- register __u32 ofreq;
- __u16 omask, odir;
omask = inw(io + IO_MASK);
- odir = (inw(io + IO_DIR) & ~STR_DATA) | (STR_CLK | STR_WREN);
+ odir = (inw(io + IO_DIR) & ~STR_DATA) | (STR_CLK | STR_WREN);
outw(odir & ~STR_WREN, io + IO_DIR);
dev->muted = inw(io) & STR_WREN ? 0 : VIDEO_AUDIO_MUTE;
outw(odir, io + IO_DIR);
@@ -298,35 +281,101 @@ static inline __u16 radio_power_on(struct radio_device *dev)
udelay(16);
outw(omask, io + IO_MASK);
ofreq = radio_bits_get(dev);
- if((ofreq<FREQ2BITS(FREQ_LO)) || (ofreq>FREQ2BITS(FREQ_HI)))
+
+ if ((ofreq < FREQ2BITS(FREQ_LO)) || (ofreq > FREQ2BITS(FREQ_HI)))
ofreq = FREQ2BITS(FREQ_LO);
radio_bits_set(dev, ofreq);
+
return (ofreq == radio_bits_get(dev));
}
-static __u16 radio_install(struct pci_dev *pcidev)
+static int __devinit maestro_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
- if(((pcidev->class >> 8) & 0xffff) != PCI_CLASS_MULTIMEDIA_AUDIO)
- return 0;
-
- radio_unit.io = pcidev->resource[0].start + GPIO_DATA;
- maestro_radio.priv = &radio_unit;
- init_MUTEX(&radio_unit.lock);
-
- if(radio_power_on(&radio_unit)) {
- if(video_register_device(&maestro_radio, VFL_TYPE_RADIO, radio_nr)==-1) {
- printk("radio-maestro: can't register device!");
- return 0;
- }
- printk(KERN_INFO "radio-maestro: version "
- DRIVER_VERSION
- " time "
- __TIME__ " "
- __DATE__
- "\n");
- printk(KERN_INFO "radio-maestro: radio chip initialized\n");
- return 1;
- } else
- return 0;
+ struct radio_device *radio_unit;
+ struct video_device *maestro_radio_inst;
+ int retval;
+
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "enabling pci device failed!\n");
+ goto err;
+ }
+
+ retval = -ENOMEM;
+
+ radio_unit = kzalloc(sizeof(*radio_unit), GFP_KERNEL);
+ if (radio_unit == NULL) {
+ dev_err(&pdev->dev, "not enough memory\n");
+ goto err;
+ }
+
+ radio_unit->io = pci_resource_start(pdev, 0) + GPIO_DATA;
+ init_MUTEX(&radio_unit->lock);
+
+ maestro_radio_inst = video_device_alloc();
+ if (maestro_radio_inst == NULL) {
+ dev_err(&pdev->dev, "not enough memory\n");
+ goto errfr;
+ }
+
+ memcpy(maestro_radio_inst, &maestro_radio, sizeof(maestro_radio));
+ video_set_drvdata(maestro_radio_inst, radio_unit);
+ pci_set_drvdata(pdev, maestro_radio_inst);
+
+ retval = video_register_device(maestro_radio_inst, VFL_TYPE_RADIO,
+ radio_nr);
+ if (retval) {
+ printk(KERN_ERR "can't register video device!\n");
+ goto errfr1;
+ }
+
+ if (!radio_power_on(radio_unit)) {
+ retval = -EIO;
+ goto errunr;
+ }
+
+ dev_info(&pdev->dev, "version " DRIVER_VERSION " time " __TIME__ " "
+ __DATE__ "\n");
+ dev_info(&pdev->dev, "radio chip initialized\n");
+
+ return 0;
+errunr:
+ video_unregister_device(maestro_radio_inst);
+errfr1:
+ kfree(maestro_radio_inst);
+errfr:
+ kfree(radio_unit);
+err:
+ return retval;
+
+}
+
+static void __devexit maestro_remove(struct pci_dev *pdev)
+{
+ struct video_device *vdev = pci_get_drvdata(pdev);
+
+ video_unregister_device(vdev);
}
+static int __init maestro_radio_init(void)
+{
+ int retval = pci_register_driver(&maestro_r_driver);
+
+ if (retval)
+ printk(KERN_ERR "error during registration pci driver\n");
+
+ return retval;
+}
+
+static void __exit maestro_radio_exit(void)
+{
+ pci_unregister_driver(&maestro_r_driver);
+}
+
+module_init(maestro_radio_init);
+module_exit(maestro_radio_exit);
+
+MODULE_AUTHOR("Adam Tlalka, atlka@pg.gda.pl");
+MODULE_DESCRIPTION("Radio driver for the Maestro PCI sound card radio.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index 02d39a50d5e..c975ddd86cd 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -80,6 +80,7 @@ static struct file_operations maxiradio_fops = {
.open = video_exclusive_open,
.release = video_exclusive_release,
.ioctl = radio_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
static struct video_device maxiradio_radio =
@@ -337,7 +338,7 @@ static struct pci_driver maxiradio_driver = {
static int __init maxiradio_radio_init(void)
{
- return pci_module_init(&maxiradio_driver);
+ return pci_register_driver(&maxiradio_driver);
}
static void __exit maxiradio_radio_exit(void)
diff --git a/drivers/media/radio/radio-rtrack2.c b/drivers/media/radio/radio-rtrack2.c
index b2256d675b4..28a47c9e7a8 100644
--- a/drivers/media/radio/radio-rtrack2.c
+++ b/drivers/media/radio/radio-rtrack2.c
@@ -199,6 +199,7 @@ static struct file_operations rtrack2_fops = {
.open = video_exclusive_open,
.release = video_exclusive_release,
.ioctl = rt_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index 6f03ce4dd7b..0229f792a05 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -225,6 +225,7 @@ static struct file_operations fmi_fops = {
.open = video_exclusive_open,
.release = video_exclusive_release,
.ioctl = fmi_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index 71971e9bb34..099ffb3b9c7 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -48,7 +48,7 @@ static int io = 0x384;
static int radio_nr = -1;
/* hw precision is 12.5 kHz
- * It is only usefull to give freq in intervall of 200 (=0.0125Mhz),
+ * It is only useful to give freq in intervals of 200 (=0.0125 MHz),
* other bits will be truncated
*/
#define RSF16_ENCODE(x) ((x)/200+856)
@@ -356,6 +356,7 @@ static struct file_operations fmr2_fops = {
.open = video_exclusive_open,
.release = video_exclusive_release,
.ioctl = fmr2_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
diff --git a/drivers/media/radio/radio-terratec.c b/drivers/media/radio/radio-terratec.c
index b03573c6840..fcfde2e4f19 100644
--- a/drivers/media/radio/radio-terratec.c
+++ b/drivers/media/radio/radio-terratec.c
@@ -276,6 +276,7 @@ static struct file_operations terratec_fops = {
.open = video_exclusive_open,
.release = video_exclusive_release,
.ioctl = tt_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
diff --git a/drivers/media/radio/radio-trust.c b/drivers/media/radio/radio-trust.c
index b300bedf7c7..5a099a50d4d 100644
--- a/drivers/media/radio/radio-trust.c
+++ b/drivers/media/radio/radio-trust.c
@@ -255,6 +255,7 @@ static struct file_operations trust_fops = {
.open = video_exclusive_open,
.release = video_exclusive_release,
.ioctl = tr_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
diff --git a/drivers/media/radio/radio-typhoon.c b/drivers/media/radio/radio-typhoon.c
index f304f3c1476..8ac9a8ef909 100644
--- a/drivers/media/radio/radio-typhoon.c
+++ b/drivers/media/radio/radio-typhoon.c
@@ -261,6 +261,7 @@ static struct file_operations typhoon_fops = {
.open = video_exclusive_open,
.release = video_exclusive_release,
.ioctl = typhoon_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
diff --git a/drivers/media/radio/radio-zoltrix.c b/drivers/media/radio/radio-zoltrix.c
index 4c6d6fb4903..d590e80c922 100644
--- a/drivers/media/radio/radio-zoltrix.c
+++ b/drivers/media/radio/radio-zoltrix.c
@@ -313,6 +313,7 @@ static struct file_operations zoltrix_fops =
.open = video_exclusive_open,
.release = video_exclusive_release,
.ioctl = zol_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index fc87efc5049..d82c8a30ba4 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -7,6 +7,15 @@ menu "Video For Linux"
comment "Video Adapters"
+config VIDEO_ADV_DEBUG
+ bool "Enable advanced debug functionality"
+ depends on VIDEO_DEV
+ default n
+ ---help---
+ Say Y here to enable advanced debugging functionality on some
+ V4L devices.
+ If in doubt, say N.
+
config VIDEO_BT848
tristate "BT848 Video For Linux"
depends on VIDEO_DEV && PCI && I2C
@@ -174,7 +183,7 @@ config VIDEO_STRADIS
help
Say Y here to enable support for the Stradis 4:2:2 MPEG-2 video
driver for PCI. There is a product page at
- <http://www.stradis.com/decoder.html>.
+ <http://www.stradis.com/>.
config VIDEO_ZORAN
tristate "Zoran ZR36057/36067 Video For Linux"
@@ -342,6 +351,6 @@ config VIDEO_DECODER
depends on VIDEO_DEV && I2C && EXPERIMENTAL
---help---
Say Y here to compile drivers for SAA7115, SAA7127 and CX25840
- video decoders.
+ video decoders.
endmenu
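The new VIDEO_ADV_DEBUG entry above only introduces the Kconfig symbol; individual drivers are expected to compile their register-level debug paths conditionally on it. A sketch of the intended usage, illustrative only: the mydrv_dump_registers() helper below is hypothetical and not added by this patch:

    /* Only built when CONFIG_VIDEO_ADV_DEBUG=y; otherwise the helper
     * compiles away to an empty inline. */
    #include <linux/i2c.h>
    #include <media/v4l2-common.h>

    #ifdef CONFIG_VIDEO_ADV_DEBUG
    static void mydrv_dump_registers(struct i2c_client *client)
    {
            int reg;

            for (reg = 0; reg < 0x20; reg++)
                    v4l_info(client, "reg 0x%02x = 0x%02x\n", reg,
                             i2c_smbus_read_byte_data(client, reg));
    }
    #else
    static inline void mydrv_dump_registers(struct i2c_client *client) { }
    #endif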
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 82060f9909d..dd24896906f 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -3,15 +3,19 @@
#
bttv-objs := bttv-driver.o bttv-cards.o bttv-if.o \
- bttv-risc.o bttv-vbi.o bttv-i2c.o bttv-gpio.o
+ bttv-risc.o bttv-vbi.o bttv-i2c.o bttv-gpio.o \
+ bttv-input.o
zoran-objs := zr36120.o zr36120_i2c.o zr36120_mem.o
zr36067-objs := zoran_procfs.o zoran_device.o \
zoran_driver.o zoran_card.o
tuner-objs := tuner-core.o tuner-simple.o mt20xx.o tda8290.o tea5767.o
-obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-common.o v4l1-compat.o
+
+msp3400-objs := msp3400-driver.o msp3400-kthreads.o
+
+obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-common.o v4l1-compat.o compat_ioctl32.o
obj-$(CONFIG_VIDEO_BT848) += bttv.o msp3400.o tvaudio.o \
- tda7432.o tda9875.o ir-kbd-i2c.o ir-kbd-gpio.o
+ tda7432.o tda9875.o ir-kbd-i2c.o
obj-$(CONFIG_SOUND_TVMIXER) += tvmixer.o
obj-$(CONFIG_VIDEO_ZR36120) += zoran.o
diff --git a/drivers/media/video/adv7170.c b/drivers/media/video/adv7170.c
index 1ca2b67aedf..4ce07ae62da 100644
--- a/drivers/media/video/adv7170.c
+++ b/drivers/media/video/adv7170.c
@@ -413,14 +413,12 @@ adv7170_detect_client (struct i2c_adapter *adapter,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return 0;
- client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
if (client == 0)
return -ENOMEM;
- memset(client, 0, sizeof(struct i2c_client));
client->addr = address;
client->adapter = adapter;
client->driver = &i2c_driver_adv7170;
- client->flags = I2C_CLIENT_ALLOW_USE;
if ((client->addr == I2C_ADV7170 >> 1) ||
(client->addr == (I2C_ADV7170 >> 1) + 1)) {
dname = adv7170_name;
@@ -434,12 +432,11 @@ adv7170_detect_client (struct i2c_adapter *adapter,
}
strlcpy(I2C_NAME(client), dname, sizeof(I2C_NAME(client)));
- encoder = kmalloc(sizeof(struct adv7170), GFP_KERNEL);
+ encoder = kzalloc(sizeof(struct adv7170), GFP_KERNEL);
if (encoder == NULL) {
kfree(client);
return -ENOMEM;
}
- memset(encoder, 0, sizeof(struct adv7170));
encoder->norm = VIDEO_MODE_NTSC;
encoder->input = 0;
encoder->enable = 1;
@@ -498,11 +495,11 @@ adv7170_detach_client (struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static struct i2c_driver i2c_driver_adv7170 = {
- .owner = THIS_MODULE,
- .name = "adv7170", /* name */
+ .driver = {
+ .name = "adv7170", /* name */
+ },
.id = I2C_DRIVERID_ADV7170,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = adv7170_attach_adapter,
.detach_client = adv7170_detach_client,
diff --git a/drivers/media/video/adv7175.c b/drivers/media/video/adv7175.c
index 173bca1e029..4e218f22b21 100644
--- a/drivers/media/video/adv7175.c
+++ b/drivers/media/video/adv7175.c
@@ -463,14 +463,12 @@ adv7175_detect_client (struct i2c_adapter *adapter,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return 0;
- client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
if (client == 0)
return -ENOMEM;
- memset(client, 0, sizeof(struct i2c_client));
client->addr = address;
client->adapter = adapter;
client->driver = &i2c_driver_adv7175;
- client->flags = I2C_CLIENT_ALLOW_USE;
if ((client->addr == I2C_ADV7175 >> 1) ||
(client->addr == (I2C_ADV7175 >> 1) + 1)) {
dname = adv7175_name;
@@ -484,12 +482,11 @@ adv7175_detect_client (struct i2c_adapter *adapter,
}
strlcpy(I2C_NAME(client), dname, sizeof(I2C_NAME(client)));
- encoder = kmalloc(sizeof(struct adv7175), GFP_KERNEL);
+ encoder = kzalloc(sizeof(struct adv7175), GFP_KERNEL);
if (encoder == NULL) {
kfree(client);
return -ENOMEM;
}
- memset(encoder, 0, sizeof(struct adv7175));
encoder->norm = VIDEO_MODE_PAL;
encoder->input = 0;
encoder->enable = 1;
@@ -548,11 +545,11 @@ adv7175_detach_client (struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static struct i2c_driver i2c_driver_adv7175 = {
- .owner = THIS_MODULE,
- .name = "adv7175", /* name */
+ .driver = {
+ .name = "adv7175", /* name */
+ },
.id = I2C_DRIVERID_ADV7175,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = adv7175_attach_adapter,
.detach_client = adv7175_detach_client,
diff --git a/drivers/media/video/arv.c b/drivers/media/video/arv.c
index 881cdcb1875..7d5a068353f 100644
--- a/drivers/media/video/arv.c
+++ b/drivers/media/video/arv.c
@@ -749,6 +749,7 @@ static struct file_operations ar_fops = {
.release = video_exclusive_release,
.read = ar_read,
.ioctl = ar_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
diff --git a/drivers/media/video/bt819.c b/drivers/media/video/bt819.c
index 3ee0afca76a..d6447791d0e 100644
--- a/drivers/media/video/bt819.c
+++ b/drivers/media/video/bt819.c
@@ -528,22 +528,18 @@ bt819_detect_client (struct i2c_adapter *adapter,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return 0;
- client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
if (client == 0)
return -ENOMEM;
- memset(client, 0, sizeof(struct i2c_client));
client->addr = address;
client->adapter = adapter;
client->driver = &i2c_driver_bt819;
- client->flags = I2C_CLIENT_ALLOW_USE;
- decoder = kmalloc(sizeof(struct bt819), GFP_KERNEL);
+ decoder = kzalloc(sizeof(struct bt819), GFP_KERNEL);
if (decoder == NULL) {
kfree(client);
return -ENOMEM;
}
-
- memset(decoder, 0, sizeof(struct bt819));
decoder->norm = VIDEO_MODE_NTSC;
decoder->input = 0;
decoder->enable = 1;
@@ -623,11 +619,11 @@ bt819_detach_client (struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static struct i2c_driver i2c_driver_bt819 = {
- .owner = THIS_MODULE,
- .name = "bt819",
+ .driver = {
+ .name = "bt819",
+ },
.id = I2C_DRIVERID_BT819,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = bt819_attach_adapter,
.detach_client = bt819_detach_client,
diff --git a/drivers/media/video/bt832.c b/drivers/media/video/bt832.c
index 3ca1d768bfd..07c78f1f7a4 100644
--- a/drivers/media/video/bt832.c
+++ b/drivers/media/video/bt832.c
@@ -30,8 +30,9 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
-
#include <media/audiochip.h>
+#include <media/v4l2-common.h>
+
#include "bttv.h"
#include "bt832.h"
@@ -42,9 +43,10 @@ static unsigned short normal_i2c[] = { I2C_BT832_ALT1>>1, I2C_BT832_ALT2>>1,
I2C_CLIENT_END };
I2C_CLIENT_INSMOD;
-/* ---------------------------------------------------------------------- */
+int debug = 0; /* debug output */
+module_param(debug, int, 0644);
-#define dprintk if (debug) printk
+/* ---------------------------------------------------------------------- */
static int bt832_detach(struct i2c_client *client);
@@ -61,23 +63,26 @@ int bt832_hexdump(struct i2c_client *i2c_client_s, unsigned char *buf)
int i,rc;
buf[0]=0x80; // start at register 0 with auto-increment
if (1 != (rc = i2c_master_send(i2c_client_s,buf,1)))
- printk("bt832: i2c i/o error: rc == %d (should be 1)\n",rc);
+ v4l_err(i2c_client_s,"i2c i/o error: rc == %d (should be 1)\n",rc);
for(i=0;i<65;i++)
buf[i]=0;
if (65 != (rc=i2c_master_recv(i2c_client_s,buf,65)))
- printk("bt832: i2c i/o error: rc == %d (should be 65)\n",rc);
+ v4l_err(i2c_client_s,"i2c i/o error: rc == %d (should be 65)\n",rc);
// Note: On READ the first byte is the current index
// (e.g. 0x80, what we just wrote)
- if(1) {
+ if(debug>1) {
int i;
- printk("BT832 hexdump:\n");
+ v4l_dbg(2, debug,i2c_client_s,"hexdump:");
for(i=1;i<65;i++) {
if(i!=1) {
- if(((i-1)%8)==0) printk(" ");
- if(((i-1)%16)==0) printk("\n");
+ if(((i-1)%8)==0) printk(" ");
+ if(((i-1)%16)==0) {
+ printk("\n");
+ v4l_dbg(2, debug,i2c_client_s,"hexdump:");
+ }
}
printk(" %02x",buf[i]);
}
@@ -96,56 +101,56 @@ int bt832_init(struct i2c_client *i2c_client_s)
bt832_hexdump(i2c_client_s,buf);
if(buf[0x40] != 0x31) {
- printk("bt832: this i2c chip is no bt832 (id=%02x). Detaching.\n",buf[0x40]);
+ v4l_err(i2c_client_s,"This i2c chip is no bt832 (id=%02x). Detaching.\n",buf[0x40]);
kfree(buf);
return 0;
}
- printk("Write 0 tp VPSTATUS\n");
+ v4l_err(i2c_client_s,"Write 0 tp VPSTATUS\n");
buf[0]=BT832_VP_STATUS; // Reg.52
buf[1]= 0x00;
if (2 != (rc = i2c_master_send(i2c_client_s,buf,2)))
- printk("bt832: i2c i/o error VPS: rc == %d (should be 2)\n",rc);
+ v4l_err(i2c_client_s,"i2c i/o error VPS: rc == %d (should be 2)\n",rc);
bt832_hexdump(i2c_client_s,buf);
// Leave low power mode:
- printk("Bt832: leave low power mode.\n");
+ v4l_err(i2c_client_s,"leave low power mode.\n");
buf[0]=BT832_CAM_SETUP0; //0x39 57
buf[1]=0x08;
if (2 != (rc = i2c_master_send(i2c_client_s,buf,2)))
- printk("bt832: i2c i/o error LLPM: rc == %d (should be 2)\n",rc);
+ v4l_err(i2c_client_s,"i2c i/o error LLPM: rc == %d (should be 2)\n",rc);
bt832_hexdump(i2c_client_s,buf);
- printk("Write 0 tp VPSTATUS\n");
+ v4l_info(i2c_client_s,"Write 0 tp VPSTATUS\n");
buf[0]=BT832_VP_STATUS; // Reg.52
buf[1]= 0x00;
if (2 != (rc = i2c_master_send(i2c_client_s,buf,2)))
- printk("bt832: i2c i/o error VPS: rc == %d (should be 2)\n",rc);
+ v4l_err(i2c_client_s,"i2c i/o error VPS: rc == %d (should be 2)\n",rc);
bt832_hexdump(i2c_client_s,buf);
// Enable Output
- printk("Enable Output\n");
+ v4l_info(i2c_client_s,"Enable Output\n");
buf[0]=BT832_VP_CONTROL1; // Reg.40
buf[1]= 0x27 & (~0x01); // Default | !skip
if (2 != (rc = i2c_master_send(i2c_client_s,buf,2)))
- printk("bt832: i2c i/o error EO: rc == %d (should be 2)\n",rc);
+ v4l_err(i2c_client_s,"i2c i/o error EO: rc == %d (should be 2)\n",rc);
bt832_hexdump(i2c_client_s,buf);
// for testing (even works when no camera attached)
- printk("bt832: *** Generate NTSC M Bars *****\n");
+ v4l_info(i2c_client_s,"*** Generate NTSC M Bars *****\n");
buf[0]=BT832_VP_TESTCONTROL0; // Reg. 42
buf[1]=3; // Generate NTSC System M bars, Generate Frame timing internally
if (2 != (rc = i2c_master_send(i2c_client_s,buf,2)))
- printk("bt832: i2c i/o error MBAR: rc == %d (should be 2)\n",rc);
+ v4l_info(i2c_client_s,"i2c i/o error MBAR: rc == %d (should be 2)\n",rc);
- printk("Bt832: Camera Present: %s\n",
+ v4l_info(i2c_client_s,"Camera Present: %s\n",
(buf[1+BT832_CAM_STATUS] & BT832_56_CAMERA_PRESENT) ? "yes":"no");
bt832_hexdump(i2c_client_s,buf);
@@ -159,20 +164,18 @@ static int bt832_attach(struct i2c_adapter *adap, int addr, int kind)
{
struct bt832 *t;
- printk("bt832_attach\n");
-
client_template.adapter = adap;
client_template.addr = addr;
- printk("bt832: chip found @ 0x%x\n", addr<<1);
-
- if (NULL == (t = kmalloc(sizeof(*t), GFP_KERNEL)))
+ if (NULL == (t = kzalloc(sizeof(*t), GFP_KERNEL)))
return -ENOMEM;
- memset(t,0,sizeof(*t));
t->client = client_template;
i2c_set_clientdata(&t->client, t);
i2c_attach_client(&t->client);
+ v4l_info(&t->client,"chip found @ 0x%x\n", addr<<1);
+
+
if(! bt832_init(&t->client)) {
bt832_detach(&t->client);
return -1;
@@ -183,13 +186,8 @@ static int bt832_attach(struct i2c_adapter *adap, int addr, int kind)
static int bt832_probe(struct i2c_adapter *adap)
{
-#ifdef I2C_CLASS_TV_ANALOG
if (adap->class & I2C_CLASS_TV_ANALOG)
return i2c_probe(adap, &addr_data, bt832_attach);
-#else
- if (adap->id == I2C_HW_B_BT848)
- return i2c_probe(adap, &addr_data, bt832_attach);
-#endif
return 0;
}
@@ -197,7 +195,7 @@ static int bt832_detach(struct i2c_client *client)
{
struct bt832 *t = i2c_get_clientdata(client);
- printk("bt832: detach.\n");
+ v4l_info(&t->client,"dettach\n");
i2c_detach_client(client);
kfree(t);
return 0;
@@ -208,7 +206,8 @@ bt832_command(struct i2c_client *client, unsigned int cmd, void *arg)
{
struct bt832 *t = i2c_get_clientdata(client);
- printk("bt832: command %x\n",cmd);
+ if (debug>1)
+ v4l_i2c_print_ioctl(&t->client,cmd);
switch (cmd) {
case BT832_HEXDUMP: {
@@ -219,7 +218,7 @@ bt832_command(struct i2c_client *client, unsigned int cmd, void *arg)
}
break;
case BT832_REATTACH:
- printk("bt832: re-attach\n");
+ v4l_info(&t->client,"re-attach\n");
i2c_del_driver(&driver);
i2c_add_driver(&driver);
break;
@@ -230,10 +229,10 @@ bt832_command(struct i2c_client *client, unsigned int cmd, void *arg)
/* ----------------------------------------------------------------------- */
static struct i2c_driver driver = {
- .owner = THIS_MODULE,
- .name = "i2c bt832 driver",
- .id = -1, /* FIXME */
- .flags = I2C_DF_NOTIFY,
+ .driver = {
+ .name = "bt832",
+ },
+ .id = 0, /* FIXME */
.attach_adapter = bt832_probe,
.detach_client = bt832_detach,
.command = bt832_command,
@@ -241,7 +240,6 @@ static struct i2c_driver driver = {
static struct i2c_client client_template =
{
.name = "bt832",
- .flags = I2C_CLIENT_ALLOW_USE,
.driver = &driver,
};
diff --git a/drivers/media/video/bt856.c b/drivers/media/video/bt856.c
index 8eb871d0e85..909b593530e 100644
--- a/drivers/media/video/bt856.c
+++ b/drivers/media/video/bt856.c
@@ -316,22 +316,19 @@ bt856_detect_client (struct i2c_adapter *adapter,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return 0;
- client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
if (client == 0)
return -ENOMEM;
- memset(client, 0, sizeof(struct i2c_client));
client->addr = address;
client->adapter = adapter;
client->driver = &i2c_driver_bt856;
- client->flags = I2C_CLIENT_ALLOW_USE;
strlcpy(I2C_NAME(client), "bt856", sizeof(I2C_NAME(client)));
- encoder = kmalloc(sizeof(struct bt856), GFP_KERNEL);
+ encoder = kzalloc(sizeof(struct bt856), GFP_KERNEL);
if (encoder == NULL) {
kfree(client);
return -ENOMEM;
}
- memset(encoder, 0, sizeof(struct bt856));
encoder->norm = VIDEO_MODE_NTSC;
encoder->enable = 1;
i2c_set_clientdata(client, encoder);
@@ -405,11 +402,11 @@ bt856_detach_client (struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static struct i2c_driver i2c_driver_bt856 = {
- .owner = THIS_MODULE,
- .name = "bt856",
+ .driver = {
+ .name = "bt856",
+ },
.id = I2C_DRIVERID_BT856,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = bt856_attach_adapter,
.detach_client = bt856_detach_client,
diff --git a/drivers/media/video/bttv-cards.c b/drivers/media/video/bttv-cards.c
index 012be639aa1..65323e78d5f 100644
--- a/drivers/media/video/bttv-cards.c
+++ b/drivers/media/video/bttv-cards.c
@@ -38,6 +38,7 @@
#include <asm/io.h>
#include "bttvp.h"
+#include <media/v4l2-common.h>
/* fwd decl */
static void boot_msp34xx(struct bttv *btv, int pin);
@@ -292,6 +293,9 @@ static struct CARD {
/* likely broken, vendor id doesn't match the other magic views ...
* { 0xa0fca04f, BTTV_BOARD_MAGICTVIEW063, "Guillemot Maxi TV Video 3" }, */
+ /* Duplicate PCI ID, reconfigure for this board during the eeprom read.
+ * { 0x13eb0070, BTTV_BOARD_HAUPPAUGE_IMPACTVCB, "Hauppauge ImpactVCB" }, */
+
/* DVB cards (using pci function .1 for mpeg data xfer) */
{ 0x01010071, BTTV_BOARD_NEBULA_DIGITV, "Nebula Electronics DigiTV" },
{ 0x07611461, BTTV_BOARD_AVDVBT_761, "AverMedia AverTV DVB-T 761" },
@@ -317,7 +321,7 @@ struct tvcard bttv_tvcards[] = {
.audio_inputs = 1,
.tuner = 0,
.svhs = 2,
- .muxsel = { 2, 3, 1, 0},
+ .muxsel = { 2, 3, 1, 0 },
.tuner_type = -1,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -329,8 +333,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 15,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 2, 0, 0, 0, 10},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 2, 0, 0, 0, 10 },
.needs_tvaudio = 1,
.tuner_type = -1,
.tuner_addr = ADDR_UNSET,
@@ -343,8 +347,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 7,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 0, 1, 2, 3, 4},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 0, 1, 2, 3, 4 },
.needs_tvaudio = 1,
.tuner_type = -1,
.tuner_addr = ADDR_UNSET,
@@ -357,8 +361,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 7,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 4, 0, 2, 3, 1},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 4, 0, 2, 3, 1 },
.no_msp34xx = 1,
.needs_tvaudio = 1,
.tuner_type = TUNER_PHILIPS_NTSC,
@@ -376,7 +380,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = -1,
.svhs = 2,
.gpiomask = 0,
- .muxsel = { 2, 3, 1, 1},
+ .muxsel = { 2, 3, 1, 1 },
.audiomux = { 0 },
.needs_tvaudio = 0,
.tuner_type = 4,
@@ -390,8 +394,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 3,
- .muxsel = { 2, 3, 1, 0},
- .audiomux = { 0, 1, 0, 1, 3},
+ .muxsel = { 2, 3, 1, 0 },
+ .audiomux = { 0, 1, 0, 1, 3 },
.needs_tvaudio = 1,
.tuner_type = -1,
.tuner_addr = ADDR_UNSET,
@@ -403,9 +407,9 @@ struct tvcard bttv_tvcards[] = {
.audio_inputs = 1,
.tuner = 0,
.svhs = 3,
- .muxsel = { 2, 3, 1, 1},
+ .muxsel = { 2, 3, 1, 1 },
.gpiomask = 0x0f,
- .audiomux = { 0x0c, 0x04, 0x08, 0x04, 0},
+ .audiomux = { 0x0c, 0x04, 0x08, 0x04, 0 },
/* 0x04 for some cards ?? */
.needs_tvaudio = 1,
.tuner_type = -1,
@@ -421,8 +425,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = -1,
.svhs = 3,
.gpiomask = 0,
- .muxsel = { 2, 3, 1, 0, 0},
- .audiomux = {0 },
+ .muxsel = { 2, 3, 1, 0, 0 },
+ .audiomux = { 0 },
.needs_tvaudio = 1,
.tuner_type = -1,
.tuner_addr = ADDR_UNSET,
@@ -437,8 +441,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0xc00,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 0, 0xc00, 0x800, 0x400, 0xc00, 0},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 0, 0xc00, 0x800, 0x400, 0xc00, 0 },
.needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = -1,
@@ -452,8 +456,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 3,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 1, 1, 2, 3, 0},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 1, 1, 2, 3, 0 },
.needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_TEMIC_PAL,
@@ -467,8 +471,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x0f, /* old: 7 */
- .muxsel = { 2, 0, 1, 1},
- .audiomux = { 0, 1, 2, 3, 4},
+ .muxsel = { 2, 0, 1, 1 },
+ .audiomux = { 0, 1, 2, 3, 4 },
.needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = -1,
@@ -482,8 +486,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x3014f,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 0x20001,0x10001, 0, 0,10},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 0x20001,0x10001, 0, 0,10 },
.needs_tvaudio = 1,
.tuner_type = -1,
.tuner_addr = ADDR_UNSET,
@@ -498,8 +502,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 15,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 13, 14, 11, 7, 0, 0},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 13, 14, 11, 7, 0, 0 },
.needs_tvaudio = 1,
.tuner_type = -1,
.tuner_addr = ADDR_UNSET,
@@ -512,8 +516,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 15,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 13, 14, 11, 7, 0, 0},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 13, 14, 11, 7, 0, 0 },
.needs_tvaudio = 1,
.msp34xx_alt = 1,
.pll = PLL_28,
@@ -530,8 +534,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 7,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 0, 2, 1, 3, 4}, /* old: { 0, 1, 2, 3, 4} */
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 0, 2, 1, 3, 4 }, /* old: {0, 1, 2, 3, 4} */
.needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = -1,
@@ -545,8 +549,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 15,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = {0 , 0, 1 , 0, 10},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 0, 0, 1, 0, 10 },
.needs_tvaudio = 1,
.tuner_type = -1,
.tuner_addr = ADDR_UNSET,
@@ -561,7 +565,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x01fe00,
- .muxsel = { 2, 3, 1, 1},
+ .muxsel = { 2, 3, 1, 1 },
#if 0
/* old */
.audiomux = { 0x01c000, 0, 0x018000, 0x014000, 0x002000, 0 },
@@ -580,8 +584,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x8300f8,
- .muxsel = { 2, 3, 1, 1,0},
- .audiomux = { 0x4fa007,0xcfa007,0xcfa007,0xcfa007,0xcfa007,0xcfa007},
+ .muxsel = { 2, 3, 1, 1,0 },
+ .audiomux = { 0x4fa007,0xcfa007,0xcfa007,0xcfa007,0xcfa007,0xcfa007 },
.needs_tvaudio = 1,
.tuner_type = -1,
.tuner_addr = ADDR_UNSET,
@@ -596,8 +600,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0,
- .muxsel = {2, 3, 1, 1},
- .audiomux = {1, 0, 0, 0, 0},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 1, 0, 0, 0, 0 },
.needs_tvaudio = 1,
.tuner_type = -1,
.tuner_addr = ADDR_UNSET,
@@ -610,7 +614,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = -1,
.svhs = -1,
.gpiomask = 0x8dff00,
- .muxsel = { 2, 3, 1, 1},
+ .muxsel = { 2, 3, 1, 1 },
.audiomux = { 0 },
.no_msp34xx = 1,
.tuner_type = -1,
@@ -625,7 +629,7 @@ struct tvcard bttv_tvcards[] = {
.audio_inputs = 3,
.tuner = 0,
.svhs = 2,
- .muxsel = {2, 3, 1, 1},
+ .muxsel = { 2, 3, 1, 1 },
.tuner_type = -1,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -637,8 +641,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x1800,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 0, 0x800, 0x1000, 0x1000, 0x1800},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 0, 0x800, 0x1000, 0x1000, 0x1800 },
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL_I,
.tuner_addr = ADDR_UNSET,
@@ -651,8 +655,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0xc00,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = {0, 1, 0x800, 0x400, 0xc00, 0},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 0, 1, 0x800, 0x400, 0xc00, 0 },
.needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = -1,
@@ -684,8 +688,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0xe00,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = {0x400, 0x400, 0x400, 0x400, 0xc00},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 0x400, 0x400, 0x400, 0x400, 0xc00 },
.needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = -1,
@@ -700,8 +704,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x1f0fff,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 0x20000, 0x30000, 0x10000, 0, 0x40000},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 0x20000, 0x30000, 0x10000, 0, 0x40000 },
.needs_tvaudio = 0,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -715,8 +719,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 3,
.gpiomask = 7,
- .muxsel = { 2, 0, 1, 1},
- .audiomux = { 0, 1, 2, 3, 4},
+ .muxsel = { 2, 0, 1, 1 },
+ .audiomux = { 0, 1, 2, 3, 4 },
.needs_tvaudio = 1,
.tuner_type = -1,
.tuner_addr = ADDR_UNSET,
@@ -729,8 +733,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x1800,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 0, 0x800, 0x1000, 0x1000, 0x1800},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 0, 0x800, 0x1000, 0x1000, 0x1800 },
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_SECAM,
.tuner_addr = ADDR_UNSET,
@@ -745,8 +749,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x1f0fff,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 0x20000, 0x30000, 0x10000, 0x00000, 0x40000},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 0x20000, 0x30000, 0x10000, 0x00000, 0x40000 },
.needs_tvaudio = 0,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -808,7 +812,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x1800, /* 0x8dfe00 */
- .muxsel = { 2, 3, 1, 1},
+ .muxsel = { 2, 3, 1, 1 },
.audiomux = { 0, 0x0800, 0x1000, 0x1000, 0x1800, 0 },
.pll = PLL_28,
.tuner_type = -1,
@@ -822,7 +826,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 3,
.gpiomask = 1,
- .muxsel = { 2, 3, 1, 1},
+ .muxsel = { 2, 3, 1, 1 },
.audiomux = { 1, 0, 0, 0, 0 },
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
@@ -838,7 +842,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = -1,
.svhs = 2,
.gpiomask = 0,
- .muxsel = { 2, 3, 1, 1},
+ .muxsel = { 2, 3, 1, 1 },
.audiomux = { 0 },
.needs_tvaudio = 0,
.tuner_type = 4,
@@ -852,8 +856,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0xffff00,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 0x500, 0, 0x300, 0x900, 0x900},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 0x500, 0, 0x300, 0x900, 0x900 },
.needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
@@ -866,7 +870,7 @@ struct tvcard bttv_tvcards[] = {
.audio_inputs = 1,
.tuner = 0,
.svhs = 2,
- .muxsel = { 2, 3, 1, 1, 0}, /* TV, CVid, SVid, CVid over SVid connector */
+ .muxsel = { 2, 3, 1, 1, 0 }, /* TV, CVid, SVid, CVid over SVid connector */
#if 0
.gpiomask = 0xc33000,
.audiomux = { 0x422000,0x1000,0x0000,0x620000,0x800000 },
@@ -902,8 +906,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x1800,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 0, 0x800, 0x1000, 0x1000, 0x1800},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 0, 0x800, 0x1000, 0x1000, 0x1800 },
.pll = PLL_28,
.tuner_type = -1,
.tuner_addr = ADDR_UNSET,
@@ -918,7 +922,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x1800,
- .muxsel = { 2, 3, 1, 1},
+ .muxsel = { 2, 3, 1, 1 },
.audiomux = { 0, 0x800, 0x1000, 0x1000, 0x1800, 0 },
.pll = PLL_28,
.tuner_type = -1,
@@ -948,7 +952,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x551e00,
- .muxsel = { 2, 3, 1, 0},
+ .muxsel = { 2, 3, 1, 0 },
.audiomux = { 0x551400, 0x551200, 0, 0, 0x551c00, 0x551200 },
.needs_tvaudio = 1,
.pll = PLL_28,
@@ -964,8 +968,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x03000F,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 2, 0xd0001, 0, 0, 1},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 2, 0xd0001, 0, 0, 1 },
.needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = -1,
@@ -981,8 +985,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 7,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 4, 0, 2, 3, 1},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 4, 0, 2, 3, 1 },
.no_msp34xx = 1,
.needs_tvaudio = 1,
.tuner_type = TUNER_PHILIPS_NTSC,
@@ -998,8 +1002,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 15,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 13, 4, 11, 7, 0, 0},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 13, 4, 11, 7, 0, 0 },
.needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = -1,
@@ -1031,8 +1035,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0xe00b,
- .muxsel = {2, 3, 1, 1},
- .audiomux = {0xff9ff6, 0xff9ff6, 0xff1ff7, 0, 0xff3ffc},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 0xff9ff6, 0xff9ff6, 0xff1ff7, 0, 0xff3ffc },
.no_msp34xx = 1,
.tuner_type = -1,
.tuner_addr = ADDR_UNSET,
@@ -1047,8 +1051,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = -1,
.gpiomask = 3,
- .muxsel = {2, 3, 1, 1},
- .audiomux = {1, 1, 0, 2, 3},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 1, 1, 0, 2, 3 },
.no_msp34xx = 1,
.pll = PLL_NONE,
.tuner_type = -1,
@@ -1062,8 +1066,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = -1,
.svhs = 3,
.gpiomask = 0,
- .muxsel = { 2, 3, 1, 0, 0},
- .audiomux = {0 },
+ .muxsel = { 2, 3, 1, 0, 0 },
+ .audiomux = { 0 },
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = -1,
@@ -1077,8 +1081,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0xbcf03f,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 0xbc803f, 0xbc903f, 0xbcb03f, 0, 0xbcb03f},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 0xbc803f, 0xbc903f, 0xbcb03f, 0, 0xbcb03f },
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = 21,
@@ -1092,7 +1096,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x70000,
- .muxsel = { 2, 3, 1, 1},
+ .muxsel = { 2, 3, 1, 1 },
.audiomux = { 0x20000, 0x30000, 0x10000, 0, 0x40000, 0x20000 },
.needs_tvaudio = 1,
.no_msp34xx = 1,
@@ -1111,8 +1115,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 15,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = {2,0,0,0,1},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 2, 0, 0, 0, 1 },
.needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = -1,
@@ -1126,8 +1130,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x010f00,
- .muxsel = {2, 3, 0, 0},
- .audiomux = {0x10000, 0, 0x10000, 0, 0, 0},
+ .muxsel = { 2, 3, 0, 0 },
+ .audiomux = { 0x10000, 0, 0x10000, 0, 0, 0 },
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_ALPS_TSHC6_NTSC,
@@ -1168,8 +1172,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 7,
- .muxsel = { 2, 0, 1, 1},
- .audiomux = { 0, 1, 2, 3, 4},
+ .muxsel = { 2, 0, 1, 1 },
+ .audiomux = { 0, 1, 2, 3, 4 },
.pll = PLL_28,
.tuner_type = -1 /* TUNER_ALPS_TMDH2_NTSC */,
.tuner_addr = ADDR_UNSET,
@@ -1185,8 +1189,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 3,
.gpiomask = 0x03000F,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 1, 0xd0001, 0, 0, 10},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 1, 0xd0001, 0, 0, 10 },
/* sound path (5 sources):
MUX1 (mask 0x03), Enable Pin 0x08 (0=enable, 1=disable)
0= ext. Audio IN
@@ -1211,7 +1215,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x1c,
- .muxsel = { 2, 3, 1, 1},
+ .muxsel = { 2, 3, 1, 1 },
.audiomux = { 0, 0, 0x10, 8, 4 },
.needs_tvaudio = 1,
.pll = PLL_28,
@@ -1232,7 +1236,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x18e0,
- .muxsel = { 2, 3, 1, 1},
+ .muxsel = { 2, 3, 1, 1 },
.audiomux = { 0x0000,0x0800,0x1000,0x1000,0x18e0 },
/* For cards with tda9820/tda9821:
0x0000: Tuner normal stereo
@@ -1252,8 +1256,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0xF,
- .muxsel = { 2, 3, 1, 0},
- .audiomux = { 2, 0, 0, 0, 10},
+ .muxsel = { 2, 3, 1, 0 },
+ .audiomux = { 2, 0, 0, 0, 10 },
.needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_TEMIC_PAL,
@@ -1270,7 +1274,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x1800,
- .muxsel = { 2, 3, 1, 1},
+ .muxsel = { 2, 3, 1, 1 },
.audiomux = { 0, 0x800, 0x1000, 0x1000, 0x1800, 0 },
.pll = PLL_28,
.tuner_type = 5,
@@ -1320,8 +1324,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x03000F,
- .muxsel = { 2, 3, 1, 0},
- .audiomux = { 2,0,0,0,1 },
+ .muxsel = { 2, 3, 1, 0 },
+ .audiomux = { 2, 0, 0, 0, 1 },
.pll = PLL_28,
.tuner_type = 0,
.tuner_addr = ADDR_UNSET,
@@ -1337,8 +1341,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = -1,
.gpiomask = 11,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 2, 0, 0, 1, 8},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 2, 0, 0, 1, 8 },
.pll = PLL_35,
.tuner_type = TUNER_TEMIC_PAL,
.tuner_addr = ADDR_UNSET,
@@ -1352,7 +1356,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = -1,
.svhs = 1,
.gpiomask = 0xF,
- .muxsel = { 2, 2},
+ .muxsel = { 2, 2 },
.audiomux = { },
.no_msp34xx = 1,
.needs_tvaudio = 0,
@@ -1371,8 +1375,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0xFF,
- .muxsel = { 2, 3, 1, 0},
- .audiomux = { 1, 0, 4, 4, 9},
+ .muxsel = { 2, 3, 1, 0 },
+ .audiomux = { 1, 0, 4, 4, 9 },
.needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
@@ -1388,7 +1392,7 @@ struct tvcard bttv_tvcards[] = {
.svhs = 2,
.gpiomask = 0xf03f,
.muxsel = { 2, 3, 1, 0 },
- .audiomux = { 0xbffe, 0, 0xbfff, 0, 0xbffe},
+ .audiomux = { 0xbffe, 0, 0xbfff, 0, 0xbffe },
.pll = PLL_28,
.tuner_type = TUNER_TEMIC_4006FN5_MULTI_PAL,
.tuner_addr = ADDR_UNSET,
@@ -1404,8 +1408,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = -1,
.gpiomask = 1,
- .muxsel = { 2, 3, 0, 1},
- .audiomux = { 0, 0, 1, 0, 0},
+ .muxsel = { 2, 3, 0, 1 },
+ .audiomux = { 0, 0, 1, 0, 0 },
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_TEMIC_4006FN5_MULTI_PAL,
@@ -1420,7 +1424,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x18e0,
- .muxsel = { 2, 3, 0, 1},
+ .muxsel = { 2, 3, 0, 1 },
/* Radio changed from 1e80 to 0x800 to make
FlyVideo2000S in .hu happy (gm)*/
/* -dk-???: set mute=0x1800 for tda9874h daughterboard */
@@ -1441,8 +1445,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0xffff00,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 0x500, 0x500, 0x300, 0x900, 0x900},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 0x500, 0x500, 0x300, 0x900, 0x900 },
.needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
@@ -1458,8 +1462,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x010f00,
- .muxsel = {2, 3, 0, 0},
- .audiomux = {0x10000, 0, 0x10000, 0, 0, 0},
+ .muxsel = { 2, 3, 0, 0 },
+ .audiomux = { 0x10000, 0, 0x10000, 0, 0, 0 },
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_SHARP_2U5JF5540_NTSC,
@@ -1483,7 +1487,7 @@ struct tvcard bttv_tvcards[] = {
.audiomux = {0x947fff, 0x987fff,0x947fff,0x947fff, 0x947fff},
/* tvtuner, radio, external,internal, mute, stereo
* tuner, Composit, SVid, Composit-on-Svid-adapter */
- .muxsel = { 2, 3 ,0 ,1},
+ .muxsel = { 2, 3, 0, 1 },
.tuner_type = TUNER_MT2032,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -1511,8 +1515,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 15,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 0, 0, 11, 7, 13, 0}, /* TV and Radio with same GPIO ! */
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 0, 0, 11, 7, 13, 0 }, /* TV and Radio with same GPIO ! */
.needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = 25,
@@ -1533,7 +1537,7 @@ struct tvcard bttv_tvcards[] = {
.audio_inputs = 0,
.tuner = -1,
.svhs = 2,
- .muxsel = { 2, 3, 1, 1}, /* AV1, AV2, SVHS, CVid adapter on SVHS */
+ .muxsel = { 2, 3, 1, 1 }, /* AV1, AV2, SVHS, CVid adapter on SVHS */
.pll = PLL_28,
.no_msp34xx = 1,
.tuner_type = UNSET,
@@ -1579,7 +1583,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = -1,
.svhs = 4,
.gpiomask = 0,
- .muxsel = { 2, 3, 1, 0, 0},
+ .muxsel = { 2, 3, 1, 0, 0 },
.audiomux = { 0 },
.needs_tvaudio = 0,
.tuner_type = -1,
@@ -1692,7 +1696,7 @@ struct tvcard bttv_tvcards[] = {
.name = "DSP Design TCVIDEO",
.video_inputs = 4,
.svhs = -1,
- .muxsel = { 2, 3, 1, 0},
+ .muxsel = { 2, 3, 1, 0 },
.pll = PLL_28,
.tuner_type = -1,
.tuner_addr = ADDR_UNSET,
@@ -1706,7 +1710,7 @@ struct tvcard bttv_tvcards[] = {
.audio_inputs = 1,
.tuner = 0,
.svhs = 2,
- .muxsel = { 2, 0, 1, 1},
+ .muxsel = { 2, 0, 1, 1 },
.needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = -1,
@@ -1723,7 +1727,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x0f0f80,
- .muxsel = {2, 3, 1, 0},
+ .muxsel = { 2, 3, 1, 0 },
.audiomux = {0x030000, 0x010000, 0, 0, 0x020000, 0},
.no_msp34xx = 1,
.pll = PLL_28,
@@ -1940,7 +1944,7 @@ struct tvcard bttv_tvcards[] = {
.no_msp34xx = 1,
.no_tda9875 = 1,
.no_tda7432 = 1,
- .muxsel = { 3, 0, 1, 2},
+ .muxsel = { 3, 0, 1, 2 },
.pll = PLL_28,
.no_gpioirq = 1,
.has_dvb = 1,
@@ -1953,7 +1957,7 @@ struct tvcard bttv_tvcards[] = {
.svhs = 3,
.gpiomask = 2,
/* TV, Comp1, Composite over SVID con, SVID */
- .muxsel = { 2, 3, 1, 1},
+ .muxsel = { 2, 3, 1, 1 },
.audiomux = { 2, 2, 0, 0, 0 },
.pll = PLL_28,
.has_radio = 1,
@@ -2070,7 +2074,7 @@ struct tvcard bttv_tvcards[] = {
.audio_inputs = 1,
.tuner = 0,
.svhs = 2,
- .muxsel = { 2, 3, 1, 1}, /* Tuner, CVid, SVid, CVid over SVid connector */
+ .muxsel = { 2, 3, 1, 1 }, /* Tuner, CVid, SVid, CVid over SVid connector */
.gpiomask = 0,
.no_tda9875 = 1,
.no_tda7432 = 1,
@@ -2124,7 +2128,7 @@ struct tvcard bttv_tvcards[] = {
.video_inputs = 1,
.tuner = -1,
.svhs = -1,
- .muxsel = { 2, 3, 1, 0},
+ .muxsel = { 2, 3, 1, 0 },
.no_msp34xx = 1,
.no_tda9875 = 1,
.no_tda7432 = 1,
@@ -2136,7 +2140,6 @@ struct tvcard bttv_tvcards[] = {
.has_remote = 1,
.gpiomask = 0x1b,
.no_gpioirq = 1,
- .any_irq = 1,
},
[BTTV_BOARD_PV143] = {
/* Jorge Boncompte - DTI2 <jorge@dti2.net> */
@@ -2163,7 +2166,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = -1, /* card has no tuner */
.svhs = 3,
.gpiomask = 0x00,
- .muxsel = { 2, 3, 1, 0},
+ .muxsel = { 2, 3, 1, 0 },
.audiomux = { 0, 0, 0, 0, 0, 0 }, /* card has no audio */
.needs_tvaudio = 1,
.pll = PLL_28,
@@ -2178,7 +2181,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = -1, /* card has no tuner */
.svhs = 3,
.gpiomask = 0x00,
- .muxsel = { 2, 3, 1, 1},
+ .muxsel = { 2, 3, 1, 1 },
.audiomux = { 0, 0, 0, 0, 0, 0 }, /* card has no audio */
.needs_tvaudio = 1,
.pll = PLL_28,
@@ -2265,7 +2268,7 @@ struct tvcard bttv_tvcards[] = {
.audio_inputs = 1,
.tuner = 0,
.svhs = 2,
- .muxsel = { 2, 3, 1, 0},
+ .muxsel = { 2, 3, 1, 0 },
.tuner_type = TUNER_PHILIPS_ATSC,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -2288,7 +2291,7 @@ struct tvcard bttv_tvcards[] = {
.audio_inputs = 0,
.svhs = 1,
.tuner = -1,
- .muxsel = { 3, 1, 1, 3}, /* Vid In, SVid In, Vid over SVid in connector */
+ .muxsel = { 3, 1, 1, 3 }, /* Vid In, SVid In, Vid over SVid in connector */
.no_msp34xx = 1,
.no_tda9875 = 1,
.no_tda7432 = 1,
@@ -2304,8 +2307,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 3,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 1, 1, 1, 1, 0},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 1, 1, 1, 1, 0 },
.needs_tvaudio = 1,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -2324,7 +2327,7 @@ struct tvcard bttv_tvcards[] = {
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.pll = PLL_28,
- .muxsel = { 2, 2, 2, 2},
+ .muxsel = { 2, 2, 2, 2 },
.gpiomask = 0x3F,
.muxsel_hook = gvc1100_muxsel,
},
@@ -2335,8 +2338,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x008007,
- .muxsel = {2, 3, 0, 0},
- .audiomux = {0, 0, 0, 0, 0x000003, 0},
+ .muxsel = { 2, 3, 0, 0 },
+ .audiomux = { 0, 0, 0, 0, 0x000003, 0 },
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -2371,7 +2374,7 @@ struct tvcard bttv_tvcards[] = {
.svhs = 2,
.needs_tvaudio = 0,
.gpiomask = 0x68,
- .muxsel = { 2, 3, 1},
+ .muxsel = { 2, 3, 1 },
.audiomux = { 0x68, 0x68, 0x61, 0x61, 0x00 },
.pll = PLL_28,
},
@@ -2430,7 +2433,7 @@ struct tvcard bttv_tvcards[] = {
.svhs = 2,
.gpiomask = 0x0000000f,
.muxsel = { 2, 1, 1 },
- .audiomux = { 0x02, 0x00, 0x00, 0x00, 0x00},
+ .audiomux = { 0x02, 0x00, 0x00, 0x00, 0x00 },
.tuner_type = TUNER_TEMIC_PAL,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -2463,7 +2466,7 @@ struct tvcard bttv_tvcards[] = {
.video_inputs = 2,
.tuner = -1,
.svhs = 1,
- .muxsel = { 3, 1, 2, 0}, /* Comp0, S-Video, ?, ? */
+ .muxsel = { 3, 1, 2, 0 }, /* Comp0, S-Video, ?, ? */
.no_msp34xx = 1,
.no_tda9875 = 1,
.no_tda7432 = 1,
@@ -2520,8 +2523,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = -1,
.gpiomask = 0xFF,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 2, 0, 0, 0, 10},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 2, 0, 0, 0, 10 },
.needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
@@ -2554,8 +2557,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x3f,
- .muxsel = {2, 3, 1, 0},
- .audiomux = {0x31, 0x31, 0x31, 0x31, 0x31, 0x31},
+ .muxsel = { 2, 3, 1, 0 },
+ .audiomux = { 0x31, 0x31, 0x31, 0x31, 0x31, 0x31 },
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_NTSC_M,
@@ -2573,12 +2576,12 @@ struct tvcard bttv_tvcards[] = {
.audio_inputs = 1,
.tuner = 0,
.svhs = 2,
- .muxsel = { 2, 3, 1, 0},
+ .muxsel = { 2, 3, 1, 0 },
.tuner_type = TUNER_PHILIPS_NTSC,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.gpiomask = 0x008007,
- .audiomux = { 0, 0x000001,0,0, 0},
+ .audiomux = { 0, 0x000001,0,0, 0 },
.needs_tvaudio = 1,
.has_radio = 1,
},
@@ -2669,7 +2672,7 @@ struct tvcard bttv_tvcards[] = {
.audio_inputs = 1,
.tuner = 0,
.svhs = 2,
- .muxsel = { 2, 3, 1, 0},
+ .muxsel = { 2, 3, 1, 0 },
.tuner_type = -1,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -2703,7 +2706,7 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x01fe00,
- .muxsel = { 2, 3, 1, 1},
+ .muxsel = { 2, 3, 1, 1 },
.audiomux = { 0x001e00, 0, 0x018000, 0x014000, 0x002000, 0 },
.needs_tvaudio = 1,
.pll = PLL_28,
@@ -2791,8 +2794,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 15,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = {2,0,0,0,1},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 2, 0, 0, 0, 1 },
.needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = 2,
@@ -2807,8 +2810,8 @@ struct tvcard bttv_tvcards[] = {
.tuner = 0,
.svhs = 2,
.gpiomask = 0x108007,
- .muxsel = { 2, 3, 1, 1},
- .audiomux = { 100000, 100002, 100002, 100000},
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 100000, 100002, 100002, 100000 },
.no_msp34xx = 1,
.no_tda9875 = 1,
.no_tda7432 = 1,
@@ -2817,6 +2820,45 @@ struct tvcard bttv_tvcards[] = {
.tuner_addr = ADDR_UNSET,
.has_radio = 1,
},
+ /* ---- card 0x8f ---------------------------------- */
+ [BTTV_BOARD_HAUPPAUGE_IMPACTVCB] = {
+ .name = "Hauppauge ImpactVCB (bt878)",
+ .video_inputs = 4,
+ .audio_inputs = 0,
+ .tuner = -1,
+ .svhs = -1,
+ .gpiomask = 0x0f, /* old: 7 */
+ .muxsel = { 0, 1, 3, 2 }, /* Composite 0-3 */
+ .no_msp34xx = 1,
+ .no_tda9875 = 1,
+ .no_tda7432 = 1,
+ .tuner_type = -1,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ },
+ [BTTV_BOARD_MACHTV_MAGICTV] = {
+ /* Julian Calaby <julian.calaby@gmail.com>
+ * Slightly different from original MachTV definition (0x60)
+
+ * FIXME: RegSpy says gpiomask should be "0x001c800f", but it
+ * stuffs up remote chip. Bug is a pin on the jaecs is not set
+ * properly (methinks) causing no keyup bits being set */
+
+ .name = "MagicTV", /* rebranded MachTV */
+ .video_inputs = 3,
+ .audio_inputs = 1,
+ .tuner = 0,
+ .svhs = 2,
+ .gpiomask = 7,
+ .muxsel = { 2, 3, 1, 1 },
+ .audiomux = { 0, 1, 2, 3, 4 },
+ .tuner_type = TUNER_TEMIC_4009FR5_PAL,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .pll = PLL_28,
+ .has_radio = 1,
+ .has_remote = 1,
+ },
};
static const unsigned int bttv_num_tvcards = ARRAY_SIZE(bttv_tvcards);
@@ -3037,26 +3079,33 @@ static void miro_pinnacle_gpio(struct bttv *btv)
switch (id) {
case 1:
info = "PAL / mono";
+ btv->tda9887_conf = TDA9887_INTERCARRIER;
break;
case 2:
info = "PAL+SECAM / stereo";
btv->has_radio = 1;
+ btv->tda9887_conf = TDA9887_QSS;
break;
case 3:
info = "NTSC / stereo";
btv->has_radio = 1;
+ btv->tda9887_conf = TDA9887_QSS;
break;
case 4:
info = "PAL+SECAM / mono";
+ btv->tda9887_conf = TDA9887_QSS;
break;
case 5:
info = "NTSC / mono";
+ btv->tda9887_conf = TDA9887_INTERCARRIER;
break;
case 6:
info = "NTSC / stereo";
+ btv->tda9887_conf = TDA9887_INTERCARRIER;
break;
case 7:
info = "PAL / stereo";
+ btv->tda9887_conf = TDA9887_INTERCARRIER;
break;
default:
info = "oops: unknown card";
@@ -3067,8 +3116,7 @@ static void miro_pinnacle_gpio(struct bttv *btv)
printk(KERN_INFO
"bttv%d: pinnacle/mt: id=%d info=\"%s\" radio=%s\n",
btv->c.nr, id, info, btv->has_radio ? "yes" : "no");
- btv->tuner_type = 33;
- btv->pinnacle_id = id;
+ btv->tuner_type = TUNER_MT2032;
}
}
@@ -3370,9 +3418,9 @@ void __devinit bttv_init_card2(struct bttv *btv)
bttv_call_i2c_clients(btv, TUNER_SET_TYPE_ADDR, &tun_setup);
}
- if (btv->pinnacle_id != UNSET) {
- bttv_call_i2c_clients(btv, AUDC_CONFIG_PINNACLE,
- &btv->pinnacle_id);
+ if (btv->tda9887_conf) {
+ bttv_call_i2c_clients(btv, TDA9887_SET_CONFIG,
+ &btv->tda9887_conf);
}
btv->svhs = bttv_tvcards[btv->c.type].svhs;
@@ -3387,8 +3435,6 @@ void __devinit bttv_init_card2(struct bttv *btv)
btv->has_remote=1;
if (!bttv_tvcards[btv->c.type].no_gpioirq)
btv->gpioirq=1;
- if (bttv_tvcards[btv->c.type].any_irq)
- btv->any_irq = 1;
if (bttv_tvcards[btv->c.type].audio_hook)
btv->audio_hook=bttv_tvcards[btv->c.type].audio_hook;
@@ -3424,7 +3470,7 @@ void __devinit bttv_init_card2(struct bttv *btv)
/* tuner modules */
tda9887 = 0;
- if (btv->pinnacle_id != UNSET)
+ if (btv->tda9887_conf)
tda9887 = 1;
if (0 == tda9887 && 0 == bttv_tvcards[btv->c.type].has_dvb &&
bttv_I2CRead(btv, I2C_TDA9887, "TDA9887") >=0)
@@ -3471,6 +3517,21 @@ static void __devinit hauppauge_eeprom(struct bttv *btv)
tveeprom_hauppauge_analog(&btv->i2c_client, &tv, eeprom_data);
btv->tuner_type = tv.tuner_type;
btv->has_radio = tv.has_radio;
+
+ printk("bttv%d: Hauppauge eeprom indicates model#%d\n",
+ btv->c.nr, tv.model);
+
+ /*
+ * Some of the 878 boards have duplicate PCI IDs. Switch the board
+ * type based on model #.
+ */
+ if (tv.model == 64900) {
+ printk("bttv%d: Switching board type from %s to %s\n",
+ btv->c.nr,
+ bttv_tvcards[btv->c.type].name,
+ bttv_tvcards[BTTV_BOARD_HAUPPAUGE_IMPACTVCB].name);
+ btv->c.type = BTTV_BOARD_HAUPPAUGE_IMPACTVCB;
+ }
}
static int terratec_active_radio_upgrade(struct bttv *btv)
diff --git a/drivers/media/video/bttv-driver.c b/drivers/media/video/bttv-driver.c
index 1ddf9ba613e..0e697034678 100644
--- a/drivers/media/video/bttv-driver.c
+++ b/drivers/media/video/bttv-driver.c
@@ -34,13 +34,14 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kdev_t.h>
+#include "bttvp.h"
+#include <media/v4l2-common.h>
+
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/byteorder.h>
-#include "bttvp.h"
-
#include "rds.h"
@@ -210,6 +211,9 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
.vdelay = 0x20,
.vbipack = 255,
.sram = 0,
+ /* ITU-R frame line number of the first VBI line
+ we can capture, of the first and second field. */
+ .vbistart = { 7, 320 },
},{
.v4l2_id = V4L2_STD_NTSC_M,
.name = "NTSC",
@@ -226,6 +230,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
.vdelay = 0x1a,
.vbipack = 144,
.sram = 1,
+ .vbistart = { 10, 273 },
},{
.v4l2_id = V4L2_STD_SECAM,
.name = "SECAM",
@@ -242,6 +247,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
.vdelay = 0x20,
.vbipack = 255,
.sram = 0, /* like PAL, correct? */
+ .vbistart = { 7, 320 },
},{
.v4l2_id = V4L2_STD_PAL_Nc,
.name = "PAL-Nc",
@@ -258,6 +264,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
.vdelay = 0x1a,
.vbipack = 144,
.sram = -1,
+ .vbistart = { 7, 320 },
},{
.v4l2_id = V4L2_STD_PAL_M,
.name = "PAL-M",
@@ -274,6 +281,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
.vdelay = 0x1a,
.vbipack = 144,
.sram = -1,
+ .vbistart = { 10, 273 },
},{
.v4l2_id = V4L2_STD_PAL_N,
.name = "PAL-N",
@@ -290,6 +298,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
.vdelay = 0x20,
.vbipack = 144,
.sram = -1,
+ .vbistart = { 7, 320 },
},{
.v4l2_id = V4L2_STD_NTSC_M_JP,
.name = "NTSC-JP",
@@ -306,6 +315,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
.vdelay = 0x16,
.vbipack = 144,
.sram = -1,
+ .vbistart = { 10, 273 },
},{
/* that one hopefully works with the strange timing
* which video recorders produce when playing a NTSC
@@ -326,6 +336,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
.vbipack = 255,
.vtotal = 524,
.sram = -1,
+ .vbistart = { 10, 273 },
}
};
static const unsigned int BTTV_TVNORMS = ARRAY_SIZE(bttv_tvnorms);
@@ -1510,14 +1521,6 @@ static struct videobuf_queue_ops bttv_video_qops = {
.buf_release = buffer_release,
};
-static const char *v4l1_ioctls[] = {
- "?", "CGAP", "GCHAN", "SCHAN", "GTUNER", "STUNER", "GPICT", "SPICT",
- "CCAPTURE", "GWIN", "SWIN", "GFBUF", "SFBUF", "KEY", "GFREQ",
- "SFREQ", "GAUDIO", "SAUDIO", "SYNC", "MCAPTURE", "GMBUF", "GUNIT",
- "GCAPTURE", "SCAPTURE", "SPLAYMODE", "SWRITEMODE", "GPLAYINFO",
- "SMICROCODE", "GVBIFMT", "SVBIFMT" };
-#define V4L1_IOCTLS ARRAY_SIZE(v4l1_ioctls)
-
static int bttv_common_ioctls(struct bttv *btv, unsigned int cmd, void *arg)
{
switch (cmd) {
@@ -2206,22 +2209,9 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
unsigned long flags;
int retval = 0;
- if (bttv_debug > 1) {
- switch (_IOC_TYPE(cmd)) {
- case 'v':
- printk("bttv%d: ioctl 0x%x (v4l1, VIDIOC%s)\n",
- btv->c.nr, cmd, (_IOC_NR(cmd) < V4L1_IOCTLS) ?
- v4l1_ioctls[_IOC_NR(cmd)] : "???");
- break;
- case 'V':
- printk("bttv%d: ioctl 0x%x (v4l2, %s)\n",
- btv->c.nr, cmd, v4l2_ioctl_names[_IOC_NR(cmd)]);
- break;
- default:
- printk("bttv%d: ioctl 0x%x (???)\n",
- btv->c.nr, cmd);
- }
- }
+ if (bttv_debug > 1)
+ v4l_print_ioctl(btv->c.name, cmd);
+
if (btv->errors)
bttv_reinit_bt848(btv);
@@ -2570,10 +2560,10 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
fmt->count[0] = fmt2.fmt.vbi.count[0];
fmt->start[1] = fmt2.fmt.vbi.start[1];
fmt->count[1] = fmt2.fmt.vbi.count[1];
- if (fmt2.fmt.vbi.flags & VBI_UNSYNC)
- fmt->flags |= V4L2_VBI_UNSYNC;
- if (fmt2.fmt.vbi.flags & VBI_INTERLACED)
- fmt->flags |= V4L2_VBI_INTERLACED;
+ if (fmt2.fmt.vbi.flags & V4L2_VBI_UNSYNC)
+ fmt->flags |= VBI_UNSYNC;
+ if (fmt2.fmt.vbi.flags & V4L2_VBI_INTERLACED)
+ fmt->flags |= VBI_INTERLACED;
return 0;
}
case VIDIOCSVBIFMT:
@@ -3120,6 +3110,7 @@ static struct file_operations bttv_fops =
.open = bttv_open,
.release = bttv_release,
.ioctl = bttv_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
.read = bttv_read,
.mmap = bttv_mmap,
@@ -3229,6 +3220,7 @@ static int radio_do_ioctl(struct inode *inode, struct file *file,
case VIDIOCSFREQ:
case VIDIOCGAUDIO:
case VIDIOCSAUDIO:
+ case VIDIOC_LOG_STATUS:
return bttv_common_ioctls(btv,cmd,arg);
default:
@@ -3701,8 +3693,8 @@ static irqreturn_t bttv_irq(int irq, void *dev_id, struct pt_regs * regs)
btv=(struct bttv *)dev_id;
- if (btv->any_irq)
- handled = bttv_any_irq(&btv->c);
+ if (btv->custom_irq)
+ handled = btv->custom_irq(btv);
count=0;
while (1) {
@@ -3738,9 +3730,9 @@ static irqreturn_t bttv_irq(int irq, void *dev_id, struct pt_regs * regs)
if (astat&BT848_INT_VSYNC)
btv->field_count++;
- if (astat & BT848_INT_GPINT) {
+ if ((astat & BT848_INT_GPINT) && btv->remote) {
wake_up(&btv->gpioq);
- bttv_gpio_irq(&btv->c);
+ bttv_input_irq(btv);
}
if (astat & BT848_INT_I2CDONE) {
@@ -3946,7 +3938,6 @@ static int __devinit bttv_probe(struct pci_dev *dev,
btv->i2c_rc = -1;
btv->tuner_type = UNSET;
- btv->pinnacle_id = UNSET;
btv->new_input = UNSET;
btv->has_radio=radio[btv->c.nr];
@@ -4065,11 +4056,11 @@ static int __devinit bttv_probe(struct pci_dev *dev,
}
/* add subdevices */
- if (btv->has_remote)
- bttv_sub_add_device(&btv->c, "remote");
if (bttv_tvcards[btv->c.type].has_dvb)
bttv_sub_add_device(&btv->c, "dvb");
+ bttv_input_init(btv);
+
/* everything is fine */
bttv_num++;
return 0;
@@ -4104,6 +4095,7 @@ static void __devexit bttv_remove(struct pci_dev *pci_dev)
/* tell gpio modules we are leaving ... */
btv->shutdown=1;
wake_up(&btv->gpioq);
+ bttv_input_fini(btv);
bttv_sub_del_devices(&btv->c);
/* unregister i2c_bus + input */
@@ -4253,7 +4245,7 @@ static int bttv_init_module(void)
bttv_check_chipset();
bus_register(&bttv_sub_bus_type);
- return pci_module_init(&bttv_pci_driver);
+ return pci_register_driver(&bttv_pci_driver);
}
static void bttv_cleanup_module(void)
diff --git a/drivers/media/video/bttv-gpio.c b/drivers/media/video/bttv-gpio.c
index 616a5b7e510..d64accc17b0 100644
--- a/drivers/media/video/bttv-gpio.c
+++ b/drivers/media/video/bttv-gpio.c
@@ -64,10 +64,9 @@ int bttv_sub_add_device(struct bttv_core *core, char *name)
struct bttv_sub_device *sub;
int err;
- sub = kmalloc(sizeof(*sub),GFP_KERNEL);
+ sub = kzalloc(sizeof(*sub),GFP_KERNEL);
if (NULL == sub)
return -ENOMEM;
- memset(sub,0,sizeof(*sub));
sub->core = core;
sub->dev.parent = &core->pci->dev;
@@ -113,24 +112,6 @@ void bttv_gpio_irq(struct bttv_core *core)
}
}
-int bttv_any_irq(struct bttv_core *core)
-{
- struct bttv_sub_driver *drv;
- struct bttv_sub_device *dev;
- struct list_head *item;
- int handled = 0;
-
- list_for_each(item,&core->subs) {
- dev = list_entry(item,struct bttv_sub_device,list);
- drv = to_bttv_sub_drv(dev->dev.driver);
- if (drv && drv->any_irq) {
- if (drv->any_irq(dev))
- handled = 1;
- }
- }
- return handled;
-}
-
/* ----------------------------------------------------------------------- */
/* external: sub-driver register/unregister */
diff --git a/drivers/media/video/bttv-i2c.c b/drivers/media/video/bttv-i2c.c
index 77619eb131f..748d630c7fe 100644
--- a/drivers/media/video/bttv-i2c.c
+++ b/drivers/media/video/bttv-i2c.c
@@ -28,10 +28,11 @@
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <asm/io.h>
#include "bttvp.h"
+#include <media/v4l2-common.h>
+#include <linux/jiffies.h>
+#include <asm/io.h>
static struct i2c_algo_bit_data bttv_i2c_algo_bit_template;
static struct i2c_adapter bttv_i2c_adap_sw_template;
@@ -105,10 +106,8 @@ static struct i2c_algo_bit_data bttv_i2c_algo_bit_template = {
static struct i2c_adapter bttv_i2c_adap_sw_template = {
.owner = THIS_MODULE,
-#ifdef I2C_CLASS_TV_ANALOG
.class = I2C_CLASS_TV_ANALOG,
-#endif
- .name = "bt848",
+ .name = "bttv",
.id = I2C_HW_B_BT848,
.client_register = attach_inform,
};
@@ -275,10 +274,8 @@ static struct i2c_algorithm bttv_algo = {
};
static struct i2c_adapter bttv_i2c_adap_hw_template = {
- .owner = THIS_MODULE,
-#ifdef I2C_CLASS_TV_ANALOG
+ .owner = THIS_MODULE,
.class = I2C_CLASS_TV_ANALOG,
-#endif
.name = "bt878",
.id = I2C_HW_B_BT848 /* FIXME */,
.algo = &bttv_algo,
@@ -300,7 +297,7 @@ static int attach_inform(struct i2c_client *client)
if (bttv_debug)
printk(KERN_DEBUG "bttv%d: %s i2c attach [addr=0x%x,client=%s]\n",
- btv->c.nr,client->driver->name,client->addr,
+ btv->c.nr, client->driver->driver.name, client->addr,
client->name);
if (!client->driver->command)
return 0;
@@ -441,12 +438,10 @@ int __devinit init_bttv_i2c(struct bttv *btv)
i2c_set_adapdata(&btv->c.i2c_adap, btv);
btv->i2c_client.adapter = &btv->c.i2c_adap;
-#ifdef I2C_CLASS_TV_ANALOG
if (bttv_tvcards[btv->c.type].no_video)
btv->c.i2c_adap.class &= ~I2C_CLASS_TV_ANALOG;
if (bttv_tvcards[btv->c.type].has_dvb)
btv->c.i2c_adap.class |= I2C_CLASS_TV_DIGITAL;
-#endif
if (btv->use_i2c_hw) {
btv->i2c_rc = i2c_add_adapter(&btv->c.i2c_adap);
diff --git a/drivers/media/video/ir-kbd-gpio.c b/drivers/media/video/bttv-input.c
index de1385e5d05..221b36e7f39 100644
--- a/drivers/media/video/ir-kbd-gpio.c
+++ b/drivers/media/video/bttv-input.c
@@ -24,11 +24,9 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/input.h>
-#include <linux/pci.h>
-
-#include <media/ir-common.h>
#include "bttv.h"
+#include "bttvp.h"
/* ---------------------------------------------------------------------- */
@@ -156,9 +154,6 @@ static IR_KEYTAB_TYPE ir_codes_apac_viewcomp[IR_KEYTAB_SIZE] = {
/* ---------------------------------------------------------------------- */
-/* Ricardo Cerqueira <v4l@cerqueira.org> */
-/* Weird matching, since the remote has "uncommon" keys */
-
static IR_KEYTAB_TYPE ir_codes_conceptronic[IR_KEYTAB_SIZE] = {
[ 30 ] = KEY_POWER, // power
@@ -279,34 +274,6 @@ static IR_KEYTAB_TYPE ir_codes_nebula[IR_KEYTAB_SIZE] = {
[0x36] = KEY_PC
};
-struct IR {
- struct bttv_sub_device *sub;
- struct input_dev *input;
- struct ir_input_state ir;
- char name[32];
- char phys[32];
-
- /* Usual gpio signalling */
-
- u32 mask_keycode;
- u32 mask_keydown;
- u32 mask_keyup;
- u32 polling;
- u32 last_gpio;
- struct work_struct work;
- struct timer_list timer;
-
- /* RC5 gpio */
- u32 rc5_gpio;
- struct timer_list timer_end; /* timer_end for code completion */
- struct timer_list timer_keyup; /* timer_end for key release */
- u32 last_rc5; /* last good rc5 code */
- u32 last_bit; /* last raw bit seen */
- u32 code; /* raw code under construction */
- struct timeval base_time; /* time of last seen code */
- int active; /* building raw code */
-};
-
static int debug;
module_param(debug, int, 0644); /* debug level (0,1,2) */
static int repeat_delay = 500;
@@ -314,31 +281,17 @@ module_param(repeat_delay, int, 0644);
static int repeat_period = 33;
module_param(repeat_period, int, 0644);
-#define DEVNAME "ir-kbd-gpio"
-#define dprintk(fmt, arg...) if (debug) \
- printk(KERN_DEBUG DEVNAME ": " fmt , ## arg)
-
-static void ir_irq(struct bttv_sub_device *sub);
-static int ir_probe(struct device *dev);
-static int ir_remove(struct device *dev);
-
-static struct bttv_sub_driver driver = {
- .drv = {
- .name = DEVNAME,
- .probe = ir_probe,
- .remove = ir_remove,
- },
- .gpio_irq = ir_irq,
-};
+#define DEVNAME "bttv-input"
/* ---------------------------------------------------------------------- */
-static void ir_handle_key(struct IR *ir)
+static void ir_handle_key(struct bttv *btv)
{
+ struct bttv_ir *ir = btv->remote;
u32 gpio,data;
/* read gpio value */
- gpio = bttv_gpio_read(ir->sub->core);
+ gpio = bttv_gpio_read(&btv->c);
if (ir->polling) {
if (ir->last_gpio == gpio)
return;
@@ -347,56 +300,36 @@ static void ir_handle_key(struct IR *ir)
/* extract data */
data = ir_extract_bits(gpio, ir->mask_keycode);
- dprintk(DEVNAME ": irq gpio=0x%x code=%d | %s%s%s\n",
+ dprintk(KERN_INFO DEVNAME ": irq gpio=0x%x code=%d | %s%s%s\n",
gpio, data,
ir->polling ? "poll" : "irq",
(gpio & ir->mask_keydown) ? " down" : "",
(gpio & ir->mask_keyup) ? " up" : "");
- if (ir->mask_keydown) {
- /* bit set on keydown */
- if (gpio & ir->mask_keydown) {
- ir_input_keydown(ir->input, &ir->ir, data, data);
- } else {
- ir_input_nokey(ir->input, &ir->ir);
- }
-
- } else if (ir->mask_keyup) {
- /* bit cleared on keydown */
- if (0 == (gpio & ir->mask_keyup)) {
- ir_input_keydown(ir->input, &ir->ir, data, data);
- } else {
- ir_input_nokey(ir->input, &ir->ir);
- }
-
+ if ((ir->mask_keydown && (0 != (gpio & ir->mask_keydown))) ||
+ (ir->mask_keyup && (0 == (gpio & ir->mask_keyup)))) {
+ ir_input_keydown(ir->dev,&ir->ir,data,data);
} else {
- /* can't disturgissh keydown/up :-/ */
- ir_input_keydown(ir->input, &ir->ir, data, data);
- ir_input_nokey(ir->input, &ir->ir);
+ ir_input_nokey(ir->dev,&ir->ir);
}
-}
-static void ir_irq(struct bttv_sub_device *sub)
-{
- struct IR *ir = dev_get_drvdata(&sub->dev);
-
- if (!ir->polling)
- ir_handle_key(ir);
}
-static void ir_timer(unsigned long data)
+void bttv_input_irq(struct bttv *btv)
{
- struct IR *ir = (struct IR*)data;
+ struct bttv_ir *ir = btv->remote;
- schedule_work(&ir->work);
+ if (!ir->polling)
+ ir_handle_key(btv);
}
-static void ir_work(void *data)
+static void bttv_input_timer(unsigned long data)
{
- struct IR *ir = data;
+ struct bttv *btv = (struct bttv*)data;
+ struct bttv_ir *ir = btv->remote;
unsigned long timeout;
- ir_handle_key(ir);
+ ir_handle_key(btv);
timeout = jiffies + (ir->polling * HZ / 1000);
mod_timer(&ir->timer, timeout);
}
@@ -435,26 +368,26 @@ static u32 rc5_decode(unsigned int code)
rc5 |= 1;
break;
case 3:
- dprintk("bad code: %x\n", org_code);
+ dprintk(KERN_WARNING "bad code: %x\n", org_code);
return 0;
}
}
- dprintk("code=%x, rc5=%x, start=%x, toggle=%x, address=%x, "
+ dprintk(KERN_WARNING "code=%x, rc5=%x, start=%x, toggle=%x, address=%x, "
"instr=%x\n", rc5, org_code, RC5_START(rc5),
RC5_TOGGLE(rc5), RC5_ADDR(rc5), RC5_INSTR(rc5));
return rc5;
}
-static int ir_rc5_irq(struct bttv_sub_device *sub)
+static int bttv_rc5_irq(struct bttv *btv)
{
- struct IR *ir = dev_get_drvdata(&sub->dev);
+ struct bttv_ir *ir = btv->remote;
struct timeval tv;
u32 gpio;
u32 gap;
unsigned long current_jiffies, timeout;
/* read gpio port */
- gpio = bttv_gpio_read(ir->sub->core);
+ gpio = bttv_gpio_read(&btv->c);
/* remote IRQ? */
if (!(gpio & 0x20))
@@ -493,14 +426,15 @@ static int ir_rc5_irq(struct bttv_sub_device *sub)
}
/* toggle GPIO pin 4 to reset the irq */
- bttv_gpio_write(ir->sub->core, gpio & ~(1 << 4));
- bttv_gpio_write(ir->sub->core, gpio | (1 << 4));
+ bttv_gpio_write(&btv->c, gpio & ~(1 << 4));
+ bttv_gpio_write(&btv->c, gpio | (1 << 4));
return 1;
}
-static void ir_rc5_timer_end(unsigned long data)
+
+static void bttv_rc5_timer_end(unsigned long data)
{
- struct IR *ir = (struct IR *)data;
+ struct bttv_ir *ir = (struct bttv_ir *)data;
struct timeval tv;
unsigned long current_jiffies, timeout;
u32 gap;
@@ -519,20 +453,20 @@ static void ir_rc5_timer_end(unsigned long data)
/* Allow some timmer jitter (RC5 is ~24ms anyway so this is ok) */
if (gap < 28000) {
- dprintk("spurious timer_end\n");
+ dprintk(KERN_WARNING "spurious timer_end\n");
return;
}
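	/* Sanity check on the 28000 us threshold (sketch, assuming the
	 * standard RC5 bit period of 1.778 ms): a frame is 14 bits, i.e.
	 * 14 * 1778 us ~= 24.9 ms, so a gap shorter than 28 ms cannot be a
	 * complete new code and is written off as timer jitter. */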
ir->active = 0;
if (ir->last_bit < 20) {
/* ignore spurious codes (caused by light/other remotes) */
- dprintk("short code: %x\n", ir->code);
+ dprintk(KERN_WARNING "short code: %x\n", ir->code);
} else {
u32 rc5 = rc5_decode(ir->code);
/* two start bits? */
if (RC5_START(rc5) != 3) {
- dprintk("rc5 start bits invalid: %u\n", RC5_START(rc5));
+ dprintk(KERN_WARNING "rc5 start bits invalid: %u\n", RC5_START(rc5));
/* right address? */
} else if (RC5_ADDR(rc5) == 0x0) {
@@ -542,10 +476,10 @@ static void ir_rc5_timer_end(unsigned long data)
/* Good code, decide if repeat/repress */
if (toggle != RC5_TOGGLE(ir->last_rc5) ||
instr != RC5_INSTR(ir->last_rc5)) {
- dprintk("instruction %x, toggle %x\n", instr,
+ dprintk(KERN_WARNING "instruction %x, toggle %x\n", instr,
toggle);
- ir_input_nokey(ir->input, &ir->ir);
- ir_input_keydown(ir->input, &ir->ir, instr,
+ ir_input_nokey(ir->dev, &ir->ir);
+ ir_input_keydown(ir->dev, &ir->ir, instr,
instr);
}
@@ -560,34 +494,37 @@ static void ir_rc5_timer_end(unsigned long data)
}
}
-static void ir_rc5_timer_keyup(unsigned long data)
+static void bttv_rc5_timer_keyup(unsigned long data)
{
- struct IR *ir = (struct IR *)data;
+ struct bttv_ir *ir = (struct bttv_ir *)data;
- dprintk("key released\n");
- ir_input_nokey(ir->input, &ir->ir);
+ dprintk(KERN_DEBUG "key released\n");
+ ir_input_nokey(ir->dev, &ir->ir);
}
/* ---------------------------------------------------------------------- */
-static int ir_probe(struct device *dev)
+int bttv_input_init(struct bttv *btv)
{
- struct bttv_sub_device *sub = to_bttv_sub_dev(dev);
- struct IR *ir;
- struct input_dev *input_dev;
+ struct bttv_ir *ir;
IR_KEYTAB_TYPE *ir_codes = NULL;
+ struct input_dev *input_dev;
int ir_type = IR_TYPE_OTHER;
- ir = kzalloc(sizeof(*ir), GFP_KERNEL);
+ if (!btv->has_remote)
+ return -ENODEV;
+
+ ir = kzalloc(sizeof(*ir),GFP_KERNEL);
input_dev = input_allocate_device();
if (!ir || !input_dev) {
kfree(ir);
input_free_device(input_dev);
return -ENOMEM;
}
+ memset(ir,0,sizeof(*ir));
/* detect & configure */
- switch (sub->core->type) {
+ switch (btv->c.type) {
case BTTV_BOARD_AVERMEDIA:
case BTTV_BOARD_AVPHONE98:
case BTTV_BOARD_AVERMEDIA98:
@@ -643,12 +580,18 @@ static int ir_probe(struct device *dev)
break;
case BTTV_BOARD_NEBULA_DIGITV:
ir_codes = ir_codes_nebula;
- driver.any_irq = ir_rc5_irq;
- driver.gpio_irq = NULL;
+ btv->custom_irq = bttv_rc5_irq;
ir->rc5_gpio = 1;
break;
+ case BTTV_BOARD_MACHTV_MAGICTV:
+ ir_codes = ir_codes_apac_viewcomp;
+ ir->mask_keycode = 0x001F00;
+ ir->mask_keyup = 0x004000;
+ ir->polling = 50; /* ms */
+ break;
}
if (NULL == ir_codes) {
+ dprintk(KERN_INFO "Ooops: IR config error [card=%d]\n",btv->c.type);
kfree(ir);
input_free_device(input_dev);
return -ENODEV;
@@ -657,109 +600,92 @@ static int ir_probe(struct device *dev)
if (ir->rc5_gpio) {
u32 gpio;
/* enable remote irq */
- bttv_gpio_inout(sub->core, (1 << 4), 1 << 4);
- gpio = bttv_gpio_read(sub->core);
- bttv_gpio_write(sub->core, gpio & ~(1 << 4));
- bttv_gpio_write(sub->core, gpio | (1 << 4));
+ bttv_gpio_inout(&btv->c, (1 << 4), 1 << 4);
+ gpio = bttv_gpio_read(&btv->c);
+ bttv_gpio_write(&btv->c, gpio & ~(1 << 4));
+ bttv_gpio_write(&btv->c, gpio | (1 << 4));
} else {
/* init hardware-specific stuff */
- bttv_gpio_inout(sub->core, ir->mask_keycode | ir->mask_keydown, 0);
+ bttv_gpio_inout(&btv->c, ir->mask_keycode | ir->mask_keydown, 0);
}
/* init input device */
+ ir->dev = input_dev;
+
snprintf(ir->name, sizeof(ir->name), "bttv IR (card=%d)",
- sub->core->type);
+ btv->c.type);
snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0",
- pci_name(sub->core->pci));
+ pci_name(btv->c.pci));
ir_input_init(input_dev, &ir->ir, ir_type, ir_codes);
input_dev->name = ir->name;
input_dev->phys = ir->phys;
input_dev->id.bustype = BUS_PCI;
input_dev->id.version = 1;
- if (sub->core->pci->subsystem_vendor) {
- input_dev->id.vendor = sub->core->pci->subsystem_vendor;
- input_dev->id.product = sub->core->pci->subsystem_device;
+ if (btv->c.pci->subsystem_vendor) {
+ input_dev->id.vendor = btv->c.pci->subsystem_vendor;
+ input_dev->id.product = btv->c.pci->subsystem_device;
} else {
- input_dev->id.vendor = sub->core->pci->vendor;
- input_dev->id.product = sub->core->pci->device;
+ input_dev->id.vendor = btv->c.pci->vendor;
+ input_dev->id.product = btv->c.pci->device;
}
- input_dev->cdev.dev = &sub->core->pci->dev;
-
- ir->input = input_dev;
- ir->sub = sub;
+ input_dev->cdev.dev = &btv->c.pci->dev;
+ btv->remote = ir;
if (ir->polling) {
- INIT_WORK(&ir->work, ir_work, ir);
init_timer(&ir->timer);
- ir->timer.function = ir_timer;
- ir->timer.data = (unsigned long)ir;
- schedule_work(&ir->work);
+ ir->timer.function = bttv_input_timer;
+ ir->timer.data = (unsigned long)btv;
+ ir->timer.expires = jiffies + HZ;
+ add_timer(&ir->timer);
} else if (ir->rc5_gpio) {
/* set timer_end for code completion */
init_timer(&ir->timer_end);
- ir->timer_end.function = ir_rc5_timer_end;
+ ir->timer_end.function = bttv_rc5_timer_end;
ir->timer_end.data = (unsigned long)ir;
init_timer(&ir->timer_keyup);
- ir->timer_keyup.function = ir_rc5_timer_keyup;
+ ir->timer_keyup.function = bttv_rc5_timer_keyup;
ir->timer_keyup.data = (unsigned long)ir;
}
/* all done */
- dev_set_drvdata(dev, ir);
- input_register_device(ir->input);
+ input_register_device(btv->remote->dev);
+ printk(DEVNAME ": %s detected at %s\n",ir->name,ir->phys);
/* the remote isn't as bouncy as a keyboard */
- ir->input->rep[REP_DELAY] = repeat_delay;
- ir->input->rep[REP_PERIOD] = repeat_period;
+ ir->dev->rep[REP_DELAY] = repeat_delay;
+ ir->dev->rep[REP_PERIOD] = repeat_period;
return 0;
}
-static int ir_remove(struct device *dev)
+void bttv_input_fini(struct bttv *btv)
{
- struct IR *ir = dev_get_drvdata(dev);
+ if (btv->remote == NULL)
+ return;
- if (ir->polling) {
- del_timer(&ir->timer);
+ if (btv->remote->polling) {
+ del_timer_sync(&btv->remote->timer);
flush_scheduled_work();
}
- if (ir->rc5_gpio) {
+
+ if (btv->remote->rc5_gpio) {
u32 gpio;
- del_timer(&ir->timer_end);
+ del_timer_sync(&btv->remote->timer_end);
flush_scheduled_work();
- gpio = bttv_gpio_read(ir->sub->core);
- bttv_gpio_write(ir->sub->core, gpio & ~(1 << 4));
+ gpio = bttv_gpio_read(&btv->c);
+ bttv_gpio_write(&btv->c, gpio & ~(1 << 4));
}
- input_unregister_device(ir->input);
- kfree(ir);
- return 0;
+ input_unregister_device(btv->remote->dev);
+ kfree(btv->remote);
+ btv->remote = NULL;
}
-/* ---------------------------------------------------------------------- */
-
-MODULE_AUTHOR("Gerd Knorr, Pavel Machek");
-MODULE_DESCRIPTION("input driver for bt8x8 gpio IR remote controls");
-MODULE_LICENSE("GPL");
-
-static int ir_init(void)
-{
- return bttv_sub_register(&driver, "remote");
-}
-
-static void ir_fini(void)
-{
- bttv_sub_unregister(&driver);
-}
-
-module_init(ir_init);
-module_exit(ir_fini);
-
/*
* Local variables:
diff --git a/drivers/media/video/bttv-vbi.c b/drivers/media/video/bttv-vbi.c
index f4f58c60f15..72afdd64b88 100644
--- a/drivers/media/video/bttv-vbi.c
+++ b/drivers/media/video/bttv-vbi.c
@@ -31,6 +31,12 @@
#include <asm/io.h>
#include "bttvp.h"
+/* Offset from line sync pulse leading edge (0H) in 1 / sampling_rate:
+ bt8x8 /HRESET pulse starts at 0H and has length 64 / fCLKx1 (E|O_VTC
+ HSFMT = 0). VBI_HDELAY (always 0) is an offset from the trailing edge
+ of /HRESET in 1 / fCLKx1, and the sampling_rate tvnorm->Fsc is fCLKx2. */
+#define VBI_OFFSET ((64 + 0) * 2)
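A quick check of the value this expands to, restating the comment above: /HRESET lasts 64 fCLKx1 periods, VBI_HDELAY contributes 0, and sampling runs at fCLKx2 (two samples per fCLKx1 period), so the offset is (64 + 0) * 2 = 128 samples. A minimal stand-alone sketch of the same computation:

#include <stdio.h>

int main(void)
{
	const int hreset_fclkx1 = 64;	/* /HRESET length in fCLKx1 periods */
	const int vbi_hdelay = 0;	/* always 0 on the bt8x8 */

	/* samples are taken at fCLKx2, i.e. two per fCLKx1 period */
	printf("VBI_OFFSET = %d samples\n", (hreset_fclkx1 + vbi_hdelay) * 2);
	return 0;
}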
+
#define VBI_DEFLINES 16
#define VBI_MAXLINES 32
@@ -163,40 +169,30 @@ void bttv_vbi_setlines(struct bttv_fh *fh, struct bttv *btv, int lines)
void bttv_vbi_try_fmt(struct bttv_fh *fh, struct v4l2_format *f)
{
const struct bttv_tvnorm *tvnorm;
- u32 start0,start1;
- s32 count0,count1,count;
+ s64 count0,count1,count;
tvnorm = &bttv_tvnorms[fh->btv->tvnorm];
f->type = V4L2_BUF_TYPE_VBI_CAPTURE;
f->fmt.vbi.sampling_rate = tvnorm->Fsc;
f->fmt.vbi.samples_per_line = 2048;
f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
- f->fmt.vbi.offset = 244;
+ f->fmt.vbi.offset = VBI_OFFSET;
f->fmt.vbi.flags = 0;
- switch (fh->btv->tvnorm) {
- case 1: /* NTSC */
- start0 = 10;
- start1 = 273;
- break;
- case 0: /* PAL */
- case 2: /* SECAM */
- default:
- start0 = 7;
- start1 = 320;
- }
- count0 = (f->fmt.vbi.start[0] + f->fmt.vbi.count[0]) - start0;
- count1 = (f->fmt.vbi.start[1] + f->fmt.vbi.count[1]) - start1;
- count = max(count0,count1);
- if (count > VBI_MAXLINES)
- count = VBI_MAXLINES;
- if (count < 1)
- count = 1;
+ /* s64 to prevent overflow. */
+ count0 = (s64) f->fmt.vbi.start[0] + f->fmt.vbi.count[0]
+ - tvnorm->vbistart[0];
+ count1 = (s64) f->fmt.vbi.start[1] + f->fmt.vbi.count[1]
+ - tvnorm->vbistart[1];
+ count = clamp (max (count0, count1), 1LL, (s64) VBI_MAXLINES);
- f->fmt.vbi.start[0] = start0;
- f->fmt.vbi.start[1] = start1;
+ f->fmt.vbi.start[0] = tvnorm->vbistart[0];
+ f->fmt.vbi.start[1] = tvnorm->vbistart[1];
f->fmt.vbi.count[0] = count;
f->fmt.vbi.count[1] = count;
+
+ f->fmt.vbi.reserved[0] = 0;
+ f->fmt.vbi.reserved[1] = 0;
}
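The start[] and count[] fields arrive from user space as __u32, so their sum can exceed the s32 range; doing the subtraction in s64 and clamping to [1, VBI_MAXLINES] keeps even hostile values in bounds. A user-space sketch of that bound, with stand-in numbers:

#include <stdio.h>
#include <stdint.h>

#define VBI_MAXLINES 32

static int64_t clamp64(int64_t x, int64_t lo, int64_t hi)
{
	return x < lo ? lo : (x > hi ? hi : x);
}

int main(void)
{
	uint32_t start = 7, count = 0xffffffffu;	/* worst-case user input */
	uint32_t vbistart = 7;				/* stand-in for tvnorm->vbistart[0] */

	/* 64-bit arithmetic cannot overflow here; 32-bit could. */
	int64_t lines = (int64_t)start + count - vbistart;

	printf("requested %lld -> clamped %lld\n",
	       (long long)lines, (long long)clamp64(lines, 1, VBI_MAXLINES));
	return 0;
}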
void bttv_vbi_get_fmt(struct bttv_fh *fh, struct v4l2_format *f)
@@ -209,21 +205,12 @@ void bttv_vbi_get_fmt(struct bttv_fh *fh, struct v4l2_format *f)
f->fmt.vbi.sampling_rate = tvnorm->Fsc;
f->fmt.vbi.samples_per_line = 2048;
f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
- f->fmt.vbi.offset = 244;
+ f->fmt.vbi.offset = VBI_OFFSET;
+ f->fmt.vbi.start[0] = tvnorm->vbistart[0];
+ f->fmt.vbi.start[1] = tvnorm->vbistart[1];
f->fmt.vbi.count[0] = fh->lines;
f->fmt.vbi.count[1] = fh->lines;
f->fmt.vbi.flags = 0;
- switch (fh->btv->tvnorm) {
- case 1: /* NTSC */
- f->fmt.vbi.start[0] = 10;
- f->fmt.vbi.start[1] = 273;
- break;
- case 0: /* PAL */
- case 2: /* SECAM */
- default:
- f->fmt.vbi.start[0] = 7;
- f->fmt.vbi.start[1] = 319;
- }
}
/* ----------------------------------------------------------------------- */
diff --git a/drivers/media/video/bttv.h b/drivers/media/video/bttv.h
index 93298f06e01..e370d74f2a1 100644
--- a/drivers/media/video/bttv.h
+++ b/drivers/media/video/bttv.h
@@ -16,6 +16,8 @@
#include <linux/videodev.h>
#include <linux/i2c.h>
+#include <media/ir-common.h>
+#include <media/ir-kbd-i2c.h>
/* ---------------------------------------------------------- */
/* exported by bttv-cards.c */
@@ -163,6 +165,8 @@
#define BTTV_BOARD_OSPREY440 0x8c
#define BTTV_BOARD_ASOUND_SKYEYE 0x8d
#define BTTV_BOARD_SABRENT_TVFM 0x8e
+#define BTTV_BOARD_HAUPPAUGE_IMPACTVCB 0x8f
+#define BTTV_BOARD_MACHTV_MAGICTV 0x90
/* i2c address list */
#define I2C_TSA5522 0xc2
@@ -210,6 +214,34 @@ struct bttv_core {
struct bttv;
+
+struct bttv_ir {
+ struct input_dev *dev;
+ struct ir_input_state ir;
+ char name[32];
+ char phys[32];
+
+ /* Usual gpio signalling */
+
+ u32 mask_keycode;
+ u32 mask_keydown;
+ u32 mask_keyup;
+ u32 polling;
+ u32 last_gpio;
+ struct work_struct work;
+ struct timer_list timer;
+
+ /* RC5 gpio */
+ u32 rc5_gpio;
+ struct timer_list timer_end; /* timer_end for code completion */
+ struct timer_list timer_keyup; /* timer_end for key release */
+ u32 last_rc5; /* last good rc5 code */
+ u32 last_bit; /* last raw bit seen */
+ u32 code; /* raw code under construction */
+ struct timeval base_time; /* time of last seen code */
+ int active; /* building raw code */
+};
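For the plain-GPIO path, mask_keycode selects which GPIO lines carry the scan code and ir_extract_bits() (from ir-common) packs just those bits into a dense value used as the keytable index; for example, the MACHTV_MAGICTV entry added earlier uses mask_keycode = 0x001F00, a 5-bit scan code. A user-space sketch of that packing, under the assumption that ir_extract_bits() behaves as described:

#include <stdio.h>
#include <stdint.h>

/* Pack the bits of 'data' selected by 'mask' into a dense value,
 * lowest selected bit first (sketch of what ir_extract_bits() does). */
static uint32_t extract_bits(uint32_t data, uint32_t mask)
{
	uint32_t out = 0;
	int pos = 0;

	for (int bit = 0; bit < 32; bit++) {
		if (!(mask & (1u << bit)))
			continue;
		if (data & (1u << bit))
			out |= 1u << pos;
		pos++;
	}
	return out;
}

int main(void)
{
	/* gpio bits 8-10 set, mask 0x001F00 -> scan code 0x7 */
	printf("0x%x\n", extract_bits(0x00000700, 0x001F00));
	return 0;
}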
+
struct tvcard
{
char *name;
@@ -235,7 +267,6 @@ struct tvcard
unsigned int has_dvb:1;
unsigned int has_remote:1;
unsigned int no_gpioirq:1;
- unsigned int any_irq:1;
/* other settings */
unsigned int pll;
@@ -335,7 +366,6 @@ struct bttv_sub_driver {
struct device_driver drv;
char wanted[BUS_ID_SIZE];
void (*gpio_irq)(struct bttv_sub_device *sub);
- int (*any_irq)(struct bttv_sub_device *sub);
};
#define to_bttv_sub_drv(x) container_of((x), struct bttv_sub_driver, drv)
@@ -363,6 +393,10 @@ extern int bttv_I2CWrite(struct bttv *btv, unsigned char addr, unsigned char b1,
unsigned char b2, int both);
extern void bttv_readee(struct bttv *btv, unsigned char *eedata, int addr);
+extern int bttv_input_init(struct bttv *dev);
+extern void bttv_input_fini(struct bttv *dev);
+extern void bttv_input_irq(struct bttv *dev);
+
#endif /* _BTTV_H_ */
/*
* Local variables:
diff --git a/drivers/media/video/bttvp.h b/drivers/media/video/bttvp.h
index 1e6a5632c3c..dd00c20ab95 100644
--- a/drivers/media/video/bttvp.h
+++ b/drivers/media/video/bttvp.h
@@ -73,6 +73,8 @@
#define UNSET (-1U)
+#define clamp(x, low, high) min (max (low, x), high)
+
/* ---------------------------------------------------------- */
struct bttv_tvnorm {
@@ -88,6 +90,9 @@ struct bttv_tvnorm {
u8 vbipack;
u16 vtotal;
int sram;
+ /* ITU-R frame line number of the first VBI line we can
+ capture, of the first and second field. */
+ u16 vbistart[2];
};
extern const struct bttv_tvnorm bttv_tvnorms[];
@@ -209,7 +214,6 @@ extern struct bus_type bttv_sub_bus_type;
int bttv_sub_add_device(struct bttv_core *core, char *name);
int bttv_sub_del_devices(struct bttv_core *core);
void bttv_gpio_irq(struct bttv_core *core);
-int bttv_any_irq(struct bttv_core *core);
/* ---------------------------------------------------------- */
@@ -270,12 +274,13 @@ struct bttv {
/* card configuration info */
unsigned int cardid; /* pci subsystem id (bt878 based ones) */
unsigned int tuner_type; /* tuner chip type */
- unsigned int pinnacle_id;
+ unsigned int tda9887_conf;
unsigned int svhs;
struct bttv_pll_info pll;
int triton1;
int gpioirq;
- int any_irq;
+ int (*custom_irq)(struct bttv *btv);
+
int use_i2c_hw;
/* old gpio interface */
@@ -300,7 +305,7 @@ struct bttv {
/* infrared remote */
int has_remote;
- struct bttv_input *remote;
+ struct bttv_ir *remote;
/* locking */
spinlock_t s_lock;
diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c
index 0065d0c240d..6bad93ef969 100644
--- a/drivers/media/video/bw-qcam.c
+++ b/drivers/media/video/bw-qcam.c
@@ -875,6 +875,7 @@ static struct file_operations qcam_fops = {
.open = video_exclusive_open,
.release = video_exclusive_release,
.ioctl = qcam_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.read = qcam_read,
.llseek = no_llseek,
};
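On a 64-bit kernel a 32-bit process's ioctl() is routed through the fops .compat_ioctl entry, so the one-line addition above is all the wiring a driver needs; v4l_compat_ioctl32 (added later in this patch) translates the 32-bit argument layout and then calls back into the driver's native handler. A sketch of the pattern, with a hypothetical driver:

/* Sketch only; example_open/example_release/example_ioctl are hypothetical. */
static struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= example_open,
	.release	= example_release,
	.ioctl		= example_ioctl,	/* native (64-bit) entry point */
	.compat_ioctl	= v4l_compat_ioctl32,	/* entry point for 32-bit callers */
	.llseek		= no_llseek,
};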
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index 75442ec49f3..9976db4f6da 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -687,6 +687,7 @@ static struct file_operations qcam_fops = {
.open = video_exclusive_open,
.release = video_exclusive_release,
.ioctl = qcam_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.read = qcam_read,
.llseek = no_llseek,
};
diff --git a/drivers/media/video/compat_ioctl32.c b/drivers/media/video/compat_ioctl32.c
new file mode 100644
index 00000000000..297c32ab51e
--- /dev/null
+++ b/drivers/media/video/compat_ioctl32.c
@@ -0,0 +1,879 @@
+/*
+ * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
+ * Separated from fs stuff by Arnd Bergmann <arnd@arndb.de>
+ *
+ * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
+ * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs
+ * Copyright (C) 2003 Pavel Machek (pavel@suse.cz)
+ * Copyright (C) 2005 Philippe De Muyter (phdm@macqel.be)
+ *
+ * These routines maintain argument size conversion between 32bit and 64bit
+ * ioctls.
+ */
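The pattern used throughout this file is: declare a mirror struct whose layout matches what a 32-bit process sees (pointers and longs shrink to compat_caddr_t / compat_ulong_t), copy it field by field into the native struct, run the native ioctl under set_fs(KERNEL_DS), and copy the result back. A minimal sketch of the read side for a hypothetical structure (example_native, example32 and get_example32 are illustrative names, not part of the patch):

struct example_native {
	void		*buf;		/* 8 bytes on a 64-bit kernel */
	unsigned long	len;		/* 8 bytes on a 64-bit kernel */
	int		flags;
};

struct example32 {
	compat_caddr_t	buf;		/* 32-bit user pointer */
	compat_ulong_t	len;		/* 32-bit unsigned long */
	compat_int_t	flags;
};

static int get_example32(struct example_native *kp, struct example32 __user *up)
{
	u32 buf;

	if (!access_ok(VERIFY_READ, up, sizeof(struct example32)) ||
	    get_user(buf, &up->buf) ||
	    get_user(kp->len, &up->len) ||
	    get_user(kp->flags, &up->flags))
		return -EFAULT;
	kp->buf = compat_ptr(buf);	/* widen the 32-bit pointer */
	return 0;
}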
+
+#include <linux/config.h>
+#include <linux/compat.h>
+#include <linux/videodev.h>
+#include <linux/videodev2.h>
+#include <linux/module.h>
+#include <linux/smp_lock.h>
+#include <media/v4l2-common.h>
+
+#ifdef CONFIG_COMPAT
+
+
+struct video_tuner32 {
+ compat_int_t tuner;
+ char name[32];
+ compat_ulong_t rangelow, rangehigh;
+ u32 flags; /* It is really u32 in videodev.h */
+ u16 mode, signal;
+};
+
+static int get_video_tuner32(struct video_tuner *kp, struct video_tuner32 __user *up)
+{
+ if(!access_ok(VERIFY_READ, up, sizeof(struct video_tuner32)) ||
+ get_user(kp->tuner, &up->tuner) ||
+ copy_from_user(kp->name, up->name, 32) ||
+ get_user(kp->rangelow, &up->rangelow) ||
+ get_user(kp->rangehigh, &up->rangehigh) ||
+ get_user(kp->flags, &up->flags) ||
+ get_user(kp->mode, &up->mode) ||
+ get_user(kp->signal, &up->signal))
+ return -EFAULT;
+ return 0;
+}
+
+static int put_video_tuner32(struct video_tuner *kp, struct video_tuner32 __user *up)
+{
+ if(!access_ok(VERIFY_WRITE, up, sizeof(struct video_tuner32)) ||
+ put_user(kp->tuner, &up->tuner) ||
+ copy_to_user(up->name, kp->name, 32) ||
+ put_user(kp->rangelow, &up->rangelow) ||
+ put_user(kp->rangehigh, &up->rangehigh) ||
+ put_user(kp->flags, &up->flags) ||
+ put_user(kp->mode, &up->mode) ||
+ put_user(kp->signal, &up->signal))
+ return -EFAULT;
+ return 0;
+}
+
+struct video_buffer32 {
+ compat_caddr_t base;
+ compat_int_t height, width, depth, bytesperline;
+};
+
+static int get_video_buffer32(struct video_buffer *kp, struct video_buffer32 __user *up)
+{
+ u32 tmp;
+
+ if (!access_ok(VERIFY_READ, up, sizeof(struct video_buffer32)) ||
+ get_user(tmp, &up->base) ||
+ get_user(kp->height, &up->height) ||
+ get_user(kp->width, &up->width) ||
+ get_user(kp->depth, &up->depth) ||
+ get_user(kp->bytesperline, &up->bytesperline))
+ return -EFAULT;
+
+ /* This is actually a physical address stored
+ * as a void pointer.
+ */
+ kp->base = (void *)(unsigned long) tmp;
+
+ return 0;
+}
+
+static int put_video_buffer32(struct video_buffer *kp, struct video_buffer32 __user *up)
+{
+ u32 tmp = (u32)((unsigned long)kp->base);
+
+ if(!access_ok(VERIFY_WRITE, up, sizeof(struct video_buffer32)) ||
+ put_user(tmp, &up->base) ||
+ put_user(kp->height, &up->height) ||
+ put_user(kp->width, &up->width) ||
+ put_user(kp->depth, &up->depth) ||
+ put_user(kp->bytesperline, &up->bytesperline))
+ return -EFAULT;
+ return 0;
+}
+
+struct video_clip32 {
+ s32 x, y, width, height; /* It's really s32 in videodev.h */
+ compat_caddr_t next;
+};
+
+struct video_window32 {
+ u32 x, y, width, height, chromakey, flags;
+ compat_caddr_t clips;
+ compat_int_t clipcount;
+};
+
+static int native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret = -ENOIOCTLCMD;
+
+ if (file->f_op->unlocked_ioctl)
+ ret = file->f_op->unlocked_ioctl(file, cmd, arg);
+ else if (file->f_op->ioctl) {
+ lock_kernel();
+ ret = file->f_op->ioctl(file->f_dentry->d_inode, file, cmd, arg);
+ unlock_kernel();
+ }
+
+ return ret;
+}
+
+
+/* You get back everything except the clips... */
+static int put_video_window32(struct video_window *kp, struct video_window32 __user *up)
+{
+ if(!access_ok(VERIFY_WRITE, up, sizeof(struct video_window32)) ||
+ put_user(kp->x, &up->x) ||
+ put_user(kp->y, &up->y) ||
+ put_user(kp->width, &up->width) ||
+ put_user(kp->height, &up->height) ||
+ put_user(kp->chromakey, &up->chromakey) ||
+ put_user(kp->flags, &up->flags) ||
+ put_user(kp->clipcount, &up->clipcount))
+ return -EFAULT;
+ return 0;
+}
+
+struct v4l2_clip32
+{
+ struct v4l2_rect c;
+ compat_caddr_t next;
+};
+
+struct v4l2_window32
+{
+ struct v4l2_rect w;
+ enum v4l2_field field;
+ __u32 chromakey;
+ compat_caddr_t clips; /* actually struct v4l2_clip32 * */
+ __u32 clipcount;
+ compat_caddr_t bitmap;
+};
+
+static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
+{
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_window32)) ||
+ copy_from_user(&kp->w, &up->w, sizeof(up->w)) ||
+ get_user(kp->field, &up->field) ||
+ get_user(kp->chromakey, &up->chromakey) ||
+ get_user(kp->clipcount, &up->clipcount))
+ return -EFAULT;
+ if (kp->clipcount > 2048)
+ return -EINVAL;
+ if (kp->clipcount) {
+ struct v4l2_clip32 *uclips = compat_ptr(up->clips);
+ struct v4l2_clip *kclips;
+ int n = kp->clipcount;
+
+ kclips = compat_alloc_user_space(n * sizeof(struct v4l2_clip));
+ kp->clips = kclips;
+ while (--n >= 0) {
+ if (!access_ok(VERIFY_READ, &uclips->c, sizeof(uclips->c)) ||
+ copy_from_user(&kclips->c, &uclips->c, sizeof(uclips->c)))
+ return -EFAULT;
+ kclips->next = n ? kclips + 1 : 0;
+ uclips += 1;
+ kclips += 1;
+ }
+ } else
+ kp->clips = 0;
+ return 0;
+}
+
+static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
+{
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_window32)) ||
+ copy_to_user(&up->w, &kp->w, sizeof(up->w)) ||
+ put_user(kp->field, &up->field) ||
+ put_user(kp->chromakey, &up->chromakey) ||
+ put_user(kp->clipcount, &up->clipcount))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
+{
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_pix_format)) ||
+ copy_from_user(kp, up, sizeof(struct v4l2_pix_format)))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
+{
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_pix_format)) ||
+ copy_to_user(up, kp, sizeof(struct v4l2_pix_format)))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int get_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
+{
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_vbi_format)) ||
+ copy_from_user(kp, up, sizeof(struct v4l2_vbi_format)))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int put_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
+{
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_vbi_format)) ||
+ copy_to_user(up, kp, sizeof(struct v4l2_vbi_format)))
+ return -EFAULT;
+ return 0;
+}
+
+struct v4l2_format32
+{
+ enum v4l2_buf_type type;
+ union
+ {
+ struct v4l2_pix_format pix; // V4L2_BUF_TYPE_VIDEO_CAPTURE
+ struct v4l2_window32 win; // V4L2_BUF_TYPE_VIDEO_OVERLAY
+ struct v4l2_vbi_format vbi; // V4L2_BUF_TYPE_VBI_CAPTURE
+ __u8 raw_data[200]; // user-defined
+ } fmt;
+};
+
+static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+{
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) ||
+ get_user(kp->type, &up->type))
+ return -EFAULT;
+ switch (kp->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return get_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ return get_v4l2_window32(&kp->fmt.win, &up->fmt.win);
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ return get_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi);
+ default:
+ printk("compat_ioctl : unexpected VIDIOC_FMT type %d\n",
+ kp->type);
+ return -ENXIO;
+ }
+}
+
+static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
+{
+ if(!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)) ||
+ put_user(kp->type, &up->type))
+ return -EFAULT;
+ switch (kp->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return put_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ return put_v4l2_window32(&kp->fmt.win, &up->fmt.win);
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ return put_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi);
+ default:
+ return -ENXIO;
+ }
+}
+
+static inline int get_v4l2_standard(struct v4l2_standard *kp, struct v4l2_standard __user *up)
+{
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_standard)) ||
+ copy_from_user(kp, up, sizeof(struct v4l2_standard)))
+ return -EFAULT;
+ return 0;
+
+}
+
+static inline int put_v4l2_standard(struct v4l2_standard *kp, struct v4l2_standard __user *up)
+{
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard)) ||
+ copy_to_user(up, kp, sizeof(struct v4l2_standard)))
+ return -EFAULT;
+ return 0;
+}
+
+struct v4l2_standard32
+{
+ __u32 index;
+ __u32 id[2]; /* __u64 would get the alignment wrong */
+ __u8 name[24];
+ struct v4l2_fract frameperiod; /* Frames, not fields */
+ __u32 framelines;
+ __u32 reserved[4];
+};
+
+static int get_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
+{
+ /* other fields are not set by the user, nor used by the driver */
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_standard32)) ||
+ get_user(kp->index, &up->index))
+ return -EFAULT;
+ return 0;
+}
+
+static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
+{
+ if(!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) ||
+ put_user(kp->index, &up->index) ||
+ copy_to_user(up->id, &kp->id, sizeof(__u64)) ||
+ copy_to_user(up->name, kp->name, 24) ||
+ copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) ||
+ put_user(kp->framelines, &up->framelines) ||
+ copy_to_user(up->reserved, kp->reserved, 4 * sizeof(__u32)))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int get_v4l2_tuner(struct v4l2_tuner *kp, struct v4l2_tuner __user *up)
+{
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_tuner)) ||
+ copy_from_user(kp, up, sizeof(struct v4l2_tuner)))
+ return -EFAULT;
+ return 0;
+
+}
+
+static inline int put_v4l2_tuner(struct v4l2_tuner *kp, struct v4l2_tuner __user *up)
+{
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_tuner)) ||
+ copy_to_user(up, kp, sizeof(struct v4l2_tuner)))
+ return -EFAULT;
+ return 0;
+}
+
+struct v4l2_buffer32
+{
+ __u32 index;
+ enum v4l2_buf_type type;
+ __u32 bytesused;
+ __u32 flags;
+ enum v4l2_field field;
+ struct compat_timeval timestamp;
+ struct v4l2_timecode timecode;
+ __u32 sequence;
+
+ /* memory location */
+ enum v4l2_memory memory;
+ union {
+ __u32 offset;
+ compat_long_t userptr;
+ } m;
+ __u32 length;
+ __u32 input;
+ __u32 reserved;
+};
+
+static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
+{
+
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) ||
+ get_user(kp->index, &up->index) ||
+ get_user(kp->type, &up->type) ||
+ get_user(kp->flags, &up->flags) ||
+ get_user(kp->memory, &up->memory) ||
+ get_user(kp->input, &up->input))
+ return -EFAULT;
+ switch(kp->memory) {
+ case V4L2_MEMORY_MMAP:
+ break;
+ case V4L2_MEMORY_USERPTR:
+ {
+ unsigned long tmp = (unsigned long)compat_ptr(up->m.userptr);
+
+ if(get_user(kp->length, &up->length) ||
+ get_user(kp->m.userptr, &tmp))
+ return -EFAULT;
+ }
+ break;
+ case V4L2_MEMORY_OVERLAY:
+ if(get_user(kp->m.offset, &up->m.offset))
+ return -EFAULT;
+ break;
+ }
+ return 0;
+}
+
+static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
+{
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_buffer32)) ||
+ put_user(kp->index, &up->index) ||
+ put_user(kp->type, &up->type) ||
+ put_user(kp->flags, &up->flags) ||
+ put_user(kp->memory, &up->memory) ||
+ put_user(kp->input, &up->input))
+ return -EFAULT;
+ switch(kp->memory) {
+ case V4L2_MEMORY_MMAP:
+ if (put_user(kp->length, &up->length) ||
+ put_user(kp->m.offset, &up->m.offset))
+ return -EFAULT;
+ break;
+ case V4L2_MEMORY_USERPTR:
+ if (put_user(kp->length, &up->length) ||
+ put_user(kp->m.userptr, &up->m.userptr))
+ return -EFAULT;
+ break;
+ case V4L2_MEMORY_OVERLAY:
+ if (put_user(kp->m.offset, &up->m.offset))
+ return -EFAULT;
+ break;
+ }
+ if (put_user(kp->bytesused, &up->bytesused) ||
+ put_user(kp->field, &up->field) ||
+ put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
+ put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) ||
+ copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
+ put_user(kp->sequence, &up->sequence) ||
+ put_user(kp->reserved, &up->reserved))
+ return -EFAULT;
+ return 0;
+}
+
+struct v4l2_framebuffer32
+{
+ __u32 capability;
+ __u32 flags;
+ compat_caddr_t base;
+ struct v4l2_pix_format fmt;
+};
+
+static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
+{
+ u32 tmp;
+
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_framebuffer32)) ||
+ get_user(tmp, &up->base) ||
+ get_user(kp->capability, &up->capability) ||
+ get_user(kp->flags, &up->flags))
+ return -EFAULT;
+ kp->base = compat_ptr(tmp);
+ get_v4l2_pix_format(&kp->fmt, &up->fmt);
+ return 0;
+}
+
+static int put_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
+{
+ u32 tmp = (u32)((unsigned long)kp->base);
+
+ if(!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_framebuffer32)) ||
+ put_user(tmp, &up->base) ||
+ put_user(kp->capability, &up->capability) ||
+ put_user(kp->flags, &up->flags))
+ return -EFAULT;
+ put_v4l2_pix_format(&kp->fmt, &up->fmt);
+ return 0;
+}
+
+static inline int get_v4l2_input32(struct v4l2_input *kp, struct v4l2_input __user *up)
+{
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_input) - 4) ||
+ copy_from_user(kp, up, sizeof(struct v4l2_input) - 4))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input __user *up)
+{
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_input) - 4) ||
+ copy_to_user(up, kp, sizeof(struct v4l2_input) - 4))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int get_v4l2_input(struct v4l2_input *kp, struct v4l2_input __user *up)
+{
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_input)) ||
+ copy_from_user(kp, up, sizeof(struct v4l2_input)))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int put_v4l2_input(struct v4l2_input *kp, struct v4l2_input __user *up)
+{
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_input)) ||
+ copy_to_user(up, kp, sizeof(struct v4l2_input)))
+ return -EFAULT;
+ return 0;
+}
+
+#define VIDIOCGTUNER32 _IOWR('v',4, struct video_tuner32)
+#define VIDIOCSTUNER32 _IOW('v',5, struct video_tuner32)
+#define VIDIOCGWIN32 _IOR('v',9, struct video_window32)
+#define VIDIOCSWIN32 _IOW('v',10, struct video_window32)
+#define VIDIOCGFBUF32 _IOR('v',11, struct video_buffer32)
+#define VIDIOCSFBUF32 _IOW('v',12, struct video_buffer32)
+#define VIDIOCGFREQ32 _IOR('v',14, u32)
+#define VIDIOCSFREQ32 _IOW('v',15, u32)
+
+/* VIDIOC_ENUMINPUT32 is VIDIOC_ENUMINPUT minus 4 bytes of padding alignment */
+#define VIDIOC_ENUMINPUT32 VIDIOC_ENUMINPUT - _IOC(0, 0, 0, 4)
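This works because the ioctl command number encodes the argument size in its own bit-field (bits 16-29 in the usual asm-generic layout), so subtracting _IOC(0, 0, 0, 4) leaves the direction, type and number untouched and only shrinks the encoded size by the 4 bytes of tail padding that the 32-bit layout of struct v4l2_input lacks. A user-space sketch of that arithmetic, with a stand-in structure size:

#include <stdio.h>

/* Usual asm-generic encoding (sketch): nr 0-7, type 8-15, size 16-29, dir 30-31 */
#define IOC_SKETCH(dir, type, nr, size) \
	(((unsigned)(dir) << 30) | ((unsigned)(size) << 16) | ((type) << 8) | (nr))

int main(void)
{
	unsigned native = IOC_SKETCH(3, 'V', 26, 80);	/* pretend sizeof(v4l2_input) == 80 */
	unsigned compat = native - IOC_SKETCH(0, 0, 0, 4);

	printf("size field: native %u, compat %u\n",
	       (native >> 16) & 0x3fff, (compat >> 16) & 0x3fff);	/* 80 vs 76 */
	return 0;
}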
+#define VIDIOC_G_FMT32 _IOWR ('V', 4, struct v4l2_format32)
+#define VIDIOC_S_FMT32 _IOWR ('V', 5, struct v4l2_format32)
+#define VIDIOC_QUERYBUF32 _IOWR ('V', 9, struct v4l2_buffer32)
+#define VIDIOC_G_FBUF32 _IOR ('V', 10, struct v4l2_framebuffer32)
+#define VIDIOC_S_FBUF32 _IOW ('V', 11, struct v4l2_framebuffer32)
+/* VIDIOC_OVERLAY is now _IOW, but was _IOWR */
+#define VIDIOC_OVERLAY32 _IOWR ('V', 14, compat_int_t)
+#define VIDIOC_QBUF32 _IOWR ('V', 15, struct v4l2_buffer32)
+#define VIDIOC_DQBUF32 _IOWR ('V', 17, struct v4l2_buffer32)
+#define VIDIOC_STREAMON32 _IOW ('V', 18, compat_int_t)
+#define VIDIOC_STREAMOFF32 _IOW ('V', 19, compat_int_t)
+#define VIDIOC_ENUMSTD32 _IOWR ('V', 25, struct v4l2_standard32)
+/* VIDIOC_S_CTRL is now _IOWR, but was _IOW */
+#define VIDIOC_S_CTRL32 _IOW ('V', 28, struct v4l2_control)
+#define VIDIOC_G_INPUT32 _IOR ('V', 38, compat_int_t)
+#define VIDIOC_S_INPUT32 _IOWR ('V', 39, compat_int_t)
+#define VIDIOC_TRY_FMT32 _IOWR ('V', 64, struct v4l2_format32)
+
+enum {
+ MaxClips = (~0U-sizeof(struct video_window))/sizeof(struct video_clip)
+};
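MaxClips is simply the largest clip count for which sizeof(struct video_window) + nclips * sizeof(struct video_clip) still fits in an unsigned int, so rejecting anything larger keeps the compat_alloc_user_space() size computation below from wrapping. A small sketch of that bound, with stand-in structure sizes:

#include <stdio.h>

#define SIZEOF_WINDOW	48u	/* stand-in for sizeof(struct video_window) */
#define SIZEOF_CLIP	24u	/* stand-in for sizeof(struct video_clip) */

int main(void)
{
	unsigned max_clips = (~0u - SIZEOF_WINDOW) / SIZEOF_CLIP;

	printf("MaxClips = %u\n", max_clips);
	printf("worst case: %u + %u * %u = %u bytes (no wrap)\n",
	       SIZEOF_WINDOW, max_clips, SIZEOF_CLIP,
	       SIZEOF_WINDOW + max_clips * SIZEOF_CLIP);
	return 0;
}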
+
+static int do_set_window(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct video_window32 __user *up = compat_ptr(arg);
+ struct video_window __user *vw;
+ struct video_clip __user *p;
+ int nclips;
+ u32 n;
+
+ if (!access_ok(VERIFY_READ, up, sizeof(struct video_window32)))
+ return -EFAULT;
+
+ if (get_user(nclips, &up->clipcount))
+ return -EFAULT;
+
+ /* Peculiar interface... */
+ if (nclips < 0)
+ nclips = VIDEO_CLIPMAP_SIZE;
+
+ if (nclips > MaxClips)
+ return -ENOMEM;
+
+ vw = compat_alloc_user_space(sizeof(struct video_window) +
+ nclips * sizeof(struct video_clip));
+
+ p = nclips ? (struct video_clip __user *)(vw + 1) : NULL;
+
+ if (get_user(n, &up->x) || put_user(n, &vw->x) ||
+ get_user(n, &up->y) || put_user(n, &vw->y) ||
+ get_user(n, &up->width) || put_user(n, &vw->width) ||
+ get_user(n, &up->height) || put_user(n, &vw->height) ||
+ get_user(n, &up->chromakey) || put_user(n, &vw->chromakey) ||
+ get_user(n, &up->flags) || put_user(n, &vw->flags) ||
+ get_user(n, &up->clipcount) || put_user(n, &vw->clipcount) ||
+ get_user(n, &up->clips) || put_user(p, &vw->clips))
+ return -EFAULT;
+
+ if (nclips) {
+ struct video_clip32 __user *u = compat_ptr(n);
+ int i;
+ if (!u)
+ return -EINVAL;
+ for (i = 0; i < nclips; i++, u++, p++) {
+ s32 v;
+ if (!access_ok(VERIFY_READ, u, sizeof(struct video_clip32)) ||
+ !access_ok(VERIFY_WRITE, p, sizeof(struct video_clip32)) ||
+ get_user(v, &u->x) ||
+ put_user(v, &p->x) ||
+ get_user(v, &u->y) ||
+ put_user(v, &p->y) ||
+ get_user(v, &u->width) ||
+ put_user(v, &p->width) ||
+ get_user(v, &u->height) ||
+ put_user(v, &p->height) ||
+ put_user(NULL, &p->next))
+ return -EFAULT;
+ }
+ }
+
+ return native_ioctl(file, VIDIOCSWIN, (unsigned long)vw);
+}
+
+static int do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ union {
+ struct video_tuner vt;
+ struct video_buffer vb;
+ struct video_window vw;
+ struct v4l2_format v2f;
+ struct v4l2_buffer v2b;
+ struct v4l2_framebuffer v2fb;
+ struct v4l2_standard v2s;
+ struct v4l2_input v2i;
+ struct v4l2_tuner v2t;
+ unsigned long vx;
+ } karg;
+ void __user *up = compat_ptr(arg);
+ int compatible_arg = 1;
+ int err = 0;
+ int realcmd = cmd;
+
+ /* First, convert the command. */
+ switch(cmd) {
+ case VIDIOCGTUNER32: cmd = VIDIOCGTUNER; break;
+ case VIDIOCSTUNER32: cmd = VIDIOCSTUNER; break;
+ case VIDIOCGWIN32: cmd = VIDIOCGWIN; break;
+ case VIDIOCGFBUF32: cmd = VIDIOCGFBUF; break;
+ case VIDIOCSFBUF32: cmd = VIDIOCSFBUF; break;
+ case VIDIOCGFREQ32: cmd = VIDIOCGFREQ; break;
+ case VIDIOCSFREQ32: cmd = VIDIOCSFREQ; break;
+ case VIDIOC_G_FMT32: cmd = VIDIOC_G_FMT; break;
+ case VIDIOC_S_FMT32: cmd = VIDIOC_S_FMT; break;
+ case VIDIOC_QUERYBUF32: cmd = VIDIOC_QUERYBUF; break;
+ case VIDIOC_QBUF32: cmd = VIDIOC_QBUF; break;
+ case VIDIOC_DQBUF32: cmd = VIDIOC_DQBUF; break;
+ case VIDIOC_STREAMON32: cmd = VIDIOC_STREAMON; break;
+ case VIDIOC_STREAMOFF32: cmd = VIDIOC_STREAMOFF; break;
+ case VIDIOC_G_FBUF32: cmd = VIDIOC_G_FBUF; break;
+ case VIDIOC_S_FBUF32: cmd = VIDIOC_S_FBUF; break;
+ case VIDIOC_OVERLAY32: cmd = VIDIOC_OVERLAY; break;
+ case VIDIOC_ENUMSTD32: realcmd = VIDIOC_ENUMSTD; break;
+ case VIDIOC_ENUMINPUT32: realcmd = VIDIOC_ENUMINPUT; break;
+ case VIDIOC_S_CTRL32: cmd = VIDIOC_S_CTRL; break;
+ case VIDIOC_G_INPUT32: cmd = VIDIOC_G_INPUT; break;
+ case VIDIOC_S_INPUT32: cmd = VIDIOC_S_INPUT; break;
+ case VIDIOC_TRY_FMT32: cmd = VIDIOC_TRY_FMT; break;
+ };
+
+ switch(cmd) {
+ case VIDIOCSTUNER:
+ case VIDIOCGTUNER:
+ err = get_video_tuner32(&karg.vt, up);
+ compatible_arg = 0;
+
+ break;
+
+ case VIDIOCSFBUF:
+ err = get_video_buffer32(&karg.vb, up);
+ compatible_arg = 0;
+ break;
+
+ case VIDIOCSFREQ:
+ case VIDIOC_S_INPUT:
+ case VIDIOC_OVERLAY:
+ case VIDIOC_STREAMON:
+ case VIDIOC_STREAMOFF:
+ err = get_user(karg.vx, (u32 __user *)up);
+ compatible_arg = 0;
+ break;
+
+ case VIDIOC_S_FBUF:
+ err = get_v4l2_framebuffer32(&karg.v2fb, up);
+ compatible_arg = 0;
+ break;
+
+ case VIDIOC_G_FMT:
+ case VIDIOC_S_FMT:
+ case VIDIOC_TRY_FMT:
+ err = get_v4l2_format32(&karg.v2f, up);
+ compatible_arg = 0;
+ break;
+
+ case VIDIOC_QUERYBUF:
+ case VIDIOC_QBUF:
+ case VIDIOC_DQBUF:
+ err = get_v4l2_buffer32(&karg.v2b, up);
+ compatible_arg = 0;
+ break;
+
+ case VIDIOC_ENUMSTD:
+ err = get_v4l2_standard(&karg.v2s, up);
+ compatible_arg = 0;
+ break;
+
+ case VIDIOC_ENUMSTD32:
+ err = get_v4l2_standard32(&karg.v2s, up);
+ compatible_arg = 0;
+ break;
+
+ case VIDIOC_ENUMINPUT:
+ err = get_v4l2_input(&karg.v2i, up);
+ compatible_arg = 0;
+ break;
+
+ case VIDIOC_ENUMINPUT32:
+ err = get_v4l2_input32(&karg.v2i, up);
+ compatible_arg = 0;
+ break;
+
+ case VIDIOC_G_TUNER:
+ case VIDIOC_S_TUNER:
+ err = get_v4l2_tuner(&karg.v2t, up);
+ compatible_arg = 0;
+ break;
+
+ case VIDIOCGWIN:
+ case VIDIOCGFBUF:
+ case VIDIOCGFREQ:
+ case VIDIOC_G_FBUF:
+ case VIDIOC_G_INPUT:
+ compatible_arg = 0;
+ };
+
+ if(err)
+ goto out;
+
+ if(compatible_arg)
+ err = native_ioctl(file, realcmd, (unsigned long)up);
+ else {
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+ err = native_ioctl(file, realcmd, (unsigned long)&karg);
+ set_fs(old_fs);
+ }
+ if(err == 0) {
+ switch(cmd) {
+ case VIDIOCGTUNER:
+ err = put_video_tuner32(&karg.vt, up);
+ break;
+
+ case VIDIOCGWIN:
+ err = put_video_window32(&karg.vw, up);
+ break;
+
+ case VIDIOCGFBUF:
+ err = put_video_buffer32(&karg.vb, up);
+ break;
+
+ case VIDIOC_G_FBUF:
+ err = put_v4l2_framebuffer32(&karg.v2fb, up);
+ break;
+
+ case VIDIOC_G_FMT:
+ case VIDIOC_S_FMT:
+ case VIDIOC_TRY_FMT:
+ err = put_v4l2_format32(&karg.v2f, up);
+ break;
+
+ case VIDIOC_QUERYBUF:
+ case VIDIOC_QBUF:
+ case VIDIOC_DQBUF:
+ err = put_v4l2_buffer32(&karg.v2b, up);
+ break;
+
+ case VIDIOC_ENUMSTD:
+ err = put_v4l2_standard(&karg.v2s, up);
+ break;
+
+ case VIDIOC_ENUMSTD32:
+ err = put_v4l2_standard32(&karg.v2s, up);
+ break;
+
+ case VIDIOC_G_TUNER:
+ case VIDIOC_S_TUNER:
+ err = put_v4l2_tuner(&karg.v2t, up);
+ break;
+
+ case VIDIOC_ENUMINPUT:
+ err = put_v4l2_input(&karg.v2i, up);
+ break;
+
+ case VIDIOC_ENUMINPUT32:
+ err = put_v4l2_input32(&karg.v2i, up);
+ break;
+
+ case VIDIOCGFREQ:
+ case VIDIOC_G_INPUT:
+ err = put_user(((u32)karg.vx), (u32 __user *)up);
+ break;
+ };
+ }
+out:
+ return err;
+}
+
+long v4l_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret = -ENOIOCTLCMD;
+
+ if (!file->f_op->ioctl)
+ return ret;
+
+ switch (cmd) {
+ case VIDIOCSWIN32:
+ ret = do_set_window(file, cmd, arg);
+ break;
+ case VIDIOCGTUNER32:
+ case VIDIOCSTUNER32:
+ case VIDIOCGWIN32:
+ case VIDIOCGFBUF32:
+ case VIDIOCSFBUF32:
+ case VIDIOCGFREQ32:
+ case VIDIOCSFREQ32:
+ case VIDIOC_QUERYCAP:
+ case VIDIOC_ENUM_FMT:
+ case VIDIOC_G_FMT32:
+ case VIDIOC_S_FMT32:
+ case VIDIOC_REQBUFS:
+ case VIDIOC_QUERYBUF32:
+ case VIDIOC_G_FBUF32:
+ case VIDIOC_S_FBUF32:
+ case VIDIOC_OVERLAY32:
+ case VIDIOC_QBUF32:
+ case VIDIOC_DQBUF32:
+ case VIDIOC_STREAMON32:
+ case VIDIOC_STREAMOFF32:
+ case VIDIOC_G_PARM:
+ case VIDIOC_G_STD:
+ case VIDIOC_S_STD:
+ case VIDIOC_G_TUNER:
+ case VIDIOC_S_TUNER:
+ case VIDIOC_ENUMSTD:
+ case VIDIOC_ENUMSTD32:
+ case VIDIOC_ENUMINPUT:
+ case VIDIOC_ENUMINPUT32:
+ case VIDIOC_G_CTRL:
+ case VIDIOC_S_CTRL32:
+ case VIDIOC_QUERYCTRL:
+ case VIDIOC_G_INPUT32:
+ case VIDIOC_S_INPUT32:
+ case VIDIOC_TRY_FMT32:
+ ret = do_video_ioctl(file, cmd, arg);
+ break;
+
+ /* Little v, the video4linux ioctls (conflict?) */
+ case VIDIOCGCAP:
+ case VIDIOCGCHAN:
+ case VIDIOCSCHAN:
+ case VIDIOCGPICT:
+ case VIDIOCSPICT:
+ case VIDIOCCAPTURE:
+ case VIDIOCKEY:
+ case VIDIOCGAUDIO:
+ case VIDIOCSAUDIO:
+ case VIDIOCSYNC:
+ case VIDIOCMCAPTURE:
+ case VIDIOCGMBUF:
+ case VIDIOCGUNIT:
+ case VIDIOCGCAPTURE:
+ case VIDIOCSCAPTURE:
+
+ /* BTTV specific... */
+ case _IOW('v', BASE_VIDIOCPRIVATE+0, char [256]):
+ case _IOR('v', BASE_VIDIOCPRIVATE+1, char [256]):
+ case _IOR('v' , BASE_VIDIOCPRIVATE+2, unsigned int):
+ case _IOW('v' , BASE_VIDIOCPRIVATE+3, char [16]): /* struct bttv_pll_info */
+ case _IOR('v' , BASE_VIDIOCPRIVATE+4, int):
+ case _IOR('v' , BASE_VIDIOCPRIVATE+5, int):
+ case _IOR('v' , BASE_VIDIOCPRIVATE+6, int):
+ case _IOR('v' , BASE_VIDIOCPRIVATE+7, int):
+ ret = native_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+ break;
+ default:
+ v4l_print_ioctl("compat_ioctl32", cmd);
+ }
+ return ret;
+}
+#else
+long v4l_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return -ENOIOCTLCMD;
+}
+#endif
+EXPORT_SYMBOL_GPL(v4l_compat_ioctl32);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c
index b7ec9bf4508..9f59541155d 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/media/video/cpia.c
@@ -3807,6 +3807,7 @@ static struct file_operations cpia_fops = {
.read = cpia_read,
.mmap = cpia_mmap,
.ioctl = cpia_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
diff --git a/drivers/media/video/cpia_pp.c b/drivers/media/video/cpia_pp.c
index ddf184f95d8..74cff626e04 100644
--- a/drivers/media/video/cpia_pp.c
+++ b/drivers/media/video/cpia_pp.c
@@ -170,16 +170,9 @@ static size_t cpia_read_nibble (struct parport *port,
/* Does the error line indicate end of data? */
if (((i /*& 1*/) == 0) &&
(parport_read_status(port) & PARPORT_STATUS_ERROR)) {
- port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DNA;
- DBG("%s: No more nibble data (%d bytes)\n",
- port->name, i/2);
-
- /* Go to reverse idle phase. */
- parport_frob_control (port,
- PARPORT_CONTROL_AUTOFD,
- PARPORT_CONTROL_AUTOFD);
- port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
- break;
+ DBG("%s: No more nibble data (%d bytes)\n",
+ port->name, i/2);
+ goto end_of_data;
}
/* Event 7: Set nAutoFd low. */
@@ -227,18 +220,21 @@ static size_t cpia_read_nibble (struct parport *port,
byte = nibble;
}
- i /= 2; /* i is now in bytes */
-
if (i == len) {
/* Read the last nibble without checking data avail. */
- port = port->physport;
- if (parport_read_status (port) & PARPORT_STATUS_ERROR)
- port->ieee1284.phase = IEEE1284_PH_HBUSY_DNA;
+ if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
+ end_of_data:
+ /* Go to reverse idle phase. */
+ parport_frob_control (port,
+ PARPORT_CONTROL_AUTOFD,
+ PARPORT_CONTROL_AUTOFD);
+ port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
+ }
else
- port->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
+ port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
}
- return i;
+ return i/2;
}
/* CPiA nonstandard "Nibble Stream" mode (2 nibbles per cycle, instead of 1)
@@ -706,12 +702,11 @@ static int cpia_pp_register(struct parport *port)
return -ENXIO;
}
- cam = kmalloc(sizeof(struct pp_cam_entry), GFP_KERNEL);
+ cam = kzalloc(sizeof(struct pp_cam_entry), GFP_KERNEL);
if (cam == NULL) {
LOG("failed to allocate camera structure\n");
return -ENOMEM;
}
- memset(cam,0,sizeof(struct pp_cam_entry));
pdev = parport_register_device(port, "cpia_pp", NULL, NULL,
NULL, 0, cam);
diff --git a/drivers/media/video/cpia_usb.c b/drivers/media/video/cpia_usb.c
index 1439cb75287..03275c37c5d 100644
--- a/drivers/media/video/cpia_usb.c
+++ b/drivers/media/video/cpia_usb.c
@@ -499,14 +499,12 @@ static int cpia_probe(struct usb_interface *intf,
printk(KERN_INFO "USB CPiA camera found\n");
- ucpia = kmalloc(sizeof(*ucpia), GFP_KERNEL);
+ ucpia = kzalloc(sizeof(*ucpia), GFP_KERNEL);
if (!ucpia) {
printk(KERN_ERR "couldn't kmalloc cpia struct\n");
return -ENOMEM;
}
- memset(ucpia, 0, sizeof(*ucpia));
-
ucpia->dev = udev;
ucpia->iface = interface->desc.bInterfaceNumber;
init_waitqueue_head(&ucpia->wq_stream);
diff --git a/drivers/media/video/cs53l32a.c b/drivers/media/video/cs53l32a.c
index 780b352ec11..8739c64785e 100644
--- a/drivers/media/video/cs53l32a.c
+++ b/drivers/media/video/cs53l32a.c
@@ -27,7 +27,7 @@
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/videodev.h>
-#include <media/audiochip.h>
+#include <media/v4l2-common.h>
MODULE_DESCRIPTION("i2c device driver for cs53l32a Audio ADC");
MODULE_AUTHOR("Martin Vaughan");
@@ -39,20 +39,6 @@ module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debugging messages\n\t\t\t0=Off (default), 1=On");
-#define cs53l32a_dbg(fmt, arg...) \
- do { \
- if (debug) \
- printk(KERN_INFO "%s debug %d-%04x: " fmt, client->driver->name, \
- i2c_adapter_id(client->adapter), client->addr , ## arg); \
- } while (0)
-
-#define cs53l32a_err(fmt, arg...) do { \
- printk(KERN_ERR "%s %d-%04x: " fmt, client->driver->name, \
- i2c_adapter_id(client->adapter), client->addr , ## arg); } while (0)
-#define cs53l32a_info(fmt, arg...) do { \
- printk(KERN_INFO "%s %d-%04x: " fmt, client->driver->name, \
- i2c_adapter_id(client->adapter), client->addr , ## arg); } while (0)
-
static unsigned short normal_i2c[] = { 0x22 >> 1, I2C_CLIENT_END };
@@ -73,50 +59,59 @@ static int cs53l32a_read(struct i2c_client *client, u8 reg)
static int cs53l32a_command(struct i2c_client *client, unsigned int cmd,
void *arg)
{
- int *input = arg;
+ struct v4l2_audio *input = arg;
+ struct v4l2_control *ctrl = arg;
switch (cmd) {
- case AUDC_SET_INPUT:
- switch (*input) {
- case AUDIO_TUNER:
- cs53l32a_write(client, 0x01, 0x01);
- break;
- case AUDIO_EXTERN:
- cs53l32a_write(client, 0x01, 0x21);
- break;
- case AUDIO_MUTE:
- cs53l32a_write(client, 0x03, 0xF0);
- break;
- case AUDIO_UNMUTE:
- cs53l32a_write(client, 0x03, 0x30);
- break;
- default:
- cs53l32a_err("Invalid input %d.\n", *input);
+ case VIDIOC_S_AUDIO:
+ /* There are 2 physical inputs, but the second input can be
+ placed in two modes: the first mode bypasses the PGA (gain),
+ the second goes through the PGA. Hence there are three
+ possible inputs to choose from. */
+ if (input->index > 2) {
+ v4l_err(client, "Invalid input %d.\n", input->index);
return -EINVAL;
}
+ cs53l32a_write(client, 0x01, 0x01 + (input->index << 4));
+ break;
+
+ case VIDIOC_G_AUDIO:
+ memset(input, 0, sizeof(*input));
+ input->index = (cs53l32a_read(client, 0x01) >> 4) & 3;
+ break;
+
+ case VIDIOC_G_CTRL:
+ if (ctrl->id == V4L2_CID_AUDIO_MUTE) {
+ ctrl->value = (cs53l32a_read(client, 0x03) & 0xc0) != 0;
+ break;
+ }
+ if (ctrl->id != V4L2_CID_AUDIO_VOLUME)
+ return -EINVAL;
+ ctrl->value = (s8)cs53l32a_read(client, 0x04);
break;
case VIDIOC_S_CTRL:
- {
- struct v4l2_control *ctrl = arg;
-
- if (ctrl->id != V4L2_CID_AUDIO_VOLUME)
- return -EINVAL;
- if (ctrl->value > 12 || ctrl->value < -90)
- return -EINVAL;
- cs53l32a_write(client, 0x04, (u8) ctrl->value);
- cs53l32a_write(client, 0x05, (u8) ctrl->value);
+ if (ctrl->id == V4L2_CID_AUDIO_MUTE) {
+ cs53l32a_write(client, 0x03, ctrl->value ? 0xf0 : 0x30);
break;
}
+ if (ctrl->id != V4L2_CID_AUDIO_VOLUME)
+ return -EINVAL;
+ if (ctrl->value > 12 || ctrl->value < -96)
+ return -EINVAL;
+ cs53l32a_write(client, 0x04, (u8) ctrl->value);
+ cs53l32a_write(client, 0x05, (u8) ctrl->value);
+ break;
case VIDIOC_LOG_STATUS:
{
u8 v = cs53l32a_read(client, 0x01);
u8 m = cs53l32a_read(client, 0x03);
+ s8 vol = cs53l32a_read(client, 0x04);
- cs53l32a_info("Input: %s%s\n",
- v == 0x21 ? "external line in" : "tuner",
+ v4l_info(client, "Input: %d%s\n", (v >> 4) & 3,
(m & 0xC0) ? " (muted)" : "");
+ v4l_info(client, "Volume: %d dB\n", vol);
break;
}
@@ -146,23 +141,21 @@ static int cs53l32a_attach(struct i2c_adapter *adapter, int address, int kind)
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return 0;
- client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
if (client == 0)
return -ENOMEM;
- memset(client, 0, sizeof(struct i2c_client));
client->addr = address;
client->adapter = adapter;
client->driver = &i2c_driver;
- client->flags = I2C_CLIENT_ALLOW_USE;
snprintf(client->name, sizeof(client->name) - 1, "cs53l32a");
- cs53l32a_info("chip found @ 0x%x (%s)\n", address << 1, adapter->name);
+ v4l_info(client, "chip found @ 0x%x (%s)\n", address << 1, adapter->name);
for (i = 1; i <= 7; i++) {
u8 v = cs53l32a_read(client, i);
- cs53l32a_dbg("Read Reg %d %02x\n", i, v);
+ v4l_dbg(1, debug, client, "Read Reg %d %02x\n", i, v);
}
/* Set cs53l32a internal register for Adaptec 2010/2410 setup */
@@ -180,7 +173,7 @@ static int cs53l32a_attach(struct i2c_adapter *adapter, int address, int kind)
for (i = 1; i <= 7; i++) {
u8 v = cs53l32a_read(client, i);
- cs53l32a_dbg("Read Reg %d %02x\n", i, v);
+ v4l_dbg(1, debug, client, "Read Reg %d %02x\n", i, v);
}
i2c_attach_client(client);
@@ -190,11 +183,7 @@ static int cs53l32a_attach(struct i2c_adapter *adapter, int address, int kind)
static int cs53l32a_probe(struct i2c_adapter *adapter)
{
-#ifdef I2C_CLASS_TV_ANALOG
if (adapter->class & I2C_CLASS_TV_ANALOG)
-#else
- if (adapter->id == I2C_HW_B_BT848)
-#endif
return i2c_probe(adapter, &addr_data, cs53l32a_attach);
return 0;
}
@@ -216,13 +205,13 @@ static int cs53l32a_detach(struct i2c_client *client)
/* i2c implementation */
static struct i2c_driver i2c_driver = {
- .name = "cs53l32a",
+ .driver = {
+ .name = "cs53l32a",
+ },
.id = I2C_DRIVERID_CS53L32A,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = cs53l32a_probe,
.detach_client = cs53l32a_detach,
.command = cs53l32a_command,
- .owner = THIS_MODULE,
};
diff --git a/drivers/media/video/cx25840/cx25840-audio.c b/drivers/media/video/cx25840/cx25840-audio.c
index 740908f8027..cb9a7981e40 100644
--- a/drivers/media/video/cx25840/cx25840-audio.c
+++ b/drivers/media/video/cx25840/cx25840-audio.c
@@ -23,11 +23,13 @@
#include "cx25840.h"
-inline static int set_audclk_freq(struct i2c_client *client,
- enum v4l2_audio_clock_freq freq)
+static int set_audclk_freq(struct i2c_client *client, u32 freq)
{
struct cx25840_state *state = i2c_get_clientdata(client);
+ if (freq != 32000 && freq != 44100 && freq != 48000)
+ return -EINVAL;
+
/* assert soft reset */
cx25840_and_or(client, 0x810, ~0x1, 0x01);
@@ -35,10 +37,9 @@ inline static int set_audclk_freq(struct i2c_client *client,
/* SA_MCLK_SEL=1, SA_MCLK_DIV=0x10 */
cx25840_write(client, 0x127, 0x50);
- switch (state->audio_input) {
- case AUDIO_TUNER:
+ if (state->aud_input != CX25840_AUDIO_SERIAL) {
switch (freq) {
- case V4L2_AUDCLK_32_KHZ:
+ case 32000:
/* VID_PLL and AUX_PLL */
cx25840_write4(client, 0x108, 0x0f040610);
@@ -51,7 +52,7 @@ inline static int set_audclk_freq(struct i2c_client *client,
cx25840_write4(client, 0x90c, 0x7ff70108);
break;
- case V4L2_AUDCLK_441_KHZ:
+ case 44100:
/* VID_PLL and AUX_PLL */
cx25840_write4(client, 0x108, 0x0f040910);
@@ -64,7 +65,7 @@ inline static int set_audclk_freq(struct i2c_client *client,
cx25840_write4(client, 0x90c, 0x596d0108);
break;
- case V4L2_AUDCLK_48_KHZ:
+ case 48000:
/* VID_PLL and AUX_PLL */
cx25840_write4(client, 0x108, 0x0f040a10);
@@ -77,14 +78,9 @@ inline static int set_audclk_freq(struct i2c_client *client,
cx25840_write4(client, 0x90c, 0xaa4f0108);
break;
}
- break;
-
- case AUDIO_EXTERN_1:
- case AUDIO_EXTERN_2:
- case AUDIO_INTERN:
- case AUDIO_RADIO:
+ } else {
switch (freq) {
- case V4L2_AUDCLK_32_KHZ:
+ case 32000:
/* VID_PLL and AUX_PLL */
cx25840_write4(client, 0x108, 0x0f04081e);
@@ -103,7 +99,7 @@ inline static int set_audclk_freq(struct i2c_client *client,
cx25840_write(client, 0x127, 0x54);
break;
- case V4L2_AUDCLK_441_KHZ:
+ case 44100:
/* VID_PLL and AUX_PLL */
cx25840_write4(client, 0x108, 0x0f040918);
@@ -119,7 +115,7 @@ inline static int set_audclk_freq(struct i2c_client *client,
cx25840_write4(client, 0x90c, 0x85730108);
break;
- case V4L2_AUDCLK_48_KHZ:
+ case 48000:
/* VID_PLL and AUX_PLL */
cx25840_write4(client, 0x108, 0x0f040a18);
@@ -135,7 +131,6 @@ inline static int set_audclk_freq(struct i2c_client *client,
cx25840_write4(client, 0x90c, 0x55550108);
break;
}
- break;
}
/* deassert soft reset */
@@ -146,51 +141,36 @@ inline static int set_audclk_freq(struct i2c_client *client,
return 0;
}
-static int set_input(struct i2c_client *client, int audio_input)
+void cx25840_audio_set_path(struct i2c_client *client)
{
struct cx25840_state *state = i2c_get_clientdata(client);
- cx25840_dbg("set audio input (%d)\n", audio_input);
-
/* stop microcontroller */
cx25840_and_or(client, 0x803, ~0x10, 0);
/* Mute everything to prevent the PFFT! */
cx25840_write(client, 0x8d3, 0x1f);
- switch (audio_input) {
- case AUDIO_TUNER:
- /* Set Path1 to Analog Demod Main Channel */
- cx25840_write4(client, 0x8d0, 0x7038061f);
-
- /* When the microcontroller detects the
- * audio format, it will unmute the lines */
- cx25840_and_or(client, 0x803, ~0x10, 0x10);
- break;
-
- case AUDIO_EXTERN_1:
- case AUDIO_EXTERN_2:
- case AUDIO_INTERN:
- case AUDIO_RADIO:
+ if (state->aud_input == CX25840_AUDIO_SERIAL) {
/* Set Path1 to Serial Audio Input */
cx25840_write4(client, 0x8d0, 0x12100101);
/* The microcontroller should not be started for the
* non-tuner inputs: autodetection is specific for
* TV audio. */
- break;
+ } else {
+ /* Set Path1 to Analog Demod Main Channel */
+ cx25840_write4(client, 0x8d0, 0x7038061f);
- default:
- cx25840_dbg("Invalid audio input selection %d\n", audio_input);
- return -EINVAL;
+ /* When the microcontroller detects the
+ * audio format, it will unmute the lines */
+ cx25840_and_or(client, 0x803, ~0x10, 0x10);
}
- state->audio_input = audio_input;
-
- return set_audclk_freq(client, state->audclk_freq);
+ set_audclk_freq(client, state->audclk_freq);
}
-inline static int get_volume(struct i2c_client *client)
+static int get_volume(struct i2c_client *client)
{
/* Volume runs +18dB to -96dB in 1/2dB steps
* change to fit the msp3400 -114dB to +12dB range */
@@ -201,7 +181,7 @@ inline static int get_volume(struct i2c_client *client)
return vol << 9;
}
-inline static void set_volume(struct i2c_client *client, int volume)
+static void set_volume(struct i2c_client *client, int volume)
{
/* First convert the volume to msp3400 values (0-127) */
int vol = volume >> 9;
@@ -218,7 +198,7 @@ inline static void set_volume(struct i2c_client *client, int volume)
cx25840_write(client, 0x8d4, 228 - (vol * 2));
}
-inline static int get_bass(struct i2c_client *client)
+static int get_bass(struct i2c_client *client)
{
/* bass is 49 steps +12dB to -12dB */
@@ -228,13 +208,13 @@ inline static int get_bass(struct i2c_client *client)
return bass;
}
-inline static void set_bass(struct i2c_client *client, int bass)
+static void set_bass(struct i2c_client *client, int bass)
{
/* PATH1_EQ_BASS_VOL */
cx25840_and_or(client, 0x8d9, ~0x3f, 48 - (bass * 48 / 0xffff));
}
-inline static int get_treble(struct i2c_client *client)
+static int get_treble(struct i2c_client *client)
{
/* treble is 49 steps +12dB to -12dB */
@@ -244,13 +224,13 @@ inline static int get_treble(struct i2c_client *client)
return treble;
}
-inline static void set_treble(struct i2c_client *client, int treble)
+static void set_treble(struct i2c_client *client, int treble)
{
/* PATH1_EQ_TREBLE_VOL */
cx25840_and_or(client, 0x8db, ~0x3f, 48 - (treble * 48 / 0xffff));
}
-inline static int get_balance(struct i2c_client *client)
+static int get_balance(struct i2c_client *client)
{
/* balance is 7 bit, 0 to -96dB */
@@ -264,7 +244,7 @@ inline static int get_balance(struct i2c_client *client)
return balance << 8;
}
-inline static void set_balance(struct i2c_client *client, int balance)
+static void set_balance(struct i2c_client *client, int balance)
{
int bal = balance >> 8;
if (bal > 0x80) {
@@ -280,17 +260,17 @@ inline static void set_balance(struct i2c_client *client, int balance)
}
}
-inline static int get_mute(struct i2c_client *client)
+static int get_mute(struct i2c_client *client)
{
/* check SRC1_MUTE_EN */
return cx25840_read(client, 0x8d3) & 0x2 ? 1 : 0;
}
-inline static void set_mute(struct i2c_client *client, int mute)
+static void set_mute(struct i2c_client *client, int mute)
{
struct cx25840_state *state = i2c_get_clientdata(client);
- if (state->audio_input == AUDIO_TUNER) {
+ if (state->aud_input != CX25840_AUDIO_SERIAL) {
/* Must turn off microcontroller in order to mute sound.
* Not sure if this is the best method, but it does work.
* If the microcontroller is running, then it will undo any
@@ -314,10 +294,9 @@ int cx25840_audio(struct i2c_client *client, unsigned int cmd, void *arg)
struct v4l2_control *ctrl = arg;
switch (cmd) {
- case AUDC_SET_INPUT:
- return set_input(client, *(int *)arg);
case VIDIOC_INT_AUDIO_CLOCK_FREQ:
- return set_audclk_freq(client, *(enum v4l2_audio_clock_freq *)arg);
+ return set_audclk_freq(client, *(u32 *)arg);
+
case VIDIOC_G_CTRL:
switch (ctrl->id) {
case V4L2_CID_AUDIO_VOLUME:
@@ -339,6 +318,7 @@ int cx25840_audio(struct i2c_client *client, unsigned int cmd, void *arg)
return -EINVAL;
}
break;
+
case VIDIOC_S_CTRL:
switch (ctrl->id) {
case V4L2_CID_AUDIO_VOLUME:
@@ -360,6 +340,7 @@ int cx25840_audio(struct i2c_client *client, unsigned int cmd, void *arg)
return -EINVAL;
}
break;
+
default:
return -EINVAL;
}
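
The get_volume()/set_volume() hunks above convert between the 16-bit V4L2 volume range and the chip's PATH1 volume register. A minimal sketch of that mapping, mirroring the formula visible in the hunk; the clamp threshold of 23 is an assumption taken from the surrounding driver code and is not shown here:

	/* Illustrative helper, not part of the patch: map a 16-bit V4L2
	 * volume (0-65535) onto the value written to PATH1_VOLUME (0x8d4). */
	static int v4l2_vol_to_path1_reg(int volume)
	{
		int vol = volume >> 9;	/* 0-127, msp3400-style scale */

		if (vol <= 23)		/* assumed clamp: at/below -96dB mutes */
			vol = 0;
		else
			vol -= 23;

		return 228 - vol * 2;	/* same expression as set_volume() above */
	}
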
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index 5b93723a176..1d75a42629d 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -45,9 +45,9 @@ static unsigned short normal_i2c[] = { 0x88 >> 1, I2C_CLIENT_END };
int cx25840_debug = 0;
-module_param(cx25840_debug, bool, 0644);
+module_param_named(debug, cx25840_debug, int, 0644);
-MODULE_PARM_DESC(cx25840_debug, "Debugging messages [0=Off (default) 1=On]");
+MODULE_PARM_DESC(debug, "Debugging messages [0=Off (default) 1=On]");
I2C_CLIENT_INSMOD;
@@ -115,13 +115,13 @@ int cx25840_and_or(struct i2c_client *client, u16 addr, u8 and_mask,
/* ----------------------------------------------------------------------- */
-static int set_input(struct i2c_client *, enum cx25840_input);
-static void input_change(struct i2c_client *);
+static int set_input(struct i2c_client *client, enum cx25840_video_input vid_input,
+ enum cx25840_audio_input aud_input);
static void log_status(struct i2c_client *client);
/* ----------------------------------------------------------------------- */
-static inline void init_dll1(struct i2c_client *client)
+static void init_dll1(struct i2c_client *client)
{
/* This is the Hauppauge sequence used to
* initialize the Delay Lock Loop 1 (ADC DLL). */
@@ -135,7 +135,7 @@ static inline void init_dll1(struct i2c_client *client)
cx25840_write(client, 0x15b, 0x10);
}
-static inline void init_dll2(struct i2c_client *client)
+static void init_dll2(struct i2c_client *client)
{
/* This is the Hauppauge sequence used to
* initialize the Delay Lock Loop 2 (ADC DLL). */
@@ -195,10 +195,8 @@ static void cx25840_initialize(struct i2c_client *client, int loadfw)
/* AC97 shift */
cx25840_write(client, 0x8cf, 0x0f);
- /* (re)set video input */
- set_input(client, state->input);
- /* (re)set audio input */
- cx25840_audio(client, AUDC_SET_INPUT, &state->audio_input);
+ /* (re)set input */
+ set_input(client, state->vid_input, state->aud_input);
/* start microcontroller */
cx25840_and_or(client, 0x803, ~0x10, 0x10);
@@ -223,7 +221,7 @@ static void input_change(struct i2c_client *client)
cx25840_write(client, 0x80b, 0x10);
} else if (std & V4L2_STD_NTSC) {
/* NTSC */
- if (state->cardtype == CARDTYPE_PVR150_WORKAROUND) {
+ if (state->pvr150_workaround) {
/* Certain Hauppauge PVR150 models have a hardware bug
that causes audio to drop out. For these models the
audio standard must be set explicitly.
@@ -259,72 +257,68 @@ static void input_change(struct i2c_client *client)
}
}
-static int set_input(struct i2c_client *client, enum cx25840_input input)
+static int set_input(struct i2c_client *client, enum cx25840_video_input vid_input,
+ enum cx25840_audio_input aud_input)
{
struct cx25840_state *state = i2c_get_clientdata(client);
+ u8 is_composite = (vid_input >= CX25840_COMPOSITE1 &&
+ vid_input <= CX25840_COMPOSITE8);
+ u8 reg;
- cx25840_dbg("decoder set input (%d)\n", input);
+ v4l_dbg(1, cx25840_debug, client, "decoder set video input %d, audio input %d\n",
+ vid_input, aud_input);
- switch (input) {
- case CX25840_TUNER:
- cx25840_dbg("now setting Tuner input\n");
-
- if (state->cardtype == CARDTYPE_PVR150 ||
- state->cardtype == CARDTYPE_PVR150_WORKAROUND) {
- /* CH_SEL_ADC2=1 */
- cx25840_and_or(client, 0x102, ~0x2, 0x02);
- }
-
- /* Video Input Control */
- if (state->cardtype == CARDTYPE_PG600) {
- cx25840_write(client, 0x103, 0x11);
- } else {
- cx25840_write(client, 0x103, 0x46);
- }
-
- /* INPUT_MODE=0 */
- cx25840_and_or(client, 0x401, ~0x6, 0x00);
- break;
-
- case CX25840_COMPOSITE0:
- case CX25840_COMPOSITE1:
- cx25840_dbg("now setting Composite input\n");
+ if (is_composite) {
+ reg = 0xf0 + (vid_input - CX25840_COMPOSITE1);
+ } else {
+ int luma = vid_input & 0xf0;
+ int chroma = vid_input & 0xf00;
- /* Video Input Control */
- if (state->cardtype == CARDTYPE_PG600) {
- cx25840_write(client, 0x103, 0x00);
- } else {
- cx25840_write(client, 0x103, 0x02);
+ if ((vid_input & ~0xff0) ||
+ luma < CX25840_SVIDEO_LUMA1 || luma > CX25840_SVIDEO_LUMA4 ||
+ chroma < CX25840_SVIDEO_CHROMA4 || chroma > CX25840_SVIDEO_CHROMA8) {
+ v4l_err(client, "0x%04x is not a valid video input!\n", vid_input);
+ return -EINVAL;
}
-
- /* INPUT_MODE=0 */
- cx25840_and_or(client, 0x401, ~0x6, 0x00);
- break;
-
- case CX25840_SVIDEO0:
- case CX25840_SVIDEO1:
- cx25840_dbg("now setting S-Video input\n");
-
- /* CH_SEL_ADC2=0 */
- cx25840_and_or(client, 0x102, ~0x2, 0x00);
-
- /* Video Input Control */
- if (state->cardtype == CARDTYPE_PG600) {
- cx25840_write(client, 0x103, 0x02);
+ reg = 0xf0 + ((luma - CX25840_SVIDEO_LUMA1) >> 4);
+ if (chroma >= CX25840_SVIDEO_CHROMA7) {
+ reg &= 0x3f;
+ reg |= (chroma - CX25840_SVIDEO_CHROMA7) >> 2;
} else {
- cx25840_write(client, 0x103, 0x10);
+ reg &= 0xcf;
+ reg |= (chroma - CX25840_SVIDEO_CHROMA4) >> 4;
}
+ }
- /* INPUT_MODE=1 */
- cx25840_and_or(client, 0x401, ~0x6, 0x02);
+ switch (aud_input) {
+ case CX25840_AUDIO_SERIAL:
+ /* do nothing, use serial audio input */
break;
+ case CX25840_AUDIO4: reg &= ~0x30; break;
+ case CX25840_AUDIO5: reg &= ~0x30; reg |= 0x10; break;
+ case CX25840_AUDIO6: reg &= ~0x30; reg |= 0x20; break;
+ case CX25840_AUDIO7: reg &= ~0xc0; break;
+ case CX25840_AUDIO8: reg &= ~0xc0; reg |= 0x40; break;
default:
- cx25840_err("%d is not a valid input!\n", input);
+ v4l_err(client, "0x%04x is not a valid audio input!\n", aud_input);
return -EINVAL;
}
- state->input = input;
+ cx25840_write(client, 0x103, reg);
+ /* Set INPUT_MODE to Composite (0) or S-Video (1) */
+ cx25840_and_or(client, 0x401, ~0x6, is_composite ? 0 : 0x02);
+ /* Set CH_SEL_ADC2 to 1 if input comes from CH3 */
+ cx25840_and_or(client, 0x102, ~0x2, (reg & 0x80) == 0 ? 2 : 0);
+ /* Set DUAL_MODE_ADC2 to 1 if input comes from both CH2 and CH3 */
+ if ((reg & 0xc0) != 0xc0 && (reg & 0x30) != 0x30)
+ cx25840_and_or(client, 0x102, ~0x4, 4);
+ else
+ cx25840_and_or(client, 0x102, ~0x4, 0);
+
+ state->vid_input = vid_input;
+ state->aud_input = aud_input;
+ cx25840_audio_set_path(client);
input_change(client);
return 0;
}
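
Worked examples of the register 0x103 value computed by the new set_input() above, derived by hand from the code and purely illustrative:

	/* CX25840_COMPOSITE3 (3):
	 *   reg = 0xf0 + (3 - CX25840_COMPOSITE1)      -> 0xf2
	 *
	 * CX25840_SVIDEO1 (0x510) = LUMA1 (0x10) | CHROMA5 (0x500):
	 *   reg  = 0xf0 + ((0x10 - 0x10) >> 4)          -> 0xf0
	 *   chroma (0x500) < CHROMA7, so reg &= 0xcf    -> 0xc0
	 *   reg |= (0x500 - 0x400) >> 4                 -> 0xd0
	 *
	 * CX25840_SVIDEO3 (0x730) = LUMA3 (0x30) | CHROMA7 (0x700):
	 *   reg  = 0xf0 + ((0x30 - 0x10) >> 4)          -> 0xf2
	 *   chroma (0x700) >= CHROMA7, so reg &= 0x3f   -> 0x32
	 *   reg |= (0x700 - 0x700) >> 2                 -> 0x32
	 */
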
@@ -395,23 +389,14 @@ static int set_v4lctrl(struct i2c_client *client, struct v4l2_control *ctrl)
struct cx25840_state *state = i2c_get_clientdata(client);
switch (ctrl->id) {
- case CX25840_CID_CARDTYPE:
- switch (ctrl->value) {
- case CARDTYPE_PVR150:
- case CARDTYPE_PVR150_WORKAROUND:
- case CARDTYPE_PG600:
- state->cardtype = ctrl->value;
- break;
- default:
- return -ERANGE;
- }
-
- set_input(client, state->input);
+ case CX25840_CID_ENABLE_PVR150_WORKAROUND:
+ state->pvr150_workaround = ctrl->value;
+ set_input(client, state->vid_input, state->aud_input);
break;
case V4L2_CID_BRIGHTNESS:
if (ctrl->value < 0 || ctrl->value > 255) {
- cx25840_err("invalid brightness setting %d\n",
+ v4l_err(client, "invalid brightness setting %d\n",
ctrl->value);
return -ERANGE;
}
@@ -421,7 +406,7 @@ static int set_v4lctrl(struct i2c_client *client, struct v4l2_control *ctrl)
case V4L2_CID_CONTRAST:
if (ctrl->value < 0 || ctrl->value > 127) {
- cx25840_err("invalid contrast setting %d\n",
+ v4l_err(client, "invalid contrast setting %d\n",
ctrl->value);
return -ERANGE;
}
@@ -431,7 +416,7 @@ static int set_v4lctrl(struct i2c_client *client, struct v4l2_control *ctrl)
case V4L2_CID_SATURATION:
if (ctrl->value < 0 || ctrl->value > 127) {
- cx25840_err("invalid saturation setting %d\n",
+ v4l_err(client, "invalid saturation setting %d\n",
ctrl->value);
return -ERANGE;
}
@@ -442,7 +427,7 @@ static int set_v4lctrl(struct i2c_client *client, struct v4l2_control *ctrl)
case V4L2_CID_HUE:
if (ctrl->value < -127 || ctrl->value > 127) {
- cx25840_err("invalid hue setting %d\n", ctrl->value);
+ v4l_err(client, "invalid hue setting %d\n", ctrl->value);
return -ERANGE;
}
@@ -455,6 +440,9 @@ static int set_v4lctrl(struct i2c_client *client, struct v4l2_control *ctrl)
case V4L2_CID_AUDIO_BALANCE:
case V4L2_CID_AUDIO_MUTE:
return cx25840_audio(client, VIDIOC_S_CTRL, ctrl);
+
+ default:
+ return -EINVAL;
}
return 0;
@@ -465,11 +453,11 @@ static int get_v4lctrl(struct i2c_client *client, struct v4l2_control *ctrl)
struct cx25840_state *state = i2c_get_clientdata(client);
switch (ctrl->id) {
- case CX25840_CID_CARDTYPE:
- ctrl->value = state->cardtype;
+ case CX25840_CID_ENABLE_PVR150_WORKAROUND:
+ ctrl->value = state->pvr150_workaround;
break;
case V4L2_CID_BRIGHTNESS:
- ctrl->value = cx25840_read(client, 0x414) + 128;
+ ctrl->value = (s8)cx25840_read(client, 0x414) + 128;
break;
case V4L2_CID_CONTRAST:
ctrl->value = cx25840_read(client, 0x415) >> 1;
@@ -478,7 +466,7 @@ static int get_v4lctrl(struct i2c_client *client, struct v4l2_control *ctrl)
ctrl->value = cx25840_read(client, 0x420) >> 1;
break;
case V4L2_CID_HUE:
- ctrl->value = cx25840_read(client, 0x422);
+ ctrl->value = (s8)cx25840_read(client, 0x422);
break;
case V4L2_CID_AUDIO_VOLUME:
case V4L2_CID_AUDIO_BASS:
@@ -527,7 +515,7 @@ static int set_v4lfmt(struct i2c_client *client, struct v4l2_format *fmt)
if ((pix->width * 16 < Hsrc) || (Hsrc < pix->width) ||
(Vlines * 8 < Vsrc) || (Vsrc < Vlines)) {
- cx25840_err("%dx%d is not a valid size!\n",
+ v4l_err(client, "%dx%d is not a valid size!\n",
pix->width, pix->height);
return -ERANGE;
}
@@ -545,7 +533,7 @@ static int set_v4lfmt(struct i2c_client *client, struct v4l2_format *fmt)
else
filter = 3;
- cx25840_dbg("decoder set size %dx%d -> scale %ux%u\n",
+ v4l_dbg(1, cx25840_debug, client, "decoder set size %dx%d -> scale %ux%u\n",
pix->width, pix->height, HSC, VSC);
/* HSCALE=HSC */
@@ -574,17 +562,98 @@ static int set_v4lfmt(struct i2c_client *client, struct v4l2_format *fmt)
/* ----------------------------------------------------------------------- */
+static struct v4l2_queryctrl cx25840_qctrl[] = {
+ {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+ .default_value = 128,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_CONTRAST,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Contrast",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+ .default_value = 64,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_SATURATION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Saturation",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+ .default_value = 64,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_HUE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Hue",
+ .minimum = -128,
+ .maximum = 127,
+ .step = 1,
+ .default_value = 0,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_AUDIO_VOLUME,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Volume",
+ .minimum = 0,
+ .maximum = 65535,
+ .step = 65535/100,
+ .default_value = 58880,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_AUDIO_BALANCE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Balance",
+ .minimum = 0,
+ .maximum = 65535,
+ .step = 65535/100,
+ .default_value = 32768,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_AUDIO_MUTE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Mute",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_AUDIO_BASS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Bass",
+ .minimum = 0,
+ .maximum = 65535,
+ .step = 65535/100,
+ .default_value = 32768,
+ }, {
+ .id = V4L2_CID_AUDIO_TREBLE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Treble",
+ .minimum = 0,
+ .maximum = 65535,
+ .step = 65535/100,
+ .default_value = 32768,
+ },
+};
+
+/* ----------------------------------------------------------------------- */
+
static int cx25840_command(struct i2c_client *client, unsigned int cmd,
void *arg)
{
struct cx25840_state *state = i2c_get_clientdata(client);
struct v4l2_tuner *vt = arg;
- int result = 0;
switch (cmd) {
- case 0:
- break;
-
#ifdef CONFIG_VIDEO_ADV_DEBUG
/* ioctls to allow direct access to the
* cx25840 registers for testing */
@@ -615,18 +684,16 @@ static int cx25840_command(struct i2c_client *client, unsigned int cmd,
return cx25840_vbi(client, cmd, arg);
case VIDIOC_INT_AUDIO_CLOCK_FREQ:
- case AUDC_SET_INPUT:
- result = cx25840_audio(client, cmd, arg);
- break;
+ return cx25840_audio(client, cmd, arg);
case VIDIOC_STREAMON:
- cx25840_dbg("enable output\n");
+ v4l_dbg(1, cx25840_debug, client, "enable output\n");
cx25840_write(client, 0x115, 0x8c);
cx25840_write(client, 0x116, 0x07);
break;
case VIDIOC_STREAMOFF:
- cx25840_dbg("disable output\n");
+ v4l_dbg(1, cx25840_debug, client, "disable output\n");
cx25840_write(client, 0x115, 0x00);
cx25840_write(client, 0x116, 0x00);
break;
@@ -636,28 +703,58 @@ static int cx25840_command(struct i2c_client *client, unsigned int cmd,
break;
case VIDIOC_G_CTRL:
- result = get_v4lctrl(client, (struct v4l2_control *)arg);
- break;
+ return get_v4lctrl(client, (struct v4l2_control *)arg);
case VIDIOC_S_CTRL:
- result = set_v4lctrl(client, (struct v4l2_control *)arg);
- break;
+ return set_v4lctrl(client, (struct v4l2_control *)arg);
+
+ case VIDIOC_QUERYCTRL:
+ {
+ struct v4l2_queryctrl *qc = arg;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cx25840_qctrl); i++)
+ if (qc->id && qc->id == cx25840_qctrl[i].id) {
+ memcpy(qc, &cx25840_qctrl[i], sizeof(*qc));
+ return 0;
+ }
+ return -EINVAL;
+ }
case VIDIOC_G_STD:
*(v4l2_std_id *)arg = cx25840_get_v4lstd(client);
break;
case VIDIOC_S_STD:
- result = set_v4lstd(client, *(v4l2_std_id *)arg);
+ state->radio = 0;
+ return set_v4lstd(client, *(v4l2_std_id *)arg);
+
+ case AUDC_SET_RADIO:
+ state->radio = 1;
break;
case VIDIOC_G_INPUT:
- *(int *)arg = state->input;
+ *(int *)arg = state->vid_input;
break;
case VIDIOC_S_INPUT:
- result = set_input(client, *(int *)arg);
+ return set_input(client, *(enum cx25840_video_input *)arg, state->aud_input);
+
+ case VIDIOC_S_AUDIO:
+ {
+ struct v4l2_audio *input = arg;
+
+ return set_input(client, state->vid_input, input->index);
+ }
+
+ case VIDIOC_G_AUDIO:
+ {
+ struct v4l2_audio *input = arg;
+
+ memset(input, 0, sizeof(*input));
+ input->index = state->aud_input;
break;
+ }
case VIDIOC_S_FREQUENCY:
input_change(client);
@@ -670,6 +767,9 @@ static int cx25840_command(struct i2c_client *client, unsigned int cmd,
u8 vpres = cx25840_read(client, 0x80a) & 0x10;
int val = 0;
+ if (state->radio)
+ break;
+
vt->capability |=
V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 |
V4L2_TUNER_CAP_LANG2 | V4L2_TUNER_CAP_SAP;
@@ -724,12 +824,10 @@ static int cx25840_command(struct i2c_client *client, unsigned int cmd,
break;
case VIDIOC_G_FMT:
- result = get_v4lfmt(client, (struct v4l2_format *)arg);
- break;
+ return get_v4lfmt(client, (struct v4l2_format *)arg);
case VIDIOC_S_FMT:
- result = set_v4lfmt(client, (struct v4l2_format *)arg);
- break;
+ return set_v4lfmt(client, (struct v4l2_format *)arg);
case VIDIOC_INT_RESET:
cx25840_initialize(client, 0);
@@ -741,11 +839,10 @@ static int cx25840_command(struct i2c_client *client, unsigned int cmd,
break;
default:
- cx25840_err("invalid ioctl %x\n", cmd);
return -EINVAL;
}
- return result;
+ return 0;
}
/* ----------------------------------------------------------------------- */
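
The VIDIOC_QUERYCTRL case added above only answers for controls listed in cx25840_qctrl. A minimal user-space sketch that enumerates them, assuming the bridge driver forwards the ioctl to this chip; the device path is an example and error handling is kept to a minimum:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		struct v4l2_queryctrl qc;
		__u32 id;
		int fd = open("/dev/video0", O_RDONLY);	/* example node */

		if (fd < 0)
			return 1;

		for (id = V4L2_CID_BASE; id < V4L2_CID_LASTP1; id++) {
			memset(&qc, 0, sizeof(qc));
			qc.id = id;
			if (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0)
				printf("%s: %d..%d (default %d)\n", qc.name,
				       qc.minimum, qc.maximum, qc.default_value);
		}
		return 0;
	}
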
@@ -765,18 +862,16 @@ static int cx25840_detect_client(struct i2c_adapter *adapter, int address,
if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
return 0;
- client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
if (client == 0)
return -ENOMEM;
- memset(client, 0, sizeof(struct i2c_client));
client->addr = address;
client->adapter = adapter;
client->driver = &i2c_driver_cx25840;
- client->flags = I2C_CLIENT_ALLOW_USE;
snprintf(client->name, sizeof(client->name) - 1, "cx25840");
- cx25840_dbg("detecting cx25840 client on address 0x%x\n", address << 1);
+ v4l_dbg(1, cx25840_debug, client, "detecting cx25840 client on address 0x%x\n", address << 1);
device_id = cx25840_read(client, 0x101) << 8;
device_id |= cx25840_read(client, 0x100);
@@ -784,12 +879,12 @@ static int cx25840_detect_client(struct i2c_adapter *adapter, int address,
/* The high byte of the device ID should be
* 0x84 if chip is present */
if ((device_id & 0xff00) != 0x8400) {
- cx25840_dbg("cx25840 not found\n");
+ v4l_dbg(1, cx25840_debug, client, "cx25840 not found\n");
kfree(client);
return 0;
}
- cx25840_info("cx25%3x-2%x found @ 0x%x (%s)\n",
+ v4l_info(client, "cx25%3x-2%x found @ 0x%x (%s)\n",
(device_id & 0xfff0) >> 4,
(device_id & 0x0f) < 3 ? (device_id & 0x0f) + 1 : 3,
address << 1, adapter->name);
@@ -802,10 +897,10 @@ static int cx25840_detect_client(struct i2c_adapter *adapter, int address,
i2c_set_clientdata(client, state);
memset(state, 0, sizeof(struct cx25840_state));
- state->input = CX25840_TUNER;
- state->audclk_freq = V4L2_AUDCLK_48_KHZ;
- state->audio_input = AUDIO_TUNER;
- state->cardtype = CARDTYPE_PVR150;
+ state->vid_input = CX25840_COMPOSITE7;
+ state->aud_input = CX25840_AUDIO8;
+ state->audclk_freq = 48000;
+ state->pvr150_workaround = 0;
cx25840_initialize(client, 1);
@@ -816,11 +911,7 @@ static int cx25840_detect_client(struct i2c_adapter *adapter, int address,
static int cx25840_attach_adapter(struct i2c_adapter *adapter)
{
-#ifdef I2C_CLASS_TV_ANALOG
if (adapter->class & I2C_CLASS_TV_ANALOG)
-#else
- if (adapter->id == I2C_HW_B_BT848)
-#endif
return i2c_probe(adapter, &addr_data, &cx25840_detect_client);
return 0;
}
@@ -844,15 +935,13 @@ static int cx25840_detach_client(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static struct i2c_driver i2c_driver_cx25840 = {
- .name = "cx25840",
-
+ .driver = {
+ .name = "cx25840",
+ },
.id = I2C_DRIVERID_CX25840,
- .flags = I2C_DF_NOTIFY,
-
.attach_adapter = cx25840_attach_adapter,
.detach_client = cx25840_detach_client,
.command = cx25840_command,
- .owner = THIS_MODULE,
};
@@ -893,11 +982,13 @@ static void log_status(struct i2c_client *client)
u8 pref_mode = cx25840_read(client, 0x809);
u8 afc0 = cx25840_read(client, 0x80b);
u8 mute_ctl = cx25840_read(client, 0x8d3);
+ int vid_input = state->vid_input;
+ int aud_input = state->aud_input;
char *p;
- cx25840_info("Video signal: %spresent\n",
+ v4l_info(client, "Video signal: %spresent\n",
(microctrl_vidfmt & 0x10) ? "" : "not ");
- cx25840_info("Detected format: %s\n",
+ v4l_info(client, "Detected format: %s\n",
fmt_strs[gen_stat1 & 0xf]);
switch (mod_det_stat0) {
@@ -912,7 +1003,7 @@ static void log_status(struct i2c_client *client)
case 0xfe: p = "forced mode"; break;
default: p = "not defined";
}
- cx25840_info("Detected audio mode: %s\n", p);
+ v4l_info(client, "Detected audio mode: %s\n", p);
switch (mod_det_stat1) {
case 0x00: p = "not defined"; break;
@@ -938,10 +1029,10 @@ static void log_status(struct i2c_client *client)
case 0xff: p = "no detected audio standard"; break;
default: p = "not defined";
}
- cx25840_info("Detected audio standard: %s\n", p);
- cx25840_info("Audio muted: %s\n",
+ v4l_info(client, "Detected audio standard: %s\n", p);
+ v4l_info(client, "Audio muted: %s\n",
(mute_ctl & 0x2) ? "yes" : "no");
- cx25840_info("Audio microcontroller: %s\n",
+ v4l_info(client, "Audio microcontroller: %s\n",
(download_ctl & 0x10) ? "running" : "stopped");
switch (audio_config >> 4) {
@@ -963,7 +1054,7 @@ static void log_status(struct i2c_client *client)
case 0x0f: p = "automatic detection"; break;
default: p = "undefined";
}
- cx25840_info("Configured audio standard: %s\n", p);
+ v4l_info(client, "Configured audio standard: %s\n", p);
if ((audio_config >> 4) < 0xF) {
switch (audio_config & 0xF) {
@@ -980,7 +1071,7 @@ static void log_status(struct i2c_client *client)
case 0x0a: p = "SAP"; break;
default: p = "undefined";
}
- cx25840_info("Configured audio mode: %s\n", p);
+ v4l_info(client, "Configured audio mode: %s\n", p);
} else {
switch (audio_config & 0xF) {
case 0x00: p = "BG"; break;
@@ -996,30 +1087,27 @@ static void log_status(struct i2c_client *client)
case 0x0f: p = "automatic standard and mode detection"; break;
default: p = "undefined";
}
- cx25840_info("Configured audio system: %s\n", p);
+ v4l_info(client, "Configured audio system: %s\n", p);
}
- cx25840_info("Specified standard: %s\n",
+ v4l_info(client, "Specified standard: %s\n",
vidfmt_sel ? fmt_strs[vidfmt_sel] : "automatic detection");
- switch (state->input) {
- case CX25840_COMPOSITE0: p = "Composite 0"; break;
- case CX25840_COMPOSITE1: p = "Composite 1"; break;
- case CX25840_SVIDEO0: p = "S-Video 0"; break;
- case CX25840_SVIDEO1: p = "S-Video 1"; break;
- case CX25840_TUNER: p = "Tuner"; break;
+ if (vid_input >= CX25840_COMPOSITE1 &&
+ vid_input <= CX25840_COMPOSITE8) {
+ v4l_info(client, "Specified video input: Composite %d\n",
+ vid_input - CX25840_COMPOSITE1 + 1);
+ } else {
+ v4l_info(client, "Specified video input: S-Video (Luma In%d, Chroma In%d)\n",
+ (vid_input & 0xf0) >> 4, (vid_input & 0xf00) >> 8);
}
- cx25840_info("Specified input: %s\n", p);
- cx25840_info("Specified audio input: %s\n",
- state->audio_input == 0 ? "Tuner" : "External");
-
- switch (state->audclk_freq) {
- case V4L2_AUDCLK_441_KHZ: p = "44.1 kHz"; break;
- case V4L2_AUDCLK_48_KHZ: p = "48 kHz"; break;
- case V4L2_AUDCLK_32_KHZ: p = "32 kHz"; break;
- default: p = "undefined";
+ if (aud_input) {
+ v4l_info(client, "Specified audio input: Tuner (In%d)\n", aud_input);
+ } else {
+ v4l_info(client, "Specified audio input: External\n");
}
- cx25840_info("Specified audioclock freq: %s\n", p);
+
+ v4l_info(client, "Specified audioclock freq: %d Hz\n", state->audclk_freq);
switch (pref_mode & 0xf) {
case 0: p = "mono/language A"; break;
@@ -1032,7 +1120,7 @@ static void log_status(struct i2c_client *client)
case 7: p = "language AB"; break;
default: p = "undefined";
}
- cx25840_info("Preferred audio mode: %s\n", p);
+ v4l_info(client, "Preferred audio mode: %s\n", p);
if ((audio_config & 0xf) == 0xf) {
switch ((afc0 >> 3) & 0x3) {
@@ -1041,7 +1129,7 @@ static void log_status(struct i2c_client *client)
case 2: p = "autodetect"; break;
default: p = "undefined";
}
- cx25840_info("Selected 65 MHz format: %s\n", p);
+ v4l_info(client, "Selected 65 MHz format: %s\n", p);
switch (afc0 & 0x7) {
case 0: p = "chroma"; break;
@@ -1051,6 +1139,6 @@ static void log_status(struct i2c_client *client)
case 4: p = "autodetect"; break;
default: p = "undefined";
}
- cx25840_info("Selected 45 MHz format: %s\n", p);
+ v4l_info(client, "Selected 45 MHz format: %s\n", p);
}
}
diff --git a/drivers/media/video/cx25840/cx25840-firmware.c b/drivers/media/video/cx25840/cx25840-firmware.c
index df9d50a7554..e1a7823d82c 100644
--- a/drivers/media/video/cx25840/cx25840-firmware.c
+++ b/drivers/media/video/cx25840/cx25840-firmware.c
@@ -15,7 +15,6 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
-
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@@ -38,7 +37,7 @@ module_param(firmware, charp, 0444);
MODULE_PARM_DESC(fastfw, "Load firmware fast [0=100MHz 1=333MHz (default)]");
MODULE_PARM_DESC(firmware, "Firmware image [default: " FWFILE "]");
-static inline void set_i2c_delay(struct i2c_client *client, int delay)
+static void set_i2c_delay(struct i2c_client *client, int delay)
{
struct i2c_algo_bit_data *algod = client->adapter->algo_data;
@@ -52,7 +51,7 @@ static inline void set_i2c_delay(struct i2c_client *client, int delay)
}
}
-static inline void start_fw_load(struct i2c_client *client)
+static void start_fw_load(struct i2c_client *client)
{
/* DL_ADDR_LB=0 DL_ADDR_HB=0 */
cx25840_write(client, 0x800, 0x00);
@@ -66,7 +65,7 @@ static inline void start_fw_load(struct i2c_client *client)
set_i2c_delay(client, 3);
}
-static inline void end_fw_load(struct i2c_client *client)
+static void end_fw_load(struct i2c_client *client)
{
if (fastfw)
set_i2c_delay(client, 10);
@@ -77,38 +76,47 @@ static inline void end_fw_load(struct i2c_client *client)
cx25840_write(client, 0x803, 0x03);
}
-static inline int check_fw_load(struct i2c_client *client, int size)
+static int check_fw_load(struct i2c_client *client, int size)
{
/* DL_ADDR_HB DL_ADDR_LB */
int s = cx25840_read(client, 0x801) << 8;
s |= cx25840_read(client, 0x800);
if (size != s) {
- cx25840_err("firmware %s load failed\n", firmware);
+ v4l_err(client, "firmware %s load failed\n", firmware);
return -EINVAL;
}
- cx25840_info("loaded %s firmware (%d bytes)\n", firmware, size);
+ v4l_info(client, "loaded %s firmware (%d bytes)\n", firmware, size);
return 0;
}
-static inline int fw_write(struct i2c_client *client, u8 * data, int size)
+static int fw_write(struct i2c_client *client, u8 * data, int size)
{
- if (i2c_master_send(client, data, size) < size) {
+ int sent;
+
+ if ((sent = i2c_master_send(client, data, size)) < size) {
if (fastfw) {
- cx25840_err("333MHz i2c firmware load failed\n");
+ v4l_err(client, "333MHz i2c firmware load failed\n");
fastfw = 0;
set_i2c_delay(client, 10);
+ if (sent > 2) {
+ u16 dl_addr = cx25840_read(client, 0x801) << 8;
+ dl_addr |= cx25840_read(client, 0x800);
+ dl_addr -= sent - 2;
+ cx25840_write(client, 0x801, dl_addr >> 8);
+ cx25840_write(client, 0x800, dl_addr & 0xff);
+ }
+
if (i2c_master_send(client, data, size) < size) {
- cx25840_err
- ("100MHz i2c firmware load failed\n");
+ v4l_err(client, "100MHz i2c firmware load failed\n");
return -ENOSYS;
}
} else {
- cx25840_err("firmware load i2c failure\n");
+ v4l_err(client, "firmware load i2c failure\n");
return -ENOSYS;
}
@@ -124,7 +132,7 @@ int cx25840_loadfw(struct i2c_client *client)
int size, send, retval;
if (request_firmware(&fw, firmware, FWDEV(client)) != 0) {
- cx25840_err("unable to open firmware %s\n", firmware);
+ v4l_err(client, "unable to open firmware %s\n", firmware);
return -EINVAL;
}
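
The fw_write() change above rewinds the firmware download pointer when a fast (333MHz) transfer is cut short: the first two bytes of each i2c message carry a register address rather than firmware data, so only sent - 2 firmware bytes reached the chip, and (as check_fw_load() suggests) the chip advances DL_ADDR as those bytes arrive. A worked example with assumed numbers:

	/* Illustration only: suppose a 64-byte message (2 address bytes +
	 * 62 bytes of firmware) is attempted and i2c_master_send() returns 34.
	 * Then 34 - 2 = 32 firmware bytes were consumed and DL_ADDR has
	 * advanced by 32, so the code reads DL_ADDR back from 0x801/0x800,
	 * subtracts 32, and retries the whole block at 100MHz. */
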
diff --git a/drivers/media/video/cx25840/cx25840-vbi.c b/drivers/media/video/cx25840/cx25840-vbi.c
index 13ba4e15dde..04d879da7d6 100644
--- a/drivers/media/video/cx25840/cx25840-vbi.c
+++ b/drivers/media/video/cx25840/cx25840-vbi.c
@@ -22,7 +22,7 @@
#include "cx25840.h"
-static inline int odd_parity(u8 c)
+static int odd_parity(u8 c)
{
c ^= (c >> 4);
c ^= (c >> 2);
@@ -31,7 +31,7 @@ static inline int odd_parity(u8 c)
return c & 1;
}
-static inline int decode_vps(u8 * dst, u8 * p)
+static int decode_vps(u8 * dst, u8 * p)
{
static const u8 biphase_tbl[] = {
0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
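
odd_parity() above computes a byte's parity by XOR-folding the byte onto itself. One byte worked by hand, purely for illustration:

	/* Example, not from the patch: odd_parity(0xA6)
	 *   c = 0xA6            (10100110, four bits set)
	 *   c ^= c >> 4   ->  0xAC
	 *   c ^= c >> 2   ->  0x87
	 *   c ^= c >> 1   ->  0xC4
	 *   c & 1         ->  0   (even number of bits, so not odd parity) */
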
diff --git a/drivers/media/video/cx25840/cx25840.h b/drivers/media/video/cx25840/cx25840.h
index 4932ed1c9b1..fd22f30dcc1 100644
--- a/drivers/media/video/cx25840/cx25840.h
+++ b/drivers/media/video/cx25840/cx25840.h
@@ -24,47 +24,60 @@
#include <linux/videodev2.h>
#include <linux/i2c.h>
-extern int cx25840_debug;
-
-#define cx25840_dbg(fmt, arg...) do { if (cx25840_debug) \
- printk(KERN_INFO "%s debug %d-%04x: " fmt, client->driver->name, \
- i2c_adapter_id(client->adapter), client->addr , ## arg); } while (0)
-
-#define cx25840_err(fmt, arg...) do { \
- printk(KERN_ERR "%s %d-%04x: " fmt, client->driver->name, \
- i2c_adapter_id(client->adapter), client->addr , ## arg); } while (0)
-
-#define cx25840_info(fmt, arg...) do { \
- printk(KERN_INFO "%s %d-%04x: " fmt, client->driver->name, \
- i2c_adapter_id(client->adapter), client->addr , ## arg); } while (0)
-
-#define CX25840_CID_CARDTYPE (V4L2_CID_PRIVATE_BASE+0)
-
-/* The CARDTYPE_PVR150_WORKAROUND cardtype activates a workaround for a
- hardware bug that is present in PVR150 (and possible PVR500) cards that
- have certain NTSC tuners (tveeprom model numbers 85, 99 and 112). The
+/* ENABLE_PVR150_WORKAROUND activates a workaround for a hardware bug that is
+ present in Hauppauge PVR-150 (and possibly PVR-500) cards that have
+ certain NTSC tuners (tveeprom tuner model numbers 85, 99 and 112). The
audio autodetect fails on some channels for these models and the workaround
is to select the audio standard explicitly. Many thanks to Hauppauge for
providing this information. */
-enum cx25840_cardtype {
- CARDTYPE_PVR150,
- CARDTYPE_PG600,
- CARDTYPE_PVR150_WORKAROUND,
+#define CX25840_CID_ENABLE_PVR150_WORKAROUND (V4L2_CID_PRIVATE_BASE+0)
+
+enum cx25840_video_input {
+ /* Composite video inputs In1-In8 */
+ CX25840_COMPOSITE1 = 1,
+ CX25840_COMPOSITE2,
+ CX25840_COMPOSITE3,
+ CX25840_COMPOSITE4,
+ CX25840_COMPOSITE5,
+ CX25840_COMPOSITE6,
+ CX25840_COMPOSITE7,
+ CX25840_COMPOSITE8,
+
+ /* S-Video inputs consist of one luma input (In1-In4) ORed with one
+ chroma input (In5-In8) */
+ CX25840_SVIDEO_LUMA1 = 0x10,
+ CX25840_SVIDEO_LUMA2 = 0x20,
+ CX25840_SVIDEO_LUMA3 = 0x30,
+ CX25840_SVIDEO_LUMA4 = 0x40,
+ CX25840_SVIDEO_CHROMA4 = 0x400,
+ CX25840_SVIDEO_CHROMA5 = 0x500,
+ CX25840_SVIDEO_CHROMA6 = 0x600,
+ CX25840_SVIDEO_CHROMA7 = 0x700,
+ CX25840_SVIDEO_CHROMA8 = 0x800,
+
+ /* S-Video aliases for common luma/chroma combinations */
+ CX25840_SVIDEO1 = 0x510,
+ CX25840_SVIDEO2 = 0x620,
+ CX25840_SVIDEO3 = 0x730,
+ CX25840_SVIDEO4 = 0x840,
};
-enum cx25840_input {
- CX25840_TUNER,
- CX25840_COMPOSITE0,
- CX25840_COMPOSITE1,
- CX25840_SVIDEO0,
- CX25840_SVIDEO1
+enum cx25840_audio_input {
+ /* Audio inputs: serial or In4-In8 */
+ CX25840_AUDIO_SERIAL,
+ CX25840_AUDIO4 = 4,
+ CX25840_AUDIO5,
+ CX25840_AUDIO6,
+ CX25840_AUDIO7,
+ CX25840_AUDIO8,
};
struct cx25840_state {
- enum cx25840_cardtype cardtype;
- enum cx25840_input input;
- int audio_input;
- enum v4l2_audio_clock_freq audclk_freq;
+ int pvr150_workaround;
+ int radio;
+ enum cx25840_video_input vid_input;
+ enum cx25840_audio_input aud_input;
+ u32 audclk_freq;
};
/* ----------------------------------------------------------------------- */
@@ -83,6 +96,7 @@ int cx25840_loadfw(struct i2c_client *client);
/* ----------------------------------------------------------------------- */
/* cx25840-audio.c */
int cx25840_audio(struct i2c_client *client, unsigned int cmd, void *arg);
+void cx25840_audio_set_path(struct i2c_client *client);
/* ----------------------------------------------------------------------- */
/* cx25840-vbi.c */
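
Because the S-Video inputs are encoded as a luma value ORed with a chroma value, a bridge driver can describe any supported wiring by combining the two halves; the CX25840_SVIDEOn aliases are just common combinations. A hypothetical board-setup fragment (the names are illustrative, not from the patch):

	/* Hypothetical board description: luma on In2, chroma on In6.
	 * CX25840_SVIDEO_LUMA2 | CX25840_SVIDEO_CHROMA6 == 0x620,
	 * which is the same value as the CX25840_SVIDEO2 alias. */
	static const enum cx25840_video_input example_svideo_in =
		CX25840_SVIDEO_LUMA2 | CX25840_SVIDEO_CHROMA6;

	static const enum cx25840_audio_input example_audio_in =
		CX25840_AUDIO_SERIAL;	/* audio arrives over the serial bus */
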
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index 85ba4106dc7..76fcb4e995c 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -29,6 +29,21 @@ config VIDEO_CX88_DVB
You must also select one or more DVB/ATSC demodulators.
If you are unsure which you need, choose all of them.
+config VIDEO_CX88_ALSA
+ tristate "ALSA DMA audio support"
+ depends on VIDEO_CX88 && SND
+ select SND_PCM_OSS
+ ---help---
+ This is a video4linux driver for direct (DMA) audio on
+ Conexant 2388x based TV cards.
+ It only works with boards that have PCI function 01 enabled.
+ To check whether your board supports it, use lspci -n.
+ If supported, you should see a 14f1:8801 or 14f1:8811
+ PCI device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cx88-alsa.
+
config VIDEO_CX88_DVB_ALL_FRONTENDS
bool "Build all supported frontends for cx2388x based TV cards"
default y
@@ -38,6 +53,7 @@ config VIDEO_CX88_DVB_ALL_FRONTENDS
select DVB_CX22702
select DVB_LGDT330X
select DVB_NXT200X
+ select DVB_CX24123
---help---
This builds cx88-dvb with all currently supported frontend
demodulators. If you wish to tweak your configuration, and
@@ -89,3 +105,12 @@ config VIDEO_CX88_DVB_NXT200X
---help---
This adds ATSC 8VSB and QAM64/256 support for cards based on the
Conexant 2388x chip and the NXT2002/NXT2004 demodulator.
+
+config VIDEO_CX88_DVB_CX24123
+ bool "Conexant CX24123 DVB-S Support"
+ default y
+ depends on VIDEO_CX88_DVB && !VIDEO_CX88_DVB_ALL_FRONTENDS
+ select DVB_CX24123
+ ---help---
+ This adds DVB-S support for cards based on the
+ Conexant 2388x chip and the CX24123 demodulator.
diff --git a/drivers/media/video/cx88/Makefile b/drivers/media/video/cx88/Makefile
index 54401b02b7c..e4b2134fe56 100644
--- a/drivers/media/video/cx88/Makefile
+++ b/drivers/media/video/cx88/Makefile
@@ -4,7 +4,7 @@ cx8800-objs := cx88-video.o cx88-vbi.o
cx8802-objs := cx88-mpeg.o
obj-$(CONFIG_VIDEO_CX88) += cx88xx.o cx8800.o cx8802.o cx88-blackbird.o
-obj-$(CONFIG_VIDEO_CX88_DVB) += cx88-dvb.o
+obj-$(CONFIG_VIDEO_CX88_DVB) += cx88-dvb.o cx88-vp3054-i2c.o
EXTRA_CFLAGS += -I$(src)/..
EXTRA_CFLAGS += -I$(srctree)/drivers/media/dvb/dvb-core
@@ -16,5 +16,7 @@ extra-cflags-$(CONFIG_DVB_OR51132) += -DHAVE_OR51132=1
extra-cflags-$(CONFIG_DVB_LGDT330X) += -DHAVE_LGDT330X=1
extra-cflags-$(CONFIG_DVB_MT352) += -DHAVE_MT352=1
extra-cflags-$(CONFIG_DVB_NXT200X) += -DHAVE_NXT200X=1
+extra-cflags-$(CONFIG_DVB_CX24123) += -DHAVE_CX24123=1
+extra-cflags-$(CONFIG_VIDEO_CX88_DVB) += -DHAVE_VP3054_I2C=1
EXTRA_CFLAGS += $(extra-cflags-y) $(extra-cflags-m)
diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
new file mode 100644
index 00000000000..7695b521eb3
--- /dev/null
+++ b/drivers/media/video/cx88/cx88-alsa.c
@@ -0,0 +1,848 @@
+/*
+ *
+ * Support for audio capture
+ * PCI function #1 of the cx2388x.
+ *
+ * (c) 2005,2006 Ricardo Cerqueira <v4l@cerqueira.org>
+ * (c) 2005 Mauro Carvalho Chehab <mchehab@brturbo.com.br>
+ * Based on a dummy cx88 module by Gerd Knorr <kraxel@bytesex.org>
+ * Based on dummy.c by Jaroslav Kysela <perex@suse.cz>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <asm/delay.h>
+#include <sound/driver.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/control.h>
+#include <sound/initval.h>
+
+#include "cx88.h"
+#include "cx88-reg.h"
+
+#define dprintk(level,fmt, arg...) if (debug >= level) \
+ printk(KERN_INFO "%s/1: " fmt, chip->core->name , ## arg)
+
+#define dprintk_core(level,fmt, arg...) if (debug >= level) \
+ printk(KERN_DEBUG "%s/1: " fmt, chip->core->name , ## arg)
+
+
+/****************************************************************************
+ Data type declarations - Can be moved to a header file later
+ ****************************************************************************/
+
+/* These can be replaced after done */
+#define MIXER_ADDR_LAST MAX_CX88_INPUT
+
+struct cx88_audio_dev {
+ struct cx88_core *core;
+ struct cx88_dmaqueue q;
+
+ /* pci i/o */
+ struct pci_dev *pci;
+ unsigned char pci_rev,pci_lat;
+
+ /* audio controls */
+ int irq;
+
+ snd_card_t *card;
+
+ spinlock_t reg_lock;
+
+ unsigned int dma_size;
+ unsigned int period_size;
+ unsigned int num_periods;
+
+ struct videobuf_dmabuf dma_risc;
+
+ int mixer_volume[MIXER_ADDR_LAST+1][2];
+ int capture_source[MIXER_ADDR_LAST+1][2];
+
+ long int read_count;
+ long int read_offset;
+
+ struct cx88_buffer *buf;
+
+ long opened;
+ snd_pcm_substream_t *substream;
+
+};
+typedef struct cx88_audio_dev snd_cx88_card_t;
+
+
+
+/****************************************************************************
+ Module global static vars
+ ****************************************************************************/
+
+static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
+static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
+static int enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1};
+static snd_card_t *snd_cx88_cards[SNDRV_CARDS];
+
+module_param_array(enable, bool, NULL, 0444);
+MODULE_PARM_DESC(enable, "Enable cx88x soundcard. Default: enabled.");
+
+module_param_array(index, int, NULL, 0444);
+MODULE_PARM_DESC(index, "Index value for cx88x capture interface(s).");
+
+
+/****************************************************************************
+ Module macros
+ ****************************************************************************/
+
+MODULE_DESCRIPTION("ALSA driver module for cx2388x based TV cards");
+MODULE_AUTHOR("Ricardo Cerqueira");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@brturbo.com.br>");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("{{Conexant,23881},"
+ "{Conexant,23882},"
+ "{Conexant,23883}}");
+static unsigned int debug = 0;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "enable debug messages");
+
+/****************************************************************************
+ Module specific functions
+ ****************************************************************************/
+
+/*
+ * BOARD Specific: Sets audio DMA
+ */
+
+int _cx88_start_audio_dma(snd_cx88_card_t *chip)
+{
+ struct cx88_buffer *buf = chip->buf;
+ struct cx88_core *core=chip->core;
+ struct sram_channel *audio_ch = &cx88_sram_channels[SRAM_CH25];
+
+
+ dprintk(1, "Starting audio DMA for %i bytes/line and %i (%i) lines at address %08x\n",buf->bpl, chip->num_periods, audio_ch->fifo_size / buf->bpl, audio_ch->fifo_start);
+
+ /* setup fifo + format - out channel */
+ cx88_sram_channel_setup(chip->core, &cx88_sram_channels[SRAM_CH25],
+ buf->bpl, buf->risc.dma);
+
+ /* sets bpl size */
+ cx_write(MO_AUDD_LNGTH, buf->bpl);
+
+ /* reset counter */
+ cx_write(MO_AUDD_GPCNTRL,GP_COUNT_CONTROL_RESET);
+
+ dprintk(1,"Enabling IRQ, setting mask from 0x%x to 0x%x\n",chip->core->pci_irqmask,(chip->core->pci_irqmask | 0x02));
+ /* enable irqs */
+ cx_set(MO_PCI_INTMSK, chip->core->pci_irqmask | 0x02);
+
+
+ /* Enables corresponding bits at AUD_INT_STAT */
+ cx_write(MO_AUD_INTMSK,
+ (1<<16)|
+ (1<<12)|
+ (1<<4)|
+ (1<<0)
+ );
+
+ /* start dma */
+ cx_set(MO_DEV_CNTRL2, (1<<5)); /* Enables Risc Processor */
+ cx_set(MO_AUD_DMACNTRL, 0x11); /* audio downstream FIFO and RISC enable */
+
+ if (debug)
+ cx88_sram_channel_dump(chip->core, &cx88_sram_channels[SRAM_CH25]);
+
+ return 0;
+}
+
+/*
+ * BOARD Specific: Resets audio DMA
+ */
+int _cx88_stop_audio_dma(snd_cx88_card_t *chip)
+{
+ struct cx88_core *core=chip->core;
+ dprintk(1, "Stopping audio DMA\n");
+
+ /* stop dma */
+ cx_clear(MO_AUD_DMACNTRL, 0x11);
+
+ /* disable irqs */
+ cx_clear(MO_PCI_INTMSK, 0x02);
+ cx_clear(MO_AUD_INTMSK,
+ (1<<16)|
+ (1<<12)|
+ (1<<4)|
+ (1<<0)
+ );
+
+ if (debug)
+ cx88_sram_channel_dump(chip->core, &cx88_sram_channels[SRAM_CH25]);
+
+ return 0;
+}
+
+#define MAX_IRQ_LOOP 10
+
+/*
+ * BOARD Specific: IRQ dma bits
+ */
+static char *cx88_aud_irqs[32] = {
+ "dn_risci1", "up_risci1", "rds_dn_risc1", /* 0-2 */
+ NULL, /* reserved */
+ "dn_risci2", "up_risci2", "rds_dn_risc2", /* 4-6 */
+ NULL, /* reserved */
+ "dnf_of", "upf_uf", "rds_dnf_uf", /* 8-10 */
+ NULL, /* reserved */
+ "dn_sync", "up_sync", "rds_dn_sync", /* 12-14 */
+ NULL, /* reserved */
+ "opc_err", "par_err", "rip_err", /* 16-18 */
+ "pci_abort", "ber_irq", "mchg_irq" /* 19-21 */
+};
+
+/*
+ * BOARD Specific: Handles the audio-specific IRQ bits
+ */
+static void cx8801_aud_irq(snd_cx88_card_t *chip)
+{
+ struct cx88_core *core = chip->core;
+ u32 status, mask;
+ u32 count;
+
+ status = cx_read(MO_AUD_INTSTAT);
+ mask = cx_read(MO_AUD_INTMSK);
+ if (0 == (status & mask)) {
+ spin_unlock(&chip->reg_lock);
+ return;
+ }
+ cx_write(MO_AUD_INTSTAT, status);
+ if (debug > 1 || (status & mask & ~0xff))
+ cx88_print_irqbits(core->name, "irq aud",
+ cx88_aud_irqs, status, mask);
+ /* risc op code error */
+ if (status & (1 << 16)) {
+ printk(KERN_WARNING "%s/0: audio risc op code error\n",core->name);
+ cx_clear(MO_AUD_DMACNTRL, 0x11);
+ cx88_sram_channel_dump(core, &cx88_sram_channels[SRAM_CH25]);
+ }
+
+ /* risc1 downstream */
+ if (status & 0x01) {
+ spin_lock(&chip->reg_lock);
+ count = cx_read(MO_AUDD_GPCNT);
+ spin_unlock(&chip->reg_lock);
+ if (chip->read_count == 0)
+ chip->read_count += chip->dma_size;
+ }
+
+ if (chip->read_count >= chip->period_size) {
+ dprintk(2, "Elapsing period\n");
+ snd_pcm_period_elapsed(chip->substream);
+ }
+
+ dprintk(3,"Leaving audio IRQ handler...\n");
+
+ /* FIXME: Do any other status bits deserve special handling? */
+}
+
+/*
+ * BOARD Specific: Handles IRQ calls
+ */
+static irqreturn_t cx8801_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+ snd_cx88_card_t *chip = dev_id;
+ struct cx88_core *core = chip->core;
+ u32 status;
+ int loop, handled = 0;
+
+ for (loop = 0; loop < MAX_IRQ_LOOP; loop++) {
+ status = cx_read(MO_PCI_INTSTAT) & (core->pci_irqmask | 0x02);
+ if (0 == status)
+ goto out;
+ dprintk( 3, "cx8801_irq\n" );
+ dprintk( 3, " loop: %d/%d\n", loop, MAX_IRQ_LOOP );
+ dprintk( 3, " status: %d\n", status );
+ handled = 1;
+ cx_write(MO_PCI_INTSTAT, status);
+
+ if (status & 0x02)
+ {
+ dprintk( 2, " ALSA IRQ handling\n" );
+ cx8801_aud_irq(chip);
+ }
+ };
+
+ if (MAX_IRQ_LOOP == loop) {
+ dprintk( 0, "clearing mask\n" );
+ dprintk(1,"%s/0: irq loop -- clearing mask\n",
+ core->name);
+ cx_clear(MO_PCI_INTMSK,0x02);
+ }
+
+ out:
+ return IRQ_RETVAL(handled);
+}
+
+
+static int dsp_buffer_free(snd_cx88_card_t *chip)
+{
+ BUG_ON(!chip->dma_size);
+
+ dprintk(2,"Freeing buffer\n");
+ videobuf_dma_pci_unmap(chip->pci, &chip->dma_risc);
+ videobuf_dma_free(&chip->dma_risc);
+ btcx_riscmem_free(chip->pci,&chip->buf->risc);
+ kfree(chip->buf);
+
+ chip->dma_size = 0;
+
+ return 0;
+}
+
+/****************************************************************************
+ ALSA PCM Interface
+ ****************************************************************************/
+
+/*
+ * Digital hardware definition
+ */
+static snd_pcm_hardware_t snd_cx88_digital_hw = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .channels_min = 1,
+ .channels_max = 2,
+ .buffer_bytes_max = (2*2048),
+ .period_bytes_min = 256,
+ .period_bytes_max = 2048,
+ .periods_min = 2,
+ .periods_max = 16,
+};
+
+/*
+ * audio pcm capture runtime free
+ */
+static void snd_card_cx88_runtime_free(snd_pcm_runtime_t *runtime)
+{
+}
+/*
+ * audio pcm capture open callback
+ */
+static int snd_cx88_pcm_open(snd_pcm_substream_t *substream)
+{
+ snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
+ snd_pcm_runtime_t *runtime = substream->runtime;
+ int err;
+
+ if (test_and_set_bit(0, &chip->opened))
+ return -EBUSY;
+
+ err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
+ goto _error;
+
+ chip->substream = substream;
+
+ chip->read_count = 0;
+ chip->read_offset = 0;
+
+ runtime->private_free = snd_card_cx88_runtime_free;
+ runtime->hw = snd_cx88_digital_hw;
+
+ return 0;
+_error:
+ dprintk(1,"Error opening PCM!\n");
+ clear_bit(0, &chip->opened);
+ smp_mb__after_clear_bit();
+ return err;
+}
+
+/*
+ * audio close callback
+ */
+static int snd_cx88_close(snd_pcm_substream_t *substream)
+{
+ snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
+
+ clear_bit(0, &chip->opened);
+ smp_mb__after_clear_bit();
+
+ return 0;
+}
+
+/*
+ * hw_params callback
+ */
+static int snd_cx88_hw_params(snd_pcm_substream_t * substream,
+ snd_pcm_hw_params_t * hw_params)
+{
+ snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
+ struct cx88_buffer *buf;
+
+ if (substream->runtime->dma_area) {
+ dsp_buffer_free(chip);
+ substream->runtime->dma_area = NULL;
+ }
+
+
+ chip->period_size = params_period_bytes(hw_params);
+ chip->num_periods = params_periods(hw_params);
+ chip->dma_size = chip->period_size * params_periods(hw_params);
+
+ BUG_ON(!chip->dma_size);
+
+ dprintk(1,"Setting buffer\n");
+
+ buf = kmalloc(sizeof(*buf),GFP_KERNEL);
+ if (NULL == buf)
+ return -ENOMEM;
+ memset(buf,0,sizeof(*buf));
+
+
+ buf->vb.memory = V4L2_MEMORY_MMAP;
+ buf->vb.width = chip->period_size;
+ buf->vb.height = chip->num_periods;
+ buf->vb.size = chip->dma_size;
+ buf->vb.field = V4L2_FIELD_NONE;
+
+ videobuf_dma_init(&buf->vb.dma);
+ videobuf_dma_init_kernel(&buf->vb.dma,PCI_DMA_FROMDEVICE,
+ (PAGE_ALIGN(buf->vb.size) >> PAGE_SHIFT));
+
+ videobuf_dma_pci_map(chip->pci,&buf->vb.dma);
+
+
+ cx88_risc_databuffer(chip->pci, &buf->risc,
+ buf->vb.dma.sglist,
+ buf->vb.width, buf->vb.height);
+
+ buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
+ buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
+
+ buf->vb.state = STATE_PREPARED;
+
+ buf->bpl = chip->period_size;
+ chip->buf = buf;
+ chip->dma_risc = buf->vb.dma;
+
+ dprintk(1,"Buffer ready at %u\n",chip->dma_risc.nr_pages);
+ substream->runtime->dma_area = chip->dma_risc.vmalloc;
+ return 0;
+}
+
+/*
+ * hw free callback
+ */
+static int snd_cx88_hw_free(snd_pcm_substream_t * substream)
+{
+
+ snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
+
+ if (substream->runtime->dma_area) {
+ dsp_buffer_free(chip);
+ substream->runtime->dma_area = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * prepare callback
+ */
+static int snd_cx88_prepare(snd_pcm_substream_t *substream)
+{
+ return 0;
+}
+
+
+/*
+ * trigger callback
+ */
+static int snd_cx88_card_trigger(snd_pcm_substream_t *substream, int cmd)
+{
+ snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
+ int err;
+
+ spin_lock(&chip->reg_lock);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ err=_cx88_start_audio_dma(chip);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ err=_cx88_stop_audio_dma(chip);
+ break;
+ default:
+ err=-EINVAL;
+ break;
+ }
+
+ spin_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+/*
+ * pointer callback
+ */
+static snd_pcm_uframes_t snd_cx88_pointer(snd_pcm_substream_t *substream)
+{
+ snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
+ snd_pcm_runtime_t *runtime = substream->runtime;
+
+ if (chip->read_count) {
+ chip->read_count -= snd_pcm_lib_period_bytes(substream);
+ chip->read_offset += snd_pcm_lib_period_bytes(substream);
+ if (chip->read_offset == chip->dma_size)
+ chip->read_offset = 0;
+ }
+
+ dprintk(2, "Pointer time, will return %li, read %li\n",chip->read_offset,chip->read_count);
+ return bytes_to_frames(runtime, chip->read_offset);
+
+}
+
+/*
+ * operators
+ */
+static snd_pcm_ops_t snd_cx88_pcm_ops = {
+ .open = snd_cx88_pcm_open,
+ .close = snd_cx88_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = snd_cx88_hw_params,
+ .hw_free = snd_cx88_hw_free,
+ .prepare = snd_cx88_prepare,
+ .trigger = snd_cx88_card_trigger,
+ .pointer = snd_cx88_pointer,
+};
+
+/*
+ * create a PCM device
+ */
+static int __devinit snd_cx88_pcm(snd_cx88_card_t *chip, int device, char *name)
+{
+ int err;
+ snd_pcm_t *pcm;
+
+ err = snd_pcm_new(chip->card, name, device, 0, 1, &pcm);
+ if (err < 0)
+ return err;
+ pcm->private_data = chip;
+ strcpy(pcm->name, name);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_cx88_pcm_ops);
+
+ return 0;
+}
+
+/****************************************************************************
+ CONTROL INTERFACE
+ ****************************************************************************/
+static int snd_cx88_capture_volume_info(snd_kcontrol_t *kcontrol, snd_ctl_elem_info_t *info)
+{
+ info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ info->count = 1;
+ info->value.integer.min = 0;
+ info->value.integer.max = 0x3f;
+
+ return 0;
+}
+
+/* OK - TODO: test it */
+static int snd_cx88_capture_volume_get(snd_kcontrol_t *kcontrol, snd_ctl_elem_value_t *value)
+{
+ snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_core *core=chip->core;
+
+ value->value.integer.value[0] = 0x3f - (cx_read(AUD_VOL_CTL) & 0x3f);
+
+ return 0;
+}
+
+/* OK - TODO: test it */
+static int snd_cx88_capture_volume_put(snd_kcontrol_t *kcontrol, snd_ctl_elem_value_t *value)
+{
+ snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+ struct cx88_core *core=chip->core;
+ int v;
+ u32 old_control;
+
+ spin_lock_irq(&chip->reg_lock);
+ old_control = 0x3f - (cx_read(AUD_VOL_CTL) & 0x3f);
+ v = 0x3f - (value->value.integer.value[0] & 0x3f);
+ cx_andor(AUD_VOL_CTL, 0x3f, v);
+ spin_unlock_irq(&chip->reg_lock);
+
+ return v != old_control;
+}
+
+static snd_kcontrol_new_t snd_cx88_capture_volume = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Capture Volume",
+ .info = snd_cx88_capture_volume_info,
+ .get = snd_cx88_capture_volume_get,
+ .put = snd_cx88_capture_volume_put,
+};
+
+
+/****************************************************************************
+ Basic Flow for Sound Devices
+ ****************************************************************************/
+
+/*
+ * PCI ID table - 14f1:8801 and 14f1:8811 identify function 1 (audio).
+ * Only boards with an EEPROM whose byte 1 is set to 1 have it.
+ */
+
+struct pci_device_id cx88_audio_pci_tbl[] = {
+ {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
+ {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, cx88_audio_pci_tbl);
+
+/*
+ * Chip-specific destructor
+ */
+
+static int snd_cx88_free(snd_cx88_card_t *chip)
+{
+
+ if (chip->irq >= 0){
+ synchronize_irq(chip->irq);
+ free_irq(chip->irq, chip);
+ }
+
+ cx88_core_put(chip->core,chip->pci);
+
+ pci_disable_device(chip->pci);
+ return 0;
+}
+
+/*
+ * Component Destructor
+ */
+static void snd_cx88_dev_free(snd_card_t * card)
+{
+ snd_cx88_card_t *chip = card->private_data;
+
+ snd_cx88_free(chip);
+}
+
+
+/*
+ * Alsa Constructor - Component probe
+ */
+
+static int devno=0;
+static int __devinit snd_cx88_create(snd_card_t *card, struct pci_dev *pci,
+ snd_cx88_card_t **rchip)
+{
+ snd_cx88_card_t *chip;
+ struct cx88_core *core;
+ int err;
+
+ *rchip = NULL;
+
+ err = pci_enable_device(pci);
+ if (err < 0)
+ return err;
+
+ pci_set_master(pci);
+
+ chip = (snd_cx88_card_t *) card->private_data;
+
+ core = cx88_core_get(pci);
+
+ if (!pci_dma_supported(pci,0xffffffff)) {
+ dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name);
+ err = -EIO;
+ cx88_core_put(core,pci);
+ return err;
+ }
+
+
+ /* pci init */
+ chip->card = card;
+ chip->pci = pci;
+ chip->irq = -1;
+ spin_lock_init(&chip->reg_lock);
+
+ if (NULL == core) {
+ err = -EINVAL;
+ kfree (chip);
+ return err;
+ }
+ cx88_reset(core);
+ chip->core = core;
+
+ /* get irq */
+ err = request_irq(chip->pci->irq, cx8801_irq,
+ SA_SHIRQ | SA_INTERRUPT, chip->core->name, chip);
+ if (err < 0) {
+ dprintk(0, "%s: can't get IRQ %d\n",
+ chip->core->name, chip->pci->irq);
+ return err;
+ }
+
+ /* print pci info */
+ pci_read_config_byte(pci, PCI_CLASS_REVISION, &chip->pci_rev);
+ pci_read_config_byte(pci, PCI_LATENCY_TIMER, &chip->pci_lat);
+
+ dprintk(1,"ALSA %s/%i: found at %s, rev: %d, irq: %d, "
+ "latency: %d, mmio: 0x%lx\n", core->name, devno,
+ pci_name(pci), chip->pci_rev, pci->irq,
+ chip->pci_lat,pci_resource_start(pci,0));
+
+ chip->irq = pci->irq;
+ synchronize_irq(chip->irq);
+
+ snd_card_set_dev(card, &pci->dev);
+
+ *rchip = chip;
+
+ return 0;
+}
+
+static int __devinit cx88_audio_initdev(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ snd_card_t *card;
+ snd_cx88_card_t *chip;
+ int err;
+
+ if (devno >= SNDRV_CARDS)
+ return (-ENODEV);
+
+ if (!enable[devno]) {
+ ++devno;
+ return (-ENOENT);
+ }
+
+ card = snd_card_new(index[devno], id[devno], THIS_MODULE, sizeof(snd_cx88_card_t));
+ if (!card)
+ return (-ENOMEM);
+
+ card->private_free = snd_cx88_dev_free;
+
+ err = snd_cx88_create(card, pci, &chip);
+ if (err < 0)
+ return (err);
+
+ err = snd_cx88_pcm(chip, 0, "CX88 Digital");
+
+ if (err < 0) {
+ snd_card_free(card);
+ return (err);
+ }
+
+ err = snd_ctl_add(card, snd_ctl_new1(&snd_cx88_capture_volume, chip));
+ if (err < 0) {
+ snd_card_free(card);
+ return (err);
+ }
+
+ strcpy (card->driver, "CX88x");
+ sprintf(card->shortname, "Conexant CX%x", pci->device);
+ sprintf(card->longname, "%s at %#lx",
+ card->shortname, pci_resource_start(pci, 0));
+ strcpy (card->mixername, "CX88");
+
+ dprintk (0, "%s/%i: ALSA support for cx2388x boards\n",
+ card->driver,devno);
+
+ err = snd_card_register(card);
+ if (err < 0) {
+ snd_card_free(card);
+ return (err);
+ }
+ snd_cx88_cards[devno] = card;
+
+ pci_set_drvdata(pci,card);
+
+ devno++;
+ return 0;
+}
+/*
+ * ALSA destructor
+ */
+static void __devexit cx88_audio_finidev(struct pci_dev *pci)
+{
+ struct cx88_audio_dev *card = pci_get_drvdata(pci);
+
+ snd_card_free((void *)card);
+
+ pci_set_drvdata(pci, NULL);
+
+ devno--;
+}
+
+/*
+ * PCI driver definition
+ */
+
+static struct pci_driver cx88_audio_pci_driver = {
+ .name = "cx88_audio",
+ .id_table = cx88_audio_pci_tbl,
+ .probe = cx88_audio_initdev,
+ .remove = cx88_audio_finidev,
+ SND_PCI_PM_CALLBACKS
+};
+
+/****************************************************************************
+ LINUX MODULE INIT
+ ****************************************************************************/
+
+/*
+ * module init
+ */
+static int cx88_audio_init(void)
+{
+ printk(KERN_INFO "cx2388x alsa driver version %d.%d.%d loaded\n",
+ (CX88_VERSION_CODE >> 16) & 0xff,
+ (CX88_VERSION_CODE >> 8) & 0xff,
+ CX88_VERSION_CODE & 0xff);
+#ifdef SNAPSHOT
+ printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
+ SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
+#endif
+ return pci_register_driver(&cx88_audio_pci_driver);
+}
+
+/*
+ * module remove
+ */
+static void cx88_audio_fini(void)
+{
+
+ pci_unregister_driver(&cx88_audio_pci_driver);
+}
+
+module_init(cx88_audio_init);
+module_exit(cx88_audio_fini);
+
+/* ----------------------------------------------------------- */
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
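
The snd_cx88_digital_hw description above only advertises 48kHz S16_LE with 1-2 channels, so a capture client must ask for exactly that. A minimal alsa-lib sketch; the card name, latency and read size are illustrative (check /proc/asound/cards for the real name):

	#include <stdio.h>
	#include <alsa/asoundlib.h>

	int main(void)
	{
		snd_pcm_t *pcm;
		short buf[2 * 1024];		/* 1024 interleaved stereo frames */
		snd_pcm_sframes_t n;

		if (snd_pcm_open(&pcm, "hw:CX88x", SND_PCM_STREAM_CAPTURE, 0) < 0)
			return 1;

		/* Match the constraints advertised by snd_cx88_digital_hw */
		if (snd_pcm_set_params(pcm, SND_PCM_FORMAT_S16_LE,
				       SND_PCM_ACCESS_RW_INTERLEAVED,
				       2, 48000, 0, 500000) < 0)
			return 1;

		n = snd_pcm_readi(pcm, buf, 1024);
		printf("captured %ld frames\n", (long)n);
		snd_pcm_close(pcm);
		return 0;
	}
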
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index 74e57a53116..a502a4d6e4a 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -32,10 +32,10 @@
#include <linux/firmware.h>
#include "cx88.h"
+#include <media/v4l2-common.h>
MODULE_DESCRIPTION("driver for cx2388x/cx23416 based mpeg encoder cards");
-MODULE_AUTHOR("Jelle Foks <jelle@foks.8m.com>");
-MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
+MODULE_AUTHOR("Jelle Foks <jelle@foks.8m.com>, Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");
static unsigned int mpegbufs = 32;
@@ -1375,7 +1375,7 @@ static int mpeg_do_ioctl(struct inode *inode, struct file *file,
struct cx88_core *core = dev->core;
if (debug > 1)
- cx88_print_ioctl(core->name,cmd);
+ v4l_print_ioctl(core->name,cmd);
switch (cmd) {
@@ -1539,10 +1539,9 @@ static int mpeg_open(struct inode *inode, struct file *file)
dprintk(1,"open minor=%d\n",minor);
/* allocate + initialize per filehandle data */
- fh = kmalloc(sizeof(*fh),GFP_KERNEL);
+ fh = kzalloc(sizeof(*fh),GFP_KERNEL);
if (NULL == fh)
return -ENOMEM;
- memset(fh,0,sizeof(*fh));
file->private_data = fh;
fh->dev = dev;
@@ -1678,10 +1677,9 @@ static int __devinit blackbird_probe(struct pci_dev *pci_dev,
goto fail_core;
err = -ENOMEM;
- dev = kmalloc(sizeof(*dev),GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev),GFP_KERNEL);
if (NULL == dev)
goto fail_core;
- memset(dev,0,sizeof(*dev));
dev->pci = pci_dev;
dev->core = core;
dev->width = 720;
@@ -1689,6 +1687,18 @@ static int __devinit blackbird_probe(struct pci_dev *pci_dev,
memcpy(&dev->params,&default_mpeg_params,sizeof(default_mpeg_params));
memcpy(&dev->dnr_params,&default_dnr_params,sizeof(default_dnr_params));
+ if (core->board == CX88_BOARD_HAUPPAUGE_ROSLYN) {
+
+ if (core->tuner_formats & V4L2_STD_525_60) {
+ dev->height = 480;
+ dev->params.vi_frame_rate = 30;
+ } else {
+ dev->height = 576;
+ dev->params.vi_frame_rate = 25;
+ }
+
+ }
+
err = cx8802_init_common(dev);
if (0 != err)
goto fail_free;
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 951709aa88b..ad2f565f522 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -611,12 +611,12 @@ struct cx88_board cx88_boards[] = {
.input = {{
.type = CX88_VMUX_TELEVISION,
.vmux = 0,
- .gpio0 = 0xed12, /* internal decoder */
+ .gpio0 = 0xed1a,
.gpio2 = 0x00ff,
},{
.type = CX88_VMUX_DEBUG,
.vmux = 0,
- .gpio0 = 0xff01, /* mono from tuner chip */
+ .gpio0 = 0xff01,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
@@ -708,7 +708,7 @@ struct cx88_board cx88_boards[] = {
},
[CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_T] = {
.name = "DViCO FusionHDTV 3 Gold-T",
- .tuner_type = TUNER_THOMSON_DTT7611,
+ .tuner_type = TUNER_THOMSON_DTT761X,
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
@@ -897,6 +897,158 @@ struct cx88_board cx88_boards[] = {
.gpio3 = 0x0000,
}},
},
+ [CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1] = {
+ .name = "Hauppauge Nova-S-Plus DVB-S",
+ .tuner_type = TUNER_ABSENT,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .input = {{
+ .type = CX88_VMUX_DVB,
+ .vmux = 0,
+ },{
+ .type = CX88_VMUX_COMPOSITE1,
+ .vmux = 1,
+ },{
+ .type = CX88_VMUX_SVIDEO,
+ .vmux = 2,
+ }},
+ .dvb = 1,
+ },
+ [CX88_BOARD_HAUPPAUGE_NOVASE2_S1] = {
+ .name = "Hauppauge Nova-SE2 DVB-S",
+ .tuner_type = TUNER_ABSENT,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .input = {{
+ .type = CX88_VMUX_DVB,
+ .vmux = 0,
+ }},
+ .dvb = 1,
+ },
+ [CX88_BOARD_KWORLD_DVBS_100] = {
+ .name = "KWorld DVB-S 100",
+ .tuner_type = TUNER_ABSENT,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .input = {{
+ .type = CX88_VMUX_DVB,
+ .vmux = 0,
+ },{
+ .type = CX88_VMUX_COMPOSITE1,
+ .vmux = 1,
+ },{
+ .type = CX88_VMUX_SVIDEO,
+ .vmux = 2,
+ }},
+ .dvb = 1,
+ },
+ [CX88_BOARD_HAUPPAUGE_HVR1100] = {
+ .name = "Hauppauge WinTV-HVR1100 DVB-T/Hybrid",
+ .tuner_type = TUNER_PHILIPS_FMD1216ME_MK3,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .tda9887_conf = TDA9887_PRESENT,
+ .input = {{
+ .type = CX88_VMUX_TELEVISION,
+ .vmux = 0,
+ },{
+ .type = CX88_VMUX_COMPOSITE1,
+ .vmux = 1,
+ },{
+ .type = CX88_VMUX_SVIDEO,
+ .vmux = 2,
+ }},
+ /* fixme: Add radio support */
+ .dvb = 1,
+ },
+ [CX88_BOARD_HAUPPAUGE_HVR1100LP] = {
+ .name = "Hauppauge WinTV-HVR1100 DVB-T/Hybrid (Low Profile)",
+ .tuner_type = TUNER_PHILIPS_FMD1216ME_MK3,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .tda9887_conf = TDA9887_PRESENT,
+ .input = {{
+ .type = CX88_VMUX_TELEVISION,
+ .vmux = 0,
+ },{
+ .type = CX88_VMUX_COMPOSITE1,
+ .vmux = 1,
+ }},
+ /* fixme: Add radio support */
+ .dvb = 1,
+ },
+ [CX88_BOARD_DNTV_LIVE_DVB_T_PRO] = {
+ .name = "digitalnow DNTV Live! DVB-T Pro",
+ .tuner_type = TUNER_PHILIPS_FMD1216ME_MK3,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE |
+ TDA9887_PORT2_ACTIVE,
+ .input = {{
+ .type = CX88_VMUX_TELEVISION,
+ .vmux = 0,
+ .gpio0 = 0xf80808,
+ },{
+ .type = CX88_VMUX_COMPOSITE1,
+ .vmux = 1,
+ .gpio0 = 0xf80808,
+ },{
+ .type = CX88_VMUX_SVIDEO,
+ .vmux = 2,
+ .gpio0 = 0xf80808,
+ }},
+ .radio = {
+ .type = CX88_RADIO,
+ .gpio0 = 0xf80808,
+ },
+ .dvb = 1,
+ },
+ [CX88_BOARD_KWORLD_DVB_T_CX22702] = {
+ /* Kworld V-stream Xpert DVB-T with Thomson tuner */
+ /* DTT 7579 Conexant CX22702-19 Conexant CX2388x */
+ /* Manenti Marco <marco_manenti@colman.it> */
+ .name = "KWorld/VStream XPert DVB-T with cx22702",
+ .tuner_type = TUNER_ABSENT,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .input = {{
+ .type = CX88_VMUX_COMPOSITE1,
+ .vmux = 1,
+ .gpio0 = 0x0700,
+ .gpio2 = 0x0101,
+ },{
+ .type = CX88_VMUX_SVIDEO,
+ .vmux = 2,
+ .gpio0 = 0x0700,
+ .gpio2 = 0x0101,
+ }},
+ .dvb = 1,
+ },
+ [CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL] = {
+ .name = "DViCO FusionHDTV DVB-T Dual Digital",
+ .tuner_type = TUNER_ABSENT, /* No analog tuner */
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .input = {{
+ .type = CX88_VMUX_COMPOSITE1,
+ .vmux = 1,
+ .gpio0 = 0x000027df,
+ },{
+ .type = CX88_VMUX_SVIDEO,
+ .vmux = 2,
+ .gpio0 = 0x000027df,
+ }},
+ .dvb = 1,
+ },
+
};
const unsigned int cx88_bcount = ARRAY_SIZE(cx88_boards);
@@ -1044,6 +1196,59 @@ struct cx88_subid cx88_subids[] = {
.subvendor = 0x1461,
.subdevice = 0x000a,
.card = CX88_BOARD_AVERTV_303,
+ },{
+ .subvendor = 0x0070,
+ .subdevice = 0x9200,
+ .card = CX88_BOARD_HAUPPAUGE_NOVASE2_S1,
+ },{
+ .subvendor = 0x0070,
+ .subdevice = 0x9201,
+ .card = CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1,
+ },{
+ .subvendor = 0x0070,
+ .subdevice = 0x9202,
+ .card = CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1,
+ },{
+ .subvendor = 0x17de,
+ .subdevice = 0x08b2,
+ .card = CX88_BOARD_KWORLD_DVBS_100,
+ },{
+ .subvendor = 0x0070,
+ .subdevice = 0x9400,
+ .card = CX88_BOARD_HAUPPAUGE_HVR1100,
+ },{
+ .subvendor = 0x0070,
+ .subdevice = 0x9402,
+ .card = CX88_BOARD_HAUPPAUGE_HVR1100,
+ },{
+ .subvendor = 0x0070,
+ .subdevice = 0x9800,
+ .card = CX88_BOARD_HAUPPAUGE_HVR1100LP,
+ },{
+ .subvendor = 0x0070,
+ .subdevice = 0x9802,
+ .card = CX88_BOARD_HAUPPAUGE_HVR1100LP,
+ },{
+ .subvendor = 0x0070,
+ .subdevice = 0x9001,
+ .card = CX88_BOARD_HAUPPAUGE_DVB_T1,
+ },{
+ .subvendor = 0x1822,
+ .subdevice = 0x0025,
+ .card = CX88_BOARD_DNTV_LIVE_DVB_T_PRO,
+ },{
+ .subvendor = 0x17de,
+ .subdevice = 0x08a1,
+ .card = CX88_BOARD_KWORLD_DVB_T_CX22702,
+ },{
+ .subvendor = 0x18ac,
+ .subdevice = 0xdb50,
+ .card = CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL,
+ },{
+ .subvendor = 0x18ac,
+ .subdevice = 0xdb11,
+ .card = CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS,
+ /* Re-branded DViCO: UltraView DVB-T Plus */
},
};
const unsigned int cx88_idcount = ARRAY_SIZE(cx88_subids);
@@ -1075,20 +1280,19 @@ static void __devinit leadtek_eeprom(struct cx88_core *core, u8 *eeprom_data)
core->name, core->tuner_type, eeprom_data[0]);
}
-
-/* ----------------------------------------------------------------------- */
-
static void hauppauge_eeprom(struct cx88_core *core, u8 *eeprom_data)
{
struct tveeprom tv;
tveeprom_hauppauge_analog(&core->i2c_client, &tv, eeprom_data);
core->tuner_type = tv.tuner_type;
+ core->tuner_formats = tv.tuner_formats;
core->has_radio = tv.has_radio;
/* Make sure we support the board model */
switch (tv.model)
{
+ case 28552: /* WinTV-PVR 'Roslyn' (No IR) */
case 90002: /* Nova-T-PCI (9002) */
case 92001: /* Nova-S-Plus (Video and IR) */
case 92002: /* Nova-S-Plus (Video and IR) */
@@ -1096,7 +1300,9 @@ static void hauppauge_eeprom(struct cx88_core *core, u8 *eeprom_data)
case 90500: /* Nova-T-PCI (oem) */
case 90501: /* Nova-T-PCI (oem/IR) */
case 92000: /* Nova-SE2 (OEM, No Video or IR) */
-
+ case 94009: /* WinTV-HVR1100 (Video and IR Retail) */
+ case 94501: /* WinTV-HVR1100 (Video and IR OEM) */
+ case 98559: /* WinTV-HVR1100LP (Video no IR, Retail - Low Profile) */
/* known */
break;
default:
@@ -1211,12 +1417,21 @@ void cx88_card_setup(struct cx88_core *core)
if (0 == core->i2c_rc)
leadtek_eeprom(core,eeprom);
break;
+ case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
+ case CX88_BOARD_HAUPPAUGE_NOVASE2_S1:
case CX88_BOARD_HAUPPAUGE_DVB_T1:
+ case CX88_BOARD_HAUPPAUGE_HVR1100:
+ case CX88_BOARD_HAUPPAUGE_HVR1100LP:
if (0 == core->i2c_rc)
hauppauge_eeprom(core,eeprom);
break;
+ case CX88_BOARD_KWORLD_DVBS_100:
+ cx_write(MO_GP0_IO, 0x000007f8);
+ cx_write(MO_GP1_IO, 0x00000001);
+ break;
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1:
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS:
+ case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL:
/* GPIO0:0 is hooked to mt352 reset pin */
cx_set(MO_GP0_IO, 0x00000101);
cx_clear(MO_GP0_IO, 0x00000001);
@@ -1232,6 +1447,9 @@ void cx88_card_setup(struct cx88_core *core)
cx_clear(MO_GP0_IO, 0x00000007);
cx_set(MO_GP2_IO, 0x00000101);
break;
+ case CX88_BOARD_DNTV_LIVE_DVB_T_PRO:
+ cx_write(MO_GP0_IO, 0x00080808);
+ break;
case CX88_BOARD_ATI_HDTVWONDER:
if (0 == core->i2c_rc) {
/* enable tuner */
diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c
index bb6eb54e19c..194446f28c5 100644
--- a/drivers/media/video/cx88/cx88-core.c
+++ b/drivers/media/video/cx88/cx88-core.c
@@ -34,6 +34,7 @@
#include <linux/videodev2.h>
#include "cx88.h"
+#include <media/v4l2-common.h>
MODULE_DESCRIPTION("v4l2 driver module for cx2388x based TV cards");
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
@@ -76,60 +77,6 @@ static unsigned int cx88_devcount;
static LIST_HEAD(cx88_devlist);
static DECLARE_MUTEX(devlist);
-/* ------------------------------------------------------------------ */
-/* debug help functions */
-
-static const char *v4l1_ioctls[] = {
- "0", "CGAP", "GCHAN", "SCHAN", "GTUNER", "STUNER", "GPICT", "SPICT",
- "CCAPTURE", "GWIN", "SWIN", "GFBUF", "SFBUF", "KEY", "GFREQ",
- "SFREQ", "GAUDIO", "SAUDIO", "SYNC", "MCAPTURE", "GMBUF", "GUNIT",
- "GCAPTURE", "SCAPTURE", "SPLAYMODE", "SWRITEMODE", "GPLAYINFO",
- "SMICROCODE", "GVBIFMT", "SVBIFMT" };
-#define V4L1_IOCTLS ARRAY_SIZE(v4l1_ioctls)
-
-static const char *v4l2_ioctls[] = {
- "QUERYCAP", "1", "ENUM_PIXFMT", "ENUM_FBUFFMT", "G_FMT", "S_FMT",
- "G_COMP", "S_COMP", "REQBUFS", "QUERYBUF", "G_FBUF", "S_FBUF",
- "G_WIN", "S_WIN", "PREVIEW", "QBUF", "16", "DQBUF", "STREAMON",
- "STREAMOFF", "G_PERF", "G_PARM", "S_PARM", "G_STD", "S_STD",
- "ENUMSTD", "ENUMINPUT", "G_CTRL", "S_CTRL", "G_TUNER", "S_TUNER",
- "G_FREQ", "S_FREQ", "G_AUDIO", "S_AUDIO", "35", "QUERYCTRL",
- "QUERYMENU", "G_INPUT", "S_INPUT", "ENUMCVT", "41", "42", "43",
- "44", "45", "G_OUTPUT", "S_OUTPUT", "ENUMOUTPUT", "G_AUDOUT",
- "S_AUDOUT", "ENUMFX", "G_EFFECT", "S_EFFECT", "G_MODULATOR",
- "S_MODULATOR"
-};
-#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
-
-void cx88_print_ioctl(char *name, unsigned int cmd)
-{
- char *dir;
-
- switch (_IOC_DIR(cmd)) {
- case _IOC_NONE: dir = "--"; break;
- case _IOC_READ: dir = "r-"; break;
- case _IOC_WRITE: dir = "-w"; break;
- case _IOC_READ | _IOC_WRITE: dir = "rw"; break;
- default: dir = "??"; break;
- }
- switch (_IOC_TYPE(cmd)) {
- case 'v':
- printk(KERN_DEBUG "%s: ioctl 0x%08x (v4l1, %s, VIDIOC%s)\n",
- name, cmd, dir, (_IOC_NR(cmd) < V4L1_IOCTLS) ?
- v4l1_ioctls[_IOC_NR(cmd)] : "???");
- break;
- case 'V':
- printk(KERN_DEBUG "%s: ioctl 0x%08x (v4l2, %s, VIDIOC_%s)\n",
- name, cmd, dir, (_IOC_NR(cmd) < V4L2_IOCTLS) ?
- v4l2_ioctls[_IOC_NR(cmd)] : "???");
- break;
- default:
- printk(KERN_DEBUG "%s: ioctl 0x%08x (???, %s, #%d)\n",
- name, cmd, dir, _IOC_NR(cmd));
- }
-}
-
-/* ------------------------------------------------------------------ */
#define NO_SYNC_LINE (-1U)
static u32* cx88_risc_field(u32 *rp, struct scatterlist *sglist,
@@ -291,9 +238,9 @@ cx88_free_buffer(struct pci_dev *pci, struct cx88_buffer *buf)
* channel 22 (u video) - 2.0k
* channel 23 (v video) - 2.0k
* channel 24 (vbi) - 4.0k
- * channels 25+26 (audio) - 0.5k
+ * channels 25+26 (audio) - 4.0k
* channel 28 (mpeg) - 4.0k
- * TOTAL = 25.5k
+ * TOTAL = 29.0k
*
* Every channel has 160 bytes control data (64 bytes instruction
* queue and 6 CDT entries), which is close to 2k total.
@@ -359,7 +306,7 @@ struct sram_channel cx88_sram_channels[] = {
.ctrl_start = 0x180680,
.cdt = 0x180680 + 64,
.fifo_start = 0x185400,
- .fifo_size = 0x000200,
+ .fifo_size = 0x001000,
.ptr1_reg = MO_DMA25_PTR1,
.ptr2_reg = MO_DMA25_PTR2,
.cnt1_reg = MO_DMA25_CNT1,
@@ -371,7 +318,7 @@ struct sram_channel cx88_sram_channels[] = {
.ctrl_start = 0x180720,
.cdt = 0x180680 + 64, /* same as audio IN */
.fifo_start = 0x185400, /* same as audio IN */
- .fifo_size = 0x000200, /* same as audio IN */
+ .fifo_size = 0x001000, /* same as audio IN */
.ptr1_reg = MO_DMA26_PTR1,
.ptr2_reg = MO_DMA26_PTR2,
.cnt1_reg = MO_DMA26_CNT1,
@@ -382,7 +329,7 @@ struct sram_channel cx88_sram_channels[] = {
.cmds_start = 0x180200,
.ctrl_start = 0x1807C0,
.cdt = 0x1807C0 + 64,
- .fifo_start = 0x185600,
+ .fifo_start = 0x186400,
.fifo_size = 0x001000,
.ptr1_reg = MO_DMA28_PTR1,
.ptr2_reg = MO_DMA28_PTR2,
@@ -848,7 +795,6 @@ int cx88_start_audio_dma(struct cx88_core *core)
/* start dma */
cx_write(MO_AUD_DMACNTRL, 0x0003); /* Up and Down fifo enable */
-
return 0;
}
@@ -1104,11 +1050,10 @@ struct cx88_core* cx88_core_get(struct pci_dev *pci)
up(&devlist);
return core;
}
- core = kmalloc(sizeof(*core),GFP_KERNEL);
+ core = kzalloc(sizeof(*core),GFP_KERNEL);
if (NULL == core)
goto fail_unlock;
- memset(core,0,sizeof(*core));
atomic_inc(&core->refcount);
core->pci_bus = pci->bus->number;
core->pci_slot = PCI_SLOT(pci->devfn);
@@ -1208,7 +1153,6 @@ void cx88_core_put(struct cx88_core *core, struct pci_dev *pci)
/* ------------------------------------------------------------------ */
-EXPORT_SYMBOL(cx88_print_ioctl);
EXPORT_SYMBOL(cx88_print_irqbits);
EXPORT_SYMBOL(cx88_core_irq);
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index 99ea955f598..e48aa3f6e50 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -3,7 +3,7 @@
* device driver for Conexant 2388x based TV cards
* MPEG Transport Stream (DVB) routines
*
- * (c) 2004 Chris Pascoe <c.pascoe@itee.uq.edu.au>
+ * (c) 2004, 2005 Chris Pascoe <c.pascoe@itee.uq.edu.au>
* (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]
*
* This program is free software; you can redistribute it and/or modify
@@ -31,10 +31,14 @@
#include "cx88.h"
#include "dvb-pll.h"
+#include <media/v4l2-common.h>
#ifdef HAVE_MT352
# include "mt352.h"
# include "mt352_priv.h"
+# ifdef HAVE_VP3054_I2C
+# include "cx88-vp3054-i2c.h"
+# endif
#endif
#ifdef HAVE_CX22702
# include "cx22702.h"
@@ -48,6 +52,9 @@
#ifdef HAVE_NXT200X
# include "nxt200x.h"
#endif
+#ifdef HAVE_CX24123
+# include "cx24123.h"
+#endif
MODULE_DESCRIPTION("driver for cx2388x based DVB cards");
MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
@@ -125,6 +132,27 @@ static int dvico_fusionhdtv_demod_init(struct dvb_frontend* fe)
return 0;
}
+static int dvico_dual_demod_init(struct dvb_frontend *fe)
+{
+ static u8 clock_config [] = { CLOCK_CTL, 0x38, 0x38 };
+ static u8 reset [] = { RESET, 0x80 };
+ static u8 adc_ctl_1_cfg [] = { ADC_CTL_1, 0x40 };
+ static u8 agc_cfg [] = { AGC_TARGET, 0x28, 0x20 };
+ static u8 gpp_ctl_cfg [] = { GPP_CTL, 0x33 };
+ static u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
+
+ mt352_write(fe, clock_config, sizeof(clock_config));
+ udelay(200);
+ mt352_write(fe, reset, sizeof(reset));
+ mt352_write(fe, adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg));
+
+ mt352_write(fe, agc_cfg, sizeof(agc_cfg));
+ mt352_write(fe, gpp_ctl_cfg, sizeof(gpp_ctl_cfg));
+ mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg));
+
+ return 0;
+}
+
static int dntv_live_dvbt_demod_init(struct dvb_frontend* fe)
{
static u8 clock_config [] = { 0x89, 0x38, 0x39 };
@@ -172,6 +200,98 @@ static struct mt352_config dntv_live_dvbt_config = {
.demod_init = dntv_live_dvbt_demod_init,
.pll_set = mt352_pll_set,
};
+
+static struct mt352_config dvico_fusionhdtv_dual = {
+ .demod_address = 0x0F,
+ .demod_init = dvico_dual_demod_init,
+ .pll_set = mt352_pll_set,
+};
+
+#ifdef HAVE_VP3054_I2C
+static int dntv_live_dvbt_pro_demod_init(struct dvb_frontend* fe)
+{
+ static u8 clock_config [] = { 0x89, 0x38, 0x38 };
+ static u8 reset [] = { 0x50, 0x80 };
+ static u8 adc_ctl_1_cfg [] = { 0x8E, 0x40 };
+ static u8 agc_cfg [] = { 0x67, 0x10, 0x20, 0x00, 0xFF, 0xFF,
+ 0x00, 0xFF, 0x00, 0x40, 0x40 };
+ static u8 dntv_extra[] = { 0xB5, 0x7A };
+ static u8 capt_range_cfg[] = { 0x75, 0x32 };
+
+ mt352_write(fe, clock_config, sizeof(clock_config));
+ udelay(2000);
+ mt352_write(fe, reset, sizeof(reset));
+ mt352_write(fe, adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg));
+
+ mt352_write(fe, agc_cfg, sizeof(agc_cfg));
+ udelay(2000);
+ mt352_write(fe, dntv_extra, sizeof(dntv_extra));
+ mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg));
+
+ return 0;
+}
+
+static int philips_fmd1216_pll_init(struct dvb_frontend *fe)
+{
+ struct cx8802_dev *dev= fe->dvb->priv;
+
+ /* this message is to set up ATC and ALC */
+ static u8 fmd1216_init[] = { 0x0b, 0xdc, 0x9c, 0xa0 };
+ struct i2c_msg msg =
+ { .addr = dev->core->pll_addr, .flags = 0,
+ .buf = fmd1216_init, .len = sizeof(fmd1216_init) };
+ int err;
+
+ if ((err = i2c_transfer(&dev->core->i2c_adap, &msg, 1)) != 1) {
+ if (err < 0)
+ return err;
+ else
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static int dntv_live_dvbt_pro_pll_set(struct dvb_frontend* fe,
+ struct dvb_frontend_parameters* params,
+ u8* pllbuf)
+{
+ struct cx8802_dev *dev= fe->dvb->priv;
+ struct i2c_msg msg =
+ { .addr = dev->core->pll_addr, .flags = 0,
+ .buf = pllbuf+1, .len = 4 };
+ int err;
+
+ /* Switch PLL to DVB mode */
+ err = philips_fmd1216_pll_init(fe);
+ if (err)
+ return err;
+
+ /* Tune PLL */
+ pllbuf[0] = dev->core->pll_addr << 1;
+ dvb_pll_configure(dev->core->pll_desc, pllbuf+1,
+ params->frequency,
+ params->u.ofdm.bandwidth);
+ if ((err = i2c_transfer(&dev->core->i2c_adap, &msg, 1)) != 1) {
+ printk(KERN_WARNING "cx88-dvb: %s error "
+ "(addr %02x <- %02x, err = %i)\n",
+ __FUNCTION__, pllbuf[0], pllbuf[1], err);
+ if (err < 0)
+ return err;
+ else
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static struct mt352_config dntv_live_dvbt_pro_config = {
+ .demod_address = 0x0f,
+ .no_tuner = 1,
+ .demod_init = dntv_live_dvbt_pro_demod_init,
+ .pll_set = dntv_live_dvbt_pro_pll_set,
+};
+#endif
#endif
#ifdef HAVE_CX22702
@@ -188,6 +308,12 @@ static struct cx22702_config hauppauge_novat_config = {
.pll_address = 0x61,
.pll_desc = &dvb_pll_thomson_dtt759x,
};
+static struct cx22702_config hauppauge_hvr1100_config = {
+ .demod_address = 0x63,
+ .output_mode = CX22702_SERIAL_OUTPUT,
+ .pll_address = 0x61,
+ .pll_desc = &dvb_pll_fmd1216me,
+};
#endif
#ifdef HAVE_OR51132
@@ -314,6 +440,40 @@ static struct nxt200x_config ati_hdtvwonder = {
};
#endif
+#ifdef HAVE_CX24123
+static int cx24123_set_ts_param(struct dvb_frontend* fe,
+ int is_punctured)
+{
+ struct cx8802_dev *dev= fe->dvb->priv;
+ dev->ts_gen_cntrl = 0x2;
+ return 0;
+}
+
+static void cx24123_enable_lnb_voltage(struct dvb_frontend* fe, int on)
+{
+ struct cx8802_dev *dev= fe->dvb->priv;
+ struct cx88_core *core = dev->core;
+
+ if (on)
+ cx_write(MO_GP0_IO, 0x000006f9);
+ else
+ cx_write(MO_GP0_IO, 0x000006fB);
+}
+
+static struct cx24123_config hauppauge_novas_config = {
+ .demod_address = 0x55,
+ .use_isl6421 = 1,
+ .set_ts_params = cx24123_set_ts_param,
+};
+
+static struct cx24123_config kworld_dvbs_100_config = {
+ .demod_address = 0x15,
+ .use_isl6421 = 0,
+ .set_ts_params = cx24123_set_ts_param,
+ .enable_lnb_voltage = cx24123_enable_lnb_voltage,
+};
+#endif
+
static int dvb_register(struct cx8802_dev *dev)
{
/* init struct videobuf_dvb */
@@ -329,10 +489,16 @@ static int dvb_register(struct cx8802_dev *dev)
break;
case CX88_BOARD_TERRATEC_CINERGY_1400_DVB_T1:
case CX88_BOARD_CONEXANT_DVB_T1:
+ case CX88_BOARD_KWORLD_DVB_T_CX22702:
case CX88_BOARD_WINFAST_DTV1000:
dev->dvb.frontend = cx22702_attach(&connexant_refboard_config,
&dev->core->i2c_adap);
break;
+ case CX88_BOARD_HAUPPAUGE_HVR1100:
+ case CX88_BOARD_HAUPPAUGE_HVR1100LP:
+ dev->dvb.frontend = cx22702_attach(&hauppauge_hvr1100_config,
+ &dev->core->i2c_adap);
+ break;
#endif
#ifdef HAVE_MT352
case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T1:
@@ -355,6 +521,24 @@ static int dvb_register(struct cx8802_dev *dev)
dev->dvb.frontend = mt352_attach(&dntv_live_dvbt_config,
&dev->core->i2c_adap);
break;
+ case CX88_BOARD_DNTV_LIVE_DVB_T_PRO:
+#ifdef HAVE_VP3054_I2C
+ dev->core->pll_addr = 0x61;
+ dev->core->pll_desc = &dvb_pll_fmd1216me;
+ dev->dvb.frontend = mt352_attach(&dntv_live_dvbt_pro_config,
+ &((struct vp3054_i2c_state *)dev->card_priv)->adap);
+#else
+ printk("%s: built without vp3054 support\n", dev->core->name);
+#endif
+ break;
+ case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL:
+ /* The tin box says DEE1601, but it seems to be DTT7579
+ * compatible, with a slightly different MT352 AGC gain. */
+ dev->core->pll_addr = 0x61;
+ dev->core->pll_desc = &dvb_pll_thomson_dtt7579;
+ dev->dvb.frontend = mt352_attach(&dvico_fusionhdtv_dual,
+ &dev->core->i2c_adap);
+ break;
#endif
#ifdef HAVE_OR51132
case CX88_BOARD_PCHDTV_HD3000:
@@ -393,7 +577,7 @@ static int dvb_register(struct cx8802_dev *dev)
cx_set(MO_GP0_IO, 9);
mdelay(200);
dev->core->pll_addr = 0x61;
- dev->core->pll_desc = &dvb_pll_thomson_dtt7611;
+ dev->core->pll_desc = &dvb_pll_thomson_dtt761x;
dev->dvb.frontend = lgdt330x_attach(&fusionhdtv_3_gold,
&dev->core->i2c_adap);
}
@@ -421,6 +605,17 @@ static int dvb_register(struct cx8802_dev *dev)
&dev->core->i2c_adap);
break;
#endif
+#ifdef HAVE_CX24123
+ case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
+ case CX88_BOARD_HAUPPAUGE_NOVASE2_S1:
+ dev->dvb.frontend = cx24123_attach(&hauppauge_novas_config,
+ &dev->core->i2c_adap);
+ break;
+ case CX88_BOARD_KWORLD_DVBS_100:
+ dev->dvb.frontend = cx24123_attach(&kworld_dvbs_100_config,
+ &dev->core->i2c_adap);
+ break;
+#endif
default:
printk("%s: The frontend of your DVB/ATSC card isn't supported yet\n",
dev->core->name);
@@ -462,10 +657,9 @@ static int __devinit dvb_probe(struct pci_dev *pci_dev,
goto fail_core;
err = -ENOMEM;
- dev = kmalloc(sizeof(*dev),GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev),GFP_KERNEL);
if (NULL == dev)
goto fail_core;
- memset(dev,0,sizeof(*dev));
dev->pci = pci_dev;
dev->core = core;
@@ -473,6 +667,12 @@ static int __devinit dvb_probe(struct pci_dev *pci_dev,
if (0 != err)
goto fail_free;
+#ifdef HAVE_VP3054_I2C
+ err = vp3054_i2c_probe(dev);
+ if (0 != err)
+ goto fail_free;
+#endif
+
/* dvb stuff */
printk("%s/2: cx2388x based dvb card\n", core->name);
videobuf_queue_init(&dev->dvb.dvbq, &dvb_qops,
@@ -484,6 +684,9 @@ static int __devinit dvb_probe(struct pci_dev *pci_dev,
err = dvb_register(dev);
if (0 != err)
goto fail_fini;
+
+ /* Maintain a reference so cx88-video can query the 8802 device. */
+ core->dvbdev = dev;
return 0;
fail_fini:
@@ -499,9 +702,16 @@ static void __devexit dvb_remove(struct pci_dev *pci_dev)
{
struct cx8802_dev *dev = pci_get_drvdata(pci_dev);
+ /* Destroy any 8802 reference. */
+ dev->core->dvbdev = NULL;
+
/* dvb */
videobuf_dvb_unregister(&dev->dvb);
+#ifdef HAVE_VP3054_I2C
+ vp3054_i2c_remove(dev);
+#endif
+
/* common */
cx8802_fini_common(dev);
cx88_core_put(dev->core,dev->pci);
diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
index 9790d412f19..f720901e963 100644
--- a/drivers/media/video/cx88/cx88-i2c.c
+++ b/drivers/media/video/cx88/cx88-i2c.c
@@ -30,6 +30,7 @@
#include <asm/io.h>
#include "cx88.h"
+#include <media/v4l2-common.h>
static unsigned int i2c_debug = 0;
module_param(i2c_debug, int, 0644);
@@ -94,7 +95,7 @@ static int attach_inform(struct i2c_client *client)
struct cx88_core *core = i2c_get_adapdata(client->adapter);
dprintk(1, "%s i2c attach [addr=0x%x,client=%s]\n",
- client->driver->name, client->addr, client->name);
+ client->driver->driver.name, client->addr, client->name);
if (!client->driver->command)
return 0;
@@ -135,7 +136,17 @@ void cx88_call_i2c_clients(struct cx88_core *core, unsigned int cmd, void *arg)
{
if (0 != core->i2c_rc)
return;
- i2c_clients_command(&core->i2c_adap, cmd, arg);
+
+ if (core->dvbdev) {
+ if (core->dvbdev->dvb.frontend->ops->i2c_gate_ctrl)
+ core->dvbdev->dvb.frontend->ops->i2c_gate_ctrl(core->dvbdev->dvb.frontend, 1);
+
+ i2c_clients_command(&core->i2c_adap, cmd, arg);
+
+ if (core->dvbdev->dvb.frontend->ops->i2c_gate_ctrl)
+ core->dvbdev->dvb.frontend->ops->i2c_gate_ctrl(core->dvbdev->dvb.frontend, 0);
+ } else
+ i2c_clients_command(&core->i2c_adap, cmd, arg);
}
static struct i2c_algo_bit_data cx8800_i2c_algo_template = {
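
Editorial note (not part of the patch): the new cx88_call_i2c_clients() path above opens the demodulator's I2C gate before forwarding a command and closes it afterwards, so clients sitting behind the gate (the hybrid tuner, for instance) stay reachable once a DVB frontend has been registered. A minimal sketch of the same open/forward/close pattern follows; the helper name is hypothetical, the calls mirror the kernel APIs used in the hunk above.

static void forward_to_i2c_clients(struct cx88_core *core,
				   unsigned int cmd, void *arg)
{
	struct dvb_frontend *fe =
		core->dvbdev ? core->dvbdev->dvb.frontend : NULL;

	if (fe && fe->ops->i2c_gate_ctrl)
		fe->ops->i2c_gate_ctrl(fe, 1);	/* open the gate */

	i2c_clients_command(&core->i2c_adap, cmd, arg);

	if (fe && fe->ops->i2c_gate_ctrl)
		fe->ops->i2c_gate_ctrl(fe, 0);	/* close the gate again */
}
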
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index 461019dca90..da2ad5c4b55 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -5,7 +5,7 @@
*
* Copyright (c) 2003 Pavel Machek
* Copyright (c) 2004 Gerd Knorr
- * Copyright (c) 2004 Chris Pascoe
+ * Copyright (c) 2004, 2005 Chris Pascoe
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -29,9 +29,8 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <media/ir-common.h>
-
#include "cx88.h"
+#include <media/ir-common.h>
/* ---------------------------------------------------------------------- */
@@ -258,6 +257,114 @@ static IR_KEYTAB_TYPE ir_codes_cinergy_1400[IR_KEYTAB_SIZE] = {
/* ---------------------------------------------------------------------- */
+/* AVERTV STUDIO 303 Remote */
+static IR_KEYTAB_TYPE ir_codes_avertv_303[IR_KEYTAB_SIZE] = {
+ [ 0x2a ] = KEY_KP1,
+ [ 0x32 ] = KEY_KP2,
+ [ 0x3a ] = KEY_KP3,
+ [ 0x4a ] = KEY_KP4,
+ [ 0x52 ] = KEY_KP5,
+ [ 0x5a ] = KEY_KP6,
+ [ 0x6a ] = KEY_KP7,
+ [ 0x72 ] = KEY_KP8,
+ [ 0x7a ] = KEY_KP9,
+ [ 0x0e ] = KEY_KP0,
+
+ [ 0x02 ] = KEY_POWER,
+ [ 0x22 ] = KEY_VIDEO,
+ [ 0x42 ] = KEY_AUDIO,
+ [ 0x62 ] = KEY_ZOOM,
+ [ 0x0a ] = KEY_TV,
+ [ 0x12 ] = KEY_CD,
+ [ 0x1a ] = KEY_TEXT,
+
+ [ 0x16 ] = KEY_SUBTITLE,
+ [ 0x1e ] = KEY_REWIND,
+ [ 0x06 ] = KEY_PRINT,
+
+ [ 0x2e ] = KEY_SEARCH,
+ [ 0x36 ] = KEY_SLEEP,
+ [ 0x3e ] = KEY_SHUFFLE,
+ [ 0x26 ] = KEY_MUTE,
+
+ [ 0x4e ] = KEY_RECORD,
+ [ 0x56 ] = KEY_PAUSE,
+ [ 0x5e ] = KEY_STOP,
+ [ 0x46 ] = KEY_PLAY,
+
+ [ 0x6e ] = KEY_RED,
+ [ 0x0b ] = KEY_GREEN,
+ [ 0x66 ] = KEY_YELLOW,
+ [ 0x03 ] = KEY_BLUE,
+
+ [ 0x76 ] = KEY_LEFT,
+ [ 0x7e ] = KEY_RIGHT,
+ [ 0x13 ] = KEY_DOWN,
+ [ 0x1b ] = KEY_UP,
+};
+
+/* ---------------------------------------------------------------------- */
+
+/* DigitalNow DNTV Live! DVB-T Pro Remote */
+static IR_KEYTAB_TYPE ir_codes_dntv_live_dvbt_pro[IR_KEYTAB_SIZE] = {
+ [ 0x16 ] = KEY_POWER,
+ [ 0x5b ] = KEY_HOME,
+
+ [ 0x55 ] = KEY_TV, /* live tv */
+ [ 0x58 ] = KEY_TUNER, /* digital Radio */
+ [ 0x5a ] = KEY_RADIO, /* FM radio */
+ [ 0x59 ] = KEY_DVD, /* dvd menu */
+ [ 0x03 ] = KEY_1,
+ [ 0x01 ] = KEY_2,
+ [ 0x06 ] = KEY_3,
+ [ 0x09 ] = KEY_4,
+ [ 0x1d ] = KEY_5,
+ [ 0x1f ] = KEY_6,
+ [ 0x0d ] = KEY_7,
+ [ 0x19 ] = KEY_8,
+ [ 0x1b ] = KEY_9,
+ [ 0x0c ] = KEY_CANCEL,
+ [ 0x15 ] = KEY_0,
+ [ 0x4a ] = KEY_CLEAR,
+ [ 0x13 ] = KEY_BACK,
+ [ 0x00 ] = KEY_TAB,
+ [ 0x4b ] = KEY_UP,
+ [ 0x4e ] = KEY_LEFT,
+ [ 0x4f ] = KEY_OK,
+ [ 0x52 ] = KEY_RIGHT,
+ [ 0x51 ] = KEY_DOWN,
+ [ 0x1e ] = KEY_VOLUMEUP,
+ [ 0x0a ] = KEY_VOLUMEDOWN,
+ [ 0x02 ] = KEY_CHANNELDOWN,
+ [ 0x05 ] = KEY_CHANNELUP,
+ [ 0x11 ] = KEY_RECORD,
+ [ 0x14 ] = KEY_PLAY,
+ [ 0x4c ] = KEY_PAUSE,
+ [ 0x1a ] = KEY_STOP,
+ [ 0x40 ] = KEY_REWIND,
+ [ 0x12 ] = KEY_FASTFORWARD,
+ [ 0x41 ] = KEY_PREVIOUSSONG, /* replay |< */
+ [ 0x42 ] = KEY_NEXTSONG, /* skip >| */
+ [ 0x54 ] = KEY_CAMERA, /* capture */
+ [ 0x50 ] = KEY_LANGUAGE, /* sap */
+ [ 0x47 ] = KEY_TV2, /* pip */
+ [ 0x4d ] = KEY_SCREEN,
+ [ 0x43 ] = KEY_SUBTITLE,
+ [ 0x10 ] = KEY_MUTE,
+ [ 0x49 ] = KEY_AUDIO, /* l/r */
+ [ 0x07 ] = KEY_SLEEP,
+ [ 0x08 ] = KEY_VIDEO, /* a/v */
+ [ 0x0e ] = KEY_PREVIOUS, /* recall */
+ [ 0x45 ] = KEY_ZOOM, /* zoom + */
+ [ 0x46 ] = KEY_ANGLE, /* zoom - */
+ [ 0x56 ] = KEY_RED,
+ [ 0x57 ] = KEY_GREEN,
+ [ 0x5c ] = KEY_YELLOW,
+ [ 0x5d ] = KEY_BLUE,
+};
+
+/* ---------------------------------------------------------------------- */
+
struct cx88_IR {
struct cx88_core *core;
struct input_dev *input;
@@ -266,7 +373,7 @@ struct cx88_IR {
char phys[32];
/* sample from gpio pin 16 */
- int sampling;
+ u32 sampling;
u32 samples[16];
int scount;
unsigned long release;
@@ -384,10 +491,13 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
case CX88_BOARD_TERRATEC_CINERGY_1400_DVB_T1:
ir_codes = ir_codes_cinergy_1400;
ir_type = IR_TYPE_PD;
- ir->sampling = 1;
+ ir->sampling = 0xeb04; /* address */
break;
case CX88_BOARD_HAUPPAUGE:
case CX88_BOARD_HAUPPAUGE_DVB_T1:
+ case CX88_BOARD_HAUPPAUGE_NOVASE2_S1:
+ case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
+ case CX88_BOARD_HAUPPAUGE_HVR1100:
ir_codes = ir_codes_hauppauge_new;
ir_type = IR_TYPE_RC5;
ir->sampling = 1;
@@ -427,6 +537,19 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
ir->mask_keyup = 0x40;
ir->polling = 1; /* ms */
break;
+ case CX88_BOARD_AVERTV_303:
+ case CX88_BOARD_AVERTV_STUDIO_303:
+ ir_codes = ir_codes_avertv_303;
+ ir->gpio_addr = MO_GP2_IO;
+ ir->mask_keycode = 0xfb;
+ ir->mask_keydown = 0x02;
+ ir->polling = 50; /* ms */
+ break;
+ case CX88_BOARD_DNTV_LIVE_DVB_T_PRO:
+ ir_codes = ir_codes_dntv_live_dvbt_pro;
+ ir_type = IR_TYPE_PD;
+ ir->sampling = 0xff00; /* address */
+ break;
}
if (NULL == ir_codes) {
@@ -484,6 +607,10 @@ int cx88_ir_fini(struct cx88_core *core)
if (NULL == ir)
return 0;
+ if (ir->sampling) {
+ cx_write(MO_DDSCFG_IO, 0x0);
+ core->pci_irqmask &= ~(1 << 18);
+ }
if (ir->polling) {
del_timer(&ir->timer);
flush_scheduled_work();
@@ -535,6 +662,7 @@ void cx88_ir_irq(struct cx88_core *core)
/* decode it */
switch (core->board) {
case CX88_BOARD_TERRATEC_CINERGY_1400_DVB_T1:
+ case CX88_BOARD_DNTV_LIVE_DVB_T_PRO:
ircode = ir_decode_pulsedistance(ir->samples, ir->scount, 1, 4);
if (ircode == 0xffffffff) { /* decoding error */
@@ -550,7 +678,7 @@ void cx88_ir_irq(struct cx88_core *core)
break;
}
- if ((ircode & 0xffff) != 0xeb04) { /* wrong address */
+ if ((ircode & 0xffff) != (ir->sampling & 0xffff)) { /* wrong address */
ir_dprintk("pulse distance decoded wrong address\n");
break;
}
@@ -567,6 +695,9 @@ void cx88_ir_irq(struct cx88_core *core)
break;
case CX88_BOARD_HAUPPAUGE:
case CX88_BOARD_HAUPPAUGE_DVB_T1:
+ case CX88_BOARD_HAUPPAUGE_NOVASE2_S1:
+ case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
+ case CX88_BOARD_HAUPPAUGE_HVR1100:
ircode = ir_decode_biphase(ir->samples, ir->scount, 5, 7);
ir_dprintk("biphase decoded: %x\n", ircode);
if ((ircode & 0xfffff000) != 0x3000)
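
Editorial note (not part of the patch): for the pulse-distance boards, ir->sampling now doubles as the expected remote address (0xeb04 for the Cinergy 1400, 0xff00 for the DNTV Live! DVB-T Pro) instead of being a plain enable flag, and only the low 16 bits of the decoded code are compared against it. A tiny sketch of that check, with a hypothetical helper name:

static int ir_address_ok(u32 ircode, u32 expected)
{
	/* low 16 bits of a pulse-distance code carry the remote address */
	return (ircode & 0xffff) == (expected & 0xffff);
}
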
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index 35e6d0c2b87..c79cc1d2bf8 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -78,6 +78,11 @@ static int cx8802_start_dma(struct cx8802_dev *dev,
case CX88_BOARD_DVICO_FUSIONHDTV_5_GOLD:
cx_write(TS_SOP_STAT, 1<<13);
break;
+ case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
+ case CX88_BOARD_HAUPPAUGE_NOVASE2_S1:
+ cx_write(MO_PINMUX_IO, 0x88); /* Enable MPEG parallel IO and video signal pins */
+ udelay(100);
+ break;
default:
cx_write(TS_SOP_STAT, 0x00);
break;
diff --git a/drivers/media/video/cx88/cx88-tvaudio.c b/drivers/media/video/cx88/cx88-tvaudio.c
index a1b120c8a9b..24118e43e73 100644
--- a/drivers/media/video/cx88/cx88-tvaudio.c
+++ b/drivers/media/video/cx88/cx88-tvaudio.c
@@ -132,14 +132,22 @@ static void set_audio_finish(struct cx88_core *core, u32 ctl)
{
u32 volume;
+#ifndef USING_CX88_ALSA
/* restart dma; This avoids buzz in NICAM and is good in others */
cx88_stop_audio_dma(core);
+#endif
cx_write(AUD_RATE_THRES_DMD, 0x000000C0);
+#ifndef USING_CX88_ALSA
cx88_start_audio_dma(core);
+#endif
if (cx88_boards[core->board].blackbird) {
/* sets sound input from external adc */
- cx_set(AUD_CTL, EN_I2SIN_ENABLE);
+ if (core->board == CX88_BOARD_HAUPPAUGE_ROSLYN)
+ cx_clear(AUD_CTL, EN_I2SIN_ENABLE);
+ else
+ cx_set(AUD_CTL, EN_I2SIN_ENABLE);
+
cx_write(AUD_I2SINPUTCNTL, 4);
cx_write(AUD_BAUDRATE, 1);
/* 'pass-thru mode': this enables the i2s output to the mpeg encoder */
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 24a48f8a48c..073494ceab0 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -33,6 +33,7 @@
#include <asm/div64.h>
#include "cx88.h"
+#include <media/v4l2-common.h>
/* Include V4L1 specific functions. Should be removed soon */
#include <linux/videodev.h>
@@ -240,7 +241,7 @@ static struct cx88_ctrl cx8800_ctls[] = {
.minimum = 0,
.maximum = 0xff,
.step = 1,
- .default_value = 0,
+ .default_value = 0x3f,
.type = V4L2_CTRL_TYPE_INTEGER,
},
.off = 0,
@@ -271,7 +272,7 @@ static struct cx88_ctrl cx8800_ctls[] = {
.minimum = 0,
.maximum = 0xff,
.step = 1,
- .default_value = 0,
+ .default_value = 0x7f,
.type = V4L2_CTRL_TYPE_INTEGER,
},
.off = 0,
@@ -285,6 +286,7 @@ static struct cx88_ctrl cx8800_ctls[] = {
.name = "Mute",
.minimum = 0,
.maximum = 1,
+ .default_value = 1,
.type = V4L2_CTRL_TYPE_BOOLEAN,
},
.reg = AUD_VOL_CTL,
@@ -298,7 +300,7 @@ static struct cx88_ctrl cx8800_ctls[] = {
.minimum = 0,
.maximum = 0x3f,
.step = 1,
- .default_value = 0,
+ .default_value = 0x1f,
.type = V4L2_CTRL_TYPE_INTEGER,
},
.reg = AUD_VOL_CTL,
@@ -748,10 +750,9 @@ static int video_open(struct inode *inode, struct file *file)
minor,radio,v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
- fh = kmalloc(sizeof(*fh),GFP_KERNEL);
+ fh = kzalloc(sizeof(*fh),GFP_KERNEL);
if (NULL == fh)
return -ENOMEM;
- memset(fh,0,sizeof(*fh));
file->private_data = fh;
fh->dev = dev;
fh->radio = radio;
@@ -917,6 +918,9 @@ static int get_control(struct cx88_core *core, struct v4l2_control *ctl)
ctl->value = ((value + (c->off << c->shift)) & c->mask) >> c->shift;
break;
}
+ printk("get_control id=0x%X reg=0x%02x val=0x%02x (mask 0x%02x)%s\n",
+ ctl->id, c->reg, ctl->value,
+ c->mask, c->sreg ? " [shadowed]" : "");
return 0;
}
@@ -925,13 +929,13 @@ static int set_control(struct cx88_core *core, struct v4l2_control *ctl)
{
/* struct cx88_core *core = dev->core; */
struct cx88_ctrl *c = NULL;
- u32 v_sat_value;
- u32 value;
+ u32 value,mask;
int i;
-
- for (i = 0; i < CX8800_CTLS; i++)
- if (cx8800_ctls[i].v.id == ctl->id)
+ for (i = 0; i < CX8800_CTLS; i++) {
+ if (cx8800_ctls[i].v.id == ctl->id) {
c = &cx8800_ctls[i];
+ }
+ }
if (NULL == c)
return -EINVAL;
@@ -939,6 +943,7 @@ static int set_control(struct cx88_core *core, struct v4l2_control *ctl)
ctl->value = c->v.minimum;
if (ctl->value > c->v.maximum)
ctl->value = c->v.maximum;
+ mask=c->mask;
switch (ctl->id) {
case V4L2_CID_AUDIO_BALANCE:
value = (ctl->value < 0x40) ? (0x40 - ctl->value) : ctl->value;
@@ -948,56 +953,44 @@ static int set_control(struct cx88_core *core, struct v4l2_control *ctl)
break;
case V4L2_CID_SATURATION:
/* special v_sat handling */
- v_sat_value = ctl->value - (0x7f - 0x5a);
- if (v_sat_value > 0xff)
- v_sat_value = 0xff;
- if (v_sat_value < 0x00)
- v_sat_value = 0x00;
- cx_andor(MO_UV_SATURATION, 0xff00, v_sat_value << 8);
- /* fall through to default route for u_sat */
+
+ value = ((ctl->value - c->off) << c->shift) & c->mask;
+
+ if (core->tvnorm->id & V4L2_STD_SECAM) {
+ /* For SECAM, both U and V sat should be equal */
+ value=value<<8|value;
+ } else {
+ /* Keeps U Saturation proportional to V Sat */
+ value=(value*0x5a)/0x7f<<8|value;
+ }
+ mask=0xffff;
+ break;
default:
value = ((ctl->value - c->off) << c->shift) & c->mask;
break;
}
- dprintk(1,"set_control id=0x%X reg=0x%x val=0x%x%s\n",
- ctl->id, c->reg, value, c->sreg ? " [shadowed]" : "");
+ printk("set_control id=0x%X reg=0x%02x val=0x%02x (mask 0x%02x)%s\n",
+ ctl->id, c->reg, value,
+ mask, c->sreg ? " [shadowed]" : "");
if (c->sreg) {
- cx_sandor(c->sreg, c->reg, c->mask, value);
+ cx_sandor(c->sreg, c->reg, mask, value);
} else {
- cx_andor(c->reg, c->mask, value);
+ cx_andor(c->reg, mask, value);
}
return 0;
}
-/* static void init_controls(struct cx8800_dev *dev) */
static void init_controls(struct cx88_core *core)
{
- static struct v4l2_control mute = {
- .id = V4L2_CID_AUDIO_MUTE,
- .value = 1,
- };
- static struct v4l2_control volume = {
- .id = V4L2_CID_AUDIO_VOLUME,
- .value = 0x3f,
- };
- static struct v4l2_control hue = {
- .id = V4L2_CID_HUE,
- .value = 0x80,
- };
- static struct v4l2_control contrast = {
- .id = V4L2_CID_CONTRAST,
- .value = 0x80,
- };
- static struct v4l2_control brightness = {
- .id = V4L2_CID_BRIGHTNESS,
- .value = 0x80,
- };
+ struct v4l2_control ctrl;
+ int i;
- set_control(core,&mute);
- set_control(core,&volume);
- set_control(core,&hue);
- set_control(core,&contrast);
- set_control(core,&brightness);
+ for (i = 0; i < CX8800_CTLS; i++) {
+ ctrl.id=cx8800_ctls[i].v.id;
+ ctrl.value=cx8800_ctls[i].v.default_value
+ +cx8800_ctls[i].off;
+ set_control(core, &ctrl);
+ }
}
/* ------------------------------------------------------------------ */
@@ -1125,7 +1118,7 @@ static int video_do_ioctl(struct inode *inode, struct file *file,
int err;
if (video_debug > 1)
- cx88_print_ioctl(core->name,cmd);
+ v4l_print_ioctl(core->name,cmd);
switch (cmd) {
/* --- capabilities ------------------------------------------ */
@@ -1261,7 +1254,7 @@ int cx88_do_ioctl(struct inode *inode, struct file *file, int radio,
dprintk( 1, "CORE IOCTL: 0x%x\n", cmd );
if (video_debug > 1)
- cx88_print_ioctl(core->name,cmd);
+ v4l_print_ioctl(core->name,cmd);
switch (cmd) {
/* ---------- tv norms ---------- */
@@ -1481,7 +1474,7 @@ static int radio_do_ioctl(struct inode *inode, struct file *file,
struct cx88_core *core = dev->core;
if (video_debug > 1)
- cx88_print_ioctl(core->name,cmd);
+ v4l_print_ioctl(core->name,cmd);
switch (cmd) {
case VIDIOC_QUERYCAP:
@@ -1740,6 +1733,7 @@ static struct file_operations video_fops =
.poll = video_poll,
.mmap = video_mmap,
.ioctl = video_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
@@ -1767,6 +1761,7 @@ static struct file_operations radio_fops =
.open = video_open,
.release = video_release,
.ioctl = radio_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
@@ -1813,10 +1808,9 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
struct cx88_core *core;
int err;
- dev = kmalloc(sizeof(*dev),GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev),GFP_KERNEL);
if (NULL == dev)
return -ENOMEM;
- memset(dev,0,sizeof(*dev));
/* pci init */
dev->pci = pci_dev;
@@ -1928,8 +1922,8 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
/* initial device configuration */
down(&core->lock);
- init_controls(core);
cx88_set_tvnorm(core,tvnorms);
+ init_controls(core);
video_mux(core,0);
up(&core->lock);
diff --git a/drivers/media/video/cx88/cx88-vp3054-i2c.c b/drivers/media/video/cx88/cx88-vp3054-i2c.c
new file mode 100644
index 00000000000..372cd29cedb
--- /dev/null
+++ b/drivers/media/video/cx88/cx88-vp3054-i2c.c
@@ -0,0 +1,173 @@
+/*
+
+ cx88-vp3054-i2c.c -- support for the secondary I2C bus of the
+ DNTV Live! DVB-T Pro (VP-3054), wired as:
+ GPIO[0] -> SCL, GPIO[1] -> SDA
+
+ (c) 2005 Chris Pascoe <c.pascoe@itee.uq.edu.au>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+
+#include "cx88.h"
+#include "cx88-vp3054-i2c.h"
+
+
+/* ----------------------------------------------------------------------- */
+
+static void vp3054_bit_setscl(void *data, int state)
+{
+ struct cx8802_dev *dev = data;
+ struct cx88_core *core = dev->core;
+ struct vp3054_i2c_state *vp3054_i2c = dev->card_priv;
+
+ if (state) {
+ vp3054_i2c->state |= 0x0001; /* SCL high */
+ vp3054_i2c->state &= ~0x0100; /* external pullup */
+ } else {
+ vp3054_i2c->state &= ~0x0001; /* SCL low */
+ vp3054_i2c->state |= 0x0100; /* drive pin */
+ }
+ cx_write(MO_GP0_IO, 0x010000 | vp3054_i2c->state);
+ cx_read(MO_GP0_IO);
+}
+
+static void vp3054_bit_setsda(void *data, int state)
+{
+ struct cx8802_dev *dev = data;
+ struct cx88_core *core = dev->core;
+ struct vp3054_i2c_state *vp3054_i2c = dev->card_priv;
+
+ if (state) {
+ vp3054_i2c->state |= 0x0002; /* SDA high */
+ vp3054_i2c->state &= ~0x0200; /* tristate pin */
+ } else {
+ vp3054_i2c->state &= ~0x0002; /* SDA low */
+ vp3054_i2c->state |= 0x0200; /* drive pin */
+ }
+ cx_write(MO_GP0_IO, 0x020000 | vp3054_i2c->state);
+ cx_read(MO_GP0_IO);
+}
+
+static int vp3054_bit_getscl(void *data)
+{
+ struct cx8802_dev *dev = data;
+ struct cx88_core *core = dev->core;
+ u32 state;
+
+ state = cx_read(MO_GP0_IO);
+ return (state & 0x01) ? 1 : 0;
+}
+
+static int vp3054_bit_getsda(void *data)
+{
+ struct cx8802_dev *dev = data;
+ struct cx88_core *core = dev->core;
+ u32 state;
+
+ state = cx_read(MO_GP0_IO);
+ return (state & 0x02) ? 1 : 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+static struct i2c_algo_bit_data vp3054_i2c_algo_template = {
+ .setsda = vp3054_bit_setsda,
+ .setscl = vp3054_bit_setscl,
+ .getsda = vp3054_bit_getsda,
+ .getscl = vp3054_bit_getscl,
+ .udelay = 16,
+ .mdelay = 10,
+ .timeout = 200,
+};
+
+/* ----------------------------------------------------------------------- */
+
+static struct i2c_adapter vp3054_i2c_adap_template = {
+ .name = "cx2388x",
+ .owner = THIS_MODULE,
+ .id = I2C_HW_B_CX2388x,
+};
+
+static struct i2c_client vp3054_i2c_client_template = {
+ .name = "VP-3054",
+};
+
+int vp3054_i2c_probe(struct cx8802_dev *dev)
+{
+ struct cx88_core *core = dev->core;
+ struct vp3054_i2c_state *vp3054_i2c;
+ int rc;
+
+ if (core->board != CX88_BOARD_DNTV_LIVE_DVB_T_PRO)
+ return 0;
+
+ dev->card_priv = kzalloc(sizeof(*vp3054_i2c), GFP_KERNEL);
+ if (dev->card_priv == NULL)
+ return -ENOMEM;
+ vp3054_i2c = dev->card_priv;
+
+ memcpy(&vp3054_i2c->adap, &vp3054_i2c_adap_template,
+ sizeof(vp3054_i2c->adap));
+ memcpy(&vp3054_i2c->algo, &vp3054_i2c_algo_template,
+ sizeof(vp3054_i2c->algo));
+ memcpy(&vp3054_i2c->client, &vp3054_i2c_client_template,
+ sizeof(vp3054_i2c->client));
+
+ vp3054_i2c->adap.class |= I2C_CLASS_TV_DIGITAL;
+
+ vp3054_i2c->adap.dev.parent = &dev->pci->dev;
+ strlcpy(vp3054_i2c->adap.name, core->name,
+ sizeof(vp3054_i2c->adap.name));
+ vp3054_i2c->algo.data = dev;
+ i2c_set_adapdata(&vp3054_i2c->adap, dev);
+ vp3054_i2c->adap.algo_data = &vp3054_i2c->algo;
+ vp3054_i2c->client.adapter = &vp3054_i2c->adap;
+
+ vp3054_bit_setscl(dev,1);
+ vp3054_bit_setsda(dev,1);
+
+ rc = i2c_bit_add_bus(&vp3054_i2c->adap);
+ if (0 != rc) {
+ printk("%s: vp3054_i2c register FAILED\n", core->name);
+
+ kfree(dev->card_priv);
+ dev->card_priv = NULL;
+ }
+
+ return rc;
+}
+
+void vp3054_i2c_remove(struct cx8802_dev *dev)
+{
+ struct vp3054_i2c_state *vp3054_i2c = dev->card_priv;
+
+ if (vp3054_i2c == NULL ||
+ dev->core->board != CX88_BOARD_DNTV_LIVE_DVB_T_PRO)
+ return;
+
+ i2c_bit_del_bus(&vp3054_i2c->adap);
+ kfree(vp3054_i2c);
+}
+
+EXPORT_SYMBOL(vp3054_i2c_probe);
+EXPORT_SYMBOL(vp3054_i2c_remove);
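
Editorial note (not part of the patch): the setscl/setsda handlers above emulate an open-drain bus on two GPIO lines -- a line is released high by tristating the pin and letting the external pullup raise it, and pulled low by enabling the output driver. Bits 0/1 of the cached state hold the SCL/SDA levels and, per the comments in the handlers, bits 8/9 act as the drive-enable bits latched by the cx_write() calls. A condensed sketch of that bookkeeping (helper name hypothetical):

static void gpio_open_drain_set(struct vp3054_i2c_state *s,
				u32 level_bit, u32 drive_bit, int high)
{
	if (high) {
		s->state |= level_bit;	/* release: external pullup raises the line */
		s->state &= ~drive_bit;
	} else {
		s->state &= ~level_bit;	/* pull low: actively drive the pin */
		s->state |= drive_bit;
	}
	/* the caller then latches s->state into MO_GP0_IO, as the code above does */
}
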
diff --git a/drivers/media/video/cx88/cx88-vp3054-i2c.h b/drivers/media/video/cx88/cx88-vp3054-i2c.h
new file mode 100644
index 00000000000..b7a0a04d242
--- /dev/null
+++ b/drivers/media/video/cx88/cx88-vp3054-i2c.h
@@ -0,0 +1,35 @@
+/*
+
+ cx88-vp3054-i2c.h -- support for the secondary I2C bus of the
+ DNTV Live! DVB-T Pro (VP-3054), wired as:
+ GPIO[0] -> SCL, GPIO[1] -> SDA
+
+ (c) 2005 Chris Pascoe <c.pascoe@itee.uq.edu.au>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+/* ----------------------------------------------------------------------- */
+struct vp3054_i2c_state {
+ struct i2c_adapter adap;
+ struct i2c_algo_bit_data algo;
+ struct i2c_client client;
+ u32 state;
+};
+
+/* ----------------------------------------------------------------------- */
+int vp3054_i2c_probe(struct cx8802_dev *dev);
+void vp3054_i2c_remove(struct cx8802_dev *dev);
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index 77beafc5c32..e9fd55b57fa 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -179,6 +179,14 @@ extern struct sram_channel cx88_sram_channels[];
#define CX88_BOARD_ATI_HDTVWONDER 34
#define CX88_BOARD_WINFAST_DTV1000 35
#define CX88_BOARD_AVERTV_303 36
+#define CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1 37
+#define CX88_BOARD_HAUPPAUGE_NOVASE2_S1 38
+#define CX88_BOARD_KWORLD_DVBS_100 39
+#define CX88_BOARD_HAUPPAUGE_HVR1100 40
+#define CX88_BOARD_HAUPPAUGE_HVR1100LP 41
+#define CX88_BOARD_DNTV_LIVE_DVB_T_PRO 42
+#define CX88_BOARD_KWORLD_DVB_T_CX22702 43
+#define CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL 44
enum cx88_itype {
CX88_VMUX_COMPOSITE1 = 1,
@@ -280,6 +288,9 @@ struct cx88_core {
unsigned int tda9887_conf;
unsigned int has_radio;
+ /* Supported V4L _STD_ tuner formats */
+ unsigned int tuner_formats;
+
/* config info -- dvb */
struct dvb_pll_desc *pll_desc;
unsigned int pll_addr;
@@ -301,6 +312,9 @@ struct cx88_core {
/* various v4l controls */
u32 freq;
+
+ /* cx88-video needs access to the cx8802 device for hybrid tuner PLL control. */
+ struct cx8802_dev *dvbdev;
};
struct cx8800_dev;
@@ -411,6 +425,8 @@ struct cx8802_dev {
struct videobuf_dvb dvb;
void* fe_handle;
int (*fe_release)(void *handle);
+
+ void *card_priv;
/* for switching modulation types */
unsigned char ts_gen_cntrl;
@@ -447,7 +463,6 @@ struct cx8802_dev {
extern void cx88_print_irqbits(char *name, char *tag, char **strings,
u32 bits, u32 mask);
-extern void cx88_print_ioctl(char *name, unsigned int cmd);
extern int cx88_core_irq(struct cx88_core *core, u32 status);
extern void cx88_wakeup(struct cx88_core *core,
diff --git a/drivers/media/video/dpc7146.c b/drivers/media/video/dpc7146.c
index da9481198c5..2831bdd1205 100644
--- a/drivers/media/video/dpc7146.c
+++ b/drivers/media/video/dpc7146.c
@@ -94,12 +94,11 @@ static int dpc_probe(struct saa7146_dev* dev)
struct i2c_client *client;
struct list_head *item;
- dpc = (struct dpc*)kmalloc(sizeof(struct dpc), GFP_KERNEL);
+ dpc = kzalloc(sizeof(struct dpc), GFP_KERNEL);
if( NULL == dpc ) {
printk("dpc_v4l2.o: dpc_probe: not enough kernel memory.\n");
return -ENOMEM;
}
- memset(dpc, 0x0, sizeof(struct dpc));
/* FIXME: enable i2c-port pins, video-port-pins
video port pins should be enabled here ?! */
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 57779e63f35..58f7b4194a0 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -30,6 +30,7 @@
#include <media/tuner.h>
#include <media/audiochip.h>
#include <media/tveeprom.h>
+#include <media/v4l2-common.h>
#include "msp3400.h"
#include "em28xx.h"
@@ -261,7 +262,6 @@ void em28xx_card_setup(struct em28xx *dev)
/* request some modules */
if (dev->model == EM2820_BOARD_HAUPPAUGE_WINTV_USB_2) {
struct tveeprom tv;
- struct v4l2_audioout ao;
#ifdef CONFIG_MODULES
request_module("tveeprom");
request_module("ir-kbd-i2c");
@@ -274,12 +274,8 @@ void em28xx_card_setup(struct em28xx *dev)
dev->tuner_type= tv.tuner_type;
if (tv.audio_processor == AUDIO_CHIP_MSP34XX) {
+ dev->i2s_speed=2048000;
dev->has_msp34xx=1;
- memset (&ao,0,sizeof(ao));
-
- ao.index=2;
- ao.mode=V4L2_AUDMODE_32BITS;
- em28xx_i2c_call_clients(dev, VIDIOC_S_AUDOUT, &ao);
} else
dev->has_msp34xx=0;
}
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index 0cfe75416ec..dff3893f32f 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -32,7 +32,7 @@
/* #define ENABLE_DEBUG_ISOC_FRAMES */
-static unsigned int core_debug;
+static unsigned int core_debug = 0;
module_param(core_debug,int,0644);
MODULE_PARM_DESC(core_debug,"enable debug messages [core]");
@@ -41,7 +41,7 @@ MODULE_PARM_DESC(core_debug,"enable debug messages [core]");
printk(KERN_INFO "%s %s :"fmt, \
dev->name, __FUNCTION__ , ##arg); } while (0)
-static unsigned int reg_debug;
+static unsigned int reg_debug = 0;
module_param(reg_debug,int,0644);
MODULE_PARM_DESC(reg_debug,"enable debug messages [URB reg]");
@@ -50,7 +50,7 @@ MODULE_PARM_DESC(reg_debug,"enable debug messages [URB reg]");
printk(KERN_INFO "%s %s :"fmt, \
dev->name, __FUNCTION__ , ##arg); } while (0)
-static unsigned int isoc_debug;
+static unsigned int isoc_debug = 0;
module_param(isoc_debug,int,0644);
MODULE_PARM_DESC(isoc_debug,"enable debug messages [isoc transfers]");
@@ -63,59 +63,6 @@ static int alt = EM28XX_PINOUT;
module_param(alt, int, 0644);
MODULE_PARM_DESC(alt, "alternate setting to use for video endpoint");
-/* ------------------------------------------------------------------ */
-/* debug help functions */
-
-static const char *v4l1_ioctls[] = {
- "0", "CGAP", "GCHAN", "SCHAN", "GTUNER", "STUNER", "GPICT", "SPICT",
- "CCAPTURE", "GWIN", "SWIN", "GFBUF", "SFBUF", "KEY", "GFREQ",
- "SFREQ", "GAUDIO", "SAUDIO", "SYNC", "MCAPTURE", "GMBUF", "GUNIT",
- "GCAPTURE", "SCAPTURE", "SPLAYMODE", "SWRITEMODE", "GPLAYINFO",
- "SMICROCODE", "GVBIFMT", "SVBIFMT" };
-#define V4L1_IOCTLS ARRAY_SIZE(v4l1_ioctls)
-
-static const char *v4l2_ioctls[] = {
- "QUERYCAP", "1", "ENUM_PIXFMT", "ENUM_FBUFFMT", "G_FMT", "S_FMT",
- "G_COMP", "S_COMP", "REQBUFS", "QUERYBUF", "G_FBUF", "S_FBUF",
- "G_WIN", "S_WIN", "PREVIEW", "QBUF", "16", "DQBUF", "STREAMON",
- "STREAMOFF", "G_PERF", "G_PARM", "S_PARM", "G_STD", "S_STD",
- "ENUMSTD", "ENUMINPUT", "G_CTRL", "S_CTRL", "G_TUNER", "S_TUNER",
- "G_FREQ", "S_FREQ", "G_AUDIO", "S_AUDIO", "35", "QUERYCTRL",
- "QUERYMENU", "G_INPUT", "S_INPUT", "ENUMCVT", "41", "42", "43",
- "44", "45", "G_OUTPUT", "S_OUTPUT", "ENUMOUTPUT", "G_AUDOUT",
- "S_AUDOUT", "ENUMFX", "G_EFFECT", "S_EFFECT", "G_MODULATOR",
- "S_MODULATOR"
-};
-#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
-
-void em28xx_print_ioctl(char *name, unsigned int cmd)
-{
- char *dir;
-
- switch (_IOC_DIR(cmd)) {
- case _IOC_NONE: dir = "--"; break;
- case _IOC_READ: dir = "r-"; break;
- case _IOC_WRITE: dir = "-w"; break;
- case _IOC_READ | _IOC_WRITE: dir = "rw"; break;
- default: dir = "??"; break;
- }
- switch (_IOC_TYPE(cmd)) {
- case 'v':
- printk(KERN_DEBUG "%s: ioctl 0x%08x (v4l1, %s, VIDIOC%s)\n",
- name, cmd, dir, (_IOC_NR(cmd) < V4L1_IOCTLS) ?
- v4l1_ioctls[_IOC_NR(cmd)] : "???");
- break;
- case 'V':
- printk(KERN_DEBUG "%s: ioctl 0x%08x (v4l2, %s, VIDIOC_%s)\n",
- name, cmd, dir, (_IOC_NR(cmd) < V4L2_IOCTLS) ?
- v4l2_ioctls[_IOC_NR(cmd)] : "???");
- break;
- default:
- printk(KERN_DEBUG "%s: ioctl 0x%08x (???, %s, #%d)\n",
- name, cmd, dir, _IOC_NR(cmd));
- }
-}
-
/*
* em28xx_request_buffers()
@@ -126,7 +73,7 @@ u32 em28xx_request_buffers(struct em28xx *dev, u32 count)
const size_t imagesize = PAGE_ALIGN(dev->frame_size); /*needs to be page aligned cause the buffers can be mapped individually! */
void *buff = NULL;
u32 i;
- em28xx_coredbg("requested %i buffers with size %zd", count, imagesize);
+ em28xx_coredbg("requested %i buffers with size %zi", count, imagesize);
if (count > EM28XX_NUM_FRAMES)
count = EM28XX_NUM_FRAMES;
diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c
index 7f5603054f0..0591a705b7a 100644
--- a/drivers/media/video/em28xx/em28xx-i2c.c
+++ b/drivers/media/video/em28xx/em28xx-i2c.c
@@ -28,6 +28,7 @@
#include <linux/video_decoder.h>
#include "em28xx.h"
+#include <media/v4l2-common.h>
#include <media/tuner.h>
/* ----------------------------------------------------------- */
@@ -486,9 +487,7 @@ static struct i2c_adapter em28xx_adap_template = {
.inc_use = inc_use,
.dec_use = dec_use,
#endif
-#ifdef I2C_CLASS_TV_ANALOG
.class = I2C_CLASS_TV_ANALOG,
-#endif
.name = "em28xx",
.id = I2C_HW_B_EM28XX,
.algo = &em28xx_algo,
@@ -497,7 +496,6 @@ static struct i2c_adapter em28xx_adap_template = {
static struct i2c_client em28xx_client_template = {
.name = "em28xx internal",
- .flags = I2C_CLIENT_ALLOW_USE,
};
/* ----------------------------------------------------------- */
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 3a56120397a..3323dffe26a 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -32,6 +32,7 @@
#include "em28xx.h"
#include <media/tuner.h>
+#include <media/v4l2-common.h>
#define DRIVER_AUTHOR "Ludovico Cavedon <cavedon@sssup.it>, " \
"Markus Rechberger <mrechberger@gmail.com>, " \
@@ -106,8 +107,32 @@ static const unsigned char saa7114_i2c_init[] = {
#define TVNORMS ARRAY_SIZE(tvnorms)
/* supported controls */
+/* Common to all boards */
static struct v4l2_queryctrl em28xx_qctrl[] = {
{
+ .id = V4L2_CID_AUDIO_VOLUME,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Volume",
+ .minimum = 0x0,
+ .maximum = 0x1f,
+ .step = 0x1,
+ .default_value = 0x1f,
+ .flags = 0,
+ },{
+ .id = V4L2_CID_AUDIO_MUTE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Mute",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ .flags = 0,
+ }
+};
+
+/* FIXME: These are specific to saa711x - should be moved to its code */
+static struct v4l2_queryctrl saa711x_qctrl[] = {
+ {
.id = V4L2_CID_BRIGHTNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Brightness",
@@ -135,24 +160,6 @@ static struct v4l2_queryctrl em28xx_qctrl[] = {
.default_value = 0x10,
.flags = 0,
},{
- .id = V4L2_CID_AUDIO_VOLUME,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Volume",
- .minimum = 0x0,
- .maximum = 0x1f,
- .step = 0x1,
- .default_value = 0x1f,
- .flags = 0,
- },{
- .id = V4L2_CID_AUDIO_MUTE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mute",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- .flags = 0,
- },{
.id = V4L2_CID_RED_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Red chroma balance",
@@ -179,7 +186,7 @@ static struct v4l2_queryctrl em28xx_qctrl[] = {
.step = 0x1,
.default_value = 0x20,
.flags = 0,
- }
+ }
};
static struct usb_driver em28xx_usb_driver;
@@ -280,6 +287,8 @@ static void video_mux(struct em28xx *dev, int index)
em28xx_videodbg("Setting input index=%d, vmux=%d, amux=%d\n",index,input,dev->ctl_ainput);
if (dev->has_msp34xx) {
+ if (dev->i2s_speed)
+ em28xx_i2c_call_clients(dev, VIDIOC_INT_I2S_CLOCK_FREQ, &dev->i2s_speed);
em28xx_i2c_call_clients(dev, VIDIOC_S_AUDIO, &dev->ctl_ainput);
ainput = EM28XX_AUDIO_SRC_TUNER;
em28xx_audio_source(dev, ainput);
@@ -674,7 +683,6 @@ static int em28xx_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
*/
static int em28xx_get_ctrl(struct em28xx *dev, struct v4l2_control *ctrl)
{
- s32 tmp;
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
ctrl->value = dev->mute;
@@ -682,6 +690,16 @@ static int em28xx_get_ctrl(struct em28xx *dev, struct v4l2_control *ctrl)
case V4L2_CID_AUDIO_VOLUME:
ctrl->value = dev->volume;
return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*FIXME: should be moved to saa711x */
+static int saa711x_get_ctrl(struct em28xx *dev, struct v4l2_control *ctrl)
+{
+ s32 tmp;
+ switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
if ((tmp = em28xx_brightness_get(dev)) < 0)
return -EIO;
@@ -731,6 +749,15 @@ static int em28xx_set_ctrl(struct em28xx *dev, const struct v4l2_control *ctrl)
case V4L2_CID_AUDIO_VOLUME:
dev->volume = ctrl->value;
return em28xx_audio_analog_set(dev);
+ default:
+ return -EINVAL;
+ }
+}
+
+/*FIXME: should be moved to saa711x */
+static int saa711x_set_ctrl(struct em28xx *dev, const struct v4l2_control *ctrl)
+{
+ switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
return em28xx_brightness_set(dev, ctrl->value);
case V4L2_CID_CONTRAST:
@@ -994,14 +1021,34 @@ static int em28xx_do_ioctl(struct inode *inode, struct file *filp,
case VIDIOC_QUERYCTRL:
{
struct v4l2_queryctrl *qc = arg;
- u8 i, n;
- n = sizeof(em28xx_qctrl) / sizeof(em28xx_qctrl[0]);
- for (i = 0; i < n; i++)
- if (qc->id && qc->id == em28xx_qctrl[i].id) {
- memcpy(qc, &(em28xx_qctrl[i]),
+ int i, id=qc->id;
+
+ memset(qc,0,sizeof(*qc));
+ qc->id=id;
+
+ if (!dev->has_msp34xx) {
+ for (i = 0; i < ARRAY_SIZE(em28xx_qctrl); i++) {
+ if (qc->id && qc->id == em28xx_qctrl[i].id) {
+ memcpy(qc, &(em28xx_qctrl[i]),
+ sizeof(*qc));
+ return 0;
+ }
+ }
+ }
+ if (dev->decoder == EM28XX_TVP5150) {
+ em28xx_i2c_call_clients(dev,cmd,qc);
+ if (qc->type)
+ return 0;
+ else
+ return -EINVAL;
+ }
+ for (i = 0; i < ARRAY_SIZE(saa711x_qctrl); i++) {
+ if (qc->id && qc->id == saa711x_qctrl[i].id) {
+ memcpy(qc, &(saa711x_qctrl[i]),
sizeof(*qc));
return 0;
}
+ }
return -EINVAL;
}
@@ -1009,29 +1056,64 @@ static int em28xx_do_ioctl(struct inode *inode, struct file *filp,
case VIDIOC_G_CTRL:
{
struct v4l2_control *ctrl = arg;
+ int retval=-EINVAL;
+ if (!dev->has_msp34xx)
+ retval=em28xx_get_ctrl(dev, ctrl);
+ if (retval==-EINVAL) {
+ if (dev->decoder == EM28XX_TVP5150) {
+ em28xx_i2c_call_clients(dev,cmd,arg);
+ return 0;
+ }
- return em28xx_get_ctrl(dev, ctrl);
+ return saa711x_get_ctrl(dev, ctrl);
+ } else return retval;
}
- case VIDIOC_S_CTRL_OLD: /* ??? */
case VIDIOC_S_CTRL:
{
struct v4l2_control *ctrl = arg;
- u8 i, n;
-
-
- n = sizeof(em28xx_qctrl) / sizeof(em28xx_qctrl[0]);
- for (i = 0; i < n; i++)
- if (ctrl->id == em28xx_qctrl[i].id) {
- if (ctrl->value <
- em28xx_qctrl[i].minimum
- || ctrl->value >
- em28xx_qctrl[i].maximum)
- return -ERANGE;
+ u8 i;
+
+ if (!dev->has_msp34xx){
+ for (i = 0; i < ARRAY_SIZE(em28xx_qctrl); i++) {
+ if (ctrl->id == em28xx_qctrl[i].id) {
+ if (ctrl->value <
+ em28xx_qctrl[i].minimum
+ || ctrl->value >
+ em28xx_qctrl[i].maximum)
+ return -ERANGE;
+ return em28xx_set_ctrl(dev, ctrl);
+ }
+ }
+ }
- return em28xx_set_ctrl(dev, ctrl);
+ if (dev->decoder == EM28XX_TVP5150) {
+ em28xx_i2c_call_clients(dev,cmd,arg);
+ return 0;
+ } else if (!dev->has_msp34xx) {
+ for (i = 0; i < ARRAY_SIZE(em28xx_qctrl); i++) {
+ if (ctrl->id == em28xx_qctrl[i].id) {
+ if (ctrl->value <
+ em28xx_qctrl[i].minimum
+ || ctrl->value >
+ em28xx_qctrl[i].maximum)
+ return -ERANGE;
+ return em28xx_set_ctrl(dev, ctrl);
+ }
}
+ for (i = 0; i < ARRAY_SIZE(saa711x_qctrl); i++) {
+ if (ctrl->id == saa711x_qctrl[i].id) {
+ if (ctrl->value <
+ saa711x_qctrl[i].minimum
+ || ctrl->value >
+ saa711x_qctrl[i].maximum)
+ return -ERANGE;
+ return saa711x_set_ctrl(dev, ctrl);
+ }
+ }
+ }
+
return -EINVAL;
}
@@ -1187,7 +1269,7 @@ static int em28xx_video_do_ioctl(struct inode *inode, struct file *filp,
return -ENODEV;
if (video_debug > 1)
- em28xx_print_ioctl(dev->name,cmd);
+ v4l_print_ioctl(dev->name,cmd);
switch (cmd) {
@@ -1564,6 +1646,8 @@ static struct file_operations em28xx_v4l_fops = {
.poll = em28xx_v4l2_poll,
.mmap = em28xx_v4l2_mmap,
.llseek = no_llseek,
+ .compat_ioctl = v4l_compat_ioctl32,
+
};
/******************************** usb interface *****************************************/
@@ -1777,12 +1861,11 @@ static int em28xx_usb_probe(struct usb_interface *interface,
}
/* allocate memory for our device state and initialize it */
- dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
em28xx_err(DRIVER_NAME ": out of memory!\n");
return -ENOMEM;
}
- memset(dev, 0, sizeof(*dev));
/* compute alternate max packet sizes */
uif = udev->actconfig->interface[0];
@@ -1848,9 +1931,12 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
struct em28xx *dev = usb_get_intfdata(interface);
usb_set_intfdata(interface, NULL);
+/*FIXME: IR should be disconnected */
+
if (!dev)
return;
+
down_write(&em28xx_disconnect);
down(&dev->lock);
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 5c7a41ce69f..33de9d846af 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -1,5 +1,5 @@
/*
- em28xx-cards.c - driver for Empia EM2800/EM2820/2840 USB video capture devices
+ em28xx.h - driver for Empia EM2800/EM2820/2840 USB video capture devices
Copyright (C) 2005 Markus Rechberger <mrechberger@gmail.com>
Ludovico Cavedon <cavedon@sssup.it>
@@ -216,6 +216,8 @@ struct em28xx {
unsigned int has_msp34xx:1;
unsigned int has_tda9887:1;
+ u32 i2s_speed; /* I2S speed for audio digital stream */
+
enum em28xx_decoder decoder;
int tuner_type; /* type of the tuner */
@@ -293,8 +295,6 @@ void em28xx_set_ir(struct em28xx * dev,struct IR_i2c *ir);
/* Provided by em28xx-core.c */
-void em28xx_print_ioctl(char *name, unsigned int cmd);
-
u32 em28xx_request_buffers(struct em28xx *dev, u32 count);
void em28xx_queue_unusedframes(struct em28xx *dev);
void em28xx_release_buffers(struct em28xx *dev);
diff --git a/drivers/media/video/hexium_gemini.c b/drivers/media/video/hexium_gemini.c
index c9b00eafefd..e7bbeb11553 100644
--- a/drivers/media/video/hexium_gemini.c
+++ b/drivers/media/video/hexium_gemini.c
@@ -240,12 +240,11 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
DEB_EE((".\n"));
- hexium = (struct hexium *) kmalloc(sizeof(struct hexium), GFP_KERNEL);
+ hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL);
if (NULL == hexium) {
printk("hexium_gemini: not enough kernel memory in hexium_attach().\n");
return -ENOMEM;
}
- memset(hexium, 0x0, sizeof(struct hexium));
dev->ext_priv = hexium;
/* enable i2c-port pins */
diff --git a/drivers/media/video/hexium_orion.c b/drivers/media/video/hexium_orion.c
index 42a9414155c..0b6c2096ec6 100644
--- a/drivers/media/video/hexium_orion.c
+++ b/drivers/media/video/hexium_orion.c
@@ -224,12 +224,11 @@ static int hexium_probe(struct saa7146_dev *dev)
return -EFAULT;
}
- hexium = (struct hexium *) kmalloc(sizeof(struct hexium), GFP_KERNEL);
+ hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL);
if (NULL == hexium) {
printk("hexium_orion: hexium_probe: not enough kernel memory.\n");
return -ENOMEM;
}
- memset(hexium, 0x0, sizeof(struct hexium));
/* enable i2c-port pins */
saa7146_write(dev, MC1, (MASK_08 | MASK_24 | MASK_10 | MASK_26));
diff --git a/drivers/media/video/indycam.c b/drivers/media/video/indycam.c
index deeef125eb9..7420b79e987 100644
--- a/drivers/media/video/indycam.c
+++ b/drivers/media/video/indycam.c
@@ -289,18 +289,15 @@ static int indycam_attach(struct i2c_adapter *adap, int addr, int kind)
printk(KERN_INFO "SGI IndyCam driver version %s\n",
INDYCAM_MODULE_VERSION);
- client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
if (!client)
return -ENOMEM;
- camera = kmalloc(sizeof(struct indycam), GFP_KERNEL);
+ camera = kzalloc(sizeof(struct indycam), GFP_KERNEL);
if (!camera) {
err = -ENOMEM;
goto out_free_client;
}
- memset(client, 0, sizeof(struct i2c_client));
- memset(camera, 0, sizeof(struct indycam));
-
client->addr = addr;
client->adapter = adap;
client->driver = &i2c_driver_indycam;
@@ -451,10 +448,10 @@ static int indycam_command(struct i2c_client *client, unsigned int cmd,
}
static struct i2c_driver i2c_driver_indycam = {
- .owner = THIS_MODULE,
- .name = "indycam",
+ .driver = {
+ .name = "indycam",
+ },
.id = I2C_DRIVERID_INDYCAM,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = indycam_probe,
.detach_client = indycam_detach,
.command = indycam_command,
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 740e543311a..58b0e698282 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -278,9 +278,10 @@ static int ir_detach(struct i2c_client *client);
static int ir_probe(struct i2c_adapter *adap);
static struct i2c_driver driver = {
- .name = "ir remote kbd driver",
+ .driver = {
+ .name = "ir-kbd-i2c",
+ },
.id = I2C_DRIVERID_INFRARED,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = ir_probe,
.detach_client = ir_detach,
};
@@ -303,18 +304,20 @@ static int ir_attach(struct i2c_adapter *adap, int addr,
ir = kzalloc(sizeof(struct IR_i2c),GFP_KERNEL);
input_dev = input_allocate_device();
if (!ir || !input_dev) {
- kfree(ir);
input_free_device(input_dev);
+ kfree(ir);
return -ENOMEM;
}
+ memset(ir,0,sizeof(*ir));
ir->c = client_template;
ir->input = input_dev;
- i2c_set_clientdata(&ir->c, ir);
ir->c.adapter = adap;
ir->c.addr = addr;
+ i2c_set_clientdata(&ir->c, ir);
+
switch(addr) {
case 0x64:
name = "Pixelview";
@@ -377,13 +380,15 @@ static int ir_attach(struct i2c_adapter *adap, int addr,
ir->c.dev.bus_id);
/* init + register input device */
- ir_input_init(input_dev, &ir->ir, ir_type, ir_codes);
- input_dev->id.bustype = BUS_I2C;
- input_dev->name = ir->c.name;
- input_dev->phys = ir->phys;
+ ir_input_init(input_dev,&ir->ir,ir_type,ir->ir_codes);
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->name = ir->c.name;
+ input_dev->phys = ir->phys;
/* register event device */
input_register_device(ir->input);
+ printk(DEVNAME ": %s detected at %s [%s]\n",
+ ir->input->name,ir->input->phys,adap->name);
/* start polling via eventd */
INIT_WORK(&ir->work, ir_work, ir);
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
index 3f2a882bc20..2869464aee0 100644
--- a/drivers/media/video/meye.c
+++ b/drivers/media/video/meye.c
@@ -1754,6 +1754,7 @@ static struct file_operations meye_fops = {
.release = meye_release,
.mmap = meye_mmap,
.ioctl = meye_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.poll = meye_poll,
.llseek = no_llseek,
};
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
new file mode 100644
index 00000000000..9b05a0ab776
--- /dev/null
+++ b/drivers/media/video/msp3400-driver.c
@@ -0,0 +1,1274 @@
+/*
+ * Programming the mspx4xx sound processor family
+ *
+ * (c) 1997-2001 Gerd Knorr <kraxel@bytesex.org>
+ *
+ * what works and what doesn't:
+ *
+ * AM-Mono
+ * Support for Hauppauge cards (decoding handled by the tuner) added by
+ * Frederic Crozat <fcrozat@mail.dotcom.fr>
+ *
+ * FM-Mono
+ * should work. The stereo modes are backward compatible with FM-mono,
+ * therefore FM-Mono should always be available.
+ *
+ * FM-Stereo (B/G, used in Germany)
+ * should work, with autodetect
+ *
+ * FM-Stereo (satellite)
+ * should work, no autodetect (i.e. default is mono, but you can
+ * switch to stereo -- untested)
+ *
+ * NICAM (B/G, I, L; used in the UK, Scandinavia, Spain and France)
+ * should work, with autodetect. Support for NICAM was added by
+ * Pekka Pietikainen <pp@netppl.fi>
+ *
+ * TODO:
+ * - better SAT support
+ *
+ * 980623 Thomas Sailer (sailer@ife.ee.ethz.ch)
+ * using soundcore instead of OSS
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/videodev.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/audiochip.h>
+#include <linux/kthread.h>
+#include <linux/suspend.h>
+#include "msp3400.h"
+
+/* ---------------------------------------------------------------------- */
+
+MODULE_DESCRIPTION("device driver for msp34xx TV sound processor");
+MODULE_AUTHOR("Gerd Knorr");
+MODULE_LICENSE("GPL");
+
+/* module parameters */
+static int opmode = OPMODE_AUTO;
+int msp_debug = 0; /* msp_debug output */
+int msp_once = 0; /* no continuous stereo monitoring */
+int msp_amsound = 0; /* hard-wire AM sound at 6.5 MHz (France),
+ the autoscan seems to work well only with FM... */
+int msp_standard = 1; /* Override auto detection of the audio standard, if needed. */
+int msp_dolby = 0;
+
+int msp_stereo_thresh = 0x190; /* a2 threshold for stereo/bilingual
+ (msp34xxg only) 0x00a0-0x03c0 */
+
+/* read-only */
+module_param(opmode, int, 0444);
+
+/* read-write */
+module_param_named(once,msp_once, bool, 0644);
+module_param_named(debug,msp_debug, int, 0644);
+module_param_named(stereo_threshold,msp_stereo_thresh, int, 0644);
+module_param_named(standard,msp_standard, int, 0644);
+module_param_named(amsound,msp_amsound, bool, 0644);
+module_param_named(dolby,msp_dolby, bool, 0644);
+
+MODULE_PARM_DESC(opmode, "Forces a MSP3400 opmode. 0=Manual, 1=Autodetect, 2=Autodetect and autoselect");
+MODULE_PARM_DESC(once, "No continuous stereo monitoring");
+MODULE_PARM_DESC(debug, "Enable debug messages [0-3]");
+MODULE_PARM_DESC(stereo_threshold, "Sets signal threshold to activate stereo");
+MODULE_PARM_DESC(standard, "Specify audio standard: 32 = NTSC, 64 = radio, Default: Autodetect");
+MODULE_PARM_DESC(amsound, "Hardwire AM sound at 6.5 MHz (France), FM can autoscan");
+MODULE_PARM_DESC(dolby, "Activates Dolby processing");
+
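For reference, a hypothetical way of setting these parameters at load time (the values are arbitrary examples, and this assumes the driver is built as the msp3400 module, matching the i2c driver name used below):

	/*
	 * Illustrative only:
	 *	modprobe msp3400 debug=1 once=1 standard=32
	 */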
+/* ---------------------------------------------------------------------- */
+
+/* control subaddress */
+#define I2C_MSP_CONTROL 0x00
+/* demodulator unit subaddress */
+#define I2C_MSP_DEM 0x10
+/* DSP unit subaddress */
+#define I2C_MSP_DSP 0x12
+
+/* Addresses to scan */
+static unsigned short normal_i2c[] = { 0x80 >> 1, 0x88 >> 1, I2C_CLIENT_END };
+I2C_CLIENT_INSMOD;
+
+/* ----------------------------------------------------------------------- */
+/* functions for talking to the MSP3400C Sound processor */
+
+int msp_reset(struct i2c_client *client)
+{
+ /* reset and read revision code */
+ static u8 reset_off[3] = { I2C_MSP_CONTROL, 0x80, 0x00 };
+ static u8 reset_on[3] = { I2C_MSP_CONTROL, 0x00, 0x00 };
+ static u8 write[3] = { I2C_MSP_DSP + 1, 0x00, 0x1e };
+ u8 read[2];
+ struct i2c_msg reset[2] = {
+ { client->addr, I2C_M_IGNORE_NAK, 3, reset_off },
+ { client->addr, I2C_M_IGNORE_NAK, 3, reset_on },
+ };
+ struct i2c_msg test[2] = {
+ { client->addr, 0, 3, write },
+ { client->addr, I2C_M_RD, 2, read },
+ };
+
+ v4l_dbg(3, msp_debug, client, "msp_reset\n");
+ if (i2c_transfer(client->adapter, &reset[0], 1) != 1 ||
+ i2c_transfer(client->adapter, &reset[1], 1) != 1 ||
+ i2c_transfer(client->adapter, test, 2) != 2) {
+ v4l_err(client, "chip reset failed\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int msp_read(struct i2c_client *client, int dev, int addr)
+{
+ int err, retval;
+ u8 write[3];
+ u8 read[2];
+ struct i2c_msg msgs[2] = {
+ { client->addr, 0, 3, write },
+ { client->addr, I2C_M_RD, 2, read }
+ };
+
+ write[0] = dev + 1;
+ write[1] = addr >> 8;
+ write[2] = addr & 0xff;
+
+ for (err = 0; err < 3; err++) {
+ if (i2c_transfer(client->adapter, msgs, 2) == 2)
+ break;
+ v4l_warn(client, "I/O error #%d (read 0x%02x/0x%02x)\n", err,
+ dev, addr);
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(msecs_to_jiffies(10));
+ }
+ if (err == 3) {
+ v4l_warn(client, "giving up, resetting chip. Sound will go off, sorry folks :-|\n");
+ msp_reset(client);
+ return -1;
+ }
+ retval = read[0] << 8 | read[1];
+ v4l_dbg(3, msp_debug, client, "msp_read(0x%x, 0x%x): 0x%x\n", dev, addr, retval);
+ return retval;
+}
+
+int msp_read_dem(struct i2c_client *client, int addr)
+{
+ return msp_read(client, I2C_MSP_DEM, addr);
+}
+
+int msp_read_dsp(struct i2c_client *client, int addr)
+{
+ return msp_read(client, I2C_MSP_DSP, addr);
+}
+
+static int msp_write(struct i2c_client *client, int dev, int addr, int val)
+{
+ int err;
+ u8 buffer[5];
+
+ buffer[0] = dev;
+ buffer[1] = addr >> 8;
+ buffer[2] = addr & 0xff;
+ buffer[3] = val >> 8;
+ buffer[4] = val & 0xff;
+
+ v4l_dbg(3, msp_debug, client, "msp_write(0x%x, 0x%x, 0x%x)\n", dev, addr, val);
+ for (err = 0; err < 3; err++) {
+ if (i2c_master_send(client, buffer, 5) == 5)
+ break;
+ v4l_warn(client, "I/O error #%d (write 0x%02x/0x%02x)\n", err,
+ dev, addr);
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(msecs_to_jiffies(10));
+ }
+ if (err == 3) {
+ v4l_warn(client, "giving up, resetting chip. Sound will go off, sorry folks :-|\n");
+ msp_reset(client);
+ return -1;
+ }
+ return 0;
+}
+
+int msp_write_dem(struct i2c_client *client, int addr, int val)
+{
+ return msp_write(client, I2C_MSP_DEM, addr, val);
+}
+
+int msp_write_dsp(struct i2c_client *client, int addr, int val)
+{
+ return msp_write(client, I2C_MSP_DSP, addr, val);
+}
+
+/* ----------------------------------------------------------------------- *
+ * bits 9 8 5 - SCART DSP input Select:
+ * 0 0 0 - SCART 1 to DSP input (reset position)
+ * 0 1 0 - MONO to DSP input
+ * 1 0 0 - SCART 2 to DSP input
+ * 1 1 1 - Mute DSP input
+ *
+ * bits 11 10 6 - SCART 1 Output Select:
+ * 0 0 0 - undefined (reset position)
+ * 0 1 0 - SCART 2 Input to SCART 1 Output (for devices with 2 SCARTS)
+ * 1 0 0 - MONO input to SCART 1 Output
+ * 1 1 0 - SCART 1 DA to SCART 1 Output
+ * 0 0 1 - SCART 2 DA to SCART 1 Output
+ * 0 1 1 - SCART 1 Input to SCART 1 Output
+ * 1 1 1 - Mute SCART 1 Output
+ *
+ * bits 13 12 7 - SCART 2 Output Select (for devices with 2 Output SCART):
+ * 0 0 0 - SCART 1 DA to SCART 2 Output (reset position)
+ * 0 1 0 - SCART 1 Input to SCART 2 Output
+ * 1 0 0 - MONO input to SCART 2 Output
+ * 0 0 1 - SCART 2 DA to SCART 2 Output
+ * 0 1 1 - SCART 2 Input to SCART 2 Output
+ * 1 1 0 - Mute SCART 2 Output
+ *
+ * Bits 4 to 0 should be zero.
+ * ----------------------------------------------------------------------- */
+
+static int scarts[3][9] = {
+ /* MASK IN1 IN2 IN1_DA IN2_DA IN3 IN4 MONO MUTE */
+ /* SCART DSP Input select */
+ { 0x0320, 0x0000, 0x0200, -1, -1, 0x0300, 0x0020, 0x0100, 0x0320 },
+ /* SCART1 Output select */
+ { 0x0c40, 0x0440, 0x0400, 0x0c00, 0x0040, 0x0000, 0x0840, 0x0800, 0x0c40 },
+ /* SCART2 Output select */
+ { 0x3080, 0x1000, 0x1080, 0x0000, 0x0080, 0x2080, 0x3080, 0x2000, 0x3000 },
+};
+
+static char *scart_names[] = {
+ "mask", "in1", "in2", "in1 da", "in2 da", "in3", "in4", "mono", "mute"
+};
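As a worked instance of the SCART routing table documented in the comment block above (this assumes the SCART_MASK/SCART_IN2 indexes from msp3400.h map onto the table columns in order; the starting ACB value is an arbitrary example):

	/*
	 * Illustrative only: starting from acb = 0x0440 (SCART1 output = IN1),
	 * routing SCART2 to the DSP input is
	 *	acb &= ~scarts[0][SCART_MASK];	clears DSP-input bits 9/8/5 (0x0320)
	 *	acb |=  scarts[0][SCART_IN2];	sets 0x0200
	 * leaving acb = 0x0640: SCART2 feeds the DSP while the SCART1 output
	 * bits are untouched.
	 */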
+
+void msp_set_scart(struct i2c_client *client, int in, int out)
+{
+ struct msp_state *state = i2c_get_clientdata(client);
+
+ state->in_scart=in;
+
+ if (in >= 1 && in <= 8 && out >= 0 && out <= 2) {
+ if (-1 == scarts[out][in])
+ return;
+
+ state->acb &= ~scarts[out][SCART_MASK];
+ state->acb |= scarts[out][in];
+ } else
+ state->acb = 0xf60; /* Mute Input and SCART 1 Output */
+
+ v4l_dbg(1, msp_debug, client, "scart switch: %s => %d (ACB=0x%04x)\n",
+ scart_names[in], out, state->acb);
+ msp_write_dsp(client, 0x13, state->acb);
+
+ /* Sets I2S speed 0 = 1.024 Mbps, 1 = 2.048 Mbps */
+ msp_write_dem(client, 0x40, state->i2s_mode);
+}
+
+void msp_set_mute(struct i2c_client *client)
+{
+ struct msp_state *state = i2c_get_clientdata(client);
+
+ v4l_dbg(1, msp_debug, client, "mute audio\n");
+ msp_write_dsp(client, 0x0000, 0);
+ msp_write_dsp(client, 0x0007, 1);
+ if (state->has_scart2_out_volume)
+ msp_write_dsp(client, 0x0040, 1);
+ if (state->has_headphones)
+ msp_write_dsp(client, 0x0006, 0);
+}
+
+void msp_set_audio(struct i2c_client *client)
+{
+ struct msp_state *state = i2c_get_clientdata(client);
+ int bal = 0, bass, treble, loudness;
+ int val = 0;
+
+ if (!state->muted)
+ val = (state->volume * 0x7f / 65535) << 8;
+
+ v4l_dbg(1, msp_debug, client, "mute=%s volume=%d\n",
+ state->muted ? "on" : "off", state->volume);
+
+ msp_write_dsp(client, 0x0000, val);
+ msp_write_dsp(client, 0x0007, state->muted ? 0x1 : (val | 0x1));
+ if (state->has_scart2_out_volume)
+ msp_write_dsp(client, 0x0040, state->muted ? 0x1 : (val | 0x1));
+ if (state->has_headphones)
+ msp_write_dsp(client, 0x0006, val);
+ if (!state->has_sound_processing)
+ return;
+
+ if (val)
+ bal = (u8)((state->balance / 256) - 128);
+ bass = ((state->bass - 32768) * 0x60 / 65535) << 8;
+ treble = ((state->treble - 32768) * 0x60 / 65535) << 8;
+ loudness = state->loudness ? ((5 * 4) << 8) : 0;
+
+ v4l_dbg(1, msp_debug, client, "balance=%d bass=%d treble=%d loudness=%d\n",
+ state->balance, state->bass, state->treble, state->loudness);
+
+ msp_write_dsp(client, 0x0001, bal << 8);
+ msp_write_dsp(client, 0x0002, bass);
+ msp_write_dsp(client, 0x0003, treble);
+ msp_write_dsp(client, 0x0004, loudness);
+ if (!state->has_headphones)
+ return;
+ msp_write_dsp(client, 0x0030, bal << 8);
+ msp_write_dsp(client, 0x0031, bass);
+ msp_write_dsp(client, 0x0032, treble);
+ msp_write_dsp(client, 0x0033, loudness);
+}
+
+int msp_modus(struct i2c_client *client)
+{
+ struct msp_state *state = i2c_get_clientdata(client);
+
+ if (state->radio) {
+ v4l_dbg(1, msp_debug, client, "video mode selected to Radio\n");
+ return 0x0003;
+ }
+
+ if (state->v4l2_std & V4L2_STD_PAL) {
+ v4l_dbg(1, msp_debug, client, "video mode selected to PAL\n");
+
+#if 1
+ /* experimental: not sure this works with all chip versions */
+ return 0x7003;
+#else
+ /* previous value, try this if it breaks ... */
+ return 0x1003;
+#endif
+ }
+ if (state->v4l2_std & V4L2_STD_NTSC) {
+ v4l_dbg(1, msp_debug, client, "video mode selected to NTSC\n");
+ return 0x2003;
+ }
+ if (state->v4l2_std & V4L2_STD_SECAM) {
+ v4l_dbg(1, msp_debug, client, "video mode selected to SECAM\n");
+ return 0x0003;
+ }
+ return 0x0003;
+}
+
+/* ------------------------------------------------------------------------ */
+
+
+static void msp_wake_thread(struct i2c_client *client)
+{
+ struct msp_state *state = i2c_get_clientdata(client);
+
+ if (NULL == state->kthread)
+ return;
+ msp_set_mute(client);
+ state->watch_stereo = 0;
+ state->restart = 1;
+ wake_up_interruptible(&state->wq);
+}
+
+int msp_sleep(struct msp_state *state, int timeout)
+{
+ DECLARE_WAITQUEUE(wait, current);
+
+ add_wait_queue(&state->wq, &wait);
+ if (!kthread_should_stop()) {
+ if (timeout < 0) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ } else {
+ schedule_timeout_interruptible
+ (msecs_to_jiffies(timeout));
+ }
+ }
+
+ remove_wait_queue(&state->wq, &wait);
+ try_to_freeze();
+ return state->restart;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int msp_mode_v4l2_to_v4l1(int rxsubchans)
+{
+ int mode = 0;
+
+ if (rxsubchans & V4L2_TUNER_SUB_STEREO)
+ mode |= VIDEO_SOUND_STEREO;
+ if (rxsubchans & V4L2_TUNER_SUB_LANG2)
+ mode |= VIDEO_SOUND_LANG2;
+ if (rxsubchans & V4L2_TUNER_SUB_LANG1)
+ mode |= VIDEO_SOUND_LANG1;
+ if (mode == 0)
+ mode |= VIDEO_SOUND_MONO;
+ return mode;
+}
+
+static int msp_mode_v4l1_to_v4l2(int mode)
+{
+ if (mode & VIDEO_SOUND_STEREO)
+ return V4L2_TUNER_MODE_STEREO;
+ if (mode & VIDEO_SOUND_LANG2)
+ return V4L2_TUNER_MODE_LANG2;
+ if (mode & VIDEO_SOUND_LANG1)
+ return V4L2_TUNER_MODE_LANG1;
+ return V4L2_TUNER_MODE_MONO;
+}
+
+static void msp_any_detect_stereo(struct i2c_client *client)
+{
+ struct msp_state *state = i2c_get_clientdata(client);
+
+ switch (state->opmode) {
+ case OPMODE_MANUAL:
+ case OPMODE_AUTODETECT:
+ autodetect_stereo(client);
+ break;
+ case OPMODE_AUTOSELECT:
+ msp34xxg_detect_stereo(client);
+ break;
+ }
+}
+
+static struct v4l2_queryctrl msp_qctrl_std[] = {
+ {
+ .id = V4L2_CID_AUDIO_VOLUME,
+ .name = "Volume",
+ .minimum = 0,
+ .maximum = 65535,
+ .step = 65535/100,
+ .default_value = 58880,
+ .flags = 0,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ },{
+ .id = V4L2_CID_AUDIO_MUTE,
+ .name = "Mute",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ .flags = 0,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ },
+};
+
+static struct v4l2_queryctrl msp_qctrl_sound_processing[] = {
+ {
+ .id = V4L2_CID_AUDIO_BALANCE,
+ .name = "Balance",
+ .minimum = 0,
+ .maximum = 65535,
+ .step = 65535/100,
+ .default_value = 32768,
+ .flags = 0,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ },{
+ .id = V4L2_CID_AUDIO_BASS,
+ .name = "Bass",
+ .minimum = 0,
+ .maximum = 65535,
+ .step = 65535/100,
+ .default_value = 32768,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ },{
+ .id = V4L2_CID_AUDIO_TREBLE,
+ .name = "Treble",
+ .minimum = 0,
+ .maximum = 65535,
+ .step = 65535/100,
+ .default_value = 32768,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ },{
+ .id = V4L2_CID_AUDIO_LOUDNESS,
+ .name = "Loudness",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ .flags = 0,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ },
+};
+
+
+static void msp_any_set_audmode(struct i2c_client *client, int audmode)
+{
+ struct msp_state *state = i2c_get_clientdata(client);
+
+ switch (state->opmode) {
+ case OPMODE_MANUAL:
+ case OPMODE_AUTODETECT:
+ state->watch_stereo = 0;
+ msp3400c_setstereo(client, audmode);
+ break;
+ case OPMODE_AUTOSELECT:
+ msp34xxg_set_audmode(client, audmode);
+ break;
+ }
+}
+
+static int msp_get_ctrl(struct i2c_client *client, struct v4l2_control *ctrl)
+{
+ struct msp_state *state = i2c_get_clientdata(client);
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_VOLUME:
+ ctrl->value = state->volume;
+ break;
+
+ case V4L2_CID_AUDIO_MUTE:
+ ctrl->value = state->muted;
+ break;
+
+ case V4L2_CID_AUDIO_BALANCE:
+ if (!state->has_sound_processing)
+ return -EINVAL;
+ ctrl->value = state->balance;
+ break;
+
+ case V4L2_CID_AUDIO_BASS:
+ if (!state->has_sound_processing)
+ return -EINVAL;
+ ctrl->value = state->bass;
+ break;
+
+ case V4L2_CID_AUDIO_TREBLE:
+ if (!state->has_sound_processing)
+ return -EINVAL;
+ ctrl->value = state->treble;
+ break;
+
+ case V4L2_CID_AUDIO_LOUDNESS:
+ if (!state->has_sound_processing)
+ return -EINVAL;
+ ctrl->value = state->loudness;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int msp_set_ctrl(struct i2c_client *client, struct v4l2_control *ctrl)
+{
+ struct msp_state *state = i2c_get_clientdata(client);
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUDIO_VOLUME:
+ state->volume = ctrl->value;
+ if (state->volume == 0)
+ state->balance = 32768;
+ break;
+
+ case V4L2_CID_AUDIO_MUTE:
+ if (ctrl->value < 0 || ctrl->value >= 2)
+ return -ERANGE;
+ state->muted = ctrl->value;
+ break;
+
+ case V4L2_CID_AUDIO_BASS:
+ if (!state->has_sound_processing)
+ return -EINVAL;
+ state->bass = ctrl->value;
+ break;
+
+ case V4L2_CID_AUDIO_TREBLE:
+ if (!state->has_sound_processing)
+ return -EINVAL;
+ state->treble = ctrl->value;
+ break;
+
+ case V4L2_CID_AUDIO_LOUDNESS:
+ if (!state->has_sound_processing)
+ return -EINVAL;
+ state->loudness = ctrl->value;
+ break;
+
+ case V4L2_CID_AUDIO_BALANCE:
+ if (!state->has_sound_processing)
+ return -EINVAL;
+ state->balance = ctrl->value;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ msp_set_audio(client);
+ return 0;
+}
+
+static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
+{
+ struct msp_state *state = i2c_get_clientdata(client);
+ u16 *sarg = arg;
+ int scart = 0;
+
+ if (msp_debug >= 2)
+ v4l_i2c_print_ioctl(client, cmd);
+
+ switch (cmd) {
+ case AUDC_SET_INPUT:
+ if (*sarg == state->input)
+ break;
+ state->input = *sarg;
+ switch (*sarg) {
+ case AUDIO_RADIO:
+ /* Hauppauge uses IN2 for the radio */
+ state->mode = MSP_MODE_FM_RADIO;
+ scart = SCART_IN2;
+ break;
+ case AUDIO_EXTERN_1:
+ /* IN1 is often used for external input ... */
+ state->mode = MSP_MODE_EXTERN;
+ scart = SCART_IN1;
+ break;
+ case AUDIO_EXTERN_2:
+ /* ... sometimes it is IN2 though ;) */
+ state->mode = MSP_MODE_EXTERN;
+ scart = SCART_IN2;
+ break;
+ case AUDIO_TUNER:
+ state->mode = -1;
+ break;
+ default:
+ if (*sarg & AUDIO_MUTE)
+ msp_set_scart(client, SCART_MUTE, 0);
+ break;
+ }
+ if (scart) {
+ state->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ state->audmode = V4L2_TUNER_MODE_STEREO;
+ msp_set_scart(client, scart, 0);
+ msp_write_dsp(client, 0x000d, 0x1900);
+ if (state->opmode != OPMODE_AUTOSELECT)
+ msp3400c_setstereo(client, state->audmode);
+ }
+ msp_wake_thread(client);
+ break;
+
+ case AUDC_SET_RADIO:
+ if (state->radio)
+ return 0;
+ state->radio = 1;
+ v4l_dbg(1, msp_debug, client, "switching to radio mode\n");
+ state->watch_stereo = 0;
+ switch (state->opmode) {
+ case OPMODE_MANUAL:
+ /* set msp3400 to FM radio mode */
+ msp3400c_setmode(client, MSP_MODE_FM_RADIO);
+ msp3400c_setcarrier(client, MSP_CARRIER(10.7),
+ MSP_CARRIER(10.7));
+ msp_set_audio(client);
+ break;
+ case OPMODE_AUTODETECT:
+ case OPMODE_AUTOSELECT:
+ /* the thread will do it for us */
+ msp_wake_thread(client);
+ break;
+ }
+ break;
+
+ /* --- v4l ioctls --- */
+ /* take care: bttv does userspace copying, we'll get a
+ kernel pointer here... */
+ case VIDIOCGAUDIO:
+ {
+ struct video_audio *va = arg;
+
+ va->flags |= VIDEO_AUDIO_VOLUME | VIDEO_AUDIO_MUTABLE;
+ if (state->has_sound_processing)
+ va->flags |= VIDEO_AUDIO_BALANCE |
+ VIDEO_AUDIO_BASS |
+ VIDEO_AUDIO_TREBLE;
+ if (state->muted)
+ va->flags |= VIDEO_AUDIO_MUTE;
+ va->volume = state->volume;
+ va->balance = state->volume ? state->balance : 32768;
+ va->bass = state->bass;
+ va->treble = state->treble;
+
+ if (state->radio)
+ break;
+ if (state->opmode == OPMODE_AUTOSELECT)
+ msp_any_detect_stereo(client);
+ va->mode = msp_mode_v4l2_to_v4l1(state->rxsubchans);
+ break;
+ }
+
+ case VIDIOCSAUDIO:
+ {
+ struct video_audio *va = arg;
+
+ state->muted = (va->flags & VIDEO_AUDIO_MUTE);
+ state->volume = va->volume;
+ state->balance = va->balance;
+ state->bass = va->bass;
+ state->treble = va->treble;
+ msp_set_audio(client);
+
+ if (va->mode != 0 && state->radio == 0)
+ msp_any_set_audmode(client, msp_mode_v4l1_to_v4l2(va->mode));
+ break;
+ }
+
+ case VIDIOCSCHAN:
+ {
+ struct video_channel *vc = arg;
+ int update = 0;
+ v4l2_std_id std;
+
+ if (state->radio)
+ update = 1;
+ state->radio = 0;
+ if (vc->norm == VIDEO_MODE_PAL)
+ std = V4L2_STD_PAL;
+ else if (vc->norm == VIDEO_MODE_SECAM)
+ std = V4L2_STD_SECAM;
+ else
+ std = V4L2_STD_NTSC;
+ if (std != state->v4l2_std) {
+ state->v4l2_std = std;
+ update = 1;
+ }
+ if (update)
+ msp_wake_thread(client);
+ break;
+ }
+
+ case VIDIOCSFREQ:
+ case VIDIOC_S_FREQUENCY:
+ {
+ /* new channel -- kick audio carrier scan */
+ msp_wake_thread(client);
+ break;
+ }
+
+ /* msp34xx specific */
+ case MSP_SET_MATRIX:
+ {
+ struct msp_matrix *mspm = arg;
+
+ msp_set_scart(client, mspm->input, mspm->output);
+ break;
+ }
+
+ /* --- v4l2 ioctls --- */
+ case VIDIOC_S_STD:
+ {
+ v4l2_std_id *id = arg;
+ int update = state->radio || state->v4l2_std != *id;
+
+ state->v4l2_std = *id;
+ state->radio = 0;
+ if (update)
+ msp_wake_thread(client);
+ return 0;
+ }
+
+ case VIDIOC_ENUMINPUT:
+ {
+ struct v4l2_input *i = arg;
+
+ if (i->index != 0)
+ return -EINVAL;
+
+ i->type = V4L2_INPUT_TYPE_TUNER;
+ switch (i->index) {
+ case AUDIO_RADIO:
+ strcpy(i->name, "Radio");
+ break;
+ case AUDIO_EXTERN_1:
+ strcpy(i->name, "Extern 1");
+ break;
+ case AUDIO_EXTERN_2:
+ strcpy(i->name, "Extern 2");
+ break;
+ case AUDIO_TUNER:
+ strcpy(i->name, "Television");
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ case VIDIOC_G_AUDIO:
+ {
+ struct v4l2_audio *a = arg;
+
+ memset(a, 0, sizeof(*a));
+
+ switch (a->index) {
+ case AUDIO_RADIO:
+ strcpy(a->name, "Radio");
+ break;
+ case AUDIO_EXTERN_1:
+ strcpy(a->name, "Extern 1");
+ break;
+ case AUDIO_EXTERN_2:
+ strcpy(a->name, "Extern 2");
+ break;
+ case AUDIO_TUNER:
+ strcpy(a->name, "Television");
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ msp_any_detect_stereo(client);
+ if (state->audmode == V4L2_TUNER_MODE_STEREO) {
+ a->capability = V4L2_AUDCAP_STEREO;
+ }
+
+ break;
+ }
+
+ case VIDIOC_S_AUDIO:
+ {
+ struct v4l2_audio *sarg = arg;
+
+ switch (sarg->index) {
+ case AUDIO_RADIO:
+ /* Hauppauge uses IN2 for the radio */
+ state->mode = MSP_MODE_FM_RADIO;
+ scart = SCART_IN2;
+ break;
+ case AUDIO_EXTERN_1:
+ /* IN1 is often used for external input ... */
+ state->mode = MSP_MODE_EXTERN;
+ scart = SCART_IN1;
+ break;
+ case AUDIO_EXTERN_2:
+ /* ... sometimes it is IN2 though ;) */
+ state->mode = MSP_MODE_EXTERN;
+ scart = SCART_IN2;
+ break;
+ case AUDIO_TUNER:
+ state->mode = -1;
+ break;
+ }
+ if (scart) {
+ state->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ state->audmode = V4L2_TUNER_MODE_STEREO;
+ msp_set_scart(client, scart, 0);
+ msp_write_dsp(client, 0x000d, 0x1900);
+ }
+ if (sarg->capability == V4L2_AUDCAP_STEREO) {
+ state->audmode = V4L2_TUNER_MODE_STEREO;
+ } else {
+ state->audmode &= ~V4L2_TUNER_MODE_STEREO;
+ }
+ msp_any_set_audmode(client, state->audmode);
+ msp_wake_thread(client);
+ break;
+ }
+
+ case VIDIOC_G_TUNER:
+ {
+ struct v4l2_tuner *vt = arg;
+
+ if (state->radio)
+ break;
+ if (state->opmode == OPMODE_AUTOSELECT)
+ msp_any_detect_stereo(client);
+ vt->audmode = state->audmode;
+ vt->rxsubchans = state->rxsubchans;
+ vt->capability = V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
+ break;
+ }
+
+ case VIDIOC_S_TUNER:
+ {
+ struct v4l2_tuner *vt = (struct v4l2_tuner *)arg;
+
+ if (state->radio)
+ break;
+ /* only set audmode */
+ if (vt->audmode != -1 && vt->audmode != 0)
+ msp_any_set_audmode(client, vt->audmode);
+ break;
+ }
+
+ case VIDIOC_G_AUDOUT:
+ {
+ struct v4l2_audioout *a = (struct v4l2_audioout *)arg;
+ int idx = a->index;
+
+ memset(a, 0, sizeof(*a));
+
+ switch (idx) {
+ case 0:
+ strcpy(a->name, "Scart1 Out");
+ break;
+ case 1:
+ strcpy(a->name, "Scart2 Out");
+ break;
+ case 2:
+ strcpy(a->name, "I2S Out");
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+
+ }
+
+ case VIDIOC_S_AUDOUT:
+ {
+ struct v4l2_audioout *a = (struct v4l2_audioout *)arg;
+
+ if (a->index < 0 || a->index > 2)
+ return -EINVAL;
+
+ v4l_dbg(1, msp_debug, client, "Setting audio out on msp34xx to input %i\n", a->index);
+ msp_set_scart(client, state->in_scart, a->index + 1);
+
+ break;
+ }
+
+ case VIDIOC_INT_I2S_CLOCK_FREQ:
+ {
+ u32 *a = (u32 *)arg;
+
+ v4l_dbg(1, msp_debug, client, "Setting I2S speed to %d\n", *a);
+
+ switch (*a) {
+ case 1024000:
+ state->i2s_mode = 0;
+ break;
+ case 2048000:
+ state->i2s_mode = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ }
+
+ case VIDIOC_QUERYCTRL:
+ {
+ struct v4l2_queryctrl *qc = arg;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(msp_qctrl_std); i++)
+ if (qc->id && qc->id == msp_qctrl_std[i].id) {
+ memcpy(qc, &msp_qctrl_std[i], sizeof(*qc));
+ return 0;
+ }
+ if (!state->has_sound_processing)
+ return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(msp_qctrl_sound_processing); i++)
+ if (qc->id && qc->id == msp_qctrl_sound_processing[i].id) {
+ memcpy(qc, &msp_qctrl_sound_processing[i], sizeof(*qc));
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ case VIDIOC_G_CTRL:
+ return msp_get_ctrl(client, arg);
+
+ case VIDIOC_S_CTRL:
+ return msp_set_ctrl(client, arg);
+
+ case VIDIOC_LOG_STATUS:
+ {
+ const char *p;
+
+ if (state->opmode == OPMODE_AUTOSELECT)
+ msp_any_detect_stereo(client);
+ v4l_info(client, "%s rev1 = 0x%04x rev2 = 0x%04x\n",
+ client->name, state->rev1, state->rev2);
+ v4l_info(client, "Audio: volume %d%s\n",
+ state->volume, state->muted ? " (muted)" : "");
+ if (state->has_sound_processing) {
+ v4l_info(client, "Audio: balance %d bass %d treble %d loudness %s\n",
+ state->balance, state->bass, state->treble,
+ state->loudness ? "on" : "off");
+ }
+ switch (state->mode) {
+ case MSP_MODE_AM_DETECT: p = "AM (for carrier detect)"; break;
+ case MSP_MODE_FM_RADIO: p = "FM Radio"; break;
+ case MSP_MODE_FM_TERRA: p = "Terrestrial FM-mono + FM-stereo"; break;
+ case MSP_MODE_FM_SAT: p = "Satellite FM-mono"; break;
+ case MSP_MODE_FM_NICAM1: p = "NICAM/FM (B/G, D/K)"; break;
+ case MSP_MODE_FM_NICAM2: p = "NICAM/FM (I)"; break;
+ case MSP_MODE_AM_NICAM: p = "NICAM/AM (L)"; break;
+ case MSP_MODE_BTSC: p = "BTSC"; break;
+ case MSP_MODE_EXTERN: p = "External input"; break;
+ default: p = "unknown"; break;
+ }
+ if (state->opmode == OPMODE_MANUAL) {
+ v4l_info(client, "Mode: %s (%s%s)\n", p,
+ (state->rxsubchans & V4L2_TUNER_SUB_STEREO) ? "stereo" : "mono",
+ (state->rxsubchans & V4L2_TUNER_SUB_LANG2) ? ", dual" : "");
+ } else {
+ v4l_info(client, "Mode: %s\n", p);
+ v4l_info(client, "Standard: %s (%s%s)\n",
+ msp_standard_std_name(state->std),
+ (state->rxsubchans & V4L2_TUNER_SUB_STEREO) ? "stereo" : "mono",
+ (state->rxsubchans & V4L2_TUNER_SUB_LANG2) ? ", dual" : "");
+ }
+ v4l_info(client, "ACB: 0x%04x\n", state->acb);
+ break;
+ }
+
+ default:
+ /* nothing */
+ break;
+ }
+ return 0;
+}
+
+static int msp_suspend(struct device * dev, pm_message_t state)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+
+ v4l_dbg(1, msp_debug, client, "suspend\n");
+ msp_reset(client);
+ return 0;
+}
+
+static int msp_resume(struct device * dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+
+ v4l_dbg(1, msp_debug, client, "resume\n");
+ msp_wake_thread(client);
+ return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+static struct i2c_driver i2c_driver;
+
+static int msp_attach(struct i2c_adapter *adapter, int address, int kind)
+{
+ struct i2c_client *client;
+ struct msp_state *state;
+ int (*thread_func)(void *data) = NULL;
+ int msp_hard;
+ int msp_family;
+ int msp_revision;
+ int msp_product, msp_prod_hi, msp_prod_lo;
+ int msp_rom;
+
+ client = kmalloc(sizeof(*client), GFP_KERNEL);
+ if (client == NULL)
+ return -ENOMEM;
+ memset(client, 0, sizeof(*client));
+ client->addr = address;
+ client->adapter = adapter;
+ client->driver = &i2c_driver;
+ snprintf(client->name, sizeof(client->name) - 1, "msp3400");
+
+ if (msp_reset(client) == -1) {
+ v4l_dbg(1, msp_debug, client, "msp3400 not found\n");
+ kfree(client);
+ return -1;
+ }
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state == NULL) {
+ kfree(client);
+ return -ENOMEM;
+ }
+ i2c_set_clientdata(client, state);
+
+ memset(state, 0, sizeof(*state));
+ state->v4l2_std = V4L2_STD_NTSC;
+ state->volume = 58880; /* 0db gain */
+ state->balance = 32768; /* 0db gain */
+ state->bass = 32768;
+ state->treble = 32768;
+ state->loudness = 0;
+ state->input = -1;
+ state->muted = 0;
+ state->i2s_mode = 0;
+ init_waitqueue_head(&state->wq);
+
+ state->rev1 = msp_read_dsp(client, 0x1e);
+ if (state->rev1 != -1)
+ state->rev2 = msp_read_dsp(client, 0x1f);
+ v4l_dbg(1, msp_debug, client, "rev1=0x%04x, rev2=0x%04x\n", state->rev1, state->rev2);
+ if (state->rev1 == -1 || (state->rev1 == 0 && state->rev2 == 0)) {
+ v4l_dbg(1, msp_debug, client, "not an msp3400 (cannot read chip version)\n");
+ kfree(state);
+ kfree(client);
+ return -1;
+ }
+
+ msp_set_audio(client);
+
+ msp_family = ((state->rev1 >> 4) & 0x0f) + 3;
+ msp_product = (state->rev2 >> 8) & 0xff;
+ msp_prod_hi = msp_product / 10;
+ msp_prod_lo = msp_product % 10;
+ msp_revision = (state->rev1 & 0x0f) + '@';
+ msp_hard = ((state->rev1 >> 8) & 0xff) + '@';
+ msp_rom = state->rev2 & 0x1f;
+ snprintf(client->name, sizeof(client->name), "MSP%d4%02d%c-%c%d",
+ msp_family, msp_product,
+ msp_revision, msp_hard, msp_rom);
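To make the field extraction above concrete, here is how one made-up register pair would decode (illustrative values, not taken from real hardware):

	/*
	 * Illustrative only: rev1 = 0x0407, rev2 = 0x1e05 gives
	 * msp_family = 3, msp_product = 30, msp_revision = 'G',
	 * msp_hard = 'D', msp_rom = 5, i.e. the name "MSP3430G-D5".
	 */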
+
+ /* Has NICAM support: all mspx41x and mspx45x products have NICAM */
+ state->has_nicam = msp_prod_hi == 1 || msp_prod_hi == 5;
+ /* Has radio support: was added with revision G */
+ state->has_radio = msp_revision >= 'G';
+ /* Has headphones output: not for stripped down products */
+ state->has_headphones = msp_prod_lo < 5;
+ /* Has scart4 input: not in pre D revisions, not in stripped D revs */
+ state->has_scart4 = msp_family >= 4 || (msp_revision >= 'D' && msp_prod_lo < 5);
+ /* Has scart2 and scart3 inputs and scart2 output: not in stripped
+ down products of the '3' family */
+ state->has_scart23_in_scart2_out = msp_family >= 4 || msp_prod_lo < 5;
+ /* Does scart2 have a volume control? Not in pre-D revisions. */
+ state->has_scart2_out_volume = msp_revision > 'C' && state->has_scart23_in_scart2_out;
+ /* Has a configurable i2s out? */
+ state->has_i2s_conf = msp_revision >= 'G' && msp_prod_lo < 7;
+ /* Has subwoofer output: not in pre-D revs and not in stripped down products */
+ state->has_subwoofer = msp_revision >= 'D' && msp_prod_lo < 5;
+ /* Has soundprocessing (bass/treble/balance/loudness/equalizer): not in
+ stripped down products */
+ state->has_sound_processing = msp_prod_lo < 7;
+ /* Has Virtual Dolby Surround: only in msp34x1 */
+ state->has_virtual_dolby_surround = msp_revision == 'G' && msp_prod_lo == 1;
+ /* Has Virtual Dolby Surround & Dolby Pro Logic: only in msp34x2 */
+ state->has_dolby_pro_logic = msp_revision == 'G' && msp_prod_lo == 2;
+
+ state->opmode = opmode;
+ if (state->opmode == OPMODE_AUTO) {
+ /* MSP revision G and up have both autodetect and autoselect */
+ if (msp_revision >= 'G')
+ state->opmode = OPMODE_AUTOSELECT;
+ /* MSP revision D and up have autodetect */
+ else if (msp_revision >= 'D')
+ state->opmode = OPMODE_AUTODETECT;
+ else
+ state->opmode = OPMODE_MANUAL;
+ }
+
+ /* hello world :-) */
+ v4l_info(client, "%s found @ 0x%x (%s)\n", client->name, address << 1, adapter->name);
+ v4l_info(client, "%s ", client->name);
+ if (state->has_nicam && state->has_radio)
+ printk("supports nicam and radio, ");
+ else if (state->has_nicam)
+ printk("supports nicam, ");
+ else if (state->has_radio)
+ printk("supports radio, ");
+ printk("mode is ");
+
+ /* version-specific initialization */
+ switch (state->opmode) {
+ case OPMODE_MANUAL:
+ printk("manual");
+ thread_func = msp3400c_thread;
+ break;
+ case OPMODE_AUTODETECT:
+ printk("autodetect");
+ thread_func = msp3410d_thread;
+ break;
+ case OPMODE_AUTOSELECT:
+ printk("autodetect and autoselect");
+ thread_func = msp34xxg_thread;
+ break;
+ }
+ printk("\n");
+
+ /* startup control thread if needed */
+ if (thread_func) {
+ state->kthread = kthread_run(thread_func, client, "msp34xx");
+
+ if (state->kthread == NULL)
+ v4l_warn(client, "kernel_thread() failed\n");
+ msp_wake_thread(client);
+ }
+
+ /* done */
+ i2c_attach_client(client);
+
+ return 0;
+}
+
+static int msp_probe(struct i2c_adapter *adapter)
+{
+ if (adapter->class & I2C_CLASS_TV_ANALOG)
+ return i2c_probe(adapter, &addr_data, msp_attach);
+ return 0;
+}
+
+static int msp_detach(struct i2c_client *client)
+{
+ struct msp_state *state = i2c_get_clientdata(client);
+ int err;
+
+ /* shutdown control thread */
+ if (state->kthread) {
+ state->restart = 1;
+ kthread_stop(state->kthread);
+ }
+ msp_reset(client);
+
+ err = i2c_detach_client(client);
+ if (err) {
+ return err;
+ }
+
+ kfree(state);
+ kfree(client);
+ return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+/* i2c implementation */
+static struct i2c_driver i2c_driver = {
+ .id = I2C_DRIVERID_MSP3400,
+ .attach_adapter = msp_probe,
+ .detach_client = msp_detach,
+ .command = msp_command,
+ .driver = {
+ .name = "msp3400",
+ .suspend = msp_suspend,
+ .resume = msp_resume,
+ },
+};
+
+static int __init msp3400_init_module(void)
+{
+ return i2c_add_driver(&i2c_driver);
+}
+
+static void __exit msp3400_cleanup_module(void)
+{
+ i2c_del_driver(&i2c_driver);
+}
+
+module_init(msp3400_init_module);
+module_exit(msp3400_cleanup_module);
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/drivers/media/video/msp3400-kthreads.c b/drivers/media/video/msp3400-kthreads.c
new file mode 100644
index 00000000000..2072c3efebb
--- /dev/null
+++ b/drivers/media/video/msp3400-kthreads.c
@@ -0,0 +1,1010 @@
+/*
+ * Programming the mspx4xx sound processor family
+ *
+ * (c) 1997-2001 Gerd Knorr <kraxel@bytesex.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/videodev.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/audiochip.h>
+#include <linux/kthread.h>
+#include <linux/suspend.h>
+#include "msp3400.h"
+
+/* this one uses the automatic sound standard detection of newer msp34xx
+ chip versions */
+static struct {
+ int retval;
+ int main, second;
+ char *name;
+} msp_stdlist[] = {
+ { 0x0000, 0, 0, "could not detect sound standard" },
+ { 0x0001, 0, 0, "autodetect start" },
+ { 0x0002, MSP_CARRIER(4.5), MSP_CARRIER(4.72), "4.5/4.72 M Dual FM-Stereo" },
+ { 0x0003, MSP_CARRIER(5.5), MSP_CARRIER(5.7421875), "5.5/5.74 B/G Dual FM-Stereo" },
+ { 0x0004, MSP_CARRIER(6.5), MSP_CARRIER(6.2578125), "6.5/6.25 D/K1 Dual FM-Stereo" },
+ { 0x0005, MSP_CARRIER(6.5), MSP_CARRIER(6.7421875), "6.5/6.74 D/K2 Dual FM-Stereo" },
+ { 0x0006, MSP_CARRIER(6.5), MSP_CARRIER(6.5), "6.5 D/K FM-Mono (HDEV3)" },
+ { 0x0008, MSP_CARRIER(5.5), MSP_CARRIER(5.85), "5.5/5.85 B/G NICAM FM" },
+ { 0x0009, MSP_CARRIER(6.5), MSP_CARRIER(5.85), "6.5/5.85 L NICAM AM" },
+ { 0x000a, MSP_CARRIER(6.0), MSP_CARRIER(6.55), "6.0/6.55 I NICAM FM" },
+ { 0x000b, MSP_CARRIER(6.5), MSP_CARRIER(5.85), "6.5/5.85 D/K NICAM FM" },
+ { 0x000c, MSP_CARRIER(6.5), MSP_CARRIER(5.85), "6.5/5.85 D/K NICAM FM (HDEV2)" },
+ { 0x0020, MSP_CARRIER(4.5), MSP_CARRIER(4.5), "4.5 M BTSC-Stereo" },
+ { 0x0021, MSP_CARRIER(4.5), MSP_CARRIER(4.5), "4.5 M BTSC-Mono + SAP" },
+ { 0x0030, MSP_CARRIER(4.5), MSP_CARRIER(4.5), "4.5 M EIA-J Japan Stereo" },
+ { 0x0040, MSP_CARRIER(10.7), MSP_CARRIER(10.7), "10.7 FM-Stereo Radio" },
+ { 0x0050, MSP_CARRIER(6.5), MSP_CARRIER(6.5), "6.5 SAT-Mono" },
+ { 0x0051, MSP_CARRIER(7.02), MSP_CARRIER(7.20), "7.02/7.20 SAT-Stereo" },
+ { 0x0060, MSP_CARRIER(7.2), MSP_CARRIER(7.2), "7.2 SAT ADR" },
+ { -1, 0, 0, NULL }, /* EOF */
+};
+
+static struct msp3400c_init_data_dem {
+ int fir1[6];
+ int fir2[6];
+ int cdo1;
+ int cdo2;
+ int ad_cv;
+ int mode_reg;
+ int dsp_src;
+ int dsp_matrix;
+} msp3400c_init_data[] = {
+ { /* AM (for carrier detect / msp3400) */
+ {75, 19, 36, 35, 39, 40},
+ {75, 19, 36, 35, 39, 40},
+ MSP_CARRIER(5.5), MSP_CARRIER(5.5),
+ 0x00d0, 0x0500, 0x0020, 0x3000
+ },{ /* AM (for carrier detect / msp3410) */
+ {-1, -1, -8, 2, 59, 126},
+ {-1, -1, -8, 2, 59, 126},
+ MSP_CARRIER(5.5), MSP_CARRIER(5.5),
+ 0x00d0, 0x0100, 0x0020, 0x3000
+ },{ /* FM Radio */
+ {-8, -8, 4, 6, 78, 107},
+ {-8, -8, 4, 6, 78, 107},
+ MSP_CARRIER(10.7), MSP_CARRIER(10.7),
+ 0x00d0, 0x0480, 0x0020, 0x3000
+ },{ /* Terrestrial FM-mono + FM-stereo */
+ {3, 18, 27, 48, 66, 72},
+ {3, 18, 27, 48, 66, 72},
+ MSP_CARRIER(5.5), MSP_CARRIER(5.5),
+ 0x00d0, 0x0480, 0x0030, 0x3000
+ },{ /* Sat FM-mono */
+ { 1, 9, 14, 24, 33, 37},
+ { 3, 18, 27, 48, 66, 72},
+ MSP_CARRIER(6.5), MSP_CARRIER(6.5),
+ 0x00c6, 0x0480, 0x0000, 0x3000
+ },{ /* NICAM/FM -- B/G (5.5/5.85), D/K (6.5/5.85) */
+ {-2, -8, -10, 10, 50, 86},
+ {3, 18, 27, 48, 66, 72},
+ MSP_CARRIER(5.5), MSP_CARRIER(5.5),
+ 0x00d0, 0x0040, 0x0120, 0x3000
+ },{ /* NICAM/FM -- I (6.0/6.552) */
+ {2, 4, -6, -4, 40, 94},
+ {3, 18, 27, 48, 66, 72},
+ MSP_CARRIER(6.0), MSP_CARRIER(6.0),
+ 0x00d0, 0x0040, 0x0120, 0x3000
+ },{ /* NICAM/AM -- L (6.5/5.85) */
+ {-2, -8, -10, 10, 50, 86},
+ {-4, -12, -9, 23, 79, 126},
+ MSP_CARRIER(6.5), MSP_CARRIER(6.5),
+ 0x00c6, 0x0140, 0x0120, 0x7c03
+ },
+};
+
+struct msp3400c_carrier_detect {
+ int cdo;
+ char *name;
+};
+
+static struct msp3400c_carrier_detect msp3400c_carrier_detect_main[] = {
+ /* main carrier */
+ { MSP_CARRIER(4.5), "4.5 NTSC" },
+ { MSP_CARRIER(5.5), "5.5 PAL B/G" },
+ { MSP_CARRIER(6.0), "6.0 PAL I" },
+ { MSP_CARRIER(6.5), "6.5 PAL D/K + SAT + SECAM" }
+};
+
+static struct msp3400c_carrier_detect msp3400c_carrier_detect_55[] = {
+ /* PAL B/G */
+ { MSP_CARRIER(5.7421875), "5.742 PAL B/G FM-stereo" },
+ { MSP_CARRIER(5.85), "5.85 PAL B/G NICAM" }
+};
+
+static struct msp3400c_carrier_detect msp3400c_carrier_detect_65[] = {
+ /* PAL SAT / SECAM */
+ { MSP_CARRIER(5.85), "5.85 PAL D/K + SECAM NICAM" },
+ { MSP_CARRIER(6.2578125), "6.25 PAL D/K1 FM-stereo" },
+ { MSP_CARRIER(6.7421875), "6.74 PAL D/K2 FM-stereo" },
+ { MSP_CARRIER(7.02), "7.02 PAL SAT FM-stereo s/b" },
+ { MSP_CARRIER(7.20), "7.20 PAL SAT FM-stereo s" },
+ { MSP_CARRIER(7.38), "7.38 PAL SAT FM-stereo b" },
+};
+
+/* ------------------------------------------------------------------------ */
+
+const char *msp_standard_std_name(int std)
+{
+ int i;
+
+ for (i = 0; msp_stdlist[i].name != NULL; i++)
+ if (msp_stdlist[i].retval == std)
+ return msp_stdlist[i].name;
+ return "unknown";
+}
+
+void msp3400c_setcarrier(struct i2c_client *client, int cdo1, int cdo2)
+{
+ msp_write_dem(client, 0x0093, cdo1 & 0xfff);
+ msp_write_dem(client, 0x009b, cdo1 >> 12);
+ msp_write_dem(client, 0x00a3, cdo2 & 0xfff);
+ msp_write_dem(client, 0x00ab, cdo2 >> 12);
+ msp_write_dem(client, 0x0056, 0); /*LOAD_REG_1/2*/
+}
+
+void msp3400c_setmode(struct i2c_client *client, int type)
+{
+ struct msp_state *state = i2c_get_clientdata(client);
+ int i;
+
+ v4l_dbg(1, msp_debug, client, "setmode: %d\n", type);
+ state->mode = type;
+ state->audmode = V4L2_TUNER_MODE_MONO;
+ state->rxsubchans = V4L2_TUNER_SUB_MONO;
+
+ msp_write_dem(client, 0x00bb, msp3400c_init_data[type].ad_cv);
+
+ for (i = 5; i >= 0; i--) /* fir 1 */
+ msp_write_dem(client, 0x0001, msp3400c_init_data[type].fir1[i]);
+
+ msp_write_dem(client, 0x0005, 0x0004); /* fir 2 */
+ msp_write_dem(client, 0x0005, 0x0040);
+ msp_write_dem(client, 0x0005, 0x0000);
+ for (i = 5; i >= 0; i--)
+ msp_write_dem(client, 0x0005, msp3400c_init_data[type].fir2[i]);
+
+ msp_write_dem(client, 0x0083, msp3400c_init_data[type].mode_reg);
+
+ msp3400c_setcarrier(client, msp3400c_init_data[type].cdo1,
+ msp3400c_init_data[type].cdo2);
+
+ msp_write_dem(client, 0x0056, 0); /*LOAD_REG_1/2*/
+
+ if (msp_dolby) {
+ msp_write_dsp(client, 0x0008, 0x0520); /* I2S1 */
+ msp_write_dsp(client, 0x0009, 0x0620); /* I2S2 */
+ msp_write_dsp(client, 0x000b, msp3400c_init_data[type].dsp_src);
+ } else {
+ msp_write_dsp(client, 0x0008, msp3400c_init_data[type].dsp_src);
+ msp_write_dsp(client, 0x0009, msp3400c_init_data[type].dsp_src);
+ msp_write_dsp(client, 0x000b, msp3400c_init_data[type].dsp_src);
+ }
+ msp_write_dsp(client, 0x000a, msp3400c_init_data[type].dsp_src);
+ msp_write_dsp(client, 0x000e, msp3400c_init_data[type].dsp_matrix);
+
+ if (state->has_nicam) {
+ /* nicam prescale */
+ msp_write_dsp(client, 0x0010, 0x5a00); /* was: 0x3000 */
+ }
+}
+
+/* turn on/off nicam + stereo */
+void msp3400c_setstereo(struct i2c_client *client, int mode)
+{
+ static char *strmode[] = { "mono", "stereo", "lang2", "lang1" };
+ struct msp_state *state = i2c_get_clientdata(client);
+ int nicam = 0; /* channel source: FM/AM or nicam */
+ int src = 0;
+
+ if (state->opmode == OPMODE_AUTOSELECT) {
+ /* this method would break everything, let's make sure
+ * it's never called
+ */
+ v4l_dbg(1, msp_debug, client, "setstereo called with mode=%d instead of set_source (ignored)\n",
+ mode);
+ return;
+ }
+
+ /* switch demodulator */
+ switch (state->mode) {
+ case MSP_MODE_FM_TERRA:
+ v4l_dbg(1, msp_debug, client, "FM setstereo: %s\n", strmode[mode]);
+ msp3400c_setcarrier(client, state->second, state->main);
+ switch (mode) {
+ case V4L2_TUNER_MODE_STEREO:
+ msp_write_dsp(client, 0x000e, 0x3001);
+ break;
+ case V4L2_TUNER_MODE_MONO:
+ case V4L2_TUNER_MODE_LANG1:
+ case V4L2_TUNER_MODE_LANG2:
+ msp_write_dsp(client, 0x000e, 0x3000);
+ break;
+ }
+ break;
+ case MSP_MODE_FM_SAT:
+ v4l_dbg(1, msp_debug, client, "SAT setstereo: %s\n", strmode[mode]);
+ switch (mode) {
+ case V4L2_TUNER_MODE_MONO:
+ msp3400c_setcarrier(client, MSP_CARRIER(6.5), MSP_CARRIER(6.5));
+ break;
+ case V4L2_TUNER_MODE_STEREO:
+ msp3400c_setcarrier(client, MSP_CARRIER(7.2), MSP_CARRIER(7.02));
+ break;
+ case V4L2_TUNER_MODE_LANG1:
+ msp3400c_setcarrier(client, MSP_CARRIER(7.38), MSP_CARRIER(7.02));
+ break;
+ case V4L2_TUNER_MODE_LANG2:
+ msp3400c_setcarrier(client, MSP_CARRIER(7.38), MSP_CARRIER(7.02));
+ break;
+ }
+ break;
+ case MSP_MODE_FM_NICAM1:
+ case MSP_MODE_FM_NICAM2:
+ case MSP_MODE_AM_NICAM:
+ v4l_dbg(1, msp_debug, client, "NICAM setstereo: %s\n",strmode[mode]);
+ msp3400c_setcarrier(client,state->second,state->main);
+ if (state->nicam_on)
+ nicam=0x0100;
+ break;
+ case MSP_MODE_BTSC:
+ v4l_dbg(1, msp_debug, client, "BTSC setstereo: %s\n",strmode[mode]);
+ nicam=0x0300;
+ break;
+ case MSP_MODE_EXTERN:
+ v4l_dbg(1, msp_debug, client, "extern setstereo: %s\n",strmode[mode]);
+ nicam = 0x0200;
+ break;
+ case MSP_MODE_FM_RADIO:
+ v4l_dbg(1, msp_debug, client, "FM-Radio setstereo: %s\n",strmode[mode]);
+ break;
+ default:
+ v4l_dbg(1, msp_debug, client, "mono setstereo\n");
+ return;
+ }
+
+ /* switch audio */
+ switch (mode) {
+ case V4L2_TUNER_MODE_STEREO:
+ src = 0x0020 | nicam;
+ break;
+ case V4L2_TUNER_MODE_MONO:
+ if (state->mode == MSP_MODE_AM_NICAM) {
+ v4l_dbg(1, msp_debug, client, "switching to AM mono\n");
+ /* AM mono decoding is handled by tuner, not MSP chip */
+ /* SCART switching control register */
+ msp_set_scart(client, SCART_MONO, 0);
+ src = 0x0200;
+ break;
+ }
+ case V4L2_TUNER_MODE_LANG1:
+ src = 0x0000 | nicam;
+ break;
+ case V4L2_TUNER_MODE_LANG2:
+ src = 0x0010 | nicam;
+ break;
+ }
+ v4l_dbg(1, msp_debug, client, "setstereo final source/matrix = 0x%x\n", src);
+
+ if (msp_dolby) {
+ msp_write_dsp(client, 0x0008, 0x0520);
+ msp_write_dsp(client, 0x0009, 0x0620);
+ msp_write_dsp(client, 0x000a, src);
+ msp_write_dsp(client, 0x000b, src);
+ } else {
+ msp_write_dsp(client, 0x0008, src);
+ msp_write_dsp(client, 0x0009, src);
+ msp_write_dsp(client, 0x000a, src);
+ msp_write_dsp(client, 0x000b, src);
+ msp_write_dsp(client, 0x000c, src);
+ if (state->has_scart23_in_scart2_out)
+ msp_write_dsp(client, 0x0041, src);
+ }
+}
+
+static void msp3400c_print_mode(struct i2c_client *client)
+{
+ struct msp_state *state = i2c_get_clientdata(client);
+
+ if (state->main == state->second) {
+ v4l_dbg(1, msp_debug, client, "mono sound carrier: %d.%03d MHz\n",
+ state->main / 910000, (state->main / 910) % 1000);
+ } else {
+ v4l_dbg(1, msp_debug, client, "main sound carrier: %d.%03d MHz\n",
+ state->main / 910000, (state->main / 910) % 1000);
+ }
+ if (state->mode == MSP_MODE_FM_NICAM1 || state->mode == MSP_MODE_FM_NICAM2)
+ v4l_dbg(1, msp_debug, client, "NICAM/FM carrier : %d.%03d MHz\n",
+ state->second / 910000, (state->second/910) % 1000);
+ if (state->mode == MSP_MODE_AM_NICAM)
+ v4l_dbg(1, msp_debug, client, "NICAM/AM carrier : %d.%03d MHz\n",
+ state->second / 910000, (state->second / 910) % 1000);
+ if (state->mode == MSP_MODE_FM_TERRA && state->main != state->second) {
+ v4l_dbg(1, msp_debug, client, "FM-stereo carrier : %d.%03d MHz\n",
+ state->second / 910000, (state->second / 910) % 1000);
+ }
+}
+
+/* ----------------------------------------------------------------------- */
+
+int autodetect_stereo(struct i2c_client *client)
+{
+ struct msp_state *state = i2c_get_clientdata(client);
+ int val;
+ int rxsubchans = state->rxsubchans;
+ int newnicam = state->nicam_on;
+ int update = 0;
+
+ switch (state->mode) {
+ case MSP_MODE_FM_TERRA:
+ val = msp_read_dsp(client, 0x18);
+ if (val > 32767)
+ val -= 65536;
+ v4l_dbg(2, msp_debug, client, "stereo detect register: %d\n", val);
+ if (val > 4096) {
+ rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO;
+ } else if (val < -4096) {
+ rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
+ } else {
+ rxsubchans = V4L2_TUNER_SUB_MONO;
+ }
+ newnicam = 0;
+ break;
+ case MSP_MODE_FM_NICAM1:
+ case MSP_MODE_FM_NICAM2:
+ case MSP_MODE_AM_NICAM:
+ val = msp_read_dem(client, 0x23);
+ v4l_dbg(2, msp_debug, client, "nicam sync=%d, mode=%d\n",
+ val & 1, (val & 0x1e) >> 1);
+
+ if (val & 1) {
+ /* nicam synced */
+ switch ((val & 0x1e) >> 1) {
+ case 0:
+ case 8:
+ rxsubchans = V4L2_TUNER_SUB_STEREO;
+ break;
+ case 1:
+ case 9:
+ rxsubchans = V4L2_TUNER_SUB_MONO
+ | V4L2_TUNER_SUB_LANG1;
+ break;
+ case 2:
+ case 10:
+ rxsubchans = V4L2_TUNER_SUB_MONO
+ | V4L2_TUNER_SUB_LANG1
+ | V4L2_TUNER_SUB_LANG2;
+ break;
+ default:
+ rxsubchans = V4L2_TUNER_SUB_MONO;
+ break;
+ }
+ newnicam = 1;
+ } else {
+ newnicam = 0;
+ rxsubchans = V4L2_TUNER_SUB_MONO;
+ }
+ break;
+ case MSP_MODE_BTSC:
+ val = msp_read_dem(client, 0x200);
+ v4l_dbg(2, msp_debug, client, "status=0x%x (pri=%s, sec=%s, %s%s%s)\n",
+ val,
+ (val & 0x0002) ? "no" : "yes",
+ (val & 0x0004) ? "no" : "yes",
+ (val & 0x0040) ? "stereo" : "mono",
+ (val & 0x0080) ? ", nicam 2nd mono" : "",
+ (val & 0x0100) ? ", bilingual/SAP" : "");
+ rxsubchans = V4L2_TUNER_SUB_MONO;
+ if (val & 0x0040) rxsubchans |= V4L2_TUNER_SUB_STEREO;
+ if (val & 0x0100) rxsubchans |= V4L2_TUNER_SUB_LANG1;
+ break;
+ }
+ if (rxsubchans != state->rxsubchans) {
+ update = 1;
+ v4l_dbg(1, msp_debug, client, "watch: rxsubchans %d => %d\n",
+ state->rxsubchans,rxsubchans);
+ state->rxsubchans = rxsubchans;
+ }
+ if (newnicam != state->nicam_on) {
+ update = 1;
+ v4l_dbg(1, msp_debug, client, "watch: nicam %d => %d\n",
+ state->nicam_on,newnicam);
+ state->nicam_on = newnicam;
+ }
+ return update;
+}
+
+/*
+ * A kernel thread for msp3400 control -- we don't want to block
+ * in the ioctl while doing the sound carrier & stereo detection
+ */
+/* stereo/multilang monitoring */
+static void watch_stereo(struct i2c_client *client)
+{
+ struct msp_state *state = i2c_get_clientdata(client);
+
+ if (autodetect_stereo(client)) {
+ if (state->rxsubchans & V4L2_TUNER_SUB_STEREO)
+ msp3400c_setstereo(client, V4L2_TUNER_MODE_STEREO);
+ else if (state->rxsubchans & V4L2_TUNER_SUB_LANG1)
+ msp3400c_setstereo(client, V4L2_TUNER_MODE_LANG1);
+ else
+ msp3400c_setstereo(client, V4L2_TUNER_MODE_MONO);
+ }
+
+ if (msp_once)
+ state->watch_stereo = 0;
+}
+
+int msp3400c_thread(void *data)
+{
+ struct i2c_client *client = data;
+ struct msp_state *state = i2c_get_clientdata(client);
+ struct msp3400c_carrier_detect *cd;
+ int count, max1,max2,val1,val2, val,this;
+
+
+ v4l_dbg(1, msp_debug, client, "msp3400 daemon started\n");
+ for (;;) {
+ v4l_dbg(2, msp_debug, client, "msp3400 thread: sleep\n");
+ msp_sleep(state, -1);
+ v4l_dbg(2, msp_debug, client, "msp3400 thread: wakeup\n");
+
+ restart:
+ v4l_dbg(1, msp_debug, client, "thread: restart scan\n");
+ state->restart = 0;
+ if (kthread_should_stop())
+ break;
+
+ if (state->radio || MSP_MODE_EXTERN == state->mode) {
+ /* no carrier scan, just unmute */
+ v4l_dbg(1, msp_debug, client, "thread: no carrier scan\n");
+ msp_set_audio(client);
+ continue;
+ }
+
+ /* mute */
+ msp_set_mute(client);
+ msp3400c_setmode(client, MSP_MODE_AM_DETECT /* +1 */ );
+ val1 = val2 = 0;
+ max1 = max2 = -1;
+ state->watch_stereo = 0;
+
+ /* some time for the tuner to sync */
+ if (msp_sleep(state,200))
+ goto restart;
+
+ /* carrier detect pass #1 -- main carrier */
+ cd = msp3400c_carrier_detect_main;
+ count = ARRAY_SIZE(msp3400c_carrier_detect_main);
+
+ if (msp_amsound && (state->v4l2_std & V4L2_STD_SECAM)) {
+ /* autodetect doesn't work well with AM ... */
+ max1 = 3;
+ count = 0;
+ v4l_dbg(1, msp_debug, client, "AM sound override\n");
+ }
+
+ for (this = 0; this < count; this++) {
+ msp3400c_setcarrier(client, cd[this].cdo,cd[this].cdo);
+ if (msp_sleep(state,100))
+ goto restart;
+ val = msp_read_dsp(client, 0x1b);
+ if (val > 32767)
+ val -= 65536;
+ if (val1 < val)
+ val1 = val, max1 = this;
+ v4l_dbg(1, msp_debug, client, "carrier1 val: %5d / %s\n", val,cd[this].name);
+ }
+
+ /* carrier detect pass #2 -- second (stereo) carrier */
+ switch (max1) {
+ case 1: /* 5.5 */
+ cd = msp3400c_carrier_detect_55;
+ count = ARRAY_SIZE(msp3400c_carrier_detect_55);
+ break;
+ case 3: /* 6.5 */
+ cd = msp3400c_carrier_detect_65;
+ count = ARRAY_SIZE(msp3400c_carrier_detect_65);
+ break;
+ case 0: /* 4.5 */
+ case 2: /* 6.0 */
+ default:
+ cd = NULL;
+ count = 0;
+ break;
+ }
+
+ if (msp_amsound && (state->v4l2_std & V4L2_STD_SECAM)) {
+ /* autodetect doesn't work well with AM ... */
+ cd = NULL;
+ count = 0;
+ max2 = 0;
+ }
+ for (this = 0; this < count; this++) {
+ msp3400c_setcarrier(client, cd[this].cdo,cd[this].cdo);
+ if (msp_sleep(state,100))
+ goto restart;
+ val = msp_read_dsp(client, 0x1b);
+ if (val > 32767)
+ val -= 65536;
+ if (val2 < val)
+ val2 = val, max2 = this;
+ v4l_dbg(1, msp_debug, client, "carrier2 val: %5d / %s\n", val,cd[this].name);
+ }
+
+ /* program the msp3400 according to the results */
+ state->main = msp3400c_carrier_detect_main[max1].cdo;
+ switch (max1) {
+ case 1: /* 5.5 */
+ if (max2 == 0) {
+ /* B/G FM-stereo */
+ state->second = msp3400c_carrier_detect_55[max2].cdo;
+ msp3400c_setmode(client, MSP_MODE_FM_TERRA);
+ state->nicam_on = 0;
+ msp3400c_setstereo(client, V4L2_TUNER_MODE_MONO);
+ state->watch_stereo = 1;
+ } else if (max2 == 1 && state->has_nicam) {
+ /* B/G NICAM */
+ state->second = msp3400c_carrier_detect_55[max2].cdo;
+ msp3400c_setmode(client, MSP_MODE_FM_NICAM1);
+ state->nicam_on = 1;
+ msp3400c_setcarrier(client, state->second, state->main);
+ state->watch_stereo = 1;
+ } else {
+ goto no_second;
+ }
+ break;
+ case 2: /* 6.0 */
+ /* PAL I NICAM */
+ state->second = MSP_CARRIER(6.552);
+ msp3400c_setmode(client, MSP_MODE_FM_NICAM2);
+ state->nicam_on = 1;
+ msp3400c_setcarrier(client, state->second, state->main);
+ state->watch_stereo = 1;
+ break;
+ case 3: /* 6.5 */
+ if (max2 == 1 || max2 == 2) {
+ /* D/K FM-stereo */
+ state->second = msp3400c_carrier_detect_65[max2].cdo;
+ msp3400c_setmode(client, MSP_MODE_FM_TERRA);
+ state->nicam_on = 0;
+ msp3400c_setstereo(client, V4L2_TUNER_MODE_MONO);
+ state->watch_stereo = 1;
+ } else if (max2 == 0 && (state->v4l2_std & V4L2_STD_SECAM)) {
+ /* L NICAM or AM-mono */
+ state->second = msp3400c_carrier_detect_65[max2].cdo;
+ msp3400c_setmode(client, MSP_MODE_AM_NICAM);
+ state->nicam_on = 0;
+ msp3400c_setstereo(client, V4L2_TUNER_MODE_MONO);
+ msp3400c_setcarrier(client, state->second, state->main);
+ /* volume prescale for SCART (AM mono input) */
+ msp_write_dsp(client, 0x000d, 0x1900);
+ state->watch_stereo = 1;
+ } else if (max2 == 0 && state->has_nicam) {
+ /* D/K NICAM */
+ state->second = msp3400c_carrier_detect_65[max2].cdo;
+ msp3400c_setmode(client, MSP_MODE_FM_NICAM1);
+ state->nicam_on = 1;
+ msp3400c_setcarrier(client, state->second, state->main);
+ state->watch_stereo = 1;
+ } else {
+ goto no_second;
+ }
+ break;
+ case 0: /* 4.5 */
+ default:
+ no_second:
+ state->second = msp3400c_carrier_detect_main[max1].cdo;
+ msp3400c_setmode(client, MSP_MODE_FM_TERRA);
+ state->nicam_on = 0;
+ msp3400c_setcarrier(client, state->second, state->main);
+ state->rxsubchans = V4L2_TUNER_SUB_MONO;
+ msp3400c_setstereo(client, V4L2_TUNER_MODE_MONO);
+ break;
+ }
+
+ /* unmute */
+ msp_set_audio(client);
+
+ if (msp_debug)
+ msp3400c_print_mode(client);
+
+ /* monitor tv audio mode */
+ while (state->watch_stereo) {
+ if (msp_sleep(state,5000))
+ goto restart;
+ watch_stereo(client);
+ }
+ }
+ v4l_dbg(1, msp_debug, client, "thread: exit\n");
+ return 0;
+}
+
+
+int msp3410d_thread(void *data)
+{
+ struct i2c_client *client = data;
+ struct msp_state *state = i2c_get_clientdata(client);
+ int val, i, std;
+
+ v4l_dbg(1, msp_debug, client, "msp3410 daemon started\n");
+
+ for (;;) {
+ v4l_dbg(2, msp_debug, client, "msp3410 thread: sleep\n");
+ msp_sleep(state,-1);
+ v4l_dbg(2, msp_debug, client, "msp3410 thread: wakeup\n");
+
+ restart:
+ v4l_dbg(1, msp_debug, client, "thread: restart scan\n");
+ state->restart = 0;
+ if (kthread_should_stop())
+ break;
+
+ if (state->mode == MSP_MODE_EXTERN) {
+ /* no carrier scan needed, just unmute */
+ v4l_dbg(1, msp_debug, client, "thread: no carrier scan\n");
+ msp_set_audio(client);
+ continue;
+ }
+
+ /* put into sane state (and mute) */
+ msp_reset(client);
+
+ /* some time for the tuner to sync */
+ if (msp_sleep(state,200))
+ goto restart;
+
+ /* start autodetect */
+ if (state->radio)
+ std = 0x40;
+ else
+ std = (state->v4l2_std & V4L2_STD_NTSC) ? 0x20 : 1;
+ state->watch_stereo = 0;
+
+ if (msp_debug)
+ v4l_dbg(1, msp_debug, client, "setting standard: %s (0x%04x)\n",
+ msp_standard_std_name(std), std);
+
+ if (std != 1) {
+ /* programmed some specific mode */
+ val = std;
+ } else {
+ /* triggered autodetect */
+ msp_write_dem(client, 0x20, std);
+ for (;;) {
+ if (msp_sleep(state, 100))
+ goto restart;
+
+ /* check results */
+ val = msp_read_dem(client, 0x7e);
+ if (val < 0x07ff)
+ break;
+ v4l_dbg(1, msp_debug, client, "detection still in progress\n");
+ }
+ }
+ for (i = 0; msp_stdlist[i].name != NULL; i++)
+ if (msp_stdlist[i].retval == val)
+ break;
+ v4l_dbg(1, msp_debug, client, "current standard: %s (0x%04x)\n",
+ msp_standard_std_name(val), val);
+ state->main = msp_stdlist[i].main;
+ state->second = msp_stdlist[i].second;
+ state->std = val;
+
+ if (msp_amsound && !state->radio && (state->v4l2_std & V4L2_STD_SECAM) &&
+ (val != 0x0009)) {
+ /* autodetection has failed, fall back to the backup standard */
+ v4l_dbg(1, msp_debug, client, "autodetection failed,"
+ " switching to backup standard: %s (0x%04x)\n",
+ msp_stdlist[8].name ? msp_stdlist[8].name : "unknown",val);
+ val = 0x0009;
+ msp_write_dem(client, 0x20, val);
+ }
+
+ /* set various prescales */
+ msp_write_dsp(client, 0x0d, 0x1900); /* scart */
+ msp_write_dsp(client, 0x0e, 0x2403); /* FM */
+ msp_write_dsp(client, 0x10, 0x5a00); /* nicam */
+
+ /* set stereo */
+ switch (val) {
+ case 0x0008: /* B/G NICAM */
+ case 0x000a: /* I NICAM */
+ if (val == 0x0008)
+ state->mode = MSP_MODE_FM_NICAM1;
+ else
+ state->mode = MSP_MODE_FM_NICAM2;
+ /* just turn on stereo */
+ state->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ state->nicam_on = 1;
+ state->watch_stereo = 1;
+ msp3400c_setstereo(client,V4L2_TUNER_MODE_STEREO);
+ break;
+ case 0x0009:
+ state->mode = MSP_MODE_AM_NICAM;
+ state->rxsubchans = V4L2_TUNER_SUB_MONO;
+ state->nicam_on = 1;
+ msp3400c_setstereo(client,V4L2_TUNER_MODE_MONO);
+ state->watch_stereo = 1;
+ break;
+ case 0x0020: /* BTSC */
+ /* just turn on stereo */
+ state->mode = MSP_MODE_BTSC;
+ state->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ state->nicam_on = 0;
+ state->watch_stereo = 1;
+ msp3400c_setstereo(client,V4L2_TUNER_MODE_STEREO);
+ break;
+ case 0x0040: /* FM radio */
+ state->mode = MSP_MODE_FM_RADIO;
+ state->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ state->audmode = V4L2_TUNER_MODE_STEREO;
+ state->nicam_on = 0;
+ state->watch_stereo = 0;
+ /* not needed in theory if we have radio, but
+ short programming enables carrier mute */
+ msp3400c_setmode(client, MSP_MODE_FM_RADIO);
+ msp3400c_setcarrier(client, MSP_CARRIER(10.7),
+ MSP_CARRIER(10.7));
+ /* scart routing */
+ msp_set_scart(client,SCART_IN2,0);
+ /* msp34xx does radio decoding */
+ msp_write_dsp(client, 0x08, 0x0020);
+ msp_write_dsp(client, 0x09, 0x0020);
+ msp_write_dsp(client, 0x0b, 0x0020);
+ break;
+ case 0x0003:
+ case 0x0004:
+ case 0x0005:
+ state->mode = MSP_MODE_FM_TERRA;
+ state->rxsubchans = V4L2_TUNER_SUB_MONO;
+ state->audmode = V4L2_TUNER_MODE_MONO;
+ state->nicam_on = 0;
+ state->watch_stereo = 1;
+ break;
+ }
+
+ /* unmute, restore misc registers */
+ msp_set_audio(client);
+ msp_write_dsp(client, 0x13, state->acb);
+ if (state->has_i2s_conf)
+ msp_write_dem(client, 0x40, state->i2s_mode);
+
+ /* monitor tv audio mode */
+ while (state->watch_stereo) {
+ if (msp_sleep(state,5000))
+ goto restart;
+ watch_stereo(client);
+ }
+ }
+ v4l_dbg(1, msp_debug, client, "thread: exit\n");
+ return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+/* msp34xxG + (autoselect no-thread) */
+/* this one uses both automatic standard detection and automatic sound */
+/* select which are available in the newer G versions */
+/* struct msp: only norm, acb and source are really used in this mode */
+
+/* set the same 'source' for the loudspeaker, scart and quasi-peak detector
+ * the value for source is the same as bits [15:8] of DSP registers 0x08,
+ * 0x0a and 0x0c: 0=mono, 1=stereo or A|B, 2=SCART, 3=stereo or A, 4=stereo or B
+ *
+ * this function replaces msp3400c_setstereo
+ */
+static void msp34xxg_set_source(struct i2c_client *client, int source)
+{
+ struct msp_state *state = i2c_get_clientdata(client);
+
+ /* fix the matrix mode to stereo and let the msp choose what
+ * to output according to 'source', as recommended.
+ * For MONO (source == 0) downmixing, set bits [7:0] to 0x30.
+ */
+ int value = (source & 0x07) << 8 | (source == 0 ? 0x30 : 0x20);
+
+ v4l_dbg(1, msp_debug, client, "set source to %d (0x%x)\n", source, value);
+ /* Loudspeaker Output */
+ msp_write_dsp(client, 0x08, value);
+ /* SCART1 DA Output */
+ msp_write_dsp(client, 0x0a, value);
+ /* Quasi-peak detector */
+ msp_write_dsp(client, 0x0c, value);
+ /*
+ * set identification threshold. Personally, I set it to a
+ * higher value than the default of 0x190 to ignore noisy
+ * stereo signals. This needs tuning (recommended range
+ * 0x00a0-0x03c0).
+ * 0x7f0 = forced mono mode
+ */
+ /* a2 threshold for stereo/bilingual */
+ msp_write_dem(client, 0x22, msp_stereo_thresh);
+ state->source = source;
+}
+
+/* (re-)initialize the msp34xxg, according to the current norm in state->norm
+ * return 0 if it worked, -1 if it failed
+ */
+static int msp34xxg_reset(struct i2c_client *client)
+{
+ struct msp_state *state = i2c_get_clientdata(client);
+ int modus, std;
+
+ if (msp_reset(client))
+ return -1;
+
+ /* make sure that input/output is muted (paranoid mode) */
+ /* ACB, mute DSP input, mute SCART 1 */
+ if (msp_write_dsp(client, 0x13, 0x0f20))
+ return -1;
+
+ if (state->has_i2s_conf)
+ msp_write_dem(client, 0x40, state->i2s_mode);
+
+ /* step-by-step initialisation, as described in the manual */
+ modus = msp_modus(client);
+ if (state->radio)
+ std = 0x40;
+ else
+ std = (state->v4l2_std & V4L2_STD_NTSC) ? 0x20 : 1;
+ modus &= ~0x03; /* STATUS_CHANGE = 0 */
+ modus |= 0x01; /* AUTOMATIC_SOUND_DETECTION = 1 */
+ if (msp_write_dem(client, 0x30, modus))
+ return -1;
+ if (msp_write_dem(client, 0x20, std))
+ return -1;
+
+ /* write the DSP registers that may influence
+ standard/audio autodetection right now */
+ msp34xxg_set_source(client, state->source);
+
+ /* AM/FM Prescale [15:8] 75khz deviation */
+ if (msp_write_dsp(client, 0x0e, 0x3000))
+ return -1;
+
+ /* NICAM Prescale 9db gain (as recommended) */
+ if (msp_write_dsp(client, 0x10, 0x5a00))
+ return -1;
+
+ return 0;
+}
+
+int msp34xxg_thread(void *data)
+{
+ struct i2c_client *client = data;
+ struct msp_state *state = i2c_get_clientdata(client);
+ int val, std, i;
+
+ v4l_dbg(1, msp_debug, client, "msp34xxg daemon started\n");
+
+ state->source = 1; /* default */
+ for (;;) {
+ v4l_dbg(2, msp_debug, client, "msp34xxg thread: sleep\n");
+ msp_sleep(state, -1);
+ v4l_dbg(2, msp_debug, client, "msp34xxg thread: wakeup\n");
+
+ restart:
+ v4l_dbg(1, msp_debug, client, "thread: restart scan\n");
+ state->restart = 0;
+ if (kthread_should_stop())
+ break;
+
+ /* setup the chip*/
+ msp34xxg_reset(client);
+ std = msp_standard;
+ if (std != 0x01)
+ goto unmute;
+
+ /* watch autodetect */
+ v4l_dbg(1, msp_debug, client, "triggered autodetect, waiting for result\n");
+ for (i = 0; i < 10; i++) {
+ if (msp_sleep(state, 100))
+ goto restart;
+
+ /* check results */
+ val = msp_read_dem(client, 0x7e);
+ if (val < 0x07ff) {
+ std = val;
+ break;
+ }
+ v4l_dbg(2, msp_debug, client, "detection still in progress\n");
+ }
+ if (std == 1) {
+ v4l_dbg(1, msp_debug, client, "detection still in progress after 10 tries. giving up.\n");
+ continue;
+ }
+
+ unmute:
+ state->std = std;
+ v4l_dbg(1, msp_debug, client, "current standard: %s (0x%04x)\n",
+ msp_standard_std_name(std), std);
+
+ /* unmute: dispatch sound to scart output, set scart volume */
+ msp_set_audio(client);
+
+ /* restore ACB */
+ if (msp_write_dsp(client, 0x13, state->acb))
+ return -1;
+
+ msp_write_dem(client, 0x40, state->i2s_mode);
+ }
+ v4l_dbg(1, msp_debug, client, "thread: exit\n");
+ return 0;
+}
+
+void msp34xxg_detect_stereo(struct i2c_client *client)
+{
+ struct msp_state *state = i2c_get_clientdata(client);
+
+ int status = msp_read_dem(client, 0x0200);
+ int is_bilingual = status & 0x100;
+ int is_stereo = status & 0x40;
+
+ state->rxsubchans = 0;
+ if (is_stereo)
+ state->rxsubchans |= V4L2_TUNER_SUB_STEREO;
+ else
+ state->rxsubchans |= V4L2_TUNER_SUB_MONO;
+ if (is_bilingual) {
+ state->rxsubchans |= V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
+ /* I'm supposed to check whether it's SAP or not
+ * and set only LANG2/SAP in this case. Yet, the MSP
+ * does a lot of work to hide this and handle everything
+ * the same way. I don't want to work around it so unless
+ * this is a problem, I'll handle SAP just like lang1/lang2.
+ */
+ }
+ v4l_dbg(1, msp_debug, client, "status=0x%x, stereo=%d, bilingual=%d -> rxsubchans=%d\n",
+ status, is_stereo, is_bilingual, state->rxsubchans);
+}
+
+void msp34xxg_set_audmode(struct i2c_client *client, int audmode)
+{
+ struct msp_state *state = i2c_get_clientdata(client);
+ int source;
+
+ switch (audmode) {
+ case V4L2_TUNER_MODE_MONO:
+ source = 0; /* mono only */
+ break;
+ case V4L2_TUNER_MODE_STEREO:
+ source = 1; /* stereo or A|B, see comment in msp34xxg_get_v4l2_stereo() */
+ /* problem: that could also mean 2 (scart input) */
+ break;
+ case V4L2_TUNER_MODE_LANG1:
+ source = 3; /* stereo or A */
+ break;
+ case V4L2_TUNER_MODE_LANG2:
+ source = 4; /* stereo or B */
+ break;
+ default:
+ audmode = 0;
+ source = 1;
+ break;
+ }
+ state->audmode = audmode;
+ msp34xxg_set_source(client, source);
+}
+
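The MSP_CARRIER() values programmed by the threads above (MSP_CARRIER(5.5), MSP_CARRIER(6.552), MSP_CARRIER(10.7), ...) are 24-bit fixed-point ratios of the sound carrier frequency to the 18.432 MHz reference used by the macro, which is defined in the removed msp3400.c further down. A minimal standalone sketch of that scaling follows; it is userspace-only (so the float arithmetic forbidden in kernel code is fine here), and its small carrier table is purely illustrative.

/*
 * carrier.c -- illustrates the MSP_CARRIER() scaling used by the
 * detection code above: cdo = (freq / 18.432 MHz) * 2^24.
 * Build with:  cc -Wall -o carrier carrier.c
 */
#include <stdio.h>

/* same shape as the macro in msp3400.c; userspace has an FPU, so this is ok */
#define MSP_CARRIER(freq) ((int)((float)((freq) / 18.432) * (1 << 24)))

int main(void)
{
	/* a few of the carriers the threads above program (illustrative) */
	static const struct { const char *name; double mhz; } carriers[] = {
		{ "5.5   PAL B/G main",  5.5   },
		{ "6.552 PAL I NICAM",   6.552 },
		{ "10.7  FM radio",      10.7  },
	};
	size_t i;

	for (i = 0; i < sizeof(carriers) / sizeof(carriers[0]); i++)
		printf("%-22s -> cdo = 0x%06x\n", carriers[i].name,
		       (unsigned)MSP_CARRIER(carriers[i].mhz));
	return 0;
}

The driver then splits such a cdo value across two demodulator registers (the low 12 bits and the remaining high bits), as msp3400c_setcarrier() in the removed file shows.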
diff --git a/drivers/media/video/msp3400.c b/drivers/media/video/msp3400.c
deleted file mode 100644
index d86f8e92e53..00000000000
--- a/drivers/media/video/msp3400.c
+++ /dev/null
@@ -1,2229 +0,0 @@
-/*
- * programming the msp34* sound processor family
- *
- * (c) 1997-2001 Gerd Knorr <kraxel@bytesex.org>
- *
- * what works and what doesn't:
- *
- * AM-Mono
- * Support for Hauppauge cards (decoding handled by tuner) added by
- * Frederic Crozat <fcrozat@mail.dotcom.fr>
- *
- * FM-Mono
- * should work. The stereo modes are backward compatible to FM-mono,
- * therefore FM-Mono should be always available.
- *
- * FM-Stereo (B/G, used in germany)
- * should work, with autodetect
- *
- * FM-Stereo (satellite)
- * should work, no autodetect (i.e. default is mono, but you can
- * switch to stereo -- untested)
- *
- * NICAM (B/G, L , used in UK, Scandinavia, Spain and France)
- * should work, with autodetect. Support for NICAM was added by
- * Pekka Pietikainen <pp@netppl.fi>
- *
- *
- * TODO:
- * - better SAT support
- *
- *
- * 980623 Thomas Sailer (sailer@ife.ee.ethz.ch)
- * using soundcore instead of OSS
- *
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/videodev.h>
-#include <linux/init.h>
-#include <linux/smp_lock.h>
-#include <linux/kthread.h>
-#include <linux/suspend.h>
-#include <asm/semaphore.h>
-#include <asm/pgtable.h>
-
-#include <media/audiochip.h>
-#include "msp3400.h"
-
-#define msp3400_dbg(fmt, arg...) \
- do { \
- if (debug) \
- printk(KERN_INFO "%s debug %d-%04x: " fmt, client->driver->name, \
- i2c_adapter_id(client->adapter), client->addr , ## arg); \
- } while (0)
-
-/* Medium volume debug. */
-#define msp3400_dbg_mediumvol(fmt, arg...) \
- do { \
- if (debug >= 2) \
- printk(KERN_INFO "%s debug %d-%04x: " fmt, client->driver->name, \
- i2c_adapter_id(client->adapter), client->addr , ## arg); \
- } while (0)
-
-/* High volume debug. Use with care. */
-#define msp3400_dbg_highvol(fmt, arg...) \
- do { \
- if (debug >= 16) \
- printk(KERN_INFO "%s debug %d-%04x: " fmt, client->driver->name, \
- i2c_adapter_id(client->adapter), client->addr , ## arg); \
- } while (0)
-
-#define msp3400_err(fmt, arg...) do { \
- printk(KERN_ERR "%s %d-%04x: " fmt, client->driver->name, \
- i2c_adapter_id(client->adapter), client->addr , ## arg); } while (0)
-#define msp3400_warn(fmt, arg...) do { \
- printk(KERN_WARNING "%s %d-%04x: " fmt, client->driver->name, \
- i2c_adapter_id(client->adapter), client->addr , ## arg); } while (0)
-#define msp3400_info(fmt, arg...) do { \
- printk(KERN_INFO "%s %d-%04x: " fmt, client->driver->name, \
- i2c_adapter_id(client->adapter), client->addr , ## arg); } while (0)
-
-#define OPMODE_AUTO -1
-#define OPMODE_MANUAL 0
-#define OPMODE_SIMPLE 1 /* use short programming (>= msp3410 only) */
-#define OPMODE_SIMPLER 2 /* use shorter programming (>= msp34xxG) */
-
-/* insmod parameters */
-static int opmode = OPMODE_AUTO;
-static int debug = 0; /* debug output */
-static int once = 0; /* no continuous stereo monitoring */
-static int amsound = 0; /* hard-wire AM sound at 6.5 MHz (France),
- the autoscan seems to work well only with FM... */
-static int standard = 1; /* Override auto detect of audio standard, if needed. */
-static int dolby = 0;
-
-static int stereo_threshold = 0x190; /* a2 threshold for stereo/bilingual
- (msp34xxg only) 0x00a0-0x03c0 */
-#define DFP_COUNT 0x41
-static const int bl_dfp[] = {
- 0x00, 0x01, 0x02, 0x03, 0x06, 0x08, 0x09, 0x0a,
- 0x0b, 0x0d, 0x0e, 0x10
-};
-
-#define IS_MSP34XX_G(msp) ((msp)->opmode==2)
-
-struct msp3400c {
- int rev1,rev2;
-
- int opmode;
- int nicam;
- int mode;
- int norm;
- int stereo;
- int nicam_on;
- int acb;
- int in_scart;
- int i2s_mode;
- int main, second; /* sound carrier */
- int input;
- int source; /* see msp34xxg_set_source */
-
- /* v4l2 */
- int audmode;
- int rxsubchans;
-
- int muted;
- int left, right; /* volume */
- int bass, treble;
-
- /* shadow register set */
- int dfp_regs[DFP_COUNT];
-
- /* thread */
- struct task_struct *kthread;
- wait_queue_head_t wq;
- int restart:1;
- int watch_stereo:1;
-};
-
-#define MIN(a,b) (((a)>(b))?(b):(a))
-#define MAX(a,b) (((a)>(b))?(a):(b))
-#define HAVE_NICAM(msp) (((msp->rev2>>8) & 0xff) != 00)
-#define HAVE_SIMPLE(msp) ((msp->rev1 & 0xff) >= 'D'-'@')
-#define HAVE_SIMPLER(msp) ((msp->rev1 & 0xff) >= 'G'-'@')
-#define HAVE_RADIO(msp) ((msp->rev1 & 0xff) >= 'G'-'@')
-
-#define VIDEO_MODE_RADIO 16 /* norm magic for radio mode */
-
-/* ---------------------------------------------------------------------- */
-
-/* read-only */
-module_param(opmode, int, 0444);
-
-/* read-write */
-module_param(once, int, 0644);
-module_param(debug, int, 0644);
-module_param(stereo_threshold, int, 0644);
-module_param(standard, int, 0644);
-module_param(amsound, int, 0644);
-module_param(dolby, int, 0644);
-
-MODULE_PARM_DESC(opmode, "Forces a MSP3400 opmode. 0=Manual, 1=Simple, 2=Simpler");
-MODULE_PARM_DESC(once, "No continuous stereo monitoring");
-MODULE_PARM_DESC(debug, "Enable debug messages");
-MODULE_PARM_DESC(stereo_threshold, "Sets signal threshold to activate stereo");
-MODULE_PARM_DESC(standard, "Specify audio standard: 32 = NTSC, 64 = radio, Default: Autodetect");
-MODULE_PARM_DESC(amsound, "Hardwire AM sound at 6.5 MHz (France), FM can autoscan");
-MODULE_PARM_DESC(dolby, "Activates Dolby processing");
-
-/* ---------------------------------------------------------------------- */
-
-#define I2C_MSP3400C 0x80
-#define I2C_MSP3400C_ALT 0x88
-
-#define I2C_MSP3400C_DEM 0x10
-#define I2C_MSP3400C_DFP 0x12
-
-/* Addresses to scan */
-static unsigned short normal_i2c[] = {
- I2C_MSP3400C >> 1,
- I2C_MSP3400C_ALT >> 1,
- I2C_CLIENT_END
-};
-I2C_CLIENT_INSMOD;
-
-MODULE_DESCRIPTION("device driver for msp34xx TV sound processor");
-MODULE_AUTHOR("Gerd Knorr");
-MODULE_LICENSE("GPL");
-
-/* ----------------------------------------------------------------------- */
-/* functions for talking to the MSP3400C Sound processor */
-
-static int msp3400c_reset(struct i2c_client *client)
-{
- /* reset and read revision code */
- static char reset_off[3] = { 0x00, 0x80, 0x00 };
- static char reset_on[3] = { 0x00, 0x00, 0x00 };
- static char write[3] = { I2C_MSP3400C_DFP + 1, 0x00, 0x1e };
- char read[2];
- struct i2c_msg reset[2] = {
- { client->addr, I2C_M_IGNORE_NAK, 3, reset_off },
- { client->addr, I2C_M_IGNORE_NAK, 3, reset_on },
- };
- struct i2c_msg test[2] = {
- { client->addr, 0, 3, write },
- { client->addr, I2C_M_RD, 2, read },
- };
-
- msp3400_dbg_highvol("msp3400c_reset\n");
- if ( (1 != i2c_transfer(client->adapter,&reset[0],1)) ||
- (1 != i2c_transfer(client->adapter,&reset[1],1)) ||
- (2 != i2c_transfer(client->adapter,test,2)) ) {
- msp3400_err("chip reset failed\n");
- return -1;
- }
- return 0;
-}
-
-static int msp3400c_read(struct i2c_client *client, int dev, int addr)
-{
- int err,retval;
-
- unsigned char write[3];
- unsigned char read[2];
- struct i2c_msg msgs[2] = {
- { client->addr, 0, 3, write },
- { client->addr, I2C_M_RD, 2, read }
- };
-
- write[0] = dev+1;
- write[1] = addr >> 8;
- write[2] = addr & 0xff;
-
- for (err = 0; err < 3;) {
- if (2 == i2c_transfer(client->adapter,msgs,2))
- break;
- err++;
- msp3400_warn("I/O error #%d (read 0x%02x/0x%02x)\n", err,
- dev, addr);
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(msecs_to_jiffies(10));
- }
- if (3 == err) {
- msp3400_warn("giving up, resetting chip. Sound will go off, sorry folks :-|\n");
- msp3400c_reset(client);
- return -1;
- }
- retval = read[0] << 8 | read[1];
- msp3400_dbg_highvol("msp3400c_read(0x%x, 0x%x): 0x%x\n", dev, addr, retval);
- return retval;
-}
-
-static int msp3400c_write(struct i2c_client *client, int dev, int addr, int val)
-{
- int err;
- unsigned char buffer[5];
-
- buffer[0] = dev;
- buffer[1] = addr >> 8;
- buffer[2] = addr & 0xff;
- buffer[3] = val >> 8;
- buffer[4] = val & 0xff;
-
- msp3400_dbg_highvol("msp3400c_write(0x%x, 0x%x, 0x%x)\n", dev, addr, val);
- for (err = 0; err < 3;) {
- if (5 == i2c_master_send(client, buffer, 5))
- break;
- err++;
- msp3400_warn("I/O error #%d (write 0x%02x/0x%02x)\n", err,
- dev, addr);
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(msecs_to_jiffies(10));
- }
- if (3 == err) {
- msp3400_warn("giving up, reseting chip. Sound will go off, sorry folks :-|\n");
- msp3400c_reset(client);
- return -1;
- }
- return 0;
-}
-
-/* ------------------------------------------------------------------------ */
-
-/* This macro is allowed for *constants* only, gcc must calculate it
- at compile time. Remember -- no floats in kernel mode */
-#define MSP_CARRIER(freq) ((int)((float)(freq/18.432)*(1<<24)))
-
-#define MSP_MODE_AM_DETECT 0
-#define MSP_MODE_FM_RADIO 2
-#define MSP_MODE_FM_TERRA 3
-#define MSP_MODE_FM_SAT 4
-#define MSP_MODE_FM_NICAM1 5
-#define MSP_MODE_FM_NICAM2 6
-#define MSP_MODE_AM_NICAM 7
-#define MSP_MODE_BTSC 8
-#define MSP_MODE_EXTERN 9
-
-static struct MSP_INIT_DATA_DEM {
- int fir1[6];
- int fir2[6];
- int cdo1;
- int cdo2;
- int ad_cv;
- int mode_reg;
- int dfp_src;
- int dfp_matrix;
-} msp_init_data[] = {
- { /* AM (for carrier detect / msp3400) */
- {75, 19, 36, 35, 39, 40},
- {75, 19, 36, 35, 39, 40},
- MSP_CARRIER(5.5), MSP_CARRIER(5.5),
- 0x00d0, 0x0500, 0x0020, 0x3000
- },{ /* AM (for carrier detect / msp3410) */
- {-1, -1, -8, 2, 59, 126},
- {-1, -1, -8, 2, 59, 126},
- MSP_CARRIER(5.5), MSP_CARRIER(5.5),
- 0x00d0, 0x0100, 0x0020, 0x3000
- },{ /* FM Radio */
- {-8, -8, 4, 6, 78, 107},
- {-8, -8, 4, 6, 78, 107},
- MSP_CARRIER(10.7), MSP_CARRIER(10.7),
- 0x00d0, 0x0480, 0x0020, 0x3000
- },{ /* Terrestrial FM-mono + FM-stereo */
- {3, 18, 27, 48, 66, 72},
- {3, 18, 27, 48, 66, 72},
- MSP_CARRIER(5.5), MSP_CARRIER(5.5),
- 0x00d0, 0x0480, 0x0030, 0x3000
- },{ /* Sat FM-mono */
- { 1, 9, 14, 24, 33, 37},
- { 3, 18, 27, 48, 66, 72},
- MSP_CARRIER(6.5), MSP_CARRIER(6.5),
- 0x00c6, 0x0480, 0x0000, 0x3000
- },{ /* NICAM/FM -- B/G (5.5/5.85), D/K (6.5/5.85) */
- {-2, -8, -10, 10, 50, 86},
- {3, 18, 27, 48, 66, 72},
- MSP_CARRIER(5.5), MSP_CARRIER(5.5),
- 0x00d0, 0x0040, 0x0120, 0x3000
- },{ /* NICAM/FM -- I (6.0/6.552) */
- {2, 4, -6, -4, 40, 94},
- {3, 18, 27, 48, 66, 72},
- MSP_CARRIER(6.0), MSP_CARRIER(6.0),
- 0x00d0, 0x0040, 0x0120, 0x3000
- },{ /* NICAM/AM -- L (6.5/5.85) */
- {-2, -8, -10, 10, 50, 86},
- {-4, -12, -9, 23, 79, 126},
- MSP_CARRIER(6.5), MSP_CARRIER(6.5),
- 0x00c6, 0x0140, 0x0120, 0x7c03
- },
-};
-
-struct CARRIER_DETECT {
- int cdo;
- char *name;
-};
-
-static struct CARRIER_DETECT carrier_detect_main[] = {
- /* main carrier */
- { MSP_CARRIER(4.5), "4.5 NTSC" },
- { MSP_CARRIER(5.5), "5.5 PAL B/G" },
- { MSP_CARRIER(6.0), "6.0 PAL I" },
- { MSP_CARRIER(6.5), "6.5 PAL D/K + SAT + SECAM" }
-};
-
-static struct CARRIER_DETECT carrier_detect_55[] = {
- /* PAL B/G */
- { MSP_CARRIER(5.7421875), "5.742 PAL B/G FM-stereo" },
- { MSP_CARRIER(5.85), "5.85 PAL B/G NICAM" }
-};
-
-static struct CARRIER_DETECT carrier_detect_65[] = {
- /* PAL SAT / SECAM */
- { MSP_CARRIER(5.85), "5.85 PAL D/K + SECAM NICAM" },
- { MSP_CARRIER(6.2578125), "6.25 PAL D/K1 FM-stereo" },
- { MSP_CARRIER(6.7421875), "6.74 PAL D/K2 FM-stereo" },
- { MSP_CARRIER(7.02), "7.02 PAL SAT FM-stereo s/b" },
- { MSP_CARRIER(7.20), "7.20 PAL SAT FM-stereo s" },
- { MSP_CARRIER(7.38), "7.38 PAL SAT FM-stereo b" },
-};
-
-#define CARRIER_COUNT(x) (sizeof(x)/sizeof(struct CARRIER_DETECT))
-
-/* ----------------------------------------------------------------------- *
- * bits 9 8 5 - SCART DSP input Select:
- * 0 0 0 - SCART 1 to DSP input (reset position)
- * 0 1 0 - MONO to DSP input
- * 1 0 0 - SCART 2 to DSP input
- * 1 1 1 - Mute DSP input
- *
- * bits 11 10 6 - SCART 1 Output Select:
- * 0 0 0 - undefined (reset position)
- * 0 1 0 - SCART 2 Input to SCART 1 Output (for devices with 2 SCARTS)
- * 1 0 0 - MONO input to SCART 1 Output
- * 1 1 0 - SCART 1 DA to SCART 1 Output
- * 0 0 1 - SCART 2 DA to SCART 1 Output
- * 0 1 1 - SCART 1 Input to SCART 1 Output
- * 1 1 1 - Mute SCART 1 Output
- *
- * bits 13 12 7 - SCART 2 Output Select (for devices with 2 Output SCART):
- * 0 0 0 - SCART 1 DA to SCART 2 Output (reset position)
- * 0 1 0 - SCART 1 Input to SCART 2 Output
- * 1 0 0 - MONO input to SCART 2 Output
- * 0 0 1 - SCART 2 DA to SCART 2 Output
- * 0 1 1 - SCART 2 Input to SCART 2 Output
- * 1 1 0 - Mute SCART 2 Output
- *
- * Bits 4 to 0 should be zero.
- * ----------------------------------------------------------------------- */
-
-static int scarts[3][9] = {
- /* MASK IN1 IN2 IN1_DA IN2_DA IN3 IN4 MONO MUTE */
- /* SCART DSP Input select */
- { 0x0320, 0x0000, 0x0200, -1, -1, 0x0300, 0x0020, 0x0100, 0x0320 },
- /* SCART1 Output select */
- { 0x0c40, 0x0440, 0x0400, 0x0c00, 0x0040, 0x0000, 0x0840, 0x0800, 0x0c40 },
- /* SCART2 Output select */
- { 0x3080, 0x1000, 0x1080, 0x0000, 0x0080, 0x2080, 0x3080, 0x2000, 0x3000 },
-};
-
-static char *scart_names[] = {
- "mask", "in1", "in2", "in1 da", "in2 da", "in3", "in4", "mono", "mute"
-};
-
-static void msp3400c_set_scart(struct i2c_client *client, int in, int out)
-{
- struct msp3400c *msp = i2c_get_clientdata(client);
-
- msp->in_scart=in;
-
- if (in >= 1 && in <= 8 && out >= 0 && out <= 2) {
- if (-1 == scarts[out][in])
- return;
-
- msp->acb &= ~scarts[out][SCART_MASK];
- msp->acb |= scarts[out][in];
- } else
- msp->acb = 0xf60; /* Mute Input and SCART 1 Output */
-
- msp3400_dbg("scart switch: %s => %d (ACB=0x%04x)\n",
- scart_names[in], out, msp->acb);
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x13, msp->acb);
-
- /* Sets I2S speed 0 = 1.024 Mbps, 1 = 2.048 Mbps */
- msp3400c_write(client,I2C_MSP3400C_DEM, 0x40, msp->i2s_mode);
-}
-
-/* ------------------------------------------------------------------------ */
-
-static void msp3400c_setcarrier(struct i2c_client *client, int cdo1, int cdo2)
-{
- msp3400c_write(client,I2C_MSP3400C_DEM, 0x0093, cdo1 & 0xfff);
- msp3400c_write(client,I2C_MSP3400C_DEM, 0x009b, cdo1 >> 12);
- msp3400c_write(client,I2C_MSP3400C_DEM, 0x00a3, cdo2 & 0xfff);
- msp3400c_write(client,I2C_MSP3400C_DEM, 0x00ab, cdo2 >> 12);
- msp3400c_write(client,I2C_MSP3400C_DEM, 0x0056, 0); /*LOAD_REG_1/2*/
-}
-
-static void msp3400c_setvolume(struct i2c_client *client,
- int muted, int left, int right)
- {
- int vol = 0, val = 0, balance = 0;
-
- if (!muted) {
- /* 0x7f instead of 0x73 here has sound quality issues,
- * probably due to overmodulation + clipping ... */
- vol = (left > right) ? left : right;
- val = (vol * 0x73 / 65535) << 8;
- }
- if (vol > 0) {
- balance = ((right - left) * 127) / vol;
- }
-
- msp3400_dbg("setvolume: mute=%s %d:%d v=0x%02x b=0x%02x\n",
- muted ? "on" : "off", left, right, val >> 8, balance);
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x0000, val); /* loudspeaker */
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x0006, val); /* headphones */
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x0007,
- muted ? 0x1 : (val | 0x1));
- msp3400c_write(client, I2C_MSP3400C_DFP, 0x0001, balance << 8);
-}
-
-static void msp3400c_setbass(struct i2c_client *client, int bass)
-{
- int val = ((bass-32768) * 0x60 / 65535) << 8;
-
- msp3400_dbg("setbass: %d 0x%02x\n", bass, val >> 8);
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x0002, val); /* loudspeaker */
-}
-
-static void msp3400c_settreble(struct i2c_client *client, int treble)
-{
- int val = ((treble-32768) * 0x60 / 65535) << 8;
-
- msp3400_dbg("settreble: %d 0x%02x\n",treble, val>>8);
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x0003, val); /* loudspeaker */
-}
-
-static void msp3400c_setmode(struct i2c_client *client, int type)
-{
- struct msp3400c *msp = i2c_get_clientdata(client);
- int i;
-
- msp3400_dbg("setmode: %d\n",type);
- msp->mode = type;
- msp->audmode = V4L2_TUNER_MODE_MONO;
- msp->rxsubchans = V4L2_TUNER_SUB_MONO;
-
- msp3400c_write(client,I2C_MSP3400C_DEM, 0x00bb, /* ad_cv */
- msp_init_data[type].ad_cv);
-
- for (i = 5; i >= 0; i--) /* fir 1 */
- msp3400c_write(client,I2C_MSP3400C_DEM, 0x0001,
- msp_init_data[type].fir1[i]);
-
- msp3400c_write(client,I2C_MSP3400C_DEM, 0x0005, 0x0004); /* fir 2 */
- msp3400c_write(client,I2C_MSP3400C_DEM, 0x0005, 0x0040);
- msp3400c_write(client,I2C_MSP3400C_DEM, 0x0005, 0x0000);
- for (i = 5; i >= 0; i--)
- msp3400c_write(client,I2C_MSP3400C_DEM, 0x0005,
- msp_init_data[type].fir2[i]);
-
- msp3400c_write(client,I2C_MSP3400C_DEM, 0x0083, /* MODE_REG */
- msp_init_data[type].mode_reg);
-
- msp3400c_setcarrier(client, msp_init_data[type].cdo1,
- msp_init_data[type].cdo2);
-
- msp3400c_write(client,I2C_MSP3400C_DEM, 0x0056, 0); /*LOAD_REG_1/2*/
-
- if (dolby) {
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x0008,
- 0x0520); /* I2S1 */
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x0009,
- 0x0620); /* I2S2 */
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x000b,
- msp_init_data[type].dfp_src);
- } else {
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x0008,
- msp_init_data[type].dfp_src);
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x0009,
- msp_init_data[type].dfp_src);
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x000b,
- msp_init_data[type].dfp_src);
- }
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x000a,
- msp_init_data[type].dfp_src);
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x000e,
- msp_init_data[type].dfp_matrix);
-
- if (HAVE_NICAM(msp)) {
- /* nicam prescale */
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x0010, 0x5a00); /* was: 0x3000 */
- }
-}
-
-/* given a bitmask of VIDEO_SOUND_XXX returns the "best" in the bitmask */
-static int best_video_sound(int rxsubchans)
-{
- if (rxsubchans & V4L2_TUNER_SUB_STEREO)
- return V4L2_TUNER_MODE_STEREO;
- if (rxsubchans & V4L2_TUNER_SUB_LANG1)
- return V4L2_TUNER_MODE_LANG1;
- if (rxsubchans & V4L2_TUNER_SUB_LANG2)
- return V4L2_TUNER_MODE_LANG2;
- return V4L2_TUNER_MODE_MONO;
-}
-
-/* turn on/off nicam + stereo */
-static void msp3400c_setstereo(struct i2c_client *client, int mode)
-{
- static char *strmode[] = { "0", "mono", "stereo", "3",
- "lang1", "5", "6", "7", "lang2"
- };
- struct msp3400c *msp = i2c_get_clientdata(client);
- int nicam = 0; /* channel source: FM/AM or nicam */
- int src = 0;
-
- if (IS_MSP34XX_G(msp)) {
- /* this method would break everything, let's make sure
- * it's never called
- */
- msp3400_dbg
- ("DEBUG WARNING setstereo called with mode=%d instead of set_source (ignored)\n",
- mode);
- return;
- }
-
- /* switch demodulator */
- switch (msp->mode) {
- case MSP_MODE_FM_TERRA:
- msp3400_dbg("FM setstereo: %s\n", strmode[mode]);
- msp3400c_setcarrier(client,msp->second,msp->main);
- switch (mode) {
- case V4L2_TUNER_MODE_STEREO:
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x000e, 0x3001);
- break;
- case V4L2_TUNER_MODE_MONO:
- case V4L2_TUNER_MODE_LANG1:
- case V4L2_TUNER_MODE_LANG2:
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x000e, 0x3000);
- break;
- }
- break;
- case MSP_MODE_FM_SAT:
- msp3400_dbg("SAT setstereo: %s\n", strmode[mode]);
- switch (mode) {
- case V4L2_TUNER_MODE_MONO:
- msp3400c_setcarrier(client, MSP_CARRIER(6.5), MSP_CARRIER(6.5));
- break;
- case V4L2_TUNER_MODE_STEREO:
- msp3400c_setcarrier(client, MSP_CARRIER(7.2), MSP_CARRIER(7.02));
- break;
- case V4L2_TUNER_MODE_LANG1:
- msp3400c_setcarrier(client, MSP_CARRIER(7.38), MSP_CARRIER(7.02));
- break;
- case V4L2_TUNER_MODE_LANG2:
- msp3400c_setcarrier(client, MSP_CARRIER(7.38), MSP_CARRIER(7.02));
- break;
- }
- break;
- case MSP_MODE_FM_NICAM1:
- case MSP_MODE_FM_NICAM2:
- case MSP_MODE_AM_NICAM:
- msp3400_dbg("NICAM setstereo: %s\n",strmode[mode]);
- msp3400c_setcarrier(client,msp->second,msp->main);
- if (msp->nicam_on)
- nicam=0x0100;
- break;
- case MSP_MODE_BTSC:
- msp3400_dbg("BTSC setstereo: %s\n",strmode[mode]);
- nicam=0x0300;
- break;
- case MSP_MODE_EXTERN:
- msp3400_dbg("extern setstereo: %s\n",strmode[mode]);
- nicam = 0x0200;
- break;
- case MSP_MODE_FM_RADIO:
- msp3400_dbg("FM-Radio setstereo: %s\n",strmode[mode]);
- break;
- default:
- msp3400_dbg("mono setstereo\n");
- return;
- }
-
- /* switch audio */
- switch (best_video_sound(mode)) {
- case V4L2_TUNER_MODE_STEREO:
- src = 0x0020 | nicam;
- break;
- case V4L2_TUNER_MODE_MONO:
- if (msp->mode == MSP_MODE_AM_NICAM) {
- msp3400_dbg("switching to AM mono\n");
- /* AM mono decoding is handled by tuner, not MSP chip */
- /* SCART switching control register */
- msp3400c_set_scart(client,SCART_MONO,0);
- src = 0x0200;
- break;
- }
- case V4L2_TUNER_MODE_LANG1:
- src = 0x0000 | nicam;
- break;
- case V4L2_TUNER_MODE_LANG2:
- src = 0x0010 | nicam;
- break;
- }
- msp3400_dbg("setstereo final source/matrix = 0x%x\n", src);
-
- if (dolby) {
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x0008,0x0520);
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x0009,0x0620);
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x000a,src);
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x000b,src);
- } else {
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x0008,src);
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x0009,src);
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x000a,src);
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x000b,src);
- }
-}
-
-static void
-msp3400c_print_mode(struct i2c_client *client)
-{
- struct msp3400c *msp = i2c_get_clientdata(client);
-
- if (msp->main == msp->second) {
- msp3400_dbg("mono sound carrier: %d.%03d MHz\n",
- msp->main/910000,(msp->main/910)%1000);
- } else {
- msp3400_dbg("main sound carrier: %d.%03d MHz\n",
- msp->main/910000,(msp->main/910)%1000);
- }
- if (msp->mode == MSP_MODE_FM_NICAM1 || msp->mode == MSP_MODE_FM_NICAM2)
- msp3400_dbg("NICAM/FM carrier : %d.%03d MHz\n",
- msp->second/910000,(msp->second/910)%1000);
- if (msp->mode == MSP_MODE_AM_NICAM)
- msp3400_dbg("NICAM/AM carrier : %d.%03d MHz\n",
- msp->second/910000,(msp->second/910)%1000);
- if (msp->mode == MSP_MODE_FM_TERRA &&
- msp->main != msp->second) {
- msp3400_dbg("FM-stereo carrier : %d.%03d MHz\n",
- msp->second/910000,(msp->second/910)%1000);
- }
-}
-
-#define MSP3400_MAX 4
-static struct i2c_client *msps[MSP3400_MAX];
-static void msp3400c_restore_dfp(struct i2c_client *client)
-{
- struct msp3400c *msp = i2c_get_clientdata(client);
- int i;
-
- for (i = 0; i < DFP_COUNT; i++) {
- if (-1 == msp->dfp_regs[i])
- continue;
- msp3400c_write(client, I2C_MSP3400C_DFP, i, msp->dfp_regs[i]);
- }
-}
-
-/* if the dfp_regs is set, set what's in there. Otherwise, set the default value */
-static int msp3400c_write_dfp_with_default(struct i2c_client *client,
- int addr, int default_value)
-{
- struct msp3400c *msp = i2c_get_clientdata(client);
- int value = default_value;
- if (addr < DFP_COUNT && -1 != msp->dfp_regs[addr])
- value = msp->dfp_regs[addr];
- return msp3400c_write(client, I2C_MSP3400C_DFP, addr, value);
-}
-
-/* ----------------------------------------------------------------------- */
-
-struct REGISTER_DUMP {
- int addr;
- char *name;
-};
-
-struct REGISTER_DUMP d1[] = {
- {0x007e, "autodetect"},
- {0x0023, "C_AD_BITS "},
- {0x0038, "ADD_BITS "},
- {0x003e, "CIB_BITS "},
- {0x0057, "ERROR_RATE"},
-};
-
-static int autodetect_stereo(struct i2c_client *client)
-{
- struct msp3400c *msp = i2c_get_clientdata(client);
- int val;
- int rxsubchans = msp->rxsubchans;
- int newnicam = msp->nicam_on;
- int update = 0;
-
- switch (msp->mode) {
- case MSP_MODE_FM_TERRA:
- val = msp3400c_read(client, I2C_MSP3400C_DFP, 0x18);
- if (val > 32767)
- val -= 65536;
- msp3400_dbg("stereo detect register: %d\n",val);
- if (val > 4096) {
- rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO;
- } else if (val < -4096) {
- rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
- } else {
- rxsubchans = V4L2_TUNER_SUB_MONO;
- }
- newnicam = 0;
- break;
- case MSP_MODE_FM_NICAM1:
- case MSP_MODE_FM_NICAM2:
- case MSP_MODE_AM_NICAM:
- val = msp3400c_read(client, I2C_MSP3400C_DEM, 0x23);
- msp3400_dbg("nicam sync=%d, mode=%d\n",
- val & 1, (val & 0x1e) >> 1);
-
- if (val & 1) {
- /* nicam synced */
- switch ((val & 0x1e) >> 1) {
- case 0:
- case 8:
- rxsubchans = V4L2_TUNER_SUB_STEREO;
- break;
- case 1:
- case 9:
- rxsubchans = V4L2_TUNER_SUB_MONO
- | V4L2_TUNER_SUB_LANG1;
- break;
- case 2:
- case 10:
- rxsubchans = V4L2_TUNER_SUB_MONO
- | V4L2_TUNER_SUB_LANG1
- | V4L2_TUNER_SUB_LANG2;
- break;
- default:
- rxsubchans = V4L2_TUNER_SUB_MONO;
- break;
- }
- newnicam=1;
- } else {
- newnicam = 0;
- rxsubchans = V4L2_TUNER_SUB_MONO;
- }
- break;
- case MSP_MODE_BTSC:
- val = msp3400c_read(client, I2C_MSP3400C_DEM, 0x200);
- msp3400_dbg("status=0x%x (pri=%s, sec=%s, %s%s%s)\n",
- val,
- (val & 0x0002) ? "no" : "yes",
- (val & 0x0004) ? "no" : "yes",
- (val & 0x0040) ? "stereo" : "mono",
- (val & 0x0080) ? ", nicam 2nd mono" : "",
- (val & 0x0100) ? ", bilingual/SAP" : "");
- rxsubchans = V4L2_TUNER_SUB_MONO;
- if (val & 0x0040) rxsubchans |= V4L2_TUNER_SUB_STEREO;
- if (val & 0x0100) rxsubchans |= V4L2_TUNER_SUB_LANG1;
- break;
- }
- if (rxsubchans != msp->rxsubchans) {
- update = 1;
- msp3400_dbg("watch: rxsubchans %d => %d\n",
- msp->rxsubchans,rxsubchans);
- msp->rxsubchans = rxsubchans;
- }
- if (newnicam != msp->nicam_on) {
- update = 1;
- msp3400_dbg("watch: nicam %d => %d\n",
- msp->nicam_on,newnicam);
- msp->nicam_on = newnicam;
- }
- return update;
-}
-
-/*
- * A kernel thread for msp3400 control -- we don't want to block
- * inside the ioctl while doing the sound carrier & stereo detect
- */
-
-static int msp34xx_sleep(struct msp3400c *msp, int timeout)
-{
- DECLARE_WAITQUEUE(wait, current);
-
- add_wait_queue(&msp->wq, &wait);
- if (!kthread_should_stop()) {
- if (timeout < 0) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- } else {
- schedule_timeout_interruptible
- (msecs_to_jiffies(timeout));
- }
- }
-
- remove_wait_queue(&msp->wq, &wait);
- try_to_freeze();
- return msp->restart;
-}
-
-/* stereo/multilang monitoring */
-static void watch_stereo(struct i2c_client *client)
-{
- struct msp3400c *msp = i2c_get_clientdata(client);
-
- if (autodetect_stereo(client)) {
- if (msp->stereo & V4L2_TUNER_MODE_STEREO)
- msp3400c_setstereo(client, V4L2_TUNER_MODE_STEREO);
- else if (msp->stereo & VIDEO_SOUND_LANG1)
- msp3400c_setstereo(client, V4L2_TUNER_MODE_LANG1);
- else
- msp3400c_setstereo(client, V4L2_TUNER_MODE_MONO);
- }
-
- if (once)
- msp->watch_stereo = 0;
-}
-
-
-static int msp3400c_thread(void *data)
-{
- struct i2c_client *client = data;
- struct msp3400c *msp = i2c_get_clientdata(client);
- struct CARRIER_DETECT *cd;
- int count, max1,max2,val1,val2, val,this;
-
-
- msp3400_info("msp3400 daemon started\n");
- for (;;) {
- msp3400_dbg_mediumvol("msp3400 thread: sleep\n");
- msp34xx_sleep(msp,-1);
- msp3400_dbg_mediumvol("msp3400 thread: wakeup\n");
-
- restart:
- msp3400_dbg("thread: restart scan\n");
- msp->restart = 0;
- if (kthread_should_stop())
- break;
-
- if (VIDEO_MODE_RADIO == msp->norm ||
- MSP_MODE_EXTERN == msp->mode) {
- /* no carrier scan, just unmute */
- msp3400_info("thread: no carrier scan\n");
- msp3400c_setvolume(client, msp->muted, msp->left, msp->right);
- continue;
- }
-
- /* mute */
- msp3400c_setvolume(client, msp->muted, 0, 0);
- msp3400c_setmode(client, MSP_MODE_AM_DETECT /* +1 */ );
- val1 = val2 = 0;
- max1 = max2 = -1;
- msp->watch_stereo = 0;
-
- /* some time for the tuner to sync */
- if (msp34xx_sleep(msp,200))
- goto restart;
-
- /* carrier detect pass #1 -- main carrier */
- cd = carrier_detect_main;
- count = CARRIER_COUNT(carrier_detect_main);
-
- if (amsound && (msp->norm == VIDEO_MODE_SECAM)) {
- /* autodetect doesn't work well with AM ... */
- max1 = 3;
- count = 0;
- msp3400_dbg("AM sound override\n");
- }
-
- for (this = 0; this < count; this++) {
- msp3400c_setcarrier(client, cd[this].cdo,cd[this].cdo);
- if (msp34xx_sleep(msp,100))
- goto restart;
- val = msp3400c_read(client, I2C_MSP3400C_DFP, 0x1b);
- if (val > 32767)
- val -= 65536;
- if (val1 < val)
- val1 = val, max1 = this;
- msp3400_dbg("carrier1 val: %5d / %s\n", val,cd[this].name);
- }
-
- /* carrier detect pass #2 -- second (stereo) carrier */
- switch (max1) {
- case 1: /* 5.5 */
- cd = carrier_detect_55;
- count = CARRIER_COUNT(carrier_detect_55);
- break;
- case 3: /* 6.5 */
- cd = carrier_detect_65;
- count = CARRIER_COUNT(carrier_detect_65);
- break;
- case 0: /* 4.5 */
- case 2: /* 6.0 */
- default:
- cd = NULL;
- count = 0;
- break;
- }
-
- if (amsound && (msp->norm == VIDEO_MODE_SECAM)) {
- /* autodetect doesn't work well with AM ... */
- cd = NULL;
- count = 0;
- max2 = 0;
- }
- for (this = 0; this < count; this++) {
- msp3400c_setcarrier(client, cd[this].cdo,cd[this].cdo);
- if (msp34xx_sleep(msp,100))
- goto restart;
- val = msp3400c_read(client, I2C_MSP3400C_DFP, 0x1b);
- if (val > 32767)
- val -= 65536;
- if (val2 < val)
- val2 = val, max2 = this;
- msp3400_dbg("carrier2 val: %5d / %s\n", val,cd[this].name);
- }
-
- /* program the msp3400 according to the results */
- msp->main = carrier_detect_main[max1].cdo;
- switch (max1) {
- case 1: /* 5.5 */
- if (max2 == 0) {
- /* B/G FM-stereo */
- msp->second = carrier_detect_55[max2].cdo;
- msp3400c_setmode(client, MSP_MODE_FM_TERRA);
- msp->nicam_on = 0;
- msp3400c_setstereo(client, V4L2_TUNER_MODE_MONO);
- msp->watch_stereo = 1;
- } else if (max2 == 1 && HAVE_NICAM(msp)) {
- /* B/G NICAM */
- msp->second = carrier_detect_55[max2].cdo;
- msp3400c_setmode(client, MSP_MODE_FM_NICAM1);
- msp->nicam_on = 1;
- msp3400c_setcarrier(client, msp->second, msp->main);
- msp->watch_stereo = 1;
- } else {
- goto no_second;
- }
- break;
- case 2: /* 6.0 */
- /* PAL I NICAM */
- msp->second = MSP_CARRIER(6.552);
- msp3400c_setmode(client, MSP_MODE_FM_NICAM2);
- msp->nicam_on = 1;
- msp3400c_setcarrier(client, msp->second, msp->main);
- msp->watch_stereo = 1;
- break;
- case 3: /* 6.5 */
- if (max2 == 1 || max2 == 2) {
- /* D/K FM-stereo */
- msp->second = carrier_detect_65[max2].cdo;
- msp3400c_setmode(client, MSP_MODE_FM_TERRA);
- msp->nicam_on = 0;
- msp3400c_setstereo(client, V4L2_TUNER_MODE_MONO);
- msp->watch_stereo = 1;
- } else if (max2 == 0 &&
- msp->norm == VIDEO_MODE_SECAM) {
- /* L NICAM or AM-mono */
- msp->second = carrier_detect_65[max2].cdo;
- msp3400c_setmode(client, MSP_MODE_AM_NICAM);
- msp->nicam_on = 0;
- msp3400c_setstereo(client, V4L2_TUNER_MODE_MONO);
- msp3400c_setcarrier(client, msp->second, msp->main);
- /* volume prescale for SCART (AM mono input) */
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x000d, 0x1900);
- msp->watch_stereo = 1;
- } else if (max2 == 0 && HAVE_NICAM(msp)) {
- /* D/K NICAM */
- msp->second = carrier_detect_65[max2].cdo;
- msp3400c_setmode(client, MSP_MODE_FM_NICAM1);
- msp->nicam_on = 1;
- msp3400c_setcarrier(client, msp->second, msp->main);
- msp->watch_stereo = 1;
- } else {
- goto no_second;
- }
- break;
- case 0: /* 4.5 */
- default:
- no_second:
- msp->second = carrier_detect_main[max1].cdo;
- msp3400c_setmode(client, MSP_MODE_FM_TERRA);
- msp->nicam_on = 0;
- msp3400c_setcarrier(client, msp->second, msp->main);
- msp->rxsubchans = V4L2_TUNER_SUB_MONO;
- msp3400c_setstereo(client, V4L2_TUNER_MODE_MONO);
- break;
- }
-
- /* unmute */
- msp3400c_setvolume(client, msp->muted, msp->left, msp->right);
- msp3400c_restore_dfp(client);
-
- if (debug)
- msp3400c_print_mode(client);
-
- /* monitor tv audio mode */
- while (msp->watch_stereo) {
- if (msp34xx_sleep(msp,5000))
- goto restart;
- watch_stereo(client);
- }
- }
- msp3400_dbg("thread: exit\n");
- return 0;
-}
-
-/* ----------------------------------------------------------------------- */
-/* this one uses the automatic sound standard detection of newer */
-/* msp34xx chip versions */
-
-static struct MODES {
- int retval;
- int main, second;
- char *name;
-} modelist[] = {
- { 0x0000, 0, 0, "ERROR" },
- { 0x0001, 0, 0, "autodetect start" },
- { 0x0002, MSP_CARRIER(4.5), MSP_CARRIER(4.72), "4.5/4.72 M Dual FM-Stereo" },
- { 0x0003, MSP_CARRIER(5.5), MSP_CARRIER(5.7421875), "5.5/5.74 B/G Dual FM-Stereo" },
- { 0x0004, MSP_CARRIER(6.5), MSP_CARRIER(6.2578125), "6.5/6.25 D/K1 Dual FM-Stereo" },
- { 0x0005, MSP_CARRIER(6.5), MSP_CARRIER(6.7421875), "6.5/6.74 D/K2 Dual FM-Stereo" },
- { 0x0006, MSP_CARRIER(6.5), MSP_CARRIER(6.5), "6.5 D/K FM-Mono (HDEV3)" },
- { 0x0008, MSP_CARRIER(5.5), MSP_CARRIER(5.85), "5.5/5.85 B/G NICAM FM" },
- { 0x0009, MSP_CARRIER(6.5), MSP_CARRIER(5.85), "6.5/5.85 L NICAM AM" },
- { 0x000a, MSP_CARRIER(6.0), MSP_CARRIER(6.55), "6.0/6.55 I NICAM FM" },
- { 0x000b, MSP_CARRIER(6.5), MSP_CARRIER(5.85), "6.5/5.85 D/K NICAM FM" },
- { 0x000c, MSP_CARRIER(6.5), MSP_CARRIER(5.85), "6.5/5.85 D/K NICAM FM (HDEV2)" },
- { 0x0020, MSP_CARRIER(4.5), MSP_CARRIER(4.5), "4.5 M BTSC-Stereo" },
- { 0x0021, MSP_CARRIER(4.5), MSP_CARRIER(4.5), "4.5 M BTSC-Mono + SAP" },
- { 0x0030, MSP_CARRIER(4.5), MSP_CARRIER(4.5), "4.5 M EIA-J Japan Stereo" },
- { 0x0040, MSP_CARRIER(10.7), MSP_CARRIER(10.7), "10.7 FM-Stereo Radio" },
- { 0x0050, MSP_CARRIER(6.5), MSP_CARRIER(6.5), "6.5 SAT-Mono" },
- { 0x0051, MSP_CARRIER(7.02), MSP_CARRIER(7.20), "7.02/7.20 SAT-Stereo" },
- { 0x0060, MSP_CARRIER(7.2), MSP_CARRIER(7.2), "7.2 SAT ADR" },
- { -1, 0, 0, NULL }, /* EOF */
-};
-
-static inline const char *msp34xx_standard_mode_name(int mode)
-{
- int i;
- for (i = 0; modelist[i].name != NULL; i++)
- if (modelist[i].retval == mode)
- return modelist[i].name;
- return "unknown";
-}
-
-static int msp34xx_modus(struct i2c_client *client, int norm)
-{
- switch (norm) {
- case VIDEO_MODE_PAL:
- msp3400_dbg("video mode selected to PAL\n");
-
-#if 1
- /* experimental: not sure this works with all chip versions */
- return 0x7003;
-#else
- /* previous value, try this if it breaks ... */
- return 0x1003;
-#endif
- case VIDEO_MODE_NTSC: /* BTSC */
- msp3400_dbg("video mode selected to NTSC\n");
- return 0x2003;
- case VIDEO_MODE_SECAM:
- msp3400_dbg("video mode selected to SECAM\n");
- return 0x0003;
- case VIDEO_MODE_RADIO:
- msp3400_dbg("video mode selected to Radio\n");
- return 0x0003;
- case VIDEO_MODE_AUTO:
- msp3400_dbg("video mode selected to Auto\n");
- return 0x2003;
- default:
- return 0x0003;
- }
-}
-
-static int msp34xx_standard(int norm)
-{
- switch (norm) {
- case VIDEO_MODE_PAL:
- return 1;
- case VIDEO_MODE_NTSC: /* BTSC */
- return 0x0020;
- case VIDEO_MODE_SECAM:
- return 1;
- case VIDEO_MODE_RADIO:
- return 0x0040;
- default:
- return 1;
- }
-}
-
-static int msp3410d_thread(void *data)
-{
- struct i2c_client *client = data;
- struct msp3400c *msp = i2c_get_clientdata(client);
- int mode,val,i,std;
-
- msp3400_info("msp3410 daemon started\n");
-
- for (;;) {
- msp3400_dbg_mediumvol("msp3410 thread: sleep\n");
- msp34xx_sleep(msp,-1);
- msp3400_dbg_mediumvol("msp3410 thread: wakeup\n");
-
- restart:
- msp3400_dbg("thread: restart scan\n");
- msp->restart = 0;
- if (kthread_should_stop())
- break;
-
- if (msp->mode == MSP_MODE_EXTERN) {
- /* no carrier scan needed, just unmute */
- msp3400_dbg("thread: no carrier scan\n");
- msp3400c_setvolume(client, msp->muted, msp->left, msp->right);
- continue;
- }
-
- /* put into sane state (and mute) */
- msp3400c_reset(client);
-
- /* some time for the tuner to sync */
- if (msp34xx_sleep(msp,200))
- goto restart;
-
- /* start autodetect */
- mode = msp34xx_modus(client, msp->norm);
- std = msp34xx_standard(msp->norm);
- msp3400c_write(client, I2C_MSP3400C_DEM, 0x30, mode);
- msp3400c_write(client, I2C_MSP3400C_DEM, 0x20, std);
- msp->watch_stereo = 0;
-
- if (debug)
- msp3400_dbg("setting mode: %s (0x%04x)\n",
- msp34xx_standard_mode_name(std) ,std);
-
- if (std != 1) {
- /* programmed some specific mode */
- val = std;
- } else {
- /* triggered autodetect */
- for (;;) {
- if (msp34xx_sleep(msp,100))
- goto restart;
-
- /* check results */
- val = msp3400c_read(client, I2C_MSP3400C_DEM, 0x7e);
- if (val < 0x07ff)
- break;
- msp3400_dbg("detection still in progress\n");
- }
- }
- for (i = 0; modelist[i].name != NULL; i++)
- if (modelist[i].retval == val)
- break;
- msp3400_dbg("current mode: %s (0x%04x)\n",
- modelist[i].name ? modelist[i].name : "unknown",
- val);
- msp->main = modelist[i].main;
- msp->second = modelist[i].second;
-
- if (amsound && (msp->norm == VIDEO_MODE_SECAM) && (val != 0x0009)) {
- /* autodetection has failed, let backup */
- msp3400_dbg("autodetection failed,"
- " switching to backup mode: %s (0x%04x)\n",
- modelist[8].name ? modelist[8].name : "unknown",val);
- val = 0x0009;
- msp3400c_write(client, I2C_MSP3400C_DEM, 0x20, val);
- }
-
- /* set various prescales */
- msp3400c_write(client, I2C_MSP3400C_DFP, 0x0d, 0x1900); /* scart */
- msp3400c_write(client, I2C_MSP3400C_DFP, 0x0e, 0x2403); /* FM */
- msp3400c_write(client, I2C_MSP3400C_DFP, 0x10, 0x5a00); /* nicam */
-
- /* set stereo */
- switch (val) {
- case 0x0008: /* B/G NICAM */
- case 0x000a: /* I NICAM */
- if (val == 0x0008)
- msp->mode = MSP_MODE_FM_NICAM1;
- else
- msp->mode = MSP_MODE_FM_NICAM2;
- /* just turn on stereo */
- msp->rxsubchans = V4L2_TUNER_SUB_STEREO;
- msp->nicam_on = 1;
- msp->watch_stereo = 1;
- msp3400c_setstereo(client,V4L2_TUNER_MODE_STEREO);
- break;
- case 0x0009:
- msp->mode = MSP_MODE_AM_NICAM;
- msp->rxsubchans = V4L2_TUNER_SUB_MONO;
- msp->nicam_on = 1;
- msp3400c_setstereo(client,V4L2_TUNER_MODE_MONO);
- msp->watch_stereo = 1;
- break;
- case 0x0020: /* BTSC */
- /* just turn on stereo */
- msp->mode = MSP_MODE_BTSC;
- msp->rxsubchans = V4L2_TUNER_SUB_STEREO;
- msp->nicam_on = 0;
- msp->watch_stereo = 1;
- msp3400c_setstereo(client,V4L2_TUNER_MODE_STEREO);
- break;
- case 0x0040: /* FM radio */
- msp->mode = MSP_MODE_FM_RADIO;
- msp->rxsubchans = V4L2_TUNER_SUB_STEREO;
- msp->audmode = V4L2_TUNER_MODE_STEREO;
- msp->nicam_on = 0;
- msp->watch_stereo = 0;
- /* not needed in theory if HAVE_RADIO(), but
- short programming enables carrier mute */
- msp3400c_setmode(client,MSP_MODE_FM_RADIO);
- msp3400c_setcarrier(client, MSP_CARRIER(10.7),
- MSP_CARRIER(10.7));
- /* scart routing */
- msp3400c_set_scart(client,SCART_IN2,0);
- /* msp34xx does radio decoding */
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x08, 0x0020);
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x09, 0x0020);
- msp3400c_write(client,I2C_MSP3400C_DFP, 0x0b, 0x0020);
- break;
- case 0x0003:
- case 0x0004:
- case 0x0005:
- msp->mode = MSP_MODE_FM_TERRA;
- msp->rxsubchans = V4L2_TUNER_SUB_MONO;
- msp->audmode = V4L2_TUNER_MODE_MONO;
- msp->nicam_on = 0;
- msp->watch_stereo = 1;
- break;
- }
-
- /* unmute, restore misc registers */
- msp3400c_setbass(client, msp->bass);
- msp3400c_settreble(client, msp->treble);
- msp3400c_setvolume(client, msp->muted, msp->left, msp->right);
- msp3400c_write(client, I2C_MSP3400C_DFP, 0x13, msp->acb);
- msp3400c_write(client,I2C_MSP3400C_DEM, 0x40, msp->i2s_mode);
- msp3400c_restore_dfp(client);
-
- /* monitor tv audio mode */
- while (msp->watch_stereo) {
- if (msp34xx_sleep(msp,5000))
- goto restart;
- watch_stereo(client);
- }
- }
- msp3400_dbg("thread: exit\n");
- return 0;
-}
-
-/* ----------------------------------------------------------------------- */
-/* msp34xxG + (simpler no-thread) */
-/* this one uses both automatic standard detection and automatic sound */
-/* select which are available in the newer G versions */
-/* struct msp: only norm, acb and source are really used in this mode */
-
-static void msp34xxg_set_source(struct i2c_client *client, int source);
-
-/* (re-)initialize the msp34xxg, according to the current norm in msp->norm
- * return 0 if it worked, -1 if it failed
- */
-static int msp34xxg_reset(struct i2c_client *client)
-{
- struct msp3400c *msp = i2c_get_clientdata(client);
- int modus,std;
-
- if (msp3400c_reset(client))
- return -1;
-
- /* make sure that input/output is muted (paranoid mode) */
- if (msp3400c_write(client,
- I2C_MSP3400C_DFP,
- 0x13, /* ACB */
- 0x0f20 /* mute DSP input, mute SCART 1 */))
- return -1;
-
- msp3400c_write(client,I2C_MSP3400C_DEM, 0x40, msp->i2s_mode);
-
- /* step-by-step initialisation, as described in the manual */
- modus = msp34xx_modus(client, msp->norm);
- std = msp34xx_standard(msp->norm);
- modus &= ~0x03; /* STATUS_CHANGE=0 */
- modus |= 0x01; /* AUTOMATIC_SOUND_DETECTION=1 */
- if (msp3400c_write(client,
- I2C_MSP3400C_DEM,
- 0x30/*MODUS*/,
- modus))
- return -1;
- if (msp3400c_write(client,
- I2C_MSP3400C_DEM,
- 0x20/*standard*/,
- std))
- return -1;
-
- /* write the dfps that may have an influence on
- standard/audio autodetection right now */
- msp34xxg_set_source(client, msp->source);
-
- if (msp3400c_write_dfp_with_default(client, 0x0e, /* AM/FM Prescale */
- 0x3000
- /* default: [15:8] 75khz deviation */
- ))
- return -1;
-
- if (msp3400c_write_dfp_with_default(client, 0x10, /* NICAM Prescale */
- 0x5a00
- /* default: 9db gain (as recommended) */
- ))
- return -1;
-
- return 0;
-}
-
-static int msp34xxg_thread(void *data)
-{
- struct i2c_client *client = data;
- struct msp3400c *msp = i2c_get_clientdata(client);
- int val, std, i;
-
- msp3400_info("msp34xxg daemon started\n");
-
- msp->source = 1; /* default */
- for (;;) {
- msp3400_dbg_mediumvol("msp34xxg thread: sleep\n");
- msp34xx_sleep(msp,-1);
- msp3400_dbg_mediumvol("msp34xxg thread: wakeup\n");
-
- restart:
- msp3400_dbg("thread: restart scan\n");
- msp->restart = 0;
- if (kthread_should_stop())
- break;
-
- /* setup the chip*/
- msp34xxg_reset(client);
- std = standard;
- if (std != 0x01)
- goto unmute;
-
- /* watch autodetect */
- msp3400_dbg("triggered autodetect, waiting for result\n");
- for (i = 0; i < 10; i++) {
- if (msp34xx_sleep(msp,100))
- goto restart;
-
- /* check results */
- val = msp3400c_read(client, I2C_MSP3400C_DEM, 0x7e);
- if (val < 0x07ff) {
- std = val;
- break;
- }
- msp3400_dbg("detection still in progress\n");
- }
- if (0x01 == std) {
- msp3400_dbg("detection still in progress after 10 tries. giving up.\n");
- continue;
- }
-
- unmute:
- msp3400_dbg("current mode: %s (0x%04x)\n",
- msp34xx_standard_mode_name(std), std);
-
- /* unmute: dispatch sound to scart output, set scart volume */
- msp3400_dbg("unmute\n");
-
- msp3400c_setbass(client, msp->bass);
- msp3400c_settreble(client, msp->treble);
- msp3400c_setvolume(client, msp->muted, msp->left, msp->right);
-
- /* restore ACB */
- if (msp3400c_write(client,
- I2C_MSP3400C_DFP,
- 0x13, /* ACB */
- msp->acb))
- return -1;
-
- msp3400c_write(client,I2C_MSP3400C_DEM, 0x40, msp->i2s_mode);
- }
- msp3400_dbg("thread: exit\n");
- return 0;
-}
-
-/* set the same 'source' for the loudspeaker, scart and quasi-peak detector
- * the value for source is the same as bits 15:8 of DFP registers 0x08,
- * 0x0a and 0x0c: 0=mono, 1=stereo or A|B, 2=SCART, 3=stereo or A, 4=stereo or B
- *
- * this function replaces msp3400c_setstereo
- */
-static void msp34xxg_set_source(struct i2c_client *client, int source)
-{
- struct msp3400c *msp = i2c_get_clientdata(client);
-
-	/* fix the matrix mode to stereo and let the msp choose what
-	 * to output according to 'source', as recommended;
-	 * for MONO (source==0) downmixing, set bits [7:0] to 0x30
-	 */
- int value = (source&0x07)<<8|(source==0 ? 0x30:0x20);
- msp3400_dbg("set source to %d (0x%x)\n", source, value);
- msp3400c_write(client,
- I2C_MSP3400C_DFP,
- 0x08, /* Loudspeaker Output */
- value);
- msp3400c_write(client,
- I2C_MSP3400C_DFP,
- 0x0a, /* SCART1 DA Output */
- value);
- msp3400c_write(client,
- I2C_MSP3400C_DFP,
- 0x0c, /* Quasi-peak detector */
- value);
- /*
-	 * set identification threshold. Personally, I set it
-	 * to a higher value than the default of 0x190 to
-	 * ignore noisy stereo signals.
- * this needs tuning. (recommended range 0x00a0-0x03c0)
- * 0x7f0 = forced mono mode
- */
- msp3400c_write(client,
- I2C_MSP3400C_DEM,
- 0x22, /* a2 threshold for stereo/bilingual */
- stereo_threshold);
- msp->source=source;
-}
-
-static void msp34xxg_detect_stereo(struct i2c_client *client)
-{
- struct msp3400c *msp = i2c_get_clientdata(client);
-
- int status = msp3400c_read(client,
- I2C_MSP3400C_DEM,
- 0x0200 /* STATUS */);
- int is_bilingual = status&0x100;
- int is_stereo = status&0x40;
-
- msp->rxsubchans = 0;
- if (is_stereo)
- msp->rxsubchans |= V4L2_TUNER_SUB_STEREO;
- else
- msp->rxsubchans |= V4L2_TUNER_SUB_MONO;
- if (is_bilingual) {
- msp->rxsubchans |= V4L2_TUNER_SUB_LANG1|V4L2_TUNER_SUB_LANG2;
- /* I'm supposed to check whether it's SAP or not
- * and set only LANG2/SAP in this case. Yet, the MSP
- * does a lot of work to hide this and handle everything
- * the same way. I don't want to work around it so unless
- * this is a problem, I'll handle SAP just like lang1/lang2.
- */
- }
- msp3400_dbg("status=0x%x, stereo=%d, bilingual=%d -> rxsubchans=%d\n",
- status, is_stereo, is_bilingual, msp->rxsubchans);
-}
-
-static void msp34xxg_set_audmode(struct i2c_client *client, int audmode)
-{
- struct msp3400c *msp = i2c_get_clientdata(client);
- int source;
-
- switch (audmode) {
- case V4L2_TUNER_MODE_MONO:
- source=0; /* mono only */
- break;
- case V4L2_TUNER_MODE_STEREO:
- source=1; /* stereo or A|B, see comment in msp34xxg_get_v4l2_stereo() */
- /* problem: that could also mean 2 (scart input) */
- break;
- case V4L2_TUNER_MODE_LANG1:
- source=3; /* stereo or A */
- break;
- case V4L2_TUNER_MODE_LANG2:
- source=4; /* stereo or B */
- break;
- default:
- audmode = 0;
- source = 1;
- break;
- }
- msp->audmode = audmode;
- msp34xxg_set_source(client, source);
-}
-
-
-/* ----------------------------------------------------------------------- */
-
-static int msp_attach(struct i2c_adapter *adap, int addr, int kind);
-static int msp_detach(struct i2c_client *client);
-static int msp_probe(struct i2c_adapter *adap);
-static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg);
-
-static int msp_suspend(struct device * dev, pm_message_t state);
-static int msp_resume(struct device * dev);
-
-static void msp_wake_thread(struct i2c_client *client);
-
-static struct i2c_driver driver = {
- .owner = THIS_MODULE,
- .name = "msp3400",
- .id = I2C_DRIVERID_MSP3400,
- .flags = I2C_DF_NOTIFY,
- .attach_adapter = msp_probe,
- .detach_client = msp_detach,
- .command = msp_command,
- .driver = {
- .suspend = msp_suspend,
- .resume = msp_resume,
- },
-};
-
-static struct i2c_client client_template =
-{
- .name = "(unset)",
- .flags = I2C_CLIENT_ALLOW_USE,
- .driver = &driver,
-};
-
-static int msp_attach(struct i2c_adapter *adap, int addr, int kind)
-{
- struct msp3400c *msp;
- struct i2c_client *client = &client_template;
- int (*thread_func)(void *data) = NULL;
- int i;
-
- client_template.adapter = adap;
- client_template.addr = addr;
-
- if (-1 == msp3400c_reset(&client_template)) {
- msp3400_dbg("no chip found\n");
- return -1;
- }
-
- if (NULL == (client = kmalloc(sizeof(struct i2c_client),GFP_KERNEL)))
- return -ENOMEM;
- memcpy(client,&client_template,sizeof(struct i2c_client));
- if (NULL == (msp = kmalloc(sizeof(struct msp3400c),GFP_KERNEL))) {
- kfree(client);
- return -ENOMEM;
- }
-
- memset(msp,0,sizeof(struct msp3400c));
- msp->norm = VIDEO_MODE_NTSC;
- msp->left = 58880; /* 0db gain */
- msp->right = 58880; /* 0db gain */
- msp->bass = 32768;
- msp->treble = 32768;
- msp->input = -1;
- msp->muted = 0;
- msp->i2s_mode = 0;
- for (i = 0; i < DFP_COUNT; i++)
- msp->dfp_regs[i] = -1;
-
- i2c_set_clientdata(client, msp);
- init_waitqueue_head(&msp->wq);
-
- if (-1 == msp3400c_reset(client)) {
- kfree(msp);
- kfree(client);
- msp3400_dbg("no chip found\n");
- return -1;
- }
-
- msp->rev1 = msp3400c_read(client, I2C_MSP3400C_DFP, 0x1e);
- if (-1 != msp->rev1)
- msp->rev2 = msp3400c_read(client, I2C_MSP3400C_DFP, 0x1f);
- if ((-1 == msp->rev1) || (0 == msp->rev1 && 0 == msp->rev2)) {
- kfree(msp);
- kfree(client);
- msp3400_dbg("error while reading chip version\n");
- return -1;
- }
- msp3400_dbg("rev1=0x%04x, rev2=0x%04x\n", msp->rev1, msp->rev2);
-
- msp3400c_setvolume(client, msp->muted, msp->left, msp->right);
-
- snprintf(client->name, sizeof(client->name), "MSP%c4%02d%c-%c%d",
- ((msp->rev1>>4)&0x0f) + '3',
- (msp->rev2>>8)&0xff, (msp->rev1&0x0f)+'@',
- ((msp->rev1>>8)&0xff)+'@', msp->rev2&0x1f);
-
- msp->opmode = opmode;
- if (OPMODE_AUTO == msp->opmode) {
- if (HAVE_SIMPLER(msp))
- msp->opmode = OPMODE_SIMPLER;
- else if (HAVE_SIMPLE(msp))
- msp->opmode = OPMODE_SIMPLE;
- else
- msp->opmode = OPMODE_MANUAL;
- }
-
- /* hello world :-) */
- msp3400_info("chip=%s", client->name);
- if (HAVE_NICAM(msp))
- printk(" +nicam");
- if (HAVE_SIMPLE(msp))
- printk(" +simple");
- if (HAVE_SIMPLER(msp))
- printk(" +simpler");
- if (HAVE_RADIO(msp))
- printk(" +radio");
-
- /* version-specific initialization */
- switch (msp->opmode) {
- case OPMODE_MANUAL:
- printk(" mode=manual");
- thread_func = msp3400c_thread;
- break;
- case OPMODE_SIMPLE:
- printk(" mode=simple");
- thread_func = msp3410d_thread;
- break;
- case OPMODE_SIMPLER:
- printk(" mode=simpler");
- thread_func = msp34xxg_thread;
- break;
- }
- printk("\n");
-
- /* startup control thread if needed */
- if (thread_func) {
- msp->kthread = kthread_run(thread_func, client, "msp34xx");
-
- if (NULL == msp->kthread)
- msp3400_warn("kernel_thread() failed\n");
- msp_wake_thread(client);
- }
-
- /* done */
- i2c_attach_client(client);
-
- /* update our own array */
- for (i = 0; i < MSP3400_MAX; i++) {
- if (NULL == msps[i]) {
- msps[i] = client;
- break;
- }
- }
-
- return 0;
-}
-
-static int msp_detach(struct i2c_client *client)
-{
- struct msp3400c *msp = i2c_get_clientdata(client);
- int i;
-
- /* shutdown control thread */
- if (msp->kthread) {
- msp->restart = 1;
- kthread_stop(msp->kthread);
- }
- msp3400c_reset(client);
-
- /* update our own array */
- for (i = 0; i < MSP3400_MAX; i++) {
- if (client == msps[i]) {
- msps[i] = NULL;
- break;
- }
- }
-
- i2c_detach_client(client);
-
- kfree(msp);
- kfree(client);
- return 0;
-}
-
-static int msp_probe(struct i2c_adapter *adap)
-{
- if (adap->class & I2C_CLASS_TV_ANALOG)
- return i2c_probe(adap, &addr_data, msp_attach);
- return 0;
-}
-
-static void msp_wake_thread(struct i2c_client *client)
-{
- struct msp3400c *msp = i2c_get_clientdata(client);
-
- if (NULL == msp->kthread)
- return;
- msp3400c_setvolume(client,msp->muted,0,0);
- msp->watch_stereo = 0;
- msp->restart = 1;
- wake_up_interruptible(&msp->wq);
-}
-
-/* ----------------------------------------------------------------------- */
-
-static int mode_v4l2_to_v4l1(int rxsubchans)
-{
- int mode = 0;
-
- if (rxsubchans & V4L2_TUNER_SUB_STEREO)
- mode |= VIDEO_SOUND_STEREO;
- if (rxsubchans & V4L2_TUNER_SUB_LANG2)
- mode |= VIDEO_SOUND_LANG2;
- if (rxsubchans & V4L2_TUNER_SUB_LANG1)
- mode |= VIDEO_SOUND_LANG1;
- if (0 == mode)
- mode |= VIDEO_SOUND_MONO;
- return mode;
-}
-
-static int mode_v4l1_to_v4l2(int mode)
-{
- if (mode & VIDEO_SOUND_STEREO)
- return V4L2_TUNER_MODE_STEREO;
- if (mode & VIDEO_SOUND_LANG2)
- return V4L2_TUNER_MODE_LANG2;
- if (mode & VIDEO_SOUND_LANG1)
- return V4L2_TUNER_MODE_LANG1;
- return V4L2_TUNER_MODE_MONO;
-}
-
-static void msp_any_detect_stereo(struct i2c_client *client)
-{
- struct msp3400c *msp = i2c_get_clientdata(client);
-
- switch (msp->opmode) {
- case OPMODE_MANUAL:
- case OPMODE_SIMPLE:
- autodetect_stereo(client);
- break;
- case OPMODE_SIMPLER:
- msp34xxg_detect_stereo(client);
- break;
- }
-}
-
-static void msp_any_set_audmode(struct i2c_client *client, int audmode)
-{
- struct msp3400c *msp = i2c_get_clientdata(client);
-
- switch (msp->opmode) {
- case OPMODE_MANUAL:
- case OPMODE_SIMPLE:
- msp->watch_stereo = 0;
- msp3400c_setstereo(client, audmode);
- break;
- case OPMODE_SIMPLER:
- msp34xxg_set_audmode(client, audmode);
- break;
- }
-}
-
-
-static int msp_command(struct i2c_client *client, unsigned int cmd, void *arg)
-{
- struct msp3400c *msp = i2c_get_clientdata(client);
- __u16 *sarg = arg;
- int scart = 0;
-
- switch (cmd) {
-
- case AUDC_SET_INPUT:
- msp3400_dbg("AUDC_SET_INPUT(%d)\n",*sarg);
-
- if (*sarg == msp->input)
- break;
- msp->input = *sarg;
- switch (*sarg) {
- case AUDIO_RADIO:
- /* Hauppauge uses IN2 for the radio */
- msp->mode = MSP_MODE_FM_RADIO;
- scart = SCART_IN2;
- break;
- case AUDIO_EXTERN_1:
- /* IN1 is often used for external input ... */
- msp->mode = MSP_MODE_EXTERN;
- scart = SCART_IN1;
- break;
- case AUDIO_EXTERN_2:
-			/* ... sometimes it is IN2 though ;) */
- msp->mode = MSP_MODE_EXTERN;
- scart = SCART_IN2;
- break;
- case AUDIO_TUNER:
- msp->mode = -1;
- break;
- default:
- if (*sarg & AUDIO_MUTE)
- msp3400c_set_scart(client,SCART_MUTE,0);
- break;
- }
- if (scart) {
- msp->rxsubchans = V4L2_TUNER_SUB_STEREO;
- msp->audmode = V4L2_TUNER_MODE_STEREO;
- msp3400c_set_scart(client,scart,0);
- msp3400c_write(client,I2C_MSP3400C_DFP,0x000d,0x1900);
- if (msp->opmode != OPMODE_SIMPLER)
- msp3400c_setstereo(client, msp->audmode);
- }
- msp_wake_thread(client);
- break;
-
- case AUDC_SET_RADIO:
- msp3400_dbg("AUDC_SET_RADIO\n");
- msp->norm = VIDEO_MODE_RADIO;
- msp3400_dbg("switching to radio mode\n");
- msp->watch_stereo = 0;
- switch (msp->opmode) {
- case OPMODE_MANUAL:
- /* set msp3400 to FM radio mode */
- msp3400c_setmode(client,MSP_MODE_FM_RADIO);
- msp3400c_setcarrier(client, MSP_CARRIER(10.7),
- MSP_CARRIER(10.7));
- msp3400c_setvolume(client, msp->muted, msp->left, msp->right);
- break;
- case OPMODE_SIMPLE:
- case OPMODE_SIMPLER:
-			/* the thread will do it for us */
- msp_wake_thread(client);
- break;
- }
- break;
- /* work-in-progress: hook to control the DFP registers */
- case MSP_SET_DFPREG:
- {
- struct msp_dfpreg *r = arg;
- int i;
-
- if (r->reg < 0 || r->reg >= DFP_COUNT)
- return -EINVAL;
- for (i = 0; i < sizeof(bl_dfp) / sizeof(int); i++)
- if (r->reg == bl_dfp[i])
- return -EINVAL;
- msp->dfp_regs[r->reg] = r->value;
- msp3400c_write(client, I2C_MSP3400C_DFP, r->reg, r->value);
- return 0;
- }
- case MSP_GET_DFPREG:
- {
- struct msp_dfpreg *r = arg;
-
- if (r->reg < 0 || r->reg >= DFP_COUNT)
- return -EINVAL;
- r->value = msp3400c_read(client, I2C_MSP3400C_DFP, r->reg);
- return 0;
- }
-
- /* --- v4l ioctls --- */
- /* take care: bttv does userspace copying, we'll get a
- kernel pointer here... */
- case VIDIOCGAUDIO:
- {
- struct video_audio *va = arg;
-
- msp3400_dbg("VIDIOCGAUDIO\n");
- va->flags |= VIDEO_AUDIO_VOLUME |
- VIDEO_AUDIO_BASS |
- VIDEO_AUDIO_TREBLE |
- VIDEO_AUDIO_MUTABLE;
- if (msp->muted)
- va->flags |= VIDEO_AUDIO_MUTE;
- va->volume = MAX(msp->left, msp->right);
- va->balance = (32768 * MIN(msp->left, msp->right)) /
- (va->volume ? va->volume : 1);
- va->balance = (msp->left < msp->right) ?
- (65535 - va->balance) : va->balance;
- if (0 == va->volume)
- va->balance = 32768;
- va->bass = msp->bass;
- va->treble = msp->treble;
-
- msp_any_detect_stereo(client);
- va->mode = mode_v4l2_to_v4l1(msp->rxsubchans);
- break;
- }
- case VIDIOCSAUDIO:
- {
- struct video_audio *va = arg;
-
- msp3400_dbg("VIDIOCSAUDIO\n");
- msp->muted = (va->flags & VIDEO_AUDIO_MUTE);
- msp->left = (MIN(65536 - va->balance, 32768) *
- va->volume) / 32768;
- msp->right = (MIN(va->balance, 32768) * va->volume) / 32768;
- msp->bass = va->bass;
- msp->treble = va->treble;
- msp3400_dbg("VIDIOCSAUDIO setting va->volume to %d\n",
- va->volume);
- msp3400_dbg("VIDIOCSAUDIO setting va->balance to %d\n",
- va->balance);
- msp3400_dbg("VIDIOCSAUDIO setting va->flags to %d\n",
- va->flags);
- msp3400_dbg("VIDIOCSAUDIO setting msp->left to %d\n",
- msp->left);
- msp3400_dbg("VIDIOCSAUDIO setting msp->right to %d\n",
- msp->right);
- msp3400_dbg("VIDIOCSAUDIO setting msp->bass to %d\n",
- msp->bass);
- msp3400_dbg("VIDIOCSAUDIO setting msp->treble to %d\n",
- msp->treble);
- msp3400_dbg("VIDIOCSAUDIO setting msp->mode to %d\n",
- msp->mode);
- msp3400c_setvolume(client, msp->muted, msp->left, msp->right);
- msp3400c_setbass(client, msp->bass);
- msp3400c_settreble(client, msp->treble);
-
- if (va->mode != 0 && msp->norm != VIDEO_MODE_RADIO)
- msp_any_set_audmode(client,mode_v4l1_to_v4l2(va->mode));
- break;
- }
-
- case VIDIOCSCHAN:
- {
- struct video_channel *vc = arg;
-
- msp3400_dbg("VIDIOCSCHAN (norm=%d)\n",vc->norm);
- msp->norm = vc->norm;
- msp_wake_thread(client);
- break;
- }
-
- case VIDIOCSFREQ:
- case VIDIOC_S_FREQUENCY:
- {
- /* new channel -- kick audio carrier scan */
- msp3400_dbg("VIDIOCSFREQ\n");
- msp_wake_thread(client);
- break;
- }
-
- /* msp34xx specific */
- case MSP_SET_MATRIX:
- {
- struct msp_matrix *mspm = arg;
-
- msp3400_dbg("MSP_SET_MATRIX\n");
- msp3400c_set_scart(client, mspm->input, mspm->output);
- break;
- }
-
- /* --- v4l2 ioctls --- */
- case VIDIOC_S_STD:
- {
- v4l2_std_id *id = arg;
-
- /*FIXME: use V4L2 mode flags on msp3400 instead of V4L1*/
- if (*id & V4L2_STD_PAL) {
- msp->norm=VIDEO_MODE_PAL;
- } else if (*id & V4L2_STD_SECAM) {
- msp->norm=VIDEO_MODE_SECAM;
- } else {
- msp->norm=VIDEO_MODE_NTSC;
- }
-
- msp_wake_thread(client);
- return 0;
- }
-
- case VIDIOC_ENUMINPUT:
- {
- struct v4l2_input *i = arg;
-
- if (i->index != 0)
- return -EINVAL;
-
- i->type = V4L2_INPUT_TYPE_TUNER;
- switch (i->index) {
- case AUDIO_RADIO:
- strcpy(i->name,"Radio");
- break;
- case AUDIO_EXTERN_1:
- strcpy(i->name,"Extern 1");
- break;
- case AUDIO_EXTERN_2:
- strcpy(i->name,"Extern 2");
- break;
- case AUDIO_TUNER:
- strcpy(i->name,"Television");
- break;
- default:
- return -EINVAL;
- }
- return 0;
- }
-
- case VIDIOC_G_AUDIO:
- {
- struct v4l2_audio *a = arg;
-
- memset(a,0,sizeof(*a));
-
- switch (a->index) {
- case AUDIO_RADIO:
- strcpy(a->name,"Radio");
- break;
- case AUDIO_EXTERN_1:
- strcpy(a->name,"Extern 1");
- break;
- case AUDIO_EXTERN_2:
- strcpy(a->name,"Extern 2");
- break;
- case AUDIO_TUNER:
- strcpy(a->name,"Television");
- break;
- default:
- return -EINVAL;
- }
-
- msp_any_detect_stereo(client);
- if (msp->audmode == V4L2_TUNER_MODE_STEREO) {
- a->capability=V4L2_AUDCAP_STEREO;
- }
-
- break;
- }
- case VIDIOC_S_AUDIO:
- {
- struct v4l2_audio *sarg = arg;
-
- switch (sarg->index) {
- case AUDIO_RADIO:
- /* Hauppauge uses IN2 for the radio */
- msp->mode = MSP_MODE_FM_RADIO;
- scart = SCART_IN2;
- break;
- case AUDIO_EXTERN_1:
- /* IN1 is often used for external input ... */
- msp->mode = MSP_MODE_EXTERN;
- scart = SCART_IN1;
- break;
- case AUDIO_EXTERN_2:
-			/* ... sometimes it is IN2 though ;) */
- msp->mode = MSP_MODE_EXTERN;
- scart = SCART_IN2;
- break;
- case AUDIO_TUNER:
- msp->mode = -1;
- break;
- }
- if (scart) {
- msp->rxsubchans = V4L2_TUNER_SUB_STEREO;
- msp->audmode = V4L2_TUNER_MODE_STEREO;
- msp3400c_set_scart(client,scart,0);
- msp3400c_write(client,I2C_MSP3400C_DFP,0x000d,0x1900);
- }
- if (sarg->capability==V4L2_AUDCAP_STEREO) {
- msp->audmode = V4L2_TUNER_MODE_STEREO;
- } else {
- msp->audmode &= ~V4L2_TUNER_MODE_STEREO;
- }
- msp_any_set_audmode(client, msp->audmode);
- msp_wake_thread(client);
- break;
- }
- case VIDIOC_G_TUNER:
- {
- struct v4l2_tuner *vt = arg;
-
- msp_any_detect_stereo(client);
- vt->audmode = msp->audmode;
- vt->rxsubchans = msp->rxsubchans;
- vt->capability = V4L2_TUNER_CAP_STEREO |
- V4L2_TUNER_CAP_LANG1|
- V4L2_TUNER_CAP_LANG2;
- break;
- }
- case VIDIOC_S_TUNER:
- {
- struct v4l2_tuner *vt=(struct v4l2_tuner *)arg;
-
- /* only set audmode */
- if (vt->audmode != -1 && vt->audmode != 0)
- msp_any_set_audmode(client, vt->audmode);
- break;
- }
-
- case VIDIOC_G_AUDOUT:
- {
- struct v4l2_audioout *a=(struct v4l2_audioout *)arg;
- int idx=a->index;
-
- memset(a,0,sizeof(*a));
-
- switch (idx) {
- case 0:
- strcpy(a->name,"Scart1 Out");
- break;
- case 1:
- strcpy(a->name,"Scart2 Out");
- break;
- case 2:
- strcpy(a->name,"I2S Out");
- break;
- default:
- return -EINVAL;
- }
- break;
-
- }
- case VIDIOC_S_AUDOUT:
- {
- struct v4l2_audioout *a=(struct v4l2_audioout *)arg;
-
- if (a->index<0||a->index>2)
- return -EINVAL;
-
- if (a->index==2) {
- if (a->mode == V4L2_AUDMODE_32BITS)
- msp->i2s_mode=1;
- else
- msp->i2s_mode=0;
- }
- msp3400_dbg("Setting audio out on msp34xx to input %i, mode %i\n",
- a->index,msp->i2s_mode);
- msp3400c_set_scart(client,msp->in_scart,a->index+1);
-
- break;
- }
-
- default:
- /* nothing */
- break;
- }
- return 0;
-}
-
-static int msp_suspend(struct device * dev, pm_message_t state)
-{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
-
- msp3400_dbg("msp34xx: suspend\n");
- msp3400c_reset(client);
- return 0;
-}
-
-static int msp_resume(struct device * dev)
-{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
-
- msp3400_dbg("msp34xx: resume\n");
- msp_wake_thread(client);
- return 0;
-}
-
-/* ----------------------------------------------------------------------- */
-
-static int __init msp3400_init_module(void)
-{
- return i2c_add_driver(&driver);
-}
-
-static void __exit msp3400_cleanup_module(void)
-{
- i2c_del_driver(&driver);
-}
-
-module_init(msp3400_init_module);
-module_exit(msp3400_cleanup_module);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
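
A quick illustration of the register layout used by msp34xxg_set_source() above (user-space sketch, not part of the patch): the source selector goes into bits 15:8 of DFP registers 0x08/0x0a/0x0c, the matrix mode into bits 7:0, using the same formula as the driver.

#include <stdio.h>

/* Mirrors the value computation in msp34xxg_set_source(): the source code
 * (0=mono, 1=stereo or A|B, 2=SCART, 3=stereo or A, 4=stereo or B) goes into
 * bits 15:8; bits 7:0 select the matrix mode (0x30 = mono downmix, 0x20 = stereo). */
static unsigned int msp_source_reg(int source)
{
	return ((source & 0x07) << 8) | (source == 0 ? 0x30 : 0x20);
}

int main(void)
{
	int s;

	for (s = 0; s <= 4; s++)
		printf("source %d -> DFP value 0x%04x\n", s, msp_source_reg(s));
	return 0;
}
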
diff --git a/drivers/media/video/msp3400.h b/drivers/media/video/msp3400.h
index 2d9ff40f0b0..70a5ef8ba01 100644
--- a/drivers/media/video/msp3400.h
+++ b/drivers/media/video/msp3400.h
@@ -6,22 +6,28 @@
/* ---------------------------------------------------------------------- */
-struct msp_dfpreg {
- int reg;
- int value;
-};
-
struct msp_matrix {
int input;
int output;
};
-#define MSP_SET_DFPREG _IOW('m',15,struct msp_dfpreg)
-#define MSP_GET_DFPREG _IOW('m',16,struct msp_dfpreg)
-
/* ioctl for MSP_SET_MATRIX will have to be registered */
#define MSP_SET_MATRIX _IOW('m',17,struct msp_matrix)
+/* This macro is allowed for *constants* only, gcc must calculate it
+ at compile time. Remember -- no floats in kernel mode */
+#define MSP_CARRIER(freq) ((int)((float)(freq / 18.432) * (1 << 24)))
+
+#define MSP_MODE_AM_DETECT 0
+#define MSP_MODE_FM_RADIO 2
+#define MSP_MODE_FM_TERRA 3
+#define MSP_MODE_FM_SAT 4
+#define MSP_MODE_FM_NICAM1 5
+#define MSP_MODE_FM_NICAM2 6
+#define MSP_MODE_AM_NICAM 7
+#define MSP_MODE_BTSC 8
+#define MSP_MODE_EXTERN 9
+
#define SCART_MASK 0
#define SCART_IN1 1
#define SCART_IN2 2
@@ -36,4 +42,84 @@ struct msp_matrix {
#define SCART1_OUT 1
#define SCART2_OUT 2
+#define OPMODE_AUTO -1
+#define OPMODE_MANUAL 0
+#define OPMODE_AUTODETECT 1 /* use autodetect (>= msp3410 only) */
+#define OPMODE_AUTOSELECT 2 /* use autodetect & autoselect (>= msp34xxG) */
+
+/* module parameters */
+extern int msp_debug;
+extern int msp_once;
+extern int msp_amsound;
+extern int msp_standard;
+extern int msp_dolby;
+extern int msp_stereo_thresh;
+
+struct msp_state {
+ int rev1, rev2;
+ u8 has_nicam;
+ u8 has_radio;
+ u8 has_headphones;
+ u8 has_ntsc_jp_d_k3;
+ u8 has_scart4;
+ u8 has_scart23_in_scart2_out;
+ u8 has_scart2_out_volume;
+ u8 has_i2s_conf;
+ u8 has_subwoofer;
+ u8 has_sound_processing;
+ u8 has_virtual_dolby_surround;
+ u8 has_dolby_pro_logic;
+
+ int radio;
+ int opmode;
+ int std;
+ int mode;
+ v4l2_std_id v4l2_std;
+ int nicam_on;
+ int acb;
+ int in_scart;
+ int i2s_mode;
+ int main, second; /* sound carrier */
+ int input;
+ int source; /* see msp34xxg_set_source */
+
+ /* v4l2 */
+ int audmode;
+ int rxsubchans;
+
+ int volume, muted;
+ int balance, loudness;
+ int bass, treble;
+
+ /* thread */
+ struct task_struct *kthread;
+ wait_queue_head_t wq;
+ int restart:1;
+ int watch_stereo:1;
+};
+
+/* msp3400-driver.c */
+int msp_write_dem(struct i2c_client *client, int addr, int val);
+int msp_write_dsp(struct i2c_client *client, int addr, int val);
+int msp_read_dem(struct i2c_client *client, int addr);
+int msp_read_dsp(struct i2c_client *client, int addr);
+int msp_reset(struct i2c_client *client);
+void msp_set_scart(struct i2c_client *client, int in, int out);
+void msp_set_mute(struct i2c_client *client);
+void msp_set_audio(struct i2c_client *client);
+int msp_modus(struct i2c_client *client);
+int msp_sleep(struct msp_state *state, int timeout);
+
+/* msp3400-kthreads.c */
+const char *msp_standard_std_name(int std);
+void msp3400c_setcarrier(struct i2c_client *client, int cdo1, int cdo2);
+void msp3400c_setmode(struct i2c_client *client, int type);
+void msp3400c_setstereo(struct i2c_client *client, int mode);
+int autodetect_stereo(struct i2c_client *client);
+int msp3400c_thread(void *data);
+int msp3410d_thread(void *data);
+int msp34xxg_thread(void *data);
+void msp34xxg_detect_stereo(struct i2c_client *client);
+void msp34xxg_set_audmode(struct i2c_client *client, int audmode);
+
#endif /* MSP3400_H */
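
A quick illustration of the MSP_CARRIER() macro added above (user-space sketch, not part of the patch). The macro only works for constant arguments because gcc folds the float arithmetic at compile time; the carrier frequency in MHz is expressed as a fraction of 18.432 MHz and scaled to 24-bit fixed point.

#include <stdio.h>

/* Same formula as the kernel macro; here it is evaluated at run time just to
 * show the resulting register values. */
#define MSP_CARRIER(freq) ((int)((float)(freq / 18.432) * (1 << 24)))

int main(void)
{
	printf("MSP_CARRIER(10.7) = 0x%06x\n", MSP_CARRIER(10.7)); /* FM radio IF */
	printf("MSP_CARRIER(5.5)  = 0x%06x\n", MSP_CARRIER(5.5));  /* B/G sound carrier */
	return 0;
}
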
diff --git a/drivers/media/video/mt20xx.c b/drivers/media/video/mt20xx.c
index 2180018f06d..0bf1caac588 100644
--- a/drivers/media/video/mt20xx.c
+++ b/drivers/media/video/mt20xx.c
@@ -20,6 +20,9 @@ module_param(tv_antenna, int, 0644);
static unsigned int radio_antenna = 0;
module_param(radio_antenna, int, 0644);
+/* from tuner-core.c */
+extern int tuner_debug;
+
/* ---------------------------------------------------------------------- */
#define MT2032 0x04
@@ -494,6 +497,13 @@ int microtune_init(struct i2c_client *c)
t->tv_freq = NULL;
t->radio_freq = NULL;
t->standby = NULL;
+ if (t->std & V4L2_STD_525_60) {
+ tuner_dbg("pinnacle ntsc\n");
+ t->radio_if2 = 41300 * 1000;
+ } else {
+ tuner_dbg("pinnacle pal\n");
+ t->radio_if2 = 33300 * 1000;
+ }
name = "unknown";
i2c_master_send(c,buf,1);
diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c
index d04793fb80f..8416ceff524 100644
--- a/drivers/media/video/mxb.c
+++ b/drivers/media/video/mxb.c
@@ -26,6 +26,7 @@
#include <media/saa7146_vv.h>
#include <media/tuner.h>
#include <linux/video_decoder.h>
+#include <media/v4l2-common.h>
#include "mxb.h"
#include "tea6415c.h"
@@ -176,12 +177,11 @@ static int mxb_probe(struct saa7146_dev* dev)
return -ENODEV;
}
- mxb = (struct mxb*)kmalloc(sizeof(struct mxb), GFP_KERNEL);
+ mxb = kzalloc(sizeof(struct mxb), GFP_KERNEL);
if( NULL == mxb ) {
DEB_D(("not enough kernel memory.\n"));
return -ENOMEM;
}
- memset(mxb, 0x0, sizeof(struct mxb));
mxb->i2c_adapter = (struct i2c_adapter) {
.class = I2C_CLASS_TV_ANALOG,
diff --git a/drivers/media/video/ovcamchip/ov6x20.c b/drivers/media/video/ovcamchip/ov6x20.c
index b3f4d266ced..c04130dab12 100644
--- a/drivers/media/video/ovcamchip/ov6x20.c
+++ b/drivers/media/video/ovcamchip/ov6x20.c
@@ -178,10 +178,9 @@ static int ov6x20_init(struct i2c_client *c)
if (rc < 0)
return rc;
- ov->spriv = s = kmalloc(sizeof *s, GFP_KERNEL);
+ ov->spriv = s = kzalloc(sizeof *s, GFP_KERNEL);
if (!s)
return -ENOMEM;
- memset(s, 0, sizeof *s);
s->auto_brt = 1;
s->auto_exp = 1;
diff --git a/drivers/media/video/ovcamchip/ov6x30.c b/drivers/media/video/ovcamchip/ov6x30.c
index 6eab458ab79..73b94f51a85 100644
--- a/drivers/media/video/ovcamchip/ov6x30.c
+++ b/drivers/media/video/ovcamchip/ov6x30.c
@@ -141,10 +141,9 @@ static int ov6x30_init(struct i2c_client *c)
if (rc < 0)
return rc;
- ov->spriv = s = kmalloc(sizeof *s, GFP_KERNEL);
+ ov->spriv = s = kzalloc(sizeof *s, GFP_KERNEL);
if (!s)
return -ENOMEM;
- memset(s, 0, sizeof *s);
s->auto_brt = 1;
s->auto_exp = 1;
diff --git a/drivers/media/video/ovcamchip/ov76be.c b/drivers/media/video/ovcamchip/ov76be.c
index 29bbdc05e3b..11f6be924d8 100644
--- a/drivers/media/video/ovcamchip/ov76be.c
+++ b/drivers/media/video/ovcamchip/ov76be.c
@@ -105,10 +105,9 @@ static int ov76be_init(struct i2c_client *c)
if (rc < 0)
return rc;
- ov->spriv = s = kmalloc(sizeof *s, GFP_KERNEL);
+ ov->spriv = s = kzalloc(sizeof *s, GFP_KERNEL);
if (!s)
return -ENOMEM;
- memset(s, 0, sizeof *s);
s->auto_brt = 1;
s->auto_exp = 1;
diff --git a/drivers/media/video/ovcamchip/ov7x10.c b/drivers/media/video/ovcamchip/ov7x10.c
index 6c383d4b14f..5206e791392 100644
--- a/drivers/media/video/ovcamchip/ov7x10.c
+++ b/drivers/media/video/ovcamchip/ov7x10.c
@@ -115,10 +115,9 @@ static int ov7x10_init(struct i2c_client *c)
if (rc < 0)
return rc;
- ov->spriv = s = kmalloc(sizeof *s, GFP_KERNEL);
+ ov->spriv = s = kzalloc(sizeof *s, GFP_KERNEL);
if (!s)
return -ENOMEM;
- memset(s, 0, sizeof *s);
s->auto_brt = 1;
s->auto_exp = 1;
diff --git a/drivers/media/video/ovcamchip/ov7x20.c b/drivers/media/video/ovcamchip/ov7x20.c
index 3c8c48f338b..8e26ae338f3 100644
--- a/drivers/media/video/ovcamchip/ov7x20.c
+++ b/drivers/media/video/ovcamchip/ov7x20.c
@@ -232,10 +232,9 @@ static int ov7x20_init(struct i2c_client *c)
if (rc < 0)
return rc;
- ov->spriv = s = kmalloc(sizeof *s, GFP_KERNEL);
+ ov->spriv = s = kzalloc(sizeof *s, GFP_KERNEL);
if (!s)
return -ENOMEM;
- memset(s, 0, sizeof *s);
s->auto_brt = 1;
s->auto_exp = DFL_AUTO_EXP;
diff --git a/drivers/media/video/ovcamchip/ovcamchip_core.c b/drivers/media/video/ovcamchip/ovcamchip_core.c
index 2de34ebf067..e76b53d5909 100644
--- a/drivers/media/video/ovcamchip/ovcamchip_core.c
+++ b/drivers/media/video/ovcamchip/ovcamchip_core.c
@@ -316,12 +316,11 @@ static int ovcamchip_attach(struct i2c_adapter *adap)
c->adapter = adap;
strcpy(c->name, "OV????");
- ov = kmalloc(sizeof *ov, GFP_KERNEL);
+ ov = kzalloc(sizeof *ov, GFP_KERNEL);
if (!ov) {
rc = -ENOMEM;
goto no_ov;
}
- memset(ov, 0, sizeof *ov);
i2c_set_clientdata(c, ov);
rc = ovcamchip_detect(c);
@@ -410,11 +409,11 @@ static int ovcamchip_command(struct i2c_client *c, unsigned int cmd, void *arg)
/* ----------------------------------------------------------------------- */
static struct i2c_driver driver = {
- .owner = THIS_MODULE,
- .name = "ovcamchip",
+ .driver = {
+ .name = "ovcamchip",
+ },
.id = I2C_DRIVERID_OVCAMCHIP,
.class = I2C_CLASS_CAM_DIGITAL,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = ovcamchip_attach,
.detach_client = ovcamchip_detach,
.command = ovcamchip_command,
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index 2504207b2e3..9e644863948 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -883,6 +883,7 @@ static struct file_operations pms_fops = {
.open = video_exclusive_open,
.release = video_exclusive_release,
.ioctl = pms_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.read = pms_read,
.llseek = no_llseek,
};
diff --git a/drivers/media/video/saa5246a.c b/drivers/media/video/saa5246a.c
index b8054da31ff..2ce01020130 100644
--- a/drivers/media/video/saa5246a.c
+++ b/drivers/media/video/saa5246a.c
@@ -83,13 +83,12 @@ static int saa5246a_attach(struct i2c_adapter *adap, int addr, int kind)
client_template.adapter = adap;
client_template.addr = addr;
memcpy(client, &client_template, sizeof(*client));
- t = kmalloc(sizeof(*t), GFP_KERNEL);
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
if(t==NULL)
{
kfree(client);
return -ENOMEM;
}
- memset(t, 0, sizeof(*t));
strlcpy(client->name, IF_NAME, I2C_NAME_SIZE);
init_MUTEX(&t->lock);
@@ -151,25 +150,18 @@ static int saa5246a_detach(struct i2c_client *client)
return 0;
}
-static int saa5246a_command(struct i2c_client *device, unsigned int cmd,
- void *arg)
-{
- return -EINVAL;
-}
-
/*
* I2C interfaces
*/
static struct i2c_driver i2c_driver_videotext =
{
- .owner = THIS_MODULE,
- .name = IF_NAME, /* name */
+ .driver = {
+ .name = IF_NAME, /* name */
+ },
.id = I2C_DRIVERID_SAA5249, /* in i2c.h */
- .flags = I2C_DF_NOTIFY,
.attach_adapter = saa5246a_probe,
.detach_client = saa5246a_detach,
- .command = saa5246a_command
};
static struct i2c_client client_template = {
diff --git a/drivers/media/video/saa5249.c b/drivers/media/video/saa5249.c
index 7ffa2e9a9bf..5694eb58c3a 100644
--- a/drivers/media/video/saa5249.c
+++ b/drivers/media/video/saa5249.c
@@ -151,13 +151,12 @@ static int saa5249_attach(struct i2c_adapter *adap, int addr, int kind)
client_template.adapter = adap;
client_template.addr = addr;
memcpy(client, &client_template, sizeof(*client));
- t = kmalloc(sizeof(*t), GFP_KERNEL);
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
if(t==NULL)
{
kfree(client);
return -ENOMEM;
}
- memset(t, 0, sizeof(*t));
strlcpy(client->name, IF_NAME, I2C_NAME_SIZE);
init_MUTEX(&t->lock);
@@ -165,7 +164,7 @@ static int saa5249_attach(struct i2c_adapter *adap, int addr, int kind)
* Now create a video4linux device
*/
- vd = (struct video_device *)kmalloc(sizeof(struct video_device), GFP_KERNEL);
+ vd = kmalloc(sizeof(struct video_device), GFP_KERNEL);
if(vd==NULL)
{
kfree(t);
@@ -226,23 +225,16 @@ static int saa5249_detach(struct i2c_client *client)
return 0;
}
-static int saa5249_command(struct i2c_client *device,
- unsigned int cmd, void *arg)
-{
- return -EINVAL;
-}
-
/* new I2C driver support */
static struct i2c_driver i2c_driver_videotext =
{
- .owner = THIS_MODULE,
- .name = IF_NAME, /* name */
+ .driver = {
+ .name = IF_NAME, /* name */
+ },
.id = I2C_DRIVERID_SAA5249, /* in i2c.h */
- .flags = I2C_DF_NOTIFY,
.attach_adapter = saa5249_probe,
.detach_client = saa5249_detach,
- .command = saa5249_command
};
static struct i2c_client client_template = {
@@ -709,6 +701,7 @@ static struct file_operations saa_fops = {
.open = saa5249_open,
.release = saa5249_release,
.ioctl = saa5249_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
diff --git a/drivers/media/video/saa6588.c b/drivers/media/video/saa6588.c
index 923322503e8..e70b17ef36e 100644
--- a/drivers/media/video/saa6588.c
+++ b/drivers/media/video/saa6588.c
@@ -427,18 +427,8 @@ static int saa6588_attach(struct i2c_adapter *adap, int addr, int kind)
static int saa6588_probe(struct i2c_adapter *adap)
{
-#ifdef I2C_CLASS_TV_ANALOG
if (adap->class & I2C_CLASS_TV_ANALOG)
return i2c_probe(adap, &addr_data, saa6588_attach);
-#else
- switch (adap->id) {
- case I2C_HW_B_BT848:
- case I2C_HW_B_RIVA:
- case I2C_HW_SAA7134:
- return i2c_probe(adap, &addr_data, saa6588_attach);
- break;
- }
-#endif
return 0;
}
@@ -495,10 +485,10 @@ static int saa6588_command(struct i2c_client *client, unsigned int cmd,
/* ----------------------------------------------------------------------- */
static struct i2c_driver driver = {
- .owner = THIS_MODULE,
- .name = "i2c saa6588 driver",
+ .driver = {
+ .name = "saa6588",
+ },
.id = -1, /* FIXME */
- .flags = I2C_DF_NOTIFY,
.attach_adapter = saa6588_probe,
.detach_client = saa6588_detach,
.command = saa6588_command,
@@ -506,7 +496,6 @@ static struct i2c_driver driver = {
static struct i2c_client client_template = {
.name = "saa6588",
- .flags = I2C_CLIENT_ALLOW_USE,
.driver = &driver,
};
diff --git a/drivers/media/video/saa7110.c b/drivers/media/video/saa7110.c
index e116bdbed31..7bb85a7b326 100644
--- a/drivers/media/video/saa7110.c
+++ b/drivers/media/video/saa7110.c
@@ -494,22 +494,19 @@ saa7110_detect_client (struct i2c_adapter *adapter,
I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
return 0;
- client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
if (client == 0)
return -ENOMEM;
- memset(client, 0, sizeof(struct i2c_client));
client->addr = address;
client->adapter = adapter;
client->driver = &i2c_driver_saa7110;
- client->flags = I2C_CLIENT_ALLOW_USE;
strlcpy(I2C_NAME(client), "saa7110", sizeof(I2C_NAME(client)));
- decoder = kmalloc(sizeof(struct saa7110), GFP_KERNEL);
+ decoder = kzalloc(sizeof(struct saa7110), GFP_KERNEL);
if (decoder == 0) {
kfree(client);
return -ENOMEM;
}
- memset(decoder, 0, sizeof(struct saa7110));
decoder->norm = VIDEO_MODE_PAL;
decoder->input = 0;
decoder->enable = 1;
@@ -587,11 +584,11 @@ saa7110_detach_client (struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static struct i2c_driver i2c_driver_saa7110 = {
- .owner = THIS_MODULE,
- .name = "saa7110",
+ .driver = {
+ .name = "saa7110",
+ },
.id = I2C_DRIVERID_SAA7110,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = saa7110_attach_adapter,
.detach_client = saa7110_detach_client,
diff --git a/drivers/media/video/saa7111.c b/drivers/media/video/saa7111.c
index fe8a5e45396..8c06592b37f 100644
--- a/drivers/media/video/saa7111.c
+++ b/drivers/media/video/saa7111.c
@@ -511,22 +511,19 @@ saa7111_detect_client (struct i2c_adapter *adapter,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return 0;
- client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
if (client == 0)
return -ENOMEM;
- memset(client, 0, sizeof(struct i2c_client));
client->addr = address;
client->adapter = adapter;
client->driver = &i2c_driver_saa7111;
- client->flags = I2C_CLIENT_ALLOW_USE;
strlcpy(I2C_NAME(client), "saa7111", sizeof(I2C_NAME(client)));
- decoder = kmalloc(sizeof(struct saa7111), GFP_KERNEL);
+ decoder = kzalloc(sizeof(struct saa7111), GFP_KERNEL);
if (decoder == NULL) {
kfree(client);
return -ENOMEM;
}
- memset(decoder, 0, sizeof(struct saa7111));
decoder->norm = VIDEO_MODE_NTSC;
decoder->input = 0;
decoder->enable = 1;
@@ -590,11 +587,11 @@ saa7111_detach_client (struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static struct i2c_driver i2c_driver_saa7111 = {
- .owner = THIS_MODULE,
- .name = "saa7111",
+ .driver = {
+ .name = "saa7111",
+ },
.id = I2C_DRIVERID_SAA7111A,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = saa7111_attach_adapter,
.detach_client = saa7111_detach_client,
diff --git a/drivers/media/video/saa7114.c b/drivers/media/video/saa7114.c
index d9f50e2f7b9..fd0a4b4ef01 100644
--- a/drivers/media/video/saa7114.c
+++ b/drivers/media/video/saa7114.c
@@ -852,22 +852,19 @@ saa7114_detect_client (struct i2c_adapter *adapter,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return 0;
- client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
if (client == 0)
return -ENOMEM;
- memset(client, 0, sizeof(struct i2c_client));
client->addr = address;
client->adapter = adapter;
client->driver = &i2c_driver_saa7114;
- client->flags = I2C_CLIENT_ALLOW_USE;
strlcpy(I2C_NAME(client), "saa7114", sizeof(I2C_NAME(client)));
- decoder = kmalloc(sizeof(struct saa7114), GFP_KERNEL);
+ decoder = kzalloc(sizeof(struct saa7114), GFP_KERNEL);
if (decoder == NULL) {
kfree(client);
return -ENOMEM;
}
- memset(decoder, 0, sizeof(struct saa7114));
decoder->norm = VIDEO_MODE_NTSC;
decoder->input = -1;
decoder->enable = 1;
@@ -1204,11 +1201,11 @@ saa7114_detach_client (struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static struct i2c_driver i2c_driver_saa7114 = {
- .owner = THIS_MODULE,
- .name = "saa7114",
+ .driver = {
+ .name = "saa7114",
+ },
.id = I2C_DRIVERID_SAA7114,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = saa7114_attach_adapter,
.detach_client = saa7114_detach_client,
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index e717e30d818..048d000941c 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -39,30 +39,18 @@
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
+#include <media/audiochip.h>
+#include <asm/div64.h>
MODULE_DESCRIPTION("Philips SAA7114/SAA7115 video decoder driver");
MODULE_AUTHOR("Maxim Yevtyushkin, Kevin Thayer, Chris Kennedy, Hans Verkuil");
MODULE_LICENSE("GPL");
static int debug = 0;
-module_param(debug, int, 0644);
+module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
-#define saa7115_dbg(fmt,arg...) \
- do { \
- if (debug) \
- printk(KERN_INFO "%s debug %d-%04x: " fmt, client->driver->name, \
- i2c_adapter_id(client->adapter), client->addr , ## arg); \
- } while (0)
-
-#define saa7115_err(fmt, arg...) do { \
- printk(KERN_ERR "%s %d-%04x: " fmt, client->driver->name, \
- i2c_adapter_id(client->adapter), client->addr , ## arg); } while (0)
-#define saa7115_info(fmt, arg...) do { \
- printk(KERN_INFO "%s %d-%04x: " fmt, client->driver->name, \
- i2c_adapter_id(client->adapter), client->addr , ## arg); } while (0)
-
static unsigned short normal_i2c[] = { 0x42 >> 1, 0x40 >> 1, I2C_CLIENT_END };
@@ -72,12 +60,13 @@ struct saa7115_state {
v4l2_std_id std;
int input;
int enable;
+ int radio;
int bright;
int contrast;
int hue;
int sat;
enum v4l2_chip_ident ident;
- enum v4l2_audio_clock_freq audclk_freq;
+ u32 audclk_freq;
};
/* ----------------------------------------------------------------------- */
@@ -468,80 +457,6 @@ static const unsigned char saa7115_init_misc[] = {
0x00, 0x00
};
-/* ============== SAA7715 AUDIO settings ============= */
-
-/* 48.0 kHz */
-static const unsigned char saa7115_cfg_48_audio[] = {
- 0x34, 0xce,
- 0x35, 0xfb,
- 0x36, 0x30,
- 0x00, 0x00
-};
-
-/* 44.1 kHz */
-static const unsigned char saa7115_cfg_441_audio[] = {
- 0x34, 0xf2,
- 0x35, 0x00,
- 0x36, 0x2d,
- 0x00, 0x00
-};
-
-/* 32.0 kHz */
-static const unsigned char saa7115_cfg_32_audio[] = {
- 0x34, 0xdf,
- 0x35, 0xa7,
- 0x36, 0x20,
- 0x00, 0x00
-};
-
-/* 48.0 kHz 60hz */
-static const unsigned char saa7115_cfg_60hz_48_audio[] = {
- 0x30, 0xcd,
- 0x31, 0x20,
- 0x32, 0x03,
- 0x00, 0x00
-};
-
-/* 48.0 kHz 50hz */
-static const unsigned char saa7115_cfg_50hz_48_audio[] = {
- 0x30, 0x00,
- 0x31, 0xc0,
- 0x32, 0x03,
- 0x00, 0x00
-};
-
-/* 44.1 kHz 60hz */
-static const unsigned char saa7115_cfg_60hz_441_audio[] = {
- 0x30, 0xbc,
- 0x31, 0xdf,
- 0x32, 0x02,
- 0x00, 0x00
-};
-
-/* 44.1 kHz 50hz */
-static const unsigned char saa7115_cfg_50hz_441_audio[] = {
- 0x30, 0x00,
- 0x31, 0x72,
- 0x32, 0x03,
- 0x00, 0x00
-};
-
-/* 32.0 kHz 60hz */
-static const unsigned char saa7115_cfg_60hz_32_audio[] = {
- 0x30, 0xde,
- 0x31, 0x15,
- 0x32, 0x02,
- 0x00, 0x00
-};
-
-/* 32.0 kHz 50hz */
-static const unsigned char saa7115_cfg_50hz_32_audio[] = {
- 0x30, 0x00,
- 0x31, 0x80,
- 0x32, 0x02,
- 0x00, 0x00
-};
-
static int saa7115_odd_parity(u8 c)
{
c ^= (c >> 4);
@@ -626,40 +541,38 @@ static int saa7115_decode_wss(u8 * p)
}
-static int saa7115_set_audio_clock_freq(struct i2c_client *client, enum v4l2_audio_clock_freq freq)
+static int saa7115_set_audio_clock_freq(struct i2c_client *client, u32 freq)
{
struct saa7115_state *state = i2c_get_clientdata(client);
+ u32 acpf;
+ u32 acni;
+ u32 hz;
+ u64 f;
- saa7115_dbg("set audio clock freq: %d\n", freq);
- switch (freq) {
- case V4L2_AUDCLK_32_KHZ:
- saa7115_writeregs(client, saa7115_cfg_32_audio);
- if (state->std & V4L2_STD_525_60) {
- saa7115_writeregs(client, saa7115_cfg_60hz_32_audio);
- } else {
- saa7115_writeregs(client, saa7115_cfg_50hz_32_audio);
- }
- break;
- case V4L2_AUDCLK_441_KHZ:
- saa7115_writeregs(client, saa7115_cfg_441_audio);
- if (state->std & V4L2_STD_525_60) {
- saa7115_writeregs(client, saa7115_cfg_60hz_441_audio);
- } else {
- saa7115_writeregs(client, saa7115_cfg_50hz_441_audio);
- }
- break;
- case V4L2_AUDCLK_48_KHZ:
- saa7115_writeregs(client, saa7115_cfg_48_audio);
- if (state->std & V4L2_STD_525_60) {
- saa7115_writeregs(client, saa7115_cfg_60hz_48_audio);
- } else {
- saa7115_writeregs(client, saa7115_cfg_50hz_48_audio);
- }
- break;
- default:
- saa7115_dbg("invalid audio setting %d\n", freq);
- return -EINVAL;
- }
+ v4l_dbg(1, debug, client, "set audio clock freq: %d\n", freq);
+
+ /* sanity check */
+ if (freq < 32000 || freq > 48000)
+ return -EINVAL;
+
+ /* hz is the refresh rate times 100 */
+ hz = (state->std & V4L2_STD_525_60) ? 5994 : 5000;
+ /* acpf = (256 * freq) / field_frequency == (256 * 100 * freq) / hz */
+ acpf = (25600 * freq) / hz;
+ /* acni = (256 * freq * 2^23) / crystal_frequency =
+ (freq * 2^(8+23)) / crystal_frequency =
+ (freq << 31) / 32.11 MHz */
+ f = freq;
+ f = f << 31;
+ do_div(f, 32110000);
+ acni = f;
+
+ saa7115_write(client, 0x30, acpf & 0xff);
+ saa7115_write(client, 0x31, (acpf >> 8) & 0xff);
+ saa7115_write(client, 0x32, (acpf >> 16) & 0x03);
+ saa7115_write(client, 0x34, acni & 0xff);
+ saa7115_write(client, 0x35, (acni >> 8) & 0xff);
+ saa7115_write(client, 0x36, (acni >> 16) & 0x3f);
state->audclk_freq = freq;
return 0;
}
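
A rough user-space check of the acpf/acni arithmetic introduced above (illustration, not part of the patch), assuming 48 kHz audio on a 525/60 standard (field rate 59.94 Hz). It reproduces the values from the deleted saa7115_cfg_60hz_48_audio and saa7115_cfg_48_audio tables (acpf = 0x0320cd, acni = 0x30fbce).

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t freq = 48000;                      /* audio sample rate in Hz */
	uint32_t hz = 5994;                         /* field rate * 100 */
	uint32_t acpf = (25600u * freq) / hz;       /* audio clocks per field */
	uint64_t f = (uint64_t)freq << 31;
	uint32_t acni = (uint32_t)(f / 32110000u);  /* increment for the 32.11 MHz crystal */

	printf("acpf = 0x%06x (registers 0x30-0x32)\n", acpf);
	printf("acni = 0x%06x (registers 0x34-0x36)\n", acni);
	return 0;
}
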
@@ -671,7 +584,7 @@ static int saa7115_set_v4lctrl(struct i2c_client *client, struct v4l2_control *c
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
if (ctrl->value < 0 || ctrl->value > 255) {
- saa7115_err("invalid brightness setting %d\n", ctrl->value);
+ v4l_err(client, "invalid brightness setting %d\n", ctrl->value);
return -ERANGE;
}
@@ -681,7 +594,7 @@ static int saa7115_set_v4lctrl(struct i2c_client *client, struct v4l2_control *c
case V4L2_CID_CONTRAST:
if (ctrl->value < 0 || ctrl->value > 127) {
- saa7115_err("invalid contrast setting %d\n", ctrl->value);
+ v4l_err(client, "invalid contrast setting %d\n", ctrl->value);
return -ERANGE;
}
@@ -691,7 +604,7 @@ static int saa7115_set_v4lctrl(struct i2c_client *client, struct v4l2_control *c
case V4L2_CID_SATURATION:
if (ctrl->value < 0 || ctrl->value > 127) {
- saa7115_err("invalid saturation setting %d\n", ctrl->value);
+ v4l_err(client, "invalid saturation setting %d\n", ctrl->value);
return -ERANGE;
}
@@ -701,13 +614,16 @@ static int saa7115_set_v4lctrl(struct i2c_client *client, struct v4l2_control *c
case V4L2_CID_HUE:
if (ctrl->value < -127 || ctrl->value > 127) {
- saa7115_err("invalid hue setting %d\n", ctrl->value);
+ v4l_err(client, "invalid hue setting %d\n", ctrl->value);
return -ERANGE;
}
state->hue = ctrl->value;
saa7115_write(client, 0x0d, state->hue);
break;
+
+ default:
+ return -EINVAL;
}
return 0;
@@ -742,12 +658,22 @@ static void saa7115_set_v4lstd(struct i2c_client *client, v4l2_std_id std)
struct saa7115_state *state = i2c_get_clientdata(client);
int taskb = saa7115_read(client, 0x80) & 0x10;
+ /* Prevent unnecessary standard changes. During a standard
+ change the I-Port is temporarily disabled. Any devices
+ reading from that port can get confused.
+ Note that VIDIOC_S_STD is also used to switch from
+ radio to TV mode, so if a VIDIOC_S_STD is broadcast to
+ all I2C devices then you do not want to have an unwanted
+ side-effect here. */
+ if (std == state->std)
+ return;
+
// This works for NTSC-M, SECAM-L and the 50Hz PAL variants.
if (std & V4L2_STD_525_60) {
- saa7115_dbg("decoder set standard 60 Hz\n");
+ v4l_dbg(1, debug, client, "decoder set standard 60 Hz\n");
saa7115_writeregs(client, saa7115_cfg_60hz_video);
} else {
- saa7115_dbg("decoder set standard 50 Hz\n");
+ v4l_dbg(1, debug, client, "decoder set standard 50 Hz\n");
saa7115_writeregs(client, saa7115_cfg_50hz_video);
}
@@ -772,24 +698,17 @@ static v4l2_std_id saa7115_get_v4lstd(struct i2c_client *client)
static void saa7115_log_status(struct i2c_client *client)
{
struct saa7115_state *state = i2c_get_clientdata(client);
- char *audfreq = "undefined";
int reg1e, reg1f;
int signalOk;
int vcr;
- switch (state->audclk_freq) {
- case V4L2_AUDCLK_32_KHZ: audfreq = "32 kHz"; break;
- case V4L2_AUDCLK_441_KHZ: audfreq = "44.1 kHz"; break;
- case V4L2_AUDCLK_48_KHZ: audfreq = "48 kHz"; break;
- }
-
- saa7115_info("Audio frequency: %s\n", audfreq);
+ v4l_info(client, "Audio frequency: %d Hz\n", state->audclk_freq);
if (client->name[6] == '4') {
/* status for the saa7114 */
reg1f = saa7115_read(client, 0x1f);
signalOk = (reg1f & 0xc1) == 0x81;
- saa7115_info("Video signal: %s\n", signalOk ? "ok" : "bad");
- saa7115_info("Frequency: %s\n", (reg1f & 0x20) ? "60Hz" : "50Hz");
+ v4l_info(client, "Video signal: %s\n", signalOk ? "ok" : "bad");
+ v4l_info(client, "Frequency: %s\n", (reg1f & 0x20) ? "60 Hz" : "50 Hz");
return;
}
@@ -800,21 +719,26 @@ static void saa7115_log_status(struct i2c_client *client)
signalOk = (reg1f & 0xc1) == 0x81 && (reg1e & 0xc0) == 0x80;
vcr = !(reg1f & 0x10);
- saa7115_info("Video signal: %s\n", signalOk ? (vcr ? "VCR" : "broadcast/DVD") : "bad");
- saa7115_info("Frequency: %s\n", (reg1f & 0x20) ? "60Hz" : "50Hz");
+ if (state->input >= 6) {
+ v4l_info(client, "Input: S-Video %d\n", state->input - 6);
+ } else {
+ v4l_info(client, "Input: Composite %d\n", state->input);
+ }
+ v4l_info(client, "Video signal: %s\n", signalOk ? (vcr ? "VCR" : "broadcast/DVD") : "bad");
+ v4l_info(client, "Frequency: %s\n", (reg1f & 0x20) ? "60 Hz" : "50 Hz");
switch (reg1e & 0x03) {
case 1:
- saa7115_info("Detected format: NTSC\n");
+ v4l_info(client, "Detected format: NTSC\n");
break;
case 2:
- saa7115_info("Detected format: PAL\n");
+ v4l_info(client, "Detected format: PAL\n");
break;
case 3:
- saa7115_info("Detected format: SECAM\n");
+ v4l_info(client, "Detected format: SECAM\n");
break;
default:
- saa7115_info("Detected format: BW/No color\n");
+ v4l_info(client, "Detected format: BW/No color\n");
break;
}
}
@@ -939,7 +863,7 @@ static int saa7115_set_v4lfmt(struct i2c_client *client, struct v4l2_format *fmt
pix = &(fmt->fmt.pix);
- saa7115_dbg("decoder set size\n");
+ v4l_dbg(1, debug, client, "decoder set size\n");
/* FIXME need better bounds checking here */
if ((pix->width < 1) || (pix->width > 1440))
@@ -965,7 +889,7 @@ static int saa7115_set_v4lfmt(struct i2c_client *client, struct v4l2_format *fmt
HPSC = HPSC ? HPSC : 1;
HFSC = (int)((1024 * 720) / (HPSC * pix->width));
- saa7115_dbg("Hpsc: 0x%05x, Hfsc: 0x%05x\n", HPSC, HFSC);
+ v4l_dbg(1, debug, client, "Hpsc: 0x%05x, Hfsc: 0x%05x\n", HPSC, HFSC);
/* FIXME hardcodes to "Task B"
* write H prescaler integer */
saa7115_write(client, 0xd0, (u8) (HPSC & 0x3f));
@@ -979,10 +903,10 @@ static int saa7115_set_v4lfmt(struct i2c_client *client, struct v4l2_format *fmt
saa7115_write(client, 0xDD, (u8) ((HFSC >> 9) & 0xff));
} else {
if (is_50hz) {
- saa7115_dbg("Setting full 50hz width\n");
+ v4l_dbg(1, debug, client, "Setting full 50hz width\n");
saa7115_writeregs(client, saa7115_cfg_50hz_fullres_x);
} else {
- saa7115_dbg("Setting full 60hz width\n");
+ v4l_dbg(1, debug, client, "Setting full 60hz width\n");
saa7115_writeregs(client, saa7115_cfg_60hz_fullres_x);
}
}
@@ -991,7 +915,7 @@ static int saa7115_set_v4lfmt(struct i2c_client *client, struct v4l2_format *fmt
if (pix->height != Vsrc) {
VSCY = (int)((1024 * Vsrc) / pix->height);
- saa7115_dbg("Vsrc: %d, Vscy: 0x%05x\n", Vsrc, VSCY);
+ v4l_dbg(1, debug, client, "Vsrc: %d, Vscy: 0x%05x\n", Vsrc, VSCY);
/* Correct Contrast and Luminance */
saa7115_write(client, 0xd5, (u8) (64 * 1024 / VSCY));
@@ -1005,10 +929,10 @@ static int saa7115_set_v4lfmt(struct i2c_client *client, struct v4l2_format *fmt
saa7115_write(client, 0xe3, (u8) ((VSCY >> 8) & 0xff));
} else {
if (is_50hz) {
- saa7115_dbg("Setting full 50Hz height\n");
+ v4l_dbg(1, debug, client, "Setting full 50Hz height\n");
saa7115_writeregs(client, saa7115_cfg_50hz_fullres_y);
} else {
- saa7115_dbg("Setting full 60hz height\n");
+ v4l_dbg(1, debug, client, "Setting full 60hz height\n");
saa7115_writeregs(client, saa7115_cfg_60hz_fullres_y);
}
}
@@ -1088,6 +1012,48 @@ static void saa7115_decode_vbi_line(struct i2c_client *client,
/* ============ SAA7115 AUDIO settings (end) ============= */
+static struct v4l2_queryctrl saa7115_qctrl[] = {
+ {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+ .default_value = 128,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_CONTRAST,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Contrast",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+ .default_value = 64,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_SATURATION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Saturation",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+ .default_value = 64,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_HUE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Hue",
+ .minimum = -128,
+ .maximum = 127,
+ .step = 1,
+ .default_value = 0,
+ .flags = 0,
+ },
+};
+
+/* ----------------------------------------------------------------------- */
+
static int saa7115_command(struct i2c_client *client, unsigned int cmd, void *arg)
{
struct saa7115_state *state = i2c_get_clientdata(client);
@@ -1102,16 +1068,18 @@ static int saa7115_command(struct i2c_client *client, unsigned int cmd, void *ar
return saa7115_get_v4lfmt(client, (struct v4l2_format *)arg);
case VIDIOC_INT_AUDIO_CLOCK_FREQ:
- return saa7115_set_audio_clock_freq(client, *(enum v4l2_audio_clock_freq *)arg);
+ return saa7115_set_audio_clock_freq(client, *(u32 *)arg);
case VIDIOC_G_TUNER:
{
struct v4l2_tuner *vt = arg;
int status;
+ if (state->radio)
+ break;
status = saa7115_read(client, 0x1f);
- saa7115_dbg("status: 0x%02x\n", status);
+ v4l_dbg(1, debug, client, "status: 0x%02x\n", status);
vt->signal = ((status & (1 << 6)) == 0) ? 0xffff : 0x0;
break;
}
@@ -1126,20 +1094,38 @@ static int saa7115_command(struct i2c_client *client, unsigned int cmd, void *ar
case VIDIOC_S_CTRL:
return saa7115_set_v4lctrl(client, (struct v4l2_control *)arg);
+ case VIDIOC_QUERYCTRL:
+ {
+ struct v4l2_queryctrl *qc = arg;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(saa7115_qctrl); i++)
+ if (qc->id && qc->id == saa7115_qctrl[i].id) {
+ memcpy(qc, &saa7115_qctrl[i], sizeof(*qc));
+ return 0;
+ }
+ return -EINVAL;
+ }
+
case VIDIOC_G_STD:
*(v4l2_std_id *)arg = saa7115_get_v4lstd(client);
break;
case VIDIOC_S_STD:
+ state->radio = 0;
saa7115_set_v4lstd(client, *(v4l2_std_id *)arg);
break;
+ case AUDC_SET_RADIO:
+ state->radio = 1;
+ break;
+
case VIDIOC_G_INPUT:
*(int *)arg = state->input;
break;
case VIDIOC_S_INPUT:
- saa7115_dbg("decoder set input %d\n", *iarg);
+ v4l_dbg(1, debug, client, "decoder set input %d\n", *iarg);
/* inputs from 0-9 are available */
if (*iarg < 0 || *iarg > 9) {
return -EINVAL;
@@ -1147,7 +1133,7 @@ static int saa7115_command(struct i2c_client *client, unsigned int cmd, void *ar
if (state->input == *iarg)
break;
- saa7115_dbg("now setting %s input\n",
+ v4l_dbg(1, debug, client, "now setting %s input\n",
*iarg >= 6 ? "S-Video" : "Composite");
state->input = *iarg;
@@ -1164,7 +1150,7 @@ static int saa7115_command(struct i2c_client *client, unsigned int cmd, void *ar
case VIDIOC_STREAMON:
case VIDIOC_STREAMOFF:
- saa7115_dbg("%s output\n",
+ v4l_dbg(1, debug, client, "%s output\n",
(cmd == VIDIOC_STREAMON) ? "enable" : "disable");
if (state->enable != (cmd == VIDIOC_STREAMON)) {
@@ -1178,7 +1164,7 @@ static int saa7115_command(struct i2c_client *client, unsigned int cmd, void *ar
break;
case VIDIOC_INT_RESET:
- saa7115_dbg("decoder RESET\n");
+ v4l_dbg(1, debug, client, "decoder RESET\n");
saa7115_writeregs(client, saa7115_cfg_reset_scaler);
break;
@@ -1263,48 +1249,46 @@ static int saa7115_attach(struct i2c_adapter *adapter, int address, int kind)
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return 0;
- client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
if (client == 0)
return -ENOMEM;
- memset(client, 0, sizeof(struct i2c_client));
client->addr = address;
client->adapter = adapter;
client->driver = &i2c_driver_saa7115;
- client->flags = I2C_CLIENT_ALLOW_USE;
snprintf(client->name, sizeof(client->name) - 1, "saa7115");
- saa7115_dbg("detecting saa7115 client on address 0x%x\n", address << 1);
+ v4l_dbg(1, debug, client, "detecting saa7115 client on address 0x%x\n", address << 1);
saa7115_write(client, 0, 5);
chip_id = saa7115_read(client, 0) & 0x0f;
if (chip_id != 4 && chip_id != 5) {
- saa7115_dbg("saa7115 not found\n");
+ v4l_dbg(1, debug, client, "saa7115 not found\n");
kfree(client);
return 0;
}
if (chip_id == 4) {
snprintf(client->name, sizeof(client->name) - 1, "saa7114");
}
- saa7115_info("saa711%d found @ 0x%x (%s)\n", chip_id, address << 1, adapter->name);
+ v4l_info(client, "saa711%d found @ 0x%x (%s)\n", chip_id, address << 1, adapter->name);
- state = kmalloc(sizeof(struct saa7115_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct saa7115_state), GFP_KERNEL);
i2c_set_clientdata(client, state);
if (state == NULL) {
kfree(client);
return -ENOMEM;
}
- memset(state, 0, sizeof(struct saa7115_state));
state->std = V4L2_STD_NTSC;
state->input = -1;
state->enable = 1;
+ state->radio = 0;
state->bright = 128;
state->contrast = 64;
state->hue = 0;
state->sat = 64;
state->ident = (chip_id == 4) ? V4L2_IDENT_SAA7114 : V4L2_IDENT_SAA7115;
- state->audclk_freq = V4L2_AUDCLK_48_KHZ;
+ state->audclk_freq = 48000;
- saa7115_dbg("writing init values\n");
+ v4l_dbg(1, debug, client, "writing init values\n");
/* init to 60hz/48khz */
saa7115_writeregs(client, saa7115_init_auto_input);
@@ -1312,13 +1296,12 @@ static int saa7115_attach(struct i2c_adapter *adapter, int address, int kind)
saa7115_writeregs(client, saa7115_cfg_60hz_fullres_x);
saa7115_writeregs(client, saa7115_cfg_60hz_fullres_y);
saa7115_writeregs(client, saa7115_cfg_60hz_video);
- saa7115_writeregs(client, saa7115_cfg_48_audio);
- saa7115_writeregs(client, saa7115_cfg_60hz_48_audio);
+ saa7115_set_audio_clock_freq(client, state->audclk_freq);
saa7115_writeregs(client, saa7115_cfg_reset_scaler);
i2c_attach_client(client);
- saa7115_dbg("status: (1E) 0x%02x, (1F) 0x%02x\n",
+ v4l_dbg(1, debug, client, "status: (1E) 0x%02x, (1F) 0x%02x\n",
saa7115_read(client, 0x1e), saa7115_read(client, 0x1f));
return 0;
@@ -1326,11 +1309,7 @@ static int saa7115_attach(struct i2c_adapter *adapter, int address, int kind)
static int saa7115_probe(struct i2c_adapter *adapter)
{
-#ifdef I2C_CLASS_TV_ANALOG
if (adapter->class & I2C_CLASS_TV_ANALOG)
-#else
- if (adapter->id == I2C_HW_B_BT848)
-#endif
return i2c_probe(adapter, &addr_data, &saa7115_attach);
return 0;
}
@@ -1354,13 +1333,13 @@ static int saa7115_detach(struct i2c_client *client)
/* i2c implementation */
static struct i2c_driver i2c_driver_saa7115 = {
- .name = "saa7115",
+ .driver = {
+ .name = "saa7115",
+ },
.id = I2C_DRIVERID_SAA711X,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = saa7115_probe,
.detach_client = saa7115_detach,
.command = saa7115_command,
- .owner = THIS_MODULE,
};
diff --git a/drivers/media/video/saa711x.c b/drivers/media/video/saa711x.c
index 31f7b950b01..ae53063875f 100644
--- a/drivers/media/video/saa711x.c
+++ b/drivers/media/video/saa711x.c
@@ -487,21 +487,18 @@ saa711x_detect_client (struct i2c_adapter *adapter,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return 0;
- client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
if (client == 0)
return -ENOMEM;
- memset(client, 0, sizeof(struct i2c_client));
client->addr = address;
client->adapter = adapter;
client->driver = &i2c_driver_saa711x;
- client->flags = I2C_CLIENT_ALLOW_USE;
strlcpy(I2C_NAME(client), "saa711x", sizeof(I2C_NAME(client)));
- decoder = kmalloc(sizeof(struct saa711x), GFP_KERNEL);
+ decoder = kzalloc(sizeof(struct saa711x), GFP_KERNEL);
if (decoder == NULL) {
kfree(client);
return -ENOMEM;
}
- memset(decoder, 0, sizeof(struct saa711x));
decoder->norm = VIDEO_MODE_NTSC;
decoder->input = 0;
decoder->enable = 1;
@@ -565,12 +562,10 @@ saa711x_detach_client (struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static struct i2c_driver i2c_driver_saa711x = {
- .owner = THIS_MODULE,
- .name = "saa711x",
-
+ .driver = {
+ .name = "saa711x",
+ },
.id = I2C_DRIVERID_SAA711X,
- .flags = I2C_DF_NOTIFY,
-
.attach_adapter = saa711x_attach_adapter,
.detach_client = saa711x_detach_client,
.command = saa711x_command,
diff --git a/drivers/media/video/saa7127.c b/drivers/media/video/saa7127.c
index c36f014f1fd..992c71774f3 100644
--- a/drivers/media/video/saa7127.c
+++ b/drivers/media/video/saa7127.c
@@ -66,28 +66,6 @@ module_param(test_image, int, 0644);
MODULE_PARM_DESC(debug, "debug level (0-2)");
MODULE_PARM_DESC(test_image, "test_image (0-1)");
-#define saa7127_dbg(fmt, arg...) \
- do { \
- if (debug >= 1) \
- printk(KERN_INFO "%s debug %d-%04x: " fmt, client->driver->name, \
- i2c_adapter_id(client->adapter), client->addr , ## arg); \
- } while (0)
-
-/* High volume debug. Use with care. */
-#define saa7127_dbg_highvol(fmt, arg...) \
- do { \
- if (debug == 2) \
- printk(KERN_INFO "%s debug %d-%04x: " fmt, client->driver->name, \
- i2c_adapter_id(client->adapter), client->addr , ## arg); \
- } while (0)
-
-#define saa7127_err(fmt, arg...) do { \
- printk(KERN_ERR "%s %d-%04x: " fmt, client->driver->name, \
- i2c_adapter_id(client->adapter), client->addr , ## arg); } while (0)
-#define saa7127_info(fmt, arg...) do { \
- printk(KERN_INFO "%s %d-%04x: " fmt, client->driver->name, \
- i2c_adapter_id(client->adapter), client->addr , ## arg); } while (0)
-
static unsigned short normal_i2c[] = { 0x88 >> 1, I2C_CLIENT_END };
@@ -334,7 +312,7 @@ static int saa7127_write(struct i2c_client *client, u8 reg, u8 val)
if (i2c_smbus_write_byte_data(client, reg, val) == 0)
return 0;
}
- saa7127_err("I2C Write Problem\n");
+ v4l_err(client, "I2C Write Problem\n");
return -1;
}
@@ -360,7 +338,7 @@ static int saa7127_set_vps(struct i2c_client *client, struct v4l2_sliced_vbi_dat
if (enable && (data->field != 0 || data->line != 16))
return -EINVAL;
if (state->vps_enable != enable) {
- saa7127_dbg("Turn VPS Signal %s\n", enable ? "on" : "off");
+ v4l_dbg(1, debug, client, "Turn VPS Signal %s\n", enable ? "on" : "off");
saa7127_write(client, 0x54, enable << 7);
state->vps_enable = enable;
}
@@ -372,7 +350,7 @@ static int saa7127_set_vps(struct i2c_client *client, struct v4l2_sliced_vbi_dat
state->vps_data[2] = data->data[11];
state->vps_data[3] = data->data[12];
state->vps_data[4] = data->data[13];
- saa7127_dbg("Set VPS data %02x %02x %02x %02x %02x\n",
+ v4l_dbg(1, debug, client, "Set VPS data %02x %02x %02x %02x %02x\n",
state->vps_data[0], state->vps_data[1],
state->vps_data[2], state->vps_data[3],
state->vps_data[4]);
@@ -395,7 +373,7 @@ static int saa7127_set_cc(struct i2c_client *client, struct v4l2_sliced_vbi_data
if (enable && (data->field != 0 || data->line != 21))
return -EINVAL;
if (state->cc_enable != enable) {
- saa7127_dbg("Turn CC %s\n", enable ? "on" : "off");
+ v4l_dbg(1, debug, client, "Turn CC %s\n", enable ? "on" : "off");
saa7127_write(client, SAA7127_REG_CLOSED_CAPTION,
(state->xds_enable << 7) | (enable << 6) | 0x11);
state->cc_enable = enable;
@@ -403,7 +381,7 @@ static int saa7127_set_cc(struct i2c_client *client, struct v4l2_sliced_vbi_data
if (!enable)
return 0;
- saa7127_dbg_highvol("CC data: %04x\n", cc);
+ v4l_dbg(2, debug, client, "CC data: %04x\n", cc);
saa7127_write(client, SAA7127_REG_LINE_21_ODD_0, cc & 0xff);
saa7127_write(client, SAA7127_REG_LINE_21_ODD_1, cc >> 8);
state->cc_data = cc;
@@ -421,7 +399,7 @@ static int saa7127_set_xds(struct i2c_client *client, struct v4l2_sliced_vbi_dat
if (enable && (data->field != 1 || data->line != 21))
return -EINVAL;
if (state->xds_enable != enable) {
- saa7127_dbg("Turn XDS %s\n", enable ? "on" : "off");
+ v4l_dbg(1, debug, client, "Turn XDS %s\n", enable ? "on" : "off");
saa7127_write(client, SAA7127_REG_CLOSED_CAPTION,
(enable << 7) | (state->cc_enable << 6) | 0x11);
state->xds_enable = enable;
@@ -429,7 +407,7 @@ static int saa7127_set_xds(struct i2c_client *client, struct v4l2_sliced_vbi_dat
if (!enable)
return 0;
- saa7127_dbg_highvol("XDS data: %04x\n", xds);
+ v4l_dbg(2, debug, client, "XDS data: %04x\n", xds);
saa7127_write(client, SAA7127_REG_LINE_21_EVEN_0, xds & 0xff);
saa7127_write(client, SAA7127_REG_LINE_21_EVEN_1, xds >> 8);
state->xds_data = xds;
@@ -446,7 +424,7 @@ static int saa7127_set_wss(struct i2c_client *client, struct v4l2_sliced_vbi_dat
if (enable && (data->field != 0 || data->line != 23))
return -EINVAL;
if (state->wss_enable != enable) {
- saa7127_dbg("Turn WSS %s\n", enable ? "on" : "off");
+ v4l_dbg(1, debug, client, "Turn WSS %s\n", enable ? "on" : "off");
saa7127_write(client, 0x27, enable << 7);
state->wss_enable = enable;
}
@@ -455,7 +433,7 @@ static int saa7127_set_wss(struct i2c_client *client, struct v4l2_sliced_vbi_dat
saa7127_write(client, 0x26, data->data[0]);
saa7127_write(client, 0x27, 0x80 | (data->data[1] & 0x3f));
- saa7127_dbg("WSS mode: %s\n", wss_strs[data->data[0] & 0xf]);
+ v4l_dbg(1, debug, client, "WSS mode: %s\n", wss_strs[data->data[0] & 0xf]);
state->wss_mode = (data->data[1] & 0x3f) << 8 | data->data[0];
return 0;
}
@@ -467,11 +445,11 @@ static int saa7127_set_video_enable(struct i2c_client *client, int enable)
struct saa7127_state *state = i2c_get_clientdata(client);
if (enable) {
- saa7127_dbg("Enable Video Output\n");
+ v4l_dbg(1, debug, client, "Enable Video Output\n");
saa7127_write(client, 0x2d, state->reg_2d);
saa7127_write(client, 0x61, state->reg_61);
} else {
- saa7127_dbg("Disable Video Output\n");
+ v4l_dbg(1, debug, client, "Disable Video Output\n");
saa7127_write(client, 0x2d, (state->reg_2d & 0xf0));
saa7127_write(client, 0x61, (state->reg_61 | 0xc0));
}
@@ -487,11 +465,11 @@ static int saa7127_set_std(struct i2c_client *client, v4l2_std_id std)
const struct i2c_reg_value *inittab;
if (std & V4L2_STD_525_60) {
- saa7127_dbg("Selecting 60 Hz video Standard\n");
+ v4l_dbg(1, debug, client, "Selecting 60 Hz video Standard\n");
inittab = saa7127_init_config_60hz;
state->reg_61 = SAA7127_60HZ_DAC_CONTROL;
} else {
- saa7127_dbg("Selecting 50 Hz video Standard\n");
+ v4l_dbg(1, debug, client, "Selecting 50 Hz video Standard\n");
inittab = saa7127_init_config_50hz;
state->reg_61 = SAA7127_50HZ_DAC_CONTROL;
}
@@ -542,7 +520,7 @@ static int saa7127_set_output_type(struct i2c_client *client, int output)
default:
return -EINVAL;
}
- saa7127_dbg("Selecting %s output type\n", output_strs[output]);
+ v4l_dbg(1, debug, client, "Selecting %s output type\n", output_strs[output]);
/* Configure Encoder */
saa7127_write(client, 0x2d, state->reg_2d);
@@ -559,12 +537,12 @@ static int saa7127_set_input_type(struct i2c_client *client, int input)
switch (input) {
case SAA7127_INPUT_TYPE_NORMAL: /* avia */
- saa7127_dbg("Selecting Normal Encoder Input\n");
+ v4l_dbg(1, debug, client, "Selecting Normal Encoder Input\n");
state->reg_3a_cb = 0;
break;
case SAA7127_INPUT_TYPE_TEST_IMAGE: /* color bar */
- saa7127_dbg("Selecting Color Bar generator\n");
+ v4l_dbg(1, debug, client, "Selecting Color Bar generator\n");
state->reg_3a_cb = 0x80;
break;
@@ -631,14 +609,14 @@ static int saa7127_command(struct i2c_client *client,
break;
case VIDIOC_LOG_STATUS:
- saa7127_info("Standard: %s\n", (state->std & V4L2_STD_525_60) ? "60 Hz" : "50 Hz");
- saa7127_info("Input: %s\n", state->input_type ? "color bars" : "normal");
- saa7127_info("Output: %s\n", state->video_enable ?
+ v4l_info(client, "Standard: %s\n", (state->std & V4L2_STD_525_60) ? "60 Hz" : "50 Hz");
+ v4l_info(client, "Input: %s\n", state->input_type ? "color bars" : "normal");
+ v4l_info(client, "Output: %s\n", state->video_enable ?
output_strs[state->output_type] : "disabled");
- saa7127_info("WSS: %s\n", state->wss_enable ?
+ v4l_info(client, "WSS: %s\n", state->wss_enable ?
wss_strs[state->wss_mode] : "disabled");
- saa7127_info("VPS: %s\n", state->vps_enable ? "enabled" : "disabled");
- saa7127_info("CC: %s\n", state->cc_enable ? "enabled" : "disabled");
+ v4l_info(client, "VPS: %s\n", state->vps_enable ? "enabled" : "disabled");
+ v4l_info(client, "CC: %s\n", state->cc_enable ? "enabled" : "disabled");
break;
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -711,18 +689,16 @@ static int saa7127_attach(struct i2c_adapter *adapter, int address, int kind)
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return 0;
- client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
if (client == 0)
return -ENOMEM;
- memset(client, 0, sizeof(struct i2c_client));
client->addr = address;
client->adapter = adapter;
client->driver = &i2c_driver_saa7127;
- client->flags = I2C_CLIENT_ALLOW_USE;
snprintf(client->name, sizeof(client->name) - 1, "saa7127");
- saa7127_dbg("detecting saa7127 client on address 0x%x\n", address << 1);
+ v4l_dbg(1, debug, client, "detecting saa7127 client on address 0x%x\n", address << 1);
/* First test register 0: Bits 5-7 are a version ID (should be 0),
and bit 2 should also be 0.
@@ -731,11 +707,11 @@ static int saa7127_attach(struct i2c_adapter *adapter, int address, int kind)
0x1d after a reset and not expected to ever change. */
if ((saa7127_read(client, 0) & 0xe4) != 0 ||
(saa7127_read(client, 0x29) & 0x3f) != 0x1d) {
- saa7127_dbg("saa7127 not found\n");
+ v4l_dbg(1, debug, client, "saa7127 not found\n");
kfree(client);
return 0;
}
- state = kmalloc(sizeof(struct saa7127_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct saa7127_state), GFP_KERNEL);
if (state == NULL) {
kfree(client);
@@ -743,11 +719,10 @@ static int saa7127_attach(struct i2c_adapter *adapter, int address, int kind)
}
i2c_set_clientdata(client, state);
- memset(state, 0, sizeof(struct saa7127_state));
/* Configure Encoder */
- saa7127_dbg("Configuring encoder\n");
+ v4l_dbg(1, debug, client, "Configuring encoder\n");
saa7127_write_inittab(client, saa7127_init_config_common);
saa7127_set_std(client, V4L2_STD_NTSC);
saa7127_set_output_type(client, SAA7127_OUTPUT_TYPE_BOTH);
@@ -768,12 +743,12 @@ static int saa7127_attach(struct i2c_adapter *adapter, int address, int kind)
read_result = saa7127_read(client, SAA7129_REG_FADE_KEY_COL2);
saa7127_write(client, SAA7129_REG_FADE_KEY_COL2, 0xaa);
if (saa7127_read(client, SAA7129_REG_FADE_KEY_COL2) == 0xaa) {
- saa7127_info("saa7129 found @ 0x%x (%s)\n", address << 1, adapter->name);
+ v4l_info(client, "saa7129 found @ 0x%x (%s)\n", address << 1, adapter->name);
saa7127_write(client, SAA7129_REG_FADE_KEY_COL2, read_result);
saa7127_write_inittab(client, saa7129_init_config_extra);
state->ident = V4L2_IDENT_SAA7129;
} else {
- saa7127_info("saa7127 found @ 0x%x (%s)\n", address << 1, adapter->name);
+ v4l_info(client, "saa7127 found @ 0x%x (%s)\n", address << 1, adapter->name);
state->ident = V4L2_IDENT_SAA7127;
}
@@ -786,11 +761,7 @@ static int saa7127_attach(struct i2c_adapter *adapter, int address, int kind)
static int saa7127_probe(struct i2c_adapter *adapter)
{
-#ifdef I2C_CLASS_TV_ANALOG
if (adapter->class & I2C_CLASS_TV_ANALOG)
-#else
- if (adapter->id == I2C_HW_B_BT848)
-#endif
return i2c_probe(adapter, &addr_data, saa7127_attach);
return 0;
}
@@ -819,13 +790,13 @@ static int saa7127_detach(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static struct i2c_driver i2c_driver_saa7127 = {
- .name = "saa7127",
+ .driver = {
+ .name = "saa7127",
+ },
.id = I2C_DRIVERID_SAA7127,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = saa7127_probe,
.detach_client = saa7127_detach,
.command = saa7127_command,
- .owner = THIS_MODULE,
};
diff --git a/drivers/media/video/saa7134/Kconfig b/drivers/media/video/saa7134/Kconfig
index 8a5c3e71b37..86671a43e76 100644
--- a/drivers/media/video/saa7134/Kconfig
+++ b/drivers/media/video/saa7134/Kconfig
@@ -15,7 +15,7 @@ config VIDEO_SAA7134
config VIDEO_SAA7134_ALSA
tristate "Philips SAA7134 DMA audio support"
depends on VIDEO_SAA7134 && SND
- select SND_PCM_OSS
+ select SND_PCM
---help---
This is a video4linux driver for direct (DMA) audio in
Philips SAA713x based TV cards using ALSA
diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
index a61d24f588f..0e0ba50946e 100644
--- a/drivers/media/video/saa7134/saa6752hs.c
+++ b/drivers/media/video/saa7134/saa6752hs.c
@@ -9,7 +9,8 @@
#include <linux/poll.h>
#include <linux/i2c.h>
#include <linux/types.h>
-#include <linux/videodev.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
#include <linux/init.h>
#include <linux/crc32.h>
@@ -509,11 +510,9 @@ static int saa6752hs_attach(struct i2c_adapter *adap, int addr, int kind)
{
struct saa6752hs_state *h;
- printk("saa6752hs: chip found @ 0x%x\n", addr<<1);
- if (NULL == (h = kmalloc(sizeof(*h), GFP_KERNEL)))
+ if (NULL == (h = kzalloc(sizeof(*h), GFP_KERNEL)))
return -ENOMEM;
- memset(h,0,sizeof(*h));
h->client = client_template;
h->params = param_defaults;
h->client.adapter = adap;
@@ -525,6 +524,8 @@ static int saa6752hs_attach(struct i2c_adapter *adap, int addr, int kind)
i2c_set_clientdata(&h->client, h);
i2c_attach_client(&h->client);
+ v4l_info(&h->client,"saa6752hs: chip found @ 0x%x\n", addr<<1);
+
return 0;
}
@@ -597,10 +598,10 @@ saa6752hs_command(struct i2c_client *client, unsigned int cmd, void *arg)
/* ----------------------------------------------------------------------- */
static struct i2c_driver driver = {
- .owner = THIS_MODULE,
- .name = "i2c saa6752hs MPEG encoder",
+ .driver = {
+ .name = "saa6752hs",
+ },
.id = I2C_DRIVERID_SAA6752HS,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = saa6752hs_probe,
.detach_client = saa6752hs_detach,
.command = saa6752hs_command,
@@ -609,7 +610,6 @@ static struct i2c_driver driver = {
static struct i2c_client client_template =
{
.name = "saa6752hs",
- .flags = I2C_CLIENT_ALLOW_USE,
.driver = &driver,
};
diff --git a/drivers/media/video/saa7134/saa7134-alsa.c b/drivers/media/video/saa7134/saa7134-alsa.c
index ade05f75fdb..a7a6ab9298a 100644
--- a/drivers/media/video/saa7134/saa7134-alsa.c
+++ b/drivers/media/video/saa7134/saa7134-alsa.c
@@ -20,13 +20,13 @@
*
*/
-#include <sound/driver.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
+#include <sound/driver.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/pcm.h>
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index 672fb205959..275d06af69d 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -25,6 +25,7 @@
#include "saa7134-reg.h"
#include "saa7134.h"
+#include <media/v4l2-common.h>
/* commonly used strings */
static char name_mute[] = "mute";
@@ -1665,7 +1666,7 @@ struct saa7134_board saa7134_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
- .tda9887_conf = TDA9887_PRESENT | TDA9887_INTERCARRIER,
+ .tda9887_conf = TDA9887_PRESENT | TDA9887_INTERCARRIER | TDA9887_PORT2_ACTIVE,
.mpeg = SAA7134_MPEG_DVB,
.inputs = {{
.name = name_tv,
@@ -2555,6 +2556,69 @@ struct saa7134_board saa7134_boards[] = {
.amux = LINE1,
},
},
+ [SAA7134_BOARD_CINERGY250PCI] = {
+ /* remote-control does not work. The signal about a
+ key press comes in via gpio, but the key code
+ doesn't. Neither does it have an i2c remote control
+ interface. */
+ .name = "Terratec Cinergy 250 PCI TV",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_PHILIPS_TDA8290,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .gpiomask = 0x80200000,
+ .inputs = {{
+ .name = name_tv,
+ .vmux = 1,
+ .amux = TV,
+ .tv = 1,
+ },{
+ .name = name_svideo, /* NOT tested */
+ .vmux = 8,
+ .amux = LINE1,
+ }},
+ .radio = {
+ .name = name_radio,
+ .amux = LINE1,
+ .gpio = 0x0200000,
+ },
+ },
+ [SAA7134_BOARD_FLYDVB_TRIO] = {
+ /* LifeView LR319 FlyDVB Trio */
+ /* Peter Missel <peter.missel@onlinehome.de> */
+ .name = "LifeView FlyDVB Trio",
+ .audio_clock = 0x00200000,
+ .tuner_type = TUNER_PHILIPS_TDA8290,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .gpiomask = 0x00200000,
+ .inputs = {{
+ .name = name_tv, /* Analog broadcast/cable TV */
+ .vmux = 1,
+ .amux = TV,
+ .gpio = 0x200000, /* GPIO21=High for TV input */
+ .tv = 1,
+ },{
+ .name = name_svideo, /* S-Video signal on S-Video input */
+ .vmux = 8,
+ .amux = LINE2,
+ },{
+ .name = name_comp1, /* Composite signal on S-Video input */
+ .vmux = 0,
+ .amux = LINE2,
+ },{
+ .name = name_comp2, /* Composite input */
+ .vmux = 3,
+ .amux = LINE2,
+ }},
+ .radio = {
+ .name = name_radio,
+ .amux = TV,
+ .gpio = 0x000000, /* GPIO21=Low for FM radio antenna */
+ },
+ },
};
const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards);
@@ -2895,6 +2959,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7133,
.subvendor = 0x1421,
+ .subdevice = 0x0351, /* PCI version, new revision */
+ .driver_data = SAA7134_BOARD_ADS_INSTANT_TV,
+ },{
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x1421,
.subdevice = 0x0370, /* cardbus version */
.driver_data = SAA7134_BOARD_ADS_INSTANT_TV,
},{
@@ -3002,6 +3072,18 @@ struct pci_device_id saa7134_pci_tbl[] = {
.subdevice = 0x6231,
.driver_data = SAA7134_BOARD_MSI_TVATANYWHERE_PLUS,
},{
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x153b,
+ .subdevice = 0x1160,
+ .driver_data = SAA7134_BOARD_CINERGY250PCI,
+ },{
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133, /* SAA 7131E */
+ .subvendor = 0x5168,
+ .subdevice = 0x0319,
+ .driver_data = SAA7134_BOARD_FLYDVB_TRIO,
+ },{
/* --- boards without eeprom + subsystem ID --- */
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
@@ -3090,6 +3172,7 @@ int saa7134_board_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_AVERMEDIA_GO_007_FM:
/* case SAA7134_BOARD_SABRENT_SBTTVFM: */ /* not finished yet */
case SAA7134_BOARD_VIDEOMATE_TV_PVR:
+ case SAA7134_BOARD_VIDEOMATE_GOLD_PLUS:
case SAA7134_BOARD_VIDEOMATE_TV_GOLD_PLUSII:
case SAA7134_BOARD_VIDEOMATE_DVBT_300:
case SAA7134_BOARD_VIDEOMATE_DVBT_200:
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index 23d8747338e..3983a6524ca 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -95,77 +95,6 @@ int (*dmasound_exit)(struct saa7134_dev *dev);
#define dprintk(fmt, arg...) if (core_debug) \
printk(KERN_DEBUG "%s/core: " fmt, dev->name , ## arg)
-/* ------------------------------------------------------------------ */
-/* debug help functions */
-
-static const char *v4l1_ioctls[] = {
- "0", "GCAP", "GCHAN", "SCHAN", "GTUNER", "STUNER", "GPICT", "SPICT",
- "CCAPTURE", "GWIN", "SWIN", "GFBUF", "SFBUF", "KEY", "GFREQ",
- "SFREQ", "GAUDIO", "SAUDIO", "SYNC", "MCAPTURE", "GMBUF", "GUNIT",
- "GCAPTURE", "SCAPTURE", "SPLAYMODE", "SWRITEMODE", "GPLAYINFO",
- "SMICROCODE", "GVBIFMT", "SVBIFMT" };
-#define V4L1_IOCTLS ARRAY_SIZE(v4l1_ioctls)
-
-static const char *v4l2_ioctls[] = {
- "QUERYCAP", "1", "ENUM_PIXFMT", "ENUM_FBUFFMT", "G_FMT", "S_FMT",
- "G_COMP", "S_COMP", "REQBUFS", "QUERYBUF", "G_FBUF", "S_FBUF",
- "G_WIN", "S_WIN", "PREVIEW", "QBUF", "16", "DQBUF", "STREAMON",
- "STREAMOFF", "G_PERF", "G_PARM", "S_PARM", "G_STD", "S_STD",
- "ENUMSTD", "ENUMINPUT", "G_CTRL", "S_CTRL", "G_TUNER", "S_TUNER",
- "G_FREQ", "S_FREQ", "G_AUDIO", "S_AUDIO", "35", "QUERYCTRL",
- "QUERYMENU", "G_INPUT", "S_INPUT", "ENUMCVT", "41", "42", "43",
- "44", "45", "G_OUTPUT", "S_OUTPUT", "ENUMOUTPUT", "G_AUDOUT",
- "S_AUDOUT", "ENUMFX", "G_EFFECT", "S_EFFECT", "G_MODULATOR",
- "S_MODULATOR"
-};
-#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
-
-static const char *osspcm_ioctls[] = {
- "RESET", "SYNC", "SPEED", "STEREO", "GETBLKSIZE", "SETFMT",
- "CHANNELS", "?", "POST", "SUBDIVIDE", "SETFRAGMENT", "GETFMTS",
- "GETOSPACE", "GETISPACE", "NONBLOCK", "GETCAPS", "GET/SETTRIGGER",
- "GETIPTR", "GETOPTR", "MAPINBUF", "MAPOUTBUF", "SETSYNCRO",
- "SETDUPLEX", "GETODELAY"
-};
-#define OSSPCM_IOCTLS ARRAY_SIZE(v4l2_ioctls)
-
-void saa7134_print_ioctl(char *name, unsigned int cmd)
-{
- char *dir;
-
- switch (_IOC_DIR(cmd)) {
- case _IOC_NONE: dir = "--"; break;
- case _IOC_READ: dir = "r-"; break;
- case _IOC_WRITE: dir = "-w"; break;
- case _IOC_READ | _IOC_WRITE: dir = "rw"; break;
- default: dir = "??"; break;
- }
- switch (_IOC_TYPE(cmd)) {
- case 'v':
- printk(KERN_DEBUG "%s: ioctl 0x%08x (v4l1, %s, VIDIOC%s)\n",
- name, cmd, dir, (_IOC_NR(cmd) < V4L1_IOCTLS) ?
- v4l1_ioctls[_IOC_NR(cmd)] : "???");
- break;
- case 'V':
- printk(KERN_DEBUG "%s: ioctl 0x%08x (v4l2, %s, VIDIOC_%s)\n",
- name, cmd, dir, (_IOC_NR(cmd) < V4L2_IOCTLS) ?
- v4l2_ioctls[_IOC_NR(cmd)] : "???");
- break;
- case 'P':
- printk(KERN_DEBUG "%s: ioctl 0x%08x (oss dsp, %s, SNDCTL_DSP_%s)\n",
- name, cmd, dir, (_IOC_NR(cmd) < OSSPCM_IOCTLS) ?
- osspcm_ioctls[_IOC_NR(cmd)] : "???");
- break;
- case 'M':
- printk(KERN_DEBUG "%s: ioctl 0x%08x (oss mixer, %s, #%d)\n",
- name, cmd, dir, _IOC_NR(cmd));
- break;
- default:
- printk(KERN_DEBUG "%s: ioctl 0x%08x (???, %s, #%d)\n",
- name, cmd, dir, _IOC_NR(cmd));
- }
-}
-
void saa7134_track_gpio(struct saa7134_dev *dev, char *msg)
{
unsigned long mode,status;
@@ -211,7 +140,7 @@ static int pending_call(struct notifier_block *self, unsigned long state,
return NOTIFY_DONE;
}
-static int pending_registered;
+static int pending_registered=0;
static struct notifier_block pending_notifier = {
.notifier_call = pending_call,
};
@@ -610,11 +539,38 @@ static irqreturn_t saa7134_irq(int irq, void *dev_id, struct pt_regs *regs)
card_has_mpeg(dev))
saa7134_irq_ts_done(dev,status);
- if ((report & (SAA7134_IRQ_REPORT_GPIO16 |
- SAA7134_IRQ_REPORT_GPIO18)) &&
- dev->remote)
- saa7134_input_irq(dev);
+ if (report & SAA7134_IRQ_REPORT_GPIO16) {
+ switch (dev->has_remote) {
+ case SAA7134_REMOTE_GPIO:
+ if (dev->remote->mask_keydown & 0x10000) {
+ saa7134_input_irq(dev);
+ }
+ break;
+
+ case SAA7134_REMOTE_I2C:
+ break; /* FIXME: invoke I2C get_key() */
+ default: /* GPIO16 not used by IR remote */
+ break;
+ }
+ }
+
+ if (report & SAA7134_IRQ_REPORT_GPIO18) {
+ switch (dev->has_remote) {
+ case SAA7134_REMOTE_GPIO:
+ if ((dev->remote->mask_keydown & 0x40000) ||
+ (dev->remote->mask_keyup & 0x40000)) {
+ saa7134_input_irq(dev);
+ }
+ break;
+
+ case SAA7134_REMOTE_I2C:
+ break; /* FIXME: invoke I2C get_key() */
+
+ default: /* GPIO18 not used by IR remote */
+ break;
+ }
+ }
}
if (10 == loop) {
@@ -624,13 +580,16 @@ static irqreturn_t saa7134_irq(int irq, void *dev_id, struct pt_regs *regs)
printk(KERN_WARNING "%s/irq: looping -- "
"clearing PE (parity error!) enable bit\n",dev->name);
saa_clearl(SAA7134_IRQ2,SAA7134_IRQ2_INTE_PE);
- } else if (report & (SAA7134_IRQ_REPORT_GPIO16 |
- SAA7134_IRQ_REPORT_GPIO18)) {
- /* disable gpio IRQs */
+ } else if (report & SAA7134_IRQ_REPORT_GPIO16) {
+ /* disable gpio16 IRQ */
printk(KERN_WARNING "%s/irq: looping -- "
- "clearing GPIO enable bits\n",dev->name);
- saa_clearl(SAA7134_IRQ2, (SAA7134_IRQ2_INTE_GPIO16 |
- SAA7134_IRQ2_INTE_GPIO18));
+ "clearing GPIO16 enable bit\n",dev->name);
+ saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16);
+ } else if (report & SAA7134_IRQ_REPORT_GPIO18) {
+ /* disable gpio18 IRQs */
+ printk(KERN_WARNING "%s/irq: looping -- "
+ "clearing GPIO18 enable bit\n",dev->name);
+ saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18);
} else {
/* disable all irqs */
printk(KERN_WARNING "%s/irq: looping -- "
@@ -711,10 +670,14 @@ static int saa7134_hwinit2(struct saa7134_dev *dev)
SAA7134_IRQ2_INTE_PE |
SAA7134_IRQ2_INTE_AR;
- if (dev->has_remote == SAA7134_REMOTE_GPIO)
- irq2_mask |= (SAA7134_IRQ2_INTE_GPIO18 |
- SAA7134_IRQ2_INTE_GPIO18A |
- SAA7134_IRQ2_INTE_GPIO16 );
+ if (dev->has_remote == SAA7134_REMOTE_GPIO) {
+ if (dev->remote->mask_keydown & 0x10000)
+ irq2_mask |= SAA7134_IRQ2_INTE_GPIO16;
+ else if (dev->remote->mask_keydown & 0x40000)
+ irq2_mask |= SAA7134_IRQ2_INTE_GPIO18;
+ else if (dev->remote->mask_keyup & 0x40000)
+ irq2_mask |= SAA7134_IRQ2_INTE_GPIO18A;
+ }
saa_writel(SAA7134_IRQ1, 0);
saa_writel(SAA7134_IRQ2, irq2_mask);
@@ -840,10 +803,9 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
struct saa7134_mpeg_ops *mops;
int err;
- dev = kmalloc(sizeof(*dev),GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev),GFP_KERNEL);
if (NULL == dev)
return -ENOMEM;
- memset(dev,0,sizeof(*dev));
/* pci init */
dev->pci = pci_dev;
@@ -1156,7 +1118,7 @@ static int saa7134_init(void)
printk(KERN_INFO "saa7130/34: snapshot date %04d-%02d-%02d\n",
SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
#endif
- return pci_module_init(&saa7134_pci_driver);
+ return pci_register_driver(&saa7134_pci_driver);
}
static void saa7134_fini(void)
@@ -1173,7 +1135,6 @@ module_exit(saa7134_fini);
/* ----------------------------------------------------------- */
-EXPORT_SYMBOL(saa7134_print_ioctl);
EXPORT_SYMBOL(saa7134_i2c_call_clients);
EXPORT_SYMBOL(saa7134_devlist);
EXPORT_SYMBOL(saa7134_boards);
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index e016480c346..399f9952596 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -31,6 +31,7 @@
#include "saa7134-reg.h"
#include "saa7134.h"
+#include <media/v4l2-common.h>
#ifdef HAVE_MT352
# include "mt352.h"
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 575f3e835f9..bd4c389d4c3 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -29,6 +29,7 @@
#include "saa7134.h"
#include <media/saa6752hs.h>
+#include <media/v4l2-common.h>
/* ------------------------------------------------------------------ */
@@ -163,7 +164,7 @@ static int ts_do_ioctl(struct inode *inode, struct file *file,
struct saa7134_dev *dev = file->private_data;
if (debug > 1)
- saa7134_print_ioctl(dev->name,cmd);
+ v4l_print_ioctl(dev->name,cmd);
switch (cmd) {
case VIDIOC_QUERYCAP:
{
diff --git a/drivers/media/video/saa7134/saa7134-i2c.c b/drivers/media/video/saa7134/saa7134-i2c.c
index df9dd36721e..6162550c413 100644
--- a/drivers/media/video/saa7134/saa7134-i2c.c
+++ b/drivers/media/video/saa7134/saa7134-i2c.c
@@ -30,6 +30,7 @@
#include "saa7134-reg.h"
#include "saa7134.h"
+#include <media/v4l2-common.h>
/* ----------------------------------------------------------- */
@@ -333,7 +334,7 @@ static int attach_inform(struct i2c_client *client)
struct tuner_setup tun_setup;
d1printk( "%s i2c attach [addr=0x%x,client=%s]\n",
- client->driver->name, client->addr, client->name);
+ client->driver->driver.name, client->addr, client->name);
/* Am I an i2c remote control? */
@@ -343,7 +344,7 @@ static int attach_inform(struct i2c_client *client)
{
struct IR_i2c *ir = i2c_get_clientdata(client);
d1printk("%s i2c IR detected (%s).\n",
- client->driver->name,ir->phys);
+ client->driver->driver.name, ir->phys);
saa7134_set_i2c_ir(dev,ir);
break;
}
@@ -390,9 +391,7 @@ static struct i2c_algorithm saa7134_algo = {
static struct i2c_adapter saa7134_adap_template = {
.owner = THIS_MODULE,
-#ifdef I2C_CLASS_TV_ANALOG
.class = I2C_CLASS_TV_ANALOG,
-#endif
.name = "saa7134",
.id = I2C_HW_SAA7134,
.algo = &saa7134_algo,
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index ab75ca5ac35..82d28cbf289 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -56,23 +56,23 @@ static IR_KEYTAB_TYPE flyvideo_codes[IR_KEYTAB_SIZE] = {
[ 12 ] = KEY_KP8,
[ 13 ] = KEY_KP9,
- [ 14 ] = KEY_TUNER, // Air/Cable
+ [ 14 ] = KEY_MODE, // Air/Cable
[ 17 ] = KEY_VIDEO, // Video
[ 21 ] = KEY_AUDIO, // Audio
- [ 0 ] = KEY_POWER, // Pover
+ [ 0 ] = KEY_POWER, // Power
+ [ 24 ] = KEY_TUNER, // AV Source
[ 2 ] = KEY_ZOOM, // Fullscreen
+ [ 26 ] = KEY_LANGUAGE, // Stereo
[ 27 ] = KEY_MUTE, // Mute
- [ 20 ] = KEY_VOLUMEUP,
- [ 23 ] = KEY_VOLUMEDOWN,
+ [ 20 ] = KEY_VOLUMEUP, // Volume +
+ [ 23 ] = KEY_VOLUMEDOWN, // Volume -
[ 18 ] = KEY_CHANNELUP, // Channel +
[ 19 ] = KEY_CHANNELDOWN, // Channel -
- [ 6 ] = KEY_AGAIN, // Recal
- [ 16 ] = KEY_KPENTER, // Enter
-
- [ 26 ] = KEY_F22, // Stereo
- [ 24 ] = KEY_EDIT, // AV Source
+ [ 6 ] = KEY_AGAIN, // Recall
+ [ 16 ] = KEY_ENTER, // Enter
};
+
static IR_KEYTAB_TYPE cinergy_codes[IR_KEYTAB_SIZE] = {
[ 0 ] = KEY_KP0,
[ 1 ] = KEY_KP1,
@@ -543,12 +543,22 @@ static int build_key(struct saa7134_dev *dev)
dprintk("build_key gpio=0x%x mask=0x%x data=%d\n",
gpio, ir->mask_keycode, data);
- if ((ir->mask_keydown && (0 != (gpio & ir->mask_keydown))) ||
- (ir->mask_keyup && (0 == (gpio & ir->mask_keyup)))) {
- ir_input_keydown(ir->dev, &ir->ir, data, data);
- } else {
- ir_input_nokey(ir->dev, &ir->ir);
+ if (ir->polling) {
+ if ((ir->mask_keydown && (0 != (gpio & ir->mask_keydown))) ||
+ (ir->mask_keyup && (0 == (gpio & ir->mask_keyup)))) {
+ ir_input_keydown(ir->dev, &ir->ir, data, data);
+ } else {
+ ir_input_nokey(ir->dev, &ir->ir);
+ }
+ }
+ else { /* IRQ driven mode - handle key press and release in one go */
+ if ((ir->mask_keydown && (0 != (gpio & ir->mask_keydown))) ||
+ (ir->mask_keyup && (0 == (gpio & ir->mask_keyup)))) {
+ ir_input_keydown(ir->dev, &ir->ir, data, data);
+ ir_input_nokey(ir->dev, &ir->ir);
+ }
}
+
return 0;
}
@@ -686,6 +696,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
polling = 50; // ms
break;
case SAA7134_BOARD_VIDEOMATE_TV_PVR:
+ case SAA7134_BOARD_VIDEOMATE_GOLD_PLUS:
case SAA7134_BOARD_VIDEOMATE_TV_GOLD_PLUSII:
ir_codes = videomate_tv_pvr_codes;
mask_keycode = 0x00003F;
diff --git a/drivers/media/video/saa7134/saa7134-oss.c b/drivers/media/video/saa7134/saa7134-oss.c
index 8badd2a9cb2..7448e386a80 100644
--- a/drivers/media/video/saa7134/saa7134-oss.c
+++ b/drivers/media/video/saa7134/saa7134-oss.c
@@ -373,6 +373,42 @@ static ssize_t dsp_write(struct file *file, const char __user *buffer,
return -EINVAL;
}
+static const char *osspcm_ioctls[] = {
+ "RESET", "SYNC", "SPEED", "STEREO", "GETBLKSIZE", "SETFMT",
+ "CHANNELS", "?", "POST", "SUBDIVIDE", "SETFRAGMENT", "GETFMTS",
+ "GETOSPACE", "GETISPACE", "NONBLOCK", "GETCAPS", "GET/SETTRIGGER",
+ "GETIPTR", "GETOPTR", "MAPINBUF", "MAPOUTBUF", "SETSYNCRO",
+ "SETDUPLEX", "GETODELAY"
+};
+#define OSSPCM_IOCTLS ARRAY_SIZE(osspcm_ioctls)
+
+static void saa7134_oss_print_ioctl(char *name, unsigned int cmd)
+{
+ char *dir;
+
+ switch (_IOC_DIR(cmd)) {
+ case _IOC_NONE: dir = "--"; break;
+ case _IOC_READ: dir = "r-"; break;
+ case _IOC_WRITE: dir = "-w"; break;
+ case _IOC_READ | _IOC_WRITE: dir = "rw"; break;
+ default: dir = "??"; break;
+ }
+ switch (_IOC_TYPE(cmd)) {
+ case 'P':
+ printk(KERN_DEBUG "%s: ioctl 0x%08x (oss dsp, %s, SNDCTL_DSP_%s)\n",
+ name, cmd, dir, (_IOC_NR(cmd) < OSSPCM_IOCTLS) ?
+ osspcm_ioctls[_IOC_NR(cmd)] : "???");
+ break;
+ case 'M':
+ printk(KERN_DEBUG "%s: ioctl 0x%08x (oss mixer, %s, #%d)\n",
+ name, cmd, dir, _IOC_NR(cmd));
+ break;
+ default:
+ printk(KERN_DEBUG "%s: ioctl 0x%08x (???, %s, #%d)\n",
+ name, cmd, dir, _IOC_NR(cmd));
+ }
+}
+
static int dsp_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -382,7 +418,7 @@ static int dsp_ioctl(struct inode *inode, struct file *file,
int val = 0;
if (debug > 1)
- saa7134_print_ioctl(dev->name,cmd);
+ saa7134_oss_print_ioctl(dev->name,cmd);
switch (cmd) {
case OSS_GETVERSION:
return put_user(SOUND_VERSION, p);
@@ -678,7 +714,7 @@ static int mixer_ioctl(struct inode *inode, struct file *file,
int __user *p = argp;
if (debug > 1)
- saa7134_print_ioctl(dev->name,cmd);
+ saa7134_oss_print_ioctl(dev->name,cmd);
switch (cmd) {
case OSS_GETVERSION:
return put_user(SOUND_VERSION, p);
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index 45c852df13e..e97426bc85d 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -29,6 +29,7 @@
#include "saa7134-reg.h"
#include "saa7134.h"
+#include <media/v4l2-common.h>
/* Include V4L1 specific functions. Should be removed soon */
#include <linux/videodev.h>
@@ -1263,10 +1264,9 @@ static int video_open(struct inode *inode, struct file *file)
v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
- fh = kmalloc(sizeof(*fh),GFP_KERNEL);
+ fh = kzalloc(sizeof(*fh),GFP_KERNEL);
if (NULL == fh)
return -ENOMEM;
- memset(fh,0,sizeof(*fh));
file->private_data = fh;
fh->dev = dev;
fh->radio = radio;
@@ -1689,7 +1689,7 @@ static int video_do_ioctl(struct inode *inode, struct file *file,
int err;
if (video_debug > 1)
- saa7134_print_ioctl(dev->name,cmd);
+ v4l_print_ioctl(dev->name,cmd);
switch (cmd) {
case VIDIOC_S_CTRL:
@@ -2142,7 +2142,7 @@ static int radio_do_ioctl(struct inode *inode, struct file *file,
struct saa7134_dev *dev = fh->dev;
if (video_debug > 1)
- saa7134_print_ioctl(dev->name,cmd);
+ v4l_print_ioctl(dev->name,cmd);
switch (cmd) {
case VIDIOC_QUERYCAP:
{
@@ -2262,6 +2262,7 @@ static struct file_operations video_fops =
.poll = video_poll,
.mmap = video_mmap,
.ioctl = video_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
@@ -2271,6 +2272,7 @@ static struct file_operations radio_fops =
.open = video_open,
.release = video_release,
.ioctl = radio_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index add49db1ad4..e70eae8d29b 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -37,6 +37,9 @@
#include <media/ir-common.h>
#include <media/ir-kbd-i2c.h>
#include <media/video-buf.h>
+#include <sound/driver.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
#include <media/video-buf-dvb.h>
#ifndef TRUE
@@ -47,10 +50,6 @@
#endif
#define UNSET (-1U)
-#include <sound/driver.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-
/* ----------------------------------------------------------- */
/* enums */
@@ -209,6 +208,8 @@ struct saa7134_format {
#define SAA7134_BOARD_ASUSTEK_DIGIMATRIX_TV 80
#define SAA7134_BOARD_PHILIPS_TIGER 81
#define SAA7134_BOARD_MSI_TVATANYWHERE_PLUS 82
+#define SAA7134_BOARD_CINERGY250PCI 83
+#define SAA7134_BOARD_FLYDVB_TRIO 84
#define SAA7134_MAXBOARDS 8
#define SAA7134_INPUT_MAX 8
@@ -546,7 +547,6 @@ struct saa7134_dev {
extern struct list_head saa7134_devlist;
-void saa7134_print_ioctl(char *name, unsigned int cmd);
void saa7134_track_gpio(struct saa7134_dev *dev, char *msg);
#define SAA7134_PGTABLE_SIZE 4096
diff --git a/drivers/media/video/saa7146.h b/drivers/media/video/saa7146.h
index f305ec802ea..756963f01bb 100644
--- a/drivers/media/video/saa7146.h
+++ b/drivers/media/video/saa7146.h
@@ -73,7 +73,6 @@ struct saa7146
unsigned int nr;
unsigned long irq; /* IRQ used by SAA7146 card */
unsigned short id;
- struct pci_dev *dev;
unsigned char revision;
unsigned char boardcfg[64]; /* 64 bytes of config from eeprom */
unsigned long saa7146_adr; /* bus address of IO mem from PCI BIOS */
diff --git a/drivers/media/video/saa7185.c b/drivers/media/video/saa7185.c
index 132aa7943c1..3ed0edb870a 100644
--- a/drivers/media/video/saa7185.c
+++ b/drivers/media/video/saa7185.c
@@ -408,22 +408,19 @@ saa7185_detect_client (struct i2c_adapter *adapter,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return 0;
- client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
if (client == 0)
return -ENOMEM;
- memset(client, 0, sizeof(struct i2c_client));
client->addr = address;
client->adapter = adapter;
client->driver = &i2c_driver_saa7185;
- client->flags = I2C_CLIENT_ALLOW_USE;
strlcpy(I2C_NAME(client), "saa7185", sizeof(I2C_NAME(client)));
- encoder = kmalloc(sizeof(struct saa7185), GFP_KERNEL);
+ encoder = kzalloc(sizeof(struct saa7185), GFP_KERNEL);
if (encoder == NULL) {
kfree(client);
return -ENOMEM;
}
- memset(encoder, 0, sizeof(struct saa7185));
encoder->norm = VIDEO_MODE_NTSC;
encoder->enable = 1;
i2c_set_clientdata(client, encoder);
@@ -487,11 +484,11 @@ saa7185_detach_client (struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static struct i2c_driver i2c_driver_saa7185 = {
- .owner = THIS_MODULE,
- .name = "saa7185", /* name */
+ .driver = {
+ .name = "saa7185", /* name */
+ },
.id = I2C_DRIVERID_SAA7185B,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = saa7185_attach_adapter,
.detach_client = saa7185_detach_client,
diff --git a/drivers/media/video/saa7191.c b/drivers/media/video/saa7191.c
index cbca896e8cf..746cadb8f1c 100644
--- a/drivers/media/video/saa7191.c
+++ b/drivers/media/video/saa7191.c
@@ -571,18 +571,15 @@ static int saa7191_attach(struct i2c_adapter *adap, int addr, int kind)
printk(KERN_INFO "Philips SAA7191 driver version %s\n",
SAA7191_MODULE_VERSION);
- client = kmalloc(sizeof(*client), GFP_KERNEL);
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return -ENOMEM;
- decoder = kmalloc(sizeof(*decoder), GFP_KERNEL);
+ decoder = kzalloc(sizeof(*decoder), GFP_KERNEL);
if (!decoder) {
err = -ENOMEM;
goto out_free_client;
}
- memset(client, 0, sizeof(struct i2c_client));
- memset(decoder, 0, sizeof(struct saa7191));
-
client->addr = addr;
client->adapter = adap;
client->driver = &i2c_driver_saa7191;
@@ -788,10 +785,10 @@ static int saa7191_command(struct i2c_client *client, unsigned int cmd,
}
static struct i2c_driver i2c_driver_saa7191 = {
- .owner = THIS_MODULE,
- .name = "saa7191",
+ .driver = {
+ .name = "saa7191",
+ },
.id = I2C_DRIVERID_SAA7191,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = saa7191_probe,
.detach_client = saa7191_detach,
.command = saa7191_command
diff --git a/drivers/media/video/stradis.c b/drivers/media/video/stradis.c
index d4497dbae05..54fc33011ff 100644
--- a/drivers/media/video/stradis.c
+++ b/drivers/media/video/stradis.c
@@ -49,9 +49,9 @@
#include "saa7121.h"
#include "cs8420.h"
-#define DEBUG(x) /* debug driver */
-#undef IDEBUG /* debug irq handler */
-#undef MDEBUG /* debug memory management */
+#define DEBUG(x) /* debug driver */
+#undef IDEBUG /* debug irq handler */
+#undef MDEBUG /* debug memory management */
#define SAA7146_MAX 6
@@ -63,7 +63,6 @@ static int video_nr = -1;
module_param(video_nr, int, 0);
MODULE_LICENSE("GPL");
-
#define nDebNormal 0x00480000
#define nDebNoInc 0x00480000
#define nDebVideo 0xd0480000
@@ -99,7 +98,12 @@ MODULE_LICENSE("GPL");
#ifdef USE_RESCUE_EEPROM_SDM275
static unsigned char rescue_eeprom[64] = {
-0x00,0x01,0x04,0x13,0x26,0x0f,0x10,0x00,0x00,0x00,0x43,0x63,0x22,0x01,0x29,0x15,0x73,0x00,0x1f, 'd', 'e', 'c', 'x', 'l', 'd', 'v', 'a',0x02,0x00,0x01,0x00,0xcc,0xa4,0x63,0x09,0xe2,0x10,0x00,0x0a,0x00,0x02,0x02, 'd', 'e', 'c', 'x', 'l', 'a',0x00,0x00,0x42,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00, 0x01, 0x04, 0x13, 0x26, 0x0f, 0x10, 0x00, 0x00, 0x00, 0x43, 0x63,
+ 0x22, 0x01, 0x29, 0x15, 0x73, 0x00, 0x1f, 'd', 'e', 'c', 'x', 'l',
+ 'd', 'v', 'a', 0x02, 0x00, 0x01, 0x00, 0xcc, 0xa4, 0x63, 0x09, 0xe2,
+ 0x10, 0x00, 0x0a, 0x00, 0x02, 0x02, 'd', 'e', 'c', 'x', 'l', 'a',
+ 0x00, 0x00, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
};
#endif
@@ -140,17 +144,18 @@ static int I2CRead(struct saa7146 *saa, unsigned char addr,
if (saaread(SAA7146_I2C_STATUS) & 0x3c)
I2CWipe(saa);
- for (i = 0; i < 1000 &&
- (saaread(SAA7146_I2C_STATUS) & SAA7146_I2C_BUSY); i++)
+ for (i = 0;
+ i < 1000 && (saaread(SAA7146_I2C_STATUS) & SAA7146_I2C_BUSY);
+ i++)
schedule();
if (i == 1000)
I2CWipe(saa);
if (dosub)
saawrite(((addr & 0xfe) << 24) | (((addr | 1) & 0xff) << 8) |
- ((subaddr & 0xff) << 16) | 0xed, SAA7146_I2C_TRANSFER);
+ ((subaddr & 0xff) << 16) | 0xed, SAA7146_I2C_TRANSFER);
else
saawrite(((addr & 0xfe) << 24) | (((addr | 1) & 0xff) << 16) |
- 0xf1, SAA7146_I2C_TRANSFER);
+ 0xf1, SAA7146_I2C_TRANSFER);
saawrite((SAA7146_MC2_UPLD_I2C << 16) |
SAA7146_MC2_UPLD_I2C, SAA7146_MC2);
/* wait for i2c registers to be programmed */
@@ -163,7 +168,7 @@ static int I2CRead(struct saa7146 *saa, unsigned char addr,
schedule();
if (saaread(SAA7146_I2C_STATUS) & SAA7146_I2C_ERR)
return -1;
- if (i == 1000)
+ if (i == 1000)
printk("i2c setup read timeout\n");
saawrite(0x41, SAA7146_I2C_TRANSFER);
saawrite((SAA7146_MC2_UPLD_I2C << 16) |
@@ -178,7 +183,7 @@ static int I2CRead(struct saa7146 *saa, unsigned char addr,
schedule();
if (saaread(SAA7146_I2C_TRANSFER) & SAA7146_I2C_ERR)
return -1;
- if (i == 1000)
+ if (i == 1000)
printk("i2c read timeout\n");
return ((saaread(SAA7146_I2C_TRANSFER) >> 24) & 0xff);
}
@@ -213,20 +218,22 @@ static void attach_inform(struct saa7146 *saa, int id)
{
int i;
- DEBUG(printk(KERN_DEBUG "stradis%d: i2c: device found=%02x\n", saa->nr, id));
- if (id == 0xa0) { /* we have rev2 or later board, fill in info */
+ DEBUG(printk(KERN_DEBUG "stradis%d: i2c: device found=%02x\n", saa->nr,
+ id));
+ if (id == 0xa0) { /* we have rev2 or later board, fill in info */
for (i = 0; i < 64; i++)
saa->boardcfg[i] = I2CRead(saa, 0xa0, i, 1);
#ifdef USE_RESCUE_EEPROM_SDM275
if (saa->boardcfg[0] != 0) {
- printk("stradis%d: WARNING: EEPROM STORED VALUES HAVE BEEN IGNORED\n", saa->nr);
+ printk("stradis%d: WARNING: EEPROM STORED VALUES HAVE "
+ "BEEN IGNORED\n", saa->nr);
for (i = 0; i < 64; i++)
saa->boardcfg[i] = rescue_eeprom[i];
}
#endif
printk("stradis%d: config =", saa->nr);
for (i = 0; i < 51; i++) {
- printk(" %02x",saa->boardcfg[i]);
+ printk(" %02x", saa->boardcfg[i]);
}
printk("\n");
}
@@ -254,17 +261,19 @@ static int wait_for_debi_done(struct saa7146 *saa)
for (i = 0; i < 500000 &&
(saaread(SAA7146_PSR) & SAA7146_PSR_DEBI_S); i++)
saaread(SAA7146_MC2);
+
if (i > debiwait_maxwait)
printk("wait-for-debi-done maxwait: %d\n",
debiwait_maxwait = i);
-
+
if (i == 500000)
return -1;
+
return 0;
}
static int debiwrite(struct saa7146 *saa, u32 config, int addr,
- u32 val, int count)
+ u32 val, int count)
{
u32 cmd;
if (count <= 0 || count > 32764)
@@ -309,41 +318,6 @@ static u32 debiread(struct saa7146 *saa, u32 config, int addr, int count)
return result;
}
-#if 0 /* unused */
-/* MUST be a multiple of 8 bytes and 8-byte aligned and < 32768 bytes */
-/* data copied into saa->dmadebi buffer, caller must re-enable interrupts */
-static void ibm_block_dram_read(struct saa7146 *saa, int address, int bytes)
-{
- int i, j;
- u32 *buf;
- buf = (u32 *) saa->dmadebi;
- if (bytes > 0x7000)
- bytes = 0x7000;
- saawrite(0, SAA7146_IER); /* disable interrupts */
- for (i=0; i < 10000 &&
- (debiread(saa, debNormal, IBM_MP2_DRAM_CMD_STAT, 2)
- & 0x8000); i++)
- saaread(SAA7146_MC2);
- if (i == 10000)
- printk(KERN_ERR "stradis%d: dram_busy never cleared\n",
- saa->nr);
- debiwrite(saa, debNormal, IBM_MP2_SRC_ADDR, (address<<16) |
- (address>>16), 4);
- debiwrite(saa, debNormal, IBM_MP2_BLOCK_SIZE, bytes, 2);
- debiwrite(saa, debNormal, IBM_MP2_CMD_STAT, 0x8a10, 2);
- for (j = 0; j < bytes/4; j++) {
- for (i = 0; i < 10000 &&
- (!(debiread(saa, debNormal, IBM_MP2_DRAM_CMD_STAT, 2)
- & 0x4000)); i++)
- saaread(SAA7146_MC2);
- if (i == 10000)
- printk(KERN_ERR "stradis%d: dram_ready never set\n",
- saa->nr);
- buf[j] = debiread(saa, debNormal, IBM_MP2_DRAM_DATA, 4);
- }
-}
-#endif /* unused */
-
static void do_irq_send_data(struct saa7146 *saa)
{
int split, audbytes, vidbytes;
@@ -365,16 +339,15 @@ static void do_irq_send_data(struct saa7146 *saa)
return;
}
/* if at least 1 block audio waiting and audio fifo isn't full */
- if (audbytes >= 2048 && (debiread(saa, debNormal,
- IBM_MP2_AUD_FIFO, 2) & 0xff) < 60) {
+ if (audbytes >= 2048 && (debiread(saa, debNormal, IBM_MP2_AUD_FIFO, 2)
+ & 0xff) < 60) {
if (saa->audhead > saa->audtail)
split = 65536 - saa->audhead;
else
split = 0;
audbytes = 2048;
if (split > 0 && split < 2048) {
- memcpy(saa->dmadebi, saa->audbuf + saa->audhead,
- split);
+ memcpy(saa->dmadebi, saa->audbuf + saa->audhead, split);
saa->audhead = 0;
audbytes -= split;
} else
@@ -383,20 +356,19 @@ static void do_irq_send_data(struct saa7146 *saa)
audbytes);
saa->audhead += audbytes;
saa->audhead &= 0xffff;
- debiwrite(saa, debAudio, (NewCard? IBM_MP2_AUD_FIFO :
- IBM_MP2_AUD_FIFOW), 0, 2048);
+ debiwrite(saa, debAudio, (NewCard ? IBM_MP2_AUD_FIFO :
+ IBM_MP2_AUD_FIFOW), 0, 2048);
wake_up_interruptible(&saa->audq);
- /* if at least 1 block video waiting and video fifo isn't full */
+ /* if at least 1 block video waiting and video fifo isn't full */
} else if (vidbytes >= 30720 && (debiread(saa, debNormal,
- IBM_MP2_FIFO, 2)) < 16384) {
+ IBM_MP2_FIFO, 2)) < 16384) {
if (saa->vidhead > saa->vidtail)
split = 524288 - saa->vidhead;
else
split = 0;
vidbytes = 30720;
if (split > 0 && split < 30720) {
- memcpy(saa->dmadebi, saa->vidbuf + saa->vidhead,
- split);
+ memcpy(saa->dmadebi, saa->vidbuf + saa->vidhead, split);
saa->vidhead = 0;
vidbytes -= split;
} else
@@ -406,7 +378,7 @@ static void do_irq_send_data(struct saa7146 *saa)
saa->vidhead += vidbytes;
saa->vidhead &= 0x7ffff;
debiwrite(saa, debVideo, (NewCard ? IBM_MP2_FIFO :
- IBM_MP2_FIFOW), 0, 30720);
+ IBM_MP2_FIFOW), 0, 30720);
wake_up_interruptible(&saa->vidq);
}
saawrite(SAA7146_PSR_DEBI_S | SAA7146_PSR_PIN1, SAA7146_IER);
@@ -418,10 +390,10 @@ static void send_osd_data(struct saa7146 *saa)
if (size > 30720)
size = 30720;
/* ensure some multiple of 8 bytes is transferred */
- size = 8 * ((size + 8)>>3);
+ size = 8 * ((size + 8) >> 3);
if (size) {
debiwrite(saa, debNormal, IBM_MP2_OSD_ADDR,
- (saa->osdhead>>3), 2);
+ (saa->osdhead >> 3), 2);
memcpy(saa->dmadebi, &saa->osdbuf[saa->osdhead], size);
saa->osdhead += size;
/* block transfer of next 8 bytes to ~32k bytes */
@@ -435,7 +407,7 @@ static void send_osd_data(struct saa7146 *saa)
static irqreturn_t saa7146_irq(int irq, void *dev_id, struct pt_regs *regs)
{
- struct saa7146 *saa = (struct saa7146 *) dev_id;
+ struct saa7146 *saa = dev_id;
u32 stat, astat;
int count;
int handled = 0;
@@ -484,7 +456,7 @@ static irqreturn_t saa7146_irq(int irq, void *dev_id, struct pt_regs *regs)
saa->vidinfo.v_size = 480;
#if 0
if (saa->endmarkhead != saa->endmarktail) {
- saa->audhead =
+ saa->audhead =
saa->endmark[saa->endmarkhead];
saa->endmarkhead++;
if (saa->endmarkhead >= MAX_MARKS)
@@ -494,7 +466,7 @@ static irqreturn_t saa7146_irq(int irq, void *dev_id, struct pt_regs *regs)
}
if (istat & 0x4000) { /* Sequence Error Code */
if (saa->endmarkhead != saa->endmarktail) {
- saa->audhead =
+ saa->audhead =
saa->endmark[saa->endmarkhead];
saa->endmarkhead++;
if (saa->endmarkhead >= MAX_MARKS)
@@ -613,7 +585,7 @@ static int ibm_send_command(struct saa7146 *saa,
int i;
if (chain)
- debiwrite(saa, debNormal, IBM_MP2_COMMAND, (command << 1) | 1, 2);
+ debiwrite(saa, debNormal, IBM_MP2_COMMAND, (command << 1)| 1,2);
else
debiwrite(saa, debNormal, IBM_MP2_COMMAND, command << 1, 2);
debiwrite(saa, debNormal, IBM_MP2_CMD_DATA, data, 2);
@@ -663,11 +635,9 @@ static void initialize_cs8420(struct saa7146 *saa, int pro)
else
sequence = mode8420con;
for (i = 0; i < INIT8420LEN; i++)
- I2CWrite(saa, 0x20, init8420[i * 2],
- init8420[i * 2 + 1], 2);
+ I2CWrite(saa, 0x20, init8420[i * 2], init8420[i * 2 + 1], 2);
for (i = 0; i < MODE8420LEN; i++)
- I2CWrite(saa, 0x20, sequence[i * 2],
- sequence[i * 2 + 1], 2);
+ I2CWrite(saa, 0x20, sequence[i * 2], sequence[i * 2 + 1], 2);
printk("stradis%d: CS8420 initialized\n", saa->nr);
}
@@ -683,35 +653,36 @@ static void initialize_saa7121(struct saa7146 *saa, int dopal)
/* initialize PAL/NTSC video encoder */
for (i = 0; i < INIT7121LEN; i++) {
if (NewCard) { /* handle new card encoder differences */
- if (sequence[i*2] == 0x3a)
+ if (sequence[i * 2] == 0x3a)
I2CWrite(saa, 0x88, 0x3a, 0x13, 2);
- else if (sequence[i*2] == 0x6b)
+ else if (sequence[i * 2] == 0x6b)
I2CWrite(saa, 0x88, 0x6b, 0x20, 2);
- else if (sequence[i*2] == 0x6c)
+ else if (sequence[i * 2] == 0x6c)
I2CWrite(saa, 0x88, 0x6c,
dopal ? 0x09 : 0xf5, 2);
- else if (sequence[i*2] == 0x6d)
+ else if (sequence[i * 2] == 0x6d)
I2CWrite(saa, 0x88, 0x6d,
dopal ? 0x20 : 0x00, 2);
- else if (sequence[i*2] == 0x7a)
+ else if (sequence[i * 2] == 0x7a)
I2CWrite(saa, 0x88, 0x7a,
dopal ? (PALFirstActive - 1) :
(NTSCFirstActive - 4), 2);
- else if (sequence[i*2] == 0x7b)
+ else if (sequence[i * 2] == 0x7b)
I2CWrite(saa, 0x88, 0x7b,
dopal ? PALLastActive :
NTSCLastActive, 2);
- else I2CWrite(saa, 0x88, sequence[i * 2],
- sequence[i * 2 + 1], 2);
+ else
+ I2CWrite(saa, 0x88, sequence[i * 2],
+ sequence[i * 2 + 1], 2);
} else {
- if (sequence[i*2] == 0x6b && mod)
- I2CWrite(saa, 0x88, 0x6b,
- (sequence[i * 2 + 1] ^ 0x09), 2);
- else if (sequence[i*2] == 0x7a)
+ if (sequence[i * 2] == 0x6b && mod)
+ I2CWrite(saa, 0x88, 0x6b,
+ (sequence[i * 2 + 1] ^ 0x09), 2);
+ else if (sequence[i * 2] == 0x7a)
I2CWrite(saa, 0x88, 0x7a,
dopal ? (PALFirstActive - 1) :
(NTSCFirstActive - 4), 2);
- else if (sequence[i*2] == 0x7b)
+ else if (sequence[i * 2] == 0x7b)
I2CWrite(saa, 0x88, 0x7b,
dopal ? PALLastActive :
NTSCLastActive, 2);
@@ -735,7 +706,8 @@ static void set_genlock_offset(struct saa7146 *saa, int noffset)
nCode = noffset + 0x100;
if (nCode == 1)
nCode = 0x401;
- else if (nCode < 1) nCode = 0x400 + PixelsPerLine + nCode;
+ else if (nCode < 1)
+ nCode = 0x400 + PixelsPerLine + nCode;
debiwrite(saa, debNormal, XILINX_GLDELAY, nCode, 2);
}
@@ -745,33 +717,31 @@ static void set_out_format(struct saa7146 *saa, int mode)
saa->boardcfg[2] = mode;
/* do not adjust analog video parameters here, use saa7121 init */
/* you will affect the SDI output on the new card */
- if (mode == VIDEO_MODE_PAL) { /* PAL */
+ if (mode == VIDEO_MODE_PAL) { /* PAL */
debiwrite(saa, debNormal, XILINX_CTL0, 0x0808, 2);
mdelay(50);
saawrite(0x012002c0, SAA7146_NUM_LINE_BYTE1);
if (NewCard) {
- debiwrite(saa, debNormal, IBM_MP2_DISP_MODE,
- 0xe100, 2);
+ debiwrite(saa, debNormal, IBM_MP2_DISP_MODE, 0xe100, 2);
mdelay(50);
}
debiwrite(saa, debNormal, IBM_MP2_DISP_MODE,
- NewCard ? 0xe500: 0x6500, 2);
+ NewCard ? 0xe500 : 0x6500, 2);
debiwrite(saa, debNormal, IBM_MP2_DISP_DLY,
(1 << 8) |
- (NewCard ? PALFirstActive : PALFirstActive-6), 2);
+ (NewCard ? PALFirstActive : PALFirstActive - 6), 2);
} else { /* NTSC */
debiwrite(saa, debNormal, XILINX_CTL0, 0x0800, 2);
mdelay(50);
saawrite(0x00f002c0, SAA7146_NUM_LINE_BYTE1);
debiwrite(saa, debNormal, IBM_MP2_DISP_MODE,
- NewCard ? 0xe100: 0x6100, 2);
+ NewCard ? 0xe100 : 0x6100, 2);
debiwrite(saa, debNormal, IBM_MP2_DISP_DLY,
(1 << 8) |
- (NewCard ? NTSCFirstActive : NTSCFirstActive-6), 2);
+ (NewCard ? NTSCFirstActive : NTSCFirstActive - 6), 2);
}
}
-
/* Initialize bitmangler to map from a byte value to the mangled word that
* must be output to program the Xilinx part through the DEBI port.
* Xilinx Data Bit->DEBI Bit: 0->15 1->7 2->6 3->12 4->11 5->2 6->1 7->0
@@ -799,43 +769,41 @@ static int initialize_fpga(struct video_code *bitdata)
for (num = 0; num < saa_num; num++) {
saa = &saa7146s[num];
if (saa->boardcfg[0] > 20)
- continue; /* card was programmed */
+ continue; /* card was programmed */
loadtwo = (saa->boardcfg[18] & 0x10);
if (!NewCard) /* we have an old board */
for (i = 0; i < 256; i++)
- bitmangler[i] = ((i & 0x01) << 15) |
- ((i & 0x02) << 6) | ((i & 0x04) << 4) |
- ((i & 0x08) << 9) | ((i & 0x10) << 7) |
- ((i & 0x20) >> 3) | ((i & 0x40) >> 5) |
- ((i & 0x80) >> 7);
- else /* else we have a new board */
+ bitmangler[i] = ((i & 0x01) << 15) |
+ ((i & 0x02) << 6) | ((i & 0x04) << 4) |
+ ((i & 0x08) << 9) | ((i & 0x10) << 7) |
+ ((i & 0x20) >> 3) | ((i & 0x40) >> 5) |
+ ((i & 0x80) >> 7);
+ else /* else we have a new board */
for (i = 0; i < 256; i++)
- bitmangler[i] = ((i & 0x01) << 7) |
- ((i & 0x02) << 5) | ((i & 0x04) << 3) |
- ((i & 0x08) << 1) | ((i & 0x10) >> 1) |
- ((i & 0x20) >> 3) | ((i & 0x40) >> 5) |
- ((i & 0x80) >> 7);
+ bitmangler[i] = ((i & 0x01) << 7) |
+ ((i & 0x02) << 5) | ((i & 0x04) << 3) |
+ ((i & 0x08) << 1) | ((i & 0x10) >> 1) |
+ ((i & 0x20) >> 3) | ((i & 0x40) >> 5) |
+ ((i & 0x80) >> 7);
dmabuf = (u16 *) saa->dmadebi;
newdma = (u8 *) saa->dmadebi;
if (NewCard) { /* SDM2xxx */
if (!strncmp(bitdata->loadwhat, "decoder2", 8))
continue; /* fpga not for this card */
- if (!strncmp(&saa->boardcfg[42],
- bitdata->loadwhat, 8)) {
+ if (!strncmp(&saa->boardcfg[42], bitdata->loadwhat, 8))
loadfile = 1;
- } else if (loadtwo && !strncmp(&saa->boardcfg[19],
- bitdata->loadwhat, 8)) {
+ else if (loadtwo && !strncmp(&saa->boardcfg[19],
+ bitdata->loadwhat, 8))
loadfile = 2;
- } else if (!saa->boardcfg[42] && /* special */
- !strncmp("decxl", bitdata->loadwhat, 8)) {
- loadfile = 1;
- } else
+ else if (!saa->boardcfg[42] && !strncmp("decxl",
+ bitdata->loadwhat, 8))
+ loadfile = 1; /* special */
+ else
continue; /* fpga not for this card */
- if (loadfile != 1 && loadfile != 2) {
+ if (loadfile != 1 && loadfile != 2)
continue; /* skip to next card */
- }
- if (saa->boardcfg[0] && loadfile == 1 )
+ if (saa->boardcfg[0] && loadfile == 1)
continue; /* skip to next card */
if (saa->boardcfg[0] != 1 && loadfile == 2)
continue; /* skip to next card */
@@ -870,8 +838,9 @@ static int initialize_fpga(struct video_code *bitdata)
/* Release Xilinx INIT signal (WS2) */
saawrite(0x00000000, SAA7146_GPIO_CTRL);
/* Wait for the INIT to go High */
- for (i = 0; i < 10000 &&
- !(saaread(SAA7146_PSR) & SAA7146_PSR_PIN2); i++)
+ for (i = 0;
+ i < 10000 && !(saaread(SAA7146_PSR) & SAA7146_PSR_PIN2);
+ i++)
schedule();
if (i == 1000) {
printk(KERN_INFO "stradis%d: no fpga INIT\n", saa->nr);
@@ -881,17 +850,13 @@ send_fpga_stuff:
if (NewCard) {
for (i = startindex; i < bitdata->datasize; i++)
newdma[i - startindex] =
- bitmangler[bitdata->data[i]];
+ bitmangler[bitdata->data[i]];
debiwrite(saa, 0x01420000, 0, 0,
((bitdata->datasize - startindex) + 5));
- if (loadtwo) {
- if (loadfile == 1) {
- printk("stradis%d: "
- "awaiting 2nd FPGA bitfile\n",
- saa->nr);
- continue; /* skip to next card */
- }
-
+ if (loadtwo && loadfile == 1) {
+ printk("stradis%d: awaiting 2nd FPGA bitfile\n",
+ saa->nr);
+ continue; /* skip to next card */
}
} else {
for (i = startindex; i < bitdata->datasize; i++)
@@ -900,8 +865,9 @@ send_fpga_stuff:
debiwrite(saa, 0x014a0000, 0, 0,
((bitdata->datasize - startindex) + 5) * 2);
}
- for (i = 0; i < 1000 &&
- !(saaread(SAA7146_PSR) & SAA7146_PSR_PIN2); i++)
+ for (i = 0;
+ i < 1000 && !(saaread(SAA7146_PSR) & SAA7146_PSR_PIN2);
+ i++)
schedule();
if (i == 1000) {
printk(KERN_INFO "stradis%d: FPGA load failed\n",
@@ -925,14 +891,14 @@ send_fpga_stuff:
/* mute CS3310 */
if (HaveCS3310)
debiwrite(saa, debNormal, XILINX_CS3310_CMPLT,
- 0, 2);
+ 0, 2);
/* set VXCO to PWM mode, release reset, blank on */
debiwrite(saa, debNormal, XILINX_CTL0, 0xffc4, 2);
mdelay(10);
/* unmute CS3310 */
if (HaveCS3310)
debiwrite(saa, debNormal, XILINX_CTL0,
- 0x2020, 2);
+ 0x2020, 2);
}
/* set source Black */
debiwrite(saa, debNormal, XILINX_CTL0, 0x1707, 2);
@@ -958,10 +924,10 @@ send_fpga_stuff:
/* we must init CS8420 first since rev b pulls i2s */
/* master clock low and CS4341 needs i2s master to */
/* run the i2c port. */
- if (HaveCS8420) {
+ if (HaveCS8420)
/* 0=consumer, 1=pro */
initialize_cs8420(saa, 0);
- }
+
mdelay(5);
if (HaveCS4341)
initialize_cs4341(saa);
@@ -981,6 +947,7 @@ send_fpga_stuff:
debiwrite(saa, debNormal, XILINX_CTL0, 0x8080, 2);
#endif
}
+
return failure;
}
@@ -1021,10 +988,10 @@ static int do_ibm_reset(struct saa7146 *saa)
/* we must init CS8420 first since rev b pulls i2s */
/* master clock low and CS4341 needs i2s master to */
/* run the i2c port. */
- if (HaveCS8420) {
+ if (HaveCS8420)
/* 0=consumer, 1=pro */
initialize_cs8420(saa, 1);
- }
+
mdelay(5);
if (HaveCS4341)
initialize_cs4341(saa);
@@ -1039,12 +1006,12 @@ static int do_ibm_reset(struct saa7146 *saa)
debiwrite(saa, debNormal, IBM_MP2_OSD_SIZE, 0x2000, 2);
debiwrite(saa, debNormal, IBM_MP2_AUD_CTL, 0x4552, 2);
if (ibm_send_command(saa, IBM_MP2_CONFIG_DECODER,
- (ChipControl == 0x43 ? 0xe800 : 0xe000), 1)) {
+ (ChipControl == 0x43 ? 0xe800 : 0xe000), 1)) {
printk(KERN_ERR "stradis%d: IBM config failed\n", saa->nr);
}
if (HaveCS3310) {
int i = CS3310MaxLvl;
- debiwrite(saa, debNormal, XILINX_CS3310_CMPLT, ((i<<8)|i), 2);
+ debiwrite(saa, debNormal, XILINX_CS3310_CMPLT, ((i << 8)| i),2);
}
/* start video decoder */
debiwrite(saa, debNormal, IBM_MP2_CHIP_CONTROL, ChipControl, 2);
@@ -1057,6 +1024,7 @@ static int do_ibm_reset(struct saa7146 *saa)
/* clear pending interrupts */
debiread(saa, debNormal, IBM_MP2_HOST_INT, 2);
debiwrite(saa, debNormal, XILINX_CTL0, 0x1711, 2);
+
return 0;
}
@@ -1070,8 +1038,8 @@ static int initialize_ibmmpeg2(struct video_code *microcode)
saa = &saa7146s[num];
/* check that FPGA is loaded */
debiwrite(saa, debNormal, IBM_MP2_OSD_SIZE, 0xa55a, 2);
- if ((i = debiread(saa, debNormal, IBM_MP2_OSD_SIZE, 2)) !=
- 0xa55a) {
+ i = debiread(saa, debNormal, IBM_MP2_OSD_SIZE, 2);
+ if (i != 0xa55a) {
printk(KERN_INFO "stradis%d: %04x != 0xa55a\n",
saa->nr, i);
#if 0
@@ -1082,17 +1050,17 @@ static int initialize_ibmmpeg2(struct video_code *microcode)
if (saa->boardcfg[0] > 27)
continue; /* skip to next card */
/* load video control store */
- saa->boardcfg[1] = 0x13; /* no-sync default */
+ saa->boardcfg[1] = 0x13; /* no-sync default */
debiwrite(saa, debNormal, IBM_MP2_WR_PROT, 1, 2);
debiwrite(saa, debNormal, IBM_MP2_PROC_IADDR, 0, 2);
for (i = 0; i < microcode->datasize / 2; i++)
debiwrite(saa, debNormal, IBM_MP2_PROC_IDATA,
(microcode->data[i * 2] << 8) |
- microcode->data[i * 2 + 1], 2);
+ microcode->data[i * 2 + 1], 2);
debiwrite(saa, debNormal, IBM_MP2_PROC_IADDR, 0, 2);
debiwrite(saa, debNormal, IBM_MP2_WR_PROT, 0, 2);
debiwrite(saa, debNormal, IBM_MP2_CHIP_CONTROL,
- ChipControl, 2);
+ ChipControl, 2);
saa->boardcfg[0] = 28;
}
if (!strncmp(microcode->loadwhat, "decoder.aud", 11)) {
@@ -1109,34 +1077,32 @@ static int initialize_ibmmpeg2(struct video_code *microcode)
debiwrite(saa, debNormal, IBM_MP2_OSD_SIZE, 0x2000, 2);
debiwrite(saa, debNormal, IBM_MP2_AUD_CTL, 0x4552, 2);
if (ibm_send_command(saa, IBM_MP2_CONFIG_DECODER,
- 0xe000, 1)) {
- printk(KERN_ERR
- "stradis%d: IBM config failed\n",
- saa->nr);
+ 0xe000, 1)) {
+ printk(KERN_ERR "stradis%d: IBM config "
+ "failed\n", saa->nr);
return -1;
}
/* set PWM to center value */
if (NewCard) {
debiwrite(saa, debNormal, XILINX_PWM,
- saa->boardcfg[14] +
- (saa->boardcfg[13]<<8), 2);
+ saa->boardcfg[14] +
+ (saa->boardcfg[13] << 8), 2);
} else
- debiwrite(saa, debNormal, XILINX_PWM,
- 0x46, 2);
+ debiwrite(saa, debNormal, XILINX_PWM, 0x46, 2);
+
if (HaveCS3310) {
i = CS3310MaxLvl;
- debiwrite(saa, debNormal,
- XILINX_CS3310_CMPLT, ((i<<8)|i), 2);
+ debiwrite(saa, debNormal, XILINX_CS3310_CMPLT,
+ (i << 8) | i, 2);
}
- printk(KERN_INFO
- "stradis%d: IBM MPEGCD%d Initialized\n",
- saa->nr, 18 + (debiread(saa, debNormal,
- IBM_MP2_CHIP_CONTROL, 2) >> 12));
+ printk(KERN_INFO "stradis%d: IBM MPEGCD%d Inited\n",
+ saa->nr, 18 + (debiread(saa, debNormal,
+ IBM_MP2_CHIP_CONTROL, 2) >> 12));
/* start video decoder */
debiwrite(saa, debNormal, IBM_MP2_CHIP_CONTROL,
ChipControl, 2);
- debiwrite(saa, debNormal, IBM_MP2_RB_THRESHOLD,
- 0x4037, 2); /* 256k vid, 3520 bytes aud */
+ debiwrite(saa, debNormal, IBM_MP2_RB_THRESHOLD, 0x4037,
+ 2); /* 256k vid, 3520 bytes aud */
debiwrite(saa, debNormal, IBM_MP2_AUD_CTL, 0x4573, 2);
ibm_send_command(saa, IBM_MP2_PLAY, 0, 0);
/* enable buffer threshold irq */
@@ -1149,52 +1115,48 @@ static int initialize_ibmmpeg2(struct video_code *microcode)
saa->boardcfg[0] = 37;
}
}
+
return 0;
}
-static u32 palette2fmt[] =
-{ /* some of these YUV translations are wrong */
- 0xffffffff, 0x86000000, 0x87000000, 0x80000000, 0x8100000, 0x82000000,
- 0x83000000, 0x00000000, 0x03000000, 0x03000000, 0x0a00000, 0x03000000,
- 0x06000000, 0x00000000, 0x03000000, 0x0a000000, 0x0300000
+static u32 palette2fmt[] = { /* some of these YUV translations are wrong */
+ 0xffffffff, 0x86000000, 0x87000000, 0x80000000, 0x8100000, 0x82000000,
+ 0x83000000, 0x00000000, 0x03000000, 0x03000000, 0x0a00000, 0x03000000,
+ 0x06000000, 0x00000000, 0x03000000, 0x0a000000, 0x0300000
};
-static int bpp2fmt[4] =
-{
+static int bpp2fmt[4] = {
VIDEO_PALETTE_HI240, VIDEO_PALETTE_RGB565, VIDEO_PALETTE_RGB24,
VIDEO_PALETTE_RGB32
};
/* I wish I could find a formula to calculate these... */
-static u32 h_prescale[64] =
-{
- 0x10000000, 0x18040202, 0x18080000, 0x380c0606, 0x38100204, 0x38140808,
- 0x38180000, 0x381c0000, 0x3820161c, 0x38242a3b, 0x38281230, 0x382c4460,
- 0x38301040, 0x38340080, 0x38380000, 0x383c0000, 0x3840fefe, 0x3844ee9f,
- 0x3848ee9f, 0x384cee9f, 0x3850ee9f, 0x38542a3b, 0x38581230, 0x385c0000,
- 0x38600000, 0x38640000, 0x38680000, 0x386c0000, 0x38700000, 0x38740000,
- 0x38780000, 0x387c0000, 0x30800000, 0x38840000, 0x38880000, 0x388c0000,
- 0x38900000, 0x38940000, 0x38980000, 0x389c0000, 0x38a00000, 0x38a40000,
- 0x38a80000, 0x38ac0000, 0x38b00000, 0x38b40000, 0x38b80000, 0x38bc0000,
- 0x38c00000, 0x38c40000, 0x38c80000, 0x38cc0000, 0x38d00000, 0x38d40000,
- 0x38d80000, 0x38dc0000, 0x38e00000, 0x38e40000, 0x38e80000, 0x38ec0000,
- 0x38f00000, 0x38f40000, 0x38f80000, 0x38fc0000,
+static u32 h_prescale[64] = {
+ 0x10000000, 0x18040202, 0x18080000, 0x380c0606, 0x38100204, 0x38140808,
+ 0x38180000, 0x381c0000, 0x3820161c, 0x38242a3b, 0x38281230, 0x382c4460,
+ 0x38301040, 0x38340080, 0x38380000, 0x383c0000, 0x3840fefe, 0x3844ee9f,
+ 0x3848ee9f, 0x384cee9f, 0x3850ee9f, 0x38542a3b, 0x38581230, 0x385c0000,
+ 0x38600000, 0x38640000, 0x38680000, 0x386c0000, 0x38700000, 0x38740000,
+ 0x38780000, 0x387c0000, 0x30800000, 0x38840000, 0x38880000, 0x388c0000,
+ 0x38900000, 0x38940000, 0x38980000, 0x389c0000, 0x38a00000, 0x38a40000,
+ 0x38a80000, 0x38ac0000, 0x38b00000, 0x38b40000, 0x38b80000, 0x38bc0000,
+ 0x38c00000, 0x38c40000, 0x38c80000, 0x38cc0000, 0x38d00000, 0x38d40000,
+ 0x38d80000, 0x38dc0000, 0x38e00000, 0x38e40000, 0x38e80000, 0x38ec0000,
+ 0x38f00000, 0x38f40000, 0x38f80000, 0x38fc0000,
};
-static u32 v_gain[64] =
-{
- 0x016000ff, 0x016100ff, 0x016100ff, 0x016200ff, 0x016200ff, 0x016200ff,
- 0x016200ff, 0x016300ff, 0x016300ff, 0x016300ff, 0x016300ff, 0x016300ff,
- 0x016300ff, 0x016300ff, 0x016300ff, 0x016400ff, 0x016400ff, 0x016400ff,
- 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff,
- 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff,
- 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff,
- 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff,
- 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff,
- 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff,
- 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff,
- 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff,
+static u32 v_gain[64] = {
+ 0x016000ff, 0x016100ff, 0x016100ff, 0x016200ff, 0x016200ff, 0x016200ff,
+ 0x016200ff, 0x016300ff, 0x016300ff, 0x016300ff, 0x016300ff, 0x016300ff,
+ 0x016300ff, 0x016300ff, 0x016300ff, 0x016400ff, 0x016400ff, 0x016400ff,
+ 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff,
+ 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff,
+ 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff,
+ 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff,
+ 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff,
+ 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff,
+ 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff,
+ 0x016400ff, 0x016400ff, 0x016400ff, 0x016400ff,
};
-
static void saa7146_set_winsize(struct saa7146 *saa)
{
u32 format;
@@ -1209,24 +1171,23 @@ static void saa7146_set_winsize(struct saa7146 *saa)
saawrite(saa->win.vidadr + saa->win.bpl * saa->win.sheight,
SAA7146_PROT_ADDR1);
saawrite(0, SAA7146_PAGE1);
- saawrite(format|0x60, SAA7146_CLIP_FORMAT_CTRL);
+ saawrite(format | 0x60, SAA7146_CLIP_FORMAT_CTRL);
offset = (704 / (saa->win.width - 1)) & 0x3f;
saawrite(h_prescale[offset], SAA7146_HPS_H_PRESCALE);
offset = (720896 / saa->win.width) / (offset + 1);
- saawrite((offset<<12)|0x0c, SAA7146_HPS_H_SCALE);
+ saawrite((offset << 12) | 0x0c, SAA7146_HPS_H_SCALE);
if (CurrentMode == VIDEO_MODE_NTSC) {
- yacl = /*(480 / saa->win.height - 1) & 0x3f*/ 0;
+ yacl = /*(480 / saa->win.height - 1) & 0x3f */ 0;
ysci = 1024 - (saa->win.height * 1024 / 480);
} else {
- yacl = /*(576 / saa->win.height - 1) & 0x3f*/ 0;
+ yacl = /*(576 / saa->win.height - 1) & 0x3f */ 0;
ysci = 1024 - (saa->win.height * 1024 / 576);
}
- saawrite((1<<31)|(ysci<<21)|(yacl<<15), SAA7146_HPS_V_SCALE);
+ saawrite((1 << 31) | (ysci << 21) | (yacl << 15), SAA7146_HPS_V_SCALE);
saawrite(v_gain[yacl], SAA7146_HPS_V_GAIN);
saawrite(((SAA7146_MC2_UPLD_DMA1 | SAA7146_MC2_UPLD_HPS_V |
- SAA7146_MC2_UPLD_HPS_H) << 16) | (SAA7146_MC2_UPLD_DMA1 |
- SAA7146_MC2_UPLD_HPS_V | SAA7146_MC2_UPLD_HPS_H),
- SAA7146_MC2);
+ SAA7146_MC2_UPLD_HPS_H) << 16) | (SAA7146_MC2_UPLD_DMA1 |
+ SAA7146_MC2_UPLD_HPS_V | SAA7146_MC2_UPLD_HPS_H), SAA7146_MC2);
}
/* clip_draw_rectangle(cm,x,y,w,h) -- handle clipping an area
@@ -1261,8 +1222,8 @@ static void clip_draw_rectangle(u32 *clipmap, int x, int y, int w, int h)
startword = (x >> 5);
endword = ((x + w) >> 5);
bitsleft = (0xffffffff >> (x & 31));
- bitsright = (0xffffffff << (~((x + w) - (endword<<5))));
- temp = &clipmap[(y<<5) + startword];
+ bitsright = (0xffffffff << (~((x + w) - (endword << 5))));
+ temp = &clipmap[(y << 5) + startword];
w = endword - startword;
if (!w) {
bitsleft |= bitsright;
@@ -1287,13 +1248,13 @@ static void make_clip_tab(struct saa7146 *saa, struct video_clip *cr, int ncr)
u32 *clipmap;
clipmap = saa->dmavid2;
- if((width=saa->win.width)>1023)
- width = 1023; /* sanity check */
- if((height=saa->win.height)>640)
- height = 639; /* sanity check */
- if (ncr > 0) { /* rectangles pased */
+ if ((width = saa->win.width) > 1023)
+ width = 1023; /* sanity check */
+ if ((height = saa->win.height) > 640)
+ height = 639; /* sanity check */
+ if (ncr > 0) { /* rectangles passed */
/* convert rectangular clips to a bitmap */
- memset(clipmap, 0, VIDEO_CLIPMAP_SIZE); /* clear map */
+ memset(clipmap, 0, VIDEO_CLIPMAP_SIZE); /* clear map */
for (i = 0; i < ncr; i++)
clip_draw_rectangle(clipmap, cr[i].x, cr[i].y,
cr[i].width, cr[i].height);
@@ -1301,14 +1262,15 @@ static void make_clip_tab(struct saa7146 *saa, struct video_clip *cr, int ncr)
/* clip against viewing window AND screen
so we do not have to rely on the user program
*/
- clip_draw_rectangle(clipmap,(saa->win.x+width>saa->win.swidth) ?
- (saa->win.swidth-saa->win.x) : width, 0, 1024, 768);
- clip_draw_rectangle(clipmap,0,(saa->win.y+height>saa->win.sheight) ?
- (saa->win.sheight-saa->win.y) : height,1024,768);
- if (saa->win.x<0)
- clip_draw_rectangle(clipmap, 0, 0, -(saa->win.x), 768);
- if (saa->win.y<0)
- clip_draw_rectangle(clipmap, 0, 0, 1024, -(saa->win.y));
+ clip_draw_rectangle(clipmap, (saa->win.x + width > saa->win.swidth) ?
+ (saa->win.swidth - saa->win.x) : width, 0, 1024, 768);
+ clip_draw_rectangle(clipmap, 0,
+ (saa->win.y + height > saa->win.sheight) ?
+ (saa->win.sheight - saa->win.y) : height, 1024, 768);
+ if (saa->win.x < 0)
+ clip_draw_rectangle(clipmap, 0, 0, -saa->win.x, 768);
+ if (saa->win.y < 0)
+ clip_draw_rectangle(clipmap, 0, 0, 1024, -saa->win.y);
}
static int saa_ioctl(struct inode *inode, struct file *file,
@@ -1322,11 +1284,9 @@ static int saa_ioctl(struct inode *inode, struct file *file,
{
struct video_capability b;
strcpy(b.name, saa->video_dev.name);
- b.type = VID_TYPE_CAPTURE |
- VID_TYPE_OVERLAY |
- VID_TYPE_CLIPPING |
- VID_TYPE_FRAMERAM |
- VID_TYPE_SCALES;
+ b.type = VID_TYPE_CAPTURE | VID_TYPE_OVERLAY |
+ VID_TYPE_CLIPPING | VID_TYPE_FRAMERAM |
+ VID_TYPE_SCALES;
b.channels = 1;
b.audios = 1;
b.maxwidth = 768;
@@ -1363,17 +1323,18 @@ static int saa_ioctl(struct inode *inode, struct file *file,
if (p.palette < sizeof(palette2fmt) / sizeof(u32)) {
format = palette2fmt[p.palette];
saa->win.color_fmt = format;
- saawrite(format|0x60, SAA7146_CLIP_FORMAT_CTRL);
+ saawrite(format | 0x60,
+ SAA7146_CLIP_FORMAT_CTRL);
}
saawrite(((p.brightness & 0xff00) << 16) |
- ((p.contrast & 0xfe00) << 7) |
- ((p.colour & 0xfe00) >> 9), SAA7146_BCS_CTRL);
+ ((p.contrast & 0xfe00) << 7) |
+ ((p.colour & 0xfe00) >> 9), SAA7146_BCS_CTRL);
saa->picture = p;
/* upload changed registers */
saawrite(((SAA7146_MC2_UPLD_HPS_H |
- SAA7146_MC2_UPLD_HPS_V) << 16) |
- SAA7146_MC2_UPLD_HPS_H | SAA7146_MC2_UPLD_HPS_V,
- SAA7146_MC2);
+ SAA7146_MC2_UPLD_HPS_V) << 16) |
+ SAA7146_MC2_UPLD_HPS_H |
+ SAA7146_MC2_UPLD_HPS_V, SAA7146_MC2);
return 0;
}
case VIDIOCSWIN:
@@ -1384,11 +1345,14 @@ static int saa_ioctl(struct inode *inode, struct file *file,
if (copy_from_user(&vw, arg, sizeof(vw)))
return -EFAULT;
- if (vw.flags || vw.width < 16 || vw.height < 16) { /* stop capture */
- saawrite((SAA7146_MC1_TR_E_1 << 16), SAA7146_MC1);
+ /* stop capture */
+ if (vw.flags || vw.width < 16 || vw.height < 16) {
+ saawrite((SAA7146_MC1_TR_E_1 << 16),
+ SAA7146_MC1);
return -EINVAL;
}
- if (saa->win.bpp < 4) { /* 32-bit align start and adjust width */
+ /* 32-bit align start and adjust width */
+ if (saa->win.bpp < 4) {
int i = vw.x;
vw.x = (vw.x + 3) & ~3;
i = vw.x - i;
@@ -1417,23 +1381,24 @@ static int saa_ioctl(struct inode *inode, struct file *file,
*/
if (vw.clipcount < 0) {
if (copy_from_user(saa->dmavid2, vw.clips,
- VIDEO_CLIPMAP_SIZE))
+ VIDEO_CLIPMAP_SIZE))
return -EFAULT;
- }
- else if (vw.clipcount > 16384) {
+ } else if (vw.clipcount > 16384) {
return -EINVAL;
} else if (vw.clipcount > 0) {
- if ((vcp = vmalloc(sizeof(struct video_clip) *
- (vw.clipcount))) == NULL)
- return -ENOMEM;
+ vcp = vmalloc(sizeof(struct video_clip) *
+ vw.clipcount);
+ if (vcp == NULL)
+ return -ENOMEM;
if (copy_from_user(vcp, vw.clips,
- sizeof(struct video_clip) *
- vw.clipcount)) {
+ sizeof(struct video_clip) *
+ vw.clipcount)) {
vfree(vcp);
return -EFAULT;
}
} else /* nothing clipped */
memset(saa->dmavid2, 0, VIDEO_CLIPMAP_SIZE);
+
make_clip_tab(saa, vcp, vw.clipcount);
if (vw.clipcount > 0)
vfree(vcp);
@@ -1466,21 +1431,21 @@ static int saa_ioctl(struct inode *inode, struct file *file,
if (v == 0) {
saa->cap &= ~1;
saawrite((SAA7146_MC1_TR_E_1 << 16),
- SAA7146_MC1);
+ SAA7146_MC1);
} else {
if (saa->win.vidadr == 0 || saa->win.width == 0
- || saa->win.height == 0)
+ || saa->win.height == 0)
return -EINVAL;
saa->cap |= 1;
saawrite((SAA7146_MC1_TR_E_1 << 16) | 0xffff,
- SAA7146_MC1);
+ SAA7146_MC1);
}
return 0;
}
case VIDIOCGFBUF:
{
struct video_buffer v;
- v.base = (void *) saa->win.vidadr;
+ v.base = (void *)saa->win.vidadr;
v.height = saa->win.sheight;
v.width = saa->win.swidth;
v.depth = saa->win.depth;
@@ -1498,19 +1463,20 @@ static int saa_ioctl(struct inode *inode, struct file *file,
if (copy_from_user(&v, arg, sizeof(v)))
return -EFAULT;
if (v.depth != 8 && v.depth != 15 && v.depth != 16 &&
- v.depth != 24 && v.depth != 32 && v.width > 16 &&
+ v.depth != 24 && v.depth != 32 && v.width > 16 &&
v.height > 16 && v.bytesperline > 16)
return -EINVAL;
if (v.base)
- saa->win.vidadr = (unsigned long) v.base;
+ saa->win.vidadr = (unsigned long)v.base;
saa->win.sheight = v.height;
saa->win.swidth = v.width;
saa->win.bpp = ((v.depth + 7) & 0x38) / 8;
saa->win.depth = v.depth;
saa->win.bpl = v.bytesperline;
- DEBUG(printk("Display at %p is %d by %d, bytedepth %d, bpl %d\n",
- v.base, v.width, v.height, saa->win.bpp, saa->win.bpl));
+ DEBUG(printk("Display at %p is %d by %d, bytedepth %d, "
+ "bpl %d\n", v.base, v.width, v.height,
+ saa->win.bpp, saa->win.bpl));
saa7146_set_winsize(saa);
return 0;
}
@@ -1538,21 +1504,18 @@ static int saa_ioctl(struct inode *inode, struct file *file,
int i;
if (copy_from_user(&v, arg, sizeof(v)))
return -EFAULT;
- i = (~(v.volume>>8))&0xff;
+ i = (~(v.volume >> 8)) & 0xff;
if (!HaveCS4341) {
- if (v.flags & VIDEO_AUDIO_MUTE) {
+ if (v.flags & VIDEO_AUDIO_MUTE)
debiwrite(saa, debNormal,
- IBM_MP2_FRNT_ATTEN,
- 0xffff, 2);
- }
+ IBM_MP2_FRNT_ATTEN, 0xffff, 2);
if (!(v.flags & VIDEO_AUDIO_MUTE))
debiwrite(saa, debNormal,
- IBM_MP2_FRNT_ATTEN,
- 0x0000, 2);
+ IBM_MP2_FRNT_ATTEN, 0x0000, 2);
if (v.flags & VIDEO_AUDIO_VOLUME)
debiwrite(saa, debNormal,
IBM_MP2_FRNT_ATTEN,
- (i<<8)|i, 2);
+ (i << 8) | i, 2);
} else {
if (v.flags & VIDEO_AUDIO_MUTE)
cs4341_setlevel(saa, 0xff, 0xff);
@@ -1580,163 +1543,138 @@ static int saa_ioctl(struct inode *inode, struct file *file,
case VIDIOCSPLAYMODE:
{
struct video_play_mode pmode;
- if (copy_from_user((void *) &pmode, arg,
- sizeof(struct video_play_mode)))
+ if (copy_from_user((void *)&pmode, arg,
+ sizeof(struct video_play_mode)))
return -EFAULT;
switch (pmode.mode) {
- case VID_PLAY_VID_OUT_MODE:
- if (pmode.p1 != VIDEO_MODE_NTSC &&
+ case VID_PLAY_VID_OUT_MODE:
+ if (pmode.p1 != VIDEO_MODE_NTSC &&
pmode.p1 != VIDEO_MODE_PAL)
- return -EINVAL;
- set_out_format(saa, pmode.p1);
- return 0;
- case VID_PLAY_GENLOCK:
- debiwrite(saa, debNormal,
- XILINX_CTL0,
- (pmode.p1 ? 0x8000 : 0x8080),
- 2);
- if (NewCard)
- set_genlock_offset(saa,
- pmode.p2);
- return 0;
- case VID_PLAY_NORMAL:
- debiwrite(saa, debNormal,
- IBM_MP2_CHIP_CONTROL,
- ChipControl, 2);
- ibm_send_command(saa,
- IBM_MP2_PLAY, 0, 0);
- saa->playmode = pmode.mode;
- return 0;
- case VID_PLAY_PAUSE:
- /* IBM removed the PAUSE command */
- /* they say use SINGLE_FRAME now */
- case VID_PLAY_SINGLE_FRAME:
- ibm_send_command(saa,
- IBM_MP2_SINGLE_FRAME,
- 0, 0);
- if (saa->playmode == pmode.mode) {
- debiwrite(saa, debNormal,
- IBM_MP2_CHIP_CONTROL,
- ChipControl, 2);
- }
- saa->playmode = pmode.mode;
- return 0;
- case VID_PLAY_FAST_FORWARD:
- ibm_send_command(saa,
- IBM_MP2_FAST_FORWARD, 0, 0);
- saa->playmode = pmode.mode;
- return 0;
- case VID_PLAY_SLOW_MOTION:
- ibm_send_command(saa,
- IBM_MP2_SLOW_MOTION,
- pmode.p1, 0);
- saa->playmode = pmode.mode;
- return 0;
- case VID_PLAY_IMMEDIATE_NORMAL:
- /* ensure transfers resume */
- debiwrite(saa, debNormal,
- IBM_MP2_CHIP_CONTROL,
- ChipControl, 2);
- ibm_send_command(saa,
- IBM_MP2_IMED_NORM_PLAY, 0, 0);
- saa->playmode = VID_PLAY_NORMAL;
- return 0;
- case VID_PLAY_SWITCH_CHANNELS:
- saa->audhead = saa->audtail = 0;
- saa->vidhead = saa->vidtail = 0;
- ibm_send_command(saa,
- IBM_MP2_FREEZE_FRAME, 0, 1);
- ibm_send_command(saa,
- IBM_MP2_RESET_AUD_RATE, 0, 1);
- debiwrite(saa, debNormal,
- IBM_MP2_CHIP_CONTROL, 0, 2);
- ibm_send_command(saa,
- IBM_MP2_CHANNEL_SWITCH, 0, 1);
+ return -EINVAL;
+ set_out_format(saa, pmode.p1);
+ return 0;
+ case VID_PLAY_GENLOCK:
+ debiwrite(saa, debNormal, XILINX_CTL0,
+ pmode.p1 ? 0x8000 : 0x8080, 2);
+ if (NewCard)
+ set_genlock_offset(saa, pmode.p2);
+ return 0;
+ case VID_PLAY_NORMAL:
+ debiwrite(saa, debNormal,
+ IBM_MP2_CHIP_CONTROL, ChipControl, 2);
+ ibm_send_command(saa, IBM_MP2_PLAY, 0, 0);
+ saa->playmode = pmode.mode;
+ return 0;
+ case VID_PLAY_PAUSE:
+ /* IBM removed the PAUSE command */
+ /* they say use SINGLE_FRAME now */
+ case VID_PLAY_SINGLE_FRAME:
+ ibm_send_command(saa, IBM_MP2_SINGLE_FRAME, 0, 0);
+ if (saa->playmode == pmode.mode) {
debiwrite(saa, debNormal,
IBM_MP2_CHIP_CONTROL,
ChipControl, 2);
- ibm_send_command(saa,
- IBM_MP2_PLAY, 0, 0);
- saa->playmode = VID_PLAY_NORMAL;
- return 0;
- case VID_PLAY_FREEZE_FRAME:
- ibm_send_command(saa,
- IBM_MP2_FREEZE_FRAME, 0, 0);
- saa->playmode = pmode.mode;
- return 0;
- case VID_PLAY_STILL_MODE:
- ibm_send_command(saa,
- IBM_MP2_SET_STILL_MODE, 0, 0);
- saa->playmode = pmode.mode;
- return 0;
- case VID_PLAY_MASTER_MODE:
- if (pmode.p1 == VID_PLAY_MASTER_NONE)
- saa->boardcfg[1] = 0x13;
- else if (pmode.p1 ==
- VID_PLAY_MASTER_VIDEO)
- saa->boardcfg[1] = 0x23;
- else if (pmode.p1 ==
- VID_PLAY_MASTER_AUDIO)
- saa->boardcfg[1] = 0x43;
- else
+ }
+ saa->playmode = pmode.mode;
+ return 0;
+ case VID_PLAY_FAST_FORWARD:
+ ibm_send_command(saa, IBM_MP2_FAST_FORWARD, 0, 0);
+ saa->playmode = pmode.mode;
+ return 0;
+ case VID_PLAY_SLOW_MOTION:
+ ibm_send_command(saa, IBM_MP2_SLOW_MOTION,
+ pmode.p1, 0);
+ saa->playmode = pmode.mode;
+ return 0;
+ case VID_PLAY_IMMEDIATE_NORMAL:
+ /* ensure transfers resume */
+ debiwrite(saa, debNormal,
+ IBM_MP2_CHIP_CONTROL, ChipControl, 2);
+ ibm_send_command(saa, IBM_MP2_IMED_NORM_PLAY,
+ 0, 0);
+ saa->playmode = VID_PLAY_NORMAL;
+ return 0;
+ case VID_PLAY_SWITCH_CHANNELS:
+ saa->audhead = saa->audtail = 0;
+ saa->vidhead = saa->vidtail = 0;
+ ibm_send_command(saa, IBM_MP2_FREEZE_FRAME, 0, 1);
+ ibm_send_command(saa, IBM_MP2_RESET_AUD_RATE,
+ 0, 1);
+ debiwrite(saa, debNormal, IBM_MP2_CHIP_CONTROL,
+ 0, 2);
+ ibm_send_command(saa, IBM_MP2_CHANNEL_SWITCH,
+ 0, 1);
+ debiwrite(saa, debNormal, IBM_MP2_CHIP_CONTROL,
+ ChipControl, 2);
+ ibm_send_command(saa, IBM_MP2_PLAY, 0, 0);
+ saa->playmode = VID_PLAY_NORMAL;
+ return 0;
+ case VID_PLAY_FREEZE_FRAME:
+ ibm_send_command(saa, IBM_MP2_FREEZE_FRAME, 0, 0);
+ saa->playmode = pmode.mode;
+ return 0;
+ case VID_PLAY_STILL_MODE:
+ ibm_send_command(saa, IBM_MP2_SET_STILL_MODE,
+ 0, 0);
+ saa->playmode = pmode.mode;
+ return 0;
+ case VID_PLAY_MASTER_MODE:
+ if (pmode.p1 == VID_PLAY_MASTER_NONE)
+ saa->boardcfg[1] = 0x13;
+ else if (pmode.p1 == VID_PLAY_MASTER_VIDEO)
+ saa->boardcfg[1] = 0x23;
+ else if (pmode.p1 == VID_PLAY_MASTER_AUDIO)
+ saa->boardcfg[1] = 0x43;
+ else
+ return -EINVAL;
+ debiwrite(saa, debNormal,
+ IBM_MP2_CHIP_CONTROL, ChipControl, 2);
+ return 0;
+ case VID_PLAY_ACTIVE_SCANLINES:
+ if (CurrentMode == VIDEO_MODE_PAL) {
+ if (pmode.p1 < 1 || pmode.p2 > 625)
return -EINVAL;
- debiwrite(saa, debNormal,
- IBM_MP2_CHIP_CONTROL,
- ChipControl, 2);
- return 0;
- case VID_PLAY_ACTIVE_SCANLINES:
- if (CurrentMode == VIDEO_MODE_PAL) {
- if (pmode.p1 < 1 ||
- pmode.p2 > 625)
- return -EINVAL;
- saa->boardcfg[5] = pmode.p1;
- saa->boardcfg[55] = (pmode.p1 +
- (pmode.p2/2) - 1) &
- 0xff;
- } else {
- if (pmode.p1 < 4 ||
- pmode.p2 > 525)
- return -EINVAL;
- saa->boardcfg[4] = pmode.p1;
- saa->boardcfg[54] = (pmode.p1 +
- (pmode.p2/2) - 4) &
- 0xff;
- }
- set_out_format(saa, CurrentMode);
- case VID_PLAY_RESET:
- return do_ibm_reset(saa);
- case VID_PLAY_END_MARK:
- if (saa->endmarktail <
- saa->endmarkhead) {
- if (saa->endmarkhead -
+ saa->boardcfg[5] = pmode.p1;
+ saa->boardcfg[55] = (pmode.p1 +
+ (pmode.p2 / 2) - 1) & 0xff;
+ } else {
+ if (pmode.p1 < 4 || pmode.p2 > 525)
+ return -EINVAL;
+ saa->boardcfg[4] = pmode.p1;
+ saa->boardcfg[54] = (pmode.p1 +
+ (pmode.p2 / 2) - 4) & 0xff;
+ }
+ set_out_format(saa, CurrentMode);
+ case VID_PLAY_RESET:
+ return do_ibm_reset(saa);
+ case VID_PLAY_END_MARK:
+ if (saa->endmarktail < saa->endmarkhead) {
+ if (saa->endmarkhead -
saa->endmarktail < 2)
- return -ENOSPC;
- } else if (saa->endmarkhead <=
- saa->endmarktail) {
- if (saa->endmarktail -
- saa->endmarkhead >
- (MAX_MARKS - 2))
- return -ENOSPC;
- } else
return -ENOSPC;
- saa->endmark[saa->endmarktail] =
- saa->audtail;
- saa->endmarktail++;
- if (saa->endmarktail >= MAX_MARKS)
- saa->endmarktail = 0;
+ } else if (saa->endmarkhead <= saa->endmarktail) {
+ if (saa->endmarktail - saa->endmarkhead
+ > (MAX_MARKS - 2))
+ return -ENOSPC;
+ } else
+ return -ENOSPC;
+ saa->endmark[saa->endmarktail] = saa->audtail;
+ saa->endmarktail++;
+ if (saa->endmarktail >= MAX_MARKS)
+ saa->endmarktail = 0;
}
return -EINVAL;
}
case VIDIOCSWRITEMODE:
{
int mode;
- if (copy_from_user((void *) &mode, arg, sizeof(int)))
- return -EFAULT;
+ if (copy_from_user((void *)&mode, arg, sizeof(int)))
+ return -EFAULT;
if (mode == VID_WRITE_MPEG_AUD ||
- mode == VID_WRITE_MPEG_VID ||
- mode == VID_WRITE_CC ||
- mode == VID_WRITE_TTX ||
- mode == VID_WRITE_OSD) {
+ mode == VID_WRITE_MPEG_VID ||
+ mode == VID_WRITE_CC ||
+ mode == VID_WRITE_TTX ||
+ mode == VID_WRITE_OSD) {
saa->writemode = mode;
return 0;
}
@@ -1750,7 +1688,7 @@ static int saa_ioctl(struct inode *inode, struct file *file,
if (copy_from_user(&ucode, arg, sizeof(ucode)))
return -EFAULT;
if (ucode.datasize > 65536 || ucode.datasize < 1024 ||
- strncmp(ucode.loadwhat, "dec", 3))
+ strncmp(ucode.loadwhat, "dec", 3))
return -EINVAL;
if ((udata = vmalloc(ucode.datasize)) == NULL)
return -ENOMEM;
@@ -1759,8 +1697,8 @@ static int saa_ioctl(struct inode *inode, struct file *file,
return -EFAULT;
}
ucode.data = udata;
- if (!strncmp(ucode.loadwhat, "decoder.aud", 11)
- || !strncmp(ucode.loadwhat, "decoder.vid", 11))
+ if (!strncmp(ucode.loadwhat, "decoder.aud", 11) ||
+ !strncmp(ucode.loadwhat, "decoder.vid", 11))
i = initialize_ibmmpeg2(&ucode);
else
i = initialize_fpga(&ucode);
@@ -1805,14 +1743,14 @@ static int saa_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
}
-static ssize_t saa_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t saa_read(struct file *file, char __user * buf,
+ size_t count, loff_t * ppos)
{
return -EINVAL;
}
-static ssize_t saa_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t saa_write(struct file *file, const char __user * buf,
+ size_t count, loff_t * ppos)
{
struct saa7146 *saa = file->private_data;
unsigned long todo = count;
@@ -1823,20 +1761,22 @@ static ssize_t saa_write(struct file *file, const char __user *buf,
if (saa->writemode == VID_WRITE_MPEG_AUD) {
spin_lock_irqsave(&saa->lock, flags);
if (saa->audhead <= saa->audtail)
- blocksize = 65536-(saa->audtail - saa->audhead);
+ blocksize = 65536 -
+ (saa->audtail - saa->audhead);
else
blocksize = saa->audhead - saa->audtail;
spin_unlock_irqrestore(&saa->lock, flags);
if (blocksize < 16384) {
saawrite(SAA7146_PSR_DEBI_S |
- SAA7146_PSR_PIN1, SAA7146_IER);
+ SAA7146_PSR_PIN1, SAA7146_IER);
saawrite(SAA7146_PSR_PIN1, SAA7146_PSR);
/* wait for buffer space to open */
interruptible_sleep_on(&saa->audq);
}
spin_lock_irqsave(&saa->lock, flags);
if (saa->audhead <= saa->audtail) {
- blocksize = 65536-(saa->audtail - saa->audhead);
+ blocksize = 65536 -
+ (saa->audtail - saa->audhead);
split = 65536 - saa->audtail;
} else {
blocksize = saa->audhead - saa->audtail;
@@ -1851,7 +1791,7 @@ static ssize_t saa_write(struct file *file, const char __user *buf,
return -ENOSPC;
if (split < blocksize) {
if (copy_from_user(saa->audbuf +
- saa->audtail, buf, split))
+ saa->audtail, buf, split))
return -EFAULT;
buf += split;
todo -= split;
@@ -1859,7 +1799,7 @@ static ssize_t saa_write(struct file *file, const char __user *buf,
saa->audtail = 0;
}
if (copy_from_user(saa->audbuf + saa->audtail, buf,
- blocksize))
+ blocksize))
return -EFAULT;
saa->audtail += blocksize;
todo -= blocksize;
@@ -1868,20 +1808,22 @@ static ssize_t saa_write(struct file *file, const char __user *buf,
} else if (saa->writemode == VID_WRITE_MPEG_VID) {
spin_lock_irqsave(&saa->lock, flags);
if (saa->vidhead <= saa->vidtail)
- blocksize=524288-(saa->vidtail - saa->vidhead);
+ blocksize = 524288 -
+ (saa->vidtail - saa->vidhead);
else
blocksize = saa->vidhead - saa->vidtail;
spin_unlock_irqrestore(&saa->lock, flags);
if (blocksize < 65536) {
saawrite(SAA7146_PSR_DEBI_S |
- SAA7146_PSR_PIN1, SAA7146_IER);
+ SAA7146_PSR_PIN1, SAA7146_IER);
saawrite(SAA7146_PSR_PIN1, SAA7146_PSR);
/* wait for buffer space to open */
interruptible_sleep_on(&saa->vidq);
}
spin_lock_irqsave(&saa->lock, flags);
if (saa->vidhead <= saa->vidtail) {
- blocksize=524288-(saa->vidtail - saa->vidhead);
+ blocksize = 524288 -
+ (saa->vidtail - saa->vidhead);
split = 524288 - saa->vidtail;
} else {
blocksize = saa->vidhead - saa->vidtail;
@@ -1896,7 +1838,7 @@ static ssize_t saa_write(struct file *file, const char __user *buf,
return -ENOSPC;
if (split < blocksize) {
if (copy_from_user(saa->vidbuf +
- saa->vidtail, buf, split))
+ saa->vidtail, buf, split))
return -EFAULT;
buf += split;
todo -= split;
@@ -1904,7 +1846,7 @@ static ssize_t saa_write(struct file *file, const char __user *buf,
saa->vidtail = 0;
}
if (copy_from_user(saa->vidbuf + saa->vidtail, buf,
- blocksize))
+ blocksize))
return -EFAULT;
saa->vidtail += blocksize;
todo -= blocksize;
@@ -1922,8 +1864,8 @@ static ssize_t saa_write(struct file *file, const char __user *buf,
debiwrite(saa, debNormal, IBM_MP2_OSD_LINK_ADDR, 0, 2);
debiwrite(saa, debNormal, IBM_MP2_MASK0, 0xc00d, 2);
debiwrite(saa, debNormal, IBM_MP2_DISP_MODE,
- debiread(saa, debNormal,
- IBM_MP2_DISP_MODE, 2) | 1, 2);
+ debiread(saa, debNormal,
+ IBM_MP2_DISP_MODE, 2) | 1, 2);
/* trigger osd data transfer */
saawrite(SAA7146_PSR_DEBI_S |
SAA7146_PSR_PIN1, SAA7146_IER);
@@ -1935,21 +1877,11 @@ static ssize_t saa_write(struct file *file, const char __user *buf,
static int saa_open(struct inode *inode, struct file *file)
{
- struct saa7146 *saa = NULL;
- unsigned int minor = iminor(inode);
- int i;
+ struct video_device *vdev = video_devdata(file);
+ struct saa7146 *saa = container_of(vdev, struct saa7146, video_dev);
- for (i = 0; i < SAA7146_MAX; i++) {
- if (saa7146s[i].video_dev.minor == minor) {
- saa = &saa7146s[i];
- }
- }
- if (saa == NULL) {
- return -ENODEV;
- }
file->private_data = saa;
- //saa->video_dev.busy = 0; /* old hack to support multiple open */
saa->user++;
if (saa->user > 1)
return 0; /* device open already, don't reset */
@@ -1961,42 +1893,39 @@ static int saa_release(struct inode *inode, struct file *file)
{
struct saa7146 *saa = file->private_data;
saa->user--;
- //saa->video_dev.busy = 0; /* old hack to support multiple open */
+
if (saa->user > 0) /* still someone using device */
return 0;
saawrite(0x007f0000, SAA7146_MC1); /* stop all overlay dma */
return 0;
}
-static struct file_operations saa_fops =
-{
- .owner = THIS_MODULE,
- .open = saa_open,
- .release = saa_release,
- .ioctl = saa_ioctl,
- .read = saa_read,
- .llseek = no_llseek,
- .write = saa_write,
- .mmap = saa_mmap,
+static struct file_operations saa_fops = {
+ .owner = THIS_MODULE,
+ .open = saa_open,
+ .release = saa_release,
+ .ioctl = saa_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
+ .read = saa_read,
+ .llseek = no_llseek,
+ .write = saa_write,
+ .mmap = saa_mmap,
};
/* template for video_device-structure */
-static struct video_device saa_template =
-{
- .name = "SAA7146A",
- .type = VID_TYPE_CAPTURE | VID_TYPE_OVERLAY,
- .hardware = VID_HARDWARE_SAA7146,
- .fops = &saa_fops,
- .minor = -1,
+static struct video_device saa_template = {
+ .name = "SAA7146A",
+ .type = VID_TYPE_CAPTURE | VID_TYPE_OVERLAY,
+ .hardware = VID_HARDWARE_SAA7146,
+ .fops = &saa_fops,
+ .minor = -1,
};
-static int configure_saa7146(struct pci_dev *dev, int num)
+static int __devinit configure_saa7146(struct pci_dev *pdev, int num)
{
- int result;
- struct saa7146 *saa;
+ int retval;
+ struct saa7146 *saa = pci_get_drvdata(pdev);
- saa = &saa7146s[num];
-
saa->endmarkhead = saa->endmarktail = 0;
saa->win.x = saa->win.y = 0;
saa->win.width = saa->win.cropwidth = 720;
@@ -2012,7 +1941,6 @@ static int configure_saa7146(struct pci_dev *dev, int num)
saa->picture.contrast = 38768;
saa->picture.colour = 32768;
saa->cap = 0;
- saa->dev = dev;
saa->nr = num;
saa->playmode = VID_PLAY_NORMAL;
memset(saa->boardcfg, 0, 64); /* clear board config area */
@@ -2032,45 +1960,59 @@ static int configure_saa7146(struct pci_dev *dev, int num)
init_waitqueue_head(&saa->vidq);
spin_lock_init(&saa->lock);
- if (pci_enable_device(dev))
- return -EIO;
-
- saa->id = dev->device;
- saa->irq = dev->irq;
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "%d: pci_enable_device failed!\n", num);
+ goto err;
+ }
+
+ saa->id = pdev->device;
+ saa->irq = pdev->irq;
saa->video_dev.minor = -1;
- saa->saa7146_adr = pci_resource_start(dev, 0);
- pci_read_config_byte(dev, PCI_CLASS_REVISION, &saa->revision);
+ saa->saa7146_adr = pci_resource_start(pdev, 0);
+ pci_read_config_byte(pdev, PCI_CLASS_REVISION, &saa->revision);
saa->saa7146_mem = ioremap(saa->saa7146_adr, 0x200);
- if (!saa->saa7146_mem)
- return -EIO;
+ if (saa->saa7146_mem == NULL) {
+ dev_err(&pdev->dev, "%d: ioremap failed!\n", num);
+ retval = -EIO;
+ goto err;
+ }
memcpy(&saa->video_dev, &saa_template, sizeof(saa_template));
saawrite(0, SAA7146_IER); /* turn off all interrupts */
- result = request_irq(saa->irq, saa7146_irq,
- SA_SHIRQ | SA_INTERRUPT, "stradis", (void *) saa);
- if (result == -EINVAL)
- printk(KERN_ERR "stradis%d: Bad irq number or handler\n",
- num);
- if (result == -EBUSY)
- printk(KERN_ERR "stradis%d: IRQ %ld busy, change your PnP"
- " config in BIOS\n", num, saa->irq);
- if (result < 0) {
- iounmap(saa->saa7146_mem);
- return result;
- }
- pci_set_master(dev);
- if (video_register_device(&saa->video_dev, VFL_TYPE_GRABBER, video_nr) < 0) {
- iounmap(saa->saa7146_mem);
- return -1;
+
+ retval = request_irq(saa->irq, saa7146_irq, SA_SHIRQ | SA_INTERRUPT,
+ "stradis", saa);
+ if (retval == -EINVAL)
+ dev_err(&pdev->dev, "%d: Bad irq number or handler\n", num);
+ else if (retval == -EBUSY)
+ dev_err(&pdev->dev, "%d: IRQ %ld busy, change your PnP config "
+ "in BIOS\n", num, saa->irq);
+ if (retval < 0)
+ goto errio;
+
+ pci_set_master(pdev);
+ retval = video_register_device(&saa->video_dev, VFL_TYPE_GRABBER,
+ video_nr);
+ if (retval < 0) {
+ dev_err(&pdev->dev, "%d: error in registering video device!\n",
+ num);
+ goto errio;
}
+
return 0;
+errio:
+ iounmap(saa->saa7146_mem);
+err:
+ return retval;
}
-static int init_saa7146(int i)
+static int __devinit init_saa7146(struct pci_dev *pdev)
{
- struct saa7146 *saa = &saa7146s[i];
+ struct saa7146 *saa = pci_get_drvdata(pdev);
+ memset(saa, 0, sizeof(*saa));
saa->user = 0;
/* reset the saa7146 */
saawrite(0xffff0000, SAA7146_MC1);
@@ -2088,23 +2030,23 @@ static int init_saa7146(int i)
saawrite(0x00000000, SAA7146_DD1_STREAM_A);
saawrite(0x00000000, SAA7146_BRS_CTRL);
saawrite(0x80400040, SAA7146_BCS_CTRL);
- saawrite(0x0000e000 /*| (1<<29)*/, SAA7146_HPS_CTRL);
+ saawrite(0x0000e000 /*| (1<<29) */, SAA7146_HPS_CTRL);
saawrite(0x00000060, SAA7146_CLIP_FORMAT_CTRL);
saawrite(0x00000000, SAA7146_ACON1);
saawrite(0x00000000, SAA7146_ACON2);
saawrite(0x00000600, SAA7146_I2C_STATUS);
saawrite(((SAA7146_MC2_UPLD_D1_B | SAA7146_MC2_UPLD_D1_A |
- SAA7146_MC2_UPLD_BRS | SAA7146_MC2_UPLD_HPS_H |
- SAA7146_MC2_UPLD_HPS_V | SAA7146_MC2_UPLD_DMA2 |
- SAA7146_MC2_UPLD_DMA1 | SAA7146_MC2_UPLD_I2C) << 16) | 0xffff,
- SAA7146_MC2);
+ SAA7146_MC2_UPLD_BRS | SAA7146_MC2_UPLD_HPS_H |
+ SAA7146_MC2_UPLD_HPS_V | SAA7146_MC2_UPLD_DMA2 |
+ SAA7146_MC2_UPLD_DMA1 | SAA7146_MC2_UPLD_I2C) << 16) | 0xffff,
+ SAA7146_MC2);
/* setup arbitration control registers */
saawrite(0x1412121a, SAA7146_PCI_BT_V1);
/* allocate 32k dma buffer + 4k for page table */
if ((saa->dmadebi = kmalloc(32768 + 4096, GFP_KERNEL)) == NULL) {
- printk(KERN_ERR "stradis%d: debi kmalloc failed\n", i);
- return -1;
+ dev_err(&pdev->dev, "%d: debi kmalloc failed\n", saa->nr);
+ goto err;
}
#if 0
saa->pagedebi = saa->dmadebi + 32768; /* top 4k is for mmu */
@@ -2114,133 +2056,162 @@ static int init_saa7146(int i)
#endif
saa->audhead = saa->vidhead = saa->osdhead = 0;
saa->audtail = saa->vidtail = saa->osdtail = 0;
- if (saa->vidbuf == NULL)
- if ((saa->vidbuf = vmalloc(524288)) == NULL) {
- printk(KERN_ERR "stradis%d: malloc failed\n", saa->nr);
- return -ENOMEM;
- }
- if (saa->audbuf == NULL)
- if ((saa->audbuf = vmalloc(65536)) == NULL) {
- printk(KERN_ERR "stradis%d: malloc failed\n", saa->nr);
- vfree(saa->vidbuf);
- saa->vidbuf = NULL;
- return -ENOMEM;
- }
- if (saa->osdbuf == NULL)
- if ((saa->osdbuf = vmalloc(131072)) == NULL) {
- printk(KERN_ERR "stradis%d: malloc failed\n", saa->nr);
- vfree(saa->vidbuf);
- vfree(saa->audbuf);
- saa->vidbuf = saa->audbuf = NULL;
- return -ENOMEM;
- }
+ if (saa->vidbuf == NULL && (saa->vidbuf = vmalloc(524288)) == NULL) {
+ dev_err(&pdev->dev, "%d: malloc failed\n", saa->nr);
+ goto err;
+ }
+ if (saa->audbuf == NULL && (saa->audbuf = vmalloc(65536)) == NULL) {
+ dev_err(&pdev->dev, "%d: malloc failed\n", saa->nr);
+ goto errvid;
+ }
+ if (saa->osdbuf == NULL && (saa->osdbuf = vmalloc(131072)) == NULL) {
+ dev_err(&pdev->dev, "%d: malloc failed\n", saa->nr);
+ goto erraud;
+ }
/* allocate 81920 byte buffer for clipping */
- if ((saa->dmavid2 = kmalloc(VIDEO_CLIPMAP_SIZE, GFP_KERNEL)) == NULL) {
- printk(KERN_ERR "stradis%d: clip kmalloc failed\n", saa->nr);
- vfree(saa->vidbuf);
- vfree(saa->audbuf);
- vfree(saa->osdbuf);
- saa->vidbuf = saa->audbuf = saa->osdbuf = NULL;
- saa->dmavid2 = NULL;
- return -1;
+ if ((saa->dmavid2 = kzalloc(VIDEO_CLIPMAP_SIZE, GFP_KERNEL)) == NULL) {
+ dev_err(&pdev->dev, "%d: clip kmalloc failed\n", saa->nr);
+ goto errosd;
}
- memset(saa->dmavid2, 0x00, VIDEO_CLIPMAP_SIZE); /* clip everything */
/* setup clipping registers */
saawrite(virt_to_bus(saa->dmavid2), SAA7146_BASE_EVEN2);
saawrite(virt_to_bus(saa->dmavid2) + 128, SAA7146_BASE_ODD2);
saawrite(virt_to_bus(saa->dmavid2) + VIDEO_CLIPMAP_SIZE,
- SAA7146_PROT_ADDR2);
+ SAA7146_PROT_ADDR2);
saawrite(256, SAA7146_PITCH2);
- saawrite(4, SAA7146_PAGE2); /* dma direction: read, no byteswap */
+ saawrite(4, SAA7146_PAGE2); /* dma direction: read, no byteswap */
saawrite(((SAA7146_MC2_UPLD_DMA2) << 16) | SAA7146_MC2_UPLD_DMA2,
SAA7146_MC2);
I2CBusScan(saa);
+
return 0;
+errosd:
+ vfree(saa->osdbuf);
+ saa->osdbuf = NULL;
+erraud:
+ vfree(saa->audbuf);
+ saa->audbuf = NULL;
+errvid:
+ vfree(saa->vidbuf);
+ saa->vidbuf = NULL;
+err:
+ return -ENOMEM;
}
-static void release_saa(void)
+static void stradis_release_saa(struct pci_dev *pdev)
{
u8 command;
- int i;
- struct saa7146 *saa;
+ struct saa7146 *saa = pci_get_drvdata(pdev);
- for (i = 0; i < saa_num; i++) {
- saa = &saa7146s[i];
+ /* turn off all capturing, DMA and IRQs */
+ saawrite(0xffff0000, SAA7146_MC1); /* reset chip */
+ saawrite(0, SAA7146_MC2);
+ saawrite(0, SAA7146_IER);
+ saawrite(0xffffffffUL, SAA7146_ISR);
+
+ /* disable PCI bus-mastering */
+ pci_read_config_byte(pdev, PCI_COMMAND, &command);
+ command &= ~PCI_COMMAND_MASTER;
+ pci_write_config_byte(pdev, PCI_COMMAND, command);
+
+ /* unmap and free memory */
+ saa->audhead = saa->audtail = saa->osdhead = 0;
+ saa->vidhead = saa->vidtail = saa->osdtail = 0;
+ vfree(saa->vidbuf);
+ vfree(saa->audbuf);
+ vfree(saa->osdbuf);
+ kfree(saa->dmavid2);
+ saa->audbuf = saa->vidbuf = saa->osdbuf = NULL;
+ saa->dmavid2 = NULL;
+ kfree(saa->dmadebi);
+ kfree(saa->dmavid1);
+ kfree(saa->dmavid3);
+ kfree(saa->dmaa1in);
+ kfree(saa->dmaa1out);
+ kfree(saa->dmaa2in);
+ kfree(saa->dmaa2out);
+ kfree(saa->dmaRPS1);
+ kfree(saa->dmaRPS2);
+ free_irq(saa->irq, saa);
+ if (saa->saa7146_mem)
+ iounmap(saa->saa7146_mem);
+ if (saa->video_dev.minor != -1)
+ video_unregister_device(&saa->video_dev);
+}
- /* turn off all capturing, DMA and IRQs */
- saawrite(0xffff0000, SAA7146_MC1); /* reset chip */
- saawrite(0, SAA7146_MC2);
- saawrite(0, SAA7146_IER);
- saawrite(0xffffffffUL, SAA7146_ISR);
-
- /* disable PCI bus-mastering */
- pci_read_config_byte(saa->dev, PCI_COMMAND, &command);
- command &= ~PCI_COMMAND_MASTER;
- pci_write_config_byte(saa->dev, PCI_COMMAND, command);
-
- /* unmap and free memory */
- saa->audhead = saa->audtail = saa->osdhead = 0;
- saa->vidhead = saa->vidtail = saa->osdtail = 0;
- vfree(saa->vidbuf);
- vfree(saa->audbuf);
- vfree(saa->osdbuf);
- kfree(saa->dmavid2);
- saa->audbuf = saa->vidbuf = saa->osdbuf = NULL;
- saa->dmavid2 = NULL;
- kfree(saa->dmadebi);
- kfree(saa->dmavid1);
- kfree(saa->dmavid3);
- kfree(saa->dmaa1in);
- kfree(saa->dmaa1out);
- kfree(saa->dmaa2in);
- kfree(saa->dmaa2out);
- kfree(saa->dmaRPS1);
- kfree(saa->dmaRPS2);
- free_irq(saa->irq, saa);
- if (saa->saa7146_mem)
- iounmap(saa->saa7146_mem);
- if (saa->video_dev.minor != -1)
- video_unregister_device(&saa->video_dev);
+static int __devinit stradis_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int retval = -EINVAL;
+
+ if (saa_num >= SAA7146_MAX)
+ goto err;
+
+ if (!pdev->subsystem_vendor)
+ dev_info(&pdev->dev, "%d: rev1 decoder\n", saa_num);
+ else
+ dev_info(&pdev->dev, "%d: SDM2xx found\n", saa_num);
+
+ pci_set_drvdata(pdev, &saa7146s[saa_num]);
+
+ retval = configure_saa7146(pdev, saa_num);
+ if (retval) {
+ dev_err(&pdev->dev, "%d: error in configuring\n", saa_num);
+ goto err;
+ }
+
+ if (init_saa7146(pdev) < 0) {
+ dev_err(&pdev->dev, "%d: error in initialization\n", saa_num);
+ retval = -EIO;
+ goto errrel;
}
+
+ saa_num++;
+
+ return 0;
+errrel:
+ stradis_release_saa(pdev);
+err:
+ return retval;
+}
+
+static void __devexit stradis_remove(struct pci_dev *pdev)
+{
+ stradis_release_saa(pdev);
}
+static struct pci_device_id stradis_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_PHILIPS, PCI_DEVICE_ID_PHILIPS_SAA7146) },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, stradis_pci_tbl);
+
+static struct pci_driver stradis_driver = {
+ .name = "stradis",
+ .id_table = stradis_pci_tbl,
+ .probe = stradis_probe,
+ .remove = __devexit_p(stradis_remove)
+};
-static int __init stradis_init (void)
+int __init stradis_init(void)
{
- struct pci_dev *dev = NULL;
- int result = 0, i;
+ int retval;
saa_num = 0;
- while ((dev = pci_find_device(PCI_VENDOR_ID_PHILIPS, PCI_DEVICE_ID_PHILIPS_SAA7146, dev))) {
- if (!dev->subsystem_vendor)
- printk(KERN_INFO "stradis%d: rev1 decoder\n", saa_num);
- else
- printk(KERN_INFO "stradis%d: SDM2xx found\n", saa_num);
- result = configure_saa7146(dev, saa_num++);
- if (result)
- return result;
- }
- if (saa_num)
- printk(KERN_INFO "stradis: %d card(s) found.\n", saa_num);
- else
- return -EINVAL;
- for (i = 0; i < saa_num; i++)
- if (init_saa7146(i) < 0) {
- release_saa();
- return -EIO;
- }
- return 0;
-}
+ retval = pci_register_driver(&stradis_driver);
+ if (retval)
+ printk(KERN_ERR "stradis: Unable to register pci driver.\n");
+ return retval;
+}
-static void __exit stradis_exit (void)
+void __exit stradis_exit(void)
{
- release_saa();
+ pci_unregister_driver(&stradis_driver);
printk(KERN_INFO "stradis: module cleanup complete\n");
}
-
module_init(stradis_init);
module_exit(stradis_exit);
-
diff --git a/drivers/media/video/tda7432.c b/drivers/media/video/tda7432.c
index d32737dd214..fc3d5824eff 100644
--- a/drivers/media/video/tda7432.c
+++ b/drivers/media/video/tda7432.c
@@ -50,6 +50,7 @@
#include "bttv.h"
#include <media/audiochip.h>
+#include <media/v4l2-common.h>
#ifndef VIDEO_AUDIO_BALANCE
# define VIDEO_AUDIO_BALANCE 32
@@ -90,9 +91,6 @@ struct tda7432 {
static struct i2c_driver driver;
static struct i2c_client client_template;
-#define dprintk if (debug) printk
-#define d2printk if (debug > 1) printk
-
/* The TDA7432 is made by STS-Thompson
* http://www.st.com
* http://us.st.com/stonline/books/pdf/docs/4056.pdf
@@ -229,12 +227,12 @@ static struct i2c_client client_template;
static int tda7432_write(struct i2c_client *client, int subaddr, int val)
{
unsigned char buffer[2];
- d2printk("tda7432: In tda7432_write\n");
- dprintk("tda7432: Writing %d 0x%x\n", subaddr, val);
+ v4l_dbg(2, debug, client, "In tda7432_write\n");
+ v4l_dbg(1, debug, client, "Writing %d 0x%x\n", subaddr, val);
buffer[0] = subaddr;
buffer[1] = val;
if (2 != i2c_master_send(client,buffer,2)) {
- printk(KERN_WARNING "tda7432: I/O error, trying (write %d 0x%x)\n",
+ v4l_err(client, "I/O error, trying (write %d 0x%x)\n",
subaddr, val);
return -1;
}
@@ -247,9 +245,9 @@ static int tda7432_set(struct i2c_client *client)
{
struct tda7432 *t = i2c_get_clientdata(client);
unsigned char buf[16];
- d2printk("tda7432: In tda7432_set\n");
+ v4l_dbg(2, debug, client, "In tda7432_set\n");
- dprintk(KERN_INFO
+ v4l_dbg(1, debug, client,
"tda7432: 7432_set(0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x)\n",
t->input,t->volume,t->bass,t->treble,t->lf,t->lr,t->rf,t->rr,t->loud);
buf[0] = TDA7432_IN;
@@ -263,7 +261,7 @@ static int tda7432_set(struct i2c_client *client)
buf[8] = t->rr;
buf[9] = t->loud;
if (10 != i2c_master_send(client,buf,10)) {
- printk(KERN_WARNING "tda7432: I/O error, trying tda7432_set\n");
+ v4l_err(client, "I/O error, trying tda7432_set\n");
return -1;
}
@@ -273,7 +271,7 @@ static int tda7432_set(struct i2c_client *client)
static void do_tda7432_init(struct i2c_client *client)
{
struct tda7432 *t = i2c_get_clientdata(client);
- d2printk("tda7432: In tda7432_init\n");
+ v4l_dbg(2, debug, client, "In tda7432_init\n");
t->input = TDA7432_STEREO_IN | /* Main (stereo) input */
TDA7432_BASS_SYM | /* Symmetric bass cut */
@@ -301,12 +299,10 @@ static int tda7432_attach(struct i2c_adapter *adap, int addr, int kind)
{
struct tda7432 *t;
struct i2c_client *client;
- d2printk("tda7432: In tda7432_attach\n");
- t = kmalloc(sizeof *t,GFP_KERNEL);
+ t = kzalloc(sizeof *t, GFP_KERNEL);
if (!t)
return -ENOMEM;
- memset(t,0,sizeof *t);
client = &t->c;
memcpy(client,&client_template,sizeof(struct i2c_client));
@@ -315,21 +311,16 @@ static int tda7432_attach(struct i2c_adapter *adap, int addr, int kind)
i2c_set_clientdata(client, t);
do_tda7432_init(client);
- printk(KERN_INFO "tda7432: init\n");
-
i2c_attach_client(client);
+
+ v4l_info(client, "chip found @ 0x%x (%s)\n", addr << 1, adap->name);
return 0;
}
static int tda7432_probe(struct i2c_adapter *adap)
{
-#ifdef I2C_CLASS_TV_ANALOG
if (adap->class & I2C_CLASS_TV_ANALOG)
return i2c_probe(adap, &addr_data, tda7432_attach);
-#else
- if (adap->id == I2C_HW_B_BT848)
- return i2c_probe(adap, &addr_data, tda7432_attach);
-#endif
return 0;
}
@@ -348,7 +339,9 @@ static int tda7432_command(struct i2c_client *client,
unsigned int cmd, void *arg)
{
struct tda7432 *t = i2c_get_clientdata(client);
- d2printk("tda7432: In tda7432_command\n");
+ v4l_dbg(2, debug, client, "In tda7432_command\n");
+ if (debug > 1)
+ v4l_i2c_print_ioctl(client, cmd);
switch (cmd) {
/* --- v4l ioctls --- */
@@ -359,7 +352,6 @@ static int tda7432_command(struct i2c_client *client,
case VIDIOCGAUDIO:
{
struct video_audio *va = arg;
- dprintk("tda7432: VIDIOCGAUDIO\n");
va->flags |= VIDEO_AUDIO_VOLUME |
VIDEO_AUDIO_BASS |
@@ -414,7 +406,6 @@ static int tda7432_command(struct i2c_client *client,
case VIDIOCSAUDIO:
{
struct video_audio *va = arg;
- dprintk("tda7432: VIDEOCSAUDIO\n");
if(va->flags & VIDEO_AUDIO_VOLUME){
if(!maxvol){ /* max +20db */
@@ -490,21 +481,16 @@ static int tda7432_command(struct i2c_client *client,
} /* end of VIDEOCSAUDIO case */
- default: /* Not VIDEOCGAUDIO or VIDEOCSAUDIO */
-
- /* nothing */
- d2printk("tda7432: Default\n");
-
} /* end of (cmd) switch */
return 0;
}
static struct i2c_driver driver = {
- .owner = THIS_MODULE,
- .name = "i2c tda7432 driver",
+ .driver = {
+ .name = "tda7432",
+ },
.id = I2C_DRIVERID_TDA7432,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = tda7432_probe,
.detach_client = tda7432_detach,
.command = tda7432_command,
@@ -519,7 +505,7 @@ static struct i2c_client client_template =
static int __init tda7432_init(void)
{
if ( (loudness < 0) || (loudness > 15) ) {
- printk(KERN_ERR "tda7432: loudness parameter must be between 0 and 15\n");
+ printk(KERN_ERR "loudness parameter must be between 0 and 15\n");
return -EINVAL;
}
diff --git a/drivers/media/video/tda8290.c b/drivers/media/video/tda8290.c
index 61d94ddaff4..2498b76df42 100644
--- a/drivers/media/video/tda8290.c
+++ b/drivers/media/video/tda8290.c
@@ -398,14 +398,8 @@ static int tda8290_tune(struct i2c_client *c, u16 ifc, unsigned int freq)
return 0;
}
-
/*---------------------------------------------------------------------*/
-#define V4L2_STD_MN (V4L2_STD_PAL_M|V4L2_STD_PAL_N|V4L2_STD_PAL_Nc|V4L2_STD_NTSC)
-#define V4L2_STD_B (V4L2_STD_PAL_B|V4L2_STD_PAL_B1|V4L2_STD_SECAM_B)
-#define V4L2_STD_GH (V4L2_STD_PAL_G|V4L2_STD_PAL_H|V4L2_STD_SECAM_G|V4L2_STD_SECAM_H)
-#define V4L2_STD_DK (V4L2_STD_PAL_DK|V4L2_STD_SECAM_DK)
-
static void set_audio(struct tuner *t)
{
char* mode;
diff --git a/drivers/media/video/tda9840.c b/drivers/media/video/tda9840.c
index 1794686612c..ed4c04119cc 100644
--- a/drivers/media/video/tda9840.c
+++ b/drivers/media/video/tda9840.c
@@ -34,7 +34,7 @@ static int debug = 0; /* insmod parameter */
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off device debugging (default:off).");
#define dprintk(args...) \
- do { if (debug) { printk("%s: %s()[%d]: ",__stringify(KBUILD_MODNAME), __FUNCTION__, __LINE__); printk(args); } } while (0)
+ do { if (debug) { printk("%s: %s()[%d]: ", KBUILD_MODNAME, __FUNCTION__, __LINE__); printk(args); } } while (0)
#define SWITCH 0x00
#define LEVEL_ADJUST 0x02
@@ -221,10 +221,10 @@ static int detach(struct i2c_client *client)
}
static struct i2c_driver driver = {
- .owner = THIS_MODULE,
- .name = "tda9840",
+ .driver = {
+ .name = "tda9840",
+ },
.id = I2C_DRIVERID_TDA9840,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = attach,
.detach_client = detach,
.command = command,
diff --git a/drivers/media/video/tda9875.c b/drivers/media/video/tda9875.c
index a5e37dc91f3..ef98c498225 100644
--- a/drivers/media/video/tda9875.c
+++ b/drivers/media/video/tda9875.c
@@ -232,10 +232,9 @@ static int tda9875_attach(struct i2c_adapter *adap, int addr, int kind)
struct i2c_client *client;
dprintk("In tda9875_attach\n");
- t = kmalloc(sizeof *t,GFP_KERNEL);
+ t = kzalloc(sizeof *t, GFP_KERNEL);
if (!t)
return -ENOMEM;
- memset(t,0,sizeof *t);
client = &t->c;
memcpy(client,&client_template,sizeof(struct i2c_client));
@@ -257,13 +256,8 @@ static int tda9875_attach(struct i2c_adapter *adap, int addr, int kind)
static int tda9875_probe(struct i2c_adapter *adap)
{
-#ifdef I2C_CLASS_TV_ANALOG
if (adap->class & I2C_CLASS_TV_ANALOG)
return i2c_probe(adap, &addr_data, tda9875_attach);
-#else
- if (adap->id == I2C_HW_B_BT848)
- return i2c_probe(adap, &addr_data, tda9875_attach);
-#endif
return 0;
}
@@ -372,10 +366,10 @@ static int tda9875_command(struct i2c_client *client,
static struct i2c_driver driver = {
- .owner = THIS_MODULE,
- .name = "i2c tda9875 driver",
+ .driver = {
+ .name = "tda9875",
+ },
.id = I2C_DRIVERID_TDA9875,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = tda9875_probe,
.detach_client = tda9875_detach,
.command = tda9875_command,
diff --git a/drivers/media/video/tda9887.c b/drivers/media/video/tda9887.c
index 2f2414e90e8..5815649bdc7 100644
--- a/drivers/media/video/tda9887.c
+++ b/drivers/media/video/tda9887.c
@@ -9,7 +9,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
-#include <media/audiochip.h>
+#include <media/v4l2-common.h>
#include <media/tuner.h>
@@ -57,7 +57,6 @@ struct tda9887 {
v4l2_std_id std;
enum tuner_mode mode;
unsigned int config;
- unsigned int pinnacle_id;
unsigned int using_v4l2;
unsigned int radio_mode;
unsigned char data[4];
@@ -115,6 +114,9 @@ static struct i2c_client client_template;
#define cAudioGain0 0x00 // bit c7
#define cAudioGain6 0x80 // bit c7
+#define cTopMask 0x1f // bit c0:4
+#define cTopPalSecamDefault 0x14 // bit c0:4
+#define cTopNtscRadioDefault 0x10 // bit c0:4
//// third reg (e)
#define cAudioIF_4_5 0x00 // bit e0:1
@@ -146,13 +148,15 @@ static struct i2c_client client_template;
static struct tvnorm tvnorms[] = {
{
- .std = V4L2_STD_PAL_BG,
- .name = "PAL-BG",
+ .std = V4L2_STD_PAL_BG | V4L2_STD_PAL_H | V4L2_STD_PAL_N,
+ .name = "PAL-BGHN",
.b = ( cNegativeFmTV |
cQSS ),
.c = ( cDeemphasisON |
- cDeemphasis50 ),
- .e = ( cAudioIF_5_5 |
+ cDeemphasis50 |
+ cTopPalSecamDefault),
+ .e = ( cGating_36 |
+ cAudioIF_5_5 |
cVideoIF_38_90 ),
},{
.std = V4L2_STD_PAL_I,
@@ -160,8 +164,10 @@ static struct tvnorm tvnorms[] = {
.b = ( cNegativeFmTV |
cQSS ),
.c = ( cDeemphasisON |
- cDeemphasis50 ),
- .e = ( cAudioIF_6_0 |
+ cDeemphasis50 |
+ cTopPalSecamDefault),
+ .e = ( cGating_36 |
+ cAudioIF_6_0 |
cVideoIF_38_90 ),
},{
.std = V4L2_STD_PAL_DK,
@@ -169,52 +175,80 @@ static struct tvnorm tvnorms[] = {
.b = ( cNegativeFmTV |
cQSS ),
.c = ( cDeemphasisON |
- cDeemphasis50 ),
- .e = ( cAudioIF_6_5 |
- cVideoIF_38_00 ),
+ cDeemphasis50 |
+ cTopPalSecamDefault),
+ .e = ( cGating_36 |
+ cAudioIF_6_5 |
+ cVideoIF_38_90 ),
},{
- .std = V4L2_STD_PAL_M | V4L2_STD_PAL_N,
- .name = "PAL-M/N",
+ .std = V4L2_STD_PAL_M | V4L2_STD_PAL_Nc,
+ .name = "PAL-M/Nc",
.b = ( cNegativeFmTV |
cQSS ),
.c = ( cDeemphasisON |
- cDeemphasis75 ),
- .e = ( cAudioIF_4_5 |
+ cDeemphasis75 |
+ cTopNtscRadioDefault),
+ .e = ( cGating_36 |
+ cAudioIF_4_5 |
cVideoIF_45_75 ),
},{
+ .std = V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H,
+ .name = "SECAM-BGH",
+ .b = ( cPositiveAmTV |
+ cQSS ),
+ .c = ( cTopPalSecamDefault),
+ .e = ( cGating_36 |
+ cAudioIF_5_5 |
+ cVideoIF_38_90 ),
+ },{
.std = V4L2_STD_SECAM_L,
.name = "SECAM-L",
.b = ( cPositiveAmTV |
cQSS ),
+ .c = ( cTopPalSecamDefault),
.e = ( cGating_36 |
cAudioIF_6_5 |
cVideoIF_38_90 ),
},{
+ .std = V4L2_STD_SECAM_LC,
+ .name = "SECAM-L'",
+ .b = ( cOutputPort2Inactive |
+ cPositiveAmTV |
+ cQSS ),
+ .c = ( cTopPalSecamDefault),
+ .e = ( cGating_36 |
+ cAudioIF_6_5 |
+ cVideoIF_33_90 ),
+ },{
.std = V4L2_STD_SECAM_DK,
.name = "SECAM-DK",
.b = ( cNegativeFmTV |
cQSS ),
.c = ( cDeemphasisON |
- cDeemphasis50 ),
- .e = ( cAudioIF_6_5 |
- cVideoIF_38_00 ),
+ cDeemphasis50 |
+ cTopPalSecamDefault),
+ .e = ( cGating_36 |
+ cAudioIF_6_5 |
+ cVideoIF_38_90 ),
},{
.std = V4L2_STD_NTSC_M,
.name = "NTSC-M",
.b = ( cNegativeFmTV |
cQSS ),
.c = ( cDeemphasisON |
- cDeemphasis75 ),
+ cDeemphasis75 |
+ cTopNtscRadioDefault),
.e = ( cGating_36 |
cAudioIF_4_5 |
cVideoIF_45_75 ),
},{
.std = V4L2_STD_NTSC_M_JP,
- .name = "NTSC-JP",
+ .name = "NTSC-M-JP",
.b = ( cNegativeFmTV |
cQSS ),
.c = ( cDeemphasisON |
- cDeemphasis50 ),
+ cDeemphasis50 |
+ cTopNtscRadioDefault),
.e = ( cGating_36 |
cAudioIF_4_5 |
cVideoIF_58_75 ),
@@ -226,8 +260,10 @@ static struct tvnorm radio_stereo = {
.b = ( cFmRadio |
cQSS ),
.c = ( cDeemphasisOFF |
- cAudioGain6 ),
- .e = ( cAudioIF_5_5 |
+ cAudioGain6 |
+ cTopNtscRadioDefault),
+ .e = ( cTunerGainLow |
+ cAudioIF_5_5 |
cRadioIF_38_90 ),
};
@@ -236,8 +272,10 @@ static struct tvnorm radio_mono = {
.b = ( cFmRadio |
cQSS ),
.c = ( cDeemphasisON |
- cDeemphasis50),
- .e = ( cAudioIF_5_5 |
+ cDeemphasis75 |
+ cTopNtscRadioDefault),
+ .e = ( cTunerGainLow |
+ cAudioIF_5_5 |
cRadioIF_38_90 ),
};
@@ -400,7 +438,8 @@ static int tda9887_set_tvnorm(struct tda9887 *t, char *buf)
static unsigned int port1 = UNSET;
static unsigned int port2 = UNSET;
static unsigned int qss = UNSET;
-static unsigned int adjust = 0x10;
+static unsigned int adjust = UNSET;
+
module_param(port1, int, 0644);
module_param(port2, int, 0644);
module_param(qss, int, 0644);
@@ -428,8 +467,10 @@ static int tda9887_set_insmod(struct tda9887 *t, char *buf)
buf[1] &= ~cQSS;
}
- if (adjust >= 0x00 && adjust < 0x20)
+ if (adjust >= 0x00 && adjust < 0x20) {
+ buf[2] &= ~cTopMask;
buf[2] |= adjust;
+ }
return 0;
}
@@ -465,6 +506,10 @@ static int tda9887_set_config(struct tda9887 *t, char *buf)
break;
}
}
+ if (t->config & TDA9887_TOP_SET) {
+ buf[2] &= ~cTopMask;
+ buf[2] |= (t->config >> 8) & cTopMask;
+ }
if ((t->config & TDA9887_INTERCARRIER_NTSC) && (t->std & V4L2_STD_NTSC))
buf[1] &= ~cQSS;
return 0;
@@ -472,38 +517,13 @@ static int tda9887_set_config(struct tda9887 *t, char *buf)
/* ---------------------------------------------------------------------- */
-static int tda9887_set_pinnacle(struct tda9887 *t, char *buf)
-{
- unsigned int bCarrierMode = UNSET;
-
- if (t->std & V4L2_STD_625_50) {
- if ((1 == t->pinnacle_id) || (7 == t->pinnacle_id)) {
- bCarrierMode = cIntercarrier;
- } else {
- bCarrierMode = cQSS;
- }
- }
- if (t->std & V4L2_STD_525_60) {
- if ((5 == t->pinnacle_id) || (6 == t->pinnacle_id)) {
- bCarrierMode = cIntercarrier;
- } else {
- bCarrierMode = cQSS;
- }
- }
-
- if (bCarrierMode != UNSET) {
- buf[1] &= ~0x04;
- buf[1] |= bCarrierMode;
- }
- return 0;
-}
-
-/* ---------------------------------------------------------------------- */
+static char pal[] = "--";
+static char secam[] = "--";
+static char ntsc[] = "-";
-static char pal[] = "-";
module_param_string(pal, pal, sizeof(pal), 0644);
-static char secam[] = "-";
module_param_string(secam, secam, sizeof(secam), 0644);
+module_param_string(ntsc, ntsc, sizeof(ntsc), 0644);
static int tda9887_fixup_std(struct tda9887 *t)
{
@@ -514,8 +534,17 @@ static int tda9887_fixup_std(struct tda9887 *t)
case 'B':
case 'g':
case 'G':
- tda9887_dbg("insmod fixup: PAL => PAL-BG\n");
- t->std = V4L2_STD_PAL_BG;
+ case 'h':
+ case 'H':
+ case 'n':
+ case 'N':
+ if (pal[1] == 'c' || pal[1] == 'C') {
+ tda9887_dbg("insmod fixup: PAL => PAL-Nc\n");
+ t->std = V4L2_STD_PAL_Nc;
+ } else {
+ tda9887_dbg("insmod fixup: PAL => PAL-BGHN\n");
+ t->std = V4L2_STD_PAL_BG | V4L2_STD_PAL_H | V4L2_STD_PAL_N;
+ }
break;
case 'i':
case 'I':
@@ -529,6 +558,11 @@ static int tda9887_fixup_std(struct tda9887 *t)
tda9887_dbg("insmod fixup: PAL => PAL-DK\n");
t->std = V4L2_STD_PAL_DK;
break;
+ case 'm':
+ case 'M':
+ tda9887_dbg("insmod fixup: PAL => PAL-M\n");
+ t->std = V4L2_STD_PAL_M;
+ break;
case '-':
/* default parameter, do nothing */
break;
@@ -539,6 +573,15 @@ static int tda9887_fixup_std(struct tda9887 *t)
}
if ((t->std & V4L2_STD_SECAM) == V4L2_STD_SECAM) {
switch (secam[0]) {
+ case 'b':
+ case 'B':
+ case 'g':
+ case 'G':
+ case 'h':
+ case 'H':
+ tda9887_dbg("insmod fixup: SECAM => SECAM-BGH\n");
+ t->std = V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H;
+ break;
case 'd':
case 'D':
case 'k':
@@ -548,8 +591,13 @@ static int tda9887_fixup_std(struct tda9887 *t)
break;
case 'l':
case 'L':
- tda9887_dbg("insmod fixup: SECAM => SECAM-L\n");
- t->std = V4L2_STD_SECAM_L;
+ if (secam[1] == 'c' || secam[1] == 'C') {
+ tda9887_dbg("insmod fixup: SECAM => SECAM-L'\n");
+ t->std = V4L2_STD_SECAM_LC;
+ } else {
+ tda9887_dbg("insmod fixup: SECAM => SECAM-L\n");
+ t->std = V4L2_STD_SECAM_L;
+ }
break;
case '-':
/* default parameter, do nothing */
@@ -559,6 +607,26 @@ static int tda9887_fixup_std(struct tda9887 *t)
break;
}
}
+ if ((t->std & V4L2_STD_NTSC) == V4L2_STD_NTSC) {
+ switch (ntsc[0]) {
+ case 'm':
+ case 'M':
+ tda9887_dbg("insmod fixup: NTSC => NTSC-M\n");
+ t->std = V4L2_STD_NTSC_M;
+ break;
+ case 'j':
+ case 'J':
+ tda9887_dbg("insmod fixup: NTSC => NTSC-M-JP\n");
+ t->std = V4L2_STD_NTSC_M_JP;
+ break;
+ case '-':
+ /* default parameter, do nothing */
+ break;
+ default:
+ tda9887_info("ntsc= argument not recognised\n");
+ break;
+ }
+ }
return 0;
}
@@ -581,12 +649,22 @@ static int tda9887_configure(struct tda9887 *t)
memset(t->data,0,sizeof(t->data));
tda9887_set_tvnorm(t,t->data);
+ /* A note on the port settings:
+ These settings tend to depend on the specifics of the board.
+ By default they are set to inactive (bit value 1) by this driver,
+ overwriting any changes made by the tvnorm. This means that it
+ is the responsibility of the module using the tda9887 to set
+ these values in case of changes in the tvnorm.
+ In many cases port 2 should be made active (0) when selecting
+ SECAM-L, and port 2 should remain inactive (1) for SECAM-L'.
+
+ For the other standards the tda9887 application note says that
+ the ports should be set to active (0), but, again, that may
+ differ depending on the precise hardware configuration.
+ */
t->data[1] |= cOutputPort1Inactive;
t->data[1] |= cOutputPort2Inactive;
- if (UNSET != t->pinnacle_id) {
- tda9887_set_pinnacle(t,t->data);
- }
tda9887_set_config(t,t->data);
tda9887_set_insmod(t,t->data);
@@ -594,7 +672,6 @@ static int tda9887_configure(struct tda9887 *t)
t->data[1] |= cForcedMuteAudioON;
}
-
tda9887_dbg("writing: b=0x%02x c=0x%02x e=0x%02x\n",
t->data[1],t->data[2],t->data[3]);
if (debug > 1)
@@ -619,13 +696,11 @@ static int tda9887_attach(struct i2c_adapter *adap, int addr, int kind)
client_template.adapter = adap;
client_template.addr = addr;
- if (NULL == (t = kmalloc(sizeof(*t), GFP_KERNEL)))
+ if (NULL == (t = kzalloc(sizeof(*t), GFP_KERNEL)))
return -ENOMEM;
- memset(t,0,sizeof(*t));
t->client = client_template;
t->std = 0;
- t->pinnacle_id = UNSET;
t->radio_mode = V4L2_TUNER_MODE_STEREO;
tda9887_info("chip found @ 0x%x (%s)\n", addr<<1, adap->name);
@@ -638,18 +713,8 @@ static int tda9887_attach(struct i2c_adapter *adap, int addr, int kind)
static int tda9887_probe(struct i2c_adapter *adap)
{
-#ifdef I2C_CLASS_TV_ANALOG
if (adap->class & I2C_CLASS_TV_ANALOG)
return i2c_probe(adap, &addr_data, tda9887_attach);
-#else
- switch (adap->id) {
- case I2C_HW_B_BT848:
- case I2C_HW_B_RIVA:
- case I2C_HW_SAA7134:
- return i2c_probe(adap, &addr_data, tda9887_attach);
- break;
- }
-#endif
return 0;
}
@@ -689,14 +754,6 @@ tda9887_command(struct i2c_client *client, unsigned int cmd, void *arg)
tda9887_configure(t);
break;
}
- case AUDC_CONFIG_PINNACLE:
- {
- int *i = arg;
-
- t->pinnacle_id = *i;
- tda9887_configure(t);
- break;
- }
case TDA9887_SET_CONFIG:
{
int *i = arg;
@@ -787,7 +844,7 @@ tda9887_command(struct i2c_client *client, unsigned int cmd, void *arg)
}
case VIDIOC_LOG_STATUS:
{
- tda9887_info("Data bytes: b=%02x c=%02x e=%02x\n", t->data[1], t->data[2], t->data[3]);
+ tda9887_info("Data bytes: b=0x%02x c=0x%02x e=0x%02x\n", t->data[1], t->data[2], t->data[3]);
break;
}
default:
@@ -819,14 +876,12 @@ static int tda9887_resume(struct device * dev)
/* ----------------------------------------------------------------------- */
static struct i2c_driver driver = {
- .owner = THIS_MODULE,
- .name = "i2c tda9887 driver",
.id = -1, /* FIXME */
- .flags = I2C_DF_NOTIFY,
.attach_adapter = tda9887_probe,
.detach_client = tda9887_detach,
.command = tda9887_command,
.driver = {
+ .name = "tda9887",
.suspend = tda9887_suspend,
.resume = tda9887_resume,
},
@@ -834,7 +889,6 @@ static struct i2c_driver driver = {
static struct i2c_client client_template =
{
.name = "tda9887",
- .flags = I2C_CLIENT_ALLOW_USE,
.driver = &driver,
};
diff --git a/drivers/media/video/tea5767.c b/drivers/media/video/tea5767.c
index a9375ef05de..921fe72f23d 100644
--- a/drivers/media/video/tea5767.c
+++ b/drivers/media/video/tea5767.c
@@ -17,6 +17,9 @@
#define PREFIX "TEA5767 "
+/* from tuner-core.c */
+extern int tuner_debug;
+
/*****************************************************************************/
/******************************
@@ -264,7 +267,7 @@ static int tea5767_signal(struct i2c_client *c)
if (5 != (rc = i2c_master_recv(c, buffer, 5)))
tuner_warn("i2c i/o error: rc == %d (should be 5)\n", rc);
- return ((buffer[3] & TEA5767_ADC_LEVEL_MASK) << (13 - 4));
+ return ((buffer[3] & TEA5767_ADC_LEVEL_MASK) << 8);
}
static int tea5767_stereo(struct i2c_client *c)
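The one-line signal() change above is a scaling fix: the ADC level field occupies the top nibble of the status byte, and shifting it by 8 instead of 9 keeps the result inside the 16-bit range V4L2 reports for signal strength. Worked numbers, assuming TEA5767_ADC_LEVEL_MASK is 0xf0:

	/* max masked value       = 0xf0
	 * old: 0xf0 << (13 - 4)  = 0xf0 << 9 = 0x1e000  (overflows 0..65535)
	 * new: 0xf0 << 8         = 0xf000    (fits the 16-bit signal range)
	 */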
diff --git a/drivers/media/video/tea6415c.c b/drivers/media/video/tea6415c.c
index ee3688348b6..bb35844e384 100644
--- a/drivers/media/video/tea6415c.c
+++ b/drivers/media/video/tea6415c.c
@@ -36,7 +36,7 @@ static int debug = 0; /* insmod parameter */
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off device debugging (default:off).");
#define dprintk(args...) \
- do { if (debug) { printk("%s: %s()[%d]: ",__stringify(KBUILD_MODNAME), __FUNCTION__, __LINE__); printk(args); } } while (0)
+ do { if (debug) { printk("%s: %s()[%d]: ", KBUILD_MODNAME, __FUNCTION__, __LINE__); printk(args); } } while (0)
#define TEA6415C_NUM_INPUTS 8
#define TEA6415C_NUM_OUTPUTS 6
@@ -190,10 +190,10 @@ static int command(struct i2c_client *client, unsigned int cmd, void *arg)
}
static struct i2c_driver driver = {
- .owner = THIS_MODULE,
- .name = "tea6415c",
+ .driver = {
+ .name = "tea6415c",
+ },
.id = I2C_DRIVERID_TEA6415C,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = attach,
.detach_client = detach,
.command = command,
diff --git a/drivers/media/video/tea6420.c b/drivers/media/video/tea6420.c
index 17975c19da5..4dcba5a4fff 100644
--- a/drivers/media/video/tea6420.c
+++ b/drivers/media/video/tea6420.c
@@ -36,7 +36,7 @@ static int debug = 0; /* insmod parameter */
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off device debugging (default:off).");
#define dprintk(args...) \
- do { if (debug) { printk("%s: %s()[%d]: ",__stringify(KBUILD_MODNAME), __FUNCTION__, __LINE__); printk(args); } } while (0)
+ do { if (debug) { printk("%s: %s()[%d]: ", KBUILD_MODNAME, __FUNCTION__, __LINE__); printk(args); } } while (0)
/* addresses to scan, found only at 0x4c and/or 0x4d (7-Bit) */
static unsigned short normal_i2c[] = { I2C_TEA6420_1, I2C_TEA6420_2, I2C_CLIENT_END };
@@ -99,11 +99,10 @@ static int tea6420_detect(struct i2c_adapter *adapter, int address, int kind)
}
/* allocate memory for client structure */
- client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
if (0 == client) {
return -ENOMEM;
}
- memset(client, 0x0, sizeof(struct i2c_client));
/* fill client structure */
memcpy(client, &client_template, sizeof(struct i2c_client));
@@ -167,10 +166,10 @@ static int command(struct i2c_client *client, unsigned int cmd, void *arg)
}
static struct i2c_driver driver = {
- .owner = THIS_MODULE,
- .name = "tea6420",
+ .driver = {
+ .name = "tea6420",
+ },
.id = I2C_DRIVERID_TEA6420,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = attach,
.detach_client = detach,
.command = command,
diff --git a/drivers/media/video/tuner-3036.c b/drivers/media/video/tuner-3036.c
index 79203595b9c..c4a78e7a5a5 100644
--- a/drivers/media/video/tuner-3036.c
+++ b/drivers/media/video/tuner-3036.c
@@ -175,10 +175,10 @@ tuner_probe(struct i2c_adapter *adap)
static struct i2c_driver
i2c_driver_tuner =
{
- .owner = THIS_MODULE,
- .name = "sab3036",
+ .driver = {
+ .name = "sab3036",
+ },
.id = I2C_DRIVERID_SAB3036,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = tuner_probe,
.detach_client = tuner_detach,
.command = tuner_command
@@ -193,8 +193,7 @@ static struct i2c_client client_template =
static int __init
tuner3036_init(void)
{
- i2c_add_driver(&i2c_driver_tuner);
- return 0;
+ return i2c_add_driver(&i2c_driver_tuner);
}
static void __exit
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index e58abdfcaab..f30ef79d795 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -20,10 +20,9 @@
#include <linux/init.h>
#include <media/tuner.h>
+#include <media/v4l2-common.h>
#include <media/audiochip.h>
-#include "msp3400.h"
-
#define UNSET (-1U)
/* standard i2c insmod options */
@@ -38,21 +37,30 @@ I2C_CLIENT_INSMOD;
/* insmod options used at init time => read/only */
static unsigned int addr = 0;
-module_param(addr, int, 0444);
-
static unsigned int no_autodetect = 0;
-module_param(no_autodetect, int, 0444);
-
static unsigned int show_i2c = 0;
-module_param(show_i2c, int, 0444);
/* insmod options used at runtime => read/write */
-unsigned int tuner_debug = 0;
-module_param(tuner_debug, int, 0644);
+static unsigned int tuner_debug_old = 0;
+int tuner_debug = 0;
static unsigned int tv_range[2] = { 44, 958 };
static unsigned int radio_range[2] = { 65, 108 };
+static char pal[] = "--";
+static char secam[] = "--";
+static char ntsc[] = "-";
+
+
+module_param(addr, int, 0444);
+module_param(no_autodetect, int, 0444);
+module_param(show_i2c, int, 0444);
+/* Note: tuner_debug is deprecated and will be removed in 2.6.17 */
+module_param_named(tuner_debug,tuner_debug_old, int, 0444);
+module_param_named(debug,tuner_debug, int, 0644);
+module_param_string(pal, pal, sizeof(pal), 0644);
+module_param_string(secam, secam, sizeof(secam), 0644);
+module_param_string(ntsc, ntsc, sizeof(ntsc), 0644);
module_param_array(tv_range, int, NULL, 0644);
module_param_array(radio_range, int, NULL, 0644);
@@ -206,7 +214,7 @@ static void set_type(struct i2c_client *c, unsigned int type,
set_freq(c, t->freq);
tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
- c->adapter->name, c->driver->name, c->addr << 1, type,
+ c->adapter->name, c->driver->driver.name, c->addr << 1, type,
t->mode_mask);
}
@@ -249,11 +257,6 @@ static inline int check_mode(struct tuner *t, char *cmd)
return 0;
}
-static char pal[] = "-";
-module_param_string(pal, pal, sizeof(pal), 0644);
-static char secam[] = "--";
-module_param_string(secam, secam, sizeof(secam), 0644);
-
/* get more precise norm info from insmod option */
static int tuner_fixup_std(struct tuner *t)
{
@@ -285,8 +288,13 @@ static int tuner_fixup_std(struct tuner *t)
break;
case 'N':
case 'n':
- tuner_dbg ("insmod fixup: PAL => PAL-N\n");
- t->std = V4L2_STD_PAL_N;
+ if (pal[1] == 'c' || pal[1] == 'C') {
+ tuner_dbg("insmod fixup: PAL => PAL-Nc\n");
+ t->std = V4L2_STD_PAL_Nc;
+ } else {
+ tuner_dbg ("insmod fixup: PAL => PAL-N\n");
+ t->std = V4L2_STD_PAL_N;
+ }
break;
case '-':
/* default parameter, do nothing */
@@ -298,6 +306,15 @@ static int tuner_fixup_std(struct tuner *t)
}
if ((t->std & V4L2_STD_SECAM) == V4L2_STD_SECAM) {
switch (secam[0]) {
+ case 'b':
+ case 'B':
+ case 'g':
+ case 'G':
+ case 'h':
+ case 'H':
+ tuner_dbg("insmod fixup: SECAM => SECAM-BGH\n");
+ t->std = V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H;
+ break;
case 'd':
case 'D':
case 'k':
@@ -324,9 +341,60 @@ static int tuner_fixup_std(struct tuner *t)
}
}
+ if ((t->std & V4L2_STD_NTSC) == V4L2_STD_NTSC) {
+ switch (ntsc[0]) {
+ case 'm':
+ case 'M':
+ tuner_dbg("insmod fixup: NTSC => NTSC-M\n");
+ t->std = V4L2_STD_NTSC_M;
+ break;
+ case 'j':
+ case 'J':
+ tuner_dbg("insmod fixup: NTSC => NTSC_M_JP\n");
+ t->std = V4L2_STD_NTSC_M_JP;
+ break;
+ case '-':
+ /* default parameter, do nothing */
+ break;
+ default:
+ tuner_info("ntsc= argument not recognised\n");
+ break;
+ }
+ }
return 0;
}
+static void tuner_status(struct i2c_client *client)
+{
+ struct tuner *t = i2c_get_clientdata(client);
+ unsigned long freq, freq_fraction;
+ const char *p;
+
+ switch (t->mode) {
+ case V4L2_TUNER_RADIO: p = "radio"; break;
+ case V4L2_TUNER_ANALOG_TV: p = "analog TV"; break;
+ case V4L2_TUNER_DIGITAL_TV: p = "digital TV"; break;
+ default: p = "undefined"; break;
+ }
+ if (t->mode == V4L2_TUNER_RADIO) {
+ freq = t->freq / 16000;
+ freq_fraction = (t->freq % 16000) * 100 / 16000;
+ } else {
+ freq = t->freq / 16;
+ freq_fraction = (t->freq % 16) * 100 / 16;
+ }
+ tuner_info("Tuner mode: %s\n", p);
+ tuner_info("Frequency: %lu.%02lu MHz\n", freq, freq_fraction);
+ tuner_info("Standard: 0x%08llx\n", t->std);
+ if (t->mode == V4L2_TUNER_RADIO) {
+ if (t->has_signal) {
+ tuner_info("Signal strength: %d\n", t->has_signal(client));
+ }
+ if (t->is_stereo) {
+ tuner_info("Stereo: %s\n", t->is_stereo(client) ? "yes" : "no");
+ }
+ }
+}
/* ---------------------------------------------------------------------- */
/* static var Used only in tuner_attach and tuner_probe */
@@ -342,16 +410,20 @@ static int tuner_attach(struct i2c_adapter *adap, int addr, int kind)
client_template.adapter = adap;
client_template.addr = addr;
- t = kmalloc(sizeof(struct tuner), GFP_KERNEL);
+ t = kzalloc(sizeof(struct tuner), GFP_KERNEL);
if (NULL == t)
return -ENOMEM;
- memset(t, 0, sizeof(struct tuner));
memcpy(&t->i2c, &client_template, sizeof(struct i2c_client));
i2c_set_clientdata(&t->i2c, t);
t->type = UNSET;
t->radio_if2 = 10700 * 1000; /* 10.7MHz - FM radio */
t->audmode = V4L2_TUNER_MODE_STEREO;
t->mode_mask = T_UNINITIALIZED;
+ if (tuner_debug_old) {
+ tuner_debug = tuner_debug_old;
+ printk(KERN_ERR "tuner: tuner_debug is deprecated and will be removed in 2.6.17.\n");
+ printk(KERN_ERR "tuner: use the debug option instead.\n");
+ }
if (show_i2c) {
unsigned char buffer[16];
@@ -478,7 +550,9 @@ static inline int check_v4l2(struct tuner *t)
static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
{
struct tuner *t = i2c_get_clientdata(client);
- unsigned int *iarg = (int *)arg;
+
+ if (tuner_debug>1)
+ v4l_i2c_print_ioctl(&(t->i2c),cmd);
switch (cmd) {
/* --- configuration --- */
@@ -501,18 +575,6 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
t->standby (client);
break;
}
- case AUDC_CONFIG_PINNACLE:
- switch (*iarg) {
- case 2:
- tuner_dbg("pinnacle pal\n");
- t->radio_if2 = 33300 * 1000;
- break;
- case 3:
- tuner_dbg("pinnacle ntsc\n");
- t->radio_if2 = 41300 * 1000;
- break;
- }
- break;
case VIDIOCSAUDIO:
if (check_mode(t, "VIDIOCSAUDIO") == EINVAL)
return 0;
@@ -523,9 +585,6 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
tuner_dbg("VIDIOCSAUDIO not implemented.\n");
break;
- case MSP_SET_MATRIX:
- case TDA9887_SET_CONFIG:
- break;
/* --- v4l ioctls --- */
/* take care: bttv does userspace copying, we'll get a
kernel pointer here... */
@@ -708,10 +767,8 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
}
break;
}
- default:
- tuner_dbg("Unimplemented IOCTL 0x%08x(dir=%d,tp='%c',nr=%d,sz=%d)\n",
- cmd, _IOC_DIR(cmd), _IOC_TYPE(cmd),
- _IOC_NR(cmd), _IOC_SIZE(cmd));
+ case VIDIOC_LOG_STATUS:
+ tuner_status(client);
break;
}
@@ -742,21 +799,18 @@ static int tuner_resume(struct device *dev)
/* ----------------------------------------------------------------------- */
static struct i2c_driver driver = {
- .owner = THIS_MODULE,
- .name = "tuner",
.id = I2C_DRIVERID_TUNER,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = tuner_probe,
.detach_client = tuner_detach,
.command = tuner_command,
.driver = {
- .suspend = tuner_suspend,
- .resume = tuner_resume,
- },
+ .name = "tuner",
+ .suspend = tuner_suspend,
+ .resume = tuner_resume,
+ },
};
static struct i2c_client client_template = {
.name = "(tuner unset)",
- .flags = I2C_CLIENT_ALLOW_USE,
.driver = &driver,
};
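The tuner_status() helper added above prints frequencies out of the driver's internal units: 1/16 MHz steps for TV and 1/16 kHz steps for radio, which is why the divisors are 16 and 16000. Worked numbers under that assumption:

	/* TV:    t->freq = 16 * 503.25 MHz  = 8052
	 *        freq          = 8052 / 16               = 503
	 *        freq_fraction = (8052 % 16) * 100 / 16  = 25    -> "503.25 MHz"
	 * Radio: t->freq = 16000 * 101.1 MHz = 1617600
	 *        freq          = 1617600 / 16000                   = 101
	 *        freq_fraction = (1617600 % 16000) * 100 / 16000   = 10 -> "101.10 MHz"
	 */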
diff --git a/drivers/media/video/tuner-simple.c b/drivers/media/video/tuner-simple.c
index e0c9fdb9914..e5fb7436583 100644
--- a/drivers/media/video/tuner-simple.c
+++ b/drivers/media/video/tuner-simple.c
@@ -8,6 +8,10 @@
#include <linux/videodev.h>
#include <media/tuner.h>
+static int offset = 0;
+module_param(offset, int, 0666);
+MODULE_PARM_DESC(offset, "Allows specifying a frequency offset (in 1/16 MHz units) for the tuner");
+
/* ---------------------------------------------------------------------- */
/* tv standard selection for Temic 4046 FM5
@@ -75,24 +79,20 @@
#define TUNER_PLL_LOCKED 0x40
#define TUNER_STEREO_MK3 0x04
+#define TUNER_MAX_RANGES 3
+
/* ---------------------------------------------------------------------- */
struct tunertype
{
char *name;
- unsigned char Vendor;
- unsigned char Type;
-
- unsigned short thresh1; /* band switch VHF_LO <=> VHF_HI */
- unsigned short thresh2; /* band switch VHF_HI <=> UHF */
- unsigned char VHF_L;
- unsigned char VHF_H;
- unsigned char UHF;
+
+ int count;
+ struct {
+ unsigned short thresh;
+ unsigned char cb;
+ } ranges[TUNER_MAX_RANGES];
unsigned char config;
- unsigned short IFPCoff; /* 622.4=16*38.90 MHz PAL,
- 732 =16*45.75 NTSCi,
- 940 =16*58.75 NTSC-Japan
- 704 =16*44 ATSC */
};
/*
@@ -102,158 +102,696 @@ struct tunertype
*/
static struct tunertype tuners[] = {
/* 0-9 */
- { "Temic PAL (4002 FH5)", TEMIC, PAL,
- 16*140.25,16*463.25,0x02,0x04,0x01,0x8e,623},
- { "Philips PAL_I (FI1246 and compatibles)", Philips, PAL_I,
- 16*140.25,16*463.25,0xa0,0x90,0x30,0x8e,623},
- { "Philips NTSC (FI1236,FM1236 and compatibles)", Philips, NTSC,
- 16*157.25,16*451.25,0xA0,0x90,0x30,0x8e,732},
- { "Philips (SECAM+PAL_BG) (FI1216MF, FM1216MF, FR1216MF)", Philips, SECAM,
- 16*168.25,16*447.25,0xA7,0x97,0x37,0x8e,623},
- { "NoTuner", NoTuner, NOTUNER,
- 0,0,0x00,0x00,0x00,0x00,0x00},
- { "Philips PAL_BG (FI1216 and compatibles)", Philips, PAL,
- 16*168.25,16*447.25,0xA0,0x90,0x30,0x8e,623},
- { "Temic NTSC (4032 FY5)", TEMIC, NTSC,
- 16*157.25,16*463.25,0x02,0x04,0x01,0x8e,732},
- { "Temic PAL_I (4062 FY5)", TEMIC, PAL_I,
- 16*170.00,16*450.00,0x02,0x04,0x01,0x8e,623},
- { "Temic NTSC (4036 FY5)", TEMIC, NTSC,
- 16*157.25,16*463.25,0xa0,0x90,0x30,0x8e,732},
- { "Alps HSBH1", TEMIC, NTSC,
- 16*137.25,16*385.25,0x01,0x02,0x08,0x8e,732},
+ [TUNER_TEMIC_PAL] = { /* TEMIC PAL */
+ .name = "Temic PAL (4002 FH5)",
+ .count = 3,
+ .ranges = {
+ { 16 * 140.25 /*MHz*/, 0x02, },
+ { 16 * 463.25 /*MHz*/, 0x04, },
+ { 16 * 999.99 , 0x01, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_PHILIPS_PAL_I] = { /* Philips PAL_I */
+ .name = "Philips PAL_I (FI1246 and compatibles)",
+ .count = 3,
+ .ranges = {
+ { 16 * 140.25 /*MHz*/, 0xa0, },
+ { 16 * 463.25 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_PHILIPS_NTSC] = { /* Philips NTSC */
+ .name = "Philips NTSC (FI1236,FM1236 and compatibles)",
+ .count = 3,
+ .ranges = {
+ { 16 * 157.25 /*MHz*/, 0xa0, },
+ { 16 * 451.25 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_PHILIPS_SECAM] = { /* Philips SECAM */
+ .name = "Philips (SECAM+PAL_BG) (FI1216MF, FM1216MF, FR1216MF)",
+ .count = 3,
+ .ranges = {
+ { 16 * 168.25 /*MHz*/, 0xa7, },
+ { 16 * 447.25 /*MHz*/, 0x97, },
+ { 16 * 999.99 , 0x37, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_ABSENT] = { /* Tuner Absent */
+ .name = "NoTuner",
+ .count = 1,
+ .ranges = {
+ { 0, 0x00, },
+ },
+ .config = 0x00,
+ },
+ [TUNER_PHILIPS_PAL] = { /* Philips PAL */
+ .name = "Philips PAL_BG (FI1216 and compatibles)",
+ .count = 3,
+ .ranges = {
+ { 16 * 168.25 /*MHz*/, 0xa0, },
+ { 16 * 447.25 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_TEMIC_NTSC] = { /* TEMIC NTSC */
+ .name = "Temic NTSC (4032 FY5)",
+ .count = 3,
+ .ranges = {
+ { 16 * 157.25 /*MHz*/, 0x02, },
+ { 16 * 463.25 /*MHz*/, 0x04, },
+ { 16 * 999.99 , 0x01, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_TEMIC_PAL_I] = { /* TEMIC PAL_I */
+ .name = "Temic PAL_I (4062 FY5)",
+ .count = 3,
+ .ranges = {
+ { 16 * 170.00 /*MHz*/, 0x02, },
+ { 16 * 450.00 /*MHz*/, 0x04, },
+ { 16 * 999.99 , 0x01, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_TEMIC_4036FY5_NTSC] = { /* TEMIC NTSC */
+ .name = "Temic NTSC (4036 FY5)",
+ .count = 3,
+ .ranges = {
+ { 16 * 157.25 /*MHz*/, 0xa0, },
+ { 16 * 463.25 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_ALPS_TSBH1_NTSC] = { /* TEMIC NTSC */
+ .name = "Alps HSBH1",
+ .count = 3,
+ .ranges = {
+ { 16 * 137.25 /*MHz*/, 0x01, },
+ { 16 * 385.25 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x08, },
+ },
+ .config = 0x8e,
+ },
/* 10-19 */
- { "Alps TSBE1", TEMIC, PAL,
- 16*137.25,16*385.25,0x01,0x02,0x08,0x8e,732},
- { "Alps TSBB5", Alps, PAL_I, /* tested (UK UHF) with Modulartech MM205 */
- 16*133.25,16*351.25,0x01,0x02,0x08,0x8e,632},
- { "Alps TSBE5", Alps, PAL, /* untested - data sheet guess. Only IF differs. */
- 16*133.25,16*351.25,0x01,0x02,0x08,0x8e,622},
- { "Alps TSBC5", Alps, PAL, /* untested - data sheet guess. Only IF differs. */
- 16*133.25,16*351.25,0x01,0x02,0x08,0x8e,608},
- { "Temic PAL_BG (4006FH5)", TEMIC, PAL,
- 16*170.00,16*450.00,0xa0,0x90,0x30,0x8e,623},
- { "Alps TSCH6", Alps, NTSC,
- 16*137.25,16*385.25,0x14,0x12,0x11,0x8e,732},
- { "Temic PAL_DK (4016 FY5)", TEMIC, PAL,
- 16*168.25,16*456.25,0xa0,0x90,0x30,0x8e,623},
- { "Philips NTSC_M (MK2)", Philips, NTSC,
- 16*160.00,16*454.00,0xa0,0x90,0x30,0x8e,732},
- { "Temic PAL_I (4066 FY5)", TEMIC, PAL_I,
- 16*169.00, 16*454.00, 0xa0,0x90,0x30,0x8e,623},
- { "Temic PAL* auto (4006 FN5)", TEMIC, PAL,
- 16*169.00, 16*454.00, 0xa0,0x90,0x30,0x8e,623},
+ [TUNER_ALPS_TSBE1_PAL] = { /* TEMIC PAL */
+ .name = "Alps TSBE1",
+ .count = 3,
+ .ranges = {
+ { 16 * 137.25 /*MHz*/, 0x01, },
+ { 16 * 385.25 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x08, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_ALPS_TSBB5_PAL_I] = { /* Alps PAL_I */
+ .name = "Alps TSBB5",
+ .count = 3,
+ .ranges = {
+ { 16 * 133.25 /*MHz*/, 0x01, },
+ { 16 * 351.25 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x08, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_ALPS_TSBE5_PAL] = { /* Alps PAL */
+ .name = "Alps TSBE5",
+ .count = 3,
+ .ranges = {
+ { 16 * 133.25 /*MHz*/, 0x01, },
+ { 16 * 351.25 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x08, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_ALPS_TSBC5_PAL] = { /* Alps PAL */
+ .name = "Alps TSBC5",
+ .count = 3,
+ .ranges = {
+ { 16 * 133.25 /*MHz*/, 0x01, },
+ { 16 * 351.25 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x08, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_TEMIC_4006FH5_PAL] = { /* TEMIC PAL */
+ .name = "Temic PAL_BG (4006FH5)",
+ .count = 3,
+ .ranges = {
+ { 16 * 170.00 /*MHz*/, 0xa0, },
+ { 16 * 450.00 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_ALPS_TSHC6_NTSC] = { /* Alps NTSC */
+ .name = "Alps TSCH6",
+ .count = 3,
+ .ranges = {
+ { 16 * 137.25 /*MHz*/, 0x14, },
+ { 16 * 385.25 /*MHz*/, 0x12, },
+ { 16 * 999.99 , 0x11, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_TEMIC_PAL_DK] = { /* TEMIC PAL */
+ .name = "Temic PAL_DK (4016 FY5)",
+ .count = 3,
+ .ranges = {
+ { 16 * 168.25 /*MHz*/, 0xa0, },
+ { 16 * 456.25 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_PHILIPS_NTSC_M] = { /* Philips NTSC */
+ .name = "Philips NTSC_M (MK2)",
+ .count = 3,
+ .ranges = {
+ { 16 * 160.00 /*MHz*/, 0xa0, },
+ { 16 * 454.00 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_TEMIC_4066FY5_PAL_I] = { /* TEMIC PAL_I */
+ .name = "Temic PAL_I (4066 FY5)",
+ .count = 3,
+ .ranges = {
+ { 16 * 169.00 /*MHz*/, 0xa0, },
+ { 16 * 454.00 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_TEMIC_4006FN5_MULTI_PAL] = { /* TEMIC PAL */
+ .name = "Temic PAL* auto (4006 FN5)",
+ .count = 3,
+ .ranges = {
+ { 16 * 169.00 /*MHz*/, 0xa0, },
+ { 16 * 454.00 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
/* 20-29 */
- { "Temic PAL_BG (4009 FR5) or PAL_I (4069 FR5)", TEMIC, PAL,
- 16*141.00, 16*464.00, 0xa0,0x90,0x30,0x8e,623},
- { "Temic NTSC (4039 FR5)", TEMIC, NTSC,
- 16*158.00, 16*453.00, 0xa0,0x90,0x30,0x8e,732},
- { "Temic PAL/SECAM multi (4046 FM5)", TEMIC, PAL,
- 16*169.00, 16*454.00, 0xa0,0x90,0x30,0x8e,623},
- { "Philips PAL_DK (FI1256 and compatibles)", Philips, PAL,
- 16*170.00,16*450.00,0xa0,0x90,0x30,0x8e,623},
- { "Philips PAL/SECAM multi (FQ1216ME)", Philips, PAL,
- 16*170.00,16*450.00,0xa0,0x90,0x30,0x8e,623},
- { "LG PAL_I+FM (TAPC-I001D)", LGINNOTEK, PAL_I,
- 16*170.00,16*450.00,0xa0,0x90,0x30,0x8e,623},
- { "LG PAL_I (TAPC-I701D)", LGINNOTEK, PAL_I,
- 16*170.00,16*450.00,0xa0,0x90,0x30,0x8e,623},
- { "LG NTSC+FM (TPI8NSR01F)", LGINNOTEK, NTSC,
- 16*210.00,16*497.00,0xa0,0x90,0x30,0x8e,732},
- { "LG PAL_BG+FM (TPI8PSB01D)", LGINNOTEK, PAL,
- 16*170.00,16*450.00,0xa0,0x90,0x30,0x8e,623},
- { "LG PAL_BG (TPI8PSB11D)", LGINNOTEK, PAL,
- 16*170.00,16*450.00,0xa0,0x90,0x30,0x8e,623},
+ [TUNER_TEMIC_4009FR5_PAL] = { /* TEMIC PAL */
+ .name = "Temic PAL_BG (4009 FR5) or PAL_I (4069 FR5)",
+ .count = 3,
+ .ranges = {
+ { 16 * 141.00 /*MHz*/, 0xa0, },
+ { 16 * 464.00 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_TEMIC_4039FR5_NTSC] = { /* TEMIC NTSC */
+ .name = "Temic NTSC (4039 FR5)",
+ .count = 3,
+ .ranges = {
+ { 16 * 158.00 /*MHz*/, 0xa0, },
+ { 16 * 453.00 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_TEMIC_4046FM5] = { /* TEMIC PAL */
+ .name = "Temic PAL/SECAM multi (4046 FM5)",
+ .count = 3,
+ .ranges = {
+ { 16 * 169.00 /*MHz*/, 0xa0, },
+ { 16 * 454.00 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_PHILIPS_PAL_DK] = { /* Philips PAL */
+ .name = "Philips PAL_DK (FI1256 and compatibles)",
+ .count = 3,
+ .ranges = {
+ { 16 * 170.00 /*MHz*/, 0xa0, },
+ { 16 * 450.00 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_PHILIPS_FQ1216ME] = { /* Philips PAL */
+ .name = "Philips PAL/SECAM multi (FQ1216ME)",
+ .count = 3,
+ .ranges = {
+ { 16 * 170.00 /*MHz*/, 0xa0, },
+ { 16 * 450.00 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_LG_PAL_I_FM] = { /* LGINNOTEK PAL_I */
+ .name = "LG PAL_I+FM (TAPC-I001D)",
+ .count = 3,
+ .ranges = {
+ { 16 * 170.00 /*MHz*/, 0xa0, },
+ { 16 * 450.00 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_LG_PAL_I] = { /* LGINNOTEK PAL_I */
+ .name = "LG PAL_I (TAPC-I701D)",
+ .count = 3,
+ .ranges = {
+ { 16 * 170.00 /*MHz*/, 0xa0, },
+ { 16 * 450.00 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_LG_NTSC_FM] = { /* LGINNOTEK NTSC */
+ .name = "LG NTSC+FM (TPI8NSR01F)",
+ .count = 3,
+ .ranges = {
+ { 16 * 210.00 /*MHz*/, 0xa0, },
+ { 16 * 497.00 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_LG_PAL_FM] = { /* LGINNOTEK PAL */
+ .name = "LG PAL_BG+FM (TPI8PSB01D)",
+ .count = 3,
+ .ranges = {
+ { 16 * 170.00 /*MHz*/, 0xa0, },
+ { 16 * 450.00 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_LG_PAL] = { /* LGINNOTEK PAL */
+ .name = "LG PAL_BG (TPI8PSB11D)",
+ .count = 3,
+ .ranges = {
+ { 16 * 170.00 /*MHz*/, 0xa0, },
+ { 16 * 450.00 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
/* 30-39 */
- { "Temic PAL* auto + FM (4009 FN5)", TEMIC, PAL,
- 16*141.00, 16*464.00, 0xa0,0x90,0x30,0x8e,623},
- { "SHARP NTSC_JP (2U5JF5540)", SHARP, NTSC, /* 940=16*58.75 NTSC@Japan */
- 16*137.25,16*317.25,0x01,0x02,0x08,0x8e,940 },
- { "Samsung PAL TCPM9091PD27", Samsung, PAL, /* from sourceforge v3tv */
- 16*169,16*464,0xA0,0x90,0x30,0x8e,623},
- { "MT20xx universal", Microtune, PAL|NTSC,
+ [TUNER_TEMIC_4009FN5_MULTI_PAL_FM] = { /* TEMIC PAL */
+ .name = "Temic PAL* auto + FM (4009 FN5)",
+ .count = 3,
+ .ranges = {
+ { 16 * 141.00 /*MHz*/, 0xa0, },
+ { 16 * 464.00 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_SHARP_2U5JF5540_NTSC] = { /* SHARP NTSC */
+ .name = "SHARP NTSC_JP (2U5JF5540)",
+ .count = 3,
+ .ranges = {
+ { 16 * 137.25 /*MHz*/, 0x01, },
+ { 16 * 317.25 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x08, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_Samsung_PAL_TCPM9091PD27] = { /* Samsung PAL */
+ .name = "Samsung PAL TCPM9091PD27",
+ .count = 3,
+ .ranges = {
+ { 16 * 169 /*MHz*/, 0xa0, },
+ { 16 * 464 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_MT2032] = { /* Microtune PAL|NTSC */
+ .name = "MT20xx universal",
/* see mt20xx.c for details */ },
- { "Temic PAL_BG (4106 FH5)", TEMIC, PAL,
- 16*141.00, 16*464.00, 0xa0,0x90,0x30,0x8e,623},
- { "Temic PAL_DK/SECAM_L (4012 FY5)", TEMIC, PAL,
- 16*140.25, 16*463.25, 0x02,0x04,0x01,0x8e,623},
- { "Temic NTSC (4136 FY5)", TEMIC, NTSC,
- 16*158.00, 16*453.00, 0xa0,0x90,0x30,0x8e,732},
- { "LG PAL (newer TAPC series)", LGINNOTEK, PAL,
- 16*170.00, 16*450.00, 0x01,0x02,0x08,0x8e,623},
- { "Philips PAL/SECAM multi (FM1216ME MK3)", Philips, PAL,
- 16*158.00,16*442.00,0x01,0x02,0x04,0x8e,623 },
- { "LG NTSC (newer TAPC series)", LGINNOTEK, NTSC,
- 16*170.00, 16*450.00, 0x01,0x02,0x08,0x8e,732},
+ [TUNER_TEMIC_4106FH5] = { /* TEMIC PAL */
+ .name = "Temic PAL_BG (4106 FH5)",
+ .count = 3,
+ .ranges = {
+ { 16 * 141.00 /*MHz*/, 0xa0, },
+ { 16 * 464.00 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_TEMIC_4012FY5] = { /* TEMIC PAL */
+ .name = "Temic PAL_DK/SECAM_L (4012 FY5)",
+ .count = 3,
+ .ranges = {
+ { 16 * 140.25 /*MHz*/, 0x02, },
+ { 16 * 463.25 /*MHz*/, 0x04, },
+ { 16 * 999.99 , 0x01, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_TEMIC_4136FY5] = { /* TEMIC NTSC */
+ .name = "Temic NTSC (4136 FY5)",
+ .count = 3,
+ .ranges = {
+ { 16 * 158.00 /*MHz*/, 0xa0, },
+ { 16 * 453.00 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_LG_PAL_NEW_TAPC] = { /* LGINNOTEK PAL */
+ .name = "LG PAL (newer TAPC series)",
+ .count = 3,
+ .ranges = {
+ { 16 * 170.00 /*MHz*/, 0x01, },
+ { 16 * 450.00 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x08, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_PHILIPS_FM1216ME_MK3] = { /* Philips PAL */
+ .name = "Philips PAL/SECAM multi (FM1216ME MK3)",
+ .count = 3,
+ .ranges = {
+ { 16 * 158.00 /*MHz*/, 0x01, },
+ { 16 * 442.00 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x04, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_LG_NTSC_NEW_TAPC] = { /* LGINNOTEK NTSC */
+ .name = "LG NTSC (newer TAPC series)",
+ .count = 3,
+ .ranges = {
+ { 16 * 170.00 /*MHz*/, 0x01, },
+ { 16 * 450.00 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x08, },
+ },
+ .config = 0x8e,
+ },
/* 40-49 */
- { "HITACHI V7-J180AT", HITACHI, NTSC,
- 16*170.00, 16*450.00, 0x01,0x02,0x08,0x8e,940 },
- { "Philips PAL_MK (FI1216 MK)", Philips, PAL,
- 16*140.25,16*463.25,0x01,0xc2,0xcf,0x8e,623},
- { "Philips 1236D ATSC/NTSC daul in", Philips, ATSC,
- 16*157.25,16*454.00,0xa0,0x90,0x30,0x8e,732},
- { "Philips NTSC MK3 (FM1236MK3 or FM1236/F)", Philips, NTSC,
- 16*160.00,16*442.00,0x01,0x02,0x04,0x8e,732},
- { "Philips 4 in 1 (ATI TV Wonder Pro/Conexant)", Philips, NTSC,
- 16*160.00,16*442.00,0x01,0x02,0x04,0x8e,732},
- { "Microtune 4049 FM5", Microtune, PAL,
- 16*141.00,16*464.00,0xa0,0x90,0x30,0x8e,623},
- { "Panasonic VP27s/ENGE4324D", Panasonic, NTSC,
- 16*160.00,16*454.00,0x01,0x02,0x08,0xce,940},
- { "LG NTSC (TAPE series)", LGINNOTEK, NTSC,
- 16*160.00,16*442.00,0x01,0x02,0x04,0x8e,732 },
- { "Tenna TNF 8831 BGFF)", Philips, PAL,
- 16*161.25,16*463.25,0xa0,0x90,0x30,0x8e,623},
- { "Microtune 4042 FI5 ATSC/NTSC dual in", Microtune, NTSC,
- 16*162.00,16*457.00,0xa2,0x94,0x31,0x8e,732},
+ [TUNER_HITACHI_NTSC] = { /* HITACHI NTSC */
+ .name = "HITACHI V7-J180AT",
+ .count = 3,
+ .ranges = {
+ { 16 * 170.00 /*MHz*/, 0x01, },
+ { 16 * 450.00 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x08, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_PHILIPS_PAL_MK] = { /* Philips PAL */
+ .name = "Philips PAL_MK (FI1216 MK)",
+ .count = 3,
+ .ranges = {
+ { 16 * 140.25 /*MHz*/, 0x01, },
+ { 16 * 463.25 /*MHz*/, 0xc2, },
+ { 16 * 999.99 , 0xcf, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_PHILIPS_ATSC] = { /* Philips ATSC */
+ .name = "Philips 1236D ATSC/NTSC dual in",
+ .count = 3,
+ .ranges = {
+ { 16 * 157.25 /*MHz*/, 0xa0, },
+ { 16 * 454.00 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_PHILIPS_FM1236_MK3] = { /* Philips NTSC */
+ .name = "Philips NTSC MK3 (FM1236MK3 or FM1236/F)",
+ .count = 3,
+ .ranges = {
+ { 16 * 160.00 /*MHz*/, 0x01, },
+ { 16 * 442.00 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x04, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_PHILIPS_4IN1] = { /* Philips NTSC */
+ .name = "Philips 4 in 1 (ATI TV Wonder Pro/Conexant)",
+ .count = 3,
+ .ranges = {
+ { 16 * 160.00 /*MHz*/, 0x01, },
+ { 16 * 442.00 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x04, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_MICROTUNE_4049FM5] = { /* Microtune PAL */
+ .name = "Microtune 4049 FM5",
+ .count = 3,
+ .ranges = {
+ { 16 * 141.00 /*MHz*/, 0xa0, },
+ { 16 * 464.00 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_PANASONIC_VP27] = { /* Panasonic NTSC */
+ .name = "Panasonic VP27s/ENGE4324D",
+ .count = 3,
+ .ranges = {
+ { 16 * 160.00 /*MHz*/, 0x01, },
+ { 16 * 454.00 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x08, },
+ },
+ .config = 0xce,
+ },
+ [TUNER_LG_NTSC_TAPE] = { /* LGINNOTEK NTSC */
+ .name = "LG NTSC (TAPE series)",
+ .count = 3,
+ .ranges = {
+ { 16 * 160.00 /*MHz*/, 0x01, },
+ { 16 * 442.00 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x04, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_TNF_8831BGFF] = { /* Philips PAL */
+ .name = "Tenna TNF 8831 BGFF",
+ .count = 3,
+ .ranges = {
+ { 16 * 161.25 /*MHz*/, 0xa0, },
+ { 16 * 463.25 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_MICROTUNE_4042FI5] = { /* Microtune NTSC */
+ .name = "Microtune 4042 FI5 ATSC/NTSC dual in",
+ .count = 3,
+ .ranges = {
+ { 16 * 162.00 /*MHz*/, 0xa2, },
+ { 16 * 457.00 /*MHz*/, 0x94, },
+ { 16 * 999.99 , 0x31, },
+ },
+ .config = 0x8e,
+ },
/* 50-59 */
- { "TCL 2002N", TCL, NTSC,
- 16*172.00,16*448.00,0x01,0x02,0x08,0x8e,732},
- { "Philips PAL/SECAM_D (FM 1256 I-H3)", Philips, PAL,
- 16*160.00,16*442.00,0x01,0x02,0x04,0x8e,623 },
- { "Thomson DDT 7610 (ATSC/NTSC)", THOMSON, ATSC,
- 16*157.25,16*454.00,0x39,0x3a,0x3c,0x8e,732},
- { "Philips FQ1286", Philips, NTSC,
- 16*160.00,16*454.00,0x41,0x42,0x04,0x8e,940}, /* UHF band untested */
- { "tda8290+75", Philips, PAL|NTSC,
+ [TUNER_TCL_2002N] = { /* TCL NTSC */
+ .name = "TCL 2002N",
+ .count = 3,
+ .ranges = {
+ { 16 * 172.00 /*MHz*/, 0x01, },
+ { 16 * 448.00 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x08, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_PHILIPS_FM1256_IH3] = { /* Philips PAL */
+ .name = "Philips PAL/SECAM_D (FM 1256 I-H3)",
+ .count = 3,
+ .ranges = {
+ { 16 * 160.00 /*MHz*/, 0x01, },
+ { 16 * 442.00 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x04, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_THOMSON_DTT7610] = { /* THOMSON ATSC */
+ .name = "Thomson DTT 7610 (ATSC/NTSC)",
+ .count = 3,
+ .ranges = {
+ { 16 * 157.25 /*MHz*/, 0x39, },
+ { 16 * 454.00 /*MHz*/, 0x3a, },
+ { 16 * 999.99 , 0x3c, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_PHILIPS_FQ1286] = { /* Philips NTSC */
+ .name = "Philips FQ1286",
+ .count = 3,
+ .ranges = {
+ { 16 * 160.00 /*MHz*/, 0x41, },
+ { 16 * 454.00 /*MHz*/, 0x42, },
+ { 16 * 999.99 , 0x04, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_PHILIPS_TDA8290] = { /* Philips PAL|NTSC */
+ .name = "tda8290+75",
/* see tda8290.c for details */ },
- { "TCL 2002MB", TCL, PAL,
- 16*170.00, 16*450.00, 0x01,0x02,0x08,0xce,623},
- { "Philips PAL/SECAM multi (FQ1216AME MK4)", Philips, PAL,
- 16*160.00,16*442.00,0x01,0x02,0x04,0xce,623 },
- { "Philips FQ1236A MK4", Philips, NTSC,
- 16*160.00,16*442.00,0x01,0x02,0x04,0x8e,732 },
- { "Ymec TVision TVF-8531MF/8831MF/8731MF", Philips, NTSC,
- 16*160.00,16*454.00,0xa0,0x90,0x30,0x8e,732},
- { "Ymec TVision TVF-5533MF", Philips, NTSC,
- 16*160.00,16*454.00,0x01,0x02,0x04,0x8e,732},
+ [TUNER_TCL_2002MB] = { /* TCL PAL */
+ .name = "TCL 2002MB",
+ .count = 3,
+ .ranges = {
+ { 16 * 170.00 /*MHz*/, 0x01, },
+ { 16 * 450.00 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x08, },
+ },
+ .config = 0xce,
+ },
+ [TUNER_PHILIPS_FQ1216AME_MK4] = { /* Philips PAL */
+ .name = "Philips PAL/SECAM multi (FQ1216AME MK4)",
+ .count = 3,
+ .ranges = {
+ { 16 * 160.00 /*MHz*/, 0x01, },
+ { 16 * 442.00 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x04, },
+ },
+ .config = 0xce,
+ },
+ [TUNER_PHILIPS_FQ1236A_MK4] = { /* Philips NTSC */
+ .name = "Philips FQ1236A MK4",
+ .count = 3,
+ .ranges = {
+ { 16 * 160.00 /*MHz*/, 0x01, },
+ { 16 * 442.00 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x04, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_YMEC_TVF_8531MF] = { /* Philips NTSC */
+ .name = "Ymec TVision TVF-8531MF/8831MF/8731MF",
+ .count = 3,
+ .ranges = {
+ { 16 * 160.00 /*MHz*/, 0xa0, },
+ { 16 * 454.00 /*MHz*/, 0x90, },
+ { 16 * 999.99 , 0x30, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_YMEC_TVF_5533MF] = { /* Philips NTSC */
+ .name = "Ymec TVision TVF-5533MF",
+ .count = 3,
+ .ranges = {
+ { 16 * 160.00 /*MHz*/, 0x01, },
+ { 16 * 454.00 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x04, },
+ },
+ .config = 0x8e,
+ },
/* 60-69 */
- { "Thomson DDT 7611 (ATSC/NTSC)", THOMSON, ATSC,
- 16*157.25,16*454.00,0x39,0x3a,0x3c,0x8e,732},
- { "Tena TNF9533-D/IF/TNF9533-B/DF", Philips, PAL,
- 16*160.25,16*464.25,0x01,0x02,0x04,0x8e,623},
- { "Philips TEA5767HN FM Radio", Philips, RADIO,
+ [TUNER_THOMSON_DTT761X] = { /* THOMSON ATSC */
+ /* DTT 7611 7611A 7612 7613 7613A 7614 7615 7615A */
+ .name = "Thomson DTT 761X (ATSC/NTSC)",
+ .count = 3,
+ .ranges = {
+ { 16 * 145.25 /*MHz*/, 0x39, },
+ { 16 * 415.25 /*MHz*/, 0x3a, },
+ { 16 * 999.99 , 0x3c, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_TENA_9533_DI] = { /* Philips PAL */
+ .name = "Tena TNF9533-D/IF/TNF9533-B/DF",
+ .count = 3,
+ .ranges = {
+ { 16 * 160.25 /*MHz*/, 0x01, },
+ { 16 * 464.25 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x04, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_TEA5767] = { /* Philips RADIO */
+ .name = "Philips TEA5767HN FM Radio",
/* see tea5767.c for details */},
- { "Philips FMD1216ME MK3 Hybrid Tuner", Philips, PAL,
- 16*160.00,16*442.00,0x51,0x52,0x54,0x86,623 },
- { "LG TDVS-H062F/TUA6034", LGINNOTEK, ATSC,
- 16*160.00,16*455.00,0x01,0x02,0x04,0x8e,732},
- { "Ymec TVF66T5-B/DFF", Philips, PAL,
- 16*160.25,16*464.25,0x01,0x02,0x08,0x8e,623},
- { "LG NTSC (TALN mini series)", LGINNOTEK, NTSC,
- 16*137.25,16*373.25,0x01,0x02,0x08,0x8e,732 },
- { "Philips TD1316 Hybrid Tuner", Philips, PAL,
- 16*160.00,16*442.00,0xa1,0xa2,0xa4,0xc8,623 },
- { "Philips TUV1236D ATSC/NTSC dual in", Philips, ATSC,
- 16*157.25,16*454.00,0x01,0x02,0x04,0xce,732 },
- { "Tena TNF 5335 MF", Philips, NTSC,
- 16*157.25,16*454.00,0x01,0x02,0x04,0x8e,732 },
+ [TUNER_PHILIPS_FMD1216ME_MK3] = { /* Philips PAL */
+ .name = "Philips FMD1216ME MK3 Hybrid Tuner",
+ .count = 3,
+ .ranges = {
+ { 16 * 160.00 /*MHz*/, 0x51, },
+ { 16 * 442.00 /*MHz*/, 0x52, },
+ { 16 * 999.99 , 0x54, },
+ },
+ .config = 0x86,
+ },
+ [TUNER_LG_TDVS_H062F] = { /* LGINNOTEK ATSC */
+ .name = "LG TDVS-H062F/TUA6034",
+ .count = 3,
+ .ranges = {
+ { 16 * 160.00 /*MHz*/, 0x01 },
+ { 16 * 455.00 /*MHz*/, 0x02 },
+ { 16 * 999.99 , 0x04 },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_YMEC_TVF66T5_B_DFF] = { /* Philips PAL */
+ .name = "Ymec TVF66T5-B/DFF",
+ .count = 3,
+ .ranges = {
+ { 16 * 160.25 /*MHz*/, 0x01, },
+ { 16 * 464.25 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x08, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_LG_NTSC_TALN_MINI] = { /* LGINNOTEK NTSC */
+ .name = "LG NTSC (TALN mini series)",
+ .count = 3,
+ .ranges = {
+ { 16 * 137.25 /*MHz*/, 0x01, },
+ { 16 * 373.25 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x08, },
+ },
+ .config = 0x8e,
+ },
+ [TUNER_PHILIPS_TD1316] = { /* Philips PAL */
+ .name = "Philips TD1316 Hybrid Tuner",
+ .count = 3,
+ .ranges = {
+ { 16 * 160.00 /*MHz*/, 0xa1, },
+ { 16 * 442.00 /*MHz*/, 0xa2, },
+ { 16 * 999.99 , 0xa4, },
+ },
+ .config = 0xc8,
+ },
+ [TUNER_PHILIPS_TUV1236D] = { /* Philips ATSC */
+ .name = "Philips TUV1236D ATSC/NTSC dual in",
+ .count = 3,
+ .ranges = {
+ { 16 * 157.25 /*MHz*/, 0x01, },
+ { 16 * 454.00 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x04, },
+ },
+ .config = 0xce,
+ },
+ [TUNER_TNF_5335MF] = { /* Philips NTSC */
+ .name = "Tena TNF 5335 MF",
+ .count = 3,
+ .ranges = {
+ { 16 * 157.25 /*MHz*/, 0x01, },
+ { 16 * 454.00 /*MHz*/, 0x02, },
+ { 16 * 999.99 , 0x04, },
+ },
+ .config = 0x8e,
+ },
};
unsigned const int tuner_count = ARRAY_SIZE(tuners);
@@ -305,20 +843,19 @@ static void default_set_tv_freq(struct i2c_client *c, unsigned int freq)
u16 div;
struct tunertype *tun;
unsigned char buffer[4];
- int rc;
+ int rc, IFPCoff, i;
tun = &tuners[t->type];
- if (freq < tun->thresh1) {
- config = tun->VHF_L;
- tuner_dbg("tv: VHF lowrange\n");
- } else if (freq < tun->thresh2) {
- config = tun->VHF_H;
- tuner_dbg("tv: VHF high range\n");
- } else {
- config = tun->UHF;
- tuner_dbg("tv: UHF range\n");
+ for (i = 0; i < tun->count; i++) {
+ if (freq > tun->ranges[i].thresh)
+ continue;
+ break;
}
-
+ config = tun->ranges[i].cb;
+ /* i == 0 -> VHF_LO */
+ /* i == 1 -> VHF_HI */
+ /* i == 2 -> UHF */
+ tuner_dbg("tv: range %d\n",i);
/* tv norm specific stuff for multi-norm tuners */
switch (t->type) {
@@ -420,7 +957,37 @@ static void default_set_tv_freq(struct i2c_client *c, unsigned int freq)
* frequency in case (wanted frequency < current frequency).
*/
- div=freq + tun->IFPCoff;
+ /* IFPCoff = Video Intermediate Frequency - Vif:
+ 940 =16*58.75 NTSC/J (Japan)
+ 732 =16*45.75 M/N STD
+ 704 =16*44 ATSC (at DVB code)
+ 632 =16*39.50 I U.K.
+ 622.4=16*38.90 B/G D/K I, L STD
+ 592 =16*37.00 D China
+ 590 =16*36.875 B Australia
+ 543.2=16*33.95 L' STD
+ 171.2=16*10.70 FM Radio (at set_radio_freq)
+ */
+
+ if (t->std == V4L2_STD_NTSC_M_JP) {
+ IFPCoff = 940;
+ } else if ((t->std & V4L2_STD_MN) &&
+ !(t->std & ~V4L2_STD_MN)) {
+ IFPCoff = 732;
+ } else if (t->std == V4L2_STD_SECAM_LC) {
+ IFPCoff = 543;
+ } else {
+ IFPCoff = 623;
+ }
+
+ div=freq + IFPCoff + offset;
+
+ tuner_dbg("Freq= %d.%02d MHz, V_IF=%d.%02d MHz, Offset=%d.%02d MHz, div=%0d\n",
+ freq / 16, freq % 16 * 100 / 16,
+ IFPCoff / 16, IFPCoff % 16 * 100 / 16,
+ offset / 16, offset % 16 * 100 / 16,
+ div);
+
if (t->type == TUNER_PHILIPS_SECAM && freq < t->freq) {
buffer[0] = tun->config;
buffer[1] = config;
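With thresh1/thresh2/VHF_L/VHF_H/UHF folded into the ranges[] table, default_set_tv_freq() now walks ranges[] to pick the band byte and derives the divider from a standard-dependent IF plus the new insmod offset, instead of the old per-entry IFPCoff. A worked example for a PAL-B/G UHF channel, using values visible in the hunks above (the channel frequency itself is illustrative):

	/* Philips FM1216ME MK3, PAL-B/G, vision carrier 503.25 MHz:
	 *   freq    = 16 * 503.25                 = 8052  (1/16 MHz units)
	 *   ranges  : 8052 > 16*158.00 (2528) and > 16*442.00 (7072)
	 *             -> third entry, cb = 0x04   (i == 2 -> UHF)
	 *   IFPCoff = 623                         (B/G, D/K, I, L group)
	 *   div     = 8052 + 623 + offset         = 8675 when offset == 0
	 *             -> LO at 8675/16 = 542.19 MHz (503.25 + 38.94)
	 */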
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index 5b20e8177ca..6d03b9b05c6 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -31,6 +31,7 @@
#include <linux/smp_lock.h>
#include <media/audiochip.h>
+#include <media/v4l2-common.h>
#include "tvaudio.h"
@@ -46,17 +47,6 @@ MODULE_LICENSE("GPL");
#define UNSET (-1U)
-#define tvaudio_info(fmt, arg...) do {\
- printk(KERN_INFO "tvaudio %d-%04x: " fmt, \
- chip->c.adapter->nr, chip->c.addr , ##arg); } while (0)
-#define tvaudio_warn(fmt, arg...) do {\
- printk(KERN_WARNING "tvaudio %d-%04x: " fmt, \
- chip->c.adapter->nr, chip->c.addr , ##arg); } while (0)
-#define tvaudio_dbg(fmt, arg...) do {\
- if (debug) \
- printk(KERN_INFO "tvaudio %d-%04x: " fmt, \
- chip->c.adapter->nr, chip->c.addr , ##arg); } while (0)
-
/* ---------------------------------------------------------------------- */
/* our structs */
@@ -131,7 +121,7 @@ struct CHIPSTATE {
/* current settings */
__u16 left,right,treble,bass,mode;
int prevmode;
- int norm;
+ int radio;
/* thread */
pid_t tpid;
@@ -142,8 +132,6 @@ struct CHIPSTATE {
int watch_stereo;
};
-#define VIDEO_MODE_RADIO 16 /* norm magic for radio mode */
-
/* ---------------------------------------------------------------------- */
/* i2c addresses */
@@ -171,23 +159,23 @@ static int chip_write(struct CHIPSTATE *chip, int subaddr, int val)
unsigned char buffer[2];
if (-1 == subaddr) {
- tvaudio_dbg("%s: chip_write: 0x%x\n",
+ v4l_dbg(1, debug, &chip->c, "%s: chip_write: 0x%x\n",
chip->c.name, val);
chip->shadow.bytes[1] = val;
buffer[0] = val;
if (1 != i2c_master_send(&chip->c,buffer,1)) {
- tvaudio_warn("%s: I/O error (write 0x%x)\n",
+ v4l_warn(&chip->c, "%s: I/O error (write 0x%x)\n",
chip->c.name, val);
return -1;
}
} else {
- tvaudio_dbg("%s: chip_write: reg%d=0x%x\n",
+ v4l_dbg(1, debug, &chip->c, "%s: chip_write: reg%d=0x%x\n",
chip->c.name, subaddr, val);
chip->shadow.bytes[subaddr+1] = val;
buffer[0] = subaddr;
buffer[1] = val;
if (2 != i2c_master_send(&chip->c,buffer,2)) {
- tvaudio_warn("%s: I/O error (write reg%d=0x%x)\n",
+ v4l_warn(&chip->c, "%s: I/O error (write reg%d=0x%x)\n",
chip->c.name, subaddr, val);
return -1;
}
@@ -212,11 +200,11 @@ static int chip_read(struct CHIPSTATE *chip)
unsigned char buffer;
if (1 != i2c_master_recv(&chip->c,&buffer,1)) {
- tvaudio_warn("%s: I/O error (read)\n",
+ v4l_warn(&chip->c, "%s: I/O error (read)\n",
chip->c.name);
return -1;
}
- tvaudio_dbg("%s: chip_read: 0x%x\n",chip->c.name, buffer);
+ v4l_dbg(1, debug, &chip->c, "%s: chip_read: 0x%x\n",chip->c.name, buffer);
return buffer;
}
@@ -231,10 +219,10 @@ static int chip_read2(struct CHIPSTATE *chip, int subaddr)
write[0] = subaddr;
if (2 != i2c_transfer(chip->c.adapter,msgs,2)) {
- tvaudio_warn("%s: I/O error (read2)\n", chip->c.name);
+ v4l_warn(&chip->c, "%s: I/O error (read2)\n", chip->c.name);
return -1;
}
- tvaudio_dbg("%s: chip_read2: reg%d=0x%x\n",
+ v4l_dbg(1, debug, &chip->c, "%s: chip_read2: reg%d=0x%x\n",
chip->c.name, subaddr,read[0]);
return read[0];
}
@@ -247,7 +235,7 @@ static int chip_cmd(struct CHIPSTATE *chip, char *name, audiocmd *cmd)
return 0;
/* update our shadow register set; print bytes if (debug > 0) */
- tvaudio_dbg("%s: chip_cmd(%s): reg=%d, data:",
+ v4l_dbg(1, debug, &chip->c, "%s: chip_cmd(%s): reg=%d, data:",
chip->c.name, name,cmd->bytes[0]);
for (i = 1; i < cmd->count; i++) {
if (debug)
@@ -259,7 +247,7 @@ static int chip_cmd(struct CHIPSTATE *chip, char *name, audiocmd *cmd)
/* send data to the chip */
if (cmd->count != i2c_master_send(&chip->c,cmd->bytes,cmd->count)) {
- tvaudio_warn("%s: I/O error (%s)\n", chip->c.name, name);
+ v4l_warn(&chip->c, "%s: I/O error (%s)\n", chip->c.name, name);
return -1;
}
return 0;
@@ -286,7 +274,7 @@ static int chip_thread(void *data)
daemonize("%s", chip->c.name);
allow_signal(SIGTERM);
- tvaudio_dbg("%s: thread started\n", chip->c.name);
+ v4l_dbg(1, debug, &chip->c, "%s: thread started\n", chip->c.name);
for (;;) {
add_wait_queue(&chip->wq, &wait);
@@ -298,10 +286,10 @@ static int chip_thread(void *data)
try_to_freeze();
if (chip->done || signal_pending(current))
break;
- tvaudio_dbg("%s: thread wakeup\n", chip->c.name);
+ v4l_dbg(1, debug, &chip->c, "%s: thread wakeup\n", chip->c.name);
/* don't do anything for radio or if mode != auto */
- if (chip->norm == VIDEO_MODE_RADIO || chip->mode != 0)
+ if (chip->radio || chip->mode != 0)
continue;
/* have a look what's going on */
@@ -311,7 +299,7 @@ static int chip_thread(void *data)
mod_timer(&chip->wt, jiffies+2*HZ);
}
- tvaudio_dbg("%s: thread exiting\n", chip->c.name);
+ v4l_dbg(1, debug, &chip->c, "%s: thread exiting\n", chip->c.name);
complete_and_exit(&chip->texit, 0);
return 0;
}
@@ -324,7 +312,7 @@ static void generic_checkmode(struct CHIPSTATE *chip)
if (mode == chip->prevmode)
return;
- tvaudio_dbg("%s: thread checkmode\n", chip->c.name);
+ v4l_dbg(1, debug, &chip->c, "%s: thread checkmode\n", chip->c.name);
chip->prevmode = mode;
if (mode & VIDEO_SOUND_STEREO)
@@ -371,7 +359,7 @@ static int tda9840_getmode(struct CHIPSTATE *chip)
if (val & TDA9840_ST_STEREO)
mode |= VIDEO_SOUND_STEREO;
- tvaudio_dbg ("tda9840_getmode(): raw chip read: %d, return: %d\n",
+ v4l_dbg(1, debug, &chip->c, "tda9840_getmode(): raw chip read: %d, return: %d\n",
val, mode);
return mode;
}
@@ -667,7 +655,7 @@ static int tda9873_getmode(struct CHIPSTATE *chip)
mode |= VIDEO_SOUND_STEREO;
if (val & TDA9873_DUAL)
mode |= VIDEO_SOUND_LANG1 | VIDEO_SOUND_LANG2;
- tvaudio_dbg ("tda9873_getmode(): raw chip read: %d, return: %d\n",
+ v4l_dbg(1, debug, &chip->c, "tda9873_getmode(): raw chip read: %d, return: %d\n",
val, mode);
return mode;
}
@@ -678,12 +666,12 @@ static void tda9873_setmode(struct CHIPSTATE *chip, int mode)
/* int adj_data = chip->shadow.bytes[TDA9873_AD+1] ; */
if ((sw_data & TDA9873_INP_MASK) != TDA9873_INTERNAL) {
- tvaudio_dbg("tda9873_setmode(): external input\n");
+ v4l_dbg(1, debug, &chip->c, "tda9873_setmode(): external input\n");
return;
}
- tvaudio_dbg("tda9873_setmode(): chip->shadow.bytes[%d] = %d\n", TDA9873_SW+1, chip->shadow.bytes[TDA9873_SW+1]);
- tvaudio_dbg("tda9873_setmode(): sw_data = %d\n", sw_data);
+ v4l_dbg(1, debug, &chip->c, "tda9873_setmode(): chip->shadow.bytes[%d] = %d\n", TDA9873_SW+1, chip->shadow.bytes[TDA9873_SW+1]);
+ v4l_dbg(1, debug, &chip->c, "tda9873_setmode(): sw_data = %d\n", sw_data);
switch (mode) {
case VIDEO_SOUND_MONO:
@@ -704,7 +692,7 @@ static void tda9873_setmode(struct CHIPSTATE *chip, int mode)
}
chip_write(chip, TDA9873_SW, sw_data);
- tvaudio_dbg("tda9873_setmode(): req. mode %d; chip_write: %d\n",
+ v4l_dbg(1, debug, &chip->c, "tda9873_setmode(): req. mode %d; chip_write: %d\n",
mode, sw_data);
}
@@ -843,7 +831,7 @@ static int tda9874a_setup(struct CHIPSTATE *chip)
chip_write(chip, TDA9874A_SDACOSR, (tda9874a_mode) ? 0x81:0x80);
chip_write(chip, TDA9874A_AOSR, 0x00); /* or 0x10 */
}
- tvaudio_dbg("tda9874a_setup(): %s [0x%02X].\n",
+ v4l_dbg(1, debug, &chip->c, "tda9874a_setup(): %s [0x%02X].\n",
tda9874a_modelist[tda9874a_STD].name,tda9874a_STD);
return 1;
}
@@ -886,7 +874,7 @@ static int tda9874a_getmode(struct CHIPSTATE *chip)
mode |= VIDEO_SOUND_LANG1 | VIDEO_SOUND_LANG2;
}
- tvaudio_dbg("tda9874a_getmode(): DSR=0x%X, NSR=0x%X, NECR=0x%X, return: %d.\n",
+ v4l_dbg(1, debug, &chip->c, "tda9874a_getmode(): DSR=0x%X, NSR=0x%X, NECR=0x%X, return: %d.\n",
dsr, nsr, necr, mode);
return mode;
}
@@ -932,7 +920,7 @@ static void tda9874a_setmode(struct CHIPSTATE *chip, int mode)
chip_write(chip, TDA9874A_AOSR, aosr);
chip_write(chip, TDA9874A_MDACOSR, mdacosr);
- tvaudio_dbg("tda9874a_setmode(): req. mode %d; AOSR=0x%X, MDACOSR=0x%X.\n",
+ v4l_dbg(1, debug, &chip->c, "tda9874a_setmode(): req. mode %d; AOSR=0x%X, MDACOSR=0x%X.\n",
mode, aosr, mdacosr);
} else { /* dic == 0x07 */
@@ -967,7 +955,7 @@ static void tda9874a_setmode(struct CHIPSTATE *chip, int mode)
chip_write(chip, TDA9874A_FMMR, fmmr);
chip_write(chip, TDA9874A_AOSR, aosr);
- tvaudio_dbg("tda9874a_setmode(): req. mode %d; FMMR=0x%X, AOSR=0x%X.\n",
+ v4l_dbg(1, debug, &chip->c, "tda9874a_setmode(): req. mode %d; FMMR=0x%X, AOSR=0x%X.\n",
mode, fmmr, aosr);
}
}
@@ -981,10 +969,10 @@ static int tda9874a_checkit(struct CHIPSTATE *chip)
if(-1 == (sic = chip_read2(chip,TDA9874A_SIC)))
return 0;
- tvaudio_dbg("tda9874a_checkit(): DIC=0x%X, SIC=0x%X.\n", dic, sic);
+ v4l_dbg(1, debug, &chip->c, "tda9874a_checkit(): DIC=0x%X, SIC=0x%X.\n", dic, sic);
if((dic == 0x11)||(dic == 0x07)) {
- tvaudio_info("found tda9874%s.\n", (dic == 0x11) ? "a":"h");
+ v4l_info(&chip->c, "found tda9874%s.\n", (dic == 0x11) ? "a":"h");
tda9874a_dic = dic; /* remember device id. */
return 1;
}
@@ -1196,7 +1184,7 @@ static int ta8874z_getmode(struct CHIPSTATE *chip)
}else if (!(val & TA8874Z_B0)){
mode |= VIDEO_SOUND_STEREO;
}
- /* tvaudio_dbg ("ta8874z_getmode(): raw chip read: 0x%02x, return: 0x%02x\n", val, mode); */
+ /* v4l_dbg(1, debug, &chip->c, "ta8874z_getmode(): raw chip read: 0x%02x, return: 0x%02x\n", val, mode); */
return mode;
}
@@ -1209,7 +1197,7 @@ static void ta8874z_setmode(struct CHIPSTATE *chip, int mode)
{
int update = 1;
audiocmd *t = NULL;
- tvaudio_dbg("ta8874z_setmode(): mode: 0x%02x\n", mode);
+ v4l_dbg(1, debug, &chip->c, "ta8874z_setmode(): mode: 0x%02x\n", mode);
switch(mode){
case VIDEO_SOUND_MONO:
@@ -1480,17 +1468,16 @@ static int chip_attach(struct i2c_adapter *adap, int addr, int kind)
struct CHIPSTATE *chip;
struct CHIPDESC *desc;
- chip = kmalloc(sizeof(*chip),GFP_KERNEL);
+ chip = kzalloc(sizeof(*chip),GFP_KERNEL);
if (!chip)
return -ENOMEM;
- memset(chip,0,sizeof(*chip));
memcpy(&chip->c,&client_template,sizeof(struct i2c_client));
chip->c.adapter = adap;
chip->c.addr = addr;
i2c_set_clientdata(&chip->c, chip);
/* find description for the chip */
- tvaudio_dbg("chip found @ 0x%x\n", addr<<1);
+ v4l_dbg(1, debug, &chip->c, "chip found @ 0x%x\n", addr<<1);
for (desc = chiplist; desc->name != NULL; desc++) {
if (0 == *(desc->insmodopt))
continue;
@@ -1502,12 +1489,12 @@ static int chip_attach(struct i2c_adapter *adap, int addr, int kind)
break;
}
if (desc->name == NULL) {
- tvaudio_dbg("no matching chip description found\n");
+ v4l_dbg(1, debug, &chip->c, "no matching chip description found\n");
return -EIO;
}
- tvaudio_info("%s found @ 0x%x (%s)\n", desc->name, addr<<1, adap->name);
+ v4l_info(&chip->c, "%s found @ 0x%x (%s)\n", desc->name, addr<<1, adap->name);
if (desc->flags) {
- tvaudio_dbg("matches:%s%s%s.\n",
+ v4l_dbg(1, debug, &chip->c, "matches:%s%s%s.\n",
(desc->flags & CHIP_HAS_VOLUME) ? " volume" : "",
(desc->flags & CHIP_HAS_BASSTREBLE) ? " bass/treble" : "",
(desc->flags & CHIP_HAS_INPUTSEL) ? " audiomux" : "");
@@ -1550,7 +1537,7 @@ static int chip_attach(struct i2c_adapter *adap, int addr, int kind)
init_completion(&chip->texit);
chip->tpid = kernel_thread(chip_thread,(void *)chip,0);
if (chip->tpid < 0)
- tvaudio_warn("%s: kernel_thread() failed\n",
+ v4l_warn(&chip->c, "%s: kernel_thread() failed\n",
chip->c.name);
wake_up_interruptible(&chip->wq);
}
@@ -1563,17 +1550,8 @@ static int chip_probe(struct i2c_adapter *adap)
because dedicated drivers are used */
if ((adap->id == I2C_HW_SAA7146))
return 0;
-#ifdef I2C_CLASS_TV_ANALOG
if (adap->class & I2C_CLASS_TV_ANALOG)
return i2c_probe(adap, &addr_data, chip_attach);
-#else
- switch (adap->id) {
- case I2C_HW_B_BT848:
- case I2C_HW_B_RIVA:
- case I2C_HW_SAA7134:
- return i2c_probe(adap, &addr_data, chip_attach);
- }
-#endif
return 0;
}
@@ -1604,7 +1582,7 @@ static int chip_command(struct i2c_client *client,
struct CHIPSTATE *chip = i2c_get_clientdata(client);
struct CHIPDESC *desc = chiplist + chip->type;
- tvaudio_dbg("%s: chip_command 0x%x\n", chip->c.name, cmd);
+ v4l_dbg(1, debug, &chip->c, "%s: chip_command 0x%x\n", chip->c.name, cmd);
switch (cmd) {
case AUDC_SET_INPUT:
@@ -1617,7 +1595,7 @@ static int chip_command(struct i2c_client *client,
break;
case AUDC_SET_RADIO:
- chip->norm = VIDEO_MODE_RADIO;
+ chip->radio = 1;
chip->watch_stereo = 0;
/* del_timer(&chip->wt); */
break;
@@ -1643,7 +1621,7 @@ static int chip_command(struct i2c_client *client,
va->bass = chip->bass;
va->treble = chip->treble;
}
- if (chip->norm != VIDEO_MODE_RADIO) {
+ if (!chip->radio) {
if (desc->getmode)
va->mode = desc->getmode(chip);
else
@@ -1678,15 +1656,80 @@ static int chip_command(struct i2c_client *client,
}
break;
}
- case VIDIOCSCHAN:
+
+ case VIDIOC_S_TUNER:
{
- struct video_channel *vc = arg;
+ struct v4l2_tuner *vt = arg;
+ int mode = 0;
+
+ switch (vt->audmode) {
+ case V4L2_TUNER_MODE_MONO:
+ mode = VIDEO_SOUND_MONO;
+ break;
+ case V4L2_TUNER_MODE_STEREO:
+ mode = VIDEO_SOUND_STEREO;
+ break;
+ case V4L2_TUNER_MODE_LANG1:
+ mode = VIDEO_SOUND_LANG1;
+ break;
+ case V4L2_TUNER_MODE_LANG2:
+ mode = VIDEO_SOUND_LANG2;
+ break;
+ default:
+ break;
+ }
- chip->norm = vc->norm;
+ if (desc->setmode && mode) {
+ chip->watch_stereo = 0;
+ /* del_timer(&chip->wt); */
+ chip->mode = mode;
+ desc->setmode(chip, mode);
+ }
break;
}
- case VIDIOCSFREQ:
+
+ case VIDIOC_G_TUNER:
{
+ struct v4l2_tuner *vt = arg;
+ int mode = VIDEO_SOUND_MONO;
+
+ if (chip->radio)
+ break;
+ vt->audmode = 0;
+ vt->rxsubchans = 0;
+ vt->capability = V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
+
+ if (desc->getmode)
+ mode = desc->getmode(chip);
+
+ if (mode & VIDEO_SOUND_MONO)
+ vt->rxsubchans |= V4L2_TUNER_SUB_MONO;
+ if (mode & VIDEO_SOUND_STEREO)
+ vt->rxsubchans |= V4L2_TUNER_SUB_STEREO;
+ if (mode & VIDEO_SOUND_LANG1)
+ vt->rxsubchans |= V4L2_TUNER_SUB_LANG1 |
+ V4L2_TUNER_SUB_LANG2;
+
+ mode = chip->mode;
+ if (mode & VIDEO_SOUND_MONO)
+ vt->audmode = V4L2_TUNER_MODE_MONO;
+ if (mode & VIDEO_SOUND_STEREO)
+ vt->audmode = V4L2_TUNER_MODE_STEREO;
+ if (mode & VIDEO_SOUND_LANG1)
+ vt->audmode = V4L2_TUNER_MODE_LANG1;
+ if (mode & VIDEO_SOUND_LANG2)
+ vt->audmode = V4L2_TUNER_MODE_LANG2;
+ break;
+ }
+
+ case VIDIOCSCHAN:
+ case VIDIOC_S_STD:
+ chip->radio = 0;
+ break;
+
+ case VIDIOCSFREQ:
+ case VIDIOC_S_FREQUENCY:
chip->mode = 0; /* automatic */
if (desc->checkmode) {
desc->setmode(chip,VIDEO_SOUND_MONO);
@@ -1695,17 +1738,16 @@ static int chip_command(struct i2c_client *client,
mod_timer(&chip->wt, jiffies+2*HZ);
/* the thread will call checkmode() later */
}
- }
+ break;
}
return 0;
}
-
static struct i2c_driver driver = {
- .owner = THIS_MODULE,
- .name = "generic i2c audio driver",
+ .driver = {
+ .name = "tvaudio",
+ },
.id = I2C_DRIVERID_TVAUDIO,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = chip_probe,
.detach_client = chip_detach,
.command = chip_command,
@@ -1714,7 +1756,6 @@ static struct i2c_driver driver = {
static struct i2c_client client_template =
{
.name = "(unset)",
- .flags = I2C_CLIENT_ALLOW_USE,
.driver = &driver,
};
diff --git a/drivers/media/video/tveeprom.c b/drivers/media/video/tveeprom.c
index 5ac235365dd..5e71a354e87 100644
--- a/drivers/media/video/tveeprom.c
+++ b/drivers/media/video/tveeprom.c
@@ -40,6 +40,7 @@
#include <media/tuner.h>
#include <media/tveeprom.h>
+#include <media/v4l2-common.h>
#include <media/audiochip.h>
MODULE_DESCRIPTION("i2c Hauppauge eeprom decoder driver");
@@ -52,21 +53,19 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
#define STRM(array,i) (i < sizeof(array)/sizeof(char*) ? array[i] : "unknown")
-#define tveeprom_info(fmt, arg...) do {\
- printk(KERN_INFO "tveeprom %d-%04x: " fmt, \
- c->adapter->nr, c->addr , ##arg); } while (0)
-#define tveeprom_warn(fmt, arg...) do {\
- printk(KERN_WARNING "tveeprom %d-%04x: " fmt, \
- c->adapter->nr, c->addr , ##arg); } while (0)
-#define tveeprom_dbg(fmt, arg...) do {\
+#define tveeprom_info(fmt, arg...) \
+ v4l_printk(KERN_INFO, "tveeprom", c->adapter, c->addr, fmt , ## arg)
+#define tveeprom_warn(fmt, arg...) \
+ v4l_printk(KERN_WARNING, "tveeprom", c->adapter, c->addr, fmt , ## arg)
+#define tveeprom_dbg(fmt, arg...) do { \
if (debug) \
- printk(KERN_INFO "tveeprom %d-%04x: " fmt, \
- c->adapter->nr, c->addr , ##arg); } while (0)
-
-
-/* ----------------------------------------------------------------------- */
-/* some hauppauge specific stuff */
+ v4l_printk(KERN_DEBUG, "tveeprom", c->adapter, c->addr, fmt , ## arg); \
+ } while (0)
+/*
+ * The Hauppauge eeprom uses an 8-bit field to determine which
+ * tuner formats the tuner supports.
+ */
static struct HAUPPAUGE_TUNER_FMT
{
int id;
@@ -74,14 +73,14 @@ static struct HAUPPAUGE_TUNER_FMT
}
hauppauge_tuner_fmt[] =
{
- { 0x00000000, " unknown1" },
- { 0x00000000, " unknown2" },
- { 0x00000007, " PAL(B/G)" },
- { 0x00001000, " NTSC(M)" },
- { 0x00000010, " PAL(I)" },
- { 0x00400000, " SECAM(L/L')" },
- { 0x00000e00, " PAL(D/K)" },
- { 0x03000000, " ATSC/DVB Digital" },
+ { V4L2_STD_UNKNOWN," UNKNOWN" },
+ { V4L2_STD_UNKNOWN," FM" },
+ { V4L2_STD_PAL_BG, " PAL(B/G)" },
+ { V4L2_STD_NTSC_M, " NTSC(M)" },
+ { V4L2_STD_PAL_I, " PAL(I)" },
+ { V4L2_STD_SECAM_L," SECAM(L/L')" },
+ { V4L2_STD_PAL_DK, " PAL(D/D1/K)" },
+ { V4L2_STD_ATSC, " ATSC/DVB Digital" },
};
/* This is the full list of possible tuners. Many thanks to Hauppauge for
@@ -387,7 +386,7 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
if ((eeprom_data[0] == 0x1a) && (eeprom_data[1] == 0xeb) &&
(eeprom_data[2] == 0x67) && (eeprom_data[3] == 0x95))
start=0xa0; /* Generic em28xx offset */
- else if (((eeprom_data[0] & 0xf0) == 0x10) &&
+ else if (((eeprom_data[0] & 0xe1) == 0x01) &&
(eeprom_data[1] == 0x00) &&
(eeprom_data[2] == 0x00) &&
(eeprom_data[8] == 0x84))
@@ -720,8 +719,7 @@ tveeprom_command(struct i2c_client *client,
switch (cmd) {
case 0:
- buf = kmalloc(256,GFP_KERNEL);
- memset(buf,0,256);
+ buf = kzalloc(256,GFP_KERNEL);
tveeprom_read(client,buf,256);
tveeprom_hauppauge_analog(client, &eeprom,buf);
kfree(buf);
@@ -744,14 +742,12 @@ tveeprom_detect_client(struct i2c_adapter *adapter,
{
struct i2c_client *client;
- client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
if (NULL == client)
return -ENOMEM;
- memset(client, 0, sizeof(struct i2c_client));
client->addr = address;
client->adapter = adapter;
client->driver = &i2c_driver_tveeprom;
- client->flags = I2C_CLIENT_ALLOW_USE;
snprintf(client->name, sizeof(client->name), "tveeprom");
i2c_attach_client(client);
@@ -779,10 +775,10 @@ tveeprom_detach_client (struct i2c_client *client)
}
static struct i2c_driver i2c_driver_tveeprom = {
- .owner = THIS_MODULE,
- .name = "tveeprom",
+ .driver = {
+ .name = "tveeprom",
+ },
.id = I2C_DRIVERID_TVEEPROM,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = tveeprom_attach_adapter,
.detach_client = tveeprom_detach_client,
.command = tveeprom_command,
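The hauppauge_tuner_fmt[] change above maps each bit of the eeprom's 8-bit tuner-format field to a v4l2_std_id instead of a private constant. A minimal sketch of how such a bit field can be folded into one standards mask; the table here is abbreviated and the eeprom byte is hypothetical, the real mapping is the one in the hunk:

#include <stdio.h>

typedef unsigned long long std_id;	/* stand-in for v4l2_std_id */

#define STD_PAL_BG	(1ULL << 0)
#define STD_NTSC_M	(1ULL << 1)
#define STD_PAL_I	(1ULL << 2)

static const struct {
	std_id std;
	const char *name;
} tuner_fmt[8] = {			/* one entry per bit, like hauppauge_tuner_fmt[] */
	[2] = { STD_PAL_BG, "PAL(B/G)" },
	[3] = { STD_NTSC_M, "NTSC(M)" },
	[4] = { STD_PAL_I,  "PAL(I)" },
};

int main(void)
{
	unsigned char field = 0x0c;	/* hypothetical eeprom byte: bits 2 and 3 set */
	std_id std = 0;
	int i;

	for (i = 0; i < 8; i++) {
		if (!(field & (1 << i)))
			continue;
		std |= tuner_fmt[i].std;
		printf("supports %s\n", tuner_fmt[i].name ? tuner_fmt[i].name : "unknown");
	}
	printf("std mask = 0x%llx\n", std);
	return 0;
}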
diff --git a/drivers/media/video/tvmixer.c b/drivers/media/video/tvmixer.c
index 8318bd1aad0..9e86caeb96a 100644
--- a/drivers/media/video/tvmixer.c
+++ b/drivers/media/video/tvmixer.c
@@ -227,17 +227,11 @@ static int tvmixer_release(struct inode *inode, struct file *file)
}
static struct i2c_driver driver = {
-#ifdef I2C_PEC
- .owner = THIS_MODULE,
-#endif
- .name = "tv card mixer driver",
+ .driver = {
+ .name = "tvmixer",
+ },
.id = I2C_DRIVERID_TVMIXER,
-#ifdef I2C_DF_DUMMY
- .flags = I2C_DF_DUMMY,
-#else
- .flags = I2C_DF_NOTIFY,
.detach_adapter = tvmixer_adapters,
-#endif
.attach_adapter = tvmixer_adapters,
.detach_client = tvmixer_clients,
};
@@ -269,22 +263,8 @@ static int tvmixer_clients(struct i2c_client *client)
struct video_audio va;
int i,minor;
-#ifdef I2C_CLASS_TV_ANALOG
if (!(client->adapter->class & I2C_CLASS_TV_ANALOG))
return -1;
-#else
- /* TV card ??? */
- switch (client->adapter->id) {
- case I2C_HW_SMBUS_VOODOO3:
- case I2C_HW_B_BT848:
- case I2C_HW_B_RIVA:
- /* ok, have a look ... */
- break;
- default:
- /* ignore that one */
- return -1;
- }
-#endif
/* unregister ?? */
for (i = 0; i < DEV_MAX; i++) {
diff --git a/drivers/media/video/tvp5150.c b/drivers/media/video/tvp5150.c
index 97431e26d22..e86b522938f 100644
--- a/drivers/media/video/tvp5150.c
+++ b/drivers/media/video/tvp5150.c
@@ -9,6 +9,7 @@
#include <linux/videodev.h>
#include <linux/delay.h>
#include <linux/video_decoder.h>
+#include <media/v4l2-common.h>
#include "tvp5150_reg.h"
@@ -28,33 +29,38 @@ static int debug = 0;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
-#define dprintk(num, format, args...) \
+#define tvp5150_info(fmt, arg...) do { \
+ printk(KERN_INFO "%s %d-%04x: " fmt, c->driver->driver.name, \
+ i2c_adapter_id(c->adapter), c->addr , ## arg); } while (0)
+#define tvp5150_dbg(num, fmt, arg...) \
do { \
if (debug >= num) \
- printk(format, ##args); \
- } while (0)
+ printk(KERN_DEBUG "%s debug %d-%04x: " fmt,\
+ c->driver->driver.name, \
+ i2c_adapter_id(c->adapter), \
+ c->addr , ## arg); } while (0)
/* supported controls */
static struct v4l2_queryctrl tvp5150_qctrl[] = {
{
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 0,
- .flags = 0,
- }, {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 0x1,
- .default_value = 0x10,
- .flags = 0,
- }, {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+ .default_value = 0,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_CONTRAST,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Contrast",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 0x1,
+ .default_value = 0x10,
+ .flags = 0,
+ }, {
.id = V4L2_CID_SATURATION,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Saturation",
@@ -63,16 +69,16 @@ static struct v4l2_queryctrl tvp5150_qctrl[] = {
.step = 0x1,
.default_value = 0x10,
.flags = 0,
- }, {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = -128,
- .maximum = 127,
- .step = 0x1,
- .default_value = 0x10,
- .flags = 0,
- }
+ }, {
+ .id = V4L2_CID_HUE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Hue",
+ .minimum = -128,
+ .maximum = 127,
+ .step = 0x1,
+ .default_value = 0x10,
+ .flags = 0,
+ }
};
struct tvp5150 {
@@ -94,12 +100,14 @@ static inline int tvp5150_read(struct i2c_client *c, unsigned char addr)
buffer[0] = addr;
if (1 != (rc = i2c_master_send(c, buffer, 1)))
- dprintk(0, "i2c i/o error: rc == %d (should be 1)\n", rc);
+ tvp5150_dbg(0, "i2c i/o error: rc == %d (should be 1)\n", rc);
msleep(10);
if (1 != (rc = i2c_master_recv(c, buffer, 1)))
- dprintk(0, "i2c i/o error: rc == %d (should be 1)\n", rc);
+ tvp5150_dbg(0, "i2c i/o error: rc == %d (should be 1)\n", rc);
+
+ tvp5150_dbg(2, "tvp5150: read 0x%02x = 0x%02x\n", addr, buffer[0]);
return (buffer[0]);
}
@@ -109,13 +117,12 @@ static inline void tvp5150_write(struct i2c_client *c, unsigned char addr,
{
unsigned char buffer[2];
int rc;
-/* struct tvp5150 *core = i2c_get_clientdata(c); */
buffer[0] = addr;
buffer[1] = value;
- dprintk(1, "tvp5150: writing 0x%02x 0x%02x\n", buffer[0], buffer[1]);
+ tvp5150_dbg(2, "tvp5150: writing 0x%02x 0x%02x\n", buffer[0], buffer[1]);
if (2 != (rc = i2c_master_send(c, buffer, 2)))
- dprintk(0, "i2c i/o error: rc == %d (should be 2)\n", rc);
+ tvp5150_dbg(0, "i2c i/o error: rc == %d (should be 2)\n", rc);
}
static void dump_reg(struct i2c_client *c)
@@ -437,48 +444,346 @@ enum tvp5150_input {
static inline void tvp5150_selmux(struct i2c_client *c,
enum tvp5150_input input)
{
+ int opmode=0;
+
struct tvp5150 *decoder = i2c_get_clientdata(c);
if (!decoder->enable)
input |= TVP5150_BLACK_SCREEN;
+ switch (input) {
+ case TVP5150_ANALOG_CH0:
+ case TVP5150_ANALOG_CH1:
+ opmode=0x30; /* TV Mode */
+ break;
+ default:
+ opmode=0; /* Auto Mode */
+ break;
+ }
+
+ tvp5150_write(c, TVP5150_OP_MODE_CTL, opmode);
tvp5150_write(c, TVP5150_VD_IN_SRC_SEL_1, input);
};
-static inline void tvp5150_reset(struct i2c_client *c)
+struct i2c_reg_value {
+ unsigned char reg;
+ unsigned char value;
+};
+
+/* Default values as suggested in the TVP5150AM1 datasheet */
+static const struct i2c_reg_value tvp5150_init_default[] = {
+ { /* 0x00 */
+ TVP5150_VD_IN_SRC_SEL_1,0x00
+ },
+ { /* 0x01 */
+ TVP5150_ANAL_CHL_CTL,0x15
+ },
+ { /* 0x02 */
+ TVP5150_OP_MODE_CTL,0x00
+ },
+ { /* 0x03 */
+ TVP5150_MISC_CTL,0x01
+ },
+ { /* 0x06 */
+ TVP5150_COLOR_KIL_THSH_CTL,0x10
+ },
+ { /* 0x07 */
+ TVP5150_LUMA_PROC_CTL_1,0x60
+ },
+ { /* 0x08 */
+ TVP5150_LUMA_PROC_CTL_2,0x00
+ },
+ { /* 0x09 */
+ TVP5150_BRIGHT_CTL,0x80
+ },
+ { /* 0x0a */
+ TVP5150_SATURATION_CTL,0x80
+ },
+ { /* 0x0b */
+ TVP5150_HUE_CTL,0x00
+ },
+ { /* 0x0c */
+ TVP5150_CONTRAST_CTL,0x80
+ },
+ { /* 0x0d */
+ TVP5150_DATA_RATE_SEL,0x47
+ },
+ { /* 0x0e */
+ TVP5150_LUMA_PROC_CTL_3,0x00
+ },
+ { /* 0x0f */
+ TVP5150_CONF_SHARED_PIN,0x08
+ },
+ { /* 0x11 */
+ TVP5150_ACT_VD_CROP_ST_MSB,0x00
+ },
+ { /* 0x12 */
+ TVP5150_ACT_VD_CROP_ST_LSB,0x00
+ },
+ { /* 0x13 */
+ TVP5150_ACT_VD_CROP_STP_MSB,0x00
+ },
+ { /* 0x14 */
+ TVP5150_ACT_VD_CROP_STP_LSB,0x00
+ },
+ { /* 0x15 */
+ TVP5150_GENLOCK,0x01
+ },
+ { /* 0x16 */
+ TVP5150_HORIZ_SYNC_START,0x80
+ },
+ { /* 0x18 */
+ TVP5150_VERT_BLANKING_START,0x00
+ },
+ { /* 0x19 */
+ TVP5150_VERT_BLANKING_STOP,0x00
+ },
+ { /* 0x1a */
+ TVP5150_CHROMA_PROC_CTL_1,0x0c
+ },
+ { /* 0x1b */
+ TVP5150_CHROMA_PROC_CTL_2,0x14
+ },
+ { /* 0x1c */
+ TVP5150_INT_RESET_REG_B,0x00
+ },
+ { /* 0x1d */
+ TVP5150_INT_ENABLE_REG_B,0x00
+ },
+ { /* 0x1e */
+ TVP5150_INTT_CONFIG_REG_B,0x00
+ },
+ { /* 0x28 */
+ TVP5150_VIDEO_STD,0x00
+ },
+ { /* 0x2e */
+ TVP5150_MACROVISION_ON_CTR,0x0f
+ },
+ { /* 0x2f */
+ TVP5150_MACROVISION_OFF_CTR,0x01
+ },
+ { /* 0xbb */
+ TVP5150_TELETEXT_FIL_ENA,0x00
+ },
+ { /* 0xc0 */
+ TVP5150_INT_STATUS_REG_A,0x00
+ },
+ { /* 0xc1 */
+ TVP5150_INT_ENABLE_REG_A,0x00
+ },
+ { /* 0xc2 */
+ TVP5150_INT_CONF,0x04
+ },
+ { /* 0xc8 */
+ TVP5150_FIFO_INT_THRESHOLD,0x80
+ },
+ { /* 0xc9 */
+ TVP5150_FIFO_RESET,0x00
+ },
+ { /* 0xca */
+ TVP5150_LINE_NUMBER_INT,0x00
+ },
+ { /* 0xcb */
+ TVP5150_PIX_ALIGN_REG_LOW,0x4e
+ },
+ { /* 0xcc */
+ TVP5150_PIX_ALIGN_REG_HIGH,0x00
+ },
+ { /* 0xcd */
+ TVP5150_FIFO_OUT_CTRL,0x01
+ },
+ { /* 0xcf */
+ TVP5150_FULL_FIELD_ENA_1,0x00
+ },
+ { /* 0xd0 */
+ TVP5150_FULL_FIELD_ENA_2,0x00
+ },
+ { /* 0xfc */
+ TVP5150_FULL_FIELD_MODE_REG,0x7f
+ },
+ { /* end of data */
+ 0xff,0xff
+ }
+};
+
+/* Default values as suggested in the TVP5150AM1 datasheet */
+static const struct i2c_reg_value tvp5150_init_enable[] = {
+ {
+ TVP5150_CONF_SHARED_PIN, 2
+ },{ /* Automatic offset and AGC enabled */
+ TVP5150_ANAL_CHL_CTL, 0x15
+ },{ /* Activate YCrCb output 0x9 or 0xd ? */
+ TVP5150_MISC_CTL, 0x6f
+ },{ /* Activates video std autodetection for all standards */
+ TVP5150_AUTOSW_MSK, 0x0
+ },{ /* Default format: 0x47. For 4:2:2: 0x40 */
+ TVP5150_DATA_RATE_SEL, 0x47
+ },{
+ TVP5150_CHROMA_PROC_CTL_1, 0x0c
+ },{
+ TVP5150_CHROMA_PROC_CTL_2, 0x54
+ },{ /* Not documented, but initialized on the WinTV USB2 */
+ 0x27, 0x20
+ },{
+ 0xff,0xff
+ }
+};
+
+struct i2c_vbi_ram_value {
+ u16 reg;
+ unsigned char values[26];
+};
+
+struct i2c_vbi_ram_value vbi_ram_default[] =
{
- struct tvp5150 *decoder = i2c_get_clientdata(c);
+ {0x010, /* WST SECAM 6 */
+ { 0xaa, 0xaa, 0xff, 0xff , 0xe7, 0x2e, 0x20, 0x26, 0xe6, 0xb4, 0x0e, 0x0, 0x0, 0x0, 0x10, 0x0 }
+ },
+ {0x030, /* WST PAL B 6 */
+ { 0xaa, 0xaa, 0xff, 0xff , 0x27, 0x2e, 0x20, 0x2b, 0xa6, 0x72, 0x10, 0x0, 0x0, 0x0, 0x10, 0x0 }
+ },
+ {0x050, /* WST PAL C 6 */
+ { 0xaa, 0xaa, 0xff, 0xff , 0xe7, 0x2e, 0x20, 0x22, 0xa6, 0x98, 0x0d, 0x0, 0x0, 0x0, 0x10, 0x0 }
+ },
+ {0x070, /* WST NTSC 6 */
+ { 0xaa, 0xaa, 0xff, 0xff , 0x27, 0x2e, 0x20, 0x23, 0x69, 0x93, 0x0d, 0x0, 0x0, 0x0, 0x10, 0x0 }
+ },
+ {0x090, /* NABTS, NTSC 6 */
+ { 0xaa, 0xaa, 0xff, 0xff , 0xe7, 0x2e, 0x20, 0x22, 0x69, 0x93, 0x0d, 0x0, 0x0, 0x0, 0x15, 0x0 }
+ },
+ {0x0b0, /* NABTS, NTSC-J 6 */
+ { 0xaa, 0xaa, 0xff, 0xff , 0xa7, 0x2e, 0x20, 0x23, 0x69, 0x93, 0x0d, 0x0, 0x0, 0x0, 0x10, 0x0 }
+ },
+ {0x0d0, /* CC, PAL/SECAM 6 */
+ { 0xaa, 0x2a, 0xff, 0x3f , 0x04, 0x51, 0x6e, 0x02, 0xa6, 0x7b, 0x09, 0x0, 0x0, 0x0, 0x27, 0x0 }
+ },
+ {0x0f0, /* CC, NTSC 6 */
+ { 0xaa, 0x2a, 0xff, 0x3f , 0x04, 0x51, 0x6e, 0x02, 0x69, 0x8c, 0x09, 0x0, 0x0, 0x0, 0x27, 0x0 }
+ },
+ {0x110, /* WSS, PAL/SECAM 6 */
+ { 0x5b, 0x55, 0xc5, 0xff , 0x0, 0x71, 0x6e, 0x42, 0xa6, 0xcd, 0x0f, 0x0, 0x0, 0x0, 0x3a, 0x0 }
+ },
+ {0x130, /* WSS, NTSC C */
+ { 0x38, 0x00, 0x3f, 0x00 , 0x0, 0x71, 0x6e, 0x43, 0x69, 0x7c, 0x08, 0x0, 0x0, 0x0, 0x39, 0x0 }
+ },
+ {0x150, /* VITC, PAL/SECAM 6 */
+ { 0x0, 0x0, 0x0, 0x0 , 0x0, 0x8f, 0x6d, 0x49, 0xa6, 0x85, 0x08, 0x0, 0x0, 0x0, 0x4c, 0x0 }
+ },
+ {0x170, /* VITC, NTSC 6 */
+ { 0x0, 0x0, 0x0, 0x0 , 0x0, 0x8f, 0x6d, 0x49, 0x69, 0x94, 0x08, 0x0, 0x0, 0x0, 0x4c, 0x0 }
+ },
+ { (u16)-1 }
+};
- tvp5150_write(c, TVP5150_CONF_SHARED_PIN, 2);
+static int tvp5150_write_inittab(struct i2c_client *c,
+ const struct i2c_reg_value *regs)
+{
+ while (regs->reg != 0xff) {
+ tvp5150_write(c, regs->reg, regs->value);
+ regs++;
+ }
+ return 0;
+}
- /* Automatic offset and AGC enabled */
- tvp5150_write(c, TVP5150_ANAL_CHL_CTL, 0x15);
+static int tvp5150_vdp_init(struct i2c_client *c,
+ const struct i2c_vbi_ram_value *regs)
+{
+ unsigned int i;
- /* Normal Operation */
-// tvp5150_write(c, TVP5150_OP_MODE_CTL, 0x00);
+ /* Disable Full Field */
+ tvp5150_write(c, TVP5150_FULL_FIELD_ENA_1, 0);
- /* Activate YCrCb output 0x9 or 0xd ? */
- tvp5150_write(c, TVP5150_MISC_CTL, 0x6f);
+ /* Before programming, the line mode registers should be set to 0xff */
+ for (i=TVP5150_FULL_FIELD_ENA_2; i<=TVP5150_LINE_MODE_REG_44; i++)
+ tvp5150_write(c, i, 0xff);
- /* Activates video std autodetection for all standards */
- tvp5150_write(c, TVP5150_AUTOSW_MSK, 0x0);
+ /* Load Ram Table */
+ while (regs->reg != (u16)-1 ) {
+ tvp5150_write(c, TVP5150_CONF_RAM_ADDR_HIGH,regs->reg>>8);
+ tvp5150_write(c, TVP5150_CONF_RAM_ADDR_LOW,regs->reg);
- /* Default format: 0x47, 4:2:2: 0x40 */
- tvp5150_write(c, TVP5150_DATA_RATE_SEL, 0x47);
+ for (i=0;i<16;i++)
+ tvp5150_write(c, TVP5150_VDP_CONF_RAM_DATA,regs->values[i]);
- tvp5150_selmux(c, decoder->input);
+ regs++;
+ }
+ return 0;
+}
- tvp5150_write(c, TVP5150_CHROMA_PROC_CTL_1, 0x0c);
- tvp5150_write(c, TVP5150_CHROMA_PROC_CTL_2, 0x54);
+static int tvp5150_set_std(struct i2c_client *c, v4l2_std_id std)
+{
+ struct tvp5150 *decoder = i2c_get_clientdata(c);
+ int fmt=0;
+
+ decoder->norm=std;
+
+ /* Test against the specific standards first */
+
+ if (std == V4L2_STD_ALL) {
+ fmt=0; /* Autodetect mode */
+ } else if (std & V4L2_STD_NTSC_443) {
+ fmt=0xa;
+ } else if (std & V4L2_STD_PAL_M) {
+ fmt=0x6;
+ } else if (std & (V4L2_STD_PAL_N| V4L2_STD_PAL_Nc)) {
+ fmt=0x8;
+ } else {
+ /* Then, test against generic ones */
+ if (std & V4L2_STD_NTSC) {
+ fmt=0x2;
+ } else if (std & V4L2_STD_PAL) {
+ fmt=0x4;
+ } else if (std & V4L2_STD_SECAM) {
+ fmt=0xc;
+ }
+ }
- tvp5150_write(c, 0x27, 0x20); /* ?????????? */
+ tvp5150_dbg(1,"Set video std register to %d.\n",fmt);
+ tvp5150_write(c, TVP5150_VIDEO_STD, fmt);
- tvp5150_write(c, TVP5150_VIDEO_STD, 0x0); /* Auto switch */
+ return 0;
+}
+static inline void tvp5150_reset(struct i2c_client *c)
+{
+ u8 type, ver_656, msb_id, lsb_id, msb_rom, lsb_rom;
+ struct tvp5150 *decoder = i2c_get_clientdata(c);
+
+ type=tvp5150_read(c,TVP5150_AUTOSW_MSK);
+ msb_id=tvp5150_read(c,TVP5150_MSB_DEV_ID);
+ lsb_id=tvp5150_read(c,TVP5150_LSB_DEV_ID);
+ msb_rom=tvp5150_read(c,TVP5150_ROM_MAJOR_VER);
+ lsb_rom=tvp5150_read(c,TVP5150_ROM_MINOR_VER);
+
+ if (type==0xdc) {
+ ver_656=tvp5150_read(c,TVP5150_REV_SELECT);
+ tvp5150_info("tvp%02x%02xam1 detected 656 version is %d.\n",msb_id, lsb_id,ver_656);
+ } else if (type==0xfc) {
+ tvp5150_info("tvp%02x%02xa detected.\n",msb_id, lsb_id);
+ } else {
+ tvp5150_info("unknown tvp%02x%02x chip detected(%d).\n",msb_id,lsb_id,type);
+ }
+ tvp5150_info("Rom ver is %d.%d\n",msb_rom,lsb_rom);
+
+ /* Initializes TVP5150 to its default values */
+ tvp5150_write_inittab(c, tvp5150_init_default);
+
+ /* Initializes VDP registers */
+ tvp5150_vdp_init(c, vbi_ram_default);
+
+ /* Selects decoder input */
+ tvp5150_selmux(c, decoder->input);
+
+ /* Initializes TVP5150 to stream enabled values */
+ tvp5150_write_inittab(c, tvp5150_init_enable);
+
+ /* Initialize image preferences */
tvp5150_write(c, TVP5150_BRIGHT_CTL, decoder->bright >> 8);
tvp5150_write(c, TVP5150_CONTRAST_CTL, decoder->contrast >> 8);
tvp5150_write(c, TVP5150_SATURATION_CTL, decoder->contrast >> 8);
tvp5150_write(c, TVP5150_HUE_CTL, (decoder->hue - 32768) >> 8);
+
+ tvp5150_set_std(c, decoder->norm);
};
static int tvp5150_get_ctrl(struct i2c_client *c, struct v4l2_control *ctrl)
@@ -498,9 +803,8 @@ static int tvp5150_get_ctrl(struct i2c_client *c, struct v4l2_control *ctrl)
case V4L2_CID_HUE:
ctrl->value = tvp5150_read(c, TVP5150_HUE_CTL);
return 0;
- default:
- return -EINVAL;
}
+ return -EINVAL;
}
static int tvp5150_set_ctrl(struct i2c_client *c, struct v4l2_control *ctrl)
@@ -520,28 +824,59 @@ static int tvp5150_set_ctrl(struct i2c_client *c, struct v4l2_control *ctrl)
case V4L2_CID_HUE:
tvp5150_write(c, TVP5150_HUE_CTL, ctrl->value);
return 0;
- default:
- return -EINVAL;
}
+ return -EINVAL;
}
/****************************************************************************
I2C Command
****************************************************************************/
-static int tvp5150_command(struct i2c_client *client,
+static int tvp5150_command(struct i2c_client *c,
unsigned int cmd, void *arg)
{
- struct tvp5150 *decoder = i2c_get_clientdata(client);
+ struct tvp5150 *decoder = i2c_get_clientdata(c);
switch (cmd) {
case 0:
+ case VIDIOC_INT_RESET:
case DECODER_INIT:
- tvp5150_reset(client);
+ tvp5150_reset(c);
+ break;
+ case VIDIOC_S_STD:
+ if (decoder->norm == *(v4l2_std_id *)arg)
+ break;
+ return tvp5150_set_std(c, *(v4l2_std_id *)arg);
+ case VIDIOC_G_STD:
+ *(v4l2_std_id *)arg = decoder->norm;
break;
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ case VIDIOC_INT_G_REGISTER:
+ {
+ struct v4l2_register *reg = arg;
+
+ if (reg->i2c_id != I2C_DRIVERID_TVP5150)
+ return -EINVAL;
+ reg->val = tvp5150_read(c, reg->reg & 0xff);
+ break;
+ }
+
+ case VIDIOC_INT_S_REGISTER:
+ {
+ struct v4l2_register *reg = arg;
+
+ if (reg->i2c_id != I2C_DRIVERID_TVP5150)
+ return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ tvp5150_write(c, reg->reg & 0xff, reg->val & 0xff);
+ break;
+ }
+#endif
+
case DECODER_DUMP:
- dump_reg(client);
+ dump_reg(c);
break;
case DECODER_GET_CAPABILITIES:
@@ -600,7 +935,7 @@ static int tvp5150_command(struct i2c_client *client,
}
decoder->input = *iarg;
- tvp5150_selmux(client, decoder->input);
+ tvp5150_selmux(c, decoder->input);
break;
}
@@ -620,19 +955,18 @@ static int tvp5150_command(struct i2c_client *client,
decoder->enable = (*iarg != 0);
- tvp5150_selmux(client, decoder->input);
+ tvp5150_selmux(c, decoder->input);
break;
}
case VIDIOC_QUERYCTRL:
{
struct v4l2_queryctrl *qc = arg;
- u8 i, n;
+ int i;
- dprintk(1, KERN_DEBUG "VIDIOC_QUERYCTRL");
+ tvp5150_dbg(1, "VIDIOC_QUERYCTRL called\n");
- n = sizeof(tvp5150_qctrl) / sizeof(tvp5150_qctrl[0]);
- for (i = 0; i < n; i++)
+ for (i = 0; i < ARRAY_SIZE(tvp5150_qctrl); i++)
if (qc->id && qc->id == tvp5150_qctrl[i].id) {
memcpy(qc, &(tvp5150_qctrl[i]),
sizeof(*qc));
@@ -644,16 +978,14 @@ static int tvp5150_command(struct i2c_client *client,
case VIDIOC_G_CTRL:
{
struct v4l2_control *ctrl = arg;
- dprintk(1, KERN_DEBUG "VIDIOC_G_CTRL");
+ tvp5150_dbg(1, "VIDIOC_G_CTRL called\n");
- return tvp5150_get_ctrl(client, ctrl);
+ return tvp5150_get_ctrl(c, ctrl);
}
- case VIDIOC_S_CTRL_OLD: /* ??? */
case VIDIOC_S_CTRL:
{
struct v4l2_control *ctrl = arg;
u8 i, n;
- dprintk(1, KERN_DEBUG "VIDIOC_S_CTRL");
n = sizeof(tvp5150_qctrl) / sizeof(tvp5150_qctrl[0]);
for (i = 0; i < n; i++)
if (ctrl->id == tvp5150_qctrl[i].id) {
@@ -662,11 +994,10 @@ static int tvp5150_command(struct i2c_client *client,
|| ctrl->value >
tvp5150_qctrl[i].maximum)
return -ERANGE;
- dprintk(1,
- KERN_DEBUG
- "VIDIOC_S_CTRL: id=%d, value=%d",
+ tvp5150_dbg(1,
+ "VIDIOC_S_CTRL: id=%d, value=%d\n",
ctrl->id, ctrl->value);
- return tvp5150_set_ctrl(client, ctrl);
+ return tvp5150_set_ctrl(c, ctrl);
}
return -EINVAL;
}
@@ -677,25 +1008,25 @@ static int tvp5150_command(struct i2c_client *client,
if (decoder->bright != pic->brightness) {
/* We want 0 to 255 we get 0-65535 */
decoder->bright = pic->brightness;
- tvp5150_write(client, TVP5150_BRIGHT_CTL,
+ tvp5150_write(c, TVP5150_BRIGHT_CTL,
decoder->bright >> 8);
}
if (decoder->contrast != pic->contrast) {
/* We want 0 to 255 we get 0-65535 */
decoder->contrast = pic->contrast;
- tvp5150_write(client, TVP5150_CONTRAST_CTL,
+ tvp5150_write(c, TVP5150_CONTRAST_CTL,
decoder->contrast >> 8);
}
if (decoder->sat != pic->colour) {
/* We want 0 to 255 we get 0-65535 */
decoder->sat = pic->colour;
- tvp5150_write(client, TVP5150_SATURATION_CTL,
+ tvp5150_write(c, TVP5150_SATURATION_CTL,
decoder->contrast >> 8);
}
if (decoder->hue != pic->hue) {
/* We want -128 to 127 we get 0-65535 */
decoder->hue = pic->hue;
- tvp5150_write(client, TVP5150_HUE_CTL,
+ tvp5150_write(c, TVP5150_HUE_CTL,
(decoder->hue - 32768) >> 8);
}
break;
@@ -714,19 +1045,18 @@ static struct i2c_driver driver;
static struct i2c_client client_template = {
.name = "(unset)",
- .flags = I2C_CLIENT_ALLOW_USE,
.driver = &driver,
};
static int tvp5150_detect_client(struct i2c_adapter *adapter,
int address, int kind)
{
- struct i2c_client *client;
+ struct i2c_client *c;
struct tvp5150 *core;
int rv;
- dprintk(1,
- KERN_INFO
+ if (debug)
+ printk( KERN_INFO
"tvp5150.c: detecting tvp5150 client on address 0x%x\n",
address << 1);
@@ -739,22 +1069,21 @@ static int tvp5150_detect_client(struct i2c_adapter *adapter,
I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
return 0;
- client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
- if (client == 0)
+ c = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ if (c == 0)
return -ENOMEM;
- memcpy(client, &client_template, sizeof(struct i2c_client));
+ memcpy(c, &client_template, sizeof(struct i2c_client));
- core = kmalloc(sizeof(struct tvp5150), GFP_KERNEL);
+ core = kzalloc(sizeof(struct tvp5150), GFP_KERNEL);
if (core == 0) {
- kfree(client);
+ kfree(c);
return -ENOMEM;
}
- memset(core, 0, sizeof(struct tvp5150));
- i2c_set_clientdata(client, core);
+ i2c_set_clientdata(c, core);
- rv = i2c_attach_client(client);
+ rv = i2c_attach_client(c);
- core->norm = VIDEO_MODE_AUTO;
+ core->norm = V4L2_STD_ALL;
core->input = 2;
core->enable = 1;
core->bright = 32768;
@@ -763,37 +1092,41 @@ static int tvp5150_detect_client(struct i2c_adapter *adapter,
core->sat = 32768;
if (rv) {
- kfree(client);
+ kfree(c);
kfree(core);
return rv;
}
if (debug > 1)
- dump_reg(client);
+ dump_reg(c);
return 0;
}
static int tvp5150_attach_adapter(struct i2c_adapter *adapter)
{
- dprintk(1,
- KERN_INFO
+ if (debug)
+ printk( KERN_INFO
"tvp5150.c: starting probe for adapter %s (0x%x)\n",
adapter->name, adapter->id);
return i2c_probe(adapter, &addr_data, &tvp5150_detect_client);
}
-static int tvp5150_detach_client(struct i2c_client *client)
+static int tvp5150_detach_client(struct i2c_client *c)
{
- struct tvp5150 *decoder = i2c_get_clientdata(client);
+ struct tvp5150 *decoder = i2c_get_clientdata(c);
int err;
- err = i2c_detach_client(client);
+ tvp5150_dbg(1,
+ "tvp5150.c: removing tvp5150 adapter on address 0x%x\n",
+ c->addr << 1);
+
+ err = i2c_detach_client(c);
if (err) {
return err;
}
kfree(decoder);
- kfree(client);
+ kfree(c);
return 0;
}
@@ -801,12 +1134,10 @@ static int tvp5150_detach_client(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
static struct i2c_driver driver = {
- .owner = THIS_MODULE,
- .name = "tvp5150",
-
- /* FIXME */
- .id = I2C_DRIVERID_SAA7110,
- .flags = I2C_DF_NOTIFY,
+ .driver = {
+ .name = "tvp5150",
+ },
+ .id = I2C_DRIVERID_TVP5150,
.attach_adapter = tvp5150_attach_adapter,
.detach_client = tvp5150_detach_client,
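The tvp5150 changes above replace the open-coded register writes in tvp5150_reset() with sentinel-terminated tables walked by tvp5150_write_inittab(). A minimal userspace sketch of that table-driven pattern; the register names, values and the chip_write() stub are placeholders, not the TVP5150 ones:

#include <stdio.h>

struct reg_value {			/* mirrors struct i2c_reg_value from the hunk */
	unsigned char reg;
	unsigned char value;
};

#define REG_BRIGHT	0x09		/* hypothetical register addresses */
#define REG_CONTRAST	0x0c
#define END_OF_TABLE	0xff

static const struct reg_value init_table[] = {
	{ REG_BRIGHT,   0x80 },
	{ REG_CONTRAST, 0x80 },
	{ END_OF_TABLE, END_OF_TABLE }	/* sentinel, like the 0xff,0xff entry */
};

/* stand-in for tvp5150_write(); a real driver would do an i2c transfer here */
static void chip_write(unsigned char reg, unsigned char value)
{
	printf("write reg 0x%02x = 0x%02x\n", reg, value);
}

static void write_inittab(const struct reg_value *regs)
{
	while (regs->reg != END_OF_TABLE) {
		chip_write(regs->reg, regs->value);
		regs++;
	}
}

int main(void)
{
	write_inittab(init_table);
	return 0;
}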
diff --git a/drivers/media/video/v4l1-compat.c b/drivers/media/video/v4l1-compat.c
index 4134549d11a..474a29bc176 100644
--- a/drivers/media/video/v4l1-compat.c
+++ b/drivers/media/video/v4l1-compat.c
@@ -305,9 +305,8 @@ v4l_compat_translate_ioctl(struct inode *inode,
{
struct video_capability *cap = arg;
- cap2 = kmalloc(sizeof(*cap2),GFP_KERNEL);
+ cap2 = kzalloc(sizeof(*cap2),GFP_KERNEL);
memset(cap, 0, sizeof(*cap));
- memset(cap2, 0, sizeof(*cap2));
memset(&fbuf2, 0, sizeof(fbuf2));
err = drv(inode, file, VIDIOC_QUERYCAP, cap2);
@@ -422,9 +421,8 @@ v4l_compat_translate_ioctl(struct inode *inode,
{
struct video_window *win = arg;
- fmt2 = kmalloc(sizeof(*fmt2),GFP_KERNEL);
+ fmt2 = kzalloc(sizeof(*fmt2),GFP_KERNEL);
memset(win,0,sizeof(*win));
- memset(fmt2,0,sizeof(*fmt2));
fmt2->type = V4L2_BUF_TYPE_VIDEO_OVERLAY;
err = drv(inode, file, VIDIOC_G_FMT, fmt2);
@@ -461,8 +459,7 @@ v4l_compat_translate_ioctl(struct inode *inode,
struct video_window *win = arg;
int err1,err2;
- fmt2 = kmalloc(sizeof(*fmt2),GFP_KERNEL);
- memset(fmt2,0,sizeof(*fmt2));
+ fmt2 = kzalloc(sizeof(*fmt2),GFP_KERNEL);
fmt2->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
drv(inode, file, VIDIOC_STREAMOFF, &fmt2->type);
err1 = drv(inode, file, VIDIOC_G_FMT, fmt2);
@@ -595,8 +592,7 @@ v4l_compat_translate_ioctl(struct inode *inode,
pict->whiteness = get_v4l_control(inode, file,
V4L2_CID_WHITENESS, drv);
- fmt2 = kmalloc(sizeof(*fmt2),GFP_KERNEL);
- memset(fmt2,0,sizeof(*fmt2));
+ fmt2 = kzalloc(sizeof(*fmt2),GFP_KERNEL);
fmt2->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
err = drv(inode, file, VIDIOC_G_FMT, fmt2);
if (err < 0) {
@@ -622,8 +618,7 @@ v4l_compat_translate_ioctl(struct inode *inode,
set_v4l_control(inode, file,
V4L2_CID_WHITENESS, pict->whiteness, drv);
- fmt2 = kmalloc(sizeof(*fmt2),GFP_KERNEL);
- memset(fmt2,0,sizeof(*fmt2));
+ fmt2 = kzalloc(sizeof(*fmt2),GFP_KERNEL);
fmt2->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
err = drv(inode, file, VIDIOC_G_FMT, fmt2);
if (err < 0)
@@ -846,9 +841,8 @@ v4l_compat_translate_ioctl(struct inode *inode,
{
struct video_mmap *mm = arg;
- fmt2 = kmalloc(sizeof(*fmt2),GFP_KERNEL);
+ fmt2 = kzalloc(sizeof(*fmt2),GFP_KERNEL);
memset(&buf2,0,sizeof(buf2));
- memset(fmt2,0,sizeof(*fmt2));
fmt2->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
err = drv(inode, file, VIDIOC_G_FMT, fmt2);
@@ -942,8 +936,7 @@ v4l_compat_translate_ioctl(struct inode *inode,
{
struct vbi_format *fmt = arg;
- fmt2 = kmalloc(sizeof(*fmt2),GFP_KERNEL);
- memset(fmt2, 0, sizeof(*fmt2));
+ fmt2 = kzalloc(sizeof(*fmt2),GFP_KERNEL);
fmt2->type = V4L2_BUF_TYPE_VBI_CAPTURE;
err = drv(inode, file, VIDIOC_G_FMT, fmt2);
@@ -951,6 +944,10 @@ v4l_compat_translate_ioctl(struct inode *inode,
dprintk("VIDIOCGVBIFMT / VIDIOC_G_FMT: %d\n", err);
break;
}
+ if (fmt2->fmt.vbi.sample_format != V4L2_PIX_FMT_GREY) {
+ err = -EINVAL;
+ break;
+ }
memset(fmt, 0, sizeof(*fmt));
fmt->samples_per_line = fmt2->fmt.vbi.samples_per_line;
fmt->sampling_rate = fmt2->fmt.vbi.sampling_rate;
@@ -966,8 +963,12 @@ v4l_compat_translate_ioctl(struct inode *inode,
{
struct vbi_format *fmt = arg;
- fmt2 = kmalloc(sizeof(*fmt2),GFP_KERNEL);
- memset(fmt2, 0, sizeof(*fmt2));
+ if (VIDEO_PALETTE_RAW != fmt->sample_format) {
+ err = -EINVAL;
+ break;
+ }
+
+ fmt2 = kzalloc(sizeof(*fmt2),GFP_KERNEL);
fmt2->type = V4L2_BUF_TYPE_VBI_CAPTURE;
fmt2->fmt.vbi.samples_per_line = fmt->samples_per_line;
@@ -986,7 +987,7 @@ v4l_compat_translate_ioctl(struct inode *inode,
if (fmt2->fmt.vbi.samples_per_line != fmt->samples_per_line ||
fmt2->fmt.vbi.sampling_rate != fmt->sampling_rate ||
- VIDEO_PALETTE_RAW != fmt->sample_format ||
+ fmt2->fmt.vbi.sample_format != V4L2_PIX_FMT_GREY ||
fmt2->fmt.vbi.start[0] != fmt->start[0] ||
fmt2->fmt.vbi.count[0] != fmt->count[0] ||
fmt2->fmt.vbi.start[1] != fmt->start[1] ||
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 597b8db35a1..5dbd7c1b362 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -58,6 +58,8 @@
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/div64.h>
+#include <linux/video_decoder.h>
+#include <media/v4l2-common.h>
#ifdef CONFIG_KMOD
#include <linux/kmod.h>
@@ -190,57 +192,174 @@ char *v4l2_type_names[] = {
[V4L2_BUF_TYPE_VBI_OUTPUT] = "vbi-out",
};
-char *v4l2_ioctl_names[256] = {
-#if __GNUC__ >= 3
- [0 ... 255] = "UNKNOWN",
+/* ------------------------------------------------------------------ */
+/* debug help functions */
+
+#ifdef HAVE_V4L1
+static const char *v4l1_ioctls[] = {
+ [_IOC_NR(VIDIOCGCAP)] = "VIDIOCGCAP",
+ [_IOC_NR(VIDIOCGCHAN)] = "VIDIOCGCHAN",
+ [_IOC_NR(VIDIOCSCHAN)] = "VIDIOCSCHAN",
+ [_IOC_NR(VIDIOCGTUNER)] = "VIDIOCGTUNER",
+ [_IOC_NR(VIDIOCSTUNER)] = "VIDIOCSTUNER",
+ [_IOC_NR(VIDIOCGPICT)] = "VIDIOCGPICT",
+ [_IOC_NR(VIDIOCSPICT)] = "VIDIOCSPICT",
+ [_IOC_NR(VIDIOCCAPTURE)] = "VIDIOCCAPTURE",
+ [_IOC_NR(VIDIOCGWIN)] = "VIDIOCGWIN",
+ [_IOC_NR(VIDIOCSWIN)] = "VIDIOCSWIN",
+ [_IOC_NR(VIDIOCGFBUF)] = "VIDIOCGFBUF",
+ [_IOC_NR(VIDIOCSFBUF)] = "VIDIOCSFBUF",
+ [_IOC_NR(VIDIOCKEY)] = "VIDIOCKEY",
+ [_IOC_NR(VIDIOCGFREQ)] = "VIDIOCGFREQ",
+ [_IOC_NR(VIDIOCSFREQ)] = "VIDIOCSFREQ",
+ [_IOC_NR(VIDIOCGAUDIO)] = "VIDIOCGAUDIO",
+ [_IOC_NR(VIDIOCSAUDIO)] = "VIDIOCSAUDIO",
+ [_IOC_NR(VIDIOCSYNC)] = "VIDIOCSYNC",
+ [_IOC_NR(VIDIOCMCAPTURE)] = "VIDIOCMCAPTURE",
+ [_IOC_NR(VIDIOCGMBUF)] = "VIDIOCGMBUF",
+ [_IOC_NR(VIDIOCGUNIT)] = "VIDIOCGUNIT",
+ [_IOC_NR(VIDIOCGCAPTURE)] = "VIDIOCGCAPTURE",
+ [_IOC_NR(VIDIOCSCAPTURE)] = "VIDIOCSCAPTURE",
+ [_IOC_NR(VIDIOCSPLAYMODE)] = "VIDIOCSPLAYMODE",
+ [_IOC_NR(VIDIOCSWRITEMODE)] = "VIDIOCSWRITEMODE",
+ [_IOC_NR(VIDIOCGPLAYINFO)] = "VIDIOCGPLAYINFO",
+ [_IOC_NR(VIDIOCSMICROCODE)] = "VIDIOCSMICROCODE",
+ [_IOC_NR(VIDIOCGVBIFMT)] = "VIDIOCGVBIFMT",
+ [_IOC_NR(VIDIOCSVBIFMT)] = "VIDIOCSVBIFMT"
+};
+#define V4L1_IOCTLS ARRAY_SIZE(v4l1_ioctls)
+#endif
+
+static const char *v4l2_ioctls[] = {
+ [_IOC_NR(VIDIOC_QUERYCAP)] = "VIDIOC_QUERYCAP",
+ [_IOC_NR(VIDIOC_RESERVED)] = "VIDIOC_RESERVED",
+ [_IOC_NR(VIDIOC_ENUM_FMT)] = "VIDIOC_ENUM_FMT",
+ [_IOC_NR(VIDIOC_G_FMT)] = "VIDIOC_G_FMT",
+ [_IOC_NR(VIDIOC_S_FMT)] = "VIDIOC_S_FMT",
+ [_IOC_NR(VIDIOC_G_MPEGCOMP)] = "VIDIOC_G_MPEGCOMP",
+ [_IOC_NR(VIDIOC_S_MPEGCOMP)] = "VIDIOC_S_MPEGCOMP",
+ [_IOC_NR(VIDIOC_REQBUFS)] = "VIDIOC_REQBUFS",
+ [_IOC_NR(VIDIOC_QUERYBUF)] = "VIDIOC_QUERYBUF",
+ [_IOC_NR(VIDIOC_G_FBUF)] = "VIDIOC_G_FBUF",
+ [_IOC_NR(VIDIOC_S_FBUF)] = "VIDIOC_S_FBUF",
+ [_IOC_NR(VIDIOC_OVERLAY)] = "VIDIOC_OVERLAY",
+ [_IOC_NR(VIDIOC_QBUF)] = "VIDIOC_QBUF",
+ [_IOC_NR(VIDIOC_DQBUF)] = "VIDIOC_DQBUF",
+ [_IOC_NR(VIDIOC_STREAMON)] = "VIDIOC_STREAMON",
+ [_IOC_NR(VIDIOC_STREAMOFF)] = "VIDIOC_STREAMOFF",
+ [_IOC_NR(VIDIOC_G_PARM)] = "VIDIOC_G_PARM",
+ [_IOC_NR(VIDIOC_S_PARM)] = "VIDIOC_S_PARM",
+ [_IOC_NR(VIDIOC_G_STD)] = "VIDIOC_G_STD",
+ [_IOC_NR(VIDIOC_S_STD)] = "VIDIOC_S_STD",
+ [_IOC_NR(VIDIOC_ENUMSTD)] = "VIDIOC_ENUMSTD",
+ [_IOC_NR(VIDIOC_ENUMINPUT)] = "VIDIOC_ENUMINPUT",
+ [_IOC_NR(VIDIOC_G_CTRL)] = "VIDIOC_G_CTRL",
+ [_IOC_NR(VIDIOC_S_CTRL)] = "VIDIOC_S_CTRL",
+ [_IOC_NR(VIDIOC_G_TUNER)] = "VIDIOC_G_TUNER",
+ [_IOC_NR(VIDIOC_S_TUNER)] = "VIDIOC_S_TUNER",
+ [_IOC_NR(VIDIOC_G_AUDIO)] = "VIDIOC_G_AUDIO",
+ [_IOC_NR(VIDIOC_S_AUDIO)] = "VIDIOC_S_AUDIO",
+ [_IOC_NR(VIDIOC_QUERYCTRL)] = "VIDIOC_QUERYCTRL",
+ [_IOC_NR(VIDIOC_QUERYMENU)] = "VIDIOC_QUERYMENU",
+ [_IOC_NR(VIDIOC_G_INPUT)] = "VIDIOC_G_INPUT",
+ [_IOC_NR(VIDIOC_S_INPUT)] = "VIDIOC_S_INPUT",
+ [_IOC_NR(VIDIOC_G_OUTPUT)] = "VIDIOC_G_OUTPUT",
+ [_IOC_NR(VIDIOC_S_OUTPUT)] = "VIDIOC_S_OUTPUT",
+ [_IOC_NR(VIDIOC_ENUMOUTPUT)] = "VIDIOC_ENUMOUTPUT",
+ [_IOC_NR(VIDIOC_G_AUDOUT)] = "VIDIOC_G_AUDOUT",
+ [_IOC_NR(VIDIOC_S_AUDOUT)] = "VIDIOC_S_AUDOUT",
+ [_IOC_NR(VIDIOC_G_MODULATOR)] = "VIDIOC_G_MODULATOR",
+ [_IOC_NR(VIDIOC_S_MODULATOR)] = "VIDIOC_S_MODULATOR",
+ [_IOC_NR(VIDIOC_G_FREQUENCY)] = "VIDIOC_G_FREQUENCY",
+ [_IOC_NR(VIDIOC_S_FREQUENCY)] = "VIDIOC_S_FREQUENCY",
+ [_IOC_NR(VIDIOC_CROPCAP)] = "VIDIOC_CROPCAP",
+ [_IOC_NR(VIDIOC_G_CROP)] = "VIDIOC_G_CROP",
+ [_IOC_NR(VIDIOC_S_CROP)] = "VIDIOC_S_CROP",
+ [_IOC_NR(VIDIOC_G_JPEGCOMP)] = "VIDIOC_G_JPEGCOMP",
+ [_IOC_NR(VIDIOC_S_JPEGCOMP)] = "VIDIOC_S_JPEGCOMP",
+ [_IOC_NR(VIDIOC_QUERYSTD)] = "VIDIOC_QUERYSTD",
+ [_IOC_NR(VIDIOC_TRY_FMT)] = "VIDIOC_TRY_FMT",
+ [_IOC_NR(VIDIOC_ENUMAUDIO)] = "VIDIOC_ENUMAUDIO",
+ [_IOC_NR(VIDIOC_ENUMAUDOUT)] = "VIDIOC_ENUMAUDOUT",
+ [_IOC_NR(VIDIOC_G_PRIORITY)] = "VIDIOC_G_PRIORITY",
+ [_IOC_NR(VIDIOC_S_PRIORITY)] = "VIDIOC_S_PRIORITY",
+#if 1
+ [_IOC_NR(VIDIOC_G_SLICED_VBI_CAP)] = "VIDIOC_G_SLICED_VBI_CAP",
#endif
- [_IOC_NR(VIDIOC_QUERYCAP)] = "VIDIOC_QUERYCAP",
- [_IOC_NR(VIDIOC_RESERVED)] = "VIDIOC_RESERVED",
- [_IOC_NR(VIDIOC_ENUM_FMT)] = "VIDIOC_ENUM_FMT",
- [_IOC_NR(VIDIOC_G_FMT)] = "VIDIOC_G_FMT",
- [_IOC_NR(VIDIOC_S_FMT)] = "VIDIOC_S_FMT",
- [_IOC_NR(VIDIOC_REQBUFS)] = "VIDIOC_REQBUFS",
- [_IOC_NR(VIDIOC_QUERYBUF)] = "VIDIOC_QUERYBUF",
- [_IOC_NR(VIDIOC_G_FBUF)] = "VIDIOC_G_FBUF",
- [_IOC_NR(VIDIOC_S_FBUF)] = "VIDIOC_S_FBUF",
- [_IOC_NR(VIDIOC_OVERLAY)] = "VIDIOC_OVERLAY",
- [_IOC_NR(VIDIOC_QBUF)] = "VIDIOC_QBUF",
- [_IOC_NR(VIDIOC_DQBUF)] = "VIDIOC_DQBUF",
- [_IOC_NR(VIDIOC_STREAMON)] = "VIDIOC_STREAMON",
- [_IOC_NR(VIDIOC_STREAMOFF)] = "VIDIOC_STREAMOFF",
- [_IOC_NR(VIDIOC_G_PARM)] = "VIDIOC_G_PARM",
- [_IOC_NR(VIDIOC_S_PARM)] = "VIDIOC_S_PARM",
- [_IOC_NR(VIDIOC_G_STD)] = "VIDIOC_G_STD",
- [_IOC_NR(VIDIOC_S_STD)] = "VIDIOC_S_STD",
- [_IOC_NR(VIDIOC_ENUMSTD)] = "VIDIOC_ENUMSTD",
- [_IOC_NR(VIDIOC_ENUMINPUT)] = "VIDIOC_ENUMINPUT",
- [_IOC_NR(VIDIOC_G_CTRL)] = "VIDIOC_G_CTRL",
- [_IOC_NR(VIDIOC_S_CTRL)] = "VIDIOC_S_CTRL",
- [_IOC_NR(VIDIOC_G_TUNER)] = "VIDIOC_G_TUNER",
- [_IOC_NR(VIDIOC_S_TUNER)] = "VIDIOC_S_TUNER",
- [_IOC_NR(VIDIOC_G_AUDIO)] = "VIDIOC_G_AUDIO",
- [_IOC_NR(VIDIOC_S_AUDIO)] = "VIDIOC_S_AUDIO",
- [_IOC_NR(VIDIOC_QUERYCTRL)] = "VIDIOC_QUERYCTRL",
- [_IOC_NR(VIDIOC_QUERYMENU)] = "VIDIOC_QUERYMENU",
- [_IOC_NR(VIDIOC_G_INPUT)] = "VIDIOC_G_INPUT",
- [_IOC_NR(VIDIOC_S_INPUT)] = "VIDIOC_S_INPUT",
- [_IOC_NR(VIDIOC_G_OUTPUT)] = "VIDIOC_G_OUTPUT",
- [_IOC_NR(VIDIOC_S_OUTPUT)] = "VIDIOC_S_OUTPUT",
- [_IOC_NR(VIDIOC_ENUMOUTPUT)] = "VIDIOC_ENUMOUTPUT",
- [_IOC_NR(VIDIOC_G_AUDOUT)] = "VIDIOC_G_AUDOUT",
- [_IOC_NR(VIDIOC_S_AUDOUT)] = "VIDIOC_S_AUDOUT",
- [_IOC_NR(VIDIOC_G_MODULATOR)] = "VIDIOC_G_MODULATOR",
- [_IOC_NR(VIDIOC_S_MODULATOR)] = "VIDIOC_S_MODULATOR",
- [_IOC_NR(VIDIOC_G_FREQUENCY)] = "VIDIOC_G_FREQUENCY",
- [_IOC_NR(VIDIOC_S_FREQUENCY)] = "VIDIOC_S_FREQUENCY",
- [_IOC_NR(VIDIOC_CROPCAP)] = "VIDIOC_CROPCAP",
- [_IOC_NR(VIDIOC_G_CROP)] = "VIDIOC_G_CROP",
- [_IOC_NR(VIDIOC_S_CROP)] = "VIDIOC_S_CROP",
- [_IOC_NR(VIDIOC_G_JPEGCOMP)] = "VIDIOC_G_JPEGCOMP",
- [_IOC_NR(VIDIOC_S_JPEGCOMP)] = "VIDIOC_S_JPEGCOMP",
- [_IOC_NR(VIDIOC_QUERYSTD)] = "VIDIOC_QUERYSTD",
- [_IOC_NR(VIDIOC_TRY_FMT)] = "VIDIOC_TRY_FMT",
+ [_IOC_NR(VIDIOC_LOG_STATUS)] = "VIDIOC_LOG_STATUS"
};
+#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
+
+static const char *v4l2_int_ioctls[] = {
+#ifdef HAVE_VIDEO_DECODER
+ [_IOC_NR(DECODER_GET_CAPABILITIES)] = "DECODER_GET_CAPABILITIES",
+ [_IOC_NR(DECODER_GET_STATUS)] = "DECODER_GET_STATUS",
+ [_IOC_NR(DECODER_SET_NORM)] = "DECODER_SET_NORM",
+ [_IOC_NR(DECODER_SET_INPUT)] = "DECODER_SET_INPUT",
+ [_IOC_NR(DECODER_SET_OUTPUT)] = "DECODER_SET_OUTPUT",
+ [_IOC_NR(DECODER_ENABLE_OUTPUT)] = "DECODER_ENABLE_OUTPUT",
+ [_IOC_NR(DECODER_SET_PICTURE)] = "DECODER_SET_PICTURE",
+ [_IOC_NR(DECODER_SET_GPIO)] = "DECODER_SET_GPIO",
+ [_IOC_NR(DECODER_INIT)] = "DECODER_INIT",
+ [_IOC_NR(DECODER_SET_VBI_BYPASS)] = "DECODER_SET_VBI_BYPASS",
+ [_IOC_NR(DECODER_DUMP)] = "DECODER_DUMP",
+#endif
+ [_IOC_NR(AUDC_SET_RADIO)] = "AUDC_SET_RADIO",
+ [_IOC_NR(AUDC_SET_INPUT)] = "AUDC_SET_INPUT",
+
+ [_IOC_NR(TUNER_SET_TYPE_ADDR)] = "TUNER_SET_TYPE_ADDR",
+ [_IOC_NR(TUNER_SET_STANDBY)] = "TUNER_SET_STANDBY",
+ [_IOC_NR(TDA9887_SET_CONFIG)] = "TDA9887_SET_CONFIG",
+
+ [_IOC_NR(VIDIOC_INT_S_REGISTER)] = "VIDIOC_INT_S_REGISTER",
+ [_IOC_NR(VIDIOC_INT_G_REGISTER)] = "VIDIOC_INT_G_REGISTER",
+ [_IOC_NR(VIDIOC_INT_RESET)] = "VIDIOC_INT_RESET",
+ [_IOC_NR(VIDIOC_INT_AUDIO_CLOCK_FREQ)] = "VIDIOC_INT_AUDIO_CLOCK_FREQ",
+ [_IOC_NR(VIDIOC_INT_DECODE_VBI_LINE)] = "VIDIOC_INT_DECODE_VBI_LINE",
+ [_IOC_NR(VIDIOC_INT_S_VBI_DATA)] = "VIDIOC_INT_S_VBI_DATA",
+ [_IOC_NR(VIDIOC_INT_G_VBI_DATA)] = "VIDIOC_INT_G_VBI_DATA",
+ [_IOC_NR(VIDIOC_INT_G_CHIP_IDENT)] = "VIDIOC_INT_G_CHIP_IDENT",
+ [_IOC_NR(VIDIOC_INT_I2S_CLOCK_FREQ)] = "VIDIOC_INT_I2S_CLOCK_FREQ"
+};
+#define V4L2_INT_IOCTLS ARRAY_SIZE(v4l2_int_ioctls)
+
+/* Common ioctl debug function. This function can be used to debug
+ external ioctl messages as well as internal V4L ioctls. */
+void v4l_printk_ioctl(unsigned int cmd)
+{
+ char *dir;
+
+ switch (_IOC_DIR(cmd)) {
+ case _IOC_NONE: dir = "--"; break;
+ case _IOC_READ: dir = "r-"; break;
+ case _IOC_WRITE: dir = "-w"; break;
+ case _IOC_READ | _IOC_WRITE: dir = "rw"; break;
+ default: dir = "*ERR*"; break;
+ }
+ switch (_IOC_TYPE(cmd)) {
+ case 'd':
+ printk("v4l2_int ioctl %s, dir=%s (0x%08x)\n",
+ (_IOC_NR(cmd) < V4L2_INT_IOCTLS) ?
+ v4l2_int_ioctls[_IOC_NR(cmd)] : "UNKNOWN", dir, cmd);
+ break;
+#ifdef HAVE_V4L1
+ case 'v':
+ printk("v4l1 ioctl %s, dir=%s (0x%08x)\n",
+ (_IOC_NR(cmd) < V4L1_IOCTLS) ?
+ v4l1_ioctls[_IOC_NR(cmd)] : "UNKNOWN", dir, cmd);
+ break;
+#endif
+ case 'V':
+ printk("v4l2 ioctl %s, dir=%s (0x%08x)\n",
+ (_IOC_NR(cmd) < V4L2_IOCTLS) ?
+ v4l2_ioctls[_IOC_NR(cmd)] : "UNKNOWN", dir, cmd);
+ break;
+
+ default:
+ printk("unknown ioctl '%c', dir=%s, #%d (0x%08x)\n",
+ _IOC_TYPE(cmd), dir, _IOC_NR(cmd), cmd);
+ }
+}
/* ----------------------------------------------------------------- */
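v4l_printk_ioctl() above decodes an ioctl number into its direction, type character and ordinal with the _IOC_* macros before looking up the name table. The same decomposition can be done from userspace; a small sketch, assuming a Linux system where the V4L2 uapi headers are available:

#include <stdio.h>
#include <linux/ioctl.h>
#include <linux/videodev2.h>

/* decompose an ioctl number the same way v4l_printk_ioctl() does */
static void show_ioctl(unsigned int cmd)
{
	const char *dir;

	switch (_IOC_DIR(cmd)) {
	case _IOC_NONE:			dir = "--"; break;
	case _IOC_READ:			dir = "r-"; break;
	case _IOC_WRITE:		dir = "-w"; break;
	case _IOC_READ | _IOC_WRITE:	dir = "rw"; break;
	default:			dir = "*ERR*"; break;
	}
	printf("type='%c' nr=%u dir=%s size=%u (0x%08x)\n",
	       _IOC_TYPE(cmd), _IOC_NR(cmd), dir, _IOC_SIZE(cmd), cmd);
}

int main(void)
{
	show_ioctl(VIDIOC_QUERYCAP);	/* type 'V', nr 0, read-only */
	show_ioctl(VIDIOC_S_FMT);	/* type 'V', read-write */
	return 0;
}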
@@ -255,7 +374,7 @@ EXPORT_SYMBOL(v4l2_prio_check);
EXPORT_SYMBOL(v4l2_field_names);
EXPORT_SYMBOL(v4l2_type_names);
-EXPORT_SYMBOL(v4l2_ioctl_names);
+EXPORT_SYMBOL(v4l_printk_ioctl);
/*
* Local variables:
diff --git a/drivers/media/video/video-buf.c b/drivers/media/video/video-buf.c
index 9a6bf287e26..9ef477523d2 100644
--- a/drivers/media/video/video-buf.c
+++ b/drivers/media/video/video-buf.c
@@ -52,10 +52,9 @@ videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
struct page *pg;
int i;
- sglist = kmalloc(sizeof(struct scatterlist)*nr_pages, GFP_KERNEL);
+ sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
if (NULL == sglist)
return NULL;
- memset(sglist,0,sizeof(struct scatterlist)*nr_pages);
for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
pg = vmalloc_to_page(virt);
if (NULL == pg)
@@ -80,10 +79,9 @@ videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
if (NULL == pages[0])
return NULL;
- sglist = kmalloc(sizeof(*sglist) * nr_pages, GFP_KERNEL);
+ sglist = kcalloc(nr_pages, sizeof(*sglist), GFP_KERNEL);
if (NULL == sglist)
return NULL;
- memset(sglist, 0, sizeof(*sglist) * nr_pages);
if (NULL == pages[0])
goto nopage;
@@ -284,9 +282,8 @@ void* videobuf_alloc(unsigned int size)
{
struct videobuf_buffer *vb;
- vb = kmalloc(size,GFP_KERNEL);
+ vb = kzalloc(size,GFP_KERNEL);
if (NULL != vb) {
- memset(vb,0,size);
videobuf_dma_init(&vb->dma);
init_waitqueue_head(&vb->done);
vb->magic = MAGIC_BUFFER;
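Many hunks in this patch, including the videobuf ones just above, convert kmalloc()+memset() pairs to kzalloc() and kmalloc(n * size) to kcalloc(). A userspace analogue of that transformation, using a hypothetical struct purely for illustration:

#include <stdlib.h>
#include <string.h>

struct item {				/* hypothetical; stands in for the kernel structs */
	int id;
	char name[32];
};

/* old pattern: allocate, then clear by hand */
static struct item *alloc_old(size_t n)
{
	struct item *p = malloc(n * sizeof(*p));

	if (p)
		memset(p, 0, n * sizeof(*p));
	return p;
}

/* new pattern: calloc() zeroes the array, as kcalloc()/kzalloc() do in the kernel */
static struct item *alloc_new(size_t n)
{
	return calloc(n, sizeof(struct item));
}

int main(void)
{
	struct item *a = alloc_old(4);
	struct item *b = alloc_new(4);

	free(a);
	free(b);
	return 0;
}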
diff --git a/drivers/media/video/videocodec.c b/drivers/media/video/videocodec.c
index 839db622040..8f271de57fd 100644
--- a/drivers/media/video/videocodec.c
+++ b/drivers/media/video/videocodec.c
@@ -124,17 +124,13 @@ videocodec_attach (struct videocodec_master *master)
if (res == 0) {
dprintk(3, "videocodec_attach '%s'\n",
codec->name);
- ptr = (struct attached_list *)
- kmalloc(sizeof(struct attached_list),
- GFP_KERNEL);
+ ptr = kzalloc(sizeof(struct attached_list), GFP_KERNEL);
if (!ptr) {
dprintk(1,
KERN_ERR
"videocodec_attach: no memory\n");
goto out_kfree;
}
- memset(ptr, 0,
- sizeof(struct attached_list));
ptr->codec = codec;
a = h->list;
@@ -249,14 +245,11 @@ videocodec_register (const struct videocodec *codec)
"videocodec: register '%s', type: %x, flags %lx, magic %lx\n",
codec->name, codec->type, codec->flags, codec->magic);
- ptr =
- (struct codec_list *) kmalloc(sizeof(struct codec_list),
- GFP_KERNEL);
+ ptr = kzalloc(sizeof(struct codec_list), GFP_KERNEL);
if (!ptr) {
dprintk(1, KERN_ERR "videocodec_register: no memory\n");
return -ENOMEM;
}
- memset(ptr, 0, sizeof(struct codec_list));
ptr->codec = codec;
if (!h) {
diff --git a/drivers/media/video/videodev.c b/drivers/media/video/videodev.c
index 6de5b0094b8..d5be2598714 100644
--- a/drivers/media/video/videodev.c
+++ b/drivers/media/video/videodev.c
@@ -52,10 +52,7 @@ struct video_device *video_device_alloc(void)
{
struct video_device *vfd;
- vfd = kmalloc(sizeof(*vfd),GFP_KERNEL);
- if (NULL == vfd)
- return NULL;
- memset(vfd,0,sizeof(*vfd));
+ vfd = kzalloc(sizeof(*vfd),GFP_KERNEL);
return vfd;
}
@@ -68,7 +65,8 @@ static void video_release(struct class_device *cd)
{
struct video_device *vfd = container_of(cd, struct video_device, class_dev);
-#if 1 /* needed until all drivers are fixed */
+#if 1
+ /* needed until all drivers are fixed */
if (!vfd->release)
return;
#endif
@@ -338,13 +336,14 @@ int video_register_device(struct video_device *vfd, int type, int nr)
if (vfd->dev)
vfd->class_dev.dev = vfd->dev;
vfd->class_dev.class = &video_class;
- vfd->class_dev.devt = MKDEV(VIDEO_MAJOR, vfd->minor);
+ vfd->class_dev.devt = MKDEV(VIDEO_MAJOR, vfd->minor);
strlcpy(vfd->class_dev.class_id, vfd->devfs_name + 4, BUS_ID_SIZE);
class_device_register(&vfd->class_dev);
class_device_create_file(&vfd->class_dev,
- &class_device_attr_name);
+ &class_device_attr_name);
-#if 1 /* needed until all drivers are fixed */
+#if 1
+ /* needed until all drivers are fixed */
if (!vfd->release)
printk(KERN_WARNING "videodev: \"%s\" has no release callback. "
"Please fix your driver for proper sysfs support, see "
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index 71b28e9e085..c8fd8238904 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -4499,13 +4499,11 @@ static int vino_init(void)
dma_addr_t dma_dummy_address;
int i;
- vino_drvdata = (struct vino_settings *)
- kmalloc(sizeof(struct vino_settings), GFP_KERNEL);
+ vino_drvdata = kzalloc(sizeof(struct vino_settings), GFP_KERNEL);
if (!vino_drvdata) {
vino_module_cleanup(vino_init_stage);
return -ENOMEM;
}
- memset(vino_drvdata, 0, sizeof(struct vino_settings));
vino_init_stage++;
/* create a dummy dma descriptor */
diff --git a/drivers/media/video/vpx3220.c b/drivers/media/video/vpx3220.c
index 137b58f2c66..d0a1e72ea8c 100644
--- a/drivers/media/video/vpx3220.c
+++ b/drivers/media/video/vpx3220.c
@@ -621,17 +621,14 @@ vpx3220_detect_client (struct i2c_adapter *adapter,
(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
return 0;
- client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
if (client == NULL) {
return -ENOMEM;
}
- memset(client, 0, sizeof(struct i2c_client));
-
client->addr = address;
client->adapter = adapter;
client->driver = &vpx3220_i2c_driver;
- client->flags = I2C_CLIENT_ALLOW_USE;
/* Check for manufacture ID and part number */
if (kind < 0) {
@@ -676,12 +673,11 @@ vpx3220_detect_client (struct i2c_adapter *adapter,
sizeof(I2C_NAME(client)));
}
- decoder = kmalloc(sizeof(struct vpx3220), GFP_KERNEL);
+ decoder = kzalloc(sizeof(struct vpx3220), GFP_KERNEL);
if (decoder == NULL) {
kfree(client);
return -ENOMEM;
}
- memset(decoder, 0, sizeof(struct vpx3220));
decoder->norm = VIDEO_MODE_PAL;
decoder->input = 0;
decoder->enable = 1;
@@ -722,11 +718,11 @@ vpx3220_attach_adapter (struct i2c_adapter *adapter)
*/
static struct i2c_driver vpx3220_i2c_driver = {
- .owner = THIS_MODULE,
- .name = "vpx3220",
+ .driver = {
+ .name = "vpx3220",
+ },
.id = I2C_DRIVERID_VPX3220,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = vpx3220_attach_adapter,
.detach_client = vpx3220_detach_client,
diff --git a/drivers/media/video/w9966.c b/drivers/media/video/w9966.c
index c318ba32fba..b7b0afffd21 100644
--- a/drivers/media/video/w9966.c
+++ b/drivers/media/video/w9966.c
@@ -187,6 +187,7 @@ static struct file_operations w9966_fops = {
.open = video_exclusive_open,
.release = video_exclusive_release,
.ioctl = w9966_v4l_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.read = w9966_v4l_read,
.llseek = no_llseek,
};
diff --git a/drivers/media/video/wm8775.c b/drivers/media/video/wm8775.c
index a6936ad74fc..8cb64f8a8a9 100644
--- a/drivers/media/video/wm8775.c
+++ b/drivers/media/video/wm8775.c
@@ -25,7 +25,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-
#include <linux/module.h>
#include <linux/types.h>
#include <linux/ioctl.h>
@@ -33,20 +32,12 @@
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <linux/videodev.h>
-#include <media/audiochip.h>
+#include <media/v4l2-common.h>
MODULE_DESCRIPTION("wm8775 driver");
MODULE_AUTHOR("Ulf Eklund, Hans Verkuil");
MODULE_LICENSE("GPL");
-#define wm8775_err(fmt, arg...) do { \
- printk(KERN_ERR "%s %d-%04x: " fmt, client->driver->name, \
- i2c_adapter_id(client->adapter), client->addr , ## arg); } while (0)
-#define wm8775_info(fmt, arg...) do { \
- printk(KERN_INFO "%s %d-%04x: " fmt, client->driver->name, \
- i2c_adapter_id(client->adapter), client->addr , ## arg); } while (0)
-
-
static unsigned short normal_i2c[] = { 0x36 >> 1, I2C_CLIENT_END };
@@ -70,7 +61,7 @@ static int wm8775_write(struct i2c_client *client, int reg, u16 val)
int i;
if (reg < 0 || reg >= TOT_REGS) {
- wm8775_err("Invalid register R%d\n", reg);
+ v4l_err(client, "Invalid register R%d\n", reg);
return -1;
}
@@ -80,7 +71,7 @@ static int wm8775_write(struct i2c_client *client, int reg, u16 val)
return 0;
}
}
- wm8775_err("I2C: cannot write %03x to register R%d\n", val, reg);
+ v4l_err(client, "I2C: cannot write %03x to register R%d\n", val, reg);
return -1;
}
@@ -88,38 +79,53 @@ static int wm8775_command(struct i2c_client *client, unsigned int cmd,
void *arg)
{
struct wm8775_state *state = i2c_get_clientdata(client);
- int *input = arg;
+ struct v4l2_audio *input = arg;
+ struct v4l2_control *ctrl = arg;
switch (cmd) {
- case AUDC_SET_INPUT:
+ case VIDIOC_S_AUDIO:
+ /* There are 4 inputs and one output. Zero or more inputs
+ are multiplexed together to the output. Hence there are
+ 16 combinations.
+ If only one input is active (the normal case) then the
+ input values 1, 2, 4 or 8 should be used. */
+ if (input->index > 15) {
+ v4l_err(client, "Invalid input %d.\n", input->index);
+ return -EINVAL;
+ }
+ state->input = input->index;
+ if (state->muted)
+ break;
wm8775_write(client, R21, 0x0c0);
wm8775_write(client, R14, 0x1d4);
wm8775_write(client, R15, 0x1d4);
+ wm8775_write(client, R21, 0x100 + state->input);
+ break;
- if (*input == AUDIO_RADIO) {
- wm8775_write(client, R21, 0x108);
- state->input = 8;
- state->muted = 0;
- break;
- }
- if (*input == AUDIO_MUTE) {
- state->muted = 1;
- break;
- }
- if (*input == AUDIO_UNMUTE) {
+ case VIDIOC_G_AUDIO:
+ memset(input, 0, sizeof(*input));
+ input->index = state->input;
+ break;
+
+ case VIDIOC_G_CTRL:
+ if (ctrl->id != V4L2_CID_AUDIO_MUTE)
+ return -EINVAL;
+ ctrl->value = state->muted;
+ break;
+
+ case VIDIOC_S_CTRL:
+ if (ctrl->id != V4L2_CID_AUDIO_MUTE)
+ return -EINVAL;
+ state->muted = ctrl->value;
+ wm8775_write(client, R21, 0x0c0);
+ wm8775_write(client, R14, 0x1d4);
+ wm8775_write(client, R15, 0x1d4);
+ if (!state->muted)
wm8775_write(client, R21, 0x100 + state->input);
- state->muted = 0;
- break;
- }
- /* All other inputs... */
- wm8775_write(client, R21, 0x102);
- state->input = 2;
- state->muted = 0;
break;
case VIDIOC_LOG_STATUS:
- wm8775_info("Input: %s%s\n",
- state->input == 8 ? "radio" : "default",
+ v4l_info(client, "Input: %d%s\n", state->input,
state->muted ? " (muted)" : "");
break;
@@ -160,18 +166,16 @@ static int wm8775_attach(struct i2c_adapter *adapter, int address, int kind)
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return 0;
- client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
+ client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
if (client == 0)
return -ENOMEM;
- memset(client, 0, sizeof(struct i2c_client));
client->addr = address;
client->adapter = adapter;
client->driver = &i2c_driver;
- client->flags = I2C_CLIENT_ALLOW_USE;
snprintf(client->name, sizeof(client->name) - 1, "wm8775");
- wm8775_info("chip found @ 0x%x (%s)\n", address << 1, adapter->name);
+ v4l_info(client, "chip found @ 0x%x (%s)\n", address << 1, adapter->name);
state = kmalloc(sizeof(struct wm8775_state), GFP_KERNEL);
if (state == NULL) {
@@ -207,11 +211,7 @@ static int wm8775_attach(struct i2c_adapter *adapter, int address, int kind)
static int wm8775_probe(struct i2c_adapter *adapter)
{
-#ifdef I2C_CLASS_TV_ANALOG
if (adapter->class & I2C_CLASS_TV_ANALOG)
-#else
- if (adapter->id == I2C_HW_B_BT848)
-#endif
return i2c_probe(adapter, &addr_data, wm8775_attach);
return 0;
}
@@ -233,15 +233,13 @@ static int wm8775_detach(struct i2c_client *client)
/* i2c implementation */
static struct i2c_driver i2c_driver = {
- .name = "wm8775",
-
- .id = I2C_DRIVERID_WM8775,
- .flags = I2C_DF_NOTIFY,
-
+ .driver = {
+ .name = "wm8775",
+ },
+ .id = I2C_DRIVERID_WM8775,
.attach_adapter = wm8775_probe,
- .detach_client = wm8775_detach,
- .command = wm8775_command,
- .owner = THIS_MODULE,
+ .detach_client = wm8775_detach,
+ .command = wm8775_command,
};
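In the wm8775 hunk above, VIDIOC_S_AUDIO treats the audio input index as a 4-bit mask of the chip's four inputs (1, 2, 4 or 8 when only one is active) and programs it as R21 = 0x100 + mask. A tiny illustration of that encoding; the 0x100 base and the 4-bit range come from the hunk, the explicit masking is an added safety assumption:

#include <stdio.h>

/* mirrors wm8775_write(client, R21, 0x100 + state->input) from the hunk */
static unsigned int r21_value(unsigned int input_mask)
{
	return 0x100 + (input_mask & 0xf);
}

int main(void)
{
	unsigned int mask;

	for (mask = 1; mask <= 8; mask <<= 1)
		printf("input mask 0x%x -> R21 = 0x%03x\n", mask, r21_value(mask));
	return 0;
}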
diff --git a/drivers/media/video/zoran_card.c b/drivers/media/video/zoran_card.c
index 39a0d238900..ea3288661a3 100644
--- a/drivers/media/video/zoran_card.c
+++ b/drivers/media/video/zoran_card.c
@@ -1050,7 +1050,7 @@ zr36057_init (struct zoran *zr)
/* allocate memory *before* doing anything to the hardware
* in case allocation fails */
mem_needed = BUZ_NUM_STAT_COM * 4;
- mem = (unsigned long) kmalloc(mem_needed, GFP_KERNEL);
+ mem = kzalloc(mem_needed, GFP_KERNEL);
vdev = (void *) kmalloc(sizeof(struct video_device), GFP_KERNEL);
if (!mem || !vdev) {
dprintk(1,
@@ -1061,7 +1061,6 @@ zr36057_init (struct zoran *zr)
kfree((void *)mem);
return -ENOMEM;
}
- memset((void *) mem, 0, mem_needed);
zr->stat_com = (u32 *) mem;
for (j = 0; j < BUZ_NUM_STAT_COM; j++) {
zr->stat_com[j] = 1; /* mark as unavailable to zr36057 */
diff --git a/drivers/media/video/zoran_driver.c b/drivers/media/video/zoran_driver.c
index 07bde9acd67..485553be190 100644
--- a/drivers/media/video/zoran_driver.c
+++ b/drivers/media/video/zoran_driver.c
@@ -1311,7 +1311,7 @@ zoran_open (struct inode *inode,
res = -ENODEV;
goto open_unlock_and_return;
}
- if (!try_module_get(zr->decoder->driver->owner)) {
+ if (!try_module_get(zr->decoder->driver->driver.owner)) {
dprintk(1,
KERN_ERR
"%s: failed to grab ownership of i2c decoder\n",
@@ -1321,13 +1321,13 @@ zoran_open (struct inode *inode,
goto open_unlock_and_return;
}
if (zr->encoder &&
- !try_module_get(zr->encoder->driver->owner)) {
+ !try_module_get(zr->encoder->driver->driver.owner)) {
dprintk(1,
KERN_ERR
"%s: failed to grab ownership of i2c encoder\n",
ZR_DEVNAME(zr));
res = -EIO;
- module_put(zr->decoder->driver->owner);
+ module_put(zr->decoder->driver->driver.owner);
module_put(THIS_MODULE);
goto open_unlock_and_return;
}
@@ -1345,7 +1345,7 @@ zoran_open (struct inode *inode,
ZR_DEVNAME(zr), current->comm, current->pid, zr->user);
/* now, create the open()-specific file_ops struct */
- fh = kmalloc(sizeof(struct zoran_fh), GFP_KERNEL);
+ fh = kzalloc(sizeof(struct zoran_fh), GFP_KERNEL);
if (!fh) {
dprintk(1,
KERN_ERR
@@ -1354,7 +1354,6 @@ zoran_open (struct inode *inode,
res = -ENOMEM;
goto open_unlock_and_return;
}
- memset(fh, 0, sizeof(struct zoran_fh));
/* used to be BUZ_MAX_WIDTH/HEIGHT, but that gives overflows
* on norm-change! */
fh->overlay_mask =
@@ -1393,9 +1392,9 @@ zoran_open (struct inode *inode,
open_unlock_and_return:
/* if we grabbed locks, release them accordingly */
if (have_module_locks) {
- module_put(zr->decoder->driver->owner);
+ module_put(zr->decoder->driver->driver.owner);
if (zr->encoder) {
- module_put(zr->encoder->driver->owner);
+ module_put(zr->encoder->driver->driver.owner);
}
module_put(THIS_MODULE);
}
@@ -1461,9 +1460,9 @@ zoran_close (struct inode *inode,
kfree(fh);
/* release locks on the i2c modules */
- module_put(zr->decoder->driver->owner);
+ module_put(zr->decoder->driver->driver.owner);
if (zr->encoder) {
- module_put(zr->encoder->driver->owner);
+ module_put(zr->encoder->driver->driver.owner);
}
module_put(THIS_MODULE);
@@ -4678,6 +4677,7 @@ static struct file_operations zoran_fops = {
.open = zoran_open,
.release = zoran_close,
.ioctl = zoran_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
.read = zoran_read,
.write = zoran_write,
diff --git a/drivers/media/video/zr36016.c b/drivers/media/video/zr36016.c
index 4ed898585c7..10130ef67ea 100644
--- a/drivers/media/video/zr36016.c
+++ b/drivers/media/video/zr36016.c
@@ -451,12 +451,11 @@ zr36016_setup (struct videocodec *codec)
return -ENOSPC;
}
//mem structure init
- codec->data = ptr = kmalloc(sizeof(struct zr36016), GFP_KERNEL);
+ codec->data = ptr = kzalloc(sizeof(struct zr36016), GFP_KERNEL);
if (NULL == ptr) {
dprintk(1, KERN_ERR "zr36016: Can't get enough memory!\n");
return -ENOMEM;
}
- memset(ptr, 0, sizeof(struct zr36016));
snprintf(ptr->name, sizeof(ptr->name), "zr36016[%d]",
zr36016_codecs);
diff --git a/drivers/media/video/zr36050.c b/drivers/media/video/zr36050.c
index 0144576a612..bd0cd28543c 100644
--- a/drivers/media/video/zr36050.c
+++ b/drivers/media/video/zr36050.c
@@ -813,12 +813,11 @@ zr36050_setup (struct videocodec *codec)
return -ENOSPC;
}
//mem structure init
- codec->data = ptr = kmalloc(sizeof(struct zr36050), GFP_KERNEL);
+ codec->data = ptr = kzalloc(sizeof(struct zr36050), GFP_KERNEL);
if (NULL == ptr) {
dprintk(1, KERN_ERR "zr36050: Can't get enough memory!\n");
return -ENOMEM;
}
- memset(ptr, 0, sizeof(struct zr36050));
snprintf(ptr->name, sizeof(ptr->name), "zr36050[%d]",
zr36050_codecs);
diff --git a/drivers/media/video/zr36060.c b/drivers/media/video/zr36060.c
index 129744a07ab..28fa31a5f15 100644
--- a/drivers/media/video/zr36060.c
+++ b/drivers/media/video/zr36060.c
@@ -919,12 +919,11 @@ zr36060_setup (struct videocodec *codec)
return -ENOSPC;
}
//mem structure init
- codec->data = ptr = kmalloc(sizeof(struct zr36060), GFP_KERNEL);
+ codec->data = ptr = kzalloc(sizeof(struct zr36060), GFP_KERNEL);
if (NULL == ptr) {
dprintk(1, KERN_ERR "zr36060: Can't get enough memory!\n");
return -ENOMEM;
}
- memset(ptr, 0, sizeof(struct zr36060));
snprintf(ptr->name, sizeof(ptr->name), "zr36060[%d]",
zr36060_codecs);
diff --git a/drivers/media/video/zr36120.c b/drivers/media/video/zr36120.c
index 07286816d7d..d4c633b8a7f 100644
--- a/drivers/media/video/zr36120.c
+++ b/drivers/media/video/zr36120.c
@@ -1490,6 +1490,7 @@ static struct video_device zr36120_template=
.write = zoran_write,
.poll = zoran_poll,
.ioctl = zoran_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.mmap = zoran_mmap,
.minor = -1,
};
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 959d2c5951b..7c340240a50 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2585,8 +2585,6 @@ static struct miscdevice mptctl_miscdev = {
#ifdef CONFIG_COMPAT
-#include <linux/ioctl32.h>
-
static int
compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
diff --git a/drivers/message/i2o/Kconfig b/drivers/message/i2o/Kconfig
index 43a942a29c2..fef67710388 100644
--- a/drivers/message/i2o/Kconfig
+++ b/drivers/message/i2o/Kconfig
@@ -24,6 +24,18 @@ config I2O
If unsure, say N.
+config I2O_LCT_NOTIFY_ON_CHANGES
+ bool "Enable LCT notification"
+ depends on I2O
+ default y
+ ---help---
+ Only say N here if you have an I2O controller from SUN. The SUN
+ firmware doesn't support LCT notification on changes. If this option
+ is enabled on such a controller, the driver will hang in an endless
+ loop. On all other controllers say Y.
+
+ If unsure, say Y.
+
config I2O_EXT_ADAPTEC
bool "Enable Adaptec extensions"
depends on I2O
diff --git a/drivers/message/i2o/README.ioctl b/drivers/message/i2o/README.ioctl
index 73dd084c0e9..65c0c47aeb7 100644
--- a/drivers/message/i2o/README.ioctl
+++ b/drivers/message/i2o/README.ioctl
@@ -185,7 +185,7 @@ VII. Getting Parameters
ENOMEM Kernel memory allocation error
A return value of 0 does not mean that the value was actually
- properly retreived. The user should check the result list
+ properly retrieved. The user should check the result list
to determine the specific status of the transaction.
VIII. Downloading Software
diff --git a/drivers/message/i2o/bus-osm.c b/drivers/message/i2o/bus-osm.c
index 151b228e1cb..ac06f10c54e 100644
--- a/drivers/message/i2o/bus-osm.c
+++ b/drivers/message/i2o/bus-osm.c
@@ -17,7 +17,7 @@
#include <linux/i2o.h>
#define OSM_NAME "bus-osm"
-#define OSM_VERSION "$Rev$"
+#define OSM_VERSION "1.317"
#define OSM_DESCRIPTION "I2O Bus Adapter OSM"
static struct i2o_driver i2o_bus_driver;
@@ -39,18 +39,18 @@ static struct i2o_class_id i2o_bus_class_id[] = {
*/
static int i2o_bus_scan(struct i2o_device *dev)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
- m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
return -ETIMEDOUT;
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.tid,
- &msg->u.head[1]);
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.
+ tid);
- return i2o_msg_post_wait(dev->iop, m, 60);
+ return i2o_msg_post_wait(dev->iop, msg, 60);
};
/**
@@ -59,8 +59,9 @@ static int i2o_bus_scan(struct i2o_device *dev)
*
* Returns count.
*/
-static ssize_t i2o_bus_store_scan(struct device *d, struct device_attribute *attr, const char *buf,
- size_t count)
+static ssize_t i2o_bus_store_scan(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct i2o_device *i2o_dev = to_i2o_device(d);
int rc;
diff --git a/drivers/message/i2o/config-osm.c b/drivers/message/i2o/config-osm.c
index 10432f66520..3bba7aa82e5 100644
--- a/drivers/message/i2o/config-osm.c
+++ b/drivers/message/i2o/config-osm.c
@@ -22,7 +22,7 @@
#include <asm/uaccess.h>
#define OSM_NAME "config-osm"
-#define OSM_VERSION "1.248"
+#define OSM_VERSION "1.323"
#define OSM_DESCRIPTION "I2O Configuration OSM"
/* access mode user rw */
diff --git a/drivers/message/i2o/core.h b/drivers/message/i2o/core.h
index 9eefedb1621..90628562851 100644
--- a/drivers/message/i2o/core.h
+++ b/drivers/message/i2o/core.h
@@ -14,8 +14,6 @@
*/
/* Exec-OSM */
-extern struct bus_type i2o_bus_type;
-
extern struct i2o_driver i2o_exec_driver;
extern int i2o_exec_lct_get(struct i2o_controller *);
@@ -23,6 +21,8 @@ extern int __init i2o_exec_init(void);
extern void __exit i2o_exec_exit(void);
/* driver */
+extern struct bus_type i2o_bus_type;
+
extern int i2o_driver_dispatch(struct i2o_controller *, u32);
extern int __init i2o_driver_init(void);
@@ -33,19 +33,27 @@ extern int __init i2o_pci_init(void);
extern void __exit i2o_pci_exit(void);
/* device */
+extern struct device_attribute i2o_device_attrs[];
+
extern void i2o_device_remove(struct i2o_device *);
extern int i2o_device_parse_lct(struct i2o_controller *);
/* IOP */
extern struct i2o_controller *i2o_iop_alloc(void);
-extern void i2o_iop_free(struct i2o_controller *);
+
+/**
+ * i2o_iop_free - Free the i2o_controller struct
+ * @c: I2O controller to free
+ */
+static inline void i2o_iop_free(struct i2o_controller *c)
+{
+ i2o_pool_free(&c->in_msg);
+ kfree(c);
+}
extern int i2o_iop_add(struct i2o_controller *);
extern void i2o_iop_remove(struct i2o_controller *);
-/* config */
-extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int);
-
/* control registers relative to c->base */
#define I2O_IRQ_STATUS 0x30
#define I2O_IRQ_MASK 0x34
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
index 8eb50cdb8ae..ee183053fa2 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/message/i2o/device.c
@@ -35,18 +35,18 @@
static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd,
u32 type)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
- m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid, &msg->u.head[1]);
- writel(type, &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid);
+ msg->body[0] = cpu_to_le32(type);
- return i2o_msg_post_wait(dev->iop, m, 60);
+ return i2o_msg_post_wait(dev->iop, msg, 60);
}
/**
@@ -123,7 +123,6 @@ int i2o_device_claim_release(struct i2o_device *dev)
return rc;
}
-
/**
* i2o_device_release - release the memory for a I2O device
* @dev: I2O device which should be released
@@ -140,10 +139,10 @@ static void i2o_device_release(struct device *dev)
kfree(i2o_dev);
}
-
/**
- * i2o_device_class_show_class_id - Displays class id of I2O device
- * @cd: class device of which the class id should be displayed
+ * i2o_device_show_class_id - Displays class id of I2O device
+ * @dev: device of which the class id should be displayed
+ * @attr: pointer to device attribute
* @buf: buffer into which the class id should be printed
*
* Returns the number of bytes which are printed into the buffer.
@@ -159,15 +158,15 @@ static ssize_t i2o_device_show_class_id(struct device *dev,
}
/**
- * i2o_device_class_show_tid - Displays TID of I2O device
- * @cd: class device of which the TID should be displayed
- * @buf: buffer into which the class id should be printed
+ * i2o_device_show_tid - Displays TID of I2O device
+ * @dev: device of which the TID should be displayed
+ * @attr: pointer to device attribute
+ * @buf: buffer into which the TID should be printed
*
* Returns the number of bytes which are printed into the buffer.
*/
static ssize_t i2o_device_show_tid(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
struct i2o_device *i2o_dev = to_i2o_device(dev);
@@ -175,6 +174,7 @@ static ssize_t i2o_device_show_tid(struct device *dev,
return strlen(buf) + 1;
}
+/* I2O device attributes */
struct device_attribute i2o_device_attrs[] = {
__ATTR(class_id, S_IRUGO, i2o_device_show_class_id, NULL),
__ATTR(tid, S_IRUGO, i2o_device_show_tid, NULL),
@@ -193,12 +193,10 @@ static struct i2o_device *i2o_device_alloc(void)
{
struct i2o_device *dev;
- dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return ERR_PTR(-ENOMEM);
- memset(dev, 0, sizeof(*dev));
-
INIT_LIST_HEAD(&dev->list);
init_MUTEX(&dev->lock);
@@ -209,66 +207,6 @@ static struct i2o_device *i2o_device_alloc(void)
}
/**
- * i2o_setup_sysfs_links - Adds attributes to the I2O device
- * @cd: I2O class device which is added to the I2O device class
- *
- * This function get called when a I2O device is added to the class. It
- * creates the attributes for each device and creates user/parent symlink
- * if necessary.
- *
- * Returns 0 on success or negative error code on failure.
- */
-static void i2o_setup_sysfs_links(struct i2o_device *i2o_dev)
-{
- struct i2o_controller *c = i2o_dev->iop;
- struct i2o_device *tmp;
-
- /* create user entries for this device */
- tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid);
- if (tmp && tmp != i2o_dev)
- sysfs_create_link(&i2o_dev->device.kobj,
- &tmp->device.kobj, "user");
-
- /* create user entries refering to this device */
- list_for_each_entry(tmp, &c->devices, list)
- if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid &&
- tmp != i2o_dev)
- sysfs_create_link(&tmp->device.kobj,
- &i2o_dev->device.kobj, "user");
-
- /* create parent entries for this device */
- tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid);
- if (tmp && tmp != i2o_dev)
- sysfs_create_link(&i2o_dev->device.kobj,
- &tmp->device.kobj, "parent");
-
- /* create parent entries refering to this device */
- list_for_each_entry(tmp, &c->devices, list)
- if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid &&
- tmp != i2o_dev)
- sysfs_create_link(&tmp->device.kobj,
- &i2o_dev->device.kobj, "parent");
-}
-
-static void i2o_remove_sysfs_links(struct i2o_device *i2o_dev)
-{
- struct i2o_controller *c = i2o_dev->iop;
- struct i2o_device *tmp;
-
- sysfs_remove_link(&i2o_dev->device.kobj, "parent");
- sysfs_remove_link(&i2o_dev->device.kobj, "user");
-
- list_for_each_entry(tmp, &c->devices, list) {
- if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
- sysfs_remove_link(&tmp->device.kobj, "parent");
- if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
- sysfs_remove_link(&tmp->device.kobj, "user");
- }
-}
-
-
-
-/**
* i2o_device_add - allocate a new I2O device and add it to the IOP
* @iop: I2O controller where the device is on
* @entry: LCT entry of the I2O device
@@ -282,33 +220,57 @@ static void i2o_remove_sysfs_links(struct i2o_device *i2o_dev)
static struct i2o_device *i2o_device_add(struct i2o_controller *c,
i2o_lct_entry * entry)
{
- struct i2o_device *dev;
+ struct i2o_device *i2o_dev, *tmp;
- dev = i2o_device_alloc();
- if (IS_ERR(dev)) {
+ i2o_dev = i2o_device_alloc();
+ if (IS_ERR(i2o_dev)) {
printk(KERN_ERR "i2o: unable to allocate i2o device\n");
- return dev;
+ return i2o_dev;
}
- dev->lct_data = *entry;
- dev->iop = c;
+ i2o_dev->lct_data = *entry;
- snprintf(dev->device.bus_id, BUS_ID_SIZE, "%d:%03x", c->unit,
- dev->lct_data.tid);
+ snprintf(i2o_dev->device.bus_id, BUS_ID_SIZE, "%d:%03x", c->unit,
+ i2o_dev->lct_data.tid);
- dev->device.parent = &c->device;
+ i2o_dev->iop = c;
+ i2o_dev->device.parent = &c->device;
- device_register(&dev->device);
+ device_register(&i2o_dev->device);
- list_add_tail(&dev->list, &c->devices);
+ list_add_tail(&i2o_dev->list, &c->devices);
- i2o_setup_sysfs_links(dev);
+ /* create user entries for this device */
+ tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid);
+ if (tmp && (tmp != i2o_dev))
+ sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj,
+ "user");
- i2o_driver_notify_device_add_all(dev);
+ /* create user entries referring to this device */
+ list_for_each_entry(tmp, &c->devices, list)
+ if ((tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
+ && (tmp != i2o_dev))
+ sysfs_create_link(&tmp->device.kobj,
+ &i2o_dev->device.kobj, "user");
- pr_debug("i2o: device %s added\n", dev->device.bus_id);
+ /* create parent entries for this device */
+ tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid);
+ if (tmp && (tmp != i2o_dev))
+ sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj,
+ "parent");
- return dev;
+ /* create parent entries referring to this device */
+ list_for_each_entry(tmp, &c->devices, list)
+ if ((tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
+ && (tmp != i2o_dev))
+ sysfs_create_link(&tmp->device.kobj,
+ &i2o_dev->device.kobj, "parent");
+
+ i2o_driver_notify_device_add_all(i2o_dev);
+
+ pr_debug("i2o: device %s added\n", i2o_dev->device.bus_id);
+
+ return i2o_dev;
}
/**
@@ -321,9 +283,22 @@ static struct i2o_device *i2o_device_add(struct i2o_controller *c,
*/
void i2o_device_remove(struct i2o_device *i2o_dev)
{
+ struct i2o_device *tmp;
+ struct i2o_controller *c = i2o_dev->iop;
+
i2o_driver_notify_device_remove_all(i2o_dev);
- i2o_remove_sysfs_links(i2o_dev);
+
+ sysfs_remove_link(&i2o_dev->device.kobj, "parent");
+ sysfs_remove_link(&i2o_dev->device.kobj, "user");
+
+ list_for_each_entry(tmp, &c->devices, list) {
+ if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
+ sysfs_remove_link(&tmp->device.kobj, "parent");
+ if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
+ sysfs_remove_link(&tmp->device.kobj, "user");
+ }
list_del(&i2o_dev->list);
+
device_unregister(&i2o_dev->device);
}
@@ -341,56 +316,83 @@ int i2o_device_parse_lct(struct i2o_controller *c)
{
struct i2o_device *dev, *tmp;
i2o_lct *lct;
- int i;
- int max;
+ u32 *dlct = c->dlct.virt;
+ int max = 0, i = 0;
+ u16 table_size;
+ u32 buf;
down(&c->lct_lock);
kfree(c->lct);
- lct = c->dlct.virt;
+ buf = le32_to_cpu(*dlct++);
+ table_size = buf & 0xffff;
- c->lct = kmalloc(lct->table_size * 4, GFP_KERNEL);
- if (!c->lct) {
+ lct = c->lct = kmalloc(table_size * 4, GFP_KERNEL);
+ if (!lct) {
up(&c->lct_lock);
return -ENOMEM;
}
- if (lct->table_size * 4 > c->dlct.len) {
- memcpy(c->lct, c->dlct.virt, c->dlct.len);
- up(&c->lct_lock);
- return -EAGAIN;
- }
+ lct->lct_ver = buf >> 28;
+ lct->boot_tid = buf >> 16 & 0xfff;
+ lct->table_size = table_size;
+ lct->change_ind = le32_to_cpu(*dlct++);
+ lct->iop_flags = le32_to_cpu(*dlct++);
- memcpy(c->lct, c->dlct.virt, lct->table_size * 4);
-
- lct = c->lct;
-
- max = (lct->table_size - 3) / 9;
+ table_size -= 3;
pr_debug("%s: LCT has %d entries (LCT size: %d)\n", c->name, max,
lct->table_size);
- /* remove devices, which are not in the LCT anymore */
- list_for_each_entry_safe(dev, tmp, &c->devices, list) {
+ while (table_size > 0) {
+ i2o_lct_entry *entry = &lct->lct_entry[max];
int found = 0;
- for (i = 0; i < max; i++) {
- if (lct->lct_entry[i].tid == dev->lct_data.tid) {
+ buf = le32_to_cpu(*dlct++);
+ entry->entry_size = buf & 0xffff;
+ entry->tid = buf >> 16 & 0xfff;
+
+ entry->change_ind = le32_to_cpu(*dlct++);
+ entry->device_flags = le32_to_cpu(*dlct++);
+
+ buf = le32_to_cpu(*dlct++);
+ entry->class_id = buf & 0xfff;
+ entry->version = buf >> 12 & 0xf;
+ entry->vendor_id = buf >> 16;
+
+ entry->sub_class = le32_to_cpu(*dlct++);
+
+ buf = le32_to_cpu(*dlct++);
+ entry->user_tid = buf & 0xfff;
+ entry->parent_tid = buf >> 12 & 0xfff;
+ entry->bios_info = buf >> 24;
+
+ memcpy(&entry->identity_tag, dlct, 8);
+ dlct += 2;
+
+ entry->event_capabilities = le32_to_cpu(*dlct++);
+
+ /* add new devices, which are new in the LCT */
+ list_for_each_entry_safe(dev, tmp, &c->devices, list) {
+ if (entry->tid == dev->lct_data.tid) {
found = 1;
break;
}
}
if (!found)
- i2o_device_remove(dev);
+ i2o_device_add(c, entry);
+
+ table_size -= 9;
+ max++;
}
- /* add new devices, which are new in the LCT */
- for (i = 0; i < max; i++) {
+ /* remove devices, which are not in the LCT anymore */
+ list_for_each_entry_safe(dev, tmp, &c->devices, list) {
int found = 0;
- list_for_each_entry_safe(dev, tmp, &c->devices, list) {
+ for (i = 0; i < max; i++) {
if (lct->lct_entry[i].tid == dev->lct_data.tid) {
found = 1;
break;
@@ -398,14 +400,14 @@ int i2o_device_parse_lct(struct i2o_controller *c)
}
if (!found)
- i2o_device_add(c, &lct->lct_entry[i]);
+ i2o_device_remove(dev);
}
+
up(&c->lct_lock);
return 0;
}
-
/*
* Run time support routines
*/
@@ -419,13 +421,9 @@ int i2o_device_parse_lct(struct i2o_controller *c)
* ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
*/
int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
- int oplen, void *reslist, int reslen)
+ int oplen, void *reslist, int reslen)
{
- struct i2o_message __iomem *msg;
- u32 m;
- u32 *res32 = (u32 *) reslist;
- u32 *restmp = (u32 *) reslist;
- int len = 0;
+ struct i2o_message *msg;
int i = 0;
int rc;
struct i2o_dma res;
@@ -437,26 +435,27 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL))
return -ENOMEM;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY) {
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg)) {
i2o_dma_free(dev, &res);
- return -ETIMEDOUT;
+ return PTR_ERR(msg);
}
i = 0;
- writel(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid,
- &msg->u.head[1]);
- writel(0, &msg->body[i++]);
- writel(0x4C000000 | oplen, &msg->body[i++]); /* OperationList */
- memcpy_toio(&msg->body[i], oplist, oplen);
+ msg->u.head[1] =
+ cpu_to_le32(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid);
+ msg->body[i++] = cpu_to_le32(0x00000000);
+ msg->body[i++] = cpu_to_le32(0x4C000000 | oplen); /* OperationList */
+ memcpy(&msg->body[i], oplist, oplen);
i += (oplen / 4 + (oplen % 4 ? 1 : 0));
- writel(0xD0000000 | res.len, &msg->body[i++]); /* ResultList */
- writel(res.phys, &msg->body[i++]);
+ msg->body[i++] = cpu_to_le32(0xD0000000 | res.len); /* ResultList */
+ msg->body[i++] = cpu_to_le32(res.phys);
- writel(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) |
- SGL_OFFSET_5, &msg->u.head[0]);
+ msg->u.head[0] =
+ cpu_to_le32(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) |
+ SGL_OFFSET_5);
- rc = i2o_msg_post_wait_mem(c, m, 10, &res);
+ rc = i2o_msg_post_wait_mem(c, msg, 10, &res);
/* This only looks like a memory leak - don't "fix" it. */
if (rc == -ETIMEDOUT)
@@ -465,36 +464,7 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
memcpy(reslist, res.virt, res.len);
i2o_dma_free(dev, &res);
- /* Query failed */
- if (rc)
- return rc;
- /*
- * Calculate number of bytes of Result LIST
- * We need to loop through each Result BLOCK and grab the length
- */
- restmp = res32 + 1;
- len = 1;
- for (i = 0; i < (res32[0] & 0X0000FFFF); i++) {
- if (restmp[0] & 0x00FF0000) { /* BlockStatus != SUCCESS */
- printk(KERN_WARNING
- "%s - Error:\n ErrorInfoSize = 0x%02x, "
- "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
- (cmd ==
- I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET" :
- "PARAMS_GET", res32[1] >> 24,
- (res32[1] >> 16) & 0xFF, res32[1] & 0xFFFF);
-
- /*
- * If this is the only request,than we return an error
- */
- if ((res32[0] & 0x0000FFFF) == 1) {
- return -((res32[1] >> 16) & 0xFF); /* -BlockStatus */
- }
- }
- len += restmp[0] & 0x0000FFFF; /* Length of res BLOCK */
- restmp += restmp[0] & 0x0000FFFF; /* Skip to next BLOCK */
- }
- return (len << 2); /* bytes used by result list */
+ return rc;
}
/*
@@ -503,28 +473,25 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field,
void *buf, int buflen)
{
- u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
+ u32 opblk[] = { cpu_to_le32(0x00000001),
+ cpu_to_le32((u16) group << 16 | I2O_PARAMS_FIELD_GET),
+ cpu_to_le32((s16) field << 16 | 0x00000001)
+ };
u8 *resblk; /* 8 bytes for header */
- int size;
-
- if (field == -1) /* whole group */
- opblk[4] = -1;
+ int rc;
resblk = kmalloc(buflen + 8, GFP_KERNEL | GFP_ATOMIC);
if (!resblk)
return -ENOMEM;
- size = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk,
- sizeof(opblk), resblk, buflen + 8);
+ rc = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk,
+ sizeof(opblk), resblk, buflen + 8);
memcpy(buf, resblk + 8, buflen); /* cut off header */
kfree(resblk);
- if (size > buflen)
- return buflen;
-
- return size;
+ return rc;
}
/*
@@ -534,12 +501,12 @@ int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field,
* else return specific fields
* ibuf contains fieldindexes
*
- * if oper == I2O_PARAMS_LIST_GET, get from specific rows
- * if fieldcount == -1 return all fields
+ * if oper == I2O_PARAMS_LIST_GET, get from specific rows
+ * if fieldcount == -1 return all fields
* ibuf contains rowcount, keyvalues
- * else return specific fields
+ * else return specific fields
* fieldcount is # of fieldindexes
- * ibuf contains fieldindexes, rowcount, keyvalues
+ * ibuf contains fieldindexes, rowcount, keyvalues
*
* You could also use directly function i2o_issue_params().
*/
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index 0fb9c4e2ad4..64130227574 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -61,12 +61,10 @@ static int i2o_bus_match(struct device *dev, struct device_driver *drv)
};
/* I2O bus type */
-extern struct device_attribute i2o_device_attrs[];
-
struct bus_type i2o_bus_type = {
.name = "i2o",
.match = i2o_bus_match,
- .dev_attrs = i2o_device_attrs,
+ .dev_attrs = i2o_device_attrs
};
/**
@@ -219,14 +217,14 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m)
/* cut of header from message size (in 32-bit words) */
size = (le32_to_cpu(msg->u.head[0]) >> 16) - 5;
- evt = kmalloc(size * 4 + sizeof(*evt), GFP_ATOMIC | __GFP_ZERO);
+ evt = kzalloc(size * 4 + sizeof(*evt), GFP_ATOMIC);
if (!evt)
return -ENOMEM;
evt->size = size;
evt->tcntxt = le32_to_cpu(msg->u.s.tcntxt);
evt->event_indicator = le32_to_cpu(msg->body[0]);
- memcpy(&evt->tcntxt, &msg->u.s.tcntxt, size * 4);
+ memcpy(&evt->data, &msg->body[1], size * 4);
list_for_each_entry_safe(dev, tmp, &c->devices, list)
if (dev->lct_data.tid == tid) {
@@ -349,12 +347,10 @@ int __init i2o_driver_init(void)
osm_info("max drivers = %d\n", i2o_max_drivers);
i2o_drivers =
- kmalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL);
+ kzalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL);
if (!i2o_drivers)
return -ENOMEM;
- memset(i2o_drivers, 0, i2o_max_drivers * sizeof(*i2o_drivers));
-
rc = bus_register(&i2o_bus_type);
if (rc < 0)
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index 9c339a2505b..9bb9859f6df 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -33,7 +33,7 @@
#include <linux/workqueue.h>
#include <linux/string.h>
#include <linux/slab.h>
-#include <linux/sched.h> /* wait_event_interruptible_timeout() needs this */
+#include <linux/sched.h> /* wait_event_interruptible_timeout() needs this */
#include <asm/param.h> /* HZ */
#include "core.h"
@@ -75,11 +75,9 @@ static struct i2o_exec_wait *i2o_exec_wait_alloc(void)
{
struct i2o_exec_wait *wait;
- wait = kmalloc(sizeof(*wait), GFP_KERNEL);
+ wait = kzalloc(sizeof(*wait), GFP_KERNEL);
if (!wait)
- return ERR_PTR(-ENOMEM);
-
- memset(wait, 0, sizeof(*wait));
+ return NULL;
INIT_LIST_HEAD(&wait->list);
@@ -114,13 +112,12 @@ static void i2o_exec_wait_free(struct i2o_exec_wait *wait)
* Returns 0 on success, negative error code on timeout or positive error
* code from reply.
*/
-int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long
- timeout, struct i2o_dma *dma)
+int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
+ unsigned long timeout, struct i2o_dma *dma)
{
DECLARE_WAIT_QUEUE_HEAD(wq);
struct i2o_exec_wait *wait;
static u32 tcntxt = 0x80000000;
- struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m);
int rc = 0;
wait = i2o_exec_wait_alloc();
@@ -138,15 +135,15 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long
* We will only use transaction contexts >= 0x80000000 for POST WAIT,
* so we could find a POST WAIT reply easier in the reply handler.
*/
- writel(i2o_exec_driver.context, &msg->u.s.icntxt);
+ msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
wait->tcntxt = tcntxt++;
- writel(wait->tcntxt, &msg->u.s.tcntxt);
+ msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);
/*
* Post the message to the controller. At some point later it will
* return. If we time out before it returns then complete will be zero.
*/
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
if (!wait->complete) {
wait->wq = &wq;
@@ -266,13 +263,14 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
*
* Returns number of bytes printed into buffer.
*/
-static ssize_t i2o_exec_show_vendor_id(struct device *d, struct device_attribute *attr, char *buf)
+static ssize_t i2o_exec_show_vendor_id(struct device *d,
+ struct device_attribute *attr, char *buf)
{
struct i2o_device *dev = to_i2o_device(d);
u16 id;
- if (i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) {
- sprintf(buf, "0x%04x", id);
+ if (!i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) {
+ sprintf(buf, "0x%04x", le16_to_cpu(id));
return strlen(buf) + 1;
}
@@ -286,13 +284,15 @@ static ssize_t i2o_exec_show_vendor_id(struct device *d, struct device_attribute
*
* Returns number of bytes printed into buffer.
*/
-static ssize_t i2o_exec_show_product_id(struct device *d, struct device_attribute *attr, char *buf)
+static ssize_t i2o_exec_show_product_id(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
{
struct i2o_device *dev = to_i2o_device(d);
u16 id;
- if (i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) {
- sprintf(buf, "0x%04x", id);
+ if (!i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) {
+ sprintf(buf, "0x%04x", le16_to_cpu(id));
return strlen(buf) + 1;
}
@@ -362,7 +362,9 @@ static void i2o_exec_lct_modified(struct i2o_controller *c)
if (i2o_device_parse_lct(c) != -EAGAIN)
change_ind = c->lct->change_ind + 1;
+#ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES
i2o_exec_lct_notify(c, change_ind);
+#endif
};
/**
@@ -385,23 +387,22 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m,
u32 context;
if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) {
+ struct i2o_message __iomem *pmsg;
+ u32 pm;
+
/*
* If Fail bit is set we must take the transaction context of
* the preserved message to find the right request again.
*/
- struct i2o_message __iomem *pmsg;
- u32 pm;
pm = le32_to_cpu(msg->body[3]);
-
pmsg = i2o_msg_in_to_virt(c, pm);
+ context = readl(&pmsg->u.s.tcntxt);
i2o_report_status(KERN_INFO, "i2o_core", msg);
- context = readl(&pmsg->u.s.tcntxt);
-
/* Release the preserved msg */
- i2o_msg_nop(c, pm);
+ i2o_msg_nop_mfa(c, pm);
} else
context = le32_to_cpu(msg->u.s.tcntxt);
@@ -462,25 +463,26 @@ static void i2o_exec_event(struct i2o_event *evt)
*/
int i2o_exec_lct_get(struct i2o_controller *c)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
int i = 0;
int rc = -EAGAIN;
for (i = 1; i <= I2O_LCT_GET_TRIES; i++) {
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
-
- writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]);
- writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(0xffffffff, &msg->body[0]);
- writel(0x00000000, &msg->body[1]);
- writel(0xd0000000 | c->dlct.len, &msg->body[2]);
- writel(c->dlct.phys, &msg->body[3]);
-
- rc = i2o_msg_post_wait(c, m, I2O_TIMEOUT_LCT_GET);
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] =
+ cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->body[0] = cpu_to_le32(0xffffffff);
+ msg->body[1] = cpu_to_le32(0x00000000);
+ msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
+ msg->body[3] = cpu_to_le32(c->dlct.phys);
+
+ rc = i2o_msg_post_wait(c, msg, I2O_TIMEOUT_LCT_GET);
if (rc < 0)
break;
@@ -506,29 +508,29 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
{
i2o_status_block *sb = c->status_block.virt;
struct device *dev;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
dev = &c->pdev->dev;
- if (i2o_dma_realloc(dev, &c->dlct, sb->expected_lct_size, GFP_KERNEL))
+ if (i2o_dma_realloc
+ (dev, &c->dlct, le32_to_cpu(sb->expected_lct_size), GFP_KERNEL))
return -ENOMEM;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
-
- writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]);
- writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(i2o_exec_driver.context, &msg->u.s.icntxt);
- writel(0, &msg->u.s.tcntxt); /* FIXME */
- writel(0xffffffff, &msg->body[0]);
- writel(change_ind, &msg->body[1]);
- writel(0xd0000000 | c->dlct.len, &msg->body[2]);
- writel(c->dlct.phys, &msg->body[3]);
-
- i2o_msg_post(c, m);
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
+ msg->u.head[1] = cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(0x00000000);
+ msg->body[0] = cpu_to_le32(0xffffffff);
+ msg->body[1] = cpu_to_le32(change_ind);
+ msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
+ msg->body[3] = cpu_to_le32(c->dlct.phys);
+
+ i2o_msg_post(c, msg);
return 0;
};
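All converted call sites now follow the same post-wait idiom: fetch a message frame, fill it with cpu_to_le32() stores, and hand the pointer back to the post routine. A minimal sketch of that idiom, modelled on the converted functions in this patch (the wrapper name, command and 60-second timeout are illustrative only):

/* Sketch: the pointer-returning i2o_msg_get_wait() replaces the old
 * mfa handle plus writel() accesses to the in-message frame. */
static int i2o_send_simple_cmd(struct i2o_device *dev, u32 cmd)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(0);

	return i2o_msg_post_wait(dev->iop, msg, 60);
}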
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index f283b5bafdd..b09fb630715 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -59,10 +59,12 @@
#include <linux/blkdev.h>
#include <linux/hdreg.h>
+#include <scsi/scsi.h>
+
#include "i2o_block.h"
#define OSM_NAME "block-osm"
-#define OSM_VERSION "1.287"
+#define OSM_VERSION "1.325"
#define OSM_DESCRIPTION "I2O Block Device OSM"
static struct i2o_driver i2o_block_driver;
@@ -130,20 +132,20 @@ static int i2o_block_remove(struct device *dev)
*/
static int i2o_block_device_flush(struct i2o_device *dev)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
- m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->lct_data.tid,
- &msg->u.head[1]);
- writel(60 << 16, &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->
+ lct_data.tid);
+ msg->body[0] = cpu_to_le32(60 << 16);
osm_debug("Flushing...\n");
- return i2o_msg_post_wait(dev->iop, m, 60);
+ return i2o_msg_post_wait(dev->iop, msg, 60);
};
/**
@@ -181,21 +183,21 @@ static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk,
*/
static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
{
- struct i2o_message __iomem *msg;
- u32 m;
-
- m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
-
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->lct_data.tid,
- &msg->u.head[1]);
- writel(-1, &msg->body[0]);
- writel(0, &msg->body[1]);
+ struct i2o_message *msg;
+
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->
+ lct_data.tid);
+ msg->body[0] = cpu_to_le32(-1);
+ msg->body[1] = cpu_to_le32(0x00000000);
osm_debug("Mounting...\n");
- return i2o_msg_post_wait(dev->iop, m, 2);
+ return i2o_msg_post_wait(dev->iop, msg, 2);
};
/**
@@ -210,20 +212,20 @@ static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
*/
static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
- m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
- &msg->u.head[1]);
- writel(-1, &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->
+ lct_data.tid);
+ msg->body[0] = cpu_to_le32(-1);
osm_debug("Locking...\n");
- return i2o_msg_post_wait(dev->iop, m, 2);
+ return i2o_msg_post_wait(dev->iop, msg, 2);
};
/**
@@ -238,20 +240,20 @@ static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
*/
static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
- m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
- &msg->u.head[1]);
- writel(media_id, &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->
+ lct_data.tid);
+ msg->body[0] = cpu_to_le32(media_id);
osm_debug("Unlocking...\n");
- return i2o_msg_post_wait(dev->iop, m, 2);
+ return i2o_msg_post_wait(dev->iop, msg, 2);
};
/**
@@ -267,21 +269,21 @@ static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
{
struct i2o_device *i2o_dev = dev->i2o_dev;
struct i2o_controller *c = i2o_dev->iop;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
int rc;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->lct_data.
- tid, &msg->u.head[1]);
- writel(op << 24, &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->
+ lct_data.tid);
+ msg->body[0] = cpu_to_le32(op << 24);
osm_debug("Power...\n");
- rc = i2o_msg_post_wait(c, m, 60);
+ rc = i2o_msg_post_wait(c, msg, 60);
if (!rc)
dev->power = op;
@@ -331,7 +333,7 @@ static inline void i2o_block_request_free(struct i2o_block_request *ireq)
*/
static inline int i2o_block_sglist_alloc(struct i2o_controller *c,
struct i2o_block_request *ireq,
- u32 __iomem ** mptr)
+ u32 ** mptr)
{
int nents;
enum dma_data_direction direction;
@@ -466,7 +468,7 @@ static void i2o_block_end_request(struct request *req, int uptodate,
spin_lock_irqsave(q->queue_lock, flags);
- end_that_request_last(req);
+ end_that_request_last(req, uptodate);
if (likely(dev)) {
dev->open_queue_depth--;
@@ -660,6 +662,13 @@ static int i2o_block_release(struct inode *inode, struct file *file)
return 0;
}
+static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ i2o_block_biosparam(get_capacity(bdev->bd_disk),
+ &geo->cylinders, &geo->heads, &geo->sectors);
+ return 0;
+}
+
/**
* i2o_block_ioctl - Issue device specific ioctl calls.
* @cmd: ioctl command
@@ -674,7 +683,6 @@ static int i2o_block_ioctl(struct inode *inode, struct file *file,
{
struct gendisk *disk = inode->i_bdev->bd_disk;
struct i2o_block_device *dev = disk->private_data;
- void __user *argp = (void __user *)arg;
/* Anyone capable of this syscall can do *real bad* things */
@@ -682,15 +690,6 @@ static int i2o_block_ioctl(struct inode *inode, struct file *file,
return -EPERM;
switch (cmd) {
- case HDIO_GETGEO:
- {
- struct hd_geometry g;
- i2o_block_biosparam(get_capacity(disk),
- &g.cylinders, &g.heads, &g.sectors);
- g.start = get_start_sect(inode->i_bdev);
- return copy_to_user(argp, &g, sizeof(g)) ? -EFAULT : 0;
- }
-
case BLKI2OGRSTRAT:
return put_user(dev->rcache, (int __user *)arg);
case BLKI2OGWSTRAT:
@@ -745,10 +744,9 @@ static int i2o_block_transfer(struct request *req)
struct i2o_block_device *dev = req->rq_disk->private_data;
struct i2o_controller *c;
int tid = dev->i2o_dev->lct_data.tid;
- struct i2o_message __iomem *msg;
- u32 __iomem *mptr;
+ struct i2o_message *msg;
+ u32 *mptr;
struct i2o_block_request *ireq = req->special;
- u32 m;
u32 tcntxt;
u32 sgl_offset = SGL_OFFSET_8;
u32 ctl_flags = 0x00000000;
@@ -763,9 +761,9 @@ static int i2o_block_transfer(struct request *req)
c = dev->i2o_dev->iop;
- m = i2o_msg_get(c, &msg);
- if (m == I2O_QUEUE_EMPTY) {
- rc = -EBUSY;
+ msg = i2o_msg_get(c);
+ if (IS_ERR(msg)) {
+ rc = PTR_ERR(msg);
goto exit;
}
@@ -775,8 +773,8 @@ static int i2o_block_transfer(struct request *req)
goto nop_msg;
}
- writel(i2o_block_driver.context, &msg->u.s.icntxt);
- writel(tcntxt, &msg->u.s.tcntxt);
+ msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(tcntxt);
mptr = &msg->body[0];
@@ -834,11 +832,11 @@ static int i2o_block_transfer(struct request *req)
sgl_offset = SGL_OFFSET_12;
- writel(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid,
- &msg->u.head[1]);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid);
- writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++);
- writel(tid, mptr++);
+ *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
+ *mptr++ = cpu_to_le32(tid);
/*
* ENABLE_DISCONNECT
@@ -846,29 +844,31 @@ static int i2o_block_transfer(struct request *req)
* RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
*/
if (rq_data_dir(req) == READ) {
- cmd[0] = 0x28;
+ cmd[0] = READ_10;
scsi_flags = 0x60a0000a;
} else {
- cmd[0] = 0x2A;
+ cmd[0] = WRITE_10;
scsi_flags = 0xa0a0000a;
}
- writel(scsi_flags, mptr++);
+ *mptr++ = cpu_to_le32(scsi_flags);
*((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
*((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);
- memcpy_toio(mptr, cmd, 10);
+ memcpy(mptr, cmd, 10);
mptr += 4;
- writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++);
+ *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
} else
#endif
{
- writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]);
- writel(ctl_flags, mptr++);
- writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++);
- writel((u32) (req->sector << KERNEL_SECTOR_SHIFT), mptr++);
- writel(req->sector >> (32 - KERNEL_SECTOR_SHIFT), mptr++);
+ msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
+ *mptr++ = cpu_to_le32(ctl_flags);
+ *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
+ *mptr++ =
+ cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT));
+ *mptr++ =
+ cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT));
}
if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
@@ -876,13 +876,13 @@ static int i2o_block_transfer(struct request *req)
goto context_remove;
}
- writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) |
- sgl_offset, &msg->u.head[0]);
+ msg->u.head[0] =
+ cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);
list_add_tail(&ireq->queue, &dev->open_queue);
dev->open_queue_depth++;
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
return 0;
@@ -890,7 +890,7 @@ static int i2o_block_transfer(struct request *req)
i2o_cntxt_list_remove(c, req);
nop_msg:
- i2o_msg_nop(c, m);
+ i2o_msg_nop(c, msg);
exit:
return rc;
@@ -959,6 +959,7 @@ static struct block_device_operations i2o_block_fops = {
.open = i2o_block_open,
.release = i2o_block_release,
.ioctl = i2o_block_ioctl,
+ .getgeo = i2o_block_getgeo,
.media_changed = i2o_block_media_changed
};
@@ -978,13 +979,12 @@ static struct i2o_block_device *i2o_block_device_alloc(void)
struct request_queue *queue;
int rc;
- dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
osm_err("Insufficient memory to allocate I2O Block disk.\n");
rc = -ENOMEM;
goto exit;
}
- memset(dev, 0, sizeof(*dev));
INIT_LIST_HEAD(&dev->open_queue);
spin_lock_init(&dev->lock);
@@ -1049,8 +1049,8 @@ static int i2o_block_probe(struct device *dev)
int rc;
u64 size;
u32 blocksize;
- u32 flags, status;
u16 body_size = 4;
+ u16 power;
unsigned short max_sectors;
#ifdef CONFIG_I2O_EXT_ADAPTEC
@@ -1108,22 +1108,20 @@ static int i2o_block_probe(struct device *dev)
* Ask for the current media data. If that isn't supported
* then we ask for the device capacity data
*/
- if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
- i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
- blk_queue_hardsect_size(queue, blocksize);
+ if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
+ !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
+ blk_queue_hardsect_size(queue, le32_to_cpu(blocksize));
} else
osm_warn("unable to get blocksize of %s\n", gd->disk_name);
- if (i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) ||
- i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
- set_capacity(gd, size >> KERNEL_SECTOR_SHIFT);
+ if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) ||
+ !i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
+ set_capacity(gd, le64_to_cpu(size) >> KERNEL_SECTOR_SHIFT);
} else
osm_warn("could not get size of %s\n", gd->disk_name);
- if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &i2o_blk_dev->power, 2))
- i2o_blk_dev->power = 0;
- i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4);
- i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4);
+ if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
+ i2o_blk_dev->power = power;
i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 3c3a7abebb1..89daf67b764 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -36,12 +36,12 @@
#include <asm/uaccess.h>
-#include "core.h"
-
#define SG_TABLESIZE 30
-static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd,
- unsigned long arg);
+extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int);
+
+static int i2o_cfg_ioctl(struct inode *, struct file *, unsigned int,
+ unsigned long);
static spinlock_t i2o_config_lock;
@@ -230,8 +230,7 @@ static int i2o_cfg_swdl(unsigned long arg)
struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
unsigned char maxfrag = 0, curfrag = 1;
struct i2o_dma buffer;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
unsigned int status = 0, swlen = 0, fragsize = 8192;
struct i2o_controller *c;
@@ -257,31 +256,34 @@ static int i2o_cfg_swdl(unsigned long arg)
if (!c)
return -ENXIO;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -EBUSY;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
- i2o_msg_nop(c, m);
+ i2o_msg_nop(c, msg);
return -ENOMEM;
}
__copy_from_user(buffer.virt, kxfer.buf, fragsize);
- writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]);
- writel(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(i2o_config_driver.context, &msg->u.head[2]);
- writel(0, &msg->u.head[3]);
- writel((((u32) kxfer.flags) << 24) | (((u32) kxfer.sw_type) << 16) |
- (((u32) maxfrag) << 8) | (((u32) curfrag)), &msg->body[0]);
- writel(swlen, &msg->body[1]);
- writel(kxfer.sw_id, &msg->body[2]);
- writel(0xD0000000 | fragsize, &msg->body[3]);
- writel(buffer.phys, &msg->body[4]);
+ msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+ msg->u.head[3] = cpu_to_le32(0);
+ msg->body[0] =
+ cpu_to_le32((((u32) kxfer.flags) << 24) | (((u32) kxfer.
+ sw_type) << 16) |
+ (((u32) maxfrag) << 8) | (((u32) curfrag)));
+ msg->body[1] = cpu_to_le32(swlen);
+ msg->body[2] = cpu_to_le32(kxfer.sw_id);
+ msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
+ msg->body[4] = cpu_to_le32(buffer.phys);
osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
- status = i2o_msg_post_wait_mem(c, m, 60, &buffer);
+ status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
if (status != -ETIMEDOUT)
i2o_dma_free(&c->pdev->dev, &buffer);
@@ -302,8 +304,7 @@ static int i2o_cfg_swul(unsigned long arg)
struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
unsigned char maxfrag = 0, curfrag = 1;
struct i2o_dma buffer;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
unsigned int status = 0, swlen = 0, fragsize = 8192;
struct i2o_controller *c;
int ret = 0;
@@ -330,30 +331,30 @@ static int i2o_cfg_swul(unsigned long arg)
if (!c)
return -ENXIO;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -EBUSY;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
- i2o_msg_nop(c, m);
+ i2o_msg_nop(c, msg);
return -ENOMEM;
}
- writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]);
- writel(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(i2o_config_driver.context, &msg->u.head[2]);
- writel(0, &msg->u.head[3]);
- writel((u32) kxfer.flags << 24 | (u32) kxfer.
- sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag,
- &msg->body[0]);
- writel(swlen, &msg->body[1]);
- writel(kxfer.sw_id, &msg->body[2]);
- writel(0xD0000000 | fragsize, &msg->body[3]);
- writel(buffer.phys, &msg->body[4]);
+ msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID);
+ msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+ msg->u.head[3] = cpu_to_le32(0);
+ msg->body[0] =
+ cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.
+ sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag);
+ msg->body[1] = cpu_to_le32(swlen);
+ msg->body[2] = cpu_to_le32(kxfer.sw_id);
+ msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
+ msg->body[4] = cpu_to_le32(buffer.phys);
osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
- status = i2o_msg_post_wait_mem(c, m, 60, &buffer);
+ status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
if (status != I2O_POST_WAIT_OK) {
if (status != -ETIMEDOUT)
@@ -380,8 +381,7 @@ static int i2o_cfg_swdel(unsigned long arg)
struct i2o_controller *c;
struct i2o_sw_xfer kxfer;
struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
unsigned int swlen;
int token;
@@ -395,21 +395,21 @@ static int i2o_cfg_swdel(unsigned long arg)
if (!c)
return -ENXIO;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -EBUSY;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(i2o_config_driver.context, &msg->u.head[2]);
- writel(0, &msg->u.head[3]);
- writel((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16,
- &msg->body[0]);
- writel(swlen, &msg->body[1]);
- writel(kxfer.sw_id, &msg->body[2]);
+ msg->u.head[0] = cpu_to_le32(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID);
+ msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+ msg->u.head[3] = cpu_to_le32(0);
+ msg->body[0] =
+ cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16);
+ msg->body[1] = cpu_to_le32(swlen);
+ msg->body[2] = cpu_to_le32(kxfer.sw_id);
- token = i2o_msg_post_wait(c, m, 10);
+ token = i2o_msg_post_wait(c, msg, 10);
if (token != I2O_POST_WAIT_OK) {
osm_info("swdel failed, DetailedStatus = %d\n", token);
@@ -423,25 +423,24 @@ static int i2o_cfg_validate(unsigned long arg)
{
int token;
int iop = (int)arg;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
struct i2o_controller *c;
c = i2o_find_iop(iop);
if (!c)
return -ENXIO;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -EBUSY;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop,
- &msg->u.head[1]);
- writel(i2o_config_driver.context, &msg->u.head[2]);
- writel(0, &msg->u.head[3]);
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop);
+ msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+ msg->u.head[3] = cpu_to_le32(0);
- token = i2o_msg_post_wait(c, m, 10);
+ token = i2o_msg_post_wait(c, msg, 10);
if (token != I2O_POST_WAIT_OK) {
osm_info("Can't validate configuration, ErrorStatus = %d\n",
@@ -454,8 +453,7 @@ static int i2o_cfg_validate(unsigned long arg)
static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg;
struct i2o_evt_id kdesc;
struct i2o_controller *c;
@@ -474,18 +472,19 @@ static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
if (!d)
return -ENODEV;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -EBUSY;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | kdesc.tid,
- &msg->u.head[1]);
- writel(i2o_config_driver.context, &msg->u.head[2]);
- writel(i2o_cntxt_list_add(c, fp->private_data), &msg->u.head[3]);
- writel(kdesc.evt_mask, &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 |
+ kdesc.tid);
+ msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+ msg->u.head[3] = cpu_to_le32(i2o_cntxt_list_add(c, fp->private_data));
+ msg->body[0] = cpu_to_le32(kdesc.evt_mask);
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
return 0;
}
@@ -537,7 +536,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
u32 sg_index = 0;
i2o_status_block *sb;
struct i2o_message *msg;
- u32 m;
unsigned int iop;
cmd = (struct i2o_cmd_passthru32 __user *)arg;
@@ -553,7 +551,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
return -ENXIO;
}
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
sb = c->status_block.virt;
@@ -585,19 +583,15 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
reply_size >>= 16;
reply_size <<= 2;
- reply = kmalloc(reply_size, GFP_KERNEL);
+ reply = kzalloc(reply_size, GFP_KERNEL);
if (!reply) {
printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
c->name);
return -ENOMEM;
}
- memset(reply, 0, reply_size);
sg_offset = (msg->u.head[0] >> 4) & 0x0f;
- writel(i2o_config_driver.context, &msg->u.s.icntxt);
- writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);
-
memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
if (sg_offset) {
struct sg_simple_element *sg;
@@ -631,7 +625,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
goto cleanup;
}
sg_size = sg[i].flag_count & 0xffffff;
- p = &(sg_list[sg_index++]);
+ p = &(sg_list[sg_index]);
/* Allocate memory for the transfer */
if (i2o_dma_alloc
(&c->pdev->dev, p, sg_size,
@@ -642,6 +636,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
rcode = -ENOMEM;
goto sg_list_cleanup;
}
+ sg_index++;
/* Copy in the user's SG buffer if necessary */
if (sg[i].
flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) {
@@ -662,9 +657,11 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
}
}
- rcode = i2o_msg_post_wait(c, m, 60);
- if (rcode)
+ rcode = i2o_msg_post_wait(c, msg, 60);
+ if (rcode) {
+ reply[4] = ((u32) rcode) << 24;
goto sg_list_cleanup;
+ }
if (sg_offset) {
u32 msg[I2O_OUTBOUND_MSG_FRAME_SIZE];
@@ -714,6 +711,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
}
}
+ sg_list_cleanup:
/* Copy back the reply to user space */
if (reply_size) {
// we wrote our own values for context - now restore the user supplied ones
@@ -731,7 +729,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
}
}
- sg_list_cleanup:
for (i = 0; i < sg_index; i++)
i2o_dma_free(&c->pdev->dev, &sg_list[i]);
@@ -780,8 +777,7 @@ static int i2o_cfg_passthru(unsigned long arg)
u32 i = 0;
void *p = NULL;
i2o_status_block *sb;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
unsigned int iop;
if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
@@ -793,7 +789,7 @@ static int i2o_cfg_passthru(unsigned long arg)
return -ENXIO;
}
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
sb = c->status_block.virt;
@@ -820,19 +816,15 @@ static int i2o_cfg_passthru(unsigned long arg)
reply_size >>= 16;
reply_size <<= 2;
- reply = kmalloc(reply_size, GFP_KERNEL);
+ reply = kzalloc(reply_size, GFP_KERNEL);
if (!reply) {
printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
c->name);
return -ENOMEM;
}
- memset(reply, 0, reply_size);
sg_offset = (msg->u.head[0] >> 4) & 0x0f;
- writel(i2o_config_driver.context, &msg->u.s.icntxt);
- writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);
-
memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
if (sg_offset) {
struct sg_simple_element *sg;
@@ -894,9 +886,11 @@ static int i2o_cfg_passthru(unsigned long arg)
}
}
- rcode = i2o_msg_post_wait(c, m, 60);
- if (rcode)
+ rcode = i2o_msg_post_wait(c, msg, 60);
+ if (rcode) {
+ reply[4] = ((u32) rcode) << 24;
goto sg_list_cleanup;
+ }
if (sg_offset) {
u32 msg[128];
@@ -946,6 +940,7 @@ static int i2o_cfg_passthru(unsigned long arg)
}
}
+ sg_list_cleanup:
/* Copy back the reply to user space */
if (reply_size) {
// we wrote our own values for context - now restore the user supplied ones
@@ -962,7 +957,6 @@ static int i2o_cfg_passthru(unsigned long arg)
}
}
- sg_list_cleanup:
for (i = 0; i < sg_index; i++)
kfree(sg_list[i]);
diff --git a/drivers/message/i2o/i2o_lan.h b/drivers/message/i2o/i2o_lan.h
index 561d63304d7..6502b817df5 100644
--- a/drivers/message/i2o/i2o_lan.h
+++ b/drivers/message/i2o/i2o_lan.h
@@ -103,14 +103,14 @@
#define I2O_LAN_DSC_SUSPENDED 0x11
struct i2o_packet_info {
- u32 offset : 24;
- u32 flags : 8;
- u32 len : 24;
- u32 status : 8;
+ u32 offset:24;
+ u32 flags:8;
+ u32 len:24;
+ u32 status:8;
};
struct i2o_bucket_descriptor {
- u32 context; /* FIXME: 64bit support */
+ u32 context; /* FIXME: 64bit support */
struct i2o_packet_info packet_info[1];
};
@@ -127,14 +127,14 @@ struct i2o_lan_local {
u8 unit;
struct i2o_device *i2o_dev;
- struct fddi_statistics stats; /* see also struct net_device_stats */
- unsigned short (*type_trans)(struct sk_buff *, struct net_device *);
- atomic_t buckets_out; /* nbr of unused buckets on DDM */
- atomic_t tx_out; /* outstanding TXes */
- u8 tx_count; /* packets in one TX message frame */
- u16 tx_max_out; /* DDM's Tx queue len */
- u8 sgl_max; /* max SGLs in one message frame */
- u32 m; /* IOP address of the batch msg frame */
+ struct fddi_statistics stats; /* see also struct net_device_stats */
+ unsigned short (*type_trans) (struct sk_buff *, struct net_device *);
+ atomic_t buckets_out; /* nbr of unused buckets on DDM */
+ atomic_t tx_out; /* outstanding TXes */
+ u8 tx_count; /* packets in one TX message frame */
+ u16 tx_max_out; /* DDM's Tx queue len */
+ u8 sgl_max; /* max SGLs in one message frame */
+ u32 m; /* IOP address of the batch msg frame */
struct work_struct i2o_batch_send_task;
int send_active;
@@ -144,16 +144,16 @@ struct i2o_lan_local {
spinlock_t tx_lock;
- u32 max_size_mc_table; /* max number of multicast addresses */
+ u32 max_size_mc_table; /* max number of multicast addresses */
/* LAN OSM configurable parameters are here: */
- u16 max_buckets_out; /* max nbr of buckets to send to DDM */
- u16 bucket_thresh; /* send more when this many used */
+ u16 max_buckets_out; /* max nbr of buckets to send to DDM */
+ u16 bucket_thresh; /* send more when this many used */
u16 rx_copybreak;
- u8 tx_batch_mode; /* Set when using batch mode sends */
- u32 i2o_event_mask; /* To turn on interesting event flags */
+ u8 tx_batch_mode; /* Set when using batch mode sends */
+ u32 i2o_event_mask; /* To turn on interesting event flags */
};
-#endif /* _I2O_LAN_H */
+#endif /* _I2O_LAN_H */
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index d559a175836..2a0c42b8cda 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -28,7 +28,7 @@
*/
#define OSM_NAME "proc-osm"
-#define OSM_VERSION "1.145"
+#define OSM_VERSION "1.316"
#define OSM_DESCRIPTION "I2O ProcFS OSM"
#define I2O_MAX_MODULES 4
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index 9f1744c3933..f9e5a23697a 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -70,7 +70,7 @@
#include <scsi/sg_request.h>
#define OSM_NAME "scsi-osm"
-#define OSM_VERSION "1.282"
+#define OSM_VERSION "1.316"
#define OSM_DESCRIPTION "I2O SCSI Peripheral OSM"
static struct i2o_driver i2o_scsi_driver;
@@ -113,7 +113,7 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c)
list_for_each_entry(i2o_dev, &c->devices, list)
if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) {
- if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
+ if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
&& (type == 0x01)) /* SCSI bus */
max_channel++;
}
@@ -146,7 +146,7 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c)
i = 0;
list_for_each_entry(i2o_dev, &c->devices, list)
if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) {
- if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
+ if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
&& (type == 0x01)) /* only SCSI bus */
i2o_shost->channel[i++] = i2o_dev;
@@ -238,13 +238,15 @@ static int i2o_scsi_probe(struct device *dev)
u8 type;
struct i2o_device *d = i2o_shost->channel[0];
- if (i2o_parm_field_get(d, 0x0000, 0, &type, 1)
+ if (!i2o_parm_field_get(d, 0x0000, 0, &type, 1)
&& (type == 0x01)) /* SCSI bus */
- if (i2o_parm_field_get(d, 0x0200, 4, &id, 4)) {
+ if (!i2o_parm_field_get(d, 0x0200, 4, &id, 4)) {
channel = 0;
if (i2o_dev->lct_data.class_id ==
I2O_CLASS_RANDOM_BLOCK_STORAGE)
- lun = i2o_shost->lun++;
+ lun =
+ cpu_to_le64(i2o_shost->
+ lun++);
else
lun = 0;
}
@@ -253,10 +255,10 @@ static int i2o_scsi_probe(struct device *dev)
break;
case I2O_CLASS_SCSI_PERIPHERAL:
- if (i2o_parm_field_get(i2o_dev, 0x0000, 3, &id, 4) < 0)
+ if (i2o_parm_field_get(i2o_dev, 0x0000, 3, &id, 4))
return -EFAULT;
- if (i2o_parm_field_get(i2o_dev, 0x0000, 4, &lun, 8) < 0)
+ if (i2o_parm_field_get(i2o_dev, 0x0000, 4, &lun, 8))
return -EFAULT;
parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid);
@@ -281,20 +283,22 @@ static int i2o_scsi_probe(struct device *dev)
return -EFAULT;
}
- if (id >= scsi_host->max_id) {
- osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)", id,
- scsi_host->max_id);
+ if (le32_to_cpu(id) >= scsi_host->max_id) {
+ osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)",
+ le32_to_cpu(id), scsi_host->max_id);
return -EFAULT;
}
- if (lun >= scsi_host->max_lun) {
- osm_warn("SCSI device id (%d) >= max_lun of I2O host (%d)",
- (unsigned int)lun, scsi_host->max_lun);
+ if (le64_to_cpu(lun) >= scsi_host->max_lun) {
+ osm_warn("SCSI device lun (%lu) >= max_lun of I2O host (%d)",
+ (long unsigned int)le64_to_cpu(lun),
+ scsi_host->max_lun);
return -EFAULT;
}
scsi_dev =
- __scsi_add_device(i2o_shost->scsi_host, channel, id, lun, i2o_dev);
+ __scsi_add_device(i2o_shost->scsi_host, channel, le32_to_cpu(id),
+ le64_to_cpu(lun), i2o_dev);
if (IS_ERR(scsi_dev)) {
osm_warn("can not add SCSI device %03x\n",
@@ -305,8 +309,9 @@ static int i2o_scsi_probe(struct device *dev)
sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj,
"scsi");
- osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %d\n",
- i2o_dev->lct_data.tid, channel, id, (unsigned int)lun);
+ osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %ld\n",
+ i2o_dev->lct_data.tid, channel, le32_to_cpu(id),
+ (long unsigned int)le64_to_cpu(lun));
return 0;
};
@@ -510,8 +515,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
struct i2o_controller *c;
struct i2o_device *i2o_dev;
int tid;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
/*
* ENABLE_DISCONNECT
* SIMPLE_TAG
@@ -519,7 +523,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
*/
u32 scsi_flags = 0x20a00000;
u32 sgl_offset;
- u32 __iomem *mptr;
+ u32 *mptr;
u32 cmd = I2O_CMD_SCSI_EXEC << 24;
int rc = 0;
@@ -576,8 +580,8 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
* throw it back to the scsi layer
*/
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY) {
+ msg = i2o_msg_get(c);
+ if (IS_ERR(msg)) {
rc = SCSI_MLQUEUE_HOST_BUSY;
goto exit;
}
@@ -617,16 +621,16 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
if (sgl_offset == SGL_OFFSET_10)
sgl_offset = SGL_OFFSET_12;
cmd = I2O_CMD_PRIVATE << 24;
- writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++);
- writel(adpt_flags | tid, mptr++);
+ *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
+ *mptr++ = cpu_to_le32(adpt_flags | tid);
}
#endif
- writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]);
- writel(i2o_scsi_driver.context, &msg->u.s.icntxt);
+ msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
+ msg->u.s.icntxt = cpu_to_le32(i2o_scsi_driver.context);
/* We want the SCSI control block back */
- writel(i2o_cntxt_list_add(c, SCpnt), &msg->u.s.tcntxt);
+ msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, SCpnt));
/* LSI_920_PCI_QUIRK
*
@@ -649,15 +653,15 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
}
*/
- writel(scsi_flags | SCpnt->cmd_len, mptr++);
+ *mptr++ = cpu_to_le32(scsi_flags | SCpnt->cmd_len);
/* Write SCSI command into the message - always 16 byte block */
- memcpy_toio(mptr, SCpnt->cmnd, 16);
+ memcpy(mptr, SCpnt->cmnd, 16);
mptr += 4;
if (sgl_offset != SGL_OFFSET_0) {
/* write size of data addressed by SGL */
- writel(SCpnt->request_bufflen, mptr++);
+ *mptr++ = cpu_to_le32(SCpnt->request_bufflen);
/* Now fill in the SGList and command */
if (SCpnt->use_sg) {
@@ -676,11 +680,11 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
}
/* Stick the headers on */
- writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset,
- &msg->u.head[0]);
+ msg->u.head[0] =
+ cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);
/* Queue the message */
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
osm_debug("Issued %ld\n", SCpnt->serial_number);
@@ -688,7 +692,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
nomem:
rc = -ENOMEM;
- i2o_msg_nop(c, m);
+ i2o_msg_nop(c, msg);
exit:
return rc;
@@ -709,8 +713,7 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
{
struct i2o_device *i2o_dev;
struct i2o_controller *c;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
int tid;
int status = FAILED;
@@ -720,16 +723,16 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
c = i2o_dev->iop;
tid = i2o_dev->lct_data.tid;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
return SCSI_MLQUEUE_HOST_BUSY;
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid,
- &msg->u.head[1]);
- writel(i2o_cntxt_list_get_ptr(c, SCpnt), &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid);
+ msg->body[0] = cpu_to_le32(i2o_cntxt_list_get_ptr(c, SCpnt));
- if (i2o_msg_post_wait(c, m, I2O_TIMEOUT_SCSI_SCB_ABORT))
+ if (i2o_msg_post_wait(c, msg, I2O_TIMEOUT_SCSI_SCB_ABORT))
status = SUCCESS;
return status;
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index 4eb53258842..49216744693 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -32,7 +32,7 @@
#include "core.h"
#define OSM_NAME "i2o"
-#define OSM_VERSION "1.288"
+#define OSM_VERSION "1.325"
#define OSM_DESCRIPTION "I2O subsystem"
/* global I2O controller list */
@@ -47,27 +47,6 @@ static struct i2o_dma i2o_systab;
static int i2o_hrt_get(struct i2o_controller *c);
/**
- * i2o_msg_nop - Returns a message which is not used
- * @c: I2O controller from which the message was created
- * @m: message which should be returned
- *
- * If you fetch a message via i2o_msg_get, and can't use it, you must
- * return the message with this function. Otherwise the message frame
- * is lost.
- */
-void i2o_msg_nop(struct i2o_controller *c, u32 m)
-{
- struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m);
-
- writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(0, &msg->u.head[2]);
- writel(0, &msg->u.head[3]);
- i2o_msg_post(c, m);
-};
-
-/**
* i2o_msg_get_wait - obtain an I2O message from the IOP
* @c: I2O controller
* @msg: pointer to a I2O message pointer
@@ -81,22 +60,21 @@ void i2o_msg_nop(struct i2o_controller *c, u32 m)
* address from the read port (see the i2o spec). If no message is
 * available returns I2O_QUEUE_EMPTY and msg is left untouched.
*/
-u32 i2o_msg_get_wait(struct i2o_controller *c,
- struct i2o_message __iomem ** msg, int wait)
+struct i2o_message *i2o_msg_get_wait(struct i2o_controller *c, int wait)
{
unsigned long timeout = jiffies + wait * HZ;
- u32 m;
+ struct i2o_message *msg;
- while ((m = i2o_msg_get(c, msg)) == I2O_QUEUE_EMPTY) {
+ while (IS_ERR(msg = i2o_msg_get(c))) {
if (time_after(jiffies, timeout)) {
osm_debug("%s: Timeout waiting for message frame.\n",
c->name);
- return I2O_QUEUE_EMPTY;
+ return ERR_PTR(-ETIMEDOUT);
}
schedule_timeout_uninterruptible(1);
}
- return m;
+ return msg;
};
#if BITS_PER_LONG == 64
@@ -301,8 +279,7 @@ struct i2o_device *i2o_iop_find_device(struct i2o_controller *c, u16 tid)
*/
static int i2o_iop_quiesce(struct i2o_controller *c)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
i2o_status_block *sb = c->status_block.virt;
int rc;
@@ -313,16 +290,17 @@ static int i2o_iop_quiesce(struct i2o_controller *c)
(sb->iop_state != ADAPTER_STATE_OPERATIONAL))
return 0;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
/* Long timeout needed for quiesce if lots of devices */
- if ((rc = i2o_msg_post_wait(c, m, 240)))
+ if ((rc = i2o_msg_post_wait(c, msg, 240)))
osm_info("%s: Unable to quiesce (status=%#x).\n", c->name, -rc);
else
osm_debug("%s: Quiesced.\n", c->name);
@@ -342,8 +320,7 @@ static int i2o_iop_quiesce(struct i2o_controller *c)
*/
static int i2o_iop_enable(struct i2o_controller *c)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
i2o_status_block *sb = c->status_block.virt;
int rc;
@@ -353,16 +330,17 @@ static int i2o_iop_enable(struct i2o_controller *c)
if (sb->iop_state != ADAPTER_STATE_READY)
return -EINVAL;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
/* How long of a timeout do we need? */
- if ((rc = i2o_msg_post_wait(c, m, 240)))
+ if ((rc = i2o_msg_post_wait(c, msg, 240)))
osm_err("%s: Could not enable (status=%#x).\n", c->name, -rc);
else
osm_debug("%s: Enabled.\n", c->name);
@@ -413,22 +391,22 @@ static inline void i2o_iop_enable_all(void)
*/
static int i2o_iop_clear(struct i2o_controller *c)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
int rc;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
/* Quiesce all IOPs first */
i2o_iop_quiesce_all();
- writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
- if ((rc = i2o_msg_post_wait(c, m, 30)))
+ if ((rc = i2o_msg_post_wait(c, msg, 30)))
osm_info("%s: Unable to clear (status=%#x).\n", c->name, -rc);
else
osm_debug("%s: Cleared.\n", c->name);
@@ -446,13 +424,13 @@ static int i2o_iop_clear(struct i2o_controller *c)
* Clear and (re)initialize IOP's outbound queue and post the message
* frames to the IOP.
*
- * Returns 0 on success or a negative errno code on failure.
+ * Returns 0 on success or negative error code on failure.
*/
static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
{
- volatile u8 *status = c->status.virt;
u32 m;
- struct i2o_message __iomem *msg;
+ volatile u8 *status = c->status.virt;
+ struct i2o_message *msg;
ulong timeout;
int i;
@@ -460,23 +438,24 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
memset(c->status.virt, 0, 4);
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
-
- writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]);
- writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(i2o_exec_driver.context, &msg->u.s.icntxt);
- writel(0x00000000, &msg->u.s.tcntxt);
- writel(PAGE_SIZE, &msg->body[0]);
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(0x00000000);
+ msg->body[0] = cpu_to_le32(PAGE_SIZE);
/* Outbound msg frame size in words and Initcode */
- writel(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]);
- writel(0xd0000004, &msg->body[2]);
- writel(i2o_dma_low(c->status.phys), &msg->body[3]);
- writel(i2o_dma_high(c->status.phys), &msg->body[4]);
+ msg->body[1] = cpu_to_le32(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80);
+ msg->body[2] = cpu_to_le32(0xd0000004);
+ msg->body[3] = cpu_to_le32(i2o_dma_low(c->status.phys));
+ msg->body[4] = cpu_to_le32(i2o_dma_high(c->status.phys));
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ;
while (*status <= I2O_CMD_IN_PROGRESS) {
@@ -511,34 +490,34 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
static int i2o_iop_reset(struct i2o_controller *c)
{
volatile u8 *status = c->status.virt;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
unsigned long timeout;
i2o_status_block *sb = c->status_block.virt;
int rc = 0;
osm_debug("%s: Resetting controller\n", c->name);
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
memset(c->status_block.virt, 0, 8);
/* Quiesce all IOPs first */
i2o_iop_quiesce_all();
- writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(i2o_exec_driver.context, &msg->u.s.icntxt);
- writel(0, &msg->u.s.tcntxt); //FIXME: use reasonable transaction context
- writel(0, &msg->body[0]);
- writel(0, &msg->body[1]);
- writel(i2o_dma_low(c->status.phys), &msg->body[2]);
- writel(i2o_dma_high(c->status.phys), &msg->body[3]);
+ msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(0x00000000);
+ msg->body[0] = cpu_to_le32(0x00000000);
+ msg->body[1] = cpu_to_le32(0x00000000);
+ msg->body[2] = cpu_to_le32(i2o_dma_low(c->status.phys));
+ msg->body[3] = cpu_to_le32(i2o_dma_high(c->status.phys));
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
/* Wait for a reply */
timeout = jiffies + I2O_TIMEOUT_RESET * HZ;
@@ -567,18 +546,15 @@ static int i2o_iop_reset(struct i2o_controller *c)
osm_debug("%s: Reset in progress, waiting for reboot...\n",
c->name);
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
- while (m == I2O_QUEUE_EMPTY) {
+ while (IS_ERR(msg = i2o_msg_get_wait(c, I2O_TIMEOUT_RESET))) {
if (time_after(jiffies, timeout)) {
osm_err("%s: IOP reset timeout.\n", c->name);
- rc = -ETIMEDOUT;
+ rc = PTR_ERR(msg);
goto exit;
}
schedule_timeout_uninterruptible(1);
-
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
}
- i2o_msg_nop(c, m);
+ i2o_msg_nop(c, msg);
/* from here all quiesce commands are safe */
c->no_quiesce = 0;
@@ -686,8 +662,7 @@ static int i2o_iop_activate(struct i2o_controller *c)
*/
static int i2o_iop_systab_set(struct i2o_controller *c)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
i2o_status_block *sb = c->status_block.virt;
struct device *dev = &c->pdev->dev;
struct resource *root;
@@ -735,41 +710,38 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
}
}
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len,
PCI_DMA_TODEVICE);
if (!i2o_systab.phys) {
- i2o_msg_nop(c, m);
+ i2o_msg_nop(c, msg);
return -ENOMEM;
}
- writel(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6, &msg->u.head[0]);
- writel(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
+ msg->u.head[0] = cpu_to_le32(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
/*
* Provide three SGL-elements:
* System table (SysTab), Private memory space declaration and
* Private i/o space declaration
- *
- * FIXME: is this still true?
- * Nasty one here. We can't use dma_alloc_coherent to send the
- * same table to everyone. We have to go remap it for them all
*/
- writel(c->unit + 2, &msg->body[0]);
- writel(0, &msg->body[1]);
- writel(0x54000000 | i2o_systab.len, &msg->body[2]);
- writel(i2o_systab.phys, &msg->body[3]);
- writel(0x54000000 | sb->current_mem_size, &msg->body[4]);
- writel(sb->current_mem_base, &msg->body[5]);
- writel(0xd4000000 | sb->current_io_size, &msg->body[6]);
- writel(sb->current_io_base, &msg->body[6]);
+ msg->body[0] = cpu_to_le32(c->unit + 2);
+ msg->body[1] = cpu_to_le32(0x00000000);
+ msg->body[2] = cpu_to_le32(0x54000000 | i2o_systab.len);
+ msg->body[3] = cpu_to_le32(i2o_systab.phys);
+ msg->body[4] = cpu_to_le32(0x54000000 | sb->current_mem_size);
+ msg->body[5] = cpu_to_le32(sb->current_mem_base);
+ msg->body[6] = cpu_to_le32(0xd4000000 | sb->current_io_size);
+ msg->body[6] = cpu_to_le32(sb->current_io_base);
- rc = i2o_msg_post_wait(c, m, 120);
+ rc = i2o_msg_post_wait(c, msg, 120);
dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len,
PCI_DMA_TODEVICE);
@@ -780,8 +752,6 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
else
osm_debug("%s: SysTab set.\n", c->name);
- i2o_status_get(c); // Entered READY state
-
return rc;
}
@@ -791,7 +761,7 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
*
* Send the system table and enable the I2O controller.
*
- * Returns 0 on success or negativer error code on failure.
+ * Returns 0 on success or negative error code on failure.
*/
static int i2o_iop_online(struct i2o_controller *c)
{
@@ -830,7 +800,6 @@ void i2o_iop_remove(struct i2o_controller *c)
list_for_each_entry_safe(dev, tmp, &c->devices, list)
i2o_device_remove(dev);
- class_device_unregister(c->classdev);
device_del(&c->device);
/* Ask the IOP to switch to RESET state */
@@ -869,12 +838,11 @@ static int i2o_systab_build(void)
i2o_systab.len = sizeof(struct i2o_sys_tbl) + num_controllers *
sizeof(struct i2o_sys_tbl_entry);
- systab = i2o_systab.virt = kmalloc(i2o_systab.len, GFP_KERNEL);
+ systab = i2o_systab.virt = kzalloc(i2o_systab.len, GFP_KERNEL);
if (!systab) {
osm_err("unable to allocate memory for System Table\n");
return -ENOMEM;
}
- memset(systab, 0, i2o_systab.len);
systab->version = I2OVERSION;
systab->change_ind = change_ind + 1;
@@ -952,30 +920,30 @@ static int i2o_parse_hrt(struct i2o_controller *c)
*/
int i2o_status_get(struct i2o_controller *c)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
volatile u8 *status_block;
unsigned long timeout;
status_block = (u8 *) c->status_block.virt;
memset(c->status_block.virt, 0, sizeof(i2o_status_block));
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(i2o_exec_driver.context, &msg->u.s.icntxt);
- writel(0, &msg->u.s.tcntxt); // FIXME: use resonable transaction context
- writel(0, &msg->body[0]);
- writel(0, &msg->body[1]);
- writel(i2o_dma_low(c->status_block.phys), &msg->body[2]);
- writel(i2o_dma_high(c->status_block.phys), &msg->body[3]);
- writel(sizeof(i2o_status_block), &msg->body[4]); /* always 88 bytes */
+ msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(0x00000000);
+ msg->body[0] = cpu_to_le32(0x00000000);
+ msg->body[1] = cpu_to_le32(0x00000000);
+ msg->body[2] = cpu_to_le32(i2o_dma_low(c->status_block.phys));
+ msg->body[3] = cpu_to_le32(i2o_dma_high(c->status_block.phys));
+ msg->body[4] = cpu_to_le32(sizeof(i2o_status_block)); /* always 88 bytes */
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
/* Wait for a reply */
timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ;
@@ -1002,7 +970,7 @@ int i2o_status_get(struct i2o_controller *c)
* The HRT contains information about possible hidden devices but is
* mostly useless to us.
*
- * Returns 0 on success or negativer error code on failure.
+ * Returns 0 on success or negative error code on failure.
*/
static int i2o_hrt_get(struct i2o_controller *c)
{
@@ -1013,20 +981,20 @@ static int i2o_hrt_get(struct i2o_controller *c)
struct device *dev = &c->pdev->dev;
for (i = 0; i < I2O_HRT_GET_TRIES; i++) {
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(SIX_WORD_MSG_SIZE | SGL_OFFSET_4, &msg->u.head[0]);
- writel(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(0xd0000000 | c->hrt.len, &msg->body[0]);
- writel(c->hrt.phys, &msg->body[1]);
+ msg->u.head[0] = cpu_to_le32(SIX_WORD_MSG_SIZE | SGL_OFFSET_4);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->body[0] = cpu_to_le32(0xd0000000 | c->hrt.len);
+ msg->body[1] = cpu_to_le32(c->hrt.phys);
- rc = i2o_msg_post_wait_mem(c, m, 20, &c->hrt);
+ rc = i2o_msg_post_wait_mem(c, msg, 20, &c->hrt);
if (rc < 0) {
osm_err("%s: Unable to get HRT (status=%#x)\n", c->name,
@@ -1051,15 +1019,6 @@ static int i2o_hrt_get(struct i2o_controller *c)
}
/**
- * i2o_iop_free - Free the i2o_controller struct
- * @c: I2O controller to free
- */
-void i2o_iop_free(struct i2o_controller *c)
-{
- kfree(c);
-};
-
-/**
* i2o_iop_release - release the memory for a I2O controller
* @dev: I2O controller which should be released
*
@@ -1073,14 +1032,11 @@ static void i2o_iop_release(struct device *dev)
i2o_iop_free(c);
};
-/* I2O controller class */
-static struct class *i2o_controller_class;
-
/**
* i2o_iop_alloc - Allocate and initialize a i2o_controller struct
*
* Allocate the necessary memory for a i2o_controller struct and
- * initialize the lists.
+ * initialize the lists and message mempool.
*
* Returns a pointer to the I2O controller or a negative error code on
* failure.
@@ -1089,20 +1045,29 @@ struct i2o_controller *i2o_iop_alloc(void)
{
static int unit = 0; /* 0 and 1 are NULL IOP and Local Host */
struct i2o_controller *c;
+ char poolname[32];
- c = kmalloc(sizeof(*c), GFP_KERNEL);
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c) {
osm_err("i2o: Insufficient memory to allocate a I2O controller."
"\n");
return ERR_PTR(-ENOMEM);
}
- memset(c, 0, sizeof(*c));
+
+ c->unit = unit++;
+ sprintf(c->name, "iop%d", c->unit);
+
+ snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name);
+ if (i2o_pool_alloc
+ (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4,
+ I2O_MSG_INPOOL_MIN)) {
+ kfree(c);
+ return ERR_PTR(-ENOMEM);
+ };
INIT_LIST_HEAD(&c->devices);
spin_lock_init(&c->lock);
init_MUTEX(&c->lct_lock);
- c->unit = unit++;
- sprintf(c->name, "iop%d", c->unit);
device_initialize(&c->device);
@@ -1137,36 +1102,29 @@ int i2o_iop_add(struct i2o_controller *c)
goto iop_reset;
}
- c->classdev = class_device_create(i2o_controller_class, NULL, MKDEV(0,0),
- &c->device, "iop%d", c->unit);
- if (IS_ERR(c->classdev)) {
- osm_err("%s: could not add controller class\n", c->name);
- goto device_del;
- }
-
osm_info("%s: Activating I2O controller...\n", c->name);
osm_info("%s: This may take a few minutes if there are many devices\n",
c->name);
if ((rc = i2o_iop_activate(c))) {
osm_err("%s: could not activate controller\n", c->name);
- goto class_del;
+ goto device_del;
}
osm_debug("%s: building sys table...\n", c->name);
if ((rc = i2o_systab_build()))
- goto class_del;
+ goto device_del;
osm_debug("%s: online controller...\n", c->name);
if ((rc = i2o_iop_online(c)))
- goto class_del;
+ goto device_del;
osm_debug("%s: getting LCT...\n", c->name);
if ((rc = i2o_exec_lct_get(c)))
- goto class_del;
+ goto device_del;
list_add(&c->list, &i2o_controllers);
@@ -1176,9 +1134,6 @@ int i2o_iop_add(struct i2o_controller *c)
return 0;
- class_del:
- class_device_unregister(c->classdev);
-
device_del:
device_del(&c->device);
@@ -1199,28 +1154,27 @@ int i2o_iop_add(struct i2o_controller *c)
* is waited for, or expected. If you do not want further notifications,
* call the i2o_event_register again with a evt_mask of 0.
*
- * Returns 0 on success or -ETIMEDOUT if no message could be fetched for
- * sending the request.
+ * Returns 0 on success or negative error code on failure.
*/
int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv,
int tcntxt, u32 evt_mask)
{
struct i2o_controller *c = dev->iop;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->lct_data.
- tid, &msg->u.head[1]);
- writel(drv->context, &msg->u.s.icntxt);
- writel(tcntxt, &msg->u.s.tcntxt);
- writel(evt_mask, &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->
+ lct_data.tid);
+ msg->u.s.icntxt = cpu_to_le32(drv->context);
+ msg->u.s.tcntxt = cpu_to_le32(tcntxt);
+ msg->body[0] = cpu_to_le32(evt_mask);
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
return 0;
};
@@ -1239,14 +1193,8 @@ static int __init i2o_iop_init(void)
printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
- i2o_controller_class = class_create(THIS_MODULE, "i2o_controller");
- if (IS_ERR(i2o_controller_class)) {
- osm_err("can't register class i2o_controller\n");
- goto exit;
- }
-
if ((rc = i2o_driver_init()))
- goto class_exit;
+ goto exit;
if ((rc = i2o_exec_init()))
goto driver_exit;
@@ -1262,9 +1210,6 @@ static int __init i2o_iop_init(void)
driver_exit:
i2o_driver_exit();
- class_exit:
- class_destroy(i2o_controller_class);
-
exit:
return rc;
}
@@ -1279,7 +1224,6 @@ static void __exit i2o_iop_exit(void)
i2o_pci_exit();
i2o_exec_exit();
i2o_driver_exit();
- class_destroy(i2o_controller_class);
};
module_init(i2o_iop_init);
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
index ee7075fa1ec..c5b656cdea7 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/message/i2o/pci.c
@@ -339,7 +339,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
pci_name(pdev));
c->pdev = pdev;
- c->device.parent = get_device(&pdev->dev);
+ c->device.parent = &pdev->dev;
/* Cards that fall apart if you hit them with large I/O loads... */
if (pdev->vendor == PCI_VENDOR_ID_NCR && pdev->device == 0x0630) {
@@ -410,8 +410,6 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
if ((rc = i2o_iop_add(c)))
goto uninstall;
- get_device(&c->device);
-
if (i960)
pci_write_config_word(i960, 0x42, 0x03ff);
@@ -424,7 +422,6 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
i2o_pci_free(c);
free_controller:
- put_device(c->device.parent);
i2o_iop_free(c);
disable:
@@ -454,7 +451,6 @@ static void __devexit i2o_pci_remove(struct pci_dev *pdev)
printk(KERN_INFO "%s: Controller removed.\n", c->name);
- put_device(c->device.parent);
put_device(&c->device);
};
@@ -483,4 +479,5 @@ void __exit i2o_pci_exit(void)
{
pci_unregister_driver(&i2o_pci_driver);
};
+
MODULE_DEVICE_TABLE(pci, i2o_pci_ids);
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index e335d54c465..aff83f96680 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -24,14 +24,14 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/device.h>
+#include <linux/mutex.h>
#include <asm/dma.h>
#include <asm/hardware.h>
-#include <asm/irq.h>
#include "ucb1x00.h"
-static DECLARE_MUTEX(ucb1x00_sem);
+static DEFINE_MUTEX(ucb1x00_mutex);
static LIST_HEAD(ucb1x00_drivers);
static LIST_HEAD(ucb1x00_devices);
@@ -507,14 +507,14 @@ static int ucb1x00_probe(struct mcp *mcp)
goto err_free;
}
- ret = request_irq(ucb->irq, ucb1x00_irq, 0, "UCB1x00", ucb);
+ ret = request_irq(ucb->irq, ucb1x00_irq, SA_TRIGGER_RISING,
+ "UCB1x00", ucb);
if (ret) {
printk(KERN_ERR "ucb1x00: unable to grab irq%d: %d\n",
ucb->irq, ret);
goto err_free;
}
- set_irq_type(ucb->irq, IRQT_RISING);
mcp_set_drvdata(mcp, ucb);
ret = class_device_register(&ucb->cdev);
@@ -522,12 +522,12 @@ static int ucb1x00_probe(struct mcp *mcp)
goto err_irq;
INIT_LIST_HEAD(&ucb->devs);
- down(&ucb1x00_sem);
+ mutex_lock(&ucb1x00_mutex);
list_add(&ucb->node, &ucb1x00_devices);
list_for_each_entry(drv, &ucb1x00_drivers, node) {
ucb1x00_add_dev(ucb, drv);
}
- up(&ucb1x00_sem);
+ mutex_unlock(&ucb1x00_mutex);
goto out;
err_irq:
@@ -545,13 +545,13 @@ static void ucb1x00_remove(struct mcp *mcp)
struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
struct list_head *l, *n;
- down(&ucb1x00_sem);
+ mutex_lock(&ucb1x00_mutex);
list_del(&ucb->node);
list_for_each_safe(l, n, &ucb->devs) {
struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, dev_node);
ucb1x00_remove_dev(dev);
}
- up(&ucb1x00_sem);
+ mutex_unlock(&ucb1x00_mutex);
free_irq(ucb->irq, ucb);
class_device_unregister(&ucb->cdev);
@@ -562,12 +562,12 @@ int ucb1x00_register_driver(struct ucb1x00_driver *drv)
struct ucb1x00 *ucb;
INIT_LIST_HEAD(&drv->devs);
- down(&ucb1x00_sem);
+ mutex_lock(&ucb1x00_mutex);
list_add(&drv->node, &ucb1x00_drivers);
list_for_each_entry(ucb, &ucb1x00_devices, node) {
ucb1x00_add_dev(ucb, drv);
}
- up(&ucb1x00_sem);
+ mutex_unlock(&ucb1x00_mutex);
return 0;
}
@@ -575,13 +575,13 @@ void ucb1x00_unregister_driver(struct ucb1x00_driver *drv)
{
struct list_head *n, *l;
- down(&ucb1x00_sem);
+ mutex_lock(&ucb1x00_mutex);
list_del(&drv->node);
list_for_each_safe(l, n, &drv->devs) {
struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, drv_node);
ucb1x00_remove_dev(dev);
}
- up(&ucb1x00_sem);
+ mutex_unlock(&ucb1x00_mutex);
}
static int ucb1x00_suspend(struct mcp *mcp, pm_message_t state)
@@ -589,12 +589,12 @@ static int ucb1x00_suspend(struct mcp *mcp, pm_message_t state)
struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
struct ucb1x00_dev *dev;
- down(&ucb1x00_sem);
+ mutex_lock(&ucb1x00_mutex);
list_for_each_entry(dev, &ucb->devs, dev_node) {
if (dev->drv->suspend)
dev->drv->suspend(dev, state);
}
- up(&ucb1x00_sem);
+ mutex_unlock(&ucb1x00_mutex);
return 0;
}
@@ -603,12 +603,12 @@ static int ucb1x00_resume(struct mcp *mcp)
struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
struct ucb1x00_dev *dev;
- down(&ucb1x00_sem);
+ mutex_lock(&ucb1x00_mutex);
list_for_each_entry(dev, &ucb->devs, dev_node) {
if (dev->drv->resume)
dev->drv->resume(dev);
}
- up(&ucb1x00_sem);
+ mutex_unlock(&ucb1x00_mutex);
return 0;
}
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 551061c2ead..79fd062ccb3 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -32,7 +32,6 @@
#include <linux/suspend.h>
#include <linux/slab.h>
#include <linux/kthread.h>
-#include <linux/delay.h>
#include <asm/dma.h>
#include <asm/semaphore.h>
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index eb41391e06e..bfca5c176e8 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -495,6 +495,7 @@ static void mmc_decode_cid(struct mmc_card *card)
case 2: /* MMC v2.0 - v2.2 */
case 3: /* MMC v3.1 - v3.3 */
+ case 4: /* MMC v4 */
card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
@@ -550,6 +551,11 @@ static void mmc_decode_csd(struct mmc_card *card)
csd->capacity = (1 + m) << (e + 2);
csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
+ csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
+ csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
+ csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
+ csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
+ csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
} else {
/*
* We only understand CSD structure v1.1 and v1.2.
@@ -579,6 +585,11 @@ static void mmc_decode_csd(struct mmc_card *card)
csd->capacity = (1 + m) << (e + 2);
csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
+ csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
+ csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
+ csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
+ csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
+ csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
}
}
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index abcf19116d7..9b7c37e0e57 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -28,6 +28,7 @@
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/devfs_fs_kernel.h>
+#include <linux/mutex.h>
#include <linux/mmc/card.h>
#include <linux/mmc/protocol.h>
@@ -54,41 +55,36 @@ struct mmc_blk_data {
unsigned int usage;
unsigned int block_bits;
+ unsigned int read_only;
};
-static DECLARE_MUTEX(open_lock);
+static DEFINE_MUTEX(open_lock);
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
struct mmc_blk_data *md;
- down(&open_lock);
+ mutex_lock(&open_lock);
md = disk->private_data;
if (md && md->usage == 0)
md = NULL;
if (md)
md->usage++;
- up(&open_lock);
+ mutex_unlock(&open_lock);
return md;
}
static void mmc_blk_put(struct mmc_blk_data *md)
{
- down(&open_lock);
+ mutex_lock(&open_lock);
md->usage--;
if (md->usage == 0) {
put_disk(md->disk);
mmc_cleanup_queue(&md->queue);
kfree(md);
}
- up(&open_lock);
-}
-
-static inline int mmc_blk_readonly(struct mmc_card *card)
-{
- return mmc_card_readonly(card) ||
- !(card->csd.cmdclass & CCC_BLOCK_WRITE);
+ mutex_unlock(&open_lock);
}
static int mmc_blk_open(struct inode *inode, struct file *filp)
@@ -102,8 +98,7 @@ static int mmc_blk_open(struct inode *inode, struct file *filp)
check_disk_change(inode->i_bdev);
ret = 0;
- if ((filp->f_mode & FMODE_WRITE) &&
- mmc_blk_readonly(md->queue.card))
+ if ((filp->f_mode & FMODE_WRITE) && md->read_only)
ret = -EROFS;
}
@@ -119,31 +114,18 @@ static int mmc_blk_release(struct inode *inode, struct file *filp)
}
static int
-mmc_blk_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
- struct block_device *bdev = inode->i_bdev;
-
- if (cmd == HDIO_GETGEO) {
- struct hd_geometry geo;
-
- memset(&geo, 0, sizeof(struct hd_geometry));
-
- geo.cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
- geo.heads = 4;
- geo.sectors = 16;
- geo.start = get_start_sect(bdev);
-
- return copy_to_user((void __user *)arg, &geo, sizeof(geo))
- ? -EFAULT : 0;
- }
-
- return -ENOTTY;
+ geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
+ geo->heads = 4;
+ geo->sectors = 16;
+ return 0;
}
static struct block_device_operations mmc_bdops = {
.open = mmc_blk_open,
.release = mmc_blk_release,
- .ioctl = mmc_blk_ioctl,
+ .getgeo = mmc_blk_getgeo,
.owner = THIS_MODULE,
};
@@ -206,7 +188,13 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.data.flags |= MMC_DATA_WRITE;
brq.data.blocks = 1;
}
- brq.mrq.stop = brq.data.blocks > 1 ? &brq.stop : NULL;
+
+ if (brq.data.blocks > 1) {
+ brq.data.flags |= MMC_DATA_MULTI;
+ brq.mrq.stop = &brq.stop;
+ } else {
+ brq.mrq.stop = NULL;
+ }
brq.data.sg = mq->sg;
brq.data.sg_len = blk_rq_map_sg(req->q, req, brq.data.sg);
@@ -263,7 +251,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
*/
add_disk_randomness(req->rq_disk);
blkdev_dequeue_request(req);
- end_that_request_last(req);
+ end_that_request_last(req, 1);
}
spin_unlock_irq(&md->lock);
} while (ret);
@@ -289,7 +277,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
add_disk_randomness(req->rq_disk);
blkdev_dequeue_request(req);
- end_that_request_last(req);
+ end_that_request_last(req, 0);
spin_unlock_irq(&md->lock);
return 0;
@@ -299,6 +287,12 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
static unsigned long dev_use[MMC_NUM_MINORS/(8*sizeof(unsigned long))];
+static inline int mmc_blk_readonly(struct mmc_card *card)
+{
+ return mmc_card_readonly(card) ||
+ !(card->csd.cmdclass & CCC_BLOCK_WRITE);
+}
+
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
struct mmc_blk_data *md;
@@ -310,64 +304,121 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
__set_bit(devidx, dev_use);
md = kmalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
- if (md) {
- memset(md, 0, sizeof(struct mmc_blk_data));
+ if (!md) {
+ ret = -ENOMEM;
+ goto out;
+ }
- md->disk = alloc_disk(1 << MMC_SHIFT);
- if (md->disk == NULL) {
- kfree(md);
- md = ERR_PTR(-ENOMEM);
- goto out;
- }
+ memset(md, 0, sizeof(struct mmc_blk_data));
- spin_lock_init(&md->lock);
- md->usage = 1;
+ /*
+ * Set the read-only status based on the supported commands
+ * and the write protect switch.
+ */
+ md->read_only = mmc_blk_readonly(card);
- ret = mmc_init_queue(&md->queue, card, &md->lock);
- if (ret) {
- put_disk(md->disk);
- kfree(md);
- md = ERR_PTR(ret);
- goto out;
+ /*
+ * Figure out a workable block size. MMC cards have:
+ * - two block sizes, one for read and one for write.
+ * - may support partial reads and/or writes
+ * (allows block sizes smaller than specified)
+ */
+ md->block_bits = card->csd.read_blkbits;
+ if (card->csd.write_blkbits != card->csd.read_blkbits) {
+ if (card->csd.write_blkbits < card->csd.read_blkbits &&
+ card->csd.read_partial) {
+ /*
+ * write block size is smaller than read block
+ * size, but we support partial reads, so choose
+ * the smaller write block size.
+ */
+ md->block_bits = card->csd.write_blkbits;
+ } else if (card->csd.write_blkbits > card->csd.read_blkbits &&
+ card->csd.write_partial) {
+ /*
+ * read block size is smaller than write block
+ * size, but we support partial writes. Use read
+ * block size.
+ */
+ } else {
+ /*
+ * We don't support this configuration for writes.
+ */
+ printk(KERN_ERR "%s: unable to select block size for "
+ "writing (rb%u wb%u rp%u wp%u)\n",
+ md->disk->disk_name,
+ 1 << card->csd.read_blkbits,
+ 1 << card->csd.write_blkbits,
+ card->csd.read_partial,
+ card->csd.write_partial);
+ md->read_only = 1;
}
- md->queue.prep_fn = mmc_blk_prep_rq;
- md->queue.issue_fn = mmc_blk_issue_rq;
- md->queue.data = md;
+ }
- md->disk->major = major;
- md->disk->first_minor = devidx << MMC_SHIFT;
- md->disk->fops = &mmc_bdops;
- md->disk->private_data = md;
- md->disk->queue = md->queue.queue;
- md->disk->driverfs_dev = &card->dev;
+ /*
+ * Refuse to allow block sizes smaller than 512 bytes.
+ */
+ if (md->block_bits < 9) {
+ printk(KERN_ERR "%s: unable to support block size %u\n",
+ mmc_card_id(card), 1 << md->block_bits);
+ ret = -EINVAL;
+ goto err_kfree;
+ }
- /*
- * As discussed on lkml, GENHD_FL_REMOVABLE should:
- *
- * - be set for removable media with permanent block devices
- * - be unset for removable block devices with permanent media
- *
- * Since MMC block devices clearly fall under the second
- * case, we do not set GENHD_FL_REMOVABLE. Userspace
- * should use the block device creation/destruction hotplug
- * messages to tell when the card is present.
- */
+ md->disk = alloc_disk(1 << MMC_SHIFT);
+ if (md->disk == NULL) {
+ ret = -ENOMEM;
+ goto err_kfree;
+ }
- sprintf(md->disk->disk_name, "mmcblk%d", devidx);
- sprintf(md->disk->devfs_name, "mmc/blk%d", devidx);
+ spin_lock_init(&md->lock);
+ md->usage = 1;
- md->block_bits = card->csd.read_blkbits;
+ ret = mmc_init_queue(&md->queue, card, &md->lock);
+ if (ret)
+ goto err_putdisk;
- blk_queue_hardsect_size(md->queue.queue, 1 << md->block_bits);
+ md->queue.prep_fn = mmc_blk_prep_rq;
+ md->queue.issue_fn = mmc_blk_issue_rq;
+ md->queue.data = md;
- /*
- * The CSD capacity field is in units of read_blkbits.
- * set_capacity takes units of 512 bytes.
- */
- set_capacity(md->disk, card->csd.capacity << (card->csd.read_blkbits - 9));
- }
- out:
+ md->disk->major = major;
+ md->disk->first_minor = devidx << MMC_SHIFT;
+ md->disk->fops = &mmc_bdops;
+ md->disk->private_data = md;
+ md->disk->queue = md->queue.queue;
+ md->disk->driverfs_dev = &card->dev;
+
+ /*
+ * As discussed on lkml, GENHD_FL_REMOVABLE should:
+ *
+ * - be set for removable media with permanent block devices
+ * - be unset for removable block devices with permanent media
+ *
+ * Since MMC block devices clearly fall under the second
+ * case, we do not set GENHD_FL_REMOVABLE. Userspace
+ * should use the block device creation/destruction hotplug
+ * messages to tell when the card is present.
+ */
+
+ sprintf(md->disk->disk_name, "mmcblk%d", devidx);
+ sprintf(md->disk->devfs_name, "mmc/blk%d", devidx);
+
+ blk_queue_hardsect_size(md->queue.queue, 1 << md->block_bits);
+
+ /*
+ * The CSD capacity field is in units of read_blkbits.
+ * set_capacity takes units of 512 bytes.
+ */
+ set_capacity(md->disk, card->csd.capacity << (card->csd.read_blkbits - 9));
return md;
+
+ err_putdisk:
+ put_disk(md->disk);
+ err_kfree:
+ kfree(md);
+ out:
+ return ERR_PTR(ret);
}
static int
@@ -403,12 +454,6 @@ static int mmc_blk_probe(struct mmc_card *card)
if (!(card->csd.cmdclass & CCC_BLOCK_READ))
return -ENODEV;
- if (card->csd.read_blkbits < 9) {
- printk(KERN_WARNING "%s: read blocksize too small (%u)\n",
- mmc_card_id(card), 1 << card->csd.read_blkbits);
- return -ENODEV;
- }
-
md = mmc_blk_alloc(card);
if (IS_ERR(md))
return PTR_ERR(md);
@@ -419,7 +464,7 @@ static int mmc_blk_probe(struct mmc_card *card)
printk(KERN_INFO "%s: %s %s %luKiB %s\n",
md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
- get_capacity(md->disk) >> 1, mmc_blk_readonly(card)?"(ro)":"");
+ get_capacity(md->disk) >> 1, md->read_only ? "(ro)" : "");
mmc_set_drvdata(card, md);
add_disk(md->disk);
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
index 166c9b0ad04..634ef53e85a 100644
--- a/drivers/mmc/mmci.c
+++ b/drivers/mmc/mmci.c
@@ -19,13 +19,14 @@
#include <linux/highmem.h>
#include <linux/mmc/host.h>
#include <linux/mmc/protocol.h>
+#include <linux/amba/bus.h>
+#include <linux/clk.h>
+#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/sizes.h>
-#include <asm/hardware/amba.h>
-#include <asm/hardware/clock.h>
#include <asm/mach/mmc.h>
#include "mmci.h"
@@ -157,6 +158,13 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
data->error = MMC_ERR_FIFO;
status |= MCI_DATAEND;
+
+ /*
+ * We hit an error condition. Ensure that any data
+ * partially written to a page is properly coherent.
+ */
+ if (host->sg_len && data->flags & MMC_DATA_READ)
+ flush_dcache_page(host->sg_ptr->page);
}
if (status & MCI_DATAEND) {
mmci_stop_data(host);
@@ -292,7 +300,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id, struct pt_regs *regs)
/*
* Unmap the buffer.
*/
- mmci_kunmap_atomic(host, &flags);
+ mmci_kunmap_atomic(host, buffer, &flags);
host->sg_off += len;
host->size -= len;
@@ -301,6 +309,13 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id, struct pt_regs *regs)
if (remain)
break;
+ /*
+ * If we were reading, and we have completed this
+ * page, ensure that the data cache is coherent.
+ */
+ if (status & MCI_RXACTIVE)
+ flush_dcache_page(host->sg_ptr->page);
+
if (!mmci_next_sg(host))
break;
@@ -479,13 +494,9 @@ static int mmci_probe(struct amba_device *dev, void *id)
goto host_free;
}
- ret = clk_use(host->clk);
- if (ret)
- goto clk_free;
-
ret = clk_enable(host->clk);
if (ret)
- goto clk_unuse;
+ goto clk_free;
host->plat = plat;
host->mclk = clk_get_rate(host->clk);
@@ -558,8 +569,6 @@ static int mmci_probe(struct amba_device *dev, void *id)
iounmap(host->base);
clk_disable:
clk_disable(host->clk);
- clk_unuse:
- clk_unuse(host->clk);
clk_free:
clk_put(host->clk);
host_free:
@@ -594,7 +603,6 @@ static int mmci_remove(struct amba_device *dev)
iounmap(host->base);
clk_disable(host->clk);
- clk_unuse(host->clk);
clk_put(host->clk);
mmc_free_host(mmc);
diff --git a/drivers/mmc/mmci.h b/drivers/mmc/mmci.h
index 4589bbd6819..6d7eadc9a67 100644
--- a/drivers/mmc/mmci.h
+++ b/drivers/mmc/mmci.h
@@ -172,8 +172,8 @@ static inline char *mmci_kmap_atomic(struct mmci_host *host, unsigned long *flag
return kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
}
-static inline void mmci_kunmap_atomic(struct mmci_host *host, unsigned long *flags)
+static inline void mmci_kunmap_atomic(struct mmci_host *host, void *buffer, unsigned long *flags)
{
- kunmap_atomic(host->sg_ptr->page, KM_BIO_SRC_IRQ);
+ kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
local_irq_restore(*flags);
}
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index c7eb7c26908..f2575762536 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -90,7 +90,7 @@ static int dma = 2;
* Basic functions
*/
-static inline void wbsd_unlock_config(struct wbsd_host* host)
+static inline void wbsd_unlock_config(struct wbsd_host *host)
{
BUG_ON(host->config == 0);
@@ -98,14 +98,14 @@ static inline void wbsd_unlock_config(struct wbsd_host* host)
outb(host->unlock_code, host->config);
}
-static inline void wbsd_lock_config(struct wbsd_host* host)
+static inline void wbsd_lock_config(struct wbsd_host *host)
{
BUG_ON(host->config == 0);
outb(LOCK_CODE, host->config);
}
-static inline void wbsd_write_config(struct wbsd_host* host, u8 reg, u8 value)
+static inline void wbsd_write_config(struct wbsd_host *host, u8 reg, u8 value)
{
BUG_ON(host->config == 0);
@@ -113,7 +113,7 @@ static inline void wbsd_write_config(struct wbsd_host* host, u8 reg, u8 value)
outb(value, host->config + 1);
}
-static inline u8 wbsd_read_config(struct wbsd_host* host, u8 reg)
+static inline u8 wbsd_read_config(struct wbsd_host *host, u8 reg)
{
BUG_ON(host->config == 0);
@@ -121,13 +121,13 @@ static inline u8 wbsd_read_config(struct wbsd_host* host, u8 reg)
return inb(host->config + 1);
}
-static inline void wbsd_write_index(struct wbsd_host* host, u8 index, u8 value)
+static inline void wbsd_write_index(struct wbsd_host *host, u8 index, u8 value)
{
outb(index, host->base + WBSD_IDXR);
outb(value, host->base + WBSD_DATAR);
}
-static inline u8 wbsd_read_index(struct wbsd_host* host, u8 index)
+static inline u8 wbsd_read_index(struct wbsd_host *host, u8 index)
{
outb(index, host->base + WBSD_IDXR);
return inb(host->base + WBSD_DATAR);
@@ -137,7 +137,7 @@ static inline u8 wbsd_read_index(struct wbsd_host* host, u8 index)
* Common routines
*/
-static void wbsd_init_device(struct wbsd_host* host)
+static void wbsd_init_device(struct wbsd_host *host)
{
u8 setup, ier;
@@ -197,7 +197,7 @@ static void wbsd_init_device(struct wbsd_host* host)
inb(host->base + WBSD_ISR);
}
-static void wbsd_reset(struct wbsd_host* host)
+static void wbsd_reset(struct wbsd_host *host)
{
u8 setup;
@@ -211,14 +211,13 @@ static void wbsd_reset(struct wbsd_host* host)
wbsd_write_index(host, WBSD_IDX_SETUP, setup);
}
-static void wbsd_request_end(struct wbsd_host* host, struct mmc_request* mrq)
+static void wbsd_request_end(struct wbsd_host *host, struct mmc_request *mrq)
{
unsigned long dmaflags;
DBGF("Ending request, cmd (%x)\n", mrq->cmd->opcode);
- if (host->dma >= 0)
- {
+ if (host->dma >= 0) {
/*
* Release ISA DMA controller.
*/
@@ -247,7 +246,7 @@ static void wbsd_request_end(struct wbsd_host* host, struct mmc_request* mrq)
* Scatter/gather functions
*/
-static inline void wbsd_init_sg(struct wbsd_host* host, struct mmc_data* data)
+static inline void wbsd_init_sg(struct wbsd_host *host, struct mmc_data *data)
{
/*
* Get info. about SG list from data structure.
@@ -259,7 +258,7 @@ static inline void wbsd_init_sg(struct wbsd_host* host, struct mmc_data* data)
host->remain = host->cur_sg->length;
}
-static inline int wbsd_next_sg(struct wbsd_host* host)
+static inline int wbsd_next_sg(struct wbsd_host *host)
{
/*
* Skip to next SG entry.
@@ -270,33 +269,32 @@ static inline int wbsd_next_sg(struct wbsd_host* host)
/*
* Any entries left?
*/
- if (host->num_sg > 0)
- {
- host->offset = 0;
- host->remain = host->cur_sg->length;
- }
+ if (host->num_sg > 0) {
+ host->offset = 0;
+ host->remain = host->cur_sg->length;
+ }
return host->num_sg;
}
-static inline char* wbsd_kmap_sg(struct wbsd_host* host)
+static inline char *wbsd_kmap_sg(struct wbsd_host *host)
{
host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ) +
host->cur_sg->offset;
return host->mapped_sg;
}
-static inline void wbsd_kunmap_sg(struct wbsd_host* host)
+static inline void wbsd_kunmap_sg(struct wbsd_host *host)
{
kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);
}
-static inline void wbsd_sg_to_dma(struct wbsd_host* host, struct mmc_data* data)
+static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
{
unsigned int len, i, size;
- struct scatterlist* sg;
- char* dmabuf = host->dma_buffer;
- char* sgbuf;
+ struct scatterlist *sg;
+ char *dmabuf = host->dma_buffer;
+ char *sgbuf;
size = host->size;
@@ -308,8 +306,7 @@ static inline void wbsd_sg_to_dma(struct wbsd_host* host, struct mmc_data* data)
* be the entire list though so make sure that
* we do not transfer too much.
*/
- for (i = 0;i < len;i++)
- {
+ for (i = 0; i < len; i++) {
sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
if (size < sg[i].length)
memcpy(dmabuf, sgbuf, size);
@@ -337,12 +334,12 @@ static inline void wbsd_sg_to_dma(struct wbsd_host* host, struct mmc_data* data)
host->size -= size;
}
-static inline void wbsd_dma_to_sg(struct wbsd_host* host, struct mmc_data* data)
+static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
{
unsigned int len, i, size;
- struct scatterlist* sg;
- char* dmabuf = host->dma_buffer;
- char* sgbuf;
+ struct scatterlist *sg;
+ char *dmabuf = host->dma_buffer;
+ char *sgbuf;
size = host->size;
@@ -354,8 +351,7 @@ static inline void wbsd_dma_to_sg(struct wbsd_host* host, struct mmc_data* data)
* be the entire list though so make sure that
* we do not transfer too much.
*/
- for (i = 0;i < len;i++)
- {
+ for (i = 0; i < len; i++) {
sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
if (size < sg[i].length)
memcpy(sgbuf, dmabuf, size);
@@ -387,46 +383,38 @@ static inline void wbsd_dma_to_sg(struct wbsd_host* host, struct mmc_data* data)
* Command handling
*/
-static inline void wbsd_get_short_reply(struct wbsd_host* host,
- struct mmc_command* cmd)
+static inline void wbsd_get_short_reply(struct wbsd_host *host,
+ struct mmc_command *cmd)
{
/*
* Correct response type?
*/
- if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT)
- {
+ if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT) {
cmd->error = MMC_ERR_INVALID;
return;
}
- cmd->resp[0] =
- wbsd_read_index(host, WBSD_IDX_RESP12) << 24;
- cmd->resp[0] |=
- wbsd_read_index(host, WBSD_IDX_RESP13) << 16;
- cmd->resp[0] |=
- wbsd_read_index(host, WBSD_IDX_RESP14) << 8;
- cmd->resp[0] |=
- wbsd_read_index(host, WBSD_IDX_RESP15) << 0;
- cmd->resp[1] =
- wbsd_read_index(host, WBSD_IDX_RESP16) << 24;
+ cmd->resp[0] = wbsd_read_index(host, WBSD_IDX_RESP12) << 24;
+ cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP13) << 16;
+ cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP14) << 8;
+ cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP15) << 0;
+ cmd->resp[1] = wbsd_read_index(host, WBSD_IDX_RESP16) << 24;
}
-static inline void wbsd_get_long_reply(struct wbsd_host* host,
- struct mmc_command* cmd)
+static inline void wbsd_get_long_reply(struct wbsd_host *host,
+ struct mmc_command *cmd)
{
int i;
/*
* Correct response type?
*/
- if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG)
- {
+ if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG) {
cmd->error = MMC_ERR_INVALID;
return;
}
- for (i = 0;i < 4;i++)
- {
+ for (i = 0; i < 4; i++) {
cmd->resp[i] =
wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24;
cmd->resp[i] |=
@@ -438,7 +426,7 @@ static inline void wbsd_get_long_reply(struct wbsd_host* host,
}
}
-static void wbsd_send_command(struct wbsd_host* host, struct mmc_command* cmd)
+static void wbsd_send_command(struct wbsd_host *host, struct mmc_command *cmd)
{
int i;
u8 status, isr;
@@ -456,7 +444,7 @@ static void wbsd_send_command(struct wbsd_host* host, struct mmc_command* cmd)
* Send the command (CRC calculated by host).
*/
outb(cmd->opcode, host->base + WBSD_CMDR);
- for (i = 3;i >= 0;i--)
+ for (i = 3; i >= 0; i--)
outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR);
cmd->error = MMC_ERR_NONE;
@@ -471,8 +459,7 @@ static void wbsd_send_command(struct wbsd_host* host, struct mmc_command* cmd)
/*
* Do we expect a reply?
*/
- if ((cmd->flags & MMC_RSP_MASK) != MMC_RSP_NONE)
- {
+ if ((cmd->flags & MMC_RSP_MASK) != MMC_RSP_NONE) {
/*
* Read back status.
*/
@@ -488,8 +475,7 @@ static void wbsd_send_command(struct wbsd_host* host, struct mmc_command* cmd)
else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC))
cmd->error = MMC_ERR_BADCRC;
/* All ok */
- else
- {
+ else {
if ((cmd->flags & MMC_RSP_MASK) == MMC_RSP_SHORT)
wbsd_get_short_reply(host, cmd);
else
@@ -504,10 +490,10 @@ static void wbsd_send_command(struct wbsd_host* host, struct mmc_command* cmd)
* Data functions
*/
-static void wbsd_empty_fifo(struct wbsd_host* host)
+static void wbsd_empty_fifo(struct wbsd_host *host)
{
- struct mmc_data* data = host->mrq->cmd->data;
- char* buffer;
+ struct mmc_data *data = host->mrq->cmd->data;
+ char *buffer;
int i, fsr, fifo;
/*
@@ -522,8 +508,7 @@ static void wbsd_empty_fifo(struct wbsd_host* host)
* Drain the fifo. This has a tendency to loop longer
* than the FIFO length (usually one block).
*/
- while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY))
- {
+ while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY)) {
/*
* The size field in the FSR is broken so we have to
* do some guessing.
@@ -535,8 +520,7 @@ static void wbsd_empty_fifo(struct wbsd_host* host)
else
fifo = 1;
- for (i = 0;i < fifo;i++)
- {
+ for (i = 0; i < fifo; i++) {
*buffer = inb(host->base + WBSD_DFR);
buffer++;
host->offset++;
@@ -547,8 +531,7 @@ static void wbsd_empty_fifo(struct wbsd_host* host)
/*
* Transfer done?
*/
- if (data->bytes_xfered == host->size)
- {
+ if (data->bytes_xfered == host->size) {
wbsd_kunmap_sg(host);
return;
}
@@ -556,15 +539,13 @@ static void wbsd_empty_fifo(struct wbsd_host* host)
/*
* End of scatter list entry?
*/
- if (host->remain == 0)
- {
+ if (host->remain == 0) {
wbsd_kunmap_sg(host);
/*
* Get next entry. Check if last.
*/
- if (!wbsd_next_sg(host))
- {
+ if (!wbsd_next_sg(host)) {
/*
* We should never reach this point.
* It means that we're trying to
@@ -594,10 +575,10 @@ static void wbsd_empty_fifo(struct wbsd_host* host)
tasklet_schedule(&host->fifo_tasklet);
}
-static void wbsd_fill_fifo(struct wbsd_host* host)
+static void wbsd_fill_fifo(struct wbsd_host *host)
{
- struct mmc_data* data = host->mrq->cmd->data;
- char* buffer;
+ struct mmc_data *data = host->mrq->cmd->data;
+ char *buffer;
int i, fsr, fifo;
/*
@@ -613,8 +594,7 @@ static void wbsd_fill_fifo(struct wbsd_host* host)
* Fill the fifo. This has a tendency to loop longer
* than the FIFO length (usually one block).
*/
- while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL))
- {
+ while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL)) {
/*
* The size field in the FSR is broken so we have to
* do some guessing.
@@ -626,8 +606,7 @@ static void wbsd_fill_fifo(struct wbsd_host* host)
else
fifo = 15;
- for (i = 16;i > fifo;i--)
- {
+ for (i = 16; i > fifo; i--) {
outb(*buffer, host->base + WBSD_DFR);
buffer++;
host->offset++;
@@ -638,8 +617,7 @@ static void wbsd_fill_fifo(struct wbsd_host* host)
/*
* Transfer done?
*/
- if (data->bytes_xfered == host->size)
- {
+ if (data->bytes_xfered == host->size) {
wbsd_kunmap_sg(host);
return;
}
@@ -647,15 +625,13 @@ static void wbsd_fill_fifo(struct wbsd_host* host)
/*
* End of scatter list entry?
*/
- if (host->remain == 0)
- {
+ if (host->remain == 0) {
wbsd_kunmap_sg(host);
/*
* Get next entry. Check if last.
*/
- if (!wbsd_next_sg(host))
- {
+ if (!wbsd_next_sg(host)) {
/*
* We should never reach this point.
* It means that we're trying to
@@ -684,7 +660,7 @@ static void wbsd_fill_fifo(struct wbsd_host* host)
tasklet_schedule(&host->fifo_tasklet);
}
-static void wbsd_prepare_data(struct wbsd_host* host, struct mmc_data* data)
+static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data)
{
u16 blksize;
u8 setup;
@@ -706,8 +682,10 @@ static void wbsd_prepare_data(struct wbsd_host* host, struct mmc_data* data)
*/
if (data->timeout_ns > 127000000)
wbsd_write_index(host, WBSD_IDX_TAAC, 127);
- else
- wbsd_write_index(host, WBSD_IDX_TAAC, data->timeout_ns/1000000);
+ else {
+ wbsd_write_index(host, WBSD_IDX_TAAC,
+ data->timeout_ns / 1000000);
+ }
if (data->timeout_clks > 255)
wbsd_write_index(host, WBSD_IDX_NSAC, 255);
@@ -722,23 +700,18 @@ static void wbsd_prepare_data(struct wbsd_host* host, struct mmc_data* data)
* Space for CRC must be included in the size.
* Two bytes are needed for each data line.
*/
- if (host->bus_width == MMC_BUS_WIDTH_1)
- {
+ if (host->bus_width == MMC_BUS_WIDTH_1) {
blksize = (1 << data->blksz_bits) + 2;
wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
- }
- else if (host->bus_width == MMC_BUS_WIDTH_4)
- {
+ } else if (host->bus_width == MMC_BUS_WIDTH_4) {
blksize = (1 << data->blksz_bits) + 2 * 4;
- wbsd_write_index(host, WBSD_IDX_PBSMSB, ((blksize >> 4) & 0xF0)
- | WBSD_DATA_WIDTH);
+ wbsd_write_index(host, WBSD_IDX_PBSMSB,
+ ((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH);
wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
- }
- else
- {
+ } else {
data->error = MMC_ERR_INVALID;
return;
}
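
The block-size arithmetic in the hunk above is easy to miss; a minimal sketch of it, with illustrative values (the helper name is invented), is:

	/* Each active data line carries its own 16-bit CRC, so the size
	 * programmed into the controller is the payload plus two bytes
	 * per data line. */
	static unsigned int example_hw_blksize(unsigned int blksz,
					       unsigned int lines)
	{
		return blksz + 2 * lines;	/* 512 -> 514 (1 line), 520 (4 lines) */
	}
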
@@ -755,14 +728,12 @@ static void wbsd_prepare_data(struct wbsd_host* host, struct mmc_data* data)
/*
* DMA transfer?
*/
- if (host->dma >= 0)
- {
+ if (host->dma >= 0) {
/*
* The buffer for DMA is only 64 kB.
*/
BUG_ON(host->size > 0x10000);
- if (host->size > 0x10000)
- {
+ if (host->size > 0x10000) {
data->error = MMC_ERR_INVALID;
return;
}
@@ -794,9 +765,7 @@ static void wbsd_prepare_data(struct wbsd_host* host, struct mmc_data* data)
* Enable DMA on the host.
*/
wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE);
- }
- else
- {
+ } else {
/*
* This flag is used to keep printk
* output to a minimum.
@@ -817,13 +786,10 @@ static void wbsd_prepare_data(struct wbsd_host* host, struct mmc_data* data)
* Set up FIFO threshold levels (and fill
* buffer if doing a write).
*/
- if (data->flags & MMC_DATA_READ)
- {
+ if (data->flags & MMC_DATA_READ) {
wbsd_write_index(host, WBSD_IDX_FIFOEN,
WBSD_FIFOEN_FULL | 8);
- }
- else
- {
+ } else {
wbsd_write_index(host, WBSD_IDX_FIFOEN,
WBSD_FIFOEN_EMPTY | 8);
wbsd_fill_fifo(host);
@@ -833,7 +799,7 @@ static void wbsd_prepare_data(struct wbsd_host* host, struct mmc_data* data)
data->error = MMC_ERR_NONE;
}
-static void wbsd_finish_data(struct wbsd_host* host, struct mmc_data* data)
+static void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data)
{
unsigned long dmaflags;
int count;
@@ -851,16 +817,14 @@ static void wbsd_finish_data(struct wbsd_host* host, struct mmc_data* data)
* Wait for the controller to leave data
* transfer state.
*/
- do
- {
+ do {
status = wbsd_read_index(host, WBSD_IDX_STATUS);
} while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE));
/*
* DMA transfer?
*/
- if (host->dma >= 0)
- {
+ if (host->dma >= 0) {
/*
* Disable DMA on the host.
*/
@@ -878,16 +842,13 @@ static void wbsd_finish_data(struct wbsd_host* host, struct mmc_data* data)
/*
* Any leftover data?
*/
- if (count)
- {
+ if (count) {
printk(KERN_ERR "%s: Incomplete DMA transfer. "
"%d bytes left.\n",
mmc_hostname(host->mmc), count);
data->error = MMC_ERR_FAILED;
- }
- else
- {
+ } else {
/*
* Transfer data from DMA buffer to
* SG list.
@@ -910,10 +871,10 @@ static void wbsd_finish_data(struct wbsd_host* host, struct mmc_data* data)
* *
\*****************************************************************************/
-static void wbsd_request(struct mmc_host* mmc, struct mmc_request* mrq)
+static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
- struct wbsd_host* host = mmc_priv(mmc);
- struct mmc_command* cmd;
+ struct wbsd_host *host = mmc_priv(mmc);
+ struct mmc_command *cmd;
/*
* Disable tasklets to avoid a deadlock.
@@ -930,8 +891,7 @@ static void wbsd_request(struct mmc_host* mmc, struct mmc_request* mrq)
* If there is no card in the slot then
 * timeout immediately.
*/
- if (!(host->flags & WBSD_FCARD_PRESENT))
- {
+ if (!(host->flags & WBSD_FCARD_PRESENT)) {
cmd->error = MMC_ERR_TIMEOUT;
goto done;
}
@@ -939,8 +899,7 @@ static void wbsd_request(struct mmc_host* mmc, struct mmc_request* mrq)
/*
* Does the request include data?
*/
- if (cmd->data)
- {
+ if (cmd->data) {
wbsd_prepare_data(host, cmd->data);
if (cmd->data->error != MMC_ERR_NONE)
@@ -954,8 +913,7 @@ static void wbsd_request(struct mmc_host* mmc, struct mmc_request* mrq)
* will be finished after the data has
 * transferred.
*/
- if (cmd->data && (cmd->error == MMC_ERR_NONE))
- {
+ if (cmd->data && (cmd->error == MMC_ERR_NONE)) {
/*
* Dirty fix for hardware bug.
*/
@@ -973,14 +931,14 @@ done:
spin_unlock_bh(&host->lock);
}
-static void wbsd_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
+static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
- struct wbsd_host* host = mmc_priv(mmc);
+ struct wbsd_host *host = mmc_priv(mmc);
u8 clk, setup, pwr;
DBGF("clock %uHz busmode %u powermode %u cs %u Vdd %u width %u\n",
- ios->clock, ios->bus_mode, ios->power_mode, ios->chip_select,
- ios->vdd, ios->bus_width);
+ ios->clock, ios->bus_mode, ios->power_mode, ios->chip_select,
+ ios->vdd, ios->bus_width);
spin_lock_bh(&host->lock);
@@ -1004,8 +962,7 @@ static void wbsd_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
* Only write to the clock register when
* there is an actual change.
*/
- if (clk != host->clk)
- {
+ if (clk != host->clk) {
wbsd_write_index(host, WBSD_IDX_CLK, clk);
host->clk = clk;
}
@@ -1013,8 +970,7 @@ static void wbsd_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
/*
* Power up card.
*/
- if (ios->power_mode != MMC_POWER_OFF)
- {
+ if (ios->power_mode != MMC_POWER_OFF) {
pwr = inb(host->base + WBSD_CSR);
pwr &= ~WBSD_POWER_N;
outb(pwr, host->base + WBSD_CSR);
@@ -1026,23 +982,19 @@ static void wbsd_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
* that needs to be disabled.
*/
setup = wbsd_read_index(host, WBSD_IDX_SETUP);
- if (ios->chip_select == MMC_CS_HIGH)
- {
+ if (ios->chip_select == MMC_CS_HIGH) {
BUG_ON(ios->bus_width != MMC_BUS_WIDTH_1);
setup |= WBSD_DAT3_H;
host->flags |= WBSD_FIGNORE_DETECT;
- }
- else
- {
- if (setup & WBSD_DAT3_H)
- {
+ } else {
+ if (setup & WBSD_DAT3_H) {
setup &= ~WBSD_DAT3_H;
/*
 * We cannot resume card detection immediately
* because of capacitance and delays in the chip.
*/
- mod_timer(&host->ignore_timer, jiffies + HZ/100);
+ mod_timer(&host->ignore_timer, jiffies + HZ / 100);
}
}
wbsd_write_index(host, WBSD_IDX_SETUP, setup);
@@ -1056,9 +1008,9 @@ static void wbsd_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
spin_unlock_bh(&host->lock);
}
-static int wbsd_get_ro(struct mmc_host* mmc)
+static int wbsd_get_ro(struct mmc_host *mmc)
{
- struct wbsd_host* host = mmc_priv(mmc);
+ struct wbsd_host *host = mmc_priv(mmc);
u8 csr;
spin_lock_bh(&host->lock);
@@ -1096,7 +1048,7 @@ static struct mmc_host_ops wbsd_ops = {
static void wbsd_reset_ignore(unsigned long data)
{
- struct wbsd_host *host = (struct wbsd_host*)data;
+ struct wbsd_host *host = (struct wbsd_host *)data;
BUG_ON(host == NULL);
@@ -1119,7 +1071,7 @@ static void wbsd_reset_ignore(unsigned long data)
* Tasklets
*/
-static inline struct mmc_data* wbsd_get_data(struct wbsd_host* host)
+static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host)
{
WARN_ON(!host->mrq);
if (!host->mrq)
@@ -1138,14 +1090,13 @@ static inline struct mmc_data* wbsd_get_data(struct wbsd_host* host)
static void wbsd_tasklet_card(unsigned long param)
{
- struct wbsd_host* host = (struct wbsd_host*)param;
+ struct wbsd_host *host = (struct wbsd_host *)param;
u8 csr;
int delay = -1;
spin_lock(&host->lock);
- if (host->flags & WBSD_FIGNORE_DETECT)
- {
+ if (host->flags & WBSD_FIGNORE_DETECT) {
spin_unlock(&host->lock);
return;
}
@@ -1153,23 +1104,18 @@ static void wbsd_tasklet_card(unsigned long param)
csr = inb(host->base + WBSD_CSR);
WARN_ON(csr == 0xff);
- if (csr & WBSD_CARDPRESENT)
- {
- if (!(host->flags & WBSD_FCARD_PRESENT))
- {
+ if (csr & WBSD_CARDPRESENT) {
+ if (!(host->flags & WBSD_FCARD_PRESENT)) {
DBG("Card inserted\n");
host->flags |= WBSD_FCARD_PRESENT;
delay = 500;
}
- }
- else if (host->flags & WBSD_FCARD_PRESENT)
- {
+ } else if (host->flags & WBSD_FCARD_PRESENT) {
DBG("Card removed\n");
host->flags &= ~WBSD_FCARD_PRESENT;
- if (host->mrq)
- {
+ if (host->mrq) {
printk(KERN_ERR "%s: Card removed during transfer!\n",
mmc_hostname(host->mmc));
wbsd_reset(host);
@@ -1193,8 +1139,8 @@ static void wbsd_tasklet_card(unsigned long param)
static void wbsd_tasklet_fifo(unsigned long param)
{
- struct wbsd_host* host = (struct wbsd_host*)param;
- struct mmc_data* data;
+ struct wbsd_host *host = (struct wbsd_host *)param;
+ struct mmc_data *data;
spin_lock(&host->lock);
@@ -1213,8 +1159,7 @@ static void wbsd_tasklet_fifo(unsigned long param)
/*
* Done?
*/
- if (host->size == data->bytes_xfered)
- {
+ if (host->size == data->bytes_xfered) {
wbsd_write_index(host, WBSD_IDX_FIFOEN, 0);
tasklet_schedule(&host->finish_tasklet);
}
@@ -1225,8 +1170,8 @@ end:
static void wbsd_tasklet_crc(unsigned long param)
{
- struct wbsd_host* host = (struct wbsd_host*)param;
- struct mmc_data* data;
+ struct wbsd_host *host = (struct wbsd_host *)param;
+ struct mmc_data *data;
spin_lock(&host->lock);
@@ -1249,8 +1194,8 @@ end:
static void wbsd_tasklet_timeout(unsigned long param)
{
- struct wbsd_host* host = (struct wbsd_host*)param;
- struct mmc_data* data;
+ struct wbsd_host *host = (struct wbsd_host *)param;
+ struct mmc_data *data;
spin_lock(&host->lock);
@@ -1273,8 +1218,8 @@ end:
static void wbsd_tasklet_finish(unsigned long param)
{
- struct wbsd_host* host = (struct wbsd_host*)param;
- struct mmc_data* data;
+ struct wbsd_host *host = (struct wbsd_host *)param;
+ struct mmc_data *data;
spin_lock(&host->lock);
@@ -1294,14 +1239,13 @@ end:
static void wbsd_tasklet_block(unsigned long param)
{
- struct wbsd_host* host = (struct wbsd_host*)param;
- struct mmc_data* data;
+ struct wbsd_host *host = (struct wbsd_host *)param;
+ struct mmc_data *data;
spin_lock(&host->lock);
if ((wbsd_read_index(host, WBSD_IDX_CRCSTATUS) & WBSD_CRC_MASK) !=
- WBSD_CRC_OK)
- {
+ WBSD_CRC_OK) {
data = wbsd_get_data(host);
if (!data)
goto end;
@@ -1323,7 +1267,7 @@ end:
static irqreturn_t wbsd_irq(int irq, void *dev_id, struct pt_regs *regs)
{
- struct wbsd_host* host = dev_id;
+ struct wbsd_host *host = dev_id;
int isr;
isr = inb(host->base + WBSD_ISR);
@@ -1365,10 +1309,10 @@ static irqreturn_t wbsd_irq(int irq, void *dev_id, struct pt_regs *regs)
* Allocate/free MMC structure.
*/
-static int __devinit wbsd_alloc_mmc(struct device* dev)
+static int __devinit wbsd_alloc_mmc(struct device *dev)
{
- struct mmc_host* mmc;
- struct wbsd_host* host;
+ struct mmc_host *mmc;
+ struct wbsd_host *host;
/*
* Allocate MMC structure.
@@ -1388,7 +1332,7 @@ static int __devinit wbsd_alloc_mmc(struct device* dev)
mmc->ops = &wbsd_ops;
mmc->f_min = 375000;
mmc->f_max = 24000000;
- mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
mmc->caps = MMC_CAP_4_BIT_DATA;
spin_lock_init(&host->lock);
@@ -1424,10 +1368,10 @@ static int __devinit wbsd_alloc_mmc(struct device* dev)
return 0;
}
-static void __devexit wbsd_free_mmc(struct device* dev)
+static void __devexit wbsd_free_mmc(struct device *dev)
{
- struct mmc_host* mmc;
- struct wbsd_host* host;
+ struct mmc_host *mmc;
+ struct wbsd_host *host;
mmc = dev_get_drvdata(dev);
if (!mmc)
@@ -1447,7 +1391,7 @@ static void __devexit wbsd_free_mmc(struct device* dev)
* Scan for known chip id:s
*/
-static int __devinit wbsd_scan(struct wbsd_host* host)
+static int __devinit wbsd_scan(struct wbsd_host *host)
{
int i, j, k;
int id;
@@ -1456,13 +1400,11 @@ static int __devinit wbsd_scan(struct wbsd_host* host)
* Iterate through all ports, all codes to
* find hardware that is in our known list.
*/
- for (i = 0;i < sizeof(config_ports)/sizeof(int);i++)
- {
+ for (i = 0; i < ARRAY_SIZE(config_ports); i++) {
if (!request_region(config_ports[i], 2, DRIVER_NAME))
continue;
- for (j = 0;j < sizeof(unlock_codes)/sizeof(int);j++)
- {
+ for (j = 0; j < ARRAY_SIZE(unlock_codes); j++) {
id = 0xFFFF;
host->config = config_ports[i];
@@ -1478,18 +1420,15 @@ static int __devinit wbsd_scan(struct wbsd_host* host)
wbsd_lock_config(host);
- for (k = 0;k < sizeof(valid_ids)/sizeof(int);k++)
- {
- if (id == valid_ids[k])
- {
+ for (k = 0; k < ARRAY_SIZE(valid_ids); k++) {
+ if (id == valid_ids[k]) {
host->chip_id = id;
return 0;
}
}
- if (id != 0xFFFF)
- {
+ if (id != 0xFFFF) {
DBG("Unknown hardware (id %x) found at %x\n",
id, config_ports[i]);
}
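
A minimal sketch of the ARRAY_SIZE() idiom this hunk switches to (the port values are invented for illustration): the macro, from <linux/kernel.h>, expands to the element count of a statically sized array, which is what the open-coded sizeof(x)/sizeof(int) expressions computed, but it stays correct if the element type ever changes.

	#include <linux/kernel.h>

	static const int example_ports[] = { 0x2E, 0x4E };

	static void example_scan(void)
	{
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(example_ports); i++)
			printk(KERN_DEBUG "probing %#x\n", example_ports[i]);
	}
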
@@ -1508,7 +1447,7 @@ static int __devinit wbsd_scan(struct wbsd_host* host)
* Allocate/free io port ranges
*/
-static int __devinit wbsd_request_region(struct wbsd_host* host, int base)
+static int __devinit wbsd_request_region(struct wbsd_host *host, int base)
{
if (io & 0x7)
return -EINVAL;
@@ -1521,7 +1460,7 @@ static int __devinit wbsd_request_region(struct wbsd_host* host, int base)
return 0;
}
-static void __devexit wbsd_release_regions(struct wbsd_host* host)
+static void __devexit wbsd_release_regions(struct wbsd_host *host)
{
if (host->base)
release_region(host->base, 8);
@@ -1538,7 +1477,7 @@ static void __devexit wbsd_release_regions(struct wbsd_host* host)
* Allocate/free DMA port and buffer
*/
-static void __devinit wbsd_request_dma(struct wbsd_host* host, int dma)
+static void __devinit wbsd_request_dma(struct wbsd_host *host, int dma)
{
if (dma < 0)
return;
@@ -1582,8 +1521,8 @@ kfree:
*/
BUG_ON(1);
- dma_unmap_single(host->mmc->dev, host->dma_addr, WBSD_DMA_SIZE,
- DMA_BIDIRECTIONAL);
+ dma_unmap_single(host->mmc->dev, host->dma_addr,
+ WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
host->dma_addr = (dma_addr_t)NULL;
kfree(host->dma_buffer);
@@ -1597,11 +1536,12 @@ err:
"Falling back on FIFO.\n", dma);
}
-static void __devexit wbsd_release_dma(struct wbsd_host* host)
+static void __devexit wbsd_release_dma(struct wbsd_host *host)
{
- if (host->dma_addr)
- dma_unmap_single(host->mmc->dev, host->dma_addr, WBSD_DMA_SIZE,
- DMA_BIDIRECTIONAL);
+ if (host->dma_addr) {
+ dma_unmap_single(host->mmc->dev, host->dma_addr,
+ WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
+ }
kfree(host->dma_buffer);
if (host->dma >= 0)
free_dma(host->dma);
@@ -1615,7 +1555,7 @@ static void __devexit wbsd_release_dma(struct wbsd_host* host)
* Allocate/free IRQ.
*/
-static int __devinit wbsd_request_irq(struct wbsd_host* host, int irq)
+static int __devinit wbsd_request_irq(struct wbsd_host *host, int irq)
{
int ret;
@@ -1632,17 +1572,23 @@ static int __devinit wbsd_request_irq(struct wbsd_host* host, int irq)
/*
* Set up tasklets.
*/
- tasklet_init(&host->card_tasklet, wbsd_tasklet_card, (unsigned long)host);
- tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo, (unsigned long)host);
- tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc, (unsigned long)host);
- tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout, (unsigned long)host);
- tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish, (unsigned long)host);
- tasklet_init(&host->block_tasklet, wbsd_tasklet_block, (unsigned long)host);
+ tasklet_init(&host->card_tasklet, wbsd_tasklet_card,
+ (unsigned long)host);
+ tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo,
+ (unsigned long)host);
+ tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc,
+ (unsigned long)host);
+ tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout,
+ (unsigned long)host);
+ tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish,
+ (unsigned long)host);
+ tasklet_init(&host->block_tasklet, wbsd_tasklet_block,
+ (unsigned long)host);
return 0;
}
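
A minimal sketch, with placeholder names, of the tasklet split these calls wire up: tasklet_init() binds a bottom-half handler to a context pointer once at setup, the hard interrupt handler only schedules it, and process-context paths take the lock with the _bh variant so they cannot deadlock against the tasklets on the same CPU.

	#include <linux/spinlock.h>
	#include <linux/interrupt.h>

	struct example_host {
		spinlock_t lock;
		struct tasklet_struct card_tasklet;
	};

	static void example_tasklet_card(unsigned long param)
	{
		struct example_host *host = (struct example_host *)param;

		spin_lock(&host->lock);
		/* deferred card-detect work would run here */
		spin_unlock(&host->lock);
	}

	static void example_setup_tasklets(struct example_host *host)
	{
		tasklet_init(&host->card_tasklet, example_tasklet_card,
			     (unsigned long)host);
	}

	static irqreturn_t example_irq(int irq, void *dev_id, struct pt_regs *regs)
	{
		struct example_host *host = dev_id;

		tasklet_schedule(&host->card_tasklet);
		return IRQ_HANDLED;
	}
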
-static void __devexit wbsd_release_irq(struct wbsd_host* host)
+static void __devexit wbsd_release_irq(struct wbsd_host *host)
{
if (!host->irq)
return;
@@ -1663,7 +1609,7 @@ static void __devexit wbsd_release_irq(struct wbsd_host* host)
* Allocate all resources for the host.
*/
-static int __devinit wbsd_request_resources(struct wbsd_host* host,
+static int __devinit wbsd_request_resources(struct wbsd_host *host,
int base, int irq, int dma)
{
int ret;
@@ -1694,7 +1640,7 @@ static int __devinit wbsd_request_resources(struct wbsd_host* host,
* Release all resources for the host.
*/
-static void __devexit wbsd_release_resources(struct wbsd_host* host)
+static void __devexit wbsd_release_resources(struct wbsd_host *host)
{
wbsd_release_dma(host);
wbsd_release_irq(host);
@@ -1705,7 +1651,7 @@ static void __devexit wbsd_release_resources(struct wbsd_host* host)
* Configure the resources the chip should use.
*/
-static void wbsd_chip_config(struct wbsd_host* host)
+static void wbsd_chip_config(struct wbsd_host *host)
{
wbsd_unlock_config(host);
@@ -1749,7 +1695,7 @@ static void wbsd_chip_config(struct wbsd_host* host)
* Check that configured resources are correct.
*/
-static int wbsd_chip_validate(struct wbsd_host* host)
+static int wbsd_chip_validate(struct wbsd_host *host)
{
int base, irq, dma;
@@ -1789,7 +1735,7 @@ static int wbsd_chip_validate(struct wbsd_host* host)
* Powers down the SD function
*/
-static void wbsd_chip_poweroff(struct wbsd_host* host)
+static void wbsd_chip_poweroff(struct wbsd_host *host)
{
wbsd_unlock_config(host);
@@ -1805,11 +1751,11 @@ static void wbsd_chip_poweroff(struct wbsd_host* host)
* *
\*****************************************************************************/
-static int __devinit wbsd_init(struct device* dev, int base, int irq, int dma,
+static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma,
int pnp)
{
- struct wbsd_host* host = NULL;
- struct mmc_host* mmc = NULL;
+ struct wbsd_host *host = NULL;
+ struct mmc_host *mmc = NULL;
int ret;
ret = wbsd_alloc_mmc(dev);
@@ -1823,16 +1769,12 @@ static int __devinit wbsd_init(struct device* dev, int base, int irq, int dma,
* Scan for hardware.
*/
ret = wbsd_scan(host);
- if (ret)
- {
- if (pnp && (ret == -ENODEV))
- {
+ if (ret) {
+ if (pnp && (ret == -ENODEV)) {
printk(KERN_WARNING DRIVER_NAME
": Unable to confirm device presence. You may "
"experience lock-ups.\n");
- }
- else
- {
+ } else {
wbsd_free_mmc(dev);
return ret;
}
@@ -1842,8 +1784,7 @@ static int __devinit wbsd_init(struct device* dev, int base, int irq, int dma,
* Request resources.
*/
ret = wbsd_request_resources(host, io, irq, dma);
- if (ret)
- {
+ if (ret) {
wbsd_release_resources(host);
wbsd_free_mmc(dev);
return ret;
@@ -1852,18 +1793,15 @@ static int __devinit wbsd_init(struct device* dev, int base, int irq, int dma,
/*
* See if chip needs to be configured.
*/
- if (pnp)
- {
- if ((host->config != 0) && !wbsd_chip_validate(host))
- {
+ if (pnp) {
+ if ((host->config != 0) && !wbsd_chip_validate(host)) {
printk(KERN_WARNING DRIVER_NAME
": PnP active but chip not configured! "
"You probably have a buggy BIOS. "
"Configuring chip manually.\n");
wbsd_chip_config(host);
}
- }
- else
+ } else
wbsd_chip_config(host);
/*
@@ -1871,8 +1809,7 @@ static int __devinit wbsd_init(struct device* dev, int base, int irq, int dma,
* Not tested.
*/
#ifdef CONFIG_PM
- if (host->config)
- {
+ if (host->config) {
wbsd_unlock_config(host);
wbsd_write_config(host, WBSD_CONF_PME, 0xA0);
wbsd_lock_config(host);
@@ -1905,10 +1842,10 @@ static int __devinit wbsd_init(struct device* dev, int base, int irq, int dma,
return 0;
}
-static void __devexit wbsd_shutdown(struct device* dev, int pnp)
+static void __devexit wbsd_shutdown(struct device *dev, int pnp)
{
- struct mmc_host* mmc = dev_get_drvdata(dev);
- struct wbsd_host* host;
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct wbsd_host *host;
if (!mmc)
return;
@@ -1932,12 +1869,12 @@ static void __devexit wbsd_shutdown(struct device* dev, int pnp)
* Non-PnP
*/
-static int __devinit wbsd_probe(struct platform_device* dev)
+static int __devinit wbsd_probe(struct platform_device *dev)
{
return wbsd_init(&dev->dev, io, irq, dma, 0);
}
-static int __devexit wbsd_remove(struct platform_device* dev)
+static int __devexit wbsd_remove(struct platform_device *dev)
{
wbsd_shutdown(&dev->dev, 0);
@@ -1951,7 +1888,7 @@ static int __devexit wbsd_remove(struct platform_device* dev)
#ifdef CONFIG_PNP
static int __devinit
-wbsd_pnp_probe(struct pnp_dev * pnpdev, const struct pnp_device_id *dev_id)
+wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id)
{
int io, irq, dma;
@@ -1970,7 +1907,7 @@ wbsd_pnp_probe(struct pnp_dev * pnpdev, const struct pnp_device_id *dev_id)
return wbsd_init(&pnpdev->dev, io, irq, dma, 1);
}
-static void __devexit wbsd_pnp_remove(struct pnp_dev * dev)
+static void __devexit wbsd_pnp_remove(struct pnp_dev *dev)
{
wbsd_shutdown(&dev->dev, 1);
}
@@ -1983,37 +1920,54 @@ static void __devexit wbsd_pnp_remove(struct pnp_dev * dev)
#ifdef CONFIG_PM
-static int wbsd_suspend(struct platform_device *dev, pm_message_t state)
+static int wbsd_suspend(struct wbsd_host *host, pm_message_t state)
+{
+ BUG_ON(host == NULL);
+
+ return mmc_suspend_host(host->mmc, state);
+}
+
+static int wbsd_resume(struct wbsd_host *host)
+{
+ BUG_ON(host == NULL);
+
+ wbsd_init_device(host);
+
+ return mmc_resume_host(host->mmc);
+}
+
+static int wbsd_platform_suspend(struct platform_device *dev,
+ pm_message_t state)
{
struct mmc_host *mmc = platform_get_drvdata(dev);
struct wbsd_host *host;
int ret;
- if (!mmc)
+ if (mmc == NULL)
return 0;
- DBG("Suspending...\n");
-
- ret = mmc_suspend_host(mmc, state);
- if (!ret)
- return ret;
+ DBGF("Suspending...\n");
host = mmc_priv(mmc);
+ ret = wbsd_suspend(host, state);
+ if (ret)
+ return ret;
+
wbsd_chip_poweroff(host);
return 0;
}
-static int wbsd_resume(struct platform_device *dev)
+static int wbsd_platform_resume(struct platform_device *dev)
{
struct mmc_host *mmc = platform_get_drvdata(dev);
struct wbsd_host *host;
- if (!mmc)
+ if (mmc == NULL)
return 0;
- DBG("Resuming...\n");
+ DBGF("Resuming...\n");
host = mmc_priv(mmc);
@@ -2024,15 +1978,68 @@ static int wbsd_resume(struct platform_device *dev)
*/
mdelay(5);
- wbsd_init_device(host);
+ return wbsd_resume(host);
+}
+
+#ifdef CONFIG_PNP
+
+static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
+{
+ struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
+ struct wbsd_host *host;
+
+ if (mmc == NULL)
+ return 0;
+
+ DBGF("Suspending...\n");
+
+ host = mmc_priv(mmc);
+
+ return wbsd_suspend(host, state);
+}
+
+static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
+ struct wbsd_host *host;
+
+ if (mmc == NULL)
+ return 0;
+
+ DBGF("Resuming...\n");
+
+ host = mmc_priv(mmc);
+
+ /*
+ * See if chip needs to be configured.
+ */
+ if (host->config != 0) {
+ if (!wbsd_chip_validate(host)) {
+ printk(KERN_WARNING DRIVER_NAME
+ ": PnP active but chip not configured! "
+ "You probably have a buggy BIOS. "
+ "Configuring chip manually.\n");
+ wbsd_chip_config(host);
+ }
+ }
- return mmc_resume_host(mmc);
+ /*
+ * Allow device to initialise itself properly.
+ */
+ mdelay(5);
+
+ return wbsd_resume(host);
}
+#endif /* CONFIG_PNP */
+
#else /* CONFIG_PM */
-#define wbsd_suspend NULL
-#define wbsd_resume NULL
+#define wbsd_platform_suspend NULL
+#define wbsd_platform_resume NULL
+
+#define wbsd_pnp_suspend NULL
+#define wbsd_pnp_resume NULL
#endif /* CONFIG_PM */
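
A minimal sketch of the shape of this refactor, using generic names: each bus-specific suspend entry point reduces to a thin wrapper around one chip-level helper, so the mmc_suspend_host()/mmc_resume_host() calls live in exactly one place and the platform and PnP paths can share them.

	static int example_chip_suspend(struct wbsd_host *host, pm_message_t state)
	{
		return mmc_suspend_host(host->mmc, state);
	}

	static int example_platform_suspend(struct platform_device *dev,
					    pm_message_t state)
	{
		struct mmc_host *mmc = platform_get_drvdata(dev);

		if (mmc == NULL)
			return 0;

		return example_chip_suspend(mmc_priv(mmc), state);
	}
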
@@ -2042,8 +2049,8 @@ static struct platform_driver wbsd_driver = {
.probe = wbsd_probe,
.remove = __devexit_p(wbsd_remove),
- .suspend = wbsd_suspend,
- .resume = wbsd_resume,
+ .suspend = wbsd_platform_suspend,
+ .resume = wbsd_platform_resume,
.driver = {
.name = DRIVER_NAME,
},
@@ -2056,6 +2063,9 @@ static struct pnp_driver wbsd_pnp_driver = {
.id_table = pnp_dev_table,
.probe = wbsd_pnp_probe,
.remove = __devexit_p(wbsd_pnp_remove),
+
+ .suspend = wbsd_pnp_suspend,
+ .resume = wbsd_pnp_resume,
};
#endif /* CONFIG_PNP */
@@ -2075,25 +2085,30 @@ static int __init wbsd_drv_init(void)
#ifdef CONFIG_PNP
- if (!nopnp)
- {
+ if (!nopnp) {
result = pnp_register_driver(&wbsd_pnp_driver);
if (result < 0)
return result;
}
-
#endif /* CONFIG_PNP */
- if (nopnp)
- {
+ if (nopnp) {
result = platform_driver_register(&wbsd_driver);
if (result < 0)
return result;
- wbsd_device = platform_device_register_simple(DRIVER_NAME, -1,
- NULL, 0);
- if (IS_ERR(wbsd_device))
- return PTR_ERR(wbsd_device);
+ wbsd_device = platform_device_alloc(DRIVER_NAME, -1);
+ if (!wbsd_device) {
+ platform_driver_unregister(&wbsd_driver);
+ return -ENOMEM;
+ }
+
+ result = platform_device_add(wbsd_device);
+ if (result) {
+ platform_device_put(wbsd_device);
+ platform_driver_unregister(&wbsd_driver);
+ return result;
+ }
}
return 0;
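
A minimal sketch of the two-step registration this hunk switches to; "example" is a placeholder device name. Splitting allocation from registration lets each failure be unwound precisely: the device reference is dropped with platform_device_put() if the add fails, and the caller can still unregister the driver before returning the error.

	#include <linux/platform_device.h>

	static int __init example_add_device(void)
	{
		struct platform_device *pdev;
		int ret;

		pdev = platform_device_alloc("example", -1);
		if (!pdev)
			return -ENOMEM;

		ret = platform_device_add(pdev);
		if (ret) {
			platform_device_put(pdev);	/* drops the reference taken by alloc */
			return ret;
		}

		return 0;
	}
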
@@ -2108,8 +2123,7 @@ static void __exit wbsd_drv_exit(void)
#endif /* CONFIG_PNP */
- if (nopnp)
- {
+ if (nopnp) {
platform_device_unregister(wbsd_device);
platform_driver_unregister(&wbsd_driver);
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index b9b77cf39a1..7abd7fee0dd 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -473,14 +473,6 @@ config MTD_IXP2000
IXDP425 and Coyote. If you have an IXP2000 based board and
would like to use the flash chips on it, say 'Y'.
-config MTD_EPXA10DB
- tristate "CFI Flash device mapped on Epxa10db"
- depends on MTD_CFI && MTD_PARTITIONS && ARCH_CAMELOT
- help
- This enables support for the flash devices on the Altera
- Excalibur XA10 Development Board. If you are building a kernel
- for on of these boards then you should say 'Y' otherwise say 'N'.
-
config MTD_FORTUNET
tristate "CFI Flash device mapped on the FortuNet board"
depends on MTD_CFI && MTD_PARTITIONS && SA1100_FORTUNET
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 2f7e254912f..ab71f172eb7 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o
obj-$(CONFIG_MTD_CSTM_MIPS_IXX) += cstm_mips_ixx.o
obj-$(CONFIG_MTD_DC21285) += dc21285.o
obj-$(CONFIG_MTD_DILNETPC) += dilnetpc.o
-obj-$(CONFIG_MTD_EPXA10DB) += epxa10db-flash.o
obj-$(CONFIG_MTD_IQ80310) += iq80310.o
obj-$(CONFIG_MTD_L440GX) += l440gx.o
obj-$(CONFIG_MTD_AMD76XROM) += amd76xrom.o
diff --git a/drivers/mtd/maps/epxa10db-flash.c b/drivers/mtd/maps/epxa10db-flash.c
deleted file mode 100644
index 265b079fe93..00000000000
--- a/drivers/mtd/maps/epxa10db-flash.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Flash memory access on EPXA based devices
- *
- * (C) 2000 Nicolas Pitre <nico@cam.org>
- * Copyright (C) 2001 Altera Corporation
- * Copyright (C) 2001 Red Hat, Inc.
- *
- * $Id: epxa10db-flash.c,v 1.15 2005/11/07 11:14:27 gleixner Exp $
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-#include <asm/hardware.h>
-
-#ifdef CONFIG_EPXA10DB
-#define BOARD_NAME "EPXA10DB"
-#else
-#define BOARD_NAME "EPXA1DB"
-#endif
-
-static int nr_parts = 0;
-static struct mtd_partition *parts;
-
-static struct mtd_info *mymtd;
-
-static int epxa_default_partitions(struct mtd_info *master, struct mtd_partition **pparts);
-
-
-static struct map_info epxa_map = {
- .name = "EPXA flash",
- .size = FLASH_SIZE,
- .bankwidth = 2,
- .phys = FLASH_START,
-};
-
-static const char *probes[] = { "RedBoot", "afs", NULL };
-
-static int __init epxa_mtd_init(void)
-{
- int i;
-
- printk(KERN_NOTICE "%s flash device: 0x%x at 0x%x\n", BOARD_NAME, FLASH_SIZE, FLASH_START);
-
- epxa_map.virt = ioremap(FLASH_START, FLASH_SIZE);
- if (!epxa_map.virt) {
- printk("Failed to ioremap %s flash\n",BOARD_NAME);
- return -EIO;
- }
- simple_map_init(&epxa_map);
-
- mymtd = do_map_probe("cfi_probe", &epxa_map);
- if (!mymtd) {
- iounmap((void *)epxa_map.virt);
- return -ENXIO;
- }
-
- mymtd->owner = THIS_MODULE;
-
- /* Unlock the flash device. */
- if(mymtd->unlock){
- for (i=0; i<mymtd->numeraseregions;i++){
- int j;
- for(j=0;j<mymtd->eraseregions[i].numblocks;j++){
- mymtd->unlock(mymtd,mymtd->eraseregions[i].offset + j * mymtd->eraseregions[i].erasesize,mymtd->eraseregions[i].erasesize);
- }
- }
- }
-
-#ifdef CONFIG_MTD_PARTITIONS
- nr_parts = parse_mtd_partitions(mymtd, probes, &parts, 0);
-
- if (nr_parts > 0) {
- add_mtd_partitions(mymtd, parts, nr_parts);
- return 0;
- }
-#endif
- /* No recognised partitioning schemes found - use defaults */
- nr_parts = epxa_default_partitions(mymtd, &parts);
- if (nr_parts > 0) {
- add_mtd_partitions(mymtd, parts, nr_parts);
- return 0;
- }
-
- /* If all else fails... */
- add_mtd_device(mymtd);
- return 0;
-}
-
-static void __exit epxa_mtd_cleanup(void)
-{
- if (mymtd) {
- if (nr_parts)
- del_mtd_partitions(mymtd);
- else
- del_mtd_device(mymtd);
- map_destroy(mymtd);
- }
- if (epxa_map.virt) {
- iounmap((void *)epxa_map.virt);
- epxa_map.virt = 0;
- }
-}
-
-
-/*
- * This will do for now, once we decide which bootldr we're finally
- * going to use then we'll remove this function and do it properly
- *
- * Partions are currently (as offsets from base of flash):
- * 0x00000000 - 0x003FFFFF - bootloader (!)
- * 0x00400000 - 0x00FFFFFF - Flashdisk
- */
-
-static int __init epxa_default_partitions(struct mtd_info *master, struct mtd_partition **pparts)
-{
- struct mtd_partition *parts;
- int ret, i;
- int npartitions = 0;
- char *names;
- const char *name = "jffs";
-
- printk("Using default partitions for %s\n",BOARD_NAME);
- npartitions=1;
- parts = kmalloc(npartitions*sizeof(*parts)+strlen(name), GFP_KERNEL);
- memzero(parts,npartitions*sizeof(*parts)+strlen(name));
- if (!parts) {
- ret = -ENOMEM;
- goto out;
- }
- i=0;
- names = (char *)&parts[npartitions];
- parts[i].name = names;
- names += strlen(name) + 1;
- strcpy(parts[i].name, name);
-
-#ifdef CONFIG_EPXA10DB
- parts[i].size = FLASH_SIZE-0x00400000;
- parts[i].offset = 0x00400000;
-#else
- parts[i].size = FLASH_SIZE-0x00180000;
- parts[i].offset = 0x00180000;
-#endif
-
- out:
- *pparts = parts;
- return npartitions;
-}
-
-
-module_init(epxa_mtd_init);
-module_exit(epxa_mtd_cleanup);
-
-MODULE_AUTHOR("Clive Davies");
-MODULE_DESCRIPTION("Altera epxa mtd flash map");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 339cb1218ea..7f3ff500b68 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -194,6 +194,14 @@ static int blktrans_release(struct inode *i, struct file *f)
return ret;
}
+static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
+
+ if (dev->tr->getgeo)
+ return dev->tr->getgeo(dev, geo);
+ return -ENOTTY;
+}
static int blktrans_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
@@ -207,22 +215,6 @@ static int blktrans_ioctl(struct inode *inode, struct file *file,
return tr->flush(dev);
/* The core code did the work, we had nothing to do. */
return 0;
-
- case HDIO_GETGEO:
- if (tr->getgeo) {
- struct hd_geometry g;
- int ret;
-
- memset(&g, 0, sizeof(g));
- ret = tr->getgeo(dev, &g);
- if (ret)
- return ret;
-
- g.start = get_start_sect(inode->i_bdev);
- if (copy_to_user((void __user *)arg, &g, sizeof(g)))
- return -EFAULT;
- return 0;
- } /* else */
default:
return -ENOTTY;
}
@@ -233,6 +225,7 @@ struct block_device_operations mtd_blktrans_ops = {
.open = blktrans_open,
.release = blktrans_release,
.ioctl = blktrans_ioctl,
+ .getgeo = blktrans_getgeo,
};
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
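
A minimal sketch, with invented geometry values, of the per-translation getgeo callback that blktrans_getgeo now dispatches to; with this hook in place the generic block layer handles the HDIO_GETGEO copy-out (including geo->start) that the removed ioctl branch used to open-code.

	static int example_getgeo(struct mtd_blktrans_dev *dev,
				  struct hd_geometry *geo)
	{
		geo->heads = 4;
		geo->sectors = 16;
		geo->cylinders = dev->size / (geo->heads * geo->sectors);
		return 0;
	}
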
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 9c5945d6df8..201e1362da1 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -43,7 +43,7 @@ static int nand_width = 1; /* default x8*/
/*
* Define partitions for flash device
*/
-const static struct mtd_partition partition_info[] = {
+static const struct mtd_partition partition_info[] = {
{
.name = "NAND FS 0",
.offset = 0,
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
index 3a5841c9d95..4129c03dfd9 100644
--- a/drivers/mtd/nand/rtc_from4.c
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -96,7 +96,7 @@ static struct mtd_info *rtc_from4_mtd = NULL;
*/
static void __iomem *rtc_from4_fio_base = (void *)P2SEGADDR(RTC_FROM4_FIO_BASE);
-const static struct mtd_partition partition_info[] = {
+static const struct mtd_partition partition_info[] = {
{
.name = "Renesas flash partition 1",
.offset = 0,
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index d209214b131..5b55599739f 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -53,6 +53,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/clk.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
@@ -60,7 +61,6 @@
#include <linux/mtd/partitions.h>
#include <asm/io.h>
-#include <asm/hardware/clock.h>
#include <asm/arch/regs-nand.h>
#include <asm/arch/nand.h>
@@ -460,7 +460,6 @@ static int s3c2410_nand_remove(struct platform_device *pdev)
if (info->clk != NULL && !IS_ERR(info->clk)) {
clk_disable(info->clk);
- clk_unuse(info->clk);
clk_put(info->clk);
}
@@ -598,7 +597,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev, int is_s3c2440)
goto exit_error;
}
- clk_use(info->clk);
clk_enable(info->clk);
/* allocate and map the resource */
diff --git a/drivers/mtd/nand/spia.c b/drivers/mtd/nand/spia.c
index 32541cbb010..9cf1ce718ec 100644
--- a/drivers/mtd/nand/spia.c
+++ b/drivers/mtd/nand/spia.c
@@ -67,7 +67,7 @@ module_param(spia_peddr, int, 0);
/*
* Define partitions for flash device
*/
-const static struct mtd_partition partition_info[] = {
+static const struct mtd_partition partition_info[] = {
{
.name = "SPIA flash partition 1",
.offset = 0,
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 45c077d0f06..af06a80f44d 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index 20ce212638f..a3e00a4635a 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -18,6 +18,7 @@
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/vmalloc.h>
+#include <linux/slab.h>
#include <linux/jiffies.h>
#include <asm/types.h>
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index 5c5eebdb691..dcc98afa65d 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -148,14 +148,6 @@ el2_pio_probe(struct net_device *dev)
return -ENODEV;
}
-static void cleanup_card(struct net_device *dev)
-{
- /* NB: el2_close() handles free_irq */
- release_region(dev->base_addr, EL2_IO_EXTENT);
- if (ei_status.mem)
- iounmap(ei_status.mem);
-}
-
#ifndef MODULE
struct net_device * __init el2_probe(int unit)
{
@@ -726,6 +718,14 @@ init_module(void)
return -ENXIO;
}
+static void cleanup_card(struct net_device *dev)
+{
+ /* NB: el2_close() handles free_irq */
+ release_region(dev->base_addr, EL2_IO_EXTENT);
+ if (ei_status.mem)
+ iounmap(ei_status.mem);
+}
+
void
cleanup_module(void)
{
diff --git a/drivers/net/3c527.h b/drivers/net/3c527.h
index c10f009ce9b..53b5b071df0 100644
--- a/drivers/net/3c527.h
+++ b/drivers/net/3c527.h
@@ -32,43 +32,43 @@
struct mc32_mailbox
{
- u16 mbox __attribute((packed));
- u16 data[1] __attribute((packed));
-};
+ u16 mbox;
+ u16 data[1];
+} __attribute((packed));
struct skb_header
{
- u8 status __attribute((packed));
- u8 control __attribute((packed));
- u16 next __attribute((packed)); /* Do not change! */
- u16 length __attribute((packed));
- u32 data __attribute((packed));
-};
+ u8 status;
+ u8 control;
+ u16 next; /* Do not change! */
+ u16 length;
+ u32 data;
+} __attribute((packed));
struct mc32_stats
{
/* RX Errors */
- u32 rx_crc_errors __attribute((packed));
- u32 rx_alignment_errors __attribute((packed));
- u32 rx_overrun_errors __attribute((packed));
- u32 rx_tooshort_errors __attribute((packed));
- u32 rx_toolong_errors __attribute((packed));
- u32 rx_outofresource_errors __attribute((packed));
+ u32 rx_crc_errors;
+ u32 rx_alignment_errors;
+ u32 rx_overrun_errors;
+ u32 rx_tooshort_errors;
+ u32 rx_toolong_errors;
+ u32 rx_outofresource_errors;
- u32 rx_discarded __attribute((packed)); /* via card pattern match filter */
+ u32 rx_discarded; /* via card pattern match filter */
/* TX Errors */
- u32 tx_max_collisions __attribute((packed));
- u32 tx_carrier_errors __attribute((packed));
- u32 tx_underrun_errors __attribute((packed));
- u32 tx_cts_errors __attribute((packed));
- u32 tx_timeout_errors __attribute((packed)) ;
+ u32 tx_max_collisions;
+ u32 tx_carrier_errors;
+ u32 tx_underrun_errors;
+ u32 tx_cts_errors;
+ u32 tx_timeout_errors;
/* various cruft */
- u32 dataA[6] __attribute((packed));
- u16 dataB[5] __attribute((packed));
- u32 dataC[14] __attribute((packed));
-};
+ u32 dataA[6];
+ u16 dataB[5];
+ u32 dataC[14];
+} __attribute((packed));
#define STATUS_MASK 0x0F
#define COMPLETED (1<<7)
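
A side-by-side sketch of what this hunk changes (member names invented, u8/u16 from <linux/types.h>): both spellings give an unpadded layout for these structures, but applying the attribute once to the whole struct is the conventional form and avoids the per-member attributes that newer compilers may warn about or ignore.

	struct example_old {
		u8  status __attribute((packed));
		u16 next   __attribute((packed));
	};

	struct example_new {
		u8  status;
		u16 next;
	} __attribute((packed));
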
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index d2102a27d30..adfba44dac5 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -505,7 +505,7 @@ enum chip_flags {
#define HW_REVID_MASK HW_REVID(1, 1, 1, 1, 1, 1, 1)
/* directly indexed by chip_t, above */
-const static struct {
+static const struct {
const char *name;
u32 version; /* from RTL8139C/RTL8139D docs */
u32 flags;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e2fa29b612c..5c15f3e9ea0 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -27,6 +27,19 @@ config NETDEVICES
# that for each of the symbols.
if NETDEVICES
+config IFB
+ tristate "Intermediate Functional Block support"
+ depends on NET_CLS_ACT
+ ---help---
+ This is an intermediate driver that allows sharing of
+ resources.
+ To compile this driver as a module, choose M here: the module
+ will be called ifb. If you want to use more than one ifb
+ device at a time, you need to compile this driver as a module.
+ Instead of 'ifb', the devices will then be called 'ifb0',
+ 'ifb1' etc.
+ Look at the iproute2 documentation directory for usage, etc.
+
config DUMMY
tristate "Dummy net driver support"
---help---
@@ -129,7 +142,7 @@ config NET_SB1000
If you don't have this card, of course say N.
- source "drivers/net/arcnet/Kconfig"
+source "drivers/net/arcnet/Kconfig"
source "drivers/net/phy/Kconfig"
@@ -844,7 +857,7 @@ config SMC9194
config DM9000
tristate "DM9000 support"
- depends on ARM && NET_ETHERNET
+ depends on (ARM || MIPS) && NET_ETHERNET
select CRC32
select MII
---help---
@@ -1374,7 +1387,7 @@ config FORCEDETH
config CS89x0
tristate "CS89x0 support"
- depends on (NET_PCI && (ISA || ARCH_IXDP2X01)) || ARCH_PNX0105
+ depends on NET_PCI && (ISA || ARCH_IXDP2X01 || ARCH_PNX010X)
---help---
Support for CS89x0 chipset based Ethernet cards. If you have a
network (Ethernet) card of this type, say Y and read the
@@ -1384,7 +1397,7 @@ config CS89x0
To compile this driver as a module, choose M here and read
<file:Documentation/networking/net-modules.txt>. The module will be
- called cs89x.
+ called cs89x0.
config TC35815
tristate "TOSHIBA TC35815 Ethernet support"
@@ -1791,7 +1804,7 @@ config 68360_ENET
config FEC
bool "FEC ethernet controller (of ColdFire CPUs)"
- depends on M523x || M527x || M5272 || M528x
+ depends on M523x || M527x || M5272 || M528x || M520x
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire processors.
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b74a7cb5bae..00e72b12fb9 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -125,6 +125,7 @@ ifeq ($(CONFIG_SLIP_COMPRESSED),y)
endif
obj-$(CONFIG_DUMMY) += dummy.o
+obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_DE600) += de600.o
obj-$(CONFIG_DE620) += de620.o
obj-$(CONFIG_LANCE) += lance.o
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index 8a0af5453e2..7952dc6d77e 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -123,14 +123,6 @@ static int __init do_ac3200_probe(struct net_device *dev)
return -ENODEV;
}
-static void cleanup_card(struct net_device *dev)
-{
- /* Someday free_irq may be in ac_close_card() */
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, AC_IO_EXTENT);
- iounmap(ei_status.mem);
-}
-
#ifndef MODULE
struct net_device * __init ac3200_probe(int unit)
{
@@ -406,6 +398,14 @@ init_module(void)
return -ENXIO;
}
+static void cleanup_card(struct net_device *dev)
+{
+ /* Someday free_irq may be in ac_close_card() */
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, AC_IO_EXTENT);
+ iounmap(ei_status.mem);
+}
+
void
cleanup_module(void)
{
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
index 470364deded..625184b65e3 100644
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -31,16 +31,3 @@ config ARM_ETHERH
help
If you have an Acorn system with one of these network cards, you
should say Y to this option if you wish to use it with Linux.
-
-config ARM_ETHER00
- tristate "Altera Ether00 support"
- depends on NET_ETHERNET && ARM && ARCH_CAMELOT
- help
- This is the driver for Altera's ether00 ethernet mac IP core. Say
- Y here if you want to build support for this into the kernel. It
- is also available as a module (say M here) that can be inserted/
- removed from the kernel at the same time as the PLD is configured.
- If this driver is running on an epxa10 development board then it
- will generate a suitable hw address based on the board serial
- number (MTD support is required for this). Otherwise you will
- need to set a suitable hw address using ifconfig.
diff --git a/drivers/net/arm/Makefile b/drivers/net/arm/Makefile
index b0d706834d8..bc263edf06a 100644
--- a/drivers/net/arm/Makefile
+++ b/drivers/net/arm/Makefile
@@ -4,7 +4,6 @@
#
obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o
-obj-$(CONFIG_ARM_ETHER00) += ether00.o
obj-$(CONFIG_ARM_ETHERH) += etherh.o
obj-$(CONFIG_ARM_ETHER3) += ether3.o
obj-$(CONFIG_ARM_ETHER1) += ether1.o
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 877891a29aa..53e3afc1b7b 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -668,9 +668,8 @@ static void __init am79c961_banner(void)
printk(KERN_INFO "%s", version);
}
-static int __init am79c961_probe(struct device *_dev)
+static int __init am79c961_probe(struct platform_device *pdev)
{
- struct platform_device *pdev = to_platform_device(_dev);
struct resource *res;
struct net_device *dev;
struct dev_priv *priv;
@@ -758,15 +757,16 @@ out:
return ret;
}
-static struct device_driver am79c961_driver = {
- .name = "am79c961",
- .bus = &platform_bus_type,
+static struct platform_driver am79c961_driver = {
.probe = am79c961_probe,
+ .driver = {
+ .name = "am79c961",
+ },
};
static int __init am79c961_init(void)
{
- return driver_register(&am79c961_driver);
+ return platform_driver_register(&am79c961_driver);
}
__initcall(am79c961_init);
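
A minimal sketch, with placeholder names, of the platform_driver shape this probe routine is converted to: the probe now receives the platform_device directly instead of recovering it from a bare struct device, and registration goes through platform_driver_register().

	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	static int __init example_probe(struct platform_device *pdev)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			return -ENODEV;

		return 0;
	}

	static struct platform_driver example_driver = {
		.probe	= example_probe,
		.driver	= {
			.name	= "example",
		},
	};

	static int __init example_init(void)
	{
		return platform_driver_register(&example_driver);
	}
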
diff --git a/drivers/net/arm/ether00.c b/drivers/net/arm/ether00.c
deleted file mode 100644
index 4f1f4e31bda..00000000000
--- a/drivers/net/arm/ether00.c
+++ /dev/null
@@ -1,1017 +0,0 @@
-/*
- * drivers/net/ether00.c
- *
- * Copyright (C) 2001 Altera Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-/* includes */
-#include <linux/config.h>
-#include <linux/pci.h>
-#include <linux/sched.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/etherdevice.h>
-#include <linux/module.h>
-#include <linux/tqueue.h>
-#include <linux/mtd/mtd.h>
-#include <linux/pld/pld_hotswap.h>
-#include <asm/arch/excalibur.h>
-#include <asm/arch/hardware.h>
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/sizes.h>
-
-#include <asm/arch/ether00.h>
-#include <asm/arch/tdkphy.h>
-
-
-MODULE_AUTHOR("Clive Davies");
-MODULE_DESCRIPTION("Altera Ether00 IP core driver");
-MODULE_LICENSE("GPL");
-
-#define PKT_BUF_SZ 1540 /* Size of each rx buffer */
-#define ETH_NR 4 /* Number of MACs this driver supports */
-
-#define DEBUG(x)
-
-#define __dma_va(x) (unsigned int)((unsigned int)priv->dma_data+(((unsigned int)(x))&(EXC_SPSRAM_BLOCK0_SIZE-1)))
-#define __dma_pa(x) (unsigned int)(EXC_SPSRAM_BLOCK0_BASE+(((unsigned int)(x))-(unsigned int)priv->dma_data))
-
-#define ETHER00_BASE 0
-#define ETHER00_TYPE
-#define ETHER00_NAME "ether00"
-#define MAC_REG_SIZE 0x400 /* size of MAC register area */
-
-
-
-/* typedefs */
-
-/* The definition of the driver control structure */
-
-#define RX_NUM_BUFF 10
-#define RX_NUM_FDESC 10
-#define TX_NUM_FDESC 10
-
-struct tx_fda_ent{
- FDA_DESC fd;
- BUF_DESC bd;
- BUF_DESC pad;
-};
-struct rx_fda_ent{
- FDA_DESC fd;
- BUF_DESC bd;
- BUF_DESC pad;
-};
-struct rx_blist_ent{
- FDA_DESC fd;
- BUF_DESC bd;
- BUF_DESC pad;
-};
-struct net_priv
-{
- struct net_device_stats stats;
- struct sk_buff* skb;
- void* dma_data;
- struct rx_blist_ent* rx_blist_vp;
- struct rx_fda_ent* rx_fda_ptr;
- struct tx_fda_ent* tx_fdalist_vp;
- struct tq_struct tq_memupdate;
- unsigned char memupdate_scheduled;
- unsigned char rx_disabled;
- unsigned char queue_stopped;
- spinlock_t rx_lock;
-};
-
-static const char vendor_id[2]={0x07,0xed};
-
-#ifdef ETHER00_DEBUG
-
-/* Dump (most) registers for debugging puposes */
-
-static void dump_regs(struct net_device *dev){
- struct net_priv* priv=dev->priv;
- unsigned int* i;
-
- printk("\n RX free descriptor area:\n");
-
- for(i=(unsigned int*)priv->rx_fda_ptr;
- i<((unsigned int*)(priv->rx_fda_ptr+RX_NUM_FDESC));){
- printk("%#8x %#8x %#8x %#8x\n",*i,*(i+1),*(i+2),*(i+3));
- i+=4;
- }
-
- printk("\n RX buffer list:\n");
-
- for(i=(unsigned int*)priv->rx_blist_vp;
- i<((unsigned int*)(priv->rx_blist_vp+RX_NUM_BUFF));){
- printk("%#8x %#8x %#8x %#8x\n",*i,*(i+1),*(i+2),*(i+3));
- i+=4;
- }
-
- printk("\n TX frame descriptor list:\n");
-
- for(i=(unsigned int*)priv->tx_fdalist_vp;
- i<((unsigned int*)(priv->tx_fdalist_vp+TX_NUM_FDESC));){
- printk("%#8x %#8x %#8x %#8x\n",*i,*(i+1),*(i+2),*(i+3));
- i+=4;
- }
-
- printk("\ndma ctl=%#x\n",readw(ETHER_DMA_CTL(dev->base_addr)));
- printk("txfrmptr=%#x\n",readw(ETHER_TXFRMPTR(dev->base_addr)));
- printk("txthrsh=%#x\n",readw(ETHER_TXTHRSH(dev->base_addr)));
- printk("txpollctr=%#x\n",readw(ETHER_TXPOLLCTR(dev->base_addr)));
- printk("blfrmptr=%#x\n",readw(ETHER_BLFRMPTR(dev->base_addr)));
- printk("rxfragsize=%#x\n",readw(ETHER_RXFRAGSIZE(dev->base_addr)));
- printk("tx_int_en=%#x\n",readw(ETHER_INT_EN(dev->base_addr)));
- printk("fda_bas=%#x\n",readw(ETHER_FDA_BAS(dev->base_addr)));
- printk("fda_lim=%#x\n",readw(ETHER_FDA_LIM(dev->base_addr)));
- printk("int_src=%#x\n",readw(ETHER_INT_SRC(dev->base_addr)));
- printk("pausecnt=%#x\n",readw(ETHER_PAUSECNT(dev->base_addr)));
- printk("rempaucnt=%#x\n",readw(ETHER_REMPAUCNT(dev->base_addr)));
- printk("txconfrmstat=%#x\n",readw(ETHER_TXCONFRMSTAT(dev->base_addr)));
- printk("mac_ctl=%#x\n",readw(ETHER_MAC_CTL(dev->base_addr)));
- printk("arc_ctl=%#x\n",readw(ETHER_ARC_CTL(dev->base_addr)));
- printk("tx_ctl=%#x\n",readw(ETHER_TX_CTL(dev->base_addr)));
-}
-#endif /* ETHER00_DEBUG */
-
-
-static int ether00_write_phy(struct net_device *dev, short address, short value)
-{
- volatile int count = 1024;
- writew(value,ETHER_MD_DATA(dev->base_addr));
- writew( ETHER_MD_CA_BUSY_MSK |
- ETHER_MD_CA_WR_MSK |
- (address & ETHER_MD_CA_ADDR_MSK),
- ETHER_MD_CA(dev->base_addr));
-
- /* Wait for the command to complete */
- while((readw(ETHER_MD_CA(dev->base_addr)) & ETHER_MD_CA_BUSY_MSK)&&count){
- count--;
- }
- if (!count){
- printk("Write to phy failed, addr=%#x, data=%#x\n",address, value);
- return -EIO;
- }
- return 0;
-}
-
-static int ether00_read_phy(struct net_device *dev, short address)
-{
- volatile int count = 1024;
- writew( ETHER_MD_CA_BUSY_MSK |
- (address & ETHER_MD_CA_ADDR_MSK),
- ETHER_MD_CA(dev->base_addr));
-
- /* Wait for the command to complete */
- while((readw(ETHER_MD_CA(dev->base_addr)) & ETHER_MD_CA_BUSY_MSK)&&count){
- count--;
- }
- if (!count){
- printk(KERN_WARNING "Read from phy timed out\n");
- return -EIO;
- }
- return readw(ETHER_MD_DATA(dev->base_addr));
-}
-
-static void ether00_phy_int(int irq_num, void* dev_id, struct pt_regs* regs)
-{
- struct net_device* dev=dev_id;
- int irq_status;
-
- irq_status=ether00_read_phy(dev, PHY_IRQ_CONTROL);
-
- if(irq_status & PHY_IRQ_CONTROL_ANEG_COMP_INT_MSK){
- /*
- * Autonegotiation complete on epxa10db. The mac doesn't
- * twig if we're in full duplex so we need to check the
- * phy status register and configure the mac accordingly
- */
- if(ether00_read_phy(dev, PHY_STATUS)&(PHY_STATUS_10T_F_MSK|PHY_STATUS_100_X_F_MSK)){
- int tmp;
- tmp=readl(ETHER_MAC_CTL(dev->base_addr));
- writel(tmp|ETHER_MAC_CTL_FULLDUP_MSK,ETHER_MAC_CTL(dev->base_addr));
- }
- }
-
- if(irq_status&PHY_IRQ_CONTROL_LS_CHG_INT_MSK){
-
- if(ether00_read_phy(dev, PHY_STATUS)& PHY_STATUS_LINK_MSK){
- /* Link is up */
- netif_carrier_on(dev);
- //printk("Carrier on\n");
- }else{
- netif_carrier_off(dev);
- //printk("Carrier off\n");
-
- }
- }
-
-}
-
-static void setup_blist_entry(struct sk_buff* skb,struct rx_blist_ent* blist_ent_ptr){
- /* Make the buffer consistent with the cache as the mac is going to write
- * directly into it*/
- blist_ent_ptr->fd.FDSystem=(unsigned int)skb;
- blist_ent_ptr->bd.BuffData=(char*)__pa(skb->data);
- consistent_sync(skb->data,PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
- /* align IP on 16 Byte (DMA_CTL set to skip 2 bytes) */
- skb_reserve(skb,2);
- blist_ent_ptr->bd.BuffLength=PKT_BUF_SZ-2;
- blist_ent_ptr->fd.FDLength=1;
- blist_ent_ptr->fd.FDCtl=FDCTL_COWNSFD_MSK;
- blist_ent_ptr->bd.BDCtl=BDCTL_COWNSBD_MSK;
-}
-
-
-static int ether00_mem_init(struct net_device* dev)
-{
- struct net_priv* priv=dev->priv;
- struct tx_fda_ent *tx_fd_ptr,*tx_end_ptr;
- struct rx_blist_ent* blist_ent_ptr;
- int i;
-
- /*
- * Grab a block of on chip SRAM to contain the control stuctures for
- * the ethernet MAC. This uncached becuase it needs to be accesses by both
- * bus masters (cpu + mac). However, it shouldn't matter too much in terms
- * of speed as its on chip memory
- */
- priv->dma_data=ioremap_nocache(EXC_SPSRAM_BLOCK0_BASE,EXC_SPSRAM_BLOCK0_SIZE );
- if (!priv->dma_data)
- return -ENOMEM;
-
- priv->rx_fda_ptr=(struct rx_fda_ent*)priv->dma_data;
- /*
- * Now share it out amongst the Frame descriptors and the buffer list
- */
- priv->rx_blist_vp=(struct rx_blist_ent*)((unsigned int)priv->dma_data+RX_NUM_FDESC*sizeof(struct rx_fda_ent));
-
- /*
- *Initalise the FDA list
- */
- /* set ownership to the controller */
- memset(priv->rx_fda_ptr,0x80,RX_NUM_FDESC*sizeof(struct rx_fda_ent));
-
- /*
- *Initialise the buffer list
- */
- blist_ent_ptr=priv->rx_blist_vp;
- i=0;
- while(blist_ent_ptr<(priv->rx_blist_vp+RX_NUM_BUFF)){
- struct sk_buff *skb;
- blist_ent_ptr->fd.FDLength=1;
- skb=dev_alloc_skb(PKT_BUF_SZ);
- if(skb){
- setup_blist_entry(skb,blist_ent_ptr);
- blist_ent_ptr->fd.FDNext=(FDA_DESC*)__dma_pa(blist_ent_ptr+1);
- blist_ent_ptr->bd.BDStat=i++;
- blist_ent_ptr++;
- }
- else
- {
- printk("Failed to initalise buffer list\n");
- }
-
- }
- blist_ent_ptr--;
- blist_ent_ptr->fd.FDNext=(FDA_DESC*)__dma_pa(priv->rx_blist_vp);
-
- priv->tx_fdalist_vp=(struct tx_fda_ent*)(priv->rx_blist_vp+RX_NUM_BUFF);
-
- /* Initialise the buffers to be a circular list. The mac will then go poll
- * the list until it finds a frame ready to transmit */
- tx_end_ptr=priv->tx_fdalist_vp+TX_NUM_FDESC;
- for(tx_fd_ptr=priv->tx_fdalist_vp;tx_fd_ptr<tx_end_ptr;tx_fd_ptr++){
- tx_fd_ptr->fd.FDNext=(FDA_DESC*)__dma_pa((tx_fd_ptr+1));
- tx_fd_ptr->fd.FDCtl=1;
- tx_fd_ptr->fd.FDStat=0;
- tx_fd_ptr->fd.FDLength=1;
-
- }
- /* Change the last FDNext pointer to make a circular list */
- tx_fd_ptr--;
- tx_fd_ptr->fd.FDNext=(FDA_DESC*)__dma_pa(priv->tx_fdalist_vp);
-
- /* Point the device at the chain of Rx and Tx Buffers */
- writel((unsigned int)__dma_pa(priv->rx_fda_ptr),ETHER_FDA_BAS(dev->base_addr));
- writel((RX_NUM_FDESC-1)*sizeof(struct rx_fda_ent),ETHER_FDA_LIM(dev->base_addr));
- writel((unsigned int)__dma_pa(priv->rx_blist_vp),ETHER_BLFRMPTR(dev->base_addr));
-
- writel((unsigned int)__dma_pa(priv->tx_fdalist_vp),ETHER_TXFRMPTR(dev->base_addr));
-
- return 0;
-}
-
-
-void ether00_mem_update(void* dev_id)
-{
- struct net_device* dev=dev_id;
- struct net_priv* priv=dev->priv;
- struct sk_buff* skb;
- struct tx_fda_ent *fda_ptr=priv->tx_fdalist_vp;
- struct rx_blist_ent* blist_ent_ptr;
- unsigned long flags;
-
- priv->tq_memupdate.sync=0;
- //priv->tq_memupdate.list=
- priv->memupdate_scheduled=0;
-
- /* Transmit interrupt */
- while(fda_ptr<(priv->tx_fdalist_vp+TX_NUM_FDESC)){
- if(!(FDCTL_COWNSFD_MSK&fda_ptr->fd.FDCtl) && (ETHER_TX_STAT_COMP_MSK&fda_ptr->fd.FDStat)){
- priv->stats.tx_packets++;
- priv->stats.tx_bytes+=fda_ptr->bd.BuffLength;
- skb=(struct sk_buff*)fda_ptr->fd.FDSystem;
- //printk("%d:txcln:fda=%#x skb=%#x\n",jiffies,fda_ptr,skb);
- dev_kfree_skb(skb);
- fda_ptr->fd.FDSystem=0;
- fda_ptr->fd.FDStat=0;
- fda_ptr->fd.FDCtl=0;
- }
- fda_ptr++;
- }
- /* Fill in any missing buffers from the received queue */
- spin_lock_irqsave(&priv->rx_lock,flags);
- blist_ent_ptr=priv->rx_blist_vp;
- while(blist_ent_ptr<(priv->rx_blist_vp+RX_NUM_BUFF)){
- /* fd.FDSystem of 0 indicates we failed to allocate the buffer in the ISR */
- if(!blist_ent_ptr->fd.FDSystem){
- struct sk_buff *skb;
- skb=dev_alloc_skb(PKT_BUF_SZ);
- blist_ent_ptr->fd.FDSystem=(unsigned int)skb;
- if(skb){
- setup_blist_entry(skb,blist_ent_ptr);
- }
- else
- {
- break;
- }
- }
- blist_ent_ptr++;
- }
- spin_unlock_irqrestore(&priv->rx_lock,flags);
- if(priv->queue_stopped){
- //printk("%d:cln:start q\n",jiffies);
- netif_start_queue(dev);
- }
- if(priv->rx_disabled){
- //printk("%d:enable_irq\n",jiffies);
- priv->rx_disabled=0;
- writel(ETHER_RX_CTL_RXEN_MSK,ETHER_RX_CTL(dev->base_addr));
-
- }
-}
-
-
-static void ether00_int( int irq_num, void* dev_id, struct pt_regs* regs)
-{
- struct net_device* dev=dev_id;
- struct net_priv* priv=dev->priv;
-
- unsigned int interruptValue;
-
- interruptValue=readl(ETHER_INT_SRC(dev->base_addr));
-
- //printk("INT_SRC=%x\n",interruptValue);
-
- if(!(readl(ETHER_INT_SRC(dev->base_addr)) & ETHER_INT_SRC_IRQ_MSK))
- {
- return; /* Interrupt wasn't caused by us!! */
- }
-
- if(readl(ETHER_INT_SRC(dev->base_addr))&
- (ETHER_INT_SRC_INTMACRX_MSK |
- ETHER_INT_SRC_FDAEX_MSK |
- ETHER_INT_SRC_BLEX_MSK)) {
- struct rx_blist_ent* blist_ent_ptr;
- struct rx_fda_ent* fda_ent_ptr;
- struct sk_buff* skb;
-
- fda_ent_ptr=priv->rx_fda_ptr;
- spin_lock(&priv->rx_lock);
- while(fda_ent_ptr<(priv->rx_fda_ptr+RX_NUM_FDESC)){
- int result;
-
- if(!(fda_ent_ptr->fd.FDCtl&FDCTL_COWNSFD_MSK))
- {
- /* This frame is ready for processing */
- /*find the corresponding buffer in the bufferlist */
- blist_ent_ptr=priv->rx_blist_vp+fda_ent_ptr->bd.BDStat;
- skb=(struct sk_buff*)blist_ent_ptr->fd.FDSystem;
-
- /* Pass this skb up the stack */
- skb->dev=dev;
- skb_put(skb,fda_ent_ptr->fd.FDLength);
- skb->protocol=eth_type_trans(skb,dev);
- skb->ip_summed=CHECKSUM_UNNECESSARY;
- result=netif_rx(skb);
- /* Update statistics */
- priv->stats.rx_packets++;
- priv->stats.rx_bytes+=fda_ent_ptr->fd.FDLength;
-
- /* Free the FDA entry */
- fda_ent_ptr->bd.BDStat=0xff;
- fda_ent_ptr->fd.FDCtl=FDCTL_COWNSFD_MSK;
-
- /* Allocate a new skb and point the bd entry to it */
- blist_ent_ptr->fd.FDSystem=0;
- skb=dev_alloc_skb(PKT_BUF_SZ);
- //printk("allocskb=%#x\n",skb);
- if(skb){
- setup_blist_entry(skb,blist_ent_ptr);
-
- }
- else if(!priv->memupdate_scheduled){
- int tmp;
- /* There are no buffers at the moment, so schedule */
- /* the background task to sort this out */
- schedule_task(&priv->tq_memupdate);
- priv->memupdate_scheduled=1;
-				printk(KERN_DEBUG "%s: No buffers\n",dev->name);
- /* If this interrupt was due to a lack of buffers then
- * we'd better stop the receiver too */
- if(interruptValue&ETHER_INT_SRC_BLEX_MSK){
- priv->rx_disabled=1;
-					tmp=readl(ETHER_RX_CTL(dev->base_addr));
- writel(tmp&~ETHER_RX_CTL_RXEN_MSK,ETHER_RX_CTL(dev->base_addr));
-					printk(KERN_DEBUG "%s: Halting rx\n",dev->name);
- }
-
- }
-
- }
- fda_ent_ptr++;
- }
- spin_unlock(&priv->rx_lock);
-
- /* Clear the interrupts */
- writel(ETHER_INT_SRC_INTMACRX_MSK | ETHER_INT_SRC_FDAEX_MSK
- | ETHER_INT_SRC_BLEX_MSK,ETHER_INT_SRC(dev->base_addr));
-
- }
-
- if(readl(ETHER_INT_SRC(dev->base_addr))&ETHER_INT_SRC_INTMACTX_MSK){
-
- if(!priv->memupdate_scheduled){
- schedule_task(&priv->tq_memupdate);
- priv->memupdate_scheduled=1;
- }
- /* Clear the interrupt */
- writel(ETHER_INT_SRC_INTMACTX_MSK,ETHER_INT_SRC(dev->base_addr));
- }
-
- if (readl(ETHER_INT_SRC(dev->base_addr)) & (ETHER_INT_SRC_SWINT_MSK|
- ETHER_INT_SRC_INTEARNOT_MSK|
- ETHER_INT_SRC_INTLINK_MSK|
- ETHER_INT_SRC_INTEXBD_MSK|
- ETHER_INT_SRC_INTTXCTLCMP_MSK))
- {
- /*
- * Not using any of these so they shouldn't happen
- *
-		 * In the case of INTEXBD - if you allocate more
-		 * than 28 descriptors you may need to think about this
- */
- printk("Not using this interrupt\n");
- }
-
- if (readl(ETHER_INT_SRC(dev->base_addr)) &
- (ETHER_INT_SRC_INTSBUS_MSK |
-	     ETHER_INT_SRC_INTNRABT_MSK |
-	     ETHER_INT_SRC_DMPARERR_MSK))
- {
- /*
-		 * Hardware errors: we can either ignore them and hope they go away,
-		 * or reset the device. Try the first for now and see if they happen.
- */
- printk("Hardware error\n");
- }
-}
-
-static void ether00_setup_ethernet_address(struct net_device* dev)
-{
- int tmp;
-
- dev->addr_len=6;
- writew(0,ETHER_ARC_ADR(dev->base_addr));
- writel((dev->dev_addr[0]<<24) |
- (dev->dev_addr[1]<<16) |
- (dev->dev_addr[2]<<8) |
- dev->dev_addr[3],
- ETHER_ARC_DATA(dev->base_addr));
-
- writew(4,ETHER_ARC_ADR(dev->base_addr));
- tmp=readl(ETHER_ARC_DATA(dev->base_addr));
- tmp&=0xffff;
- tmp|=(dev->dev_addr[4]<<24) | (dev->dev_addr[5]<<16);
- writel(tmp, ETHER_ARC_DATA(dev->base_addr));
- /* Enable this entry in the ARC */
-
- writel(1,ETHER_ARC_ENA(dev->base_addr));
-
- return;
-}
-
-
-static void ether00_reset(struct net_device *dev)
-{
- /* reset the controller */
- writew(ETHER_MAC_CTL_RESET_MSK,ETHER_MAC_CTL(dev->base_addr));
-
- /*
- * Make sure we're not going to send anything
- */
-
- writew(ETHER_TX_CTL_TXHALT_MSK,ETHER_TX_CTL(dev->base_addr));
-
- /*
- * Make sure we're not going to receive anything
- */
- writew(ETHER_RX_CTL_RXHALT_MSK,ETHER_RX_CTL(dev->base_addr));
-
- /*
- * Disable Interrupts for now, and set the burst size to 8 bytes
- */
-
- writel(ETHER_DMA_CTL_INTMASK_MSK |
- ((8 << ETHER_DMA_CTL_DMBURST_OFST) & ETHER_DMA_CTL_DMBURST_MSK)
- |(2<<ETHER_DMA_CTL_RXALIGN_OFST),
- ETHER_DMA_CTL(dev->base_addr));
-
-
- /*
- * Set TxThrsh - start transmitting a packet after 1514
- * bytes or when a packet is complete, whichever comes first
- */
- writew(1514,ETHER_TXTHRSH(dev->base_addr));
-
- /*
- * Set TxPollCtr. Each cycle is
- * 61.44 microseconds with a 33 MHz bus
- */
- writew(1,ETHER_TXPOLLCTR(dev->base_addr));
-
- /*
- * Set Rx_Ctl - Turn off reception and let RxData turn it
- * on later
- */
- writew(ETHER_RX_CTL_RXHALT_MSK,ETHER_RX_CTL(dev->base_addr));
-
-}
-
-
-static void ether00_set_multicast(struct net_device* dev)
-{
- int count=dev->mc_count;
-
- /* Set promiscuous mode if it's asked for. */
-
- if (dev->flags&IFF_PROMISC){
-
- writew( ETHER_ARC_CTL_COMPEN_MSK |
- ETHER_ARC_CTL_BROADACC_MSK |
- ETHER_ARC_CTL_GROUPACC_MSK |
- ETHER_ARC_CTL_STATIONACC_MSK,
- ETHER_ARC_CTL(dev->base_addr));
- return;
- }
-
- /*
- * Get all multicast packets if required, or if there are too
- * many addresses to fit in hardware
- */
- if (dev->flags & IFF_ALLMULTI){
- writew( ETHER_ARC_CTL_COMPEN_MSK |
- ETHER_ARC_CTL_GROUPACC_MSK |
- ETHER_ARC_CTL_BROADACC_MSK,
- ETHER_ARC_CTL(dev->base_addr));
- return;
- }
- if (dev->mc_count > (ETHER_ARC_SIZE - 1)){
-
- printk(KERN_WARNING "Too many multicast addresses for hardware to filter - receiving all multicast packets\n");
- writew( ETHER_ARC_CTL_COMPEN_MSK |
- ETHER_ARC_CTL_GROUPACC_MSK |
- ETHER_ARC_CTL_BROADACC_MSK,
- ETHER_ARC_CTL(dev->base_addr));
- return;
- }
-
- if(dev->mc_count){
- struct dev_mc_list *mc_list_ent=dev->mc_list;
- unsigned int temp,i;
-		DEBUG(printk("mc_count=%d mc_list=%#x\n",dev->mc_count, dev->mc_list));
-		DEBUG(printk("mc addr=%02x%02x%02x%02x%02x%02x\n",
- mc_list_ent->dmi_addr[5],
- mc_list_ent->dmi_addr[4],
- mc_list_ent->dmi_addr[3],
- mc_list_ent->dmi_addr[2],
- mc_list_ent->dmi_addr[1],
- mc_list_ent->dmi_addr[0]);)
-
- /*
- * The first 6 bytes are the MAC address, so
- * don't change them!
- */
- writew(4,ETHER_ARC_ADR(dev->base_addr));
- temp=readl(ETHER_ARC_DATA(dev->base_addr));
- temp&=0xffff0000;
-
- /* Disable the current multicast stuff */
- writel(1,ETHER_ARC_ENA(dev->base_addr));
-
- for(;;){
- temp|=mc_list_ent->dmi_addr[1] |
- mc_list_ent->dmi_addr[0]<<8;
- writel(temp,ETHER_ARC_DATA(dev->base_addr));
-
- i=readl(ETHER_ARC_ADR(dev->base_addr));
- writew(i+4,ETHER_ARC_ADR(dev->base_addr));
-
- temp=mc_list_ent->dmi_addr[5]|
- mc_list_ent->dmi_addr[4]<<8 |
- mc_list_ent->dmi_addr[3]<<16 |
- mc_list_ent->dmi_addr[2]<<24;
- writel(temp,ETHER_ARC_DATA(dev->base_addr));
-
- count--;
- if(!mc_list_ent->next || !count){
- break;
- }
- DEBUG(printk("mc_list_next=%#x\n",mc_list_ent->next);)
- mc_list_ent=mc_list_ent->next;
-
-
- i=readl(ETHER_ARC_ADR(dev->base_addr));
- writel(i+4,ETHER_ARC_ADR(dev->base_addr));
-
- temp=mc_list_ent->dmi_addr[3]|
- mc_list_ent->dmi_addr[2]<<8 |
- mc_list_ent->dmi_addr[1]<<16 |
- mc_list_ent->dmi_addr[0]<<24;
- writel(temp,ETHER_ARC_DATA(dev->base_addr));
-
- i=readl(ETHER_ARC_ADR(dev->base_addr));
- writel(i+4,ETHER_ARC_ADR(dev->base_addr));
-
- temp=mc_list_ent->dmi_addr[4]<<16 |
- mc_list_ent->dmi_addr[5]<<24;
-
- writel(temp,ETHER_ARC_DATA(dev->base_addr));
-
- count--;
- if(!mc_list_ent->next || !count){
- break;
- }
- mc_list_ent=mc_list_ent->next;
- }
-
-
- if(count)
- printk(KERN_WARNING "Multicast list size error\n");
-
-
- writew( ETHER_ARC_CTL_BROADACC_MSK|
- ETHER_ARC_CTL_COMPEN_MSK,
- ETHER_ARC_CTL(dev->base_addr));
-
- }
-
-	/* enable the active ARC entries */
- writew((1<<(count+2))-1,ETHER_ARC_ENA(dev->base_addr));
-}
-
-
-static int ether00_open(struct net_device* dev)
-{
- int result,tmp;
- struct net_priv* priv;
-
- if (!is_valid_ether_addr(dev->dev_addr))
- return -EINVAL;
-
- /* Install interrupt handlers */
- result=request_irq(dev->irq,ether00_int,0,"ether00",dev);
- if(result)
- goto open_err1;
-
- result=request_irq(2,ether00_phy_int,0,"ether00_phy",dev);
- if(result)
- goto open_err2;
-
- ether00_reset(dev);
- result=ether00_mem_init(dev);
- if(result)
- goto open_err3;
-
-
- ether00_setup_ethernet_address(dev);
-
- ether00_set_multicast(dev);
-
- result=ether00_write_phy(dev,PHY_CONTROL, PHY_CONTROL_ANEGEN_MSK | PHY_CONTROL_RANEG_MSK);
- if(result)
- goto open_err4;
- result=ether00_write_phy(dev,PHY_IRQ_CONTROL, PHY_IRQ_CONTROL_LS_CHG_IE_MSK |
- PHY_IRQ_CONTROL_ANEG_COMP_IE_MSK);
- if(result)
- goto open_err4;
-
-	/* Start the device and enable interrupts */
- writew(ETHER_RX_CTL_RXEN_MSK
-// | ETHER_RX_CTL_STRIPCRC_MSK
- | ETHER_RX_CTL_ENGOOD_MSK
- | ETHER_RX_CTL_ENRXPAR_MSK| ETHER_RX_CTL_ENLONGERR_MSK
- | ETHER_RX_CTL_ENOVER_MSK| ETHER_RX_CTL_ENCRCERR_MSK,
- ETHER_RX_CTL(dev->base_addr));
-
- writew(ETHER_TX_CTL_TXEN_MSK|
- ETHER_TX_CTL_ENEXDEFER_MSK|
- ETHER_TX_CTL_ENLCARR_MSK|
- ETHER_TX_CTL_ENEXCOLL_MSK|
- ETHER_TX_CTL_ENLATECOLL_MSK|
- ETHER_TX_CTL_ENTXPAR_MSK|
- ETHER_TX_CTL_ENCOMP_MSK,
- ETHER_TX_CTL(dev->base_addr));
-
- tmp=readl(ETHER_DMA_CTL(dev->base_addr));
- writel(tmp&~ETHER_DMA_CTL_INTMASK_MSK,ETHER_DMA_CTL(dev->base_addr));
-
- return 0;
-
- open_err4:
- ether00_reset(dev);
- open_err3:
- free_irq(2,dev);
- open_err2:
- free_irq(dev->irq,dev);
- open_err1:
- return result;
-
-}
-
-
-static int ether00_tx(struct sk_buff* skb, struct net_device* dev)
-{
- struct net_priv *priv=dev->priv;
- struct tx_fda_ent *fda_ptr;
- int i;
-
-
- /*
- * Find an empty slot in which to stick the frame
- */
- fda_ptr=(struct tx_fda_ent*)__dma_va(readl(ETHER_TXFRMPTR(dev->base_addr)));
- i=0;
- while(i<TX_NUM_FDESC){
- if (fda_ptr->fd.FDStat||(fda_ptr->fd.FDCtl & FDCTL_COWNSFD_MSK)){
- fda_ptr =(struct tx_fda_ent*) __dma_va((struct tx_fda_ent*)fda_ptr->fd.FDNext);
- }
- else {
- break;
- }
- i++;
- }
-
- /* Write the skb data from the cache*/
- consistent_sync(skb->data,skb->len,PCI_DMA_TODEVICE);
- fda_ptr->bd.BuffData=(char*)__pa(skb->data);
- fda_ptr->bd.BuffLength=(unsigned short)skb->len;
- /* Save the pointer to the skb for freeing later */
- fda_ptr->fd.FDSystem=(unsigned int)skb;
- fda_ptr->fd.FDStat=0;
- /* Pass ownership of the buffers to the controller */
- fda_ptr->fd.FDCtl=1;
- fda_ptr->fd.FDCtl|=FDCTL_COWNSFD_MSK;
-
- /* If the next buffer in the list is full, stop the queue */
- fda_ptr=(struct tx_fda_ent*)__dma_va(fda_ptr->fd.FDNext);
- if ((fda_ptr->fd.FDStat)||(fda_ptr->fd.FDCtl & FDCTL_COWNSFD_MSK)){
- netif_stop_queue(dev);
- priv->queue_stopped=1;
- }
-
- return 0;
-}
-
-static struct net_device_stats *ether00_stats(struct net_device* dev)
-{
- struct net_priv *priv=dev->priv;
- return &priv->stats;
-}
-
-
-static int ether00_stop(struct net_device* dev)
-{
- struct net_priv *priv=dev->priv;
- int tmp;
-
- /* Stop/disable the device. */
- tmp=readw(ETHER_RX_CTL(dev->base_addr));
- tmp&=~(ETHER_RX_CTL_RXEN_MSK | ETHER_RX_CTL_ENGOOD_MSK);
- tmp|=ETHER_RX_CTL_RXHALT_MSK;
- writew(tmp,ETHER_RX_CTL(dev->base_addr));
-
- tmp=readl(ETHER_TX_CTL(dev->base_addr));
- tmp&=~ETHER_TX_CTL_TXEN_MSK;
- tmp|=ETHER_TX_CTL_TXHALT_MSK;
- writel(tmp,ETHER_TX_CTL(dev->base_addr));
-
- /* Free up system resources */
- free_irq(dev->irq,dev);
- free_irq(2,dev);
- iounmap(priv->dma_data);
-
- return 0;
-}
-
-
-static void ether00_get_ethernet_address(struct net_device* dev)
-{
- struct mtd_info *mymtd=NULL;
- int i;
- size_t retlen;
-
- /*
- * For the Epxa10 dev board (camelot), the ethernet MAC
- * address is of the form 00:aa:aa:00:xx:xx where
- * 00:aa:aa is the Altera vendor ID and xx:xx is the
- * last 2 bytes of the board serial number, as programmed
- * into the OTP area of the flash device on EBI1. If this
-	 * isn't an EPXA10 dev board, or there's no MTD support to
-	 * read the serial number from flash, then we'll force the
-	 * user to set their own MAC address using ifconfig.
- */
-
-#ifdef CONFIG_ARCH_CAMELOT
-#ifdef CONFIG_MTD
- /* get the mtd_info structure for the first mtd device*/
- for(i=0;i<MAX_MTD_DEVICES;i++){
- mymtd=get_mtd_device(NULL,i);
- if(!mymtd||!strcmp(mymtd->name,"EPXA10DB flash"))
- break;
- }
-
- if(!mymtd || !mymtd->read_user_prot_reg){
- printk(KERN_WARNING "%s: Failed to read MAC address from flash\n",dev->name);
- }else{
- mymtd->read_user_prot_reg(mymtd,2,1,&retlen,&dev->dev_addr[5]);
- mymtd->read_user_prot_reg(mymtd,3,1,&retlen,&dev->dev_addr[4]);
- dev->dev_addr[3]=0;
- dev->dev_addr[2]=vendor_id[1];
- dev->dev_addr[1]=vendor_id[0];
- dev->dev_addr[0]=0;
- }
-#else
- printk(KERN_WARNING "%s: MTD support required to read MAC address from EPXA10 dev board\n", dev->name);
-#endif
-#endif
-
- if (!is_valid_ether_addr(dev->dev_addr))
- printk("%s: Invalid ethernet MAC address. Please set using "
- "ifconfig\n", dev->name);
-
-}
-
-/*
- * Keep a mapping of dev_info addresses -> port lines to use when
- * removing ports. dev==NULL indicates an unused entry.
- */
-
-
-static struct net_device* dev_list[ETH_NR];
-
-static int ether00_add_device(struct pldhs_dev_info* dev_info,void* dev_ps_data)
-{
- struct net_device *dev;
- struct net_priv *priv;
- void *map_addr;
- int result;
- int i;
-
- i=0;
-	while(i < ETH_NR && dev_list[i])
- i++;
-
- if(i==ETH_NR){
- printk(KERN_WARNING "ether00: Maximum number of ports reached\n");
- return 0;
- }
-
-
- if (!request_mem_region(dev_info->base_addr, MAC_REG_SIZE, "ether00"))
- return -EBUSY;
-
- dev = alloc_etherdev(sizeof(struct net_priv));
- if(!dev) {
- result = -ENOMEM;
- goto out_release;
- }
- priv = dev->priv;
-
- priv->tq_memupdate.routine=ether00_mem_update;
- priv->tq_memupdate.data=(void*) dev;
-
- spin_lock_init(&priv->rx_lock);
-
- map_addr=ioremap_nocache(dev_info->base_addr,SZ_4K);
- if(!map_addr){
- result = -ENOMEM;
-		goto out_kfree;
- }
-
- dev->open=ether00_open;
- dev->stop=ether00_stop;
- dev->set_multicast_list=ether00_set_multicast;
- dev->hard_start_xmit=ether00_tx;
- dev->get_stats=ether00_stats;
-
- ether00_get_ethernet_address(dev);
-
- SET_MODULE_OWNER(dev);
-
- dev->base_addr=(unsigned int)map_addr;
- dev->irq=dev_info->irq;
- dev->features=NETIF_F_DYNALLOC | NETIF_F_HW_CSUM;
-
- result=register_netdev(dev);
- if(result){
- printk("Ether00: Error %i registering driver\n",result);
- goto out_unmap;
- }
- printk("registered ether00 device at %#x\n",dev_info->base_addr);
-
- dev_list[i]=dev;
-
- return result;
-
- out_unmap:
- iounmap(map_addr);
- out_kfree:
- free_netdev(dev);
- out_release:
- release_mem_region(dev_info->base_addr, MAC_REG_SIZE);
- return result;
-}
-
-
-static int ether00_remove_devices(void)
-{
- int i;
-
- for(i=0;i<ETH_NR;i++){
- if(dev_list[i]){
- netif_device_detach(dev_list[i]);
- unregister_netdev(dev_list[i]);
- iounmap((void*)dev_list[i]->base_addr);
- release_mem_region(dev_list[i]->base_addr, MAC_REG_SIZE);
- free_netdev(dev_list[i]);
- dev_list[i]=0;
- }
- }
- return 0;
-}
-
-static struct pld_hotswap_ops ether00_pldhs_ops={
- .name = ETHER00_NAME,
- .add_device = ether00_add_device,
- .remove_devices = ether00_remove_devices,
-};
-
-
-static void __exit ether00_cleanup_module(void)
-{
- int result;
- result=ether00_remove_devices();
- if(result)
- printk(KERN_WARNING "ether00: failed to remove all devices\n");
-
- pldhs_unregister_driver(ETHER00_NAME);
-}
-module_exit(ether00_cleanup_module);
-
-
-static int __init ether00_mod_init(void)
-{
-	printk(KERN_DEBUG "ether00: module init\n");
- return pldhs_register_driver(&ether00_pldhs_ops);
-
-}
-
-module_init(ether00_mod_init);
-
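
The ether00_int()/ether00_mem_update() pair removed above splits rx buffer replenishment across two contexts: when dev_alloc_skb() fails in the interrupt handler, it only records the shortage (memupdate_scheduled, rx_disabled) and schedules the task; the task later refills the buffer list, restarts the tx queue and re-enables reception. A rough userspace sketch of that split is below; the names (struct ring, rx_isr, refill_task, schedule_refill) are illustrative stand-ins, not the driver's API.

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

struct ring {
	void *buf[8];            /* rx buffer slots; NULL == needs a refill */
	bool  refill_scheduled;  /* mirrors priv->memupdate_scheduled       */
	bool  rx_disabled;       /* mirrors priv->rx_disabled               */
};

/* Process-context half: may allocate freely, then re-enables reception. */
static void refill_task(struct ring *r)
{
	r->refill_scheduled = false;
	for (size_t i = 0; i < 8; i++)
		if (!r->buf[i])
			r->buf[i] = malloc(1536);
	r->rx_disabled = false;  /* the driver re-writes ETHER_RX_CTL here */
}

/* Stand-in for schedule_task(): the driver defers this to keventd. */
static void schedule_refill(struct ring *r)
{
	refill_task(r);
}

/* Interrupt-context half: never blocks, only records the shortage. */
static void rx_isr(struct ring *r, size_t slot)
{
	r->buf[slot] = malloc(1536);        /* stand-in for dev_alloc_skb() */
	if (!r->buf[slot] && !r->refill_scheduled) {
		r->refill_scheduled = true;
		r->rx_disabled = true;      /* the driver only halts rx on BLEX */
		schedule_refill(r);
	}
}

int main(void)
{
	struct ring r = { { NULL }, false, false };
	rx_isr(&r, 0);
	return 0;
}

Keeping allocation failures off the interrupt path is what lets the handler stay non-blocking; the cost is that reception stays halted until the deferred task has run.
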
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index 1cc53abc3a3..f1d5b1027ff 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -69,7 +69,6 @@
#include <asm/system.h>
#include <asm/ecard.h>
#include <asm/io.h>
-#include <asm/irq.h>
static char version[] __initdata = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n";
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index 942a2819576..6a93b666eb7 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -50,7 +50,6 @@
#include <asm/system.h>
#include <asm/ecard.h>
#include <asm/io.h>
-#include <asm/irq.h>
#include "../8390.h"
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 854ddfb90da..f2a63186ae0 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -169,9 +169,9 @@ static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_
index = next_index;
}
- _unlock_tx_hashtbl(bond);
-
tlb_init_slave(slave);
+
+ _unlock_tx_hashtbl(bond);
}
/* Must be called before starting the monitor timer */
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 015c7f1d1bc..3dd78d048c3 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -22,8 +22,8 @@
#include "bond_3ad.h"
#include "bond_alb.h"
-#define DRV_VERSION "3.0.0"
-#define DRV_RELDATE "November 8, 2005"
+#define DRV_VERSION "3.0.1"
+#define DRV_RELDATE "January 9, 2006"
#define DRV_NAME "bonding"
#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
@@ -205,7 +205,7 @@ struct bonding {
*
* Caller must hold bond lock for read
*/
-extern inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct net_device *slave_dev)
+static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct net_device *slave_dev)
{
struct slave *slave = NULL;
int i;
@@ -219,7 +219,7 @@ extern inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct n
return slave;
}
-extern inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
+static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
{
if (!slave || !slave->dev->master) {
return NULL;
@@ -228,13 +228,13 @@ extern inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
return (struct bonding *)slave->dev->master->priv;
}
-extern inline void bond_set_slave_inactive_flags(struct slave *slave)
+static inline void bond_set_slave_inactive_flags(struct slave *slave)
{
slave->state = BOND_STATE_BACKUP;
slave->dev->flags |= IFF_NOARP;
}
-extern inline void bond_set_slave_active_flags(struct slave *slave)
+static inline void bond_set_slave_active_flags(struct slave *slave)
{
slave->state = BOND_STATE_ACTIVE;
slave->dev->flags &= ~IFF_NOARP;
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index a6078ad9b65..e2cfde7e31e 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -100,7 +100,7 @@
* Note that even if DMA is turned off we still support the 'dma' and 'use_dma'
* module options so we don't break any startup scripts.
*/
-#ifndef CONFIG_ARCH_IXDP2X01
+#ifndef CONFIG_ISA_DMA_API
#define ALLOW_DMA 0
#else
#define ALLOW_DMA 1
@@ -175,7 +175,7 @@ static unsigned int cs8900_irq_map[] = {1,0,0,0};
#include <asm/irq.h>
static unsigned int netcard_portlist[] __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0};
static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
-#elif defined(CONFIG_ARCH_PNX0105)
+#elif defined(CONFIG_ARCH_PNX010X)
#include <asm/irq.h>
#include <asm/arch/gpio.h>
#define CIRRUS_DEFAULT_BASE IO_ADDRESS(EXT_STATIC2_s0_BASE + 0x200000) /* = Physical address 0x48200000 */
@@ -338,30 +338,86 @@ out:
}
#endif
+#if defined(CONFIG_ARCH_IXDP2X01)
+static int
+readword(unsigned long base_addr, int portno)
+{
+ return (u16)__raw_readl(base_addr + (portno << 1));
+}
+
+static void
+writeword(unsigned long base_addr, int portno, int value)
+{
+ __raw_writel((u16)value, base_addr + (portno << 1));
+}
+#else
+#if defined(CONFIG_ARCH_PNX010X)
static int
-readreg(struct net_device *dev, int portno)
+readword(unsigned long base_addr, int portno)
{
- outw(portno, dev->base_addr + ADD_PORT);
- return inw(dev->base_addr + DATA_PORT);
+ return inw(base_addr + (portno << 1));
}
static void
-writereg(struct net_device *dev, int portno, int value)
+writeword(unsigned long base_addr, int portno, int value)
+{
+ outw(value, base_addr + (portno << 1));
+}
+#else
+static int
+readword(unsigned long base_addr, int portno)
{
- outw(portno, dev->base_addr + ADD_PORT);
- outw(value, dev->base_addr + DATA_PORT);
+ return inw(base_addr + portno);
+}
+
+static void
+writeword(unsigned long base_addr, int portno, int value)
+{
+ outw(value, base_addr + portno);
+}
+#endif
+#endif
+
+static void
+readwords(unsigned long base_addr, int portno, void *buf, int length)
+{
+ u8 *buf8 = (u8 *)buf;
+
+ do {
+ u32 tmp32;
+
+ tmp32 = readword(base_addr, portno);
+ *buf8++ = (u8)tmp32;
+ *buf8++ = (u8)(tmp32 >> 8);
+ } while (--length);
+}
+
+static void
+writewords(unsigned long base_addr, int portno, void *buf, int length)
+{
+ u8 *buf8 = (u8 *)buf;
+
+ do {
+ u32 tmp32;
+
+ tmp32 = *buf8++;
+ tmp32 |= (*buf8++) << 8;
+ writeword(base_addr, portno, tmp32);
+ } while (--length);
}
static int
-readword(struct net_device *dev, int portno)
+readreg(struct net_device *dev, int regno)
{
- return inw(dev->base_addr + portno);
+ writeword(dev->base_addr, ADD_PORT, regno);
+ return readword(dev->base_addr, DATA_PORT);
}
static void
-writeword(struct net_device *dev, int portno, int value)
+writereg(struct net_device *dev, int regno, int value)
{
- outw(value, dev->base_addr + portno);
+ writeword(dev->base_addr, ADD_PORT, regno);
+ writeword(dev->base_addr, DATA_PORT, value);
}
static int __init
@@ -456,7 +512,7 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
#endif
}
-#ifdef CONFIG_ARCH_PNX0105
+#ifdef CONFIG_ARCH_PNX010X
initialize_ebi();
/* Map GPIO registers for the pins connected to the CS8900a. */
@@ -491,8 +547,8 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
#ifdef CONFIG_SH_HICOSH4
 	/* truly reset the chip */
- outw(0x0114, ioaddr + ADD_PORT);
- outw(0x0040, ioaddr + DATA_PORT);
+ writeword(ioaddr, ADD_PORT, 0x0114);
+ writeword(ioaddr, DATA_PORT, 0x0040);
#endif
/* if they give us an odd I/O address, then do ONE write to
@@ -503,24 +559,24 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
if (net_debug > 1)
printk(KERN_INFO "%s: odd ioaddr 0x%x\n", dev->name, ioaddr);
if ((ioaddr & 2) != 2)
- if ((inw((ioaddr & ~3)+ ADD_PORT) & ADD_MASK) != ADD_SIG) {
+ if ((readword(ioaddr & ~3, ADD_PORT) & ADD_MASK) != ADD_SIG) {
printk(KERN_ERR "%s: bad signature 0x%x\n",
- dev->name, inw((ioaddr & ~3)+ ADD_PORT));
+ dev->name, readword(ioaddr & ~3, ADD_PORT));
retval = -ENODEV;
goto out2;
}
}
- printk(KERN_DEBUG "PP_addr at %x: 0x%x\n",
- ioaddr + ADD_PORT, inw(ioaddr + ADD_PORT));
+ printk(KERN_DEBUG "PP_addr at %x[%x]: 0x%x\n",
+ ioaddr, ADD_PORT, readword(ioaddr, ADD_PORT));
ioaddr &= ~3;
- outw(PP_ChipID, ioaddr + ADD_PORT);
+ writeword(ioaddr, ADD_PORT, PP_ChipID);
- tmp = inw(ioaddr + DATA_PORT);
+ tmp = readword(ioaddr, DATA_PORT);
if (tmp != CHIP_EISA_ID_SIG) {
- printk(KERN_DEBUG "%s: incorrect signature at %x: 0x%x!="
+ printk(KERN_DEBUG "%s: incorrect signature at %x[%x]: 0x%x!="
CHIP_EISA_ID_SIG_STR "\n",
- dev->name, ioaddr + DATA_PORT, tmp);
+ dev->name, ioaddr, DATA_PORT, tmp);
retval = -ENODEV;
goto out2;
}
@@ -724,7 +780,7 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
} else {
i = lp->isa_config & INT_NO_MASK;
if (lp->chip_type == CS8900) {
-#if defined(CONFIG_ARCH_IXDP2X01) || defined(CONFIG_ARCH_PNX0105)
+#if defined(CONFIG_ARCH_IXDP2X01) || defined(CONFIG_ARCH_PNX010X)
i = cs8900_irq_map[0];
#else
/* Translate the IRQ using the IRQ mapping table. */
@@ -790,7 +846,7 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
goto out3;
return 0;
out3:
- outw(PP_ChipID, dev->base_addr + ADD_PORT);
+ writeword(dev->base_addr, ADD_PORT, PP_ChipID);
out2:
release_region(ioaddr & ~3, NETCARD_IO_EXTENT);
out1:
@@ -970,11 +1026,11 @@ void __init reset_chip(struct net_device *dev)
#ifndef CONFIG_ARCH_IXDP2X01
if (lp->chip_type != CS8900) {
/* Hardware problem requires PNP registers to be reconfigured after a reset */
- outw(PP_CS8920_ISAINT, ioaddr + ADD_PORT);
+ writeword(ioaddr, ADD_PORT, PP_CS8920_ISAINT);
outb(dev->irq, ioaddr + DATA_PORT);
outb(0, ioaddr + DATA_PORT + 1);
- outw(PP_CS8920_ISAMemB, ioaddr + ADD_PORT);
+ writeword(ioaddr, ADD_PORT, PP_CS8920_ISAMemB);
outb((dev->mem_start >> 16) & 0xff, ioaddr + DATA_PORT);
outb((dev->mem_start >> 8) & 0xff, ioaddr + DATA_PORT + 1);
}
@@ -1104,8 +1160,8 @@ send_test_pkt(struct net_device *dev)
memcpy(test_packet, dev->dev_addr, ETH_ALEN);
memcpy(test_packet+ETH_ALEN, dev->dev_addr, ETH_ALEN);
- writeword(dev, TX_CMD_PORT, TX_AFTER_ALL);
- writeword(dev, TX_LEN_PORT, ETH_ZLEN);
+ writeword(dev->base_addr, TX_CMD_PORT, TX_AFTER_ALL);
+ writeword(dev->base_addr, TX_LEN_PORT, ETH_ZLEN);
/* Test to see if the chip has allocated memory for the packet */
while (jiffies - timenow < 5)
@@ -1115,7 +1171,7 @@ send_test_pkt(struct net_device *dev)
return 0; /* this shouldn't happen */
/* Write the contents of the packet */
- outsw(dev->base_addr + TX_FRAME_PORT,test_packet,(ETH_ZLEN+1) >>1);
+ writewords(dev->base_addr, TX_FRAME_PORT,test_packet,(ETH_ZLEN+1) >>1);
if (net_debug > 1) printk("Sending test packet ");
/* wait a couple of jiffies for packet to be received */
@@ -1200,7 +1256,7 @@ net_open(struct net_device *dev)
int i;
int ret;
-#if !defined(CONFIG_SH_HICOSH4) && !defined(CONFIG_ARCH_PNX0105) /* uses irq#1, so this won't work */
+#if !defined(CONFIG_SH_HICOSH4) && !defined(CONFIG_ARCH_PNX010X) /* uses irq#1, so this won't work */
if (dev->irq < 2) {
/* Allow interrupts to be generated by the chip */
/* Cirrus' release had this: */
@@ -1231,7 +1287,7 @@ net_open(struct net_device *dev)
else
#endif
{
-#if !defined(CONFIG_ARCH_IXDP2X01) && !defined(CONFIG_ARCH_PNX0105)
+#if !defined(CONFIG_ARCH_IXDP2X01) && !defined(CONFIG_ARCH_PNX010X)
if (((1 << dev->irq) & lp->irq_map) == 0) {
printk(KERN_ERR "%s: IRQ %d is not in our map of allowable IRQs, which is %x\n",
dev->name, dev->irq, lp->irq_map);
@@ -1316,7 +1372,7 @@ net_open(struct net_device *dev)
case A_CNF_MEDIA_10B_2: result = lp->adapter_cnf & A_CNF_10B_2; break;
default: result = lp->adapter_cnf & (A_CNF_10B_T | A_CNF_AUI | A_CNF_10B_2);
}
-#ifdef CONFIG_ARCH_PNX0105
+#ifdef CONFIG_ARCH_PNX010X
result = A_CNF_10B_T;
#endif
if (!result) {
@@ -1457,8 +1513,8 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
/* initiate a transmit sequence */
- writeword(dev, TX_CMD_PORT, lp->send_cmd);
- writeword(dev, TX_LEN_PORT, skb->len);
+ writeword(dev->base_addr, TX_CMD_PORT, lp->send_cmd);
+ writeword(dev->base_addr, TX_LEN_PORT, skb->len);
/* Test to see if the chip has allocated memory for the packet */
if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) {
@@ -1472,7 +1528,7 @@ static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
return 1;
}
/* Write the contents of the packet */
- outsw(dev->base_addr + TX_FRAME_PORT,skb->data,(skb->len+1) >>1);
+ writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1);
spin_unlock_irq(&lp->lock);
lp->stats.tx_bytes += skb->len;
dev->trans_start = jiffies;
@@ -1512,7 +1568,7 @@ static irqreturn_t net_interrupt(int irq, void *dev_id, struct pt_regs * regs)
course, if you're on a slow machine, and packets are arriving
faster than you can read them off, you're screwed. Hasta la
vista, baby! */
- while ((status = readword(dev, ISQ_PORT))) {
+ while ((status = readword(dev->base_addr, ISQ_PORT))) {
if (net_debug > 4)printk("%s: event=%04x\n", dev->name, status);
handled = 1;
switch(status & ISQ_EVENT_MASK) {
@@ -1606,8 +1662,8 @@ net_rx(struct net_device *dev)
int status, length;
int ioaddr = dev->base_addr;
- status = inw(ioaddr + RX_FRAME_PORT);
- length = inw(ioaddr + RX_FRAME_PORT);
+ status = readword(ioaddr, RX_FRAME_PORT);
+ length = readword(ioaddr, RX_FRAME_PORT);
if ((status & RX_OK) == 0) {
count_rx_errors(status, lp);
@@ -1626,9 +1682,9 @@ net_rx(struct net_device *dev)
skb_reserve(skb, 2); /* longword align L3 header */
skb->dev = dev;
- insw(ioaddr + RX_FRAME_PORT, skb_put(skb, length), length >> 1);
+ readwords(ioaddr, RX_FRAME_PORT, skb_put(skb, length), length >> 1);
if (length & 1)
- skb->data[length-1] = inw(ioaddr + RX_FRAME_PORT);
+ skb->data[length-1] = readword(ioaddr, RX_FRAME_PORT);
if (net_debug > 3) {
printk( "%s: received %d byte packet of type %x\n",
@@ -1901,7 +1957,7 @@ void
cleanup_module(void)
{
unregister_netdev(dev_cs89x0);
- outw(PP_ChipID, dev_cs89x0->base_addr + ADD_PORT);
+ writeword(dev_cs89x0->base_addr, ADD_PORT, PP_ChipID);
release_region(dev_cs89x0->base_addr, NETCARD_IO_EXTENT);
free_netdev(dev_cs89x0);
}
diff --git a/drivers/net/cs89x0.h b/drivers/net/cs89x0.h
index decea264f12..bd954aaa636 100644
--- a/drivers/net/cs89x0.h
+++ b/drivers/net/cs89x0.h
@@ -16,13 +16,6 @@
#include <linux/config.h>
-#if defined(CONFIG_ARCH_IXDP2X01) || defined(CONFIG_ARCH_PNX0105)
-/* IXDP2401/IXDP2801 uses dword-aligned register addressing */
-#define CS89x0_PORT(reg) ((reg) * 2)
-#else
-#define CS89x0_PORT(reg) (reg)
-#endif
-
#define PP_ChipID 0x0000 /* offset 0h -> Corp -ID */
/* offset 2h -> Model/Product Number */
/* offset 3h -> Chip Revision Number */
@@ -332,16 +325,16 @@
#define RAM_SIZE 0x1000 /* The card has 4k bytes or RAM */
#define PKT_START PP_TxFrame /* Start of packet RAM */
-#define RX_FRAME_PORT CS89x0_PORT(0x0000)
+#define RX_FRAME_PORT 0x0000
#define TX_FRAME_PORT RX_FRAME_PORT
-#define TX_CMD_PORT CS89x0_PORT(0x0004)
+#define TX_CMD_PORT 0x0004
#define TX_NOW 0x0000 /* Tx packet after 5 bytes copied */
#define TX_AFTER_381 0x0040 /* Tx packet after 381 bytes copied */
#define TX_AFTER_ALL 0x00c0 /* Tx packet after all bytes copied */
-#define TX_LEN_PORT CS89x0_PORT(0x0006)
-#define ISQ_PORT CS89x0_PORT(0x0008)
-#define ADD_PORT CS89x0_PORT(0x000A)
-#define DATA_PORT CS89x0_PORT(0x000C)
+#define TX_LEN_PORT 0x0006
+#define ISQ_PORT 0x0008
+#define ADD_PORT 0x000A
+#define DATA_PORT 0x000C
#define EEPROM_WRITE_EN 0x00F0
#define EEPROM_WRITE_DIS 0x0000
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 22cd0455670..23de22631c6 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -132,6 +132,10 @@
* TODO:
* o several entry points race with dev->close
* o check for tx-no-resources/stop Q races with tx clean/wake Q
+ *
+ * FIXES:
+ * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
+ * - Stratus87247: protect MDI control register manipulations
*/
#include <linux/config.h>
@@ -578,6 +582,7 @@ struct nic {
u16 leds;
u16 eeprom_wc;
u16 eeprom[256];
+ spinlock_t mdio_lock;
};
static inline void e100_write_flush(struct nic *nic)
@@ -876,15 +881,35 @@ static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
u32 data_out = 0;
unsigned int i;
+ unsigned long flags;
+
+ /*
+ * Stratus87247: we shouldn't be writing the MDI control
+ * register until the Ready bit shows True. Also, since
+ * manipulation of the MDI control registers is a multi-step
+ * procedure it should be done under lock.
+ */
+ spin_lock_irqsave(&nic->mdio_lock, flags);
+ for (i = 100; i; --i) {
+ if (readl(&nic->csr->mdi_ctrl) & mdi_ready)
+ break;
+ udelay(20);
+ }
+ if (unlikely(!i)) {
+ printk("e100.mdio_ctrl(%s) won't go Ready\n",
+ nic->netdev->name );
+ spin_unlock_irqrestore(&nic->mdio_lock, flags);
+ return 0; /* No way to indicate timeout error */
+ }
writel((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
- for(i = 0; i < 100; i++) {
+ for (i = 0; i < 100; i++) {
udelay(20);
- if((data_out = readl(&nic->csr->mdi_ctrl)) & mdi_ready)
+ if ((data_out = readl(&nic->csr->mdi_ctrl)) & mdi_ready)
break;
}
-
+ spin_unlock_irqrestore(&nic->mdio_lock, flags);
DPRINTK(HW, DEBUG,
"%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
@@ -2562,6 +2587,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
/* locks must be initialized before calling hw_reset */
spin_lock_init(&nic->cb_lock);
spin_lock_init(&nic->cmd_lock);
+ spin_lock_init(&nic->mdio_lock);
/* Reset the device before pci_set_master() in case device is in some
* funky state and has an interrupt pending - hint: we don't have the
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 38695d5b463..ccbbe5ad8e0 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -545,7 +545,7 @@ e1000_check_fiber_options(struct e1000_adapter *adapter)
static void __devinit
e1000_check_copper_options(struct e1000_adapter *adapter)
{
- int speed, dplx;
+ int speed, dplx, an;
int bd = adapter->bd_number;
{ /* Speed */
@@ -641,8 +641,12 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
.p = an_list }}
};
- int an = AutoNeg[bd];
- e1000_validate_option(&an, &opt, adapter);
+ if (num_AutoNeg > bd) {
+ an = AutoNeg[bd];
+ e1000_validate_option(&an, &opt, adapter);
+ } else {
+ an = opt.def;
+ }
adapter->hw.autoneg_advertised = an;
}
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index f5a4dd7d856..e5c5cd2a271 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -140,13 +140,6 @@ static int __init do_e2100_probe(struct net_device *dev)
return -ENODEV;
}
-static void cleanup_card(struct net_device *dev)
-{
- /* NB: e21_close() handles free_irq */
- iounmap(ei_status.mem);
- release_region(dev->base_addr, E21_IO_EXTENT);
-}
-
#ifndef MODULE
struct net_device * __init e2100_probe(int unit)
{
@@ -463,6 +456,13 @@ init_module(void)
return -ENXIO;
}
+static void cleanup_card(struct net_device *dev)
+{
+ /* NB: e21_close() handles free_irq */
+ iounmap(ei_status.mem);
+ release_region(dev->base_addr, E21_IO_EXTENT);
+}
+
void
cleanup_module(void)
{
diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c
index 50f8e23bb9e..6b0ab1eac3f 100644
--- a/drivers/net/es3210.c
+++ b/drivers/net/es3210.c
@@ -155,13 +155,6 @@ static int __init do_es_probe(struct net_device *dev)
return -ENODEV;
}
-static void cleanup_card(struct net_device *dev)
-{
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, ES_IO_EXTENT);
- iounmap(ei_status.mem);
-}
-
#ifndef MODULE
struct net_device * __init es_probe(int unit)
{
@@ -456,6 +449,13 @@ init_module(void)
return -ENXIO;
}
+static void cleanup_card(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, ES_IO_EXTENT);
+ iounmap(ei_status.mem);
+}
+
void
cleanup_module(void)
{
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index c39344adecc..3682ec61e8a 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -101,6 +101,7 @@
* 0.46: 20 Oct 2005: Add irq optimization modes.
* 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
* 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
+ * 0.49: 10 Dec 2005: Fix tso for large buffers.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
@@ -112,7 +113,7 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
-#define FORCEDETH_VERSION "0.48"
+#define FORCEDETH_VERSION "0.49"
#define DRV_NAME "forcedeth"
#include <linux/module.h>
@@ -349,6 +350,8 @@ typedef union _ring_type {
#define NV_TX2_VALID (1<<31)
#define NV_TX2_TSO (1<<28)
#define NV_TX2_TSO_SHIFT 14
+#define NV_TX2_TSO_MAX_SHIFT 14
+#define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3 (1<<27)
#define NV_TX2_CHECKSUM_L4 (1<<26)
@@ -408,15 +411,15 @@ typedef union _ring_type {
#define NV_WATCHDOG_TIMEO (5*HZ)
#define RX_RING 128
-#define TX_RING 64
+#define TX_RING 256
/*
* If your nic mysteriously hangs then try to reduce the limits
* to 1/0: It might be required to set NV_TX_LASTPACKET in the
* last valid ring entry. But this would be impossible to
* implement - probably a disassembly error.
*/
-#define TX_LIMIT_STOP 63
-#define TX_LIMIT_START 62
+#define TX_LIMIT_STOP 255
+#define TX_LIMIT_START 254
/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS (64)
@@ -535,6 +538,7 @@ struct fe_priv {
unsigned int next_tx, nic_tx;
struct sk_buff *tx_skbuff[TX_RING];
dma_addr_t tx_dma[TX_RING];
+ unsigned int tx_dma_len[TX_RING];
u32 tx_flags;
};
@@ -935,6 +939,7 @@ static void nv_init_tx(struct net_device *dev)
else
np->tx_ring.ex[i].FlagLen = 0;
np->tx_skbuff[i] = NULL;
+ np->tx_dma[i] = 0;
}
}
@@ -945,30 +950,27 @@ static int nv_init_ring(struct net_device *dev)
return nv_alloc_rx(dev);
}
-static void nv_release_txskb(struct net_device *dev, unsigned int skbnr)
+static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
{
struct fe_priv *np = netdev_priv(dev);
- struct sk_buff *skb = np->tx_skbuff[skbnr];
- unsigned int j, entry, fragments;
-
- dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d, skb %p\n",
- dev->name, skbnr, np->tx_skbuff[skbnr]);
-
- entry = skbnr;
- if ((fragments = skb_shinfo(skb)->nr_frags) != 0) {
- for (j = fragments; j >= 1; j--) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[j-1];
- pci_unmap_page(np->pci_dev, np->tx_dma[entry],
- frag->size,
- PCI_DMA_TODEVICE);
- entry = (entry - 1) % TX_RING;
- }
+
+ dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n",
+ dev->name, skbnr);
+
+ if (np->tx_dma[skbnr]) {
+ pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
+ np->tx_dma_len[skbnr],
+ PCI_DMA_TODEVICE);
+ np->tx_dma[skbnr] = 0;
+ }
+
+ if (np->tx_skbuff[skbnr]) {
+ dev_kfree_skb_irq(np->tx_skbuff[skbnr]);
+ np->tx_skbuff[skbnr] = NULL;
+ return 1;
+ } else {
+ return 0;
}
- pci_unmap_single(np->pci_dev, np->tx_dma[entry],
- skb->len - skb->data_len,
- PCI_DMA_TODEVICE);
- dev_kfree_skb_irq(skb);
- np->tx_skbuff[skbnr] = NULL;
}
static void nv_drain_tx(struct net_device *dev)
@@ -981,10 +983,8 @@ static void nv_drain_tx(struct net_device *dev)
np->tx_ring.orig[i].FlagLen = 0;
else
np->tx_ring.ex[i].FlagLen = 0;
- if (np->tx_skbuff[i]) {
- nv_release_txskb(dev, i);
+ if (nv_release_txskb(dev, i))
np->stats.tx_dropped++;
- }
}
}
@@ -1021,68 +1021,105 @@ static void drain_ring(struct net_device *dev)
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
+ u32 tx_flags = 0;
u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
unsigned int fragments = skb_shinfo(skb)->nr_frags;
- unsigned int nr = (np->next_tx + fragments) % TX_RING;
+ unsigned int nr = (np->next_tx - 1) % TX_RING;
+ unsigned int start_nr = np->next_tx % TX_RING;
unsigned int i;
+ u32 offset = 0;
+ u32 bcnt;
+ u32 size = skb->len-skb->data_len;
+ u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+
+ /* add fragments to entries count */
+ for (i = 0; i < fragments; i++) {
+ entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
+ ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+ }
spin_lock_irq(&np->lock);
- if ((np->next_tx - np->nic_tx + fragments) > TX_LIMIT_STOP) {
+ if ((np->next_tx - np->nic_tx + entries - 1) > TX_LIMIT_STOP) {
spin_unlock_irq(&np->lock);
netif_stop_queue(dev);
return NETDEV_TX_BUSY;
}
- np->tx_skbuff[nr] = skb;
-
- if (fragments) {
- dprintk(KERN_DEBUG "%s: nv_start_xmit: buffer contains %d fragments\n", dev->name, fragments);
- /* setup descriptors in reverse order */
- for (i = fragments; i >= 1; i--) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
- np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset, frag->size,
- PCI_DMA_TODEVICE);
+ /* setup the header buffer */
+ do {
+ bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+ nr = (nr + 1) % TX_RING;
+
+ np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
+ PCI_DMA_TODEVICE);
+ np->tx_dma_len[nr] = bcnt;
+
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
+ np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+ } else {
+ np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+ np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+ np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+ }
+ tx_flags = np->tx_flags;
+ offset += bcnt;
+ size -= bcnt;
+ } while(size);
+
+ /* setup the fragments */
+ for (i = 0; i < fragments; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ u32 size = frag->size;
+ offset = 0;
+
+ do {
+ bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+ nr = (nr + 1) % TX_RING;
+
+ np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
+ PCI_DMA_TODEVICE);
+ np->tx_dma_len[nr] = bcnt;
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
- np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (frag->size-1) | np->tx_flags | tx_flags_extra);
+ np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
} else {
np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
- np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (frag->size-1) | np->tx_flags | tx_flags_extra);
+ np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
}
-
- nr = (nr - 1) % TX_RING;
+ offset += bcnt;
+ size -= bcnt;
+ } while (size);
+ }
- if (np->desc_ver == DESC_VER_1)
- tx_flags_extra &= ~NV_TX_LASTPACKET;
- else
- tx_flags_extra &= ~NV_TX2_LASTPACKET;
- }
+ /* set last fragment flag */
+ if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+ np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
+ } else {
+ np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
}
+ np->tx_skbuff[nr] = skb;
+
#ifdef NETIF_F_TSO
if (skb_shinfo(skb)->tso_size)
- tx_flags_extra |= NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
+ tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
else
#endif
- tx_flags_extra |= (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
+ tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
- np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len-skb->data_len,
- PCI_DMA_TODEVICE);
-
+ /* set tx flags */
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
- np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
- np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-skb->data_len-1) | np->tx_flags | tx_flags_extra);
+ np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
} else {
- np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
- np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
- np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-skb->data_len-1) | np->tx_flags | tx_flags_extra);
+ np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
}
- dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission. tx_flags_extra: %x\n",
- dev->name, np->next_tx, tx_flags_extra);
+ dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
+ dev->name, np->next_tx, entries, tx_flags_extra);
{
int j;
for (j=0; j<64; j++) {
@@ -1093,7 +1130,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
dprintk("\n");
}
- np->next_tx += 1 + fragments;
+ np->next_tx += entries;
dev->trans_start = jiffies;
spin_unlock_irq(&np->lock);
@@ -1140,7 +1177,6 @@ static void nv_tx_done(struct net_device *dev)
np->stats.tx_packets++;
np->stats.tx_bytes += skb->len;
}
- nv_release_txskb(dev, i);
}
} else {
if (Flags & NV_TX2_LASTPACKET) {
@@ -1156,9 +1192,9 @@ static void nv_tx_done(struct net_device *dev)
np->stats.tx_packets++;
np->stats.tx_bytes += skb->len;
}
- nv_release_txskb(dev, i);
}
}
+ nv_release_txskb(dev, i);
np->nic_tx++;
}
if (np->next_tx - np->nic_tx < TX_LIMIT_START)
@@ -2456,7 +2492,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
#ifdef NETIF_F_TSO
- /* disabled dev->features |= NETIF_F_TSO; */
+ dev->features |= NETIF_F_TSO;
#endif
}
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 146f9513aea..0c18dbd67d3 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -84,6 +84,7 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
+#include <linux/in.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -398,12 +399,15 @@ static int init_phy(struct net_device *dev)
priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
SUPPORTED_1000baseT_Full : 0;
struct phy_device *phydev;
+ char phy_id[BUS_ID_SIZE];
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
- phydev = phy_connect(dev, priv->einfo->bus_id, &adjust_link, 0);
+ snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id);
+
+ phydev = phy_connect(dev, phy_id, &adjust_link, 0);
if (IS_ERR(phydev)) {
printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 94a91da84fb..cb9d66ac3ab 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -718,14 +718,14 @@ struct gfar_private {
uint32_t msg_enable;
};
-extern inline u32 gfar_read(volatile unsigned *addr)
+static inline u32 gfar_read(volatile unsigned *addr)
{
u32 val;
val = in_be32(addr);
return val;
}
-extern inline void gfar_write(volatile unsigned *addr, u32 val)
+static inline void gfar_write(volatile unsigned *addr, u32 val)
{
out_be32(addr, val);
}
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index 04a462c2a5b..74e52fcbf80 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -128,6 +128,7 @@ int gfar_mdio_probe(struct device *dev)
struct gianfar_mdio_data *pdata;
struct gfar_mii *regs;
struct mii_bus *new_bus;
+ struct resource *r;
int err = 0;
if (NULL == dev)
@@ -151,8 +152,10 @@ int gfar_mdio_probe(struct device *dev)
return -ENODEV;
}
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
/* Set the PHY base address */
- regs = (struct gfar_mii *) ioremap(pdata->paddr,
+ regs = (struct gfar_mii *) ioremap(r->start,
sizeof (struct gfar_mii));
if (NULL == regs) {
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index 10d34cb1919..51ef181b136 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -7,7 +7,7 @@
* Based on 8260_io/fcc_enet.c
*
* Author: Andy Fleming
- * Maintainer: Kumar Gala (kumar.gala@freescale.com)
+ * Maintainer: Kumar Gala (galak@kernel.crashing.org)
*
* Copyright (c) 2002-2005 Freescale Semiconductor, Inc.
*
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 90999867a32..102c1f0b90d 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -456,11 +456,6 @@ out:
/* ----------------------------------------------------------------------- */
-static int sixpack_receive_room(struct tty_struct *tty)
-{
- return 65536; /* We can handle an infinite amount of data. :-) */
-}
-
/*
* Handle the 'receiver data ready' interrupt.
* This function is called by the 'tty_io' module in the kernel when
@@ -671,6 +666,7 @@ static int sixpack_open(struct tty_struct *tty)
/* Done. We have linked the TTY line to a channel. */
tty->disc_data = sp;
+ tty->receive_room = 65536;
/* Now we're ready to register. */
if (register_netdev(dev))
@@ -802,7 +798,6 @@ static struct tty_ldisc sp_ldisc = {
.close = sixpack_close,
.ioctl = sixpack_ioctl,
.receive_buf = sixpack_receive_buf,
- .receive_room = sixpack_receive_room,
.write_wakeup = sixpack_write_wakeup,
};
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 3e9accf137e..dc5e9d59dee 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -515,6 +515,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
count = kiss_esc(p, (unsigned char *)ax->xbuff, len);
}
}
+ spin_unlock_bh(&ax->buflock);
set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
actual = ax->tty->driver->write(ax->tty, ax->xbuff, count);
@@ -752,6 +753,7 @@ static int mkiss_open(struct tty_struct *tty)
ax->tty = tty;
tty->disc_data = ax;
+ tty->receive_room = 65535;
if (tty->driver->flush_buffer)
tty->driver->flush_buffer(tty);
@@ -939,11 +941,6 @@ static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp,
tty->driver->unthrottle(tty);
}
-static int mkiss_receive_room(struct tty_struct *tty)
-{
- return 65536; /* We can handle an infinite amount of data. :-) */
-}
-
/*
* Called by the driver when there's room for more data. If we have
* more packets to send, we send them here.
@@ -982,7 +979,6 @@ static struct tty_ldisc ax_ldisc = {
.close = mkiss_close,
.ioctl = mkiss_ioctl,
.receive_buf = mkiss_receive_buf,
- .receive_room = mkiss_receive_room,
.write_wakeup = mkiss_write_wakeup
};
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
index 0abf5dd08b4..74e167e7dea 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/hp-plus.c
@@ -138,12 +138,6 @@ static int __init do_hpp_probe(struct net_device *dev)
return -ENODEV;
}
-static void cleanup_card(struct net_device *dev)
-{
- /* NB: hpp_close() handles free_irq */
- release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
-}
-
#ifndef MODULE
struct net_device * __init hp_plus_probe(int unit)
{
@@ -473,6 +467,12 @@ init_module(void)
return -ENXIO;
}
+static void cleanup_card(struct net_device *dev)
+{
+ /* NB: hpp_close() handles free_irq */
+ release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
+}
+
void
cleanup_module(void)
{
diff --git a/drivers/net/hp.c b/drivers/net/hp.c
index 59cf841b14a..cf9fb3698a6 100644
--- a/drivers/net/hp.c
+++ b/drivers/net/hp.c
@@ -102,12 +102,6 @@ static int __init do_hp_probe(struct net_device *dev)
return -ENODEV;
}
-static void cleanup_card(struct net_device *dev)
-{
- free_irq(dev->irq, dev);
- release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
-}
-
#ifndef MODULE
struct net_device * __init hp_probe(int unit)
{
@@ -444,6 +438,12 @@ init_module(void)
return -ENXIO;
}
+static void cleanup_card(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
+}
+
void
cleanup_module(void)
{
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index e92c17f6931..55c7ed60839 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -276,7 +276,7 @@ static void hp100_RegisterDump(struct net_device *dev);
* Convert an address in a kernel buffer to a bus/phys/dma address.
* This work *only* for memory fragments part of lp->page_vaddr,
* because it was properly DMA allocated via pci_alloc_consistent(),
- * so we just need to "retreive" the original mapping to bus/phys/dma
+ * so we just need to "retrieve" the original mapping to bus/phys/dma
* address - Jean II */
static inline dma_addr_t virt_to_whatever(struct net_device *dev, u32 * ptr)
{
diff --git a/drivers/net/hplance.c b/drivers/net/hplance.c
index 08703d6f934..d8410634bca 100644
--- a/drivers/net/hplance.c
+++ b/drivers/net/hplance.c
@@ -150,7 +150,7 @@ static void __init hplance_init(struct net_device *dev, struct dio_dev *d)
lp->lance.name = (char*)d->name; /* discards const, shut up gcc */
lp->lance.base = va;
lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */
- lp->lance.lance_init_block = 0; /* LANCE addr of same RAM */
+ lp->lance.lance_init_block = NULL; /* LANCE addr of same RAM */
lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */
lp->lance.irq = d->ipl;
lp->lance.writerap = hplance_writerap;
diff --git a/drivers/net/ibm_emac/ibm_emac.h b/drivers/net/ibm_emac/ibm_emac.h
index 644edbff4f9..c2dae6092c4 100644
--- a/drivers/net/ibm_emac/ibm_emac.h
+++ b/drivers/net/ibm_emac/ibm_emac.h
@@ -110,6 +110,7 @@ struct emac_regs {
#define EMAC_MR1_TFS_2K 0x00080000
#define EMAC_MR1_TR0_MULT 0x00008000
#define EMAC_MR1_JPSM 0x00000000
+#define EMAC_MR1_MWSW_001 0x00000000
#define EMAC_MR1_BASE(opb) (EMAC_MR1_TFS_2K | EMAC_MR1_TR0_MULT)
#else
#define EMAC_MR1_RFS_4K 0x00180000
@@ -130,7 +131,7 @@ struct emac_regs {
(freq) <= 83 ? EMAC_MR1_OBCI_83 : \
(freq) <= 100 ? EMAC_MR1_OBCI_100 : EMAC_MR1_OBCI_100P)
#define EMAC_MR1_BASE(opb) (EMAC_MR1_TFS_2K | EMAC_MR1_TR | \
- EMAC_MR1_MWSW_001 | EMAC_MR1_OBCI(opb))
+ EMAC_MR1_OBCI(opb))
#endif
/* EMACx_TMR0 */
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c
index 1da8a66f91e..591c5864ffb 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.c
+++ b/drivers/net/ibm_emac/ibm_emac_core.c
@@ -408,7 +408,7 @@ static int emac_configure(struct ocp_enet_private *dev)
/* Mode register */
r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
if (dev->phy.duplex == DUPLEX_FULL)
- r |= EMAC_MR1_FDE;
+ r |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
dev->stop_timeout = STOP_TIMEOUT_10;
switch (dev->phy.speed) {
case SPEED_1000:
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
new file mode 100644
index 00000000000..1b699259b4e
--- /dev/null
+++ b/drivers/net/ifb.c
@@ -0,0 +1,294 @@
+/* drivers/net/ifb.c:
+
+ The purpose of this driver is to provide a device that allows
+ for sharing of resources:
+
+ 1) qdiscs/policies that are per device as opposed to system wide.
+ ifb allows for a device which can be redirected to thus providing
+ an impression of sharing.
+
+ 2) Allows for queueing incoming traffic for shaping instead of
+ dropping.
+
+ The original concept is based on what is known as the IMQ
+ driver initially written by Martin Devera, later rewritten
+ by Patrick McHardy and then maintained by Andre Correa.
+
+ You need the tc action mirror or redirect to feed this device
+ packets.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version
+ 2 of the License, or (at your option) any later version.
+
+ Authors: Jamal Hadi Salim (2005)
+
+*/
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <net/pkt_sched.h>
+
+#define TX_TIMEOUT (2*HZ)
+
+#define TX_Q_LIMIT 32
+struct ifb_private {
+ struct net_device_stats stats;
+ struct tasklet_struct ifb_tasklet;
+ int tasklet_pending;
+ /* mostly debug stats leave in for now */
+ unsigned long st_task_enter; /* tasklet entered */
+ unsigned long st_txq_refl_try; /* transmit queue refill attempt */
+ unsigned long st_rxq_enter; /* receive queue entered */
+	unsigned long st_rx2tx_tran; /* receive to transmit transfers */
+	unsigned long st_rxq_notenter; /* receive queue not entered, resched */
+ unsigned long st_rx_frm_egr; /* received from egress path */
+ unsigned long st_rx_frm_ing; /* received from ingress path */
+ unsigned long st_rxq_check;
+ unsigned long st_rxq_rsch;
+ struct sk_buff_head rq;
+ struct sk_buff_head tq;
+};
+
+static int numifbs = 1;
+
+static void ri_tasklet(unsigned long dev);
+static int ifb_xmit(struct sk_buff *skb, struct net_device *dev);
+static struct net_device_stats *ifb_get_stats(struct net_device *dev);
+static int ifb_open(struct net_device *dev);
+static int ifb_close(struct net_device *dev);
+
+static void ri_tasklet(unsigned long dev)
+{
+
+ struct net_device *_dev = (struct net_device *)dev;
+ struct ifb_private *dp = netdev_priv(_dev);
+ struct net_device_stats *stats = &dp->stats;
+ struct sk_buff *skb;
+
+ dp->st_task_enter++;
+ if ((skb = skb_peek(&dp->tq)) == NULL) {
+ dp->st_txq_refl_try++;
+ if (spin_trylock(&_dev->xmit_lock)) {
+ dp->st_rxq_enter++;
+ while ((skb = skb_dequeue(&dp->rq)) != NULL) {
+ skb_queue_tail(&dp->tq, skb);
+ dp->st_rx2tx_tran++;
+ }
+ spin_unlock(&_dev->xmit_lock);
+ } else {
+ /* reschedule */
+ dp->st_rxq_notenter++;
+ goto resched;
+ }
+ }
+
+ while ((skb = skb_dequeue(&dp->tq)) != NULL) {
+ u32 from = G_TC_FROM(skb->tc_verd);
+
+ skb->tc_verd = 0;
+ skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
+ stats->tx_packets++;
+ stats->tx_bytes +=skb->len;
+ if (from & AT_EGRESS) {
+ dp->st_rx_frm_egr++;
+ dev_queue_xmit(skb);
+ } else if (from & AT_INGRESS) {
+
+ dp->st_rx_frm_ing++;
+ netif_rx(skb);
+ } else {
+ dev_kfree_skb(skb);
+ stats->tx_dropped++;
+ }
+ }
+
+ if (spin_trylock(&_dev->xmit_lock)) {
+ dp->st_rxq_check++;
+ if ((skb = skb_peek(&dp->rq)) == NULL) {
+ dp->tasklet_pending = 0;
+ if (netif_queue_stopped(_dev))
+ netif_wake_queue(_dev);
+ } else {
+ dp->st_rxq_rsch++;
+ spin_unlock(&_dev->xmit_lock);
+ goto resched;
+ }
+ spin_unlock(&_dev->xmit_lock);
+ } else {
+resched:
+ dp->tasklet_pending = 1;
+ tasklet_schedule(&dp->ifb_tasklet);
+ }
+
+}
+
+static void __init ifb_setup(struct net_device *dev)
+{
+ /* Initialize the device structure. */
+ dev->get_stats = ifb_get_stats;
+ dev->hard_start_xmit = ifb_xmit;
+ dev->open = &ifb_open;
+ dev->stop = &ifb_close;
+
+ /* Fill in device structure with ethernet-generic values. */
+ ether_setup(dev);
+ dev->tx_queue_len = TX_Q_LIMIT;
+ dev->change_mtu = NULL;
+ dev->flags |= IFF_NOARP;
+ dev->flags &= ~IFF_MULTICAST;
+ SET_MODULE_OWNER(dev);
+ random_ether_addr(dev->dev_addr);
+}
+
+static int ifb_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ifb_private *dp = netdev_priv(dev);
+ struct net_device_stats *stats = &dp->stats;
+ int ret = 0;
+ u32 from = G_TC_FROM(skb->tc_verd);
+
+ stats->tx_packets++;
+ stats->tx_bytes+=skb->len;
+
+ if (!from || !skb->input_dev) {
+dropped:
+ dev_kfree_skb(skb);
+ stats->rx_dropped++;
+ return ret;
+ } else {
+ /*
+ * note we could be going
+ * ingress -> egress or
+ * egress -> ingress
+ */
+ skb->dev = skb->input_dev;
+ skb->input_dev = dev;
+ if (from & AT_INGRESS) {
+ skb_pull(skb, skb->dev->hard_header_len);
+ } else {
+ if (!(from & AT_EGRESS)) {
+ goto dropped;
+ }
+ }
+ }
+
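+ /* throttle upper layers once the backlog reaches the tx queue length */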
+ if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) {
+ netif_stop_queue(dev);
+ }
+
+ dev->trans_start = jiffies;
+ skb_queue_tail(&dp->rq, skb);
+ if (!dp->tasklet_pending) {
+ dp->tasklet_pending = 1;
+ tasklet_schedule(&dp->ifb_tasklet);
+ }
+
+ return ret;
+}
+
+static struct net_device_stats *ifb_get_stats(struct net_device *dev)
+{
+ struct ifb_private *dp = netdev_priv(dev);
+ struct net_device_stats *stats = &dp->stats;
+
+ pr_debug("tasklets stats %ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld:%ld \n",
+ dp->st_task_enter, dp->st_txq_refl_try, dp->st_rxq_enter,
+ dp->st_rx2tx_tran dp->st_rxq_notenter, dp->st_rx_frm_egr,
+ dp->st_rx_frm_ing, dp->st_rxq_check, dp->st_rxq_rsch );
+
+ return stats;
+}
+
+static struct net_device **ifbs;
+
+/* Number of ifb devices to be set up by this module. */
+module_param(numifbs, int, 0);
+MODULE_PARM_DESC(numifbs, "Number of ifb devices");
+
+static int ifb_close(struct net_device *dev)
+{
+ struct ifb_private *dp = netdev_priv(dev);
+
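+ /* stop the tasklet and drop anything still sitting in the queues */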
+ tasklet_kill(&dp->ifb_tasklet);
+ netif_stop_queue(dev);
+ skb_queue_purge(&dp->rq);
+ skb_queue_purge(&dp->tq);
+ return 0;
+}
+
+static int ifb_open(struct net_device *dev)
+{
+ struct ifb_private *dp = netdev_priv(dev);
+
+ tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
+ skb_queue_head_init(&dp->rq);
+ skb_queue_head_init(&dp->tq);
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int __init ifb_init_one(int index)
+{
+ struct net_device *dev_ifb;
+ int err;
+
+ dev_ifb = alloc_netdev(sizeof(struct ifb_private),
+ "ifb%d", ifb_setup);
+
+ if (!dev_ifb)
+ return -ENOMEM;
+
+ if ((err = register_netdev(dev_ifb))) {
+ free_netdev(dev_ifb);
+ dev_ifb = NULL;
+ } else {
+ ifbs[index] = dev_ifb;
+ }
+
+ return err;
+}
+
+static void ifb_free_one(int index)
+{
+ unregister_netdev(ifbs[index]);
+ free_netdev(ifbs[index]);
+}
+
+static int __init ifb_init_module(void)
+{
+ int i, err = 0;
+ ifbs = kmalloc(numifbs * sizeof(void *), GFP_KERNEL);
+ if (!ifbs)
+ return -ENOMEM;
+ for (i = 0; i < numifbs && !err; i++)
+ err = ifb_init_one(i);
+ if (err) {
+ while (--i >= 0)
+ ifb_free_one(i);
+ }
+
+ return err;
+}
+
+static void __exit ifb_cleanup_module(void)
+{
+ int i;
+
+ for (i = 0; i < numifbs; i++)
+ ifb_free_one(i);
+ kfree(ifbs);
+}
+
+module_init(ifb_init_module);
+module_exit(ifb_cleanup_module);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jamal Hadi Salim");
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index d54156f11e6..7a081346f07 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -1,4 +1,3 @@
-
menu "Infrared-port device drivers"
depends on IRDA!=n
@@ -156,7 +155,7 @@ comment "Old Serial dongle support"
config DONGLE_OLD
bool "Old Serial dongle support"
- depends on (IRTTY_OLD || IRPORT_SIR) && BROKEN_ON_SMP
+ depends on IRPORT_SIR && BROKEN_ON_SMP
help
Say Y here if you have an infrared device that connects to your
computer's serial port. These devices are called dongles. Then say Y
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index e7a8b7f7f5d..72cbfdc9cfc 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -45,4 +45,4 @@ obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o
obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
# The SIR helper module
-sir-dev-objs := sir_core.o sir_dev.o sir_dongle.o sir_kthread.o
+sir-dev-objs := sir_dev.o sir_dongle.o sir_kthread.o
diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c
index 3d016a498e1..6070195b87b 100644
--- a/drivers/net/irda/irport.c
+++ b/drivers/net/irda/irport.c
@@ -285,19 +285,6 @@ static void irport_start(struct irport_cb *self)
}
/*
- * Function irport_probe (void)
- *
- * Start IO port
- *
- */
-int irport_probe(int iobase)
-{
- IRDA_DEBUG(4, "%s(), iobase=%#x\n", __FUNCTION__, iobase);
-
- return 0;
-}
-
-/*
* Function irport_get_fcr (speed)
*
* Compute value of fcr
@@ -382,7 +369,7 @@ static void irport_change_speed(void *priv, __u32 speed)
* we cannot use schedule_timeout() when we are in interrupt context
*
*/
-int __irport_change_speed(struct irda_task *task)
+static int __irport_change_speed(struct irda_task *task)
{
struct irport_cb *self;
__u32 speed = (__u32) task->param;
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index b8d112348ba..101750bf210 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -289,22 +289,6 @@ static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
}
/*
- * Function irtty_receive_room (tty)
- *
- * Used by the TTY to find out how much data we can receive at a time
- *
-*/
-static int irtty_receive_room(struct tty_struct *tty)
-{
- struct sirtty_cb *priv = tty->disc_data;
-
- IRDA_ASSERT(priv != NULL, return 0;);
- IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return 0;);
-
- return 65536; /* We can handle an infinite amount of data. :-) */
-}
-
-/*
* Function irtty_write_wakeup (tty)
*
* Called by the driver when there's room for more data. If we have
@@ -534,6 +518,7 @@ static int irtty_open(struct tty_struct *tty)
dev->priv = priv;
tty->disc_data = priv;
+ tty->receive_room = 65536;
up(&irtty_sem);
@@ -605,7 +590,6 @@ static struct tty_ldisc irda_ldisc = {
.ioctl = irtty_ioctl,
.poll = NULL,
.receive_buf = irtty_receive_buf,
- .receive_room = irtty_receive_room,
.write_wakeup = irtty_write_wakeup,
.owner = THIS_MODULE,
};
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
index f0b8bc3637e..f69fb4cec76 100644
--- a/drivers/net/irda/sir-dev.h
+++ b/drivers/net/irda/sir-dev.h
@@ -133,8 +133,6 @@ extern int sirdev_put_dongle(struct sir_dev *self);
extern void sirdev_enable_rx(struct sir_dev *dev);
extern int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param);
-extern int __init irda_thread_create(void);
-extern void __exit irda_thread_join(void);
/* inline helpers */
diff --git a/drivers/net/irda/sir_core.c b/drivers/net/irda/sir_core.c
deleted file mode 100644
index a49f910c835..00000000000
--- a/drivers/net/irda/sir_core.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/*********************************************************************
- *
- * sir_core.c: module core for irda-sir abstraction layer
- *
- * Copyright (c) 2002 Martin Diehl
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- ********************************************************************/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <net/irda/irda.h>
-
-#include "sir-dev.h"
-
-/***************************************************************************/
-
-MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
-MODULE_DESCRIPTION("IrDA SIR core");
-MODULE_LICENSE("GPL");
-
-/***************************************************************************/
-
-EXPORT_SYMBOL(irda_register_dongle);
-EXPORT_SYMBOL(irda_unregister_dongle);
-
-EXPORT_SYMBOL(sirdev_get_instance);
-EXPORT_SYMBOL(sirdev_put_instance);
-
-EXPORT_SYMBOL(sirdev_set_dongle);
-EXPORT_SYMBOL(sirdev_write_complete);
-EXPORT_SYMBOL(sirdev_receive);
-
-EXPORT_SYMBOL(sirdev_raw_write);
-EXPORT_SYMBOL(sirdev_raw_read);
-EXPORT_SYMBOL(sirdev_set_dtr_rts);
-
-static int __init sir_core_init(void)
-{
- return irda_thread_create();
-}
-
-static void __exit sir_core_exit(void)
-{
- irda_thread_join();
-}
-
-module_init(sir_core_init);
-module_exit(sir_core_exit);
-
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index df22b8b532e..ea7c9464d46 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -60,6 +60,7 @@ int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
up(&dev->fsm.sem);
return err;
}
+EXPORT_SYMBOL(sirdev_set_dongle);
/* used by dongle drivers for dongle programming */
@@ -94,6 +95,7 @@ int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
spin_unlock_irqrestore(&dev->tx_lock, flags);
return ret;
}
+EXPORT_SYMBOL(sirdev_raw_write);
/* seems some dongle drivers may need this */
@@ -116,6 +118,7 @@ int sirdev_raw_read(struct sir_dev *dev, char *buf, int len)
return count;
}
+EXPORT_SYMBOL(sirdev_raw_read);
int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
{
@@ -124,7 +127,8 @@ int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
ret = dev->drv->set_dtr_rts(dev, dtr, rts);
return ret;
}
-
+EXPORT_SYMBOL(sirdev_set_dtr_rts);
+
/**********************************************************************/
/* called from client driver - likely with bh-context - to indicate
@@ -227,6 +231,7 @@ void sirdev_write_complete(struct sir_dev *dev)
done:
spin_unlock_irqrestore(&dev->tx_lock, flags);
}
+EXPORT_SYMBOL(sirdev_write_complete);
/* called from client driver - likely with bh-context - to give us
* some more received bytes. We put them into the rx-buffer,
@@ -279,6 +284,7 @@ int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
return 0;
}
+EXPORT_SYMBOL(sirdev_receive);
/**********************************************************************/
@@ -641,6 +647,7 @@ out_freenetdev:
out:
return NULL;
}
+EXPORT_SYMBOL(sirdev_get_instance);
int sirdev_put_instance(struct sir_dev *dev)
{
@@ -673,4 +680,5 @@ int sirdev_put_instance(struct sir_dev *dev)
return 0;
}
+EXPORT_SYMBOL(sirdev_put_instance);
diff --git a/drivers/net/irda/sir_dongle.c b/drivers/net/irda/sir_dongle.c
index c5b76746e72..8d225921ae7 100644
--- a/drivers/net/irda/sir_dongle.c
+++ b/drivers/net/irda/sir_dongle.c
@@ -50,6 +50,7 @@ int irda_register_dongle(struct dongle_driver *new)
up(&dongle_list_lock);
return 0;
}
+EXPORT_SYMBOL(irda_register_dongle);
int irda_unregister_dongle(struct dongle_driver *drv)
{
@@ -58,6 +59,7 @@ int irda_unregister_dongle(struct dongle_driver *drv)
up(&dongle_list_lock);
return 0;
}
+EXPORT_SYMBOL(irda_unregister_dongle);
int sirdev_get_dongle(struct sir_dev *dev, IRDA_DONGLE type)
{
diff --git a/drivers/net/irda/sir_kthread.c b/drivers/net/irda/sir_kthread.c
index c65054364bc..e3904d6bfec 100644
--- a/drivers/net/irda/sir_kthread.c
+++ b/drivers/net/irda/sir_kthread.c
@@ -466,7 +466,7 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par
return 0;
}
-int __init irda_thread_create(void)
+static int __init irda_thread_create(void)
{
struct completion startup;
int pid;
@@ -488,7 +488,7 @@ int __init irda_thread_create(void)
return 0;
}
-void __exit irda_thread_join(void)
+static void __exit irda_thread_join(void)
{
if (irda_rq_queue.thread) {
flush_irda_queue();
@@ -499,3 +499,10 @@ void __exit irda_thread_join(void)
}
}
+module_init(irda_thread_create);
+module_exit(irda_thread_join);
+
+MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
+MODULE_DESCRIPTION("IrDA SIR core");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index 741aecc655d..a82a4ba8de4 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -577,8 +577,8 @@ struct ring_descr_hw {
struct {
u8 addr_res[3];
volatile u8 status; /* descriptor status */
- } rd_s __attribute__((packed));
- } rd_u __attribute((packed));
+ } __attribute__((packed)) rd_s;
+ } __attribute((packed)) rd_u;
} __attribute__ ((packed));
#define rd_addr rd_u.addr
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 77eadf84cb2..f0f04be989d 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -590,9 +590,9 @@ static void veth_handle_event(struct HvLpEvent *event, struct pt_regs *regs)
{
struct veth_lpevent *veth_event = (struct veth_lpevent *)event;
- if (event->xFlags.xFunction == HvLpEvent_Function_Ack)
+ if (hvlpevent_is_ack(event))
veth_handle_ack(veth_event);
- else if (event->xFlags.xFunction == HvLpEvent_Function_Int)
+ else
veth_handle_int(veth_event);
}
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index 1d75ca0bb93..d1d714faa6c 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -309,17 +309,6 @@ static void lance_tx_timeout (struct net_device *dev);
-static void cleanup_card(struct net_device *dev)
-{
- struct lance_private *lp = dev->priv;
- if (dev->dma != 4)
- free_dma(dev->dma);
- release_region(dev->base_addr, LANCE_TOTAL_SIZE);
- kfree(lp->tx_bounce_buffs);
- kfree((void*)lp->rx_buffs);
- kfree(lp);
-}
-
#ifdef MODULE
#define MAX_CARDS 8 /* Max number of interfaces (cards) per module */
@@ -367,6 +356,17 @@ int init_module(void)
return -ENXIO;
}
+static void cleanup_card(struct net_device *dev)
+{
+ struct lance_private *lp = dev->priv;
+ if (dev->dma != 4)
+ free_dma(dev->dma);
+ release_region(dev->base_addr, LANCE_TOTAL_SIZE);
+ kfree(lp->tx_bounce_buffs);
+ kfree((void*)lp->rx_buffs);
+ kfree(lp);
+}
+
void cleanup_module(void)
{
int this_dev;
diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c
index 309d254842c..646e89fc356 100644
--- a/drivers/net/lne390.c
+++ b/drivers/net/lne390.c
@@ -145,13 +145,6 @@ static int __init do_lne390_probe(struct net_device *dev)
return -ENODEV;
}
-static void cleanup_card(struct net_device *dev)
-{
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, LNE390_IO_EXTENT);
- iounmap(ei_status.mem);
-}
-
#ifndef MODULE
struct net_device * __init lne390_probe(int unit)
{
@@ -440,6 +433,13 @@ int init_module(void)
return -ENXIO;
}
+static void cleanup_card(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, LNE390_IO_EXTENT);
+ iounmap(ei_status.mem);
+}
+
void cleanup_module(void)
{
int this_dev;
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index d8c99f038fa..06cb460361a 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -559,55 +559,52 @@ static void mac8390_no_reset(struct net_device *dev)
/* directly from daynaport.c by Alan Cox */
static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from, int count)
{
- volatile unsigned short *ptr;
- unsigned short *target=to;
+ volatile unsigned char *ptr;
+ unsigned char *target=to;
from<<=1; /* word, skip overhead */
- ptr=(unsigned short *)(dev->mem_start+from);
+ ptr=(unsigned char *)(dev->mem_start+from);
/* Leading byte? */
if (from&2) {
- *((char *)target)++ = *(((char *)ptr++)-1);
+ *target++ = ptr[-1];
+ ptr += 2;
count--;
}
while(count>=2)
{
- *target++=*ptr++; /* Copy and */
- ptr++; /* skip cruft */
+ *(unsigned short *)target = *(unsigned short volatile *)ptr;
+ ptr += 4; /* skip cruft */
+ target += 2;
count-=2;
}
/* Trailing byte? */
if(count)
- {
- /* Big endian */
- unsigned short v=*ptr;
- *((char *)target)=v>>8;
- }
+ *target = *ptr;
}
static void dayna_memcpy_tocard(struct net_device *dev, int to, const void *from, int count)
{
volatile unsigned short *ptr;
- const unsigned short *src=from;
+ const unsigned char *src=from;
to<<=1; /* word, skip overhead */
ptr=(unsigned short *)(dev->mem_start+to);
/* Leading byte? */
if (to&2) { /* avoid a byte write (stomps on other data) */
- ptr[-1] = (ptr[-1]&0xFF00)|*((unsigned char *)src)++;
+ ptr[-1] = (ptr[-1]&0xFF00)|*src++;
ptr++;
count--;
}
while(count>=2)
{
- *ptr++=*src++; /* Copy and */
+ *ptr++=*(unsigned short *)src; /* Copy and */
ptr++; /* skip cruft */
+ src += 2;
count-=2;
}
/* Trailing byte? */
if(count)
{
- /* Big endian */
- unsigned short v=*src;
/* card doesn't like byte writes */
- *ptr=(*ptr&0x00FF)|(v&0xFF00);
+ *ptr=(*ptr&0x00FF)|(*src << 8);
}
}
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 3cb9b3fe0cf..22c3a37bba5 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -6,7 +6,7 @@
* Copyright (C) 2002 rabeeh@galileo.co.il
*
* Copyright (C) 2003 PMC-Sierra, Inc.,
- * written by Manish Lachwani (lachwani@pmc-sierra.com)
+ * written by Manish Lachwani
*
* Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
*
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index 0de8fdd2aa8..94f782d51f0 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -212,15 +212,6 @@ static int __init do_ne_probe(struct net_device *dev)
return -ENODEV;
}
-static void cleanup_card(struct net_device *dev)
-{
- struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
- if (idev)
- pnp_device_detach(idev);
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, NE_IO_EXTENT);
-}
-
#ifndef MODULE
struct net_device * __init ne_probe(int unit)
{
@@ -859,6 +850,15 @@ int init_module(void)
return -ENODEV;
}
+static void cleanup_card(struct net_device *dev)
+{
+ struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
+ if (idev)
+ pnp_device_detach(idev);
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, NE_IO_EXTENT);
+}
+
void cleanup_module(void)
{
int this_dev;
diff --git a/drivers/net/ne2.c b/drivers/net/ne2.c
index 6d62ada85de..e6df375a1d4 100644
--- a/drivers/net/ne2.c
+++ b/drivers/net/ne2.c
@@ -278,14 +278,6 @@ static int __init do_ne2_probe(struct net_device *dev)
return -ENODEV;
}
-static void cleanup_card(struct net_device *dev)
-{
- mca_mark_as_unused(ei_status.priv);
- mca_set_adapter_procfn( ei_status.priv, NULL, NULL);
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, NE_IO_EXTENT);
-}
-
#ifndef MODULE
struct net_device * __init ne2_probe(int unit)
{
@@ -812,6 +804,14 @@ int init_module(void)
return -ENXIO;
}
+static void cleanup_card(struct net_device *dev)
+{
+ mca_mark_as_unused(ei_status.priv);
+ mca_set_adapter_procfn( ei_status.priv, NULL, NULL);
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, NE_IO_EXTENT);
+}
+
void cleanup_module(void)
{
int this_dev;
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index a1ac4bd1696..a7bb54df75a 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -415,7 +415,7 @@ typedef enum {
/* directly indexed by chip_t, above */
-const static struct {
+static const struct {
const char *name;
u8 version; /* from RTL8139C docs */
u32 RxConfigMask; /* should clear the bits supported by this chip */
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 049c34b3706..593d8adee89 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -1598,7 +1598,7 @@ do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
switch(cmd) {
case SIOCGMIIPHY: /* Get the address of the PHY in use. */
data[0] = 0; /* we have only this address */
- /* fall trough */
+ /* fall through */
case SIOCGMIIREG: /* Read the specified MII register. */
data[3] = mii_rd(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
break;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index c782a632980..fa39b944bc4 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -6,7 +6,7 @@ menu "PHY device support"
config PHYLIB
tristate "PHY Device support and infrastructure"
- depends on NET_ETHERNET && (BROKEN || !ARCH_S390)
+ depends on NET_ETHERNET && (BROKEN || !S390)
help
Ethernet controllers are usually attached to PHY
devices. This option provides infrastructure for
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 02940c0fef6..459443b572c 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -81,7 +81,7 @@ int mdiobus_register(struct mii_bus *bus)
phydev->dev.parent = bus->dev;
phydev->dev.bus = &mdio_bus_type;
- sprintf(phydev->dev.bus_id, "phy%d:%d", bus->id, i);
+ snprintf(phydev->dev.bus_id, BUS_ID_SIZE, PHY_ID_FMT, bus->id, i);
phydev->bus = bus;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index b8686e47f89..1474b7c5ac0 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -42,7 +42,7 @@
*/
void phy_print_status(struct phy_device *phydev)
{
- pr_info("%s: Link is %s", phydev->dev.bus_id,
+ pr_info("PHY: %s - Link is %s", phydev->dev.bus_id,
phydev->link ? "Up" : "Down");
if (phydev->link)
printk(" - %d/%s", phydev->speed,
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 1bd22cd40c7..87ee3271b17 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -98,7 +98,6 @@ static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n"
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/delay.h>
-#include <linux/lp.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -106,7 +105,6 @@ static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n"
#include <linux/skbuff.h>
#include <linux/if_plip.h>
#include <linux/workqueue.h>
-#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/parport.h>
#include <linux/bitops.h>
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 400f652282d..aa6540b3946 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -189,7 +189,7 @@ ppp_asynctty_open(struct tty_struct *tty)
goto out_free;
tty->disc_data = ap;
-
+ tty->receive_room = 65536;
return 0;
out_free:
@@ -343,12 +343,6 @@ ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
return 0;
}
-static int
-ppp_asynctty_room(struct tty_struct *tty)
-{
- return 65535;
-}
-
/*
* This can now be called from hard interrupt level as well
* as soft interrupt level or mainline.
@@ -398,7 +392,6 @@ static struct tty_ldisc ppp_ldisc = {
.write = ppp_asynctty_write,
.ioctl = ppp_asynctty_ioctl,
.poll = ppp_asynctty_poll,
- .receive_room = ppp_asynctty_room,
.receive_buf = ppp_asynctty_receive,
.write_wakeup = ppp_asynctty_wakeup,
};
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 4d51c0c8023..33cb8254e79 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -237,7 +237,7 @@ ppp_sync_open(struct tty_struct *tty)
goto out_free;
tty->disc_data = ap;
-
+ tty->receive_room = 65536;
return 0;
out_free:
@@ -384,12 +384,6 @@ ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
return 0;
}
-static int
-ppp_sync_room(struct tty_struct *tty)
-{
- return 65535;
-}
-
/*
* This can now be called from hard interrupt level as well
* as soft interrupt level or mainline.
@@ -439,7 +433,6 @@ static struct tty_ldisc ppp_sync_ldisc = {
.write = ppp_sync_write,
.ioctl = ppp_synctty_ioctl,
.poll = ppp_sync_poll,
- .receive_room = ppp_sync_room,
.receive_buf = ppp_sync_receive,
.write_wakeup = ppp_sync_wakeup,
};
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 14a76f7cf90..2e1bed153c3 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -170,7 +170,7 @@ enum phy_version {
#define _R(NAME,MAC,MASK) \
{ .name = NAME, .mac_version = MAC, .RxConfigMask = MASK }
-const static struct {
+static const struct {
const char *name;
u8 mac_version;
u32 RxConfigMask; /* Clears the bits supported by this chip */
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 478791e09bf..b420182eec4 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -329,7 +329,7 @@ static struct mii_chip_info {
{ NULL, }
};
-const static struct {
+static const struct {
const char *name;
} sis_chip_info[] = {
{ "SiS 190 PCI Fast Ethernet adapter" },
diff --git a/drivers/net/sk98lin/skdim.c b/drivers/net/sk98lin/skdim.c
index 0fddf61047b..07c1b4c8699 100644
--- a/drivers/net/sk98lin/skdim.c
+++ b/drivers/net/sk98lin/skdim.c
@@ -180,7 +180,7 @@ SkDimModerate(SK_AC *pAC) {
/*
** The number of interrupts per sec is the same as expected.
** Evalulate the descriptor-ratio. If it has changed, a resize
- ** in the moderation timer might be usefull
+ ** in the moderation timer might be useful
*/
if (M_DIMINFO.AutoSizing) {
ResizeDimTimerDuration(pAC);
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
index 9a76ac180b1..a5f2b1ee075 100644
--- a/drivers/net/sk98lin/skge.c
+++ b/drivers/net/sk98lin/skge.c
@@ -282,26 +282,22 @@ SK_U32 Val) /* pointer to store the read value */
* Description:
* This function initialize the PCI resources and IO
*
- * Returns: N/A
- *
+ * Returns:
+ * 0 - indicates everything worked ok.
+ * != 0 - error indication
*/
-int SkGeInitPCI(SK_AC *pAC)
+static __devinit int SkGeInitPCI(SK_AC *pAC)
{
struct SK_NET_DEVICE *dev = pAC->dev[0];
struct pci_dev *pdev = pAC->PciDev;
int retval;
- if (pci_enable_device(pdev) != 0) {
- return 1;
- }
-
dev->mem_start = pci_resource_start (pdev, 0);
pci_set_master(pdev);
- if (pci_request_regions(pdev, "sk98lin") != 0) {
- retval = 2;
- goto out_disable;
- }
+ retval = pci_request_regions(pdev, "sk98lin");
+ if (retval)
+ goto out;
#ifdef SK_BIG_ENDIAN
/*
@@ -320,9 +316,8 @@ int SkGeInitPCI(SK_AC *pAC)
* Remap the regs into kernel space.
*/
pAC->IoBase = ioremap_nocache(dev->mem_start, 0x4000);
-
- if (!pAC->IoBase){
- retval = 3;
+ if (!pAC->IoBase) {
+ retval = -EIO;
goto out_release;
}
@@ -330,8 +325,7 @@ int SkGeInitPCI(SK_AC *pAC)
out_release:
pci_release_regions(pdev);
- out_disable:
- pci_disable_device(pdev);
+ out:
return retval;
}
@@ -492,7 +486,7 @@ module_param_array(AutoSizing, charp, NULL, 0);
* 0, if everything is ok
* !=0, on error
*/
-static int __init SkGeBoardInit(struct SK_NET_DEVICE *dev, SK_AC *pAC)
+static int __devinit SkGeBoardInit(struct SK_NET_DEVICE *dev, SK_AC *pAC)
{
short i;
unsigned long Flags;
@@ -529,7 +523,7 @@ SK_BOOL DualNet;
if (SkGeInit(pAC, pAC->IoBase, SK_INIT_DATA) != 0) {
printk("HWInit (0) failed.\n");
spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
- return(-EAGAIN);
+ return -EIO;
}
SkI2cInit( pAC, pAC->IoBase, SK_INIT_DATA);
SkEventInit(pAC, pAC->IoBase, SK_INIT_DATA);
@@ -551,7 +545,7 @@ SK_BOOL DualNet;
if (SkGeInit(pAC, pAC->IoBase, SK_INIT_IO) != 0) {
printk("sk98lin: HWInit (1) failed.\n");
spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
- return(-EAGAIN);
+ return -EIO;
}
SkI2cInit( pAC, pAC->IoBase, SK_INIT_IO);
SkEventInit(pAC, pAC->IoBase, SK_INIT_IO);
@@ -583,20 +577,20 @@ SK_BOOL DualNet;
} else {
printk(KERN_WARNING "sk98lin: Illegal number of ports: %d\n",
pAC->GIni.GIMacsFound);
- return -EAGAIN;
+ return -EIO;
}
if (Ret) {
printk(KERN_WARNING "sk98lin: Requested IRQ %d is busy.\n",
dev->irq);
- return -EAGAIN;
+ return Ret;
}
pAC->AllocFlag |= SK_ALLOC_IRQ;
/* Alloc memory for this board (Mem for RxD/TxD) : */
if(!BoardAllocMem(pAC)) {
printk("No memory for descriptor rings.\n");
- return(-EAGAIN);
+ return -ENOMEM;
}
BoardInitMem(pAC);
@@ -612,7 +606,7 @@ SK_BOOL DualNet;
DualNet)) {
BoardFreeMem(pAC);
printk("sk98lin: SkGeInitAssignRamToQueues failed.\n");
- return(-EAGAIN);
+ return -EIO;
}
return (0);
@@ -633,8 +627,7 @@ SK_BOOL DualNet;
* SK_TRUE, if all memory could be allocated
* SK_FALSE, if not
*/
-static SK_BOOL BoardAllocMem(
-SK_AC *pAC)
+static __devinit SK_BOOL BoardAllocMem(SK_AC *pAC)
{
caddr_t pDescrMem; /* pointer to descriptor memory area */
size_t AllocLength; /* length of complete descriptor area */
@@ -727,8 +720,7 @@ size_t AllocLength; /* length of complete descriptor area */
*
* Returns: N/A
*/
-static void BoardInitMem(
-SK_AC *pAC) /* pointer to adapter context */
+static __devinit void BoardInitMem(SK_AC *pAC)
{
int i; /* loop counter */
int RxDescrSize; /* the size of a rx descriptor rounded up to alignment*/
@@ -2859,7 +2851,7 @@ unsigned long Flags; /* for spin lock */
* Description:
* This function is called if an ioctl is issued on the device.
* There are three subfunction for reading, writing and test-writing
- * the private MIB data structure (usefull for SysKonnect-internal tools).
+ * the private MIB data structure (useful for SysKonnect-internal tools).
*
* Returns:
* 0, if everything is ok
@@ -4776,32 +4768,47 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
struct net_device *dev = NULL;
static int boards_found = 0;
int error = -ENODEV;
+ int using_dac = 0;
char DeviceStr[80];
if (pci_enable_device(pdev))
goto out;
/* Configure DMA attributes. */
- if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
- pci_set_dma_mask(pdev, DMA_32BIT_MASK))
- goto out_disable_device;
-
+ if (sizeof(dma_addr_t) > sizeof(u32) &&
+ !(error = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
+ using_dac = 1;
+ error = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+ if (error < 0) {
+ printk(KERN_ERR "sk98lin %s unable to obtain 64 bit DMA "
+ "for consistent allocations\n", pci_name(pdev));
+ goto out_disable_device;
+ }
+ } else {
+ error = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (error) {
+ printk(KERN_ERR "sk98lin %s no usable DMA configuration\n",
+ pci_name(pdev));
+ goto out_disable_device;
+ }
+ }
- if ((dev = alloc_etherdev(sizeof(DEV_NET))) == NULL) {
- printk(KERN_ERR "Unable to allocate etherdev "
+ error = -ENOMEM;
+ dev = alloc_etherdev(sizeof(DEV_NET));
+ if (!dev) {
+ printk(KERN_ERR "sk98lin: unable to allocate etherdev "
"structure!\n");
goto out_disable_device;
}
pNet = netdev_priv(dev);
- pNet->pAC = kmalloc(sizeof(SK_AC), GFP_KERNEL);
+ pNet->pAC = kzalloc(sizeof(SK_AC), GFP_KERNEL);
if (!pNet->pAC) {
- printk(KERN_ERR "Unable to allocate adapter "
+ printk(KERN_ERR "sk98lin: unable to allocate adapter "
"structure!\n");
goto out_free_netdev;
}
- memset(pNet->pAC, 0, sizeof(SK_AC));
pAC = pNet->pAC;
pAC->PciDev = pdev;
@@ -4810,6 +4817,7 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
pAC->CheckQueue = SK_FALSE;
dev->irq = pdev->irq;
+
error = SkGeInitPCI(pAC);
if (error) {
printk(KERN_ERR "sk98lin: PCI setup failed: %i\n", error);
@@ -4844,19 +4852,25 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
#endif
}
+ if (using_dac)
+ dev->features |= NETIF_F_HIGHDMA;
+
pAC->Index = boards_found++;
- if (SkGeBoardInit(dev, pAC))
+ error = SkGeBoardInit(dev, pAC);
+ if (error)
goto out_free_netdev;
/* Read Adapter name from VPD */
if (ProductStr(pAC, DeviceStr, sizeof(DeviceStr)) != 0) {
+ error = -EIO;
printk(KERN_ERR "sk98lin: Could not read VPD data.\n");
goto out_free_resources;
}
/* Register net device */
- if (register_netdev(dev)) {
+ error = register_netdev(dev);
+ if (error) {
printk(KERN_ERR "sk98lin: Could not register device.\n");
goto out_free_resources;
}
@@ -4883,15 +4897,17 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
boards_found++;
+ pci_set_drvdata(pdev, dev);
+
/* More then one port found */
if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) {
- if ((dev = alloc_etherdev(sizeof(DEV_NET))) == 0) {
- printk(KERN_ERR "Unable to allocate etherdev "
+ dev = alloc_etherdev(sizeof(DEV_NET));
+ if (!dev) {
+ printk(KERN_ERR "sk98lin: unable to allocate etherdev "
"structure!\n");
- goto out;
+ goto single_port;
}
- pAC->dev[1] = dev;
pNet = netdev_priv(dev);
pNet->PortNr = 1;
pNet->NetNr = 1;
@@ -4920,20 +4936,28 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
#endif
}
- if (register_netdev(dev)) {
- printk(KERN_ERR "sk98lin: Could not register device for seconf port.\n");
+ if (using_dac)
+ dev->features |= NETIF_F_HIGHDMA;
+
+ error = register_netdev(dev);
+ if (error) {
+ printk(KERN_ERR "sk98lin: Could not register device"
+ " for second port. (%d)\n", error);
free_netdev(dev);
- pAC->dev[1] = pAC->dev[0];
- } else {
- memcpy(&dev->dev_addr,
- &pAC->Addr.Net[1].CurrentMacAddress, 6);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
-
- printk("%s: %s\n", dev->name, DeviceStr);
- printk(" PrefPort:B RlmtMode:Dual Check Link State\n");
+ goto single_port;
}
+
+ pAC->dev[1] = dev;
+ memcpy(&dev->dev_addr,
+ &pAC->Addr.Net[1].CurrentMacAddress, 6);
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+ printk("%s: %s\n", dev->name, DeviceStr);
+ printk(" PrefPort:B RlmtMode:Dual Check Link State\n");
}
+single_port:
+
/* Save the hardware revision */
pAC->HWRevision = (((pAC->GIni.GIPciHwRev >> 4) & 0x0F)*10) +
(pAC->GIni.GIPciHwRev & 0x0F);
@@ -4945,7 +4969,6 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
memset(&pAC->PnmiBackup, 0, sizeof(SK_PNMI_STRUCT_DATA));
memcpy(&pAC->PnmiBackup, &pAC->PnmiStruct, sizeof(SK_PNMI_STRUCT_DATA));
- pci_set_drvdata(pdev, dev);
return 0;
out_free_resources:
diff --git a/drivers/net/sk98lin/skgepnmi.c b/drivers/net/sk98lin/skgepnmi.c
index 58e1a5be913..a386172107e 100644
--- a/drivers/net/sk98lin/skgepnmi.c
+++ b/drivers/net/sk98lin/skgepnmi.c
@@ -611,7 +611,7 @@ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
* Description:
* Calls a general sub-function for all this stuff. The preset does
* the same as a set, but returns just before finally setting the
- * new value. This is usefull to check if a set might be successfull.
+ * new value. This is useful to check if a set might be successful.
* If the instance -1 is passed, an array of values is supposed and
* all instances of the OID will be set.
*
@@ -654,7 +654,7 @@ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
* Description:
* Calls a general sub-function for all this stuff. The preset does
* the same as a set, but returns just before finally setting the
- * new value. This is usefull to check if a set might be successfull.
+ * new value. This is useful to check if a set might be successful.
* If the instance -1 is passed, an array of values is supposed and
* all instances of the OID will be set.
*
@@ -870,7 +870,7 @@ SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
* Description:
* Calls a general sub-function for all this set stuff. The preset does
* the same as a set, but returns just before finally setting the
- * new value. This is usefull to check if a set might be successfull.
+ * new value. This is useful to check if a set might be successful.
* The sub-function runs through the IdTable, checks which OIDs are able
* to set, and calls the handler function of the OID to perform the
* preset. The return value of the function will also be stored in
@@ -6473,7 +6473,7 @@ unsigned int PhysPortIndex) /* Physical port index */
*
* Description:
* The COMMON module only tells us if the mode is half or full duplex.
- * But in the decade of auto sensing it is usefull for the user to
+ * But in the decade of auto sensing it is useful for the user to
* know if the mode was negotiated or forced. Therefore we have a
* look to the mode, which was last used by the negotiation process.
*
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 404ea4297e3..b2e18d28850 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -651,11 +651,6 @@ static void sl_setup(struct net_device *dev)
******************************************/
-static int slip_receive_room(struct tty_struct *tty)
-{
- return 65536; /* We can handle an infinite amount of data. :-) */
-}
-
/*
* Handle the 'receiver data ready' interrupt.
* This function is called by the 'tty_io' module in the kernel when
@@ -869,10 +864,6 @@ static int slip_open(struct tty_struct *tty)
sl->line = tty_devnum(tty);
sl->pid = current->pid;
- /* FIXME: already done before we were called - seems this can go */
- if (tty->driver->flush_buffer)
- tty->driver->flush_buffer(tty);
-
if (!test_bit(SLF_INUSE, &sl->flags)) {
/* Perform the low-level SLIP initialization. */
if ((err = sl_alloc_bufs(sl, SL_MTU)) != 0)
@@ -897,6 +888,7 @@ static int slip_open(struct tty_struct *tty)
/* Done. We have linked the TTY line to a channel. */
rtnl_unlock();
+ tty->receive_room = 65536; /* We don't flow control */
return sl->dev->base_addr;
err_free_bufs:
@@ -1329,7 +1321,6 @@ static struct tty_ldisc sl_ldisc = {
.close = slip_close,
.ioctl = slip_ioctl,
.receive_buf = slip_receive_buf,
- .receive_room = slip_receive_room,
.write_wakeup = slip_write_wakeup,
};
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
index ba8593ac3f8..3db30cd0625 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/smc-ultra.c
@@ -168,18 +168,6 @@ static int __init do_ultra_probe(struct net_device *dev)
return -ENODEV;
}
-static void cleanup_card(struct net_device *dev)
-{
- /* NB: ultra_close_card() does free_irq */
-#ifdef __ISAPNP__
- struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
- if (idev)
- pnp_device_detach(idev);
-#endif
- release_region(dev->base_addr - ULTRA_NIC_OFFSET, ULTRA_IO_EXTENT);
- iounmap(ei_status.mem);
-}
-
#ifndef MODULE
struct net_device * __init ultra_probe(int unit)
{
@@ -594,6 +582,18 @@ init_module(void)
return -ENXIO;
}
+static void cleanup_card(struct net_device *dev)
+{
+ /* NB: ultra_close_card() does free_irq */
+#ifdef __ISAPNP__
+ struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
+ if (idev)
+ pnp_device_detach(idev);
+#endif
+ release_region(dev->base_addr - ULTRA_NIC_OFFSET, ULTRA_IO_EXTENT);
+ iounmap(ei_status.mem);
+}
+
void
cleanup_module(void)
{
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 28bf2e69eb5..7ec08127c9d 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -88,7 +88,6 @@ static const char version[] =
#include <linux/skbuff.h>
#include <asm/io.h>
-#include <asm/irq.h>
#include "smc91x.h"
@@ -2007,12 +2006,10 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr)
}
/* Grab the IRQ */
- retval = request_irq(dev->irq, &smc_interrupt, 0, dev->name, dev);
+ retval = request_irq(dev->irq, &smc_interrupt, SMC_IRQ_FLAGS, dev->name, dev);
if (retval)
goto err_out;
- set_irq_type(dev->irq, SMC_IRQ_TRIGGER_TYPE);
-
#ifdef SMC_USE_PXA_DMA
{
int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW,
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 5c2824be4ee..e0efd1964e7 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -90,7 +90,7 @@
__l--; \
} \
} while (0)
-#define set_irq_type(irq, type)
+#define SMC_IRQ_FLAGS (0)
#elif defined(CONFIG_SA1100_PLEB)
/* We can only do 16-bit reads and writes in the static memory space. */
@@ -109,7 +109,7 @@
#define SMC_outw(v, a, r) writew(v, (a) + (r))
#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-#define set_irq_type(irq, type) do {} while (0)
+#define SMC_IRQ_FLAGS (0)
#elif defined(CONFIG_SA1100_ASSABET)
@@ -185,11 +185,11 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#include <asm/mach-types.h>
#include <asm/arch/cpu.h>
-#define SMC_IRQ_TRIGGER_TYPE (( \
+#define SMC_IRQ_FLAGS (( \
machine_is_omap_h2() \
|| machine_is_omap_h3() \
|| (machine_is_omap_innovator() && !cpu_is_omap1510()) \
- ) ? IRQT_FALLING : IRQT_RISING)
+ ) ? SA_TRIGGER_FALLING : SA_TRIGGER_RISING)
#elif defined(CONFIG_SH_SH4202_MICRODEV)
@@ -209,7 +209,7 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#define SMC_insw(a, r, p, l) insw((a) + (r) - 0xa0000000, p, l)
#define SMC_outsw(a, r, p, l) outsw((a) + (r) - 0xa0000000, p, l)
-#define set_irq_type(irq, type) do {} while(0)
+#define SMC_IRQ_FLAGS (0)
#elif defined(CONFIG_ISA)
@@ -237,7 +237,7 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#define SMC_insw(a, r, p, l) insw(((u32)a) + (r), p, l)
#define SMC_outsw(a, r, p, l) outsw(((u32)a) + (r), p, l)
-#define set_irq_type(irq, type) do {} while(0)
+#define SMC_IRQ_FLAGS (0)
#define RPC_LSA_DEFAULT RPC_LED_TX_RX
#define RPC_LSB_DEFAULT RPC_LED_100_10
@@ -319,7 +319,7 @@ static inline void SMC_outsw (unsigned long a, int r, unsigned char* p, int l)
au_writew(*_p++ , _a); \
} while(0)
-#define set_irq_type(irq, type) do {} while (0)
+#define SMC_IRQ_FLAGS (0)
#else
@@ -342,8 +342,8 @@ static inline void SMC_outsw (unsigned long a, int r, unsigned char* p, int l)
#endif
-#ifndef SMC_IRQ_TRIGGER_TYPE
-#define SMC_IRQ_TRIGGER_TYPE IRQT_RISING
+#ifndef SMC_IRQ_FLAGS
+#define SMC_IRQ_FLAGS SA_TRIGGER_RISING
#endif
#ifdef SMC_USE_PXA_DMA
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 5c8fcd40ef4..01bdb233405 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -389,7 +389,7 @@ static int __init lance_probe( struct net_device *dev)
dev->stop = &lance_close;
dev->get_stats = &lance_get_stats;
dev->set_multicast_list = &set_multicast_list;
- dev->set_mac_address = 0;
+ dev->set_mac_address = NULL;
// KLUDGE -- REMOVE ME
set_bit(__LINK_STATE_PRESENT, &dev->state);
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 125ed00e95a..c67c91251d0 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1564,7 +1564,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
dev->dev_addr, 6);
}
#endif
-#if defined(__i386__) /* Patch up x86 BIOS bug. */
+#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
if (last_irq)
irq = last_irq;
#endif
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 1a431633625..98398166680 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -1689,9 +1689,9 @@ MODULE_AUTHOR("Peer Chen, peer.chen@uli.com.tw");
MODULE_DESCRIPTION("ULi M5261/M5263 fast ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_PARM(debug, "i");
-MODULE_PARM(mode, "i");
-MODULE_PARM(cr6set, "i");
+module_param(debug, int, 0644);
+module_param(mode, int, 0);
+module_param(cr6set, int, 0);
MODULE_PARM_DESC(debug, "ULi M5261/M5263 enable debugging (0-1)");
MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 82c6b757d30..c2d5907dc8e 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -791,7 +791,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
#endif
if (vptr->flags & VELOCITY_FLAGS_TX_CSUM) {
- dev->features |= NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_IP_CSUM;
}
ret = register_netdev(dev);
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 52f26b9c69d..931cbdf6d79 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -689,7 +689,7 @@ static void cpc_tty_rx_work(void * data)
}
}
cpc_tty->buf_rx.first = cpc_tty->buf_rx.first->next;
- kfree(buf);
+ kfree((void *)buf);
buf = cpc_tty->buf_rx.first;
flg_rx = 1;
}
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 036adc4f8ba..22e794071cf 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -329,9 +329,9 @@ static int sdla_cpuspeed(struct net_device *dev, struct ifreq *ifr)
struct _dlci_stat
{
- short dlci __attribute__((packed));
- char flags __attribute__((packed));
-};
+ short dlci;
+ char flags;
+} __attribute__((packed));
struct _frad_stat
{
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index bdf672c4818..9c3ccc66914 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -515,11 +515,6 @@ static int x25_asy_close(struct net_device *dev)
return 0;
}
-static int x25_asy_receive_room(struct tty_struct *tty)
-{
- return 65536; /* We can handle an infinite amount of data. :-) */
-}
-
/*
* Handle the 'receiver data ready' interrupt.
* This function is called by the 'tty_io' module in the kernel when
@@ -573,6 +568,7 @@ static int x25_asy_open_tty(struct tty_struct *tty)
sl->tty = tty;
tty->disc_data = sl;
+ tty->receive_room = 65536;
if (tty->driver->flush_buffer) {
tty->driver->flush_buffer(tty);
}
@@ -779,7 +775,6 @@ static struct tty_ldisc x25_ldisc = {
.close = x25_asy_close_tty,
.ioctl = x25_asy_ioctl,
.receive_buf = x25_asy_receive_buf,
- .receive_room = x25_asy_receive_room,
.write_wakeup = x25_asy_write_wakeup,
};
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index b03feae459f..7caa8dc88a5 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -127,13 +127,6 @@ static int __init do_wd_probe(struct net_device *dev)
return -ENODEV;
}
-static void cleanup_card(struct net_device *dev)
-{
- free_irq(dev->irq, dev);
- release_region(dev->base_addr - WD_NIC_OFFSET, WD_IO_EXTENT);
- iounmap(ei_status.mem);
-}
-
#ifndef MODULE
struct net_device * __init wd_probe(int unit)
{
@@ -538,6 +531,13 @@ init_module(void)
return -ENXIO;
}
+static void cleanup_card(struct net_device *dev)
+{
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr - WD_NIC_OFFSET, WD_IO_EXTENT);
+ iounmap(ei_status.mem);
+}
+
void
cleanup_module(void)
{
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 24f7967aab6..c1a6e69f790 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -243,7 +243,7 @@ config IPW2200_DEBUG
config AIRO
tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
- depends on NET_RADIO && ISA_DMA_API && (PCI || BROKEN)
+ depends on NET_RADIO && ISA_DMA_API && CRYPTO && (PCI || BROKEN)
---help---
This is the standard Linux driver to support Cisco/Aironet ISA and
PCI 802.11 wireless cards.
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index e4729ddf29f..f0ccfef6644 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -1407,6 +1407,17 @@ static int atmel_close(struct net_device *dev)
{
struct atmel_private *priv = netdev_priv(dev);
+ /* Send event to userspace that we are disassociating */
+ if (priv->station_state == STATION_STATE_READY) {
+ union iwreq_data wrqu;
+
+ wrqu.data.length = 0;
+ wrqu.data.flags = 0;
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+ wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
+ }
+
atmel_enter_state(priv, STATION_STATE_DOWN);
if (priv->bus_type == BUS_TYPE_PCCARD)
@@ -1780,10 +1791,10 @@ static int atmel_set_encode(struct net_device *dev,
priv->wep_is_on = 1;
priv->exclude_unencrypted = 1;
if (priv->wep_key_len[index] > 5) {
- priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64;
+ priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128;
priv->encryption_level = 2;
} else {
- priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128;
+ priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64;
priv->encryption_level = 1;
}
}
@@ -1853,6 +1864,181 @@ static int atmel_get_encode(struct net_device *dev,
return 0;
}
+static int atmel_set_encodeext(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+ struct iw_point *encoding = &wrqu->encoding;
+ struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+ int idx, key_len;
+
+ /* Determine and validate the key index */
+ idx = encoding->flags & IW_ENCODE_INDEX;
+ if (idx) {
+ if (idx < 1 || idx > WEP_KEYS)
+ return -EINVAL;
+ idx--;
+ } else
+ idx = priv->default_key;
+
+ if ((encoding->flags & IW_ENCODE_DISABLED) ||
+ ext->alg == IW_ENCODE_ALG_NONE) {
+ priv->wep_is_on = 0;
+ priv->encryption_level = 0;
+ priv->pairwise_cipher_suite = CIPHER_SUITE_NONE;
+ }
+
+ if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
+ priv->default_key = idx;
+
+ /* Set the requested key */
+ switch (ext->alg) {
+ case IW_ENCODE_ALG_NONE:
+ break;
+ case IW_ENCODE_ALG_WEP:
+ if (ext->key_len > 5) {
+ priv->wep_key_len[idx] = 13;
+ priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128;
+ priv->encryption_level = 2;
+ } else if (ext->key_len > 0) {
+ priv->wep_key_len[idx] = 5;
+ priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64;
+ priv->encryption_level = 1;
+ } else {
+ return -EINVAL;
+ }
+ priv->wep_is_on = 1;
+ memset(priv->wep_keys[idx], 0, 13);
+ key_len = min ((int)ext->key_len, priv->wep_key_len[idx]);
+ memcpy(priv->wep_keys[idx], ext->key, key_len);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return -EINPROGRESS;
+}
+
+static int atmel_get_encodeext(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+ struct iw_point *encoding = &wrqu->encoding;
+ struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+ int idx, max_key_len;
+
+ max_key_len = encoding->length - sizeof(*ext);
+ if (max_key_len < 0)
+ return -EINVAL;
+
+ idx = encoding->flags & IW_ENCODE_INDEX;
+ if (idx) {
+ if (idx < 1 || idx > WEP_KEYS)
+ return -EINVAL;
+ idx--;
+ } else
+ idx = priv->default_key;
+
+ encoding->flags = idx + 1;
+ memset(ext, 0, sizeof(*ext));
+
+ if (!priv->wep_is_on) {
+ ext->alg = IW_ENCODE_ALG_NONE;
+ ext->key_len = 0;
+ encoding->flags |= IW_ENCODE_DISABLED;
+ } else {
+ if (priv->encryption_level > 0)
+ ext->alg = IW_ENCODE_ALG_WEP;
+ else
+ return -EINVAL;
+
+ ext->key_len = priv->wep_key_len[idx];
+ memcpy(ext->key, priv->wep_keys[idx], ext->key_len);
+ encoding->flags |= IW_ENCODE_ENABLED;
+ }
+
+ return 0;
+}
+
+static int atmel_set_auth(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+ struct iw_param *param = &wrqu->param;
+
+ switch (param->flags & IW_AUTH_INDEX) {
+ case IW_AUTH_WPA_VERSION:
+ case IW_AUTH_CIPHER_PAIRWISE:
+ case IW_AUTH_CIPHER_GROUP:
+ case IW_AUTH_KEY_MGMT:
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+ case IW_AUTH_PRIVACY_INVOKED:
+ /*
+ * atmel does not use these parameters
+ */
+ break;
+
+ case IW_AUTH_DROP_UNENCRYPTED:
+ priv->exclude_unencrypted = param->value ? 1 : 0;
+ break;
+
+ case IW_AUTH_80211_AUTH_ALG: {
+ if (param->value & IW_AUTH_ALG_SHARED_KEY) {
+ priv->exclude_unencrypted = 1;
+ } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) {
+ priv->exclude_unencrypted = 0;
+ } else
+ return -EINVAL;
+ break;
+ }
+
+ case IW_AUTH_WPA_ENABLED:
+ /* Silently accept disable of WPA */
+ if (param->value > 0)
+ return -EOPNOTSUPP;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ return -EINPROGRESS;
+}
+
+static int atmel_get_auth(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct atmel_private *priv = netdev_priv(dev);
+ struct iw_param *param = &wrqu->param;
+
+ switch (param->flags & IW_AUTH_INDEX) {
+ case IW_AUTH_DROP_UNENCRYPTED:
+ param->value = priv->exclude_unencrypted;
+ break;
+
+ case IW_AUTH_80211_AUTH_ALG:
+ if (priv->exclude_unencrypted == 1)
+ param->value = IW_AUTH_ALG_SHARED_KEY;
+ else
+ param->value = IW_AUTH_ALG_OPEN_SYSTEM;
+ break;
+
+ case IW_AUTH_WPA_ENABLED:
+ param->value = 0;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+
static int atmel_get_name(struct net_device *dev,
struct iw_request_info *info,
char *cwrq,
@@ -2289,13 +2475,15 @@ static int atmel_set_wap(struct net_device *dev,
{
struct atmel_private *priv = netdev_priv(dev);
int i;
- static const u8 bcast[] = { 255, 255, 255, 255, 255, 255 };
+ static const u8 any[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ static const u8 off[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
unsigned long flags;
if (awrq->sa_family != ARPHRD_ETHER)
return -EINVAL;
- if (memcmp(bcast, awrq->sa_data, 6) == 0) {
+ if (!memcmp(any, awrq->sa_data, 6) ||
+ !memcmp(off, awrq->sa_data, 6)) {
del_timer_sync(&priv->management_timer);
spin_lock_irqsave(&priv->irqlock, flags);
atmel_scan(priv, 1);
@@ -2378,6 +2566,15 @@ static const iw_handler atmel_handler[] =
(iw_handler) atmel_get_encode, /* SIOCGIWENCODE */
(iw_handler) atmel_set_power, /* SIOCSIWPOWER */
(iw_handler) atmel_get_power, /* SIOCGIWPOWER */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* -- hole -- */
+ (iw_handler) NULL, /* SIOCSIWGENIE */
+ (iw_handler) NULL, /* SIOCGIWGENIE */
+ (iw_handler) atmel_set_auth, /* SIOCSIWAUTH */
+ (iw_handler) atmel_get_auth, /* SIOCGIWAUTH */
+ (iw_handler) atmel_set_encodeext, /* SIOCSIWENCODEEXT */
+ (iw_handler) atmel_get_encodeext, /* SIOCGIWENCODEEXT */
+ (iw_handler) NULL, /* SIOCSIWPMKSA */
};
static const iw_handler atmel_private_handler[] =
@@ -2924,6 +3121,8 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
u16 ass_id = le16_to_cpu(ass_resp->ass_id);
u16 rates_len = ass_resp->length > 4 ? 4 : ass_resp->length;
+ union iwreq_data wrqu;
+
if (frame_len < 8 + rates_len)
return;
@@ -2954,6 +3153,14 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
priv->station_is_associated = 1;
priv->station_was_associated = 1;
atmel_enter_state(priv, STATION_STATE_READY);
+
+ /* Send association event to userspace */
+ wrqu.data.length = 0;
+ wrqu.data.flags = 0;
+ memcpy(wrqu.ap_addr.sa_data, priv->CurrentBSSID, ETH_ALEN);
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
+
return;
}
@@ -3632,6 +3839,7 @@ static int reset_atmel_card(struct net_device *dev)
struct atmel_private *priv = netdev_priv(dev);
u8 configuration;
+ int old_state = priv->station_state;
/* data to add to the firmware names, in priority order
this implemenents firmware versioning */
@@ -3792,6 +4000,17 @@ static int reset_atmel_card(struct net_device *dev)
else
build_wep_mib(priv);
+ if (old_state == STATION_STATE_READY)
+ {
+ union iwreq_data wrqu;
+
+ wrqu.data.length = 0;
+ wrqu.data.flags = 0;
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+ wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
+ }
+
return 1;
}
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 44cd3fcd157..cf05661fb1b 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -7153,7 +7153,7 @@ static int ipw2100_wx_get_range(struct net_device *dev,
/* Set the Wireless Extension versions */
range->we_version_compiled = WIRELESS_EXT;
- range->we_version_source = 16;
+ range->we_version_source = 18;
// range->retry_capa; /* What retry options are supported */
// range->retry_flags; /* How to decode max/min retry limit */
@@ -7184,6 +7184,9 @@ static int ipw2100_wx_get_range(struct net_device *dev,
IW_EVENT_CAPA_MASK(SIOCGIWAP));
range->event_capa[1] = IW_EVENT_CAPA_K_1;
+ range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
+ IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
+
IPW_DEBUG_WX("GET Range\n");
return 0;
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index d25264ba0c0..18baacfc5a2 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -1675,11 +1675,6 @@ static int strip_rebuild_header(struct sk_buff *skb)
/************************************************************************/
/* Receiving routines */
-static int strip_receive_room(struct tty_struct *tty)
-{
- return 0x10000; /* We can handle an infinite amount of data. :-) */
-}
-
/*
* This function parses the response to the ATS300? command,
* extracting the radio version and serial number.
@@ -2424,7 +2419,7 @@ static struct net_device_stats *strip_get_stats(struct net_device *dev)
/*
* Here's the order things happen:
* When the user runs "slattach -p strip ..."
- * 1. The TTY module calls strip_open
+ * 1. The TTY module calls strip_open
* 2. strip_open calls strip_alloc
* 3. strip_alloc calls register_netdev
* 4. register_netdev calls strip_dev_init
@@ -2652,6 +2647,8 @@ static int strip_open(struct tty_struct *tty)
strip_info->tty = tty;
tty->disc_data = strip_info;
+ tty->receive_room = 65536;
+
if (tty->driver->flush_buffer)
tty->driver->flush_buffer(tty);
@@ -2762,7 +2759,6 @@ static struct tty_ldisc strip_ldisc = {
.close = strip_close,
.ioctl = strip_ioctl,
.receive_buf = strip_receive_buf,
- .receive_room = strip_receive_room,
.write_wakeup = strip_write_some_more,
};
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 531b0731314..b2e8e49c865 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -43,13 +43,16 @@ static void process_task_mortuary(void);
* list for processing. Only after two full buffer syncs
* does the task eventually get freed, because by then
* we are sure we will not reference it again.
+ * Can be invoked from softirq via RCU callback due to
+ * call_rcu() of the task struct, hence the _irqsave.
*/
static int task_free_notify(struct notifier_block * self, unsigned long val, void * data)
{
+ unsigned long flags;
struct task_struct * task = data;
- spin_lock(&task_mortuary);
+ spin_lock_irqsave(&task_mortuary, flags);
list_add(&task->tasks, &dying_tasks);
- spin_unlock(&task_mortuary);
+ spin_unlock_irqrestore(&task_mortuary, flags);
return NOTIFY_OK;
}
@@ -431,25 +434,22 @@ static void increment_tail(struct oprofile_cpu_buffer * b)
*/
static void process_task_mortuary(void)
{
- struct list_head * pos;
- struct list_head * pos2;
+ unsigned long flags;
+ LIST_HEAD(local_dead_tasks);
struct task_struct * task;
+ struct task_struct * ttask;
- spin_lock(&task_mortuary);
+ spin_lock_irqsave(&task_mortuary, flags);
- list_for_each_safe(pos, pos2, &dead_tasks) {
- task = list_entry(pos, struct task_struct, tasks);
- list_del(&task->tasks);
- free_task(task);
- }
+ list_splice_init(&dead_tasks, &local_dead_tasks);
+ list_splice_init(&dying_tasks, &dead_tasks);
- list_for_each_safe(pos, pos2, &dying_tasks) {
- task = list_entry(pos, struct task_struct, tasks);
+ spin_unlock_irqrestore(&task_mortuary, flags);
+
+ list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
list_del(&task->tasks);
- list_add_tail(&task->tasks, &dead_tasks);
+ free_task(task);
}
-
- spin_unlock(&task_mortuary);
}
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 026f671ea55..78193e4bbdb 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -52,7 +52,8 @@ int alloc_cpu_buffers(void)
for_each_online_cpu(i) {
struct oprofile_cpu_buffer * b = &cpu_buffer[i];
- b->buffer = vmalloc(sizeof(struct op_sample) * buffer_size);
+ b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
+ cpu_to_node(i));
if (!b->buffer)
goto fail;
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index 166bca79013..b80318f0342 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -15,6 +15,7 @@
#include <linux/vmalloc.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
+#include <linux/capability.h>
#include <linux/dcookies.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 5ab75334c57..216d1d85932 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -83,7 +83,8 @@
** bus number for each dino.
*/
-#define is_card_dino(id) ((id)->hw_type == HPHW_A_DMA)
+#define is_card_dino(id) ((id)->hw_type == HPHW_A_DMA)
+#define is_cujo(id) ((id)->hversion == 0x682)
#define DINO_IAR0 0x004
#define DINO_IODC_ADDR 0x008
@@ -124,6 +125,7 @@
#define DINO_IRQS 11 /* bits 0-10 are architected */
#define DINO_IRR_MASK 0x5ff /* only 10 bits are implemented */
+#define DINO_LOCAL_IRQS (DINO_IRQS+1)
#define DINO_MASK_IRQ(x) (1<<(x))
@@ -146,7 +148,7 @@ struct dino_device
unsigned long txn_addr; /* EIR addr to generate interrupt */
u32 txn_data; /* EIR data assign to each dino */
u32 imr; /* IRQ's which are enabled */
- int global_irq[12]; /* map IMR bit to global irq */
+ int global_irq[DINO_LOCAL_IRQS]; /* map IMR bit to global irq */
#ifdef DINO_DEBUG
unsigned int dino_irr0; /* save most recent IRQ line stat */
#endif
@@ -297,7 +299,7 @@ struct pci_port_ops dino_port_ops = {
static void dino_disable_irq(unsigned int irq)
{
struct dino_device *dino_dev = irq_desc[irq].handler_data;
- int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, irq);
+ int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
DBG(KERN_WARNING "%s(0x%p, %d)\n", __FUNCTION__, dino_dev, irq);
@@ -309,7 +311,7 @@ static void dino_disable_irq(unsigned int irq)
static void dino_enable_irq(unsigned int irq)
{
struct dino_device *dino_dev = irq_desc[irq].handler_data;
- int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, irq);
+ int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
u32 tmp;
DBG(KERN_WARNING "%s(0x%p, %d)\n", __FUNCTION__, dino_dev, irq);
@@ -435,6 +437,21 @@ static void dino_choose_irq(struct parisc_device *dev, void *ctrl)
dino_assign_irq(dino, irq, &dev->irq);
}
+
+/*
+ * Cirrus 6832 Cardbus reports wrong irq on RDI Tadpole PARISC Laptop (deller@gmx.de)
+ * (the irqs are off-by-one, not sure yet if this is a cirrus, dino-hardware or dino-driver problem...)
+ */
+static void __devinit quirk_cirrus_cardbus(struct pci_dev *dev)
+{
+ u8 new_irq = dev->irq - 1;
+ printk(KERN_INFO "PCI: Cirrus Cardbus IRQ fixup for %s, from %d to %d\n",
+ pci_name(dev), dev->irq, new_irq);
+ dev->irq = new_irq;
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus );
+
+
static void __init
dino_bios_init(void)
{
@@ -666,7 +683,6 @@ dino_fixup_bus(struct pci_bus *bus)
printk(KERN_WARNING "Device %s has unassigned IRQ\n", pci_name(dev));
#endif
} else {
-
/* Adjust INT_LINE for that busses region */
dino_assign_irq(dino_dev, dev->irq, &dev->irq);
}
@@ -872,7 +888,7 @@ static int __init dino_common_init(struct parisc_device *dev,
/* allocate I/O Port resource region */
res = &dino_dev->hba.io_space;
- if (dev->id.hversion == 0x680 || is_card_dino(&dev->id)) {
+ if (!is_cujo(&dev->id)) {
res->name = "Dino I/O Port";
} else {
res->name = "Cujo I/O Port";
@@ -927,7 +943,7 @@ static int __init dino_probe(struct parisc_device *dev)
if (is_card_dino(&dev->id)) {
version = "3.x (card mode)";
} else {
- if(dev->id.hversion == 0x680) {
+ if (!is_cujo(&dev->id)) {
if (dev->id.hversion_rev < 4) {
version = dino_vers[dev->id.hversion_rev];
}
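The two dino_*_irq() hunks above fix the third argument of gsc_find_local_irq(): it was being passed the global IRQ number instead of the size of the global_irq[] table, so the lookup could scan past the mapping array. A small stand-alone sketch of such a bounded reverse lookup (find_local_irq() and its exact semantics are assumptions for illustration, not the parisc helper itself):

#include <stdio.h>

#define LOCAL_IRQS 12

/* Return the index whose map[] entry equals irq, or -1 if not mapped.
 * 'size' must be the number of valid entries in map[], nothing else. */
static int find_local_irq(int irq, const int *map, int size)
{
        for (int i = 0; i < size; i++)
                if (map[i] == irq)
                        return i;
        return -1;
}

int main(void)
{
        int global_irq[LOCAL_IRQS] = { 64, 65, 66, 67, 68, 69,
                                       70, 71, 72, 73, 74, 75 };

        /* Correct: bound the scan by the table size, not by the irq. */
        printf("irq 70 -> local %d\n",
               find_local_irq(70, global_irq, LOCAL_IRQS));
        return 0;
}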
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index 6362bf99eff..3d94d86c1c9 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -57,7 +57,7 @@
static DEFINE_SPINLOCK(eisa_irq_lock);
-void __iomem *eisa_eeprom_addr;
+void __iomem *eisa_eeprom_addr __read_mostly;
/* We can only have one EISA adapter in the system because neither
* implementation can be flexed.
@@ -141,7 +141,7 @@ static int slave_mask;
* in the future.
*/
/* irq 13,8,2,1,0 must be edge */
-static unsigned int eisa_irq_level; /* default to edge triggered */
+static unsigned int eisa_irq_level __read_mostly; /* default to edge triggered */
/* called by free irq */
diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c
index 3a1b4826e5c..e13aafa70bf 100644
--- a/drivers/parisc/eisa_eeprom.c
+++ b/drivers/parisc/eisa_eeprom.c
@@ -48,7 +48,7 @@ static loff_t eisa_eeprom_llseek(struct file *file, loff_t offset, int origin )
}
static ssize_t eisa_eeprom_read(struct file * file,
- char *buf, size_t count, loff_t *ppos )
+ char __user *buf, size_t count, loff_t *ppos )
{
unsigned char *tmp;
ssize_t ret;
diff --git a/drivers/parisc/lasi.c b/drivers/parisc/lasi.c
index a8c20396ffb..2b3ba1dcf33 100644
--- a/drivers/parisc/lasi.c
+++ b/drivers/parisc/lasi.c
@@ -150,7 +150,7 @@ void __init lasi_led_init(unsigned long lasi_hpa)
*
*/
-static unsigned long lasi_power_off_hpa;
+static unsigned long lasi_power_off_hpa __read_mostly;
static void lasi_power_off(void)
{
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 5e495dcbc58..cbae8c8963f 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -167,7 +167,7 @@
/* non-postable I/O port space, densely packed */
#define LBA_PORT_BASE (PCI_F_EXTEND | 0xfee00000UL)
-static void __iomem *astro_iop_base;
+static void __iomem *astro_iop_base __read_mostly;
#define ELROY_HVERS 0x782
#define MERCURY_HVERS 0x783
@@ -695,11 +695,71 @@ lba_claim_dev_resources(struct pci_dev *dev)
}
}
}
+
+
+/*
+ * truncate_pat_collision: Deal with overlaps or outright collisions
+ * between PAT PDC reported ranges.
+ *
+ * Broken PA8800 firmware will report lmmio range that
+ * overlaps with CPU HPA. Just truncate the lmmio range.
+ *
+ * BEWARE: conflicts with this lmmio range may be an
+ * elmmio range which is pointing down another rope.
+ *
+ * FIXME: only deals with one collision per range...theoretically we
+ * could have several. Supporting more than one collision will get messy.
+ */
+static unsigned long
+truncate_pat_collision(struct resource *root, struct resource *new)
+{
+ unsigned long start = new->start;
+ unsigned long end = new->end;
+ struct resource *tmp = root->child;
+
+ if (end <= start || start < root->start || !tmp)
+ return 0;
+
+ /* find first overlap */
+ while (tmp && tmp->end < start)
+ tmp = tmp->sibling;
+
+ /* no entries overlap */
+ if (!tmp) return 0;
+
+ /* found one that starts behind the new one
+ ** Don't need to do anything.
+ */
+ if (tmp->start >= end) return 0;
+
+ if (tmp->start <= start) {
+ /* "front" of new one overlaps */
+ new->start = tmp->end + 1;
+
+ if (tmp->end >= end) {
+ /* AACCKK! totally overlaps! drop this range. */
+ return 1;
+ }
+ }
+
+ if (tmp->end < end ) {
+ /* "end" of new one overlaps */
+ new->end = tmp->start - 1;
+ }
+
+ printk(KERN_WARNING "LBA: Truncating lmmio_space [%lx/%lx] "
+ "to [%lx,%lx]\n",
+ start, end,
+ new->start, new->end );
+
+ return 0; /* truncation successful */
+}
+
#else
-#define lba_claim_dev_resources(dev)
+#define lba_claim_dev_resources(dev) do { } while (0)
+#define truncate_pat_collision(r,n) (0)
#endif
-
/*
** The algorithm is generic code.
** But it needs to access local data structures to get the IRQ base.
@@ -747,6 +807,9 @@ lba_fixup_bus(struct pci_bus *bus)
lba_dump_res(&ioport_resource, 2);
BUG();
}
+ /* advertize Host bridge resources to PCI bus */
+ bus->resource[0] = &(ldev->hba.io_space);
+ i = 1;
if (ldev->hba.elmmio_space.start) {
err = request_resource(&iomem_resource,
@@ -760,23 +823,35 @@ lba_fixup_bus(struct pci_bus *bus)
/* lba_dump_res(&iomem_resource, 2); */
/* BUG(); */
- }
+ } else
+ bus->resource[i++] = &(ldev->hba.elmmio_space);
}
- err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space));
- if (err < 0) {
- /* FIXME overlaps with elmmio will fail here.
- * Need to prune (or disable) the distributed range.
- *
- * BEWARE: conflicts with this lmmio range may be
- * elmmio range which is pointing down another rope.
- */
-
- printk("FAILED: lba_fixup_bus() request for "
+
+ /* Overlaps with elmmio can (and should) fail here.
+ * We will prune (or ignore) the distributed range.
+ *
+ * FIXME: SBA code should register all elmmio ranges first.
+ * That would take care of elmmio ranges routed
+ * to a different rope (already discovered) from
+ * getting registered *after* LBA code has already
+ * registered its distributed lmmio range.
+ */
+ if (truncate_pat_collision(&iomem_resource,
+ &(ldev->hba.lmmio_space))) {
+
+ printk(KERN_WARNING "LBA: lmmio_space [%lx/%lx] duplicate!\n",
+ ldev->hba.lmmio_space.start,
+ ldev->hba.lmmio_space.end);
+ } else {
+ err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space));
+ if (err < 0) {
+ printk(KERN_ERR "FAILED: lba_fixup_bus() request for "
"lmmio_space [%lx/%lx]\n",
ldev->hba.lmmio_space.start,
ldev->hba.lmmio_space.end);
- /* lba_dump_res(&iomem_resource, 2); */
+ } else
+ bus->resource[i++] = &(ldev->hba.lmmio_space);
}
#ifdef CONFIG_64BIT
@@ -791,18 +866,10 @@ lba_fixup_bus(struct pci_bus *bus)
lba_dump_res(&iomem_resource, 2);
BUG();
}
+ bus->resource[i++] = &(ldev->hba.gmmio_space);
}
#endif
- /* advertize Host bridge resources to PCI bus */
- bus->resource[0] = &(ldev->hba.io_space);
- bus->resource[1] = &(ldev->hba.lmmio_space);
- i=2;
- if (ldev->hba.elmmio_space.start)
- bus->resource[i++] = &(ldev->hba.elmmio_space);
- if (ldev->hba.gmmio_space.start)
- bus->resource[i++] = &(ldev->hba.gmmio_space);
-
}
list_for_each(ln, &bus->devices) {
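The truncate_pat_collision() routine added above is essentially interval clipping against ranges already in the resource tree: drop the new range if it is fully covered, otherwise trim whichever end overlaps. A self-contained sketch of those clipping rules, with simplified types instead of struct resource (like the kernel routine, it only handles a single collision):

#include <stdio.h>

struct range { unsigned long start, end; };

/* Clip 'new_r' against one already-claimed range.  Returns 1 if 'new_r'
 * is completely covered and should be dropped, 0 otherwise (possibly
 * after trimming its front or its tail). */
static int clip_range(const struct range *old, struct range *new_r)
{
        if (old->end < new_r->start || old->start > new_r->end)
                return 0;                       /* disjoint: keep as-is */

        if (old->start <= new_r->start && old->end >= new_r->end)
                return 1;                       /* fully covered: drop */

        if (old->start <= new_r->start)
                new_r->start = old->end + 1;    /* trim the front */
        else
                new_r->end = old->start - 1;    /* trim the tail */
        return 0;
}

int main(void)
{
        struct range claimed = { 0xf0000000UL, 0xf7ffffffUL };
        struct range lmmio   = { 0xf4000000UL, 0xffffffffUL };

        if (!clip_range(&claimed, &lmmio))
                printf("truncated to [%lx,%lx]\n", lmmio.start, lmmio.end);
        return 0;
}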
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 95bd07b8b61..3627a2d7f79 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -3,7 +3,7 @@
*
* (c) Copyright 2000 Red Hat Software
* (c) Copyright 2000 Helge Deller <hdeller@redhat.com>
- * (c) Copyright 2001-2004 Helge Deller <deller@gmx.de>
+ * (c) Copyright 2001-2005 Helge Deller <deller@gmx.de>
* (c) Copyright 2001 Randolph Chung <tausq@debian.org>
*
* This program is free software; you can redistribute it and/or modify
@@ -30,6 +30,7 @@
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/utsname.h>
+#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
@@ -56,13 +57,13 @@
relatively large amount of CPU time, some of the calculations can be
turned off with the following variables (controlled via procfs) */
-static int led_type = -1;
+static int led_type __read_mostly = -1;
static unsigned char lastleds; /* LED state from most recent update */
-static unsigned int led_heartbeat = 1;
-static unsigned int led_diskio = 1;
-static unsigned int led_lanrxtx = 1;
-static char lcd_text[32];
-static char lcd_text_default[32];
+static unsigned int led_heartbeat __read_mostly = 1;
+static unsigned int led_diskio __read_mostly = 1;
+static unsigned int led_lanrxtx __read_mostly = 1;
+static char lcd_text[32] __read_mostly;
+static char lcd_text_default[32] __read_mostly;
static struct workqueue_struct *led_wq;
@@ -108,7 +109,7 @@ struct pdc_chassis_lcd_info_ret_block {
/* lcd_info is pre-initialized to the values needed to program KittyHawk LCD's
* HP seems to have used Sharp/Hitachi HD44780 LCDs most of the time. */
static struct pdc_chassis_lcd_info_ret_block
-lcd_info __attribute__((aligned(8))) =
+lcd_info __attribute__((aligned(8))) __read_mostly =
{
.model = DISPLAY_MODEL_LCD,
.lcd_width = 16,
@@ -144,7 +145,7 @@ static int start_task(void)
device_initcall(start_task);
/* ptr to LCD/LED-specific function */
-static void (*led_func_ptr) (unsigned char);
+static void (*led_func_ptr) (unsigned char) __read_mostly;
#ifdef CONFIG_PROC_FS
static int led_proc_read(char *page, char **start, off_t off, int count,
@@ -347,7 +348,7 @@ static void led_LCD_driver(unsigned char leds)
**
** led_get_net_activity()
**
- ** calculate if there was TX- or RX-troughput on the network interfaces
+ ** calculate if there was TX- or RX-throughput on the network interfaces
** (analog to dev_get_info() from net/core/dev.c)
**
*/
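The __read_mostly annotations sprinkled through led.c (and the other parisc files in this series) move rarely-written globals into their own section so they do not share cache lines with frequently-written data. The same effect can be sketched outside the kernel with an explicit section attribute; the section name below merely mirrors the kernel convention and is otherwise arbitrary:

#include <stdio.h>

/* Group rarely-written globals into a dedicated section so the linker
 * keeps them away from hot, frequently-written data; this is a
 * user-space sketch of what the kernel's __read_mostly does. */
#define read_mostly __attribute__((__section__(".data.read_mostly")))

static int led_type read_mostly = -1;
static unsigned int led_heartbeat read_mostly = 1;

int main(void)
{
        printf("led_type=%d heartbeat=%u\n", led_type, led_heartbeat);
        return 0;
}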
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index 273a7417972..42a3c54e8e6 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -42,9 +42,9 @@
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/sched.h> /* for capable() */
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
@@ -56,7 +56,7 @@
#include <asm/uaccess.h>
#include <asm/hardware.h>
-#define PDCS_VERSION "0.09"
+#define PDCS_VERSION "0.10"
#define PDCS_ADDR_PPRI 0x00
#define PDCS_ADDR_OSID 0x40
@@ -70,7 +70,7 @@ MODULE_DESCRIPTION("sysfs interface to HP PDC Stable Storage data");
MODULE_LICENSE("GPL");
MODULE_VERSION(PDCS_VERSION);
-static unsigned long pdcs_size = 0;
+static unsigned long pdcs_size __read_mostly;
/* This struct defines what we need to deal with a parisc pdc path entry */
struct pdcspath_entry {
@@ -194,7 +194,8 @@ pdcspath_store(struct pdcspath_entry *entry)
return -EIO;
}
- entry->ready = 1;
+ /* kobject is already registered */
+ entry->ready = 2;
DPRINTK("%s: device: 0x%p\n", __func__, entry->dev);
@@ -653,15 +654,21 @@ pdcs_register_pathentries(void)
{
unsigned short i;
struct pdcspath_entry *entry;
+ int err;
for (i = 0; (entry = pdcspath_entries[i]); i++) {
if (pdcspath_fetch(entry) < 0)
continue;
- kobject_set_name(&entry->kobj, "%s", entry->name);
+ if ((err = kobject_set_name(&entry->kobj, "%s", entry->name)))
+ return err;
kobj_set_kset_s(entry, paths_subsys);
- kobject_register(&entry->kobj);
-
+ if ((err = kobject_register(&entry->kobj)))
+ return err;
+
+ /* kobject is now registered */
+ entry->ready = 2;
+
if (!entry->dev)
continue;
@@ -675,14 +682,14 @@ pdcs_register_pathentries(void)
/**
* pdcs_unregister_pathentries - Routine called when unregistering the module.
*/
-static inline void __exit
+static inline void
pdcs_unregister_pathentries(void)
{
unsigned short i;
struct pdcspath_entry *entry;
for (i = 0; (entry = pdcspath_entries[i]); i++)
- if (entry->ready)
+ if (entry->ready >= 2)
kobject_unregister(&entry->kobj);
}
@@ -704,7 +711,7 @@ pdc_stable_init(void)
/* For now we'll register the pdc subsys within this driver */
if ((rc = firmware_register(&pdc_subsys)))
- return rc;
+ goto fail_firmreg;
/* Don't forget the info entry */
for (i = 0; (attr = pdcs_subsys_attrs[i]) && !error; i++)
@@ -713,12 +720,25 @@ pdc_stable_init(void)
/* register the paths subsys as a subsystem of pdc subsys */
kset_set_kset_s(&paths_subsys, pdc_subsys);
- subsystem_register(&paths_subsys);
+ if ((rc = subsystem_register(&paths_subsys)))
+ goto fail_subsysreg;
/* now we create all "files" for the paths subsys */
- pdcs_register_pathentries();
+ if ((rc = pdcs_register_pathentries()))
+ goto fail_pdcsreg;
+
+ return rc;
- return 0;
+fail_pdcsreg:
+ pdcs_unregister_pathentries();
+ subsystem_unregister(&paths_subsys);
+
+fail_subsysreg:
+ firmware_unregister(&pdc_subsys);
+
+fail_firmreg:
+ printk(KERN_INFO "PDC Stable Storage bailing out\n");
+ return rc;
}
static void __exit
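pdc_stable_init() now unwinds through goto labels in reverse order when a later registration step fails, instead of returning early and leaking the earlier registrations. A compact sketch of that cleanup-ladder idiom, with placeholder step_*()/undo_*() functions standing in for firmware_register(), subsystem_register() and pdcs_register_pathentries():

#include <stdio.h>

/* Placeholders standing in for the three registration steps. */
static int step_a(void) { puts("A registered");  return 0; }
static int step_b(void) { puts("B registered");  return 0; }
static int step_c(void) { puts("C failed");      return -1; }
static void undo_b(void) { puts("B unregistered"); }
static void undo_a(void) { puts("A unregistered"); }

static int init(void)
{
        int rc;

        if ((rc = step_a()))
                goto fail_a;
        if ((rc = step_b()))
                goto fail_b;
        if ((rc = step_c()))
                goto fail_c;
        return 0;

fail_c:                 /* undo everything done before the failing step, */
        undo_b();       /* in reverse order */
fail_b:
        undo_a();
fail_a:
        puts("bailing out");
        return rc;
}

int main(void) { return init() ? 1 : 0; }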
diff --git a/drivers/parisc/power.c b/drivers/parisc/power.c
index ff75e9296df..54b2b7f20b9 100644
--- a/drivers/parisc/power.c
+++ b/drivers/parisc/power.c
@@ -2,7 +2,7 @@
* linux/arch/parisc/kernel/power.c
* HP PARISC soft power switch support driver
*
- * Copyright (c) 2001-2002 Helge Deller <deller@gmx.de>
+ * Copyright (c) 2001-2005 Helge Deller <deller@gmx.de>
* All rights reserved.
*
*
@@ -102,7 +102,7 @@ static DECLARE_WORK(poweroff_work, deferred_poweroff, NULL);
static void poweroff(void)
{
- static int powering_off;
+ static int powering_off __read_mostly;
if (powering_off)
return;
@@ -113,7 +113,7 @@ static void poweroff(void)
/* local time-counter for shutdown */
-static int shutdown_timer;
+static int shutdown_timer __read_mostly;
/* check, give feedback and start shutdown after one second */
static void process_shutdown(void)
@@ -139,7 +139,7 @@ static void process_shutdown(void)
DECLARE_TASKLET_DISABLED(power_tasklet, NULL, 0);
/* soft power switch enabled/disabled */
-int pwrsw_enabled = 1;
+int pwrsw_enabled __read_mostly = 1;
/*
* On gecko style machines (e.g. 712/xx and 715/xx)
@@ -149,7 +149,7 @@ int pwrsw_enabled = 1;
*/
static void gecko_tasklet_func(unsigned long unused)
{
- if (!pwrsw_enabled)
+ if (unlikely(!pwrsw_enabled))
return;
if (__getDIAG(25) & 0x80000000) {
@@ -173,7 +173,7 @@ static void polling_tasklet_func(unsigned long soft_power_reg)
{
unsigned long current_status;
- if (!pwrsw_enabled)
+ if (unlikely(!pwrsw_enabled))
return;
current_status = gsc_readl(soft_power_reg);
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 725a14119f2..f605dea5722 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -34,7 +34,7 @@ config PARPORT
config PARPORT_PC
tristate "PC-style hardware"
- depends on PARPORT && (!SPARC64 || PCI) && !SPARC32 && !M32R
+ depends on PARPORT && (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV
---help---
You should say Y here if you have a PC-style parallel port. All
IBM PC compatible computers and some Alphas have PC-style
@@ -77,7 +77,7 @@ config PARPORT_PC_SUPERIO
config PARPORT_PC_PCMCIA
tristate "Support for PCMCIA management for PC-style ports"
- depends on PARPORT!=n && (PCMCIA!=n && PARPORT_PC=m && PARPORT_PC || PARPORT_PC=y && PCMCIA)
+ depends on PCMCIA && PARPORT_PC
help
Say Y here if you need PCMCIA support for your PC-style parallel
ports. If unsure, say N.
@@ -121,6 +121,7 @@ config PARPORT_GSC
tristate
default GSC
depends on PARPORT
+ select PARPORT_NOT_PC
config PARPORT_SUNBPP
tristate "Sparc hardware (EXPERIMENTAL)"
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
index 075c7eb5c85..9ee67321b63 100644
--- a/drivers/parport/daisy.c
+++ b/drivers/parport/daisy.c
@@ -144,9 +144,9 @@ again:
add_dev (numdevs++, port, -1);
/* Find out the legacy device's IEEE 1284 device ID. */
- deviceid = kmalloc (1000, GFP_KERNEL);
+ deviceid = kmalloc (1024, GFP_KERNEL);
if (deviceid) {
- if (parport_device_id (numdevs - 1, deviceid, 1000) > 2)
+ if (parport_device_id (numdevs - 1, deviceid, 1024) > 2)
detected++;
kfree (deviceid);
@@ -252,7 +252,7 @@ struct pardevice *parport_open (int devnum, const char *name,
selected = port->daisy;
parport_release (dev);
- if (selected != port->daisy) {
+ if (selected != daisy) {
/* No corresponding device. */
parport_unregister_device (dev);
return NULL;
@@ -344,9 +344,9 @@ static int cpp_daisy (struct parport *port, int cmd)
PARPORT_CONTROL_STROBE,
PARPORT_CONTROL_STROBE);
udelay (1);
+ s = parport_read_status (port);
parport_frob_control (port, PARPORT_CONTROL_STROBE, 0);
udelay (1);
- s = parport_read_status (port);
parport_write_data (port, 0xff); udelay (2);
return s;
@@ -395,15 +395,15 @@ int parport_daisy_select (struct parport *port, int daisy, int mode)
case IEEE1284_MODE_EPP:
case IEEE1284_MODE_EPPSL:
case IEEE1284_MODE_EPPSWE:
- return (cpp_daisy (port, 0x20 + daisy) &
- PARPORT_STATUS_ERROR);
+ return !(cpp_daisy (port, 0x20 + daisy) &
+ PARPORT_STATUS_ERROR);
// For these modes we should switch to ECP mode:
case IEEE1284_MODE_ECP:
case IEEE1284_MODE_ECPRLE:
case IEEE1284_MODE_ECPSWE:
- return (cpp_daisy (port, 0xd0 + daisy) &
- PARPORT_STATUS_ERROR);
+ return !(cpp_daisy (port, 0xd0 + daisy) &
+ PARPORT_STATUS_ERROR);
// Nothing was told for BECP in Daisy chain specification.
// May be it's wise to use ECP?
@@ -413,8 +413,8 @@ int parport_daisy_select (struct parport *port, int daisy, int mode)
case IEEE1284_MODE_BYTE:
case IEEE1284_MODE_COMPAT:
default:
- return (cpp_daisy (port, 0xe0 + daisy) &
- PARPORT_STATUS_ERROR);
+ return !(cpp_daisy (port, 0xe0 + daisy) &
+ PARPORT_STATUS_ERROR);
}
}
@@ -436,7 +436,7 @@ static int select_port (struct parport *port)
static int assign_addrs (struct parport *port)
{
- unsigned char s, last_dev;
+ unsigned char s;
unsigned char daisy;
int thisdev = numdevs;
int detected;
@@ -472,10 +472,13 @@ static int assign_addrs (struct parport *port)
}
parport_write_data (port, 0x78); udelay (2);
- last_dev = 0; /* We've just been speaking to a device, so we
- know there must be at least _one_ out there. */
+ s = parport_read_status (port);
- for (daisy = 0; daisy < 4; daisy++) {
+ for (daisy = 0;
+ (s & (PARPORT_STATUS_PAPEROUT|PARPORT_STATUS_SELECT))
+ == (PARPORT_STATUS_PAPEROUT|PARPORT_STATUS_SELECT)
+ && daisy < 4;
+ ++daisy) {
parport_write_data (port, daisy);
udelay (2);
parport_frob_control (port,
@@ -485,14 +488,18 @@ static int assign_addrs (struct parport *port)
parport_frob_control (port, PARPORT_CONTROL_STROBE, 0);
udelay (1);
- if (last_dev)
- /* No more devices. */
- break;
+ add_dev (numdevs++, port, daisy);
- last_dev = !(parport_read_status (port)
- & PARPORT_STATUS_BUSY);
+ /* See if this device thought it was the last in the
+ * chain. */
+ if (!(s & PARPORT_STATUS_BUSY))
+ break;
- add_dev (numdevs++, port, daisy);
+ /* We are now seeing pass-through status: either
+ last_dev from the next device or, if last_dev does
+ not work, status lines from some non-daisy-chain
+ device. */
+ s = parport_read_status (port);
}
parport_write_data (port, 0xff); udelay (2);
@@ -501,11 +508,11 @@ static int assign_addrs (struct parport *port)
detected);
/* Ask the new devices to introduce themselves. */
- deviceid = kmalloc (1000, GFP_KERNEL);
+ deviceid = kmalloc (1024, GFP_KERNEL);
if (!deviceid) return 0;
for (daisy = 0; thisdev < numdevs; thisdev++, daisy++)
- parport_device_id (thisdev, deviceid, 1000);
+ parport_device_id (thisdev, deviceid, 1024);
kfree (deviceid);
return detected;
diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
index ce1e2aad8b1..d6c77658231 100644
--- a/drivers/parport/ieee1284_ops.c
+++ b/drivers/parport/ieee1284_ops.c
@@ -165,17 +165,7 @@ size_t parport_ieee1284_read_nibble (struct parport *port,
/* Does the error line indicate end of data? */
if (((i & 1) == 0) &&
(parport_read_status(port) & PARPORT_STATUS_ERROR)) {
- port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DNA;
- DPRINTK (KERN_DEBUG
- "%s: No more nibble data (%d bytes)\n",
- port->name, i/2);
-
- /* Go to reverse idle phase. */
- parport_frob_control (port,
- PARPORT_CONTROL_AUTOFD,
- PARPORT_CONTROL_AUTOFD);
- port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
- break;
+ goto end_of_data;
}
/* Event 7: Set nAutoFd low. */
@@ -225,18 +215,25 @@ size_t parport_ieee1284_read_nibble (struct parport *port,
byte = nibble;
}
- i /= 2; /* i is now in bytes */
-
if (i == len) {
/* Read the last nibble without checking data avail. */
- port = port->physport;
- if (parport_read_status (port) & PARPORT_STATUS_ERROR)
- port->ieee1284.phase = IEEE1284_PH_HBUSY_DNA;
+ if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
+ end_of_data:
+ DPRINTK (KERN_DEBUG
+ "%s: No more nibble data (%d bytes)\n",
+ port->name, i/2);
+
+ /* Go to reverse idle phase. */
+ parport_frob_control (port,
+ PARPORT_CONTROL_AUTOFD,
+ PARPORT_CONTROL_AUTOFD);
+ port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
+ }
else
- port->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
+ port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
}
- return i;
+ return i/2;
#endif /* IEEE1284 support */
}
@@ -256,17 +253,7 @@ size_t parport_ieee1284_read_byte (struct parport *port,
/* Data available? */
if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
- port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DNA;
- DPRINTK (KERN_DEBUG
- "%s: No more byte data (%Zd bytes)\n",
- port->name, count);
-
- /* Go to reverse idle phase. */
- parport_frob_control (port,
- PARPORT_CONTROL_AUTOFD,
- PARPORT_CONTROL_AUTOFD);
- port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
- break;
+ goto end_of_data;
}
/* Event 14: Place data bus in high impedance state. */
@@ -318,11 +305,20 @@ size_t parport_ieee1284_read_byte (struct parport *port,
if (count == len) {
/* Read the last byte without checking data avail. */
- port = port->physport;
- if (parport_read_status (port) & PARPORT_STATUS_ERROR)
- port->ieee1284.phase = IEEE1284_PH_HBUSY_DNA;
+ if (parport_read_status (port) & PARPORT_STATUS_ERROR) {
+ end_of_data:
+ DPRINTK (KERN_DEBUG
+ "%s: No more byte data (%Zd bytes)\n",
+ port->name, count);
+
+ /* Go to reverse idle phase. */
+ parport_frob_control (port,
+ PARPORT_CONTROL_AUTOFD,
+ PARPORT_CONTROL_AUTOFD);
+ port->physport->ieee1284.phase = IEEE1284_PH_REV_IDLE;
+ }
else
- port->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
+ port->physport->ieee1284.phase = IEEE1284_PH_HBUSY_DAVAIL;
}
return count;
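Both the nibble and byte readers above used to duplicate their "no more data" handling; the patch hoists it behind an end_of_data: label inside the final status check and jumps there from the early check as well. A small stand-alone sketch of that shared-exit-via-goto shape (the data source here is just an array with a zero terminator, not a parallel port):

#include <stdio.h>

/* Copy values until a zero terminator or 'max' entries, funnelling the
 * "ran out of data" reporting through one shared label. */
static int read_values(const int *src, int *dst, int max)
{
        int n = 0;

        while (n < max) {
                if (src[n] == 0)
                        goto end_of_data;       /* early termination check */
                dst[n] = src[n];
                n++;
        }
        if (src[n - 1] == 0) {                  /* final check, same handling */
end_of_data:
                printf("end of data after %d value(s)\n", n);
        }
        return n;
}

int main(void)
{
        int src[] = { 3, 1, 4, 0, 0 };
        int dst[8];

        return read_values(src, dst, 5) == 3 ? 0 : 1;
}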
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index c6493ad7c0c..9302b8fd746 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1169,7 +1169,7 @@ dump_parport_state ("fwd idle", port);
/* GCC is not inlining extern inline function later overwritten to non-inline,
so we use outlined_ variants here. */
-static struct parport_operations parport_pc_ops =
+static const struct parport_operations parport_pc_ops =
{
.write_data = parport_pc_write_data,
.read_data = parport_pc_read_data,
@@ -1211,10 +1211,11 @@ static struct parport_operations parport_pc_ops =
static void __devinit show_parconfig_smsc37c669(int io, int key)
{
int cr1,cr4,cra,cr23,cr26,cr27,i=0;
- static const char *modes[]={ "SPP and Bidirectional (PS/2)",
- "EPP and SPP",
- "ECP",
- "ECP and EPP" };
+ static const char *const modes[]={
+ "SPP and Bidirectional (PS/2)",
+ "EPP and SPP",
+ "ECP",
+ "ECP and EPP" };
outb(key,io);
outb(key,io);
@@ -1288,7 +1289,7 @@ static void __devinit show_parconfig_smsc37c669(int io, int key)
static void __devinit show_parconfig_winbond(int io, int key)
{
int cr30,cr60,cr61,cr70,cr74,crf0,i=0;
- static const char *modes[] = {
+ static const char *const modes[] = {
"Standard (SPP) and Bidirectional(PS/2)", /* 0 */
"EPP-1.9 and SPP",
"ECP",
@@ -1297,7 +1298,9 @@ static void __devinit show_parconfig_winbond(int io, int key)
"EPP-1.7 and SPP", /* 5 */
"undefined!",
"ECP and EPP-1.7" };
- static char *irqtypes[] = { "pulsed low, high-Z", "follows nACK" };
+ static char *const irqtypes[] = {
+ "pulsed low, high-Z",
+ "follows nACK" };
/* The registers are called compatible-PnP because the
register layout is modelled after ISA-PnP, the access
@@ -2368,8 +2371,10 @@ void parport_pc_unregister_port (struct parport *p)
spin_lock(&ports_lock);
list_del_init(&priv->list);
spin_unlock(&ports_lock);
+#if defined(CONFIG_PARPORT_PC_FIFO) && defined(HAS_DMA)
if (p->dma != PARPORT_DMA_NONE)
free_dma(p->dma);
+#endif
if (p->irq != PARPORT_IRQ_NONE)
free_irq(p->irq, p);
release_region(p->base, 3);
@@ -2377,14 +2382,12 @@ void parport_pc_unregister_port (struct parport *p)
release_region(p->base + 3, p->size - 3);
if (p->modes & PARPORT_MODE_ECP)
release_region(p->base_hi, 3);
-#ifdef CONFIG_PARPORT_PC_FIFO
-#ifdef HAS_DMA
+#if defined(CONFIG_PARPORT_PC_FIFO) && defined(HAS_DMA)
if (priv->dma_buf)
pci_free_consistent(priv->dev, PAGE_SIZE,
priv->dma_buf,
priv->dma_handle);
#endif
-#endif
kfree (p->private_data);
parport_put_port(p);
kfree (ops); /* hope no-one cached it */
@@ -2396,7 +2399,8 @@ EXPORT_SYMBOL (parport_pc_unregister_port);
/* ITE support maintained by Rich Liu <richliu@poorman.org> */
static int __devinit sio_ite_8872_probe (struct pci_dev *pdev, int autoirq,
- int autodma, struct parport_pc_via_data *via)
+ int autodma,
+ const struct parport_pc_via_data *via)
{
short inta_addr[6] = { 0x2A0, 0x2C0, 0x220, 0x240, 0x1E0 };
struct resource *base_res;
@@ -2524,7 +2528,8 @@ static struct parport_pc_via_data via_8231_data __devinitdata = {
};
static int __devinit sio_via_probe (struct pci_dev *pdev, int autoirq,
- int autodma, struct parport_pc_via_data *via)
+ int autodma,
+ const struct parport_pc_via_data *via)
{
u8 tmp, tmp2, siofunc;
u8 ppcontrol = 0;
@@ -2694,8 +2699,9 @@ enum parport_pc_sio_types {
/* each element directly indexed from enum list, above */
static struct parport_pc_superio {
- int (*probe) (struct pci_dev *pdev, int autoirq, int autodma, struct parport_pc_via_data *via);
- struct parport_pc_via_data *via;
+ int (*probe) (struct pci_dev *pdev, int autoirq, int autodma,
+ const struct parport_pc_via_data *via);
+ const struct parport_pc_via_data *via;
} parport_pc_superio_info[] __devinitdata = {
{ sio_via_probe, &via_686a_data, },
{ sio_via_probe, &via_8231_data, },
@@ -2828,7 +2834,7 @@ static struct parport_pc_pci {
/* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } }, /* untested */
};
-static struct pci_device_id parport_pc_pci_tbl[] = {
+static const struct pci_device_id parport_pc_pci_tbl[] = {
/* Super-IO onboard chips */
{ 0x1106, 0x0686, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sio_via_686a },
{ 0x1106, 0x8231, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sio_via_8231 },
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index d3dad0aac7c..76dd077e318 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -464,7 +464,7 @@ static struct pci_driver parport_serial_pci_driver = {
static int __init parport_serial_init (void)
{
- return pci_module_init (&parport_serial_pci_driver);
+ return pci_register_driver (&parport_serial_pci_driver);
}
static void __exit parport_serial_exit (void)
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index 4b48b31ec23..b62aee8de3c 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -11,9 +11,9 @@
#include <linux/string.h>
#include <asm/uaccess.h>
-static struct {
- char *token;
- char *descr;
+static const struct {
+ const char *token;
+ const char *descr;
} classes[] = {
{ "", "Legacy device" },
{ "PRINTER", "Printer" },
@@ -128,8 +128,131 @@ static void parse_data(struct parport *port, int device, char *str)
kfree(txt);
}
+/* Read up to count-1 bytes of device id. Terminate buffer with
+ * '\0'. Buffer begins with two Device ID length bytes as given by
+ * device. */
+static ssize_t parport_read_device_id (struct parport *port, char *buffer,
+ size_t count)
+{
+ unsigned char length[2];
+ unsigned lelen, belen;
+ size_t idlens[4];
+ unsigned numidlens;
+ unsigned current_idlen;
+ ssize_t retval;
+ size_t len;
+
+ /* First two bytes are MSB,LSB of inclusive length. */
+ retval = parport_read (port, length, 2);
+
+ if (retval < 0)
+ return retval;
+ if (retval != 2)
+ return -EIO;
+
+ if (count < 2)
+ return 0;
+ memcpy(buffer, length, 2);
+ len = 2;
+
+ /* Some devices wrongly send LE length, and some send it two
+ * bytes short. Construct a sorted array of lengths to try. */
+ belen = (length[0] << 8) + length[1];
+ lelen = (length[1] << 8) + length[0];
+ idlens[0] = min(belen, lelen);
+ idlens[1] = idlens[0]+2;
+ if (belen != lelen) {
+ int off = 2;
+ /* Don't try lengths of 0x100 and 0x200 as 1 and 2 */
+ if (idlens[0] <= 2)
+ off = 0;
+ idlens[off] = max(belen, lelen);
+ idlens[off+1] = idlens[off]+2;
+ numidlens = off+2;
+ }
+ else {
+ /* Some devices don't truly implement Device ID, but
+ * just return a constant nibble forever. This also
+ * catches those cases. */
+ if (idlens[0] == 0 || idlens[0] > 0xFFF) {
+ printk (KERN_DEBUG "%s: reported broken Device ID"
+ " length of %#zX bytes\n",
+ port->name, idlens[0]);
+ return -EIO;
+ }
+ numidlens = 2;
+ }
+
+ /* Try to respect the given ID length despite all the bugs in
+ * the ID length. Read according to shortest possible ID
+ * first. */
+ for (current_idlen = 0; current_idlen < numidlens; ++current_idlen) {
+ size_t idlen = idlens[current_idlen];
+ if (idlen+1 >= count)
+ break;
+
+ retval = parport_read (port, buffer+len, idlen-len);
+
+ if (retval < 0)
+ return retval;
+ len += retval;
+
+ if (port->physport->ieee1284.phase != IEEE1284_PH_HBUSY_DAVAIL) {
+ if (belen != len) {
+ printk (KERN_DEBUG "%s: Device ID was %d bytes"
+ " while device told it would be %d"
+ " bytes\n",
+ port->name, len, belen);
+ }
+ goto done;
+ }
+
+ /* This might end reading the Device ID too
+ * soon. Hopefully the needed fields were already in
+ * the first 256 bytes or so that we must have read so
+ * far. */
+ if (buffer[len-1] == ';') {
+ printk (KERN_DEBUG "%s: Device ID reading stopped"
+ " before device told data not available. "
+ "Current idlen %d of %d, len bytes %02X %02X\n",
+ port->name, current_idlen, numidlens,
+ length[0], length[1]);
+ goto done;
+ }
+ }
+ if (current_idlen < numidlens) {
+ /* Buffer not large enough, read to end of buffer. */
+ size_t idlen, len2;
+ if (len+1 < count) {
+ retval = parport_read (port, buffer+len, count-len-1);
+ if (retval < 0)
+ return retval;
+ len += retval;
+ }
+ /* Read the whole ID since some devices would not
+ * otherwise give back the Device ID from beginning
+ * next time when asked. */
+ idlen = idlens[current_idlen];
+ len2 = len;
+ while(len2 < idlen && retval > 0) {
+ char tmp[4];
+ retval = parport_read (port, tmp,
+ min(sizeof tmp, idlen-len2));
+ if (retval < 0)
+ return retval;
+ len2 += retval;
+ }
+ }
+ /* In addition, there are broken devices out there that don't
+ even finish off with a semi-colon. We do not need to care
+ about those at this time. */
+ done:
+ buffer[len] = '\0';
+ return len;
+}
+
/* Get Std 1284 Device ID. */
-ssize_t parport_device_id (int devnum, char *buffer, size_t len)
+ssize_t parport_device_id (int devnum, char *buffer, size_t count)
{
ssize_t retval = -ENXIO;
struct pardevice *dev = parport_open (devnum, "Device ID probe",
@@ -139,76 +262,20 @@ ssize_t parport_device_id (int devnum, char *buffer, size_t len)
parport_claim_or_block (dev);
- /* Negotiate to compatibility mode, and then to device ID mode.
- * (This is in case we are already in device ID mode.) */
+ /* Negotiate to compatibility mode, and then to device ID
+ * mode. (This is so that we start from the beginning of device ID if
+ * already in device ID mode.) */
parport_negotiate (dev->port, IEEE1284_MODE_COMPAT);
retval = parport_negotiate (dev->port,
IEEE1284_MODE_NIBBLE | IEEE1284_DEVICEID);
if (!retval) {
- int idlen;
- unsigned char length[2];
-
- /* First two bytes are MSB,LSB of inclusive length. */
- retval = parport_read (dev->port, length, 2);
-
- if (retval != 2) goto end_id;
-
- idlen = (length[0] << 8) + length[1] - 2;
- /*
- * Check if the caller-allocated buffer is large enough
- * otherwise bail out or there will be an at least off by one.
- */
- if (idlen + 1 < len)
- len = idlen;
- else {
- retval = -EINVAL;
- goto out;
- }
- retval = parport_read (dev->port, buffer, len);
-
- if (retval != len)
- printk (KERN_DEBUG "%s: only read %Zd of %Zd ID bytes\n",
- dev->port->name, retval,
- len);
-
- /* Some printer manufacturers mistakenly believe that
- the length field is supposed to be _exclusive_.
- In addition, there are broken devices out there
- that don't even finish off with a semi-colon. */
- if (buffer[len - 1] != ';') {
- ssize_t diff;
- diff = parport_read (dev->port, buffer + len, 2);
- retval += diff;
-
- if (diff)
- printk (KERN_DEBUG
- "%s: device reported incorrect "
- "length field (%d, should be %Zd)\n",
- dev->port->name, idlen, retval);
- else {
- /* One semi-colon short of a device ID. */
- buffer[len++] = ';';
- printk (KERN_DEBUG "%s: faking semi-colon\n",
- dev->port->name);
-
- /* If we get here, I don't think we
- need to worry about the possible
- standard violation of having read
- more than we were told to. The
- device is non-compliant anyhow. */
- }
- }
-
- end_id:
- buffer[len] = '\0';
+ retval = parport_read_device_id (dev->port, buffer, count);
parport_negotiate (dev->port, IEEE1284_MODE_COMPAT);
+ if (retval > 2)
+ parse_data (dev->port, dev->daisy, buffer+2);
}
- if (retval > 2)
- parse_data (dev->port, dev->daisy, buffer);
-
-out:
parport_release (dev);
parport_close (dev);
return retval;
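parport_read_device_id() above has to cope with devices that report the IEEE 1284 Device ID length in the wrong byte order or two bytes short, so it builds a sorted set of candidate lengths from the two length bytes before reading. The candidate construction on its own, as a runnable sketch (the length bytes are hard-coded for the demo, and the zero/overlong sanity check from the driver is omitted):

#include <stdio.h>

/* Build candidate Device ID lengths from the two length bytes.
 * Devices are supposed to send a big-endian inclusive length, but some
 * send little-endian and some report it two bytes short, so try the
 * shorter interpretation first, then +2, then the longer one, then +2. */
static unsigned build_candidates(unsigned char msb, unsigned char lsb,
                                 size_t idlens[4])
{
        unsigned belen = (msb << 8) + lsb;
        unsigned lelen = (lsb << 8) + msb;
        unsigned n;

        idlens[0] = belen < lelen ? belen : lelen;
        idlens[1] = idlens[0] + 2;
        if (belen != lelen) {
                unsigned off = idlens[0] <= 2 ? 0 : 2;

                idlens[off]     = belen > lelen ? belen : lelen;
                idlens[off + 1] = idlens[off] + 2;
                n = off + 2;
        } else {
                n = 2;
        }
        return n;
}

int main(void)
{
        size_t idlens[4];
        unsigned n = build_candidates(0x00, 0x2a, idlens); /* claims 42 bytes */

        for (unsigned i = 0; i < n; i++)
                printf("candidate %u: %zu\n", i, idlens[i]);
        return 0;
}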
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 9cb3ab156b0..ea62bed6bc8 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -1002,6 +1002,7 @@ EXPORT_SYMBOL(parport_register_driver);
EXPORT_SYMBOL(parport_unregister_driver);
EXPORT_SYMBOL(parport_register_device);
EXPORT_SYMBOL(parport_unregister_device);
+EXPORT_SYMBOL(parport_get_port);
EXPORT_SYMBOL(parport_put_port);
EXPORT_SYMBOL(parport_find_number);
EXPORT_SYMBOL(parport_find_base);
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 8e21f6ab89a..509a5b3ae99 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -794,12 +794,14 @@ static int enable_device(struct acpiphp_slot *slot)
if (PCI_SLOT(dev->devfn) != slot->device)
continue;
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
- dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+ dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
max = pci_scan_bridge(bus, dev, max, pass);
+ if (pass && dev->subordinate)
+ pci_bus_size_bridges(dev->subordinate);
+ }
}
}
- pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
acpiphp_sanitize_bus(bus);
pci_enable_bridges(bus);
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index 092491e25ef..cb88404c89f 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -317,6 +317,7 @@ struct controller {
u16 vendor_id;
struct work_struct int_task_event;
wait_queue_head_t queue; /* sleep & wake process */
+ struct dentry *dentry; /* debugfs dentry */
};
struct irq_mapping {
@@ -399,8 +400,11 @@ struct resource_lists {
#define msg_button_ignore "PCI slot #%d - button press ignored. (action in progress...)\n"
-/* sysfs functions for the hotplug controller info */
-extern void cpqhp_create_ctrl_files (struct controller *ctrl);
+/* debugfs functions for the hotplug controller info */
+extern void cpqhp_initialize_debugfs (void);
+extern void cpqhp_shutdown_debugfs (void);
+extern void cpqhp_create_debugfs_files (struct controller *ctrl);
+extern void cpqhp_remove_debugfs_files (struct controller *ctrl);
/* controller functions */
extern void cpqhp_pushbutton_thread (unsigned long event_pointer);
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 9aed8efe6a1..b3659ffccac 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -327,7 +327,9 @@ static int ctrl_slot_setup(struct controller *ctrl,
void __iomem *smbios_start,
void __iomem *smbios_table)
{
- struct slot *new_slot;
+ struct slot *slot;
+ struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot_info *hotplug_slot_info;
u8 number_of_slots;
u8 slot_device;
u8 slot_number;
@@ -345,93 +347,105 @@ static int ctrl_slot_setup(struct controller *ctrl,
slot_number = ctrl->first_slot;
while (number_of_slots) {
- new_slot = kmalloc(sizeof(*new_slot), GFP_KERNEL);
- if (!new_slot)
+ slot = kmalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot)
goto error;
- memset(new_slot, 0, sizeof(struct slot));
- new_slot->hotplug_slot = kmalloc(sizeof(*(new_slot->hotplug_slot)),
+ memset(slot, 0, sizeof(struct slot));
+ slot->hotplug_slot = kmalloc(sizeof(*(slot->hotplug_slot)),
GFP_KERNEL);
- if (!new_slot->hotplug_slot)
+ if (!slot->hotplug_slot)
goto error_slot;
- memset(new_slot->hotplug_slot, 0, sizeof(struct hotplug_slot));
+ hotplug_slot = slot->hotplug_slot;
+ memset(hotplug_slot, 0, sizeof(struct hotplug_slot));
- new_slot->hotplug_slot->info =
- kmalloc(sizeof(*(new_slot->hotplug_slot->info)),
+ hotplug_slot->info =
+ kmalloc(sizeof(*(hotplug_slot->info)),
GFP_KERNEL);
- if (!new_slot->hotplug_slot->info)
+ if (!hotplug_slot->info)
goto error_hpslot;
- memset(new_slot->hotplug_slot->info, 0,
+ hotplug_slot_info = hotplug_slot->info;
+ memset(hotplug_slot_info, 0,
sizeof(struct hotplug_slot_info));
- new_slot->hotplug_slot->name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL);
- if (!new_slot->hotplug_slot->name)
+ hotplug_slot->name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL);
+
+ if (!hotplug_slot->name)
goto error_info;
- new_slot->ctrl = ctrl;
- new_slot->bus = ctrl->bus;
- new_slot->device = slot_device;
- new_slot->number = slot_number;
- dbg("slot->number = %d\n",new_slot->number);
+ slot->ctrl = ctrl;
+ slot->bus = ctrl->bus;
+ slot->device = slot_device;
+ slot->number = slot_number;
+ dbg("slot->number = %d\n", slot->number);
slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
slot_entry);
- while (slot_entry && (readw(slot_entry + SMBIOS_SLOT_NUMBER) != new_slot->number)) {
+ while (slot_entry && (readw(slot_entry + SMBIOS_SLOT_NUMBER) !=
+ slot->number)) {
slot_entry = get_SMBIOS_entry(smbios_start,
smbios_table, 9, slot_entry);
}
- new_slot->p_sm_slot = slot_entry;
+ slot->p_sm_slot = slot_entry;
- init_timer(&new_slot->task_event);
- new_slot->task_event.expires = jiffies + 5 * HZ;
- new_slot->task_event.function = cpqhp_pushbutton_thread;
+ init_timer(&slot->task_event);
+ slot->task_event.expires = jiffies + 5 * HZ;
+ slot->task_event.function = cpqhp_pushbutton_thread;
//FIXME: these capabilities aren't used but if they are
// they need to be correctly implemented
- new_slot->capabilities |= PCISLOT_REPLACE_SUPPORTED;
- new_slot->capabilities |= PCISLOT_INTERLOCK_SUPPORTED;
+ slot->capabilities |= PCISLOT_REPLACE_SUPPORTED;
+ slot->capabilities |= PCISLOT_INTERLOCK_SUPPORTED;
- if (is_slot64bit(new_slot))
- new_slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
- if (is_slot66mhz(new_slot))
- new_slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
+ if (is_slot64bit(slot))
+ slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
+ if (is_slot66mhz(slot))
+ slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
if (ctrl->speed == PCI_SPEED_66MHz)
- new_slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
+ slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
- ctrl_slot = slot_device - (readb(ctrl->hpc_reg + SLOT_MASK) >> 4);
+ ctrl_slot =
+ slot_device - (readb(ctrl->hpc_reg + SLOT_MASK) >> 4);
// Check presence
- new_slot->capabilities |= ((((~tempdword) >> 23) | ((~tempdword) >> 15)) >> ctrl_slot) & 0x02;
+ slot->capabilities |=
+ ((((~tempdword) >> 23) |
+ ((~tempdword) >> 15)) >> ctrl_slot) & 0x02;
// Check the switch state
- new_slot->capabilities |= ((~tempdword & 0xFF) >> ctrl_slot) & 0x01;
+ slot->capabilities |=
+ ((~tempdword & 0xFF) >> ctrl_slot) & 0x01;
// Check the slot enable
- new_slot->capabilities |= ((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
+ slot->capabilities |=
+ ((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
/* register this slot with the hotplug pci core */
- new_slot->hotplug_slot->release = &release_slot;
- new_slot->hotplug_slot->private = new_slot;
- make_slot_name(new_slot->hotplug_slot->name, SLOT_NAME_SIZE, new_slot);
- new_slot->hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
+ hotplug_slot->release = &release_slot;
+ hotplug_slot->private = slot;
+ make_slot_name(hotplug_slot->name, SLOT_NAME_SIZE, slot);
+ hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
- new_slot->hotplug_slot->info->power_status = get_slot_enabled(ctrl, new_slot);
- new_slot->hotplug_slot->info->attention_status = cpq_get_attention_status(ctrl, new_slot);
- new_slot->hotplug_slot->info->latch_status = cpq_get_latch_status(ctrl, new_slot);
- new_slot->hotplug_slot->info->adapter_status = get_presence_status(ctrl, new_slot);
+ hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
+ hotplug_slot_info->attention_status =
+ cpq_get_attention_status(ctrl, slot);
+ hotplug_slot_info->latch_status =
+ cpq_get_latch_status(ctrl, slot);
+ hotplug_slot_info->adapter_status =
+ get_presence_status(ctrl, slot);
- dbg ("registering bus %d, dev %d, number %d, "
+ dbg("registering bus %d, dev %d, number %d, "
"ctrl->slot_device_offset %d, slot %d\n",
- new_slot->bus, new_slot->device,
- new_slot->number, ctrl->slot_device_offset,
+ slot->bus, slot->device,
+ slot->number, ctrl->slot_device_offset,
slot_number);
- result = pci_hp_register (new_slot->hotplug_slot);
+ result = pci_hp_register(hotplug_slot);
if (result) {
- err ("pci_hp_register failed with error %d\n", result);
+ err("pci_hp_register failed with error %d\n", result);
goto error_name;
}
- new_slot->next = ctrl->slot;
- ctrl->slot = new_slot;
+ slot->next = ctrl->slot;
+ ctrl->slot = slot;
number_of_slots--;
slot_device++;
@@ -439,15 +453,14 @@ static int ctrl_slot_setup(struct controller *ctrl,
}
return 0;
-
error_name:
- kfree(new_slot->hotplug_slot->name);
+ kfree(hotplug_slot->name);
error_info:
- kfree(new_slot->hotplug_slot->info);
+ kfree(hotplug_slot_info);
error_hpslot:
- kfree(new_slot->hotplug_slot);
+ kfree(hotplug_slot);
error_slot:
- kfree(new_slot);
+ kfree(slot);
error:
return result;
}
@@ -466,6 +479,8 @@ static int ctrl_slot_cleanup (struct controller * ctrl)
old_slot = next_slot;
}
+ cpqhp_remove_debugfs_files(ctrl);
+
//Free IRQ associated with hot plug device
free_irq(ctrl->interrupt, ctrl);
//Unmap the memory
@@ -1262,7 +1277,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
// Done with exclusive hardware access
up(&ctrl->crit_sect);
- cpqhp_create_ctrl_files(ctrl);
+ cpqhp_create_debugfs_files(ctrl);
return 0;
@@ -1502,6 +1517,7 @@ static int __init cpqhpc_init(void)
cpqhp_debug = debug;
info (DRIVER_DESC " version: " DRIVER_VERSION "\n");
+ cpqhp_initialize_debugfs();
result = pci_register_driver(&cpqhpc_driver);
dbg("pci_register_driver = %d\n", result);
return result;
@@ -1515,6 +1531,7 @@ static void __exit cpqhpc_cleanup(void)
dbg("pci_unregister_driver\n");
pci_unregister_driver(&cpqhpc_driver);
+ cpqhp_shutdown_debugfs();
}
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index 10a5a7674a8..771ed34b181 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -2630,29 +2630,15 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
hold_mem_node = NULL;
}
- /* If we have prefetchable memory resources copy them and
- * fill in the bridge's memory range registers. Otherwise,
- * fill in the range registers with values that disable them. */
- if (p_mem_node) {
- memcpy(hold_p_mem_node, p_mem_node, sizeof(struct pci_resource));
- p_mem_node->next = NULL;
-
- /* set Pre Mem base and Limit registers */
- temp_word = p_mem_node->base >> 16;
- rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_BASE, temp_word);
-
- temp_word = (p_mem_node->base + p_mem_node->length - 1) >> 16;
- rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word);
- } else {
- temp_word = 0xFFFF;
- rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_BASE, temp_word);
+ memcpy(hold_p_mem_node, p_mem_node, sizeof(struct pci_resource));
+ p_mem_node->next = NULL;
- temp_word = 0x0000;
- rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word);
+ /* set Pre Mem base and Limit registers */
+ temp_word = p_mem_node->base >> 16;
+ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_BASE, temp_word);
- kfree(hold_p_mem_node);
- hold_p_mem_node = NULL;
- }
+ temp_word = (p_mem_node->base + p_mem_node->length - 1) >> 16;
+ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word);
/* Adjust this to compensate for extra adjustment in first loop */
irqs.barber_pole--;
diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c
index 4c11048ad51..bbfeed767ff 100644
--- a/drivers/pci/hotplug/cpqphp_sysfs.c
+++ b/drivers/pci/hotplug/cpqphp_sysfs.c
@@ -33,22 +33,15 @@
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
+#include <linux/debugfs.h>
#include "cpqphp.h"
-
-/* A few routines that create sysfs entries for the hot plug controller */
-
-static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, char *buf)
+static int show_ctrl (struct controller *ctrl, char *buf)
{
- struct pci_dev *pci_dev;
- struct controller *ctrl;
- char * out = buf;
+ char *out = buf;
int index;
struct pci_resource *res;
- pci_dev = container_of (dev, struct pci_dev, dev);
- ctrl = pci_get_drvdata(pci_dev);
-
out += sprintf(buf, "Free resources: memory\n");
index = 11;
res = ctrl->mem_head;
@@ -80,22 +73,16 @@ static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, cha
return out - buf;
}
-static DEVICE_ATTR (ctrl, S_IRUGO, show_ctrl, NULL);
-static ssize_t show_dev (struct device *dev, struct device_attribute *attr, char *buf)
+static int show_dev (struct controller *ctrl, char *buf)
{
- struct pci_dev *pci_dev;
- struct controller *ctrl;
char * out = buf;
int index;
struct pci_resource *res;
struct pci_func *new_slot;
struct slot *slot;
- pci_dev = container_of (dev, struct pci_dev, dev);
- ctrl = pci_get_drvdata(pci_dev);
-
- slot=ctrl->slot;
+ slot = ctrl->slot;
while (slot) {
new_slot = cpqhp_slot_find(slot->bus, slot->device, 0);
@@ -134,10 +121,117 @@ static ssize_t show_dev (struct device *dev, struct device_attribute *attr, char
return out - buf;
}
-static DEVICE_ATTR (dev, S_IRUGO, show_dev, NULL);
-void cpqhp_create_ctrl_files (struct controller *ctrl)
+static int spew_debug_info(struct controller *ctrl, char *data, int size)
{
- device_create_file (&ctrl->pci_dev->dev, &dev_attr_ctrl);
- device_create_file (&ctrl->pci_dev->dev, &dev_attr_dev);
+ int used;
+
+ used = size - show_ctrl(ctrl, data);
+ used = (size - used) - show_dev(ctrl, &data[used]);
+ return used;
+}
+
+struct ctrl_dbg {
+ int size;
+ char *data;
+ struct controller *ctrl;
+};
+
+#define MAX_OUTPUT (4*PAGE_SIZE)
+
+static int open(struct inode *inode, struct file *file)
+{
+ struct controller *ctrl = inode->u.generic_ip;
+ struct ctrl_dbg *dbg;
+ int retval = -ENOMEM;
+
+ lock_kernel();
+ dbg = kmalloc(sizeof(*dbg), GFP_KERNEL);
+ if (!dbg)
+ goto exit;
+ dbg->data = kmalloc(MAX_OUTPUT, GFP_KERNEL);
+ if (!dbg->data) {
+ kfree(dbg);
+ goto exit;
+ }
+ dbg->size = spew_debug_info(ctrl, dbg->data, MAX_OUTPUT);
+ file->private_data = dbg;
+ retval = 0;
+exit:
+ unlock_kernel();
+ return retval;
+}
+
+static loff_t lseek(struct file *file, loff_t off, int whence)
+{
+ struct ctrl_dbg *dbg;
+ loff_t new = -1;
+
+ lock_kernel();
+ dbg = file->private_data;
+
+ switch (whence) {
+ case 0:
+ new = off;
+ break;
+ case 1:
+ new = file->f_pos + off;
+ break;
+ }
+ if (new < 0 || new > dbg->size) {
+ unlock_kernel();
+ return -EINVAL;
+ }
+ unlock_kernel();
+ return (file->f_pos = new);
}
+
+static ssize_t read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct ctrl_dbg *dbg = file->private_data;
+ return simple_read_from_buffer(buf, nbytes, ppos, dbg->data, dbg->size);
+}
+
+static int release(struct inode *inode, struct file *file)
+{
+ struct ctrl_dbg *dbg = file->private_data;
+
+ kfree(dbg->data);
+ kfree(dbg);
+ return 0;
+}
+
+static struct file_operations debug_ops = {
+ .owner = THIS_MODULE,
+ .open = open,
+ .llseek = lseek,
+ .read = read,
+ .release = release,
+};
+
+static struct dentry *root;
+
+void cpqhp_initialize_debugfs(void)
+{
+ if (!root)
+ root = debugfs_create_dir("cpqhp", NULL);
+}
+
+void cpqhp_shutdown_debugfs(void)
+{
+ debugfs_remove(root);
+}
+
+void cpqhp_create_debugfs_files(struct controller *ctrl)
+{
+ ctrl->dentry = debugfs_create_file(ctrl->pci_dev->dev.bus_id, S_IRUGO, root, ctrl, &debug_ops);
+}
+
+void cpqhp_remove_debugfs_files(struct controller *ctrl)
+{
+ if (ctrl->dentry)
+ debugfs_remove(ctrl->dentry);
+ ctrl->dentry = NULL;
+}
+
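The cpqphp debugfs conversion renders the whole controller report into one buffer at open() time and then serves read() and llseek() from that snapshot via simple_read_from_buffer(). The same "snapshot at open, copy out on read" shape in plain user-space C, with memcpy() standing in for the kernel helper (all names here are illustrative):

#include <stdio.h>
#include <string.h>

/* Serve a read of 'nbytes' at offset '*ppos' from a pre-rendered buffer,
 * the way simple_read_from_buffer() does for the debugfs file above. */
static size_t read_from_snapshot(char *dst, size_t nbytes, long *ppos,
                                 const char *snap, size_t snap_size)
{
        size_t pos = (size_t)*ppos;

        if (pos >= snap_size)
                return 0;                       /* EOF */
        if (nbytes > snap_size - pos)
                nbytes = snap_size - pos;       /* short read at the end */
        memcpy(dst, snap + pos, nbytes);
        *ppos = (long)(pos + nbytes);
        return nbytes;
}

int main(void)
{
        const char snap[] = "Free resources: memory\n";
        char out[8];
        long pos = 0;
        size_t n;

        while ((n = read_from_snapshot(out, sizeof(out), &pos,
                                       snap, sizeof(snap) - 1)) > 0)
                fwrite(out, 1, n, stdout);
        return 0;
}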
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
index b1ba429e0a2..155133fe5c1 100644
--- a/drivers/pci/hotplug/ibmphp_pci.c
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -969,7 +969,7 @@ static int configure_bridge (struct pci_func **func_passed, u8 slotno)
debug ("io 32\n");
need_io_upper = TRUE;
}
- if ((io_base & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
+ if ((pfmem_base & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
debug ("pfmem 64\n");
need_pfmem_upper = TRUE;
}
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 6a61b9f286e..0aac6a61337 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -32,6 +32,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
+#include <linux/sched.h> /* signal_pending() */
#include <linux/pcieport_if.h>
#include "pci_hotplug.h"
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 8df70486034..4fb569018a2 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -103,7 +103,10 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
static int init_slots(struct controller *ctrl)
{
- struct slot *new_slot;
+ struct slot *slot;
+ struct hpc_ops *hpc_ops;
+ struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot_info *hotplug_slot_info;
u8 number_of_slots;
u8 slot_device;
u32 slot_number;
@@ -114,59 +117,66 @@ static int init_slots(struct controller *ctrl)
slot_number = ctrl->first_slot;
while (number_of_slots) {
- new_slot = kmalloc(sizeof(*new_slot), GFP_KERNEL);
- if (!new_slot)
+ slot = kmalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot)
goto error;
- memset(new_slot, 0, sizeof(struct slot));
- new_slot->hotplug_slot =
- kmalloc(sizeof(*(new_slot->hotplug_slot)),
+ memset(slot, 0, sizeof(struct slot));
+ slot->hotplug_slot =
+ kmalloc(sizeof(*(slot->hotplug_slot)),
GFP_KERNEL);
- if (!new_slot->hotplug_slot)
+ if (!slot->hotplug_slot)
goto error_slot;
- memset(new_slot->hotplug_slot, 0, sizeof(struct hotplug_slot));
+ hotplug_slot = slot->hotplug_slot;
+ memset(hotplug_slot, 0, sizeof(struct hotplug_slot));
- new_slot->hotplug_slot->info =
- kmalloc(sizeof(*(new_slot->hotplug_slot->info)),
+ hotplug_slot->info =
+ kmalloc(sizeof(*(hotplug_slot->info)),
GFP_KERNEL);
- if (!new_slot->hotplug_slot->info)
+ if (!hotplug_slot->info)
goto error_hpslot;
- memset(new_slot->hotplug_slot->info, 0,
+ hotplug_slot_info = hotplug_slot->info;
+ memset(hotplug_slot_info, 0,
sizeof(struct hotplug_slot_info));
- new_slot->hotplug_slot->name = kmalloc(SLOT_NAME_SIZE,
- GFP_KERNEL);
- if (!new_slot->hotplug_slot->name)
+ hotplug_slot->name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL);
+ if (!hotplug_slot->name)
goto error_info;
- new_slot->ctrl = ctrl;
- new_slot->bus = ctrl->slot_bus;
- new_slot->device = slot_device;
- new_slot->hpc_ops = ctrl->hpc_ops;
+ slot->ctrl = ctrl;
+ slot->bus = ctrl->slot_bus;
+ slot->device = slot_device;
+ slot->hpc_ops = hpc_ops = ctrl->hpc_ops;
- new_slot->number = ctrl->first_slot;
- new_slot->hp_slot = slot_device - ctrl->slot_device_offset;
+ slot->number = ctrl->first_slot;
+ slot->hp_slot = slot_device - ctrl->slot_device_offset;
/* register this slot with the hotplug pci core */
- new_slot->hotplug_slot->private = new_slot;
- new_slot->hotplug_slot->release = &release_slot;
- make_slot_name(new_slot->hotplug_slot->name, SLOT_NAME_SIZE, new_slot);
- new_slot->hotplug_slot->ops = &pciehp_hotplug_slot_ops;
-
- new_slot->hpc_ops->get_power_status(new_slot, &(new_slot->hotplug_slot->info->power_status));
- new_slot->hpc_ops->get_attention_status(new_slot, &(new_slot->hotplug_slot->info->attention_status));
- new_slot->hpc_ops->get_latch_status(new_slot, &(new_slot->hotplug_slot->info->latch_status));
- new_slot->hpc_ops->get_adapter_status(new_slot, &(new_slot->hotplug_slot->info->adapter_status));
-
- dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x slot_device_offset=%x\n",
- new_slot->bus, new_slot->device, new_slot->hp_slot, new_slot->number, ctrl->slot_device_offset);
- result = pci_hp_register (new_slot->hotplug_slot);
+ hotplug_slot->private = slot;
+ hotplug_slot->release = &release_slot;
+ make_slot_name(hotplug_slot->name, SLOT_NAME_SIZE, slot);
+ hotplug_slot->ops = &pciehp_hotplug_slot_ops;
+
+ hpc_ops->get_power_status(slot,
+ &(hotplug_slot_info->power_status));
+ hpc_ops->get_attention_status(slot,
+ &(hotplug_slot_info->attention_status));
+ hpc_ops->get_latch_status(slot,
+ &(hotplug_slot_info->latch_status));
+ hpc_ops->get_adapter_status(slot,
+ &(hotplug_slot_info->adapter_status));
+
+ dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x "
+ "slot_device_offset=%x\n",
+ slot->bus, slot->device, slot->hp_slot, slot->number,
+ ctrl->slot_device_offset);
+ result = pci_hp_register(hotplug_slot);
if (result) {
err ("pci_hp_register failed with error %d\n", result);
goto error_name;
}
- new_slot->next = ctrl->slot;
- ctrl->slot = new_slot;
+ slot->next = ctrl->slot;
+ ctrl->slot = slot;
number_of_slots--;
slot_device++;
@@ -176,13 +186,13 @@ static int init_slots(struct controller *ctrl)
return 0;
error_name:
- kfree(new_slot->hotplug_slot->name);
+ kfree(hotplug_slot->name);
error_info:
- kfree(new_slot->hotplug_slot->info);
+ kfree(hotplug_slot_info);
error_hpslot:
- kfree(new_slot->hotplug_slot);
+ kfree(hotplug_slot);
error_slot:
- kfree(new_slot);
+ kfree(slot);
error:
return result;
}
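
[Annotation] The reworked init_slots() above allocates four objects in sequence and unwinds them with reverse-order goto labels on failure. A minimal stand-alone sketch of that unwind pattern, with purely illustrative identifiers (none of these names come from the driver):

#include <stdio.h>
#include <stdlib.h>

struct demo { void *a, *b, *c; };

static struct demo *demo_alloc(void)
{
    struct demo *d = calloc(1, sizeof(*d));   /* kmalloc + memset in the driver */
    if (!d)
        goto error;
    if (!(d->a = calloc(1, 16)))
        goto error_demo;
    if (!(d->b = calloc(1, 16)))
        goto error_a;
    if (!(d->c = calloc(1, 16)))
        goto error_b;
    return d;

error_b:
    free(d->b);
error_a:
    free(d->a);
error_demo:
    free(d);
error:
    return NULL;
}

int main(void)
{
    struct demo *d = demo_alloc();
    printf("%s\n", d ? "allocated" : "failed");
    return 0;
}
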
@@ -502,7 +512,7 @@ static void __exit unload_pciehpd(void)
}
-int hpdriver_context = 0;
+static int hpdriver_context = 0;
static void pciehp_remove (struct pcie_device *device)
{
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 0b8b26beb16..77e530321de 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -30,6 +30,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/signal.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -784,8 +787,13 @@ static int hpc_power_on_slot(struct slot * slot)
slot_cmd = (slot_ctrl & ~PWR_CTRL) | POWER_ON;
+ /* Enable detection that we turned off at slot power-off time */
if (!pciehp_poll_mode)
- slot_cmd = slot_cmd | HP_INTR_ENABLE;
+ slot_cmd = slot_cmd |
+ PWR_FAULT_DETECT_ENABLE |
+ MRL_DETECT_ENABLE |
+ PRSN_DETECT_ENABLE |
+ HP_INTR_ENABLE;
retval = pcie_write_cmd(slot, slot_cmd);
@@ -830,8 +838,18 @@ static int hpc_power_off_slot(struct slot * slot)
slot_cmd = (slot_ctrl & ~PWR_CTRL) | POWER_OFF;
+ /*
+ * If we get MRL or presence detect interrupts now, the isr
+ * will notice the sticky power-fault bit too and issue power
+ * indicator change commands. This will lead to an endless loop
+ * of command completions, since the power-fault bit remains on
+ * till the slot is powered on again.
+ */
if (!pciehp_poll_mode)
- slot_cmd = slot_cmd | HP_INTR_ENABLE;
+ slot_cmd = (slot_cmd &
+ ~PWR_FAULT_DETECT_ENABLE &
+ ~MRL_DETECT_ENABLE &
+ ~PRSN_DETECT_ENABLE) | HP_INTR_ENABLE;
retval = pcie_write_cmd(slot, slot_cmd);
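
[Annotation] The two hunks above are symmetric: the power-on path ORs the power-fault, MRL and presence detection-enable bits back into the slot command, while the power-off path masks the same bits out (keeping only HP_INTR_ENABLE) so the sticky power-fault bit cannot keep retriggering the ISR. A small stand-alone sketch of that masking, using made-up bit values rather than the real definitions from pciehp.h:

#include <stdio.h>

/* Illustrative bit positions only; the real values live in pciehp.h. */
#define HP_INTR_ENABLE          0x0001
#define PWR_FAULT_DETECT_ENABLE 0x0002
#define MRL_DETECT_ENABLE       0x0004
#define PRSN_DETECT_ENABLE      0x0008

#define DETECT_BITS (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE | PRSN_DETECT_ENABLE)

int main(void)
{
    unsigned short cmd = 0x0100;                                 /* pretend slot control value */
    unsigned short on  = cmd | DETECT_BITS | HP_INTR_ENABLE;     /* power-on path */
    unsigned short off = (cmd & ~DETECT_BITS) | HP_INTR_ENABLE;  /* power-off path */

    printf("power-on cmd  = 0x%04x\n", on);
    printf("power-off cmd = 0x%04x\n", off);
    return 0;
}
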
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 647673a7d22..4017fb03a0b 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -34,6 +34,31 @@
#include "../pci.h"
#include "pciehp.h"
+static int pciehp_add_bridge(struct pci_dev *dev)
+{
+ struct pci_bus *parent = dev->bus;
+ int pass, busnr, start = parent->secondary;
+ int end = parent->subordinate;
+
+ for (busnr = start; busnr <= end; busnr++) {
+ if (!pci_find_bus(pci_domain_nr(parent), busnr))
+ break;
+ }
+ if (busnr-- > end) {
+ err("No bus number available for hot-added bridge %s\n",
+ pci_name(dev));
+ return -1;
+ }
+ for (pass = 0; pass < 2; pass++)
+ busnr = pci_scan_bridge(parent, dev, busnr, pass);
+ if (!dev->subordinate)
+ return -1;
+ pci_bus_size_bridges(dev->subordinate);
+ pci_bus_assign_resources(parent);
+ pci_enable_bridges(parent);
+ pci_bus_add_devices(parent);
+ return 0;
+}
int pciehp_configure_device(struct slot *p_slot)
{
@@ -55,8 +80,8 @@ int pciehp_configure_device(struct slot *p_slot)
}
for (fn = 0; fn < 8; fn++) {
- if (!(dev = pci_find_slot(p_slot->bus,
- PCI_DEVFN(p_slot->device, fn))))
+ dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, fn));
+ if (!dev)
continue;
if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
err("Cannot hot-add display device %s\n",
@@ -65,27 +90,7 @@ int pciehp_configure_device(struct slot *p_slot)
}
if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
(dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
- /* Find an unused bus number for the new bridge */
- struct pci_bus *child;
- unsigned char busnr, start = parent->secondary;
- unsigned char end = parent->subordinate;
- for (busnr = start; busnr <= end; busnr++) {
- if (!pci_find_bus(pci_domain_nr(parent),
- busnr))
- break;
- }
- if (busnr >= end) {
- err("No free bus for hot-added bridge\n");
- continue;
- }
- child = pci_add_new_bus(parent, dev, busnr);
- if (!child) {
- err("Cannot add new bus for %s\n",
- pci_name(dev));
- continue;
- }
- child->subordinate = pci_do_scan_bus(child);
- pci_bus_size_bridges(child);
+ pciehp_add_bridge(dev);
}
/* TBD: program firmware provided _HPP values */
/* program_fw_provided_values(dev); */
@@ -93,7 +98,6 @@ int pciehp_configure_device(struct slot *p_slot)
pci_bus_assign_resources(parent);
pci_bus_add_devices(parent);
- pci_enable_bridges(parent);
return 0;
}
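
[Annotation] The new pciehp_add_bridge() helper scans the parent bridge's [secondary, subordinate] window for a bus number not yet in use; the post-decrement in "if (busnr-- > end)" leaves busnr one below the free number, presumably because pci_scan_bridge() treats its third argument as the highest bus number already taken. A stand-alone sketch of the search itself, with pci_find_bus() replaced by a stub:

#include <stdio.h>

/* Stub: pretend buses 1..3 are already in use. */
static int bus_in_use(int busnr)
{
    return busnr >= 1 && busnr <= 3;
}

int main(void)
{
    int start = 1, end = 7;    /* parent->secondary .. parent->subordinate */
    int busnr;

    for (busnr = start; busnr <= end; busnr++)
        if (!bus_in_use(busnr))
            break;
    if (busnr-- > end) {
        printf("no bus number available\n");
        return 1;
    }
    /* busnr is now (free number - 1), i.e. the "max" handed to the scan */
    printf("first free bus = %d, max passed to scan = %d\n", busnr + 1, busnr);
    return 0;
}
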
diff --git a/drivers/pci/hotplug/pciehprm_acpi.c b/drivers/pci/hotplug/pciehprm_acpi.c
index ae244e21862..2bdb30f68bf 100644
--- a/drivers/pci/hotplug/pciehprm_acpi.c
+++ b/drivers/pci/hotplug/pciehprm_acpi.c
@@ -174,7 +174,9 @@ int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
acpi_status status;
acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev));
struct pci_dev *pdev = dev;
+ struct pci_bus *parent;
u8 *path_name;
+
/*
* Per PCI firmware specification, we should run the ACPI _OSC
* method to get control of hotplug hardware before using it.
@@ -190,17 +192,18 @@ int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
*/
if (!pdev || !pdev->bus->parent)
break;
+ parent = pdev->bus->parent;
dbg("Could not find %s in acpi namespace, trying parent\n",
pci_name(pdev));
- if (!pdev->bus->parent->self)
+ if (!parent->self)
/* Parent must be a host bridge */
handle = acpi_get_pci_rootbridge_handle(
- pci_domain_nr(pdev->bus->parent),
- pdev->bus->parent->number);
+ pci_domain_nr(parent),
+ parent->number);
else
handle = DEVICE_ACPI_HANDLE(
- &(pdev->bus->parent->self->dev));
- pdev = pdev->bus->parent->self;
+ &(parent->self->dev));
+ pdev = parent->self;
}
while (handle) {
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index cc03609f45d..7d93dbaf628 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -112,28 +112,6 @@ static struct slot *find_slot(struct device_node *dn)
return NULL;
}
-static void rpadlpar_claim_one_bus(struct pci_bus *b)
-{
- struct list_head *ld;
- struct pci_bus *child_bus;
-
- for (ld = b->devices.next; ld != &b->devices; ld = ld->next) {
- struct pci_dev *dev = pci_dev_b(ld);
- int i;
-
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- struct resource *r = &dev->resource[i];
-
- if (r->parent || !r->start || !r->flags)
- continue;
- rpaphp_claim_resource(dev, i);
- }
- }
-
- list_for_each_entry(child_bus, &b->children, node)
- rpadlpar_claim_one_bus(child_bus);
-}
-
static struct pci_dev *dlpar_find_new_dev(struct pci_bus *parent,
struct device_node *dev_dn)
{
@@ -154,7 +132,8 @@ static struct pci_dev *dlpar_pci_add_bus(struct device_node *dn)
struct pci_controller *phb = pdn->phb;
struct pci_dev *dev = NULL;
- rpaphp_eeh_init_nodes(dn);
+ eeh_add_device_tree_early(dn);
+
/* Add EADS device to PHB bus, adding new entry to bus->devices */
dev = of_create_pci_dev(dn, phb->bus, pdn->devfn);
if (!dev) {
@@ -170,7 +149,7 @@ static struct pci_dev *dlpar_pci_add_bus(struct device_node *dn)
rpaphp_init_new_devs(dev->subordinate);
/* Claim new bus resources */
- rpadlpar_claim_one_bus(dev->bus);
+ pcibios_claim_one_bus(dev->bus);
/* ioremap() for child bus, which may or may not succeed */
(void) remap_bus_range(dev->bus);
diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c
index 4b35097b3d9..396b54b0c84 100644
--- a/drivers/pci/hotplug/rpaphp_pci.c
+++ b/drivers/pci/hotplug/rpaphp_pci.c
@@ -62,28 +62,6 @@ struct pci_bus *rpaphp_find_pci_bus(struct device_node *dn)
}
EXPORT_SYMBOL_GPL(rpaphp_find_pci_bus);
-int rpaphp_claim_resource(struct pci_dev *dev, int resource)
-{
- struct resource *res = &dev->resource[resource];
- struct resource *root = pci_find_parent_resource(dev, res);
- char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge";
- int err = -EINVAL;
-
- if (root != NULL) {
- err = request_resource(root, res);
- }
-
- if (err) {
- err("PCI: %s region %d of %s %s [%lx:%lx]\n",
- root ? "Address space collision on" :
- "No parent found for",
- resource, dtype, pci_name(dev), res->start, res->end);
- }
- return err;
-}
-
-EXPORT_SYMBOL_GPL(rpaphp_claim_resource);
-
static int rpaphp_get_sensor_state(struct slot *slot, int *state)
{
int rc;
@@ -177,7 +155,7 @@ void rpaphp_fixup_new_pci_devices(struct pci_bus *bus, int fix_bus)
if (r->parent || !r->start || !r->flags)
continue;
- rpaphp_claim_resource(dev, i);
+ pci_claim_resource(dev, i);
}
}
}
@@ -287,18 +265,6 @@ rpaphp_pci_config_slot(struct pci_bus *bus)
return dev;
}
-void rpaphp_eeh_init_nodes(struct device_node *dn)
-{
- struct device_node *sib;
-
- for (sib = dn->child; sib; sib = sib->sibling)
- rpaphp_eeh_init_nodes(sib);
- eeh_add_device_early(dn);
- return;
-
-}
-EXPORT_SYMBOL_GPL(rpaphp_eeh_init_nodes);
-
static void print_slot_pci_funcs(struct pci_bus *bus)
{
struct device_node *dn;
@@ -324,7 +290,7 @@ int rpaphp_config_pci_adapter(struct pci_bus *bus)
if (!dn)
goto exit;
- rpaphp_eeh_init_nodes(dn);
+ eeh_add_device_tree_early(dn);
dev = rpaphp_pci_config_slot(bus);
if (!dev) {
err("%s: can't find any devices.\n", __FUNCTION__);
@@ -370,13 +336,14 @@ EXPORT_SYMBOL_GPL(rpaphp_unconfig_pci_adapter);
static int setup_pci_hotplug_slot_info(struct slot *slot)
{
+ struct hotplug_slot_info *hotplug_slot_info = slot->hotplug_slot->info;
+
dbg("%s Initilize the PCI slot's hotplug->info structure ...\n",
__FUNCTION__);
- rpaphp_get_power_status(slot, &slot->hotplug_slot->info->power_status);
+ rpaphp_get_power_status(slot, &hotplug_slot_info->power_status);
rpaphp_get_pci_adapter_status(slot, 1,
- &slot->hotplug_slot->info->
- adapter_status);
- if (slot->hotplug_slot->info->adapter_status == NOT_VALID) {
+ &hotplug_slot_info->adapter_status);
+ if (hotplug_slot_info->adapter_status == NOT_VALID) {
err("%s: NOT_VALID: skip dn->full_name=%s\n",
__FUNCTION__, slot->dn->full_name);
return -EINVAL;
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 08ad26a0cae..ce0e9b6ce83 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -98,6 +98,10 @@ struct controller {
enum pci_bus_speed speed;
u32 first_slot; /* First physical slot number */
u8 slot_bus; /* Bus where the slots handled by this controller sit */
+ u32 cap_offset;
+ unsigned long mmio_base;
+ unsigned long mmio_size;
+ volatile int cmd_busy;
};
struct hotplug_params {
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 63628e01dd4..a2b3f0010ce 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -65,6 +65,7 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value);
static int get_attention_status (struct hotplug_slot *slot, u8 *value);
static int get_latch_status (struct hotplug_slot *slot, u8 *value);
static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
+static int get_address (struct hotplug_slot *slot, u32 *value);
static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
@@ -77,6 +78,7 @@ static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
.get_attention_status = get_attention_status,
.get_latch_status = get_latch_status,
.get_adapter_status = get_adapter_status,
+ .get_address = get_address,
.get_max_bus_speed = get_max_bus_speed,
.get_cur_bus_speed = get_cur_bus_speed,
};
@@ -314,6 +316,18 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
+static int get_address (struct hotplug_slot *hotplug_slot, u32 *value)
+{
+ struct slot *slot = get_slot (hotplug_slot, __FUNCTION__);
+ struct pci_bus *bus = slot->ctrl->pci_dev->subordinate;
+
+ dbg("%s - physical_slot = %s\n", __FUNCTION__, hotplug_slot->name);
+
+ *value = (pci_domain_nr(bus) << 16) | (slot->bus << 8) | slot->device;
+
+ return 0;
+}
+
static int get_max_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
{
struct slot *slot = get_slot (hotplug_slot, __FUNCTION__);
@@ -377,8 +391,6 @@ static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_ctrl;
}
- ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */
-
pci_set_drvdata(pdev, ctrl);
ctrl->pci_bus = kmalloc (sizeof (*ctrl->pci_bus), GFP_KERNEL);
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 58619359ad0..25ccb0e4759 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -248,7 +248,6 @@ static int change_bus_speed(struct controller *ctrl, struct slot *p_slot,
up(&ctrl->crit_sect);
return WRONG_BUS_FREQUENCY;
}
- wait_for_ctrl_irq (ctrl);
if ((rc = p_slot->hpc_ops->check_cmd_status(ctrl))) {
err("%s: Can't set bus speed/mode in the case of adapter & bus mismatch\n",
@@ -330,9 +329,6 @@ static int board_added(struct slot *p_slot)
up(&ctrl->crit_sect);
return -1;
}
-
- /* Wait for the command to complete */
- wait_for_ctrl_irq (ctrl);
rc = p_slot->hpc_ops->check_cmd_status(ctrl);
if (rc) {
@@ -352,7 +348,6 @@ static int board_added(struct slot *p_slot)
up(&ctrl->crit_sect);
return WRONG_BUS_FREQUENCY;
}
- wait_for_ctrl_irq (ctrl);
if ((rc = p_slot->hpc_ops->check_cmd_status(ctrl))) {
err("%s: Can't set bus speed/mode in the case of adapter & bus mismatch\n",
@@ -367,7 +362,6 @@ static int board_added(struct slot *p_slot)
up(&ctrl->crit_sect);
return rc;
}
- wait_for_ctrl_irq (ctrl);
if ((rc = p_slot->hpc_ops->check_cmd_status(ctrl))) {
err("%s: Failed to enable slot, error code(%d)\n", __FUNCTION__, rc);
@@ -494,7 +488,6 @@ static int board_added(struct slot *p_slot)
up(&ctrl->crit_sect);
return rc;
}
- wait_for_ctrl_irq (ctrl);
if ((rc = p_slot->hpc_ops->check_cmd_status(ctrl))) {
err("%s: Failed to enable slot, error code(%d)\n", __FUNCTION__, rc);
@@ -532,9 +525,6 @@ static int board_added(struct slot *p_slot)
p_slot->hpc_ops->green_led_on(p_slot);
- /* Wait for the command to complete */
- wait_for_ctrl_irq (ctrl);
-
/* Done with exclusive hardware access */
up(&ctrl->crit_sect);
@@ -552,8 +542,6 @@ err_exit:
up(&ctrl->crit_sect);
return rc;
}
- /* Wait for the command to complete */
- wait_for_ctrl_irq (ctrl);
rc = p_slot->hpc_ops->check_cmd_status(ctrl);
if (rc) {
@@ -603,8 +591,6 @@ static int remove_board(struct slot *p_slot)
up(&ctrl->crit_sect);
return rc;
}
- /* Wait for the command to complete */
- wait_for_ctrl_irq (ctrl);
rc = p_slot->hpc_ops->check_cmd_status(ctrl);
if (rc) {
@@ -621,8 +607,6 @@ static int remove_board(struct slot *p_slot)
up(&ctrl->crit_sect);
return rc;
}
- /* Wait for the command to complete */
- wait_for_ctrl_irq (ctrl);
/* Done with exclusive hardware access */
up(&ctrl->crit_sect);
@@ -676,9 +660,6 @@ static void shpchp_pushbutton_thread (unsigned long slot)
p_slot->hpc_ops->green_led_off(p_slot);
- /* Wait for the command to complete */
- wait_for_ctrl_irq (p_slot->ctrl);
-
/* Done with exclusive hardware access */
up(&p_slot->ctrl->crit_sect);
}
@@ -790,14 +771,9 @@ static void interrupt_event_handler(struct controller *ctrl)
down(&ctrl->crit_sect);
p_slot->hpc_ops->green_led_on(p_slot);
- /* Wait for the command to complete */
- wait_for_ctrl_irq (ctrl);
p_slot->hpc_ops->set_attention_status(p_slot, 0);
- /* Wait for the command to complete */
- wait_for_ctrl_irq (ctrl);
-
/* Done with exclusive hardware access */
up(&ctrl->crit_sect);
break;
@@ -806,12 +782,8 @@ static void interrupt_event_handler(struct controller *ctrl)
down(&ctrl->crit_sect);
p_slot->hpc_ops->green_led_off(p_slot);
- /* Wait for the command to complete */
- wait_for_ctrl_irq (ctrl);
p_slot->hpc_ops->set_attention_status(p_slot, 0);
- /* Wait for the command to complete */
- wait_for_ctrl_irq (ctrl);
/* Done with exclusive hardware access */
up(&ctrl->crit_sect);
@@ -845,14 +817,9 @@ static void interrupt_event_handler(struct controller *ctrl)
/* blink green LED and turn off amber */
p_slot->hpc_ops->green_led_blink(p_slot);
- /* Wait for the command to complete */
- wait_for_ctrl_irq (ctrl);
p_slot->hpc_ops->set_attention_status(p_slot, 0);
- /* Wait for the command to complete */
- wait_for_ctrl_irq (ctrl);
-
/* Done with exclusive hardware access */
up(&ctrl->crit_sect);
@@ -870,12 +837,8 @@ static void interrupt_event_handler(struct controller *ctrl)
down(&ctrl->crit_sect);
p_slot->hpc_ops->set_attention_status(p_slot, 1);
- /* Wait for the command to complete */
- wait_for_ctrl_irq (ctrl);
p_slot->hpc_ops->green_led_off(p_slot);
- /* Wait for the command to complete */
- wait_for_ctrl_irq (ctrl);
/* Done with exclusive hardware access */
up(&ctrl->crit_sect);
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 9987a6fd65b..b4226ff3a85 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -275,6 +275,25 @@ static void start_int_poll_timer(struct php_ctlr_state_s *php_ctlr, int seconds)
return;
}
+static inline int shpc_wait_cmd(struct controller *ctrl)
+{
+ int retval = 0;
+ unsigned int timeout_msec = shpchp_poll_mode ? 2000 : 1000;
+ unsigned long timeout = msecs_to_jiffies(timeout_msec);
+ int rc = wait_event_interruptible_timeout(ctrl->queue,
+ !ctrl->cmd_busy, timeout);
+ if (!rc) {
+ retval = -EIO;
+ err("Command not completed in %d msec\n", timeout_msec);
+ } else if (rc < 0) {
+ retval = -EINTR;
+ info("Command was interrupted by a signal\n");
+ }
+ ctrl->cmd_busy = 0;
+
+ return retval;
+}
+
static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
{
struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle;
@@ -314,8 +333,14 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
/* To make sure the Controller Busy bit is 0 before we send out the
* command.
*/
+ slot->ctrl->cmd_busy = 1;
writew(temp_word, php_ctlr->creg + CMD);
+ /*
+ * Wait for command completion.
+ */
+ retval = shpc_wait_cmd(slot->ctrl);
+
DBG_LEAVE_ROUTINE
return retval;
}
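
[Annotation] shpc_write_cmd() now sets ctrl->cmd_busy before writing the command register, and shpc_wait_cmd() sleeps on ctrl->queue until the command-complete interrupt clears the flag, with a one- or two-second timeout. A rough user-space analogue of that handshake using a pthread condition variable; it only illustrates the flag-plus-timed-wait shape, not the SHPC register protocol:

#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int cmd_busy;

static void *fake_isr(void *arg)
{
    (void)arg;
    usleep(100 * 1000);               /* pretend hardware latency */
    pthread_mutex_lock(&lock);
    cmd_busy = 0;                     /* ISR clears the flag ... */
    pthread_cond_signal(&done);       /* ... and wakes the waiter */
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t isr;
    struct timespec deadline;
    int rc = 0;

    cmd_busy = 1;                     /* "write" the command */
    pthread_create(&isr, NULL, fake_isr, NULL);

    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec += 1;             /* roughly the 1000 msec budget in the driver */

    pthread_mutex_lock(&lock);
    while (cmd_busy && rc == 0)
        rc = pthread_cond_timedwait(&done, &lock, &deadline);
    pthread_mutex_unlock(&lock);

    printf(cmd_busy ? "command timed out\n" : "command completed\n");
    pthread_join(isr, NULL);
    return 0;
}
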
@@ -604,7 +629,7 @@ static int hpc_get_mode1_ECC_cap(struct slot *slot, u8 *mode)
sec_bus_status = readw(php_ctlr->creg + SEC_BUS_CONFIG);
if (pi == 2) {
- *mode = (sec_bus_status & 0x0100) >> 7;
+ *mode = (sec_bus_status & 0x0100) >> 8;
} else {
retval = -1;
}
@@ -791,7 +816,7 @@ static void hpc_release_ctlr(struct controller *ctrl)
}
if (php_ctlr->pci_dev) {
iounmap(php_ctlr->creg);
- release_mem_region(pci_resource_start(php_ctlr->pci_dev, 0), pci_resource_len(php_ctlr->pci_dev, 0));
+ release_mem_region(ctrl->mmio_base, ctrl->mmio_size);
php_ctlr->pci_dev = NULL;
}
@@ -1058,12 +1083,13 @@ static irqreturn_t shpc_isr(int IRQ, void *dev_id, struct pt_regs *regs)
if (intr_loc & 0x0001) {
/*
* Command Complete Interrupt Pending
- * RO only - clear by writing 0 to the Command Completion
+ * RO only - clear by writing 1 to the Command Completion
* Detect bit in Controller SERR-INT register
*/
temp_dword = readl(php_ctlr->creg + SERR_INTR_ENABLE);
- temp_dword &= 0xfffeffff;
+ temp_dword &= 0xfffdffff;
writel(temp_dword, php_ctlr->creg + SERR_INTR_ENABLE);
+ ctrl->cmd_busy = 0;
wake_up_interruptible(&ctrl->queue);
}
@@ -1121,7 +1147,6 @@ static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value)
int retval = 0;
u8 pi;
u32 slot_avail1, slot_avail2;
- int slot_num;
DBG_ENTER_ROUTINE
@@ -1140,39 +1165,39 @@ static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value)
slot_avail2 = readl(php_ctlr->creg + SLOT_AVAIL2);
if (pi == 2) {
- if ((slot_num = ((slot_avail2 & SLOT_133MHZ_PCIX_533) >> 27) ) != 0 )
+ if (slot_avail2 & SLOT_133MHZ_PCIX_533)
bus_speed = PCIX_133MHZ_533;
- else if ((slot_num = ((slot_avail2 & SLOT_100MHZ_PCIX_533) >> 23) ) != 0 )
+ else if (slot_avail2 & SLOT_100MHZ_PCIX_533)
bus_speed = PCIX_100MHZ_533;
- else if ((slot_num = ((slot_avail2 & SLOT_66MHZ_PCIX_533) >> 19) ) != 0 )
+ else if (slot_avail2 & SLOT_66MHZ_PCIX_533)
bus_speed = PCIX_66MHZ_533;
- else if ((slot_num = ((slot_avail2 & SLOT_133MHZ_PCIX_266) >> 15) ) != 0 )
+ else if (slot_avail2 & SLOT_133MHZ_PCIX_266)
bus_speed = PCIX_133MHZ_266;
- else if ((slot_num = ((slot_avail2 & SLOT_100MHZ_PCIX_266) >> 11) ) != 0 )
+ else if (slot_avail2 & SLOT_100MHZ_PCIX_266)
bus_speed = PCIX_100MHZ_266;
- else if ((slot_num = ((slot_avail2 & SLOT_66MHZ_PCIX_266) >> 7) ) != 0 )
+ else if (slot_avail2 & SLOT_66MHZ_PCIX_266)
bus_speed = PCIX_66MHZ_266;
- else if ((slot_num = ((slot_avail1 & SLOT_133MHZ_PCIX) >> 23) ) != 0 )
+ else if (slot_avail1 & SLOT_133MHZ_PCIX)
bus_speed = PCIX_133MHZ;
- else if ((slot_num = ((slot_avail1 & SLOT_100MHZ_PCIX) >> 15) ) != 0 )
+ else if (slot_avail1 & SLOT_100MHZ_PCIX)
bus_speed = PCIX_100MHZ;
- else if ((slot_num = ((slot_avail1 & SLOT_66MHZ_PCIX) >> 7) ) != 0 )
+ else if (slot_avail1 & SLOT_66MHZ_PCIX)
bus_speed = PCIX_66MHZ;
- else if ((slot_num = (slot_avail2 & SLOT_66MHZ)) != 0 )
+ else if (slot_avail2 & SLOT_66MHZ)
bus_speed = PCI_66MHZ;
- else if ((slot_num = (slot_avail1 & SLOT_33MHZ)) != 0 )
+ else if (slot_avail1 & SLOT_33MHZ)
bus_speed = PCI_33MHZ;
else bus_speed = PCI_SPEED_UNKNOWN;
} else {
- if ((slot_num = ((slot_avail1 & SLOT_133MHZ_PCIX) >> 23) ) != 0 )
+ if (slot_avail1 & SLOT_133MHZ_PCIX)
bus_speed = PCIX_133MHZ;
- else if ((slot_num = ((slot_avail1 & SLOT_100MHZ_PCIX) >> 15) ) != 0 )
+ else if (slot_avail1 & SLOT_100MHZ_PCIX)
bus_speed = PCIX_100MHZ;
- else if ((slot_num = ((slot_avail1 & SLOT_66MHZ_PCIX) >> 7) ) != 0 )
+ else if (slot_avail1 & SLOT_66MHZ_PCIX)
bus_speed = PCIX_66MHZ;
- else if ((slot_num = (slot_avail2 & SLOT_66MHZ)) != 0 )
+ else if (slot_avail2 & SLOT_66MHZ)
bus_speed = PCI_66MHZ;
- else if ((slot_num = (slot_avail1 & SLOT_33MHZ)) != 0 )
+ else if (slot_avail1 & SLOT_33MHZ)
bus_speed = PCI_33MHZ;
else bus_speed = PCI_SPEED_UNKNOWN;
}
@@ -1321,19 +1346,34 @@ static struct hpc_ops shpchp_hpc_ops = {
.check_cmd_status = hpc_check_cmd_status,
};
+static inline int shpc_indirect_creg_read(struct controller *ctrl, int index,
+ u32 *value)
+{
+ int rc;
+ u32 cap_offset = ctrl->cap_offset;
+ struct pci_dev *pdev = ctrl->pci_dev;
+
+ rc = pci_write_config_byte(pdev, cap_offset + DWORD_SELECT, index);
+ if (rc)
+ return rc;
+ return pci_read_config_dword(pdev, cap_offset + DWORD_DATA, value);
+}
+
int shpc_init(struct controller * ctrl, struct pci_dev * pdev)
{
struct php_ctlr_state_s *php_ctlr, *p;
void *instance_id = ctrl;
- int rc;
+ int rc, num_slots = 0;
u8 hp_slot;
static int first = 1;
- u32 shpc_cap_offset, shpc_base_offset;
+ u32 shpc_base_offset;
u32 tempdword, slot_reg;
u8 i;
DBG_ENTER_ROUTINE
+ ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */
+
spin_lock_init(&list_lock);
php_ctlr = (struct php_ctlr_state_s *) kmalloc(sizeof(struct php_ctlr_state_s), GFP_KERNEL);
@@ -1348,41 +1388,45 @@ int shpc_init(struct controller * ctrl, struct pci_dev * pdev)
if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device ==
PCI_DEVICE_ID_AMD_GOLAM_7450)) {
- shpc_base_offset = 0; /* amd shpc driver doesn't use this; assume 0 */
+ /* amd shpc driver doesn't use Base Offset; assume 0 */
+ ctrl->mmio_base = pci_resource_start(pdev, 0);
+ ctrl->mmio_size = pci_resource_len(pdev, 0);
} else {
- if ((shpc_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_SHPC)) == 0) {
- err("%s : shpc_cap_offset == 0\n", __FUNCTION__);
+ ctrl->cap_offset = pci_find_capability(pdev, PCI_CAP_ID_SHPC);
+ if (!ctrl->cap_offset) {
+ err("%s : cap_offset == 0\n", __FUNCTION__);
goto abort_free_ctlr;
}
- dbg("%s: shpc_cap_offset = %x\n", __FUNCTION__, shpc_cap_offset);
-
- rc = pci_write_config_byte(pdev, (u8)shpc_cap_offset + DWORD_SELECT , BASE_OFFSET);
+ dbg("%s: cap_offset = %x\n", __FUNCTION__, ctrl->cap_offset);
+
+ rc = shpc_indirect_creg_read(ctrl, 0, &shpc_base_offset);
if (rc) {
- err("%s : pci_word_config_byte failed\n", __FUNCTION__);
+ err("%s: cannot read base_offset\n", __FUNCTION__);
goto abort_free_ctlr;
}
-
- rc = pci_read_config_dword(pdev, (u8)shpc_cap_offset + DWORD_DATA, &shpc_base_offset);
+
+ rc = shpc_indirect_creg_read(ctrl, 3, &tempdword);
if (rc) {
- err("%s : pci_read_config_dword failed\n", __FUNCTION__);
+ err("%s: cannot read slot config\n", __FUNCTION__);
goto abort_free_ctlr;
}
+ num_slots = tempdword & SLOT_NUM;
+ dbg("%s: num_slots (indirect) %x\n", __FUNCTION__, num_slots);
- for (i = 0; i <= 14; i++) {
- rc = pci_write_config_byte(pdev, (u8)shpc_cap_offset + DWORD_SELECT , i);
- if (rc) {
- err("%s : pci_word_config_byte failed\n", __FUNCTION__);
- goto abort_free_ctlr;
- }
-
- rc = pci_read_config_dword(pdev, (u8)shpc_cap_offset + DWORD_DATA, &tempdword);
+ for (i = 0; i < 9 + num_slots; i++) {
+ rc = shpc_indirect_creg_read(ctrl, i, &tempdword);
if (rc) {
- err("%s : pci_read_config_dword failed\n", __FUNCTION__);
+ err("%s: cannot read creg (index = %d)\n",
+ __FUNCTION__, i);
goto abort_free_ctlr;
}
dbg("%s: offset %d: value %x\n", __FUNCTION__,i,
tempdword);
}
+
+ ctrl->mmio_base =
+ pci_resource_start(pdev, 0) + shpc_base_offset;
+ ctrl->mmio_size = 0x24 + 0x4 * num_slots;
}
if (first) {
@@ -1396,16 +1440,16 @@ int shpc_init(struct controller * ctrl, struct pci_dev * pdev)
if (pci_enable_device(pdev))
goto abort_free_ctlr;
- if (!request_mem_region(pci_resource_start(pdev, 0) + shpc_base_offset, pci_resource_len(pdev, 0), MY_NAME)) {
+ if (!request_mem_region(ctrl->mmio_base, ctrl->mmio_size, MY_NAME)) {
err("%s: cannot reserve MMIO region\n", __FUNCTION__);
goto abort_free_ctlr;
}
- php_ctlr->creg = ioremap(pci_resource_start(pdev, 0) + shpc_base_offset, pci_resource_len(pdev, 0));
+ php_ctlr->creg = ioremap(ctrl->mmio_base, ctrl->mmio_size);
if (!php_ctlr->creg) {
- err("%s: cannot remap MMIO region %lx @ %lx\n", __FUNCTION__, pci_resource_len(pdev, 0),
- pci_resource_start(pdev, 0) + shpc_base_offset);
- release_mem_region(pci_resource_start(pdev, 0) + shpc_base_offset, pci_resource_len(pdev, 0));
+ err("%s: cannot remap MMIO region %lx @ %lx\n", __FUNCTION__,
+ ctrl->mmio_size, ctrl->mmio_base);
+ release_mem_region(ctrl->mmio_base, ctrl->mmio_size);
goto abort_free_ctlr;
}
dbg("%s: php_ctlr->creg %p\n", __FUNCTION__, php_ctlr->creg);
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index 38009bc0fd5..19e1a5e1e30 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -89,10 +89,11 @@ int shpchp_configure_device(struct slot *p_slot)
struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
int num, fn;
- dev = pci_find_slot(p_slot->bus, PCI_DEVFN(p_slot->device, 0));
+ dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0));
if (dev) {
err("Device %s already exists at %x:%x, cannot hot-add\n",
pci_name(dev), p_slot->bus, p_slot->device);
+ pci_dev_put(dev);
return -EINVAL;
}
@@ -103,12 +104,13 @@ int shpchp_configure_device(struct slot *p_slot)
}
for (fn = 0; fn < 8; fn++) {
- if (!(dev = pci_find_slot(p_slot->bus,
- PCI_DEVFN(p_slot->device, fn))))
+ dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, fn));
+ if (!dev)
continue;
if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
err("Cannot hot-add display device %s\n",
pci_name(dev));
+ pci_dev_put(dev);
continue;
}
if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
@@ -124,18 +126,21 @@ int shpchp_configure_device(struct slot *p_slot)
}
if (busnr >= end) {
err("No free bus for hot-added bridge\n");
+ pci_dev_put(dev);
continue;
}
child = pci_add_new_bus(parent, dev, busnr);
if (!child) {
err("Cannot add new bus for %s\n",
pci_name(dev));
+ pci_dev_put(dev);
continue;
}
child->subordinate = pci_do_scan_bus(child);
pci_bus_size_bridges(child);
}
program_fw_provided_values(dev);
+ pci_dev_put(dev);
}
pci_bus_assign_resources(parent);
@@ -149,17 +154,19 @@ int shpchp_unconfigure_device(struct slot *p_slot)
int rc = 0;
int j;
u8 bctl = 0;
-
+ struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
+
dbg("%s: bus/dev = %x/%x\n", __FUNCTION__, p_slot->bus, p_slot->device);
for (j=0; j<8 ; j++) {
- struct pci_dev* temp = pci_find_slot(p_slot->bus,
+ struct pci_dev* temp = pci_get_slot(parent,
(p_slot->device << 3) | j);
if (!temp)
continue;
if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
err("Cannot remove display device %s\n",
pci_name(temp));
+ pci_dev_put(temp);
continue;
}
if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
@@ -167,10 +174,12 @@ int shpchp_unconfigure_device(struct slot *p_slot)
if (bctl & PCI_BRIDGE_CTL_VGA) {
err("Cannot remove display device %s\n",
pci_name(temp));
+ pci_dev_put(temp);
continue;
}
}
pci_remove_bus_device(temp);
+ pci_dev_put(temp);
}
return rc;
}
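
[Annotation] Throughout shpchp_pci.c the lookups switch from pci_find_slot() to pci_get_slot(), which returns a referenced pci_dev; every exit path, including the error "continue" branches, now balances it with pci_dev_put(). A toy refcount sketch of why each early exit needs its own put; the struct and helper names are invented:

#include <stdio.h>

struct toy_dev { int refcnt; };

static struct toy_dev *toy_get(struct toy_dev *d) { d->refcnt++; return d; }
static void toy_put(struct toy_dev *d) { d->refcnt--; }

int main(void)
{
    struct toy_dev dev = { .refcnt = 1 };
    int fn;

    for (fn = 0; fn < 8; fn++) {
        struct toy_dev *d = toy_get(&dev);  /* like pci_get_slot() */

        if (fn % 2) {                       /* some reason to skip this function */
            toy_put(d);                     /* drop the reference on every early exit */
            continue;
        }
        /* ... use d ... */
        toy_put(d);                         /* and on the normal path */
    }
    printf("refcount back to %d (should be 1)\n", dev.refcnt);
    return 0;
}
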
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 8e287a828d5..d2a633efa10 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -19,6 +19,7 @@
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
+#if 0
/**
* pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
@@ -63,6 +64,8 @@ pci_max_busnr(void)
return max;
}
+#endif /* 0 */
+
static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn, u8 pos, int cap)
{
u8 id;
@@ -587,7 +590,7 @@ pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
u8 pin;
- pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ pin = dev->pin;
if (!pin)
return -1;
pin--;
@@ -917,8 +920,6 @@ EXPORT_SYMBOL_GPL(pci_restore_bars);
EXPORT_SYMBOL(pci_enable_device_bars);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pci_disable_device);
-EXPORT_SYMBOL(pci_max_busnr);
-EXPORT_SYMBOL(pci_bus_max_busnr);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 294849d2459..a6dfee2f6d2 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -26,20 +26,15 @@ extern int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
#ifdef CONFIG_PROC_FS
extern int pci_proc_attach_device(struct pci_dev *dev);
extern int pci_proc_detach_device(struct pci_dev *dev);
-extern int pci_proc_attach_bus(struct pci_bus *bus);
extern int pci_proc_detach_bus(struct pci_bus *bus);
#else
static inline int pci_proc_attach_device(struct pci_dev *dev) { return 0; }
static inline int pci_proc_detach_device(struct pci_dev *dev) { return 0; }
-static inline int pci_proc_attach_bus(struct pci_bus *bus) { return 0; }
static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; }
#endif
/* Functions for PCI Hotplug drivers to use */
extern unsigned int pci_do_scan_bus(struct pci_bus *bus);
-extern int pci_remove_device_safe(struct pci_dev *dev);
-extern unsigned char pci_max_busnr(void);
-extern unsigned char pci_bus_max_busnr(struct pci_bus *bus);
extern int pci_bus_find_capability (struct pci_bus *bus, unsigned int devfn, int cap);
extern void pci_remove_legacy_files(struct pci_bus *bus);
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 467a4ceccf1..e4e5f1e8d81 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -238,8 +238,8 @@ static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev,
device->driver = NULL;
device->driver_data = NULL;
device->release = release_pcie_device; /* callback to free pcie dev */
- sprintf(&device->bus_id[0], "pcie%02x",
- get_descriptor_id(port_type, service_type));
+ snprintf(device->bus_id, sizeof(device->bus_id), "%s:pcie%02x",
+ pci_name(parent), get_descriptor_id(port_type, service_type));
device->parent = &parent->dev;
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index fce2cb2112d..adfad4fd6a1 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -264,8 +264,10 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
if (base <= limit) {
res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
- res->start = base;
- res->end = limit + 0xfff;
+ if (!res->start)
+ res->start = base;
+ if (!res->end)
+ res->end = limit + 0xfff;
}
res = child->resource[1];
@@ -431,7 +433,7 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max
{
struct pci_bus *child;
int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
- u32 buses, i;
+ u32 buses, i, j = 0;
u16 bctl;
pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
@@ -541,10 +543,29 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max
* as cards with a PCI-to-PCI bridge can be
* inserted later.
*/
- for (i=0; i<CARDBUS_RESERVE_BUSNR; i++)
+ for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) {
+ struct pci_bus *parent = bus;
if (pci_find_bus(pci_domain_nr(bus),
max+i+1))
break;
+ while (parent->parent) {
+ if ((!pcibios_assign_all_busses()) &&
+ (parent->subordinate > max) &&
+ (parent->subordinate <= max+i)) {
+ j = 1;
+ }
+ parent = parent->parent;
+ }
+ if (j) {
+ /*
+ * Often, there are two cardbus bridges
+ * -- try to leave one valid bus number
+ * for each one.
+ */
+ i /= 2;
+ break;
+ }
+ }
max += i;
pci_fixup_parent_subordinate_busnr(child, max);
}
@@ -559,6 +580,22 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max
sprintf(child->name, (is_cardbus ? "PCI CardBus #%02x" : "PCI Bus #%02x"), child->number);
+ while (bus->parent) {
+ if ((child->subordinate > bus->subordinate) ||
+ (child->number > bus->subordinate) ||
+ (child->number < bus->number) ||
+ (child->subordinate < bus->number)) {
+ printk(KERN_WARNING "PCI: Bus #%02x (-#%02x) may be "
+ "hidden behind%s bridge #%02x (-#%02x)%s\n",
+ child->number, child->subordinate,
+ bus->self->transparent ? " transparent" : " ",
+ bus->number, bus->subordinate,
+ pcibios_assign_all_busses() ? " " :
+ " (try 'pci=assign-busses')");
+ }
+ bus = bus->parent;
+ }
+
return max;
}
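
[Annotation] The new loop at the end of pci_scan_bridge() walks up the parent chain and warns when a child's [number, subordinate] range is not contained in an ancestor's range, since configuration cycles for such buses may never make it through the ancestor bridge. The containment test, extracted into a stand-alone form with example values:

#include <stdio.h>

struct busrange { int number, subordinate; };

/* Mirrors the condition guarding the warning: true when the child range
 * is not fully inside the parent range. */
static int range_hidden(struct busrange child, struct busrange parent)
{
    return child.subordinate > parent.subordinate ||
           child.number      > parent.subordinate ||
           child.number      < parent.number      ||
           child.subordinate < parent.number;
}

int main(void)
{
    struct busrange parent = { 2, 5 };
    struct busrange ok     = { 3, 4 };
    struct busrange bad    = { 3, 9 };

    printf("contained range hidden? %d\n", range_hidden(ok, parent));
    printf("overflowing range hidden? %d\n", range_hidden(bad, parent));
    return 0;
}
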
@@ -571,6 +608,7 @@ static void pci_read_irq(struct pci_dev *dev)
unsigned char irq;
pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
+ dev->pin = irq;
if (irq)
pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
dev->irq = irq;
@@ -624,6 +662,7 @@ static int pci_setup_device(struct pci_dev * dev)
/* The PCI-to-PCI bridge spec requires that subtractive
decoding (i.e. transparent) bridge must have programming
interface code of 0x01. */
+ pci_read_irq(dev);
dev->transparent = ((dev->class & 0xff) == 1);
pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
break;
@@ -678,7 +717,7 @@ static void pci_release_dev(struct device *dev)
* reading the dword at 0x100 which must either be 0 or a valid extended
* capability header.
*/
-static int pci_cfg_space_size(struct pci_dev *dev)
+int pci_cfg_space_size(struct pci_dev *dev)
{
int pos;
u32 status;
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 9eb465727fc..92a88576083 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -25,7 +25,7 @@ proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
loff_t new = -1;
struct inode *inode = file->f_dentry->d_inode;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
switch (whence) {
case 0:
new = off;
@@ -41,7 +41,7 @@ proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
new = -EINVAL;
else
file->f_pos = new;
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return new;
}
@@ -431,6 +431,7 @@ int pci_proc_detach_device(struct pci_dev *dev)
return 0;
}
+#if 0
int pci_proc_attach_bus(struct pci_bus* bus)
{
struct proc_dir_entry *de = bus->procdir;
@@ -447,6 +448,7 @@ int pci_proc_attach_bus(struct pci_bus* bus)
}
return 0;
}
+#endif /* 0 */
int pci_proc_detach_bus(struct pci_bus* bus)
{
@@ -612,7 +614,6 @@ __initcall(pci_proc_init);
#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pci_proc_attach_device);
-EXPORT_SYMBOL(pci_proc_attach_bus);
EXPORT_SYMBOL(pci_proc_detach_bus);
#endif
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index f28ebdd3958..605f0df0bfb 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1342,6 +1342,32 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
pci_do_fixups(dev, start, end);
}
+/* Enable 1k I/O space granularity on the Intel P64H2 */
+static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev)
+{
+ u16 en1k;
+ u8 io_base_lo, io_limit_lo;
+ unsigned long base, limit;
+ struct resource *res = dev->resource + PCI_BRIDGE_RESOURCES;
+
+ pci_read_config_word(dev, 0x40, &en1k);
+
+ if (en1k & 0x200) {
+ printk(KERN_INFO "PCI: Enable I/O Space to 1 KB Granularity\n");
+
+ pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
+ pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
+ base = (io_base_lo & (PCI_IO_RANGE_MASK | 0x0c)) << 8;
+ limit = (io_limit_lo & (PCI_IO_RANGE_MASK | 0x0c)) << 8;
+
+ if (base <= limit) {
+ res->start = base;
+ res->end = limit + 0x3ff;
+ }
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
+
EXPORT_SYMBOL(pcie_mch_quirk);
#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pci_fixup_device);
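
[Annotation] quirk_p64h2_1k_io() re-decodes the bridge I/O window assuming 1 KB granularity: with the EN1K bit set, bits 3:2 of PCI_IO_BASE/PCI_IO_LIMIT contribute address bits 11:10, so the mask widens from 0xf0 to 0xfc and the limit is padded to a 0x3ff boundary instead of 0xfff. A worked example of the decode with an assumed register pair:

#include <stdio.h>

#define PCI_IO_RANGE_MASK 0xf0    /* standard decode: low nibble is type bits */

int main(void)
{
    unsigned char io_base_lo  = 0x24;    /* example register values */
    unsigned char io_limit_lo = 0x28;

    /* 4 KB granularity decode (standard bridge) */
    unsigned long base4k  = (io_base_lo  & PCI_IO_RANGE_MASK) << 8;
    unsigned long limit4k = ((io_limit_lo & PCI_IO_RANGE_MASK) << 8) + 0xfff;

    /* 1 KB granularity decode (P64H2 with EN1K set): bits 3:2 are valid too */
    unsigned long base1k  = (io_base_lo  & (PCI_IO_RANGE_MASK | 0x0c)) << 8;
    unsigned long limit1k = ((io_limit_lo & (PCI_IO_RANGE_MASK | 0x0c)) << 8) + 0x3ff;

    printf("4K window: 0x%04lx - 0x%04lx\n", base4k, limit4k);
    printf("1K window: 0x%04lx - 0x%04lx\n", base1k, limit1k);
    return 0;
}
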
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 27a294b6965..1a6bf9de166 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -48,6 +48,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
* in question is not being used by a driver.
* Returns 0 on success.
*/
+#if 0
int pci_remove_device_safe(struct pci_dev *dev)
{
if (pci_dev_driver(dev))
@@ -55,7 +56,7 @@ int pci_remove_device_safe(struct pci_dev *dev)
pci_destroy_dev(dev);
return 0;
}
-EXPORT_SYMBOL(pci_remove_device_safe);
+#endif /* 0 */
void pci_remove_bus(struct pci_bus *pci_bus)
{
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 0252582b91c..621ec459d27 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -920,6 +920,37 @@ pcmcia_device_stringattr(prod_id2, prod_id[1]);
pcmcia_device_stringattr(prod_id3, prod_id[2]);
pcmcia_device_stringattr(prod_id4, prod_id[3]);
+
+static ssize_t pcmcia_show_pm_state(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
+
+ if (p_dev->dev.power.power_state.event != PM_EVENT_ON)
+ return sprintf(buf, "off\n");
+ else
+ return sprintf(buf, "on\n");
+}
+
+static ssize_t pcmcia_store_pm_state(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
+ int ret = 0;
+
+ if (!count)
+ return -EINVAL;
+
+ if ((p_dev->dev.power.power_state.event == PM_EVENT_ON) &&
+ (!strncmp(buf, "off", 3)))
+ ret = dpm_runtime_suspend(dev, PMSG_SUSPEND);
+ else if ((p_dev->dev.power.power_state.event != PM_EVENT_ON) &&
+ (!strncmp(buf, "on", 2)))
+ dpm_runtime_resume(dev);
+
+ return ret ? ret : count;
+}
+
+
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
@@ -945,8 +976,9 @@ static ssize_t pcmcia_store_allow_func_id_match(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
- if (!count)
- return -EINVAL;
+
+ if (!count)
+ return -EINVAL;
down(&p_dev->socket->skt_sem);
p_dev->allow_func_id_match = 1;
@@ -959,6 +991,7 @@ static ssize_t pcmcia_store_allow_func_id_match(struct device *dev,
static struct device_attribute pcmcia_dev_attrs[] = {
__ATTR(function, 0444, func_show, NULL),
+ __ATTR(pm_state, 0644, pcmcia_show_pm_state, pcmcia_store_pm_state),
__ATTR_RO(func_id),
__ATTR_RO(manf_id),
__ATTR_RO(card_id),
diff --git a/drivers/pcmcia/pxa2xx_mainstone.c b/drivers/pcmcia/pxa2xx_mainstone.c
index 5d957dfe23d..fda06941e73 100644
--- a/drivers/pcmcia/pxa2xx_mainstone.c
+++ b/drivers/pcmcia/pxa2xx_mainstone.c
@@ -171,27 +171,22 @@ static int __init mst_pcmcia_init(void)
{
int ret;
- mst_pcmcia_device = kzalloc(sizeof(*mst_pcmcia_device), GFP_KERNEL);
+ mst_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
if (!mst_pcmcia_device)
return -ENOMEM;
- mst_pcmcia_device->name = "pxa2xx-pcmcia";
+
mst_pcmcia_device->dev.platform_data = &mst_pcmcia_ops;
- ret = platform_device_register(mst_pcmcia_device);
+ ret = platform_device_add(mst_pcmcia_device);
+
if (ret)
- kfree(mst_pcmcia_device);
+ platform_device_put(mst_pcmcia_device);
return ret;
}
static void __exit mst_pcmcia_exit(void)
{
- /*
- * This call is supposed to free our mst_pcmcia_device.
- * Unfortunately platform_device don't have a free method, and
- * we can't assume it's free of any reference at this point so we
- * can't free it either.
- */
platform_device_unregister(mst_pcmcia_device);
}
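
[Annotation] mst_pcmcia_init() now builds the device with platform_device_alloc(), registers it with platform_device_add(), and drops the reference with platform_device_put() if registration fails, which is why the old "can't free it" comment in the exit path could be removed. A stand-alone sketch of that alloc/add/put shape using an invented refcounted object, not the platform bus API itself:

#include <stdio.h>
#include <stdlib.h>

struct toy_pdev { int refcnt; const char *name; };

static struct toy_pdev *toy_alloc(const char *name)
{
    struct toy_pdev *p = calloc(1, sizeof(*p));
    if (p) { p->refcnt = 1; p->name = name; }
    return p;
}

static void toy_put(struct toy_pdev *p)
{
    if (p && --p->refcnt == 0)
        free(p);
}

static int toy_add(struct toy_pdev *p)
{
    (void)p;
    return -1;                    /* pretend registration failed */
}

int main(void)
{
    struct toy_pdev *pdev = toy_alloc("pxa2xx-pcmcia");
    int ret;

    if (!pdev)
        return 1;
    ret = toy_add(pdev);
    if (ret)
        toy_put(pdev);            /* failure: drop our reference, object is freed */
    printf("add returned %d\n", ret);
    return 0;
}
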
diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c
index b5fdeec20b1..fd364736895 100644
--- a/drivers/pcmcia/pxa2xx_sharpsl.c
+++ b/drivers/pcmcia/pxa2xx_sharpsl.c
@@ -36,9 +36,18 @@
struct scoop_pcmcia_config *platform_scoop_config;
#define SCOOP_DEV platform_scoop_config->devs
-static void sharpsl_pcmcia_init_reset(struct scoop_pcmcia_dev *scoopdev)
+static void sharpsl_pcmcia_init_reset(struct soc_pcmcia_socket *skt)
{
+ struct scoop_pcmcia_dev *scoopdev = &SCOOP_DEV[skt->nr];
+
reset_scoop(scoopdev->dev);
+
+ /* Shared power controls need to be handled carefully */
+ if (platform_scoop_config->power_ctrl)
+ platform_scoop_config->power_ctrl(scoopdev->dev, 0x0000, skt->nr);
+ else
+ write_scoop_reg(scoopdev->dev, SCOOP_CPR, 0x0000);
+
scoopdev->keep_vs = NO_KEEP_VS;
scoopdev->keep_rd = 0;
}
@@ -208,26 +217,17 @@ static int sharpsl_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
static void sharpsl_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
{
- sharpsl_pcmcia_init_reset(&SCOOP_DEV[skt->nr]);
+ sharpsl_pcmcia_init_reset(skt);
/* Enable interrupt */
write_scoop_reg(SCOOP_DEV[skt->nr].dev, SCOOP_IMR, 0x00C0);
write_scoop_reg(SCOOP_DEV[skt->nr].dev, SCOOP_MCR, 0x0101);
SCOOP_DEV[skt->nr].keep_vs = NO_KEEP_VS;
-
- if (machine_is_collie())
- /* We need to disable SS_OUTPUT_ENA here. */
- write_scoop_reg(SCOOP_DEV[skt->nr].dev, SCOOP_CPR, read_scoop_reg(SCOOP_DEV[skt->nr].dev, SCOOP_CPR) & ~0x0080);
}
static void sharpsl_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
{
- /* CF_BUS_OFF */
- sharpsl_pcmcia_init_reset(&SCOOP_DEV[skt->nr]);
-
- if (machine_is_collie())
- /* We need to disable SS_OUTPUT_ENA here. */
- write_scoop_reg(SCOOP_DEV[skt->nr].dev, SCOOP_CPR, read_scoop_reg(SCOOP_DEV[skt->nr].dev, SCOOP_CPR) & ~0x0080);
+ sharpsl_pcmcia_init_reset(skt);
}
static struct pcmcia_low_level sharpsl_pcmcia_ops = {
@@ -263,30 +263,25 @@ static int __init sharpsl_pcmcia_init(void)
{
int ret;
- sharpsl_pcmcia_ops.nr=platform_scoop_config->num_devs;
- sharpsl_pcmcia_device = kzalloc(sizeof(*sharpsl_pcmcia_device), GFP_KERNEL);
+ sharpsl_pcmcia_ops.nr = platform_scoop_config->num_devs;
+ sharpsl_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
+
if (!sharpsl_pcmcia_device)
return -ENOMEM;
- sharpsl_pcmcia_device->name = "pxa2xx-pcmcia";
sharpsl_pcmcia_device->dev.platform_data = &sharpsl_pcmcia_ops;
- sharpsl_pcmcia_device->dev.parent=platform_scoop_config->devs[0].dev;
+ sharpsl_pcmcia_device->dev.parent = platform_scoop_config->devs[0].dev;
+
+ ret = platform_device_add(sharpsl_pcmcia_device);
- ret = platform_device_register(sharpsl_pcmcia_device);
if (ret)
- kfree(sharpsl_pcmcia_device);
+ platform_device_put(sharpsl_pcmcia_device);
return ret;
}
static void __exit sharpsl_pcmcia_exit(void)
{
- /*
- * This call is supposed to free our sharpsl_pcmcia_device.
- * Unfortunately platform_device don't have a free method, and
- * we can't assume it's free of any reference at this point so we
- * can't free it either.
- */
platform_device_unregister(sharpsl_pcmcia_device);
}
diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c
index 7a7744662d5..5ab1cdef7c4 100644
--- a/drivers/pcmcia/socket_sysfs.c
+++ b/drivers/pcmcia/socket_sysfs.c
@@ -98,6 +98,30 @@ static ssize_t pccard_store_insert(struct class_device *dev, const char *buf, si
}
static CLASS_DEVICE_ATTR(card_insert, 0200, NULL, pccard_store_insert);
+
+static ssize_t pccard_show_card_pm_state(struct class_device *dev, char *buf)
+{
+ struct pcmcia_socket *s = to_socket(dev);
+ return sprintf(buf, "%s\n", s->state & SOCKET_SUSPEND ? "off" : "on");
+}
+
+static ssize_t pccard_store_card_pm_state(struct class_device *dev, const char *buf, size_t count)
+{
+ ssize_t ret = -EINVAL;
+ struct pcmcia_socket *s = to_socket(dev);
+
+ if (!count)
+ return -EINVAL;
+
+ if (!(s->state & SOCKET_SUSPEND) && !strncmp(buf, "off", 3))
+ ret = pcmcia_suspend_card(s);
+ else if ((s->state & SOCKET_SUSPEND) && !strncmp(buf, "on", 2))
+ ret = pcmcia_resume_card(s);
+
+ return ret ? -ENODEV : count;
+}
+static CLASS_DEVICE_ATTR(card_pm_state, 0644, pccard_show_card_pm_state, pccard_store_card_pm_state);
+
static ssize_t pccard_store_eject(struct class_device *dev, const char *buf, size_t count)
{
ssize_t ret;
@@ -320,6 +344,7 @@ static struct class_device_attribute *pccard_socket_attributes[] = {
&class_device_attr_card_vpp,
&class_device_attr_card_vcc,
&class_device_attr_card_insert,
+ &class_device_attr_card_pm_state,
&class_device_attr_card_eject,
&class_device_attr_card_irq_mask,
&class_device_attr_available_resources_setup_done,
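
[Annotation] Both new attributes, pm_state on the PCMCIA device and card_pm_state on the socket, follow the usual sysfs store convention: reject an empty write, compare the buffer against "off"/"on" with strncmp, act only when the state actually changes, and return the consumed byte count on success. The parsing part in isolation, with the suspend/resume calls stubbed out:

#include <stdio.h>
#include <string.h>

static int suspended;

static long store_pm_state(const char *buf, size_t count)
{
    long ret = 0;

    if (!count)
        return -22;               /* -EINVAL */

    if (!suspended && !strncmp(buf, "off", 3))
        suspended = 1;            /* stand-in for the suspend call */
    else if (suspended && !strncmp(buf, "on", 2))
        suspended = 0;            /* stand-in for the resume call */

    return ret ? ret : (long)count;   /* success means "consumed count bytes" */
}

int main(void)
{
    printf("%ld\n", store_pm_state("off\n", 4));   /* consumes 4 bytes */
    printf("%ld\n", store_pm_state("on\n", 3));    /* consumes 3 bytes */
    printf("%ld\n", store_pm_state("", 0));        /* -EINVAL */
    return 0;
}
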
diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
index 24c547ef512..0574efd7828 100644
--- a/drivers/pcmcia/vrc4171_card.c
+++ b/drivers/pcmcia/vrc4171_card.c
@@ -1,7 +1,7 @@
/*
* vrc4171_card.c, NEC VRC4171 Card Controller driver for Socket Services.
*
- * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -33,7 +33,7 @@
#include "i82365.h"
MODULE_DESCRIPTION("NEC VRC4171 Card Controllers driver for Socket Services");
-MODULE_AUTHOR("Yoichi Yuasa <yuasa@hh.iij4u.or.jp>");
+MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>");
MODULE_LICENSE("GPL");
#define CARD_MAX_SLOTS 2
diff --git a/drivers/pcmcia/vrc4173_cardu.c b/drivers/pcmcia/vrc4173_cardu.c
index 1b277d2c1c9..57f38dba0a4 100644
--- a/drivers/pcmcia/vrc4173_cardu.c
+++ b/drivers/pcmcia/vrc4173_cardu.c
@@ -6,7 +6,7 @@
* NEC VRC4173 CARDU driver for Socket Services
* (This device doesn't support CardBus. it is supporting only 16bit PC Card.)
*
- * Copyright 2002,2003 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright 2002,2003 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -41,7 +41,7 @@
#include "vrc4173_cardu.h"
MODULE_DESCRIPTION("NEC VRC4173 CARDU driver for Socket Services");
-MODULE_AUTHOR("Yoichi Yuasa <yuasa@hh.iij4u.or.jp>");
+MODULE_AUTHOR("Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>");
MODULE_LICENSE("GPL");
static int vrc4173_cardu_slots;
@@ -561,7 +561,7 @@ static int __devinit vrc4173_cardu_init(void)
{
vrc4173_cardu_slots = 0;
- return pci_module_init(&vrc4173_cardu_driver);
+ return pci_register_driver(&vrc4173_cardu_driver);
}
static void __devexit vrc4173_cardu_exit(void)
diff --git a/drivers/pcmcia/vrc4173_cardu.h b/drivers/pcmcia/vrc4173_cardu.h
index 113726f7cf9..7d77c74120c 100644
--- a/drivers/pcmcia/vrc4173_cardu.h
+++ b/drivers/pcmcia/vrc4173_cardu.h
@@ -5,7 +5,7 @@
* BRIEF MODULE DESCRIPTION
* Include file for NEC VRC4173 CARDU.
*
- * Copyright 2002 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright 2002 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index 6b7583f497d..a1f0b0ba2bf 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
+++ b/drivers/pnp/pnpbios/bioscalls.c
@@ -31,15 +31,6 @@ static struct {
} pnp_bios_callpoint;
-/* The PnP BIOS entries in the GDT */
-#define PNP_GDT (GDT_ENTRY_PNPBIOS_BASE * 8)
-
-#define PNP_CS32 (PNP_GDT+0x00) /* segment for calling fn */
-#define PNP_CS16 (PNP_GDT+0x08) /* code segment for BIOS */
-#define PNP_DS (PNP_GDT+0x10) /* data segment for BIOS */
-#define PNP_TS1 (PNP_GDT+0x18) /* transfer data segment */
-#define PNP_TS2 (PNP_GDT+0x20) /* another data segment */
-
/*
* These are some opcodes for a "static asmlinkage"
* As this code is *not* executed inside the linux kernel segment, but in a
@@ -67,16 +58,11 @@ __asm__(
".previous \n"
);
-#define Q_SET_SEL(cpu, selname, address, size) \
-do { \
-set_base(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], __va((u32)(address))); \
-set_limit(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], size); \
-} while(0)
-
#define Q2_SET_SEL(cpu, selname, address, size) \
do { \
-set_base(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], (u32)(address)); \
-set_limit(per_cpu(cpu_gdt_table,cpu)[(selname) >> 3], size); \
+struct desc_struct *gdt = get_cpu_gdt_table((cpu)); \
+set_base(gdt[(selname) >> 3], (u32)(address)); \
+set_limit(gdt[(selname) >> 3], size); \
} while(0)
static struct desc_struct bad_bios_desc = { 0, 0x00409200 };
@@ -115,8 +101,8 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
return PNP_FUNCTION_NOT_SUPPORTED;
cpu = get_cpu();
- save_desc_40 = per_cpu(cpu_gdt_table,cpu)[0x40 / 8];
- per_cpu(cpu_gdt_table,cpu)[0x40 / 8] = bad_bios_desc;
+ save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
+ get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
/* On some boxes IRQ's during PnP BIOS calls are deadly. */
spin_lock_irqsave(&pnp_bios_lock, flags);
@@ -158,7 +144,7 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
);
spin_unlock_irqrestore(&pnp_bios_lock, flags);
- per_cpu(cpu_gdt_table,cpu)[0x40 / 8] = save_desc_40;
+ get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
put_cpu();
/* If we get here and this is set then the PnP BIOS faulted on us. */
@@ -290,12 +276,15 @@ int pnp_bios_dev_node_info(struct pnp_dev_node_info *data)
static int __pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node *data)
{
u16 status;
+ u16 tmp_nodenum;
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
if ( !boot && pnpbios_dont_use_current_config )
return PNP_FUNCTION_NOT_SUPPORTED;
+ tmp_nodenum = *nodenum;
status = call_pnp_bios(PNP_GET_SYS_DEV_NODE, 0, PNP_TS1, 0, PNP_TS2, boot ? 2 : 1, PNP_DS, 0,
- nodenum, sizeof(char), data, 65536);
+ &tmp_nodenum, sizeof(tmp_nodenum), data, 65536);
+ *nodenum = tmp_nodenum;
return status;
}
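
[Annotation] The __pnp_bios_get_dev_node() change copies the 8-bit node number through a 16-bit temporary before handing it to the BIOS call, presumably because the firmware writes a full 16-bit value through that pointer, which would clobber the byte adjacent to a plain u8 argument. A small demonstration of the effect and of the bounce-variable fix; the fake_bios_write16() helper below is invented for illustration:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Pretend firmware that always writes 16 bits through the pointer it gets. */
static void fake_bios_write16(void *p, uint16_t value)
{
    memcpy(p, &value, sizeof(value));
}

int main(void)
{
    struct { uint8_t nodenum; uint8_t neighbour; } s = { 0, 0xAA };
    uint16_t tmp;

    fake_bios_write16(&s.nodenum, 0x0005);    /* old style: u8 target, overruns it */
    printf("direct:  nodenum=%u neighbour=0x%02X (clobbered)\n",
           s.nodenum, s.neighbour);

    s.neighbour = 0xAA;
    tmp = s.nodenum;
    fake_bios_write16(&tmp, 0x0005);          /* new style: 16-bit bounce variable */
    s.nodenum = (uint8_t)tmp;
    printf("bounced: nodenum=%u neighbour=0x%02X (intact)\n",
           s.nodenum, s.neighbour);
    return 0;
}
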
@@ -535,10 +524,12 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
_set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
- for(i=0; i < NR_CPUS; i++)
- {
- Q2_SET_SEL(i, PNP_CS32, &pnp_bios_callfunc, 64 * 1024);
- Q_SET_SEL(i, PNP_CS16, header->fields.pm16cseg, 64 * 1024);
- Q_SET_SEL(i, PNP_DS, header->fields.pm16dseg, 64 * 1024);
- }
+ for (i = 0; i < NR_CPUS; i++) {
+ struct desc_struct *gdt = get_cpu_gdt_table(i);
+ if (!gdt)
+ continue;
+ set_base(gdt[GDT_ENTRY_PNPBIOS_CS32], &pnp_bios_callfunc);
+ set_base(gdt[GDT_ENTRY_PNPBIOS_CS16], __va(header->fields.pm16cseg));
+ set_base(gdt[GDT_ENTRY_PNPBIOS_DS], __va(header->fields.pm16dseg));
+ }
}
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 4f7ed4bd3be..94e30fe4b8f 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -24,6 +24,8 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
#include "rio.h"
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 30a11436e24..bef9316e95d 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -15,6 +15,7 @@
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/stat.h>
+#include <linux/sched.h> /* for capable() */
#include "rio.h"
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 3ca1011ceaa..5e382470faa 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -23,6 +23,7 @@
#include <linux/rio_regs.h>
#include <linux/module.h>
#include <linux/spinlock.h>
+#include <linux/slab.h>
#include "rio.h"
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index c99a2fe92fb..9803c9352d7 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -2,7 +2,7 @@
# Makefile for the S/390 specific device drivers
#
-obj-y += s390mach.o sysinfo.o
+obj-y += s390mach.o sysinfo.o s390_rdev.o
obj-y += cio/ block/ char/ crypto/ net/ scsi/
drivers-y += drivers/s390/built-in.o
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 6e7d7b06421..6f50cc9323d 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -1,11 +1,11 @@
-if ARCH_S390
+if S390
comment "S/390 block device drivers"
- depends on ARCH_S390
+ depends on S390
config BLK_DEV_XPRAM
tristate "XPRAM disk support"
- depends on ARCH_S390
+ depends on S390
help
Select this option if you want to use your expanded storage on S/390
or zSeries as a disk. This is useful as a _fast_ swap device if you
@@ -49,7 +49,7 @@ config DASD_FBA
config DASD_DIAG
tristate "Support for DIAG access to Disks"
- depends on DASD && ( ARCH_S390X = 'n' || EXPERIMENTAL)
+ depends on DASD && ( 64BIT = 'n' || EXPERIMENTAL)
help
Select this option if you want to use Diagnose250 command to access
Disks under VM. If you are not running under VM or unsure what it is,
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 7008d32433b..9c25654b1e7 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -7,7 +7,7 @@
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
*
- * $Revision: 1.167 $
+ * $Revision: 1.172 $
*/
#include <linux/config.h>
@@ -18,6 +18,7 @@
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
+#include <linux/hdreg.h>
#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
@@ -604,7 +605,7 @@ dasd_smalloc_request(char *magic, int cplength, int datasize,
void
dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
{
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
struct ccw1 *ccw;
/* Clear any idals used for the request. */
@@ -1035,7 +1036,7 @@ dasd_end_request(struct request *req, int uptodate)
if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
BUG();
add_disk_randomness(req->rq_disk);
- end_that_request_last(req);
+ end_that_request_last(req, uptodate);
}
/*
@@ -1224,6 +1225,12 @@ __dasd_start_head(struct dasd_device * device)
if (list_empty(&device->ccw_queue))
return;
cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
+ /* check FAILFAST */
+ if (device->stopped & ~DASD_STOPPED_PENDING &&
+ test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags)) {
+ cqr->status = DASD_CQR_FAILED;
+ dasd_schedule_bh(device);
+ }
if ((cqr->status == DASD_CQR_QUEUED) &&
(!device->stopped)) {
/* try to start the first I/O that can be started */
@@ -1323,7 +1330,7 @@ void
dasd_schedule_bh(struct dasd_device * device)
{
/* Protect against rescheduling. */
- if (atomic_compare_and_swap (0, 1, &device->tasklet_scheduled))
+ if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
return;
dasd_get_device(device);
tasklet_hi_schedule(&device->tasklet);
@@ -1717,12 +1724,35 @@ dasd_release(struct inode *inp, struct file *filp)
return 0;
}
+/*
+ * Return disk geometry.
+ */
+static int
+dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct dasd_device *device;
+
+ device = bdev->bd_disk->private_data;
+ if (!device)
+ return -ENODEV;
+
+ if (!device->discipline ||
+ !device->discipline->fill_geometry)
+ return -EINVAL;
+
+ device->discipline->fill_geometry(device, geo);
+ geo->start = get_start_sect(bdev) >> device->s2b_shift;
+ return 0;
+}
+
struct block_device_operations
dasd_device_operations = {
.owner = THIS_MODULE,
.open = dasd_open,
.release = dasd_release,
.ioctl = dasd_ioctl,
+ .compat_ioctl = dasd_compat_ioctl,
+ .getgeo = dasd_getgeo,
};
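
[Annotation] The new dasd_getgeo() lets the block core answer geometry queries straight from block_device_operations: the discipline fills cylinders/heads/sectors, and the partition start is converted from 512-byte sectors to device blocks by shifting right by s2b_shift, which appears to be log2(blocksize / 512). A quick check of that conversion with example values:

#include <stdio.h>

int main(void)
{
    unsigned long start_sect = 24576;    /* partition start in 512-byte sectors */
    unsigned int blocksize   = 4096;     /* DASD block size (example) */
    unsigned int s2b_shift   = 0;

    while ((512u << s2b_shift) < blocksize)
        s2b_shift++;                     /* log2(blocksize / 512), i.e. 3 here */

    printf("geo.start = %lu device blocks (shift %u)\n",
           start_sect >> s2b_shift, s2b_shift);
    return 0;
}
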
@@ -1750,8 +1780,10 @@ dasd_exit(void)
* SECTION: common functions for ccw_driver use
*/
-/* initial attempt at a probe function. this can be simplified once
- * the other detection code is gone */
+/*
+ * Initial attempt at a probe function. this can be simplified once
+ * the other detection code is gone.
+ */
int
dasd_generic_probe (struct ccw_device *cdev,
struct dasd_discipline *discipline)
@@ -1770,8 +1802,10 @@ dasd_generic_probe (struct ccw_device *cdev,
return ret;
}
-/* this will one day be called from a global not_oper handler.
- * It is also used by driver_unregister during module unload */
+/*
+ * This will one day be called from a global not_oper handler.
+ * It is also used by driver_unregister during module unload.
+ */
void
dasd_generic_remove (struct ccw_device *cdev)
{
@@ -1798,9 +1832,11 @@ dasd_generic_remove (struct ccw_device *cdev)
dasd_delete_device(device);
}
-/* activate a device. This is called from dasd_{eckd,fba}_probe() when either
+/*
+ * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
* the device is detected for the first time and is supposed to be used
- * or the user has started activation through sysfs */
+ * or the user has started activation through sysfs.
+ */
int
dasd_generic_set_online (struct ccw_device *cdev,
struct dasd_discipline *discipline)
@@ -1917,7 +1953,6 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
if (cqr->status == DASD_CQR_IN_IO)
cqr->status = DASD_CQR_FAILED;
device->stopped |= DASD_STOPPED_DC_EIO;
- dasd_schedule_bh(device);
} else {
list_for_each_entry(cqr, &device->ccw_queue, list)
if (cqr->status == DASD_CQR_IN_IO) {
@@ -1927,6 +1962,7 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
device->stopped |= DASD_STOPPED_DC_WAIT;
dasd_set_timer(device, 0);
}
+ dasd_schedule_bh(device);
ret = 1;
break;
case CIO_OPER:
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index ab8754e566b..ba80fdea7eb 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -6,7 +6,7 @@
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
*
- * $Revision: 1.51 $
+ * $Revision: 1.53 $
*/
#include <linux/config.h>
@@ -25,6 +25,7 @@
#include <asm/io.h>
#include <asm/s390_ext.h>
#include <asm/todclk.h>
+#include <asm/vtoc.h>
#include "dasd_int.h"
#include "dasd_diag.h"
@@ -74,7 +75,7 @@ dia250(void *iob, int cmd)
int rc;
__asm__ __volatile__(
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
" lghi %0,3\n"
" lgr 0,%3\n"
" diag 0,%2,0x250\n"
@@ -329,7 +330,7 @@ dasd_diag_check_device(struct dasd_device *device)
struct dasd_diag_private *private;
struct dasd_diag_characteristics *rdc_data;
struct dasd_diag_bio bio;
- struct dasd_diag_cms_label *label;
+ struct vtoc_cms_label *label;
blocknum_t end_block;
unsigned int sb, bsize;
int rc;
@@ -380,7 +381,7 @@ dasd_diag_check_device(struct dasd_device *device)
mdsk_term_io(device);
/* figure out blocksize of device */
- label = (struct dasd_diag_cms_label *) get_zeroed_page(GFP_KERNEL);
+ label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL);
if (label == NULL) {
DEV_MESSAGE(KERN_WARNING, device, "%s",
"No memory to allocate initialization request");
@@ -548,6 +549,8 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
}
cqr->retries = DIAG_MAX_RETRIES;
cqr->buildclk = get_clock();
+ if (req->flags & REQ_FAILFAST)
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->device = device;
cqr->expires = DIAG_TIMEOUT;
cqr->status = DASD_CQR_FILLED;
diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h
index df31484d73a..a4f80bd735f 100644
--- a/drivers/s390/block/dasd_diag.h
+++ b/drivers/s390/block/dasd_diag.h
@@ -6,7 +6,7 @@
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
*
- * $Revision: 1.8 $
+ * $Revision: 1.9 $
*/
#define MDSK_WRITE_REQ 0x01
@@ -44,29 +44,8 @@ struct dasd_diag_characteristics {
u8 rdev_features;
} __attribute__ ((packed, aligned(4)));
-struct dasd_diag_cms_label {
- u8 label_id[4];
- u8 vol_id[6];
- u16 version_id;
- u32 block_size;
- u32 origin_ptr;
- u32 usable_count;
- u32 formatted_count;
- u32 block_count;
- u32 used_count;
- u32 fst_size;
- u32 fst_count;
- u8 format_date[6];
- u8 reserved1[2];
- u32 disk_offset;
- u32 map_block;
- u32 hblk_disp;
- u32 user_disp;
- u8 reserved2[4];
- u8 segment_name[8];
-} __attribute__ ((packed));
-
-#ifdef CONFIG_ARCH_S390X
+
+#ifdef CONFIG_64BIT
#define DASD_DIAG_FLAGA_DEFAULT DASD_DIAG_FLAGA_FORMAT_64BIT
typedef u64 blocknum_t;
@@ -107,7 +86,7 @@ struct dasd_diag_rw_io {
struct dasd_diag_bio *bio_list;
u8 spare4[8];
} __attribute__ ((packed, aligned(8)));
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
#define DASD_DIAG_FLAGA_DEFAULT 0x0
typedef u32 blocknum_t;
@@ -146,4 +125,4 @@ struct dasd_diag_rw_io {
u32 interrupt_params;
u8 spare3[20];
} __attribute__ ((packed, aligned(8)));
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 811060e10c0..96eb4825858 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -7,7 +7,7 @@
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
*
- * $Revision: 1.71 $
+ * $Revision: 1.74 $
*/
#include <linux/config.h>
@@ -1041,7 +1041,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
/* Eckd can only do full blocks. */
return ERR_PTR(-EINVAL);
count += bv->bv_len >> (device->s2b_shift + 9);
-#if defined(CONFIG_ARCH_S390X)
+#if defined(CONFIG_64BIT)
if (idal_is_needed (page_address(bv->bv_page),
bv->bv_len))
cidaw += bv->bv_len >> (device->s2b_shift + 9);
@@ -1136,6 +1136,8 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
recid++;
}
}
+ if (req->flags & REQ_FAILFAST)
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->device = device;
cqr->expires = 5 * 60 * HZ; /* 5 minutes */
cqr->lpm = private->path_data.ppm;
@@ -1252,6 +1254,7 @@ dasd_eckd_release(struct block_device *bdev, int no, long args)
cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
cqr->device = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->retries = 0;
cqr->expires = 2 * HZ;
cqr->buildclk = get_clock();
@@ -1296,6 +1299,7 @@ dasd_eckd_reserve(struct block_device *bdev, int no, long args)
cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
cqr->device = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->retries = 0;
cqr->expires = 2 * HZ;
cqr->buildclk = get_clock();
@@ -1339,6 +1343,7 @@ dasd_eckd_steal_lock(struct block_device *bdev, int no, long args)
cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
cqr->device = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->retries = 0;
cqr->expires = 2 * HZ;
cqr->buildclk = get_clock();
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 28cb4613b7f..8ec75dc08e2 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -4,7 +4,7 @@
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
*
- * $Revision: 1.40 $
+ * $Revision: 1.41 $
*/
#include <linux/config.h>
@@ -271,7 +271,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
/* Fba can only do full blocks. */
return ERR_PTR(-EINVAL);
count += bv->bv_len >> (device->s2b_shift + 9);
-#if defined(CONFIG_ARCH_S390X)
+#if defined(CONFIG_64BIT)
if (idal_is_needed (page_address(bv->bv_page),
bv->bv_len))
cidaw += bv->bv_len / blksize;
@@ -352,6 +352,8 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
recid++;
}
}
+ if (req->flags & REQ_FAILFAST)
+ set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->device = device;
cqr->expires = 5 * 60 * HZ; /* 5 minutes */
cqr->retries = 32;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 9fab04f3056..e4b401500b0 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -6,7 +6,7 @@
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
*
- * $Revision: 1.65 $
+ * $Revision: 1.68 $
*/
#ifndef DASD_INT_H
@@ -208,6 +208,7 @@ struct dasd_ccw_req {
/* per dasd_ccw_req flags */
#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */
+#define DASD_CQR_FLAGS_FAILFAST 1 /* FAILFAST */
/* Signature for error recovery functions. */
typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
@@ -526,6 +527,7 @@ void dasd_ioctl_exit(void);
int dasd_ioctl_no_register(struct module *, int, dasd_ioctl_fn_t);
int dasd_ioctl_no_unregister(struct module *, int, dasd_ioctl_fn_t);
int dasd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+long dasd_compat_ioctl(struct file *, unsigned int, unsigned long);
/* externals in dasd_proc.c */
int dasd_proc_init(void);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 789595b3fa0..9396fcacb8f 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -7,7 +7,7 @@
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
*
- * $Revision: 1.47 $
+ * $Revision: 1.50 $
*
* i/o controls for the dasd driver.
*/
@@ -118,6 +118,18 @@ dasd_ioctl(struct inode *inp, struct file *filp,
return -EINVAL;
}
+long
+dasd_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int rval;
+
+ lock_kernel();
+ rval = dasd_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
+ unlock_kernel();
+
+ return (rval == -EINVAL) ? -ENOIOCTLCMD : rval;
+}
+
static int
dasd_ioctl_api_version(struct block_device *bdev, int no, long args)
{
@@ -352,6 +364,9 @@ dasd_ioctl_read_profile(struct block_device *bdev, int no, long args)
if (device == NULL)
return -ENODEV;
+ if (dasd_profile_level == DASD_PROFILE_OFF)
+ return -EIO;
+
if (copy_to_user((long __user *) args, (long *) &device->profile,
sizeof (struct dasd_profile_info_t)))
return -EFAULT;
@@ -483,33 +498,6 @@ dasd_ioctl_set_ro(struct block_device *bdev, int no, long args)
}
/*
- * Return disk geometry.
- */
-static int
-dasd_ioctl_getgeo(struct block_device *bdev, int no, long args)
-{
- struct hd_geometry geo = { 0, };
- struct dasd_device *device;
-
- device = bdev->bd_disk->private_data;
- if (device == NULL)
- return -ENODEV;
-
- if (device == NULL || device->discipline == NULL ||
- device->discipline->fill_geometry == NULL)
- return -EINVAL;
-
- geo = (struct hd_geometry) {};
- device->discipline->fill_geometry(device, &geo);
- geo.start = get_start_sect(bdev) >> device->s2b_shift;
- if (copy_to_user((struct hd_geometry __user *) args, &geo,
- sizeof (struct hd_geometry)))
- return -EFAULT;
-
- return 0;
-}
-
-/*
* List of static ioctls.
*/
static struct { int no; dasd_ioctl_fn_t fn; } dasd_ioctls[] =
@@ -525,7 +513,6 @@ static struct { int no; dasd_ioctl_fn_t fn; } dasd_ioctls[] =
{ BIODASDPRRST, dasd_ioctl_reset_profile },
{ BLKROSET, dasd_ioctl_set_ro },
{ DASDAPIVER, dasd_ioctl_api_version },
- { HDIO_GETGEO, dasd_ioctl_getgeo },
{ -1, NULL }
};
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 4fde4118899..2e727f49ad1 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -15,7 +15,7 @@
#include <asm/io.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
-#include <asm/ccwdev.h> // for s390_root_dev_(un)register()
+#include <asm/s390_rdev.h>
//#define DCSSBLK_DEBUG /* Debug messages on/off */
#define DCSSBLK_NAME "dcssblk"
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index d428c909b8a..54ecd548c31 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -160,7 +160,7 @@ static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
".section __ex_table,\"a\"\n"
" .align 4\n"
" .long 0b,1b\n"
@@ -208,7 +208,7 @@ static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
".section __ex_table,\"a\"\n"
" .align 4\n"
" .long 0b,1b\n"
@@ -328,31 +328,27 @@ fail:
return 0;
}
-static int xpram_ioctl (struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
- struct hd_geometry __user *geo;
unsigned long size;
- if (cmd != HDIO_GETGEO)
- return -EINVAL;
+
/*
* get geometry: we have to fake one... trim the size to a
* multiple of 64 (32k): tell we have 16 sectors, 4 heads,
* whatever cylinders. Tell also that data starts at sector. 4.
*/
- geo = (struct hd_geometry __user *) arg;
size = (xpram_pages * 8) & ~0x3f;
- put_user(size >> 6, &geo->cylinders);
- put_user(4, &geo->heads);
- put_user(16, &geo->sectors);
- put_user(4, &geo->start);
+ geo->cylinders = size >> 6;
+ geo->heads = 4;
+ geo->sectors = 16;
+ geo->start = 4;
return 0;
}
static struct block_device_operations xpram_devops =
{
.owner = THIS_MODULE,
- .ioctl = xpram_ioctl,
+ .getgeo = xpram_getgeo,
};
/*
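The fake geometry above stays self-consistent, assuming the usual 4KB XPRAM page (8 sectors of 512 bytes): with, say, xpram_pages = 1024 the device holds size = 8192 sectors; trimming to a multiple of 64 changes nothing in that case, cylinders = size >> 6 = 128, and 4 heads * 16 sectors = 64 sectors per cylinder gives back exactly 128 * 64 = 8192 sectors, with data reported to start at sector 4.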
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 75419cf9d35..1f060914cfa 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/kdev_t.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/vt_kern.h>
#include <linux/init.h>
#include <linux/console.h>
@@ -432,8 +433,6 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
if (count > slen)
count = slen;
} else
- if (count >= TTY_FLIPBUF_SIZE - tty->flip.count)
- count = TTY_FLIPBUF_SIZE - tty->flip.count - 1;
EBCASC(raw->inbuf, count);
cchar = ctrlchar_handle(raw->inbuf, count, tty);
switch (cchar & CTRLCHAR_MASK) {
@@ -441,28 +440,20 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
break;
case CTRLCHAR_CTRL:
- tty->flip.count++;
- *tty->flip.flag_buf_ptr++ = TTY_NORMAL;
- *tty->flip.char_buf_ptr++ = cchar;
+ tty_insert_flip_char(tty, cchar, TTY_NORMAL);
tty_flip_buffer_push(raw->tty);
break;
case CTRLCHAR_NONE:
- memcpy(tty->flip.char_buf_ptr,
- raw->inbuf, count);
if (count < 2 ||
- (strncmp(raw->inbuf+count-2, "^n", 2) &&
- strncmp(raw->inbuf+count-2, "\252n", 2)) ) {
- /* don't add the auto \n */
- tty->flip.char_buf_ptr[count] = '\n';
- memset(tty->flip.flag_buf_ptr,
- TTY_NORMAL, count + 1);
+ (strncmp(raw->inbuf+count-2, "\252n", 2) &&
+ strncmp(raw->inbuf+count-2, "^n", 2)) ) {
+ /* add the auto \n */
+ raw->inbuf[count] = '\n';
count++;
} else
- count-=2;
- tty->flip.char_buf_ptr += count;
- tty->flip.flag_buf_ptr += count;
- tty->flip.count += count;
+ count -= 2;
+ tty_insert_flip_string(tty, raw->inbuf, count);
tty_flip_buffer_push(raw->tty);
break;
}
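This 3215 console change, like the matching sclp_tty and sclp_vt220 hunks below, replaces direct manipulation of tty->flip (char_buf_ptr, flag_buf_ptr, count and the explicit TTY_FLIPBUF_SIZE overflow checks) with the tty_insert_flip_char()/tty_insert_flip_string() helpers, which handle buffer space themselves. The resulting pattern, roughly, assuming a valid struct tty_struct *tty and buf/count as in the interrupt handlers above:

	/* queue the received characters, all flagged TTY_NORMAL */
	tty_insert_flip_string(tty, buf, count);
	/* single control characters go in the same way */
	tty_insert_flip_char(tty, '\n', TTY_NORMAL);
	/* hand everything to the line discipline */
	tty_flip_buffer_push(tty);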
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 735a7fcdeff..5f6fa4c6784 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -319,9 +319,8 @@ fs3270_write(struct file *filp, const char *data, size_t count, loff_t *off)
/*
* process ioctl commands for the tube driver
*/
-static int
-fs3270_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static long
+fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct fs3270 *fp;
struct raw3270_iocb iocb;
@@ -331,6 +330,7 @@ fs3270_ioctl(struct inode *inode, struct file *filp,
if (!fp)
return -ENODEV;
rc = 0;
+ lock_kernel();
switch (cmd) {
case TUBICMD:
fp->read_command = arg;
@@ -356,6 +356,7 @@ fs3270_ioctl(struct inode *inode, struct file *filp,
rc = -EFAULT;
break;
}
+ unlock_kernel();
return rc;
}
@@ -491,12 +492,13 @@ fs3270_close(struct inode *inode, struct file *filp)
}
static struct file_operations fs3270_fops = {
- .owner = THIS_MODULE, /* owner */
- .read = fs3270_read, /* read */
- .write = fs3270_write, /* write */
- .ioctl = fs3270_ioctl, /* ioctl */
- .open = fs3270_open, /* open */
- .release = fs3270_close, /* release */
+ .owner = THIS_MODULE, /* owner */
+ .read = fs3270_read, /* read */
+ .write = fs3270_write, /* write */
+ .unlocked_ioctl = fs3270_ioctl, /* ioctl */
+ .compat_ioctl = fs3270_ioctl, /* ioctl */
+ .open = fs3270_open, /* open */
+ .release = fs3270_close, /* release */
};
/*
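fs3270 moves from the old .ioctl entry (called with the BKL held and with an inode argument) to .unlocked_ioctl, so the handler now takes lock_kernel()/unlock_kernel() itself; and since it never uses the inode, the very same function can also serve as .compat_ioctl. A sketch of that wiring for a hypothetical driver:

	static long sample_unlocked_ioctl(struct file *filp, unsigned int cmd,
					  unsigned long arg)
	{
		long rc = 0;

		lock_kernel();		/* keep the old BKL semantics for now */
		/* ... handle cmd ... */
		unlock_kernel();
		return rc;
	}

	static struct file_operations sample_fops = {
		.owner		= THIS_MODULE,
		.unlocked_ioctl	= sample_unlocked_ioctl,
		.compat_ioctl	= sample_unlocked_ioctl,	/* same prototype */
	};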
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
index 5a6cef2dfa1..80f7f31310e 100644
--- a/drivers/s390/char/sclp_cpi.c
+++ b/drivers/s390/char/sclp_cpi.c
@@ -204,7 +204,7 @@ cpi_module_init(void)
printk(KERN_WARNING "cpi: no control program identification "
"support\n");
sclp_unregister(&sclp_cpi_event);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
req = cpi_prepare_req();
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 83f75774df6..56fa6916889 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -32,7 +32,7 @@ do_load_quiesce_psw(void * __unused)
psw_t quiesce_psw;
int cpu;
- if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
+ if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
signal_processor(smp_processor_id(), sigp_stop);
/* Wait for all other cpus to enter stopped state */
for_each_online_cpu(cpu) {
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index a20d7c89341..6cbf067f1a8 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -13,6 +13,7 @@
#include <linux/kmod.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
@@ -496,25 +497,19 @@ sclp_tty_input(unsigned char* buf, unsigned int count)
case CTRLCHAR_SYSRQ:
break;
case CTRLCHAR_CTRL:
- sclp_tty->flip.count++;
- *sclp_tty->flip.flag_buf_ptr++ = TTY_NORMAL;
- *sclp_tty->flip.char_buf_ptr++ = cchar;
+ tty_insert_flip_char(sclp_tty, cchar, TTY_NORMAL);
tty_flip_buffer_push(sclp_tty);
break;
case CTRLCHAR_NONE:
/* send (normal) input to line discipline */
- memcpy(sclp_tty->flip.char_buf_ptr, buf, count);
if (count < 2 ||
- (strncmp ((const char *) buf + count - 2, "^n", 2) &&
- strncmp ((const char *) buf + count - 2, "\0252n", 2))) {
- sclp_tty->flip.char_buf_ptr[count] = '\n';
- count++;
+ (strncmp((const char *) buf + count - 2, "^n", 2) &&
+ strncmp((const char *) buf + count - 2, "\252n", 2))) {
+ /* add the auto \n */
+ tty_insert_flip_string(sclp_tty, buf, count);
+ tty_insert_flip_char(sclp_tty, '\n', TTY_NORMAL);
} else
- count -= 2;
- memset(sclp_tty->flip.flag_buf_ptr, TTY_NORMAL, count);
- sclp_tty->flip.char_buf_ptr += count;
- sclp_tty->flip.flag_buf_ptr += count;
- sclp_tty->flip.count += count;
+ tty_insert_flip_string(sclp_tty, buf, count - 2);
tty_flip_buffer_push(sclp_tty);
break;
}
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 06bd85824d7..9e02625c82c 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
@@ -482,16 +483,7 @@ sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
/* Send input to line discipline */
buffer++;
count--;
- /* Prevent buffer overrun by discarding input. Note that
- * because buffer_push works asynchronously, we cannot wait
- * for the buffer to be emptied. */
- if (count + sclp_vt220_tty->flip.count > TTY_FLIPBUF_SIZE)
- count = TTY_FLIPBUF_SIZE - sclp_vt220_tty->flip.count;
- memcpy(sclp_vt220_tty->flip.char_buf_ptr, buffer, count);
- memset(sclp_vt220_tty->flip.flag_buf_ptr, TTY_NORMAL, count);
- sclp_vt220_tty->flip.char_buf_ptr += count;
- sclp_vt220_tty->flip.flag_buf_ptr += count;
- sclp_vt220_tty->flip.count += count;
+ tty_insert_flip_string(sclp_vt220_tty, buffer, count);
tty_flip_buffer_push(sclp_vt220_tty);
break;
}
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 1efc9f21229..5ced2725d6c 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -65,7 +65,7 @@ static void
tapeblock_trigger_requeue(struct tape_device *device)
{
/* Protect against rescheduling. */
- if (atomic_compare_and_swap(0, 1, &device->blk_data.requeue_scheduled))
+ if (atomic_cmpxchg(&device->blk_data.requeue_scheduled, 0, 1) != 0)
return;
schedule_work(&device->blk_data.requeue_task);
}
@@ -78,7 +78,7 @@ tapeblock_end_request(struct request *req, int uptodate)
{
if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
BUG();
- end_that_request_last(req);
+ end_that_request_last(req, uptodate);
}
static void
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 86262a13f7c..5ce7ca38ace 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -37,6 +37,8 @@ static int tapechar_open(struct inode *,struct file *);
static int tapechar_release(struct inode *,struct file *);
static int tapechar_ioctl(struct inode *, struct file *, unsigned int,
unsigned long);
+static long tapechar_compat_ioctl(struct file *, unsigned int,
+ unsigned long);
static struct file_operations tape_fops =
{
@@ -44,6 +46,7 @@ static struct file_operations tape_fops =
.read = tapechar_read,
.write = tapechar_write,
.ioctl = tapechar_ioctl,
+ .compat_ioctl = tapechar_compat_ioctl,
.open = tapechar_open,
.release = tapechar_release,
};
@@ -463,6 +466,23 @@ tapechar_ioctl(struct inode *inp, struct file *filp,
return device->discipline->ioctl_fn(device, no, data);
}
+static long
+tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
+{
+ struct tape_device *device = filp->private_data;
+ int rval = -ENOIOCTLCMD;
+
+ if (device->discipline->ioctl_fn) {
+ lock_kernel();
+ rval = device->discipline->ioctl_fn(device, no, data);
+ unlock_kernel();
+ if (rval == -EINVAL)
+ rval = -ENOIOCTLCMD;
+ }
+
+ return rval;
+}
+
/*
* Initialize character device frontend.
*/
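The DASD block device above and the tape character device here gain the same style of compat handler: take the BKL, call the existing native ioctl routine, and translate -EINVAL into -ENOIOCTLCMD so that commands the driver does not recognize are handed back to the common compat code rather than failing outright. Generic shape of that wrapper (hypothetical names; the native handler is assumed to have the classic inode/file ioctl signature):

	static long sample_compat_ioctl(struct file *filp, unsigned int cmd,
					unsigned long arg)
	{
		int rval;

		lock_kernel();
		rval = sample_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
		unlock_kernel();

		/* unknown command: let the generic compat layer have a go */
		return (rval == -EINVAL) ? -ENOIOCTLCMD : rval;
	}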
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 5473c23fcb5..5acc0ace3d7 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -66,7 +66,7 @@ static int __diag288(enum vmwdt_func func, unsigned int timeout,
__cmdl = len;
err = 0;
asm volatile (
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
"diag %2,%4,0x288\n"
"1: \n"
".section .fixup,\"ax\"\n"
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index a1c52a68219..daf21e03b21 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -1,7 +1,7 @@
/*
* drivers/s390/cio/blacklist.c
* S/390 common I/O routines -- blacklisting of specific devices
- * $Revision: 1.35 $
+ * $Revision: 1.39 $
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
@@ -15,6 +15,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/device.h>
@@ -34,10 +35,10 @@
* These can be single devices or ranges of devices
*/
-/* 65536 bits to indicate if a devno is blacklisted or not */
-#define __BL_DEV_WORDS ((__MAX_SUBCHANNELS + (8*sizeof(long) - 1)) / \
+/* 65536 bits for each set to indicate if a devno is blacklisted or not */
+#define __BL_DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
(8*sizeof(long)))
-static unsigned long bl_dev[__BL_DEV_WORDS];
+static unsigned long bl_dev[__MAX_SSID + 1][__BL_DEV_WORDS];
typedef enum {add, free} range_action;
/*
@@ -45,21 +46,23 @@ typedef enum {add, free} range_action;
* (Un-)blacklist the devices from-to
*/
static inline void
-blacklist_range (range_action action, unsigned int from, unsigned int to)
+blacklist_range (range_action action, unsigned int from, unsigned int to,
+ unsigned int ssid)
{
if (!to)
to = from;
- if (from > to || to > __MAX_SUBCHANNELS) {
+ if (from > to || to > __MAX_SUBCHANNEL || ssid > __MAX_SSID) {
printk (KERN_WARNING "Invalid blacklist range "
- "0x%04x to 0x%04x, skipping\n", from, to);
+ "0.%x.%04x to 0.%x.%04x, skipping\n",
+ ssid, from, ssid, to);
return;
}
for (; from <= to; from++) {
if (action == add)
- set_bit (from, bl_dev);
+ set_bit (from, bl_dev[ssid]);
else
- clear_bit (from, bl_dev);
+ clear_bit (from, bl_dev[ssid]);
}
}
@@ -69,7 +72,7 @@ blacklist_range (range_action action, unsigned int from, unsigned int to)
* Shamelessly grabbed from dasd_devmap.c.
*/
static inline int
-blacklist_busid(char **str, int *id0, int *id1, int *devno)
+blacklist_busid(char **str, int *id0, int *ssid, int *devno)
{
int val, old_style;
char *sav;
@@ -86,7 +89,7 @@ blacklist_busid(char **str, int *id0, int *id1, int *devno)
goto confused;
val = simple_strtoul(*str, str, 16);
if (old_style || (*str)[0] != '.') {
- *id0 = *id1 = 0;
+ *id0 = *ssid = 0;
if (val < 0 || val > 0xffff)
goto confused;
*devno = val;
@@ -105,7 +108,7 @@ blacklist_busid(char **str, int *id0, int *id1, int *devno)
val = simple_strtoul(*str, str, 16);
if (val < 0 || val > 0xff || (*str)++[0] != '.')
goto confused;
- *id1 = val;
+ *ssid = val;
if (!isxdigit((*str)[0])) /* We require at least one hex digit */
goto confused;
val = simple_strtoul(*str, str, 16);
@@ -125,7 +128,7 @@ confused:
static inline int
blacklist_parse_parameters (char *str, range_action action)
{
- unsigned int from, to, from_id0, to_id0, from_id1, to_id1;
+ unsigned int from, to, from_id0, to_id0, from_ssid, to_ssid;
while (*str != 0 && *str != '\n') {
range_action ra = action;
@@ -142,23 +145,25 @@ blacklist_parse_parameters (char *str, range_action action)
*/
if (strncmp(str,"all,",4) == 0 || strcmp(str,"all") == 0 ||
strncmp(str,"all\n",4) == 0 || strncmp(str,"all ",4) == 0) {
- from = 0;
- to = __MAX_SUBCHANNELS;
+ int j;
+
str += 3;
+ for (j=0; j <= __MAX_SSID; j++)
+ blacklist_range(ra, 0, __MAX_SUBCHANNEL, j);
} else {
int rc;
rc = blacklist_busid(&str, &from_id0,
- &from_id1, &from);
+ &from_ssid, &from);
if (rc)
continue;
to = from;
to_id0 = from_id0;
- to_id1 = from_id1;
+ to_ssid = from_ssid;
if (*str == '-') {
str++;
rc = blacklist_busid(&str, &to_id0,
- &to_id1, &to);
+ &to_ssid, &to);
if (rc)
continue;
}
@@ -168,18 +173,19 @@ blacklist_parse_parameters (char *str, range_action action)
strsep(&str, ",\n"));
continue;
}
- if ((from_id0 != to_id0) || (from_id1 != to_id1)) {
+ if ((from_id0 != to_id0) ||
+ (from_ssid != to_ssid)) {
printk(KERN_WARNING "invalid cio_ignore range "
"%x.%x.%04x-%x.%x.%04x\n",
- from_id0, from_id1, from,
- to_id0, to_id1, to);
+ from_id0, from_ssid, from,
+ to_id0, to_ssid, to);
continue;
}
+ pr_debug("blacklist_setup: adding range "
+ "from %x.%x.%04x to %x.%x.%04x\n",
+ from_id0, from_ssid, from, to_id0, to_ssid, to);
+ blacklist_range (ra, from, to, to_ssid);
}
- /* FIXME: ignoring id0 and id1 here. */
- pr_debug("blacklist_setup: adding range "
- "from 0.0.%04x to 0.0.%04x\n", from, to);
- blacklist_range (ra, from, to);
}
return 1;
}
@@ -213,12 +219,33 @@ __setup ("cio_ignore=", blacklist_setup);
* Used by validate_subchannel()
*/
int
-is_blacklisted (int devno)
+is_blacklisted (int ssid, int devno)
{
- return test_bit (devno, bl_dev);
+ return test_bit (devno, bl_dev[ssid]);
}
#ifdef CONFIG_PROC_FS
+static int
+__s390_redo_validation(struct subchannel_id schid, void *data)
+{
+ int ret;
+ struct subchannel *sch;
+
+ sch = get_subchannel_by_schid(schid);
+ if (sch) {
+ /* Already known. */
+ put_device(&sch->dev);
+ return 0;
+ }
+ ret = css_probe_device(schid);
+ if (ret == -ENXIO)
+ return ret; /* We're through. */
+ if (ret == -ENOMEM)
+ /* Stop validation for now. Bad, but no need for a panic. */
+ return ret;
+ return 0;
+}
+
/*
* Function: s390_redo_validation
* Look for no longer blacklisted devices
@@ -226,29 +253,9 @@ is_blacklisted (int devno)
static inline void
s390_redo_validation (void)
{
- unsigned int irq;
-
CIO_TRACE_EVENT (0, "redoval");
- for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
- int ret;
- struct subchannel *sch;
-
- sch = get_subchannel_by_schid(irq);
- if (sch) {
- /* Already known. */
- put_device(&sch->dev);
- continue;
- }
- ret = css_probe_device(irq);
- if (ret == -ENXIO)
- break; /* We're through. */
- if (ret == -ENOMEM)
- /*
- * Stop validation for now. Bad, but no need for a
- * panic.
- */
- break;
- }
+
+ for_each_subchannel(__s390_redo_validation, NULL);
}
/*
@@ -278,41 +285,90 @@ blacklist_parse_proc_parameters (char *buf)
s390_redo_validation ();
}
-/* FIXME: These should be real bus ids and not home-grown ones! */
-static int cio_ignore_read (char *page, char **start, off_t off,
- int count, int *eof, void *data)
+/* Iterator struct for all devices. */
+struct ccwdev_iter {
+ int devno;
+ int ssid;
+ int in_range;
+};
+
+static void *
+cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset)
{
- const unsigned int entry_size = 18; /* "0.0.ABCD-0.0.EFGH\n" */
- long devno;
- int len;
-
- len = 0;
- for (devno = off; /* abuse the page variable
- * as counter, see fs/proc/generic.c */
- devno < __MAX_SUBCHANNELS && len + entry_size < count; devno++) {
- if (!test_bit(devno, bl_dev))
- continue;
- len += sprintf(page + len, "0.0.%04lx", devno);
- if (test_bit(devno + 1, bl_dev)) { /* print range */
- while (++devno < __MAX_SUBCHANNELS)
- if (!test_bit(devno, bl_dev))
- break;
- len += sprintf(page + len, "-0.0.%04lx", --devno);
- }
- len += sprintf(page + len, "\n");
- }
+ struct ccwdev_iter *iter;
+
+ if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
+ return NULL;
+ iter = kzalloc(sizeof(struct ccwdev_iter), GFP_KERNEL);
+ if (!iter)
+ return ERR_PTR(-ENOMEM);
+ iter->ssid = *offset / (__MAX_SUBCHANNEL + 1);
+ iter->devno = *offset % (__MAX_SUBCHANNEL + 1);
+ return iter;
+}
+
+static void
+cio_ignore_proc_seq_stop(struct seq_file *s, void *it)
+{
+ if (!IS_ERR(it))
+ kfree(it);
+}
+
+static void *
+cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
+{
+ struct ccwdev_iter *iter;
+
+ if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
+ return NULL;
+ iter = it;
+ if (iter->devno == __MAX_SUBCHANNEL) {
+ iter->devno = 0;
+ iter->ssid++;
+ if (iter->ssid > __MAX_SSID)
+ return NULL;
+ } else
+ iter->devno++;
+ (*offset)++;
+ return iter;
+}
- if (devno < __MAX_SUBCHANNELS)
- *eof = 1;
- *start = (char *) (devno - off); /* number of checked entries */
- return len;
+static int
+cio_ignore_proc_seq_show(struct seq_file *s, void *it)
+{
+ struct ccwdev_iter *iter;
+
+ iter = it;
+ if (!is_blacklisted(iter->ssid, iter->devno))
+ /* Not blacklisted, nothing to output. */
+ return 0;
+ if (!iter->in_range) {
+ /* First device in range. */
+ if ((iter->devno == __MAX_SUBCHANNEL) ||
+ !is_blacklisted(iter->ssid, iter->devno + 1))
+ /* Singular device. */
+ return seq_printf(s, "0.%x.%04x\n",
+ iter->ssid, iter->devno);
+ iter->in_range = 1;
+ return seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno);
+ }
+ if ((iter->devno == __MAX_SUBCHANNEL) ||
+ !is_blacklisted(iter->ssid, iter->devno + 1)) {
+ /* Last device in range. */
+ iter->in_range = 0;
+ return seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
+ }
+ return 0;
}
-static int cio_ignore_write(struct file *file, const char __user *user_buf,
- unsigned long user_len, void *data)
+static ssize_t
+cio_ignore_write(struct file *file, const char __user *user_buf,
+ size_t user_len, loff_t *offset)
{
char *buf;
+ if (*offset)
+ return -EINVAL;
if (user_len > 65536)
user_len = 65536;
buf = vmalloc (user_len + 1); /* maybe better use the stack? */
@@ -330,6 +386,27 @@ static int cio_ignore_write(struct file *file, const char __user *user_buf,
return user_len;
}
+static struct seq_operations cio_ignore_proc_seq_ops = {
+ .start = cio_ignore_proc_seq_start,
+ .stop = cio_ignore_proc_seq_stop,
+ .next = cio_ignore_proc_seq_next,
+ .show = cio_ignore_proc_seq_show,
+};
+
+static int
+cio_ignore_proc_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &cio_ignore_proc_seq_ops);
+}
+
+static struct file_operations cio_ignore_proc_fops = {
+ .open = cio_ignore_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ .write = cio_ignore_write,
+};
+
static int
cio_ignore_proc_init (void)
{
@@ -340,8 +417,7 @@ cio_ignore_proc_init (void)
if (!entry)
return 0;
- entry->read_proc = cio_ignore_read;
- entry->write_proc = cio_ignore_write;
+ entry->proc_fops = &cio_ignore_proc_fops;
return 1;
}
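The /proc/cio_ignore read side is converted from the old read_proc callback to a seq_file: the iterator position encodes (ssid, devno) as offset = ssid * (__MAX_SUBCHANNEL + 1) + devno, ->show prints either a single bus id or opens/closes a range, and read/llseek/release come straight from the seq_file library. For reference, hooking a seq_file to a procfs entry generally looks like this (generic sketch with hypothetical names, 2.6.16-era procfs API assumed; sample_seq_ops provides .start/.next/.stop/.show):

	static int sample_proc_open(struct inode *inode, struct file *file)
	{
		return seq_open(file, &sample_seq_ops);
	}

	static struct file_operations sample_proc_fops = {
		.open		= sample_proc_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= seq_release,
	};

	/* entry = create_proc_entry("sample", 0644, &proc_root); */
	/* if (entry)                                              */
	/*	entry->proc_fops = &sample_proc_fops;              */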
diff --git a/drivers/s390/cio/blacklist.h b/drivers/s390/cio/blacklist.h
index fb42cafbe57..95e25c1df92 100644
--- a/drivers/s390/cio/blacklist.h
+++ b/drivers/s390/cio/blacklist.h
@@ -1,6 +1,6 @@
#ifndef S390_BLACKLIST_H
#define S390_BLACKLIST_H
-extern int is_blacklisted (int devno);
+extern int is_blacklisted (int ssid, int devno);
#endif
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index be9d2d65c22..e849289d4f3 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -1,7 +1,7 @@
/*
* drivers/s390/cio/ccwgroup.c
* bus driver for ccwgroup
- * $Revision: 1.32 $
+ * $Revision: 1.33 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
@@ -263,7 +263,7 @@ ccwgroup_set_online(struct ccwgroup_device *gdev)
struct ccwgroup_driver *gdrv;
int ret;
- if (atomic_compare_and_swap(0, 1, &gdev->onoff))
+ if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
return -EAGAIN;
if (gdev->state == CCWGROUP_ONLINE) {
ret = 0;
@@ -289,7 +289,7 @@ ccwgroup_set_offline(struct ccwgroup_device *gdev)
struct ccwgroup_driver *gdrv;
int ret;
- if (atomic_compare_and_swap(0, 1, &gdev->onoff))
+ if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
return -EAGAIN;
if (gdev->state == CCWGROUP_OFFLINE) {
ret = 0;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index fa3c23b80e3..7270808c02d 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1,7 +1,7 @@
/*
* drivers/s390/cio/chsc.c
* S/390 common I/O routines -- channel subsystem call
- * $Revision: 1.120 $
+ * $Revision: 1.126 $
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
@@ -24,8 +24,6 @@
#include "ioasm.h"
#include "chsc.h"
-static struct channel_path *chps[NR_CHPIDS];
-
static void *sei_page;
static int new_channel_path(int chpid);
@@ -33,13 +31,13 @@ static int new_channel_path(int chpid);
static inline void
set_chp_logically_online(int chp, int onoff)
{
- chps[chp]->state = onoff;
+ css[0]->chps[chp]->state = onoff;
}
static int
get_chp_status(int chp)
{
- return (chps[chp] ? chps[chp]->state : -ENODEV);
+ return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
}
void
@@ -77,7 +75,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
struct {
struct chsc_header request;
- u16 reserved1;
+ u16 reserved1a:10;
+ u16 ssid:2;
+ u16 reserved1b:4;
u16 f_sch; /* first subchannel */
u16 reserved2;
u16 l_sch; /* last subchannel */
@@ -104,8 +104,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
.code = 0x0004,
};
- ssd_area->f_sch = sch->irq;
- ssd_area->l_sch = sch->irq;
+ ssd_area->ssid = sch->schid.ssid;
+ ssd_area->f_sch = sch->schid.sch_no;
+ ssd_area->l_sch = sch->schid.sch_no;
ccode = chsc(ssd_area);
if (ccode > 0) {
@@ -147,7 +148,8 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
*/
if (ssd_area->st > 3) { /* uhm, that looks strange... */
CIO_CRW_EVENT(0, "Strange subchannel type %d"
- " for sch %04x\n", ssd_area->st, sch->irq);
+ " for sch 0.%x.%04x\n", ssd_area->st,
+ sch->schid.ssid, sch->schid.sch_no);
/*
* There may have been a new subchannel type defined in the
* time since this code was written; since we don't know which
@@ -156,8 +158,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
return 0;
} else {
const char *type[4] = {"I/O", "chsc", "message", "ADM"};
- CIO_CRW_EVENT(6, "ssd: sch %04x is %s subchannel\n",
- sch->irq, type[ssd_area->st]);
+ CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
+ sch->schid.ssid, sch->schid.sch_no,
+ type[ssd_area->st]);
sch->ssd_info.valid = 1;
sch->ssd_info.type = ssd_area->st;
@@ -218,13 +221,13 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
int j;
int mask;
struct subchannel *sch;
- __u8 *chpid;
+ struct channel_path *chpid;
struct schib schib;
sch = to_subchannel(dev);
chpid = data;
for (j = 0; j < 8; j++)
- if (sch->schib.pmcw.chpid[j] == *chpid)
+ if (sch->schib.pmcw.chpid[j] == chpid->id)
break;
if (j >= 8)
return 0;
@@ -232,7 +235,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
mask = 0x80 >> j;
spin_lock(&sch->lock);
- stsch(sch->irq, &schib);
+ stsch(sch->schid, &schib);
if (!schib.pmcw.dnv)
goto out_unreg;
memcpy(&sch->schib, &schib, sizeof(struct schib));
@@ -284,7 +287,7 @@ out_unlock:
out_unreg:
spin_unlock(&sch->lock);
sch->lpm = 0;
- if (css_enqueue_subchannel_slow(sch->irq)) {
+ if (css_enqueue_subchannel_slow(sch->schid)) {
css_clear_subchannel_slow_list();
need_rescan = 1;
}
@@ -295,23 +298,30 @@ static inline void
s390_set_chpid_offline( __u8 chpid)
{
char dbf_txt[15];
+ struct device *dev;
sprintf(dbf_txt, "chpr%x", chpid);
CIO_TRACE_EVENT(2, dbf_txt);
if (get_chp_status(chpid) <= 0)
return;
-
- bus_for_each_dev(&css_bus_type, NULL, &chpid,
+ dev = get_device(&css[0]->chps[chpid]->dev);
+ bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
s390_subchannel_remove_chpid);
if (need_rescan || css_slow_subchannels_exist())
queue_work(slow_path_wq, &slow_path_work);
+ put_device(dev);
}
+struct res_acc_data {
+ struct channel_path *chp;
+ u32 fla_mask;
+ u16 fla;
+};
+
static int
-s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
- struct subchannel *sch)
+s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
{
int found;
int chp;
@@ -323,8 +333,9 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
* check if chpid is in information updated by ssd
*/
if (sch->ssd_info.valid &&
- sch->ssd_info.chpid[chp] == chpid &&
- (sch->ssd_info.fla[chp] & fla_mask) == fla) {
+ sch->ssd_info.chpid[chp] == res_data->chp->id &&
+ (sch->ssd_info.fla[chp] & res_data->fla_mask)
+ == res_data->fla) {
found = 1;
break;
}
@@ -337,24 +348,87 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
* new path information and eventually check for logically
* offline chpids.
*/
- ccode = stsch(sch->irq, &sch->schib);
+ ccode = stsch(sch->schid, &sch->schib);
if (ccode > 0)
return 0;
return 0x80 >> chp;
}
+static inline int
+s390_process_res_acc_new_sch(struct subchannel_id schid)
+{
+ struct schib schib;
+ int ret;
+ /*
+ * We don't know the device yet, but since a path
+ * may be available now to the device we'll have
+ * to do recognition again.
+ * Since we don't have any idea about which chpid
+ * that beast may be on we'll have to do a stsch
+ * on all devices, grr...
+ */
+ if (stsch_err(schid, &schib))
+ /* We're through */
+ return need_rescan ? -EAGAIN : -ENXIO;
+
+ /* Put it on the slow path. */
+ ret = css_enqueue_subchannel_slow(schid);
+ if (ret) {
+ css_clear_subchannel_slow_list();
+ need_rescan = 1;
+ return -EAGAIN;
+ }
+ return 0;
+}
+
static int
-s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
+__s390_process_res_acc(struct subchannel_id schid, void *data)
{
+ int chp_mask, old_lpm;
+ struct res_acc_data *res_data;
struct subchannel *sch;
- int irq, rc;
+
+ res_data = (struct res_acc_data *)data;
+ sch = get_subchannel_by_schid(schid);
+ if (!sch)
+ /* Check if a subchannel is newly available. */
+ return s390_process_res_acc_new_sch(schid);
+
+ spin_lock_irq(&sch->lock);
+
+ chp_mask = s390_process_res_acc_sch(res_data, sch);
+
+ if (chp_mask == 0) {
+ spin_unlock_irq(&sch->lock);
+ return 0;
+ }
+ old_lpm = sch->lpm;
+ sch->lpm = ((sch->schib.pmcw.pim &
+ sch->schib.pmcw.pam &
+ sch->schib.pmcw.pom)
+ | chp_mask) & sch->opm;
+ if (!old_lpm && sch->lpm)
+ device_trigger_reprobe(sch);
+ else if (sch->driver && sch->driver->verify)
+ sch->driver->verify(&sch->dev);
+
+ spin_unlock_irq(&sch->lock);
+ put_device(&sch->dev);
+ return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;
+}
+
+
+static int
+s390_process_res_acc (struct res_acc_data *res_data)
+{
+ int rc;
char dbf_txt[15];
- sprintf(dbf_txt, "accpr%x", chpid);
+ sprintf(dbf_txt, "accpr%x", res_data->chp->id);
CIO_TRACE_EVENT( 2, dbf_txt);
- if (fla != 0) {
- sprintf(dbf_txt, "fla%x", fla);
+ if (res_data->fla != 0) {
+ sprintf(dbf_txt, "fla%x", res_data->fla);
CIO_TRACE_EVENT( 2, dbf_txt);
}
@@ -365,70 +439,11 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
* The more information we have (info), the less scanning
* will we have to do.
*/
-
- if (!get_chp_status(chpid))
- return 0; /* no need to do the rest */
-
- rc = 0;
- for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
- int chp_mask, old_lpm;
-
- sch = get_subchannel_by_schid(irq);
- if (!sch) {
- struct schib schib;
- int ret;
- /*
- * We don't know the device yet, but since a path
- * may be available now to the device we'll have
- * to do recognition again.
- * Since we don't have any idea about which chpid
- * that beast may be on we'll have to do a stsch
- * on all devices, grr...
- */
- if (stsch(irq, &schib)) {
- /* We're through */
- if (need_rescan)
- rc = -EAGAIN;
- break;
- }
- if (need_rescan) {
- rc = -EAGAIN;
- continue;
- }
- /* Put it on the slow path. */
- ret = css_enqueue_subchannel_slow(irq);
- if (ret) {
- css_clear_subchannel_slow_list();
- need_rescan = 1;
- }
- rc = -EAGAIN;
- continue;
- }
-
- spin_lock_irq(&sch->lock);
-
- chp_mask = s390_process_res_acc_sch(chpid, fla, fla_mask, sch);
-
- if (chp_mask == 0) {
-
- spin_unlock_irq(&sch->lock);
- continue;
- }
- old_lpm = sch->lpm;
- sch->lpm = ((sch->schib.pmcw.pim &
- sch->schib.pmcw.pam &
- sch->schib.pmcw.pom)
- | chp_mask) & sch->opm;
- if (!old_lpm && sch->lpm)
- device_trigger_reprobe(sch);
- else if (sch->driver && sch->driver->verify)
- sch->driver->verify(&sch->dev);
-
- spin_unlock_irq(&sch->lock);
- put_device(&sch->dev);
- if (fla_mask == 0xffff)
- break;
- }
+ rc = for_each_subchannel(__s390_process_res_acc, res_data);
+ if (css_slow_subchannels_exist())
+ rc = -EAGAIN;
+ else if (rc != -EAGAIN)
+ rc = 0;
return rc;
}
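This hunk is the template for the rest of the file: every open-coded "for (irq = 0; irq < __MAX_SUBCHANNELS; irq++)" scan becomes a for_each_subchannel() call, which hands each subchannel id (now spanning all subchannel sets) to a callback together with an opaque data pointer; a non-zero return from the callback is treated as "stop" and propagated, which is how the callbacks above signal "we're through" once stsch_err indicates there are no more valid subchannels. Sketch of a callback following those conventions:

	static int sample_scan_cb(struct subchannel_id schid, void *data)
	{
		struct schib schib;

		if (stsch_err(schid, &schib))
			return -ENXIO;	/* nothing valid here: stop the walk */
		if (!schib.pmcw.dnv)
			return 0;	/* no device behind it: keep going */
		/* ... act on the subchannel, e.g. queue it for the slow path ... */
		return 0;
	}

	/* rc = for_each_subchannel(sample_scan_cb, NULL); */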
@@ -466,6 +481,7 @@ int
chsc_process_crw(void)
{
int chpid, ret;
+ struct res_acc_data res_data;
struct {
struct chsc_header request;
u32 reserved1;
@@ -499,8 +515,9 @@ chsc_process_crw(void)
ret = 0;
do {
int ccode, status;
+ struct device *dev;
memset(sei_area, 0, sizeof(*sei_area));
-
+ memset(&res_data, 0, sizeof(struct res_acc_data));
sei_area->request = (struct chsc_header) {
.length = 0x0010,
.code = 0x000e,
@@ -573,26 +590,25 @@ chsc_process_crw(void)
if (status < 0)
new_channel_path(sei_area->rsid);
else if (!status)
- return 0;
- if ((sei_area->vf & 0x80) == 0) {
- pr_debug("chpid: %x\n", sei_area->rsid);
- ret = s390_process_res_acc(sei_area->rsid,
- 0, 0);
- } else if ((sei_area->vf & 0xc0) == 0x80) {
- pr_debug("chpid: %x link addr: %x\n",
- sei_area->rsid, sei_area->fla);
- ret = s390_process_res_acc(sei_area->rsid,
- sei_area->fla,
- 0xff00);
- } else if ((sei_area->vf & 0xc0) == 0xc0) {
- pr_debug("chpid: %x full link addr: %x\n",
- sei_area->rsid, sei_area->fla);
- ret = s390_process_res_acc(sei_area->rsid,
- sei_area->fla,
- 0xffff);
+ break;
+ dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
+ res_data.chp = to_channelpath(dev);
+ pr_debug("chpid: %x", sei_area->rsid);
+ if ((sei_area->vf & 0xc0) != 0) {
+ res_data.fla = sei_area->fla;
+ if ((sei_area->vf & 0xc0) == 0xc0) {
+ pr_debug(" full link addr: %x",
+ sei_area->fla);
+ res_data.fla_mask = 0xffff;
+ } else {
+ pr_debug(" link addr: %x",
+ sei_area->fla);
+ res_data.fla_mask = 0xff00;
+ }
}
- pr_debug("\n");
-
+ ret = s390_process_res_acc(&res_data);
+ pr_debug("\n\n");
+ put_device(dev);
break;
default: /* other stuff */
@@ -604,12 +620,72 @@ chsc_process_crw(void)
return ret;
}
+static inline int
+__chp_add_new_sch(struct subchannel_id schid)
+{
+ struct schib schib;
+ int ret;
+
+ if (stsch(schid, &schib))
+ /* We're through */
+ return need_rescan ? -EAGAIN : -ENXIO;
+
+ /* Put it on the slow path. */
+ ret = css_enqueue_subchannel_slow(schid);
+ if (ret) {
+ css_clear_subchannel_slow_list();
+ need_rescan = 1;
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+
static int
-chp_add(int chpid)
+__chp_add(struct subchannel_id schid, void *data)
{
+ int i;
+ struct channel_path *chp;
struct subchannel *sch;
- int irq, ret, rc;
+
+ chp = (struct channel_path *)data;
+ sch = get_subchannel_by_schid(schid);
+ if (!sch)
+ /* Check if the subchannel is now available. */
+ return __chp_add_new_sch(schid);
+ spin_lock(&sch->lock);
+ for (i=0; i<8; i++)
+ if (sch->schib.pmcw.chpid[i] == chp->id) {
+ if (stsch(sch->schid, &sch->schib) != 0) {
+ /* Endgame. */
+ spin_unlock(&sch->lock);
+ return -ENXIO;
+ }
+ break;
+ }
+ if (i==8) {
+ spin_unlock(&sch->lock);
+ return 0;
+ }
+ sch->lpm = ((sch->schib.pmcw.pim &
+ sch->schib.pmcw.pam &
+ sch->schib.pmcw.pom)
+ | 0x80 >> i) & sch->opm;
+
+ if (sch->driver && sch->driver->verify)
+ sch->driver->verify(&sch->dev);
+
+ spin_unlock(&sch->lock);
+ put_device(&sch->dev);
+ return 0;
+}
+
+static int
+chp_add(int chpid)
+{
+ int rc;
char dbf_txt[15];
+ struct device *dev;
if (!get_chp_status(chpid))
return 0; /* no need to do the rest */
@@ -617,59 +693,13 @@ chp_add(int chpid)
sprintf(dbf_txt, "cadd%x", chpid);
CIO_TRACE_EVENT(2, dbf_txt);
- rc = 0;
- for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
- int i;
-
- sch = get_subchannel_by_schid(irq);
- if (!sch) {
- struct schib schib;
-
- if (stsch(irq, &schib)) {
- /* We're through */
- if (need_rescan)
- rc = -EAGAIN;
- break;
- }
- if (need_rescan) {
- rc = -EAGAIN;
- continue;
- }
- /* Put it on the slow path. */
- ret = css_enqueue_subchannel_slow(irq);
- if (ret) {
- css_clear_subchannel_slow_list();
- need_rescan = 1;
- }
- rc = -EAGAIN;
- continue;
- }
-
- spin_lock(&sch->lock);
- for (i=0; i<8; i++)
- if (sch->schib.pmcw.chpid[i] == chpid) {
- if (stsch(sch->irq, &sch->schib) != 0) {
- /* Endgame. */
- spin_unlock(&sch->lock);
- return rc;
- }
- break;
- }
- if (i==8) {
- spin_unlock(&sch->lock);
- return rc;
- }
- sch->lpm = ((sch->schib.pmcw.pim &
- sch->schib.pmcw.pam &
- sch->schib.pmcw.pom)
- | 0x80 >> i) & sch->opm;
-
- if (sch->driver && sch->driver->verify)
- sch->driver->verify(&sch->dev);
-
- spin_unlock(&sch->lock);
- put_device(&sch->dev);
- }
+ dev = get_device(&css[0]->chps[chpid]->dev);
+ rc = for_each_subchannel(__chp_add, to_channelpath(dev));
+ if (css_slow_subchannels_exist())
+ rc = -EAGAIN;
+ if (rc != -EAGAIN)
+ rc = 0;
+ put_device(dev);
return rc;
}
@@ -702,7 +732,7 @@ __check_for_io_and_kill(struct subchannel *sch, int index)
if (!device_is_online(sch))
/* cio could be doing I/O. */
return 0;
- cc = stsch(sch->irq, &sch->schib);
+ cc = stsch(sch->schid, &sch->schib);
if (cc)
return 0;
if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
@@ -743,7 +773,7 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
* just varied off path. Then kill it.
*/
if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
- if (css_enqueue_subchannel_slow(sch->irq)) {
+ if (css_enqueue_subchannel_slow(sch->schid)) {
css_clear_subchannel_slow_list();
need_rescan = 1;
}
@@ -781,6 +811,29 @@ s390_subchannel_vary_chpid_on(struct device *dev, void *data)
return 0;
}
+static int
+__s390_vary_chpid_on(struct subchannel_id schid, void *data)
+{
+ struct schib schib;
+ struct subchannel *sch;
+
+ sch = get_subchannel_by_schid(schid);
+ if (sch) {
+ put_device(&sch->dev);
+ return 0;
+ }
+ if (stsch_err(schid, &schib))
+ /* We're through */
+ return -ENXIO;
+ /* Put it on the slow path. */
+ if (css_enqueue_subchannel_slow(schid)) {
+ css_clear_subchannel_slow_list();
+ need_rescan = 1;
+ return -EAGAIN;
+ }
+ return 0;
+}
+
/*
* Function: s390_vary_chpid
* Varies the specified chpid online or offline
@@ -789,8 +842,7 @@ static int
s390_vary_chpid( __u8 chpid, int on)
{
char dbf_text[15];
- int status, irq, ret;
- struct subchannel *sch;
+ int status;
sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
CIO_TRACE_EVENT( 2, dbf_text);
@@ -815,30 +867,9 @@ s390_vary_chpid( __u8 chpid, int on)
bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
s390_subchannel_vary_chpid_on :
s390_subchannel_vary_chpid_off);
- if (!on)
- goto out;
- /* Scan for new devices on varied on path. */
- for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
- struct schib schib;
-
- if (need_rescan)
- break;
- sch = get_subchannel_by_schid(irq);
- if (sch) {
- put_device(&sch->dev);
- continue;
- }
- if (stsch(irq, &schib))
- /* We're through */
- break;
- /* Put it on the slow path. */
- ret = css_enqueue_subchannel_slow(irq);
- if (ret) {
- css_clear_subchannel_slow_list();
- need_rescan = 1;
- }
- }
-out:
+ if (on)
+ /* Scan for new devices on varied on path. */
+ for_each_subchannel(__s390_vary_chpid_on, NULL);
if (need_rescan || css_slow_subchannels_exist())
queue_work(slow_path_wq, &slow_path_work);
return 0;
@@ -995,7 +1026,7 @@ new_channel_path(int chpid)
chp->id = chpid;
chp->state = 1;
chp->dev = (struct device) {
- .parent = &css_bus_device,
+ .parent = &css[0]->device,
.release = chp_release,
};
snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);
@@ -1017,7 +1048,7 @@ new_channel_path(int chpid)
device_unregister(&chp->dev);
goto out_free;
} else
- chps[chpid] = chp;
+ css[0]->chps[chpid] = chp;
return ret;
out_free:
kfree(chp);
@@ -1030,7 +1061,7 @@ chsc_get_chp_desc(struct subchannel *sch, int chp_no)
struct channel_path *chp;
struct channel_path_desc *desc;
- chp = chps[sch->schib.pmcw.chpid[chp_no]];
+ chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
if (!chp)
return NULL;
desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
@@ -1051,6 +1082,54 @@ chsc_alloc_sei_area(void)
return (sei_page ? 0 : -ENOMEM);
}
+int __init
+chsc_enable_facility(int operation_code)
+{
+ int ret;
+ struct {
+ struct chsc_header request;
+ u8 reserved1:4;
+ u8 format:4;
+ u8 reserved2;
+ u16 operation_code;
+ u32 reserved3;
+ u32 reserved4;
+ u32 operation_data_area[252];
+ struct chsc_header response;
+ u32 reserved5:4;
+ u32 format2:4;
+ u32 reserved6:24;
+ } *sda_area;
+
+ sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
+ if (!sda_area)
+ return -ENOMEM;
+ sda_area->request = (struct chsc_header) {
+ .length = 0x0400,
+ .code = 0x0031,
+ };
+ sda_area->operation_code = operation_code;
+
+ ret = chsc(sda_area);
+ if (ret > 0) {
+ ret = (ret == 3) ? -ENODEV : -EBUSY;
+ goto out;
+ }
+ switch (sda_area->response.code) {
+ case 0x0003: /* invalid request block */
+ case 0x0007:
+ ret = -EINVAL;
+ break;
+ case 0x0004: /* command not provided */
+ case 0x0101: /* facility not provided */
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ out:
+ free_page((unsigned long)sda_area);
+ return ret;
+}
+
subsys_initcall(chsc_alloc_sei_area);
struct css_general_char css_general_characteristics;
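chsc_enable_facility() wraps CHSC command 0x0031, which asks the channel subsystem to turn on an optional facility; together with the new CHSC_SDA_OC_MSS operation code in chsc.h, the intended use is enabling the multiple subchannel set facility during channel-subsystem initialization. A hypothetical caller (the actual call site is not part of this excerpt):

	if (css_characteristics_avail &&
	    chsc_enable_facility(CHSC_SDA_OC_MSS) == 0) {
		/* subchannel sets other than 0 may now be accessed */
	}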
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index be20da49d14..44e4b4bb1c5 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -1,12 +1,12 @@
#ifndef S390_CHSC_H
#define S390_CHSC_H
-#define NR_CHPIDS 256
-
#define CHSC_SEI_ACC_CHPID 1
#define CHSC_SEI_ACC_LINKADDR 2
#define CHSC_SEI_ACC_FULLLINKADDR 3
+#define CHSC_SDA_OC_MSS 0x2
+
struct chsc_header {
u16 length;
u16 code;
@@ -43,7 +43,9 @@ struct css_general_char {
u32 ext_mb : 1; /* bit 48 */
u32 : 7;
u32 aif_tdd : 1; /* bit 56 */
- u32 : 10;
+ u32 : 1;
+ u32 qebsm : 1; /* bit 58 */
+ u32 : 8;
u32 aif_osa : 1; /* bit 67 */
u32 : 28;
}__attribute__((packed));
@@ -63,4 +65,9 @@ extern int chsc_determine_css_characteristics(void);
extern int css_characteristics_avail;
extern void *chsc_get_chp_desc(struct subchannel*, int);
+
+extern int chsc_enable_facility(int);
+
+#define to_channelpath(dev) container_of(dev, struct channel_path, dev)
+
#endif
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 185bc73c3ec..7376bc87206 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -1,7 +1,7 @@
/*
* drivers/s390/cio/cio.c
* S/390 common I/O routines -- low level i/o calls
- * $Revision: 1.135 $
+ * $Revision: 1.138 $
*
* Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
@@ -135,7 +135,7 @@ cio_tpi(void)
return 0;
irb = (struct irb *) __LC_IRB;
/* Store interrupt response block to lowcore. */
- if (tsch (tpi_info->irq, irb) != 0)
+ if (tsch (tpi_info->schid, irb) != 0)
/* Not status pending or not operational. */
return 1;
sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
@@ -163,10 +163,11 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
else
sch->lpm = 0;
- stsch (sch->irq, &sch->schib);
+ stsch (sch->schid, &sch->schib);
CIO_MSG_EVENT(0, "cio_start: 'not oper' status for "
- "subchannel %04x!\n", sch->irq);
+ "subchannel 0.%x.%04x!\n", sch->schid.ssid,
+ sch->schid.sch_no);
sprintf(dbf_text, "no%s", sch->dev.bus_id);
CIO_TRACE_EVENT(0, dbf_text);
CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
@@ -194,7 +195,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
sch->orb.spnd = sch->options.suspend;
sch->orb.ssic = sch->options.suspend && sch->options.inter;
sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm;
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
/*
* for 64 bit we always support 64 bit IDAWs with 4k page size only
*/
@@ -204,7 +205,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
sch->orb.key = key >> 4;
/* issue "Start Subchannel" */
sch->orb.cpa = (__u32) __pa (cpa);
- ccode = ssch (sch->irq, &sch->orb);
+ ccode = ssch (sch->schid, &sch->orb);
/* process condition code */
sprintf (dbf_txt, "ccode:%d", ccode);
@@ -243,7 +244,7 @@ cio_resume (struct subchannel *sch)
CIO_TRACE_EVENT (4, "resIO");
CIO_TRACE_EVENT (4, sch->dev.bus_id);
- ccode = rsch (sch->irq);
+ ccode = rsch (sch->schid);
sprintf (dbf_txt, "ccode:%d", ccode);
CIO_TRACE_EVENT (4, dbf_txt);
@@ -283,7 +284,7 @@ cio_halt(struct subchannel *sch)
/*
* Issue "Halt subchannel" and process condition code
*/
- ccode = hsch (sch->irq);
+ ccode = hsch (sch->schid);
sprintf (dbf_txt, "ccode:%d", ccode);
CIO_TRACE_EVENT (2, dbf_txt);
@@ -318,7 +319,7 @@ cio_clear(struct subchannel *sch)
/*
* Issue "Clear subchannel" and process condition code
*/
- ccode = csch (sch->irq);
+ ccode = csch (sch->schid);
sprintf (dbf_txt, "ccode:%d", ccode);
CIO_TRACE_EVENT (2, dbf_txt);
@@ -351,7 +352,7 @@ cio_cancel (struct subchannel *sch)
CIO_TRACE_EVENT (2, "cancelIO");
CIO_TRACE_EVENT (2, sch->dev.bus_id);
- ccode = xsch (sch->irq);
+ ccode = xsch (sch->schid);
sprintf (dbf_txt, "ccode:%d", ccode);
CIO_TRACE_EVENT (2, dbf_txt);
@@ -359,7 +360,7 @@ cio_cancel (struct subchannel *sch)
switch (ccode) {
case 0: /* success */
/* Update information in scsw. */
- stsch (sch->irq, &sch->schib);
+ stsch (sch->schid, &sch->schib);
return 0;
case 1: /* status pending */
return -EBUSY;
@@ -381,7 +382,7 @@ cio_modify (struct subchannel *sch)
ret = 0;
for (retry = 0; retry < 5; retry++) {
- ccode = msch_err (sch->irq, &sch->schib);
+ ccode = msch_err (sch->schid, &sch->schib);
if (ccode < 0) /* -EIO if msch gets a program check. */
return ccode;
switch (ccode) {
@@ -414,7 +415,7 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
CIO_TRACE_EVENT (2, "ensch");
CIO_TRACE_EVENT (2, sch->dev.bus_id);
- ccode = stsch (sch->irq, &sch->schib);
+ ccode = stsch (sch->schid, &sch->schib);
if (ccode)
return -ENODEV;
@@ -432,13 +433,13 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
*/
sch->schib.pmcw.csense = 0;
if (ret == 0) {
- stsch (sch->irq, &sch->schib);
+ stsch (sch->schid, &sch->schib);
if (sch->schib.pmcw.ena)
break;
}
if (ret == -EBUSY) {
struct irb irb;
- if (tsch(sch->irq, &irb) != 0)
+ if (tsch(sch->schid, &irb) != 0)
break;
}
}
@@ -461,7 +462,7 @@ cio_disable_subchannel (struct subchannel *sch)
CIO_TRACE_EVENT (2, "dissch");
CIO_TRACE_EVENT (2, sch->dev.bus_id);
- ccode = stsch (sch->irq, &sch->schib);
+ ccode = stsch (sch->schid, &sch->schib);
if (ccode == 3) /* Not operational. */
return -ENODEV;
@@ -485,7 +486,7 @@ cio_disable_subchannel (struct subchannel *sch)
*/
break;
if (ret == 0) {
- stsch (sch->irq, &sch->schib);
+ stsch (sch->schid, &sch->schib);
if (!sch->schib.pmcw.ena)
break;
}
@@ -508,12 +509,12 @@ cio_disable_subchannel (struct subchannel *sch)
* -ENODEV for subchannels with invalid device number or blacklisted devices
*/
int
-cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
+cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
{
char dbf_txt[15];
int ccode;
- sprintf (dbf_txt, "valsch%x", irq);
+ sprintf (dbf_txt, "valsch%x", schid.sch_no);
CIO_TRACE_EVENT (4, dbf_txt);
/* Nuke all fields. */
@@ -522,17 +523,20 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
spin_lock_init(&sch->lock);
/* Set a name for the subchannel */
- snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", irq);
+ snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid,
+ schid.sch_no);
/*
* The first subchannel that is not-operational (ccode==3)
* indicates that there aren't any more devices available.
+ * If stsch gets an exception, it means the current subchannel set
+ * is not valid.
*/
- sch->irq = irq;
- ccode = stsch (irq, &sch->schib);
+ ccode = stsch_err (schid, &sch->schib);
if (ccode)
- return -ENXIO;
+ return (ccode == 3) ? -ENXIO : ccode;
+ sch->schid = schid;
/* Copy subchannel type from path management control word. */
sch->st = sch->schib.pmcw.st;
@@ -541,9 +545,9 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
*/
if (sch->st != 0) {
CIO_DEBUG(KERN_INFO, 0,
- "Subchannel %04X reports "
+ "Subchannel 0.%x.%04x reports "
"non-I/O subchannel type %04X\n",
- sch->irq, sch->st);
+ sch->schid.ssid, sch->schid.sch_no, sch->st);
/* We stop here for non-io subchannels. */
return sch->st;
}
@@ -554,26 +558,29 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
return -ENODEV;
/* Devno is valid. */
- if (is_blacklisted (sch->schib.pmcw.dev)) {
+ if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) {
/*
* This device must not be known to Linux. So we simply
* say that there is no device and return ENODEV.
*/
CIO_MSG_EVENT(0, "Blacklisted device detected "
- "at devno %04X\n", sch->schib.pmcw.dev);
+ "at devno %04X, subchannel set %x\n",
+ sch->schib.pmcw.dev, sch->schid.ssid);
return -ENODEV;
}
sch->opm = 0xff;
- chsc_validate_chpids(sch);
+ if (!cio_is_console(sch->schid))
+ chsc_validate_chpids(sch);
sch->lpm = sch->schib.pmcw.pim &
sch->schib.pmcw.pam &
sch->schib.pmcw.pom &
sch->opm;
CIO_DEBUG(KERN_INFO, 0,
- "Detected device %04X on subchannel %04X"
+ "Detected device %04x on subchannel 0.%x.%04X"
" - PIM = %02X, PAM = %02X, POM = %02X\n",
- sch->schib.pmcw.dev, sch->irq, sch->schib.pmcw.pim,
+ sch->schib.pmcw.dev, sch->schid.ssid,
+ sch->schid.sch_no, sch->schib.pmcw.pim,
sch->schib.pmcw.pam, sch->schib.pmcw.pom);
/*
@@ -632,7 +639,7 @@ do_IRQ (struct pt_regs *regs)
if (sch)
spin_lock(&sch->lock);
/* Store interrupt response block to lowcore. */
- if (tsch (tpi_info->irq, irb) == 0 && sch) {
+ if (tsch (tpi_info->schid, irb) == 0 && sch) {
/* Keep subchannel information word up to date. */
memcpy (&sch->schib.scsw, &irb->scsw,
sizeof (irb->scsw));
@@ -691,28 +698,36 @@ wait_cons_dev (void)
}
static int
-cio_console_irq(void)
+cio_test_for_console(struct subchannel_id schid, void *data)
{
- int irq;
+ if (stsch_err(schid, &console_subchannel.schib) != 0)
+ return -ENXIO;
+ if (console_subchannel.schib.pmcw.dnv &&
+ console_subchannel.schib.pmcw.dev ==
+ console_devno) {
+ console_irq = schid.sch_no;
+ return 1; /* found */
+ }
+ return 0;
+}
+
+
+static int
+cio_get_console_sch_no(void)
+{
+ struct subchannel_id schid;
+ init_subchannel_id(&schid);
if (console_irq != -1) {
/* VM provided us with the irq number of the console. */
- if (stsch(console_irq, &console_subchannel.schib) != 0 ||
+ schid.sch_no = console_irq;
+ if (stsch(schid, &console_subchannel.schib) != 0 ||
!console_subchannel.schib.pmcw.dnv)
return -1;
console_devno = console_subchannel.schib.pmcw.dev;
} else if (console_devno != -1) {
/* At least the console device number is known. */
- for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
- if (stsch(irq, &console_subchannel.schib) != 0)
- break;
- if (console_subchannel.schib.pmcw.dnv &&
- console_subchannel.schib.pmcw.dev ==
- console_devno) {
- console_irq = irq;
- break;
- }
- }
+ for_each_subchannel(cio_test_for_console, NULL);
if (console_irq == -1)
return -1;
} else {
@@ -728,17 +743,20 @@ cio_console_irq(void)
struct subchannel *
cio_probe_console(void)
{
- int irq, ret;
+ int sch_no, ret;
+ struct subchannel_id schid;
if (xchg(&console_subchannel_in_use, 1) != 0)
return ERR_PTR(-EBUSY);
- irq = cio_console_irq();
- if (irq == -1) {
+ sch_no = cio_get_console_sch_no();
+ if (sch_no == -1) {
console_subchannel_in_use = 0;
return ERR_PTR(-ENODEV);
}
memset(&console_subchannel, 0, sizeof(struct subchannel));
- ret = cio_validate_subchannel(&console_subchannel, irq);
+ init_subchannel_id(&schid);
+ schid.sch_no = sch_no;
+ ret = cio_validate_subchannel(&console_subchannel, schid);
if (ret) {
console_subchannel_in_use = 0;
return ERR_PTR(-ENODEV);
@@ -770,11 +788,11 @@ cio_release_console(void)
/* Bah... hack to catch console special sausages. */
int
-cio_is_console(int irq)
+cio_is_console(struct subchannel_id schid)
{
if (!console_subchannel_in_use)
return 0;
- return (irq == console_subchannel.irq);
+ return schid_equal(&schid, &console_subchannel.schid);
}
struct subchannel *
@@ -787,7 +805,7 @@ cio_get_console_subchannel(void)
#endif
static inline int
-__disable_subchannel_easy(unsigned int schid, struct schib *schib)
+__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
{
int retry, cc;
@@ -805,7 +823,7 @@ __disable_subchannel_easy(unsigned int schid, struct schib *schib)
}
static inline int
-__clear_subchannel_easy(unsigned int schid)
+__clear_subchannel_easy(struct subchannel_id schid)
{
int retry;
@@ -815,8 +833,8 @@ __clear_subchannel_easy(unsigned int schid)
struct tpi_info ti;
if (tpi(&ti)) {
- tsch(ti.irq, (struct irb *)__LC_IRB);
- if (ti.irq == schid)
+ tsch(ti.schid, (struct irb *)__LC_IRB);
+ if (schid_equal(&ti.schid, &schid))
return 0;
}
udelay(100);
@@ -825,31 +843,33 @@ __clear_subchannel_easy(unsigned int schid)
}
extern void do_reipl(unsigned long devno);
+static int
+__shutdown_subchannel_easy(struct subchannel_id schid, void *data)
+{
+ struct schib schib;
+
+ if (stsch_err(schid, &schib))
+ return -ENXIO;
+ if (!schib.pmcw.ena)
+ return 0;
+ switch(__disable_subchannel_easy(schid, &schib)) {
+ case 0:
+ case -ENODEV:
+ break;
+ default: /* -EBUSY */
+ if (__clear_subchannel_easy(schid))
+ break; /* give up... */
+ stsch(schid, &schib);
+ __disable_subchannel_easy(schid, &schib);
+ }
+ return 0;
+}
-/* Clear all subchannels. */
void
clear_all_subchannels(void)
{
- unsigned int schid;
-
local_irq_disable();
- for (schid=0;schid<=highest_subchannel;schid++) {
- struct schib schib;
- if (stsch(schid, &schib))
- break; /* break out of the loop */
- if (!schib.pmcw.ena)
- continue;
- switch(__disable_subchannel_easy(schid, &schib)) {
- case 0:
- case -ENODEV:
- break;
- default: /* -EBUSY */
- if (__clear_subchannel_easy(schid))
- break; /* give up... jump out of switch */
- stsch(schid, &schib);
- __disable_subchannel_easy(schid, &schib);
- }
- }
+ for_each_subchannel(__shutdown_subchannel_easy, NULL);
}
/* Make sure all subchannels are quiet before we re-ipl an lpar. */
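The rewritten clear_all_subchannels() above shows the new idiom in cio.c: per-subchannel work is expressed as a callback handed to for_each_subchannel(), which walks every subchannel number in every subchannel set, instead of an open-coded loop that stopped at the first stsch() failure. A minimal sketch of another such callback follows, assuming only the (schid, data) calling convention used here; the helper name and its counting purpose are illustrative and not part of the patch.

/*
 * Illustrative only, not part of the patch: count enabled I/O subchannels
 * with the new iterator.  Invoked as
 *	for_each_subchannel(example_count_enabled, &enabled);
 */
static int
example_count_enabled(struct subchannel_id schid, void *data)
{
	struct schib schib;
	int *enabled = data;

	if (stsch_err(schid, &schib))
		return -ENXIO;	/* nothing valid here, stop scanning this set */
	if (schib.pmcw.ena)
		(*enabled)++;
	return 0;		/* keep going */
}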
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index c50a9da420a..0ca987344e0 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -1,6 +1,8 @@
#ifndef S390_CIO_H
#define S390_CIO_H
+#include "schid.h"
+
/*
* where we put the ssd info
*/
@@ -83,7 +85,7 @@ struct orb {
/* subchannel data structure used by I/O subroutines */
struct subchannel {
- unsigned int irq; /* aka. subchannel number */
+ struct subchannel_id schid;
spinlock_t lock; /* subchannel lock */
enum {
@@ -114,7 +116,7 @@ struct subchannel {
#define to_subchannel(n) container_of(n, struct subchannel, dev)
-extern int cio_validate_subchannel (struct subchannel *, unsigned int);
+extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
extern int cio_enable_subchannel (struct subchannel *, unsigned int);
extern int cio_disable_subchannel (struct subchannel *);
extern int cio_cancel (struct subchannel *);
@@ -127,14 +129,15 @@ extern int cio_cancel (struct subchannel *);
extern int cio_set_options (struct subchannel *, int);
extern int cio_get_options (struct subchannel *);
extern int cio_modify (struct subchannel *);
+
/* Use with care. */
#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
extern void cio_release_console(void);
-extern int cio_is_console(int irq);
+extern int cio_is_console(struct subchannel_id);
extern struct subchannel *cio_get_console_subchannel(void);
#else
-#define cio_is_console(irq) 0
+#define cio_is_console(schid) 0
#define cio_get_console_subchannel() NULL
#endif
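cio.h now takes its subchannel addressing from the new schid.h, which is not part of the hunks shown here. For orientation, a sketch of the helpers this patch relies on (init_subchannel_id, schid_equal and the ssid/sch_no fields) is given below; the structure has to fit in 32 bits so the ioasm helpers can pass it in a register, and a fixed "one" bit takes over the job of the old "irq | 0x10000" encoding. The exact field layout is an assumption, not taken from this diff.

/* Assumed shape of schid.h, for orientation only. */
struct subchannel_id {
	__u32 reserved : 13;
	__u32 ssid     :  2;	/* subchannel set id, 0..__MAX_SSID */
	__u32 one      :  1;	/* always 1, replaces the old "| 0x10000" */
	__u32 sch_no   : 16;	/* subchannel number */
} __attribute__ ((packed, aligned(4)));

static inline void
init_subchannel_id(struct subchannel_id *schid)
{
	memset(schid, 0, sizeof(*schid));
	schid->one = 1;
}

static inline int
schid_equal(struct subchannel_id *schid1, struct subchannel_id *schid2)
{
	return !memcmp(schid1, schid2, sizeof(*schid1));
}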
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index b978f7fe832..0b03714e696 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/s390/cio/cmf.c ($Revision: 1.16 $)
+ * linux/drivers/s390/cio/cmf.c ($Revision: 1.19 $)
*
* Linux on zSeries Channel Measurement Facility support
*
@@ -178,7 +178,7 @@ set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
/* msch can silently fail, so do it again if necessary */
for (retry = 0; retry < 3; retry++) {
/* prepare schib */
- stsch(sch->irq, schib);
+ stsch(sch->schid, schib);
schib->pmcw.mme = mme;
schib->pmcw.mbfc = mbfc;
/* address can be either a block address or a block index */
@@ -188,7 +188,7 @@ set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
schib->pmcw.mbi = address;
/* try to submit it */
- switch(ret = msch_err(sch->irq, schib)) {
+ switch(ret = msch_err(sch->schid, schib)) {
case 0:
break;
case 1:
@@ -202,7 +202,7 @@ set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
ret = -EINVAL;
break;
}
- stsch(sch->irq, schib); /* restore the schib */
+ stsch(sch->schid, schib); /* restore the schib */
if (ret)
break;
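The set_schib() loop above keeps the "msch can silently fail" workaround but now issues msch_err() on the schid. As a reminder of the condition codes involved (0 done, 1 status pending, 2 busy, 3 not operational, with -EIO delivered by the msch_err fixup on a program check), a retry wrapper in the same spirit might look like the following; the helper and its retry policy are illustrative, not part of the patch.

/* Illustrative only: retry msch_err() while the subchannel is busy. */
static int
example_try_msch(struct subchannel *sch, struct schib *schib)
{
	int retry, ccode;

	for (retry = 0; retry < 3; retry++) {
		ccode = msch_err(sch->schid, schib);
		if (ccode != 1 && ccode != 2)	/* retry only on pending/busy */
			break;
	}
	return ccode;	/* 0, 3, -EIO, or 1/2 once the retries run out */
}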
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 555119cacc2..e565193650c 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1,7 +1,7 @@
/*
* drivers/s390/cio/css.c
* driver for channel subsystem
- * $Revision: 1.85 $
+ * $Revision: 1.93 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
@@ -21,19 +21,35 @@
#include "ioasm.h"
#include "chsc.h"
-unsigned int highest_subchannel;
int need_rescan = 0;
int css_init_done = 0;
+static int max_ssid = 0;
+
+struct channel_subsystem *css[__MAX_CSSID + 1];
-struct pgid global_pgid;
int css_characteristics_avail = 0;
-struct device css_bus_device = {
- .bus_id = "css0",
-};
+inline int
+for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
+{
+ struct subchannel_id schid;
+ int ret;
+
+ init_subchannel_id(&schid);
+ ret = -ENODEV;
+ do {
+ do {
+ ret = fn(schid, data);
+ if (ret)
+ break;
+ } while (schid.sch_no++ < __MAX_SUBCHANNEL);
+ schid.sch_no = 0;
+ } while (schid.ssid++ < max_ssid);
+ return ret;
+}
static struct subchannel *
-css_alloc_subchannel(int irq)
+css_alloc_subchannel(struct subchannel_id schid)
{
struct subchannel *sch;
int ret;
@@ -41,13 +57,11 @@ css_alloc_subchannel(int irq)
sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
if (sch == NULL)
return ERR_PTR(-ENOMEM);
- ret = cio_validate_subchannel (sch, irq);
+ ret = cio_validate_subchannel (sch, schid);
if (ret < 0) {
kfree(sch);
return ERR_PTR(ret);
}
- if (irq > highest_subchannel)
- highest_subchannel = irq;
if (sch->st != SUBCHANNEL_TYPE_IO) {
/* For now we ignore all non-io subchannels. */
@@ -87,7 +101,7 @@ css_subchannel_release(struct device *dev)
struct subchannel *sch;
sch = to_subchannel(dev);
- if (!cio_is_console(sch->irq))
+ if (!cio_is_console(sch->schid))
kfree(sch);
}
@@ -99,7 +113,7 @@ css_register_subchannel(struct subchannel *sch)
int ret;
/* Initialize the subchannel structure */
- sch->dev.parent = &css_bus_device;
+ sch->dev.parent = &css[0]->device;
sch->dev.bus = &css_bus_type;
sch->dev.release = &css_subchannel_release;
@@ -114,12 +128,12 @@ css_register_subchannel(struct subchannel *sch)
}
int
-css_probe_device(int irq)
+css_probe_device(struct subchannel_id schid)
{
int ret;
struct subchannel *sch;
- sch = css_alloc_subchannel(irq);
+ sch = css_alloc_subchannel(schid);
if (IS_ERR(sch))
return PTR_ERR(sch);
ret = css_register_subchannel(sch);
@@ -132,26 +146,26 @@ static int
check_subchannel(struct device * dev, void * data)
{
struct subchannel *sch;
- int irq = (unsigned long)data;
+ struct subchannel_id *schid = data;
sch = to_subchannel(dev);
- return (sch->irq == irq);
+ return schid_equal(&sch->schid, schid);
}
struct subchannel *
-get_subchannel_by_schid(int irq)
+get_subchannel_by_schid(struct subchannel_id schid)
{
struct device *dev;
dev = bus_find_device(&css_bus_type, NULL,
- (void *)(unsigned long)irq, check_subchannel);
+ (void *)&schid, check_subchannel);
return dev ? to_subchannel(dev) : NULL;
}
static inline int
-css_get_subchannel_status(struct subchannel *sch, int schid)
+css_get_subchannel_status(struct subchannel *sch, struct subchannel_id schid)
{
struct schib schib;
int cc;
@@ -170,13 +184,13 @@ css_get_subchannel_status(struct subchannel *sch, int schid)
}
static int
-css_evaluate_subchannel(int irq, int slow)
+css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
int event, ret, disc;
struct subchannel *sch;
unsigned long flags;
- sch = get_subchannel_by_schid(irq);
+ sch = get_subchannel_by_schid(schid);
disc = sch ? device_is_disconnected(sch) : 0;
if (disc && slow) {
if (sch)
@@ -194,9 +208,10 @@ css_evaluate_subchannel(int irq, int slow)
put_device(&sch->dev);
return -EAGAIN; /* Will be done on the slow path. */
}
- event = css_get_subchannel_status(sch, irq);
- CIO_MSG_EVENT(4, "Evaluating schid %04x, event %d, %s, %s path.\n",
- irq, event, sch?(disc?"disconnected":"normal"):"unknown",
+ event = css_get_subchannel_status(sch, schid);
+ CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
+ schid.ssid, schid.sch_no, event,
+ sch?(disc?"disconnected":"normal"):"unknown",
slow?"slow":"fast");
switch (event) {
case CIO_NO_PATH:
@@ -253,7 +268,7 @@ css_evaluate_subchannel(int irq, int slow)
sch->schib.pmcw.intparm = 0;
cio_modify(sch);
put_device(&sch->dev);
- ret = css_probe_device(irq);
+ ret = css_probe_device(schid);
} else {
/*
* We can't immediately deregister the disconnected
@@ -272,7 +287,7 @@ css_evaluate_subchannel(int irq, int slow)
device_trigger_reprobe(sch);
spin_unlock_irqrestore(&sch->lock, flags);
}
- ret = sch ? 0 : css_probe_device(irq);
+ ret = sch ? 0 : css_probe_device(schid);
break;
default:
BUG();
@@ -281,28 +296,15 @@ css_evaluate_subchannel(int irq, int slow)
return ret;
}
-static void
-css_rescan_devices(void)
+static int
+css_rescan_devices(struct subchannel_id schid, void *data)
{
- int irq, ret;
-
- for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
- ret = css_evaluate_subchannel(irq, 1);
- /* No more memory. It doesn't make sense to continue. No
- * panic because this can happen in midflight and just
- * because we can't use a new device is no reason to crash
- * the system. */
- if (ret == -ENOMEM)
- break;
- /* -ENXIO indicates that there are no more subchannels. */
- if (ret == -ENXIO)
- break;
- }
+ return css_evaluate_subchannel(schid, 1);
}
struct slow_subchannel {
struct list_head slow_list;
- unsigned long schid;
+ struct subchannel_id schid;
};
static LIST_HEAD(slow_subchannels_head);
@@ -315,7 +317,7 @@ css_trigger_slow_path(void)
if (need_rescan) {
need_rescan = 0;
- css_rescan_devices();
+ for_each_subchannel(css_rescan_devices, NULL);
return;
}
@@ -354,23 +356,31 @@ css_reiterate_subchannels(void)
* Called from the machine check handler for subchannel report words.
*/
int
-css_process_crw(int irq)
+css_process_crw(int rsid1, int rsid2)
{
int ret;
+ struct subchannel_id mchk_schid;
- CIO_CRW_EVENT(2, "source is subchannel %04X\n", irq);
+ CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
+ rsid1, rsid2);
if (need_rescan)
/* We need to iterate all subchannels anyway. */
return -EAGAIN;
+
+ init_subchannel_id(&mchk_schid);
+ mchk_schid.sch_no = rsid1;
+ if (rsid2 != 0)
+ mchk_schid.ssid = (rsid2 >> 8) & 3;
+
/*
* Since we are always presented with IPI in the CRW, we have to
* use stsch() to find out if the subchannel in question has come
* or gone.
*/
- ret = css_evaluate_subchannel(irq, 0);
+ ret = css_evaluate_subchannel(mchk_schid, 0);
if (ret == -EAGAIN) {
- if (css_enqueue_subchannel_slow(irq)) {
+ if (css_enqueue_subchannel_slow(mchk_schid)) {
css_clear_subchannel_slow_list();
need_rescan = 1;
}
@@ -378,22 +388,83 @@ css_process_crw(int irq)
return ret;
}
-static void __init
-css_generate_pgid(void)
+static int __init
+__init_channel_subsystem(struct subchannel_id schid, void *data)
{
- /* Let's build our path group ID here. */
- if (css_characteristics_avail && css_general_characteristics.mcss)
- global_pgid.cpu_addr = 0x8000;
+ struct subchannel *sch;
+ int ret;
+
+ if (cio_is_console(schid))
+ sch = cio_get_console_subchannel();
else {
+ sch = css_alloc_subchannel(schid);
+ if (IS_ERR(sch))
+ ret = PTR_ERR(sch);
+ else
+ ret = 0;
+ switch (ret) {
+ case 0:
+ break;
+ case -ENOMEM:
+ panic("Out of memory in init_channel_subsystem\n");
+ /* -ENXIO: no more subchannels. */
+ case -ENXIO:
+ return ret;
+ default:
+ return 0;
+ }
+ }
+ /*
+ * We register ALL valid subchannels in ioinfo, even those
+ * that have been present before init_channel_subsystem.
+ * These subchannels can't have been registered yet (kmalloc
+ * not working) so we do it now. This is true e.g. for the
+ * console subchannel.
+ */
+ css_register_subchannel(sch);
+ return 0;
+}
+
+static void __init
+css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
+{
+ if (css_characteristics_avail && css_general_characteristics.mcss) {
+ css->global_pgid.pgid_high.ext_cssid.version = 0x80;
+ css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
+ } else {
#ifdef CONFIG_SMP
- global_pgid.cpu_addr = hard_smp_processor_id();
+ css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
#else
- global_pgid.cpu_addr = 0;
+ css->global_pgid.pgid_high.cpu_addr = 0;
#endif
}
- global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
- global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
- global_pgid.tod_high = (__u32) (get_clock() >> 32);
+ css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
+ css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
+ css->global_pgid.tod_high = tod_high;
+
+}
+
+static void
+channel_subsystem_release(struct device *dev)
+{
+ struct channel_subsystem *css;
+
+ css = to_css(dev);
+ kfree(css);
+}
+
+static inline void __init
+setup_css(int nr)
+{
+ u32 tod_high;
+
+ memset(css[nr], 0, sizeof(struct channel_subsystem));
+ css[nr]->valid = 1;
+ css[nr]->cssid = nr;
+ sprintf(css[nr]->device.bus_id, "css%x", nr);
+ css[nr]->device.release = channel_subsystem_release;
+ tod_high = (u32) (get_clock() >> 32);
+ css_generate_pgid(css[nr], tod_high);
}
/*
@@ -404,53 +475,50 @@ css_generate_pgid(void)
static int __init
init_channel_subsystem (void)
{
- int ret, irq;
+ int ret, i;
if (chsc_determine_css_characteristics() == 0)
css_characteristics_avail = 1;
- css_generate_pgid();
-
if ((ret = bus_register(&css_bus_type)))
goto out;
- if ((ret = device_register (&css_bus_device)))
- goto out_bus;
+ /* Try to enable MSS. */
+ ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
+ switch (ret) {
+ case 0: /* Success. */
+ max_ssid = __MAX_SSID;
+ break;
+ case -ENOMEM:
+ goto out_bus;
+ default:
+ max_ssid = 0;
+ }
+ /* Setup css structure. */
+ for (i = 0; i <= __MAX_CSSID; i++) {
+ css[i] = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
+ if (!css[i]) {
+ ret = -ENOMEM;
+ goto out_unregister;
+ }
+ setup_css(i);
+ ret = device_register(&css[i]->device);
+ if (ret)
+ goto out_free;
+ }
css_init_done = 1;
ctl_set_bit(6, 28);
- for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
- struct subchannel *sch;
-
- if (cio_is_console(irq))
- sch = cio_get_console_subchannel();
- else {
- sch = css_alloc_subchannel(irq);
- if (IS_ERR(sch))
- ret = PTR_ERR(sch);
- else
- ret = 0;
- if (ret == -ENOMEM)
- panic("Out of memory in "
- "init_channel_subsystem\n");
- /* -ENXIO: no more subchannels. */
- if (ret == -ENXIO)
- break;
- if (ret)
- continue;
- }
- /*
- * We register ALL valid subchannels in ioinfo, even those
- * that have been present before init_channel_subsystem.
- * These subchannels can't have been registered yet (kmalloc
- * not working) so we do it now. This is true e.g. for the
- * console subchannel.
- */
- css_register_subchannel(sch);
- }
+ for_each_subchannel(__init_channel_subsystem, NULL);
return 0;
-
+out_free:
+ kfree(css[i]);
+out_unregister:
+ while (i > 0) {
+ i--;
+ device_unregister(&css[i]->device);
+ }
out_bus:
bus_unregister(&css_bus_type);
out:
@@ -481,47 +549,8 @@ struct bus_type css_bus_type = {
subsys_initcall(init_channel_subsystem);
-/*
- * Register root devices for some drivers. The release function must not be
- * in the device drivers, so we do it here.
- */
-static void
-s390_root_dev_release(struct device *dev)
-{
- kfree(dev);
-}
-
-struct device *
-s390_root_dev_register(const char *name)
-{
- struct device *dev;
- int ret;
-
- if (!strlen(name))
- return ERR_PTR(-EINVAL);
- dev = kmalloc(sizeof(struct device), GFP_KERNEL);
- if (!dev)
- return ERR_PTR(-ENOMEM);
- memset(dev, 0, sizeof(struct device));
- strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE));
- dev->release = s390_root_dev_release;
- ret = device_register(dev);
- if (ret) {
- kfree(dev);
- return ERR_PTR(ret);
- }
- return dev;
-}
-
-void
-s390_root_dev_unregister(struct device *dev)
-{
- if (dev)
- device_unregister(dev);
-}
-
int
-css_enqueue_subchannel_slow(unsigned long schid)
+css_enqueue_subchannel_slow(struct subchannel_id schid)
{
struct slow_subchannel *new_slow_sch;
unsigned long flags;
@@ -564,6 +593,4 @@ css_slow_subchannels_exist(void)
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);
-EXPORT_SYMBOL(s390_root_dev_register);
-EXPORT_SYMBOL(s390_root_dev_unregister);
EXPORT_SYMBOL_GPL(css_characteristics_avail);
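css_process_crw() now receives both report-source ids from the machine check handler; rsid1 still carries the subchannel number and, when rsid2 is non-zero, two of its bits select the subchannel set. A worked example of that decoding, with made-up rsid values:

/* Made-up values, illustrating the decoding in css_process_crw(). */
struct subchannel_id mchk_schid;

init_subchannel_id(&mchk_schid);
mchk_schid.sch_no = 0x4711;              /* rsid1 */
mchk_schid.ssid   = (0x0180 >> 8) & 3;   /* rsid2 = 0x0180 -> ssid 1 */
/* css_evaluate_subchannel() is then asked about subchannel 0.1.4711. */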
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 2004a6c4938..251ebd7a7d3 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -6,6 +6,8 @@
#include <asm/cio.h>
+#include "schid.h"
+
/*
* path grouping stuff
*/
@@ -33,19 +35,25 @@ struct path_state {
__u8 resvd : 3; /* reserved */
} __attribute__ ((packed));
+struct extended_cssid {
+ u8 version;
+ u8 cssid;
+} __attribute__ ((packed));
+
struct pgid {
union {
__u8 fc; /* SPID function code */
struct path_state ps; /* SNID path state */
} inf;
- __u32 cpu_addr : 16; /* CPU address */
+ union {
+ __u32 cpu_addr : 16; /* CPU address */
+ struct extended_cssid ext_cssid;
+ } pgid_high;
__u32 cpu_id : 24; /* CPU identification */
__u32 cpu_model : 16; /* CPU model */
__u32 tod_high; /* high word TOD clock */
} __attribute__ ((packed));
-extern struct pgid global_pgid;
-
#define MAX_CIWS 8
/*
@@ -68,7 +76,8 @@ struct ccw_device_private {
atomic_t onoff;
unsigned long registered;
__u16 devno; /* device number */
- __u16 irq; /* subchannel number */
+ __u16 sch_no; /* subchannel number */
+ __u8 ssid; /* subchannel set id */
__u8 imask; /* lpm mask for SNID/SID/SPGID */
int iretry; /* retry counter SNID/SID/SPGID */
struct {
@@ -121,15 +130,27 @@ struct css_driver {
extern struct bus_type css_bus_type;
extern struct css_driver io_subchannel_driver;
-int css_probe_device(int irq);
-extern struct subchannel * get_subchannel_by_schid(int irq);
-extern unsigned int highest_subchannel;
+extern int css_probe_device(struct subchannel_id);
+extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
extern int css_init_done;
-
-#define __MAX_SUBCHANNELS 65536
+extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
+
+#define __MAX_SUBCHANNEL 65535
+#define __MAX_SSID 3
+#define __MAX_CHPID 255
+#define __MAX_CSSID 0
+
+struct channel_subsystem {
+ u8 cssid;
+ int valid;
+ struct channel_path *chps[__MAX_CHPID];
+ struct device device;
+ struct pgid global_pgid;
+};
+#define to_css(dev) container_of(dev, struct channel_subsystem, device)
extern struct bus_type css_bus_type;
-extern struct device css_bus_device;
+extern struct channel_subsystem *css[];
/* Some helper functions for disconnected state. */
int device_is_disconnected(struct subchannel *);
@@ -144,7 +165,7 @@ void device_set_waiting(struct subchannel *);
void device_kill_pending_timer(struct subchannel *);
/* Helper functions to build lists for the slow path. */
-int css_enqueue_subchannel_slow(unsigned long schid);
+extern int css_enqueue_subchannel_slow(struct subchannel_id schid);
void css_walk_subchannel_slow_list(void (*fn)(unsigned long));
void css_clear_subchannel_slow_list(void);
int css_slow_subchannels_exist(void);
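The 16-bit cpu_addr slot in struct pgid becomes a union so the same two bytes can carry a version byte plus the cssid once multiple channel subsystems (mcss) are in play. Version 0x80 with cssid 0 occupies those bytes exactly as the removed css_generate_pgid() did when it stored cpu_addr = 0x8000, so the path-group id stays unchanged for the single-css case on this big-endian machine. A short illustration, with example values:

/* Illustration only: the two views of pgid_high overlay the same 16 bits. */
struct pgid pgid;

if (css_characteristics_avail && css_general_characteristics.mcss) {
	pgid.pgid_high.ext_cssid.version = 0x80;	/* bytes 80 00, like 0x8000 */
	pgid.pgid_high.ext_cssid.cssid = 0;
} else {
	pgid.pgid_high.cpu_addr = 0x0001;		/* or a plain CPU address */
}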
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 85908cacc3b..fa3e4c0a253 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1,7 +1,7 @@
/*
* drivers/s390/cio/device.c
* bus driver for ccw devices
- * $Revision: 1.131 $
+ * $Revision: 1.137 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
@@ -374,7 +374,7 @@ online_store (struct device *dev, struct device_attribute *attr, const char *buf
int i, force, ret;
char *tmp;
- if (atomic_compare_and_swap(0, 1, &cdev->private->onoff))
+ if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
return -EAGAIN;
if (cdev->drv && !try_module_get(cdev->drv->owner)) {
@@ -535,7 +535,8 @@ ccw_device_register(struct ccw_device *cdev)
}
struct match_data {
- unsigned int devno;
+ unsigned int devno;
+ unsigned int ssid;
struct ccw_device * sibling;
};
@@ -548,6 +549,7 @@ match_devno(struct device * dev, void * data)
cdev = to_ccwdev(dev);
if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
(cdev->private->devno == d->devno) &&
+ (cdev->private->ssid == d->ssid) &&
(cdev != d->sibling)) {
cdev->private->state = DEV_STATE_NOT_OPER;
return 1;
@@ -556,11 +558,13 @@ match_devno(struct device * dev, void * data)
}
static struct ccw_device *
-get_disc_ccwdev_by_devno(unsigned int devno, struct ccw_device *sibling)
+get_disc_ccwdev_by_devno(unsigned int devno, unsigned int ssid,
+ struct ccw_device *sibling)
{
struct device *dev;
struct match_data data = {
- .devno = devno,
+ .devno = devno,
+ .ssid = ssid,
.sibling = sibling,
};
@@ -616,13 +620,13 @@ ccw_device_do_unreg_rereg(void *data)
need_rename = 1;
other_cdev = get_disc_ccwdev_by_devno(sch->schib.pmcw.dev,
- cdev);
+ sch->schid.ssid, cdev);
if (other_cdev) {
struct subchannel *other_sch;
other_sch = to_subchannel(other_cdev->dev.parent);
if (get_device(&other_sch->dev)) {
- stsch(other_sch->irq, &other_sch->schib);
+ stsch(other_sch->schid, &other_sch->schib);
if (other_sch->schib.pmcw.dnv) {
other_sch->schib.pmcw.intparm = 0;
cio_modify(other_sch);
@@ -639,8 +643,8 @@ ccw_device_do_unreg_rereg(void *data)
if (test_and_clear_bit(1, &cdev->private->registered))
device_del(&cdev->dev);
if (need_rename)
- snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x",
- sch->schib.pmcw.dev);
+ snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
+ sch->schid.ssid, sch->schib.pmcw.dev);
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_add_changed, (void *)cdev);
queue_work(ccw_device_work, &cdev->private->kick_work);
@@ -769,18 +773,20 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
sch->dev.driver_data = cdev;
sch->driver = &io_subchannel_driver;
cdev->ccwlock = &sch->lock;
+
/* Init private data. */
priv = cdev->private;
priv->devno = sch->schib.pmcw.dev;
- priv->irq = sch->irq;
+ priv->ssid = sch->schid.ssid;
+ priv->sch_no = sch->schid.sch_no;
priv->state = DEV_STATE_NOT_OPER;
INIT_LIST_HEAD(&priv->cmb_list);
init_waitqueue_head(&priv->wait_q);
init_timer(&priv->timer);
/* Set an initial name for the device. */
- snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x",
- sch->schib.pmcw.dev);
+ snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
+ sch->schid.ssid, sch->schib.pmcw.dev);
/* Increase counter of devices currently in recognition. */
atomic_inc(&ccw_device_init_count);
@@ -951,7 +957,7 @@ io_subchannel_shutdown(struct device *dev)
sch = to_subchannel(dev);
cdev = dev->driver_data;
- if (cio_is_console(sch->irq))
+ if (cio_is_console(sch->schid))
return;
if (!sch->schib.pmcw.ena)
/* Nothing to do. */
@@ -986,10 +992,6 @@ ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch)
cdev->dev = (struct device) {
.parent = &sch->dev,
};
- /* Initialize the subchannel structure */
- sch->dev.parent = &css_bus_device;
- sch->dev.bus = &css_bus_type;
-
rc = io_subchannel_recog(cdev, sch);
if (rc)
return rc;
@@ -1146,6 +1148,16 @@ ccw_driver_unregister (struct ccw_driver *cdriver)
driver_unregister(&cdriver->driver);
}
+/* Helper func for qdio. */
+struct subchannel_id
+ccw_device_get_subchannel_id(struct ccw_device *cdev)
+{
+ struct subchannel *sch;
+
+ sch = to_subchannel(cdev->dev.parent);
+ return sch->schid;
+}
+
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
@@ -1155,3 +1167,4 @@ EXPORT_SYMBOL(get_ccwdev_by_busid);
EXPORT_SYMBOL(ccw_bus_type);
EXPORT_SYMBOL(ccw_device_work);
EXPORT_SYMBOL(ccw_device_notify_work);
+EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
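ccw_device_get_subchannel_id() is exported for qdio so a driver sitting on top of a ccw device can recover the full subchannel_id rather than only the bare number from _ccw_device_get_subchannel_number(). A hypothetical caller, just to show the shape of the API:

/* Hypothetical caller, qdio-style: name the subchannel behind a device. */
static void
example_log_schid(struct ccw_device *cdev)
{
	struct subchannel_id schid;
	char dbf_text[15];

	schid = ccw_device_get_subchannel_id(cdev);
	snprintf(dbf_text, sizeof(dbf_text), "0.%x.%04x",
		 schid.ssid, schid.sch_no);
	/* ... feed dbf_text to the debug feature ... */
}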
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index a3aa056d724..11587ebb728 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -110,6 +110,7 @@ int ccw_device_stlck(struct ccw_device *);
/* qdio needs this. */
void ccw_device_set_timeout(struct ccw_device *, int);
+extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
void retry_set_schib(struct ccw_device *cdev);
#endif
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index c1c89f4fd4e..23d12b65e5f 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -133,7 +133,7 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
int ret;
sch = to_subchannel(cdev->dev.parent);
- ret = stsch(sch->irq, &sch->schib);
+ ret = stsch(sch->schid, &sch->schib);
if (ret || !sch->schib.pmcw.dnv)
return -ENODEV;
if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
@@ -231,7 +231,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
* through ssch() and the path information is up to date.
*/
old_lpm = sch->lpm;
- stsch(sch->irq, &sch->schib);
+ stsch(sch->schid, &sch->schib);
sch->lpm = sch->schib.pmcw.pim &
sch->schib.pmcw.pam &
sch->schib.pmcw.pom &
@@ -257,8 +257,9 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
switch (state) {
case DEV_STATE_NOT_OPER:
CIO_DEBUG(KERN_WARNING, 2,
- "SenseID : unknown device %04x on subchannel %04x\n",
- cdev->private->devno, sch->irq);
+ "SenseID : unknown device %04x on subchannel "
+ "0.%x.%04x\n", cdev->private->devno,
+ sch->schid.ssid, sch->schid.sch_no);
break;
case DEV_STATE_OFFLINE:
if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
@@ -282,16 +283,18 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
return;
}
/* Issue device info message. */
- CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: "
+ CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: "
"CU Type/Mod = %04X/%02X, Dev Type/Mod = "
- "%04X/%02X\n", cdev->private->devno,
+ "%04X/%02X\n",
+ cdev->private->ssid, cdev->private->devno,
cdev->id.cu_type, cdev->id.cu_model,
cdev->id.dev_type, cdev->id.dev_model);
break;
case DEV_STATE_BOXED:
CIO_DEBUG(KERN_WARNING, 2,
- "SenseID : boxed device %04x on subchannel %04x\n",
- cdev->private->devno, sch->irq);
+ "SenseID : boxed device %04x on subchannel "
+ "0.%x.%04x\n", cdev->private->devno,
+ sch->schid.ssid, sch->schid.sch_no);
break;
}
cdev->private->state = state;
@@ -359,7 +362,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
if (state == DEV_STATE_BOXED)
CIO_DEBUG(KERN_WARNING, 2,
"Boxed device %04x on subchannel %04x\n",
- cdev->private->devno, sch->irq);
+ cdev->private->devno, sch->schid.sch_no);
if (cdev->private->flags.donotify) {
cdev->private->flags.donotify = 0;
@@ -592,7 +595,7 @@ ccw_device_offline(struct ccw_device *cdev)
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
- if (stsch(sch->irq, &sch->schib) || !sch->schib.pmcw.dnv)
+ if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE) {
if (sch->schib.scsw.actl != 0)
@@ -711,7 +714,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
* Since we might not just be coming from an interrupt from the
* subchannel we have to update the schib.
*/
- stsch(sch->irq, &sch->schib);
+ stsch(sch->schid, &sch->schib);
if (sch->schib.scsw.actl != 0 ||
(cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
@@ -923,7 +926,7 @@ ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
/* Iff device is idle, reset timeout. */
sch = to_subchannel(cdev->dev.parent);
- if (!stsch(sch->irq, &sch->schib))
+ if (!stsch(sch->schid, &sch->schib))
if (sch->schib.scsw.actl == 0)
ccw_device_set_timeout(cdev, 0);
/* Call the handler. */
@@ -1035,7 +1038,7 @@ device_trigger_reprobe(struct subchannel *sch)
return;
/* Update some values. */
- if (stsch(sch->irq, &sch->schib))
+ if (stsch(sch->schid, &sch->schib))
return;
/*
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 0e68fb511dc..04ceba343db 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -27,7 +27,7 @@
/*
* diag210 is used under VM to get information about a virtual device
*/
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
int
diag210(struct diag210 * addr)
{
@@ -256,16 +256,17 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
* sense id information. So, for intervention required,
* we use the "whack it until it talks" strategy...
*/
- CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel %04x "
- "reports cmd reject\n",
- cdev->private->devno, sch->irq);
+ CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel "
+ "0.%x.%04x reports cmd reject\n",
+ cdev->private->devno, sch->schid.ssid,
+ sch->schid.sch_no);
return -EOPNOTSUPP;
}
if (irb->esw.esw0.erw.cons) {
- CIO_MSG_EVENT(2, "SenseID : UC on dev %04x, "
+ CIO_MSG_EVENT(2, "SenseID : UC on dev 0.%x.%04x, "
"lpum %02X, cnt %02d, sns :"
" %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
- cdev->private->devno,
+ cdev->private->ssid, cdev->private->devno,
irb->esw.esw0.sublog.lpum,
irb->esw.esw0.erw.scnt,
irb->ecw[0], irb->ecw[1],
@@ -277,16 +278,17 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
if (irb->scsw.cc == 3) {
if ((sch->orb.lpm &
sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
- CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x on"
- " subchannel %04x is 'not operational'\n",
- sch->orb.lpm, cdev->private->devno,
- sch->irq);
+ CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x "
+ "on subchannel 0.%x.%04x is "
+ "'not operational'\n", sch->orb.lpm,
+ cdev->private->devno, sch->schid.ssid,
+ sch->schid.sch_no);
return -EACCES;
}
/* Hmm, whatever happened, try again. */
CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on "
- "subchannel %04x returns status %02X%02X\n",
- cdev->private->devno, sch->irq,
+ "subchannel 0.%x.%04x returns status %02X%02X\n",
+ cdev->private->devno, sch->schid.ssid, sch->schid.sch_no,
irb->scsw.dstat, irb->scsw.cstat);
return -EAGAIN;
}
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 85a3026e690..143b6c25a4e 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -1,7 +1,7 @@
/*
* drivers/s390/cio/device_ops.c
*
- * $Revision: 1.57 $
+ * $Revision: 1.58 $
*
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
* IBM Corporation
@@ -570,7 +570,7 @@ ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
int
_ccw_device_get_subchannel_number(struct ccw_device *cdev)
{
- return cdev->private->irq;
+ return cdev->private->sch_no;
}
int
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 0adac8a6733..052832d03d3 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -22,6 +22,7 @@
#include "cio_debug.h"
#include "css.h"
#include "device.h"
+#include "ioasm.h"
/*
* Start Sense Path Group ID helper function. Used in ccw_device_recog
@@ -56,10 +57,10 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev)
if (ret != -EACCES)
return ret;
CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel "
- "%04x, lpm %02X, became 'not "
+ "0.%x.%04x, lpm %02X, became 'not "
"operational'\n",
- cdev->private->devno, sch->irq,
- cdev->private->imask);
+ cdev->private->devno, sch->schid.ssid,
+ sch->schid.sch_no, cdev->private->imask);
}
cdev->private->imask >>= 1;
@@ -105,10 +106,10 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
return -EOPNOTSUPP;
}
if (irb->esw.esw0.erw.cons) {
- CIO_MSG_EVENT(2, "SNID - device %04x, unit check, "
+ CIO_MSG_EVENT(2, "SNID - device 0.%x.%04x, unit check, "
"lpum %02X, cnt %02d, sns : "
"%02X%02X%02X%02X %02X%02X%02X%02X ...\n",
- cdev->private->devno,
+ cdev->private->ssid, cdev->private->devno,
irb->esw.esw0.sublog.lpum,
irb->esw.esw0.erw.scnt,
irb->ecw[0], irb->ecw[1],
@@ -118,15 +119,17 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
return -EAGAIN;
}
if (irb->scsw.cc == 3) {
- CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel "
- "%04x, lpm %02X, became 'not operational'\n",
- cdev->private->devno, sch->irq, sch->orb.lpm);
+ CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x,"
+ " lpm %02X, became 'not operational'\n",
+ cdev->private->devno, sch->schid.ssid,
+ sch->schid.sch_no, sch->orb.lpm);
return -EACCES;
}
if (cdev->private->pgid.inf.ps.state2 == SNID_STATE2_RESVD_ELSE) {
- CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel %04x "
+ CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x "
"is reserved by someone else\n",
- cdev->private->devno, sch->irq);
+ cdev->private->devno, sch->schid.ssid,
+ sch->schid.sch_no);
return -EUSERS;
}
return 0;
@@ -162,7 +165,7 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
/* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */
case 0: /* Sense Path Group ID successful. */
if (cdev->private->pgid.inf.ps.state1 == SNID_STATE1_RESET)
- memcpy(&cdev->private->pgid, &global_pgid,
+ memcpy(&cdev->private->pgid, &css[0]->global_pgid,
sizeof(struct pgid));
ccw_device_sense_pgid_done(cdev, 0);
break;
@@ -235,8 +238,9 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
sch->lpm &= ~cdev->private->imask;
sch->vpm &= ~cdev->private->imask;
CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
- "%04x, lpm %02X, became 'not operational'\n",
- cdev->private->devno, sch->irq, cdev->private->imask);
+ "0.%x.%04x, lpm %02X, became 'not operational'\n",
+ cdev->private->devno, sch->schid.ssid,
+ sch->schid.sch_no, cdev->private->imask);
return ret;
}
@@ -258,8 +262,10 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
if (irb->ecw[0] & SNS0_CMD_REJECT)
return -EOPNOTSUPP;
/* Hmm, whatever happened, try again. */
- CIO_MSG_EVENT(2, "SPID - device %04x, unit check, cnt %02d, "
+ CIO_MSG_EVENT(2, "SPID - device 0.%x.%04x, unit check, "
+ "cnt %02d, "
"sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
+ cdev->private->ssid,
cdev->private->devno, irb->esw.esw0.erw.scnt,
irb->ecw[0], irb->ecw[1],
irb->ecw[2], irb->ecw[3],
@@ -268,10 +274,10 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
return -EAGAIN;
}
if (irb->scsw.cc == 3) {
- CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
- "%04x, lpm %02X, became 'not operational'\n",
- cdev->private->devno, sch->irq,
- cdev->private->imask);
+ CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel 0.%x.%04x,"
+ " lpm %02X, became 'not operational'\n",
+ cdev->private->devno, sch->schid.ssid,
+ sch->schid.sch_no, cdev->private->imask);
return -EACCES;
}
return 0;
@@ -364,8 +370,22 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
void
ccw_device_verify_start(struct ccw_device *cdev)
{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
cdev->private->flags.pgid_single = 0;
cdev->private->iretry = 5;
+ /*
+ * Update sch->lpm with current values to catch paths becoming
+ * available again.
+ */
+ if (stsch(sch->schid, &sch->schib)) {
+ ccw_device_verify_done(cdev, -ENODEV);
+ return;
+ }
+ sch->lpm = sch->schib.pmcw.pim &
+ sch->schib.pmcw.pam &
+ sch->schib.pmcw.pom &
+ sch->opm;
__ccw_device_verify_start(cdev);
}
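ccw_device_verify_start() now refreshes the schib before path verification so that paths which have become available again show up in sch->lpm; the pim & pam & pom & opm reduction is the same one cio_validate_subchannel() and ccw_device_recog_done() already use. The patch open-codes it, but factored out it is simply:

/* Illustrative only; the patch keeps this open-coded at each call site. */
static int
example_refresh_lpm(struct subchannel *sch)
{
	if (stsch(sch->schid, &sch->schib))
		return -ENODEV;
	sch->lpm = sch->schib.pmcw.pim &
		   sch->schib.pmcw.pam &
		   sch->schib.pmcw.pom &
		   sch->opm;
	return 0;
}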
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 12a24d4331a..db09c209098 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -36,15 +36,16 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
"received"
- " ... device %04X on subchannel %04X, dev_stat "
+ " ... device %04x on subchannel 0.%x.%04x, dev_stat "
": %02X sch_stat : %02X\n",
- cdev->private->devno, cdev->private->irq,
+ cdev->private->devno, cdev->private->ssid,
+ cdev->private->sch_no,
irb->scsw.dstat, irb->scsw.cstat);
if (irb->scsw.cc != 3) {
char dbf_text[15];
- sprintf(dbf_text, "chk%x", cdev->private->irq);
+ sprintf(dbf_text, "chk%x", cdev->private->sch_no);
CIO_TRACE_EVENT(0, dbf_text);
CIO_HEX_EVENT(0, irb, sizeof (struct irb));
}
@@ -59,10 +60,11 @@ ccw_device_path_notoper(struct ccw_device *cdev)
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
- stsch (sch->irq, &sch->schib);
+ stsch (sch->schid, &sch->schib);
- CIO_MSG_EVENT(0, "%s(%04x) - path(s) %02x are "
- "not operational \n", __FUNCTION__, sch->irq,
+ CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
+ "not operational \n", __FUNCTION__,
+ sch->schid.ssid, sch->schid.sch_no,
sch->schib.pmcw.pnom);
sch->lpm &= ~sch->schib.pmcw.pnom;
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 45480a2bc4c..95a9462f9a9 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -1,12 +1,13 @@
#ifndef S390_CIO_IOASM_H
#define S390_CIO_IOASM_H
+#include "schid.h"
+
/*
* TPI info structure
*/
struct tpi_info {
- __u32 reserved1 : 16; /* reserved 0x00000001 */
- __u32 irq : 16; /* aka. subchannel number */
+ struct subchannel_id schid;
__u32 intparm; /* interruption parameter */
__u32 adapter_IO : 1;
__u32 reserved2 : 1;
@@ -21,7 +22,8 @@ struct tpi_info {
* Some S390 specific IO instructions as inline
*/
-static inline int stsch(int irq, volatile struct schib *addr)
+static inline int stsch(struct subchannel_id schid,
+ volatile struct schib *addr)
{
int ccode;
@@ -31,12 +33,42 @@ static inline int stsch(int irq, volatile struct schib *addr)
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
- : "d" (irq | 0x10000), "a" (addr)
+ : "d" (schid), "a" (addr), "m" (*addr)
+ : "cc", "1" );
+ return ccode;
+}
+
+static inline int stsch_err(struct subchannel_id schid,
+ volatile struct schib *addr)
+{
+ int ccode;
+
+ __asm__ __volatile__(
+ " lhi %0,%3\n"
+ " lr 1,%1\n"
+ " stsch 0(%2)\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1:\n"
+#ifdef CONFIG_64BIT
+ ".section __ex_table,\"a\"\n"
+ " .align 8\n"
+ " .quad 0b,1b\n"
+ ".previous"
+#else
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 0b,1b\n"
+ ".previous"
+#endif
+ : "=&d" (ccode)
+ : "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr)
: "cc", "1" );
return ccode;
}
-static inline int msch(int irq, volatile struct schib *addr)
+static inline int msch(struct subchannel_id schid,
+ volatile struct schib *addr)
{
int ccode;
@@ -46,12 +78,13 @@ static inline int msch(int irq, volatile struct schib *addr)
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
- : "d" (irq | 0x10000L), "a" (addr)
+ : "d" (schid), "a" (addr), "m" (*addr)
: "cc", "1" );
return ccode;
}
-static inline int msch_err(int irq, volatile struct schib *addr)
+static inline int msch_err(struct subchannel_id schid,
+ volatile struct schib *addr)
{
int ccode;
@@ -62,7 +95,7 @@ static inline int msch_err(int irq, volatile struct schib *addr)
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
".section __ex_table,\"a\"\n"
" .align 8\n"
" .quad 0b,1b\n"
@@ -74,12 +107,13 @@ static inline int msch_err(int irq, volatile struct schib *addr)
".previous"
#endif
: "=&d" (ccode)
- : "d" (irq | 0x10000L), "a" (addr), "K" (-EIO)
+ : "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr)
: "cc", "1" );
return ccode;
}
-static inline int tsch(int irq, volatile struct irb *addr)
+static inline int tsch(struct subchannel_id schid,
+ volatile struct irb *addr)
{
int ccode;
@@ -89,7 +123,7 @@ static inline int tsch(int irq, volatile struct irb *addr)
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
- : "d" (irq | 0x10000L), "a" (addr)
+ : "d" (schid), "a" (addr), "m" (*addr)
: "cc", "1" );
return ccode;
}
@@ -103,12 +137,13 @@ static inline int tpi( volatile struct tpi_info *addr)
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
- : "a" (addr)
+ : "a" (addr), "m" (*addr)
: "cc", "1" );
return ccode;
}
-static inline int ssch(int irq, volatile struct orb *addr)
+static inline int ssch(struct subchannel_id schid,
+ volatile struct orb *addr)
{
int ccode;
@@ -118,12 +153,12 @@ static inline int ssch(int irq, volatile struct orb *addr)
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
- : "d" (irq | 0x10000L), "a" (addr)
+ : "d" (schid), "a" (addr), "m" (*addr)
: "cc", "1" );
return ccode;
}
-static inline int rsch(int irq)
+static inline int rsch(struct subchannel_id schid)
{
int ccode;
@@ -133,12 +168,12 @@ static inline int rsch(int irq)
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
- : "d" (irq | 0x10000L)
+ : "d" (schid)
: "cc", "1" );
return ccode;
}
-static inline int csch(int irq)
+static inline int csch(struct subchannel_id schid)
{
int ccode;
@@ -148,12 +183,12 @@ static inline int csch(int irq)
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
- : "d" (irq | 0x10000L)
+ : "d" (schid)
: "cc", "1" );
return ccode;
}
-static inline int hsch(int irq)
+static inline int hsch(struct subchannel_id schid)
{
int ccode;
@@ -163,12 +198,12 @@ static inline int hsch(int irq)
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
- : "d" (irq | 0x10000L)
+ : "d" (schid)
: "cc", "1" );
return ccode;
}
-static inline int xsch(int irq)
+static inline int xsch(struct subchannel_id schid)
{
int ccode;
@@ -178,21 +213,22 @@ static inline int xsch(int irq)
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
- : "d" (irq | 0x10000L)
+ : "d" (schid)
: "cc", "1" );
return ccode;
}
static inline int chsc(void *chsc_area)
{
+ typedef struct { char _[4096]; } addr_type;
int cc;
__asm__ __volatile__ (
- ".insn rre,0xb25f0000,%1,0 \n\t"
+ ".insn rre,0xb25f0000,%2,0 \n\t"
"ipm %0 \n\t"
"srl %0,28 \n\t"
- : "=d" (cc)
- : "d" (chsc_area)
+ : "=d" (cc), "=m" (*(addr_type *) chsc_area)
+ : "d" (chsc_area), "m" (*(addr_type *) chsc_area)
: "cc" );
return cc;
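The ioasm helpers now take the subchannel_id by value in a register operand instead of OR-ing 0x10000 into an int, and they gain "m" (*addr) operands so the compiler knows the schib/irb is actually read or written by the instruction rather than being touched only through the address register. stsch also gets an _err variant whose exception table converts a program check into -EIO; that is what lets cio_validate_subchannel() above tell an invalid subchannel set (-EIO) apart from a not-operational subchannel (condition code 3). A small sketch of a caller making that distinction, mirroring the logic above:

/* Illustrative only: probe one subchannel id with stsch_err(). */
static int
example_probe(struct subchannel_id schid, struct schib *schib)
{
	int ccode;

	ccode = stsch_err(schid, schib);
	if (ccode == 0)
		return 0;		/* schib stored */
	if (ccode == 3)
		return -ENXIO;		/* no more subchannels here */
	return ccode;			/* -EIO: set invalid; 1/2: per cc */
}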
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index eb39218b925..30a836ffc31 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -56,7 +56,7 @@
#include "ioasm.h"
#include "chsc.h"
-#define VERSION_QDIO_C "$Revision: 1.108 $"
+#define VERSION_QDIO_C "$Revision: 1.114 $"
/****************** MODULE PARAMETER VARIABLES ********************/
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
@@ -76,6 +76,7 @@ static struct qdio_perf_stats perf_stats;
#endif /* QDIO_PERFORMANCE_STATS */
static int hydra_thinints;
+static int is_passthrough = 0;
static int omit_svs;
static int indicator_used[INDICATORS_PER_CACHELINE];
@@ -136,12 +137,126 @@ qdio_release_q(struct qdio_q *q)
atomic_dec(&q->use_count);
}
-static volatile inline void
-qdio_set_slsb(volatile char *slsb, unsigned char value)
+/*check ccq */
+static inline int
+qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
+{
+ char dbf_text[15];
+
+ if (ccq == 0 || ccq == 32 || ccq == 96)
+ return 0;
+ if (ccq == 97)
+ return 1;
+ /*notify devices immediately*/
+ sprintf(dbf_text,"%d", ccq);
+ QDIO_DBF_TEXT2(1,trace,dbf_text);
+ return -EIO;
+}
+/* EQBS: extract buffer states */
+static inline int
+qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
+ unsigned int *start, unsigned int *cnt)
+{
+ struct qdio_irq *irq;
+ unsigned int tmp_cnt, q_no, ccq;
+ int rc ;
+ char dbf_text[15];
+
+ ccq = 0;
+ tmp_cnt = *cnt;
+ irq = (struct qdio_irq*)q->irq_ptr;
+ q_no = q->q_no;
+ if(!q->is_input_q)
+ q_no += irq->no_input_qs;
+ ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt);
+ rc = qdio_check_ccq(q, ccq);
+ if (rc < 0) {
+ QDIO_DBF_TEXT2(1,trace,"eqberr");
+ sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no);
+ QDIO_DBF_TEXT2(1,trace,dbf_text);
+ q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
+ QDIO_STATUS_LOOK_FOR_ERROR,
+ 0, 0, 0, -1, -1, q->int_parm);
+ return 0;
+ }
+ return (tmp_cnt - *cnt);
+}
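qdio_do_eqbs() wraps the EQBS operation (do_eqbs() itself is defined elsewhere) for QEBSM-capable devices: on entry *cnt says how many buffer states to extract starting at *start, on return it holds how many are left over, so the result is the number of buffers whose common state came back in *state. Without QEBSM the SLSB is read directly, as the later hunks in qdio_stop_polling() and tiqdio_is_inbound_q_done() do. A hedged sketch of a caller:

/* Illustrative only: is the next inbound buffer primed? */
static int
example_inbound_primed(struct qdio_q *q)
{
	struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
	unsigned int start = q->first_to_check;
	unsigned int count = 1;
	unsigned char state = 0;

	if (irq->is_qebsm)
		qdio_do_eqbs(q, &state, &start, &count);
	else
		state = q->slsb.acc.val[start];
	return state == SLSB_P_INPUT_PRIMED;
}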
+
+/* SQBS: set buffer states */
+static inline int
+qdio_do_sqbs(struct qdio_q *q, unsigned char state,
+ unsigned int *start, unsigned int *cnt)
{
- xchg((char*)slsb,value);
+ struct qdio_irq *irq;
+ unsigned int tmp_cnt, q_no, ccq;
+ int rc;
+ char dbf_text[15];
+
+ ccq = 0;
+ tmp_cnt = *cnt;
+ irq = (struct qdio_irq*)q->irq_ptr;
+ q_no = q->q_no;
+ if(!q->is_input_q)
+ q_no += irq->no_input_qs;
+ ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt);
+ rc = qdio_check_ccq(q, ccq);
+ if (rc < 0) {
+ QDIO_DBF_TEXT3(1,trace,"sqberr");
+ sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt,*cnt,ccq,q_no);
+ QDIO_DBF_TEXT3(1,trace,dbf_text);
+ q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
+ QDIO_STATUS_LOOK_FOR_ERROR,
+ 0, 0, 0, -1, -1, q->int_parm);
+ return 0;
+ }
+ return (tmp_cnt - *cnt);
}
+static inline int
+qdio_set_slsb(struct qdio_q *q, unsigned int *bufno,
+ unsigned char state, unsigned int *count)
+{
+ volatile char *slsb;
+ struct qdio_irq *irq;
+
+ irq = (struct qdio_irq*)q->irq_ptr;
+ if (!irq->is_qebsm) {
+ slsb = (char *)&q->slsb.acc.val[(*bufno)];
+ xchg(slsb, state);
+ return 1;
+ }
+ return qdio_do_sqbs(q, state, bufno, count);
+}
+
+#ifdef CONFIG_QDIO_DEBUG
+static inline void
+qdio_trace_slsb(struct qdio_q *q)
+{
+ if (q->queue_type==QDIO_TRACE_QTYPE) {
+ if (q->is_input_q)
+ QDIO_DBF_HEX2(0,slsb_in,&q->slsb,
+ QDIO_MAX_BUFFERS_PER_Q);
+ else
+ QDIO_DBF_HEX2(0,slsb_out,&q->slsb,
+ QDIO_MAX_BUFFERS_PER_Q);
+ }
+}
+#endif
+
+static inline int
+set_slsb(struct qdio_q *q, unsigned int *bufno,
+ unsigned char state, unsigned int *count)
+{
+ int rc;
+#ifdef CONFIG_QDIO_DEBUG
+ qdio_trace_slsb(q);
+#endif
+ rc = qdio_set_slsb(q, bufno, state, count);
+#ifdef CONFIG_QDIO_DEBUG
+ qdio_trace_slsb(q);
+#endif
+ return rc;
+}
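set_slsb() changes from poking a single SLSB byte to taking the queue, a starting buffer number and a count: without QEBSM it still does one xchg and reports a single buffer, with QEBSM it hands the whole range to SQBS and returns how many buffers were actually set. A small usage sketch with example parameters:

/* Illustrative only: mark 'count' buffers not-initialized from 'bufno' on. */
static int
example_ack_buffers(struct qdio_q *q, unsigned int bufno, unsigned int count)
{
	/*
	 * Without QEBSM only 'bufno' is touched and the return value is 1;
	 * with QEBSM the return value is the number of buffers SQBS set.
	 */
	return set_slsb(q, &bufno, SLSB_P_INPUT_NOT_INIT, &count);
}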
static inline int
qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
unsigned int gpr3)
@@ -155,7 +270,7 @@ qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
perf_stats.siga_syncs++;
#endif /* QDIO_PERFORMANCE_STATS */
- cc = do_siga_sync(q->irq, gpr2, gpr3);
+ cc = do_siga_sync(q->schid, gpr2, gpr3);
if (cc)
QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
@@ -170,6 +285,23 @@ qdio_siga_sync_q(struct qdio_q *q)
return qdio_siga_sync(q, q->mask, 0);
}
+static int
+__do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
+{
+ struct qdio_irq *irq;
+ unsigned int fc = 0;
+ unsigned long schid;
+
+ irq = (struct qdio_irq *) q->irq_ptr;
+ if (!irq->is_qebsm)
+ schid = *((u32 *)&q->schid);
+ else {
+ schid = irq->sch_token;
+ fc |= 0x80;
+ }
+ return do_siga_output(schid, q->mask, busy_bit, fc);
+}
+
/*
* returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
* an access exception
@@ -189,7 +321,7 @@ qdio_siga_output(struct qdio_q *q)
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
for (;;) {
- cc = do_siga_output(q->irq, q->mask, &busy_bit);
+ cc = __do_siga_output(q, &busy_bit);
//QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit);
if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) {
if (!start_time)
@@ -221,7 +353,7 @@ qdio_siga_input(struct qdio_q *q)
perf_stats.siga_ins++;
#endif /* QDIO_PERFORMANCE_STATS */
- cc = do_siga_input(q->irq, q->mask);
+ cc = do_siga_input(q->schid, q->mask);
if (cc)
QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
@@ -230,7 +362,7 @@ qdio_siga_input(struct qdio_q *q)
}
/* locked by the locks in qdio_activate and qdio_cleanup */
-static __u32 volatile *
+static __u32 *
qdio_get_indicator(void)
{
int i;
@@ -258,7 +390,7 @@ qdio_put_indicator(__u32 *addr)
atomic_dec(&spare_indicator_usecount);
}
-static inline volatile void
+static inline void
tiqdio_clear_summary_bit(__u32 *location)
{
QDIO_DBF_TEXT5(0,trace,"clrsummb");
@@ -267,7 +399,7 @@ tiqdio_clear_summary_bit(__u32 *location)
xchg(location,0);
}
-static inline volatile void
+static inline void
tiqdio_set_summary_bit(__u32 *location)
{
QDIO_DBF_TEXT5(0,trace,"setsummb");
@@ -336,7 +468,9 @@ static inline int
qdio_stop_polling(struct qdio_q *q)
{
#ifdef QDIO_USE_PROCESSING_STATE
- int gsf;
+ unsigned int tmp, gsf, count = 1;
+ unsigned char state = 0;
+ struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
if (!atomic_swap(&q->polling,0))
return 1;
@@ -348,17 +482,22 @@ qdio_stop_polling(struct qdio_q *q)
if (!q->is_input_q)
return 1;
- gsf=GET_SAVED_FRONTIER(q);
- set_slsb(&q->slsb.acc.val[(gsf+QDIO_MAX_BUFFERS_PER_Q-1)&
- (QDIO_MAX_BUFFERS_PER_Q-1)],
- SLSB_P_INPUT_NOT_INIT);
+ tmp = gsf = GET_SAVED_FRONTIER(q);
+ tmp = ((tmp + QDIO_MAX_BUFFERS_PER_Q-1) & (QDIO_MAX_BUFFERS_PER_Q-1) );
+ set_slsb(q, &tmp, SLSB_P_INPUT_NOT_INIT, &count);
+
/*
* we don't issue this SYNC_MEMORY, as we trust Rick T and
* moreover will not use the PROCESSING state under VM, so
* q->polling was 0 anyway
*/
/*SYNC_MEMORY;*/
- if (q->slsb.acc.val[gsf]!=SLSB_P_INPUT_PRIMED)
+ if (irq->is_qebsm) {
+ count = 1;
+ qdio_do_eqbs(q, &state, &gsf, &count);
+ } else
+ state = q->slsb.acc.val[gsf];
+ if (state != SLSB_P_INPUT_PRIMED)
return 1;
/*
* set our summary bit again, as otherwise there is a
@@ -431,18 +570,136 @@ tiqdio_clear_global_summary(void)
/************************* OUTBOUND ROUTINES *******************************/
+static int
+qdio_qebsm_get_outbound_buffer_frontier(struct qdio_q *q)
+{
+ struct qdio_irq *irq;
+ unsigned char state;
+ unsigned int cnt, count, ftc;
+
+ irq = (struct qdio_irq *) q->irq_ptr;
+ if ((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis))
+ SYNC_MEMORY;
+
+ ftc = q->first_to_check;
+ count = qdio_min(atomic_read(&q->number_of_buffers_used),
+ (QDIO_MAX_BUFFERS_PER_Q-1));
+ if (count == 0)
+ return q->first_to_check;
+ cnt = qdio_do_eqbs(q, &state, &ftc, &count);
+ if (cnt == 0)
+ return q->first_to_check;
+ switch (state) {
+ case SLSB_P_OUTPUT_ERROR:
+ QDIO_DBF_TEXT3(0,trace,"outperr");
+ atomic_sub(cnt , &q->number_of_buffers_used);
+ if (q->qdio_error)
+ q->error_status_flags |=
+ QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
+ q->qdio_error = SLSB_P_OUTPUT_ERROR;
+ q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
+ q->first_to_check = ftc;
+ break;
+ case SLSB_P_OUTPUT_EMPTY:
+ QDIO_DBF_TEXT5(0,trace,"outpempt");
+ atomic_sub(cnt, &q->number_of_buffers_used);
+ q->first_to_check = ftc;
+ break;
+ case SLSB_CU_OUTPUT_PRIMED:
+ /* all buffers primed */
+ QDIO_DBF_TEXT5(0,trace,"outpprim");
+ break;
+ default:
+ break;
+ }
+ QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
+ return q->first_to_check;
+}
+
+static int
+qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q)
+{
+ struct qdio_irq *irq;
+ unsigned char state;
+ int tmp, ftc, count, cnt;
+ char dbf_text[15];
+
+
+ irq = (struct qdio_irq *) q->irq_ptr;
+ ftc = q->first_to_check;
+ count = qdio_min(atomic_read(&q->number_of_buffers_used),
+ (QDIO_MAX_BUFFERS_PER_Q-1));
+ if (count == 0)
+ return q->first_to_check;
+ cnt = qdio_do_eqbs(q, &state, &ftc, &count);
+ if (cnt == 0)
+ return q->first_to_check;
+ switch (state) {
+ case SLSB_P_INPUT_ERROR :
+#ifdef CONFIG_QDIO_DEBUG
+ QDIO_DBF_TEXT3(1,trace,"inperr");
+ sprintf(dbf_text,"%2x,%2x",ftc,count);
+ QDIO_DBF_TEXT3(1,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+ if (q->qdio_error)
+ q->error_status_flags |=
+ QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
+ q->qdio_error = SLSB_P_INPUT_ERROR;
+ q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
+ atomic_sub(cnt, &q->number_of_buffers_used);
+ q->first_to_check = ftc;
+ break;
+ case SLSB_P_INPUT_PRIMED :
+ QDIO_DBF_TEXT3(0,trace,"inptprim");
+ sprintf(dbf_text,"%2x,%2x",ftc,count);
+ QDIO_DBF_TEXT3(1,trace,dbf_text);
+ tmp = 0;
+ ftc = q->first_to_check;
+#ifdef QDIO_USE_PROCESSING_STATE
+ if (cnt > 1) {
+ cnt -= 1;
+ tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
+ if (!tmp)
+ break;
+ }
+ cnt = 1;
+ tmp += set_slsb(q, &ftc,
+ SLSB_P_INPUT_PROCESSING, &cnt);
+ atomic_set(&q->polling, 1);
+#else
+ tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
+#endif
+ atomic_sub(tmp, &q->number_of_buffers_used);
+ q->first_to_check = ftc;
+ break;
+ case SLSB_CU_INPUT_EMPTY:
+ case SLSB_P_INPUT_NOT_INIT:
+ case SLSB_P_INPUT_PROCESSING:
+ QDIO_DBF_TEXT5(0,trace,"inpnipro");
+ break;
+ default:
+ break;
+ }
+ QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
+ return q->first_to_check;
+}
static inline int
qdio_get_outbound_buffer_frontier(struct qdio_q *q)
{
- int f,f_mod_no;
- volatile char *slsb;
- int first_not_to_check;
+ struct qdio_irq *irq;
+ volatile char *slsb;
+ unsigned int count = 1;
+ int first_not_to_check, f, f_mod_no;
char dbf_text[15];
QDIO_DBF_TEXT4(0,trace,"getobfro");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+ irq = (struct qdio_irq *) q->irq_ptr;
+ if (irq->is_qebsm)
+ return qdio_qebsm_get_outbound_buffer_frontier(q);
+
slsb=&q->slsb.acc.val[0];
f_mod_no=f=q->first_to_check;
/*
@@ -484,7 +741,7 @@ check_next:
QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
/* kind of process the buffer */
- set_slsb(&q->slsb.acc.val[f_mod_no], SLSB_P_OUTPUT_NOT_INIT);
+ set_slsb(q, &f_mod_no, SLSB_P_OUTPUT_NOT_INIT, &count);
/*
* we increment the frontier, as this buffer
@@ -597,48 +854,48 @@ qdio_kick_outbound_q(struct qdio_q *q)
result=qdio_siga_output(q);
- switch (result) {
- case 0:
- /* went smooth this time, reset timestamp */
+ switch (result) {
+ case 0:
+ /* went smooth this time, reset timestamp */
#ifdef CONFIG_QDIO_DEBUG
- QDIO_DBF_TEXT3(0,trace,"cc2reslv");
- sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
- atomic_read(&q->busy_siga_counter));
- QDIO_DBF_TEXT3(0,trace,dbf_text);
+ QDIO_DBF_TEXT3(0,trace,"cc2reslv");
+ sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
+ atomic_read(&q->busy_siga_counter));
+ QDIO_DBF_TEXT3(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
- q->timing.busy_start=0;
+ q->timing.busy_start=0;
+ break;
+ case (2|QDIO_SIGA_ERROR_B_BIT_SET):
+ /* cc=2 and busy bit: */
+ atomic_inc(&q->busy_siga_counter);
+
+ /* if the last siga was successful, save
+ * timestamp here */
+ if (!q->timing.busy_start)
+ q->timing.busy_start=NOW;
+
+ /* if we're in time, don't touch error_status_flags
+ * and siga_error */
+ if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
+ qdio_mark_q(q);
break;
- case (2|QDIO_SIGA_ERROR_B_BIT_SET):
- /* cc=2 and busy bit: */
- atomic_inc(&q->busy_siga_counter);
-
- /* if the last siga was successful, save
- * timestamp here */
- if (!q->timing.busy_start)
- q->timing.busy_start=NOW;
-
- /* if we're in time, don't touch error_status_flags
- * and siga_error */
- if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
- qdio_mark_q(q);
- break;
- }
- QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
+ }
+ QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
#ifdef CONFIG_QDIO_DEBUG
- sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
- atomic_read(&q->busy_siga_counter));
- QDIO_DBF_TEXT3(0,trace,dbf_text);
+ sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
+ atomic_read(&q->busy_siga_counter));
+ QDIO_DBF_TEXT3(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
- /* else fallthrough and report error */
- default:
- /* for plain cc=1, 2 or 3: */
- if (q->siga_error)
- q->error_status_flags|=
- QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
+ /* else fallthrough and report error */
+ default:
+ /* for plain cc=1, 2 or 3: */
+ if (q->siga_error)
q->error_status_flags|=
- QDIO_STATUS_LOOK_FOR_ERROR;
- q->siga_error=result;
- }
+ QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
+ q->error_status_flags|=
+ QDIO_STATUS_LOOK_FOR_ERROR;
+ q->siga_error=result;
+ }
}
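The reworked qdio_kick_outbound_q() retries while SIGA reports cc=2 with the busy bit set: the first busy condition records a timestamp, later ones within QDIO_BUSY_BIT_GIVE_UP merely re-mark the queue, and only after the window expires is the error reported. A compact sketch of that give-up-after-a-while pattern, using invented names and a millisecond clock in place of the driver's NOW macro:

#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define BUSY_GIVE_UP_MS 2000          /* illustrative threshold */

struct out_q {
    long long busy_start_ms;          /* 0 means the last SIGA succeeded */
};

static long long now_ms(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000LL;
}

/* Returns true while the caller should keep retrying, false once it
 * should give up and flag the error, mirroring the cc=2/busy branch. */
static bool handle_busy(struct out_q *q)
{
    if (!q->busy_start_ms)            /* first busy since last success */
        q->busy_start_ms = now_ms();
    return now_ms() - q->busy_start_ms < BUSY_GIVE_UP_MS;
}

int main(void)
{
    struct out_q q = { 0 };

    printf("retry: %d\n", handle_busy(&q));   /* 1: inside the window */
    q.busy_start_ms -= 2 * BUSY_GIVE_UP_MS;   /* pretend time passed */
    printf("retry: %d\n", handle_busy(&q));   /* 0: give up, report error */
    return 0;
}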
static inline void
@@ -743,8 +1000,10 @@ qdio_outbound_processing(struct qdio_q *q)
static inline int
qdio_get_inbound_buffer_frontier(struct qdio_q *q)
{
+ struct qdio_irq *irq;
int f,f_mod_no;
volatile char *slsb;
+ unsigned int count = 1;
int first_not_to_check;
#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
@@ -756,6 +1015,10 @@ qdio_get_inbound_buffer_frontier(struct qdio_q *q)
QDIO_DBF_TEXT4(0,trace,"getibfro");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+ irq = (struct qdio_irq *) q->irq_ptr;
+ if (irq->is_qebsm)
+ return qdio_qebsm_get_inbound_buffer_frontier(q);
+
slsb=&q->slsb.acc.val[0];
f_mod_no=f=q->first_to_check;
/*
@@ -792,19 +1055,19 @@ check_next:
* kill VM in terms of CP overhead
*/
if (q->siga_sync) {
- set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
+ set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
} else {
/* set the previous buffer to NOT_INIT. The current
* buffer will be set to PROCESSING at the end of
* this function to avoid further interrupts. */
if (last_position>=0)
- set_slsb(&slsb[last_position],
- SLSB_P_INPUT_NOT_INIT);
+ set_slsb(q, &last_position,
+ SLSB_P_INPUT_NOT_INIT, &count);
atomic_set(&q->polling,1);
last_position=f_mod_no;
}
#else /* QDIO_USE_PROCESSING_STATE */
- set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
+ set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
#endif /* QDIO_USE_PROCESSING_STATE */
/*
* not needed, as the inbound queue will be synced on the next
@@ -829,7 +1092,7 @@ check_next:
QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
/* kind of process the buffer */
- set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
+ set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
if (q->qdio_error)
q->error_status_flags|=
@@ -857,7 +1120,7 @@ out:
#ifdef QDIO_USE_PROCESSING_STATE
if (last_position>=0)
- set_slsb(&slsb[last_position],SLSB_P_INPUT_PROCESSING);
+ set_slsb(q, &last_position, SLSB_P_INPUT_NOT_INIT, &count);
#endif /* QDIO_USE_PROCESSING_STATE */
QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
@@ -902,6 +1165,10 @@ static inline int
tiqdio_is_inbound_q_done(struct qdio_q *q)
{
int no_used;
+ unsigned int start_buf, count;
+ unsigned char state = 0;
+ struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
+
#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
#endif
@@ -927,8 +1194,13 @@ tiqdio_is_inbound_q_done(struct qdio_q *q)
if (!q->siga_sync)
/* we'll check for more primed buffers in qeth_stop_polling */
return 0;
-
- if (q->slsb.acc.val[q->first_to_check]!=SLSB_P_INPUT_PRIMED)
+ if (irq->is_qebsm) {
+ count = 1;
+ start_buf = q->first_to_check;
+ qdio_do_eqbs(q, &state, &start_buf, &count);
+ } else
+ state = q->slsb.acc.val[q->first_to_check];
+ if (state != SLSB_P_INPUT_PRIMED)
/*
* nothing more to do, if next buffer is not PRIMED.
* note that we did a SYNC_MEMORY before, that there
@@ -955,6 +1227,10 @@ static inline int
qdio_is_inbound_q_done(struct qdio_q *q)
{
int no_used;
+ unsigned int start_buf, count;
+ unsigned char state = 0;
+ struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
+
#ifdef CONFIG_QDIO_DEBUG
char dbf_text[15];
#endif
@@ -973,8 +1249,13 @@ qdio_is_inbound_q_done(struct qdio_q *q)
QDIO_DBF_TEXT4(0,trace,dbf_text);
return 1;
}
-
- if (q->slsb.acc.val[q->first_to_check]==SLSB_P_INPUT_PRIMED) {
+ if (irq->is_qebsm) {
+ count = 1;
+ start_buf = q->first_to_check;
+ qdio_do_eqbs(q, &state, &start_buf, &count);
+ } else
+ state = q->slsb.acc.val[q->first_to_check];
+ if (state == SLSB_P_INPUT_PRIMED) {
/* we got something to do */
QDIO_DBF_TEXT4(0,trace,"inqisntA");
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
@@ -1456,7 +1737,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
void *ptr;
int available;
- sprintf(dbf_text,"qfqs%4x",cdev->private->irq);
+ sprintf(dbf_text,"qfqs%4x",cdev->private->sch_no);
QDIO_DBF_TEXT0(0,setup,dbf_text);
for (i=0;i<no_input_qs;i++) {
q=irq_ptr->input_qs[i];
@@ -1476,7 +1757,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
q->queue_type=q_format;
q->int_parm=int_parm;
- q->irq=irq_ptr->irq;
+ q->schid = irq_ptr->schid;
q->irq_ptr = irq_ptr;
q->cdev = cdev;
q->mask=1<<(31-i);
@@ -1523,11 +1804,11 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
/* fill in slsb */
- for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) {
- set_slsb(&q->slsb.acc.val[j],
- SLSB_P_INPUT_NOT_INIT);
-/* q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/
- }
+ if (!irq_ptr->is_qebsm) {
+ unsigned int count = 1;
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+ set_slsb(q, &j, SLSB_P_INPUT_NOT_INIT, &count);
+ }
}
for (i=0;i<no_output_qs;i++) {
@@ -1549,7 +1830,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
q->queue_type=q_format;
q->int_parm=int_parm;
q->is_input_q=0;
- q->irq=irq_ptr->irq;
+ q->schid = irq_ptr->schid;
q->cdev = cdev;
q->irq_ptr = irq_ptr;
q->mask=1<<(31-i);
@@ -1584,11 +1865,11 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
/* fill in slsb */
- for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) {
- set_slsb(&q->slsb.acc.val[j],
- SLSB_P_OUTPUT_NOT_INIT);
-/* q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/
- }
+ if (!irq_ptr->is_qebsm) {
+ unsigned int count = 1;
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+ set_slsb(q, &j, SLSB_P_OUTPUT_NOT_INIT, &count);
+ }
}
}
@@ -1656,7 +1937,7 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
char dbf_text[15];
QDIO_DBF_TEXT5(0,trace,"newstate");
- sprintf(dbf_text,"%4x%4x",irq_ptr->irq,state);
+ sprintf(dbf_text,"%4x%4x",irq_ptr->schid.sch_no,state);
QDIO_DBF_TEXT5(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
@@ -1669,12 +1950,12 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
}
static inline void
-qdio_irq_check_sense(int irq, struct irb *irb)
+qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
{
char dbf_text[15];
if (irb->esw.esw0.erw.cons) {
- sprintf(dbf_text,"sens%4x",irq);
+ sprintf(dbf_text,"sens%4x",schid.sch_no);
QDIO_DBF_TEXT2(1,trace,dbf_text);
QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);
@@ -1785,21 +2066,22 @@ qdio_timeout_handler(struct ccw_device *cdev)
switch (irq_ptr->state) {
case QDIO_IRQ_STATE_INACTIVE:
- QDIO_PRINT_ERR("establish queues on irq %04x: timed out\n",
- irq_ptr->irq);
+ QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: timed out\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
QDIO_DBF_TEXT2(1,setup,"eq:timeo");
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
break;
case QDIO_IRQ_STATE_CLEANUP:
- QDIO_PRINT_INFO("Did not get interrupt on cleanup, irq=0x%x.\n",
- irq_ptr->irq);
+ QDIO_PRINT_INFO("Did not get interrupt on cleanup, "
+ "irq=0.%x.%x.\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
break;
case QDIO_IRQ_STATE_ESTABLISHED:
case QDIO_IRQ_STATE_ACTIVE:
/* I/O has been terminated by common I/O layer. */
- QDIO_PRINT_INFO("Queues on irq %04x killed by cio.\n",
- irq_ptr->irq);
+ QDIO_PRINT_INFO("Queues on irq 0.%x.%04x killed by cio.\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
QDIO_DBF_TEXT2(1, trace, "cio:term");
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
if (get_device(&cdev->dev)) {
@@ -1862,7 +2144,7 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
}
}
- qdio_irq_check_sense(irq_ptr->irq, irb);
+ qdio_irq_check_sense(irq_ptr->schid, irb);
#ifdef CONFIG_QDIO_DEBUG
sprintf(dbf_text, "state:%d", irq_ptr->state);
@@ -1905,7 +2187,7 @@ int
qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
unsigned int queue_number)
{
- int cc;
+ int cc = 0;
struct qdio_q *q;
struct qdio_irq *irq_ptr;
void *ptr;
@@ -1918,7 +2200,7 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
return -ENODEV;
#ifdef CONFIG_QDIO_DEBUG
- *((int*)(&dbf_text[4])) = irq_ptr->irq;
+ *((int*)(&dbf_text[4])) = irq_ptr->schid.sch_no;
QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
*((int*)(&dbf_text[0]))=flags;
*((int*)(&dbf_text[4]))=queue_number;
@@ -1929,12 +2211,14 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
q=irq_ptr->input_qs[queue_number];
if (!q)
return -EINVAL;
- cc = do_siga_sync(q->irq, 0, q->mask);
+ if (!(irq_ptr->is_qebsm))
+ cc = do_siga_sync(q->schid, 0, q->mask);
} else if (flags&QDIO_FLAG_SYNC_OUTPUT) {
q=irq_ptr->output_qs[queue_number];
if (!q)
return -EINVAL;
- cc = do_siga_sync(q->irq, q->mask, 0);
+ if (!(irq_ptr->is_qebsm))
+ cc = do_siga_sync(q->schid, q->mask, 0);
} else
return -EINVAL;
@@ -1945,15 +2229,54 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
return cc;
}
-static unsigned char
-qdio_check_siga_needs(int sch)
+static inline void
+qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
+ unsigned long token)
+{
+ struct qdio_q *q;
+ int i;
+ unsigned int count, start_buf;
+ char dbf_text[15];
+
+ /* check if QEBSM is disabled */
+ if (!(irq_ptr->is_qebsm) || !(qdioac & 0x01)) {
+ irq_ptr->is_qebsm = 0;
+ irq_ptr->sch_token = 0;
+ irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
+ QDIO_DBF_TEXT0(0,setup,"noV=V");
+ return;
+ }
+ irq_ptr->sch_token = token;
+ /* input queue */
+ for (i = 0; i < irq_ptr->no_input_qs;i++) {
+ q = irq_ptr->input_qs[i];
+ count = QDIO_MAX_BUFFERS_PER_Q;
+ start_buf = 0;
+ set_slsb(q, &start_buf, SLSB_P_INPUT_NOT_INIT, &count);
+ }
+ sprintf(dbf_text,"V=V:%2x",irq_ptr->is_qebsm);
+ QDIO_DBF_TEXT0(0,setup,dbf_text);
+ sprintf(dbf_text,"%8lx",irq_ptr->sch_token);
+ QDIO_DBF_TEXT0(0,setup,dbf_text);
+ /* output queue */
+ for (i = 0; i < irq_ptr->no_output_qs; i++) {
+ q = irq_ptr->output_qs[i];
+ count = QDIO_MAX_BUFFERS_PER_Q;
+ start_buf = 0;
+ set_slsb(q, &start_buf, SLSB_P_OUTPUT_NOT_INIT, &count);
+ }
+}
+
+static void
+qdio_get_ssqd_information(struct qdio_irq *irq_ptr)
{
int result;
unsigned char qdioac;
-
struct {
struct chsc_header request;
- u16 reserved1;
+ u16 reserved1:10;
+ u16 ssid:2;
+ u16 fmt:4;
u16 first_sch;
u16 reserved2;
u16 last_sch;
@@ -1964,67 +2287,83 @@ qdio_check_siga_needs(int sch)
u8 reserved5;
u16 sch;
u8 qfmt;
- u8 reserved6;
- u8 qdioac;
+ u8 parm;
+ u8 qdioac1;
u8 sch_class;
u8 reserved7;
u8 icnt;
u8 reserved8;
u8 ocnt;
+ u8 reserved9;
+ u8 mbccnt;
+ u16 qdioac2;
+ u64 sch_token;
} *ssqd_area;
+ QDIO_DBF_TEXT0(0,setup,"getssqd");
+ qdioac = 0;
ssqd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!ssqd_area) {
QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \
- "SIGAs for sch x%x.\n", sch);
- return CHSC_FLAG_SIGA_INPUT_NECESSARY ||
- CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
- CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
+ "SIGAs for sch x%x.\n", irq_ptr->schid.sch_no);
+ irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
+ CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
+ CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
+ irq_ptr->is_qebsm = 0;
+ irq_ptr->sch_token = 0;
+ irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
+ return;
}
+
ssqd_area->request = (struct chsc_header) {
.length = 0x0010,
.code = 0x0024,
};
-
- ssqd_area->first_sch = sch;
- ssqd_area->last_sch = sch;
-
- result=chsc(ssqd_area);
+ ssqd_area->first_sch = irq_ptr->schid.sch_no;
+ ssqd_area->last_sch = irq_ptr->schid.sch_no;
+ ssqd_area->ssid = irq_ptr->schid.ssid;
+ result = chsc(ssqd_area);
if (result) {
QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \
- "SIGAs for sch x%x.\n",
- result,sch);
+ "SIGAs for sch 0.%x.%x.\n", result,
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY ||
CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
+ irq_ptr->is_qebsm = 0;
goto out;
}
if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
QDIO_PRINT_WARN("response upon checking SIGA needs " \
- "is 0x%x. Using all SIGAs for sch x%x.\n",
- ssqd_area->response.code, sch);
+ "is 0x%x. Using all SIGAs for sch 0.%x.%x.\n",
+ ssqd_area->response.code,
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY ||
CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
+ irq_ptr->is_qebsm = 0;
goto out;
}
if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
!(ssqd_area->flags & CHSC_FLAG_VALIDITY) ||
- (ssqd_area->sch != sch)) {
- QDIO_PRINT_WARN("huh? problems checking out sch x%x... " \
- "using all SIGAs.\n",sch);
+ (ssqd_area->sch != irq_ptr->schid.sch_no)) {
+ QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
+ "using all SIGAs.\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */
+ irq_ptr->is_qebsm = 0;
goto out;
}
-
- qdioac = ssqd_area->qdioac;
+ qdioac = ssqd_area->qdioac1;
out:
+ qdio_check_subchannel_qebsm(irq_ptr, qdioac,
+ ssqd_area->sch_token);
free_page ((unsigned long) ssqd_area);
- return qdioac;
+ irq_ptr->qdioac = qdioac;
}
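qdio_get_ssqd_information() now stores the CHSC result in irq_ptr->qdioac as a bitmask and hands the subchannel token to qdio_check_subchannel_qebsm(). A bitmask like qdioac is combined with bitwise OR and tested bit by bit, roughly as below (the flag values are made up for the sketch; the real CHSC_FLAG_* constants live in qdio.h):

#include <stdio.h>

/* Illustrative values -- the real CHSC_FLAG_* constants live in qdio.h. */
#define SIGA_INPUT_NECESSARY   0x40
#define SIGA_OUTPUT_NECESSARY  0x20
#define SIGA_SYNC_NECESSARY    0x10

int main(void)
{
    /* Worst case: assume every SIGA is needed.  Bitwise OR keeps all
     * three bits; a logical OR (||) would collapse the value to 1 and
     * every test below would come out false. */
    unsigned char qdioac = SIGA_INPUT_NECESSARY |
                           SIGA_OUTPUT_NECESSARY |
                           SIGA_SYNC_NECESSARY;

    printf("siga-in   needed: %d\n", !!(qdioac & SIGA_INPUT_NECESSARY));
    printf("siga-out  needed: %d\n", !!(qdioac & SIGA_OUTPUT_NECESSARY));
    printf("siga-sync needed: %d\n", !!(qdioac & SIGA_SYNC_NECESSARY));
    return 0;
}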
static unsigned int
@@ -2055,6 +2394,13 @@ tiqdio_check_chsc_availability(void)
sprintf(dbf_text,"hydrati%1x", hydra_thinints);
QDIO_DBF_TEXT0(0,setup,dbf_text);
+#ifdef CONFIG_64BIT
+ /* Check for QEBSM support in general (bit 58). */
+ is_passthrough = css_general_characteristics.qebsm;
+#endif
+ sprintf(dbf_text,"cssQBS:%1x", is_passthrough);
+ QDIO_DBF_TEXT0(0,setup,dbf_text);
+
/* Check for aif time delay disablement fac (bit 56). If installed,
* omit svs even under lpar (good point by rick again) */
omit_svs = css_general_characteristics.aif_tdd;
@@ -2091,7 +2437,7 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
/* set to 0x10000000 to enable
* time delay disablement facility */
u32 reserved5;
- u32 subsystem_id;
+ struct subchannel_id schid;
u32 reserved6[1004];
struct chsc_header response;
u32 reserved7;
@@ -2113,7 +2459,8 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
scssc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!scssc_area) {
QDIO_PRINT_WARN("No memory for setting indicators on " \
- "subchannel x%x.\n", irq_ptr->irq);
+ "subchannel 0.%x.%x.\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
return -ENOMEM;
}
scssc_area->request = (struct chsc_header) {
@@ -2127,7 +2474,7 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
scssc_area->ks = QDIO_STORAGE_KEY;
scssc_area->kc = QDIO_STORAGE_KEY;
scssc_area->isc = TIQDIO_THININT_ISC;
- scssc_area->subsystem_id = (1<<16) + irq_ptr->irq;
+ scssc_area->schid = irq_ptr->schid;
/* enables the time delay disablement facility. Don't care
* whether it is really there (i.e. we haven't checked for
* it) */
@@ -2137,12 +2484,11 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
QDIO_PRINT_WARN("Time delay disablement facility " \
"not available\n");
-
-
result = chsc(scssc_area);
if (result) {
- QDIO_PRINT_WARN("could not set indicators on irq x%x, " \
- "cc=%i.\n",irq_ptr->irq,result);
+ QDIO_PRINT_WARN("could not set indicators on irq 0.%x.%x, " \
+ "cc=%i.\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no,result);
result = -EIO;
goto out;
}
@@ -2198,7 +2544,8 @@ tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
scsscf_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!scsscf_area) {
QDIO_PRINT_WARN("No memory for setting delay target on " \
- "subchannel x%x.\n", irq_ptr->irq);
+ "subchannel 0.%x.%x.\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
return -ENOMEM;
}
scsscf_area->request = (struct chsc_header) {
@@ -2210,8 +2557,10 @@ tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
result=chsc(scsscf_area);
if (result) {
- QDIO_PRINT_WARN("could not set delay target on irq x%x, " \
- "cc=%i. Continuing.\n",irq_ptr->irq,result);
+ QDIO_PRINT_WARN("could not set delay target on irq 0.%x.%x, " \
+ "cc=%i. Continuing.\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
+ result);
result = -EIO;
goto out;
}
@@ -2245,7 +2594,7 @@ qdio_cleanup(struct ccw_device *cdev, int how)
if (!irq_ptr)
return -ENODEV;
- sprintf(dbf_text,"qcln%4x",irq_ptr->irq);
+ sprintf(dbf_text,"qcln%4x",irq_ptr->schid.sch_no);
QDIO_DBF_TEXT1(0,trace,dbf_text);
QDIO_DBF_TEXT0(0,setup,dbf_text);
@@ -2272,7 +2621,7 @@ qdio_shutdown(struct ccw_device *cdev, int how)
down(&irq_ptr->setting_up_sema);
- sprintf(dbf_text,"qsqs%4x",irq_ptr->irq);
+ sprintf(dbf_text,"qsqs%4x",irq_ptr->schid.sch_no);
QDIO_DBF_TEXT1(0,trace,dbf_text);
QDIO_DBF_TEXT0(0,setup,dbf_text);
@@ -2378,7 +2727,7 @@ qdio_free(struct ccw_device *cdev)
down(&irq_ptr->setting_up_sema);
- sprintf(dbf_text,"qfqs%4x",irq_ptr->irq);
+ sprintf(dbf_text,"qfqs%4x",irq_ptr->schid.sch_no);
QDIO_DBF_TEXT1(0,trace,dbf_text);
QDIO_DBF_TEXT0(0,setup,dbf_text);
@@ -2526,13 +2875,14 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
irq_ptr = cdev->private->qdio_data;
if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
- sprintf(dbf_text,"ick1%4x",irq_ptr->irq);
+ sprintf(dbf_text,"ick1%4x",irq_ptr->schid.sch_no);
QDIO_DBF_TEXT2(1,trace,dbf_text);
QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
QDIO_PRINT_ERR("received check condition on establish " \
- "queues on irq 0x%x (cs=x%x, ds=x%x).\n",
- irq_ptr->irq,cstat,dstat);
+ "queues on irq 0.%x.%x (cs=x%x, ds=x%x).\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
+ cstat,dstat);
qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR);
}
@@ -2540,9 +2890,10 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
QDIO_DBF_TEXT2(1,setup,"eq:no de");
QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
- QDIO_PRINT_ERR("establish queues on irq %04x: didn't get "
+ QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: didn't get "
"device end: dstat=%02x, cstat=%02x\n",
- irq_ptr->irq, dstat, cstat);
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
+ dstat, cstat);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
return 1;
}
@@ -2551,10 +2902,10 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
QDIO_DBF_TEXT2(1,setup,"eq:badio");
QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
- QDIO_PRINT_ERR("establish queues on irq %04x: got "
+ QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: got "
"the following devstat: dstat=%02x, "
- "cstat=%02x\n",
- irq_ptr->irq, dstat, cstat);
+ "cstat=%02x\n", irq_ptr->schid.ssid,
+ irq_ptr->schid.sch_no, dstat, cstat);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
return 1;
}
@@ -2569,7 +2920,7 @@ qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
irq_ptr = cdev->private->qdio_data;
- sprintf(dbf_text,"qehi%4x",cdev->private->irq);
+ sprintf(dbf_text,"qehi%4x",cdev->private->sch_no);
QDIO_DBF_TEXT0(0,setup,dbf_text);
QDIO_DBF_TEXT0(0,trace,dbf_text);
@@ -2588,7 +2939,7 @@ qdio_initialize(struct qdio_initialize *init_data)
int rc;
char dbf_text[15];
- sprintf(dbf_text,"qini%4x",init_data->cdev->private->irq);
+ sprintf(dbf_text,"qini%4x",init_data->cdev->private->sch_no);
QDIO_DBF_TEXT0(0,setup,dbf_text);
QDIO_DBF_TEXT0(0,trace,dbf_text);
@@ -2609,7 +2960,7 @@ qdio_allocate(struct qdio_initialize *init_data)
struct qdio_irq *irq_ptr;
char dbf_text[15];
- sprintf(dbf_text,"qalc%4x",init_data->cdev->private->irq);
+ sprintf(dbf_text,"qalc%4x",init_data->cdev->private->sch_no);
QDIO_DBF_TEXT0(0,setup,dbf_text);
QDIO_DBF_TEXT0(0,trace,dbf_text);
if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
@@ -2682,7 +3033,7 @@ int qdio_fill_irq(struct qdio_initialize *init_data)
irq_ptr->int_parm=init_data->int_parm;
- irq_ptr->irq = init_data->cdev->private->irq;
+ irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
irq_ptr->no_input_qs=init_data->no_input_qs;
irq_ptr->no_output_qs=init_data->no_output_qs;
@@ -2698,11 +3049,12 @@ int qdio_fill_irq(struct qdio_initialize *init_data)
QDIO_DBF_TEXT2(0,setup,dbf_text);
if (irq_ptr->is_thinint_irq) {
- irq_ptr->dev_st_chg_ind=qdio_get_indicator();
+ irq_ptr->dev_st_chg_ind = qdio_get_indicator();
QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*));
if (!irq_ptr->dev_st_chg_ind) {
QDIO_PRINT_WARN("no indicator location available " \
- "for irq 0x%x\n",irq_ptr->irq);
+ "for irq 0.%x.%x\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
qdio_release_irq_memory(irq_ptr);
return -ENOBUFS;
}
@@ -2747,6 +3099,10 @@ int qdio_fill_irq(struct qdio_initialize *init_data)
irq_ptr->qdr->qkey=QDIO_STORAGE_KEY;
/* fill in qib */
+ irq_ptr->is_qebsm = is_passthrough;
+ if (irq_ptr->is_qebsm)
+ irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
+
irq_ptr->qib.qfmt=init_data->q_format;
if (init_data->no_input_qs)
irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib);
@@ -2829,7 +3185,7 @@ qdio_establish(struct qdio_initialize *init_data)
tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET);
}
- sprintf(dbf_text,"qest%4x",cdev->private->irq);
+ sprintf(dbf_text,"qest%4x",cdev->private->sch_no);
QDIO_DBF_TEXT0(0,setup,dbf_text);
QDIO_DBF_TEXT0(0,trace,dbf_text);
@@ -2855,9 +3211,10 @@ qdio_establish(struct qdio_initialize *init_data)
sprintf(dbf_text,"eq:io%4x",result);
QDIO_DBF_TEXT2(1,setup,dbf_text);
}
- QDIO_PRINT_WARN("establish queues on irq %04x: do_IO " \
- "returned %i, next try returned %i\n",
- irq_ptr->irq,result,result2);
+ QDIO_PRINT_WARN("establish queues on irq 0.%x.%04x: do_IO " \
+ "returned %i, next try returned %i\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
+ result, result2);
result=result2;
if (result)
ccw_device_set_timeout(cdev, 0);
@@ -2884,7 +3241,7 @@ qdio_establish(struct qdio_initialize *init_data)
return -EIO;
}
- irq_ptr->qdioac=qdio_check_siga_needs(irq_ptr->irq);
+ qdio_get_ssqd_information(irq_ptr);
/* if this gets set once, we're running under VM and can omit SVSes */
if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
omit_svs=1;
@@ -2930,7 +3287,7 @@ qdio_activate(struct ccw_device *cdev, int flags)
goto out;
}
- sprintf(dbf_text,"qact%4x", irq_ptr->irq);
+ sprintf(dbf_text,"qact%4x", irq_ptr->schid.sch_no);
QDIO_DBF_TEXT2(0,setup,dbf_text);
QDIO_DBF_TEXT2(0,trace,dbf_text);
@@ -2955,9 +3312,10 @@ qdio_activate(struct ccw_device *cdev, int flags)
sprintf(dbf_text,"aq:io%4x",result);
QDIO_DBF_TEXT2(1,setup,dbf_text);
}
- QDIO_PRINT_WARN("activate queues on irq %04x: do_IO " \
- "returned %i, next try returned %i\n",
- irq_ptr->irq,result,result2);
+ QDIO_PRINT_WARN("activate queues on irq 0.%x.%04x: do_IO " \
+ "returned %i, next try returned %i\n",
+ irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
+ result, result2);
result=result2;
}
@@ -3015,30 +3373,40 @@ static inline void
qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
unsigned int count, struct qdio_buffer *buffers)
{
+ struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
+ qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
+ if (irq->is_qebsm) {
+ while (count)
+ set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
+ return;
+ }
for (;;) {
- set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_INPUT_EMPTY);
+ set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
count--;
if (!count) break;
- qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
+ qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
}
-
- /* not necessary, as the queues are synced during the SIGA read */
- /*SYNC_MEMORY;*/
}
static inline void
qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
unsigned int count, struct qdio_buffer *buffers)
{
+ struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
+
+ qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
+ if (irq->is_qebsm) {
+ while (count)
+ set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
+ return;
+ }
+
for (;;) {
- set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_OUTPUT_PRIMED);
+ set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
count--;
if (!count) break;
- qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
+ qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
}
-
- /* SIGA write will sync the queues */
- /*SYNC_MEMORY;*/
}
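Both fill helpers now branch: with QEBSM a single set_slsb() call can cover the whole count, while the legacy path still walks the ring one buffer at a time. The wrap-around uses "& (QDIO_MAX_BUFFERS_PER_Q - 1)", which only works because the ring size is a power of two; a small stand-alone illustration:

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 128                 /* must be a power of two */

static unsigned int next_idx(unsigned int idx)
{
    /* Equivalent to (idx + 1) % RING_SIZE, but a single AND. */
    return (idx + 1) & (RING_SIZE - 1);
}

int main(void)
{
    assert((RING_SIZE & (RING_SIZE - 1)) == 0);   /* power-of-two check */
    printf("126 -> %u\n", next_idx(126));         /* 127 */
    printf("127 -> %u\n", next_idx(127));         /* wraps to 0 */
    return 0;
}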
static inline void
@@ -3083,6 +3451,9 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
struct qdio_buffer *buffers)
{
int used_elements;
+ unsigned int cnt, start_buf;
+ unsigned char state = 0;
+ struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
/* This is the outbound handling of queues */
#ifdef QDIO_PERFORMANCE_STATS
@@ -3115,9 +3486,15 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
* SYNC_MEMORY :-/ ), we try to
* fast-requeue buffers
*/
- if (q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1)
- &(QDIO_MAX_BUFFERS_PER_Q-1)]!=
- SLSB_CU_OUTPUT_PRIMED) {
+ if (irq->is_qebsm) {
+ cnt = 1;
+ start_buf = ((qidx+QDIO_MAX_BUFFERS_PER_Q-1) &
+ (QDIO_MAX_BUFFERS_PER_Q-1));
+ qdio_do_eqbs(q, &state, &start_buf, &cnt);
+ } else
+ state = q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1)
+ &(QDIO_MAX_BUFFERS_PER_Q-1) ];
+ if (state != SLSB_CU_OUTPUT_PRIMED) {
qdio_kick_outbound_q(q);
} else {
QDIO_DBF_TEXT3(0,trace, "fast-req");
@@ -3150,7 +3527,7 @@ do_QDIO(struct ccw_device *cdev,unsigned int callflags,
#ifdef CONFIG_QDIO_DEBUG
char dbf_text[20];
- sprintf(dbf_text,"doQD%04x",cdev->private->irq);
+ sprintf(dbf_text,"doQD%04x",cdev->private->sch_no);
QDIO_DBF_TEXT3(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 328e31cc685..fa385e761fe 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -3,14 +3,15 @@
#include <asm/page.h>
-#define VERSION_CIO_QDIO_H "$Revision: 1.33 $"
+#include "schid.h"
+
+#define VERSION_CIO_QDIO_H "$Revision: 1.40 $"
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_VERBOSE_LEVEL 9
#else /* CONFIG_QDIO_DEBUG */
#define QDIO_VERBOSE_LEVEL 5
#endif /* CONFIG_QDIO_DEBUG */
-
#define QDIO_USE_PROCESSING_STATE
#ifdef CONFIG_QDIO_PERF_STATS
@@ -265,12 +266,64 @@ QDIO_PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
/*
* Some instructions as assembly
*/
+
+static inline int
+do_sqbs(unsigned long sch, unsigned char state, int queue,
+ unsigned int *start, unsigned int *count)
+{
+#ifdef CONFIG_64BIT
+ register unsigned long _ccq asm ("0") = *count;
+ register unsigned long _sch asm ("1") = sch;
+ unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
+
+ asm volatile (
+ " .insn rsy,0xeb000000008A,%1,0,0(%2)\n\t"
+ : "+d" (_ccq), "+d" (_queuestart)
+ : "d" ((unsigned long)state), "d" (_sch)
+ : "memory", "cc"
+ );
+ *count = _ccq & 0xff;
+ *start = _queuestart & 0xff;
+
+ return (_ccq >> 32) & 0xff;
+#else
+ return 0;
+#endif
+}
+
+static inline int
+do_eqbs(unsigned long sch, unsigned char *state, int queue,
+ unsigned int *start, unsigned int *count)
+{
+#ifdef CONFIG_64BIT
+ register unsigned long _ccq asm ("0") = *count;
+ register unsigned long _sch asm ("1") = sch;
+ unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
+ unsigned long _state = 0;
+
+ asm volatile (
+ " .insn rrf,0xB99c0000,%1,%2,0,0 \n\t"
+ : "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
+ : "d" (_sch)
+ : "memory", "cc"
+ );
+ *count = _ccq & 0xff;
+ *start = _queuestart & 0xff;
+ *state = _state & 0xff;
+
+ return (_ccq >> 32) & 0xff;
+#else
+ return 0;
+#endif
+}
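do_sqbs() and do_eqbs() wrap the QEBSM set/extract-buffer-state instructions: the queue number and start buffer are packed into one 64-bit operand, the count travels in the low byte of the "ccq" register, and 31-bit kernels compile both helpers to stubs. The packing arithmetic can be checked in plain C (no inline assembly; purely illustrative values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned int queue = 3, start = 42, count = 16;

    /* Pack as do_sqbs/do_eqbs do: queue in bits 32..63, start below. */
    uint64_t queuestart = ((uint64_t)queue << 32) | start;
    uint64_t ccq = count;             /* count enters in the same register */

    /* Unpack the way the helpers do on return. */
    unsigned int start_out = queuestart & 0xff;
    unsigned int count_out = ccq & 0xff;
    unsigned int cond_code = (ccq >> 32) & 0xff;

    printf("queue=%u start=%u count=%u cc=%u\n",
           (unsigned int)(queuestart >> 32), start_out, count_out, cond_code);
    return 0;
}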
+
+
static inline int
-do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2)
+do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2)
{
int cc;
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
asm volatile (
"lhi 0,2 \n\t"
"lr 1,%1 \n\t"
@@ -280,10 +333,10 @@ do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2)
"ipm %0 \n\t"
"srl %0,28 \n\t"
: "=d" (cc)
- : "d" (0x10000|irq), "d" (mask1), "d" (mask2)
+ : "d" (schid), "d" (mask1), "d" (mask2)
: "cc", "0", "1", "2", "3"
);
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
asm volatile (
"lghi 0,2 \n\t"
"llgfr 1,%1 \n\t"
@@ -293,19 +346,19 @@ do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2)
"ipm %0 \n\t"
"srl %0,28 \n\t"
: "=d" (cc)
- : "d" (0x10000|irq), "d" (mask1), "d" (mask2)
+ : "d" (schid), "d" (mask1), "d" (mask2)
: "cc", "0", "1", "2", "3"
);
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
return cc;
}
static inline int
-do_siga_input(unsigned int irq, unsigned int mask)
+do_siga_input(struct subchannel_id schid, unsigned int mask)
{
int cc;
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
asm volatile (
"lhi 0,1 \n\t"
"lr 1,%1 \n\t"
@@ -314,10 +367,10 @@ do_siga_input(unsigned int irq, unsigned int mask)
"ipm %0 \n\t"
"srl %0,28 \n\t"
: "=d" (cc)
- : "d" (0x10000|irq), "d" (mask)
+ : "d" (schid), "d" (mask)
: "cc", "0", "1", "2", "memory"
);
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
asm volatile (
"lghi 0,1 \n\t"
"llgfr 1,%1 \n\t"
@@ -326,21 +379,22 @@ do_siga_input(unsigned int irq, unsigned int mask)
"ipm %0 \n\t"
"srl %0,28 \n\t"
: "=d" (cc)
- : "d" (0x10000|irq), "d" (mask)
+ : "d" (schid), "d" (mask)
: "cc", "0", "1", "2", "memory"
);
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
return cc;
}
static inline int
-do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb)
+do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb,
+ unsigned int fc)
{
int cc;
__u32 busy_bit;
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
asm volatile (
"lhi 0,0 \n\t"
"lr 1,%2 \n\t"
@@ -366,14 +420,14 @@ do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb)
".long 0b,2b \n\t"
".previous \n\t"
: "=d" (cc), "=d" (busy_bit)
- : "d" (0x10000|irq), "d" (mask),
+ : "d" (schid), "d" (mask),
"i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
: "cc", "0", "1", "2", "memory"
);
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
asm volatile (
- "lghi 0,0 \n\t"
- "llgfr 1,%2 \n\t"
+ "llgfr 0,%5 \n\t"
+ "lgr 1,%2 \n\t"
"llgfr 2,%3 \n\t"
"siga 0 \n\t"
"0:"
@@ -391,11 +445,11 @@ do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb)
".quad 0b,1b \n\t"
".previous \n\t"
: "=d" (cc), "=d" (busy_bit)
- : "d" (0x10000|irq), "d" (mask),
- "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
+ : "d" (schid), "d" (mask),
+ "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION), "d" (fc)
: "cc", "0", "1", "2", "memory"
);
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
(*bb) = busy_bit;
return cc;
@@ -407,21 +461,21 @@ do_clear_global_summary(void)
unsigned long time;
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
asm volatile (
"lhi 1,3 \n\t"
".insn rre,0xb2650000,2,0 \n\t"
"lr %0,3 \n\t"
: "=d" (time) : : "cc", "1", "2", "3"
);
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
asm volatile (
"lghi 1,3 \n\t"
".insn rre,0xb2650000,2,0 \n\t"
"lgr %0,3 \n\t"
: "=d" (time) : : "cc", "1", "2", "3"
);
-#endif /* CONFIG_ARCH_S390X */
+#endif /* CONFIG_64BIT */
return time;
}
@@ -488,42 +542,21 @@ struct qdio_perf_stats {
#define MY_MODULE_STRING(x) #x
-#ifdef CONFIG_ARCH_S390X
+#ifdef CONFIG_64BIT
#define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x)
-#else /* CONFIG_ARCH_S390X */
+#else /* CONFIG_64BIT */
#define QDIO_GET_ADDR(x) ((__u32)(long)x)
-#endif /* CONFIG_ARCH_S390X */
-
-#ifdef CONFIG_QDIO_DEBUG
-#define set_slsb(x,y) \
- if(q->queue_type==QDIO_TRACE_QTYPE) { \
- if(q->is_input_q) { \
- QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
- } else { \
- QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
- } \
- } \
- qdio_set_slsb(x,y); \
- if(q->queue_type==QDIO_TRACE_QTYPE) { \
- if(q->is_input_q) { \
- QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
- } else { \
- QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
- } \
- }
-#else /* CONFIG_QDIO_DEBUG */
-#define set_slsb(x,y) qdio_set_slsb(x,y)
-#endif /* CONFIG_QDIO_DEBUG */
+#endif /* CONFIG_64BIT */
struct qdio_q {
volatile struct slsb slsb;
char unused[QDIO_MAX_BUFFERS_PER_Q];
- __u32 * volatile dev_st_chg_ind;
+ __u32 * dev_st_chg_ind;
int is_input_q;
- int irq;
+ struct subchannel_id schid;
struct ccw_device *cdev;
unsigned int is_iqdio_q;
@@ -568,6 +601,7 @@ struct qdio_q {
struct tasklet_struct tasklet;
#endif /* QDIO_USE_TIMERS_FOR_POLLING */
+
enum qdio_irq_states state;
/* used to store the error condition during a data transfer */
@@ -617,13 +651,17 @@ struct qdio_irq {
__u32 * volatile dev_st_chg_ind;
unsigned long int_parm;
- int irq;
+ struct subchannel_id schid;
unsigned int is_iqdio_irq;
unsigned int is_thinint_irq;
unsigned int hydra_gives_outbound_pcis;
unsigned int sync_done_on_outb_pcis;
+ /* QEBSM facility */
+ unsigned int is_qebsm;
+ unsigned long sch_token;
+
enum qdio_irq_states state;
unsigned int no_input_qs;
diff --git a/drivers/s390/cio/schid.h b/drivers/s390/cio/schid.h
new file mode 100644
index 00000000000..54328fec5ad
--- /dev/null
+++ b/drivers/s390/cio/schid.h
@@ -0,0 +1,26 @@
+#ifndef S390_SCHID_H
+#define S390_SCHID_H
+
+struct subchannel_id {
+ __u32 reserved:13;
+ __u32 ssid:2;
+ __u32 one:1;
+ __u32 sch_no:16;
+} __attribute__ ((packed,aligned(4)));
+
+
+/* Helper function for sane state of pre-allocated subchannel_id. */
+static inline void
+init_subchannel_id(struct subchannel_id *schid)
+{
+ memset(schid, 0, sizeof(struct subchannel_id));
+ schid->one = 1;
+}
+
+static inline int
+schid_equal(struct subchannel_id *schid1, struct subchannel_id *schid2)
+{
+ return !memcmp(schid1, schid2, sizeof(struct subchannel_id));
+}
+
+#endif /* S390_SCHID_H */
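struct subchannel_id replaces the old "0x10000|irq" encoding. On big-endian s390 the packed bitfield (13 reserved bits, a 2-bit ssid, the fixed "one" bit, a 16-bit subchannel number) yields the 32-bit word (ssid << 17) | (1 << 16) | sch_no, so ssid 0 reproduces the old value exactly. A small host-side check of that arithmetic; it deliberately avoids relying on bitfield layout, which is compiler- and endian-specific:

#include <stdint.h>
#include <stdio.h>

/* Word value of a subchannel_id as the I/O instructions see it on s390:
 * 13 reserved bits, 2-bit ssid, the fixed 'one' bit, 16-bit sch_no. */
static uint32_t schid_word(unsigned int ssid, unsigned int sch_no)
{
    return ((uint32_t)(ssid & 0x3) << 17) | (1u << 16) | (sch_no & 0xffff);
}

int main(void)
{
    unsigned int sch_no = 0x0203;

    /* With ssid 0 this matches the old 0x10000|irq encoding. */
    printf("old: 0x%08x  new: 0x%08x\n",
           0x10000u | sch_no, schid_word(0, sch_no));
    return 0;
}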
diff --git a/drivers/s390/crypto/z90common.h b/drivers/s390/crypto/z90common.h
index e319e78b5ea..f87c785f203 100644
--- a/drivers/s390/crypto/z90common.h
+++ b/drivers/s390/crypto/z90common.h
@@ -1,9 +1,9 @@
/*
* linux/drivers/s390/crypto/z90common.h
*
- * z90crypt 1.3.2
+ * z90crypt 1.3.3
*
- * Copyright (C) 2001, 2004 IBM Corporation
+ * Copyright (C) 2001, 2005 IBM Corporation
* Author(s): Robert Burroughs (burrough@us.ibm.com)
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -91,12 +91,13 @@ enum hdstat {
#define TSQ_FATAL_ERROR 34
#define RSQ_FATAL_ERROR 35
-#define Z90CRYPT_NUM_TYPES 5
+#define Z90CRYPT_NUM_TYPES 6
#define PCICA 0
#define PCICC 1
#define PCIXCC_MCL2 2
#define PCIXCC_MCL3 3
#define CEX2C 4
+#define CEX2A 5
#define NILDEV -1
#define ANYDEV -1
#define PCIXCC_UNK -2
@@ -105,7 +106,7 @@ enum hdevice_type {
PCICC_HW = 3,
PCICA_HW = 4,
PCIXCC_HW = 5,
- OTHER_HW = 6,
+ CEX2A_HW = 6,
CEX2C_HW = 7
};
diff --git a/drivers/s390/crypto/z90crypt.h b/drivers/s390/crypto/z90crypt.h
index 0a3bb5a10dd..3a18443fdfa 100644
--- a/drivers/s390/crypto/z90crypt.h
+++ b/drivers/s390/crypto/z90crypt.h
@@ -1,9 +1,9 @@
/*
* linux/drivers/s390/crypto/z90crypt.h
*
- * z90crypt 1.3.2
+ * z90crypt 1.3.3
*
- * Copyright (C) 2001, 2004 IBM Corporation
+ * Copyright (C) 2001, 2005 IBM Corporation
* Author(s): Robert Burroughs (burrough@us.ibm.com)
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -29,11 +29,11 @@
#include <linux/ioctl.h>
-#define VERSION_Z90CRYPT_H "$Revision: 1.11 $"
+#define VERSION_Z90CRYPT_H "$Revision: 1.2.2.4 $"
#define z90crypt_VERSION 1
#define z90crypt_RELEASE 3 // 2 = PCIXCC, 3 = rewrite for coding standards
-#define z90crypt_VARIANT 2 // 2 = added PCIXCC MCL3 and CEX2C support
+#define z90crypt_VARIANT 3 // 3 = CEX2A support
/**
* struct ica_rsa_modexpo
@@ -122,6 +122,9 @@ struct ica_rsa_modexpo_crt {
* Z90STAT_CEX2CCOUNT
* Return an integer count of all CEX2Cs.
*
+ * Z90STAT_CEX2ACOUNT
+ * Return an integer count of all CEX2As.
+ *
* Z90STAT_REQUESTQ_COUNT
* Return an integer count of the number of entries waiting to be
* sent to a device.
@@ -144,6 +147,7 @@ struct ica_rsa_modexpo_crt {
* 0x03: PCIXCC_MCL2
* 0x04: PCIXCC_MCL3
* 0x05: CEX2C
+ * 0x06: CEX2A
* 0x0d: device is disabled via the proc filesystem
*
* Z90STAT_QDEPTH_MASK
@@ -199,6 +203,7 @@ struct ica_rsa_modexpo_crt {
#define Z90STAT_PCIXCCMCL2COUNT _IOR(Z90_IOCTL_MAGIC, 0x4b, int)
#define Z90STAT_PCIXCCMCL3COUNT _IOR(Z90_IOCTL_MAGIC, 0x4c, int)
#define Z90STAT_CEX2CCOUNT _IOR(Z90_IOCTL_MAGIC, 0x4d, int)
+#define Z90STAT_CEX2ACOUNT _IOR(Z90_IOCTL_MAGIC, 0x4e, int)
#define Z90STAT_REQUESTQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x44, int)
#define Z90STAT_PENDINGQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x45, int)
#define Z90STAT_TOTALOPEN_COUNT _IOR(Z90_IOCTL_MAGIC, 0x46, int)
diff --git a/drivers/s390/crypto/z90hardware.c b/drivers/s390/crypto/z90hardware.c
index c215e088973..d7f7494a0cb 100644
--- a/drivers/s390/crypto/z90hardware.c
+++ b/drivers/s390/crypto/z90hardware.c
@@ -1,9 +1,9 @@
/*
* linux/drivers/s390/crypto/z90hardware.c
*
- * z90crypt 1.3.2
+ * z90crypt 1.3.3
*
- * Copyright (C) 2001, 2004 IBM Corporation
+ * Copyright (C) 2001, 2005 IBM Corporation
* Author(s): Robert Burroughs (burrough@us.ibm.com)
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -648,6 +648,87 @@ static struct cca_public_sec static_cca_pub_sec = {
#define RESPONSE_CPRB_SIZE 0x000006B8
#define RESPONSE_CPRBX_SIZE 0x00000724
+struct type50_hdr {
+ u8 reserved1;
+ u8 msg_type_code;
+ u16 msg_len;
+ u8 reserved2;
+ u8 ignored;
+ u16 reserved3;
+};
+
+#define TYPE50_TYPE_CODE 0x50
+
+#define TYPE50_MEB1_LEN (sizeof(struct type50_meb1_msg))
+#define TYPE50_MEB2_LEN (sizeof(struct type50_meb2_msg))
+#define TYPE50_CRB1_LEN (sizeof(struct type50_crb1_msg))
+#define TYPE50_CRB2_LEN (sizeof(struct type50_crb2_msg))
+
+#define TYPE50_MEB1_FMT 0x0001
+#define TYPE50_MEB2_FMT 0x0002
+#define TYPE50_CRB1_FMT 0x0011
+#define TYPE50_CRB2_FMT 0x0012
+
+struct type50_meb1_msg {
+ struct type50_hdr header;
+ u16 keyblock_type;
+ u8 reserved[6];
+ u8 exponent[128];
+ u8 modulus[128];
+ u8 message[128];
+};
+
+struct type50_meb2_msg {
+ struct type50_hdr header;
+ u16 keyblock_type;
+ u8 reserved[6];
+ u8 exponent[256];
+ u8 modulus[256];
+ u8 message[256];
+};
+
+struct type50_crb1_msg {
+ struct type50_hdr header;
+ u16 keyblock_type;
+ u8 reserved[6];
+ u8 p[64];
+ u8 q[64];
+ u8 dp[64];
+ u8 dq[64];
+ u8 u[64];
+ u8 message[128];
+};
+
+struct type50_crb2_msg {
+ struct type50_hdr header;
+ u16 keyblock_type;
+ u8 reserved[6];
+ u8 p[128];
+ u8 q[128];
+ u8 dp[128];
+ u8 dq[128];
+ u8 u[128];
+ u8 message[256];
+};
+
+union type50_msg {
+ struct type50_meb1_msg meb1;
+ struct type50_meb2_msg meb2;
+ struct type50_crb1_msg crb1;
+ struct type50_crb2_msg crb2;
+};
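The type 50 request comes in four fixed-size layouts: MEB1/CRB1 carry 128-byte operand fields (moduli up to 1024 bits), MEB2/CRB2 carry 256-byte fields, and the format is chosen purely from the input length. A sketch of that selection (field widths mirror the structs above; the helper name is invented):

#include <stdio.h>

#define MEB1_OPERAND_LEN 128          /* meb1: 128-byte exponent/modulus/message */
#define MEB2_OPERAND_LEN 256          /* meb2: 256-byte fields */

/* Pick the mod-expo field width for a modulus length in bytes;
 * return 0 if the key is too large for a type 50 request. */
static int meb_operand_len(int mod_len)
{
    if (mod_len <= MEB1_OPERAND_LEN)
        return MEB1_OPERAND_LEN;
    if (mod_len <= MEB2_OPERAND_LEN)
        return MEB2_OPERAND_LEN;
    return 0;
}

int main(void)
{
    printf("1024-bit key -> %d-byte fields\n", meb_operand_len(128));
    printf("2048-bit key -> %d-byte fields\n", meb_operand_len(256));
    return 0;
}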
+
+struct type80_hdr {
+ u8 reserved1;
+ u8 type;
+ u16 len;
+ u8 code;
+ u8 reserved2[3];
+ u8 reserved3[8];
+};
+
+#define TYPE80_RSP_CODE 0x80
+
struct error_hdr {
unsigned char reserved1;
unsigned char type;
@@ -657,6 +738,7 @@ struct error_hdr {
};
#define TYPE82_RSP_CODE 0x82
+#define TYPE88_RSP_CODE 0x88
#define REP82_ERROR_MACHINE_FAILURE 0x10
#define REP82_ERROR_PREEMPT_FAILURE 0x12
@@ -679,6 +761,22 @@ struct error_hdr {
#define REP82_ERROR_PACKET_TRUNCATED 0xA0
#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
+#define REP88_ERROR_MODULE_FAILURE 0x10
+#define REP88_ERROR_MODULE_TIMEOUT 0x11
+#define REP88_ERROR_MODULE_NOTINIT 0x13
+#define REP88_ERROR_MODULE_NOTAVAIL 0x14
+#define REP88_ERROR_MODULE_DISABLED 0x15
+#define REP88_ERROR_MODULE_IN_DIAGN 0x17
+#define REP88_ERROR_FASTPATH_DISABLD 0x19
+#define REP88_ERROR_MESSAGE_TYPE 0x20
+#define REP88_ERROR_MESSAGE_MALFORMD 0x22
+#define REP88_ERROR_MESSAGE_LENGTH 0x23
+#define REP88_ERROR_RESERVED_FIELD 0x24
+#define REP88_ERROR_KEY_TYPE 0x34
+#define REP88_ERROR_INVALID_KEY 0x82
+#define REP88_ERROR_OPERAND 0x84
+#define REP88_ERROR_OPERAND_EVEN_MOD 0x85
+
#define CALLER_HEADER 12
static inline int
@@ -687,7 +785,7 @@ testq(int q_nr, int *q_depth, int *dev_type, struct ap_status_word *stat)
int ccode;
asm volatile
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
(" llgfr 0,%4 \n"
" slgr 1,1 \n"
" lgr 2,1 \n"
@@ -757,7 +855,7 @@ resetq(int q_nr, struct ap_status_word *stat_p)
int ccode;
asm volatile
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
(" llgfr 0,%2 \n"
" lghi 1,1 \n"
" sll 1,24 \n"
@@ -823,7 +921,7 @@ sen(int msg_len, unsigned char *msg_ext, struct ap_status_word *stat)
int ccode;
asm volatile
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
(" lgr 6,%3 \n"
" llgfr 7,%2 \n"
" llgt 0,0(6) \n"
@@ -902,7 +1000,7 @@ rec(int q_nr, int buff_l, unsigned char *rsp, unsigned char *id,
int ccode;
asm volatile
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
(" llgfr 0,%2 \n"
" lgr 3,%4 \n"
" lgr 6,%3 \n"
@@ -1029,10 +1127,6 @@ query_online(int deviceNr, int cdx, int resetNr, int *q_depth, int *dev_type)
stat = HD_ONLINE;
*q_depth = t_depth + 1;
switch (t_dev_type) {
- case OTHER_HW:
- stat = HD_NOT_THERE;
- *dev_type = NILDEV;
- break;
case PCICA_HW:
*dev_type = PCICA;
break;
@@ -1045,6 +1139,9 @@ query_online(int deviceNr, int cdx, int resetNr, int *q_depth, int *dev_type)
case CEX2C_HW:
*dev_type = CEX2C;
break;
+ case CEX2A_HW:
+ *dev_type = CEX2A;
+ break;
default:
*dev_type = NILDEV;
break;
@@ -2029,6 +2126,177 @@ ICACRT_msg_to_type6CRT_msgX(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx,
return 0;
}
+static int
+ICAMEX_msg_to_type50MEX_msg(struct ica_rsa_modexpo *icaMex_p, int *z90cMsg_l_p,
+ union type50_msg *z90cMsg_p)
+{
+ int mod_len, msg_size, mod_tgt_len, exp_tgt_len, inp_tgt_len;
+ unsigned char *mod_tgt, *exp_tgt, *inp_tgt;
+ union type50_msg *tmp_type50_msg;
+
+ mod_len = icaMex_p->inputdatalength;
+
+ msg_size = ((mod_len <= 128) ? TYPE50_MEB1_LEN : TYPE50_MEB2_LEN) +
+ CALLER_HEADER;
+
+ memset(z90cMsg_p, 0, msg_size);
+
+ tmp_type50_msg = (union type50_msg *)
+ ((unsigned char *) z90cMsg_p + CALLER_HEADER);
+
+ tmp_type50_msg->meb1.header.msg_type_code = TYPE50_TYPE_CODE;
+
+ if (mod_len <= 128) {
+ tmp_type50_msg->meb1.header.msg_len = TYPE50_MEB1_LEN;
+ tmp_type50_msg->meb1.keyblock_type = TYPE50_MEB1_FMT;
+ mod_tgt = tmp_type50_msg->meb1.modulus;
+ mod_tgt_len = sizeof(tmp_type50_msg->meb1.modulus);
+ exp_tgt = tmp_type50_msg->meb1.exponent;
+ exp_tgt_len = sizeof(tmp_type50_msg->meb1.exponent);
+ inp_tgt = tmp_type50_msg->meb1.message;
+ inp_tgt_len = sizeof(tmp_type50_msg->meb1.message);
+ } else {
+ tmp_type50_msg->meb2.header.msg_len = TYPE50_MEB2_LEN;
+ tmp_type50_msg->meb2.keyblock_type = TYPE50_MEB2_FMT;
+ mod_tgt = tmp_type50_msg->meb2.modulus;
+ mod_tgt_len = sizeof(tmp_type50_msg->meb2.modulus);
+ exp_tgt = tmp_type50_msg->meb2.exponent;
+ exp_tgt_len = sizeof(tmp_type50_msg->meb2.exponent);
+ inp_tgt = tmp_type50_msg->meb2.message;
+ inp_tgt_len = sizeof(tmp_type50_msg->meb2.message);
+ }
+
+ mod_tgt += (mod_tgt_len - mod_len);
+ if (copy_from_user(mod_tgt, icaMex_p->n_modulus, mod_len))
+ return SEN_RELEASED;
+ if (is_empty(mod_tgt, mod_len))
+ return SEN_USER_ERROR;
+ exp_tgt += (exp_tgt_len - mod_len);
+ if (copy_from_user(exp_tgt, icaMex_p->b_key, mod_len))
+ return SEN_RELEASED;
+ if (is_empty(exp_tgt, mod_len))
+ return SEN_USER_ERROR;
+ inp_tgt += (inp_tgt_len - mod_len);
+ if (copy_from_user(inp_tgt, icaMex_p->inputdata, mod_len))
+ return SEN_RELEASED;
+ if (is_empty(inp_tgt, mod_len))
+ return SEN_USER_ERROR;
+
+ *z90cMsg_l_p = msg_size - CALLER_HEADER;
+
+ return 0;
+}
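ICAMEX_msg_to_type50MEX_msg() copies each operand right-justified into its fixed-width field: the message was zeroed beforehand, the destination pointer is advanced by (field length - operand length), and an all-zero operand is rejected. The same copy pattern in isolation, with memcpy standing in for copy_from_user():

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Right-justify src into a zero-filled dst field of dst_len bytes.
 * Returns false if the operand does not fit or is all zeroes. */
static bool copy_right_justified(unsigned char *dst, size_t dst_len,
                                 const unsigned char *src, size_t src_len)
{
    size_t i;

    if (src_len > dst_len)
        return false;
    memset(dst, 0, dst_len);
    memcpy(dst + (dst_len - src_len), src, src_len);
    for (i = 0; i < src_len; i++)     /* reject empty operands */
        if (src[i])
            return true;
    return false;
}

int main(void)
{
    unsigned char field[8];
    const unsigned char op[3] = { 0x01, 0x02, 0x03 };

    if (copy_right_justified(field, sizeof(field), op, sizeof(op)))
        printf("field ends: %02x %02x %02x\n", field[5], field[6], field[7]);
    return 0;
}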
+
+static int
+ICACRT_msg_to_type50CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p,
+ int *z90cMsg_l_p, union type50_msg *z90cMsg_p)
+{
+ int mod_len, short_len, long_len, tmp_size, p_tgt_len, q_tgt_len,
+ dp_tgt_len, dq_tgt_len, u_tgt_len, inp_tgt_len, long_offset;
+ unsigned char *p_tgt, *q_tgt, *dp_tgt, *dq_tgt, *u_tgt, *inp_tgt,
+ temp[8];
+ union type50_msg *tmp_type50_msg;
+
+ mod_len = icaMsg_p->inputdatalength;
+ short_len = mod_len / 2;
+ long_len = mod_len / 2 + 8;
+ long_offset = 0;
+
+ if (long_len > 128) {
+ memset(temp, 0x00, sizeof(temp));
+ if (copy_from_user(temp, icaMsg_p->np_prime, long_len-128))
+ return SEN_RELEASED;
+ if (!is_empty(temp, 8))
+ return SEN_NOT_AVAIL;
+ if (copy_from_user(temp, icaMsg_p->bp_key, long_len-128))
+ return SEN_RELEASED;
+ if (!is_empty(temp, 8))
+ return SEN_NOT_AVAIL;
+ if (copy_from_user(temp, icaMsg_p->u_mult_inv, long_len-128))
+ return SEN_RELEASED;
+ if (!is_empty(temp, 8))
+ return SEN_NOT_AVAIL;
+ long_offset = long_len - 128;
+ long_len = 128;
+ }
+
+ tmp_size = ((mod_len <= 128) ? TYPE50_CRB1_LEN : TYPE50_CRB2_LEN) +
+ CALLER_HEADER;
+
+ memset(z90cMsg_p, 0, tmp_size);
+
+ tmp_type50_msg = (union type50_msg *)
+ ((unsigned char *) z90cMsg_p + CALLER_HEADER);
+
+ tmp_type50_msg->crb1.header.msg_type_code = TYPE50_TYPE_CODE;
+ if (long_len <= 64) {
+ tmp_type50_msg->crb1.header.msg_len = TYPE50_CRB1_LEN;
+ tmp_type50_msg->crb1.keyblock_type = TYPE50_CRB1_FMT;
+ p_tgt = tmp_type50_msg->crb1.p;
+ p_tgt_len = sizeof(tmp_type50_msg->crb1.p);
+ q_tgt = tmp_type50_msg->crb1.q;
+ q_tgt_len = sizeof(tmp_type50_msg->crb1.q);
+ dp_tgt = tmp_type50_msg->crb1.dp;
+ dp_tgt_len = sizeof(tmp_type50_msg->crb1.dp);
+ dq_tgt = tmp_type50_msg->crb1.dq;
+ dq_tgt_len = sizeof(tmp_type50_msg->crb1.dq);
+ u_tgt = tmp_type50_msg->crb1.u;
+ u_tgt_len = sizeof(tmp_type50_msg->crb1.u);
+ inp_tgt = tmp_type50_msg->crb1.message;
+ inp_tgt_len = sizeof(tmp_type50_msg->crb1.message);
+ } else {
+ tmp_type50_msg->crb2.header.msg_len = TYPE50_CRB2_LEN;
+ tmp_type50_msg->crb2.keyblock_type = TYPE50_CRB2_FMT;
+ p_tgt = tmp_type50_msg->crb2.p;
+ p_tgt_len = sizeof(tmp_type50_msg->crb2.p);
+ q_tgt = tmp_type50_msg->crb2.q;
+ q_tgt_len = sizeof(tmp_type50_msg->crb2.q);
+ dp_tgt = tmp_type50_msg->crb2.dp;
+ dp_tgt_len = sizeof(tmp_type50_msg->crb2.dp);
+ dq_tgt = tmp_type50_msg->crb2.dq;
+ dq_tgt_len = sizeof(tmp_type50_msg->crb2.dq);
+ u_tgt = tmp_type50_msg->crb2.u;
+ u_tgt_len = sizeof(tmp_type50_msg->crb2.u);
+ inp_tgt = tmp_type50_msg->crb2.message;
+ inp_tgt_len = sizeof(tmp_type50_msg->crb2.message);
+ }
+
+ p_tgt += (p_tgt_len - long_len);
+ if (copy_from_user(p_tgt, icaMsg_p->np_prime + long_offset, long_len))
+ return SEN_RELEASED;
+ if (is_empty(p_tgt, long_len))
+ return SEN_USER_ERROR;
+ q_tgt += (q_tgt_len - short_len);
+ if (copy_from_user(q_tgt, icaMsg_p->nq_prime, short_len))
+ return SEN_RELEASED;
+ if (is_empty(q_tgt, short_len))
+ return SEN_USER_ERROR;
+ dp_tgt += (dp_tgt_len - long_len);
+ if (copy_from_user(dp_tgt, icaMsg_p->bp_key + long_offset, long_len))
+ return SEN_RELEASED;
+ if (is_empty(dp_tgt, long_len))
+ return SEN_USER_ERROR;
+ dq_tgt += (dq_tgt_len - short_len);
+ if (copy_from_user(dq_tgt, icaMsg_p->bq_key, short_len))
+ return SEN_RELEASED;
+ if (is_empty(dq_tgt, short_len))
+ return SEN_USER_ERROR;
+ u_tgt += (u_tgt_len - long_len);
+ if (copy_from_user(u_tgt, icaMsg_p->u_mult_inv + long_offset, long_len))
+ return SEN_RELEASED;
+ if (is_empty(u_tgt, long_len))
+ return SEN_USER_ERROR;
+ inp_tgt += (inp_tgt_len - mod_len);
+ if (copy_from_user(inp_tgt, icaMsg_p->inputdata, mod_len))
+ return SEN_RELEASED;
+ if (is_empty(inp_tgt, mod_len))
+ return SEN_USER_ERROR;
+
+ *z90cMsg_l_p = tmp_size - CALLER_HEADER;
+
+ return 0;
+}
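For CRT requests the components are split asymmetrically: p, dp and u get mod_len/2 + 8 bytes, q and dq get mod_len/2, and when the long half would exceed the 128-byte CRB2 fields the code first checks that the extra leading bytes are zero and then skips them. A short check of that length bookkeeping (values are illustrative):

#include <stdio.h>

int main(void)
{
    int mod_len = 256;                       /* 2048-bit modulus */
    int short_len = mod_len / 2;             /* q, dq */
    int long_len = mod_len / 2 + 8;          /* p, dp, u */
    int long_offset = 0;

    /* The CRB2 fields hold at most 128 bytes, so the 8 extra leading
     * bytes of the long components must be zero and are then skipped. */
    if (long_len > 128) {
        long_offset = long_len - 128;
        long_len = 128;
    }
    printf("short=%d long=%d skipped=%d\n", short_len, long_len, long_offset);
    return 0;
}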
+
int
convert_request(unsigned char *buffer, int func, unsigned short function,
int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p)
@@ -2071,6 +2339,16 @@ convert_request(unsigned char *buffer, int func, unsigned short function,
cdx, msg_l_p, (struct type6_msg *) msg_p,
dev_type);
}
+ if (dev_type == CEX2A) {
+ if (func == ICARSACRT)
+ return ICACRT_msg_to_type50CRT_msg(
+ (struct ica_rsa_modexpo_crt *) buffer,
+ msg_l_p, (union type50_msg *) msg_p);
+ else
+ return ICAMEX_msg_to_type50MEX_msg(
+ (struct ica_rsa_modexpo *) buffer,
+ msg_l_p, (union type50_msg *) msg_p);
+ }
return 0;
}
@@ -2081,8 +2359,8 @@ unset_ext_bitlens(void)
{
if (!ext_bitlens_msg_count) {
PRINTK("Unable to use coprocessors for extended bitlengths. "
- "Using PCICAs (if present) for extended bitlengths. "
- "This is not an error.\n");
+ "Using PCICAs/CEX2As (if present) for extended "
+ "bitlengths. This is not an error.\n");
ext_bitlens_msg_count++;
}
ext_bitlens = 0;
@@ -2094,6 +2372,7 @@ convert_response(unsigned char *response, unsigned char *buffer,
{
struct ica_rsa_modexpo *icaMsg_p = (struct ica_rsa_modexpo *) buffer;
struct error_hdr *errh_p = (struct error_hdr *) response;
+ struct type80_hdr *t80h_p = (struct type80_hdr *) response;
struct type84_hdr *t84h_p = (struct type84_hdr *) response;
struct type86_fmt2_msg *t86m_p = (struct type86_fmt2_msg *) response;
int reply_code, service_rc, service_rs, src_l;
@@ -2108,6 +2387,7 @@ convert_response(unsigned char *response, unsigned char *buffer,
src_l = 0;
switch (errh_p->type) {
case TYPE82_RSP_CODE:
+ case TYPE88_RSP_CODE:
reply_code = errh_p->reply_code;
src_p = (unsigned char *)errh_p;
PRINTK("Hardware error: Type %02X Message Header: "
@@ -2116,6 +2396,10 @@ convert_response(unsigned char *response, unsigned char *buffer,
src_p[0], src_p[1], src_p[2], src_p[3],
src_p[4], src_p[5], src_p[6], src_p[7]);
break;
+ case TYPE80_RSP_CODE:
+ src_l = icaMsg_p->outputdatalength;
+ src_p = response + (int)t80h_p->len - src_l;
+ break;
case TYPE84_RSP_CODE:
src_l = icaMsg_p->outputdatalength;
src_p = response + (int)t84h_p->len - src_l;
@@ -2202,6 +2486,7 @@ convert_response(unsigned char *response, unsigned char *buffer,
if (reply_code)
switch (reply_code) {
case REP82_ERROR_OPERAND_INVALID:
+ case REP88_ERROR_MESSAGE_MALFORMD:
return REC_OPERAND_INV;
case REP82_ERROR_OPERAND_SIZE:
return REC_OPERAND_SIZE;
diff --git a/drivers/s390/crypto/z90main.c b/drivers/s390/crypto/z90main.c
index 790fcbb74b4..2f54d033d7c 100644
--- a/drivers/s390/crypto/z90main.c
+++ b/drivers/s390/crypto/z90main.c
@@ -30,7 +30,6 @@
#include <linux/delay.h> // mdelay
#include <linux/init.h>
#include <linux/interrupt.h> // for tasklets
-#include <linux/ioctl32.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -228,7 +227,7 @@ struct device_x {
*/
struct device {
int dev_type; // PCICA, PCICC, PCIXCC_MCL2,
- // PCIXCC_MCL3, CEX2C
+ // PCIXCC_MCL3, CEX2C, CEX2A
enum devstat dev_stat; // current device status
int dev_self_x; // Index in array
int disabled; // Set when device is in error
@@ -295,26 +294,30 @@ struct caller {
/**
* Function prototypes from z90hardware.c
*/
-enum hdstat query_online(int, int, int, int *, int *);
-enum devstat reset_device(int, int, int);
-enum devstat send_to_AP(int, int, int, unsigned char *);
-enum devstat receive_from_AP(int, int, int, unsigned char *, unsigned char *);
-int convert_request(unsigned char *, int, short, int, int, int *,
- unsigned char *);
-int convert_response(unsigned char *, unsigned char *, int *, unsigned char *);
+enum hdstat query_online(int deviceNr, int cdx, int resetNr, int *q_depth,
+ int *dev_type);
+enum devstat reset_device(int deviceNr, int cdx, int resetNr);
+enum devstat send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext);
+enum devstat receive_from_AP(int dev_nr, int cdx, int resplen,
+ unsigned char *resp, unsigned char *psmid);
+int convert_request(unsigned char *buffer, int func, unsigned short function,
+ int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p);
+int convert_response(unsigned char *response, unsigned char *buffer,
+ int *respbufflen_p, unsigned char *resp_buff);
/**
* Low level function prototypes
*/
-static int create_z90crypt(int *);
-static int refresh_z90crypt(int *);
-static int find_crypto_devices(struct status *);
-static int create_crypto_device(int);
-static int destroy_crypto_device(int);
+static int create_z90crypt(int *cdx_p);
+static int refresh_z90crypt(int *cdx_p);
+static int find_crypto_devices(struct status *deviceMask);
+static int create_crypto_device(int index);
+static int destroy_crypto_device(int index);
static void destroy_z90crypt(void);
-static int refresh_index_array(struct status *, struct device_x *);
-static int probe_device_type(struct device *);
-static int probe_PCIXCC_type(struct device *);
+static int refresh_index_array(struct status *status_str,
+ struct device_x *index_array);
+static int probe_device_type(struct device *devPtr);
+static int probe_PCIXCC_type(struct device *devPtr);
/**
* proc fs definitions
@@ -425,7 +428,7 @@ static struct miscdevice z90crypt_misc_device = {
MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman"
"and Jochen Roehrig");
MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, "
- "Copyright 2001, 2004 IBM Corporation");
+ "Copyright 2001, 2005 IBM Corporation");
MODULE_LICENSE("GPL");
module_param(domain, int, 0);
MODULE_PARM_DESC(domain, "domain index for device");
@@ -860,6 +863,12 @@ get_status_CEX2Ccount(void)
}
static inline int
+get_status_CEX2Acount(void)
+{
+ return z90crypt.hdware_info->type_mask[CEX2A].st_count;
+}
+
+static inline int
get_status_requestq_count(void)
{
return requestq_count;
@@ -1008,11 +1017,13 @@ static inline int
select_device_type(int *dev_type_p, int bytelength)
{
static int count = 0;
- int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, index_to_use;
+ int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, CEX2A_avail,
+ index_to_use;
struct status *stat;
if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) &&
(*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) &&
- (*dev_type_p != CEX2C) && (*dev_type_p != ANYDEV))
+ (*dev_type_p != CEX2C) && (*dev_type_p != CEX2A) &&
+ (*dev_type_p != ANYDEV))
return -1;
if (*dev_type_p != ANYDEV) {
stat = &z90crypt.hdware_info->type_mask[*dev_type_p];
@@ -1022,7 +1033,13 @@ select_device_type(int *dev_type_p, int bytelength)
return -1;
}
- /* Assumption: PCICA, PCIXCC_MCL3, and CEX2C are all similar in speed */
+ /**
+ * Assumption: PCICA, PCIXCC_MCL3, CEX2C, and CEX2A are all similar in
+ * speed.
+ *
+ * PCICA and CEX2A do NOT co-exist, so at most one of the two will be
+ * present.
+ */
stat = &z90crypt.hdware_info->type_mask[PCICA];
PCICA_avail = stat->st_count -
(stat->disabled_count + stat->user_disabled_count);
@@ -1032,29 +1049,38 @@ select_device_type(int *dev_type_p, int bytelength)
stat = &z90crypt.hdware_info->type_mask[CEX2C];
CEX2C_avail = stat->st_count -
(stat->disabled_count + stat->user_disabled_count);
- if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail) {
+ stat = &z90crypt.hdware_info->type_mask[CEX2A];
+ CEX2A_avail = stat->st_count -
+ (stat->disabled_count + stat->user_disabled_count);
+ if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail || CEX2A_avail) {
/**
- * bitlength is a factor, PCICA is the most capable, even with
- * the new MCL for PCIXCC.
+ * bitlength is a factor, PCICA or CEX2A are the most capable,
+ * even with the new MCL for PCIXCC.
*/
if ((bytelength < PCIXCC_MIN_MOD_SIZE) ||
(!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) {
- if (!PCICA_avail)
- return -1;
- else {
+ if (PCICA_avail) {
*dev_type_p = PCICA;
return 0;
}
+ if (CEX2A_avail) {
+ *dev_type_p = CEX2A;
+ return 0;
+ }
+ return -1;
}
index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail +
- CEX2C_avail);
+ CEX2C_avail + CEX2A_avail);
if (index_to_use < PCICA_avail)
*dev_type_p = PCICA;
else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail))
*dev_type_p = PCIXCC_MCL3;
- else
+ else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail +
+ CEX2C_avail))
*dev_type_p = CEX2C;
+ else
+ *dev_type_p = CEX2A;
count++;
return 0;
}
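select_device_type() round-robins over whatever card types are currently usable: the running counter is reduced modulo the total number of available devices and mapped back to a type by comparing against cumulative counts, which is how CEX2A slots in after CEX2C. A toy version of that mapping (type names and counts here are illustrative):

#include <stdio.h>

enum card { PCICA, PCIXCC_MCL3, CEX2C, CEX2A, NTYPES };

/* Map a running request counter onto one of the available card types. */
static enum card pick_type(const int avail[NTYPES], unsigned int counter)
{
    int total = 0, idx, i;

    for (i = 0; i < NTYPES; i++)
        total += avail[i];
    if (!total)
        return NTYPES;                /* nothing usable */

    idx = counter % total;            /* round-robin slot */
    for (i = 0; i < NTYPES; i++) {    /* map slot to cumulative ranges */
        if (idx < avail[i])
            return (enum card)i;
        idx -= avail[i];
    }
    return NTYPES;                    /* not reached */
}

int main(void)
{
    const int avail[NTYPES] = { 0, 2, 1, 1 };   /* e.g. no PCICA, one CEX2A */
    unsigned int c;

    for (c = 0; c < 4; c++)
        printf("request %u -> type %d\n", c, pick_type(avail, c));
    return 0;
}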
@@ -1359,7 +1385,7 @@ build_caller(struct work_element *we_p, short function)
if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) &&
(we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
- (we_p->devtype != CEX2C))
+ (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A))
return SEN_NOT_AVAIL;
memcpy(caller_p->caller_id, we_p->caller_id,
@@ -1428,7 +1454,8 @@ get_crypto_request_buffer(struct work_element *we_p)
if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) &&
(we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
- (we_p->devtype != CEX2C) && (we_p->devtype != ANYDEV)) {
+ (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A) &&
+ (we_p->devtype != ANYDEV)) {
PRINTK("invalid device type\n");
return SEN_USER_ERROR;
}
@@ -1503,8 +1530,9 @@ get_crypto_request_buffer(struct work_element *we_p)
function = PCI_FUNC_KEY_ENCRYPT;
switch (we_p->devtype) {
- /* PCICA does everything with a simple RSA mod-expo operation */
+ /* PCICA and CEX2A do everything with a simple RSA mod-expo operation */
case PCICA:
+ case CEX2A:
function = PCI_FUNC_KEY_ENCRYPT;
break;
/**
@@ -1662,7 +1690,8 @@ z90crypt_rsa(struct priv_data *private_data_p, pid_t pid,
* trigger a fallback to software.
*/
case -EINVAL:
- if (we_p->devtype != PCICA)
+ if ((we_p->devtype != PCICA) &&
+ (we_p->devtype != CEX2A))
rv = -EGETBUFF;
break;
case -ETIMEOUT:
@@ -1779,6 +1808,12 @@ z90crypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
ret = -EFAULT;
break;
+ case Z90STAT_CEX2ACOUNT:
+ tempstat = get_status_CEX2Acount();
+ if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
+ ret = -EFAULT;
+ break;
+
case Z90STAT_REQUESTQ_COUNT:
tempstat = get_status_requestq_count();
if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
@@ -2019,6 +2054,8 @@ z90crypt_status(char *resp_buff, char **start, off_t offset,
get_status_PCIXCCMCL3count());
len += sprintf(resp_buff+len, "CEX2C count: %d\n",
get_status_CEX2Ccount());
+ len += sprintf(resp_buff+len, "CEX2A count: %d\n",
+ get_status_CEX2Acount());
len += sprintf(resp_buff+len, "requestq count: %d\n",
get_status_requestq_count());
len += sprintf(resp_buff+len, "pendingq count: %d\n",
@@ -2026,8 +2063,8 @@ z90crypt_status(char *resp_buff, char **start, off_t offset,
len += sprintf(resp_buff+len, "Total open handles: %d\n\n",
get_status_totalopen_count());
len += sprinthx(
- "Online devices: 1: PCICA, 2: PCICC, 3: PCIXCC (MCL2), "
- "4: PCIXCC (MCL3), 5: CEX2C",
+ "Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
+ "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
resp_buff+len,
get_status_status_mask(workarea),
Z90CRYPT_NUM_APS);
@@ -2140,6 +2177,7 @@ z90crypt_status_write(struct file *file, const char __user *buffer,
case '3': // PCIXCC_MCL2
case '4': // PCIXCC_MCL3
case '5': // CEX2C
+ case '6': // CEX2A
j++;
break;
case 'd':
@@ -3007,7 +3045,9 @@ create_crypto_device(int index)
z90crypt.hdware_info->device_type_array[index] = 4;
else if (deviceType == CEX2C)
z90crypt.hdware_info->device_type_array[index] = 5;
- else
+ else if (deviceType == CEX2A)
+ z90crypt.hdware_info->device_type_array[index] = 6;
+ else // No idea how this would happen.
z90crypt.hdware_info->device_type_array[index] = -1;
}
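
The z90crypt changes above teach the device-selection path about the new CEX2A accelerator: the round-robin part indexes a running counter modulo the number of available devices of each capable type and walks the counts in order. A minimal standalone model of that selection, with hypothetical counts and without the driver's real status structures:

  #include <stdio.h>

  enum dev_type { PCICA, PCIXCC_MCL3, CEX2C, CEX2A, NTYPES };

  /* Rotate across the available devices of each capable type in order;
   * returns the chosen type, or -1 if nothing is available. */
  static int pick_type(const int avail[NTYPES], unsigned long *counter)
  {
      int total = 0, i, idx;

      for (i = 0; i < NTYPES; i++)
          total += avail[i];
      if (!total)
          return -1;

      idx = (int)((*counter)++ % total);
      for (i = 0; i < NTYPES; i++) {
          if (idx < avail[i])
              return i;
          idx -= avail[i];
      }
      return -1; /* not reached */
  }

  int main(void)
  {
      int avail[NTYPES] = { 2, 1, 1, 0 }; /* assumed example counts */
      unsigned long counter = 0;
      int i;

      for (i = 0; i < 8; i++)
          printf("request %d -> type %d\n", i, pick_type(avail, &counter));
      return 0;
  }

The bytelength check that precedes this in the driver short-circuits to PCICA or CEX2A for short moduli; only the rotation itself is modelled here.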
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index a7efc394515..54885475492 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -1,5 +1,5 @@
menu "S/390 network device drivers"
- depends on NETDEVICES && ARCH_S390
+ depends on NETDEVICES && S390
config LCS
tristate "Lan Channel Station Interface"
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 6b63d21612e..e70af7f3994 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -1603,7 +1603,7 @@ dumpit(char* buf, int len)
__u32 ct, sw, rm, dup;
char *ptr, *rptr;
char tbuf[82], tdup[82];
-#if (CONFIG_ARCH_S390X)
+#if (CONFIG_64BIT)
char addr[22];
#else
char addr[12];
@@ -1619,7 +1619,7 @@ dumpit(char* buf, int len)
dup = 0;
for ( ct=0; ct < len; ct++, ptr++, rptr++ ) {
if (sw == 0) {
-#if (CONFIG_ARCH_S390X)
+#if (CONFIG_64BIT)
sprintf(addr, "%16.16lX",(unsigned long)rptr);
#else
sprintf(addr, "%8.8X",(__u32)rptr);
@@ -1634,7 +1634,7 @@ dumpit(char* buf, int len)
if (sw == 8) {
strcat(bhex, " ");
}
-#if (CONFIG_ARCH_S390X)
+#if (CONFIG_64BIT)
sprintf(tbuf,"%2.2lX", (unsigned long)*ptr);
#else
sprintf(tbuf,"%2.2X", (__u32)*ptr);
diff --git a/drivers/s390/net/ctctty.c b/drivers/s390/net/ctctty.c
index 968f2c113ef..93d1725eb79 100644
--- a/drivers/s390/net/ctctty.c
+++ b/drivers/s390/net/ctctty.c
@@ -25,6 +25,7 @@
#include <linux/config.h>
#include <linux/module.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/serial_reg.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
@@ -101,25 +102,17 @@ static spinlock_t ctc_tty_lock;
static int
ctc_tty_try_read(ctc_tty_info * info, struct sk_buff *skb)
{
- int c;
int len;
struct tty_struct *tty;
DBF_TEXT(trace, 5, __FUNCTION__);
if ((tty = info->tty)) {
if (info->mcr & UART_MCR_RTS) {
- c = TTY_FLIPBUF_SIZE - tty->flip.count;
len = skb->len;
- if (c >= len) {
- memcpy(tty->flip.char_buf_ptr, skb->data, len);
- memset(tty->flip.flag_buf_ptr, 0, len);
- tty->flip.count += len;
- tty->flip.char_buf_ptr += len;
- tty->flip.flag_buf_ptr += len;
- tty_flip_buffer_push(tty);
- kfree_skb(skb);
- return 1;
- }
+ tty_insert_flip_string(tty, skb->data, len);
+ tty_flip_buffer_push(tty);
+ kfree_skb(skb);
+ return 1;
}
}
return 0;
@@ -138,19 +131,12 @@ ctc_tty_readmodem(ctc_tty_info *info)
DBF_TEXT(trace, 5, __FUNCTION__);
if ((tty = info->tty)) {
if (info->mcr & UART_MCR_RTS) {
- int c = TTY_FLIPBUF_SIZE - tty->flip.count;
struct sk_buff *skb;
- if ((c > 0) && (skb = skb_dequeue(&info->rx_queue))) {
+ if ((skb = skb_dequeue(&info->rx_queue))) {
int len = skb->len;
- if (len > c)
- len = c;
- memcpy(tty->flip.char_buf_ptr, skb->data, len);
+ tty_insert_flip_string(tty, skb->data, len);
skb_pull(skb, len);
- memset(tty->flip.flag_buf_ptr, 0, len);
- tty->flip.count += len;
- tty->flip.char_buf_ptr += len;
- tty->flip.flag_buf_ptr += len;
tty_flip_buffer_push(tty);
if (skb->len > 0)
skb_queue_head(&info->rx_queue, skb);
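
The ctctty hunks above drop the open-coded manipulation of tty->flip (count, char_buf_ptr, flag_buf_ptr) in favour of tty_insert_flip_string() plus tty_flip_buffer_push(). A hedged sketch of the resulting receive path for a driver of this era, mirroring the try_read case; push_skb_to_tty() is a hypothetical helper and the tty/skb come from the caller:

  #include <linux/tty.h>
  #include <linux/tty_flip.h>
  #include <linux/skbuff.h>

  /* Copy the skb payload into the tty's flip buffer and kick the line
   * discipline; the helpers do the space accounting the old code did
   * by hand against tty->flip. */
  static void push_skb_to_tty(struct tty_struct *tty, struct sk_buff *skb)
  {
      tty_insert_flip_string(tty, skb->data, skb->len);
      tty_flip_buffer_push(tty);
      kfree_skb(skb);
  }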
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
index 0075894c71d..77dacb46573 100644
--- a/drivers/s390/net/cu3088.c
+++ b/drivers/s390/net/cu3088.c
@@ -1,5 +1,5 @@
/*
- * $Id: cu3088.c,v 1.35 2005/03/30 19:28:52 richtera Exp $
+ * $Id: cu3088.c,v 1.36 2005/10/25 14:37:17 cohuck Exp $
*
* CTC / LCS ccw_device driver
*
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/err.h>
+#include <asm/s390_rdev.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c
index df7647c3c10..ea817739256 100644
--- a/drivers/s390/net/iucv.c
+++ b/drivers/s390/net/iucv.c
@@ -1,5 +1,5 @@
/*
- * $Id: iucv.c,v 1.45 2005/04/26 22:59:06 braunu Exp $
+ * $Id: iucv.c,v 1.47 2005/11/21 11:35:22 mschwide Exp $
*
* IUCV network driver
*
@@ -29,7 +29,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.45 $
+ * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.47 $
*
*/
@@ -54,7 +54,7 @@
#include <asm/s390_ext.h>
#include <asm/ebcdic.h>
#include <asm/smp.h>
-#include <asm/ccwdev.h> //for root device stuff
+#include <asm/s390_rdev.h>
/* FLAGS:
* All flags are defined in the field IPFLAGS1 of each function
@@ -355,7 +355,7 @@ do { \
static void
iucv_banner(void)
{
- char vbuf[] = "$Revision: 1.45 $";
+ char vbuf[] = "$Revision: 1.47 $";
char *version = vbuf;
if ((version = strchr(version, ':'))) {
@@ -477,7 +477,7 @@ grab_param(void)
ptr++;
if (ptr >= iucv_param_pool + PARAM_POOL_SIZE)
ptr = iucv_param_pool;
- } while (atomic_compare_and_swap(0, 1, &ptr->in_use));
+ } while (atomic_cmpxchg(&ptr->in_use, 0, 1) != 0);
hint = ptr - iucv_param_pool;
memset(&ptr->param, 0, sizeof(ptr->param));
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index f8f55cc468b..97f927c01a8 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -65,6 +65,7 @@
#include <asm/timex.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
+#include <asm/s390_rdev.h>
#include "qeth.h"
#include "qeth_mpc.h"
@@ -1396,7 +1397,7 @@ qeth_idx_activate_get_answer(struct qeth_channel *channel,
channel->ccw.cda = (__u32) __pa(iob->data);
wait_event(card->wait_q,
- atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
+ atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
QETH_DBF_TEXT(setup, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
rc = ccw_device_start(channel->ccwdev,
@@ -1463,7 +1464,7 @@ qeth_idx_activate_channel(struct qeth_channel *channel,
memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
wait_event(card->wait_q,
- atomic_compare_and_swap(0,1,&channel->irq_pending) == 0);
+ atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
QETH_DBF_TEXT(setup, 6, "noirqpnd");
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
rc = ccw_device_start(channel->ccwdev,
@@ -1616,7 +1617,7 @@ qeth_issue_next_read(struct qeth_card *card)
}
qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
wait_event(card->wait_q,
- atomic_compare_and_swap(0,1,&card->read.irq_pending) == 0);
+ atomic_cmpxchg(&card->read.irq_pending, 0, 1) == 0);
QETH_DBF_TEXT(trace, 6, "noirqpnd");
rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
(addr_t) iob, 0, 0);
@@ -1882,7 +1883,7 @@ qeth_send_control_data(struct qeth_card *card, int len,
spin_unlock_irqrestore(&card->lock, flags);
QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
wait_event(card->wait_q,
- atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
+ atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
qeth_prepare_control_data(card, len, iob);
if (IS_IPA(iob->data))
timer.expires = jiffies + QETH_IPA_TIMEOUT;
@@ -1924,7 +1925,7 @@ qeth_osn_send_control_data(struct qeth_card *card, int len,
QETH_DBF_TEXT(trace, 5, "osndctrd");
wait_event(card->wait_q,
- atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0);
+ atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
qeth_prepare_control_data(card, len, iob);
QETH_DBF_TEXT(trace, 6, "osnoirqp");
spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
@@ -4236,9 +4237,8 @@ qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
QETH_DBF_TEXT(trace, 6, "dosndpfa");
/* spin until we get the queue ... */
- while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
- QETH_OUT_Q_LOCKED,
- &queue->state));
+ while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
+ QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
/* ... now we've got the queue */
index = queue->next_buf_to_fill;
buffer = &queue->bufs[queue->next_buf_to_fill];
@@ -4292,9 +4292,8 @@ qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
QETH_DBF_TEXT(trace, 6, "dosndpkt");
/* spin until we get the queue ... */
- while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
- QETH_OUT_Q_LOCKED,
- &queue->state));
+ while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
+ QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
start_index = queue->next_buf_to_fill;
buffer = &queue->bufs[queue->next_buf_to_fill];
/*
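
Both the iucv and qeth hunks above replace the s390-private atomic_compare_and_swap(old, new, ptr), which returned a success flag, with the generic atomic_cmpxchg(ptr, old, new), which returns the value previously stored, so every loop condition now compares against the expected old value. A minimal sketch of the resulting busy-wait lock pattern, using a hypothetical queue_state variable; the cpu_relax() call is an illustration, not part of the original code:

  #include <asm/atomic.h>
  #include <asm/processor.h>

  #define Q_UNLOCKED 0
  #define Q_LOCKED   1

  static atomic_t queue_state = ATOMIC_INIT(Q_UNLOCKED);

  static void queue_lock(void)
  {
      /* atomic_cmpxchg() returns the old value: keep spinning until we
       * actually observed Q_UNLOCKED, i.e. until this CPU took the lock. */
      while (atomic_cmpxchg(&queue_state, Q_UNLOCKED, Q_LOCKED) != Q_UNLOCKED)
          cpu_relax();
  }

  static void queue_unlock(void)
  {
      atomic_set(&queue_state, Q_UNLOCKED);
  }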
diff --git a/drivers/s390/s390_rdev.c b/drivers/s390/s390_rdev.c
new file mode 100644
index 00000000000..566cc3d185b
--- /dev/null
+++ b/drivers/s390/s390_rdev.c
@@ -0,0 +1,53 @@
+/*
+ * drivers/s390/s390_rdev.c
+ * s390 root device
+ * $Revision: 1.2 $
+ *
+ * Copyright (C) 2002, 2005 IBM Deutschland Entwicklung GmbH,
+ * IBM Corporation
+ * Author(s): Cornelia Huck (cohuck@de.ibm.com)
+ * Carsten Otte (cotte@de.ibm.com)
+ */
+
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <asm/s390_rdev.h>
+
+static void
+s390_root_dev_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+struct device *
+s390_root_dev_register(const char *name)
+{
+ struct device *dev;
+ int ret;
+
+ if (!strlen(name))
+ return ERR_PTR(-EINVAL);
+ dev = kmalloc(sizeof(struct device), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+ memset(dev, 0, sizeof(struct device));
+ strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE));
+ dev->release = s390_root_dev_release;
+ ret = device_register(dev);
+ if (ret) {
+ kfree(dev);
+ return ERR_PTR(ret);
+ }
+ return dev;
+}
+
+void
+s390_root_dev_unregister(struct device *dev)
+{
+ if (dev)
+ device_unregister(dev);
+}
+
+EXPORT_SYMBOL(s390_root_dev_register);
+EXPORT_SYMBOL(s390_root_dev_unregister);
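
The new drivers/s390/s390_rdev.c moves the s390 "root device" helpers into their own file, so users include <asm/s390_rdev.h> instead of pulling the declarations out of the ccw device header (the cu3088, iucv and qeth hunks above switch to exactly that). A hedged sketch of typical use; mydrv is a hypothetical driver:

  #include <linux/module.h>
  #include <linux/init.h>
  #include <linux/err.h>
  #include <linux/device.h>
  #include <asm/s390_rdev.h>

  static struct device *mydrv_root_dev; /* hypothetical */

  static int __init mydrv_init(void)
  {
      mydrv_root_dev = s390_root_dev_register("mydrv");
      if (IS_ERR(mydrv_root_dev))
          return PTR_ERR(mydrv_root_dev);
      /* ... register the rest of the driver, parenting its devices
       * under mydrv_root_dev where appropriate ... */
      return 0;
  }

  static void __exit mydrv_exit(void)
  {
      s390_root_dev_unregister(mydrv_root_dev);
  }

  module_init(mydrv_init);
  module_exit(mydrv_exit);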
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 4191fd9d4d1..3bf46660351 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -23,7 +23,7 @@
static struct semaphore m_sem;
-extern int css_process_crw(int);
+extern int css_process_crw(int, int);
extern int chsc_process_crw(void);
extern int chp_process_crw(int, int);
extern void css_reiterate_subchannels(void);
@@ -49,9 +49,10 @@ s390_handle_damage(char *msg)
static int
s390_collect_crw_info(void *param)
{
- struct crw crw;
+ struct crw crw[2];
int ccode, ret, slow;
struct semaphore *sem;
+ unsigned int chain;
sem = (struct semaphore *)param;
/* Set a nice name. */
@@ -59,25 +60,50 @@ s390_collect_crw_info(void *param)
repeat:
down_interruptible(sem);
slow = 0;
+ chain = 0;
while (1) {
- ccode = stcrw(&crw);
+ if (unlikely(chain > 1)) {
+ struct crw tmp_crw;
+
+ printk(KERN_WARNING"%s: Code does not support more "
+ "than two chained crws; please report to "
+ "linux390@de.ibm.com!\n", __FUNCTION__);
+ ccode = stcrw(&tmp_crw);
+ printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
+ "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+ __FUNCTION__, tmp_crw.slct, tmp_crw.oflw,
+ tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
+ tmp_crw.erc, tmp_crw.rsid);
+ printk(KERN_WARNING"%s: This was crw number %x in the "
+ "chain\n", __FUNCTION__, chain);
+ if (ccode != 0)
+ break;
+ chain = tmp_crw.chn ? chain + 1 : 0;
+ continue;
+ }
+ ccode = stcrw(&crw[chain]);
if (ccode != 0)
break;
DBG(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
- crw.slct, crw.oflw, crw.chn, crw.rsc, crw.anc,
- crw.erc, crw.rsid);
+ crw[chain].slct, crw[chain].oflw, crw[chain].chn,
+ crw[chain].rsc, crw[chain].anc, crw[chain].erc,
+ crw[chain].rsid);
/* Check for overflows. */
- if (crw.oflw) {
+ if (crw[chain].oflw) {
pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
css_reiterate_subchannels();
+ chain = 0;
slow = 1;
continue;
}
- switch (crw.rsc) {
+ switch (crw[chain].rsc) {
case CRW_RSC_SCH:
- pr_debug("source is subchannel %04X\n", crw.rsid);
- ret = css_process_crw (crw.rsid);
+ if (crw[0].chn && !chain)
+ break;
+ pr_debug("source is subchannel %04X\n", crw[0].rsid);
+ ret = css_process_crw (crw[0].rsid,
+ chain ? crw[1].rsid : 0);
if (ret == -EAGAIN)
slow = 1;
break;
@@ -85,18 +111,18 @@ repeat:
pr_debug("source is monitoring facility\n");
break;
case CRW_RSC_CPATH:
- pr_debug("source is channel path %02X\n", crw.rsid);
- switch (crw.erc) {
+ pr_debug("source is channel path %02X\n", crw[0].rsid);
+ switch (crw[0].erc) {
case CRW_ERC_IPARM: /* Path has come. */
- ret = chp_process_crw(crw.rsid, 1);
+ ret = chp_process_crw(crw[0].rsid, 1);
break;
case CRW_ERC_PERRI: /* Path has gone. */
case CRW_ERC_PERRN:
- ret = chp_process_crw(crw.rsid, 0);
+ ret = chp_process_crw(crw[0].rsid, 0);
break;
default:
pr_debug("Don't know how to handle erc=%x\n",
- crw.erc);
+ crw[0].erc);
ret = 0;
}
if (ret == -EAGAIN)
@@ -115,6 +141,8 @@ repeat:
pr_debug("unknown source\n");
break;
}
+ /* chain is always 0 or 1 here. */
+ chain = crw[chain].chn ? chain + 1 : 0;
}
if (slow)
queue_work(slow_path_wq, &slow_path_work);
@@ -218,7 +246,7 @@ s390_revalidate_registers(struct mci *mci)
*/
kill_task = 1;
-#ifndef __s390x__
+#ifndef CONFIG_64BIT
asm volatile("ld 0,0(%0)\n"
"ld 2,8(%0)\n"
"ld 4,16(%0)\n"
@@ -227,7 +255,7 @@ s390_revalidate_registers(struct mci *mci)
#endif
if (MACHINE_HAS_IEEE) {
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
fpt_save_area = &S390_lowcore.floating_pt_save_area;
fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
#else
@@ -286,7 +314,7 @@ s390_revalidate_registers(struct mci *mci)
*/
s390_handle_damage("invalid control registers.");
else
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
asm volatile("lctlg 0,15,0(%0)"
: : "a" (&S390_lowcore.cregs_save_area));
#else
@@ -299,7 +327,7 @@ s390_revalidate_registers(struct mci *mci)
* can't write something sensible into that register.
*/
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
/*
* See if we can revalidate the TOD programmable register with its
* old contents (should be zero) otherwise set it to zero.
@@ -356,7 +384,7 @@ s390_do_machine_check(struct pt_regs *regs)
if (mci->b) {
/* Processing backup -> verify if we can survive this */
u64 z_mcic, o_mcic, t_mcic;
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
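
s390_collect_crw_info() above now collects up to two chained channel report words: the chn bit of each CRW says whether another CRW belongs to the same event, the chain index resets to 0 once an unchained CRW has been consumed, and anything longer than two is reported and skipped. A standalone model of just that bookkeeping (hypothetical CRW stream; the real handler also dispatches on rsc and handles overflow):

  #include <stdio.h>

  struct crw { unsigned chn:1; unsigned rsid:16; };

  /* Consume a stream of CRWs, collecting at most two chained entries
   * before acting on them, as the machine-check handler does. */
  static void consume(const struct crw *stream, int n)
  {
      struct crw crw[2];
      unsigned int chain = 0;
      int i;

      for (i = 0; i < n; i++) {
          if (chain > 1) {
              /* overlong chain: report and skip, as the driver does */
              printf("dropping crw %u of an overlong chain\n", chain);
              chain = stream[i].chn ? chain + 1 : 0;
              continue;
          }
          crw[chain] = stream[i];
          if (!stream[i].chn)
              printf("event: rsid=%x%s\n", crw[0].rsid,
                     chain ? " (chained)" : "");
          /* chain is always 0 or 1 here */
          chain = stream[i].chn ? chain + 1 : 0;
      }
  }

  int main(void)
  {
      struct crw s[] = { {0, 0x10}, {1, 0x20}, {0, 0x21}, {0, 0x30} };
      consume(s, 4);
      return 0;
  }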
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index 87c2db1bd4f..66da840c931 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -106,7 +106,7 @@ static inline int stsi (void *sysinfo,
{
int cc, retv;
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
__asm__ __volatile__ ( "lr\t0,%2\n"
"\tlr\t1,%3\n"
"\tstsi\t0(%4)\n"
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4c42065dea8..3c606cf8c8c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -914,7 +914,7 @@ config SCSI_INIA100
config SCSI_PPA
tristate "IOMEGA parallel port (ppa - older drives)"
- depends on SCSI && PARPORT
+ depends on SCSI && PARPORT_PC
---help---
This driver supports older versions of IOMEGA's parallel port ZIP
drive (a 100 MB removable media device).
@@ -941,7 +941,7 @@ config SCSI_PPA
config SCSI_IMM
tristate "IOMEGA parallel port (imm - newer drives)"
- depends on SCSI && PARPORT
+ depends on SCSI && PARPORT_PC
---help---
This driver supports newer versions of IOMEGA's parallel port ZIP
drive (a 100 MB removable media device).
@@ -968,7 +968,7 @@ config SCSI_IMM
config SCSI_IZIP_EPP16
bool "ppa/imm option - Use slow (but safe) EPP-16"
- depends on PARPORT && (SCSI_PPA || SCSI_IMM)
+ depends on SCSI_PPA || SCSI_IMM
---help---
EPP (Enhanced Parallel Port) is a standard for parallel ports which
allows them to act as expansion buses that can handle up to 64
@@ -983,7 +983,7 @@ config SCSI_IZIP_EPP16
config SCSI_IZIP_SLOW_CTR
bool "ppa/imm option - Assume slow parport control register"
- depends on PARPORT && (SCSI_PPA || SCSI_IMM)
+ depends on SCSI_PPA || SCSI_IMM
help
Some parallel ports are known to have excessive delays between
changing the parallel port control register and good data being
@@ -1815,7 +1815,7 @@ config SCSI_SUNESP
config ZFCP
tristate "FCP host bus adapter driver for IBM eServer zSeries"
- depends on ARCH_S390 && QDIO && SCSI
+ depends on S390 && QDIO && SCSI
select SCSI_FC_ATTRS
help
If you want to access SCSI devices attached to your IBM eServer
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index f062ea0f813..6e0c059df6a 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -45,7 +45,7 @@ obj-$(CONFIG_CYBERSTORMII_SCSI) += NCR53C9x.o cyberstormII.o
obj-$(CONFIG_BLZ2060_SCSI) += NCR53C9x.o blz2060.o
obj-$(CONFIG_BLZ1230_SCSI) += NCR53C9x.o blz1230.o
obj-$(CONFIG_FASTLANE_SCSI) += NCR53C9x.o fastlane.o
-obj-$(CONFIG_OKTAGON_SCSI) += NCR53C9x.o oktagon_esp.o oktagon_io.o
+obj-$(CONFIG_OKTAGON_SCSI) += NCR53C9x.o oktagon_esp_mod.o
obj-$(CONFIG_ATARI_SCSI) += atari_scsi.o
obj-$(CONFIG_MAC_SCSI) += mac_scsi.o
obj-$(CONFIG_SCSI_MAC_ESP) += mac_esp.o NCR53C9x.o
@@ -164,6 +164,7 @@ CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
zalon7xx-objs := zalon.o ncr53c8xx.o
NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
libata-objs := libata-core.o libata-scsi.o
+oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o
# Files generated that shall be removed upon make clean
clean-files := 53c7xx_d.h 53c700_d.h \
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c
index 640590bd014..c7dd0154d01 100644
--- a/drivers/scsi/NCR53C9x.c
+++ b/drivers/scsi/NCR53C9x.c
@@ -1799,6 +1799,7 @@ static int esp_do_data(struct NCR_ESP *esp, struct ESP_regs *eregs)
*/
int oldphase, i = 0; /* or where we left off last time ?? esp->current_data ?? */
int fifocnt = 0;
+ unsigned char *p = phys_to_virt((unsigned long)SCptr->SCp.ptr);
oldphase = esp_read(eregs->esp_status) & ESP_STAT_PMASK;
@@ -1860,7 +1861,7 @@ static int esp_do_data(struct NCR_ESP *esp, struct ESP_regs *eregs)
/* read fifo */
for(j=0;j<fifocnt;j++)
- SCptr->SCp.ptr[i++] = esp_read(eregs->esp_fdata);
+ p[i++] = esp_read(eregs->esp_fdata);
ESPDATA(("(%d) ", i));
@@ -1882,7 +1883,7 @@ static int esp_do_data(struct NCR_ESP *esp, struct ESP_regs *eregs)
/* fill fifo */
for(j=0;j<this_count;j++)
- esp_write(eregs->esp_fdata, SCptr->SCp.ptr[i++]);
+ esp_write(eregs->esp_fdata, p[i++]);
/* how many left if this goes out ?? */
hmuch -= this_count;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 3cb68af9045..9b9062f0246 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -46,7 +46,6 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
-#include <linux/ioctl32.h>
#include <linux/delay.h>
#include <linux/smp_lock.h>
#include <asm/semaphore.h>
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 887eaa2a3eb..d113290b5fc 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -214,7 +214,6 @@ static struct scsi_host_template ahci_sht = {
.dma_boundary = AHCI_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
- .ordered_flush = 1,
};
static const struct ata_port_operations ahci_ops = {
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 09ed05727bc..dda5a5f79c5 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -146,7 +146,6 @@
#include <asm/system.h>
#include <asm/io.h>
-#include <asm/irq.h>
#include <asm/ecard.h>
#include "../scsi.h"
diff --git a/drivers/scsi/arm/arxescsi.c b/drivers/scsi/arm/arxescsi.c
index 804125e35fc..a2894015670 100644
--- a/drivers/scsi/arm/arxescsi.c
+++ b/drivers/scsi/arm/arxescsi.c
@@ -33,7 +33,6 @@
#include <asm/dma.h>
#include <asm/io.h>
-#include <asm/irq.h>
#include <asm/ecard.h>
#include "../scsi.h"
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index 81e266be26d..e6c9491dc5c 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -13,7 +13,6 @@
#include <asm/ecard.h>
#include <asm/io.h>
-#include <asm/irq.h>
#include <asm/system.h>
#include "../scsi.h"
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index 3a7a46b0dc4..583d2d8c833 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -31,7 +31,6 @@
#include <asm/dma.h>
#include <asm/ecard.h>
#include <asm/io.h>
-#include <asm/irq.h>
#include <asm/pgtable.h>
#include "../scsi.h"
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index 4d1e8f52c92..3ffec7efc9d 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -35,7 +35,6 @@
#include <linux/dma-mapping.h>
#include <asm/io.h>
-#include <asm/irq.h>
#include <asm/dma.h>
#include <asm/ecard.h>
#include <asm/pgtable.h>
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index 3333d7b3913..3113bdcedb1 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -22,7 +22,6 @@
#include <asm/dma.h>
#include <asm/ecard.h>
#include <asm/io.h>
-#include <asm/irq.h>
#include <asm/pgtable.h>
#include "../scsi.h"
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 0ea27873b9f..557788ec4ee 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -166,6 +166,8 @@ static struct pci_driver piix_pci_driver = {
.id_table = piix_pci_tbl,
.probe = piix_init_one,
.remove = ata_pci_remove_one,
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
};
static struct scsi_host_template piix_sht = {
@@ -185,7 +187,8 @@ static struct scsi_host_template piix_sht = {
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
- .ordered_flush = 1,
+ .resume = ata_scsi_device_resume,
+ .suspend = ata_scsi_device_suspend,
};
static const struct ata_port_operations piix_pata_ops = {
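
ata_piix above is the first libata driver wired into the new suspend/resume support: the pci_driver hooks handle PCI power state and config space, while the scsi_host_template hooks quiesce and reawaken the ATA device itself. Another libata driver would opt in the same way; a hedged sketch with hypothetical mydrv names, assuming the ata_pci_device_suspend/resume and ata_scsi_device_suspend/resume helpers exported by this series:

  #include <linux/pci.h>
  #include <scsi/scsi_host.h>
  #include <linux/libata.h>

  static struct pci_driver mydrv_pci_driver = {      /* hypothetical */
      .name    = "mydrv",
      /* .id_table, .probe and .remove as usual ... */
      .suspend = ata_pci_device_suspend,  /* save state, go to D3hot */
      .resume  = ata_pci_device_resume,   /* back to D0, restore state */
  };

  static struct scsi_host_template mydrv_sht = {     /* hypothetical */
      /* ... the usual libata template fields ... */
      .suspend = ata_scsi_device_suspend, /* flush cache, then standby */
      .resume  = ata_scsi_device_resume,  /* idle immediate, set mode */
  };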
diff --git a/drivers/scsi/blz1230.c b/drivers/scsi/blz1230.c
index 763e409a1ff..3867ac2de4c 100644
--- a/drivers/scsi/blz1230.c
+++ b/drivers/scsi/blz1230.c
@@ -224,7 +224,7 @@ static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
static void dma_dump_state(struct NCR_ESP *esp)
{
ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
- custom.intreqr, custom.intenar));
+ amiga_custom.intreqr, amiga_custom.intenar));
}
void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
@@ -298,7 +298,7 @@ static int dma_irq_p(struct NCR_ESP *esp)
static int dma_ports_p(struct NCR_ESP *esp)
{
- return ((custom.intenar) & IF_PORTS);
+ return ((amiga_custom.intenar) & IF_PORTS);
}
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
diff --git a/drivers/scsi/blz2060.c b/drivers/scsi/blz2060.c
index d72d05fffdf..4ebe69e3275 100644
--- a/drivers/scsi/blz2060.c
+++ b/drivers/scsi/blz2060.c
@@ -190,7 +190,7 @@ static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
static void dma_dump_state(struct NCR_ESP *esp)
{
ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
- custom.intreqr, custom.intenar));
+ amiga_custom.intreqr, amiga_custom.intenar));
}
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
@@ -251,7 +251,7 @@ static void dma_led_on(struct NCR_ESP *esp)
static int dma_ports_p(struct NCR_ESP *esp)
{
- return ((custom.intenar) & IF_PORTS);
+ return ((amiga_custom.intenar) & IF_PORTS);
}
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 0920220f331..4299fabca55 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -20,7 +20,6 @@
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
-#include <linux/ioctl32.h>
#include <linux/compat.h>
#include <linux/chio.h> /* here are all the ioctls */
diff --git a/drivers/scsi/cyberstorm.c b/drivers/scsi/cyberstorm.c
index f9b940e5643..a4a4fac5c0a 100644
--- a/drivers/scsi/cyberstorm.c
+++ b/drivers/scsi/cyberstorm.c
@@ -223,7 +223,7 @@ static void dma_dump_state(struct NCR_ESP *esp)
esp->esp_id, ((struct cyber_dma_registers *)
(esp->dregs))->cond_reg));
ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
- custom.intreqr, custom.intenar));
+ amiga_custom.intreqr, amiga_custom.intenar));
}
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
@@ -322,7 +322,7 @@ static void dma_led_on(struct NCR_ESP *esp)
static int dma_ports_p(struct NCR_ESP *esp)
{
- return ((custom.intenar) & IF_PORTS);
+ return ((amiga_custom.intenar) & IF_PORTS);
}
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
diff --git a/drivers/scsi/cyberstormII.c b/drivers/scsi/cyberstormII.c
index a3caabfd755..3a803d73bc5 100644
--- a/drivers/scsi/cyberstormII.c
+++ b/drivers/scsi/cyberstormII.c
@@ -200,7 +200,7 @@ static void dma_dump_state(struct NCR_ESP *esp)
esp->esp_id, ((struct cyberII_dma_registers *)
(esp->dregs))->cond_reg));
ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
- custom.intreqr, custom.intenar));
+ amiga_custom.intreqr, amiga_custom.intenar));
}
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
@@ -259,7 +259,7 @@ static void dma_led_on(struct NCR_ESP *esp)
static int dma_ports_p(struct NCR_ESP *esp)
{
- return ((custom.intenar) & IF_PORTS);
+ return ((amiga_custom.intenar) & IF_PORTS);
}
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
diff --git a/drivers/scsi/fastlane.c b/drivers/scsi/fastlane.c
index ccee68b52f7..8ae9c406a83 100644
--- a/drivers/scsi/fastlane.c
+++ b/drivers/scsi/fastlane.c
@@ -268,7 +268,7 @@ static void dma_dump_state(struct NCR_ESP *esp)
esp->esp_id, ((struct fastlane_dma_registers *)
(esp->dregs))->cond_reg));
ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
- custom.intreqr, custom.intenar));
+ amiga_custom.intreqr, amiga_custom.intenar));
}
static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
@@ -368,7 +368,7 @@ static void dma_led_on(struct NCR_ESP *esp)
static int dma_ports_p(struct NCR_ESP *esp)
{
- return ((custom.intenar) & IF_PORTS);
+ return ((amiga_custom.intenar) & IF_PORTS);
}
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 5b9c2c5a7f0..66783c860a1 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -347,17 +347,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost->cmd_per_lun = sht->cmd_per_lun;
shost->unchecked_isa_dma = sht->unchecked_isa_dma;
shost->use_clustering = sht->use_clustering;
- shost->ordered_flush = sht->ordered_flush;
shost->ordered_tag = sht->ordered_tag;
- /*
- * hosts/devices that do queueing must support ordered tags
- */
- if (shost->can_queue > 1 && shost->ordered_flush) {
- printk(KERN_ERR "scsi: ordered flushes don't support queueing\n");
- shost->ordered_flush = 0;
- }
-
if (sht->max_host_blocked)
shost->max_host_blocked = sht->max_host_blocked;
else
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 4cb1f3ed910..3c688ef5466 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -1046,7 +1046,7 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
/* kill current request */
blkdev_dequeue_request(req);
- end_that_request_last(req);
+ end_that_request_last(req, 0);
if (req->flags & REQ_SENSE)
kfree(scsi->pc->buffer);
kfree(scsi->pc);
@@ -1056,7 +1056,7 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
/* now nuke the drive queue */
while ((req = elv_next_request(drive->queue))) {
blkdev_dequeue_request(req);
- end_that_request_last(req);
+ end_that_request_last(req, 0);
}
HWGROUP(drive)->rq = NULL;
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 9ea10258791..f55b9b3f7b3 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -562,16 +562,28 @@ static const u8 ata_rw_cmds[] = {
ATA_CMD_WRITE_MULTI,
ATA_CMD_READ_MULTI_EXT,
ATA_CMD_WRITE_MULTI_EXT,
+ 0,
+ 0,
+ 0,
+ ATA_CMD_WRITE_MULTI_FUA_EXT,
/* pio */
ATA_CMD_PIO_READ,
ATA_CMD_PIO_WRITE,
ATA_CMD_PIO_READ_EXT,
ATA_CMD_PIO_WRITE_EXT,
+ 0,
+ 0,
+ 0,
+ 0,
/* dma */
ATA_CMD_READ,
ATA_CMD_WRITE,
ATA_CMD_READ_EXT,
- ATA_CMD_WRITE_EXT
+ ATA_CMD_WRITE_EXT,
+ 0,
+ 0,
+ 0,
+ ATA_CMD_WRITE_FUA_EXT
};
/**
@@ -584,25 +596,32 @@ static const u8 ata_rw_cmds[] = {
* LOCKING:
* caller.
*/
-void ata_rwcmd_protocol(struct ata_queued_cmd *qc)
+int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
{
struct ata_taskfile *tf = &qc->tf;
struct ata_device *dev = qc->dev;
+ u8 cmd;
- int index, lba48, write;
+ int index, fua, lba48, write;
+ fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
if (dev->flags & ATA_DFLAG_PIO) {
tf->protocol = ATA_PROT_PIO;
- index = dev->multi_count ? 0 : 4;
+ index = dev->multi_count ? 0 : 8;
} else {
tf->protocol = ATA_PROT_DMA;
- index = 8;
+ index = 16;
}
- tf->command = ata_rw_cmds[index + lba48 + write];
+ cmd = ata_rw_cmds[index + fua + lba48 + write];
+ if (cmd) {
+ tf->command = cmd;
+ return 0;
+ }
+ return -1;
}
static const char * const xfer_mode_str[] = {
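
With FUA added, ata_rw_cmds[] above grows from three groups of four opcodes to three groups of eight, indexed within each group by write (bit 0), lba48 (bit 1) and fua (bit 2); a zero entry means the combination has no opcode (FUA is only defined for the LBA48 multi and DMA writes), and ata_rwcmd_protocol() now fails the request instead of issuing a bogus command. A standalone model of the lookup; the opcode values are the standard ATA ones, shown only for illustration:

  #include <stdio.h>

  /* index = group_base + fua*4 + lba48*2 + write; 0 == unsupported combo */
  static const unsigned char rw_cmds[] = {
      /* multi-count pio group (base 0) */
      0xC4, 0xC5, 0x29, 0x39, 0, 0, 0, 0xCE,
      /* pio group (base 8) */
      0x20, 0x30, 0x24, 0x34, 0, 0, 0, 0,
      /* dma group (base 16) */
      0xC8, 0xCA, 0x25, 0x35, 0, 0, 0, 0x3D,
  };

  static int pick_cmd(int base, int fua, int lba48, int write,
                      unsigned char *cmd)
  {
      unsigned char c = rw_cmds[base + 4 * fua + 2 * lba48 + write];

      if (!c)
          return -1; /* e.g. FUA without LBA48 */
      *cmd = c;
      return 0;
  }

  int main(void)
  {
      unsigned char cmd;

      if (pick_cmd(16, 1, 1, 1, &cmd) == 0)
          printf("dma fua write -> 0x%02X\n", cmd); /* 0x3D */
      if (pick_cmd(16, 1, 0, 1, &cmd) < 0)
          printf("fua without lba48 is rejected\n");
      return 0;
  }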
@@ -4154,6 +4173,96 @@ err_out:
* Inherited from caller.
*/
+/*
+ * Execute a 'simple' command that consists only of the opcode 'cmd' itself,
+ * without filling in any other registers.
+ */
+static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
+ u8 cmd)
+{
+ struct ata_taskfile tf;
+ int err;
+
+ ata_tf_init(ap, &tf, dev->devno);
+
+ tf.command = cmd;
+ tf.flags |= ATA_TFLAG_DEVICE;
+ tf.protocol = ATA_PROT_NODATA;
+
+ err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
+ if (err)
+ printk(KERN_ERR "%s: ata command failed: %d\n",
+ __FUNCTION__, err);
+
+ return err;
+}
+
+static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
+{
+ u8 cmd;
+
+ if (!ata_try_flush_cache(dev))
+ return 0;
+
+ if (ata_id_has_flush_ext(dev->id))
+ cmd = ATA_CMD_FLUSH_EXT;
+ else
+ cmd = ATA_CMD_FLUSH;
+
+ return ata_do_simple_cmd(ap, dev, cmd);
+}
+
+static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
+{
+ return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
+}
+
+static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
+{
+ return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
+}
+
+/**
+ * ata_device_resume - wake up a previously suspended device
+ *
+ * Kick the drive back into action, by sending it an idle immediate
+ * command and making sure its transfer mode matches between drive
+ * and host.
+ *
+ */
+int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
+{
+ if (ap->flags & ATA_FLAG_SUSPENDED) {
+ ap->flags &= ~ATA_FLAG_SUSPENDED;
+ ata_set_mode(ap);
+ }
+ if (!ata_dev_present(dev))
+ return 0;
+ if (dev->class == ATA_DEV_ATA)
+ ata_start_drive(ap, dev);
+
+ return 0;
+}
+
+/**
+ * ata_device_suspend - prepare a device for suspend
+ *
+ * Flush the cache on the drive, if appropriate, then issue a
+ * standbynow command.
+ *
+ */
+int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
+{
+ if (!ata_dev_present(dev))
+ return 0;
+ if (dev->class == ATA_DEV_ATA)
+ ata_flush_cache(ap, dev);
+
+ ata_standby_drive(ap, dev);
+ ap->flags |= ATA_FLAG_SUSPENDED;
+ return 0;
+}
+
int ata_port_start (struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
@@ -4902,6 +5011,23 @@ int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
return (tmp == bits->val) ? 1 : 0;
}
+
+int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+ return 0;
+}
+
+int ata_pci_device_resume(struct pci_dev *pdev)
+{
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_enable_device(pdev);
+ pci_set_master(pdev);
+ return 0;
+}
#endif /* CONFIG_PCI */
@@ -5005,4 +5131,11 @@ EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
+EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
+EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PCI */
+
+EXPORT_SYMBOL_GPL(ata_device_suspend);
+EXPORT_SYMBOL_GPL(ata_device_resume);
+EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
+EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index e0439be4b57..cfbceb50471 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -396,6 +396,22 @@ void ata_dump_status(unsigned id, struct ata_taskfile *tf)
}
}
+int ata_scsi_device_resume(struct scsi_device *sdev)
+{
+ struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0];
+ struct ata_device *dev = &ap->device[sdev->id];
+
+ return ata_device_resume(ap, dev);
+}
+
+int ata_scsi_device_suspend(struct scsi_device *sdev)
+{
+ struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0];
+ struct ata_device *dev = &ap->device[sdev->id];
+
+ return ata_device_suspend(ap, dev);
+}
+
/**
* ata_to_sense_error - convert ATA error to SCSI error
* @id: ATA device number
@@ -1080,11 +1096,13 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
scsicmd[0] == WRITE_16)
tf->flags |= ATA_TFLAG_WRITE;
- /* Calculate the SCSI LBA and transfer length. */
+ /* Calculate the SCSI LBA, transfer length and FUA. */
switch (scsicmd[0]) {
case READ_10:
case WRITE_10:
scsi_10_lba_len(scsicmd, &block, &n_block);
+ if (unlikely(scsicmd[1] & (1 << 3)))
+ tf->flags |= ATA_TFLAG_FUA;
break;
case READ_6:
case WRITE_6:
@@ -1099,6 +1117,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
case READ_16:
case WRITE_16:
scsi_16_lba_len(scsicmd, &block, &n_block);
+ if (unlikely(scsicmd[1] & (1 << 3)))
+ tf->flags |= ATA_TFLAG_FUA;
break;
default:
DPRINTK("no-byte command\n");
@@ -1142,7 +1162,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
tf->device |= (block >> 24) & 0xf;
}
- ata_rwcmd_protocol(qc);
+ if (unlikely(ata_rwcmd_protocol(qc) < 0))
+ goto invalid_fld;
qc->nsect = n_block;
tf->nsect = n_block & 0xff;
@@ -1160,7 +1181,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
if ((block >> 28) || (n_block > 256))
goto out_of_range;
- ata_rwcmd_protocol(qc);
+ if (unlikely(ata_rwcmd_protocol(qc) < 0))
+ goto invalid_fld;
/* Convert LBA to CHS */
track = (u32)block / dev->sectors;
@@ -1695,6 +1717,7 @@ static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last)
unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
unsigned int buflen)
{
+ struct ata_device *dev = args->dev;
u8 *scsicmd = args->cmd->cmnd, *p, *last;
const u8 sat_blk_desc[] = {
0, 0, 0, 0, /* number of blocks: sat unspecified */
@@ -1703,6 +1726,7 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
};
u8 pg, spg;
unsigned int ebd, page_control, six_byte, output_len, alloc_len, minlen;
+ u8 dpofua;
VPRINTK("ENTER\n");
@@ -1771,9 +1795,17 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
if (minlen < 1)
return 0;
+
+ dpofua = 0;
+ if (ata_id_has_fua(args->id) && dev->flags & ATA_DFLAG_LBA48 &&
+ (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
+ dpofua = 1 << 4;
+
if (six_byte) {
output_len--;
rbuf[0] = output_len;
+ if (minlen > 2)
+ rbuf[2] |= dpofua;
if (ebd) {
if (minlen > 3)
rbuf[3] = sizeof(sat_blk_desc);
@@ -1786,6 +1818,8 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
rbuf[0] = output_len >> 8;
if (minlen > 1)
rbuf[1] = output_len;
+ if (minlen > 3)
+ rbuf[3] |= dpofua;
if (ebd) {
if (minlen > 7)
rbuf[7] = sizeof(sat_blk_desc);
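
ata_scsiop_mode_sense() above advertises the capability back to the SCSI layer through the DPOFUA bit (bit 4) of the device-specific parameter in the mode parameter header: byte 2 for MODE SENSE(6), byte 3 for MODE SENSE(10), and only when the drive really supports FUA writes (LBA48, and not PIO without multi-count). A standalone sketch of setting the bit; only the header bytes are modelled:

  #include <stdio.h>
  #include <string.h>

  /* Set DPOFUA (bit 4 of the device-specific parameter) in a mode
   * parameter header; six_byte selects the MODE SENSE(6) layout. */
  static void set_dpofua(unsigned char *hdr, int six_byte, int supported)
  {
      unsigned char bit = supported ? (1 << 4) : 0;

      if (six_byte)
          hdr[2] |= bit;
      else
          hdr[3] |= bit;
  }

  int main(void)
  {
      unsigned char hdr6[4], hdr10[8];

      memset(hdr6, 0, sizeof(hdr6));
      memset(hdr10, 0, sizeof(hdr10));
      set_dpofua(hdr6, 1, 1);
      set_dpofua(hdr10, 0, 1);
      printf("6-byte hdr[2]=0x%02x, 10-byte hdr[3]=0x%02x\n",
             hdr6[2], hdr10[3]);
      return 0;
  }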
@@ -2446,7 +2480,7 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
if (xlat_func)
ata_scsi_translate(ap, dev, cmd, done, xlat_func);
else
- ata_scsi_simulate(dev->id, cmd, done);
+ ata_scsi_simulate(ap, dev, cmd, done);
} else
ata_scsi_translate(ap, dev, cmd, done, atapi_xlat);
@@ -2469,14 +2503,16 @@ out_unlock:
* spin_lock_irqsave(host_set lock)
*/
-void ata_scsi_simulate(u16 *id,
+void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
struct ata_scsi_args args;
const u8 *scsicmd = cmd->cmnd;
- args.id = id;
+ args.ap = ap;
+ args.dev = dev;
+ args.id = dev->id;
args.cmd = cmd;
args.done = done;
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index 251e53bdc6e..e03ce48b7b4 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -32,6 +32,8 @@
#define DRV_VERSION "1.20" /* must be exactly four chars */
struct ata_scsi_args {
+ struct ata_port *ap;
+ struct ata_device *dev;
u16 *id;
struct scsi_cmnd *cmd;
void (*done)(struct scsi_cmnd *);
@@ -41,7 +43,7 @@ struct ata_scsi_args {
extern int atapi_enabled;
extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
struct ata_device *dev);
-extern void ata_rwcmd_protocol(struct ata_queued_cmd *qc);
+extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
extern void ata_qc_free(struct ata_queued_cmd *qc);
extern int ata_qc_issue(struct ata_queued_cmd *qc);
extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 932dcf0366e..311a4122bd7 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -432,11 +432,12 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat
struct Scsi_Host *host;
void *dma_cmd_space;
unsigned char *clkprop;
- int proplen;
+ int proplen, rc = -ENODEV;
if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) {
- printk(KERN_ERR "mac53c94: expected 2 addrs and intrs (got %d/%d)\n",
- node->n_addrs, node->n_intrs);
+ printk(KERN_ERR "mac53c94: expected 2 addrs and intrs"
+ " (got %d/%d)\n",
+ macio_resource_count(mdev), macio_irq_count(mdev));
return -ENODEV;
}
@@ -448,6 +449,7 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat
host = scsi_host_alloc(&mac53c94_template, sizeof(struct fsc_state));
if (host == NULL) {
printk(KERN_ERR "mac53c94: couldn't register host");
+ rc = -ENOMEM;
goto out_release;
}
@@ -486,6 +488,7 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat
if (dma_cmd_space == 0) {
printk(KERN_ERR "mac53c94: couldn't allocate dma "
"command space for %s\n", node->full_name);
+ rc = -ENOMEM;
goto out_free;
}
state->dma_cmds = (struct dbdma_cmd *)DBDMA_ALIGN(dma_cmd_space);
@@ -495,18 +498,21 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat
mac53c94_init(state);
- if (request_irq(state->intr, do_mac53c94_interrupt, 0, "53C94", state)) {
+ if (request_irq(state->intr, do_mac53c94_interrupt, 0, "53C94",state)) {
printk(KERN_ERR "mac53C94: can't get irq %d for %s\n",
state->intr, node->full_name);
goto out_free_dma;
}
- /* XXX FIXME: handle failure */
- scsi_add_host(host, &mdev->ofdev.dev);
- scsi_scan_host(host);
+ rc = scsi_add_host(host, &mdev->ofdev.dev);
+ if (rc != 0)
+ goto out_release_irq;
+ scsi_scan_host(host);
return 0;
+ out_release_irq:
+ free_irq(state->intr, state);
out_free_dma:
kfree(state->dma_cmd_space);
out_free:
@@ -518,7 +524,7 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat
out_release:
macio_release_resources(mdev);
- return -ENODEV;
+ return rc;
}
static int mac53c94_remove(struct macio_dev *mdev)
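
The mac53c94 probe path above is reworked into the usual goto-based unwind: each resource acquisition gets a matching cleanup label, failures jump to the label that releases everything obtained so far, and rc carries the most specific error back to the caller instead of a blanket -ENODEV. A small standalone model of the shape, with hypothetical resources standing in for the IRQ, DMA space and host registration:

  #include <stdio.h>
  #include <stdlib.h>

  /* Hypothetical stand-ins for the resources probe() acquires. */
  static void *get_a(void) { return malloc(16); }
  static void *get_b(void) { return malloc(16); }
  static int   do_register(void) { return 0; } /* 0 on success */

  static int probe(void)
  {
      void *a, *b;
      int rc = -1;

      a = get_a();
      if (!a)
          goto out;
      b = get_b();
      if (!b)
          goto out_free_a;
      rc = do_register();
      if (rc != 0)
          goto out_free_b;
      return 0;          /* success: a and b stay live */

  out_free_b:
      free(b);
  out_free_a:
      free(a);
  out:
      return rc;
  }

  int main(void)
  {
      printf("probe() -> %d\n", probe());
      return 0;
  }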
diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h
index eb8c390a0fa..3d9e67d6849 100644
--- a/drivers/scsi/megaraid/megaraid_mm.h
+++ b/drivers/scsi/megaraid/megaraid_mm.h
@@ -22,7 +22,6 @@
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/list.h>
-#include <linux/ioctl32.h>
#include "mbox_defs.h"
#include "megaraid_ioctl.h"
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index bdccf73cf9f..d6d2125f904 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1869,7 +1869,8 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) {
printk(KERN_ERR "mesh: expected 2 addrs and 2 intrs"
- " (got %d,%d)\n", mesh->n_addrs, mesh->n_intrs);
+ " (got %d,%d)\n", macio_resource_count(mdev),
+ macio_irq_count(mdev));
return -ENODEV;
}
diff --git a/drivers/scsi/oktagon_esp.c b/drivers/scsi/oktagon_esp.c
index 5d9c9ada814..dee426f8c07 100644
--- a/drivers/scsi/oktagon_esp.c
+++ b/drivers/scsi/oktagon_esp.c
@@ -490,7 +490,7 @@ static void dma_led_on(struct NCR_ESP *esp)
static int dma_ports_p(struct NCR_ESP *esp)
{
- return ((custom.intenar) & IF_PORTS);
+ return ((amiga_custom.intenar) & IF_PORTS);
}
static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index b2bf16a9bf4..cd54244058b 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -374,7 +374,6 @@ static struct scsi_host_template mv_sht = {
.dma_boundary = MV_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
- .ordered_flush = 1,
};
static const struct ata_port_operations mv5_ops = {
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index 4954896dfdb..bbbb55eeb73 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -29,6 +29,12 @@
* NV-specific details such as register offsets, SATA phy location,
* hotplug info, etc.
*
+ * 0.10
+ * - Fixed spurious interrupts issue seen with the Maxtor 6H500F0 500GB
+ * drive. Also made the check_hotplug() callbacks return whether there
+ * was a hotplug interrupt or not. This was not the source of the
+ * spurious interrupts, but is the right thing to do anyway.
+ *
* 0.09
* - Fixed bug introduced by 0.08's MCP51 and MCP55 support.
*
@@ -124,10 +130,10 @@ static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static void nv_host_stop (struct ata_host_set *host_set);
static void nv_enable_hotplug(struct ata_probe_ent *probe_ent);
static void nv_disable_hotplug(struct ata_host_set *host_set);
-static void nv_check_hotplug(struct ata_host_set *host_set);
+static int nv_check_hotplug(struct ata_host_set *host_set);
static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent);
static void nv_disable_hotplug_ck804(struct ata_host_set *host_set);
-static void nv_check_hotplug_ck804(struct ata_host_set *host_set);
+static int nv_check_hotplug_ck804(struct ata_host_set *host_set);
enum nv_host_type
{
@@ -176,7 +182,7 @@ struct nv_host_desc
enum nv_host_type host_type;
void (*enable_hotplug)(struct ata_probe_ent *probe_ent);
void (*disable_hotplug)(struct ata_host_set *host_set);
- void (*check_hotplug)(struct ata_host_set *host_set);
+ int (*check_hotplug)(struct ata_host_set *host_set);
};
static struct nv_host_desc nv_device_tbl[] = {
@@ -235,7 +241,6 @@ static struct scsi_host_template nv_sht = {
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
- .ordered_flush = 1,
};
static const struct ata_port_operations nv_ops = {
@@ -310,12 +315,16 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance,
qc = ata_qc_from_tag(ap, ap->active_tag);
if (qc && (!(qc->tf.ctl & ATA_NIEN)))
handled += ata_host_intr(ap, qc);
+ else
+ // No request pending? Clear interrupt status
+ // anyway, in case there's one pending.
+ ap->ops->check_status(ap);
}
}
if (host->host_desc->check_hotplug)
- host->host_desc->check_hotplug(host_set);
+ handled += host->host_desc->check_hotplug(host_set);
spin_unlock_irqrestore(&host_set->lock, flags);
@@ -498,7 +507,7 @@ static void nv_disable_hotplug(struct ata_host_set *host_set)
outb(intr_mask, host_set->ports[0]->ioaddr.scr_addr + NV_INT_ENABLE);
}
-static void nv_check_hotplug(struct ata_host_set *host_set)
+static int nv_check_hotplug(struct ata_host_set *host_set)
{
u8 intr_status;
@@ -523,7 +532,11 @@ static void nv_check_hotplug(struct ata_host_set *host_set)
if (intr_status & NV_INT_STATUS_SDEV_REMOVED)
printk(KERN_WARNING "nv_sata: "
"Secondary device removed\n");
+
+ return 1;
}
+
+ return 0;
}
static void nv_enable_hotplug_ck804(struct ata_probe_ent *probe_ent)
@@ -561,7 +574,7 @@ static void nv_disable_hotplug_ck804(struct ata_host_set *host_set)
pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}
-static void nv_check_hotplug_ck804(struct ata_host_set *host_set)
+static int nv_check_hotplug_ck804(struct ata_host_set *host_set)
{
u8 intr_status;
@@ -586,7 +599,11 @@ static void nv_check_hotplug_ck804(struct ata_host_set *host_set)
if (intr_status & NV_INT_STATUS_SDEV_REMOVED)
printk(KERN_WARNING "nv_sata: "
"Secondary device removed\n");
+
+ return 1;
}
+
+ return 0;
}
static int __init nv_init(void)
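
In sata_nv above, the check_hotplug() callbacks now return whether they saw a hotplug interrupt, and nv_interrupt() adds that to handled, so a hotplug-only interrupt is no longer reported to the shared-IRQ core as IRQ_NONE. A hedged sketch of the accumulate-and-report pattern for a handler of this era; the mydev callbacks are hypothetical:

  #include <linux/interrupt.h>

  struct mydev;                               /* hypothetical */
  int mydev_service_ports(struct mydev *dev); /* # of command events handled */
  int mydev_check_hotplug(struct mydev *dev); /* 1 if a hotplug irq was seen */

  static irqreturn_t mydev_interrupt(int irq, void *dev_instance,
                                     struct pt_regs *regs)
  {
      struct mydev *dev = dev_instance;
      int handled = 0;

      handled += mydev_service_ports(dev);
      handled += mydev_check_hotplug(dev);

      /* IRQ_RETVAL(x): IRQ_HANDLED if x is non-zero, IRQ_NONE otherwise */
      return IRQ_RETVAL(handled);
  }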
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index da7fa04b8a7..3d1ea09a06a 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -114,7 +114,6 @@ static struct scsi_host_template pdc_ata_sht = {
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
- .ordered_flush = 1,
};
static const struct ata_port_operations pdc_sata_ops = {
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index d2053487c73..b017f85e6d6 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -147,7 +147,6 @@ static struct scsi_host_template sil_sht = {
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
- .ordered_flush = 1,
};
static const struct ata_port_operations sil_ops = {
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index a0ad3ed2200..923130185a9 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -292,7 +292,6 @@ static struct scsi_host_template sil24_sht = {
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
- .ordered_flush = 1, /* NCQ not supported yet */
};
static const struct ata_port_operations sil24_ops = {
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index 32e12620b16..2df8c5632ac 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -99,7 +99,6 @@ static struct scsi_host_template sis_sht = {
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
- .ordered_flush = 1,
};
static const struct ata_port_operations sis_ops = {
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index 6e7f7c83a75..668373590aa 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -303,7 +303,6 @@ static struct scsi_host_template k2_sata_sht = {
.proc_info = k2_sata_proc_info,
#endif
.bios_param = ata_std_bios_param,
- .ordered_flush = 1,
};
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index 94b253b80da..bc87c16c80d 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -194,7 +194,6 @@ static struct scsi_host_template pdc_sata_sht = {
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
- .ordered_flush = 1,
};
static const struct ata_port_operations pdc_20621_ops = {
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index b2422a0f25c..9635ca70097 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -87,7 +87,6 @@ static struct scsi_host_template uli_sht = {
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
- .ordered_flush = 1,
};
static const struct ata_port_operations uli_ops = {
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index c76215692da..6d5b0a794cf 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -106,7 +106,6 @@ static struct scsi_host_template svia_sht = {
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
- .ordered_flush = 1,
};
static const struct ata_port_operations svia_sata_ops = {
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index fcfa486965b..2e2c3b7acb0 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -235,7 +235,6 @@ static struct scsi_host_template vsc_sata_sht = {
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
- .ordered_flush = 1,
};
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 180676d7115..ee5f4dfdab1 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -69,7 +69,6 @@
#include "scsi_logging.h"
static void scsi_done(struct scsi_cmnd *cmd);
-static int scsi_retry_command(struct scsi_cmnd *cmd);
/*
* Definitions and constants.
@@ -752,7 +751,7 @@ static void scsi_done(struct scsi_cmnd *cmd)
* isn't running --- used by scsi_times_out */
void __scsi_done(struct scsi_cmnd *cmd)
{
- unsigned long flags;
+ struct request *rq = cmd->request;
/*
* Set the serial numbers back to zero
@@ -763,71 +762,14 @@ void __scsi_done(struct scsi_cmnd *cmd)
if (cmd->result)
atomic_inc(&cmd->device->ioerr_cnt);
+ BUG_ON(!rq);
+
/*
- * Next, enqueue the command into the done queue.
- * It is a per-CPU queue, so we just disable local interrupts
- * and need no spinlock.
+ * The uptodate/nbytes values don't matter, as we allow partial
+ * completes and thus will check this in the softirq callback
*/
- local_irq_save(flags);
- list_add_tail(&cmd->eh_entry, &__get_cpu_var(scsi_done_q));
- raise_softirq_irqoff(SCSI_SOFTIRQ);
- local_irq_restore(flags);
-}
-
-/**
- * scsi_softirq - Perform post-interrupt processing of finished SCSI commands.
- *
- * This is the consumer of the done queue.
- *
- * This is called with all interrupts enabled. This should reduce
- * interrupt latency, stack depth, and reentrancy of the low-level
- * drivers.
- */
-static void scsi_softirq(struct softirq_action *h)
-{
- int disposition;
- LIST_HEAD(local_q);
-
- local_irq_disable();
- list_splice_init(&__get_cpu_var(scsi_done_q), &local_q);
- local_irq_enable();
-
- while (!list_empty(&local_q)) {
- struct scsi_cmnd *cmd = list_entry(local_q.next,
- struct scsi_cmnd, eh_entry);
- /* The longest time any command should be outstanding is the
- * per command timeout multiplied by the number of retries.
- *
- * For a typical command, this is 2.5 minutes */
- unsigned long wait_for
- = cmd->allowed * cmd->timeout_per_command;
- list_del_init(&cmd->eh_entry);
-
- disposition = scsi_decide_disposition(cmd);
- if (disposition != SUCCESS &&
- time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
- sdev_printk(KERN_ERR, cmd->device,
- "timing out command, waited %lus\n",
- wait_for/HZ);
- disposition = SUCCESS;
- }
-
- scsi_log_completion(cmd, disposition);
- switch (disposition) {
- case SUCCESS:
- scsi_finish_command(cmd);
- break;
- case NEEDS_RETRY:
- scsi_retry_command(cmd);
- break;
- case ADD_TO_MLQUEUE:
- scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
- break;
- default:
- if (!scsi_eh_scmd_add(cmd, 0))
- scsi_finish_command(cmd);
- }
- }
+ rq->completion_data = cmd;
+ blk_complete_request(rq);
}
/*
@@ -840,7 +782,7 @@ static void scsi_softirq(struct softirq_action *h)
* level drivers should not become re-entrant as a result of
* this.
*/
-static int scsi_retry_command(struct scsi_cmnd *cmd)
+int scsi_retry_command(struct scsi_cmnd *cmd)
{
/*
* Restore the SCSI command state.
@@ -1273,38 +1215,6 @@ int scsi_device_cancel(struct scsi_device *sdev, int recovery)
}
EXPORT_SYMBOL(scsi_device_cancel);
-#ifdef CONFIG_HOTPLUG_CPU
-static int scsi_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- int cpu = (unsigned long)hcpu;
-
- switch(action) {
- case CPU_DEAD:
- /* Drain scsi_done_q. */
- local_irq_disable();
- list_splice_init(&per_cpu(scsi_done_q, cpu),
- &__get_cpu_var(scsi_done_q));
- raise_softirq_irqoff(SCSI_SOFTIRQ);
- local_irq_enable();
- break;
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block __devinitdata scsi_cpu_nb = {
- .notifier_call = scsi_cpu_notify,
-};
-
-#define register_scsi_cpu() register_cpu_notifier(&scsi_cpu_nb)
-#define unregister_scsi_cpu() unregister_cpu_notifier(&scsi_cpu_nb)
-#else
-#define register_scsi_cpu()
-#define unregister_scsi_cpu()
-#endif /* CONFIG_HOTPLUG_CPU */
-
MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");
@@ -1338,8 +1248,6 @@ static int __init init_scsi(void)
INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));
devfs_mk_dir("scsi");
- open_softirq(SCSI_SOFTIRQ, scsi_softirq, NULL);
- register_scsi_cpu();
printk(KERN_NOTICE "SCSI subsystem initialized\n");
return 0;
@@ -1367,7 +1275,6 @@ static void __exit exit_scsi(void)
devfs_remove("scsi");
scsi_exit_procfs();
scsi_exit_queue();
- unregister_scsi_cpu();
}
subsys_initcall(init_scsi);
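
The scsi.c changes above retire the private SCSI_SOFTIRQ and its per-CPU done queue: __scsi_done() now stashes the command in rq->completion_data and calls blk_complete_request(), and the retry/finish decision runs in the block layer's softirq via the scsi_softirq_done() callback added to scsi_lib.c below. A hedged sketch of the same split for a hypothetical block driver, assuming the blk_queue_softirq_done()/blk_complete_request() interface introduced alongside this change:

  #include <linux/blkdev.h>

  struct mydrv_cmd;                           /* hypothetical per-command state */
  void mydrv_finish(struct mydrv_cmd *cmd);   /* hypothetical slow-path completion */

  /* Hard-IRQ context: defer the real work to the block softirq. */
  static void mydrv_irq_complete(struct request *rq, struct mydrv_cmd *cmd)
  {
      rq->completion_data = cmd;              /* retrieved in the softirq */
      blk_complete_request(rq);
  }

  /* Softirq context, with interrupts enabled: decide retry vs. finish here. */
  static void mydrv_softirq_done(struct request *rq)
  {
      mydrv_finish(rq->completion_data);
  }

  /* Wired up once at queue-init time: */
  static void mydrv_init_queue(request_queue_t *q)
  {
      blk_queue_softirq_done(q, mydrv_softirq_done);
  }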
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a7f3f0c84db..00c9bf383e2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -308,7 +308,7 @@ struct scsi_io_context {
static kmem_cache_t *scsi_io_context_cache;
-static void scsi_end_async(struct request *req)
+static void scsi_end_async(struct request *req, int uptodate)
{
struct scsi_io_context *sioc = req->end_io_data;
@@ -791,7 +791,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
spin_lock_irqsave(q->queue_lock, flags);
if (blk_rq_tagged(req))
blk_queue_end_tag(q, req);
- end_that_request_last(req);
+ end_that_request_last(req, uptodate);
spin_unlock_irqrestore(q->queue_lock, flags);
/*
@@ -932,9 +932,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
int sense_valid = 0;
int sense_deferred = 0;
- if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
- return;
-
/*
* Free up any indirection buffers we allocated for DMA purposes.
* For the case of a READ, we need to copy the data out of the
@@ -1199,38 +1196,6 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
return BLKPREP_KILL;
}
-static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
-{
- struct scsi_device *sdev = q->queuedata;
- struct scsi_driver *drv;
-
- if (sdev->sdev_state == SDEV_RUNNING) {
- drv = *(struct scsi_driver **) rq->rq_disk->private_data;
-
- if (drv->prepare_flush)
- return drv->prepare_flush(q, rq);
- }
-
- return 0;
-}
-
-static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
-{
- struct scsi_device *sdev = q->queuedata;
- struct request *flush_rq = rq->end_io_data;
- struct scsi_driver *drv;
-
- if (flush_rq->errors) {
- printk("scsi: barrier error, disabling flush support\n");
- blk_queue_ordered(q, QUEUE_ORDERED_NONE);
- }
-
- if (sdev->sdev_state == SDEV_RUNNING) {
- drv = *(struct scsi_driver **) rq->rq_disk->private_data;
- drv->end_flush(q, rq);
- }
-}
-
static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
sector_t *error_sector)
{
@@ -1528,6 +1493,41 @@ static void scsi_kill_request(struct request *req, request_queue_t *q)
__scsi_done(cmd);
}
+static void scsi_softirq_done(struct request *rq)
+{
+ struct scsi_cmnd *cmd = rq->completion_data;
+ unsigned long wait_for = cmd->allowed * cmd->timeout_per_command;
+ int disposition;
+
+ INIT_LIST_HEAD(&cmd->eh_entry);
+
+ disposition = scsi_decide_disposition(cmd);
+ if (disposition != SUCCESS &&
+ time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
+ sdev_printk(KERN_ERR, cmd->device,
+ "timing out command, waited %lus\n",
+ wait_for/HZ);
+ disposition = SUCCESS;
+ }
+
+ scsi_log_completion(cmd, disposition);
+
+ switch (disposition) {
+ case SUCCESS:
+ scsi_finish_command(cmd);
+ break;
+ case NEEDS_RETRY:
+ scsi_retry_command(cmd);
+ break;
+ case ADD_TO_MLQUEUE:
+ scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
+ break;
+ default:
+ if (!scsi_eh_scmd_add(cmd, 0))
+ scsi_finish_command(cmd);
+ }
+}
+
/*
* Function: scsi_request_fn()
*
@@ -1702,17 +1702,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
blk_queue_segment_boundary(q, shost->dma_boundary);
blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
-
- /*
- * ordered tags are superior to flush ordering
- */
- if (shost->ordered_tag)
- blk_queue_ordered(q, QUEUE_ORDERED_TAG);
- else if (shost->ordered_flush) {
- blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
- q->prepare_flush_fn = scsi_prepare_flush_fn;
- q->end_flush_fn = scsi_end_flush_fn;
- }
+ blk_queue_softirq_done(q, scsi_softirq_done);
if (!shost->use_clustering)
clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
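Sketch (not part of the patch): the scsi_lib.c hunk above registers scsi_softirq_done() with blk_queue_softirq_done(), so the hot completion path only needs to park its context in rq->completion_data and hand the request back to the block layer, which later invokes the registered callback in softirq context. Assuming the blk_complete_request() entry point introduced by the same series, the interrupt-time half would look roughly like this; the function name below is hypothetical.

    #include <linux/blkdev.h>
    #include <scsi/scsi_cmnd.h>

    /* Illustrative only: defer completion work to the block-layer softirq. */
    static void example_done_from_irq(struct scsi_cmnd *cmd)
    {
            struct request *rq = cmd->request;

            rq->completion_data = cmd;      /* read back by scsi_softirq_done() */
            blk_complete_request(rq);       /* callback runs in softirq context */
    }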
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index f04e7e11f57..14a6198cb8d 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -44,6 +44,7 @@ extern void scsi_init_cmd_from_req(struct scsi_cmnd *cmd,
struct scsi_request *sreq);
extern void __scsi_release_request(struct scsi_request *sreq);
extern void __scsi_done(struct scsi_cmnd *cmd);
+extern int scsi_retry_command(struct scsi_cmnd *cmd);
#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd);
void scsi_log_completion(struct scsi_cmnd *cmd, int disposition);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 15842b1f0f4..ea7f3a43357 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -263,9 +263,40 @@ static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
}
+static int scsi_bus_suspend(struct device * dev, pm_message_t state)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct scsi_host_template *sht = sdev->host->hostt;
+ int err;
+
+ err = scsi_device_quiesce(sdev);
+ if (err)
+ return err;
+
+ if (sht->suspend)
+ err = sht->suspend(sdev);
+
+ return err;
+}
+
+static int scsi_bus_resume(struct device * dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct scsi_host_template *sht = sdev->host->hostt;
+ int err = 0;
+
+ if (sht->resume)
+ err = sht->resume(sdev);
+
+ scsi_device_resume(sdev);
+ return err;
+}
+
struct bus_type scsi_bus_type = {
.name = "scsi",
.match = scsi_bus_match,
+ .suspend = scsi_bus_suspend,
+ .resume = scsi_bus_resume,
};
int scsi_sysfs_register(void)
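Sketch (not part of the patch): the scsi_sysfs.c hunk above quiesces the device before calling an optional host-template suspend hook, and resumes in the reverse order. Assuming the matching .suspend/.resume members added to struct scsi_host_template elsewhere in this series, a host driver could opt in roughly as follows; all names here are hypothetical.

    #include <scsi/scsi_device.h>
    #include <scsi/scsi_host.h>

    /* Illustrative only: device is already quiesced by scsi_bus_suspend(). */
    static int example_suspend(struct scsi_device *sdev)
    {
            /* save controller state, spin the disk down, etc. */
            return 0;
    }

    static int example_resume(struct scsi_device *sdev)
    {
            /* restore state; scsi_bus_resume() un-quiesces afterwards */
            return 0;
    }

    static struct scsi_host_template example_template = {
            .name    = "example",
            .suspend = example_suspend,
            .resume  = example_resume,
    };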
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3d3ad7d1b77..4c5127ed379 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -102,6 +102,7 @@ struct scsi_disk {
u8 write_prot;
unsigned WCE : 1; /* state of disk WCE bit */
unsigned RCD : 1; /* state of disk RCD bit, unused */
+ unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
};
static DEFINE_IDR(sd_index_idr);
@@ -121,8 +122,7 @@ static void sd_shutdown(struct device *dev);
static void sd_rescan(struct device *);
static int sd_init_command(struct scsi_cmnd *);
static int sd_issue_flush(struct device *, sector_t *);
-static void sd_end_flush(request_queue_t *, struct request *);
-static int sd_prepare_flush(request_queue_t *, struct request *);
+static void sd_prepare_flush(request_queue_t *, struct request *);
static void sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
unsigned char *buffer);
@@ -137,8 +137,6 @@ static struct scsi_driver sd_template = {
.rescan = sd_rescan,
.init_command = sd_init_command,
.issue_flush = sd_issue_flush,
- .prepare_flush = sd_prepare_flush,
- .end_flush = sd_end_flush,
};
/*
@@ -346,6 +344,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
if (block > 0xffffffff) {
SCpnt->cmnd[0] += READ_16 - READ_6;
+ SCpnt->cmnd[1] |= blk_fua_rq(rq) ? 0x8 : 0;
SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
@@ -365,6 +364,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
this_count = 0xffff;
SCpnt->cmnd[0] += READ_10 - READ_6;
+ SCpnt->cmnd[1] |= blk_fua_rq(rq) ? 0x8 : 0;
SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
@@ -373,6 +373,17 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
} else {
+ if (unlikely(blk_fua_rq(rq))) {
+ /*
+ * This happens only if this drive failed
+ * 10byte rw command with ILLEGAL_REQUEST
+ * during operation and thus turned off
+ * use_10_for_rw.
+ */
+ printk(KERN_ERR "sd: FUA write on READ/WRITE(6) drive\n");
+ return 0;
+ }
+
SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f);
SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff);
SCpnt->cmnd[3] = (unsigned char) block & 0xff;
@@ -516,7 +527,7 @@ static int sd_release(struct inode *inode, struct file *filp)
return 0;
}
-static int sd_hdio_getgeo(struct block_device *bdev, struct hd_geometry __user *loc)
+static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
struct scsi_device *sdp = sdkp->device;
@@ -534,15 +545,9 @@ static int sd_hdio_getgeo(struct block_device *bdev, struct hd_geometry __user *
else
scsicam_bios_param(bdev, sdkp->capacity, diskinfo);
- if (put_user(diskinfo[0], &loc->heads))
- return -EFAULT;
- if (put_user(diskinfo[1], &loc->sectors))
- return -EFAULT;
- if (put_user(diskinfo[2], &loc->cylinders))
- return -EFAULT;
- if (put_user((unsigned)get_start_sect(bdev),
- (unsigned long __user *)&loc->start))
- return -EFAULT;
+ geo->heads = diskinfo[0];
+ geo->sectors = diskinfo[1];
+ geo->cylinders = diskinfo[2];
return 0;
}
@@ -582,12 +587,6 @@ static int sd_ioctl(struct inode * inode, struct file * filp,
if (!scsi_block_when_processing_errors(sdp) || !error)
return error;
- if (cmd == HDIO_GETGEO) {
- if (!arg)
- return -EINVAL;
- return sd_hdio_getgeo(bdev, p);
- }
-
/*
* Send SCSI addressing ioctls directly to mid level, send other
* ioctls to block level and then onto mid level if they can't be
@@ -729,42 +728,13 @@ static int sd_issue_flush(struct device *dev, sector_t *error_sector)
return ret;
}
-static void sd_end_flush(request_queue_t *q, struct request *flush_rq)
+static void sd_prepare_flush(request_queue_t *q, struct request *rq)
{
- struct request *rq = flush_rq->end_io_data;
- struct scsi_cmnd *cmd = rq->special;
- unsigned int bytes = rq->hard_nr_sectors << 9;
-
- if (!flush_rq->errors) {
- spin_unlock(q->queue_lock);
- scsi_io_completion(cmd, bytes, 0);
- spin_lock(q->queue_lock);
- } else if (blk_barrier_postflush(rq)) {
- spin_unlock(q->queue_lock);
- scsi_io_completion(cmd, 0, bytes);
- spin_lock(q->queue_lock);
- } else {
- /*
- * force journal abort of barriers
- */
- end_that_request_first(rq, -EOPNOTSUPP, rq->hard_nr_sectors);
- end_that_request_last(rq);
- }
-}
-
-static int sd_prepare_flush(request_queue_t *q, struct request *rq)
-{
- struct scsi_device *sdev = q->queuedata;
- struct scsi_disk *sdkp = dev_get_drvdata(&sdev->sdev_gendev);
-
- if (!sdkp || !sdkp->WCE)
- return 0;
-
memset(rq->cmd, 0, sizeof(rq->cmd));
- rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
+ rq->flags |= REQ_BLOCK_PC;
rq->timeout = SD_TIMEOUT;
rq->cmd[0] = SYNCHRONIZE_CACHE;
- return 1;
+ rq->cmd_len = 10;
}
static void sd_rescan(struct device *dev)
@@ -818,6 +788,7 @@ static struct block_device_operations sd_fops = {
.open = sd_open,
.release = sd_release,
.ioctl = sd_ioctl,
+ .getgeo = sd_getgeo,
#ifdef CONFIG_COMPAT
.compat_ioctl = sd_compat_ioctl,
#endif
@@ -1427,10 +1398,18 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
sdkp->RCD = 0;
}
+ sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
+ if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
+ printk(KERN_NOTICE "SCSI device %s: uses "
+ "READ/WRITE(6), disabling FUA\n", diskname);
+ sdkp->DPOFUA = 0;
+ }
+
ct = sdkp->RCD + 2*sdkp->WCE;
- printk(KERN_NOTICE "SCSI device %s: drive cache: %s\n",
- diskname, types[ct]);
+ printk(KERN_NOTICE "SCSI device %s: drive cache: %s%s\n",
+ diskname, types[ct],
+ sdkp->DPOFUA ? " w/ FUA" : "");
return;
}
@@ -1462,6 +1441,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
unsigned char *buffer;
+ unsigned ordered;
SCSI_LOG_HLQUEUE(3, printk("sd_revalidate_disk: disk=%s\n", disk->disk_name));
@@ -1498,7 +1478,21 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_write_protect_flag(sdkp, disk->disk_name, buffer);
sd_read_cache_type(sdkp, disk->disk_name, buffer);
}
-
+
+ /*
+ * We now have all cache related info, determine how we deal
+ * with ordered requests. Note that as the current SCSI
+ * dispatch function can alter request order, we cannot use
+ * QUEUE_ORDERED_TAG_* even when ordered tag is supported.
+ */
+ if (sdkp->WCE)
+ ordered = sdkp->DPOFUA
+ ? QUEUE_ORDERED_DRAIN_FUA : QUEUE_ORDERED_DRAIN_FLUSH;
+ else
+ ordered = QUEUE_ORDERED_DRAIN;
+
+ blk_queue_ordered(sdkp->disk->queue, ordered, sd_prepare_flush);
+
set_capacity(disk, sdkp->capacity);
kfree(buffer);
@@ -1598,6 +1592,7 @@ static int sd_probe(struct device *dev)
strcpy(gd->devfs_name, sdp->devfs_name);
gd->private_data = &sdkp->driver;
+ gd->queue = sdkp->device->request_queue;
sd_revalidate_disk(gd);
@@ -1605,7 +1600,6 @@ static int sd_probe(struct device *dev)
gd->flags = GENHD_FL_DRIVERFS;
if (sdp->removable)
gd->flags |= GENHD_FL_REMOVABLE;
- gd->queue = sdkp->device->request_queue;
dev_set_drvdata(dev, sdkp);
add_disk(gd);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 221e96e2620..78aad9582bc 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2493,7 +2493,7 @@ sg_page_malloc(int rqSz, int lowDma, int *retSzp)
}
if (resp) {
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
- memset(resp, 0, resSz);
+ memset(page_address(resp), 0, resSz);
if (retSzp)
*retSzp = resSz;
}
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index fd63add6a57..fb53eeaee61 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -465,7 +465,7 @@ wd33c93_execute(struct Scsi_Host *instance)
*/
cmd = (struct scsi_cmnd *) hostdata->input_Q;
- prev = 0;
+ prev = NULL;
while (cmd) {
if (!(hostdata->busy[cmd->device->id] & (1 << cmd->device->lun)))
break;
@@ -1569,7 +1569,7 @@ wd33c93_abort(struct scsi_cmnd * cmd)
*/
tmp = (struct scsi_cmnd *) hostdata->input_Q;
- prev = 0;
+ prev = NULL;
while (tmp) {
if (tmp == cmd) {
if (prev)
diff --git a/drivers/serial/21285.c b/drivers/serial/21285.c
index b5cf39468d1..221999bcf8f 100644
--- a/drivers/serial/21285.c
+++ b/drivers/serial/21285.c
@@ -94,15 +94,6 @@ static irqreturn_t serial21285_rx_chars(int irq, void *dev_id, struct pt_regs *r
status = *CSR_UARTFLG;
while (!(status & 0x10) && max_count--) {
- if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
- if (tty->low_latency)
- tty_flip_buffer_push(tty);
- /*
- * If this failed then we will throw away the
- * bytes but must do so to clear interrupts
- */
- }
-
ch = *CSR_UARTDR;
flag = TTY_NORMAL;
port->icount.rx++;
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index 67e9afa000c..4dd5c3f9816 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -294,7 +294,7 @@ static _INLINE_ void receive_chars(struct m68k_serial *info, struct pt_regs *reg
{
struct tty_struct *tty = info->tty;
m68328_uart *uart = &uart_addr[info->line];
- unsigned char ch;
+ unsigned char ch, flag;
/*
* This do { } while() loop will get ALL chars out of Rx FIFO
@@ -332,26 +332,24 @@ static _INLINE_ void receive_chars(struct m68k_serial *info, struct pt_regs *reg
/*
* Make sure that we do not overflow the buffer
*/
- if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
+ if (tty_request_buffer_room(tty, 1) == 0) {
schedule_work(&tty->flip.work);
return;
}
+ flag = TTY_NORMAL;
+
if(rx & URX_PARITY_ERROR) {
- *tty->flip.flag_buf_ptr++ = TTY_PARITY;
+ flag = TTY_PARITY;
status_handle(info, rx);
} else if(rx & URX_OVRUN) {
- *tty->flip.flag_buf_ptr++ = TTY_OVERRUN;
+ flag = TTY_OVERRUN;
status_handle(info, rx);
} else if(rx & URX_FRAME_ERROR) {
- *tty->flip.flag_buf_ptr++ = TTY_FRAME;
+ flag = TTY_FRAME;
status_handle(info, rx);
- } else {
- *tty->flip.flag_buf_ptr++ = 0; /* XXX */
}
- *tty->flip.char_buf_ptr++ = ch;
- tty->flip.count++;
-
+ tty_insert_flip_char(tty, ch, flag);
#ifndef CONFIG_XCOPILOT_BUGS
} while((rx = uart->urx.w) & URX_DATA_READY);
#endif
diff --git a/drivers/serial/68360serial.c b/drivers/serial/68360serial.c
index 170c9d2a749..60f5a5dc17f 100644
--- a/drivers/serial/68360serial.c
+++ b/drivers/serial/68360serial.c
@@ -394,7 +394,7 @@ static void rs_360_start(struct tty_struct *tty)
static _INLINE_ void receive_chars(ser_info_t *info)
{
struct tty_struct *tty = info->tty;
- unsigned char ch, *cp;
+ unsigned char ch, flag, *cp;
/*int ignored = 0;*/
int i;
ushort status;
@@ -438,24 +438,15 @@ static _INLINE_ void receive_chars(ser_info_t *info)
cp = (char *)bdp->buf;
status = bdp->status;
- /* Check to see if there is room in the tty buffer for
- * the characters in our BD buffer. If not, we exit
- * now, leaving the BD with the characters. We'll pick
- * them up again on the next receive interrupt (which could
- * be a timeout).
- */
- if ((tty->flip.count + i) >= TTY_FLIPBUF_SIZE)
- break;
-
while (i-- > 0) {
ch = *cp++;
- *tty->flip.char_buf_ptr = ch;
icount->rx++;
#ifdef SERIAL_DEBUG_INTR
printk("DR%02x:%02x...", ch, status);
#endif
- *tty->flip.flag_buf_ptr = 0;
+ flag = TTY_NORMAL;
+
if (status & (BD_SC_BR | BD_SC_FR |
BD_SC_PR | BD_SC_OV)) {
/*
@@ -490,30 +481,18 @@ static _INLINE_ void receive_chars(ser_info_t *info)
if (info->flags & ASYNC_SAK)
do_SAK(tty);
} else if (status & BD_SC_PR)
- *tty->flip.flag_buf_ptr = TTY_PARITY;
+ flag = TTY_PARITY;
else if (status & BD_SC_FR)
- *tty->flip.flag_buf_ptr = TTY_FRAME;
- if (status & BD_SC_OV) {
- /*
- * Overrun is special, since it's
- * reported immediately, and doesn't
- * affect the current character
- */
- if (tty->flip.count < TTY_FLIPBUF_SIZE) {
- tty->flip.count++;
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- *tty->flip.flag_buf_ptr =
- TTY_OVERRUN;
- }
- }
+ flag = TTY_FRAME;
}
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- break;
-
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
+ tty_insert_flip_char(tty, ch, flag);
+ if (status & BD_SC_OV)
+ /*
+ * Overrun is special, since it's
+ * reported immediately, and doesn't
+ * affect the current character
+ */
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
}
/* This BD is ready to be used again. Clear status.
@@ -541,12 +520,7 @@ static _INLINE_ void receive_break(ser_info_t *info)
/* Check to see if there is room in the tty buffer for
* the break. If not, we exit now, losing the break. FIXME
*/
- if ((tty->flip.count + 1) >= TTY_FLIPBUF_SIZE)
- return;
- *(tty->flip.flag_buf_ptr++) = TTY_BREAK;
- *(tty->flip.char_buf_ptr++) = 0;
- tty->flip.count++;
-
+ tty_insert_flip_char(tty, 0, TTY_BREAK);
schedule_work(&tty->flip.work);
}
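The same conversion pattern recurs throughout the serial hunks above and below: instead of poking tty->flip.char_buf_ptr/flag_buf_ptr by hand and checking TTY_FLIPBUF_SIZE, receive paths now pass each character with its status flag to tty_insert_flip_char(), which handles the buffer-room accounting itself. A condensed sketch of the resulting receive path (names are illustrative):

    #include <linux/tty.h>
    #include <linux/tty_flip.h>

    /* Illustrative only: queue one received character with its status flag. */
    static void example_rx_char(struct tty_struct *tty, unsigned char ch, int hw_err)
    {
            char flag = TTY_NORMAL;

            if (hw_err)                     /* map hardware status bits */
                    flag = TTY_PARITY;      /* or TTY_FRAME / TTY_OVERRUN */

            tty_insert_flip_char(tty, ch, flag);    /* helper checks for room */
            tty_flip_buffer_push(tty);              /* hand off to the ldisc */
    }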
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index d2bcd1f87cd..fb610c3634a 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -41,6 +41,7 @@
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/nmi.h>
+#include <linux/mutex.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -54,6 +55,8 @@
*/
static unsigned int share_irqs = SERIAL8250_SHARE_IRQS;
+static unsigned int nr_uarts = CONFIG_SERIAL_8250_RUNTIME_UARTS;
+
/*
* Debugging.
*/
@@ -296,7 +299,7 @@ static inline int map_8250_out_reg(struct uart_8250_port *up, int offset)
#endif
-static _INLINE_ unsigned int serial_in(struct uart_8250_port *up, int offset)
+static unsigned int serial_in(struct uart_8250_port *up, int offset)
{
offset = map_8250_in_reg(up, offset) << up->port.regshift;
@@ -321,7 +324,7 @@ static _INLINE_ unsigned int serial_in(struct uart_8250_port *up, int offset)
}
}
-static _INLINE_ void
+static void
serial_out(struct uart_8250_port *up, int offset, int value)
{
offset = map_8250_out_reg(up, offset) << up->port.regshift;
@@ -1131,7 +1134,7 @@ static void serial8250_enable_ms(struct uart_port *port)
serial_out(up, UART_IER, up->ier);
}
-static _INLINE_ void
+static void
receive_chars(struct uart_8250_port *up, int *status, struct pt_regs *regs)
{
struct tty_struct *tty = up->port.info->tty;
@@ -1140,19 +1143,6 @@ receive_chars(struct uart_8250_port *up, int *status, struct pt_regs *regs)
char flag;
do {
- /* The following is not allowed by the tty layer and
- unsafe. It should be fixed ASAP */
- if (unlikely(tty->flip.count >= TTY_FLIPBUF_SIZE)) {
- if (tty->low_latency) {
- spin_unlock(&up->port.lock);
- tty_flip_buffer_push(tty);
- spin_lock(&up->port.lock);
- }
- /*
- * If this failed then we will throw away the
- * bytes but must do so to clear interrupts
- */
- }
ch = serial_inp(up, UART_RX);
flag = TTY_NORMAL;
up->port.icount.rx++;
@@ -1217,7 +1207,7 @@ receive_chars(struct uart_8250_port *up, int *status, struct pt_regs *regs)
*status = lsr;
}
-static _INLINE_ void transmit_chars(struct uart_8250_port *up)
+static void transmit_chars(struct uart_8250_port *up)
{
struct circ_buf *xmit = &up->port.info->xmit;
int count;
@@ -1255,25 +1245,24 @@ static _INLINE_ void transmit_chars(struct uart_8250_port *up)
__stop_tx(up);
}
-static _INLINE_ void check_modem_status(struct uart_8250_port *up)
+static unsigned int check_modem_status(struct uart_8250_port *up)
{
- int status;
-
- status = serial_in(up, UART_MSR);
+ unsigned int status = serial_in(up, UART_MSR);
- if ((status & UART_MSR_ANY_DELTA) == 0)
- return;
+ if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI) {
+ if (status & UART_MSR_TERI)
+ up->port.icount.rng++;
+ if (status & UART_MSR_DDSR)
+ up->port.icount.dsr++;
+ if (status & UART_MSR_DDCD)
+ uart_handle_dcd_change(&up->port, status & UART_MSR_DCD);
+ if (status & UART_MSR_DCTS)
+ uart_handle_cts_change(&up->port, status & UART_MSR_CTS);
- if (status & UART_MSR_TERI)
- up->port.icount.rng++;
- if (status & UART_MSR_DDSR)
- up->port.icount.dsr++;
- if (status & UART_MSR_DDCD)
- uart_handle_dcd_change(&up->port, status & UART_MSR_DCD);
- if (status & UART_MSR_DCTS)
- uart_handle_cts_change(&up->port, status & UART_MSR_CTS);
+ wake_up_interruptible(&up->port.info->delta_msr_wait);
+ }
- wake_up_interruptible(&up->port.info->delta_msr_wait);
+ return status;
}
/*
@@ -1282,7 +1271,11 @@ static _INLINE_ void check_modem_status(struct uart_8250_port *up)
static inline void
serial8250_handle_port(struct uart_8250_port *up, struct pt_regs *regs)
{
- unsigned int status = serial_inp(up, UART_LSR);
+ unsigned int status;
+
+ spin_lock(&up->port.lock);
+
+ status = serial_inp(up, UART_LSR);
DEBUG_INTR("status = %x...", status);
@@ -1291,6 +1284,8 @@ serial8250_handle_port(struct uart_8250_port *up, struct pt_regs *regs)
check_modem_status(up);
if (status & UART_LSR_THRE)
transmit_chars(up);
+
+ spin_unlock(&up->port.lock);
}
/*
@@ -1326,9 +1321,7 @@ static irqreturn_t serial8250_interrupt(int irq, void *dev_id, struct pt_regs *r
iir = serial_in(up, UART_IIR);
if (!(iir & UART_IIR_NO_INT)) {
- spin_lock(&up->port.lock);
serial8250_handle_port(up, regs);
- spin_unlock(&up->port.lock);
handled = 1;
@@ -1427,11 +1420,8 @@ static void serial8250_timeout(unsigned long data)
unsigned int iir;
iir = serial_in(up, UART_IIR);
- if (!(iir & UART_IIR_NO_INT)) {
- spin_lock(&up->port.lock);
+ if (!(iir & UART_IIR_NO_INT))
serial8250_handle_port(up, NULL);
- spin_unlock(&up->port.lock);
- }
timeout = up->port.timeout;
timeout = timeout > 6 ? (timeout / 2 - 2) : 1;
@@ -1454,10 +1444,10 @@ static unsigned int serial8250_tx_empty(struct uart_port *port)
static unsigned int serial8250_get_mctrl(struct uart_port *port)
{
struct uart_8250_port *up = (struct uart_8250_port *)port;
- unsigned char status;
+ unsigned int status;
unsigned int ret;
- status = serial_in(up, UART_MSR);
+ status = check_modem_status(up);
ret = 0;
if (status & UART_MSR_DCD)
@@ -2118,7 +2108,7 @@ static void __init serial8250_isa_init_ports(void)
return;
first = 0;
- for (i = 0; i < UART_NR; i++) {
+ for (i = 0; i < nr_uarts; i++) {
struct uart_8250_port *up = &serial8250_ports[i];
up->port.line = i;
@@ -2137,7 +2127,7 @@ static void __init serial8250_isa_init_ports(void)
}
for (i = 0, up = serial8250_ports;
- i < ARRAY_SIZE(old_serial_port) && i < UART_NR;
+ i < ARRAY_SIZE(old_serial_port) && i < nr_uarts;
i++, up++) {
up->port.iobase = old_serial_port[i].port;
up->port.irq = irq_canonicalize(old_serial_port[i].irq);
@@ -2159,7 +2149,7 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
serial8250_isa_init_ports();
- for (i = 0; i < UART_NR; i++) {
+ for (i = 0; i < nr_uarts; i++) {
struct uart_8250_port *up = &serial8250_ports[i];
up->port.dev = dev;
@@ -2262,7 +2252,7 @@ static int serial8250_console_setup(struct console *co, char *options)
* if so, search for the first available port that does have
* console support.
*/
- if (co->index >= UART_NR)
+ if (co->index >= nr_uarts)
co->index = 0;
port = &serial8250_ports[co->index].port;
if (!port->iobase && !port->membase)
@@ -2298,11 +2288,9 @@ static int __init find_port(struct uart_port *p)
int line;
struct uart_port *port;
- for (line = 0; line < UART_NR; line++) {
+ for (line = 0; line < nr_uarts; line++) {
port = &serial8250_ports[line].port;
- if (p->iotype == port->iotype &&
- p->iobase == port->iobase &&
- p->membase == port->membase)
+ if (uart_match_port(p, port))
return line;
}
return -ENODEV;
@@ -2422,7 +2410,7 @@ static int __devexit serial8250_remove(struct platform_device *dev)
{
int i;
- for (i = 0; i < UART_NR; i++) {
+ for (i = 0; i < nr_uarts; i++) {
struct uart_8250_port *up = &serial8250_ports[i];
if (up->port.dev == &dev->dev)
@@ -2480,7 +2468,7 @@ static struct platform_device *serial8250_isa_devs;
* 16x50 serial ports to be configured at run-time, to support PCMCIA
* modems and PCI multiport cards.
*/
-static DECLARE_MUTEX(serial_sem);
+static DEFINE_MUTEX(serial_mutex);
static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *port)
{
@@ -2489,7 +2477,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *
/*
* First, find a port entry which matches.
*/
- for (i = 0; i < UART_NR; i++)
+ for (i = 0; i < nr_uarts; i++)
if (uart_match_port(&serial8250_ports[i].port, port))
return &serial8250_ports[i];
@@ -2498,7 +2486,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *
* free entry. We look for one which hasn't been previously
* used (indicated by zero iobase).
*/
- for (i = 0; i < UART_NR; i++)
+ for (i = 0; i < nr_uarts; i++)
if (serial8250_ports[i].port.type == PORT_UNKNOWN &&
serial8250_ports[i].port.iobase == 0)
return &serial8250_ports[i];
@@ -2507,7 +2495,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *
* That also failed. Last resort is to find any entry which
* doesn't have a real port associated with it.
*/
- for (i = 0; i < UART_NR; i++)
+ for (i = 0; i < nr_uarts; i++)
if (serial8250_ports[i].port.type == PORT_UNKNOWN)
return &serial8250_ports[i];
@@ -2535,7 +2523,7 @@ int serial8250_register_port(struct uart_port *port)
if (port->uartclk == 0)
return -EINVAL;
- down(&serial_sem);
+ mutex_lock(&serial_mutex);
uart = serial8250_find_match_or_unused(port);
if (uart) {
@@ -2557,7 +2545,7 @@ int serial8250_register_port(struct uart_port *port)
if (ret == 0)
ret = uart->port.line;
}
- up(&serial_sem);
+ mutex_unlock(&serial_mutex);
return ret;
}
@@ -2574,7 +2562,7 @@ void serial8250_unregister_port(int line)
{
struct uart_8250_port *uart = &serial8250_ports[line];
- down(&serial_sem);
+ mutex_lock(&serial_mutex);
uart_remove_one_port(&serial8250_reg, &uart->port);
if (serial8250_isa_devs) {
uart->port.flags &= ~UPF_BOOT_AUTOCONF;
@@ -2584,7 +2572,7 @@ void serial8250_unregister_port(int line)
} else {
uart->port.dev = NULL;
}
- up(&serial_sem);
+ mutex_unlock(&serial_mutex);
}
EXPORT_SYMBOL(serial8250_unregister_port);
@@ -2592,8 +2580,11 @@ static int __init serial8250_init(void)
{
int ret, i;
+ if (nr_uarts > UART_NR)
+ nr_uarts = UART_NR;
+
printk(KERN_INFO "Serial: 8250/16550 driver $Revision: 1.90 $ "
- "%d ports, IRQ sharing %sabled\n", (int) UART_NR,
+ "%d ports, IRQ sharing %sabled\n", nr_uarts,
share_irqs ? "en" : "dis");
for (i = 0; i < NR_IRQS; i++)
@@ -2653,6 +2644,9 @@ module_param(share_irqs, uint, 0644);
MODULE_PARM_DESC(share_irqs, "Share IRQs with other non-8250/16x50 devices"
" (unsafe)");
+module_param(nr_uarts, uint, 0644);
+MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs supported. (1-" __MODULE_STRING(CONFIG_SERIAL_8250_NR_UARTS) ")");
+
#ifdef CONFIG_SERIAL_8250_RSA
module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444);
MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA");
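The serial_sem change in the 8250.c hunks above is the stock semaphore-to-mutex conversion enabled by the new <linux/mutex.h> include: a DECLARE_MUTEX()/down()/up() triple becomes DEFINE_MUTEX()/mutex_lock()/mutex_unlock(). In isolation the pattern is simply:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_mutex);     /* was: static DECLARE_MUTEX(example_sem); */

    static void example_critical_section(void)
    {
            mutex_lock(&example_mutex);     /* was: down(&example_sem); */
            /* ... walk or modify the shared port table ... */
            mutex_unlock(&example_mutex);   /* was: up(&example_sem); */
    }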
diff --git a/drivers/serial/8250.h b/drivers/serial/8250.h
index a607b98016d..490606b8709 100644
--- a/drivers/serial/8250.h
+++ b/drivers/serial/8250.h
@@ -51,12 +51,6 @@ struct serial8250_config {
#define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */
#define UART_BUG_NOMSR (1 << 2) /* UART has buggy MSR status bits (Au1x00) */
-#if defined(__i386__) && (defined(CONFIG_M386) || defined(CONFIG_M486))
-#define _INLINE_ inline
-#else
-#define _INLINE_
-#endif
-
#define PROBE_RSA (1 << 0)
#define PROBE_ANY (~0)
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 8adca0ce267..589fb076654 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -837,8 +837,8 @@ static struct pci_serial_quirk *find_quirk(struct pci_dev *dev)
return quirk;
}
-static _INLINE_ int
-get_pci_irq(struct pci_dev *dev, struct pciserial_board *board)
+static inline int get_pci_irq(struct pci_dev *dev,
+ struct pciserial_board *board)
{
if (board->flags & FL_NOIRQ)
return 0;
@@ -853,14 +853,15 @@ get_pci_irq(struct pci_dev *dev, struct pciserial_board *board)
* driver_data member.
*
* The makeup of these names are:
- * pbn_bn{_bt}_n_baud
+ * pbn_bn{_bt}_n_baud{_offsetinhex}
*
- * bn = PCI BAR number
- * bt = Index using PCI BARs
- * n = number of serial ports
- * baud = baud rate
+ * bn = PCI BAR number
+ * bt = Index using PCI BARs
+ * n = number of serial ports
+ * baud = baud rate
+ * offsetinhex = offset for each sequential port (in hex)
*
- * This table is sorted by (in order): baud, bt, bn, n.
+ * This table is sorted by (in order): bn, bt, baud, offsetindex, n.
*
* Please note: in theory if n = 1, _bt infix should make no difference.
* ie, pbn_b0_1_115200 is the same as pbn_b0_bt_1_115200
@@ -881,6 +882,13 @@ enum pci_board_num_t {
pbn_b0_4_1152000,
+ pbn_b0_2_1843200,
+ pbn_b0_4_1843200,
+
+ pbn_b0_2_1843200_200,
+ pbn_b0_4_1843200_200,
+ pbn_b0_8_1843200_200,
+
pbn_b0_bt_1_115200,
pbn_b0_bt_2_115200,
pbn_b0_bt_8_115200,
@@ -904,6 +912,8 @@ enum pci_board_num_t {
pbn_b1_4_921600,
pbn_b1_8_921600,
+ pbn_b1_2_1250000,
+
pbn_b1_bt_2_921600,
pbn_b1_1_1382400,
@@ -1029,6 +1039,38 @@ static struct pciserial_board pci_boards[] __devinitdata = {
.uart_offset = 8,
},
+ [pbn_b0_2_1843200] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 1843200,
+ .uart_offset = 8,
+ },
+ [pbn_b0_4_1843200] = {
+ .flags = FL_BASE0,
+ .num_ports = 4,
+ .base_baud = 1843200,
+ .uart_offset = 8,
+ },
+
+ [pbn_b0_2_1843200_200] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 1843200,
+ .uart_offset = 0x200,
+ },
+ [pbn_b0_4_1843200_200] = {
+ .flags = FL_BASE0,
+ .num_ports = 4,
+ .base_baud = 1843200,
+ .uart_offset = 0x200,
+ },
+ [pbn_b0_8_1843200_200] = {
+ .flags = FL_BASE0,
+ .num_ports = 8,
+ .base_baud = 1843200,
+ .uart_offset = 0x200,
+ },
+
[pbn_b0_bt_1_115200] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 1,
@@ -1141,6 +1183,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
.base_baud = 921600,
.uart_offset = 8,
},
+ [pbn_b1_2_1250000] = {
+ .flags = FL_BASE1,
+ .num_ports = 2,
+ .base_baud = 1250000,
+ .uart_offset = 8,
+ },
[pbn_b1_bt_2_921600] = {
.flags = FL_BASE1|FL_BASE_BARS,
@@ -1801,6 +1849,66 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_SUBVENDOR_ID_CONNECT_TECH,
PCI_SUBDEVICE_ID_CONNECT_TECH_BH041101V1, 0, 0,
pbn_b1_4_921600 },
+ { PCI_VENDOR_ID_V3, PCI_DEVICE_ID_V3_V351,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_20MHZ, 0, 0,
+ pbn_b1_2_1250000 },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_2, 0, 0,
+ pbn_b0_2_1843200 },
+ { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_4, 0, 0,
+ pbn_b0_4_1843200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C152,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_232, 0, 0,
+ pbn_b0_2_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C154,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_232, 0, 0,
+ pbn_b0_4_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C158,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_232, 0, 0,
+ pbn_b0_8_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C152,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_1_1, 0, 0,
+ pbn_b0_2_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C154,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_2, 0, 0,
+ pbn_b0_4_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C158,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4, 0, 0,
+ pbn_b0_8_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C152,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2, 0, 0,
+ pbn_b0_2_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C154,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4, 0, 0,
+ pbn_b0_4_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C158,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8, 0, 0,
+ pbn_b0_8_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C152,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_485, 0, 0,
+ pbn_b0_2_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C154,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_485, 0, 0,
+ pbn_b0_4_1843200_200 },
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C158,
+ PCI_SUBVENDOR_ID_CONNECT_TECH,
+ PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_485, 0, 0,
+ pbn_b0_8_1843200_200 },
{ PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_U530,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 812bae62c8e..843717275d4 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -95,6 +95,16 @@ config SERIAL_8250_NR_UARTS
PCI enumeration and any ports that may be added at run-time
via hot-plug, or any ISA multi-port serial cards.
+config SERIAL_8250_RUNTIME_UARTS
+ int "Number of 8250/16550 serial ports to register at runtime"
+ depends on SERIAL_8250
+ default "4"
+ help
+ Set this to the maximum number of serial ports you want
+ the kernel to register at boot time. This can be overriden
+ with the module parameter "nr_uarts", or boot-time parameter
+ 8250.nr_uarts
+
config SERIAL_8250_EXTENDED
bool "Extended 8250/16550 serial driver options"
depends on SERIAL_8250
@@ -270,6 +280,40 @@ config SERIAL_AMBA_PL011_CONSOLE
your boot loader (lilo or loadlin) about how to pass options to the
kernel at boot time.)
+config SERIAL_AT91
+ bool "AT91RM9200 serial port support"
+ depends on ARM && ARCH_AT91RM9200
+ select SERIAL_CORE
+ help
+ This enables the driver for the on-chip UARTs of the AT91RM9200
+ processor.
+
+config SERIAL_AT91_CONSOLE
+ bool "Support for console on AT91RM9200 serial port"
+ depends on SERIAL_AT91=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Say Y here if you wish to use a UART on the AT91RM9200 as the system
+ console (the system console is the device which receives all kernel
+ messages and warnings and which allows logins in single user mode).
+
+config SERIAL_AT91_TTYAT
+ bool "Install as device ttyAT0-4 instead of ttyS0-4"
+ depends on SERIAL_AT91=y
+ help
+ Say Y here if you wish to have the five internal AT91RM9200 UARTs
+ appear as /dev/ttyAT0-4 (major 240, minor 0-4) instead of the
+ normal /dev/ttyS0-4 (major 4, minor 64-68). This is necessary if
+ you also want other UARTs, such as external 8250/16C550 compatible
+ UARTs.
+ The ttySn nodes are legally reserved for the 8250 serial driver
+ but are often misused by other serial drivers.
+
+ To use this, you should create suitable ttyATn device nodes in
+ /dev/, and pass "console=ttyATn" to the kernel.
+
+ Say Y if you have an external 8250/16C550 UART. If unsure, say N.
+
config SERIAL_CLPS711X
tristate "CLPS711X serial port support"
depends on ARM && ARCH_CLPS711X
@@ -359,29 +403,6 @@ config SERIAL_21285_CONSOLE
your boot loader (lilo or loadlin) about how to pass options to the
kernel at boot time.)
-config SERIAL_UART00
- bool "Excalibur serial port (uart00) support"
- depends on ARM && ARCH_CAMELOT
- select SERIAL_CORE
- help
- Say Y here if you want to use the hard logic uart on Excalibur. This
- driver also supports soft logic implementations of this uart core.
-
-config SERIAL_UART00_CONSOLE
- bool "Support for console on Excalibur serial port"
- depends on SERIAL_UART00
- select SERIAL_CORE_CONSOLE
- help
- Say Y here if you want to support a serial console on an Excalibur
- hard logic uart or uart00 IP core.
-
- Even if you say Y here, the currently visible virtual console
- (/dev/tty0) will still be used as the system console by default, but
- you can alter that using a kernel command line option such as
- "console=ttyS1". (Try "man bootparam" or see the documentation of
- your boot loader (lilo or loadlin) about how to pass options to the
- kernel at boot time.)
-
config SERIAL_MPSC
bool "Marvell MPSC serial port support"
depends on PPC32 && MV64X60
@@ -873,7 +894,7 @@ config SERIAL_VR41XX_CONSOLE
config SERIAL_JSM
tristate "Digi International NEO PCI Support"
- depends on PCI
+ depends on PCI && BROKEN
select SERIAL_CORE
help
This is a driver for Digi International's Neo series
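As the SERIAL_8250_RUNTIME_UARTS help text added earlier in this Kconfig hunk notes, the compile-time default of registered ports can be raised without rebuilding, for example (the module name is assumed to be the stock 8250 module):

    8250.nr_uarts=8              # kernel command line, built-in driver
    modprobe 8250 nr_uarts=8     # modular driver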
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index d7c7c7180e3..24a583e482b 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -29,7 +29,6 @@ obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o
obj-$(CONFIG_SERIAL_PXA) += pxa.o
obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o
-obj-$(CONFIG_SERIAL_UART00) += uart00.o
obj-$(CONFIG_SERIAL_SUNCORE) += suncore.o
obj-$(CONFIG_SERIAL_SUNZILOG) += sunzilog.o
obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
@@ -57,3 +56,4 @@ obj-$(CONFIG_SERIAL_JSM) += jsm/
obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
obj-$(CONFIG_SERIAL_VR41XX) += vr41xx_siu.o
obj-$(CONFIG_SERIAL_SGI_IOC4) += ioc4_serial.o
+obj-$(CONFIG_SERIAL_AT91) += at91_serial.o
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c
index ddd0307fece..3490022e9fd 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/serial/amba-pl010.c
@@ -47,12 +47,12 @@
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/serial.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/hardware.h>
-#include <asm/hardware/amba.h>
-#include <asm/hardware/amba_serial.h>
#define UART_NR 2
@@ -154,15 +154,6 @@ pl010_rx_chars(struct uart_port *port)
status = UART_GET_FR(port);
while (UART_RX_DATA(status) && max_count--) {
- if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
- if (tty->low_latency)
- tty_flip_buffer_push(tty);
- /*
- * If this failed then we will throw away the
- * bytes but must do so to clear interrupts.
- */
- }
-
ch = UART_GET_CHAR(port);
flag = TTY_NORMAL;
diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c
index d84476ee659..034a029e356 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/serial/amba-pl011.c
@@ -47,12 +47,12 @@
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/serial.h>
+#include <linux/clk.h>
#include <asm/io.h>
#include <asm/sizes.h>
-#include <asm/hardware/amba.h>
-#include <asm/hardware/clock.h>
-#include <asm/hardware/amba_serial.h>
#define UART_NR 14
@@ -120,15 +120,6 @@ pl011_rx_chars(struct uart_amba_port *uap)
status = readw(uap->port.membase + UART01x_FR);
while ((status & UART01x_FR_RXFE) == 0 && max_count--) {
- if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
- if (tty->low_latency)
- tty_flip_buffer_push(tty);
- /*
- * If this failed then we will throw away the
- * bytes but must do so to clear interrupts
- */
- }
-
ch = readw(uap->port.membase + UART01x_DR) | UART_DUMMY_DR_RX;
flag = TTY_NORMAL;
uap->port.icount.rx++;
@@ -761,10 +752,6 @@ static int pl011_probe(struct amba_device *dev, void *id)
goto unmap;
}
- ret = clk_use(uap->clk);
- if (ret)
- goto putclk;
-
uap->port.dev = &dev->dev;
uap->port.mapbase = dev->res.start;
uap->port.membase = base;
@@ -782,8 +769,6 @@ static int pl011_probe(struct amba_device *dev, void *id)
if (ret) {
amba_set_drvdata(dev, NULL);
amba_ports[i] = NULL;
- clk_unuse(uap->clk);
- putclk:
clk_put(uap->clk);
unmap:
iounmap(base);
@@ -808,7 +793,6 @@ static int pl011_remove(struct amba_device *dev)
amba_ports[i] = NULL;
iounmap(uap->port.membase);
- clk_unuse(uap->clk);
clk_put(uap->clk);
kfree(uap);
return 0;
diff --git a/drivers/serial/at91_serial.c b/drivers/serial/at91_serial.c
new file mode 100644
index 00000000000..0e206063d68
--- /dev/null
+++ b/drivers/serial/at91_serial.c
@@ -0,0 +1,894 @@
+/*
+ * linux/drivers/char/at91_serial.c
+ *
+ * Driver for Atmel AT91RM9200 Serial ports
+ *
+ * Copyright (C) 2003 Rick Bronson
+ *
+ * Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
+ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/tty_flip.h>
+
+#include <asm/io.h>
+
+#include <asm/arch/at91rm9200_usart.h>
+#include <asm/mach/serial_at91rm9200.h>
+#include <asm/arch/board.h>
+#include <asm/arch/pio.h>
+
+
+#if defined(CONFIG_SERIAL_AT91_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/serial_core.h>
+
+#ifdef CONFIG_SERIAL_AT91_TTYAT
+
+/* Use device name ttyAT, major 204 and minor 154-169. This is necessary if we
+ * should coexist with the 8250 driver, such as if we have an external 16C550
+ * UART. */
+#define SERIAL_AT91_MAJOR 204
+#define MINOR_START 154
+#define AT91_DEVICENAME "ttyAT"
+
+#else
+
+/* Use device name ttyS, major 4, minor 64-68. This is the usual serial port
+ * name, but it is legally reserved for the 8250 driver. */
+#define SERIAL_AT91_MAJOR TTY_MAJOR
+#define MINOR_START 64
+#define AT91_DEVICENAME "ttyS"
+
+#endif
+
+#define AT91_VA_BASE_DBGU ((unsigned long) AT91_VA_BASE_SYS + AT91_DBGU)
+#define AT91_ISR_PASS_LIMIT 256
+
+#define UART_PUT_CR(port,v) writel(v, (port)->membase + AT91_US_CR)
+#define UART_GET_MR(port) readl((port)->membase + AT91_US_MR)
+#define UART_PUT_MR(port,v) writel(v, (port)->membase + AT91_US_MR)
+#define UART_PUT_IER(port,v) writel(v, (port)->membase + AT91_US_IER)
+#define UART_PUT_IDR(port,v) writel(v, (port)->membase + AT91_US_IDR)
+#define UART_GET_IMR(port) readl((port)->membase + AT91_US_IMR)
+#define UART_GET_CSR(port) readl((port)->membase + AT91_US_CSR)
+#define UART_GET_CHAR(port) readl((port)->membase + AT91_US_RHR)
+#define UART_PUT_CHAR(port,v) writel(v, (port)->membase + AT91_US_THR)
+#define UART_GET_BRGR(port) readl((port)->membase + AT91_US_BRGR)
+#define UART_PUT_BRGR(port,v) writel(v, (port)->membase + AT91_US_BRGR)
+#define UART_PUT_RTOR(port,v) writel(v, (port)->membase + AT91_US_RTOR)
+
+// #define UART_GET_CR(port) readl((port)->membase + AT91_US_CR) // is write-only
+
+ /* PDC registers */
+#define UART_PUT_PTCR(port,v) writel(v, (port)->membase + AT91_PDC_PTCR)
+#define UART_PUT_RPR(port,v) writel(v, (port)->membase + AT91_PDC_RPR)
+#define UART_PUT_RCR(port,v) writel(v, (port)->membase + AT91_PDC_RCR)
+#define UART_GET_RCR(port) readl((port)->membase + AT91_PDC_RCR)
+#define UART_PUT_RNPR(port,v) writel(v, (port)->membase + AT91_PDC_RNPR)
+#define UART_PUT_RNCR(port,v) writel(v, (port)->membase + AT91_PDC_RNCR)
+
+
+static int (*at91_open)(struct uart_port *);
+static void (*at91_close)(struct uart_port *);
+
+#ifdef SUPPORT_SYSRQ
+static struct console at91_console;
+#endif
+
+/*
+ * Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty.
+ */
+static u_int at91_tx_empty(struct uart_port *port)
+{
+ return (UART_GET_CSR(port) & AT91_US_TXEMPTY) ? TIOCSER_TEMT : 0;
+}
+
+/*
+ * Set state of the modem control output lines
+ */
+static void at91_set_mctrl(struct uart_port *port, u_int mctrl)
+{
+ unsigned int control = 0;
+
+ /*
+ * Errata #39: RTS0 is not internally connected to PA21. We need to drive
+ * the pin manually.
+ */
+ if (port->mapbase == AT91_VA_BASE_US0) {
+ if (mctrl & TIOCM_RTS)
+ at91_sys_write(AT91_PIOA + PIO_CODR, AT91_PA21_RTS0);
+ else
+ at91_sys_write(AT91_PIOA + PIO_SODR, AT91_PA21_RTS0);
+ }
+
+ if (mctrl & TIOCM_RTS)
+ control |= AT91_US_RTSEN;
+ else
+ control |= AT91_US_RTSDIS;
+
+ if (mctrl & TIOCM_DTR)
+ control |= AT91_US_DTREN;
+ else
+ control |= AT91_US_DTRDIS;
+
+ UART_PUT_CR(port,control);
+}
+
+/*
+ * Get state of the modem control input lines
+ */
+static u_int at91_get_mctrl(struct uart_port *port)
+{
+ unsigned int status, ret = 0;
+
+ status = UART_GET_CSR(port);
+
+ /*
+ * The control signals are active low.
+ */
+ if (!(status & AT91_US_DCD))
+ ret |= TIOCM_CD;
+ if (!(status & AT91_US_CTS))
+ ret |= TIOCM_CTS;
+ if (!(status & AT91_US_DSR))
+ ret |= TIOCM_DSR;
+ if (!(status & AT91_US_RI))
+ ret |= TIOCM_RI;
+
+ return ret;
+}
+
+/*
+ * Stop transmitting.
+ */
+static void at91_stop_tx(struct uart_port *port)
+{
+ UART_PUT_IDR(port, AT91_US_TXRDY);
+ port->read_status_mask &= ~AT91_US_TXRDY;
+}
+
+/*
+ * Start transmitting.
+ */
+static void at91_start_tx(struct uart_port *port)
+{
+ port->read_status_mask |= AT91_US_TXRDY;
+ UART_PUT_IER(port, AT91_US_TXRDY);
+}
+
+/*
+ * Stop receiving - port is in process of being closed.
+ */
+static void at91_stop_rx(struct uart_port *port)
+{
+ UART_PUT_IDR(port, AT91_US_RXRDY);
+}
+
+/*
+ * Enable modem status interrupts
+ */
+static void at91_enable_ms(struct uart_port *port)
+{
+ port->read_status_mask |= (AT91_US_RIIC | AT91_US_DSRIC | AT91_US_DCDIC | AT91_US_CTSIC);
+ UART_PUT_IER(port, AT91_US_RIIC | AT91_US_DSRIC | AT91_US_DCDIC | AT91_US_CTSIC);
+}
+
+/*
+ * Control the transmission of a break signal
+ */
+static void at91_break_ctl(struct uart_port *port, int break_state)
+{
+ if (break_state != 0)
+ UART_PUT_CR(port, AT91_US_STTBRK); /* start break */
+ else
+ UART_PUT_CR(port, AT91_US_STPBRK); /* stop break */
+}
+
+/*
+ * Characters received (called from interrupt handler)
+ */
+static void at91_rx_chars(struct uart_port *port, struct pt_regs *regs)
+{
+ struct tty_struct *tty = port->info->tty;
+ unsigned int status, ch, flg;
+
+ status = UART_GET_CSR(port) & port->read_status_mask;
+ while (status & (AT91_US_RXRDY)) {
+ ch = UART_GET_CHAR(port);
+
+ if (tty->flip.count >= TTY_FLIPBUF_SIZE)
+ goto ignore_char;
+ port->icount.rx++;
+
+ flg = TTY_NORMAL;
+
+ /*
+ * note that the error handling code is
+ * out of the main execution path
+ */
+ if (unlikely(status & (AT91_US_PARE | AT91_US_FRAME | AT91_US_OVRE))) {
+ UART_PUT_CR(port, AT91_US_RSTSTA); /* clear error */
+ if (status & (AT91_US_PARE))
+ port->icount.parity++;
+ if (status & (AT91_US_FRAME))
+ port->icount.frame++;
+ if (status & (AT91_US_OVRE))
+ port->icount.overrun++;
+
+ if (status & AT91_US_PARE)
+ flg = TTY_PARITY;
+ else if (status & AT91_US_FRAME)
+ flg = TTY_FRAME;
+ if (status & AT91_US_OVRE) {
+ /*
+ * overrun does *not* affect the character
+ * we read from the FIFO
+ */
+ tty_insert_flip_char(tty, ch, flg);
+ ch = 0;
+ flg = TTY_OVERRUN;
+ }
+#ifdef SUPPORT_SYSRQ
+ port->sysrq = 0;
+#endif
+ }
+
+ if (uart_handle_sysrq_char(port, ch, regs))
+ goto ignore_char;
+
+ tty_insert_flip_char(tty, ch, flg);
+
+ ignore_char:
+ status = UART_GET_CSR(port) & port->read_status_mask;
+ }
+
+ tty_flip_buffer_push(tty);
+}
+
+/*
+ * Transmit characters (called from interrupt handler)
+ */
+static void at91_tx_chars(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->info->xmit;
+
+ if (port->x_char) {
+ UART_PUT_CHAR(port, port->x_char);
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ at91_stop_tx(port);
+ return;
+ }
+
+ while (UART_GET_CSR(port) & AT91_US_TXRDY) {
+ UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ at91_stop_tx(port);
+}
+
+/*
+ * Interrupt handler
+ */
+static irqreturn_t at91_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct uart_port *port = dev_id;
+ unsigned int status, pending, pass_counter = 0;
+
+ status = UART_GET_CSR(port);
+ pending = status & port->read_status_mask;
+ if (pending) {
+ do {
+ if (pending & AT91_US_RXRDY)
+ at91_rx_chars(port, regs);
+
+ /* Clear the relevant break bits */
+ if (pending & AT91_US_RXBRK) {
+ UART_PUT_CR(port, AT91_US_RSTSTA);
+ port->icount.brk++;
+ uart_handle_break(port);
+ }
+
+ // TODO: All reads to CSR will clear these interrupts!
+ if (pending & AT91_US_RIIC) port->icount.rng++;
+ if (pending & AT91_US_DSRIC) port->icount.dsr++;
+ if (pending & AT91_US_DCDIC)
+ uart_handle_dcd_change(port, !(status & AT91_US_DCD));
+ if (pending & AT91_US_CTSIC)
+ uart_handle_cts_change(port, !(status & AT91_US_CTS));
+ if (pending & (AT91_US_RIIC | AT91_US_DSRIC | AT91_US_DCDIC | AT91_US_CTSIC))
+ wake_up_interruptible(&port->info->delta_msr_wait);
+
+ if (pending & AT91_US_TXRDY)
+ at91_tx_chars(port);
+ if (pass_counter++ > AT91_ISR_PASS_LIMIT)
+ break;
+
+ status = UART_GET_CSR(port);
+ pending = status & port->read_status_mask;
+ } while (pending);
+ }
+ return IRQ_HANDLED;
+}
+
+/*
+ * Perform initialization and enable port for reception
+ */
+static int at91_startup(struct uart_port *port)
+{
+ int retval;
+
+ /*
+ * Ensure that no interrupts are enabled otherwise when
+ * request_irq() is called we could get stuck trying to
+ * handle an unexpected interrupt
+ */
+ UART_PUT_IDR(port, -1);
+
+ /*
+ * Allocate the IRQ
+ */
+ retval = request_irq(port->irq, at91_interrupt, SA_SHIRQ, "at91_serial", port);
+ if (retval) {
+ printk("at91_serial: at91_startup - Can't get irq\n");
+ return retval;
+ }
+
+ /*
+ * If there is a specific "open" function (to register
+ * control line interrupts)
+ */
+ if (at91_open) {
+ retval = at91_open(port);
+ if (retval) {
+ free_irq(port->irq, port);
+ return retval;
+ }
+ }
+
+ port->read_status_mask = AT91_US_RXRDY | AT91_US_TXRDY | AT91_US_OVRE
+ | AT91_US_FRAME | AT91_US_PARE | AT91_US_RXBRK;
+ /*
+ * Finally, enable the serial port
+ */
+ UART_PUT_CR(port, AT91_US_RSTSTA | AT91_US_RSTRX);
+ UART_PUT_CR(port, AT91_US_TXEN | AT91_US_RXEN); /* enable xmit & rcvr */
+ UART_PUT_IER(port, AT91_US_RXRDY); /* do receive only */
+ return 0;
+}
+
+/*
+ * Disable the port
+ */
+static void at91_shutdown(struct uart_port *port)
+{
+ /*
+ * Disable all interrupts, port and break condition.
+ */
+ UART_PUT_CR(port, AT91_US_RSTSTA);
+ UART_PUT_IDR(port, -1);
+
+ /*
+ * Free the interrupt
+ */
+ free_irq(port->irq, port);
+
+ /*
+ * If there is a specific "close" function (to unregister
+ * control line interrupts)
+ */
+ if (at91_close)
+ at91_close(port);
+}
+
+/*
+ * Power / Clock management.
+ */
+static void at91_serial_pm(struct uart_port *port, unsigned int state, unsigned int oldstate)
+{
+ switch (state) {
+ case 0:
+ /*
+ * Enable the peripheral clock for this serial port.
+ * This is called on uart_open() or a resume event.
+ */
+ at91_sys_write(AT91_PMC_PCER, 1 << port->irq);
+ break;
+ case 3:
+ /*
+ * Disable the peripheral clock for this serial port.
+ * This is called on uart_close() or a suspend event.
+ */
+ if (port->irq != AT91_ID_SYS) /* is this a shared clock? */
+ at91_sys_write(AT91_PMC_PCDR, 1 << port->irq);
+ break;
+ default:
+ printk(KERN_ERR "at91_serial: unknown pm %d\n", state);
+ }
+}
+
+/*
+ * Change the port parameters
+ */
+static void at91_set_termios(struct uart_port *port, struct termios * termios, struct termios * old)
+{
+ unsigned long flags;
+ unsigned int mode, imr, quot, baud;
+
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
+ quot = uart_get_divisor(port, baud);
+
+ /* Get current mode register */
+ mode = UART_GET_MR(port) & ~(AT91_US_CHRL | AT91_US_NBSTOP | AT91_US_PAR);
+
+ /* byte size */
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ mode |= AT91_US_CHRL_5;
+ break;
+ case CS6:
+ mode |= AT91_US_CHRL_6;
+ break;
+ case CS7:
+ mode |= AT91_US_CHRL_7;
+ break;
+ default:
+ mode |= AT91_US_CHRL_8;
+ break;
+ }
+
+ /* stop bits */
+ if (termios->c_cflag & CSTOPB)
+ mode |= AT91_US_NBSTOP_2;
+
+ /* parity */
+ if (termios->c_cflag & PARENB) {
+ if (termios->c_cflag & CMSPAR) { /* Mark or Space parity */
+ if (termios->c_cflag & PARODD)
+ mode |= AT91_US_PAR_MARK;
+ else
+ mode |= AT91_US_PAR_SPACE;
+ }
+ else if (termios->c_cflag & PARODD)
+ mode |= AT91_US_PAR_ODD;
+ else
+ mode |= AT91_US_PAR_EVEN;
+ }
+ else
+ mode |= AT91_US_PAR_NONE;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ port->read_status_mask |= AT91_US_OVRE;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= AT91_US_FRAME | AT91_US_PARE;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= AT91_US_RXBRK;
+
+ /*
+ * Characters to ignore
+ */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= (AT91_US_FRAME | AT91_US_PARE);
+ if (termios->c_iflag & IGNBRK) {
+ port->ignore_status_mask |= AT91_US_RXBRK;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= AT91_US_OVRE;
+ }
+
+ // TODO: Ignore all characters if CREAD is set.
+
+ /* update the per-port timeout */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ /* disable interrupts and drain transmitter */
+ imr = UART_GET_IMR(port); /* get interrupt mask */
+ UART_PUT_IDR(port, -1); /* disable all interrupts */
+ while (!(UART_GET_CSR(port) & AT91_US_TXEMPTY)) { barrier(); }
+
+ /* disable receiver and transmitter */
+ UART_PUT_CR(port, AT91_US_TXDIS | AT91_US_RXDIS);
+
+ /* set the parity, stop bits and data size */
+ UART_PUT_MR(port, mode);
+
+ /* set the baud rate */
+ UART_PUT_BRGR(port, quot);
+ UART_PUT_CR(port, AT91_US_RSTSTA | AT91_US_RSTRX);
+ UART_PUT_CR(port, AT91_US_TXEN | AT91_US_RXEN);
+
+ /* restore interrupts */
+ UART_PUT_IER(port, imr);
+
+ /* CTS flow-control and modem-status interrupts */
+ if (UART_ENABLE_MS(port, termios->c_cflag))
+ port->ops->enable_ms(port);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/*
+ * Return string describing the specified port
+ */
+static const char *at91_type(struct uart_port *port)
+{
+ return (port->type == PORT_AT91RM9200) ? "AT91_SERIAL" : NULL;
+}
+
+/*
+ * Release the memory region(s) being used by 'port'.
+ */
+static void at91_release_port(struct uart_port *port)
+{
+ release_mem_region(port->mapbase,
+ (port->mapbase == AT91_VA_BASE_DBGU) ? 512 : SZ_16K);
+}
+
+/*
+ * Request the memory region(s) being used by 'port'.
+ */
+static int at91_request_port(struct uart_port *port)
+{
+ return request_mem_region(port->mapbase,
+ (port->mapbase == AT91_VA_BASE_DBGU) ? 512 : SZ_16K,
+ "at91_serial") != NULL ? 0 : -EBUSY;
+
+}
+
+/*
+ * Configure/autoconfigure the port.
+ */
+static void at91_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = PORT_AT91RM9200;
+ at91_request_port(port);
+ }
+}
+
+/*
+ * Verify the new serial_struct (for TIOCSSERIAL).
+ */
+static int at91_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ int ret = 0;
+ if (ser->type != PORT_UNKNOWN && ser->type != PORT_AT91RM9200)
+ ret = -EINVAL;
+ if (port->irq != ser->irq)
+ ret = -EINVAL;
+ if (ser->io_type != SERIAL_IO_MEM)
+ ret = -EINVAL;
+ if (port->uartclk / 16 != ser->baud_base)
+ ret = -EINVAL;
+ if ((void *)port->mapbase != ser->iomem_base)
+ ret = -EINVAL;
+ if (port->iobase != ser->port)
+ ret = -EINVAL;
+ if (ser->hub6 != 0)
+ ret = -EINVAL;
+ return ret;
+}
+
+static struct uart_ops at91_pops = {
+ .tx_empty = at91_tx_empty,
+ .set_mctrl = at91_set_mctrl,
+ .get_mctrl = at91_get_mctrl,
+ .stop_tx = at91_stop_tx,
+ .start_tx = at91_start_tx,
+ .stop_rx = at91_stop_rx,
+ .enable_ms = at91_enable_ms,
+ .break_ctl = at91_break_ctl,
+ .startup = at91_startup,
+ .shutdown = at91_shutdown,
+ .set_termios = at91_set_termios,
+ .type = at91_type,
+ .release_port = at91_release_port,
+ .request_port = at91_request_port,
+ .config_port = at91_config_port,
+ .verify_port = at91_verify_port,
+ .pm = at91_serial_pm,
+};
+
+static struct uart_port at91_ports[AT91_NR_UART];
+
+void __init at91_init_ports(void)
+{
+ static int first = 1;
+ int i;
+
+ if (!first)
+ return;
+ first = 0;
+
+ for (i = 0; i < AT91_NR_UART; i++) {
+ at91_ports[i].iotype = UPIO_MEM;
+ at91_ports[i].flags = UPF_BOOT_AUTOCONF;
+ at91_ports[i].uartclk = at91_master_clock;
+ at91_ports[i].ops = &at91_pops;
+ at91_ports[i].fifosize = 1;
+ at91_ports[i].line = i;
+ }
+}
+
+void __init at91_register_uart_fns(struct at91rm9200_port_fns *fns)
+{
+ if (fns->enable_ms)
+ at91_pops.enable_ms = fns->enable_ms;
+ if (fns->get_mctrl)
+ at91_pops.get_mctrl = fns->get_mctrl;
+ if (fns->set_mctrl)
+ at91_pops.set_mctrl = fns->set_mctrl;
+ at91_open = fns->open;
+ at91_close = fns->close;
+ at91_pops.pm = fns->pm;
+ at91_pops.set_wake = fns->set_wake;
+}
+
+/*
+ * Setup ports.
+ */
+void __init at91_register_uart(int idx, int port)
+{
+ if ((idx < 0) || (idx >= AT91_NR_UART)) {
+ printk(KERN_ERR "%s: bad index number %d\n", __FUNCTION__, idx);
+ return;
+ }
+
+ switch (port) {
+ case 0:
+ at91_ports[idx].membase = (void __iomem *) AT91_VA_BASE_US0;
+ at91_ports[idx].mapbase = AT91_VA_BASE_US0;
+ at91_ports[idx].irq = AT91_ID_US0;
+ AT91_CfgPIO_USART0();
+ break;
+ case 1:
+ at91_ports[idx].membase = (void __iomem *) AT91_VA_BASE_US1;
+ at91_ports[idx].mapbase = AT91_VA_BASE_US1;
+ at91_ports[idx].irq = AT91_ID_US1;
+ AT91_CfgPIO_USART1();
+ break;
+ case 2:
+ at91_ports[idx].membase = (void __iomem *) AT91_VA_BASE_US2;
+ at91_ports[idx].mapbase = AT91_VA_BASE_US2;
+ at91_ports[idx].irq = AT91_ID_US2;
+ AT91_CfgPIO_USART2();
+ break;
+ case 3:
+ at91_ports[idx].membase = (void __iomem *) AT91_VA_BASE_US3;
+ at91_ports[idx].mapbase = AT91_VA_BASE_US3;
+ at91_ports[idx].irq = AT91_ID_US3;
+ AT91_CfgPIO_USART3();
+ break;
+ case 4:
+ at91_ports[idx].membase = (void __iomem *) AT91_VA_BASE_DBGU;
+ at91_ports[idx].mapbase = AT91_VA_BASE_DBGU;
+ at91_ports[idx].irq = AT91_ID_SYS;
+ AT91_CfgPIO_DBGU();
+ break;
+ default:
+ printk(KERN_ERR "%s : bad port number %d\n", __FUNCTION__, port);
+ }
+}
+
+#ifdef CONFIG_SERIAL_AT91_CONSOLE
+
+/*
+ * Interrupts are disabled on entering
+ */
+static void at91_console_write(struct console *co, const char *s, u_int count)
+{
+ struct uart_port *port = at91_ports + co->index;
+ unsigned int status, i, imr;
+
+ /*
+ * First, save IMR and then disable interrupts
+ */
+ imr = UART_GET_IMR(port); /* get interrupt mask */
+ UART_PUT_IDR(port, AT91_US_RXRDY | AT91_US_TXRDY);
+
+ /*
+ * Now, do each character
+ */
+ for (i = 0; i < count; i++) {
+ do {
+ status = UART_GET_CSR(port);
+ } while (!(status & AT91_US_TXRDY));
+ UART_PUT_CHAR(port, s[i]);
+ if (s[i] == '\n') {
+ do {
+ status = UART_GET_CSR(port);
+ } while (!(status & AT91_US_TXRDY));
+ UART_PUT_CHAR(port, '\r');
+ }
+ }
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore IMR
+ */
+ do {
+ status = UART_GET_CSR(port);
+ } while (!(status & AT91_US_TXRDY));
+ UART_PUT_IER(port, imr); /* set interrupts back the way they were */
+}
+
+/*
+ * If the port was already initialised (eg, by a boot loader), try to determine
+ * the current setup.
+ */
+static void __init at91_console_get_options(struct uart_port *port, int *baud, int *parity, int *bits)
+{
+ unsigned int mr, quot;
+
+// TODO: CR is a write-only register
+// unsigned int cr;
+//
+// cr = UART_GET_CR(port) & (AT91_US_RXEN | AT91_US_TXEN);
+// if (cr == (AT91_US_RXEN | AT91_US_TXEN)) {
+// /* ok, the port was enabled */
+// }
+
+ mr = UART_GET_MR(port) & AT91_US_CHRL;
+ if (mr == AT91_US_CHRL_8)
+ *bits = 8;
+ else
+ *bits = 7;
+
+ mr = UART_GET_MR(port) & AT91_US_PAR;
+ if (mr == AT91_US_PAR_EVEN)
+ *parity = 'e';
+ else if (mr == AT91_US_PAR_ODD)
+ *parity = 'o';
+
+ quot = UART_GET_BRGR(port);
+ *baud = port->uartclk / (16 * (quot));
+}
+
+static int __init at91_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ port = uart_get_console(at91_ports, AT91_NR_UART, co);
+
+ /*
+ * Enable the serial console, in case the bootloader did not do it.
+ */
+ at91_sys_write(AT91_PMC_PCER, 1 << port->irq); /* enable clock */
+ UART_PUT_IDR(port, -1); /* disable interrupts */
+ UART_PUT_CR(port, AT91_US_RSTSTA | AT91_US_RSTRX);
+ UART_PUT_CR(port, AT91_US_TXEN | AT91_US_RXEN);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else
+ at91_console_get_options(port, &baud, &parity, &bits);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver at91_uart;
+
+static struct console at91_console = {
+ .name = AT91_DEVICENAME,
+ .write = at91_console_write,
+ .device = uart_console_device,
+ .setup = at91_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &at91_uart,
+};
+
+#define AT91_CONSOLE_DEVICE &at91_console
+
+static int __init at91_console_init(void)
+{
+ at91_init_ports();
+
+ at91_console.index = at91_console_port;
+ register_console(&at91_console);
+ return 0;
+}
+console_initcall(at91_console_init);
+
+#else
+#define AT91_CONSOLE_DEVICE NULL
+#endif
+
+static struct uart_driver at91_uart = {
+ .owner = THIS_MODULE,
+ .driver_name = AT91_DEVICENAME,
+ .dev_name = AT91_DEVICENAME,
+ .devfs_name = AT91_DEVICENAME,
+ .major = SERIAL_AT91_MAJOR,
+ .minor = MINOR_START,
+ .nr = AT91_NR_UART,
+ .cons = AT91_CONSOLE_DEVICE,
+};
+
+static int __init at91_serial_init(void)
+{
+ int ret, i;
+
+ at91_init_ports();
+
+ ret = uart_register_driver(&at91_uart);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < AT91_NR_UART; i++) {
+ if (at91_serial_map[i] >= 0)
+ uart_add_one_port(&at91_uart, &at91_ports[i]);
+ }
+
+ return 0;
+}
+
+static void __exit at91_serial_exit(void)
+{
+ int i;
+
+ for (i = 0; i < AT91_NR_UART; i++) {
+ if (at91_serial_map[i] >= 0)
+ uart_remove_one_port(&at91_uart, &at91_ports[i]);
+ }
+
+ uart_unregister_driver(&at91_uart);
+}
+
+module_init(at91_serial_init);
+module_exit(at91_serial_exit);
+
+MODULE_AUTHOR("Rick Bronson");
+MODULE_DESCRIPTION("AT91 generic serial port driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/serial/au1x00_uart.c b/drivers/serial/au1x00_uart.c
index a274ebf256a..ceb5d7f37bb 100644
--- a/drivers/serial/au1x00_uart.c
+++ b/drivers/serial/au1x00_uart.c
@@ -241,18 +241,12 @@ static _INLINE_ void
receive_chars(struct uart_8250_port *up, int *status, struct pt_regs *regs)
{
struct tty_struct *tty = up->port.info->tty;
- unsigned char ch;
+ unsigned char ch, flag;
int max_count = 256;
do {
- if (unlikely(tty->flip.count >= TTY_FLIPBUF_SIZE)) {
- tty->flip.work.func((void *)tty);
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- return; // if TTY_DONT_FLIP is set
- }
ch = serial_inp(up, UART_RX);
- *tty->flip.char_buf_ptr = ch;
- *tty->flip.flag_buf_ptr = TTY_NORMAL;
+ flag = TTY_NORMAL;
up->port.icount.rx++;
if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
@@ -292,30 +286,23 @@ receive_chars(struct uart_8250_port *up, int *status, struct pt_regs *regs)
#endif
if (*status & UART_LSR_BI) {
DEBUG_INTR("handling break....");
- *tty->flip.flag_buf_ptr = TTY_BREAK;
+ flag = TTY_BREAK;
} else if (*status & UART_LSR_PE)
- *tty->flip.flag_buf_ptr = TTY_PARITY;
+ flag = TTY_PARITY;
else if (*status & UART_LSR_FE)
- *tty->flip.flag_buf_ptr = TTY_FRAME;
+ flag = TTY_FRAME;
}
if (uart_handle_sysrq_char(&up->port, ch, regs))
goto ignore_char;
- if ((*status & up->port.ignore_status_mask) == 0) {
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
- }
- if ((*status & UART_LSR_OE) &&
- tty->flip.count < TTY_FLIPBUF_SIZE) {
+ if ((*status & up->port.ignore_status_mask) == 0)
+ tty_insert_flip_char(tty, ch, flag);
+ if (*status & UART_LSR_OE)
/*
* Overrun is special, since it's reported
* immediately, and doesn't affect the current
* character.
*/
- *tty->flip.flag_buf_ptr = TTY_OVERRUN;
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
}
ignore_char:
*status = serial_inp(up, UART_LSR);
diff --git a/drivers/serial/clps711x.c b/drivers/serial/clps711x.c
index 87ef368384f..8ef999481f9 100644
--- a/drivers/serial/clps711x.c
+++ b/drivers/serial/clps711x.c
@@ -104,8 +104,6 @@ static irqreturn_t clps711xuart_int_rx(int irq, void *dev_id, struct pt_regs *re
while (!(status & SYSFLG_URXFE)) {
ch = clps_readl(UARTDR(port));
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- goto ignore_char;
port->icount.rx++;
flg = TTY_NORMAL;
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index 987d22b53c2..16af5626c24 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -608,7 +608,7 @@ static int cpm_uart_tx_pump(struct uart_port *port)
p = cpm2cpu_addr(bdp->cbd_bufaddr);
- *p++ = xmit->buf[xmit->tail];
+ *p++ = port->x_char;
bdp->cbd_datlen = 1;
bdp->cbd_sc |= BD_SC_READY;
/* Get next BD. */
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c
index 08c42c00018..be12623d854 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/serial/crisv10.c
@@ -442,6 +442,7 @@ static char *serial_version = "$Revision: 1.25 $";
#include <linux/init.h>
#include <asm/uaccess.h>
#include <linux/kernel.h>
+#include <linux/mutex.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -1315,11 +1316,7 @@ static const struct control_pins e100_modem_pins[NR_PORTS] =
* memory if large numbers of serial ports are open.
*/
static unsigned char *tmp_buf;
-#ifdef DECLARE_MUTEX
-static DECLARE_MUTEX(tmp_buf_sem);
-#else
-static struct semaphore tmp_buf_sem = MUTEX;
-#endif
+static DEFINE_MUTEX(tmp_buf_mutex);
/* Calculate the chartime depending on baudrate, numbor of bits etc. */
static void update_char_time(struct e100_serial * info)
@@ -3661,7 +3658,7 @@ rs_raw_write(struct tty_struct * tty, int from_user,
* design.
*/
if (from_user) {
- down(&tmp_buf_sem);
+ mutex_lock(&tmp_buf_mutex);
while (1) {
int c1;
c = CIRC_SPACE_TO_END(info->xmit.head,
@@ -3692,7 +3689,7 @@ rs_raw_write(struct tty_struct * tty, int from_user,
count -= c;
ret += c;
}
- up(&tmp_buf_sem);
+ mutex_unlock(&tmp_buf_mutex);
} else {
cli();
while (count) {
diff --git a/drivers/serial/dz.c b/drivers/serial/dz.c
index 4d8516d1bb7..a64ba26a94e 100644
--- a/drivers/serial/dz.c
+++ b/drivers/serial/dz.c
@@ -216,8 +216,6 @@ static inline void dz_receive_chars(struct dz_port *dport)
if (!tty)
break;
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- break;
icount->rx++;
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index eb31125c6a3..144a7a352b2 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -729,19 +729,20 @@ static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
unsigned short int status;
struct uart_icount *icount;
unsigned long offset;
+ unsigned char flag;
trace(icom_port, "RCV_COMPLETE", 0);
rcv_buff = icom_port->next_rcv;
status = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].flags);
while (status & SA_FL_RCV_DONE) {
+ int first = -1;
trace(icom_port, "FID_STATUS", status);
count = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].leLength);
+ count = tty_buffer_request_room(tty, count);
trace(icom_port, "RCV_COUNT", count);
- if (count > (TTY_FLIPBUF_SIZE - tty->flip.count))
- count = TTY_FLIPBUF_SIZE - tty->flip.count;
trace(icom_port, "REAL_COUNT", count);
@@ -749,15 +750,10 @@ static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
cpu_to_le32(icom_port->statStg->rcv[rcv_buff].leBuffer) -
icom_port->recv_buf_pci;
- memcpy(tty->flip.char_buf_ptr,(unsigned char *)
- ((unsigned long)icom_port->recv_buf + offset), count);
-
+ /* Block copy all but the last byte as this may have status */
if (count > 0) {
- tty->flip.count += count - 1;
- tty->flip.char_buf_ptr += count - 1;
-
- memset(tty->flip.flag_buf_ptr, 0, count);
- tty->flip.flag_buf_ptr += count - 1;
+ first = icom_port->recv_buf[offset];
+ tty_insert_flip_string(tty, icom_port->recv_buf + offset, count - 1);
}
icount = &icom_port->uart_port.icount;
@@ -765,12 +761,14 @@ static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
/* Break detect logic */
if ((status & SA_FLAGS_FRAME_ERROR)
- && (tty->flip.char_buf_ptr[0] == 0x00)) {
+ && first == 0) {
status &= ~SA_FLAGS_FRAME_ERROR;
status |= SA_FLAGS_BREAK_DET;
trace(icom_port, "BREAK_DET", 0);
}
+ flag = TTY_NORMAL;
+
if (status &
(SA_FLAGS_BREAK_DET | SA_FLAGS_PARITY_ERROR |
SA_FLAGS_FRAME_ERROR | SA_FLAGS_OVERRUN)) {
@@ -797,33 +795,26 @@ static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
status &= icom_port->read_status_mask;
if (status & SA_FLAGS_BREAK_DET) {
- *tty->flip.flag_buf_ptr = TTY_BREAK;
+ flag = TTY_BREAK;
} else if (status & SA_FLAGS_PARITY_ERROR) {
trace(icom_port, "PARITY_ERROR", 0);
- *tty->flip.flag_buf_ptr = TTY_PARITY;
+ flag = TTY_PARITY;
} else if (status & SA_FLAGS_FRAME_ERROR)
- *tty->flip.flag_buf_ptr = TTY_FRAME;
-
- if (status & SA_FLAGS_OVERRUN) {
- /*
- * Overrun is special, since it's
- * reported immediately, and doesn't
- * affect the current character
- */
- if (tty->flip.count < TTY_FLIPBUF_SIZE) {
- tty->flip.count++;
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- *tty->flip.flag_buf_ptr = TTY_OVERRUN;
- }
- }
+ flag = TTY_FRAME;
+
}
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
- ignore_char:
- icom_port->statStg->rcv[rcv_buff].flags = 0;
+ tty_insert_flip_char(tty, *(icom_port->recv_buf + offset + count - 1), flag);
+
+ if (status & SA_FLAGS_OVERRUN)
+ /*
+ * Overrun is special, since it's
+ * reported immediately, and doesn't
+ * affect the current character
+ */
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ignore_char:
+ icom_port->statStg->rcv[rcv_buff].flags = 0;
icom_port->statStg->rcv[rcv_buff].leLength = 0;
icom_port->statStg->rcv[rcv_buff].WorkingLength =
(unsigned short int) cpu_to_le16(RCV_BUFF_SZ);
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index 83c4c121658..5c098be9346 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -256,9 +256,6 @@ static irqreturn_t imx_rxint(int irq, void *dev_id, struct pt_regs *regs)
error_return:
tty_insert_flip_char(tty, rx, flg);
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- goto out;
-
ignore_char:
rx = URXD0((u32)sport->port.membase);
} while(rx & URXD_CHARRDY);
diff --git a/drivers/serial/ioc4_serial.c b/drivers/serial/ioc4_serial.c
index 771676abee6..1d85533d46d 100644
--- a/drivers/serial/ioc4_serial.c
+++ b/drivers/serial/ioc4_serial.c
@@ -2327,19 +2327,13 @@ static void receive_chars(struct uart_port *the_port)
spin_lock_irqsave(&the_port->lock, pflags);
tty = info->tty;
- if (request_count > TTY_FLIPBUF_SIZE - tty->flip.count)
- request_count = TTY_FLIPBUF_SIZE - tty->flip.count;
+ request_count = tty_buffer_request_room(tty, IOC4_MAX_CHARS - 2);
if (request_count > 0) {
icount = &the_port->icount;
read_count = do_read(the_port, ch, request_count);
if (read_count > 0) {
- flip = 1;
- memcpy(tty->flip.char_buf_ptr, ch, read_count);
- memset(tty->flip.flag_buf_ptr, TTY_NORMAL, read_count);
- tty->flip.char_buf_ptr += read_count;
- tty->flip.flag_buf_ptr += read_count;
- tty->flip.count += read_count;
+ tty_insert_flip_string(tty, ch, read_count);
icount->rx += read_count;
}
}
diff --git a/drivers/serial/ip22zilog.c b/drivers/serial/ip22zilog.c
index ef132349f31..66f117d1506 100644
--- a/drivers/serial/ip22zilog.c
+++ b/drivers/serial/ip22zilog.c
@@ -259,13 +259,7 @@ static void ip22zilog_receive_chars(struct uart_ip22zilog_port *up,
struct tty_struct *tty = up->port.info->tty; /* XXX info==NULL? */
while (1) {
- unsigned char ch, r1;
-
- if (unlikely(tty->flip.count >= TTY_FLIPBUF_SIZE)) {
- tty->flip.work.func((void *)tty);
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- return; /* XXX Ignores SysRq when we need it most. Fix. */
- }
+ unsigned char ch, r1, flag;
r1 = read_zsreg(channel, R1);
if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR)) {
@@ -303,8 +297,7 @@ static void ip22zilog_receive_chars(struct uart_ip22zilog_port *up,
}
/* A real serial line, record the character and status. */
- *tty->flip.char_buf_ptr = ch;
- *tty->flip.flag_buf_ptr = TTY_NORMAL;
+ flag = TTY_NORMAL;
up->port.icount.rx++;
if (r1 & (BRK_ABRT | PAR_ERR | Rx_OVR | CRC_ERR)) {
if (r1 & BRK_ABRT) {
@@ -321,28 +314,21 @@ static void ip22zilog_receive_chars(struct uart_ip22zilog_port *up,
up->port.icount.overrun++;
r1 &= up->port.read_status_mask;
if (r1 & BRK_ABRT)
- *tty->flip.flag_buf_ptr = TTY_BREAK;
+ flag = TTY_BREAK;
else if (r1 & PAR_ERR)
- *tty->flip.flag_buf_ptr = TTY_PARITY;
+ flag = TTY_PARITY;
else if (r1 & CRC_ERR)
- *tty->flip.flag_buf_ptr = TTY_FRAME;
+ flag = TTY_FRAME;
}
if (uart_handle_sysrq_char(&up->port, ch, regs))
goto next_char;
if (up->port.ignore_status_mask == 0xff ||
- (r1 & up->port.ignore_status_mask) == 0) {
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
- }
- if ((r1 & Rx_OVR) &&
- tty->flip.count < TTY_FLIPBUF_SIZE) {
- *tty->flip.flag_buf_ptr = TTY_OVERRUN;
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
- }
+ (r1 & up->port.ignore_status_mask) == 0)
+ tty_insert_flip_char(tty, ch, flag);
+
+ if (r1 & Rx_OVR)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
next_char:
ch = readb(&channel->control);
ZSDELAY();
diff --git a/drivers/serial/m32r_sio.c b/drivers/serial/m32r_sio.c
index b0ecc7537ce..b48066a64a7 100644
--- a/drivers/serial/m32r_sio.c
+++ b/drivers/serial/m32r_sio.c
@@ -331,17 +331,12 @@ static _INLINE_ void receive_chars(struct uart_sio_port *up, int *status,
{
struct tty_struct *tty = up->port.info->tty;
unsigned char ch;
+ unsigned char flag;
int max_count = 256;
do {
- if (unlikely(tty->flip.count >= TTY_FLIPBUF_SIZE)) {
- tty->flip.work.func((void *)tty);
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- return; // if TTY_DONT_FLIP is set
- }
ch = sio_in(up, SIORXB);
- *tty->flip.char_buf_ptr = ch;
- *tty->flip.flag_buf_ptr = TTY_NORMAL;
+ flag = TTY_NORMAL;
up->port.icount.rx++;
if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
@@ -380,30 +375,24 @@ static _INLINE_ void receive_chars(struct uart_sio_port *up, int *status,
if (*status & UART_LSR_BI) {
DEBUG_INTR("handling break....");
- *tty->flip.flag_buf_ptr = TTY_BREAK;
+ flag = TTY_BREAK;
} else if (*status & UART_LSR_PE)
- *tty->flip.flag_buf_ptr = TTY_PARITY;
+ flag = TTY_PARITY;
else if (*status & UART_LSR_FE)
- *tty->flip.flag_buf_ptr = TTY_FRAME;
+ flag = TTY_FRAME;
}
if (uart_handle_sysrq_char(&up->port, ch, regs))
goto ignore_char;
- if ((*status & up->port.ignore_status_mask) == 0) {
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
- }
- if ((*status & UART_LSR_OE) &&
- tty->flip.count < TTY_FLIPBUF_SIZE) {
+ if ((*status & up->port.ignore_status_mask) == 0)
+ tty_insert_flip_char(tty, ch, flag);
+
+ if (*status & UART_LSR_OE) {
/*
* Overrun is special, since it's reported
* immediately, and doesn't affect the current
* character.
*/
- *tty->flip.flag_buf_ptr = TTY_OVERRUN;
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
}
ignore_char:
*status = serial_in(up, UART_LSR);
diff --git a/drivers/serial/mcfserial.c b/drivers/serial/mcfserial.c
index 47f7404cb04..d957a3a9edf 100644
--- a/drivers/serial/mcfserial.c
+++ b/drivers/serial/mcfserial.c
@@ -313,7 +313,7 @@ static inline void receive_chars(struct mcf_serial *info)
{
volatile unsigned char *uartp;
struct tty_struct *tty = info->tty;
- unsigned char status, ch;
+ unsigned char status, ch, flag;
if (!tty)
return;
@@ -321,10 +321,6 @@ static inline void receive_chars(struct mcf_serial *info)
uartp = info->addr;
while ((status = uartp[MCFUART_USR]) & MCFUART_USR_RXREADY) {
-
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- break;
-
ch = uartp[MCFUART_URB];
info->stats.rx++;
@@ -335,29 +331,24 @@ static inline void receive_chars(struct mcf_serial *info)
}
#endif
- tty->flip.count++;
+ flag = TTY_NORMAL;
if (status & MCFUART_USR_RXERR) {
uartp[MCFUART_UCR] = MCFUART_UCR_CMDRESETERR;
if (status & MCFUART_USR_RXBREAK) {
info->stats.rxbreak++;
- *tty->flip.flag_buf_ptr++ = TTY_BREAK;
+ flag = TTY_BREAK;
} else if (status & MCFUART_USR_RXPARITY) {
info->stats.rxparity++;
- *tty->flip.flag_buf_ptr++ = TTY_PARITY;
+ flag = TTY_PARITY;
} else if (status & MCFUART_USR_RXOVERRUN) {
info->stats.rxoverrun++;
- *tty->flip.flag_buf_ptr++ = TTY_OVERRUN;
+ flag = TTY_OVERRUN;
} else if (status & MCFUART_USR_RXFRAMING) {
info->stats.rxframing++;
- *tty->flip.flag_buf_ptr++ = TTY_FRAME;
- } else {
- /* This should never happen... */
- *tty->flip.flag_buf_ptr++ = 0;
+ flag = TTY_FRAME;
}
- } else {
- *tty->flip.flag_buf_ptr++ = 0;
}
- *tty->flip.char_buf_ptr++ = ch;
+ tty_insert_flip_char(tty, ch, flag);
}
schedule_work(&tty->flip.work);
@@ -1525,7 +1516,7 @@ static void mcfrs_irqinit(struct mcf_serial *info)
icrp = (volatile unsigned char *) (MCF_MBAR + MCFICM_INTC0 +
MCFINTC_ICR0 + MCFINT_UART0 + info->line);
- *icrp = 0x33; /* UART0 with level 6, priority 3 */
+ *icrp = 0x30 + info->line; /* level 6, line based priority */
imrp = (volatile unsigned long *) (MCF_MBAR + MCFICM_INTC0 +
MCFINTC_IMRL);
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index b8727d9bf69..61dd17d7bac 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -37,11 +37,11 @@
* by the bootloader or in the platform init code.
*
* The idx field must be equal to the PSC index ( e.g. 0 for PSC1, 1 for PSC2,
- * and so on). So the PSC1 is mapped to /dev/ttyS0, PSC2 to /dev/ttyS1 and so
- * on. But be warned, it's an ABSOLUTE REQUIREMENT ! This is needed mainly for
- * the console code : without this 1:1 mapping, at early boot time, when we are
- * parsing the kernel args console=ttyS?, we wouldn't know wich PSC it will be
- * mapped to.
+ * and so on). So the PSC1 is mapped to /dev/ttyPSC0, PSC2 to /dev/ttyPSC1 and
+ * so on. But be warned, it's an ABSOLUTE REQUIREMENT ! This is needed mainly
+ * for the console code : without this 1:1 mapping, at early boot time, when we
+ * are parsing the kernel args console=ttyPSC?, we wouldn't know which PSC it
+ * will be mapped to.
*/
#include <linux/config.h>
@@ -65,6 +65,10 @@
#include <linux/serial_core.h>
+/* We've been assigned a range on the "Low-density serial ports" major */
+#define SERIAL_PSC_MAJOR 204
+#define SERIAL_PSC_MINOR 148
+
#define ISR_PASS_LIMIT 256 /* Max number of iteration in the interrupt */
@@ -401,17 +405,13 @@ static inline int
mpc52xx_uart_int_rx_chars(struct uart_port *port, struct pt_regs *regs)
{
struct tty_struct *tty = port->info->tty;
- unsigned char ch;
+ unsigned char ch, flag;
unsigned short status;
/* While we can read, do so ! */
while ( (status = in_be16(&PSC(port)->mpc52xx_psc_status)) &
MPC52xx_PSC_SR_RXRDY) {
- /* If we are full, just stop reading */
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- break;
-
/* Get the char */
ch = in_8(&PSC(port)->mpc52xx_psc_buffer_8);
@@ -424,45 +424,35 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port, struct pt_regs *regs)
#endif
/* Store it */
- *tty->flip.char_buf_ptr = ch;
- *tty->flip.flag_buf_ptr = 0;
+
+ flag = TTY_NORMAL;
port->icount.rx++;
if ( status & (MPC52xx_PSC_SR_PE |
MPC52xx_PSC_SR_FE |
- MPC52xx_PSC_SR_RB |
- MPC52xx_PSC_SR_OE) ) {
+ MPC52xx_PSC_SR_RB) ) {
if (status & MPC52xx_PSC_SR_RB) {
- *tty->flip.flag_buf_ptr = TTY_BREAK;
+ flag = TTY_BREAK;
uart_handle_break(port);
} else if (status & MPC52xx_PSC_SR_PE)
- *tty->flip.flag_buf_ptr = TTY_PARITY;
+ flag = TTY_PARITY;
else if (status & MPC52xx_PSC_SR_FE)
- *tty->flip.flag_buf_ptr = TTY_FRAME;
- if (status & MPC52xx_PSC_SR_OE) {
- /*
- * Overrun is special, since it's
- * reported immediately, and doesn't
- * affect the current character
- */
- if (tty->flip.count < (TTY_FLIPBUF_SIZE-1)) {
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
- }
- *tty->flip.flag_buf_ptr = TTY_OVERRUN;
- }
+ flag = TTY_FRAME;
/* Clear error condition */
out_8(&PSC(port)->command,MPC52xx_PSC_RST_ERR_STAT);
}
-
- tty->flip.char_buf_ptr++;
- tty->flip.flag_buf_ptr++;
- tty->flip.count++;
-
+ tty_insert_flip_char(tty, ch, flag);
+ if (status & MPC52xx_PSC_SR_OE) {
+ /*
+ * Overrun is special, since it's
+ * reported immediately, and doesn't
+ * affect the current character
+ */
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ }
}
tty_flip_buffer_push(tty);
@@ -668,15 +658,15 @@ mpc52xx_console_setup(struct console *co, char *options)
}
-extern struct uart_driver mpc52xx_uart_driver;
+static struct uart_driver mpc52xx_uart_driver;
static struct console mpc52xx_console = {
- .name = "ttyS",
+ .name = "ttyPSC",
.write = mpc52xx_console_write,
.device = uart_console_device,
.setup = mpc52xx_console_setup,
.flags = CON_PRINTBUFFER,
- .index = -1, /* Specified on the cmdline (e.g. console=ttyS0 ) */
+ .index = -1, /* Specified on the cmdline (e.g. console=ttyPSC0 ) */
.data = &mpc52xx_uart_driver,
};
@@ -703,10 +693,10 @@ console_initcall(mpc52xx_console_init);
static struct uart_driver mpc52xx_uart_driver = {
.owner = THIS_MODULE,
.driver_name = "mpc52xx_psc_uart",
- .dev_name = "ttyS",
- .devfs_name = "ttyS",
- .major = TTY_MAJOR,
- .minor = 64,
+ .dev_name = "ttyPSC",
+ .devfs_name = "ttyPSC",
+ .major = SERIAL_PSC_MAJOR,
+ .minor = SERIAL_PSC_MINOR,
.nr = MPC52xx_PSC_MAXNUM,
.cons = MPC52xx_PSC_CONSOLE,
};
diff --git a/drivers/serial/mpsc.c b/drivers/serial/mpsc.c
index 8f83e4007ec..0ca83ac31d0 100644
--- a/drivers/serial/mpsc.c
+++ b/drivers/serial/mpsc.c
@@ -769,12 +769,12 @@ mpsc_rx_intr(struct mpsc_port_info *pi, struct pt_regs *regs)
bytes_in = be16_to_cpu(rxre->bytecnt);
/* Following use of tty struct directly is deprecated */
- if (unlikely((tty->flip.count + bytes_in) >= TTY_FLIPBUF_SIZE)){
+ if (unlikely(tty_buffer_request_room(tty, bytes_in) < bytes_in)) {
if (tty->low_latency)
tty_flip_buffer_push(tty);
/*
- * If this failed then we will throw awa the bytes
- * but mst do so to clear interrupts.
+ * If this failed then we will throw away the bytes
+ * but must do so to clear interrupts.
*/
}
diff --git a/drivers/serial/mux.c b/drivers/serial/mux.c
index 7633132a10a..4e49168c317 100644
--- a/drivers/serial/mux.c
+++ b/drivers/serial/mux.c
@@ -223,11 +223,6 @@ static void mux_read(struct uart_port *port)
if (MUX_EOFIFO(data))
break;
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- continue;
-
- *tty->flip.char_buf_ptr = data & 0xffu;
- *tty->flip.flag_buf_ptr = TTY_NORMAL;
port->icount.rx++;
if (MUX_BREAK(data)) {
@@ -239,9 +234,7 @@ static void mux_read(struct uart_port *port)
if (uart_handle_sysrq_char(port, data & 0xffu, NULL))
continue;
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
+ tty_insert_flip_char(tty, data & 0xFF, TTY_NORMAL);
}
if (start_count != port->icount.rx) {
diff --git a/drivers/serial/pmac_zilog.c b/drivers/serial/pmac_zilog.c
index 5ddd8ab1f10..5f52883e64d 100644
--- a/drivers/serial/pmac_zilog.c
+++ b/drivers/serial/pmac_zilog.c
@@ -60,6 +60,7 @@
#include <linux/pmu.h>
#include <linux/bitops.h>
#include <linux/sysrq.h>
+#include <linux/mutex.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -96,7 +97,7 @@ MODULE_LICENSE("GPL");
*/
static struct uart_pmac_port pmz_ports[MAX_ZS_PORTS];
static int pmz_ports_count;
-static DECLARE_MUTEX(pmz_irq_sem);
+static DEFINE_MUTEX(pmz_irq_mutex);
static struct uart_driver pmz_uart_reg = {
.owner = THIS_MODULE,
@@ -210,10 +211,9 @@ static struct tty_struct *pmz_receive_chars(struct uart_pmac_port *uap,
struct pt_regs *regs)
{
struct tty_struct *tty = NULL;
- unsigned char ch, r1, drop, error;
+ unsigned char ch, r1, drop, error, flag;
int loops = 0;
- retry:
/* The interrupt can be enabled when the port isn't open, typically
* that happens when using one port is open and the other closed (stale
* interrupt) or when one port is used as a console.
@@ -246,20 +246,6 @@ static struct tty_struct *pmz_receive_chars(struct uart_pmac_port *uap,
error = 0;
drop = 0;
- if (unlikely(tty->flip.count >= TTY_FLIPBUF_SIZE)) {
- /* Have to drop the lock here */
- pmz_debug("pmz: flip overflow\n");
- spin_unlock(&uap->port.lock);
- tty->flip.work.func((void *)tty);
- spin_lock(&uap->port.lock);
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- drop = 1;
- if (ZS_IS_ASLEEP(uap))
- return NULL;
- if (!ZS_IS_OPEN(uap))
- goto retry;
- }
-
r1 = read_zsreg(uap, R1);
ch = read_zsdata(uap);
@@ -295,8 +281,7 @@ static struct tty_struct *pmz_receive_chars(struct uart_pmac_port *uap,
if (drop)
goto next_char;
- *tty->flip.char_buf_ptr = ch;
- *tty->flip.flag_buf_ptr = TTY_NORMAL;
+ flag = TTY_NORMAL;
uap->port.icount.rx++;
if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR | BRK_ABRT)) {
@@ -316,26 +301,19 @@ static struct tty_struct *pmz_receive_chars(struct uart_pmac_port *uap,
uap->port.icount.overrun++;
r1 &= uap->port.read_status_mask;
if (r1 & BRK_ABRT)
- *tty->flip.flag_buf_ptr = TTY_BREAK;
+ flag = TTY_BREAK;
else if (r1 & PAR_ERR)
- *tty->flip.flag_buf_ptr = TTY_PARITY;
+ flag = TTY_PARITY;
else if (r1 & CRC_ERR)
- *tty->flip.flag_buf_ptr = TTY_FRAME;
+ flag = TTY_FRAME;
}
if (uap->port.ignore_status_mask == 0xff ||
(r1 & uap->port.ignore_status_mask) == 0) {
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
- }
- if ((r1 & Rx_OVR) &&
- tty->flip.count < TTY_FLIPBUF_SIZE) {
- *tty->flip.flag_buf_ptr = TTY_OVERRUN;
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
+ tty_insert_flip_char(tty, ch, flag);
}
+ if (r1 & Rx_OVR)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
next_char:
/* We can get stuck in an infinite loop getting char 0 when the
* line is in a wrong HW state, we break that here.
@@ -945,7 +923,7 @@ static int pmz_startup(struct uart_port *port)
if (uap->node == NULL)
return -ENODEV;
- down(&pmz_irq_sem);
+ mutex_lock(&pmz_irq_mutex);
uap->flags |= PMACZILOG_FLAG_IS_OPEN;
@@ -963,11 +941,11 @@ static int pmz_startup(struct uart_port *port)
dev_err(&uap->dev->ofdev.dev,
"Unable to register zs interrupt handler.\n");
pmz_set_scc_power(uap, 0);
- up(&pmz_irq_sem);
+ mutex_unlock(&pmz_irq_mutex);
return -ENXIO;
}
- up(&pmz_irq_sem);
+ mutex_unlock(&pmz_irq_mutex);
/* Right now, we deal with delay by blocking here, I'll be
* smarter later on
@@ -1004,7 +982,7 @@ static void pmz_shutdown(struct uart_port *port)
if (uap->node == NULL)
return;
- down(&pmz_irq_sem);
+ mutex_lock(&pmz_irq_mutex);
/* Release interrupt handler */
free_irq(uap->port.irq, uap);
@@ -1025,7 +1003,7 @@ static void pmz_shutdown(struct uart_port *port)
if (ZS_IS_CONS(uap) || ZS_IS_ASLEEP(uap)) {
spin_unlock_irqrestore(&port->lock, flags);
- up(&pmz_irq_sem);
+ mutex_unlock(&pmz_irq_mutex);
return;
}
@@ -1042,7 +1020,7 @@ static void pmz_shutdown(struct uart_port *port)
spin_unlock_irqrestore(&port->lock, flags);
- up(&pmz_irq_sem);
+ mutex_unlock(&pmz_irq_mutex);
pmz_debug("pmz: shutdown() done.\n");
}
@@ -1431,11 +1409,14 @@ static int __init pmz_init_port(struct uart_pmac_port *uap)
char name[1];
} *slots;
int len;
+ struct resource r_ports, r_rxdma, r_txdma;
/*
* Request & map chip registers
*/
- uap->port.mapbase = np->addrs[0].address;
+ if (of_address_to_resource(np, 0, &r_ports))
+ return -ENODEV;
+ uap->port.mapbase = r_ports.start;
uap->port.membase = ioremap(uap->port.mapbase, 0x1000);
uap->control_reg = uap->port.membase;
@@ -1445,16 +1426,20 @@ static int __init pmz_init_port(struct uart_pmac_port *uap)
* Request & map DBDMA registers
*/
#ifdef HAS_DBDMA
- if (np->n_addrs >= 3 && np->n_intrs >= 3)
+ if (of_address_to_resource(np, 1, &r_txdma) == 0 &&
+ of_address_to_resource(np, 2, &r_rxdma) == 0)
uap->flags |= PMACZILOG_FLAG_HAS_DMA;
+#else
+ memset(&r_txdma, 0, sizeof(struct resource));
+ memset(&r_rxdma, 0, sizeof(struct resource));
#endif
if (ZS_HAS_DMA(uap)) {
- uap->tx_dma_regs = ioremap(np->addrs[np->n_addrs - 2].address, 0x1000);
+ uap->tx_dma_regs = ioremap(r_txdma.start, 0x100);
if (uap->tx_dma_regs == NULL) {
uap->flags &= ~PMACZILOG_FLAG_HAS_DMA;
goto no_dma;
}
- uap->rx_dma_regs = ioremap(np->addrs[np->n_addrs - 1].address, 0x1000);
+ uap->rx_dma_regs = ioremap(r_rxdma.start, 0x100);
if (uap->rx_dma_regs == NULL) {
iounmap(uap->tx_dma_regs);
uap->tx_dma_regs = NULL;
@@ -1607,7 +1592,7 @@ static int pmz_suspend(struct macio_dev *mdev, pm_message_t pm_state)
state = pmz_uart_reg.state + uap->port.line;
- down(&pmz_irq_sem);
+ mutex_lock(&pmz_irq_mutex);
down(&state->sem);
spin_lock_irqsave(&uap->port.lock, flags);
@@ -1640,7 +1625,7 @@ static int pmz_suspend(struct macio_dev *mdev, pm_message_t pm_state)
pmz_set_scc_power(uap, 0);
up(&state->sem);
- up(&pmz_irq_sem);
+ mutex_unlock(&pmz_irq_mutex);
pmz_debug("suspend, switching complete\n");
@@ -1667,7 +1652,7 @@ static int pmz_resume(struct macio_dev *mdev)
state = pmz_uart_reg.state + uap->port.line;
- down(&pmz_irq_sem);
+ mutex_lock(&pmz_irq_mutex);
down(&state->sem);
spin_lock_irqsave(&uap->port.lock, flags);
@@ -1701,7 +1686,7 @@ static int pmz_resume(struct macio_dev *mdev)
bail:
up(&state->sem);
- up(&pmz_irq_sem);
+ mutex_unlock(&pmz_irq_mutex);
/* Right now, we deal with delay by blocking here, I'll be
* smarter later on
diff --git a/drivers/serial/pxa.c b/drivers/serial/pxa.c
index cc998b99a19..10535f00301 100644
--- a/drivers/serial/pxa.c
+++ b/drivers/serial/pxa.c
@@ -107,14 +107,6 @@ receive_chars(struct uart_pxa_port *up, int *status, struct pt_regs *regs)
int max_count = 256;
do {
- if (unlikely(tty->flip.count >= TTY_FLIPBUF_SIZE)) {
- if (tty->low_latency)
- tty_flip_buffer_push(tty);
- /*
- * If this failed then we will throw away the
- * bytes but must do so to clear interrupts
- */
- }
ch = serial_in(up, UART_RX);
flag = TTY_NORMAL;
up->port.icount.rx++;
diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c
index 47681c4654e..eb4883efb7c 100644
--- a/drivers/serial/s3c2410.c
+++ b/drivers/serial/s3c2410.c
@@ -72,12 +72,12 @@
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/delay.h>
+#include <linux/clk.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/hardware.h>
-#include <asm/hardware/clock.h>
#include <asm/arch/regs-serial.h>
#include <asm/arch/regs-gpio.h>
@@ -323,16 +323,6 @@ s3c24xx_serial_rx_chars(int irq, void *dev_id, struct pt_regs *regs)
if (s3c24xx_serial_rx_fifocnt(ourport, ufstat) == 0)
break;
- if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
- if (tty->low_latency)
- tty_flip_buffer_push(tty);
-
- /*
- * If this failed then we will throw away the
- * bytes but must do so to clear interrupts
- */
- }
-
uerstat = rd_regl(port, S3C2410_UERSTAT);
ch = rd_regb(port, S3C2410_URXH);
@@ -782,11 +772,9 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
if (ourport->baudclk != NULL && !IS_ERR(ourport->baudclk)) {
clk_disable(ourport->baudclk);
- clk_unuse(ourport->baudclk);
ourport->baudclk = NULL;
}
- clk_use(clk);
clk_enable(clk);
ourport->clksrc = clksrc;
@@ -1077,9 +1065,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
ourport->clk = clk_get(&platdev->dev, "uart");
- if (ourport->clk != NULL && !IS_ERR(ourport->clk))
- clk_use(ourport->clk);
-
dbg("port: map=%08x, mem=%08x, irq=%d, clock=%ld\n",
port->mapbase, port->membase, port->irq, port->uartclk);
diff --git a/drivers/serial/sa1100.c b/drivers/serial/sa1100.c
index 25a086458ab..1bd93168f50 100644
--- a/drivers/serial/sa1100.c
+++ b/drivers/serial/sa1100.c
@@ -201,8 +201,6 @@ sa1100_rx_chars(struct sa1100_port *sport, struct pt_regs *regs)
while (status & UTSR1_TO_SM(UTSR1_RNE)) {
ch = UART_GET_CHAR(sport);
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- goto ignore_char;
sport->port.icount.rx++;
flg = TTY_NORMAL;
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index c17d680e3f0..2ca620900bc 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -33,6 +33,7 @@
#include <linux/device.h>
#include <linux/serial.h> /* for serial_state and serial_icounter_struct */
#include <linux/delay.h>
+#include <linux/mutex.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
@@ -47,7 +48,7 @@
/*
* This is used to lock changes in serial line configuration.
*/
-static DECLARE_MUTEX(port_sem);
+static DEFINE_MUTEX(port_mutex);
#define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8)
@@ -1440,6 +1441,7 @@ uart_block_til_ready(struct file *filp, struct uart_state *state)
* modem is ready for us.
*/
spin_lock_irq(&port->lock);
+ port->ops->enable_ms(port);
mctrl = port->ops->get_mctrl(port);
spin_unlock_irq(&port->lock);
if (mctrl & TIOCM_CAR)
@@ -1471,7 +1473,7 @@ static struct uart_state *uart_get(struct uart_driver *drv, int line)
{
struct uart_state *state;
- down(&port_sem);
+ mutex_lock(&port_mutex);
state = drv->state + line;
if (down_interruptible(&state->sem)) {
state = ERR_PTR(-ERESTARTSYS);
@@ -1508,7 +1510,7 @@ static struct uart_state *uart_get(struct uart_driver *drv, int line)
}
out:
- up(&port_sem);
+ mutex_unlock(&port_mutex);
return state;
}
@@ -2218,7 +2220,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port)
state = drv->state + port->line;
- down(&port_sem);
+ mutex_lock(&port_mutex);
if (state->port) {
ret = -EINVAL;
goto out;
@@ -2254,7 +2256,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port)
register_console(port->cons);
out:
- up(&port_sem);
+ mutex_unlock(&port_mutex);
return ret;
}
@@ -2278,7 +2280,7 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port)
printk(KERN_ALERT "Removing wrong port: %p != %p\n",
state->port, port);
- down(&port_sem);
+ mutex_lock(&port_mutex);
/*
* Remove the devices from devfs
@@ -2287,7 +2289,7 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port)
uart_unconfigure_port(drv, state);
state->port = NULL;
- up(&port_sem);
+ mutex_unlock(&port_mutex);
return 0;
}
@@ -2307,7 +2309,7 @@ int uart_match_port(struct uart_port *port1, struct uart_port *port2)
return (port1->iobase == port2->iobase) &&
(port1->hub6 == port2->hub6);
case UPIO_MEM:
- return (port1->membase == port2->membase);
+ return (port1->mapbase == port2->mapbase);
}
return 0;
}
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 96969cb960a..c30333694fd 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -785,6 +785,8 @@ static struct pcmcia_device_id serial_ids[] = {
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "3CXEM556.cis"),
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "3CXEM556.cis"),
PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0x0710, "SW_7xx_SER.cis"), /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */
+ PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */
+ PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */
PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "MT5634ZLX.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "COMpad4.cis"),
PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "COMpad2.cis"),
diff --git a/drivers/serial/serial_lh7a40x.c b/drivers/serial/serial_lh7a40x.c
index d01dbe5da3b..d4a1f0e798c 100644
--- a/drivers/serial/serial_lh7a40x.c
+++ b/drivers/serial/serial_lh7a40x.c
@@ -148,15 +148,6 @@ lh7a40xuart_rx_chars (struct uart_port* port)
unsigned int data, flag;/* Received data and status */
while (!(UR (port, UART_R_STATUS) & nRxRdy) && --cbRxMax) {
- if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
- if (tty->low_latency)
- tty_flip_buffer_push(tty);
- /*
- * If this failed then we will throw away the
- * bytes but must do so to clear interrupts
- */
- }
-
data = UR (port, UART_R_DATA);
flag = TTY_NORMAL;
++port->icount.rx;
diff --git a/drivers/serial/serial_txx9.c b/drivers/serial/serial_txx9.c
index f10c86d60b6..ee98a867bc6 100644
--- a/drivers/serial/serial_txx9.c
+++ b/drivers/serial/serial_txx9.c
@@ -52,6 +52,7 @@
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
+#include <linux/mutex.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -303,17 +304,6 @@ receive_chars(struct uart_txx9_port *up, unsigned int *status, struct pt_regs *r
char flag;
do {
- /* The following is not allowed by the tty layer and
- unsafe. It should be fixed ASAP */
- if (unlikely(tty->flip.count >= TTY_FLIPBUF_SIZE)) {
- if (tty->low_latency) {
- spin_unlock(&up->port.lock);
- tty_flip_buffer_push(tty);
- spin_lock(&up->port.lock);
- }
- /* If this failed then we will throw away the
- bytes but must do so to clear interrupts */
- }
ch = sio_in(up, TXX9_SIRFIFO);
flag = TTY_NORMAL;
up->port.icount.rx++;
@@ -1029,7 +1019,7 @@ static void serial_txx9_resume_port(int line)
uart_resume_port(&serial_txx9_reg, &serial_txx9_ports[line].port);
}
-static DECLARE_MUTEX(serial_txx9_sem);
+static DEFINE_MUTEX(serial_txx9_mutex);
/**
* serial_txx9_register_port - register a serial port
@@ -1048,7 +1038,7 @@ static int __devinit serial_txx9_register_port(struct uart_port *port)
struct uart_txx9_port *uart;
int ret = -ENOSPC;
- down(&serial_txx9_sem);
+ mutex_lock(&serial_txx9_mutex);
for (i = 0; i < UART_NR; i++) {
uart = &serial_txx9_ports[i];
if (uart->port.type == PORT_UNKNOWN)
@@ -1069,7 +1059,7 @@ static int __devinit serial_txx9_register_port(struct uart_port *port)
if (ret == 0)
ret = uart->port.line;
}
- up(&serial_txx9_sem);
+ mutex_unlock(&serial_txx9_mutex);
return ret;
}
@@ -1084,7 +1074,7 @@ static void __devexit serial_txx9_unregister_port(int line)
{
struct uart_txx9_port *uart = &serial_txx9_ports[line];
- down(&serial_txx9_sem);
+ mutex_lock(&serial_txx9_mutex);
uart_remove_one_port(&serial_txx9_reg, &uart->port);
uart->port.flags = 0;
uart->port.type = PORT_UNKNOWN;
@@ -1093,7 +1083,7 @@ static void __devexit serial_txx9_unregister_port(int line)
uart->port.membase = 0;
uart->port.dev = NULL;
uart_add_one_port(&serial_txx9_reg, &uart->port);
- up(&serial_txx9_sem);
+ mutex_unlock(&serial_txx9_mutex);
}
/*
@@ -1195,7 +1185,7 @@ static int __init serial_txx9_init(void)
serial_txx9_register_ports(&serial_txx9_reg);
#ifdef ENABLE_SERIAL_TXX9_PCI
- ret = pci_module_init(&serial_txx9_pci_driver);
+ ret = pci_register_driver(&serial_txx9_pci_driver);
#endif
}
return ret;
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 430754ebac8..a9e07075962 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -482,6 +482,7 @@ static inline void sci_receive_chars(struct uart_port *port,
struct tty_struct *tty = port->info->tty;
int i, count, copied = 0;
unsigned short status;
+ unsigned char flag;
status = sci_in(port, SCxSR);
if (!(status & SCxSR_RDxF(port)))
@@ -499,8 +500,7 @@ static inline void sci_receive_chars(struct uart_port *port,
#endif
/* Don't copy more bytes than there is room for in the buffer */
- if (tty->flip.count + count > TTY_FLIPBUF_SIZE)
- count = TTY_FLIPBUF_SIZE - tty->flip.count;
+ count = tty_buffer_request_room(tty, count);
/* If for any reason we can't copy more data, we're done! */
if (count == 0)
@@ -512,8 +512,7 @@ static inline void sci_receive_chars(struct uart_port *port,
|| uart_handle_sysrq_char(port, c, regs)) {
count = 0;
} else {
- tty->flip.char_buf_ptr[0] = c;
- tty->flip.flag_buf_ptr[0] = TTY_NORMAL;
+ tty_insert_flip_char(tty, c, TTY_NORMAL);
}
} else {
for (i=0; i<count; i++) {
@@ -542,26 +541,21 @@ static inline void sci_receive_chars(struct uart_port *port,
}
/* Store data and status */
- tty->flip.char_buf_ptr[i] = c;
if (status&SCxSR_FER(port)) {
- tty->flip.flag_buf_ptr[i] = TTY_FRAME;
+ flag = TTY_FRAME;
pr_debug("sci: frame error\n");
} else if (status&SCxSR_PER(port)) {
- tty->flip.flag_buf_ptr[i] = TTY_PARITY;
+ flag = TTY_PARITY;
pr_debug("sci: parity error\n");
- } else {
- tty->flip.flag_buf_ptr[i] = TTY_NORMAL;
- }
+ } else
+ flag = TTY_NORMAL;
+ tty_insert_flip_char(tty, c, flag);
}
}
sci_in(port, SCxSR); /* dummy read */
sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
- /* Update the kernel buffer end */
- tty->flip.count += count;
- tty->flip.char_buf_ptr += count;
- tty->flip.flag_buf_ptr += count;
copied += count;
port->icount.rx += count;
}
@@ -608,48 +602,45 @@ static inline int sci_handle_errors(struct uart_port *port)
unsigned short status = sci_in(port, SCxSR);
struct tty_struct *tty = port->info->tty;
- if (status&SCxSR_ORER(port) && tty->flip.count<TTY_FLIPBUF_SIZE) {
+ if (status&SCxSR_ORER(port)) {
/* overrun error */
- copied++;
- *tty->flip.flag_buf_ptr++ = TTY_OVERRUN;
+ if(tty_insert_flip_char(tty, 0, TTY_OVERRUN))
+ copied++;
pr_debug("sci: overrun error\n");
}
- if (status&SCxSR_FER(port) && tty->flip.count<TTY_FLIPBUF_SIZE) {
+ if (status&SCxSR_FER(port)) {
if (sci_rxd_in(port) == 0) {
/* Notify of BREAK */
struct sci_port * sci_port = (struct sci_port *)port;
- if(!sci_port->break_flag) {
- sci_port->break_flag = 1;
- sci_schedule_break_timer((struct sci_port *)port);
+ if(!sci_port->break_flag) {
+ sci_port->break_flag = 1;
+ sci_schedule_break_timer((struct sci_port *)port);
/* Do sysrq handling. */
- if(uart_handle_break(port)) {
+ if(uart_handle_break(port))
return 0;
- }
pr_debug("sci: BREAK detected\n");
- copied++;
- *tty->flip.flag_buf_ptr++ = TTY_BREAK;
+ if(tty_insert_flip_char(tty, 0, TTY_BREAK))
+ copied++;
}
}
else {
/* frame error */
- copied++;
- *tty->flip.flag_buf_ptr++ = TTY_FRAME;
+ if(tty_insert_flip_char(tty, 0, TTY_FRAME))
+ copied++;
pr_debug("sci: frame error\n");
}
}
- if (status&SCxSR_PER(port) && tty->flip.count<TTY_FLIPBUF_SIZE) {
+ if (status&SCxSR_PER(port)) {
+ if(tty_insert_flip_char(tty, 0, TTY_PARITY))
+ copied++;
/* parity error */
- copied++;
- *tty->flip.flag_buf_ptr++ = TTY_PARITY;
pr_debug("sci: parity error\n");
}
- if (copied) {
- tty->flip.count += copied;
+ if (copied)
tty_flip_buffer_push(tty);
- }
return copied;
}
@@ -661,15 +652,14 @@ static inline int sci_handle_breaks(struct uart_port *port)
struct tty_struct *tty = port->info->tty;
struct sci_port *s = &sci_ports[port->line];
- if (!s->break_flag && status & SCxSR_BRK(port) &&
- tty->flip.count < TTY_FLIPBUF_SIZE) {
+ if (!s->break_flag && status & SCxSR_BRK(port))
#if defined(CONFIG_CPU_SH3)
/* Debounce break */
s->break_flag = 1;
#endif
/* Notify of BREAK */
- copied++;
- *tty->flip.flag_buf_ptr++ = TTY_BREAK;
+ if(tty_insert_flip_char(tty, 0, TTY_BREAK))
+ copied++;
pr_debug("sci: BREAK detected\n");
}
@@ -677,19 +667,15 @@ static inline int sci_handle_breaks(struct uart_port *port)
/* XXX: Handle SCIF overrun error */
if (port->type == PORT_SCIF && (sci_in(port, SCLSR) & SCIF_ORER) != 0) {
sci_out(port, SCLSR, 0);
- if(tty->flip.count<TTY_FLIPBUF_SIZE) {
+ if(tty_insert_flip_char(tty, 0, TTY_OVERRUN)) {
copied++;
- *tty->flip.flag_buf_ptr++ = TTY_OVERRUN;
pr_debug("sci: overrun error\n");
}
}
#endif
- if (copied) {
- tty->flip.count += copied;
+ if (copied)
tty_flip_buffer_push(tty);
- }
-
return copied;
}
@@ -732,12 +718,9 @@ static irqreturn_t sci_er_interrupt(int irq, void *ptr, struct pt_regs *regs)
struct tty_struct *tty = port->info->tty;
sci_out(port, SCLSR, 0);
- if(tty->flip.count<TTY_FLIPBUF_SIZE) {
- *tty->flip.flag_buf_ptr++ = TTY_OVERRUN;
- tty->flip.count++;
- tty_flip_buffer_push(tty);
- pr_debug("scif: overrun error\n");
- }
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_flip_buffer_push(tty);
+ pr_debug("scif: overrun error\n");
}
#endif
sci_rx_interrupt(irq, ptr, regs);
diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c
index 313f9df24a2..5468e5a767e 100644
--- a/drivers/serial/sn_console.c
+++ b/drivers/serial/sn_console.c
@@ -519,11 +519,7 @@ sn_receive_chars(struct sn_cons_port *port, struct pt_regs *regs,
/* record the character to pass up to the tty layer */
if (tty) {
- *tty->flip.char_buf_ptr = ch;
- *tty->flip.flag_buf_ptr = TTY_NORMAL;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
- if (tty->flip.count == TTY_FLIPBUF_SIZE)
+ if(tty_insert_flip_char(tty, ch, TTY_NORMAL) == 0)
break;
}
port->sc_port.icount.rx++;
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index ba9381fd3f2..7e773ff76c6 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -159,21 +159,14 @@ receive_chars(struct uart_sunsab_port *up,
saw_console_brk = 1;
for (i = 0; i < count; i++) {
- unsigned char ch = buf[i];
+ unsigned char ch = buf[i], flag;
if (tty == NULL) {
uart_handle_sysrq_char(&up->port, ch, regs);
continue;
}
- if (unlikely(tty->flip.count >= TTY_FLIPBUF_SIZE)) {
- tty->flip.work.func((void *)tty);
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- return tty; // if TTY_DONT_FLIP is set
- }
-
- *tty->flip.char_buf_ptr = ch;
- *tty->flip.flag_buf_ptr = TTY_NORMAL;
+ flag = TTY_NORMAL;
up->port.icount.rx++;
if (unlikely(stat->sreg.isr0 & (SAB82532_ISR0_PERR |
@@ -209,34 +202,21 @@ receive_chars(struct uart_sunsab_port *up,
stat->sreg.isr1 &= ((up->port.read_status_mask >> 8) & 0xff);
if (stat->sreg.isr1 & SAB82532_ISR1_BRK) {
- *tty->flip.flag_buf_ptr = TTY_BREAK;
+ flag = TTY_BREAK;
} else if (stat->sreg.isr0 & SAB82532_ISR0_PERR)
- *tty->flip.flag_buf_ptr = TTY_PARITY;
+ flag = TTY_PARITY;
else if (stat->sreg.isr0 & SAB82532_ISR0_FERR)
- *tty->flip.flag_buf_ptr = TTY_FRAME;
+ flag = TTY_FRAME;
}
if (uart_handle_sysrq_char(&up->port, ch, regs))
continue;
if ((stat->sreg.isr0 & (up->port.ignore_status_mask & 0xff)) == 0 &&
- (stat->sreg.isr1 & ((up->port.ignore_status_mask >> 8) & 0xff)) == 0){
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
- }
- if ((stat->sreg.isr0 & SAB82532_ISR0_RFO) &&
- tty->flip.count < TTY_FLIPBUF_SIZE) {
- /*
- * Overrun is special, since it's reported
- * immediately, and doesn't affect the current
- * character.
- */
- *tty->flip.flag_buf_ptr = TTY_OVERRUN;
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
- }
+ (stat->sreg.isr1 & ((up->port.ignore_status_mask >> 8) & 0xff)) == 0)
+ tty_insert_flip_char(tty, ch, flag);
+ if (stat->sreg.isr0 & SAB82532_ISR0_RFO)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
}
if (saw_console_brk)
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index f0738533f39..9a3665b34d9 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -323,19 +323,13 @@ static _INLINE_ struct tty_struct *
receive_chars(struct uart_sunsu_port *up, unsigned char *status, struct pt_regs *regs)
{
struct tty_struct *tty = up->port.info->tty;
- unsigned char ch;
+ unsigned char ch, flag;
int max_count = 256;
int saw_console_brk = 0;
do {
- if (unlikely(tty->flip.count >= TTY_FLIPBUF_SIZE)) {
- tty->flip.work.func((void *)tty);
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- return tty; // if TTY_DONT_FLIP is set
- }
ch = serial_inp(up, UART_RX);
- *tty->flip.char_buf_ptr = ch;
- *tty->flip.flag_buf_ptr = TTY_NORMAL;
+ flag = TTY_NORMAL;
up->port.icount.rx++;
if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
@@ -377,31 +371,23 @@ receive_chars(struct uart_sunsu_port *up, unsigned char *status, struct pt_regs
}
if (*status & UART_LSR_BI) {
- *tty->flip.flag_buf_ptr = TTY_BREAK;
+ flag = TTY_BREAK;
} else if (*status & UART_LSR_PE)
- *tty->flip.flag_buf_ptr = TTY_PARITY;
+ flag = TTY_PARITY;
else if (*status & UART_LSR_FE)
- *tty->flip.flag_buf_ptr = TTY_FRAME;
+ flag = TTY_FRAME;
}
if (uart_handle_sysrq_char(&up->port, ch, regs))
goto ignore_char;
- if ((*status & up->port.ignore_status_mask) == 0) {
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
- }
- if ((*status & UART_LSR_OE) &&
- tty->flip.count < TTY_FLIPBUF_SIZE) {
+ if ((*status & up->port.ignore_status_mask) == 0)
+ tty_insert_flip_char(tty, ch, flag);
+ if (*status & UART_LSR_OE)
/*
* Overrun is special, since it's reported
* immediately, and doesn't affect the current
* character.
*/
- *tty->flip.flag_buf_ptr = TTY_OVERRUN;
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
- }
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
ignore_char:
*status = serial_inp(up, UART_LSR);
} while ((*status & UART_LSR_DR) && (max_count-- > 0));
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index 7653d6cf05a..3c72484adea 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -319,7 +319,7 @@ sunzilog_receive_chars(struct uart_sunzilog_port *up,
struct pt_regs *regs)
{
struct tty_struct *tty;
- unsigned char ch, r1;
+ unsigned char ch, r1, flag;
tty = NULL;
if (up->port.info != NULL && /* Unopened serial console */
@@ -362,19 +362,8 @@ sunzilog_receive_chars(struct uart_sunzilog_port *up,
continue;
}
- if (unlikely(tty->flip.count >= TTY_FLIPBUF_SIZE)) {
- tty->flip.work.func((void *)tty);
- /*
- * The 8250 bails out of the loop here,
- * but we need to read everything, or die.
- */
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- continue;
- }
-
/* A real serial line, record the character and status. */
- *tty->flip.char_buf_ptr = ch;
- *tty->flip.flag_buf_ptr = TTY_NORMAL;
+ flag = TTY_NORMAL;
up->port.icount.rx++;
if (r1 & (BRK_ABRT | PAR_ERR | Rx_OVR | CRC_ERR)) {
if (r1 & BRK_ABRT) {
@@ -391,28 +380,21 @@ sunzilog_receive_chars(struct uart_sunzilog_port *up,
up->port.icount.overrun++;
r1 &= up->port.read_status_mask;
if (r1 & BRK_ABRT)
- *tty->flip.flag_buf_ptr = TTY_BREAK;
+ flag = TTY_BREAK;
else if (r1 & PAR_ERR)
- *tty->flip.flag_buf_ptr = TTY_PARITY;
+ flag = TTY_PARITY;
else if (r1 & CRC_ERR)
- *tty->flip.flag_buf_ptr = TTY_FRAME;
+ flag = TTY_FRAME;
}
if (uart_handle_sysrq_char(&up->port, ch, regs))
continue;
if (up->port.ignore_status_mask == 0xff ||
(r1 & up->port.ignore_status_mask) == 0) {
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
- }
- if ((r1 & Rx_OVR) &&
- tty->flip.count < TTY_FLIPBUF_SIZE) {
- *tty->flip.flag_buf_ptr = TTY_OVERRUN;
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
+ tty_insert_flip_char(tty, ch, flag);
}
+ if (r1 & Rx_OVR)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
}
return tty;
diff --git a/drivers/serial/uart00.c b/drivers/serial/uart00.c
deleted file mode 100644
index 47b504ff38b..00000000000
--- a/drivers/serial/uart00.c
+++ /dev/null
@@ -1,782 +0,0 @@
-/*
- * linux/drivers/serial/uart00.c
- *
- * Driver for UART00 serial ports
- *
- * Based on drivers/char/serial_amba.c, by ARM Limited &
- * Deep Blue Solutions Ltd.
- * Copyright 2001 Altera Corporation
- *
- * Update for 2.6.4 by Dirk Behme <dirk.behme@de.bosch.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * $Id: uart00.c,v 1.35 2002/07/28 10:03:28 rmk Exp $
- *
- */
-#include <linux/config.h>
-
-#if defined(CONFIG_SERIAL_UART00_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/console.h>
-#include <linux/sysrq.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/sizes.h>
-
-#include <asm/arch/excalibur.h>
-#define UART00_TYPE (volatile unsigned int*)
-#include <asm/arch/uart00.h>
-#include <asm/arch/int_ctrl00.h>
-
-#define UART_NR 2
-
-#define SERIAL_UART00_NAME "ttyUA"
-#define SERIAL_UART00_MAJOR 204
-#define SERIAL_UART00_MINOR 16 /* Temporary - will change in future */
-#define SERIAL_UART00_NR UART_NR
-#define UART_PORT_SIZE 0x50
-
-#define UART00_ISR_PASS_LIMIT 256
-
-/*
- * Access macros for the UART00 UARTs
- */
-#define UART_GET_INT_STATUS(p) inl(UART_ISR((p)->membase))
-#define UART_PUT_IES(p, c) outl(c,UART_IES((p)->membase))
-#define UART_GET_IES(p) inl(UART_IES((p)->membase))
-#define UART_PUT_IEC(p, c) outl(c,UART_IEC((p)->membase))
-#define UART_GET_IEC(p) inl(UART_IEC((p)->membase))
-#define UART_PUT_CHAR(p, c) outl(c,UART_TD((p)->membase))
-#define UART_GET_CHAR(p) inl(UART_RD((p)->membase))
-#define UART_GET_RSR(p) inl(UART_RSR((p)->membase))
-#define UART_GET_RDS(p) inl(UART_RDS((p)->membase))
-#define UART_GET_MSR(p) inl(UART_MSR((p)->membase))
-#define UART_GET_MCR(p) inl(UART_MCR((p)->membase))
-#define UART_PUT_MCR(p, c) outl(c,UART_MCR((p)->membase))
-#define UART_GET_MC(p) inl(UART_MC((p)->membase))
-#define UART_PUT_MC(p, c) outl(c,UART_MC((p)->membase))
-#define UART_GET_TSR(p) inl(UART_TSR((p)->membase))
-#define UART_GET_DIV_HI(p) inl(UART_DIV_HI((p)->membase))
-#define UART_PUT_DIV_HI(p,c) outl(c,UART_DIV_HI((p)->membase))
-#define UART_GET_DIV_LO(p) inl(UART_DIV_LO((p)->membase))
-#define UART_PUT_DIV_LO(p,c) outl(c,UART_DIV_LO((p)->membase))
-#define UART_RX_DATA(s) ((s) & UART_RSR_RX_LEVEL_MSK)
-#define UART_TX_READY(s) (((s) & UART_TSR_TX_LEVEL_MSK) < 15)
-//#define UART_TX_EMPTY(p) ((UART_GET_FR(p) & UART00_UARTFR_TMSK) == 0)
-
-static void uart00_stop_tx(struct uart_port *port)
-{
- UART_PUT_IEC(port, UART_IEC_TIE_MSK);
-}
-
-static void uart00_stop_rx(struct uart_port *port)
-{
- UART_PUT_IEC(port, UART_IEC_RE_MSK);
-}
-
-static void uart00_enable_ms(struct uart_port *port)
-{
- UART_PUT_IES(port, UART_IES_ME_MSK);
-}
-
-static void
-uart00_rx_chars(struct uart_port *port, struct pt_regs *regs)
-{
- struct tty_struct *tty = port->info->tty;
- unsigned int status, ch, rds, flg, ignored = 0;
-
- status = UART_GET_RSR(port);
- while (UART_RX_DATA(status)) {
- /*
- * We need to read rds before reading the
- * character from the fifo
- */
- rds = UART_GET_RDS(port);
- ch = UART_GET_CHAR(port);
- port->icount.rx++;
-
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- goto ignore_char;
-
- flg = TTY_NORMAL;
-
- /*
- * Note that the error handling code is
- * out of the main execution path
- */
- if (rds & (UART_RDS_BI_MSK |UART_RDS_FE_MSK|
- UART_RDS_PE_MSK |UART_RDS_PE_MSK))
- goto handle_error;
- if (uart_handle_sysrq_char(port, ch, regs))
- goto ignore_char;
-
- error_return:
- tty_insert_flip_char(tty, ch, flg);
-
- ignore_char:
- status = UART_GET_RSR(port);
- }
- out:
- tty_flip_buffer_push(tty);
- return;
-
- handle_error:
- if (rds & UART_RDS_BI_MSK) {
- status &= ~(UART_RDS_FE_MSK | UART_RDS_PE_MSK);
- port->icount.brk++;
- if (uart_handle_break(port))
- goto ignore_char;
- } else if (rds & UART_RDS_PE_MSK)
- port->icount.parity++;
- else if (rds & UART_RDS_FE_MSK)
- port->icount.frame++;
- if (rds & UART_RDS_OE_MSK)
- port->icount.overrun++;
-
- if (rds & port->ignore_status_mask) {
- if (++ignored > 100)
- goto out;
- goto ignore_char;
- }
- rds &= port->read_status_mask;
-
- if (rds & UART_RDS_BI_MSK)
- flg = TTY_BREAK;
- else if (rds & UART_RDS_PE_MSK)
- flg = TTY_PARITY;
- else if (rds & UART_RDS_FE_MSK)
- flg = TTY_FRAME;
-
- if (rds & UART_RDS_OE_MSK) {
- /*
- * CHECK: does overrun affect the current character?
- * ASSUMPTION: it does not.
- */
- tty_insert_flip_char(tty, ch, flg);
- ch = 0;
- flg = TTY_OVERRUN;
- }
-#ifdef SUPPORT_SYSRQ
- port->sysrq = 0;
-#endif
- goto error_return;
-}
-
-static void uart00_tx_chars(struct uart_port *port)
-{
- struct circ_buf *xmit = &port->info->xmit;
- int count;
-
- if (port->x_char) {
- while ((UART_GET_TSR(port) & UART_TSR_TX_LEVEL_MSK) == 15)
- barrier();
- UART_PUT_CHAR(port, port->x_char);
- port->icount.tx++;
- port->x_char = 0;
- return;
- }
- if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
- uart00_stop_tx(port);
- return;
- }
-
- count = port->fifosize >> 1;
- do {
- while ((UART_GET_TSR(port) & UART_TSR_TX_LEVEL_MSK) == 15)
- barrier();
- UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- port->icount.tx++;
- if (uart_circ_empty(xmit))
- break;
- } while (--count > 0);
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
-
- if (uart_circ_empty(xmit))
- uart00_stop_tx(port);
-}
-
-static void uart00_start_tx(struct uart_port *port)
-{
- UART_PUT_IES(port, UART_IES_TIE_MSK);
- uart00_tx_chars(port);
-}
-
-static void uart00_modem_status(struct uart_port *port)
-{
- unsigned int status;
-
- status = UART_GET_MSR(port);
-
- if (!(status & (UART_MSR_DCTS_MSK | UART_MSR_DDSR_MSK |
- UART_MSR_TERI_MSK | UART_MSR_DDCD_MSK)))
- return;
-
- if (status & UART_MSR_DDCD_MSK)
- uart_handle_dcd_change(port, status & UART_MSR_DCD_MSK);
-
- if (status & UART_MSR_DDSR_MSK)
- port->icount.dsr++;
-
- if (status & UART_MSR_DCTS_MSK)
- uart_handle_cts_change(port, status & UART_MSR_CTS_MSK);
-
- wake_up_interruptible(&port->info->delta_msr_wait);
-}
-
-static irqreturn_t uart00_int(int irq, void *dev_id, struct pt_regs *regs)
-{
- struct uart_port *port = dev_id;
- unsigned int status, pass_counter = 0;
-
- status = UART_GET_INT_STATUS(port);
- do {
- if (status & UART_ISR_RI_MSK)
- uart00_rx_chars(port, regs);
- if (status & UART_ISR_MI_MSK)
- uart00_modem_status(port);
- if (status & (UART_ISR_TI_MSK | UART_ISR_TII_MSK))
- uart00_tx_chars(port);
- if (pass_counter++ > UART00_ISR_PASS_LIMIT)
- break;
-
- status = UART_GET_INT_STATUS(port);
- } while (status);
-
- return IRQ_HANDLED;
-}
-
-static unsigned int uart00_tx_empty(struct uart_port *port)
-{
- return UART_GET_TSR(port) & UART_TSR_TX_LEVEL_MSK? 0 : TIOCSER_TEMT;
-}
-
-static unsigned int uart00_get_mctrl(struct uart_port *port)
-{
- unsigned int result = 0;
- unsigned int status;
-
- status = UART_GET_MSR(port);
- if (status & UART_MSR_DCD_MSK)
- result |= TIOCM_CAR;
- if (status & UART_MSR_DSR_MSK)
- result |= TIOCM_DSR;
- if (status & UART_MSR_CTS_MSK)
- result |= TIOCM_CTS;
- if (status & UART_MSR_RI_MSK)
- result |= TIOCM_RI;
-
- return result;
-}
-
-static void uart00_set_mctrl_null(struct uart_port *port, unsigned int mctrl)
-{
-}
-
-static void uart00_break_ctl(struct uart_port *port, int break_state)
-{
- unsigned long flags;
- unsigned int mcr;
-
- spin_lock_irqsave(&port->lock, flags);
- mcr = UART_GET_MCR(port);
- if (break_state == -1)
- mcr |= UART_MCR_BR_MSK;
- else
- mcr &= ~UART_MCR_BR_MSK;
- UART_PUT_MCR(port, mcr);
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void
-uart00_set_termios(struct uart_port *port, struct termios *termios,
- struct termios *old)
-{
- unsigned int uart_mc, old_ies, baud, quot;
- unsigned long flags;
-
- /*
- * We don't support CREAD (yet)
- */
- termios->c_cflag |= CREAD;
-
- /*
- * Ask the core to calculate the divisor for us.
- */
- baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
- quot = uart_get_divisor(port, baud);
-
- /* byte size and parity */
- switch (termios->c_cflag & CSIZE) {
- case CS5:
- uart_mc = UART_MC_CLS_CHARLEN_5;
- break;
- case CS6:
- uart_mc = UART_MC_CLS_CHARLEN_6;
- break;
- case CS7:
- uart_mc = UART_MC_CLS_CHARLEN_7;
- break;
- default: // CS8
- uart_mc = UART_MC_CLS_CHARLEN_8;
- break;
- }
- if (termios->c_cflag & CSTOPB)
- uart_mc|= UART_MC_ST_TWO;
- if (termios->c_cflag & PARENB) {
- uart_mc |= UART_MC_PE_MSK;
- if (!(termios->c_cflag & PARODD))
- uart_mc |= UART_MC_EP_MSK;
- }
-
- spin_lock_irqsave(&port->lock, flags);
-
- /*
- * Update the per-port timeout.
- */
- uart_update_timeout(port, termios->c_cflag, baud);
-
- port->read_status_mask = UART_RDS_OE_MSK;
- if (termios->c_iflag & INPCK)
- port->read_status_mask |= UART_RDS_FE_MSK | UART_RDS_PE_MSK;
- if (termios->c_iflag & (BRKINT | PARMRK))
- port->read_status_mask |= UART_RDS_BI_MSK;
-
- /*
- * Characters to ignore
- */
- port->ignore_status_mask = 0;
- if (termios->c_iflag & IGNPAR)
- port->ignore_status_mask |= UART_RDS_FE_MSK | UART_RDS_PE_MSK;
- if (termios->c_iflag & IGNBRK) {
- port->ignore_status_mask |= UART_RDS_BI_MSK;
- /*
- * If we're ignoring parity and break indicators,
- * ignore overruns too (for real raw support).
- */
- if (termios->c_iflag & IGNPAR)
- port->ignore_status_mask |= UART_RDS_OE_MSK;
- }
-
- /* first, disable everything */
- old_ies = UART_GET_IES(port);
-
- if (UART_ENABLE_MS(port, termios->c_cflag))
- old_ies |= UART_IES_ME_MSK;
-
- /* Set baud rate */
- UART_PUT_DIV_LO(port, (quot & 0xff));
- UART_PUT_DIV_HI(port, ((quot & 0xf00) >> 8));
-
- UART_PUT_MC(port, uart_mc);
- UART_PUT_IES(port, old_ies);
-
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static int uart00_startup(struct uart_port *port)
-{
- int result;
-
- /*
- * Allocate the IRQ
- */
- result = request_irq(port->irq, uart00_int, 0, "uart00", port);
- if (result) {
- printk(KERN_ERR "Request of irq %d failed\n", port->irq);
- return result;
- }
-
- /*
- * Finally, enable interrupts. Use the TII interrupt to minimise
- * the number of interrupts generated. If higher performance is
- * needed, consider using the TI interrupt with a suitable FIFO
- * threshold
- */
- UART_PUT_IES(port, UART_IES_RE_MSK | UART_IES_TIE_MSK);
-
- return 0;
-}
-
-static void uart00_shutdown(struct uart_port *port)
-{
- /*
- * disable all interrupts, disable the port
- */
- UART_PUT_IEC(port, 0xff);
-
- /* disable break condition and fifos */
- UART_PUT_MCR(port, UART_GET_MCR(port) &~UART_MCR_BR_MSK);
-
- /*
- * Free the interrupt
- */
- free_irq(port->irq, port);
-}
-
-static const char *uart00_type(struct uart_port *port)
-{
- return port->type == PORT_UART00 ? "Altera UART00" : NULL;
-}
-
-/*
- * Release the memory region(s) being used by 'port'
- */
-static void uart00_release_port(struct uart_port *port)
-{
- release_mem_region(port->mapbase, UART_PORT_SIZE);
-
-#ifdef CONFIG_ARCH_CAMELOT
- if (port->membase != (void*)IO_ADDRESS(EXC_UART00_BASE)) {
- iounmap(port->membase);
- }
-#endif
-}
-
-/*
- * Request the memory region(s) being used by 'port'
- */
-static int uart00_request_port(struct uart_port *port)
-{
- return request_mem_region(port->mapbase, UART_PORT_SIZE, "serial_uart00")
- != NULL ? 0 : -EBUSY;
-}
-
-/*
- * Configure/autoconfigure the port.
- */
-static void uart00_config_port(struct uart_port *port, int flags)
-{
-
- /*
- * Map the io memory if this is a soft uart
- */
- if (!port->membase)
- port->membase = ioremap_nocache(port->mapbase,SZ_4K);
-
- if (!port->membase)
- printk(KERN_ERR "serial00: cannot map io memory\n");
- else
- port->type = PORT_UART00;
-
-}
-
-/*
- * verify the new serial_struct (for TIOCSSERIAL).
- */
-static int uart00_verify_port(struct uart_port *port, struct serial_struct *ser)
-{
- int ret = 0;
- if (ser->type != PORT_UNKNOWN && ser->type != PORT_UART00)
- ret = -EINVAL;
- if (ser->irq < 0 || ser->irq >= NR_IRQS)
- ret = -EINVAL;
- if (ser->baud_base < 9600)
- ret = -EINVAL;
- return ret;
-}
-
-static struct uart_ops uart00_pops = {
- .tx_empty = uart00_tx_empty,
- .set_mctrl = uart00_set_mctrl_null,
- .get_mctrl = uart00_get_mctrl,
- .stop_tx = uart00_stop_tx,
- .start_tx = uart00_start_tx,
- .stop_rx = uart00_stop_rx,
- .enable_ms = uart00_enable_ms,
- .break_ctl = uart00_break_ctl,
- .startup = uart00_startup,
- .shutdown = uart00_shutdown,
- .set_termios = uart00_set_termios,
- .type = uart00_type,
- .release_port = uart00_release_port,
- .request_port = uart00_request_port,
- .config_port = uart00_config_port,
- .verify_port = uart00_verify_port,
-};
-
-
-#ifdef CONFIG_ARCH_CAMELOT
-static struct uart_port epxa10db_port = {
- .membase = (void*)IO_ADDRESS(EXC_UART00_BASE),
- .mapbase = EXC_UART00_BASE,
- .iotype = SERIAL_IO_MEM,
- .irq = IRQ_UART,
- .uartclk = EXC_AHB2_CLK_FREQUENCY,
- .fifosize = 16,
- .ops = &uart00_pops,
- .flags = ASYNC_BOOT_AUTOCONF,
-};
-#endif
-
-
-#ifdef CONFIG_SERIAL_UART00_CONSOLE
-static void uart00_console_write(struct console *co, const char *s, unsigned count)
-{
-#ifdef CONFIG_ARCH_CAMELOT
- struct uart_port *port = &epxa10db_port;
- unsigned int status, old_ies;
- int i;
-
- /*
- * First save the CR then disable the interrupts
- */
- old_ies = UART_GET_IES(port);
- UART_PUT_IEC(port,0xff);
-
- /*
- * Now, do each character
- */
- for (i = 0; i < count; i++) {
- do {
- status = UART_GET_TSR(port);
- } while (!UART_TX_READY(status));
- UART_PUT_CHAR(port, s[i]);
- if (s[i] == '\n') {
- do {
- status = UART_GET_TSR(port);
- } while (!UART_TX_READY(status));
- UART_PUT_CHAR(port, '\r');
- }
- }
-
- /*
- * Finally, wait for transmitter to become empty
- * and restore the IES
- */
- do {
- status = UART_GET_TSR(port);
- } while (status & UART_TSR_TX_LEVEL_MSK);
- UART_PUT_IES(port, old_ies);
-#endif
-}
-
-static void __init
-uart00_console_get_options(struct uart_port *port, int *baud,
- int *parity, int *bits)
-{
- unsigned int uart_mc, quot;
-
- uart_mc = UART_GET_MC(port);
-
- *parity = 'n';
- if (uart_mc & UART_MC_PE_MSK) {
- if (uart_mc & UART_MC_EP_MSK)
- *parity = 'e';
- else
- *parity = 'o';
- }
-
- switch (uart_mc & UART_MC_CLS_MSK) {
- case UART_MC_CLS_CHARLEN_5:
- *bits = 5;
- break;
- case UART_MC_CLS_CHARLEN_6:
- *bits = 6;
- break;
- case UART_MC_CLS_CHARLEN_7:
- *bits = 7;
- break;
- case UART_MC_CLS_CHARLEN_8:
- *bits = 8;
- break;
- }
- quot = UART_GET_DIV_LO(port) | (UART_GET_DIV_HI(port) << 8);
- *baud = port->uartclk / (16 *quot );
-}
-
-static int __init uart00_console_setup(struct console *co, char *options)
-{
- struct uart_port *port;
- int baud = 115200;
- int bits = 8;
- int parity = 'n';
- int flow = 'n';
-
-#ifdef CONFIG_ARCH_CAMELOT
- port = &epxa10db_port; ;
-#else
- return -ENODEV;
-#endif
- if (options)
- uart_parse_options(options, &baud, &parity, &bits, &flow);
- else
- uart00_console_get_options(port, &baud, &parity, &bits);
-
- return uart_set_options(port, co, baud, parity, bits, flow);
-}
-
-extern struct uart_driver uart00_reg;
-static struct console uart00_console = {
- .name = SERIAL_UART00_NAME,
- .write = uart00_console_write,
- .device = uart_console_device,
- .setup = uart00_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = 0,
- .data = &uart00_reg,
-};
-
-static int __init uart00_console_init(void)
-{
- register_console(&uart00_console);
- return 0;
-}
-console_initcall(uart00_console_init);
-
-#define UART00_CONSOLE &uart00_console
-#else
-#define UART00_CONSOLE NULL
-#endif
-
-static struct uart_driver uart00_reg = {
- .owner = NULL,
- .driver_name = SERIAL_UART00_NAME,
- .dev_name = SERIAL_UART00_NAME,
- .major = SERIAL_UART00_MAJOR,
- .minor = SERIAL_UART00_MINOR,
- .nr = UART_NR,
- .cons = UART00_CONSOLE,
-};
-
-struct dev_port_entry{
- unsigned int base_addr;
- struct uart_port *port;
-};
-
-#ifdef CONFIG_PLD_HOTSWAP
-
-static struct dev_port_entry dev_port_map[UART_NR];
-
-/*
- * Keep a mapping of dev_info addresses -> port lines to use when
- * removing ports dev==NULL indicates unused entry
- */
-
-struct uart00_ps_data{
- unsigned int clk;
- unsigned int fifosize;
-};
-
-int uart00_add_device(struct pldhs_dev_info* dev_info, void* dev_ps_data)
-{
- struct uart00_ps_data* dev_ps=dev_ps_data;
- struct uart_port * port;
- int i,result;
-
- i=0;
- while(dev_port_map[i].port)
- i++;
-
- if(i==UART_NR){
- printk(KERN_WARNING "uart00: Maximum number of ports reached\n");
- return 0;
- }
-
- port=kmalloc(sizeof(struct uart_port),GFP_KERNEL);
- if(!port)
- return -ENOMEM;
-
- printk("clk=%d fifo=%d\n",dev_ps->clk,dev_ps->fifosize);
- port->membase=0;
- port->mapbase=dev_info->base_addr;
- port->iotype=SERIAL_IO_MEM;
- port->irq=dev_info->irq;
- port->uartclk=dev_ps->clk;
- port->fifosize=dev_ps->fifosize;
- port->ops=&uart00_pops;
- port->line=i;
- port->flags=ASYNC_BOOT_AUTOCONF;
-
- result=uart_add_one_port(&uart00_reg, port);
- if(result){
- printk("uart_add_one_port returned %d\n",result);
- return result;
- }
- dev_port_map[i].base_addr=dev_info->base_addr;
- dev_port_map[i].port=port;
- printk("uart00: added device at %x as ttyUA%d\n",dev_port_map[i].base_addr,i);
- return 0;
-
-}
-
-int uart00_remove_devices(void)
-{
- int i,result;
-
-
- result=0;
- for(i=1;i<UART_NR;i++){
- if(dev_port_map[i].base_addr){
- result=uart_remove_one_port(&uart00_reg, dev_port_map[i].port);
- if(result)
- return result;
-
- /* port removed successfully, so now tidy up */
- kfree(dev_port_map[i].port);
- dev_port_map[i].base_addr=0;
- dev_port_map[i].port=NULL;
- }
- }
- return 0;
-
-}
-
-struct pld_hotswap_ops uart00_pldhs_ops={
- .name = "uart00",
- .add_device = uart00_add_device,
- .remove_devices = uart00_remove_devices,
-};
-
-#endif
-
-static int __init uart00_init(void)
-{
- int result;
-
- printk(KERN_INFO "Serial: UART00 driver $Revision: 1.35 $\n");
-
- printk(KERN_WARNING "serial_uart00:Using temporary major/minor pairs"
- " - these WILL change in the future\n");
-
- result = uart_register_driver(&uart00_reg);
- if (result)
- return result;
-#ifdef CONFIG_ARCH_CAMELOT
- result = uart_add_one_port(&uart00_reg,&epxa10db_port);
-#endif
- if (result)
- uart_unregister_driver(&uart00_reg);
-
-#ifdef CONFIG_PLD_HOTSWAP
- pldhs_register_driver(&uart00_pldhs_ops);
-#endif
- return result;
-}
-
-__initcall(uart00_init);
diff --git a/drivers/serial/vr41xx_siu.c b/drivers/serial/vr41xx_siu.c
index 865d4dea65d..d61494d185c 100644
--- a/drivers/serial/vr41xx_siu.c
+++ b/drivers/serial/vr41xx_siu.c
@@ -1,7 +1,7 @@
/*
* Driver for NEC VR4100 series Serial Interface Unit.
*
- * Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2004-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* Based on drivers/serial/8250.c, by Russell King.
*
@@ -371,11 +371,6 @@ static inline void receive_chars(struct uart_port *port, uint8_t *status,
lsr = *status;
do {
- if (unlikely(tty->flip.count >= TTY_FLIPBUF_SIZE)) {
- if (tty->low_latency)
- tty_flip_buffer_push(tty);
- }
-
ch = siu_read(port, UART_RX);
port->icount.rx++;
flag = TTY_NORMAL;
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index b2833614865..c1b47d74e20 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -532,9 +532,9 @@ static void speedtch_handle_int(struct urb *int_urb, struct pt_regs *regs)
int ret = int_urb->status;
/* The magic interrupt for "up state" */
- const static unsigned char up_int[6] = { 0xa1, 0x00, 0x01, 0x00, 0x00, 0x00 };
+ static const unsigned char up_int[6] = { 0xa1, 0x00, 0x01, 0x00, 0x00, 0x00 };
/* The magic interrupt for "down state" */
- const static unsigned char down_int[6] = { 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ static const unsigned char down_int[6] = { 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00 };
atm_dbg(usbatm, "%s entered\n", __func__);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 248279e44c9..b9fd39fd1b5 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -335,14 +335,9 @@ next_buffer:
dbg("acm_rx_tasklet: procesing buf 0x%p, size = %d\n", buf, buf->size);
- for (i = 0; i < buf->size && !acm->throttle; i++) {
- /* if we insert more than TTY_FLIPBUF_SIZE characters,
- we drop them. */
- if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
- tty_flip_buffer_push(tty);
- }
- tty_insert_flip_char(tty, buf->base[i], 0);
- }
+ tty_buffer_request_room(tty, buf->size);
+ if (!acm->throttle)
+ tty_insert_flip_string(tty, buf->base, buf->size);
tty_flip_buffer_push(tty);
spin_lock(&acm->throttle_lock);
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index c44bbedec81..3cf945cc5b9 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -184,13 +184,13 @@ static void update_bus(struct dentry *bus)
bus->d_inode->i_gid = busgid;
bus->d_inode->i_mode = S_IFDIR | busmode;
- down(&bus->d_inode->i_sem);
+ mutex_lock(&bus->d_inode->i_mutex);
- list_for_each_entry(dev, &bus->d_subdirs, d_child)
+ list_for_each_entry(dev, &bus->d_subdirs, d_u.d_child)
if (dev->d_inode)
update_dev(dev);
- up(&bus->d_inode->i_sem);
+ mutex_unlock(&bus->d_inode->i_mutex);
}
static void update_sb(struct super_block *sb)
@@ -201,9 +201,9 @@ static void update_sb(struct super_block *sb)
if (!root)
return;
- down(&root->d_inode->i_sem);
+ mutex_lock(&root->d_inode->i_mutex);
- list_for_each_entry(bus, &root->d_subdirs, d_child) {
+ list_for_each_entry(bus, &root->d_subdirs, d_u.d_child) {
if (bus->d_inode) {
switch (S_IFMT & bus->d_inode->i_mode) {
case S_IFDIR:
@@ -219,7 +219,7 @@ static void update_sb(struct super_block *sb)
}
}
- up(&root->d_inode->i_sem);
+ mutex_unlock(&root->d_inode->i_mutex);
}
static int remount(struct super_block *sb, int *flags, char *data)
@@ -319,7 +319,7 @@ static int usbfs_empty (struct dentry *dentry)
spin_lock(&dcache_lock);
list_for_each(list, &dentry->d_subdirs) {
- struct dentry *de = list_entry(list, struct dentry, d_child);
+ struct dentry *de = list_entry(list, struct dentry, d_u.d_child);
if (usbfs_positive(de)) {
spin_unlock(&dcache_lock);
return 0;
@@ -333,10 +333,10 @@ static int usbfs_empty (struct dentry *dentry)
static int usbfs_unlink (struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
dentry->d_inode->i_nlink--;
dput(dentry);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
d_delete(dentry);
return 0;
}
@@ -346,7 +346,7 @@ static int usbfs_rmdir(struct inode *dir, struct dentry *dentry)
int error = -ENOTEMPTY;
struct inode * inode = dentry->d_inode;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
dentry_unhash(dentry);
if (usbfs_empty(dentry)) {
dentry->d_inode->i_nlink -= 2;
@@ -355,7 +355,7 @@ static int usbfs_rmdir(struct inode *dir, struct dentry *dentry)
dir->i_nlink--;
error = 0;
}
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
if (!error)
d_delete(dentry);
dput(dentry);
@@ -380,7 +380,7 @@ static loff_t default_file_lseek (struct file *file, loff_t offset, int orig)
{
loff_t retval = -EINVAL;
- down(&file->f_dentry->d_inode->i_sem);
+ mutex_lock(&file->f_dentry->d_inode->i_mutex);
switch(orig) {
case 0:
if (offset > 0) {
@@ -397,7 +397,7 @@ static loff_t default_file_lseek (struct file *file, loff_t offset, int orig)
default:
break;
}
- up(&file->f_dentry->d_inode->i_sem);
+ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
return retval;
}
@@ -480,7 +480,7 @@ static int fs_create_by_name (const char *name, mode_t mode,
}
*dentry = NULL;
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(dentry)) {
if ((mode & S_IFMT) == S_IFDIR)
@@ -489,7 +489,7 @@ static int fs_create_by_name (const char *name, mode_t mode,
error = usbfs_create (parent->d_inode, *dentry, mode);
} else
error = PTR_ERR(dentry);
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
return error;
}
@@ -528,7 +528,7 @@ static void fs_remove_file (struct dentry *dentry)
if (!parent || !parent->d_inode)
return;
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
if (usbfs_positive(dentry)) {
if (dentry->d_inode) {
if (S_ISDIR(dentry->d_inode->i_mode))
@@ -538,7 +538,7 @@ static void fs_remove_file (struct dentry *dentry)
dput(dentry);
}
}
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
}
/* --------------------------------------------------------------------- */
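These usbfs hunks (and the gadget ones below) are part of the tree-wide switch from the inode semaphore to the new i_mutex field: each down(&inode->i_sem)/up(&inode->i_sem) pair becomes mutex_lock(&inode->i_mutex)/mutex_unlock(&inode->i_mutex). A minimal sketch of the converted locking, with a hypothetical helper name:

	/* before: down(&dir->i_sem);        ... up(&dir->i_sem);
	 * after:  mutex_lock(&dir->i_mutex); ... mutex_unlock(&dir->i_mutex);
	 */
	static void example_with_dir_locked(struct inode *dir)	/* name is illustrative */
	{
		mutex_lock(&dir->i_mutex);
		/* ... create, unlink or look up entries under 'dir' ... */
		mutex_unlock(&dir->i_mutex);
	}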
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 0cea9782d7d..de59c58896d 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -1891,7 +1891,7 @@ static int fsync_sub(struct lun *curlun)
return -EINVAL;
inode = filp->f_dentry->d_inode;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
current->flags |= PF_SYNCWRITE;
rc = filemap_fdatawrite(inode->i_mapping);
err = filp->f_op->fsync(filp, filp->f_dentry, 1);
@@ -1901,7 +1901,7 @@ static int fsync_sub(struct lun *curlun)
if (!rc)
rc = err;
current->flags &= ~PF_SYNCWRITE;
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
VLDBG(curlun, "fdatasync -> %d\n", rc);
return rc;
}
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 5c40980a5bd..c6c279de832 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1562,10 +1562,10 @@ restart:
spin_unlock_irq (&dev->lock);
/* break link to dcache */
- down (&parent->i_sem);
+ mutex_lock (&parent->i_mutex);
d_delete (dentry);
dput (dentry);
- up (&parent->i_sem);
+ mutex_unlock (&parent->i_mutex);
/* fds may still be open */
goto restart;
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
index 65e084a2c87..2e6926b3345 100644
--- a/drivers/usb/gadget/serial.c
+++ b/drivers/usb/gadget/serial.c
@@ -1271,6 +1271,7 @@ static int gs_recv_packet(struct gs_dev *dev, char *packet, unsigned int size)
unsigned int len;
struct gs_port *port;
int ret;
+ struct tty_struct *tty;
/* TEMPORARY -- only port 0 is supported right now */
port = dev->dev_port[0];
@@ -1290,7 +1291,10 @@ static int gs_recv_packet(struct gs_dev *dev, char *packet, unsigned int size)
goto exit;
}
- if (port->port_tty == NULL) {
+
+ tty = port->port_tty;
+
+ if (tty == NULL) {
printk(KERN_ERR "gs_recv_packet: port=%d, NULL tty pointer\n",
port->port_num);
ret = -EIO;
@@ -1304,20 +1308,13 @@ static int gs_recv_packet(struct gs_dev *dev, char *packet, unsigned int size)
goto exit;
}
- len = (unsigned int)(TTY_FLIPBUF_SIZE - port->port_tty->flip.count);
- if (len < size)
- size = len;
-
- if (size > 0) {
- memcpy(port->port_tty->flip.char_buf_ptr, packet, size);
- port->port_tty->flip.char_buf_ptr += size;
- port->port_tty->flip.count += size;
+ len = tty_buffer_request_room(tty, size);
+ if (len > 0) {
+ tty_insert_flip_string(tty, packet, len);
tty_flip_buffer_push(port->port_tty);
wake_up_interruptible(&port->port_tty->read_wait);
}
-
ret = 0;
-
exit:
spin_unlock(&port->port_lock);
return ret;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index ed1899d307d..be3fd9bce57 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -11,14 +11,14 @@ config USB_EHCI_HCD
The Enhanced Host Controller Interface (EHCI) is standard for USB 2.0
"high speed" (480 Mbit/sec, 60 Mbyte/sec) host controller hardware.
If your USB host controller supports USB 2.0, you will likely want to
- configure this Host Controller Driver. At this writing, the primary
- implementation of EHCI is a chip from NEC, widely available in add-on
- PCI cards, but implementations are in the works from other vendors
- including Intel and Philips. Motherboard support is appearing.
+ configure this Host Controller Driver. At the time of this writing,
+ the primary implementation of EHCI is a chip from NEC, widely available
+ in add-on PCI cards, but implementations are in the works from other
+ vendors including Intel and Philips. Motherboard support is appearing.
EHCI controllers are packaged with "companion" host controllers (OHCI
or UHCI) to handle USB 1.1 devices connected to root hub ports. Ports
- will connect to EHCI if it the device is high speed, otherwise they
+ will connect to EHCI if the device is high speed, otherwise they
connect to a companion controller. If you configure EHCI, you should
probably configure the OHCI (for NEC and some other vendors) USB Host
Controller Driver or UHCI (for Via motherboards) Host Controller
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
index d9cf3b327d9..77cd6ac07e3 100644
--- a/drivers/usb/host/ohci-au1xxx.c
+++ b/drivers/usb/host/ohci-au1xxx.c
@@ -19,6 +19,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/signal.h>
#include <asm/mach-au1x00/au1000.h>
diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c
index 3959ccc8833..0020ed7a39d 100644
--- a/drivers/usb/host/ohci-lh7a404.c
+++ b/drivers/usb/host/ohci-lh7a404.c
@@ -17,6 +17,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/signal.h>
#include <asm/hardware.h>
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index c9e29d80871..3785b3f7df1 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -17,6 +17,7 @@
#include <linux/signal.h> /* SA_INTERRUPT */
#include <linux/jiffies.h>
#include <linux/platform_device.h>
+#include <linux/clk.h>
#include <asm/hardware.h>
#include <asm/io.h>
@@ -27,7 +28,6 @@
#include <asm/arch/gpio.h>
#include <asm/arch/fpga.h>
#include <asm/arch/usb.h>
-#include <asm/hardware/clock.h>
/* OMAP-1510 OHCI has its own MMU for DMA */
diff --git a/drivers/usb/host/ohci-ppc-soc.c b/drivers/usb/host/ohci-ppc-soc.c
index 2ec6a78bd65..b2a8dfa4887 100644
--- a/drivers/usb/host/ohci-ppc-soc.c
+++ b/drivers/usb/host/ohci-ppc-soc.c
@@ -15,6 +15,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/signal.h>
/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 35cc9402adc..372527a8359 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -20,9 +20,9 @@
*/
#include <linux/platform_device.h>
+#include <linux/clk.h>
#include <asm/hardware.h>
-#include <asm/hardware/clock.h>
#include <asm/arch/usb-control.h>
#define valid_port(idx) ((idx) == 1 || (idx) == 2)
@@ -363,7 +363,6 @@ int usb_hcd_s3c2410_probe (const struct hc_driver *driver,
goto err1;
}
- clk_use(clk);
s3c2410_start_hc(dev, hcd);
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
@@ -384,7 +383,6 @@ int usb_hcd_s3c2410_probe (const struct hc_driver *driver,
err2:
s3c2410_stop_hc(dev);
iounmap(hcd->regs);
- clk_unuse(clk);
clk_put(clk);
err1:
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 458f2acdeb0..28538db9eaf 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -674,7 +674,7 @@ struct vendor_product
/* These are taken from the msmUSB.inf file on the Windows driver CD */
-const static struct vendor_product mts_supported_products[] =
+static const struct vendor_product mts_supported_products[] =
{
{ "Phantom 336CX", mts_sup_unknown},
{ "Phantom 336CX", mts_sup_unknown},
diff --git a/drivers/usb/media/dsbr100.c b/drivers/usb/media/dsbr100.c
index 6a5700e9d42..25646804d5b 100644
--- a/drivers/usb/media/dsbr100.c
+++ b/drivers/usb/media/dsbr100.c
@@ -127,6 +127,7 @@ static struct file_operations usb_dsbr100_fops = {
.open = usb_dsbr100_open,
.release = usb_dsbr100_close,
.ioctl = usb_dsbr100_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
diff --git a/drivers/usb/media/ov511.c b/drivers/usb/media/ov511.c
index 3a0e8ce67eb..8af665bbe33 100644
--- a/drivers/usb/media/ov511.c
+++ b/drivers/usb/media/ov511.c
@@ -4774,6 +4774,7 @@ static struct file_operations ov511_fops = {
.read = ov51x_v4l1_read,
.mmap = ov51x_v4l1_mmap,
.ioctl = ov51x_v4l1_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
diff --git a/drivers/usb/media/pwc/pwc-if.c b/drivers/usb/media/pwc/pwc-if.c
index 09ca6128ac2..4f9b0dc6fd7 100644
--- a/drivers/usb/media/pwc/pwc-if.c
+++ b/drivers/usb/media/pwc/pwc-if.c
@@ -154,6 +154,7 @@ static struct file_operations pwc_fops = {
.poll = pwc_video_poll,
.mmap = pwc_video_mmap,
.ioctl = pwc_video_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
static struct video_device pwc_template = {
diff --git a/drivers/usb/media/se401.c b/drivers/usb/media/se401.c
index b2ae29af594..2ba562285fd 100644
--- a/drivers/usb/media/se401.c
+++ b/drivers/usb/media/se401.c
@@ -1193,6 +1193,7 @@ static struct file_operations se401_fops = {
.read = se401_read,
.mmap = se401_mmap,
.ioctl = se401_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
static struct video_device se401_template = {
diff --git a/drivers/usb/media/stv680.c b/drivers/usb/media/stv680.c
index 774038b352c..b497a6a0a20 100644
--- a/drivers/usb/media/stv680.c
+++ b/drivers/usb/media/stv680.c
@@ -1343,6 +1343,7 @@ static struct file_operations stv680_fops = {
.read = stv680_read,
.mmap = stv680_mmap,
.ioctl = stv680_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
static struct video_device stv680_template = {
diff --git a/drivers/usb/media/usbvideo.c b/drivers/usb/media/usbvideo.c
index 4bd113325ef..63a72e550a1 100644
--- a/drivers/usb/media/usbvideo.c
+++ b/drivers/usb/media/usbvideo.c
@@ -953,6 +953,7 @@ static struct file_operations usbvideo_fops = {
.read = usbvideo_v4l_read,
.mmap = usbvideo_v4l_mmap,
.ioctl = usbvideo_v4l_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
static const struct video_device usbvideo_template = {
diff --git a/drivers/usb/media/vicam.c b/drivers/usb/media/vicam.c
index 1c73155c8d7..5df14407387 100644
--- a/drivers/usb/media/vicam.c
+++ b/drivers/usb/media/vicam.c
@@ -1236,6 +1236,7 @@ static struct file_operations vicam_fops = {
.read = vicam_read,
.mmap = vicam_mmap,
.ioctl = vicam_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.llseek = no_llseek,
};
diff --git a/drivers/usb/media/w9968cf.c b/drivers/usb/media/w9968cf.c
index 04d69339c05..bff9434c8e5 100644
--- a/drivers/usb/media/w9968cf.c
+++ b/drivers/usb/media/w9968cf.c
@@ -1533,12 +1533,12 @@ static int w9968cf_i2c_attach_inform(struct i2c_client* client)
}
} else {
DBG(4, "Rejected client [%s] with driver [%s]",
- client->name, client->driver->name)
+ client->name, client->driver->driver.name)
return -EINVAL;
}
DBG(5, "I2C attach client [%s] with driver [%s]",
- client->name, client->driver->name)
+ client->name, client->driver->driver.name)
return 0;
}
@@ -3490,6 +3490,7 @@ static struct file_operations w9968cf_fops = {
.release = w9968cf_release,
.read = w9968cf_read,
.ioctl = w9968cf_ioctl,
+ .compat_ioctl = v4l_compat_ioctl32,
.mmap = w9968cf_mmap,
.llseek = no_llseek,
};
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 14f55fd26a6..be5dc80836c 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -84,7 +84,7 @@ config USB_SERIAL_BELKIN
config USB_SERIAL_WHITEHEAT
tristate "USB ConnectTech WhiteHEAT Serial Driver"
- depends on USB_SERIAL && BROKEN_ON_SMP
+ depends on USB_SERIAL
help
Say Y here if you want to use a ConnectTech WhiteHEAT 4 port
USB to serial converter device.
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 6d18d4eaba3..2357b1d102d 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -364,7 +364,6 @@ static void cyberjack_read_bulk_callback (struct urb *urb, struct pt_regs *regs)
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
short todo;
- int i;
int result;
dbg("%s - port %d", __FUNCTION__, port->number);
@@ -381,14 +380,8 @@ static void cyberjack_read_bulk_callback (struct urb *urb, struct pt_regs *regs)
return;
}
if (urb->actual_length) {
- for (i = 0; i < urb->actual_length ; ++i) {
- /* if we insert more than TTY_FLIPBUF_SIZE characters, we drop them. */
- if(tty->flip.count >= TTY_FLIPBUF_SIZE) {
- tty_flip_buffer_push(tty);
- }
- /* this doesn't actually push the data through unless tty->low_latency is set */
- tty_insert_flip_char(tty, data[i], 0);
- }
+ tty_buffer_request_room(tty, urb->actual_length);
+ tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index af18355e94c..68067fe117a 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -357,7 +357,7 @@ static int cypress_serial_control (struct usb_serial_port *port, unsigned baud_m
} while (retval != 5 && retval != ENODEV);
if (retval != 5) {
- err("%s - failed to retreive serial line settings - %d", __FUNCTION__, retval);
+ err("%s - failed to retrieve serial line settings - %d", __FUNCTION__, retval);
return retval;
} else {
spin_lock_irqsave(&priv->lock, flags);
@@ -1263,12 +1263,10 @@ static void cypress_read_int_callback(struct urb *urb, struct pt_regs *regs)
/* process read if there is data other than line status */
if (tty && (bytes > i)) {
+ bytes = tty_buffer_request_room(tty, bytes);
for (; i < bytes ; ++i) {
dbg("pushing byte number %d - %d - %c", i, data[i],
data[i]);
- if(tty->flip.count >= TTY_FLIPBUF_SIZE) {
- tty_flip_buffer_push(tty);
- }
tty_insert_flip_char(tty, data[i], tty_flag);
}
tty_flip_buffer_push(port->tty);
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 8fc414bd5b2..b3f776a90c9 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -946,13 +946,10 @@ dbg( "digi_rx_unthrottle: TOP: port=%d", priv->dp_port_num );
spin_lock_irqsave( &priv->dp_port_lock, flags );
/* send any buffered chars from throttle time on to tty subsystem */
- len = min(priv->dp_in_buf_len, TTY_FLIPBUF_SIZE - tty->flip.count );
+
+ len = tty_buffer_request_room(tty, priv->dp_in_buf_len);
if( len > 0 ) {
- memcpy( tty->flip.char_buf_ptr, priv->dp_in_buf, len );
- memcpy( tty->flip.flag_buf_ptr, priv->dp_in_flag_buf, len );
- tty->flip.char_buf_ptr += len;
- tty->flip.flag_buf_ptr += len;
- tty->flip.count += len;
+ tty_insert_flip_string_flags(tty, priv->dp_in_buf, priv->dp_in_flag_buf, len);
tty_flip_buffer_push( tty );
}
@@ -1827,6 +1824,7 @@ static int digi_read_inb_callback( struct urb *urb )
int status = ((unsigned char *)urb->transfer_buffer)[2];
unsigned char *data = ((unsigned char *)urb->transfer_buffer)+3;
int flag,throttled;
+ int i;
/* do not process callbacks on closed ports */
/* but do continue the read chain */
@@ -1885,20 +1883,18 @@ static int digi_read_inb_callback( struct urb *urb )
}
} else {
-
- len = min( len, TTY_FLIPBUF_SIZE - tty->flip.count );
-
+ len = tty_buffer_request_room(tty, len);
if( len > 0 ) {
- memcpy( tty->flip.char_buf_ptr, data, len );
- memset( tty->flip.flag_buf_ptr, flag, len );
- tty->flip.char_buf_ptr += len;
- tty->flip.flag_buf_ptr += len;
- tty->flip.count += len;
+ /* Hot path */
+ if(flag == TTY_NORMAL)
+ tty_insert_flip_string(tty, data, len);
+ else {
+ for(i = 0; i < len; i++)
+ tty_insert_flip_char(tty, data[i], flag);
+ }
tty_flip_buffer_push( tty );
}
-
}
-
}
spin_unlock( &priv->dp_port_lock );
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 79a766e9ca2..63f7c78a115 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -344,7 +344,6 @@ static void empeg_read_bulk_callback (struct urb *urb, struct pt_regs *regs)
struct usb_serial_port *port = (struct usb_serial_port *)urb->context;
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
- int i;
int result;
dbg("%s - port %d", __FUNCTION__, port->number);
@@ -359,19 +358,8 @@ static void empeg_read_bulk_callback (struct urb *urb, struct pt_regs *regs)
tty = port->tty;
if (urb->actual_length) {
- for (i = 0; i < urb->actual_length ; ++i) {
- /* gb - 2000/11/13
- * If we insert too many characters we'll overflow the buffer.
- * This means we'll lose bytes - Decidedly bad.
- */
- if(tty->flip.count >= TTY_FLIPBUF_SIZE) {
- tty_flip_buffer_push(tty);
- }
- tty_insert_flip_char(tty, data[i], 0);
- }
- /* gb - 2000/11/13
- * Goes straight through instead of scheduling - if tty->low_latency is set.
- */
+ tty_buffer_request_room(tty, urb->actual_length);
+ tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
bytes_in += urb->actual_length;
}
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index eb863b3f2d7..10bc1bf23b3 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1610,24 +1610,11 @@ static void ftdi_process_read (void *param)
length = 0;
}
- /* have to make sure we don't overflow the buffer
- with tty_insert_flip_char's */
- if (tty->flip.count+length > TTY_FLIPBUF_SIZE) {
- tty_flip_buffer_push(tty);
- need_flip = 0;
-
- if (tty->flip.count != 0) {
- /* flip didn't work, this happens when ftdi_process_read() is
- * called from ftdi_unthrottle, because TTY_DONT_FLIP is set */
- dbg("%s - flip buffer push failed", __FUNCTION__);
- break;
- }
- }
if (priv->rx_flags & THROTTLED) {
dbg("%s - throttled", __FUNCTION__);
break;
}
- if (tty->ldisc.receive_room(tty)-tty->flip.count < length) {
+ if (tty_buffer_request_room(tty, length) < length) {
/* break out & wait for throttling/unthrottling to happen */
dbg("%s - receive room low", __FUNCTION__);
break;
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 452efce7271..d6f55e9dcca 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -275,23 +275,14 @@ static void send_to_tty(struct usb_serial_port *port,
char *data, unsigned int actual_length)
{
struct tty_struct *tty = port->tty;
- int i;
if (tty && actual_length) {
usb_serial_debug_data(debug, &port->dev,
__FUNCTION__, actual_length, data);
- for (i = 0; i < actual_length ; ++i) {
- /* if we insert more than TTY_FLIPBUF_SIZE characters,
- we drop them. */
- if(tty->flip.count >= TTY_FLIPBUF_SIZE) {
- tty_flip_buffer_push(tty);
- }
- /* this doesn't actually push the data through unless
- tty->low_latency is set */
- tty_insert_flip_char(tty, data[i], 0);
- }
+ tty_buffer_request_room(tty, actual_length);
+ tty_insert_flip_string(tty, data, actual_length);
tty_flip_buffer_push(tty);
}
}
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 4ddac620fc0..476cda107f4 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -254,7 +254,6 @@ void usb_serial_generic_read_bulk_callback (struct urb *urb, struct pt_regs *reg
struct usb_serial *serial = port->serial;
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
- int i;
int result;
dbg("%s - port %d", __FUNCTION__, port->number);
@@ -268,14 +267,8 @@ void usb_serial_generic_read_bulk_callback (struct urb *urb, struct pt_regs *reg
tty = port->tty;
if (tty && urb->actual_length) {
- for (i = 0; i < urb->actual_length ; ++i) {
- /* if we insert more than TTY_FLIPBUF_SIZE characters, we drop them. */
- if(tty->flip.count >= TTY_FLIPBUF_SIZE) {
- tty_flip_buffer_push(tty);
- }
- /* this doesn't actually push the data through unless tty->low_latency is set */
- tty_insert_flip_char(tty, data[i], 0);
- }
+ tty_buffer_request_room(tty, urb->actual_length);
+ tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index faedbeb6ba4..3f29e6b0fd1 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -1965,20 +1965,14 @@ static void edge_tty_recv(struct device *dev, struct tty_struct *tty, unsigned c
int cnt;
do {
- if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
- tty_flip_buffer_push(tty);
- if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
- dev_err(dev, "%s - dropping data, %d bytes lost\n",
- __FUNCTION__, length);
- return;
- }
+ cnt = tty_buffer_request_room(tty, length);
+ if (cnt < length) {
+ dev_err(dev, "%s - dropping data, %d bytes lost\n",
+ __FUNCTION__, length - cnt);
+ if(cnt == 0)
+ break;
}
- cnt = min(length, TTY_FLIPBUF_SIZE - tty->flip.count);
- memcpy(tty->flip.char_buf_ptr, data, cnt);
- memset(tty->flip.flag_buf_ptr, 0, cnt);
- tty->flip.char_buf_ptr += cnt;
- tty->flip.flag_buf_ptr += cnt;
- tty->flip.count += cnt;
+ tty_insert_flip_string(tty, data, cnt);
data += cnt;
length -= cnt;
} while (length > 0);
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 2edf9cabad2..afc0f34b3a4 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1865,20 +1865,14 @@ static void edge_tty_recv(struct device *dev, struct tty_struct *tty, unsigned c
int cnt;
do {
- if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
- tty_flip_buffer_push(tty);
- if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
- dev_err(dev, "%s - dropping data, %d bytes lost\n",
- __FUNCTION__, length);
- return;
- }
+ cnt = tty_buffer_request_room(tty, length);
+ if (cnt < length) {
+ dev_err(dev, "%s - dropping data, %d bytes lost\n",
+ __FUNCTION__, length - cnt);
+ if(cnt == 0)
+ break;
}
- cnt = min(length, TTY_FLIPBUF_SIZE - tty->flip.count);
- memcpy(tty->flip.char_buf_ptr, data, cnt);
- memset(tty->flip.flag_buf_ptr, 0, cnt);
- tty->flip.char_buf_ptr += cnt;
- tty->flip.flag_buf_ptr += cnt;
- tty->flip.count += cnt;
+ tty_insert_flip_string(tty, data, cnt);
data += cnt;
length -= cnt;
} while (length > 0);
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index 06d07cea0b7..9a5c9798956 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -711,7 +711,7 @@ static void ipaq_read_bulk_callback(struct urb *urb, struct pt_regs *regs)
struct usb_serial_port *port = (struct usb_serial_port *)urb->context;
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
- int i, result;
+ int result;
dbg("%s - port %d", __FUNCTION__, port->number);
@@ -724,14 +724,8 @@ static void ipaq_read_bulk_callback(struct urb *urb, struct pt_regs *regs)
tty = port->tty;
if (tty && urb->actual_length) {
- for (i = 0; i < urb->actual_length ; ++i) {
- /* if we insert more than TTY_FLIPBUF_SIZE characters, we drop them. */
- if(tty->flip.count >= TTY_FLIPBUF_SIZE) {
- tty_flip_buffer_push(tty);
- }
- /* this doesn't actually push the data through unless tty->low_latency is set */
- tty_insert_flip_char(tty, data[i], 0);
- }
+ tty_buffer_request_room(tty, urb->actual_length);
+ tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
bytes_in += urb->actual_length;
}
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index 2dd191f5fe7..e760a70242c 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -166,7 +166,6 @@ static void ipw_read_bulk_callback(struct urb *urb, struct pt_regs *regs)
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
struct tty_struct *tty;
- int i;
int result;
dbg("%s - port %d", __FUNCTION__, port->number);
@@ -180,14 +179,8 @@ static void ipw_read_bulk_callback(struct urb *urb, struct pt_regs *regs)
tty = port->tty;
if (tty && urb->actual_length) {
- for (i = 0; i < urb->actual_length ; ++i) {
- /* if we insert more than TTY_FLIPBUF_SIZE characters, we drop them. */
- if(tty->flip.count >= TTY_FLIPBUF_SIZE) {
- tty_flip_buffer_push(tty);
- }
- /* this doesn't actually push the data through unless tty->low_latency is set */
- tty_insert_flip_char(tty, data[i], 0);
- }
+ tty_buffer_request_room(tty, urb->actual_length);
+ tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 4e2f7dfb58b..78335a5f774 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -648,7 +648,6 @@ static void klsi_105_read_bulk_callback (struct urb *urb, struct pt_regs *regs)
usb_serial_debug_data(debug, &port->dev, __FUNCTION__,
urb->actual_length, data);
} else {
- int i;
int bytes_sent = ((__u8 *) data)[0] +
((unsigned int) ((__u8 *) data)[1] << 8);
tty = port->tty;
@@ -669,16 +668,8 @@ static void klsi_105_read_bulk_callback (struct urb *urb, struct pt_regs *regs)
bytes_sent = urb->actual_length - 2;
}
- for (i = 2; i < 2+bytes_sent; i++) {
- /* if we insert more than TTY_FLIPBUF_SIZE characters,
- * we drop them. */
- if(tty->flip.count >= TTY_FLIPBUF_SIZE) {
- tty_flip_buffer_push(tty);
- }
- /* this doesn't actually push the data through unless
- * tty->low_latency is set */
- tty_insert_flip_char(tty, ((__u8*) data)[i], 0);
- }
+ tty_buffer_request_room(tty, bytes_sent);
+ tty_insert_flip_string(tty, data + 2, bytes_sent);
tty_flip_buffer_push(tty);
/* again lockless, but debug info only */
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index d9c21e27513..b8b213185d0 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -365,7 +365,6 @@ static void kobil_close (struct usb_serial_port *port, struct file *filp)
static void kobil_read_int_callback( struct urb *purb, struct pt_regs *regs)
{
- int i;
int result;
struct usb_serial_port *port = (struct usb_serial_port *) purb->context;
struct tty_struct *tty;
@@ -397,14 +396,8 @@ static void kobil_read_int_callback( struct urb *purb, struct pt_regs *regs)
*/
// END DEBUG
- for (i = 0; i < purb->actual_length; ++i) {
- // if we insert more than TTY_FLIPBUF_SIZE characters, we drop them.
- if(tty->flip.count >= TTY_FLIPBUF_SIZE) {
- tty_flip_buffer_push(tty);
- }
- // this doesn't actually push the data through unless tty->low_latency is set
- tty_insert_flip_char(tty, data[i], 0);
- }
+ tty_buffer_request_room(tty, purb->actual_length);
+ tty_insert_flip_string(tty, data, purb->actual_length);
tty_flip_buffer_push(tty);
}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 3fd2405304f..52bdf6fe46f 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -321,7 +321,7 @@ static int option_write(struct usb_serial_port *port,
static void option_indat_callback(struct urb *urb, struct pt_regs *regs)
{
- int i, err;
+ int err;
int endpoint;
struct usb_serial_port *port;
struct tty_struct *tty;
@@ -338,11 +338,8 @@ static void option_indat_callback(struct urb *urb, struct pt_regs *regs)
} else {
tty = port->tty;
if (urb->actual_length) {
- for (i = 0; i < urb->actual_length ; ++i) {
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- tty_flip_buffer_push(tty);
- tty_insert_flip_char(tty, data[i], 0);
- }
+ tty_buffer_request_room(tty, urb->actual_length);
+ tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
} else {
dbg("%s: empty read urb received", __FUNCTION__);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index f0372105619..9ffff193823 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -924,16 +924,12 @@ static void pl2303_read_bulk_callback (struct urb *urb, struct pt_regs *regs)
tty = port->tty;
if (tty && urb->actual_length) {
+ tty_buffer_request_room(tty, urb->actual_length + 1);
/* overrun is special, not associated with a char */
if (status & UART_OVERRUN_ERROR)
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
-
- for (i = 0; i < urb->actual_length; ++i) {
- if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
- tty_flip_buffer_push(tty);
- }
+ for (i = 0; i < urb->actual_length; ++i)
tty_insert_flip_char (tty, data[i], tty_flag);
- }
tty_flip_buffer_push (tty);
}
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index abb830cb77b..c18db325707 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1280,24 +1280,18 @@ static void ti_recv(struct device *dev, struct tty_struct *tty,
int cnt;
do {
- if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
- tty_flip_buffer_push(tty);
- if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
- dev_err(dev, "%s - dropping data, %d bytes lost\n", __FUNCTION__, length);
- return;
- }
+ cnt = tty_buffer_request_room(tty, length);
+ if (cnt < length) {
+ dev_err(dev, "%s - dropping data, %d bytes lost\n", __FUNCTION__, length - cnt);
+ if(cnt == 0)
+ break;
}
- cnt = min(length, TTY_FLIPBUF_SIZE - tty->flip.count);
- memcpy(tty->flip.char_buf_ptr, data, cnt);
- memset(tty->flip.flag_buf_ptr, 0, cnt);
- tty->flip.char_buf_ptr += cnt;
- tty->flip.flag_buf_ptr += cnt;
- tty->flip.count += cnt;
+ tty_insert_flip_string(tty, data, cnt);
+ tty_flip_buffer_push(tty);
data += cnt;
length -= cnt;
} while (length > 0);
- tty_flip_buffer_push(tty);
}
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 8bc8337c99c..4dd6865d32b 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -584,7 +584,7 @@ static struct usb_serial_driver *search_serial_device(struct usb_interface *ifac
const struct usb_device_id *id;
struct usb_serial_driver *t;
- /* List trough know devices and see if the usb id matches */
+ /* Check if the usb id matches a known device */
list_for_each(p, &usb_serial_driver_list) {
t = list_entry(p, struct usb_serial_driver, driver_list);
id = usb_match_id(iface, t->id_table);
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 49b1fbe61f2..bce3d55affd 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -488,7 +488,6 @@ static void visor_read_bulk_callback (struct urb *urb, struct pt_regs *regs)
unsigned char *data = urb->transfer_buffer;
struct tty_struct *tty;
unsigned long flags;
- int i;
int throttled;
int result;
@@ -503,14 +502,8 @@ static void visor_read_bulk_callback (struct urb *urb, struct pt_regs *regs)
tty = port->tty;
if (tty && urb->actual_length) {
- for (i = 0; i < urb->actual_length ; ++i) {
- /* if we insert more than TTY_FLIPBUF_SIZE characters, we drop them. */
- if(tty->flip.count >= TTY_FLIPBUF_SIZE) {
- tty_flip_buffer_push(tty);
- }
- /* this doesn't actually push the data through unless tty->low_latency is set */
- tty_insert_flip_char(tty, data[i], 0);
- }
+ tty_buffer_request_room(tty, urb->actual_length);
+ tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
spin_lock_irqsave(&priv->lock, flags);
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index a7c3c4734d8..557411c6e7c 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -1434,7 +1434,9 @@ static void rx_data_softint(void *private)
urb = wrap->urb;
if (tty && urb->actual_length) {
- if (urb->actual_length > TTY_FLIPBUF_SIZE - tty->flip.count) {
+ int len = tty_buffer_request_room(tty, urb->actual_length);
+ /* This stuff can go away now I suspect */
+ if (unlikely(len < urb->actual_length)) {
spin_lock_irqsave(&info->lock, flags);
list_add(tmp, &info->rx_urb_q);
spin_unlock_irqrestore(&info->lock, flags);
@@ -1442,11 +1444,8 @@ static void rx_data_softint(void *private)
schedule_work(&info->rx_work);
return;
}
-
- memcpy(tty->flip.char_buf_ptr, urb->transfer_buffer, urb->actual_length);
- tty->flip.char_buf_ptr += urb->actual_length;
- tty->flip.count += urb->actual_length;
- sent += urb->actual_length;
+ tty_insert_flip_string(tty, urb->transfer_buffer, len);
+ sent += len;
}
urb->dev = port->serial->dev;
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index cc8e3bf5001..3e153d313bb 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -993,12 +993,6 @@ config FB_ATY_GENERIC_LCD
Say Y if you have a laptop with an ATI Rage LT PRO, Rage Mobility,
Rage XC, or Rage XL chipset.
-config FB_ATY_XL_INIT
- bool "Rage XL No-BIOS Init support"
- depends on FB_ATY_CT
- help
- Say Y here to support booting a Rage XL without BIOS support.
-
config FB_ATY_GX
bool "Mach64 GX support" if PCI
depends on FB_ATY
@@ -1151,7 +1145,7 @@ config FB_VOODOO1
config FB_CYBLA
tristate "Cyberblade/i1 support"
- depends on FB && PCI
+ depends on FB && PCI && X86_32 && !64BIT
select FB_CFB_IMAGEBLIT
select VIDEO_SELECT
---help---
@@ -1376,7 +1370,7 @@ config FB_PXA
This driver is also available as a module ( = code which can be
inserted and removed from the running kernel whenever you want). The
- module will be called vfb. If you want to compile it as a module,
+ module will be called pxafb. If you want to compile it as a module,
say M here and read <file:Documentation/modules.txt>.
If unsure, say N.
@@ -1409,7 +1403,7 @@ config FB_W100
This driver is also available as a module ( = code which can be
inserted and removed from the running kernel whenever you want). The
- module will be called vfb. If you want to compile it as a module,
+ module will be called w100fb. If you want to compile it as a module,
say M here and read <file:Documentation/modules.txt>.
If unsure, say N.
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index a3c2c45e29e..0da4083ba90 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -21,12 +21,11 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/list.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/clcd.h>
+#include <linux/clk.h>
#include <asm/sizes.h>
-#include <asm/hardware/amba.h>
-#include <asm/hardware/clock.h>
-
-#include <asm/hardware/amba_clcd.h>
#define to_clcd(info) container_of(info, struct clcd_fb, fb)
@@ -346,10 +345,6 @@ static int clcdfb_register(struct clcd_fb *fb)
goto out;
}
- ret = clk_use(fb->clk);
- if (ret)
- goto free_clk;
-
fb->fb.fix.mmio_start = fb->dev->res.start;
fb->fb.fix.mmio_len = SZ_4K;
@@ -357,7 +352,7 @@ static int clcdfb_register(struct clcd_fb *fb)
if (!fb->regs) {
printk(KERN_ERR "CLCD: unable to remap registers\n");
ret = -ENOMEM;
- goto unuse_clk;
+ goto free_clk;
}
fb->fb.fbops = &clcdfb_ops;
@@ -427,8 +422,6 @@ static int clcdfb_register(struct clcd_fb *fb)
printk(KERN_ERR "CLCD: cannot register framebuffer (%d)\n", ret);
iounmap(fb->regs);
- unuse_clk:
- clk_unuse(fb->clk);
free_clk:
clk_put(fb->clk);
out:
@@ -489,7 +482,6 @@ static int clcdfb_remove(struct amba_device *dev)
clcdfb_disable(fb);
unregister_framebuffer(&fb->fb);
iounmap(fb->regs);
- clk_unuse(fb->clk);
clk_put(fb->clk);
fb->board->remove(fb);
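The ohci-s3c2410 hunk above and this amba-clcd hunk drop the clk_use()/clk_unuse() pair from the ARM clock API, leaving clk_get()/clk_put() for lookup and, elsewhere in these drivers, clk_enable()/clk_disable() for gating. A minimal sketch of the resulting lifecycle, assuming clk_enable() now covers what clk_use() used to; the clock name "hclk" is a placeholder:

	struct clk *clk;

	clk = clk_get(dev, "hclk");	/* placeholder name */
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	clk_enable(clk);		/* no separate clk_use() step any more */
	/* ... program the hardware ... */
	clk_disable(clk);
	clk_put(clk);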
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index d549e215f3c..2c42a812655 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -590,6 +590,8 @@ static u_short maxfmode, chipset;
#define highw(x) ((u_long)(x)>>16 & 0xffff)
#define loww(x) ((u_long)(x) & 0xffff)
+#define custom amiga_custom
+
#define VBlankOn() custom.intena = IF_SETCLR|IF_COPER
#define VBlankOff() custom.intena = IF_COPER
@@ -1164,8 +1166,8 @@ static void ami_update_display(void);
static void ami_init_display(void);
static void ami_do_blank(void);
static int ami_get_fix_cursorinfo(struct fb_fix_cursorinfo *fix);
-static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data);
-static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data);
+static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data);
+static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data);
static int ami_get_cursorstate(struct fb_cursorstate *state);
static int ami_set_cursorstate(struct fb_cursorstate *state);
static void ami_set_sprite(void);
@@ -2179,6 +2181,7 @@ static int amifb_ioctl(struct inode *inode, struct file *file,
struct fb_var_cursorinfo var;
struct fb_cursorstate state;
} crsr;
+ void __user *argp = (void __user *)arg;
int i;
switch (cmd) {
@@ -2186,33 +2189,32 @@ static int amifb_ioctl(struct inode *inode, struct file *file,
i = ami_get_fix_cursorinfo(&crsr.fix);
if (i)
return i;
- return copy_to_user((void *)arg, &crsr.fix,
+ return copy_to_user(argp, &crsr.fix,
sizeof(crsr.fix)) ? -EFAULT : 0;
case FBIOGET_VCURSORINFO:
i = ami_get_var_cursorinfo(&crsr.var,
- ((struct fb_var_cursorinfo *)arg)->data);
+ ((struct fb_var_cursorinfo __user *)arg)->data);
if (i)
return i;
- return copy_to_user((void *)arg, &crsr.var,
+ return copy_to_user(argp, &crsr.var,
sizeof(crsr.var)) ? -EFAULT : 0;
case FBIOPUT_VCURSORINFO:
- if (copy_from_user(&crsr.var, (void *)arg,
- sizeof(crsr.var)))
+ if (copy_from_user(&crsr.var, argp, sizeof(crsr.var)))
return -EFAULT;
return ami_set_var_cursorinfo(&crsr.var,
- ((struct fb_var_cursorinfo *)arg)->data);
+ ((struct fb_var_cursorinfo __user *)arg)->data);
case FBIOGET_CURSORSTATE:
i = ami_get_cursorstate(&crsr.state);
if (i)
return i;
- return copy_to_user((void *)arg, &crsr.state,
+ return copy_to_user(argp, &crsr.state,
sizeof(crsr.state)) ? -EFAULT : 0;
case FBIOPUT_CURSORSTATE:
- if (copy_from_user(&crsr.state, (void *)arg,
+ if (copy_from_user(&crsr.state, argp,
sizeof(crsr.state)))
return -EFAULT;
return ami_set_cursorstate(&crsr.state);
@@ -3325,7 +3327,7 @@ static int ami_get_fix_cursorinfo(struct fb_fix_cursorinfo *fix)
return 0;
}
-static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data)
+static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data)
{
struct amifb_par *par = &currentpar;
register u_short *lspr, *sspr;
@@ -3347,14 +3349,14 @@ static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data)
var->yspot = par->crsr.spot_y;
if (size > var->height*var->width)
return -ENAMETOOLONG;
- if (!access_ok(VERIFY_WRITE, (void *)data, size))
+ if (!access_ok(VERIFY_WRITE, data, size))
return -EFAULT;
delta = 1<<par->crsr.fmode;
lspr = lofsprite + (delta<<1);
if (par->bplcon0 & BPC0_LACE)
sspr = shfsprite + (delta<<1);
else
- sspr = 0;
+ sspr = NULL;
for (height = (short)var->height-1; height >= 0; height--) {
bits = 0; words = delta; datawords = 0;
for (width = (short)var->width-1; width >= 0; width--) {
@@ -3400,7 +3402,7 @@ static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data)
return 0;
}
-static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data)
+static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data)
{
struct amifb_par *par = &currentpar;
register u_short *lspr, *sspr;
@@ -3427,7 +3429,7 @@ static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data)
return -EINVAL;
if (!var->height)
return -EINVAL;
- if (!access_ok(VERIFY_READ, (void *)data, var->width*var->height))
+ if (!access_ok(VERIFY_READ, data, var->width*var->height))
return -EFAULT;
delta = 1<<fmode;
lofsprite = shfsprite = (u_short *)spritememory;
@@ -3442,13 +3444,13 @@ static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data)
if (((var->height+2)<<fmode<<2) > SPRITEMEMSIZE)
return -EINVAL;
memset(lspr, 0, (var->height+2)<<fmode<<2);
- sspr = 0;
+ sspr = NULL;
}
for (height = (short)var->height-1; height >= 0; height--) {
bits = 16; words = delta; datawords = 0;
for (width = (short)var->width-1; width >= 0; width--) {
unsigned long tdata = 0;
- get_user(tdata, (char *)data);
+ get_user(tdata, data);
data++;
#ifdef __mc68000__
asm volatile (
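
The amifb changes above do two things: every user pointer is funneled through a single argp variable, and the cursor-data pointers gain the __user annotation so sparse can flag direct dereferences. A rough user-space sketch of both conventions follows; copy_to_user_stub(), get_fix_cursorinfo() and struct fix_cursorinfo are invented stand-ins for illustration, not amifb code, and the __user definition shown is only an approximation of the kernel's.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Only sparse sees anything here; normal builds compile __user away. */
#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

struct fix_cursorinfo { int width, height; };	/* hypothetical payload */

/* Stand-in for copy_to_user(): returns the number of bytes NOT copied. */
static unsigned long copy_to_user_stub(void __user *to, const void *from,
				       unsigned long n)
{
	memcpy(to, from, n);		/* the real helper fault-checks 'to' */
	return 0;
}

/* The ioctl idiom used in the hunks above: 0 on success, -EFAULT otherwise. */
static int get_fix_cursorinfo(void __user *argp)
{
	struct fix_cursorinfo fix = { 64, 64 };

	return copy_to_user_stub(argp, &fix, sizeof(fix)) ? -EFAULT : 0;
}

int main(void)
{
	struct fix_cursorinfo out;
	int ret = get_fix_cursorinfo(&out);

	printf("ret=%d, width=%d\n", ret, out.width);
	return 0;
}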
diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
index 2784f0a9d69..89060b2db8e 100644
--- a/drivers/video/arcfb.c
+++ b/drivers/video/arcfb.c
@@ -366,7 +366,8 @@ static void arcfb_lcd_update(struct arcfb_par *par, unsigned int dx,
}
}
-void arcfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+static void arcfb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
{
struct arcfb_par *par = info->par;
@@ -376,7 +377,8 @@ void arcfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
arcfb_lcd_update(par, rect->dx, rect->dy, rect->width, rect->height);
}
-void arcfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+static void arcfb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
{
struct arcfb_par *par = info->par;
@@ -386,7 +388,7 @@ void arcfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
arcfb_lcd_update(par, area->dx, area->dy, area->width, area->height);
}
-void arcfb_imageblit(struct fb_info *info, const struct fb_image *image)
+static void arcfb_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct arcfb_par *par = info->par;
diff --git a/drivers/video/asiliantfb.c b/drivers/video/asiliantfb.c
index c64de59398f..69f75547865 100644
--- a/drivers/video/asiliantfb.c
+++ b/drivers/video/asiliantfb.c
@@ -549,7 +549,7 @@ asiliantfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
if (!request_mem_region(addr, size, "asiliantfb"))
return -EBUSY;
- p = framebuffer_alloc(sizeof(u32) * 256, &dp->dev);
+ p = framebuffer_alloc(sizeof(u32) * 16, &dp->dev);
if (!p) {
release_mem_region(addr, size);
return -ENOMEM;
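
The asiliantfb hunk shrinks the extra space requested from framebuffer_alloc() because that area serves as the pseudo palette, and in truecolor modes the console only ever fills 16 of its slots. A small stand-alone sketch of how such an entry gets packed; the struct field type, the pack() helper and the RGB565 layout are assumptions made purely for illustration, not the driver's actual code.

#include <stdint.h>
#include <stdio.h>

struct field { unsigned int offset, length; };	/* stands in for fb_bitfield */

/* Scale a 16-bit colour component down to the field width and shift it
 * into place, the way fb_setcolreg() implementations fill pseudo_palette. */
static uint32_t pack(uint16_t val, struct field f)
{
	return (uint32_t)(val >> (16 - f.length)) << f.offset;
}

int main(void)
{
	/* assumed RGB565 layout purely for the example */
	struct field red = { 11, 5 }, green = { 5, 6 }, blue = { 0, 5 };
	uint32_t pseudo_palette[16];	/* 16 entries cover every console colour */
	unsigned int regno = 1;		/* console colour index, always < 16 */

	pseudo_palette[regno] = pack(0xffff, red) | pack(0x0000, green) |
				pack(0xffff, blue);
	printf("%#x\n", (unsigned int)pseudo_palette[regno]);
	return 0;
}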
diff --git a/drivers/video/aty/Makefile b/drivers/video/aty/Makefile
index 9dec96249ff..18521397a6e 100644
--- a/drivers/video/aty/Makefile
+++ b/drivers/video/aty/Makefile
@@ -5,7 +5,6 @@ obj-$(CONFIG_FB_RADEON) += radeonfb.o
atyfb-y := atyfb_base.o mach64_accel.o mach64_cursor.o
atyfb-$(CONFIG_FB_ATY_GX) += mach64_gx.o
atyfb-$(CONFIG_FB_ATY_CT) += mach64_ct.o
-atyfb-$(CONFIG_FB_ATY_XL_INIT) += xlinit.o
atyfb-objs := $(atyfb-y)
diff --git a/drivers/video/aty/atyfb.h b/drivers/video/aty/atyfb.h
index 09de173c116..e9b7a64c1ac 100644
--- a/drivers/video/aty/atyfb.h
+++ b/drivers/video/aty/atyfb.h
@@ -50,6 +50,7 @@ struct pll_info {
int sclk, mclk, mclk_pm, xclk;
int ref_div;
int ref_clk;
+ int ecp_max;
};
typedef struct {
@@ -354,6 +355,5 @@ static inline void wait_for_idle(struct atyfb_par *par)
extern void aty_reset_engine(const struct atyfb_par *par);
extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info);
-extern int atyfb_xl_init(struct fb_info *info);
extern void aty_st_pll_ct(int offset, u8 val, const struct atyfb_par *par);
extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 3fefdb0cbf0..ed81005cbdb 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -109,9 +109,18 @@
#define GUI_RESERVE (1 * PAGE_SIZE)
/* FIXME: remove the FAIL definition */
-#define FAIL(msg) do { printk(KERN_CRIT "atyfb: " msg "\n"); return -EINVAL; } while (0)
-#define FAIL_MAX(msg, x, _max_) do { if(x > _max_) { printk(KERN_CRIT "atyfb: " msg " %x(%x)\n", x, _max_); return -EINVAL; } } while (0)
-
+#define FAIL(msg) do { \
+ if (!(var->activate & FB_ACTIVATE_TEST)) \
+ printk(KERN_CRIT "atyfb: " msg "\n"); \
+ return -EINVAL; \
+} while (0)
+#define FAIL_MAX(msg, x, _max_) do { \
+ if (x > _max_) { \
+ if (!(var->activate & FB_ACTIVATE_TEST)) \
+ printk(KERN_CRIT "atyfb: " msg " %x(%x)\n", x, _max_); \
+ return -EINVAL; \
+ } \
+} while (0)
#ifdef DEBUG
#define DPRINTK(fmt, args...) printk(KERN_DEBUG "atyfb: " fmt, ## args)
#else
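
The reworked FAIL()/FAIL_MAX() macros above stay silent when the caller only asked to test a mode, so repeated FBIOPUT_VSCREENINFO probes no longer flood the kernel log. A minimal user-space probe that exercises exactly that path might look like the following sketch; the 1024x768 mode and /dev/fb0 are arbitrary examples.

#include <fcntl.h>
#include <linux/fb.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct fb_var_screeninfo var;
	int fd = open("/dev/fb0", O_RDWR);

	if (fd < 0 || ioctl(fd, FBIOGET_VSCREENINFO, &var) == -1)
		return 1;

	var.xres = var.xres_virtual = 1024;	/* hypothetical mode to probe */
	var.yres = var.yres_virtual = 768;
	var.activate = FB_ACTIVATE_TEST;	/* validate only, do not switch */

	if (ioctl(fd, FBIOPUT_VSCREENINFO, &var) == -1)
		printf("mode rejected\n");
	else
		printf("mode would be accepted\n");

	close(fd);
	return 0;
}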
@@ -340,6 +349,7 @@ static unsigned long phys_guiregbase[FB_MAX] __initdata = { 0, };
#define ATI_CHIP_264VT3 (M64F_VT | M64F_INTEGRATED | M64F_VT_BUS | M64F_GTB_DSP | M64F_SDRAM_MAGIC_PLL)
#define ATI_CHIP_264VT4 (M64F_VT | M64F_INTEGRATED | M64F_GTB_DSP)
+/* FIXME what is this chip? */
#define ATI_CHIP_264LT (M64F_GT | M64F_INTEGRATED | M64F_GTB_DSP)
/* make sets shorter */
@@ -359,58 +369,60 @@ static unsigned long phys_guiregbase[FB_MAX] __initdata = { 0, };
static struct {
u16 pci_id;
const char *name;
- int pll, mclk, xclk;
+ int pll, mclk, xclk, ecp_max;
u32 features;
} aty_chips[] __devinitdata = {
#ifdef CONFIG_FB_ATY_GX
/* Mach64 GX */
- { PCI_CHIP_MACH64GX, "ATI888GX00 (Mach64 GX)", 135, 50, 50, ATI_CHIP_88800GX },
- { PCI_CHIP_MACH64CX, "ATI888CX00 (Mach64 CX)", 135, 50, 50, ATI_CHIP_88800CX },
+ { PCI_CHIP_MACH64GX, "ATI888GX00 (Mach64 GX)", 135, 50, 50, 0, ATI_CHIP_88800GX },
+ { PCI_CHIP_MACH64CX, "ATI888CX00 (Mach64 CX)", 135, 50, 50, 0, ATI_CHIP_88800CX },
#endif /* CONFIG_FB_ATY_GX */
#ifdef CONFIG_FB_ATY_CT
- { PCI_CHIP_MACH64CT, "ATI264CT (Mach64 CT)", 135, 60, 60, ATI_CHIP_264CT },
- { PCI_CHIP_MACH64ET, "ATI264ET (Mach64 ET)", 135, 60, 60, ATI_CHIP_264ET },
- { PCI_CHIP_MACH64VT, "ATI264VT? (Mach64 VT)", 170, 67, 67, ATI_CHIP_264VT },
- { PCI_CHIP_MACH64GT, "3D RAGE (Mach64 GT)", 135, 63, 63, ATI_CHIP_264GT },
- /* FIXME { ...ATI_264GU, maybe ATI_CHIP_264GTDVD }, */
- { PCI_CHIP_MACH64GU, "3D RAGE II+ (Mach64 GTB)", 200, 67, 67, ATI_CHIP_264GTB },
- { PCI_CHIP_MACH64VU, "ATI264VTB (Mach64 VU)", 200, 67, 67, ATI_CHIP_264VT3 },
-
- { PCI_CHIP_MACH64LT, "3D RAGE LT (Mach64 LT)", 135, 63, 63, ATI_CHIP_264LT },
- /* FIXME chipset maybe ATI_CHIP_264LTPRO ? */
- { PCI_CHIP_MACH64LG, "3D RAGE LT-G (Mach64 LG)", 230, 63, 63, ATI_CHIP_264LTG | M64F_LT_LCD_REGS | M64F_G3_PB_1024x768 },
-
- { PCI_CHIP_MACH64VV, "ATI264VT4 (Mach64 VV)", 230, 83, 83, ATI_CHIP_264VT4 },
-
- { PCI_CHIP_MACH64GV, "3D RAGE IIC (Mach64 GV, PCI)", 230, 83, 83, ATI_CHIP_264GT2C },
- { PCI_CHIP_MACH64GW, "3D RAGE IIC (Mach64 GW, AGP)", 230, 83, 83, ATI_CHIP_264GT2C },
- { PCI_CHIP_MACH64GY, "3D RAGE IIC (Mach64 GY, PCI)", 230, 83, 83, ATI_CHIP_264GT2C },
- { PCI_CHIP_MACH64GZ, "3D RAGE IIC (Mach64 GZ, AGP)", 230, 83, 83, ATI_CHIP_264GT2C },
-
- { PCI_CHIP_MACH64GB, "3D RAGE PRO (Mach64 GB, BGA, AGP)", 230, 100, 100, ATI_CHIP_264GTPRO },
- { PCI_CHIP_MACH64GD, "3D RAGE PRO (Mach64 GD, BGA, AGP 1x)", 230, 100, 100, ATI_CHIP_264GTPRO },
- { PCI_CHIP_MACH64GI, "3D RAGE PRO (Mach64 GI, BGA, PCI)", 230, 100, 100, ATI_CHIP_264GTPRO | M64F_MAGIC_VRAM_SIZE },
- { PCI_CHIP_MACH64GP, "3D RAGE PRO (Mach64 GP, PQFP, PCI)", 230, 100, 100, ATI_CHIP_264GTPRO },
- { PCI_CHIP_MACH64GQ, "3D RAGE PRO (Mach64 GQ, PQFP, PCI, limited 3D)", 230, 100, 100, ATI_CHIP_264GTPRO },
-
- { PCI_CHIP_MACH64LB, "3D RAGE LT PRO (Mach64 LB, AGP)", 236, 75, 100, ATI_CHIP_264LTPRO },
- { PCI_CHIP_MACH64LD, "3D RAGE LT PRO (Mach64 LD, AGP)", 230, 100, 100, ATI_CHIP_264LTPRO },
- { PCI_CHIP_MACH64LI, "3D RAGE LT PRO (Mach64 LI, PCI)", 230, 100, 100, ATI_CHIP_264LTPRO | M64F_G3_PB_1_1 | M64F_G3_PB_1024x768 },
- { PCI_CHIP_MACH64LP, "3D RAGE LT PRO (Mach64 LP, PCI)", 230, 100, 100, ATI_CHIP_264LTPRO },
- { PCI_CHIP_MACH64LQ, "3D RAGE LT PRO (Mach64 LQ, PCI)", 230, 100, 100, ATI_CHIP_264LTPRO },
-
- { PCI_CHIP_MACH64GM, "3D RAGE XL (Mach64 GM, AGP)", 230, 83, 63, ATI_CHIP_264XL },
- { PCI_CHIP_MACH64GN, "3D RAGE XL (Mach64 GN, AGP)", 230, 83, 63, ATI_CHIP_264XL },
- { PCI_CHIP_MACH64GO, "3D RAGE XL (Mach64 GO, PCI-66/BGA)", 230, 83, 63, ATI_CHIP_264XL },
- { PCI_CHIP_MACH64GR, "3D RAGE XL (Mach64 GR, PCI-33MHz)", 235, 83, 63, ATI_CHIP_264XL | M64F_SDRAM_MAGIC_PLL },
- { PCI_CHIP_MACH64GL, "3D RAGE XL (Mach64 GL, PCI)", 230, 83, 63, ATI_CHIP_264XL },
- { PCI_CHIP_MACH64GS, "3D RAGE XL (Mach64 GS, PCI)", 230, 83, 63, ATI_CHIP_264XL },
-
- { PCI_CHIP_MACH64LM, "3D RAGE Mobility P/M (Mach64 LM, AGP 2x)", 230, 83, 125, ATI_CHIP_MOBILITY },
- { PCI_CHIP_MACH64LN, "3D RAGE Mobility L (Mach64 LN, AGP 2x)", 230, 83, 125, ATI_CHIP_MOBILITY },
- { PCI_CHIP_MACH64LR, "3D RAGE Mobility P/M (Mach64 LR, PCI)", 230, 83, 125, ATI_CHIP_MOBILITY },
- { PCI_CHIP_MACH64LS, "3D RAGE Mobility L (Mach64 LS, PCI)", 230, 83, 125, ATI_CHIP_MOBILITY },
+ { PCI_CHIP_MACH64CT, "ATI264CT (Mach64 CT)", 135, 60, 60, 0, ATI_CHIP_264CT },
+ { PCI_CHIP_MACH64ET, "ATI264ET (Mach64 ET)", 135, 60, 60, 0, ATI_CHIP_264ET },
+
+ /* FIXME what is this chip? */
+ { PCI_CHIP_MACH64LT, "ATI264LT (Mach64 LT)", 135, 63, 63, 0, ATI_CHIP_264LT },
+
+ { PCI_CHIP_MACH64VT, "ATI264VT (Mach64 VT)", 170, 67, 67, 80, ATI_CHIP_264VT },
+ { PCI_CHIP_MACH64GT, "3D RAGE (Mach64 GT)", 135, 63, 63, 80, ATI_CHIP_264GT },
+
+ { PCI_CHIP_MACH64VU, "ATI264VT3 (Mach64 VU)", 200, 67, 67, 80, ATI_CHIP_264VT3 },
+ { PCI_CHIP_MACH64GU, "3D RAGE II+ (Mach64 GU)", 200, 67, 67, 100, ATI_CHIP_264GTB },
+
+ { PCI_CHIP_MACH64LG, "3D RAGE LT (Mach64 LG)", 230, 63, 63, 100, ATI_CHIP_264LTG | M64F_LT_LCD_REGS | M64F_G3_PB_1024x768 },
+
+ { PCI_CHIP_MACH64VV, "ATI264VT4 (Mach64 VV)", 230, 83, 83, 100, ATI_CHIP_264VT4 },
+
+ { PCI_CHIP_MACH64GV, "3D RAGE IIC (Mach64 GV, PCI)", 230, 83, 83, 100, ATI_CHIP_264GT2C },
+ { PCI_CHIP_MACH64GW, "3D RAGE IIC (Mach64 GW, AGP)", 230, 83, 83, 100, ATI_CHIP_264GT2C },
+ { PCI_CHIP_MACH64GY, "3D RAGE IIC (Mach64 GY, PCI)", 230, 83, 83, 100, ATI_CHIP_264GT2C },
+ { PCI_CHIP_MACH64GZ, "3D RAGE IIC (Mach64 GZ, AGP)", 230, 83, 83, 100, ATI_CHIP_264GT2C },
+
+ { PCI_CHIP_MACH64GB, "3D RAGE PRO (Mach64 GB, BGA, AGP)", 230, 100, 100, 125, ATI_CHIP_264GTPRO },
+ { PCI_CHIP_MACH64GD, "3D RAGE PRO (Mach64 GD, BGA, AGP 1x)", 230, 100, 100, 125, ATI_CHIP_264GTPRO },
+ { PCI_CHIP_MACH64GI, "3D RAGE PRO (Mach64 GI, BGA, PCI)", 230, 100, 100, 125, ATI_CHIP_264GTPRO | M64F_MAGIC_VRAM_SIZE },
+ { PCI_CHIP_MACH64GP, "3D RAGE PRO (Mach64 GP, PQFP, PCI)", 230, 100, 100, 125, ATI_CHIP_264GTPRO },
+ { PCI_CHIP_MACH64GQ, "3D RAGE PRO (Mach64 GQ, PQFP, PCI, limited 3D)", 230, 100, 100, 125, ATI_CHIP_264GTPRO },
+
+ { PCI_CHIP_MACH64LB, "3D RAGE LT PRO (Mach64 LB, AGP)", 236, 75, 100, 135, ATI_CHIP_264LTPRO },
+ { PCI_CHIP_MACH64LD, "3D RAGE LT PRO (Mach64 LD, AGP)", 230, 100, 100, 135, ATI_CHIP_264LTPRO },
+ { PCI_CHIP_MACH64LI, "3D RAGE LT PRO (Mach64 LI, PCI)", 230, 100, 100, 135, ATI_CHIP_264LTPRO | M64F_G3_PB_1_1 | M64F_G3_PB_1024x768 },
+ { PCI_CHIP_MACH64LP, "3D RAGE LT PRO (Mach64 LP, PCI)", 230, 100, 100, 135, ATI_CHIP_264LTPRO },
+ { PCI_CHIP_MACH64LQ, "3D RAGE LT PRO (Mach64 LQ, PCI)", 230, 100, 100, 135, ATI_CHIP_264LTPRO },
+
+ { PCI_CHIP_MACH64GM, "3D RAGE XL (Mach64 GM, AGP 2x)", 230, 83, 63, 135, ATI_CHIP_264XL },
+ { PCI_CHIP_MACH64GN, "3D RAGE XC (Mach64 GN, AGP 2x)", 230, 83, 63, 135, ATI_CHIP_264XL },
+ { PCI_CHIP_MACH64GO, "3D RAGE XL (Mach64 GO, PCI-66)", 230, 83, 63, 135, ATI_CHIP_264XL },
+ { PCI_CHIP_MACH64GL, "3D RAGE XC (Mach64 GL, PCI-66)", 230, 83, 63, 135, ATI_CHIP_264XL },
+ { PCI_CHIP_MACH64GR, "3D RAGE XL (Mach64 GR, PCI-33)", 230, 83, 63, 135, ATI_CHIP_264XL | M64F_SDRAM_MAGIC_PLL },
+ { PCI_CHIP_MACH64GS, "3D RAGE XC (Mach64 GS, PCI-33)", 230, 83, 63, 135, ATI_CHIP_264XL },
+
+ { PCI_CHIP_MACH64LM, "3D RAGE Mobility P/M (Mach64 LM, AGP 2x)", 230, 83, 125, 135, ATI_CHIP_MOBILITY },
+ { PCI_CHIP_MACH64LN, "3D RAGE Mobility L (Mach64 LN, AGP 2x)", 230, 83, 125, 135, ATI_CHIP_MOBILITY },
+ { PCI_CHIP_MACH64LR, "3D RAGE Mobility P/M (Mach64 LR, PCI)", 230, 83, 125, 135, ATI_CHIP_MOBILITY },
+ { PCI_CHIP_MACH64LS, "3D RAGE Mobility L (Mach64 LS, PCI)", 230, 83, 125, 135, ATI_CHIP_MOBILITY },
#endif /* CONFIG_FB_ATY_CT */
};
@@ -431,6 +443,7 @@ static int __devinit correct_chipset(struct atyfb_par *par)
par->pll_limits.pll_max = aty_chips[i].pll;
par->pll_limits.mclk = aty_chips[i].mclk;
par->pll_limits.xclk = aty_chips[i].xclk;
+ par->pll_limits.ecp_max = aty_chips[i].ecp_max;
par->features = aty_chips[i].features;
chip_id = aty_ld_le32(CONFIG_CHIP_ID, par);
@@ -450,39 +463,63 @@ static int __devinit correct_chipset(struct atyfb_par *par)
#endif
#ifdef CONFIG_FB_ATY_CT
case PCI_CHIP_MACH64VT:
- rev &= 0xc7;
- if(rev == 0x00) {
- name = "ATI264VTA3 (Mach64 VT)";
- par->pll_limits.pll_max = 170;
- par->pll_limits.mclk = 67;
- par->pll_limits.xclk = 67;
- par->features = ATI_CHIP_264VT;
- } else if(rev == 0x40) {
- name = "ATI264VTA4 (Mach64 VT)";
+ switch (rev & 0x07) {
+ case 0x00:
+ switch (rev & 0xc0) {
+ case 0x00:
+ name = "ATI264VT (A3) (Mach64 VT)";
+ par->pll_limits.pll_max = 170;
+ par->pll_limits.mclk = 67;
+ par->pll_limits.xclk = 67;
+ par->pll_limits.ecp_max = 80;
+ par->features = ATI_CHIP_264VT;
+ break;
+ case 0x40:
+ name = "ATI264VT2 (A4) (Mach64 VT)";
+ par->pll_limits.pll_max = 200;
+ par->pll_limits.mclk = 67;
+ par->pll_limits.xclk = 67;
+ par->pll_limits.ecp_max = 80;
+ par->features = ATI_CHIP_264VT | M64F_MAGIC_POSTDIV;
+ break;
+ }
+ break;
+ case 0x01:
+ name = "ATI264VT3 (B1) (Mach64 VT)";
par->pll_limits.pll_max = 200;
par->pll_limits.mclk = 67;
par->pll_limits.xclk = 67;
- par->features = ATI_CHIP_264VT | M64F_MAGIC_POSTDIV;
- } else {
- name = "ATI264VTB (Mach64 VT)";
+ par->pll_limits.ecp_max = 80;
+ par->features = ATI_CHIP_264VTB;
+ break;
+ case 0x02:
+ name = "ATI264VT3 (B2) (Mach64 VT)";
par->pll_limits.pll_max = 200;
par->pll_limits.mclk = 67;
par->pll_limits.xclk = 67;
- par->features = ATI_CHIP_264VTB;
+ par->pll_limits.ecp_max = 80;
+ par->features = ATI_CHIP_264VT3;
+ break;
}
break;
case PCI_CHIP_MACH64GT:
- rev &= 0x07;
- if(rev == 0x01) {
+ switch (rev & 0x07) {
+ case 0x01:
+ name = "3D RAGE II (Mach64 GT)";
par->pll_limits.pll_max = 170;
par->pll_limits.mclk = 67;
par->pll_limits.xclk = 67;
+ par->pll_limits.ecp_max = 80;
par->features = ATI_CHIP_264GTB;
- } else if(rev == 0x02) {
+ break;
+ case 0x02:
+ name = "3D RAGE II+ (Mach64 GT)";
par->pll_limits.pll_max = 200;
par->pll_limits.mclk = 67;
par->pll_limits.xclk = 67;
+ par->pll_limits.ecp_max = 100;
par->features = ATI_CHIP_264GTB;
+ break;
}
break;
#endif
@@ -692,7 +729,7 @@ static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc)
aty_st_lcd(LCD_GEN_CNTL, (crtc->lcd_gen_cntl & ~CRTC_RW_SELECT) |
(SHADOW_EN | SHADOW_RW_EN), par);
- DPRINTK("set secondary CRT to %ix%i %c%c\n",
+ DPRINTK("set shadow CRT to %ix%i %c%c\n",
((((crtc->shadow_h_tot_disp>>16) & 0xff) + 1)<<3), (((crtc->shadow_v_tot_disp>>16) & 0x7ff) + 1),
(crtc->shadow_h_sync_strt_wid & 0x200000)?'N':'P', (crtc->shadow_v_sync_strt_wid & 0x200000)?'N':'P');
@@ -840,11 +877,14 @@ static int aty_var_to_crtc(const struct fb_info *info,
know if one is connected. So it's better to fail then.
*/
if (crtc->lcd_gen_cntl & CRT_ON) {
- PRINTKI("Disable lcd panel, because video mode does not fit.\n");
+ if (!(var->activate & FB_ACTIVATE_TEST))
+ PRINTKI("Disable LCD panel, because video mode does not fit.\n");
crtc->lcd_gen_cntl &= ~LCD_ON;
/*aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl, par);*/
} else {
- FAIL("Video mode exceeds size of lcd panel.\nConnect this computer to a conventional monitor if you really need this mode.");
+ if (!(var->activate & FB_ACTIVATE_TEST))
+ PRINTKE("Video mode exceeds size of LCD panel.\nConnect this computer to a conventional monitor if you really need this mode.\n");
+ return -EINVAL;
}
}
}
@@ -858,9 +898,9 @@ static int aty_var_to_crtc(const struct fb_info *info,
vmode &= ~(FB_VMODE_DOUBLE | FB_VMODE_INTERLACED);
/* This is horror! When we simulate, say 640x480 on an 800x600
- lcd monitor, the CRTC should be programmed 800x600 values for
+ LCD monitor, the CRTC should be programmed 800x600 values for
the non visible part, but 640x480 for the visible part.
- This code has been tested on a laptop with it's 1400x1050 lcd
+ This code has been tested on a laptop with its 1400x1050 LCD
monitor and a conventional monitor both switched on.
Tested modes: 1280x1024, 1152x864, 1024x768, 800x600,
works with little glitches also with DOUBLESCAN modes
@@ -955,16 +995,6 @@ static int aty_var_to_crtc(const struct fb_info *info,
vdisplay = yres;
if(vmode & FB_VMODE_DOUBLE)
vdisplay <<= 1;
- if(vmode & FB_VMODE_INTERLACED) {
- vdisplay >>= 1;
-
- /* The prefered mode for the lcd is not interlaced, so disable it if
- it was enabled. For doublescan there is no problem, because we can
- compensate for it in the hardware stretching (we stretch half as much)
- */
- vmode &= ~FB_VMODE_INTERLACED;
- /*crtc->gen_cntl &= ~CRTC_INTERLACE_EN;*/
- }
crtc->gen_cntl &= ~(CRTC2_EN | CRTC2_PIX_WIDTH);
crtc->lcd_gen_cntl &= ~(HORZ_DIVBY2_EN | DIS_HOR_CRT_DIVBY2 |
/*TVCLK_PM_EN | VCLK_DAC_PM_EN |*/
@@ -980,7 +1010,7 @@ static int aty_var_to_crtc(const struct fb_info *info,
crtc->horz_stretching &=
~(HORZ_STRETCH_RATIO | HORZ_STRETCH_LOOP | AUTO_HORZ_RATIO |
HORZ_STRETCH_MODE | HORZ_STRETCH_EN);
- if (xres < par->lcd_width) {
+ if (xres < par->lcd_width && crtc->lcd_gen_cntl & LCD_ON) {
do {
/*
* The horizontal blender misbehaves when HDisplay is less than a
@@ -1042,7 +1072,7 @@ static int aty_var_to_crtc(const struct fb_info *info,
} while (0);
}
- if (vdisplay < par->lcd_height) {
+ if (vdisplay < par->lcd_height && crtc->lcd_gen_cntl & LCD_ON) {
crtc->vert_stretching = (VERT_STRETCH_USE0 | VERT_STRETCH_EN |
(((vdisplay * (VERT_STRETCH_RATIO0 + 1)) / par->lcd_height) & VERT_STRETCH_RATIO0));
@@ -1065,9 +1095,8 @@ static int aty_var_to_crtc(const struct fb_info *info,
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
if (M64_HAS(MAGIC_FIFO)) {
- /* Not VTB/GTB */
- /* FIXME: magic FIFO values */
- crtc->gen_cntl |= (aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC2_PIX_WIDTH);
+ /* FIXME: display FIFO low watermark values */
+ crtc->gen_cntl |= (aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_FIFO_LWM);
}
crtc->dp_pix_width = dp_pix_width;
crtc->dp_chain_mask = dp_chain_mask;
@@ -1184,7 +1213,8 @@ static int aty_crtc_to_var(const struct crtc *crtc, struct fb_var_screeninfo *va
var->transp.length = 8;
break;
default:
- FAIL("Invalid pixel width");
+ PRINTKE("Invalid pixel width\n");
+ return -EINVAL;
}
/* output */
@@ -1241,7 +1271,8 @@ static int atyfb_set_par(struct fb_info *info)
pixclock = atyfb_get_pixclock(var, par);
if (pixclock == 0) {
- FAIL("Invalid pixclock");
+ PRINTKE("Invalid pixclock\n");
+ return -EINVAL;
} else {
if((err = par->pll_ops->var_to_pll(info, pixclock, var->bits_per_pixel, &par->pll)))
return err;
@@ -1446,7 +1477,9 @@ static int atyfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
pixclock = atyfb_get_pixclock(var, par);
if (pixclock == 0) {
- FAIL("Invalid pixclock");
+ if (!(var->activate & FB_ACTIVATE_TEST))
+ PRINTKE("Invalid pixclock\n");
+ return -EINVAL;
} else {
if((err = par->pll_ops->var_to_pll(info, pixclock, var->bits_per_pixel, &pll)))
return err;
@@ -2291,10 +2324,6 @@ static int __init aty_init(struct fb_info *info, const char *name)
par->dac_ops = &aty_dac_ct;
par->pll_ops = &aty_pll_ct;
par->bus_type = PCI;
-#ifdef CONFIG_FB_ATY_XL_INIT
- if (IS_XL(par->pci_id))
- atyfb_xl_init(info);
-#endif
par->ram_type = (aty_ld_le32(CONFIG_STAT0, par) & 0x07);
ramname = aty_ct_ram[par->ram_type];
/* for many chips, the mclk is 67 MHz for SDRAM, 63 MHz otherwise */
@@ -2638,16 +2667,16 @@ static int __init store_video_par(char *video_str, unsigned char m64_num)
static int atyfb_blank(int blank, struct fb_info *info)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
- u8 gen_cntl;
+ u32 gen_cntl;
if (par->lock_blank || par->asleep)
return 0;
#ifdef CONFIG_PMAC_BACKLIGHT
- if ((_machine == _MACH_Pmac) && blank)
+ if ((_machine == _MACH_Pmac) && blank > FB_BLANK_NORMAL)
set_backlight_enable(0);
#elif defined(CONFIG_FB_ATY_GENERIC_LCD)
- if (par->lcd_table && blank &&
+ if (par->lcd_table && blank > FB_BLANK_NORMAL &&
(aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) {
u32 pm = aty_ld_lcd(POWER_MANAGEMENT, par);
pm &= ~PWR_BLON;
@@ -2655,31 +2684,31 @@ static int atyfb_blank(int blank, struct fb_info *info)
}
#endif
- gen_cntl = aty_ld_8(CRTC_GEN_CNTL, par);
+ gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par);
switch (blank) {
case FB_BLANK_UNBLANK:
- gen_cntl &= ~(0x4c);
+ gen_cntl &= ~0x400004c;
break;
case FB_BLANK_NORMAL:
- gen_cntl |= 0x40;
+ gen_cntl |= 0x4000040;
break;
case FB_BLANK_VSYNC_SUSPEND:
- gen_cntl |= 0x8;
+ gen_cntl |= 0x4000048;
break;
case FB_BLANK_HSYNC_SUSPEND:
- gen_cntl |= 0x4;
+ gen_cntl |= 0x4000044;
break;
case FB_BLANK_POWERDOWN:
- gen_cntl |= 0x4c;
+ gen_cntl |= 0x400004c;
break;
}
- aty_st_8(CRTC_GEN_CNTL, gen_cntl, par);
+ aty_st_le32(CRTC_GEN_CNTL, gen_cntl, par);
#ifdef CONFIG_PMAC_BACKLIGHT
- if ((_machine == _MACH_Pmac) && !blank)
+ if ((_machine == _MACH_Pmac) && blank <= FB_BLANK_NORMAL)
set_backlight_enable(1);
#elif defined(CONFIG_FB_ATY_GENERIC_LCD)
- if (par->lcd_table && !blank &&
+ if (par->lcd_table && blank <= FB_BLANK_NORMAL &&
(aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) {
u32 pm = aty_ld_lcd(POWER_MANAGEMENT, par);
pm |= PWR_BLON;
@@ -3157,15 +3186,15 @@ static void aty_init_lcd(struct atyfb_par *par, u32 bios_base)
refresh_rates_buf, lcd_refresh_rates[default_refresh_rate]);
par->lcd_refreshrate = lcd_refresh_rates[default_refresh_rate];
/* We now need to determine the crtc parameters for the
- * lcd monitor. This is tricky, because they are not stored
+ * LCD monitor. This is tricky, because they are not stored
* individually in the BIOS. Instead, the BIOS contains a
* table of display modes that work for this monitor.
*
* The idea is that we search for a mode of the same dimensions
- * as the dimensions of the lcd monitor. Say our lcd monitor
+ * as the dimensions of the LCD monitor. Say our LCD monitor
* is 800x600 pixels, we search for a 800x600 monitor.
* The CRTC parameters we find here are the ones that we need
- * to use to simulate other resolutions on the lcd screen.
+ * to use to simulate other resolutions on the LCD screen.
*/
lcdmodeptr = (u16 *)(par->lcd_table + 64);
while (*lcdmodeptr != 0) {
@@ -3472,7 +3501,7 @@ err_release_mem:
static int __devinit atyfb_atari_probe(void)
{
- struct aty_par *par;
+ struct atyfb_par *par;
struct fb_info *info;
int m64_num;
u32 clock_r;
@@ -3692,9 +3721,7 @@ static int __init atyfb_init(void)
atyfb_setup(option);
#endif
-#ifdef CONFIG_PCI
pci_register_driver(&atyfb_driver);
-#endif
#ifdef CONFIG_ATARI
atyfb_atari_probe();
#endif
@@ -3703,9 +3730,7 @@ static int __init atyfb_init(void)
static void __exit atyfb_exit(void)
{
-#ifdef CONFIG_PCI
pci_unregister_driver(&atyfb_driver);
-#endif
}
module_init(atyfb_init);
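
In the atyfb_blank() hunk above the driver moves from 8-bit to 32-bit CRTC_GEN_CNTL accesses so the additional high control bit can be set and cleared together with the sync and display-disable bits, and backlight/LCD power is now only cut for levels above FB_BLANK_NORMAL. The level-to-mask mapping can be sketched as a stand-alone function; apply_blank() is an invented name and the masks are simply the values used in the hunk, with the individual bit names left out.

#include <stdio.h>

enum {					/* fbdev blanking levels, as in <linux/fb.h> */
	FB_BLANK_UNBLANK,
	FB_BLANK_NORMAL,
	FB_BLANK_VSYNC_SUSPEND,
	FB_BLANK_HSYNC_SUSPEND,
	FB_BLANK_POWERDOWN,
};

/* Apply a blanking level to a CRTC_GEN_CNTL value; masks as in the hunk. */
static unsigned int apply_blank(unsigned int gen_cntl, int blank)
{
	switch (blank) {
	case FB_BLANK_UNBLANK:		gen_cntl &= ~0x400004cu; break;
	case FB_BLANK_NORMAL:		gen_cntl |= 0x4000040;   break;
	case FB_BLANK_VSYNC_SUSPEND:	gen_cntl |= 0x4000048;   break;
	case FB_BLANK_HSYNC_SUSPEND:	gen_cntl |= 0x4000044;   break;
	case FB_BLANK_POWERDOWN:	gen_cntl |= 0x400004c;   break;
	}
	return gen_cntl;
}

int main(void)
{
	printf("%#x\n", apply_blank(0, FB_BLANK_POWERDOWN));
	return 0;
}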
diff --git a/drivers/video/aty/mach64_ct.c b/drivers/video/aty/mach64_ct.c
index 9bdb2aab01a..e7056934c6a 100644
--- a/drivers/video/aty/mach64_ct.c
+++ b/drivers/video/aty/mach64_ct.c
@@ -206,9 +206,7 @@ static int aty_valid_pll_ct(const struct fb_info *info, u32 vclk_per, struct pll
{
u32 q;
struct atyfb_par *par = (struct atyfb_par *) info->par;
-#ifdef DEBUG
int pllvclk;
-#endif
/* FIXME: use the VTB/GTB /{3,6,12} post dividers if they're better suited */
q = par->ref_clk_per * pll->pll_ref_div * 4 / vclk_per;
@@ -223,13 +221,26 @@ static int aty_valid_pll_ct(const struct fb_info *info, u32 vclk_per, struct pll
pll->vclk_post_div_real = postdividers[pll->vclk_post_div];
// pll->vclk_post_div <<= 6;
pll->vclk_fb_div = q * pll->vclk_post_div_real / 8;
-#ifdef DEBUG
pllvclk = (1000000 * 2 * pll->vclk_fb_div) /
(par->ref_clk_per * pll->pll_ref_div);
+#ifdef DEBUG
printk("atyfb(%s): pllvclk=%d MHz, vclk=%d MHz\n",
__FUNCTION__, pllvclk, pllvclk / pll->vclk_post_div_real);
#endif
pll->pll_vclk_cntl = 0x03; /* VCLK = PLL_VCLK/VCLKx_POST */
+
+ /* Set ECP (scaler/overlay clock) divider */
+ if (par->pll_limits.ecp_max) {
+ int ecp = pllvclk / pll->vclk_post_div_real;
+ int ecp_div = 0;
+
+ while (ecp > par->pll_limits.ecp_max && ecp_div < 2) {
+ ecp >>= 1;
+ ecp_div++;
+ }
+ pll->pll_vclk_cntl |= ecp_div << 4;
+ }
+
return 0;
}
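
The new block in aty_valid_pll_ct() keeps the ECP (scaler/overlay) clock under the per-chip ecp_max limit by halving it at most twice and encoding the chosen divider in bits 4-5 of pll_vclk_cntl. The selection loop in isolation, as a stand-alone sketch; ecp_divider() is an invented name and whole-MHz values are assumed for simplicity.

#include <stdio.h>

/* Pick the smallest divider (0 = /1, 1 = /2, 2 = /4) that brings the
 * post-divided VCLK under the chip's ECP limit, mirroring the loop above. */
static int ecp_divider(int vclk_mhz, int ecp_max_mhz)
{
	int ecp = vclk_mhz, ecp_div = 0;

	while (ecp > ecp_max_mhz && ecp_div < 2) {
		ecp >>= 1;
		ecp_div++;
	}
	return ecp_div;
}

int main(void)
{
	/* a 200 MHz dot clock on a chip limited to 80 MHz ECP gets divided by 4 */
	printf("ecp_div=%d\n", ecp_divider(200, 80));
	return 0;
}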
diff --git a/drivers/video/aty/radeon_monitor.c b/drivers/video/aty/radeon_monitor.c
index ea7c8630691..7f9838dceab 100644
--- a/drivers/video/aty/radeon_monitor.c
+++ b/drivers/video/aty/radeon_monitor.c
@@ -423,7 +423,7 @@ static int __devinit radeon_parse_monitor_layout(struct radeonfb_info *rinfo,
/*
* Probe display on both primary and secondary card's connector (if any)
* by various available techniques (i2c, OF device tree, BIOS, ...) and
- * try to retreive EDID. The algorithm here comes from XFree's radeon
+ * try to retrieve EDID. The algorithm here comes from XFree's radeon
* driver
*/
void __devinit radeon_probe_screens(struct radeonfb_info *rinfo,
diff --git a/drivers/video/aty/xlinit.c b/drivers/video/aty/xlinit.c
deleted file mode 100644
index a085cbf74ec..00000000000
--- a/drivers/video/aty/xlinit.c
+++ /dev/null
@@ -1,359 +0,0 @@
-/*
- * ATI Rage XL Initialization. Support for Xpert98 and Victoria
- * PCI cards.
- *
- * Copyright (C) 2002 MontaVista Software Inc.
- * Author: MontaVista Software, Inc.
- * stevel@mvista.com or source@mvista.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/delay.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <asm/io.h>
-#include <video/mach64.h>
-#include "atyfb.h"
-
-#define MPLL_GAIN 0xad
-#define VPLL_GAIN 0xd5
-
-enum {
- VICTORIA = 0,
- XPERT98,
- NUM_XL_CARDS
-};
-
-extern const struct aty_pll_ops aty_pll_ct;
-
-#define DEFAULT_CARD XPERT98
-static int xl_card = DEFAULT_CARD;
-
-static const struct xl_card_cfg_t {
- int ref_crystal; // 10^4 Hz
- int mem_type;
- int mem_size;
- u32 mem_cntl;
- u32 ext_mem_cntl;
- u32 mem_addr_config;
- u32 bus_cntl;
- u32 dac_cntl;
- u32 hw_debug;
- u32 custom_macro_cntl;
- u8 dll2_cntl;
- u8 pll_yclk_cntl;
-} card_cfg[NUM_XL_CARDS] = {
- // VICTORIA
- { 2700, SDRAM, 0x800000,
- 0x10757A3B, 0x64000C81, 0x00110202, 0x7b33A040,
- 0x82010102, 0x48803800, 0x005E0179,
- 0x50, 0x25
- },
- // XPERT98
- { 1432, WRAM, 0x800000,
- 0x00165A2B, 0xE0000CF1, 0x00200213, 0x7333A001,
- 0x8000000A, 0x48833800, 0x007F0779,
- 0x10, 0x19
- }
-};
-
-typedef struct {
- u8 lcd_reg;
- u32 val;
-} lcd_tbl_t;
-
-static const lcd_tbl_t lcd_tbl[] = {
- { 0x01, 0x000520C0 },
- { 0x08, 0x02000408 },
- { 0x03, 0x00000F00 },
- { 0x00, 0x00000000 },
- { 0x02, 0x00000000 },
- { 0x04, 0x00000000 },
- { 0x05, 0x00000000 },
- { 0x06, 0x00000000 },
- { 0x33, 0x00000000 },
- { 0x34, 0x00000000 },
- { 0x35, 0x00000000 },
- { 0x36, 0x00000000 },
- { 0x37, 0x00000000 }
-};
-
-static void reset_gui(struct atyfb_par *par)
-{
- aty_st_8(GEN_TEST_CNTL+1, 0x01, par);
- aty_st_8(GEN_TEST_CNTL+1, 0x00, par);
- aty_st_8(GEN_TEST_CNTL+1, 0x02, par);
- mdelay(5);
-}
-
-static void reset_sdram(struct atyfb_par *par)
-{
- u8 temp;
-
- temp = aty_ld_8(EXT_MEM_CNTL, par);
- temp |= 0x02;
- aty_st_8(EXT_MEM_CNTL, temp, par); // MEM_SDRAM_RESET = 1b
- temp |= 0x08;
- aty_st_8(EXT_MEM_CNTL, temp, par); // MEM_CYC_TEST = 10b
- temp |= 0x0c;
- aty_st_8(EXT_MEM_CNTL, temp, par); // MEM_CYC_TEST = 11b
- mdelay(5);
- temp &= 0xf3;
- aty_st_8(EXT_MEM_CNTL, temp, par); // MEM_CYC_TEST = 00b
- temp &= 0xfd;
- aty_st_8(EXT_MEM_CNTL, temp, par); // MEM_SDRAM_REST = 0b
- mdelay(5);
-}
-
-static void init_dll(struct atyfb_par *par)
-{
- // enable DLL
- aty_st_pll_ct(PLL_GEN_CNTL,
- aty_ld_pll_ct(PLL_GEN_CNTL, par) & 0x7f,
- par);
-
- // reset DLL
- aty_st_pll_ct(DLL_CNTL, 0x82, par);
- aty_st_pll_ct(DLL_CNTL, 0xE2, par);
- mdelay(5);
- aty_st_pll_ct(DLL_CNTL, 0x82, par);
- mdelay(6);
-}
-
-static void reset_clocks(struct atyfb_par *par, struct pll_ct *pll,
- int hsync_enb)
-{
- reset_gui(par);
- aty_st_pll_ct(MCLK_FB_DIV, pll->mclk_fb_div, par);
- aty_st_pll_ct(SCLK_FB_DIV, pll->sclk_fb_div, par);
-
- mdelay(15);
- init_dll(par);
- aty_st_8(GEN_TEST_CNTL+1, 0x00, par);
- mdelay(5);
- aty_st_8(CRTC_GEN_CNTL+3, 0x04, par);
- mdelay(6);
- reset_sdram(par);
- aty_st_8(CRTC_GEN_CNTL+3,
- hsync_enb ? 0x00 : 0x04, par);
-
- aty_st_pll_ct(SPLL_CNTL2, pll->spll_cntl2, par);
- aty_st_pll_ct(PLL_GEN_CNTL, pll->pll_gen_cntl, par);
- aty_st_pll_ct(PLL_VCLK_CNTL, pll->pll_vclk_cntl, par);
-}
-
-int atyfb_xl_init(struct fb_info *info)
-{
- const struct xl_card_cfg_t * card = &card_cfg[xl_card];
- struct atyfb_par *par = (struct atyfb_par *) info->par;
- union aty_pll pll;
- int err;
- u32 temp;
-
- aty_st_8(CONFIG_STAT0, 0x85, par);
- mdelay(10);
-
- /*
- * The following needs to be set before the call
- * to var_to_pll() below. They'll be re-set again
- * to the same values in aty_init().
- */
- par->ref_clk_per = 100000000UL/card->ref_crystal;
- par->ram_type = card->mem_type;
- info->fix.smem_len = card->mem_size;
- if (xl_card == VICTORIA) {
- // the MCLK, XCLK are 120MHz on victoria card
- par->mclk_per = 1000000/120;
- par->xclk_per = 1000000/120;
- par->features &= ~M64F_MFB_FORCE_4;
- }
-
- /*
- * Calculate mclk and xclk dividers, etc. The passed
- * pixclock and bpp values don't matter yet, the vclk
- * isn't programmed until later.
- */
- if ((err = aty_pll_ct.var_to_pll(info, 39726, 8, &pll)))
- return err;
-
- aty_st_pll_ct(LVDS_CNTL0, 0x00, par);
- aty_st_pll_ct(DLL2_CNTL, card->dll2_cntl, par);
- aty_st_pll_ct(V2PLL_CNTL, 0x10, par);
- aty_st_pll_ct(MPLL_CNTL, MPLL_GAIN, par);
- aty_st_pll_ct(VPLL_CNTL, VPLL_GAIN, par);
- aty_st_pll_ct(PLL_VCLK_CNTL, 0x00, par);
- aty_st_pll_ct(VFC_CNTL, 0x1B, par);
- aty_st_pll_ct(PLL_REF_DIV, pll.ct.pll_ref_div, par);
- aty_st_pll_ct(PLL_EXT_CNTL, pll.ct.pll_ext_cntl, par);
- aty_st_pll_ct(SPLL_CNTL2, 0x03, par);
- aty_st_pll_ct(PLL_GEN_CNTL, 0x44, par);
-
- reset_clocks(par, &pll.ct, 0);
- mdelay(10);
-
- aty_st_pll_ct(VCLK_POST_DIV, 0x03, par);
- aty_st_pll_ct(VCLK0_FB_DIV, 0xDA, par);
- aty_st_pll_ct(VCLK_POST_DIV, 0x0F, par);
- aty_st_pll_ct(VCLK1_FB_DIV, 0xF5, par);
- aty_st_pll_ct(VCLK_POST_DIV, 0x3F, par);
- aty_st_pll_ct(PLL_EXT_CNTL, 0x40 | pll.ct.pll_ext_cntl, par);
- aty_st_pll_ct(VCLK2_FB_DIV, 0x00, par);
- aty_st_pll_ct(VCLK_POST_DIV, 0xFF, par);
- aty_st_pll_ct(PLL_EXT_CNTL, 0xC0 | pll.ct.pll_ext_cntl, par);
- aty_st_pll_ct(VCLK3_FB_DIV, 0x00, par);
-
- aty_st_8(BUS_CNTL, 0x01, par);
- aty_st_le32(BUS_CNTL, card->bus_cntl | 0x08000000, par);
-
- aty_st_le32(CRTC_GEN_CNTL, 0x04000200, par);
- aty_st_le16(CONFIG_STAT0, 0x0020, par);
- aty_st_le32(MEM_CNTL, 0x10151A33, par);
- aty_st_le32(EXT_MEM_CNTL, 0xE0000C01, par);
- aty_st_le16(CRTC_GEN_CNTL+2, 0x0000, par);
- aty_st_le32(DAC_CNTL, card->dac_cntl, par);
- aty_st_le16(GEN_TEST_CNTL, 0x0100, par);
- aty_st_le32(CUSTOM_MACRO_CNTL, 0x003C0171, par);
- aty_st_le32(MEM_BUF_CNTL, 0x00382848, par);
-
- aty_st_le32(HW_DEBUG, card->hw_debug, par);
- aty_st_le16(MEM_ADDR_CONFIG, 0x0000, par);
- aty_st_le16(GP_IO+2, 0x0000, par);
- aty_st_le16(GEN_TEST_CNTL, 0x0000, par);
- aty_st_le16(EXT_DAC_REGS+2, 0x0000, par);
- aty_st_le32(CRTC_INT_CNTL, 0x00000000, par);
- aty_st_le32(TIMER_CONFIG, 0x00000000, par);
- aty_st_le32(0xEC, 0x00000000, par);
- aty_st_le32(0xFC, 0x00000000, par);
-
-#if defined (CONFIG_FB_ATY_GENERIC_LCD)
- {
- int i;
-
- for (i = 0; i < ARRAY_SIZE(lcd_tbl); i++)
- aty_st_lcd(lcd_tbl[i].lcd_reg, lcd_tbl[i].val, par);
- }
-#endif
-
- aty_st_le16(CONFIG_STAT0, 0x00A4, par);
- mdelay(10);
-
- aty_st_8(BUS_CNTL+1, 0xA0, par);
- mdelay(10);
-
- reset_clocks(par, &pll.ct, 1);
- mdelay(10);
-
- // something about power management
- aty_st_8(LCD_INDEX, 0x08, par);
- aty_st_8(LCD_DATA, 0x0A, par);
- aty_st_8(LCD_INDEX, 0x08, par);
- aty_st_8(LCD_DATA+3, 0x02, par);
- aty_st_8(LCD_INDEX, 0x08, par);
- aty_st_8(LCD_DATA, 0x0B, par);
- mdelay(2);
-
- // enable display requests, enable CRTC
- aty_st_8(CRTC_GEN_CNTL+3, 0x02, par);
- // disable display
- aty_st_8(CRTC_GEN_CNTL, 0x40, par);
- // disable display requests, disable CRTC
- aty_st_8(CRTC_GEN_CNTL+3, 0x04, par);
- mdelay(10);
-
- aty_st_pll_ct(PLL_YCLK_CNTL, 0x25, par);
-
- aty_st_le16(CUSTOM_MACRO_CNTL, 0x0179, par);
- aty_st_le16(CUSTOM_MACRO_CNTL+2, 0x005E, par);
- aty_st_le16(CUSTOM_MACRO_CNTL+2, card->custom_macro_cntl>>16, par);
- aty_st_8(CUSTOM_MACRO_CNTL+1,
- (card->custom_macro_cntl>>8) & 0xff, par);
-
- aty_st_le32(MEM_ADDR_CONFIG, card->mem_addr_config, par);
- aty_st_le32(MEM_CNTL, card->mem_cntl, par);
- aty_st_le32(EXT_MEM_CNTL, card->ext_mem_cntl, par);
-
- aty_st_8(CONFIG_STAT0, 0xA0 | card->mem_type, par);
-
- aty_st_pll_ct(PLL_YCLK_CNTL, 0x01, par);
- mdelay(15);
- aty_st_pll_ct(PLL_YCLK_CNTL, card->pll_yclk_cntl, par);
- mdelay(1);
-
- reset_clocks(par, &pll.ct, 0);
- mdelay(50);
- reset_clocks(par, &pll.ct, 0);
- mdelay(50);
-
- // enable extended register block
- aty_st_8(BUS_CNTL+3, 0x7B, par);
- mdelay(1);
- // disable extended register block
- aty_st_8(BUS_CNTL+3, 0x73, par);
-
- aty_st_8(CONFIG_STAT0, 0x80 | card->mem_type, par);
-
- // disable display requests, disable CRTC
- aty_st_8(CRTC_GEN_CNTL+3, 0x04, par);
- // disable mapping registers in VGA aperture
- aty_st_8(CONFIG_CNTL, aty_ld_8(CONFIG_CNTL, par) & ~0x04, par);
- mdelay(50);
- // enable display requests, enable CRTC
- aty_st_8(CRTC_GEN_CNTL+3, 0x02, par);
-
- // make GPIO's 14,15,16 all inputs
- aty_st_8(LCD_INDEX, 0x07, par);
- aty_st_8(LCD_DATA+3, 0x00, par);
-
- // enable the display
- aty_st_8(CRTC_GEN_CNTL, 0x00, par);
- mdelay(17);
- // reset the memory controller
- aty_st_8(GEN_TEST_CNTL+1, 0x02, par);
- mdelay(15);
- aty_st_8(GEN_TEST_CNTL+1, 0x00, par);
- mdelay(30);
-
- // enable extended register block
- aty_st_8(BUS_CNTL+3,
- (u8)(aty_ld_8(BUS_CNTL+3, par) | 0x08),
- par);
- // set FIFO size to 512 (PIO)
- aty_st_le32(GUI_CNTL,
- aty_ld_le32(GUI_CNTL, par) & ~0x3,
- par);
-
- // enable CRT and disable lcd
- aty_st_8(LCD_INDEX, 0x01, par);
- temp = aty_ld_le32(LCD_DATA, par);
- temp = (temp | 0x01) & ~0x02;
- aty_st_le32(LCD_DATA, temp, par);
- return 0;
-}
-
diff --git a/drivers/video/backlight/corgi_bl.c b/drivers/video/backlight/corgi_bl.c
index 6a219b2c77e..d0aaf450e8c 100644
--- a/drivers/video/backlight/corgi_bl.c
+++ b/drivers/video/backlight/corgi_bl.c
@@ -20,6 +20,7 @@
#include <linux/backlight.h>
#include <asm/arch/sharpsl.h>
+#include <asm/hardware/sharpsl_pm.h>
#define CORGI_DEFAULT_INTENSITY 0x1f
#define CORGI_LIMIT_MASK 0x0b
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index a5d09e159cd..6ee449858a5 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -6,7 +6,7 @@ menu "Console display driver support"
config VGA_CONSOLE
bool "VGA text console" if EMBEDDED || !X86
- depends on !ARCH_ACORN && !ARCH_EBSA110 && !4xx && !8xx && !SPARC && !M68K && !PARISC && !ARCH_VERSATILE
+ depends on !ARCH_ACORN && !ARCH_EBSA110 && !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && !ARCH_VERSATILE
default y
help
Saying Y here will allow you to use Linux in text mode through a
diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
index e65fc3ef763..eea422eb1ab 100644
--- a/drivers/video/console/bitblit.c
+++ b/drivers/video/console/bitblit.c
@@ -234,14 +234,14 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
}
}
-static void bit_cursor(struct vc_data *vc, struct fb_info *info,
- struct display *p, int mode, int softback_lines, int fg, int bg)
+static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+ int softback_lines, int fg, int bg)
{
struct fb_cursor cursor;
- struct fbcon_ops *ops = (struct fbcon_ops *) info->fbcon_par;
+ struct fbcon_ops *ops = info->fbcon_par;
unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
int w = (vc->vc_font.width + 7) >> 3, c;
- int y = real_y(p, vc->vc_y);
+ int y = real_y(ops->p, vc->vc_y);
int attribute, use_sw = (vc->vc_cursor_type & 0x10);
int err = 1;
char *src;
@@ -310,7 +310,7 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info,
}
if (cursor.set & FB_CUR_SETSIZE ||
- vc->vc_cursor_type != p->cursor_shape ||
+ vc->vc_cursor_type != ops->p->cursor_shape ||
ops->cursor_state.mask == NULL ||
ops->cursor_reset) {
char *mask = kmalloc(w*vc->vc_font.height, GFP_ATOMIC);
@@ -323,10 +323,10 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info,
kfree(ops->cursor_state.mask);
ops->cursor_state.mask = mask;
- p->cursor_shape = vc->vc_cursor_type;
+ ops->p->cursor_shape = vc->vc_cursor_type;
cursor.set |= FB_CUR_SETSHAPE;
- switch (p->cursor_shape & CUR_HWMASK) {
+ switch (ops->p->cursor_shape & CUR_HWMASK) {
case CUR_NONE:
cur_height = 0;
break;
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 3660e51b261..041d0698786 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -209,13 +209,13 @@ static irqreturn_t fb_vbl_detect(int irq, void *dummy, struct pt_regs *fp)
#endif
#ifdef CONFIG_FRAMEBUFFER_CONSOLE_ROTATION
-static inline void fbcon_set_rotation(struct fb_info *info, struct display *p)
+static inline void fbcon_set_rotation(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
if (!(info->flags & FBINFO_MISC_TILEBLITTING) &&
- p->con_rotate < 4)
- ops->rotate = p->con_rotate;
+ ops->p->con_rotate < 4)
+ ops->rotate = ops->p->con_rotate;
else
ops->rotate = 0;
}
@@ -265,7 +265,7 @@ static void fbcon_rotate_all(struct fb_info *info, u32 rotate)
fbcon_set_all_vcs(info);
}
#else
-static inline void fbcon_set_rotation(struct fb_info *info, struct display *p)
+static inline void fbcon_set_rotation(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
@@ -402,7 +402,7 @@ static void fb_flashcursor(void *private)
c = scr_readw((u16 *) vc->vc_pos);
mode = (!ops->cursor_flash || ops->cursor_state.enable) ?
CM_ERASE : CM_DRAW;
- ops->cursor(vc, info, p, mode, softback_lines, get_color(vc, info, c, 1),
+ ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1),
get_color(vc, info, c, 0));
release_console_sem();
}
@@ -647,29 +647,27 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
}
#ifdef CONFIG_FB_TILEBLITTING
-static void set_blitting_type(struct vc_data *vc, struct fb_info *info,
- struct display *p)
+static void set_blitting_type(struct vc_data *vc, struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
- ops->p = (p) ? p : &fb_display[vc->vc_num];
+ ops->p = &fb_display[vc->vc_num];
if ((info->flags & FBINFO_MISC_TILEBLITTING))
- fbcon_set_tileops(vc, info, p, ops);
+ fbcon_set_tileops(vc, info);
else {
- fbcon_set_rotation(info, ops->p);
+ fbcon_set_rotation(info);
fbcon_set_bitops(ops);
}
}
#else
-static void set_blitting_type(struct vc_data *vc, struct fb_info *info,
- struct display *p)
+static void set_blitting_type(struct vc_data *vc, struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
info->flags &= ~FBINFO_MISC_TILEBLITTING;
- ops->p = (p) ? p : &fb_display[vc->vc_num];
- fbcon_set_rotation(info, ops->p);
+ ops->p = &fb_display[vc->vc_num];
+ fbcon_set_rotation(info);
fbcon_set_bitops(ops);
}
#endif /* CONFIG_MISC_TILEBLITTING */
@@ -689,15 +687,14 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
err = -ENODEV;
if (!err) {
- ops = kmalloc(sizeof(struct fbcon_ops), GFP_KERNEL);
+ ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL);
if (!ops)
err = -ENOMEM;
}
if (!err) {
- memset(ops, 0, sizeof(struct fbcon_ops));
info->fbcon_par = ops;
- set_blitting_type(vc, info, NULL);
+ set_blitting_type(vc, info);
}
if (err) {
@@ -921,19 +918,18 @@ static const char *fbcon_startup(void)
return NULL;
}
- ops = kmalloc(sizeof(struct fbcon_ops), GFP_KERNEL);
+ ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL);
if (!ops) {
module_put(owner);
return NULL;
}
- memset(ops, 0, sizeof(struct fbcon_ops));
ops->currcon = -1;
ops->graphics = 1;
ops->cur_rotate = -1;
info->fbcon_par = ops;
p->con_rotate = rotate;
- set_blitting_type(vc, info, NULL);
+ set_blitting_type(vc, info);
if (info->fix.type != FB_TYPE_TEXT) {
if (fbcon_softback_size) {
@@ -1093,7 +1089,7 @@ static void fbcon_init(struct vc_data *vc, int init)
ops = info->fbcon_par;
p->con_rotate = rotate;
- set_blitting_type(vc, info, NULL);
+ set_blitting_type(vc, info);
cols = vc->vc_cols;
rows = vc->vc_rows;
@@ -1110,7 +1106,7 @@ static void fbcon_init(struct vc_data *vc, int init)
*
* We need to do it in fbcon_init() to prevent screen corruption.
*/
- if (CON_IS_VISIBLE(vc)) {
+ if (CON_IS_VISIBLE(vc) && vc->vc_mode == KD_TEXT) {
if (info->fbops->fb_set_par &&
!(ops->flags & FBCON_FLAGS_INIT))
info->fbops->fb_set_par(info);
@@ -1141,9 +1137,9 @@ static void fbcon_init(struct vc_data *vc, int init)
if (vc == svc && softback_buf)
fbcon_update_softback(vc);
- if (ops->rotate_font && ops->rotate_font(info, vc, p)) {
+ if (ops->rotate_font && ops->rotate_font(info, vc)) {
ops->rotate = FB_ROTATE_UR;
- set_blitting_type(vc, info, p);
+ set_blitting_type(vc, info);
}
}
@@ -1243,7 +1239,6 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct fbcon_ops *ops = info->fbcon_par;
- struct display *p = &fb_display[vc->vc_num];
int y;
int c = scr_readw((u16 *) vc->vc_pos);
@@ -1260,7 +1255,7 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
y = 0;
}
- ops->cursor(vc, info, p, mode, y, get_color(vc, info, c, 1),
+ ops->cursor(vc, info, mode, y, get_color(vc, info, c, 1),
get_color(vc, info, c, 0));
vbl_cursor_cnt = CURSOR_DRAW_DELAY;
}
@@ -1411,16 +1406,13 @@ static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count)
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct fbcon_ops *ops = info->fbcon_par;
struct display *p = &fb_display[vc->vc_num];
- int redraw = 0;
p->yscroll += count;
+
if (p->yscroll > p->vrows - vc->vc_rows) {
p->yscroll -= p->vrows - vc->vc_rows;
- redraw = 1;
- }
-
- if (redraw)
fbcon_redraw_move(vc, p, t + count, vc->vc_rows - count, t);
+ }
ops->var.xoffset = 0;
ops->var.yoffset = p->yscroll * vc->vc_font.height;
@@ -1462,16 +1454,13 @@ static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct fbcon_ops *ops = info->fbcon_par;
struct display *p = &fb_display[vc->vc_num];
- int redraw = 0;
p->yscroll -= count;
+
if (p->yscroll < 0) {
p->yscroll += p->vrows - vc->vc_rows;
- redraw = 1;
- }
-
- if (redraw)
fbcon_redraw_move(vc, p, t, vc->vc_rows - count, t + count);
+ }
ops->var.xoffset = 0;
ops->var.yoffset = p->yscroll * vc->vc_font.height;
@@ -1968,7 +1957,8 @@ static __inline__ void updatescrollmode(struct display *p,
divides(ypan, vc->vc_font.height) && vyres > yres;
int good_wrap = (cap & FBINFO_HWACCEL_YWRAP) &&
divides(ywrap, vc->vc_font.height) &&
- divides(vc->vc_font.height, vyres);
+ divides(vc->vc_font.height, vyres) &&
+ divides(vc->vc_font.height, yres);
int reading_fast = cap & FBINFO_READS_FAST;
int fast_copyarea = (cap & FBINFO_HWACCEL_COPYAREA) &&
!(cap & FBINFO_HWACCEL_DISABLED);
@@ -2107,16 +2097,19 @@ static int fbcon_switch(struct vc_data *vc)
info->flags & FBINFO_MISC_ALWAYS_SETPAR)) {
if (info->fbops->fb_set_par)
info->fbops->fb_set_par(info);
- fbcon_del_cursor_timer(old_info);
- fbcon_add_cursor_timer(info);
+
+ if (old_info != info) {
+ fbcon_del_cursor_timer(old_info);
+ fbcon_add_cursor_timer(info);
+ }
}
- set_blitting_type(vc, info, p);
+ set_blitting_type(vc, info);
ops->cursor_reset = 1;
- if (ops->rotate_font && ops->rotate_font(info, vc, p)) {
+ if (ops->rotate_font && ops->rotate_font(info, vc)) {
ops->rotate = FB_ROTATE_UR;
- set_blitting_type(vc, info, p);
+ set_blitting_type(vc, info);
}
vc->vc_can_do_color = (fb_get_color_depth(&info->var, &info->fix)!=1);
@@ -2739,7 +2732,7 @@ static void fbcon_modechanged(struct fb_info *info)
return;
p = &fb_display[vc->vc_num];
- set_blitting_type(vc, info, p);
+ set_blitting_type(vc, info);
if (CON_IS_VISIBLE(vc)) {
var_to_display(p, &info->var, info);
@@ -2781,7 +2774,7 @@ static void fbcon_set_all_vcs(struct fb_info *info)
continue;
p = &fb_display[vc->vc_num];
- set_blitting_type(vc, info, p);
+ set_blitting_type(vc, info);
var_to_display(p, &info->var, info);
cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
@@ -2806,6 +2799,8 @@ static void fbcon_set_all_vcs(struct fb_info *info)
fbcon_update_softback(vc);
}
}
+
+ ops->p = &fb_display[ops->currcon];
}
static int fbcon_mode_deleted(struct fb_info *info,
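
Two fbcon hunks above replace a kmalloc() followed by memset() with a single kzalloc() call; the two are equivalent, kzalloc() just hands back already-zeroed memory. A user-space stand-in for reference; kzalloc_stub() is an invented name and it drops the gfp flags argument.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in: kzalloc(size, flags) amounts to kmalloc(size, flags)
 * followed by memset(ptr, 0, size), done in one call. */
static void *kzalloc_stub(size_t size)
{
	void *p = malloc(size);

	if (p)
		memset(p, 0, size);
	return p;
}

int main(void)
{
	unsigned char *ops = kzalloc_stub(64);

	printf("%d\n", ops ? ops[0] : -1);	/* prints 0: memory starts zeroed */
	free(ops);
	return 0;
}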
diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h
index 6892e7ff34d..c38c3d8e7a7 100644
--- a/drivers/video/console/fbcon.h
+++ b/drivers/video/console/fbcon.h
@@ -62,12 +62,10 @@ struct fbcon_ops {
int fg, int bg);
void (*clear_margins)(struct vc_data *vc, struct fb_info *info,
int bottom_only);
- void (*cursor)(struct vc_data *vc, struct fb_info *info,
- struct display *p, int mode, int softback_lines,
- int fg, int bg);
+ void (*cursor)(struct vc_data *vc, struct fb_info *info, int mode,
+ int softback_lines, int fg, int bg);
int (*update_start)(struct fb_info *info);
- int (*rotate_font)(struct fb_info *info, struct vc_data *vc,
- struct display *p);
+ int (*rotate_font)(struct fb_info *info, struct vc_data *vc);
struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */
struct timer_list cursor_timer; /* Cursor timer */
struct fb_cursor cursor_state;
@@ -173,8 +171,7 @@ struct fbcon_ops {
#define SCROLL_PAN_REDRAW 0x005
#ifdef CONFIG_FB_TILEBLITTING
-extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info,
- struct display *p, struct fbcon_ops *ops);
+extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info);
#endif
extern void fbcon_set_bitops(struct fbcon_ops *ops);
extern int soft_cursor(struct fb_info *info, struct fb_cursor *cursor);
diff --git a/drivers/video/console/fbcon_ccw.c b/drivers/video/console/fbcon_ccw.c
index 4952b66ae20..990289a69b7 100644
--- a/drivers/video/console/fbcon_ccw.c
+++ b/drivers/video/console/fbcon_ccw.c
@@ -219,19 +219,18 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
}
}
-static void ccw_cursor(struct vc_data *vc, struct fb_info *info,
- struct display *p, int mode, int softback_lines,
- int fg, int bg)
+static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+ int softback_lines, int fg, int bg)
{
struct fb_cursor cursor;
- struct fbcon_ops *ops = (struct fbcon_ops *) info->fbcon_par;
+ struct fbcon_ops *ops = info->fbcon_par;
unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
int w = (vc->vc_font.height + 7) >> 3, c;
- int y = real_y(p, vc->vc_y);
+ int y = real_y(ops->p, vc->vc_y);
int attribute, use_sw = (vc->vc_cursor_type & 0x10);
int err = 1, dx, dy;
char *src;
- u32 vyres = GETVYRES(p->scrollmode, info);
+ u32 vyres = GETVYRES(ops->p->scrollmode, info);
if (!ops->fontbuffer)
return;
@@ -303,7 +302,7 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info,
}
if (cursor.set & FB_CUR_SETSIZE ||
- vc->vc_cursor_type != p->cursor_shape ||
+ vc->vc_cursor_type != ops->p->cursor_shape ||
ops->cursor_state.mask == NULL ||
ops->cursor_reset) {
char *tmp, *mask = kmalloc(w*vc->vc_font.width, GFP_ATOMIC);
@@ -323,10 +322,10 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info,
kfree(ops->cursor_state.mask);
ops->cursor_state.mask = mask;
- p->cursor_shape = vc->vc_cursor_type;
+ ops->p->cursor_shape = vc->vc_cursor_type;
cursor.set |= FB_CUR_SETSHAPE;
- switch (p->cursor_shape & CUR_HWMASK) {
+ switch (ops->p->cursor_shape & CUR_HWMASK) {
case CUR_NONE:
cur_height = 0;
break;
diff --git a/drivers/video/console/fbcon_cw.c b/drivers/video/console/fbcon_cw.c
index 6d92b845620..d44c5fa515f 100644
--- a/drivers/video/console/fbcon_cw.c
+++ b/drivers/video/console/fbcon_cw.c
@@ -203,19 +203,18 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
}
}
-static void cw_cursor(struct vc_data *vc, struct fb_info *info,
- struct display *p, int mode, int softback_lines,
- int fg, int bg)
+static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+ int softback_lines, int fg, int bg)
{
struct fb_cursor cursor;
- struct fbcon_ops *ops = (struct fbcon_ops *) info->fbcon_par;
+ struct fbcon_ops *ops = info->fbcon_par;
unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
int w = (vc->vc_font.height + 7) >> 3, c;
- int y = real_y(p, vc->vc_y);
+ int y = real_y(ops->p, vc->vc_y);
int attribute, use_sw = (vc->vc_cursor_type & 0x10);
int err = 1, dx, dy;
char *src;
- u32 vxres = GETVXRES(p->scrollmode, info);
+ u32 vxres = GETVXRES(ops->p->scrollmode, info);
if (!ops->fontbuffer)
return;
@@ -287,7 +286,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info,
}
if (cursor.set & FB_CUR_SETSIZE ||
- vc->vc_cursor_type != p->cursor_shape ||
+ vc->vc_cursor_type != ops->p->cursor_shape ||
ops->cursor_state.mask == NULL ||
ops->cursor_reset) {
char *tmp, *mask = kmalloc(w*vc->vc_font.width, GFP_ATOMIC);
@@ -307,10 +306,10 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info,
kfree(ops->cursor_state.mask);
ops->cursor_state.mask = mask;
- p->cursor_shape = vc->vc_cursor_type;
+ ops->p->cursor_shape = vc->vc_cursor_type;
cursor.set |= FB_CUR_SETSHAPE;
- switch (p->cursor_shape & CUR_HWMASK) {
+ switch (ops->p->cursor_shape & CUR_HWMASK) {
case CUR_NONE:
cur_height = 0;
break;
diff --git a/drivers/video/console/fbcon_rotate.c b/drivers/video/console/fbcon_rotate.c
index ec0dd8fe241..2dc091fbd5c 100644
--- a/drivers/video/console/fbcon_rotate.c
+++ b/drivers/video/console/fbcon_rotate.c
@@ -18,8 +18,7 @@
#include "fbcon.h"
#include "fbcon_rotate.h"
-static int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc,
- struct display *p)
+static int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc)
{
struct fbcon_ops *ops = info->fbcon_par;
int len, err = 0;
@@ -28,12 +27,12 @@ static int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc,
u8 *dst;
if (vc->vc_font.data == ops->fontdata &&
- p->con_rotate == ops->cur_rotate)
+ ops->p->con_rotate == ops->cur_rotate)
goto finished;
src = ops->fontdata = vc->vc_font.data;
- ops->cur_rotate = p->con_rotate;
- len = (!p->userfont) ? 256 : FNTCHARCNT(src);
+ ops->cur_rotate = ops->p->con_rotate;
+ len = (!ops->p->userfont) ? 256 : FNTCHARCNT(src);
s_cellsize = ((vc->vc_font.width + 7)/8) *
vc->vc_font.height;
d_cellsize = s_cellsize;
diff --git a/drivers/video/console/fbcon_rotate.h b/drivers/video/console/fbcon_rotate.h
index 1b8f92fdc6a..75be5ce53dc 100644
--- a/drivers/video/console/fbcon_rotate.h
+++ b/drivers/video/console/fbcon_rotate.h
@@ -11,8 +11,6 @@
#ifndef _FBCON_ROTATE_H
#define _FBCON_ROTATE_H
-#define FNTCHARCNT(fd) (((int *)(fd))[-3])
-
#define GETVYRES(s,i) ({ \
(s == SCROLL_REDRAW || s == SCROLL_MOVE) ? \
(i)->var.yres : (i)->var.yres_virtual; })
diff --git a/drivers/video/console/fbcon_ud.c b/drivers/video/console/fbcon_ud.c
index 9dd059e8b64..f56ed068a5b 100644
--- a/drivers/video/console/fbcon_ud.c
+++ b/drivers/video/console/fbcon_ud.c
@@ -249,20 +249,19 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
}
}
-static void ud_cursor(struct vc_data *vc, struct fb_info *info,
- struct display *p, int mode, int softback_lines,
- int fg, int bg)
+static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+ int softback_lines, int fg, int bg)
{
struct fb_cursor cursor;
- struct fbcon_ops *ops = (struct fbcon_ops *) info->fbcon_par;
+ struct fbcon_ops *ops = info->fbcon_par;
unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
int w = (vc->vc_font.width + 7) >> 3, c;
- int y = real_y(p, vc->vc_y);
+ int y = real_y(ops->p, vc->vc_y);
int attribute, use_sw = (vc->vc_cursor_type & 0x10);
int err = 1, dx, dy;
char *src;
- u32 vyres = GETVYRES(p->scrollmode, info);
- u32 vxres = GETVXRES(p->scrollmode, info);
+ u32 vyres = GETVYRES(ops->p->scrollmode, info);
+ u32 vxres = GETVXRES(ops->p->scrollmode, info);
if (!ops->fontbuffer)
return;
@@ -334,7 +333,7 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info,
}
if (cursor.set & FB_CUR_SETSIZE ||
- vc->vc_cursor_type != p->cursor_shape ||
+ vc->vc_cursor_type != ops->p->cursor_shape ||
ops->cursor_state.mask == NULL ||
ops->cursor_reset) {
char *mask = kmalloc(w*vc->vc_font.height, GFP_ATOMIC);
@@ -347,10 +346,10 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info,
kfree(ops->cursor_state.mask);
ops->cursor_state.mask = mask;
- p->cursor_shape = vc->vc_cursor_type;
+ ops->p->cursor_shape = vc->vc_cursor_type;
cursor.set |= FB_CUR_SETSHAPE;
- switch (p->cursor_shape & CUR_HWMASK) {
+ switch (ops->p->cursor_shape & CUR_HWMASK) {
case CUR_NONE:
cur_height = 0;
break;
diff --git a/drivers/video/console/softcursor.c b/drivers/video/console/softcursor.c
index 8529bf08db2..3957fc7523e 100644
--- a/drivers/video/console/softcursor.c
+++ b/drivers/video/console/softcursor.c
@@ -17,6 +17,8 @@
#include <asm/uaccess.h>
#include <asm/io.h>
+#include "fbcon.h"
+
int soft_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
unsigned int scan_align = info->pixmap.scan_align - 1;
diff --git a/drivers/video/console/tileblit.c b/drivers/video/console/tileblit.c
index cb25324a563..153352ca946 100644
--- a/drivers/video/console/tileblit.c
+++ b/drivers/video/console/tileblit.c
@@ -80,9 +80,8 @@ static void tile_clear_margins(struct vc_data *vc, struct fb_info *info,
return;
}
-static void tile_cursor(struct vc_data *vc, struct fb_info *info,
- struct display *p, int mode, int softback_lines,
- int fg, int bg)
+static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+ int softback_lines, int fg, int bg)
{
struct fb_tilecursor cursor;
int use_sw = (vc->vc_cursor_type & 0x01);
@@ -130,10 +129,10 @@ static int tile_update_start(struct fb_info *info)
return err;
}
-void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info,
- struct display *p, struct fbcon_ops *ops)
+void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info)
{
struct fb_tilemap map;
+ struct fbcon_ops *ops = info->fbcon_par;
ops->bmove = tile_bmove;
ops->clear = tile_clear;
@@ -142,13 +141,13 @@ void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info,
ops->cursor = tile_cursor;
ops->update_start = tile_update_start;
- if (p) {
+ if (ops->p) {
map.width = vc->vc_font.width;
map.height = vc->vc_font.height;
map.depth = 1;
- map.length = (p->userfont) ?
- FNTCHARCNT(p->fontdata) : 256;
- map.data = p->fontdata;
+ map.length = (ops->p->userfont) ?
+ FNTCHARCNT(ops->p->fontdata) : 256;
+ map.data = ops->p->fontdata;
info->tileops->fb_settile(info, &map);
}
}
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 167de397e4b..12d9329d140 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -56,6 +56,8 @@
static DEFINE_SPINLOCK(vga_lock);
static int cursor_size_lastfrom;
static int cursor_size_lastto;
+static u32 vgacon_xres;
+static u32 vgacon_yres;
static struct vgastate state;
#define BLANK 0x0020
@@ -69,7 +71,7 @@ static struct vgastate state;
* appear.
*/
#undef TRIDENT_GLITCH
-
+#define VGA_FONTWIDTH 8 /* VGA does not support fontwidths != 8 */
/*
* Interface used by the world
*/
@@ -325,6 +327,10 @@ static const char __init *vgacon_startup(void)
vga_scan_lines =
vga_video_font_height * vga_video_num_lines;
}
+
+ vgacon_xres = ORIG_VIDEO_COLS * VGA_FONTWIDTH;
+ vgacon_yres = vga_scan_lines;
+
return display_desc;
}
@@ -503,10 +509,18 @@ static int vgacon_doresize(struct vc_data *c,
{
unsigned long flags;
unsigned int scanlines = height * c->vc_font.height;
- u8 scanlines_lo, r7, vsync_end, mode;
+ u8 scanlines_lo, r7, vsync_end, mode, max_scan;
spin_lock_irqsave(&vga_lock, flags);
+ outb_p(VGA_CRTC_MAX_SCAN, vga_video_port_reg);
+ max_scan = inb_p(vga_video_port_val);
+
+ if (max_scan & 0x80)
+ scanlines <<= 1;
+
+ vgacon_xres = width * VGA_FONTWIDTH;
+ vgacon_yres = height * c->vc_font.height;
outb_p(VGA_CRTC_MODE, vga_video_port_reg);
mode = inb_p(vga_video_port_val);
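/*
 * Illustrative sketch (not part of the patch) of the scanline adjustment
 * performed in vgacon_doresize() above: when bit 7 of the CRTC Maximum
 * Scan Line register is set, every scan line is doubled, so the programmed
 * total has to be doubled as well before the CRTC is reprogrammed.
 */
static unsigned int effective_scanlines(unsigned int rows,
					unsigned int font_height,
					unsigned char max_scan)
{
	unsigned int scanlines = rows * font_height;

	if (max_scan & 0x80)		/* scan doubling enabled */
		scanlines <<= 1;
	return scanlines;
}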
@@ -551,6 +565,10 @@ static int vgacon_doresize(struct vc_data *c,
static int vgacon_switch(struct vc_data *c)
{
+ int x = c->vc_cols * VGA_FONTWIDTH;
+ int y = c->vc_rows * c->vc_font.height;
+ int rows = ORIG_VIDEO_LINES * vga_default_font_height/
+ c->vc_font.height;
/*
* We need to save screen size here as it's the only way
* we can spot the screen has been resized and we need to
@@ -566,10 +584,11 @@ static int vgacon_switch(struct vc_data *c)
scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf,
c->vc_screenbuf_size > vga_vram_size ?
vga_vram_size : c->vc_screenbuf_size);
- if (!(vga_video_num_columns % 2) &&
- vga_video_num_columns <= ORIG_VIDEO_COLS &&
- vga_video_num_lines <= (ORIG_VIDEO_LINES *
- vga_default_font_height) / c->vc_font.height)
+
+ if ((vgacon_xres != x || vgacon_yres != y) &&
+ (!(vga_video_num_columns % 2) &&
+ vga_video_num_columns <= ORIG_VIDEO_COLS &&
+ vga_video_num_lines <= rows))
vgacon_doresize(c, c->vc_cols, c->vc_rows);
}
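/*
 * A minimal sketch of the resize test introduced in vgacon_switch() above:
 * the adapter is reprogrammed only when the recorded screen size differs
 * from the size this console expects and the requested geometry still fits
 * the original VGA limits. The helper name and parameters are illustrative,
 * not kernel API.
 */
static int vga_needs_resize(int cur_x, int cur_y, int want_x, int want_y,
			    int cols, int max_cols, int lines, int max_lines)
{
	return (cur_x != want_x || cur_y != want_y) &&
	       !(cols % 2) && cols <= max_cols && lines <= max_lines;
}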
@@ -993,7 +1012,8 @@ static int vgacon_font_set(struct vc_data *c, struct console_font *font, unsigne
if (vga_video_type < VIDEO_TYPE_EGAM)
return -EINVAL;
- if (font->width != 8 || (charcount != 256 && charcount != 512))
+ if (font->width != VGA_FONTWIDTH ||
+ (charcount != 256 && charcount != 512))
return -EINVAL;
rc = vgacon_do_font_op(&state, font->data, 1, charcount == 512);
@@ -1010,7 +1030,7 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font)
if (vga_video_type < VIDEO_TYPE_EGAM)
return -EINVAL;
- font->width = 8;
+ font->width = VGA_FONTWIDTH;
font->height = c->vc_font.height;
font->charcount = vga_512_chars ? 512 : 256;
if (!font->data)
diff --git a/drivers/video/controlfb.c b/drivers/video/controlfb.c
index 403d17377f8..03798e9c882 100644
--- a/drivers/video/controlfb.c
+++ b/drivers/video/controlfb.c
@@ -133,12 +133,6 @@ static int controlfb_mmap(struct fb_info *info, struct file *file,
static int controlfb_set_par (struct fb_info *info);
static int controlfb_check_var (struct fb_var_screeninfo *var, struct fb_info *info);
-/*
- * inititialization
- */
-int control_init(void);
-void control_setup(char *);
-
/******************** Prototypes for internal functions **********************/
static void set_control_clock(unsigned char *params);
@@ -550,9 +544,46 @@ static void control_set_hardware(struct fb_info_control *p, struct fb_par_contro
/*
- * Called from fbmem.c for probing & initializing
+ *	Parse user specified options (`video=controlfb:')
*/
-int __init control_init(void)
+static void __init control_setup(char *options)
+{
+ char *this_opt;
+
+ if (!options || !*options)
+ return;
+
+ while ((this_opt = strsep(&options, ",")) != NULL) {
+ if (!strncmp(this_opt, "vmode:", 6)) {
+ int vmode = simple_strtoul(this_opt+6, NULL, 0);
+ if (vmode > 0 && vmode <= VMODE_MAX &&
+ control_mac_modes[vmode - 1].m[1] >= 0)
+ default_vmode = vmode;
+ } else if (!strncmp(this_opt, "cmode:", 6)) {
+ int depth = simple_strtoul(this_opt+6, NULL, 0);
+ switch (depth) {
+ case CMODE_8:
+ case CMODE_16:
+ case CMODE_32:
+ default_cmode = depth;
+ break;
+ case 8:
+ default_cmode = CMODE_8;
+ break;
+ case 15:
+ case 16:
+ default_cmode = CMODE_16;
+ break;
+ case 24:
+ case 32:
+ default_cmode = CMODE_32;
+ break;
+ }
+ }
+ }
+}
+
+static int __init control_init(void)
{
struct device_node *dp;
char *option = NULL;
@@ -651,15 +682,16 @@ static void __init find_vram_size(struct fb_info_control *p)
static int __init control_of_init(struct device_node *dp)
{
struct fb_info_control *p;
- unsigned long addr;
- int i;
+ struct resource fb_res, reg_res;
if (control_fb) {
printk(KERN_ERR "controlfb: only one control is supported\n");
return -ENXIO;
}
- if(dp->n_addrs != 2) {
- printk(KERN_ERR "expecting 2 address for control (got %d)", dp->n_addrs);
+
+ if (of_pci_address_to_resource(dp, 2, &fb_res) ||
+ of_pci_address_to_resource(dp, 1, &reg_res)) {
+ printk(KERN_ERR "can't get 2 addresses for control\n");
return -ENXIO;
}
p = kmalloc(sizeof(*p), GFP_KERNEL);
@@ -669,18 +701,12 @@ static int __init control_of_init(struct device_node *dp)
memset(p, 0, sizeof(*p));
/* Map in frame buffer and registers */
- for (i = 0; i < dp->n_addrs; ++i) {
- addr = dp->addrs[i].address;
- if (dp->addrs[i].size >= 0x800000) {
- p->fb_orig_base = addr;
- p->fb_orig_size = dp->addrs[i].size;
- /* use the big-endian aperture (??) */
- p->frame_buffer_phys = addr + 0x800000;
- } else {
- p->control_regs_phys = addr;
- p->control_regs_size = dp->addrs[i].size;
- }
- }
+ p->fb_orig_base = fb_res.start;
+ p->fb_orig_size = fb_res.end - fb_res.start + 1;
+ /* use the big-endian aperture (??) */
+ p->frame_buffer_phys = fb_res.start + 0x800000;
+ p->control_regs_phys = reg_res.start;
+ p->control_regs_size = reg_res.end - reg_res.start + 1;
if (!p->fb_orig_base ||
!request_mem_region(p->fb_orig_base,p->fb_orig_size,"controlfb")) {
@@ -1059,43 +1085,3 @@ static void control_cleanup(void)
}
-/*
- * Parse user speficied options (`video=controlfb:')
- */
-void __init control_setup(char *options)
-{
- char *this_opt;
-
- if (!options || !*options)
- return;
-
- while ((this_opt = strsep(&options, ",")) != NULL) {
- if (!strncmp(this_opt, "vmode:", 6)) {
- int vmode = simple_strtoul(this_opt+6, NULL, 0);
- if (vmode > 0 && vmode <= VMODE_MAX &&
- control_mac_modes[vmode - 1].m[1] >= 0)
- default_vmode = vmode;
- } else if (!strncmp(this_opt, "cmode:", 6)) {
- int depth = simple_strtoul(this_opt+6, NULL, 0);
- switch (depth) {
- case CMODE_8:
- case CMODE_16:
- case CMODE_32:
- default_cmode = depth;
- break;
- case 8:
- default_cmode = CMODE_8;
- break;
- case 15:
- case 16:
- default_cmode = CMODE_16;
- break;
- case 24:
- case 32:
- default_cmode = CMODE_32;
- break;
- }
- }
- }
-}
-
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c
index a9300f930ef..55a3514157e 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/cyber2000fb.c
@@ -50,7 +50,6 @@
#include <linux/init.h>
#include <asm/io.h>
-#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/uaccess.h>
diff --git a/drivers/video/cyblafb.c b/drivers/video/cyblafb.c
index 03fbe83d71a..2b972461a03 100644
--- a/drivers/video/cyblafb.c
+++ b/drivers/video/cyblafb.c
@@ -7,11 +7,12 @@
* tridentfb.c by Jani Monoses
* see files above for further credits
*
- * TODO:
- *
*/
#define CYBLAFB_DEBUG 0
+#define CYBLAFB_KD_GRAPHICS_QUIRK 1
+
+#define CYBLAFB_PIXMAPSIZE 8192
#include <linux/config.h>
#include <linux/module.h>
@@ -22,7 +23,7 @@
#include <asm/types.h>
#include <video/cyblafb.h>
-#define VERSION "0.54"
+#define VERSION "0.62"
struct cyblafb_par {
u32 pseudo_pal[16];
@@ -32,7 +33,9 @@ struct cyblafb_par {
static struct fb_fix_screeninfo cyblafb_fix __devinitdata = {
.id = "CyBla",
.type = FB_TYPE_PACKED_PIXELS,
+ .xpanstep = 1,
.ypanstep = 1,
+ .ywrapstep = 1,
.visual = FB_VISUAL_PSEUDOCOLOR,
.accel = FB_ACCEL_NONE,
};
@@ -43,8 +46,9 @@ static int ref __devinitdata = 75;
static int fp __devinitdata;
static int crt __devinitdata;
static int memsize __devinitdata;
-static int vesafb __devinitdata;
+static int basestride;
+static int vesafb;
static int nativex;
static int center;
static int stretch;
@@ -52,26 +56,50 @@ static int pciwb = 1;
static int pcirb = 1;
static int pciwr = 1;
static int pcirr = 1;
+static int disabled;
static int verbosity;
static int displaytype;
-static void __iomem * io_virt; // iospace virtual memory address
-
-module_param(mode,charp,0);
-module_param(bpp,int,0);
-module_param(ref,int,0);
-module_param(fp,int,0);
-module_param(crt,int,0);
-module_param(nativex,int,0);
-module_param(center,int,0);
-module_param(stretch,int,0);
-module_param(pciwb,int,0);
-module_param(pcirb,int,0);
-module_param(pciwr,int,0);
-module_param(pcirr,int,0);
-module_param(memsize,int,0);
-module_param(verbosity,int,0);
-module_param(vesafb,int,0);
+static void __iomem *io_virt; // iospace virtual memory address
+
+module_param(mode, charp, 0);
+module_param(bpp, int, 0);
+module_param(ref, int, 0);
+module_param(fp, int, 0);
+module_param(crt, int, 0);
+module_param(nativex, int, 0);
+module_param(center, int, 0);
+module_param(stretch, int, 0);
+module_param(pciwb, int, 0);
+module_param(pcirb, int, 0);
+module_param(pciwr, int, 0);
+module_param(pcirr, int, 0);
+module_param(memsize, int, 0);
+module_param(verbosity, int, 0);
+
+//=========================================
+//
+// Well, we have to fix the upper layers.
+// Until this has been done, we work around
+// the bugs.
+//
+//=========================================
+
+#if (CYBLAFB_KD_GRAPHICS_QUIRK && CYBLAFB_DEBUG)
+#define KD_GRAPHICS_RETURN(val)\
+	if (disabled) { \
+ printk("********\n");\
+ dump_stack();\
+ return val;\
+ }
+
+#elif CYBLAFB_KD_GRAPHICS_QUIRK
+#define KD_GRAPHICS_RETURN(val)\
+ if (disabled) {\
+ return val;\
+ }
+#else
+#define KD_GRAPHICS_RETURN(val)
+#endif
//=========================================
//
@@ -79,10 +107,10 @@ module_param(vesafb,int,0);
//
//=========================================
-#define out8(r,v) writeb(v,io_virt+r)
-#define out32(r,v) writel(v,io_virt+r)
-#define in8(r) readb(io_virt+r)
-#define in32(r) readl(io_virt+r)
+#define out8(r, v) writeb(v, io_virt + r)
+#define out32(r, v) writel(v, io_virt + r)
+#define in8(r) readb(io_virt + r)
+#define in32(r) readl(io_virt + r)
//======================================
//
@@ -90,47 +118,47 @@ module_param(vesafb,int,0);
//
//======================================
-static inline unsigned char read3X4(int reg)
+static inline u8 read3X4(u32 reg)
{
- out8(0x3D4,reg);
+ out8(0x3D4, reg);
return in8(0x3D5);
}
-static inline unsigned char read3C4(int reg)
+static inline u8 read3C4(u32 reg)
{
- out8(0x3C4,reg);
+ out8(0x3C4, reg);
return in8(0x3C5);
}
-static inline unsigned char read3CE(int reg)
+static inline u8 read3CE(u32 reg)
{
- out8(0x3CE,reg);
+ out8(0x3CE, reg);
return in8(0x3CF);
}
-static inline void write3X4(int reg,unsigned char val)
+static inline void write3X4(u32 reg, u8 val)
{
- out8(0x3D4,reg);
- out8(0x3D5,val);
+ out8(0x3D4, reg);
+ out8(0x3D5, val);
}
-static inline void write3C4(int reg,unsigned char val)
+static inline void write3C4(u32 reg, u8 val)
{
- out8(0x3C4,reg);
- out8(0x3C5,val);
+ out8(0x3C4, reg);
+ out8(0x3C5, val);
}
-static inline void write3CE(int reg,unsigned char val)
+static inline void write3CE(u32 reg, u8 val)
{
- out8(0x3CE,reg);
- out8(0x3CF,val);
+ out8(0x3CE, reg);
+ out8(0x3CF, val);
}
-static inline void write3C0(int reg,unsigned char val)
+static inline void write3C0(u32 reg, u8 val)
{
- in8(0x3DA); // read to reset index
- out8(0x3C0,reg);
- out8(0x3C0,val);
+ in8(0x3DA); // read to reset index
+ out8(0x3C0, reg);
+ out8(0x3C0, val);
}
//=================================================
@@ -139,58 +167,62 @@ static inline void write3C0(int reg,unsigned char val)
//
//=================================================
-static inline void enable_mmio(void)
+static void enable_mmio(void)
{
- int tmp;
+ u8 tmp;
- outb(0x0B,0x3C4);
+ outb(0x0B, 0x3C4);
inb(0x3C5); // Set NEW mode
- outb(SR0E,0x3C4); // write enable a lot of extended ports
- outb(0x80,0x3C5);
+ outb(SR0E, 0x3C4); // write enable a lot of extended ports
+ outb(0x80, 0x3C5);
- outb(SR11,0x3C4); // write enable those extended ports that
- outb(0x87,0x3C5); // are not affected by SR0E_New
+ outb(SR11, 0x3C4); // write enable those extended ports that
+ outb(0x87, 0x3C5); // are not affected by SR0E_New
- outb(CR1E,0x3d4); // clear write protect bit for port 0x3c2
- tmp=inb(0x3d5) & 0xBF;
- outb(CR1E,0x3d4);
- outb(tmp,0x3d5);
+ outb(CR1E, 0x3d4); // clear write protect bit for port 0x3c2
+ tmp = inb(0x3d5) & 0xBF;
+ outb(CR1E, 0x3d4);
+ outb(tmp, 0x3d5);
- outb(CR39,0x3D4);
- outb(inb(0x3D5)|0x01,0x3D5); // Enable mmio, everything else untouched
+ outb(CR39, 0x3D4);
+ outb(inb(0x3D5) | 0x01, 0x3D5); // Enable mmio
}
//=================================================
//
// Set pixel clock VCLK1
-// - multipliers set elswhere
-// - freq in units of 0.01 MHz
+//	 - multipliers set elsewhere
+// - freq in units of 0.01 MHz
+//
+// Hardware bug: SR18 >= 250 is broken for the
+// cyberblade/i1
//
//=================================================
static void set_vclk(struct cyblafb_par *par, int freq)
{
- u32 m,n,k;
- int f,fi,d,di;
- u8 lo=0,hi=0;
+ u32 m, n, k;
+ int f, fi, d, di;
+ u8 lo = 0, hi = 0;
d = 2000;
k = freq >= 10000 ? 0 : freq >= 5000 ? 1 : freq >= 2500 ? 2 : 3;
- for(m = 0;m<64;m++)
- for(n = 0;n<250;n++) { // max 249 is a hardware limit for cybla/i1 !
- fi = (int)(((5864727*(n+8))/((m+2)*(1<<k)))>>12);
- if ((di = abs(fi - freq)) < d) {
- d = di;
- f = fi;
- lo = (u8) n;
- hi = (u8) ((k<<6) | m);
+ for (m = 0; m < 64; m++)
+ for (n = 0; n < 250; n++) {
+ fi = (int)(((5864727 * (n + 8)) /
+ ((m + 2) * (1 << k))) >> 12);
+ if ((di = abs(fi - freq)) < d) {
+ d = di;
+ f = fi;
+ lo = (u8) n;
+ hi = (u8) ((k << 6) | m);
+ }
}
- }
- write3C4(SR19,hi);
- write3C4(SR18,lo);
- if(verbosity > 1)
+ write3C4(SR19, hi);
+ write3C4(SR18, lo);
+ if (verbosity > 0)
output("pixclock = %d.%02d MHz, k/m/n %x %x %x\n",
- freq/100,freq%100,(hi&0xc0)>>6,hi&0x3f,lo);
+ freq / 100, freq % 100, (hi & 0xc0) >> 6, hi & 0x3f, lo);
}
//================================================
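/*
 * Stand-alone sketch of the exhaustive m/n/k search performed by set_vclk()
 * above (constants copied from the driver, everything else illustrative):
 * pick the divider pair whose resulting frequency, in units of 0.01 MHz,
 * is closest to the request; n stops at 249 because SR18 >= 250 is broken.
 */
#include <stdlib.h>

static void find_vclk(int freq, unsigned char *lo, unsigned char *hi)
{
	int best = 2000, fi, m, n, k;

	*lo = *hi = 0;
	k = freq >= 10000 ? 0 : freq >= 5000 ? 1 : freq >= 2500 ? 2 : 3;
	for (m = 0; m < 64; m++)
		for (n = 0; n < 250; n++) {
			fi = (int)(((5864727LL * (n + 8)) /
				    ((m + 2) * (1 << k))) >> 12);
			if (abs(fi - freq) < best) {
				best = abs(fi - freq);
				*lo = (unsigned char)n;
				*hi = (unsigned char)((k << 6) | m);
			}
		}
}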
@@ -199,83 +231,83 @@ static void set_vclk(struct cyblafb_par *par, int freq)
//
//================================================
-static void cyblafb_setup_GE(int pitch,int bpp)
+static void cyblafb_setup_GE(int pitch, int bpp)
{
- int base = (pitch>>3)<<20;
+ KD_GRAPHICS_RETURN();
switch (bpp) {
- case 8: base |= (0<<29); break;
- case 15: base |= (5<<29); break;
- case 16: base |= (1<<29); break;
- case 24:
- case 32: base |= (2<<29); break;
+ case 8:
+ basestride = ((pitch >> 3) << 20) | (0 << 29);
+ break;
+ case 15:
+ basestride = ((pitch >> 3) << 20) | (5 << 29);
+ break;
+ case 16:
+ basestride = ((pitch >> 3) << 20) | (1 << 29);
+ break;
+ case 24:
+ case 32:
+ basestride = ((pitch >> 3) << 20) | (2 << 29);
+ break;
}
- write3X4(CR36,0x90); // reset GE
- write3X4(CR36,0x80); // enable GE
-
- out32(GE24,1<<7); // reset all GE pointers
- out32(GE24,0);
-
- write3X4(CR2D,0x00); // GE Timinigs, no delays
-
- out32(GEB8,base); // Destination Stride / Buffer Base 0, p 133
- out32(GEBC,base); // Destination Stride / Buffer Base 1, p 133
- out32(GEC0,base); // Destination Stride / Buffer Base 2, p 133
- out32(GEC4,base); // Destination Stride / Buffer Base 3, p 133
- out32(GEC8,base); // Source Stride / Buffer Base 0, p 133
- out32(GECC,base); // Source Stride / Buffer Base 1, p 133
- out32(GED0,base); // Source Stride / Buffer Base 2, p 133
- out32(GED4,base); // Source Stride / Buffer Base 3, p 133
- out32(GE6C,0); // Pattern and Style, p 129, ok
+ write3X4(CR36, 0x90); // reset GE
+ write3X4(CR36, 0x80); // enable GE
+ out32(GE24, 1 << 7); // reset all GE pointers by toggling
+ out32(GE24, 0); // d7 of GE24
+	write3X4(CR2D, 0x00);	// GE Timings, no delays
+ out32(GE6C, 0); // Pattern and Style, p 129, ok
}
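/*
 * Sketch of the stride/format word computed by cyblafb_setup_GE() above:
 * bits 20..28 carry the pitch argument divided by 8, bits 29..31 select
 * the pixel format. The helper is illustrative only.
 */
static unsigned int ge_basestride(unsigned int pitch, int bpp)
{
	unsigned int fmt;

	switch (bpp) {
	case 8:  fmt = 0; break;
	case 15: fmt = 5; break;
	case 16: fmt = 1; break;
	case 24:
	case 32: fmt = 2; break;
	default: fmt = 0; break;
	}
	return ((pitch >> 3) << 20) | (fmt << 29);
}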
//=====================================================================
//
-// Although this is a .fb_sync function that could be enabled in
-// cyblafb_ops, we do not include it there. We sync immediately before
-// new GE operations to improve performance.
+// Cyberblade specific syncing
+//
+// A timeout might be caused by disabled mmio.
+// Cause:
+// - bit CR39 & 1 == 0 upon return, X trident driver bug
+// - kdm bug (KD_GRAPHICS not set on first switch)
+// - kernel design flaw (it believes in the correctness
+//	   of kdm/X)
+// First we try to sync ignoring that problem, as most of the
+// time that will succeed immediately and the enable_mmio()
+// would only degrade performance.
//
//=====================================================================
static int cyblafb_sync(struct fb_info *info)
{
- int status, i=100000;
- while( ((status=in32(GE20)) & 0xFA800000) && i != 0)
+ u32 status, i = 100000;
+
+ KD_GRAPHICS_RETURN(0);
+
+ while (((status = in32(GE20)) & 0xFe800000) && i != 0)
i--;
if (i == 0) {
- // The timeout might be caused by disabled mmio.
- // Cause:
- // - bit CR39 & 1 == 0 upon return, X trident driver bug
- // - kdm bug (KD_GRAPHICS not set on first switch)
- // - kernel design flaw (it believes in the correctness
- // of kdm/X
- // So we make sure that mmio is enabled first ...
enable_mmio();
-// show_trace(NULL,&status);
- i=1000000;
- while( ((status=in32(GE20)) & 0xFA800000) && i != 0)
+ i = 1000000;
+ while (((status = in32(GE20)) & 0xFA800000) && i != 0)
i--;
if (i == 0) {
- output("GE Timeout, status: %x\n",status);
- if(status & 0x80000000)
+ output("GE Timeout, status: %x\n", status);
+ if (status & 0x80000000)
output("Bresenham Engine : Busy\n");
- if(status & 0x40000000)
+ if (status & 0x40000000)
output("Setup Engine : Busy\n");
- if(status & 0x20000000)
+ if (status & 0x20000000)
output("SP / DPE : Busy\n");
- if(status & 0x10000000)
+ if (status & 0x10000000)
output("Memory Interface : Busy\n");
- if(status & 0x08000000)
+ if (status & 0x08000000)
output("Com Lst Proc : Busy\n");
- if(status & 0x04000000)
+ if (status & 0x04000000)
output("Block Write : Busy\n");
- if(status & 0x02000000)
+ if (status & 0x02000000)
output("Command Buffer : Full\n");
- if(status & 0x01000000)
+ if (status & 0x01000000)
output("RESERVED : Busy\n");
- if(status & 0x00800000)
+ if (status & 0x00800000)
output("PCI Write Buffer : Busy\n");
cyblafb_setup_GE(info->var.xres,
info->var.bits_per_pixel);
@@ -291,142 +323,193 @@ static int cyblafb_sync(struct fb_info *info)
//
//==============================
-static void cyblafb_fillrect(struct fb_info * info,
- const struct fb_fillrect *fr)
+static void cyblafb_fillrect(struct fb_info *info, const struct fb_fillrect *fr)
{
- int bpp = info->var.bits_per_pixel;
- int col;
+ u32 bpp = info->var.bits_per_pixel, col, desty, height;
+
+ KD_GRAPHICS_RETURN();
switch (bpp) {
- default:
- case 8: col = fr->color;
- col |= col <<8;
- col |= col <<16;
- break;
- case 16: col = ((u32 *)(info->pseudo_palette))[fr->color];
- col |= col <<16;
- break;
- case 32: col = ((u32 *)(info->pseudo_palette))[fr->color];
- break;
+ default:
+ case 8:
+ col = fr->color;
+ col |= col << 8;
+ col |= col << 16;
+ break;
+ case 16:
+ col = ((u32 *) (info->pseudo_palette))[fr->color];
+ col |= col << 16;
+ break;
+ case 32:
+ col = ((u32 *) (info->pseudo_palette))[fr->color];
+ break;
}
- cyblafb_sync(info);
-
- out32(GE60,col);
- out32(GE48,fr->rop ? 0x66:ROP_S);
- out32(GE44,0x20000000|1<<19|1<<4|2<<2);
- out32(GE08,point(fr->dx,fr->dy));
- out32(GE0C,point(fr->dx+fr->width-1,fr->dy+fr->height-1));
-
+ desty = fr->dy;
+ height = fr->height;
+ while (height) {
+ out32(GEB8, basestride | ((desty * info->var.xres_virtual *
+ bpp) >> 6));
+ out32(GE60, col);
+ out32(GE48, fr->rop ? 0x66 : ROP_S);
+ out32(GE44, 0x20000000 | 1 << 19 | 1 << 4 | 2 << 2);
+ out32(GE08, point(fr->dx, 0));
+ out32(GE0C, point(fr->dx + fr->width - 1,
+ height > 4096 ? 4095 : height - 1));
+ if (likely(height <= 4096))
+ return;
+ desty += 4096;
+ height -= 4096;
+ }
}
-//==============================
+//================================================
//
// Cyberblade specific copyarea
//
-//==============================
+// This function silently assumes that it never
+// will be called with width or height exceeding
+// 4096.
+//
+//================================================
-static void cyblafb_copyarea(struct fb_info *info,
- const struct fb_copyarea *ca)
+static void cyblafb_copyarea(struct fb_info *info, const struct fb_copyarea *ca)
{
- __u32 s1,s2,d1,d2;
- int direction;
+ u32 s1, s2, d1, d2, direction;
+
+ KD_GRAPHICS_RETURN();
+
+ s1 = point(ca->sx, 0);
+ s2 = point(ca->sx + ca->width - 1, ca->height - 1);
+ d1 = point(ca->dx, 0);
+ d2 = point(ca->dx + ca->width - 1, ca->height - 1);
- s1 = point(ca->sx,ca->sy);
- s2 = point(ca->sx+ca->width-1,ca->sy+ca->height-1);
- d1 = point(ca->dx,ca->dy);
- d2 = point(ca->dx+ca->width-1,ca->dy+ca->height-1);
if ((ca->sy > ca->dy) || ((ca->sy == ca->dy) && (ca->sx > ca->dx)))
direction = 0;
else
direction = 2;
- cyblafb_sync(info);
-
- out32(GE44,0xa0000000|1<<19|1<<2|direction);
- out32(GE00,direction?s2:s1);
- out32(GE04,direction?s1:s2);
- out32(GE08,direction?d2:d1);
- out32(GE0C,direction?d1:d2);
-
+ out32(GEB8, basestride | ((ca->dy * info->var.xres_virtual *
+ info->var.bits_per_pixel) >> 6));
+ out32(GEC8, basestride | ((ca->sy * info->var.xres_virtual *
+ info->var.bits_per_pixel) >> 6));
+ out32(GE44, 0xa0000000 | 1 << 19 | 1 << 2 | direction);
+ out32(GE00, direction ? s2 : s1);
+ out32(GE04, direction ? s1 : s2);
+ out32(GE08, direction ? d2 : d1);
+ out32(GE0C, direction ? d1 : d2);
}
//=======================================================================
//
// Cyberblade specific imageblit
//
-// Accelerated for the most usual case, blitting 1-bit deep character
-// character images. Everything else is passed to the generic imageblit.
+// Accelerated for the most usual case, blitting 1-bit deep
+// character images. Everything else is passed to the generic imageblit
+// unless it is so insane that it is better to printk an alert.
+//
+// Hardware bug: _Never_ blit across pixel column 2048, that will lock
+// the system. We split those blit requests into three blitting
+// operations.
//
//=======================================================================
static void cyblafb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
-
u32 fgcol, bgcol;
+ u32 *pd = (u32 *) image->data;
+ u32 bpp = info->var.bits_per_pixel;
- int i;
- int bpp = info->var.bits_per_pixel;
- int index = 0;
- int index_end=image->height * image->width / 8;
- int width_dds=image->width / 32;
- int width_dbs=image->width % 32;
-
- if (image->depth != 1 || bpp < 8 || bpp > 32 || bpp % 8 != 0 ||
- image->width % 8 != 0 || image->width == 0 || image->height == 0) {
- cfb_imageblit(info,image);
+ KD_GRAPHICS_RETURN();
+
+	// Used only for drawing the penguin (image->depth > 1)
+ if (image->depth != 1) {
+ cfb_imageblit(info, image);
+ return;
+ }
+ // That should never happen, but it would be fatal
+ if (image->width == 0 || image->height == 0) {
+ output("imageblit: width/height 0 detected\n");
return;
}
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
- fgcol = ((u32*)(info->pseudo_palette))[image->fg_color];
- bgcol = ((u32*)(info->pseudo_palette))[image->bg_color];
+ fgcol = ((u32 *) (info->pseudo_palette))[image->fg_color];
+ bgcol = ((u32 *) (info->pseudo_palette))[image->bg_color];
} else {
fgcol = image->fg_color;
bgcol = image->bg_color;
}
switch (bpp) {
- case 8:
- fgcol |= fgcol <<8; fgcol |= fgcol <<16;
- bgcol |= bgcol <<8; bgcol |= bgcol <<16;
- break;
- case 16:
- fgcol |= fgcol <<16;
- bgcol |= bgcol <<16;
- break;
- default:
- break;
+ case 8:
+ fgcol |= fgcol << 8;
+ bgcol |= bgcol << 8;
+ case 16:
+ fgcol |= fgcol << 16;
+ bgcol |= bgcol << 16;
+ default:
+ break;
}
- cyblafb_sync(info);
-
- out32(GE60,fgcol);
- out32(GE64,bgcol);
- out32(GE44,0xa0000000 | 1<<20 | 1<<19);
- out32(GE08,point(image->dx,image->dy));
- out32(GE0C,point(image->dx+image->width-1,image->dy+image->height-1));
+ out32(GEB8, basestride | ((image->dy * info->var.xres_virtual *
+ bpp) >> 6));
+ out32(GE60, fgcol);
+ out32(GE64, bgcol);
+
+ if (!(image->dx < 2048 && (image->dx + image->width - 1) >= 2048)) {
+ u32 dds = ((image->width + 31) >> 5) * image->height;
+ out32(GE44, 0xa0000000 | 1 << 20 | 1 << 19);
+ out32(GE08, point(image->dx, 0));
+ out32(GE0C, point(image->dx + image->width - 1,
+ image->height - 1));
+ while (dds--)
+ out32(GE9C, *pd++);
+ } else {
+ int i, j;
+ u32 ddstotal = (image->width + 31) >> 5;
+ u32 ddsleft = (2048 - image->dx + 31) >> 5;
+ u32 skipleft = ddstotal - ddsleft;
+
+ out32(GE44, 0xa0000000 | 1 << 20 | 1 << 19);
+ out32(GE08, point(image->dx, 0));
+ out32(GE0C, point(2048 - 1, image->height - 1));
+ for (i = 0; i < image->height; i++) {
+ for (j = 0; j < ddsleft; j++)
+ out32(GE9C, *pd++);
+ pd += skipleft;
+ }
- while(index < index_end) {
- const char *p = image->data + index;
- for(i=0;i<width_dds;i++) {
- out32(GE9C,*(u32*)p);
- p+=4;
- index+=4;
+ if (image->dx % 32) {
+ out32(GE44, 0xa0000000 | 1 << 20 | 1 << 19);
+ out32(GE08, point(2048, 0));
+ if (image->width > ddsleft << 5)
+ out32(GE0C, point(image->dx + (ddsleft << 5) -
+ 1, image->height - 1));
+ else
+ out32(GE0C, point(image->dx + image->width - 1,
+ image->height - 1));
+ pd = ((u32 *) image->data) + ddstotal - skipleft - 1;
+ for (i = 0; i < image->height; i++) {
+ out32(GE9C, swab32(swab32(*pd) << ((32 -
+ (image->dx & 31)) & 31)));
+ pd += ddstotal;
+ }
}
- switch(width_dbs) {
- case 0: break;
- case 8: out32(GE9C,*(u8*)p);
- index+=1;
- break;
- case 16: out32(GE9C,*(u16*)p);
- index+=2;
- break;
- case 24: out32(GE9C,*(u16*)p | *(u8*)(p+2)<<16);
- index+=3;
- break;
+
+ if (skipleft) {
+ out32(GE44, 0xa0000000 | 1 << 20 | 1 << 19);
+ out32(GE08, point(image->dx + (ddsleft << 5), 0));
+ out32(GE0C, point(image->dx + image->width - 1,
+ image->height - 1));
+ pd = (u32 *) image->data;
+ for (i = 0; i < image->height; i++) {
+ pd += ddsleft;
+ for (j = 0; j < skipleft; j++)
+ out32(GE9C, *pd++);
+ }
}
}
}
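/*
 * Sketch of the split bookkeeping used by cyblafb_imageblit() above for
 * blits that would cross pixel column 2048 (dx < 2048 <= dx + width - 1):
 * the 1-bit source is consumed in 32-pixel double words, ddsleft of them
 * land left of column 2048 and skipleft to its right. Illustrative only.
 */
static void split_at_2048(unsigned int dx, unsigned int width,
			  unsigned int *ddstotal, unsigned int *ddsleft,
			  unsigned int *skipleft)
{
	*ddstotal = (width + 31) >> 5;		/* dwords per scanline    */
	*ddsleft  = (2048 - dx + 31) >> 5;	/* dwords left of x=2048  */
	*skipleft = *ddstotal - *ddsleft;	/* dwords right of x=2048 */
}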
@@ -443,7 +526,6 @@ static int cyblafb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
int bpp = var->bits_per_pixel;
- int s,t,maxvyres;
//
// we try to support 8, 16, 24 and 32 bpp modes,
@@ -453,9 +535,9 @@ static int cyblafb_check_var(struct fb_var_screeninfo *var,
// (This is what tridentfb does ... will be changed in the future)
//
//
- if ( bpp % 8 != 0 || bpp < 8 || bpp >32)
+ if (bpp % 8 != 0 || bpp < 8 || bpp > 32)
bpp = 8;
- if (bpp == 24 )
+ if (bpp == 24)
bpp = var->bits_per_pixel = 32;
//
@@ -472,65 +554,93 @@ static int cyblafb_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
//
- // xres != xres_virtual is broken, fail if such an
- // unusual mode is requested
+ // we do not allow vclk to exceed 230 MHz. If the requested
+ // vclk is too high, we default to 200 MHz
//
- if (var->xres != var->xres_virtual)
- return -EINVAL;
+ if ((bpp == 32 ? 200000000 : 100000000) / var->pixclock > 23000)
+ var->pixclock = (bpp == 32 ? 200000000 : 100000000) / 20000;
//
- // we do not allow vclk to exceed 230 MHz
+ // enforce (h|v)sync_len limits
//
- if ((bpp==32 ? 200000000 : 100000000) / var->pixclock > 23000)
- return -EINVAL;
+ var->hsync_len &= ~7;
+	if (var->hsync_len > 248)
+ var->hsync_len = 248;
+
+ var->vsync_len &= 15;
//
- // calc max yres_virtual that would fit in memory
- // and max yres_virtual that could be used for scrolling
- // and use minimum of the results as maxvyres
- //
- // adjust vyres_virtual to maxvyres if necessary
- // fail if requested yres is bigger than maxvyres
+ // Enforce horizontal and vertical hardware limits.
+ // 1600x1200 is mentioned as a maximum, but higher resolutions could
+ // work with slow refresh, small margins and short sync.
//
- s = (0x1fffff / (var->xres * bpp/8)) + var->yres;
- t = info->fix.smem_len / (var->xres * bpp/8);
- maxvyres = t < s ? t : s;
- if (maxvyres < var->yres_virtual)
- var->yres_virtual=maxvyres;
- if (maxvyres < var->yres)
+ var->xres &= ~7;
+
+ if (((var->xres + var->left_margin + var->right_margin +
+ var->hsync_len) > (bpp == 32 ? 2040 : 4088)) ||
+ ((var->yres + var->upper_margin + var->lower_margin +
+ var->vsync_len) > 2047))
return -EINVAL;
- switch (bpp) {
- case 8:
- var->red.offset = 0;
- var->green.offset = 0;
- var->blue.offset = 0;
- var->red.length = 6;
- var->green.length = 6;
- var->blue.length = 6;
- break;
- case 16:
- var->red.offset = 11;
- var->green.offset = 5;
- var->blue.offset = 0;
- var->red.length = 5;
- var->green.length = 6;
- var->blue.length = 5;
- break;
- case 32:
- var->red.offset = 16;
- var->green.offset = 8;
- var->blue.offset = 0;
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
- break;
- default:
+ if ((var->xres > 1600) || (var->yres > 1200))
+ output("Mode %dx%d exceeds documented limits.\n",
+ var->xres, var->yres);
+ //
+ // try to be smart about (x|y)res_virtual problems.
+ //
+ if (var->xres > var->xres_virtual)
+ var->xres_virtual = var->xres;
+ if (var->yres > var->yres_virtual)
+ var->yres_virtual = var->yres;
+
+ if (bpp == 8 || bpp == 16) {
+ if (var->xres_virtual > 4088)
+ var->xres_virtual = 4088;
+ } else {
+ if (var->xres_virtual > 2040)
+ var->xres_virtual = 2040;
+ }
+ var->xres_virtual &= ~7;
+ while (var->xres_virtual * var->yres_virtual * bpp / 8 >
+ info->fix.smem_len) {
+ if (var->yres_virtual > var->yres)
+ var->yres_virtual--;
+ else if (var->xres_virtual > var->xres)
+ var->xres_virtual -= 8;
+ else
return -EINVAL;
}
- return 0;
+ switch (bpp) {
+ case 8:
+ var->red.offset = 0;
+ var->green.offset = 0;
+ var->blue.offset = 0;
+ var->red.length = 6;
+ var->green.length = 6;
+ var->blue.length = 6;
+ break;
+ case 16:
+ var->red.offset = 11;
+ var->green.offset = 5;
+ var->blue.offset = 0;
+ var->red.length = 5;
+ var->green.length = 6;
+ var->blue.length = 5;
+ break;
+ case 32:
+ var->red.offset = 16;
+ var->green.offset = 8;
+ var->blue.offset = 0;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
}
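/*
 * Sketch of the fitting loop introduced in cyblafb_check_var() above:
 * shrink the virtual height first, then the virtual width in 8-pixel
 * steps, until the virtual frame fits into the advertised video memory.
 * Names are illustrative.
 */
static int fit_virtual(unsigned int *vx, unsigned int *vy,
		       unsigned int xres, unsigned int yres,
		       unsigned int bpp, unsigned long smem_len)
{
	while ((unsigned long)*vx * *vy * bpp / 8 > smem_len) {
		if (*vy > yres)
			(*vy)--;
		else if (*vx > xres)
			*vx -= 8;
		else
			return -1;	/* cannot be made to fit */
	}
	return 0;
}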
//=====================================================================
@@ -543,23 +653,25 @@ static int cyblafb_check_var(struct fb_var_screeninfo *var,
// it, so it is also safe to be used here. BTW: datasheet CR0E on page
// 90 really is CR1E, the real CRE is documented on page 72.
//
+// BUT:
+//
+// As of internal version 0.60 we do not use vga panning any longer.
+// Vga panning did not allow us the use of all available video memory
+// and thus prevented ywrap scrolling. We do use the "right view"
+// register now.
+//
+//
//=====================================================================
static int cyblafb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- unsigned int offset;
+ KD_GRAPHICS_RETURN(0);
- offset=(var->xoffset+(var->yoffset*var->xres))*var->bits_per_pixel/32;
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
-
- write3X4(CR0D,offset & 0xFF);
- write3X4(CR0C,(offset & 0xFF00) >> 8);
- write3X4(CR1E,(read3X4(CR1E) & 0xDF) | ((offset & 0x10000) >> 11));
- write3X4(CR27,(read3X4(CR27) & 0xF8) | ((offset & 0xE0000) >> 17));
- write3X4(CR2B,(read3X4(CR2B) & 0xDF) | ((offset & 0x100000) >> 15));
-
+ out32(GE10, 0x80000000 | ((var->xoffset + (var->yoffset *
+ var->xres_virtual)) * var->bits_per_pixel / 32));
return 0;
}
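/*
 * Sketch of the "right view" value written to GE10 above: the display
 * start offset is expressed in 32-bit units of the virtual scanline, with
 * the top bit acting as the enable. Illustrative helper only.
 */
static unsigned int ge10_offset(unsigned int xoffset, unsigned int yoffset,
				unsigned int xres_virtual, unsigned int bpp)
{
	return 0x80000000u |
	       ((xoffset + yoffset * xres_virtual) * bpp / 32);
}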
@@ -578,56 +690,96 @@ static void regdump(struct cyblafb_par *par)
return;
printk("\n");
- for(i=0; i<=0xff; i++) {
- outb(i,0x3d4);
- printk("CR%02x=%02x ",i,inb(0x3d5));
- if (i%16==15)
+ for (i = 0; i <= 0xff; i++) {
+ outb(i, 0x3d4);
+ printk("CR%02x=%02x ", i, inb(0x3d5));
+ if (i % 16 == 15)
printk("\n");
}
- outb(0x30,0x3ce);
- outb(inb(0x3cf) | 0x40,0x3cf);
- for(i=0; i<=0x1f; i++) {
- if (i==0 || (i>2 && i<8) || i==0x10 || i==0x11 || i==0x16) {
- outb(i,0x3d4);
- printk("CR%02x=%02x ",i,inb(0x3d5));
+ outb(0x30, 0x3ce);
+ outb(inb(0x3cf) | 0x40, 0x3cf);
+ for (i = 0; i <= 0x1f; i++) {
+ if (i == 0 || (i > 2 && i < 8) || i == 0x10 || i == 0x11
+ || i == 0x16) {
+ outb(i, 0x3d4);
+ printk("CR%02x=%02x ", i, inb(0x3d5));
} else
printk("------- ");
- if (i%16==15)
+ if (i % 16 == 15)
printk("\n");
}
- outb(0x30,0x3ce);
- outb(inb(0x3cf) & 0xbf,0x3cf);
+ outb(0x30, 0x3ce);
+ outb(inb(0x3cf) & 0xbf, 0x3cf);
printk("\n");
- for(i=0; i<=0x7f; i++) {
- outb(i,0x3ce);
- printk("GR%02x=%02x ",i,inb(0x3cf));
- if (i%16==15)
+ for (i = 0; i <= 0x7f; i++) {
+ outb(i, 0x3ce);
+ printk("GR%02x=%02x ", i, inb(0x3cf));
+ if (i % 16 == 15)
printk("\n");
}
printk("\n");
- for(i=0; i<=0xff; i++) {
- outb(i,0x3c4);
- printk("SR%02x=%02x ",i,inb(0x3c5));
- if (i%16==15)
+ for (i = 0; i <= 0xff; i++) {
+ outb(i, 0x3c4);
+ printk("SR%02x=%02x ", i, inb(0x3c5));
+ if (i % 16 == 15)
printk("\n");
}
printk("\n");
- for(i=0; i <= 0x1F; i++) {
- inb(0x3da); // next access is index!
- outb(i,0x3c0);
- printk("AR%02x=%02x ",i,inb(0x3c1));
- if (i%16==15)
+ for (i = 0; i <= 0x1F; i++) {
+ inb(0x3da); // next access is index!
+ outb(i, 0x3c0);
+ printk("AR%02x=%02x ", i, inb(0x3c1));
+ if (i % 16 == 15)
printk("\n");
}
printk("\n");
- inb(0x3DA); // reset internal flag to 3c0 index
- outb(0x20,0x3C0); // enable attr
+ inb(0x3DA); // reset internal flag to 3c0 index
+ outb(0x20, 0x3C0); // enable attr
+
+ return;
+}
+
+//=======================================================================
+//
+// Save State
+//
+// This function is called while a switch to KD_TEXT is in progress,
+// before any of the other functions are called.
+//
+//=======================================================================
+static void cyblafb_save_state(struct fb_info *info)
+{
+ struct cyblafb_par *par = info->par;
+ if (verbosity > 0)
+ output("Switching to KD_TEXT\n");
+ disabled = 0;
+ regdump(par);
+ enable_mmio();
+ return;
+}
+
+//=======================================================================
+//
+// Restore State
+//
+// This function is called while a switch to KD_GRAPHICS is in progress,
+// We have to turn on vga style panning registers again because the
+// trident driver of X does not know about GE10.
+//
+//=======================================================================
+
+static void cyblafb_restore_state(struct fb_info *info)
+{
+ if (verbosity > 0)
+ output("Switching to KD_GRAPHICS\n");
+ out32(GE10, 0);
+ disabled = 1;
return;
}
@@ -640,32 +792,34 @@ static void regdump(struct cyblafb_par *par)
static int cyblafb_set_par(struct fb_info *info)
{
struct cyblafb_par *par = info->par;
- u32
- htotal,hdispend,hsyncstart,hsyncend,hblankstart,hblankend,preendfetch,
- vtotal,vdispend,vsyncstart,vsyncend,vblankstart,vblankend;
+ u32 htotal, hdispend, hsyncstart, hsyncend, hblankstart,
+ hblankend, preendfetch, vtotal, vdispend, vsyncstart,
+ vsyncend, vblankstart, vblankend;
struct fb_var_screeninfo *var = &info->var;
int bpp = var->bits_per_pixel;
int i;
+ KD_GRAPHICS_RETURN(0);
+
if (verbosity > 0)
output("Switching to new mode: "
"fbset -g %d %d %d %d %d -t %d %d %d %d %d %d %d\n",
- var->xres,var->yres,var->xres_virtual,
- var->yres_virtual,var->bits_per_pixel,var->pixclock,
- var->left_margin,var->right_margin,var->upper_margin,
- var->lower_margin,var->hsync_len,var->vsync_len);
+ var->xres, var->yres, var->xres_virtual,
+ var->yres_virtual, var->bits_per_pixel, var->pixclock,
+ var->left_margin, var->right_margin, var->upper_margin,
+ var->lower_margin, var->hsync_len, var->vsync_len);
htotal = (var->xres + var->left_margin + var->right_margin +
- var->hsync_len) / 8 - 5;
- hdispend = var->xres/8 - 1;
- hsyncstart = (var->xres + var->right_margin)/8;
- hsyncend = var->hsync_len/8;
+ var->hsync_len) / 8 - 5;
+ hdispend = var->xres / 8 - 1;
+ hsyncstart = (var->xres + var->right_margin) / 8;
+ hsyncend = var->hsync_len / 8;
hblankstart = hdispend + 1;
hblankend = htotal + 3; // should be htotal + 5, bios does it this way
- preendfetch = ((var->xres >> 3) + 1) * ((bpp+1) >> 3);
+ preendfetch = ((var->xres >> 3) + 1) * ((bpp + 1) >> 3);
vtotal = var->yres + var->upper_margin + var->lower_margin +
- var->vsync_len - 2;
+ var->vsync_len - 2;
vdispend = var->yres - 1;
vsyncstart = var->yres + var->lower_margin;
vblankstart = var->yres;
@@ -674,101 +828,99 @@ static int cyblafb_set_par(struct fb_info *info)
enable_mmio(); // necessary! ... check X ...
- write3X4(CR11,read3X4(CR11) & 0x7F); // unlock cr00 .. cr07
+ write3X4(CR11, read3X4(CR11) & 0x7F); // unlock cr00 .. cr07
- write3CE(GR30,8);
+ write3CE(GR30, 8);
if ((displaytype == DISPLAY_FP) && var->xres < nativex) {
// stretch or center ?
- out8(0x3C2,0xEB);
+ out8(0x3C2, 0xEB);
- write3CE(GR30,read3CE(GR30) | 0x81); // shadow mode on
+ write3CE(GR30, read3CE(GR30) | 0x81); // shadow mode on
if (center) {
- write3CE(GR52,(read3CE(GR52) & 0x7C) | 0x80);
- write3CE(GR53,(read3CE(GR53) & 0x7C) | 0x80);
- }
- else if (stretch) {
- write3CE(GR5D,0);
- write3CE(GR52,(read3CE(GR52) & 0x7C) | 1);
- write3CE(GR53,(read3CE(GR53) & 0x7C) | 1);
+ write3CE(GR52, (read3CE(GR52) & 0x7C) | 0x80);
+ write3CE(GR53, (read3CE(GR53) & 0x7C) | 0x80);
+ } else if (stretch) {
+ write3CE(GR5D, 0);
+ write3CE(GR52, (read3CE(GR52) & 0x7C) | 1);
+ write3CE(GR53, (read3CE(GR53) & 0x7C) | 1);
}
} else {
- out8(0x3C2,0x2B);
- write3CE(GR30,8);
+ out8(0x3C2, 0x2B);
+ write3CE(GR30, 8);
}
//
// Setup CRxx regs
//
- write3X4(CR00,htotal & 0xFF);
- write3X4(CR01,hdispend & 0xFF);
- write3X4(CR02,hblankstart & 0xFF);
- write3X4(CR03,hblankend & 0x1F);
- write3X4(CR04,hsyncstart & 0xFF);
- write3X4(CR05,(hsyncend & 0x1F) | ((hblankend & 0x20)<<2));
- write3X4(CR06,vtotal & 0xFF);
- write3X4(CR07,(vtotal & 0x100) >> 8 |
- (vdispend & 0x100) >> 7 |
- (vsyncstart & 0x100) >> 6 |
- (vblankstart & 0x100) >> 5 |
- 0x10 |
- (vtotal & 0x200) >> 4 |
- (vdispend & 0x200) >> 3 |
- (vsyncstart & 0x200) >> 2);
- write3X4(CR08,0);
- write3X4(CR09,(vblankstart & 0x200) >> 4 | 0x40 | // FIX !!!
- ((info->var.vmode & FB_VMODE_DOUBLE) ? 0x80 : 0));
- write3X4(CR0A,0); // Init to some reasonable default
- write3X4(CR0B,0); // Init to some reasonable default
- write3X4(CR0C,0); // Offset 0
- write3X4(CR0D,0); // Offset 0
- write3X4(CR0E,0); // Init to some reasonable default
- write3X4(CR0F,0); // Init to some reasonable default
- write3X4(CR10,vsyncstart & 0xFF);
- write3X4(CR11,(vsyncend & 0x0F));
- write3X4(CR12,vdispend & 0xFF);
- write3X4(CR13,((info->var.xres * bpp)/(4*16)) & 0xFF);
- write3X4(CR14,0x40); // double word mode
- write3X4(CR15,vblankstart & 0xFF);
- write3X4(CR16,vblankend & 0xFF);
- write3X4(CR17,0xC3);
- write3X4(CR18,0xFF);
+ write3X4(CR00, htotal & 0xFF);
+ write3X4(CR01, hdispend & 0xFF);
+ write3X4(CR02, hblankstart & 0xFF);
+ write3X4(CR03, hblankend & 0x1F);
+ write3X4(CR04, hsyncstart & 0xFF);
+ write3X4(CR05, (hsyncend & 0x1F) | ((hblankend & 0x20) << 2));
+ write3X4(CR06, vtotal & 0xFF);
+ write3X4(CR07, (vtotal & 0x100) >> 8 |
+ (vdispend & 0x100) >> 7 |
+ (vsyncstart & 0x100) >> 6 |
+ (vblankstart & 0x100) >> 5 |
+ 0x10 |
+ (vtotal & 0x200) >> 4 |
+ (vdispend & 0x200) >> 3 | (vsyncstart & 0x200) >> 2);
+ write3X4(CR08, 0);
+ write3X4(CR09, (vblankstart & 0x200) >> 4 | 0x40 | // FIX !!!
+ ((info->var.vmode & FB_VMODE_DOUBLE) ? 0x80 : 0));
+ write3X4(CR0A, 0); // Init to some reasonable default
+ write3X4(CR0B, 0); // Init to some reasonable default
+ write3X4(CR0C, 0); // Offset 0
+ write3X4(CR0D, 0); // Offset 0
+ write3X4(CR0E, 0); // Init to some reasonable default
+ write3X4(CR0F, 0); // Init to some reasonable default
+ write3X4(CR10, vsyncstart & 0xFF);
+ write3X4(CR11, (vsyncend & 0x0F));
+ write3X4(CR12, vdispend & 0xFF);
+ write3X4(CR13, ((info->var.xres_virtual * bpp) / (4 * 16)) & 0xFF);
+ write3X4(CR14, 0x40); // double word mode
+ write3X4(CR15, vblankstart & 0xFF);
+ write3X4(CR16, vblankend & 0xFF);
+ write3X4(CR17, 0xE3);
+ write3X4(CR18, 0xFF);
// CR19: needed for interlaced modes ... ignore it for now
- write3X4(CR1A,0x07); // Arbitration Control Counter 1
- write3X4(CR1B,0x07); // Arbitration Control Counter 2
- write3X4(CR1C,0x07); // Arbitration Control Counter 3
- write3X4(CR1D,0x00); // Don't know, doesn't hurt ;-)
- write3X4(CR1E,(info->var.vmode & FB_VMODE_INTERLACED) ? 0x84 : 0x80);
+ write3X4(CR1A, 0x07); // Arbitration Control Counter 1
+ write3X4(CR1B, 0x07); // Arbitration Control Counter 2
+ write3X4(CR1C, 0x07); // Arbitration Control Counter 3
+	write3X4(CR1D, 0x00);	// Don't know, doesn't hurt ;-)
+ write3X4(CR1E, (info->var.vmode & FB_VMODE_INTERLACED) ? 0x84 : 0x80);
// CR1F: do not set, contains BIOS info about memsize
- write3X4(CR20,0x20); // enabe wr buf, disable 16bit planar mode
- write3X4(CR21,0x20); // enable linear memory access
+	write3X4(CR20, 0x20);	// enable wr buf, disable 16bit planar mode
+ write3X4(CR21, 0x20); // enable linear memory access
// CR22: RO cpu latch readback
// CR23: ???
// CR24: RO AR flag state
// CR25: RAMDAC rw timing, pclk buffer tristate control ????
// CR26: ???
- write3X4(CR27,(vdispend & 0x400) >> 6 |
- (vsyncstart & 0x400) >> 5 |
- (vblankstart & 0x400) >> 4 |
- (vtotal & 0x400) >> 3 |
- 0x8);
+ write3X4(CR27, (vdispend & 0x400) >> 6 |
+ (vsyncstart & 0x400) >> 5 |
+ (vblankstart & 0x400) >> 4 |
+ (vtotal & 0x400) >> 3 |
+ 0x8);
// CR28: ???
- write3X4(CR29,(read3X4(CR29) & 0xCF) |
- ((((info->var.xres * bpp) / (4*16)) & 0x300) >>4));
- write3X4(CR2A,read3X4(CR2A) | 0x40);
- write3X4(CR2B,(htotal & 0x100) >> 8 |
- (hdispend & 0x100) >> 7 |
- // (0x00 & 0x100) >> 6 | hinterlace para bit 8 ???
- (hsyncstart & 0x100) >> 5 |
- (hblankstart & 0x100) >> 4);
+ write3X4(CR29, (read3X4(CR29) & 0xCF) | ((((info->var.xres_virtual *
+ bpp) / (4 * 16)) & 0x300) >> 4));
+ write3X4(CR2A, read3X4(CR2A) | 0x40);
+ write3X4(CR2B, (htotal & 0x100) >> 8 |
+ (hdispend & 0x100) >> 7 |
+ // (0x00 & 0x100) >> 6 | hinterlace para bit 8 ???
+ (hsyncstart & 0x100) >> 5 |
+ (hblankstart & 0x100) >> 4);
// CR2C: ???
// CR2D: initialized in cyblafb_setup_GE()
- write3X4(CR2F,0x92); // conservative, better signal quality
+ write3X4(CR2F, 0x92); // conservative, better signal quality
// CR30: reserved
// CR31: reserved
// CR32: reserved
@@ -777,96 +929,116 @@ static int cyblafb_set_par(struct fb_info *info)
// CR35: disabled in CR36
// CR36: initialized in cyblafb_setup_GE
// CR37: i2c, ignore for now
- write3X4(CR38,(bpp == 8) ? 0x00 : //
- (bpp == 16) ? 0x05 : // highcolor
- (bpp == 24) ? 0x29 : // packed 24bit truecolor
- (bpp == 32) ? 0x09 : 0); // truecolor, 16 bit pixelbus
- write3X4(CR39,0x01 | // MMIO enable
- (pcirb ? 0x02 : 0) | // pci read burst enable
- (pciwb ? 0x04 : 0)); // pci write burst enable
- write3X4(CR55,0x1F | // pci clocks * 2 for STOP# during 1st data phase
- (pcirr ? 0x40 : 0) | // pci read retry enable
- (pciwr ? 0x80 : 0)); // pci write retry enable
- write3X4(CR56,preendfetch >> 8 < 2 ? (preendfetch >> 8 & 0x01)|2 : 0);
- write3X4(CR57,preendfetch >> 8 < 2 ? preendfetch & 0xff : 0);
- write3X4(CR58,0x82); // Bios does this .... don't know more
+ write3X4(CR38, (bpp == 8) ? 0x00 : //
+ (bpp == 16) ? 0x05 : // highcolor
+ (bpp == 24) ? 0x29 : // packed 24bit truecolor
+ (bpp == 32) ? 0x09 : 0); // truecolor, 16 bit pixelbus
+ write3X4(CR39, 0x01 | // MMIO enable
+ (pcirb ? 0x02 : 0) | // pci read burst enable
+ (pciwb ? 0x04 : 0)); // pci write burst enable
+ write3X4(CR55, 0x1F | // pci clocks * 2 for STOP# during 1st data phase
+ (pcirr ? 0x40 : 0) | // pci read retry enable
+ (pciwr ? 0x80 : 0)); // pci write retry enable
+ write3X4(CR56, preendfetch >> 8 < 2 ? (preendfetch >> 8 & 0x01) | 2
+ : 0);
+ write3X4(CR57, preendfetch >> 8 < 2 ? preendfetch & 0xff : 0);
+ write3X4(CR58, 0x82); // Bios does this .... don't know more
//
// Setup SRxx regs
//
- write3C4(SR00,3);
- write3C4(SR01,1); //set char clock 8 dots wide
- write3C4(SR02,0x0F); //enable 4 maps needed in chain4 mode
- write3C4(SR03,0); //no character map select
- write3C4(SR04,0x0E); //memory mode: ext mem, even, chain4
+ write3C4(SR00, 3);
+ write3C4(SR01, 1); //set char clock 8 dots wide
+ write3C4(SR02, 0x0F); //enable 4 maps needed in chain4 mode
+ write3C4(SR03, 0); //no character map select
+ write3C4(SR04, 0x0E); //memory mode: ext mem, even, chain4
- out8(0x3C4,0x0b);
+ out8(0x3C4, 0x0b);
in8(0x3C5); // Set NEW mode
- write3C4(SR0D,0x00); // test ... check
+ write3C4(SR0D, 0x00); // test ... check
- set_vclk(par,(bpp==32 ? 200000000 : 100000000)/
- info->var.pixclock); //SR18,SR19
+ set_vclk(par, (bpp == 32 ? 200000000 : 100000000)
+ / info->var.pixclock); //SR18, SR19
//
// Setup GRxx regs
//
- write3CE(GR00,0x00); // test ... check
- write3CE(GR01,0x00); // test ... check
- write3CE(GR02,0x00); // test ... check
- write3CE(GR03,0x00); // test ... check
- write3CE(GR04,0x00); // test ... check
- write3CE(GR05,0x40); // no CGA compat,allow 256 col
- write3CE(GR06,0x05); // graphics mode
- write3CE(GR07,0x0F); // planes?
- write3CE(GR08,0xFF); // test ... check
- write3CE(GR0F,(bpp==32)?0x1A:0x12); // div vclk by 2 if 32bpp, chain4
- write3CE(GR20,0xC0); // test ... check
- write3CE(GR2F,0xA0); // PCLK = VCLK, no skew,
+ write3CE(GR00, 0x00); // test ... check
+ write3CE(GR01, 0x00); // test ... check
+ write3CE(GR02, 0x00); // test ... check
+ write3CE(GR03, 0x00); // test ... check
+ write3CE(GR04, 0x00); // test ... check
+ write3CE(GR05, 0x40); // no CGA compat, allow 256 col
+ write3CE(GR06, 0x05); // graphics mode
+ write3CE(GR07, 0x0F); // planes?
+ write3CE(GR08, 0xFF); // test ... check
+ write3CE(GR0F, (bpp == 32) ? 0x1A : 0x12); // vclk / 2 if 32bpp, chain4
+ write3CE(GR20, 0xC0); // test ... check
+ write3CE(GR2F, 0xA0); // PCLK = VCLK, no skew,
//
// Setup ARxx regs
//
- for(i = 0;i < 0x10;i++) // set AR00 .. AR0f
- write3C0(i,i);
- write3C0(AR10,0x41); // graphics mode and support 256 color modes
- write3C0(AR12,0x0F); // planes
- write3C0(AR13,0); // horizontal pel panning
+ for (i = 0; i < 0x10; i++) // set AR00 .. AR0f
+ write3C0(i, i);
+ write3C0(AR10, 0x41); // graphics mode and support 256 color modes
+ write3C0(AR12, 0x0F); // planes
+ write3C0(AR13, 0); // horizontal pel panning
in8(0x3DA); // reset internal flag to 3c0 index
- out8(0x3C0,0x20); // enable attr
+ out8(0x3C0, 0x20); // enable attr
//
// Setup hidden RAMDAC command register
//
- in8(0x3C8); // these reads are
- in8(0x3C6); // necessary to
- in8(0x3C6); // unmask the RAMDAC
- in8(0x3C6); // command reg, otherwise
- in8(0x3C6); // we would write the pixelmask reg!
- out8(0x3C6,(bpp == 8) ? 0x00 : // 256 colors
- (bpp == 15) ? 0x10 : //
- (bpp == 16) ? 0x30 : // hicolor
- (bpp == 24) ? 0xD0 : // truecolor
- (bpp == 32) ? 0xD0 : 0); // truecolor
+ in8(0x3C8); // these reads are
+ in8(0x3C6); // necessary to
+ in8(0x3C6); // unmask the RAMDAC
+ in8(0x3C6); // command reg, otherwise
+ in8(0x3C6); // we would write the pixelmask reg!
+ out8(0x3C6, (bpp == 8) ? 0x00 : // 256 colors
+ (bpp == 15) ? 0x10 : //
+ (bpp == 16) ? 0x30 : // hicolor
+ (bpp == 24) ? 0xD0 : // truecolor
+ (bpp == 32) ? 0xD0 : 0); // truecolor
in8(0x3C8);
//
// GR31 is not mentioned in the datasheet
//
if (displaytype == DISPLAY_FP)
- write3CE(GR31,(read3CE(GR31) & 0x8F) |
+ write3CE(GR31, (read3CE(GR31) & 0x8F) |
((info->var.yres > 1024) ? 0x50 :
- (info->var.yres > 768) ? 0x30 :
- (info->var.yres > 600) ? 0x20 :
- (info->var.yres > 480) ? 0x10 : 0));
+ (info->var.yres > 768) ? 0x30 :
+ (info->var.yres > 600) ? 0x20 :
+ (info->var.yres > 480) ? 0x10 : 0));
info->fix.visual = (bpp == 8) ? FB_VISUAL_PSEUDOCOLOR
: FB_VISUAL_TRUECOLOR;
- info->fix.line_length = info->var.xres * (bpp >> 3);
- info->cmap.len = (bpp == 8) ? 256: 16;
+ info->fix.line_length = info->var.xres_virtual * (bpp >> 3);
+ info->cmap.len = (bpp == 8) ? 256 : 16;
//
// init acceleration engine
//
- cyblafb_setup_GE(info->var.xres,info->var.bits_per_pixel);
+ cyblafb_setup_GE(info->var.xres_virtual, info->var.bits_per_pixel);
+
+ //
+ // Set/clear flags to allow proper scroll mode selection.
+ //
+ if (var->xres == var->xres_virtual)
+ info->flags &= ~FBINFO_HWACCEL_XPAN;
+ else
+ info->flags |= FBINFO_HWACCEL_XPAN;
+
+ if (var->yres == var->yres_virtual)
+ info->flags &= ~FBINFO_HWACCEL_YPAN;
+ else
+ info->flags |= FBINFO_HWACCEL_YPAN;
+
+ if (info->fix.smem_len !=
+ var->xres_virtual * var->yres_virtual * bpp / 8)
+ info->flags &= ~FBINFO_HWACCEL_YWRAP;
+ else
+ info->flags |= FBINFO_HWACCEL_YWRAP;
regdump(par);
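/*
 * Sketch of the scroll-mode flag selection above: panning is advertised
 * only when there is virtual area to pan into, and ywrap only when the
 * virtual frame exactly fills video memory. The flag values below are
 * stand-ins, not the FBINFO_HWACCEL_* definitions from <linux/fb.h>.
 */
#define SKETCH_XPAN	0x1
#define SKETCH_YPAN	0x2
#define SKETCH_YWRAP	0x4

static unsigned int scroll_flags(unsigned int xres, unsigned int yres,
				 unsigned int vx, unsigned int vy,
				 unsigned int bpp, unsigned long smem_len)
{
	unsigned int flags = 0;

	if (xres != vx)
		flags |= SKETCH_XPAN;
	if (yres != vy)
		flags |= SKETCH_YPAN;
	if (smem_len == (unsigned long)vx * vy * bpp / 8)
		flags |= SKETCH_YWRAP;
	return flags;
}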
@@ -885,27 +1057,27 @@ static int cyblafb_setcolreg(unsigned regno, unsigned red, unsigned green,
{
int bpp = info->var.bits_per_pixel;
+ KD_GRAPHICS_RETURN(0);
+
if (regno >= info->cmap.len)
return 1;
if (bpp == 8) {
- out8(0x3C6,0xFF);
- out8(0x3C8,regno);
- out8(0x3C9,red>>10);
- out8(0x3C9,green>>10);
- out8(0x3C9,blue>>10);
-
- } else if (bpp == 16) // RGB 565
- ((u32*)info->pseudo_palette)[regno] =
- (red & 0xF800) |
- ((green & 0xFC00) >> 5) |
- ((blue & 0xF800) >> 11);
- else if (bpp == 32) // ARGB 8888
- ((u32*)info->pseudo_palette)[regno] =
- ((transp & 0xFF00) <<16) |
- ((red & 0xFF00) << 8) |
- ((green & 0xFF00)) |
- ((blue & 0xFF00)>>8);
+ out8(0x3C6, 0xFF);
+ out8(0x3C8, regno);
+ out8(0x3C9, red >> 10);
+ out8(0x3C9, green >> 10);
+ out8(0x3C9, blue >> 10);
+
+ } else if (bpp == 16) // RGB 565
+ ((u32 *) info->pseudo_palette)[regno] =
+ (red & 0xF800) |
+ ((green & 0xFC00) >> 5) | ((blue & 0xF800) >> 11);
+ else if (bpp == 32) // ARGB 8888
+ ((u32 *) info->pseudo_palette)[regno] =
+ ((transp & 0xFF00) << 16) |
+ ((red & 0xFF00) << 8) |
+ ((green & 0xFF00)) | ((blue & 0xFF00) >> 8);
return 0;
}
@@ -918,40 +1090,41 @@ static int cyblafb_setcolreg(unsigned regno, unsigned red, unsigned green,
static int cyblafb_blank(int blank_mode, struct fb_info *info)
{
- unsigned char PMCont,DPMSCont;
+ unsigned char PMCont, DPMSCont;
+
+ KD_GRAPHICS_RETURN(0);
if (displaytype == DISPLAY_FP)
return 0;
- out8(0x83C8,0x04); // DPMS Control
+ out8(0x83C8, 0x04); // DPMS Control
PMCont = in8(0x83C6) & 0xFC;
DPMSCont = read3CE(GR23) & 0xFC;
- switch (blank_mode)
- {
- case FB_BLANK_UNBLANK: // Screen: On, HSync: On, VSync: On
- case FB_BLANK_NORMAL: // Screen: Off, HSync: On, VSync: On
+ switch (blank_mode) {
+ case FB_BLANK_UNBLANK: // Screen: On, HSync: On, VSync: On
+ case FB_BLANK_NORMAL: // Screen: Off, HSync: On, VSync: On
PMCont |= 0x03;
DPMSCont |= 0x00;
break;
- case FB_BLANK_HSYNC_SUSPEND: // Screen: Off, HSync: Off, VSync: On
+ case FB_BLANK_HSYNC_SUSPEND: // Screen: Off, HSync: Off, VSync: On
PMCont |= 0x02;
DPMSCont |= 0x01;
break;
- case FB_BLANK_VSYNC_SUSPEND: // Screen: Off, HSync: On, VSync: Off
+ case FB_BLANK_VSYNC_SUSPEND: // Screen: Off, HSync: On, VSync: Off
PMCont |= 0x02;
DPMSCont |= 0x02;
break;
- case FB_BLANK_POWERDOWN: // Screen: Off, HSync: Off, VSync: Off
+ case FB_BLANK_POWERDOWN: // Screen: Off, HSync: Off, VSync: Off
PMCont |= 0x00;
DPMSCont |= 0x03;
break;
}
- write3CE(GR23,DPMSCont);
- out8(0x83C8,4);
- out8(0x83C6,PMCont);
+ write3CE(GR23, DPMSCont);
+ out8(0x83C8, 4);
+ out8(0x83C6, PMCont);
//
// let fbcon do a softblank for us
//
@@ -959,15 +1132,18 @@ static int cyblafb_blank(int blank_mode, struct fb_info *info)
}
static struct fb_ops cyblafb_ops __devinitdata = {
- .owner = THIS_MODULE,
+ .owner = THIS_MODULE,
.fb_setcolreg = cyblafb_setcolreg,
.fb_pan_display = cyblafb_pan_display,
.fb_blank = cyblafb_blank,
.fb_check_var = cyblafb_check_var,
.fb_set_par = cyblafb_set_par,
.fb_fillrect = cyblafb_fillrect,
- .fb_copyarea= cyblafb_copyarea,
+ .fb_copyarea = cyblafb_copyarea,
.fb_imageblit = cyblafb_imageblit,
+ .fb_sync = cyblafb_sync,
+ .fb_restore_state = cyblafb_restore_state,
+ .fb_save_state = cyblafb_save_state,
};
//==========================================================================
@@ -986,74 +1162,89 @@ static struct fb_ops cyblafb_ops __devinitdata = {
static int __devinit getstartupmode(struct fb_info *info)
{
- u32 htotal,hdispend,hsyncstart,hsyncend,hblankstart,hblankend,
- vtotal,vdispend,vsyncstart,vsyncend,vblankstart,vblankend,
- cr00,cr01,cr02,cr03,cr04,cr05,cr2b,
- cr06,cr07,cr09,cr10,cr11,cr12,cr15,cr16,cr27,
- cr38,
- sr0d,sr18,sr19,
- gr0f,
- fi,pxclkdiv,vclkdiv,tmp,i;
+ u32 htotal, hdispend, hsyncstart, hsyncend, hblankstart, hblankend,
+ vtotal, vdispend, vsyncstart, vsyncend, vblankstart, vblankend,
+ cr00, cr01, cr02, cr03, cr04, cr05, cr2b,
+ cr06, cr07, cr09, cr10, cr11, cr12, cr15, cr16, cr27,
+ cr38, sr0d, sr18, sr19, gr0f, fi, pxclkdiv, vclkdiv, tmp, i;
struct modus {
- int xres; int yres; int vyres; int bpp; int pxclk;
- int left_margin; int right_margin; int upper_margin;
- int lower_margin; int hsync_len; int vsync_len;
- } modedb[5] = {
- { 0, 0, 8000, 0, 0, 0, 0, 0, 0, 0, 0},
- { 640, 480, 3756, 0, 0, -40, 24, 17, 0, 216, 3},
- { 800, 600, 3221, 0, 0, 96, 24, 14, 0, 136, 11},
- {1024, 768, 2815, 0, 0, 144, 24, 29, 0, 120, 3},
- {1280, 1024, 2662, 0, 0, 232, 16, 39, 0, 160, 3}
+ int xres; int vxres; int yres; int vyres;
+ int bpp; int pxclk;
+ int left_margin; int right_margin;
+ int upper_margin; int lower_margin;
+ int hsync_len; int vsync_len;
+ } modedb[5] = {
+ {
+ 0, 2048, 0, 4096, 0, 0, 0, 0, 0, 0, 0, 0}, {
+ 640, 2048, 480, 4096, 0, 0, -40, 24, 17, 0, 216, 3}, {
+ 800, 2048, 600, 4096, 0, 0, 96, 24, 14, 0, 136, 11}, {
+ 1024, 2048, 768, 4096, 0, 0, 144, 24, 29, 0, 120, 3}, {
+ 1280, 2048, 1024, 4096, 0, 0, 232, 16, 39, 0, 160, 3}
};
- outb(0x00,0x3d4); cr00=inb(0x3d5); outb(0x01,0x3d4); cr01=inb(0x3d5);
- outb(0x02,0x3d4); cr02=inb(0x3d5); outb(0x03,0x3d4); cr03=inb(0x3d5);
- outb(0x04,0x3d4); cr04=inb(0x3d5); outb(0x05,0x3d4); cr05=inb(0x3d5);
- outb(0x06,0x3d4); cr06=inb(0x3d5); outb(0x07,0x3d4); cr07=inb(0x3d5);
- outb(0x09,0x3d4); cr09=inb(0x3d5); outb(0x10,0x3d4); cr10=inb(0x3d5);
- outb(0x11,0x3d4); cr11=inb(0x3d5); outb(0x12,0x3d4); cr12=inb(0x3d5);
- outb(0x15,0x3d4); cr15=inb(0x3d5); outb(0x16,0x3d4); cr16=inb(0x3d5);
- outb(0x27,0x3d4); cr27=inb(0x3d5); outb(0x2b,0x3d4); cr2b=inb(0x3d5);
- outb(0x38,0x3d4); cr38=inb(0x3d5); outb(0x0b,0x3c4); inb(0x3c5);
- outb(0x0d,0x3c4); sr0d=inb(0x3c5); outb(0x18,0x3c4); sr18=inb(0x3c5);
- outb(0x19,0x3c4); sr19=inb(0x3c5); outb(0x0f,0x3ce); gr0f=inb(0x3cf);
-
- htotal = cr00 | (cr2b & 0x01) << 8;
- hdispend = cr01 | (cr2b & 0x02) << 7;
+ outb(0x00, 0x3d4); cr00 = inb(0x3d5);
+ outb(0x01, 0x3d4); cr01 = inb(0x3d5);
+ outb(0x02, 0x3d4); cr02 = inb(0x3d5);
+ outb(0x03, 0x3d4); cr03 = inb(0x3d5);
+ outb(0x04, 0x3d4); cr04 = inb(0x3d5);
+ outb(0x05, 0x3d4); cr05 = inb(0x3d5);
+ outb(0x06, 0x3d4); cr06 = inb(0x3d5);
+ outb(0x07, 0x3d4); cr07 = inb(0x3d5);
+ outb(0x09, 0x3d4); cr09 = inb(0x3d5);
+ outb(0x10, 0x3d4); cr10 = inb(0x3d5);
+ outb(0x11, 0x3d4); cr11 = inb(0x3d5);
+ outb(0x12, 0x3d4); cr12 = inb(0x3d5);
+ outb(0x15, 0x3d4); cr15 = inb(0x3d5);
+ outb(0x16, 0x3d4); cr16 = inb(0x3d5);
+ outb(0x27, 0x3d4); cr27 = inb(0x3d5);
+ outb(0x2b, 0x3d4); cr2b = inb(0x3d5);
+ outb(0x38, 0x3d4); cr38 = inb(0x3d5);
+
+ outb(0x0b, 0x3c4);
+ inb(0x3c5);
+
+ outb(0x0d, 0x3c4); sr0d = inb(0x3c5);
+ outb(0x18, 0x3c4); sr18 = inb(0x3c5);
+ outb(0x19, 0x3c4); sr19 = inb(0x3c5);
+ outb(0x0f, 0x3ce); gr0f = inb(0x3cf);
+
+ htotal = cr00 | (cr2b & 0x01) << 8;
+ hdispend = cr01 | (cr2b & 0x02) << 7;
hblankstart = cr02 | (cr2b & 0x10) << 4;
- hblankend = (cr03 & 0x1f) | (cr05 & 0x80) >> 2;
- hsyncstart = cr04 | (cr2b & 0x08) << 5;
- hsyncend = cr05 & 0x1f;
+ hblankend = (cr03 & 0x1f) | (cr05 & 0x80) >> 2;
+ hsyncstart = cr04 | (cr2b & 0x08) << 5;
+ hsyncend = cr05 & 0x1f;
modedb[0].xres = hblankstart * 8;
modedb[0].hsync_len = hsyncend * 8;
modedb[0].right_margin = hsyncstart * 8 - modedb[0].xres;
modedb[0].left_margin = (htotal + 5) * 8 - modedb[0].xres -
- modedb[0].right_margin - modedb[0].hsync_len;
-
- vtotal = cr06 | (cr07 & 0x01) << 8 | (cr07 & 0x20) << 4
- | (cr27 & 0x80) << 3;
- vdispend = cr12 | (cr07 & 0x02) << 7 | (cr07 & 0x40) << 3
- | (cr27 & 0x10) << 6;
- vsyncstart = cr10 | (cr07 & 0x04) << 6 | (cr07 & 0x80) << 2
- | (cr27 & 0x20) << 5;
- vsyncend = cr11 & 0x0f;
+ modedb[0].right_margin - modedb[0].hsync_len;
+
+ vtotal = cr06 | (cr07 & 0x01) << 8 | (cr07 & 0x20) << 4
+ | (cr27 & 0x80) << 3;
+ vdispend = cr12 | (cr07 & 0x02) << 7 | (cr07 & 0x40) << 3
+ | (cr27 & 0x10) << 6;
+ vsyncstart = cr10 | (cr07 & 0x04) << 6 | (cr07 & 0x80) << 2
+ | (cr27 & 0x20) << 5;
+ vsyncend = cr11 & 0x0f;
vblankstart = cr15 | (cr07 & 0x08) << 5 | (cr09 & 0x20) << 4
- | (cr27 & 0x40) << 4;
- vblankend = cr16;
+ | (cr27 & 0x40) << 4;
+ vblankend = cr16;
- modedb[0].yres = vdispend + 1;
- modedb[0].vsync_len = vsyncend;
+ modedb[0].yres = vdispend + 1;
+ modedb[0].vsync_len = vsyncend;
modedb[0].lower_margin = vsyncstart - modedb[0].yres;
modedb[0].upper_margin = vtotal - modedb[0].yres -
- modedb[0].lower_margin - modedb[0].vsync_len + 2;
+ modedb[0].lower_margin - modedb[0].vsync_len + 2;
tmp = cr38 & 0x3c;
modedb[0].bpp = tmp == 0 ? 8 : tmp == 4 ? 16 : tmp == 28 ? 24 :
- tmp == 8 ? 32 : 8;
+ tmp == 8 ? 32 : 8;
- fi = ((5864727*(sr18+8))/(((sr19&0x3f)+2)*(1<<((sr19&0xc0)>>6))))>>12;
+ fi = ((5864727 * (sr18 + 8)) /
+ (((sr19 & 0x3f) + 2) * (1 << ((sr19 & 0xc0) >> 6)))) >> 12;
pxclkdiv = ((gr0f & 0x08) >> 3 | (gr0f & 0x40) >> 5) + 1;
tmp = sr0d & 0x06;
vclkdiv = tmp == 0 ? 2 : tmp == 2 ? 4 : tmp == 4 ? 8 : 3; // * 2 !
@@ -1062,10 +1253,10 @@ static int __devinit getstartupmode(struct fb_info *info)
if (verbosity > 0)
output("detected startup mode: "
"fbset -g %d %d %d ??? %d -t %d %d %d %d %d %d %d\n",
- modedb[0].xres,modedb[0].yres,modedb[0].xres,
- modedb[0].bpp,modedb[0].pxclk,modedb[0].left_margin,
- modedb[0].right_margin,modedb[0].upper_margin,
- modedb[0].lower_margin,modedb[0].hsync_len,
+ modedb[0].xres, modedb[0].yres, modedb[0].xres,
+ modedb[0].bpp, modedb[0].pxclk, modedb[0].left_margin,
+ modedb[0].right_margin, modedb[0].upper_margin,
+ modedb[0].lower_margin, modedb[0].hsync_len,
modedb[0].vsync_len);
//
@@ -1073,36 +1264,39 @@ static int __devinit getstartupmode(struct fb_info *info)
// do not want to do it in another way!
//
- tryagain:
+ tryagain:
i = (mode == NULL) ? 0 :
- !strncmp(mode,"640x480",7) ? 1 :
- !strncmp(mode,"800x600",7) ? 2 :
- !strncmp(mode,"1024x768",8) ? 3 :
- !strncmp(mode,"1280x1024",9) ? 4 : 0;
+ !strncmp(mode, "640x480", 7) ? 1 :
+ !strncmp(mode, "800x600", 7) ? 2 :
+ !strncmp(mode, "1024x768", 8) ? 3 :
+ !strncmp(mode, "1280x1024", 9) ? 4 : 0;
ref = (ref < 50) ? 50 : (ref > 85) ? 85 : ref;
- if(i==0) {
+ if (i == 0) {
info->var.pixclock = modedb[i].pxclk;
info->var.bits_per_pixel = modedb[i].bpp;
} else {
info->var.pixclock = (100000000 /
- ((modedb[i].left_margin + modedb[i].xres +
- modedb[i].right_margin + modedb[i].hsync_len
- ) * (
- modedb[i].upper_margin + modedb[i].yres +
- modedb[i].lower_margin + modedb[i].vsync_len
- ) *
- ref / 10000
- ));
+ ((modedb[i].left_margin +
+ modedb[i].xres +
+ modedb[i].right_margin +
+ modedb[i].hsync_len) *
+ (modedb[i].upper_margin +
+ modedb[i].yres +
+ modedb[i].lower_margin +
+ modedb[i].vsync_len) * ref / 10000));
info->var.bits_per_pixel = bpp;
}
info->var.left_margin = modedb[i].left_margin;
info->var.right_margin = modedb[i].right_margin;
info->var.xres = modedb[i].xres;
- info->var.xres_virtual = modedb[i].xres;
+ if (!(modedb[i].yres == 1280 && modedb[i].bpp == 32))
+ info->var.xres_virtual = modedb[i].vxres;
+ else
+ info->var.xres_virtual = modedb[i].xres;
info->var.xoffset = 0;
info->var.hsync_len = modedb[i].hsync_len;
info->var.upper_margin = modedb[i].upper_margin;
@@ -1114,33 +1308,32 @@ static int __devinit getstartupmode(struct fb_info *info)
info->var.sync = 0;
info->var.vmode = FB_VMODE_NONINTERLACED;
- if(cyblafb_check_var(&info->var,info)) {
- // 640x480-8@75 should really never fail. One case would
+ if (cyblafb_check_var(&info->var, info)) {
+ // 640x480 - 8@75 should really never fail. One case would
// be fp == 1 and nativex < 640 ... give up then
- if(i==1 && bpp == 8 && ref == 75){
+ if (i == 1 && bpp == 8 && ref == 75) {
output("Can't find a valid mode :-(\n");
return -EINVAL;
}
// Our detected mode is unlikely to fail. If it does,
- // try 640x480-8@75 ...
- if(i==0) {
- mode="640x480";
- bpp=8;
- ref=75;
+ // try 640x480 - 8@75 ...
+ if (i == 0) {
+ mode = "640x480";
+ bpp = 8;
+ ref = 75;
output("Detected mode failed check_var! "
- "Trying 640x480-8@75\n");
+ "Trying 640x480 - 8@75\n");
goto tryagain;
}
// A specified video mode failed for some reason.
// Try the startup mode first
output("Specified mode '%s' failed check! "
- "Falling back to startup mode.\n",mode);
- mode=NULL;
+ "Falling back to startup mode.\n", mode);
+ mode = NULL;
goto tryagain;
}
return 0;
-
}
//========================================================
@@ -1160,21 +1353,28 @@ static unsigned int __devinit get_memsize(void)
else {
tmp = read3X4(CR1F) & 0x0F;
switch (tmp) {
- case 0x03: k = 1 * Mb; break;
- case 0x07: k = 2 * Mb; break;
- case 0x0F: k = 4 * Mb; break;
- case 0x04: k = 8 * Mb; break;
- default:
- k = 1 * Mb;
- output("Unknown memory size code %x in CR1F."
- " We default to 1 Mb for now, please"
- " do provide a memsize parameter!\n",
- tmp);
+ case 0x03:
+ k = 1 * 1024 * 1024;
+ break;
+ case 0x07:
+ k = 2 * 1024 * 1024;
+ break;
+ case 0x0F:
+ k = 4 * 1024 * 1024;
+ break;
+ case 0x04:
+ k = 8 * 1024 * 1024;
+ break;
+ default:
+ k = 1 * 1024 * 1024;
+ output("Unknown memory size code %x in CR1F."
+ " We default to 1 Mb for now, please"
+ " do provide a memsize parameter!\n", tmp);
}
}
if (verbosity > 0)
- output("framebuffer size = %d Kb\n",k/Kb);
+ output("framebuffer size = %d Kb\n", k / Kb);
return k;
}
@@ -1192,7 +1392,7 @@ static unsigned int __devinit get_displaytype(void)
return DISPLAY_FP;
if (crt)
return DISPLAY_CRT;
- return (read3CE(GR33) & 0x10)?DISPLAY_FP:DISPLAY_CRT;
+ return (read3CE(GR33) & 0x10) ? DISPLAY_FP : DISPLAY_CRT;
}
//=====================================
@@ -1203,7 +1403,7 @@ static unsigned int __devinit get_displaytype(void)
static int __devinit get_nativex(void)
{
- int x,y,tmp;
+ int x, y, tmp;
if (nativex)
return nativex;
@@ -1211,29 +1411,45 @@ static int __devinit get_nativex(void)
tmp = (read3CE(GR52) >> 4) & 3;
switch (tmp) {
- case 0: x = 1280; y = 1024; break;
- case 2: x = 1024; y = 768; break;
- case 3: x = 800; y = 600; break;
- case 4: x = 1400; y = 1050; break;
- case 1:
- default: x = 640; y = 480; break;
+ case 0: x = 1280; y = 1024;
+ break;
+ case 2: x = 1024; y = 768;
+ break;
+ case 3: x = 800; y = 600;
+ break;
+ case 4: x = 1400; y = 1050;
+ break;
+ case 1:
+ default:
+ x = 640; y = 480;
+ break;
}
if (verbosity > 0)
- output("%dx%d flat panel found\n",x,y);
+ output("%dx%d flat panel found\n", x, y);
return x;
}
-static int __devinit cybla_pci_probe(struct pci_dev * dev,
- const struct pci_device_id * id)
+static int __devinit cybla_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
struct fb_info *info;
struct cyblafb_par *par;
- info = framebuffer_alloc(sizeof(struct cyblafb_par),&dev->dev);
-
+ info = framebuffer_alloc(sizeof(struct cyblafb_par), &dev->dev);
if (!info)
- goto errout_alloc;
+ goto errout_alloc_info;
+
+ info->pixmap.addr = kzalloc(CYBLAFB_PIXMAPSIZE, GFP_KERNEL);
+ if (!info->pixmap.addr) {
+ output("allocation of pixmap buffer failed!\n");
+ goto errout_alloc_pixmap;
+ }
+ info->pixmap.size = CYBLAFB_PIXMAPSIZE - 4;
+ info->pixmap.buf_align = 4;
+ info->pixmap.access_align = 32;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+ info->pixmap.scan_align = 4;
par = info->par;
par->ops = cyblafb_ops;
@@ -1246,26 +1462,31 @@ static int __devinit cybla_pci_probe(struct pci_dev * dev,
output("could not enable device!\n");
goto errout_enable;
}
-
// might already be requested by vga console or vesafb,
// so we do care about success
- request_region(0x3c0,32,"cyblafb");
+ if (!request_region(0x3c0, 0x20, "cyblafb")) {
+ output("region 0x3c0/0x20 already reserved\n");
+ vesafb |= 1;
+ }
//
// Graphics Engine Registers
//
- request_region(GEBase,0x100,"cyblafb");
+ if (!request_region(GEBase, 0x100, "cyblafb")) {
+ output("region %#x/0x100 already reserved\n", GEBase);
+ vesafb |= 2;
+ }
regdump(par);
enable_mmio();
// setup MMIO region
- info->fix.mmio_start = pci_resource_start(dev,1);
+ info->fix.mmio_start = pci_resource_start(dev, 1);
info->fix.mmio_len = 0x20000;
if (!request_mem_region(info->fix.mmio_start,
- info->fix.mmio_len,"cyblafb")) {
+ info->fix.mmio_len, "cyblafb")) {
output("request_mem_region failed for mmio region!\n");
goto errout_mmio_reqmem;
}
@@ -1276,18 +1497,17 @@ static int __devinit cybla_pci_probe(struct pci_dev * dev,
output("ioremap failed for mmio region\n");
goto errout_mmio_remap;
}
-
// setup framebuffer memory ... might already be requested
// by vesafb. Not to fail in case of an unsuccessful request
- // is useful for the development cycle
- info->fix.smem_start = pci_resource_start(dev,0);
+ // is useful if both are loaded.
+ info->fix.smem_start = pci_resource_start(dev, 0);
info->fix.smem_len = get_memsize();
if (!request_mem_region(info->fix.smem_start,
- info->fix.smem_len,"cyblafb")) {
- output("request_mem_region failed for smem region!\n");
- if (!vesafb)
- goto errout_smem_req;
+ info->fix.smem_len, "cyblafb")) {
+ output("region %#lx/%#x already reserved\n",
+ info->fix.smem_start, info->fix.smem_len);
+ vesafb |= 4;
}
info->screen_base = ioremap_nocache(info->fix.smem_start,
@@ -1300,31 +1520,30 @@ static int __devinit cybla_pci_probe(struct pci_dev * dev,
displaytype = get_displaytype();
- if(displaytype == DISPLAY_FP)
+ if (displaytype == DISPLAY_FP)
nativex = get_nativex();
- //
- // FBINFO_HWACCEL_YWRAP .... does not work (could be made to work?)
- // FBINFO_PARTIAL_PAN_OK .... is not ok
- // FBINFO_READS_FAST .... is necessary for optimal scrolling
- //
- info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN
- | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT
- | FBINFO_HWACCEL_IMAGEBLIT | FBINFO_READS_FAST;
+ info->flags = FBINFO_DEFAULT
+ | FBINFO_HWACCEL_COPYAREA
+ | FBINFO_HWACCEL_FILLRECT
+ | FBINFO_HWACCEL_IMAGEBLIT
+ | FBINFO_READS_FAST
+// | FBINFO_PARTIAL_PAN_OK
+ | FBINFO_MISC_ALWAYS_SETPAR;
info->pseudo_palette = par->pseudo_pal;
- if(getstartupmode(info))
+ if (getstartupmode(info))
goto errout_findmode;
- fb_alloc_cmap(&info->cmap,256,0);
+ fb_alloc_cmap(&info->cmap, 256, 0);
if (register_framebuffer(info)) {
output("Could not register CyBla framebuffer\n");
goto errout_register;
}
- pci_set_drvdata(dev,info);
+ pci_set_drvdata(dev, info);
//
// normal exit and error paths
@@ -1332,23 +1551,24 @@ static int __devinit cybla_pci_probe(struct pci_dev * dev,
return 0;
- errout_register:
- errout_findmode:
+ errout_register:
+ errout_findmode:
iounmap(info->screen_base);
- errout_smem_remap:
- release_mem_region(info->fix.smem_start,
- info->fix.smem_len);
- errout_smem_req:
+ errout_smem_remap:
+ if (!(vesafb & 4))
+ release_mem_region(info->fix.smem_start, info->fix.smem_len);
iounmap(io_virt);
- errout_mmio_remap:
- release_mem_region(info->fix.mmio_start,
- info->fix.mmio_len);
- errout_mmio_reqmem:
-// release_region(0x3c0,32);
- errout_enable:
+ errout_mmio_remap:
+ release_mem_region(info->fix.mmio_start, info->fix.mmio_len);
+ errout_mmio_reqmem:
+ if (!(vesafb & 1))
+ release_region(0x3c0, 32);
+ errout_enable:
+ kfree(info->pixmap.addr);
+ errout_alloc_pixmap:
framebuffer_release(info);
- errout_alloc:
- output("CyblaFB version %s aborting init.\n",VERSION);
+ errout_alloc_info:
+ output("CyblaFB version %s aborting init.\n", VERSION);
return -ENODEV;
}
@@ -1359,35 +1579,41 @@ static void __devexit cybla_pci_remove(struct pci_dev *dev)
unregister_framebuffer(info);
iounmap(io_virt);
iounmap(info->screen_base);
- release_mem_region(info->fix.smem_start,info->fix.smem_len);
- release_mem_region(info->fix.mmio_start,info->fix.mmio_len);
+ if (!(vesafb & 4))
+ release_mem_region(info->fix.smem_start, info->fix.smem_len);
+ release_mem_region(info->fix.mmio_start, info->fix.mmio_len);
fb_dealloc_cmap(&info->cmap);
+ if (!(vesafb & 2))
+ release_region(GEBase, 0x100);
+ if (!(vesafb & 1))
+ release_region(0x3c0, 32);
+ kfree(info->pixmap.addr);
framebuffer_release(info);
- output("CyblaFB version %s normal exit.\n",VERSION);
+ output("CyblaFB version %s normal exit.\n", VERSION);
}
//
// List of boards that we are trying to support
//
static struct pci_device_id cybla_devices[] = {
- {PCI_VENDOR_ID_TRIDENT,CYBERBLADEi1,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
+ {PCI_VENDOR_ID_TRIDENT, CYBERBLADEi1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
};
-MODULE_DEVICE_TABLE(pci,cybla_devices);
+MODULE_DEVICE_TABLE(pci, cybla_devices);
static struct pci_driver cyblafb_pci_driver = {
- .name = "cyblafb",
- .id_table = cybla_devices,
- .probe = cybla_pci_probe,
- .remove = __devexit_p(cybla_pci_remove)
+ .name = "cyblafb",
+ .id_table = cybla_devices,
+ .probe = cybla_pci_probe,
+ .remove = __devexit_p(cybla_pci_remove)
};
//=============================================================
//
// kernel command line example:
//
-// video=cyblafb:1280x1024,bpp=16,ref=50 ...
+// video=cyblafb:1280x1024, bpp=16, ref=50 ...
//
// modprobe command line example:
//
@@ -1401,46 +1627,46 @@ static int __devinit cyblafb_init(void)
char *options = NULL;
char *opt;
- if (fb_get_options("cyblafb",&options))
+ if (fb_get_options("cyblafb", &options))
return -ENODEV;
if (options && *options)
- while((opt = strsep(&options,",")) != NULL ) {
- if (!*opt) continue;
- else if (!strncmp(opt,"bpp=",4))
- bpp = simple_strtoul(opt+4,NULL,0);
- else if (!strncmp(opt,"ref=",4))
- ref = simple_strtoul(opt+4,NULL,0);
- else if (!strncmp(opt,"fp",2))
+ while ((opt = strsep(&options, ",")) != NULL) {
+ if (!*opt)
+ continue;
+ else if (!strncmp(opt, "bpp=", 4))
+ bpp = simple_strtoul(opt + 4, NULL, 0);
+ else if (!strncmp(opt, "ref=", 4))
+ ref = simple_strtoul(opt + 4, NULL, 0);
+ else if (!strncmp(opt, "fp", 2))
displaytype = DISPLAY_FP;
- else if (!strncmp(opt,"crt",3))
+ else if (!strncmp(opt, "crt", 3))
displaytype = DISPLAY_CRT;
- else if (!strncmp(opt,"nativex=",8))
- nativex = simple_strtoul(opt+8,NULL,0);
- else if (!strncmp(opt,"center",6))
+ else if (!strncmp(opt, "nativex=", 8))
+ nativex = simple_strtoul(opt + 8, NULL, 0);
+ else if (!strncmp(opt, "center", 6))
center = 1;
- else if (!strncmp(opt,"stretch",7))
+ else if (!strncmp(opt, "stretch", 7))
stretch = 1;
- else if (!strncmp(opt,"pciwb=",6))
- pciwb = simple_strtoul(opt+6,NULL,0);
- else if (!strncmp(opt,"pcirb=",6))
- pcirb = simple_strtoul(opt+6,NULL,0);
- else if (!strncmp(opt,"pciwr=",6))
- pciwr = simple_strtoul(opt+6,NULL,0);
- else if (!strncmp(opt,"pcirr=",6))
- pcirr = simple_strtoul(opt+6,NULL,0);
- else if (!strncmp(opt,"memsize=",8))
- memsize = simple_strtoul(opt+8,NULL,0);
- else if (!strncmp(opt,"verbosity=",10))
- verbosity = simple_strtoul(opt+10,NULL,0);
- else if (!strncmp(opt,"vesafb",6))
- vesafb = 1;
+ else if (!strncmp(opt, "pciwb=", 6))
+ pciwb = simple_strtoul(opt + 6, NULL, 0);
+ else if (!strncmp(opt, "pcirb=", 6))
+ pcirb = simple_strtoul(opt + 6, NULL, 0);
+ else if (!strncmp(opt, "pciwr=", 6))
+ pciwr = simple_strtoul(opt + 6, NULL, 0);
+ else if (!strncmp(opt, "pcirr=", 6))
+ pcirr = simple_strtoul(opt + 6, NULL, 0);
+ else if (!strncmp(opt, "memsize=", 8))
+ memsize = simple_strtoul(opt + 8, NULL, 0);
+ else if (!strncmp(opt, "verbosity=", 10))
+ verbosity = simple_strtoul(opt + 10, NULL, 0);
else
mode = opt;
}
#endif
- output("CyblaFB version %s initializing\n",VERSION);
+ output("CyblaFB version %s initializing\n", VERSION);
-	return pci_module_init(&cyblafb_pci_driver);
+ return pci_register_driver(&cyblafb_pci_driver);
}
static void __exit cyblafb_exit(void)
diff --git a/drivers/video/fbcvt.c b/drivers/video/fbcvt.c
index 0b6af00d197..ac90883dc3a 100644
--- a/drivers/video/fbcvt.c
+++ b/drivers/video/fbcvt.c
@@ -214,12 +214,11 @@ static void fb_cvt_print_name(struct fb_cvt_data *cvt)
{
u32 pixcount, pixcount_mod;
int cnt = 255, offset = 0, read = 0;
- u8 *buf = kmalloc(256, GFP_KERNEL);
+ u8 *buf = kzalloc(256, GFP_KERNEL);
if (!buf)
return;
- memset(buf, 0, 256);
pixcount = (cvt->xres * (cvt->yres/cvt->interlace))/1000000;
pixcount_mod = (cvt->xres * (cvt->yres/cvt->interlace)) % 1000000;
pixcount_mod /= 1000;
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 10dfdf03526..32a9b69becc 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -589,17 +589,19 @@ fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
return info->fbops->fb_read(file, buf, count, ppos);
total_size = info->screen_size;
+
if (total_size == 0)
total_size = info->fix.smem_len;
if (p >= total_size)
- return 0;
+ return 0;
+
if (count >= total_size)
- count = total_size;
+ count = total_size;
+
if (count + p > total_size)
count = total_size - p;
- cnt = 0;
buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count,
GFP_KERNEL);
if (!buffer)
@@ -636,6 +638,7 @@ fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
}
kfree(buffer);
+
return (err) ? err : cnt;
}
@@ -648,7 +651,7 @@ fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
struct fb_info *info = registered_fb[fbidx];
u32 *buffer, *src;
u32 __iomem *dst;
- int c, i, cnt = 0, err;
+ int c, i, cnt = 0, err = 0;
unsigned long total_size;
if (!info || !info->screen_base)
@@ -661,19 +664,19 @@ fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
return info->fbops->fb_write(file, buf, count, ppos);
total_size = info->screen_size;
+
if (total_size == 0)
total_size = info->fix.smem_len;
if (p > total_size)
- return -ENOSPC;
+ return 0;
+
if (count >= total_size)
- count = total_size;
- err = 0;
- if (count + p > total_size) {
- count = total_size - p;
- err = -ENOSPC;
- }
- cnt = 0;
+ count = total_size;
+
+ if (count + p > total_size)
+ count = total_size - p;
+
buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count,
GFP_KERNEL);
if (!buffer)
@@ -687,12 +690,15 @@ fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
while (count) {
c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
src = buffer;
+
if (copy_from_user(src, buf, c)) {
err = -EFAULT;
break;
}
+
for (i = c >> 2; i--; )
fb_writel(*src++, dst++);
+
if (c & 3) {
u8 *src8 = (u8 *) src;
u8 __iomem *dst8 = (u8 __iomem *) dst;
@@ -702,11 +708,13 @@ fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
dst = (u32 __iomem *) dst8;
}
+
*ppos += c;
buf += c;
cnt += c;
count -= c;
}
+
kfree(buffer);
return (err) ? err : cnt;
@@ -1226,6 +1234,7 @@ fb_open(struct inode *inode, struct file *file)
return -ENODEV;
if (!try_module_get(info->fbops->owner))
return -ENODEV;
+ file->private_data = info;
if (info->fbops->fb_open) {
res = info->fbops->fb_open(info,1);
if (res)
@@ -1237,11 +1246,9 @@ fb_open(struct inode *inode, struct file *file)
static int
fb_release(struct inode *inode, struct file *file)
{
- int fbidx = iminor(inode);
- struct fb_info *info;
+ struct fb_info * const info = file->private_data;
lock_kernel();
- info = registered_fb[fbidx];
if (info->fbops->fb_release)
info->fbops->fb_release(info,1);
module_put(info->fbops->owner);
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index fc7965b6677..7c74e7325d9 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -317,26 +317,29 @@ static int edid_is_monitor_block(unsigned char *block)
static void calc_mode_timings(int xres, int yres, int refresh,
struct fb_videomode *mode)
{
- struct fb_var_screeninfo var;
- struct fb_info info;
+ struct fb_var_screeninfo *var;
- memset(&var, 0, sizeof(struct fb_var_screeninfo));
- var.xres = xres;
- var.yres = yres;
- fb_get_mode(FB_VSYNCTIMINGS | FB_IGNOREMON,
- refresh, &var, &info);
- mode->xres = xres;
- mode->yres = yres;
- mode->pixclock = var.pixclock;
- mode->refresh = refresh;
- mode->left_margin = var.left_margin;
- mode->right_margin = var.right_margin;
- mode->upper_margin = var.upper_margin;
- mode->lower_margin = var.lower_margin;
- mode->hsync_len = var.hsync_len;
- mode->vsync_len = var.vsync_len;
- mode->vmode = 0;
- mode->sync = 0;
+ var = kzalloc(sizeof(struct fb_var_screeninfo), GFP_KERNEL);
+
+ if (var) {
+ var->xres = xres;
+ var->yres = yres;
+ fb_get_mode(FB_VSYNCTIMINGS | FB_IGNOREMON,
+ refresh, var, NULL);
+ mode->xres = xres;
+ mode->yres = yres;
+ mode->pixclock = var->pixclock;
+ mode->refresh = refresh;
+ mode->left_margin = var->left_margin;
+ mode->right_margin = var->right_margin;
+ mode->upper_margin = var->upper_margin;
+ mode->lower_margin = var->lower_margin;
+ mode->hsync_len = var->hsync_len;
+ mode->vsync_len = var->vsync_len;
+ mode->vmode = 0;
+ mode->sync = 0;
+ kfree(var);
+ }
}
static int get_est_timing(unsigned char *block, struct fb_videomode *mode)
@@ -525,10 +528,9 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize)
unsigned char *block;
int num = 0, i;
- mode = kmalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL);
+ mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL);
if (mode == NULL)
return NULL;
- memset(mode, 0, 50 * sizeof(struct fb_videomode));
if (edid == NULL || !edid_checksum(edid) ||
!edid_check_header(edid)) {
@@ -1105,15 +1107,21 @@ static void fb_timings_dclk(struct __fb_timings *timings)
*/
int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_info *info)
{
- struct __fb_timings timings;
+ struct __fb_timings *timings;
u32 interlace = 1, dscan = 1;
- u32 hfmin, hfmax, vfmin, vfmax, dclkmin, dclkmax;
+ u32 hfmin, hfmax, vfmin, vfmax, dclkmin, dclkmax, err = 0;
+
+
+ timings = kzalloc(sizeof(struct __fb_timings), GFP_KERNEL);
+
+ if (!timings)
+ return -ENOMEM;
/*
* If monspecs are invalid, use values that are enough
* for 640x480@60
*/
- if (!info->monspecs.hfmax || !info->monspecs.vfmax ||
+ if (!info || !info->monspecs.hfmax || !info->monspecs.vfmax ||
!info->monspecs.dclkmax ||
info->monspecs.hfmax < info->monspecs.hfmin ||
info->monspecs.vfmax < info->monspecs.vfmin ||
@@ -1130,65 +1138,66 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_inf
dclkmax = info->monspecs.dclkmax;
}
- memset(&timings, 0, sizeof(struct __fb_timings));
- timings.hactive = var->xres;
- timings.vactive = var->yres;
+ timings->hactive = var->xres;
+ timings->vactive = var->yres;
if (var->vmode & FB_VMODE_INTERLACED) {
- timings.vactive /= 2;
+ timings->vactive /= 2;
interlace = 2;
}
if (var->vmode & FB_VMODE_DOUBLE) {
- timings.vactive *= 2;
+ timings->vactive *= 2;
dscan = 2;
}
switch (flags & ~FB_IGNOREMON) {
case FB_MAXTIMINGS: /* maximize refresh rate */
- timings.hfreq = hfmax;
- fb_timings_hfreq(&timings);
- if (timings.vfreq > vfmax) {
- timings.vfreq = vfmax;
- fb_timings_vfreq(&timings);
+ timings->hfreq = hfmax;
+ fb_timings_hfreq(timings);
+ if (timings->vfreq > vfmax) {
+ timings->vfreq = vfmax;
+ fb_timings_vfreq(timings);
}
- if (timings.dclk > dclkmax) {
- timings.dclk = dclkmax;
- fb_timings_dclk(&timings);
+ if (timings->dclk > dclkmax) {
+ timings->dclk = dclkmax;
+ fb_timings_dclk(timings);
}
break;
case FB_VSYNCTIMINGS: /* vrefresh driven */
- timings.vfreq = val;
- fb_timings_vfreq(&timings);
+ timings->vfreq = val;
+ fb_timings_vfreq(timings);
break;
case FB_HSYNCTIMINGS: /* hsync driven */
- timings.hfreq = val;
- fb_timings_hfreq(&timings);
+ timings->hfreq = val;
+ fb_timings_hfreq(timings);
break;
case FB_DCLKTIMINGS: /* pixelclock driven */
- timings.dclk = PICOS2KHZ(val) * 1000;
- fb_timings_dclk(&timings);
+ timings->dclk = PICOS2KHZ(val) * 1000;
+ fb_timings_dclk(timings);
break;
default:
- return -EINVAL;
+ err = -EINVAL;
}
- if (!(flags & FB_IGNOREMON) &&
- (timings.vfreq < vfmin || timings.vfreq > vfmax ||
- timings.hfreq < hfmin || timings.hfreq > hfmax ||
- timings.dclk < dclkmin || timings.dclk > dclkmax))
- return -EINVAL;
-
- var->pixclock = KHZ2PICOS(timings.dclk/1000);
- var->hsync_len = (timings.htotal * 8)/100;
- var->right_margin = (timings.hblank/2) - var->hsync_len;
- var->left_margin = timings.hblank - var->right_margin - var->hsync_len;
-
- var->vsync_len = (3 * interlace)/dscan;
- var->lower_margin = (1 * interlace)/dscan;
- var->upper_margin = (timings.vblank * interlace)/dscan -
- (var->vsync_len + var->lower_margin);
+ if (err || (!(flags & FB_IGNOREMON) &&
+ (timings->vfreq < vfmin || timings->vfreq > vfmax ||
+ timings->hfreq < hfmin || timings->hfreq > hfmax ||
+ timings->dclk < dclkmin || timings->dclk > dclkmax))) {
+ err = -EINVAL;
+ } else {
+ var->pixclock = KHZ2PICOS(timings->dclk/1000);
+ var->hsync_len = (timings->htotal * 8)/100;
+ var->right_margin = (timings->hblank/2) - var->hsync_len;
+ var->left_margin = timings->hblank - var->right_margin -
+ var->hsync_len;
+ var->vsync_len = (3 * interlace)/dscan;
+ var->lower_margin = (1 * interlace)/dscan;
+ var->upper_margin = (timings->vblank * interlace)/dscan -
+ (var->vsync_len + var->lower_margin);
+ }
- return 0;
+ kfree(timings);
+ return err;
}
#else
int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var)
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index 08dac9580d1..6d26057337e 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -43,10 +43,11 @@ struct fb_info *framebuffer_alloc(size_t size, struct device *dev)
if (size)
fb_info_size += PADDING;
- p = kmalloc(fb_info_size + size, GFP_KERNEL);
+ p = kzalloc(fb_info_size + size, GFP_KERNEL);
+
if (!p)
return NULL;
- memset(p, 0, fb_info_size + size);
+
info = (struct fb_info *) p;
if (size)
@@ -106,8 +107,7 @@ static int mode_string(char *buf, unsigned int offset,
static ssize_t store_mode(struct class_device *class_device, const char * buf,
size_t count)
{
- struct fb_info *fb_info =
- (struct fb_info *)class_get_devdata(class_device);
+ struct fb_info *fb_info = class_get_devdata(class_device);
char mstr[100];
struct fb_var_screeninfo var;
struct fb_modelist *modelist;
@@ -137,8 +137,7 @@ static ssize_t store_mode(struct class_device *class_device, const char * buf,
static ssize_t show_mode(struct class_device *class_device, char *buf)
{
- struct fb_info *fb_info =
- (struct fb_info *)class_get_devdata(class_device);
+ struct fb_info *fb_info = class_get_devdata(class_device);
if (!fb_info->mode)
return 0;
@@ -149,8 +148,7 @@ static ssize_t show_mode(struct class_device *class_device, char *buf)
static ssize_t store_modes(struct class_device *class_device, const char * buf,
size_t count)
{
- struct fb_info *fb_info =
- (struct fb_info *)class_get_devdata(class_device);
+ struct fb_info *fb_info = class_get_devdata(class_device);
LIST_HEAD(old_list);
int i = count / sizeof(struct fb_videomode);
@@ -174,8 +172,7 @@ static ssize_t store_modes(struct class_device *class_device, const char * buf,
static ssize_t show_modes(struct class_device *class_device, char *buf)
{
- struct fb_info *fb_info =
- (struct fb_info *)class_get_devdata(class_device);
+ struct fb_info *fb_info = class_get_devdata(class_device);
unsigned int i;
struct list_head *pos;
struct fb_modelist *modelist;
@@ -193,8 +190,7 @@ static ssize_t show_modes(struct class_device *class_device, char *buf)
static ssize_t store_bpp(struct class_device *class_device, const char * buf,
size_t count)
{
- struct fb_info *fb_info =
- (struct fb_info *)class_get_devdata(class_device);
+ struct fb_info *fb_info = class_get_devdata(class_device);
struct fb_var_screeninfo var;
char ** last = NULL;
int err;
@@ -208,8 +204,7 @@ static ssize_t store_bpp(struct class_device *class_device, const char * buf,
static ssize_t show_bpp(struct class_device *class_device, char *buf)
{
- struct fb_info *fb_info =
- (struct fb_info *)class_get_devdata(class_device);
+ struct fb_info *fb_info = class_get_devdata(class_device);
return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->var.bits_per_pixel);
}
@@ -280,8 +275,7 @@ static ssize_t show_con_rotate(struct class_device *class_device, char *buf)
static ssize_t store_virtual(struct class_device *class_device,
const char * buf, size_t count)
{
- struct fb_info *fb_info =
- (struct fb_info *)class_get_devdata(class_device);
+ struct fb_info *fb_info = class_get_devdata(class_device);
struct fb_var_screeninfo var;
char *last = NULL;
int err;
@@ -300,16 +294,14 @@ static ssize_t store_virtual(struct class_device *class_device,
static ssize_t show_virtual(struct class_device *class_device, char *buf)
{
- struct fb_info *fb_info =
- (struct fb_info *)class_get_devdata(class_device);
+ struct fb_info *fb_info = class_get_devdata(class_device);
return snprintf(buf, PAGE_SIZE, "%d,%d\n", fb_info->var.xres_virtual,
fb_info->var.yres_virtual);
}
static ssize_t show_stride(struct class_device *class_device, char *buf)
{
- struct fb_info *fb_info =
- (struct fb_info *)class_get_devdata(class_device);
+ struct fb_info *fb_info = class_get_devdata(class_device);
return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->fix.line_length);
}
@@ -320,7 +312,7 @@ static ssize_t show_stride(struct class_device *class_device, char *buf)
static ssize_t store_cmap(struct class_device *class_device, const char *buf,
size_t count)
{
- struct fb_info *fb_info = (struct fb_info *)class_get_devdata(class_device);
+ struct fb_info *fb_info = class_get_devdata(class_device);
int rc, i, start, length, transp = 0;
if ((count > PAGE_SIZE) || ((count % 16) != 0))
@@ -380,8 +372,7 @@ static ssize_t store_cmap(struct class_device *class_device, const char *buf,
static ssize_t show_cmap(struct class_device *class_device, char *buf)
{
- struct fb_info *fb_info =
- (struct fb_info *)class_get_devdata(class_device);
+ struct fb_info *fb_info = class_get_devdata(class_device);
unsigned int i;
if (!fb_info->cmap.red || !fb_info->cmap.blue ||
@@ -405,8 +396,7 @@ static ssize_t show_cmap(struct class_device *class_device, char *buf)
static ssize_t store_blank(struct class_device *class_device, const char * buf,
size_t count)
{
- struct fb_info *fb_info =
- (struct fb_info *)class_get_devdata(class_device);
+ struct fb_info *fb_info = class_get_devdata(class_device);
char *last = NULL;
int err;
@@ -422,41 +412,40 @@ static ssize_t store_blank(struct class_device *class_device, const char * buf,
static ssize_t show_blank(struct class_device *class_device, char *buf)
{
-// struct fb_info *fb_info = (struct fb_info *)class_get_devdata(class_device);
+// struct fb_info *fb_info = class_get_devdata(class_device);
return 0;
}
static ssize_t store_console(struct class_device *class_device,
const char * buf, size_t count)
{
-// struct fb_info *fb_info = (struct fb_info *)class_get_devdata(class_device);
+// struct fb_info *fb_info = class_get_devdata(class_device);
return 0;
}
static ssize_t show_console(struct class_device *class_device, char *buf)
{
-// struct fb_info *fb_info = (struct fb_info *)class_get_devdata(class_device);
+// struct fb_info *fb_info = class_get_devdata(class_device);
return 0;
}
static ssize_t store_cursor(struct class_device *class_device,
const char * buf, size_t count)
{
-// struct fb_info *fb_info = (struct fb_info *)class_get_devdata(class_device);
+// struct fb_info *fb_info = class_get_devdata(class_device);
return 0;
}
static ssize_t show_cursor(struct class_device *class_device, char *buf)
{
-// struct fb_info *fb_info = (struct fb_info *)class_get_devdata(class_device);
+// struct fb_info *fb_info = class_get_devdata(class_device);
return 0;
}
static ssize_t store_pan(struct class_device *class_device, const char * buf,
size_t count)
{
- struct fb_info *fb_info =
- (struct fb_info *)class_get_devdata(class_device);
+ struct fb_info *fb_info = class_get_devdata(class_device);
struct fb_var_screeninfo var;
char *last = NULL;
int err;
@@ -479,19 +468,40 @@ static ssize_t store_pan(struct class_device *class_device, const char * buf,
static ssize_t show_pan(struct class_device *class_device, char *buf)
{
- struct fb_info *fb_info =
- (struct fb_info *)class_get_devdata(class_device);
+ struct fb_info *fb_info = class_get_devdata(class_device);
return snprintf(buf, PAGE_SIZE, "%d,%d\n", fb_info->var.xoffset,
fb_info->var.xoffset);
}
static ssize_t show_name(struct class_device *class_device, char *buf)
{
- struct fb_info *fb_info = (struct fb_info *)class_get_devdata(class_device);
+ struct fb_info *fb_info = class_get_devdata(class_device);
return snprintf(buf, PAGE_SIZE, "%s\n", fb_info->fix.id);
}
+static ssize_t store_fbstate(struct class_device *class_device,
+ const char *buf, size_t count)
+{
+ struct fb_info *fb_info = class_get_devdata(class_device);
+ u32 state;
+ char *last = NULL;
+
+ state = simple_strtoul(buf, &last, 0);
+
+ acquire_console_sem();
+ fb_set_suspend(fb_info, (int)state);
+ release_console_sem();
+
+ return count;
+}
+
+static ssize_t show_fbstate(struct class_device *class_device, char *buf)
+{
+ struct fb_info *fb_info = class_get_devdata(class_device);
+ return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->state);
+}
+
static struct class_device_attribute class_device_attrs[] = {
__ATTR(bits_per_pixel, S_IRUGO|S_IWUSR, show_bpp, store_bpp),
__ATTR(blank, S_IRUGO|S_IWUSR, show_blank, store_blank),
@@ -507,6 +517,7 @@ static struct class_device_attribute class_device_attrs[] = {
__ATTR(rotate, S_IRUGO|S_IWUSR, show_rotate, store_rotate),
__ATTR(con_rotate, S_IRUGO|S_IWUSR, show_con_rotate, store_con_rotate),
__ATTR(con_rotate_all, S_IWUSR, NULL, store_con_rotate_all),
+ __ATTR(state, S_IRUGO|S_IWUSR, show_fbstate, store_fbstate),
};
int fb_init_class_device(struct fb_info *fb_info)
diff --git a/drivers/video/hgafb.c b/drivers/video/hgafb.c
index b37cea7d109..4e39035cf33 100644
--- a/drivers/video/hgafb.c
+++ b/drivers/video/hgafb.c
@@ -42,6 +42,7 @@
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/ioport.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/vga.h>
@@ -107,7 +108,7 @@ static DEFINE_SPINLOCK(hga_reg_lock);
/* Framebuffer driver structures */
-static struct fb_var_screeninfo hga_default_var = {
+static struct fb_var_screeninfo __initdata hga_default_var = {
.xres = 720,
.yres = 348,
.xres_virtual = 720,
@@ -121,7 +122,7 @@ static struct fb_var_screeninfo hga_default_var = {
.width = -1,
};
-static struct fb_fix_screeninfo hga_fix = {
+static struct fb_fix_screeninfo __initdata hga_fix = {
.id = "HGA",
.type = FB_TYPE_PACKED_PIXELS, /* (not sure) */
.visual = FB_VISUAL_MONO10,
@@ -131,8 +132,6 @@ static struct fb_fix_screeninfo hga_fix = {
.accel = FB_ACCEL_NONE
};
-static struct fb_info fb_info;
-
/* Don't assume that tty1 will be the initial current console. */
static int release_io_port = 0;
static int release_io_ports = 0;
@@ -549,10 +548,9 @@ static struct fb_ops hgafb_ops = {
* Initialization
*/
-static int __init hgafb_init(void)
+static int __init hgafb_probe(struct device *device)
{
- if (fb_get_options("hgafb", NULL))
- return -ENODEV;
+ struct fb_info *info;
if (! hga_card_detect()) {
printk(KERN_INFO "hgafb: HGA card not detected.\n");
@@ -564,41 +562,95 @@ static int __init hgafb_init(void)
printk(KERN_INFO "hgafb: %s with %ldK of memory detected.\n",
hga_type_name, hga_vram_len/1024);
+ info = framebuffer_alloc(0, NULL);
+ if (!info) {
+ iounmap(hga_vram);
+ return -ENOMEM;
+ }
+
hga_fix.smem_start = (unsigned long)hga_vram;
hga_fix.smem_len = hga_vram_len;
- fb_info.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
- fb_info.var = hga_default_var;
- fb_info.fix = hga_fix;
- fb_info.monspecs.hfmin = 0;
- fb_info.monspecs.hfmax = 0;
- fb_info.monspecs.vfmin = 10000;
- fb_info.monspecs.vfmax = 10000;
- fb_info.monspecs.dpms = 0;
- fb_info.fbops = &hgafb_ops;
- fb_info.screen_base = hga_vram;
-
- if (register_framebuffer(&fb_info) < 0) {
+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
+ info->var = hga_default_var;
+ info->fix = hga_fix;
+ info->monspecs.hfmin = 0;
+ info->monspecs.hfmax = 0;
+ info->monspecs.vfmin = 10000;
+ info->monspecs.vfmax = 10000;
+ info->monspecs.dpms = 0;
+ info->fbops = &hgafb_ops;
+ info->screen_base = hga_vram;
+
+ if (register_framebuffer(info) < 0) {
+ framebuffer_release(info);
iounmap(hga_vram);
return -EINVAL;
}
printk(KERN_INFO "fb%d: %s frame buffer device\n",
- fb_info.node, fb_info.fix.id);
+ info->node, info->fix.id);
+ dev_set_drvdata(device, info);
return 0;
}
-#ifdef MODULE
-static void __exit hgafb_exit(void)
+static int hgafb_remove(struct device *device)
{
+ struct fb_info *info = dev_get_drvdata(device);
+
hga_txt_mode();
hga_clear_screen();
- unregister_framebuffer(&fb_info);
+
+ if (info) {
+ unregister_framebuffer(info);
+ framebuffer_release(info);
+ }
+
iounmap(hga_vram);
- if (release_io_ports) release_region(0x3b0, 12);
- if (release_io_port) release_region(0x3bf, 1);
+
+ if (release_io_ports)
+ release_region(0x3b0, 12);
+
+ if (release_io_port)
+ release_region(0x3bf, 1);
+
+ return 0;
+}
+
+static struct device_driver hgafb_driver = {
+ .name = "hgafb",
+ .bus = &platform_bus_type,
+ .probe = hgafb_probe,
+ .remove = hgafb_remove,
+};
+
+static struct platform_device hgafb_device = {
+ .name = "hgafb",
+};
+
+static int __init hgafb_init(void)
+{
+ int ret;
+
+ if (fb_get_options("hgafb", NULL))
+ return -ENODEV;
+
+ ret = driver_register(&hgafb_driver);
+
+ if (!ret) {
+ ret = platform_device_register(&hgafb_device);
+ if (ret)
+ driver_unregister(&hgafb_driver);
+ }
+
+ return ret;
+}
+
+static void __exit hgafb_exit(void)
+{
+ platform_device_unregister(&hgafb_device);
+ driver_unregister(&hgafb_driver);
}
-#endif
/* -------------------------------------------------------------------------
*
@@ -613,7 +665,4 @@ MODULE_LICENSE("GPL");
module_param(nologo, bool, 0);
MODULE_PARM_DESC(nologo, "Disables startup logo if != 0 (default=0)");
module_init(hgafb_init);
-
-#ifdef MODULE
module_exit(hgafb_exit);
-#endif
diff --git a/drivers/video/i810/i810-i2c.c b/drivers/video/i810/i810-i2c.c
index c61bad0da20..bd410e06db7 100644
--- a/drivers/video/i810/i810-i2c.c
+++ b/drivers/video/i810/i810-i2c.c
@@ -17,6 +17,7 @@
#include <linux/fb.h>
#include "i810.h"
#include "i810_regs.h"
+#include "i810_main.h"
#include "../edid.h"
#define I810_DDC 0x50
@@ -42,7 +43,7 @@
static void i810i2c_setscl(void *data, int state)
{
- struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
+ struct i810fb_i2c_chan *chan = data;
struct i810fb_par *par = chan->par;
u8 __iomem *mmio = par->mmio_start_virtual;
diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
index 64cd1c827cf..76764ea3486 100644
--- a/drivers/video/i810/i810_accel.c
+++ b/drivers/video/i810/i810_accel.c
@@ -14,6 +14,7 @@
#include "i810_regs.h"
#include "i810.h"
+#include "i810_main.h"
static u32 i810fb_rop[] = {
COLOR_COPY_ROP, /* ROP_COPY */
@@ -57,7 +58,7 @@ static inline void i810_report_error(u8 __iomem *mmio)
*/
static inline int wait_for_space(struct fb_info *info, u32 space)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
u32 head, count = WAIT_COUNT, tail;
u8 __iomem *mmio = par->mmio_start_virtual;
@@ -88,7 +89,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
*/
static inline int wait_for_engine_idle(struct fb_info *info)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
u8 __iomem *mmio = par->mmio_start_virtual;
int count = WAIT_COUNT;
@@ -116,7 +117,7 @@ static inline int wait_for_engine_idle(struct fb_info *info)
*/
static inline u32 begin_iring(struct fb_info *info, u32 space)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
if (par->dev_flags & ALWAYS_SYNC)
wait_for_engine_idle(info);
@@ -161,7 +162,7 @@ static inline void source_copy_blit(int dwidth, int dheight, int dpitch,
int xdir, int src, int dest, int rop,
int blit_bpp, struct fb_info *info)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
if (begin_iring(info, 24 + IRING_PAD)) return;
@@ -195,7 +196,7 @@ static inline void color_blit(int width, int height, int pitch, int dest,
int rop, int what, int blit_bpp,
struct fb_info *info)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
if (begin_iring(info, 24 + IRING_PAD)) return;
@@ -236,7 +237,7 @@ static inline void mono_src_copy_imm_blit(int dwidth, int dheight, int dpitch,
int dest, const u32 *src, int bg,
int fg, struct fb_info *info)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
if (begin_iring(info, 24 + (dsize << 2) + IRING_PAD)) return;
@@ -254,7 +255,7 @@ static inline void mono_src_copy_imm_blit(int dwidth, int dheight, int dpitch,
static inline void load_front(int offset, struct fb_info *info)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
if (begin_iring(info, 8 + IRING_PAD)) return;
@@ -296,7 +297,7 @@ static inline void i810fb_iring_enable(struct i810fb_par *par, u32 mode)
void i810fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
u32 dx, dy, width, height, dest, rop = 0, color = 0;
if (!info->var.accel_flags || par->dev_flags & LOCKUP ||
@@ -322,7 +323,7 @@ void i810fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
void i810fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
u32 sx, sy, dx, dy, pitch, width, height, src, dest, xdir;
if (!info->var.accel_flags || par->dev_flags & LOCKUP ||
@@ -361,7 +362,7 @@ void i810fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
void i810fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
u32 fg = 0, bg = 0, size, dst;
if (!info->var.accel_flags || par->dev_flags & LOCKUP ||
@@ -397,7 +398,7 @@ void i810fb_imageblit(struct fb_info *info, const struct fb_image *image)
int i810fb_sync(struct fb_info *info)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
if (!info->var.accel_flags || par->dev_flags & LOCKUP)
return 0;
@@ -407,7 +408,7 @@ int i810fb_sync(struct fb_info *info)
void i810fb_load_front(u32 offset, struct fb_info *info)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
u8 __iomem *mmio = par->mmio_start_virtual;
if (!info->var.accel_flags || par->dev_flags & LOCKUP)
@@ -427,7 +428,7 @@ void i810fb_load_front(u32 offset, struct fb_info *info)
*/
void i810fb_init_ringbuffer(struct fb_info *info)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
u32 tmp1, tmp2;
u8 __iomem *mmio = par->mmio_start_virtual;
diff --git a/drivers/video/i810/i810_gtf.c b/drivers/video/i810/i810_gtf.c
index 64f087a4466..9743d51e7f8 100644
--- a/drivers/video/i810/i810_gtf.c
+++ b/drivers/video/i810/i810_gtf.c
@@ -14,6 +14,7 @@
#include "i810_regs.h"
#include "i810.h"
+#include "i810_main.h"
/*
* FIFO and Watermark tables - based almost wholly on i810_wmark.c in
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index c0c974b1afa..266d0ab9266 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -42,20 +42,62 @@
#include <linux/pci_ids.h>
#include <linux/resource.h>
#include <linux/unistd.h>
+#include <linux/console.h>
#include <asm/io.h>
#include <asm/div64.h>
-
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
-
#include <asm/page.h>
#include "i810_regs.h"
#include "i810.h"
#include "i810_main.h"
+/*
+ * voffset - framebuffer offset in MiB from aperture start address. In order for
+ * the driver to work with X, we must try to use memory holes left untouched by X. The
+ * following table lists where X's different surfaces start at.
+ *
+ * ---------------------------------------------
+ * : : 64 MiB : 32 MiB :
+ * ----------------------------------------------
+ * : FrontBuffer : 0 : 0 :
+ * : DepthBuffer : 48 : 16 :
+ * : BackBuffer : 56 : 24 :
+ * ----------------------------------------------
+ *
+ * So for chipsets with 64 MiB Aperture sizes, 32 MiB for v_offset is okay, allowing up to
+ * 15 + 1 MiB of Framebuffer memory. For 32 MiB Aperture sizes, a v_offset of 8 MiB should
+ * work, allowing 7 + 1 MiB of Framebuffer memory.
+ * Note, the size of the hole may change depending on how much memory you allocate to X,
+ * and how the memory is split up between these surfaces.
+ *
+ * Note: Anytime the DepthBuffer or FrontBuffer is overlapped, X would still run but with
+ * DRI disabled. But if the Frontbuffer is overlapped, X will fail to load.
+ *
+ * Experiment with v_offset to find out which works best for you.
+ */
+static u32 v_offset_default __initdata; /* For 32 MiB Aper size, 8 should be the default */
+static u32 voffset __initdata = 0;
+
+static int i810fb_cursor(struct fb_info *info, struct fb_cursor *cursor);
+static int __devinit i810fb_init_pci (struct pci_dev *dev,
+ const struct pci_device_id *entry);
+static void __exit i810fb_remove_pci(struct pci_dev *dev);
+static int i810fb_resume(struct pci_dev *dev);
+static int i810fb_suspend(struct pci_dev *dev, pm_message_t state);
+
+/* Chipset Specific Functions */
+static int i810fb_set_par (struct fb_info *info);
+static int i810fb_getcolreg (u8 regno, u8 *red, u8 *green, u8 *blue,
+ u8 *transp, struct fb_info *info);
+static int i810fb_setcolreg (unsigned regno, unsigned red, unsigned green, unsigned blue,
+ unsigned transp, struct fb_info *info);
+static int i810fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info);
+static int i810fb_blank (int blank_mode, struct fb_info *info);
+
+/* Initialization */
+static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
+
/* PCI */
static const char *i810_pci_list[] __devinitdata = {
"Intel(R) 810 Framebuffer Device" ,
@@ -776,7 +818,7 @@ static void i810_load_cursor_image(int width, int height, u8 *data,
static void i810_load_cursor_colors(int fg, int bg, struct fb_info *info)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
u8 __iomem *mmio = par->mmio_start_virtual;
u8 red, green, blue, trans, temp;
@@ -949,7 +991,7 @@ static void set_color_bitfields(struct fb_var_screeninfo *var)
static int i810_check_params(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
int line_length, vidmem, mode_valid = 0, retval = 0;
u32 vyres = var->yres_virtual, vxres = var->xres_virtual;
/*
@@ -1043,7 +1085,7 @@ static int i810_check_params(struct fb_var_screeninfo *var,
*/
static int encode_fix(struct fb_fix_screeninfo *fix, struct fb_info *info)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
memset(fix, 0, sizeof(struct fb_fix_screeninfo));
@@ -1154,7 +1196,7 @@ static void decode_var(const struct fb_var_screeninfo *var,
static int i810fb_getcolreg(u8 regno, u8 *red, u8 *green, u8 *blue,
u8 *transp, struct fb_info *info)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
u8 __iomem *mmio = par->mmio_start_virtual;
u8 temp;
@@ -1193,7 +1235,7 @@ static int i810fb_getcolreg(u8 regno, u8 *red, u8 *green, u8 *blue,
static int i810fb_open(struct fb_info *info, int user)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
u32 count = atomic_read(&par->use_count);
if (count == 0) {
@@ -1212,7 +1254,7 @@ static int i810fb_open(struct fb_info *info, int user)
static int i810fb_release(struct fb_info *info, int user)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
u32 count;
count = atomic_read(&par->use_count);
@@ -1234,7 +1276,7 @@ static int i810fb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
u8 __iomem *mmio = par->mmio_start_virtual;
u8 temp;
int i;
@@ -1328,7 +1370,7 @@ static int i810fb_setcolreg(unsigned regno, unsigned red, unsigned green,
static int i810fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
u32 total;
total = var->xoffset * par->depth +
@@ -1340,7 +1382,7 @@ static int i810fb_pan_display(struct fb_var_screeninfo *var,
static int i810fb_blank (int blank_mode, struct fb_info *info)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
u8 __iomem *mmio = par->mmio_start_virtual;
int mode = 0, pwr, scr_off = 0;
@@ -1385,7 +1427,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info)
static int i810fb_set_par(struct fb_info *info)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
decode_var(&info->var, par);
i810_load_regs(par);
@@ -1429,7 +1471,7 @@ static int i810fb_check_var(struct fb_var_screeninfo *var,
static int i810fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
- struct i810fb_par *par = (struct i810fb_par *)info->par;
+ struct i810fb_par *par = info->par;
u8 __iomem *mmio = par->mmio_start_virtual;
if (!par->dev_flags & LOCKUP)
@@ -1516,36 +1558,29 @@ static struct fb_ops i810fb_ops __devinitdata = {
static int i810fb_suspend(struct pci_dev *dev, pm_message_t state)
{
struct fb_info *info = pci_get_drvdata(dev);
- struct i810fb_par *par = (struct i810fb_par *) info->par;
- int blank = 0, prev_state = par->cur_state;
-
- if (state.event == prev_state)
- return 0;
+ struct i810fb_par *par = info->par;
par->cur_state = state.event;
- switch (state.event) {
- case 1:
- blank = VESA_VSYNC_SUSPEND;
- break;
- case 2:
- blank = VESA_HSYNC_SUSPEND;
- break;
- case 3:
- blank = VESA_POWERDOWN;
- break;
- default:
- return -EINVAL;
+ if (state.event == PM_EVENT_FREEZE) {
+ dev->dev.power.power_state = state;
+ return 0;
}
- info->fbops->fb_blank(blank, info);
- if (!prev_state) {
- agp_unbind_memory(par->i810_gtt.i810_fb_memory);
- agp_unbind_memory(par->i810_gtt.i810_cursor_memory);
- pci_disable_device(dev);
- }
+ acquire_console_sem();
+ fb_set_suspend(info, 1);
+
+ if (info->fbops->fb_sync)
+ info->fbops->fb_sync(info);
+
+ i810fb_blank(FB_BLANK_POWERDOWN, info);
+ agp_unbind_memory(par->i810_gtt.i810_fb_memory);
+ agp_unbind_memory(par->i810_gtt.i810_cursor_memory);
+
pci_save_state(dev);
+ pci_disable_device(dev);
pci_set_power_state(dev, pci_choose_state(dev, state));
+ release_console_sem();
return 0;
}
@@ -1553,23 +1588,29 @@ static int i810fb_suspend(struct pci_dev *dev, pm_message_t state)
static int i810fb_resume(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
+ int cur_state = par->cur_state;
+
+ par->cur_state = PM_EVENT_ON;
- if (par->cur_state == 0)
+ if (cur_state == PM_EVENT_FREEZE) {
+ pci_set_power_state(dev, PCI_D0);
return 0;
+ }
- pci_restore_state(dev);
+ acquire_console_sem();
pci_set_power_state(dev, PCI_D0);
+ pci_restore_state(dev);
pci_enable_device(dev);
+ pci_set_master(dev);
agp_bind_memory(par->i810_gtt.i810_fb_memory,
par->fb.offset);
agp_bind_memory(par->i810_gtt.i810_cursor_memory,
par->cursor_heap.offset);
-
+ i810fb_set_par(info);
+ fb_set_suspend (info, 0);
info->fbops->fb_blank(VESA_NO_BLANKING, info);
-
- par->cur_state = 0;
-
+ release_console_sem();
return 0;
}
/***********************************************************************
@@ -1610,7 +1651,7 @@ static void __devinit i810_fix_offsets(struct i810fb_par *par)
static int __devinit i810_alloc_agp_mem(struct fb_info *info)
{
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
int size;
struct agp_bridge_data *bridge;
@@ -2074,7 +2115,7 @@ static void i810fb_release_resource(struct fb_info *info,
static void __exit i810fb_remove_pci(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
- struct i810fb_par *par = (struct i810fb_par *) info->par;
+ struct i810fb_par *par = info->par;
unregister_framebuffer(info);
i810fb_release_resource(info, par);
diff --git a/drivers/video/i810/i810_main.h b/drivers/video/i810/i810_main.h
index 06072a6466f..51d4f3d4116 100644
--- a/drivers/video/i810/i810_main.h
+++ b/drivers/video/i810/i810_main.h
@@ -14,55 +14,6 @@
#ifndef __I810_MAIN_H__
#define __I810_MAIN_H__
-static int __devinit i810fb_init_pci (struct pci_dev *dev,
- const struct pci_device_id *entry);
-static void __exit i810fb_remove_pci(struct pci_dev *dev);
-static int i810fb_resume(struct pci_dev *dev);
-static int i810fb_suspend(struct pci_dev *dev, pm_message_t state);
-
-/*
- * voffset - framebuffer offset in MiB from aperture start address. In order for
- * the driver to work with X, we must try to use memory holes left untouched by X. The
- * following table lists where X's different surfaces start at.
- *
- * ---------------------------------------------
- * : : 64 MiB : 32 MiB :
- * ----------------------------------------------
- * : FrontBuffer : 0 : 0 :
- * : DepthBuffer : 48 : 16 :
- * : BackBuffer : 56 : 24 :
- * ----------------------------------------------
- *
- * So for chipsets with 64 MiB Aperture sizes, 32 MiB for v_offset is okay, allowing up to
- * 15 + 1 MiB of Framebuffer memory. For 32 MiB Aperture sizes, a v_offset of 8 MiB should
- * work, allowing 7 + 1 MiB of Framebuffer memory.
- * Note, the size of the hole may change depending on how much memory you allocate to X,
- * and how the memory is split up between these surfaces.
- *
- * Note: Anytime the DepthBuffer or FrontBuffer is overlapped, X would still run but with
- * DRI disabled. But if the Frontbuffer is overlapped, X will fail to load.
- *
- * Experiment with v_offset to find out which works best for you.
- */
-static u32 v_offset_default __initdata; /* For 32 MiB Aper size, 8 should be the default */
-static u32 voffset __initdata = 0;
-
-static int i810fb_cursor(struct fb_info *info, struct fb_cursor *cursor);
-
-/* Chipset Specific Functions */
-static int i810fb_set_par (struct fb_info *info);
-static int i810fb_getcolreg (u8 regno, u8 *red, u8 *green, u8 *blue,
- u8 *transp, struct fb_info *info);
-static int i810fb_setcolreg (unsigned regno, unsigned red, unsigned green, unsigned blue,
- unsigned transp, struct fb_info *info);
-static int i810fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info);
-static int i810fb_blank (int blank_mode, struct fb_info *info);
-
-/* Initialization */
-static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
-extern int __init agp_intel_init(void);
-
-
/* Video Timings */
extern void round_off_xres (u32 *xres);
extern void round_off_yres (u32 *xres, u32 *yres);
@@ -101,7 +52,7 @@ static inline void i810_delete_i2c_busses(struct i810fb_par *par) { }
/* Conditionals */
#ifdef CONFIG_X86
-inline void flush_cache(void)
+static inline void flush_cache(void)
{
asm volatile ("wbinvd":::"memory");
}
@@ -110,7 +61,9 @@ inline void flush_cache(void)
#endif
#ifdef CONFIG_MTRR
-#define KERNEL_HAS_MTRR 1
+
+#include <asm/mtrr.h>
+
static inline void __devinit set_mtrr(struct i810fb_par *par)
{
par->mtrr_reg = mtrr_add((u32) par->aperture.physical,
@@ -128,7 +81,6 @@ static inline void unset_mtrr(struct i810fb_par *par)
par->aperture.size);
}
#else
-#define KERNEL_HAS_MTRR 0
#define set_mtrr(x) printk("set_mtrr: MTRR is disabled in the kernel\n")
#define unset_mtrr(x) do { } while (0)
diff --git a/drivers/video/imsttfb.c b/drivers/video/imsttfb.c
index 7fbe24206b1..a5d813050db 100644
--- a/drivers/video/imsttfb.c
+++ b/drivers/video/imsttfb.c
@@ -323,6 +323,7 @@ struct imstt_par {
unsigned long cmap_regs_phys;
__u8 *cmap_regs;
__u32 ramdac;
+ __u32 palette[16];
};
enum {
@@ -657,7 +658,7 @@ set_imstt_regvals_tvp (struct imstt_par *par, u_int bpp)
static void
set_imstt_regvals (struct fb_info *info, u_int bpp)
{
- struct imstt_par *par = (struct imstt_par *) info->par;
+ struct imstt_par *par = info->par;
struct imstt_regvals *init = &par->init;
__u32 ctl, pitch, byteswap, scr;
@@ -749,7 +750,7 @@ set_imstt_regvals (struct fb_info *info, u_int bpp)
static inline void
set_offset (struct fb_var_screeninfo *var, struct fb_info *info)
{
- struct imstt_par *par = (struct imstt_par *) info->par;
+ struct imstt_par *par = info->par;
__u32 off = var->yoffset * (info->fix.line_length >> 3)
+ ((var->xoffset * (var->bits_per_pixel >> 3)) >> 3);
write_reg_le32(par->dc_regs, SSR, off);
@@ -863,7 +864,7 @@ imsttfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
static int
imsttfb_set_par(struct fb_info *info)
{
- struct imstt_par *par = (struct imstt_par *) info->par;
+ struct imstt_par *par = info->par;
if (!compute_imstt_regvals(par, info->var.xres, info->var.yres))
return -EINVAL;
@@ -881,7 +882,7 @@ static int
imsttfb_setcolreg (u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info)
{
- struct imstt_par *par = (struct imstt_par *) info->par;
+ struct imstt_par *par = info->par;
u_int bpp = info->var.bits_per_pixel;
if (regno > 255)
@@ -905,14 +906,17 @@ imsttfb_setcolreg (u_int regno, u_int red, u_int green, u_int blue,
if (regno < 16)
switch (bpp) {
case 16:
- ((u16 *)info->pseudo_palette)[regno] = (regno << (info->var.green.length == 5 ? 10 : 11)) | (regno << 5) | regno;
+ par->palette[regno] =
+ (regno << (info->var.green.length ==
+ 5 ? 10 : 11)) | (regno << 5) | regno;
break;
case 24:
- ((u32 *)info->pseudo_palette)[regno] = (regno << 16) | (regno << 8) | regno;
+ par->palette[regno] =
+ (regno << 16) | (regno << 8) | regno;
break;
case 32: {
int i = (regno << 8) | regno;
- ((u32 *)info->pseudo_palette)[regno] = (i << 16) | i;
+ par->palette[regno] = (i << 16) |i;
break;
}
}
@@ -935,7 +939,7 @@ imsttfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
static int
imsttfb_blank(int blank, struct fb_info *info)
{
- struct imstt_par *par = (struct imstt_par *) info->par;
+ struct imstt_par *par = info->par;
__u32 ctrl;
ctrl = read_reg_le32(par->dc_regs, STGCTL);
@@ -989,7 +993,7 @@ imsttfb_blank(int blank, struct fb_info *info)
static void
imsttfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct imstt_par *par = (struct imstt_par *) info->par;
+ struct imstt_par *par = info->par;
__u32 Bpp, line_pitch, bgc, dx, dy, width, height;
bgc = rect->color;
@@ -1033,7 +1037,7 @@ imsttfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
static void
imsttfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
- struct imstt_par *par = (struct imstt_par *) info->par;
+ struct imstt_par *par = info->par;
__u32 Bpp, line_pitch, fb_offset_old, fb_offset_new, sp, dp_octl;
__u32 cnt, bltctl, sx, sy, dx, dy, height, width;
@@ -1195,7 +1199,7 @@ imstt_set_cursor(struct imstt_par *par, struct fb_image *d, int on)
static int
imsttfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
- struct imstt_par *par = (struct imstt_par *) info->par;
+ struct imstt_par *par = info->par;
u32 flags = cursor->set, fg, bg, xx, yy;
if (cursor->dest == NULL && cursor->rop == ROP_XOR)
@@ -1266,7 +1270,7 @@ static int
imsttfb_ioctl(struct inode *inode, struct file *file, u_int cmd,
u_long arg, struct fb_info *info)
{
- struct imstt_par *par = (struct imstt_par *) info->par;
+ struct imstt_par *par = info->par;
void __user *argp = (void __user *)arg;
__u32 reg[2];
__u8 idx[2];
@@ -1350,7 +1354,7 @@ static struct fb_ops imsttfb_ops = {
static void __devinit
init_imstt(struct fb_info *info)
{
- struct imstt_par *par = (struct imstt_par *) info->par;
+ struct imstt_par *par = info->par;
__u32 i, tmp, *ip, *end;
tmp = read_reg_le32(par->dc_regs, PRC);
@@ -1413,7 +1417,7 @@ init_imstt(struct fb_info *info)
if ((info->var.xres * info->var.yres) * (info->var.bits_per_pixel >> 3) > info->fix.smem_len
|| !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) {
printk("imsttfb: %ux%ux%u not supported\n", info->var.xres, info->var.yres, info->var.bits_per_pixel);
- kfree(info);
+ framebuffer_release(info);
return;
}
@@ -1449,7 +1453,7 @@ init_imstt(struct fb_info *info)
fb_alloc_cmap(&info->cmap, 0, 0);
if (register_framebuffer(info) < 0) {
- kfree(info);
+ framebuffer_release(info);
return;
}
@@ -1474,26 +1478,21 @@ imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
printk(KERN_ERR "imsttfb: no OF node for pci device\n");
#endif /* CONFIG_PPC_OF */
- size = sizeof(struct fb_info) + sizeof(struct imstt_par) +
- sizeof(u32) * 16;
-
- info = kmalloc(size, GFP_KERNEL);
+ info = framebuffer_alloc(sizeof(struct imstt_par), &pdev->dev);
if (!info) {
printk(KERN_ERR "imsttfb: Can't allocate memory\n");
return -ENOMEM;
}
- memset(info, 0, size);
-
- par = (struct imstt_par *) (info + 1);
+ par = info->par;
addr = pci_resource_start (pdev, 0);
size = pci_resource_len (pdev, 0);
if (!request_mem_region(addr, size, "imsttfb")) {
printk(KERN_ERR "imsttfb: Can't reserve memory region\n");
- kfree(info);
+ framebuffer_release(info);
return -ENODEV;
}
@@ -1516,14 +1515,13 @@ imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
info->fix.smem_start = addr;
- info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ? 0x400000 : 0x800000);
+ info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
+ 0x400000 : 0x800000);
info->fix.mmio_start = addr + 0x800000;
par->dc_regs = ioremap(addr + 0x800000, 0x1000);
par->cmap_regs_phys = addr + 0x840000;
par->cmap_regs = (__u8 *)ioremap(addr + 0x840000, 0x1000);
- info->par = par;
- info->pseudo_palette = (void *) (par + 1);
- info->device = &pdev->dev;
+ info->pseudo_palette = par->palette;
init_imstt(info);
pci_set_drvdata(pdev, info);
@@ -1534,7 +1532,7 @@ static void __devexit
imsttfb_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
- struct imstt_par *par = (struct imstt_par *) info->par;
+ struct imstt_par *par = info->par;
int size = pci_resource_len(pdev, 0);
unregister_framebuffer(info);
@@ -1542,7 +1540,7 @@ imsttfb_remove(struct pci_dev *pdev)
iounmap(par->dc_regs);
iounmap(info->screen_base);
release_mem_region(info->fix.smem_start, size);
- kfree(info);
+ framebuffer_release(info);
}
#ifndef MODULE
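
The imsttfb hunks above (and the kyro, neofb and pm2fb hunks further down) all apply the same conversion: instead of carving fb_info, the driver's private par structure and the pseudo palette out of one hand-rolled kmalloc(), the driver lets the framebuffer core allocate everything. The following is an editorial sketch of the resulting probe/remove pattern, not part of the patch; the examplefb_* names are hypothetical.

struct examplefb_par {
	u32 palette[16];	/* pseudo palette now lives inside par */
	/* ... device state ... */
};

static int examplefb_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct fb_info *info;
	struct examplefb_par *par;

	/* one zeroed block: fb_info + sizeof(par); also sets info->device */
	info = framebuffer_alloc(sizeof(struct examplefb_par), &pdev->dev);
	if (!info)
		return -ENOMEM;

	par = info->par;			/* ->par is void *, no cast needed */
	info->pseudo_palette = par->palette;	/* instead of (void *)(par + 1) */

	/* ... map resources, fill info->fix/var, register_framebuffer() ... */

	return 0;
}

static void examplefb_remove(struct pci_dev *pdev)
{
	struct fb_info *info = pci_get_drvdata(pdev);

	unregister_framebuffer(info);
	framebuffer_release(info);	/* pairs with framebuffer_alloc() */
}
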
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 5924cc225c9..1718baaeed2 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -554,7 +554,7 @@ static int __init imxfb_probe(struct platform_device *pdev)
inf = pdev->dev.platform_data;
if(!inf) {
- dev_err(dev,"No platform_data available\n");
+ dev_err(&pdev->dev,"No platform_data available\n");
return -ENOMEM;
}
@@ -579,7 +579,7 @@ static int __init imxfb_probe(struct platform_device *pdev)
if (!inf->fixed_screen_cpu) {
ret = imxfb_map_video_memory(info);
if (ret) {
- dev_err(dev, "Failed to allocate video RAM: %d\n", ret);
+ dev_err(&pdev->dev, "Failed to allocate video RAM: %d\n", ret);
ret = -ENOMEM;
goto failed_map;
}
@@ -608,7 +608,7 @@ static int __init imxfb_probe(struct platform_device *pdev)
imxfb_set_par(info);
ret = register_framebuffer(info);
if (ret < 0) {
- dev_err(dev, "failed to register framebuffer\n");
+ dev_err(&pdev->dev, "failed to register framebuffer\n");
goto failed_register;
}
diff --git a/drivers/video/kyro/STG4000InitDevice.c b/drivers/video/kyro/STG4000InitDevice.c
index 7e33cd307d4..ab5285a7f1d 100644
--- a/drivers/video/kyro/STG4000InitDevice.c
+++ b/drivers/video/kyro/STG4000InitDevice.c
@@ -15,6 +15,7 @@
#include <linux/pci.h>
#include "STG4000Reg.h"
+#include "STG4000Interface.h"
/* SDRAM fixed settings */
#define SDRAM_CFG_0 0x49A1
diff --git a/drivers/video/kyro/STG4000Interface.h b/drivers/video/kyro/STG4000Interface.h
index e75b3b4a4aa..b7c83d5dfb1 100644
--- a/drivers/video/kyro/STG4000Interface.h
+++ b/drivers/video/kyro/STG4000Interface.h
@@ -11,7 +11,8 @@
#ifndef _STG4000INTERFACE_H
#define _STG4000INTERFACE_H
-struct pci_dev;
+#include <linux/pci.h>
+#include <video/kyro.h>
/*
* Ramdac Setup
diff --git a/drivers/video/kyro/STG4000OverlayDevice.c b/drivers/video/kyro/STG4000OverlayDevice.c
index 2ae9bafacdd..a8c9713413e 100644
--- a/drivers/video/kyro/STG4000OverlayDevice.c
+++ b/drivers/video/kyro/STG4000OverlayDevice.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include "STG4000Reg.h"
+#include "STG4000Interface.h"
/* HW Defines */
diff --git a/drivers/video/kyro/fbdev.c b/drivers/video/kyro/fbdev.c
index 5eb4d5c177b..bcd359b6d4f 100644
--- a/drivers/video/kyro/fbdev.c
+++ b/drivers/video/kyro/fbdev.c
@@ -73,8 +73,6 @@ static struct fb_var_screeninfo kyro_var __devinitdata = {
.vmode = FB_VMODE_NONINTERLACED,
};
-static struct kyrofb_info *currentpar;
-
typedef struct {
STG4000REG __iomem *pSTGReg; /* Virtual address of PCI register region */
u32 ulNextFreeVidMem; /* Offset from start of vid mem to next free region */
@@ -309,7 +307,7 @@ enum {
/* Accessors */
static int kyro_dev_video_mode_set(struct fb_info *info)
{
- struct kyrofb_info *par = (struct kyrofb_info *)info->par;
+ struct kyrofb_info *par = info->par;
/* Turn off display */
StopVTG(deviceInfo.pSTGReg);
@@ -402,7 +400,7 @@ static inline unsigned long get_line_length(int x, int bpp)
static int kyrofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
- struct kyrofb_info *par = (struct kyrofb_info *)info->par;
+ struct kyrofb_info *par = info->par;
if (var->bits_per_pixel != 16 && var->bits_per_pixel != 32) {
printk(KERN_WARNING "kyrofb: depth not supported: %u\n", var->bits_per_pixel);
@@ -478,7 +476,7 @@ static int kyrofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
static int kyrofb_set_par(struct fb_info *info)
{
- struct kyrofb_info *par = (struct kyrofb_info *)info->par;
+ struct kyrofb_info *par = info->par;
unsigned long lineclock;
unsigned long frameclock;
@@ -536,20 +534,22 @@ static int kyrofb_set_par(struct fb_info *info)
static int kyrofb_setcolreg(u_int regno, u_int red, u_int green,
u_int blue, u_int transp, struct fb_info *info)
{
+ struct kyrofb_info *par = info->par;
+
if (regno > 255)
return 1; /* Invalid register */
if (regno < 16) {
switch (info->var.bits_per_pixel) {
case 16:
- ((u16*)(info->pseudo_palette))[regno] =
+ par->palette[regno] =
(red & 0xf800) |
((green & 0xfc00) >> 5) |
((blue & 0xf800) >> 11);
break;
case 32:
red >>= 8; green >>= 8; blue >>= 8; transp >>= 8;
- ((u32*)(info->pseudo_palette))[regno] =
+ par->palette[regno] =
(transp << 24) | (red << 16) | (green << 8) | blue;
break;
}
@@ -675,6 +675,7 @@ static int __devinit kyrofb_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct fb_info *info;
+ struct kyrofb_info *currentpar;
unsigned long size;
int err;
@@ -683,14 +684,11 @@ static int __devinit kyrofb_probe(struct pci_dev *pdev,
return err;
}
- size = sizeof(struct fb_info) + sizeof(struct kyrofb_info) + 16 * sizeof(u32);
- info = kmalloc(size, GFP_KERNEL);
+ info = framebuffer_alloc(sizeof(struct kyrofb_info), &pdev->dev);
if (!info)
return -ENOMEM;
- memset(info, 0, size);
-
- currentpar = (struct kyrofb_info *)(info + 1);
+ currentpar = info->par;
kyro_fix.smem_start = pci_resource_start(pdev, 0);
kyro_fix.smem_len = pci_resource_len(pdev, 0);
@@ -716,8 +714,7 @@ static int __devinit kyrofb_probe(struct pci_dev *pdev,
info->fbops = &kyrofb_ops;
info->fix = kyro_fix;
- info->par = currentpar;
- info->pseudo_palette = (void *)(currentpar + 1);
+ info->pseudo_palette = currentpar->palette;
info->flags = FBINFO_DEFAULT;
SetCoreClockPLL(deviceInfo.pSTGReg, pdev);
@@ -741,7 +738,6 @@ static int __devinit kyrofb_probe(struct pci_dev *pdev,
fb_memset(info->screen_base, 0, size);
- info->device = &pdev->dev;
if (register_framebuffer(info) < 0)
goto out_unmap;
@@ -757,7 +753,7 @@ static int __devinit kyrofb_probe(struct pci_dev *pdev,
out_unmap:
iounmap(currentpar->regbase);
iounmap(info->screen_base);
- kfree(info);
+ framebuffer_release(info);
return -EINVAL;
}
@@ -765,7 +761,7 @@ out_unmap:
static void __devexit kyrofb_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
- struct kyrofb_info *par = (struct kyrofb_info *)info->par;
+ struct kyrofb_info *par = info->par;
/* Reset the board */
StopVTG(deviceInfo.pSTGReg);
@@ -789,7 +785,7 @@ static void __devexit kyrofb_remove(struct pci_dev *pdev)
unregister_framebuffer(info);
pci_set_drvdata(pdev, NULL);
- kfree(info);
+ framebuffer_release(info);
}
static int __init kyrofb_init(void)
diff --git a/drivers/video/logo/Makefile b/drivers/video/logo/Makefile
index d0244c04af5..4ef5cd19609 100644
--- a/drivers/video/logo/Makefile
+++ b/drivers/video/logo/Makefile
@@ -16,7 +16,7 @@ obj-$(CONFIG_LOGO_M32R_CLUT224) += logo_m32r_clut224.o
# How to generate logo's
-# Use logo-cfiles to retreive list of .c files to be built
+# Use logo-cfiles to retrieve list of .c files to be built
logo-cfiles = $(notdir $(patsubst %.$(2), %.c, \
$(wildcard $(srctree)/$(src)/*$(1).$(2))))
diff --git a/drivers/video/macfb.c b/drivers/video/macfb.c
index cfc748e9427..e6cbd9de944 100644
--- a/drivers/video/macfb.c
+++ b/drivers/video/macfb.c
@@ -609,18 +609,19 @@ void __init macfb_setup(char *options)
}
}
-void __init macfb_init(void)
+static int __init macfb_init(void)
{
int video_cmap_len, video_is_nubus = 0;
struct nubus_dev* ndev = NULL;
char *option = NULL;
+ int err;
if (fb_get_options("macfb", &option))
return -ENODEV;
macfb_setup(option);
if (!MACH_IS_MAC)
- return;
+ return -ENODEV;
/* There can only be one internal video controller anyway so
we're not too worried about this */
@@ -958,11 +959,11 @@ void __init macfb_init(void)
fb_alloc_cmap(&fb_info.cmap, video_cmap_len, 0);
- if (register_framebuffer(&fb_info) < 0)
- return;
-
- printk("fb%d: %s frame buffer device\n",
- fb_info.node, fb_info.fix.id);
+ err = register_framebuffer(&fb_info);
+ if (!err)
+ printk("fb%d: %s frame buffer device\n",
+ fb_info.node, fb_info.fix.id);
+ return err;
}
module_init(macfb_init);
diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
index a8c47ad2cdb..3a3e1804c56 100644
--- a/drivers/video/matrox/matroxfb_base.h
+++ b/drivers/video/matrox/matroxfb_base.h
@@ -50,8 +50,6 @@
#include <asm/mtrr.h>
#endif
-#include "../console/fbcon.h"
-
#if defined(CONFIG_PPC_PMAC)
#include <asm/prom.h>
#include <asm/pci-bridge.h>
@@ -351,8 +349,6 @@ struct matrox_bios {
} output;
};
-extern struct display fb_display[];
-
struct matrox_switch;
struct matroxfb_driver;
struct matroxfb_dh_fb_info;
diff --git a/drivers/video/matrox/matroxfb_g450.c b/drivers/video/matrox/matroxfb_g450.c
index 35008af7db7..c122d8743dd 100644
--- a/drivers/video/matrox/matroxfb_g450.c
+++ b/drivers/video/matrox/matroxfb_g450.c
@@ -20,6 +20,8 @@
#include <asm/uaccess.h>
#include <asm/div64.h>
+#include "matroxfb_g450.h"
+
/* Definition of the various controls */
struct mctl {
struct v4l2_queryctrl desc;
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index ad60bbb16cd..a1f2c5e8fc8 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -1288,18 +1288,13 @@ static int maven_detach_client(struct i2c_client* client) {
return 0;
}
-static int maven_command(struct i2c_client* client, unsigned int cmd, void* arg) {
- return -ENOIOCTLCMD; /* or -EINVAL, depends on who will call this */
-}
-
static struct i2c_driver maven_driver={
- .owner = THIS_MODULE,
- .name = "maven",
+ .driver = {
+ .name = "maven",
+ },
.id = I2C_DRIVERID_MGATVO,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = maven_attach_adapter,
.detach_client = maven_detach_client,
- .command = maven_command,
};
/* ************************** */
diff --git a/drivers/video/matrox/matroxfb_misc.c b/drivers/video/matrox/matroxfb_misc.c
index d9d3e9f6c08..455a46ce840 100644
--- a/drivers/video/matrox/matroxfb_misc.c
+++ b/drivers/video/matrox/matroxfb_misc.c
@@ -192,11 +192,8 @@ int matroxfb_vgaHWinit(WPMINFO struct my_timming* m) {
unsigned int wd;
unsigned int divider;
int i;
- int fwidth;
struct matrox_hw_state * const hw = &ACCESS_FBINFO(hw);
- fwidth = 8;
-
DBG(__FUNCTION__)
hw->SEQ[0] = 0x00;
@@ -235,10 +232,7 @@ int matroxfb_vgaHWinit(WPMINFO struct my_timming* m) {
hw->ATTR[16] = 0x41;
hw->ATTR[17] = 0xFF;
hw->ATTR[18] = 0x0F;
- if (fwidth == 9)
- hw->ATTR[19] = 0x08;
- else
- hw->ATTR[19] = 0x00;
+ hw->ATTR[19] = 0x00;
hw->ATTR[20] = 0x00;
hd = m->HDisplay >> 3;
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index 8486e77872d..e18c9f98a40 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -485,7 +485,7 @@ static void vgaHWRestore(const struct fb_info *info,
*/
static inline int neo2200_sync(struct fb_info *info)
{
- struct neofb_par *par = (struct neofb_par *) info->par;
+ struct neofb_par *par = info->par;
int waitcycles;
while (readl(&par->neo2200->bltStat) & 1)
@@ -525,7 +525,7 @@ static inline void neo2200_wait_fifo(struct fb_info *info,
static inline void neo2200_accel_init(struct fb_info *info,
struct fb_var_screeninfo *var)
{
- struct neofb_par *par = (struct neofb_par *) info->par;
+ struct neofb_par *par = info->par;
Neo2200 __iomem *neo2200 = par->neo2200;
u32 bltMod, pitch;
@@ -560,7 +560,7 @@ static inline void neo2200_accel_init(struct fb_info *info,
static int
neofb_open(struct fb_info *info, int user)
{
- struct neofb_par *par = (struct neofb_par *) info->par;
+ struct neofb_par *par = info->par;
int cnt = atomic_read(&par->ref_count);
if (!cnt) {
@@ -575,7 +575,7 @@ neofb_open(struct fb_info *info, int user)
static int
neofb_release(struct fb_info *info, int user)
{
- struct neofb_par *par = (struct neofb_par *) info->par;
+ struct neofb_par *par = info->par;
int cnt = atomic_read(&par->ref_count);
if (!cnt)
@@ -590,7 +590,7 @@ neofb_release(struct fb_info *info, int user)
static int
neofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
- struct neofb_par *par = (struct neofb_par *) info->par;
+ struct neofb_par *par = info->par;
unsigned int pixclock = var->pixclock;
struct xtimings timings;
int memlen, vramlen;
@@ -757,7 +757,7 @@ neofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
static int neofb_set_par(struct fb_info *info)
{
- struct neofb_par *par = (struct neofb_par *) info->par;
+ struct neofb_par *par = info->par;
struct xtimings timings;
unsigned char temp;
int i, clock_hi = 0;
@@ -1216,7 +1216,7 @@ static int neofb_set_par(struct fb_info *info)
static void neofb_update_start(struct fb_info *info,
struct fb_var_screeninfo *var)
{
- struct neofb_par *par = (struct neofb_par *) info->par;
+ struct neofb_par *par = info->par;
struct vgastate *state = &par->state;
int oldExtCRTDispAddr;
int Base;
@@ -1331,7 +1331,7 @@ static int neofb_blank(int blank_mode, struct fb_info *info)
* wms...Enable VESA DPMS compatible powerdown mode
* run "setterm -powersave powerdown" to take advantage
*/
- struct neofb_par *par = (struct neofb_par *)info->par;
+ struct neofb_par *par = info->par;
int seqflags, lcdflags, dpmsflags, reg;
switch (blank_mode) {
@@ -1404,7 +1404,7 @@ static int neofb_blank(int blank_mode, struct fb_info *info)
static void
neo2200_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct neofb_par *par = (struct neofb_par *) info->par;
+ struct neofb_par *par = info->par;
u_long dst, rop;
dst = rect->dx + rect->dy * info->var.xres_virtual;
@@ -1440,7 +1440,7 @@ static void
neo2200_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
u32 sx = area->sx, sy = area->sy, dx = area->dx, dy = area->dy;
- struct neofb_par *par = (struct neofb_par *) info->par;
+ struct neofb_par *par = info->par;
u_long src, dst, bltCntl;
bltCntl = NEO_BC3_FIFO_EN | NEO_BC3_SKIP_MAPPING | 0x0C0000;
@@ -1472,7 +1472,7 @@ neo2200_copyarea(struct fb_info *info, const struct fb_copyarea *area)
static void
neo2200_imageblit(struct fb_info *info, const struct fb_image *image)
{
- struct neofb_par *par = (struct neofb_par *) info->par;
+ struct neofb_par *par = info->par;
int s_pitch = (image->width * image->depth + 7) >> 3;
int scan_align = info->pixmap.scan_align - 1;
int buf_align = info->pixmap.buf_align - 1;
@@ -1686,7 +1686,7 @@ static struct fb_videomode __devinitdata mode800x480 = {
static int __devinit neo_map_mmio(struct fb_info *info,
struct pci_dev *dev)
{
- struct neofb_par *par = (struct neofb_par *) info->par;
+ struct neofb_par *par = info->par;
DBG("neo_map_mmio");
@@ -1733,7 +1733,7 @@ static int __devinit neo_map_mmio(struct fb_info *info,
static void neo_unmap_mmio(struct fb_info *info)
{
- struct neofb_par *par = (struct neofb_par *) info->par;
+ struct neofb_par *par = info->par;
DBG("neo_unmap_mmio");
@@ -1796,7 +1796,7 @@ static void neo_unmap_video(struct fb_info *info)
#ifdef CONFIG_MTRR
{
- struct neofb_par *par = (struct neofb_par *) info->par;
+ struct neofb_par *par = info->par;
mtrr_del(par->mtrr, info->fix.smem_start,
info->fix.smem_len);
@@ -1811,7 +1811,7 @@ static void neo_unmap_video(struct fb_info *info)
static int __devinit neo_scan_monitor(struct fb_info *info)
{
- struct neofb_par *par = (struct neofb_par *) info->par;
+ struct neofb_par *par = info->par;
unsigned char type, display;
int w;
@@ -1890,7 +1890,7 @@ static int __devinit neo_scan_monitor(struct fb_info *info)
static int __devinit neo_init_hw(struct fb_info *info)
{
- struct neofb_par *par = (struct neofb_par *) info->par;
+ struct neofb_par *par = info->par;
int videoRam = 896;
int maxClock = 65000;
int CursorMem = 1024;
@@ -2014,7 +2014,7 @@ static struct fb_info *__devinit neo_alloc_fb_info(struct pci_dev *dev, const st
struct fb_info *info;
struct neofb_par *par;
- info = framebuffer_alloc(sizeof(struct neofb_par) + sizeof(u32) * 256, &dev->dev);
+ info = framebuffer_alloc(sizeof(struct neofb_par), &dev->dev);
if (!info)
return NULL;
@@ -2081,7 +2081,7 @@ static struct fb_info *__devinit neo_alloc_fb_info(struct pci_dev *dev, const st
info->fix.accel = id->driver_data;
info->fbops = &neofb_ops;
- info->pseudo_palette = (void *) (par + 1);
+ info->pseudo_palette = par->palette;
return info;
}
diff --git a/drivers/video/nvidia/nv_hw.c b/drivers/video/nvidia/nv_hw.c
index b989358437b..99c3a8e6a23 100644
--- a/drivers/video/nvidia/nv_hw.c
+++ b/drivers/video/nvidia/nv_hw.c
@@ -52,6 +52,7 @@
#include <linux/pci.h>
#include "nv_type.h"
#include "nv_local.h"
+#include "nv_proto.h"
void NVLockUnlock(struct nvidia_par *par, int Lock)
{
@@ -848,7 +849,7 @@ void NVCalcStateExt(struct nvidia_par *par,
int width,
int hDisplaySize, int height, int dotClock, int flags)
{
- int pixelDepth, VClk;
+ int pixelDepth, VClk = 0;
/*
* Save mode parameters.
*/
@@ -938,15 +939,24 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
if (par->Architecture == NV_ARCH_04) {
NV_WR32(par->PFB, 0x0200, state->config);
- } else if ((par->Chipset & 0xfff0) == 0x0090) {
- for (i = 0; i < 15; i++) {
- NV_WR32(par->PFB, 0x0600 + (i * 0x10), 0);
- NV_WR32(par->PFB, 0x0604 + (i * 0x10), par->FbMapSize - 1);
- }
- } else {
+ } else if ((par->Architecture < NV_ARCH_40) ||
+ (par->Chipset & 0xfff0) == 0x0040) {
for (i = 0; i < 8; i++) {
NV_WR32(par->PFB, 0x0240 + (i * 0x10), 0);
- NV_WR32(par->PFB, 0x0244 + (i * 0x10), par->FbMapSize - 1);
+ NV_WR32(par->PFB, 0x0244 + (i * 0x10),
+ par->FbMapSize - 1);
+ }
+ } else {
+ int regions = 12;
+
+ if (((par->Chipset & 0xfff0) == 0x0090) ||
+ ((par->Chipset & 0xfff0) == 0x01D0) ||
+ ((par->Chipset & 0xfff0) == 0x0290))
+ regions = 15;
+ for(i = 0; i < regions; i++) {
+ NV_WR32(par->PFB, 0x0600 + (i * 0x10), 0);
+ NV_WR32(par->PFB, 0x0604 + (i * 0x10),
+ par->FbMapSize - 1);
}
}
@@ -1182,11 +1192,17 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
NV_WR32(par->PGRAPH, 0x0608, 0xFFFFFFFF);
} else {
if (par->Architecture >= NV_ARCH_40) {
+ u32 tmp;
+
NV_WR32(par->PGRAPH, 0x0084, 0x401287c0);
NV_WR32(par->PGRAPH, 0x008C, 0x60de8051);
NV_WR32(par->PGRAPH, 0x0090, 0x00008000);
NV_WR32(par->PGRAPH, 0x0610, 0x00be3c5f);
+ tmp = NV_RD32(par->REGS, 0x1540) & 0xff;
+ for(i = 0; tmp && !(tmp & 1); tmp >>= 1, i++);
+ NV_WR32(par->PGRAPH, 0x5000, i);
+
if ((par->Chipset & 0xfff0) == 0x0040) {
NV_WR32(par->PGRAPH, 0x09b0,
0x83280fff);
@@ -1211,6 +1227,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
0xffff7fff);
break;
case 0x00C0:
+ case 0x0120:
NV_WR32(par->PGRAPH, 0x0828,
0x007596ff);
NV_WR32(par->PGRAPH, 0x082C,
@@ -1245,6 +1262,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
0x00100000);
break;
case 0x0090:
+ case 0x0290:
NV_WR32(par->PRAMDAC, 0x0608,
NV_RD32(par->PRAMDAC, 0x0608) |
0x00100000);
@@ -1310,14 +1328,44 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
}
}
- if ((par->Chipset & 0xfff0) == 0x0090) {
- for (i = 0; i < 60; i++)
- NV_WR32(par->PGRAPH, 0x0D00 + i,
- NV_RD32(par->PFB, 0x0600 + i));
+ if ((par->Architecture < NV_ARCH_40) ||
+ ((par->Chipset & 0xfff0) == 0x0040)) {
+ for (i = 0; i < 32; i++) {
+ NV_WR32(par->PGRAPH, 0x0900 + i*4,
+ NV_RD32(par->PFB, 0x0240 +i*4));
+ NV_WR32(par->PGRAPH, 0x6900 + i*4,
+ NV_RD32(par->PFB, 0x0240 +i*4));
+ }
} else {
- for (i = 0; i < 32; i++)
- NV_WR32(par->PGRAPH, 0x0900 + i,
- NV_RD32(par->PFB, 0x0240 + i));
+ if (((par->Chipset & 0xfff0) == 0x0090) ||
+ ((par->Chipset & 0xfff0) == 0x01D0) ||
+ ((par->Chipset & 0xfff0) == 0x0290)) {
+ for (i = 0; i < 60; i++) {
+ NV_WR32(par->PGRAPH,
+ 0x0D00 + i*4,
+ NV_RD32(par->PFB,
+ 0x0600 + i*4));
+ NV_WR32(par->PGRAPH,
+ 0x6900 + i*4,
+ NV_RD32(par->PFB,
+ 0x0600 + i*4));
+ }
+ } else {
+ for (i = 0; i < 48; i++) {
+ NV_WR32(par->PGRAPH,
+ 0x0900 + i*4,
+ NV_RD32(par->PFB,
+ 0x0600 + i*4));
+ if(((par->Chipset & 0xfff0)
+ != 0x0160) &&
+ ((par->Chipset & 0xfff0)
+ != 0x0220))
+ NV_WR32(par->PGRAPH,
+ 0x6900 + i*4,
+ NV_RD32(par->PFB,
+ 0x0600 + i*4));
+ }
+ }
}
if (par->Architecture >= NV_ARCH_40) {
@@ -1338,7 +1386,9 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
NV_WR32(par->PGRAPH, 0x0868,
par->FbMapSize - 1);
} else {
- if((par->Chipset & 0xfff0) == 0x0090) {
+ if ((par->Chipset & 0xfff0) == 0x0090 ||
+ (par->Chipset & 0xfff0) == 0x01D0 ||
+ (par->Chipset & 0xfff0) == 0x0290) {
NV_WR32(par->PGRAPH, 0x0DF0,
NV_RD32(par->PFB, 0x0200));
NV_WR32(par->PGRAPH, 0x0DF4,
diff --git a/drivers/video/nvidia/nv_i2c.c b/drivers/video/nvidia/nv_i2c.c
index 12f2884d3f0..bd9eca05e14 100644
--- a/drivers/video/nvidia/nv_i2c.c
+++ b/drivers/video/nvidia/nv_i2c.c
@@ -46,7 +46,7 @@ static void nvidia_gpio_setscl(void *data, int state)
static void nvidia_gpio_setsda(void *data, int state)
{
- struct nvidia_i2c_chan *chan = (struct nvidia_i2c_chan *)data;
+ struct nvidia_i2c_chan *chan = data;
struct nvidia_par *par = chan->par;
u32 val;
@@ -64,7 +64,7 @@ static void nvidia_gpio_setsda(void *data, int state)
static int nvidia_gpio_getscl(void *data)
{
- struct nvidia_i2c_chan *chan = (struct nvidia_i2c_chan *)data;
+ struct nvidia_i2c_chan *chan = data;
struct nvidia_par *par = chan->par;
u32 val = 0;
@@ -79,7 +79,7 @@ static int nvidia_gpio_getscl(void *data)
static int nvidia_gpio_getsda(void *data)
{
- struct nvidia_i2c_chan *chan = (struct nvidia_i2c_chan *)data;
+ struct nvidia_i2c_chan *chan = data;
struct nvidia_par *par = chan->par;
u32 val = 0;
@@ -136,13 +136,13 @@ void nvidia_create_i2c_busses(struct nvidia_par *par)
par->chan[2].par = par;
par->chan[0].ddc_base = 0x3e;
- nvidia_setup_i2c_bus(&par->chan[0], "BUS1");
+ nvidia_setup_i2c_bus(&par->chan[0], "nvidia #0");
par->chan[1].ddc_base = 0x36;
- nvidia_setup_i2c_bus(&par->chan[1], "BUS2");
+ nvidia_setup_i2c_bus(&par->chan[1], "nvidia #1");
par->chan[2].ddc_base = 0x50;
- nvidia_setup_i2c_bus(&par->chan[2], "BUS3");
+ nvidia_setup_i2c_bus(&par->chan[2], "nvidia #2");
}
void nvidia_delete_i2c_busses(struct nvidia_par *par)
diff --git a/drivers/video/nvidia/nv_proto.h b/drivers/video/nvidia/nv_proto.h
index 3353103e8b0..b149a690ee0 100644
--- a/drivers/video/nvidia/nv_proto.h
+++ b/drivers/video/nvidia/nv_proto.h
@@ -4,7 +4,7 @@
#define __NV_PROTO_H__
/* in nv_setup.c */
-void NVCommonSetup(struct fb_info *info);
+int NVCommonSetup(struct fb_info *info);
void NVWriteCrtc(struct nvidia_par *par, u8 index, u8 value);
u8 NVReadCrtc(struct nvidia_par *par, u8 index);
void NVWriteGr(struct nvidia_par *par, u8 index, u8 value);
diff --git a/drivers/video/nvidia/nv_setup.c b/drivers/video/nvidia/nv_setup.c
index 1f06a9f1bd0..a18a9aebf05 100644
--- a/drivers/video/nvidia/nv_setup.c
+++ b/drivers/video/nvidia/nv_setup.c
@@ -285,28 +285,34 @@ static void nv10GetConfig(struct nvidia_par *par)
par->CrystalFreqKHz = 27000;
}
- par->CursorStart = (par->RamAmountKBytes - 96) * 1024;
par->CURSOR = NULL; /* can't set this here */
par->MinVClockFreqKHz = 12000;
par->MaxVClockFreqKHz = par->twoStagePLL ? 400000 : 350000;
}
-void NVCommonSetup(struct fb_info *info)
+int NVCommonSetup(struct fb_info *info)
{
struct nvidia_par *par = info->par;
- struct fb_var_screeninfo var;
+ struct fb_var_screeninfo *var;
u16 implementation = par->Chipset & 0x0ff0;
u8 *edidA = NULL, *edidB = NULL;
- struct fb_monspecs monitorA, monitorB;
+ struct fb_monspecs *monitorA, *monitorB;
struct fb_monspecs *monA = NULL, *monB = NULL;
int mobile = 0;
int tvA = 0;
int tvB = 0;
int FlatPanel = -1; /* really means the CRTC is slaved */
int Television = 0;
+ int err = 0;
- memset(&monitorA, 0, sizeof(struct fb_monspecs));
- memset(&monitorB, 0, sizeof(struct fb_monspecs));
+ var = kzalloc(sizeof(struct fb_var_screeninfo), GFP_KERNEL);
+ monitorA = kzalloc(sizeof(struct fb_monspecs), GFP_KERNEL);
+ monitorB = kzalloc(sizeof(struct fb_monspecs), GFP_KERNEL);
+
+ if (!var || !monitorA || !monitorB) {
+ err = -ENOMEM;
+ goto done;
+ }
par->PRAMIN = par->REGS + (0x00710000 / 4);
par->PCRTC0 = par->REGS + (0x00600000 / 4);
@@ -382,6 +388,8 @@ void NVCommonSetup(struct fb_info *info)
case 0x0146:
case 0x0147:
case 0x0148:
+ case 0x0098:
+ case 0x0099:
mobile = 1;
break;
default:
@@ -406,9 +414,9 @@ void NVCommonSetup(struct fb_info *info)
par->CRTCnumber = 0;
if (nvidia_probe_i2c_connector(info, 1, &edidA))
nvidia_probe_of_connector(info, 1, &edidA);
- if (edidA && !fb_parse_edid(edidA, &var)) {
+ if (edidA && !fb_parse_edid(edidA, var)) {
printk("nvidiafb: EDID found from BUS1\n");
- monA = &monitorA;
+ monA = monitorA;
fb_edid_to_monspecs(edidA, monA);
FlatPanel = (monA->input & FB_DISP_DDI) ? 1 : 0;
@@ -494,17 +502,17 @@ void NVCommonSetup(struct fb_info *info)
if (nvidia_probe_i2c_connector(info, 1, &edidA))
nvidia_probe_of_connector(info, 1, &edidA);
- if (edidA && !fb_parse_edid(edidA, &var)) {
+ if (edidA && !fb_parse_edid(edidA, var)) {
printk("nvidiafb: EDID found from BUS1\n");
- monA = &monitorA;
+ monA = monitorA;
fb_edid_to_monspecs(edidA, monA);
}
if (nvidia_probe_i2c_connector(info, 2, &edidB))
nvidia_probe_of_connector(info, 2, &edidB);
- if (edidB && !fb_parse_edid(edidB, &var)) {
+ if (edidB && !fb_parse_edid(edidB, var)) {
printk("nvidiafb: EDID found from BUS2\n");
- monB = &monitorB;
+ monB = monitorB;
fb_edid_to_monspecs(edidB, monB);
}
@@ -639,4 +647,9 @@ void NVCommonSetup(struct fb_info *info)
kfree(edidA);
kfree(edidB);
+done:
+ kfree(var);
+ kfree(monitorA);
+ kfree(monitorB);
+ return err;
}
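
The NVCommonSetup() hunk above replaces three large on-stack structures (a struct fb_var_screeninfo plus two struct fb_monspecs) with kzalloc() and turns the function into an int so allocation failure can be reported to the caller. A stripped-down editorial sketch of that shape, using a hypothetical example_setup():

static int example_setup(struct fb_info *info)
{
	struct fb_var_screeninfo *var;
	struct fb_monspecs *monitorA, *monitorB;
	int err = 0;

	/* too big for the kernel stack: allocate zeroed copies instead */
	var = kzalloc(sizeof(*var), GFP_KERNEL);
	monitorA = kzalloc(sizeof(*monitorA), GFP_KERNEL);
	monitorB = kzalloc(sizeof(*monitorB), GFP_KERNEL);
	if (!var || !monitorA || !monitorB) {
		err = -ENOMEM;
		goto done;
	}

	/* ... EDID probing, mode selection ... */

done:
	/* kfree(NULL) is a no-op, so partial allocations are safe here */
	kfree(var);
	kfree(monitorA);
	kfree(monitorB);
	return err;
}
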
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index bee09c6e48f..dbcb8962e57 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -284,6 +284,16 @@ static struct pci_device_id nvidiafb_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_GEFORCE_6200,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_ALT1,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT1,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_GEFORCE_6200_ALT1,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_NVIDIA, PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_NVIDIA, 0x0252,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_NVIDIA, 0x0313,
@@ -418,6 +428,7 @@ static int noaccel __devinitdata = 0;
static int noscale __devinitdata = 0;
static int paneltweak __devinitdata = 0;
static int vram __devinitdata = 0;
+static int bpp __devinitdata = 8;
#ifdef CONFIG_MTRR
static int nomtrr __devinitdata = 0;
#endif
@@ -485,7 +496,7 @@ static int nvidia_backlight_levels[] = {
static int nvidia_set_backlight_enable(int on, int level, void *data)
{
- struct nvidia_par *par = (struct nvidia_par *)data;
+ struct nvidia_par *par = data;
u32 tmp_pcrt, tmp_pmc, fpcontrol;
tmp_pmc = NV_RD32(par->PMC, 0x10F0) & 0x0000FFFF;
@@ -1382,24 +1393,36 @@ static int __devinit nvidia_set_fbinfo(struct fb_info *info)
info->monspecs.modedb_len, &info->modelist);
fb_var_to_videomode(&modedb, &nvidiafb_default_var);
+ switch (bpp) {
+ case 0 ... 8:
+ bpp = 8;
+ break;
+ case 9 ... 16:
+ bpp = 16;
+ break;
+ default:
+ bpp = 32;
+ break;
+ }
+
if (specs->modedb != NULL) {
struct fb_videomode *modedb;
modedb = fb_find_best_display(specs, &info->modelist);
fb_videomode_to_var(&nvidiafb_default_var, modedb);
- nvidiafb_default_var.bits_per_pixel = 8;
+ nvidiafb_default_var.bits_per_pixel = bpp;
} else if (par->fpWidth && par->fpHeight) {
char buf[16];
memset(buf, 0, 16);
snprintf(buf, 15, "%dx%dMR", par->fpWidth, par->fpHeight);
fb_find_mode(&nvidiafb_default_var, info, buf, specs->modedb,
- specs->modedb_len, &modedb, 8);
+ specs->modedb_len, &modedb, bpp);
}
if (mode_option)
fb_find_mode(&nvidiafb_default_var, info, mode_option,
- specs->modedb, specs->modedb_len, &modedb, 8);
+ specs->modedb, specs->modedb_len, &modedb, bpp);
info->var = nvidiafb_default_var;
info->fix.visual = (info->var.bits_per_pixel == 8) ?
@@ -1448,11 +1471,34 @@ static int __devinit nvidia_set_fbinfo(struct fb_info *info)
return nvidiafb_check_var(&info->var, info);
}
-static u32 __devinit nvidia_get_arch(struct pci_dev *pd)
+static u32 __devinit nvidia_get_chipset(struct fb_info *info)
{
+ struct nvidia_par *par = info->par;
+ u32 id = (par->pci_dev->vendor << 16) | par->pci_dev->device;
+
+ printk("nvidiafb: PCI id - %x\n", id);
+ if ((id & 0xfff0) == 0x00f0) {
+ /* pci-e */
+ printk("nvidiafb: PCI-E card\n");
+ id = NV_RD32(par->REGS, 0x1800);
+
+ if ((id & 0x0000ffff) == 0x000010DE)
+ id = 0x10DE0000 | (id >> 16);
+ else if ((id & 0xffff0000) == 0xDE100000) /* wrong endian */
+ id = 0x10DE0000 | ((id << 8) & 0x0000ff00) |
+ ((id >> 8) & 0x000000ff);
+ }
+
+ printk("nvidiafb: Actual id - %x\n", id);
+ return id;
+}
+
+static u32 __devinit nvidia_get_arch(struct fb_info *info)
+{
+ struct nvidia_par *par = info->par;
u32 arch = 0;
- switch (pd->device & 0x0ff0) {
+ switch (par->Chipset & 0x0ff0) {
case 0x0100: /* GeForce 256 */
case 0x0110: /* GeForce2 MX */
case 0x0150: /* GeForce2 */
@@ -1485,6 +1531,8 @@ static u32 __devinit nvidia_get_arch(struct pci_dev *pd)
case 0x0210:
case 0x0220:
case 0x0230:
+ case 0x0290:
+ case 0x0390:
arch = NV_ARCH_40;
break;
case 0x0020: /* TNT, TNT2 */
@@ -1513,7 +1561,7 @@ static int __devinit nvidiafb_probe(struct pci_dev *pd,
if (!info)
goto err_out;
- par = (struct nvidia_par *)info->par;
+ par = info->par;
par->pci_dev = pd;
info->pixmap.addr = kmalloc(8 * 1024, GFP_KERNEL);
@@ -1533,18 +1581,6 @@ static int __devinit nvidiafb_probe(struct pci_dev *pd,
goto err_out_request;
}
- par->Architecture = nvidia_get_arch(pd);
-
- par->Chipset = (pd->vendor << 16) | pd->device;
- printk(KERN_INFO PFX "nVidia device/chipset %X\n", par->Chipset);
-
- if (par->Architecture == 0) {
- printk(KERN_ERR PFX "unknown NV_ARCH\n");
- goto err_out_free_base0;
- }
-
- sprintf(nvidiafb_fix.id, "NV%x", (pd->device & 0x0ff0) >> 4);
-
par->FlatPanel = flatpanel;
if (flatpanel == 1)
printk(KERN_INFO PFX "flatpanel support enabled\n");
@@ -1570,7 +1606,19 @@ static int __devinit nvidiafb_probe(struct pci_dev *pd,
goto err_out_free_base0;
}
- NVCommonSetup(info);
+ par->Chipset = nvidia_get_chipset(info);
+ printk(KERN_INFO PFX "nVidia device/chipset %X\n", par->Chipset);
+ par->Architecture = nvidia_get_arch(info);
+
+ if (par->Architecture == 0) {
+ printk(KERN_ERR PFX "unknown NV_ARCH\n");
+ goto err_out_arch;
+ }
+
+ sprintf(nvidiafb_fix.id, "NV%x", (pd->device & 0x0ff0) >> 4);
+
+ if (NVCommonSetup(info))
+ goto err_out_arch;
par->FbAddress = nvidiafb_fix.smem_start;
par->FbMapSize = par->RamAmountKBytes * 1024;
@@ -1581,10 +1629,15 @@ static int __devinit nvidiafb_probe(struct pci_dev *pd,
if (par->FbMapSize > 64 * 1024 * 1024)
par->FbMapSize = 64 * 1024 * 1024;
- par->FbUsableSize = par->FbMapSize - (128 * 1024);
+ if(par->Architecture >= NV_ARCH_40)
+ par->FbUsableSize = par->FbMapSize - (560 * 1024);
+ else
+ par->FbUsableSize = par->FbMapSize - (128 * 1024);
par->ScratchBufferSize = (par->Architecture < NV_ARCH_10) ? 8 * 1024 :
16 * 1024;
par->ScratchBufferStart = par->FbUsableSize - par->ScratchBufferSize;
+ par->CursorStart = par->FbUsableSize + (32 * 1024);
+
info->screen_base = ioremap(nvidiafb_fix.smem_start, par->FbMapSize);
info->screen_size = par->FbUsableSize;
nvidiafb_fix.smem_len = par->RamAmountKBytes * 1024;
@@ -1640,21 +1693,22 @@ static int __devinit nvidiafb_probe(struct pci_dev *pd,
NVTRACE_LEAVE();
return 0;
- err_out_iounmap_fb:
+err_out_iounmap_fb:
iounmap(info->screen_base);
- err_out_free_base1:
+err_out_free_base1:
fb_destroy_modedb(info->monspecs.modedb);
nvidia_delete_i2c_busses(par);
+err_out_arch:
iounmap(par->REGS);
- err_out_free_base0:
+err_out_free_base0:
pci_release_regions(pd);
- err_out_request:
+err_out_request:
pci_disable_device(pd);
- err_out_enable:
+err_out_enable:
kfree(info->pixmap.addr);
- err_out_kfree:
+err_out_kfree:
framebuffer_release(info);
- err_out:
+err_out:
return -ENODEV;
}
@@ -1729,6 +1783,8 @@ static int __devinit nvidiafb_setup(char *options)
#endif
} else if (!strncmp(this_opt, "fpdither:", 9)) {
fpdither = simple_strtol(this_opt+9, NULL, 0);
+ } else if (!strncmp(this_opt, "bpp:", 4)) {
+ bpp = simple_strtoul(this_opt+4, NULL, 0);
} else
mode_option = this_opt;
}
@@ -1804,6 +1860,11 @@ module_param(vram, int, 0);
MODULE_PARM_DESC(vram,
"amount of framebuffer memory to remap in MiB"
"(default=0 - remap entire memory)");
+module_param(mode_option, charp, 0);
+MODULE_PARM_DESC(mode_option, "Specify initial video mode");
+module_param(bpp, int, 0);
+MODULE_PARM_DESC(bpp, "pixel width in bits "
+ "(default=8)");
#ifdef CONFIG_MTRR
module_param(nomtrr, bool, 0);
MODULE_PARM_DESC(nomtrr, "Disables MTRR support (0 or 1=disabled) "
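
With the new bpp option wired up above, the initial depth can be selected either on the kernel command line (for example video=nvidiafb:bpp:16) or, when the driver is built as a module, via modprobe nvidiafb bpp=16; values are rounded up to 8, 16 or 32 by the switch added in nvidia_set_fbinfo(). (Editorial usage note, not part of the patch.)
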
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 00d87f5bb7b..ad1434e3f22 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -223,6 +223,7 @@ static int offb_blank(int blank, struct fb_info *info)
int __init offb_init(void)
{
struct device_node *dp = NULL, *boot_disp = NULL;
+
#if defined(CONFIG_BOOTX_TEXT) && defined(CONFIG_PPC32)
struct device_node *macos_display = NULL;
#endif
@@ -234,60 +235,54 @@ int __init offb_init(void)
if (boot_infos != 0) {
unsigned long addr =
(unsigned long) boot_infos->dispDeviceBase;
+ u32 *addrp;
+ u64 daddr, dsize;
+ unsigned int flags;
+
/* find the device node corresponding to the macos display */
while ((dp = of_find_node_by_type(dp, "display"))) {
int i;
- /*
- * Grrr... It looks like the MacOS ATI driver
- * munges the assigned-addresses property (but
- * the AAPL,address value is OK).
- */
- if (strncmp(dp->name, "ATY,", 4) == 0
- && dp->n_addrs == 1) {
- unsigned int *ap =
- (unsigned int *) get_property(dp,
- "AAPL,address",
- NULL);
- if (ap != NULL) {
- dp->addrs[0].address = *ap;
- dp->addrs[0].size = 0x01000000;
- }
- }
/*
- * The LTPro on the Lombard powerbook has no addresses
- * on the display nodes, they are on their parent.
+ * Look for an AAPL,address property first.
*/
- if (dp->n_addrs == 0
- && device_is_compatible(dp, "ATY,264LTPro")) {
- int na;
- unsigned int *ap = (unsigned int *)
- get_property(dp, "AAPL,address", &na);
- if (ap != 0)
- for (na /= sizeof(unsigned int);
- na > 0; --na, ++ap)
- if (*ap <= addr
- && addr <
- *ap + 0x1000000)
- goto foundit;
+ unsigned int na;
+ unsigned int *ap =
+ (unsigned int *)get_property(dp, "AAPL,address",
+ &na);
+ if (ap != 0) {
+ for (na /= sizeof(unsigned int); na > 0;
+ --na, ++ap)
+ if (*ap <= addr &&
+ addr < *ap + 0x1000000) {
+ macos_display = dp;
+ goto foundit;
+ }
}
/*
* See if the display address is in one of the address
* ranges for this display.
*/
- for (i = 0; i < dp->n_addrs; ++i) {
- if (dp->addrs[i].address <= addr
- && addr <
- dp->addrs[i].address +
- dp->addrs[i].size)
+ i = 0;
+ for (;;) {
+ addrp = of_get_address(dp, i++, &dsize, &flags);
+ if (addrp == NULL)
break;
+ if (!(flags & IORESOURCE_MEM))
+ continue;
+ daddr = of_translate_address(dp, addrp);
+ if (daddr == OF_BAD_ADDR)
+ continue;
+ if (daddr <= addr && addr < (daddr + dsize)) {
+ macos_display = dp;
+ goto foundit;
+ }
}
- if (i < dp->n_addrs) {
- foundit:
+ foundit:
+ if (macos_display) {
printk(KERN_INFO "MacOS display is %s\n",
dp->full_name);
- macos_display = dp;
break;
}
}
@@ -326,8 +321,10 @@ static void __init offb_init_nodriver(struct device_node *dp)
int *pp, i;
unsigned int len;
int width = 640, height = 480, depth = 8, pitch;
- unsigned int rsize, *up;
- unsigned long address = 0;
+ unsigned int flags, rsize, *up;
+ u64 address = OF_BAD_ADDR;
+ u32 *addrp;
+ u64 asize;
if ((pp = (int *) get_property(dp, "depth", &len)) != NULL
&& len == sizeof(int))
@@ -363,7 +360,7 @@ static void __init offb_init_nodriver(struct device_node *dp)
break;
}
if (pdev) {
- for (i = 0; i < 6 && address == 0; i++) {
+ for (i = 0; i < 6 && address == OF_BAD_ADDR; i++) {
if ((pci_resource_flags(pdev, i) &
IORESOURCE_MEM) &&
(pci_resource_len(pdev, i) >= rsize))
@@ -374,27 +371,33 @@ static void __init offb_init_nodriver(struct device_node *dp)
}
#endif /* CONFIG_PCI */
- if (address == 0 &&
- (up = (unsigned *) get_property(dp, "address", &len)) != NULL &&
- len == sizeof(unsigned))
- address = (u_long) * up;
- if (address == 0) {
- for (i = 0; i < dp->n_addrs; ++i)
- if (dp->addrs[i].size >=
- pitch * height * depth / 8)
- break;
- if (i >= dp->n_addrs) {
+ /* This one is dodgy, we may drop it ... */
+ if (address == OF_BAD_ADDR &&
+ (up = (unsigned *) get_property(dp, "address", &len)) != NULL &&
+ len == sizeof(unsigned int))
+ address = (u64) * up;
+
+ if (address == OF_BAD_ADDR) {
+ for (i = 0; (addrp = of_get_address(dp, i, &asize, &flags))
+ != NULL; i++) {
+ if (!(flags & IORESOURCE_MEM))
+ continue;
+ if (asize >= pitch * height * depth / 8)
+ break;
+ }
+ if (addrp == NULL) {
printk(KERN_ERR
"no framebuffer address found for %s\n",
dp->full_name);
return;
}
-
- address = (u_long) dp->addrs[i].address;
-
-#ifdef CONFIG_PPC64
- address += ((struct pci_dn *)dp->data)->phb->pci_mem_offset;
-#endif
+ address = of_translate_address(dp, addrp);
+ if (address == OF_BAD_ADDR) {
+ printk(KERN_ERR
+ "can't translate framebuffer address for %s\n",
+ dp->full_name);
+ return;
+ }
/* kludge for valkyrie */
if (strcmp(dp->name, "valkyrie") == 0)
@@ -459,7 +462,9 @@ static void __init offb_init_fb(const char *name, const char *full_name,
par->cmap_type = cmap_unknown;
if (depth == 8) {
- /* XXX kludge for ati */
+
+ /* Palette hacks disabled for now */
+#if 0
if (dp && !strncmp(name, "ATY,Rage128", 11)) {
unsigned long regbase = dp->addrs[2].address;
par->cmap_adr = ioremap(regbase, 0x1FFF);
@@ -490,6 +495,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
par->cmap_adr = ioremap(regbase + 0x6000, 0x1000);
par->cmap_type = cmap_gxt2000;
}
+#endif
fix->visual = par->cmap_adr ? FB_VISUAL_PSEUDOCOLOR
: FB_VISUAL_STATIC_PSEUDOCOLOR;
} else
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index ca4082ae5a1..335e3746555 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -69,6 +69,8 @@ struct fb_info_platinum {
unsigned long total_vram;
int clktype;
int dactype;
+
+ struct resource rsrc_fb, rsrc_reg;
};
/*
@@ -97,9 +99,6 @@ static int platinum_var_to_par(struct fb_var_screeninfo *var,
* Interface used by the world
*/
-int platinumfb_init(void);
-int platinumfb_setup(char*);
-
static struct fb_ops platinumfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = platinumfb_check_var,
@@ -138,13 +137,15 @@ static int platinumfb_set_par (struct fb_info *info)
init = platinum_reg_init[pinfo->vmode-1];
- if (pinfo->vmode == 13 && pinfo->cmode > 0)
- offset = 0x10;
+ if ((pinfo->vmode == VMODE_832_624_75) && (pinfo->cmode > CMODE_8))
+ offset = 0x10;
+
info->screen_base = pinfo->frame_buffer + init->fb_offset + offset;
info->fix.smem_start = (pinfo->frame_buffer_phys) + init->fb_offset + offset;
info->fix.visual = (pinfo->cmode == CMODE_8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
- info->fix.line_length = vmode_attrs[pinfo->vmode-1].hres * (1<<pinfo->cmode) + offset;
+ info->fix.line_length = vmode_attrs[pinfo->vmode-1].hres * (1<<pinfo->cmode)
+ + offset;
printk("line_length: %x\n", info->fix.line_length);
return 0;
}
@@ -221,7 +222,9 @@ static int platinumfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
static inline int platinum_vram_reqd(int video_mode, int color_mode)
{
return vmode_attrs[video_mode-1].vres *
- (vmode_attrs[video_mode-1].hres * (1<<color_mode) + 0x20) +0x1000;
+ (vmode_attrs[video_mode-1].hres * (1<<color_mode) +
+ (((video_mode == VMODE_832_624_75) &&
+ (color_mode > CMODE_8)) ? 0x10 : 0x20)) + 0x1000;
}
#define STORE_D2(a, d) { \
@@ -481,7 +484,7 @@ static int platinum_var_to_par(struct fb_var_screeninfo *var,
/*
* Parse user specified options (`video=platinumfb:')
*/
-int __init platinumfb_setup(char *options)
+static int __init platinumfb_setup(char *options)
{
char *this_opt;
@@ -522,19 +525,15 @@ int __init platinumfb_setup(char *options)
#define invalidate_cache(addr)
#endif
-static int __devinit platinumfb_probe(struct of_device* odev, const struct of_device_id *match)
+static int __devinit platinumfb_probe(struct of_device* odev,
+ const struct of_device_id *match)
{
struct device_node *dp = odev->node;
struct fb_info *info;
struct fb_info_platinum *pinfo;
- unsigned long addr, size;
volatile __u8 *fbuffer;
- int i, bank0, bank1, bank2, bank3, rc;
+ int bank0, bank1, bank2, bank3, rc;
- if (dp->n_addrs != 2) {
- printk(KERN_ERR "expecting 2 address for platinum (got %d)", dp->n_addrs);
- return -ENXIO;
- }
printk(KERN_INFO "platinumfb: Found Apple Platinum video hardware\n");
info = framebuffer_alloc(sizeof(*pinfo), &odev->dev);
@@ -542,26 +541,39 @@ static int __devinit platinumfb_probe(struct of_device* odev, const struct of_de
return -ENOMEM;
pinfo = info->par;
- /* Map in frame buffer and registers */
- for (i = 0; i < dp->n_addrs; ++i) {
- addr = dp->addrs[i].address;
- size = dp->addrs[i].size;
- /* Let's assume we can request either all or nothing */
- if (!request_mem_region(addr, size, "platinumfb")) {
- framebuffer_release(info);
- return -ENXIO;
- }
- if (size >= 0x400000) {
- /* frame buffer - map only 4MB */
- pinfo->frame_buffer_phys = addr;
- pinfo->frame_buffer = __ioremap(addr, 0x400000, _PAGE_WRITETHRU);
- pinfo->base_frame_buffer = pinfo->frame_buffer;
- } else {
- /* registers */
- pinfo->platinum_regs_phys = addr;
- pinfo->platinum_regs = ioremap(addr, size);
- }
+ if (of_address_to_resource(dp, 0, &pinfo->rsrc_reg) ||
+ of_address_to_resource(dp, 1, &pinfo->rsrc_fb)) {
+ printk(KERN_ERR "platinumfb: Can't get resources\n");
+ framebuffer_release(info);
+ return -ENXIO;
}
+ if (!request_mem_region(pinfo->rsrc_reg.start,
+ pinfo->rsrc_reg.end -
+ pinfo->rsrc_reg.start + 1,
+ "platinumfb registers")) {
+ framebuffer_release(info);
+ return -ENXIO;
+ }
+ if (!request_mem_region(pinfo->rsrc_fb.start,
+ pinfo->rsrc_fb.end
+ - pinfo->rsrc_fb.start + 1,
+ "platinumfb framebuffer")) {
+ release_mem_region(pinfo->rsrc_reg.start,
+ pinfo->rsrc_reg.end -
+ pinfo->rsrc_reg.start + 1);
+ framebuffer_release(info);
+ return -ENXIO;
+ }
+
+ /* frame buffer - map only 4MB */
+ pinfo->frame_buffer_phys = pinfo->rsrc_fb.start;
+ pinfo->frame_buffer = __ioremap(pinfo->rsrc_fb.start, 0x400000,
+ _PAGE_WRITETHRU);
+ pinfo->base_frame_buffer = pinfo->frame_buffer;
+
+ /* registers */
+ pinfo->platinum_regs_phys = pinfo->rsrc_reg.start;
+ pinfo->platinum_regs = ioremap(pinfo->rsrc_reg.start, 0x1000);
pinfo->cmap_regs_phys = 0xf301b000; /* XXX not in prom? */
request_mem_region(pinfo->cmap_regs_phys, 0x1000, "platinumfb cmap");
@@ -624,18 +636,16 @@ static int __devexit platinumfb_remove(struct of_device* odev)
{
struct fb_info *info = dev_get_drvdata(&odev->dev);
struct fb_info_platinum *pinfo = info->par;
- struct device_node *dp = odev->node;
- unsigned long addr, size;
- int i;
unregister_framebuffer (info);
/* Unmap frame buffer and registers */
- for (i = 0; i < dp->n_addrs; ++i) {
- addr = dp->addrs[i].address;
- size = dp->addrs[i].size;
- release_mem_region(addr, size);
- }
+ release_mem_region(pinfo->rsrc_fb.start,
+ pinfo->rsrc_fb.end -
+ pinfo->rsrc_fb.start + 1);
+ release_mem_region(pinfo->rsrc_reg.start,
+ pinfo->rsrc_reg.end -
+ pinfo->rsrc_reg.start + 1);
iounmap(pinfo->frame_buffer);
iounmap(pinfo->platinum_regs);
release_mem_region(pinfo->cmap_regs_phys, 0x1000);
@@ -662,7 +672,7 @@ static struct of_platform_driver platinum_driver =
.remove = platinumfb_remove,
};
-int __init platinumfb_init(void)
+static int __init platinumfb_init(void)
{
#ifndef MODULE
char *option = NULL;
@@ -676,7 +686,7 @@ int __init platinumfb_init(void)
return 0;
}
-void __exit platinumfb_exit(void)
+static void __exit platinumfb_exit(void)
{
of_unregister_driver(&platinum_driver);
}
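
The platinumfb probe above now pulls its register and framebuffer ranges out of the device tree with of_address_to_resource() and claims them with request_mem_region(). Since struct resource describes an inclusive [start, end] range, the length passed to request_mem_region()/release_mem_region() is end - start + 1, as the calls above are written. A small editorial sketch of the idiom, with hypothetical example_claim()/example_unclaim() helpers:

#include <linux/ioport.h>

static int example_claim(struct resource *r, const char *name)
{
	unsigned long len = r->end - r->start + 1;	/* inclusive range */

	if (!request_mem_region(r->start, len, name))
		return -ENXIO;		/* someone else owns the range */
	return 0;
}

static void example_unclaim(struct resource *r)
{
	release_mem_region(r->start, r->end - r->start + 1);
}
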
diff --git a/drivers/video/platinumfb.h b/drivers/video/platinumfb.h
index 2834fc1c344..f6bd77cafd1 100644
--- a/drivers/video/platinumfb.h
+++ b/drivers/video/platinumfb.h
@@ -158,7 +158,9 @@ static struct platinum_regvals platinum_reg_init_14 = {
/* 832x624, 75Hz (13) */
static struct platinum_regvals platinum_reg_init_13 = {
0x70,
- { 864, 1680, 3360 }, /* MacOS does 1680 instead of 1696 to fit 16bpp in 1MB */
+ { 864, 1680, 3344 }, /* MacOS does 1680 instead of 1696 to fit 16bpp in 1MB,
+ * and we use 3344 instead of 3360 to fit in 2Mb
+ */
{ 0xff0, 4, 0, 0, 0, 0, 0x299, 0,
0, 0x21e, 0x120, 0x10, 0x23f, 0x1f, 0x25, 0x37,
0x8a, 0x22a, 0x23e, 0x536, 0x534, 4, 9, 0x52,
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index 0277ce031e5..5fe197943de 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -91,6 +91,7 @@ struct pm2fb_par
u32 mem_config; /* MemConfig reg at probe */
u32 mem_control; /* MemControl reg at probe */
u32 boot_address; /* BootAddress reg at probe */
+ u32 palette[16];
};
/*
@@ -674,7 +675,7 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
*/
static int pm2fb_set_par(struct fb_info *info)
{
- struct pm2fb_par *par = (struct pm2fb_par *) info->par;
+ struct pm2fb_par *par = info->par;
u32 pixclock;
u32 width, height, depth;
u32 hsstart, hsend, hbend, htotal;
@@ -854,7 +855,7 @@ static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
- struct pm2fb_par *par = (struct pm2fb_par *) info->par;
+ struct pm2fb_par *par = info->par;
if (regno >= info->cmap.len) /* no. of hw registers */
return 1;
@@ -929,7 +930,7 @@ static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
case 16:
case 24:
case 32:
- ((u32*)(info->pseudo_palette))[regno] = v;
+ par->palette[regno] = v;
break;
}
return 0;
@@ -955,7 +956,7 @@ static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
static int pm2fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- struct pm2fb_par *p = (struct pm2fb_par *) info->par;
+ struct pm2fb_par *p = info->par;
u32 base;
u32 depth;
u32 xres;
@@ -987,7 +988,7 @@ static int pm2fb_pan_display(struct fb_var_screeninfo *var,
*/
static int pm2fb_blank(int blank_mode, struct fb_info *info)
{
- struct pm2fb_par *par = (struct pm2fb_par *) info->par;
+ struct pm2fb_par *par = info->par;
u32 video = par->video;
DPRINTK("blank_mode %d\n", blank_mode);
@@ -1054,8 +1055,7 @@ static int __devinit pm2fb_probe(struct pci_dev *pdev,
{
struct pm2fb_par *default_par;
struct fb_info *info;
- int size, err;
- int err_retval = -ENXIO;
+ int err, err_retval = -ENXIO;
err = pci_enable_device(pdev);
if ( err ) {
@@ -1063,11 +1063,10 @@ static int __devinit pm2fb_probe(struct pci_dev *pdev,
return err;
}
- size = sizeof(struct pm2fb_par) + 256 * sizeof(u32);
- info = framebuffer_alloc(size, &pdev->dev);
+ info = framebuffer_alloc(sizeof(struct pm2fb_par), &pdev->dev);
if ( !info )
return -ENOMEM;
- default_par = (struct pm2fb_par *) info->par;
+ default_par = info->par;
switch (pdev->device) {
case PCI_DEVICE_ID_TI_TVP4020:
@@ -1171,7 +1170,7 @@ static int __devinit pm2fb_probe(struct pci_dev *pdev,
info->fbops = &pm2fb_ops;
info->fix = pm2fb_fix;
- info->pseudo_palette = (void *)(default_par + 1);
+ info->pseudo_palette = default_par->palette;
info->flags = FBINFO_DEFAULT |
FBINFO_HWACCEL_YPAN;
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 3e9f96e9237..6c19ab6afb0 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -630,7 +630,7 @@ static void riva_load_video_mode(struct fb_info *info)
int bpp, width, hDisplaySize, hDisplay, hStart,
hEnd, hTotal, height, vDisplay, vStart, vEnd, vTotal, dotClock;
int hBlankStart, hBlankEnd, vBlankStart, vBlankEnd;
- struct riva_par *par = (struct riva_par *) info->par;
+ struct riva_par *par = info->par;
struct riva_regs newmode;
NVTRACE_ENTER();
@@ -925,7 +925,7 @@ riva_set_rop_solid(struct riva_par *par, int rop)
static void riva_setup_accel(struct fb_info *info)
{
- struct riva_par *par = (struct riva_par *) info->par;
+ struct riva_par *par = info->par;
RIVA_FIFO_FREE(par->riva, Clip, 2);
NV_WR32(&par->riva.Clip->TopLeft, 0, 0x0);
@@ -979,7 +979,7 @@ static int riva_get_cmap_len(const struct fb_var_screeninfo *var)
#ifdef CONFIG_PMAC_BACKLIGHT
static int riva_set_backlight_enable(int on, int level, void *data)
{
- struct riva_par *par = (struct riva_par *)data;
+ struct riva_par *par = data;
U032 tmp_pcrt, tmp_pmc;
tmp_pmc = par->riva.PMC[0x10F0/4] & 0x0000FFFF;
@@ -1008,7 +1008,7 @@ static int riva_set_backlight_level(int level, void *data)
static int rivafb_open(struct fb_info *info, int user)
{
- struct riva_par *par = (struct riva_par *) info->par;
+ struct riva_par *par = info->par;
int cnt = atomic_read(&par->ref_count);
NVTRACE_ENTER();
@@ -1034,7 +1034,7 @@ static int rivafb_open(struct fb_info *info, int user)
static int rivafb_release(struct fb_info *info, int user)
{
- struct riva_par *par = (struct riva_par *) info->par;
+ struct riva_par *par = info->par;
int cnt = atomic_read(&par->ref_count);
NVTRACE_ENTER();
@@ -1057,7 +1057,7 @@ static int rivafb_release(struct fb_info *info, int user)
static int rivafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct fb_videomode *mode;
- struct riva_par *par = (struct riva_par *) info->par;
+ struct riva_par *par = info->par;
int nom, den; /* translating from pixels->bytes */
int mode_valid = 0;
@@ -1166,7 +1166,7 @@ static int rivafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
static int rivafb_set_par(struct fb_info *info)
{
- struct riva_par *par = (struct riva_par *) info->par;
+ struct riva_par *par = info->par;
NVTRACE_ENTER();
/* vgaHWunlock() + riva unlock (0x7F) */
@@ -1205,43 +1205,19 @@ static int rivafb_set_par(struct fb_info *info)
static int rivafb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- struct riva_par *par = (struct riva_par *)info->par;
+ struct riva_par *par = info->par;
unsigned int base;
NVTRACE_ENTER();
- if (var->xoffset > (var->xres_virtual - var->xres))
- return -EINVAL;
- if (var->yoffset > (var->yres_virtual - var->yres))
- return -EINVAL;
-
- if (var->vmode & FB_VMODE_YWRAP) {
- if (var->yoffset < 0
- || var->yoffset >= info->var.yres_virtual
- || var->xoffset) return -EINVAL;
- } else {
- if (var->xoffset + info->var.xres > info->var.xres_virtual ||
- var->yoffset + info->var.yres > info->var.yres_virtual)
- return -EINVAL;
- }
-
base = var->yoffset * info->fix.line_length + var->xoffset;
-
par->riva.SetStartAddress(&par->riva, base);
-
- info->var.xoffset = var->xoffset;
- info->var.yoffset = var->yoffset;
-
- if (var->vmode & FB_VMODE_YWRAP)
- info->var.vmode |= FB_VMODE_YWRAP;
- else
- info->var.vmode &= ~FB_VMODE_YWRAP;
NVTRACE_LEAVE();
return 0;
}
static int rivafb_blank(int blank, struct fb_info *info)
{
- struct riva_par *par= (struct riva_par *)info->par;
+ struct riva_par *par= info->par;
unsigned char tmp, vesa;
tmp = SEQin(par, 0x01) & ~0x20; /* screen on/off */
@@ -1304,7 +1280,7 @@ static int rivafb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
- struct riva_par *par = (struct riva_par *)info->par;
+ struct riva_par *par = info->par;
RIVA_HW_INST *chip = &par->riva;
int i;
@@ -1393,7 +1369,7 @@ static int rivafb_setcolreg(unsigned regno, unsigned red, unsigned green,
*/
static void rivafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct riva_par *par = (struct riva_par *) info->par;
+ struct riva_par *par = info->par;
u_int color, rop = 0;
if ((info->flags & FBINFO_HWACCEL_DISABLED)) {
@@ -1449,7 +1425,7 @@ static void rivafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect
*/
static void rivafb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
- struct riva_par *par = (struct riva_par *) info->par;
+ struct riva_par *par = info->par;
if ((info->flags & FBINFO_HWACCEL_DISABLED)) {
cfb_copyarea(info, region);
@@ -1495,7 +1471,7 @@ static inline void convert_bgcolor_16(u32 *col)
static void rivafb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
- struct riva_par *par = (struct riva_par *) info->par;
+ struct riva_par *par = info->par;
u32 fgx = 0, bgx = 0, width, tmp;
u8 *cdat = (u8 *) image->data;
volatile u32 __iomem *d;
@@ -1580,7 +1556,7 @@ static void rivafb_imageblit(struct fb_info *info,
*/
static int rivafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
- struct riva_par *par = (struct riva_par *) info->par;
+ struct riva_par *par = info->par;
u8 data[MAX_CURS * MAX_CURS/8];
int i, set = cursor->set;
u16 fg, bg;
@@ -1664,7 +1640,7 @@ static int rivafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
static int rivafb_sync(struct fb_info *info)
{
- struct riva_par *par = (struct riva_par *)info->par;
+ struct riva_par *par = info->par;
wait_for_idle(par);
return 0;
@@ -1696,7 +1672,7 @@ static struct fb_ops riva_fb_ops = {
static int __devinit riva_set_fbinfo(struct fb_info *info)
{
unsigned int cmap_len;
- struct riva_par *par = (struct riva_par *) info->par;
+ struct riva_par *par = info->par;
NVTRACE_ENTER();
info->flags = FBINFO_DEFAULT
@@ -1733,7 +1709,7 @@ static int __devinit riva_set_fbinfo(struct fb_info *info)
#ifdef CONFIG_PPC_OF
static int __devinit riva_get_EDID_OF(struct fb_info *info, struct pci_dev *pd)
{
- struct riva_par *par = (struct riva_par *) info->par;
+ struct riva_par *par = info->par;
struct device_node *dp;
unsigned char *pedid = NULL;
unsigned char *disptype = NULL;
@@ -1767,7 +1743,7 @@ static int __devinit riva_get_EDID_OF(struct fb_info *info, struct pci_dev *pd)
#if defined(CONFIG_FB_RIVA_I2C) && !defined(CONFIG_PPC_OF)
static int __devinit riva_get_EDID_i2c(struct fb_info *info)
{
- struct riva_par *par = (struct riva_par *) info->par;
+ struct riva_par *par = info->par;
struct fb_var_screeninfo var;
int i;
@@ -1837,7 +1813,7 @@ static void __devinit riva_get_EDID(struct fb_info *info, struct pci_dev *pdev)
static void __devinit riva_get_edidinfo(struct fb_info *info)
{
struct fb_var_screeninfo *var = &rivafb_default_var;
- struct riva_par *par = (struct riva_par *) info->par;
+ struct riva_par *par = info->par;
fb_edid_to_monspecs(par->EDID, &info->monspecs);
fb_videomode_to_modelist(info->monspecs.modedb, info->monspecs.modedb_len,
@@ -1909,7 +1885,7 @@ static int __devinit rivafb_probe(struct pci_dev *pd,
ret = -ENOMEM;
goto err_ret;
}
- default_par = (struct riva_par *) info->par;
+ default_par = info->par;
default_par->pdev = pd;
info->pixmap.addr = kmalloc(8 * 1024, GFP_KERNEL);
@@ -2070,7 +2046,7 @@ static int __devinit rivafb_probe(struct pci_dev *pd,
err_iounmap_screen_base:
#ifdef CONFIG_FB_RIVA_I2C
- riva_delete_i2c_busses((struct riva_par *) info->par);
+ riva_delete_i2c_busses(info->par);
#endif
iounmap(info->screen_base);
err_iounmap_pramin:
@@ -2093,7 +2069,7 @@ err_ret:
static void __exit rivafb_remove(struct pci_dev *pd)
{
struct fb_info *info = pci_get_drvdata(pd);
- struct riva_par *par = (struct riva_par *) info->par;
+ struct riva_par *par = info->par;
NVTRACE_ENTER();
if (!info)
diff --git a/drivers/video/riva/rivafb-i2c.c b/drivers/video/riva/rivafb-i2c.c
index 77151d8e076..8b1967fc116 100644
--- a/drivers/video/riva/rivafb-i2c.c
+++ b/drivers/video/riva/rivafb-i2c.c
@@ -30,7 +30,7 @@
static void riva_gpio_setscl(void* data, int state)
{
- struct riva_i2c_chan *chan = (struct riva_i2c_chan *)data;
+ struct riva_i2c_chan *chan = data;
struct riva_par *par = chan->par;
u32 val;
@@ -48,7 +48,7 @@ static void riva_gpio_setscl(void* data, int state)
static void riva_gpio_setsda(void* data, int state)
{
- struct riva_i2c_chan *chan = (struct riva_i2c_chan *)data;
+ struct riva_i2c_chan *chan = data;
struct riva_par *par = chan->par;
u32 val;
@@ -66,7 +66,7 @@ static void riva_gpio_setsda(void* data, int state)
static int riva_gpio_getscl(void* data)
{
- struct riva_i2c_chan *chan = (struct riva_i2c_chan *)data;
+ struct riva_i2c_chan *chan = data;
struct riva_par *par = chan->par;
u32 val = 0;
@@ -81,7 +81,7 @@ static int riva_gpio_getscl(void* data)
static int riva_gpio_getsda(void* data)
{
- struct riva_i2c_chan *chan = (struct riva_i2c_chan *)data;
+ struct riva_i2c_chan *chan = data;
struct riva_par *par = chan->par;
u32 val = 0;
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index ce6e749db3a..d574dd3c9c8 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -87,6 +87,7 @@
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
+#include <linux/clk.h>
#include <asm/io.h>
#include <asm/uaccess.h>
@@ -96,7 +97,6 @@
#include <asm/arch/regs-lcd.h>
#include <asm/arch/regs-gpio.h>
#include <asm/arch/fb.h>
-#include <asm/hardware/clock.h>
#ifdef CONFIG_PM
#include <linux/pm.h>
@@ -552,7 +552,7 @@ static inline void modify_gpio(void __iomem *reg,
* s3c2410fb_init_registers - Initialise all LCD-related registers
*/
-int s3c2410fb_init_registers(struct s3c2410fb_info *fbi)
+static int s3c2410fb_init_registers(struct s3c2410fb_info *fbi)
{
unsigned long flags;
@@ -634,7 +634,7 @@ static irqreturn_t s3c2410fb_irq(int irq, void *dev_id, struct pt_regs *r)
static char driver_name[]="s3c2410fb";
-int __init s3c2410fb_probe(struct platform_device *pdev)
+static int __init s3c2410fb_probe(struct platform_device *pdev)
{
struct s3c2410fb_info *info;
struct fb_info *fbinfo;
@@ -667,8 +667,6 @@ int __init s3c2410fb_probe(struct platform_device *pdev)
info->fb = fbinfo;
platform_set_drvdata(pdev, fbinfo);
- s3c2410fb_init_registers(info);
-
dprintk("devinit\n");
strcpy(fbinfo->fix.id, driver_name);
@@ -701,8 +699,8 @@ int __init s3c2410fb_probe(struct platform_device *pdev)
fbinfo->var.yres_virtual = mach_info->yres.defval;
fbinfo->var.bits_per_pixel = mach_info->bpp.defval;
- fbinfo->var.upper_margin = S3C2410_LCDCON2_GET_VBPD(mregs->lcdcon2) +1;
- fbinfo->var.lower_margin = S3C2410_LCDCON2_GET_VFPD(mregs->lcdcon2) +1;
+ fbinfo->var.upper_margin = S3C2410_LCDCON2_GET_VBPD(mregs->lcdcon2) + 1;
+ fbinfo->var.lower_margin = S3C2410_LCDCON2_GET_VFPD(mregs->lcdcon2) + 1;
fbinfo->var.vsync_len = S3C2410_LCDCON2_GET_VSPW(mregs->lcdcon2) + 1;
fbinfo->var.left_margin = S3C2410_LCDCON3_GET_HFPD(mregs->lcdcon3) + 1;
@@ -746,7 +744,6 @@ int __init s3c2410fb_probe(struct platform_device *pdev)
goto release_irq;
}
- clk_use(info->clk);
clk_enable(info->clk);
dprintk("got and enabled clock\n");
@@ -783,7 +780,6 @@ free_video_memory:
s3c2410fb_unmap_video_memory(info);
release_clock:
clk_disable(info->clk);
- clk_unuse(info->clk);
clk_put(info->clk);
release_irq:
free_irq(irq,info);
@@ -828,7 +824,6 @@ static int s3c2410fb_remove(struct platform_device *pdev)
if (info->clk) {
clk_disable(info->clk);
- clk_unuse(info->clk);
clk_put(info->clk);
info->clk = NULL;
}
diff --git a/drivers/video/sa1100fb.c b/drivers/video/sa1100fb.c
index 2ea1354e439..087e58689e4 100644
--- a/drivers/video/sa1100fb.c
+++ b/drivers/video/sa1100fb.c
@@ -178,7 +178,6 @@
#include <asm/hardware.h>
#include <asm/io.h>
-#include <asm/irq.h>
#include <asm/mach-types.h>
#include <asm/uaccess.h>
#include <asm/arch/assabet.h>
@@ -1455,7 +1454,11 @@ static struct sa1100fb_info * __init sa1100fb_init_fbinfo(struct device *dev)
static int __init sa1100fb_probe(struct platform_device *pdev)
{
struct sa1100fb_info *fbi;
- int ret;
+ int ret, irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -EINVAL;
if (!request_mem_region(0xb0100000, 0x10000, "LCD"))
return -EBUSY;
@@ -1470,7 +1473,7 @@ static int __init sa1100fb_probe(struct platform_device *pdev)
if (ret)
goto failed;
- ret = request_irq(IRQ_LCD, sa1100fb_handle_irq, SA_INTERRUPT,
+ ret = request_irq(irq, sa1100fb_handle_irq, SA_INTERRUPT,
"LCD", fbi);
if (ret) {
printk(KERN_ERR "sa1100fb: request_irq failed: %d\n", ret);
@@ -1492,7 +1495,7 @@ static int __init sa1100fb_probe(struct platform_device *pdev)
ret = register_framebuffer(&fbi->fb);
if (ret < 0)
- goto failed;
+ goto err_free_irq;
#ifdef CONFIG_CPU_FREQ
fbi->freq_transition.notifier_call = sa1100fb_freq_transition;
@@ -1504,7 +1507,9 @@ static int __init sa1100fb_probe(struct platform_device *pdev)
/* This driver cannot be unloaded at the moment */
return 0;
-failed:
+ err_free_irq:
+ free_irq(irq, fbi);
+ failed:
platform_set_drvdata(pdev, NULL);
kfree(fbi);
release_mem_region(0xb0100000, 0x10000);
diff --git a/drivers/video/savage/savagefb-i2c.c b/drivers/video/savage/savagefb-i2c.c
index 3c98457783c..00719a91479 100644
--- a/drivers/video/savage/savagefb-i2c.c
+++ b/drivers/video/savage/savagefb-i2c.c
@@ -49,7 +49,7 @@
static void savage4_gpio_setscl(void *data, int val)
{
- struct savagefb_i2c_chan *chan = (struct savagefb_i2c_chan *)data;
+ struct savagefb_i2c_chan *chan = data;
unsigned int r;
r = readl(chan->ioaddr + chan->reg);
@@ -63,7 +63,7 @@ static void savage4_gpio_setscl(void *data, int val)
static void savage4_gpio_setsda(void *data, int val)
{
- struct savagefb_i2c_chan *chan = (struct savagefb_i2c_chan *)data;
+ struct savagefb_i2c_chan *chan = data;
unsigned int r;
r = readl(chan->ioaddr + chan->reg);
@@ -77,21 +77,21 @@ static void savage4_gpio_setsda(void *data, int val)
static int savage4_gpio_getscl(void *data)
{
- struct savagefb_i2c_chan *chan = (struct savagefb_i2c_chan *)data;
+ struct savagefb_i2c_chan *chan = data;
return (0 != (readl(chan->ioaddr + chan->reg) & SAVAGE4_I2C_SCL_IN));
}
static int savage4_gpio_getsda(void *data)
{
- struct savagefb_i2c_chan *chan = (struct savagefb_i2c_chan *)data;
+ struct savagefb_i2c_chan *chan = data;
return (0 != (readl(chan->ioaddr + chan->reg) & SAVAGE4_I2C_SDA_IN));
}
static void prosavage_gpio_setscl(void* data, int val)
{
- struct savagefb_i2c_chan *chan = (struct savagefb_i2c_chan *)data;
+ struct savagefb_i2c_chan *chan = data;
u32 r;
SET_CR_IX(chan->ioaddr, chan->reg);
@@ -107,7 +107,7 @@ static void prosavage_gpio_setscl(void* data, int val)
static void prosavage_gpio_setsda(void* data, int val)
{
- struct savagefb_i2c_chan *chan = (struct savagefb_i2c_chan *)data;
+ struct savagefb_i2c_chan *chan = data;
unsigned int r;
SET_CR_IX(chan->ioaddr, chan->reg);
@@ -123,7 +123,7 @@ static void prosavage_gpio_setsda(void* data, int val)
static int prosavage_gpio_getscl(void* data)
{
- struct savagefb_i2c_chan *chan = (struct savagefb_i2c_chan *)data;
+ struct savagefb_i2c_chan *chan = data;
SET_CR_IX(chan->ioaddr, chan->reg);
return (0 != (GET_CR_DATA(chan->ioaddr) & PROSAVAGE_I2C_SCL_IN));
@@ -131,7 +131,7 @@ static int prosavage_gpio_getscl(void* data)
static int prosavage_gpio_getsda(void* data)
{
- struct savagefb_i2c_chan *chan = (struct savagefb_i2c_chan *)data;
+ struct savagefb_i2c_chan *chan = data;
SET_CR_IX(chan->ioaddr, chan->reg);
return (0 != (GET_CR_DATA(chan->ioaddr) & PROSAVAGE_I2C_SDA_IN));
@@ -140,10 +140,9 @@ static int prosavage_gpio_getsda(void* data)
static int savage_setup_i2c_bus(struct savagefb_i2c_chan *chan,
const char *name)
{
- int (*add_bus)(struct i2c_adapter *) = symbol_get(i2c_bit_add_bus);
int rc = 0;
- if (add_bus && chan->par) {
+ if (chan->par) {
strcpy(chan->adapter.name, name);
chan->adapter.owner = THIS_MODULE;
chan->adapter.id = I2C_HW_B_SAVAGE;
@@ -161,7 +160,7 @@ static int savage_setup_i2c_bus(struct savagefb_i2c_chan *chan,
chan->algo.setscl(chan, 1);
udelay(20);
- rc = add_bus(&chan->adapter);
+ rc = i2c_bit_add_bus(&chan->adapter);
if (rc == 0)
dev_dbg(&chan->par->pcidev->dev,
@@ -169,8 +168,6 @@ static int savage_setup_i2c_bus(struct savagefb_i2c_chan *chan,
else
dev_warn(&chan->par->pcidev->dev,
"Failed to register I2C bus %s.\n", name);
-
- symbol_put(i2c_bit_add_bus);
} else
chan->par = NULL;
@@ -179,7 +176,7 @@ static int savage_setup_i2c_bus(struct savagefb_i2c_chan *chan,
void savagefb_create_i2c_busses(struct fb_info *info)
{
- struct savagefb_par *par = (struct savagefb_par *)info->par;
+ struct savagefb_par *par = info->par;
par->chan.par = par;
switch(info->fix.accel) {
@@ -193,6 +190,7 @@ void savagefb_create_i2c_busses(struct fb_info *info)
par->chan.algo.getscl = prosavage_gpio_getscl;
break;
case FB_ACCEL_SAVAGE4:
+ case FB_ACCEL_SAVAGE2000:
par->chan.reg = 0xff20;
par->chan.ioaddr = par->mmio.vbase;
par->chan.algo.setsda = savage4_gpio_setsda;
@@ -209,14 +207,10 @@ void savagefb_create_i2c_busses(struct fb_info *info)
void savagefb_delete_i2c_busses(struct fb_info *info)
{
- struct savagefb_par *par = (struct savagefb_par *)info->par;
- int (*del_bus)(struct i2c_adapter *) =
- symbol_get(i2c_bit_del_bus);
+ struct savagefb_par *par = info->par;
- if (del_bus && par->chan.par) {
- del_bus(&par->chan.adapter);
- symbol_put(i2c_bit_del_bus);
- }
+ if (par->chan.par)
+ i2c_bit_del_bus(&par->chan.adapter);
par->chan.par = NULL;
}
@@ -224,8 +218,6 @@ void savagefb_delete_i2c_busses(struct fb_info *info)
static u8 *savage_do_probe_i2c_edid(struct savagefb_i2c_chan *chan)
{
u8 start = 0x0;
- int (*transfer)(struct i2c_adapter *, struct i2c_msg *, int) =
- symbol_get(i2c_transfer);
struct i2c_msg msgs[] = {
{
.addr = SAVAGE_DDC,
@@ -239,21 +231,19 @@ static u8 *savage_do_probe_i2c_edid(struct savagefb_i2c_chan *chan)
};
u8 *buf = NULL;
- if (transfer && chan->par) {
+ if (chan->par) {
buf = kmalloc(EDID_LENGTH, GFP_KERNEL);
if (buf) {
msgs[1].buf = buf;
- if (transfer(&chan->adapter, msgs, 2) != 2) {
+ if (i2c_transfer(&chan->adapter, msgs, 2) != 2) {
dev_dbg(&chan->par->pcidev->dev,
"Unable to read EDID block.\n");
kfree(buf);
buf = NULL;
}
}
-
- symbol_put(i2c_transfer);
}
return buf;
diff --git a/drivers/video/savage/savagefb_accel.c b/drivers/video/savage/savagefb_accel.c
index bac8ea3a010..bbcc055d3bb 100644
--- a/drivers/video/savage/savagefb_accel.c
+++ b/drivers/video/savage/savagefb_accel.c
@@ -21,7 +21,7 @@ static u32 savagefb_rop[] = {
int savagefb_sync(struct fb_info *info)
{
- struct savagefb_par *par = (struct savagefb_par *)info->par;
+ struct savagefb_par *par = info->par;
par->SavageWaitIdle(par);
return 0;
@@ -29,7 +29,7 @@ int savagefb_sync(struct fb_info *info)
void savagefb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
- struct savagefb_par *par = (struct savagefb_par *)info->par;
+ struct savagefb_par *par = info->par;
int sx = region->sx, dx = region->dx;
int sy = region->sy, dy = region->dy;
int cmd;
@@ -63,7 +63,7 @@ void savagefb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
void savagefb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct savagefb_par *par = (struct savagefb_par *)info->par;
+ struct savagefb_par *par = info->par;
int cmd, color;
if (!rect->width || !rect->height)
@@ -90,7 +90,7 @@ void savagefb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
void savagefb_imageblit(struct fb_info *info, const struct fb_image *image)
{
- struct savagefb_par *par = (struct savagefb_par *)info->par;
+ struct savagefb_par *par = info->par;
int fg, bg, size, i, width;
int cmd;
u32 *src = (u32 *) image->data;
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index 09e2f284190..ab727eaa7f4 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -686,7 +686,7 @@ static void savage_update_var(struct fb_var_screeninfo *var, struct fb_videomode
static int savagefb_check_var (struct fb_var_screeninfo *var,
struct fb_info *info)
{
- struct savagefb_par *par = (struct savagefb_par *)info->par;
+ struct savagefb_par *par = info->par;
int memlen, vramlen, mode_valid = 0;
DBG("savagefb_check_var");
@@ -1025,7 +1025,7 @@ static int savagefb_setcolreg(unsigned regno,
unsigned transp,
struct fb_info *info)
{
- struct savagefb_par *par = (struct savagefb_par *)info->par;
+ struct savagefb_par *par = info->par;
if (regno >= NR_PALETTE)
return -EINVAL;
@@ -1328,7 +1328,7 @@ static void savagefb_set_fix(struct fb_info *info)
#if defined(CONFIG_FB_SAVAGE_ACCEL)
static void savagefb_set_clip(struct fb_info *info)
{
- struct savagefb_par *par = (struct savagefb_par *)info->par;
+ struct savagefb_par *par = info->par;
int cmd;
cmd = BCI_CMD_NOP | BCI_CMD_CLIP_NEW;
@@ -1342,7 +1342,7 @@ static void savagefb_set_clip(struct fb_info *info)
static int savagefb_set_par (struct fb_info *info)
{
- struct savagefb_par *par = (struct savagefb_par *)info->par;
+ struct savagefb_par *par = info->par;
struct fb_var_screeninfo *var = &info->var;
int err;
@@ -1381,29 +1381,9 @@ static int savagefb_set_par (struct fb_info *info)
static int savagefb_pan_display (struct fb_var_screeninfo *var,
struct fb_info *info)
{
- struct savagefb_par *par = (struct savagefb_par *)info->par;
- u_int y_bottom;
-
- y_bottom = var->yoffset;
-
- if (!(var->vmode & FB_VMODE_YWRAP))
- y_bottom += var->yres;
-
- if (var->xoffset > (var->xres_virtual - var->xres))
- return -EINVAL;
- if (y_bottom > info->var.yres_virtual)
- return -EINVAL;
+ struct savagefb_par *par = info->par;
savagefb_update_start (par, var);
-
- info->var.xoffset = var->xoffset;
- info->var.yoffset = var->yoffset;
-
- if (var->vmode & FB_VMODE_YWRAP)
- info->var.vmode |= FB_VMODE_YWRAP;
- else
- info->var.vmode &= ~FB_VMODE_YWRAP;
-
return 0;
}
@@ -1534,7 +1514,7 @@ static void savage_disable_mmio (struct savagefb_par *par)
static int __devinit savage_map_mmio (struct fb_info *info)
{
- struct savagefb_par *par = (struct savagefb_par *)info->par;
+ struct savagefb_par *par = info->par;
DBG ("savage_map_mmio");
if (S3_SAVAGE3D_SERIES (par->chip))
@@ -1567,7 +1547,7 @@ static int __devinit savage_map_mmio (struct fb_info *info)
static void __devinit savage_unmap_mmio (struct fb_info *info)
{
- struct savagefb_par *par = (struct savagefb_par *)info->par;
+ struct savagefb_par *par = info->par;
DBG ("savage_unmap_mmio");
savage_disable_mmio(par);
@@ -1581,7 +1561,7 @@ static void __devinit savage_unmap_mmio (struct fb_info *info)
static int __devinit savage_map_video (struct fb_info *info,
int video_len)
{
- struct savagefb_par *par = (struct savagefb_par *)info->par;
+ struct savagefb_par *par = info->par;
int resource;
DBG("savage_map_video");
@@ -1619,7 +1599,7 @@ static int __devinit savage_map_video (struct fb_info *info,
static void __devinit savage_unmap_video (struct fb_info *info)
{
- struct savagefb_par *par = (struct savagefb_par *)info->par;
+ struct savagefb_par *par = info->par;
DBG("savage_unmap_video");
@@ -1869,7 +1849,7 @@ static int __devinit savage_init_fb_info (struct fb_info *info,
struct pci_dev *dev,
const struct pci_device_id *id)
{
- struct savagefb_par *par = (struct savagefb_par *)info->par;
+ struct savagefb_par *par = info->par;
int err = 0;
par->pcidev = dev;
@@ -2139,8 +2119,7 @@ static int __devinit savagefb_probe (struct pci_dev* dev,
static void __devexit savagefb_remove (struct pci_dev *dev)
{
- struct fb_info *info =
- (struct fb_info *)pci_get_drvdata(dev);
+ struct fb_info *info = pci_get_drvdata(dev);
DBG("savagefb_remove");
@@ -2174,9 +2153,8 @@ static void __devexit savagefb_remove (struct pci_dev *dev)
static int savagefb_suspend (struct pci_dev* dev, pm_message_t state)
{
- struct fb_info *info =
- (struct fb_info *)pci_get_drvdata(dev);
- struct savagefb_par *par = (struct savagefb_par *)info->par;
+ struct fb_info *info = pci_get_drvdata(dev);
+ struct savagefb_par *par = info->par;
DBG("savagefb_suspend");
@@ -2210,9 +2188,8 @@ static int savagefb_suspend (struct pci_dev* dev, pm_message_t state)
static int savagefb_resume (struct pci_dev* dev)
{
- struct fb_info *info =
- (struct fb_info *)pci_get_drvdata(dev);
- struct savagefb_par *par = (struct savagefb_par *)info->par;
+ struct fb_info *info = pci_get_drvdata(dev);
+ struct savagefb_par *par = info->par;
int cur_state = par->pm_state;
DBG("savage_resume");
diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c
index a01e7ecc15e..9b707771d75 100644
--- a/drivers/video/skeletonfb.c
+++ b/drivers/video/skeletonfb.c
@@ -115,7 +115,8 @@ static struct fb_fix_screeninfo xxxfb_fix __initdata = {
/*
* If your driver supports multiple boards or it supports multiple
* framebuffers, you should make these arrays, or allocate them
- * dynamically (using kmalloc()).
+ * dynamically using framebuffer_alloc() and free them with
+ * framebuffer_release().
*/
static struct fb_info info;
@@ -179,18 +180,31 @@ static int xxxfb_release(const struct fb_info *info, int user)
* intent to only test a mode and not actually set it. The stuff in
* modedb.c is an example of this. If the var passed in is slightly
* off by what the hardware can support then we alter the var PASSED in
- * to what we can do. If the hardware doesn't support mode change
- * a -EINVAL will be returned by the upper layers. You don't need to
- * implement this function then. If you hardware doesn't support
- * changing the resolution then this function is not needed. In this
- * case the driver woudl just provide a var that represents the static
- * state the screen is in.
+ * to what we can do.
+ *
+ * For values that are off, this function must round them _up_ to the
+ * next value that is supported by the hardware. If the value is
+ * greater than the highest value supported by the hardware, then this
+ * function must return -EINVAL.
+ *
+ * Exception to the above rule: Some drivers have a fixed mode, ie,
+ * the hardware is already set at boot up, and cannot be changed. In
+ * this case, it is more acceptable that this function just return
+ * a copy of the currently working var (info->var). Better is to not
+ * implement this function, as the upper layer will do the copying
+ * of the current var for you.
+ *
+ * Note: This is the only function where the contents of var can be
+ * freely adjusted after the driver has been registered. If you find
+ * that you have code outside of this function that alters the content
+ * of var, then you are doing something wrong. Note also that the
+ * contents of info->var must be left untouched at all times after
+ * driver registration.
*
* Returns negative errno on error, or zero on success.
*/
static int xxxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
- const struct xxx_par *par = (const struct xxx_par *) info->par;
/* ... */
return 0;
}
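
As an illustration of the round-up rule described in the comment above, a minimal check_var might look like the sketch below. The 8-pixel granularity and the XXX_MAX_XRES/XXX_MAX_YRES limits are assumptions made up for this example, not part of the patch or of any real hardware:

	static int xxxfb_check_var(struct fb_var_screeninfo *var,
				   struct fb_info *info)
	{
		/* round the requested width up to the next supported value */
		var->xres = (var->xres + 7) & ~7;

		/* larger than the hardware can do: reject, do not round down */
		if (var->xres > XXX_MAX_XRES || var->yres > XXX_MAX_YRES)
			return -EINVAL;

		/* keep the virtual resolution at least as large as the visible one */
		if (var->xres_virtual < var->xres)
			var->xres_virtual = var->xres;
		if (var->yres_virtual < var->yres)
			var->yres_virtual = var->yres;

		return 0;
	}
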
@@ -204,14 +218,39 @@ static int xxxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
* fb_fix_screeninfo stored in fb_info. It does not alter var in
* fb_info since we are using that data. This means we depend on the
* data in var inside fb_info to be supported by the hardware.
- * xxxfb_check_var is always called before xxxfb_set_par to ensure this.
+ *
+ * This function is also used to recover/restore the hardware to a
+ * known working state.
+ *
+ * xxxfb_check_var is always called before xxxfb_set_par to ensure that
+ * the contents of var is always valid.
+ *
* Again if you can't change the resolution you don't need this function.
*
+ * However, even if your hardware does not support mode changing,
+ * a set_par might be needed to at least initialize the hardware to
+ * a known working state, especially if it came back from another
+ * process that also modifies the same hardware, such as X.
+ *
+ * If this is the case, a combination such as the following should work:
+ *
+ * static int xxxfb_check_var(struct fb_var_screeninfo *var,
+ * struct fb_info *info)
+ * {
+ * *var = info->var;
+ * return 0;
+ * }
+ *
+ * static int xxxfb_set_par(struct fb_info *info)
+ * {
+ * init your hardware here
+ * }
+ *
* Returns negative errno on error, or zero on success.
*/
static int xxxfb_set_par(struct fb_info *info)
{
- struct xxx_par *par = (struct xxx_par *) info->par;
+ struct xxx_par *par = info->par;
/* ... */
return 0;
}
@@ -258,70 +297,110 @@ static int xxxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
* var->{color}.offset contains start of bitfield
* var->{color}.length contains length of bitfield
* {hardwarespecific} contains width of DAC
- * cmap[X] is programmed to (X << red.offset) | (X << green.offset) | (X << blue.offset)
+ * pseudo_palette[X] is programmed to (X << red.offset) |
+ * (X << green.offset) |
+ * (X << blue.offset)
* RAMDAC[X] is programmed to (red, green, blue)
+ * color depth = SUM(var->{color}.length)
*
* Pseudocolor:
- * uses offset = 0 && length = DAC register width.
* var->{color}.offset is 0
- * var->{color}.length contains widht of DAC
- * cmap is not used
- * DAC[X] is programmed to (red, green, blue)
+ * var->{color}.length contains width of DAC or the number of unique
+ * colors available (color depth)
+ * pseudo_palette is not used
+ * RAMDAC[X] is programmed to (red, green, blue)
+ * color depth = var->{color}.length
+ *
+ * Static pseudocolor:
+ * same as Pseudocolor, but the RAMDAC is not programmed (read-only)
+ *
+ * Mono01/Mono10:
+ * Has only 2 values, black on white or white on black (fg on bg),
+ * var->{color}.offset is 0
+ * white = (1 << var->{color}.length) - 1, black = 0
+ * pseudo_palette is not used
+ * RAMDAC does not exist
+ * color depth is always 2
+ *
* Truecolor:
* does not use RAMDAC (usually has 3 of them).
* var->{color}.offset contains start of bitfield
* var->{color}.length contains length of bitfield
- * cmap is programmed to (red << red.offset) | (green << green.offset) |
- * (blue << blue.offset) | (transp << transp.offset)
+ * pseudo_palette is programmed to (red << red.offset) |
+ * (green << green.offset) |
+ * (blue << blue.offset) |
+ * (transp << transp.offset)
* RAMDAC does not exist
+ * color depth = SUM(var->{color}.length)
+ *
+ * The color depth is used by fbcon for choosing the logo and also
+ * for color palette transformation if color depth < 4
+ *
+ * As can be seen from the above, the field bits_per_pixel is _NOT_
+ * a criterion for describing the color visual.
+ *
+ * A common mistake is assuming that bits_per_pixel <= 8 is pseudocolor,
+ * and higher than that, true/directcolor. This is incorrect; one needs
+ * to look at fix->visual.
+ *
+ * Another common mistake is using bits_per_pixel to calculate the color
+ * depth. The bits_per_pixel field does not directly translate to color
+ * depth. You have to compute the color depth (using the color
+ * bitfields) and fix->visual as seen above.
+ */
+
+ /*
+ * This is the point where the color is converted to something that
+ * is acceptable by the hardware.
*/
#define CNVT_TOHW(val,width) ((((val)<<(width))+0x7FFF-(val))>>16)
- switch (info->fix.visual) {
- case FB_VISUAL_TRUECOLOR:
- case FB_VISUAL_PSEUDOCOLOR:
- red = CNVT_TOHW(red, info->var.red.length);
- green = CNVT_TOHW(green, info->var.green.length);
- blue = CNVT_TOHW(blue, info->var.blue.length);
- transp = CNVT_TOHW(transp, info->var.transp.length);
- break;
- case FB_VISUAL_DIRECTCOLOR:
- /* example here assumes 8 bit DAC. Might be different
- * for your hardware */
- red = CNVT_TOHW(red, 8);
- green = CNVT_TOHW(green, 8);
- blue = CNVT_TOHW(blue, 8);
- /* hey, there is bug in transp handling... */
- transp = CNVT_TOHW(transp, 8);
- break;
- }
+ red = CNVT_TOHW(red, info->var.red.length);
+ green = CNVT_TOHW(green, info->var.green.length);
+ blue = CNVT_TOHW(blue, info->var.blue.length);
+ transp = CNVT_TOHW(transp, info->var.transp.length);
#undef CNVT_TOHW
- /* Truecolor has hardware independent palette */
- if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
- u32 v;
-
- if (regno >= 16)
- return -EINVAL;
-
- v = (red << info->var.red.offset) |
- (green << info->var.green.offset) |
- (blue << info->var.blue.offset) |
- (transp << info->var.transp.offset);
-
- switch (info->var.bits_per_pixel) {
- case 8:
- /* Yes some hand held devices have this. */
- ((u8*)(info->pseudo_palette))[regno] = v;
- break;
- case 16:
- ((u16*)(info->pseudo_palette))[regno] = v;
- break;
- case 24:
- case 32:
- ((u32*)(info->pseudo_palette))[regno] = v;
- break;
- }
- return 0;
+ /*
+ * This is the point where the function feeds the color to the hardware
+ * palette after converting the colors to something acceptable by
+ * the hardware. Note, only FB_VISUAL_DIRECTCOLOR and
+ * FB_VISUAL_PSEUDOCOLOR visuals need to write to the hardware palette.
+ * If you have code that writes to the hardware CLUT, and it's not
+ * any of the above visuals, then you are doing something wrong.
+ */
+ if (info->fix.visual == FB_VISUAL_DIRECTCOLOR ||
+ info->fix.visual == FB_VISUAL_PSEUDOCOLOR)
+ write_{red|green|blue|transp}_to_clut();
+
+ /* This is the point where you need to fill up the contents of
+ * info->pseudo_palette. This structure is used _only_ by fbcon, thus
+ * it only contains 16 entries to match the number of colors supported
+ * by the console. The pseudo_palette is used only if the visual is
+ * in directcolor or truecolor mode. With other visuals, the
+ * pseudo_palette is not used. (This might change in the future.)
+ *
+ * The contents of the pseudo_palette are in raw pixel format, i.e., each
+ * entry can be written directly to the framebuffer without any conversion.
+ * The pseudo_palette is (void *). However, if using the generic
+ * drawing functions (cfb_imageblit, cfb_fillrect), the pseudo_palette
+ * must be cast to (u32 *) _regardless_ of the bits per pixel. If the
+ * driver is using its own drawing functions, then it can use whatever
+ * size it wants.
+ */
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+ u32 v;
+
+ if (regno >= 16)
+ return -EINVAL;
+
+ v = (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset) |
+ (transp << info->var.transp.offset);
+
+ ((u32*)(info->pseudo_palette))[regno] = v;
}
+
/* ... */
return 0;
}
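
Since the comment above insists that the color depth comes from the bitfield lengths and fix->visual rather than from bits_per_pixel, a small helper along these lines can make that explicit. This is a sketch for illustration only; the helper name is made up and the mapping simply restates the table in the comment:

	static unsigned int xxxfb_color_depth(struct fb_info *info)
	{
		struct fb_var_screeninfo *var = &info->var;

		switch (info->fix.visual) {
		case FB_VISUAL_TRUECOLOR:
		case FB_VISUAL_DIRECTCOLOR:
			/* depth is the sum of the bitfield lengths */
			return var->red.length + var->green.length +
			       var->blue.length;
		case FB_VISUAL_PSEUDOCOLOR:
		case FB_VISUAL_STATIC_PSEUDOCOLOR:
		default:
			/* depth is the length of a single bitfield */
			return var->green.length;
		}
	}
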
@@ -340,6 +419,17 @@ static int xxxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
static int xxxfb_pan_display(struct fb_var_screeninfo *var,
const struct fb_info *info)
{
+ /*
+ * If your hardware does not support panning, _do_ _not_ implement this
+ * function. Creating a dummy function will just confuse user apps.
+ */
+
+ /*
+ * Note that even if this function is fully functional, a setting of
+ * 0 in both xpanstep and ypanstep means that this function will never
+ * get called.
+ */
+
/* ... */
return 0;
}
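
The riva and savage hunks earlier in this patch show what is typically left once the offset checks are handled elsewhere: pan_display reduces to reprogramming the display start address. A minimal sketch of that pattern, where xxx_set_start() stands in for whatever register write the hardware actually needs (an assumed name, not a real helper):

	static int xxxfb_pan_display(struct fb_var_screeninfo *var,
				     struct fb_info *info)
	{
		struct xxx_par *par = info->par;
		u32 base;

		/* units (bytes vs. pixels) are hardware dependent */
		base = var->yoffset * info->fix.line_length + var->xoffset;
		xxx_set_start(par, base);	/* assumed hardware-specific write */

		return 0;
	}
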
@@ -349,15 +439,20 @@ static int xxxfb_pan_display(struct fb_var_screeninfo *var,
* @blank_mode: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*
- * Blank the screen if blank_mode != 0, else unblank. Return 0 if
- * blanking succeeded, != 0 if un-/blanking failed due to e.g. a
- * video mode which doesn't support it. Implements VESA suspend
- * and powerdown modes on hardware that supports disabling hsync/vsync:
- * blank_mode == 2: suspend vsync
- * blank_mode == 3: suspend hsync
- * blank_mode == 4: powerdown
+ * Blank the screen if blank_mode != FB_BLANK_UNBLANK, else unblank.
+ * Return 0 if blanking succeeded, != 0 if un-/blanking failed due to
+ * e.g. a video mode which doesn't support it.
*
- * Returns negative errno on error, or zero on success.
+ * Implements VESA suspend and powerdown modes on hardware that supports
+ * disabling hsync/vsync:
+ *
+ * FB_BLANK_NORMAL = display is blanked, syncs are on.
+ * FB_BLANK_HSYNC_SUSPEND = hsync off
+ * FB_BLANK_VSYNC_SUSPEND = vsync off
+ * FB_BLANK_POWERDOWN = hsync and vsync off
+ *
+ * If implementing this function, at least support FB_BLANK_UNBLANK.
+ * Return !0 for any modes that are unimplemented.
*
*/
static int xxxfb_blank(int blank_mode, const struct fb_info *info)
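
A sketch of how the FB_BLANK_* levels listed above usually map onto hardware controls; xxx_display_enable() and xxx_sync_enable() are assumed, hardware-specific helpers invented for this example:

	static int xxxfb_blank(int blank_mode, struct fb_info *info)
	{
		struct xxx_par *par = info->par;

		switch (blank_mode) {
		case FB_BLANK_UNBLANK:
			xxx_sync_enable(par, 1, 1);	/* hsync and vsync on */
			xxx_display_enable(par, 1);
			break;
		case FB_BLANK_NORMAL:
			xxx_display_enable(par, 0);	/* blank, syncs stay on */
			break;
		case FB_BLANK_HSYNC_SUSPEND:
			xxx_sync_enable(par, 0, 1);	/* hsync off */
			break;
		case FB_BLANK_VSYNC_SUSPEND:
			xxx_sync_enable(par, 1, 0);	/* vsync off */
			break;
		case FB_BLANK_POWERDOWN:
			xxx_sync_enable(par, 0, 0);	/* hsync and vsync off */
			break;
		default:
			return 1;	/* mode not implemented */
		}
		return 0;
	}
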
@@ -454,6 +549,14 @@ void xxxfb_imageblit(struct fb_info *p, const struct fb_image *image)
* @data: The actual data used to construct the image on the display.
* @cmap: The colormap used for color images.
*/
+
+/*
+ * The generic function, cfb_imageblit, expects that the bitmap scanlines are
+ * padded to the next byte. Most hardware accelerators may require padding to
+ * the next u16 or the next u32. If that is the case, the driver can specify
+ * this by setting info->pixmap.scan_align = 2 or 4. See a more
+ * comprehensive description of the pixmap below.
+ */
}
/**
@@ -517,6 +620,7 @@ int xxxfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
*/
void xxxfb_rotate(struct fb_info *info, int angle)
{
+/* Will be deprecated */
}
/**
@@ -540,6 +644,9 @@ void xxxfb_poll(struct fb_info *info, poll_table *wait)
* so we can have consistent display output.
*
* @info: frame buffer structure that represents a single frame buffer
+ *
+ * If the driver has implemented its own hardware-based drawing function,
+ * implementing this function is highly recommended.
*/
void xxxfb_sync(struct fb_info *info)
{
@@ -549,20 +656,25 @@ void xxxfb_sync(struct fb_info *info)
* Initialization
*/
-int __init xxxfb_init(void)
+/* static int __init xxxfb_probe (struct device *device) -- for platform devs */
+static int __init xxxfb_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
{
+ struct fb_info *info;
+ struct xxx_par *par;
+ struct device *device = &dev->dev; /* for pci drivers */
int cmap_len, retval;
/*
- * For kernel boot options (in 'video=xxxfb:<options>' format)
+ * Dynamically allocate info and par
*/
-#ifndef MODULE
- char *option = NULL;
+ info = framebuffer_alloc(sizeof(struct xxx_par), device);
- if (fb_get_options("xxxfb", &option))
- return -ENODEV;
- xxxfb_setup(option);
-#endif
+ if (!info) {
+ /* goto error path */
+ }
+
+ par = info->par;
/*
* Here we set the screen_base to the virtual memory address
@@ -570,18 +682,87 @@ int __init xxxfb_init(void)
* from the bus layer and then translate it to virtual memory
* space via ioremap. Consult ioport.h.
*/
- info.screen_base = framebuffer_virtual_memory;
- info.fbops = &xxxfb_ops;
- info.fix = xxxfb_fix;
- info.pseudo_palette = pseudo_palette;
-
+ info->screen_base = framebuffer_virtual_memory;
+ info->fbops = &xxxfb_ops;
+ info->fix = xxxfb_fix; /* this will be the only time xxxfb_fix will be
+ * used, so mark it as __initdata
+ */
+ info->pseudo_palette = pseudo_palette; /* The pseudopalette is a
+ * 16-member array
+ */
/*
* Set up flags to indicate what sort of acceleration your
* driver can provide (pan/wrap/copyarea/etc.) and whether it
* is a module -- see FBINFO_* in include/linux/fb.h
+ *
+ * If your hardware can support any of the hardware accelerated functions,
+ * fbcon performance will improve if info->flags is set properly.
+ *
+ * FBINFO_HWACCEL_COPYAREA - hardware moves
+ * FBINFO_HWACCEL_FILLRECT - hardware fills
+ * FBINFO_HWACCEL_IMAGEBLIT - hardware mono->color expansion
+ * FBINFO_HWACCEL_YPAN - hardware can pan display in y-axis
+ * FBINFO_HWACCEL_YWRAP - hardware can wrap display in y-axis
+ * FBINFO_HWACCEL_DISABLED - supports hardware accels, but disabled
+ * FBINFO_READS_FAST - if set, prefer moves over mono->color expansion
+ * FBINFO_MISC_TILEBLITTING - hardware can do tile blits
+ *
+ * NOTE: These are for fbcon use only.
+ */
+ info->flags = FBINFO_DEFAULT;
+
+/********************* This stage is optional ******************************/
+ /*
+ * The struct pixmap is a scratch pad for the drawing functions. This
+ * is where the monochrome bitmap is constructed by the higher layers
+ * and then passed to the accelerator. For drivers that use
+ * cfb_imageblit, you can skip this part. For those that have a more
+ * rigorous requirement, this stage is needed.
+ */
+
+ /* PIXMAP_SIZE should be small enough to optimize drawing, but not
+ * so large that memory is wasted. A safe size is
+ * (max_xres * max_font_height/8). max_xres is driver dependent,
+ * max_font_height is 32.
+ */
+ info->pixmap.addr = kmalloc(PIXMAP_SIZE, GFP_KERNEL);
+ if (!info->pixmap.addr) {
+ /* goto error */
+ }
+
+ info->pixmap.size = PIXMAP_SIZE;
+
+ /*
+ * FB_PIXMAP_SYSTEM - memory is in system ram
+ * FB_PIXMAP_IO - memory is iomapped
+ * FB_PIXMAP_SYNC - if set, will call fb_sync() per access to pixmap,
+ * usually if FB_PIXMAP_IO is set.
+ *
+ * Currently, FB_PIXMAP_IO is unimplemented.
+ */
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+ /*
+ * scan_align is the amount of padding, in bytes, for each scanline.
+ * Thus for accelerators that need padding to the next u32, put 4 here.
+ */
+ info->pixmap.scan_align = 4;
+
+ /*
+ * buf_align is the amount to be padded for the buffer. For example,
+ * the i810fb needs a scan_align of 2 but expects it to be fed with
+ * dwords, so a buf_align = 4 is required.
*/
- info.flags = FBINFO_DEFAULT;
- info.par = current_par;
+ info->pixmap.buf_align = 4;
+
+ /* access_align is how many bits can be accessed from the framebuffer
+ * ie. some epson cards allow 16-bit access only. Most drivers will
+ * be safe with u32 here.
+ *
+ * NOTE: This field is currently unused.
+ */
+ info->pixmap.access_align = 32;
+/***************************** End optional stage ***************************/
/*
* This should give a reasonable default video mode. The following is
@@ -590,42 +771,145 @@ int __init xxxfb_init(void)
if (!mode_option)
mode_option = "640x480@60";
- retval = fb_find_mode(&info.var, &info, mode_option, NULL, 0, NULL, 8);
+ retval = fb_find_mode(&info->var, info, mode_option, NULL, 0, NULL, 8);
if (!retval || retval == 4)
return -EINVAL;
/* This has to be done !!! */
- fb_alloc_cmap(&info.cmap, cmap_len, 0);
+ fb_alloc_cmap(&info->cmap, cmap_len, 0);
/*
* The following is done in the case of having hardware with a static
* mode. If we are setting the mode ourselves we don't call this.
*/
- info.var = xxxfb_var;
-
- if (register_framebuffer(&info) < 0)
+ info->var = xxxfb_var;
+
+ /*
+ * For drivers that can...
+ */
+ xxxfb_check_var(&info->var, info);
+
+ /*
+ * Is a call to fb_set_par() before register_framebuffer() needed? This
+ * will depend on you and the hardware. If you are sure that your driver
+ * is the only device in the system, a call to fb_set_par() is safe.
+ *
+ * Hardware in x86 systems has a VGA core. Calling set_par() at this
+ * point will corrupt the VGA console, so it might be safer to skip a
+ * call to set_par here and just allow fbcon to do it for you.
+ */
+ /* xxxfb_set_par(info); */
+
+ if (register_framebuffer(info) < 0)
return -EINVAL;
- printk(KERN_INFO "fb%d: %s frame buffer device\n", info.node,
- info.fix.id);
+ printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
+ info->fix.id);
+ pci_set_drvdata(dev, info); /* or dev_set_drvdata(device, info) */
return 0;
}
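
The sa1100fb hunk earlier in this patch adds exactly this style of unwind when register_framebuffer() fails. As a sketch of how the "goto error path" placeholders in the probe above might be filled in (the label names and ordering are assumptions for illustration; the calls themselves are the standard fbdev ones):

		info->pixmap.addr = kmalloc(PIXMAP_SIZE, GFP_KERNEL);
		if (!info->pixmap.addr) {
			retval = -ENOMEM;
			goto err_release_fb;
		}

		if (fb_alloc_cmap(&info->cmap, cmap_len, 0) < 0) {
			retval = -ENOMEM;
			goto err_free_pixmap;
		}

		if (register_framebuffer(info) < 0) {
			retval = -EINVAL;
			goto err_dealloc_cmap;
		}
		pci_set_drvdata(dev, info);
		return 0;

	err_dealloc_cmap:
		fb_dealloc_cmap(&info->cmap);
	err_free_pixmap:
		kfree(info->pixmap.addr);
	err_release_fb:
		framebuffer_release(info);
		return retval;

Unwinding in the reverse order of allocation keeps each label responsible for exactly the resources acquired before it.
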
/*
* Cleanup
*/
+/* static void __exit xxxfb_remove(struct device *device) */
+static void __exit xxxfb_remove(struct pci_dev *dev)
+{
+ struct fb_info *info = pci_get_drvdata(dev);
+ /* or dev_get_drvdata(device); */
+
+ if (info) {
+ unregister_framebuffer(info);
+ fb_dealloc_cmap(&info->cmap);
+ /* ... */
+ framebuffer_release(info);
+ }
+}
-static void __exit xxxfb_cleanup(void)
+#ifdef CONFIG_PCI
+/* For PCI drivers */
+static struct pci_driver xxxfb_driver = {
+ .name = "xxxfb",
+ .id_table = xxxfb_devices,
+ .probe = xxxfb_probe,
+ .remove = __devexit_p(xxxfb_remove),
+ .suspend = xxxfb_suspend, /* optional */
+ .resume = xxxfb_resume, /* optional */
+};
+
+static int __init xxxfb_init(void)
{
- /*
- * If your driver supports multiple boards, you should unregister and
- * clean up all instances.
- */
+ /*
+ * For kernel boot options (in 'video=xxxfb:<options>' format)
+ */
+#ifndef MODULE
+ char *option = NULL;
- unregister_framebuffer(info);
- fb_dealloc_cmap(&info.cmap);
- /* ... */
+ if (fb_get_options("xxxfb", &option))
+ return -ENODEV;
+ xxxfb_setup(option);
+#endif
+
+ return pci_register_driver(&xxxfb_driver);
+}
+
+static void __exit xxxfb_exit(void)
+{
+ pci_unregister_driver(&xxxfb_driver);
}
+#else
+#include <linux/platform_device.h>
+/* for platform devices */
+static struct device_driver xxxfb_driver = {
+ .name = "xxxfb",
+ .bus = &platform_bus_type,
+ .probe = xxxfb_probe,
+ .remove = xxxfb_remove,
+ .suspend = xxxfb_suspend, /* optional */
+ .resume = xxxfb_resume, /* optional */
+};
+
+static struct platform_device xxxfb_device = {
+ .name = "xxxfb",
+};
+
+static int __init xxxfb_init(void)
+{
+ int ret;
+ /*
+ * For kernel boot options (in 'video=xxxfb:<options>' format)
+ */
+#ifndef MODULE
+ char *option = NULL;
+
+ if (fb_get_options("xxxfb", &option))
+ return -ENODEV;
+ xxxfb_setup(option);
+#endif
+ ret = driver_register(&xxxfb_driver);
+
+ if (!ret) {
+ ret = platform_device_register(&xxxfb_device);
+ if (ret)
+ driver_unregister(&xxxfb_driver);
+ }
+
+ return ret;
+}
+
+static void __exit xxxfb_exit(void)
+{
+ platform_device_unregister(&xxxfb_device);
+ driver_unregister(&xxxfb_driver);
+}
+#endif
+
+MODULE_LICENSE("GPL");
+module_init(xxxfb_init);
+module_exit(xxxfb_exit);
+
/*
* Setup
diff --git a/drivers/video/sstfb.c b/drivers/video/sstfb.c
index e0f14df840d..8a5ce210bb2 100644
--- a/drivers/video/sstfb.c
+++ b/drivers/video/sstfb.c
@@ -382,7 +382,7 @@ static void sstfb_clear_screen(struct fb_info *info)
static int sstfb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- struct sstfb_par *par = (struct sstfb_par *) info->par;
+ struct sstfb_par *par = info->par;
int hSyncOff = var->xres + var->right_margin + var->left_margin;
int vSyncOff = var->yres + var->lower_margin + var->upper_margin;
int vBackPorch = var->left_margin, yDim = var->yres;
@@ -542,7 +542,7 @@ static int sstfb_check_var(struct fb_var_screeninfo *var,
*/
static int sstfb_set_par(struct fb_info *info)
{
- struct sstfb_par *par = (struct sstfb_par *) info->par;
+ struct sstfb_par *par = info->par;
u32 lfbmode, fbiinit1, fbiinit2, fbiinit3, fbiinit5, fbiinit6=0;
struct pci_dev *sst_dev = par->dev;
unsigned int freq;
@@ -748,13 +748,14 @@ static int sstfb_set_par(struct fb_info *info)
static int sstfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info)
{
+ struct sstfb_par *par = info->par;
u32 col;
f_dddprintk("sstfb_setcolreg\n");
f_dddprintk("%-2d rgbt: %#x, %#x, %#x, %#x\n",
regno, red, green, blue, transp);
- if (regno >= 16)
- return -EINVAL;
+ if (regno > 15)
+ return 0;
red >>= (16 - info->var.red.length);
green >>= (16 - info->var.green.length);
@@ -765,7 +766,7 @@ static int sstfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
| (blue << info->var.blue.offset)
| (transp << info->var.transp.offset);
- ((u32 *)info->pseudo_palette)[regno] = col;
+ par->palette[regno] = col;
return 0;
}
@@ -773,7 +774,7 @@ static int sstfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
static int sstfb_ioctl(struct inode *inode, struct file *file,
u_int cmd, u_long arg, struct fb_info *info )
{
- struct sstfb_par *par = (struct sstfb_par *) info->par;
+ struct sstfb_par *par = info->par;
struct pci_dev *sst_dev = par->dev;
u32 fbiinit0, tmp, val;
u_long p;
@@ -830,7 +831,7 @@ static int sstfb_ioctl(struct inode *inode, struct file *file,
#if 0
static void sstfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
- struct sstfb_par *par = (struct sstfb_par *) info->par;
+ struct sstfb_par *par = info->par;
u32 stride = info->fix.line_length;
if (!IS_VOODOO2(par))
@@ -855,7 +856,7 @@ static void sstfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
*/
static void sstfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct sstfb_par *par = (struct sstfb_par *) info->par;
+ struct sstfb_par *par = info->par;
u32 stride = info->fix.line_length;
if (!IS_VOODOO2(par))
@@ -925,7 +926,7 @@ static int __devinit sst_get_memsize(struct fb_info *info, __u32 *memsize)
static int __devinit sst_detect_att(struct fb_info *info)
{
- struct sstfb_par *par = (struct sstfb_par *) info->par;
+ struct sstfb_par *par = info->par;
int i, mir, dir;
for (i=0; i<3; i++) {
@@ -950,7 +951,7 @@ static int __devinit sst_detect_att(struct fb_info *info)
static int __devinit sst_detect_ti(struct fb_info *info)
{
- struct sstfb_par *par = (struct sstfb_par *) info->par;
+ struct sstfb_par *par = info->par;
int i, mir, dir;
for (i = 0; i<3; i++) {
@@ -986,7 +987,7 @@ static int __devinit sst_detect_ti(struct fb_info *info)
*/
static int __devinit sst_detect_ics(struct fb_info *info)
{
- struct sstfb_par *par = (struct sstfb_par *) info->par;
+ struct sstfb_par *par = info->par;
int m_clk0_1, m_clk0_7, m_clk1_b;
int n_clk0_1, n_clk0_7, n_clk1_b;
int i;
@@ -1023,7 +1024,7 @@ static int __devinit sst_detect_ics(struct fb_info *info)
static int sst_set_pll_att_ti(struct fb_info *info,
const struct pll_timing *t, const int clock)
{
- struct sstfb_par *par = (struct sstfb_par *) info->par;
+ struct sstfb_par *par = info->par;
u8 cr0, cc;
/* enable indexed mode */
@@ -1077,7 +1078,7 @@ static int sst_set_pll_att_ti(struct fb_info *info,
static int sst_set_pll_ics(struct fb_info *info,
const struct pll_timing *t, const int clock)
{
- struct sstfb_par *par = (struct sstfb_par *) info->par;
+ struct sstfb_par *par = info->par;
u8 pll_ctrl;
sst_dac_write(DACREG_ICS_PLLRMA, DACREG_ICS_PLL_CTRL);
@@ -1114,7 +1115,7 @@ static int sst_set_pll_ics(struct fb_info *info,
static void sst_set_vidmod_att_ti(struct fb_info *info, const int bpp)
{
- struct sstfb_par *par = (struct sstfb_par *) info->par;
+ struct sstfb_par *par = info->par;
u8 cr0;
sst_dac_write(DACREG_WMA, 0); /* backdoor */
@@ -1149,7 +1150,7 @@ static void sst_set_vidmod_att_ti(struct fb_info *info, const int bpp)
static void sst_set_vidmod_ics(struct fb_info *info, const int bpp)
{
- struct sstfb_par *par = (struct sstfb_par *) info->par;
+ struct sstfb_par *par = info->par;
switch(bpp) {
case 16:
@@ -1308,7 +1309,7 @@ static int __devinit sst_init(struct fb_info *info, struct sstfb_par *par)
static void __devexit sst_shutdown(struct fb_info *info)
{
- struct sstfb_par *par = (struct sstfb_par *) info->par;
+ struct sstfb_par *par = info->par;
struct pci_dev *dev = par->dev;
struct pll_timing gfx_timings;
int Fout;
@@ -1394,12 +1395,6 @@ static int __devinit sstfb_probe(struct pci_dev *pdev,
struct sst_spec *spec;
int err;
- struct all_info {
- struct fb_info info;
- struct sstfb_par par;
- u32 pseudo_palette[16];
- } *all;
-
/* Enable device in PCI config. */
if ((err=pci_enable_device(pdev))) {
eprintk("cannot enable device\n");
@@ -1407,14 +1402,13 @@ static int __devinit sstfb_probe(struct pci_dev *pdev,
}
/* Allocate the fb and par structures. */
- all = kmalloc(sizeof(*all), GFP_KERNEL);
- if (!all)
+ info = framebuffer_alloc(sizeof(struct sstfb_par), &pdev->dev);
+ if (!info)
return -ENOMEM;
- memset(all, 0, sizeof(*all));
- pci_set_drvdata(pdev, all);
+
+ pci_set_drvdata(pdev, info);
- info = &all->info;
- par = info->par = &all->par;
+ par = info->par;
fix = &info->fix;
par->type = id->driver_data;
@@ -1471,7 +1465,7 @@ static int __devinit sstfb_probe(struct pci_dev *pdev,
info->flags = FBINFO_DEFAULT;
info->fbops = &sstfb_ops;
- info->pseudo_palette = &all->pseudo_palette;
+ info->pseudo_palette = par->palette;
fix->type = FB_TYPE_PACKED_PIXELS;
fix->visual = FB_VISUAL_TRUECOLOR;
@@ -1527,7 +1521,7 @@ fail_mmio_remap:
fail_fb_mem:
release_mem_region(fix->mmio_start, info->fix.mmio_len);
fail_mmio_mem:
- kfree(info);
+ framebuffer_release(info);
return -ENXIO; /* no voodoo detected */
}
@@ -1537,7 +1531,7 @@ static void __devexit sstfb_remove(struct pci_dev *pdev)
struct fb_info *info;
info = pci_get_drvdata(pdev);
- par = (struct sstfb_par *) info->par;
+ par = info->par;
sst_shutdown(info);
unregister_framebuffer(info);
@@ -1545,7 +1539,7 @@ static void __devexit sstfb_remove(struct pci_dev *pdev)
iounmap(par->mmio_vbase);
release_mem_region(info->fix.smem_start, 0x400000);
release_mem_region(info->fix.mmio_start, info->fix.mmio_len);
- kfree(info);
+ framebuffer_release(info);
}
@@ -1613,7 +1607,7 @@ static int sstfb_dump_regs(struct fb_info *info)
const int pci_s = sizeof(pci_regs)/sizeof(pci_regs[0]);
const int sst_s = sizeof(sst_regs)/sizeof(sst_regs[0]);
- struct sstfb_par *par = (struct sstfb_par *) info->par;
+ struct sstfb_par *par = info->par;
struct pci_dev *dev = par->dev;
u32 pci_res[pci_s];
u32 sst_res[sst_s];
diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c
index fbb17332afd..56d71d6e9a7 100644
--- a/drivers/video/stifb.c
+++ b/drivers/video/stifb.c
@@ -3,7 +3,7 @@
* Low level Frame buffer driver for HP workstations with
* STI (standard text interface) video firmware.
*
- * Copyright (C) 2001-2004 Helge Deller <deller@gmx.de>
+ * Copyright (C) 2001-2005 Helge Deller <deller@gmx.de>
* Portions Copyright (C) 2001 Thomas Bogendoerfer <tsbogend@alpha.franken.de>
*
* Based on:
@@ -73,16 +73,13 @@
#include "sticore.h"
/* REGION_BASE(fb_info, index) returns the virtual address for region <index> */
-#ifdef __LP64__
- #define REGION_BASE(fb_info, index) \
- (fb_info->sti->glob_cfg->region_ptrs[index] | 0xffffffff00000000)
-#else
- #define REGION_BASE(fb_info, index) \
- fb_info->sti->glob_cfg->region_ptrs[index]
-#endif
+#define REGION_BASE(fb_info, index) \
+ F_EXTEND(fb_info->sti->glob_cfg->region_ptrs[index])
#define NGLEDEVDEPROM_CRT_REGION 1
+#define NR_PALETTE 256
+
typedef struct {
__s32 video_config_reg;
__s32 misc_video_start;
@@ -112,7 +109,7 @@ struct stifb_info {
ngle_rom_t ngle_rom;
struct sti_struct *sti;
int deviceSpecificConfig;
- u32 pseudo_palette[256];
+ u32 pseudo_palette[16];
};
static int __initdata stifb_bpp_pref[MAX_STI_ROMS];
@@ -352,10 +349,10 @@ ARTIST_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable)
#define IS_888_DEVICE(fb) \
(!(IS_24_DEVICE(fb)))
-#define GET_FIFO_SLOTS(fb, cnt, numslots) \
-{ while (cnt < numslots) \
+#define GET_FIFO_SLOTS(fb, cnt, numslots) \
+{ while (cnt < numslots) \
cnt = READ_WORD(fb, REG_34); \
- cnt -= numslots; \
+ cnt -= numslots; \
}
#define IndexedDcd 0 /* Pixel data is indexed (pseudo) color */
@@ -995,7 +992,7 @@ stifb_setcolreg(u_int regno, u_int red, u_int green,
struct stifb_info *fb = (struct stifb_info *) info;
u32 color;
- if (regno >= 256) /* no. of hw registers */
+ if (regno >= NR_PALETTE)
return 1;
red >>= 8;
@@ -1005,8 +1002,8 @@ stifb_setcolreg(u_int regno, u_int red, u_int green,
DEBUG_OFF();
START_IMAGE_COLORMAP_ACCESS(fb);
-
- if (fb->info.var.grayscale) {
+
+ if (unlikely(fb->info.var.grayscale)) {
/* gray = 0.30*R + 0.59*G + 0.11*B */
color = ((red * 77) +
(green * 151) +
@@ -1017,17 +1014,17 @@ stifb_setcolreg(u_int regno, u_int red, u_int green,
(blue));
}
- if (info->var.bits_per_pixel == 32) {
- ((u32 *)(info->pseudo_palette))[regno] =
- (red << info->var.red.offset) |
- (green << info->var.green.offset) |
- (blue << info->var.blue.offset);
- } else {
- ((u32 *)(info->pseudo_palette))[regno] = regno;
+ if (fb->info.fix.visual == FB_VISUAL_DIRECTCOLOR) {
+ struct fb_var_screeninfo *var = &fb->info.var;
+ if (regno < 16)
+ ((u32 *)fb->info.pseudo_palette)[regno] =
+ regno << var->red.offset |
+ regno << var->green.offset |
+ regno << var->blue.offset;
}
WRITE_IMAGE_COLOR(fb, regno, color);
-
+
if (fb->id == S9000_ID_HCRX) {
NgleLutBltCtl lutBltCtl;
@@ -1066,9 +1063,9 @@ stifb_blank(int blank_mode, struct fb_info *info)
case S9000_ID_HCRX:
HYPER_ENABLE_DISABLE_DISPLAY(fb, enable);
break;
- case S9000_ID_A1659A:; /* fall through */
- case S9000_ID_TIMBER:;
- case CRX24_OVERLAY_PLANES:;
+ case S9000_ID_A1659A: /* fall through */
+ case S9000_ID_TIMBER:
+ case CRX24_OVERLAY_PLANES:
default:
ENABLE_DISABLE_DISPLAY(fb, enable);
break;
@@ -1250,12 +1247,10 @@ stifb_init_fb(struct sti_struct *sti, int bpp_pref)
memset(&fb->ngle_rom, 0, sizeof(fb->ngle_rom));
if ((fb->sti->regions_phys[0] & 0xfc000000) ==
(fb->sti->regions_phys[2] & 0xfc000000))
- sti_rom_address = fb->sti->regions_phys[0];
+ sti_rom_address = F_EXTEND(fb->sti->regions_phys[0]);
else
- sti_rom_address = fb->sti->regions_phys[1];
-#ifdef __LP64__
- sti_rom_address |= 0xffffffff00000000;
-#endif
+ sti_rom_address = F_EXTEND(fb->sti->regions_phys[1]);
+
fb->deviceSpecificConfig = gsc_readl(sti_rom_address);
if (IS_24_DEVICE(fb)) {
if (bpp_pref == 8 || bpp_pref == 32)
@@ -1315,7 +1310,7 @@ stifb_init_fb(struct sti_struct *sti, int bpp_pref)
break;
case 32:
fix->type = FB_TYPE_PACKED_PIXELS;
- fix->visual = FB_VISUAL_TRUECOLOR;
+ fix->visual = FB_VISUAL_DIRECTCOLOR;
var->red.length = var->green.length = var->blue.length = var->transp.length = 8;
var->blue.offset = 0;
var->green.offset = 8;
@@ -1337,7 +1332,7 @@ stifb_init_fb(struct sti_struct *sti, int bpp_pref)
info->pseudo_palette = &fb->pseudo_palette;
/* This has to be done !!! */
- fb_alloc_cmap(&info->cmap, 256, 0);
+ fb_alloc_cmap(&info->cmap, NR_PALETTE, 0);
stifb_init_display(fb);
if (!request_mem_region(fix->smem_start, fix->smem_len, "stifb fb")) {
@@ -1488,7 +1483,3 @@ module_exit(stifb_cleanup);
MODULE_AUTHOR("Helge Deller <deller@gmx.de>, Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
MODULE_DESCRIPTION("Framebuffer driver for HP's NGLE series graphics cards in HP PARISC machines");
MODULE_LICENSE("GPL v2");
-
-MODULE_PARM(bpp, "i");
-MODULE_PARM_DESC(mem, "Bits per pixel (default: 8)");
-
diff --git a/drivers/video/tdfxfb.c b/drivers/video/tdfxfb.c
index 9d53387e6a6..3e7baf4c9fa 100644
--- a/drivers/video/tdfxfb.c
+++ b/drivers/video/tdfxfb.c
@@ -291,7 +291,7 @@ static inline void banshee_make_room(struct tdfx_par *par, int size)
static int banshee_wait_idle(struct fb_info *info)
{
- struct tdfx_par *par = (struct tdfx_par *) info->par;
+ struct tdfx_par *par = info->par;
int i = 0;
banshee_make_room(par, 1);
@@ -364,7 +364,7 @@ static u32 do_calc_pll(int freq, int* freq_out)
static void do_write_regs(struct fb_info *info, struct banshee_reg* reg)
{
- struct tdfx_par *par = (struct tdfx_par *) info->par;
+ struct tdfx_par *par = info->par;
int i;
banshee_wait_idle(info);
@@ -469,7 +469,7 @@ static unsigned long do_lfb_size(struct tdfx_par *par, unsigned short dev_id)
static int tdfxfb_check_var(struct fb_var_screeninfo *var,struct fb_info *info)
{
- struct tdfx_par *par = (struct tdfx_par *) info->par;
+ struct tdfx_par *par = info->par;
u32 lpitch;
if (var->bits_per_pixel != 8 && var->bits_per_pixel != 16 &&
@@ -558,7 +558,7 @@ static int tdfxfb_check_var(struct fb_var_screeninfo *var,struct fb_info *info)
static int tdfxfb_set_par(struct fb_info *info)
{
- struct tdfx_par *par = (struct tdfx_par *) info->par;
+ struct tdfx_par *par = info->par;
u32 hdispend, hsyncsta, hsyncend, htotal;
u32 hd, hs, he, ht, hbs, hbe;
u32 vd, vs, ve, vt, vbs, vbe;
@@ -780,7 +780,7 @@ static int tdfxfb_set_par(struct fb_info *info)
static int tdfxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue,unsigned transp,struct fb_info *info)
{
- struct tdfx_par *par = (struct tdfx_par *) info->par;
+ struct tdfx_par *par = info->par;
u32 rgbcol;
if (regno >= info->cmap.len || regno > 255) return 1;
@@ -794,11 +794,15 @@ static int tdfxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
break;
/* Truecolor has no hardware color palettes. */
case FB_VISUAL_TRUECOLOR:
- rgbcol = (CNVT_TOHW( red, info->var.red.length) << info->var.red.offset) |
- (CNVT_TOHW( green, info->var.green.length) << info->var.green.offset) |
- (CNVT_TOHW( blue, info->var.blue.length) << info->var.blue.offset) |
- (CNVT_TOHW( transp, info->var.transp.length) << info->var.transp.offset);
- ((u32*)(info->pseudo_palette))[regno] = rgbcol;
+ rgbcol = (CNVT_TOHW( red, info->var.red.length) <<
+ info->var.red.offset) |
+ (CNVT_TOHW( green, info->var.green.length) <<
+ info->var.green.offset) |
+ (CNVT_TOHW( blue, info->var.blue.length) <<
+ info->var.blue.offset) |
+ (CNVT_TOHW( transp, info->var.transp.length) <<
+ info->var.transp.offset);
+ par->palette[regno] = rgbcol;
break;
default:
DPRINTK("bad depth %u\n", info->var.bits_per_pixel);
@@ -810,7 +814,7 @@ static int tdfxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
/* 0 unblank, 1 blank, 2 no vsync, 3 no hsync, 4 off */
static int tdfxfb_blank(int blank, struct fb_info *info)
{
- struct tdfx_par *par = (struct tdfx_par *) info->par;
+ struct tdfx_par *par = info->par;
u32 dacmode, state = 0, vgablank = 0;
dacmode = tdfx_inl(par, DACMODE);
@@ -855,7 +859,7 @@ static int tdfxfb_blank(int blank, struct fb_info *info)
static int tdfxfb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- struct tdfx_par *par = (struct tdfx_par *) info->par;
+ struct tdfx_par *par = info->par;
u32 addr;
if (nopan || var->xoffset || (var->yoffset > var->yres_virtual))
@@ -878,7 +882,7 @@ static int tdfxfb_pan_display(struct fb_var_screeninfo *var,
*/
static void tdfxfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct tdfx_par *par = (struct tdfx_par *) info->par;
+ struct tdfx_par *par = info->par;
u32 bpp = info->var.bits_per_pixel;
u32 stride = info->fix.line_length;
u32 fmt= stride | ((bpp+((bpp==8) ? 0 : 8)) << 13);
@@ -894,7 +898,7 @@ static void tdfxfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect
if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR) {
tdfx_outl(par, COLORFORE, rect->color);
} else { /* FB_VISUAL_TRUECOLOR */
- tdfx_outl(par, COLORFORE, ((u32*)(info->pseudo_palette))[rect->color]);
+ tdfx_outl(par, COLORFORE, par->palette[rect->color]);
}
tdfx_outl(par, COMMAND_2D, COMMAND_2D_FILLRECT | (tdfx_rop << 24));
tdfx_outl(par, DSTSIZE, rect->width | (rect->height << 16));
@@ -906,7 +910,7 @@ static void tdfxfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect
*/
static void tdfxfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
- struct tdfx_par *par = (struct tdfx_par *) info->par;
+ struct tdfx_par *par = info->par;
u32 sx = area->sx, sy = area->sy, dx = area->dx, dy = area->dy;
u32 bpp = info->var.bits_per_pixel;
u32 stride = info->fix.line_length;
@@ -938,7 +942,7 @@ static void tdfxfb_copyarea(struct fb_info *info, const struct fb_copyarea *area
static void tdfxfb_imageblit(struct fb_info *info, const struct fb_image *image)
{
- struct tdfx_par *par = (struct tdfx_par *) info->par;
+ struct tdfx_par *par = info->par;
int size = image->height * ((image->width * image->depth + 7)>>3);
int fifo_free;
int i, stride = info->fix.line_length;
@@ -961,8 +965,10 @@ static void tdfxfb_imageblit(struct fb_info *info, const struct fb_image *image)
break;
case FB_VISUAL_TRUECOLOR:
default:
- tdfx_outl(par, COLORFORE, ((u32*)(info->pseudo_palette))[image->fg_color]);
- tdfx_outl(par, COLORBACK, ((u32*)(info->pseudo_palette))[image->bg_color]);
+ tdfx_outl(par, COLORFORE,
+ par->palette[image->fg_color]);
+ tdfx_outl(par, COLORBACK,
+ par->palette[image->bg_color]);
}
#ifdef __BIG_ENDIAN
srcfmt = 0x400000 | BIT(20);
@@ -1007,7 +1013,7 @@ static void tdfxfb_imageblit(struct fb_info *info, const struct fb_image *image)
#ifdef TDFX_HARDWARE_CURSOR
static int tdfxfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
- struct tdfx_par *par = (struct tdfx_par *) info->par;
+ struct tdfx_par *par = info->par;
unsigned long flags;
/*
@@ -1157,18 +1163,17 @@ static int __devinit tdfxfb_probe(struct pci_dev *pdev,
{
struct tdfx_par *default_par;
struct fb_info *info;
- int size, err, lpitch;
+ int err, lpitch;
if ((err = pci_enable_device(pdev))) {
printk(KERN_WARNING "tdfxfb: Can't enable pdev: %d\n", err);
return err;
}
- size = sizeof(struct tdfx_par)+256*sizeof(u32);
+ info = framebuffer_alloc(sizeof(struct tdfx_par), &pdev->dev);
- info = framebuffer_alloc(size, &pdev->dev);
-
- if (!info) return -ENOMEM;
+ if (!info)
+ return -ENOMEM;
default_par = info->par;
@@ -1248,7 +1253,7 @@ static int __devinit tdfxfb_probe(struct pci_dev *pdev,
info->fbops = &tdfxfb_ops;
info->fix = tdfx_fix;
- info->pseudo_palette = (void *)(default_par + 1);
+ info->pseudo_palette = default_par->palette;
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
#ifdef CONFIG_FB_3DFX_ACCEL
info->flags |= FBINFO_HWACCEL_FILLRECT |
@@ -1307,7 +1312,7 @@ out_err:
}
#ifndef MODULE
-void tdfxfb_setup(char *options)
+static void tdfxfb_setup(char *options)
{
char* this_opt;
@@ -1340,7 +1345,7 @@ void tdfxfb_setup(char *options)
static void __devexit tdfxfb_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
- struct tdfx_par *par = (struct tdfx_par *) info->par;
+ struct tdfx_par *par = info->par;
unregister_framebuffer(info);
iounmap(par->regbase_virt);
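
The tdfxfb hunks above stop tacking the 256-entry truecolor pseudo-palette onto the end of the tdfx_par allocation (and casting info->pseudo_palette back to u32 *); the palette now lives inside the par structure itself. A minimal sketch of the resulting layout, using a hypothetical foofb driver rather than tdfxfb's real fields:

#include <linux/fb.h>
#include <linux/pci.h>

/* Illustrative driver-private data: the pseudo-palette is typed storage
 * inside the structure, so no pointer arithmetic past the allocation and
 * no casts are needed.  The array size here is only an example. */
struct foofb_par {
	void __iomem	*regbase_virt;
	u32		palette[256];
};

static int foofb_probe_sketch(struct pci_dev *pdev)
{
	struct fb_info *info;
	struct foofb_par *par;

	/* framebuffer_alloc() reserves sizeof(struct foofb_par) for info->par */
	info = framebuffer_alloc(sizeof(struct foofb_par), &pdev->dev);
	if (!info)
		return -ENOMEM;

	par = info->par;			/* info->par is void *, no cast */
	info->pseudo_palette = par->palette;	/* freed together with info */
	return 0;
}
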
diff --git a/drivers/video/valkyriefb.c b/drivers/video/valkyriefb.c
index ce97ec8eae9..2bdeb4baa95 100644
--- a/drivers/video/valkyriefb.c
+++ b/drivers/video/valkyriefb.c
@@ -342,19 +342,19 @@ int __init valkyriefb_init(void)
#else /* ppc (!CONFIG_MAC) */
{
struct device_node *dp;
+ struct resource r;
- dp = find_devices("valkyrie");
+ dp = of_find_node_by_name(NULL, "valkyrie");
if (dp == 0)
return 0;
- if (dp->n_addrs != 1) {
- printk(KERN_ERR "expecting 1 address for valkyrie (got %d)\n",
- dp->n_addrs);
+ if (of_address_to_resource(dp, 0, &r)) {
+ printk(KERN_ERR "can't find address for valkyrie\n");
return 0;
}
- frame_buffer_phys = dp->addrs[0].address;
- cmap_regs_phys = dp->addrs[0].address+0x304000;
+ frame_buffer_phys = r.start;
+ cmap_regs_phys = r.start + 0x304000;
flags = _PAGE_WRITETHRU;
}
#endif /* ppc (!CONFIG_MAC) */
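
The valkyriefb hunk replaces the old find_devices()/dp->addrs[] walk with of_find_node_by_name() plus of_address_to_resource(), which translates the node's first "reg" entry into a CPU physical address range. A sketch of the same lookup, with "examplefb" standing in for a real node name (the declarations come from <asm/prom.h> in this tree):

#include <linux/ioport.h>
#include <asm/prom.h>

/* Sketch only: find a node by name and recover the physical base of its
 * first register window. */
static unsigned long examplefb_base_phys(void)
{
	struct device_node *dp;
	struct resource r;
	int err;

	dp = of_find_node_by_name(NULL, "examplefb");
	if (dp == NULL)
		return 0;

	err = of_address_to_resource(dp, 0, &r);
	of_node_put(dp);		/* of_find_node_by_name() took a reference */
	if (err)
		return 0;

	return r.start;
}
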
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index 3e58ddc2bc3..8982e540214 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -57,7 +57,6 @@ static unsigned short *pmi_base = NULL;
static void (*pmi_start)(void);
static void (*pmi_pal)(void);
static int depth;
-static int vga_compat;
/* --------------------------------------------------------------------- */
@@ -67,15 +66,6 @@ static int vesafb_pan_display(struct fb_var_screeninfo *var,
#ifdef __i386__
int offset;
- if (!ypan)
- return -EINVAL;
- if (var->xoffset)
- return -EINVAL;
- if (var->yoffset > var->yres_virtual)
- return -EINVAL;
- if ((ypan==1) && var->yoffset+var->yres > var->yres_virtual)
- return -EINVAL;
-
offset = (var->yoffset * info->fix.line_length + var->xoffset) / 4;
__asm__ __volatile__(
@@ -90,37 +80,6 @@ static int vesafb_pan_display(struct fb_var_screeninfo *var,
return 0;
}
-static int vesafb_blank(int blank, struct fb_info *info)
-{
- int err = 1;
-
- if (vga_compat) {
- int loop = 10000;
- u8 seq = 0, crtc17 = 0;
-
- if (blank == FB_BLANK_POWERDOWN) {
- seq = 0x20;
- crtc17 = 0x00;
- err = 0;
- } else {
- seq = 0x00;
- crtc17 = 0x80;
- err = (blank == FB_BLANK_UNBLANK) ? 0 : -EINVAL;
- }
-
- vga_wseq(NULL, 0x00, 0x01);
- seq |= vga_rseq(NULL, 0x01) & ~0x20;
- vga_wseq(NULL, 0x00, seq);
-
- crtc17 |= vga_rcrt(NULL, 0x17) & ~0x80;
- while (loop--);
- vga_wcrt(NULL, 0x17, crtc17);
- vga_wseq(NULL, 0x00, 0x03);
- }
-
- return err;
-}
-
static void vesa_setpalette(int regno, unsigned red, unsigned green,
unsigned blue)
{
@@ -205,7 +164,6 @@ static struct fb_ops vesafb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = vesafb_setcolreg,
.fb_pan_display = vesafb_pan_display,
- .fb_blank = vesafb_blank,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -459,9 +417,8 @@ static int __init vesafb_probe(struct platform_device *dev)
info->flags = FBINFO_FLAG_DEFAULT |
(ypan) ? FBINFO_HWACCEL_YPAN : 0;
- vga_compat = (screen_info.capabilities & 2) ? 0 : 1;
- printk("vesafb: Mode is %sVGA compatible\n",
- (vga_compat) ? "" : "not ");
+ if (!ypan)
+ info->fbops->fb_pan_display = NULL;
if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
err = -ENOMEM;
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index 226ae8a8848..f3f16fd9f23 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -705,15 +705,7 @@ static int vga16fb_setcolreg(unsigned regno, unsigned red, unsigned green,
static int vga16fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- if (var->xoffset + info->var.xres > info->var.xres_virtual ||
- var->yoffset + info->var.yres > info->var.yres_virtual)
- return -EINVAL;
-
vga16fb_pan_var(info, var);
-
- info->var.xoffset = var->xoffset;
- info->var.yoffset = var->yoffset;
- info->var.vmode &= ~FB_VMODE_YWRAP;
return 0;
}
diff --git a/drivers/video/vgastate.c b/drivers/video/vgastate.c
index d9e01daee63..15179ec6233 100644
--- a/drivers/video/vgastate.c
+++ b/drivers/video/vgastate.c
@@ -356,10 +356,11 @@ int save_vga(struct vgastate *state)
{
struct regstate *saved;
- saved = kmalloc(sizeof(struct regstate), GFP_KERNEL);
+ saved = kzalloc(sizeof(struct regstate), GFP_KERNEL);
+
if (saved == NULL)
return 1;
- memset (saved, 0, sizeof(struct regstate));
+
state->vidstate = (void *)saved;
if (state->flags & VGA_SAVE_CMAP) {
diff --git a/drivers/w1/Kconfig b/drivers/w1/Kconfig
index 9a1e00dd3e0..4baf61a2232 100644
--- a/drivers/w1/Kconfig
+++ b/drivers/w1/Kconfig
@@ -3,7 +3,7 @@ menu "Dallas's 1-wire bus"
config W1
tristate "Dallas's 1-wire support"
---help---
- Dallas's 1-wire bus is usefull to connect slow 1-pin devices
+ Dallas's 1-wire bus is useful to connect slow 1-pin devices
such as iButtons and thermal sensors.
If you want W1 support, you should say Y here.
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c
index 1a409c2c320..7aa2d3de6d3 100644
--- a/drivers/zorro/proc.c
+++ b/drivers/zorro/proc.c
@@ -45,7 +45,7 @@ proc_bus_zorro_lseek(struct file *file, loff_t off, int whence)
}
static ssize_t
-proc_bus_zorro_read(struct file *file, char *buf, size_t nbytes, loff_t *ppos)
+proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
struct inode *ino = file->f_dentry->d_inode;
struct proc_dir_entry *dp = PDE(ino);
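
The only change to proc_bus_zorro_read() is the __user annotation on buf, which tells sparse that the pointer lives in userspace and may only be reached through the copy helpers. A small illustrative handler (not the zorro code) showing the access pattern such an annotation implies:

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/uaccess.h>

/* Sketch only: a __user buffer is never dereferenced directly. */
static ssize_t example_read(char __user *buf, size_t nbytes)
{
	char kbuf[64];
	size_t n = min(nbytes, sizeof(kbuf));

	memset(kbuf, 0, sizeof(kbuf));
	if (copy_to_user(buf, kbuf, n))
		return -EFAULT;

	return n;
}
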
diff --git a/fs/9p/9p.c b/fs/9p/9p.c
index e847f504a47..1a6d08761f3 100644
--- a/fs/9p/9p.c
+++ b/fs/9p/9p.c
@@ -1,8 +1,9 @@
/*
* linux/fs/9p/9p.c
*
- * This file contains functions 9P2000 functions
+ * This file contains functions to perform synchronous 9P calls
*
+ * Copyright (C) 2004 by Latchesar Ionkov <lucho@ionkov.net>
* Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
*
@@ -33,6 +34,7 @@
#include "debug.h"
#include "v9fs.h"
#include "9p.h"
+#include "conv.h"
#include "mux.h"
/**
@@ -46,16 +48,21 @@
int
v9fs_t_version(struct v9fs_session_info *v9ses, u32 msize,
- char *version, struct v9fs_fcall **fcall)
+ char *version, struct v9fs_fcall **rcp)
{
- struct v9fs_fcall msg;
+ int ret;
+ struct v9fs_fcall *tc;
dprintk(DEBUG_9P, "msize: %d version: %s\n", msize, version);
- msg.id = TVERSION;
- msg.params.tversion.msize = msize;
- msg.params.tversion.version = version;
+ tc = v9fs_create_tversion(msize, version);
- return v9fs_mux_rpc(v9ses, &msg, fcall);
+ if (!IS_ERR(tc)) {
+ ret = v9fs_mux_rpc(v9ses->mux, tc, rcp);
+ kfree(tc);
+ } else
+ ret = PTR_ERR(tc);
+
+ return ret;
}
/**
@@ -71,19 +78,45 @@ v9fs_t_version(struct v9fs_session_info *v9ses, u32 msize,
int
v9fs_t_attach(struct v9fs_session_info *v9ses, char *uname, char *aname,
- u32 fid, u32 afid, struct v9fs_fcall **fcall)
+ u32 fid, u32 afid, struct v9fs_fcall **rcp)
{
- struct v9fs_fcall msg;
+ int ret;
+ struct v9fs_fcall* tc;
dprintk(DEBUG_9P, "uname '%s' aname '%s' fid %d afid %d\n", uname,
aname, fid, afid);
- msg.id = TATTACH;
- msg.params.tattach.fid = fid;
- msg.params.tattach.afid = afid;
- msg.params.tattach.uname = uname;
- msg.params.tattach.aname = aname;
- return v9fs_mux_rpc(v9ses, &msg, fcall);
+ tc = v9fs_create_tattach(fid, afid, uname, aname);
+ if (!IS_ERR(tc)) {
+ ret = v9fs_mux_rpc(v9ses->mux, tc, rcp);
+ kfree(tc);
+ } else
+ ret = PTR_ERR(tc);
+
+ return ret;
+}
+
+static void v9fs_t_clunk_cb(void *a, struct v9fs_fcall *tc,
+ struct v9fs_fcall *rc, int err)
+{
+ int fid;
+ struct v9fs_session_info *v9ses;
+
+ if (err)
+ return;
+
+ fid = tc->params.tclunk.fid;
+ kfree(tc);
+
+ if (!rc)
+ return;
+
+ dprintk(DEBUG_9P, "tcall id %d rcall id %d\n", tc->id, rc->id);
+ v9ses = a;
+ if (rc->id == RCLUNK)
+ v9fs_put_idpool(fid, &v9ses->fidpool);
+
+ kfree(rc);
}
/**
@@ -95,16 +128,25 @@ v9fs_t_attach(struct v9fs_session_info *v9ses, char *uname, char *aname,
*/
int
-v9fs_t_clunk(struct v9fs_session_info *v9ses, u32 fid,
- struct v9fs_fcall **fcall)
+v9fs_t_clunk(struct v9fs_session_info *v9ses, u32 fid)
{
- struct v9fs_fcall msg;
+ int ret;
+ struct v9fs_fcall *tc, *rc;
dprintk(DEBUG_9P, "fid %d\n", fid);
- msg.id = TCLUNK;
- msg.params.tclunk.fid = fid;
- return v9fs_mux_rpc(v9ses, &msg, fcall);
+ rc = NULL;
+ tc = v9fs_create_tclunk(fid);
+ if (!IS_ERR(tc))
+ ret = v9fs_mux_rpc(v9ses->mux, tc, &rc);
+ else
+ ret = PTR_ERR(tc);
+
+ if (ret)
+ dprintk(DEBUG_ERROR, "failed fid %d err %d\n", fid, ret);
+
+ v9fs_t_clunk_cb(v9ses, tc, rc, ret);
+ return ret;
}
/**
@@ -114,14 +156,21 @@ v9fs_t_clunk(struct v9fs_session_info *v9ses, u32 fid,
*
*/
-int v9fs_t_flush(struct v9fs_session_info *v9ses, u16 tag)
+int v9fs_t_flush(struct v9fs_session_info *v9ses, u16 oldtag)
{
- struct v9fs_fcall msg;
+ int ret;
+ struct v9fs_fcall *tc;
+
+ dprintk(DEBUG_9P, "oldtag %d\n", oldtag);
+
+ tc = v9fs_create_tflush(oldtag);
+ if (!IS_ERR(tc)) {
+ ret = v9fs_mux_rpc(v9ses->mux, tc, NULL);
+ kfree(tc);
+ } else
+ ret = PTR_ERR(tc);
- dprintk(DEBUG_9P, "oldtag %d\n", tag);
- msg.id = TFLUSH;
- msg.params.tflush.oldtag = tag;
- return v9fs_mux_rpc(v9ses, &msg, NULL);
+ return ret;
}
/**
@@ -133,17 +182,22 @@ int v9fs_t_flush(struct v9fs_session_info *v9ses, u16 tag)
*/
int
-v9fs_t_stat(struct v9fs_session_info *v9ses, u32 fid, struct v9fs_fcall **fcall)
+v9fs_t_stat(struct v9fs_session_info *v9ses, u32 fid, struct v9fs_fcall **rcp)
{
- struct v9fs_fcall msg;
+ int ret;
+ struct v9fs_fcall *tc;
dprintk(DEBUG_9P, "fid %d\n", fid);
- if (fcall)
- *fcall = NULL;
- msg.id = TSTAT;
- msg.params.tstat.fid = fid;
- return v9fs_mux_rpc(v9ses, &msg, fcall);
+ ret = -ENOMEM;
+ tc = v9fs_create_tstat(fid);
+ if (!IS_ERR(tc)) {
+ ret = v9fs_mux_rpc(v9ses->mux, tc, rcp);
+ kfree(tc);
+ } else
+ ret = PTR_ERR(tc);
+
+ return ret;
}
/**
@@ -157,16 +211,21 @@ v9fs_t_stat(struct v9fs_session_info *v9ses, u32 fid, struct v9fs_fcall **fcall)
int
v9fs_t_wstat(struct v9fs_session_info *v9ses, u32 fid,
- struct v9fs_stat *stat, struct v9fs_fcall **fcall)
+ struct v9fs_wstat *wstat, struct v9fs_fcall **rcp)
{
- struct v9fs_fcall msg;
+ int ret;
+ struct v9fs_fcall *tc;
+
+ dprintk(DEBUG_9P, "fid %d\n", fid);
- dprintk(DEBUG_9P, "fid %d length %d\n", fid, (int)stat->length);
- msg.id = TWSTAT;
- msg.params.twstat.fid = fid;
- msg.params.twstat.stat = stat;
+ tc = v9fs_create_twstat(fid, wstat, v9ses->extended);
+ if (!IS_ERR(tc)) {
+ ret = v9fs_mux_rpc(v9ses->mux, tc, rcp);
+ kfree(tc);
+ } else
+ ret = PTR_ERR(tc);
- return v9fs_mux_rpc(v9ses, &msg, fcall);
+ return ret;
}
/**
@@ -183,23 +242,27 @@ v9fs_t_wstat(struct v9fs_session_info *v9ses, u32 fid,
int
v9fs_t_walk(struct v9fs_session_info *v9ses, u32 fid, u32 newfid,
- char *name, struct v9fs_fcall **fcall)
+ char *name, struct v9fs_fcall **rcp)
{
- struct v9fs_fcall msg;
+ int ret;
+ struct v9fs_fcall *tc;
+ int nwname;
dprintk(DEBUG_9P, "fid %d newfid %d wname '%s'\n", fid, newfid, name);
- msg.id = TWALK;
- msg.params.twalk.fid = fid;
- msg.params.twalk.newfid = newfid;
-
- if (name) {
- msg.params.twalk.nwname = 1;
- msg.params.twalk.wnames = &name;
- } else {
- msg.params.twalk.nwname = 0;
- }
-
- return v9fs_mux_rpc(v9ses, &msg, fcall);
+
+ if (name)
+ nwname = 1;
+ else
+ nwname = 0;
+
+ tc = v9fs_create_twalk(fid, newfid, nwname, &name);
+ if (!IS_ERR(tc)) {
+ ret = v9fs_mux_rpc(v9ses->mux, tc, rcp);
+ kfree(tc);
+ } else
+ ret = PTR_ERR(tc);
+
+ return ret;
}
/**
@@ -214,19 +277,21 @@ v9fs_t_walk(struct v9fs_session_info *v9ses, u32 fid, u32 newfid,
int
v9fs_t_open(struct v9fs_session_info *v9ses, u32 fid, u8 mode,
- struct v9fs_fcall **fcall)
+ struct v9fs_fcall **rcp)
{
- struct v9fs_fcall msg;
- long errorno = -1;
+ int ret;
+ struct v9fs_fcall *tc;
dprintk(DEBUG_9P, "fid %d mode %d\n", fid, mode);
- msg.id = TOPEN;
- msg.params.topen.fid = fid;
- msg.params.topen.mode = mode;
- errorno = v9fs_mux_rpc(v9ses, &msg, fcall);
+ tc = v9fs_create_topen(fid, mode);
+ if (!IS_ERR(tc)) {
+ ret = v9fs_mux_rpc(v9ses->mux, tc, rcp);
+ kfree(tc);
+ } else
+ ret = PTR_ERR(tc);
- return errorno;
+ return ret;
}
/**
@@ -239,14 +304,21 @@ v9fs_t_open(struct v9fs_session_info *v9ses, u32 fid, u8 mode,
int
v9fs_t_remove(struct v9fs_session_info *v9ses, u32 fid,
- struct v9fs_fcall **fcall)
+ struct v9fs_fcall **rcp)
{
- struct v9fs_fcall msg;
+ int ret;
+ struct v9fs_fcall *tc;
dprintk(DEBUG_9P, "fid %d\n", fid);
- msg.id = TREMOVE;
- msg.params.tremove.fid = fid;
- return v9fs_mux_rpc(v9ses, &msg, fcall);
+
+ tc = v9fs_create_tremove(fid);
+ if (!IS_ERR(tc)) {
+ ret = v9fs_mux_rpc(v9ses->mux, tc, rcp);
+ kfree(tc);
+ } else
+ ret = PTR_ERR(tc);
+
+ return ret;
}
/**
@@ -262,20 +334,22 @@ v9fs_t_remove(struct v9fs_session_info *v9ses, u32 fid,
int
v9fs_t_create(struct v9fs_session_info *v9ses, u32 fid, char *name,
- u32 perm, u8 mode, struct v9fs_fcall **fcall)
+ u32 perm, u8 mode, struct v9fs_fcall **rcp)
{
- struct v9fs_fcall msg;
+ int ret;
+ struct v9fs_fcall *tc;
dprintk(DEBUG_9P, "fid %d name '%s' perm %x mode %d\n",
fid, name, perm, mode);
- msg.id = TCREATE;
- msg.params.tcreate.fid = fid;
- msg.params.tcreate.name = name;
- msg.params.tcreate.perm = perm;
- msg.params.tcreate.mode = mode;
+ tc = v9fs_create_tcreate(fid, name, perm, mode);
+ if (!IS_ERR(tc)) {
+ ret = v9fs_mux_rpc(v9ses->mux, tc, rcp);
+ kfree(tc);
+ } else
+ ret = PTR_ERR(tc);
- return v9fs_mux_rpc(v9ses, &msg, fcall);
+ return ret;
}
/**
@@ -290,31 +364,29 @@ v9fs_t_create(struct v9fs_session_info *v9ses, u32 fid, char *name,
int
v9fs_t_read(struct v9fs_session_info *v9ses, u32 fid, u64 offset,
- u32 count, struct v9fs_fcall **fcall)
+ u32 count, struct v9fs_fcall **rcp)
{
- struct v9fs_fcall msg;
- struct v9fs_fcall *rc = NULL;
- long errorno = -1;
-
- dprintk(DEBUG_9P, "fid %d offset 0x%lx count 0x%x\n", fid,
- (long unsigned int)offset, count);
- msg.id = TREAD;
- msg.params.tread.fid = fid;
- msg.params.tread.offset = offset;
- msg.params.tread.count = count;
- errorno = v9fs_mux_rpc(v9ses, &msg, &rc);
-
- if (!errorno) {
- errorno = rc->params.rread.count;
- dump_data(rc->params.rread.data, rc->params.rread.count);
- }
-
- if (fcall)
- *fcall = rc;
- else
- kfree(rc);
-
- return errorno;
+ int ret;
+ struct v9fs_fcall *tc, *rc;
+
+ dprintk(DEBUG_9P, "fid %d offset 0x%llx count 0x%x\n", fid,
+ (long long unsigned) offset, count);
+
+ tc = v9fs_create_tread(fid, offset, count);
+ if (!IS_ERR(tc)) {
+ ret = v9fs_mux_rpc(v9ses->mux, tc, &rc);
+ if (!ret)
+ ret = rc->params.rread.count;
+ if (rcp)
+ *rcp = rc;
+ else
+ kfree(rc);
+
+ kfree(tc);
+ } else
+ ret = PTR_ERR(tc);
+
+ return ret;
}
/**
@@ -328,32 +400,30 @@ v9fs_t_read(struct v9fs_session_info *v9ses, u32 fid, u64 offset,
*/
int
-v9fs_t_write(struct v9fs_session_info *v9ses, u32 fid,
- u64 offset, u32 count, void *data, struct v9fs_fcall **fcall)
+v9fs_t_write(struct v9fs_session_info *v9ses, u32 fid, u64 offset, u32 count,
+ const char __user *data, struct v9fs_fcall **rcp)
{
- struct v9fs_fcall msg;
- struct v9fs_fcall *rc = NULL;
- long errorno = -1;
+ int ret;
+ struct v9fs_fcall *tc, *rc;
- dprintk(DEBUG_9P, "fid %d offset 0x%llx count 0x%x\n", fid,
- (unsigned long long)offset, count);
- dump_data(data, count);
+ dprintk(DEBUG_9P, "fid %d offset 0x%llx count 0x%x\n", fid,
+ (long long unsigned) offset, count);
- msg.id = TWRITE;
- msg.params.twrite.fid = fid;
- msg.params.twrite.offset = offset;
- msg.params.twrite.count = count;
- msg.params.twrite.data = data;
+ tc = v9fs_create_twrite(fid, offset, count, data);
+ if (!IS_ERR(tc)) {
+ ret = v9fs_mux_rpc(v9ses->mux, tc, &rc);
- errorno = v9fs_mux_rpc(v9ses, &msg, &rc);
+ if (!ret)
+ ret = rc->params.rwrite.count;
+ if (rcp)
+ *rcp = rc;
+ else
+ kfree(rc);
- if (!errorno)
- errorno = rc->params.rwrite.count;
+ kfree(tc);
+ } else
+ ret = PTR_ERR(tc);
- if (fcall)
- *fcall = rc;
- else
- kfree(rc);
-
- return errorno;
+ return ret;
}
+
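
After this rewrite every synchronous call in 9p.c has the same shape: build the tcall with a v9fs_create_* constructor, bail out with PTR_ERR() if that fails, hand it to v9fs_mux_rpc(), and kfree() the tcall (which carries its serialized sdata in the same allocation) afterwards. A condensed sketch of that shape, shown here with the Tstat constructor:

/* Sketch of the common request pattern used by v9fs_t_stat() and friends. */
static int v9fs_t_example(struct v9fs_session_info *v9ses, u32 fid,
			  struct v9fs_fcall **rcp)
{
	int ret;
	struct v9fs_fcall *tc;

	tc = v9fs_create_tstat(fid);
	if (IS_ERR(tc))
		return PTR_ERR(tc);

	ret = v9fs_mux_rpc(v9ses->mux, tc, rcp);	/* blocks for the reply */
	kfree(tc);					/* header and sdata, one block */
	return ret;
}
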
diff --git a/fs/9p/9p.h b/fs/9p/9p.h
index f55424216be..0cd374d9471 100644
--- a/fs/9p/9p.h
+++ b/fs/9p/9p.h
@@ -3,6 +3,7 @@
*
* 9P protocol definitions.
*
+ * Copyright (C) 2005 by Latchesar Ionkov <lucho@ionkov.net>
* Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
*
@@ -100,9 +101,18 @@ enum {
V9FS_QTFILE = 0x00,
};
+#define V9FS_NOTAG (u16)(~0)
+#define V9FS_NOFID (u32)(~0)
+#define V9FS_MAXWELEM 16
+
/* ample room for Twrite/Rread header (iounit) */
#define V9FS_IOHDRSZ 24
+struct v9fs_str {
+ u16 len;
+ char *str;
+};
+
/* qids are the unique ID for a file (like an inode */
struct v9fs_qid {
u8 type;
@@ -120,6 +130,29 @@ struct v9fs_stat {
u32 atime;
u32 mtime;
u64 length;
+ struct v9fs_str name;
+ struct v9fs_str uid;
+ struct v9fs_str gid;
+ struct v9fs_str muid;
+ struct v9fs_str extension; /* 9p2000.u extensions */
+ u32 n_uid; /* 9p2000.u extensions */
+ u32 n_gid; /* 9p2000.u extensions */
+ u32 n_muid; /* 9p2000.u extensions */
+};
+
+/* file metadata (stat) structure used to create Twstat message
+ The is similar to v9fs_stat, but the strings don't point to
+ the same memory block and should be freed separately
+*/
+struct v9fs_wstat {
+ u16 size;
+ u16 type;
+ u32 dev;
+ struct v9fs_qid qid;
+ u32 mode;
+ u32 atime;
+ u32 mtime;
+ u64 length;
char *name;
char *uid;
char *gid;
@@ -128,25 +161,24 @@ struct v9fs_stat {
u32 n_uid; /* 9p2000.u extensions */
u32 n_gid; /* 9p2000.u extensions */
u32 n_muid; /* 9p2000.u extensions */
- char data[0];
};
/* Structures for Protocol Operations */
struct Tversion {
u32 msize;
- char *version;
+ struct v9fs_str version;
};
struct Rversion {
u32 msize;
- char *version;
+ struct v9fs_str version;
};
struct Tauth {
u32 afid;
- char *uname;
- char *aname;
+ struct v9fs_str uname;
+ struct v9fs_str aname;
};
struct Rauth {
@@ -154,12 +186,12 @@ struct Rauth {
};
struct Rerror {
- char *error;
+ struct v9fs_str error;
u32 errno; /* 9p2000.u extension */
};
struct Tflush {
- u32 oldtag;
+ u16 oldtag;
};
struct Rflush {
@@ -168,8 +200,8 @@ struct Rflush {
struct Tattach {
u32 fid;
u32 afid;
- char *uname;
- char *aname;
+ struct v9fs_str uname;
+ struct v9fs_str aname;
};
struct Rattach {
@@ -179,13 +211,13 @@ struct Rattach {
struct Twalk {
u32 fid;
u32 newfid;
- u32 nwname;
- char **wnames;
+ u16 nwname;
+ struct v9fs_str wnames[16];
};
struct Rwalk {
- u32 nwqid;
- struct v9fs_qid *wqids;
+ u16 nwqid;
+ struct v9fs_qid wqids[16];
};
struct Topen {
@@ -200,7 +232,7 @@ struct Ropen {
struct Tcreate {
u32 fid;
- char *name;
+ struct v9fs_str name;
u32 perm;
u8 mode;
};
@@ -251,12 +283,12 @@ struct Tstat {
};
struct Rstat {
- struct v9fs_stat *stat;
+ struct v9fs_stat stat;
};
struct Twstat {
u32 fid;
- struct v9fs_stat *stat;
+ struct v9fs_stat stat;
};
struct Rwstat {
@@ -271,6 +303,7 @@ struct v9fs_fcall {
u32 size;
u8 id;
u16 tag;
+ void *sdata;
union {
struct Tversion tversion;
@@ -303,7 +336,9 @@ struct v9fs_fcall {
} params;
};
-#define FCALL_ERROR(fcall) (fcall ? fcall->params.rerror.error : "")
+#define PRINT_FCALL_ERROR(s, fcall) dprintk(DEBUG_ERROR, "%s: %.*s\n", s, \
+ fcall?fcall->params.rerror.error.len:0, \
+ fcall?fcall->params.rerror.error.str:"");
int v9fs_t_version(struct v9fs_session_info *v9ses, u32 msize,
char *version, struct v9fs_fcall **rcall);
@@ -311,8 +346,7 @@ int v9fs_t_version(struct v9fs_session_info *v9ses, u32 msize,
int v9fs_t_attach(struct v9fs_session_info *v9ses, char *uname, char *aname,
u32 fid, u32 afid, struct v9fs_fcall **rcall);
-int v9fs_t_clunk(struct v9fs_session_info *v9ses, u32 fid,
- struct v9fs_fcall **rcall);
+int v9fs_t_clunk(struct v9fs_session_info *v9ses, u32 fid);
int v9fs_t_flush(struct v9fs_session_info *v9ses, u16 oldtag);
@@ -320,7 +354,7 @@ int v9fs_t_stat(struct v9fs_session_info *v9ses, u32 fid,
struct v9fs_fcall **rcall);
int v9fs_t_wstat(struct v9fs_session_info *v9ses, u32 fid,
- struct v9fs_stat *stat, struct v9fs_fcall **rcall);
+ struct v9fs_wstat *wstat, struct v9fs_fcall **rcall);
int v9fs_t_walk(struct v9fs_session_info *v9ses, u32 fid, u32 newfid,
char *name, struct v9fs_fcall **rcall);
@@ -338,4 +372,5 @@ int v9fs_t_read(struct v9fs_session_info *v9ses, u32 fid,
u64 offset, u32 count, struct v9fs_fcall **rcall);
int v9fs_t_write(struct v9fs_session_info *v9ses, u32 fid, u64 offset,
- u32 count, void *data, struct v9fs_fcall **rcall);
+ u32 count, const char __user * data,
+ struct v9fs_fcall **rcall);
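
The new struct v9fs_str turns every decoded string into a counted slice that points straight into the receive buffer, so it is not NUL-terminated: length-aware operations (memcmp, "%.*s") are required, exactly as the PRINT_FCALL_ERROR macro above does. A purely illustrative consumer:

/* Sketch: read the server's version string out of an Rversion reply. */
static void print_version_sketch(struct v9fs_fcall *rc)
{
	struct v9fs_str *vs = &rc->params.rversion.version;

	/* "%.*s" bounds the read to vs->len; strlen()/strcmp() would overrun */
	printk(KERN_DEBUG "9p server version: %.*s\n", vs->len, vs->str);
}
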
diff --git a/fs/9p/Makefile b/fs/9p/Makefile
index e4e4ffe5a7d..3d023089707 100644
--- a/fs/9p/Makefile
+++ b/fs/9p/Makefile
@@ -1,17 +1,17 @@
obj-$(CONFIG_9P_FS) := 9p2000.o
9p2000-objs := \
+ trans_fd.o \
+ trans_sock.o \
+ mux.o \
+ 9p.o \
+ conv.o \
vfs_super.o \
vfs_inode.o \
vfs_file.o \
vfs_dir.o \
vfs_dentry.o \
error.o \
- mux.o \
- trans_fd.o \
- trans_sock.o \
- 9p.o \
- conv.o \
v9fs.o \
fid.o
diff --git a/fs/9p/conv.c b/fs/9p/conv.c
index 18121af99d3..55ccfa10ee9 100644
--- a/fs/9p/conv.c
+++ b/fs/9p/conv.c
@@ -30,7 +30,7 @@
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/idr.h>
-
+#include <asm/uaccess.h>
#include "debug.h"
#include "v9fs.h"
#include "9p.h"
@@ -58,12 +58,15 @@ static inline int buf_check_overflow(struct cbuf *buf)
static inline int buf_check_size(struct cbuf *buf, int len)
{
- if (buf->p+len > buf->ep) {
+ if (buf->p + len > buf->ep) {
if (buf->p < buf->ep) {
- eprintk(KERN_ERR, "buffer overflow\n");
+ eprintk(KERN_ERR, "buffer overflow: want %d has %d\n",
+ len, (int)(buf->ep - buf->p));
+ dump_stack();
buf->p = buf->ep + 1;
- return 0;
}
+
+ return 0;
}
return 1;
@@ -127,14 +130,6 @@ static inline void buf_put_string(struct cbuf *buf, const char *s)
buf_put_stringn(buf, s, strlen(s));
}
-static inline void buf_put_data(struct cbuf *buf, void *data, u32 datalen)
-{
- if (buf_check_size(buf, datalen)) {
- memcpy(buf->p, data, datalen);
- buf->p += datalen;
- }
-}
-
static inline u8 buf_get_int8(struct cbuf *buf)
{
u8 ret = 0;
@@ -183,86 +178,37 @@ static inline u64 buf_get_int64(struct cbuf *buf)
return ret;
}
-static inline int
-buf_get_string(struct cbuf *buf, char *data, unsigned int datalen)
-{
- u16 len = 0;
-
- len = buf_get_int16(buf);
- if (!buf_check_overflow(buf) && buf_check_size(buf, len) && len+1>datalen) {
- memcpy(data, buf->p, len);
- data[len] = 0;
- buf->p += len;
- len++;
- }
-
- return len;
-}
-
-static inline char *buf_get_stringb(struct cbuf *buf, struct cbuf *sbuf)
-{
- char *ret;
- u16 len;
-
- ret = NULL;
- len = buf_get_int16(buf);
-
- if (!buf_check_overflow(buf) && buf_check_size(buf, len) &&
- buf_check_size(sbuf, len+1)) {
-
- memcpy(sbuf->p, buf->p, len);
- sbuf->p[len] = 0;
- ret = sbuf->p;
- buf->p += len;
- sbuf->p += len + 1;
- }
-
- return ret;
-}
-
-static inline int buf_get_data(struct cbuf *buf, void *data, int datalen)
+static inline void buf_get_str(struct cbuf *buf, struct v9fs_str *vstr)
{
- int ret = 0;
-
- if (buf_check_size(buf, datalen)) {
- memcpy(data, buf->p, datalen);
- buf->p += datalen;
- ret = datalen;
+ vstr->len = buf_get_int16(buf);
+ if (!buf_check_overflow(buf) && buf_check_size(buf, vstr->len)) {
+ vstr->str = buf->p;
+ buf->p += vstr->len;
+ } else {
+ vstr->len = 0;
+ vstr->str = NULL;
}
-
- return ret;
}
-static inline void *buf_get_datab(struct cbuf *buf, struct cbuf *dbuf,
- int datalen)
+static inline void buf_get_qid(struct cbuf *bufp, struct v9fs_qid *qid)
{
- char *ret = NULL;
- int n = 0;
-
- if (buf_check_size(dbuf, datalen)) {
- n = buf_get_data(buf, dbuf->p, datalen);
- if (n > 0) {
- ret = dbuf->p;
- dbuf->p += n;
- }
- }
-
- return ret;
+ qid->type = buf_get_int8(bufp);
+ qid->version = buf_get_int32(bufp);
+ qid->path = buf_get_int64(bufp);
}
/**
- * v9fs_size_stat - calculate the size of a variable length stat struct
- * @v9ses: session information
+ * v9fs_size_wstat - calculate the size of a variable length stat struct
* @stat: metadata (stat) structure
+ * @extended: non-zero if 9P2000.u
*
*/
-static int v9fs_size_stat(struct v9fs_session_info *v9ses,
- struct v9fs_stat *stat)
+static int v9fs_size_wstat(struct v9fs_wstat *wstat, int extended)
{
int size = 0;
- if (stat == NULL) {
+ if (wstat == NULL) {
eprintk(KERN_ERR, "v9fs_size_stat: got a NULL stat pointer\n");
return 0;
}
@@ -279,82 +225,38 @@ static int v9fs_size_stat(struct v9fs_session_info *v9ses,
8 + /* length[8] */
8; /* minimum sum of string lengths */
- if (stat->name)
- size += strlen(stat->name);
- if (stat->uid)
- size += strlen(stat->uid);
- if (stat->gid)
- size += strlen(stat->gid);
- if (stat->muid)
- size += strlen(stat->muid);
+ if (wstat->name)
+ size += strlen(wstat->name);
+ if (wstat->uid)
+ size += strlen(wstat->uid);
+ if (wstat->gid)
+ size += strlen(wstat->gid);
+ if (wstat->muid)
+ size += strlen(wstat->muid);
- if (v9ses->extended) {
+ if (extended) {
size += 4 + /* n_uid[4] */
4 + /* n_gid[4] */
4 + /* n_muid[4] */
2; /* string length of extension[4] */
- if (stat->extension)
- size += strlen(stat->extension);
+ if (wstat->extension)
+ size += strlen(wstat->extension);
}
return size;
}
/**
- * serialize_stat - safely format a stat structure for transmission
- * @v9ses: session info
- * @stat: metadata (stat) structure
- * @bufp: buffer to serialize structure into
- *
- */
-
-static int
-serialize_stat(struct v9fs_session_info *v9ses, struct v9fs_stat *stat,
- struct cbuf *bufp)
-{
- buf_put_int16(bufp, stat->size);
- buf_put_int16(bufp, stat->type);
- buf_put_int32(bufp, stat->dev);
- buf_put_int8(bufp, stat->qid.type);
- buf_put_int32(bufp, stat->qid.version);
- buf_put_int64(bufp, stat->qid.path);
- buf_put_int32(bufp, stat->mode);
- buf_put_int32(bufp, stat->atime);
- buf_put_int32(bufp, stat->mtime);
- buf_put_int64(bufp, stat->length);
-
- buf_put_string(bufp, stat->name);
- buf_put_string(bufp, stat->uid);
- buf_put_string(bufp, stat->gid);
- buf_put_string(bufp, stat->muid);
-
- if (v9ses->extended) {
- buf_put_string(bufp, stat->extension);
- buf_put_int32(bufp, stat->n_uid);
- buf_put_int32(bufp, stat->n_gid);
- buf_put_int32(bufp, stat->n_muid);
- }
-
- if (buf_check_overflow(bufp))
- return 0;
-
- return stat->size;
-}
-
-/**
- * deserialize_stat - safely decode a recieved metadata (stat) structure
- * @v9ses: session info
+ * buf_get_stat - safely decode a received metadata (stat) structure
* @bufp: buffer to deserialize
* @stat: metadata (stat) structure
- * @dbufp: buffer to deserialize variable strings into
+ * @extended: non-zero if 9P2000.u
*
*/
-static inline int
-deserialize_stat(struct v9fs_session_info *v9ses, struct cbuf *bufp,
- struct v9fs_stat *stat, struct cbuf *dbufp)
+static inline void
+buf_get_stat(struct cbuf *bufp, struct v9fs_stat *stat, int extended)
{
-
stat->size = buf_get_int16(bufp);
stat->type = buf_get_int16(bufp);
stat->dev = buf_get_int32(bufp);
@@ -365,282 +267,82 @@ deserialize_stat(struct v9fs_session_info *v9ses, struct cbuf *bufp,
stat->atime = buf_get_int32(bufp);
stat->mtime = buf_get_int32(bufp);
stat->length = buf_get_int64(bufp);
- stat->name = buf_get_stringb(bufp, dbufp);
- stat->uid = buf_get_stringb(bufp, dbufp);
- stat->gid = buf_get_stringb(bufp, dbufp);
- stat->muid = buf_get_stringb(bufp, dbufp);
+ buf_get_str(bufp, &stat->name);
+ buf_get_str(bufp, &stat->uid);
+ buf_get_str(bufp, &stat->gid);
+ buf_get_str(bufp, &stat->muid);
- if (v9ses->extended) {
- stat->extension = buf_get_stringb(bufp, dbufp);
+ if (extended) {
+ buf_get_str(bufp, &stat->extension);
stat->n_uid = buf_get_int32(bufp);
stat->n_gid = buf_get_int32(bufp);
stat->n_muid = buf_get_int32(bufp);
}
-
- if (buf_check_overflow(bufp) || buf_check_overflow(dbufp))
- return 0;
-
- return stat->size + 2;
-}
-
-/**
- * deserialize_statb - wrapper for decoding a received metadata structure
- * @v9ses: session info
- * @bufp: buffer to deserialize
- * @dbufp: buffer to deserialize variable strings into
- *
- */
-
-static inline struct v9fs_stat *deserialize_statb(struct v9fs_session_info
- *v9ses, struct cbuf *bufp,
- struct cbuf *dbufp)
-{
- struct v9fs_stat *ret = buf_alloc(dbufp, sizeof(struct v9fs_stat));
-
- if (ret) {
- int n = deserialize_stat(v9ses, bufp, ret, dbufp);
- if (n <= 0)
- return NULL;
- }
-
- return ret;
}
/**
* v9fs_deserialize_stat - decode a received metadata structure
- * @v9ses: session info
* @buf: buffer to deserialize
* @buflen: length of received buffer
* @stat: metadata structure to decode into
- * @statlen: length of destination metadata structure
+ * @extended: non-zero if 9P2000.u
*
+ * Note: stat will point to the buf region.
*/
int
-v9fs_deserialize_stat(struct v9fs_session_info *v9ses, void *buf,
- u32 buflen, struct v9fs_stat *stat, u32 statlen)
+v9fs_deserialize_stat(void *buf, u32 buflen, struct v9fs_stat *stat,
+ int extended)
{
struct cbuf buffer;
struct cbuf *bufp = &buffer;
- struct cbuf dbuffer;
- struct cbuf *dbufp = &dbuffer;
+ unsigned char *p;
buf_init(bufp, buf, buflen);
- buf_init(dbufp, (char *)stat + sizeof(struct v9fs_stat),
- statlen - sizeof(struct v9fs_stat));
-
- return deserialize_stat(v9ses, bufp, stat, dbufp);
-}
-
-static inline int
-v9fs_size_fcall(struct v9fs_session_info *v9ses, struct v9fs_fcall *fcall)
-{
- int size = 4 + 1 + 2; /* size[4] msg[1] tag[2] */
- int i = 0;
-
- switch (fcall->id) {
- default:
- eprintk(KERN_ERR, "bad msg type %d\n", fcall->id);
- return 0;
- case TVERSION: /* msize[4] version[s] */
- size += 4 + 2 + strlen(fcall->params.tversion.version);
- break;
- case TAUTH: /* afid[4] uname[s] aname[s] */
- size += 4 + 2 + strlen(fcall->params.tauth.uname) +
- 2 + strlen(fcall->params.tauth.aname);
- break;
- case TFLUSH: /* oldtag[2] */
- size += 2;
- break;
- case TATTACH: /* fid[4] afid[4] uname[s] aname[s] */
- size += 4 + 4 + 2 + strlen(fcall->params.tattach.uname) +
- 2 + strlen(fcall->params.tattach.aname);
- break;
- case TWALK: /* fid[4] newfid[4] nwname[2] nwname*(wname[s]) */
- size += 4 + 4 + 2;
- /* now compute total for the array of names */
- for (i = 0; i < fcall->params.twalk.nwname; i++)
- size += 2 + strlen(fcall->params.twalk.wnames[i]);
- break;
- case TOPEN: /* fid[4] mode[1] */
- size += 4 + 1;
- break;
- case TCREATE: /* fid[4] name[s] perm[4] mode[1] */
- size += 4 + 2 + strlen(fcall->params.tcreate.name) + 4 + 1;
- break;
- case TREAD: /* fid[4] offset[8] count[4] */
- size += 4 + 8 + 4;
- break;
- case TWRITE: /* fid[4] offset[8] count[4] data[count] */
- size += 4 + 8 + 4 + fcall->params.twrite.count;
- break;
- case TCLUNK: /* fid[4] */
- size += 4;
- break;
- case TREMOVE: /* fid[4] */
- size += 4;
- break;
- case TSTAT: /* fid[4] */
- size += 4;
- break;
- case TWSTAT: /* fid[4] stat[n] */
- fcall->params.twstat.stat->size =
- v9fs_size_stat(v9ses, fcall->params.twstat.stat);
- size += 4 + 2 + 2 + fcall->params.twstat.stat->size;
- }
- return size;
-}
-
-/*
- * v9fs_serialize_fcall - marshall fcall struct into a packet
- * @v9ses: session information
- * @fcall: structure to convert
- * @data: buffer to serialize fcall into
- * @datalen: length of buffer to serialize fcall into
- *
- */
-
-int
-v9fs_serialize_fcall(struct v9fs_session_info *v9ses, struct v9fs_fcall *fcall,
- void *data, u32 datalen)
-{
- int i = 0;
- struct v9fs_stat *stat = NULL;
- struct cbuf buffer;
- struct cbuf *bufp = &buffer;
-
- buf_init(bufp, data, datalen);
-
- if (!fcall) {
- eprintk(KERN_ERR, "no fcall\n");
- return -EINVAL;
- }
-
- fcall->size = v9fs_size_fcall(v9ses, fcall);
-
- buf_put_int32(bufp, fcall->size);
- buf_put_int8(bufp, fcall->id);
- buf_put_int16(bufp, fcall->tag);
-
- dprintk(DEBUG_CONV, "size %d id %d tag %d\n", fcall->size, fcall->id,
- fcall->tag);
-
- /* now encode it */
- switch (fcall->id) {
- default:
- eprintk(KERN_ERR, "bad msg type: %d\n", fcall->id);
- return -EPROTO;
- case TVERSION:
- buf_put_int32(bufp, fcall->params.tversion.msize);
- buf_put_string(bufp, fcall->params.tversion.version);
- break;
- case TAUTH:
- buf_put_int32(bufp, fcall->params.tauth.afid);
- buf_put_string(bufp, fcall->params.tauth.uname);
- buf_put_string(bufp, fcall->params.tauth.aname);
- break;
- case TFLUSH:
- buf_put_int16(bufp, fcall->params.tflush.oldtag);
- break;
- case TATTACH:
- buf_put_int32(bufp, fcall->params.tattach.fid);
- buf_put_int32(bufp, fcall->params.tattach.afid);
- buf_put_string(bufp, fcall->params.tattach.uname);
- buf_put_string(bufp, fcall->params.tattach.aname);
- break;
- case TWALK:
- buf_put_int32(bufp, fcall->params.twalk.fid);
- buf_put_int32(bufp, fcall->params.twalk.newfid);
- buf_put_int16(bufp, fcall->params.twalk.nwname);
- for (i = 0; i < fcall->params.twalk.nwname; i++)
- buf_put_string(bufp, fcall->params.twalk.wnames[i]);
- break;
- case TOPEN:
- buf_put_int32(bufp, fcall->params.topen.fid);
- buf_put_int8(bufp, fcall->params.topen.mode);
- break;
- case TCREATE:
- buf_put_int32(bufp, fcall->params.tcreate.fid);
- buf_put_string(bufp, fcall->params.tcreate.name);
- buf_put_int32(bufp, fcall->params.tcreate.perm);
- buf_put_int8(bufp, fcall->params.tcreate.mode);
- break;
- case TREAD:
- buf_put_int32(bufp, fcall->params.tread.fid);
- buf_put_int64(bufp, fcall->params.tread.offset);
- buf_put_int32(bufp, fcall->params.tread.count);
- break;
- case TWRITE:
- buf_put_int32(bufp, fcall->params.twrite.fid);
- buf_put_int64(bufp, fcall->params.twrite.offset);
- buf_put_int32(bufp, fcall->params.twrite.count);
- buf_put_data(bufp, fcall->params.twrite.data,
- fcall->params.twrite.count);
- break;
- case TCLUNK:
- buf_put_int32(bufp, fcall->params.tclunk.fid);
- break;
- case TREMOVE:
- buf_put_int32(bufp, fcall->params.tremove.fid);
- break;
- case TSTAT:
- buf_put_int32(bufp, fcall->params.tstat.fid);
- break;
- case TWSTAT:
- buf_put_int32(bufp, fcall->params.twstat.fid);
- stat = fcall->params.twstat.stat;
-
- buf_put_int16(bufp, stat->size + 2);
- serialize_stat(v9ses, stat, bufp);
- break;
- }
+ p = bufp->p;
+ buf_get_stat(bufp, stat, extended);
if (buf_check_overflow(bufp))
- return -EIO;
-
- return fcall->size;
+ return 0;
+ else
+ return bufp->p - p;
}
/**
* deserialize_fcall - unmarshal a response
- * @v9ses: session information
- * @msgsize: size of rcall message
* @buf: received buffer
* @buflen: length of received buffer
* @rcall: fcall structure to populate
* @rcalllen: length of fcall structure to populate
+ * @extended: non-zero if 9P2000.u
*
*/
int
-v9fs_deserialize_fcall(struct v9fs_session_info *v9ses, u32 msgsize,
- void *buf, u32 buflen, struct v9fs_fcall *rcall,
- int rcalllen)
+v9fs_deserialize_fcall(void *buf, u32 buflen, struct v9fs_fcall *rcall,
+ int extended)
{
struct cbuf buffer;
struct cbuf *bufp = &buffer;
- struct cbuf dbuffer;
- struct cbuf *dbufp = &dbuffer;
int i = 0;
buf_init(bufp, buf, buflen);
- buf_init(dbufp, (char *)rcall + sizeof(struct v9fs_fcall),
- rcalllen - sizeof(struct v9fs_fcall));
- rcall->size = msgsize;
+ rcall->size = buf_get_int32(bufp);
rcall->id = buf_get_int8(bufp);
rcall->tag = buf_get_int16(bufp);
dprintk(DEBUG_CONV, "size %d id %d tag %d\n", rcall->size, rcall->id,
rcall->tag);
+
switch (rcall->id) {
default:
eprintk(KERN_ERR, "unknown message type: %d\n", rcall->id);
return -EPROTO;
case RVERSION:
rcall->params.rversion.msize = buf_get_int32(bufp);
- rcall->params.rversion.version = buf_get_stringb(bufp, dbufp);
+ buf_get_str(bufp, &rcall->params.rversion.version);
break;
case RFLUSH:
break;
@@ -651,34 +353,27 @@ v9fs_deserialize_fcall(struct v9fs_session_info *v9ses, u32 msgsize,
break;
case RWALK:
rcall->params.rwalk.nwqid = buf_get_int16(bufp);
- rcall->params.rwalk.wqids = buf_alloc(dbufp,
- rcall->params.rwalk.nwqid * sizeof(struct v9fs_qid));
- if (rcall->params.rwalk.wqids)
- for (i = 0; i < rcall->params.rwalk.nwqid; i++) {
- rcall->params.rwalk.wqids[i].type =
- buf_get_int8(bufp);
- rcall->params.rwalk.wqids[i].version =
- buf_get_int16(bufp);
- rcall->params.rwalk.wqids[i].path =
- buf_get_int64(bufp);
- }
+ if (rcall->params.rwalk.nwqid > V9FS_MAXWELEM) {
+ eprintk(KERN_ERR, "Rwalk with more than %d qids: %d\n",
+ V9FS_MAXWELEM, rcall->params.rwalk.nwqid);
+ return -EPROTO;
+ }
+
+ for (i = 0; i < rcall->params.rwalk.nwqid; i++)
+ buf_get_qid(bufp, &rcall->params.rwalk.wqids[i]);
break;
case ROPEN:
- rcall->params.ropen.qid.type = buf_get_int8(bufp);
- rcall->params.ropen.qid.version = buf_get_int32(bufp);
- rcall->params.ropen.qid.path = buf_get_int64(bufp);
+ buf_get_qid(bufp, &rcall->params.ropen.qid);
rcall->params.ropen.iounit = buf_get_int32(bufp);
break;
case RCREATE:
- rcall->params.rcreate.qid.type = buf_get_int8(bufp);
- rcall->params.rcreate.qid.version = buf_get_int32(bufp);
- rcall->params.rcreate.qid.path = buf_get_int64(bufp);
+ buf_get_qid(bufp, &rcall->params.rcreate.qid);
rcall->params.rcreate.iounit = buf_get_int32(bufp);
break;
case RREAD:
rcall->params.rread.count = buf_get_int32(bufp);
- rcall->params.rread.data = buf_get_datab(bufp, dbufp,
- rcall->params.rread.count);
+ rcall->params.rread.data = bufp->p;
+ buf_check_size(bufp, rcall->params.rread.count);
break;
case RWRITE:
rcall->params.rwrite.count = buf_get_int32(bufp);
@@ -689,20 +384,443 @@ v9fs_deserialize_fcall(struct v9fs_session_info *v9ses, u32 msgsize,
break;
case RSTAT:
buf_get_int16(bufp);
- rcall->params.rstat.stat =
- deserialize_statb(v9ses, bufp, dbufp);
+ buf_get_stat(bufp, &rcall->params.rstat.stat, extended);
break;
case RWSTAT:
break;
case RERROR:
- rcall->params.rerror.error = buf_get_stringb(bufp, dbufp);
- if (v9ses->extended)
+ buf_get_str(bufp, &rcall->params.rerror.error);
+ if (extended)
rcall->params.rerror.errno = buf_get_int16(bufp);
break;
}
- if (buf_check_overflow(bufp) || buf_check_overflow(dbufp))
+ if (buf_check_overflow(bufp)) {
+ dprintk(DEBUG_ERROR, "buffer overflow\n");
return -EIO;
+ }
+
+ return bufp->p - bufp->sp;
+}
+
+static inline void v9fs_put_int8(struct cbuf *bufp, u8 val, u8 * p)
+{
+ *p = val;
+ buf_put_int8(bufp, val);
+}
+
+static inline void v9fs_put_int16(struct cbuf *bufp, u16 val, u16 * p)
+{
+ *p = val;
+ buf_put_int16(bufp, val);
+}
+
+static inline void v9fs_put_int32(struct cbuf *bufp, u32 val, u32 * p)
+{
+ *p = val;
+ buf_put_int32(bufp, val);
+}
+
+static inline void v9fs_put_int64(struct cbuf *bufp, u64 val, u64 * p)
+{
+ *p = val;
+ buf_put_int64(bufp, val);
+}
- return rcall->size;
+static inline void
+v9fs_put_str(struct cbuf *bufp, char *data, struct v9fs_str *str)
+{
+ if (data) {
+ str->len = strlen(data);
+ str->str = bufp->p;
+ } else {
+ str->len = 0;
+ str->str = NULL;
+ }
+
+ buf_put_stringn(bufp, data, str->len);
+}
+
+static inline int
+v9fs_put_user_data(struct cbuf *bufp, const char __user * data, int count,
+ unsigned char **pdata)
+{
+ *pdata = buf_alloc(bufp, count);
+ return copy_from_user(*pdata, data, count);
+}
+
+static void
+v9fs_put_wstat(struct cbuf *bufp, struct v9fs_wstat *wstat,
+ struct v9fs_stat *stat, int statsz, int extended)
+{
+ v9fs_put_int16(bufp, statsz, &stat->size);
+ v9fs_put_int16(bufp, wstat->type, &stat->type);
+ v9fs_put_int32(bufp, wstat->dev, &stat->dev);
+ v9fs_put_int8(bufp, wstat->qid.type, &stat->qid.type);
+ v9fs_put_int32(bufp, wstat->qid.version, &stat->qid.version);
+ v9fs_put_int64(bufp, wstat->qid.path, &stat->qid.path);
+ v9fs_put_int32(bufp, wstat->mode, &stat->mode);
+ v9fs_put_int32(bufp, wstat->atime, &stat->atime);
+ v9fs_put_int32(bufp, wstat->mtime, &stat->mtime);
+ v9fs_put_int64(bufp, wstat->length, &stat->length);
+
+ v9fs_put_str(bufp, wstat->name, &stat->name);
+ v9fs_put_str(bufp, wstat->uid, &stat->uid);
+ v9fs_put_str(bufp, wstat->gid, &stat->gid);
+ v9fs_put_str(bufp, wstat->muid, &stat->muid);
+
+ if (extended) {
+ v9fs_put_str(bufp, wstat->extension, &stat->extension);
+ v9fs_put_int32(bufp, wstat->n_uid, &stat->n_uid);
+ v9fs_put_int32(bufp, wstat->n_gid, &stat->n_gid);
+ v9fs_put_int32(bufp, wstat->n_muid, &stat->n_muid);
+ }
+}
+
+static struct v9fs_fcall *
+v9fs_create_common(struct cbuf *bufp, u32 size, u8 id)
+{
+ struct v9fs_fcall *fc;
+
+ size += 4 + 1 + 2; /* size[4] id[1] tag[2] */
+ fc = kmalloc(sizeof(struct v9fs_fcall) + size, GFP_KERNEL);
+ if (!fc)
+ return ERR_PTR(-ENOMEM);
+
+ fc->sdata = (char *)fc + sizeof(*fc);
+
+ buf_init(bufp, (char *)fc->sdata, size);
+ v9fs_put_int32(bufp, size, &fc->size);
+ v9fs_put_int8(bufp, id, &fc->id);
+ v9fs_put_int16(bufp, V9FS_NOTAG, &fc->tag);
+
+ return fc;
+}
+
+void v9fs_set_tag(struct v9fs_fcall *fc, u16 tag)
+{
+ fc->tag = tag;
+ *(__le16 *) (fc->sdata + 5) = cpu_to_le16(tag);
+}
+
+struct v9fs_fcall *v9fs_create_tversion(u32 msize, char *version)
+{
+ int size;
+ struct v9fs_fcall *fc;
+ struct cbuf buffer;
+ struct cbuf *bufp = &buffer;
+
+ size = 4 + 2 + strlen(version); /* msize[4] version[s] */
+ fc = v9fs_create_common(bufp, size, TVERSION);
+ if (IS_ERR(fc))
+ goto error;
+
+ v9fs_put_int32(bufp, msize, &fc->params.tversion.msize);
+ v9fs_put_str(bufp, version, &fc->params.tversion.version);
+
+ if (buf_check_overflow(bufp)) {
+ kfree(fc);
+ fc = ERR_PTR(-ENOMEM);
+ }
+ error:
+ return fc;
+}
+
+struct v9fs_fcall *v9fs_create_tauth(u32 afid, char *uname, char *aname)
+{
+ int size;
+ struct v9fs_fcall *fc;
+ struct cbuf buffer;
+ struct cbuf *bufp = &buffer;
+
+ size = 4 + 2 + strlen(uname) + 2 + strlen(aname); /* afid[4] uname[s] aname[s] */
+ fc = v9fs_create_common(bufp, size, TAUTH);
+ if (IS_ERR(fc))
+ goto error;
+
+ v9fs_put_int32(bufp, afid, &fc->params.tauth.afid);
+ v9fs_put_str(bufp, uname, &fc->params.tauth.uname);
+ v9fs_put_str(bufp, aname, &fc->params.tauth.aname);
+
+ if (buf_check_overflow(bufp)) {
+ kfree(fc);
+ fc = ERR_PTR(-ENOMEM);
+ }
+ error:
+ return fc;
+}
+
+struct v9fs_fcall *
+v9fs_create_tattach(u32 fid, u32 afid, char *uname, char *aname)
+{
+ int size;
+ struct v9fs_fcall *fc;
+ struct cbuf buffer;
+ struct cbuf *bufp = &buffer;
+
+ size = 4 + 4 + 2 + strlen(uname) + 2 + strlen(aname); /* fid[4] afid[4] uname[s] aname[s] */
+ fc = v9fs_create_common(bufp, size, TATTACH);
+ if (IS_ERR(fc))
+ goto error;
+
+ v9fs_put_int32(bufp, fid, &fc->params.tattach.fid);
+ v9fs_put_int32(bufp, afid, &fc->params.tattach.afid);
+ v9fs_put_str(bufp, uname, &fc->params.tattach.uname);
+ v9fs_put_str(bufp, aname, &fc->params.tattach.aname);
+
+ error:
+ return fc;
+}
+
+struct v9fs_fcall *v9fs_create_tflush(u16 oldtag)
+{
+ int size;
+ struct v9fs_fcall *fc;
+ struct cbuf buffer;
+ struct cbuf *bufp = &buffer;
+
+ size = 2; /* oldtag[2] */
+ fc = v9fs_create_common(bufp, size, TFLUSH);
+ if (IS_ERR(fc))
+ goto error;
+
+ v9fs_put_int16(bufp, oldtag, &fc->params.tflush.oldtag);
+
+ if (buf_check_overflow(bufp)) {
+ kfree(fc);
+ fc = ERR_PTR(-ENOMEM);
+ }
+ error:
+ return fc;
+}
+
+struct v9fs_fcall *v9fs_create_twalk(u32 fid, u32 newfid, u16 nwname,
+ char **wnames)
+{
+ int i, size;
+ struct v9fs_fcall *fc;
+ struct cbuf buffer;
+ struct cbuf *bufp = &buffer;
+
+ if (nwname > V9FS_MAXWELEM) {
+ dprintk(DEBUG_ERROR, "nwname > %d\n", V9FS_MAXWELEM);
+ return NULL;
+ }
+
+ size = 4 + 4 + 2; /* fid[4] newfid[4] nwname[2] ... */
+ for (i = 0; i < nwname; i++) {
+ size += 2 + strlen(wnames[i]); /* wname[s] */
+ }
+
+ fc = v9fs_create_common(bufp, size, TWALK);
+ if (IS_ERR(fc))
+ goto error;
+
+ v9fs_put_int32(bufp, fid, &fc->params.twalk.fid);
+ v9fs_put_int32(bufp, newfid, &fc->params.twalk.newfid);
+ v9fs_put_int16(bufp, nwname, &fc->params.twalk.nwname);
+ for (i = 0; i < nwname; i++) {
+ v9fs_put_str(bufp, wnames[i], &fc->params.twalk.wnames[i]);
+ }
+
+ if (buf_check_overflow(bufp)) {
+ kfree(fc);
+ fc = ERR_PTR(-ENOMEM);
+ }
+ error:
+ return fc;
+}
+
+struct v9fs_fcall *v9fs_create_topen(u32 fid, u8 mode)
+{
+ int size;
+ struct v9fs_fcall *fc;
+ struct cbuf buffer;
+ struct cbuf *bufp = &buffer;
+
+ size = 4 + 1; /* fid[4] mode[1] */
+ fc = v9fs_create_common(bufp, size, TOPEN);
+ if (IS_ERR(fc))
+ goto error;
+
+ v9fs_put_int32(bufp, fid, &fc->params.topen.fid);
+ v9fs_put_int8(bufp, mode, &fc->params.topen.mode);
+
+ if (buf_check_overflow(bufp)) {
+ kfree(fc);
+ fc = ERR_PTR(-ENOMEM);
+ }
+ error:
+ return fc;
+}
+
+struct v9fs_fcall *v9fs_create_tcreate(u32 fid, char *name, u32 perm, u8 mode)
+{
+ int size;
+ struct v9fs_fcall *fc;
+ struct cbuf buffer;
+ struct cbuf *bufp = &buffer;
+
+ size = 4 + 2 + strlen(name) + 4 + 1; /* fid[4] name[s] perm[4] mode[1] */
+ fc = v9fs_create_common(bufp, size, TCREATE);
+ if (IS_ERR(fc))
+ goto error;
+
+ v9fs_put_int32(bufp, fid, &fc->params.tcreate.fid);
+ v9fs_put_str(bufp, name, &fc->params.tcreate.name);
+ v9fs_put_int32(bufp, perm, &fc->params.tcreate.perm);
+ v9fs_put_int8(bufp, mode, &fc->params.tcreate.mode);
+
+ if (buf_check_overflow(bufp)) {
+ kfree(fc);
+ fc = ERR_PTR(-ENOMEM);
+ }
+ error:
+ return fc;
+}
+
+struct v9fs_fcall *v9fs_create_tread(u32 fid, u64 offset, u32 count)
+{
+ int size;
+ struct v9fs_fcall *fc;
+ struct cbuf buffer;
+ struct cbuf *bufp = &buffer;
+
+ size = 4 + 8 + 4; /* fid[4] offset[8] count[4] */
+ fc = v9fs_create_common(bufp, size, TREAD);
+ if (IS_ERR(fc))
+ goto error;
+
+ v9fs_put_int32(bufp, fid, &fc->params.tread.fid);
+ v9fs_put_int64(bufp, offset, &fc->params.tread.offset);
+ v9fs_put_int32(bufp, count, &fc->params.tread.count);
+
+ if (buf_check_overflow(bufp)) {
+ kfree(fc);
+ fc = ERR_PTR(-ENOMEM);
+ }
+ error:
+ return fc;
+}
+
+struct v9fs_fcall *v9fs_create_twrite(u32 fid, u64 offset, u32 count,
+ const char __user * data)
+{
+ int size, err;
+ struct v9fs_fcall *fc;
+ struct cbuf buffer;
+ struct cbuf *bufp = &buffer;
+
+ size = 4 + 8 + 4 + count; /* fid[4] offset[8] count[4] data[count] */
+ fc = v9fs_create_common(bufp, size, TWRITE);
+ if (IS_ERR(fc))
+ goto error;
+
+ v9fs_put_int32(bufp, fid, &fc->params.twrite.fid);
+ v9fs_put_int64(bufp, offset, &fc->params.twrite.offset);
+ v9fs_put_int32(bufp, count, &fc->params.twrite.count);
+ err = v9fs_put_user_data(bufp, data, count, &fc->params.twrite.data);
+ if (err) {
+ kfree(fc);
+ fc = ERR_PTR(err);
+ }
+
+ if (buf_check_overflow(bufp)) {
+ kfree(fc);
+ fc = ERR_PTR(-ENOMEM);
+ }
+ error:
+ return fc;
+}
+
+struct v9fs_fcall *v9fs_create_tclunk(u32 fid)
+{
+ int size;
+ struct v9fs_fcall *fc;
+ struct cbuf buffer;
+ struct cbuf *bufp = &buffer;
+
+ size = 4; /* fid[4] */
+ fc = v9fs_create_common(bufp, size, TCLUNK);
+ if (IS_ERR(fc))
+ goto error;
+
+ v9fs_put_int32(bufp, fid, &fc->params.tclunk.fid);
+
+ if (buf_check_overflow(bufp)) {
+ kfree(fc);
+ fc = ERR_PTR(-ENOMEM);
+ }
+ error:
+ return fc;
+}
+
+struct v9fs_fcall *v9fs_create_tremove(u32 fid)
+{
+ int size;
+ struct v9fs_fcall *fc;
+ struct cbuf buffer;
+ struct cbuf *bufp = &buffer;
+
+ size = 4; /* fid[4] */
+ fc = v9fs_create_common(bufp, size, TREMOVE);
+ if (IS_ERR(fc))
+ goto error;
+
+ v9fs_put_int32(bufp, fid, &fc->params.tremove.fid);
+
+ if (buf_check_overflow(bufp)) {
+ kfree(fc);
+ fc = ERR_PTR(-ENOMEM);
+ }
+ error:
+ return fc;
+}
+
+struct v9fs_fcall *v9fs_create_tstat(u32 fid)
+{
+ int size;
+ struct v9fs_fcall *fc;
+ struct cbuf buffer;
+ struct cbuf *bufp = &buffer;
+
+ size = 4; /* fid[4] */
+ fc = v9fs_create_common(bufp, size, TSTAT);
+ if (IS_ERR(fc))
+ goto error;
+
+ v9fs_put_int32(bufp, fid, &fc->params.tstat.fid);
+
+ if (buf_check_overflow(bufp)) {
+ kfree(fc);
+ fc = ERR_PTR(-ENOMEM);
+ }
+ error:
+ return fc;
+}
+
+struct v9fs_fcall *v9fs_create_twstat(u32 fid, struct v9fs_wstat *wstat,
+ int extended)
+{
+ int size, statsz;
+ struct v9fs_fcall *fc;
+ struct cbuf buffer;
+ struct cbuf *bufp = &buffer;
+
+ statsz = v9fs_size_wstat(wstat, extended);
+ size = 4 + 2 + 2 + statsz; /* fid[4] stat[n] */
+ fc = v9fs_create_common(bufp, size, TWSTAT);
+ if (IS_ERR(fc))
+ goto error;
+
+ v9fs_put_int32(bufp, fid, &fc->params.twstat.fid);
+ buf_put_int16(bufp, statsz + 2);
+ v9fs_put_wstat(bufp, wstat, &fc->params.twstat.stat, statsz, extended);
+
+ if (buf_check_overflow(bufp)) {
+ kfree(fc);
+ fc = ERR_PTR(-ENOMEM);
+ }
+ error:
+ return fc;
}
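
v9fs_create_common() reserves 4 + 1 + 2 extra bytes and fills them first because every 9P message begins with the same little-endian header, size[4] id[1] tag[2]; that fixed layout is also what lets v9fs_set_tag() patch the tag in place at byte offset 5 of sdata. An illustrative view of that header (the struct name is invented here, not part of the patch):

#include <linux/types.h>

/* Byte layout of the common 9P header emitted ahead of each message body. */
struct p9_hdr_sketch {
	__le32	size;	/* whole message length, header included     */
	u8	id;	/* TVERSION, TWALK, ...                      */
	__le16	tag;	/* V9FS_NOTAG until v9fs_set_tag() fills it  */
} __attribute__((packed));
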
diff --git a/fs/9p/conv.h b/fs/9p/conv.h
index ee849613c61..26a736e4a2e 100644
--- a/fs/9p/conv.h
+++ b/fs/9p/conv.h
@@ -1,8 +1,9 @@
/*
* linux/fs/9p/conv.h
*
- * 9P protocol conversion definitions
+ * 9P protocol conversion definitions.
*
+ * Copyright (C) 2005 by Latchesar Ionkov <lucho@ionkov.net>
* Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
*
@@ -24,13 +25,27 @@
*
*/
-int v9fs_deserialize_stat(struct v9fs_session_info *, void *buf,
- u32 buflen, struct v9fs_stat *stat, u32 statlen);
-int v9fs_serialize_fcall(struct v9fs_session_info *, struct v9fs_fcall *tcall,
- void *buf, u32 buflen);
-int v9fs_deserialize_fcall(struct v9fs_session_info *, u32 msglen,
- void *buf, u32 buflen, struct v9fs_fcall *rcall,
- int rcalllen);
+int v9fs_deserialize_stat(void *buf, u32 buflen, struct v9fs_stat *stat,
+ int extended);
+int v9fs_deserialize_fcall(void *buf, u32 buflen, struct v9fs_fcall *rcall,
+ int extended);
-/* this one is actually in error.c right now */
-int v9fs_errstr2errno(char *errstr);
+void v9fs_set_tag(struct v9fs_fcall *fc, u16 tag);
+
+struct v9fs_fcall *v9fs_create_tversion(u32 msize, char *version);
+struct v9fs_fcall *v9fs_create_tauth(u32 afid, char *uname, char *aname);
+struct v9fs_fcall *v9fs_create_tattach(u32 fid, u32 afid, char *uname,
+ char *aname);
+struct v9fs_fcall *v9fs_create_tflush(u16 oldtag);
+struct v9fs_fcall *v9fs_create_twalk(u32 fid, u32 newfid, u16 nwname,
+ char **wnames);
+struct v9fs_fcall *v9fs_create_topen(u32 fid, u8 mode);
+struct v9fs_fcall *v9fs_create_tcreate(u32 fid, char *name, u32 perm, u8 mode);
+struct v9fs_fcall *v9fs_create_tread(u32 fid, u64 offset, u32 count);
+struct v9fs_fcall *v9fs_create_twrite(u32 fid, u64 offset, u32 count,
+ const char __user *data);
+struct v9fs_fcall *v9fs_create_tclunk(u32 fid);
+struct v9fs_fcall *v9fs_create_tremove(u32 fid);
+struct v9fs_fcall *v9fs_create_tstat(u32 fid);
+struct v9fs_fcall *v9fs_create_twstat(u32 fid, struct v9fs_wstat *wstat,
+ int extended);
diff --git a/fs/9p/debug.h b/fs/9p/debug.h
index 4445f06919d..fe551032788 100644
--- a/fs/9p/debug.h
+++ b/fs/9p/debug.h
@@ -51,16 +51,23 @@ do { \
#if DEBUG_DUMP_PKT
static inline void dump_data(const unsigned char *data, unsigned int datalen)
{
- int i, j;
- int len = datalen;
+ int i, n;
+ char buf[5*8];
- printk(KERN_DEBUG "data ");
- for (i = 0; i < len; i += 4) {
- for (j = 0; (j < 4) && (i + j < len); j++)
- printk(KERN_DEBUG "%02x", data[i + j]);
- printk(KERN_DEBUG " ");
+ n = 0;
+ i = 0;
+ while (i < datalen) {
+ n += snprintf(buf+n, sizeof(buf)-n, "%02x", data[i++]);
+ if (i%4 == 0)
+ n += snprintf(buf+n, sizeof(buf)-n, " ");
+
+ if (i%16 == 0) {
+ dprintk(DEBUG_ERROR, "%s\n", buf);
+ n = 0;
+ }
}
- printk(KERN_DEBUG "\n");
+
+ dprintk(DEBUG_ERROR, "%s\n", buf);
}
#else /* DEBUG_DUMP_PKT */
static inline void dump_data(const unsigned char *data, unsigned int datalen)
diff --git a/fs/9p/error.c b/fs/9p/error.c
index 834cb179e38..e4b6f8f38b6 100644
--- a/fs/9p/error.c
+++ b/fs/9p/error.c
@@ -33,7 +33,6 @@
#include <linux/list.h>
#include <linux/jhash.h>
-#include <linux/string.h>
#include "debug.h"
#include "error.h"
@@ -55,7 +54,8 @@ int v9fs_error_init(void)
/* load initial error map into hash table */
for (c = errmap; c->name != NULL; c++) {
- bucket = jhash(c->name, strlen(c->name), 0) % ERRHASHSZ;
+ c->namelen = strlen(c->name);
+ bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ;
INIT_HLIST_NODE(&c->list);
hlist_add_head(&c->list, &hash_errmap[bucket]);
}
@@ -69,15 +69,15 @@ int v9fs_error_init(void)
*
*/
-int v9fs_errstr2errno(char *errstr)
+int v9fs_errstr2errno(char *errstr, int len)
{
int errno = 0;
struct hlist_node *p = NULL;
struct errormap *c = NULL;
- int bucket = jhash(errstr, strlen(errstr), 0) % ERRHASHSZ;
+ int bucket = jhash(errstr, len, 0) % ERRHASHSZ;
hlist_for_each_entry(c, p, &hash_errmap[bucket], list) {
- if (!strcmp(c->name, errstr)) {
+ if (c->namelen==len && !memcmp(c->name, errstr, len)) {
errno = c->val;
break;
}
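
With namelen cached once at init time, v9fs_errstr2errno() can hash and memcmp the counted, non-terminated error string straight off the wire. A usage sketch (not taken from this diff) of mapping an Rerror reply to a negative errno:

/* Sketch only: translate a counted Rerror string into a local errno. */
static int rerror_to_errno_sketch(struct v9fs_fcall *rc)
{
	struct v9fs_str *e = &rc->params.rerror.error;

	return -v9fs_errstr2errno(e->str, e->len);
}
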
diff --git a/fs/9p/error.h b/fs/9p/error.h
index 78f89acf7c9..a9794e85fe5 100644
--- a/fs/9p/error.h
+++ b/fs/9p/error.h
@@ -36,6 +36,7 @@ struct errormap {
char *name;
int val;
+ int namelen;
struct hlist_node list;
};
@@ -175,4 +176,3 @@ static struct errormap errmap[] = {
};
extern int v9fs_error_init(void);
-extern int v9fs_errstr2errno(char *errstr);
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index d95f8626d17..eda449778fa 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -31,9 +31,6 @@
#include "v9fs.h"
#include "9p.h"
#include "v9fs_vfs.h"
-#include "transport.h"
-#include "mux.h"
-#include "conv.h"
#include "fid.h"
/**
@@ -164,7 +161,7 @@ static struct v9fs_fid *v9fs_fid_walk_up(struct dentry *dentry)
return v9fs_fid_create(dentry, v9ses, fidnum, 0);
clunk_fid:
- v9fs_t_clunk(v9ses, fidnum, NULL);
+ v9fs_t_clunk(v9ses, fidnum);
return ERR_PTR(err);
}
diff --git a/fs/9p/mux.c b/fs/9p/mux.c
index 8835b576f74..945cb368d45 100644
--- a/fs/9p/mux.c
+++ b/fs/9p/mux.c
@@ -4,7 +4,7 @@
* Protocol Multiplexer
*
* Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
- * Copyright (C) 2004 by Latchesar Ionkov <lucho@ionkov.net>
+ * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,448 +28,943 @@
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
+#include <linux/poll.h>
#include <linux/kthread.h>
#include <linux/idr.h>
#include "debug.h"
#include "v9fs.h"
#include "9p.h"
-#include "transport.h"
#include "conv.h"
+#include "transport.h"
#include "mux.h"
+#define ERREQFLUSH 1
+#define SCHED_TIMEOUT 10
+#define MAXPOLLWADDR 2
+
+enum {
+ Rworksched = 1, /* read work scheduled or running */
+ Rpending = 2, /* can read */
+ Wworksched = 4, /* write work scheduled or running */
+ Wpending = 8, /* can write */
+};
+
+struct v9fs_mux_poll_task;
+
+struct v9fs_req {
+ int tag;
+ struct v9fs_fcall *tcall;
+ struct v9fs_fcall *rcall;
+ int err;
+ v9fs_mux_req_callback cb;
+ void *cba;
+ struct list_head req_list;
+};
+
+struct v9fs_mux_data {
+ spinlock_t lock;
+ struct list_head mux_list;
+ struct v9fs_mux_poll_task *poll_task;
+ int msize;
+ unsigned char *extended;
+ struct v9fs_transport *trans;
+ struct v9fs_idpool tidpool;
+ int err;
+ wait_queue_head_t equeue;
+ struct list_head req_list;
+ struct list_head unsent_req_list;
+ struct v9fs_fcall *rcall;
+ int rpos;
+ char *rbuf;
+ int wpos;
+ int wsize;
+ char *wbuf;
+ wait_queue_t poll_wait[MAXPOLLWADDR];
+ wait_queue_head_t *poll_waddr[MAXPOLLWADDR];
+ poll_table pt;
+ struct work_struct rq;
+ struct work_struct wq;
+ unsigned long wsched;
+};
+
+struct v9fs_mux_poll_task {
+ struct task_struct *task;
+ struct list_head mux_list;
+ int muxnum;
+};
+
+struct v9fs_mux_rpc {
+ struct v9fs_mux_data *m;
+ struct v9fs_req *req;
+ int err;
+ struct v9fs_fcall *rcall;
+ wait_queue_head_t wqueue;
+};
+
+static int v9fs_poll_proc(void *);
+static void v9fs_read_work(void *);
+static void v9fs_write_work(void *);
+static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
+ poll_table * p);
+static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
+static void v9fs_mux_put_tag(struct v9fs_mux_data *, u16);
+
+static DECLARE_MUTEX(v9fs_mux_task_lock);
+static struct workqueue_struct *v9fs_mux_wq;
+
+static int v9fs_mux_num;
+static int v9fs_mux_poll_task_num;
+static struct v9fs_mux_poll_task v9fs_mux_poll_tasks[100];
+
+int v9fs_mux_global_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++)
+ v9fs_mux_poll_tasks[i].task = NULL;
+
+ v9fs_mux_wq = create_workqueue("v9fs");
+ if (!v9fs_mux_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void v9fs_mux_global_exit(void)
+{
+ destroy_workqueue(v9fs_mux_wq);
+}
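
v9fs_mux_global_init() and v9fs_mux_global_exit() bracket the lifetime of the shared "v9fs" workqueue and are meant to run once per module load, presumably from the filesystem's module init and exit paths (not shown in this section). A sketch of that pairing, with placeholder names:

#include <linux/init.h>
#include <linux/module.h>

static int __init example_init(void)
{
	return v9fs_mux_global_init();	/* creates the "v9fs" workqueue */
}

static void __exit example_exit(void)
{
	v9fs_mux_global_exit();		/* destroys it */
}

module_init(example_init);
module_exit(example_exit);
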
+
/**
- * dprintcond - print condition of session info
- * @v9ses: session info structure
- * @req: RPC request structure
+ * v9fs_mux_calc_poll_procs - calculates the number of polling procs
+ * based on the number of mounted v9fs filesystems.
*
+ * The current implementation returns sqrt of the number of mounts.
*/
+inline int v9fs_mux_calc_poll_procs(int muxnum)
+{
+ int n;
+
+ if (v9fs_mux_poll_task_num)
+ n = muxnum / v9fs_mux_poll_task_num +
+ (muxnum % v9fs_mux_poll_task_num ? 1 : 0);
+ else
+ n = 1;
+
+ if (n > ARRAY_SIZE(v9fs_mux_poll_tasks))
+ n = ARRAY_SIZE(v9fs_mux_poll_tasks);
+
+ return n;
+}
-static inline int
-dprintcond(struct v9fs_session_info *v9ses, struct v9fs_rpcreq *req)
+static int v9fs_mux_poll_start(struct v9fs_mux_data *m)
{
- dprintk(DEBUG_MUX, "condition: %d, %p\n", v9ses->transport->status,
- req->rcall);
+ int i, n;
+ struct v9fs_mux_poll_task *vpt, *vptlast;
+ struct task_struct *pproc;
+
+ dprintk(DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, v9fs_mux_num,
+ v9fs_mux_poll_task_num);
+ up(&v9fs_mux_task_lock);
+
+ n = v9fs_mux_calc_poll_procs(v9fs_mux_num + 1);
+ if (n > v9fs_mux_poll_task_num) {
+ for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
+ if (v9fs_mux_poll_tasks[i].task == NULL) {
+ vpt = &v9fs_mux_poll_tasks[i];
+ dprintk(DEBUG_MUX, "create proc %p\n", vpt);
+ pproc = kthread_create(v9fs_poll_proc, vpt,
+ "v9fs-poll");
+
+ if (!IS_ERR(pproc)) {
+ vpt->task = pproc;
+ INIT_LIST_HEAD(&vpt->mux_list);
+ vpt->muxnum = 0;
+ v9fs_mux_poll_task_num++;
+ wake_up_process(vpt->task);
+ }
+ break;
+ }
+ }
+
+ if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks))
+ dprintk(DEBUG_ERROR, "warning: no free poll slots\n");
+ }
+
+ n = (v9fs_mux_num + 1) / v9fs_mux_poll_task_num +
+ ((v9fs_mux_num + 1) % v9fs_mux_poll_task_num ? 1 : 0);
+
+ vptlast = NULL;
+ for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
+ vpt = &v9fs_mux_poll_tasks[i];
+ if (vpt->task != NULL) {
+ vptlast = vpt;
+ if (vpt->muxnum < n) {
+ dprintk(DEBUG_MUX, "put in proc %d\n", i);
+ list_add(&m->mux_list, &vpt->mux_list);
+ vpt->muxnum++;
+ m->poll_task = vpt;
+ memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
+ init_poll_funcptr(&m->pt, v9fs_pollwait);
+ break;
+ }
+ }
+ }
+
+ if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks)) {
+ if (vptlast == NULL)
+ return -ENOMEM;
+
+ dprintk(DEBUG_MUX, "put in proc %d\n", i);
+ list_add(&m->mux_list, &vptlast->mux_list);
+ vptlast->muxnum++;
+ m->poll_task = vptlast;
+ memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
+ init_poll_funcptr(&m->pt, v9fs_pollwait);
+ }
+
+ v9fs_mux_num++;
+ down(&v9fs_mux_task_lock);
+
return 0;
}
+static void v9fs_mux_poll_stop(struct v9fs_mux_data *m)
+{
+ int i;
+ struct v9fs_mux_poll_task *vpt;
+
+ up(&v9fs_mux_task_lock);
+ vpt = m->poll_task;
+ list_del(&m->mux_list);
+ for(i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
+ if (m->poll_waddr[i] != NULL) {
+ remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]);
+ m->poll_waddr[i] = NULL;
+ }
+ }
+ vpt->muxnum--;
+ if (!vpt->muxnum) {
+ dprintk(DEBUG_MUX, "destroy proc %p\n", vpt);
+ send_sig(SIGKILL, vpt->task, 1);
+ vpt->task = NULL;
+ v9fs_mux_poll_task_num--;
+ }
+ v9fs_mux_num--;
+ down(&v9fs_mux_task_lock);
+}
+
/**
- * xread - force read of a certain number of bytes
- * @v9ses: session info structure
- * @ptr: pointer to buffer
- * @sz: number of bytes to read
+ * v9fs_mux_init - allocate and initialize the per-session mux data
+ * Creates the polling task if this is the first session.
*
- * Chuck Cranor CS-533 project1
+ * @trans - transport structure
+ * @msize - maximum message size
+ * @extended - pointer to the extended flag
*/
-
-static int xread(struct v9fs_session_info *v9ses, void *ptr, unsigned long sz)
+struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize,
+ unsigned char *extended)
{
- int rd = 0;
- int ret = 0;
- while (rd < sz) {
- ret = v9ses->transport->read(v9ses->transport, ptr, sz - rd);
- if (ret <= 0) {
- dprintk(DEBUG_ERROR, "xread errno %d\n", ret);
- return ret;
+ int i, n;
+ struct v9fs_mux_data *m, *mtmp;
+
+ dprintk(DEBUG_MUX, "transport %p msize %d\n", trans, msize);
+ m = kmalloc(sizeof(struct v9fs_mux_data), GFP_KERNEL);
+ if (!m)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&m->lock);
+ INIT_LIST_HEAD(&m->mux_list);
+ m->msize = msize;
+ m->extended = extended;
+ m->trans = trans;
+ idr_init(&m->tidpool.pool);
+ init_MUTEX(&m->tidpool.lock);
+ m->err = 0;
+ init_waitqueue_head(&m->equeue);
+ INIT_LIST_HEAD(&m->req_list);
+ INIT_LIST_HEAD(&m->unsent_req_list);
+ m->rcall = NULL;
+ m->rpos = 0;
+ m->rbuf = NULL;
+ m->wpos = m->wsize = 0;
+ m->wbuf = NULL;
+ INIT_WORK(&m->rq, v9fs_read_work, m);
+ INIT_WORK(&m->wq, v9fs_write_work, m);
+ m->wsched = 0;
+ memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
+ m->poll_task = NULL;
+ n = v9fs_mux_poll_start(m);
+ if (n)
+ return ERR_PTR(n);
+
+ n = trans->poll(trans, &m->pt);
+ if (n & POLLIN) {
+ dprintk(DEBUG_MUX, "mux %p can read\n", m);
+ set_bit(Rpending, &m->wsched);
+ }
+
+ if (n & POLLOUT) {
+ dprintk(DEBUG_MUX, "mux %p can write\n", m);
+ set_bit(Wpending, &m->wsched);
+ }
+
+ for(i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
+ if (IS_ERR(m->poll_waddr[i])) {
+ v9fs_mux_poll_stop(m);
+ mtmp = (void *)m->poll_waddr; /* the error code */
+ kfree(m);
+ m = mtmp;
+ break;
}
- rd += ret;
- ptr += ret;
}
- return (rd);
+
+ return m;
}
/**
- * read_message - read a full 9P2000 fcall packet
- * @v9ses: session info structure
- * @rcall: fcall structure to read into
- * @rcalllen: size of fcall buffer
- *
+ * v9fs_mux_destroy - cancels all pending requests and frees mux resources
*/
+void v9fs_mux_destroy(struct v9fs_mux_data *m)
+{
+ dprintk(DEBUG_MUX, "mux %p prev %p next %p\n", m,
+ m->mux_list.prev, m->mux_list.next);
+ v9fs_mux_cancel(m, -ECONNRESET);
+
+ if (!list_empty(&m->req_list)) {
+ /* wait until all processes waiting on this session exit */
+ dprintk(DEBUG_MUX, "mux %p waiting for empty request queue\n",
+ m);
+ wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000);
+ dprintk(DEBUG_MUX, "mux %p request queue empty: %d\n", m,
+ list_empty(&m->req_list));
+ }
+
+ v9fs_mux_poll_stop(m);
+ m->trans = NULL;
+
+ kfree(m);
+}
-static int
-read_message(struct v9fs_session_info *v9ses,
- struct v9fs_fcall *rcall, int rcalllen)
+/**
+ * v9fs_pollwait - called by a file's poll operation to add the v9fs-poll task
+ * to the file's wait queue
+ */
+static void
+v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
+ poll_table * p)
{
- unsigned char buf[4];
- void *data;
- int size = 0;
- int res = 0;
-
- res = xread(v9ses, buf, sizeof(buf));
- if (res < 0) {
- dprintk(DEBUG_ERROR,
- "Reading of count field failed returned: %d\n", res);
- return res;
+ int i;
+ struct v9fs_mux_data *m;
+
+ m = container_of(p, struct v9fs_mux_data, pt);
+ for(i = 0; i < ARRAY_SIZE(m->poll_waddr); i++)
+ if (m->poll_waddr[i] == NULL)
+ break;
+
+ if (i >= ARRAY_SIZE(m->poll_waddr)) {
+ dprintk(DEBUG_ERROR, "not enough wait_address slots\n");
+ return;
}
- if (res < 4) {
- dprintk(DEBUG_ERROR,
- "Reading of count field failed returned: %d\n", res);
- return -EIO;
+ m->poll_waddr[i] = wait_address;
+
+ if (!wait_address) {
+ dprintk(DEBUG_ERROR, "no wait_address\n");
+ m->poll_waddr[i] = ERR_PTR(-EIO);
+ return;
}
- size = buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
- dprintk(DEBUG_MUX, "got a packet count: %d\n", size);
+ init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task);
+ add_wait_queue(wait_address, &m->poll_wait[i]);
+}
+
+/**
+ * v9fs_poll_mux - polls a mux and schedules read or write works if necessary
+ */
+static inline void v9fs_poll_mux(struct v9fs_mux_data *m)
+{
+ int n;
- /* adjust for the four bytes of size */
- size -= 4;
+ if (m->err < 0)
+ return;
- if (size > v9ses->maxdata) {
- dprintk(DEBUG_ERROR, "packet too big: %d\n", size);
- return -E2BIG;
+ n = m->trans->poll(m->trans, NULL);
+ if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
+ dprintk(DEBUG_MUX, "error mux %p err %d\n", m, n);
+ if (n >= 0)
+ n = -ECONNRESET;
+ v9fs_mux_cancel(m, n);
}
- data = kmalloc(size, GFP_KERNEL);
- if (!data) {
- eprintk(KERN_WARNING, "out of memory\n");
- return -ENOMEM;
+ if (n & POLLIN) {
+ set_bit(Rpending, &m->wsched);
+ dprintk(DEBUG_MUX, "mux %p can read\n", m);
+ if (!test_and_set_bit(Rworksched, &m->wsched)) {
+ dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
+ queue_work(v9fs_mux_wq, &m->rq);
+ }
}
- res = xread(v9ses, data, size);
- if (res < size) {
- dprintk(DEBUG_ERROR, "Reading of fcall failed returned: %d\n",
- res);
- kfree(data);
- return res;
+ if (n & POLLOUT) {
+ set_bit(Wpending, &m->wsched);
+ dprintk(DEBUG_MUX, "mux %p can write\n", m);
+ if ((m->wsize || !list_empty(&m->unsent_req_list))
+ && !test_and_set_bit(Wworksched, &m->wsched)) {
+ dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
+ queue_work(v9fs_mux_wq, &m->wq);
+ }
}
+}
+
+/**
+ * v9fs_poll_proc - polls all v9fs transports for new events and queues
+ * the appropriate work to the work queue
+ */
+static int v9fs_poll_proc(void *a)
+{
+ struct v9fs_mux_data *m, *mtmp;
+ struct v9fs_mux_poll_task *vpt;
- /* we now have an in-memory string that is the reply.
- * deserialize it. There is very little to go wrong at this point
- * save for v9fs_alloc errors.
- */
- res = v9fs_deserialize_fcall(v9ses, size, data, v9ses->maxdata,
- rcall, rcalllen);
+ vpt = a;
+ dprintk(DEBUG_MUX, "start %p %p\n", current, vpt);
+ allow_signal(SIGKILL);
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (signal_pending(current))
+ break;
- kfree(data);
+ list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) {
+ v9fs_poll_mux(m);
+ }
- if (res < 0)
- return res;
+ dprintk(DEBUG_MUX, "sleeping...\n");
+ schedule_timeout(SCHED_TIMEOUT * HZ);
+ }
+ __set_current_state(TASK_RUNNING);
+ dprintk(DEBUG_MUX, "finish\n");
return 0;
}
/**
- * v9fs_recv - receive an RPC response for a particular tag
- * @v9ses: session info structure
- * @req: RPC request structure
- *
+ * v9fs_write_work - called when a transport can send some data
*/
-
-static int v9fs_recv(struct v9fs_session_info *v9ses, struct v9fs_rpcreq *req)
+static void v9fs_write_work(void *a)
{
- int ret = 0;
+ int n, err;
+ struct v9fs_mux_data *m;
+ struct v9fs_req *req;
- dprintk(DEBUG_MUX, "waiting for response: %d\n", req->tcall->tag);
- ret = wait_event_interruptible(v9ses->read_wait,
- ((v9ses->transport->status != Connected) ||
- (req->rcall != 0) || (req->err < 0) ||
- dprintcond(v9ses, req)));
+ m = a;
- dprintk(DEBUG_MUX, "got it: rcall %p\n", req->rcall);
+ if (m->err < 0) {
+ clear_bit(Wworksched, &m->wsched);
+ return;
+ }
- spin_lock(&v9ses->muxlock);
- list_del(&req->next);
- spin_unlock(&v9ses->muxlock);
+ if (!m->wsize) {
+ if (list_empty(&m->unsent_req_list)) {
+ clear_bit(Wworksched, &m->wsched);
+ return;
+ }
- if (req->err < 0)
- return req->err;
+ spin_lock(&m->lock);
+ req =
+ list_entry(m->unsent_req_list.next, struct v9fs_req,
+ req_list);
+ list_move_tail(&req->req_list, &m->req_list);
+ m->wbuf = req->tcall->sdata;
+ m->wsize = req->tcall->size;
+ m->wpos = 0;
+ dump_data(m->wbuf, m->wsize);
+ spin_unlock(&m->lock);
+ }
- if (v9ses->transport->status == Disconnected)
- return -ECONNRESET;
+ dprintk(DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos, m->wsize);
+ clear_bit(Wpending, &m->wsched);
+ err = m->trans->write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos);
+ dprintk(DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
+ if (err == -EAGAIN) {
+ clear_bit(Wworksched, &m->wsched);
+ return;
+ }
- return ret;
-}
+ if (err <= 0)
+ goto error;
-/**
- * v9fs_send - send a 9P request
- * @v9ses: session info structure
- * @req: RPC request to send
- *
- */
+ m->wpos += err;
+ if (m->wpos == m->wsize)
+ m->wpos = m->wsize = 0;
+
+ if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
+ if (test_and_clear_bit(Wpending, &m->wsched))
+ n = POLLOUT;
+ else
+ n = m->trans->poll(m->trans, NULL);
+
+ if (n & POLLOUT) {
+ dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
+ queue_work(v9fs_mux_wq, &m->wq);
+ } else
+ clear_bit(Wworksched, &m->wsched);
+ } else
+ clear_bit(Wworksched, &m->wsched);
+
+ return;
-static int v9fs_send(struct v9fs_session_info *v9ses, struct v9fs_rpcreq *req)
+ error:
+ v9fs_mux_cancel(m, err);
+ clear_bit(Wworksched, &m->wsched);
+}
+
+static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
- int ret = -1;
- void *data = NULL;
- struct v9fs_fcall *tcall = req->tcall;
+ int ecode, tag;
+ struct v9fs_str *ename;
- data = kmalloc(v9ses->maxdata + V9FS_IOHDRSZ, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ tag = req->tag;
+ if (req->rcall->id == RERROR && !req->err) {
+ ecode = req->rcall->params.rerror.errno;
+ ename = &req->rcall->params.rerror.error;
- tcall->size = 0; /* enforce size recalculation */
- ret =
- v9fs_serialize_fcall(v9ses, tcall, data,
- v9ses->maxdata + V9FS_IOHDRSZ);
- if (ret < 0)
- goto free_data;
+ dprintk(DEBUG_MUX, "Rerror %.*s\n", ename->len, ename->str);
- spin_lock(&v9ses->muxlock);
- list_add(&req->next, &v9ses->mux_fcalls);
- spin_unlock(&v9ses->muxlock);
+ if (*m->extended)
+ req->err = -ecode;
- dprintk(DEBUG_MUX, "sending message: tag %d size %d\n", tcall->tag,
- tcall->size);
- ret = v9ses->transport->write(v9ses->transport, data, tcall->size);
+ if (!req->err) {
+ req->err = v9fs_errstr2errno(ename->str, ename->len);
- if (ret != tcall->size) {
- spin_lock(&v9ses->muxlock);
- list_del(&req->next);
- kfree(req->rcall);
+ if (!req->err) { /* string match failed */
+ PRINT_FCALL_ERROR("unknown error", req->rcall);
+ }
+
+ if (!req->err)
+ req->err = -ESERVERFAULT;
+ }
+ } else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
+ dprintk(DEBUG_ERROR, "fcall mismatch: expected %d, got %d\n",
+ req->tcall->id + 1, req->rcall->id);
+ if (!req->err)
+ req->err = -EIO;
+ }
- spin_unlock(&v9ses->muxlock);
- if (ret >= 0)
- ret = -EREMOTEIO;
+ if (req->cb && req->err != ERREQFLUSH) {
+ dprintk(DEBUG_MUX, "calling callback tcall %p rcall %p\n",
+ req->tcall, req->rcall);
+
+ (*req->cb) (req->cba, req->tcall, req->rcall, req->err);
+ req->cb = NULL;
} else
- ret = 0;
+ kfree(req->rcall);
- free_data:
- kfree(data);
- return ret;
+ v9fs_mux_put_tag(m, tag);
+
+ wake_up(&m->equeue);
+ kfree(req);
}
/**
- * v9fs_mux_rpc - send a request, receive a response
- * @v9ses: session info structure
- * @tcall: fcall to send
- * @rcall: buffer to place response into
- *
+ * v9fs_read_work - called when there is some data to be read from a transport
*/
-
-long
-v9fs_mux_rpc(struct v9fs_session_info *v9ses, struct v9fs_fcall *tcall,
- struct v9fs_fcall **rcall)
+static void v9fs_read_work(void *a)
{
- int tid = -1;
- struct v9fs_fcall *fcall = NULL;
- struct v9fs_rpcreq req;
- int ret = -1;
-
- if (!v9ses)
- return -EINVAL;
-
- if (!v9ses->transport || v9ses->transport->status != Connected)
- return -EIO;
+ int n, err;
+ struct v9fs_mux_data *m;
+ struct v9fs_req *req, *rptr, *rreq;
+ struct v9fs_fcall *rcall;
+ char *rbuf;
+
+ m = a;
+
+ if (m->err < 0)
+ return;
+
+ rcall = NULL;
+ dprintk(DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);
+
+ if (!m->rcall) {
+ m->rcall =
+ kmalloc(sizeof(struct v9fs_fcall) + m->msize, GFP_KERNEL);
+ if (!m->rcall) {
+ err = -ENOMEM;
+ goto error;
+ }
- if (rcall)
- *rcall = NULL;
+ m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);
+ m->rpos = 0;
+ }
- if (tcall->id != TVERSION) {
- tid = v9fs_get_idpool(&v9ses->tidpool);
- if (tid < 0)
- return -ENOMEM;
+ clear_bit(Rpending, &m->wsched);
+ err = m->trans->read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos);
+ dprintk(DEBUG_MUX, "mux %p got %d bytes\n", m, err);
+ if (err == -EAGAIN) {
+ clear_bit(Rworksched, &m->wsched);
+ return;
}
- tcall->tag = tid;
+ if (err <= 0)
+ goto error;
- req.tcall = tcall;
- req.err = 0;
- req.rcall = NULL;
+ m->rpos += err;
+ while (m->rpos > 4) {
+ n = le32_to_cpu(*(__le32 *) m->rbuf);
+ if (n >= m->msize) {
+ dprintk(DEBUG_ERROR,
+ "requested packet size too big: %d\n", n);
+ err = -EIO;
+ goto error;
+ }
- ret = v9fs_send(v9ses, &req);
+ if (m->rpos < n)
+ break;
- if (ret < 0) {
- if (tcall->id != TVERSION)
- v9fs_put_idpool(tid, &v9ses->tidpool);
- dprintk(DEBUG_MUX, "error %d\n", ret);
- return ret;
- }
+ dump_data(m->rbuf, n);
+ err =
+ v9fs_deserialize_fcall(m->rbuf, n, m->rcall, *m->extended);
+ if (err < 0) {
+ goto error;
+ }
+
+ rcall = m->rcall;
+ rbuf = m->rbuf;
+ if (m->rpos > n) {
+ m->rcall = kmalloc(sizeof(struct v9fs_fcall) + m->msize,
+ GFP_KERNEL);
+ if (!m->rcall) {
+ err = -ENOMEM;
+ goto error;
+ }
- ret = v9fs_recv(v9ses, &req);
-
- fcall = req.rcall;
-
- dprintk(DEBUG_MUX, "received: tag=%x, ret=%d\n", tcall->tag, ret);
- if (ret == -ERESTARTSYS) {
- if (v9ses->transport->status != Disconnected
- && tcall->id != TFLUSH) {
- unsigned long flags;
-
- dprintk(DEBUG_MUX, "flushing the tag: %d\n",
- tcall->tag);
- clear_thread_flag(TIF_SIGPENDING);
- v9fs_t_flush(v9ses, tcall->tag);
- spin_lock_irqsave(&current->sighand->siglock, flags);
- recalc_sigpending();
- spin_unlock_irqrestore(&current->sighand->siglock,
- flags);
- dprintk(DEBUG_MUX, "flushing done\n");
+ m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);
+ memmove(m->rbuf, rbuf + n, m->rpos - n);
+ m->rpos -= n;
+ } else {
+ m->rcall = NULL;
+ m->rbuf = NULL;
+ m->rpos = 0;
}
- goto release_req;
- } else if (ret < 0)
- goto release_req;
-
- if (!fcall)
- ret = -EIO;
- else {
- if (fcall->id == RERROR) {
- ret = v9fs_errstr2errno(fcall->params.rerror.error);
- if (ret == 0) { /* string match failed */
- if (fcall->params.rerror.errno)
- ret = -(fcall->params.rerror.errno);
- else
- ret = -ESERVERFAULT;
+ dprintk(DEBUG_MUX, "mux %p fcall id %d tag %d\n", m, rcall->id,
+ rcall->tag);
+
+ req = NULL;
+ spin_lock(&m->lock);
+ list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
+ if (rreq->tag == rcall->tag) {
+ req = rreq;
+ req->rcall = rcall;
+ list_del(&req->req_list);
+ spin_unlock(&m->lock);
+ process_request(m, req);
+ break;
}
- } else if (fcall->id != tcall->id + 1) {
- dprintk(DEBUG_ERROR,
- "fcall mismatch: expected %d, got %d\n",
- tcall->id + 1, fcall->id);
- ret = -EIO;
+
+ }
+
+ if (!req) {
+ spin_unlock(&m->lock);
+ if (err >= 0 && rcall->id != RFLUSH)
+ dprintk(DEBUG_ERROR,
+ "unexpected response mux %p id %d tag %d\n",
+ m, rcall->id, rcall->tag);
+ kfree(rcall);
}
}
- release_req:
- if (tcall->id != TVERSION)
- v9fs_put_idpool(tid, &v9ses->tidpool);
- if (rcall)
- *rcall = fcall;
- else
- kfree(fcall);
+ if (!list_empty(&m->req_list)) {
+ if (test_and_clear_bit(Rpending, &m->wsched))
+ n = POLLIN;
+ else
+ n = m->trans->poll(m->trans, NULL);
+
+ if (n & POLLIN) {
+ dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
+ queue_work(v9fs_mux_wq, &m->rq);
+ } else
+ clear_bit(Rworksched, &m->wsched);
+ } else
+ clear_bit(Rworksched, &m->wsched);
+
+ return;
- return ret;
+ error:
+ v9fs_mux_cancel(m, err);
+ clear_bit(Rworksched, &m->wsched);
}
/**
- * v9fs_mux_cancel_requests - cancels all pending requests
+ * v9fs_send_request - send 9P request
+ * The function can sleep until the request is scheduled for sending.
+ * The function can be interrupted. Return from the function is not
+ * a guarantee that the request is sent successfully. Can return errors
+ * that can be retrieved by the PTR_ERR macro.
*
- * @v9ses: session info structure
- * @err: error code to return to the requests
+ * @m: mux data
+ * @tc: request to be sent
+ * @cb: callback function to call when response is received
+ * @cba: parameter to pass to the callback function
*/
-void v9fs_mux_cancel_requests(struct v9fs_session_info *v9ses, int err)
+static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
+ struct v9fs_fcall *tc,
+ v9fs_mux_req_callback cb, void *cba)
{
- struct v9fs_rpcreq *rptr;
- struct v9fs_rpcreq *rreq;
+ int n;
+ struct v9fs_req *req;
- dprintk(DEBUG_MUX, " %d\n", err);
- spin_lock(&v9ses->muxlock);
- list_for_each_entry_safe(rreq, rptr, &v9ses->mux_fcalls, next) {
- rreq->err = err;
- }
- spin_unlock(&v9ses->muxlock);
- wake_up_all(&v9ses->read_wait);
-}
+ dprintk(DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
+ tc, tc->id);
+ if (m->err < 0)
+ return ERR_PTR(m->err);
-/**
- * v9fs_recvproc - kproc to handle demultiplexing responses
- * @data: session info structure
- *
- */
+ req = kmalloc(sizeof(struct v9fs_req), GFP_KERNEL);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
-static int v9fs_recvproc(void *data)
-{
- struct v9fs_session_info *v9ses = (struct v9fs_session_info *)data;
- struct v9fs_fcall *rcall = NULL;
- struct v9fs_rpcreq *rptr;
- struct v9fs_rpcreq *req;
- struct v9fs_rpcreq *rreq;
- int err = 0;
+ if (tc->id == TVERSION)
+ n = V9FS_NOTAG;
+ else
+ n = v9fs_mux_get_tag(m);
- allow_signal(SIGKILL);
- set_current_state(TASK_INTERRUPTIBLE);
- complete(&v9ses->proccmpl);
- while (!kthread_should_stop() && err >= 0) {
- req = rptr = rreq = NULL;
-
- rcall = kmalloc(v9ses->maxdata + V9FS_IOHDRSZ, GFP_KERNEL);
- if (!rcall) {
- eprintk(KERN_ERR, "no memory for buffers\n");
- break;
- }
+ if (n < 0)
+ return ERR_PTR(-ENOMEM);
- err = read_message(v9ses, rcall, v9ses->maxdata + V9FS_IOHDRSZ);
- spin_lock(&v9ses->muxlock);
- if (err < 0) {
- list_for_each_entry_safe(rreq, rptr, &v9ses->mux_fcalls, next) {
- rreq->err = err;
- }
- if(err != -ERESTARTSYS)
- eprintk(KERN_ERR,
- "Transport error while reading message %d\n", err);
- } else {
- list_for_each_entry_safe(rreq, rptr, &v9ses->mux_fcalls, next) {
- if (rreq->tcall->tag == rcall->tag) {
- req = rreq;
- req->rcall = rcall;
- break;
- }
- }
- }
+ v9fs_set_tag(tc, n);
- if (req && (req->tcall->id == TFLUSH)) {
- struct v9fs_rpcreq *treq = NULL;
- list_for_each_entry_safe(treq, rptr, &v9ses->mux_fcalls, next) {
- if (treq->tcall->tag ==
- req->tcall->params.tflush.oldtag) {
- list_del(&rptr->next);
- kfree(treq->rcall);
- break;
- }
+ req->tag = n;
+ req->tcall = tc;
+ req->rcall = NULL;
+ req->err = 0;
+ req->cb = cb;
+ req->cba = cba;
+
+ spin_lock(&m->lock);
+ list_add_tail(&req->req_list, &m->unsent_req_list);
+ spin_unlock(&m->lock);
+
+ if (test_and_clear_bit(Wpending, &m->wsched))
+ n = POLLOUT;
+ else
+ n = m->trans->poll(m->trans, NULL);
+
+ if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
+ queue_work(v9fs_mux_wq, &m->wq);
+
+ return req;
+}
+
+static inline void
+v9fs_mux_flush_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc,
+ int err)
+{
+ v9fs_mux_req_callback cb;
+ int tag;
+ struct v9fs_mux_data *m;
+ struct v9fs_req *req, *rptr;
+
+ m = a;
+ dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, tc,
+ rc, err, tc->params.tflush.oldtag);
+
+ spin_lock(&m->lock);
+ cb = NULL;
+ tag = tc->params.tflush.oldtag;
+ list_for_each_entry_safe(req, rptr, &m->req_list, req_list) {
+ if (req->tag == tag) {
+ list_del(&req->req_list);
+ if (req->cb) {
+ cb = req->cb;
+ req->cb = NULL;
+ spin_unlock(&m->lock);
+ (*cb) (req->cba, req->tcall, req->rcall,
+ req->err);
}
+ kfree(req);
+ wake_up(&m->equeue);
+ break;
}
+ }
- spin_unlock(&v9ses->muxlock);
+ if (!cb)
+ spin_unlock(&m->lock);
- if (!req) {
- if (err >= 0)
- dprintk(DEBUG_ERROR,
- "unexpected response: id %d tag %d\n",
- rcall->id, rcall->tag);
+ v9fs_mux_put_tag(m, tag);
+ kfree(tc);
+ kfree(rc);
+}
- kfree(rcall);
- }
+static void
+v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req)
+{
+ struct v9fs_fcall *fc;
- wake_up_all(&v9ses->read_wait);
- set_current_state(TASK_INTERRUPTIBLE);
+ dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);
+
+ fc = v9fs_create_tflush(req->tag);
+ v9fs_send_request(m, fc, v9fs_mux_flush_cb, m);
+}
+
+static void
+v9fs_mux_rpc_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc, int err)
+{
+ struct v9fs_mux_rpc *r;
+
+ if (err == ERREQFLUSH) {
+ dprintk(DEBUG_MUX, "err req flush\n");
+ return;
}
- v9ses->transport->close(v9ses->transport);
+ r = a;
+ dprintk(DEBUG_MUX, "mux %p req %p tc %p rc %p err %d\n", r->m, r->req,
+ tc, rc, err);
+ r->rcall = rc;
+ r->err = err;
+ wake_up(&r->wqueue);
+}
- /* Inform all pending processes about the failure */
- wake_up_all(&v9ses->read_wait);
+/**
+ * v9fs_mux_rpc - sends 9P request and waits until a response is available.
+ * The function can be interrupted.
+ * @m: mux data
+ * @tc: request to be sent
+ * @rc: pointer where a pointer to the response is stored
+ */
+int
+v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
+ struct v9fs_fcall **rc)
+{
+ int err;
+ unsigned long flags;
+ struct v9fs_req *req;
+ struct v9fs_mux_rpc r;
+
+ r.err = 0;
+ r.rcall = NULL;
+ r.m = m;
+ init_waitqueue_head(&r.wqueue);
+
+ if (rc)
+ *rc = NULL;
+
+ req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ dprintk(DEBUG_MUX, "error %d\n", err);
+ return PTR_ERR(req);
+ }
- if (signal_pending(current))
- complete(&v9ses->proccmpl);
+ r.req = req;
+ dprintk(DEBUG_MUX, "mux %p tc %p tag %d rpc %p req %p\n", m, tc,
+ req->tag, &r, req);
+ err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
+ if (r.err < 0)
+ err = r.err;
+
+ if (err == -ERESTARTSYS && m->trans->status == Connected && m->err == 0) {
+ spin_lock(&m->lock);
+ req->tcall = NULL;
+ req->err = ERREQFLUSH;
+ spin_unlock(&m->lock);
+
+ clear_thread_flag(TIF_SIGPENDING);
+ v9fs_mux_flush_request(m, req);
+ spin_lock_irqsave(&current->sighand->siglock, flags);
+ recalc_sigpending();
+ spin_unlock_irqrestore(&current->sighand->siglock, flags);
+ }
- dprintk(DEBUG_MUX, "recvproc: end\n");
- v9ses->recvproc = NULL;
+ if (!err) {
+ if (r.rcall)
+ dprintk(DEBUG_MUX, "got response id %d tag %d\n",
+ r.rcall->id, r.rcall->tag);
+
+ if (rc)
+ *rc = r.rcall;
+ else
+ kfree(r.rcall);
+ } else {
+ kfree(r.rcall);
+ dprintk(DEBUG_MUX, "got error %d\n", err);
+ if (err > 0)
+ err = -EIO;
+ }
- return err >= 0;
+ return err;
}
/**
- * v9fs_mux_init - initialize multiplexer (spawn kproc)
- * @v9ses: session info structure
- * @dev_name: mount device information (to create unique kproc)
- *
+ * v9fs_mux_rpcnb - sends 9P request without waiting for response.
+ * @m: mux data
+ * @tc: request to be sent
+ * @cb: callback function to be called when response arrives
+ * @cba: value to pass to the callback function
*/
+int v9fs_mux_rpcnb(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
+ v9fs_mux_req_callback cb, void *a)
+{
+ int err;
+ struct v9fs_req *req;
+
+ req = v9fs_send_request(m, tc, cb, a);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ dprintk(DEBUG_MUX, "error %d\n", err);
+ return PTR_ERR(req);
+ }
+
+ dprintk(DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag);
+ return 0;
+}
-int v9fs_mux_init(struct v9fs_session_info *v9ses, const char *dev_name)
+/**
+ * v9fs_mux_cancel - cancel all pending requests with error
+ * @m: mux data
+ * @err: error code
+ */
+void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
{
- char procname[60];
-
- strncpy(procname, dev_name, sizeof(procname));
- procname[sizeof(procname) - 1] = 0;
-
- init_waitqueue_head(&v9ses->read_wait);
- init_completion(&v9ses->fcread);
- init_completion(&v9ses->proccmpl);
- spin_lock_init(&v9ses->muxlock);
- INIT_LIST_HEAD(&v9ses->mux_fcalls);
- v9ses->recvproc = NULL;
- v9ses->curfcall = NULL;
-
- v9ses->recvproc = kthread_create(v9fs_recvproc, v9ses,
- "v9fs_recvproc %s", procname);
-
- if (IS_ERR(v9ses->recvproc)) {
- eprintk(KERN_ERR, "cannot create receiving thread\n");
- v9fs_session_close(v9ses);
- return -ECONNABORTED;
+ struct v9fs_req *req, *rtmp;
+ LIST_HEAD(cancel_list);
+
+ dprintk(DEBUG_MUX, "mux %p err %d\n", m, err);
+ m->err = err;
+ spin_lock(&m->lock);
+ list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
+ list_move(&req->req_list, &cancel_list);
}
+ spin_unlock(&m->lock);
- wake_up_process(v9ses->recvproc);
- wait_for_completion(&v9ses->proccmpl);
+ list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
+ list_del(&req->req_list);
+ if (!req->err)
+ req->err = err;
- return 0;
+ if (req->cb)
+ (*req->cb) (req->cba, req->tcall, req->rcall, req->err);
+ else
+ kfree(req->rcall);
+
+ kfree(req);
+ }
+
+ wake_up(&m->equeue);
+}
+
+static u16 v9fs_mux_get_tag(struct v9fs_mux_data *m)
+{
+ int tag;
+
+ tag = v9fs_get_idpool(&m->tidpool);
+ if (tag < 0)
+ return V9FS_NOTAG;
+ else
+ return (u16) tag;
+}
+
+static void v9fs_mux_put_tag(struct v9fs_mux_data *m, u16 tag)
+{
+ if (tag != V9FS_NOTAG && v9fs_check_idpool(tag, &m->tidpool))
+ v9fs_put_idpool(tag, &m->tidpool);
}
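
The mux-to-poll-task balancing in v9fs_mux_poll_start() above reduces to a ceiling division of mounts over poll tasks. As a stand-alone sketch of that arithmetic (illustration only, not part of the patch):

	/* Sketch of the load computation used by v9fs_mux_calc_poll_procs()
	 * and v9fs_mux_poll_start(): with 5 mounts and 2 poll tasks, each
	 * task is given at most ceil(5 / 2) = 3 muxes.
	 */
	static int muxes_per_task(int muxnum, int ntasks)
	{
		if (!ntasks)
			return 1;	/* first mount: a fresh poll task takes it */
		return muxnum / ntasks + (muxnum % ntasks ? 1 : 0);
	}
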
diff --git a/fs/9p/mux.h b/fs/9p/mux.h
index 4994cb10bad..9473b84f24b 100644
--- a/fs/9p/mux.h
+++ b/fs/9p/mux.h
@@ -3,6 +3,7 @@
*
* Multiplexer Definitions
*
+ * Copyright (C) 2005 by Latchesar Ionkov <lucho@ionkov.net>
* Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -23,19 +24,35 @@
*
*/
-/* structure to manage each RPC transaction */
+struct v9fs_mux_data;
-struct v9fs_rpcreq {
- struct v9fs_fcall *tcall;
- struct v9fs_fcall *rcall;
- int err; /* error code if response failed */
+/**
+ * v9fs_mux_req_callback - callback function that is called when the
+ * response of a request is received. The callback is called from
+ * a workqueue and shouldn't block.
+ *
+ * @a - the pointer that was specified when the request was sent, to be
+ * passed to the callback
+ * @tc - request call
+ * @rc - response call
+ * @err - error code (non-zero if an error occurred)
+ */
+typedef void (*v9fs_mux_req_callback)(void *a, struct v9fs_fcall *tc,
+ struct v9fs_fcall *rc, int err);
+
+int v9fs_mux_global_init(void);
+void v9fs_mux_global_exit(void);
- /* XXX - could we put scatter/gather buffers here? */
+struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize,
+ unsigned char *extended);
+void v9fs_mux_destroy(struct v9fs_mux_data *);
- struct list_head next;
-};
+int v9fs_mux_send(struct v9fs_mux_data *m, struct v9fs_fcall *tc);
+struct v9fs_fcall *v9fs_mux_recv(struct v9fs_mux_data *m);
+int v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc, struct v9fs_fcall **rc);
+int v9fs_mux_rpcnb(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
+ v9fs_mux_req_callback cb, void *a);
-int v9fs_mux_init(struct v9fs_session_info *v9ses, const char *dev_name);
-long v9fs_mux_rpc(struct v9fs_session_info *v9ses,
- struct v9fs_fcall *tcall, struct v9fs_fcall **rcall);
-void v9fs_mux_cancel_requests(struct v9fs_session_info *v9ses, int err);
+void v9fs_mux_flush(struct v9fs_mux_data *m, int sendflush);
+void v9fs_mux_cancel(struct v9fs_mux_data *m, int err);
+int v9fs_errstr2errno(char *errstr, int len);
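
To make the new interface concrete, a minimal, hypothetical caller sketch follows (not part of the patch): v9fs_create_tclunk() is an assumed conv.c helper that builds the Tclunk fcall; everything else uses only the prototypes declared above. The synchronous path sleeps in v9fs_mux_rpc(); the callback path runs from the mux workqueue and therefore must not block.

	/* Hypothetical usage sketch -- v9fs_create_tclunk() is assumed. */
	static int example_clunk_sync(struct v9fs_session_info *v9ses, u32 fid)
	{
		struct v9fs_fcall *tc, *rc = NULL;
		int err;

		tc = v9fs_create_tclunk(fid);	/* assumed helper */
		if (IS_ERR(tc))
			return PTR_ERR(tc);

		err = v9fs_mux_rpc(v9ses->mux, tc, &rc);
		kfree(tc);
		kfree(rc);
		return err;
	}

	/* Non-blocking variant: the callback owns tc/rc and must not sleep. */
	static void example_clunk_cb(void *a, struct v9fs_fcall *tc,
				     struct v9fs_fcall *rc, int err)
	{
		kfree(tc);
		kfree(rc);
	}

	static int example_clunk_async(struct v9fs_session_info *v9ses, u32 fid)
	{
		struct v9fs_fcall *tc = v9fs_create_tclunk(fid);	/* assumed */

		if (IS_ERR(tc))
			return PTR_ERR(tc);
		return v9fs_mux_rpcnb(v9ses->mux, tc, example_clunk_cb, NULL);
	}
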
diff --git a/fs/9p/trans_fd.c b/fs/9p/trans_fd.c
index 63b58ce98ff..1a28ef97a3d 100644
--- a/fs/9p/trans_fd.c
+++ b/fs/9p/trans_fd.c
@@ -3,6 +3,7 @@
*
* File Descriptor Transport Layer
*
+ * Copyright (C) 2005 by Latchesar Ionkov <lucho@ionkov.net>
* Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -106,9 +107,6 @@ v9fs_fd_init(struct v9fs_session_info *v9ses, const char *addr, char *data)
return -ENOPROTOOPT;
}
- sema_init(&trans->writelock, 1);
- sema_init(&trans->readlock, 1);
-
ts = kmalloc(sizeof(struct v9fs_trans_fd), GFP_KERNEL);
if (!ts)
@@ -148,12 +146,12 @@ static void v9fs_fd_close(struct v9fs_transport *trans)
if (!trans)
return;
- trans->status = Disconnected;
- ts = trans->priv;
+ ts = xchg(&trans->priv, NULL);
if (!ts)
return;
+ trans->status = Disconnected;
if (ts->in_file)
fput(ts->in_file);
@@ -163,10 +161,55 @@ static void v9fs_fd_close(struct v9fs_transport *trans)
kfree(ts);
}
+static unsigned int
+v9fs_fd_poll(struct v9fs_transport *trans, struct poll_table_struct *pt)
+{
+ int ret, n;
+ struct v9fs_trans_fd *ts;
+ mm_segment_t oldfs;
+
+ if (!trans)
+ return -EIO;
+
+ ts = trans->priv;
+ if (trans->status != Connected || !ts)
+ return -EIO;
+
+ oldfs = get_fs();
+ set_fs(get_ds());
+
+ if (!ts->in_file->f_op || !ts->in_file->f_op->poll) {
+ ret = -EIO;
+ goto end;
+ }
+
+ ret = ts->in_file->f_op->poll(ts->in_file, pt);
+
+ if (ts->out_file != ts->in_file) {
+ if (!ts->out_file->f_op || !ts->out_file->f_op->poll) {
+ ret = -EIO;
+ goto end;
+ }
+
+ n = ts->out_file->f_op->poll(ts->out_file, pt);
+
+ ret &= ~POLLOUT;
+ n &= ~POLLIN;
+
+ ret |= n;
+ }
+
+end:
+ set_fs(oldfs);
+ return ret;
+}
+
+
struct v9fs_transport v9fs_trans_fd = {
.init = v9fs_fd_init,
.write = v9fs_fd_send,
.read = v9fs_fd_recv,
.close = v9fs_fd_close,
+ .poll = v9fs_fd_poll,
};
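
The mask merging in v9fs_fd_poll() above keeps read readiness from in_file and write readiness from out_file when the two differ. A quick worked case (illustrative only):

	/* If in_file reports POLLIN|POLLOUT and out_file reports only POLLIN,
	 * the merged mask drops in_file's POLLOUT, ignores out_file's POLLIN,
	 * and the transport is reported readable but not writable. */
	unsigned int in_mask  = POLLIN | POLLOUT;
	unsigned int out_mask = POLLIN;
	unsigned int merged   = (in_mask & ~POLLOUT) | (out_mask & ~POLLIN);
	/* merged == POLLIN */
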
diff --git a/fs/9p/trans_sock.c b/fs/9p/trans_sock.c
index 6a9a75d40f7..44e830697ac 100644
--- a/fs/9p/trans_sock.c
+++ b/fs/9p/trans_sock.c
@@ -3,6 +3,7 @@
*
* Socket Transport Layer
*
+ * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
* Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
* Copyright (C) 1995, 1996 by Olaf Kirch <okir@monad.swb.de>
@@ -36,6 +37,7 @@
#include <asm/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
+#include <linux/file.h>
#include "debug.h"
#include "v9fs.h"
@@ -45,6 +47,7 @@
struct v9fs_trans_sock {
struct socket *s;
+ struct file *filp;
};
/**
@@ -57,41 +60,26 @@ struct v9fs_trans_sock {
static int v9fs_sock_recv(struct v9fs_transport *trans, void *v, int len)
{
- struct msghdr msg;
- struct kvec iov;
- int result;
- mm_segment_t oldfs;
- struct v9fs_trans_sock *ts = trans ? trans->priv : NULL;
+ int ret;
+ struct v9fs_trans_sock *ts;
- if (trans->status == Disconnected)
+ if (!trans || trans->status == Disconnected) {
+ dprintk(DEBUG_ERROR, "disconnected ...\n");
return -EREMOTEIO;
+ }
- result = -EINVAL;
-
- oldfs = get_fs();
- set_fs(get_ds());
-
- iov.iov_base = v;
- iov.iov_len = len;
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_namelen = 0;
- msg.msg_flags = MSG_NOSIGNAL;
+ ts = trans->priv;
- result = kernel_recvmsg(ts->s, &msg, &iov, 1, len, 0);
+ if (!(ts->filp->f_flags & O_NONBLOCK))
+ dprintk(DEBUG_ERROR, "blocking read ...\n");
- dprintk(DEBUG_TRANS, "socket state %d\n", ts->s->state);
- set_fs(oldfs);
-
- if (result <= 0) {
- if (result != -ERESTARTSYS)
+ ret = kernel_read(ts->filp, ts->filp->f_pos, v, len);
+ if (ret <= 0) {
+ if (ret != -ERESTARTSYS && ret != -EAGAIN)
trans->status = Disconnected;
}
- return result;
+ return ret;
}
/**
@@ -104,40 +92,72 @@ static int v9fs_sock_recv(struct v9fs_transport *trans, void *v, int len)
static int v9fs_sock_send(struct v9fs_transport *trans, void *v, int len)
{
- struct kvec iov;
- struct msghdr msg;
- int result = -1;
+ int ret;
mm_segment_t oldfs;
- struct v9fs_trans_sock *ts = trans ? trans->priv : NULL;
+ struct v9fs_trans_sock *ts;
- dprintk(DEBUG_TRANS, "Sending packet size %d (%x)\n", len, len);
- dump_data(v, len);
+ if (!trans || trans->status == Disconnected) {
+ dprintk(DEBUG_ERROR, "disconnected ...\n");
+ return -EREMOTEIO;
+ }
+
+ ts = trans->priv;
+ if (!ts) {
+ dprintk(DEBUG_ERROR, "no transport ...\n");
+ return -EREMOTEIO;
+ }
- down(&trans->writelock);
+ if (!(ts->filp->f_flags & O_NONBLOCK))
+ dprintk(DEBUG_ERROR, "blocking write ...\n");
oldfs = get_fs();
set_fs(get_ds());
- iov.iov_base = v;
- iov.iov_len = len;
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_namelen = 0;
- msg.msg_flags = MSG_NOSIGNAL;
- result = kernel_sendmsg(ts->s, &msg, &iov, 1, len);
+ ret = vfs_write(ts->filp, (void __user *)v, len, &ts->filp->f_pos);
set_fs(oldfs);
- if (result < 0) {
- if (result != -ERESTARTSYS)
+ if (ret < 0) {
+ if (ret != -ERESTARTSYS)
trans->status = Disconnected;
}
- up(&trans->writelock);
- return result;
+ return ret;
+}
+
+static unsigned int v9fs_sock_poll(struct v9fs_transport *trans,
+ struct poll_table_struct *pt) {
+
+ int ret;
+ struct v9fs_trans_sock *ts;
+ mm_segment_t oldfs;
+
+ if (!trans) {
+ dprintk(DEBUG_ERROR, "no transport\n");
+ return -EIO;
+ }
+
+ ts = trans->priv;
+ if (trans->status != Connected || !ts) {
+ dprintk(DEBUG_ERROR, "transport disconnected: %d\n", trans->status);
+ return -EIO;
+ }
+
+ oldfs = get_fs();
+ set_fs(get_ds());
+
+ if (!ts->filp->f_op || !ts->filp->f_op->poll) {
+ dprintk(DEBUG_ERROR, "no poll operation\n");
+ ret = -EIO;
+ goto end;
+ }
+
+ ret = ts->filp->f_op->poll(ts->filp, pt);
+
+end:
+ set_fs(oldfs);
+ return ret;
}
+
/**
* v9fs_tcp_init - initialize TCP socket
* @v9ses: session information
@@ -154,9 +174,9 @@ v9fs_tcp_init(struct v9fs_session_info *v9ses, const char *addr, char *data)
int rc = 0;
struct v9fs_trans_sock *ts = NULL;
struct v9fs_transport *trans = v9ses->transport;
+ int fd;
- sema_init(&trans->writelock, 1);
- sema_init(&trans->readlock, 1);
+ trans->status = Disconnected;
ts = kmalloc(sizeof(struct v9fs_trans_sock), GFP_KERNEL);
@@ -165,6 +185,7 @@ v9fs_tcp_init(struct v9fs_session_info *v9ses, const char *addr, char *data)
trans->priv = ts;
ts->s = NULL;
+ ts->filp = NULL;
if (!addr)
return -EINVAL;
@@ -185,7 +206,18 @@ v9fs_tcp_init(struct v9fs_session_info *v9ses, const char *addr, char *data)
return rc;
}
csocket->sk->sk_allocation = GFP_NOIO;
+
+ fd = sock_map_fd(csocket);
+ if (fd < 0) {
+ sock_release(csocket);
+ kfree(ts);
+ trans->priv = NULL;
+ return fd;
+ }
+
ts->s = csocket;
+ ts->filp = fget(fd);
+ ts->filp->f_flags |= O_NONBLOCK;
trans->status = Connected;
return 0;
@@ -203,7 +235,7 @@ static int
v9fs_unix_init(struct v9fs_session_info *v9ses, const char *dev_name,
char *data)
{
- int rc;
+ int rc, fd;
struct socket *csocket;
struct sockaddr_un sun_server;
struct v9fs_transport *trans;
@@ -213,6 +245,8 @@ v9fs_unix_init(struct v9fs_session_info *v9ses, const char *dev_name,
csocket = NULL;
trans = v9ses->transport;
+ trans->status = Disconnected;
+
if (strlen(dev_name) > UNIX_PATH_MAX) {
eprintk(KERN_ERR, "v9fs_trans_unix: address too long: %s\n",
dev_name);
@@ -225,9 +259,7 @@ v9fs_unix_init(struct v9fs_session_info *v9ses, const char *dev_name,
trans->priv = ts;
ts->s = NULL;
-
- sema_init(&trans->writelock, 1);
- sema_init(&trans->readlock, 1);
+ ts->filp = NULL;
sun_server.sun_family = PF_UNIX;
strcpy(sun_server.sun_path, dev_name);
@@ -241,7 +273,18 @@ v9fs_unix_init(struct v9fs_session_info *v9ses, const char *dev_name,
return rc;
}
csocket->sk->sk_allocation = GFP_NOIO;
+
+ fd = sock_map_fd(csocket);
+ if (fd < 0) {
+ sock_release(csocket);
+ kfree(ts);
+ trans->priv = NULL;
+ return fd;
+ }
+
ts->s = csocket;
+ ts->filp = fget(fd);
+ ts->filp->f_flags |= O_NONBLOCK;
trans->status = Connected;
return 0;
@@ -262,12 +305,11 @@ static void v9fs_sock_close(struct v9fs_transport *trans)
ts = trans->priv;
- if ((ts) && (ts->s)) {
- dprintk(DEBUG_TRANS, "closing the socket %p\n", ts->s);
- sock_release(ts->s);
+ if ((ts) && (ts->filp)) {
+ fput(ts->filp);
+ ts->filp = NULL;
ts->s = NULL;
trans->status = Disconnected;
- dprintk(DEBUG_TRANS, "socket closed\n");
}
kfree(ts);
@@ -280,6 +322,7 @@ struct v9fs_transport v9fs_trans_tcp = {
.write = v9fs_sock_send,
.read = v9fs_sock_recv,
.close = v9fs_sock_close,
+ .poll = v9fs_sock_poll,
};
struct v9fs_transport v9fs_trans_unix = {
@@ -287,4 +330,5 @@ struct v9fs_transport v9fs_trans_unix = {
.write = v9fs_sock_send,
.read = v9fs_sock_recv,
.close = v9fs_sock_close,
+ .poll = v9fs_sock_poll,
};
diff --git a/fs/9p/transport.h b/fs/9p/transport.h
index 9e9cd418efd..91fcdb94b36 100644
--- a/fs/9p/transport.h
+++ b/fs/9p/transport.h
@@ -3,6 +3,7 @@
*
* Transport Definition
*
+ * Copyright (C) 2005 by Latchesar Ionkov <lucho@ionkov.net>
* Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -31,14 +32,13 @@ enum v9fs_transport_status {
struct v9fs_transport {
enum v9fs_transport_status status;
- struct semaphore writelock;
- struct semaphore readlock;
void *priv;
int (*init) (struct v9fs_session_info *, const char *, char *);
int (*write) (struct v9fs_transport *, void *, int);
int (*read) (struct v9fs_transport *, void *, int);
void (*close) (struct v9fs_transport *);
+ unsigned int (*poll)(struct v9fs_transport *, struct poll_table_struct *);
};
extern struct v9fs_transport v9fs_trans_tcp;
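
With the per-transport semaphores gone and ->poll added, a transport is just the five operations in the table above. A hedged skeleton of what a new transport would provide (names invented for the example; the real instances are v9fs_trans_fd, v9fs_trans_tcp and v9fs_trans_unix):

	/* Illustrative skeleton only. */
	static int ex_init(struct v9fs_session_info *v9ses, const char *addr,
			   char *data)
	{
		return 0;		/* allocate ->priv, set ->status to Connected */
	}

	static int ex_write(struct v9fs_transport *trans, void *buf, int len)
	{
		return len;		/* push buf to the server, may return -EAGAIN */
	}

	static int ex_read(struct v9fs_transport *trans, void *buf, int len)
	{
		return 0;		/* pull data from the server, may return -EAGAIN */
	}

	static void ex_close(struct v9fs_transport *trans)
	{
		trans->status = Disconnected;
	}

	static unsigned int ex_poll(struct v9fs_transport *trans,
				    struct poll_table_struct *pt)
	{
		return POLLIN | POLLOUT;	/* readiness mask consumed by the mux */
	}

	struct v9fs_transport v9fs_trans_example = {
		.init  = ex_init,
		.write = ex_write,
		.read  = ex_read,
		.close = ex_close,
		.poll  = ex_poll,
	};
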
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 418c3743fde..5250c428fc1 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -37,7 +37,6 @@
#include "v9fs_vfs.h"
#include "transport.h"
#include "mux.h"
-#include "conv.h"
/* TODO: sysfs or debugfs interface */
int v9fs_debug_level = 0; /* feature-rific global debug level */
@@ -213,7 +212,8 @@ retry:
return -1;
}
- error = idr_get_new(&p->pool, NULL, &i);
+ /* no need to store exactly p, we just need something non-null */
+ error = idr_get_new(&p->pool, p, &i);
up(&p->lock);
if (error == -EAGAIN)
@@ -243,6 +243,16 @@ void v9fs_put_idpool(int id, struct v9fs_idpool *p)
}
/**
+ * v9fs_check_idpool - check if the specified id is available
+ * @id - id to check
+ * @p - pool
+ */
+int v9fs_check_idpool(int id, struct v9fs_idpool *p)
+{
+ return idr_find(&p->pool, id) != NULL;
+}
+
+/**
* v9fs_session_init - initialize session
* @v9ses: session information structure
* @dev_name: device being mounted
@@ -259,6 +269,7 @@ v9fs_session_init(struct v9fs_session_info *v9ses,
int n = 0;
int newfid = -1;
int retval = -EINVAL;
+ struct v9fs_str *version;
v9ses->name = __getname();
if (!v9ses->name)
@@ -281,9 +292,6 @@ v9fs_session_init(struct v9fs_session_info *v9ses,
/* id pools that are session-dependent: FIDs and TIDs */
idr_init(&v9ses->fidpool.pool);
init_MUTEX(&v9ses->fidpool.lock);
- idr_init(&v9ses->tidpool.pool);
- init_MUTEX(&v9ses->tidpool.lock);
-
switch (v9ses->proto) {
case PROTO_TCP:
@@ -320,7 +328,12 @@ v9fs_session_init(struct v9fs_session_info *v9ses,
v9ses->shutdown = 0;
v9ses->session_hung = 0;
- if ((retval = v9fs_mux_init(v9ses, dev_name)) < 0) {
+ v9ses->mux = v9fs_mux_init(v9ses->transport, v9ses->maxdata + V9FS_IOHDRSZ,
+ &v9ses->extended);
+
+ if (IS_ERR(v9ses->mux)) {
+ retval = PTR_ERR(v9ses->mux);
+ v9ses->mux = NULL;
dprintk(DEBUG_ERROR, "problem initializing mux\n");
goto SessCleanUp;
}
@@ -339,13 +352,16 @@ v9fs_session_init(struct v9fs_session_info *v9ses,
goto FreeFcall;
}
- /* Really should check for 9P1 and report error */
- if (!strcmp(fcall->params.rversion.version, "9P2000.u")) {
+ version = &fcall->params.rversion.version;
+ if (version->len==8 && !memcmp(version->str, "9P2000.u", 8)) {
dprintk(DEBUG_9P, "9P2000 UNIX extensions enabled\n");
v9ses->extended = 1;
- } else {
+ } else if (version->len==6 && !memcmp(version->str, "9P2000", 6)) {
dprintk(DEBUG_9P, "9P2000 legacy mode enabled\n");
v9ses->extended = 0;
+ } else {
+ retval = -EREMOTEIO;
+ goto FreeFcall;
}
n = fcall->params.rversion.msize;
@@ -381,7 +397,7 @@ v9fs_session_init(struct v9fs_session_info *v9ses,
}
if (v9ses->afid != ~0) {
- if (v9fs_t_clunk(v9ses, v9ses->afid, NULL))
+ if (v9fs_t_clunk(v9ses, v9ses->afid))
dprintk(DEBUG_ERROR, "clunk failed\n");
}
@@ -403,13 +419,16 @@ v9fs_session_init(struct v9fs_session_info *v9ses,
void v9fs_session_close(struct v9fs_session_info *v9ses)
{
- if (v9ses->recvproc) {
- send_sig(SIGKILL, v9ses->recvproc, 1);
- wait_for_completion(&v9ses->proccmpl);
+ if (v9ses->mux) {
+ v9fs_mux_destroy(v9ses->mux);
+ v9ses->mux = NULL;
}
- if (v9ses->transport)
+ if (v9ses->transport) {
v9ses->transport->close(v9ses->transport);
+ kfree(v9ses->transport);
+ v9ses->transport = NULL;
+ }
__putname(v9ses->name);
__putname(v9ses->remotename);
@@ -420,8 +439,9 @@ void v9fs_session_close(struct v9fs_session_info *v9ses)
* and cancel all pending requests.
*/
void v9fs_session_cancel(struct v9fs_session_info *v9ses) {
+ dprintk(DEBUG_ERROR, "cancel session %p\n", v9ses);
v9ses->transport->status = Disconnected;
- v9fs_mux_cancel_requests(v9ses, -EIO);
+ v9fs_mux_cancel(v9ses->mux, -EIO);
}
extern int v9fs_error_init(void);
@@ -433,11 +453,17 @@ extern int v9fs_error_init(void);
static int __init init_v9fs(void)
{
+ int ret;
+
v9fs_error_init();
printk(KERN_INFO "Installing v9fs 9P2000 file system support\n");
- return register_filesystem(&v9fs_fs_type);
+ ret = v9fs_mux_global_init();
+ if (!ret)
+ ret = register_filesystem(&v9fs_fs_type);
+
+ return ret;
}
/**
@@ -447,6 +473,7 @@ static int __init init_v9fs(void)
static void __exit exit_v9fs(void)
{
+ v9fs_mux_global_exit();
unregister_filesystem(&v9fs_fs_type);
}
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 45dcef42bdd..f337da7a0ee 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -57,24 +57,14 @@ struct v9fs_session_info {
/* book keeping */
struct v9fs_idpool fidpool; /* The FID pool for file descriptors */
- struct v9fs_idpool tidpool; /* The TID pool for transactions ids */
- /* transport information */
struct v9fs_transport *transport;
+ struct v9fs_mux_data *mux;
int inprogress; /* session in progress => true */
int shutdown; /* session shutting down. no more attaches. */
unsigned char session_hung;
-
- /* mux private data */
- struct v9fs_fcall *curfcall;
- wait_queue_head_t read_wait;
- struct completion fcread;
- struct completion proccmpl;
- struct task_struct *recvproc;
-
- spinlock_t muxlock;
- struct list_head mux_fcalls;
+ struct dentry *debugfs_dir;
};
/* possible values of ->proto */
@@ -84,11 +74,14 @@ enum {
PROTO_FD,
};
+extern struct dentry *v9fs_debugfs_root;
+
int v9fs_session_init(struct v9fs_session_info *, const char *, char *);
struct v9fs_session_info *v9fs_inode2v9ses(struct inode *);
void v9fs_session_close(struct v9fs_session_info *v9ses);
int v9fs_get_idpool(struct v9fs_idpool *p);
void v9fs_put_idpool(int id, struct v9fs_idpool *p);
+int v9fs_check_idpool(int id, struct v9fs_idpool *p);
void v9fs_session_cancel(struct v9fs_session_info *v9ses);
#define V9FS_MAGIC 0x01021997
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 2f2cea7ee3e..c78502ad00e 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -45,9 +45,8 @@ extern struct dentry_operations v9fs_dentry_operations;
struct inode *v9fs_get_inode(struct super_block *sb, int mode);
ino_t v9fs_qid2ino(struct v9fs_qid *qid);
-void v9fs_mistat2inode(struct v9fs_stat *, struct inode *,
- struct super_block *);
+void v9fs_stat2inode(struct v9fs_stat *, struct inode *, struct super_block *);
int v9fs_dir_release(struct inode *inode, struct file *filp);
int v9fs_file_open(struct inode *inode, struct file *file);
-void v9fs_inode2mistat(struct inode *inode, struct v9fs_stat *mistat);
+void v9fs_inode2stat(struct inode *inode, struct v9fs_stat *stat);
void v9fs_dentry_release(struct dentry *);
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index a6aa947de0f..2dd806dac9f 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -40,7 +40,6 @@
#include "v9fs.h"
#include "9p.h"
#include "v9fs_vfs.h"
-#include "conv.h"
#include "fid.h"
/**
@@ -95,24 +94,22 @@ static int v9fs_dentry_validate(struct dentry *dentry, struct nameidata *nd)
void v9fs_dentry_release(struct dentry *dentry)
{
+ int err;
+
dprintk(DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_iname, dentry);
if (dentry->d_fsdata != NULL) {
struct list_head *fid_list = dentry->d_fsdata;
struct v9fs_fid *temp = NULL;
struct v9fs_fid *current_fid = NULL;
- struct v9fs_fcall *fcall = NULL;
list_for_each_entry_safe(current_fid, temp, fid_list, list) {
- if (v9fs_t_clunk
- (current_fid->v9ses, current_fid->fid, &fcall))
- dprintk(DEBUG_ERROR, "clunk failed: %s\n",
- FCALL_ERROR(fcall));
+ err = v9fs_t_clunk(current_fid->v9ses, current_fid->fid);
- v9fs_put_idpool(current_fid->fid,
- &current_fid->v9ses->fidpool);
+ if (err < 0)
+ dprintk(DEBUG_ERROR, "clunk failed: %d name %s\n",
+ err, dentry->d_iname);
- kfree(fcall);
v9fs_fid_destroy(current_fid);
}
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index 57a43b8feef..ae6d032b9b5 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -37,8 +37,8 @@
#include "debug.h"
#include "v9fs.h"
#include "9p.h"
-#include "v9fs_vfs.h"
#include "conv.h"
+#include "v9fs_vfs.h"
#include "fid.h"
/**
@@ -74,20 +74,16 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
struct inode *inode = filp->f_dentry->d_inode;
struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
struct v9fs_fid *file = filp->private_data;
- unsigned int i, n;
+ unsigned int i, n, s;
int fid = -1;
int ret = 0;
- struct v9fs_stat *mi = NULL;
+ struct v9fs_stat stat;
int over = 0;
dprintk(DEBUG_VFS, "name %s\n", filp->f_dentry->d_name.name);
fid = file->fid;
- mi = kmalloc(v9ses->maxdata, GFP_KERNEL);
- if (!mi)
- return -ENOMEM;
-
if (file->rdir_fcall && (filp->f_pos != file->rdir_pos)) {
kfree(file->rdir_fcall);
file->rdir_fcall = NULL;
@@ -97,20 +93,20 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
n = file->rdir_fcall->params.rread.count;
i = file->rdir_fpos;
while (i < n) {
- int s = v9fs_deserialize_stat(v9ses,
- file->rdir_fcall->params.rread.data + i,
- n - i, mi, v9ses->maxdata);
+ s = v9fs_deserialize_stat(
+ file->rdir_fcall->params.rread.data + i,
+ n - i, &stat, v9ses->extended);
if (s == 0) {
dprintk(DEBUG_ERROR,
- "error while deserializing mistat\n");
+ "error while deserializing stat\n");
ret = -EIO;
goto FreeStructs;
}
- over = filldir(dirent, mi->name, strlen(mi->name),
- filp->f_pos, v9fs_qid2ino(&mi->qid),
- dt_type(mi));
+ over = filldir(dirent, stat.name.str, stat.name.len,
+ filp->f_pos, v9fs_qid2ino(&stat.qid),
+ dt_type(&stat));
if (over) {
file->rdir_fpos = i;
@@ -130,7 +126,7 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
while (!over) {
ret = v9fs_t_read(v9ses, fid, filp->f_pos,
- v9ses->maxdata-V9FS_IOHDRSZ, &fcall);
+ v9ses->maxdata-V9FS_IOHDRSZ, &fcall);
if (ret < 0) {
dprintk(DEBUG_ERROR, "error while reading: %d: %p\n",
ret, fcall);
@@ -141,19 +137,18 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
n = ret;
i = 0;
while (i < n) {
- int s = v9fs_deserialize_stat(v9ses,
- fcall->params.rread.data + i, n - i, mi,
- v9ses->maxdata);
+ s = v9fs_deserialize_stat(fcall->params.rread.data + i,
+ n - i, &stat, v9ses->extended);
if (s == 0) {
dprintk(DEBUG_ERROR,
- "error while deserializing mistat\n");
+ "error while deserializing stat\n");
return -EIO;
}
- over = filldir(dirent, mi->name, strlen(mi->name),
- filp->f_pos, v9fs_qid2ino(&mi->qid),
- dt_type(mi));
+ over = filldir(dirent, stat.name.str, stat.name.len,
+ filp->f_pos, v9fs_qid2ino(&stat.qid),
+ dt_type(&stat));
if (over) {
file->rdir_fcall = fcall;
@@ -172,7 +167,6 @@ static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir)
FreeStructs:
kfree(fcall);
- kfree(mi);
return ret;
}
@@ -193,18 +187,15 @@ int v9fs_dir_release(struct inode *inode, struct file *filp)
fid->fid);
fidnum = fid->fid;
- filemap_fdatawrite(inode->i_mapping);
- filemap_fdatawait(inode->i_mapping);
+ filemap_write_and_wait(inode->i_mapping);
if (fidnum >= 0) {
dprintk(DEBUG_VFS, "fidopen: %d v9f->fid: %d\n", fid->fidopen,
fid->fid);
- if (v9fs_t_clunk(v9ses, fidnum, NULL))
+ if (v9fs_t_clunk(v9ses, fidnum))
dprintk(DEBUG_ERROR, "clunk failed\n");
- v9fs_put_idpool(fid->fid, &v9ses->fidpool);
-
kfree(fid->rdir_fcall);
kfree(fid);
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 89c849da850..6852f0eb96e 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -32,6 +32,7 @@
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/inet.h>
+#include <linux/version.h>
#include <linux/list.h>
#include <asm/uaccess.h>
#include <linux/idr.h>
@@ -117,9 +118,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
result = v9fs_t_open(v9ses, newfid, open_mode, &fcall);
if (result < 0) {
- dprintk(DEBUG_ERROR,
- "open failed, open_mode 0x%x: %s\n", open_mode,
- FCALL_ERROR(fcall));
+ PRINT_FCALL_ERROR("open failed", fcall);
kfree(fcall);
return result;
}
@@ -165,8 +164,7 @@ static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
return -ENOLCK;
if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
- filemap_fdatawrite(inode->i_mapping);
- filemap_fdatawait(inode->i_mapping);
+ filemap_write_and_wait(inode->i_mapping);
invalidate_inode_pages(&inode->i_data);
}
@@ -257,7 +255,6 @@ v9fs_file_write(struct file *filp, const char __user * data,
int result = -EIO;
int rsize = 0;
int total = 0;
- char *buf;
dprintk(DEBUG_VFS, "data %p count %d offset %x\n", data, (int)count,
(int)*offset);
@@ -265,28 +262,14 @@ v9fs_file_write(struct file *filp, const char __user * data,
if (v9fid->iounit != 0 && rsize > v9fid->iounit)
rsize = v9fid->iounit;
- buf = kmalloc(v9ses->maxdata - V9FS_IOHDRSZ, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
do {
if (count < rsize)
rsize = count;
- result = copy_from_user(buf, data, rsize);
- if (result) {
- dprintk(DEBUG_ERROR, "Problem copying from user\n");
- kfree(buf);
- return -EFAULT;
- }
-
- dump_data(buf, rsize);
- result = v9fs_t_write(v9ses, fid, *offset, rsize, buf, &fcall);
+ result = v9fs_t_write(v9ses, fid, *offset, rsize, data, &fcall);
if (result < 0) {
- eprintk(KERN_ERR, "error while writing: %s(%d)\n",
- FCALL_ERROR(fcall), result);
+ PRINT_FCALL_ERROR("error while writing", fcall);
kfree(fcall);
- kfree(buf);
return result;
} else
*offset += result;
@@ -306,7 +289,6 @@ v9fs_file_write(struct file *filp, const char __user * data,
total += result;
} while (count);
- kfree(buf);
return total;
}
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 0ea965c3bb7..a17b2885428 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -40,7 +40,6 @@
#include "v9fs.h"
#include "9p.h"
#include "v9fs_vfs.h"
-#include "conv.h"
#include "fid.h"
static struct inode_operations v9fs_dir_inode_operations;
@@ -127,100 +126,32 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
}
/**
- * v9fs_blank_mistat - helper function to setup a 9P stat structure
+ * v9fs_blank_wstat - helper function to setup a 9P stat structure
* @v9ses: 9P session info (for determining extended mode)
- * @mistat: structure to initialize
+ * @wstat: structure to initialize
*
*/
static void
-v9fs_blank_mistat(struct v9fs_session_info *v9ses, struct v9fs_stat *mistat)
+v9fs_blank_wstat(struct v9fs_wstat *wstat)
{
- mistat->type = ~0;
- mistat->dev = ~0;
- mistat->qid.type = ~0;
- mistat->qid.version = ~0;
- *((long long *)&mistat->qid.path) = ~0;
- mistat->mode = ~0;
- mistat->atime = ~0;
- mistat->mtime = ~0;
- mistat->length = ~0;
- mistat->name = mistat->data;
- mistat->uid = mistat->data;
- mistat->gid = mistat->data;
- mistat->muid = mistat->data;
- if (v9ses->extended) {
- mistat->n_uid = ~0;
- mistat->n_gid = ~0;
- mistat->n_muid = ~0;
- mistat->extension = mistat->data;
- }
- *mistat->data = 0;
-}
-
-/**
- * v9fs_mistat2unix - convert mistat to unix stat
- * @mistat: Plan 9 metadata (mistat) structure
- * @buf: unix metadata (stat) structure to populate
- * @sb: superblock
- *
- */
-
-static void
-v9fs_mistat2unix(struct v9fs_stat *mistat, struct stat *buf,
- struct super_block *sb)
-{
- struct v9fs_session_info *v9ses = sb ? sb->s_fs_info : NULL;
-
- buf->st_nlink = 1;
-
- buf->st_atime = mistat->atime;
- buf->st_mtime = mistat->mtime;
- buf->st_ctime = mistat->mtime;
-
- buf->st_uid = (unsigned short)-1;
- buf->st_gid = (unsigned short)-1;
-
- if (v9ses && v9ses->extended) {
- /* TODO: string to uid mapping via user-space daemon */
- if (mistat->n_uid != -1)
- sscanf(mistat->uid, "%x", (unsigned int *)&buf->st_uid);
-
- if (mistat->n_gid != -1)
- sscanf(mistat->gid, "%x", (unsigned int *)&buf->st_gid);
- }
-
- if (buf->st_uid == (unsigned short)-1)
- buf->st_uid = v9ses->uid;
- if (buf->st_gid == (unsigned short)-1)
- buf->st_gid = v9ses->gid;
-
- buf->st_mode = p9mode2unixmode(v9ses, mistat->mode);
- if ((S_ISBLK(buf->st_mode)) || (S_ISCHR(buf->st_mode))) {
- char type = 0;
- int major = -1;
- int minor = -1;
- sscanf(mistat->extension, "%c %u %u", &type, &major, &minor);
- switch (type) {
- case 'c':
- buf->st_mode &= ~S_IFBLK;
- buf->st_mode |= S_IFCHR;
- break;
- case 'b':
- break;
- default:
- dprintk(DEBUG_ERROR, "Unknown special type %c (%s)\n",
- type, mistat->extension);
- };
- buf->st_rdev = MKDEV(major, minor);
- } else
- buf->st_rdev = 0;
-
- buf->st_size = mistat->length;
-
- buf->st_blksize = sb->s_blocksize;
- buf->st_blocks =
- (buf->st_size + buf->st_blksize - 1) >> sb->s_blocksize_bits;
+ wstat->type = ~0;
+ wstat->dev = ~0;
+ wstat->qid.type = ~0;
+ wstat->qid.version = ~0;
+ *((long long *)&wstat->qid.path) = ~0;
+ wstat->mode = ~0;
+ wstat->atime = ~0;
+ wstat->mtime = ~0;
+ wstat->length = ~0;
+ wstat->name = NULL;
+ wstat->uid = NULL;
+ wstat->gid = NULL;
+ wstat->muid = NULL;
+ wstat->n_uid = ~0;
+ wstat->n_gid = ~0;
+ wstat->n_muid = ~0;
+ wstat->extension = NULL;
}
/**
@@ -312,12 +243,12 @@ v9fs_create(struct inode *dir,
struct inode *file_inode = NULL;
struct v9fs_fcall *fcall = NULL;
struct v9fs_qid qid;
- struct stat newstat;
int dirfidnum = -1;
long newfid = -1;
int result = 0;
unsigned int iounit = 0;
int wfidno = -1;
+ int err;
perm = unixmode2p9mode(v9ses, perm);
@@ -349,57 +280,64 @@ v9fs_create(struct inode *dir,
result = v9fs_t_walk(v9ses, dirfidnum, newfid, NULL, &fcall);
if (result < 0) {
- dprintk(DEBUG_ERROR, "clone error: %s\n", FCALL_ERROR(fcall));
+ PRINT_FCALL_ERROR("clone error", fcall);
v9fs_put_idpool(newfid, &v9ses->fidpool);
newfid = -1;
goto CleanUpFid;
}
kfree(fcall);
+ fcall = NULL;
result = v9fs_t_create(v9ses, newfid, (char *)file_dentry->d_name.name,
perm, open_mode, &fcall);
if (result < 0) {
- dprintk(DEBUG_ERROR, "create fails: %s(%d)\n",
- FCALL_ERROR(fcall), result);
-
+ PRINT_FCALL_ERROR("create fails", fcall);
goto CleanUpFid;
}
iounit = fcall->params.rcreate.iounit;
qid = fcall->params.rcreate.qid;
kfree(fcall);
+ fcall = NULL;
- fid = v9fs_fid_create(file_dentry, v9ses, newfid, 1);
- dprintk(DEBUG_VFS, "fid %p %d\n", fid, fid->fidcreate);
- if (!fid) {
- result = -ENOMEM;
- goto CleanUpFid;
- }
+ if (!(perm&V9FS_DMDIR)) {
+ fid = v9fs_fid_create(file_dentry, v9ses, newfid, 1);
+ dprintk(DEBUG_VFS, "fid %p %d\n", fid, fid->fidcreate);
+ if (!fid) {
+ result = -ENOMEM;
+ goto CleanUpFid;
+ }
- fid->qid = qid;
- fid->iounit = iounit;
+ fid->qid = qid;
+ fid->iounit = iounit;
+ } else {
+ err = v9fs_t_clunk(v9ses, newfid);
+ newfid = -1;
+ if (err < 0)
+ dprintk(DEBUG_ERROR, "clunk for mkdir failed: %d\n", err);
+ }
/* walk to the newly created file and put the fid in the dentry */
wfidno = v9fs_get_idpool(&v9ses->fidpool);
- if (newfid < 0) {
+ if (wfidno < 0) {
eprintk(KERN_WARNING, "no free fids available\n");
return -ENOSPC;
}
result = v9fs_t_walk(v9ses, dirfidnum, wfidno,
- (char *) file_dentry->d_name.name, NULL);
+ (char *) file_dentry->d_name.name, &fcall);
if (result < 0) {
- dprintk(DEBUG_ERROR, "clone error: %s\n", FCALL_ERROR(fcall));
+ PRINT_FCALL_ERROR("clone error", fcall);
v9fs_put_idpool(wfidno, &v9ses->fidpool);
wfidno = -1;
goto CleanUpFid;
}
+ kfree(fcall);
+ fcall = NULL;
if (!v9fs_fid_create(file_dentry, v9ses, wfidno, 0)) {
- if (!v9fs_t_clunk(v9ses, newfid, &fcall)) {
- v9fs_put_idpool(wfidno, &v9ses->fidpool);
- }
+ v9fs_put_idpool(wfidno, &v9ses->fidpool);
goto CleanUpFid;
}
@@ -409,62 +347,43 @@ v9fs_create(struct inode *dir,
(perm & V9FS_DMDEVICE))
return 0;
- result = v9fs_t_stat(v9ses, newfid, &fcall);
+ result = v9fs_t_stat(v9ses, wfidno, &fcall);
if (result < 0) {
- dprintk(DEBUG_ERROR, "stat error: %s(%d)\n", FCALL_ERROR(fcall),
- result);
+ PRINT_FCALL_ERROR("stat error", fcall);
goto CleanUpFid;
}
- v9fs_mistat2unix(fcall->params.rstat.stat, &newstat, sb);
- file_inode = v9fs_get_inode(sb, newstat.st_mode);
+ file_inode = v9fs_get_inode(sb,
+ p9mode2unixmode(v9ses, fcall->params.rstat.stat.mode));
+
if ((!file_inode) || IS_ERR(file_inode)) {
dprintk(DEBUG_ERROR, "create inode failed\n");
result = -EBADF;
goto CleanUpFid;
}
- v9fs_mistat2inode(fcall->params.rstat.stat, file_inode, sb);
+ v9fs_stat2inode(&fcall->params.rstat.stat, file_inode, sb);
kfree(fcall);
fcall = NULL;
file_dentry->d_op = &v9fs_dentry_operations;
d_instantiate(file_dentry, file_inode);
- if (perm & V9FS_DMDIR) {
- if (!v9fs_t_clunk(v9ses, newfid, &fcall))
- v9fs_put_idpool(newfid, &v9ses->fidpool);
- else
- dprintk(DEBUG_ERROR, "clunk for mkdir failed: %s\n",
- FCALL_ERROR(fcall));
- kfree(fcall);
- fid->fidopen = 0;
- fid->fidcreate = 0;
- d_drop(file_dentry);
- }
-
return 0;
CleanUpFid:
kfree(fcall);
+ fcall = NULL;
if (newfid >= 0) {
- if (!v9fs_t_clunk(v9ses, newfid, &fcall))
- v9fs_put_idpool(newfid, &v9ses->fidpool);
- else
- dprintk(DEBUG_ERROR, "clunk failed: %s\n",
- FCALL_ERROR(fcall));
-
- kfree(fcall);
+ err = v9fs_t_clunk(v9ses, newfid);
+ if (err < 0)
+ dprintk(DEBUG_ERROR, "clunk failed: %d\n", err);
}
if (wfidno >= 0) {
- if (!v9fs_t_clunk(v9ses, wfidno, &fcall))
- v9fs_put_idpool(wfidno, &v9ses->fidpool);
- else
- dprintk(DEBUG_ERROR, "clunk failed: %s\n",
- FCALL_ERROR(fcall));
-
- kfree(fcall);
+ err = v9fs_t_clunk(v9ses, wfidno);
+ if (err < 0)
+ dprintk(DEBUG_ERROR, "clunk failed: %d\n", err);
}
return result;
}
@@ -509,10 +428,9 @@ static int v9fs_remove(struct inode *dir, struct dentry *file, int rmdir)
}
result = v9fs_t_remove(v9ses, fid, &fcall);
- if (result < 0)
- dprintk(DEBUG_ERROR, "remove of file fails: %s(%d)\n",
- FCALL_ERROR(fcall), result);
- else {
+ if (result < 0) {
+ PRINT_FCALL_ERROR("remove fails", fcall);
+ } else {
v9fs_put_idpool(fid, &v9ses->fidpool);
v9fs_fid_destroy(v9fid);
}
@@ -567,7 +485,6 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
struct v9fs_fid *fid;
struct inode *inode;
struct v9fs_fcall *fcall = NULL;
- struct stat newstat;
int dirfidnum = -1;
int newfid = -1;
int result = 0;
@@ -620,8 +537,8 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
goto FreeFcall;
}
- v9fs_mistat2unix(fcall->params.rstat.stat, &newstat, sb);
- inode = v9fs_get_inode(sb, newstat.st_mode);
+ inode = v9fs_get_inode(sb, p9mode2unixmode(v9ses,
+ fcall->params.rstat.stat.mode));
if (IS_ERR(inode) && (PTR_ERR(inode) == -ENOSPC)) {
eprintk(KERN_WARNING, "inode alloc failes, returns %ld\n",
@@ -631,7 +548,7 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
goto FreeFcall;
}
- inode->i_ino = v9fs_qid2ino(&fcall->params.rstat.stat->qid);
+ inode->i_ino = v9fs_qid2ino(&fcall->params.rstat.stat.qid);
fid = v9fs_fid_create(dentry, v9ses, newfid, 0);
if (fid == NULL) {
@@ -640,10 +557,10 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
goto FreeFcall;
}
- fid->qid = fcall->params.rstat.stat->qid;
+ fid->qid = fcall->params.rstat.stat.qid;
dentry->d_op = &v9fs_dentry_operations;
- v9fs_mistat2inode(fcall->params.rstat.stat, inode, inode->i_sb);
+ v9fs_stat2inode(&fcall->params.rstat.stat, inode, inode->i_sb);
d_add(dentry, inode);
kfree(fcall);
@@ -699,7 +616,7 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
v9fs_fid_lookup(old_dentry->d_parent);
struct v9fs_fid *newdirfid =
v9fs_fid_lookup(new_dentry->d_parent);
- struct v9fs_stat *mistat = kmalloc(v9ses->maxdata, GFP_KERNEL);
+ struct v9fs_wstat wstat;
struct v9fs_fcall *fcall = NULL;
int fid = -1;
int olddirfidnum = -1;
@@ -708,9 +625,6 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
dprintk(DEBUG_VFS, "\n");
- if (!mistat)
- return -ENOMEM;
-
if ((!oldfid) || (!olddirfid) || (!newdirfid)) {
dprintk(DEBUG_ERROR, "problem with arguments\n");
return -EBADF;
@@ -734,33 +648,22 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
goto FreeFcallnBail;
}
- v9fs_blank_mistat(v9ses, mistat);
+ v9fs_blank_wstat(&wstat);
+ wstat.muid = v9ses->name;
+ wstat.name = (char *) new_dentry->d_name.name;
- strcpy(mistat->data + 1, v9ses->name);
- mistat->name = mistat->data + 1 + strlen(v9ses->name);
-
- if (new_dentry->d_name.len >
- (v9ses->maxdata - strlen(v9ses->name) - sizeof(struct v9fs_stat))) {
- dprintk(DEBUG_ERROR, "new name too long\n");
- goto FreeFcallnBail;
- }
-
- strcpy(mistat->name, new_dentry->d_name.name);
- retval = v9fs_t_wstat(v9ses, fid, mistat, &fcall);
+ retval = v9fs_t_wstat(v9ses, fid, &wstat, &fcall);
FreeFcallnBail:
- kfree(mistat);
-
if (retval < 0)
- dprintk(DEBUG_ERROR, "v9fs_t_wstat error: %s\n",
- FCALL_ERROR(fcall));
+ PRINT_FCALL_ERROR("wstat error", fcall);
kfree(fcall);
return retval;
}
/**
- * v9fs_vfs_getattr - retreive file metadata
+ * v9fs_vfs_getattr - retrieve file metadata
* @mnt - mount information
* @dentry - file to get attributes on
* @stat - metadata structure to populate
@@ -788,7 +691,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
if (err < 0)
dprintk(DEBUG_ERROR, "stat error\n");
else {
- v9fs_mistat2inode(fcall->params.rstat.stat, dentry->d_inode,
+ v9fs_stat2inode(&fcall->params.rstat.stat, dentry->d_inode,
dentry->d_inode->i_sb);
generic_fillattr(dentry->d_inode, stat);
}
@@ -809,57 +712,44 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dentry->d_inode);
struct v9fs_fid *fid = v9fs_fid_lookup(dentry);
struct v9fs_fcall *fcall = NULL;
- struct v9fs_stat *mistat = kmalloc(v9ses->maxdata, GFP_KERNEL);
+ struct v9fs_wstat wstat;
int res = -EPERM;
dprintk(DEBUG_VFS, "\n");
- if (!mistat)
- return -ENOMEM;
-
if (!fid) {
dprintk(DEBUG_ERROR,
"Couldn't find fid associated with dentry\n");
return -EBADF;
}
- v9fs_blank_mistat(v9ses, mistat);
+ v9fs_blank_wstat(&wstat);
if (iattr->ia_valid & ATTR_MODE)
- mistat->mode = unixmode2p9mode(v9ses, iattr->ia_mode);
+ wstat.mode = unixmode2p9mode(v9ses, iattr->ia_mode);
if (iattr->ia_valid & ATTR_MTIME)
- mistat->mtime = iattr->ia_mtime.tv_sec;
+ wstat.mtime = iattr->ia_mtime.tv_sec;
if (iattr->ia_valid & ATTR_ATIME)
- mistat->atime = iattr->ia_atime.tv_sec;
+ wstat.atime = iattr->ia_atime.tv_sec;
if (iattr->ia_valid & ATTR_SIZE)
- mistat->length = iattr->ia_size;
+ wstat.length = iattr->ia_size;
if (v9ses->extended) {
- char *ptr = mistat->data+1;
-
- if (iattr->ia_valid & ATTR_UID) {
- mistat->uid = ptr;
- ptr += 1+sprintf(ptr, "%08x", iattr->ia_uid);
- mistat->n_uid = iattr->ia_uid;
- }
+ if (iattr->ia_valid & ATTR_UID)
+ wstat.n_uid = iattr->ia_uid;
- if (iattr->ia_valid & ATTR_GID) {
- mistat->gid = ptr;
- ptr += 1+sprintf(ptr, "%08x", iattr->ia_gid);
- mistat->n_gid = iattr->ia_gid;
- }
+ if (iattr->ia_valid & ATTR_GID)
+ wstat.n_gid = iattr->ia_gid;
}
- res = v9fs_t_wstat(v9ses, fid->fid, mistat, &fcall);
+ res = v9fs_t_wstat(v9ses, fid->fid, &wstat, &fcall);
if (res < 0)
- dprintk(DEBUG_ERROR, "wstat error: %s\n", FCALL_ERROR(fcall));
+ PRINT_FCALL_ERROR("wstat error", fcall);
- kfree(mistat);
kfree(fcall);
-
if (res >= 0)
res = inode_setattr(dentry->d_inode, iattr);
@@ -867,51 +757,47 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
}
/**
- * v9fs_mistat2inode - populate an inode structure with mistat info
- * @mistat: Plan 9 metadata (mistat) structure
+ * v9fs_stat2inode - populate an inode structure with mistat info
+ * @stat: Plan 9 metadata (mistat) structure
* @inode: inode to populate
* @sb: superblock of filesystem
*
*/
void
-v9fs_mistat2inode(struct v9fs_stat *mistat, struct inode *inode,
- struct super_block *sb)
+v9fs_stat2inode(struct v9fs_stat *stat, struct inode *inode,
+ struct super_block *sb)
{
+ int n;
+ char ext[32];
struct v9fs_session_info *v9ses = sb->s_fs_info;
inode->i_nlink = 1;
- inode->i_atime.tv_sec = mistat->atime;
- inode->i_mtime.tv_sec = mistat->mtime;
- inode->i_ctime.tv_sec = mistat->mtime;
+ inode->i_atime.tv_sec = stat->atime;
+ inode->i_mtime.tv_sec = stat->mtime;
+ inode->i_ctime.tv_sec = stat->mtime;
- inode->i_uid = -1;
- inode->i_gid = -1;
+ inode->i_uid = v9ses->uid;
+ inode->i_gid = v9ses->gid;
if (v9ses->extended) {
- /* TODO: string to uid mapping via user-space daemon */
- inode->i_uid = mistat->n_uid;
- inode->i_gid = mistat->n_gid;
-
- if (mistat->n_uid == -1)
- sscanf(mistat->uid, "%x", &inode->i_uid);
-
- if (mistat->n_gid == -1)
- sscanf(mistat->gid, "%x", &inode->i_gid);
+ inode->i_uid = stat->n_uid;
+ inode->i_gid = stat->n_gid;
}
- if (inode->i_uid == -1)
- inode->i_uid = v9ses->uid;
- if (inode->i_gid == -1)
- inode->i_gid = v9ses->gid;
-
- inode->i_mode = p9mode2unixmode(v9ses, mistat->mode);
+ inode->i_mode = p9mode2unixmode(v9ses, stat->mode);
if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode))) {
char type = 0;
int major = -1;
int minor = -1;
- sscanf(mistat->extension, "%c %u %u", &type, &major, &minor);
+
+ n = stat->extension.len;
+ if (n > sizeof(ext)-1)
+ n = sizeof(ext)-1;
+ memmove(ext, stat->extension.str, n);
+ ext[n] = 0;
+ sscanf(ext, "%c %u %u", &type, &major, &minor);
switch (type) {
case 'c':
inode->i_mode &= ~S_IFBLK;
@@ -920,14 +806,14 @@ v9fs_mistat2inode(struct v9fs_stat *mistat, struct inode *inode,
case 'b':
break;
default:
- dprintk(DEBUG_ERROR, "Unknown special type %c (%s)\n",
- type, mistat->extension);
+ dprintk(DEBUG_ERROR, "Unknown special type %c (%.*s)\n",
+ type, stat->extension.len, stat->extension.str);
};
inode->i_rdev = MKDEV(major, minor);
} else
inode->i_rdev = 0;
- inode->i_size = mistat->length;
+ inode->i_size = stat->length;
inode->i_blksize = sb->s_blocksize;
inode->i_blocks =
@@ -955,71 +841,6 @@ ino_t v9fs_qid2ino(struct v9fs_qid *qid)
}
/**
- * v9fs_vfs_symlink - helper function to create symlinks
- * @dir: directory inode containing symlink
- * @dentry: dentry for symlink
- * @symname: symlink data
- *
- * See 9P2000.u RFC for more information
- *
- */
-
-static int
-v9fs_vfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
-{
- int retval = -EPERM;
- struct v9fs_fid *newfid;
- struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir);
- struct v9fs_fcall *fcall = NULL;
- struct v9fs_stat *mistat = kmalloc(v9ses->maxdata, GFP_KERNEL);
-
- dprintk(DEBUG_VFS, " %lu,%s,%s\n", dir->i_ino, dentry->d_name.name,
- symname);
-
- if (!mistat)
- return -ENOMEM;
-
- if (!v9ses->extended) {
- dprintk(DEBUG_ERROR, "not extended\n");
- goto FreeFcall;
- }
-
- /* issue a create */
- retval = v9fs_create(dir, dentry, S_IFLNK, 0);
- if (retval != 0)
- goto FreeFcall;
-
- newfid = v9fs_fid_lookup(dentry);
-
- /* issue a twstat */
- v9fs_blank_mistat(v9ses, mistat);
- strcpy(mistat->data + 1, symname);
- mistat->extension = mistat->data + 1;
- retval = v9fs_t_wstat(v9ses, newfid->fid, mistat, &fcall);
- if (retval < 0) {
- dprintk(DEBUG_ERROR, "v9fs_t_wstat error: %s\n",
- FCALL_ERROR(fcall));
- goto FreeFcall;
- }
-
- kfree(fcall);
-
- if (v9fs_t_clunk(v9ses, newfid->fid, &fcall)) {
- dprintk(DEBUG_ERROR, "clunk for symlink failed: %s\n",
- FCALL_ERROR(fcall));
- goto FreeFcall;
- }
-
- d_drop(dentry); /* FID - will this also clunk? */
-
- FreeFcall:
- kfree(mistat);
- kfree(fcall);
-
- return retval;
-}
-
-/**
* v9fs_readlink - read a symlink's location (internal version)
* @dentry: dentry for symlink
* @buffer: buffer to load symlink location into
@@ -1058,16 +879,17 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen)
if (!fcall)
return -EIO;
- if (!(fcall->params.rstat.stat->mode & V9FS_DMSYMLINK)) {
+ if (!(fcall->params.rstat.stat.mode & V9FS_DMSYMLINK)) {
retval = -EINVAL;
goto FreeFcall;
}
/* copy extension buffer into buffer */
- if (strlen(fcall->params.rstat.stat->extension) < buflen)
- buflen = strlen(fcall->params.rstat.stat->extension);
+ if (fcall->params.rstat.stat.extension.len < buflen)
+ buflen = fcall->params.rstat.stat.extension.len;
- memcpy(buffer, fcall->params.rstat.stat->extension, buflen + 1);
+ memcpy(buffer, fcall->params.rstat.stat.extension.str, buflen - 1);
+ buffer[buflen-1] = 0;
retval = buflen;
@@ -1157,6 +979,77 @@ static void v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void
__putname(s);
}
+static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry,
+ int mode, const char *extension)
+{
+ int err, retval;
+ struct v9fs_session_info *v9ses;
+ struct v9fs_fcall *fcall;
+ struct v9fs_fid *fid;
+ struct v9fs_wstat wstat;
+
+ v9ses = v9fs_inode2v9ses(dir);
+ retval = -EPERM;
+ fcall = NULL;
+
+ if (!v9ses->extended) {
+ dprintk(DEBUG_ERROR, "not extended\n");
+ goto free_mem;
+ }
+
+ /* issue a create */
+ retval = v9fs_create(dir, dentry, mode, 0);
+ if (retval != 0)
+ goto free_mem;
+
+ fid = v9fs_fid_get_created(dentry);
+ if (!fid) {
+ dprintk(DEBUG_ERROR, "couldn't resolve fid from dentry\n");
+ goto free_mem;
+ }
+
+ /* issue a Twstat */
+ v9fs_blank_wstat(&wstat);
+ wstat.muid = v9ses->name;
+ wstat.extension = (char *) extension;
+ retval = v9fs_t_wstat(v9ses, fid->fid, &wstat, &fcall);
+ if (retval < 0) {
+ PRINT_FCALL_ERROR("wstat error", fcall);
+ goto free_mem;
+ }
+
+ err = v9fs_t_clunk(v9ses, fid->fid);
+ if (err < 0) {
+ dprintk(DEBUG_ERROR, "clunk failed: %d\n", err);
+ goto free_mem;
+ }
+
+ d_drop(dentry); /* FID - will this also clunk? */
+
+free_mem:
+ kfree(fcall);
+ return retval;
+}
+
+/**
+ * v9fs_vfs_symlink - helper function to create symlinks
+ * @dir: directory inode containing symlink
+ * @dentry: dentry for symlink
+ * @symname: symlink data
+ *
+ * See 9P2000.u RFC for more information
+ *
+ */
+
+static int
+v9fs_vfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
+{
+ dprintk(DEBUG_VFS, " %lu,%s,%s\n", dir->i_ino, dentry->d_name.name,
+ symname);
+
+ return v9fs_vfs_mkspecial(dir, dentry, S_IFLNK, symname);
+}
+
/**
* v9fs_vfs_link - create a hardlink
* @old_dentry: dentry for file to link to
@@ -1173,64 +1066,24 @@ static int
v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
- int retval = -EPERM;
- struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir);
- struct v9fs_fcall *fcall = NULL;
- struct v9fs_stat *mistat = kmalloc(v9ses->maxdata, GFP_KERNEL);
- struct v9fs_fid *oldfid = v9fs_fid_lookup(old_dentry);
- struct v9fs_fid *newfid = NULL;
- char *symname = __getname();
+ int retval;
+ struct v9fs_fid *oldfid;
+ char *name;
dprintk(DEBUG_VFS, " %lu,%s,%s\n", dir->i_ino, dentry->d_name.name,
old_dentry->d_name.name);
- if (!v9ses->extended) {
- dprintk(DEBUG_ERROR, "not extended\n");
- goto FreeMem;
- }
-
- /* get fid of old_dentry */
- sprintf(symname, "hardlink(%d)\n", oldfid->fid);
-
- /* issue a create */
- retval = v9fs_create(dir, dentry, V9FS_DMLINK, 0);
- if (retval != 0)
- goto FreeMem;
-
- newfid = v9fs_fid_lookup(dentry);
- if (!newfid) {
- dprintk(DEBUG_ERROR, "couldn't resolve fid from dentry\n");
- goto FreeMem;
- }
-
- /* issue a twstat */
- v9fs_blank_mistat(v9ses, mistat);
- strcpy(mistat->data + 1, symname);
- mistat->extension = mistat->data + 1;
- retval = v9fs_t_wstat(v9ses, newfid->fid, mistat, &fcall);
- if (retval < 0) {
- dprintk(DEBUG_ERROR, "v9fs_t_wstat error: %s\n",
- FCALL_ERROR(fcall));
- goto FreeMem;
- }
-
- kfree(fcall);
-
- if (v9fs_t_clunk(v9ses, newfid->fid, &fcall)) {
- dprintk(DEBUG_ERROR, "clunk for symlink failed: %s\n",
- FCALL_ERROR(fcall));
- goto FreeMem;
+ oldfid = v9fs_fid_lookup(old_dentry);
+ if (!oldfid) {
+ dprintk(DEBUG_ERROR, "can't find oldfid\n");
+ return -EPERM;
}
- d_drop(dentry); /* FID - will this also clunk? */
-
- kfree(fcall);
- fcall = NULL;
+ name = __getname();
+ sprintf(name, "hardlink(%d)\n", oldfid->fid);
+ retval = v9fs_vfs_mkspecial(dir, dentry, V9FS_DMLINK, name);
+ __putname(name);
- FreeMem:
- kfree(mistat);
- kfree(fcall);
- __putname(symname);
return retval;
}
@@ -1246,82 +1099,30 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
static int
v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
{
- int retval = -EPERM;
- struct v9fs_fid *newfid;
- struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir);
- struct v9fs_fcall *fcall = NULL;
- struct v9fs_stat *mistat = kmalloc(v9ses->maxdata, GFP_KERNEL);
- char *symname = __getname();
+ int retval;
+ char *name;
dprintk(DEBUG_VFS, " %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino,
dentry->d_name.name, mode, MAJOR(rdev), MINOR(rdev));
- if (!mistat)
- return -ENOMEM;
-
- if (!new_valid_dev(rdev)) {
- retval = -EINVAL;
- goto FreeMem;
- }
-
- if (!v9ses->extended) {
- dprintk(DEBUG_ERROR, "not extended\n");
- goto FreeMem;
- }
-
- /* issue a create */
- retval = v9fs_create(dir, dentry, mode, 0);
-
- if (retval != 0)
- goto FreeMem;
-
- newfid = v9fs_fid_lookup(dentry);
- if (!newfid) {
- dprintk(DEBUG_ERROR, "coudn't resove fid from dentry\n");
- retval = -EINVAL;
- goto FreeMem;
- }
+ if (!new_valid_dev(rdev))
+ return -EINVAL;
+ name = __getname();
/* build extension */
if (S_ISBLK(mode))
- sprintf(symname, "b %u %u", MAJOR(rdev), MINOR(rdev));
+ sprintf(name, "b %u %u", MAJOR(rdev), MINOR(rdev));
else if (S_ISCHR(mode))
- sprintf(symname, "c %u %u", MAJOR(rdev), MINOR(rdev));
+ sprintf(name, "c %u %u", MAJOR(rdev), MINOR(rdev));
else if (S_ISFIFO(mode))
- ; /* DO NOTHING */
+ *name = 0;
else {
- retval = -EINVAL;
- goto FreeMem;
- }
-
- if (!S_ISFIFO(mode)) {
- /* issue a twstat */
- v9fs_blank_mistat(v9ses, mistat);
- strcpy(mistat->data + 1, symname);
- mistat->extension = mistat->data + 1;
- retval = v9fs_t_wstat(v9ses, newfid->fid, mistat, &fcall);
- if (retval < 0) {
- dprintk(DEBUG_ERROR, "v9fs_t_wstat error: %s\n",
- FCALL_ERROR(fcall));
- goto FreeMem;
- }
+ __putname(name);
+ return -EINVAL;
}
- /* need to update dcache so we show up */
- kfree(fcall);
-
- if (v9fs_t_clunk(v9ses, newfid->fid, &fcall)) {
- dprintk(DEBUG_ERROR, "clunk for symlink failed: %s\n",
- FCALL_ERROR(fcall));
- goto FreeMem;
- }
-
- d_drop(dentry); /* FID - will this also clunk? */
-
- FreeMem:
- kfree(mistat);
- kfree(fcall);
- __putname(symname);
+ retval = v9fs_vfs_mkspecial(dir, dentry, mode, name);
+ __putname(name);
return retval;
}
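
The extension string built above follows the 9P2000.u convention for special files: a type letter plus major and minor numbers, or an empty string for a FIFO. A hedged sketch of what ends up in the Twstat extension field (the snprintf form and local buffer are illustrative, not from the patch):

	char ext[32];

	if (S_ISBLK(mode))		/* mknod(..., S_IFBLK, MKDEV(8, 1)) -> "b 8 1" */
		snprintf(ext, sizeof(ext), "b %u %u", MAJOR(rdev), MINOR(rdev));
	else if (S_ISCHR(mode))		/* character device -> "c major minor"         */
		snprintf(ext, sizeof(ext), "c %u %u", MAJOR(rdev), MINOR(rdev));
	else if (S_ISFIFO(mode))
		ext[0] = '\0';		/* FIFOs carry no extension text               */

The parse side of the same convention is the sscanf("%c %u %u", ...) in v9fs_stat2inode() earlier in this file.
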
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 82c5b008407..2c4fa75be02 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -44,7 +44,6 @@
#include "v9fs.h"
#include "9p.h"
#include "v9fs_vfs.h"
-#include "conv.h"
#include "fid.h"
static void v9fs_clear_inode(struct inode *);
@@ -92,7 +91,7 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
sb->s_op = &v9fs_super_ops;
sb->s_flags = flags | MS_ACTIVE | MS_SYNCHRONOUS | MS_DIRSYNC |
- MS_NODIRATIME | MS_NOATIME;
+ MS_NOATIME;
}
/**
@@ -123,12 +122,13 @@ static struct super_block *v9fs_get_sb(struct file_system_type
dprintk(DEBUG_VFS, " \n");
- v9ses = kcalloc(1, sizeof(struct v9fs_session_info), GFP_KERNEL);
+ v9ses = kzalloc(sizeof(struct v9fs_session_info), GFP_KERNEL);
if (!v9ses)
return ERR_PTR(-ENOMEM);
if ((newfid = v9fs_session_init(v9ses, dev_name, data)) < 0) {
dprintk(DEBUG_ERROR, "problem initiating session\n");
+ kfree(v9ses);
return ERR_PTR(newfid);
}
@@ -157,7 +157,7 @@ static struct super_block *v9fs_get_sb(struct file_system_type
stat_result = v9fs_t_stat(v9ses, newfid, &fcall);
if (stat_result < 0) {
dprintk(DEBUG_ERROR, "stat error\n");
- v9fs_t_clunk(v9ses, newfid, NULL);
+ v9fs_t_clunk(v9ses, newfid);
v9fs_put_idpool(newfid, &v9ses->fidpool);
} else {
/* Setup the Root Inode */
@@ -167,10 +167,10 @@ static struct super_block *v9fs_get_sb(struct file_system_type
goto put_back_sb;
}
- root_fid->qid = fcall->params.rstat.stat->qid;
+ root_fid->qid = fcall->params.rstat.stat.qid;
root->d_inode->i_ino =
- v9fs_qid2ino(&fcall->params.rstat.stat->qid);
- v9fs_mistat2inode(fcall->params.rstat.stat, root->d_inode, sb);
+ v9fs_qid2ino(&fcall->params.rstat.stat.qid);
+ v9fs_stat2inode(&fcall->params.rstat.stat, root->d_inode, sb);
}
kfree(fcall);
diff --git a/fs/Kconfig b/fs/Kconfig
index d5255e627b5..ef78e3a42d3 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -70,6 +70,7 @@ config FS_XIP
config EXT3_FS
tristate "Ext3 journalling file system support"
+ select JBD
help
This is the journaling version of the Second extended file system
(often called ext3), the de facto standard Linux file system
@@ -138,23 +139,20 @@ config EXT3_FS_SECURITY
extended attributes for file security labels, say N.
config JBD
-# CONFIG_JBD could be its own option (even modular), but until there are
-# other users than ext3, we will simply make it be the same as CONFIG_EXT3_FS
-# dep_tristate ' Journal Block Device support (JBD for ext3)' CONFIG_JBD $CONFIG_EXT3_FS
tristate
- default EXT3_FS
help
This is a generic journaling layer for block devices. It is
- currently used by the ext3 file system, but it could also be used to
- add journal support to other file systems or block devices such as
- RAID or LVM.
+ currently used by the ext3 and OCFS2 file systems, but it could
+ also be used to add journal support to other file systems or block
+ devices such as RAID or LVM.
- If you are using the ext3 file system, you need to say Y here. If
- you are not using ext3 then you will probably want to say N.
+ If you are using the ext3 or OCFS2 file systems, you need to
+ say Y here. If you are not using ext3 or OCFS2 then you will probably
+ want to say N.
To compile this device as a module, choose M here: the module will be
- called jbd. If you are compiling ext3 into the kernel, you cannot
- compile this code as a module.
+ called jbd. If you are compiling ext3 or OCFS2 into the kernel,
+ you cannot compile this code as a module.
config JBD_DEBUG
bool "JBD (ext3) debugging support"
@@ -326,6 +324,38 @@ config FS_POSIX_ACL
source "fs/xfs/Kconfig"
+config OCFS2_FS
+ tristate "OCFS2 file system support (EXPERIMENTAL)"
+ depends on NET && EXPERIMENTAL
+ select CONFIGFS_FS
+ select JBD
+ select CRC32
+ select INET
+ help
+ OCFS2 is a general purpose extent based shared disk cluster file
+ system with many similarities to ext3. It supports 64 bit inode
+ numbers, and has automatically extending metadata groups which may
+ also make it attractive for non-clustered use.
+
+ You'll want to install the ocfs2-tools package in order to at least
+ get "mount.ocfs2".
+
+ Project web page: http://oss.oracle.com/projects/ocfs2
+ Tools web page: http://oss.oracle.com/projects/ocfs2-tools
+ OCFS2 mailing lists: http://oss.oracle.com/projects/ocfs2/mailman/
+
+ Note: Features which OCFS2 does not support yet:
+ - extended attributes
+ - shared writeable mmap
+ - loopback is supported, but data written will not
+ be cluster coherent.
+ - quotas
+ - cluster aware flock
+ - Directory change notification (F_NOTIFY)
+ - Distributed Caching (F_SETLEASE/F_GETLEASE/break_lease)
+ - POSIX ACLs
+ - readpages / writepages (not user visible)
+
config MINIX_FS
tristate "Minix fs support"
help
@@ -768,7 +798,7 @@ config PROC_KCORE
config PROC_VMCORE
bool "/proc/vmcore support (EXPERIMENTAL)"
- depends on PROC_FS && EMBEDDED && EXPERIMENTAL && CRASH_DUMP
+ depends on PROC_FS && EXPERIMENTAL && CRASH_DUMP
help
Exports the dump image of crashed kernel in ELF format.
@@ -841,6 +871,20 @@ config RELAYFS_FS
If unsure, say N.
+config CONFIGFS_FS
+ tristate "Userspace-driven configuration filesystem (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ configfs is a ram-based filesystem that provides the converse
+ of sysfs's functionality. Where sysfs is a filesystem-based
+ view of kernel objects, configfs is a filesystem-based manager
+ of kernel objects, or config_items.
+
+ Both sysfs and configfs can and should exist together on the
+ same system. One is not a replacement for the other.
+
+ If unsure, say N.
+
endmenu
menu "Miscellaneous filesystems"
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 175b2e8177c..f3d3d81eb7e 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -1,6 +1,6 @@
config BINFMT_ELF
bool "Kernel support for ELF binaries"
- depends on MMU
+ depends on MMU && (BROKEN || !FRV)
default y
---help---
ELF (Executable and Linkable Format) is a format for libraries and
diff --git a/fs/Makefile b/fs/Makefile
index 4c265575907..1db711319c8 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -10,11 +10,11 @@ obj-y := open.o read_write.o file_table.o buffer.o bio.o super.o \
ioctl.o readdir.o select.o fifo.o locks.o dcache.o inode.o \
attr.o bad_inode.o file.o filesystems.o namespace.o aio.o \
seq_file.o xattr.o libfs.o fs-writeback.o mpage.o direct-io.o \
- ioprio.o pnode.o
+ ioprio.o pnode.o drop_caches.o
obj-$(CONFIG_INOTIFY) += inotify.o
obj-$(CONFIG_EPOLL) += eventpoll.o
-obj-$(CONFIG_COMPAT) += compat.o
+obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o
nfsd-$(CONFIG_NFSD) := nfsctl.o
obj-y += $(nfsd-y) $(nfsd-m)
@@ -101,3 +101,5 @@ obj-$(CONFIG_BEFS_FS) += befs/
obj-$(CONFIG_HOSTFS) += hostfs/
obj-$(CONFIG_HPPFS) += hppfs/
obj-$(CONFIG_DEBUG_FS) += debugfs/
+obj-$(CONFIG_CONFIGFS_FS) += configfs/
+obj-$(CONFIG_OCFS2_FS) += ocfs2/
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 9ebe881c678..44d439cb69f 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -244,10 +244,10 @@ affs_put_inode(struct inode *inode)
pr_debug("AFFS: put_inode(ino=%lu, nlink=%u)\n", inode->i_ino, inode->i_nlink);
affs_free_prealloc(inode);
if (atomic_read(&inode->i_count) == 1) {
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if (inode->i_size != AFFS_I(inode)->mmu_private)
affs_truncate(inode);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
}
}
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 0a57fd7c726..9eef6bf156a 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -118,7 +118,7 @@ static int kafscmd(void *arg)
_SRXAFSCM_xxxx_t func;
int die;
- printk("kAFS: Started kafscmd %d\n", current->pid);
+ printk(KERN_INFO "kAFS: Started kafscmd %d\n", current->pid);
daemonize("kafscmd");
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 6682d6d7f29..5c61c24dab2 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -137,7 +137,7 @@ static inline void afs_dir_check_page(struct inode *dir, struct page *page)
#endif
/* determine how many magic numbers there should be in this page */
- latter = dir->i_size - (page->index << PAGE_CACHE_SHIFT);
+ latter = dir->i_size - page_offset(page);
if (latter >= PAGE_SIZE)
qty = PAGE_SIZE;
else
diff --git a/fs/afs/volume.h b/fs/afs/volume.h
index 1e691889c4c..bfdcf19ba3f 100644
--- a/fs/afs/volume.h
+++ b/fs/afs/volume.h
@@ -18,8 +18,6 @@
#include "kafsasyncd.h"
#include "cache.h"
-#define __packed __attribute__((packed))
-
typedef enum {
AFS_VLUPD_SLEEP, /* sleeping waiting for update timer to fire */
AFS_VLUPD_PENDING, /* on pending queue */
@@ -115,7 +113,7 @@ struct afs_volume
struct cachefs_cookie *cache; /* caching cookie */
#endif
afs_volid_t vid; /* volume ID */
- afs_voltype_t __packed type; /* type of volume */
+ afs_voltype_t type; /* type of volume */
char type_force; /* force volume type (suppress R/O -> R/W) */
unsigned short nservers; /* number of server slots filled */
unsigned short rjservers; /* number of servers discarded due to -ENOMEDIUM */
diff --git a/fs/aio.c b/fs/aio.c
index 5a28b69ad22..aec2b1916d1 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -29,7 +29,6 @@
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
-#include <linux/rcuref.h>
#include <asm/kmap_types.h>
#include <asm/uaccess.h>
@@ -514,7 +513,7 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
/* Must be done under the lock to serialise against cancellation.
* Call this aio_fput as it duplicates fput via the fput_work.
*/
- if (unlikely(rcuref_dec_and_test(&req->ki_filp->f_count))) {
+ if (unlikely(atomic_dec_and_test(&req->ki_filp->f_count))) {
get_ioctx(ctx);
spin_lock(&fput_lock);
list_add(&req->ki_list, &fput_head);
diff --git a/fs/attr.c b/fs/attr.c
index 67bcd9b14ea..97de9467087 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -10,11 +10,11 @@
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
+#include <linux/capability.h>
#include <linux/fsnotify.h>
#include <linux/fcntl.h>
#include <linux/quotaops.h>
#include <linux/security.h>
-#include <linux/time.h>
/* Taken over from the old code... */
@@ -67,20 +67,12 @@ EXPORT_SYMBOL(inode_change_ok);
int inode_setattr(struct inode * inode, struct iattr * attr)
{
unsigned int ia_valid = attr->ia_valid;
- int error = 0;
-
- if (ia_valid & ATTR_SIZE) {
- if (attr->ia_size != i_size_read(inode)) {
- error = vmtruncate(inode, attr->ia_size);
- if (error || (ia_valid == ATTR_SIZE))
- goto out;
- } else {
- /*
- * We skipped the truncate but must still update
- * timestamps
- */
- ia_valid |= ATTR_MTIME|ATTR_CTIME;
- }
+
+ if (ia_valid & ATTR_SIZE &&
+ attr->ia_size != i_size_read(inode)) {
+ int error = vmtruncate(inode, attr->ia_size);
+ if (error)
+ return error;
}
if (ia_valid & ATTR_UID)
@@ -104,8 +96,8 @@ int inode_setattr(struct inode * inode, struct iattr * attr)
inode->i_mode = mode;
}
mark_inode_dirty(inode);
-out:
- return error;
+
+ return 0;
}
EXPORT_SYMBOL(inode_setattr);
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index a1ab1c0ed21..870e2cf3301 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -10,6 +10,7 @@
*
* ------------------------------------------------------------------------- */
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/param.h>
@@ -229,9 +230,9 @@ static struct dentry *autofs_root_lookup(struct inode *dir, struct dentry *dentr
dentry->d_flags |= DCACHE_AUTOFS_PENDING;
d_add(dentry, NULL);
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
autofs_revalidate(dentry, nd);
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
/*
* If we are still pending, check if we had to handle
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index fca83e28edc..385bed09b0d 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -209,7 +209,7 @@ static inline int simple_empty_nolock(struct dentry *dentry)
struct dentry *child;
int ret = 0;
- list_for_each_entry(child, &dentry->d_subdirs, d_child)
+ list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child)
if (simple_positive(child))
goto out;
ret = 1;
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index feb6ac427d0..dc39589df16 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -105,7 +105,7 @@ repeat:
next = this_parent->d_subdirs.next;
resume:
while (next != &this_parent->d_subdirs) {
- struct dentry *dentry = list_entry(next, struct dentry, d_child);
+ struct dentry *dentry = list_entry(next, struct dentry, d_u.d_child);
/* Negative dentry - give up */
if (!simple_positive(dentry)) {
@@ -138,7 +138,7 @@ resume:
}
if (this_parent != top) {
- next = this_parent->d_child.next;
+ next = this_parent->d_u.d_child.next;
this_parent = this_parent->d_parent;
goto resume;
}
@@ -163,7 +163,7 @@ repeat:
next = this_parent->d_subdirs.next;
resume:
while (next != &this_parent->d_subdirs) {
- struct dentry *dentry = list_entry(next, struct dentry, d_child);
+ struct dentry *dentry = list_entry(next, struct dentry, d_u.d_child);
/* Negative dentry - give up */
if (!simple_positive(dentry)) {
@@ -199,7 +199,7 @@ cont:
}
if (this_parent != parent) {
- next = this_parent->d_child.next;
+ next = this_parent->d_u.d_child.next;
this_parent = this_parent->d_parent;
goto resume;
}
@@ -238,7 +238,7 @@ static struct dentry *autofs4_expire(struct super_block *sb,
/* On exit from the loop expire is set to a dgot dentry
* to expire or it's NULL */
while ( next != &root->d_subdirs ) {
- struct dentry *dentry = list_entry(next, struct dentry, d_child);
+ struct dentry *dentry = list_entry(next, struct dentry, d_u.d_child);
/* Negative dentry - give up */
if ( !simple_positive(dentry) ) {
@@ -302,7 +302,7 @@ next:
expired, (int)expired->d_name.len, expired->d_name.name);
spin_lock(&dcache_lock);
list_del(&expired->d_parent->d_subdirs);
- list_add(&expired->d_parent->d_subdirs, &expired->d_child);
+ list_add(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
spin_unlock(&dcache_lock);
return expired;
}
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 818b37be515..2d3082854a2 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -91,7 +91,7 @@ repeat:
next = this_parent->d_subdirs.next;
resume:
while (next != &this_parent->d_subdirs) {
- struct dentry *dentry = list_entry(next, struct dentry, d_child);
+ struct dentry *dentry = list_entry(next, struct dentry, d_u.d_child);
/* Negative dentry - don`t care */
if (!simple_positive(dentry)) {
@@ -117,7 +117,7 @@ resume:
if (this_parent != sbi->root) {
struct dentry *dentry = this_parent;
- next = this_parent->d_child.next;
+ next = this_parent->d_u.d_child.next;
this_parent = this_parent->d_parent;
spin_unlock(&dcache_lock);
DPRINTK("parent dentry %p %.*s",
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 2a771ec6695..e93a7ae467c 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -12,6 +12,7 @@
*
* ------------------------------------------------------------------------- */
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/param.h>
@@ -86,7 +87,7 @@ static int autofs4_root_readdir(struct file *file, void *dirent,
/* Update usage from here to top of tree, so that scan of
top-level directories will give a useful result */
-static void autofs4_update_usage(struct dentry *dentry)
+static void autofs4_update_usage(struct vfsmount *mnt, struct dentry *dentry)
{
struct dentry *top = dentry->d_sb->s_root;
@@ -95,7 +96,7 @@ static void autofs4_update_usage(struct dentry *dentry)
struct autofs_info *ino = autofs4_dentry_ino(dentry);
if (ino) {
- update_atime(dentry->d_inode);
+ touch_atime(mnt, dentry);
ino->last_used = jiffies;
}
}
@@ -143,7 +144,8 @@ static int autofs4_dcache_readdir(struct file * filp, void * dirent, filldir_t f
}
while(1) {
- struct dentry *de = list_entry(list, struct dentry, d_child);
+ struct dentry *de = list_entry(list,
+ struct dentry, d_u.d_child);
if (!d_unhashed(de) && de->d_inode) {
spin_unlock(&dcache_lock);
@@ -288,10 +290,10 @@ out:
return autofs4_dcache_readdir(file, dirent, filldir);
}
-static int try_to_fill_dentry(struct dentry *dentry,
- struct super_block *sb,
- struct autofs_sb_info *sbi, int flags)
+static int try_to_fill_dentry(struct vfsmount *mnt, struct dentry *dentry, int flags)
{
+ struct super_block *sb = mnt->mnt_sb;
+ struct autofs_sb_info *sbi = autofs4_sbi(sb);
struct autofs_info *de_info = autofs4_dentry_ino(dentry);
int status = 0;
@@ -366,7 +368,7 @@ static int try_to_fill_dentry(struct dentry *dentry,
/* We don't update the usages for the autofs daemon itself, this
is necessary for recursive autofs mounts */
if (!autofs4_oz_mode(sbi))
- autofs4_update_usage(dentry);
+ autofs4_update_usage(mnt, dentry);
spin_lock(&dentry->d_lock);
dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
@@ -391,7 +393,7 @@ static int autofs4_revalidate(struct dentry * dentry, struct nameidata *nd)
/* Pending dentry */
if (autofs4_ispending(dentry)) {
if (!oz_mode)
- status = try_to_fill_dentry(dentry, dir->i_sb, sbi, flags);
+ status = try_to_fill_dentry(nd->mnt, dentry, flags);
return status;
}
@@ -408,14 +410,14 @@ static int autofs4_revalidate(struct dentry * dentry, struct nameidata *nd)
dentry, dentry->d_name.len, dentry->d_name.name);
spin_unlock(&dcache_lock);
if (!oz_mode)
- status = try_to_fill_dentry(dentry, dir->i_sb, sbi, flags);
+ status = try_to_fill_dentry(nd->mnt, dentry, flags);
return status;
}
spin_unlock(&dcache_lock);
/* Update the usage list */
if (!oz_mode)
- autofs4_update_usage(dentry);
+ autofs4_update_usage(nd->mnt, dentry);
return 1;
}
@@ -488,9 +490,9 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
d_add(dentry, NULL);
if (dentry->d_op && dentry->d_op->d_revalidate) {
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
(dentry->d_op->d_revalidate)(dentry, nd);
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
}
/*
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index 72011826f0c..f312103434d 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -33,8 +33,6 @@ static int load_aout_binary(struct linux_binprm *, struct pt_regs * regs);
static int load_aout_library(struct file*);
static int aout_core_dump(long signr, struct pt_regs * regs, struct file *file);
-extern void dump_thread(struct pt_regs *, struct user *);
-
static struct linux_binfmt aout_format = {
.module = THIS_MODULE,
.load_binary = load_aout_binary,
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index f36f2210204..f979ebbce49 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -58,7 +58,7 @@ extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
* If we don't support core dumping, then supply a NULL so we
* don't even try.
*/
-#ifdef USE_ELF_CORE_DUMP
+#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
#else
#define elf_core_dump NULL
@@ -288,11 +288,17 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
struct elf_phdr *eppnt, int prot, int type)
{
unsigned long map_addr;
+ unsigned long pageoffset = ELF_PAGEOFFSET(eppnt->p_vaddr);
down_write(&current->mm->mmap_sem);
- map_addr = do_mmap(filep, ELF_PAGESTART(addr),
- eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr), prot, type,
- eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr));
+ /* mmap() will return -EINVAL if given a zero size, but a
+ * segment with zero filesize is perfectly valid */
+ if (eppnt->p_filesz + pageoffset)
+ map_addr = do_mmap(filep, ELF_PAGESTART(addr),
+ eppnt->p_filesz + pageoffset, prot, type,
+ eppnt->p_offset - pageoffset);
+ else
+ map_addr = ELF_PAGESTART(addr);
up_write(&current->mm->mmap_sem);
return(map_addr);
}
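
The special case added above exists because a PT_LOAD segment may have no file-backed bytes at all (pure BSS), and do_mmap() rejects a zero-length request with -EINVAL. An illustrative program header that now loads cleanly (the addresses and sizes are made up for the example):

	struct elf_phdr bss_only = {
		.p_type   = PT_LOAD,
		.p_vaddr  = 0x08050000,
		.p_filesz = 0,		/* nothing to read from the file image        */
		.p_memsz  = 0x2000,	/* but 8 KiB must still be present in memory  */
	};
	/* old code: do_mmap(filep, ..., 0, ...) returned -EINVAL
	 * new code: map_addr = ELF_PAGESTART(addr) and the BSS is set up later */
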
@@ -616,7 +622,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
goto out_free_file;
retval = -ENOMEM;
- elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
+ elf_interpreter = kmalloc(elf_ppnt->p_filesz,
GFP_KERNEL);
if (!elf_interpreter)
goto out_free_file;
@@ -1107,7 +1113,7 @@ out:
* Note that some platforms still use traditional core dumps and not
* the ELF core dump. Each platform can select it as appropriate.
*/
-#ifdef USE_ELF_CORE_DUMP
+#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
/*
* ELF core dumper
@@ -1628,17 +1634,17 @@ static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
ELF_CORE_WRITE_EXTRA_DATA;
#endif
- if ((off_t) file->f_pos != offset) {
+ if ((off_t)file->f_pos != offset) {
/* Sanity check */
- printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
- (off_t) file->f_pos, offset);
+ printk(KERN_WARNING "elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
+ (off_t)file->f_pos, offset);
}
end_coredump:
set_fs(fs);
cleanup:
- while(!list_empty(&thread_list)) {
+ while (!list_empty(&thread_list)) {
struct list_head *tmp = thread_list.next;
list_del(tmp);
kfree(list_entry(tmp, struct elf_thread_status, list));
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index e0344f69c79..5b3076e8ee9 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -187,7 +187,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, struct pt_regs *regs
goto error;
/* read the name of the interpreter into memory */
- interpreter_name = (char *) kmalloc(phdr->p_filesz, GFP_KERNEL);
+ interpreter_name = kmalloc(phdr->p_filesz, GFP_KERNEL);
if (!interpreter_name)
goto error;
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 9d6625829b9..108d56bbd0d 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -77,8 +77,6 @@ static int load_flat_shared_library(int id, struct lib_info *p);
static int load_flat_binary(struct linux_binprm *, struct pt_regs * regs);
static int flat_core_dump(long signr, struct pt_regs * regs, struct file *file);
-extern void dump_thread(struct pt_regs *, struct user *);
-
static struct linux_binfmt flat_format = {
.module = THIS_MODULE,
.load_binary = load_flat_binary,
@@ -444,19 +442,22 @@ static int load_flat_file(struct linux_binprm * bprm,
flags = ntohl(hdr->flags);
rev = ntohl(hdr->rev);
- if (flags & FLAT_FLAG_KTRACE)
- printk("BINFMT_FLAT: Loading file: %s\n", bprm->filename);
-
- if (strncmp(hdr->magic, "bFLT", 4) ||
- (rev != FLAT_VERSION && rev != OLD_FLAT_VERSION)) {
+ if (strncmp(hdr->magic, "bFLT", 4)) {
/*
* because a lot of people do not manage to produce good
* flat binaries, we leave this printk to help them realise
* the problem. We only print the error if its not a script file
*/
if (strncmp(hdr->magic, "#!", 2))
- printk("BINFMT_FLAT: bad magic/rev (0x%x, need 0x%x)\n",
- rev, (int) FLAT_VERSION);
+ printk("BINFMT_FLAT: bad header magic\n");
+ return -ENOEXEC;
+ }
+
+ if (flags & FLAT_FLAG_KTRACE)
+ printk("BINFMT_FLAT: Loading file: %s\n", bprm->filename);
+
+ if (rev != FLAT_VERSION && rev != OLD_FLAT_VERSION) {
+ printk("BINFMT_FLAT: bad flat file version 0x%x (supported 0x%x and 0x%x)\n", rev, FLAT_VERSION, OLD_FLAT_VERSION);
return -ENOEXEC;
}
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 2568eb41cb3..9ccc7d8275b 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -588,11 +588,11 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
case 2: set_bit(Enabled, &e->flags);
break;
case 3: root = dget(file->f_vfsmnt->mnt_sb->s_root);
- down(&root->d_inode->i_sem);
+ mutex_lock(&root->d_inode->i_mutex);
kill_node(e);
- up(&root->d_inode->i_sem);
+ mutex_unlock(&root->d_inode->i_mutex);
dput(root);
break;
default: return res;
@@ -622,7 +622,7 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
return PTR_ERR(e);
root = dget(sb->s_root);
- down(&root->d_inode->i_sem);
+ mutex_lock(&root->d_inode->i_mutex);
dentry = lookup_one_len(e->name, root, strlen(e->name));
err = PTR_ERR(dentry);
if (IS_ERR(dentry))
@@ -658,7 +658,7 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
out2:
dput(dentry);
out:
- up(&root->d_inode->i_sem);
+ mutex_unlock(&root->d_inode->i_mutex);
dput(root);
if (err) {
@@ -703,12 +703,12 @@ static ssize_t bm_status_write(struct file * file, const char __user * buffer,
case 1: enabled = 0; break;
case 2: enabled = 1; break;
case 3: root = dget(file->f_vfsmnt->mnt_sb->s_root);
- down(&root->d_inode->i_sem);
+ mutex_lock(&root->d_inode->i_mutex);
while (!list_empty(&entries))
kill_node(list_entry(entries.next, Node, list));
- up(&root->d_inode->i_sem);
+ mutex_unlock(&root->d_inode->i_mutex);
dput(root);
default: return res;
}
diff --git a/fs/bio.c b/fs/bio.c
index 38d3e8023a0..7b306958995 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -126,6 +126,7 @@ static void bio_fs_destructor(struct bio *bio)
inline void bio_init(struct bio *bio)
{
bio->bi_next = NULL;
+ bio->bi_bdev = NULL;
bio->bi_flags = 1 << BIO_UPTODATE;
bio->bi_rw = 0;
bio->bi_vcnt = 0;
@@ -325,10 +326,31 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
if (unlikely(bio_flagged(bio, BIO_CLONED)))
return 0;
- if (bio->bi_vcnt >= bio->bi_max_vecs)
+ if (((bio->bi_size + len) >> 9) > max_sectors)
return 0;
- if (((bio->bi_size + len) >> 9) > max_sectors)
+ /*
+ * For filesystems with a blocksize smaller than the pagesize
+ * we will often be called with the same page as last time and
+ * a consecutive offset. Optimize this special case.
+ */
+ if (bio->bi_vcnt > 0) {
+ struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
+
+ if (page == prev->bv_page &&
+ offset == prev->bv_offset + prev->bv_len) {
+ prev->bv_len += len;
+ if (q->merge_bvec_fn &&
+ q->merge_bvec_fn(q, bio, prev) < len) {
+ prev->bv_len -= len;
+ return 0;
+ }
+
+ goto done;
+ }
+ }
+
+ if (bio->bi_vcnt >= bio->bi_max_vecs)
return 0;
/*
@@ -382,6 +404,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
bio->bi_vcnt++;
bio->bi_phys_segments++;
bio->bi_hw_segments++;
+ done:
bio->bi_size += len;
return len;
}
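
The fast path added to __bio_add_page() only fires when the new chunk continues exactly where the last bio_vec left off, on the same page. A hedged restatement of that test as a standalone predicate (the helper name is invented for illustration; the field names match the hunk):

	static inline int can_extend_last_bvec(struct bio *bio, struct page *page,
					       unsigned int offset)
	{
		struct bio_vec *prev;

		if (bio->bi_vcnt == 0)
			return 0;			/* nothing to extend yet */
		prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
		return page == prev->bv_page &&
		       offset == prev->bv_offset + prev->bv_len;
	}

This is why sub-page writes from filesystems with a blocksize smaller than the pagesize stop consuming a new bio_vec per block.
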
diff --git a/fs/block_dev.c b/fs/block_dev.c
index e0df94c37b7..6e50346fb1e 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -202,7 +202,7 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
loff_t size;
loff_t retval;
- down(&bd_inode->i_sem);
+ mutex_lock(&bd_inode->i_mutex);
size = i_size_read(bd_inode);
switch (origin) {
@@ -219,7 +219,7 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
}
retval = offset;
}
- up(&bd_inode->i_sem);
+ mutex_unlock(&bd_inode->i_mutex);
return retval;
}
diff --git a/fs/buffer.c b/fs/buffer.c
index 5287be18633..b9bb7ad6897 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -26,6 +26,7 @@
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
+#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
@@ -153,14 +154,8 @@ int sync_blockdev(struct block_device *bdev)
{
int ret = 0;
- if (bdev) {
- int err;
-
- ret = filemap_fdatawrite(bdev->bd_inode->i_mapping);
- err = filemap_fdatawait(bdev->bd_inode->i_mapping);
- if (!ret)
- ret = err;
- }
+ if (bdev)
+ ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
return ret;
}
EXPORT_SYMBOL(sync_blockdev);
@@ -358,11 +353,11 @@ static long do_fsync(unsigned int fd, int datasync)
* We need to protect against concurrent writers,
* which could cause livelocks in fsync_buffers_list
*/
- down(&mapping->host->i_sem);
+ mutex_lock(&mapping->host->i_mutex);
err = file->f_op->fsync(file, file->f_dentry, datasync);
if (!ret)
ret = err;
- up(&mapping->host->i_sem);
+ mutex_unlock(&mapping->host->i_mutex);
err = filemap_fdatawait(mapping);
if (!ret)
ret = err;
@@ -1768,7 +1763,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
* handle that here by just cleaning them.
*/
- block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
head = page_buffers(page);
bh = head;
@@ -2160,11 +2155,12 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
* truncates. Uses prepare/commit_write to allow the filesystem to
* deal with the hole.
*/
-int generic_cont_expand(struct inode *inode, loff_t size)
+static int __generic_cont_expand(struct inode *inode, loff_t size,
+ pgoff_t index, unsigned int offset)
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
- unsigned long index, offset, limit;
+ unsigned long limit;
int err;
err = -EFBIG;
@@ -2176,24 +2172,24 @@ int generic_cont_expand(struct inode *inode, loff_t size)
if (size > inode->i_sb->s_maxbytes)
goto out;
- offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
-
- /* ugh. in prepare/commit_write, if from==to==start of block, we
- ** skip the prepare. make sure we never send an offset for the start
- ** of a block
- */
- if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
- offset++;
- }
- index = size >> PAGE_CACHE_SHIFT;
err = -ENOMEM;
page = grab_cache_page(mapping, index);
if (!page)
goto out;
err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
- if (!err) {
- err = mapping->a_ops->commit_write(NULL, page, offset, offset);
+ if (err) {
+ /*
+ * ->prepare_write() may have instantiated a few blocks
+ * outside i_size. Trim these off again.
+ */
+ unlock_page(page);
+ page_cache_release(page);
+ vmtruncate(inode, inode->i_size);
+ goto out;
}
+
+ err = mapping->a_ops->commit_write(NULL, page, offset, offset);
+
unlock_page(page);
page_cache_release(page);
if (err > 0)
@@ -2202,6 +2198,36 @@ out:
return err;
}
+int generic_cont_expand(struct inode *inode, loff_t size)
+{
+ pgoff_t index;
+ unsigned int offset;
+
+ offset = (size & (PAGE_CACHE_SIZE - 1)); /* Within page */
+
+ /* ugh. in prepare/commit_write, if from==to==start of block, we
+ ** skip the prepare. make sure we never send an offset for the start
+ ** of a block
+ */
+ if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
+ /* caller must handle this extra byte. */
+ offset++;
+ }
+ index = size >> PAGE_CACHE_SHIFT;
+
+ return __generic_cont_expand(inode, size, index, offset);
+}
+
+int generic_cont_expand_simple(struct inode *inode, loff_t size)
+{
+ loff_t pos = size - 1;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ unsigned int offset = (pos & (PAGE_CACHE_SIZE - 1)) + 1;
+
+ /* prepare/commit_write can handle even if from==to==start of block. */
+ return __generic_cont_expand(inode, size, index, offset);
+}
+
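
The only difference between the two expand helpers is how they split the target size into a page index and an in-page offset. A worked example, assuming PAGE_CACHE_SIZE is 4096 (so PAGE_CACHE_SHIFT is 12):

	/* generic_cont_expand_simple(inode, 4097):
	 *   pos    = 4097 - 1           = 4096
	 *   index  = 4096 >> 12         = 1
	 *   offset = (4096 & 4095) + 1  = 1
	 * so prepare/commit_write is issued at byte 1 of page 1, and the
	 * "from == to == start of block" quirk never needs the offset++ hack. */
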
/*
* For moronic filesystems that do not allow holes in file.
* We may have to extend the file.
@@ -2313,7 +2339,7 @@ int generic_commit_write(struct file *file, struct page *page,
__block_commit_write(inode,page,from,to);
/*
* No need to use i_size_read() here, the i_size
- * cannot change under us because we hold i_sem.
+ * cannot change under us because we hold i_mutex.
*/
if (pos > inode->i_size) {
i_size_write(inode, pos);
@@ -2610,7 +2636,7 @@ int block_truncate_page(struct address_space *mapping,
pgoff_t index = from >> PAGE_CACHE_SHIFT;
unsigned offset = from & (PAGE_CACHE_SIZE-1);
unsigned blocksize;
- pgoff_t iblock;
+ sector_t iblock;
unsigned length, pos;
struct inode *inode = mapping->host;
struct page *page;
@@ -2626,7 +2652,7 @@ int block_truncate_page(struct address_space *mapping,
return 0;
length = blocksize - length;
- iblock = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+ iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
page = grab_cache_page(mapping, index);
err = -ENOMEM;
@@ -3145,6 +3171,7 @@ EXPORT_SYMBOL(fsync_bdev);
EXPORT_SYMBOL(generic_block_bmap);
EXPORT_SYMBOL(generic_commit_write);
EXPORT_SYMBOL(generic_cont_expand);
+EXPORT_SYMBOL(generic_cont_expand_simple);
EXPORT_SYMBOL(init_buffer);
EXPORT_SYMBOL(invalidate_bdev);
EXPORT_SYMBOL(ll_rw_block);
diff --git a/fs/cifs/cifs_uniupr.h b/fs/cifs/cifs_uniupr.h
index decd138f14d..da2ad5b451a 100644
--- a/fs/cifs/cifs_uniupr.h
+++ b/fs/cifs/cifs_uniupr.h
@@ -242,7 +242,7 @@ static signed char UniCaseRangeLff20[27] = {
/*
* Lower Case Range
*/
-const static struct UniCaseRange CifsUniLowerRange[] = {
+static const struct UniCaseRange CifsUniLowerRange[] = {
0x0380, 0x03ab, UniCaseRangeL0380,
0x0400, 0x042f, UniCaseRangeL0400,
0x0490, 0x04cb, UniCaseRangeL0490,
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 2a13a2bac8f..e10213b7541 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -860,9 +860,9 @@ static int cifs_oplock_thread(void * dummyarg)
DeleteOplockQEntry(oplock_item);
/* can not grab inode sem here since it would
deadlock when oplock received on delete
- since vfs_unlink holds the i_sem across
+ since vfs_unlink holds the i_mutex across
the call */
- /* down(&inode->i_sem);*/
+ /* mutex_lock(&inode->i_mutex);*/
if (S_ISREG(inode->i_mode)) {
rc = filemap_fdatawrite(inode->i_mapping);
if(CIFS_I(inode)->clientCanCacheRead == 0) {
@@ -871,7 +871,7 @@ static int cifs_oplock_thread(void * dummyarg)
}
} else
rc = 0;
- /* up(&inode->i_sem);*/
+ /* mutex_unlock(&inode->i_mutex);*/
if (rc)
CIFS_I(inode)->write_behind_rc = rc;
cFYI(1,("Oplock flush inode %p rc %d",inode,rc));
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 14a1c72ced9..5ade53d7bca 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -127,8 +127,7 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
if (file->f_dentry->d_inode->i_mapping) {
/* BB no need to lock inode until after invalidate
since namei code should already have it locked? */
- filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
- filemap_fdatawait(file->f_dentry->d_inode->i_mapping);
+ filemap_write_and_wait(file->f_dentry->d_inode->i_mapping);
}
cFYI(1, ("invalidating remote inode since open detected it "
"changed"));
@@ -419,8 +418,7 @@ static int cifs_reopen_file(struct inode *inode, struct file *file,
pCifsInode = CIFS_I(inode);
if (pCifsInode) {
if (can_flush) {
- filemap_fdatawrite(inode->i_mapping);
- filemap_fdatawait(inode->i_mapping);
+ filemap_write_and_wait(inode->i_mapping);
/* temporarily disable caching while we
go to server to get inode info */
pCifsInode->clientCanCacheAll = FALSE;
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 411c1f7f84d..3ebce9430f4 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1040,9 +1040,9 @@ int cifs_revalidate(struct dentry *direntry)
}
/* can not grab this sem since kernel filesys locking documentation
- indicates i_sem may be taken by the kernel on lookup and rename
- which could deadlock if we grab the i_sem here as well */
-/* down(&direntry->d_inode->i_sem);*/
+ indicates i_mutex may be taken by the kernel on lookup and rename
+ which could deadlock if we grab the i_mutex here as well */
+/* mutex_lock(&direntry->d_inode->i_mutex);*/
/* need to write out dirty pages here */
if (direntry->d_inode->i_mapping) {
/* do we need to lock inode until after invalidate completes
@@ -1066,7 +1066,7 @@ int cifs_revalidate(struct dentry *direntry)
}
}
}
-/* up(&direntry->d_inode->i_sem); */
+/* mutex_unlock(&direntry->d_inode->i_mutex); */
kfree(full_path);
FreeXid(xid);
@@ -1148,8 +1148,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
/* BB check if we need to refresh inode from server now ? BB */
/* need to flush data before changing file size on server */
- filemap_fdatawrite(direntry->d_inode->i_mapping);
- filemap_fdatawait(direntry->d_inode->i_mapping);
+ filemap_write_and_wait(direntry->d_inode->i_mapping);
if (attrs->ia_valid & ATTR_SIZE) {
/* To avoid spurious oplock breaks from server, in the case of
diff --git a/fs/coda/cache.c b/fs/coda/cache.c
index 80072fd9b7f..c607d923350 100644
--- a/fs/coda/cache.c
+++ b/fs/coda/cache.c
@@ -93,7 +93,7 @@ static void coda_flag_children(struct dentry *parent, int flag)
spin_lock(&dcache_lock);
list_for_each(child, &parent->d_subdirs)
{
- de = list_entry(child, struct dentry, d_child);
+ de = list_entry(child, struct dentry, d_u.d_child);
/* don't know what to do with negative dentries */
if ( ! de->d_inode )
continue;
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 2391766e9c7..8f1a517f8b4 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -453,7 +453,7 @@ int coda_readdir(struct file *coda_file, void *dirent, filldir_t filldir)
coda_vfs_stat.readdir++;
host_inode = host_file->f_dentry->d_inode;
- down(&host_inode->i_sem);
+ mutex_lock(&host_inode->i_mutex);
host_file->f_pos = coda_file->f_pos;
if (!host_file->f_op->readdir) {
@@ -475,7 +475,7 @@ int coda_readdir(struct file *coda_file, void *dirent, filldir_t filldir)
}
out:
coda_file->f_pos = host_file->f_pos;
- up(&host_inode->i_sem);
+ mutex_unlock(&host_inode->i_mutex);
return ret;
}
diff --git a/fs/coda/file.c b/fs/coda/file.c
index e6bc022568f..30b4630bd73 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -77,14 +77,14 @@ coda_file_write(struct file *coda_file, const char __user *buf, size_t count, lo
return -EINVAL;
host_inode = host_file->f_dentry->d_inode;
- down(&coda_inode->i_sem);
+ mutex_lock(&coda_inode->i_mutex);
ret = host_file->f_op->write(host_file, buf, count, ppos);
coda_inode->i_size = host_inode->i_size;
coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9;
coda_inode->i_mtime = coda_inode->i_ctime = CURRENT_TIME_SEC;
- up(&coda_inode->i_sem);
+ mutex_unlock(&coda_inode->i_mutex);
return ret;
}
@@ -272,9 +272,9 @@ int coda_fsync(struct file *coda_file, struct dentry *coda_dentry, int datasync)
if (host_file->f_op && host_file->f_op->fsync) {
host_dentry = host_file->f_dentry;
host_inode = host_dentry->d_inode;
- down(&host_inode->i_sem);
+ mutex_lock(&host_inode->i_mutex);
err = host_file->f_op->fsync(host_file, host_dentry, datasync);
- up(&host_inode->i_sem);
+ mutex_unlock(&host_inode->i_mutex);
}
if ( !err && !datasync ) {
diff --git a/fs/compat.c b/fs/compat.c
index 55ac0324aaf..271b75d1597 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -494,9 +494,21 @@ asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
ret = sys_fcntl(fd, cmd, (unsigned long)&f);
set_fs(old_fs);
if (cmd == F_GETLK && ret == 0) {
- if ((f.l_start >= COMPAT_OFF_T_MAX) ||
- ((f.l_start + f.l_len) > COMPAT_OFF_T_MAX))
+ /* GETLK was successful and we need to return the data...
+ * but it needs to fit in the compat structure.
+ * l_start shouldn't be too big, unless the original
+ * start + end is greater than COMPAT_OFF_T_MAX, in which
+ * case the app was asking for trouble, so we return
+ * -EOVERFLOW in that case.
+ * l_len could be too big, in which case we just truncate it,
+ * and only allow the app to see that part of the conflicting
+ * lock that might make sense to it anyway
+ */
+
+ if (f.l_start > COMPAT_OFF_T_MAX)
ret = -EOVERFLOW;
+ if (f.l_len > COMPAT_OFF_T_MAX)
+ f.l_len = COMPAT_OFF_T_MAX;
if (ret == 0)
ret = put_compat_flock(&f, compat_ptr(arg));
}
@@ -515,9 +527,11 @@ asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
(unsigned long)&f);
set_fs(old_fs);
if (cmd == F_GETLK64 && ret == 0) {
- if ((f.l_start >= COMPAT_LOFF_T_MAX) ||
- ((f.l_start + f.l_len) > COMPAT_LOFF_T_MAX))
+ /* need to return lock information - see above for commentary */
+ if (f.l_start > COMPAT_LOFF_T_MAX)
ret = -EOVERFLOW;
+ if (f.l_len > COMPAT_LOFF_T_MAX)
+ f.l_len = COMPAT_LOFF_T_MAX;
if (ret == 0)
ret = put_compat_flock64(&f, compat_ptr(arg));
}
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 43a2508ac69..5dd0207ffd4 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -10,11 +10,11 @@
* ioctls.
*/
-#ifdef INCLUDES
#include <linux/config.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/kernel.h>
+#include <linux/capability.h>
#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/smp.h>
@@ -81,13 +81,9 @@
#include <linux/capi.h>
#include <scsi/scsi.h>
-/* Ugly hack. */
-#undef __KERNEL__
#include <scsi/scsi_ioctl.h>
-#define __KERNEL__
#include <scsi/sg.h>
-#include <asm/types.h>
#include <asm/uaccess.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
@@ -95,7 +91,6 @@
#include <linux/watchdog.h>
#include <linux/dm-ioctl.h>
-#include <asm/module.h>
#include <linux/soundcard.h>
#include <linux/lp.h>
#include <linux/ppdev.h>
@@ -127,11 +122,7 @@
#include <linux/dvb/dmx.h>
#include <linux/dvb/frontend.h>
#include <linux/dvb/video.h>
-
-#undef INCLUDES
-#endif
-
-#ifdef CODE
+#include <linux/lp.h>
/* Aiee. Someone does not find a difference between int and long */
#define EXT2_IOC32_GETFLAGS _IOR('f', 1, int)
@@ -148,6 +139,12 @@
#define EXT2_IOC32_GETVERSION _IOR('v', 1, int)
#define EXT2_IOC32_SETVERSION _IOW('v', 2, int)
+static int do_ioctl32_pointer(unsigned int fd, unsigned int cmd,
+ unsigned long arg, struct file *f)
+{
+ return sys_ioctl(fd, cmd, (unsigned long)compat_ptr(arg));
+}
+
static int w_long(unsigned int fd, unsigned int cmd, unsigned long arg)
{
mm_segment_t old_fs = get_fs();
@@ -207,244 +204,6 @@ static int do_ext3_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
return sys_ioctl(fd, cmd, (unsigned long)compat_ptr(arg));
}
-struct video_tuner32 {
- compat_int_t tuner;
- char name[32];
- compat_ulong_t rangelow, rangehigh;
- u32 flags; /* It is really u32 in videodev.h */
- u16 mode, signal;
-};
-
-static int get_video_tuner32(struct video_tuner *kp, struct video_tuner32 __user *up)
-{
- int i;
-
- if(get_user(kp->tuner, &up->tuner))
- return -EFAULT;
- for(i = 0; i < 32; i++)
- __get_user(kp->name[i], &up->name[i]);
- __get_user(kp->rangelow, &up->rangelow);
- __get_user(kp->rangehigh, &up->rangehigh);
- __get_user(kp->flags, &up->flags);
- __get_user(kp->mode, &up->mode);
- __get_user(kp->signal, &up->signal);
- return 0;
-}
-
-static int put_video_tuner32(struct video_tuner *kp, struct video_tuner32 __user *up)
-{
- int i;
-
- if(put_user(kp->tuner, &up->tuner))
- return -EFAULT;
- for(i = 0; i < 32; i++)
- __put_user(kp->name[i], &up->name[i]);
- __put_user(kp->rangelow, &up->rangelow);
- __put_user(kp->rangehigh, &up->rangehigh);
- __put_user(kp->flags, &up->flags);
- __put_user(kp->mode, &up->mode);
- __put_user(kp->signal, &up->signal);
- return 0;
-}
-
-struct video_buffer32 {
- compat_caddr_t base;
- compat_int_t height, width, depth, bytesperline;
-};
-
-static int get_video_buffer32(struct video_buffer *kp, struct video_buffer32 __user *up)
-{
- u32 tmp;
-
- if (get_user(tmp, &up->base))
- return -EFAULT;
-
- /* This is actually a physical address stored
- * as a void pointer.
- */
- kp->base = (void *)(unsigned long) tmp;
-
- __get_user(kp->height, &up->height);
- __get_user(kp->width, &up->width);
- __get_user(kp->depth, &up->depth);
- __get_user(kp->bytesperline, &up->bytesperline);
- return 0;
-}
-
-static int put_video_buffer32(struct video_buffer *kp, struct video_buffer32 __user *up)
-{
- u32 tmp = (u32)((unsigned long)kp->base);
-
- if(put_user(tmp, &up->base))
- return -EFAULT;
- __put_user(kp->height, &up->height);
- __put_user(kp->width, &up->width);
- __put_user(kp->depth, &up->depth);
- __put_user(kp->bytesperline, &up->bytesperline);
- return 0;
-}
-
-struct video_clip32 {
- s32 x, y, width, height; /* Its really s32 in videodev.h */
- compat_caddr_t next;
-};
-
-struct video_window32 {
- u32 x, y, width, height, chromakey, flags;
- compat_caddr_t clips;
- compat_int_t clipcount;
-};
-
-/* You get back everything except the clips... */
-static int put_video_window32(struct video_window *kp, struct video_window32 __user *up)
-{
- if(put_user(kp->x, &up->x))
- return -EFAULT;
- __put_user(kp->y, &up->y);
- __put_user(kp->width, &up->width);
- __put_user(kp->height, &up->height);
- __put_user(kp->chromakey, &up->chromakey);
- __put_user(kp->flags, &up->flags);
- __put_user(kp->clipcount, &up->clipcount);
- return 0;
-}
-
-#define VIDIOCGTUNER32 _IOWR('v',4, struct video_tuner32)
-#define VIDIOCSTUNER32 _IOW('v',5, struct video_tuner32)
-#define VIDIOCGWIN32 _IOR('v',9, struct video_window32)
-#define VIDIOCSWIN32 _IOW('v',10, struct video_window32)
-#define VIDIOCGFBUF32 _IOR('v',11, struct video_buffer32)
-#define VIDIOCSFBUF32 _IOW('v',12, struct video_buffer32)
-#define VIDIOCGFREQ32 _IOR('v',14, u32)
-#define VIDIOCSFREQ32 _IOW('v',15, u32)
-
-enum {
- MaxClips = (~0U-sizeof(struct video_window))/sizeof(struct video_clip)
-};
-
-static int do_set_window(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct video_window32 __user *up = compat_ptr(arg);
- struct video_window __user *vw;
- struct video_clip __user *p;
- int nclips;
- u32 n;
-
- if (get_user(nclips, &up->clipcount))
- return -EFAULT;
-
- /* Peculiar interface... */
- if (nclips < 0)
- nclips = VIDEO_CLIPMAP_SIZE;
-
- if (nclips > MaxClips)
- return -ENOMEM;
-
- vw = compat_alloc_user_space(sizeof(struct video_window) +
- nclips * sizeof(struct video_clip));
-
- p = nclips ? (struct video_clip __user *)(vw + 1) : NULL;
-
- if (get_user(n, &up->x) || put_user(n, &vw->x) ||
- get_user(n, &up->y) || put_user(n, &vw->y) ||
- get_user(n, &up->width) || put_user(n, &vw->width) ||
- get_user(n, &up->height) || put_user(n, &vw->height) ||
- get_user(n, &up->chromakey) || put_user(n, &vw->chromakey) ||
- get_user(n, &up->flags) || put_user(n, &vw->flags) ||
- get_user(n, &up->clipcount) || put_user(n, &vw->clipcount) ||
- get_user(n, &up->clips) || put_user(p, &vw->clips))
- return -EFAULT;
-
- if (nclips) {
- struct video_clip32 __user *u = compat_ptr(n);
- int i;
- if (!u)
- return -EINVAL;
- for (i = 0; i < nclips; i++, u++, p++) {
- s32 v;
- if (get_user(v, &u->x) ||
- put_user(v, &p->x) ||
- get_user(v, &u->y) ||
- put_user(v, &p->y) ||
- get_user(v, &u->width) ||
- put_user(v, &p->width) ||
- get_user(v, &u->height) ||
- put_user(v, &p->height) ||
- put_user(NULL, &p->next))
- return -EFAULT;
- }
- }
-
- return sys_ioctl(fd, VIDIOCSWIN, (unsigned long)p);
-}
-
-static int do_video_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- union {
- struct video_tuner vt;
- struct video_buffer vb;
- struct video_window vw;
- unsigned long vx;
- } karg;
- mm_segment_t old_fs = get_fs();
- void __user *up = compat_ptr(arg);
- int err = 0;
-
- /* First, convert the command. */
- switch(cmd) {
- case VIDIOCGTUNER32: cmd = VIDIOCGTUNER; break;
- case VIDIOCSTUNER32: cmd = VIDIOCSTUNER; break;
- case VIDIOCGWIN32: cmd = VIDIOCGWIN; break;
- case VIDIOCGFBUF32: cmd = VIDIOCGFBUF; break;
- case VIDIOCSFBUF32: cmd = VIDIOCSFBUF; break;
- case VIDIOCGFREQ32: cmd = VIDIOCGFREQ; break;
- case VIDIOCSFREQ32: cmd = VIDIOCSFREQ; break;
- };
-
- switch(cmd) {
- case VIDIOCSTUNER:
- case VIDIOCGTUNER:
- err = get_video_tuner32(&karg.vt, up);
- break;
-
- case VIDIOCSFBUF:
- err = get_video_buffer32(&karg.vb, up);
- break;
-
- case VIDIOCSFREQ:
- err = get_user(karg.vx, (u32 __user *)up);
- break;
- };
- if(err)
- goto out;
-
- set_fs(KERNEL_DS);
- err = sys_ioctl(fd, cmd, (unsigned long)&karg);
- set_fs(old_fs);
-
- if(err == 0) {
- switch(cmd) {
- case VIDIOCGTUNER:
- err = put_video_tuner32(&karg.vt, up);
- break;
-
- case VIDIOCGWIN:
- err = put_video_window32(&karg.vw, up);
- break;
-
- case VIDIOCGFBUF:
- err = put_video_buffer32(&karg.vb, up);
- break;
-
- case VIDIOCGFREQ:
- err = put_user(((u32)karg.vx), (u32 __user *)up);
- break;
- };
- }
-out:
- return err;
-}
-
struct compat_dmx_event {
dmx_event_t event;
compat_time_t timeStamp;
@@ -1158,6 +917,40 @@ static int sg_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
return err;
}
+struct compat_sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
+ char req_state;
+ char orphan;
+ char sg_io_owned;
+ char problem;
+ int pack_id;
+ compat_uptr_t usr_ptr;
+ unsigned int duration;
+ int unused;
+};
+
+static int sg_grt_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ int err, i;
+ sg_req_info_t *r;
+ struct compat_sg_req_info *o = (struct compat_sg_req_info *)arg;
+ r = compat_alloc_user_space(sizeof(sg_req_info_t)*SG_MAX_QUEUE);
+ err = sys_ioctl(fd,cmd,(unsigned long)r);
+ if (err < 0)
+ return err;
+ for (i = 0; i < SG_MAX_QUEUE; i++) {
+ void __user *ptr;
+ int d;
+
+ if (copy_in_user(o + i, r + i, offsetof(sg_req_info_t, usr_ptr)) ||
+ get_user(ptr, &r[i].usr_ptr) ||
+ get_user(d, &r[i].duration) ||
+ put_user((u32)(unsigned long)(ptr), &o[i].usr_ptr) ||
+ put_user(d, &o[i].duration))
+ return -EFAULT;
+ }
+ return err;
+}
+
struct sock_fprog32 {
unsigned short len;
compat_caddr_t filter;
@@ -2713,6 +2506,49 @@ static int old_bridge_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg
return -EINVAL;
}
+#define RTC_IRQP_READ32 _IOR('p', 0x0b, compat_ulong_t)
+#define RTC_IRQP_SET32 _IOW('p', 0x0c, compat_ulong_t)
+#define RTC_EPOCH_READ32 _IOR('p', 0x0d, compat_ulong_t)
+#define RTC_EPOCH_SET32 _IOW('p', 0x0e, compat_ulong_t)
+
+static int rtc_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
+{
+ mm_segment_t oldfs = get_fs();
+ compat_ulong_t val32;
+ unsigned long kval;
+ int ret;
+
+ switch (cmd) {
+ case RTC_IRQP_READ32:
+ case RTC_EPOCH_READ32:
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, (cmd == RTC_IRQP_READ32) ?
+ RTC_IRQP_READ : RTC_EPOCH_READ,
+ (unsigned long)&kval);
+ set_fs(oldfs);
+ if (ret)
+ return ret;
+ val32 = kval;
+ return put_user(val32, (unsigned int __user *)arg);
+ case RTC_IRQP_SET32:
+ case RTC_EPOCH_SET32:
+ ret = get_user(val32, (unsigned int __user *)arg);
+ if (ret)
+ return ret;
+ kval = val32;
+
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, (cmd == RTC_IRQP_SET32) ?
+ RTC_IRQP_SET : RTC_EPOCH_SET,
+ (unsigned long)&kval);
+ set_fs(oldfs);
+ return ret;
+ default:
+ /* unreached */
+ return -ENOIOCTLCMD;
+ }
+}
+
#if defined(CONFIG_NCP_FS) || defined(CONFIG_NCP_FS_MODULE)
struct ncp_ioctl_request_32 {
u32 function;
@@ -2900,10 +2736,34 @@ static int do_ncp_setprivatedata(unsigned int fd, unsigned int cmd, unsigned lon
}
#endif
-#undef CODE
-#endif
+static int
+lp_timeout_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct compat_timeval *tc = (struct compat_timeval *)arg;
+ struct timeval *tn = compat_alloc_user_space(sizeof(struct timeval));
+ struct timeval ts;
+ if (get_user(ts.tv_sec, &tc->tv_sec) ||
+ get_user(ts.tv_usec, &tc->tv_usec) ||
+ put_user(ts.tv_sec, &tn->tv_sec) ||
+ put_user(ts.tv_usec, &tn->tv_usec))
+ return -EFAULT;
+ return sys_ioctl(fd, cmd, (unsigned long)tn);
+}
+
+#define HANDLE_IOCTL(cmd,handler) \
+ { (cmd), (ioctl_trans_handler_t)(handler) },
+
+/* pointer to compatible structure or no argument */
+#define COMPATIBLE_IOCTL(cmd) \
+ { (cmd), do_ioctl32_pointer },
+
+/* argument is an unsigned long integer, not a pointer */
+#define ULONG_IOCTL(cmd) \
+ { (cmd), (ioctl_trans_handler_t)sys_ioctl },
-#ifdef DECLARES
+
+struct ioctl_trans ioctl_start[] = {
+#include <linux/compat_ioctl.h>
HANDLE_IOCTL(MEMREADOOB32, mtd_rw_oob)
HANDLE_IOCTL(MEMWRITEOOB32, mtd_rw_oob)
#ifdef CONFIG_NET
@@ -2983,6 +2843,7 @@ HANDLE_IOCTL(FDPOLLDRVSTAT32, fd_ioctl_trans)
HANDLE_IOCTL(FDGETFDCSTAT32, fd_ioctl_trans)
HANDLE_IOCTL(FDWERRORGET32, fd_ioctl_trans)
HANDLE_IOCTL(SG_IO,sg_ioctl_trans)
+HANDLE_IOCTL(SG_GET_REQUEST_TABLE, sg_grt_trans)
HANDLE_IOCTL(PPPIOCGIDLE32, ppp_ioctl_trans)
HANDLE_IOCTL(PPPIOCSCOMPRESS32, ppp_ioctl_trans)
HANDLE_IOCTL(PPPIOCSPASS32, ppp_sock_fprog_ioctl_trans)
@@ -3015,14 +2876,6 @@ COMPATIBLE_IOCTL(EXT3_IOC_GROUP_ADD)
#ifdef CONFIG_JBD_DEBUG
HANDLE_IOCTL(EXT3_IOC32_WAIT_FOR_READONLY, do_ext3_ioctl)
#endif
-HANDLE_IOCTL(VIDIOCGTUNER32, do_video_ioctl)
-HANDLE_IOCTL(VIDIOCSTUNER32, do_video_ioctl)
-HANDLE_IOCTL(VIDIOCGWIN32, do_video_ioctl)
-HANDLE_IOCTL(VIDIOCSWIN32, do_set_window)
-HANDLE_IOCTL(VIDIOCGFBUF32, do_video_ioctl)
-HANDLE_IOCTL(VIDIOCSFBUF32, do_video_ioctl)
-HANDLE_IOCTL(VIDIOCGFREQ32, do_video_ioctl)
-HANDLE_IOCTL(VIDIOCSFREQ32, do_video_ioctl)
/* One SMB ioctl needs translations. */
#define SMB_IOC_GETMOUNTUID_32 _IOR('u', 1, compat_uid_t)
HANDLE_IOCTL(SMB_IOC_GETMOUNTUID_32, do_smb_getmountuid)
@@ -3104,6 +2957,10 @@ HANDLE_IOCTL(SIOCSIWENCODE, do_wireless_ioctl)
HANDLE_IOCTL(SIOCGIWENCODE, do_wireless_ioctl)
HANDLE_IOCTL(SIOCSIFBR, old_bridge_ioctl)
HANDLE_IOCTL(SIOCGIFBR, old_bridge_ioctl)
+HANDLE_IOCTL(RTC_IRQP_READ32, rtc_ioctl)
+HANDLE_IOCTL(RTC_IRQP_SET32, rtc_ioctl)
+HANDLE_IOCTL(RTC_EPOCH_READ32, rtc_ioctl)
+HANDLE_IOCTL(RTC_EPOCH_SET32, rtc_ioctl)
#if defined(CONFIG_NCP_FS) || defined(CONFIG_NCP_FS_MODULE)
HANDLE_IOCTL(NCP_IOC_NCPREQUEST_32, do_ncp_ncprequest)
@@ -3121,5 +2978,19 @@ HANDLE_IOCTL(VIDEO_GET_EVENT, do_video_get_event)
HANDLE_IOCTL(VIDEO_STILLPICTURE, do_video_stillpicture)
HANDLE_IOCTL(VIDEO_SET_SPU_PALETTE, do_video_set_spu_palette)
-#undef DECLARES
-#endif
+/* parport */
+COMPATIBLE_IOCTL(LPTIME)
+COMPATIBLE_IOCTL(LPCHAR)
+COMPATIBLE_IOCTL(LPABORTOPEN)
+COMPATIBLE_IOCTL(LPCAREFUL)
+COMPATIBLE_IOCTL(LPWAIT)
+COMPATIBLE_IOCTL(LPSETIRQ)
+COMPATIBLE_IOCTL(LPGETSTATUS)
+COMPATIBLE_IOCTL(LPRESET)
+/* LPGETSTATS not implemented, but no kernels seem to compile it in anyway */
+COMPATIBLE_IOCTL(LPGETFLAGS)
+HANDLE_IOCTL(LPSETTIMEOUT, lp_timeout_trans)
+};
+
+int ioctl_table_size = ARRAY_SIZE(ioctl_start);
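The HANDLE_IOCTL(), COMPATIBLE_IOCTL() and ULONG_IOCTL() macros above populate ioctl_start[], the translation table that 32-bit ioctl dispatch walks. A hypothetical sketch of how a driver-private command with a pointer-width difference would be wired in; FOO_GET, FOO_GET32, FOO_STATUS, FOO_SET_TIMEOUT, the foo_args structures and foo_get_trans() are illustrative names only, not part of this patch:

struct foo_args {			/* native 64-bit layout */
	void __user *buf;
	int len;
};

struct foo_args32 {			/* 32-bit layout: the pointer shrinks */
	compat_uptr_t buf;
	compat_int_t  len;
};

#define FOO_GET		_IOR('x', 0x01, struct foo_args)
#define FOO_GET32	_IOR('x', 0x01, struct foo_args32)

static int foo_get_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	struct foo_args32 __user *p32 = compat_ptr(arg);
	struct foo_args __user *p = compat_alloc_user_space(sizeof(*p));
	compat_uptr_t buf;
	compat_int_t len;

	/* widen the 32-bit argument into a native copy on the user stack */
	if (get_user(buf, &p32->buf) || get_user(len, &p32->len) ||
	    put_user(compat_ptr(buf), &p->buf) || put_user(len, &p->len))
		return -EFAULT;

	return sys_ioctl(fd, FOO_GET, (unsigned long)p);
}

/* and in the table above:
 *	HANDLE_IOCTL(FOO_GET32, foo_get_trans)
 *	COMPATIBLE_IOCTL(FOO_STATUS)		layout already identical
 *	ULONG_IOCTL(FOO_SET_TIMEOUT)		argument is a plain integer
 */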
diff --git a/fs/configfs/Makefile b/fs/configfs/Makefile
new file mode 100644
index 00000000000..00ffb278e98
--- /dev/null
+++ b/fs/configfs/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the configfs virtual filesystem
+#
+
+obj-$(CONFIG_CONFIGFS_FS) += configfs.o
+
+configfs-objs := inode.o file.o dir.o symlink.o mount.o item.o
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
new file mode 100644
index 00000000000..8899d9c5f6b
--- /dev/null
+++ b/fs/configfs/configfs_internal.h
@@ -0,0 +1,142 @@
+/* -*- mode: c; c-basic-offset:8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * configfs_internal.h - Internal stuff for configfs
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * Based on sysfs:
+ * sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
+ *
+ * configfs Copyright (C) 2005 Oracle. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <linux/list.h>
+
+struct configfs_dirent {
+ atomic_t s_count;
+ struct list_head s_sibling;
+ struct list_head s_children;
+ struct list_head s_links;
+ void * s_element;
+ int s_type;
+ umode_t s_mode;
+ struct dentry * s_dentry;
+};
+
+#define CONFIGFS_ROOT 0x0001
+#define CONFIGFS_DIR 0x0002
+#define CONFIGFS_ITEM_ATTR 0x0004
+#define CONFIGFS_ITEM_LINK 0x0020
+#define CONFIGFS_USET_DIR 0x0040
+#define CONFIGFS_USET_DEFAULT 0x0080
+#define CONFIGFS_USET_DROPPING 0x0100
+#define CONFIGFS_NOT_PINNED (CONFIGFS_ITEM_ATTR)
+
+extern struct vfsmount * configfs_mount;
+
+extern int configfs_is_root(struct config_item *item);
+
+extern struct inode * configfs_new_inode(mode_t mode);
+extern int configfs_create(struct dentry *, int mode, int (*init)(struct inode *));
+
+extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
+extern int configfs_make_dirent(struct configfs_dirent *,
+ struct dentry *, void *, umode_t, int);
+
+extern int configfs_add_file(struct dentry *, const struct configfs_attribute *, int);
+extern void configfs_hash_and_remove(struct dentry * dir, const char * name);
+
+extern const unsigned char * configfs_get_name(struct configfs_dirent *sd);
+extern void configfs_drop_dentry(struct configfs_dirent *sd, struct dentry *parent);
+
+extern int configfs_pin_fs(void);
+extern void configfs_release_fs(void);
+
+extern struct rw_semaphore configfs_rename_sem;
+extern struct super_block * configfs_sb;
+extern struct file_operations configfs_dir_operations;
+extern struct file_operations configfs_file_operations;
+extern struct file_operations bin_fops;
+extern struct inode_operations configfs_dir_inode_operations;
+extern struct inode_operations configfs_symlink_inode_operations;
+
+extern int configfs_symlink(struct inode *dir, struct dentry *dentry,
+ const char *symname);
+extern int configfs_unlink(struct inode *dir, struct dentry *dentry);
+
+struct configfs_symlink {
+ struct list_head sl_list;
+ struct config_item *sl_target;
+};
+
+extern int configfs_create_link(struct configfs_symlink *sl,
+ struct dentry *parent,
+ struct dentry *dentry);
+
+static inline struct config_item * to_item(struct dentry * dentry)
+{
+ struct configfs_dirent * sd = dentry->d_fsdata;
+ return ((struct config_item *) sd->s_element);
+}
+
+static inline struct configfs_attribute * to_attr(struct dentry * dentry)
+{
+ struct configfs_dirent * sd = dentry->d_fsdata;
+ return ((struct configfs_attribute *) sd->s_element);
+}
+
+static inline struct config_item *configfs_get_config_item(struct dentry *dentry)
+{
+ struct config_item * item = NULL;
+
+ spin_lock(&dcache_lock);
+ if (!d_unhashed(dentry)) {
+ struct configfs_dirent * sd = dentry->d_fsdata;
+ if (sd->s_type & CONFIGFS_ITEM_LINK) {
+ struct configfs_symlink * sl = sd->s_element;
+ item = config_item_get(sl->sl_target);
+ } else
+ item = config_item_get(sd->s_element);
+ }
+ spin_unlock(&dcache_lock);
+
+ return item;
+}
+
+static inline void release_configfs_dirent(struct configfs_dirent * sd)
+{
+ if (!(sd->s_type & CONFIGFS_ROOT))
+ kfree(sd);
+}
+
+static inline struct configfs_dirent * configfs_get(struct configfs_dirent * sd)
+{
+ if (sd) {
+ WARN_ON(!atomic_read(&sd->s_count));
+ atomic_inc(&sd->s_count);
+ }
+ return sd;
+}
+
+static inline void configfs_put(struct configfs_dirent * sd)
+{
+ WARN_ON(!atomic_read(&sd->s_count));
+ if (atomic_dec_and_test(&sd->s_count))
+ release_configfs_dirent(sd);
+}
+
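The configfs_get()/configfs_put() helpers above pin a configfs_dirent while its s_element is being used. A minimal illustrative pairing (the function name is hypothetical, not part of this patch):

static void example_peek_element(struct dentry *dentry)
{
	/* take a reference before touching the dirent, drop it afterwards */
	struct configfs_dirent *sd = configfs_get(dentry->d_fsdata);

	if (!sd)
		return;
	pr_debug("configfs: element %p\n", sd->s_element);
	configfs_put(sd);
}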
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
new file mode 100644
index 00000000000..b668ec61527
--- /dev/null
+++ b/fs/configfs/dir.c
@@ -0,0 +1,1102 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dir.c - Operations for configfs directories.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * Based on sysfs:
+ * sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
+ *
+ * configfs Copyright (C) 2005 Oracle. All rights reserved.
+ */
+
+#undef DEBUG
+
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/configfs.h>
+#include "configfs_internal.h"
+
+DECLARE_RWSEM(configfs_rename_sem);
+
+static void configfs_d_iput(struct dentry * dentry,
+ struct inode * inode)
+{
+ struct configfs_dirent * sd = dentry->d_fsdata;
+
+ if (sd) {
+ BUG_ON(sd->s_dentry != dentry);
+ sd->s_dentry = NULL;
+ configfs_put(sd);
+ }
+ iput(inode);
+}
+
+/*
+ * We _must_ delete our dentries on last dput, as the chain-to-parent
+ * behavior is required to clear the parents of default_groups.
+ */
+static int configfs_d_delete(struct dentry *dentry)
+{
+ return 1;
+}
+
+static struct dentry_operations configfs_dentry_ops = {
+ .d_iput = configfs_d_iput,
+ /* simple_delete_dentry() isn't exported */
+ .d_delete = configfs_d_delete,
+};
+
+/*
+ * Allocates a new configfs_dirent and links it to the parent configfs_dirent
+ */
+static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent * parent_sd,
+ void * element)
+{
+ struct configfs_dirent * sd;
+
+ sd = kmalloc(sizeof(*sd), GFP_KERNEL);
+ if (!sd)
+ return NULL;
+
+ memset(sd, 0, sizeof(*sd));
+ atomic_set(&sd->s_count, 1);
+ INIT_LIST_HEAD(&sd->s_links);
+ INIT_LIST_HEAD(&sd->s_children);
+ list_add(&sd->s_sibling, &parent_sd->s_children);
+ sd->s_element = element;
+
+ return sd;
+}
+
+int configfs_make_dirent(struct configfs_dirent * parent_sd,
+ struct dentry * dentry, void * element,
+ umode_t mode, int type)
+{
+ struct configfs_dirent * sd;
+
+ sd = configfs_new_dirent(parent_sd, element);
+ if (!sd)
+ return -ENOMEM;
+
+ sd->s_mode = mode;
+ sd->s_type = type;
+ sd->s_dentry = dentry;
+ if (dentry) {
+ dentry->d_fsdata = configfs_get(sd);
+ dentry->d_op = &configfs_dentry_ops;
+ }
+
+ return 0;
+}
+
+static int init_dir(struct inode * inode)
+{
+ inode->i_op = &configfs_dir_inode_operations;
+ inode->i_fop = &configfs_dir_operations;
+
+ /* directory inodes start off with i_nlink == 2 (for "." entry) */
+ inode->i_nlink++;
+ return 0;
+}
+
+static int init_file(struct inode * inode)
+{
+ inode->i_size = PAGE_SIZE;
+ inode->i_fop = &configfs_file_operations;
+ return 0;
+}
+
+static int init_symlink(struct inode * inode)
+{
+ inode->i_op = &configfs_symlink_inode_operations;
+ return 0;
+}
+
+static int create_dir(struct config_item * k, struct dentry * p,
+ struct dentry * d)
+{
+ int error;
+ umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
+
+ error = configfs_create(d, mode, init_dir);
+ if (!error) {
+ error = configfs_make_dirent(p->d_fsdata, d, k, mode,
+ CONFIGFS_DIR);
+ if (!error) {
+ p->d_inode->i_nlink++;
+ (d)->d_op = &configfs_dentry_ops;
+ }
+ }
+ return error;
+}
+
+
+/**
+ * configfs_create_dir - create a directory for a config_item.
+ * @item: config_item we're creating a directory for.
+ * @dentry: config_item's dentry.
+ */
+
+static int configfs_create_dir(struct config_item * item, struct dentry *dentry)
+{
+ struct dentry * parent;
+ int error = 0;
+
+ BUG_ON(!item);
+
+ if (item->ci_parent)
+ parent = item->ci_parent->ci_dentry;
+ else if (configfs_mount && configfs_mount->mnt_sb)
+ parent = configfs_mount->mnt_sb->s_root;
+ else
+ return -EFAULT;
+
+ error = create_dir(item,parent,dentry);
+ if (!error)
+ item->ci_dentry = dentry;
+ return error;
+}
+
+int configfs_create_link(struct configfs_symlink *sl,
+ struct dentry *parent,
+ struct dentry *dentry)
+{
+ int err = 0;
+ umode_t mode = S_IFLNK | S_IRWXUGO;
+
+ err = configfs_create(dentry, mode, init_symlink);
+ if (!err) {
+ err = configfs_make_dirent(parent->d_fsdata, dentry, sl,
+ mode, CONFIGFS_ITEM_LINK);
+ if (!err)
+ dentry->d_op = &configfs_dentry_ops;
+ }
+ return err;
+}
+
+static void remove_dir(struct dentry * d)
+{
+ struct dentry * parent = dget(d->d_parent);
+ struct configfs_dirent * sd;
+
+ sd = d->d_fsdata;
+ list_del_init(&sd->s_sibling);
+ configfs_put(sd);
+ if (d->d_inode)
+ simple_rmdir(parent->d_inode,d);
+
+ pr_debug(" o %s removing done (%d)\n",d->d_name.name,
+ atomic_read(&d->d_count));
+
+ dput(parent);
+}
+
+/**
+ * configfs_remove_dir - remove a config_item's directory.
+ * @item: config_item we're removing.
+ *
+ * The only thing special about this is that we remove any files in
+ * the directory before we remove the directory, and we've inlined
+ * what used to be configfs_rmdir() below, instead of calling it separately.
+ */
+
+static void configfs_remove_dir(struct config_item * item)
+{
+ struct dentry * dentry = dget(item->ci_dentry);
+
+ if (!dentry)
+ return;
+
+ remove_dir(dentry);
+ /**
+ * Drop reference from dget() on entrance.
+ */
+ dput(dentry);
+}
+
+
+/* attaches attribute's configfs_dirent to the dentry corresponding to the
+ * attribute file
+ */
+static int configfs_attach_attr(struct configfs_dirent * sd, struct dentry * dentry)
+{
+ struct configfs_attribute * attr = sd->s_element;
+ int error;
+
+ error = configfs_create(dentry, (attr->ca_mode & S_IALLUGO) | S_IFREG, init_file);
+ if (error)
+ return error;
+
+ dentry->d_op = &configfs_dentry_ops;
+ dentry->d_fsdata = configfs_get(sd);
+ sd->s_dentry = dentry;
+ d_rehash(dentry);
+
+ return 0;
+}
+
+static struct dentry * configfs_lookup(struct inode *dir,
+ struct dentry *dentry,
+ struct nameidata *nd)
+{
+ struct configfs_dirent * parent_sd = dentry->d_parent->d_fsdata;
+ struct configfs_dirent * sd;
+ int found = 0;
+ int err = 0;
+
+ list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
+ if (sd->s_type & CONFIGFS_NOT_PINNED) {
+ const unsigned char * name = configfs_get_name(sd);
+
+ if (strcmp(name, dentry->d_name.name))
+ continue;
+
+ found = 1;
+ err = configfs_attach_attr(sd, dentry);
+ break;
+ }
+ }
+
+ if (!found) {
+ /*
+ * If it doesn't exist and it isn't a NOT_PINNED item,
+ * it must be negative.
+ */
+ return simple_lookup(dir, dentry, nd);
+ }
+
+ return ERR_PTR(err);
+}
+
+/*
+ * Only subdirectories count here. Files (CONFIGFS_NOT_PINNED) are
+ * attributes and are removed by rmdir(). We recurse, taking i_mutex
+ * on all children that are candidates for default detach. If the
+ * result is clean, then configfs_detach_group() will handle dropping
+ * i_mutex. If there is an error, the caller will clean up the i_mutex
+ * holders via configfs_detach_rollback().
+ */
+static int configfs_detach_prep(struct dentry *dentry)
+{
+ struct configfs_dirent *parent_sd = dentry->d_fsdata;
+ struct configfs_dirent *sd;
+ int ret;
+
+ ret = -EBUSY;
+ if (!list_empty(&parent_sd->s_links))
+ goto out;
+
+ ret = 0;
+ list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
+ if (sd->s_type & CONFIGFS_NOT_PINNED)
+ continue;
+ if (sd->s_type & CONFIGFS_USET_DEFAULT) {
+ mutex_lock(&sd->s_dentry->d_inode->i_mutex);
+ /* Mark that we've taken i_mutex */
+ sd->s_type |= CONFIGFS_USET_DROPPING;
+
+ ret = configfs_detach_prep(sd->s_dentry);
+ if (!ret)
+ continue;
+ } else
+ ret = -ENOTEMPTY;
+
+ break;
+ }
+
+out:
+ return ret;
+}
+
+/*
+ * Walk the tree, dropping i_mutex wherever CONFIGFS_USET_DROPPING is
+ * set.
+ */
+static void configfs_detach_rollback(struct dentry *dentry)
+{
+ struct configfs_dirent *parent_sd = dentry->d_fsdata;
+ struct configfs_dirent *sd;
+
+ list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
+ if (sd->s_type & CONFIGFS_USET_DEFAULT) {
+ configfs_detach_rollback(sd->s_dentry);
+
+ if (sd->s_type & CONFIGFS_USET_DROPPING) {
+ sd->s_type &= ~CONFIGFS_USET_DROPPING;
+ mutex_unlock(&sd->s_dentry->d_inode->i_mutex);
+ }
+ }
+ }
+}
+
+static void detach_attrs(struct config_item * item)
+{
+ struct dentry * dentry = dget(item->ci_dentry);
+ struct configfs_dirent * parent_sd;
+ struct configfs_dirent * sd, * tmp;
+
+ if (!dentry)
+ return;
+
+ pr_debug("configfs %s: dropping attrs for dir\n",
+ dentry->d_name.name);
+
+ parent_sd = dentry->d_fsdata;
+ list_for_each_entry_safe(sd, tmp, &parent_sd->s_children, s_sibling) {
+ if (!sd->s_element || !(sd->s_type & CONFIGFS_NOT_PINNED))
+ continue;
+ list_del_init(&sd->s_sibling);
+ configfs_drop_dentry(sd, dentry);
+ configfs_put(sd);
+ }
+
+ /**
+ * Drop reference from dget() on entrance.
+ */
+ dput(dentry);
+}
+
+static int populate_attrs(struct config_item *item)
+{
+ struct config_item_type *t = item->ci_type;
+ struct configfs_attribute *attr;
+ int error = 0;
+ int i;
+
+ if (!t)
+ return -EINVAL;
+ if (t->ct_attrs) {
+ for (i = 0; (attr = t->ct_attrs[i]) != NULL; i++) {
+ if ((error = configfs_create_file(item, attr)))
+ break;
+ }
+ }
+
+ if (error)
+ detach_attrs(item);
+
+ return error;
+}
+
+static int configfs_attach_group(struct config_item *parent_item,
+ struct config_item *item,
+ struct dentry *dentry);
+static void configfs_detach_group(struct config_item *item);
+
+static void detach_groups(struct config_group *group)
+{
+ struct dentry * dentry = dget(group->cg_item.ci_dentry);
+ struct dentry *child;
+ struct configfs_dirent *parent_sd;
+ struct configfs_dirent *sd, *tmp;
+
+ if (!dentry)
+ return;
+
+ parent_sd = dentry->d_fsdata;
+ list_for_each_entry_safe(sd, tmp, &parent_sd->s_children, s_sibling) {
+ if (!sd->s_element ||
+ !(sd->s_type & CONFIGFS_USET_DEFAULT))
+ continue;
+
+ child = sd->s_dentry;
+
+ configfs_detach_group(sd->s_element);
+ child->d_inode->i_flags |= S_DEAD;
+
+ /*
+ * From rmdir/unregister, a configfs_detach_prep() pass
+ * has taken our i_mutex for us. Drop it.
+ * From mkdir/register cleanup, there is no sem held.
+ */
+ if (sd->s_type & CONFIGFS_USET_DROPPING)
+ mutex_unlock(&child->d_inode->i_mutex);
+
+ d_delete(child);
+ dput(child);
+ }
+
+ /**
+ * Drop reference from dget() on entrance.
+ */
+ dput(dentry);
+}
+
+/*
+ * This fakes mkdir(2) on a default_groups[] entry. It
+ * creates a dentry, attaches it, and then does fixup
+ * on the sd->s_type.
+ *
+ * We could, perhaps, tweak our parent's ->mkdir for a minute and
+ * try using vfs_mkdir. Just a thought.
+ */
+static int create_default_group(struct config_group *parent_group,
+ struct config_group *group)
+{
+ int ret;
+ struct qstr name;
+ struct configfs_dirent *sd;
+ /* We trust the caller holds a reference to parent */
+ struct dentry *child, *parent = parent_group->cg_item.ci_dentry;
+
+ if (!group->cg_item.ci_name)
+ group->cg_item.ci_name = group->cg_item.ci_namebuf;
+ name.name = group->cg_item.ci_name;
+ name.len = strlen(name.name);
+ name.hash = full_name_hash(name.name, name.len);
+
+ ret = -ENOMEM;
+ child = d_alloc(parent, &name);
+ if (child) {
+ d_add(child, NULL);
+
+ ret = configfs_attach_group(&parent_group->cg_item,
+ &group->cg_item, child);
+ if (!ret) {
+ sd = child->d_fsdata;
+ sd->s_type |= CONFIGFS_USET_DEFAULT;
+ } else {
+ d_delete(child);
+ dput(child);
+ }
+ }
+
+ return ret;
+}
+
+static int populate_groups(struct config_group *group)
+{
+ struct config_group *new_group;
+ struct dentry *dentry = group->cg_item.ci_dentry;
+ int ret = 0;
+ int i;
+
+ if (group && group->default_groups) {
+ /* FYI, we're faking mkdir here
+ * I'm not sure we need this semaphore, as we're called
+ * from our parent's mkdir. That holds our parent's
+ * i_mutex, so afaik lookup cannot continue through our
+ * parent to find us, let alone mess with our tree.
+ * That said, taking our i_mutex is closer to mkdir
+ * emulation, and shouldn't hurt. */
+ mutex_lock(&dentry->d_inode->i_mutex);
+
+ for (i = 0; group->default_groups[i]; i++) {
+ new_group = group->default_groups[i];
+
+ ret = create_default_group(group, new_group);
+ if (ret)
+ break;
+ }
+
+ mutex_unlock(&dentry->d_inode->i_mutex);
+ }
+
+ if (ret)
+ detach_groups(group);
+
+ return ret;
+}
+
+/*
+ * All of link_obj/unlink_obj/link_group/unlink_group require that
+ * subsys->su_sem is held.
+ */
+
+static void unlink_obj(struct config_item *item)
+{
+ struct config_group *group;
+
+ group = item->ci_group;
+ if (group) {
+ list_del_init(&item->ci_entry);
+
+ item->ci_group = NULL;
+ item->ci_parent = NULL;
+ config_item_put(item);
+
+ config_group_put(group);
+ }
+}
+
+static void link_obj(struct config_item *parent_item, struct config_item *item)
+{
+ /* Parent seems redundant with group, but it makes certain
+ * traversals much nicer. */
+ item->ci_parent = parent_item;
+ item->ci_group = config_group_get(to_config_group(parent_item));
+ list_add_tail(&item->ci_entry, &item->ci_group->cg_children);
+
+ config_item_get(item);
+}
+
+static void unlink_group(struct config_group *group)
+{
+ int i;
+ struct config_group *new_group;
+
+ if (group->default_groups) {
+ for (i = 0; group->default_groups[i]; i++) {
+ new_group = group->default_groups[i];
+ unlink_group(new_group);
+ }
+ }
+
+ group->cg_subsys = NULL;
+ unlink_obj(&group->cg_item);
+}
+
+static void link_group(struct config_group *parent_group, struct config_group *group)
+{
+ int i;
+ struct config_group *new_group;
+ struct configfs_subsystem *subsys = NULL; /* gcc is a turd */
+
+ link_obj(&parent_group->cg_item, &group->cg_item);
+
+ if (parent_group->cg_subsys)
+ subsys = parent_group->cg_subsys;
+ else if (configfs_is_root(&parent_group->cg_item))
+ subsys = to_configfs_subsystem(group);
+ else
+ BUG();
+ group->cg_subsys = subsys;
+
+ if (group->default_groups) {
+ for (i = 0; group->default_groups[i]; i++) {
+ new_group = group->default_groups[i];
+ link_group(group, new_group);
+ }
+ }
+}
+
+/*
+ * The goal is that configfs_attach_item() (and
+ * configfs_attach_group()) can be called from either the VFS or this
+ * module. That is, they assume that the items have been created,
+ * the dentry allocated, and the dcache is all ready to go.
+ *
+ * If they fail, they must clean up after themselves as if they
+ * had never been called. The caller (VFS or local function) will
+ * handle cleaning up the dcache bits.
+ *
+ * configfs_detach_group() and configfs_detach_item() behave similarly on
+ * the way out. They assume that the proper semaphores are held, they
+ * clean up the configfs items, and they expect their callers will
+ * handle the dcache bits.
+ */
+static int configfs_attach_item(struct config_item *parent_item,
+ struct config_item *item,
+ struct dentry *dentry)
+{
+ int ret;
+
+ ret = configfs_create_dir(item, dentry);
+ if (!ret) {
+ ret = populate_attrs(item);
+ if (ret) {
+ configfs_remove_dir(item);
+ d_delete(dentry);
+ }
+ }
+
+ return ret;
+}
+
+static void configfs_detach_item(struct config_item *item)
+{
+ detach_attrs(item);
+ configfs_remove_dir(item);
+}
+
+static int configfs_attach_group(struct config_item *parent_item,
+ struct config_item *item,
+ struct dentry *dentry)
+{
+ int ret;
+ struct configfs_dirent *sd;
+
+ ret = configfs_attach_item(parent_item, item, dentry);
+ if (!ret) {
+ sd = dentry->d_fsdata;
+ sd->s_type |= CONFIGFS_USET_DIR;
+
+ ret = populate_groups(to_config_group(item));
+ if (ret) {
+ configfs_detach_item(item);
+ d_delete(dentry);
+ }
+ }
+
+ return ret;
+}
+
+static void configfs_detach_group(struct config_item *item)
+{
+ detach_groups(to_config_group(item));
+ configfs_detach_item(item);
+}
+
+/*
+ * Drop the initial reference from make_item()/make_group()
+ * This function assumes that a reference is held on the item
+ * and that the item holds a valid reference to the parent. Also, it
+ * assumes the caller has validated ci_type.
+ */
+static void client_drop_item(struct config_item *parent_item,
+ struct config_item *item)
+{
+ struct config_item_type *type;
+
+ type = parent_item->ci_type;
+ BUG_ON(!type);
+
+ if (type->ct_group_ops && type->ct_group_ops->drop_item)
+ type->ct_group_ops->drop_item(to_config_group(parent_item),
+ item);
+ else
+ config_item_put(item);
+}
+
+
+static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+ int ret;
+ struct config_group *group;
+ struct config_item *item;
+ struct config_item *parent_item;
+ struct configfs_subsystem *subsys;
+ struct configfs_dirent *sd;
+ struct config_item_type *type;
+ struct module *owner;
+ char *name;
+
+ if (dentry->d_parent == configfs_sb->s_root)
+ return -EPERM;
+
+ sd = dentry->d_parent->d_fsdata;
+ if (!(sd->s_type & CONFIGFS_USET_DIR))
+ return -EPERM;
+
+ parent_item = configfs_get_config_item(dentry->d_parent);
+ type = parent_item->ci_type;
+ subsys = to_config_group(parent_item)->cg_subsys;
+ BUG_ON(!subsys);
+
+ if (!type || !type->ct_group_ops ||
+ (!type->ct_group_ops->make_group &&
+ !type->ct_group_ops->make_item)) {
+ config_item_put(parent_item);
+ return -EPERM; /* What lack-of-mkdir returns */
+ }
+
+ name = kmalloc(dentry->d_name.len + 1, GFP_KERNEL);
+ if (!name) {
+ config_item_put(parent_item);
+ return -ENOMEM;
+ }
+ snprintf(name, dentry->d_name.len + 1, "%s", dentry->d_name.name);
+
+ down(&subsys->su_sem);
+ group = NULL;
+ item = NULL;
+ if (type->ct_group_ops->make_group) {
+ group = type->ct_group_ops->make_group(to_config_group(parent_item), name);
+ if (group) {
+ link_group(to_config_group(parent_item), group);
+ item = &group->cg_item;
+ }
+ } else {
+ item = type->ct_group_ops->make_item(to_config_group(parent_item), name);
+ if (item)
+ link_obj(parent_item, item);
+ }
+ up(&subsys->su_sem);
+
+ kfree(name);
+ if (!item) {
+ config_item_put(parent_item);
+ return -ENOMEM;
+ }
+
+ ret = -EINVAL;
+ type = item->ci_type;
+ if (type) {
+ owner = type->ct_owner;
+ if (try_module_get(owner)) {
+ if (group) {
+ ret = configfs_attach_group(parent_item,
+ item,
+ dentry);
+ } else {
+ ret = configfs_attach_item(parent_item,
+ item,
+ dentry);
+ }
+
+ if (ret) {
+ down(&subsys->su_sem);
+ if (group)
+ unlink_group(group);
+ else
+ unlink_obj(item);
+ client_drop_item(parent_item, item);
+ up(&subsys->su_sem);
+
+ config_item_put(parent_item);
+ module_put(owner);
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ struct config_item *parent_item;
+ struct config_item *item;
+ struct configfs_subsystem *subsys;
+ struct configfs_dirent *sd;
+ struct module *owner = NULL;
+ int ret;
+
+ if (dentry->d_parent == configfs_sb->s_root)
+ return -EPERM;
+
+ sd = dentry->d_fsdata;
+ if (sd->s_type & CONFIGFS_USET_DEFAULT)
+ return -EPERM;
+
+ parent_item = configfs_get_config_item(dentry->d_parent);
+ subsys = to_config_group(parent_item)->cg_subsys;
+ BUG_ON(!subsys);
+
+ if (!parent_item->ci_type) {
+ config_item_put(parent_item);
+ return -EINVAL;
+ }
+
+ ret = configfs_detach_prep(dentry);
+ if (ret) {
+ configfs_detach_rollback(dentry);
+ config_item_put(parent_item);
+ return ret;
+ }
+
+ item = configfs_get_config_item(dentry);
+
+ /* Drop reference from above, item already holds one. */
+ config_item_put(parent_item);
+
+ if (item->ci_type)
+ owner = item->ci_type->ct_owner;
+
+ if (sd->s_type & CONFIGFS_USET_DIR) {
+ configfs_detach_group(item);
+
+ down(&subsys->su_sem);
+ unlink_group(to_config_group(item));
+ } else {
+ configfs_detach_item(item);
+
+ down(&subsys->su_sem);
+ unlink_obj(item);
+ }
+
+ client_drop_item(parent_item, item);
+ up(&subsys->su_sem);
+
+ /* Drop our reference from above */
+ config_item_put(item);
+
+ module_put(owner);
+
+ return 0;
+}
+
+struct inode_operations configfs_dir_inode_operations = {
+ .mkdir = configfs_mkdir,
+ .rmdir = configfs_rmdir,
+ .symlink = configfs_symlink,
+ .unlink = configfs_unlink,
+ .lookup = configfs_lookup,
+};
+
+#if 0
+int configfs_rename_dir(struct config_item * item, const char *new_name)
+{
+ int error = 0;
+ struct dentry * new_dentry, * parent;
+
+ if (!strcmp(config_item_name(item), new_name))
+ return -EINVAL;
+
+ if (!item->parent)
+ return -EINVAL;
+
+ down_write(&configfs_rename_sem);
+ parent = item->parent->dentry;
+
+ mutex_lock(&parent->d_inode->i_mutex);
+
+ new_dentry = lookup_one_len(new_name, parent, strlen(new_name));
+ if (!IS_ERR(new_dentry)) {
+ if (!new_dentry->d_inode) {
+ error = config_item_set_name(item, "%s", new_name);
+ if (!error) {
+ d_add(new_dentry, NULL);
+ d_move(item->dentry, new_dentry);
+ }
+ else
+ d_delete(new_dentry);
+ } else
+ error = -EEXIST;
+ dput(new_dentry);
+ }
+ mutex_unlock(&parent->d_inode->i_mutex);
+ up_write(&configfs_rename_sem);
+
+ return error;
+}
+#endif
+
+static int configfs_dir_open(struct inode *inode, struct file *file)
+{
+ struct dentry * dentry = file->f_dentry;
+ struct configfs_dirent * parent_sd = dentry->d_fsdata;
+
+ mutex_lock(&dentry->d_inode->i_mutex);
+ file->private_data = configfs_new_dirent(parent_sd, NULL);
+ mutex_unlock(&dentry->d_inode->i_mutex);
+
+ return file->private_data ? 0 : -ENOMEM;
+
+}
+
+static int configfs_dir_close(struct inode *inode, struct file *file)
+{
+ struct dentry * dentry = file->f_dentry;
+ struct configfs_dirent * cursor = file->private_data;
+
+ mutex_lock(&dentry->d_inode->i_mutex);
+ list_del_init(&cursor->s_sibling);
+ mutex_unlock(&dentry->d_inode->i_mutex);
+
+ release_configfs_dirent(cursor);
+
+ return 0;
+}
+
+/* Relationship between s_mode and the DT_xxx types */
+static inline unsigned char dt_type(struct configfs_dirent *sd)
+{
+ return (sd->s_mode >> 12) & 15;
+}
+
+static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
+{
+ struct dentry *dentry = filp->f_dentry;
+ struct configfs_dirent * parent_sd = dentry->d_fsdata;
+ struct configfs_dirent *cursor = filp->private_data;
+ struct list_head *p, *q = &cursor->s_sibling;
+ ino_t ino;
+ int i = filp->f_pos;
+
+ switch (i) {
+ case 0:
+ ino = dentry->d_inode->i_ino;
+ if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0)
+ break;
+ filp->f_pos++;
+ i++;
+ /* fallthrough */
+ case 1:
+ ino = parent_ino(dentry);
+ if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0)
+ break;
+ filp->f_pos++;
+ i++;
+ /* fallthrough */
+ default:
+ if (filp->f_pos == 2) {
+ list_del(q);
+ list_add(q, &parent_sd->s_children);
+ }
+ for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
+ struct configfs_dirent *next;
+ const char * name;
+ int len;
+
+ next = list_entry(p, struct configfs_dirent,
+ s_sibling);
+ if (!next->s_element)
+ continue;
+
+ name = configfs_get_name(next);
+ len = strlen(name);
+ if (next->s_dentry)
+ ino = next->s_dentry->d_inode->i_ino;
+ else
+ ino = iunique(configfs_sb, 2);
+
+ if (filldir(dirent, name, len, filp->f_pos, ino,
+ dt_type(next)) < 0)
+ return 0;
+
+ list_del(q);
+ list_add(q, p);
+ p = q;
+ filp->f_pos++;
+ }
+ }
+ return 0;
+}
+
+static loff_t configfs_dir_lseek(struct file * file, loff_t offset, int origin)
+{
+ struct dentry * dentry = file->f_dentry;
+
+ mutex_lock(&dentry->d_inode->i_mutex);
+ switch (origin) {
+ case 1:
+ offset += file->f_pos;
+ case 0:
+ if (offset >= 0)
+ break;
+ default:
+ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
+ return -EINVAL;
+ }
+ if (offset != file->f_pos) {
+ file->f_pos = offset;
+ if (file->f_pos >= 2) {
+ struct configfs_dirent *sd = dentry->d_fsdata;
+ struct configfs_dirent *cursor = file->private_data;
+ struct list_head *p;
+ loff_t n = file->f_pos - 2;
+
+ list_del(&cursor->s_sibling);
+ p = sd->s_children.next;
+ while (n && p != &sd->s_children) {
+ struct configfs_dirent *next;
+ next = list_entry(p, struct configfs_dirent,
+ s_sibling);
+ if (next->s_element)
+ n--;
+ p = p->next;
+ }
+ list_add_tail(&cursor->s_sibling, p);
+ }
+ }
+ mutex_unlock(&dentry->d_inode->i_mutex);
+ return offset;
+}
+
+struct file_operations configfs_dir_operations = {
+ .open = configfs_dir_open,
+ .release = configfs_dir_close,
+ .llseek = configfs_dir_lseek,
+ .read = generic_read_dir,
+ .readdir = configfs_readdir,
+};
+
+int configfs_register_subsystem(struct configfs_subsystem *subsys)
+{
+ int err;
+ struct config_group *group = &subsys->su_group;
+ struct qstr name;
+ struct dentry *dentry;
+ struct configfs_dirent *sd;
+
+ err = configfs_pin_fs();
+ if (err)
+ return err;
+
+ if (!group->cg_item.ci_name)
+ group->cg_item.ci_name = group->cg_item.ci_namebuf;
+
+ sd = configfs_sb->s_root->d_fsdata;
+ link_group(to_config_group(sd->s_element), group);
+
+ mutex_lock(&configfs_sb->s_root->d_inode->i_mutex);
+
+ name.name = group->cg_item.ci_name;
+ name.len = strlen(name.name);
+ name.hash = full_name_hash(name.name, name.len);
+
+ err = -ENOMEM;
+ dentry = d_alloc(configfs_sb->s_root, &name);
+ if (!dentry)
+ goto out_release;
+
+ d_add(dentry, NULL);
+
+ err = configfs_attach_group(sd->s_element, &group->cg_item,
+ dentry);
+ if (!err)
+ dentry = NULL;
+ else
+ d_delete(dentry);
+
+ mutex_unlock(&configfs_sb->s_root->d_inode->i_mutex);
+
+ if (dentry) {
+ dput(dentry);
+out_release:
+ unlink_group(group);
+ configfs_release_fs();
+ }
+
+ return err;
+}
+
+void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
+{
+ struct config_group *group = &subsys->su_group;
+ struct dentry *dentry = group->cg_item.ci_dentry;
+
+ if (dentry->d_parent != configfs_sb->s_root) {
+ printk(KERN_ERR "configfs: Tried to unregister non-subsystem!\n");
+ return;
+ }
+
+ mutex_lock(&configfs_sb->s_root->d_inode->i_mutex);
+ mutex_lock(&dentry->d_inode->i_mutex);
+ if (configfs_detach_prep(dentry)) {
+ printk(KERN_ERR "configfs: Tried to unregister non-empty subsystem!\n");
+ }
+ configfs_detach_group(&group->cg_item);
+ dentry->d_inode->i_flags |= S_DEAD;
+ mutex_unlock(&dentry->d_inode->i_mutex);
+
+ d_delete(dentry);
+
+ mutex_unlock(&configfs_sb->s_root->d_inode->i_mutex);
+
+ dput(dentry);
+
+ unlink_group(group);
+ configfs_release_fs();
+}
+
+EXPORT_SYMBOL(configfs_register_subsystem);
+EXPORT_SYMBOL(configfs_unregister_subsystem);
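configfs_register_subsystem() and configfs_unregister_subsystem() are the entry points a client module calls. A minimal usage sketch, assuming config_group_init() is provided by item.o from the Makefile above; the "example" names are illustrative and not part of this patch:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/configfs.h>

static struct config_item_type example_type = {
	.ct_owner = THIS_MODULE,
};

static struct configfs_subsystem example_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "example",	/* directory name under the mount point */
			.ci_type    = &example_type,
		},
	},
};

static int __init example_init(void)
{
	int ret;

	config_group_init(&example_subsys.su_group);
	init_MUTEX(&example_subsys.su_sem);

	ret = configfs_register_subsystem(&example_subsys);
	if (ret)
		printk(KERN_ERR "example: cannot register subsystem (%d)\n", ret);
	return ret;
}

static void __exit example_exit(void)
{
	configfs_unregister_subsystem(&example_subsys);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");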
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
new file mode 100644
index 00000000000..c26cd61f13a
--- /dev/null
+++ b/fs/configfs/file.c
@@ -0,0 +1,360 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * file.c - operations for regular (text) files.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * Based on sysfs:
+ * sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
+ *
+ * configfs Copyright (C) 2005 Oracle. All rights reserved.
+ */
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/dnotify.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+#include <asm/semaphore.h>
+
+#include <linux/configfs.h>
+#include "configfs_internal.h"
+
+
+struct configfs_buffer {
+ size_t count;
+ loff_t pos;
+ char * page;
+ struct configfs_item_operations * ops;
+ struct semaphore sem;
+ int needs_read_fill;
+};
+
+
+/**
+ * fill_read_buffer - allocate and fill buffer from item.
+ * @dentry: dentry pointer.
+ * @buffer: data buffer for file.
+ *
+ * Allocate @buffer->page, if it hasn't been already, then call the
+ * config_item's show() method to fill the buffer with this attribute's
+ * data.
+ * This is called only once, on the file's first read.
+ */
+static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buffer)
+{
+ struct configfs_attribute * attr = to_attr(dentry);
+ struct config_item * item = to_item(dentry->d_parent);
+ struct configfs_item_operations * ops = buffer->ops;
+ int ret = 0;
+ ssize_t count;
+
+ if (!buffer->page)
+ buffer->page = (char *) get_zeroed_page(GFP_KERNEL);
+ if (!buffer->page)
+ return -ENOMEM;
+
+ count = ops->show_attribute(item,attr,buffer->page);
+ buffer->needs_read_fill = 0;
+ BUG_ON(count > (ssize_t)PAGE_SIZE);
+ if (count >= 0)
+ buffer->count = count;
+ else
+ ret = count;
+ return ret;
+}
+
+
+/**
+ * flush_read_buffer - push buffer to userspace.
+ * @buffer: data buffer for file.
+ * @userbuf: user-passed buffer.
+ * @count: number of bytes requested.
+ * @ppos: file position.
+ *
+ * Copy the buffer we filled in fill_read_buffer() to userspace.
+ * This is done at the reader's leisure, copying and advancing
+ * the amount they specify each time.
+ * This may be called continuously until the buffer is empty.
+ */
+static int flush_read_buffer(struct configfs_buffer * buffer, char __user * buf,
+ size_t count, loff_t * ppos)
+{
+ int error;
+
+ if (*ppos > buffer->count)
+ return 0;
+
+ if (count > (buffer->count - *ppos))
+ count = buffer->count - *ppos;
+
+ error = copy_to_user(buf,buffer->page + *ppos,count);
+ if (!error)
+ *ppos += count;
+ return error ? -EFAULT : count;
+}
+
+/**
+ * configfs_read_file - read an attribute.
+ * @file: file pointer.
+ * @buf: buffer to fill.
+ * @count: number of bytes to read.
+ * @ppos: starting offset in file.
+ *
+ * Userspace wants to read an attribute file. The attribute descriptor
+ * is in the file's ->d_fsdata. The target item is in the directory's
+ * ->d_fsdata.
+ *
+ * We call fill_read_buffer() to allocate and fill the buffer from the
+ * item's show() method exactly once (if the read is happening from
+ * the beginning of the file). That should fill the entire buffer with
+ * all the data the item has to offer for that attribute.
+ * We then call flush_read_buffer() to copy the buffer to userspace
+ * in the increments specified.
+ */
+
+static ssize_t
+configfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ struct configfs_buffer * buffer = file->private_data;
+ ssize_t retval = 0;
+
+ down(&buffer->sem);
+ if (buffer->needs_read_fill) {
+ if ((retval = fill_read_buffer(file->f_dentry,buffer)))
+ goto out;
+ }
+ pr_debug("%s: count = %d, ppos = %lld, buf = %s\n",
+ __FUNCTION__,count,*ppos,buffer->page);
+ retval = flush_read_buffer(buffer,buf,count,ppos);
+out:
+ up(&buffer->sem);
+ return retval;
+}
+
+
+/**
+ * fill_write_buffer - copy buffer from userspace.
+ * @buffer: data buffer for file.
+ * @userbuf: data from user.
+ * @count: number of bytes in @userbuf.
+ *
+ * Allocate @buffer->page if it hasn't been already, then
+ * copy the user-supplied buffer into it.
+ */
+
+static int
+fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size_t count)
+{
+ int error;
+
+ if (!buffer->page)
+ buffer->page = (char *)get_zeroed_page(GFP_KERNEL);
+ if (!buffer->page)
+ return -ENOMEM;
+
+ if (count > PAGE_SIZE)
+ count = PAGE_SIZE;
+ error = copy_from_user(buffer->page,buf,count);
+ buffer->needs_read_fill = 1;
+ return error ? -EFAULT : count;
+}
+
+
+/**
+ * flush_write_buffer - push buffer to config_item.
+ * @file: file pointer.
+ * @buffer: data buffer for file.
+ *
+ * Get the correct pointers for the config_item and the attribute we're
+ * dealing with, then call the store() method for the attribute,
+ * passing the buffer that we acquired in fill_write_buffer().
+ */
+
+static int
+flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size_t count)
+{
+ struct configfs_attribute * attr = to_attr(dentry);
+ struct config_item * item = to_item(dentry->d_parent);
+ struct configfs_item_operations * ops = buffer->ops;
+
+ return ops->store_attribute(item,attr,buffer->page,count);
+}
+
+
+/**
+ * configfs_write_file - write an attribute.
+ * @file: file pointer
+ * @buf: data to write
+ * @count: number of bytes
+ * @ppos: starting offset
+ *
+ * Similar to configfs_read_file(), though working in the opposite direction.
+ * We allocate and fill the data from the user in fill_write_buffer(),
+ * then push it to the config_item in flush_write_buffer().
+ * There is no easy way for us to know if userspace is only doing a partial
+ * write, so we don't support them. We expect the entire buffer to come
+ * on the first write.
+ * Hint: if you're writing a value, first read the file, modify only
+ * the value you're changing, then write the entire buffer back.
+ */
+
+static ssize_t
+configfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct configfs_buffer * buffer = file->private_data;
+
+ down(&buffer->sem);
+ count = fill_write_buffer(buffer,buf,count);
+ if (count > 0)
+ count = flush_write_buffer(file->f_dentry,buffer,count);
+ if (count > 0)
+ *ppos += count;
+ up(&buffer->sem);
+ return count;
+}
+
+static int check_perm(struct inode * inode, struct file * file)
+{
+ struct config_item *item = configfs_get_config_item(file->f_dentry->d_parent);
+ struct configfs_attribute * attr = to_attr(file->f_dentry);
+ struct configfs_buffer * buffer;
+ struct configfs_item_operations * ops = NULL;
+ int error = 0;
+
+ if (!item || !attr)
+ goto Einval;
+
+ /* Grab the module reference for this attribute if we have one */
+ if (!try_module_get(attr->ca_owner)) {
+ error = -ENODEV;
+ goto Done;
+ }
+
+ if (item->ci_type)
+ ops = item->ci_type->ct_item_ops;
+ else
+ goto Eaccess;
+
+ /* File needs write support.
+ * The inode's perms must say it's ok,
+ * and we must have a store method.
+ */
+ if (file->f_mode & FMODE_WRITE) {
+
+ if (!(inode->i_mode & S_IWUGO) || !ops->store_attribute)
+ goto Eaccess;
+
+ }
+
+ /* File needs read support.
+ * The inode's perms must say it's ok, and there
+ * must be a show method for it.
+ */
+ if (file->f_mode & FMODE_READ) {
+ if (!(inode->i_mode & S_IRUGO) || !ops->show_attribute)
+ goto Eaccess;
+ }
+
+ /* No error? Great, allocate a buffer for the file, and store
+ * it in file->private_data for easy access.
+ */
+ buffer = kmalloc(sizeof(struct configfs_buffer),GFP_KERNEL);
+ if (buffer) {
+ memset(buffer,0,sizeof(struct configfs_buffer));
+ init_MUTEX(&buffer->sem);
+ buffer->needs_read_fill = 1;
+ buffer->ops = ops;
+ file->private_data = buffer;
+ } else
+ error = -ENOMEM;
+ goto Done;
+
+ Einval:
+ error = -EINVAL;
+ goto Done;
+ Eaccess:
+ error = -EACCES;
+ module_put(attr->ca_owner);
+ Done:
+ if (error && item)
+ config_item_put(item);
+ return error;
+}
+
+static int configfs_open_file(struct inode * inode, struct file * filp)
+{
+ return check_perm(inode,filp);
+}
+
+static int configfs_release(struct inode * inode, struct file * filp)
+{
+ struct config_item * item = to_item(filp->f_dentry->d_parent);
+ struct configfs_attribute * attr = to_attr(filp->f_dentry);
+ struct module * owner = attr->ca_owner;
+ struct configfs_buffer * buffer = filp->private_data;
+
+ if (item)
+ config_item_put(item);
+ /* After this point, attr should not be accessed. */
+ module_put(owner);
+
+ if (buffer) {
+ if (buffer->page)
+ free_page((unsigned long)buffer->page);
+ kfree(buffer);
+ }
+ return 0;
+}
+
+struct file_operations configfs_file_operations = {
+ .read = configfs_read_file,
+ .write = configfs_write_file,
+ .llseek = generic_file_llseek,
+ .open = configfs_open_file,
+ .release = configfs_release,
+};
+
+
+int configfs_add_file(struct dentry * dir, const struct configfs_attribute * attr, int type)
+{
+ struct configfs_dirent * parent_sd = dir->d_fsdata;
+ umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG;
+ int error = 0;
+
+ mutex_lock(&dir->d_inode->i_mutex);
+ error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode, type);
+ mutex_unlock(&dir->d_inode->i_mutex);
+
+ return error;
+}
+
+
+/**
+ * configfs_create_file - create an attribute file for an item.
+ * @item: item we're creating for.
+ * @attr: attribute descriptor.
+ */
+
+int configfs_create_file(struct config_item * item, const struct configfs_attribute * attr)
+{
+ BUG_ON(!item || !item->ci_dentry || !attr);
+
+ return configfs_add_file(item->ci_dentry, attr,
+ CONFIGFS_ITEM_ATTR);
+}
+
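As a point of reference for the show/store path above, here is a minimal, hypothetical client-side sketch (not part of this patch): a configfs_attribute whose ca_mode and ca_owner feed the checks in check_perm() and configfs_add_file(), plus show/store handlers matching the configfs_item_operations prototypes this version uses. The store handler parses the whole buffer in one pass, since configfs_write_file() does not support partial writes. All my_item names are assumptions for illustration only.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/configfs.h>

struct my_item {
	struct config_item item;
	unsigned long threshold;
};

/* ca_mode gates check_perm(); ca_owner is pinned via try_module_get() */
static struct configfs_attribute my_item_attr_threshold = {
	.ca_owner = THIS_MODULE,
	.ca_name  = "threshold",
	.ca_mode  = S_IRUGO | S_IWUSR,
};

static ssize_t my_item_show_attribute(struct config_item *item,
				      struct configfs_attribute *attr,
				      char *page)
{
	struct my_item *mi = container_of(item, struct my_item, item);

	/* fill_read_buffer() hands us one page to format into */
	return sprintf(page, "%lu\n", mi->threshold);
}

static ssize_t my_item_store_attribute(struct config_item *item,
				       struct configfs_attribute *attr,
				       const char *page, size_t count)
{
	struct my_item *mi = container_of(item, struct my_item, item);
	char *end;
	unsigned long val = simple_strtoul(page, &end, 10);

	/* the whole value must arrive in this single write */
	if (end == page || (*end && *end != '\n'))
		return -EINVAL;

	mi->threshold = val;
	return count;
}

static struct configfs_item_operations my_item_ops = {
	.show_attribute  = my_item_show_attribute,
	.store_attribute = my_item_store_attribute,
};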
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
new file mode 100644
index 00000000000..6577c588de9
--- /dev/null
+++ b/fs/configfs/inode.c
@@ -0,0 +1,162 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * inode.c - basic inode and dentry operations.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ * Based on sysfs:
+ * sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
+ *
+ * configfs Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * Please see Documentation/filesystems/configfs.txt for more information.
+ */
+
+#undef DEBUG
+
+#include <linux/pagemap.h>
+#include <linux/namei.h>
+#include <linux/backing-dev.h>
+
+#include <linux/configfs.h>
+#include "configfs_internal.h"
+
+extern struct super_block * configfs_sb;
+
+static struct address_space_operations configfs_aops = {
+ .readpage = simple_readpage,
+ .prepare_write = simple_prepare_write,
+ .commit_write = simple_commit_write
+};
+
+static struct backing_dev_info configfs_backing_dev_info = {
+ .ra_pages = 0, /* No readahead */
+ .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
+};
+
+struct inode * configfs_new_inode(mode_t mode)
+{
+ struct inode * inode = new_inode(configfs_sb);
+ if (inode) {
+ inode->i_mode = mode;
+ inode->i_uid = 0;
+ inode->i_gid = 0;
+ inode->i_blksize = PAGE_CACHE_SIZE;
+ inode->i_blocks = 0;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ inode->i_mapping->a_ops = &configfs_aops;
+ inode->i_mapping->backing_dev_info = &configfs_backing_dev_info;
+ }
+ return inode;
+}
+
+int configfs_create(struct dentry * dentry, int mode, int (*init)(struct inode *))
+{
+ int error = 0;
+ struct inode * inode = NULL;
+ if (dentry) {
+ if (!dentry->d_inode) {
+ if ((inode = configfs_new_inode(mode))) {
+ if (dentry->d_parent && dentry->d_parent->d_inode) {
+ struct inode *p_inode = dentry->d_parent->d_inode;
+ p_inode->i_mtime = p_inode->i_ctime = CURRENT_TIME;
+ }
+ goto Proceed;
+ }
+ else
+ error = -ENOMEM;
+ } else
+ error = -EEXIST;
+ } else
+ error = -ENOENT;
+ goto Done;
+
+ Proceed:
+ if (init)
+ error = init(inode);
+ if (!error) {
+ d_instantiate(dentry, inode);
+ if (S_ISDIR(mode) || S_ISLNK(mode))
+ dget(dentry); /* pin link and directory dentries in core */
+ } else
+ iput(inode);
+ Done:
+ return error;
+}
+
+/*
+ * Get the name of the element represented by the given configfs_dirent
+ */
+const unsigned char * configfs_get_name(struct configfs_dirent *sd)
+{
+ struct attribute * attr;
+
+ if (!sd || !sd->s_element)
+ BUG();
+
+ /* These always have a dentry, so use that */
+ if (sd->s_type & (CONFIGFS_DIR | CONFIGFS_ITEM_LINK))
+ return sd->s_dentry->d_name.name;
+
+ if (sd->s_type & CONFIGFS_ITEM_ATTR) {
+ attr = sd->s_element;
+ return attr->name;
+ }
+ return NULL;
+}
+
+
+/*
+ * Unhashes the dentry corresponding to given configfs_dirent
+ * Called with parent inode's i_mutex held.
+ */
+void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent)
+{
+ struct dentry * dentry = sd->s_dentry;
+
+ if (dentry) {
+ spin_lock(&dcache_lock);
+ if (!(d_unhashed(dentry) && dentry->d_inode)) {
+ dget_locked(dentry);
+ __d_drop(dentry);
+ spin_unlock(&dcache_lock);
+ simple_unlink(parent->d_inode, dentry);
+ } else
+ spin_unlock(&dcache_lock);
+ }
+}
+
+void configfs_hash_and_remove(struct dentry * dir, const char * name)
+{
+ struct configfs_dirent * sd;
+ struct configfs_dirent * parent_sd = dir->d_fsdata;
+
+ mutex_lock(&dir->d_inode->i_mutex);
+ list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
+ if (!sd->s_element)
+ continue;
+ if (!strcmp(configfs_get_name(sd), name)) {
+ list_del_init(&sd->s_sibling);
+ configfs_drop_dentry(sd, dir);
+ configfs_put(sd);
+ break;
+ }
+ }
+ mutex_unlock(&dir->d_inode->i_mutex);
+}
+
+
diff --git a/fs/configfs/item.c b/fs/configfs/item.c
new file mode 100644
index 00000000000..e07485ac50a
--- /dev/null
+++ b/fs/configfs/item.c
@@ -0,0 +1,227 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * item.c - library routines for handling generic config items
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ * Based on kobject:
+ * kobject is Copyright (c) 2002-2003 Patrick Mochel
+ *
+ * configfs Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * Please see the file Documentation/filesystems/configfs.txt for
+ * critical information about using the config_item interface.
+ */
+
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+
+#include <linux/configfs.h>
+
+
+static inline struct config_item * to_item(struct list_head * entry)
+{
+ return container_of(entry,struct config_item,ci_entry);
+}
+
+/* Evil kernel */
+static void config_item_release(struct kref *kref);
+
+/**
+ * config_item_init - initialize item.
+ * @item: item in question.
+ */
+void config_item_init(struct config_item * item)
+{
+ kref_init(&item->ci_kref);
+ INIT_LIST_HEAD(&item->ci_entry);
+}
+
+/**
+ * config_item_set_name - Set the name of an item
+ * @item: item.
+ * @fmt: printf-style format string for the name.
+ *
+ * If strlen(name) >= CONFIGFS_ITEM_NAME_LEN, then use a
+ * dynamically allocated string that @item->ci_name points to.
+ * Otherwise, use the static @item->ci_namebuf array.
+ */
+
+int config_item_set_name(struct config_item * item, const char * fmt, ...)
+{
+ int error = 0;
+ int limit = CONFIGFS_ITEM_NAME_LEN;
+ int need;
+ va_list args;
+ char * name;
+
+ /*
+ * First, try the static array
+ */
+ va_start(args,fmt);
+ need = vsnprintf(item->ci_namebuf,limit,fmt,args);
+ va_end(args);
+ if (need < limit)
+ name = item->ci_namebuf;
+ else {
+ /*
+ * Need more space? Allocate it and try again
+ */
+ limit = need + 1;
+ name = kmalloc(limit,GFP_KERNEL);
+ if (!name) {
+ error = -ENOMEM;
+ goto Done;
+ }
+ va_start(args,fmt);
+ need = vsnprintf(name,limit,fmt,args);
+ va_end(args);
+
+ /* Still? Give up. */
+ if (need >= limit) {
+ kfree(name);
+ error = -EFAULT;
+ goto Done;
+ }
+ }
+
+ /* Free the old name, if necessary. */
+ if (item->ci_name && item->ci_name != item->ci_namebuf)
+ kfree(item->ci_name);
+
+ /* Now, set the new name */
+ item->ci_name = name;
+ Done:
+ return error;
+}
+
+EXPORT_SYMBOL(config_item_set_name);
+
+void config_item_init_type_name(struct config_item *item,
+ const char *name,
+ struct config_item_type *type)
+{
+ config_item_set_name(item, name);
+ item->ci_type = type;
+ config_item_init(item);
+}
+EXPORT_SYMBOL(config_item_init_type_name);
+
+void config_group_init_type_name(struct config_group *group, const char *name,
+ struct config_item_type *type)
+{
+ config_item_set_name(&group->cg_item, name);
+ group->cg_item.ci_type = type;
+ config_group_init(group);
+}
+EXPORT_SYMBOL(config_group_init_type_name);
+
+struct config_item * config_item_get(struct config_item * item)
+{
+ if (item)
+ kref_get(&item->ci_kref);
+ return item;
+}
+
+/**
+ * config_item_cleanup - free config_item resources.
+ * @item: item.
+ */
+
+void config_item_cleanup(struct config_item * item)
+{
+ struct config_item_type * t = item->ci_type;
+ struct config_group * s = item->ci_group;
+ struct config_item * parent = item->ci_parent;
+
+ pr_debug("config_item %s: cleaning up\n",config_item_name(item));
+ if (item->ci_name != item->ci_namebuf)
+ kfree(item->ci_name);
+ item->ci_name = NULL;
+ if (t && t->ct_item_ops && t->ct_item_ops->release)
+ t->ct_item_ops->release(item);
+ if (s)
+ config_group_put(s);
+ if (parent)
+ config_item_put(parent);
+}
+
+static void config_item_release(struct kref *kref)
+{
+ config_item_cleanup(container_of(kref, struct config_item, ci_kref));
+}
+
+/**
+ * config_item_put - decrement refcount for item.
+ * @item: item.
+ *
+ * Decrement the refcount, and if 0, call config_item_cleanup().
+ */
+void config_item_put(struct config_item * item)
+{
+ if (item)
+ kref_put(&item->ci_kref, config_item_release);
+}
+
+
+/**
+ * config_group_init - initialize a group for use
+ * @group: group to initialize.
+ */
+
+void config_group_init(struct config_group *group)
+{
+ config_item_init(&group->cg_item);
+ INIT_LIST_HEAD(&group->cg_children);
+}
+
+
+/**
+ * config_group_find_obj - search for item in group.
+ * @group: group we're looking in.
+ * @name: item's name.
+ *
+ * Iterate over @group->cg_children, looking for a config_item whose name
+ * matches @name. If a matching item is found, take a reference on it and
+ * return it. Locking is left to the caller for now (see the XXX below).
+ */
+
+struct config_item * config_group_find_obj(struct config_group * group, const char * name)
+{
+ struct list_head * entry;
+ struct config_item * ret = NULL;
+
+ /* XXX LOCKING! */
+ list_for_each(entry,&group->cg_children) {
+ struct config_item * item = to_item(entry);
+ if (config_item_name(item) &&
+ !strcmp(config_item_name(item), name)) {
+ ret = config_item_get(item);
+ break;
+ }
+ }
+ return ret;
+}
+
+
+EXPORT_SYMBOL(config_item_init);
+EXPORT_SYMBOL(config_group_init);
+EXPORT_SYMBOL(config_item_get);
+EXPORT_SYMBOL(config_item_put);
+
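To make the kref handling above concrete, a hedged sketch (hypothetical, not part of the patch) of an item type whose memory is reclaimed through the release() hook that config_item_cleanup() invokes once the final config_item_put() drops the reference count to zero.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/configfs.h>

struct my_item {
	struct config_item item;
};

static void my_item_release(struct config_item *item)
{
	/* called from config_item_cleanup() on the last put */
	kfree(container_of(item, struct my_item, item));
}

static struct configfs_item_operations my_item_item_ops = {
	.release = my_item_release,
};

static struct config_item_type my_item_type = {
	.ct_owner    = THIS_MODULE,
	.ct_item_ops = &my_item_item_ops,
};

static struct config_item *my_item_new(const char *name)
{
	struct my_item *mi = kmalloc(sizeof(*mi), GFP_KERNEL);

	if (!mi)
		return NULL;
	memset(mi, 0, sizeof(*mi));
	/* reference count starts at 1; balance with config_item_put() */
	config_item_init_type_name(&mi->item, name, &my_item_type);
	return &mi->item;
}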
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
new file mode 100644
index 00000000000..1a2f6f6a4d9
--- /dev/null
+++ b/fs/configfs/mount.c
@@ -0,0 +1,159 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * mount.c - operations for initializing and mounting configfs.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ * Based on sysfs:
+ * sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
+ *
+ * configfs Copyright (C) 2005 Oracle. All rights reserved.
+ */
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+
+#include <linux/configfs.h>
+#include "configfs_internal.h"
+
+/* Random magic number */
+#define CONFIGFS_MAGIC 0x62656570
+
+struct vfsmount * configfs_mount = NULL;
+struct super_block * configfs_sb = NULL;
+static int configfs_mnt_count = 0;
+
+static struct super_operations configfs_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+};
+
+static struct config_group configfs_root_group = {
+ .cg_item = {
+ .ci_namebuf = "root",
+ .ci_name = configfs_root_group.cg_item.ci_namebuf,
+ },
+};
+
+int configfs_is_root(struct config_item *item)
+{
+ return item == &configfs_root_group.cg_item;
+}
+
+static struct configfs_dirent configfs_root = {
+ .s_sibling = LIST_HEAD_INIT(configfs_root.s_sibling),
+ .s_children = LIST_HEAD_INIT(configfs_root.s_children),
+ .s_element = &configfs_root_group.cg_item,
+ .s_type = CONFIGFS_ROOT,
+};
+
+static int configfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct inode *inode;
+ struct dentry *root;
+
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = CONFIGFS_MAGIC;
+ sb->s_op = &configfs_ops;
+ configfs_sb = sb;
+
+ inode = configfs_new_inode(S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO);
+ if (inode) {
+ inode->i_op = &configfs_dir_inode_operations;
+ inode->i_fop = &configfs_dir_operations;
+ /* directory inodes start off with i_nlink == 2 (for "." entry) */
+ inode->i_nlink++;
+ } else {
+ pr_debug("configfs: could not get root inode\n");
+ return -ENOMEM;
+ }
+
+ root = d_alloc_root(inode);
+ if (!root) {
+ pr_debug("%s: could not get root dentry!\n",__FUNCTION__);
+ iput(inode);
+ return -ENOMEM;
+ }
+ config_group_init(&configfs_root_group);
+ configfs_root_group.cg_item.ci_dentry = root;
+ root->d_fsdata = &configfs_root;
+ sb->s_root = root;
+ return 0;
+}
+
+static struct super_block *configfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ return get_sb_single(fs_type, flags, data, configfs_fill_super);
+}
+
+static struct file_system_type configfs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "configfs",
+ .get_sb = configfs_get_sb,
+ .kill_sb = kill_litter_super,
+};
+
+int configfs_pin_fs(void)
+{
+ return simple_pin_fs("configfs", &configfs_mount,
+ &configfs_mnt_count);
+}
+
+void configfs_release_fs(void)
+{
+ simple_release_fs(&configfs_mount, &configfs_mnt_count);
+}
+
+
+static decl_subsys(config, NULL, NULL);
+
+static int __init configfs_init(void)
+{
+ int err;
+
+ kset_set_kset_s(&config_subsys, kernel_subsys);
+ err = subsystem_register(&config_subsys);
+ if (err)
+ return err;
+
+ err = register_filesystem(&configfs_fs_type);
+ if (err) {
+ printk(KERN_ERR "configfs: Unable to register filesystem!\n");
+ subsystem_unregister(&config_subsys);
+ }
+
+ return err;
+}
+
+static void __exit configfs_exit(void)
+{
+ unregister_filesystem(&configfs_fs_type);
+ subsystem_unregister(&config_subsys);
+}
+
+MODULE_AUTHOR("Oracle");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.1");
+MODULE_DESCRIPTION("Simple RAM filesystem for user driven kernel subsystem configuration.");
+
+module_init(configfs_init);
+module_exit(configfs_exit);
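Once configfs_init() has registered the filesystem type above, userspace reaches the tree by mounting it; a small userspace sketch using mount(2) follows. The "configfs" type string matches configfs_fs_type.name registered above; the /config mount point is only a convention from the configfs documentation, not something this code enforces.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* mount the in-kernel configfs tree at an arbitrary mount point */
	if (mount("none", "/config", "configfs", 0, NULL)) {
		perror("mount configfs");
		return 1;
	}
	return 0;
}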
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
new file mode 100644
index 00000000000..50f5840521a
--- /dev/null
+++ b/fs/configfs/symlink.c
@@ -0,0 +1,281 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * symlink.c - operations for configfs symlinks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ * Based on sysfs:
+ * sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
+ *
+ * configfs Copyright (C) 2005 Oracle. All rights reserved.
+ */
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/namei.h>
+
+#include <linux/configfs.h>
+#include "configfs_internal.h"
+
+static int item_depth(struct config_item * item)
+{
+ struct config_item * p = item;
+ int depth = 0;
+ do { depth++; } while ((p = p->ci_parent) && !configfs_is_root(p));
+ return depth;
+}
+
+static int item_path_length(struct config_item * item)
+{
+ struct config_item * p = item;
+ int length = 1;
+ do {
+ length += strlen(config_item_name(p)) + 1;
+ p = p->ci_parent;
+ } while (p && !configfs_is_root(p));
+ return length;
+}
+
+static void fill_item_path(struct config_item * item, char * buffer, int length)
+{
+ struct config_item * p;
+
+ --length;
+ for (p = item; p && !configfs_is_root(p); p = p->ci_parent) {
+ int cur = strlen(config_item_name(p));
+
+ /* back up enough to print this item's name with '/' */
+ length -= cur;
+ strncpy(buffer + length,config_item_name(p),cur);
+ *(buffer + --length) = '/';
+ }
+}
+
+static int create_link(struct config_item *parent_item,
+ struct config_item *item,
+ struct dentry *dentry)
+{
+ struct configfs_dirent *target_sd = item->ci_dentry->d_fsdata;
+ struct configfs_symlink *sl;
+ int ret;
+
+ ret = -ENOMEM;
+ sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL);
+ if (sl) {
+ sl->sl_target = config_item_get(item);
+ /* FIXME: needs a lock, I'd bet */
+ list_add(&sl->sl_list, &target_sd->s_links);
+ ret = configfs_create_link(sl, parent_item->ci_dentry,
+ dentry);
+ if (ret) {
+ list_del_init(&sl->sl_list);
+ config_item_put(item);
+ kfree(sl);
+ }
+ }
+
+ return ret;
+}
+
+
+static int get_target(const char *symname, struct nameidata *nd,
+ struct config_item **target)
+{
+ int ret;
+
+ ret = path_lookup(symname, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, nd);
+ if (!ret) {
+ if (nd->dentry->d_sb == configfs_sb) {
+ *target = configfs_get_config_item(nd->dentry);
+ if (!*target) {
+ ret = -ENOENT;
+ path_release(nd);
+ }
+ } else
+ ret = -EPERM;
+ }
+
+ return ret;
+}
+
+
+int configfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
+{
+ int ret;
+ struct nameidata nd;
+ struct config_item *parent_item;
+ struct config_item *target_item;
+ struct config_item_type *type;
+
+ ret = -EPERM; /* What lack-of-symlink returns */
+ if (dentry->d_parent == configfs_sb->s_root)
+ goto out;
+
+ parent_item = configfs_get_config_item(dentry->d_parent);
+ type = parent_item->ci_type;
+
+ if (!type || !type->ct_item_ops ||
+ !type->ct_item_ops->allow_link)
+ goto out_put;
+
+ ret = get_target(symname, &nd, &target_item);
+ if (ret)
+ goto out_put;
+
+ ret = type->ct_item_ops->allow_link(parent_item, target_item);
+ if (!ret)
+ ret = create_link(parent_item, target_item, dentry);
+
+ config_item_put(target_item);
+ path_release(&nd);
+
+out_put:
+ config_item_put(parent_item);
+
+out:
+ return ret;
+}
+
+int configfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ struct configfs_dirent *sd = dentry->d_fsdata;
+ struct configfs_symlink *sl;
+ struct config_item *parent_item;
+ struct config_item_type *type;
+ int ret;
+
+ ret = -EPERM; /* What lack-of-symlink returns */
+ if (!(sd->s_type & CONFIGFS_ITEM_LINK))
+ goto out;
+
+ if (dentry->d_parent == configfs_sb->s_root)
+ BUG();
+
+ sl = sd->s_element;
+
+ parent_item = configfs_get_config_item(dentry->d_parent);
+ type = parent_item->ci_type;
+
+ list_del_init(&sd->s_sibling);
+ configfs_drop_dentry(sd, dentry->d_parent);
+ dput(dentry);
+ configfs_put(sd);
+
+ /*
+ * drop_link() must be called before
+ * list_del_init(&sl->sl_list), so that the order of
+ * drop_link(this, target) and drop_item(target) is preserved.
+ */
+ if (type && type->ct_item_ops &&
+ type->ct_item_ops->drop_link)
+ type->ct_item_ops->drop_link(parent_item,
+ sl->sl_target);
+
+ /* FIXME: Needs lock */
+ list_del_init(&sl->sl_list);
+
+ /* Put reference from create_link() */
+ config_item_put(sl->sl_target);
+ kfree(sl);
+
+ config_item_put(parent_item);
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static int configfs_get_target_path(struct config_item * item, struct config_item * target,
+ char *path)
+{
+ char * s;
+ int depth, size;
+
+ depth = item_depth(item);
+ size = item_path_length(target) + depth * 3 - 1;
+ if (size > PATH_MAX)
+ return -ENAMETOOLONG;
+
+ pr_debug("%s: depth = %d, size = %d\n", __FUNCTION__, depth, size);
+
+ for (s = path; depth--; s += 3)
+ strcpy(s,"../");
+
+ fill_item_path(target, path, size);
+ pr_debug("%s: path = '%s'\n", __FUNCTION__, path);
+
+ return 0;
+}
+
+static int configfs_getlink(struct dentry *dentry, char * path)
+{
+ struct config_item *item, *target_item;
+ int error = 0;
+
+ item = configfs_get_config_item(dentry->d_parent);
+ if (!item)
+ return -EINVAL;
+
+ target_item = configfs_get_config_item(dentry);
+ if (!target_item) {
+ config_item_put(item);
+ return -EINVAL;
+ }
+
+ down_read(&configfs_rename_sem);
+ error = configfs_get_target_path(item, target_item, path);
+ up_read(&configfs_rename_sem);
+
+ config_item_put(item);
+ config_item_put(target_item);
+ return error;
+
+}
+
+static void *configfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+ int error = -ENOMEM;
+ unsigned long page = get_zeroed_page(GFP_KERNEL);
+
+ if (page) {
+ error = configfs_getlink(dentry, (char *)page);
+ if (!error) {
+ nd_set_link(nd, (char *)page);
+ return (void *)page;
+ }
+ }
+
+ nd_set_link(nd, ERR_PTR(error));
+ return NULL;
+}
+
+static void configfs_put_link(struct dentry *dentry, struct nameidata *nd,
+ void *cookie)
+{
+ if (cookie) {
+ unsigned long page = (unsigned long)cookie;
+ free_page(page);
+ }
+}
+
+struct inode_operations configfs_symlink_inode_operations = {
+ .follow_link = configfs_follow_link,
+ .readlink = generic_readlink,
+ .put_link = configfs_put_link,
+};
+
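For the symlink path above, a hypothetical sketch of the allow_link() hook that configfs_symlink() consults before calling create_link(); a subsystem typically uses it to accept only targets of a type it understands. A matching drop_link() hook (not shown here) would undo the association when configfs_unlink() tears the link down. All my_* names, including the assumed target type, are illustrative only.

#include <linux/module.h>
#include <linux/configfs.h>

/* the config_item_type that valid link targets are assumed to have */
extern struct config_item_type my_target_type;

static int my_item_allow_link(struct config_item *src,
			      struct config_item *target)
{
	/* only accept symlinks that point at items of the expected type */
	if (target->ci_type != &my_target_type)
		return -EINVAL;
	return 0;
}

static struct configfs_item_operations my_link_ops = {
	.allow_link = my_item_allow_link,
};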
diff --git a/fs/dcache.c b/fs/dcache.c
index 17e43913868..134d6775183 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -71,7 +71,7 @@ struct dentry_stat_t dentry_stat = {
static void d_callback(struct rcu_head *head)
{
- struct dentry * dentry = container_of(head, struct dentry, d_rcu);
+ struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
if (dname_external(dentry))
kfree(dentry->d_name.name);
@@ -86,7 +86,7 @@ static void d_free(struct dentry *dentry)
{
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
- call_rcu(&dentry->d_rcu, d_callback);
+ call_rcu(&dentry->d_u.d_rcu, d_callback);
}
/*
@@ -193,7 +193,7 @@ kill_it: {
list_del(&dentry->d_lru);
dentry_stat.nr_unused--;
}
- list_del(&dentry->d_child);
+ list_del(&dentry->d_u.d_child);
dentry_stat.nr_dentry--; /* For d_free, below */
/*drops the locks, at that point nobody can reach this dentry */
dentry_iput(dentry);
@@ -367,7 +367,7 @@ static inline void prune_one_dentry(struct dentry * dentry)
struct dentry * parent;
__d_drop(dentry);
- list_del(&dentry->d_child);
+ list_del(&dentry->d_u.d_child);
dentry_stat.nr_dentry--; /* For d_free, below */
dentry_iput(dentry);
parent = dentry->d_parent;
@@ -518,7 +518,7 @@ repeat:
resume:
while (next != &this_parent->d_subdirs) {
struct list_head *tmp = next;
- struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
+ struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
next = tmp->next;
/* Have we found a mount point ? */
if (d_mountpoint(dentry))
@@ -532,7 +532,7 @@ resume:
* All done at this level ... ascend and resume the search.
*/
if (this_parent != parent) {
- next = this_parent->d_child.next;
+ next = this_parent->d_u.d_child.next;
this_parent = this_parent->d_parent;
goto resume;
}
@@ -569,7 +569,7 @@ repeat:
resume:
while (next != &this_parent->d_subdirs) {
struct list_head *tmp = next;
- struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
+ struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
next = tmp->next;
if (!list_empty(&dentry->d_lru)) {
@@ -610,7 +610,7 @@ dentry->d_parent->d_name.name, dentry->d_name.name, found);
* All done at this level ... ascend and resume the search.
*/
if (this_parent != parent) {
- next = this_parent->d_child.next;
+ next = this_parent->d_u.d_child.next;
this_parent = this_parent->d_parent;
#ifdef DCACHE_DEBUG
printk(KERN_DEBUG "select_parent: ascending to %s/%s, found=%d\n",
@@ -753,12 +753,12 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
dentry->d_parent = dget(parent);
dentry->d_sb = parent->d_sb;
} else {
- INIT_LIST_HEAD(&dentry->d_child);
+ INIT_LIST_HEAD(&dentry->d_u.d_child);
}
spin_lock(&dcache_lock);
if (parent)
- list_add(&dentry->d_child, &parent->d_subdirs);
+ list_add(&dentry->d_u.d_child, &parent->d_subdirs);
dentry_stat.nr_dentry++;
spin_unlock(&dcache_lock);
@@ -808,10 +808,14 @@ void d_instantiate(struct dentry *entry, struct inode * inode)
*
* Fill in inode information in the entry. On success, it returns NULL.
* If an unhashed alias of "entry" already exists, then we return the
- * aliased dentry instead.
+ * aliased dentry instead and drop one reference to inode.
*
* Note that in order to avoid conflicts with rename() etc, the caller
* had better be holding the parent directory semaphore.
+ *
+ * This also assumes that the inode count has been incremented
+ * (or otherwise set) by the caller to indicate that it is now
+ * in use by the dcache.
*/
struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
@@ -838,6 +842,7 @@ struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
dget_locked(alias);
spin_unlock(&dcache_lock);
BUG_ON(!d_unhashed(alias));
+ iput(inode);
return alias;
}
list_add(&entry->d_alias, &inode->i_dentry);
@@ -1310,8 +1315,8 @@ already_unhashed:
/* Unhash the target: dput() will then get rid of it */
__d_drop(target);
- list_del(&dentry->d_child);
- list_del(&target->d_child);
+ list_del(&dentry->d_u.d_child);
+ list_del(&target->d_u.d_child);
/* Switch the names.. */
switch_names(dentry, target);
@@ -1322,15 +1327,15 @@ already_unhashed:
if (IS_ROOT(dentry)) {
dentry->d_parent = target->d_parent;
target->d_parent = target;
- INIT_LIST_HEAD(&target->d_child);
+ INIT_LIST_HEAD(&target->d_u.d_child);
} else {
do_switch(dentry->d_parent, target->d_parent);
/* And add them back to the (new) parent lists */
- list_add(&target->d_child, &target->d_parent->d_subdirs);
+ list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
}
- list_add(&dentry->d_child, &dentry->d_parent->d_subdirs);
+ list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
spin_unlock(&target->d_lock);
spin_unlock(&dentry->d_lock);
write_sequnlock(&rename_lock);
@@ -1568,7 +1573,7 @@ repeat:
resume:
while (next != &this_parent->d_subdirs) {
struct list_head *tmp = next;
- struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
+ struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
next = tmp->next;
if (d_unhashed(dentry)||!dentry->d_inode)
continue;
@@ -1579,7 +1584,7 @@ resume:
atomic_dec(&dentry->d_count);
}
if (this_parent != root) {
- next = this_parent->d_child.next;
+ next = this_parent->d_u.d_child.next;
atomic_dec(&this_parent->d_count);
this_parent = this_parent->d_parent;
goto resume;
diff --git a/fs/dcookies.c b/fs/dcookies.c
index 02aa0ddc582..f8274a8f83b 100644
--- a/fs/dcookies.c
+++ b/fs/dcookies.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/mount.h>
+#include <linux/capability.h>
#include <linux/dcache.h>
#include <linux/mm.h>
#include <linux/errno.h>
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index a86ac4aeaed..d4f1a2cddd4 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -146,7 +146,7 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
}
*dentry = NULL;
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(dentry)) {
if ((mode & S_IFMT) == S_IFDIR)
@@ -155,7 +155,7 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
error = debugfs_create(parent->d_inode, *dentry, mode);
} else
error = PTR_ERR(dentry);
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
return error;
}
@@ -273,7 +273,7 @@ void debugfs_remove(struct dentry *dentry)
if (!parent || !parent->d_inode)
return;
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
if (debugfs_positive(dentry)) {
if (dentry->d_inode) {
if (S_ISDIR(dentry->d_inode->i_mode))
@@ -283,7 +283,7 @@ void debugfs_remove(struct dentry *dentry)
dput(dentry);
}
}
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
simple_release_fs(&debugfs_mount, &debugfs_mount_count);
}
EXPORT_SYMBOL_GPL(debugfs_remove);
diff --git a/fs/devfs/base.c b/fs/devfs/base.c
index 1274422a538..b621521e09d 100644
--- a/fs/devfs/base.c
+++ b/fs/devfs/base.c
@@ -2162,27 +2162,27 @@ static int devfs_d_revalidate_wait(struct dentry *dentry, struct nameidata *nd)
*
* make sure that
* d_instantiate always runs under lock
- * we release i_sem lock before going to sleep
+ * we release i_mutex lock before going to sleep
*
* unfortunately sometimes d_revalidate is called with
- * and sometimes without i_sem lock held. The following checks
+ * and sometimes without i_mutex lock held. The following checks
* attempt to deduce when we need to add (and drop resp.) lock
 * here. This relies on current (2.6.2) calling conventions:
*
- * lookup_hash is always run under i_sem and is passing NULL
+ * lookup_hash is always run under i_mutex and is passing NULL
* as nd
*
- * open(...,O_CREATE,...) calls _lookup_hash under i_sem
+ * open(...,O_CREAT,...) calls _lookup_hash under i_mutex
* and sets flags to LOOKUP_OPEN|LOOKUP_CREATE
*
* all other invocations of ->d_revalidate seem to happen
- * outside of i_sem
+ * outside of i_mutex
*/
need_lock = nd &&
(!(nd->flags & LOOKUP_CREATE) || (nd->flags & LOOKUP_PARENT));
if (need_lock)
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
if (is_devfsd_or_child(fs_info)) {
devfs_handle_t de = lookup_info->de;
@@ -2221,9 +2221,9 @@ static int devfs_d_revalidate_wait(struct dentry *dentry, struct nameidata *nd)
add_wait_queue(&lookup_info->wait_queue, &wait);
read_unlock(&parent->u.dir.lock);
/* at this point it is always (hopefully) locked */
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
schedule();
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
/*
* This does not need nor should remove wait from wait_queue.
* Wait queue head is never reused - nothing is ever added to it
@@ -2238,7 +2238,7 @@ static int devfs_d_revalidate_wait(struct dentry *dentry, struct nameidata *nd)
out:
if (need_lock)
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
return 1;
} /* End Function devfs_d_revalidate_wait */
@@ -2284,9 +2284,9 @@ static struct dentry *devfs_lookup(struct inode *dir, struct dentry *dentry,
/* Unlock directory semaphore, which will release any waiters. They
will get the hashed dentry, and may be forced to wait for
revalidation */
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
wait_for_devfsd_finished(fs_info); /* If I'm not devfsd, must wait */
- down(&dir->i_sem); /* Grab it again because them's the rules */
+ mutex_lock(&dir->i_mutex); /* Grab it again because them's the rules */
de = lookup_info.de;
/* If someone else has been so kind as to make the inode, we go home
early */
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index f2be44d4491..bfb8a230bac 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -130,7 +130,7 @@ static struct dentry *get_node(int num)
{
char s[12];
struct dentry *root = devpts_root;
- down(&root->d_inode->i_sem);
+ mutex_lock(&root->d_inode->i_mutex);
return lookup_one_len(s, root, sprintf(s, "%d", num));
}
@@ -161,7 +161,7 @@ int devpts_pty_new(struct tty_struct *tty)
if (!IS_ERR(dentry) && !dentry->d_inode)
d_instantiate(dentry, inode);
- up(&devpts_root->d_inode->i_sem);
+ mutex_unlock(&devpts_root->d_inode->i_mutex);
return 0;
}
@@ -178,7 +178,7 @@ struct tty_struct *devpts_get_tty(int number)
dput(dentry);
}
- up(&devpts_root->d_inode->i_sem);
+ mutex_unlock(&devpts_root->d_inode->i_mutex);
return tty;
}
@@ -196,7 +196,7 @@ void devpts_pty_kill(int number)
}
dput(dentry);
}
- up(&devpts_root->d_inode->i_sem);
+ mutex_unlock(&devpts_root->d_inode->i_mutex);
}
static int __init init_devpts_fs(void)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 3931e7f1e6b..30dbbd1df51 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -56,7 +56,7 @@
* lock_type is DIO_LOCKING for regular files on direct-IO-naive filesystems.
* This determines whether we need to do the fancy locking which prevents
* direct-IO from being able to read uninitialised disk blocks. If its zero
- * (blockdev) this locking is not done, and if it is DIO_OWN_LOCKING i_sem is
+ * (blockdev) this locking is not done, and if it is DIO_OWN_LOCKING i_mutex is
* not held for the entire direct write (taken briefly, initially, during a
* direct read though, but its never held for the duration of a direct-IO).
*/
@@ -930,7 +930,7 @@ out:
}
/*
- * Releases both i_sem and i_alloc_sem
+ * Releases both i_mutex and i_alloc_sem
*/
static ssize_t
direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
@@ -1062,11 +1062,11 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
/*
* All block lookups have been performed. For READ requests
- * we can let i_sem go now that its achieved its purpose
+ * we can let i_mutex go now that its achieved its purpose
* of protecting us from looking up uninitialized blocks.
*/
if ((rw == READ) && (dio->lock_type == DIO_LOCKING))
- up(&dio->inode->i_sem);
+ mutex_unlock(&dio->inode->i_mutex);
/*
* OK, all BIOs are submitted, so we can decrement bio_count to truly
@@ -1145,18 +1145,18 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
* The locking rules are governed by the dio_lock_type parameter.
*
* DIO_NO_LOCKING (no locking, for raw block device access)
- * For writes, i_sem is not held on entry; it is never taken.
+ * For writes, i_mutex is not held on entry; it is never taken.
*
* DIO_LOCKING (simple locking for regular files)
- * For writes we are called under i_sem and return with i_sem held, even though
+ * For writes we are called under i_mutex and return with i_mutex held, even though
* it is internally dropped.
- * For reads, i_sem is not held on entry, but it is taken and dropped before
+ * For reads, i_mutex is not held on entry, but it is taken and dropped before
* returning.
*
* DIO_OWN_LOCKING (filesystem provides synchronisation and handling of
* uninitialised data, allowing parallel direct readers and writers)
- * For writes we are called without i_sem, return without it, never touch it.
- * For reads, i_sem is held on entry and will be released before returning.
+ * For writes we are called without i_mutex, return without it, never touch it.
+ * For reads, i_mutex is held on entry and will be released before returning.
*
* Additional i_alloc_sem locking requirements described inline below.
*/
@@ -1214,11 +1214,11 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
* For block device access DIO_NO_LOCKING is used,
* neither readers nor writers do any locking at all
* For regular files using DIO_LOCKING,
- * readers need to grab i_sem and i_alloc_sem
- * writers need to grab i_alloc_sem only (i_sem is already held)
+ * readers need to grab i_mutex and i_alloc_sem
+ * writers need to grab i_alloc_sem only (i_mutex is already held)
* For regular files using DIO_OWN_LOCKING,
* neither readers nor writers take any locks here
- * (i_sem is already held and release for writers here)
+ * (i_mutex is already held and released for writers here)
*/
dio->lock_type = dio_lock_type;
if (dio_lock_type != DIO_NO_LOCKING) {
@@ -1228,7 +1228,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
mapping = iocb->ki_filp->f_mapping;
if (dio_lock_type != DIO_OWN_LOCKING) {
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
reader_with_isem = 1;
}
@@ -1240,7 +1240,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
}
if (dio_lock_type == DIO_OWN_LOCKING) {
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
reader_with_isem = 0;
}
}
@@ -1266,7 +1266,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
out:
if (reader_with_isem)
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
if (rw & WRITE)
current->flags &= ~PF_SYNCWRITE;
return retval;
diff --git a/fs/dquot.c b/fs/dquot.c
index 2a62b3dc20e..1966c890b48 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -77,6 +77,7 @@
#include <linux/kmod.h>
#include <linux/namei.h>
#include <linux/buffer_head.h>
+#include <linux/capability.h>
#include <linux/quotaops.h>
#include <asm/uaccess.h>
@@ -100,7 +101,7 @@
* operation is just reading pointers from inode (or not using them at all) the
* read lock is enough. If pointers are altered function must hold write lock
* (these locking rules also apply for S_NOQUOTA flag in the inode - note that
- * for altering the flag i_sem is also needed). If operation is holding
+ * for altering the flag i_mutex is also needed). If operation is holding
* reference to dquot in other way (e.g. quotactl ops) it must be guarded by
* dqonoff_sem.
* This locking assures that:
@@ -117,9 +118,9 @@
* spinlock to internal buffers before writing.
*
* Lock ordering (including related VFS locks) is the following:
- * i_sem > dqonoff_sem > iprune_sem > journal_lock > dqptr_sem >
+ * i_mutex > dqonoff_sem > iprune_sem > journal_lock > dqptr_sem >
* > dquot->dq_lock > dqio_sem
- * i_sem on quota files is special (it's below dqio_sem)
+ * i_mutex on quota files is special (it's below dqio_sem)
*/
static DEFINE_SPINLOCK(dq_list_lock);
@@ -1369,11 +1370,11 @@ int vfs_quota_off(struct super_block *sb, int type)
/* If quota was reenabled in the meantime, we have
* nothing to do */
if (!sb_has_quota_enabled(sb, cnt)) {
- down(&toputinode[cnt]->i_sem);
+ mutex_lock(&toputinode[cnt]->i_mutex);
toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
S_NOATIME | S_NOQUOTA);
truncate_inode_pages(&toputinode[cnt]->i_data, 0);
- up(&toputinode[cnt]->i_sem);
+ mutex_unlock(&toputinode[cnt]->i_mutex);
mark_inode_dirty(toputinode[cnt]);
iput(toputinode[cnt]);
}
@@ -1417,7 +1418,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
write_inode_now(inode, 1);
/* And now flush the block cache so that kernel sees the changes */
invalidate_bdev(sb->s_bdev, 0);
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
down(&dqopt->dqonoff_sem);
if (sb_has_quota_enabled(sb, type)) {
error = -EBUSY;
@@ -1449,7 +1450,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
goto out_file_init;
}
up(&dqopt->dqio_sem);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
set_enable_flags(dqopt, type);
add_dquot_ref(sb, type);
@@ -1470,7 +1471,7 @@ out_lock:
inode->i_flags |= oldflags;
up_write(&dqopt->dqptr_sem);
}
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
out_fmt:
put_quota_format(fmt);
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
new file mode 100644
index 00000000000..4e4762389bd
--- /dev/null
+++ b/fs/drop_caches.c
@@ -0,0 +1,68 @@
+/*
+ * Implement the manual drop-all-pagecache function
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/writeback.h>
+#include <linux/sysctl.h>
+#include <linux/gfp.h>
+
+/* A global variable is a bit ugly, but it keeps the code simple */
+int sysctl_drop_caches;
+
+static void drop_pagecache_sb(struct super_block *sb)
+{
+ struct inode *inode;
+
+ spin_lock(&inode_lock);
+ list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ if (inode->i_state & (I_FREEING|I_WILL_FREE))
+ continue;
+ invalidate_inode_pages(inode->i_mapping);
+ }
+ spin_unlock(&inode_lock);
+}
+
+void drop_pagecache(void)
+{
+ struct super_block *sb;
+
+ spin_lock(&sb_lock);
+restart:
+ list_for_each_entry(sb, &super_blocks, s_list) {
+ sb->s_count++;
+ spin_unlock(&sb_lock);
+ down_read(&sb->s_umount);
+ if (sb->s_root)
+ drop_pagecache_sb(sb);
+ up_read(&sb->s_umount);
+ spin_lock(&sb_lock);
+ if (__put_super_and_need_restart(sb))
+ goto restart;
+ }
+ spin_unlock(&sb_lock);
+}
+
+void drop_slab(void)
+{
+ int nr_objects;
+
+ do {
+ nr_objects = shrink_slab(1000, GFP_KERNEL, 1000);
+ } while (nr_objects > 10);
+}
+
+int drop_caches_sysctl_handler(ctl_table *table, int write,
+ struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+{
+ proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+ if (write) {
+ if (sysctl_drop_caches & 1)
+ drop_pagecache();
+ if (sysctl_drop_caches & 2)
+ drop_slab();
+ }
+ return 0;
+}
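The handler above treats bit 0 of the written value as "drop the page cache" and bit 1 as "drop slab objects". Assuming the handler is wired up as the usual vm.drop_caches sysctl entry, userspace can trigger both with a sketch like this.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/drop_caches", "w");

	if (!f) {
		perror("drop_caches");
		return 1;
	}
	fputs("3\n", f);	/* 1 = pagecache, 2 = slab, 3 = both */
	fclose(f);
	return 0;
}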
diff --git a/fs/exec.c b/fs/exec.c
index 22533cce061..b5bcf1aae0a 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -324,7 +324,7 @@ void install_arg_page(struct vm_area_struct *vma,
lru_cache_add_active(page);
set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
page, vma->vm_page_prot))));
- page_add_anon_rmap(page, vma, address);
+ page_add_new_anon_rmap(page, vma, address);
pte_unmap_unlock(pte, ptl);
/* no need for flush_tlb */
@@ -632,10 +632,10 @@ static inline int de_thread(struct task_struct *tsk)
* synchronize with any firing (by calling del_timer_sync)
* before we can safely let the old group leader die.
*/
- sig->real_timer.data = (unsigned long)current;
+ sig->real_timer.data = current;
spin_unlock_irq(lock);
- if (del_timer_sync(&sig->real_timer))
- add_timer(&sig->real_timer);
+ if (hrtimer_cancel(&sig->real_timer))
+ hrtimer_restart(&sig->real_timer);
spin_lock_irq(lock);
}
while (atomic_read(&sig->count) > count) {
@@ -760,7 +760,7 @@ no_thread_group:
spin_lock(&oldsighand->siglock);
spin_lock(&newsighand->siglock);
- current->sighand = newsighand;
+ rcu_assign_pointer(current->sighand, newsighand);
recalc_sigpending();
spin_unlock(&newsighand->siglock);
@@ -768,7 +768,7 @@ no_thread_group:
write_unlock_irq(&tasklist_lock);
if (atomic_dec_and_test(&oldsighand->count))
- kmem_cache_free(sighand_cachep, oldsighand);
+ sighand_free(oldsighand);
}
BUG_ON(!thread_group_leader(current));
@@ -1462,6 +1462,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
if (!(current->signal->flags & SIGNAL_GROUP_EXIT)) {
current->signal->flags = SIGNAL_GROUP_EXIT;
current->signal->group_exit_code = exit_code;
+ current->signal->group_stop_count = 0;
retval = 0;
}
spin_unlock_irq(&current->sighand->siglock);
@@ -1477,7 +1478,6 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
* Clear any false indication of pending signals that might
* be seen by the filesystem code called to write the core file.
*/
- current->signal->group_stop_count = 0;
clear_thread_flag(TIF_SIGPENDING);
if (current->signal->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
@@ -1505,7 +1505,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
goto close_fail;
if (!file->f_op->write)
goto close_fail;
- if (do_truncate(file->f_dentry, 0, file) != 0)
+ if (do_truncate(file->f_dentry, 0, 0, file) != 0)
goto close_fail;
retval = binfmt->core_dump(signr, regs, file);
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index c49d6254379..5bfe40085fb 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -177,9 +177,9 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
struct dentry *ppd;
struct dentry *npd;
- down(&pd->d_inode->i_sem);
+ mutex_lock(&pd->d_inode->i_mutex);
ppd = CALL(nops,get_parent)(pd);
- up(&pd->d_inode->i_sem);
+ mutex_unlock(&pd->d_inode->i_mutex);
if (IS_ERR(ppd)) {
err = PTR_ERR(ppd);
@@ -201,9 +201,9 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
break;
}
dprintk("find_exported_dentry: found name: %s\n", nbuf);
- down(&ppd->d_inode->i_sem);
+ mutex_lock(&ppd->d_inode->i_mutex);
npd = lookup_one_len(nbuf, ppd, strlen(nbuf));
- up(&ppd->d_inode->i_sem);
+ mutex_unlock(&ppd->d_inode->i_mutex);
if (IS_ERR(npd)) {
err = PTR_ERR(npd);
dprintk("find_exported_dentry: lookup failed: %d\n", err);
@@ -242,9 +242,9 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
struct dentry *nresult;
err = CALL(nops,get_name)(target_dir, nbuf, result);
if (!err) {
- down(&target_dir->d_inode->i_sem);
+ mutex_lock(&target_dir->d_inode->i_mutex);
nresult = lookup_one_len(nbuf, target_dir, strlen(nbuf));
- up(&target_dir->d_inode->i_sem);
+ mutex_unlock(&target_dir->d_inode->i_mutex);
if (!IS_ERR(nresult)) {
if (nresult->d_inode) {
dput(result);
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 6af2f413029..35acc43b897 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -4,6 +4,7 @@
* Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
*/
+#include <linux/capability.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -149,7 +150,7 @@ ext2_iset_acl(struct inode *inode, struct posix_acl **i_acl,
}
/*
- * inode->i_sem: don't care
+ * inode->i_mutex: don't care
*/
static struct posix_acl *
ext2_get_acl(struct inode *inode, int type)
@@ -211,7 +212,7 @@ ext2_get_acl(struct inode *inode, int type)
}
/*
- * inode->i_sem: down
+ * inode->i_mutex: down
*/
static int
ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
@@ -301,8 +302,8 @@ ext2_permission(struct inode *inode, int mask, struct nameidata *nd)
/*
* Initialize the ACLs of a new inode. Called from ext2_new_inode.
*
- * dir->i_sem: down
- * inode->i_sem: up (access to inode is still exclusive)
+ * dir->i_mutex: down
+ * inode->i_mutex: up (access to inode is still exclusive)
*/
int
ext2_init_acl(struct inode *inode, struct inode *dir)
@@ -361,7 +362,7 @@ cleanup:
* for directories) are added. There are no more bits available in the
* file mode.
*
- * inode->i_sem: down
+ * inode->i_mutex: down
*/
int
ext2_acl_chmod(struct inode *inode)
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index bb690806649..2c00953d4b0 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -16,6 +16,7 @@
#include <linux/quotaops.h>
#include <linux/sched.h>
#include <linux/buffer_head.h>
+#include <linux/capability.h>
/*
* balloc.c contains the blocks allocation and deallocation routines
diff --git a/fs/ext2/bitmap.c b/fs/ext2/bitmap.c
index 20145b74623..e9983a0dd39 100644
--- a/fs/ext2/bitmap.c
+++ b/fs/ext2/bitmap.c
@@ -7,8 +7,12 @@
* Universite Pierre et Marie Curie (Paris VI)
*/
+#ifdef EXT2FS_DEBUG
+
#include <linux/buffer_head.h>
+#include "ext2.h"
+
static int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};
unsigned long ext2_count_free (struct buffer_head * map, unsigned int numchars)
@@ -23,3 +27,6 @@ unsigned long ext2_count_free (struct buffer_head * map, unsigned int numchars)
nibblemap[(map->b_data[i] >> 4) & 0xf];
return (sum);
}
+
+#endif /* EXT2FS_DEBUG */
+
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 5b5f52876b4..7442bdd1267 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -592,7 +592,7 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
goto fail;
}
kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr, 0, chunk_size);
+ memset(kaddr, 0, chunk_size);
de = (struct ext2_dir_entry_2 *)kaddr;
de->name_len = 1;
de->rec_len = cpu_to_le16(EXT2_DIR_REC_LEN(1));
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index e977f8566d1..00de0a7312a 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -53,7 +53,7 @@ struct ext2_inode_info {
#ifdef CONFIG_EXT2_FS_XATTR
/*
* Extended attributes can be read independently of the main file
- * data. Taking i_sem even when reading would cause contention
+ * data. Taking i_mutex even when reading would cause contention
* between readers of EAs and writers of regular file data, so
* instead we synchronize on xattr_sem when reading or changing
* EAs.
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index 709d8676b96..3ca9afdf713 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -8,6 +8,7 @@
*/
#include "ext2.h"
+#include <linux/capability.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <asm/current.h>
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 522fa70dd8e..8d6819846fc 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1152,7 +1152,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
struct buffer_head tmp_bh;
struct buffer_head *bh;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
while (towrite > 0) {
tocopy = sb->s_blocksize - offset < towrite ?
sb->s_blocksize - offset : towrite;
@@ -1189,7 +1189,7 @@ out:
inode->i_version++;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(inode);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return len - towrite;
}
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 0099462d427..a2ca3107d47 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -325,7 +325,7 @@ cleanup:
/*
* Inode operation listxattr()
*
- * dentry->d_inode->i_sem: don't care
+ * dentry->d_inode->i_mutex: don't care
*/
ssize_t
ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
@@ -389,10 +389,6 @@ ext2_xattr_set(struct inode *inode, int name_index, const char *name,
ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
name_index, name, value, (long)value_len);
- if (IS_RDONLY(inode))
- return -EROFS;
- if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
- return -EPERM;
if (value == NULL)
value_len = 0;
if (name == NULL)
diff --git a/fs/ext2/xattr_trusted.c b/fs/ext2/xattr_trusted.c
index 52b30ee6a25..f28a6a499c9 100644
--- a/fs/ext2/xattr_trusted.c
+++ b/fs/ext2/xattr_trusted.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/string.h>
+#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/smp_lock.h>
#include <linux/ext2_fs.h>
@@ -38,8 +39,6 @@ ext2_xattr_trusted_get(struct inode *inode, const char *name,
{
if (strcmp(name, "") == 0)
return -EINVAL;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
return ext2_xattr_get(inode, EXT2_XATTR_INDEX_TRUSTED, name,
buffer, size);
}
@@ -50,8 +49,6 @@ ext2_xattr_trusted_set(struct inode *inode, const char *name,
{
if (strcmp(name, "") == 0)
return -EINVAL;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
return ext2_xattr_set(inode, EXT2_XATTR_INDEX_TRUSTED, name,
value, size, flags);
}
diff --git a/fs/ext2/xattr_user.c b/fs/ext2/xattr_user.c
index 0c03ea131a9..f383e7c3a7b 100644
--- a/fs/ext2/xattr_user.c
+++ b/fs/ext2/xattr_user.c
@@ -35,16 +35,10 @@ static int
ext2_xattr_user_get(struct inode *inode, const char *name,
void *buffer, size_t size)
{
- int error;
-
if (strcmp(name, "") == 0)
return -EINVAL;
if (!test_opt(inode->i_sb, XATTR_USER))
return -EOPNOTSUPP;
- error = permission(inode, MAY_READ, NULL);
- if (error)
- return error;
-
return ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER, name, buffer, size);
}
@@ -52,18 +46,10 @@ static int
ext2_xattr_user_set(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
- int error;
-
if (strcmp(name, "") == 0)
return -EINVAL;
if (!test_opt(inode->i_sb, XATTR_USER))
return -EOPNOTSUPP;
- if ( !S_ISREG(inode->i_mode) &&
- (!S_ISDIR(inode->i_mode) || inode->i_mode & S_ISVTX))
- return -EPERM;
- error = permission(inode, MAY_WRITE, NULL);
- if (error)
- return error;
return ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, name,
value, size, flags);
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index 3ac38266fc9..47a9da2dfb4 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/ext3_jbd.h>
#include <linux/ext3_fs.h>
@@ -152,7 +153,7 @@ ext3_iset_acl(struct inode *inode, struct posix_acl **i_acl,
/*
* Inode operation get_posix_acl().
*
- * inode->i_sem: don't care
+ * inode->i_mutex: don't care
*/
static struct posix_acl *
ext3_get_acl(struct inode *inode, int type)
@@ -216,7 +217,7 @@ ext3_get_acl(struct inode *inode, int type)
/*
* Set the access or default ACL of an inode.
*
- * inode->i_sem: down unless called from ext3_new_inode
+ * inode->i_mutex: down unless called from ext3_new_inode
*/
static int
ext3_set_acl(handle_t *handle, struct inode *inode, int type,
@@ -306,8 +307,8 @@ ext3_permission(struct inode *inode, int mask, struct nameidata *nd)
/*
* Initialize the ACLs of a new inode. Called from ext3_new_inode.
*
- * dir->i_sem: down
- * inode->i_sem: up (access to inode is still exclusive)
+ * dir->i_mutex: down
+ * inode->i_mutex: up (access to inode is still exclusive)
*/
int
ext3_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
@@ -368,7 +369,7 @@ cleanup:
* for directories) are added. There are no more bits available in the
* file mode.
*
- * inode->i_sem: down
+ * inode->i_mutex: down
*/
int
ext3_acl_chmod(struct inode *inode)
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index ae1148c24c5..6250fcdf14a 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -13,6 +13,7 @@
#include <linux/config.h>
#include <linux/time.h>
+#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/ext3_fs.h>
@@ -20,8 +21,6 @@
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
-#include "bitmap.h"
-
/*
* balloc.c contains the blocks allocation and deallocation routines
*/
diff --git a/fs/ext3/bitmap.c b/fs/ext3/bitmap.c
index 5b4ba3e246e..cb16b4c5d5d 100644
--- a/fs/ext3/bitmap.c
+++ b/fs/ext3/bitmap.c
@@ -7,8 +7,11 @@
* Universite Pierre et Marie Curie (Paris VI)
*/
+#ifdef EXT3FS_DEBUG
+
#include <linux/buffer_head.h>
-#include "bitmap.h"
+
+#include "ext3_fs.h"
static int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};
@@ -24,3 +27,6 @@ unsigned long ext3_count_free (struct buffer_head * map, unsigned int numchars)
nibblemap[(map->b_data[i] >> 4) & 0xf];
return (sum);
}
+
+#endif /* EXT3FS_DEBUG */
+
diff --git a/fs/ext3/bitmap.h b/fs/ext3/bitmap.h
deleted file mode 100644
index 6ee503a6bb4..00000000000
--- a/fs/ext3/bitmap.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* linux/fs/ext3/bitmap.c
- *
- * Copyright (C) 2005 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
-*/
-
-extern unsigned long ext3_count_free (struct buffer_head *, unsigned int );
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 9e4a2437621..dc826464f31 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -26,7 +26,6 @@
#include <asm/byteorder.h>
-#include "bitmap.h"
#include "xattr.h"
#include "acl.h"
@@ -651,7 +650,7 @@ struct inode *ext3_orphan_get(struct super_block *sb, unsigned long ino)
/* Error cases - e2fsck has already cleaned up for us */
if (ino > max_ino) {
ext3_warning(sb, __FUNCTION__,
- "bad orphan ino %lu! e2fsck was run?\n", ino);
+ "bad orphan ino %lu! e2fsck was run?", ino);
goto out;
}
@@ -660,7 +659,7 @@ struct inode *ext3_orphan_get(struct super_block *sb, unsigned long ino)
bitmap_bh = read_inode_bitmap(sb, block_group);
if (!bitmap_bh) {
ext3_warning(sb, __FUNCTION__,
- "inode bitmap error for orphan %lu\n", ino);
+ "inode bitmap error for orphan %lu", ino);
goto out;
}
@@ -672,7 +671,7 @@ struct inode *ext3_orphan_get(struct super_block *sb, unsigned long ino)
!(inode = iget(sb, ino)) || is_bad_inode(inode) ||
NEXT_ORPHAN(inode) > max_ino) {
ext3_warning(sb, __FUNCTION__,
- "bad orphan inode %lu! e2fsck was run?\n", ino);
+ "bad orphan inode %lu! e2fsck was run?", ino);
printk(KERN_NOTICE "ext3_test_bit(bit=%d, block=%llu) = %d\n",
bit, (unsigned long long)bitmap_bh->b_blocknr,
ext3_test_bit(bit, bitmap_bh->b_data));
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index 706d6860838..556cd551007 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -9,6 +9,7 @@
#include <linux/fs.h>
#include <linux/jbd.h>
+#include <linux/capability.h>
#include <linux/ext3_fs.h>
#include <linux/ext3_jbd.h>
#include <linux/time.h>
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index b3c690a3b54..af193a304ee 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -1476,7 +1476,7 @@ static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
if (levels && (dx_get_count(frames->entries) ==
dx_get_limit(frames->entries))) {
ext3_warning(sb, __FUNCTION__,
- "Directory index full!\n");
+ "Directory index full!");
err = -ENOSPC;
goto cleanup;
}
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 6104ad31050..1041dab6de2 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -31,7 +31,7 @@ static int verify_group_input(struct super_block *sb,
unsigned start = le32_to_cpu(es->s_blocks_count);
unsigned end = start + input->blocks_count;
unsigned group = input->group;
- unsigned itend = input->inode_table + EXT3_SB(sb)->s_itb_per_group;
+ unsigned itend = input->inode_table + sbi->s_itb_per_group;
unsigned overhead = ext3_bg_has_super(sb, group) ?
(1 + ext3_bg_num_gdb(sb, group) +
le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
@@ -340,7 +340,7 @@ static int verify_reserved_gdb(struct super_block *sb,
while ((grp = ext3_list_backups(sb, &three, &five, &seven)) < end) {
if (le32_to_cpu(*p++) != grp * EXT3_BLOCKS_PER_GROUP(sb) + blk){
ext3_warning(sb, __FUNCTION__,
- "reserved GDT %ld missing grp %d (%ld)\n",
+ "reserved GDT %ld missing grp %d (%ld)",
blk, grp,
grp * EXT3_BLOCKS_PER_GROUP(sb) + blk);
return -EINVAL;
@@ -393,7 +393,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
if (EXT3_SB(sb)->s_sbh->b_blocknr !=
le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) {
ext3_warning(sb, __FUNCTION__,
- "won't resize using backup superblock at %llu\n",
+ "won't resize using backup superblock at %llu",
(unsigned long long)EXT3_SB(sb)->s_sbh->b_blocknr);
return -EPERM;
}
@@ -417,7 +417,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
data = (__u32 *)dind->b_data;
if (le32_to_cpu(data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)]) != gdblock) {
ext3_warning(sb, __FUNCTION__,
- "new group %u GDT block %lu not reserved\n",
+ "new group %u GDT block %lu not reserved",
input->group, gdblock);
err = -EINVAL;
goto exit_dind;
@@ -540,7 +540,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
for (res = 0; res < reserved_gdb; res++, blk++) {
if (le32_to_cpu(*data) != blk) {
ext3_warning(sb, __FUNCTION__,
- "reserved block %lu not at offset %ld\n",
+ "reserved block %lu not at offset %ld",
blk, (long)(data - (__u32 *)dind->b_data));
err = -EINVAL;
goto exit_bh;
@@ -683,7 +683,7 @@ exit_err:
if (err) {
ext3_warning(sb, __FUNCTION__,
"can't update backup for group %d (err %d), "
- "forcing fsck on next reboot\n", group, err);
+ "forcing fsck on next reboot", group, err);
sbi->s_mount_state &= ~EXT3_VALID_FS;
sbi->s_es->s_state &= ~cpu_to_le16(EXT3_VALID_FS);
mark_buffer_dirty(sbi->s_sbh);
@@ -722,7 +722,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
if (gdb_off == 0 && !EXT3_HAS_RO_COMPAT_FEATURE(sb,
EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
ext3_warning(sb, __FUNCTION__,
- "Can't resize non-sparse filesystem further\n");
+ "Can't resize non-sparse filesystem further");
return -EPERM;
}
@@ -730,13 +730,13 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
if (!EXT3_HAS_COMPAT_FEATURE(sb,
EXT3_FEATURE_COMPAT_RESIZE_INODE)){
ext3_warning(sb, __FUNCTION__,
- "No reserved GDT blocks, can't resize\n");
+ "No reserved GDT blocks, can't resize");
return -EPERM;
}
inode = iget(sb, EXT3_RESIZE_INO);
if (!inode || is_bad_inode(inode)) {
ext3_warning(sb, __FUNCTION__,
- "Error opening resize inode\n");
+ "Error opening resize inode");
iput(inode);
return -ENOENT;
}
@@ -764,9 +764,9 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
}
lock_super(sb);
- if (input->group != EXT3_SB(sb)->s_groups_count) {
+ if (input->group != sbi->s_groups_count) {
ext3_warning(sb, __FUNCTION__,
- "multiple resizers run on filesystem!\n");
+ "multiple resizers run on filesystem!");
err = -EBUSY;
goto exit_journal;
}
@@ -799,7 +799,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
* data. So we need to be careful to set all of the relevant
* group descriptor data etc. *before* we enable the group.
*
- * The key field here is EXT3_SB(sb)->s_groups_count: as long as
+ * The key field here is sbi->s_groups_count: as long as
* that retains its old value, nobody is going to access the new
* group.
*
@@ -859,7 +859,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
smp_wmb();
/* Update the global fs size fields */
- EXT3_SB(sb)->s_groups_count++;
+ sbi->s_groups_count++;
ext3_journal_dirty_metadata(handle, primary);
@@ -874,7 +874,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
percpu_counter_mod(&sbi->s_freeinodes_counter,
EXT3_INODES_PER_GROUP(sb));
- ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+ ext3_journal_dirty_metadata(handle, sbi->s_sbh);
sb->s_dirt = 1;
exit_journal:
@@ -937,7 +937,7 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
if (last == 0) {
ext3_warning(sb, __FUNCTION__,
- "need to use ext2online to resize further\n");
+ "need to use ext2online to resize further");
return -EPERM;
}
@@ -973,7 +973,7 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
lock_super(sb);
if (o_blocks_count != le32_to_cpu(es->s_blocks_count)) {
ext3_warning(sb, __FUNCTION__,
- "multiple resizers run on filesystem!\n");
+ "multiple resizers run on filesystem!");
err = -EBUSY;
goto exit_put;
}
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 4e6730622d9..56bf7658601 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -43,7 +43,8 @@
#include "acl.h"
#include "namei.h"
-static int ext3_load_journal(struct super_block *, struct ext3_super_block *);
+static int ext3_load_journal(struct super_block *, struct ext3_super_block *,
+ unsigned long journal_devnum);
static int ext3_create_journal(struct super_block *, struct ext3_super_block *,
int);
static void ext3_commit_super (struct super_block * sb,
@@ -628,7 +629,7 @@ enum {
Opt_nouid32, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov,
Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh,
- Opt_commit, Opt_journal_update, Opt_journal_inum,
+ Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev,
Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
@@ -666,6 +667,7 @@ static match_table_t tokens = {
{Opt_commit, "commit=%u"},
{Opt_journal_update, "journal=update"},
{Opt_journal_inum, "journal=%u"},
+ {Opt_journal_dev, "journal_dev=%u"},
{Opt_abort, "abort"},
{Opt_data_journal, "data=journal"},
{Opt_data_ordered, "data=ordered"},
@@ -705,8 +707,9 @@ static unsigned long get_sb_block(void **data)
return sb_block;
}
-static int parse_options (char * options, struct super_block *sb,
- unsigned long * inum, unsigned long *n_blocks_count, int is_remount)
+static int parse_options (char *options, struct super_block *sb,
+ unsigned long *inum, unsigned long *journal_devnum,
+ unsigned long *n_blocks_count, int is_remount)
{
struct ext3_sb_info *sbi = EXT3_SB(sb);
char * p;
@@ -839,6 +842,16 @@ static int parse_options (char * options, struct super_block *sb,
return 0;
*inum = option;
break;
+ case Opt_journal_dev:
+ if (is_remount) {
+ printk(KERN_ERR "EXT3-fs: cannot specify "
+ "journal on remount\n");
+ return 0;
+ }
+ if (match_int(&args[0], &option))
+ return 0;
+ *journal_devnum = option;
+ break;
case Opt_noload:
set_opt (sbi->s_mount_opt, NOLOAD);
break;
@@ -1331,6 +1344,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
unsigned long logic_sb_block;
unsigned long offset = 0;
unsigned long journal_inum = 0;
+ unsigned long journal_devnum = 0;
unsigned long def_mount_opts;
struct inode *root;
int blocksize;
@@ -1411,7 +1425,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
set_opt(sbi->s_mount_opt, RESERVATION);
- if (!parse_options ((char *) data, sb, &journal_inum, NULL, 0))
+ if (!parse_options ((char *) data, sb, &journal_inum, &journal_devnum,
+ NULL, 0))
goto failed_mount;
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
@@ -1622,7 +1637,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
*/
if (!test_opt(sb, NOLOAD) &&
EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
- if (ext3_load_journal(sb, es))
+ if (ext3_load_journal(sb, es, journal_devnum))
goto failed_mount2;
} else if (journal_inum) {
if (ext3_create_journal(sb, es, journal_inum))
@@ -1902,15 +1917,24 @@ out_bdev:
return NULL;
}
-static int ext3_load_journal(struct super_block * sb,
- struct ext3_super_block * es)
+static int ext3_load_journal(struct super_block *sb,
+ struct ext3_super_block *es,
+ unsigned long journal_devnum)
{
journal_t *journal;
int journal_inum = le32_to_cpu(es->s_journal_inum);
- dev_t journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
+ dev_t journal_dev;
int err = 0;
int really_read_only;
+ if (journal_devnum &&
+ journal_devnum != le32_to_cpu(es->s_journal_dev)) {
+ printk(KERN_INFO "EXT3-fs: external journal device major/minor "
+ "numbers have changed\n");
+ journal_dev = new_decode_dev(journal_devnum);
+ } else
+ journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
+
really_read_only = bdev_read_only(sb->s_bdev);
/*
@@ -1969,6 +1993,16 @@ static int ext3_load_journal(struct super_block * sb,
EXT3_SB(sb)->s_journal = journal;
ext3_clear_journal_err(sb, es);
+
+ if (journal_devnum &&
+ journal_devnum != le32_to_cpu(es->s_journal_dev)) {
+ es->s_journal_dev = cpu_to_le32(journal_devnum);
+ sb->s_dirt = 1;
+
+ /* Make sure we flush the recovery flag to disk. */
+ ext3_commit_super(sb, es, 1);
+ }
+
return 0;
}
@@ -2116,7 +2150,7 @@ int ext3_force_commit(struct super_block *sb)
static void ext3_write_super (struct super_block * sb)
{
- if (down_trylock(&sb->s_lock) == 0)
+ if (mutex_trylock(&sb->s_lock) != 0)
BUG();
sb->s_dirt = 0;
}
@@ -2197,7 +2231,7 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
/*
* Allow the "check" option to be passed as a remount option.
*/
- if (!parse_options(data, sb, NULL, &n_blocks_count, 1)) {
+ if (!parse_options(data, sb, NULL, NULL, &n_blocks_count, 1)) {
err = -EINVAL;
goto restore_opts;
}
@@ -2567,7 +2601,7 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
struct buffer_head *bh;
handle_t *handle = journal_current_handle();
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
while (towrite > 0) {
tocopy = sb->s_blocksize - offset < towrite ?
sb->s_blocksize - offset : towrite;
@@ -2610,7 +2644,7 @@ out:
inode->i_version++;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
ext3_mark_inode_dirty(handle, inode);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return len - towrite;
}
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index 430de9f63be..e8d60bf6b7d 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -140,7 +140,7 @@ ext3_xattr_handler(int name_index)
/*
* Inode operation listxattr()
*
- * dentry->d_inode->i_sem: don't care
+ * dentry->d_inode->i_mutex: don't care
*/
ssize_t
ext3_listxattr(struct dentry *dentry, char *buffer, size_t size)
@@ -946,10 +946,6 @@ ext3_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
};
int error;
- if (IS_RDONLY(inode))
- return -EROFS;
- if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
- return -EPERM;
if (!name)
return -EINVAL;
if (strlen(name) > 255)
diff --git a/fs/ext3/xattr_trusted.c b/fs/ext3/xattr_trusted.c
index f68bfd1cf51..86d91f1186d 100644
--- a/fs/ext3/xattr_trusted.c
+++ b/fs/ext3/xattr_trusted.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/string.h>
+#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/smp_lock.h>
#include <linux/ext3_jbd.h>
@@ -39,8 +40,6 @@ ext3_xattr_trusted_get(struct inode *inode, const char *name,
{
if (strcmp(name, "") == 0)
return -EINVAL;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
return ext3_xattr_get(inode, EXT3_XATTR_INDEX_TRUSTED, name,
buffer, size);
}
@@ -51,8 +50,6 @@ ext3_xattr_trusted_set(struct inode *inode, const char *name,
{
if (strcmp(name, "") == 0)
return -EINVAL;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
return ext3_xattr_set(inode, EXT3_XATTR_INDEX_TRUSTED, name,
value, size, flags);
}
diff --git a/fs/ext3/xattr_user.c b/fs/ext3/xattr_user.c
index e907cae7a07..a85a0a17c4f 100644
--- a/fs/ext3/xattr_user.c
+++ b/fs/ext3/xattr_user.c
@@ -37,16 +37,10 @@ static int
ext3_xattr_user_get(struct inode *inode, const char *name,
void *buffer, size_t size)
{
- int error;
-
if (strcmp(name, "") == 0)
return -EINVAL;
if (!test_opt(inode->i_sb, XATTR_USER))
return -EOPNOTSUPP;
- error = permission(inode, MAY_READ, NULL);
- if (error)
- return error;
-
return ext3_xattr_get(inode, EXT3_XATTR_INDEX_USER, name, buffer, size);
}
@@ -54,19 +48,10 @@ static int
ext3_xattr_user_set(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
- int error;
-
if (strcmp(name, "") == 0)
return -EINVAL;
if (!test_opt(inode->i_sb, XATTR_USER))
return -EOPNOTSUPP;
- if ( !S_ISREG(inode->i_mode) &&
- (!S_ISDIR(inode->i_mode) || inode->i_mode & S_ISVTX))
- return -EPERM;
- error = permission(inode, MAY_WRITE, NULL);
- if (error)
- return error;
-
return ext3_xattr_set(inode, EXT3_XATTR_INDEX_USER, name,
value, size, flags);
}
diff --git a/fs/fat/cache.c b/fs/fat/cache.c
index 77c24fcf712..1acc941245f 100644
--- a/fs/fat/cache.c
+++ b/fs/fat/cache.c
@@ -295,7 +295,8 @@ static int fat_bmap_cluster(struct inode *inode, int cluster)
return dclus;
}
-int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys)
+int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
+ unsigned long *mapped_blocks)
{
struct super_block *sb = inode->i_sb;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
@@ -303,9 +304,12 @@ int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys)
int cluster, offset;
*phys = 0;
+ *mapped_blocks = 0;
if ((sbi->fat_bits != 32) && (inode->i_ino == MSDOS_ROOT_INO)) {
- if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits))
+ if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) {
*phys = sector + sbi->dir_start;
+ *mapped_blocks = 1;
+ }
return 0;
}
last_block = (MSDOS_I(inode)->mmu_private + (sb->s_blocksize - 1))
@@ -318,7 +322,11 @@ int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys)
cluster = fat_bmap_cluster(inode, cluster);
if (cluster < 0)
return cluster;
- else if (cluster)
+ else if (cluster) {
*phys = fat_clus_to_blknr(sbi, cluster) + offset;
+ *mapped_blocks = sbi->sec_per_clus - offset;
+ if (*mapped_blocks > last_block - sector)
+ *mapped_blocks = last_block - sector;
+ }
return 0;
}
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index ba824964b9b..db0de5c621c 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -45,8 +45,8 @@ static inline void fat_dir_readahead(struct inode *dir, sector_t iblock,
if ((sbi->fat_bits != 32) && (dir->i_ino == MSDOS_ROOT_INO))
return;
- bh = sb_getblk(sb, phys);
- if (bh && !buffer_uptodate(bh)) {
+ bh = sb_find_get_block(sb, phys);
+ if (bh == NULL || !buffer_uptodate(bh)) {
for (sec = 0; sec < sbi->sec_per_clus; sec++)
sb_breadahead(sb, phys + sec);
}
@@ -68,8 +68,8 @@ static int fat__get_entry(struct inode *dir, loff_t *pos,
{
struct super_block *sb = dir->i_sb;
sector_t phys, iblock;
- int offset;
- int err;
+ unsigned long mapped_blocks;
+ int err, offset;
next:
if (*bh)
@@ -77,7 +77,7 @@ next:
*bh = NULL;
iblock = *pos >> sb->s_blocksize_bits;
- err = fat_bmap(dir, iblock, &phys);
+ err = fat_bmap(dir, iblock, &phys, &mapped_blocks);
if (err || !phys)
return -1; /* beyond EOF or error */
@@ -418,7 +418,7 @@ EODir:
return err;
}
-EXPORT_SYMBOL(fat_search_long);
+EXPORT_SYMBOL_GPL(fat_search_long);
struct fat_ioctl_filldir_callback {
struct dirent __user *dirent;
@@ -729,13 +729,13 @@ static int fat_dir_ioctl(struct inode * inode, struct file * filp,
buf.dirent = d1;
buf.result = 0;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
ret = -ENOENT;
if (!IS_DEADDIR(inode)) {
ret = __fat_readdir(inode, filp, &buf, fat_ioctl_filldir,
short_only, both);
}
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
if (ret >= 0)
ret = buf.result;
return ret;
@@ -780,7 +780,7 @@ int fat_get_dotdot_entry(struct inode *dir, struct buffer_head **bh,
return -ENOENT;
}
-EXPORT_SYMBOL(fat_get_dotdot_entry);
+EXPORT_SYMBOL_GPL(fat_get_dotdot_entry);
/* See if directory is empty */
int fat_dir_empty(struct inode *dir)
@@ -803,7 +803,7 @@ int fat_dir_empty(struct inode *dir)
return result;
}
-EXPORT_SYMBOL(fat_dir_empty);
+EXPORT_SYMBOL_GPL(fat_dir_empty);
/*
* fat_subdirs counts the number of sub-directories of dir. It can be run
@@ -849,7 +849,7 @@ int fat_scan(struct inode *dir, const unsigned char *name,
return -ENOENT;
}
-EXPORT_SYMBOL(fat_scan);
+EXPORT_SYMBOL_GPL(fat_scan);
static int __fat_remove_entries(struct inode *dir, loff_t pos, int nr_slots)
{
@@ -936,7 +936,7 @@ int fat_remove_entries(struct inode *dir, struct fat_slot_info *sinfo)
return 0;
}
-EXPORT_SYMBOL(fat_remove_entries);
+EXPORT_SYMBOL_GPL(fat_remove_entries);
static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used,
struct buffer_head **bhs, int nr_bhs)
@@ -1048,7 +1048,7 @@ error:
return err;
}
-EXPORT_SYMBOL(fat_alloc_new_dir);
+EXPORT_SYMBOL_GPL(fat_alloc_new_dir);
static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,
int *nr_cluster, struct msdos_dir_entry **de,
@@ -1264,4 +1264,4 @@ error_remove:
return err;
}
-EXPORT_SYMBOL(fat_add_entries);
+EXPORT_SYMBOL_GPL(fat_add_entries);
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 4164cd54c4d..a1a9e045121 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -476,6 +476,7 @@ int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
sbi->prev_free = entry;
if (sbi->free_clusters != -1)
sbi->free_clusters--;
+ sb->s_dirt = 1;
cluster[idx_clus] = entry;
idx_clus++;
@@ -496,6 +497,7 @@ int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
/* Couldn't allocate the free entries */
sbi->free_clusters = 0;
+ sb->s_dirt = 1;
err = -ENOSPC;
out:
@@ -509,7 +511,6 @@ out:
}
for (i = 0; i < nr_bhs; i++)
brelse(bhs[i]);
- fat_clusters_flush(sb);
if (err && idx_clus)
fat_free_clusters(inode, cluster[0]);
@@ -542,8 +543,10 @@ int fat_free_clusters(struct inode *inode, int cluster)
}
ops->ent_put(&fatent, FAT_ENT_FREE);
- if (sbi->free_clusters != -1)
+ if (sbi->free_clusters != -1) {
sbi->free_clusters++;
+ sb->s_dirt = 1;
+ }
if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
if (sb->s_flags & MS_SYNCHRONOUS) {
@@ -578,7 +581,7 @@ error:
return err;
}
-EXPORT_SYMBOL(fat_free_clusters);
+EXPORT_SYMBOL_GPL(fat_free_clusters);
int fat_count_free_clusters(struct super_block *sb)
{
@@ -605,6 +608,7 @@ int fat_count_free_clusters(struct super_block *sb)
} while (fat_ent_next(sbi, &fatent));
}
sbi->free_clusters = free;
+ sb->s_dirt = 1;
fatent_brelse(&fatent);
out:
unlock_fat(sbi);
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 7134403d5be..e99c5a73b39 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -6,11 +6,13 @@
* regular file handling primitives for fat-based filesystems
*/
+#include <linux/capability.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/msdos_fs.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
+#include <linux/writeback.h>
int fat_generic_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
@@ -40,7 +42,7 @@ int fat_generic_ioctl(struct inode *inode, struct file *filp,
if (err)
return err;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if (IS_RDONLY(inode)) {
err = -EROFS;
@@ -102,7 +104,7 @@ int fat_generic_ioctl(struct inode *inode, struct file *filp,
MSDOS_I(inode)->i_attrs = attr & ATTR_UNUSED;
mark_inode_dirty(inode);
up:
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return err;
}
default:
@@ -124,6 +126,24 @@ struct file_operations fat_file_operations = {
.sendfile = generic_file_sendfile,
};
+static int fat_cont_expand(struct inode *inode, loff_t size)
+{
+ struct address_space *mapping = inode->i_mapping;
+ loff_t start = inode->i_size, count = size - inode->i_size;
+ int err;
+
+ err = generic_cont_expand_simple(inode, size);
+ if (err)
+ goto out;
+
+ inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
+ mark_inode_dirty(inode);
+ if (IS_SYNC(inode))
+ err = sync_page_range_nolock(inode, mapping, start, count);
+out:
+ return err;
+}
+
int fat_notify_change(struct dentry *dentry, struct iattr *attr)
{
struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb);
@@ -132,11 +152,17 @@ int fat_notify_change(struct dentry *dentry, struct iattr *attr)
lock_kernel();
- /* FAT cannot truncate to a longer file */
+ /*
+ * Expand the file. Since inode_setattr() updates ->i_size
+ * before calling ->truncate(), FAT needs to fill the
+ * hole before it.
+ */
if (attr->ia_valid & ATTR_SIZE) {
if (attr->ia_size > inode->i_size) {
- error = -EPERM;
- goto out;
+ error = fat_cont_expand(inode, attr->ia_size);
+ if (error || attr->ia_valid == ATTR_SIZE)
+ goto out;
+ attr->ia_valid &= ~ATTR_SIZE;
}
}
@@ -173,7 +199,7 @@ out:
return error;
}
-EXPORT_SYMBOL(fat_notify_change);
+EXPORT_SYMBOL_GPL(fat_notify_change);
/* Free all clusters after the skip'th cluster. */
static int fat_free(struct inode *inode, int skip)
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index a0f9b9fe130..e7f4aa7fc68 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -18,10 +18,12 @@
#include <linux/seq_file.h>
#include <linux/msdos_fs.h>
#include <linux/pagemap.h>
+#include <linux/mpage.h>
#include <linux/buffer_head.h>
#include <linux/mount.h>
#include <linux/vfs.h>
#include <linux/parser.h>
+#include <linux/uio.h>
#include <asm/unaligned.h>
#ifndef CONFIG_FAT_DEFAULT_IOCHARSET
@@ -48,51 +50,97 @@ static int fat_add_cluster(struct inode *inode)
return err;
}
-static int fat_get_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
+static int __fat_get_blocks(struct inode *inode, sector_t iblock,
+ unsigned long *max_blocks,
+ struct buffer_head *bh_result, int create)
{
struct super_block *sb = inode->i_sb;
+ struct msdos_sb_info *sbi = MSDOS_SB(sb);
sector_t phys;
- int err;
+ unsigned long mapped_blocks;
+ int err, offset;
- err = fat_bmap(inode, iblock, &phys);
+ err = fat_bmap(inode, iblock, &phys, &mapped_blocks);
if (err)
return err;
if (phys) {
map_bh(bh_result, sb, phys);
+ *max_blocks = min(mapped_blocks, *max_blocks);
return 0;
}
if (!create)
return 0;
+
if (iblock != MSDOS_I(inode)->mmu_private >> sb->s_blocksize_bits) {
fat_fs_panic(sb, "corrupted file size (i_pos %lld, %lld)",
MSDOS_I(inode)->i_pos, MSDOS_I(inode)->mmu_private);
return -EIO;
}
- if (!((unsigned long)iblock & (MSDOS_SB(sb)->sec_per_clus - 1))) {
+
+ offset = (unsigned long)iblock & (sbi->sec_per_clus - 1);
+ if (!offset) {
+ /* TODO: multiple cluster allocation would be desirable. */
err = fat_add_cluster(inode);
if (err)
return err;
}
- MSDOS_I(inode)->mmu_private += sb->s_blocksize;
- err = fat_bmap(inode, iblock, &phys);
+ /* available blocks on this cluster */
+ mapped_blocks = sbi->sec_per_clus - offset;
+
+ *max_blocks = min(mapped_blocks, *max_blocks);
+ MSDOS_I(inode)->mmu_private += *max_blocks << sb->s_blocksize_bits;
+
+ err = fat_bmap(inode, iblock, &phys, &mapped_blocks);
if (err)
return err;
- if (!phys)
- BUG();
+ BUG_ON(!phys);
+ BUG_ON(*max_blocks != mapped_blocks);
set_buffer_new(bh_result);
map_bh(bh_result, sb, phys);
return 0;
}
+static int fat_get_blocks(struct inode *inode, sector_t iblock,
+ unsigned long max_blocks,
+ struct buffer_head *bh_result, int create)
+{
+ struct super_block *sb = inode->i_sb;
+ int err;
+
+ err = __fat_get_blocks(inode, iblock, &max_blocks, bh_result, create);
+ if (err)
+ return err;
+ bh_result->b_size = max_blocks << sb->s_blocksize_bits;
+ return 0;
+}
+
+static int fat_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
+{
+ unsigned long max_blocks = 1;
+ return __fat_get_blocks(inode, iblock, &max_blocks, bh_result, create);
+}
+
static int fat_writepage(struct page *page, struct writeback_control *wbc)
{
return block_write_full_page(page, fat_get_block, wbc);
}
+static int fat_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ return mpage_writepages(mapping, wbc, fat_get_block);
+}
+
static int fat_readpage(struct file *file, struct page *page)
{
- return block_read_full_page(page, fat_get_block);
+ return mpage_readpage(page, fat_get_block);
+}
+
+static int fat_readpages(struct file *file, struct address_space *mapping,
+ struct list_head *pages, unsigned nr_pages)
+{
+ return mpage_readpages(mapping, pages, nr_pages, fat_get_block);
}
static int fat_prepare_write(struct file *file, struct page *page,
@@ -115,6 +163,34 @@ static int fat_commit_write(struct file *file, struct page *page,
return err;
}
+static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
+ const struct iovec *iov,
+ loff_t offset, unsigned long nr_segs)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file->f_mapping->host;
+
+ if (rw == WRITE) {
+ /*
+ * FIXME: blockdev_direct_IO() doesn't use ->prepare_write(),
+ * so we need to update the ->mmu_private to block boundary.
+ *
+ * But we must fill the remaining area or hole by nul for
+ * updating ->mmu_private.
+ */
+ loff_t size = offset + iov_length(iov, nr_segs);
+ if (MSDOS_I(inode)->mmu_private < size)
+ return -EINVAL;
+ }
+
+ /*
+ * FAT needs to use the DIO_LOCKING for avoiding the race
+ * condition of fat_get_block() and ->truncate().
+ */
+ return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
+ offset, nr_segs, fat_get_blocks, NULL);
+}
+
static sector_t _fat_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping, block, fat_get_block);
@@ -122,10 +198,13 @@ static sector_t _fat_bmap(struct address_space *mapping, sector_t block)
static struct address_space_operations fat_aops = {
.readpage = fat_readpage,
+ .readpages = fat_readpages,
.writepage = fat_writepage,
+ .writepages = fat_writepages,
.sync_page = block_sync_page,
.prepare_write = fat_prepare_write,
.commit_write = fat_commit_write,
+ .direct_IO = fat_direct_IO,
.bmap = _fat_bmap
};
@@ -182,7 +261,7 @@ void fat_attach(struct inode *inode, loff_t i_pos)
spin_unlock(&sbi->inode_hash_lock);
}
-EXPORT_SYMBOL(fat_attach);
+EXPORT_SYMBOL_GPL(fat_attach);
void fat_detach(struct inode *inode)
{
@@ -193,7 +272,7 @@ void fat_detach(struct inode *inode)
spin_unlock(&sbi->inode_hash_lock);
}
-EXPORT_SYMBOL(fat_detach);
+EXPORT_SYMBOL_GPL(fat_detach);
struct inode *fat_iget(struct super_block *sb, loff_t i_pos)
{
@@ -347,7 +426,7 @@ out:
return inode;
}
-EXPORT_SYMBOL(fat_build_inode);
+EXPORT_SYMBOL_GPL(fat_build_inode);
static void fat_delete_inode(struct inode *inode)
{
@@ -374,12 +453,17 @@ static void fat_clear_inode(struct inode *inode)
unlock_kernel();
}
-static void fat_put_super(struct super_block *sb)
+static void fat_write_super(struct super_block *sb)
{
- struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ sb->s_dirt = 0;
if (!(sb->s_flags & MS_RDONLY))
fat_clusters_flush(sb);
+}
+
+static void fat_put_super(struct super_block *sb)
+{
+ struct msdos_sb_info *sbi = MSDOS_SB(sb);
if (sbi->nls_disk) {
unload_nls(sbi->nls_disk);
@@ -537,7 +621,7 @@ int fat_sync_inode(struct inode *inode)
return fat_write_inode(inode, 1);
}
-EXPORT_SYMBOL(fat_sync_inode);
+EXPORT_SYMBOL_GPL(fat_sync_inode);
static int fat_show_options(struct seq_file *m, struct vfsmount *mnt);
static struct super_operations fat_sops = {
@@ -546,6 +630,7 @@ static struct super_operations fat_sops = {
.write_inode = fat_write_inode,
.delete_inode = fat_delete_inode,
.put_super = fat_put_super,
+ .write_super = fat_write_super,
.statfs = fat_statfs,
.clear_inode = fat_clear_inode,
.remount_fs = fat_remount,
@@ -1347,7 +1432,7 @@ out_fail:
return error;
}
-EXPORT_SYMBOL(fat_fill_super);
+EXPORT_SYMBOL_GPL(fat_fill_super);
int __init fat_cache_init(void);
void fat_cache_destroy(void);
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 2a0df2122f5..32fb0a3f1da 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -33,7 +33,7 @@ void fat_fs_panic(struct super_block *s, const char *fmt, ...)
}
}
-EXPORT_SYMBOL(fat_fs_panic);
+EXPORT_SYMBOL_GPL(fat_fs_panic);
/* Flushes the number of free clusters on FAT32 */
/* XXX: Need to write one per FSINFO block. Currently only writes 1 */
@@ -67,8 +67,6 @@ void fat_clusters_flush(struct super_block *sb)
if (sbi->prev_free != -1)
fsinfo->next_cluster = cpu_to_le32(sbi->prev_free);
mark_buffer_dirty(bh);
- if (sb->s_flags & MS_SYNCHRONOUS)
- sync_dirty_buffer(bh);
}
brelse(bh);
}
@@ -194,7 +192,7 @@ void fat_date_unix2dos(int unix_date, __le16 *time, __le16 *date)
*date = cpu_to_le16(nl_day-day_n[month-1]+1+(month << 5)+(year << 9));
}
-EXPORT_SYMBOL(fat_date_unix2dos);
+EXPORT_SYMBOL_GPL(fat_date_unix2dos);
int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
{
@@ -222,4 +220,4 @@ int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
return err;
}
-EXPORT_SYMBOL(fat_sync_bhs);
+EXPORT_SYMBOL_GPL(fat_sync_bhs);
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 863b46e0d78..d0767fe5836 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -9,6 +9,7 @@
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
+#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
@@ -457,11 +458,11 @@ static void send_sigio_to_task(struct task_struct *p,
else
si.si_band = band_table[reason - POLL_IN];
si.si_fd = fd;
- if (!send_group_sig_info(fown->signum, &si, p))
+ if (!group_send_sig_info(fown->signum, &si, p))
break;
/* fall-through: fall back on the old plain SIGIO signal */
case 0:
- send_group_sig_info(SIGIO, SEND_SIG_PRIV, p);
+ group_send_sig_info(SIGIO, SEND_SIG_PRIV, p);
}
}
@@ -495,7 +496,7 @@ static void send_sigurg_to_task(struct task_struct *p,
struct fown_struct *fown)
{
if (sigio_perm(p, fown, SIGURG))
- send_group_sig_info(SIGURG, SEND_SIG_PRIV, p);
+ group_send_sig_info(SIGURG, SEND_SIG_PRIV, p);
}
int send_sigurg(struct fown_struct *fown)
diff --git a/fs/fifo.c b/fs/fifo.c
index 5455916241f..923371b753a 100644
--- a/fs/fifo.c
+++ b/fs/fifo.c
@@ -35,7 +35,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
int ret;
ret = -ERESTARTSYS;
- if (down_interruptible(PIPE_SEM(*inode)))
+ if (mutex_lock_interruptible(PIPE_MUTEX(*inode)))
goto err_nolock_nocleanup;
if (!inode->i_pipe) {
@@ -119,7 +119,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
}
/* Ok! */
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
return 0;
err_rd:
@@ -139,7 +139,7 @@ err:
free_pipe_info(inode);
err_nocleanup:
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
err_nolock_nocleanup:
return ret;
diff --git a/fs/file_table.c b/fs/file_table.c
index c3a5e2fd663..768b5816754 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -16,6 +16,7 @@
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
+#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
@@ -117,7 +118,7 @@ EXPORT_SYMBOL(get_empty_filp);
void fastcall fput(struct file *file)
{
- if (rcuref_dec_and_test(&file->f_count))
+ if (atomic_dec_and_test(&file->f_count))
__fput(file);
}
@@ -166,7 +167,7 @@ struct file fastcall *fget(unsigned int fd)
rcu_read_lock();
file = fcheck_files(files, fd);
if (file) {
- if (!rcuref_inc_lf(&file->f_count)) {
+ if (!atomic_inc_not_zero(&file->f_count)) {
/* File object ref couldn't be taken */
rcu_read_unlock();
return NULL;
@@ -198,7 +199,7 @@ struct file fastcall *fget_light(unsigned int fd, int *fput_needed)
rcu_read_lock();
file = fcheck_files(files, fd);
if (file) {
- if (rcuref_inc_lf(&file->f_count))
+ if (atomic_inc_not_zero(&file->f_count))
*fput_needed = 1;
else
/* Didn't get the reference, someone's freed */
@@ -213,7 +214,7 @@ struct file fastcall *fget_light(unsigned int fd, int *fput_needed)
void put_filp(struct file *file)
{
- if (rcuref_dec_and_test(&file->f_count)) {
+ if (atomic_dec_and_test(&file->f_count)) {
security_file_free(file);
file_kill(file);
file_free(file);
diff --git a/fs/freevxfs/vxfs_immed.c b/fs/freevxfs/vxfs_immed.c
index d0401dc68d4..6f5df1700e9 100644
--- a/fs/freevxfs/vxfs_immed.c
+++ b/fs/freevxfs/vxfs_immed.c
@@ -99,8 +99,8 @@ static int
vxfs_immed_readpage(struct file *fp, struct page *pp)
{
struct vxfs_inode_info *vip = VXFS_INO(pp->mapping->host);
- u_int64_t offset = pp->index << PAGE_CACHE_SHIFT;
- caddr_t kaddr;
+ u_int64_t offset = (u_int64_t)pp->index << PAGE_CACHE_SHIFT;
+ caddr_t kaddr;
kaddr = kmap(pp);
memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_CACHE_SIZE);
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 8f873e621f4..e08ab4702d9 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -148,6 +148,26 @@ void fuse_release_background(struct fuse_req *req)
spin_unlock(&fuse_lock);
}
+static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
+{
+ int i;
+ struct fuse_init_out *arg = &req->misc.init_out;
+
+ if (arg->major != FUSE_KERNEL_VERSION)
+ fc->conn_error = 1;
+ else {
+ fc->minor = arg->minor;
+ fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
+ }
+
+ /* After INIT reply is received other requests can go
+ out. So do (FUSE_MAX_OUTSTANDING - 1) number of
+ up()s on outstanding_sem. The last up() is done in
+ fuse_putback_request() */
+ for (i = 1; i < FUSE_MAX_OUTSTANDING; i++)
+ up(&fc->outstanding_sem);
+}
+
/*
* This function is called when a request is finished. Either a reply
* has arrived or it was interrupted (and not yet sent) or some error
@@ -172,19 +192,9 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
up_read(&fc->sbput_sem);
}
wake_up(&req->waitq);
- if (req->in.h.opcode == FUSE_INIT) {
- int i;
-
- if (req->misc.init_in_out.major != FUSE_KERNEL_VERSION)
- fc->conn_error = 1;
-
- /* After INIT reply is received other requests can go
- out. So do (FUSE_MAX_OUTSTANDING - 1) number of
- up()s on outstanding_sem. The last up() is done in
- fuse_putback_request() */
- for (i = 1; i < FUSE_MAX_OUTSTANDING; i++)
- up(&fc->outstanding_sem);
- } else if (req->in.h.opcode == FUSE_RELEASE && req->inode == NULL) {
+ if (req->in.h.opcode == FUSE_INIT)
+ process_init_reply(fc, req);
+ else if (req->in.h.opcode == FUSE_RELEASE && req->inode == NULL) {
/* Special case for failed iget in CREATE */
u64 nodeid = req->in.h.nodeid;
__fuse_get_request(req);
@@ -357,7 +367,7 @@ void fuse_send_init(struct fuse_conn *fc)
/* This is called from fuse_read_super() so there's guaranteed
to be a request available */
struct fuse_req *req = do_get_request(fc);
- struct fuse_init_in_out *arg = &req->misc.init_in_out;
+ struct fuse_init_in *arg = &req->misc.init_in;
arg->major = FUSE_KERNEL_VERSION;
arg->minor = FUSE_KERNEL_MINOR_VERSION;
req->in.h.opcode = FUSE_INIT;
@@ -365,8 +375,12 @@ void fuse_send_init(struct fuse_conn *fc)
req->in.args[0].size = sizeof(*arg);
req->in.args[0].value = arg;
req->out.numargs = 1;
- req->out.args[0].size = sizeof(*arg);
- req->out.args[0].value = arg;
+ /* Variable length argument used for backward compatibility
+ with interface version < 7.5. Rest of init_out is zeroed
+ by do_get_request(), so a short reply is not a problem */
+ req->out.argvar = 1;
+ req->out.args[0].size = sizeof(struct fuse_init_out);
+ req->out.args[0].value = &req->misc.init_out;
request_send_background(fc, req);
}
@@ -615,6 +629,7 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
struct fuse_copy_state cs;
unsigned reqsize;
+ restart:
spin_lock(&fuse_lock);
fc = file->private_data;
err = -EPERM;
@@ -630,20 +645,25 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
req = list_entry(fc->pending.next, struct fuse_req, list);
list_del_init(&req->list);
- spin_unlock(&fuse_lock);
in = &req->in;
- reqsize = req->in.h.len;
- fuse_copy_init(&cs, 1, req, iov, nr_segs);
- err = -EINVAL;
- if (iov_length(iov, nr_segs) >= reqsize) {
- err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
- if (!err)
- err = fuse_copy_args(&cs, in->numargs, in->argpages,
- (struct fuse_arg *) in->args, 0);
+ reqsize = in->h.len;
+ /* If request is too large, reply with an error and restart the read */
+ if (iov_length(iov, nr_segs) < reqsize) {
+ req->out.h.error = -EIO;
+ /* SETXATTR is special, since it may contain too large data */
+ if (in->h.opcode == FUSE_SETXATTR)
+ req->out.h.error = -E2BIG;
+ request_end(fc, req);
+ goto restart;
}
+ spin_unlock(&fuse_lock);
+ fuse_copy_init(&cs, 1, req, iov, nr_segs);
+ err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
+ if (!err)
+ err = fuse_copy_args(&cs, in->numargs, in->argpages,
+ (struct fuse_arg *) in->args, 0);
fuse_copy_finish(&cs);
-
spin_lock(&fuse_lock);
req->locked = 0;
if (!err && req->interrupted)
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 51f5da65277..417bcee466f 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -13,8 +13,16 @@
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/namei.h>
-#include <linux/mount.h>
+/*
+ * FUSE caches dentries and attributes with separate timeout. The
+ * time in jiffies until the dentry/attributes are valid is stored in
+ * dentry->d_time and fuse_inode->i_time respectively.
+ */
+
+/*
+ * Calculate the time in jiffies until a dentry/attributes are valid
+ */
static inline unsigned long time_to_jiffies(unsigned long sec,
unsigned long nsec)
{
@@ -22,6 +30,50 @@ static inline unsigned long time_to_jiffies(unsigned long sec,
return jiffies + timespec_to_jiffies(&ts);
}
+/*
+ * Set dentry and possibly attribute timeouts from the lookup/mk*
+ * replies
+ */
+static void fuse_change_timeout(struct dentry *entry, struct fuse_entry_out *o)
+{
+ entry->d_time = time_to_jiffies(o->entry_valid, o->entry_valid_nsec);
+ if (entry->d_inode)
+ get_fuse_inode(entry->d_inode)->i_time =
+ time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
+}
+
+/*
+ * Mark the attributes as stale, so that at the next call to
+ * ->getattr() they will be fetched from userspace
+ */
+void fuse_invalidate_attr(struct inode *inode)
+{
+ get_fuse_inode(inode)->i_time = jiffies - 1;
+}
+
+/*
+ * Just mark the entry as stale, so that a next attempt to look it up
+ * will result in a new lookup call to userspace
+ *
+ * This is called when a dentry is about to become negative and the
+ * timeout is unknown (unlink, rmdir, rename and in some cases
+ * lookup)
+ */
+static void fuse_invalidate_entry_cache(struct dentry *entry)
+{
+ entry->d_time = jiffies - 1;
+}
+
+/*
+ * Same as fuse_invalidate_entry_cache(), but also try to remove the
+ * dentry from the hash
+ */
+static void fuse_invalidate_entry(struct dentry *entry)
+{
+ d_invalidate(entry);
+ fuse_invalidate_entry_cache(entry);
+}
+
static void fuse_lookup_init(struct fuse_req *req, struct inode *dir,
struct dentry *entry,
struct fuse_entry_out *outarg)
@@ -37,17 +89,34 @@ static void fuse_lookup_init(struct fuse_req *req, struct inode *dir,
req->out.args[0].value = outarg;
}
+/*
+ * Check whether the dentry is still valid
+ *
+ * If the entry validity timeout has expired and the dentry is
+ * positive, try to redo the lookup. If the lookup results in a
+ * different inode, then let the VFS invalidate the dentry and redo
+ * the lookup once more. If the lookup results in the same inode,
+ * then refresh the attributes, timeouts and mark the dentry valid.
+ */
static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
{
- if (!entry->d_inode || is_bad_inode(entry->d_inode))
+ struct inode *inode = entry->d_inode;
+
+ if (inode && is_bad_inode(inode))
return 0;
else if (time_after(jiffies, entry->d_time)) {
int err;
struct fuse_entry_out outarg;
- struct inode *inode = entry->d_inode;
- struct fuse_inode *fi = get_fuse_inode(inode);
- struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req = fuse_get_request(fc);
+ struct fuse_conn *fc;
+ struct fuse_req *req;
+
+ /* Doesn't hurt to "reset" the validity timeout */
+ fuse_invalidate_entry_cache(entry);
+ if (!inode)
+ return 0;
+
+ fc = get_fuse_conn(inode);
+ req = fuse_get_request(fc);
if (!req)
return 0;
@@ -55,6 +124,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
request_send(fc, req);
err = req->out.h.error;
if (!err) {
+ struct fuse_inode *fi = get_fuse_inode(inode);
if (outarg.nodeid != get_node_id(inode)) {
fuse_send_forget(fc, req, outarg.nodeid, 1);
return 0;
@@ -66,18 +136,18 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
return 0;
fuse_change_attributes(inode, &outarg.attr);
- entry->d_time = time_to_jiffies(outarg.entry_valid,
- outarg.entry_valid_nsec);
- fi->i_time = time_to_jiffies(outarg.attr_valid,
- outarg.attr_valid_nsec);
+ fuse_change_timeout(entry, &outarg);
}
return 1;
}
+/*
+ * Check if there's already a hashed alias of this directory inode.
+ * If yes, then lookup and mkdir must not create a new alias.
+ */
static int dir_alias(struct inode *inode)
{
if (S_ISDIR(inode->i_mode)) {
- /* Don't allow creating an alias to a directory */
struct dentry *alias = d_find_alias(inode);
if (alias) {
dput(alias);
@@ -96,8 +166,14 @@ static struct dentry_operations fuse_dentry_operations = {
.d_revalidate = fuse_dentry_revalidate,
};
-static int fuse_lookup_iget(struct inode *dir, struct dentry *entry,
- struct inode **inodep)
+static inline int valid_mode(int m)
+{
+ return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
+ S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
+}
+
+static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
+ struct nameidata *nd)
{
int err;
struct fuse_entry_out outarg;
@@ -106,53 +182,49 @@ static int fuse_lookup_iget(struct inode *dir, struct dentry *entry,
struct fuse_req *req;
if (entry->d_name.len > FUSE_NAME_MAX)
- return -ENAMETOOLONG;
+ return ERR_PTR(-ENAMETOOLONG);
req = fuse_get_request(fc);
if (!req)
- return -EINTR;
+ return ERR_PTR(-EINTR);
fuse_lookup_init(req, dir, entry, &outarg);
request_send(fc, req);
err = req->out.h.error;
- if (!err && invalid_nodeid(outarg.nodeid))
+ if (!err && ((outarg.nodeid && invalid_nodeid(outarg.nodeid)) ||
+ !valid_mode(outarg.attr.mode)))
err = -EIO;
- if (!err) {
+ if (!err && outarg.nodeid) {
inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
&outarg.attr);
if (!inode) {
fuse_send_forget(fc, req, outarg.nodeid, 1);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
}
fuse_put_request(fc, req);
if (err && err != -ENOENT)
- return err;
+ return ERR_PTR(err);
- if (inode) {
- struct fuse_inode *fi = get_fuse_inode(inode);
- entry->d_time = time_to_jiffies(outarg.entry_valid,
- outarg.entry_valid_nsec);
- fi->i_time = time_to_jiffies(outarg.attr_valid,
- outarg.attr_valid_nsec);
+ if (inode && dir_alias(inode)) {
+ iput(inode);
+ return ERR_PTR(-EIO);
}
-
+ d_add(entry, inode);
entry->d_op = &fuse_dentry_operations;
- *inodep = inode;
- return 0;
-}
-
-void fuse_invalidate_attr(struct inode *inode)
-{
- get_fuse_inode(inode)->i_time = jiffies - 1;
-}
-
-static void fuse_invalidate_entry(struct dentry *entry)
-{
- d_invalidate(entry);
- entry->d_time = jiffies - 1;
+ if (!err)
+ fuse_change_timeout(entry, &outarg);
+ else
+ fuse_invalidate_entry_cache(entry);
+ return NULL;
}
+/*
+ * Atomic create+open operation
+ *
+ * If the filesystem doesn't support this, then fall back to separate
+ * 'mknod' + 'open' requests.
+ */
static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
struct nameidata *nd)
{
@@ -163,7 +235,6 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
struct fuse_open_in inarg;
struct fuse_open_out outopen;
struct fuse_entry_out outentry;
- struct fuse_inode *fi;
struct fuse_file *ff;
struct file *file;
int flags = nd->intent.open.flags - 1;
@@ -172,10 +243,6 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
if (fc->no_create)
goto out;
- err = -ENAMETOOLONG;
- if (entry->d_name.len > FUSE_NAME_MAX)
- goto out;
-
err = -EINTR;
req = fuse_get_request(fc);
if (!req)
@@ -220,17 +287,15 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
if (!inode) {
flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
ff->fh = outopen.fh;
+ /* Special release, with inode = NULL, this will
+ trigger a 'forget' request when the release is
+ complete */
fuse_send_release(fc, ff, outentry.nodeid, NULL, flags, 0);
goto out_put_request;
}
fuse_put_request(fc, req);
- entry->d_time = time_to_jiffies(outentry.entry_valid,
- outentry.entry_valid_nsec);
- fi = get_fuse_inode(inode);
- fi->i_time = time_to_jiffies(outentry.attr_valid,
- outentry.attr_valid_nsec);
-
d_instantiate(entry, inode);
+ fuse_change_timeout(entry, &outentry);
file = lookup_instantiate_filp(nd, entry, generic_file_open);
if (IS_ERR(file)) {
ff->fh = outopen.fh;
@@ -248,13 +313,15 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
return err;
}
+/*
+ * Code shared between mknod, mkdir, symlink and link
+ */
static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
struct inode *dir, struct dentry *entry,
int mode)
{
struct fuse_entry_out outarg;
struct inode *inode;
- struct fuse_inode *fi;
int err;
req->in.h.nodeid = get_node_id(dir);
@@ -268,10 +335,13 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
fuse_put_request(fc, req);
return err;
}
- if (invalid_nodeid(outarg.nodeid)) {
- fuse_put_request(fc, req);
- return -EIO;
- }
+ err = -EIO;
+ if (invalid_nodeid(outarg.nodeid))
+ goto out_put_request;
+
+ if ((outarg.attr.mode ^ mode) & S_IFMT)
+ goto out_put_request;
+
inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
&outarg.attr);
if (!inode) {
@@ -280,22 +350,19 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
}
fuse_put_request(fc, req);
- /* Don't allow userspace to do really stupid things... */
- if (((inode->i_mode ^ mode) & S_IFMT) || dir_alias(inode)) {
+ if (dir_alias(inode)) {
iput(inode);
return -EIO;
}
- entry->d_time = time_to_jiffies(outarg.entry_valid,
- outarg.entry_valid_nsec);
-
- fi = get_fuse_inode(inode);
- fi->i_time = time_to_jiffies(outarg.attr_valid,
- outarg.attr_valid_nsec);
-
d_instantiate(entry, inode);
+ fuse_change_timeout(entry, &outarg);
fuse_invalidate_attr(dir);
return 0;
+
+ out_put_request:
+ fuse_put_request(fc, req);
+ return err;
}
static int fuse_mknod(struct inode *dir, struct dentry *entry, int mode,
@@ -355,12 +422,7 @@ static int fuse_symlink(struct inode *dir, struct dentry *entry,
{
struct fuse_conn *fc = get_fuse_conn(dir);
unsigned len = strlen(link) + 1;
- struct fuse_req *req;
-
- if (len > FUSE_SYMLINK_MAX)
- return -ENAMETOOLONG;
-
- req = fuse_get_request(fc);
+ struct fuse_req *req = fuse_get_request(fc);
if (!req)
return -EINTR;
@@ -399,6 +461,7 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
inode->i_nlink = 0;
fuse_invalidate_attr(inode);
fuse_invalidate_attr(dir);
+ fuse_invalidate_entry_cache(entry);
} else if (err == -EINTR)
fuse_invalidate_entry(entry);
return err;
@@ -424,6 +487,7 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
if (!err) {
entry->d_inode->i_nlink = 0;
fuse_invalidate_attr(dir);
+ fuse_invalidate_entry_cache(entry);
} else if (err == -EINTR)
fuse_invalidate_entry(entry);
return err;
@@ -459,6 +523,10 @@ static int fuse_rename(struct inode *olddir, struct dentry *oldent,
fuse_invalidate_attr(olddir);
if (olddir != newdir)
fuse_invalidate_attr(newdir);
+
+ /* newent will end up negative */
+ if (newent->d_inode)
+ fuse_invalidate_entry_cache(newent);
} else if (err == -EINTR) {
/* If request was interrupted, DEITY only knows if the
rename actually took place. If the invalidation
@@ -566,6 +634,15 @@ static int fuse_allow_task(struct fuse_conn *fc, struct task_struct *task)
return 0;
}
+/*
+ * Check whether the inode attributes are still valid
+ *
+ * If the attribute validity timeout has expired, then fetch the fresh
+ * attributes with a 'getattr' request
+ *
+ * I'm not sure why cached attributes are never returned for the root
+ * inode; this is probably being too cautious.
+ */
static int fuse_revalidate(struct dentry *entry)
{
struct inode *inode = entry->d_inode;
@@ -613,6 +690,19 @@ static int fuse_access(struct inode *inode, int mask)
return err;
}
+/*
+ * Check permission. The two basic access models of FUSE are:
+ *
+ * 1) Local access checking ('default_permissions' mount option) based
+ * on file mode. This is the plain old disk filesystem permission
+ * model.
+ *
+ * 2) "Remote" access checking, where server is responsible for
+ * checking permission in each inode operation. An exception to this
+ * is if ->permission() was invoked from sys_access() in which case an
+ * access request is sent. Execute permission is still checked
+ * locally based on file mode.
+ */
static int fuse_permission(struct inode *inode, int mask, struct nameidata *nd)
{
struct fuse_conn *fc = get_fuse_conn(inode);
@@ -631,14 +721,10 @@ static int fuse_permission(struct inode *inode, int mask, struct nameidata *nd)
err = generic_permission(inode, mask, NULL);
}
- /* FIXME: Need some mechanism to revoke permissions:
- currently if the filesystem suddenly changes the
- file mode, we will not be informed about it, and
- continue to allow access to the file/directory.
-
- This is actually not so grave, since the user can
- simply keep access to the file/directory anyway by
- keeping it open... */
+ /* Note: the opposite of the above test does not
+ exist. So if permissions are revoked this won't be
+ noticed immediately, only after the attribute
+ timeout has expired */
return err;
} else {
@@ -691,7 +777,12 @@ static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir)
struct page *page;
struct inode *inode = file->f_dentry->d_inode;
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req = fuse_get_request(fc);
+ struct fuse_req *req;
+
+ if (is_bad_inode(inode))
+ return -EIO;
+
+ req = fuse_get_request(fc);
if (!req)
return -EINTR;
@@ -806,6 +897,15 @@ static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg)
}
}
+/*
+ * Set attributes, and at the same time refresh them.
+ *
+ * Truncation is slightly complicated, because the 'truncate' request
+ * may fail, in which case we don't want to touch the mapping.
+ * vmtruncate() doesn't allow for this case. So do the rlimit
+ * checking by hand and call vmtruncate() only after the file has
+ * actually been truncated.
+ */
static int fuse_setattr(struct dentry *entry, struct iattr *attr)
{
struct inode *inode = entry->d_inode;
@@ -883,23 +983,6 @@ static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry,
return err;
}
-static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
- struct nameidata *nd)
-{
- struct inode *inode;
- int err;
-
- err = fuse_lookup_iget(dir, entry, &inode);
- if (err)
- return ERR_PTR(err);
- if (inode && dir_alias(inode)) {
- iput(inode);
- return ERR_PTR(-EIO);
- }
- d_add(entry, inode);
- return NULL;
-}
-
static int fuse_setxattr(struct dentry *entry, const char *name,
const void *value, size_t size, int flags)
{
@@ -909,9 +992,6 @@ static int fuse_setxattr(struct dentry *entry, const char *name,
struct fuse_setxattr_in inarg;
int err;
- if (size > FUSE_XATTR_SIZE_MAX)
- return -E2BIG;
-
if (fc->no_setxattr)
return -EOPNOTSUPP;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 2ca86141d13..63d2980df5c 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -163,6 +163,9 @@ static int fuse_flush(struct file *file)
struct fuse_flush_in inarg;
int err;
+ if (is_bad_inode(inode))
+ return -EIO;
+
if (fc->no_flush)
return 0;
@@ -199,6 +202,9 @@ int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
struct fuse_fsync_in inarg;
int err;
+ if (is_bad_inode(inode))
+ return -EIO;
+
if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
return 0;
@@ -272,16 +278,22 @@ static int fuse_readpage(struct file *file, struct page *page)
{
struct inode *inode = page->mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
- loff_t pos = (loff_t) page->index << PAGE_CACHE_SHIFT;
- struct fuse_req *req = fuse_get_request(fc);
- int err = -EINTR;
+ struct fuse_req *req;
+ int err;
+
+ err = -EIO;
+ if (is_bad_inode(inode))
+ goto out;
+
+ err = -EINTR;
+ req = fuse_get_request(fc);
if (!req)
goto out;
req->out.page_zeroing = 1;
req->num_pages = 1;
req->pages[0] = page;
- fuse_send_read(req, file, inode, pos, PAGE_CACHE_SIZE);
+ fuse_send_read(req, file, inode, page_offset(page), PAGE_CACHE_SIZE);
err = req->out.h.error;
fuse_put_request(fc, req);
if (!err)
@@ -295,7 +307,7 @@ static int fuse_readpage(struct file *file, struct page *page)
static int fuse_send_readpages(struct fuse_req *req, struct file *file,
struct inode *inode)
{
- loff_t pos = (loff_t) req->pages[0]->index << PAGE_CACHE_SHIFT;
+ loff_t pos = page_offset(req->pages[0]);
size_t count = req->num_pages << PAGE_CACHE_SHIFT;
unsigned i;
req->out.page_zeroing = 1;
@@ -345,6 +357,10 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_readpages_data data;
int err;
+
+ if (is_bad_inode(inode))
+ return -EIO;
+
data.file = file;
data.inode = inode;
data.req = fuse_get_request(fc);
@@ -402,8 +418,13 @@ static int fuse_commit_write(struct file *file, struct page *page,
unsigned count = to - offset;
struct inode *inode = page->mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
- loff_t pos = ((loff_t) page->index << PAGE_CACHE_SHIFT) + offset;
- struct fuse_req *req = fuse_get_request(fc);
+ loff_t pos = page_offset(page) + offset;
+ struct fuse_req *req;
+
+ if (is_bad_inode(inode))
+ return -EIO;
+
+ req = fuse_get_request(fc);
if (!req)
return -EINTR;
@@ -454,7 +475,7 @@ static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
- npages = min(npages, FUSE_MAX_PAGES_PER_REQ);
+ npages = min(max(npages, 1), FUSE_MAX_PAGES_PER_REQ);
down_read(&current->mm->mmap_sem);
npages = get_user_pages(current, current->mm, user_addr, npages, write,
0, req->pages, NULL);
@@ -475,12 +496,16 @@ static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
size_t nmax = write ? fc->max_write : fc->max_read;
loff_t pos = *ppos;
ssize_t res = 0;
- struct fuse_req *req = fuse_get_request(fc);
+ struct fuse_req *req;
+
+ if (is_bad_inode(inode))
+ return -EIO;
+
+ req = fuse_get_request(fc);
if (!req)
return -EINTR;
while (count) {
- size_t tmp;
size_t nres;
size_t nbytes = min(count, nmax);
int err = fuse_get_user_pages(req, buf, nbytes, !write);
@@ -488,8 +513,8 @@ static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
res = err;
break;
}
- tmp = (req->num_pages << PAGE_SHIFT) - req->page_offset;
- nbytes = min(nbytes, tmp);
+ nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
+ nbytes = min(count, nbytes);
if (write)
nres = fuse_send_write(req, file, inode, pos, nbytes);
else
@@ -535,9 +560,9 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
struct inode *inode = file->f_dentry->d_inode;
ssize_t res;
/* Don't allow parallel writes to the same file */
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
res = fuse_direct_io(file, buf, count, ppos, 1);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return res;
}
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 0ea5301f86b..74c8d098a14 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -21,6 +21,9 @@
/** If more requests are outstanding, then the operation will block */
#define FUSE_MAX_OUTSTANDING 10
+/** It could be as large as PATH_MAX, but would that have any uses? */
+#define FUSE_NAME_MAX 1024
+
/** If the FUSE_DEFAULT_PERMISSIONS flag is given, the filesystem
module will check permissions based on the file mode. Otherwise no
permission checking is done in the kernel */
@@ -108,9 +111,6 @@ struct fuse_out {
struct fuse_arg args[3];
};
-struct fuse_req;
-struct fuse_conn;
-
/**
* A request to the client
*/
@@ -159,7 +159,8 @@ struct fuse_req {
union {
struct fuse_forget_in forget_in;
struct fuse_release_in release_in;
- struct fuse_init_in_out init_in_out;
+ struct fuse_init_in init_in;
+ struct fuse_init_out init_out;
} misc;
/** page vector */
@@ -272,6 +273,9 @@ struct fuse_conn {
/** Is create not implemented by fs? */
unsigned no_create : 1;
+ /** Negotiated minor version */
+ unsigned minor;
+
/** Backing dev info */
struct backing_dev_info bdi;
};
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index e69a546844d..04c80cc957a 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -135,12 +135,8 @@ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
fuse_init_common(inode);
init_special_inode(inode, inode->i_mode,
new_decode_dev(attr->rdev));
- } else {
- /* Don't let user create weird files */
- inode->i_mode = S_IFREG;
- fuse_init_common(inode);
- fuse_init_file_inode(inode);
- }
+ } else
+ BUG();
}
static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
@@ -218,6 +214,7 @@ static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr
{
stbuf->f_type = FUSE_SUPER_MAGIC;
stbuf->f_bsize = attr->bsize;
+ stbuf->f_frsize = attr->frsize;
stbuf->f_blocks = attr->blocks;
stbuf->f_bfree = attr->bfree;
stbuf->f_bavail = attr->bavail;
@@ -238,10 +235,12 @@ static int fuse_statfs(struct super_block *sb, struct kstatfs *buf)
if (!req)
return -EINTR;
+ memset(&outarg, 0, sizeof(outarg));
req->in.numargs = 0;
req->in.h.opcode = FUSE_STATFS;
req->out.numargs = 1;
- req->out.args[0].size = sizeof(outarg);
+ req->out.args[0].size =
+ fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg);
req->out.args[0].value = &outarg;
request_send(fc, req);
err = req->out.h.error;
@@ -482,7 +481,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
fc->max_read = d.max_read;
if (fc->max_read / PAGE_CACHE_SIZE < fc->bdi.ra_pages)
fc->bdi.ra_pages = fc->max_read / PAGE_CACHE_SIZE;
- fc->max_write = FUSE_MAX_IN / 2;
err = -ENOMEM;
root = get_root_inode(sb, d.rootmode);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index d499393a8ae..050a4927649 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -547,13 +547,13 @@ static int hfs_file_release(struct inode *inode, struct file *file)
if (atomic_read(&file->f_count) != 0)
return 0;
if (atomic_dec_and_test(&HFS_I(inode)->opencnt)) {
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
hfs_file_truncate(inode);
//if (inode->i_flags & S_DEAD) {
// hfs_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
// hfs_delete_inode(inode);
//}
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
}
return 0;
}
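
This hfs hunk is one instance of the tree-wide i_sem to i_mutex conversion that recurs throughout the rest of this diff (hfsplus, hpfs, hppfs, hugetlbfs, libfs, namei). A minimal sketch of the pattern, assuming only the generic mutex API and not any specific hunk:

	/* Representative sketch, not taken from a particular file above. */
	static void truncate_under_inode_lock(struct inode *inode)
	{
		mutex_lock(&inode->i_mutex);    /* was: down(&inode->i_sem) */
		/* ... truncate or otherwise modify the inode ... */
		mutex_unlock(&inode->i_mutex);  /* was: up(&inode->i_sem) */
	}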
diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c
index c7d316455fa..9fb51632303 100644
--- a/fs/hfsplus/bitmap.c
+++ b/fs/hfsplus/bitmap.c
@@ -29,7 +29,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *ma
return size;
dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
- down(&HFSPLUS_SB(sb).alloc_file->i_sem);
+ mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
(filler_t *)mapping->a_ops->readpage, NULL);
@@ -143,7 +143,7 @@ done:
sb->s_dirt = 1;
dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
out:
- up(&HFSPLUS_SB(sb).alloc_file->i_sem);
+ mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
return start;
}
@@ -164,7 +164,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
if ((offset + count) > HFSPLUS_SB(sb).total_blocks)
return -2;
- down(&HFSPLUS_SB(sb).alloc_file->i_sem);
+ mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
pnr = offset / PAGE_CACHE_BITS;
page = read_cache_page(mapping, pnr, (filler_t *)mapping->a_ops->readpage, NULL);
@@ -215,7 +215,7 @@ out:
kunmap(page);
HFSPLUS_SB(sb).free_blocks += len;
sb->s_dirt = 1;
- up(&HFSPLUS_SB(sb).alloc_file->i_sem);
+ mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
return 0;
}
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index df16fcbff3f..0fa1ab6250b 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -143,9 +143,6 @@ struct hfsplus_sb_info {
unsigned long flags;
- atomic_t inode_cnt;
- u32 last_inode_cnt;
-
struct hlist_head rsrc_inodes;
};
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index fc98583cf04..7acff6c5464 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -182,11 +182,6 @@ static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dent
igrab(dir);
hlist_add_head(&inode->i_hash, &HFSPLUS_SB(sb).rsrc_inodes);
mark_inode_dirty(inode);
- {
- void hfsplus_inode_check(struct super_block *sb);
- atomic_inc(&HFSPLUS_SB(sb).inode_cnt);
- hfsplus_inode_check(sb);
- }
out:
d_add(dentry, inode);
return NULL;
@@ -276,13 +271,13 @@ static int hfsplus_file_release(struct inode *inode, struct file *file)
if (atomic_read(&file->f_count) != 0)
return 0;
if (atomic_dec_and_test(&HFSPLUS_I(inode).opencnt)) {
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
hfsplus_file_truncate(inode);
if (inode->i_flags & S_DEAD) {
hfsplus_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
hfsplus_delete_inode(inode);
}
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
}
return 0;
}
@@ -317,11 +312,6 @@ struct inode *hfsplus_new_inode(struct super_block *sb, int mode)
if (!inode)
return NULL;
- {
- void hfsplus_inode_check(struct super_block *sb);
- atomic_inc(&HFSPLUS_SB(sb).inode_cnt);
- hfsplus_inode_check(sb);
- }
inode->i_ino = HFSPLUS_SB(sb).next_cnid++;
inode->i_mode = mode;
inode->i_uid = current->fsuid;
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index e07aa096e07..13cf848ac83 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -12,6 +12,7 @@
* hfsplus ioctls
*/
+#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/xattr.h>
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 8093351bd7c..d791780def5 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -22,29 +22,12 @@ static void hfsplus_destroy_inode(struct inode *inode);
#include "hfsplus_fs.h"
-void hfsplus_inode_check(struct super_block *sb)
-{
-#if 0
- u32 cnt = atomic_read(&HFSPLUS_SB(sb).inode_cnt);
- u32 last_cnt = HFSPLUS_SB(sb).last_inode_cnt;
-
- if (cnt <= (last_cnt / 2) ||
- cnt >= (last_cnt * 2)) {
- HFSPLUS_SB(sb).last_inode_cnt = cnt;
- printk("inode_check: %u,%u,%u\n", cnt, last_cnt,
- HFSPLUS_SB(sb).cat_tree ? HFSPLUS_SB(sb).cat_tree->node_hash_cnt : 0);
- }
-#endif
-}
-
static void hfsplus_read_inode(struct inode *inode)
{
struct hfs_find_data fd;
struct hfsplus_vh *vhdr;
int err;
- atomic_inc(&HFSPLUS_SB(inode->i_sb).inode_cnt);
- hfsplus_inode_check(inode->i_sb);
INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
init_MUTEX(&HFSPLUS_I(inode).extents_lock);
HFSPLUS_I(inode).flags = 0;
@@ -155,12 +138,10 @@ static int hfsplus_write_inode(struct inode *inode, int unused)
static void hfsplus_clear_inode(struct inode *inode)
{
dprint(DBG_INODE, "hfsplus_clear_inode: %lu\n", inode->i_ino);
- atomic_dec(&HFSPLUS_SB(inode->i_sb).inode_cnt);
if (HFSPLUS_IS_RSRC(inode)) {
HFSPLUS_I(HFSPLUS_I(inode).rsrc_inode).rsrc_inode = NULL;
iput(HFSPLUS_I(inode).rsrc_inode);
}
- hfsplus_inode_check(inode->i_sb);
}
static void hfsplus_write_super(struct super_block *sb)
@@ -320,7 +301,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
/* temporarily use utf8 to correctly find the hidden dir below */
nls = sbi->nls;
sbi->nls = load_nls("utf8");
- if (!nls) {
+ if (!sbi->nls) {
printk("HFS+: unable to load nls for utf8\n");
err = -EINVAL;
goto cleanup;
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 0217c3a0444..5591f9623aa 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -32,19 +32,19 @@ static loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence)
/*printk("dir lseek\n");*/
if (new_off == 0 || new_off == 1 || new_off == 11 || new_off == 12 || new_off == 13) goto ok;
- down(&i->i_sem);
+ mutex_lock(&i->i_mutex);
pos = ((loff_t) hpfs_de_as_down_as_possible(s, hpfs_inode->i_dno) << 4) + 1;
while (pos != new_off) {
if (map_pos_dirent(i, &pos, &qbh)) hpfs_brelse4(&qbh);
else goto fail;
if (pos == 12) goto fail;
}
- up(&i->i_sem);
+ mutex_unlock(&i->i_mutex);
ok:
unlock_kernel();
return filp->f_pos = new_off;
fail:
- up(&i->i_sem);
+ mutex_unlock(&i->i_mutex);
/*printk("illegal lseek: %016llx\n", new_off);*/
unlock_kernel();
return -ESPIPE;
diff --git a/fs/hppfs/hppfs_kern.c b/fs/hppfs/hppfs_kern.c
index 52930915bad..a44dc589739 100644
--- a/fs/hppfs/hppfs_kern.c
+++ b/fs/hppfs/hppfs_kern.c
@@ -171,12 +171,12 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
err = -ENOMEM;
parent = HPPFS_I(ino)->proc_dentry;
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
proc_dentry = d_lookup(parent, &dentry->d_name);
if(proc_dentry == NULL){
proc_dentry = d_alloc(parent, &dentry->d_name);
if(proc_dentry == NULL){
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
goto out;
}
new = (*parent->d_inode->i_op->lookup)(parent->d_inode,
@@ -186,7 +186,7 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
proc_dentry = new;
}
}
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
if(IS_ERR(proc_dentry))
return(proc_dentry);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 8c1cef3bb67..ab4c3a9d51b 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -18,6 +18,7 @@
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
+#include <linux/capability.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
@@ -100,9 +101,6 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
loff_t len, vma_len;
int ret;
- if ((vma->vm_flags & (VM_MAYSHARE | VM_WRITE)) == VM_WRITE)
- return -EINVAL;
-
if (vma->vm_pgoff & (HPAGE_SIZE / PAGE_SIZE - 1))
return -EINVAL;
@@ -121,7 +119,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
vma_len = (loff_t)(vma->vm_end - vma->vm_start);
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
file_accessed(file);
vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
vma->vm_ops = &hugetlb_vm_ops;
@@ -136,7 +134,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
if (inode->i_size < len)
inode->i_size = len;
out:
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return ret;
}
diff --git a/fs/inode.c b/fs/inode.c
index d8d04bd72b5..108138d4e90 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -22,6 +22,7 @@
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/inotify.h>
+#include <linux/mount.h>
/*
* This is needed for the following functions:
@@ -192,7 +193,7 @@ void inode_init_once(struct inode *inode)
INIT_HLIST_NODE(&inode->i_hash);
INIT_LIST_HEAD(&inode->i_dentry);
INIT_LIST_HEAD(&inode->i_devices);
- sema_init(&inode->i_sem, 1);
+ mutex_init(&inode->i_mutex);
init_rwsem(&inode->i_alloc_sem);
INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
rwlock_init(&inode->i_data.tree_lock);
@@ -770,7 +771,7 @@ EXPORT_SYMBOL(igrab);
*
* Note, @test is called with the inode_lock held, so can't sleep.
*/
-static inline struct inode *ifind(struct super_block *sb,
+static struct inode *ifind(struct super_block *sb,
struct hlist_head *head, int (*test)(struct inode *, void *),
void *data, const int wait)
{
@@ -804,7 +805,7 @@ static inline struct inode *ifind(struct super_block *sb,
*
* Otherwise NULL is returned.
*/
-static inline struct inode *ifind_fast(struct super_block *sb,
+static struct inode *ifind_fast(struct super_block *sb,
struct hlist_head *head, unsigned long ino)
{
struct inode *inode;
@@ -1176,22 +1177,33 @@ sector_t bmap(struct inode * inode, sector_t block)
EXPORT_SYMBOL(bmap);
/**
- * update_atime - update the access time
+ * touch_atime - update the access time
+ * @mnt: mount the inode is accessed on
* @inode: inode accessed
*
* Update the accessed time on an inode and mark it for writeback.
* This function automatically handles read only file systems and media,
* as well as the "noatime" flag and inode specific "noatime" markers.
*/
-void update_atime(struct inode *inode)
+void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
{
+ struct inode *inode = dentry->d_inode;
struct timespec now;
- if (IS_NOATIME(inode))
+ if (IS_RDONLY(inode))
return;
- if (IS_NODIRATIME(inode) && S_ISDIR(inode->i_mode))
+
+ if ((inode->i_flags & S_NOATIME) ||
+ (inode->i_sb->s_flags & MS_NOATIME) ||
+ ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
return;
- if (IS_RDONLY(inode))
+
+ /*
+ * We may have a NULL vfsmount when coming from NFSD
+ */
+ if (mnt &&
+ ((mnt->mnt_flags & MNT_NOATIME) ||
+ ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))))
return;
now = current_fs_time(inode->i_sb);
@@ -1201,19 +1213,23 @@ void update_atime(struct inode *inode)
}
}
-EXPORT_SYMBOL(update_atime);
+EXPORT_SYMBOL(touch_atime);
/**
- * inode_update_time - update mtime and ctime time
- * @inode: inode accessed
- * @ctime_too: update ctime too
+ * file_update_time - update mtime and ctime time
+ * @file: file accessed
*
- * Update the mtime time on an inode and mark it for writeback.
- * When ctime_too is specified update the ctime too.
+ * Update the mtime and ctime members of an inode and mark the inode
+ * for writeback. Note that this function is meant exclusively for
+ * usage in the file write path of filesystems, and filesystems may
+ * choose to explicitly ignore update via this function with the
+ * S_NOCTIME inode flag, e.g. for network filesystem where these
+ * timestamps are handled by the server.
*/
-void inode_update_time(struct inode *inode, int ctime_too)
+void file_update_time(struct file *file)
{
+ struct inode *inode = file->f_dentry->d_inode;
struct timespec now;
int sync_it = 0;
@@ -1227,16 +1243,15 @@ void inode_update_time(struct inode *inode, int ctime_too)
sync_it = 1;
inode->i_mtime = now;
- if (ctime_too) {
- if (!timespec_equal(&inode->i_ctime, &now))
- sync_it = 1;
- inode->i_ctime = now;
- }
+ if (!timespec_equal(&inode->i_ctime, &now))
+ sync_it = 1;
+ inode->i_ctime = now;
+
if (sync_it)
mark_inode_dirty_sync(inode);
}
-EXPORT_SYMBOL(inode_update_time);
+EXPORT_SYMBOL(file_update_time);
int inode_needs_sync(struct inode *inode)
{
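
The update_atime() to touch_atime() rename changes the calling convention: callers must now supply the vfsmount so the per-mount MNT_NOATIME and MNT_NODIRATIME flags can be checked. A hypothetical caller update (not part of this hunk) would look like:

	/* Hypothetical call site; file->f_vfsmnt and file->f_dentry are the
	 * fields a read-path caller of this era would be expected to pass. */
	touch_atime(file->f_vfsmnt, file->f_dentry);

The NULL-mnt check above keeps NFSD, which has no vfsmount at hand, working unchanged.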
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 56920918142..f8aeec3ca10 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -8,6 +8,7 @@
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
+#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/security.h>
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 4bf1c6365a1..ca77008146c 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/ioprio.h>
#include <linux/blkdev.h>
+#include <linux/capability.h>
#include <linux/syscalls.h>
static int set_task_ioprio(struct task_struct *task, int ioprio)
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 014a51fd00d..cb3cef525c3 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -24,29 +24,75 @@
#include <linux/slab.h>
/*
- * Unlink a buffer from a transaction.
+ * Unlink a buffer from a transaction checkpoint list.
*
* Called with j_list_lock held.
*/
-static inline void __buffer_unlink(struct journal_head *jh)
+static void __buffer_unlink_first(struct journal_head *jh)
{
transaction_t *transaction;
transaction = jh->b_cp_transaction;
- jh->b_cp_transaction = NULL;
jh->b_cpnext->b_cpprev = jh->b_cpprev;
jh->b_cpprev->b_cpnext = jh->b_cpnext;
- if (transaction->t_checkpoint_list == jh)
+ if (transaction->t_checkpoint_list == jh) {
transaction->t_checkpoint_list = jh->b_cpnext;
- if (transaction->t_checkpoint_list == jh)
- transaction->t_checkpoint_list = NULL;
+ if (transaction->t_checkpoint_list == jh)
+ transaction->t_checkpoint_list = NULL;
+ }
+}
+
+/*
+ * Unlink a buffer from a transaction checkpoint(io) list.
+ *
+ * Called with j_list_lock held.
+ */
+
+static inline void __buffer_unlink(struct journal_head *jh)
+{
+ transaction_t *transaction;
+
+ transaction = jh->b_cp_transaction;
+
+ __buffer_unlink_first(jh);
+ if (transaction->t_checkpoint_io_list == jh) {
+ transaction->t_checkpoint_io_list = jh->b_cpnext;
+ if (transaction->t_checkpoint_io_list == jh)
+ transaction->t_checkpoint_io_list = NULL;
+ }
+}
+
+/*
+ * Move a buffer from the checkpoint list to the checkpoint io list
+ *
+ * Called with j_list_lock held
+ */
+
+static inline void __buffer_relink_io(struct journal_head *jh)
+{
+ transaction_t *transaction;
+
+ transaction = jh->b_cp_transaction;
+ __buffer_unlink_first(jh);
+
+ if (!transaction->t_checkpoint_io_list) {
+ jh->b_cpnext = jh->b_cpprev = jh;
+ } else {
+ jh->b_cpnext = transaction->t_checkpoint_io_list;
+ jh->b_cpprev = transaction->t_checkpoint_io_list->b_cpprev;
+ jh->b_cpprev->b_cpnext = jh;
+ jh->b_cpnext->b_cpprev = jh;
+ }
+ transaction->t_checkpoint_io_list = jh;
}
/*
* Try to release a checkpointed buffer from its transaction.
- * Returns 1 if we released it.
+ * Returns 1 if we released it and 2 if we also released the
+ * whole transaction.
+ *
* Requires j_list_lock
* Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
*/
@@ -57,12 +103,11 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
if (jh->b_jlist == BJ_None && !buffer_locked(bh) && !buffer_dirty(bh)) {
JBUFFER_TRACE(jh, "remove from checkpoint list");
- __journal_remove_checkpoint(jh);
+ ret = __journal_remove_checkpoint(jh) + 1;
jbd_unlock_bh_state(bh);
journal_remove_journal_head(bh);
BUFFER_TRACE(bh, "release");
__brelse(bh);
- ret = 1;
} else {
jbd_unlock_bh_state(bh);
}
@@ -117,83 +162,53 @@ static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh)
}
/*
- * Clean up a transaction's checkpoint list.
- *
- * We wait for any pending IO to complete and make sure any clean
- * buffers are removed from the transaction.
- *
- * Return 1 if we performed any actions which might have destroyed the
- * checkpoint. (journal_remove_checkpoint() deletes the transaction when
- * the last checkpoint buffer is cleansed)
+ * Clean up transaction's list of buffers submitted for io.
+ * We wait for any pending IO to complete and remove any clean
+ * buffers. Note that we take the buffers in the opposite ordering
+ * from the one in which they were submitted for IO.
*
* Called with j_list_lock held.
*/
-static int __cleanup_transaction(journal_t *journal, transaction_t *transaction)
+
+static void __wait_cp_io(journal_t *journal, transaction_t *transaction)
{
- struct journal_head *jh, *next_jh, *last_jh;
+ struct journal_head *jh;
struct buffer_head *bh;
- int ret = 0;
-
- assert_spin_locked(&journal->j_list_lock);
- jh = transaction->t_checkpoint_list;
- if (!jh)
- return 0;
-
- last_jh = jh->b_cpprev;
- next_jh = jh;
- do {
- jh = next_jh;
+ tid_t this_tid;
+ int released = 0;
+
+ this_tid = transaction->t_tid;
+restart:
+ /* Did somebody clean up the transaction in the meanwhile? */
+ if (journal->j_checkpoint_transactions != transaction ||
+ transaction->t_tid != this_tid)
+ return;
+ while (!released && transaction->t_checkpoint_io_list) {
+ jh = transaction->t_checkpoint_io_list;
bh = jh2bh(jh);
+ if (!jbd_trylock_bh_state(bh)) {
+ jbd_sync_bh(journal, bh);
+ spin_lock(&journal->j_list_lock);
+ goto restart;
+ }
if (buffer_locked(bh)) {
atomic_inc(&bh->b_count);
spin_unlock(&journal->j_list_lock);
+ jbd_unlock_bh_state(bh);
wait_on_buffer(bh);
/* the journal_head may have gone by now */
BUFFER_TRACE(bh, "brelse");
__brelse(bh);
- goto out_return_1;
- }
-
- /*
- * This is foul
- */
- if (!jbd_trylock_bh_state(bh)) {
- jbd_sync_bh(journal, bh);
- goto out_return_1;
+ spin_lock(&journal->j_list_lock);
+ goto restart;
}
-
- if (jh->b_transaction != NULL) {
- transaction_t *t = jh->b_transaction;
- tid_t tid = t->t_tid;
-
- spin_unlock(&journal->j_list_lock);
- jbd_unlock_bh_state(bh);
- log_start_commit(journal, tid);
- log_wait_commit(journal, tid);
- goto out_return_1;
- }
-
/*
- * AKPM: I think the buffer_jbddirty test is redundant - it
- * shouldn't have NULL b_transaction?
+ * Now in whatever state the buffer currently is, we know that
+ * it has been written out and so we can drop it from the list
*/
- next_jh = jh->b_cpnext;
- if (!buffer_dirty(bh) && !buffer_jbddirty(bh)) {
- BUFFER_TRACE(bh, "remove from checkpoint");
- __journal_remove_checkpoint(jh);
- jbd_unlock_bh_state(bh);
- journal_remove_journal_head(bh);
- __brelse(bh);
- ret = 1;
- } else {
- jbd_unlock_bh_state(bh);
- }
- } while (jh != last_jh);
-
- return ret;
-out_return_1:
- spin_lock(&journal->j_list_lock);
- return 1;
+ released = __journal_remove_checkpoint(jh);
+ jbd_unlock_bh_state(bh);
+ }
}
#define NR_BATCH 64
@@ -203,9 +218,7 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
{
int i;
- spin_unlock(&journal->j_list_lock);
ll_rw_block(SWRITE, *batch_count, bhs);
- spin_lock(&journal->j_list_lock);
for (i = 0; i < *batch_count; i++) {
struct buffer_head *bh = bhs[i];
clear_buffer_jwrite(bh);
@@ -221,19 +234,46 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
* Return 1 if something happened which requires us to abort the current
* scan of the checkpoint list.
*
- * Called with j_list_lock held.
+ * Called with j_list_lock held and drops it if 1 is returned
* Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
*/
-static int __flush_buffer(journal_t *journal, struct journal_head *jh,
- struct buffer_head **bhs, int *batch_count,
- int *drop_count)
+static int __process_buffer(journal_t *journal, struct journal_head *jh,
+ struct buffer_head **bhs, int *batch_count)
{
struct buffer_head *bh = jh2bh(jh);
int ret = 0;
- if (buffer_dirty(bh) && !buffer_locked(bh) && jh->b_jlist == BJ_None) {
- J_ASSERT_JH(jh, jh->b_transaction == NULL);
+ if (buffer_locked(bh)) {
+ get_bh(bh);
+ spin_unlock(&journal->j_list_lock);
+ jbd_unlock_bh_state(bh);
+ wait_on_buffer(bh);
+ /* the journal_head may have gone by now */
+ BUFFER_TRACE(bh, "brelse");
+ put_bh(bh);
+ ret = 1;
+ }
+ else if (jh->b_transaction != NULL) {
+ transaction_t *t = jh->b_transaction;
+ tid_t tid = t->t_tid;
+ spin_unlock(&journal->j_list_lock);
+ jbd_unlock_bh_state(bh);
+ log_start_commit(journal, tid);
+ log_wait_commit(journal, tid);
+ ret = 1;
+ }
+ else if (!buffer_dirty(bh)) {
+ J_ASSERT_JH(jh, !buffer_jbddirty(bh));
+ BUFFER_TRACE(bh, "remove from checkpoint");
+ __journal_remove_checkpoint(jh);
+ spin_unlock(&journal->j_list_lock);
+ jbd_unlock_bh_state(bh);
+ journal_remove_journal_head(bh);
+ put_bh(bh);
+ ret = 1;
+ }
+ else {
/*
* Important: we are about to write the buffer, and
* possibly block, while still holding the journal lock.
@@ -246,45 +286,30 @@ static int __flush_buffer(journal_t *journal, struct journal_head *jh,
J_ASSERT_BH(bh, !buffer_jwrite(bh));
set_buffer_jwrite(bh);
bhs[*batch_count] = bh;
+ __buffer_relink_io(jh);
jbd_unlock_bh_state(bh);
(*batch_count)++;
if (*batch_count == NR_BATCH) {
+ spin_unlock(&journal->j_list_lock);
__flush_batch(journal, bhs, batch_count);
ret = 1;
}
- } else {
- int last_buffer = 0;
- if (jh->b_cpnext == jh) {
- /* We may be about to drop the transaction. Tell the
- * caller that the lists have changed.
- */
- last_buffer = 1;
- }
- if (__try_to_free_cp_buf(jh)) {
- (*drop_count)++;
- ret = last_buffer;
- }
}
return ret;
}
/*
- * Perform an actual checkpoint. We don't write out only enough to
- * satisfy the current blocked requests: rather we submit a reasonably
- * sized chunk of the outstanding data to disk at once for
- * efficiency. __log_wait_for_space() will retry if we didn't free enough.
+ * Perform an actual checkpoint. We take the first transaction on the
+ * list of transactions to be checkpointed and send all its buffers
+ * to disk. We submit larger chunks of data at once.
*
- * However, we _do_ take into account the amount requested so that once
- * the IO has been queued, we can return as soon as enough of it has
- * completed to disk.
- *
* The journal should be locked before calling this function.
*/
int log_do_checkpoint(journal_t *journal)
{
+ transaction_t *transaction;
+ tid_t this_tid;
int result;
- int batch_count = 0;
- struct buffer_head *bhs[NR_BATCH];
jbd_debug(1, "Start checkpoint\n");
@@ -299,79 +324,70 @@ int log_do_checkpoint(journal_t *journal)
return result;
/*
- * OK, we need to start writing disk blocks. Try to free up a
- * quarter of the log in a single checkpoint if we can.
+ * OK, we need to start writing disk blocks. Take one transaction
+ * and write it.
*/
+ spin_lock(&journal->j_list_lock);
+ if (!journal->j_checkpoint_transactions)
+ goto out;
+ transaction = journal->j_checkpoint_transactions;
+ this_tid = transaction->t_tid;
+restart:
/*
- * AKPM: check this code. I had a feeling a while back that it
- * degenerates into a busy loop at unmount time.
+ * If someone cleaned up this transaction while we slept, we're
+ * done (maybe it's a new transaction, but it fell at the same
+ * address).
*/
- spin_lock(&journal->j_list_lock);
- while (journal->j_checkpoint_transactions) {
- transaction_t *transaction;
- struct journal_head *jh, *last_jh, *next_jh;
- int drop_count = 0;
- int cleanup_ret, retry = 0;
- tid_t this_tid;
-
- transaction = journal->j_checkpoint_transactions;
- this_tid = transaction->t_tid;
- jh = transaction->t_checkpoint_list;
- last_jh = jh->b_cpprev;
- next_jh = jh;
- do {
+ if (journal->j_checkpoint_transactions == transaction ||
+ transaction->t_tid == this_tid) {
+ int batch_count = 0;
+ struct buffer_head *bhs[NR_BATCH];
+ struct journal_head *jh;
+ int retry = 0;
+
+ while (!retry && transaction->t_checkpoint_list) {
struct buffer_head *bh;
- jh = next_jh;
- next_jh = jh->b_cpnext;
+ jh = transaction->t_checkpoint_list;
bh = jh2bh(jh);
if (!jbd_trylock_bh_state(bh)) {
jbd_sync_bh(journal, bh);
- spin_lock(&journal->j_list_lock);
retry = 1;
break;
}
- retry = __flush_buffer(journal, jh, bhs, &batch_count, &drop_count);
- if (cond_resched_lock(&journal->j_list_lock)) {
+ retry = __process_buffer(journal, jh, bhs,
+ &batch_count);
+ if (!retry &&
+ lock_need_resched(&journal->j_list_lock)) {
+ spin_unlock(&journal->j_list_lock);
retry = 1;
break;
}
- } while (jh != last_jh && !retry);
+ }
if (batch_count) {
+ if (!retry) {
+ spin_unlock(&journal->j_list_lock);
+ retry = 1;
+ }
__flush_batch(journal, bhs, &batch_count);
- retry = 1;
}
+ if (retry) {
+ spin_lock(&journal->j_list_lock);
+ goto restart;
+ }
/*
- * If someone cleaned up this transaction while we slept, we're
- * done
- */
- if (journal->j_checkpoint_transactions != transaction)
- break;
- if (retry)
- continue;
- /*
- * Maybe it's a new transaction, but it fell at the same
- * address
- */
- if (transaction->t_tid != this_tid)
- continue;
- /*
- * We have walked the whole transaction list without
- * finding anything to write to disk. We had better be
- * able to make some progress or we are in trouble.
+ * Now we have cleaned up the first transaction's checkpoint
+ * list. Let's clean up the second one.
*/
- cleanup_ret = __cleanup_transaction(journal, transaction);
- J_ASSERT(drop_count != 0 || cleanup_ret != 0);
- if (journal->j_checkpoint_transactions != transaction)
- break;
+ __wait_cp_io(journal, transaction);
}
+out:
spin_unlock(&journal->j_list_lock);
result = cleanup_journal_tail(journal);
if (result < 0)
return result;
-
return 0;
}
@@ -456,52 +472,91 @@ int cleanup_journal_tail(journal_t *journal)
/* Checkpoint list management */
/*
+ * journal_clean_one_cp_list
+ *
+ * Find all the written-back checkpoint buffers in the given list and release them.
+ *
+ * Called with the journal locked.
+ * Called with j_list_lock held.
+ * Returns number of buffers reaped (for debug)
+ */
+
+static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
+{
+ struct journal_head *last_jh;
+ struct journal_head *next_jh = jh;
+ int ret, freed = 0;
+
+ *released = 0;
+ if (!jh)
+ return 0;
+
+ last_jh = jh->b_cpprev;
+ do {
+ jh = next_jh;
+ next_jh = jh->b_cpnext;
+ /* Use trylock because of the ranking */
+ if (jbd_trylock_bh_state(jh2bh(jh))) {
+ ret = __try_to_free_cp_buf(jh);
+ if (ret) {
+ freed++;
+ if (ret == 2) {
+ *released = 1;
+ return freed;
+ }
+ }
+ }
+ /*
+ * This function only frees up some memory if possible so we
+ * don't have an obligation to finish processing. Bail out if
+ * preemption requested:
+ */
+ if (need_resched())
+ return freed;
+ } while (jh != last_jh);
+
+ return freed;
+}
+
+/*
* journal_clean_checkpoint_list
*
* Find all the written-back checkpoint buffers in the journal and release them.
*
* Called with the journal locked.
* Called with j_list_lock held.
- * Returns number of bufers reaped (for debug)
+ * Returns number of buffers reaped (for debug)
*/
int __journal_clean_checkpoint_list(journal_t *journal)
{
transaction_t *transaction, *last_transaction, *next_transaction;
- int ret = 0;
+ int ret = 0, released;
transaction = journal->j_checkpoint_transactions;
- if (transaction == 0)
+ if (!transaction)
goto out;
last_transaction = transaction->t_cpprev;
next_transaction = transaction;
do {
- struct journal_head *jh;
-
transaction = next_transaction;
next_transaction = transaction->t_cpnext;
- jh = transaction->t_checkpoint_list;
- if (jh) {
- struct journal_head *last_jh = jh->b_cpprev;
- struct journal_head *next_jh = jh;
-
- do {
- jh = next_jh;
- next_jh = jh->b_cpnext;
- /* Use trylock because of the ranknig */
- if (jbd_trylock_bh_state(jh2bh(jh)))
- ret += __try_to_free_cp_buf(jh);
- /*
- * This function only frees up some memory
- * if possible so we dont have an obligation
- * to finish processing. Bail out if preemption
- * requested:
- */
- if (need_resched())
- goto out;
- } while (jh != last_jh);
- }
+ ret += journal_clean_one_cp_list(transaction->
+ t_checkpoint_list, &released);
+ if (need_resched())
+ goto out;
+ if (released)
+ continue;
+ /*
+ * It is essential that we are as careful as in the case of
+ * t_checkpoint_list with removing the buffer from the list as
+ * we can possibly see not yet submitted buffers on io_list
+ */
+ ret += journal_clean_one_cp_list(transaction->
+ t_checkpoint_io_list, &released);
+ if (need_resched())
+ goto out;
} while (transaction != last_transaction);
out:
return ret;
@@ -516,18 +571,22 @@ out:
* buffer updates committed in that transaction have safely been stored
* elsewhere on disk. To achieve this, all of the buffers in a
* transaction need to be maintained on the transaction's checkpoint
- * list until they have been rewritten, at which point this function is
+ * lists until they have been rewritten, at which point this function is
* called to remove the buffer from the existing transaction's
- * checkpoint list.
+ * checkpoint lists.
+ *
+ * The function returns 1 if it frees the transaction, 0 otherwise.
*
* This function is called with the journal locked.
* This function is called with j_list_lock held.
+ * This function is called with jbd_lock_bh_state(jh2bh(jh))
*/
-void __journal_remove_checkpoint(struct journal_head *jh)
+int __journal_remove_checkpoint(struct journal_head *jh)
{
transaction_t *transaction;
journal_t *journal;
+ int ret = 0;
JBUFFER_TRACE(jh, "entry");
@@ -538,8 +597,10 @@ void __journal_remove_checkpoint(struct journal_head *jh)
journal = transaction->t_journal;
__buffer_unlink(jh);
+ jh->b_cp_transaction = NULL;
- if (transaction->t_checkpoint_list != NULL)
+ if (transaction->t_checkpoint_list != NULL ||
+ transaction->t_checkpoint_io_list != NULL)
goto out;
JBUFFER_TRACE(jh, "transaction has no more buffers");
@@ -565,8 +626,10 @@ void __journal_remove_checkpoint(struct journal_head *jh)
/* Just in case anybody was waiting for more transactions to be
checkpointed... */
wake_up(&journal->j_wait_logspace);
+ ret = 1;
out:
JBUFFER_TRACE(jh, "exit");
+ return ret;
}
/*
@@ -628,6 +691,7 @@ void __journal_drop_transaction(journal_t *journal, transaction_t *transaction)
J_ASSERT(transaction->t_shadow_list == NULL);
J_ASSERT(transaction->t_log_list == NULL);
J_ASSERT(transaction->t_checkpoint_list == NULL);
+ J_ASSERT(transaction->t_checkpoint_io_list == NULL);
J_ASSERT(transaction->t_updates == 0);
J_ASSERT(journal->j_committing_transaction != transaction);
J_ASSERT(journal->j_running_transaction != transaction);
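
The checkpoint rework above splits each transaction's checkpointed buffers across two lists: t_checkpoint_list holds dirty buffers not yet submitted, while the new t_checkpoint_io_list holds buffers whose write-out has been started (__buffer_relink_io() moves them over, __wait_cp_io() reaps them once the I/O completes). A rough sketch of the per-transaction state the new code relies on, using the field names as they appear above:

	/* Sketch; the real transaction_t is declared in include/linux/jbd.h. */
	struct checkpoint_lists_sketch {
		struct journal_head *t_checkpoint_list;     /* dirty, not yet submitted */
		struct journal_head *t_checkpoint_io_list;  /* submitted, waiting for I/O */
	};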
diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c
index 3dcc6d2162c..fc3855a1aef 100644
--- a/fs/jffs/inode-v23.c
+++ b/fs/jffs/inode-v23.c
@@ -757,7 +757,7 @@ jffs_do_readpage_nolock(struct file *file, struct page *page)
read_len = 0;
result = 0;
- offset = page->index << PAGE_CACHE_SHIFT;
+ offset = page_offset(page);
kmap(page);
buf = page_address(page);
@@ -1415,7 +1415,7 @@ jffs_file_write(struct file *filp, const char *buf, size_t count,
* This will never trigger with sane page sizes. leave it in
* anyway, since I'm thinking about how to merge larger writes
* (the current idea is to poke a thread that does the actual
- * I/O and starts by doing a down(&inode->i_sem). then we
+ * I/O and starts by doing a mutex_lock(&inode->i_mutex). then we
* would need to get the page cache pages and have a list of
* I/O requests and do write-merging here.
* -- prumpf
@@ -1545,7 +1545,7 @@ jffs_commit_write(struct file *filp, struct page *page,
{
void *addr = page_address(page) + from;
/* XXX: PAGE_CACHE_SHIFT or PAGE_SHIFT */
- loff_t pos = (page->index<<PAGE_CACHE_SHIFT) + from;
+ loff_t pos = page_offset(page) + from;
return jffs_file_write(filp, addr, to-from, &pos);
} /* jffs_commit_write() */
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index d0fcc5f3497..09e5d10b884 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -11,6 +11,7 @@
*
*/
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 68000a50ceb..2967b739341 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -302,8 +302,7 @@ int dbSync(struct inode *ipbmap)
/*
* write out dirty pages of bmap
*/
- filemap_fdatawrite(ipbmap->i_mapping);
- filemap_fdatawait(ipbmap->i_mapping);
+ filemap_write_and_wait(ipbmap->i_mapping);
diWriteSpecial(ipbmap, 0);
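
filemap_write_and_wait() folds the back-to-back filemap_fdatawrite()/filemap_fdatawait() pairs, used here and throughout the jfs hunks that follow, into a single call. Roughly, and assuming the mm/filemap.c helper of this series, it behaves like:

	/* Sketch of the helper; the real implementation is in mm/filemap.c. */
	static int write_and_wait_sketch(struct address_space *mapping)
	{
		int err = 0;

		if (mapping->nrpages) {
			err = filemap_fdatawrite(mapping);
			/* -EIO is fatal enough that waiting is pointless */
			if (err != -EIO) {
				int err2 = filemap_fdatawait(mapping);
				if (!err)
					err = err2;
			}
		}
		return err;
	}

Besides being shorter at the call sites, the combined helper also skips both steps when the mapping has no pages.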
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index 28201b194f5..31b4aa13dd4 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -265,8 +265,7 @@ int diSync(struct inode *ipimap)
/*
* write out dirty pages of imap
*/
- filemap_fdatawrite(ipimap->i_mapping);
- filemap_fdatawait(ipimap->i_mapping);
+ filemap_write_and_wait(ipimap->i_mapping);
diWriteSpecial(ipimap, 0);
@@ -565,8 +564,7 @@ void diFreeSpecial(struct inode *ip)
jfs_err("diFreeSpecial called with NULL ip!");
return;
}
- filemap_fdatawrite(ip->i_mapping);
- filemap_fdatawait(ip->i_mapping);
+ filemap_write_and_wait(ip->i_mapping);
truncate_inode_pages(ip->i_mapping, 0);
iput(ip);
}
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h
index c0fd7b3eadc..dc21a5bd54d 100644
--- a/fs/jfs/jfs_incore.h
+++ b/fs/jfs/jfs_incore.h
@@ -58,7 +58,7 @@ struct jfs_inode_info {
/*
* rdwrlock serializes xtree between reads & writes and synchronizes
* changes to special inodes. It's use would be redundant on
- * directories since the i_sem taken in the VFS is sufficient.
+ * directories since the i_mutex taken in the VFS is sufficient.
*/
struct rw_semaphore rdwrlock;
/*
@@ -68,7 +68,7 @@ struct jfs_inode_info {
* inode is blocked in txBegin or TxBeginAnon
*/
struct semaphore commit_sem;
- /* xattr_sem allows us to access the xattrs without taking i_sem */
+ /* xattr_sem allows us to access the xattrs without taking i_mutex */
struct rw_semaphore xattr_sem;
lid_t xtlid; /* lid of xtree lock on directory */
#ifdef CONFIG_JFS_POSIX_ACL
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index b660c93c92d..2ddb6b892bc 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -1231,10 +1231,8 @@ int txCommit(tid_t tid, /* transaction identifier */
* when we don't need to worry about it at all.
*
* if ((!S_ISDIR(ip->i_mode))
- * && (tblk->flag & COMMIT_DELETE) == 0) {
- * filemap_fdatawrite(ip->i_mapping);
- * filemap_fdatawait(ip->i_mapping);
- * }
+ * && (tblk->flag & COMMIT_DELETE) == 0)
+ * filemap_write_and_wait(ip->i_mapping);
*/
/*
diff --git a/fs/jfs/jfs_umount.c b/fs/jfs/jfs_umount.c
index 5cf91785b54..21eaf7ac0fc 100644
--- a/fs/jfs/jfs_umount.c
+++ b/fs/jfs/jfs_umount.c
@@ -108,8 +108,7 @@ int jfs_umount(struct super_block *sb)
* Make sure all metadata makes it to disk before we mark
* the superblock as clean
*/
- filemap_fdatawrite(sbi->direct_inode->i_mapping);
- filemap_fdatawait(sbi->direct_inode->i_mapping);
+ filemap_write_and_wait(sbi->direct_inode->i_mapping);
/*
* ensure all file system file pages are propagated to their
@@ -161,8 +160,7 @@ int jfs_umount_rw(struct super_block *sb)
* mark the superblock clean before everything is flushed to
* disk.
*/
- filemap_fdatawrite(sbi->direct_inode->i_mapping);
- filemap_fdatawait(sbi->direct_inode->i_mapping);
+ filemap_write_and_wait(sbi->direct_inode->i_mapping);
updateSuper(sb, FM_CLEAN);
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
index c6dc254d325..45180361871 100644
--- a/fs/jfs/resize.c
+++ b/fs/jfs/resize.c
@@ -376,8 +376,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
* by txCommit();
*/
filemap_fdatawait(ipbmap->i_mapping);
- filemap_fdatawrite(ipbmap->i_mapping);
- filemap_fdatawait(ipbmap->i_mapping);
+ filemap_write_and_wait(ipbmap->i_mapping);
diWriteSpecial(ipbmap, 0);
newPage = nPages; /* first new page number */
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 4226af3ea91..8d31f133643 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -502,8 +502,7 @@ out_no_rw:
jfs_err("jfs_umount failed with return code %d", rc);
}
out_mount_failed:
- filemap_fdatawrite(sbi->direct_inode->i_mapping);
- filemap_fdatawait(sbi->direct_inode->i_mapping);
+ filemap_write_and_wait(sbi->direct_inode->i_mapping);
truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
make_bad_inode(sbi->direct_inode);
iput(sbi->direct_inode);
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 23aa5066b5a..f23048f9471 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -17,6 +17,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
@@ -83,21 +84,6 @@ struct ea_buffer {
#define EA_NEW 0x0004
#define EA_MALLOC 0x0008
-/* Namespaces */
-#define XATTR_SYSTEM_PREFIX "system."
-#define XATTR_SYSTEM_PREFIX_LEN (sizeof (XATTR_SYSTEM_PREFIX) - 1)
-
-#define XATTR_USER_PREFIX "user."
-#define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
-
-#define XATTR_OS2_PREFIX "os2."
-#define XATTR_OS2_PREFIX_LEN (sizeof (XATTR_OS2_PREFIX) - 1)
-
-/* XATTR_SECURITY_PREFIX is defined in include/linux/xattr.h */
-#define XATTR_SECURITY_PREFIX_LEN (sizeof (XATTR_SECURITY_PREFIX) - 1)
-
-#define XATTR_TRUSTED_PREFIX "trusted."
-#define XATTR_TRUSTED_PREFIX_LEN (sizeof (XATTR_TRUSTED_PREFIX) - 1)
/*
* These three routines are used to recognize on-disk extended attributes
@@ -773,36 +759,23 @@ static int can_set_system_xattr(struct inode *inode, const char *name,
static int can_set_xattr(struct inode *inode, const char *name,
const void *value, size_t value_len)
{
- if (IS_RDONLY(inode))
- return -EROFS;
-
- if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
- return -EPERM;
-
- if(strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) == 0)
- /*
- * "system.*"
- */
+ if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return can_set_system_xattr(inode, name, value, value_len);
- if(strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0)
- return (capable(CAP_SYS_ADMIN) ? 0 : -EPERM);
-
-#ifdef CONFIG_JFS_SECURITY
- if (strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)
- == 0)
- return 0; /* Leave it to the security module */
-#endif
-
- if((strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) != 0) &&
- (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) != 0))
+ /*
+ * Don't allow setting an attribute in an unknown namespace.
+ */
+ if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) &&
+ strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
+ strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
+ strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN))
return -EOPNOTSUPP;
if (!S_ISREG(inode->i_mode) &&
(!S_ISDIR(inode->i_mode) || inode->i_mode &S_ISVTX))
return -EPERM;
- return permission(inode, MAY_WRITE, NULL);
+ return 0;
}
int __jfs_setxattr(tid_t tid, struct inode *inode, const char *name,
@@ -972,22 +945,6 @@ int jfs_setxattr(struct dentry *dentry, const char *name, const void *value,
return rc;
}
-static int can_get_xattr(struct inode *inode, const char *name)
-{
-#ifdef CONFIG_JFS_SECURITY
- if(strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0)
- return 0;
-#endif
-
- if(strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0)
- return (capable(CAP_SYS_ADMIN) ? 0 : -EPERM);
-
- if(strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) == 0)
- return 0;
-
- return permission(inode, MAY_READ, NULL);
-}
-
ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
size_t buf_size)
{
@@ -998,12 +955,8 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
ssize_t size;
int namelen = strlen(name);
char *os2name = NULL;
- int rc;
char *value;
- if ((rc = can_get_xattr(inode, name)))
- return rc;
-
if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
os2name = kmalloc(namelen - XATTR_OS2_PREFIX_LEN + 1,
GFP_KERNEL);
diff --git a/fs/libfs.c b/fs/libfs.c
index 58101dff2c6..63c020e6589 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -74,7 +74,7 @@ int dcache_dir_close(struct inode *inode, struct file *file)
loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
{
- down(&file->f_dentry->d_inode->i_sem);
+ mutex_lock(&file->f_dentry->d_inode->i_mutex);
switch (origin) {
case 1:
offset += file->f_pos;
@@ -82,7 +82,7 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
if (offset >= 0)
break;
default:
- up(&file->f_dentry->d_inode->i_sem);
+ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
return -EINVAL;
}
if (offset != file->f_pos) {
@@ -93,20 +93,20 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
loff_t n = file->f_pos - 2;
spin_lock(&dcache_lock);
- list_del(&cursor->d_child);
+ list_del(&cursor->d_u.d_child);
p = file->f_dentry->d_subdirs.next;
while (n && p != &file->f_dentry->d_subdirs) {
struct dentry *next;
- next = list_entry(p, struct dentry, d_child);
+ next = list_entry(p, struct dentry, d_u.d_child);
if (!d_unhashed(next) && next->d_inode)
n--;
p = p->next;
}
- list_add_tail(&cursor->d_child, p);
+ list_add_tail(&cursor->d_u.d_child, p);
spin_unlock(&dcache_lock);
}
}
- up(&file->f_dentry->d_inode->i_sem);
+ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
return offset;
}
@@ -126,7 +126,7 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
struct dentry *dentry = filp->f_dentry;
struct dentry *cursor = filp->private_data;
- struct list_head *p, *q = &cursor->d_child;
+ struct list_head *p, *q = &cursor->d_u.d_child;
ino_t ino;
int i = filp->f_pos;
@@ -153,7 +153,7 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
}
for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
struct dentry *next;
- next = list_entry(p, struct dentry, d_child);
+ next = list_entry(p, struct dentry, d_u.d_child);
if (d_unhashed(next) || !next->d_inode)
continue;
@@ -261,7 +261,7 @@ int simple_empty(struct dentry *dentry)
int ret = 0;
spin_lock(&dcache_lock);
- list_for_each_entry(child, &dentry->d_subdirs, d_child)
+ list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child)
if (simple_positive(child))
goto out;
ret = 1;
@@ -356,7 +356,7 @@ int simple_commit_write(struct file *file, struct page *page,
/*
* No need to use i_size_read() here, the i_size
- * cannot change under us because we hold the i_sem.
+ * cannot change under us because we hold the i_mutex.
*/
if (pos > inode->i_size)
i_size_write(inode, pos);
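
The d_child to d_u.d_child renames in this libfs hunk track the dentry layout change made elsewhere in this diff (presumably in the include/linux/dcache.h portion): the child list now shares storage with the RCU head. A sketch of the union the renamed accesses assume:

	/* Sketch of the relevant part of struct dentry after the change. */
	union {
		struct list_head d_child;   /* child of parent list */
		struct rcu_head d_rcu;
	} d_u;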
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index c5a33648e9f..14552403957 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -26,11 +26,12 @@
static int nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
-static void nlmclnt_unlock_callback(struct rpc_task *);
-static void nlmclnt_cancel_callback(struct rpc_task *);
static int nlm_stat_to_errno(u32 stat);
static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
+static const struct rpc_call_ops nlmclnt_unlock_ops;
+static const struct rpc_call_ops nlmclnt_cancel_ops;
+
/*
* Cookie counter for NLM requests
*/
@@ -221,8 +222,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
goto done;
}
clnt->cl_softrtry = nfssrv->client->cl_softrtry;
- clnt->cl_intr = nfssrv->client->cl_intr;
- clnt->cl_chatty = nfssrv->client->cl_chatty;
+ clnt->cl_intr = nfssrv->client->cl_intr;
}
/* Keep the old signal mask */
@@ -399,8 +399,7 @@ in_grace_period:
/*
* Generic NLM call, async version.
*/
-int
-nlmsvc_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
+int nlmsvc_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
struct nlm_host *host = req->a_host;
struct rpc_clnt *clnt;
@@ -419,13 +418,12 @@ nlmsvc_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
msg.rpc_proc = &clnt->cl_procinfo[proc];
/* bootstrap and kick off the async RPC call */
- status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);
+ status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req);
return status;
}
-static int
-nlmclnt_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
+static int nlmclnt_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
struct nlm_host *host = req->a_host;
struct rpc_clnt *clnt;
@@ -448,7 +446,7 @@ nlmclnt_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
/* Increment host refcount */
nlm_get_host(host);
/* bootstrap and kick off the async RPC call */
- status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);
+ status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req);
if (status < 0)
nlm_release_host(host);
return status;
@@ -664,7 +662,7 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
if (req->a_flags & RPC_TASK_ASYNC) {
status = nlmclnt_async_call(req, NLMPROC_UNLOCK,
- nlmclnt_unlock_callback);
+ &nlmclnt_unlock_ops);
/* Hrmf... Do the unlock early since locks_remove_posix()
* really expects us to free the lock synchronously */
do_vfs_lock(fl);
@@ -692,10 +690,9 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
return -ENOLCK;
}
-static void
-nlmclnt_unlock_callback(struct rpc_task *task)
+static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
- struct nlm_rqst *req = (struct nlm_rqst *) task->tk_calldata;
+ struct nlm_rqst *req = data;
int status = req->a_res.status;
if (RPC_ASSASSINATED(task))
@@ -722,6 +719,10 @@ die:
rpc_restart_call(task);
}
+static const struct rpc_call_ops nlmclnt_unlock_ops = {
+ .rpc_call_done = nlmclnt_unlock_callback,
+};
+
/*
* Cancel a blocked lock request.
* We always use an async RPC call for this in order not to hang a
@@ -750,8 +751,7 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
nlmclnt_setlockargs(req, fl);
- status = nlmclnt_async_call(req, NLMPROC_CANCEL,
- nlmclnt_cancel_callback);
+ status = nlmclnt_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops);
if (status < 0) {
nlmclnt_release_lockargs(req);
kfree(req);
@@ -765,10 +765,9 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
return status;
}
-static void
-nlmclnt_cancel_callback(struct rpc_task *task)
+static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
{
- struct nlm_rqst *req = (struct nlm_rqst *) task->tk_calldata;
+ struct nlm_rqst *req = data;
if (RPC_ASSASSINATED(task))
goto die;
@@ -807,6 +806,10 @@ retry_cancel:
rpc_delay(task, 30 * HZ);
}
+static const struct rpc_call_ops nlmclnt_cancel_ops = {
+ .rpc_call_done = nlmclnt_cancel_callback,
+};
+
/*
* Convert an NLM status code to a generic kernel errno
*/
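
The lockd changes above replace bare rpc_action callbacks with rpc_call_ops tables, and the async private data now arrives as a void * argument instead of task->tk_calldata. A minimal sketch of the pattern, with hypothetical names:

	/* Hypothetical example of the new async-call convention. */
	static void example_call_done(struct rpc_task *task, void *data)
	{
		struct nlm_rqst *req = data;    /* previously task->tk_calldata */
		/* ... examine task->tk_status and req->a_res ... */
	}

	static const struct rpc_call_ops example_call_ops = {
		.rpc_call_done = example_call_done,
	};

	/* callers then pass the ops table rather than a function pointer:
	 *	rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, &example_call_ops, req);
	 */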
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index c4c8601096e..82f7a0b1d8a 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -177,7 +177,7 @@ nlm_bind_host(struct nlm_host *host)
if ((clnt = host->h_rpcclnt) != NULL) {
xprt = clnt->cl_xprt;
if (time_after_eq(jiffies, host->h_nextrebind)) {
- clnt->cl_port = 0;
+ rpc_force_rebind(clnt);
host->h_nextrebind = jiffies + NLM_HOST_REBIND;
dprintk("lockd: next rebind in %ld jiffies\n",
host->h_nextrebind - jiffies);
@@ -217,7 +217,7 @@ nlm_rebind_host(struct nlm_host *host)
{
dprintk("lockd: rebind host %s\n", host->h_name);
if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
- host->h_rpcclnt->cl_port = 0;
+ rpc_force_rebind(host->h_rpcclnt);
host->h_nextrebind = jiffies + NLM_HOST_REBIND;
}
}
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 2d144abe84a..0edc03e6796 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -123,7 +123,6 @@ nsm_create(void)
if (IS_ERR(clnt))
goto out_err;
clnt->cl_softrtry = 1;
- clnt->cl_chatty = 1;
clnt->cl_oneshot = 1;
return clnt;
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 12a857c29e2..71a30b416d1 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -178,6 +178,8 @@ lockd(struct svc_rqst *rqstp)
}
+ flush_signals(current);
+
/*
* Check whether there's a new lockd process before
* shutting down the hosts and clearing the slot.
@@ -192,8 +194,6 @@ lockd(struct svc_rqst *rqstp)
"lockd: new process, skipping host shutdown\n");
wake_up(&lockd_exit);
- flush_signals(current);
-
/* Exit the RPC thread */
svc_exit_thread(rqstp);
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index 489670e2176..4063095d849 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -22,7 +22,8 @@
#define NLMDBG_FACILITY NLMDBG_CLIENT
static u32 nlm4svc_callback(struct svc_rqst *, u32, struct nlm_res *);
-static void nlm4svc_callback_exit(struct rpc_task *);
+
+static const struct rpc_call_ops nlm4svc_callback_ops;
/*
* Obtain client and file from arguments
@@ -470,7 +471,6 @@ nlm4svc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp,
}
-
/*
* This is the generic lockd callback for async RPC calls
*/
@@ -494,7 +494,7 @@ nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
call->a_host = host;
memcpy(&call->a_args, resp, sizeof(*resp));
- if (nlmsvc_async_call(call, proc, nlm4svc_callback_exit) < 0)
+ if (nlmsvc_async_call(call, proc, &nlm4svc_callback_ops) < 0)
goto error;
return rpc_success;
@@ -504,10 +504,9 @@ nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
return rpc_system_err;
}
-static void
-nlm4svc_callback_exit(struct rpc_task *task)
+static void nlm4svc_callback_exit(struct rpc_task *task, void *data)
{
- struct nlm_rqst *call = (struct nlm_rqst *) task->tk_calldata;
+ struct nlm_rqst *call = data;
if (task->tk_status < 0) {
dprintk("lockd: %4d callback failed (errno = %d)\n",
@@ -517,6 +516,10 @@ nlm4svc_callback_exit(struct rpc_task *task)
kfree(call);
}
+static const struct rpc_call_ops nlm4svc_callback_ops = {
+ .rpc_call_done = nlm4svc_callback_exit,
+};
+
/*
* NLM Server procedures.
*/
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 49f959796b6..9cfced65d4a 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -41,7 +41,8 @@
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
static int nlmsvc_remove_block(struct nlm_block *block);
-static void nlmsvc_grant_callback(struct rpc_task *task);
+
+static const struct rpc_call_ops nlmsvc_grant_ops;
/*
* The list of blocked locks to retry
@@ -226,31 +227,27 @@ failed:
* It is the caller's responsibility to check whether the file
* can be closed hereafter.
*/
-static void
+static int
nlmsvc_delete_block(struct nlm_block *block, int unlock)
{
struct file_lock *fl = &block->b_call.a_args.lock.fl;
struct nlm_file *file = block->b_file;
struct nlm_block **bp;
+ int status = 0;
dprintk("lockd: deleting block %p...\n", block);
/* Remove block from list */
nlmsvc_remove_block(block);
- if (fl->fl_next)
- posix_unblock_lock(file->f_file, fl);
- if (unlock) {
- fl->fl_type = F_UNLCK;
- posix_lock_file(file->f_file, fl);
- block->b_granted = 0;
- }
+ if (unlock)
+ status = posix_unblock_lock(file->f_file, fl);
/* If the block is in the middle of a GRANT callback,
* don't kill it yet. */
if (block->b_incall) {
nlmsvc_insert_block(block, NLM_NEVER);
block->b_done = 1;
- return;
+ return status;
}
/* Remove block from file's list of blocks */
@@ -265,6 +262,7 @@ nlmsvc_delete_block(struct nlm_block *block, int unlock)
nlm_release_host(block->b_host);
nlmclnt_freegrantargs(&block->b_call);
kfree(block);
+ return status;
}
/*
@@ -275,6 +273,7 @@ int
nlmsvc_traverse_blocks(struct nlm_host *host, struct nlm_file *file, int action)
{
struct nlm_block *block, *next;
+ /* XXX: Will everything get cleaned up if we don't unlock here? */
down(&file->f_sema);
for (block = file->f_blocks; block; block = next) {
@@ -444,6 +443,7 @@ u32
nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
{
struct nlm_block *block;
+ int status = 0;
dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
file->f_file->f_dentry->d_inode->i_sb->s_id,
@@ -454,9 +454,9 @@ nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
down(&file->f_sema);
if ((block = nlmsvc_lookup_block(file, lock, 1)) != NULL)
- nlmsvc_delete_block(block, 1);
+ status = nlmsvc_delete_block(block, 1);
up(&file->f_sema);
- return nlm_granted;
+ return status ? nlm_lck_denied : nlm_granted;
}
/*
@@ -562,7 +562,7 @@ callback:
/* Call the client */
nlm_get_host(block->b_call.a_host);
if (nlmsvc_async_call(&block->b_call, NLMPROC_GRANTED_MSG,
- nlmsvc_grant_callback) < 0)
+ &nlmsvc_grant_ops) < 0)
nlm_release_host(block->b_call.a_host);
up(&file->f_sema);
}
@@ -575,10 +575,9 @@ callback:
* chain once more in order to have it removed by lockd itself (which can
* then sleep on the file semaphore without disrupting e.g. the nfs client).
*/
-static void
-nlmsvc_grant_callback(struct rpc_task *task)
+static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
{
- struct nlm_rqst *call = (struct nlm_rqst *) task->tk_calldata;
+ struct nlm_rqst *call = data;
struct nlm_block *block;
unsigned long timeout;
struct sockaddr_in *peer_addr = RPC_PEERADDR(task->tk_client);
@@ -614,6 +613,10 @@ nlmsvc_grant_callback(struct rpc_task *task)
nlm_release_host(call->a_host);
}
+static const struct rpc_call_ops nlmsvc_grant_ops = {
+ .rpc_call_done = nlmsvc_grant_callback,
+};
+
/*
* We received a GRANT_RES callback. Try to find the corresponding
* block.
@@ -633,11 +636,12 @@ nlmsvc_grant_reply(struct svc_rqst *rqstp, struct nlm_cookie *cookie, u32 status
file->f_count++;
down(&file->f_sema);
- if ((block = nlmsvc_find_block(cookie,&rqstp->rq_addr)) != NULL) {
+ block = nlmsvc_find_block(cookie, &rqstp->rq_addr);
+ if (block) {
if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
/* Try again in a couple of seconds */
nlmsvc_insert_block(block, 10 * HZ);
- block = NULL;
+ up(&file->f_sema);
} else {
/* Lock is now held by client, or has been rejected.
* In both cases, the block should be removed. */
@@ -648,8 +652,6 @@ nlmsvc_grant_reply(struct svc_rqst *rqstp, struct nlm_cookie *cookie, u32 status
nlmsvc_delete_block(block, 1);
}
}
- if (!block)
- up(&file->f_sema);
nlm_release_file(file);
}
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 757e344cf20..3bc437e0cf5 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -23,7 +23,8 @@
#define NLMDBG_FACILITY NLMDBG_CLIENT
static u32 nlmsvc_callback(struct svc_rqst *, u32, struct nlm_res *);
-static void nlmsvc_callback_exit(struct rpc_task *);
+
+static const struct rpc_call_ops nlmsvc_callback_ops;
#ifdef CONFIG_LOCKD_V4
static u32
@@ -518,7 +519,7 @@ nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
call->a_host = host;
memcpy(&call->a_args, resp, sizeof(*resp));
- if (nlmsvc_async_call(call, proc, nlmsvc_callback_exit) < 0)
+ if (nlmsvc_async_call(call, proc, &nlmsvc_callback_ops) < 0)
goto error;
return rpc_success;
@@ -528,10 +529,9 @@ nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
return rpc_system_err;
}
-static void
-nlmsvc_callback_exit(struct rpc_task *task)
+static void nlmsvc_callback_exit(struct rpc_task *task, void *data)
{
- struct nlm_rqst *call = (struct nlm_rqst *) task->tk_calldata;
+ struct nlm_rqst *call = data;
if (task->tk_status < 0) {
dprintk("lockd: %4d callback failed (errno = %d)\n",
@@ -541,6 +541,10 @@ nlmsvc_callback_exit(struct rpc_task *task)
kfree(call);
}
+static const struct rpc_call_ops nlmsvc_callback_ops = {
+ .rpc_call_done = nlmsvc_callback_exit,
+};
+
/*
* NLM Server procedures.
*/
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index ae4d6b426c6..fdcf105a530 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -354,7 +354,9 @@ nlm4svc_decode_reboot(struct svc_rqst *rqstp, u32 *p, struct nlm_reboot *argp)
return 0;
argp->state = ntohl(*p++);
/* Preserve the address in network byte order */
- argp->addr = *p++;
+ argp->addr = *p++;
+ argp->vers = *p++;
+ argp->proto = *p++;
return xdr_argsize_check(rqstp, p);
}
diff --git a/fs/locks.c b/fs/locks.c
index 250ef53d25e..909eab8fb1d 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -154,7 +154,7 @@ static struct file_lock *locks_alloc_lock(void)
}
/* Free a lock which is not in use. */
-static inline void locks_free_lock(struct file_lock *fl)
+static void locks_free_lock(struct file_lock *fl)
{
if (fl == NULL) {
BUG();
@@ -475,8 +475,7 @@ static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
/*
* Check whether two locks have the same owner.
*/
-static inline int
-posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
+static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
return fl2->fl_lmops == fl1->fl_lmops &&
@@ -487,7 +486,7 @@ posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
/* Remove waiter from blocker's block list.
* When blocker ends up pointing to itself then the list is empty.
*/
-static inline void __locks_delete_block(struct file_lock *waiter)
+static void __locks_delete_block(struct file_lock *waiter)
{
list_del_init(&waiter->fl_block);
list_del_init(&waiter->fl_link);
@@ -1958,22 +1957,18 @@ EXPORT_SYMBOL(posix_block_lock);
*
* lockd needs to block waiting for locks.
*/
-void
+int
posix_unblock_lock(struct file *filp, struct file_lock *waiter)
{
- /*
- * A remote machine may cancel the lock request after it's been
- * granted locally. If that happens, we need to delete the lock.
- */
+ int status = 0;
+
lock_kernel();
- if (waiter->fl_next) {
+ if (waiter->fl_next)
__locks_delete_block(waiter);
- unlock_kernel();
- } else {
- unlock_kernel();
- waiter->fl_type = F_UNLCK;
- posix_lock_file(filp, waiter);
- }
+ else
+ status = -ENOENT;
+ unlock_kernel();
+ return status;
}
EXPORT_SYMBOL(posix_unblock_lock);
diff --git a/fs/mpage.c b/fs/mpage.c
index c5adcdddf3c..e431cb3878d 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -184,7 +184,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
if (page_has_buffers(page))
goto confused;
- block_in_file = page->index << (PAGE_CACHE_SHIFT - blkbits);
+ block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
last_block = (i_size_read(inode) + blocksize - 1) >> blkbits;
bh.b_page = page;
@@ -466,7 +466,7 @@ __mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
* The page has no buffers: map it to disk
*/
BUG_ON(!PageUptodate(page));
- block_in_file = page->index << (PAGE_CACHE_SHIFT - blkbits);
+ block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
last_block = (i_size - 1) >> blkbits;
map_bh.b_page = page;
for (page_block = 0; page_block < blocks_per_page; ) {
@@ -721,7 +721,7 @@ retry:
&last_block_in_bio, &ret, wbc,
page->mapping->a_ops->writepage);
}
- if (unlikely(ret == WRITEPAGE_ACTIVATE))
+ if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE))
unlock_page(page);
if (ret || (--(wbc->nr_to_write) <= 0))
done = 1;
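
Both do_mpage_readpage() and __mpage_writepage() gain a (sector_t) cast before the shift because page->index is an unsigned long: on 32-bit kernels the shift would otherwise be performed in 32-bit arithmetic and silently truncate for file offsets of a few terabytes or more. A minimal userspace demonstration of that truncation, using made-up values, is sketched below.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Page index for an offset of 4 TiB with 4 KiB pages (made-up value). */
	uint32_t page_index = 0x40000000u;
	unsigned int shift  = 3;	/* PAGE_CACHE_SHIFT (12) - blkbits (9) */

	uint64_t wrong = page_index << shift;		/* shift done in 32 bits */
	uint64_t right = (uint64_t)page_index << shift;	/* widened first, as in the patch */

	printf("without cast: %llu\n", (unsigned long long)wrong);
	printf("with cast:    %llu\n", (unsigned long long)right);
	return 0;
}
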
diff --git a/fs/namei.c b/fs/namei.c
index 6dbbd42d8b9..1e5746eb138 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -28,6 +28,7 @@
#include <linux/syscalls.h>
#include <linux/mount.h>
#include <linux/audit.h>
+#include <linux/capability.h>
#include <linux/file.h>
#include <asm/namei.h>
#include <asm/uaccess.h>
@@ -438,7 +439,7 @@ static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, s
struct dentry * result;
struct inode *dir = parent->d_inode;
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
/*
* First re-do the cached lookup just in case it was created
* while we waited for the directory semaphore..
@@ -464,7 +465,7 @@ static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, s
else
result = dentry;
}
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
return result;
}
@@ -472,7 +473,7 @@ static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, s
* Uhhuh! Nasty case: the cache was re-populated while
* we waited on the semaphore. Need to revalidate.
*/
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
if (result->d_op && result->d_op->d_revalidate) {
if (!result->d_op->d_revalidate(result, nd) && !d_invalidate(result)) {
dput(result);
@@ -1366,7 +1367,7 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
struct dentry *p;
if (p1 == p2) {
- down(&p1->d_inode->i_sem);
+ mutex_lock(&p1->d_inode->i_mutex);
return NULL;
}
@@ -1374,30 +1375,30 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
for (p = p1; p->d_parent != p; p = p->d_parent) {
if (p->d_parent == p2) {
- down(&p2->d_inode->i_sem);
- down(&p1->d_inode->i_sem);
+ mutex_lock(&p2->d_inode->i_mutex);
+ mutex_lock(&p1->d_inode->i_mutex);
return p;
}
}
for (p = p2; p->d_parent != p; p = p->d_parent) {
if (p->d_parent == p1) {
- down(&p1->d_inode->i_sem);
- down(&p2->d_inode->i_sem);
+ mutex_lock(&p1->d_inode->i_mutex);
+ mutex_lock(&p2->d_inode->i_mutex);
return p;
}
}
- down(&p1->d_inode->i_sem);
- down(&p2->d_inode->i_sem);
+ mutex_lock(&p1->d_inode->i_mutex);
+ mutex_lock(&p2->d_inode->i_mutex);
return NULL;
}
void unlock_rename(struct dentry *p1, struct dentry *p2)
{
- up(&p1->d_inode->i_sem);
+ mutex_unlock(&p1->d_inode->i_mutex);
if (p1 != p2) {
- up(&p2->d_inode->i_sem);
+ mutex_unlock(&p2->d_inode->i_mutex);
up(&p1->d_inode->i_sb->s_vfs_rename_sem);
}
}
@@ -1491,7 +1492,7 @@ int may_open(struct nameidata *nd, int acc_mode, int flag)
if (!error) {
DQUOT_INIT(inode);
- error = do_truncate(dentry, 0, NULL);
+ error = do_truncate(dentry, 0, ATTR_MTIME|ATTR_CTIME, NULL);
}
put_write_access(inode);
if (error)
@@ -1563,14 +1564,14 @@ int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd)
dir = nd->dentry;
nd->flags &= ~LOOKUP_PARENT;
- down(&dir->d_inode->i_sem);
+ mutex_lock(&dir->d_inode->i_mutex);
path.dentry = lookup_hash(nd);
path.mnt = nd->mnt;
do_last:
error = PTR_ERR(path.dentry);
if (IS_ERR(path.dentry)) {
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
goto exit;
}
@@ -1579,7 +1580,7 @@ do_last:
if (!IS_POSIXACL(dir->d_inode))
mode &= ~current->fs->umask;
error = vfs_create(dir->d_inode, path.dentry, mode, nd);
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
dput(nd->dentry);
nd->dentry = path.dentry;
if (error)
@@ -1593,7 +1594,7 @@ do_last:
/*
* It already exists.
*/
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
error = -EEXIST;
if (flag & O_EXCL)
@@ -1665,7 +1666,7 @@ do_link:
goto exit;
}
dir = nd->dentry;
- down(&dir->d_inode->i_sem);
+ mutex_lock(&dir->d_inode->i_mutex);
path.dentry = lookup_hash(nd);
path.mnt = nd->mnt;
__putname(nd->last.name);
@@ -1680,13 +1681,13 @@ do_link:
* Simple function to lookup and return a dentry and create it
* if it doesn't exist. Is SMP-safe.
*
- * Returns with nd->dentry->d_inode->i_sem locked.
+ * Returns with nd->dentry->d_inode->i_mutex locked.
*/
struct dentry *lookup_create(struct nameidata *nd, int is_dir)
{
struct dentry *dentry = ERR_PTR(-EEXIST);
- down(&nd->dentry->d_inode->i_sem);
+ mutex_lock(&nd->dentry->d_inode->i_mutex);
/*
* Yucky last component or no last component at all?
* (foo/., foo/.., /////)
@@ -1784,7 +1785,7 @@ asmlinkage long sys_mknod(const char __user * filename, int mode, unsigned dev)
}
dput(dentry);
}
- up(&nd.dentry->d_inode->i_sem);
+ mutex_unlock(&nd.dentry->d_inode->i_mutex);
path_release(&nd);
out:
putname(tmp);
@@ -1836,7 +1837,7 @@ asmlinkage long sys_mkdir(const char __user * pathname, int mode)
error = vfs_mkdir(nd.dentry->d_inode, dentry, mode);
dput(dentry);
}
- up(&nd.dentry->d_inode->i_sem);
+ mutex_unlock(&nd.dentry->d_inode->i_mutex);
path_release(&nd);
out:
putname(tmp);
@@ -1885,7 +1886,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
DQUOT_INIT(dir);
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&dentry->d_inode->i_mutex);
dentry_unhash(dentry);
if (d_mountpoint(dentry))
error = -EBUSY;
@@ -1897,7 +1898,7 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
dentry->d_inode->i_flags |= S_DEAD;
}
}
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
if (!error) {
d_delete(dentry);
}
@@ -1932,14 +1933,14 @@ asmlinkage long sys_rmdir(const char __user * pathname)
error = -EBUSY;
goto exit1;
}
- down(&nd.dentry->d_inode->i_sem);
+ mutex_lock(&nd.dentry->d_inode->i_mutex);
dentry = lookup_hash(&nd);
error = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
error = vfs_rmdir(nd.dentry->d_inode, dentry);
dput(dentry);
}
- up(&nd.dentry->d_inode->i_sem);
+ mutex_unlock(&nd.dentry->d_inode->i_mutex);
exit1:
path_release(&nd);
exit:
@@ -1959,7 +1960,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
DQUOT_INIT(dir);
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&dentry->d_inode->i_mutex);
if (d_mountpoint(dentry))
error = -EBUSY;
else {
@@ -1967,7 +1968,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
if (!error)
error = dir->i_op->unlink(dir, dentry);
}
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
/* We don't d_delete() NFS sillyrenamed files--they still exist. */
if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
@@ -1979,7 +1980,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
/*
* Make sure that the actual truncation of the file will occur outside its
- * directory's i_sem. Truncate can take a long time if there is a lot of
+ * directory's i_mutex. Truncate can take a long time if there is a lot of
* writeout happening, and we don't want to prevent access to the directory
* while waiting on the I/O.
*/
@@ -2001,7 +2002,7 @@ asmlinkage long sys_unlink(const char __user * pathname)
error = -EISDIR;
if (nd.last_type != LAST_NORM)
goto exit1;
- down(&nd.dentry->d_inode->i_sem);
+ mutex_lock(&nd.dentry->d_inode->i_mutex);
dentry = lookup_hash(&nd);
error = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
@@ -2015,7 +2016,7 @@ asmlinkage long sys_unlink(const char __user * pathname)
exit2:
dput(dentry);
}
- up(&nd.dentry->d_inode->i_sem);
+ mutex_unlock(&nd.dentry->d_inode->i_mutex);
if (inode)
iput(inode); /* truncate the inode here */
exit1:
@@ -2075,7 +2076,7 @@ asmlinkage long sys_symlink(const char __user * oldname, const char __user * new
error = vfs_symlink(nd.dentry->d_inode, dentry, from, S_IALLUGO);
dput(dentry);
}
- up(&nd.dentry->d_inode->i_sem);
+ mutex_unlock(&nd.dentry->d_inode->i_mutex);
path_release(&nd);
out:
putname(to);
@@ -2113,10 +2114,10 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
if (error)
return error;
- down(&old_dentry->d_inode->i_sem);
+ mutex_lock(&old_dentry->d_inode->i_mutex);
DQUOT_INIT(dir);
error = dir->i_op->link(old_dentry, dir, new_dentry);
- up(&old_dentry->d_inode->i_sem);
+ mutex_unlock(&old_dentry->d_inode->i_mutex);
if (!error)
fsnotify_create(dir, new_dentry->d_name.name);
return error;
@@ -2157,7 +2158,7 @@ asmlinkage long sys_link(const char __user * oldname, const char __user * newnam
error = vfs_link(old_nd.dentry, nd.dentry->d_inode, new_dentry);
dput(new_dentry);
}
- up(&nd.dentry->d_inode->i_sem);
+ mutex_unlock(&nd.dentry->d_inode->i_mutex);
out_release:
path_release(&nd);
out:
@@ -2178,7 +2179,7 @@ exit:
* sb->s_vfs_rename_sem. We might be more accurate, but that's another
* story.
* c) we have to lock _three_ objects - parents and victim (if it exists).
- * And that - after we got ->i_sem on parents (until then we don't know
+ * And that - after we got ->i_mutex on parents (until then we don't know
* whether the target exists). Solution: try to be smart with locking
* order for inodes. We rely on the fact that tree topology may change
* only under ->s_vfs_rename_sem _and_ that parent of the object we
@@ -2195,9 +2196,9 @@ exit:
* stuff into VFS), but the former is not going away. Solution: the same
* trick as in rmdir().
* e) conversion from fhandle to dentry may come in the wrong moment - when
- * we are removing the target. Solution: we will have to grab ->i_sem
+ * we are removing the target. Solution: we will have to grab ->i_mutex
* in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
- * ->i_sem on parents, which works but leads to some truely excessive
+ * ->i_mutex on parents, which works but leads to some truly excessive
* locking].
*/
static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
@@ -2222,7 +2223,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
target = new_dentry->d_inode;
if (target) {
- down(&target->i_sem);
+ mutex_lock(&target->i_mutex);
dentry_unhash(new_dentry);
}
if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
@@ -2232,7 +2233,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
if (target) {
if (!error)
target->i_flags |= S_DEAD;
- up(&target->i_sem);
+ mutex_unlock(&target->i_mutex);
if (d_unhashed(new_dentry))
d_rehash(new_dentry);
dput(new_dentry);
@@ -2255,7 +2256,7 @@ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
dget(new_dentry);
target = new_dentry->d_inode;
if (target)
- down(&target->i_sem);
+ mutex_lock(&target->i_mutex);
if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
error = -EBUSY;
else
@@ -2266,7 +2267,7 @@ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
d_move(old_dentry, new_dentry);
}
if (target)
- up(&target->i_sem);
+ mutex_unlock(&target->i_mutex);
dput(new_dentry);
return error;
}
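
Most of the fs/namei.c changes are the mechanical i_sem to i_mutex conversion, but lock_rename()/unlock_rename() also show the ordering rule that makes taking two directory locks safe: lock the ancestor before the descendant, and fall back to a fixed order for unrelated parents. The sketch below mirrors that rule with pthread mutexes and an invented struct dir; it is a userspace analogy, not VFS code.

#include <pthread.h>
#include <stdio.h>

struct dir {
	const char *name;
	struct dir *parent;		/* the root points to itself, as in the dcache */
	pthread_mutex_t lock;
};

static int is_ancestor(struct dir *a, struct dir *d)
{
	struct dir *p;

	for (p = d; p->parent != p; p = p->parent)
		if (p->parent == a)
			return 1;
	return 0;
}

static void lock_pair(struct dir *p1, struct dir *p2)
{
	if (p1 == p2) {
		pthread_mutex_lock(&p1->lock);
		return;
	}
	if (is_ancestor(p2, p1)) {		/* p2 is above p1: take p2 first */
		pthread_mutex_lock(&p2->lock);
		pthread_mutex_lock(&p1->lock);
	} else {				/* p1 above p2, or unrelated parents */
		pthread_mutex_lock(&p1->lock);
		pthread_mutex_lock(&p2->lock);
	}
}

static void unlock_pair(struct dir *p1, struct dir *p2)
{
	pthread_mutex_unlock(&p1->lock);
	if (p1 != p2)
		pthread_mutex_unlock(&p2->lock);
}

int main(void)
{
	struct dir root = { "/",   &root, PTHREAD_MUTEX_INITIALIZER };
	struct dir a    = { "a",   &root, PTHREAD_MUTEX_INITIALIZER };
	struct dir ab   = { "a/b", &a,    PTHREAD_MUTEX_INITIALIZER };

	lock_pair(&ab, &a);	/* the ancestor "a" is locked before "a/b" */
	printf("locked %s and %s in a deadlock-safe order\n", a.name, ab.name);
	unlock_pair(&ab, &a);
	return 0;
}
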
diff --git a/fs/namespace.c b/fs/namespace.c
index 2019899f2ab..8bc15b362d2 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/acct.h>
+#include <linux/capability.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/namespace.h>
@@ -355,14 +356,14 @@ static int show_vfsmnt(struct seq_file *m, void *v)
{ MS_SYNCHRONOUS, ",sync" },
{ MS_DIRSYNC, ",dirsync" },
{ MS_MANDLOCK, ",mand" },
- { MS_NOATIME, ",noatime" },
- { MS_NODIRATIME, ",nodiratime" },
{ 0, NULL }
};
static struct proc_fs_info mnt_info[] = {
{ MNT_NOSUID, ",nosuid" },
{ MNT_NODEV, ",nodev" },
{ MNT_NOEXEC, ",noexec" },
+ { MNT_NOATIME, ",noatime" },
+ { MNT_NODIRATIME, ",nodiratime" },
{ 0, NULL }
};
struct proc_fs_info *fs_infop;
@@ -451,7 +452,7 @@ EXPORT_SYMBOL(may_umount);
void release_mounts(struct list_head *head)
{
struct vfsmount *mnt;
- while(!list_empty(head)) {
+ while (!list_empty(head)) {
mnt = list_entry(head->next, struct vfsmount, mnt_hash);
list_del_init(&mnt->mnt_hash);
if (mnt->mnt_parent != mnt) {
@@ -814,7 +815,7 @@ static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
return -ENOTDIR;
err = -ENOENT;
- down(&nd->dentry->d_inode->i_sem);
+ mutex_lock(&nd->dentry->d_inode->i_mutex);
if (IS_DEADDIR(nd->dentry->d_inode))
goto out_unlock;
@@ -826,7 +827,7 @@ static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry))
err = attach_recursive_mnt(mnt, nd, NULL);
out_unlock:
- up(&nd->dentry->d_inode->i_sem);
+ mutex_unlock(&nd->dentry->d_inode->i_mutex);
if (!err)
security_sb_post_addmount(mnt, nd);
return err;
@@ -962,7 +963,7 @@ static int do_move_mount(struct nameidata *nd, char *old_name)
goto out;
err = -ENOENT;
- down(&nd->dentry->d_inode->i_sem);
+ mutex_lock(&nd->dentry->d_inode->i_mutex);
if (IS_DEADDIR(nd->dentry->d_inode))
goto out1;
@@ -1004,7 +1005,7 @@ static int do_move_mount(struct nameidata *nd, char *old_name)
list_del_init(&old_nd.mnt->mnt_expire);
spin_unlock(&vfsmount_lock);
out1:
- up(&nd->dentry->d_inode->i_sem);
+ mutex_unlock(&nd->dentry->d_inode->i_mutex);
out:
up_write(&namespace_sem);
if (!err)
@@ -1286,7 +1287,13 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
mnt_flags |= MNT_NODEV;
if (flags & MS_NOEXEC)
mnt_flags |= MNT_NOEXEC;
- flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE);
+ if (flags & MS_NOATIME)
+ mnt_flags |= MNT_NOATIME;
+ if (flags & MS_NODIRATIME)
+ mnt_flags |= MNT_NODIRATIME;
+
+ flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
+ MS_NOATIME | MS_NODIRATIME);
/* ... and get the mountpoint */
retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
@@ -1526,6 +1533,10 @@ static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
* pointed to by put_old must yield the same directory as new_root. No other
* file system may be mounted on put_old. After all, new_root is a mountpoint.
*
+ * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
+ * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
+ * in this situation.
+ *
* Notes:
* - we don't move root/cwd if they are not at the root (reason: if something
* cared enough to change them, it's probably wrong to force them elsewhere)
@@ -1569,7 +1580,7 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
user_nd.dentry = dget(current->fs->root);
read_unlock(&current->fs->lock);
down_write(&namespace_sem);
- down(&old_nd.dentry->d_inode->i_sem);
+ mutex_lock(&old_nd.dentry->d_inode->i_mutex);
error = -EINVAL;
if (IS_MNT_SHARED(old_nd.mnt) ||
IS_MNT_SHARED(new_nd.mnt->mnt_parent) ||
@@ -1622,7 +1633,7 @@ asmlinkage long sys_pivot_root(const char __user * new_root,
path_release(&root_parent);
path_release(&parent_nd);
out2:
- up(&old_nd.dentry->d_inode->i_sem);
+ mutex_unlock(&old_nd.dentry->d_inode->i_mutex);
up_write(&namespace_sem);
path_release(&user_nd);
path_release(&old_nd);
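
do_mount() now turns the MS_NOATIME/MS_NODIRATIME request bits into per-vfsmount MNT_* flags and strips them before they reach the filesystem, so later checks (such as the nfs_getattr() change further down) look only at mnt->mnt_flags. A toy version of that translate-then-consult flow is sketched below; the MNT_NOATIME value and struct mount layout are simplified stand-ins.

#include <stdio.h>

#define MS_NOATIME	(1 << 10)	/* mount(2) request flag */
#define MNT_NOATIME	(1 << 3)	/* per-mount flag; value chosen for the demo */

struct mount {
	int mnt_flags;
};

static int want_atime_update(const struct mount *mnt)
{
	return !(mnt->mnt_flags & MNT_NOATIME);
}

int main(void)
{
	unsigned long ms_flags = MS_NOATIME;
	struct mount mnt = { 0 };

	/* mount time: translate the request bit and strip it, as do_mount() does */
	if (ms_flags & MS_NOATIME)
		mnt.mnt_flags |= MNT_NOATIME;
	ms_flags &= ~MS_NOATIME;

	/* access time: only the per-mount flag is consulted afterwards */
	printf("update atime on this mount? %s (ms_flags left: %#lx)\n",
	       want_atime_update(&mnt) ? "yes" : "no", ms_flags);
	return 0;
}
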
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index a9f7a8ab1d5..cfd76f431dc 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -365,7 +365,7 @@ ncp_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos)
spin_lock(&dcache_lock);
next = parent->d_subdirs.next;
while (next != &parent->d_subdirs) {
- dent = list_entry(next, struct dentry, d_child);
+ dent = list_entry(next, struct dentry, d_u.d_child);
if ((unsigned long)dent->d_fsdata == fpos) {
if (dent->d_inode)
dget_locked(dent);
diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c
index 4947d9b11fc..973b444d691 100644
--- a/fs/ncpfs/file.c
+++ b/fs/ncpfs/file.c
@@ -262,7 +262,7 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
}
vfree(bouncebuffer);
- inode_update_time(inode, 1);
+ file_update_time(file);
*ppos = pos;
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index fd3efdca5ae..d6e0c089e1b 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -10,6 +10,7 @@
#include <linux/config.h>
#include <asm/uaccess.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
index 9e4dc30c243..799e5c2bec5 100644
--- a/fs/ncpfs/ncplib_kernel.h
+++ b/fs/ncpfs/ncplib_kernel.h
@@ -196,7 +196,7 @@ ncp_renew_dentries(struct dentry *parent)
spin_lock(&dcache_lock);
next = parent->d_subdirs.next;
while (next != &parent->d_subdirs) {
- dentry = list_entry(next, struct dentry, d_child);
+ dentry = list_entry(next, struct dentry, d_u.d_child);
if (dentry->d_fsdata == NULL)
ncp_age_dentry(server, dentry);
@@ -218,7 +218,7 @@ ncp_invalidate_dircache_entries(struct dentry *parent)
spin_lock(&dcache_lock);
next = parent->d_subdirs.next;
while (next != &parent->d_subdirs) {
- dentry = list_entry(next, struct dentry, d_child);
+ dentry = list_entry(next, struct dentry, d_u.d_child);
dentry->d_fsdata = NULL;
ncp_age_dentry(server, dentry);
next = next->next;
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 8b3bb715d17..ec61fd56a1a 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -13,4 +13,5 @@ nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \
delegation.o idmap.o \
callback.o callback_xdr.o callback_proc.o
nfs-$(CONFIG_NFS_DIRECTIO) += direct.o
+nfs-$(CONFIG_SYSCTL) += sysctl.o
nfs-objs := $(nfs-y)
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 30cae360286..fcd97406a77 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -34,6 +34,7 @@ static struct nfs_callback_data nfs_callback_info;
static DECLARE_MUTEX(nfs_callback_sema);
static struct svc_program nfs4_callback_program;
+unsigned int nfs_callback_set_tcpport;
unsigned short nfs_callback_tcpport;
/*
@@ -98,7 +99,7 @@ int nfs_callback_up(void)
if (!serv)
goto out_err;
/* FIXME: We don't want to register this socket with the portmapper */
- ret = svc_makesock(serv, IPPROTO_TCP, 0);
+ ret = svc_makesock(serv, IPPROTO_TCP, nfs_callback_set_tcpport);
if (ret < 0)
goto out_destroy;
if (!list_empty(&serv->sv_permsocks)) {
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index a0db2d4f941..b252e7fe53a 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -65,6 +65,7 @@ extern unsigned nfs4_callback_recall(struct cb_recallargs *args, void *dummy);
extern int nfs_callback_up(void);
extern int nfs_callback_down(void);
+extern unsigned int nfs_callback_set_tcpport;
extern unsigned short nfs_callback_tcpport;
#endif /* __LINUX_FS_NFS_CALLBACK_H */
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 65f1e19e4d1..462cfceb50c 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -35,7 +35,9 @@ unsigned nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres
if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
goto out_iput;
res->size = i_size_read(inode);
- res->change_attr = NFS_CHANGE_ATTR(inode);
+ res->change_attr = delegation->change_attr;
+ if (nfsi->npages != 0)
+ res->change_attr++;
res->ctime = inode->i_ctime;
res->mtime = inode->i_mtime;
res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 618a327027b..c6f07c1c71e 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -8,6 +8,7 @@
*/
#include <linux/config.h>
#include <linux/completion.h>
+#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
@@ -130,6 +131,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
sizeof(delegation->stateid.data));
delegation->type = res->delegation_type;
delegation->maxsize = res->maxsize;
+ delegation->change_attr = nfsi->change_attr;
delegation->cred = get_rpccred(cred);
delegation->inode = inode;
@@ -157,8 +159,6 @@ static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *
{
int res = 0;
- __nfs_revalidate_inode(NFS_SERVER(inode), inode);
-
res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
nfs_free_delegation(delegation);
return res;
@@ -231,6 +231,49 @@ restart:
spin_unlock(&clp->cl_lock);
}
+int nfs_do_expire_all_delegations(void *ptr)
+{
+ struct nfs4_client *clp = ptr;
+ struct nfs_delegation *delegation;
+ struct inode *inode;
+
+ allow_signal(SIGKILL);
+restart:
+ spin_lock(&clp->cl_lock);
+ if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
+ goto out;
+ if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
+ goto out;
+ list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
+ inode = igrab(delegation->inode);
+ if (inode == NULL)
+ continue;
+ spin_unlock(&clp->cl_lock);
+ nfs_inode_return_delegation(inode);
+ iput(inode);
+ goto restart;
+ }
+out:
+ spin_unlock(&clp->cl_lock);
+ nfs4_put_client(clp);
+ module_put_and_exit(0);
+}
+
+void nfs_expire_all_delegations(struct nfs4_client *clp)
+{
+ struct task_struct *task;
+
+ __module_get(THIS_MODULE);
+ atomic_inc(&clp->cl_count);
+ task = kthread_run(nfs_do_expire_all_delegations, clp,
+ "%u.%u.%u.%u-delegreturn",
+ NIPQUAD(clp->cl_addr));
+ if (!IS_ERR(task))
+ return;
+ nfs4_put_client(clp);
+ module_put(THIS_MODULE);
+}
+
/*
* Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
*/
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 2fcc30de924..7a0b2bfce77 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -21,6 +21,7 @@ struct nfs_delegation {
#define NFS_DELEGATION_NEED_RECLAIM 1
long flags;
loff_t maxsize;
+ __u64 change_attr;
};
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
@@ -30,6 +31,7 @@ int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *s
struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle);
void nfs_return_all_delegations(struct super_block *sb);
+void nfs_expire_all_delegations(struct nfs4_client *clp);
void nfs_handle_cb_pathdown(struct nfs4_client *clp);
void nfs_delegation_mark_reclaim(struct nfs4_client *clp);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index c0d1a214572..a1554bead69 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -194,7 +194,7 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
spin_unlock(&inode->i_lock);
/* Ensure consistent page alignment of the data.
* Note: assumes we have exclusive access to this mapping either
- * through inode->i_sem or some other mechanism.
+ * through inode->i_mutex or some other mechanism.
*/
if (page->index == 0)
invalidate_inode_pages2_range(inode->i_mapping, PAGE_CACHE_SIZE, -1);
@@ -573,7 +573,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
{
- down(&filp->f_dentry->d_inode->i_sem);
+ mutex_lock(&filp->f_dentry->d_inode->i_mutex);
switch (origin) {
case 1:
offset += filp->f_pos;
@@ -589,7 +589,7 @@ loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
((struct nfs_open_context *)filp->private_data)->dir_cookie = 0;
}
out:
- up(&filp->f_dentry->d_inode->i_sem);
+ mutex_unlock(&filp->f_dentry->d_inode->i_mutex);
return offset;
}
@@ -1001,7 +1001,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
openflags &= ~(O_CREAT|O_TRUNC);
/*
- * Note: we're not holding inode->i_sem and so may be racing with
+ * Note: we're not holding inode->i_mutex and so may be racing with
* operations that change the directory. We therefore save the
* change attribute *before* we do the RPC call.
*/
@@ -1051,7 +1051,7 @@ static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc)
return dentry;
if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR))
return NULL;
- /* Note: caller is already holding the dir->i_sem! */
+ /* Note: caller is already holding the dir->i_mutex! */
dentry = d_alloc(parent, &name);
if (dentry == NULL)
return NULL;
@@ -1550,8 +1550,10 @@ go_ahead:
}
nfs_inode_return_delegation(old_inode);
- if (new_inode)
+ if (new_inode != NULL) {
+ nfs_inode_return_delegation(new_inode);
d_delete(new_dentry);
+ }
nfs_begin_data_update(old_dir);
nfs_begin_data_update(new_dir);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 07922881760..10ae377e68f 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -122,9 +122,10 @@ nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
{
int i;
for (i = 0; i < npages; i++) {
- if (do_dirty)
- set_page_dirty_lock(pages[i]);
- page_cache_release(pages[i]);
+ struct page *page = pages[i];
+ if (do_dirty && !PageCompound(page))
+ set_page_dirty_lock(page);
+ page_cache_release(page);
}
kfree(pages);
}
@@ -154,6 +155,7 @@ static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, unsigned int
struct list_head *list;
struct nfs_direct_req *dreq;
unsigned int reads = 0;
+ unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
if (!dreq)
@@ -167,7 +169,7 @@ static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, unsigned int
list = &dreq->list;
for(;;) {
- struct nfs_read_data *data = nfs_readdata_alloc();
+ struct nfs_read_data *data = nfs_readdata_alloc(rpages);
if (unlikely(!data)) {
while (!list_empty(list)) {
@@ -268,8 +270,6 @@ static void nfs_direct_read_schedule(struct nfs_direct_req *dreq,
NFS_PROTO(inode)->read_setup(data);
data->task.tk_cookie = (unsigned long) inode;
- data->task.tk_calldata = data;
- data->task.tk_release = nfs_readdata_release;
data->complete = nfs_direct_read_result;
lock_kernel();
@@ -433,7 +433,7 @@ static ssize_t nfs_direct_write_seg(struct inode *inode,
struct nfs_writeverf first_verf;
struct nfs_write_data *wdata;
- wdata = nfs_writedata_alloc();
+ wdata = nfs_writedata_alloc(NFS_SERVER(inode)->wpages);
if (!wdata)
return -ENOMEM;
@@ -662,10 +662,10 @@ nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t
.iov_len = count,
};
- dprintk("nfs: direct read(%s/%s, %lu@%lu)\n",
+ dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
file->f_dentry->d_parent->d_name.name,
file->f_dentry->d_name.name,
- (unsigned long) count, (unsigned long) pos);
+ (unsigned long) count, (long long) pos);
if (!is_sync_kiocb(iocb))
goto out;
@@ -718,9 +718,7 @@ out:
ssize_t
nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
{
- ssize_t retval = -EINVAL;
- loff_t *ppos = &iocb->ki_pos;
- unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+ ssize_t retval;
struct file *file = iocb->ki_filp;
struct nfs_open_context *ctx =
(struct nfs_open_context *) file->private_data;
@@ -728,35 +726,32 @@ nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count,
struct inode *inode = mapping->host;
struct iovec iov = {
.iov_base = (char __user *)buf,
- .iov_len = count,
};
- dfprintk(VFS, "nfs: direct write(%s/%s(%ld), %lu@%lu)\n",
+ dfprintk(VFS, "nfs: direct write(%s/%s, %lu@%Ld)\n",
file->f_dentry->d_parent->d_name.name,
- file->f_dentry->d_name.name, inode->i_ino,
- (unsigned long) count, (unsigned long) pos);
+ file->f_dentry->d_name.name,
+ (unsigned long) count, (long long) pos);
+ retval = -EINVAL;
if (!is_sync_kiocb(iocb))
goto out;
- if (count < 0)
- goto out;
- if (pos < 0)
+
+ retval = generic_write_checks(file, &pos, &count, 0);
+ if (retval)
goto out;
- retval = -EFAULT;
- if (!access_ok(VERIFY_READ, iov.iov_base, iov.iov_len))
+
+ retval = -EINVAL;
+ if ((ssize_t) count < 0)
goto out;
- retval = -EFBIG;
- if (limit != RLIM_INFINITY) {
- if (pos >= limit) {
- send_sig(SIGXFSZ, current, 0);
- goto out;
- }
- if (count > limit - (unsigned long) pos)
- count = limit - (unsigned long) pos;
- }
retval = 0;
if (!count)
goto out;
+ iov.iov_len = count,
+
+ retval = -EFAULT;
+ if (!access_ok(VERIFY_READ, iov.iov_base, iov.iov_len))
+ goto out;
retval = nfs_sync_mapping(mapping);
if (retval)
@@ -766,7 +761,7 @@ nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count,
if (mapping->nrpages)
invalidate_inode_pages2(mapping);
if (retval > 0)
- *ppos = pos + retval;
+ iocb->ki_pos = pos + retval;
out:
return retval;
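
nfs_file_direct_write() drops its hand-rolled RLIMIT_FSIZE handling in favour of generic_write_checks(), which rejects writes starting at or beyond the limit and clamps the byte count otherwise. The fragment below is a rough userspace approximation of that clamping; write_checks() is an invented helper, not the VFS function, and signal delivery is omitted.

#include <stdio.h>
#include <stddef.h>

#define RLIM_INFINITY	(~0ULL)
#define EFBIG		27

/* Returns 0 (possibly shrinking *count) or a negative errno-style value. */
static int write_checks(unsigned long long limit, unsigned long long pos,
			size_t *count)
{
	if (limit == RLIM_INFINITY)
		return 0;
	if (pos >= limit)
		return -EFBIG;
	if (*count > limit - pos)
		*count = (size_t)(limit - pos);
	return 0;
}

int main(void)
{
	size_t count = 8192;
	int err = write_checks(10000, 4096, &count);

	printf("err=%d, count clamped to %zu\n", err, count);
	return 0;
}
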
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index ffb8df91dc3..821edd30333 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -54,7 +54,11 @@
#define IDMAP_HASH_SZ 128
+/* Default cache timeout is 10 minutes */
+unsigned int nfs_idmap_cache_timeout = 600 * HZ;
+
struct idmap_hashent {
+ unsigned long ih_expires;
__u32 ih_id;
int ih_namelen;
char ih_name[IDMAP_NAMESZ];
@@ -149,6 +153,8 @@ idmap_lookup_name(struct idmap_hashtable *h, const char *name, size_t len)
if (he->ih_namelen != len || memcmp(he->ih_name, name, len) != 0)
return NULL;
+ if (time_after(jiffies, he->ih_expires))
+ return NULL;
return he;
}
@@ -164,6 +170,8 @@ idmap_lookup_id(struct idmap_hashtable *h, __u32 id)
struct idmap_hashent *he = idmap_id_hash(h, id);
if (he->ih_id != id || he->ih_namelen == 0)
return NULL;
+ if (time_after(jiffies, he->ih_expires))
+ return NULL;
return he;
}
@@ -192,6 +200,7 @@ idmap_update_entry(struct idmap_hashent *he, const char *name,
memcpy(he->ih_name, name, namelen);
he->ih_name[namelen] = '\0';
he->ih_namelen = namelen;
+ he->ih_expires = jiffies + nfs_idmap_cache_timeout;
}
/*
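
Each idmap hash entry now records an ih_expires timestamp and lookups treat entries past that jiffies value as misses. The comparison relies on the kernel's wraparound-safe time_after() test; the standalone demo below uses the same signed-difference trick, with a hand-advanced counter standing in for jiffies.

#include <stdio.h>

/* Same signed-difference trick as the kernel's time_after(a, b). */
#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long jiffies = (unsigned long)-30;	/* counter about to wrap */
	unsigned long timeout = 600;			/* pretend HZ == 1 here */
	unsigned long expires = jiffies + timeout;	/* wraps past zero */
	int i;

	for (i = 0; i < 3; i++) {
		printf("jiffies=%lu  expired=%d\n",
		       jiffies, time_after(jiffies, expires));
		jiffies += 400;
	}
	return 0;
}
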
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 432f41cd75e..a77ee95b7ef 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -40,6 +40,7 @@
#include <asm/uaccess.h>
#include "nfs4_fs.h"
+#include "callback.h"
#include "delegation.h"
#define NFSDBG_FACILITY NFSDBG_VFS
@@ -221,10 +222,10 @@ nfs_calc_block_size(u64 tsize)
static inline unsigned long
nfs_block_size(unsigned long bsize, unsigned char *nrbitsp)
{
- if (bsize < 1024)
- bsize = NFS_DEF_FILE_IO_BUFFER_SIZE;
- else if (bsize >= NFS_MAX_FILE_IO_BUFFER_SIZE)
- bsize = NFS_MAX_FILE_IO_BUFFER_SIZE;
+ if (bsize < NFS_MIN_FILE_IO_SIZE)
+ bsize = NFS_DEF_FILE_IO_SIZE;
+ else if (bsize >= NFS_MAX_FILE_IO_SIZE)
+ bsize = NFS_MAX_FILE_IO_SIZE;
return nfs_block_bits(bsize, nrbitsp);
}
@@ -307,20 +308,15 @@ nfs_sb_init(struct super_block *sb, rpc_authflavor_t authflavor)
max_rpc_payload = nfs_block_size(rpc_max_payload(server->client), NULL);
if (server->rsize > max_rpc_payload)
server->rsize = max_rpc_payload;
- if (server->wsize > max_rpc_payload)
- server->wsize = max_rpc_payload;
-
+ if (server->rsize > NFS_MAX_FILE_IO_SIZE)
+ server->rsize = NFS_MAX_FILE_IO_SIZE;
server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (server->rpages > NFS_READ_MAXIOV) {
- server->rpages = NFS_READ_MAXIOV;
- server->rsize = server->rpages << PAGE_CACHE_SHIFT;
- }
+ if (server->wsize > max_rpc_payload)
+ server->wsize = max_rpc_payload;
+ if (server->wsize > NFS_MAX_FILE_IO_SIZE)
+ server->wsize = NFS_MAX_FILE_IO_SIZE;
server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (server->wpages > NFS_WRITE_MAXIOV) {
- server->wpages = NFS_WRITE_MAXIOV;
- server->wsize = server->wpages << PAGE_CACHE_SHIFT;
- }
if (sb->s_blocksize == 0)
sb->s_blocksize = nfs_block_bits(server->wsize,
@@ -417,7 +413,6 @@ nfs_create_client(struct nfs_server *server, const struct nfs_mount_data *data)
clnt->cl_intr = 1;
clnt->cl_softrtry = 1;
- clnt->cl_chatty = 1;
return clnt;
@@ -575,11 +570,10 @@ nfs_statfs(struct super_block *sb, struct kstatfs *buf)
buf->f_namelen = server->namelen;
out:
unlock_kernel();
-
return 0;
out_err:
- printk(KERN_WARNING "nfs_statfs: statfs error = %d\n", -error);
+ dprintk("%s: statfs error = %d\n", __FUNCTION__, -error);
buf->f_bsize = buf->f_blocks = buf->f_bfree = buf->f_bavail = -1;
goto out;
@@ -650,10 +644,7 @@ int nfs_sync_mapping(struct address_space *mapping)
if (mapping->nrpages == 0)
return 0;
unmap_mapping_range(mapping, 0, 0, 0);
- ret = filemap_fdatawrite(mapping);
- if (ret != 0)
- goto out;
- ret = filemap_fdatawait(mapping);
+ ret = filemap_write_and_wait(mapping);
if (ret != 0)
goto out;
ret = nfs_wb_all(mapping->host);
@@ -870,8 +861,7 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr)
nfs_begin_data_update(inode);
/* Write all dirty data if we're changing file permissions or size */
if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_SIZE)) != 0) {
- if (filemap_fdatawrite(inode->i_mapping) == 0)
- filemap_fdatawait(inode->i_mapping);
+ filemap_write_and_wait(inode->i_mapping);
nfs_wb_all(inode);
}
/*
@@ -958,11 +948,22 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
int err;
- if (__IS_FLG(inode, MS_NOATIME))
- need_atime = 0;
- else if (__IS_FLG(inode, MS_NODIRATIME) && S_ISDIR(inode->i_mode))
+ /* Flush out writes to the server in order to update c/mtime */
+ nfs_sync_inode(inode, 0, 0, FLUSH_WAIT|FLUSH_NOCOMMIT);
+
+ /*
+ * We may force a getattr if the user cares about atime.
+ *
+ * Note that we only have to check the vfsmount flags here:
+ * - NFS always sets S_NOATIME, so checking it would give a
+ * bogus result
+ * - NFS never sets MS_NOATIME or MS_NODIRATIME so there is
+ * no point in checking those.
+ */
+ if ((mnt->mnt_flags & MNT_NOATIME) ||
+ ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
need_atime = 0;
- /* We may force a getattr if the user cares about atime */
+
if (need_atime)
err = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
else
@@ -1252,6 +1253,33 @@ void nfs_end_data_update(struct inode *inode)
atomic_dec(&nfsi->data_updates);
}
+static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ if ((fattr->valid & NFS_ATTR_PRE_CHANGE) != 0
+ && nfsi->change_attr == fattr->pre_change_attr) {
+ nfsi->change_attr = fattr->change_attr;
+ nfsi->cache_change_attribute = jiffies;
+ }
+
+ /* If we have atomic WCC data, we may update some attributes */
+ if ((fattr->valid & NFS_ATTR_WCC) != 0) {
+ if (timespec_equal(&inode->i_ctime, &fattr->pre_ctime)) {
+ memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
+ nfsi->cache_change_attribute = jiffies;
+ }
+ if (timespec_equal(&inode->i_mtime, &fattr->pre_mtime)) {
+ memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
+ nfsi->cache_change_attribute = jiffies;
+ }
+ if (inode->i_size == fattr->pre_size && nfsi->npages == 0) {
+ inode->i_size = fattr->size;
+ nfsi->cache_change_attribute = jiffies;
+ }
+ }
+}
+
/**
* nfs_check_inode_attributes - verify consistency of the inode attribute cache
* @inode - pointer to inode
@@ -1268,22 +1296,20 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
int data_unstable;
+ if ((fattr->valid & NFS_ATTR_FATTR) == 0)
+ return 0;
+
/* Are we in the process of updating data on the server? */
data_unstable = nfs_caches_unstable(inode);
- if (fattr->valid & NFS_ATTR_FATTR_V4) {
- if ((fattr->valid & NFS_ATTR_PRE_CHANGE) != 0
- && nfsi->change_attr == fattr->pre_change_attr)
- nfsi->change_attr = fattr->change_attr;
- if (nfsi->change_attr != fattr->change_attr) {
- nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
- if (!data_unstable)
- nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE;
- }
- }
+ /* Do atomic weak cache consistency updates */
+ nfs_wcc_update_inode(inode, fattr);
- if ((fattr->valid & NFS_ATTR_FATTR) == 0) {
- return 0;
+ if ((fattr->valid & NFS_ATTR_FATTR_V4) != 0 &&
+ nfsi->change_attr != fattr->change_attr) {
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+ if (!data_unstable)
+ nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE;
}
/* Has the inode gone and changed behind our back? */
@@ -1295,14 +1321,6 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
cur_size = i_size_read(inode);
new_isize = nfs_size_to_loff_t(fattr->size);
- /* If we have atomic WCC data, we may update some attributes */
- if ((fattr->valid & NFS_ATTR_WCC) != 0) {
- if (timespec_equal(&inode->i_ctime, &fattr->pre_ctime))
- memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
- if (timespec_equal(&inode->i_mtime, &fattr->pre_mtime))
- memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
- }
-
/* Verify a few of the more important attributes */
if (!timespec_equal(&inode->i_mtime, &fattr->mtime)) {
nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
@@ -1410,14 +1428,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
if ((fattr->valid & NFS_ATTR_FATTR) == 0)
return 0;
- if (nfsi->fileid != fattr->fileid) {
- printk(KERN_ERR "%s: inode number mismatch\n"
- "expected (%s/0x%Lx), got (%s/0x%Lx)\n",
- __FUNCTION__,
- inode->i_sb->s_id, (long long)nfsi->fileid,
- inode->i_sb->s_id, (long long)fattr->fileid);
- goto out_err;
- }
+ if (nfsi->fileid != fattr->fileid)
+ goto out_fileid;
/*
* Make sure the inode's type hasn't changed.
@@ -1436,6 +1448,9 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
if (data_stable)
nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME);
+ /* Do atomic weak cache consistency updates */
+ nfs_wcc_update_inode(inode, fattr);
+
/* Check if our cached file size is stale */
new_isize = nfs_size_to_loff_t(fattr->size);
cur_isize = i_size_read(inode);
@@ -1539,6 +1554,13 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
*/
nfs_invalidate_inode(inode);
return -ESTALE;
+
+ out_fileid:
+ printk(KERN_ERR "NFS: server %s error: fileid changed\n"
+ "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
+ NFS_SERVER(inode)->hostname, inode->i_sb->s_id,
+ (long long)nfsi->fileid, (long long)fattr->fileid);
+ goto out_err;
}
/*
@@ -1820,25 +1842,10 @@ static int nfs4_fill_super(struct super_block *sb, struct nfs4_mount_data *data,
}
clnt->cl_intr = 1;
clnt->cl_softrtry = 1;
- clnt->cl_chatty = 1;
clp->cl_rpcclient = clnt;
- clp->cl_cred = rpcauth_lookupcred(clnt->cl_auth, 0);
- if (IS_ERR(clp->cl_cred)) {
- up_write(&clp->cl_sem);
- err = PTR_ERR(clp->cl_cred);
- clp->cl_cred = NULL;
- goto out_fail;
- }
memcpy(clp->cl_ipaddr, server->ip_addr, sizeof(clp->cl_ipaddr));
nfs_idmap_new(clp);
}
- if (list_empty(&clp->cl_superblocks)) {
- err = nfs4_init_client(clp);
- if (err != 0) {
- up_write(&clp->cl_sem);
- goto out_fail;
- }
- }
list_add_tail(&server->nfs4_siblings, &clp->cl_superblocks);
clnt = rpc_clone_client(clp->cl_rpcclient);
if (!IS_ERR(clnt))
@@ -2033,6 +2040,35 @@ static struct file_system_type nfs4_fs_type = {
.fs_flags = FS_ODD_RENAME|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
};
+static const int nfs_set_port_min = 0;
+static const int nfs_set_port_max = 65535;
+static int param_set_port(const char *val, struct kernel_param *kp)
+{
+ char *endp;
+ int num = simple_strtol(val, &endp, 0);
+ if (endp == val || *endp || num < nfs_set_port_min || num > nfs_set_port_max)
+ return -EINVAL;
+ *((int *)kp->arg) = num;
+ return 0;
+}
+
+module_param_call(callback_tcpport, param_set_port, param_get_int,
+ &nfs_callback_set_tcpport, 0644);
+
+static int param_set_idmap_timeout(const char *val, struct kernel_param *kp)
+{
+ char *endp;
+ int num = simple_strtol(val, &endp, 0);
+ int jif = num * HZ;
+ if (endp == val || *endp || num < 0 || jif < num)
+ return -EINVAL;
+ *((int *)kp->arg) = jif;
+ return 0;
+}
+
+module_param_call(idmap_cache_timeout, param_set_idmap_timeout, param_get_int,
+ &nfs_idmap_cache_timeout, 0644);
+
#define nfs4_init_once(nfsi) \
do { \
INIT_LIST_HEAD(&(nfsi)->open_states); \
@@ -2040,8 +2076,25 @@ static struct file_system_type nfs4_fs_type = {
nfsi->delegation_state = 0; \
init_rwsem(&nfsi->rwsem); \
} while(0)
-#define register_nfs4fs() register_filesystem(&nfs4_fs_type)
-#define unregister_nfs4fs() unregister_filesystem(&nfs4_fs_type)
+
+static inline int register_nfs4fs(void)
+{
+ int ret;
+
+ ret = nfs_register_sysctl();
+ if (ret != 0)
+ return ret;
+ ret = register_filesystem(&nfs4_fs_type);
+ if (ret != 0)
+ nfs_unregister_sysctl();
+ return ret;
+}
+
+static inline void unregister_nfs4fs(void)
+{
+ unregister_filesystem(&nfs4_fs_type);
+ nfs_unregister_sysctl();
+}
#else
#define nfs4_init_once(nfsi) \
do { } while (0)
@@ -2166,11 +2219,11 @@ out:
#ifdef CONFIG_PROC_FS
rpc_proc_unregister("nfs");
#endif
- nfs_destroy_writepagecache();
#ifdef CONFIG_NFS_DIRECTIO
-out0:
nfs_destroy_directcache();
+out0:
#endif
+ nfs_destroy_writepagecache();
out1:
nfs_destroy_readpagecache();
out2:
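
The new module parameters (callback_tcpport and idmap_cache_timeout) are validated by param_set_port() and param_set_idmap_timeout(), which parse the string and reject anything out of range instead of accepting arbitrary integers. A userspace sketch of the port case is shown below; parse_port() is an invented helper, not a kernel interface.

#include <stdio.h>
#include <stdlib.h>

static int parse_port(const char *val, int *out)
{
	char *endp;
	long num = strtol(val, &endp, 0);

	/* Reject empty input, trailing junk and out-of-range values. */
	if (endp == val || *endp != '\0' || num < 0 || num > 65535)
		return -1;
	*out = (int)num;
	return 0;
}

int main(void)
{
	const char *samples[] = { "2049", "0x801", "70000", "20 49", "" };
	unsigned int i;
	int port;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		if (parse_port(samples[i], &port) == 0)
			printf("\"%s\" -> %d\n", samples[i], port);
		else
			printf("\"%s\" -> rejected\n", samples[i]);
	}
	return 0;
}
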
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 0e82617f2de..db99b8f678f 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -82,7 +82,6 @@ mnt_create(char *hostname, struct sockaddr_in *srvaddr, int version,
RPC_AUTH_UNIX);
if (!IS_ERR(clnt)) {
clnt->cl_softrtry = 1;
- clnt->cl_chatty = 1;
clnt->cl_oneshot = 1;
clnt->cl_intr = 1;
}
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 59049e864ca..7fc0560c89c 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -146,23 +146,23 @@ xdr_decode_fattr(u32 *p, struct nfs_fattr *fattr)
return p;
}
-#define SATTR(p, attr, flag, field) \
- *p++ = (attr->ia_valid & flag) ? htonl(attr->field) : ~(u32) 0
static inline u32 *
xdr_encode_sattr(u32 *p, struct iattr *attr)
{
- SATTR(p, attr, ATTR_MODE, ia_mode);
- SATTR(p, attr, ATTR_UID, ia_uid);
- SATTR(p, attr, ATTR_GID, ia_gid);
- SATTR(p, attr, ATTR_SIZE, ia_size);
+ const u32 not_set = __constant_htonl(0xFFFFFFFF);
+
+ *p++ = (attr->ia_valid & ATTR_MODE) ? htonl(attr->ia_mode) : not_set;
+ *p++ = (attr->ia_valid & ATTR_UID) ? htonl(attr->ia_uid) : not_set;
+ *p++ = (attr->ia_valid & ATTR_GID) ? htonl(attr->ia_gid) : not_set;
+ *p++ = (attr->ia_valid & ATTR_SIZE) ? htonl(attr->ia_size) : not_set;
if (attr->ia_valid & ATTR_ATIME_SET) {
p = xdr_encode_time(p, &attr->ia_atime);
} else if (attr->ia_valid & ATTR_ATIME) {
p = xdr_encode_current_server_time(p, &attr->ia_atime);
} else {
- *p++ = ~(u32) 0;
- *p++ = ~(u32) 0;
+ *p++ = not_set;
+ *p++ = not_set;
}
if (attr->ia_valid & ATTR_MTIME_SET) {
@@ -170,12 +170,11 @@ xdr_encode_sattr(u32 *p, struct iattr *attr)
} else if (attr->ia_valid & ATTR_MTIME) {
p = xdr_encode_current_server_time(p, &attr->ia_mtime);
} else {
- *p++ = ~(u32) 0;
- *p++ = ~(u32) 0;
+ *p++ = not_set;
+ *p++ = not_set;
}
return p;
}
-#undef SATTR
/*
* NFS encode functions
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 92c870d19cc..ed67567f055 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -68,27 +68,39 @@ nfs3_async_handle_jukebox(struct rpc_task *task)
return 1;
}
-/*
- * Bare-bones access to getattr: this is for nfs_read_super.
- */
static int
-nfs3_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
- struct nfs_fsinfo *info)
+do_proc_get_root(struct rpc_clnt *client, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info)
{
int status;
dprintk("%s: call fsinfo\n", __FUNCTION__);
nfs_fattr_init(info->fattr);
- status = rpc_call(server->client_sys, NFS3PROC_FSINFO, fhandle, info, 0);
+ status = rpc_call(client, NFS3PROC_FSINFO, fhandle, info, 0);
dprintk("%s: reply fsinfo: %d\n", __FUNCTION__, status);
if (!(info->fattr->valid & NFS_ATTR_FATTR)) {
- status = rpc_call(server->client_sys, NFS3PROC_GETATTR, fhandle, info->fattr, 0);
+ status = rpc_call(client, NFS3PROC_GETATTR, fhandle, info->fattr, 0);
dprintk("%s: reply getattr: %d\n", __FUNCTION__, status);
}
return status;
}
/*
+ * Bare-bones access to getattr: this is for nfs_read_super.
+ */
+static int
+nfs3_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info)
+{
+ int status;
+
+ status = do_proc_get_root(server->client, fhandle, info);
+ if (status && server->client_sys != server->client)
+ status = do_proc_get_root(server->client_sys, fhandle, info);
+ return status;
+}
+
+/*
* One function for each procedure in the NFS protocol.
*/
static int
@@ -732,19 +744,23 @@ nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
extern u32 *nfs3_decode_dirent(u32 *, struct nfs_entry *, int);
-static void
-nfs3_read_done(struct rpc_task *task)
+static void nfs3_read_done(struct rpc_task *task, void *calldata)
{
- struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata;
+ struct nfs_read_data *data = calldata;
if (nfs3_async_handle_jukebox(task))
return;
/* Call back common NFS readpage processing */
if (task->tk_status >= 0)
nfs_refresh_inode(data->inode, &data->fattr);
- nfs_readpage_result(task);
+ nfs_readpage_result(task, calldata);
}
+static const struct rpc_call_ops nfs3_read_ops = {
+ .rpc_call_done = nfs3_read_done,
+ .rpc_release = nfs_readdata_release,
+};
+
static void
nfs3_proc_read_setup(struct nfs_read_data *data)
{
@@ -762,23 +778,26 @@ nfs3_proc_read_setup(struct nfs_read_data *data)
flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
/* Finalize the task. */
- rpc_init_task(task, NFS_CLIENT(inode), nfs3_read_done, flags);
+ rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs3_read_ops, data);
rpc_call_setup(task, &msg, 0);
}
-static void
-nfs3_write_done(struct rpc_task *task)
+static void nfs3_write_done(struct rpc_task *task, void *calldata)
{
- struct nfs_write_data *data;
+ struct nfs_write_data *data = calldata;
if (nfs3_async_handle_jukebox(task))
return;
- data = (struct nfs_write_data *)task->tk_calldata;
if (task->tk_status >= 0)
nfs_post_op_update_inode(data->inode, data->res.fattr);
- nfs_writeback_done(task);
+ nfs_writeback_done(task, calldata);
}
+static const struct rpc_call_ops nfs3_write_ops = {
+ .rpc_call_done = nfs3_write_done,
+ .rpc_release = nfs_writedata_release,
+};
+
static void
nfs3_proc_write_setup(struct nfs_write_data *data, int how)
{
@@ -806,23 +825,26 @@ nfs3_proc_write_setup(struct nfs_write_data *data, int how)
flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
/* Finalize the task. */
- rpc_init_task(task, NFS_CLIENT(inode), nfs3_write_done, flags);
+ rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs3_write_ops, data);
rpc_call_setup(task, &msg, 0);
}
-static void
-nfs3_commit_done(struct rpc_task *task)
+static void nfs3_commit_done(struct rpc_task *task, void *calldata)
{
- struct nfs_write_data *data;
+ struct nfs_write_data *data = calldata;
if (nfs3_async_handle_jukebox(task))
return;
- data = (struct nfs_write_data *)task->tk_calldata;
if (task->tk_status >= 0)
nfs_post_op_update_inode(data->inode, data->res.fattr);
- nfs_commit_done(task);
+ nfs_commit_done(task, calldata);
}
+static const struct rpc_call_ops nfs3_commit_ops = {
+ .rpc_call_done = nfs3_commit_done,
+ .rpc_release = nfs_commit_release,
+};
+
static void
nfs3_proc_commit_setup(struct nfs_write_data *data, int how)
{
@@ -840,7 +862,7 @@ nfs3_proc_commit_setup(struct nfs_write_data *data, int how)
flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
/* Finalize the task. */
- rpc_init_task(task, NFS_CLIENT(inode), nfs3_commit_done, flags);
+ rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs3_commit_ops, data);
rpc_call_setup(task, &msg, 0);
}
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 0498bd36602..b6c0b5012bc 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -182,7 +182,7 @@ xdr_encode_sattr(u32 *p, struct iattr *attr)
{
if (attr->ia_valid & ATTR_MODE) {
*p++ = xdr_one;
- *p++ = htonl(attr->ia_mode);
+ *p++ = htonl(attr->ia_mode & S_IALLUGO);
} else {
*p++ = xdr_zero;
}
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index b7f262dcb6e..0f5e4e7cdde 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -38,7 +38,8 @@ struct idmap;
((err) != NFSERR_NOFILEHANDLE))
enum nfs4_client_state {
- NFS4CLNT_OK = 0,
+ NFS4CLNT_STATE_RECOVER = 0,
+ NFS4CLNT_LEASE_EXPIRED,
};
/*
@@ -67,7 +68,6 @@ struct nfs4_client {
atomic_t cl_count;
struct rpc_clnt * cl_rpcclient;
- struct rpc_cred * cl_cred;
struct list_head cl_superblocks; /* List of nfs_server structs */
@@ -76,7 +76,6 @@ struct nfs4_client {
struct work_struct cl_renewd;
struct work_struct cl_recoverd;
- wait_queue_head_t cl_waitq;
struct rpc_wait_queue cl_rpcwaitq;
/* used for the setclientid verifier */
@@ -182,8 +181,9 @@ struct nfs4_state {
nfs4_stateid stateid;
- unsigned int nreaders;
- unsigned int nwriters;
+ unsigned int n_rdonly;
+ unsigned int n_wronly;
+ unsigned int n_rdwr;
int state; /* State on the server (R,W, or RW) */
atomic_t count;
};
@@ -210,10 +210,10 @@ extern ssize_t nfs4_listxattr(struct dentry *, char *, size_t);
/* nfs4proc.c */
extern int nfs4_map_errors(int err);
-extern int nfs4_proc_setclientid(struct nfs4_client *, u32, unsigned short);
-extern int nfs4_proc_setclientid_confirm(struct nfs4_client *);
-extern int nfs4_proc_async_renew(struct nfs4_client *);
-extern int nfs4_proc_renew(struct nfs4_client *);
+extern int nfs4_proc_setclientid(struct nfs4_client *, u32, unsigned short, struct rpc_cred *);
+extern int nfs4_proc_setclientid_confirm(struct nfs4_client *, struct rpc_cred *);
+extern int nfs4_proc_async_renew(struct nfs4_client *, struct rpc_cred *);
+extern int nfs4_proc_renew(struct nfs4_client *, struct rpc_cred *);
extern int nfs4_do_close(struct inode *inode, struct nfs4_state *state);
extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *);
@@ -237,8 +237,8 @@ extern void init_nfsv4_state(struct nfs_server *);
extern void destroy_nfsv4_state(struct nfs_server *);
extern struct nfs4_client *nfs4_get_client(struct in_addr *);
extern void nfs4_put_client(struct nfs4_client *clp);
-extern int nfs4_init_client(struct nfs4_client *clp);
extern struct nfs4_client *nfs4_find_client(struct in_addr *);
+struct rpc_cred *nfs4_get_renew_cred(struct nfs4_client *clp);
extern u32 nfs4_alloc_lockowner_id(struct nfs4_client *);
extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index f988a9417b1..984ca3454d0 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -57,11 +57,13 @@
#define NFS4_POLL_RETRY_MIN (1*HZ)
#define NFS4_POLL_RETRY_MAX (15*HZ)
-static int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nfs_fh *fh, struct nfs4_state_owner *sp, nfs4_stateid *stateid, struct nfs_seqid *seqid);
+struct nfs4_opendata;
+static int _nfs4_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *);
static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry);
static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception);
+static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs4_client *clp);
extern u32 *nfs4_decode_dirent(u32 *p, struct nfs_entry *entry, int plus);
extern struct rpc_procinfo nfs4_procedures[];
@@ -173,8 +175,7 @@ static void nfs4_setup_readdir(u64 cookie, u32 *verifier, struct dentry *dentry,
kunmap_atomic(start, KM_USER0);
}
-static void
-renew_lease(struct nfs_server *server, unsigned long timestamp)
+static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
struct nfs4_client *clp = server->nfs4_state;
spin_lock(&clp->cl_lock);
@@ -194,21 +195,123 @@ static void update_changeattr(struct inode *inode, struct nfs4_change_info *cinf
spin_unlock(&inode->i_lock);
}
+struct nfs4_opendata {
+ atomic_t count;
+ struct nfs_openargs o_arg;
+ struct nfs_openres o_res;
+ struct nfs_open_confirmargs c_arg;
+ struct nfs_open_confirmres c_res;
+ struct nfs_fattr f_attr;
+ struct nfs_fattr dir_attr;
+ struct dentry *dentry;
+ struct dentry *dir;
+ struct nfs4_state_owner *owner;
+ struct iattr attrs;
+ unsigned long timestamp;
+ int rpc_status;
+ int cancelled;
+};
+
+static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
+ struct nfs4_state_owner *sp, int flags,
+ const struct iattr *attrs)
+{
+ struct dentry *parent = dget_parent(dentry);
+ struct inode *dir = parent->d_inode;
+ struct nfs_server *server = NFS_SERVER(dir);
+ struct nfs4_opendata *p;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL)
+ goto err;
+ p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
+ if (p->o_arg.seqid == NULL)
+ goto err_free;
+ atomic_set(&p->count, 1);
+ p->dentry = dget(dentry);
+ p->dir = parent;
+ p->owner = sp;
+ atomic_inc(&sp->so_count);
+ p->o_arg.fh = NFS_FH(dir);
+ p->o_arg.open_flags = flags,
+ p->o_arg.clientid = server->nfs4_state->cl_clientid;
+ p->o_arg.id = sp->so_id;
+ p->o_arg.name = &dentry->d_name;
+ p->o_arg.server = server;
+ p->o_arg.bitmask = server->attr_bitmask;
+ p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
+ p->o_res.f_attr = &p->f_attr;
+ p->o_res.dir_attr = &p->dir_attr;
+ p->o_res.server = server;
+ nfs_fattr_init(&p->f_attr);
+ nfs_fattr_init(&p->dir_attr);
+ if (flags & O_EXCL) {
+ u32 *s = (u32 *) p->o_arg.u.verifier.data;
+ s[0] = jiffies;
+ s[1] = current->pid;
+ } else if (flags & O_CREAT) {
+ p->o_arg.u.attrs = &p->attrs;
+ memcpy(&p->attrs, attrs, sizeof(p->attrs));
+ }
+ p->c_arg.fh = &p->o_res.fh;
+ p->c_arg.stateid = &p->o_res.stateid;
+ p->c_arg.seqid = p->o_arg.seqid;
+ return p;
+err_free:
+ kfree(p);
+err:
+ dput(parent);
+ return NULL;
+}
+
+static void nfs4_opendata_free(struct nfs4_opendata *p)
+{
+ if (p != NULL && atomic_dec_and_test(&p->count)) {
+ nfs_free_seqid(p->o_arg.seqid);
+ nfs4_put_state_owner(p->owner);
+ dput(p->dir);
+ dput(p->dentry);
+ kfree(p);
+ }
+}
+
/* Helper for asynchronous RPC calls */
-static int nfs4_call_async(struct rpc_clnt *clnt, rpc_action tk_begin,
- rpc_action tk_exit, void *calldata)
+static int nfs4_call_async(struct rpc_clnt *clnt,
+ const struct rpc_call_ops *tk_ops, void *calldata)
{
struct rpc_task *task;
- if (!(task = rpc_new_task(clnt, tk_exit, RPC_TASK_ASYNC)))
+ if (!(task = rpc_new_task(clnt, RPC_TASK_ASYNC, tk_ops, calldata)))
return -ENOMEM;
-
- task->tk_calldata = calldata;
- task->tk_action = tk_begin;
rpc_execute(task);
return 0;
}
+static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
+{
+ sigset_t oldset;
+ int ret;
+
+ rpc_clnt_sigmask(task->tk_client, &oldset);
+ ret = rpc_wait_for_completion_task(task);
+ rpc_clnt_sigunmask(task->tk_client, &oldset);
+ return ret;
+}
+
+static inline void update_open_stateflags(struct nfs4_state *state, mode_t open_flags)
+{
+ switch (open_flags) {
+ case FMODE_WRITE:
+ state->n_wronly++;
+ break;
+ case FMODE_READ:
+ state->n_rdonly++;
+ break;
+ case FMODE_READ|FMODE_WRITE:
+ state->n_rdwr++;
+ }
+}
+
static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, int open_flags)
{
struct inode *inode = state->inode;
@@ -218,41 +321,134 @@ static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid,
spin_lock(&state->owner->so_lock);
spin_lock(&inode->i_lock);
memcpy(&state->stateid, stateid, sizeof(state->stateid));
- if ((open_flags & FMODE_WRITE))
- state->nwriters++;
- if (open_flags & FMODE_READ)
- state->nreaders++;
+ update_open_stateflags(state, open_flags);
nfs4_state_set_mode_locked(state, state->state | open_flags);
spin_unlock(&inode->i_lock);
spin_unlock(&state->owner->so_lock);
}
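+/* Turn a successful OPEN reply into a referenced nfs4_state for the target inode */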
+static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
+{
+ struct inode *inode;
+ struct nfs4_state *state = NULL;
+
+ if (!(data->f_attr.valid & NFS_ATTR_FATTR))
+ goto out;
+ inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
+ if (inode == NULL)
+ goto out;
+ state = nfs4_get_open_state(inode, data->owner);
+ if (state == NULL)
+ goto put_inode;
+ update_open_stateid(state, &data->o_res.stateid, data->o_arg.open_flags);
+put_inode:
+ iput(inode);
+out:
+ return state;
+}
+
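+/* Find and reference an open context that is still attached to this nfs4_state */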
+static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
+{
+ struct nfs_inode *nfsi = NFS_I(state->inode);
+ struct nfs_open_context *ctx;
+
+ spin_lock(&state->inode->i_lock);
+ list_for_each_entry(ctx, &nfsi->open_files, list) {
+ if (ctx->state != state)
+ continue;
+ get_nfs_open_context(ctx);
+ spin_unlock(&state->inode->i_lock);
+ return ctx;
+ }
+ spin_unlock(&state->inode->i_lock);
+ return ERR_PTR(-ENOENT);
+}
+
+static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, mode_t openflags, nfs4_stateid *stateid)
+{
+ int ret;
+
+ opendata->o_arg.open_flags = openflags;
+ ret = _nfs4_proc_open(opendata);
+ if (ret != 0)
+ return ret;
+ memcpy(stateid->data, opendata->o_res.stateid.data,
+ sizeof(stateid->data));
+ return 0;
+}
+
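+/* Recover open state by re-sending OPEN for each access mode still in use, then reconciling the returned stateid and any delegation */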
+static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
+{
+ nfs4_stateid stateid;
+ struct nfs4_state *newstate;
+ int mode = 0;
+ int delegation = 0;
+ int ret;
+
+ /* memory barrier prior to reading state->n_* */
+ smp_rmb();
+ if (state->n_rdwr != 0) {
+ ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &stateid);
+ if (ret != 0)
+ return ret;
+ mode |= FMODE_READ|FMODE_WRITE;
+ if (opendata->o_res.delegation_type != 0)
+ delegation = opendata->o_res.delegation_type;
+ smp_rmb();
+ }
+ if (state->n_wronly != 0) {
+ ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &stateid);
+ if (ret != 0)
+ return ret;
+ mode |= FMODE_WRITE;
+ if (opendata->o_res.delegation_type != 0)
+ delegation = opendata->o_res.delegation_type;
+ smp_rmb();
+ }
+ if (state->n_rdonly != 0) {
+ ret = nfs4_open_recover_helper(opendata, FMODE_READ, &stateid);
+ if (ret != 0)
+ return ret;
+ mode |= FMODE_READ;
+ }
+ clear_bit(NFS_DELEGATED_STATE, &state->flags);
+ if (mode == 0)
+ return 0;
+ if (opendata->o_res.delegation_type == 0)
+ opendata->o_res.delegation_type = delegation;
+ opendata->o_arg.open_flags |= mode;
+ newstate = nfs4_opendata_to_nfs4_state(opendata);
+ if (newstate != NULL) {
+ if (opendata->o_res.delegation_type != 0) {
+ struct nfs_inode *nfsi = NFS_I(newstate->inode);
+ int delegation_flags = 0;
+ if (nfsi->delegation)
+ delegation_flags = nfsi->delegation->flags;
+ if (!(delegation_flags & NFS_DELEGATION_NEED_RECLAIM))
+ nfs_inode_set_delegation(newstate->inode,
+ opendata->owner->so_cred,
+ &opendata->o_res);
+ else
+ nfs_inode_reclaim_delegation(newstate->inode,
+ opendata->owner->so_cred,
+ &opendata->o_res);
+ }
+ nfs4_close_state(newstate, opendata->o_arg.open_flags);
+ }
+ if (newstate != state)
+ return -ESTALE;
+ return 0;
+}
+
/*
* OPEN_RECLAIM:
* reclaim state on the server after a reboot.
*/
-static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
+static int _nfs4_do_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state, struct dentry *dentry)
{
- struct inode *inode = state->inode;
- struct nfs_server *server = NFS_SERVER(inode);
- struct nfs_delegation *delegation = NFS_I(inode)->delegation;
- struct nfs_openargs o_arg = {
- .fh = NFS_FH(inode),
- .id = sp->so_id,
- .open_flags = state->state,
- .clientid = server->nfs4_state->cl_clientid,
- .claim = NFS4_OPEN_CLAIM_PREVIOUS,
- .bitmask = server->attr_bitmask,
- };
- struct nfs_openres o_res = {
- .server = server, /* Grrr */
- };
- struct rpc_message msg = {
- .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR],
- .rpc_argp = &o_arg,
- .rpc_resp = &o_res,
- .rpc_cred = sp->so_cred,
- };
+ struct nfs_delegation *delegation = NFS_I(state->inode)->delegation;
+ struct nfs4_opendata *opendata;
+ int delegation_type = 0;
int status;
if (delegation != NULL) {
@@ -262,38 +458,27 @@ static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *st
set_bit(NFS_DELEGATED_STATE, &state->flags);
return 0;
}
- o_arg.u.delegation_type = delegation->type;
+ delegation_type = delegation->type;
}
- o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
- if (o_arg.seqid == NULL)
+ opendata = nfs4_opendata_alloc(dentry, sp, 0, NULL);
+ if (opendata == NULL)
return -ENOMEM;
- status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
- /* Confirm the sequence as being established */
- nfs_confirm_seqid(&sp->so_seqid, status);
- nfs_increment_open_seqid(status, o_arg.seqid);
- if (status == 0) {
- memcpy(&state->stateid, &o_res.stateid, sizeof(state->stateid));
- if (o_res.delegation_type != 0) {
- nfs_inode_reclaim_delegation(inode, sp->so_cred, &o_res);
- /* Did the server issue an immediate delegation recall? */
- if (o_res.do_recall)
- nfs_async_inode_return_delegation(inode, &o_res.stateid);
- }
- }
- nfs_free_seqid(o_arg.seqid);
- clear_bit(NFS_DELEGATED_STATE, &state->flags);
- /* Ensure we update the inode attributes */
- NFS_CACHEINV(inode);
+ opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
+ opendata->o_arg.fh = NFS_FH(state->inode);
+ nfs_copy_fh(&opendata->o_res.fh, opendata->o_arg.fh);
+ opendata->o_arg.u.delegation_type = delegation_type;
+ status = nfs4_open_recover(opendata, state);
+ nfs4_opendata_free(opendata);
return status;
}
-static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
+static int nfs4_do_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state, struct dentry *dentry)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_exception exception = { };
int err;
do {
- err = _nfs4_open_reclaim(sp, state);
+ err = _nfs4_do_open_reclaim(sp, state, dentry);
if (err != -NFS4ERR_DELAY)
break;
nfs4_handle_exception(server, err, &exception);
@@ -301,63 +486,36 @@ static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *sta
return err;
}
+static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
+{
+ struct nfs_open_context *ctx;
+ int ret;
+
+ ctx = nfs4_state_find_open_context(state);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+ ret = nfs4_do_open_reclaim(sp, state, ctx->dentry);
+ put_nfs_open_context(ctx);
+ return ret;
+}
+
static int _nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state)
{
struct nfs4_state_owner *sp = state->owner;
- struct inode *inode = dentry->d_inode;
- struct nfs_server *server = NFS_SERVER(inode);
- struct dentry *parent = dget_parent(dentry);
- struct nfs_openargs arg = {
- .fh = NFS_FH(parent->d_inode),
- .clientid = server->nfs4_state->cl_clientid,
- .name = &dentry->d_name,
- .id = sp->so_id,
- .server = server,
- .bitmask = server->attr_bitmask,
- .claim = NFS4_OPEN_CLAIM_DELEGATE_CUR,
- };
- struct nfs_openres res = {
- .server = server,
- };
- struct rpc_message msg = {
- .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR],
- .rpc_argp = &arg,
- .rpc_resp = &res,
- .rpc_cred = sp->so_cred,
- };
- int status = 0;
+ struct nfs4_opendata *opendata;
+ int ret;
if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
- goto out;
- if (state->state == 0)
- goto out;
- arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
- status = -ENOMEM;
- if (arg.seqid == NULL)
- goto out;
- arg.open_flags = state->state;
- memcpy(arg.u.delegation.data, state->stateid.data, sizeof(arg.u.delegation.data));
- status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
- nfs_increment_open_seqid(status, arg.seqid);
- if (status != 0)
- goto out_free;
- if(res.rflags & NFS4_OPEN_RESULT_CONFIRM) {
- status = _nfs4_proc_open_confirm(server->client, NFS_FH(inode),
- sp, &res.stateid, arg.seqid);
- if (status != 0)
- goto out_free;
- }
- nfs_confirm_seqid(&sp->so_seqid, 0);
- if (status >= 0) {
- memcpy(state->stateid.data, res.stateid.data,
- sizeof(state->stateid.data));
- clear_bit(NFS_DELEGATED_STATE, &state->flags);
- }
-out_free:
- nfs_free_seqid(arg.seqid);
-out:
- dput(parent);
- return status;
+ return 0;
+ opendata = nfs4_opendata_alloc(dentry, sp, 0, NULL);
+ if (opendata == NULL)
+ return -ENOMEM;
+ opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
+ memcpy(opendata->o_arg.u.delegation.data, state->stateid.data,
+ sizeof(opendata->o_arg.u.delegation.data));
+ ret = nfs4_open_recover(opendata, state);
+ nfs4_opendata_free(opendata);
+ return ret;
}
int nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state)
@@ -382,82 +540,202 @@ int nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state)
return err;
}
-static int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nfs_fh *fh, struct nfs4_state_owner *sp, nfs4_stateid *stateid, struct nfs_seqid *seqid)
+static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
{
- struct nfs_open_confirmargs arg = {
- .fh = fh,
- .seqid = seqid,
- .stateid = *stateid,
- };
- struct nfs_open_confirmres res;
- struct rpc_message msg = {
- .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
- .rpc_argp = &arg,
- .rpc_resp = &res,
- .rpc_cred = sp->so_cred,
+ struct nfs4_opendata *data = calldata;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
+ .rpc_argp = &data->c_arg,
+ .rpc_resp = &data->c_res,
+ .rpc_cred = data->owner->so_cred,
};
+ data->timestamp = jiffies;
+ rpc_call_setup(task, &msg, 0);
+}
+
+static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_opendata *data = calldata;
+
+ data->rpc_status = task->tk_status;
+ if (RPC_ASSASSINATED(task))
+ return;
+ if (data->rpc_status == 0) {
+ memcpy(data->o_res.stateid.data, data->c_res.stateid.data,
+ sizeof(data->o_res.stateid.data));
+ renew_lease(data->o_res.server, data->timestamp);
+ }
+ nfs_increment_open_seqid(data->rpc_status, data->c_arg.seqid);
+ nfs_confirm_seqid(&data->owner->so_seqid, data->rpc_status);
+}
+
+static void nfs4_open_confirm_release(void *calldata)
+{
+ struct nfs4_opendata *data = calldata;
+ struct nfs4_state *state = NULL;
+
+ /* If this request hasn't been cancelled, do nothing */
+ if (data->cancelled == 0)
+ goto out_free;
+ /* In case of error, no cleanup! */
+ if (data->rpc_status != 0)
+ goto out_free;
+ nfs_confirm_seqid(&data->owner->so_seqid, 0);
+ state = nfs4_opendata_to_nfs4_state(data);
+ if (state != NULL)
+ nfs4_close_state(state, data->o_arg.open_flags);
+out_free:
+ nfs4_opendata_free(data);
+}
+
+static const struct rpc_call_ops nfs4_open_confirm_ops = {
+ .rpc_call_prepare = nfs4_open_confirm_prepare,
+ .rpc_call_done = nfs4_open_confirm_done,
+ .rpc_release = nfs4_open_confirm_release,
+};
+
+/*
+ * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
+ */
+static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
+{
+ struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
+ struct rpc_task *task;
int status;
- status = rpc_call_sync(clnt, &msg, RPC_TASK_NOINTR);
- /* Confirm the sequence as being established */
- nfs_confirm_seqid(&sp->so_seqid, status);
- nfs_increment_open_seqid(status, seqid);
- if (status >= 0)
- memcpy(stateid, &res.stateid, sizeof(*stateid));
+ atomic_inc(&data->count);
+ task = rpc_run_task(server->client, RPC_TASK_ASYNC, &nfs4_open_confirm_ops, data);
+ if (IS_ERR(task)) {
+ nfs4_opendata_free(data);
+ return PTR_ERR(task);
+ }
+ status = nfs4_wait_for_completion_rpc_task(task);
+ if (status != 0) {
+ data->cancelled = 1;
+ smp_wmb();
+ } else
+ status = data->rpc_status;
+ rpc_release_task(task);
return status;
}
-static int _nfs4_proc_open(struct inode *dir, struct nfs4_state_owner *sp, struct nfs_openargs *o_arg, struct nfs_openres *o_res)
+static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
{
- struct nfs_server *server = NFS_SERVER(dir);
+ struct nfs4_opendata *data = calldata;
+ struct nfs4_state_owner *sp = data->owner;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
- .rpc_argp = o_arg,
- .rpc_resp = o_res,
+ .rpc_argp = &data->o_arg,
+ .rpc_resp = &data->o_res,
.rpc_cred = sp->so_cred,
};
- int status;
+
+ if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
+ return;
+ /* Update sequence id. */
+ data->o_arg.id = sp->so_id;
+ data->o_arg.clientid = sp->so_client->cl_clientid;
+ if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
+ msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
+ data->timestamp = jiffies;
+ rpc_call_setup(task, &msg, 0);
+}
- /* Update sequence id. The caller must serialize! */
- o_arg->id = sp->so_id;
- o_arg->clientid = sp->so_client->cl_clientid;
+static void nfs4_open_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_opendata *data = calldata;
- status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
- if (status == 0) {
- /* OPEN on anything except a regular file is disallowed in NFSv4 */
- switch (o_res->f_attr->mode & S_IFMT) {
+ data->rpc_status = task->tk_status;
+ if (RPC_ASSASSINATED(task))
+ return;
+ if (task->tk_status == 0) {
+ switch (data->o_res.f_attr->mode & S_IFMT) {
case S_IFREG:
break;
case S_IFLNK:
- status = -ELOOP;
+ data->rpc_status = -ELOOP;
break;
case S_IFDIR:
- status = -EISDIR;
+ data->rpc_status = -EISDIR;
break;
default:
- status = -ENOTDIR;
+ data->rpc_status = -ENOTDIR;
}
+ renew_lease(data->o_res.server, data->timestamp);
}
+ nfs_increment_open_seqid(data->rpc_status, data->o_arg.seqid);
+}
+
+static void nfs4_open_release(void *calldata)
+{
+ struct nfs4_opendata *data = calldata;
+ struct nfs4_state *state = NULL;
- nfs_increment_open_seqid(status, o_arg->seqid);
+ /* If this request hasn't been cancelled, do nothing */
+ if (data->cancelled == 0)
+ goto out_free;
+ /* In case of error, no cleanup! */
+ if (data->rpc_status != 0)
+ goto out_free;
+ /* In case we need an open_confirm, no cleanup! */
+ if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
+ goto out_free;
+ nfs_confirm_seqid(&data->owner->so_seqid, 0);
+ state = nfs4_opendata_to_nfs4_state(data);
+ if (state != NULL)
+ nfs4_close_state(state, data->o_arg.open_flags);
+out_free:
+ nfs4_opendata_free(data);
+}
+
+static const struct rpc_call_ops nfs4_open_ops = {
+ .rpc_call_prepare = nfs4_open_prepare,
+ .rpc_call_done = nfs4_open_done,
+ .rpc_release = nfs4_open_release,
+};
+
+/*
+ * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
+ */
+static int _nfs4_proc_open(struct nfs4_opendata *data)
+{
+ struct inode *dir = data->dir->d_inode;
+ struct nfs_server *server = NFS_SERVER(dir);
+ struct nfs_openargs *o_arg = &data->o_arg;
+ struct nfs_openres *o_res = &data->o_res;
+ struct rpc_task *task;
+ int status;
+
+ atomic_inc(&data->count);
+ task = rpc_run_task(server->client, RPC_TASK_ASYNC, &nfs4_open_ops, data);
+ if (IS_ERR(task)) {
+ nfs4_opendata_free(data);
+ return PTR_ERR(task);
+ }
+ status = nfs4_wait_for_completion_rpc_task(task);
+ if (status != 0) {
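+ /* The wait was interrupted: mark the request as cancelled so the rpc_release callback can clean up any server-side state */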
+ data->cancelled = 1;
+ smp_wmb();
+ } else
+ status = data->rpc_status;
+ rpc_release_task(task);
if (status != 0)
- goto out;
+ return status;
+
if (o_arg->open_flags & O_CREAT) {
update_changeattr(dir, &o_res->cinfo);
nfs_post_op_update_inode(dir, o_res->dir_attr);
} else
nfs_refresh_inode(dir, o_res->dir_attr);
if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
- status = _nfs4_proc_open_confirm(server->client, &o_res->fh,
- sp, &o_res->stateid, o_arg->seqid);
+ status = _nfs4_proc_open_confirm(data);
if (status != 0)
- goto out;
+ return status;
}
- nfs_confirm_seqid(&sp->so_seqid, 0);
+ nfs_confirm_seqid(&data->owner->so_seqid, 0);
if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
- status = server->rpc_ops->getattr(server, &o_res->fh, o_res->f_attr);
-out:
- return status;
+ return server->rpc_ops->getattr(server, &o_res->fh, o_res->f_attr);
+ return 0;
}
static int _nfs4_do_access(struct inode *inode, struct rpc_cred *cred, int openflags)
@@ -488,6 +766,15 @@ out:
return -EACCES;
}
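+/* Kick off lease recovery if the lease has expired, then wait for the state recovery thread to finish */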
+int nfs4_recover_expired_lease(struct nfs_server *server)
+{
+ struct nfs4_client *clp = server->nfs4_state;
+
+ if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
+ nfs4_schedule_state_recovery(clp);
+ return nfs4_wait_clnt_recover(server->client, clp);
+}
+
/*
* OPEN_EXPIRED:
* reclaim state on the server after a network partition.
@@ -495,77 +782,31 @@ out:
*/
static int _nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state, struct dentry *dentry)
{
- struct dentry *parent = dget_parent(dentry);
- struct inode *dir = parent->d_inode;
struct inode *inode = state->inode;
- struct nfs_server *server = NFS_SERVER(dir);
struct nfs_delegation *delegation = NFS_I(inode)->delegation;
- struct nfs_fattr f_attr, dir_attr;
- struct nfs_openargs o_arg = {
- .fh = NFS_FH(dir),
- .open_flags = state->state,
- .name = &dentry->d_name,
- .bitmask = server->attr_bitmask,
- .claim = NFS4_OPEN_CLAIM_NULL,
- };
- struct nfs_openres o_res = {
- .f_attr = &f_attr,
- .dir_attr = &dir_attr,
- .server = server,
- };
- int status = 0;
+ struct nfs4_opendata *opendata;
+ int openflags = state->state & (FMODE_READ|FMODE_WRITE);
+ int ret;
if (delegation != NULL && !(delegation->flags & NFS_DELEGATION_NEED_RECLAIM)) {
- status = _nfs4_do_access(inode, sp->so_cred, state->state);
- if (status < 0)
- goto out;
+ ret = _nfs4_do_access(inode, sp->so_cred, openflags);
+ if (ret < 0)
+ return ret;
memcpy(&state->stateid, &delegation->stateid, sizeof(state->stateid));
set_bit(NFS_DELEGATED_STATE, &state->flags);
- goto out;
+ return 0;
}
- o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
- status = -ENOMEM;
- if (o_arg.seqid == NULL)
- goto out;
- nfs_fattr_init(&f_attr);
- nfs_fattr_init(&dir_attr);
- status = _nfs4_proc_open(dir, sp, &o_arg, &o_res);
- if (status != 0)
- goto out_nodeleg;
- /* Check if files differ */
- if ((f_attr.mode & S_IFMT) != (inode->i_mode & S_IFMT))
- goto out_stale;
- /* Has the file handle changed? */
- if (nfs_compare_fh(&o_res.fh, NFS_FH(inode)) != 0) {
- /* Verify if the change attributes are the same */
- if (f_attr.change_attr != NFS_I(inode)->change_attr)
- goto out_stale;
- if (nfs_size_to_loff_t(f_attr.size) != inode->i_size)
- goto out_stale;
- /* Lets just pretend that this is the same file */
- nfs_copy_fh(NFS_FH(inode), &o_res.fh);
- NFS_I(inode)->fileid = f_attr.fileid;
- }
- memcpy(&state->stateid, &o_res.stateid, sizeof(state->stateid));
- if (o_res.delegation_type != 0) {
- if (!(delegation->flags & NFS_DELEGATION_NEED_RECLAIM))
- nfs_inode_set_delegation(inode, sp->so_cred, &o_res);
- else
- nfs_inode_reclaim_delegation(inode, sp->so_cred, &o_res);
+ opendata = nfs4_opendata_alloc(dentry, sp, openflags, NULL);
+ if (opendata == NULL)
+ return -ENOMEM;
+ ret = nfs4_open_recover(opendata, state);
+ if (ret == -ESTALE) {
+ /* Invalidate the state owner so we don't ever use it again */
+ nfs4_drop_state_owner(sp);
+ d_drop(dentry);
}
-out_nodeleg:
- nfs_free_seqid(o_arg.seqid);
- clear_bit(NFS_DELEGATED_STATE, &state->flags);
-out:
- dput(parent);
- return status;
-out_stale:
- status = -ESTALE;
- /* Invalidate the state owner so we don't ever use it again */
- nfs4_drop_state_owner(sp);
- d_drop(dentry);
- /* Should we be trying to close that stateid? */
- goto out_nodeleg;
+ nfs4_opendata_free(opendata);
+ return ret;
}
static inline int nfs4_do_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state, struct dentry *dentry)
@@ -584,26 +825,19 @@ static inline int nfs4_do_open_expired(struct nfs4_state_owner *sp, struct nfs4_
static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
- struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_open_context *ctx;
- int status;
+ int ret;
- spin_lock(&state->inode->i_lock);
- list_for_each_entry(ctx, &nfsi->open_files, list) {
- if (ctx->state != state)
- continue;
- get_nfs_open_context(ctx);
- spin_unlock(&state->inode->i_lock);
- status = nfs4_do_open_expired(sp, state, ctx->dentry);
- put_nfs_open_context(ctx);
- return status;
- }
- spin_unlock(&state->inode->i_lock);
- return -ENOENT;
+ ctx = nfs4_state_find_open_context(state);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+ ret = nfs4_do_open_expired(sp, state, ctx->dentry);
+ put_nfs_open_context(ctx);
+ return ret;
}
/*
- * Returns an nfs4_state + an extra reference to the inode
+ * Returns a referenced nfs4_state if there is an open delegation on the file
*/
static int _nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred *cred, struct nfs4_state **res)
{
@@ -616,6 +850,14 @@ static int _nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred
int open_flags = flags & (FMODE_READ|FMODE_WRITE);
int err;
+ err = -ENOMEM;
+ if (!(sp = nfs4_get_state_owner(server, cred))) {
+ dprintk("%s: nfs4_get_state_owner failed!\n", __FUNCTION__);
+ return err;
+ }
+ err = nfs4_recover_expired_lease(server);
+ if (err != 0)
+ goto out_put_state_owner;
/* Protect against reboot recovery - NOTE ORDER! */
down_read(&clp->cl_sem);
/* Protect against delegation recall */
@@ -625,10 +867,6 @@ static int _nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred
if (delegation == NULL || (delegation->type & open_flags) != open_flags)
goto out_err;
err = -ENOMEM;
- if (!(sp = nfs4_get_state_owner(server, cred))) {
- dprintk("%s: nfs4_get_state_owner failed!\n", __FUNCTION__);
- goto out_err;
- }
state = nfs4_get_open_state(inode, sp);
if (state == NULL)
goto out_err;
@@ -636,39 +874,34 @@ static int _nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred
err = -ENOENT;
if ((state->state & open_flags) == open_flags) {
spin_lock(&inode->i_lock);
- if (open_flags & FMODE_READ)
- state->nreaders++;
- if (open_flags & FMODE_WRITE)
- state->nwriters++;
+ update_open_stateflags(state, open_flags);
spin_unlock(&inode->i_lock);
goto out_ok;
} else if (state->state != 0)
- goto out_err;
+ goto out_put_open_state;
lock_kernel();
err = _nfs4_do_access(inode, cred, open_flags);
unlock_kernel();
if (err != 0)
- goto out_err;
+ goto out_put_open_state;
set_bit(NFS_DELEGATED_STATE, &state->flags);
update_open_stateid(state, &delegation->stateid, open_flags);
out_ok:
nfs4_put_state_owner(sp);
up_read(&nfsi->rwsem);
up_read(&clp->cl_sem);
- igrab(inode);
*res = state;
- return 0;
+ return 0;
+out_put_open_state:
+ nfs4_put_open_state(state);
out_err:
- if (sp != NULL) {
- if (state != NULL)
- nfs4_put_open_state(state);
- nfs4_put_state_owner(sp);
- }
up_read(&nfsi->rwsem);
up_read(&clp->cl_sem);
if (err != -EACCES)
nfs_inode_return_delegation(inode);
+out_put_state_owner:
+ nfs4_put_state_owner(sp);
return err;
}
@@ -689,7 +922,7 @@ static struct nfs4_state *nfs4_open_delegated(struct inode *inode, int flags, st
}
/*
- * Returns an nfs4_state + an referenced inode
+ * Returns a referenced nfs4_state
*/
static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
{
@@ -697,73 +930,46 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, st
struct nfs4_state *state = NULL;
struct nfs_server *server = NFS_SERVER(dir);
struct nfs4_client *clp = server->nfs4_state;
- struct inode *inode = NULL;
+ struct nfs4_opendata *opendata;
int status;
- struct nfs_fattr f_attr, dir_attr;
- struct nfs_openargs o_arg = {
- .fh = NFS_FH(dir),
- .open_flags = flags,
- .name = &dentry->d_name,
- .server = server,
- .bitmask = server->attr_bitmask,
- .claim = NFS4_OPEN_CLAIM_NULL,
- };
- struct nfs_openres o_res = {
- .f_attr = &f_attr,
- .dir_attr = &dir_attr,
- .server = server,
- };
/* Protect against reboot recovery conflicts */
- down_read(&clp->cl_sem);
status = -ENOMEM;
if (!(sp = nfs4_get_state_owner(server, cred))) {
dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
goto out_err;
}
- if (flags & O_EXCL) {
- u32 *p = (u32 *) o_arg.u.verifier.data;
- p[0] = jiffies;
- p[1] = current->pid;
- } else
- o_arg.u.attrs = sattr;
- /* Serialization for the sequence id */
+ status = nfs4_recover_expired_lease(server);
+ if (status != 0)
+ goto err_put_state_owner;
+ down_read(&clp->cl_sem);
+ status = -ENOMEM;
+ opendata = nfs4_opendata_alloc(dentry, sp, flags, sattr);
+ if (opendata == NULL)
+ goto err_put_state_owner;
- o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
- if (o_arg.seqid == NULL)
- return -ENOMEM;
- nfs_fattr_init(&f_attr);
- nfs_fattr_init(&dir_attr);
- status = _nfs4_proc_open(dir, sp, &o_arg, &o_res);
+ status = _nfs4_proc_open(opendata);
if (status != 0)
- goto out_err;
+ goto err_opendata_free;
status = -ENOMEM;
- inode = nfs_fhget(dir->i_sb, &o_res.fh, &f_attr);
- if (!inode)
- goto out_err;
- state = nfs4_get_open_state(inode, sp);
- if (!state)
- goto out_err;
- update_open_stateid(state, &o_res.stateid, flags);
- if (o_res.delegation_type != 0)
- nfs_inode_set_delegation(inode, cred, &o_res);
- nfs_free_seqid(o_arg.seqid);
+ state = nfs4_opendata_to_nfs4_state(opendata);
+ if (state == NULL)
+ goto err_opendata_free;
+ if (opendata->o_res.delegation_type != 0)
+ nfs_inode_set_delegation(state->inode, cred, &opendata->o_res);
+ nfs4_opendata_free(opendata);
nfs4_put_state_owner(sp);
up_read(&clp->cl_sem);
*res = state;
return 0;
+err_opendata_free:
+ nfs4_opendata_free(opendata);
+err_put_state_owner:
+ nfs4_put_state_owner(sp);
out_err:
- if (sp != NULL) {
- if (state != NULL)
- nfs4_put_open_state(state);
- nfs_free_seqid(o_arg.seqid);
- nfs4_put_state_owner(sp);
- }
/* Note: clp->cl_sem must be released before nfs4_put_open_state()! */
up_read(&clp->cl_sem);
- if (inode != NULL)
- iput(inode);
*res = NULL;
return status;
}
@@ -830,6 +1036,7 @@ static int _nfs4_do_setattr(struct nfs_server *server, struct nfs_fattr *fattr,
.rpc_argp = &arg,
.rpc_resp = &res,
};
+ unsigned long timestamp = jiffies;
int status;
nfs_fattr_init(fattr);
@@ -841,6 +1048,8 @@ static int _nfs4_do_setattr(struct nfs_server *server, struct nfs_fattr *fattr,
memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
status = rpc_call_sync(server->client, &msg, 0);
+ if (status == 0 && state != NULL)
+ renew_lease(server, timestamp);
return status;
}
@@ -865,12 +1074,13 @@ struct nfs4_closedata {
struct nfs_closeargs arg;
struct nfs_closeres res;
struct nfs_fattr fattr;
+ unsigned long timestamp;
};
-static void nfs4_free_closedata(struct nfs4_closedata *calldata)
+static void nfs4_free_closedata(void *data)
{
- struct nfs4_state *state = calldata->state;
- struct nfs4_state_owner *sp = state->owner;
+ struct nfs4_closedata *calldata = data;
+ struct nfs4_state_owner *sp = calldata->state->owner;
nfs4_put_open_state(calldata->state);
nfs_free_seqid(calldata->arg.seqid);
@@ -878,12 +1088,14 @@ static void nfs4_free_closedata(struct nfs4_closedata *calldata)
kfree(calldata);
}
-static void nfs4_close_done(struct rpc_task *task)
+static void nfs4_close_done(struct rpc_task *task, void *data)
{
- struct nfs4_closedata *calldata = (struct nfs4_closedata *)task->tk_calldata;
+ struct nfs4_closedata *calldata = data;
struct nfs4_state *state = calldata->state;
struct nfs_server *server = NFS_SERVER(calldata->inode);
+ if (RPC_ASSASSINATED(task))
+ return;
/* hmm. we are done with the inode, and in the process of freeing
* the state_owner. we keep this around to process errors
*/
@@ -892,6 +1104,7 @@ static void nfs4_close_done(struct rpc_task *task)
case 0:
memcpy(&state->stateid, &calldata->res.stateid,
sizeof(state->stateid));
+ renew_lease(server, calldata->timestamp);
break;
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
@@ -904,12 +1117,11 @@ static void nfs4_close_done(struct rpc_task *task)
}
}
nfs_refresh_inode(calldata->inode, calldata->res.fattr);
- nfs4_free_closedata(calldata);
}
-static void nfs4_close_begin(struct rpc_task *task)
+static void nfs4_close_prepare(struct rpc_task *task, void *data)
{
- struct nfs4_closedata *calldata = (struct nfs4_closedata *)task->tk_calldata;
+ struct nfs4_closedata *calldata = data;
struct nfs4_state *state = calldata->state;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
@@ -918,10 +1130,8 @@ static void nfs4_close_begin(struct rpc_task *task)
.rpc_cred = state->owner->so_cred,
};
int mode = 0, old_mode;
- int status;
- status = nfs_wait_on_sequence(calldata->arg.seqid, task);
- if (status != 0)
+ if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
return;
/* Recalculate the new open mode in case someone reopened the file
* while we were waiting in line to be scheduled.
@@ -929,26 +1139,34 @@ static void nfs4_close_begin(struct rpc_task *task)
spin_lock(&state->owner->so_lock);
spin_lock(&calldata->inode->i_lock);
mode = old_mode = state->state;
- if (state->nreaders == 0)
- mode &= ~FMODE_READ;
- if (state->nwriters == 0)
- mode &= ~FMODE_WRITE;
+ if (state->n_rdwr == 0) {
+ if (state->n_rdonly == 0)
+ mode &= ~FMODE_READ;
+ if (state->n_wronly == 0)
+ mode &= ~FMODE_WRITE;
+ }
nfs4_state_set_mode_locked(state, mode);
spin_unlock(&calldata->inode->i_lock);
spin_unlock(&state->owner->so_lock);
if (mode == old_mode || test_bit(NFS_DELEGATED_STATE, &state->flags)) {
- nfs4_free_closedata(calldata);
- task->tk_exit = NULL;
- rpc_exit(task, 0);
+ /* Note: exit _without_ calling nfs4_close_done */
+ task->tk_action = NULL;
return;
}
nfs_fattr_init(calldata->res.fattr);
if (mode != 0)
msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
calldata->arg.open_flags = mode;
+ calldata->timestamp = jiffies;
rpc_call_setup(task, &msg, 0);
}
+static const struct rpc_call_ops nfs4_close_ops = {
+ .rpc_call_prepare = nfs4_close_prepare,
+ .rpc_call_done = nfs4_close_done,
+ .rpc_release = nfs4_free_closedata,
+};
+
/*
* It is possible for data to be read/written from a mem-mapped file
* after the sys_close call (which hits the vfs layer as a flush).
@@ -981,8 +1199,7 @@ int nfs4_do_close(struct inode *inode, struct nfs4_state *state)
calldata->res.fattr = &calldata->fattr;
calldata->res.server = server;
- status = nfs4_call_async(server->client, nfs4_close_begin,
- nfs4_close_done, calldata);
+ status = nfs4_call_async(server->client, &nfs4_close_ops, calldata);
if (status == 0)
goto out;
@@ -1034,7 +1251,7 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
d_add(dentry, NULL);
return (struct dentry *)state;
}
- res = d_add_unique(dentry, state->inode);
+ res = d_add_unique(dentry, igrab(state->inode));
if (res != NULL)
dentry = res;
nfs4_intent_set_file(nd, dentry, state);
@@ -1046,7 +1263,6 @@ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, st
{
struct rpc_cred *cred;
struct nfs4_state *state;
- struct inode *inode;
cred = rpcauth_lookupcred(NFS_SERVER(dir)->client->cl_auth, 0);
if (IS_ERR(cred))
@@ -1070,9 +1286,7 @@ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, st
}
goto out_drop;
}
- inode = state->inode;
- iput(inode);
- if (inode == dentry->d_inode) {
+ if (state->inode == dentry->d_inode) {
nfs4_intent_set_file(nd, dentry, state);
return 1;
}
@@ -1508,11 +1722,13 @@ static int _nfs4_proc_write(struct nfs_write_data *wdata)
wdata->args.bitmask = server->attr_bitmask;
wdata->res.server = server;
+ wdata->timestamp = jiffies;
nfs_fattr_init(fattr);
status = rpc_call_sync(server->client, &msg, rpcflags);
dprintk("NFS reply write: %d\n", status);
if (status < 0)
return status;
+ renew_lease(server, wdata->timestamp);
nfs_post_op_update_inode(inode, fattr);
return wdata->res.count;
}
@@ -1547,8 +1763,11 @@ static int _nfs4_proc_commit(struct nfs_write_data *cdata)
cdata->args.bitmask = server->attr_bitmask;
cdata->res.server = server;
+ cdata->timestamp = jiffies;
nfs_fattr_init(fattr);
status = rpc_call_sync(server->client, &msg, 0);
+ if (status >= 0)
+ renew_lease(server, cdata->timestamp);
dprintk("NFS reply commit: %d\n", status);
if (status >= 0)
nfs_post_op_update_inode(inode, fattr);
@@ -1601,7 +1820,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
status = PTR_ERR(state);
goto out;
}
- d_instantiate(dentry, state->inode);
+ d_instantiate(dentry, igrab(state->inode));
if (flags & O_EXCL) {
struct nfs_fattr fattr;
status = nfs4_do_setattr(NFS_SERVER(dir), &fattr,
@@ -2125,10 +2344,9 @@ static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
return err;
}
-static void
-nfs4_read_done(struct rpc_task *task)
+static void nfs4_read_done(struct rpc_task *task, void *calldata)
{
- struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata;
+ struct nfs_read_data *data = calldata;
struct inode *inode = data->inode;
if (nfs4_async_handle_error(task, NFS_SERVER(inode)) == -EAGAIN) {
@@ -2138,9 +2356,14 @@ nfs4_read_done(struct rpc_task *task)
if (task->tk_status > 0)
renew_lease(NFS_SERVER(inode), data->timestamp);
/* Call back common NFS readpage processing */
- nfs_readpage_result(task);
+ nfs_readpage_result(task, calldata);
}
+static const struct rpc_call_ops nfs4_read_ops = {
+ .rpc_call_done = nfs4_read_done,
+ .rpc_release = nfs_readdata_release,
+};
+
static void
nfs4_proc_read_setup(struct nfs_read_data *data)
{
@@ -2160,14 +2383,13 @@ nfs4_proc_read_setup(struct nfs_read_data *data)
flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
/* Finalize the task. */
- rpc_init_task(task, NFS_CLIENT(inode), nfs4_read_done, flags);
+ rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs4_read_ops, data);
rpc_call_setup(task, &msg, 0);
}
-static void
-nfs4_write_done(struct rpc_task *task)
+static void nfs4_write_done(struct rpc_task *task, void *calldata)
{
- struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
+ struct nfs_write_data *data = calldata;
struct inode *inode = data->inode;
if (nfs4_async_handle_error(task, NFS_SERVER(inode)) == -EAGAIN) {
@@ -2179,9 +2401,14 @@ nfs4_write_done(struct rpc_task *task)
nfs_post_op_update_inode(inode, data->res.fattr);
}
/* Call back common NFS writeback processing */
- nfs_writeback_done(task);
+ nfs_writeback_done(task, calldata);
}
+static const struct rpc_call_ops nfs4_write_ops = {
+ .rpc_call_done = nfs4_write_done,
+ .rpc_release = nfs_writedata_release,
+};
+
static void
nfs4_proc_write_setup(struct nfs_write_data *data, int how)
{
@@ -2214,14 +2441,13 @@ nfs4_proc_write_setup(struct nfs_write_data *data, int how)
flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
/* Finalize the task. */
- rpc_init_task(task, NFS_CLIENT(inode), nfs4_write_done, flags);
+ rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs4_write_ops, data);
rpc_call_setup(task, &msg, 0);
}
-static void
-nfs4_commit_done(struct rpc_task *task)
+static void nfs4_commit_done(struct rpc_task *task, void *calldata)
{
- struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
+ struct nfs_write_data *data = calldata;
struct inode *inode = data->inode;
if (nfs4_async_handle_error(task, NFS_SERVER(inode)) == -EAGAIN) {
@@ -2231,9 +2457,14 @@ nfs4_commit_done(struct rpc_task *task)
if (task->tk_status >= 0)
nfs_post_op_update_inode(inode, data->res.fattr);
/* Call back common NFS writeback processing */
- nfs_commit_done(task);
+ nfs_commit_done(task, calldata);
}
+static const struct rpc_call_ops nfs4_commit_ops = {
+ .rpc_call_done = nfs4_commit_done,
+ .rpc_release = nfs_commit_release,
+};
+
static void
nfs4_proc_commit_setup(struct nfs_write_data *data, int how)
{
@@ -2255,7 +2486,7 @@ nfs4_proc_commit_setup(struct nfs_write_data *data, int how)
flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
/* Finalize the task. */
- rpc_init_task(task, NFS_CLIENT(inode), nfs4_commit_done, flags);
+ rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs4_commit_ops, data);
rpc_call_setup(task, &msg, 0);
}
@@ -2263,11 +2494,10 @@ nfs4_proc_commit_setup(struct nfs_write_data *data, int how)
* nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
* standalone procedure for queueing an asynchronous RENEW.
*/
-static void
-renew_done(struct rpc_task *task)
+static void nfs4_renew_done(struct rpc_task *task, void *data)
{
struct nfs4_client *clp = (struct nfs4_client *)task->tk_msg.rpc_argp;
- unsigned long timestamp = (unsigned long)task->tk_calldata;
+ unsigned long timestamp = (unsigned long)data;
if (task->tk_status < 0) {
switch (task->tk_status) {
@@ -2284,26 +2514,28 @@ renew_done(struct rpc_task *task)
spin_unlock(&clp->cl_lock);
}
-int
-nfs4_proc_async_renew(struct nfs4_client *clp)
+static const struct rpc_call_ops nfs4_renew_ops = {
+ .rpc_call_done = nfs4_renew_done,
+};
+
+int nfs4_proc_async_renew(struct nfs4_client *clp, struct rpc_cred *cred)
{
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
.rpc_argp = clp,
- .rpc_cred = clp->cl_cred,
+ .rpc_cred = cred,
};
return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
- renew_done, (void *)jiffies);
+ &nfs4_renew_ops, (void *)jiffies);
}
-int
-nfs4_proc_renew(struct nfs4_client *clp)
+int nfs4_proc_renew(struct nfs4_client *clp, struct rpc_cred *cred)
{
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
.rpc_argp = clp,
- .rpc_cred = clp->cl_cred,
+ .rpc_cred = cred,
};
unsigned long now = jiffies;
int status;
@@ -2519,7 +2751,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server)
case -NFS4ERR_EXPIRED:
rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL, NULL);
nfs4_schedule_state_recovery(clp);
- if (test_bit(NFS4CLNT_OK, &clp->cl_state))
+ if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
rpc_wake_up_task(task);
task->tk_status = 0;
return -EAGAIN;
@@ -2536,25 +2768,25 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server)
return 0;
}
+static int nfs4_wait_bit_interruptible(void *word)
+{
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ schedule();
+ return 0;
+}
+
static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs4_client *clp)
{
- DEFINE_WAIT(wait);
sigset_t oldset;
- int interruptible, res = 0;
+ int res;
might_sleep();
rpc_clnt_sigmask(clnt, &oldset);
- interruptible = TASK_UNINTERRUPTIBLE;
- if (clnt->cl_intr)
- interruptible = TASK_INTERRUPTIBLE;
- prepare_to_wait(&clp->cl_waitq, &wait, interruptible);
- nfs4_schedule_state_recovery(clp);
- if (clnt->cl_intr && signalled())
- res = -ERESTARTSYS;
- else if (!test_bit(NFS4CLNT_OK, &clp->cl_state))
- schedule();
- finish_wait(&clp->cl_waitq, &wait);
+ res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER,
+ nfs4_wait_bit_interruptible,
+ TASK_INTERRUPTIBLE);
rpc_clnt_sigunmask(clnt, &oldset);
return res;
}
@@ -2597,6 +2829,7 @@ int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
+ nfs4_schedule_state_recovery(clp);
ret = nfs4_wait_clnt_recover(server->client, clp);
if (ret == 0)
exception->retry = 1;
@@ -2613,7 +2846,7 @@ int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct
return nfs4_map_errors(ret);
}
-int nfs4_proc_setclientid(struct nfs4_client *clp, u32 program, unsigned short port)
+int nfs4_proc_setclientid(struct nfs4_client *clp, u32 program, unsigned short port, struct rpc_cred *cred)
{
nfs4_verifier sc_verifier;
struct nfs4_setclientid setclientid = {
@@ -2624,7 +2857,7 @@ int nfs4_proc_setclientid(struct nfs4_client *clp, u32 program, unsigned short p
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
.rpc_argp = &setclientid,
.rpc_resp = clp,
- .rpc_cred = clp->cl_cred,
+ .rpc_cred = cred,
};
u32 *p;
int loop = 0;
@@ -2638,7 +2871,7 @@ int nfs4_proc_setclientid(struct nfs4_client *clp, u32 program, unsigned short p
setclientid.sc_name_len = scnprintf(setclientid.sc_name,
sizeof(setclientid.sc_name), "%s/%u.%u.%u.%u %s %u",
clp->cl_ipaddr, NIPQUAD(clp->cl_addr.s_addr),
- clp->cl_cred->cr_ops->cr_name,
+ cred->cr_ops->cr_name,
clp->cl_id_uniquifier);
setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
sizeof(setclientid.sc_netid), "tcp");
@@ -2661,14 +2894,14 @@ int nfs4_proc_setclientid(struct nfs4_client *clp, u32 program, unsigned short p
}
int
-nfs4_proc_setclientid_confirm(struct nfs4_client *clp)
+nfs4_proc_setclientid_confirm(struct nfs4_client *clp, struct rpc_cred *cred)
{
struct nfs_fsinfo fsinfo;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
.rpc_argp = clp,
.rpc_resp = &fsinfo,
- .rpc_cred = clp->cl_cred,
+ .rpc_cred = cred,
};
unsigned long now;
int status;
@@ -2679,24 +2912,92 @@ nfs4_proc_setclientid_confirm(struct nfs4_client *clp)
spin_lock(&clp->cl_lock);
clp->cl_lease_time = fsinfo.lease_time * HZ;
clp->cl_last_renewal = now;
+ clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
spin_unlock(&clp->cl_lock);
}
return status;
}
-static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid)
+struct nfs4_delegreturndata {
+ struct nfs4_delegreturnargs args;
+ struct nfs4_delegreturnres res;
+ struct nfs_fh fh;
+ nfs4_stateid stateid;
+ struct rpc_cred *cred;
+ unsigned long timestamp;
+ struct nfs_fattr fattr;
+ int rpc_status;
+};
+
+static void nfs4_delegreturn_prepare(struct rpc_task *task, void *calldata)
{
- struct nfs4_delegreturnargs args = {
- .fhandle = NFS_FH(inode),
- .stateid = stateid,
- };
+ struct nfs4_delegreturndata *data = calldata;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
- .rpc_argp = &args,
- .rpc_cred = cred,
+ .rpc_argp = &data->args,
+ .rpc_resp = &data->res,
+ .rpc_cred = data->cred,
};
+ nfs_fattr_init(data->res.fattr);
+ rpc_call_setup(task, &msg, 0);
+}
- return rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_delegreturndata *data = calldata;
+ data->rpc_status = task->tk_status;
+ if (data->rpc_status == 0)
+ renew_lease(data->res.server, data->timestamp);
+}
+
+static void nfs4_delegreturn_release(void *calldata)
+{
+ struct nfs4_delegreturndata *data = calldata;
+
+ put_rpccred(data->cred);
+ kfree(calldata);
+}
+
+static const struct rpc_call_ops nfs4_delegreturn_ops = {
+ .rpc_call_prepare = nfs4_delegreturn_prepare,
+ .rpc_call_done = nfs4_delegreturn_done,
+ .rpc_release = nfs4_delegreturn_release,
+};
+
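+/* Send DELEGRETURN as an asynchronous RPC, wait for it, and update the inode attributes from the returned fattr */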
+static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid)
+{
+ struct nfs4_delegreturndata *data;
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct rpc_task *task;
+ int status;
+
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+ data->args.fhandle = &data->fh;
+ data->args.stateid = &data->stateid;
+ data->args.bitmask = server->attr_bitmask;
+ nfs_copy_fh(&data->fh, NFS_FH(inode));
+ memcpy(&data->stateid, stateid, sizeof(data->stateid));
+ data->res.fattr = &data->fattr;
+ data->res.server = server;
+ data->cred = get_rpccred(cred);
+ data->timestamp = jiffies;
+ data->rpc_status = 0;
+
+ task = rpc_run_task(NFS_CLIENT(inode), RPC_TASK_ASYNC, &nfs4_delegreturn_ops, data);
+ if (IS_ERR(task)) {
+ nfs4_delegreturn_release(data);
+ return PTR_ERR(task);
+ }
+ status = nfs4_wait_for_completion_rpc_task(task);
+ if (status == 0) {
+ status = data->rpc_status;
+ if (status == 0)
+ nfs_post_op_update_inode(inode, &data->fattr);
+ }
+ rpc_release_task(task);
+ return status;
}
int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid)
@@ -2734,43 +3035,17 @@ nfs4_set_lock_task_retry(unsigned long timeout)
return timeout;
}
-static inline int
-nfs4_lck_type(int cmd, struct file_lock *request)
-{
- /* set lock type */
- switch (request->fl_type) {
- case F_RDLCK:
- return IS_SETLKW(cmd) ? NFS4_READW_LT : NFS4_READ_LT;
- case F_WRLCK:
- return IS_SETLKW(cmd) ? NFS4_WRITEW_LT : NFS4_WRITE_LT;
- case F_UNLCK:
- return NFS4_WRITE_LT;
- }
- BUG();
- return 0;
-}
-
-static inline uint64_t
-nfs4_lck_length(struct file_lock *request)
-{
- if (request->fl_end == OFFSET_MAX)
- return ~(uint64_t)0;
- return request->fl_end - request->fl_start + 1;
-}
-
static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
struct inode *inode = state->inode;
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_client *clp = server->nfs4_state;
- struct nfs_lockargs arg = {
+ struct nfs_lockt_args arg = {
.fh = NFS_FH(inode),
- .type = nfs4_lck_type(cmd, request),
- .offset = request->fl_start,
- .length = nfs4_lck_length(request),
+ .fl = request,
};
- struct nfs_lockres res = {
- .server = server,
+ struct nfs_lockt_res res = {
+ .denied = request,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
@@ -2778,36 +3053,23 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
.rpc_resp = &res,
.rpc_cred = state->owner->so_cred,
};
- struct nfs_lowner nlo;
struct nfs4_lock_state *lsp;
int status;
down_read(&clp->cl_sem);
- nlo.clientid = clp->cl_clientid;
+ arg.lock_owner.clientid = clp->cl_clientid;
status = nfs4_set_lock_state(state, request);
if (status != 0)
goto out;
lsp = request->fl_u.nfs4_fl.owner;
- nlo.id = lsp->ls_id;
- arg.u.lockt = &nlo;
+ arg.lock_owner.id = lsp->ls_id;
status = rpc_call_sync(server->client, &msg, 0);
- if (!status) {
- request->fl_type = F_UNLCK;
- } else if (status == -NFS4ERR_DENIED) {
- int64_t len, start, end;
- start = res.u.denied.offset;
- len = res.u.denied.length;
- end = start + len - 1;
- if (end < 0 || len == 0)
- request->fl_end = OFFSET_MAX;
- else
- request->fl_end = (loff_t)end;
- request->fl_start = (loff_t)start;
- request->fl_type = F_WRLCK;
- if (res.u.denied.type & 1)
- request->fl_type = F_RDLCK;
- request->fl_pid = 0;
- status = 0;
+ switch (status) {
+ case 0:
+ request->fl_type = F_UNLCK;
+ break;
+ case -NFS4ERR_DENIED:
+ status = 0;
}
out:
up_read(&clp->cl_sem);
@@ -2847,196 +3109,314 @@ static int do_vfs_lock(struct file *file, struct file_lock *fl)
}
struct nfs4_unlockdata {
- struct nfs_lockargs arg;
- struct nfs_locku_opargs luargs;
- struct nfs_lockres res;
+ struct nfs_locku_args arg;
+ struct nfs_locku_res res;
struct nfs4_lock_state *lsp;
struct nfs_open_context *ctx;
- atomic_t refcount;
- struct completion completion;
+ struct file_lock fl;
+ const struct nfs_server *server;
+ unsigned long timestamp;
};
-static void nfs4_locku_release_calldata(struct nfs4_unlockdata *calldata)
-{
- if (atomic_dec_and_test(&calldata->refcount)) {
- nfs_free_seqid(calldata->luargs.seqid);
- nfs4_put_lock_state(calldata->lsp);
- put_nfs_open_context(calldata->ctx);
- kfree(calldata);
- }
+static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
+ struct nfs_open_context *ctx,
+ struct nfs4_lock_state *lsp,
+ struct nfs_seqid *seqid)
+{
+ struct nfs4_unlockdata *p;
+ struct inode *inode = lsp->ls_state->inode;
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL)
+ return NULL;
+ p->arg.fh = NFS_FH(inode);
+ p->arg.fl = &p->fl;
+ p->arg.seqid = seqid;
+ p->arg.stateid = &lsp->ls_stateid;
+ p->lsp = lsp;
+ atomic_inc(&lsp->ls_count);
+ /* Ensure we don't close file until we're done freeing locks! */
+ p->ctx = get_nfs_open_context(ctx);
+ memcpy(&p->fl, fl, sizeof(p->fl));
+ p->server = NFS_SERVER(inode);
+ return p;
}
-static void nfs4_locku_complete(struct nfs4_unlockdata *calldata)
+static void nfs4_locku_release_calldata(void *data)
{
- complete(&calldata->completion);
- nfs4_locku_release_calldata(calldata);
+ struct nfs4_unlockdata *calldata = data;
+ nfs_free_seqid(calldata->arg.seqid);
+ nfs4_put_lock_state(calldata->lsp);
+ put_nfs_open_context(calldata->ctx);
+ kfree(calldata);
}
-static void nfs4_locku_done(struct rpc_task *task)
+static void nfs4_locku_done(struct rpc_task *task, void *data)
{
- struct nfs4_unlockdata *calldata = (struct nfs4_unlockdata *)task->tk_calldata;
+ struct nfs4_unlockdata *calldata = data;
- nfs_increment_lock_seqid(task->tk_status, calldata->luargs.seqid);
+ if (RPC_ASSASSINATED(task))
+ return;
+ nfs_increment_lock_seqid(task->tk_status, calldata->arg.seqid);
switch (task->tk_status) {
case 0:
memcpy(calldata->lsp->ls_stateid.data,
- calldata->res.u.stateid.data,
+ calldata->res.stateid.data,
sizeof(calldata->lsp->ls_stateid.data));
+ renew_lease(calldata->server, calldata->timestamp);
break;
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
- nfs4_schedule_state_recovery(calldata->res.server->nfs4_state);
+ nfs4_schedule_state_recovery(calldata->server->nfs4_state);
break;
default:
- if (nfs4_async_handle_error(task, calldata->res.server) == -EAGAIN) {
+ if (nfs4_async_handle_error(task, calldata->server) == -EAGAIN) {
rpc_restart_call(task);
- return;
}
}
- nfs4_locku_complete(calldata);
}
-static void nfs4_locku_begin(struct rpc_task *task)
+static void nfs4_locku_prepare(struct rpc_task *task, void *data)
{
- struct nfs4_unlockdata *calldata = (struct nfs4_unlockdata *)task->tk_calldata;
+ struct nfs4_unlockdata *calldata = data;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
.rpc_argp = &calldata->arg,
.rpc_resp = &calldata->res,
.rpc_cred = calldata->lsp->ls_state->owner->so_cred,
};
- int status;
- status = nfs_wait_on_sequence(calldata->luargs.seqid, task);
- if (status != 0)
+ if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
return;
if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
- nfs4_locku_complete(calldata);
- task->tk_exit = NULL;
- rpc_exit(task, 0);
+ /* Note: exit _without_ running nfs4_locku_done */
+ task->tk_action = NULL;
return;
}
+ calldata->timestamp = jiffies;
rpc_call_setup(task, &msg, 0);
}
+static const struct rpc_call_ops nfs4_locku_ops = {
+ .rpc_call_prepare = nfs4_locku_prepare,
+ .rpc_call_done = nfs4_locku_done,
+ .rpc_release = nfs4_locku_release_calldata,
+};
+
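+/* Set up the LOCKU call data, release the VFS lock locally, then start the asynchronous unlock RPC */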
+static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
+ struct nfs_open_context *ctx,
+ struct nfs4_lock_state *lsp,
+ struct nfs_seqid *seqid)
+{
+ struct nfs4_unlockdata *data;
+ struct rpc_task *task;
+
+ data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
+ if (data == NULL) {
+ nfs_free_seqid(seqid);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Unlock _before_ we do the RPC call */
+ do_vfs_lock(fl->fl_file, fl);
+ task = rpc_run_task(NFS_CLIENT(lsp->ls_state->inode), RPC_TASK_ASYNC, &nfs4_locku_ops, data);
+ if (IS_ERR(task))
+ nfs4_locku_release_calldata(data);
+ return task;
+}
+
static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
{
- struct nfs4_unlockdata *calldata;
- struct inode *inode = state->inode;
- struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs_seqid *seqid;
struct nfs4_lock_state *lsp;
- int status;
+ struct rpc_task *task;
+ int status = 0;
/* Is this a delegated lock? */
if (test_bit(NFS_DELEGATED_STATE, &state->flags))
- return do_vfs_lock(request->fl_file, request);
+ goto out_unlock;
+ /* Is this open_owner holding any locks on the server? */
+ if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
+ goto out_unlock;
status = nfs4_set_lock_state(state, request);
if (status != 0)
- return status;
+ goto out_unlock;
lsp = request->fl_u.nfs4_fl.owner;
- /* We might have lost the locks! */
- if ((lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0)
- return 0;
- calldata = kmalloc(sizeof(*calldata), GFP_KERNEL);
- if (calldata == NULL)
- return -ENOMEM;
- calldata->luargs.seqid = nfs_alloc_seqid(&lsp->ls_seqid);
- if (calldata->luargs.seqid == NULL) {
- kfree(calldata);
- return -ENOMEM;
- }
- calldata->luargs.stateid = &lsp->ls_stateid;
- calldata->arg.fh = NFS_FH(inode);
- calldata->arg.type = nfs4_lck_type(cmd, request);
- calldata->arg.offset = request->fl_start;
- calldata->arg.length = nfs4_lck_length(request);
- calldata->arg.u.locku = &calldata->luargs;
- calldata->res.server = server;
- calldata->lsp = lsp;
- atomic_inc(&lsp->ls_count);
-
- /* Ensure we don't close file until we're done freeing locks! */
- calldata->ctx = get_nfs_open_context((struct nfs_open_context*)request->fl_file->private_data);
-
- atomic_set(&calldata->refcount, 2);
- init_completion(&calldata->completion);
-
- status = nfs4_call_async(NFS_SERVER(inode)->client, nfs4_locku_begin,
- nfs4_locku_done, calldata);
- if (status == 0)
- wait_for_completion_interruptible(&calldata->completion);
+ status = -ENOMEM;
+ seqid = nfs_alloc_seqid(&lsp->ls_seqid);
+ if (seqid == NULL)
+ goto out_unlock;
+ task = nfs4_do_unlck(request, request->fl_file->private_data, lsp, seqid);
+ status = PTR_ERR(task);
+ if (IS_ERR(task))
+ goto out_unlock;
+ status = nfs4_wait_for_completion_rpc_task(task);
+ rpc_release_task(task);
+ return status;
+out_unlock:
do_vfs_lock(request->fl_file, request);
- nfs4_locku_release_calldata(calldata);
return status;
}
-static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *request, int reclaim)
+struct nfs4_lockdata {
+ struct nfs_lock_args arg;
+ struct nfs_lock_res res;
+ struct nfs4_lock_state *lsp;
+ struct nfs_open_context *ctx;
+ struct file_lock fl;
+ unsigned long timestamp;
+ int rpc_status;
+ int cancelled;
+};
+
+static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
+ struct nfs_open_context *ctx, struct nfs4_lock_state *lsp)
{
- struct inode *inode = state->inode;
+ struct nfs4_lockdata *p;
+ struct inode *inode = lsp->ls_state->inode;
struct nfs_server *server = NFS_SERVER(inode);
- struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
- struct nfs_lock_opargs largs = {
- .lock_stateid = &lsp->ls_stateid,
- .open_stateid = &state->stateid,
- .lock_owner = {
- .clientid = server->nfs4_state->cl_clientid,
- .id = lsp->ls_id,
- },
- .reclaim = reclaim,
- };
- struct nfs_lockargs arg = {
- .fh = NFS_FH(inode),
- .type = nfs4_lck_type(cmd, request),
- .offset = request->fl_start,
- .length = nfs4_lck_length(request),
- .u = {
- .lock = &largs,
- },
- };
- struct nfs_lockres res = {
- .server = server,
- };
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL)
+ return NULL;
+
+ p->arg.fh = NFS_FH(inode);
+ p->arg.fl = &p->fl;
+ p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid);
+ if (p->arg.lock_seqid == NULL)
+ goto out_free;
+ p->arg.lock_stateid = &lsp->ls_stateid;
+ p->arg.lock_owner.clientid = server->nfs4_state->cl_clientid;
+ p->arg.lock_owner.id = lsp->ls_id;
+ p->lsp = lsp;
+ atomic_inc(&lsp->ls_count);
+ p->ctx = get_nfs_open_context(ctx);
+ memcpy(&p->fl, fl, sizeof(p->fl));
+ return p;
+out_free:
+ kfree(p);
+ return NULL;
+}
+
+static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_lockdata *data = calldata;
+ struct nfs4_state *state = data->lsp->ls_state;
+ struct nfs4_state_owner *sp = state->owner;
struct rpc_message msg = {
- .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
- .rpc_argp = &arg,
- .rpc_resp = &res,
- .rpc_cred = state->owner->so_cred,
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
+ .rpc_argp = &data->arg,
+ .rpc_resp = &data->res,
+ .rpc_cred = sp->so_cred,
};
- int status = -ENOMEM;
-
- largs.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid);
- if (largs.lock_seqid == NULL)
- return -ENOMEM;
- if (!(lsp->ls_seqid.flags & NFS_SEQID_CONFIRMED)) {
- struct nfs4_state_owner *owner = state->owner;
- largs.open_seqid = nfs_alloc_seqid(&owner->so_seqid);
- if (largs.open_seqid == NULL)
+ if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
+ return;
+ dprintk("%s: begin!\n", __FUNCTION__);
+ /* Do we need to do an open_to_lock_owner? */
+ if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
+ data->arg.open_seqid = nfs_alloc_seqid(&sp->so_seqid);
+ if (data->arg.open_seqid == NULL) {
+ data->rpc_status = -ENOMEM;
+ task->tk_action = NULL;
goto out;
- largs.new_lock_owner = 1;
- status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
- /* increment open seqid on success, and seqid mutating errors */
- if (largs.new_lock_owner != 0) {
- nfs_increment_open_seqid(status, largs.open_seqid);
- if (status == 0)
- nfs_confirm_seqid(&lsp->ls_seqid, 0);
}
- nfs_free_seqid(largs.open_seqid);
- } else
- status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
- /* increment lock seqid on success, and seqid mutating errors*/
- nfs_increment_lock_seqid(status, largs.lock_seqid);
- /* save the returned stateid. */
- if (status == 0) {
- memcpy(lsp->ls_stateid.data, res.u.stateid.data,
- sizeof(lsp->ls_stateid.data));
- lsp->ls_flags |= NFS_LOCK_INITIALIZED;
- } else if (status == -NFS4ERR_DENIED)
- status = -EAGAIN;
+ data->arg.open_stateid = &state->stateid;
+ data->arg.new_lock_owner = 1;
+ }
+ data->timestamp = jiffies;
+ rpc_call_setup(task, &msg, 0);
out:
- nfs_free_seqid(largs.lock_seqid);
- return status;
+ dprintk("%s: done!, ret = %d\n", __FUNCTION__, data->rpc_status);
+}
+
+static void nfs4_lock_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_lockdata *data = calldata;
+
+ dprintk("%s: begin!\n", __FUNCTION__);
+
+ data->rpc_status = task->tk_status;
+ if (RPC_ASSASSINATED(task))
+ goto out;
+ if (data->arg.new_lock_owner != 0) {
+ nfs_increment_open_seqid(data->rpc_status, data->arg.open_seqid);
+ if (data->rpc_status == 0)
+ nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
+ else
+ goto out;
+ }
+ if (data->rpc_status == 0) {
+ memcpy(data->lsp->ls_stateid.data, data->res.stateid.data,
+ sizeof(data->lsp->ls_stateid.data));
+ data->lsp->ls_flags |= NFS_LOCK_INITIALIZED;
+ renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
+ }
+ nfs_increment_lock_seqid(data->rpc_status, data->arg.lock_seqid);
+out:
+ dprintk("%s: done, ret = %d!\n", __FUNCTION__, data->rpc_status);
+}
+
+static void nfs4_lock_release(void *calldata)
+{
+ struct nfs4_lockdata *data = calldata;
+
+ dprintk("%s: begin!\n", __FUNCTION__);
+ if (data->arg.open_seqid != NULL)
+ nfs_free_seqid(data->arg.open_seqid);
+ if (data->cancelled != 0) {
+ struct rpc_task *task;
+ task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
+ data->arg.lock_seqid);
+ if (!IS_ERR(task))
+ rpc_release_task(task);
+ dprintk("%s: cancelling lock!\n", __FUNCTION__);
+ } else
+ nfs_free_seqid(data->arg.lock_seqid);
+ nfs4_put_lock_state(data->lsp);
+ put_nfs_open_context(data->ctx);
+ kfree(data);
+ dprintk("%s: done!\n", __FUNCTION__);
+}
+
+static const struct rpc_call_ops nfs4_lock_ops = {
+ .rpc_call_prepare = nfs4_lock_prepare,
+ .rpc_call_done = nfs4_lock_done,
+ .rpc_release = nfs4_lock_release,
+};
+
+static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int reclaim)
+{
+ struct nfs4_lockdata *data;
+ struct rpc_task *task;
+ int ret;
+
+ dprintk("%s: begin!\n", __FUNCTION__);
+ data = nfs4_alloc_lockdata(fl, fl->fl_file->private_data,
+ fl->fl_u.nfs4_fl.owner);
+ if (data == NULL)
+ return -ENOMEM;
+ if (IS_SETLKW(cmd))
+ data->arg.block = 1;
+ if (reclaim != 0)
+ data->arg.reclaim = 1;
+ task = rpc_run_task(NFS_CLIENT(state->inode), RPC_TASK_ASYNC,
+ &nfs4_lock_ops, data);
+ if (IS_ERR(task)) {
+ nfs4_lock_release(data);
+ return PTR_ERR(task);
+ }
+ ret = nfs4_wait_for_completion_rpc_task(task);
+ if (ret == 0) {
+ ret = data->rpc_status;
+ if (ret == -NFS4ERR_DENIED)
+ ret = -EAGAIN;
+ } else
+ data->cancelled = 1;
+ rpc_release_task(task);
+ dprintk("%s: done, ret = %d!\n", __FUNCTION__, ret);
+ return ret;
}
static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
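The block above finishes converting the NFSv4 LOCK call from a synchronous rpc_call_sync() into an interruptible asynchronous task: the request state moves into a kmalloc'd nfs4_lockdata, seqid handling moves into .rpc_call_prepare, and .rpc_release cleans up even when the waiter gives up, issuing a compensating unlock if the LOCK had already been put on the wire. A minimal sketch of the same rpc_call_ops pattern follows; every "my_" name is a placeholder for illustration, not something this patch introduces.

	struct my_lockdata {
		struct nfs_lock_args arg;
		struct nfs_lock_res res;
		int status;
	};

	static void my_prepare(struct rpc_task *task, void *calldata)
	{
		struct my_lockdata *d = calldata;
		struct rpc_message msg = {
			.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
			.rpc_argp = &d->arg,
			.rpc_resp = &d->res,
		};

		rpc_call_setup(task, &msg, 0);	/* bind args/results to this task */
	}

	static void my_done(struct rpc_task *task, void *calldata)
	{
		struct my_lockdata *d = calldata;

		d->status = task->tk_status;	/* stash the result for the waiter */
	}

	static void my_release(void *calldata)
	{
		kfree(calldata);		/* runs even if the waiter was interrupted */
	}

	static const struct rpc_call_ops my_ops = {
		.rpc_call_prepare = my_prepare,
		.rpc_call_done    = my_done,
		.rpc_release      = my_release,
	};

The caller starts such a task with rpc_run_task(clnt, RPC_TASK_ASYNC, &my_ops, data), waits via nfs4_wait_for_completion_rpc_task(), and relies on .rpc_release for cleanup whether or not the wait completed -- which is exactly what lets _nfs4_do_setlk() above mark the request cancelled and still unwind safely.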
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index a3001628ad3..5d764d8e6d8 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -54,6 +54,7 @@
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
+#include "delegation.h"
#define NFSDBG_FACILITY NFSDBG_PROC
@@ -61,6 +62,7 @@ void
nfs4_renew_state(void *data)
{
struct nfs4_client *clp = (struct nfs4_client *)data;
+ struct rpc_cred *cred;
long lease, timeout;
unsigned long last, now;
@@ -68,7 +70,7 @@ nfs4_renew_state(void *data)
dprintk("%s: start\n", __FUNCTION__);
/* Are there any active superblocks? */
if (list_empty(&clp->cl_superblocks))
- goto out;
+ goto out;
spin_lock(&clp->cl_lock);
lease = clp->cl_lease_time;
last = clp->cl_last_renewal;
@@ -76,9 +78,17 @@ nfs4_renew_state(void *data)
timeout = (2 * lease) / 3 + (long)last - (long)now;
/* Are we close to a lease timeout? */
if (time_after(now, last + lease/3)) {
+ cred = nfs4_get_renew_cred(clp);
+ if (cred == NULL) {
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ spin_unlock(&clp->cl_lock);
+ nfs_expire_all_delegations(clp);
+ goto out;
+ }
spin_unlock(&clp->cl_lock);
/* Queue an asynchronous RENEW. */
- nfs4_proc_async_renew(clp);
+ nfs4_proc_async_renew(clp, cred);
+ put_rpccred(cred);
timeout = (2 * lease) / 3;
spin_lock(&clp->cl_lock);
} else
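Two things change in the renewd path: nfs4_renew_state() must now borrow an rpc_cred from one of the client's state owners before it may send RENEW, and if no credential is available it lets the lease lapse (setting NFS4CLNT_LEASE_EXPIRED) and expires all delegations instead. The re-arm arithmetic itself is untouched; restated as a standalone helper for reference (illustration only, all values in the same jiffies-like unit):

	static long renew_timeout(long lease, long last, long now)
	{
		if (now > last + lease / 3)		/* a third of the lease is gone */
			return (2 * lease) / 3;		/* send RENEW now, re-arm at 2/3 */
		return (2 * lease) / 3 + last - now;	/* otherwise sleep out the remainder */
	}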
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 5ef4c57618f..afad0255e7d 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -43,6 +43,8 @@
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
@@ -57,8 +59,6 @@ const nfs4_stateid zero_stateid;
static DEFINE_SPINLOCK(state_spinlock);
static LIST_HEAD(nfs4_clientid_list);
-static void nfs4_recover_state(void *);
-
void
init_nfsv4_state(struct nfs_server *server)
{
@@ -91,11 +91,10 @@ nfs4_alloc_client(struct in_addr *addr)
if (nfs_callback_up() < 0)
return NULL;
- if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
+ if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
nfs_callback_down();
return NULL;
}
- memset(clp, 0, sizeof(*clp));
memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
init_rwsem(&clp->cl_sem);
INIT_LIST_HEAD(&clp->cl_delegations);
@@ -103,14 +102,12 @@ nfs4_alloc_client(struct in_addr *addr)
INIT_LIST_HEAD(&clp->cl_unused);
spin_lock_init(&clp->cl_lock);
atomic_set(&clp->cl_count, 1);
- INIT_WORK(&clp->cl_recoverd, nfs4_recover_state, clp);
INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
INIT_LIST_HEAD(&clp->cl_superblocks);
- init_waitqueue_head(&clp->cl_waitq);
rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
clp->cl_rpcclient = ERR_PTR(-EINVAL);
clp->cl_boot_time = CURRENT_TIME;
- clp->cl_state = 1 << NFS4CLNT_OK;
+ clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
return clp;
}
@@ -127,8 +124,6 @@ nfs4_free_client(struct nfs4_client *clp)
kfree(sp);
}
BUG_ON(!list_empty(&clp->cl_state_owners));
- if (clp->cl_cred)
- put_rpccred(clp->cl_cred);
nfs_idmap_delete(clp);
if (!IS_ERR(clp->cl_rpcclient))
rpc_shutdown_client(clp->cl_rpcclient);
@@ -193,27 +188,22 @@ nfs4_put_client(struct nfs4_client *clp)
list_del(&clp->cl_servers);
spin_unlock(&state_spinlock);
BUG_ON(!list_empty(&clp->cl_superblocks));
- wake_up_all(&clp->cl_waitq);
rpc_wake_up(&clp->cl_rpcwaitq);
nfs4_kill_renewd(clp);
nfs4_free_client(clp);
}
-static int __nfs4_init_client(struct nfs4_client *clp)
+static int nfs4_init_client(struct nfs4_client *clp, struct rpc_cred *cred)
{
- int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, nfs_callback_tcpport);
+ int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK,
+ nfs_callback_tcpport, cred);
if (status == 0)
- status = nfs4_proc_setclientid_confirm(clp);
+ status = nfs4_proc_setclientid_confirm(clp, cred);
if (status == 0)
nfs4_schedule_state_renewal(clp);
return status;
}
-int nfs4_init_client(struct nfs4_client *clp)
-{
- return nfs4_map_errors(__nfs4_init_client(clp));
-}
-
u32
nfs4_alloc_lockowner_id(struct nfs4_client *clp)
{
@@ -235,6 +225,32 @@ nfs4_client_grab_unused(struct nfs4_client *clp, struct rpc_cred *cred)
return sp;
}
+struct rpc_cred *nfs4_get_renew_cred(struct nfs4_client *clp)
+{
+ struct nfs4_state_owner *sp;
+ struct rpc_cred *cred = NULL;
+
+ list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
+ if (list_empty(&sp->so_states))
+ continue;
+ cred = get_rpccred(sp->so_cred);
+ break;
+ }
+ return cred;
+}
+
+struct rpc_cred *nfs4_get_setclientid_cred(struct nfs4_client *clp)
+{
+ struct nfs4_state_owner *sp;
+
+ if (!list_empty(&clp->cl_state_owners)) {
+ sp = list_entry(clp->cl_state_owners.next,
+ struct nfs4_state_owner, so_list);
+ return get_rpccred(sp->so_cred);
+ }
+ return NULL;
+}
+
static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs4_client *clp, struct rpc_cred *cred)
{
@@ -349,14 +365,9 @@ nfs4_alloc_open_state(void)
{
struct nfs4_state *state;
- state = kmalloc(sizeof(*state), GFP_KERNEL);
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
- state->state = 0;
- state->nreaders = 0;
- state->nwriters = 0;
- state->flags = 0;
- memset(state->stateid.data, 0, sizeof(state->stateid.data));
atomic_set(&state->count, 1);
INIT_LIST_HEAD(&state->lock_states);
spin_lock_init(&state->state_lock);
@@ -475,15 +486,23 @@ void nfs4_close_state(struct nfs4_state *state, mode_t mode)
/* Protect against nfs4_find_state() */
spin_lock(&owner->so_lock);
spin_lock(&inode->i_lock);
- if (mode & FMODE_READ)
- state->nreaders--;
- if (mode & FMODE_WRITE)
- state->nwriters--;
+ switch (mode & (FMODE_READ | FMODE_WRITE)) {
+ case FMODE_READ:
+ state->n_rdonly--;
+ break;
+ case FMODE_WRITE:
+ state->n_wronly--;
+ break;
+ case FMODE_READ|FMODE_WRITE:
+ state->n_rdwr--;
+ }
oldstate = newstate = state->state;
- if (state->nreaders == 0)
- newstate &= ~FMODE_READ;
- if (state->nwriters == 0)
- newstate &= ~FMODE_WRITE;
+ if (state->n_rdwr == 0) {
+ if (state->n_rdonly == 0)
+ newstate &= ~FMODE_READ;
+ if (state->n_wronly == 0)
+ newstate &= ~FMODE_WRITE;
+ }
if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
nfs4_state_set_mode_locked(state, newstate);
oldstate = newstate;
@@ -733,45 +752,43 @@ out:
}
static int reclaimer(void *);
-struct reclaimer_args {
- struct nfs4_client *clp;
- struct completion complete;
-};
+
+static inline void nfs4_clear_recover_bit(struct nfs4_client *clp)
+{
+ smp_mb__before_clear_bit();
+ clear_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER);
+ rpc_wake_up(&clp->cl_rpcwaitq);
+}
/*
* State recovery routine
*/
-void
-nfs4_recover_state(void *data)
+static void nfs4_recover_state(struct nfs4_client *clp)
{
- struct nfs4_client *clp = (struct nfs4_client *)data;
- struct reclaimer_args args = {
- .clp = clp,
- };
- might_sleep();
-
- init_completion(&args.complete);
+ struct task_struct *task;
- if (kernel_thread(reclaimer, &args, CLONE_KERNEL) < 0)
- goto out_failed_clear;
- wait_for_completion(&args.complete);
- return;
-out_failed_clear:
- set_bit(NFS4CLNT_OK, &clp->cl_state);
- wake_up_all(&clp->cl_waitq);
- rpc_wake_up(&clp->cl_rpcwaitq);
+ __module_get(THIS_MODULE);
+ atomic_inc(&clp->cl_count);
+ task = kthread_run(reclaimer, clp, "%u.%u.%u.%u-reclaim",
+ NIPQUAD(clp->cl_addr));
+ if (!IS_ERR(task))
+ return;
+ nfs4_clear_recover_bit(clp);
+ nfs4_put_client(clp);
+ module_put(THIS_MODULE);
}
/*
* Schedule a state recovery attempt
*/
-void
-nfs4_schedule_state_recovery(struct nfs4_client *clp)
+void nfs4_schedule_state_recovery(struct nfs4_client *clp)
{
if (!clp)
return;
- if (test_and_clear_bit(NFS4CLNT_OK, &clp->cl_state))
- schedule_work(&clp->cl_recoverd);
+ if (test_and_set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
+ nfs4_recover_state(clp);
}
static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
@@ -887,18 +904,14 @@ static void nfs4_state_mark_reclaim(struct nfs4_client *clp)
static int reclaimer(void *ptr)
{
- struct reclaimer_args *args = (struct reclaimer_args *)ptr;
- struct nfs4_client *clp = args->clp;
+ struct nfs4_client *clp = ptr;
struct nfs4_state_owner *sp;
struct nfs4_state_recovery_ops *ops;
+ struct rpc_cred *cred;
int status = 0;
- daemonize("%u.%u.%u.%u-reclaim", NIPQUAD(clp->cl_addr));
allow_signal(SIGKILL);
- atomic_inc(&clp->cl_count);
- complete(&args->complete);
-
/* Ensure exclusive access to NFSv4 state */
lock_kernel();
down_write(&clp->cl_sem);
@@ -906,20 +919,33 @@ static int reclaimer(void *ptr)
if (list_empty(&clp->cl_superblocks))
goto out;
restart_loop:
- status = nfs4_proc_renew(clp);
- switch (status) {
- case 0:
- case -NFS4ERR_CB_PATH_DOWN:
- goto out;
- case -NFS4ERR_STALE_CLIENTID:
- case -NFS4ERR_LEASE_MOVED:
- ops = &nfs4_reboot_recovery_ops;
- break;
- default:
- ops = &nfs4_network_partition_recovery_ops;
- };
+ ops = &nfs4_network_partition_recovery_ops;
+ /* Are there any open files on this volume? */
+ cred = nfs4_get_renew_cred(clp);
+ if (cred != NULL) {
+ /* Yes there are: try to renew the old lease */
+ status = nfs4_proc_renew(clp, cred);
+ switch (status) {
+ case 0:
+ case -NFS4ERR_CB_PATH_DOWN:
+ put_rpccred(cred);
+ goto out;
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_LEASE_MOVED:
+ ops = &nfs4_reboot_recovery_ops;
+ }
+ } else {
+ /* "reboot" to ensure we clear all state on the server */
+ clp->cl_boot_time = CURRENT_TIME;
+ cred = nfs4_get_setclientid_cred(clp);
+ }
+ /* We're going to have to re-establish a clientid */
nfs4_state_mark_reclaim(clp);
- status = __nfs4_init_client(clp);
+ status = -ENOENT;
+ if (cred != NULL) {
+ status = nfs4_init_client(clp, cred);
+ put_rpccred(cred);
+ }
if (status)
goto out_error;
/* Mark all delegations for reclaim */
@@ -940,14 +966,13 @@ restart_loop:
}
nfs_delegation_reap_unclaimed(clp);
out:
- set_bit(NFS4CLNT_OK, &clp->cl_state);
up_write(&clp->cl_sem);
unlock_kernel();
- wake_up_all(&clp->cl_waitq);
- rpc_wake_up(&clp->cl_rpcwaitq);
if (status == -NFS4ERR_CB_PATH_DOWN)
nfs_handle_cb_pathdown(clp);
+ nfs4_clear_recover_bit(clp);
nfs4_put_client(clp);
+ module_put_and_exit(0);
return 0;
out_error:
printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
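The reclaimer is no longer spawned with kernel_thread() plus a daemonize() call and a completion handshake; nfs4_recover_state() now uses kthread_run() and pins the module with __module_get() until the thread finishes via module_put_and_exit(). The skeleton of that launch/teardown pattern, with placeholder names (recover_fn, launch_recovery) that are not part of the patch:

	#include <linux/err.h>
	#include <linux/kthread.h>
	#include <linux/module.h>

	static int recover_fn(void *data)
	{
		/* ... perform the recovery work using 'data' ... */
		module_put_and_exit(0);		/* drop the reference taken by the launcher */
		return 0;			/* not reached; kept for the prototype */
	}

	static void launch_recovery(void *data)
	{
		struct task_struct *task;

		__module_get(THIS_MODULE);	/* keep the module alive for the thread */
		task = kthread_run(recover_fn, data, "example-reclaim");
		if (IS_ERR(task))
			module_put(THIS_MODULE);	/* thread never ran: undo the pin */
	}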
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index fbbace8a30c..4bbf5ef5778 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -392,9 +392,11 @@ static int nfs_stat_to_errno(int);
decode_getattr_maxsz)
#define NFS4_enc_delegreturn_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
- encode_delegreturn_maxsz)
+ encode_delegreturn_maxsz + \
+ encode_getattr_maxsz)
#define NFS4_dec_delegreturn_sz (compound_decode_hdr_maxsz + \
- decode_delegreturn_maxsz)
+ decode_delegreturn_maxsz + \
+ decode_getattr_maxsz)
#define NFS4_enc_getacl_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
encode_getattr_maxsz)
@@ -564,7 +566,7 @@ static int encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const s
}
if (iap->ia_valid & ATTR_MODE) {
bmval1 |= FATTR4_WORD1_MODE;
- WRITE32(iap->ia_mode);
+ WRITE32(iap->ia_mode & S_IALLUGO);
}
if (iap->ia_valid & ATTR_UID) {
bmval1 |= FATTR4_WORD1_OWNER;
@@ -742,69 +744,80 @@ static int encode_link(struct xdr_stream *xdr, const struct qstr *name)
return 0;
}
+static inline int nfs4_lock_type(struct file_lock *fl, int block)
+{
+ if ((fl->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) == F_RDLCK)
+ return block ? NFS4_READW_LT : NFS4_READ_LT;
+ return block ? NFS4_WRITEW_LT : NFS4_WRITE_LT;
+}
+
+static inline uint64_t nfs4_lock_length(struct file_lock *fl)
+{
+ if (fl->fl_end == OFFSET_MAX)
+ return ~(uint64_t)0;
+ return fl->fl_end - fl->fl_start + 1;
+}
+
/*
* opcode,type,reclaim,offset,length,new_lock_owner = 32
* open_seqid,open_stateid,lock_seqid,lock_owner.clientid, lock_owner.id = 40
*/
-static int encode_lock(struct xdr_stream *xdr, const struct nfs_lockargs *arg)
+static int encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args)
{
uint32_t *p;
- struct nfs_lock_opargs *opargs = arg->u.lock;
RESERVE_SPACE(32);
WRITE32(OP_LOCK);
- WRITE32(arg->type);
- WRITE32(opargs->reclaim);
- WRITE64(arg->offset);
- WRITE64(arg->length);
- WRITE32(opargs->new_lock_owner);
- if (opargs->new_lock_owner){
+ WRITE32(nfs4_lock_type(args->fl, args->block));
+ WRITE32(args->reclaim);
+ WRITE64(args->fl->fl_start);
+ WRITE64(nfs4_lock_length(args->fl));
+ WRITE32(args->new_lock_owner);
+ if (args->new_lock_owner){
RESERVE_SPACE(40);
- WRITE32(opargs->open_seqid->sequence->counter);
- WRITEMEM(opargs->open_stateid->data, sizeof(opargs->open_stateid->data));
- WRITE32(opargs->lock_seqid->sequence->counter);
- WRITE64(opargs->lock_owner.clientid);
+ WRITE32(args->open_seqid->sequence->counter);
+ WRITEMEM(args->open_stateid->data, sizeof(args->open_stateid->data));
+ WRITE32(args->lock_seqid->sequence->counter);
+ WRITE64(args->lock_owner.clientid);
WRITE32(4);
- WRITE32(opargs->lock_owner.id);
+ WRITE32(args->lock_owner.id);
}
else {
RESERVE_SPACE(20);
- WRITEMEM(opargs->lock_stateid->data, sizeof(opargs->lock_stateid->data));
- WRITE32(opargs->lock_seqid->sequence->counter);
+ WRITEMEM(args->lock_stateid->data, sizeof(args->lock_stateid->data));
+ WRITE32(args->lock_seqid->sequence->counter);
}
return 0;
}
-static int encode_lockt(struct xdr_stream *xdr, const struct nfs_lockargs *arg)
+static int encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *args)
{
uint32_t *p;
- struct nfs_lowner *opargs = arg->u.lockt;
RESERVE_SPACE(40);
WRITE32(OP_LOCKT);
- WRITE32(arg->type);
- WRITE64(arg->offset);
- WRITE64(arg->length);
- WRITE64(opargs->clientid);
+ WRITE32(nfs4_lock_type(args->fl, 0));
+ WRITE64(args->fl->fl_start);
+ WRITE64(nfs4_lock_length(args->fl));
+ WRITE64(args->lock_owner.clientid);
WRITE32(4);
- WRITE32(opargs->id);
+ WRITE32(args->lock_owner.id);
return 0;
}
-static int encode_locku(struct xdr_stream *xdr, const struct nfs_lockargs *arg)
+static int encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *args)
{
uint32_t *p;
- struct nfs_locku_opargs *opargs = arg->u.locku;
RESERVE_SPACE(44);
WRITE32(OP_LOCKU);
- WRITE32(arg->type);
- WRITE32(opargs->seqid->sequence->counter);
- WRITEMEM(opargs->stateid->data, sizeof(opargs->stateid->data));
- WRITE64(arg->offset);
- WRITE64(arg->length);
+ WRITE32(nfs4_lock_type(args->fl, 0));
+ WRITE32(args->seqid->sequence->counter);
+ WRITEMEM(args->stateid->data, sizeof(args->stateid->data));
+ WRITE64(args->fl->fl_start);
+ WRITE64(nfs4_lock_length(args->fl));
return 0;
}
@@ -964,9 +977,9 @@ static int encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_con
{
uint32_t *p;
- RESERVE_SPACE(8+sizeof(arg->stateid.data));
+ RESERVE_SPACE(8+sizeof(arg->stateid->data));
WRITE32(OP_OPEN_CONFIRM);
- WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data));
+ WRITEMEM(arg->stateid->data, sizeof(arg->stateid->data));
WRITE32(arg->seqid->sequence->counter);
return 0;
@@ -1499,9 +1512,6 @@ static int nfs4_xdr_enc_open(struct rpc_rqst *req, uint32_t *p, struct nfs_opena
};
int status;
- status = nfs_wait_on_sequence(args->seqid, req->rq_task);
- if (status != 0)
- goto out;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_compound_hdr(&xdr, &hdr);
status = encode_putfh(&xdr, args->fh);
@@ -1538,9 +1548,6 @@ static int nfs4_xdr_enc_open_confirm(struct rpc_rqst *req, uint32_t *p, struct n
};
int status;
- status = nfs_wait_on_sequence(args->seqid, req->rq_task);
- if (status != 0)
- goto out;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_compound_hdr(&xdr, &hdr);
status = encode_putfh(&xdr, args->fh);
@@ -1558,19 +1565,19 @@ static int nfs4_xdr_enc_open_noattr(struct rpc_rqst *req, uint32_t *p, struct nf
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 2,
+ .nops = 3,
};
int status;
- status = nfs_wait_on_sequence(args->seqid, req->rq_task);
- if (status != 0)
- goto out;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_compound_hdr(&xdr, &hdr);
status = encode_putfh(&xdr, args->fh);
if (status)
goto out;
status = encode_open(&xdr, args);
+ if (status)
+ goto out;
+ status = encode_getfattr(&xdr, args->bitmask);
out:
return status;
}
@@ -1602,21 +1609,14 @@ out:
/*
* Encode a LOCK request
*/
-static int nfs4_xdr_enc_lock(struct rpc_rqst *req, uint32_t *p, struct nfs_lockargs *args)
+static int nfs4_xdr_enc_lock(struct rpc_rqst *req, uint32_t *p, struct nfs_lock_args *args)
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
.nops = 2,
};
- struct nfs_lock_opargs *opargs = args->u.lock;
int status;
- status = nfs_wait_on_sequence(opargs->lock_seqid, req->rq_task);
- if (status != 0)
- goto out;
- /* Do we need to do an open_to_lock_owner? */
- if (opargs->lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)
- opargs->new_lock_owner = 0;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_compound_hdr(&xdr, &hdr);
status = encode_putfh(&xdr, args->fh);
@@ -1630,7 +1630,7 @@ out:
/*
* Encode a LOCKT request
*/
-static int nfs4_xdr_enc_lockt(struct rpc_rqst *req, uint32_t *p, struct nfs_lockargs *args)
+static int nfs4_xdr_enc_lockt(struct rpc_rqst *req, uint32_t *p, struct nfs_lockt_args *args)
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
@@ -1651,7 +1651,7 @@ out:
/*
* Encode a LOCKU request
*/
-static int nfs4_xdr_enc_locku(struct rpc_rqst *req, uint32_t *p, struct nfs_lockargs *args)
+static int nfs4_xdr_enc_locku(struct rpc_rqst *req, uint32_t *p, struct nfs_locku_args *args)
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
@@ -1985,14 +1985,20 @@ static int nfs4_xdr_enc_delegreturn(struct rpc_rqst *req, uint32_t *p, const str
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 2,
+ .nops = 3,
};
int status;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_compound_hdr(&xdr, &hdr);
- if ((status = encode_putfh(&xdr, args->fhandle)) == 0)
- status = encode_delegreturn(&xdr, args->stateid);
+ status = encode_putfh(&xdr, args->fhandle);
+ if (status != 0)
+ goto out;
+ status = encode_delegreturn(&xdr, args->stateid);
+ if (status != 0)
+ goto out;
+ status = encode_getfattr(&xdr, args->bitmask);
+out:
return status;
}
@@ -2955,55 +2961,64 @@ static int decode_link(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
/*
* We create the owner, so we know a proper owner.id length is 4.
*/
-static int decode_lock_denied (struct xdr_stream *xdr, struct nfs_lock_denied *denied)
+static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl)
{
+ uint64_t offset, length, clientid;
uint32_t *p;
- uint32_t namelen;
+ uint32_t namelen, type;
READ_BUF(32);
- READ64(denied->offset);
- READ64(denied->length);
- READ32(denied->type);
- READ64(denied->owner.clientid);
+ READ64(offset);
+ READ64(length);
+ READ32(type);
+ if (fl != NULL) {
+ fl->fl_start = (loff_t)offset;
+ fl->fl_end = fl->fl_start + (loff_t)length - 1;
+ if (length == ~(uint64_t)0)
+ fl->fl_end = OFFSET_MAX;
+ fl->fl_type = F_WRLCK;
+ if (type & 1)
+ fl->fl_type = F_RDLCK;
+ fl->fl_pid = 0;
+ }
+ READ64(clientid);
READ32(namelen);
READ_BUF(namelen);
- if (namelen == 4)
- READ32(denied->owner.id);
return -NFS4ERR_DENIED;
}
-static int decode_lock(struct xdr_stream *xdr, struct nfs_lockres *res)
+static int decode_lock(struct xdr_stream *xdr, struct nfs_lock_res *res)
{
uint32_t *p;
int status;
status = decode_op_hdr(xdr, OP_LOCK);
if (status == 0) {
- READ_BUF(sizeof(res->u.stateid.data));
- COPYMEM(res->u.stateid.data, sizeof(res->u.stateid.data));
+ READ_BUF(sizeof(res->stateid.data));
+ COPYMEM(res->stateid.data, sizeof(res->stateid.data));
} else if (status == -NFS4ERR_DENIED)
- return decode_lock_denied(xdr, &res->u.denied);
+ return decode_lock_denied(xdr, NULL);
return status;
}
-static int decode_lockt(struct xdr_stream *xdr, struct nfs_lockres *res)
+static int decode_lockt(struct xdr_stream *xdr, struct nfs_lockt_res *res)
{
int status;
status = decode_op_hdr(xdr, OP_LOCKT);
if (status == -NFS4ERR_DENIED)
- return decode_lock_denied(xdr, &res->u.denied);
+ return decode_lock_denied(xdr, res->denied);
return status;
}
-static int decode_locku(struct xdr_stream *xdr, struct nfs_lockres *res)
+static int decode_locku(struct xdr_stream *xdr, struct nfs_locku_res *res)
{
uint32_t *p;
int status;
status = decode_op_hdr(xdr, OP_LOCKU);
if (status == 0) {
- READ_BUF(sizeof(res->u.stateid.data));
- COPYMEM(res->u.stateid.data, sizeof(res->u.stateid.data));
+ READ_BUF(sizeof(res->stateid.data));
+ COPYMEM(res->stateid.data, sizeof(res->stateid.data));
}
return status;
}
@@ -3831,6 +3846,9 @@ static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp, uint32_t *p, struct
if (status)
goto out;
status = decode_open(&xdr, res);
+ if (status)
+ goto out;
+ decode_getfattr(&xdr, res->f_attr, res->server);
out:
return status;
}
@@ -3864,7 +3882,7 @@ out:
/*
* Decode LOCK response
*/
-static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_lockres *res)
+static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_lock_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -3885,7 +3903,7 @@ out:
/*
* Decode LOCKT response
*/
-static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_lockres *res)
+static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_lockt_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -3906,7 +3924,7 @@ out:
/*
* Decode LOCKU response
*/
-static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_lockres *res)
+static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_locku_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4174,7 +4192,7 @@ static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req, uint32_t *p, s
/*
* DELEGRETURN request
*/
-static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, uint32_t *p, void *dummy)
+static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_delegreturnres *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4182,11 +4200,14 @@ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, uint32_t *p, void *d
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
status = decode_compound_hdr(&xdr, &hdr);
- if (status == 0) {
- status = decode_putfh(&xdr);
- if (status == 0)
- status = decode_delegreturn(&xdr);
- }
+ if (status != 0)
+ goto out;
+ status = decode_putfh(&xdr);
+ if (status != 0)
+ goto out;
+ status = decode_delegreturn(&xdr);
+ decode_getfattr(&xdr, res->fattr, res->server);
+out:
return status;
}
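With nfs_lockargs gone, the XDR layer derives the wire lock type and length straight from the struct file_lock: nfs4_lock_type() folds the blocking flag into the READW/WRITEW variants, and nfs4_lock_length() converts an inclusive POSIX byte range into an NFSv4 count, with a lock running to OFFSET_MAX encoded as the all-ones "rest of file" length. Restated in isolation (illustrative only):

	uint64_t lock_length(loff_t fl_start, loff_t fl_end, loff_t offset_max)
	{
		if (fl_end == offset_max)
			return ~(uint64_t)0;			/* lock to end of file */
		return (uint64_t)(fl_end - fl_start + 1);	/* inclusive range -> count */
	}

	/* e.g. a lock covering bytes 100..199 goes on the wire as offset 100, length 100 */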
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index 1b272a135a3..e897e00c2c9 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -275,7 +275,9 @@ static int __init root_nfs_parse(char *name, char *buf)
case Opt_noacl:
nfs_data.flags |= NFS_MOUNT_NOACL;
break;
- default :
+ default:
+ printk(KERN_WARNING "Root-NFS: unknown "
+ "option: %s\n", p);
return 0;
}
}
@@ -296,8 +298,8 @@ static int __init root_nfs_name(char *name)
nfs_port = -1;
nfs_data.version = NFS_MOUNT_VERSION;
nfs_data.flags = NFS_MOUNT_NONLM; /* No lockd in nfs root yet */
- nfs_data.rsize = NFS_DEF_FILE_IO_BUFFER_SIZE;
- nfs_data.wsize = NFS_DEF_FILE_IO_BUFFER_SIZE;
+ nfs_data.rsize = NFS_DEF_FILE_IO_SIZE;
+ nfs_data.wsize = NFS_DEF_FILE_IO_SIZE;
nfs_data.acregmin = 3;
nfs_data.acregmax = 60;
nfs_data.acdirmin = 30;
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index e1e3ca5d746..f5150d71c03 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -111,6 +111,9 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
};
int status;
+ /* Mask out the non-modebit related stuff from attr->ia_mode */
+ sattr->ia_mode &= S_IALLUGO;
+
dprintk("NFS call setattr\n");
nfs_fattr_init(fattr);
status = rpc_call(NFS_CLIENT(inode), NFSPROC_SETATTR, &arg, fattr, 0);
@@ -547,10 +550,9 @@ nfs_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
extern u32 * nfs_decode_dirent(u32 *, struct nfs_entry *, int);
-static void
-nfs_read_done(struct rpc_task *task)
+static void nfs_read_done(struct rpc_task *task, void *calldata)
{
- struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata;
+ struct nfs_read_data *data = calldata;
if (task->tk_status >= 0) {
nfs_refresh_inode(data->inode, data->res.fattr);
@@ -560,9 +562,14 @@ nfs_read_done(struct rpc_task *task)
if (data->args.offset + data->args.count >= data->res.fattr->size)
data->res.eof = 1;
}
- nfs_readpage_result(task);
+ nfs_readpage_result(task, calldata);
}
+static const struct rpc_call_ops nfs_read_ops = {
+ .rpc_call_done = nfs_read_done,
+ .rpc_release = nfs_readdata_release,
+};
+
static void
nfs_proc_read_setup(struct nfs_read_data *data)
{
@@ -580,20 +587,24 @@ nfs_proc_read_setup(struct nfs_read_data *data)
flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
/* Finalize the task. */
- rpc_init_task(task, NFS_CLIENT(inode), nfs_read_done, flags);
+ rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs_read_ops, data);
rpc_call_setup(task, &msg, 0);
}
-static void
-nfs_write_done(struct rpc_task *task)
+static void nfs_write_done(struct rpc_task *task, void *calldata)
{
- struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
+ struct nfs_write_data *data = calldata;
if (task->tk_status >= 0)
nfs_post_op_update_inode(data->inode, data->res.fattr);
- nfs_writeback_done(task);
+ nfs_writeback_done(task, calldata);
}
+static const struct rpc_call_ops nfs_write_ops = {
+ .rpc_call_done = nfs_write_done,
+ .rpc_release = nfs_writedata_release,
+};
+
static void
nfs_proc_write_setup(struct nfs_write_data *data, int how)
{
@@ -614,7 +625,7 @@ nfs_proc_write_setup(struct nfs_write_data *data, int how)
flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
/* Finalize the task. */
- rpc_init_task(task, NFS_CLIENT(inode), nfs_write_done, flags);
+ rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs_write_ops, data);
rpc_call_setup(task, &msg, 0);
}
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 5f20eafba8e..05eb43fadf8 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -42,9 +42,8 @@ mempool_t *nfs_rdata_mempool;
#define MIN_POOL_READ (32)
-void nfs_readdata_release(struct rpc_task *task)
+void nfs_readdata_release(void *data)
{
- struct nfs_read_data *data = (struct nfs_read_data *)task->tk_calldata;
nfs_readdata_free(data);
}
@@ -84,7 +83,7 @@ static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode,
int result;
struct nfs_read_data *rdata;
- rdata = nfs_readdata_alloc();
+ rdata = nfs_readdata_alloc(1);
if (!rdata)
return -ENOMEM;
@@ -220,9 +219,6 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
NFS_PROTO(inode)->read_setup(data);
data->task.tk_cookie = (unsigned long)inode;
- data->task.tk_calldata = data;
- /* Release requests */
- data->task.tk_release = nfs_readdata_release;
dprintk("NFS: %4d initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
data->task.tk_pid,
@@ -287,7 +283,7 @@ static int nfs_pagein_multi(struct list_head *head, struct inode *inode)
nbytes = req->wb_bytes;
for(;;) {
- data = nfs_readdata_alloc();
+ data = nfs_readdata_alloc(1);
if (!data)
goto out_bad;
INIT_LIST_HEAD(&data->pages);
@@ -343,7 +339,7 @@ static int nfs_pagein_one(struct list_head *head, struct inode *inode)
if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
return nfs_pagein_multi(head, inode);
- data = nfs_readdata_alloc();
+ data = nfs_readdata_alloc(NFS_SERVER(inode)->rpages);
if (!data)
goto out_bad;
@@ -452,9 +448,9 @@ static void nfs_readpage_result_full(struct nfs_read_data *data, int status)
* This is the callback from RPC telling us whether a reply was
* received or some error occurred (timeout or socket shutdown).
*/
-void nfs_readpage_result(struct rpc_task *task)
+void nfs_readpage_result(struct rpc_task *task, void *calldata)
{
- struct nfs_read_data *data = (struct nfs_read_data *)task->tk_calldata;
+ struct nfs_read_data *data = calldata;
struct nfs_readargs *argp = &data->args;
struct nfs_readres *resp = &data->res;
int status = task->tk_status;
diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c
new file mode 100644
index 00000000000..4c486eb867c
--- /dev/null
+++ b/fs/nfs/sysctl.c
@@ -0,0 +1,84 @@
+/*
+ * linux/fs/nfs/sysctl.c
+ *
+ * Sysctl interface to NFS parameters
+ */
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/linkage.h>
+#include <linux/ctype.h>
+#include <linux/fs.h>
+#include <linux/sysctl.h>
+#include <linux/module.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_idmap.h>
+
+#include "callback.h"
+
+static const int nfs_set_port_min = 0;
+static const int nfs_set_port_max = 65535;
+static struct ctl_table_header *nfs_callback_sysctl_table;
+/*
+ * Something that isn't CTL_ANY, CTL_NONE or a value that may clash.
+ * Use the same values as fs/lockd/svc.c
+ */
+#define CTL_UNNUMBERED -2
+
+static ctl_table nfs_cb_sysctls[] = {
+#ifdef CONFIG_NFS_V4
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "nfs_callback_tcpport",
+ .data = &nfs_callback_set_tcpport,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = (int *)&nfs_set_port_min,
+ .extra2 = (int *)&nfs_set_port_max,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "idmap_cache_timeout",
+ .data = &nfs_idmap_cache_timeout,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ .strategy = &sysctl_jiffies,
+ },
+#endif
+ { .ctl_name = 0 }
+};
+
+static ctl_table nfs_cb_sysctl_dir[] = {
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "nfs",
+ .mode = 0555,
+ .child = nfs_cb_sysctls,
+ },
+ { .ctl_name = 0 }
+};
+
+static ctl_table nfs_cb_sysctl_root[] = {
+ {
+ .ctl_name = CTL_FS,
+ .procname = "fs",
+ .mode = 0555,
+ .child = nfs_cb_sysctl_dir,
+ },
+ { .ctl_name = 0 }
+};
+
+int nfs_register_sysctl(void)
+{
+ nfs_callback_sysctl_table = register_sysctl_table(nfs_cb_sysctl_root, 0);
+ if (nfs_callback_sysctl_table == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+void nfs_unregister_sysctl(void)
+{
+ unregister_sysctl_table(nfs_callback_sysctl_table);
+ nfs_callback_sysctl_table = NULL;
+}
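The new sysctl.c chains three ctl_table levels (fs -> nfs -> entries), so after nfs_register_sysctl() the knobs appear as /proc/sys/fs/nfs/nfs_callback_tcpport and /proc/sys/fs/nfs/idmap_cache_timeout. The register/unregister pair is presumably wired into the NFS client's module init/exit elsewhere in this series; the sketch below only illustrates that assumed wiring and is not part of this file:

	static int __init example_nfs_init(void)
	{
		int err = nfs_register_sysctl();	/* hook up /proc/sys/fs/nfs/ entries */

		if (err)
			return err;
		/* ... remainder of client initialisation ... */
		return 0;
	}

	static void __exit example_nfs_exit(void)
	{
		nfs_unregister_sysctl();		/* tear the table down on unload */
	}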
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index d639d172d56..a65c7b53d55 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -87,10 +87,9 @@ nfs_copy_dname(struct dentry *dentry, struct nfs_unlinkdata *data)
* We delay initializing RPC info until after the call to dentry_iput()
* in order to minimize races against rename().
*/
-static void
-nfs_async_unlink_init(struct rpc_task *task)
+static void nfs_async_unlink_init(struct rpc_task *task, void *calldata)
{
- struct nfs_unlinkdata *data = (struct nfs_unlinkdata *)task->tk_calldata;
+ struct nfs_unlinkdata *data = calldata;
struct dentry *dir = data->dir;
struct rpc_message msg = {
.rpc_cred = data->cred,
@@ -116,10 +115,9 @@ nfs_async_unlink_init(struct rpc_task *task)
*
* Do the directory attribute update.
*/
-static void
-nfs_async_unlink_done(struct rpc_task *task)
+static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
{
- struct nfs_unlinkdata *data = (struct nfs_unlinkdata *)task->tk_calldata;
+ struct nfs_unlinkdata *data = calldata;
struct dentry *dir = data->dir;
struct inode *dir_i;
@@ -141,13 +139,18 @@ nfs_async_unlink_done(struct rpc_task *task)
* We need to call nfs_put_unlinkdata as a 'tk_release' task since the
* rpc_task would be freed too.
*/
-static void
-nfs_async_unlink_release(struct rpc_task *task)
+static void nfs_async_unlink_release(void *calldata)
{
- struct nfs_unlinkdata *data = (struct nfs_unlinkdata *)task->tk_calldata;
+ struct nfs_unlinkdata *data = calldata;
nfs_put_unlinkdata(data);
}
+static const struct rpc_call_ops nfs_unlink_ops = {
+ .rpc_call_prepare = nfs_async_unlink_init,
+ .rpc_call_done = nfs_async_unlink_done,
+ .rpc_release = nfs_async_unlink_release,
+};
+
/**
* nfs_async_unlink - asynchronous unlinking of a file
* @dentry: dentry to unlink
@@ -157,7 +160,6 @@ nfs_async_unlink(struct dentry *dentry)
{
struct dentry *dir = dentry->d_parent;
struct nfs_unlinkdata *data;
- struct rpc_task *task;
struct rpc_clnt *clnt = NFS_CLIENT(dir->d_inode);
int status = -ENOMEM;
@@ -178,17 +180,13 @@ nfs_async_unlink(struct dentry *dentry)
nfs_deletes = data;
data->count = 1;
- task = &data->task;
- rpc_init_task(task, clnt, nfs_async_unlink_done , RPC_TASK_ASYNC);
- task->tk_calldata = data;
- task->tk_action = nfs_async_unlink_init;
- task->tk_release = nfs_async_unlink_release;
+ rpc_init_task(&data->task, clnt, RPC_TASK_ASYNC, &nfs_unlink_ops, data);
spin_lock(&dentry->d_lock);
dentry->d_flags |= DCACHE_NFSFS_RENAMED;
spin_unlock(&dentry->d_lock);
- rpc_sleep_on(&nfs_delete_queue, task, NULL, NULL);
+ rpc_sleep_on(&nfs_delete_queue, &data->task, NULL, NULL);
status = 0;
out:
return status;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 3107908e5f3..9449b683550 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -89,24 +89,38 @@ static mempool_t *nfs_commit_mempool;
static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);
-static inline struct nfs_write_data *nfs_commit_alloc(void)
+static inline struct nfs_write_data *nfs_commit_alloc(unsigned int pagecount)
{
struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);
+
if (p) {
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
+ if (pagecount < NFS_PAGEVEC_SIZE)
+ p->pagevec = &p->page_array[0];
+ else {
+ size_t size = ++pagecount * sizeof(struct page *);
+ p->pagevec = kmalloc(size, GFP_NOFS);
+ if (p->pagevec) {
+ memset(p->pagevec, 0, size);
+ } else {
+ mempool_free(p, nfs_commit_mempool);
+ p = NULL;
+ }
+ }
}
return p;
}
static inline void nfs_commit_free(struct nfs_write_data *p)
{
+ if (p && (p->pagevec != &p->page_array[0]))
+ kfree(p->pagevec);
mempool_free(p, nfs_commit_mempool);
}
-static void nfs_writedata_release(struct rpc_task *task)
+void nfs_writedata_release(void *wdata)
{
- struct nfs_write_data *wdata = (struct nfs_write_data *)task->tk_calldata;
nfs_writedata_free(wdata);
}
@@ -168,7 +182,7 @@ static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
int result, written = 0;
struct nfs_write_data *wdata;
- wdata = nfs_writedata_alloc();
+ wdata = nfs_writedata_alloc(1);
if (!wdata)
return -ENOMEM;
@@ -232,19 +246,16 @@ static int nfs_writepage_async(struct nfs_open_context *ctx,
unsigned int offset, unsigned int count)
{
struct nfs_page *req;
- int status;
req = nfs_update_request(ctx, inode, page, offset, count);
- status = (IS_ERR(req)) ? PTR_ERR(req) : 0;
- if (status < 0)
- goto out;
+ if (IS_ERR(req))
+ return PTR_ERR(req);
/* Update file length */
nfs_grow_file(page, offset, count);
/* Set the PG_uptodate flag? */
nfs_mark_uptodate(page, offset, count);
nfs_unlock_request(req);
- out:
- return status;
+ return 0;
}
static int wb_priority(struct writeback_control *wbc)
@@ -304,11 +315,8 @@ do_it:
lock_kernel();
if (!IS_SYNC(inode) && inode_referenced) {
err = nfs_writepage_async(ctx, inode, page, 0, offset);
- if (err >= 0) {
- err = 0;
- if (wbc->for_reclaim)
- nfs_flush_inode(inode, 0, 0, FLUSH_STABLE);
- }
+ if (!wbc->for_writepages)
+ nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
} else {
err = nfs_writepage_sync(ctx, inode, page, 0,
offset, priority);
@@ -877,9 +885,6 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
data->task.tk_priority = flush_task_priority(how);
data->task.tk_cookie = (unsigned long)inode;
- data->task.tk_calldata = data;
- /* Release requests */
- data->task.tk_release = nfs_writedata_release;
dprintk("NFS: %4d initiated write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
data->task.tk_pid,
@@ -919,7 +924,7 @@ static int nfs_flush_multi(struct list_head *head, struct inode *inode, int how)
nbytes = req->wb_bytes;
for (;;) {
- data = nfs_writedata_alloc();
+ data = nfs_writedata_alloc(1);
if (!data)
goto out_bad;
list_add(&data->pages, &list);
@@ -983,7 +988,7 @@ static int nfs_flush_one(struct list_head *head, struct inode *inode, int how)
if (NFS_SERVER(inode)->wsize < PAGE_CACHE_SIZE)
return nfs_flush_multi(head, inode, how);
- data = nfs_writedata_alloc();
+ data = nfs_writedata_alloc(NFS_SERVER(inode)->wpages);
if (!data)
goto out_bad;
@@ -1137,9 +1142,9 @@ static void nfs_writeback_done_full(struct nfs_write_data *data, int status)
/*
* This function is called when the WRITE call is complete.
*/
-void nfs_writeback_done(struct rpc_task *task)
+void nfs_writeback_done(struct rpc_task *task, void *calldata)
{
- struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
+ struct nfs_write_data *data = calldata;
struct nfs_writeargs *argp = &data->args;
struct nfs_writeres *resp = &data->res;
@@ -1206,9 +1211,8 @@ void nfs_writeback_done(struct rpc_task *task)
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-static void nfs_commit_release(struct rpc_task *task)
+void nfs_commit_release(void *wdata)
{
- struct nfs_write_data *wdata = (struct nfs_write_data *)task->tk_calldata;
nfs_commit_free(wdata);
}
@@ -1244,9 +1248,6 @@ static void nfs_commit_rpcsetup(struct list_head *head,
data->task.tk_priority = flush_task_priority(how);
data->task.tk_cookie = (unsigned long)inode;
- data->task.tk_calldata = data;
- /* Release requests */
- data->task.tk_release = nfs_commit_release;
dprintk("NFS: %4d initiated commit call\n", data->task.tk_pid);
}
@@ -1255,12 +1256,12 @@ static void nfs_commit_rpcsetup(struct list_head *head,
* Commit dirty pages
*/
static int
-nfs_commit_list(struct list_head *head, int how)
+nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
struct nfs_write_data *data;
struct nfs_page *req;
- data = nfs_commit_alloc();
+ data = nfs_commit_alloc(NFS_SERVER(inode)->wpages);
if (!data)
goto out_bad;
@@ -1283,10 +1284,9 @@ nfs_commit_list(struct list_head *head, int how)
/*
* COMMIT call returned
*/
-void
-nfs_commit_done(struct rpc_task *task)
+void nfs_commit_done(struct rpc_task *task, void *calldata)
{
- struct nfs_write_data *data = (struct nfs_write_data *)task->tk_calldata;
+ struct nfs_write_data *data = calldata;
struct nfs_page *req;
int res = 0;
@@ -1366,7 +1366,7 @@ int nfs_commit_inode(struct inode *inode, int how)
res = nfs_scan_commit(inode, &head, 0, 0);
spin_unlock(&nfsi->req_lock);
if (res) {
- error = nfs_commit_list(&head, how);
+ error = nfs_commit_list(inode, &head, how);
if (error < 0)
return error;
}
@@ -1377,22 +1377,23 @@ int nfs_commit_inode(struct inode *inode, int how)
int nfs_sync_inode(struct inode *inode, unsigned long idx_start,
unsigned int npages, int how)
{
- int error,
- wait;
+ int nocommit = how & FLUSH_NOCOMMIT;
+ int wait = how & FLUSH_WAIT;
+ int error;
- wait = how & FLUSH_WAIT;
- how &= ~FLUSH_WAIT;
+ how &= ~(FLUSH_WAIT|FLUSH_NOCOMMIT);
do {
- error = 0;
- if (wait)
+ if (wait) {
error = nfs_wait_on_requests(inode, idx_start, npages);
- if (error == 0)
- error = nfs_flush_inode(inode, idx_start, npages, how);
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
- if (error == 0)
+ if (error != 0)
+ continue;
+ }
+ error = nfs_flush_inode(inode, idx_start, npages, how);
+ if (error != 0)
+ continue;
+ if (!nocommit)
error = nfs_commit_inode(inode, how);
-#endif
} while (error > 0);
return error;
}
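The allocator hunk at the top of this file's diff gives the commit/write data a variable-sized page vector: small requests keep using the inline page_array, while larger ones (nfs_commit_list() now passes NFS_SERVER(inode)->wpages, and the read/write paths pass rpages/wpages likewise) get a separately allocated vector so one RPC can span a full wsize worth of pages. The decision reduces to the following shape (sketch of the hunk above, not new behaviour; NFS_PAGEVEC_SIZE and the struct layout come from the real headers):

	if (pagecount < NFS_PAGEVEC_SIZE) {
		p->pagevec = &p->page_array[0];		/* small I/O: inline array is enough */
	} else {
		size_t size = (pagecount + 1) * sizeof(struct page *);

		p->pagevec = kmalloc(size, GFP_NOFS);	/* large I/O: one slot per page, plus one */
		if (p->pagevec != NULL)
			memset(p->pagevec, 0, size);
		else {
			mempool_free(p, nfs_commit_mempool);
			p = NULL;			/* the allocation fails as a whole */
		}
	}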
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 041380fe667..6d2dfed1de0 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -56,13 +56,20 @@ static int
nfsd3_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle *argp,
struct nfsd3_attrstat *resp)
{
- int nfserr;
+ int err, nfserr;
dprintk("nfsd: GETATTR(3) %s\n",
- SVCFH_fmt(&argp->fh));
+ SVCFH_fmt(&argp->fh));
fh_copy(&resp->fh, &argp->fh);
nfserr = fh_verify(rqstp, &resp->fh, 0, MAY_NOP);
+ if (nfserr)
+ RETURN_STATUS(nfserr);
+
+ err = vfs_getattr(resp->fh.fh_export->ex_mnt,
+ resp->fh.fh_dentry, &resp->stat);
+ nfserr = nfserrno(err);
+
RETURN_STATUS(nfserr);
}
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 9147b8524d0..243d94b9653 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -154,37 +154,34 @@ decode_sattr3(u32 *p, struct iattr *iap)
}
static inline u32 *
-encode_fattr3(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp)
+encode_fattr3(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp,
+ struct kstat *stat)
{
- struct vfsmount *mnt = fhp->fh_export->ex_mnt;
struct dentry *dentry = fhp->fh_dentry;
- struct kstat stat;
struct timespec time;
- vfs_getattr(mnt, dentry, &stat);
-
- *p++ = htonl(nfs3_ftypes[(stat.mode & S_IFMT) >> 12]);
- *p++ = htonl((u32) stat.mode);
- *p++ = htonl((u32) stat.nlink);
- *p++ = htonl((u32) nfsd_ruid(rqstp, stat.uid));
- *p++ = htonl((u32) nfsd_rgid(rqstp, stat.gid));
- if (S_ISLNK(stat.mode) && stat.size > NFS3_MAXPATHLEN) {
+ *p++ = htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]);
+ *p++ = htonl((u32) stat->mode);
+ *p++ = htonl((u32) stat->nlink);
+ *p++ = htonl((u32) nfsd_ruid(rqstp, stat->uid));
+ *p++ = htonl((u32) nfsd_rgid(rqstp, stat->gid));
+ if (S_ISLNK(stat->mode) && stat->size > NFS3_MAXPATHLEN) {
p = xdr_encode_hyper(p, (u64) NFS3_MAXPATHLEN);
} else {
- p = xdr_encode_hyper(p, (u64) stat.size);
+ p = xdr_encode_hyper(p, (u64) stat->size);
}
- p = xdr_encode_hyper(p, ((u64)stat.blocks) << 9);
- *p++ = htonl((u32) MAJOR(stat.rdev));
- *p++ = htonl((u32) MINOR(stat.rdev));
+ p = xdr_encode_hyper(p, ((u64)stat->blocks) << 9);
+ *p++ = htonl((u32) MAJOR(stat->rdev));
+ *p++ = htonl((u32) MINOR(stat->rdev));
if (is_fsid(fhp, rqstp->rq_reffh))
p = xdr_encode_hyper(p, (u64) fhp->fh_export->ex_fsid);
else
- p = xdr_encode_hyper(p, (u64) huge_encode_dev(stat.dev));
- p = xdr_encode_hyper(p, (u64) stat.ino);
- p = encode_time3(p, &stat.atime);
+ p = xdr_encode_hyper(p, (u64) huge_encode_dev(stat->dev));
+ p = xdr_encode_hyper(p, (u64) stat->ino);
+ p = encode_time3(p, &stat->atime);
lease_get_mtime(dentry->d_inode, &time);
p = encode_time3(p, &time);
- p = encode_time3(p, &stat.ctime);
+ p = encode_time3(p, &stat->ctime);
return p;
}
@@ -232,8 +229,14 @@ encode_post_op_attr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp)
{
struct dentry *dentry = fhp->fh_dentry;
if (dentry && dentry->d_inode != NULL) {
- *p++ = xdr_one; /* attributes follow */
- return encode_fattr3(rqstp, p, fhp);
+ int err;
+ struct kstat stat;
+
+ err = vfs_getattr(fhp->fh_export->ex_mnt, dentry, &stat);
+ if (!err) {
+ *p++ = xdr_one; /* attributes follow */
+ return encode_fattr3(rqstp, p, fhp, &stat);
+ }
}
*p++ = xdr_zero;
return p;
@@ -616,7 +619,7 @@ nfs3svc_encode_attrstat(struct svc_rqst *rqstp, u32 *p,
struct nfsd3_attrstat *resp)
{
if (resp->status == 0)
- p = encode_fattr3(rqstp, p, &resp->fh);
+ p = encode_fattr3(rqstp, p, &resp->fh, &resp->stat);
return xdr_ressize_check(rqstp, p);
}
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 583c0710e45..d828662d737 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -53,7 +53,7 @@
#define NFSPROC4_CB_COMPOUND 1
/* declarations */
-static void nfs4_cb_null(struct rpc_task *task);
+static const struct rpc_call_ops nfs4_cb_null_ops;
/* Index of predefined Linux callback client operations */
@@ -431,7 +431,6 @@ nfsd4_probe_callback(struct nfs4_client *clp)
}
clnt->cl_intr = 0;
clnt->cl_softrtry = 1;
- clnt->cl_chatty = 1;
/* Kick rpciod, put the call on the wire. */
@@ -447,7 +446,7 @@ nfsd4_probe_callback(struct nfs4_client *clp)
msg.rpc_cred = nfsd4_lookupcred(clp,0);
if (IS_ERR(msg.rpc_cred))
goto out_rpciod;
- status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, nfs4_cb_null, NULL);
+ status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, &nfs4_cb_null_ops, NULL);
put_rpccred(msg.rpc_cred);
if (status != 0) {
@@ -469,7 +468,7 @@ out_err:
}
static void
-nfs4_cb_null(struct rpc_task *task)
+nfs4_cb_null(struct rpc_task *task, void *dummy)
{
struct nfs4_client *clp = (struct nfs4_client *)task->tk_msg.rpc_argp;
struct nfs4_callback *cb = &clp->cl_callback;
@@ -488,6 +487,10 @@ out:
put_nfs4_client(clp);
}
+static const struct rpc_call_ops nfs4_cb_null_ops = {
+ .rpc_call_done = nfs4_cb_null,
+};
+
/*
* called with dp->dl_count inc'ed.
* nfs4_lock_state() may or may not have been called.
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 954cf893d50..be963a133aa 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -121,9 +121,9 @@ out:
static void
nfsd4_sync_rec_dir(void)
{
- down(&rec_dir.dentry->d_inode->i_sem);
+ mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
nfsd_sync_dir(rec_dir.dentry);
- up(&rec_dir.dentry->d_inode->i_sem);
+ mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
}
int
@@ -143,7 +143,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
nfs4_save_user(&uid, &gid);
/* lock the parent */
- down(&rec_dir.dentry->d_inode->i_sem);
+ mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
dentry = lookup_one_len(dname, rec_dir.dentry, HEXDIR_LEN-1);
if (IS_ERR(dentry)) {
@@ -159,7 +159,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
out_put:
dput(dentry);
out_unlock:
- up(&rec_dir.dentry->d_inode->i_sem);
+ mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
if (status == 0) {
clp->cl_firststate = 1;
nfsd4_sync_rec_dir();
@@ -259,9 +259,9 @@ nfsd4_remove_clid_file(struct dentry *dir, struct dentry *dentry)
printk("nfsd4: non-file found in client recovery directory\n");
return -EINVAL;
}
- down(&dir->d_inode->i_sem);
+ mutex_lock(&dir->d_inode->i_mutex);
status = vfs_unlink(dir->d_inode, dentry);
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
return status;
}
@@ -274,9 +274,9 @@ nfsd4_clear_clid_dir(struct dentry *dir, struct dentry *dentry)
* any regular files anyway, just in case the directory was created by
* a kernel from the future.... */
nfsd4_list_rec_dir(dentry, nfsd4_remove_clid_file);
- down(&dir->d_inode->i_sem);
+ mutex_lock(&dir->d_inode->i_mutex);
status = vfs_rmdir(dir->d_inode, dentry);
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
return status;
}
@@ -288,9 +288,9 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
dprintk("NFSD: nfsd4_unlink_clid_dir. name %.*s\n", namlen, name);
- down(&rec_dir.dentry->d_inode->i_sem);
+ mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
dentry = lookup_one_len(name, rec_dir.dentry, namlen);
- up(&rec_dir.dentry->d_inode->i_sem);
+ mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
if (IS_ERR(dentry)) {
status = PTR_ERR(dentry);
return status;
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index b45999ff33e..aa7bb41b293 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -152,46 +152,44 @@ decode_sattr(u32 *p, struct iattr *iap)
}
static inline u32 *
-encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp)
+encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp,
+ struct kstat *stat)
{
- struct vfsmount *mnt = fhp->fh_export->ex_mnt;
struct dentry *dentry = fhp->fh_dentry;
- struct kstat stat;
int type;
struct timespec time;
- vfs_getattr(mnt, dentry, &stat);
- type = (stat.mode & S_IFMT);
+ type = (stat->mode & S_IFMT);
*p++ = htonl(nfs_ftypes[type >> 12]);
- *p++ = htonl((u32) stat.mode);
- *p++ = htonl((u32) stat.nlink);
- *p++ = htonl((u32) nfsd_ruid(rqstp, stat.uid));
- *p++ = htonl((u32) nfsd_rgid(rqstp, stat.gid));
+ *p++ = htonl((u32) stat->mode);
+ *p++ = htonl((u32) stat->nlink);
+ *p++ = htonl((u32) nfsd_ruid(rqstp, stat->uid));
+ *p++ = htonl((u32) nfsd_rgid(rqstp, stat->gid));
- if (S_ISLNK(type) && stat.size > NFS_MAXPATHLEN) {
+ if (S_ISLNK(type) && stat->size > NFS_MAXPATHLEN) {
*p++ = htonl(NFS_MAXPATHLEN);
} else {
- *p++ = htonl((u32) stat.size);
+ *p++ = htonl((u32) stat->size);
}
- *p++ = htonl((u32) stat.blksize);
+ *p++ = htonl((u32) stat->blksize);
if (S_ISCHR(type) || S_ISBLK(type))
- *p++ = htonl(new_encode_dev(stat.rdev));
+ *p++ = htonl(new_encode_dev(stat->rdev));
else
*p++ = htonl(0xffffffff);
- *p++ = htonl((u32) stat.blocks);
+ *p++ = htonl((u32) stat->blocks);
if (is_fsid(fhp, rqstp->rq_reffh))
*p++ = htonl((u32) fhp->fh_export->ex_fsid);
else
- *p++ = htonl(new_encode_dev(stat.dev));
- *p++ = htonl((u32) stat.ino);
- *p++ = htonl((u32) stat.atime.tv_sec);
- *p++ = htonl(stat.atime.tv_nsec ? stat.atime.tv_nsec / 1000 : 0);
+ *p++ = htonl(new_encode_dev(stat->dev));
+ *p++ = htonl((u32) stat->ino);
+ *p++ = htonl((u32) stat->atime.tv_sec);
+ *p++ = htonl(stat->atime.tv_nsec ? stat->atime.tv_nsec / 1000 : 0);
lease_get_mtime(dentry->d_inode, &time);
*p++ = htonl((u32) time.tv_sec);
*p++ = htonl(time.tv_nsec ? time.tv_nsec / 1000 : 0);
- *p++ = htonl((u32) stat.ctime.tv_sec);
- *p++ = htonl(stat.ctime.tv_nsec ? stat.ctime.tv_nsec / 1000 : 0);
+ *p++ = htonl((u32) stat->ctime.tv_sec);
+ *p++ = htonl(stat->ctime.tv_nsec ? stat->ctime.tv_nsec / 1000 : 0);
return p;
}
@@ -199,7 +197,9 @@ encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp)
/* Helper function for NFSv2 ACL code */
u32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, u32 *p, struct svc_fh *fhp)
{
- return encode_fattr(rqstp, p, fhp);
+ struct kstat stat;
+ vfs_getattr(fhp->fh_export->ex_mnt, fhp->fh_dentry, &stat);
+ return encode_fattr(rqstp, p, fhp, &stat);
}
/*
@@ -394,7 +394,7 @@ int
nfssvc_encode_attrstat(struct svc_rqst *rqstp, u32 *p,
struct nfsd_attrstat *resp)
{
- p = encode_fattr(rqstp, p, &resp->fh);
+ p = encode_fattr(rqstp, p, &resp->fh, &resp->stat);
return xdr_ressize_check(rqstp, p);
}
@@ -403,7 +403,7 @@ nfssvc_encode_diropres(struct svc_rqst *rqstp, u32 *p,
struct nfsd_diropres *resp)
{
p = encode_fh(p, &resp->fh);
- p = encode_fattr(rqstp, p, &resp->fh);
+ p = encode_fattr(rqstp, p, &resp->fh, &resp->stat);
return xdr_ressize_check(rqstp, p);
}
@@ -428,7 +428,7 @@ int
nfssvc_encode_readres(struct svc_rqst *rqstp, u32 *p,
struct nfsd_readres *resp)
{
- p = encode_fattr(rqstp, p, &resp->fh);
+ p = encode_fattr(rqstp, p, &resp->fh, &resp->stat);
*p++ = htonl(resp->count);
xdr_ressize_check(rqstp, p);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index af7c3c3074b..eef0576a778 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -48,8 +48,8 @@
#include <linux/fsnotify.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
-#ifdef CONFIG_NFSD_V4
#include <linux/xattr.h>
+#ifdef CONFIG_NFSD_V4
#include <linux/nfs4.h>
#include <linux/nfs4_acl.h>
#include <linux/nfsd_idmap.h>
@@ -365,8 +365,30 @@ out_nfserr:
goto out;
}
-#if defined(CONFIG_NFSD_V4)
+#if defined(CONFIG_NFSD_V2_ACL) || \
+ defined(CONFIG_NFSD_V3_ACL) || \
+ defined(CONFIG_NFSD_V4)
+static ssize_t nfsd_getxattr(struct dentry *dentry, char *key, void **buf)
+{
+ ssize_t buflen;
+ int error;
+
+ buflen = vfs_getxattr(dentry, key, NULL, 0);
+ if (buflen <= 0)
+ return buflen;
+
+ *buf = kmalloc(buflen, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ error = vfs_getxattr(dentry, key, *buf, buflen);
+ if (error < 0)
+ return error;
+ return buflen;
+}
+#endif
+
+#if defined(CONFIG_NFSD_V4)
static int
set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
{
@@ -374,7 +396,6 @@ set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
size_t buflen;
char *buf = NULL;
int error = 0;
- struct inode *inode = dentry->d_inode;
buflen = posix_acl_xattr_size(pacl->a_count);
buf = kmalloc(buflen, GFP_KERNEL);
@@ -388,15 +409,7 @@ set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
goto out;
}
- error = -EOPNOTSUPP;
- if (inode->i_op && inode->i_op->setxattr) {
- down(&inode->i_sem);
- security_inode_setxattr(dentry, key, buf, len, 0);
- error = inode->i_op->setxattr(dentry, key, buf, len, 0);
- if (!error)
- security_inode_post_setxattr(dentry, key, buf, len, 0);
- up(&inode->i_sem);
- }
+ error = vfs_setxattr(dentry, key, buf, len, 0);
out:
kfree(buf);
return error;
@@ -455,44 +468,19 @@ out_nfserr:
static struct posix_acl *
_get_posix_acl(struct dentry *dentry, char *key)
{
- struct inode *inode = dentry->d_inode;
- char *buf = NULL;
- int buflen, error = 0;
+ void *buf = NULL;
struct posix_acl *pacl = NULL;
+ int buflen;
- error = -EOPNOTSUPP;
- if (inode->i_op == NULL)
- goto out_err;
- if (inode->i_op->getxattr == NULL)
- goto out_err;
-
- error = security_inode_getxattr(dentry, key);
- if (error)
- goto out_err;
-
- buflen = inode->i_op->getxattr(dentry, key, NULL, 0);
- if (buflen <= 0) {
- error = buflen < 0 ? buflen : -ENODATA;
- goto out_err;
- }
-
- buf = kmalloc(buflen, GFP_KERNEL);
- if (buf == NULL) {
- error = -ENOMEM;
- goto out_err;
- }
-
- error = inode->i_op->getxattr(dentry, key, buf, buflen);
- if (error < 0)
- goto out_err;
+ buflen = nfsd_getxattr(dentry, key, &buf);
+ if (!buflen)
+ buflen = -ENODATA;
+ if (buflen <= 0)
+ return ERR_PTR(buflen);
pacl = posix_acl_from_xattr(buf, buflen);
- out:
kfree(buf);
return pacl;
- out_err:
- pacl = ERR_PTR(error);
- goto out;
}
int
@@ -717,27 +705,33 @@ nfsd_close(struct file *filp)
* As this calls fsync (not fdatasync) there is no need for a write_inode
* after it.
*/
-static inline void nfsd_dosync(struct file *filp, struct dentry *dp,
- struct file_operations *fop)
+static inline int nfsd_dosync(struct file *filp, struct dentry *dp,
+ struct file_operations *fop)
{
struct inode *inode = dp->d_inode;
int (*fsync) (struct file *, struct dentry *, int);
+ int err = nfs_ok;
filemap_fdatawrite(inode->i_mapping);
if (fop && (fsync = fop->fsync))
- fsync(filp, dp, 0);
+ err=fsync(filp, dp, 0);
filemap_fdatawait(inode->i_mapping);
+
+ return nfserrno(err);
}
-static void
+static int
nfsd_sync(struct file *filp)
{
+ int err;
struct inode *inode = filp->f_dentry->d_inode;
dprintk("nfsd: sync file %s\n", filp->f_dentry->d_name.name);
- down(&inode->i_sem);
- nfsd_dosync(filp, filp->f_dentry, filp->f_op);
- up(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
+ err=nfsd_dosync(filp, filp->f_dentry, filp->f_op);
+ mutex_unlock(&inode->i_mutex);
+
+ return err;
}
void
@@ -874,6 +868,16 @@ out:
return err;
}
+static void kill_suid(struct dentry *dentry)
+{
+ struct iattr ia;
+ ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID;
+
+ mutex_lock(&dentry->d_inode->i_mutex);
+ notify_change(dentry, &ia);
+ mutex_unlock(&dentry->d_inode->i_mutex);
+}
+
static inline int
nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
loff_t offset, struct kvec *vec, int vlen,
@@ -927,14 +931,8 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
}
/* clear setuid/setgid flag after write */
- if (err >= 0 && (inode->i_mode & (S_ISUID | S_ISGID))) {
- struct iattr ia;
- ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID;
-
- down(&inode->i_sem);
- notify_change(dentry, &ia);
- up(&inode->i_sem);
- }
+ if (err >= 0 && (inode->i_mode & (S_ISUID | S_ISGID)))
+ kill_suid(dentry);
if (err >= 0 && stable) {
static ino_t last_ino;
@@ -962,7 +960,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
if (inode->i_state & I_DIRTY) {
dprintk("nfsd: write sync %d\n", current->pid);
- nfsd_sync(file);
+ err = nfsd_sync(file);
}
#if 0
wake_up(&inode->i_wait);
@@ -1066,7 +1064,7 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
return err;
if (EX_ISSYNC(fhp->fh_export)) {
if (file->f_op && file->f_op->fsync) {
- nfsd_sync(file);
+ err = nfsd_sync(file);
} else {
err = nfserr_notsupp;
}
@@ -1874,39 +1872,25 @@ nfsd_get_posix_acl(struct svc_fh *fhp, int type)
ssize_t size;
struct posix_acl *acl;
- if (!IS_POSIXACL(inode) || !inode->i_op || !inode->i_op->getxattr)
+ if (!IS_POSIXACL(inode))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ name = POSIX_ACL_XATTR_ACCESS;
+ break;
+ case ACL_TYPE_DEFAULT:
+ name = POSIX_ACL_XATTR_DEFAULT;
+ break;
+ default:
return ERR_PTR(-EOPNOTSUPP);
- switch(type) {
- case ACL_TYPE_ACCESS:
- name = POSIX_ACL_XATTR_ACCESS;
- break;
- case ACL_TYPE_DEFAULT:
- name = POSIX_ACL_XATTR_DEFAULT;
- break;
- default:
- return ERR_PTR(-EOPNOTSUPP);
}
- size = inode->i_op->getxattr(fhp->fh_dentry, name, NULL, 0);
+ size = nfsd_getxattr(fhp->fh_dentry, name, &value);
+ if (size < 0)
+ return ERR_PTR(size);
- if (size < 0) {
- acl = ERR_PTR(size);
- goto getout;
- } else if (size > 0) {
- value = kmalloc(size, GFP_KERNEL);
- if (!value) {
- acl = ERR_PTR(-ENOMEM);
- goto getout;
- }
- size = inode->i_op->getxattr(fhp->fh_dentry, name, value, size);
- if (size < 0) {
- acl = ERR_PTR(size);
- goto getout;
- }
- }
acl = posix_acl_from_xattr(value, size);
-
-getout:
kfree(value);
return acl;
}
@@ -1947,16 +1931,13 @@ nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl)
} else
size = 0;
- if (!fhp->fh_locked)
- fh_lock(fhp); /* unlocking is done automatically */
if (size)
- error = inode->i_op->setxattr(fhp->fh_dentry, name,
- value, size, 0);
+ error = vfs_setxattr(fhp->fh_dentry, name, value, size, 0);
else {
if (!S_ISDIR(inode->i_mode) && type == ACL_TYPE_DEFAULT)
error = 0;
else {
- error = inode->i_op->removexattr(fhp->fh_dentry, name);
+ error = vfs_removexattr(fhp->fh_dentry, name);
if (error == -ENODATA)
error = 0;
}
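
Illustration only, not part of the patch: the nfsd hunks above drop the open-coded calls through inode->i_op->setxattr/removexattr in favour of the generic VFS helpers, which take i_mutex and run the security hooks internally. A minimal sketch of the resulting calling pattern; the helper name store_or_clear_xattr is invented for this example.

#include <linux/fs.h>
#include <linux/xattr.h>

/* Hypothetical helper sketching the pattern nfsd is switched to:
 * vfs_setxattr()/vfs_removexattr() take i_mutex and invoke the security
 * hooks themselves, so none of that is open-coded here any more. */
static int store_or_clear_xattr(struct dentry *dentry, char *name,
				void *value, size_t size)
{
	int error;

	if (size)
		error = vfs_setxattr(dentry, name, value, size, 0);
	else {
		error = vfs_removexattr(dentry, name);
		if (error == -ENODATA)	/* nothing to remove is not an error */
			error = 0;
	}
	return error;
}
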
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index eda056bac25..9480a0526cd 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -1532,7 +1532,7 @@ int ntfs_resident_attr_value_resize(MFT_RECORD *m, ATTR_RECORD *a,
* NOTE to self: No changes in the attribute list are required to move from
* a resident to a non-resident attribute.
*
- * Locking: - The caller must hold i_sem on the inode.
+ * Locking: - The caller must hold i_mutex on the inode.
*/
int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
{
@@ -1728,7 +1728,7 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
/*
* This needs to be last since the address space operations ->readpage
* and ->writepage can run concurrently with us as they are not
- * serialized on i_sem. Note, we are not allowed to fail once we flip
+ * serialized on i_mutex. Note, we are not allowed to fail once we flip
* this switch, which is another reason to do this last.
*/
NInoSetNonResident(ni);
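
Illustration only, not part of the patch: the i_sem references in the ntfs comments throughout this patch are all updated the same way, because the inode semaphore became a mutex in this release; down()/up() pairs on i_sem become mutex_lock()/mutex_unlock() on i_mutex. A minimal sketch of the conversion (the function name is invented):

#include <linux/fs.h>
#include <linux/mutex.h>

static void do_serialized_inode_work(struct inode *inode)
{
	/* before: down(&inode->i_sem); ... up(&inode->i_sem); */
	mutex_lock(&inode->i_mutex);
	/* ... work that must be serialized with other i_mutex holders ... */
	mutex_unlock(&inode->i_mutex);
}
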
diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
index 795c3d1930f..b0690d4c890 100644
--- a/fs/ntfs/dir.c
+++ b/fs/ntfs/dir.c
@@ -69,7 +69,7 @@ ntfschar I30[5] = { const_cpu_to_le16('$'), const_cpu_to_le16('I'),
* work but we don't care for how quickly one can access them. This also fixes
* the dcache aliasing issues.
*
- * Locking: - Caller must hold i_sem on the directory.
+ * Locking: - Caller must hold i_mutex on the directory.
* - Each page cache page in the index allocation mapping must be
* locked whilst being accessed otherwise we may find a corrupt
* page due to it being under ->writepage at the moment which
@@ -1085,11 +1085,11 @@ static inline int ntfs_filldir(ntfs_volume *vol, loff_t fpos,
* While this will return the names in random order this doesn't matter for
* ->readdir but OTOH results in a faster ->readdir.
*
- * VFS calls ->readdir without BKL but with i_sem held. This protects the VFS
+ * VFS calls ->readdir without BKL but with i_mutex held. This protects the VFS
* parts (e.g. ->f_pos and ->i_size, and it also protects against directory
* modifications).
*
- * Locking: - Caller must hold i_sem on the directory.
+ * Locking: - Caller must hold i_mutex on the directory.
* - Each page cache page in the index allocation mapping must be
* locked whilst being accessed otherwise we may find a corrupt
* page due to it being under ->writepage at the moment which
@@ -1520,7 +1520,7 @@ static int ntfs_dir_open(struct inode *vi, struct file *filp)
* Note: In the past @filp could be NULL so we ignore it as we don't need it
* anyway.
*
- * Locking: Caller must hold i_sem on the inode.
+ * Locking: Caller must hold i_mutex on the inode.
*
* TODO: We should probably also write all attribute/index inodes associated
* with this inode but since we have no simple way of getting to them we ignore
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 72753389181..fb413d3d861 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -106,7 +106,7 @@ static int ntfs_file_open(struct inode *vi, struct file *filp)
* this is the case, the necessary zeroing will also have happened and that all
* metadata is self-consistent.
*
- * Locking: i_sem on the vfs inode corrseponsind to the ntfs inode @ni must be
+ * Locking: i_mutex on the vfs inode corresponding to the ntfs inode @ni must be
* held by the caller.
*/
static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size,
@@ -473,7 +473,7 @@ static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
* @bytes: number of bytes to be written
*
* This is called for non-resident attributes from ntfs_file_buffered_write()
- * with i_sem held on the inode (@pages[0]->mapping->host). There are
+ * with i_mutex held on the inode (@pages[0]->mapping->host). There are
* @nr_pages pages in @pages which are locked but not kmap()ped. The source
* data has not yet been copied into the @pages.
*
@@ -1637,7 +1637,7 @@ err_out:
* @pos: byte position in file at which the write begins
* @bytes: number of bytes to be written
*
- * This is called from ntfs_file_buffered_write() with i_sem held on the inode
+ * This is called from ntfs_file_buffered_write() with i_mutex held on the inode
* (@pages[0]->mapping->host). There are @nr_pages pages in @pages which are
* locked but not kmap()ped. The source data has already been copied into the
* @page. ntfs_prepare_pages_for_non_resident_write() has been called before
@@ -1814,7 +1814,7 @@ err_out:
/**
* ntfs_file_buffered_write -
*
- * Locking: The vfs is holding ->i_sem on the inode.
+ * Locking: The vfs is holding ->i_mutex on the inode.
*/
static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
const struct iovec *iov, unsigned long nr_segs,
@@ -2173,7 +2173,7 @@ static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
err = remove_suid(file->f_dentry);
if (err)
goto out;
- inode_update_time(inode, 1);
+ file_update_time(file);
written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos,
count);
out:
@@ -2196,9 +2196,9 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const char __user *buf,
BUG_ON(iocb->ki_pos != pos);
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
ret = ntfs_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
int err = sync_page_range(inode, mapping, pos, ret);
if (err < 0)
@@ -2221,12 +2221,12 @@ static ssize_t ntfs_file_writev(struct file *file, const struct iovec *iov,
struct kiocb kiocb;
ssize_t ret;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
init_sync_kiocb(&kiocb, file);
ret = ntfs_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
if (ret == -EIOCBQUEUED)
ret = wait_on_sync_kiocb(&kiocb);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
int err = sync_page_range(inode, mapping, *ppos - ret, ret);
if (err < 0)
@@ -2269,7 +2269,7 @@ static ssize_t ntfs_file_write(struct file *file, const char __user *buf,
* Note: In the past @filp could be NULL so we ignore it as we don't need it
* anyway.
*
- * Locking: Caller must hold i_sem on the inode.
+ * Locking: Caller must hold i_mutex on the inode.
*
* TODO: We should probably also write all attribute/index inodes associated
* with this inode but since we have no simple way of getting to them we ignore
diff --git a/fs/ntfs/index.c b/fs/ntfs/index.c
index 8f2d5727546..9f5427c2d10 100644
--- a/fs/ntfs/index.c
+++ b/fs/ntfs/index.c
@@ -32,7 +32,7 @@
* Allocate a new index context, initialize it with @idx_ni and return it.
* Return NULL if allocation failed.
*
- * Locking: Caller must hold i_sem on the index inode.
+ * Locking: Caller must hold i_mutex on the index inode.
*/
ntfs_index_context *ntfs_index_ctx_get(ntfs_inode *idx_ni)
{
@@ -50,7 +50,7 @@ ntfs_index_context *ntfs_index_ctx_get(ntfs_inode *idx_ni)
*
* Release the index context @ictx, releasing all associated resources.
*
- * Locking: Caller must hold i_sem on the index inode.
+ * Locking: Caller must hold i_mutex on the index inode.
*/
void ntfs_index_ctx_put(ntfs_index_context *ictx)
{
@@ -106,7 +106,7 @@ void ntfs_index_ctx_put(ntfs_index_context *ictx)
* or ntfs_index_entry_write() before the call to ntfs_index_ctx_put() to
* ensure that the changes are written to disk.
*
- * Locking: - Caller must hold i_sem on the index inode.
+ * Locking: - Caller must hold i_mutex on the index inode.
* - Each page cache page in the index allocation mapping must be
* locked whilst being accessed otherwise we may find a corrupt
* page due to it being under ->writepage at the moment which
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index b24f4c4b2c5..ea1bd3feea1 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -2125,13 +2125,13 @@ void ntfs_put_inode(struct inode *vi)
ntfs_inode *ni = NTFS_I(vi);
if (NInoIndexAllocPresent(ni)) {
struct inode *bvi = NULL;
- down(&vi->i_sem);
+ mutex_lock(&vi->i_mutex);
if (atomic_read(&vi->i_count) == 2) {
bvi = ni->itype.index.bmp_ino;
if (bvi)
ni->itype.index.bmp_ino = NULL;
}
- up(&vi->i_sem);
+ mutex_unlock(&vi->i_mutex);
if (bvi)
iput(bvi);
}
@@ -2311,7 +2311,7 @@ static const char *es = " Leaving inconsistent metadata. Unmount and run "
*
* Returns 0 on success or -errno on error.
*
- * Called with ->i_sem held. In all but one case ->i_alloc_sem is held for
+ * Called with ->i_mutex held. In all but one case ->i_alloc_sem is held for
* writing. The only case in the kernel where ->i_alloc_sem is not held is
* mm/filemap.c::generic_file_buffered_write() where vmtruncate() is called
* with the current i_size as the offset. The analogous place in NTFS is in
@@ -2767,7 +2767,25 @@ unm_done:
up_write(&ni->runlist.lock);
done:
/* Update the mtime and ctime on the base inode. */
- inode_update_time(VFS_I(base_ni), 1);
+ /* Normally ->truncate should not update ctime or mtime, but NTFS
+ * did before, so it got a copy-and-paste version of
+ * file_update_time(). One day someone should fix this for real.
+ */
+ if (!IS_NOCMTIME(VFS_I(base_ni)) && !IS_RDONLY(VFS_I(base_ni))) {
+ struct timespec now = current_fs_time(VFS_I(base_ni)->i_sb);
+ int sync_it = 0;
+
+ if (!timespec_equal(&VFS_I(base_ni)->i_mtime, &now) ||
+ !timespec_equal(&VFS_I(base_ni)->i_ctime, &now))
+ sync_it = 1;
+ VFS_I(base_ni)->i_mtime = now;
+ VFS_I(base_ni)->i_ctime = now;
+
+ if (sync_it)
+ mark_inode_dirty_sync(VFS_I(base_ni));
+ }
+
if (likely(!err)) {
NInoClearTruncateFailed(ni);
ntfs_debug("Done.");
@@ -2831,7 +2849,7 @@ void ntfs_truncate_vfs(struct inode *vi) {
* We also abort all changes of user, group, and mode as we do not implement
* the NTFS ACLs yet.
*
- * Called with ->i_sem held. For the ATTR_SIZE (i.e. ->truncate) case, also
+ * Called with ->i_mutex held. For the ATTR_SIZE (i.e. ->truncate) case, also
* called with ->i_alloc_sem held for writing.
*
* Basically this is a copy of generic notify_change() and inode_setattr()
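
Illustration only, not part of the patch: the ntfs write path above now calls file_update_time(), while the truncate path in inode.c open-codes an equivalent because ->truncate only has the inode, as its new comment explains. A sketch of the helper's use in an ordinary write method; everything except file_update_time() is invented for the example.

#include <linux/fs.h>

static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	/* Update i_mtime/i_ctime and mark the inode dirty if they changed;
	 * IS_NOCMTIME()/IS_RDONLY() inodes are left alone. */
	file_update_time(file);
	/* ... copy the data ... */
	return count;
}
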
diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c
index 351dbc3b6e4..5ea9eb93af6 100644
--- a/fs/ntfs/namei.c
+++ b/fs/ntfs/namei.c
@@ -96,7 +96,7 @@
* name. We then convert the name to the current NLS code page, and proceed
* searching for a dentry with this name, etc, as in case 2), above.
*
- * Locking: Caller must hold i_sem on the directory.
+ * Locking: Caller must hold i_mutex on the directory.
*/
static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent,
struct nameidata *nd)
@@ -254,7 +254,7 @@ handle_name:
nls_name.hash = full_name_hash(nls_name.name, nls_name.len);
/*
- * Note: No need for dent->d_lock lock as i_sem is held on the
+ * Note: No need for dent->d_lock lock as i_mutex is held on the
* parent inode.
*/
@@ -374,7 +374,7 @@ struct inode_operations ntfs_dir_inode_ops = {
* The code is based on the ext3 ->get_parent() implementation found in
* fs/ext3/namei.c::ext3_get_parent().
*
- * Note: ntfs_get_parent() is called with @child_dent->d_inode->i_sem down.
+ * Note: ntfs_get_parent() is called with @child_dent->d_inode->i_mutex down.
*
* Return the dentry of the parent directory on success or the error code on
* error (IS_ERR() is true).
diff --git a/fs/ntfs/quota.c b/fs/ntfs/quota.c
index 833df2a4e9f..d0ef4182147 100644
--- a/fs/ntfs/quota.c
+++ b/fs/ntfs/quota.c
@@ -48,7 +48,7 @@ BOOL ntfs_mark_quotas_out_of_date(ntfs_volume *vol)
ntfs_error(vol->sb, "Quota inodes are not open.");
return FALSE;
}
- down(&vol->quota_q_ino->i_sem);
+ mutex_lock(&vol->quota_q_ino->i_mutex);
ictx = ntfs_index_ctx_get(NTFS_I(vol->quota_q_ino));
if (!ictx) {
ntfs_error(vol->sb, "Failed to get index context.");
@@ -98,7 +98,7 @@ BOOL ntfs_mark_quotas_out_of_date(ntfs_volume *vol)
ntfs_index_entry_mark_dirty(ictx);
set_done:
ntfs_index_ctx_put(ictx);
- up(&vol->quota_q_ino->i_sem);
+ mutex_unlock(&vol->quota_q_ino->i_mutex);
/*
* We set the flag so we do not try to mark the quotas out of date
* again on remount.
@@ -110,7 +110,7 @@ done:
err_out:
if (ictx)
ntfs_index_ctx_put(ictx);
- up(&vol->quota_q_ino->i_sem);
+ mutex_unlock(&vol->quota_q_ino->i_mutex);
return FALSE;
}
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 6c16db9e1a8..c3a3f1a8310 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -443,8 +443,8 @@ static int ntfs_remount(struct super_block *sb, int *flags, char *opt)
ntfs_debug("Entering with remount options string: %s", opt);
#ifndef NTFS_RW
- /* For read-only compiled driver, enforce all read-only flags. */
- *flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+ /* For read-only compiled driver, enforce read-only flag. */
+ *flags |= MS_RDONLY;
#else /* NTFS_RW */
/*
* For the read-write compiled driver, if we are remounting read-write,
@@ -1213,10 +1213,10 @@ static int check_windows_hibernation_status(ntfs_volume *vol)
* Find the inode number for the hibernation file by looking up the
* filename hiberfil.sys in the root directory.
*/
- down(&vol->root_ino->i_sem);
+ mutex_lock(&vol->root_ino->i_mutex);
mref = ntfs_lookup_inode_by_name(NTFS_I(vol->root_ino), hiberfil, 12,
&name);
- up(&vol->root_ino->i_sem);
+ mutex_unlock(&vol->root_ino->i_mutex);
if (IS_ERR_MREF(mref)) {
ret = MREF_ERR(mref);
/* If the file does not exist, Windows is not hibernated. */
@@ -1307,10 +1307,10 @@ static BOOL load_and_init_quota(ntfs_volume *vol)
* Find the inode number for the quota file by looking up the filename
* $Quota in the extended system files directory $Extend.
*/
- down(&vol->extend_ino->i_sem);
+ mutex_lock(&vol->extend_ino->i_mutex);
mref = ntfs_lookup_inode_by_name(NTFS_I(vol->extend_ino), Quota, 6,
&name);
- up(&vol->extend_ino->i_sem);
+ mutex_unlock(&vol->extend_ino->i_mutex);
if (IS_ERR_MREF(mref)) {
/*
* If the file does not exist, quotas are disabled and have
@@ -1390,10 +1390,10 @@ static BOOL load_and_init_usnjrnl(ntfs_volume *vol)
* Find the inode number for the transaction log file by looking up the
* filename $UsnJrnl in the extended system files directory $Extend.
*/
- down(&vol->extend_ino->i_sem);
+ mutex_lock(&vol->extend_ino->i_mutex);
mref = ntfs_lookup_inode_by_name(NTFS_I(vol->extend_ino), UsnJrnl, 8,
&name);
- up(&vol->extend_ino->i_sem);
+ mutex_unlock(&vol->extend_ino->i_mutex);
if (IS_ERR_MREF(mref)) {
/*
* If the file does not exist, transaction logging is disabled,
@@ -1721,7 +1721,7 @@ static BOOL load_system_files(ntfs_volume *vol)
es3);
goto iput_mirr_err_out;
}
- sb->s_flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+ sb->s_flags |= MS_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s",
!vol->mftmirr_ino ? es1 : es2, es3);
} else
@@ -1837,7 +1837,7 @@ get_ctx_vol_failed:
es1, es2);
goto iput_vol_err_out;
}
- sb->s_flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+ sb->s_flags |= MS_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
@@ -1874,7 +1874,7 @@ get_ctx_vol_failed:
}
goto iput_logfile_err_out;
}
- sb->s_flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+ sb->s_flags |= MS_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
@@ -1919,7 +1919,7 @@ get_ctx_vol_failed:
es1, es2);
goto iput_root_err_out;
}
- sb->s_flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+ sb->s_flags |= MS_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
@@ -1943,7 +1943,7 @@ get_ctx_vol_failed:
goto iput_root_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
- sb->s_flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+ sb->s_flags |= MS_RDONLY;
/*
* Do not set NVolErrors() because ntfs_remount() might manage
* to set the dirty flag in which case all would be well.
@@ -1970,7 +1970,7 @@ get_ctx_vol_failed:
goto iput_root_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
- sb->s_flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+ sb->s_flags |= MS_RDONLY;
NVolSetErrors(vol);
}
#endif
@@ -1989,7 +1989,7 @@ get_ctx_vol_failed:
goto iput_root_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
- sb->s_flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+ sb->s_flags |= MS_RDONLY;
NVolSetErrors(vol);
}
#endif /* NTFS_RW */
@@ -2030,7 +2030,7 @@ get_ctx_vol_failed:
es1, es2);
goto iput_quota_err_out;
}
- sb->s_flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+ sb->s_flags |= MS_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
@@ -2053,7 +2053,7 @@ get_ctx_vol_failed:
goto iput_quota_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
- sb->s_flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+ sb->s_flags |= MS_RDONLY;
NVolSetErrors(vol);
}
/*
@@ -2074,7 +2074,7 @@ get_ctx_vol_failed:
es1, es2);
goto iput_usnjrnl_err_out;
}
- sb->s_flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+ sb->s_flags |= MS_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
@@ -2097,7 +2097,7 @@ get_ctx_vol_failed:
goto iput_usnjrnl_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
- sb->s_flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+ sb->s_flags |= MS_RDONLY;
NVolSetErrors(vol);
}
#endif /* NTFS_RW */
@@ -2312,9 +2312,9 @@ static void ntfs_put_super(struct super_block *sb)
if (!list_empty(&sb->s_dirty)) {
const char *s1, *s2;
- down(&vol->mft_ino->i_sem);
+ mutex_lock(&vol->mft_ino->i_mutex);
truncate_inode_pages(vol->mft_ino->i_mapping, 0);
- up(&vol->mft_ino->i_sem);
+ mutex_unlock(&vol->mft_ino->i_mutex);
write_inode_now(vol->mft_ino, 1);
if (!list_empty(&sb->s_dirty)) {
static const char *_s1 = "inodes";
@@ -2689,7 +2689,7 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent)
ntfs_debug("Entering.");
#ifndef NTFS_RW
- sb->s_flags |= MS_RDONLY | MS_NOATIME | MS_NODIRATIME;
+ sb->s_flags |= MS_RDONLY;
#endif /* ! NTFS_RW */
/* Allocate a new ntfs_volume and place it in sb->s_fs_info. */
sb->s_fs_info = kmalloc(sizeof(ntfs_volume), GFP_NOFS);
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile
new file mode 100644
index 00000000000..7d3be845a61
--- /dev/null
+++ b/fs/ocfs2/Makefile
@@ -0,0 +1,33 @@
+EXTRA_CFLAGS += -Ifs/ocfs2
+
+EXTRA_CFLAGS += -DCATCH_BH_JBD_RACES
+
+obj-$(CONFIG_OCFS2_FS) += ocfs2.o
+
+ocfs2-objs := \
+ alloc.o \
+ aops.o \
+ buffer_head_io.o \
+ dcache.o \
+ dir.o \
+ dlmglue.o \
+ export.o \
+ extent_map.o \
+ file.o \
+ heartbeat.o \
+ inode.o \
+ journal.o \
+ localalloc.o \
+ mmap.o \
+ namei.o \
+ slot_map.o \
+ suballoc.o \
+ super.o \
+ symlink.o \
+ sysfile.o \
+ uptodate.o \
+ ver.o \
+ vote.o
+
+obj-$(CONFIG_OCFS2_FS) += cluster/
+obj-$(CONFIG_OCFS2_FS) += dlm/
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
new file mode 100644
index 00000000000..6b9812db377
--- /dev/null
+++ b/fs/ocfs2/alloc.c
@@ -0,0 +1,2040 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * alloc.c
+ *
+ * Extent allocs and frees
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+
+#define MLOG_MASK_PREFIX ML_DISK_ALLOC
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#include "alloc.h"
+#include "dlmglue.h"
+#include "extent_map.h"
+#include "inode.h"
+#include "journal.h"
+#include "localalloc.h"
+#include "suballoc.h"
+#include "sysfile.h"
+#include "file.h"
+#include "super.h"
+#include "uptodate.h"
+
+#include "buffer_head_io.h"
+
+static int ocfs2_extent_contig(struct inode *inode,
+ struct ocfs2_extent_rec *ext,
+ u64 blkno);
+
+static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ int wanted,
+ struct ocfs2_alloc_context *meta_ac,
+ struct buffer_head *bhs[]);
+
+static int ocfs2_add_branch(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct buffer_head *eb_bh,
+ struct buffer_head *last_eb_bh,
+ struct ocfs2_alloc_context *meta_ac);
+
+static int ocfs2_shift_tree_depth(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct ocfs2_alloc_context *meta_ac,
+ struct buffer_head **ret_new_eb_bh);
+
+static int ocfs2_do_insert_extent(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ u64 blkno,
+ u32 new_clusters);
+
+static int ocfs2_find_branch_target(struct ocfs2_super *osb,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct buffer_head **target_bh);
+
+static int ocfs2_find_new_last_ext_blk(struct ocfs2_super *osb,
+ struct inode *inode,
+ struct ocfs2_dinode *fe,
+ unsigned int new_i_clusters,
+ struct buffer_head *old_last_eb,
+ struct buffer_head **new_last_eb);
+
+static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc);
+
+static int ocfs2_extent_contig(struct inode *inode,
+ struct ocfs2_extent_rec *ext,
+ u64 blkno)
+{
+ return blkno == (le64_to_cpu(ext->e_blkno) +
+ ocfs2_clusters_to_blocks(inode->i_sb,
+ le32_to_cpu(ext->e_clusters)));
+}
+
+/*
+ * How many free extents have we got before we need more meta data?
+ */
+int ocfs2_num_free_extents(struct ocfs2_super *osb,
+ struct inode *inode,
+ struct ocfs2_dinode *fe)
+{
+ int retval;
+ struct ocfs2_extent_list *el;
+ struct ocfs2_extent_block *eb;
+ struct buffer_head *eb_bh = NULL;
+
+ mlog_entry_void();
+
+ if (!OCFS2_IS_VALID_DINODE(fe)) {
+ OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
+ retval = -EIO;
+ goto bail;
+ }
+
+ if (fe->i_last_eb_blk) {
+ retval = ocfs2_read_block(osb, le64_to_cpu(fe->i_last_eb_blk),
+ &eb_bh, OCFS2_BH_CACHED, inode);
+ if (retval < 0) {
+ mlog_errno(retval);
+ goto bail;
+ }
+ eb = (struct ocfs2_extent_block *) eb_bh->b_data;
+ el = &eb->h_list;
+ } else
+ el = &fe->id2.i_list;
+
+ BUG_ON(el->l_tree_depth != 0);
+
+ retval = le16_to_cpu(el->l_count) - le16_to_cpu(el->l_next_free_rec);
+bail:
+ if (eb_bh)
+ brelse(eb_bh);
+
+ mlog_exit(retval);
+ return retval;
+}
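
Illustration only, not part of the patch: a caller can use the count returned above to decide whether extent blocks (metadata) must be reserved before inserting another record. A hypothetical sketch; needs_more_meta is an invented name.

static int needs_more_meta(struct ocfs2_super *osb, struct inode *inode,
			   struct ocfs2_dinode *fe)
{
	int free_recs = ocfs2_num_free_extents(osb, inode, fe);

	if (free_recs < 0)
		return free_recs;	/* propagate -EIO etc. */
	/* zero free record slots in the rightmost leaf means new extent
	 * blocks must be allocated before the next insert */
	return free_recs == 0;
}
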
+
+/* expects array to already be allocated
+ *
+ * sets h_signature, h_blkno, h_suballoc_bit, h_suballoc_slot, and
+ * l_count for you
+ */
+static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ int wanted,
+ struct ocfs2_alloc_context *meta_ac,
+ struct buffer_head *bhs[])
+{
+ int count, status, i;
+ u16 suballoc_bit_start;
+ u32 num_got;
+ u64 first_blkno;
+ struct ocfs2_extent_block *eb;
+
+ mlog_entry_void();
+
+ count = 0;
+ while (count < wanted) {
+ status = ocfs2_claim_metadata(osb,
+ handle,
+ meta_ac,
+ wanted - count,
+ &suballoc_bit_start,
+ &num_got,
+ &first_blkno);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ for(i = count; i < (num_got + count); i++) {
+ bhs[i] = sb_getblk(osb->sb, first_blkno);
+ if (bhs[i] == NULL) {
+ status = -EIO;
+ mlog_errno(status);
+ goto bail;
+ }
+ ocfs2_set_new_buffer_uptodate(inode, bhs[i]);
+
+ status = ocfs2_journal_access(handle, inode, bhs[i],
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ memset(bhs[i]->b_data, 0, osb->sb->s_blocksize);
+ eb = (struct ocfs2_extent_block *) bhs[i]->b_data;
+ /* Ok, setup the minimal stuff here. */
+ strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
+ eb->h_blkno = cpu_to_le64(first_blkno);
+ eb->h_fs_generation = cpu_to_le32(osb->fs_generation);
+
+#ifndef OCFS2_USE_ALL_METADATA_SUBALLOCATORS
+ /* we always use slot zero's suballocator */
+ eb->h_suballoc_slot = 0;
+#else
+ eb->h_suballoc_slot = cpu_to_le16(osb->slot_num);
+#endif
+ eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start);
+ eb->h_list.l_count =
+ cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb));
+
+ suballoc_bit_start++;
+ first_blkno++;
+
+ /* We'll also be dirtied by the caller, so
+ * this isn't absolutely necessary. */
+ status = ocfs2_journal_dirty(handle, bhs[i]);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
+ count += num_got;
+ }
+
+ status = 0;
+bail:
+ if (status < 0) {
+ for(i = 0; i < wanted; i++) {
+ if (bhs[i])
+ brelse(bhs[i]);
+ bhs[i] = NULL;
+ }
+ }
+ mlog_exit(status);
+ return status;
+}
+
+/*
+ * Add an entire tree branch to our inode. eb_bh is the extent block
+ * to start at, if we don't want to start the branch at the dinode
+ * structure.
+ *
+ * last_eb_bh is required as we have to update its next_leaf pointer
+ * for the new last extent block.
+ *
+ * the new branch will be 'empty' in the sense that every block will
+ * contain a single record with e_clusters == 0.
+ */
+static int ocfs2_add_branch(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct buffer_head *eb_bh,
+ struct buffer_head *last_eb_bh,
+ struct ocfs2_alloc_context *meta_ac)
+{
+ int status, new_blocks, i;
+ u64 next_blkno, new_last_eb_blk;
+ struct buffer_head *bh;
+ struct buffer_head **new_eb_bhs = NULL;
+ struct ocfs2_dinode *fe;
+ struct ocfs2_extent_block *eb;
+ struct ocfs2_extent_list *eb_el;
+ struct ocfs2_extent_list *el;
+
+ mlog_entry_void();
+
+ BUG_ON(!last_eb_bh);
+
+ fe = (struct ocfs2_dinode *) fe_bh->b_data;
+
+ if (eb_bh) {
+ eb = (struct ocfs2_extent_block *) eb_bh->b_data;
+ el = &eb->h_list;
+ } else
+ el = &fe->id2.i_list;
+
+ /* we never add a branch to a leaf. */
+ BUG_ON(!el->l_tree_depth);
+
+ new_blocks = le16_to_cpu(el->l_tree_depth);
+
+ /* allocate the number of new eb blocks we need */
+ new_eb_bhs = kcalloc(new_blocks, sizeof(struct buffer_head *),
+ GFP_KERNEL);
+ if (!new_eb_bhs) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_create_new_meta_bhs(osb, handle, inode, new_blocks,
+ meta_ac, new_eb_bhs);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* Note: new_eb_bhs[new_blocks - 1] is the guy which will be
+ * linked with the rest of the tree.
+ * conversely, new_eb_bhs[0] is the new bottommost leaf.
+ *
+ * when we leave the loop, new_last_eb_blk will point to the
+ * newest leaf, and next_blkno will point to the topmost extent
+ * block. */
+ next_blkno = new_last_eb_blk = 0;
+ for(i = 0; i < new_blocks; i++) {
+ bh = new_eb_bhs[i];
+ eb = (struct ocfs2_extent_block *) bh->b_data;
+ if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
+ OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
+ status = -EIO;
+ goto bail;
+ }
+ eb_el = &eb->h_list;
+
+ status = ocfs2_journal_access(handle, inode, bh,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ eb->h_next_leaf_blk = 0;
+ eb_el->l_tree_depth = cpu_to_le16(i);
+ eb_el->l_next_free_rec = cpu_to_le16(1);
+ eb_el->l_recs[0].e_cpos = fe->i_clusters;
+ eb_el->l_recs[0].e_blkno = cpu_to_le64(next_blkno);
+ eb_el->l_recs[0].e_clusters = cpu_to_le32(0);
+ if (!eb_el->l_tree_depth)
+ new_last_eb_blk = le64_to_cpu(eb->h_blkno);
+
+ status = ocfs2_journal_dirty(handle, bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ next_blkno = le64_to_cpu(eb->h_blkno);
+ }
+
+ /* This is a bit hairy. We want to update up to three blocks
+ * here without leaving any of them in an inconsistent state
+ * in case of error. We don't have to worry about
+ * journal_dirty erroring as it won't unless we've aborted the
+ * handle (in which case we would never be here) so reserving
+ * the write with journal_access is all we need to do. */
+ status = ocfs2_journal_access(handle, inode, last_eb_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ status = ocfs2_journal_access(handle, inode, fe_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ if (eb_bh) {
+ status = ocfs2_journal_access(handle, inode, eb_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
+ /* Link the new branch into the rest of the tree (el will
+ * either be on the fe, or the extent block passed in. */
+ i = le16_to_cpu(el->l_next_free_rec);
+ el->l_recs[i].e_blkno = cpu_to_le64(next_blkno);
+ el->l_recs[i].e_cpos = fe->i_clusters;
+ el->l_recs[i].e_clusters = 0;
+ le16_add_cpu(&el->l_next_free_rec, 1);
+
+ /* fe needs a new last extent block pointer, as does the
+ * next_leaf on the previously last-extent-block. */
+ fe->i_last_eb_blk = cpu_to_le64(new_last_eb_blk);
+
+ eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
+ eb->h_next_leaf_blk = cpu_to_le64(new_last_eb_blk);
+
+ status = ocfs2_journal_dirty(handle, last_eb_bh);
+ if (status < 0)
+ mlog_errno(status);
+ status = ocfs2_journal_dirty(handle, fe_bh);
+ if (status < 0)
+ mlog_errno(status);
+ if (eb_bh) {
+ status = ocfs2_journal_dirty(handle, eb_bh);
+ if (status < 0)
+ mlog_errno(status);
+ }
+
+ status = 0;
+bail:
+ if (new_eb_bhs) {
+ for (i = 0; i < new_blocks; i++)
+ if (new_eb_bhs[i])
+ brelse(new_eb_bhs[i]);
+ kfree(new_eb_bhs);
+ }
+
+ mlog_exit(status);
+ return status;
+}
+
+/*
+ * adds another level to the allocation tree.
+ * returns back the new extent block so you can add a branch to it
+ * after this call.
+ */
+static int ocfs2_shift_tree_depth(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct ocfs2_alloc_context *meta_ac,
+ struct buffer_head **ret_new_eb_bh)
+{
+ int status, i;
+ struct buffer_head *new_eb_bh = NULL;
+ struct ocfs2_dinode *fe;
+ struct ocfs2_extent_block *eb;
+ struct ocfs2_extent_list *fe_el;
+ struct ocfs2_extent_list *eb_el;
+
+ mlog_entry_void();
+
+ status = ocfs2_create_new_meta_bhs(osb, handle, inode, 1, meta_ac,
+ &new_eb_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ eb = (struct ocfs2_extent_block *) new_eb_bh->b_data;
+ if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
+ OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
+ status = -EIO;
+ goto bail;
+ }
+
+ eb_el = &eb->h_list;
+ fe = (struct ocfs2_dinode *) fe_bh->b_data;
+ fe_el = &fe->id2.i_list;
+
+ status = ocfs2_journal_access(handle, inode, new_eb_bh,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* copy the fe data into the new extent block */
+ eb_el->l_tree_depth = fe_el->l_tree_depth;
+ eb_el->l_next_free_rec = fe_el->l_next_free_rec;
+ for(i = 0; i < le16_to_cpu(fe_el->l_next_free_rec); i++) {
+ eb_el->l_recs[i].e_cpos = fe_el->l_recs[i].e_cpos;
+ eb_el->l_recs[i].e_clusters = fe_el->l_recs[i].e_clusters;
+ eb_el->l_recs[i].e_blkno = fe_el->l_recs[i].e_blkno;
+ }
+
+ status = ocfs2_journal_dirty(handle, new_eb_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_journal_access(handle, inode, fe_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* update fe now */
+ le16_add_cpu(&fe_el->l_tree_depth, 1);
+ fe_el->l_recs[0].e_cpos = 0;
+ fe_el->l_recs[0].e_blkno = eb->h_blkno;
+ fe_el->l_recs[0].e_clusters = fe->i_clusters;
+ for(i = 1; i < le16_to_cpu(fe_el->l_next_free_rec); i++) {
+ fe_el->l_recs[i].e_cpos = 0;
+ fe_el->l_recs[i].e_clusters = 0;
+ fe_el->l_recs[i].e_blkno = 0;
+ }
+ fe_el->l_next_free_rec = cpu_to_le16(1);
+
+ /* If this is our 1st tree depth shift, then last_eb_blk
+ * becomes the allocated extent block */
+ if (fe_el->l_tree_depth == cpu_to_le16(1))
+ fe->i_last_eb_blk = eb->h_blkno;
+
+ status = ocfs2_journal_dirty(handle, fe_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ *ret_new_eb_bh = new_eb_bh;
+ new_eb_bh = NULL;
+ status = 0;
+bail:
+ if (new_eb_bh)
+ brelse(new_eb_bh);
+
+ mlog_exit(status);
+ return status;
+}
+
+/*
+ * Expects the tree to already have room in the rightmost leaf for the
+ * extent. Updates all the extent blocks (and the dinode) on the way
+ * down.
+ */
+static int ocfs2_do_insert_extent(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ u64 start_blk,
+ u32 new_clusters)
+{
+ int status, i, num_bhs = 0;
+ u64 next_blkno;
+ u16 next_free;
+ struct buffer_head **eb_bhs = NULL;
+ struct ocfs2_dinode *fe;
+ struct ocfs2_extent_block *eb;
+ struct ocfs2_extent_list *el;
+
+ mlog_entry_void();
+
+ status = ocfs2_journal_access(handle, inode, fe_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ fe = (struct ocfs2_dinode *) fe_bh->b_data;
+ el = &fe->id2.i_list;
+ if (el->l_tree_depth) {
+ /* This is another operation where we want to be
+ * careful about our tree updates. An error here means
+ * none of the previous changes we made should roll
+ * forward. As a result, we have to record the buffers
+ * for this part of the tree in an array and reserve a
+ * journal write to them before making any changes. */
+ num_bhs = le16_to_cpu(fe->id2.i_list.l_tree_depth);
+ eb_bhs = kcalloc(num_bhs, sizeof(struct buffer_head *),
+ GFP_KERNEL);
+ if (!eb_bhs) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ i = 0;
+ while(el->l_tree_depth) {
+ next_free = le16_to_cpu(el->l_next_free_rec);
+ if (next_free == 0) {
+ ocfs2_error(inode->i_sb,
+ "Dinode %"MLFu64" has a bad "
+ "extent list",
+ OCFS2_I(inode)->ip_blkno);
+ status = -EIO;
+ goto bail;
+ }
+ next_blkno = le64_to_cpu(el->l_recs[next_free - 1].e_blkno);
+
+ BUG_ON(i >= num_bhs);
+ status = ocfs2_read_block(osb, next_blkno, &eb_bhs[i],
+ OCFS2_BH_CACHED, inode);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ eb = (struct ocfs2_extent_block *) eb_bhs[i]->b_data;
+ if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
+ OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb,
+ eb);
+ status = -EIO;
+ goto bail;
+ }
+
+ status = ocfs2_journal_access(handle, inode, eb_bhs[i],
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ el = &eb->h_list;
+ i++;
+ /* When we leave this loop, eb_bhs[num_bhs - 1] will
+ * hold the bottom-most leaf extent block. */
+ }
+ BUG_ON(el->l_tree_depth);
+
+ el = &fe->id2.i_list;
+ /* If we have tree depth, then the fe update is
+ * trivial, and we want to switch el out for the
+ * bottom-most leaf in order to update it with the
+ * actual extent data below. */
+ next_free = le16_to_cpu(el->l_next_free_rec);
+ if (next_free == 0) {
+ ocfs2_error(inode->i_sb,
+ "Dinode %"MLFu64" has a bad "
+ "extent list",
+ OCFS2_I(inode)->ip_blkno);
+ status = -EIO;
+ goto bail;
+ }
+ le32_add_cpu(&el->l_recs[next_free - 1].e_clusters,
+ new_clusters);
+ /* (num_bhs - 1) to avoid the leaf */
+ for(i = 0; i < (num_bhs - 1); i++) {
+ eb = (struct ocfs2_extent_block *) eb_bhs[i]->b_data;
+ el = &eb->h_list;
+
+ /* finally, make our actual change to the
+ * intermediate extent blocks. */
+ next_free = le16_to_cpu(el->l_next_free_rec);
+ le32_add_cpu(&el->l_recs[next_free - 1].e_clusters,
+ new_clusters);
+
+ status = ocfs2_journal_dirty(handle, eb_bhs[i]);
+ if (status < 0)
+ mlog_errno(status);
+ }
+ BUG_ON(i != (num_bhs - 1));
+ /* note that the leaf block wasn't touched in
+ * the loop above */
+ eb = (struct ocfs2_extent_block *) eb_bhs[num_bhs - 1]->b_data;
+ el = &eb->h_list;
+ BUG_ON(el->l_tree_depth);
+ }
+
+ /* yay, we can finally add the actual extent now! */
+ i = le16_to_cpu(el->l_next_free_rec) - 1;
+ if (le16_to_cpu(el->l_next_free_rec) &&
+ ocfs2_extent_contig(inode, &el->l_recs[i], start_blk)) {
+ le32_add_cpu(&el->l_recs[i].e_clusters, new_clusters);
+ } else if (le16_to_cpu(el->l_next_free_rec) &&
+ (le32_to_cpu(el->l_recs[i].e_clusters) == 0)) {
+ /* having an empty extent at eof is legal. */
+ if (el->l_recs[i].e_cpos != fe->i_clusters) {
+ ocfs2_error(inode->i_sb,
+ "Dinode %"MLFu64" trailing extent is bad: "
+ "cpos (%u) != number of clusters (%u)",
+ le32_to_cpu(el->l_recs[i].e_cpos),
+ le32_to_cpu(fe->i_clusters));
+ status = -EIO;
+ goto bail;
+ }
+ el->l_recs[i].e_blkno = cpu_to_le64(start_blk);
+ el->l_recs[i].e_clusters = cpu_to_le32(new_clusters);
+ } else {
+ /* No contiguous record, or no empty record at eof, so
+ * we add a new one. */
+
+ BUG_ON(le16_to_cpu(el->l_next_free_rec) >=
+ le16_to_cpu(el->l_count));
+ i = le16_to_cpu(el->l_next_free_rec);
+
+ el->l_recs[i].e_blkno = cpu_to_le64(start_blk);
+ el->l_recs[i].e_clusters = cpu_to_le32(new_clusters);
+ el->l_recs[i].e_cpos = fe->i_clusters;
+ le16_add_cpu(&el->l_next_free_rec, 1);
+ }
+
+ /*
+ * extent_map errors are not fatal, so they are ignored outside
+ * of flushing the thing.
+ */
+ status = ocfs2_extent_map_append(inode, &el->l_recs[i],
+ new_clusters);
+ if (status) {
+ mlog_errno(status);
+ ocfs2_extent_map_drop(inode, le32_to_cpu(fe->i_clusters));
+ }
+
+ status = ocfs2_journal_dirty(handle, fe_bh);
+ if (status < 0)
+ mlog_errno(status);
+ if (fe->id2.i_list.l_tree_depth) {
+ status = ocfs2_journal_dirty(handle, eb_bhs[num_bhs - 1]);
+ if (status < 0)
+ mlog_errno(status);
+ }
+
+ status = 0;
+bail:
+ if (eb_bhs) {
+ for (i = 0; i < num_bhs; i++)
+ if (eb_bhs[i])
+ brelse(eb_bhs[i]);
+ kfree(eb_bhs);
+ }
+
+ mlog_exit(status);
+ return status;
+}
+
+/*
+ * Should only be called when there is no space left in any of the
+ * leaf nodes. What we want to do is find the lowest tree depth
+ * non-leaf extent block with room for new records. There are three
+ * valid results of this search:
+ *
+ * 1) a lowest extent block is found, then we pass it back in
+ * *lowest_eb_bh and return '0'
+ *
+ * 2) the search fails to find anything, but the dinode has room. We
+ * pass NULL back in *lowest_eb_bh, but still return '0'
+ *
+ * 3) the search fails to find anything AND the dinode is full, in
+ * which case we return > 0
+ *
+ * return status < 0 indicates an error.
+ */
+static int ocfs2_find_branch_target(struct ocfs2_super *osb,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct buffer_head **target_bh)
+{
+ int status = 0, i;
+ u64 blkno;
+ struct ocfs2_dinode *fe;
+ struct ocfs2_extent_block *eb;
+ struct ocfs2_extent_list *el;
+ struct buffer_head *bh = NULL;
+ struct buffer_head *lowest_bh = NULL;
+
+ mlog_entry_void();
+
+ *target_bh = NULL;
+
+ fe = (struct ocfs2_dinode *) fe_bh->b_data;
+ el = &fe->id2.i_list;
+
+ while(le16_to_cpu(el->l_tree_depth) > 1) {
+ if (le16_to_cpu(el->l_next_free_rec) == 0) {
+ ocfs2_error(inode->i_sb, "Dinode %"MLFu64" has empty "
+ "extent list (next_free_rec == 0)",
+ OCFS2_I(inode)->ip_blkno);
+ status = -EIO;
+ goto bail;
+ }
+ i = le16_to_cpu(el->l_next_free_rec) - 1;
+ blkno = le64_to_cpu(el->l_recs[i].e_blkno);
+ if (!blkno) {
+ ocfs2_error(inode->i_sb, "Dinode %"MLFu64" has extent "
+ "list where extent # %d has no physical "
+ "block start",
+ OCFS2_I(inode)->ip_blkno, i);
+ status = -EIO;
+ goto bail;
+ }
+
+ if (bh) {
+ brelse(bh);
+ bh = NULL;
+ }
+
+ status = ocfs2_read_block(osb, blkno, &bh, OCFS2_BH_CACHED,
+ inode);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ eb = (struct ocfs2_extent_block *) bh->b_data;
+ if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
+ OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
+ status = -EIO;
+ goto bail;
+ }
+ el = &eb->h_list;
+
+ if (le16_to_cpu(el->l_next_free_rec) <
+ le16_to_cpu(el->l_count)) {
+ if (lowest_bh)
+ brelse(lowest_bh);
+ lowest_bh = bh;
+ get_bh(lowest_bh);
+ }
+ }
+
+ /* If we didn't find one and the fe doesn't have any room,
+ * then return '1' */
+ if (!lowest_bh
+ && (fe->id2.i_list.l_next_free_rec == fe->id2.i_list.l_count))
+ status = 1;
+
+ *target_bh = lowest_bh;
+bail:
+ if (bh)
+ brelse(bh);
+
+ mlog_exit(status);
+ return status;
+}
+
+/* the caller needs to update fe->i_clusters */
+int ocfs2_insert_extent(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ u64 start_blk,
+ u32 new_clusters,
+ struct ocfs2_alloc_context *meta_ac)
+{
+ int status, i, shift;
+ struct buffer_head *last_eb_bh = NULL;
+ struct buffer_head *bh = NULL;
+ struct ocfs2_dinode *fe;
+ struct ocfs2_extent_block *eb;
+ struct ocfs2_extent_list *el;
+
+ mlog_entry_void();
+
+ mlog(0, "add %u clusters starting at block %"MLFu64" to "
+ "inode %"MLFu64"\n",
+ new_clusters, start_blk, OCFS2_I(inode)->ip_blkno);
+
+ fe = (struct ocfs2_dinode *) fe_bh->b_data;
+ el = &fe->id2.i_list;
+
+ if (el->l_tree_depth) {
+ /* jump to end of tree */
+ status = ocfs2_read_block(osb, le64_to_cpu(fe->i_last_eb_blk),
+ &last_eb_bh, OCFS2_BH_CACHED, inode);
+ if (status < 0) {
+ mlog_exit(status);
+ goto bail;
+ }
+ eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
+ el = &eb->h_list;
+ }
+
+ /* Can we allocate without adding/shifting tree bits? */
+ i = le16_to_cpu(el->l_next_free_rec) - 1;
+ if (le16_to_cpu(el->l_next_free_rec) == 0
+ || (le16_to_cpu(el->l_next_free_rec) < le16_to_cpu(el->l_count))
+ || le32_to_cpu(el->l_recs[i].e_clusters) == 0
+ || ocfs2_extent_contig(inode, &el->l_recs[i], start_blk))
+ goto out_add;
+
+ mlog(0, "ocfs2_allocate_extent: couldn't do a simple add, traversing "
+ "tree now.\n");
+
+ shift = ocfs2_find_branch_target(osb, inode, fe_bh, &bh);
+ if (shift < 0) {
+ status = shift;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* We traveled all the way to the bottom of the allocation tree
+ * and didn't find room for any more extents - we need to add
+ * another tree level */
+ if (shift) {
+ /* if we hit a leaf, we'd better be empty :) */
+ BUG_ON(le16_to_cpu(el->l_next_free_rec) !=
+ le16_to_cpu(el->l_count));
+ BUG_ON(bh);
+ mlog(0, "ocfs2_allocate_extent: need to shift tree depth "
+ "(current = %u)\n",
+ le16_to_cpu(fe->id2.i_list.l_tree_depth));
+
+ /* ocfs2_shift_tree_depth will return us a buffer with
+ * the new extent block (so we can pass that to
+ * ocfs2_add_branch). */
+ status = ocfs2_shift_tree_depth(osb, handle, inode, fe_bh,
+ meta_ac, &bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ /* Special case: we have room now if we shifted from
+ * tree_depth 0 */
+ if (fe->id2.i_list.l_tree_depth == cpu_to_le16(1))
+ goto out_add;
+ }
+
+ /* call ocfs2_add_branch to add the final part of the tree with
+ * the new data. */
+ mlog(0, "ocfs2_allocate_extent: add branch. bh = %p\n", bh);
+ status = ocfs2_add_branch(osb, handle, inode, fe_bh, bh, last_eb_bh,
+ meta_ac);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+out_add:
+ /* Finally, we can add clusters. */
+ status = ocfs2_do_insert_extent(osb, handle, inode, fe_bh,
+ start_blk, new_clusters);
+ if (status < 0)
+ mlog_errno(status);
+
+bail:
+ if (bh)
+ brelse(bh);
+
+ if (last_eb_bh)
+ brelse(last_eb_bh);
+
+ mlog_exit(status);
+ return status;
+}
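
Illustration only, not part of the patch: as the comment above ocfs2_insert_extent() notes, the caller is responsible for bumping fe->i_clusters afterwards. A hypothetical caller sketch under the same journal handle; add_clusters_to_inode is an invented name.

static int add_clusters_to_inode(struct ocfs2_super *osb,
				 struct ocfs2_journal_handle *handle,
				 struct inode *inode,
				 struct buffer_head *fe_bh,
				 u64 start_blk, u32 new_clusters,
				 struct ocfs2_alloc_context *meta_ac)
{
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
	int status;

	status = ocfs2_insert_extent(osb, handle, inode, fe_bh,
				     start_blk, new_clusters, meta_ac);
	if (status < 0)
		return status;

	/* the insert does not touch i_clusters, so do it here and dirty
	 * the dinode within the same transaction */
	status = ocfs2_journal_access(handle, inode, fe_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0)
		return status;

	le32_add_cpu(&fe->i_clusters, new_clusters);
	return ocfs2_journal_dirty(handle, fe_bh);
}
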
+
+static inline int ocfs2_truncate_log_needs_flush(struct ocfs2_super *osb)
+{
+ struct buffer_head *tl_bh = osb->osb_tl_bh;
+ struct ocfs2_dinode *di;
+ struct ocfs2_truncate_log *tl;
+
+ di = (struct ocfs2_dinode *) tl_bh->b_data;
+ tl = &di->id2.i_dealloc;
+
+ mlog_bug_on_msg(le16_to_cpu(tl->tl_used) > le16_to_cpu(tl->tl_count),
+ "slot %d, invalid truncate log parameters: used = "
+ "%u, count = %u\n", osb->slot_num,
+ le16_to_cpu(tl->tl_used), le16_to_cpu(tl->tl_count));
+ return le16_to_cpu(tl->tl_used) == le16_to_cpu(tl->tl_count);
+}
+
+static int ocfs2_truncate_log_can_coalesce(struct ocfs2_truncate_log *tl,
+ unsigned int new_start)
+{
+ unsigned int tail_index;
+ unsigned int current_tail;
+
+ /* No records, nothing to coalesce */
+ if (!le16_to_cpu(tl->tl_used))
+ return 0;
+
+ tail_index = le16_to_cpu(tl->tl_used) - 1;
+ current_tail = le32_to_cpu(tl->tl_recs[tail_index].t_start);
+ current_tail += le32_to_cpu(tl->tl_recs[tail_index].t_clusters);
+
+ return current_tail == new_start;
+}
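
Illustration only, not part of the patch: coalescing happens only when the new range starts exactly where the current tail record ends. A small worked example with invented numbers:

/*
 * Tail record: { t_start = 100, t_clusters = 8 }  =>  current_tail = 108.
 *   new_start == 108 -> coalesce: tail becomes { 100, 8 + new_clusters }
 *   new_start == 120 -> no coalesce: a fresh record is appended instead
 */
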
+
+static int ocfs2_truncate_log_append(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ u64 start_blk,
+ unsigned int num_clusters)
+{
+ int status, index;
+ unsigned int start_cluster, tl_count;
+ struct inode *tl_inode = osb->osb_tl_inode;
+ struct buffer_head *tl_bh = osb->osb_tl_bh;
+ struct ocfs2_dinode *di;
+ struct ocfs2_truncate_log *tl;
+
+ mlog_entry("start_blk = %"MLFu64", num_clusters = %u\n", start_blk,
+ num_clusters);
+
+ BUG_ON(mutex_trylock(&tl_inode->i_mutex));
+
+ start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk);
+
+ di = (struct ocfs2_dinode *) tl_bh->b_data;
+ tl = &di->id2.i_dealloc;
+ if (!OCFS2_IS_VALID_DINODE(di)) {
+ OCFS2_RO_ON_INVALID_DINODE(osb->sb, di);
+ status = -EIO;
+ goto bail;
+ }
+
+ tl_count = le16_to_cpu(tl->tl_count);
+ mlog_bug_on_msg(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) ||
+ tl_count == 0,
+ "Truncate record count on #%"MLFu64" invalid ("
+ "wanted %u, actual %u\n", OCFS2_I(tl_inode)->ip_blkno,
+ ocfs2_truncate_recs_per_inode(osb->sb),
+ le16_to_cpu(tl->tl_count));
+
+ /* Caller should have known to flush before calling us. */
+ index = le16_to_cpu(tl->tl_used);
+ if (index >= tl_count) {
+ status = -ENOSPC;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_journal_access(handle, tl_inode, tl_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ mlog(0, "Log truncate of %u clusters starting at cluster %u to "
+ "%"MLFu64" (index = %d)\n", num_clusters, start_cluster,
+ OCFS2_I(tl_inode)->ip_blkno, index);
+
+ if (ocfs2_truncate_log_can_coalesce(tl, start_cluster)) {
+ /*
+ * Move index back to the record we are coalescing with.
+ * ocfs2_truncate_log_can_coalesce() guarantees nonzero
+ */
+ index--;
+
+ num_clusters += le32_to_cpu(tl->tl_recs[index].t_clusters);
+ mlog(0, "Coalesce with index %u (start = %u, clusters = %u)\n",
+ index, le32_to_cpu(tl->tl_recs[index].t_start),
+ num_clusters);
+ } else {
+ tl->tl_recs[index].t_start = cpu_to_le32(start_cluster);
+ tl->tl_used = cpu_to_le16(index + 1);
+ }
+ tl->tl_recs[index].t_clusters = cpu_to_le32(num_clusters);
+
+ status = ocfs2_journal_dirty(handle, tl_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *data_alloc_inode,
+ struct buffer_head *data_alloc_bh)
+{
+ int status = 0;
+ int i;
+ unsigned int num_clusters;
+ u64 start_blk;
+ struct ocfs2_truncate_rec rec;
+ struct ocfs2_dinode *di;
+ struct ocfs2_truncate_log *tl;
+ struct inode *tl_inode = osb->osb_tl_inode;
+ struct buffer_head *tl_bh = osb->osb_tl_bh;
+
+ mlog_entry_void();
+
+ di = (struct ocfs2_dinode *) tl_bh->b_data;
+ tl = &di->id2.i_dealloc;
+ i = le16_to_cpu(tl->tl_used) - 1;
+ while (i >= 0) {
+ /* Caller has given us at least enough credits to
+ * update the truncate log dinode */
+ status = ocfs2_journal_access(handle, tl_inode, tl_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ tl->tl_used = cpu_to_le16(i);
+
+ status = ocfs2_journal_dirty(handle, tl_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* TODO: Perhaps we can calculate the bulk of the
+ * credits up front rather than extending like
+ * this. */
+ status = ocfs2_extend_trans(handle,
+ OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ rec = tl->tl_recs[i];
+ start_blk = ocfs2_clusters_to_blocks(data_alloc_inode->i_sb,
+ le32_to_cpu(rec.t_start));
+ num_clusters = le32_to_cpu(rec.t_clusters);
+
+ /* if start_blk is not set, we ignore the record as
+ * invalid. */
+ if (start_blk) {
+ mlog(0, "free record %d, start = %u, clusters = %u\n",
+ i, le32_to_cpu(rec.t_start), num_clusters);
+
+ status = ocfs2_free_clusters(handle, data_alloc_inode,
+ data_alloc_bh, start_blk,
+ num_clusters);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+ i--;
+ }
+
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+/* Expects you to already be holding tl_inode->i_mutex */
+static int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
+{
+ int status;
+ unsigned int num_to_flush;
+ struct ocfs2_journal_handle *handle = NULL;
+ struct inode *tl_inode = osb->osb_tl_inode;
+ struct inode *data_alloc_inode = NULL;
+ struct buffer_head *tl_bh = osb->osb_tl_bh;
+ struct buffer_head *data_alloc_bh = NULL;
+ struct ocfs2_dinode *di;
+ struct ocfs2_truncate_log *tl;
+
+ mlog_entry_void();
+
+ BUG_ON(mutex_trylock(&tl_inode->i_mutex));
+
+ di = (struct ocfs2_dinode *) tl_bh->b_data;
+ tl = &di->id2.i_dealloc;
+ if (!OCFS2_IS_VALID_DINODE(di)) {
+ OCFS2_RO_ON_INVALID_DINODE(osb->sb, di);
+ status = -EIO;
+ goto bail;
+ }
+
+ num_to_flush = le16_to_cpu(tl->tl_used);
+ mlog(0, "Flush %u records from truncate log #%"MLFu64"\n",
+ num_to_flush, OCFS2_I(tl_inode)->ip_blkno);
+ if (!num_to_flush) {
+ status = 0;
+ goto bail;
+ }
+
+ handle = ocfs2_alloc_handle(osb);
+ if (!handle) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ data_alloc_inode = ocfs2_get_system_file_inode(osb,
+ GLOBAL_BITMAP_SYSTEM_INODE,
+ OCFS2_INVALID_SLOT);
+ if (!data_alloc_inode) {
+ status = -EINVAL;
+ mlog(ML_ERROR, "Could not get bitmap inode!\n");
+ goto bail;
+ }
+
+ ocfs2_handle_add_inode(handle, data_alloc_inode);
+ status = ocfs2_meta_lock(data_alloc_inode, handle, &data_alloc_bh, 1);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ handle = ocfs2_start_trans(osb, handle, OCFS2_TRUNCATE_LOG_UPDATE);
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ handle = NULL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_replay_truncate_records(osb, handle, data_alloc_inode,
+ data_alloc_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+bail:
+ if (handle)
+ ocfs2_commit_trans(handle);
+
+ if (data_alloc_inode)
+ iput(data_alloc_inode);
+
+ if (data_alloc_bh)
+ brelse(data_alloc_bh);
+
+ mlog_exit(status);
+ return status;
+}
+
+int ocfs2_flush_truncate_log(struct ocfs2_super *osb)
+{
+ int status;
+ struct inode *tl_inode = osb->osb_tl_inode;
+
+ mutex_lock(&tl_inode->i_mutex);
+ status = __ocfs2_flush_truncate_log(osb);
+ mutex_unlock(&tl_inode->i_mutex);
+
+ return status;
+}
+
+static void ocfs2_truncate_log_worker(void *data)
+{
+ int status;
+ struct ocfs2_super *osb = data;
+
+ mlog_entry_void();
+
+ status = ocfs2_flush_truncate_log(osb);
+ if (status < 0)
+ mlog_errno(status);
+
+ mlog_exit(status);
+}
+
+#define OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL (2 * HZ)
+void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb,
+ int cancel)
+{
+ if (osb->osb_tl_inode) {
+ /* We want to push off log flushes while truncates are
+ * still running. */
+ if (cancel)
+ cancel_delayed_work(&osb->osb_truncate_log_wq);
+
+ queue_delayed_work(ocfs2_wq, &osb->osb_truncate_log_wq,
+ OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL);
+ }
+}
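
Illustration only, not part of the patch: passing cancel = 1 pushes an already-queued flush further into the future, while cancel = 0 only arms the delayed work if none is pending. A hypothetical truncate-path sketch; log_and_reschedule is an invented name.

static void log_and_reschedule(struct ocfs2_super *osb)
{
	/* defer any pending flush while we are still appending records */
	ocfs2_schedule_truncate_log_flush(osb, 1);
	/* ... ocfs2_truncate_log_append() calls go here ... */
	/* arm the normal delayed flush once we are done */
	ocfs2_schedule_truncate_log_flush(osb, 0);
}
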
+
+static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
+ int slot_num,
+ struct inode **tl_inode,
+ struct buffer_head **tl_bh)
+{
+ int status;
+ struct inode *inode = NULL;
+ struct buffer_head *bh = NULL;
+
+ inode = ocfs2_get_system_file_inode(osb,
+ TRUNCATE_LOG_SYSTEM_INODE,
+ slot_num);
+ if (!inode) {
+ status = -EINVAL;
+ mlog(ML_ERROR, "Could not get load truncate log inode!\n");
+ goto bail;
+ }
+
+ status = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &bh,
+ OCFS2_BH_CACHED, inode);
+ if (status < 0) {
+ iput(inode);
+ mlog_errno(status);
+ goto bail;
+ }
+
+ *tl_inode = inode;
+ *tl_bh = bh;
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+/* called during the 1st stage of node recovery. we stamp a clean
+ * truncate log and pass back a copy for processing later. if the
+ * truncate log does not require processing, *tl_copy is set to
+ * NULL. */
+int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb,
+ int slot_num,
+ struct ocfs2_dinode **tl_copy)
+{
+ int status;
+ struct inode *tl_inode = NULL;
+ struct buffer_head *tl_bh = NULL;
+ struct ocfs2_dinode *di;
+ struct ocfs2_truncate_log *tl;
+
+ *tl_copy = NULL;
+
+ mlog(0, "recover truncate log from slot %d\n", slot_num);
+
+ status = ocfs2_get_truncate_log_info(osb, slot_num, &tl_inode, &tl_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ di = (struct ocfs2_dinode *) tl_bh->b_data;
+ tl = &di->id2.i_dealloc;
+ if (!OCFS2_IS_VALID_DINODE(di)) {
+ OCFS2_RO_ON_INVALID_DINODE(tl_inode->i_sb, di);
+ status = -EIO;
+ goto bail;
+ }
+
+ if (le16_to_cpu(tl->tl_used)) {
+ mlog(0, "We'll have %u logs to recover\n",
+ le16_to_cpu(tl->tl_used));
+
+ *tl_copy = kmalloc(tl_bh->b_size, GFP_KERNEL);
+ if (!(*tl_copy)) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* Assuming the write-out below goes well, this copy
+ * will be passed back to recovery for processing. */
+ memcpy(*tl_copy, tl_bh->b_data, tl_bh->b_size);
+
+ /* All we need to do to clear the truncate log is set
+ * tl_used. */
+ tl->tl_used = 0;
+
+ status = ocfs2_write_block(osb, tl_bh, tl_inode);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
+bail:
+ if (tl_inode)
+ iput(tl_inode);
+ if (tl_bh)
+ brelse(tl_bh);
+
+ if (status < 0 && (*tl_copy)) {
+ kfree(*tl_copy);
+ *tl_copy = NULL;
+ }
+
+ mlog_exit(status);
+ return status;
+}
+
+int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
+ struct ocfs2_dinode *tl_copy)
+{
+ int status = 0;
+ int i;
+ unsigned int clusters, num_recs, start_cluster;
+ u64 start_blk;
+ struct ocfs2_journal_handle *handle;
+ struct inode *tl_inode = osb->osb_tl_inode;
+ struct ocfs2_truncate_log *tl;
+
+ mlog_entry_void();
+
+ if (OCFS2_I(tl_inode)->ip_blkno == le64_to_cpu(tl_copy->i_blkno)) {
+ mlog(ML_ERROR, "Asked to recover my own truncate log!\n");
+ return -EINVAL;
+ }
+
+ tl = &tl_copy->id2.i_dealloc;
+ num_recs = le16_to_cpu(tl->tl_used);
+ mlog(0, "cleanup %u records from %"MLFu64"\n", num_recs,
+ tl_copy->i_blkno);
+
+ mutex_lock(&tl_inode->i_mutex);
+ for(i = 0; i < num_recs; i++) {
+ if (ocfs2_truncate_log_needs_flush(osb)) {
+ status = __ocfs2_flush_truncate_log(osb);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail_up;
+ }
+ }
+
+ handle = ocfs2_start_trans(osb, NULL,
+ OCFS2_TRUNCATE_LOG_UPDATE);
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ mlog_errno(status);
+ goto bail_up;
+ }
+
+ clusters = le32_to_cpu(tl->tl_recs[i].t_clusters);
+ start_cluster = le32_to_cpu(tl->tl_recs[i].t_start);
+ start_blk = ocfs2_clusters_to_blocks(osb->sb, start_cluster);
+
+ status = ocfs2_truncate_log_append(osb, handle,
+ start_blk, clusters);
+ ocfs2_commit_trans(handle);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail_up;
+ }
+ }
+
+bail_up:
+ mutex_unlock(&tl_inode->i_mutex);
+
+ mlog_exit(status);
+ return status;
+}
+
+void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb)
+{
+ int status;
+ struct inode *tl_inode = osb->osb_tl_inode;
+
+ mlog_entry_void();
+
+ if (tl_inode) {
+ cancel_delayed_work(&osb->osb_truncate_log_wq);
+ flush_workqueue(ocfs2_wq);
+
+ status = ocfs2_flush_truncate_log(osb);
+ if (status < 0)
+ mlog_errno(status);
+
+ brelse(osb->osb_tl_bh);
+ iput(osb->osb_tl_inode);
+ }
+
+ mlog_exit_void();
+}
+
+int ocfs2_truncate_log_init(struct ocfs2_super *osb)
+{
+ int status;
+ struct inode *tl_inode = NULL;
+ struct buffer_head *tl_bh = NULL;
+
+ mlog_entry_void();
+
+ status = ocfs2_get_truncate_log_info(osb,
+ osb->slot_num,
+ &tl_inode,
+ &tl_bh);
+ if (status < 0)
+ mlog_errno(status);
+
+ /* ocfs2_truncate_log_shutdown keys on the existence of
+ * osb->osb_tl_inode so we don't set any of the osb variables
+ * until we're sure all is well. */
+ INIT_WORK(&osb->osb_truncate_log_wq, ocfs2_truncate_log_worker, osb);
+ osb->osb_tl_bh = tl_bh;
+ osb->osb_tl_inode = tl_inode;
+
+ mlog_exit(status);
+ return status;
+}
+
+/* This function will figure out whether the current last extent
+ * block will be deleted, and if it will, what the new last extent
+ * block will be so we can update its h_next_leaf_blk field, as well
+ * as the dinode's i_last_eb_blk. */
+static int ocfs2_find_new_last_ext_blk(struct ocfs2_super *osb,
+ struct inode *inode,
+ struct ocfs2_dinode *fe,
+ u32 new_i_clusters,
+ struct buffer_head *old_last_eb,
+ struct buffer_head **new_last_eb)
+{
+ int i, status = 0;
+ u64 block = 0;
+ struct ocfs2_extent_block *eb;
+ struct ocfs2_extent_list *el;
+ struct buffer_head *bh = NULL;
+
+ *new_last_eb = NULL;
+
+ if (!OCFS2_IS_VALID_DINODE(fe)) {
+ OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
+ status = -EIO;
+ goto bail;
+ }
+
+ /* we have no tree, so of course, no last_eb. */
+ if (!fe->id2.i_list.l_tree_depth)
+ goto bail;
+
+ /* trunc to zero special case - this makes tree_depth = 0
+ * regardless of what it is. */
+ if (!new_i_clusters)
+ goto bail;
+
+ eb = (struct ocfs2_extent_block *) old_last_eb->b_data;
+ el = &(eb->h_list);
+ BUG_ON(!el->l_next_free_rec);
+
+ /* Make sure that this extent block will actually be empty
+ * after we clear away the data. */
+ if (le32_to_cpu(el->l_recs[0].e_cpos) < new_i_clusters)
+ goto bail;
+
+ /* OK, at this point, we know that last_eb will definitely
+ * change, so let's traverse the tree and find the second to
+ * last extent block. */
+ el = &(fe->id2.i_list);
+ /* go down the tree */
+ do {
+ for(i = (le16_to_cpu(el->l_next_free_rec) - 1); i >= 0; i--) {
+ if (le32_to_cpu(el->l_recs[i].e_cpos) <
+ new_i_clusters) {
+ block = le64_to_cpu(el->l_recs[i].e_blkno);
+ break;
+ }
+ }
+ BUG_ON(i < 0);
+
+ if (bh) {
+ brelse(bh);
+ bh = NULL;
+ }
+
+ status = ocfs2_read_block(osb, block, &bh, OCFS2_BH_CACHED,
+ inode);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ eb = (struct ocfs2_extent_block *) bh->b_data;
+ el = &eb->h_list;
+ if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
+ OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
+ status = -EIO;
+ goto bail;
+ }
+ } while (el->l_tree_depth);
+
+ *new_last_eb = bh;
+ get_bh(*new_last_eb);
+ mlog(0, "returning block %"MLFu64"\n", le64_to_cpu(eb->h_blkno));
+bail:
+ if (bh)
+ brelse(bh);
+
+ return status;
+}
+
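+/* Remove clusters_to_del clusters from the tail of the inode's allocation
+ * tree: shrink the rightmost record at each level, free any extent blocks
+ * that become empty, and record the freed range in the truncate log. */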
+static int ocfs2_do_truncate(struct ocfs2_super *osb,
+ unsigned int clusters_to_del,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct buffer_head *old_last_eb_bh,
+ struct ocfs2_journal_handle *handle,
+ struct ocfs2_truncate_context *tc)
+{
+ int status, i, depth;
+ struct ocfs2_dinode *fe;
+ struct ocfs2_extent_block *eb;
+ struct ocfs2_extent_block *last_eb = NULL;
+ struct ocfs2_extent_list *el;
+ struct buffer_head *eb_bh = NULL;
+ struct buffer_head *last_eb_bh = NULL;
+ u64 next_eb = 0;
+ u64 delete_blk = 0;
+
+ fe = (struct ocfs2_dinode *) fe_bh->b_data;
+
+ status = ocfs2_find_new_last_ext_blk(osb,
+ inode,
+ fe,
+ le32_to_cpu(fe->i_clusters) -
+ clusters_to_del,
+ old_last_eb_bh,
+ &last_eb_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ if (last_eb_bh)
+ last_eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
+
+ status = ocfs2_journal_access(handle, inode, fe_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ el = &(fe->id2.i_list);
+
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+ OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters) -
+ clusters_to_del;
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+ le32_add_cpu(&fe->i_clusters, -clusters_to_del);
+ fe->i_mtime = cpu_to_le64(CURRENT_TIME.tv_sec);
+ fe->i_mtime_nsec = cpu_to_le32(CURRENT_TIME.tv_nsec);
+
+ i = le16_to_cpu(el->l_next_free_rec) - 1;
+
+ BUG_ON(le32_to_cpu(el->l_recs[i].e_clusters) < clusters_to_del);
+ le32_add_cpu(&el->l_recs[i].e_clusters, -clusters_to_del);
+ /* tree depth zero, we can just delete the clusters, otherwise
+ * we need to record the offset of the next level extent block
+ * as we may overwrite it. */
+ if (!el->l_tree_depth)
+ delete_blk = le64_to_cpu(el->l_recs[i].e_blkno)
+ + ocfs2_clusters_to_blocks(osb->sb,
+ le32_to_cpu(el->l_recs[i].e_clusters));
+ else
+ next_eb = le64_to_cpu(el->l_recs[i].e_blkno);
+
+ if (!el->l_recs[i].e_clusters) {
+ /* if we deleted the whole extent record, then clear
+ * out the other fields and update the extent
+ * list. For depth > 0 trees, we've already recorded
+ * the extent block in 'next_eb' */
+ el->l_recs[i].e_cpos = 0;
+ el->l_recs[i].e_blkno = 0;
+ BUG_ON(!el->l_next_free_rec);
+ le16_add_cpu(&el->l_next_free_rec, -1);
+ }
+
+ depth = le16_to_cpu(el->l_tree_depth);
+ if (!fe->i_clusters) {
+ /* trunc to zero is a special case. */
+ el->l_tree_depth = 0;
+ fe->i_last_eb_blk = 0;
+ } else if (last_eb)
+ fe->i_last_eb_blk = last_eb->h_blkno;
+
+ status = ocfs2_journal_dirty(handle, fe_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ if (last_eb) {
+ /* If there will be a new last extent block, then by
+ * definition, there cannot be any leaves to the right of
+ * it. */
+ status = ocfs2_journal_access(handle, inode, last_eb_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ last_eb->h_next_leaf_blk = 0;
+ status = ocfs2_journal_dirty(handle, last_eb_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
+ /* if our tree depth > 0, update all the tree blocks below us. */
+ while (depth) {
+ mlog(0, "traveling tree (depth = %d, next_eb = %"MLFu64")\n",
+ depth, next_eb);
+ status = ocfs2_read_block(osb, next_eb, &eb_bh,
+ OCFS2_BH_CACHED, inode);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ eb = (struct ocfs2_extent_block *)eb_bh->b_data;
+ if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
+ OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
+ status = -EIO;
+ goto bail;
+ }
+ el = &(eb->h_list);
+
+ status = ocfs2_journal_access(handle, inode, eb_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ BUG_ON(le16_to_cpu(el->l_next_free_rec) == 0);
+ BUG_ON(depth != (le16_to_cpu(el->l_tree_depth) + 1));
+
+ i = le16_to_cpu(el->l_next_free_rec) - 1;
+
+ mlog(0, "extent block %"MLFu64", before: record %d: "
+ "(%u, %u, %"MLFu64"), next = %u\n",
+ le64_to_cpu(eb->h_blkno), i,
+ le32_to_cpu(el->l_recs[i].e_cpos),
+ le32_to_cpu(el->l_recs[i].e_clusters),
+ le64_to_cpu(el->l_recs[i].e_blkno),
+ le16_to_cpu(el->l_next_free_rec));
+
+ BUG_ON(le32_to_cpu(el->l_recs[i].e_clusters) < clusters_to_del);
+ le32_add_cpu(&el->l_recs[i].e_clusters, -clusters_to_del);
+
+ next_eb = le64_to_cpu(el->l_recs[i].e_blkno);
+ /* bottom-most block requires us to delete data.*/
+ if (!el->l_tree_depth)
+ delete_blk = le64_to_cpu(el->l_recs[i].e_blkno)
+ + ocfs2_clusters_to_blocks(osb->sb,
+ le32_to_cpu(el->l_recs[i].e_clusters));
+ if (!el->l_recs[i].e_clusters) {
+ el->l_recs[i].e_cpos = 0;
+ el->l_recs[i].e_blkno = 0;
+ BUG_ON(!el->l_next_free_rec);
+ le16_add_cpu(&el->l_next_free_rec, -1);
+ }
+ mlog(0, "extent block %"MLFu64", after: record %d: "
+ "(%u, %u, %"MLFu64"), next = %u\n",
+ le64_to_cpu(eb->h_blkno), i,
+ le32_to_cpu(el->l_recs[i].e_cpos),
+ le32_to_cpu(el->l_recs[i].e_clusters),
+ le64_to_cpu(el->l_recs[i].e_blkno),
+ le16_to_cpu(el->l_next_free_rec));
+
+ status = ocfs2_journal_dirty(handle, eb_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ if (!el->l_next_free_rec) {
+ mlog(0, "deleting this extent block.\n");
+
+ ocfs2_remove_from_cache(inode, eb_bh);
+
+ BUG_ON(eb->h_suballoc_slot);
+ BUG_ON(el->l_recs[0].e_clusters);
+ BUG_ON(el->l_recs[0].e_cpos);
+ BUG_ON(el->l_recs[0].e_blkno);
+ status = ocfs2_free_extent_block(handle,
+ tc->tc_ext_alloc_inode,
+ tc->tc_ext_alloc_bh,
+ eb);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+ brelse(eb_bh);
+ eb_bh = NULL;
+ depth--;
+ }
+
+ BUG_ON(!delete_blk);
+ status = ocfs2_truncate_log_append(osb, handle, delete_blk,
+ clusters_to_del);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ status = 0;
+bail:
+ if (!status)
+ ocfs2_extent_map_trunc(inode, le32_to_cpu(fe->i_clusters));
+ else
+ ocfs2_extent_map_drop(inode, 0);
+ mlog_exit(status);
+ return status;
+}
+
+/*
+ * It is expected, that by the time you call this function,
+ * inode->i_size and fe->i_size have been adjusted.
+ *
+ * WARNING: This will kfree the truncate context
+ */
+int ocfs2_commit_truncate(struct ocfs2_super *osb,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct ocfs2_truncate_context *tc)
+{
+ int status, i, credits, tl_sem = 0;
+ u32 clusters_to_del, target_i_clusters;
+ u64 last_eb = 0;
+ struct ocfs2_dinode *fe;
+ struct ocfs2_extent_block *eb;
+ struct ocfs2_extent_list *el;
+ struct buffer_head *last_eb_bh;
+ struct ocfs2_journal_handle *handle = NULL;
+ struct inode *tl_inode = osb->osb_tl_inode;
+
+ mlog_entry_void();
+
+ down_write(&OCFS2_I(inode)->ip_alloc_sem);
+
+ target_i_clusters = ocfs2_clusters_for_bytes(osb->sb,
+ i_size_read(inode));
+
+ last_eb_bh = tc->tc_last_eb_bh;
+ tc->tc_last_eb_bh = NULL;
+
+ fe = (struct ocfs2_dinode *) fe_bh->b_data;
+
+ if (fe->id2.i_list.l_tree_depth) {
+ eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
+ el = &eb->h_list;
+ } else
+ el = &fe->id2.i_list;
+ last_eb = le64_to_cpu(fe->i_last_eb_blk);
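+ /* Each pass below removes at most one extent record's worth of
+ * clusters from the tail of the file; we loop back to 'start'
+ * until fe->i_clusters has shrunk to target_i_clusters. */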
+start:
+ mlog(0, "ocfs2_commit_truncate: fe->i_clusters = %u, "
+ "last_eb = %"MLFu64", fe->i_last_eb_blk = %"MLFu64", "
+ "fe->id2.i_list.l_tree_depth = %u last_eb_bh = %p\n",
+ le32_to_cpu(fe->i_clusters), last_eb,
+ le64_to_cpu(fe->i_last_eb_blk),
+ le16_to_cpu(fe->id2.i_list.l_tree_depth), last_eb_bh);
+
+ if (last_eb != le64_to_cpu(fe->i_last_eb_blk)) {
+ mlog(0, "last_eb changed!\n");
+ BUG_ON(!fe->id2.i_list.l_tree_depth);
+ last_eb = le64_to_cpu(fe->i_last_eb_blk);
+ /* i_last_eb_blk may have changed, read it if
+ * necessary. We don't have to worry about the
+ * truncate to zero case here (where there becomes no
+ * last_eb) because we never loop back after our work
+ * is done. */
+ if (last_eb_bh) {
+ brelse(last_eb_bh);
+ last_eb_bh = NULL;
+ }
+
+ status = ocfs2_read_block(osb, last_eb,
+ &last_eb_bh, OCFS2_BH_CACHED,
+ inode);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
+ if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
+ OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
+ status = -EIO;
+ goto bail;
+ }
+ el = &(eb->h_list);
+ }
+
+ /* by now, el will point to the extent list on the bottom most
+ * portion of this tree. */
+ i = le16_to_cpu(el->l_next_free_rec) - 1;
+ if (le32_to_cpu(el->l_recs[i].e_cpos) >= target_i_clusters)
+ clusters_to_del = le32_to_cpu(el->l_recs[i].e_clusters);
+ else
+ clusters_to_del = (le32_to_cpu(el->l_recs[i].e_clusters) +
+ le32_to_cpu(el->l_recs[i].e_cpos)) -
+ target_i_clusters;
+
+ mlog(0, "clusters_to_del = %u in this pass\n", clusters_to_del);
+
+ mutex_lock(&tl_inode->i_mutex);
+ tl_sem = 1;
+ /* Guarantee at least one free truncate log record for our use:
+ * if ocfs2_truncate_log_needs_flush() says the log is full, we
+ * flush it to get an empty one before appending. */
+ if (ocfs2_truncate_log_needs_flush(osb)) {
+ status = __ocfs2_flush_truncate_log(osb);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
+ credits = ocfs2_calc_tree_trunc_credits(osb->sb, clusters_to_del,
+ fe, el);
+ handle = ocfs2_start_trans(osb, NULL, credits);
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ handle = NULL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+ status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
+ if (status < 0)
+ mlog_errno(status);
+
+ status = ocfs2_do_truncate(osb, clusters_to_del, inode, fe_bh,
+ last_eb_bh, handle, tc);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ mutex_unlock(&tl_inode->i_mutex);
+ tl_sem = 0;
+
+ ocfs2_commit_trans(handle);
+ handle = NULL;
+
+ BUG_ON(le32_to_cpu(fe->i_clusters) < target_i_clusters);
+ if (le32_to_cpu(fe->i_clusters) > target_i_clusters)
+ goto start;
+bail:
+ up_write(&OCFS2_I(inode)->ip_alloc_sem);
+
+ ocfs2_schedule_truncate_log_flush(osb, 1);
+
+ if (tl_sem)
+ mutex_unlock(&tl_inode->i_mutex);
+
+ if (handle)
+ ocfs2_commit_trans(handle);
+
+ if (last_eb_bh)
+ brelse(last_eb_bh);
+
+ /* This will drop the ext_alloc cluster lock for us */
+ ocfs2_free_truncate_context(tc);
+
+ mlog_exit(status);
+ return status;
+}
+
+
+/*
+ * Expects the inode to already be locked. This will figure out which
+ * inodes need to be locked and will put them on the returned truncate
+ * context.
+ */
+int ocfs2_prepare_truncate(struct ocfs2_super *osb,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct ocfs2_truncate_context **tc)
+{
+ int status, metadata_delete;
+ unsigned int new_i_clusters;
+ struct ocfs2_dinode *fe;
+ struct ocfs2_extent_block *eb;
+ struct ocfs2_extent_list *el;
+ struct buffer_head *last_eb_bh = NULL;
+ struct inode *ext_alloc_inode = NULL;
+ struct buffer_head *ext_alloc_bh = NULL;
+
+ mlog_entry_void();
+
+ *tc = NULL;
+
+ new_i_clusters = ocfs2_clusters_for_bytes(osb->sb,
+ i_size_read(inode));
+ fe = (struct ocfs2_dinode *) fe_bh->b_data;
+
+ mlog(0, "fe->i_clusters = %u, new_i_clusters = %u, fe->i_size ="
+ "%"MLFu64"\n", fe->i_clusters, new_i_clusters, fe->i_size);
+
+ if (le32_to_cpu(fe->i_clusters) <= new_i_clusters) {
+ ocfs2_error(inode->i_sb, "Dinode %"MLFu64" has cluster count "
+ "%u and size %"MLFu64" whereas struct inode has "
+ "cluster count %u and size %llu which caused an "
+ "invalid truncate to %u clusters.",
+ le64_to_cpu(fe->i_blkno),
+ le32_to_cpu(fe->i_clusters),
+ le64_to_cpu(fe->i_size),
+ OCFS2_I(inode)->ip_clusters, i_size_read(inode),
+ new_i_clusters);
+ mlog_meta_lvb(ML_ERROR, &OCFS2_I(inode)->ip_meta_lockres);
+ status = -EIO;
+ goto bail;
+ }
+
+ *tc = kcalloc(1, sizeof(struct ocfs2_truncate_context), GFP_KERNEL);
+ if (!(*tc)) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ metadata_delete = 0;
+ if (fe->id2.i_list.l_tree_depth) {
+ /* If we have a tree, then the truncate may result in
+ * metadata deletes. Figure this out from the
+ * rightmost leaf block.*/
+ status = ocfs2_read_block(osb, le64_to_cpu(fe->i_last_eb_blk),
+ &last_eb_bh, OCFS2_BH_CACHED, inode);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
+ if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
+ OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
+
+ brelse(last_eb_bh);
+ status = -EIO;
+ goto bail;
+ }
+ el = &(eb->h_list);
+ if (le32_to_cpu(el->l_recs[0].e_cpos) >= new_i_clusters)
+ metadata_delete = 1;
+ }
+
+ (*tc)->tc_last_eb_bh = last_eb_bh;
+
+ if (metadata_delete) {
+ mlog(0, "Will have to delete metadata for this trunc. "
+ "locking allocator.\n");
+ ext_alloc_inode = ocfs2_get_system_file_inode(osb, EXTENT_ALLOC_SYSTEM_INODE, 0);
+ if (!ext_alloc_inode) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ mutex_lock(&ext_alloc_inode->i_mutex);
+ (*tc)->tc_ext_alloc_inode = ext_alloc_inode;
+
+ status = ocfs2_meta_lock(ext_alloc_inode,
+ NULL,
+ &ext_alloc_bh,
+ 1);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ (*tc)->tc_ext_alloc_bh = ext_alloc_bh;
+ (*tc)->tc_ext_alloc_locked = 1;
+ }
+
+ status = 0;
+bail:
+ if (status < 0) {
+ if (*tc)
+ ocfs2_free_truncate_context(*tc);
+ *tc = NULL;
+ }
+ mlog_exit_void();
+ return status;
+}
+
+static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc)
+{
+ if (tc->tc_ext_alloc_inode) {
+ if (tc->tc_ext_alloc_locked)
+ ocfs2_meta_unlock(tc->tc_ext_alloc_inode, 1);
+
+ mutex_unlock(&tc->tc_ext_alloc_inode->i_mutex);
+ iput(tc->tc_ext_alloc_inode);
+ }
+
+ if (tc->tc_ext_alloc_bh)
+ brelse(tc->tc_ext_alloc_bh);
+
+ if (tc->tc_last_eb_bh)
+ brelse(tc->tc_last_eb_bh);
+
+ kfree(tc);
+}
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
new file mode 100644
index 00000000000..12ba897743f
--- /dev/null
+++ b/fs/ocfs2/alloc.h
@@ -0,0 +1,82 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * alloc.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_ALLOC_H
+#define OCFS2_ALLOC_H
+
+struct ocfs2_alloc_context;
+int ocfs2_insert_extent(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ u64 blkno,
+ u32 new_clusters,
+ struct ocfs2_alloc_context *meta_ac);
+int ocfs2_num_free_extents(struct ocfs2_super *osb,
+ struct inode *inode,
+ struct ocfs2_dinode *fe);
+/* how many new metadata chunks would an allocation need at maximum? */
+static inline int ocfs2_extend_meta_needed(struct ocfs2_dinode *fe)
+{
+ /*
+ * Rather than do all the work of determining how much we need
+ * (involves a ton of reads and locks), just ask for the
+ * maximal limit. That's a tree depth shift. So, one block for
+ * level of the tree (current l_tree_depth), one block for the
+ * new tree_depth==0 extent_block, and one block at the new
+ * top-of-the tree.
+ */
+ return le16_to_cpu(fe->id2.i_list.l_tree_depth) + 2;
+}
+
+int ocfs2_truncate_log_init(struct ocfs2_super *osb);
+void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb);
+void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb,
+ int cancel);
+int ocfs2_flush_truncate_log(struct ocfs2_super *osb);
+int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb,
+ int slot_num,
+ struct ocfs2_dinode **tl_copy);
+int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
+ struct ocfs2_dinode *tl_copy);
+
+struct ocfs2_truncate_context {
+ struct inode *tc_ext_alloc_inode;
+ struct buffer_head *tc_ext_alloc_bh;
+ int tc_ext_alloc_locked; /* is it cluster locked? */
+ /* these get destroyed once it's passed to ocfs2_commit_truncate. */
+ struct buffer_head *tc_last_eb_bh;
+};
+
+int ocfs2_prepare_truncate(struct ocfs2_super *osb,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct ocfs2_truncate_context **tc);
+int ocfs2_commit_truncate(struct ocfs2_super *osb,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct ocfs2_truncate_context *tc);
+
+#endif /* OCFS2_ALLOC_H */
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
new file mode 100644
index 00000000000..8f4467a930a
--- /dev/null
+++ b/fs/ocfs2/aops.c
@@ -0,0 +1,643 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <asm/byteorder.h>
+
+#define MLOG_MASK_PREFIX ML_FILE_IO
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#include "alloc.h"
+#include "aops.h"
+#include "dlmglue.h"
+#include "extent_map.h"
+#include "file.h"
+#include "inode.h"
+#include "journal.h"
+#include "super.h"
+#include "symlink.h"
+
+#include "buffer_head_io.h"
+
+static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
+{
+ int err = -EIO;
+ int status;
+ struct ocfs2_dinode *fe = NULL;
+ struct buffer_head *bh = NULL;
+ struct buffer_head *buffer_cache_bh = NULL;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ void *kaddr;
+
+ mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
+ (unsigned long long)iblock, bh_result, create);
+
+ BUG_ON(ocfs2_inode_is_fast_symlink(inode));
+
+ if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
+ mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
+ (unsigned long long)iblock);
+ goto bail;
+ }
+
+ status = ocfs2_read_block(OCFS2_SB(inode->i_sb),
+ OCFS2_I(inode)->ip_blkno,
+ &bh, OCFS2_BH_CACHED, inode);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ fe = (struct ocfs2_dinode *) bh->b_data;
+
+ if (!OCFS2_IS_VALID_DINODE(fe)) {
+ mlog(ML_ERROR, "Invalid dinode #%"MLFu64": signature = %.*s\n",
+ fe->i_blkno, 7, fe->i_signature);
+ goto bail;
+ }
+
+ if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
+ le32_to_cpu(fe->i_clusters))) {
+ mlog(ML_ERROR, "block offset is outside the allocated size: "
+ "%llu\n", (unsigned long long)iblock);
+ goto bail;
+ }
+
+ /* We don't use the page cache to create symlink data, so if
+ * need be, copy it over from the buffer cache. */
+ if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
+ u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
+ iblock;
+ buffer_cache_bh = sb_getblk(osb->sb, blkno);
+ if (!buffer_cache_bh) {
+ mlog(ML_ERROR, "couldn't getblock for symlink!\n");
+ goto bail;
+ }
+
+ /* we haven't locked out transactions, so a commit
+ * could've happened. Since we've got a reference on
+ * the bh, even if it commits while we're doing the
+ * copy, the data is still good. */
+ if (buffer_jbd(buffer_cache_bh)
+ && ocfs2_inode_is_new(inode)) {
+ kaddr = kmap_atomic(bh_result->b_page, KM_USER0);
+ if (!kaddr) {
+ mlog(ML_ERROR, "couldn't kmap!\n");
+ goto bail;
+ }
+ memcpy(kaddr + (bh_result->b_size * iblock),
+ buffer_cache_bh->b_data,
+ bh_result->b_size);
+ kunmap_atomic(kaddr, KM_USER0);
+ set_buffer_uptodate(bh_result);
+ }
+ brelse(buffer_cache_bh);
+ }
+
+ map_bh(bh_result, inode->i_sb,
+ le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);
+
+ err = 0;
+
+bail:
+ if (bh)
+ brelse(bh);
+
+ mlog_exit(err);
+ return err;
+}
+
+static int ocfs2_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create)
+{
+ int err = 0;
+ u64 p_blkno, past_eof;
+
+ mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
+ (unsigned long long)iblock, bh_result, create);
+
+ if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
+ mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
+ inode, inode->i_ino);
+
+ if (S_ISLNK(inode->i_mode)) {
+ /* this always does I/O for some reason. */
+ err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
+ goto bail;
+ }
+
+ /* this can happen if another node truncs after our extend! */
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+ if (iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
+ OCFS2_I(inode)->ip_clusters))
+ err = -EIO;
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+ if (err)
+ goto bail;
+
+ err = ocfs2_extent_map_get_blocks(inode, iblock, 1, &p_blkno,
+ NULL);
+ if (err) {
+ mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
+ "%"MLFu64", NULL)\n", err, inode,
+ (unsigned long long)iblock, p_blkno);
+ goto bail;
+ }
+
+ map_bh(bh_result, inode->i_sb, p_blkno);
+
+ if (bh_result->b_blocknr == 0) {
+ err = -EIO;
+ mlog(ML_ERROR, "iblock = %llu p_blkno = %"MLFu64" "
+ "blkno=(%"MLFu64")\n", (unsigned long long)iblock,
+ p_blkno, OCFS2_I(inode)->ip_blkno);
+ }
+
+ past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
+ mlog(0, "Inode %lu, past_eof = %"MLFu64"\n", inode->i_ino, past_eof);
+
+ if (create && (iblock >= past_eof))
+ set_buffer_new(bh_result);
+
+bail:
+ if (err < 0)
+ err = -EIO;
+
+ mlog_exit(err);
+ return err;
+}
+
+static int ocfs2_readpage(struct file *file, struct page *page)
+{
+ struct inode *inode = page->mapping->host;
+ loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
+ int ret, unlock = 1;
+
+ mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0));
+
+ ret = ocfs2_meta_lock_with_page(inode, NULL, NULL, 0, page);
+ if (ret != 0) {
+ if (ret == AOP_TRUNCATED_PAGE)
+ unlock = 0;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ down_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+ /*
+ * i_size might have just been updated as we grabbed the meta lock. We
+ * might now be discovering a truncate that hit on another node.
+ * block_read_full_page->get_block freaks out if it is asked to read
+ * beyond the end of a file, so we check here. Callers
+ * (generic_file_read, fault->nopage) are clever enough to check i_size
+ * and notice that the page they just read isn't needed.
+ *
+ * XXX sys_readahead() seems to get that wrong?
+ */
+ if (start >= i_size_read(inode)) {
+ char *addr = kmap(page);
+ memset(addr, 0, PAGE_SIZE);
+ flush_dcache_page(page);
+ kunmap(page);
+ SetPageUptodate(page);
+ ret = 0;
+ goto out_alloc;
+ }
+
+ ret = ocfs2_data_lock_with_page(inode, 0, page);
+ if (ret != 0) {
+ if (ret == AOP_TRUNCATED_PAGE)
+ unlock = 0;
+ mlog_errno(ret);
+ goto out_alloc;
+ }
+
+ ret = block_read_full_page(page, ocfs2_get_block);
+ unlock = 0;
+
+ ocfs2_data_unlock(inode, 0);
+out_alloc:
+ up_read(&OCFS2_I(inode)->ip_alloc_sem);
+ ocfs2_meta_unlock(inode, 0);
+out:
+ if (unlock)
+ unlock_page(page);
+ mlog_exit(ret);
+ return ret;
+}
+
+/* Note: Because we don't support holes, our allocation has
+ * already happened (allocation writes zeros to the file data)
+ * so we don't have to worry about ordered writes in
+ * ocfs2_writepage.
+ *
+ * ->writepage is called during the process of invalidating the page cache
+ * during blocked lock processing. It can't block on any cluster locks
+ * to during block mapping. It's relying on the fact that the block
+ * mapping can't have disappeared under the dirty pages that it is
+ * being asked to write back.
+ */
+static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
+{
+ int ret;
+
+ mlog_entry("(0x%p)\n", page);
+
+ ret = block_write_full_page(page, ocfs2_get_block, wbc);
+
+ mlog_exit(ret);
+
+ return ret;
+}
+
+/*
+ * ocfs2_prepare_write() can be an outer-most ocfs2 call when it is called
+ * from loopback. It must be able to perform its own locking around
+ * ocfs2_get_block().
+ */
+int ocfs2_prepare_write(struct file *file, struct page *page,
+ unsigned from, unsigned to)
+{
+ struct inode *inode = page->mapping->host;
+ int ret;
+
+ mlog_entry("(0x%p, 0x%p, %u, %u)\n", file, page, from, to);
+
+ ret = ocfs2_meta_lock_with_page(inode, NULL, NULL, 0, page);
+ if (ret != 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ down_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+ ret = block_prepare_write(page, from, to, ocfs2_get_block);
+
+ up_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+ ocfs2_meta_unlock(inode, 0);
+out:
+ mlog_exit(ret);
+ return ret;
+}
+
+/* Taken from ext3. We don't necessarily need the full blown
+ * functionality yet, but IMHO it's better to cut and paste the whole
+ * thing so we can avoid introducing our own bugs (and easily pick up
+ * their fixes when they happen) --Mark */
+static int walk_page_buffers( handle_t *handle,
+ struct buffer_head *head,
+ unsigned from,
+ unsigned to,
+ int *partial,
+ int (*fn)( handle_t *handle,
+ struct buffer_head *bh))
+{
+ struct buffer_head *bh;
+ unsigned block_start, block_end;
+ unsigned blocksize = head->b_size;
+ int err, ret = 0;
+ struct buffer_head *next;
+
+ for ( bh = head, block_start = 0;
+ ret == 0 && (bh != head || !block_start);
+ block_start = block_end, bh = next)
+ {
+ next = bh->b_this_page;
+ block_end = block_start + blocksize;
+ if (block_end <= from || block_start >= to) {
+ if (partial && !buffer_uptodate(bh))
+ *partial = 1;
+ continue;
+ }
+ err = (*fn)(handle, bh);
+ if (!ret)
+ ret = err;
+ }
+ return ret;
+}
+
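+/* Start a transaction for a page write. When the inode uses ordered data
+ * mode, the page's buffers in [from, to) are added to the journal's dirty
+ * data list so they reach disk before the metadata commits. */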
+struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode,
+ struct page *page,
+ unsigned from,
+ unsigned to)
+{
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_journal_handle *handle = NULL;
+ int ret = 0;
+
+ handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS);
+ if (!handle) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (ocfs2_should_order_data(inode)) {
+ ret = walk_page_buffers(handle->k_handle,
+ page_buffers(page),
+ from, to, NULL,
+ ocfs2_journal_dirty_data);
+ if (ret < 0)
+ mlog_errno(ret);
+ }
+out:
+ if (ret) {
+ if (handle)
+ ocfs2_commit_trans(handle);
+ handle = ERR_PTR(ret);
+ }
+ return handle;
+}
+
+static int ocfs2_commit_write(struct file *file, struct page *page,
+ unsigned from, unsigned to)
+{
+ int ret, extending = 0, locklevel = 0;
+ loff_t new_i_size;
+ struct buffer_head *di_bh = NULL;
+ struct inode *inode = page->mapping->host;
+ struct ocfs2_journal_handle *handle = NULL;
+
+ mlog_entry("(0x%p, 0x%p, %u, %u)\n", file, page, from, to);
+
+ /* NOTE: ocfs2_file_aio_write has ensured that it's safe for
+ * us to sample inode->i_size here without the metadata lock:
+ *
+ * 1) We're currently holding the inode alloc lock, so no
+ * nodes can change it underneath us.
+ *
+ * 2) We've had to take the metadata lock at least once
+ * already to check for extending writes, hence ensuring
+ * that our current copy is also up to date.
+ */
+ new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
+ if (new_i_size > i_size_read(inode)) {
+ extending = 1;
+ locklevel = 1;
+ }
+
+ ret = ocfs2_meta_lock_with_page(inode, NULL, &di_bh, locklevel, page);
+ if (ret != 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_data_lock_with_page(inode, 1, page);
+ if (ret != 0) {
+ mlog_errno(ret);
+ goto out_unlock_meta;
+ }
+
+ if (extending) {
+ handle = ocfs2_start_walk_page_trans(inode, page, from, to);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ handle = NULL;
+ goto out_unlock_data;
+ }
+
+ /* Mark our buffer early. We'd rather catch this error up here
+ * as opposed to after a successful commit_write which would
+ * require us to set back inode->i_size. */
+ ret = ocfs2_journal_access(handle, inode, di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+ }
+
+ /* might update i_size */
+ ret = generic_commit_write(file, page, from, to);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ if (extending) {
+ loff_t size = (u64) i_size_read(inode);
+ struct ocfs2_dinode *di =
+ (struct ocfs2_dinode *)di_bh->b_data;
+
+ /* ocfs2_mark_inode_dirty is too heavy to use here. */
+ inode->i_blocks = ocfs2_align_bytes_to_sectors(size);
+ inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+
+ di->i_size = cpu_to_le64(size);
+ di->i_ctime = di->i_mtime =
+ cpu_to_le64(inode->i_mtime.tv_sec);
+ di->i_ctime_nsec = di->i_mtime_nsec =
+ cpu_to_le32(inode->i_mtime.tv_nsec);
+
+ ret = ocfs2_journal_dirty(handle, di_bh);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+ }
+
+ BUG_ON(extending && (i_size_read(inode) != new_i_size));
+
+out_commit:
+ if (handle)
+ ocfs2_commit_trans(handle);
+out_unlock_data:
+ ocfs2_data_unlock(inode, 1);
+out_unlock_meta:
+ ocfs2_meta_unlock(inode, locklevel);
+out:
+ if (di_bh)
+ brelse(di_bh);
+
+ mlog_exit(ret);
+ return ret;
+}
+
+static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
+{
+ sector_t status;
+ u64 p_blkno = 0;
+ int err = 0;
+ struct inode *inode = mapping->host;
+
+ mlog_entry("(block = %llu)\n", (unsigned long long)block);
+
+ /* We don't need to lock journal system files, since they aren't
+ * accessed concurrently from multiple nodes.
+ */
+ if (!INODE_JOURNAL(inode)) {
+ err = ocfs2_meta_lock(inode, NULL, NULL, 0);
+ if (err) {
+ if (err != -ENOENT)
+ mlog_errno(err);
+ goto bail;
+ }
+ down_read(&OCFS2_I(inode)->ip_alloc_sem);
+ }
+
+ err = ocfs2_extent_map_get_blocks(inode, block, 1, &p_blkno,
+ NULL);
+
+ if (!INODE_JOURNAL(inode)) {
+ up_read(&OCFS2_I(inode)->ip_alloc_sem);
+ ocfs2_meta_unlock(inode, 0);
+ }
+
+ if (err) {
+ mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
+ (unsigned long long)block);
+ mlog_errno(err);
+ goto bail;
+ }
+
+
+bail:
+ status = err ? 0 : p_blkno;
+
+ mlog_exit((int)status);
+
+ return status;
+}
+
+/*
+ * TODO: Make this into a generic get_blocks function.
+ *
+ * From do_direct_io in direct-io.c:
+ * "So what we do is to permit the ->get_blocks function to populate
+ * bh.b_size with the size of IO which is permitted at this offset and
+ * this i_blkbits."
+ *
+ * This function is called directly from get_more_blocks in direct-io.c.
+ *
+ * called like this: dio->get_blocks(dio->inode, fs_startblk,
+ * fs_count, map_bh, dio->rw == WRITE);
+ */
+static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
+ unsigned long max_blocks,
+ struct buffer_head *bh_result, int create)
+{
+ int ret;
+ u64 vbo_max; /* file offset, max_blocks from iblock */
+ u64 p_blkno;
+ int contig_blocks;
+ unsigned char blocksize_bits;
+
+ if (!inode || !bh_result) {
+ mlog(ML_ERROR, "inode or bh_result is null\n");
+ return -EIO;
+ }
+
+ blocksize_bits = inode->i_sb->s_blocksize_bits;
+
+ /* This function won't even be called if the request isn't all
+ * nicely aligned and of the right size, so there's no need
+ * for us to check any of that. */
+
+ vbo_max = ((u64)iblock + max_blocks) << blocksize_bits;
+
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+ if ((iblock + max_blocks) >
+ ocfs2_clusters_to_blocks(inode->i_sb,
+ OCFS2_I(inode)->ip_clusters)) {
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+ ret = -EIO;
+ goto bail;
+ }
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+
+ /* This figures out the size of the next contiguous block, and
+ * our logical offset */
+ ret = ocfs2_extent_map_get_blocks(inode, iblock, 1, &p_blkno,
+ &contig_blocks);
+ if (ret) {
+ mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
+ (unsigned long long)iblock);
+ ret = -EIO;
+ goto bail;
+ }
+
+ map_bh(bh_result, inode->i_sb, p_blkno);
+
+ /* make sure we don't map more than max_blocks blocks here as
+ that's all the kernel will handle at this point. */
+ if (max_blocks < contig_blocks)
+ contig_blocks = max_blocks;
+ bh_result->b_size = contig_blocks << blocksize_bits;
+bail:
+ return ret;
+}
+
+/*
+ * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're
+ * particularly interested in the aio/dio case. Like the core uses
+ * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from
+ * truncation on another.
+ */
+static void ocfs2_dio_end_io(struct kiocb *iocb,
+ loff_t offset,
+ ssize_t bytes,
+ void *private)
+{
+ struct inode *inode = iocb->ki_filp->f_dentry->d_inode;
+
+ /* this io's submitter should not have unlocked this before we could */
+ BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
+ ocfs2_iocb_clear_rw_locked(iocb);
+ up_read(&inode->i_alloc_sem);
+ ocfs2_rw_unlock(inode, 0);
+}
+
+static ssize_t ocfs2_direct_IO(int rw,
+ struct kiocb *iocb,
+ const struct iovec *iov,
+ loff_t offset,
+ unsigned long nr_segs)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
+ int ret;
+
+ mlog_entry_void();
+ ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
+ inode->i_sb->s_bdev, iov, offset,
+ nr_segs,
+ ocfs2_direct_IO_get_blocks,
+ ocfs2_dio_end_io);
+ mlog_exit(ret);
+ return ret;
+}
+
+struct address_space_operations ocfs2_aops = {
+ .readpage = ocfs2_readpage,
+ .writepage = ocfs2_writepage,
+ .prepare_write = ocfs2_prepare_write,
+ .commit_write = ocfs2_commit_write,
+ .bmap = ocfs2_bmap,
+ .sync_page = block_sync_page,
+ .direct_IO = ocfs2_direct_IO
+};
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
new file mode 100644
index 00000000000..d40456d509a
--- /dev/null
+++ b/fs/ocfs2/aops.h
@@ -0,0 +1,41 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2002, 2004, 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_AOPS_H
+#define OCFS2_AOPS_H
+
+int ocfs2_prepare_write(struct file *file, struct page *page,
+ unsigned from, unsigned to);
+
+struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode,
+ struct page *page,
+ unsigned from,
+ unsigned to);
+
+/* all ocfs2_dio_end_io()'s fault */
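+/* Bit 0 of iocb->private records whether the direct I/O submitter holds the
+ * rw cluster lock, so that ocfs2_dio_end_io() knows to drop it (exactly
+ * once) when the I/O completes. */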
+#define ocfs2_iocb_is_rw_locked(iocb) \
+ test_bit(0, (unsigned long *)&iocb->private)
+#define ocfs2_iocb_set_rw_locked(iocb) \
+ set_bit(0, (unsigned long *)&iocb->private)
+#define ocfs2_iocb_clear_rw_locked(iocb) \
+ clear_bit(0, (unsigned long *)&iocb->private)
+
+#endif /* OCFS2_AOPS_H */
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
new file mode 100644
index 00000000000..d424041b38e
--- /dev/null
+++ b/fs/ocfs2/buffer_head_io.c
@@ -0,0 +1,232 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * buffer_head_io.c
+ *
+ * Buffer cache handling
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#include "alloc.h"
+#include "inode.h"
+#include "journal.h"
+#include "uptodate.h"
+
+#include "buffer_head_io.h"
+
+int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
+ struct inode *inode)
+{
+ int ret = 0;
+
+ mlog_entry("(bh->b_blocknr = %llu, inode=%p)\n",
+ (unsigned long long)bh->b_blocknr, inode);
+
+ BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
+ BUG_ON(buffer_jbd(bh));
+
+ /* No need to check for a soft readonly file system here.
+ * Non-journalled writes are only ever done on system files
+ * which can get modified during recovery even if read-only. */
+ if (ocfs2_is_hard_readonly(osb)) {
+ ret = -EROFS;
+ goto out;
+ }
+
+ down(&OCFS2_I(inode)->ip_io_sem);
+
+ lock_buffer(bh);
+ set_buffer_uptodate(bh);
+
+ /* remove from dirty list before I/O. */
+ clear_buffer_dirty(bh);
+
+ get_bh(bh); /* for end_buffer_write_sync() */
+ bh->b_end_io = end_buffer_write_sync;
+ submit_bh(WRITE, bh);
+
+ wait_on_buffer(bh);
+
+ if (buffer_uptodate(bh)) {
+ ocfs2_set_buffer_uptodate(inode, bh);
+ } else {
+ /* We don't need to remove the clustered uptodate
+ * information for this bh as it's not marked locally
+ * uptodate. */
+ ret = -EIO;
+ brelse(bh);
+ }
+
+ up(&OCFS2_I(inode)->ip_io_sem);
+out:
+ mlog_exit(ret);
+ return ret;
+}
+
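+/* Read nr blocks starting at 'block' into bhs[]; NULL entries are allocated
+ * with sb_getblk(). With OCFS2_BH_CACHED set (and an inode supplied),
+ * buffers already marked uptodate in the cluster uptodate cache are not
+ * re-read from disk. Returns 0 on success or a negative error code. */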
+int ocfs2_read_blocks(struct ocfs2_super *osb, u64 block, int nr,
+ struct buffer_head *bhs[], int flags,
+ struct inode *inode)
+{
+ int status = 0;
+ struct super_block *sb;
+ int i, ignore_cache = 0;
+ struct buffer_head *bh;
+
+ mlog_entry("(block=(%"MLFu64"), nr=(%d), flags=%d, inode=%p)\n",
+ block, nr, flags, inode);
+
+ if (osb == NULL || osb->sb == NULL || bhs == NULL) {
+ status = -EINVAL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ if (nr < 0) {
+ mlog(ML_ERROR, "asked to read %d blocks!\n", nr);
+ status = -EINVAL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ if (nr == 0) {
+ mlog(ML_BH_IO, "No buffers will be read!\n");
+ status = 0;
+ goto bail;
+ }
+
+ sb = osb->sb;
+
+ if (flags & OCFS2_BH_CACHED && !inode)
+ flags &= ~OCFS2_BH_CACHED;
+
+ if (inode)
+ down(&OCFS2_I(inode)->ip_io_sem);
+ for (i = 0 ; i < nr ; i++) {
+ if (bhs[i] == NULL) {
+ bhs[i] = sb_getblk(sb, block++);
+ if (bhs[i] == NULL) {
+ if (inode)
+ up(&OCFS2_I(inode)->ip_io_sem);
+ status = -EIO;
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+ bh = bhs[i];
+ ignore_cache = 0;
+
+ if (flags & OCFS2_BH_CACHED &&
+ !ocfs2_buffer_uptodate(inode, bh)) {
+ mlog(ML_UPTODATE,
+ "bh (%llu), inode %"MLFu64" not uptodate\n",
+ (unsigned long long)bh->b_blocknr,
+ OCFS2_I(inode)->ip_blkno);
+ ignore_cache = 1;
+ }
+
+ /* XXX: Can we ever get this and *not* have the cached
+ * flag set? */
+ if (buffer_jbd(bh)) {
+ if (!(flags & OCFS2_BH_CACHED) || ignore_cache)
+ mlog(ML_BH_IO, "trying to sync read a jbd "
+ "managed bh (blocknr = %llu)\n",
+ (unsigned long long)bh->b_blocknr);
+ continue;
+ }
+
+ if (!(flags & OCFS2_BH_CACHED) || ignore_cache) {
+ if (buffer_dirty(bh)) {
+ /* This should probably be a BUG, or
+ * at least return an error. */
+ mlog(ML_BH_IO, "asking me to sync read a dirty "
+ "buffer! (blocknr = %llu)\n",
+ (unsigned long long)bh->b_blocknr);
+ continue;
+ }
+
+ lock_buffer(bh);
+ if (buffer_jbd(bh)) {
+#ifdef CATCH_BH_JBD_RACES
+ mlog(ML_ERROR, "block %llu had the JBD bit set "
+ "while I was in lock_buffer!",
+ (unsigned long long)bh->b_blocknr);
+ BUG();
+#else
+ unlock_buffer(bh);
+ continue;
+#endif
+ }
+ clear_buffer_uptodate(bh);
+ get_bh(bh); /* for end_buffer_read_sync() */
+ bh->b_end_io = end_buffer_read_sync;
+ if (flags & OCFS2_BH_READAHEAD)
+ submit_bh(READA, bh);
+ else
+ submit_bh(READ, bh);
+ continue;
+ }
+ }
+
+ status = 0;
+
+ for (i = (nr - 1); i >= 0; i--) {
+ bh = bhs[i];
+
+ /* We know this can't have changed as we hold the
+ * inode sem. Avoid doing any work on the bh if the
+ * journal has it. */
+ if (!buffer_jbd(bh))
+ wait_on_buffer(bh);
+
+ if (!buffer_uptodate(bh)) {
+ /* Status won't be cleared from here on out,
+ * so we can safely record this and loop back
+ * to cleanup the other buffers. Don't need to
+ * remove the clustered uptodate information
+ * for this bh as it's not marked locally
+ * uptodate. */
+ status = -EIO;
+ brelse(bh);
+ bhs[i] = NULL;
+ continue;
+ }
+
+ if (inode)
+ ocfs2_set_buffer_uptodate(inode, bh);
+ }
+ if (inode)
+ up(&OCFS2_I(inode)->ip_io_sem);
+
+ mlog(ML_BH_IO, "block=(%"MLFu64"), nr=(%d), cached=%s\n", block, nr,
+ (!(flags & OCFS2_BH_CACHED) || ignore_cache) ? "no" : "yes");
+
+bail:
+
+ mlog_exit(status);
+ return status;
+}
diff --git a/fs/ocfs2/buffer_head_io.h b/fs/ocfs2/buffer_head_io.h
new file mode 100644
index 00000000000..6ecb90937b6
--- /dev/null
+++ b/fs/ocfs2/buffer_head_io.h
@@ -0,0 +1,73 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * ocfs2_buffer_head.h
+ *
+ * Buffer cache handling functions defined
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_BUFFER_HEAD_IO_H
+#define OCFS2_BUFFER_HEAD_IO_H
+
+#include <linux/buffer_head.h>
+
+void ocfs2_end_buffer_io_sync(struct buffer_head *bh,
+ int uptodate);
+
+static inline int ocfs2_read_block(struct ocfs2_super *osb,
+ u64 off,
+ struct buffer_head **bh,
+ int flags,
+ struct inode *inode);
+
+int ocfs2_write_block(struct ocfs2_super *osb,
+ struct buffer_head *bh,
+ struct inode *inode);
+int ocfs2_read_blocks(struct ocfs2_super *osb,
+ u64 block,
+ int nr,
+ struct buffer_head *bhs[],
+ int flags,
+ struct inode *inode);
+
+
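+/* Flags for ocfs2_read_blocks(): OCFS2_BH_CACHED skips the read for buffers
+ * already marked uptodate in the cluster uptodate cache. */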
+#define OCFS2_BH_CACHED 1
+#define OCFS2_BH_READAHEAD 8 /* use this to pass READA down to submit_bh */
+
+static inline int ocfs2_read_block(struct ocfs2_super * osb, u64 off,
+ struct buffer_head **bh, int flags,
+ struct inode *inode)
+{
+ int status = 0;
+
+ if (bh == NULL) {
+ printk("ocfs2: bh == NULL\n");
+ status = -EINVAL;
+ goto bail;
+ }
+
+ status = ocfs2_read_blocks(osb, off, 1, bh,
+ flags, inode);
+
+bail:
+ return status;
+}
+
+#endif /* OCFS2_BUFFER_HEAD_IO_H */
diff --git a/fs/ocfs2/cluster/Makefile b/fs/ocfs2/cluster/Makefile
new file mode 100644
index 00000000000..cdd162f1365
--- /dev/null
+++ b/fs/ocfs2/cluster/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_OCFS2_FS) += ocfs2_nodemanager.o
+
+ocfs2_nodemanager-objs := heartbeat.o masklog.o sys.o nodemanager.o \
+ quorum.o tcp.o ver.o
diff --git a/fs/ocfs2/cluster/endian.h b/fs/ocfs2/cluster/endian.h
new file mode 100644
index 00000000000..2df9082f4e3
--- /dev/null
+++ b/fs/ocfs2/cluster/endian.h
@@ -0,0 +1,30 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_CLUSTER_ENDIAN_H
+#define OCFS2_CLUSTER_ENDIAN_H
+
+static inline void be32_add_cpu(__be32 *var, u32 val)
+{
+ *var = cpu_to_be32(be32_to_cpu(*var) + val);
+}
+
+#endif /* OCFS2_CLUSTER_ENDIAN_H */
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
new file mode 100644
index 00000000000..7307ba52891
--- /dev/null
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -0,0 +1,1797 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2004, 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/file.h>
+#include <linux/kthread.h>
+#include <linux/configfs.h>
+#include <linux/random.h>
+#include <linux/crc32.h>
+#include <linux/time.h>
+
+#include "heartbeat.h"
+#include "tcp.h"
+#include "nodemanager.h"
+#include "quorum.h"
+
+#include "masklog.h"
+
+
+/*
+ * The first heartbeat pass had one global thread that would serialize all hb
+ * callback calls. This global serializing sem should only be removed once
+ * we've made sure that all callees can deal with being called concurrently
+ * from multiple hb region threads.
+ */
+static DECLARE_RWSEM(o2hb_callback_sem);
+
+/*
+ * multiple hb threads are watching multiple regions. A node is live
+ * whenever any of the threads sees activity from the node in its region.
+ */
+static spinlock_t o2hb_live_lock = SPIN_LOCK_UNLOCKED;
+static struct list_head o2hb_live_slots[O2NM_MAX_NODES];
+static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+static LIST_HEAD(o2hb_node_events);
+static DECLARE_WAIT_QUEUE_HEAD(o2hb_steady_queue);
+
+static LIST_HEAD(o2hb_all_regions);
+
+static struct o2hb_callback {
+ struct list_head list;
+} o2hb_callbacks[O2HB_NUM_CB];
+
+static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type);
+
+#define O2HB_DEFAULT_BLOCK_BITS 9
+
+unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;
+
+/* Only sets a new threshold if there are no active regions.
+ *
+ * No locking or otherwise interesting code is required for reading
+ * o2hb_dead_threshold as it can't change once regions are active and
+ * it's not interesting to anyone until then anyway. */
+static void o2hb_dead_threshold_set(unsigned int threshold)
+{
+ if (threshold > O2HB_MIN_DEAD_THRESHOLD) {
+ spin_lock(&o2hb_live_lock);
+ if (list_empty(&o2hb_all_regions))
+ o2hb_dead_threshold = threshold;
+ spin_unlock(&o2hb_live_lock);
+ }
+}
+
+struct o2hb_node_event {
+ struct list_head hn_item;
+ enum o2hb_callback_type hn_event_type;
+ struct o2nm_node *hn_node;
+ int hn_node_num;
+};
+
+struct o2hb_disk_slot {
+ struct o2hb_disk_heartbeat_block *ds_raw_block;
+ u8 ds_node_num;
+ u64 ds_last_time;
+ u64 ds_last_generation;
+ u16 ds_equal_samples;
+ u16 ds_changed_samples;
+ struct list_head ds_live_item;
+};
+
+/* each thread owns a region. When we're asked to tear down the region
+ * we ask the thread to stop, and it cleans up the region */
+struct o2hb_region {
+ struct config_item hr_item;
+
+ struct list_head hr_all_item;
+ unsigned hr_unclean_stop:1;
+
+ /* protected by the hr_callback_sem */
+ struct task_struct *hr_task;
+
+ unsigned int hr_blocks;
+ unsigned long long hr_start_block;
+
+ unsigned int hr_block_bits;
+ unsigned int hr_block_bytes;
+
+ unsigned int hr_slots_per_page;
+ unsigned int hr_num_pages;
+
+ struct page **hr_slot_data;
+ struct block_device *hr_bdev;
+ struct o2hb_disk_slot *hr_slots;
+
+ /* let the person setting up hb wait for it to return until it
+ * has reached a 'steady' state. This will be fixed when we have
+ * a more complete api that doesn't lead to this sort of fragility. */
+ atomic_t hr_steady_iterations;
+
+ char hr_dev_name[BDEVNAME_SIZE];
+
+ unsigned int hr_timeout_ms;
+
+ /* randomized as the region goes up and down so that a node
+ * recognizes a node going up and down in one iteration */
+ u64 hr_generation;
+
+ struct work_struct hr_write_timeout_work;
+ unsigned long hr_last_timeout_start;
+
+ /* Used during o2hb_check_slot to hold a copy of the block
+ * being checked because we temporarily have to zero out the
+ * crc field. */
+ struct o2hb_disk_heartbeat_block *hr_tmp_block;
+};
+
+struct o2hb_bio_wait_ctxt {
+ atomic_t wc_num_reqs;
+ struct completion wc_io_complete;
+};
+
+static void o2hb_write_timeout(void *arg)
+{
+ struct o2hb_region *reg = arg;
+
+ mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
+ "milliseconds\n", reg->hr_dev_name,
+ jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
+ o2quo_disk_timeout();
+}
+
+static void o2hb_arm_write_timeout(struct o2hb_region *reg)
+{
+ mlog(0, "Queue write timeout for %u ms\n", O2HB_MAX_WRITE_TIMEOUT_MS);
+
+ cancel_delayed_work(&reg->hr_write_timeout_work);
+ reg->hr_last_timeout_start = jiffies;
+ schedule_delayed_work(&reg->hr_write_timeout_work,
+ msecs_to_jiffies(O2HB_MAX_WRITE_TIMEOUT_MS));
+}
+
+static void o2hb_disarm_write_timeout(struct o2hb_region *reg)
+{
+ cancel_delayed_work(&reg->hr_write_timeout_work);
+ flush_scheduled_work();
+}
+
+static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc,
+ unsigned int num_ios)
+{
+ atomic_set(&wc->wc_num_reqs, num_ios);
+ init_completion(&wc->wc_io_complete);
+}
+
+/* Used in error paths too */
+static inline void o2hb_bio_wait_dec(struct o2hb_bio_wait_ctxt *wc,
+ unsigned int num)
+{
+ /* sadly atomic_sub_and_test() isn't available on all platforms. The
+ * good news is that the fast path only completes one at a time */
+ while(num--) {
+ if (atomic_dec_and_test(&wc->wc_num_reqs)) {
+ BUG_ON(num > 0);
+ complete(&wc->wc_io_complete);
+ }
+ }
+}
+
+static void o2hb_wait_on_io(struct o2hb_region *reg,
+ struct o2hb_bio_wait_ctxt *wc)
+{
+ struct address_space *mapping = reg->hr_bdev->bd_inode->i_mapping;
+
+ blk_run_address_space(mapping);
+
+ wait_for_completion(&wc->wc_io_complete);
+}
+
+static int o2hb_bio_end_io(struct bio *bio,
+ unsigned int bytes_done,
+ int error)
+{
+ struct o2hb_bio_wait_ctxt *wc = bio->bi_private;
+
+ if (error)
+ mlog(ML_ERROR, "IO Error %d\n", error);
+
+ if (bio->bi_size)
+ return 1;
+
+ o2hb_bio_wait_dec(wc, 1);
+ return 0;
+}
+
+/* Set up a bio to cover I/O against num_slots slots starting at
+ * start_slot. */
+static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
+ struct o2hb_bio_wait_ctxt *wc,
+ unsigned int start_slot,
+ unsigned int num_slots)
+{
+ int i, nr_vecs, len, first_page, last_page;
+ unsigned int vec_len, vec_start;
+ unsigned int bits = reg->hr_block_bits;
+ unsigned int spp = reg->hr_slots_per_page;
+ struct bio *bio;
+ struct page *page;
+
+ nr_vecs = (num_slots + spp - 1) / spp;
+
+ /* Testing has shown this allocation to take long enough under
+ * GFP_KERNEL that the local node can get fenced. It would be
+ * nicest if we could pre-allocate these bios and avoid this
+ * altogether. */
+ bio = bio_alloc(GFP_ATOMIC, nr_vecs);
+ if (!bio) {
+ mlog(ML_ERROR, "Could not alloc slots BIO!\n");
+ bio = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ /* Must put everything in 512 byte sectors for the bio... */
+ bio->bi_sector = (reg->hr_start_block + start_slot) << (bits - 9);
+ bio->bi_bdev = reg->hr_bdev;
+ bio->bi_private = wc;
+ bio->bi_end_io = o2hb_bio_end_io;
+
+ first_page = start_slot / spp;
+ last_page = first_page + nr_vecs;
+ vec_start = (start_slot << bits) % PAGE_CACHE_SIZE;
+ for(i = first_page; i < last_page; i++) {
+ page = reg->hr_slot_data[i];
+
+ vec_len = PAGE_CACHE_SIZE;
+ /* last page might be short */
+ if (((i + 1) * spp) > (start_slot + num_slots))
+ vec_len = ((num_slots + start_slot) % spp) << bits;
+ vec_len -= vec_start;
+
+ mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n",
+ i, vec_len, vec_start);
+
+ len = bio_add_page(bio, page, vec_len, vec_start);
+ if (len != vec_len) {
+ bio_put(bio);
+ bio = ERR_PTR(-EIO);
+
+ mlog(ML_ERROR, "Error adding page to bio i = %d, "
+ "vec_len = %u, len = %d\n, start = %u\n",
+ i, vec_len, len, vec_start);
+ goto bail;
+ }
+
+ vec_start = 0;
+ }
+
+bail:
+ return bio;
+}
+
+/*
+ * Compute the maximum number of sectors the bdev can handle in one bio,
+ * as a power of two.
+ *
+ * Stolen from oracleasm, thanks Joel!
+ */
+static int compute_max_sectors(struct block_device *bdev)
+{
+ int max_pages, max_sectors, pow_two_sectors;
+
+ struct request_queue *q;
+
+ q = bdev_get_queue(bdev);
+ max_pages = q->max_sectors >> (PAGE_SHIFT - 9);
+ if (max_pages > BIO_MAX_PAGES)
+ max_pages = BIO_MAX_PAGES;
+ if (max_pages > q->max_phys_segments)
+ max_pages = q->max_phys_segments;
+ if (max_pages > q->max_hw_segments)
+ max_pages = q->max_hw_segments;
+ max_pages--; /* Handle I/Os that straddle a page */
+
+ max_sectors = max_pages << (PAGE_SHIFT - 9);
+
+ /* Why is fls() 1-based???? */
+ pow_two_sectors = 1 << (fls(max_sectors) - 1);
+
+ return pow_two_sectors;
+}
+
+static inline void o2hb_compute_request_limits(struct o2hb_region *reg,
+ unsigned int num_slots,
+ unsigned int *num_bios,
+ unsigned int *slots_per_bio)
+{
+ unsigned int max_sectors, io_sectors;
+
+ max_sectors = compute_max_sectors(reg->hr_bdev);
+
+ io_sectors = num_slots << (reg->hr_block_bits - 9);
+
+ *num_bios = (io_sectors + max_sectors - 1) / max_sectors;
+ *slots_per_bio = max_sectors >> (reg->hr_block_bits - 9);
+
+ mlog(ML_HB_BIO, "My io size is %u sectors for %u slots. This "
+ "device can handle %u sectors of I/O\n", io_sectors, num_slots,
+ max_sectors);
+ mlog(ML_HB_BIO, "Will need %u bios holding %u slots each\n",
+ *num_bios, *slots_per_bio);
+}
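+
+/* A worked example with purely illustrative numbers (not taken from any
+ * particular device): for 4096 byte blocks (hr_block_bits = 12), 255 slots
+ * and a device limit of 128 sectors per bio, io_sectors = 255 << 3 = 2040,
+ * so *num_bios = (2040 + 127) / 128 = 16 and *slots_per_bio = 128 >> 3 = 16,
+ * which covers all 255 slots. */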
+
+static int o2hb_read_slots(struct o2hb_region *reg,
+ unsigned int max_slots)
+{
+ unsigned int num_bios, slots_per_bio, start_slot, num_slots;
+ int i, status;
+ struct o2hb_bio_wait_ctxt wc;
+ struct bio **bios;
+ struct bio *bio;
+
+ o2hb_compute_request_limits(reg, max_slots, &num_bios, &slots_per_bio);
+
+ bios = kcalloc(num_bios, sizeof(struct bio *), GFP_KERNEL);
+ if (!bios) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ return status;
+ }
+
+ o2hb_bio_wait_init(&wc, num_bios);
+
+ num_slots = slots_per_bio;
+ for(i = 0; i < num_bios; i++) {
+ start_slot = i * slots_per_bio;
+
+ /* adjust num_slots at last bio */
+ if (max_slots < (start_slot + num_slots))
+ num_slots = max_slots - start_slot;
+
+ bio = o2hb_setup_one_bio(reg, &wc, start_slot, num_slots);
+ if (IS_ERR(bio)) {
+ o2hb_bio_wait_dec(&wc, num_bios - i);
+
+ status = PTR_ERR(bio);
+ mlog_errno(status);
+ goto bail_and_wait;
+ }
+ bios[i] = bio;
+
+ submit_bio(READ, bio);
+ }
+
+ status = 0;
+
+bail_and_wait:
+ o2hb_wait_on_io(reg, &wc);
+
+ if (bios) {
+ for(i = 0; i < num_bios; i++)
+ if (bios[i])
+ bio_put(bios[i]);
+ kfree(bios);
+ }
+
+ return status;
+}
+
+static int o2hb_issue_node_write(struct o2hb_region *reg,
+ struct bio **write_bio,
+ struct o2hb_bio_wait_ctxt *write_wc)
+{
+ int status;
+ unsigned int slot;
+ struct bio *bio;
+
+ o2hb_bio_wait_init(write_wc, 1);
+
+ slot = o2nm_this_node();
+
+ bio = o2hb_setup_one_bio(reg, write_wc, slot, 1);
+ if (IS_ERR(bio)) {
+ status = PTR_ERR(bio);
+ mlog_errno(status);
+ goto bail;
+ }
+
+ submit_bio(WRITE, bio);
+
+ *write_bio = bio;
+ status = 0;
+bail:
+ return status;
+}
+
+static u32 o2hb_compute_block_crc_le(struct o2hb_region *reg,
+ struct o2hb_disk_heartbeat_block *hb_block)
+{
+ __le32 old_cksum;
+ u32 ret;
+
+ /* We want to compute the block crc with a 0 value in the
+ * hb_cksum field. Save it off here and replace after the
+ * crc. */
+ old_cksum = hb_block->hb_cksum;
+ hb_block->hb_cksum = 0;
+
+ ret = crc32_le(0, (unsigned char *) hb_block, reg->hr_block_bytes);
+
+ hb_block->hb_cksum = old_cksum;
+
+ return ret;
+}
+
+static void o2hb_dump_slot(struct o2hb_disk_heartbeat_block *hb_block)
+{
+ mlog(ML_ERROR, "Dump slot information: seq = 0x%"MLFx64", node = %u, "
+ "cksum = 0x%x, generation 0x%"MLFx64"\n",
+ le64_to_cpu(hb_block->hb_seq), hb_block->hb_node,
+ le32_to_cpu(hb_block->hb_cksum),
+ le64_to_cpu(hb_block->hb_generation));
+}
+
+static int o2hb_verify_crc(struct o2hb_region *reg,
+ struct o2hb_disk_heartbeat_block *hb_block)
+{
+ u32 read, computed;
+
+ read = le32_to_cpu(hb_block->hb_cksum);
+ computed = o2hb_compute_block_crc_le(reg, hb_block);
+
+ return read == computed;
+}
+
+/* We want to make sure that nobody is heartbeating on top of us --
+ * this will help detect an invalid configuration. */
+static int o2hb_check_last_timestamp(struct o2hb_region *reg)
+{
+ int node_num, ret;
+ struct o2hb_disk_slot *slot;
+ struct o2hb_disk_heartbeat_block *hb_block;
+
+ node_num = o2nm_this_node();
+
+ ret = 1;
+ slot = &reg->hr_slots[node_num];
+ /* Don't check on our 1st timestamp */
+ if (slot->ds_last_time) {
+ hb_block = slot->ds_raw_block;
+
+ if (le64_to_cpu(hb_block->hb_seq) != slot->ds_last_time)
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static inline void o2hb_prepare_block(struct o2hb_region *reg,
+ u64 generation)
+{
+ int node_num;
+ u64 cputime;
+ struct o2hb_disk_slot *slot;
+ struct o2hb_disk_heartbeat_block *hb_block;
+
+ node_num = o2nm_this_node();
+ slot = &reg->hr_slots[node_num];
+
+ hb_block = (struct o2hb_disk_heartbeat_block *)slot->ds_raw_block;
+ memset(hb_block, 0, reg->hr_block_bytes);
+ /* TODO: time stuff */
+ cputime = CURRENT_TIME.tv_sec;
+ if (!cputime)
+ cputime = 1;
+
+ hb_block->hb_seq = cpu_to_le64(cputime);
+ hb_block->hb_node = node_num;
+ hb_block->hb_generation = cpu_to_le64(generation);
+
+ /* This step must always happen last! */
+ hb_block->hb_cksum = cpu_to_le32(o2hb_compute_block_crc_le(reg,
+ hb_block));
+
+ mlog(ML_HB_BIO, "our node generation = 0x%"MLFx64", cksum = 0x%x\n",
+ cpu_to_le64(generation), le32_to_cpu(hb_block->hb_cksum));
+}
+
+static void o2hb_fire_callbacks(struct o2hb_callback *hbcall,
+ struct o2nm_node *node,
+ int idx)
+{
+ struct list_head *iter;
+ struct o2hb_callback_func *f;
+
+ list_for_each(iter, &hbcall->list) {
+ f = list_entry(iter, struct o2hb_callback_func, hc_item);
+ mlog(ML_HEARTBEAT, "calling funcs %p\n", f);
+ (f->hc_func)(node, idx, f->hc_data);
+ }
+}
+
+/* Will run the list in order until we process the passed event */
+static void o2hb_run_event_list(struct o2hb_node_event *queued_event)
+{
+ int empty;
+ struct o2hb_callback *hbcall;
+ struct o2hb_node_event *event;
+
+ spin_lock(&o2hb_live_lock);
+ empty = list_empty(&queued_event->hn_item);
+ spin_unlock(&o2hb_live_lock);
+ if (empty)
+ return;
+
+ /* Holding callback sem assures we don't alter the callback
+ * lists when doing this, and serializes ourselves with other
+ * processes wanting callbacks. */
+ down_write(&o2hb_callback_sem);
+
+ spin_lock(&o2hb_live_lock);
+ while (!list_empty(&o2hb_node_events)
+ && !list_empty(&queued_event->hn_item)) {
+ event = list_entry(o2hb_node_events.next,
+ struct o2hb_node_event,
+ hn_item);
+ list_del_init(&event->hn_item);
+ spin_unlock(&o2hb_live_lock);
+
+ mlog(ML_HEARTBEAT, "Node %s event for %d\n",
+ event->hn_event_type == O2HB_NODE_UP_CB ? "UP" : "DOWN",
+ event->hn_node_num);
+
+ hbcall = hbcall_from_type(event->hn_event_type);
+
+ /* We should *never* have gotten on to the list with a
+ * bad type... This isn't something that we should try
+ * to recover from. */
+ BUG_ON(IS_ERR(hbcall));
+
+ o2hb_fire_callbacks(hbcall, event->hn_node, event->hn_node_num);
+
+ spin_lock(&o2hb_live_lock);
+ }
+ spin_unlock(&o2hb_live_lock);
+
+ up_write(&o2hb_callback_sem);
+}
+
+static void o2hb_queue_node_event(struct o2hb_node_event *event,
+ enum o2hb_callback_type type,
+ struct o2nm_node *node,
+ int node_num)
+{
+ assert_spin_locked(&o2hb_live_lock);
+
+ event->hn_event_type = type;
+ event->hn_node = node;
+ event->hn_node_num = node_num;
+
+ mlog(ML_HEARTBEAT, "Queue node %s event for node %d\n",
+ type == O2HB_NODE_UP_CB ? "UP" : "DOWN", node_num);
+
+ list_add_tail(&event->hn_item, &o2hb_node_events);
+}
+
+static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot)
+{
+ struct o2hb_node_event event =
+ { .hn_item = LIST_HEAD_INIT(event.hn_item), };
+ struct o2nm_node *node;
+
+ node = o2nm_get_node_by_num(slot->ds_node_num);
+ if (!node)
+ return;
+
+ spin_lock(&o2hb_live_lock);
+ if (!list_empty(&slot->ds_live_item)) {
+ mlog(ML_HEARTBEAT, "Shutdown, node %d leaves region\n",
+ slot->ds_node_num);
+
+ list_del_init(&slot->ds_live_item);
+
+ if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
+ clear_bit(slot->ds_node_num, o2hb_live_node_bitmap);
+
+ o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node,
+ slot->ds_node_num);
+ }
+ }
+ spin_unlock(&o2hb_live_lock);
+
+ o2hb_run_event_list(&event);
+
+ o2nm_node_put(node);
+}
+
+static int o2hb_check_slot(struct o2hb_region *reg,
+ struct o2hb_disk_slot *slot)
+{
+ int changed = 0, gen_changed = 0;
+ struct o2hb_node_event event =
+ { .hn_item = LIST_HEAD_INIT(event.hn_item), };
+ struct o2nm_node *node;
+ struct o2hb_disk_heartbeat_block *hb_block = reg->hr_tmp_block;
+ u64 cputime;
+
+ memcpy(hb_block, slot->ds_raw_block, reg->hr_block_bytes);
+
+ /* Is this correct? Do we assume that the node doesn't exist
+ * if we're not configured for him? */
+ node = o2nm_get_node_by_num(slot->ds_node_num);
+ if (!node)
+ return 0;
+
+ if (!o2hb_verify_crc(reg, hb_block)) {
+ /* all paths from here will drop o2hb_live_lock for
+ * us. */
+ spin_lock(&o2hb_live_lock);
+
+ /* Don't print an error on the console in this case -
+ * a freshly formatted heartbeat area will not have a
+ * crc set on it. */
+ if (list_empty(&slot->ds_live_item))
+ goto out;
+
+ /* The node is live but pushed out a bad crc. We
+ * consider it a transient miss but don't populate any
+ * other values as they may be junk. */
+ mlog(ML_ERROR, "Node %d has written a bad crc to %s\n",
+ slot->ds_node_num, reg->hr_dev_name);
+ o2hb_dump_slot(hb_block);
+
+ slot->ds_equal_samples++;
+ goto fire_callbacks;
+ }
+
+ /* we don't care if these wrap.. the state transitions below
+ * clear at the right places */
+ cputime = le64_to_cpu(hb_block->hb_seq);
+ if (slot->ds_last_time != cputime)
+ slot->ds_changed_samples++;
+ else
+ slot->ds_equal_samples++;
+ slot->ds_last_time = cputime;
+
+ /* The node changed heartbeat generations. We assume this to
+ * mean it dropped off but came back before we timed out. We
+ * want to consider it down for the time being but don't want
+ * to lose any changed_samples state we might build up to
+ * considering it live again. */
+ if (slot->ds_last_generation != le64_to_cpu(hb_block->hb_generation)) {
+ gen_changed = 1;
+ slot->ds_equal_samples = 0;
+ mlog(ML_HEARTBEAT, "Node %d changed generation (0x%"MLFx64" "
+ "to 0x%"MLFx64")\n", slot->ds_node_num,
+ slot->ds_last_generation,
+ le64_to_cpu(hb_block->hb_generation));
+ }
+
+ slot->ds_last_generation = le64_to_cpu(hb_block->hb_generation);
+
+ mlog(ML_HEARTBEAT, "Slot %d gen 0x%"MLFx64" cksum 0x%x "
+ "seq %"MLFu64" last %"MLFu64" changed %u equal %u\n",
+ slot->ds_node_num, slot->ds_last_generation,
+ le32_to_cpu(hb_block->hb_cksum), le64_to_cpu(hb_block->hb_seq),
+ slot->ds_last_time, slot->ds_changed_samples,
+ slot->ds_equal_samples);
+
+ spin_lock(&o2hb_live_lock);
+
+fire_callbacks:
+ /* dead nodes only come to life after some number of
+ * changes at any time during their dead time */
+ if (list_empty(&slot->ds_live_item) &&
+ slot->ds_changed_samples >= O2HB_LIVE_THRESHOLD) {
+ mlog(ML_HEARTBEAT, "Node %d (id 0x%"MLFx64") joined my "
+ "region\n", slot->ds_node_num, slot->ds_last_generation);
+
+ /* first on the list generates a callback */
+ if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
+ set_bit(slot->ds_node_num, o2hb_live_node_bitmap);
+
+ o2hb_queue_node_event(&event, O2HB_NODE_UP_CB, node,
+ slot->ds_node_num);
+
+ changed = 1;
+ }
+
+ list_add_tail(&slot->ds_live_item,
+ &o2hb_live_slots[slot->ds_node_num]);
+
+ slot->ds_equal_samples = 0;
+ goto out;
+ }
+
+ /* if the list is dead, we're done.. */
+ if (list_empty(&slot->ds_live_item))
+ goto out;
+
+ /* live nodes only go dead after enough consecutive missed
+ * samples.. reset the missed counter whenever we see
+ * activity */
+ if (slot->ds_equal_samples >= o2hb_dead_threshold || gen_changed) {
+ mlog(ML_HEARTBEAT, "Node %d left my region\n",
+ slot->ds_node_num);
+
+ /* last off the live_slot generates a callback */
+ list_del_init(&slot->ds_live_item);
+ if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
+ clear_bit(slot->ds_node_num, o2hb_live_node_bitmap);
+
+ o2hb_queue_node_event(&event, O2HB_NODE_DOWN_CB, node,
+ slot->ds_node_num);
+
+ changed = 1;
+ }
+
+ /* We don't clear this because the node is still
+ * actually writing new blocks. */
+ if (!gen_changed)
+ slot->ds_changed_samples = 0;
+ goto out;
+ }
+ if (slot->ds_changed_samples) {
+ slot->ds_changed_samples = 0;
+ slot->ds_equal_samples = 0;
+ }
+out:
+ spin_unlock(&o2hb_live_lock);
+
+ o2hb_run_event_list(&event);
+
+ o2nm_node_put(node);
+ return changed;
+}
+
+/* This could be faster if we just implemented a find_last_bit, but I
+ * don't think the circumstances warrant it. */
+static int o2hb_highest_node(unsigned long *nodes,
+ int numbits)
+{
+ int highest, node;
+
+ highest = numbits;
+ node = -1;
+ while ((node = find_next_bit(nodes, numbits, node + 1)) != -1) {
+ if (node >= numbits)
+ break;
+
+ highest = node;
+ }
+
+ return highest;
+}
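+
+/* For instance, a map of 8 bits with bits 0, 3 and 6 set returns 6, while an
+ * empty map returns numbits itself, which callers treat as "no nodes". */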
+
+static void o2hb_do_disk_heartbeat(struct o2hb_region *reg)
+{
+ int i, ret, highest_node, change = 0;
+ unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ struct bio *write_bio;
+ struct o2hb_bio_wait_ctxt write_wc;
+
+ if (o2nm_configured_node_map(configured_nodes, sizeof(configured_nodes)))
+ return;
+
+ highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES);
+ if (highest_node >= O2NM_MAX_NODES) {
+ mlog(ML_NOTICE, "ocfs2_heartbeat: no configured nodes found!\n");
+ return;
+ }
+
+ /* No sense in reading the slots of nodes that don't exist
+ * yet. Of course, if the node definitions have holes in them
+ * then we're reading an empty slot anyway... Consider this
+ * best-effort. */
+ ret = o2hb_read_slots(reg, highest_node + 1);
+ if (ret < 0) {
+ mlog_errno(ret);
+ return;
+ }
+
+ /* With an up to date view of the slots, we can check that no
+ * other node has been improperly configured to heartbeat in
+ * our slot. */
+ if (!o2hb_check_last_timestamp(reg))
+ mlog(ML_ERROR, "Device \"%s\": another node is heartbeating "
+ "in our slot!\n", reg->hr_dev_name);
+
+ /* fill in the proper info for our next heartbeat */
+ o2hb_prepare_block(reg, reg->hr_generation);
+
+ /* And fire off the write. Note that we don't wait on this I/O
+ * until later. */
+ ret = o2hb_issue_node_write(reg, &write_bio, &write_wc);
+ if (ret < 0) {
+ mlog_errno(ret);
+ return;
+ }
+
+ i = -1;
+ while((i = find_next_bit(configured_nodes, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
+
+ change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
+ }
+
+ /*
+ * We have to be sure we've advertised ourselves on disk
+ * before we can go to steady state. This ensures that
+ * people we find in our steady state have seen us.
+ */
+ o2hb_wait_on_io(reg, &write_wc);
+ bio_put(write_bio);
+ o2hb_arm_write_timeout(reg);
+
+ /* let the person who launched us know when things are steady */
+ if (!change && (atomic_read(&reg->hr_steady_iterations) != 0)) {
+ if (atomic_dec_and_test(&reg->hr_steady_iterations))
+ wake_up(&o2hb_steady_queue);
+ }
+}
+
+/* Subtract b from a, storing the result in a. If b is after a, the
+ * result is clamped to zero. */
+static void o2hb_tv_subtract(struct timeval *a,
+ struct timeval *b)
+{
+ /* just return 0 when b is after a */
+ if (a->tv_sec < b->tv_sec ||
+ (a->tv_sec == b->tv_sec && a->tv_usec < b->tv_usec)) {
+ a->tv_sec = 0;
+ a->tv_usec = 0;
+ return;
+ }
+
+ a->tv_sec -= b->tv_sec;
+ a->tv_usec -= b->tv_usec;
+ while ( a->tv_usec < 0 ) {
+ a->tv_sec--;
+ a->tv_usec += 1000000;
+ }
+}
+
+static unsigned int o2hb_elapsed_msecs(struct timeval *start,
+ struct timeval *end)
+{
+ struct timeval res = *end;
+
+ o2hb_tv_subtract(&res, start);
+
+ return res.tv_sec * 1000 + res.tv_usec / 1000;
+}
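+
+/* For instance, if a heartbeat pass took 300 ms and hr_timeout_ms is the
+ * default 2000 ms, o2hb_thread() below sleeps for the remaining 1700 ms so
+ * that successive disk writes stay roughly hr_timeout_ms apart. */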
+
+/*
+ * we ride the region ref that the region dir holds. before the region
+ * dir is removed and drops its ref it will wait to tear down this
+ * thread.
+ */
+static int o2hb_thread(void *data)
+{
+ int i, ret;
+ struct o2hb_region *reg = data;
+ struct bio *write_bio;
+ struct o2hb_bio_wait_ctxt write_wc;
+ struct timeval before_hb, after_hb;
+ unsigned int elapsed_msec;
+
+ mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread running\n");
+
+ set_user_nice(current, -20);
+
+ while (!kthread_should_stop() && !reg->hr_unclean_stop) {
+ /* We track the time spent inside
+ * o2hb_do_disk_heartbeat so that we avoid more than
+ * hr_timeout_ms between disk writes. On busy systems
+ * this should result in a heartbeat which is less
+ * likely to time itself out. */
+ do_gettimeofday(&before_hb);
+
+ o2hb_do_disk_heartbeat(reg);
+
+ do_gettimeofday(&after_hb);
+ elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb);
+
+ mlog(0, "start = %lu.%lu, end = %lu.%lu, msec = %u\n",
+ before_hb.tv_sec, before_hb.tv_usec,
+ after_hb.tv_sec, after_hb.tv_usec, elapsed_msec);
+
+ if (elapsed_msec < reg->hr_timeout_ms) {
+ /* the kthread api has blocked signals for us so no
+ * need to record the return value. */
+ msleep_interruptible(reg->hr_timeout_ms - elapsed_msec);
+ }
+ }
+
+ o2hb_disarm_write_timeout(reg);
+
+ /* unclean stop is only used in very bad situations */
+ for(i = 0; !reg->hr_unclean_stop && i < reg->hr_blocks; i++)
+ o2hb_shutdown_slot(&reg->hr_slots[i]);
+
+ /* Explicit down notification - avoid forcing the other nodes
+ * to time out on this region when we could just as easily
+ * write a clear generation - thus indicating to them that
+ * this node has left this region.
+ *
+ * XXX: Should we skip this on unclean_stop? */
+ o2hb_prepare_block(reg, 0);
+ ret = o2hb_issue_node_write(reg, &write_bio, &write_wc);
+ if (ret == 0) {
+ o2hb_wait_on_io(reg, &write_wc);
+ bio_put(write_bio);
+ } else {
+ mlog_errno(ret);
+ }
+
+ mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n");
+
+ return 0;
+}
+
+void o2hb_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(o2hb_callbacks); i++)
+ INIT_LIST_HEAD(&o2hb_callbacks[i].list);
+
+ for (i = 0; i < ARRAY_SIZE(o2hb_live_slots); i++)
+ INIT_LIST_HEAD(&o2hb_live_slots[i]);
+
+ INIT_LIST_HEAD(&o2hb_node_events);
+
+ memset(o2hb_live_node_bitmap, 0, sizeof(o2hb_live_node_bitmap));
+}
+
+/* if we're already in a callback then we're already serialized by the sem */
+static void o2hb_fill_node_map_from_callback(unsigned long *map,
+ unsigned bytes)
+{
+ BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long)));
+
+ memcpy(map, &o2hb_live_node_bitmap, bytes);
+}
+
+/*
+ * get a map of all nodes that are heartbeating in any regions
+ */
+void o2hb_fill_node_map(unsigned long *map, unsigned bytes)
+{
+ /* callers want to serialize this map and callbacks so that they
+ * can trust that they don't miss nodes coming to the party */
+ down_read(&o2hb_callback_sem);
+ spin_lock(&o2hb_live_lock);
+ o2hb_fill_node_map_from_callback(map, bytes);
+ spin_unlock(&o2hb_live_lock);
+ up_read(&o2hb_callback_sem);
+}
+EXPORT_SYMBOL_GPL(o2hb_fill_node_map);
+
+/*
+ * heartbeat configfs bits. The heartbeat set is a default set under
+ * the cluster set in nodemanager.c.
+ */
+
+static struct o2hb_region *to_o2hb_region(struct config_item *item)
+{
+ return item ? container_of(item, struct o2hb_region, hr_item) : NULL;
+}
+
+/* drop_item only drops its ref after killing the thread, nothing should
+ * be using the region anymore. this has to clean up any state that
+ * attributes might have built up. */
+static void o2hb_region_release(struct config_item *item)
+{
+ int i;
+ struct page *page;
+ struct o2hb_region *reg = to_o2hb_region(item);
+
+ if (reg->hr_tmp_block)
+ kfree(reg->hr_tmp_block);
+
+ if (reg->hr_slot_data) {
+ for (i = 0; i < reg->hr_num_pages; i++) {
+ page = reg->hr_slot_data[i];
+ if (page)
+ __free_page(page);
+ }
+ kfree(reg->hr_slot_data);
+ }
+
+ if (reg->hr_bdev)
+ blkdev_put(reg->hr_bdev);
+
+ if (reg->hr_slots)
+ kfree(reg->hr_slots);
+
+ spin_lock(&o2hb_live_lock);
+ list_del(&reg->hr_all_item);
+ spin_unlock(&o2hb_live_lock);
+
+ kfree(reg);
+}
+
+static int o2hb_read_block_input(struct o2hb_region *reg,
+ const char *page,
+ size_t count,
+ unsigned long *ret_bytes,
+ unsigned int *ret_bits)
+{
+ unsigned long bytes;
+ char *p = (char *)page;
+
+ bytes = simple_strtoul(p, &p, 0);
+ if (!p || (*p && (*p != '\n')))
+ return -EINVAL;
+
+ /* Heartbeat and fs min / max block sizes are the same. */
+ if (bytes > 4096 || bytes < 512)
+ return -ERANGE;
+ if (hweight16(bytes) != 1)
+ return -EINVAL;
+
+ if (ret_bytes)
+ *ret_bytes = bytes;
+ if (ret_bits)
+ *ret_bits = ffs(bytes) - 1;
+
+ return 0;
+}
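+
+/* Examples of the checks above (input values chosen only for illustration):
+ * writing "4096" yields *ret_bytes = 4096 and *ret_bits = ffs(4096) - 1 = 12,
+ * "512" yields 9 bits, "3072" fails the power-of-two test with -EINVAL and
+ * "8192" falls outside the 512-4096 range with -ERANGE. */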
+
+static ssize_t o2hb_region_block_bytes_read(struct o2hb_region *reg,
+ char *page)
+{
+ return sprintf(page, "%u\n", reg->hr_block_bytes);
+}
+
+static ssize_t o2hb_region_block_bytes_write(struct o2hb_region *reg,
+ const char *page,
+ size_t count)
+{
+ int status;
+ unsigned long block_bytes;
+ unsigned int block_bits;
+
+ if (reg->hr_bdev)
+ return -EINVAL;
+
+ status = o2hb_read_block_input(reg, page, count,
+ &block_bytes, &block_bits);
+ if (status)
+ return status;
+
+ reg->hr_block_bytes = (unsigned int)block_bytes;
+ reg->hr_block_bits = block_bits;
+
+ return count;
+}
+
+static ssize_t o2hb_region_start_block_read(struct o2hb_region *reg,
+ char *page)
+{
+ return sprintf(page, "%llu\n", reg->hr_start_block);
+}
+
+static ssize_t o2hb_region_start_block_write(struct o2hb_region *reg,
+ const char *page,
+ size_t count)
+{
+ unsigned long long tmp;
+ char *p = (char *)page;
+
+ if (reg->hr_bdev)
+ return -EINVAL;
+
+ tmp = simple_strtoull(p, &p, 0);
+ if (!p || (*p && (*p != '\n')))
+ return -EINVAL;
+
+ reg->hr_start_block = tmp;
+
+ return count;
+}
+
+static ssize_t o2hb_region_blocks_read(struct o2hb_region *reg,
+ char *page)
+{
+ return sprintf(page, "%d\n", reg->hr_blocks);
+}
+
+static ssize_t o2hb_region_blocks_write(struct o2hb_region *reg,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ char *p = (char *)page;
+
+ if (reg->hr_bdev)
+ return -EINVAL;
+
+ tmp = simple_strtoul(p, &p, 0);
+ if (!p || (*p && (*p != '\n')))
+ return -EINVAL;
+
+ if (tmp > O2NM_MAX_NODES || tmp == 0)
+ return -ERANGE;
+
+ reg->hr_blocks = (unsigned int)tmp;
+
+ return count;
+}
+
+static ssize_t o2hb_region_dev_read(struct o2hb_region *reg,
+ char *page)
+{
+ unsigned int ret = 0;
+
+ if (reg->hr_bdev)
+ ret = sprintf(page, "%s\n", reg->hr_dev_name);
+
+ return ret;
+}
+
+static void o2hb_init_region_params(struct o2hb_region *reg)
+{
+ reg->hr_slots_per_page = PAGE_CACHE_SIZE >> reg->hr_block_bits;
+ reg->hr_timeout_ms = O2HB_REGION_TIMEOUT_MS;
+
+ mlog(ML_HEARTBEAT, "hr_start_block = %llu, hr_blocks = %u\n",
+ reg->hr_start_block, reg->hr_blocks);
+ mlog(ML_HEARTBEAT, "hr_block_bytes = %u, hr_block_bits = %u\n",
+ reg->hr_block_bytes, reg->hr_block_bits);
+ mlog(ML_HEARTBEAT, "hr_timeout_ms = %u\n", reg->hr_timeout_ms);
+ mlog(ML_HEARTBEAT, "dead threshold = %u\n", o2hb_dead_threshold);
+}
+
+static int o2hb_map_slot_data(struct o2hb_region *reg)
+{
+ int i, j;
+ unsigned int last_slot;
+ unsigned int spp = reg->hr_slots_per_page;
+ struct page *page;
+ char *raw;
+ struct o2hb_disk_slot *slot;
+
+ reg->hr_tmp_block = kmalloc(reg->hr_block_bytes, GFP_KERNEL);
+ if (reg->hr_tmp_block == NULL) {
+ mlog_errno(-ENOMEM);
+ return -ENOMEM;
+ }
+
+ reg->hr_slots = kcalloc(reg->hr_blocks,
+ sizeof(struct o2hb_disk_slot), GFP_KERNEL);
+ if (reg->hr_slots == NULL) {
+ mlog_errno(-ENOMEM);
+ return -ENOMEM;
+ }
+
+ for(i = 0; i < reg->hr_blocks; i++) {
+ slot = &reg->hr_slots[i];
+ slot->ds_node_num = i;
+ INIT_LIST_HEAD(&slot->ds_live_item);
+ slot->ds_raw_block = NULL;
+ }
+
+ reg->hr_num_pages = (reg->hr_blocks + spp - 1) / spp;
+ mlog(ML_HEARTBEAT, "Going to require %u pages to cover %u blocks "
+ "at %u blocks per page\n",
+ reg->hr_num_pages, reg->hr_blocks, spp);
+
+ reg->hr_slot_data = kcalloc(reg->hr_num_pages, sizeof(struct page *),
+ GFP_KERNEL);
+ if (!reg->hr_slot_data) {
+ mlog_errno(-ENOMEM);
+ return -ENOMEM;
+ }
+
+ for(i = 0; i < reg->hr_num_pages; i++) {
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ mlog_errno(-ENOMEM);
+ return -ENOMEM;
+ }
+
+ reg->hr_slot_data[i] = page;
+
+ last_slot = i * spp;
+ raw = page_address(page);
+ for (j = 0;
+ (j < spp) && ((j + last_slot) < reg->hr_blocks);
+ j++) {
+ BUG_ON((j + last_slot) >= reg->hr_blocks);
+
+ slot = &reg->hr_slots[j + last_slot];
+ slot->ds_raw_block =
+ (struct o2hb_disk_heartbeat_block *) raw;
+
+ raw += reg->hr_block_bytes;
+ }
+ }
+
+ return 0;
+}
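+
+/* A worked example, assuming 4096 byte pages: with 512 byte heartbeat blocks
+ * hr_slots_per_page is 8, so a 255 block region needs (255 + 7) / 8 = 32
+ * pages, the last of which only maps 7 slots. */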
+
+/* Read in all the slots available and populate the tracking
+ * structures so that we can start with a baseline idea of what's
+ * there. */
+static int o2hb_populate_slot_data(struct o2hb_region *reg)
+{
+ int ret, i;
+ struct o2hb_disk_slot *slot;
+ struct o2hb_disk_heartbeat_block *hb_block;
+
+ mlog_entry_void();
+
+ ret = o2hb_read_slots(reg, reg->hr_blocks);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /* We only want to get an idea of the values initially in each
+ * slot, so we do no verification - o2hb_check_slot will
+ * actually determine if each configured slot is valid and
+ * whether any values have changed. */
+ for(i = 0; i < reg->hr_blocks; i++) {
+ slot = &reg->hr_slots[i];
+ hb_block = (struct o2hb_disk_heartbeat_block *) slot->ds_raw_block;
+
+ /* Only fill the values that o2hb_check_slot uses to
+ * determine changing slots */
+ slot->ds_last_time = le64_to_cpu(hb_block->hb_seq);
+ slot->ds_last_generation = le64_to_cpu(hb_block->hb_generation);
+ }
+
+out:
+ mlog_exit(ret);
+ return ret;
+}
+
+/* this is acting as commit; we set up all of hr_bdev and hr_task or nothing */
+static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
+ const char *page,
+ size_t count)
+{
+ long fd;
+ int sectsize;
+ char *p = (char *)page;
+ struct file *filp = NULL;
+ struct inode *inode = NULL;
+ ssize_t ret = -EINVAL;
+
+ if (reg->hr_bdev)
+ goto out;
+
+ /* We can't heartbeat without having had our node number
+ * configured yet. */
+ if (o2nm_this_node() == O2NM_MAX_NODES)
+ goto out;
+
+ fd = simple_strtol(p, &p, 0);
+ if (!p || (*p && (*p != '\n')))
+ goto out;
+
+ if (fd < 0 || fd >= INT_MAX)
+ goto out;
+
+ filp = fget(fd);
+ if (filp == NULL)
+ goto out;
+
+ if (reg->hr_blocks == 0 || reg->hr_start_block == 0 ||
+ reg->hr_block_bytes == 0)
+ goto out;
+
+ inode = igrab(filp->f_mapping->host);
+ if (inode == NULL)
+ goto out;
+
+ if (!S_ISBLK(inode->i_mode))
+ goto out;
+
+ reg->hr_bdev = I_BDEV(filp->f_mapping->host);
+ ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ, 0);
+ if (ret) {
+ reg->hr_bdev = NULL;
+ goto out;
+ }
+ inode = NULL;
+
+ bdevname(reg->hr_bdev, reg->hr_dev_name);
+
+ sectsize = bdev_hardsect_size(reg->hr_bdev);
+ if (sectsize != reg->hr_block_bytes) {
+ mlog(ML_ERROR,
+ "blocksize %u incorrect for device, expected %d",
+ reg->hr_block_bytes, sectsize);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ o2hb_init_region_params(reg);
+
+ /* Generation of zero is invalid */
+ do {
+ get_random_bytes(&reg->hr_generation,
+ sizeof(reg->hr_generation));
+ } while (reg->hr_generation == 0);
+
+ ret = o2hb_map_slot_data(reg);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = o2hb_populate_slot_data(reg);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ INIT_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout, reg);
+
+ /*
+ * A node is considered live after it has beat LIVE_THRESHOLD
+ * times. We're not steady until we've given them a chance
+ * _after_ our first read.
+ */
+ atomic_set(&reg->hr_steady_iterations, O2HB_LIVE_THRESHOLD + 1);
+
+ reg->hr_task = kthread_run(o2hb_thread, reg, "o2hb-%s",
+ reg->hr_item.ci_name);
+ if (IS_ERR(reg->hr_task)) {
+ ret = PTR_ERR(reg->hr_task);
+ mlog_errno(ret);
+ reg->hr_task = NULL;
+ goto out;
+ }
+
+ ret = wait_event_interruptible(o2hb_steady_queue,
+ atomic_read(&reg->hr_steady_iterations) == 0);
+ if (ret) {
+ kthread_stop(reg->hr_task);
+ reg->hr_task = NULL;
+ goto out;
+ }
+
+ ret = count;
+out:
+ if (filp)
+ fput(filp);
+ if (inode)
+ iput(inode);
+ if (ret < 0) {
+ if (reg->hr_bdev) {
+ blkdev_put(reg->hr_bdev);
+ reg->hr_bdev = NULL;
+ }
+ }
+ return ret;
+}
+
+struct o2hb_region_attribute {
+ struct configfs_attribute attr;
+ ssize_t (*show)(struct o2hb_region *, char *);
+ ssize_t (*store)(struct o2hb_region *, const char *, size_t);
+};
+
+static struct o2hb_region_attribute o2hb_region_attr_block_bytes = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "block_bytes",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = o2hb_region_block_bytes_read,
+ .store = o2hb_region_block_bytes_write,
+};
+
+static struct o2hb_region_attribute o2hb_region_attr_start_block = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "start_block",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = o2hb_region_start_block_read,
+ .store = o2hb_region_start_block_write,
+};
+
+static struct o2hb_region_attribute o2hb_region_attr_blocks = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "blocks",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = o2hb_region_blocks_read,
+ .store = o2hb_region_blocks_write,
+};
+
+static struct o2hb_region_attribute o2hb_region_attr_dev = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "dev",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = o2hb_region_dev_read,
+ .store = o2hb_region_dev_write,
+};
+
+static struct configfs_attribute *o2hb_region_attrs[] = {
+ &o2hb_region_attr_block_bytes.attr,
+ &o2hb_region_attr_start_block.attr,
+ &o2hb_region_attr_blocks.attr,
+ &o2hb_region_attr_dev.attr,
+ NULL,
+};
+
+static ssize_t o2hb_region_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *page)
+{
+ struct o2hb_region *reg = to_o2hb_region(item);
+ struct o2hb_region_attribute *o2hb_region_attr =
+ container_of(attr, struct o2hb_region_attribute, attr);
+ ssize_t ret = 0;
+
+ if (o2hb_region_attr->show)
+ ret = o2hb_region_attr->show(reg, page);
+ return ret;
+}
+
+static ssize_t o2hb_region_store(struct config_item *item,
+ struct configfs_attribute *attr,
+ const char *page, size_t count)
+{
+ struct o2hb_region *reg = to_o2hb_region(item);
+ struct o2hb_region_attribute *o2hb_region_attr =
+ container_of(attr, struct o2hb_region_attribute, attr);
+ ssize_t ret = -EINVAL;
+
+ if (o2hb_region_attr->store)
+ ret = o2hb_region_attr->store(reg, page, count);
+ return ret;
+}
+
+static struct configfs_item_operations o2hb_region_item_ops = {
+ .release = o2hb_region_release,
+ .show_attribute = o2hb_region_show,
+ .store_attribute = o2hb_region_store,
+};
+
+static struct config_item_type o2hb_region_type = {
+ .ct_item_ops = &o2hb_region_item_ops,
+ .ct_attrs = o2hb_region_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* heartbeat set */
+
+struct o2hb_heartbeat_group {
+ struct config_group hs_group;
+ /* some stuff? */
+};
+
+static struct o2hb_heartbeat_group *to_o2hb_heartbeat_group(struct config_group *group)
+{
+ return group ?
+ container_of(group, struct o2hb_heartbeat_group, hs_group)
+ : NULL;
+}
+
+static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *group,
+ const char *name)
+{
+ struct o2hb_region *reg = NULL;
+ struct config_item *ret = NULL;
+
+ reg = kcalloc(1, sizeof(struct o2hb_region), GFP_KERNEL);
+ if (reg == NULL)
+ goto out; /* ENOMEM */
+
+ config_item_init_type_name(&reg->hr_item, name, &o2hb_region_type);
+
+ ret = &reg->hr_item;
+
+ spin_lock(&o2hb_live_lock);
+ list_add_tail(&reg->hr_all_item, &o2hb_all_regions);
+ spin_unlock(&o2hb_live_lock);
+out:
+ if (ret == NULL)
+ kfree(reg);
+
+ return ret;
+}
+
+static void o2hb_heartbeat_group_drop_item(struct config_group *group,
+ struct config_item *item)
+{
+ struct o2hb_region *reg = to_o2hb_region(item);
+
+ /* stop the thread when the user removes the region dir */
+ if (reg->hr_task) {
+ kthread_stop(reg->hr_task);
+ reg->hr_task = NULL;
+ }
+
+ config_item_put(item);
+}
+
+struct o2hb_heartbeat_group_attribute {
+ struct configfs_attribute attr;
+ ssize_t (*show)(struct o2hb_heartbeat_group *, char *);
+ ssize_t (*store)(struct o2hb_heartbeat_group *, const char *, size_t);
+};
+
+static ssize_t o2hb_heartbeat_group_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *page)
+{
+ struct o2hb_heartbeat_group *reg = to_o2hb_heartbeat_group(to_config_group(item));
+ struct o2hb_heartbeat_group_attribute *o2hb_heartbeat_group_attr =
+ container_of(attr, struct o2hb_heartbeat_group_attribute, attr);
+ ssize_t ret = 0;
+
+ if (o2hb_heartbeat_group_attr->show)
+ ret = o2hb_heartbeat_group_attr->show(reg, page);
+ return ret;
+}
+
+static ssize_t o2hb_heartbeat_group_store(struct config_item *item,
+ struct configfs_attribute *attr,
+ const char *page, size_t count)
+{
+ struct o2hb_heartbeat_group *reg = to_o2hb_heartbeat_group(to_config_group(item));
+ struct o2hb_heartbeat_group_attribute *o2hb_heartbeat_group_attr =
+ container_of(attr, struct o2hb_heartbeat_group_attribute, attr);
+ ssize_t ret = -EINVAL;
+
+ if (o2hb_heartbeat_group_attr->store)
+ ret = o2hb_heartbeat_group_attr->store(reg, page, count);
+ return ret;
+}
+
+static ssize_t o2hb_heartbeat_group_threshold_show(struct o2hb_heartbeat_group *group,
+ char *page)
+{
+ return sprintf(page, "%u\n", o2hb_dead_threshold);
+}
+
+static ssize_t o2hb_heartbeat_group_threshold_store(struct o2hb_heartbeat_group *group,
+ const char *page,
+ size_t count)
+{
+ unsigned long tmp;
+ char *p = (char *)page;
+
+ tmp = simple_strtoul(p, &p, 10);
+ if (!p || (*p && (*p != '\n')))
+ return -EINVAL;
+
+ /* this will validate ranges for us. */
+ o2hb_dead_threshold_set((unsigned int) tmp);
+
+ return count;
+}
+
+static struct o2hb_heartbeat_group_attribute o2hb_heartbeat_group_attr_threshold = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "dead_threshold",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = o2hb_heartbeat_group_threshold_show,
+ .store = o2hb_heartbeat_group_threshold_store,
+};
+
+static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = {
+ &o2hb_heartbeat_group_attr_threshold.attr,
+ NULL,
+};
+
+static struct configfs_item_operations o2hb_hearbeat_group_item_ops = {
+ .show_attribute = o2hb_heartbeat_group_show,
+ .store_attribute = o2hb_heartbeat_group_store,
+};
+
+static struct configfs_group_operations o2hb_heartbeat_group_group_ops = {
+ .make_item = o2hb_heartbeat_group_make_item,
+ .drop_item = o2hb_heartbeat_group_drop_item,
+};
+
+static struct config_item_type o2hb_heartbeat_group_type = {
+ .ct_group_ops = &o2hb_heartbeat_group_group_ops,
+ .ct_item_ops = &o2hb_hearbeat_group_item_ops,
+ .ct_attrs = o2hb_heartbeat_group_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* this is just here to avoid touching group in heartbeat.h which the
+ * entire damn world #includes */
+struct config_group *o2hb_alloc_hb_set(void)
+{
+ struct o2hb_heartbeat_group *hs = NULL;
+ struct config_group *ret = NULL;
+
+ hs = kcalloc(1, sizeof(struct o2hb_heartbeat_group), GFP_KERNEL);
+ if (hs == NULL)
+ goto out;
+
+ config_group_init_type_name(&hs->hs_group, "heartbeat",
+ &o2hb_heartbeat_group_type);
+
+ ret = &hs->hs_group;
+out:
+ if (ret == NULL)
+ kfree(hs);
+ return ret;
+}
+
+void o2hb_free_hb_set(struct config_group *group)
+{
+ struct o2hb_heartbeat_group *hs = to_o2hb_heartbeat_group(group);
+ kfree(hs);
+}
+
+/* hb callback registration and issuing */
+
+static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type)
+{
+ if (type == O2HB_NUM_CB)
+ return ERR_PTR(-EINVAL);
+
+ return &o2hb_callbacks[type];
+}
+
+void o2hb_setup_callback(struct o2hb_callback_func *hc,
+ enum o2hb_callback_type type,
+ o2hb_cb_func *func,
+ void *data,
+ int priority)
+{
+ INIT_LIST_HEAD(&hc->hc_item);
+ hc->hc_func = func;
+ hc->hc_data = data;
+ hc->hc_priority = priority;
+ hc->hc_type = type;
+ hc->hc_magic = O2HB_CB_MAGIC;
+}
+EXPORT_SYMBOL_GPL(o2hb_setup_callback);
+
+int o2hb_register_callback(struct o2hb_callback_func *hc)
+{
+ struct o2hb_callback_func *tmp;
+ struct list_head *iter;
+ struct o2hb_callback *hbcall;
+ int ret;
+
+ BUG_ON(hc->hc_magic != O2HB_CB_MAGIC);
+ BUG_ON(!list_empty(&hc->hc_item));
+
+ hbcall = hbcall_from_type(hc->hc_type);
+ if (IS_ERR(hbcall)) {
+ ret = PTR_ERR(hbcall);
+ goto out;
+ }
+
+ down_write(&o2hb_callback_sem);
+
+ list_for_each(iter, &hbcall->list) {
+ tmp = list_entry(iter, struct o2hb_callback_func, hc_item);
+ if (hc->hc_priority < tmp->hc_priority) {
+ list_add_tail(&hc->hc_item, iter);
+ break;
+ }
+ }
+ if (list_empty(&hc->hc_item))
+ list_add_tail(&hc->hc_item, &hbcall->list);
+
+ up_write(&o2hb_callback_sem);
+ ret = 0;
+out:
+ mlog(ML_HEARTBEAT, "returning %d on behalf of %p for funcs %p\n",
+ ret, __builtin_return_address(0), hc);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(o2hb_register_callback);
+
+int o2hb_unregister_callback(struct o2hb_callback_func *hc)
+{
+ BUG_ON(hc->hc_magic != O2HB_CB_MAGIC);
+
+ mlog(ML_HEARTBEAT, "on behalf of %p for funcs %p\n",
+ __builtin_return_address(0), hc);
+
+ if (list_empty(&hc->hc_item))
+ return 0;
+
+ down_write(&o2hb_callback_sem);
+
+ list_del_init(&hc->hc_item);
+
+ up_write(&o2hb_callback_sem);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(o2hb_unregister_callback);
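+
+/* A minimal usage sketch (the callback function and its data pointer here
+ * are hypothetical, not something this file provides): a consumer embeds a
+ * struct o2hb_callback_func, sets it up once, registers it and later
+ * unregisters it. Registered callbacks fire in ascending hc_priority order.
+ *
+ *	static void my_node_down(struct o2nm_node *node, int node_num,
+ *				 void *data)
+ *	{
+ *		handle the node going away
+ *	}
+ *
+ *	static struct o2hb_callback_func my_down_cb;
+ *
+ *	o2hb_setup_callback(&my_down_cb, O2HB_NODE_DOWN_CB, my_node_down,
+ *			    NULL, 0);
+ *	o2hb_register_callback(&my_down_cb);
+ *	...
+ *	o2hb_unregister_callback(&my_down_cb);
+ */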
+
+int o2hb_check_node_heartbeating(u8 node_num)
+{
+ unsigned long testing_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+
+ o2hb_fill_node_map(testing_map, sizeof(testing_map));
+ if (!test_bit(node_num, testing_map)) {
+ mlog(ML_HEARTBEAT,
+ "node (%u) does not have heartbeating enabled.\n",
+ node_num);
+ return 0;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(o2hb_check_node_heartbeating);
+
+int o2hb_check_node_heartbeating_from_callback(u8 node_num)
+{
+ unsigned long testing_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+
+ o2hb_fill_node_map_from_callback(testing_map, sizeof(testing_map));
+ if (!test_bit(node_num, testing_map)) {
+ mlog(ML_HEARTBEAT,
+ "node (%u) does not have heartbeating enabled.\n",
+ node_num);
+ return 0;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(o2hb_check_node_heartbeating_from_callback);
+
+/* Makes sure our local node is configured with a node number, and is
+ * heartbeating. */
+int o2hb_check_local_node_heartbeating(void)
+{
+ u8 node_num;
+
+ /* if this node was set then we have networking */
+ node_num = o2nm_this_node();
+ if (node_num == O2NM_MAX_NODES) {
+ mlog(ML_HEARTBEAT, "this node has not been configured.\n");
+ return 0;
+ }
+
+ return o2hb_check_node_heartbeating(node_num);
+}
+EXPORT_SYMBOL_GPL(o2hb_check_local_node_heartbeating);
+
+/*
+ * this is just a hack until we get the plumbing which flips file systems
+ * read only and drops the hb ref instead of killing the node dead.
+ */
+void o2hb_stop_all_regions(void)
+{
+ struct o2hb_region *reg;
+
+ mlog(ML_ERROR, "stopping heartbeat on all active regions.\n");
+
+ spin_lock(&o2hb_live_lock);
+
+ list_for_each_entry(reg, &o2hb_all_regions, hr_all_item)
+ reg->hr_unclean_stop = 1;
+
+ spin_unlock(&o2hb_live_lock);
+}
+EXPORT_SYMBOL_GPL(o2hb_stop_all_regions);
diff --git a/fs/ocfs2/cluster/heartbeat.h b/fs/ocfs2/cluster/heartbeat.h
new file mode 100644
index 00000000000..cac6223206a
--- /dev/null
+++ b/fs/ocfs2/cluster/heartbeat.h
@@ -0,0 +1,82 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * heartbeat.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+#ifndef O2CLUSTER_HEARTBEAT_H
+#define O2CLUSTER_HEARTBEAT_H
+
+#include "ocfs2_heartbeat.h"
+
+#define O2HB_REGION_TIMEOUT_MS 2000
+
+/* number of changes to be seen as live */
+#define O2HB_LIVE_THRESHOLD 2
+/* number of equal samples to be seen as dead */
+extern unsigned int o2hb_dead_threshold;
+#define O2HB_DEFAULT_DEAD_THRESHOLD 7
+/* Otherwise MAX_WRITE_TIMEOUT will be zero... */
+#define O2HB_MIN_DEAD_THRESHOLD 2
+#define O2HB_MAX_WRITE_TIMEOUT_MS (O2HB_REGION_TIMEOUT_MS * (o2hb_dead_threshold - 1))
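+/* With the default dead threshold of 7 and the 2000 ms region timeout above,
+ * the write timeout works out to 2000 * (7 - 1) = 12000 ms; the minimum
+ * threshold of 2 keeps it from ever dropping below 2000 ms. */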
+
+#define O2HB_CB_MAGIC 0x51d1e4ec
+
+/* callback stuff */
+enum o2hb_callback_type {
+ O2HB_NODE_DOWN_CB = 0,
+ O2HB_NODE_UP_CB,
+ O2HB_NUM_CB
+};
+
+struct o2nm_node;
+typedef void (o2hb_cb_func)(struct o2nm_node *, int, void *);
+
+struct o2hb_callback_func {
+ u32 hc_magic;
+ struct list_head hc_item;
+ o2hb_cb_func *hc_func;
+ void *hc_data;
+ int hc_priority;
+ enum o2hb_callback_type hc_type;
+};
+
+struct config_group *o2hb_alloc_hb_set(void);
+void o2hb_free_hb_set(struct config_group *group);
+
+void o2hb_setup_callback(struct o2hb_callback_func *hc,
+ enum o2hb_callback_type type,
+ o2hb_cb_func *func,
+ void *data,
+ int priority);
+int o2hb_register_callback(struct o2hb_callback_func *hc);
+int o2hb_unregister_callback(struct o2hb_callback_func *hc);
+void o2hb_fill_node_map(unsigned long *map,
+ unsigned bytes);
+void o2hb_init(void);
+int o2hb_check_node_heartbeating(u8 node_num);
+int o2hb_check_node_heartbeating_from_callback(u8 node_num);
+int o2hb_check_local_node_heartbeating(void);
+void o2hb_stop_all_regions(void);
+
+#endif /* O2CLUSTER_HEARTBEAT_H */
diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
new file mode 100644
index 00000000000..fd741cea570
--- /dev/null
+++ b/fs/ocfs2/cluster/masklog.c
@@ -0,0 +1,166 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2004, 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <asm/uaccess.h>
+
+#include "masklog.h"
+
+struct mlog_bits mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK);
+EXPORT_SYMBOL_GPL(mlog_and_bits);
+struct mlog_bits mlog_not_bits = MLOG_BITS_RHS(MLOG_INITIAL_NOT_MASK);
+EXPORT_SYMBOL_GPL(mlog_not_bits);
+
+static ssize_t mlog_mask_show(u64 mask, char *buf)
+{
+ char *state;
+
+ if (__mlog_test_u64(mask, mlog_and_bits))
+ state = "allow";
+ else if (__mlog_test_u64(mask, mlog_not_bits))
+ state = "deny";
+ else
+ state = "off";
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", state);
+}
+
+static ssize_t mlog_mask_store(u64 mask, const char *buf, size_t count)
+{
+ if (!strnicmp(buf, "allow", 5)) {
+ __mlog_set_u64(mask, mlog_and_bits);
+ __mlog_clear_u64(mask, mlog_not_bits);
+ } else if (!strnicmp(buf, "deny", 4)) {
+ __mlog_set_u64(mask, mlog_not_bits);
+ __mlog_clear_u64(mask, mlog_and_bits);
+ } else if (!strnicmp(buf, "off", 3)) {
+ __mlog_clear_u64(mask, mlog_not_bits);
+ __mlog_clear_u64(mask, mlog_and_bits);
+ } else
+ return -EINVAL;
+
+ return count;
+}
+
+struct mlog_attribute {
+ struct attribute attr;
+ u64 mask;
+};
+
+#define to_mlog_attr(_attr) container_of(_attr, struct mlog_attribute, attr)
+
+#define define_mask(_name) { \
+ .attr = { \
+ .name = #_name, \
+ .mode = S_IRUGO | S_IWUSR, \
+ }, \
+ .mask = ML_##_name, \
+}
+
+static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
+ define_mask(ENTRY),
+ define_mask(EXIT),
+ define_mask(TCP),
+ define_mask(MSG),
+ define_mask(SOCKET),
+ define_mask(HEARTBEAT),
+ define_mask(HB_BIO),
+ define_mask(DLMFS),
+ define_mask(DLM),
+ define_mask(DLM_DOMAIN),
+ define_mask(DLM_THREAD),
+ define_mask(DLM_MASTER),
+ define_mask(DLM_RECOVERY),
+ define_mask(AIO),
+ define_mask(JOURNAL),
+ define_mask(DISK_ALLOC),
+ define_mask(SUPER),
+ define_mask(FILE_IO),
+ define_mask(EXTENT_MAP),
+ define_mask(DLM_GLUE),
+ define_mask(BH_IO),
+ define_mask(UPTODATE),
+ define_mask(NAMEI),
+ define_mask(INODE),
+ define_mask(VOTE),
+ define_mask(DCACHE),
+ define_mask(CONN),
+ define_mask(QUORUM),
+ define_mask(EXPORT),
+ define_mask(ERROR),
+ define_mask(NOTICE),
+ define_mask(KTHREAD),
+};
+
+static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, };
+
+static ssize_t mlog_show(struct kobject *obj, struct attribute *attr,
+ char *buf)
+{
+ struct mlog_attribute *mlog_attr = to_mlog_attr(attr);
+
+ return mlog_mask_show(mlog_attr->mask, buf);
+}
+
+static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mlog_attribute *mlog_attr = to_mlog_attr(attr);
+
+ return mlog_mask_store(mlog_attr->mask, buf, count);
+}
+
+static struct sysfs_ops mlog_attr_ops = {
+ .show = mlog_show,
+ .store = mlog_store,
+};
+
+static struct kobj_type mlog_ktype = {
+ .default_attrs = mlog_attr_ptrs,
+ .sysfs_ops = &mlog_attr_ops,
+};
+
+static struct kset mlog_kset = {
+ .kobj = {.name = "logmask", .ktype = &mlog_ktype},
+};
+
+int mlog_sys_init(struct subsystem *o2cb_subsys)
+{
+ int i = 0;
+
+ while (mlog_attrs[i].attr.mode) {
+ mlog_attr_ptrs[i] = &mlog_attrs[i].attr;
+ i++;
+ }
+ mlog_attr_ptrs[i] = NULL;
+
+ mlog_kset.subsys = o2cb_subsys;
+ return kset_register(&mlog_kset);
+}
+
+void mlog_sys_shutdown(void)
+{
+ kset_unregister(&mlog_kset);
+}
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
new file mode 100644
index 00000000000..e8c56a3d9c6
--- /dev/null
+++ b/fs/ocfs2/cluster/masklog.h
@@ -0,0 +1,274 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef O2CLUSTER_MASKLOG_H
+#define O2CLUSTER_MASKLOG_H
+
+/*
+ * For now this is a trivial wrapper around printk() that gives the critical
+ * ability to enable sets of debugging output at run-time. In the future this
+ * will almost certainly be redirected to relayfs so that it can pay a
+ * substantially lower heisenberg tax.
+ *
+ * Callers associate the message with a bitmask and a global bitmask is
+ * maintained with help from /proc. If any of the bits match the message is
+ * output.
+ *
+ * We must have efficient bit tests on i386 and it seems gcc still emits crazy
+ * code for the 64bit compare. It emits very good code for the dual unsigned
+ * long tests, though, completely avoiding tests that can never pass if the
+ * caller gives a constant bitmask that fills one of the longs with all 0s. So
+ * the desire is to have almost all of the calls decided on by comparing just
+ * one of the longs. This leads to having infrequently given bits that are
+ * frequently matched in the high bits.
+ *
+ * _ERROR and _NOTICE are used for messages that always go to the console and
+ * have appropriate KERN_ prefixes. We wrap these in our function instead of
+ * just calling printk() so that this can eventually make its way through
+ * relayfs along with the debugging messages. Everything else gets KERN_DEBUG.
+ * The inline tests and macro dance give GCC the opportunity to quite cleverly
+ * only emit the appropriate printk() when the caller passes in a constant
+ * mask, as is almost always the case.
+ *
+ * All this bitmask nonsense is hidden from the /proc interface so that Joel
+ * doesn't have an aneurysm. Reading the file gives a straightforward
+ * indication of which bits are on or off:
+ * ENTRY off
+ * EXIT off
+ * TCP off
+ * MSG off
+ * SOCKET off
+ * ERROR off
+ * NOTICE on
+ *
+ * Writing changes the state of a given bit and requires a strictly formatted
+ * single write() call:
+ *
+ * write(fd, "ENTRY on", 8);
+ *
+ * would turn the entry bit on. "1" is also accepted in the place of "on", and
+ * "off" and "0" behave as expected.
+ *
+ * Some trivial shell can flip all the bits on or off:
+ *
+ * log_mask="/proc/fs/ocfs2_nodemanager/log_mask"
+ * cat $log_mask | (
+ * while read bit status; do
+ * # $1 is "on" or "off", say
+ * echo "$bit $1" > $log_mask
+ * done
+ * )
+ */
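+
+/*
+ * A sketch of typical usage (the mask chosen here is only an example): a
+ * compilation unit picks its default bits by defining MLOG_MASK_PREFIX
+ * before including this header, then ORs in per-call bits via mlog():
+ *
+ *	#define MLOG_MASK_PREFIX ML_HEARTBEAT
+ *	#include "masklog.h"
+ *
+ *	mlog(0, "only emitted when the HEARTBEAT bit is allowed\n");
+ *	mlog(ML_ERROR, "with the default masks, goes to the console via KERN_ERR\n");
+ */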
+
+/* for task_struct */
+#include <linux/sched.h>
+
+/* bits that are frequently given and infrequently matched in the low word */
+/* NOTE: If you add a flag, you need to also update mlog.c! */
+#define ML_ENTRY 0x0000000000000001ULL /* func call entry */
+#define ML_EXIT 0x0000000000000002ULL /* func call exit */
+#define ML_TCP 0x0000000000000004ULL /* net cluster/tcp.c */
+#define ML_MSG 0x0000000000000008ULL /* net network messages */
+#define ML_SOCKET 0x0000000000000010ULL /* net socket lifetime */
+#define ML_HEARTBEAT 0x0000000000000020ULL /* hb all heartbeat tracking */
+#define ML_HB_BIO 0x0000000000000040ULL /* hb io tracing */
+#define ML_DLMFS 0x0000000000000080ULL /* dlm user dlmfs */
+#define ML_DLM 0x0000000000000100ULL /* dlm general debugging */
+#define ML_DLM_DOMAIN 0x0000000000000200ULL /* dlm domain debugging */
+#define ML_DLM_THREAD 0x0000000000000400ULL /* dlm domain thread */
+#define ML_DLM_MASTER 0x0000000000000800ULL /* dlm master functions */
+#define ML_DLM_RECOVERY 0x0000000000001000ULL /* dlm recovery functions */
+#define ML_AIO 0x0000000000002000ULL /* ocfs2 aio read and write */
+#define ML_JOURNAL 0x0000000000004000ULL /* ocfs2 journalling functions */
+#define ML_DISK_ALLOC 0x0000000000008000ULL /* ocfs2 disk allocation */
+#define ML_SUPER 0x0000000000010000ULL /* ocfs2 mount / umount */
+#define ML_FILE_IO 0x0000000000020000ULL /* ocfs2 file I/O */
+#define ML_EXTENT_MAP 0x0000000000040000ULL /* ocfs2 extent map caching */
+#define ML_DLM_GLUE 0x0000000000080000ULL /* ocfs2 dlm glue layer */
+#define ML_BH_IO 0x0000000000100000ULL /* ocfs2 buffer I/O */
+#define ML_UPTODATE 0x0000000000200000ULL /* ocfs2 caching sequence #'s */
+#define ML_NAMEI 0x0000000000400000ULL /* ocfs2 directory / namespace */
+#define ML_INODE 0x0000000000800000ULL /* ocfs2 inode manipulation */
+#define ML_VOTE 0x0000000001000000ULL /* ocfs2 node messaging */
+#define ML_DCACHE 0x0000000002000000ULL /* ocfs2 dcache operations */
+#define ML_CONN 0x0000000004000000ULL /* net connection management */
+#define ML_QUORUM 0x0000000008000000ULL /* net connection quorum */
+#define ML_EXPORT 0x0000000010000000ULL /* ocfs2 export operations */
+/* bits that are infrequently given and frequently matched in the high word */
+#define ML_ERROR 0x0000000100000000ULL /* sent to KERN_ERR */
+#define ML_NOTICE 0x0000000200000000ULL /* sent to KERN_NOTICE */
+#define ML_KTHREAD 0x0000000400000000ULL /* kernel thread activity */
+
+#define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE)
+#define MLOG_INITIAL_NOT_MASK (ML_ENTRY|ML_EXIT)
+#ifndef MLOG_MASK_PREFIX
+#define MLOG_MASK_PREFIX 0
+#endif
+
+#define MLOG_MAX_BITS 64
+
+struct mlog_bits {
+ unsigned long words[MLOG_MAX_BITS / BITS_PER_LONG];
+};
+
+extern struct mlog_bits mlog_and_bits, mlog_not_bits;
+
+#if BITS_PER_LONG == 32
+
+#define __mlog_test_u64(mask, bits) \
+ ( (u32)(mask & 0xffffffff) & bits.words[0] || \
+ ((u64)(mask) >> 32) & bits.words[1] )
+#define __mlog_set_u64(mask, bits) do { \
+ bits.words[0] |= (u32)(mask & 0xffffffff); \
+ bits.words[1] |= (u64)(mask) >> 32; \
+} while (0)
+#define __mlog_clear_u64(mask, bits) do { \
+ bits.words[0] &= ~((u32)(mask & 0xffffffff)); \
+ bits.words[1] &= ~((u64)(mask) >> 32); \
+} while (0)
+#define MLOG_BITS_RHS(mask) { \
+ { \
+ [0] = (u32)(mask & 0xffffffff), \
+ [1] = (u64)(mask) >> 32, \
+ } \
+}
+
+#else /* 32bit long above, 64bit long below */
+
+#define __mlog_test_u64(mask, bits) ((mask) & bits.words[0])
+#define __mlog_set_u64(mask, bits) do { \
+ bits.words[0] |= (mask); \
+} while (0)
+#define __mlog_clear_u64(mask, bits) do { \
+ bits.words[0] &= ~(mask); \
+} while (0)
+#define MLOG_BITS_RHS(mask) { { (mask) } }
+
+#endif
+
+/*
+ * smp_processor_id() "helpfully" screams when called outside preemptible
+ * regions in current kernels. sles doesn't have the variants that don't
+ * scream. just do this instead of trying to guess which we're building
+ * against.. *sigh*.
+ */
+#define __mlog_cpu_guess ({ \
+ unsigned long _cpu = get_cpu(); \
+ put_cpu(); \
+ _cpu; \
+})
+
+/* In the following two macros, the whitespace after the ',' just
+ * before ##args is intentional. Otherwise, gcc 2.95 will eat the
+ * previous token if args expands to nothing.
+ */
+#define __mlog_printk(level, fmt, args...) \
+ printk(level "(%u,%lu):%s:%d " fmt, current->pid, \
+ __mlog_cpu_guess, __PRETTY_FUNCTION__, __LINE__ , \
+ ##args)
+
+#define mlog(mask, fmt, args...) do { \
+ u64 __m = MLOG_MASK_PREFIX | (mask); \
+ if (__mlog_test_u64(__m, mlog_and_bits) && \
+ !__mlog_test_u64(__m, mlog_not_bits)) { \
+ if (__m & ML_ERROR) \
+ __mlog_printk(KERN_ERR, "ERROR: "fmt , ##args); \
+ else if (__m & ML_NOTICE) \
+ __mlog_printk(KERN_NOTICE, fmt , ##args); \
+ else __mlog_printk(KERN_INFO, fmt , ##args); \
+ } \
+} while (0)
+
+#define mlog_errno(st) do { \
+ int _st = (st); \
+ if (_st != -ERESTARTSYS && _st != -EINTR && \
+ _st != AOP_TRUNCATED_PAGE) \
+ mlog(ML_ERROR, "status = %lld\n", (long long)_st); \
+} while (0)
+
+#define mlog_entry(fmt, args...) do { \
+ mlog(ML_ENTRY, "ENTRY:" fmt , ##args); \
+} while (0)
+
+#define mlog_entry_void() do { \
+ mlog(ML_ENTRY, "ENTRY:\n"); \
+} while (0)
+
+/*
+ * We disable this for sparse.
+ */
+#if !defined(__CHECKER__)
+#define mlog_exit(st) do { \
+ if (__builtin_types_compatible_p(typeof(st), unsigned long)) \
+ mlog(ML_EXIT, "EXIT: %lu\n", (unsigned long) (st)); \
+ else if (__builtin_types_compatible_p(typeof(st), signed long)) \
+ mlog(ML_EXIT, "EXIT: %ld\n", (signed long) (st)); \
+ else if (__builtin_types_compatible_p(typeof(st), unsigned int) \
+ || __builtin_types_compatible_p(typeof(st), unsigned short) \
+ || __builtin_types_compatible_p(typeof(st), unsigned char)) \
+ mlog(ML_EXIT, "EXIT: %u\n", (unsigned int) (st)); \
+ else if (__builtin_types_compatible_p(typeof(st), signed int) \
+ || __builtin_types_compatible_p(typeof(st), signed short) \
+ || __builtin_types_compatible_p(typeof(st), signed char)) \
+ mlog(ML_EXIT, "EXIT: %d\n", (signed int) (st)); \
+ else if (__builtin_types_compatible_p(typeof(st), long long)) \
+ mlog(ML_EXIT, "EXIT: %lld\n", (long long) (st)); \
+ else \
+ mlog(ML_EXIT, "EXIT: %llu\n", (unsigned long long) (st)); \
+} while (0)
+#else
+#define mlog_exit(st) do { \
+ mlog(ML_EXIT, "EXIT: %lld\n", (long long) (st)); \
+} while (0)
+#endif
+
+#define mlog_exit_ptr(ptr) do { \
+ mlog(ML_EXIT, "EXIT: %p\n", ptr); \
+} while (0)
+
+#define mlog_exit_void() do { \
+ mlog(ML_EXIT, "EXIT\n"); \
+} while (0)
+
+#define mlog_bug_on_msg(cond, fmt, args...) do { \
+ if (cond) { \
+ mlog(ML_ERROR, "bug expression: " #cond "\n"); \
+ mlog(ML_ERROR, fmt, ##args); \
+ BUG(); \
+ } \
+} while (0)
+
+#if (BITS_PER_LONG == 32) || defined(CONFIG_X86_64)
+#define MLFi64 "lld"
+#define MLFu64 "llu"
+#define MLFx64 "llx"
+#else
+#define MLFi64 "ld"
+#define MLFu64 "lu"
+#define MLFx64 "lx"
+#endif
+
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+int mlog_sys_init(struct subsystem *o2cb_subsys);
+void mlog_sys_shutdown(void);
+
+#endif /* O2CLUSTER_MASKLOG_H */
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
new file mode 100644
index 00000000000..cf7828f2336
--- /dev/null
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -0,0 +1,791 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2004, 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sysctl.h>
+#include <linux/configfs.h>
+
+#include "endian.h"
+#include "tcp.h"
+#include "nodemanager.h"
+#include "heartbeat.h"
+#include "masklog.h"
+#include "sys.h"
+#include "ver.h"
+
+/* for now we operate under the assertion that there can be only one
+ * cluster active at a time. Changing this will require trickling
+ * cluster references throughout where nodes are looked up */
+static struct o2nm_cluster *o2nm_single_cluster = NULL;
+
+#define OCFS2_MAX_HB_CTL_PATH 256
+static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
+
+static ctl_table ocfs2_nm_table[] = {
+ {
+ .ctl_name = 1,
+ .procname = "hb_ctl_path",
+ .data = ocfs2_hb_ctl_path,
+ .maxlen = OCFS2_MAX_HB_CTL_PATH,
+ .mode = 0644,
+ .proc_handler = &proc_dostring,
+ .strategy = &sysctl_string,
+ },
+ { .ctl_name = 0 }
+};
+
+static ctl_table ocfs2_mod_table[] = {
+ {
+ .ctl_name = KERN_OCFS2_NM,
+ .procname = "nm",
+ .data = NULL,
+ .maxlen = 0,
+ .mode = 0555,
+ .child = ocfs2_nm_table
+ },
+ { .ctl_name = 0}
+};
+
+static ctl_table ocfs2_kern_table[] = {
+ {
+ .ctl_name = KERN_OCFS2,
+ .procname = "ocfs2",
+ .data = NULL,
+ .maxlen = 0,
+ .mode = 0555,
+ .child = ocfs2_mod_table
+ },
+ { .ctl_name = 0}
+};
+
+static ctl_table ocfs2_root_table[] = {
+ {
+ .ctl_name = CTL_FS,
+ .procname = "fs",
+ .data = NULL,
+ .maxlen = 0,
+ .mode = 0555,
+ .child = ocfs2_kern_table
+ },
+ { .ctl_name = 0 }
+};
+
+static struct ctl_table_header *ocfs2_table_header = NULL;
+
+const char *o2nm_get_hb_ctl_path(void)
+{
+ return ocfs2_hb_ctl_path;
+}
+EXPORT_SYMBOL_GPL(o2nm_get_hb_ctl_path);
+
+struct o2nm_cluster {
+ struct config_group cl_group;
+ unsigned cl_has_local:1;
+ u8 cl_local_node;
+ rwlock_t cl_nodes_lock;
+ struct o2nm_node *cl_nodes[O2NM_MAX_NODES];
+ struct rb_root cl_node_ip_tree;
+ /* this bitmap is part of a hack for disk bitmap.. will go eventually. - zab */
+ unsigned long cl_nodes_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+};
+
+struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
+{
+ struct o2nm_node *node = NULL;
+
+ if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL)
+ goto out;
+
+ read_lock(&o2nm_single_cluster->cl_nodes_lock);
+ node = o2nm_single_cluster->cl_nodes[node_num];
+ if (node)
+ config_item_get(&node->nd_item);
+ read_unlock(&o2nm_single_cluster->cl_nodes_lock);
+out:
+ return node;
+}
+EXPORT_SYMBOL_GPL(o2nm_get_node_by_num);
+
+int o2nm_configured_node_map(unsigned long *map, unsigned bytes)
+{
+ struct o2nm_cluster *cluster = o2nm_single_cluster;
+
+ BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap)));
+
+ if (cluster == NULL)
+ return -EINVAL;
+
+ read_lock(&cluster->cl_nodes_lock);
+ memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap));
+ read_unlock(&cluster->cl_nodes_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(o2nm_configured_node_map);
+
+static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster,
+ __be32 ip_needle,
+ struct rb_node ***ret_p,
+ struct rb_node **ret_parent)
+{
+ struct rb_node **p = &cluster->cl_node_ip_tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct o2nm_node *node, *ret = NULL;
+
+ while (*p) {
+ parent = *p;
+ node = rb_entry(parent, struct o2nm_node, nd_ip_node);
+
+ if (memcmp(&ip_needle, &node->nd_ipv4_address,
+ sizeof(ip_needle)) < 0)
+ p = &(*p)->rb_left;
+ else if (memcmp(&ip_needle, &node->nd_ipv4_address,
+ sizeof(ip_needle)) > 0)
+ p = &(*p)->rb_right;
+ else {
+ ret = node;
+ break;
+ }
+ }
+
+ if (ret_p != NULL)
+ *ret_p = p;
+ if (ret_parent != NULL)
+ *ret_parent = parent;
+
+ return ret;
+}
+
+struct o2nm_node *o2nm_get_node_by_ip(__be32 addr)
+{
+ struct o2nm_node *node = NULL;
+ struct o2nm_cluster *cluster = o2nm_single_cluster;
+
+ if (cluster == NULL)
+ goto out;
+
+ read_lock(&cluster->cl_nodes_lock);
+ node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
+ if (node)
+ config_item_get(&node->nd_item);
+ read_unlock(&cluster->cl_nodes_lock);
+
+out:
+ return node;
+}
+EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip);
+
+void o2nm_node_put(struct o2nm_node *node)
+{
+ config_item_put(&node->nd_item);
+}
+EXPORT_SYMBOL_GPL(o2nm_node_put);
+
+void o2nm_node_get(struct o2nm_node *node)
+{
+ config_item_get(&node->nd_item);
+}
+EXPORT_SYMBOL_GPL(o2nm_node_get);
+
+u8 o2nm_this_node(void)
+{
+ u8 node_num = O2NM_MAX_NODES;
+
+ if (o2nm_single_cluster && o2nm_single_cluster->cl_has_local)
+ node_num = o2nm_single_cluster->cl_local_node;
+
+ return node_num;
+}
+EXPORT_SYMBOL_GPL(o2nm_this_node);
+
+/* node configfs bits */
+
+static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item)
+{
+ return item ?
+ container_of(to_config_group(item), struct o2nm_cluster,
+ cl_group)
+ : NULL;
+}
+
+static struct o2nm_node *to_o2nm_node(struct config_item *item)
+{
+ return item ? container_of(item, struct o2nm_node, nd_item) : NULL;
+}
+
+static void o2nm_node_release(struct config_item *item)
+{
+ struct o2nm_node *node = to_o2nm_node(item);
+ kfree(node);
+}
+
+static ssize_t o2nm_node_num_read(struct o2nm_node *node, char *page)
+{
+ return sprintf(page, "%d\n", node->nd_num);
+}
+
+static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
+{
+ /* through the first node_set .parent
+ * mycluster/node/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
+ return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
+}
+
+enum {
+ O2NM_NODE_ATTR_NUM = 0,
+ O2NM_NODE_ATTR_PORT,
+ O2NM_NODE_ATTR_ADDRESS,
+ O2NM_NODE_ATTR_LOCAL,
+};
+
+static ssize_t o2nm_node_num_write(struct o2nm_node *node, const char *page,
+ size_t count)
+{
+ struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
+ unsigned long tmp;
+ char *p = (char *)page;
+
+ tmp = simple_strtoul(p, &p, 0);
+ if (!p || (*p && (*p != '\n')))
+ return -EINVAL;
+
+ if (tmp >= O2NM_MAX_NODES)
+ return -ERANGE;
+
+ /* once we're in the cl_nodes tree networking can look us up by
+ * node number and try to use our address and port attributes
+ * to connect to this node.. make sure that they've been set
+ * before writing the node attribute? */
+ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
+ !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
+ return -EINVAL; /* XXX */
+
+ write_lock(&cluster->cl_nodes_lock);
+ if (cluster->cl_nodes[tmp])
+ p = NULL;
+ else {
+ cluster->cl_nodes[tmp] = node;
+ node->nd_num = tmp;
+ set_bit(tmp, cluster->cl_nodes_bitmap);
+ }
+ write_unlock(&cluster->cl_nodes_lock);
+ if (p == NULL)
+ return -EEXIST;
+
+ return count;
+}
+static ssize_t o2nm_node_ipv4_port_read(struct o2nm_node *node, char *page)
+{
+ return sprintf(page, "%u\n", ntohs(node->nd_ipv4_port));
+}
+
+static ssize_t o2nm_node_ipv4_port_write(struct o2nm_node *node,
+ const char *page, size_t count)
+{
+ unsigned long tmp;
+ char *p = (char *)page;
+
+ tmp = simple_strtoul(p, &p, 0);
+ if (!p || (*p && (*p != '\n')))
+ return -EINVAL;
+
+ if (tmp == 0)
+ return -EINVAL;
+ if (tmp >= (u16)-1)
+ return -ERANGE;
+
+ node->nd_ipv4_port = htons(tmp);
+
+ return count;
+}
+
+static ssize_t o2nm_node_ipv4_address_read(struct o2nm_node *node, char *page)
+{
+ return sprintf(page, "%u.%u.%u.%u\n", NIPQUAD(node->nd_ipv4_address));
+}
+
+static ssize_t o2nm_node_ipv4_address_write(struct o2nm_node *node,
+ const char *page,
+ size_t count)
+{
+ struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
+ int ret, i;
+ struct rb_node **p, *parent;
+ unsigned int octets[4];
+ __be32 ipv4_addr = 0;
+
+ ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2],
+ &octets[1], &octets[0]);
+ if (ret != 4)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(octets); i++) {
+ if (octets[i] > 255)
+ return -ERANGE;
+ be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
+ }
+
+ ret = 0;
+ write_lock(&cluster->cl_nodes_lock);
+ if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
+ ret = -EEXIST;
+ else {
+ rb_link_node(&node->nd_ip_node, parent, p);
+ rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
+ }
+ write_unlock(&cluster->cl_nodes_lock);
+ if (ret)
+ return ret;
+
+ memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr));
+
+ return count;
+}
+
+static ssize_t o2nm_node_local_read(struct o2nm_node *node, char *page)
+{
+ return sprintf(page, "%d\n", node->nd_local);
+}
+
+static ssize_t o2nm_node_local_write(struct o2nm_node *node, const char *page,
+ size_t count)
+{
+ struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
+ unsigned long tmp;
+ char *p = (char *)page;
+ ssize_t ret;
+
+ tmp = simple_strtoul(p, &p, 0);
+ if (!p || (*p && (*p != '\n')))
+ return -EINVAL;
+
+ tmp = !!tmp; /* boolean of whether this node wants to be local */
+
+ /* setting local turns on networking rx for now so we require having
+ * set everything else first */
+ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
+ !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
+ !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
+ return -EINVAL; /* XXX */
+
+ /* the only failure case is trying to set a new local node
+ * when a different one is already set */
+ if (tmp && tmp == cluster->cl_has_local &&
+ cluster->cl_local_node != node->nd_num)
+ return -EBUSY;
+
+ /* bring up the rx thread if we're setting the new local node. */
+ if (tmp && !cluster->cl_has_local) {
+ ret = o2net_start_listening(node);
+ if (ret)
+ return ret;
+ }
+
+ if (!tmp && cluster->cl_has_local &&
+ cluster->cl_local_node == node->nd_num) {
+ o2net_stop_listening(node);
+ cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
+ }
+
+ node->nd_local = tmp;
+ if (node->nd_local) {
+ cluster->cl_has_local = tmp;
+ cluster->cl_local_node = node->nd_num;
+ }
+
+ return count;
+}
+
+struct o2nm_node_attribute {
+ struct configfs_attribute attr;
+ ssize_t (*show)(struct o2nm_node *, char *);
+ ssize_t (*store)(struct o2nm_node *, const char *, size_t);
+};
+
+static struct o2nm_node_attribute o2nm_node_attr_num = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "num",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = o2nm_node_num_read,
+ .store = o2nm_node_num_write,
+};
+
+static struct o2nm_node_attribute o2nm_node_attr_ipv4_port = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "ipv4_port",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = o2nm_node_ipv4_port_read,
+ .store = o2nm_node_ipv4_port_write,
+};
+
+static struct o2nm_node_attribute o2nm_node_attr_ipv4_address = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "ipv4_address",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = o2nm_node_ipv4_address_read,
+ .store = o2nm_node_ipv4_address_write,
+};
+
+static struct o2nm_node_attribute o2nm_node_attr_local = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "local",
+ .ca_mode = S_IRUGO | S_IWUSR },
+ .show = o2nm_node_local_read,
+ .store = o2nm_node_local_write,
+};
+
+static struct configfs_attribute *o2nm_node_attrs[] = {
+ [O2NM_NODE_ATTR_NUM] = &o2nm_node_attr_num.attr,
+ [O2NM_NODE_ATTR_PORT] = &o2nm_node_attr_ipv4_port.attr,
+ [O2NM_NODE_ATTR_ADDRESS] = &o2nm_node_attr_ipv4_address.attr,
+ [O2NM_NODE_ATTR_LOCAL] = &o2nm_node_attr_local.attr,
+ NULL,
+};
+
+static int o2nm_attr_index(struct configfs_attribute *attr)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(o2nm_node_attrs); i++) {
+ if (attr == o2nm_node_attrs[i])
+ return i;
+ }
+ BUG();
+ return 0;
+}
+
+static ssize_t o2nm_node_show(struct config_item *item,
+ struct configfs_attribute *attr,
+ char *page)
+{
+ struct o2nm_node *node = to_o2nm_node(item);
+ struct o2nm_node_attribute *o2nm_node_attr =
+ container_of(attr, struct o2nm_node_attribute, attr);
+ ssize_t ret = 0;
+
+ if (o2nm_node_attr->show)
+ ret = o2nm_node_attr->show(node, page);
+ return ret;
+}
+
+static ssize_t o2nm_node_store(struct config_item *item,
+ struct configfs_attribute *attr,
+ const char *page, size_t count)
+{
+ struct o2nm_node *node = to_o2nm_node(item);
+ struct o2nm_node_attribute *o2nm_node_attr =
+ container_of(attr, struct o2nm_node_attribute, attr);
+ ssize_t ret;
+ int attr_index = o2nm_attr_index(attr);
+
+ if (o2nm_node_attr->store == NULL) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (test_bit(attr_index, &node->nd_set_attributes))
+ return -EBUSY;
+
+ ret = o2nm_node_attr->store(node, page, count);
+ if (ret < count)
+ goto out;
+
+ set_bit(attr_index, &node->nd_set_attributes);
+out:
+ return ret;
+}
+
+static struct configfs_item_operations o2nm_node_item_ops = {
+ .release = o2nm_node_release,
+ .show_attribute = o2nm_node_show,
+ .store_attribute = o2nm_node_store,
+};
+
+static struct config_item_type o2nm_node_type = {
+ .ct_item_ops = &o2nm_node_item_ops,
+ .ct_attrs = o2nm_node_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* node set */
+
+struct o2nm_node_group {
+ struct config_group ns_group;
+ /* some stuff? */
+};
+
+#if 0
+static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group)
+{
+ return group ?
+ container_of(group, struct o2nm_node_group, ns_group)
+ : NULL;
+}
+#endif
+
+static struct config_item *o2nm_node_group_make_item(struct config_group *group,
+ const char *name)
+{
+ struct o2nm_node *node = NULL;
+ struct config_item *ret = NULL;
+
+ if (strlen(name) > O2NM_MAX_NAME_LEN)
+ goto out; /* ENAMETOOLONG */
+
+ node = kcalloc(1, sizeof(struct o2nm_node), GFP_KERNEL);
+ if (node == NULL)
+ goto out; /* ENOMEM */
+
+ strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */
+ config_item_init_type_name(&node->nd_item, name, &o2nm_node_type);
+ spin_lock_init(&node->nd_lock);
+
+ ret = &node->nd_item;
+
+out:
+ if (ret == NULL)
+ kfree(node);
+
+ return ret;
+}
+
+static void o2nm_node_group_drop_item(struct config_group *group,
+ struct config_item *item)
+{
+ struct o2nm_node *node = to_o2nm_node(item);
+ struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent);
+
+ o2net_disconnect_node(node);
+
+ if (cluster->cl_has_local &&
+ (cluster->cl_local_node == node->nd_num)) {
+ cluster->cl_has_local = 0;
+ cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
+ o2net_stop_listening(node);
+ }
+
+ /* XXX call into net to stop this node from trading messages */
+
+ write_lock(&cluster->cl_nodes_lock);
+
+ /* XXX sloppy */
+ if (node->nd_ipv4_address)
+ rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree);
+
+ /* nd_num might be 0 if the node number hasn't been set.. */
+ if (cluster->cl_nodes[node->nd_num] == node) {
+ cluster->cl_nodes[node->nd_num] = NULL;
+ clear_bit(node->nd_num, cluster->cl_nodes_bitmap);
+ }
+ write_unlock(&cluster->cl_nodes_lock);
+
+ config_item_put(item);
+}
+
+static struct configfs_group_operations o2nm_node_group_group_ops = {
+ .make_item = o2nm_node_group_make_item,
+ .drop_item = o2nm_node_group_drop_item,
+};
+
+static struct config_item_type o2nm_node_group_type = {
+ .ct_group_ops = &o2nm_node_group_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+/* cluster */
+
+static void o2nm_cluster_release(struct config_item *item)
+{
+ struct o2nm_cluster *cluster = to_o2nm_cluster(item);
+
+ kfree(cluster->cl_group.default_groups);
+ kfree(cluster);
+}
+
+static struct configfs_item_operations o2nm_cluster_item_ops = {
+ .release = o2nm_cluster_release,
+};
+
+static struct config_item_type o2nm_cluster_type = {
+ .ct_item_ops = &o2nm_cluster_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+/* cluster set */
+
+struct o2nm_cluster_group {
+ struct configfs_subsystem cs_subsys;
+ /* some stuff? */
+};
+
+#if 0
+static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group)
+{
+ return group ?
+ container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys)
+ : NULL;
+}
+#endif
+
+static struct config_group *o2nm_cluster_group_make_group(struct config_group *group,
+ const char *name)
+{
+ struct o2nm_cluster *cluster = NULL;
+ struct o2nm_node_group *ns = NULL;
+ struct config_group *o2hb_group = NULL, *ret = NULL;
+ void *defs = NULL;
+
+ /* this runs under the parent dir's i_mutex; there can be only
+ * one caller in here at a time */
+ if (o2nm_single_cluster)
+ goto out; /* ENOSPC */
+
+ cluster = kcalloc(1, sizeof(struct o2nm_cluster), GFP_KERNEL);
+ ns = kcalloc(1, sizeof(struct o2nm_node_group), GFP_KERNEL);
+ defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
+ o2hb_group = o2hb_alloc_hb_set();
+ if (cluster == NULL || ns == NULL || o2hb_group == NULL || defs == NULL)
+ goto out;
+
+ config_group_init_type_name(&cluster->cl_group, name,
+ &o2nm_cluster_type);
+ config_group_init_type_name(&ns->ns_group, "node",
+ &o2nm_node_group_type);
+
+ cluster->cl_group.default_groups = defs;
+ cluster->cl_group.default_groups[0] = &ns->ns_group;
+ cluster->cl_group.default_groups[1] = o2hb_group;
+ cluster->cl_group.default_groups[2] = NULL;
+ rwlock_init(&cluster->cl_nodes_lock);
+ cluster->cl_node_ip_tree = RB_ROOT;
+
+ ret = &cluster->cl_group;
+ o2nm_single_cluster = cluster;
+
+out:
+ if (ret == NULL) {
+ kfree(cluster);
+ kfree(ns);
+ o2hb_free_hb_set(o2hb_group);
+ kfree(defs);
+ }
+
+ return ret;
+}
+
+static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item)
+{
+ struct o2nm_cluster *cluster = to_o2nm_cluster(item);
+ int i;
+ struct config_item *killme;
+
+ BUG_ON(o2nm_single_cluster != cluster);
+ o2nm_single_cluster = NULL;
+
+ for (i = 0; cluster->cl_group.default_groups[i]; i++) {
+ killme = &cluster->cl_group.default_groups[i]->cg_item;
+ cluster->cl_group.default_groups[i] = NULL;
+ config_item_put(killme);
+ }
+
+ config_item_put(item);
+}
+
+static struct configfs_group_operations o2nm_cluster_group_group_ops = {
+ .make_group = o2nm_cluster_group_make_group,
+ .drop_item = o2nm_cluster_group_drop_item,
+};
+
+static struct config_item_type o2nm_cluster_group_type = {
+ .ct_group_ops = &o2nm_cluster_group_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct o2nm_cluster_group o2nm_cluster_group = {
+ .cs_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "cluster",
+ .ci_type = &o2nm_cluster_group_type,
+ },
+ },
+ },
+};
+
+static void __exit exit_o2nm(void)
+{
+ if (ocfs2_table_header)
+ unregister_sysctl_table(ocfs2_table_header);
+
+ /* XXX sync with hb callbacks and shut down hb? */
+ o2net_unregister_hb_callbacks();
+ configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
+ o2cb_sys_shutdown();
+
+ o2net_exit();
+}
+
+static int __init init_o2nm(void)
+{
+ int ret = -1;
+
+ cluster_print_version();
+
+ o2hb_init();
+ o2net_init();
+
+ ocfs2_table_header = register_sysctl_table(ocfs2_root_table, 0);
+ if (!ocfs2_table_header) {
+ printk(KERN_ERR "nodemanager: unable to register sysctl\n");
+ ret = -ENOMEM; /* or something. */
+ goto out;
+ }
+
+ ret = o2net_register_hb_callbacks();
+ if (ret)
+ goto out_sysctl;
+
+ config_group_init(&o2nm_cluster_group.cs_subsys.su_group);
+ init_MUTEX(&o2nm_cluster_group.cs_subsys.su_sem);
+ ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys);
+ if (ret) {
+ printk(KERN_ERR "nodemanager: Registration returned %d\n", ret);
+ goto out_callbacks;
+ }
+
+ ret = o2cb_sys_init();
+ if (!ret)
+ goto out;
+
+ configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
+out_callbacks:
+ o2net_unregister_hb_callbacks();
+out_sysctl:
+ unregister_sysctl_table(ocfs2_table_header);
+out:
+ return ret;
+}
+
+MODULE_AUTHOR("Oracle");
+MODULE_LICENSE("GPL");
+
+module_init(init_o2nm)
+module_exit(exit_o2nm)
diff --git a/fs/ocfs2/cluster/nodemanager.h b/fs/ocfs2/cluster/nodemanager.h
new file mode 100644
index 00000000000..fce8033c310
--- /dev/null
+++ b/fs/ocfs2/cluster/nodemanager.h
@@ -0,0 +1,64 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * nodemanager.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+#ifndef O2CLUSTER_NODEMANAGER_H
+#define O2CLUSTER_NODEMANAGER_H
+
+#include "ocfs2_nodemanager.h"
+
+/* This totally doesn't belong here. */
+#include <linux/configfs.h>
+#include <linux/rbtree.h>
+
+#define KERN_OCFS2 988
+#define KERN_OCFS2_NM 1
+
+const char *o2nm_get_hb_ctl_path(void);
+
+struct o2nm_node {
+ spinlock_t nd_lock;
+ struct config_item nd_item;
+ char nd_name[O2NM_MAX_NAME_LEN+1]; /* replace? */
+ __u8 nd_num;
+ /* only one address per node, as attributes, for now. */
+ __be32 nd_ipv4_address;
+ __be16 nd_ipv4_port;
+ struct rb_node nd_ip_node;
+ /* there can be only one local node for now */
+ int nd_local;
+
+ unsigned long nd_set_attributes;
+};
+
+u8 o2nm_this_node(void);
+
+int o2nm_configured_node_map(unsigned long *map, unsigned bytes);
+struct o2nm_node *o2nm_get_node_by_num(u8 node_num);
+struct o2nm_node *o2nm_get_node_by_ip(__be32 addr);
+void o2nm_node_get(struct o2nm_node *node);
+void o2nm_node_put(struct o2nm_node *node);
+
+#endif /* O2CLUSTER_NODEMANAGER_H */
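
A brief sketch of how consumers of this header are expected to use the lookup calls above: o2nm_get_node_by_num() returns with a configfs item reference held, which the caller must drop with o2nm_node_put(). The helper below is hypothetical and not part of the patch:

	#include <linux/kernel.h>
	#include "nodemanager.h"

	/* hypothetical: report a configured node's port, or 0 if it is unknown */
	static u16 example_node_port(u8 node_num)
	{
		struct o2nm_node *node;
		u16 port = 0;

		node = o2nm_get_node_by_num(node_num);	/* NULL if not configured */
		if (node) {
			port = ntohs(node->nd_ipv4_port);
			o2nm_node_put(node);		/* drop the config_item ref */
		}
		return port;
	}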
diff --git a/fs/ocfs2/cluster/ocfs2_heartbeat.h b/fs/ocfs2/cluster/ocfs2_heartbeat.h
new file mode 100644
index 00000000000..94096069cb4
--- /dev/null
+++ b/fs/ocfs2/cluster/ocfs2_heartbeat.h
@@ -0,0 +1,37 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * ocfs2_heartbeat.h
+ *
+ * On-disk structures for ocfs2_heartbeat
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef _OCFS2_HEARTBEAT_H
+#define _OCFS2_HEARTBEAT_H
+
+struct o2hb_disk_heartbeat_block {
+ __le64 hb_seq;
+ __u8 hb_node;
+ __u8 hb_pad1[3];
+ __le32 hb_cksum;
+ __le64 hb_generation;
+};
+
+#endif /* _OCFS2_HEARTBEAT_H */
diff --git a/fs/ocfs2/cluster/ocfs2_nodemanager.h b/fs/ocfs2/cluster/ocfs2_nodemanager.h
new file mode 100644
index 00000000000..5b9854bad57
--- /dev/null
+++ b/fs/ocfs2/cluster/ocfs2_nodemanager.h
@@ -0,0 +1,39 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * ocfs2_nodemanager.h
+ *
+ * Header describing the interface between userspace and the kernel
+ * for the ocfs2_nodemanager module.
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+#ifndef _OCFS2_NODEMANAGER_H
+#define _OCFS2_NODEMANAGER_H
+
+#define O2NM_API_VERSION 5
+
+#define O2NM_MAX_NODES 255
+#define O2NM_INVALID_NODE_NUM 255
+
+/* host name, group name, cluster name all 64 bytes */
+#define O2NM_MAX_NAME_LEN 64 // __NEW_UTS_LEN
+
+#endif /* _OCFS2_NODEMANAGER_H */
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
new file mode 100644
index 00000000000..7bba98fbfc1
--- /dev/null
+++ b/fs/ocfs2/cluster/quorum.c
@@ -0,0 +1,315 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ *
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+/* This quorum hack is only here until we transition to some more rational
+ * approach that is driven from userspace. Honest. No foolin'.
+ *
+ * Imagine two nodes lose network connectivity to each other but they're still
+ * up and operating in every other way. Presumably a network timeout indicates
+ * that a node is broken and should be recovered. They can't both recover each
+ * other and both carry on without serialising their access to the file system.
+ * They need to decide who is authoritative. Now extend that problem to
+ * arbitrary groups of nodes losing connectivity between each other.
+ *
+ * So we declare that a node which has given up on connecting to a majority
+ * of nodes who are still heartbeating will fence itself.
+ *
+ * There are huge opportunities for races here. After we give up on a node's
+ * connection we need to wait long enough to give heartbeat an opportunity
+ * to declare the node as truly dead. We also need to be careful with the
+ * race between when we see a node start heartbeating and when we connect
+ * to it.
+ *
+ * So nodes that are in this transition put a hold on the quorum decision
+ * with a counter. As they fall out of this transition they drop the count
+ * and if they're the last, they fire off the decision.
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "heartbeat.h"
+#include "nodemanager.h"
+#define MLOG_MASK_PREFIX ML_QUORUM
+#include "masklog.h"
+#include "quorum.h"
+
+static struct o2quo_state {
+ spinlock_t qs_lock;
+ struct work_struct qs_work;
+ int qs_pending;
+ int qs_heartbeating;
+ unsigned long qs_hb_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ int qs_connected;
+ unsigned long qs_conn_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ int qs_holds;
+ unsigned long qs_hold_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
+} o2quo_state;
+
+/* this is horribly heavy-handed. It should instead flip the file
+ * system RO and call some userspace script. */
+static void o2quo_fence_self(void)
+{
+ /* panic spins with interrupts enabled. with preempt
+ * threads can still schedule, etc, etc */
+ o2hb_stop_all_regions();
+ panic("ocfs2 is very sorry to be fencing this system by panicing\n");
+}
+
+/* Indicate that a timeout occurred on a heartbeat region write. The
+ * other nodes in the cluster may consider us dead at that time so we
+ * want to "fence" ourselves so that we don't scribble on the disk
+ * after they think they've recovered us. This can't solve all
+ * problems related to writeout after recovery but this hack can at
+ * least close some of those gaps. When we have real fencing, this can
+ * go away as our node would be fenced externally before other nodes
+ * begin recovery. */
+void o2quo_disk_timeout(void)
+{
+ o2quo_fence_self();
+}
+
+static void o2quo_make_decision(void *arg)
+{
+ int quorum;
+ int lowest_hb, lowest_reachable = 0, fence = 0;
+ struct o2quo_state *qs = &o2quo_state;
+
+ spin_lock(&qs->qs_lock);
+
+ lowest_hb = find_first_bit(qs->qs_hb_bm, O2NM_MAX_NODES);
+ if (lowest_hb != O2NM_MAX_NODES)
+ lowest_reachable = test_bit(lowest_hb, qs->qs_conn_bm);
+
+ mlog(0, "heartbeating: %d, connected: %d, "
+ "lowest: %d (%sreachable)\n", qs->qs_heartbeating,
+ qs->qs_connected, lowest_hb, lowest_reachable ? "" : "un");
+
+ if (!test_bit(o2nm_this_node(), qs->qs_hb_bm) ||
+ qs->qs_heartbeating == 1)
+ goto out;
+
+ if (qs->qs_heartbeating & 1) {
+ /* the odd numbered cluster case is straightforward --
+ * if we can't talk to the majority we're hosed */
+ quorum = (qs->qs_heartbeating + 1)/2;
+ if (qs->qs_connected < quorum) {
+ mlog(ML_ERROR, "fencing this node because it is "
+ "only connected to %u nodes and %u is needed "
+ "to make a quorum out of %u heartbeating nodes\n",
+ qs->qs_connected, quorum,
+ qs->qs_heartbeating);
+ fence = 1;
+ }
+ } else {
+ /* the even numbered cluster adds the possibility of each half
+ * of the cluster being able to talk amongst themselves.. in
+ * that case we're hosed if we can't talk to the group that has
+ * the lowest numbered node */
+ quorum = qs->qs_heartbeating / 2;
+ if (qs->qs_connected < quorum) {
+ mlog(ML_ERROR, "fencing this node because it is "
+ "only connected to %u nodes and %u is needed "
+ "to make a quorum out of %u heartbeating nodes\n",
+ qs->qs_connected, quorum,
+ qs->qs_heartbeating);
+ fence = 1;
+ }
+ else if ((qs->qs_connected == quorum) &&
+ !lowest_reachable) {
+ mlog(ML_ERROR, "fencing this node because it is "
+ "connected to a half-quorum of %u out of %u "
+ "nodes which doesn't include the lowest active "
+ "node %u\n", quorum, qs->qs_heartbeating,
+ lowest_hb);
+ fence = 1;
+ }
+ }
+
+out:
+ spin_unlock(&qs->qs_lock);
+ if (fence)
+ o2quo_fence_self();
+}
+
+static void o2quo_set_hold(struct o2quo_state *qs, u8 node)
+{
+ assert_spin_locked(&qs->qs_lock);
+
+ if (!test_and_set_bit(node, qs->qs_hold_bm)) {
+ qs->qs_holds++;
+ mlog_bug_on_msg(qs->qs_holds == O2NM_MAX_NODES,
+ "node %u\n", node);
+ mlog(0, "node %u, %d total\n", node, qs->qs_holds);
+ }
+}
+
+static void o2quo_clear_hold(struct o2quo_state *qs, u8 node)
+{
+ assert_spin_locked(&qs->qs_lock);
+
+ if (test_and_clear_bit(node, qs->qs_hold_bm)) {
+ mlog(0, "node %u, %d total\n", node, qs->qs_holds - 1);
+ if (--qs->qs_holds == 0) {
+ if (qs->qs_pending) {
+ qs->qs_pending = 0;
+ schedule_work(&qs->qs_work);
+ }
+ }
+ mlog_bug_on_msg(qs->qs_holds < 0, "node %u, holds %d\n",
+ node, qs->qs_holds);
+ }
+}
+
+/* as a node comes up we delay the quorum decision until we know the fate of
+ * the connection. the hold will be dropped in conn_up or hb_down. it might be
+ * perpetuated by conn_err until hb_down. if we already have a conn, we might
+ * be dropping a hold that conn_up got. */
+void o2quo_hb_up(u8 node)
+{
+ struct o2quo_state *qs = &o2quo_state;
+
+ spin_lock(&qs->qs_lock);
+
+ qs->qs_heartbeating++;
+ mlog_bug_on_msg(qs->qs_heartbeating == O2NM_MAX_NODES,
+ "node %u\n", node);
+ mlog_bug_on_msg(test_bit(node, qs->qs_hb_bm), "node %u\n", node);
+ set_bit(node, qs->qs_hb_bm);
+
+ mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);
+
+ if (!test_bit(node, qs->qs_conn_bm))
+ o2quo_set_hold(qs, node);
+ else
+ o2quo_clear_hold(qs, node);
+
+ spin_unlock(&qs->qs_lock);
+}
+
+/* hb going down releases any holds we might have had due to this node from
+ * conn_up, conn_err, or hb_up */
+void o2quo_hb_down(u8 node)
+{
+ struct o2quo_state *qs = &o2quo_state;
+
+ spin_lock(&qs->qs_lock);
+
+ qs->qs_heartbeating--;
+ mlog_bug_on_msg(qs->qs_heartbeating < 0,
+ "node %u, %d heartbeating\n",
+ node, qs->qs_heartbeating);
+ mlog_bug_on_msg(!test_bit(node, qs->qs_hb_bm), "node %u\n", node);
+ clear_bit(node, qs->qs_hb_bm);
+
+ mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);
+
+ o2quo_clear_hold(qs, node);
+
+ spin_unlock(&qs->qs_lock);
+}
+
+/* this tells us that we've decided that the node is still heartbeating
+ * even though we've lost its conn. it must only be called after conn_err
+ * and indicates that we must now make a quorum decision in the future,
+ * though we might be doing so after waiting for holds to drain. Here
+ * we'll be dropping the hold from conn_err. */
+void o2quo_hb_still_up(u8 node)
+{
+ struct o2quo_state *qs = &o2quo_state;
+
+ spin_lock(&qs->qs_lock);
+
+ mlog(0, "node %u\n", node);
+
+ qs->qs_pending = 1;
+ o2quo_clear_hold(qs, node);
+
+ spin_unlock(&qs->qs_lock);
+}
+
+/* This is analogous to hb_up. as a node's connection comes up we delay the
+ * quorum decision until we see it heartbeating. the hold will be dropped in
+ * hb_up or hb_down. it might be perpetuated by conn_err until hb_down. if
+ * it's already heartbeating we might be dropping a hold that conn_up got. */
+void o2quo_conn_up(u8 node)
+{
+ struct o2quo_state *qs = &o2quo_state;
+
+ spin_lock(&qs->qs_lock);
+
+ qs->qs_connected++;
+ mlog_bug_on_msg(qs->qs_connected == O2NM_MAX_NODES,
+ "node %u\n", node);
+ mlog_bug_on_msg(test_bit(node, qs->qs_conn_bm), "node %u\n", node);
+ set_bit(node, qs->qs_conn_bm);
+
+ mlog(0, "node %u, %d total\n", node, qs->qs_connected);
+
+ if (!test_bit(node, qs->qs_hb_bm))
+ o2quo_set_hold(qs, node);
+ else
+ o2quo_clear_hold(qs, node);
+
+ spin_unlock(&qs->qs_lock);
+}
+
+/* we've decided that we won't ever be connecting to the node again. if it's
+ * still heartbeating we grab a hold that will delay decisions until either the
+ * node stops heartbeating from hb_down or the caller decides that the node is
+ * still up and calls still_up */
+void o2quo_conn_err(u8 node)
+{
+ struct o2quo_state *qs = &o2quo_state;
+
+ spin_lock(&qs->qs_lock);
+
+ if (test_bit(node, qs->qs_conn_bm)) {
+ qs->qs_connected--;
+ mlog_bug_on_msg(qs->qs_connected < 0,
+ "node %u, connected %d\n",
+ node, qs->qs_connected);
+
+ clear_bit(node, qs->qs_conn_bm);
+ }
+
+ mlog(0, "node %u, %d total\n", node, qs->qs_connected);
+
+ if (test_bit(node, qs->qs_hb_bm))
+ o2quo_set_hold(qs, node);
+
+ spin_unlock(&qs->qs_lock);
+}
+
+void o2quo_init(void)
+{
+ struct o2quo_state *qs = &o2quo_state;
+
+ spin_lock_init(&qs->qs_lock);
+ INIT_WORK(&qs->qs_work, o2quo_make_decision, NULL);
+}
+
+void o2quo_exit(void)
+{
+ flush_scheduled_work();
+}
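
The decision in o2quo_make_decision() above reduces to a small predicate over the counts it tracks; restated here as a standalone sketch for clarity (illustrative only, not part of the patch):

	/*
	 * Fence when we cannot reach a majority of heartbeating nodes; on an
	 * even split, the half containing the lowest numbered heartbeating
	 * node survives.  Mirrors the odd/even branches above.
	 */
	static int should_fence(int heartbeating, int connected, int lowest_reachable)
	{
		int quorum;

		if (heartbeating & 1) {
			quorum = (heartbeating + 1) / 2;
			return connected < quorum;
		}

		quorum = heartbeating / 2;
		if (connected < quorum)
			return 1;
		return connected == quorum && !lowest_reachable;
	}

The real function additionally skips the decision entirely when this node is not in the heartbeat map or is the only node heartbeating.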
diff --git a/fs/ocfs2/cluster/quorum.h b/fs/ocfs2/cluster/quorum.h
new file mode 100644
index 00000000000..6649cc6f67c
--- /dev/null
+++ b/fs/ocfs2/cluster/quorum.h
@@ -0,0 +1,36 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+#ifndef O2CLUSTER_QUORUM_H
+#define O2CLUSTER_QUORUM_H
+
+void o2quo_init(void);
+void o2quo_exit(void);
+
+void o2quo_hb_up(u8 node);
+void o2quo_hb_down(u8 node);
+void o2quo_hb_still_up(u8 node);
+void o2quo_conn_up(u8 node);
+void o2quo_conn_err(u8 node);
+void o2quo_disk_timeout(void);
+
+#endif /* O2CLUSTER_QUORUM_H */
diff --git a/fs/ocfs2/cluster/sys.c b/fs/ocfs2/cluster/sys.c
new file mode 100644
index 00000000000..1d9f6acafa2
--- /dev/null
+++ b/fs/ocfs2/cluster/sys.c
@@ -0,0 +1,124 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * sys.c
+ *
+ * OCFS2 cluster sysfs interface
+ *
+ * Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation,
+ * version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#include "ocfs2_nodemanager.h"
+#include "masklog.h"
+#include "sys.h"
+
+struct o2cb_attribute {
+ struct attribute attr;
+ ssize_t (*show)(char *buf);
+ ssize_t (*store)(const char *buf, size_t count);
+};
+
+#define O2CB_ATTR(_name, _mode, _show, _store) \
+struct o2cb_attribute o2cb_attr_##_name = __ATTR(_name, _mode, _show, _store)
+
+#define to_o2cb_subsys(k) container_of(to_kset(k), struct subsystem, kset)
+#define to_o2cb_attr(_attr) container_of(_attr, struct o2cb_attribute, attr)
+
+static ssize_t o2cb_interface_revision_show(char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", O2NM_API_VERSION);
+}
+
+static O2CB_ATTR(interface_revision, S_IFREG | S_IRUGO, o2cb_interface_revision_show, NULL);
+
+static struct attribute *o2cb_attrs[] = {
+ &o2cb_attr_interface_revision.attr,
+ NULL,
+};
+
+static ssize_t
+o2cb_show(struct kobject * kobj, struct attribute * attr, char * buffer);
+static ssize_t
+o2cb_store(struct kobject * kobj, struct attribute * attr,
+ const char * buffer, size_t count);
+static struct sysfs_ops o2cb_sysfs_ops = {
+ .show = o2cb_show,
+ .store = o2cb_store,
+};
+
+static struct kobj_type o2cb_subsys_type = {
+ .default_attrs = o2cb_attrs,
+ .sysfs_ops = &o2cb_sysfs_ops,
+};
+
+/* gives us o2cb_subsys */
+static decl_subsys(o2cb, NULL, NULL);
+
+static ssize_t
+o2cb_show(struct kobject * kobj, struct attribute * attr, char * buffer)
+{
+ struct o2cb_attribute *o2cb_attr = to_o2cb_attr(attr);
+ struct subsystem *sbs = to_o2cb_subsys(kobj);
+
+ BUG_ON(sbs != &o2cb_subsys);
+
+ if (o2cb_attr->show)
+ return o2cb_attr->show(buffer);
+ return -EIO;
+}
+
+static ssize_t
+o2cb_store(struct kobject * kobj, struct attribute * attr,
+ const char * buffer, size_t count)
+{
+ struct o2cb_attribute *o2cb_attr = to_o2cb_attr(attr);
+ struct subsystem *sbs = to_o2cb_subsys(kobj);
+
+ BUG_ON(sbs != &o2cb_subsys);
+
+ if (o2cb_attr->store)
+ return o2cb_attr->store(buffer, count);
+ return -EIO;
+}
+
+void o2cb_sys_shutdown(void)
+{
+ mlog_sys_shutdown();
+ subsystem_unregister(&o2cb_subsys);
+}
+
+int o2cb_sys_init(void)
+{
+ int ret;
+
+ o2cb_subsys.kset.kobj.ktype = &o2cb_subsys_type;
+ ret = subsystem_register(&o2cb_subsys);
+ if (ret)
+ return ret;
+
+ ret = mlog_sys_init(&o2cb_subsys);
+ if (ret)
+ subsystem_unregister(&o2cb_subsys);
+ return ret;
+}
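
The O2CB_ATTR() wrapper above makes additional entries cheap to add; a hypothetical second read-only attribute would follow the same pattern (illustration only, not part of the patch):

	/* hypothetical attribute, shown only to illustrate the O2CB_ATTR() pattern */
	static ssize_t o2cb_example_show(char *buf)
	{
		return snprintf(buf, PAGE_SIZE, "%u\n", 42);
	}

	static O2CB_ATTR(example, S_IFREG | S_IRUGO, o2cb_example_show, NULL);

Its &o2cb_attr_example.attr would then be added to o2cb_attrs[] ahead of the terminating NULL so that o2cb_show() can dispatch to it.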
diff --git a/fs/ocfs2/cluster/sys.h b/fs/ocfs2/cluster/sys.h
new file mode 100644
index 00000000000..d66b8ab0045
--- /dev/null
+++ b/fs/ocfs2/cluster/sys.h
@@ -0,0 +1,33 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * sys.h
+ *
+ * Function prototypes for o2cb sysfs interface
+ *
+ * Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation,
+ * version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+#ifndef O2CLUSTER_SYS_H
+#define O2CLUSTER_SYS_H
+
+void o2cb_sys_shutdown(void);
+int o2cb_sys_init(void);
+
+#endif /* O2CLUSTER_SYS_H */
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
new file mode 100644
index 00000000000..35d92c01a97
--- /dev/null
+++ b/fs/ocfs2/cluster/tcp.c
@@ -0,0 +1,1829 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ *
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ * ----
+ *
+ * Callers for this were originally written against a very simple synchronous
+ * API. This implementation reflects those simple callers. Some day I'm sure
+ * we'll need to move to a more robust posting/callback mechanism.
+ *
+ * Transmit calls pass in kernel virtual addresses and block copying this into
+ * the socket's tx buffers via a usual blocking sendmsg. They'll block waiting
+ * for a failed socket to time out. TX callers can also pass in a pointer to an
+ * 'int' which gets filled with an errno off the wire in response to the
+ * message they send.
+ *
+ * Handlers for unsolicited messages are registered. Each socket has a page
+ * that incoming data is copied into. First the header, then the data.
+ * Handlers are called from only one thread with a reference to this per-socket
+ * page. This page is destroyed after the handler call, so it can't be
+ * referenced beyond the call. Handlers may block but are discouraged from
+ * doing so.
+ *
+ * Any framing errors (bad magic, large payload lengths) close a connection.
+ *
+ * Our sock_container holds the state we associate with a socket. Its current
+ * framing state is held there as well as the refcounting we do around when it
+ * is safe to tear down the socket. The socket is only finally torn down from
+ * the container when the container loses all of its references -- so as long
+ * as you hold a ref on the container you can trust that the socket is valid
+ * for use with kernel socket APIs.
+ *
+ * Connections are initiated between a pair of nodes when the node with the
+ * higher node number gets a heartbeat callback which indicates that the lower
+ * numbered node has started heartbeating. The lower numbered node is passive
+ * and only accepts the connection if the higher numbered node is heartbeating.
+ */
+
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/kref.h>
+#include <net/tcp.h>
+
+#include <asm/uaccess.h>
+
+#include "heartbeat.h"
+#include "tcp.h"
+#include "nodemanager.h"
+#define MLOG_MASK_PREFIX ML_TCP
+#include "masklog.h"
+#include "quorum.h"
+
+#include "tcp_internal.h"
+
+/*
+ * The linux network stack isn't sparse endian clean.. It has macros like
+ * ntohs() which perform the endian checks and structs like sockaddr_in
+ * which aren't annotated. So __force is found here to get the build
+ * clean. When they emerge from the dark ages and annotate the code
+ * we can remove these.
+ */
+
+#define SC_NODEF_FMT "node %s (num %u) at %u.%u.%u.%u:%u"
+#define SC_NODEF_ARGS(sc) sc->sc_node->nd_name, sc->sc_node->nd_num, \
+ NIPQUAD(sc->sc_node->nd_ipv4_address), \
+ ntohs(sc->sc_node->nd_ipv4_port)
+
+/*
+ * In the following two log macros, the whitespace after the ',' just
+ * before ##args is intentional. Otherwise, gcc 2.95 will eat the
+ * previous token if args expands to nothing.
+ */
+#define msglog(hdr, fmt, args...) do { \
+ typeof(hdr) __hdr = (hdr); \
+ mlog(ML_MSG, "[mag %u len %u typ %u stat %d sys_stat %d " \
+ "key %08x num %u] " fmt, \
+ be16_to_cpu(__hdr->magic), be16_to_cpu(__hdr->data_len), \
+ be16_to_cpu(__hdr->msg_type), be32_to_cpu(__hdr->status), \
+ be32_to_cpu(__hdr->sys_status), be32_to_cpu(__hdr->key), \
+ be32_to_cpu(__hdr->msg_num) , ##args); \
+} while (0)
+
+#define sclog(sc, fmt, args...) do { \
+ typeof(sc) __sc = (sc); \
+ mlog(ML_SOCKET, "[sc %p refs %d sock %p node %u page %p " \
+ "pg_off %zu] " fmt, __sc, \
+ atomic_read(&__sc->sc_kref.refcount), __sc->sc_sock, \
+ __sc->sc_node->nd_num, __sc->sc_page, __sc->sc_page_off , \
+ ##args); \
+} while (0)
+
+static rwlock_t o2net_handler_lock = RW_LOCK_UNLOCKED;
+static struct rb_root o2net_handler_tree = RB_ROOT;
+
+static struct o2net_node o2net_nodes[O2NM_MAX_NODES];
+
+/* XXX someday we'll need better accounting */
+static struct socket *o2net_listen_sock = NULL;
+
+/*
+ * listen work is only queued by the listening socket callbacks on the
+ * o2net_wq. teardown detaches the callbacks before destroying the workqueue.
+ * quorum work is queued as sock containers are shut down.. stop_listening
+ * tears down all the node's sock containers, preventing future shutdowns
+ * and queued quorum work, before canceling delayed quorum work and
+ * destroying the work queue.
+ */
+static struct workqueue_struct *o2net_wq;
+static struct work_struct o2net_listen_work;
+
+static struct o2hb_callback_func o2net_hb_up, o2net_hb_down;
+#define O2NET_HB_PRI 0x1
+
+static struct o2net_handshake *o2net_hand;
+static struct o2net_msg *o2net_keep_req, *o2net_keep_resp;
+
+static int o2net_sys_err_translations[O2NET_ERR_MAX] =
+ {[O2NET_ERR_NONE] = 0,
+ [O2NET_ERR_NO_HNDLR] = -ENOPROTOOPT,
+ [O2NET_ERR_OVERFLOW] = -EOVERFLOW,
+ [O2NET_ERR_DIED] = -EHOSTDOWN,};
+
+/* can't quite avoid *all* internal declarations :/ */
+static void o2net_sc_connect_completed(void *arg);
+static void o2net_rx_until_empty(void *arg);
+static void o2net_shutdown_sc(void *arg);
+static void o2net_listen_data_ready(struct sock *sk, int bytes);
+static void o2net_sc_send_keep_req(void *arg);
+static void o2net_idle_timer(unsigned long data);
+static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
+
+static inline int o2net_sys_err_to_errno(enum o2net_system_error err)
+{
+ int trans;
+ BUG_ON(err >= O2NET_ERR_MAX);
+ trans = o2net_sys_err_translations[err];
+
+ /* Just in case we mess up the translation table above */
+ BUG_ON(err != O2NET_ERR_NONE && trans == 0);
+ return trans;
+}
+
+static struct o2net_node * o2net_nn_from_num(u8 node_num)
+{
+ BUG_ON(node_num >= ARRAY_SIZE(o2net_nodes));
+ return &o2net_nodes[node_num];
+}
+
+static u8 o2net_num_from_nn(struct o2net_node *nn)
+{
+ BUG_ON(nn == NULL);
+ return nn - o2net_nodes;
+}
+
+/* ------------------------------------------------------------ */
+
+static int o2net_prep_nsw(struct o2net_node *nn, struct o2net_status_wait *nsw)
+{
+ int ret = 0;
+
+ do {
+ if (!idr_pre_get(&nn->nn_status_idr, GFP_ATOMIC)) {
+ ret = -EAGAIN;
+ break;
+ }
+ spin_lock(&nn->nn_lock);
+ ret = idr_get_new(&nn->nn_status_idr, nsw, &nsw->ns_id);
+ if (ret == 0)
+ list_add_tail(&nsw->ns_node_item,
+ &nn->nn_status_list);
+ spin_unlock(&nn->nn_lock);
+ } while (ret == -EAGAIN);
+
+ if (ret == 0) {
+ init_waitqueue_head(&nsw->ns_wq);
+ nsw->ns_sys_status = O2NET_ERR_NONE;
+ nsw->ns_status = 0;
+ }
+
+ return ret;
+}
+
+static void o2net_complete_nsw_locked(struct o2net_node *nn,
+ struct o2net_status_wait *nsw,
+ enum o2net_system_error sys_status,
+ s32 status)
+{
+ assert_spin_locked(&nn->nn_lock);
+
+ if (!list_empty(&nsw->ns_node_item)) {
+ list_del_init(&nsw->ns_node_item);
+ nsw->ns_sys_status = sys_status;
+ nsw->ns_status = status;
+ idr_remove(&nn->nn_status_idr, nsw->ns_id);
+ wake_up(&nsw->ns_wq);
+ }
+}
+
+static void o2net_complete_nsw(struct o2net_node *nn,
+ struct o2net_status_wait *nsw,
+ u64 id, enum o2net_system_error sys_status,
+ s32 status)
+{
+ spin_lock(&nn->nn_lock);
+ if (nsw == NULL) {
+ if (id > INT_MAX)
+ goto out;
+
+ nsw = idr_find(&nn->nn_status_idr, id);
+ if (nsw == NULL)
+ goto out;
+ }
+
+ o2net_complete_nsw_locked(nn, nsw, sys_status, status);
+
+out:
+ spin_unlock(&nn->nn_lock);
+ return;
+}
+
+static void o2net_complete_nodes_nsw(struct o2net_node *nn)
+{
+ struct list_head *iter, *tmp;
+ unsigned int num_kills = 0;
+ struct o2net_status_wait *nsw;
+
+ assert_spin_locked(&nn->nn_lock);
+
+ list_for_each_safe(iter, tmp, &nn->nn_status_list) {
+ nsw = list_entry(iter, struct o2net_status_wait, ns_node_item);
+ o2net_complete_nsw_locked(nn, nsw, O2NET_ERR_DIED, 0);
+ num_kills++;
+ }
+
+ mlog(0, "completed %d messages for node %u\n", num_kills,
+ o2net_num_from_nn(nn));
+}
+
+static int o2net_nsw_completed(struct o2net_node *nn,
+ struct o2net_status_wait *nsw)
+{
+ int completed;
+ spin_lock(&nn->nn_lock);
+ completed = list_empty(&nsw->ns_node_item);
+ spin_unlock(&nn->nn_lock);
+ return completed;
+}
+
+/* ------------------------------------------------------------ */
+
+static void sc_kref_release(struct kref *kref)
+{
+ struct o2net_sock_container *sc = container_of(kref,
+ struct o2net_sock_container, sc_kref);
+ sclog(sc, "releasing\n");
+
+ if (sc->sc_sock) {
+ sock_release(sc->sc_sock);
+ sc->sc_sock = NULL;
+ }
+
+ o2nm_node_put(sc->sc_node);
+ sc->sc_node = NULL;
+
+ kfree(sc);
+}
+
+static void sc_put(struct o2net_sock_container *sc)
+{
+ sclog(sc, "put\n");
+ kref_put(&sc->sc_kref, sc_kref_release);
+}
+static void sc_get(struct o2net_sock_container *sc)
+{
+ sclog(sc, "get\n");
+ kref_get(&sc->sc_kref);
+}
+static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
+{
+ struct o2net_sock_container *sc, *ret = NULL;
+ struct page *page = NULL;
+
+ page = alloc_page(GFP_NOFS);
+ sc = kcalloc(1, sizeof(*sc), GFP_NOFS);
+ if (sc == NULL || page == NULL)
+ goto out;
+
+ kref_init(&sc->sc_kref);
+ o2nm_node_get(node);
+ sc->sc_node = node;
+
+ INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed, sc);
+ INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty, sc);
+ INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc, sc);
+ INIT_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req, sc);
+
+ init_timer(&sc->sc_idle_timeout);
+ sc->sc_idle_timeout.function = o2net_idle_timer;
+ sc->sc_idle_timeout.data = (unsigned long)sc;
+
+ sclog(sc, "alloced\n");
+
+ ret = sc;
+ sc->sc_page = page;
+ sc = NULL;
+ page = NULL;
+
+out:
+ if (page)
+ __free_page(page);
+ kfree(sc);
+
+ return ret;
+}
+
+/* ------------------------------------------------------------ */
+
+static void o2net_sc_queue_work(struct o2net_sock_container *sc,
+ struct work_struct *work)
+{
+ sc_get(sc);
+ if (!queue_work(o2net_wq, work))
+ sc_put(sc);
+}
+static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc,
+ struct work_struct *work,
+ int delay)
+{
+ sc_get(sc);
+ if (!queue_delayed_work(o2net_wq, work, delay))
+ sc_put(sc);
+}
+static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc,
+ struct work_struct *work)
+{
+ if (cancel_delayed_work(work))
+ sc_put(sc);
+}
+
+static void o2net_set_nn_state(struct o2net_node *nn,
+ struct o2net_sock_container *sc,
+ unsigned valid, int err)
+{
+ int was_valid = nn->nn_sc_valid;
+ int was_err = nn->nn_persistent_error;
+ struct o2net_sock_container *old_sc = nn->nn_sc;
+
+ assert_spin_locked(&nn->nn_lock);
+
+ /* the node num comparison and single connect/accept path should stop
+ * a non-null sc from being overwritten with another */
+ BUG_ON(sc && nn->nn_sc && nn->nn_sc != sc);
+ mlog_bug_on_msg(err && valid, "err %d valid %u\n", err, valid);
+ mlog_bug_on_msg(valid && !sc, "valid %u sc %p\n", valid, sc);
+
+ /* we won't reconnect after our valid conn goes away for
+ * this hb iteration.. here so it shows up in the logs */
+ if (was_valid && !valid && err == 0)
+ err = -ENOTCONN;
+
+ mlog(ML_CONN, "node %u sc: %p -> %p, valid %u -> %u, err %d -> %d\n",
+ o2net_num_from_nn(nn), nn->nn_sc, sc, nn->nn_sc_valid, valid,
+ nn->nn_persistent_error, err);
+
+ nn->nn_sc = sc;
+ nn->nn_sc_valid = valid ? 1 : 0;
+ nn->nn_persistent_error = err;
+
+ /* mirrors o2net_tx_can_proceed() */
+ if (nn->nn_persistent_error || nn->nn_sc_valid)
+ wake_up(&nn->nn_sc_wq);
+
+ if (!was_err && nn->nn_persistent_error) {
+ o2quo_conn_err(o2net_num_from_nn(nn));
+ queue_delayed_work(o2net_wq, &nn->nn_still_up,
+ msecs_to_jiffies(O2NET_QUORUM_DELAY_MS));
+ }
+
+ if (was_valid && !valid) {
+ mlog(ML_NOTICE, "no longer connected to " SC_NODEF_FMT "\n",
+ SC_NODEF_ARGS(old_sc));
+ o2net_complete_nodes_nsw(nn);
+ }
+
+ if (!was_valid && valid) {
+ o2quo_conn_up(o2net_num_from_nn(nn));
+ /* this is a bit of a hack. we only try reconnecting
+ * when heartbeating starts until we get a connection.
+ * if that connection then dies we don't try reconnecting.
+ * the only way to start connecting again is to down
+ * heartbeat and bring it back up. */
+ cancel_delayed_work(&nn->nn_connect_expired);
+ mlog(ML_NOTICE, "%s " SC_NODEF_FMT "\n",
+ o2nm_this_node() > sc->sc_node->nd_num ?
+ "connected to" : "accepted connection from",
+ SC_NODEF_ARGS(sc));
+ }
+
+ /* trigger the connecting worker func as long as we're not valid,
+ * it will back off if it shouldn't connect. This can be called
+ * from node config teardown and so needs to be careful about
+ * the work queue actually being up. */
+ if (!valid && o2net_wq) {
+ unsigned long delay;
+ /* delay if we're within a RECONNECT_DELAY of the
+ * last attempt */
+ delay = (nn->nn_last_connect_attempt +
+ msecs_to_jiffies(O2NET_RECONNECT_DELAY_MS))
+ - jiffies;
+ if (delay > msecs_to_jiffies(O2NET_RECONNECT_DELAY_MS))
+ delay = 0;
+ mlog(ML_CONN, "queueing conn attempt in %lu jiffies\n", delay);
+ queue_delayed_work(o2net_wq, &nn->nn_connect_work, delay);
+ }
+
+ /* keep track of the nn's sc ref for the caller */
+ if ((old_sc == NULL) && sc)
+ sc_get(sc);
+ if (old_sc && (old_sc != sc)) {
+ o2net_sc_queue_work(old_sc, &old_sc->sc_shutdown_work);
+ sc_put(old_sc);
+ }
+}
+
+/* see o2net_register_callbacks() */
+static void o2net_data_ready(struct sock *sk, int bytes)
+{
+ void (*ready)(struct sock *sk, int bytes);
+
+ read_lock(&sk->sk_callback_lock);
+ if (sk->sk_user_data) {
+ struct o2net_sock_container *sc = sk->sk_user_data;
+ sclog(sc, "data_ready hit\n");
+ do_gettimeofday(&sc->sc_tv_data_ready);
+ o2net_sc_queue_work(sc, &sc->sc_rx_work);
+ ready = sc->sc_data_ready;
+ } else {
+ ready = sk->sk_data_ready;
+ }
+ read_unlock(&sk->sk_callback_lock);
+
+ ready(sk, bytes);
+}
+
+/* see o2net_register_callbacks() */
+static void o2net_state_change(struct sock *sk)
+{
+ void (*state_change)(struct sock *sk);
+ struct o2net_sock_container *sc;
+
+ read_lock(&sk->sk_callback_lock);
+ sc = sk->sk_user_data;
+ if (sc == NULL) {
+ state_change = sk->sk_state_change;
+ goto out;
+ }
+
+ sclog(sc, "state_change to %d\n", sk->sk_state);
+
+ state_change = sc->sc_state_change;
+
+ switch(sk->sk_state) {
+ /* ignore connecting sockets as they make progress */
+ case TCP_SYN_SENT:
+ case TCP_SYN_RECV:
+ break;
+ case TCP_ESTABLISHED:
+ o2net_sc_queue_work(sc, &sc->sc_connect_work);
+ break;
+ default:
+ o2net_sc_queue_work(sc, &sc->sc_shutdown_work);
+ break;
+ }
+out:
+ read_unlock(&sk->sk_callback_lock);
+ state_change(sk);
+}
+
+/*
+ * we register callbacks so we can queue work on events before calling
+ * the original callbacks.  our callbacks are careful to test user_data
+ * to discover when they've raced with o2net_unregister_callbacks().
+ */
+static void o2net_register_callbacks(struct sock *sk,
+ struct o2net_sock_container *sc)
+{
+ write_lock_bh(&sk->sk_callback_lock);
+
+ /* accepted sockets inherit the old listen socket data ready */
+ if (sk->sk_data_ready == o2net_listen_data_ready) {
+ sk->sk_data_ready = sk->sk_user_data;
+ sk->sk_user_data = NULL;
+ }
+
+ BUG_ON(sk->sk_user_data != NULL);
+ sk->sk_user_data = sc;
+ sc_get(sc);
+
+ sc->sc_data_ready = sk->sk_data_ready;
+ sc->sc_state_change = sk->sk_state_change;
+ sk->sk_data_ready = o2net_data_ready;
+ sk->sk_state_change = o2net_state_change;
+
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static int o2net_unregister_callbacks(struct sock *sk,
+ struct o2net_sock_container *sc)
+{
+ int ret = 0;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ if (sk->sk_user_data == sc) {
+ ret = 1;
+ sk->sk_user_data = NULL;
+ sk->sk_data_ready = sc->sc_data_ready;
+ sk->sk_state_change = sc->sc_state_change;
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ return ret;
+}
+
+/*
+ * this is a little helper that is called by callers who have seen a problem
+ * with an sc and want to detach it from the nn if someone hasn't already
+ * beaten them to it.  if an error is given then the shutdown will be persistent
+ * and pending transmits will be canceled.
+ */
+static void o2net_ensure_shutdown(struct o2net_node *nn,
+ struct o2net_sock_container *sc,
+ int err)
+{
+ spin_lock(&nn->nn_lock);
+ if (nn->nn_sc == sc)
+ o2net_set_nn_state(nn, NULL, 0, err);
+ spin_unlock(&nn->nn_lock);
+}
+
+/*
+ * This work queue function performs the blocking parts of socket shutdown. A
+ * few paths lead here. set_nn_state will trigger this callback if it sees an
+ * sc detached from the nn. state_change will also trigger this callback
+ * directly when it sees errors. In that case we need to call set_nn_state
+ * ourselves as state_change couldn't get the nn_lock and call set_nn_state
+ * itself.
+ */
+static void o2net_shutdown_sc(void *arg)
+{
+ struct o2net_sock_container *sc = arg;
+ struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
+
+ sclog(sc, "shutting down\n");
+
+ /* drop the callbacks ref and call shutdown only once */
+ if (o2net_unregister_callbacks(sc->sc_sock->sk, sc)) {
+ /* we shouldn't flush as we're in the thread, the
+ * races with pending sc work structs are harmless */
+ del_timer_sync(&sc->sc_idle_timeout);
+ o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
+ sc_put(sc);
+ sc->sc_sock->ops->shutdown(sc->sc_sock,
+ RCV_SHUTDOWN|SEND_SHUTDOWN);
+ }
+
+	/* not a persistent error, so connects that failed before the other
+	 * node had seen our heartbeat can be retried */
+ o2net_ensure_shutdown(nn, sc, 0);
+ sc_put(sc);
+}
+
+/* ------------------------------------------------------------ */
+
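+/* handlers live in an rbtree ordered by memcmp() of the raw key and then the
+ * raw message type; the ordering is arbitrary but only needs to be consistent
+ * for lookups to find the node again */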
+static int o2net_handler_cmp(struct o2net_msg_handler *nmh, u32 msg_type,
+ u32 key)
+{
+ int ret = memcmp(&nmh->nh_key, &key, sizeof(key));
+
+ if (ret == 0)
+ ret = memcmp(&nmh->nh_msg_type, &msg_type, sizeof(msg_type));
+
+ return ret;
+}
+
+static struct o2net_msg_handler *
+o2net_handler_tree_lookup(u32 msg_type, u32 key, struct rb_node ***ret_p,
+ struct rb_node **ret_parent)
+{
+ struct rb_node **p = &o2net_handler_tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct o2net_msg_handler *nmh, *ret = NULL;
+ int cmp;
+
+ while (*p) {
+ parent = *p;
+ nmh = rb_entry(parent, struct o2net_msg_handler, nh_node);
+ cmp = o2net_handler_cmp(nmh, msg_type, key);
+
+ if (cmp < 0)
+ p = &(*p)->rb_left;
+ else if (cmp > 0)
+ p = &(*p)->rb_right;
+ else {
+ ret = nmh;
+ break;
+ }
+ }
+
+ if (ret_p != NULL)
+ *ret_p = p;
+ if (ret_parent != NULL)
+ *ret_parent = parent;
+
+ return ret;
+}
+
+static void o2net_handler_kref_release(struct kref *kref)
+{
+ struct o2net_msg_handler *nmh;
+ nmh = container_of(kref, struct o2net_msg_handler, nh_kref);
+
+ kfree(nmh);
+}
+
+static void o2net_handler_put(struct o2net_msg_handler *nmh)
+{
+ kref_put(&nmh->nh_kref, o2net_handler_kref_release);
+}
+
+/* max_len is protection for the handler func. incoming messages won't
+ * be given to the handler if their payload is longer than the max. */
+int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
+ o2net_msg_handler_func *func, void *data,
+ struct list_head *unreg_list)
+{
+ struct o2net_msg_handler *nmh = NULL;
+ struct rb_node **p, *parent;
+ int ret = 0;
+
+ if (max_len > O2NET_MAX_PAYLOAD_BYTES) {
+ mlog(0, "max_len for message handler out of range: %u\n",
+ max_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!msg_type) {
+ mlog(0, "no message type provided: %u, %p\n", msg_type, func);
+ ret = -EINVAL;
+ goto out;
+	}
+ if (!func) {
+ mlog(0, "no message handler provided: %u, %p\n",
+ msg_type, func);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ nmh = kcalloc(1, sizeof(struct o2net_msg_handler), GFP_NOFS);
+ if (nmh == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ nmh->nh_func = func;
+ nmh->nh_func_data = data;
+ nmh->nh_msg_type = msg_type;
+ nmh->nh_max_len = max_len;
+ nmh->nh_key = key;
+ /* the tree and list get this ref.. they're both removed in
+ * unregister when this ref is dropped */
+ kref_init(&nmh->nh_kref);
+ INIT_LIST_HEAD(&nmh->nh_unregister_item);
+
+ write_lock(&o2net_handler_lock);
+ if (o2net_handler_tree_lookup(msg_type, key, &p, &parent))
+ ret = -EEXIST;
+ else {
+ rb_link_node(&nmh->nh_node, parent, p);
+ rb_insert_color(&nmh->nh_node, &o2net_handler_tree);
+ list_add_tail(&nmh->nh_unregister_item, unreg_list);
+
+ mlog(ML_TCP, "registered handler func %p type %u key %08x\n",
+ func, msg_type, key);
+ /* we've had some trouble with handlers seemingly vanishing. */
+ mlog_bug_on_msg(o2net_handler_tree_lookup(msg_type, key, &p,
+ &parent) == NULL,
+				"couldn't find handler we *just* registered "
+ "for type %u key %08x\n", msg_type, key);
+ }
+ write_unlock(&o2net_handler_lock);
+
+out:
+ if (ret)
+ kfree(nmh);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(o2net_register_handler);
+
+void o2net_unregister_handler_list(struct list_head *list)
+{
+ struct list_head *pos, *n;
+ struct o2net_msg_handler *nmh;
+
+ write_lock(&o2net_handler_lock);
+ list_for_each_safe(pos, n, list) {
+ nmh = list_entry(pos, struct o2net_msg_handler,
+ nh_unregister_item);
+ mlog(ML_TCP, "unregistering handler func %p type %u key %08x\n",
+ nmh->nh_func, nmh->nh_msg_type, nmh->nh_key);
+ rb_erase(&nmh->nh_node, &o2net_handler_tree);
+ list_del_init(&nmh->nh_unregister_item);
+ kref_put(&nmh->nh_kref, o2net_handler_kref_release);
+ }
+ write_unlock(&o2net_handler_lock);
+}
+EXPORT_SYMBOL_GPL(o2net_unregister_handler_list);
+
+static struct o2net_msg_handler *o2net_handler_get(u32 msg_type, u32 key)
+{
+ struct o2net_msg_handler *nmh;
+
+ read_lock(&o2net_handler_lock);
+ nmh = o2net_handler_tree_lookup(msg_type, key, NULL, NULL);
+ if (nmh)
+ kref_get(&nmh->nh_kref);
+ read_unlock(&o2net_handler_lock);
+
+ return nmh;
+}
+
+/* ------------------------------------------------------------ */
+
+static int o2net_recv_tcp_msg(struct socket *sock, void *data, size_t len)
+{
+ int ret;
+ mm_segment_t oldfs;
+ struct kvec vec = {
+ .iov_len = len,
+ .iov_base = data,
+ };
+ struct msghdr msg = {
+ .msg_iovlen = 1,
+ .msg_iov = (struct iovec *)&vec,
+ .msg_flags = MSG_DONTWAIT,
+ };
+
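+	/* the kvec points at kernel memory, so widen the address limit
+	 * around the sock_recvmsg() call */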
+ oldfs = get_fs();
+ set_fs(get_ds());
+ ret = sock_recvmsg(sock, &msg, len, msg.msg_flags);
+ set_fs(oldfs);
+
+ return ret;
+}
+
+static int o2net_send_tcp_msg(struct socket *sock, struct kvec *vec,
+ size_t veclen, size_t total)
+{
+ int ret;
+ mm_segment_t oldfs;
+ struct msghdr msg = {
+ .msg_iov = (struct iovec *)vec,
+ .msg_iovlen = veclen,
+ };
+
+ if (sock == NULL) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ oldfs = get_fs();
+ set_fs(get_ds());
+ ret = sock_sendmsg(sock, &msg, total);
+ set_fs(oldfs);
+ if (ret != total) {
+ mlog(ML_ERROR, "sendmsg returned %d instead of %zu\n", ret,
+ total);
+ if (ret >= 0)
+ ret = -EPIPE; /* should be smarter, I bet */
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (ret < 0)
+ mlog(0, "returning error: %d\n", ret);
+ return ret;
+}
+
+static void o2net_sendpage(struct o2net_sock_container *sc,
+ void *kmalloced_virt,
+ size_t size)
+{
+ struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
+ ssize_t ret;
+
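+	/* the handshake and keepalive messages passed here are small kmalloc
+	 * allocations that fit within a single page, so one (page, offset,
+	 * size) triple describes the whole buffer */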
+ ret = sc->sc_sock->ops->sendpage(sc->sc_sock,
+ virt_to_page(kmalloced_virt),
+ (long)kmalloced_virt & ~PAGE_MASK,
+ size, MSG_DONTWAIT);
+ if (ret != size) {
+ mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
+ " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret);
+ o2net_ensure_shutdown(nn, sc, 0);
+ }
+}
+
+static void o2net_init_msg(struct o2net_msg *msg, u16 data_len, u16 msg_type, u32 key)
+{
+ memset(msg, 0, sizeof(struct o2net_msg));
+ msg->magic = cpu_to_be16(O2NET_MSG_MAGIC);
+ msg->data_len = cpu_to_be16(data_len);
+ msg->msg_type = cpu_to_be16(msg_type);
+ msg->sys_status = cpu_to_be32(O2NET_ERR_NONE);
+ msg->status = 0;
+ msg->key = cpu_to_be32(key);
+}
+
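+/* returns nonzero when the caller may stop waiting: either *error is set
+ * from a persistent error, or *sc_ret holds a referenced sc that the caller
+ * must release with sc_put() */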
+static int o2net_tx_can_proceed(struct o2net_node *nn,
+ struct o2net_sock_container **sc_ret,
+ int *error)
+{
+ int ret = 0;
+
+ spin_lock(&nn->nn_lock);
+ if (nn->nn_persistent_error) {
+ ret = 1;
+ *sc_ret = NULL;
+ *error = nn->nn_persistent_error;
+ } else if (nn->nn_sc_valid) {
+ kref_get(&nn->nn_sc->sc_kref);
+
+ ret = 1;
+ *sc_ret = nn->nn_sc;
+ *error = 0;
+ }
+ spin_unlock(&nn->nn_lock);
+
+ return ret;
+}
+
+int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
+ size_t caller_veclen, u8 target_node, int *status)
+{
+ int ret, error = 0;
+ struct o2net_msg *msg = NULL;
+ size_t veclen, caller_bytes = 0;
+ struct kvec *vec = NULL;
+ struct o2net_sock_container *sc = NULL;
+ struct o2net_node *nn = o2net_nn_from_num(target_node);
+ struct o2net_status_wait nsw = {
+ .ns_node_item = LIST_HEAD_INIT(nsw.ns_node_item),
+ };
+
+ if (o2net_wq == NULL) {
+ mlog(0, "attempt to tx without o2netd running\n");
+ ret = -ESRCH;
+ goto out;
+ }
+
+ if (caller_veclen == 0) {
+ mlog(0, "bad kvec array length\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ caller_bytes = iov_length((struct iovec *)caller_vec, caller_veclen);
+ if (caller_bytes > O2NET_MAX_PAYLOAD_BYTES) {
+ mlog(0, "total payload len %zu too large\n", caller_bytes);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (target_node == o2nm_this_node()) {
+ ret = -ELOOP;
+ goto out;
+ }
+
+ ret = wait_event_interruptible(nn->nn_sc_wq,
+ o2net_tx_can_proceed(nn, &sc, &error));
+ if (!ret && error)
+ ret = error;
+ if (ret)
+ goto out;
+
+ veclen = caller_veclen + 1;
+ vec = kmalloc(sizeof(struct kvec) * veclen, GFP_ATOMIC);
+ if (vec == NULL) {
+		mlog(0, "failed to allocate a %zu element kvec!\n", veclen);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ msg = kmalloc(sizeof(struct o2net_msg), GFP_ATOMIC);
+ if (!msg) {
+		mlog(0, "failed to allocate an o2net_msg!\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ o2net_init_msg(msg, caller_bytes, msg_type, key);
+
+ vec[0].iov_len = sizeof(struct o2net_msg);
+ vec[0].iov_base = msg;
+ memcpy(&vec[1], caller_vec, caller_veclen * sizeof(struct kvec));
+
+ ret = o2net_prep_nsw(nn, &nsw);
+ if (ret)
+ goto out;
+
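+	/* tag the message with the status-wait id so the status reply from
+	 * the other node can find this nsw again */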
+ msg->msg_num = cpu_to_be32(nsw.ns_id);
+
+	/* the message header is already in network byte order at this
+	 * point; send it along with the caller's payload */
+ ret = o2net_send_tcp_msg(sc->sc_sock, vec, veclen,
+ sizeof(struct o2net_msg) + caller_bytes);
+ msglog(msg, "sending returned %d\n", ret);
+ if (ret < 0) {
+ mlog(0, "error returned from o2net_send_tcp_msg=%d\n", ret);
+ goto out;
+ }
+
+ /* wait on other node's handler */
+ wait_event(nsw.ns_wq, o2net_nsw_completed(nn, &nsw));
+
+	/* Note that we avoid overwriting the caller's status return
+ * variable if a system error was reported on the other
+ * side. Callers beware. */
+ ret = o2net_sys_err_to_errno(nsw.ns_sys_status);
+ if (status && !ret)
+ *status = nsw.ns_status;
+
+ mlog(0, "woken, returning system status %d, user status %d\n",
+ ret, nsw.ns_status);
+out:
+ if (sc)
+ sc_put(sc);
+ if (vec)
+ kfree(vec);
+ if (msg)
+ kfree(msg);
+ o2net_complete_nsw(nn, &nsw, 0, 0, 0);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(o2net_send_message_vec);
+
+int o2net_send_message(u32 msg_type, u32 key, void *data, u32 len,
+ u8 target_node, int *status)
+{
+ struct kvec vec = {
+ .iov_base = data,
+ .iov_len = len,
+ };
+ return o2net_send_message_vec(msg_type, key, &vec, 1,
+ target_node, status);
+}
+EXPORT_SYMBOL_GPL(o2net_send_message);
+
+static int o2net_send_status_magic(struct socket *sock, struct o2net_msg *hdr,
+ enum o2net_system_error syserr, int err)
+{
+ struct kvec vec = {
+ .iov_base = hdr,
+ .iov_len = sizeof(struct o2net_msg),
+ };
+
+ BUG_ON(syserr >= O2NET_ERR_MAX);
+
+ /* leave other fields intact from the incoming message, msg_num
+ * in particular */
+ hdr->sys_status = cpu_to_be32(syserr);
+ hdr->status = cpu_to_be32(err);
+	hdr->magic = cpu_to_be16(O2NET_MSG_STATUS_MAGIC); /* twiddle the magic */
+ hdr->data_len = 0;
+
+ msglog(hdr, "about to send status magic %d\n", err);
+ /* hdr has been in host byteorder this whole time */
+ return o2net_send_tcp_msg(sock, &vec, 1, sizeof(struct o2net_msg));
+}
+
+/* this returns -errno if the header was unknown or too large, etc.
+ * after this is called the buffer is reused for the next message */
+static int o2net_process_message(struct o2net_sock_container *sc,
+ struct o2net_msg *hdr)
+{
+ struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
+ int ret = 0, handler_status;
+ enum o2net_system_error syserr;
+ struct o2net_msg_handler *nmh = NULL;
+
+ msglog(hdr, "processing message\n");
+
+ o2net_sc_postpone_idle(sc);
+
+ switch(be16_to_cpu(hdr->magic)) {
+ case O2NET_MSG_STATUS_MAGIC:
+ /* special type for returning message status */
+ o2net_complete_nsw(nn, NULL,
+ be32_to_cpu(hdr->msg_num),
+ be32_to_cpu(hdr->sys_status),
+ be32_to_cpu(hdr->status));
+ goto out;
+ case O2NET_MSG_KEEP_REQ_MAGIC:
+ o2net_sendpage(sc, o2net_keep_resp,
+ sizeof(*o2net_keep_resp));
+ goto out;
+ case O2NET_MSG_KEEP_RESP_MAGIC:
+ goto out;
+ case O2NET_MSG_MAGIC:
+ break;
+ default:
+ msglog(hdr, "bad magic\n");
+ ret = -EINVAL;
+ goto out;
+ break;
+ }
+
+ /* find a handler for it */
+ handler_status = 0;
+ nmh = o2net_handler_get(be16_to_cpu(hdr->msg_type),
+ be32_to_cpu(hdr->key));
+ if (!nmh) {
+ mlog(ML_TCP, "couldn't find handler for type %u key %08x\n",
+ be16_to_cpu(hdr->msg_type), be32_to_cpu(hdr->key));
+ syserr = O2NET_ERR_NO_HNDLR;
+ goto out_respond;
+ }
+
+ syserr = O2NET_ERR_NONE;
+
+ if (be16_to_cpu(hdr->data_len) > nmh->nh_max_len)
+ syserr = O2NET_ERR_OVERFLOW;
+
+ if (syserr != O2NET_ERR_NONE)
+ goto out_respond;
+
+ do_gettimeofday(&sc->sc_tv_func_start);
+ sc->sc_msg_key = be32_to_cpu(hdr->key);
+ sc->sc_msg_type = be16_to_cpu(hdr->msg_type);
+ handler_status = (nmh->nh_func)(hdr, sizeof(struct o2net_msg) +
+ be16_to_cpu(hdr->data_len),
+ nmh->nh_func_data);
+ do_gettimeofday(&sc->sc_tv_func_stop);
+
+out_respond:
+ /* this destroys the hdr, so don't use it after this */
+ ret = o2net_send_status_magic(sc->sc_sock, hdr, syserr,
+ handler_status);
+ hdr = NULL;
+ mlog(0, "sending handler status %d, syserr %d returned %d\n",
+ handler_status, syserr, ret);
+
+out:
+ if (nmh)
+ o2net_handler_put(nmh);
+ return ret;
+}
+
+static int o2net_check_handshake(struct o2net_sock_container *sc)
+{
+ struct o2net_handshake *hand = page_address(sc->sc_page);
+ struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
+
+ if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) {
+ mlog(ML_NOTICE, SC_NODEF_FMT " advertised net protocol "
+ "version %llu but %llu is required, disconnecting\n",
+ SC_NODEF_ARGS(sc),
+ (unsigned long long)be64_to_cpu(hand->protocol_version),
+ O2NET_PROTOCOL_VERSION);
+
+		/* don't bother reconnecting if it's the wrong version. */
+ o2net_ensure_shutdown(nn, sc, -ENOTCONN);
+ return -1;
+ }
+
+ sc->sc_handshake_ok = 1;
+
+ spin_lock(&nn->nn_lock);
+ /* set valid and queue the idle timers only if it hasn't been
+ * shut down already */
+ if (nn->nn_sc == sc) {
+ o2net_sc_postpone_idle(sc);
+ o2net_set_nn_state(nn, sc, 1, 0);
+ }
+ spin_unlock(&nn->nn_lock);
+
+ /* shift everything up as though it wasn't there */
+ sc->sc_page_off -= sizeof(struct o2net_handshake);
+ if (sc->sc_page_off)
+ memmove(hand, hand + 1, sc->sc_page_off);
+
+ return 0;
+}
+
+/* this demuxes the queued rx bytes into header or payload bits and calls
+ * handlers as each full message is read off the socket. it returns -error,
+ * == 0 eof, or > 0 for progress made.*/
+static int o2net_advance_rx(struct o2net_sock_container *sc)
+{
+ struct o2net_msg *hdr;
+ int ret = 0;
+ void *data;
+ size_t datalen;
+
+ sclog(sc, "receiving\n");
+ do_gettimeofday(&sc->sc_tv_advance_start);
+
+ /* do we need more header? */
+ if (sc->sc_page_off < sizeof(struct o2net_msg)) {
+ data = page_address(sc->sc_page) + sc->sc_page_off;
+ datalen = sizeof(struct o2net_msg) - sc->sc_page_off;
+ ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen);
+ if (ret > 0) {
+ sc->sc_page_off += ret;
+
+			/* this only works because the handshake is
+			 * smaller than the normal message header */
+ if (sc->sc_page_off >= sizeof(struct o2net_handshake)&&
+ !sc->sc_handshake_ok && o2net_check_handshake(sc)) {
+ ret = -EPROTO;
+ goto out;
+ }
+
+ /* only swab incoming here.. we can
+ * only get here once as we cross from
+ * being under to over */
+ if (sc->sc_page_off == sizeof(struct o2net_msg)) {
+ hdr = page_address(sc->sc_page);
+ if (be16_to_cpu(hdr->data_len) >
+ O2NET_MAX_PAYLOAD_BYTES)
+ ret = -EOVERFLOW;
+ }
+ }
+ if (ret <= 0)
+ goto out;
+ }
+
+ if (sc->sc_page_off < sizeof(struct o2net_msg)) {
+ /* oof, still don't have a header */
+ goto out;
+ }
+
+ /* this was swabbed above when we first read it */
+ hdr = page_address(sc->sc_page);
+
+ msglog(hdr, "at page_off %zu\n", sc->sc_page_off);
+
+ /* do we need more payload? */
+ if (sc->sc_page_off - sizeof(struct o2net_msg) < be16_to_cpu(hdr->data_len)) {
+ /* need more payload */
+ data = page_address(sc->sc_page) + sc->sc_page_off;
+ datalen = (sizeof(struct o2net_msg) + be16_to_cpu(hdr->data_len)) -
+ sc->sc_page_off;
+ ret = o2net_recv_tcp_msg(sc->sc_sock, data, datalen);
+ if (ret > 0)
+ sc->sc_page_off += ret;
+ if (ret <= 0)
+ goto out;
+ }
+
+ if (sc->sc_page_off - sizeof(struct o2net_msg) == be16_to_cpu(hdr->data_len)) {
+ /* we can only get here once, the first time we read
+ * the payload.. so set ret to progress if the handler
+ * works out. after calling this the message is toast */
+ ret = o2net_process_message(sc, hdr);
+ if (ret == 0)
+ ret = 1;
+ sc->sc_page_off = 0;
+ }
+
+out:
+ sclog(sc, "ret = %d\n", ret);
+ do_gettimeofday(&sc->sc_tv_advance_stop);
+ return ret;
+}
+
+/* this work func is triggered by data ready.  it reads until it can read no
+ * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing
+ * our work the work struct will be marked and we'll be called again. */
+static void o2net_rx_until_empty(void *arg)
+{
+ struct o2net_sock_container *sc = arg;
+ int ret;
+
+ do {
+ ret = o2net_advance_rx(sc);
+ } while (ret > 0);
+
+ if (ret <= 0 && ret != -EAGAIN) {
+ struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
+ sclog(sc, "saw error %d, closing\n", ret);
+		/* not a persistent error, so a failed handshake read can retry */
+ o2net_ensure_shutdown(nn, sc, 0);
+ }
+
+ sc_put(sc);
+}
+
+static int o2net_set_nodelay(struct socket *sock)
+{
+ int ret, val = 1;
+ mm_segment_t oldfs;
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+
+ /*
+ * Dear unsuspecting programmer,
+ *
+ * Don't use sock_setsockopt() for SOL_TCP. It doesn't check its level
+ * argument and assumes SOL_SOCKET so, say, your TCP_NODELAY will
+ * silently turn into SO_DEBUG.
+ *
+ * Yours,
+ * Keeper of hilariously fragile interfaces.
+ */
+ ret = sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY,
+ (char __user *)&val, sizeof(val));
+
+ set_fs(oldfs);
+ return ret;
+}
+
+/* ------------------------------------------------------------ */
+
+/* called when a connect completes and after a sock is accepted. the
+ * rx path will see the response and mark the sc valid */
+static void o2net_sc_connect_completed(void *arg)
+{
+ struct o2net_sock_container *sc = arg;
+
+ mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
+ (unsigned long long)O2NET_PROTOCOL_VERSION,
+ (unsigned long long)be64_to_cpu(o2net_hand->connector_id));
+
+ o2net_sendpage(sc, o2net_hand, sizeof(*o2net_hand));
+ sc_put(sc);
+}
+
+/* this is called as a work_struct func. */
+static void o2net_sc_send_keep_req(void *arg)
+{
+ struct o2net_sock_container *sc = arg;
+
+ o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req));
+ sc_put(sc);
+}
+
+/* socket shutdown does a del_timer_sync against this as it tears down.
+ * we can't start this timer until we've got to the point in sc buildup
+ * where shutdown is going to be involved */
+static void o2net_idle_timer(unsigned long data)
+{
+ struct o2net_sock_container *sc = (struct o2net_sock_container *)data;
+ struct timeval now;
+
+ do_gettimeofday(&now);
+
+ mlog(ML_NOTICE, "connection to " SC_NODEF_FMT " has been idle for 10 "
+ "seconds, shutting it down.\n", SC_NODEF_ARGS(sc));
+ mlog(ML_NOTICE, "here are some times that might help debug the "
+ "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
+ "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n",
+ sc->sc_tv_timer.tv_sec, sc->sc_tv_timer.tv_usec,
+ now.tv_sec, now.tv_usec,
+ sc->sc_tv_data_ready.tv_sec, sc->sc_tv_data_ready.tv_usec,
+ sc->sc_tv_advance_start.tv_sec, sc->sc_tv_advance_start.tv_usec,
+ sc->sc_tv_advance_stop.tv_sec, sc->sc_tv_advance_stop.tv_usec,
+ sc->sc_msg_key, sc->sc_msg_type,
+ sc->sc_tv_func_start.tv_sec, sc->sc_tv_func_start.tv_usec,
+ sc->sc_tv_func_stop.tv_sec, sc->sc_tv_func_stop.tv_usec);
+
+ o2net_sc_queue_work(sc, &sc->sc_shutdown_work);
+}
+
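+/* called whenever we see traffic from the peer; push back both the keepalive
+ * transmit and the idle-timeout teardown */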
+static void o2net_sc_postpone_idle(struct o2net_sock_container *sc)
+{
+ o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
+ o2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work,
+ O2NET_KEEPALIVE_DELAY_SECS * HZ);
+ do_gettimeofday(&sc->sc_tv_timer);
+ mod_timer(&sc->sc_idle_timeout,
+ jiffies + (O2NET_IDLE_TIMEOUT_SECS * HZ));
+}
+
+/* this work func is kicked whenever a path sets the nn state which doesn't
+ * have valid set. This includes seeing hb come up, losing a connection,
+ * having a connect attempt fail, etc. This centralizes the logic which decides
+ * if a connect attempt should be made or if we should give up and all future
+ * transmit attempts should fail */
+static void o2net_start_connect(void *arg)
+{
+ struct o2net_node *nn = arg;
+ struct o2net_sock_container *sc = NULL;
+ struct o2nm_node *node = NULL;
+ struct socket *sock = NULL;
+ struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
+ int ret = 0;
+
+ /* if we're greater we initiate tx, otherwise we accept */
+ if (o2nm_this_node() <= o2net_num_from_nn(nn))
+ goto out;
+
+ /* watch for racing with tearing a node down */
+ node = o2nm_get_node_by_num(o2net_num_from_nn(nn));
+ if (node == NULL) {
+ ret = 0;
+ goto out;
+ }
+
+ spin_lock(&nn->nn_lock);
+ /* see if we already have one pending or have given up */
+ if (nn->nn_sc || nn->nn_persistent_error)
+ arg = NULL;
+ spin_unlock(&nn->nn_lock);
+ if (arg == NULL) /* *shrug*, needed some indicator */
+ goto out;
+
+ nn->nn_last_connect_attempt = jiffies;
+
+ sc = sc_alloc(node);
+ if (sc == NULL) {
+ mlog(0, "couldn't allocate sc\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+ if (ret < 0) {
+ mlog(0, "can't create socket: %d\n", ret);
+ goto out;
+ }
+ sc->sc_sock = sock; /* freed by sc_kref_release */
+
+ sock->sk->sk_allocation = GFP_ATOMIC;
+
+ myaddr.sin_family = AF_INET;
+ myaddr.sin_port = (__force u16)htons(0); /* any port */
+
+ ret = sock->ops->bind(sock, (struct sockaddr *)&myaddr,
+ sizeof(myaddr));
+ if (ret) {
+ mlog(0, "bind failed: %d\n", ret);
+ goto out;
+ }
+
+ ret = o2net_set_nodelay(sc->sc_sock);
+ if (ret) {
+ mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret);
+ goto out;
+ }
+
+ o2net_register_callbacks(sc->sc_sock->sk, sc);
+
+ spin_lock(&nn->nn_lock);
+ /* handshake completion will set nn->nn_sc_valid */
+ o2net_set_nn_state(nn, sc, 0, 0);
+ spin_unlock(&nn->nn_lock);
+
+ remoteaddr.sin_family = AF_INET;
+ remoteaddr.sin_addr.s_addr = (__force u32)node->nd_ipv4_address;
+ remoteaddr.sin_port = (__force u16)node->nd_ipv4_port;
+
+ ret = sc->sc_sock->ops->connect(sc->sc_sock,
+ (struct sockaddr *)&remoteaddr,
+ sizeof(remoteaddr),
+ O_NONBLOCK);
+ if (ret == -EINPROGRESS)
+ ret = 0;
+
+out:
+ if (ret) {
+ mlog(ML_NOTICE, "connect attempt to " SC_NODEF_FMT " failed "
+ "with errno %d\n", SC_NODEF_ARGS(sc), ret);
+ /* 0 err so that another will be queued and attempted
+ * from set_nn_state */
+ if (sc)
+ o2net_ensure_shutdown(nn, sc, 0);
+ }
+ if (sc)
+ sc_put(sc);
+ if (node)
+ o2nm_node_put(node);
+
+ return;
+}
+
+static void o2net_connect_expired(void *arg)
+{
+ struct o2net_node *nn = arg;
+
+ spin_lock(&nn->nn_lock);
+ if (!nn->nn_sc_valid) {
+ mlog(ML_ERROR, "no connection established with node %u after "
+ "%u seconds, giving up and returning errors.\n",
+ o2net_num_from_nn(nn), O2NET_IDLE_TIMEOUT_SECS);
+
+ o2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
+ }
+ spin_unlock(&nn->nn_lock);
+}
+
+static void o2net_still_up(void *arg)
+{
+ struct o2net_node *nn = arg;
+
+ o2quo_hb_still_up(o2net_num_from_nn(nn));
+}
+
+/* ------------------------------------------------------------ */
+
+void o2net_disconnect_node(struct o2nm_node *node)
+{
+ struct o2net_node *nn = o2net_nn_from_num(node->nd_num);
+
+ /* don't reconnect until it's heartbeating again */
+ spin_lock(&nn->nn_lock);
+ o2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
+ spin_unlock(&nn->nn_lock);
+
+ if (o2net_wq) {
+ cancel_delayed_work(&nn->nn_connect_expired);
+ cancel_delayed_work(&nn->nn_connect_work);
+ cancel_delayed_work(&nn->nn_still_up);
+ flush_workqueue(o2net_wq);
+ }
+}
+
+static void o2net_hb_node_down_cb(struct o2nm_node *node, int node_num,
+ void *data)
+{
+ o2quo_hb_down(node_num);
+
+ if (node_num != o2nm_this_node())
+ o2net_disconnect_node(node);
+}
+
+static void o2net_hb_node_up_cb(struct o2nm_node *node, int node_num,
+ void *data)
+{
+ struct o2net_node *nn = o2net_nn_from_num(node_num);
+
+ o2quo_hb_up(node_num);
+
+ /* ensure an immediate connect attempt */
+ nn->nn_last_connect_attempt = jiffies -
+ (msecs_to_jiffies(O2NET_RECONNECT_DELAY_MS) + 1);
+
+ if (node_num != o2nm_this_node()) {
+ /* heartbeat doesn't work unless a local node number is
+ * configured and doing so brings up the o2net_wq, so we can
+ * use it.. */
+ queue_delayed_work(o2net_wq, &nn->nn_connect_expired,
+ O2NET_IDLE_TIMEOUT_SECS * HZ);
+
+		/* believe it or not, accept and the node heartbeating check
+		 * can succeed for this node before we get here.. so
+ * only use set_nn_state to clear the persistent error
+ * if that hasn't already happened */
+ spin_lock(&nn->nn_lock);
+ if (nn->nn_persistent_error)
+ o2net_set_nn_state(nn, NULL, 0, 0);
+ spin_unlock(&nn->nn_lock);
+ }
+}
+
+void o2net_unregister_hb_callbacks(void)
+{
+ int ret;
+
+ ret = o2hb_unregister_callback(&o2net_hb_up);
+ if (ret < 0)
+ mlog(ML_ERROR, "Status return %d unregistering heartbeat up "
+ "callback!\n", ret);
+
+ ret = o2hb_unregister_callback(&o2net_hb_down);
+ if (ret < 0)
+ mlog(ML_ERROR, "Status return %d unregistering heartbeat down "
+ "callback!\n", ret);
+}
+
+int o2net_register_hb_callbacks(void)
+{
+ int ret;
+
+ o2hb_setup_callback(&o2net_hb_down, O2HB_NODE_DOWN_CB,
+ o2net_hb_node_down_cb, NULL, O2NET_HB_PRI);
+ o2hb_setup_callback(&o2net_hb_up, O2HB_NODE_UP_CB,
+ o2net_hb_node_up_cb, NULL, O2NET_HB_PRI);
+
+ ret = o2hb_register_callback(&o2net_hb_up);
+ if (ret == 0)
+ ret = o2hb_register_callback(&o2net_hb_down);
+
+ if (ret)
+ o2net_unregister_hb_callbacks();
+
+ return ret;
+}
+
+/* ------------------------------------------------------------ */
+
+static int o2net_accept_one(struct socket *sock)
+{
+ int ret, slen;
+ struct sockaddr_in sin;
+ struct socket *new_sock = NULL;
+ struct o2nm_node *node = NULL;
+ struct o2net_sock_container *sc = NULL;
+ struct o2net_node *nn;
+
+ BUG_ON(sock == NULL);
+ ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
+ sock->sk->sk_protocol, &new_sock);
+ if (ret)
+ goto out;
+
+ new_sock->type = sock->type;
+ new_sock->ops = sock->ops;
+ ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
+ if (ret < 0)
+ goto out;
+
+ new_sock->sk->sk_allocation = GFP_ATOMIC;
+
+ ret = o2net_set_nodelay(new_sock);
+ if (ret) {
+ mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret);
+ goto out;
+ }
+
+ slen = sizeof(sin);
+ ret = new_sock->ops->getname(new_sock, (struct sockaddr *) &sin,
+ &slen, 1);
+ if (ret < 0)
+ goto out;
+
+ node = o2nm_get_node_by_ip((__force __be32)sin.sin_addr.s_addr);
+ if (node == NULL) {
+ mlog(ML_NOTICE, "attempt to connect from unknown node at "
+ "%u.%u.%u.%u:%d\n", NIPQUAD(sin.sin_addr.s_addr),
+ ntohs((__force __be16)sin.sin_port));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (o2nm_this_node() > node->nd_num) {
+ mlog(ML_NOTICE, "unexpected connect attempted from a lower "
+ "numbered node '%s' at " "%u.%u.%u.%u:%d with num %u\n",
+ node->nd_name, NIPQUAD(sin.sin_addr.s_addr),
+ ntohs((__force __be16)sin.sin_port), node->nd_num);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* this happens all the time when the other node sees our heartbeat
+ * and tries to connect before we see their heartbeat */
+ if (!o2hb_check_node_heartbeating_from_callback(node->nd_num)) {
+ mlog(ML_CONN, "attempt to connect from node '%s' at "
+ "%u.%u.%u.%u:%d but it isn't heartbeating\n",
+ node->nd_name, NIPQUAD(sin.sin_addr.s_addr),
+ ntohs((__force __be16)sin.sin_port));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ nn = o2net_nn_from_num(node->nd_num);
+
+ spin_lock(&nn->nn_lock);
+ if (nn->nn_sc)
+ ret = -EBUSY;
+ else
+ ret = 0;
+ spin_unlock(&nn->nn_lock);
+ if (ret) {
+ mlog(ML_NOTICE, "attempt to connect from node '%s' at "
+ "%u.%u.%u.%u:%d but it already has an open connection\n",
+ node->nd_name, NIPQUAD(sin.sin_addr.s_addr),
+ ntohs((__force __be16)sin.sin_port));
+ goto out;
+ }
+
+ sc = sc_alloc(node);
+ if (sc == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ sc->sc_sock = new_sock;
+ new_sock = NULL;
+
+ spin_lock(&nn->nn_lock);
+ o2net_set_nn_state(nn, sc, 0, 0);
+ spin_unlock(&nn->nn_lock);
+
+ o2net_register_callbacks(sc->sc_sock->sk, sc);
+ o2net_sc_queue_work(sc, &sc->sc_rx_work);
+
+ o2net_sendpage(sc, o2net_hand, sizeof(*o2net_hand));
+
+out:
+ if (new_sock)
+ sock_release(new_sock);
+ if (node)
+ o2nm_node_put(node);
+ if (sc)
+ sc_put(sc);
+ return ret;
+}
+
+static void o2net_accept_many(void *arg)
+{
+ struct socket *sock = arg;
+ while (o2net_accept_one(sock) == 0)
+ cond_resched();
+}
+
+static void o2net_listen_data_ready(struct sock *sk, int bytes)
+{
+ void (*ready)(struct sock *sk, int bytes);
+
+ read_lock(&sk->sk_callback_lock);
+ ready = sk->sk_user_data;
+ if (ready == NULL) { /* check for teardown race */
+ ready = sk->sk_data_ready;
+ goto out;
+ }
+
+ /* ->sk_data_ready is also called for a newly established child socket
+ * before it has been accepted and the acceptor has set up their
+ * data_ready.. we only want to queue listen work for our listening
+ * socket */
+ if (sk->sk_state == TCP_LISTEN) {
+ mlog(ML_TCP, "bytes: %d\n", bytes);
+ queue_work(o2net_wq, &o2net_listen_work);
+ }
+
+out:
+ read_unlock(&sk->sk_callback_lock);
+ ready(sk, bytes);
+}
+
+static int o2net_open_listening_sock(__be16 port)
+{
+ struct socket *sock = NULL;
+ int ret;
+ struct sockaddr_in sin = {
+ .sin_family = PF_INET,
+ .sin_addr = { .s_addr = (__force u32)htonl(INADDR_ANY) },
+ .sin_port = (__force u16)port,
+ };
+
+ ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+ if (ret < 0) {
+ mlog(ML_ERROR, "unable to create socket, ret=%d\n", ret);
+ goto out;
+ }
+
+ sock->sk->sk_allocation = GFP_ATOMIC;
+
+ write_lock_bh(&sock->sk->sk_callback_lock);
+ sock->sk->sk_user_data = sock->sk->sk_data_ready;
+ sock->sk->sk_data_ready = o2net_listen_data_ready;
+ write_unlock_bh(&sock->sk->sk_callback_lock);
+
+ o2net_listen_sock = sock;
+ INIT_WORK(&o2net_listen_work, o2net_accept_many, sock);
+
+ sock->sk->sk_reuse = 1;
+ ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
+ if (ret < 0) {
+ mlog(ML_ERROR, "unable to bind socket to port %d, ret=%d\n",
+ ntohs(port), ret);
+ goto out;
+ }
+
+ ret = sock->ops->listen(sock, 64);
+ if (ret < 0) {
+ mlog(ML_ERROR, "unable to listen on port %d, ret=%d\n",
+ ntohs(port), ret);
+ }
+
+out:
+ if (ret) {
+ o2net_listen_sock = NULL;
+ if (sock)
+ sock_release(sock);
+ }
+ return ret;
+}
+
+/*
+ * called from node manager when we should bring up our network listening
+ * socket. node manager handles all the serialization to only call this
+ * once and to match it with o2net_stop_listening(). note,
+ * o2nm_this_node() doesn't work yet as we're being called while it
+ * is being set up.
+ */
+int o2net_start_listening(struct o2nm_node *node)
+{
+ int ret = 0;
+
+ BUG_ON(o2net_wq != NULL);
+ BUG_ON(o2net_listen_sock != NULL);
+
+ mlog(ML_KTHREAD, "starting o2net thread...\n");
+ o2net_wq = create_singlethread_workqueue("o2net");
+ if (o2net_wq == NULL) {
+ mlog(ML_ERROR, "unable to launch o2net thread\n");
+ return -ENOMEM; /* ? */
+ }
+
+ ret = o2net_open_listening_sock(node->nd_ipv4_port);
+ if (ret) {
+ destroy_workqueue(o2net_wq);
+ o2net_wq = NULL;
+ } else
+ o2quo_conn_up(node->nd_num);
+
+ return ret;
+}
+
+/* again, o2nm_this_node() doesn't work here as we're involved in
+ * tearing it down */
+void o2net_stop_listening(struct o2nm_node *node)
+{
+ struct socket *sock = o2net_listen_sock;
+ size_t i;
+
+ BUG_ON(o2net_wq == NULL);
+ BUG_ON(o2net_listen_sock == NULL);
+
+ /* stop the listening socket from generating work */
+ write_lock_bh(&sock->sk->sk_callback_lock);
+ sock->sk->sk_data_ready = sock->sk->sk_user_data;
+ sock->sk->sk_user_data = NULL;
+ write_unlock_bh(&sock->sk->sk_callback_lock);
+
+ for (i = 0; i < ARRAY_SIZE(o2net_nodes); i++) {
+ struct o2nm_node *node = o2nm_get_node_by_num(i);
+ if (node) {
+ o2net_disconnect_node(node);
+ o2nm_node_put(node);
+ }
+ }
+
+ /* finish all work and tear down the work queue */
+ mlog(ML_KTHREAD, "waiting for o2net thread to exit....\n");
+ destroy_workqueue(o2net_wq);
+ o2net_wq = NULL;
+
+ sock_release(o2net_listen_sock);
+ o2net_listen_sock = NULL;
+
+ o2quo_conn_err(node->nd_num);
+}
+
+/* ------------------------------------------------------------ */
+
+int o2net_init(void)
+{
+ unsigned long i;
+
+ o2quo_init();
+
+ o2net_hand = kcalloc(1, sizeof(struct o2net_handshake), GFP_KERNEL);
+ o2net_keep_req = kcalloc(1, sizeof(struct o2net_msg), GFP_KERNEL);
+ o2net_keep_resp = kcalloc(1, sizeof(struct o2net_msg), GFP_KERNEL);
+ if (!o2net_hand || !o2net_keep_req || !o2net_keep_resp) {
+ kfree(o2net_hand);
+ kfree(o2net_keep_req);
+ kfree(o2net_keep_resp);
+ return -ENOMEM;
+ }
+
+ o2net_hand->protocol_version = cpu_to_be64(O2NET_PROTOCOL_VERSION);
+ o2net_hand->connector_id = cpu_to_be64(1);
+
+ o2net_keep_req->magic = cpu_to_be16(O2NET_MSG_KEEP_REQ_MAGIC);
+ o2net_keep_resp->magic = cpu_to_be16(O2NET_MSG_KEEP_RESP_MAGIC);
+
+ for (i = 0; i < ARRAY_SIZE(o2net_nodes); i++) {
+ struct o2net_node *nn = o2net_nn_from_num(i);
+
+ spin_lock_init(&nn->nn_lock);
+ INIT_WORK(&nn->nn_connect_work, o2net_start_connect, nn);
+ INIT_WORK(&nn->nn_connect_expired, o2net_connect_expired, nn);
+ INIT_WORK(&nn->nn_still_up, o2net_still_up, nn);
+ /* until we see hb from a node we'll return einval */
+ nn->nn_persistent_error = -ENOTCONN;
+ init_waitqueue_head(&nn->nn_sc_wq);
+ idr_init(&nn->nn_status_idr);
+ INIT_LIST_HEAD(&nn->nn_status_list);
+ }
+
+ return 0;
+}
+
+void o2net_exit(void)
+{
+ o2quo_exit();
+ kfree(o2net_hand);
+ kfree(o2net_keep_req);
+ kfree(o2net_keep_resp);
+}
diff --git a/fs/ocfs2/cluster/tcp.h b/fs/ocfs2/cluster/tcp.h
new file mode 100644
index 00000000000..a6f4585501c
--- /dev/null
+++ b/fs/ocfs2/cluster/tcp.h
@@ -0,0 +1,113 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * tcp.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+#ifndef O2CLUSTER_TCP_H
+#define O2CLUSTER_TCP_H
+
+#include <linux/socket.h>
+#ifdef __KERNEL__
+#include <net/sock.h>
+#include <linux/tcp.h>
+#else
+#include <sys/socket.h>
+#endif
+#include <linux/inet.h>
+#include <linux/in.h>
+
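+/* wire header preceding every o2net message; all fields are big-endian on
+ * the wire and up to O2NET_MAX_PAYLOAD_BYTES of payload follows in buf[] */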
+struct o2net_msg
+{
+ __be16 magic;
+ __be16 data_len;
+ __be16 msg_type;
+ __be16 pad1;
+ __be32 sys_status;
+ __be32 status;
+ __be32 key;
+ __be32 msg_num;
+ __u8 buf[0];
+};
+
+typedef int (o2net_msg_handler_func)(struct o2net_msg *msg, u32 len, void *data);
+
+#define O2NET_MAX_PAYLOAD_BYTES (4096 - sizeof(struct o2net_msg))
+
+/* TODO: figure this out.... */
+static inline int o2net_link_down(int err, struct socket *sock)
+{
+ if (sock) {
+ if (sock->sk->sk_state != TCP_ESTABLISHED &&
+ sock->sk->sk_state != TCP_CLOSE_WAIT)
+ return 1;
+ }
+
+ if (err >= 0)
+ return 0;
+ switch (err) {
+ /* ????????????????????????? */
+ case -ERESTARTSYS:
+ case -EBADF:
+ /* When the server has died, an ICMP port unreachable
+ * message prompts ECONNREFUSED. */
+ case -ECONNREFUSED:
+ case -ENOTCONN:
+ case -ECONNRESET:
+ case -EPIPE:
+ return 1;
+ }
+ return 0;
+}
+
+enum {
+ O2NET_DRIVER_UNINITED,
+ O2NET_DRIVER_READY,
+};
+
+int o2net_init_tcp_sock(struct inode *inode);
+int o2net_send_message(u32 msg_type, u32 key, void *data, u32 len,
+ u8 target_node, int *status);
+int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *vec,
+ size_t veclen, u8 target_node, int *status);
+int o2net_broadcast_message(u32 msg_type, u32 key, void *data, u32 len,
+ struct inode *group);
+
+int o2net_register_handler(u32 msg_type, u32 key, u32 max_len,
+ o2net_msg_handler_func *func, void *data,
+ struct list_head *unreg_list);
+void o2net_unregister_handler_list(struct list_head *list);
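+/*
+ * Purely illustrative sketch of how a subsystem might use the handler API;
+ * MY_MSG_TYPE, MY_KEY, struct my_payload and my_handler() are hypothetical
+ * names, not part of this interface (real callers must also check the
+ * register return value):
+ *
+ *	static int my_handler(struct o2net_msg *msg, u32 len, void *data);
+ *	static LIST_HEAD(my_unreg_list);
+ *
+ *	o2net_register_handler(MY_MSG_TYPE, MY_KEY,
+ *			       sizeof(struct my_payload),
+ *			       my_handler, NULL, &my_unreg_list);
+ *	...
+ *	o2net_unregister_handler_list(&my_unreg_list);
+ */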
+
+struct o2nm_node;
+int o2net_register_hb_callbacks(void);
+void o2net_unregister_hb_callbacks(void);
+int o2net_start_listening(struct o2nm_node *node);
+void o2net_stop_listening(struct o2nm_node *node);
+void o2net_disconnect_node(struct o2nm_node *node);
+
+int o2net_init(void);
+void o2net_exit(void);
+int o2net_proc_init(struct proc_dir_entry *parent);
+void o2net_proc_exit(struct proc_dir_entry *parent);
+
+#endif /* O2CLUSTER_TCP_H */
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
new file mode 100644
index 00000000000..ff9e2e2104c
--- /dev/null
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -0,0 +1,174 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef O2CLUSTER_TCP_INTERNAL_H
+#define O2CLUSTER_TCP_INTERNAL_H
+
+#define O2NET_MSG_MAGIC ((u16)0xfa55)
+#define O2NET_MSG_STATUS_MAGIC ((u16)0xfa56)
+#define O2NET_MSG_KEEP_REQ_MAGIC ((u16)0xfa57)
+#define O2NET_MSG_KEEP_RESP_MAGIC ((u16)0xfa58)
+
+/* same as hb delay, we're waiting for another node to recognize our hb */
+#define O2NET_RECONNECT_DELAY_MS O2HB_REGION_TIMEOUT_MS
+
+/* we're delaying our quorum decision so that heartbeat will have timed
+ * out truly dead nodes by the time we come around to making decisions
+ * on their number */
+#define O2NET_QUORUM_DELAY_MS ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS)
+
+#define O2NET_KEEPALIVE_DELAY_SECS 5
+#define O2NET_IDLE_TIMEOUT_SECS 10
+
+/*
+ * This version number represents quite a lot, unfortunately. It not
+ * only represents the raw network message protocol on the wire but also
+ * locking semantics of the file system using the protocol. It should
+ * be somewhere else, I'm sure, but right now it isn't.
+ *
+ * New in version 2:
+ * - full 64 bit i_size in the metadata lock lvbs
+ * - introduction of "rw" lock and pushing meta/data locking down
+ */
+#define O2NET_PROTOCOL_VERSION 2ULL
+struct o2net_handshake {
+ __be64 protocol_version;
+ __be64 connector_id;
+};
+
+struct o2net_node {
+ /* this is never called from int/bh */
+ spinlock_t nn_lock;
+
+ /* set the moment an sc is allocated and a connect is started */
+ struct o2net_sock_container *nn_sc;
+ /* _valid is only set after the handshake passes and tx can happen */
+ unsigned nn_sc_valid:1;
+ /* if this is set tx just returns it */
+ int nn_persistent_error;
+
+	/* threads waiting for an sc to arrive wait on the wq.  it is woken
+	 * when a connecting socket succeeds or fails or when an accepted
+	 * socket is attached. */
+ wait_queue_head_t nn_sc_wq;
+
+ struct idr nn_status_idr;
+ struct list_head nn_status_list;
+
+ /* connects are attempted from when heartbeat comes up until either hb
+	 * goes down, the node is unconfigured, no connect attempt succeeds
+	 * before the connect_expired timeout fires, or a connect succeeds. connect_work
+ * is queued from set_nn_state both from hb up and from itself if a
+ * connect attempt fails and so can be self-arming. shutdown is
+ * careful to first mark the nn such that no connects will be attempted
+ * before canceling delayed connect work and flushing the queue. */
+ struct work_struct nn_connect_work;
+ unsigned long nn_last_connect_attempt;
+
+ /* this is queued as nodes come up and is canceled when a connection is
+ * established. this expiring gives up on the node and errors out
+ * transmits */
+ struct work_struct nn_connect_expired;
+
+ /* after we give up on a socket we wait a while before deciding
+ * that it is still heartbeating and that we should do some
+ * quorum work */
+ struct work_struct nn_still_up;
+};
+
+struct o2net_sock_container {
+ struct kref sc_kref;
+	/* the next two are valid for the lifetime of the sc */
+ struct socket *sc_sock;
+ struct o2nm_node *sc_node;
+
+ /* all of these sc work structs hold refs on the sc while they are
+ * queued. they should not be able to ref a freed sc. the teardown
+ * race is with o2net_wq destruction in o2net_stop_listening() */
+
+ /* rx and connect work are generated from socket callbacks. sc
+ * shutdown removes the callbacks and then flushes the work queue */
+ struct work_struct sc_rx_work;
+ struct work_struct sc_connect_work;
+ /* shutdown work is triggered in two ways. the simple way is
+	 * for a code path to call ensure_shutdown, which gets a lock, removes
+ * the sc from the nn, and queues the work. in this case the
+ * work is single-shot. the work is also queued from a sock
+ * callback, though, and in this case the work will find the sc
+ * still on the nn and will call ensure_shutdown itself.. this
+ * ends up triggering the shutdown work again, though nothing
+ * will be done in that second iteration. so work queue teardown
+ * has to be careful to remove the sc from the nn before waiting
+ * on the work queue so that the shutdown work doesn't remove the
+ * sc and rearm itself.
+ */
+ struct work_struct sc_shutdown_work;
+
+ struct timer_list sc_idle_timeout;
+ struct work_struct sc_keepalive_work;
+
+ unsigned sc_handshake_ok:1;
+
+ struct page *sc_page;
+ size_t sc_page_off;
+
+ /* original handlers for the sockets */
+ void (*sc_state_change)(struct sock *sk);
+ void (*sc_data_ready)(struct sock *sk, int bytes);
+
+ struct timeval sc_tv_timer;
+ struct timeval sc_tv_data_ready;
+ struct timeval sc_tv_advance_start;
+ struct timeval sc_tv_advance_stop;
+ struct timeval sc_tv_func_start;
+ struct timeval sc_tv_func_stop;
+ u32 sc_msg_key;
+ u16 sc_msg_type;
+};
+
+struct o2net_msg_handler {
+ struct rb_node nh_node;
+ u32 nh_max_len;
+ u32 nh_msg_type;
+ u32 nh_key;
+ o2net_msg_handler_func *nh_func;
+ o2net_msg_handler_func *nh_func_data;
+ struct kref nh_kref;
+ struct list_head nh_unregister_item;
+};
+
+enum o2net_system_error {
+ O2NET_ERR_NONE = 0,
+ O2NET_ERR_NO_HNDLR,
+ O2NET_ERR_OVERFLOW,
+ O2NET_ERR_DIED,
+ O2NET_ERR_MAX
+};
+
+struct o2net_status_wait {
+ enum o2net_system_error ns_sys_status;
+ s32 ns_status;
+ int ns_id;
+ wait_queue_head_t ns_wq;
+ struct list_head ns_node_item;
+};
+
+#endif /* O2CLUSTER_TCP_INTERNAL_H */
diff --git a/fs/ocfs2/cluster/ver.c b/fs/ocfs2/cluster/ver.c
new file mode 100644
index 00000000000..7286c48bb30
--- /dev/null
+++ b/fs/ocfs2/cluster/ver.c
@@ -0,0 +1,42 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * ver.c
+ *
+ * version string
+ *
+ * Copyright (C) 2002, 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "ver.h"
+
+#define CLUSTER_BUILD_VERSION "1.3.3"
+
+#define VERSION_STR "OCFS2 Node Manager " CLUSTER_BUILD_VERSION
+
+void cluster_print_version(void)
+{
+ printk(KERN_INFO "%s\n", VERSION_STR);
+}
+
+MODULE_DESCRIPTION(VERSION_STR);
+
+MODULE_VERSION(CLUSTER_BUILD_VERSION);
diff --git a/fs/ocfs2/cluster/ver.h b/fs/ocfs2/cluster/ver.h
new file mode 100644
index 00000000000..32554c3382c
--- /dev/null
+++ b/fs/ocfs2/cluster/ver.h
@@ -0,0 +1,31 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * ver.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef O2CLUSTER_VER_H
+#define O2CLUSTER_VER_H
+
+void cluster_print_version(void);
+
+#endif /* O2CLUSTER_VER_H */
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
new file mode 100644
index 00000000000..bd85182e97b
--- /dev/null
+++ b/fs/ocfs2/dcache.c
@@ -0,0 +1,91 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dcache.c
+ *
+ * dentry cache handling code
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/namei.h>
+
+#define MLOG_MASK_PREFIX ML_DCACHE
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#include "alloc.h"
+#include "dcache.h"
+#include "file.h"
+#include "inode.h"
+
+static int ocfs2_dentry_revalidate(struct dentry *dentry,
+ struct nameidata *nd)
+{
+ struct inode *inode = dentry->d_inode;
+ int ret = 0; /* if all else fails, just return false */
+ struct ocfs2_super *osb;
+
+ mlog_entry("(0x%p, '%.*s')\n", dentry,
+ dentry->d_name.len, dentry->d_name.name);
+
+ /* Never trust a negative dentry - force a new lookup. */
+ if (inode == NULL) {
+ mlog(0, "negative dentry: %.*s\n", dentry->d_name.len,
+ dentry->d_name.name);
+ goto bail;
+ }
+
+ osb = OCFS2_SB(inode->i_sb);
+
+ BUG_ON(!osb);
+
+ if (inode != osb->root_inode) {
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+ /* did we or someone else delete this inode? */
+ if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+ mlog(0, "inode (%"MLFu64") deleted, returning false\n",
+ OCFS2_I(inode)->ip_blkno);
+ goto bail;
+ }
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+
+ if (!inode->i_nlink) {
+ mlog(0, "Inode %"MLFu64" orphaned, returning false "
+ "dir = %d\n", OCFS2_I(inode)->ip_blkno,
+ S_ISDIR(inode->i_mode));
+ goto bail;
+ }
+ }
+
+ ret = 1;
+
+bail:
+ mlog_exit(ret);
+
+ return ret;
+}
+
+struct dentry_operations ocfs2_dentry_ops = {
+ .d_revalidate = ocfs2_dentry_revalidate,
+};
diff --git a/fs/ocfs2/dcache.h b/fs/ocfs2/dcache.h
new file mode 100644
index 00000000000..90072771114
--- /dev/null
+++ b/fs/ocfs2/dcache.h
@@ -0,0 +1,31 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dcache.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_DCACHE_H
+#define OCFS2_DCACHE_H
+
+extern struct dentry_operations ocfs2_dentry_ops;
+
+#endif /* OCFS2_DCACHE_H */
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
new file mode 100644
index 00000000000..57158fa75d9
--- /dev/null
+++ b/fs/ocfs2/dir.c
@@ -0,0 +1,618 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dir.c
+ *
+ * Creates, reads, walks and deletes directory-nodes
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * Portions of this code from linux/fs/ext3/dir.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ * from
+ *
+ * linux/fs/minix/dir.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+
+#define MLOG_MASK_PREFIX ML_NAMEI
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#include "alloc.h"
+#include "dir.h"
+#include "dlmglue.h"
+#include "extent_map.h"
+#include "file.h"
+#include "inode.h"
+#include "journal.h"
+#include "namei.h"
+#include "suballoc.h"
+#include "uptodate.h"
+
+#include "buffer_head_io.h"
+
+static unsigned char ocfs2_filetype_table[] = {
+ DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
+};
+
+static int ocfs2_extend_dir(struct ocfs2_super *osb,
+ struct inode *dir,
+ struct buffer_head *parent_fe_bh,
+ struct buffer_head **new_de_bh);
+/*
+ * ocfs2_readdir()
+ *
+ */
+int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
+{
+ int error = 0;
+ unsigned long offset, blk;
+ int i, num, stored;
+ struct buffer_head * bh, * tmp;
+ struct ocfs2_dir_entry * de;
+ int err;
+ struct inode *inode = filp->f_dentry->d_inode;
+ struct super_block * sb = inode->i_sb;
+ int have_disk_lock = 0;
+
+ mlog_entry("dirino=%"MLFu64"\n", OCFS2_I(inode)->ip_blkno);
+
+ stored = 0;
+ bh = NULL;
+
+ error = ocfs2_meta_lock(inode, NULL, NULL, 0);
+ if (error < 0) {
+ if (error != -ENOENT)
+ mlog_errno(error);
+ /* we haven't got any yet, so propagate the error. */
+ stored = error;
+ goto bail;
+ }
+ have_disk_lock = 1;
+
+ offset = filp->f_pos & (sb->s_blocksize - 1);
+
+ while (!error && !stored && filp->f_pos < i_size_read(inode)) {
+ blk = (filp->f_pos) >> sb->s_blocksize_bits;
+ bh = ocfs2_bread(inode, blk, &err, 0);
+ if (!bh) {
+ mlog(ML_ERROR, "directory #%"MLFu64" contains a hole "
+ "at offset %lld\n",
+ OCFS2_I(inode)->ip_blkno,
+ filp->f_pos);
+ filp->f_pos += sb->s_blocksize - offset;
+ continue;
+ }
+
+ /*
+ * Do the readahead (8k)
+ */
+ if (!offset) {
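+			/* 16 sectors' worth of blocks: 8k of readahead
+			 * regardless of the block size */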
+ for (i = 16 >> (sb->s_blocksize_bits - 9), num = 0;
+ i > 0; i--) {
+ tmp = ocfs2_bread(inode, ++blk, &err, 1);
+ if (tmp)
+ brelse(tmp);
+ }
+ }
+
+revalidate:
+ /* If the dir block has changed since the last call to
+ * readdir(2), then we might be pointing to an invalid
+ * dirent right now. Scan from the start of the block
+ * to make sure. */
+ if (filp->f_version != inode->i_version) {
+ for (i = 0; i < sb->s_blocksize && i < offset; ) {
+ de = (struct ocfs2_dir_entry *) (bh->b_data + i);
+ /* It's too expensive to do a full
+ * dirent test each time round this
+ * loop, but we do have to test at
+ * least that it is non-zero. A
+ * failure will be detected in the
+ * dirent test below. */
+ if (le16_to_cpu(de->rec_len) <
+ OCFS2_DIR_REC_LEN(1))
+ break;
+ i += le16_to_cpu(de->rec_len);
+ }
+ offset = i;
+ filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1))
+ | offset;
+ filp->f_version = inode->i_version;
+ }
+
+ while (!error && filp->f_pos < i_size_read(inode)
+ && offset < sb->s_blocksize) {
+ de = (struct ocfs2_dir_entry *) (bh->b_data + offset);
+ if (!ocfs2_check_dir_entry(inode, de, bh, offset)) {
+ /* On error, skip the f_pos to the
+ next block. */
+ filp->f_pos = (filp->f_pos |
+ (sb->s_blocksize - 1)) + 1;
+ brelse(bh);
+ goto bail;
+ }
+ offset += le16_to_cpu(de->rec_len);
+ if (le64_to_cpu(de->inode)) {
+ /* We might block in the next section
+ * if the data destination is
+ * currently swapped out. So, use a
+ * version stamp to detect whether or
+ * not the directory has been modified
+ * during the copy operation.
+ */
+ unsigned long version = filp->f_version;
+ unsigned char d_type = DT_UNKNOWN;
+
+ if (de->file_type < OCFS2_FT_MAX)
+ d_type = ocfs2_filetype_table[de->file_type];
+ error = filldir(dirent, de->name,
+ de->name_len,
+ filp->f_pos,
+ ino_from_blkno(sb, le64_to_cpu(de->inode)),
+ d_type);
+ if (error)
+ break;
+ if (version != filp->f_version)
+ goto revalidate;
+ stored ++;
+ }
+ filp->f_pos += le16_to_cpu(de->rec_len);
+ }
+ offset = 0;
+ brelse(bh);
+ }
+
+ stored = 0;
+bail:
+ if (have_disk_lock)
+ ocfs2_meta_unlock(inode, 0);
+
+ mlog_exit(stored);
+
+ return stored;
+}
+
+/*
+ * NOTE: this should always be called with parent dir i_mutex taken.
+ */
+int ocfs2_find_files_on_disk(const char *name,
+ int namelen,
+ u64 *blkno,
+ struct inode *inode,
+ struct buffer_head **dirent_bh,
+ struct ocfs2_dir_entry **dirent)
+{
+ int status = -ENOENT;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ mlog_entry("(osb=%p, parent=%"MLFu64", name='%.*s', blkno=%p, "
+ "inode=%p)\n",
+ osb, OCFS2_I(inode)->ip_blkno, namelen, name, blkno, inode);
+
+ *dirent_bh = ocfs2_find_entry(name, namelen, inode, dirent);
+ if (!*dirent_bh || !*dirent) {
+ status = -ENOENT;
+ goto leave;
+ }
+
+ *blkno = le64_to_cpu((*dirent)->inode);
+
+ status = 0;
+leave:
+ if (status < 0) {
+ *dirent = NULL;
+ if (*dirent_bh) {
+ brelse(*dirent_bh);
+ *dirent_bh = NULL;
+ }
+ }
+
+ mlog_exit(status);
+ return status;
+}
+
+/* Check for a name within a directory.
+ *
+ * Return 0 if the name does not exist
+ * Return -EEXIST if the directory contains the name
+ *
+ * Callers should have i_mutex + a cluster lock on dir
+ */
+int ocfs2_check_dir_for_entry(struct inode *dir,
+ const char *name,
+ int namelen)
+{
+ int ret;
+ struct buffer_head *dirent_bh = NULL;
+ struct ocfs2_dir_entry *dirent = NULL;
+
+ mlog_entry("dir %"MLFu64", name '%.*s'\n", OCFS2_I(dir)->ip_blkno,
+ namelen, name);
+
+ ret = -EEXIST;
+ dirent_bh = ocfs2_find_entry(name, namelen, dir, &dirent);
+ if (dirent_bh)
+ goto bail;
+
+ ret = 0;
+bail:
+ if (dirent_bh)
+ brelse(dirent_bh);
+
+ mlog_exit(ret);
+ return ret;
+}
+
+/*
+ * routine to check that the specified directory is empty (for rmdir)
+ */
+int ocfs2_empty_dir(struct inode *inode)
+{
+ unsigned long offset;
+ struct buffer_head * bh;
+ struct ocfs2_dir_entry * de, * de1;
+ struct super_block * sb;
+ int err;
+
+ sb = inode->i_sb;
+ if ((i_size_read(inode) <
+ (OCFS2_DIR_REC_LEN(1) + OCFS2_DIR_REC_LEN(2))) ||
+ !(bh = ocfs2_bread(inode, 0, &err, 0))) {
+ mlog(ML_ERROR, "bad directory (dir #%"MLFu64") - "
+ "no data block\n",
+ OCFS2_I(inode)->ip_blkno);
+ return 1;
+ }
+
+ de = (struct ocfs2_dir_entry *) bh->b_data;
+ de1 = (struct ocfs2_dir_entry *)
+ ((char *)de + le16_to_cpu(de->rec_len));
+ if ((le64_to_cpu(de->inode) != OCFS2_I(inode)->ip_blkno) ||
+ !le64_to_cpu(de1->inode) ||
+ strcmp(".", de->name) ||
+ strcmp("..", de1->name)) {
+ mlog(ML_ERROR, "bad directory (dir #%"MLFu64") - "
+ "no `.' or `..'\n",
+ OCFS2_I(inode)->ip_blkno);
+ brelse(bh);
+ return 1;
+ }
+ offset = le16_to_cpu(de->rec_len) + le16_to_cpu(de1->rec_len);
+ de = (struct ocfs2_dir_entry *)((char *)de1 + le16_to_cpu(de1->rec_len));
+ while (offset < i_size_read(inode) ) {
+ if (!bh || (void *)de >= (void *)(bh->b_data + sb->s_blocksize)) {
+ brelse(bh);
+ bh = ocfs2_bread(inode,
+ offset >> sb->s_blocksize_bits, &err, 0);
+ if (!bh) {
+ mlog(ML_ERROR, "directory #%"MLFu64" contains "
+ "a hole at offset %lu\n",
+ OCFS2_I(inode)->ip_blkno, offset);
+ offset += sb->s_blocksize;
+ continue;
+ }
+ de = (struct ocfs2_dir_entry *) bh->b_data;
+ }
+ if (!ocfs2_check_dir_entry(inode, de, bh, offset)) {
+ brelse(bh);
+ return 1;
+ }
+ if (le64_to_cpu(de->inode)) {
+ brelse(bh);
+ return 0;
+ }
+ offset += le16_to_cpu(de->rec_len);
+ de = (struct ocfs2_dir_entry *)
+ ((char *)de + le16_to_cpu(de->rec_len));
+ }
+ brelse(bh);
+ return 1;
+}
+
+/* returns a bh of the 1st new block in the allocation. */
+int ocfs2_do_extend_dir(struct super_block *sb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *dir,
+ struct buffer_head *parent_fe_bh,
+ struct ocfs2_alloc_context *data_ac,
+ struct ocfs2_alloc_context *meta_ac,
+ struct buffer_head **new_bh)
+{
+ int status;
+ int extend;
+ u64 p_blkno;
+
+ spin_lock(&OCFS2_I(dir)->ip_lock);
+ extend = (i_size_read(dir) == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters));
+ spin_unlock(&OCFS2_I(dir)->ip_lock);
+
+ if (extend) {
+ status = ocfs2_do_extend_allocation(OCFS2_SB(sb), dir, 1,
+ parent_fe_bh, handle,
+ data_ac, meta_ac, NULL);
+ BUG_ON(status == -EAGAIN);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
+ status = ocfs2_extent_map_get_blocks(dir, (dir->i_blocks >>
+ (sb->s_blocksize_bits - 9)),
+ 1, &p_blkno, NULL);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ *new_bh = sb_getblk(sb, p_blkno);
+ if (!*new_bh) {
+ status = -EIO;
+ mlog_errno(status);
+ goto bail;
+ }
+ status = 0;
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+/* assumes you already have a cluster lock on the directory. */
+static int ocfs2_extend_dir(struct ocfs2_super *osb,
+ struct inode *dir,
+ struct buffer_head *parent_fe_bh,
+ struct buffer_head **new_de_bh)
+{
+ int status = 0;
+ int credits, num_free_extents;
+ loff_t dir_i_size;
+ struct ocfs2_dinode *fe = (struct ocfs2_dinode *) parent_fe_bh->b_data;
+ struct ocfs2_alloc_context *data_ac = NULL;
+ struct ocfs2_alloc_context *meta_ac = NULL;
+ struct ocfs2_journal_handle *handle = NULL;
+ struct buffer_head *new_bh = NULL;
+ struct ocfs2_dir_entry * de;
+ struct super_block *sb = osb->sb;
+
+ mlog_entry_void();
+
+ dir_i_size = i_size_read(dir);
+ mlog(0, "extending dir %"MLFu64" (i_size = %lld)\n",
+ OCFS2_I(dir)->ip_blkno, dir_i_size);
+
+ handle = ocfs2_alloc_handle(osb);
+ if (handle == NULL) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* dir->i_size is always block aligned. */
+ spin_lock(&OCFS2_I(dir)->ip_lock);
+ if (dir_i_size == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters)) {
+ spin_unlock(&OCFS2_I(dir)->ip_lock);
+ num_free_extents = ocfs2_num_free_extents(osb, dir, fe);
+ if (num_free_extents < 0) {
+ status = num_free_extents;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ if (!num_free_extents) {
+ status = ocfs2_reserve_new_metadata(osb, handle,
+ fe, &meta_ac);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
+ status = ocfs2_reserve_clusters(osb, handle, 1, &data_ac);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto bail;
+ }
+
+ credits = ocfs2_calc_extend_credits(sb, fe, 1);
+ } else {
+ spin_unlock(&OCFS2_I(dir)->ip_lock);
+ credits = OCFS2_SIMPLE_DIR_EXTEND_CREDITS;
+ }
+
+ handle = ocfs2_start_trans(osb, handle, credits);
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ handle = NULL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_do_extend_dir(osb->sb, handle, dir, parent_fe_bh,
+ data_ac, meta_ac, &new_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ ocfs2_set_new_buffer_uptodate(dir, new_bh);
+
+ status = ocfs2_journal_access(handle, dir, new_bh,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ memset(new_bh->b_data, 0, sb->s_blocksize);
+ de = (struct ocfs2_dir_entry *) new_bh->b_data;
+ de->inode = 0;
+ de->rec_len = cpu_to_le16(sb->s_blocksize);
+ status = ocfs2_journal_dirty(handle, new_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ dir_i_size += dir->i_sb->s_blocksize;
+ i_size_write(dir, dir_i_size);
+ dir->i_blocks = ocfs2_align_bytes_to_sectors(dir_i_size);
+ status = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ *new_de_bh = new_bh;
+ get_bh(*new_de_bh);
+bail:
+ if (handle)
+ ocfs2_commit_trans(handle);
+
+ if (data_ac)
+ ocfs2_free_alloc_context(data_ac);
+ if (meta_ac)
+ ocfs2_free_alloc_context(meta_ac);
+
+ if (new_bh)
+ brelse(new_bh);
+
+ mlog_exit(status);
+ return status;
+}
+
+/*
+ * Search the dir for a good spot, extending it if necessary. The
+ * block containing an appropriate record is returned in ret_de_bh.
+ */
+int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
+ struct inode *dir,
+ struct buffer_head *parent_fe_bh,
+ const char *name,
+ int namelen,
+ struct buffer_head **ret_de_bh)
+{
+ unsigned long offset;
+ struct buffer_head * bh = NULL;
+ unsigned short rec_len;
+ struct ocfs2_dinode *fe;
+ struct ocfs2_dir_entry *de;
+ struct super_block *sb;
+ int status;
+
+ mlog_entry_void();
+
+ mlog(0, "getting ready to insert namelen %d into dir %"MLFu64"\n",
+ namelen, OCFS2_I(dir)->ip_blkno);
+
+ BUG_ON(!S_ISDIR(dir->i_mode));
+ fe = (struct ocfs2_dinode *) parent_fe_bh->b_data;
+ BUG_ON(le64_to_cpu(fe->i_size) != i_size_read(dir));
+
+ sb = dir->i_sb;
+
+ if (!namelen) {
+ status = -EINVAL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ bh = ocfs2_bread(dir, 0, &status, 0);
+ if (!bh) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ rec_len = OCFS2_DIR_REC_LEN(namelen);
+ offset = 0;
+ de = (struct ocfs2_dir_entry *) bh->b_data;
+ while (1) {
+ if ((char *)de >= sb->s_blocksize + bh->b_data) {
+ brelse(bh);
+ bh = NULL;
+
+ if (i_size_read(dir) <= offset) {
+ status = ocfs2_extend_dir(osb,
+ dir,
+ parent_fe_bh,
+ &bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ BUG_ON(!bh);
+ *ret_de_bh = bh;
+ get_bh(*ret_de_bh);
+ goto bail;
+ }
+ bh = ocfs2_bread(dir,
+ offset >> sb->s_blocksize_bits,
+ &status,
+ 0);
+ if (!bh) {
+ mlog_errno(status);
+ goto bail;
+ }
+ /* move to next block */
+ de = (struct ocfs2_dir_entry *) bh->b_data;
+ }
+ if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
+ status = -ENOENT;
+ goto bail;
+ }
+ if (ocfs2_match(namelen, name, de)) {
+ status = -EEXIST;
+ goto bail;
+ }
+ if (((le64_to_cpu(de->inode) == 0) &&
+ (le16_to_cpu(de->rec_len) >= rec_len)) ||
+ (le16_to_cpu(de->rec_len) >=
+ (OCFS2_DIR_REC_LEN(de->name_len) + rec_len))) {
+ /* Ok, we found a spot. Return this bh and let
+ * the caller actually fill it in. */
+ *ret_de_bh = bh;
+ get_bh(*ret_de_bh);
+ status = 0;
+ goto bail;
+ }
+ offset += le16_to_cpu(de->rec_len);
+ de = (struct ocfs2_dir_entry *)((char *) de + le16_to_cpu(de->rec_len));
+ }
+
+ status = 0;
+bail:
+ if (bh)
+ brelse(bh);
+
+ mlog_exit(status);
+ return status;
+}
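All three routines above walk directory blocks the same way: each struct ocfs2_dir_entry carries its own rec_len, and the next entry starts rec_len bytes later. A minimal, purely illustrative sketch of that walk over a single block (the helper name is invented for the example):

/* Illustrative sketch: visit every used entry in one directory block,
 * assuming bh was read with ocfs2_bread() and spans sb->s_blocksize bytes. */
static void ocfs2_walk_dir_block(struct super_block *sb, struct buffer_head *bh)
{
        unsigned long offset = 0;
        struct ocfs2_dir_entry *de;

        while (offset < sb->s_blocksize) {
                de = (struct ocfs2_dir_entry *)(bh->b_data + offset);
                if (le16_to_cpu(de->rec_len) < OCFS2_DIR_REC_LEN(1))
                        break;  /* record too small to be valid, stop */
                if (le64_to_cpu(de->inode))     /* inode == 0 marks an unused slot */
                        printk(KERN_DEBUG "entry %.*s at offset %lu\n",
                               de->name_len, de->name, offset);
                offset += le16_to_cpu(de->rec_len);
        }
}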
diff --git a/fs/ocfs2/dir.h b/fs/ocfs2/dir.h
new file mode 100644
index 00000000000..5f614ec9649
--- /dev/null
+++ b/fs/ocfs2/dir.h
@@ -0,0 +1,54 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dir.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_DIR_H
+#define OCFS2_DIR_H
+
+int ocfs2_check_dir_for_entry(struct inode *dir,
+ const char *name,
+ int namelen);
+int ocfs2_empty_dir(struct inode *inode); /* FIXME: to namei.c */
+int ocfs2_find_files_on_disk(const char *name,
+ int namelen,
+ u64 *blkno,
+ struct inode *inode,
+ struct buffer_head **dirent_bh,
+ struct ocfs2_dir_entry **dirent);
+int ocfs2_readdir(struct file *filp, void *dirent, filldir_t filldir);
+int ocfs2_prepare_dir_for_insert(struct ocfs2_super *osb,
+ struct inode *dir,
+ struct buffer_head *parent_fe_bh,
+ const char *name,
+ int namelen,
+ struct buffer_head **ret_de_bh);
+struct ocfs2_alloc_context;
+int ocfs2_do_extend_dir(struct super_block *sb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *dir,
+ struct buffer_head *parent_fe_bh,
+ struct ocfs2_alloc_context *data_ac,
+ struct ocfs2_alloc_context *meta_ac,
+ struct buffer_head **new_bh);
+#endif /* OCFS2_DIR_H */
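Taken together, these prototypes cover the directory-side half of creating a new name: check that the name is free, then find (or make) room for the new entry. A hedged sketch of how a caller might chain them, with cluster locking, journaling and inode allocation omitted (the helper name is invented for the example):

/* Illustrative sketch: directory-side steps of inserting "name" into dir.
 * Assumes the caller already holds dir's i_mutex and cluster lock and has
 * dir's dinode buffer in parent_fe_bh. */
static int example_dir_insert_prep(struct ocfs2_super *osb,
                                   struct inode *dir,
                                   struct buffer_head *parent_fe_bh,
                                   const char *name, int namelen,
                                   struct buffer_head **slot_bh)
{
        int status;

        status = ocfs2_check_dir_for_entry(dir, name, namelen);
        if (status)
                return status;  /* -EEXIST if the name already exists */

        return ocfs2_prepare_dir_for_insert(osb, dir, parent_fe_bh,
                                            name, namelen, slot_bh);
}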
diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile
new file mode 100644
index 00000000000..ce3f7c29d27
--- /dev/null
+++ b/fs/ocfs2/dlm/Makefile
@@ -0,0 +1,8 @@
+EXTRA_CFLAGS += -Ifs/ocfs2
+
+obj-$(CONFIG_OCFS2_FS) += ocfs2_dlm.o ocfs2_dlmfs.o
+
+ocfs2_dlm-objs := dlmdomain.o dlmdebug.o dlmthread.o dlmrecovery.o \
+ dlmmaster.o dlmast.o dlmconvert.o dlmlock.o dlmunlock.o dlmver.o
+
+ocfs2_dlmfs-objs := userdlm.o dlmfs.o dlmfsver.o
diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h
new file mode 100644
index 00000000000..53652f51c0e
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmapi.h
@@ -0,0 +1,214 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmapi.h
+ *
+ * externally exported dlm interfaces
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+#ifndef DLMAPI_H
+#define DLMAPI_H
+
+struct dlm_lock;
+struct dlm_ctxt;
+
+/* NOTE: changes made to this enum should be reflected in dlmdebug.c */
+enum dlm_status {
+ DLM_NORMAL = 0, /* 0: request in progress */
+ DLM_GRANTED, /* 1: request granted */
+ DLM_DENIED, /* 2: request denied */
+ DLM_DENIED_NOLOCKS, /* 3: request denied, out of system resources */
+ DLM_WORKING, /* 4: async request in progress */
+ DLM_BLOCKED, /* 5: lock request blocked */
+ DLM_BLOCKED_ORPHAN, /* 6: lock request blocked by an orphan lock */
+ DLM_DENIED_GRACE_PERIOD, /* 7: topological change in progress */
+ DLM_SYSERR, /* 8: system error */
+ DLM_NOSUPPORT, /* 9: unsupported */
+ DLM_CANCELGRANT, /* 10: can't cancel convert: already granted */
+ DLM_IVLOCKID, /* 11: bad lockid */
+ DLM_SYNC, /* 12: synchronous request granted */
+ DLM_BADTYPE, /* 13: bad resource type */
+ DLM_BADRESOURCE, /* 14: bad resource handle */
+ DLM_MAXHANDLES, /* 15: no more resource handles */
+ DLM_NOCLINFO, /* 16: can't contact cluster manager */
+ DLM_NOLOCKMGR, /* 17: can't contact lock manager */
+ DLM_NOPURGED, /* 18: can't contact purge daemon */
+ DLM_BADARGS, /* 19: bad api args */
+ DLM_VOID, /* 20: no status */
+ DLM_NOTQUEUED, /* 21: NOQUEUE was specified and request failed */
+ DLM_IVBUFLEN, /* 22: invalid resource name length */
+ DLM_CVTUNGRANT, /* 23: attempted to convert ungranted lock */
+ DLM_BADPARAM, /* 24: invalid lock mode specified */
+ DLM_VALNOTVALID, /* 25: value block has been invalidated */
+ DLM_REJECTED, /* 26: request rejected, unrecognized client */
+ DLM_ABORT, /* 27: blocked lock request cancelled */
+ DLM_CANCEL, /* 28: conversion request cancelled */
+ DLM_IVRESHANDLE, /* 29: invalid resource handle */
+ DLM_DEADLOCK, /* 30: deadlock recovery refused this request */
+ DLM_DENIED_NOASTS, /* 31: failed to allocate AST */
+ DLM_FORWARD, /* 32: request must wait for primary's response */
+ DLM_TIMEOUT, /* 33: timeout value for lock has expired */
+ DLM_IVGROUPID, /* 34: invalid group specification */
+ DLM_VERS_CONFLICT, /* 35: version conflicts prevent request handling */
+ DLM_BAD_DEVICE_PATH, /* 36: Locks device does not exist or path wrong */
+ DLM_NO_DEVICE_PERMISSION, /* 37: Client has insufficient permissions for device */
+ DLM_NO_CONTROL_DEVICE, /* 38: Cannot set options on opened device */
+
+ DLM_RECOVERING, /* 39: extension, allows caller to fail a lock
+ request if it is being recovered */
+ DLM_MIGRATING, /* 40: extension, allows caller to fail a lock
+ request if it is being migrated */
+ DLM_MAXSTATS, /* 41: upper limit for return code validation */
+};
+
+/* for pretty-printing dlm_status error messages */
+const char *dlm_errmsg(enum dlm_status err);
+/* for pretty-printing dlm_status error names */
+const char *dlm_errname(enum dlm_status err);
+
+/* Eventually the DLM will use standard errno values, but in the
+ * meantime this lets us track dlm errors as they bubble up. When we
+ * bring its error reporting into line with the rest of the stack,
+ * these can just be replaced with calls to mlog_errno. */
+#define dlm_error(st) do { \
+ if ((st) != DLM_RECOVERING && \
+ (st) != DLM_MIGRATING && \
+ (st) != DLM_FORWARD) \
+ mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st))); \
+} while (0)
+
+#define DLM_LKSB_UNUSED1 0x01
+#define DLM_LKSB_PUT_LVB 0x02
+#define DLM_LKSB_GET_LVB 0x04
+#define DLM_LKSB_UNUSED2 0x08
+#define DLM_LKSB_UNUSED3 0x10
+#define DLM_LKSB_UNUSED4 0x20
+#define DLM_LKSB_UNUSED5 0x40
+#define DLM_LKSB_UNUSED6 0x80
+
+#define DLM_LVB_LEN 64
+
+/* Callers are only allowed access to the lvb and status members of
+ * this struct. */
+struct dlm_lockstatus {
+ enum dlm_status status;
+ u32 flags;
+ struct dlm_lock *lockid;
+ char lvb[DLM_LVB_LEN];
+};
+
+/* Valid lock modes. */
+#define LKM_IVMODE (-1) /* invalid mode */
+#define LKM_NLMODE 0 /* null lock */
+#define LKM_CRMODE 1 /* concurrent read unsupported */
+#define LKM_CWMODE 2 /* concurrent write unsupported */
+#define LKM_PRMODE 3 /* protected read */
+#define LKM_PWMODE 4 /* protected write unsupported */
+#define LKM_EXMODE 5 /* exclusive */
+#define LKM_MAXMODE 5
+#define LKM_MODEMASK 0xff
+
+/* Flags passed to dlmlock and dlmunlock:
+ * reserved: flags used by the "real" dlm
+ * only a few are supported by this dlm
+ * (U) = unsupported by ocfs2 dlm */
+#define LKM_ORPHAN 0x00000010 /* this lock is orphanable (U) */
+#define LKM_PARENTABLE 0x00000020 /* this lock was orphaned (U) */
+#define LKM_BLOCK 0x00000040 /* blocking lock request (U) */
+#define LKM_LOCAL 0x00000080 /* local lock request */
+#define LKM_VALBLK 0x00000100 /* lock value block request */
+#define LKM_NOQUEUE 0x00000200 /* non blocking request */
+#define LKM_CONVERT 0x00000400 /* conversion request */
+#define LKM_NODLCKWT 0x00000800 /* this lock won't deadlock (U) */
+#define LKM_UNLOCK 0x00001000 /* deallocate this lock */
+#define LKM_CANCEL 0x00002000 /* cancel conversion request */
+#define LKM_DEQALL 0x00004000 /* remove all locks held by proc (U) */
+#define LKM_INVVALBLK 0x00008000 /* invalidate lock value block */
+#define LKM_SYNCSTS 0x00010000 /* return synchronous status if possible (U) */
+#define LKM_TIMEOUT 0x00020000 /* lock request contains timeout (U) */
+#define LKM_SNGLDLCK 0x00040000 /* request can self-deadlock (U) */
+#define LKM_FINDLOCAL 0x00080000 /* find local lock request (U) */
+#define LKM_PROC_OWNED 0x00100000 /* owned by process, not group (U) */
+#define LKM_XID 0x00200000 /* use transaction id for deadlock (U) */
+#define LKM_XID_CONFLICT 0x00400000 /* do not allow lock inheritance (U) */
+#define LKM_FORCE 0x00800000 /* force unlock flag */
+#define LKM_REVVALBLK 0x01000000 /* temporary solution: re-validate
+ lock value block (U) */
+/* unused */
+#define LKM_UNUSED1 0x00000001 /* unused */
+#define LKM_UNUSED2 0x00000002 /* unused */
+#define LKM_UNUSED3 0x00000004 /* unused */
+#define LKM_UNUSED4 0x00000008 /* unused */
+#define LKM_UNUSED5 0x02000000 /* unused */
+#define LKM_UNUSED6 0x04000000 /* unused */
+#define LKM_UNUSED7 0x08000000 /* unused */
+
+/* ocfs2 extensions: internal only
+ * should never be used by caller */
+#define LKM_MIGRATION 0x10000000 /* extension: lockres is to be migrated
+ to another node */
+#define LKM_PUT_LVB 0x20000000 /* extension: lvb is being passed
+ should be applied to lockres */
+#define LKM_GET_LVB 0x40000000 /* extension: lvb should be copied
+ from lockres when lock is granted */
+#define LKM_RECOVERY 0x80000000 /* extension: flag for recovery lock
+ used to avoid recovery rwsem */
+
+
+typedef void (dlm_astlockfunc_t)(void *);
+typedef void (dlm_bastlockfunc_t)(void *, int);
+typedef void (dlm_astunlockfunc_t)(void *, enum dlm_status);
+
+enum dlm_status dlmlock(struct dlm_ctxt *dlm,
+ int mode,
+ struct dlm_lockstatus *lksb,
+ int flags,
+ const char *name,
+ dlm_astlockfunc_t *ast,
+ void *data,
+ dlm_bastlockfunc_t *bast);
+
+enum dlm_status dlmunlock(struct dlm_ctxt *dlm,
+ struct dlm_lockstatus *lksb,
+ int flags,
+ dlm_astunlockfunc_t *unlockast,
+ void *data);
+
+struct dlm_ctxt * dlm_register_domain(const char *domain, u32 key);
+
+void dlm_unregister_domain(struct dlm_ctxt *dlm);
+
+void dlm_print_one_lock(struct dlm_lock *lockid);
+
+typedef void (dlm_eviction_func)(int, void *);
+struct dlm_eviction_cb {
+ struct list_head ec_item;
+ dlm_eviction_func *ec_func;
+ void *ec_data;
+};
+void dlm_setup_eviction_cb(struct dlm_eviction_cb *cb,
+ dlm_eviction_func *f,
+ void *data);
+void dlm_register_eviction_cb(struct dlm_ctxt *dlm,
+ struct dlm_eviction_cb *cb);
+void dlm_unregister_eviction_cb(struct dlm_eviction_cb *cb);
+
+#endif /* DLMAPI_H */
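The interface above is asynchronous in the usual DLM style: dlmlock() queues the request and returns, and the ast callback runs when the lock is actually granted (a bast warns that another node wants a conflicting mode). A rough usage sketch follows; the domain name, key, lock name and callback bodies are invented for the example and error handling is trimmed:

/* Illustrative sketch of taking and dropping an exclusive lock. */
static void example_ast(void *astdata)
{
        /* granted (or failed); see ((struct dlm_lockstatus *)astdata)->status */
}

static void example_bast(void *astdata, int blocked_type)
{
        /* another node wants a conflicting mode; release or downconvert soon */
}

static void example_unlock_ast(void *astdata, enum dlm_status status)
{
        /* unlock completed */
}

static void dlmapi_example(void)
{
        struct dlm_ctxt *dlm;
        struct dlm_lockstatus lksb;
        enum dlm_status status;

        memset(&lksb, 0, sizeof(lksb));

        dlm = dlm_register_domain("exampledomain", 0x12345678);
        if (IS_ERR(dlm))
                return;

        status = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE,
                         "examplelockname", example_ast, &lksb, example_bast);
        /* in real code, wait for example_ast to run and check lksb.status
         * before touching the protected resource */

        if (status == DLM_NORMAL)
                dlmunlock(dlm, &lksb, 0, example_unlock_ast, &lksb);

        dlm_unregister_domain(dlm);
}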
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
new file mode 100644
index 00000000000..8d17d28ef91
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -0,0 +1,466 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmast.c
+ *
+ * AST and BAST functionality for local and remote nodes
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/random.h>
+#include <linux/blkdev.h>
+#include <linux/socket.h>
+#include <linux/inet.h>
+#include <linux/spinlock.h>
+
+
+#include "cluster/heartbeat.h"
+#include "cluster/nodemanager.h"
+#include "cluster/tcp.h"
+#include "cluster/endian.h"
+
+#include "dlmapi.h"
+#include "dlmcommon.h"
+
+#define MLOG_MASK_PREFIX ML_DLM
+#include "cluster/masklog.h"
+
+static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+ struct dlm_lock *lock);
+static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
+
+/* Should be called as an ast gets queued to see if the new
+ * lock level will obsolete a pending bast.
+ * For example, if dlm_thread queued a bast for an EX lock that
+ * was blocking another EX, but before sending the bast the
+ * lock owner downconverted to NL, the bast is now obsolete.
+ * Only the ast should be sent.
+ * This is needed because the lock and convert paths can queue
+ * asts out-of-band (not waiting for dlm_thread) in order to
+ * allow for LKM_NOQUEUE to get immediate responses. */
+static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
+{
+ assert_spin_locked(&dlm->ast_lock);
+ assert_spin_locked(&lock->spinlock);
+
+ if (lock->ml.highest_blocked == LKM_IVMODE)
+ return 0;
+ BUG_ON(lock->ml.highest_blocked == LKM_NLMODE);
+
+ if (lock->bast_pending &&
+ list_empty(&lock->bast_list))
+ /* old bast already sent, ok */
+ return 0;
+
+ if (lock->ml.type == LKM_EXMODE)
+ /* EX blocks anything left, any bast still valid */
+ return 0;
+ else if (lock->ml.type == LKM_NLMODE)
+ /* NL blocks nothing, no reason to send any bast, cancel it */
+ return 1;
+ else if (lock->ml.highest_blocked != LKM_EXMODE)
+ /* PR only blocks EX */
+ return 1;
+
+ return 0;
+}
+
+static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
+{
+ mlog_entry_void();
+
+ BUG_ON(!dlm);
+ BUG_ON(!lock);
+
+ assert_spin_locked(&dlm->ast_lock);
+ if (!list_empty(&lock->ast_list)) {
+ mlog(ML_ERROR, "ast list not empty!! pending=%d, newlevel=%d\n",
+ lock->ast_pending, lock->ml.type);
+ BUG();
+ }
+ BUG_ON(!list_empty(&lock->ast_list));
+ if (lock->ast_pending)
+ mlog(0, "lock has an ast getting flushed right now\n");
+
+ /* putting lock on list, add a ref */
+ dlm_lock_get(lock);
+ spin_lock(&lock->spinlock);
+
+ /* check to see if this ast obsoletes the bast */
+ if (dlm_should_cancel_bast(dlm, lock)) {
+ struct dlm_lock_resource *res = lock->lockres;
+ mlog(0, "%s: cancelling bast for %.*s\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ lock->bast_pending = 0;
+ list_del_init(&lock->bast_list);
+ lock->ml.highest_blocked = LKM_IVMODE;
+ /* removing lock from list, remove a ref. guaranteed
+ * this won't be the last ref because of the get above,
+ * so res->spinlock will not be taken here */
+ dlm_lock_put(lock);
+ /* free up the reserved bast that we are cancelling.
+ * guaranteed that this will not be the last reserved
+ * ast because *both* an ast and a bast were reserved
+ * to get to this point. the res->spinlock will not be
+ * taken here */
+ dlm_lockres_release_ast(dlm, res);
+ }
+ list_add_tail(&lock->ast_list, &dlm->pending_asts);
+ lock->ast_pending = 1;
+ spin_unlock(&lock->spinlock);
+}
+
+void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
+{
+ mlog_entry_void();
+
+ BUG_ON(!dlm);
+ BUG_ON(!lock);
+
+ spin_lock(&dlm->ast_lock);
+ __dlm_queue_ast(dlm, lock);
+ spin_unlock(&dlm->ast_lock);
+}
+
+
+static void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
+{
+ mlog_entry_void();
+
+ BUG_ON(!dlm);
+ BUG_ON(!lock);
+ assert_spin_locked(&dlm->ast_lock);
+
+ BUG_ON(!list_empty(&lock->bast_list));
+ if (lock->bast_pending)
+ mlog(0, "lock has a bast getting flushed right now\n");
+
+ /* putting lock on list, add a ref */
+ dlm_lock_get(lock);
+ spin_lock(&lock->spinlock);
+ list_add_tail(&lock->bast_list, &dlm->pending_basts);
+ lock->bast_pending = 1;
+ spin_unlock(&lock->spinlock);
+}
+
+void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
+{
+ mlog_entry_void();
+
+ BUG_ON(!dlm);
+ BUG_ON(!lock);
+
+ spin_lock(&dlm->ast_lock);
+ __dlm_queue_bast(dlm, lock);
+ spin_unlock(&dlm->ast_lock);
+}
+
+static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+ struct dlm_lock *lock)
+{
+ struct dlm_lockstatus *lksb = lock->lksb;
+ BUG_ON(!lksb);
+
+ /* only updates if this node masters the lockres */
+ if (res->owner == dlm->node_num) {
+
+ spin_lock(&res->spinlock);
+ /* check the lksb flags for the direction */
+ if (lksb->flags & DLM_LKSB_GET_LVB) {
+ mlog(0, "getting lvb from lockres for %s node\n",
+ lock->ml.node == dlm->node_num ? "master" :
+ "remote");
+ memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN);
+ } else if (lksb->flags & DLM_LKSB_PUT_LVB) {
+ mlog(0, "setting lvb from lockres for %s node\n",
+ lock->ml.node == dlm->node_num ? "master" :
+ "remote");
+ memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN);
+ }
+ spin_unlock(&res->spinlock);
+ }
+
+ /* reset any lvb flags on the lksb */
+ lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB);
+}
+
+void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+ struct dlm_lock *lock)
+{
+ dlm_astlockfunc_t *fn;
+ struct dlm_lockstatus *lksb;
+
+ mlog_entry_void();
+
+ lksb = lock->lksb;
+ fn = lock->ast;
+ BUG_ON(lock->ml.node != dlm->node_num);
+
+ dlm_update_lvb(dlm, res, lock);
+ (*fn)(lock->astdata);
+}
+
+
+int dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+ struct dlm_lock *lock)
+{
+ int ret;
+ struct dlm_lockstatus *lksb;
+ int lksbflags;
+
+ mlog_entry_void();
+
+ lksb = lock->lksb;
+ BUG_ON(lock->ml.node == dlm->node_num);
+
+ lksbflags = lksb->flags;
+ dlm_update_lvb(dlm, res, lock);
+
+ /* lock request came from another node
+ * go do the ast over there */
+ ret = dlm_send_proxy_ast(dlm, res, lock, lksbflags);
+ return ret;
+}
+
+void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int blocked_type)
+{
+ dlm_bastlockfunc_t *fn = lock->bast;
+
+ mlog_entry_void();
+ BUG_ON(lock->ml.node != dlm->node_num);
+
+ (*fn)(lock->astdata, blocked_type);
+}
+
+
+
+int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data)
+{
+ int ret;
+ unsigned int locklen;
+ struct dlm_ctxt *dlm = data;
+ struct dlm_lock_resource *res = NULL;
+ struct dlm_lock *lock = NULL;
+ struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf;
+ char *name;
+ struct list_head *iter, *head=NULL;
+ u64 cookie;
+ u32 flags;
+
+ if (!dlm_grab(dlm)) {
+ dlm_error(DLM_REJECTED);
+ return DLM_REJECTED;
+ }
+
+ mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
+ "Domain %s not fully joined!\n", dlm->name);
+
+ name = past->name;
+ locklen = past->namelen;
+ cookie = be64_to_cpu(past->cookie);
+ flags = be32_to_cpu(past->flags);
+
+ if (locklen > DLM_LOCKID_NAME_MAX) {
+ ret = DLM_IVBUFLEN;
+ mlog(ML_ERROR, "Invalid name length in proxy ast handler!\n");
+ goto leave;
+ }
+
+ if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
+ (LKM_PUT_LVB|LKM_GET_LVB)) {
+ mlog(ML_ERROR, "both PUT and GET lvb specified\n");
+ ret = DLM_BADARGS;
+ goto leave;
+ }
+
+ mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
+ (flags & LKM_GET_LVB ? "get lvb" : "none"));
+
+ mlog(0, "type=%d, blocked_type=%d\n", past->type, past->blocked_type);
+
+ if (past->type != DLM_AST &&
+ past->type != DLM_BAST) {
+ mlog(ML_ERROR, "Unknown ast type! %d, cookie=%"MLFu64", "
+ "name=%.*s\n", past->type, cookie, locklen, name);
+ ret = DLM_IVLOCKID;
+ goto leave;
+ }
+
+ res = dlm_lookup_lockres(dlm, name, locklen);
+ if (!res) {
+ mlog(ML_ERROR, "got %sast for unknown lockres! "
+ "cookie=%"MLFu64", name=%.*s, namelen=%u\n",
+ past->type == DLM_AST ? "" : "b",
+ cookie, locklen, name, locklen);
+ ret = DLM_IVLOCKID;
+ goto leave;
+ }
+
+ /* cannot get a proxy ast message if this node owns it */
+ BUG_ON(res->owner == dlm->node_num);
+
+ mlog(0, "lockres %.*s\n", res->lockname.len, res->lockname.name);
+
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_RECOVERING) {
+ mlog(0, "responding with DLM_RECOVERING!\n");
+ ret = DLM_RECOVERING;
+ goto unlock_out;
+ }
+ if (res->state & DLM_LOCK_RES_MIGRATING) {
+ mlog(0, "responding with DLM_MIGRATING!\n");
+ ret = DLM_MIGRATING;
+ goto unlock_out;
+ }
+ /* try convert queue for both ast/bast */
+ head = &res->converting;
+ lock = NULL;
+ list_for_each(iter, head) {
+ lock = list_entry (iter, struct dlm_lock, list);
+ if (be64_to_cpu(lock->ml.cookie) == cookie)
+ goto do_ast;
+ }
+
+ /* if not on convert, try blocked for ast, granted for bast */
+ if (past->type == DLM_AST)
+ head = &res->blocked;
+ else
+ head = &res->granted;
+
+ list_for_each(iter, head) {
+ lock = list_entry (iter, struct dlm_lock, list);
+ if (be64_to_cpu(lock->ml.cookie) == cookie)
+ goto do_ast;
+ }
+
+ mlog(ML_ERROR, "got %sast for unknown lock! cookie=%"MLFu64", "
+ "name=%.*s, namelen=%u\n",
+ past->type == DLM_AST ? "" : "b", cookie, locklen, name, locklen);
+
+ ret = DLM_NORMAL;
+unlock_out:
+ spin_unlock(&res->spinlock);
+ goto leave;
+
+do_ast:
+ ret = DLM_NORMAL;
+ if (past->type == DLM_AST) {
+ /* do not alter lock refcount. switching lists. */
+ list_del_init(&lock->list);
+ list_add_tail(&lock->list, &res->granted);
+ mlog(0, "ast: adding to granted list... type=%d, "
+ "convert_type=%d\n", lock->ml.type, lock->ml.convert_type);
+ if (lock->ml.convert_type != LKM_IVMODE) {
+ lock->ml.type = lock->ml.convert_type;
+ lock->ml.convert_type = LKM_IVMODE;
+ } else {
+ // should already be there....
+ }
+
+ lock->lksb->status = DLM_NORMAL;
+
+ /* if we requested the lvb, fetch it into our lksb now */
+ if (flags & LKM_GET_LVB) {
+ BUG_ON(!(lock->lksb->flags & DLM_LKSB_GET_LVB));
+ memcpy(lock->lksb->lvb, past->lvb, DLM_LVB_LEN);
+ }
+ }
+ spin_unlock(&res->spinlock);
+
+ if (past->type == DLM_AST)
+ dlm_do_local_ast(dlm, res, lock);
+ else
+ dlm_do_local_bast(dlm, res, lock, past->blocked_type);
+
+leave:
+
+ if (res)
+ dlm_lockres_put(res);
+
+ dlm_put(dlm);
+ return ret;
+}
+
+
+
+int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int msg_type,
+ int blocked_type, int flags)
+{
+ int ret = 0;
+ struct dlm_proxy_ast past;
+ struct kvec vec[2];
+ size_t veclen = 1;
+ int status;
+
+ mlog_entry("res %.*s, to=%u, type=%d, blocked_type=%d\n",
+ res->lockname.len, res->lockname.name, lock->ml.node,
+ msg_type, blocked_type);
+
+ memset(&past, 0, sizeof(struct dlm_proxy_ast));
+ past.node_idx = dlm->node_num;
+ past.type = msg_type;
+ past.blocked_type = blocked_type;
+ past.namelen = res->lockname.len;
+ memcpy(past.name, res->lockname.name, past.namelen);
+ past.cookie = lock->ml.cookie;
+
+ vec[0].iov_len = sizeof(struct dlm_proxy_ast);
+ vec[0].iov_base = &past;
+ if (flags & DLM_LKSB_GET_LVB) {
+ mlog(0, "returning requested LVB data\n");
+ be32_add_cpu(&past.flags, LKM_GET_LVB);
+ vec[1].iov_len = DLM_LVB_LEN;
+ vec[1].iov_base = lock->lksb->lvb;
+ veclen++;
+ }
+
+ ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen,
+ lock->ml.node, &status);
+ if (ret < 0)
+ mlog_errno(ret);
+ else {
+ if (status == DLM_RECOVERING) {
+ mlog(ML_ERROR, "sent AST to node %u, it thinks this "
+ "node is dead!\n", lock->ml.node);
+ BUG();
+ } else if (status == DLM_MIGRATING) {
+ mlog(ML_ERROR, "sent AST to node %u, it returned "
+ "DLM_MIGRATING!\n", lock->ml.node);
+ BUG();
+ } else if (status != DLM_NORMAL) {
+ mlog(ML_ERROR, "AST to node %u returned %d!\n",
+ lock->ml.node, status);
+ /* ignore it */
+ }
+ ret = 0;
+ }
+ return ret;
+}
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
new file mode 100644
index 00000000000..3fecba0a602
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -0,0 +1,884 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmcommon.h
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+#ifndef DLMCOMMON_H
+#define DLMCOMMON_H
+
+#include <linux/kref.h>
+
+#define DLM_HB_NODE_DOWN_PRI (0xf000000)
+#define DLM_HB_NODE_UP_PRI (0x8000000)
+
+#define DLM_LOCKID_NAME_MAX 32
+
+#define DLM_DOMAIN_NAME_MAX_LEN 255
+#define DLM_LOCK_RES_OWNER_UNKNOWN O2NM_MAX_NODES
+#define DLM_THREAD_SHUFFLE_INTERVAL 5 // flush everything every 5 passes
+#define DLM_THREAD_MS 200 // flush at least every 200 ms
+
+#define DLM_HASH_BITS 7
+#define DLM_HASH_SIZE (1 << DLM_HASH_BITS)
+#define DLM_HASH_MASK (DLM_HASH_SIZE - 1)
+
+enum dlm_ast_type {
+ DLM_AST = 0,
+ DLM_BAST,
+ DLM_ASTUNLOCK
+};
+
+
+#define LKM_VALID_FLAGS (LKM_VALBLK | LKM_CONVERT | LKM_UNLOCK | \
+ LKM_CANCEL | LKM_INVVALBLK | LKM_FORCE | \
+ LKM_RECOVERY | LKM_LOCAL | LKM_NOQUEUE)
+
+#define DLM_RECOVERY_LOCK_NAME "$RECOVERY"
+#define DLM_RECOVERY_LOCK_NAME_LEN 9
+
+static inline int dlm_is_recovery_lock(const char *lock_name, int name_len)
+{
+ if (name_len == DLM_RECOVERY_LOCK_NAME_LEN &&
+ memcmp(lock_name, DLM_RECOVERY_LOCK_NAME, name_len)==0)
+ return 1;
+ return 0;
+}
+
+#define DLM_RECO_STATE_ACTIVE 0x0001
+
+struct dlm_recovery_ctxt
+{
+ struct list_head resources;
+ struct list_head received;
+ struct list_head node_data;
+ u8 new_master;
+ u8 dead_node;
+ u16 state;
+ unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ wait_queue_head_t event;
+};
+
+enum dlm_ctxt_state {
+ DLM_CTXT_NEW = 0,
+ DLM_CTXT_JOINED,
+ DLM_CTXT_IN_SHUTDOWN,
+ DLM_CTXT_LEAVING,
+};
+
+struct dlm_ctxt
+{
+ struct list_head list;
+ struct list_head *resources;
+ struct list_head dirty_list;
+ struct list_head purge_list;
+ struct list_head pending_asts;
+ struct list_head pending_basts;
+ unsigned int purge_count;
+ spinlock_t spinlock;
+ spinlock_t ast_lock;
+ char *name;
+ u8 node_num;
+ u32 key;
+ u8 joining_node;
+ wait_queue_head_t dlm_join_events;
+ unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ struct dlm_recovery_ctxt reco;
+ spinlock_t master_lock;
+ struct list_head master_list;
+ struct list_head mle_hb_events;
+
+ /* these give a really vague idea of the system load */
+ atomic_t local_resources;
+ atomic_t remote_resources;
+ atomic_t unknown_resources;
+
+ /* NOTE: Next three are protected by dlm_domain_lock */
+ struct kref dlm_refs;
+ enum dlm_ctxt_state dlm_state;
+ unsigned int num_joins;
+
+ struct o2hb_callback_func dlm_hb_up;
+ struct o2hb_callback_func dlm_hb_down;
+ struct task_struct *dlm_thread_task;
+ struct task_struct *dlm_reco_thread_task;
+ wait_queue_head_t dlm_thread_wq;
+ wait_queue_head_t dlm_reco_thread_wq;
+ wait_queue_head_t ast_wq;
+ wait_queue_head_t migration_wq;
+
+ struct work_struct dispatched_work;
+ struct list_head work_list;
+ spinlock_t work_lock;
+ struct list_head dlm_domain_handlers;
+ struct list_head dlm_eviction_callbacks;
+};
+
+/* these keventd work queue items are for less frequently
+ * called functions that cannot be directly called from the
+ * net message handlers for some reason, usually because
+ * they need to send net messages of their own. */
+void dlm_dispatch_work(void *data);
+
+struct dlm_lock_resource;
+struct dlm_work_item;
+
+typedef void (dlm_workfunc_t)(struct dlm_work_item *, void *);
+
+struct dlm_request_all_locks_priv
+{
+ u8 reco_master;
+ u8 dead_node;
+};
+
+struct dlm_mig_lockres_priv
+{
+ struct dlm_lock_resource *lockres;
+ u8 real_master;
+};
+
+struct dlm_assert_master_priv
+{
+ struct dlm_lock_resource *lockres;
+ u8 request_from;
+ u32 flags;
+ unsigned ignore_higher:1;
+};
+
+
+struct dlm_work_item
+{
+ struct list_head list;
+ dlm_workfunc_t *func;
+ struct dlm_ctxt *dlm;
+ void *data;
+ union {
+ struct dlm_request_all_locks_priv ral;
+ struct dlm_mig_lockres_priv ml;
+ struct dlm_assert_master_priv am;
+ } u;
+};
+
+static inline void dlm_init_work_item(struct dlm_ctxt *dlm,
+ struct dlm_work_item *i,
+ dlm_workfunc_t *f, void *data)
+{
+ memset(i, 0, sizeof(*i));
+ i->func = f;
+ INIT_LIST_HEAD(&i->list);
+ i->data = data;
+ i->dlm = dlm; /* must have already done a dlm_grab on this! */
+}
+
+
+
+static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
+ u8 node)
+{
+ assert_spin_locked(&dlm->spinlock);
+
+ dlm->joining_node = node;
+ wake_up(&dlm->dlm_join_events);
+}
+
+#define DLM_LOCK_RES_UNINITED 0x00000001
+#define DLM_LOCK_RES_RECOVERING 0x00000002
+#define DLM_LOCK_RES_READY 0x00000004
+#define DLM_LOCK_RES_DIRTY 0x00000008
+#define DLM_LOCK_RES_IN_PROGRESS 0x00000010
+#define DLM_LOCK_RES_MIGRATING 0x00000020
+
+#define DLM_PURGE_INTERVAL_MS (8 * 1000)
+
+struct dlm_lock_resource
+{
+ /* WARNING: Please see the comment in dlm_init_lockres before
+ * adding fields here. */
+ struct list_head list;
+ struct kref refs;
+
+ /* please keep these next 3 in this order;
+ * some funcs want to iterate over all lists */
+ struct list_head granted;
+ struct list_head converting;
+ struct list_head blocked;
+
+ struct list_head dirty;
+ struct list_head recovering; // dlm_recovery_ctxt.resources list
+
+ /* unused lock resources have their last_used stamped and are
+ * put on a list for the dlm thread to run. */
+ struct list_head purge;
+ unsigned long last_used;
+
+ unsigned migration_pending:1;
+ atomic_t asts_reserved;
+ spinlock_t spinlock;
+ wait_queue_head_t wq;
+ u8 owner; //node which owns the lock resource, or unknown
+ u16 state;
+ struct qstr lockname;
+ char lvb[DLM_LVB_LEN];
+};
+
+struct dlm_migratable_lock
+{
+ __be64 cookie;
+
+ /* these 3 are just padding for the in-memory structure, but
+ * list and flags are actually used when sent over the wire */
+ __be16 pad1;
+ u8 list; // 0=granted, 1=converting, 2=blocked
+ u8 flags;
+
+ s8 type;
+ s8 convert_type;
+ s8 highest_blocked;
+ u8 node;
+}; // 16 bytes
+
+struct dlm_lock
+{
+ struct dlm_migratable_lock ml;
+
+ struct list_head list;
+ struct list_head ast_list;
+ struct list_head bast_list;
+ struct dlm_lock_resource *lockres;
+ spinlock_t spinlock;
+ struct kref lock_refs;
+
+ // ast and bast must be callable while holding a spinlock!
+ dlm_astlockfunc_t *ast;
+ dlm_bastlockfunc_t *bast;
+ void *astdata;
+ struct dlm_lockstatus *lksb;
+ unsigned ast_pending:1,
+ bast_pending:1,
+ convert_pending:1,
+ lock_pending:1,
+ cancel_pending:1,
+ unlock_pending:1,
+ lksb_kernel_allocated:1;
+};
+
+
+#define DLM_LKSB_UNUSED1 0x01
+#define DLM_LKSB_PUT_LVB 0x02
+#define DLM_LKSB_GET_LVB 0x04
+#define DLM_LKSB_UNUSED2 0x08
+#define DLM_LKSB_UNUSED3 0x10
+#define DLM_LKSB_UNUSED4 0x20
+#define DLM_LKSB_UNUSED5 0x40
+#define DLM_LKSB_UNUSED6 0x80
+
+
+enum dlm_lockres_list {
+ DLM_GRANTED_LIST = 0,
+ DLM_CONVERTING_LIST,
+ DLM_BLOCKED_LIST
+};
+
+static inline struct list_head *
+dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)
+{
+ struct list_head *ret = NULL;
+ if (idx == DLM_GRANTED_LIST)
+ ret = &res->granted;
+ else if (idx == DLM_CONVERTING_LIST)
+ ret = &res->converting;
+ else if (idx == DLM_BLOCKED_LIST)
+ ret = &res->blocked;
+ else
+ BUG();
+ return ret;
+}
+
+
+
+
+struct dlm_node_iter
+{
+ unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ int curnode;
+};
+
+
+enum {
+ DLM_MASTER_REQUEST_MSG = 500,
+ DLM_UNUSED_MSG1, /* 501 */
+ DLM_ASSERT_MASTER_MSG, /* 502 */
+ DLM_CREATE_LOCK_MSG, /* 503 */
+ DLM_CONVERT_LOCK_MSG, /* 504 */
+ DLM_PROXY_AST_MSG, /* 505 */
+ DLM_UNLOCK_LOCK_MSG, /* 506 */
+ DLM_UNUSED_MSG2, /* 507 */
+ DLM_MIGRATE_REQUEST_MSG, /* 508 */
+ DLM_MIG_LOCKRES_MSG, /* 509 */
+ DLM_QUERY_JOIN_MSG, /* 510 */
+ DLM_ASSERT_JOINED_MSG, /* 511 */
+ DLM_CANCEL_JOIN_MSG, /* 512 */
+ DLM_EXIT_DOMAIN_MSG, /* 513 */
+ DLM_MASTER_REQUERY_MSG, /* 514 */
+ DLM_LOCK_REQUEST_MSG, /* 515 */
+ DLM_RECO_DATA_DONE_MSG, /* 516 */
+ DLM_BEGIN_RECO_MSG, /* 517 */
+ DLM_FINALIZE_RECO_MSG /* 518 */
+};
+
+struct dlm_reco_node_data
+{
+ int state;
+ u8 node_num;
+ struct list_head list;
+};
+
+enum {
+ DLM_RECO_NODE_DATA_DEAD = -1,
+ DLM_RECO_NODE_DATA_INIT = 0,
+ DLM_RECO_NODE_DATA_REQUESTING,
+ DLM_RECO_NODE_DATA_REQUESTED,
+ DLM_RECO_NODE_DATA_RECEIVING,
+ DLM_RECO_NODE_DATA_DONE,
+ DLM_RECO_NODE_DATA_FINALIZE_SENT,
+};
+
+
+enum {
+ DLM_MASTER_RESP_NO = 0,
+ DLM_MASTER_RESP_YES,
+ DLM_MASTER_RESP_MAYBE,
+ DLM_MASTER_RESP_ERROR
+};
+
+
+struct dlm_master_request
+{
+ u8 node_idx;
+ u8 namelen;
+ __be16 pad1;
+ __be32 flags;
+
+ u8 name[O2NM_MAX_NAME_LEN];
+};
+
+#define DLM_ASSERT_MASTER_MLE_CLEANUP 0x00000001
+#define DLM_ASSERT_MASTER_REQUERY 0x00000002
+#define DLM_ASSERT_MASTER_FINISH_MIGRATION 0x00000004
+struct dlm_assert_master
+{
+ u8 node_idx;
+ u8 namelen;
+ __be16 pad1;
+ __be32 flags;
+
+ u8 name[O2NM_MAX_NAME_LEN];
+};
+
+struct dlm_migrate_request
+{
+ u8 master;
+ u8 new_master;
+ u8 namelen;
+ u8 pad1;
+ __be32 pad2;
+ u8 name[O2NM_MAX_NAME_LEN];
+};
+
+struct dlm_master_requery
+{
+ u8 pad1;
+ u8 pad2;
+ u8 node_idx;
+ u8 namelen;
+ __be32 pad3;
+ u8 name[O2NM_MAX_NAME_LEN];
+};
+
+#define DLM_MRES_RECOVERY 0x01
+#define DLM_MRES_MIGRATION 0x02
+#define DLM_MRES_ALL_DONE 0x04
+
+/*
+ * We would like to get one whole lockres into a single network
+ * message whenever possible. Generally speaking, there will be
+ * at most one dlm_lock on a lockres for each node in the cluster,
+ * plus (infrequently) any additional locks coming in from userdlm.
+ *
+ * struct _dlm_lockres_page
+ * {
+ * dlm_migratable_lockres mres;
+ * dlm_migratable_lock ml[DLM_MAX_MIGRATABLE_LOCKS];
+ * u8 pad[DLM_MIG_LOCKRES_RESERVED];
+ * };
+ *
+ * from ../cluster/tcp.h
+ * NET_MAX_PAYLOAD_BYTES (4096 - sizeof(net_msg))
+ * (roughly 4080 bytes)
+ * and sizeof(dlm_migratable_lockres) = 112 bytes
+ * and sizeof(dlm_migratable_lock) = 16 bytes
+ *
+ * Choosing DLM_MAX_MIGRATABLE_LOCKS=240 and
+ * DLM_MIG_LOCKRES_RESERVED=128 means we have this:
+ *
+ * (DLM_MAX_MIGRATABLE_LOCKS * sizeof(dlm_migratable_lock)) +
+ * sizeof(dlm_migratable_lockres) + DLM_MIG_LOCKRES_RESERVED =
+ * NET_MAX_PAYLOAD_BYTES
+ * (240 * 16) + 112 + 128 = 4080
+ *
+ * So a lockres would need more than 240 locks before it would
+ * use more than one network packet to recover. Not too bad.
+ */
+#define DLM_MAX_MIGRATABLE_LOCKS 240
+
+struct dlm_migratable_lockres
+{
+ u8 master;
+ u8 lockname_len;
+ u8 num_locks; // locks sent in this structure
+ u8 flags;
+ __be32 total_locks; // locks to be sent for this migration cookie
+ __be64 mig_cookie; // cookie for this lockres migration
+ // or zero if not needed
+ // 16 bytes
+ u8 lockname[DLM_LOCKID_NAME_MAX];
+ // 48 bytes
+ u8 lvb[DLM_LVB_LEN];
+ // 112 bytes
+ struct dlm_migratable_lock ml[0]; // 16 bytes each, begins at byte 112
+};
+#define DLM_MIG_LOCKRES_MAX_LEN \
+ (sizeof(struct dlm_migratable_lockres) + \
+ (sizeof(struct dlm_migratable_lock) * \
+ DLM_MAX_MIGRATABLE_LOCKS) )
+
+/* from above, 128 bytes
+ * for some undetermined future use */
+#define DLM_MIG_LOCKRES_RESERVED (NET_MAX_PAYLOAD_BYTES - \
+ DLM_MIG_LOCKRES_MAX_LEN)
+
+struct dlm_create_lock
+{
+ __be64 cookie;
+
+ __be32 flags;
+ u8 pad1;
+ u8 node_idx;
+ s8 requested_type;
+ u8 namelen;
+
+ u8 name[O2NM_MAX_NAME_LEN];
+};
+
+struct dlm_convert_lock
+{
+ __be64 cookie;
+
+ __be32 flags;
+ u8 pad1;
+ u8 node_idx;
+ s8 requested_type;
+ u8 namelen;
+
+ u8 name[O2NM_MAX_NAME_LEN];
+
+ s8 lvb[0];
+};
+#define DLM_CONVERT_LOCK_MAX_LEN (sizeof(struct dlm_convert_lock)+DLM_LVB_LEN)
+
+struct dlm_unlock_lock
+{
+ __be64 cookie;
+
+ __be32 flags;
+ __be16 pad1;
+ u8 node_idx;
+ u8 namelen;
+
+ u8 name[O2NM_MAX_NAME_LEN];
+
+ s8 lvb[0];
+};
+#define DLM_UNLOCK_LOCK_MAX_LEN (sizeof(struct dlm_unlock_lock)+DLM_LVB_LEN)
+
+struct dlm_proxy_ast
+{
+ __be64 cookie;
+
+ __be32 flags;
+ u8 node_idx;
+ u8 type;
+ u8 blocked_type;
+ u8 namelen;
+
+ u8 name[O2NM_MAX_NAME_LEN];
+
+ s8 lvb[0];
+};
+#define DLM_PROXY_AST_MAX_LEN (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN)
+
+#define DLM_MOD_KEY (0x666c6172)
+enum dlm_query_join_response {
+ JOIN_DISALLOW = 0,
+ JOIN_OK,
+ JOIN_OK_NO_MAP,
+};
+
+struct dlm_lock_request
+{
+ u8 node_idx;
+ u8 dead_node;
+ __be16 pad1;
+ __be32 pad2;
+};
+
+struct dlm_reco_data_done
+{
+ u8 node_idx;
+ u8 dead_node;
+ __be16 pad1;
+ __be32 pad2;
+
+ /* unused for now */
+ /* eventually we can use this to attempt
+ * lvb recovery based on each node's info */
+ u8 reco_lvb[DLM_LVB_LEN];
+};
+
+struct dlm_begin_reco
+{
+ u8 node_idx;
+ u8 dead_node;
+ __be16 pad1;
+ __be32 pad2;
+};
+
+
+struct dlm_query_join_request
+{
+ u8 node_idx;
+ u8 pad1[2];
+ u8 name_len;
+ u8 domain[O2NM_MAX_NAME_LEN];
+};
+
+struct dlm_assert_joined
+{
+ u8 node_idx;
+ u8 pad1[2];
+ u8 name_len;
+ u8 domain[O2NM_MAX_NAME_LEN];
+};
+
+struct dlm_cancel_join
+{
+ u8 node_idx;
+ u8 pad1[2];
+ u8 name_len;
+ u8 domain[O2NM_MAX_NAME_LEN];
+};
+
+struct dlm_exit_domain
+{
+ u8 node_idx;
+ u8 pad1[3];
+};
+
+struct dlm_finalize_reco
+{
+ u8 node_idx;
+ u8 dead_node;
+ __be16 pad1;
+ __be32 pad2;
+};
+
+static inline enum dlm_status
+__dlm_lockres_state_to_status(struct dlm_lock_resource *res)
+{
+ enum dlm_status status = DLM_NORMAL;
+
+ assert_spin_locked(&res->spinlock);
+
+ if (res->state & DLM_LOCK_RES_RECOVERING)
+ status = DLM_RECOVERING;
+ else if (res->state & DLM_LOCK_RES_MIGRATING)
+ status = DLM_MIGRATING;
+ else if (res->state & DLM_LOCK_RES_IN_PROGRESS)
+ status = DLM_FORWARD;
+
+ return status;
+}
+
+struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
+ struct dlm_lockstatus *lksb);
+void dlm_lock_get(struct dlm_lock *lock);
+void dlm_lock_put(struct dlm_lock *lock);
+
+void dlm_lock_attach_lockres(struct dlm_lock *lock,
+ struct dlm_lock_resource *res);
+
+int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data);
+int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data);
+int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data);
+
+void dlm_revert_pending_convert(struct dlm_lock_resource *res,
+ struct dlm_lock *lock);
+void dlm_revert_pending_lock(struct dlm_lock_resource *res,
+ struct dlm_lock *lock);
+
+int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data);
+void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
+ struct dlm_lock *lock);
+void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
+ struct dlm_lock *lock);
+
+int dlm_launch_thread(struct dlm_ctxt *dlm);
+void dlm_complete_thread(struct dlm_ctxt *dlm);
+int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
+void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
+void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
+
+void dlm_put(struct dlm_ctxt *dlm);
+struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
+int dlm_domain_fully_joined(struct dlm_ctxt *dlm);
+
+void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+void dlm_purge_lockres(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *lockres);
+void dlm_lockres_get(struct dlm_lock_resource *res);
+void dlm_lockres_put(struct dlm_lock_resource *res);
+void __dlm_unhash_lockres(struct dlm_lock_resource *res);
+void __dlm_insert_lockres(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
+ const char *name,
+ unsigned int len);
+struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
+ const char *name,
+ unsigned int len);
+
+int dlm_is_host_down(int errno);
+void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ u8 owner);
+struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
+ const char *lockid,
+ int flags);
+struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
+ const char *name,
+ unsigned int namelen);
+
+void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
+void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
+void dlm_do_local_ast(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock);
+int dlm_do_remote_ast(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock);
+void dlm_do_local_bast(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ int blocked_type);
+int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ int msg_type,
+ int blocked_type, int flags);
+static inline int dlm_send_proxy_bast(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ int blocked_type)
+{
+ return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_BAST,
+ blocked_type, 0);
+}
+
+static inline int dlm_send_proxy_ast(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ int flags)
+{
+ return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_AST,
+ 0, flags);
+}
+
+void dlm_print_one_lock_resource(struct dlm_lock_resource *res);
+void __dlm_print_one_lock_resource(struct dlm_lock_resource *res);
+
+u8 dlm_nm_this_node(struct dlm_ctxt *dlm);
+void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
+void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
+
+
+int dlm_nm_init(struct dlm_ctxt *dlm);
+int dlm_heartbeat_init(struct dlm_ctxt *dlm);
+void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
+void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);
+
+int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
+int dlm_migrate_lockres(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ u8 target);
+int dlm_finish_migration(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ u8 old_master);
+void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res);
+
+int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data);
+int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data);
+int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data);
+int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data);
+int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data);
+int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data);
+int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data);
+int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data);
+int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data);
+
+int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ int ignore_higher,
+ u8 request_from,
+ u32 flags);
+
+
+int dlm_send_one_lockres(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_migratable_lockres *mres,
+ u8 send_to,
+ u8 flags);
+void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+
+/* will exit holding res->spinlock, but may drop in function */
+void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags);
+void __dlm_wait_on_lockres_flags_set(struct dlm_lock_resource *res, int flags);
+
+/* will exit holding res->spinlock, but may drop in function */
+static inline void __dlm_wait_on_lockres(struct dlm_lock_resource *res)
+{
+ __dlm_wait_on_lockres_flags(res, (DLM_LOCK_RES_IN_PROGRESS|
+ DLM_LOCK_RES_RECOVERING|
+ DLM_LOCK_RES_MIGRATING));
+}
+
+
+int dlm_init_mle_cache(void);
+void dlm_destroy_mle_cache(void);
+void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up);
+void dlm_clean_master_list(struct dlm_ctxt *dlm,
+ u8 dead_node);
+int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
+
+
+static inline const char * dlm_lock_mode_name(int mode)
+{
+ switch (mode) {
+ case LKM_EXMODE:
+ return "EX";
+ case LKM_PRMODE:
+ return "PR";
+ case LKM_NLMODE:
+ return "NL";
+ }
+ return "UNKNOWN";
+}
+
+
+static inline int dlm_lock_compatible(int existing, int request)
+{
+ /* NO_LOCK compatible with all */
+ if (request == LKM_NLMODE ||
+ existing == LKM_NLMODE)
+ return 1;
+
+ /* EX incompatible with all non-NO_LOCK */
+ if (request == LKM_EXMODE)
+ return 0;
+
+ /* request must be PR, which is compatible with PR */
+ if (existing == LKM_PRMODE)
+ return 1;
+
+ return 0;
+}
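+
+/* Compatibility matrix implied by the checks above (1 = compatible):
+ *
+ *                  existing
+ *                 NL  PR  EX
+ *   request  NL    1   1   1
+ *            PR    1   1   0
+ *            EX    1   0   0
+ */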
+
+static inline int dlm_lock_on_list(struct list_head *head,
+ struct dlm_lock *lock)
+{
+ struct list_head *iter;
+ struct dlm_lock *tmplock;
+
+ list_for_each(iter, head) {
+ tmplock = list_entry(iter, struct dlm_lock, list);
+ if (tmplock == lock)
+ return 1;
+ }
+ return 0;
+}
+
+
+static inline enum dlm_status dlm_err_to_dlm_status(int err)
+{
+ enum dlm_status ret;
+ if (err == -ENOMEM)
+ ret = DLM_SYSERR;
+ else if (err == -ETIMEDOUT || o2net_link_down(err, NULL))
+ ret = DLM_NOLOCKMGR;
+ else if (err == -EINVAL)
+ ret = DLM_BADPARAM;
+ else if (err == -ENAMETOOLONG)
+ ret = DLM_IVBUFLEN;
+ else
+ ret = DLM_BADARGS;
+ return ret;
+}
+
+
+static inline void dlm_node_iter_init(unsigned long *map,
+ struct dlm_node_iter *iter)
+{
+ memcpy(iter->node_map, map, sizeof(iter->node_map));
+ iter->curnode = -1;
+}
+
+static inline int dlm_node_iter_next(struct dlm_node_iter *iter)
+{
+ int bit;
+ bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1);
+ if (bit >= O2NM_MAX_NODES) {
+ iter->curnode = O2NM_MAX_NODES;
+ return -ENOENT;
+ }
+ iter->curnode = bit;
+ return bit;
+}
+
+
+
+#endif /* DLMCOMMON_H */
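The dlm_node_iter helpers above are how the dlm walks a node bitmap: snapshot the map (typically under dlm->spinlock) into the iterator, then step through the set bits. A small illustrative sketch (the function name is invented for the example):

/* Illustrative sketch: log every node currently in the domain. */
static void example_walk_domain_nodes(struct dlm_ctxt *dlm)
{
        struct dlm_node_iter iter;
        int node;

        spin_lock(&dlm->spinlock);
        dlm_node_iter_init(dlm->domain_map, &iter);
        spin_unlock(&dlm->spinlock);

        while ((node = dlm_node_iter_next(&iter)) >= 0)
                mlog(0, "node %d is in the domain\n", node);
}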
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
new file mode 100644
index 00000000000..6001b22a997
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -0,0 +1,530 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmconvert.c
+ *
+ * underlying calls for lock conversion
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ */
+
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/random.h>
+#include <linux/blkdev.h>
+#include <linux/socket.h>
+#include <linux/inet.h>
+#include <linux/spinlock.h>
+
+
+#include "cluster/heartbeat.h"
+#include "cluster/nodemanager.h"
+#include "cluster/tcp.h"
+
+#include "dlmapi.h"
+#include "dlmcommon.h"
+
+#include "dlmconvert.h"
+
+#define MLOG_MASK_PREFIX ML_DLM
+#include "cluster/masklog.h"
+
+/* NOTE: __dlmconvert_master is the only function in here that
+ * needs a spinlock held on entry (res->spinlock) and it is the
+ * only one that holds a lock on exit (res->spinlock).
+ * All other functions in here need no locks and drop all of
+ * the locks that they acquire. */
+static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags,
+ int type, int *call_ast,
+ int *kick_thread);
+static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags, int type);
+
+/*
+ * this is only called directly by dlmlock(), and only when the
+ * local node is the owner of the lockres
+ * locking:
+ * caller needs: none
+ * taken: takes and drops res->spinlock
+ * held on exit: none
+ * returns: see __dlmconvert_master
+ */
+enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags, int type)
+{
+ int call_ast = 0, kick_thread = 0;
+ enum dlm_status status;
+
+ spin_lock(&res->spinlock);
+ /* we are not in a network handler, this is fine */
+ __dlm_wait_on_lockres(res);
+ __dlm_lockres_reserve_ast(res);
+ res->state |= DLM_LOCK_RES_IN_PROGRESS;
+
+ status = __dlmconvert_master(dlm, res, lock, flags, type,
+ &call_ast, &kick_thread);
+
+ res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+ if (status != DLM_NORMAL && status != DLM_NOTQUEUED)
+ dlm_error(status);
+
+ /* either queue the ast or release it */
+ if (call_ast)
+ dlm_queue_ast(dlm, lock);
+ else
+ dlm_lockres_release_ast(dlm, res);
+
+ if (kick_thread)
+ dlm_kick_thread(dlm, res);
+
+ return status;
+}
+
+/* performs lock conversion at the lockres master site
+ * locking:
+ * caller needs: res->spinlock
+ * taken: takes and drops lock->spinlock
+ * held on exit: res->spinlock
+ * returns: DLM_NORMAL, DLM_NOTQUEUED, DLM_DENIED
+ * call_ast: whether ast should be called for this lock
+ * kick_thread: whether dlm_kick_thread should be called
+ */
+static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags,
+ int type, int *call_ast,
+ int *kick_thread)
+{
+ enum dlm_status status = DLM_NORMAL;
+ struct list_head *iter;
+ struct dlm_lock *tmplock=NULL;
+
+ assert_spin_locked(&res->spinlock);
+
+ mlog_entry("type=%d, convert_type=%d, new convert_type=%d\n",
+ lock->ml.type, lock->ml.convert_type, type);
+
+ spin_lock(&lock->spinlock);
+
+ /* already converting? */
+ if (lock->ml.convert_type != LKM_IVMODE) {
+ mlog(ML_ERROR, "attempted to convert a lock with a lock "
+ "conversion pending\n");
+ status = DLM_DENIED;
+ goto unlock_exit;
+ }
+
+ /* must be on grant queue to convert */
+ if (!dlm_lock_on_list(&res->granted, lock)) {
+ mlog(ML_ERROR, "attempted to convert a lock not on grant "
+ "queue\n");
+ status = DLM_DENIED;
+ goto unlock_exit;
+ }
+
+ if (flags & LKM_VALBLK) {
+ switch (lock->ml.type) {
+ case LKM_EXMODE:
+ /* EX + LKM_VALBLK + convert == set lvb */
+ mlog(0, "will set lvb: converting %s->%s\n",
+ dlm_lock_mode_name(lock->ml.type),
+ dlm_lock_mode_name(type));
+ lock->lksb->flags |= DLM_LKSB_PUT_LVB;
+ break;
+ case LKM_PRMODE:
+ case LKM_NLMODE:
+ /* refetch if new level is not NL */
+ if (type > LKM_NLMODE) {
+ mlog(0, "will fetch new value into "
+ "lvb: converting %s->%s\n",
+ dlm_lock_mode_name(lock->ml.type),
+ dlm_lock_mode_name(type));
+ lock->lksb->flags |= DLM_LKSB_GET_LVB;
+ } else {
+ mlog(0, "will NOT fetch new value "
+ "into lvb: converting %s->%s\n",
+ dlm_lock_mode_name(lock->ml.type),
+ dlm_lock_mode_name(type));
+ flags &= ~(LKM_VALBLK);
+ }
+ break;
+ }
+ }
+
+
+ /* in-place downconvert? */
+ if (type <= lock->ml.type)
+ goto grant;
+
+ /* upconvert from here on */
+ status = DLM_NORMAL;
+ list_for_each(iter, &res->granted) {
+ tmplock = list_entry(iter, struct dlm_lock, list);
+ if (tmplock == lock)
+ continue;
+ if (!dlm_lock_compatible(tmplock->ml.type, type))
+ goto switch_queues;
+ }
+
+ list_for_each(iter, &res->converting) {
+ tmplock = list_entry(iter, struct dlm_lock, list);
+ if (!dlm_lock_compatible(tmplock->ml.type, type))
+ goto switch_queues;
+ /* existing conversion requests take precedence */
+ if (!dlm_lock_compatible(tmplock->ml.convert_type, type))
+ goto switch_queues;
+ }
+
+ /* fall thru to grant */
+
+grant:
+ mlog(0, "res %.*s, granting %s lock\n", res->lockname.len,
+ res->lockname.name, dlm_lock_mode_name(type));
+ /* immediately grant the new lock type */
+ lock->lksb->status = DLM_NORMAL;
+ if (lock->ml.node == dlm->node_num)
+ mlog(0, "doing in-place convert for nonlocal lock\n");
+ lock->ml.type = type;
+ status = DLM_NORMAL;
+ *call_ast = 1;
+ goto unlock_exit;
+
+switch_queues:
+ if (flags & LKM_NOQUEUE) {
+ mlog(0, "failed to convert NOQUEUE lock %.*s from "
+ "%d to %d...\n", res->lockname.len, res->lockname.name,
+ lock->ml.type, type);
+ status = DLM_NOTQUEUED;
+ goto unlock_exit;
+ }
+ mlog(0, "res %.*s, queueing...\n", res->lockname.len,
+ res->lockname.name);
+
+ lock->ml.convert_type = type;
+ /* do not alter lock refcount. switching lists. */
+ list_del_init(&lock->list);
+ list_add_tail(&lock->list, &res->converting);
+
+unlock_exit:
+ spin_unlock(&lock->spinlock);
+ if (status == DLM_DENIED) {
+ __dlm_print_one_lock_resource(res);
+ }
+ if (status == DLM_NORMAL)
+ *kick_thread = 1;
+ return status;
+}
+
+void dlm_revert_pending_convert(struct dlm_lock_resource *res,
+ struct dlm_lock *lock)
+{
+ /* do not alter lock refcount. switching lists. */
+ list_del_init(&lock->list);
+ list_add_tail(&lock->list, &res->granted);
+ lock->ml.convert_type = LKM_IVMODE;
+ lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB);
+}
+
+/* messages the master site to do lock conversion
+ * locking:
+ * caller needs: none
+ * taken: takes and drops res->spinlock, uses DLM_LOCK_RES_IN_PROGRESS
+ * held on exit: none
+ * returns: DLM_NORMAL, DLM_RECOVERING, status from remote node
+ */
+enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags, int type)
+{
+ enum dlm_status status;
+
+ mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
+ lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
+
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_RECOVERING) {
+ mlog(0, "bailing out early since res is RECOVERING "
+ "on secondary queue\n");
+ /* __dlm_print_one_lock_resource(res); */
+ status = DLM_RECOVERING;
+ goto bail;
+ }
+ /* will exit this call with spinlock held */
+ __dlm_wait_on_lockres(res);
+
+ if (lock->ml.convert_type != LKM_IVMODE) {
+ __dlm_print_one_lock_resource(res);
+ mlog(ML_ERROR, "converting a remote lock that is already "
+ "converting! (cookie=%"MLFu64", conv=%d)\n",
+ lock->ml.cookie, lock->ml.convert_type);
+ status = DLM_DENIED;
+ goto bail;
+ }
+ res->state |= DLM_LOCK_RES_IN_PROGRESS;
+ /* move lock to local convert queue */
+ /* do not alter lock refcount. switching lists. */
+ list_del_init(&lock->list);
+ list_add_tail(&lock->list, &res->converting);
+ lock->convert_pending = 1;
+ lock->ml.convert_type = type;
+
+ if (flags & LKM_VALBLK) {
+ if (lock->ml.type == LKM_EXMODE) {
+ flags |= LKM_PUT_LVB;
+ lock->lksb->flags |= DLM_LKSB_PUT_LVB;
+ } else {
+ if (lock->ml.convert_type == LKM_NLMODE)
+ flags &= ~LKM_VALBLK;
+ else {
+ flags |= LKM_GET_LVB;
+ lock->lksb->flags |= DLM_LKSB_GET_LVB;
+ }
+ }
+ }
+ spin_unlock(&res->spinlock);
+
+ /* no locks held here.
+ * need to wait for a reply as to whether it got queued or not. */
+ status = dlm_send_remote_convert_request(dlm, res, lock, flags, type);
+
+ spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+ lock->convert_pending = 0;
+ /* if it failed, move it back to granted queue */
+ if (status != DLM_NORMAL) {
+ if (status != DLM_NOTQUEUED)
+ dlm_error(status);
+ dlm_revert_pending_convert(res, lock);
+ }
+bail:
+ spin_unlock(&res->spinlock);
+
+ /* TODO: should this be a wake_one? */
+ /* wake up any IN_PROGRESS waiters */
+ wake_up(&res->wq);
+
+ return status;
+}
+
+/* sends DLM_CONVERT_LOCK_MSG to master site
+ * locking:
+ * caller needs: none
+ * taken: none
+ * held on exit: none
+ * returns: DLM_NOLOCKMGR, status from remote node
+ */
+static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags, int type)
+{
+ struct dlm_convert_lock convert;
+ int tmpret;
+ enum dlm_status ret;
+ int status = 0;
+ struct kvec vec[2];
+ size_t veclen = 1;
+
+ mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
+
+ memset(&convert, 0, sizeof(struct dlm_convert_lock));
+ convert.node_idx = dlm->node_num;
+ convert.requested_type = type;
+ convert.cookie = lock->ml.cookie;
+ convert.namelen = res->lockname.len;
+ convert.flags = cpu_to_be32(flags);
+ memcpy(convert.name, res->lockname.name, convert.namelen);
+
+ vec[0].iov_len = sizeof(struct dlm_convert_lock);
+ vec[0].iov_base = &convert;
+
+ if (flags & LKM_PUT_LVB) {
+ /* extra data to send if we are updating lvb */
+ vec[1].iov_len = DLM_LVB_LEN;
+ vec[1].iov_base = lock->lksb->lvb;
+ veclen++;
+ }
+
+ tmpret = o2net_send_message_vec(DLM_CONVERT_LOCK_MSG, dlm->key,
+ vec, veclen, res->owner, &status);
+ if (tmpret >= 0) {
+ /* successfully sent and received */
+ ret = status;  /* this is already a dlm_status */
+ if (ret == DLM_RECOVERING) {
+ mlog(0, "node %u returned DLM_RECOVERING from convert "
+ "message!\n", res->owner);
+ } else if (ret == DLM_MIGRATING) {
+ mlog(0, "node %u returned DLM_MIGRATING from convert "
+ "message!\n", res->owner);
+ } else if (ret == DLM_FORWARD) {
+ mlog(0, "node %u returned DLM_FORWARD from convert "
+ "message!\n", res->owner);
+ } else if (ret != DLM_NORMAL && ret != DLM_NOTQUEUED)
+ dlm_error(ret);
+ } else {
+ mlog_errno(tmpret);
+ if (dlm_is_host_down(tmpret)) {
+ ret = DLM_RECOVERING;
+ mlog(0, "node %u died so returning DLM_RECOVERING "
+ "from convert message!\n", res->owner);
+ } else {
+ ret = dlm_err_to_dlm_status(tmpret);
+ }
+ }
+
+ return ret;
+}
+
+/* handler for DLM_CONVERT_LOCK_MSG on master site
+ * locking:
+ * caller needs: none
+ * taken: takes and drop res->spinlock
+ * held on exit: none
+ * returns: DLM_NORMAL, DLM_IVLOCKID, DLM_BADARGS,
+ * status from __dlmconvert_master
+ */
+int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf;
+ struct dlm_lock_resource *res = NULL;
+ struct list_head *iter;
+ struct dlm_lock *lock = NULL;
+ struct dlm_lockstatus *lksb;
+ enum dlm_status status = DLM_NORMAL;
+ u32 flags;
+ int call_ast = 0, kick_thread = 0;
+
+ if (!dlm_grab(dlm)) {
+ dlm_error(DLM_REJECTED);
+ return DLM_REJECTED;
+ }
+
+ mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
+ "Domain %s not fully joined!\n", dlm->name);
+
+ if (cnv->namelen > DLM_LOCKID_NAME_MAX) {
+ status = DLM_IVBUFLEN;
+ dlm_error(status);
+ goto leave;
+ }
+
+ flags = be32_to_cpu(cnv->flags);
+
+ if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
+ (LKM_PUT_LVB|LKM_GET_LVB)) {
+ mlog(ML_ERROR, "both PUT and GET lvb specified\n");
+ status = DLM_BADARGS;
+ goto leave;
+ }
+
+ mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
+ (flags & LKM_GET_LVB ? "get lvb" : "none"));
+
+ status = DLM_IVLOCKID;
+ res = dlm_lookup_lockres(dlm, cnv->name, cnv->namelen);
+ if (!res) {
+ dlm_error(status);
+ goto leave;
+ }
+
+ spin_lock(&res->spinlock);
+ list_for_each(iter, &res->granted) {
+ lock = list_entry(iter, struct dlm_lock, list);
+ if (lock->ml.cookie == cnv->cookie &&
+ lock->ml.node == cnv->node_idx) {
+ dlm_lock_get(lock);
+ break;
+ }
+ lock = NULL;
+ }
+ spin_unlock(&res->spinlock);
+ if (!lock) {
+ status = DLM_IVLOCKID;
+ dlm_error(status);
+ goto leave;
+ }
+
+ /* found the lock */
+ lksb = lock->lksb;
+
+ /* see if caller needed to get/put lvb */
+ if (flags & LKM_PUT_LVB) {
+ BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
+ lksb->flags |= DLM_LKSB_PUT_LVB;
+ memcpy(&lksb->lvb[0], &cnv->lvb[0], DLM_LVB_LEN);
+ } else if (flags & LKM_GET_LVB) {
+ BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
+ lksb->flags |= DLM_LKSB_GET_LVB;
+ }
+
+ spin_lock(&res->spinlock);
+ status = __dlm_lockres_state_to_status(res);
+ if (status == DLM_NORMAL) {
+ __dlm_lockres_reserve_ast(res);
+ res->state |= DLM_LOCK_RES_IN_PROGRESS;
+ status = __dlmconvert_master(dlm, res, lock, flags,
+ cnv->requested_type,
+ &call_ast, &kick_thread);
+ res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+ }
+ spin_unlock(&res->spinlock);
+
+ if (status != DLM_NORMAL) {
+ if (status != DLM_NOTQUEUED)
+ dlm_error(status);
+ lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB);
+ }
+
+leave:
+ if (!lock)
+ mlog(ML_ERROR, "did not find lock to convert on grant queue! "
+ "cookie=%"MLFu64"\n",
+ cnv->cookie);
+ else
+ dlm_lock_put(lock);
+
+ /* either queue the ast or release it */
+ if (call_ast)
+ dlm_queue_ast(dlm, lock);
+ else
+ dlm_lockres_release_ast(dlm, res);
+
+ if (kick_thread)
+ dlm_kick_thread(dlm, res);
+
+ if (res)
+ dlm_lockres_put(res);
+
+ dlm_put(dlm);
+
+ return status;
+}
diff --git a/fs/ocfs2/dlm/dlmconvert.h b/fs/ocfs2/dlm/dlmconvert.h
new file mode 100644
index 00000000000..b2e3677df87
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmconvert.h
@@ -0,0 +1,35 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmconvert.h
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ */
+
+#ifndef DLMCONVERT_H
+#define DLMCONVERT_H
+
+enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags, int type);
+enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags, int type);
+
+#endif
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
new file mode 100644
index 00000000000..f339fe27975
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -0,0 +1,246 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmdebug.c
+ *
+ * debug functionality for the dlm
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/utsname.h>
+#include <linux/sysctl.h>
+#include <linux/spinlock.h>
+
+#include "cluster/heartbeat.h"
+#include "cluster/nodemanager.h"
+#include "cluster/tcp.h"
+
+#include "dlmapi.h"
+#include "dlmcommon.h"
+#include "dlmdebug.h"
+
+#include "dlmdomain.h"
+
+#define MLOG_MASK_PREFIX ML_DLM
+#include "cluster/masklog.h"
+
+void dlm_print_one_lock_resource(struct dlm_lock_resource *res)
+{
+ mlog(ML_NOTICE, "lockres: %.*s, owner=%u, state=%u\n",
+ res->lockname.len, res->lockname.name,
+ res->owner, res->state);
+ spin_lock(&res->spinlock);
+ __dlm_print_one_lock_resource(res);
+ spin_unlock(&res->spinlock);
+}
+
+void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
+{
+ struct list_head *iter2;
+ struct dlm_lock *lock;
+
+ assert_spin_locked(&res->spinlock);
+
+ mlog(ML_NOTICE, "lockres: %.*s, owner=%u, state=%u\n",
+ res->lockname.len, res->lockname.name,
+ res->owner, res->state);
+ mlog(ML_NOTICE, " last used: %lu, on purge list: %s\n",
+ res->last_used, list_empty(&res->purge) ? "no" : "yes");
+ mlog(ML_NOTICE, " granted queue: \n");
+ list_for_each(iter2, &res->granted) {
+ lock = list_entry(iter2, struct dlm_lock, list);
+ spin_lock(&lock->spinlock);
+ mlog(ML_NOTICE, " type=%d, conv=%d, node=%u, "
+ "cookie=%"MLFu64", ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n",
+ lock->ml.type, lock->ml.convert_type, lock->ml.node, lock->ml.cookie,
+ list_empty(&lock->ast_list) ? 'y' : 'n',
+ lock->ast_pending ? 'y' : 'n',
+ list_empty(&lock->bast_list) ? 'y' : 'n',
+ lock->bast_pending ? 'y' : 'n');
+ spin_unlock(&lock->spinlock);
+ }
+ mlog(ML_NOTICE, " converting queue: \n");
+ list_for_each(iter2, &res->converting) {
+ lock = list_entry(iter2, struct dlm_lock, list);
+ spin_lock(&lock->spinlock);
+ mlog(ML_NOTICE, " type=%d, conv=%d, node=%u, "
+ "cookie=%"MLFu64", ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n",
+ lock->ml.type, lock->ml.convert_type, lock->ml.node, lock->ml.cookie,
+ list_empty(&lock->ast_list) ? 'y' : 'n',
+ lock->ast_pending ? 'y' : 'n',
+ list_empty(&lock->bast_list) ? 'y' : 'n',
+ lock->bast_pending ? 'y' : 'n');
+ spin_unlock(&lock->spinlock);
+ }
+ mlog(ML_NOTICE, " blocked queue: \n");
+ list_for_each(iter2, &res->blocked) {
+ lock = list_entry(iter2, struct dlm_lock, list);
+ spin_lock(&lock->spinlock);
+ mlog(ML_NOTICE, " type=%d, conv=%d, node=%u, "
+ "cookie=%"MLFu64", ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n",
+ lock->ml.type, lock->ml.convert_type, lock->ml.node, lock->ml.cookie,
+ list_empty(&lock->ast_list) ? 'y' : 'n',
+ lock->ast_pending ? 'y' : 'n',
+ list_empty(&lock->bast_list) ? 'y' : 'n',
+ lock->bast_pending ? 'y' : 'n');
+ spin_unlock(&lock->spinlock);
+ }
+}
+
+void dlm_print_one_lock(struct dlm_lock *lockid)
+{
+ dlm_print_one_lock_resource(lockid->lockres);
+}
+EXPORT_SYMBOL_GPL(dlm_print_one_lock);
+
+void dlm_dump_lock_resources(struct dlm_ctxt *dlm)
+{
+ struct dlm_lock_resource *res;
+ struct list_head *iter;
+ struct list_head *bucket;
+ int i;
+
+ /* guard against a bogus context before dereferencing it */
+ if (!dlm || !dlm->name) {
+ mlog(ML_ERROR, "dlm=%p\n", dlm);
+ return;
+ }
+
+ mlog(ML_NOTICE, "struct dlm_ctxt: %s, node=%u, key=%u\n",
+ dlm->name, dlm->node_num, dlm->key);
+
+ spin_lock(&dlm->spinlock);
+ for (i=0; i<DLM_HASH_SIZE; i++) {
+ bucket = &(dlm->resources[i]);
+ list_for_each(iter, bucket) {
+ res = list_entry(iter, struct dlm_lock_resource, list);
+ dlm_print_one_lock_resource(res);
+ }
+ }
+ spin_unlock(&dlm->spinlock);
+}
+
+static const char *dlm_errnames[] = {
+ [DLM_NORMAL] = "DLM_NORMAL",
+ [DLM_GRANTED] = "DLM_GRANTED",
+ [DLM_DENIED] = "DLM_DENIED",
+ [DLM_DENIED_NOLOCKS] = "DLM_DENIED_NOLOCKS",
+ [DLM_WORKING] = "DLM_WORKING",
+ [DLM_BLOCKED] = "DLM_BLOCKED",
+ [DLM_BLOCKED_ORPHAN] = "DLM_BLOCKED_ORPHAN",
+ [DLM_DENIED_GRACE_PERIOD] = "DLM_DENIED_GRACE_PERIOD",
+ [DLM_SYSERR] = "DLM_SYSERR",
+ [DLM_NOSUPPORT] = "DLM_NOSUPPORT",
+ [DLM_CANCELGRANT] = "DLM_CANCELGRANT",
+ [DLM_IVLOCKID] = "DLM_IVLOCKID",
+ [DLM_SYNC] = "DLM_SYNC",
+ [DLM_BADTYPE] = "DLM_BADTYPE",
+ [DLM_BADRESOURCE] = "DLM_BADRESOURCE",
+ [DLM_MAXHANDLES] = "DLM_MAXHANDLES",
+ [DLM_NOCLINFO] = "DLM_NOCLINFO",
+ [DLM_NOLOCKMGR] = "DLM_NOLOCKMGR",
+ [DLM_NOPURGED] = "DLM_NOPURGED",
+ [DLM_BADARGS] = "DLM_BADARGS",
+ [DLM_VOID] = "DLM_VOID",
+ [DLM_NOTQUEUED] = "DLM_NOTQUEUED",
+ [DLM_IVBUFLEN] = "DLM_IVBUFLEN",
+ [DLM_CVTUNGRANT] = "DLM_CVTUNGRANT",
+ [DLM_BADPARAM] = "DLM_BADPARAM",
+ [DLM_VALNOTVALID] = "DLM_VALNOTVALID",
+ [DLM_REJECTED] = "DLM_REJECTED",
+ [DLM_ABORT] = "DLM_ABORT",
+ [DLM_CANCEL] = "DLM_CANCEL",
+ [DLM_IVRESHANDLE] = "DLM_IVRESHANDLE",
+ [DLM_DEADLOCK] = "DLM_DEADLOCK",
+ [DLM_DENIED_NOASTS] = "DLM_DENIED_NOASTS",
+ [DLM_FORWARD] = "DLM_FORWARD",
+ [DLM_TIMEOUT] = "DLM_TIMEOUT",
+ [DLM_IVGROUPID] = "DLM_IVGROUPID",
+ [DLM_VERS_CONFLICT] = "DLM_VERS_CONFLICT",
+ [DLM_BAD_DEVICE_PATH] = "DLM_BAD_DEVICE_PATH",
+ [DLM_NO_DEVICE_PERMISSION] = "DLM_NO_DEVICE_PERMISSION",
+ [DLM_NO_CONTROL_DEVICE] = "DLM_NO_CONTROL_DEVICE",
+ [DLM_RECOVERING] = "DLM_RECOVERING",
+ [DLM_MIGRATING] = "DLM_MIGRATING",
+ [DLM_MAXSTATS] = "DLM_MAXSTATS",
+};
+
+static const char *dlm_errmsgs[] = {
+ [DLM_NORMAL] = "request in progress",
+ [DLM_GRANTED] = "request granted",
+ [DLM_DENIED] = "request denied",
+ [DLM_DENIED_NOLOCKS] = "request denied, out of system resources",
+ [DLM_WORKING] = "async request in progress",
+ [DLM_BLOCKED] = "lock request blocked",
+ [DLM_BLOCKED_ORPHAN] = "lock request blocked by an orphan lock",
+ [DLM_DENIED_GRACE_PERIOD] = "topological change in progress",
+ [DLM_SYSERR] = "system error",
+ [DLM_NOSUPPORT] = "unsupported",
+ [DLM_CANCELGRANT] = "can't cancel convert: already granted",
+ [DLM_IVLOCKID] = "bad lockid",
+ [DLM_SYNC] = "synchronous request granted",
+ [DLM_BADTYPE] = "bad resource type",
+ [DLM_BADRESOURCE] = "bad resource handle",
+ [DLM_MAXHANDLES] = "no more resource handles",
+ [DLM_NOCLINFO] = "can't contact cluster manager",
+ [DLM_NOLOCKMGR] = "can't contact lock manager",
+ [DLM_NOPURGED] = "can't contact purge daemon",
+ [DLM_BADARGS] = "bad api args",
+ [DLM_VOID] = "no status",
+ [DLM_NOTQUEUED] = "NOQUEUE was specified and request failed",
+ [DLM_IVBUFLEN] = "invalid resource name length",
+ [DLM_CVTUNGRANT] = "attempted to convert ungranted lock",
+ [DLM_BADPARAM] = "invalid lock mode specified",
+ [DLM_VALNOTVALID] = "value block has been invalidated",
+ [DLM_REJECTED] = "request rejected, unrecognized client",
+ [DLM_ABORT] = "blocked lock request cancelled",
+ [DLM_CANCEL] = "conversion request cancelled",
+ [DLM_IVRESHANDLE] = "invalid resource handle",
+ [DLM_DEADLOCK] = "deadlock recovery refused this request",
+ [DLM_DENIED_NOASTS] = "failed to allocate AST",
+ [DLM_FORWARD] = "request must wait for primary's response",
+ [DLM_TIMEOUT] = "timeout value for lock has expired",
+ [DLM_IVGROUPID] = "invalid group specification",
+ [DLM_VERS_CONFLICT] = "version conflicts prevent request handling",
+ [DLM_BAD_DEVICE_PATH] = "Locks device does not exist or path wrong",
+ [DLM_NO_DEVICE_PERMISSION] = "Client has insufficient perms for device",
+ [DLM_NO_CONTROL_DEVICE] = "Cannot set options on opened device",
+ [DLM_RECOVERING] = "lock resource being recovered",
+ [DLM_MIGRATING] = "lock resource being migrated",
+ [DLM_MAXSTATS] = "invalid error number",
+};
+
+const char *dlm_errmsg(enum dlm_status err)
+{
+ if (err >= DLM_MAXSTATS || err < 0)
+ return dlm_errmsgs[DLM_MAXSTATS];
+ return dlm_errmsgs[err];
+}
+EXPORT_SYMBOL_GPL(dlm_errmsg);
+
+const char *dlm_errname(enum dlm_status err)
+{
+ if (err >= DLM_MAXSTATS || err < 0)
+ return dlm_errnames[DLM_MAXSTATS];
+ return dlm_errnames[err];
+}
+EXPORT_SYMBOL_GPL(dlm_errname);
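+
+/* Hypothetical usage sketch for the two lookup helpers above -- turning an
+ * enum dlm_status into something readable in a log line:
+ *
+ *     mlog(ML_ERROR, "dlm status = %s (%s)\n",
+ *          dlm_errname(status), dlm_errmsg(status));
+ */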
diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h
new file mode 100644
index 00000000000..6858510c3cc
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmdebug.h
@@ -0,0 +1,30 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmdebug.h
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ */
+
+#ifndef DLMDEBUG_H
+#define DLMDEBUG_H
+
+void dlm_dump_lock_resources(struct dlm_ctxt *dlm);
+
+#endif
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
new file mode 100644
index 00000000000..da3c22045f8
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -0,0 +1,1469 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmdomain.c
+ *
+ * defines domain join / leave apis
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+
+#include "cluster/heartbeat.h"
+#include "cluster/nodemanager.h"
+#include "cluster/tcp.h"
+
+#include "dlmapi.h"
+#include "dlmcommon.h"
+
+#include "dlmdebug.h"
+#include "dlmdomain.h"
+
+#include "dlmver.h"
+
+#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN)
+#include "cluster/masklog.h"
+
+/*
+ *
+ * spinlock lock ordering: if multiple locks are needed, obey this ordering:
+ * dlm_domain_lock
+ * struct dlm_ctxt->spinlock
+ * struct dlm_lock_resource->spinlock
+ * struct dlm_ctxt->master_lock
+ * struct dlm_ctxt->ast_lock
+ * dlm_master_list_entry->spinlock
+ * dlm_lock->spinlock
+ *
+ */
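+
+/* Illustrative only: a path that needs both the per-domain and a per-lockres
+ * spinlock must follow the ordering above, taking the outer lock first and
+ * releasing in reverse order:
+ *
+ *     spin_lock(&dlm->spinlock);
+ *     spin_lock(&res->spinlock);
+ *     ... work on res ...
+ *     spin_unlock(&res->spinlock);
+ *     spin_unlock(&dlm->spinlock);
+ */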
+
+spinlock_t dlm_domain_lock = SPIN_LOCK_UNLOCKED;
+LIST_HEAD(dlm_domains);
+static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events);
+
+#define DLM_DOMAIN_BACKOFF_MS 200
+
+static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data);
+static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data);
+static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data);
+static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data);
+
+static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
+
+void __dlm_unhash_lockres(struct dlm_lock_resource *lockres)
+{
+ list_del_init(&lockres->list);
+ dlm_lockres_put(lockres);
+}
+
+void __dlm_insert_lockres(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ struct list_head *bucket;
+ struct qstr *q;
+
+ assert_spin_locked(&dlm->spinlock);
+
+ q = &res->lockname;
+ q->hash = full_name_hash(q->name, q->len);
+ bucket = &(dlm->resources[q->hash & DLM_HASH_MASK]);
+
+ /* get a reference for our hashtable */
+ dlm_lockres_get(res);
+
+ list_add_tail(&res->list, bucket);
+}
+
+struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
+ const char *name,
+ unsigned int len)
+{
+ unsigned int hash;
+ struct list_head *iter;
+ struct dlm_lock_resource *tmpres=NULL;
+ struct list_head *bucket;
+
+ mlog_entry("%.*s\n", len, name);
+
+ assert_spin_locked(&dlm->spinlock);
+
+ hash = full_name_hash(name, len);
+
+ bucket = &(dlm->resources[hash & DLM_HASH_MASK]);
+
+ /* check for pre-existing lock */
+ list_for_each(iter, bucket) {
+ tmpres = list_entry(iter, struct dlm_lock_resource, list);
+ if (tmpres->lockname.len == len &&
+ memcmp(tmpres->lockname.name, name, len) == 0) {
+ dlm_lockres_get(tmpres);
+ break;
+ }
+
+ tmpres = NULL;
+ }
+ return tmpres;
+}
+
+struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
+ const char *name,
+ unsigned int len)
+{
+ struct dlm_lock_resource *res;
+
+ spin_lock(&dlm->spinlock);
+ res = __dlm_lookup_lockres(dlm, name, len);
+ spin_unlock(&dlm->spinlock);
+ return res;
+}
+
+static struct dlm_ctxt * __dlm_lookup_domain_full(const char *domain, int len)
+{
+ struct dlm_ctxt *tmp = NULL;
+ struct list_head *iter;
+
+ assert_spin_locked(&dlm_domain_lock);
+
+ /* tmp->name here is always NULL terminated,
+ * but domain may not be! */
+ list_for_each(iter, &dlm_domains) {
+ tmp = list_entry (iter, struct dlm_ctxt, list);
+ if (strlen(tmp->name) == len &&
+ memcmp(tmp->name, domain, len)==0)
+ break;
+ tmp = NULL;
+ }
+
+ return tmp;
+}
+
+/* For null terminated domain strings ONLY */
+static struct dlm_ctxt * __dlm_lookup_domain(const char *domain)
+{
+ assert_spin_locked(&dlm_domain_lock);
+
+ return __dlm_lookup_domain_full(domain, strlen(domain));
+}
+
+
+/* returns true on one of two conditions:
+ * 1) the domain does not exist
+ * 2) the domain exists and its state is "joined" */
+static int dlm_wait_on_domain_helper(const char *domain)
+{
+ int ret = 0;
+ struct dlm_ctxt *tmp = NULL;
+
+ spin_lock(&dlm_domain_lock);
+
+ tmp = __dlm_lookup_domain(domain);
+ if (!tmp)
+ ret = 1;
+ else if (tmp->dlm_state == DLM_CTXT_JOINED)
+ ret = 1;
+
+ spin_unlock(&dlm_domain_lock);
+ return ret;
+}
+
+static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm)
+{
+ if (dlm->resources)
+ free_page((unsigned long) dlm->resources);
+
+ if (dlm->name)
+ kfree(dlm->name);
+
+ kfree(dlm);
+}
+
+/* A little strange - this function will be called while holding
+ * dlm_domain_lock and is expected to be holding it on the way out. We
+ * will however drop and reacquire it multiple times */
+static void dlm_ctxt_release(struct kref *kref)
+{
+ struct dlm_ctxt *dlm;
+
+ dlm = container_of(kref, struct dlm_ctxt, dlm_refs);
+
+ BUG_ON(dlm->num_joins);
+ BUG_ON(dlm->dlm_state == DLM_CTXT_JOINED);
+
+ /* we may still be in the list if we hit an error during join. */
+ list_del_init(&dlm->list);
+
+ spin_unlock(&dlm_domain_lock);
+
+ mlog(0, "freeing memory from domain %s\n", dlm->name);
+
+ wake_up(&dlm_domain_events);
+
+ dlm_free_ctxt_mem(dlm);
+
+ spin_lock(&dlm_domain_lock);
+}
+
+void dlm_put(struct dlm_ctxt *dlm)
+{
+ spin_lock(&dlm_domain_lock);
+ kref_put(&dlm->dlm_refs, dlm_ctxt_release);
+ spin_unlock(&dlm_domain_lock);
+}
+
+static void __dlm_get(struct dlm_ctxt *dlm)
+{
+ kref_get(&dlm->dlm_refs);
+}
+
+/* given a questionable reference to a dlm object, gets a reference if
+ * it can find it in the list, otherwise returns NULL in which case
+ * you shouldn't trust your pointer. */
+struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm)
+{
+ struct list_head *iter;
+ struct dlm_ctxt *target = NULL;
+
+ spin_lock(&dlm_domain_lock);
+
+ list_for_each(iter, &dlm_domains) {
+ target = list_entry (iter, struct dlm_ctxt, list);
+
+ if (target == dlm) {
+ __dlm_get(target);
+ break;
+ }
+
+ target = NULL;
+ }
+
+ spin_unlock(&dlm_domain_lock);
+
+ return target;
+}
+
+int dlm_domain_fully_joined(struct dlm_ctxt *dlm)
+{
+ int ret;
+
+ spin_lock(&dlm_domain_lock);
+ ret = (dlm->dlm_state == DLM_CTXT_JOINED) ||
+ (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN);
+ spin_unlock(&dlm_domain_lock);
+
+ return ret;
+}
+
+static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm)
+{
+ dlm_unregister_domain_handlers(dlm);
+ dlm_complete_thread(dlm);
+ dlm_complete_recovery_thread(dlm);
+
+ /* We've left the domain. Now we can take ourselves out of the
+ * list and allow the kref stuff to help us free the
+ * memory. */
+ spin_lock(&dlm_domain_lock);
+ list_del_init(&dlm->list);
+ spin_unlock(&dlm_domain_lock);
+
+ /* Wake up anyone waiting for us to remove this domain */
+ wake_up(&dlm_domain_events);
+}
+
+static void dlm_migrate_all_locks(struct dlm_ctxt *dlm)
+{
+ int i;
+ struct dlm_lock_resource *res;
+
+ mlog(0, "Migrating locks from domain %s\n", dlm->name);
+restart:
+ spin_lock(&dlm->spinlock);
+ for (i=0; i<DLM_HASH_SIZE; i++) {
+ while (!list_empty(&dlm->resources[i])) {
+ res = list_entry(dlm->resources[i].next,
+ struct dlm_lock_resource, list);
+ /* need reference when manually grabbing lockres */
+ dlm_lockres_get(res);
+ /* this should unhash the lockres
+ * and exit with dlm->spinlock */
+ mlog(0, "purging res=%p\n", res);
+ if (dlm_lockres_is_dirty(dlm, res)) {
+ /* HACK! this should absolutely go.
+ * need to figure out why some empty
+ * lockreses are still marked dirty */
+ mlog(ML_ERROR, "lockres %.*s dirty!\n",
+ res->lockname.len, res->lockname.name);
+
+ spin_unlock(&dlm->spinlock);
+ dlm_kick_thread(dlm, res);
+ wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
+ dlm_lockres_put(res);
+ goto restart;
+ }
+ dlm_purge_lockres(dlm, res);
+ dlm_lockres_put(res);
+ }
+ }
+ spin_unlock(&dlm->spinlock);
+
+ mlog(0, "DONE Migrating locks from domain %s\n", dlm->name);
+}
+
+static int dlm_no_joining_node(struct dlm_ctxt *dlm)
+{
+ int ret;
+
+ spin_lock(&dlm->spinlock);
+ ret = dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN;
+ spin_unlock(&dlm->spinlock);
+
+ return ret;
+}
+
+static void dlm_mark_domain_leaving(struct dlm_ctxt *dlm)
+{
+ /* Yikes, a double spinlock! I need domain_lock for the dlm
+ * state and the dlm spinlock for join state... Sorry! */
+again:
+ spin_lock(&dlm_domain_lock);
+ spin_lock(&dlm->spinlock);
+
+ if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
+ mlog(0, "Node %d is joining, we wait on it.\n",
+ dlm->joining_node);
+ spin_unlock(&dlm->spinlock);
+ spin_unlock(&dlm_domain_lock);
+
+ wait_event(dlm->dlm_join_events, dlm_no_joining_node(dlm));
+ goto again;
+ }
+
+ dlm->dlm_state = DLM_CTXT_LEAVING;
+ spin_unlock(&dlm->spinlock);
+ spin_unlock(&dlm_domain_lock);
+}
+
+static void __dlm_print_nodes(struct dlm_ctxt *dlm)
+{
+ int node = -1;
+
+ assert_spin_locked(&dlm->spinlock);
+
+ mlog(ML_NOTICE, "Nodes in my domain (\"%s\"):\n", dlm->name);
+
+ while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
+ node + 1)) < O2NM_MAX_NODES) {
+ mlog(ML_NOTICE, " node %d\n", node);
+ }
+}
+
+static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data)
+{
+ struct dlm_ctxt *dlm = data;
+ unsigned int node;
+ struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf;
+
+ mlog_entry("%p %u %p", msg, len, data);
+
+ if (!dlm_grab(dlm))
+ return 0;
+
+ node = exit_msg->node_idx;
+
+ mlog(0, "Node %u leaves domain %s\n", node, dlm->name);
+
+ spin_lock(&dlm->spinlock);
+ clear_bit(node, dlm->domain_map);
+ __dlm_print_nodes(dlm);
+
+ /* notify anything attached to the heartbeat events */
+ dlm_hb_event_notify_attached(dlm, node, 0);
+
+ spin_unlock(&dlm->spinlock);
+
+ dlm_put(dlm);
+
+ return 0;
+}
+
+static int dlm_send_one_domain_exit(struct dlm_ctxt *dlm,
+ unsigned int node)
+{
+ int status;
+ struct dlm_exit_domain leave_msg;
+
+ mlog(0, "Asking node %u if we can leave domain %s (me = %u)\n",
+ node, dlm->name, dlm->node_num);
+
+ memset(&leave_msg, 0, sizeof(leave_msg));
+ leave_msg.node_idx = dlm->node_num;
+
+ status = o2net_send_message(DLM_EXIT_DOMAIN_MSG, dlm->key,
+ &leave_msg, sizeof(leave_msg), node,
+ NULL);
+
+ mlog(0, "status return %d from o2net_send_message\n", status);
+
+ return status;
+}
+
+
+static void dlm_leave_domain(struct dlm_ctxt *dlm)
+{
+ int node, clear_node, status;
+
+ /* At this point we've migrated away all our locks and won't
+ * accept mastership of new ones. The dlm is responsible for
+ * almost nothing now. We make sure not to confuse any joining
+ * nodes and then commence shutdown procedure. */
+
+ spin_lock(&dlm->spinlock);
+ /* Clear ourselves from the domain map */
+ clear_bit(dlm->node_num, dlm->domain_map);
+ while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
+ 0)) < O2NM_MAX_NODES) {
+ /* Drop the dlm spinlock. This is safe wrt the domain_map.
+ * -nodes cannot be added now as the
+ * query_join_handler knows to respond with OK_NO_MAP
+ * -we catch the right network errors if a node is
+ * removed from the map while we're sending him the
+ * exit message. */
+ spin_unlock(&dlm->spinlock);
+
+ clear_node = 1;
+
+ status = dlm_send_one_domain_exit(dlm, node);
+ if (status < 0 &&
+ status != -ENOPROTOOPT &&
+ status != -ENOTCONN) {
+ mlog(ML_NOTICE, "Error %d sending domain exit message "
+ "to node %d\n", status, node);
+
+ /* Not sure what to do here but let's sleep for
+ * a bit in case this was a transient
+ * error... */
+ msleep(DLM_DOMAIN_BACKOFF_MS);
+ clear_node = 0;
+ }
+
+ spin_lock(&dlm->spinlock);
+ /* If we're not clearing the node bit then we intend
+ * to loop back around to try again. */
+ if (clear_node)
+ clear_bit(node, dlm->domain_map);
+ }
+ spin_unlock(&dlm->spinlock);
+}
+
+int dlm_joined(struct dlm_ctxt *dlm)
+{
+ int ret = 0;
+
+ spin_lock(&dlm_domain_lock);
+
+ if (dlm->dlm_state == DLM_CTXT_JOINED)
+ ret = 1;
+
+ spin_unlock(&dlm_domain_lock);
+
+ return ret;
+}
+
+int dlm_shutting_down(struct dlm_ctxt *dlm)
+{
+ int ret = 0;
+
+ spin_lock(&dlm_domain_lock);
+
+ if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
+ ret = 1;
+
+ spin_unlock(&dlm_domain_lock);
+
+ return ret;
+}
+
+void dlm_unregister_domain(struct dlm_ctxt *dlm)
+{
+ int leave = 0;
+
+ spin_lock(&dlm_domain_lock);
+ BUG_ON(dlm->dlm_state != DLM_CTXT_JOINED);
+ BUG_ON(!dlm->num_joins);
+
+ dlm->num_joins--;
+ if (!dlm->num_joins) {
+ /* We mark it "in shutdown" now so new register
+ * requests wait until we've completely left the
+ * domain. Don't use DLM_CTXT_LEAVING yet as we still
+ * want new domain joins to communicate with us at
+ * least until we've completed migration of our
+ * resources. */
+ dlm->dlm_state = DLM_CTXT_IN_SHUTDOWN;
+ leave = 1;
+ }
+ spin_unlock(&dlm_domain_lock);
+
+ if (leave) {
+ mlog(0, "shutting down domain %s\n", dlm->name);
+
+ /* We changed dlm state, notify the thread */
+ dlm_kick_thread(dlm, NULL);
+
+ dlm_migrate_all_locks(dlm);
+ dlm_mark_domain_leaving(dlm);
+ dlm_leave_domain(dlm);
+ dlm_complete_dlm_shutdown(dlm);
+ }
+ dlm_put(dlm);
+}
+EXPORT_SYMBOL_GPL(dlm_unregister_domain);
+
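+/* Overview of the join handshake handled below: a joining node sends
+ * DLM_QUERY_JOIN_MSG to every node it sees heartbeating.  Each node answers
+ * JOIN_OK (it is part of the domain and records the joiner), JOIN_OK_NO_MAP
+ * (it has no such domain, or hasn't started its own join yet), or
+ * JOIN_DISALLOW (another join is already in flight, or the joiner is not in
+ * its live map yet).  If the query round succeeds, the joiner broadcasts
+ * DLM_ASSERT_JOINED_MSG to the JOIN_OK responders; if it fails, it sends
+ * DLM_CANCEL_JOIN_MSG instead so those nodes can drop their joining_node
+ * state.  See dlm_try_to_join_domain() below.
+ */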
+static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data)
+{
+ struct dlm_query_join_request *query;
+ enum dlm_query_join_response response;
+ struct dlm_ctxt *dlm = NULL;
+
+ query = (struct dlm_query_join_request *) msg->buf;
+
+ mlog(0, "node %u wants to join domain %s\n", query->node_idx,
+ query->domain);
+
+ /*
+ * If heartbeat doesn't consider the node live, tell it
+ * to back off and try again. This gives heartbeat a chance
+ * to catch up.
+ */
+ if (!o2hb_check_node_heartbeating(query->node_idx)) {
+ mlog(0, "node %u is not in our live map yet\n",
+ query->node_idx);
+
+ response = JOIN_DISALLOW;
+ goto respond;
+ }
+
+ response = JOIN_OK_NO_MAP;
+
+ spin_lock(&dlm_domain_lock);
+ dlm = __dlm_lookup_domain_full(query->domain, query->name_len);
+ /* Once the dlm ctxt is marked as leaving then we don't want
+ * to be put in someone's domain map. */
+ if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) {
+ spin_lock(&dlm->spinlock);
+
+ if (dlm->dlm_state == DLM_CTXT_NEW &&
+ dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN) {
+ /* If this is a brand new context and we
+ * haven't started our join process yet, then
+ * the other node won the race. */
+ response = JOIN_OK_NO_MAP;
+ } else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
+ /* Disallow parallel joins. */
+ response = JOIN_DISALLOW;
+ } else {
+ /* Alright we're fully a part of this domain
+ * so we keep some state as to who's joining
+ * and indicate to him what needs to be fixed
+ * up. */
+ response = JOIN_OK;
+ __dlm_set_joining_node(dlm, query->node_idx);
+ }
+
+ spin_unlock(&dlm->spinlock);
+ }
+ spin_unlock(&dlm_domain_lock);
+
+respond:
+ mlog(0, "We respond with %u\n", response);
+
+ return response;
+}
+
+static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data)
+{
+ struct dlm_assert_joined *assert;
+ struct dlm_ctxt *dlm = NULL;
+
+ assert = (struct dlm_assert_joined *) msg->buf;
+
+ mlog(0, "node %u asserts join on domain %s\n", assert->node_idx,
+ assert->domain);
+
+ spin_lock(&dlm_domain_lock);
+ dlm = __dlm_lookup_domain_full(assert->domain, assert->name_len);
+ /* XXX should we consider no dlm ctxt an error? */
+ if (dlm) {
+ spin_lock(&dlm->spinlock);
+
+ /* Alright, this node has officially joined our
+ * domain. Set him in the map and clean up our
+ * leftover join state. */
+ BUG_ON(dlm->joining_node != assert->node_idx);
+ set_bit(assert->node_idx, dlm->domain_map);
+ __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
+
+ __dlm_print_nodes(dlm);
+
+ /* notify anything attached to the heartbeat events */
+ dlm_hb_event_notify_attached(dlm, assert->node_idx, 1);
+
+ spin_unlock(&dlm->spinlock);
+ }
+ spin_unlock(&dlm_domain_lock);
+
+ return 0;
+}
+
+static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data)
+{
+ struct dlm_cancel_join *cancel;
+ struct dlm_ctxt *dlm = NULL;
+
+ cancel = (struct dlm_cancel_join *) msg->buf;
+
+ mlog(0, "node %u cancels join on domain %s\n", cancel->node_idx,
+ cancel->domain);
+
+ spin_lock(&dlm_domain_lock);
+ dlm = __dlm_lookup_domain_full(cancel->domain, cancel->name_len);
+
+ if (dlm) {
+ spin_lock(&dlm->spinlock);
+
+ /* Yikes, this guy wants to cancel his join. No
+ * problem, we simply clean up our join state. */
+ BUG_ON(dlm->joining_node != cancel->node_idx);
+ __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
+
+ spin_unlock(&dlm->spinlock);
+ }
+ spin_unlock(&dlm_domain_lock);
+
+ return 0;
+}
+
+static int dlm_send_one_join_cancel(struct dlm_ctxt *dlm,
+ unsigned int node)
+{
+ int status;
+ struct dlm_cancel_join cancel_msg;
+
+ memset(&cancel_msg, 0, sizeof(cancel_msg));
+ cancel_msg.node_idx = dlm->node_num;
+ cancel_msg.name_len = strlen(dlm->name);
+ memcpy(cancel_msg.domain, dlm->name, cancel_msg.name_len);
+
+ status = o2net_send_message(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
+ &cancel_msg, sizeof(cancel_msg), node,
+ NULL);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+bail:
+ return status;
+}
+
+/* map_size should be in bytes. */
+static int dlm_send_join_cancels(struct dlm_ctxt *dlm,
+ unsigned long *node_map,
+ unsigned int map_size)
+{
+ int status, tmpstat;
+ unsigned int node;
+
+ if (map_size != (BITS_TO_LONGS(O2NM_MAX_NODES) *
+ sizeof(unsigned long))) {
+ mlog(ML_ERROR,
+ "map_size %u != BITS_TO_LONGS(O2NM_MAX_NODES) %u\n",
+ map_size, BITS_TO_LONGS(O2NM_MAX_NODES));
+ return -EINVAL;
+ }
+
+ status = 0;
+ node = -1;
+ while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
+ node + 1)) < O2NM_MAX_NODES) {
+ if (node == dlm->node_num)
+ continue;
+
+ tmpstat = dlm_send_one_join_cancel(dlm, node);
+ if (tmpstat) {
+ mlog(ML_ERROR, "Error return %d cancelling join on "
+ "node %d\n", tmpstat, node);
+ if (!status)
+ status = tmpstat;
+ }
+ }
+
+ if (status)
+ mlog_errno(status);
+ return status;
+}
+
+static int dlm_request_join(struct dlm_ctxt *dlm,
+ int node,
+ enum dlm_query_join_response *response)
+{
+ int status, retval;
+ struct dlm_query_join_request join_msg;
+
+ mlog(0, "querying node %d\n", node);
+
+ memset(&join_msg, 0, sizeof(join_msg));
+ join_msg.node_idx = dlm->node_num;
+ join_msg.name_len = strlen(dlm->name);
+ memcpy(join_msg.domain, dlm->name, join_msg.name_len);
+
+ status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg,
+ sizeof(join_msg), node, &retval);
+ if (status < 0 && status != -ENOPROTOOPT) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* -ENOPROTOOPT from the net code means the other side isn't
+ listening for our message type -- that's fine, it means
+ his dlm isn't up, so we can consider him a 'yes' but not
+ joined into the domain. */
+ if (status == -ENOPROTOOPT) {
+ status = 0;
+ *response = JOIN_OK_NO_MAP;
+ } else if (retval == JOIN_DISALLOW ||
+ retval == JOIN_OK ||
+ retval == JOIN_OK_NO_MAP) {
+ *response = retval;
+ } else {
+ status = -EINVAL;
+ mlog(ML_ERROR, "invalid response %d from node %u\n", retval,
+ node);
+ }
+
+ mlog(0, "status %d, node %d response is %d\n", status, node,
+ *response);
+
+bail:
+ return status;
+}
+
+static int dlm_send_one_join_assert(struct dlm_ctxt *dlm,
+ unsigned int node)
+{
+ int status;
+ struct dlm_assert_joined assert_msg;
+
+ mlog(0, "Sending join assert to node %u\n", node);
+
+ memset(&assert_msg, 0, sizeof(assert_msg));
+ assert_msg.node_idx = dlm->node_num;
+ assert_msg.name_len = strlen(dlm->name);
+ memcpy(assert_msg.domain, dlm->name, assert_msg.name_len);
+
+ status = o2net_send_message(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
+ &assert_msg, sizeof(assert_msg), node,
+ NULL);
+ if (status < 0)
+ mlog_errno(status);
+
+ return status;
+}
+
+static void dlm_send_join_asserts(struct dlm_ctxt *dlm,
+ unsigned long *node_map)
+{
+ int status, node, live;
+
+ status = 0;
+ node = -1;
+ while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
+ node + 1)) < O2NM_MAX_NODES) {
+ if (node == dlm->node_num)
+ continue;
+
+ do {
+ /* It is very important that this message be
+ * received so we spin until either the node
+ * has died or it gets the message. */
+ status = dlm_send_one_join_assert(dlm, node);
+
+ spin_lock(&dlm->spinlock);
+ live = test_bit(node, dlm->live_nodes_map);
+ spin_unlock(&dlm->spinlock);
+
+ if (status) {
+ mlog(ML_ERROR, "Error return %d asserting "
+ "join on node %d\n", status, node);
+
+ /* give us some time between errors... */
+ if (live)
+ msleep(DLM_DOMAIN_BACKOFF_MS);
+ }
+ } while (status && live);
+ }
+}
+
+struct domain_join_ctxt {
+ unsigned long live_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long yes_resp_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+};
+
+static int dlm_should_restart_join(struct dlm_ctxt *dlm,
+ struct domain_join_ctxt *ctxt,
+ enum dlm_query_join_response response)
+{
+ int ret;
+
+ if (response == JOIN_DISALLOW) {
+ mlog(0, "Latest response of disallow -- should restart\n");
+ return 1;
+ }
+
+ spin_lock(&dlm->spinlock);
+ /* For now, we restart the process if the node maps have
+ * changed at all */
+ ret = memcmp(ctxt->live_map, dlm->live_nodes_map,
+ sizeof(dlm->live_nodes_map));
+ spin_unlock(&dlm->spinlock);
+
+ if (ret)
+ mlog(0, "Node maps changed -- should restart\n");
+
+ return ret;
+}
+
+static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
+{
+ int status = 0, tmpstat, node;
+ struct domain_join_ctxt *ctxt;
+ enum dlm_query_join_response response;
+
+ mlog_entry("%p", dlm);
+
+ ctxt = kcalloc(1, sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* group sem locking should work for us here -- we're already
+ * registered for heartbeat events so filling this should be
+ * atomic wrt getting those handlers called. */
+ o2hb_fill_node_map(dlm->live_nodes_map, sizeof(dlm->live_nodes_map));
+
+ spin_lock(&dlm->spinlock);
+ memcpy(ctxt->live_map, dlm->live_nodes_map, sizeof(ctxt->live_map));
+
+ __dlm_set_joining_node(dlm, dlm->node_num);
+
+ spin_unlock(&dlm->spinlock);
+
+ node = -1;
+ while ((node = find_next_bit(ctxt->live_map, O2NM_MAX_NODES,
+ node + 1)) < O2NM_MAX_NODES) {
+ if (node == dlm->node_num)
+ continue;
+
+ status = dlm_request_join(dlm, node, &response);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* Ok, either we got a response or the node doesn't have a
+ * dlm up. */
+ if (response == JOIN_OK)
+ set_bit(node, ctxt->yes_resp_map);
+
+ if (dlm_should_restart_join(dlm, ctxt, response)) {
+ status = -EAGAIN;
+ goto bail;
+ }
+ }
+
+ mlog(0, "Yay, done querying nodes!\n");
+
+ /* Yay, everyone agrees we can join the domain.  My domain is
+ * comprised of all nodes who were put in the
+ * yes_resp_map.  Copy that into our domain map and send a join
+ * assert message to clean up everyone else's state. */
+ spin_lock(&dlm->spinlock);
+ memcpy(dlm->domain_map, ctxt->yes_resp_map,
+ sizeof(ctxt->yes_resp_map));
+ set_bit(dlm->node_num, dlm->domain_map);
+ spin_unlock(&dlm->spinlock);
+
+ dlm_send_join_asserts(dlm, ctxt->yes_resp_map);
+
+ /* Joined state *must* be set before the joining node
+ * information, otherwise the query_join handler may read no
+ * current joiner but a state of NEW and tell joining nodes
+ * we're not in the domain. */
+ spin_lock(&dlm_domain_lock);
+ dlm->dlm_state = DLM_CTXT_JOINED;
+ dlm->num_joins++;
+ spin_unlock(&dlm_domain_lock);
+
+bail:
+ spin_lock(&dlm->spinlock);
+ __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
+ if (!status)
+ __dlm_print_nodes(dlm);
+ spin_unlock(&dlm->spinlock);
+
+ if (ctxt) {
+ /* Do we need to send a cancel message to any nodes? */
+ if (status < 0) {
+ tmpstat = dlm_send_join_cancels(dlm,
+ ctxt->yes_resp_map,
+ sizeof(ctxt->yes_resp_map));
+ if (tmpstat < 0)
+ mlog_errno(tmpstat);
+ }
+ kfree(ctxt);
+ }
+
+ mlog(0, "returning %d\n", status);
+ return status;
+}
+
+static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm)
+{
+ o2hb_unregister_callback(&dlm->dlm_hb_up);
+ o2hb_unregister_callback(&dlm->dlm_hb_down);
+ o2net_unregister_handler_list(&dlm->dlm_domain_handlers);
+}
+
+static int dlm_register_domain_handlers(struct dlm_ctxt *dlm)
+{
+ int status;
+
+ mlog(0, "registering handlers.\n");
+
+ o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB,
+ dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI);
+ status = o2hb_register_callback(&dlm->dlm_hb_down);
+ if (status)
+ goto bail;
+
+ o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB,
+ dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI);
+ status = o2hb_register_callback(&dlm->dlm_hb_up);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_MASTER_REQUEST_MSG, dlm->key,
+ sizeof(struct dlm_master_request),
+ dlm_master_request_handler,
+ dlm, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_ASSERT_MASTER_MSG, dlm->key,
+ sizeof(struct dlm_assert_master),
+ dlm_assert_master_handler,
+ dlm, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_CREATE_LOCK_MSG, dlm->key,
+ sizeof(struct dlm_create_lock),
+ dlm_create_lock_handler,
+ dlm, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_CONVERT_LOCK_MSG, dlm->key,
+ DLM_CONVERT_LOCK_MAX_LEN,
+ dlm_convert_lock_handler,
+ dlm, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_UNLOCK_LOCK_MSG, dlm->key,
+ DLM_UNLOCK_LOCK_MAX_LEN,
+ dlm_unlock_lock_handler,
+ dlm, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_PROXY_AST_MSG, dlm->key,
+ DLM_PROXY_AST_MAX_LEN,
+ dlm_proxy_ast_handler,
+ dlm, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_EXIT_DOMAIN_MSG, dlm->key,
+ sizeof(struct dlm_exit_domain),
+ dlm_exit_domain_handler,
+ dlm, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_MIGRATE_REQUEST_MSG, dlm->key,
+ sizeof(struct dlm_migrate_request),
+ dlm_migrate_request_handler,
+ dlm, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_MIG_LOCKRES_MSG, dlm->key,
+ DLM_MIG_LOCKRES_MAX_LEN,
+ dlm_mig_lockres_handler,
+ dlm, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_MASTER_REQUERY_MSG, dlm->key,
+ sizeof(struct dlm_master_requery),
+ dlm_master_requery_handler,
+ dlm, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_LOCK_REQUEST_MSG, dlm->key,
+ sizeof(struct dlm_lock_request),
+ dlm_request_all_locks_handler,
+ dlm, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_RECO_DATA_DONE_MSG, dlm->key,
+ sizeof(struct dlm_reco_data_done),
+ dlm_reco_data_done_handler,
+ dlm, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_BEGIN_RECO_MSG, dlm->key,
+ sizeof(struct dlm_begin_reco),
+ dlm_begin_reco_handler,
+ dlm, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_FINALIZE_RECO_MSG, dlm->key,
+ sizeof(struct dlm_finalize_reco),
+ dlm_finalize_reco_handler,
+ dlm, &dlm->dlm_domain_handlers);
+ if (status)
+ goto bail;
+
+bail:
+ if (status)
+ dlm_unregister_domain_handlers(dlm);
+
+ return status;
+}
+
+static int dlm_join_domain(struct dlm_ctxt *dlm)
+{
+ int status;
+
+ BUG_ON(!dlm);
+
+ mlog(0, "Join domain %s\n", dlm->name);
+
+ status = dlm_register_domain_handlers(dlm);
+ if (status) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = dlm_launch_thread(dlm);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = dlm_launch_recovery_thread(dlm);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ do {
+ unsigned int backoff;
+ status = dlm_try_to_join_domain(dlm);
+
+ /* If we're racing another node to the join, then we
+ * need to back off temporarily and let them
+ * complete. */
+ if (status == -EAGAIN) {
+ if (signal_pending(current)) {
+ status = -ERESTARTSYS;
+ goto bail;
+ }
+
+ /*
+ * <chip> After you!
+ * <dale> No, after you!
+ * <chip> I insist!
+ * <dale> But you first!
+ * ...
+ */
+ backoff = (unsigned int)(jiffies & 0x3);
+ backoff *= DLM_DOMAIN_BACKOFF_MS;
+ mlog(0, "backoff %d\n", backoff);
+ msleep(backoff);
+ }
+ } while (status == -EAGAIN);
+
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = 0;
+bail:
+ wake_up(&dlm_domain_events);
+
+ if (status) {
+ dlm_unregister_domain_handlers(dlm);
+ dlm_complete_thread(dlm);
+ dlm_complete_recovery_thread(dlm);
+ }
+
+ return status;
+}
+
+static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
+ u32 key)
+{
+ int i;
+ struct dlm_ctxt *dlm = NULL;
+
+ dlm = kcalloc(1, sizeof(*dlm), GFP_KERNEL);
+ if (!dlm) {
+ mlog_errno(-ENOMEM);
+ goto leave;
+ }
+
+ dlm->name = kmalloc(strlen(domain) + 1, GFP_KERNEL);
+ if (dlm->name == NULL) {
+ mlog_errno(-ENOMEM);
+ kfree(dlm);
+ dlm = NULL;
+ goto leave;
+ }
+
+ dlm->resources = (struct list_head *) __get_free_page(GFP_KERNEL);
+ if (!dlm->resources) {
+ mlog_errno(-ENOMEM);
+ kfree(dlm->name);
+ kfree(dlm);
+ dlm = NULL;
+ goto leave;
+ }
+ memset(dlm->resources, 0, PAGE_SIZE);
+
+ for (i=0; i<DLM_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&dlm->resources[i]);
+
+ strcpy(dlm->name, domain);
+ dlm->key = key;
+ dlm->node_num = o2nm_this_node();
+
+ spin_lock_init(&dlm->spinlock);
+ spin_lock_init(&dlm->master_lock);
+ spin_lock_init(&dlm->ast_lock);
+ INIT_LIST_HEAD(&dlm->list);
+ INIT_LIST_HEAD(&dlm->dirty_list);
+ INIT_LIST_HEAD(&dlm->reco.resources);
+ INIT_LIST_HEAD(&dlm->reco.received);
+ INIT_LIST_HEAD(&dlm->reco.node_data);
+ INIT_LIST_HEAD(&dlm->purge_list);
+ INIT_LIST_HEAD(&dlm->dlm_domain_handlers);
+ dlm->reco.state = 0;
+
+ INIT_LIST_HEAD(&dlm->pending_asts);
+ INIT_LIST_HEAD(&dlm->pending_basts);
+
+ mlog(0, "dlm->recovery_map=%p, &(dlm->recovery_map[0])=%p\n",
+ dlm->recovery_map, &(dlm->recovery_map[0]));
+
+ memset(dlm->recovery_map, 0, sizeof(dlm->recovery_map));
+ memset(dlm->live_nodes_map, 0, sizeof(dlm->live_nodes_map));
+ memset(dlm->domain_map, 0, sizeof(dlm->domain_map));
+
+ dlm->dlm_thread_task = NULL;
+ dlm->dlm_reco_thread_task = NULL;
+ init_waitqueue_head(&dlm->dlm_thread_wq);
+ init_waitqueue_head(&dlm->dlm_reco_thread_wq);
+ init_waitqueue_head(&dlm->reco.event);
+ init_waitqueue_head(&dlm->ast_wq);
+ init_waitqueue_head(&dlm->migration_wq);
+ INIT_LIST_HEAD(&dlm->master_list);
+ INIT_LIST_HEAD(&dlm->mle_hb_events);
+
+ dlm->joining_node = DLM_LOCK_RES_OWNER_UNKNOWN;
+ init_waitqueue_head(&dlm->dlm_join_events);
+
+ dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
+ dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
+ atomic_set(&dlm->local_resources, 0);
+ atomic_set(&dlm->remote_resources, 0);
+ atomic_set(&dlm->unknown_resources, 0);
+
+ spin_lock_init(&dlm->work_lock);
+ INIT_LIST_HEAD(&dlm->work_list);
+ INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work, dlm);
+
+ kref_init(&dlm->dlm_refs);
+ dlm->dlm_state = DLM_CTXT_NEW;
+
+ INIT_LIST_HEAD(&dlm->dlm_eviction_callbacks);
+
+ mlog(0, "context init: refcount %u\n",
+ atomic_read(&dlm->dlm_refs.refcount));
+
+leave:
+ return dlm;
+}
+
+/*
+ * dlm_register_domain: one-time setup per "domain"
+ */
+struct dlm_ctxt * dlm_register_domain(const char *domain,
+ u32 key)
+{
+ int ret;
+ struct dlm_ctxt *dlm = NULL;
+ struct dlm_ctxt *new_ctxt = NULL;
+
+ if (strlen(domain) > O2NM_MAX_NAME_LEN) {
+ ret = -ENAMETOOLONG;
+ mlog(ML_ERROR, "domain name length too long\n");
+ goto leave;
+ }
+
+ if (!o2hb_check_local_node_heartbeating()) {
+ mlog(ML_ERROR, "the local node has not been configured, or is "
+ "not heartbeating\n");
+ ret = -EPROTO;
+ goto leave;
+ }
+
+ mlog(0, "register called for domain \"%s\"\n", domain);
+
+retry:
+ dlm = NULL;
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ mlog_errno(ret);
+ goto leave;
+ }
+
+ spin_lock(&dlm_domain_lock);
+
+ dlm = __dlm_lookup_domain(domain);
+ if (dlm) {
+ if (dlm->dlm_state != DLM_CTXT_JOINED) {
+ spin_unlock(&dlm_domain_lock);
+
+ mlog(0, "This ctxt is not joined yet!\n");
+ wait_event_interruptible(dlm_domain_events,
+ dlm_wait_on_domain_helper(
+ domain));
+ goto retry;
+ }
+
+ __dlm_get(dlm);
+ dlm->num_joins++;
+
+ spin_unlock(&dlm_domain_lock);
+
+ ret = 0;
+ goto leave;
+ }
+
+ /* doesn't exist */
+ if (!new_ctxt) {
+ spin_unlock(&dlm_domain_lock);
+
+ new_ctxt = dlm_alloc_ctxt(domain, key);
+ if (new_ctxt)
+ goto retry;
+
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto leave;
+ }
+
+ /* a little variable switch-a-roo here... */
+ dlm = new_ctxt;
+ new_ctxt = NULL;
+
+ /* add the new domain */
+ list_add_tail(&dlm->list, &dlm_domains);
+ spin_unlock(&dlm_domain_lock);
+
+ ret = dlm_join_domain(dlm);
+ if (ret) {
+ mlog_errno(ret);
+ dlm_put(dlm);
+ goto leave;
+ }
+
+ ret = 0;
+leave:
+ if (new_ctxt)
+ dlm_free_ctxt_mem(new_ctxt);
+
+ if (ret < 0)
+ dlm = ERR_PTR(ret);
+
+ return dlm;
+}
+EXPORT_SYMBOL_GPL(dlm_register_domain);
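For context, a minimal caller sketch of the dlm_register_domain() entry point exported above. The wrapper name is hypothetical and the teardown call is assumed from dlmapi.h (it is not part of this hunk); error handling follows the ERR_PTR() convention used in the return path above.

static int example_attach_to_domain(const char *uuid, u32 key)
{
	struct dlm_ctxt *dlm;

	/* returns a referenced context, or ERR_PTR() on failure */
	dlm = dlm_register_domain(uuid, key);
	if (IS_ERR(dlm))
		return PTR_ERR(dlm);

	/* ... issue dlmlock()/dlmunlock() calls against "dlm" ... */

	/* teardown call assumed from dlmapi.h; not part of this hunk */
	dlm_unregister_domain(dlm);
	return 0;
}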
+
+static LIST_HEAD(dlm_join_handlers);
+
+static void dlm_unregister_net_handlers(void)
+{
+ o2net_unregister_handler_list(&dlm_join_handlers);
+}
+
+static int dlm_register_net_handlers(void)
+{
+ int status = 0;
+
+ status = o2net_register_handler(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY,
+ sizeof(struct dlm_query_join_request),
+ dlm_query_join_handler,
+ NULL, &dlm_join_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
+ sizeof(struct dlm_assert_joined),
+ dlm_assert_joined_handler,
+ NULL, &dlm_join_handlers);
+ if (status)
+ goto bail;
+
+ status = o2net_register_handler(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
+ sizeof(struct dlm_cancel_join),
+ dlm_cancel_join_handler,
+ NULL, &dlm_join_handlers);
+
+bail:
+ if (status < 0)
+ dlm_unregister_net_handlers();
+
+ return status;
+}
+
+/* Domain eviction callback handling.
+ *
+ * The file system requires notification of node death *before* the
+ * dlm completes its recovery work, otherwise it may be able to
+ * acquire locks on resources requiring recovery. Since the dlm can
+ * evict a node from its domain *before* heartbeat fires, a similar
+ * mechanism is required. */
+
+/* Eviction is not expected to happen often, so a per-domain lock is
+ * not necessary. Eviction callbacks are allowed to sleep for short
+ * periods of time. */
+static DECLARE_RWSEM(dlm_callback_sem);
+
+void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
+ int node_num)
+{
+ struct list_head *iter;
+ struct dlm_eviction_cb *cb;
+
+ down_read(&dlm_callback_sem);
+ list_for_each(iter, &dlm->dlm_eviction_callbacks) {
+ cb = list_entry(iter, struct dlm_eviction_cb, ec_item);
+
+ cb->ec_func(node_num, cb->ec_data);
+ }
+ up_read(&dlm_callback_sem);
+}
+
+void dlm_setup_eviction_cb(struct dlm_eviction_cb *cb,
+ dlm_eviction_func *f,
+ void *data)
+{
+ INIT_LIST_HEAD(&cb->ec_item);
+ cb->ec_func = f;
+ cb->ec_data = data;
+}
+EXPORT_SYMBOL_GPL(dlm_setup_eviction_cb);
+
+void dlm_register_eviction_cb(struct dlm_ctxt *dlm,
+ struct dlm_eviction_cb *cb)
+{
+ down_write(&dlm_callback_sem);
+ list_add_tail(&cb->ec_item, &dlm->dlm_eviction_callbacks);
+ up_write(&dlm_callback_sem);
+}
+EXPORT_SYMBOL_GPL(dlm_register_eviction_cb);
+
+void dlm_unregister_eviction_cb(struct dlm_eviction_cb *cb)
+{
+ down_write(&dlm_callback_sem);
+ list_del_init(&cb->ec_item);
+ up_write(&dlm_callback_sem);
+}
+EXPORT_SYMBOL_GPL(dlm_unregister_eviction_cb);
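To make the eviction-callback flow above concrete, a hedged consumer sketch; the callback body and the example_* names are hypothetical, only the three dlm_*_eviction_cb() helpers come from the code above.

/* Hypothetical consumer of the eviction callback API above. */
static void example_node_evicted(int node_num, void *data)
{
	/* called before dlm recovery completes; may sleep briefly */
	mlog(ML_NOTICE, "node %d evicted from our domain\n", node_num);
}

static struct dlm_eviction_cb example_cb;

static void example_hook_eviction(struct dlm_ctxt *dlm)
{
	dlm_setup_eviction_cb(&example_cb, example_node_evicted, NULL);
	dlm_register_eviction_cb(dlm, &example_cb);
	/* later, before freeing: dlm_unregister_eviction_cb(&example_cb); */
}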
+
+static int __init dlm_init(void)
+{
+ int status;
+
+ dlm_print_version();
+
+ status = dlm_init_mle_cache();
+ if (status)
+ return -1;
+
+ status = dlm_register_net_handlers();
+ if (status) {
+ dlm_destroy_mle_cache();
+ return -1;
+ }
+
+ return 0;
+}
+
+static void __exit dlm_exit (void)
+{
+ dlm_unregister_net_handlers();
+ dlm_destroy_mle_cache();
+}
+
+MODULE_AUTHOR("Oracle");
+MODULE_LICENSE("GPL");
+
+module_init(dlm_init);
+module_exit(dlm_exit);
diff --git a/fs/ocfs2/dlm/dlmdomain.h b/fs/ocfs2/dlm/dlmdomain.h
new file mode 100644
index 00000000000..2f7f60bfeb3
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmdomain.h
@@ -0,0 +1,36 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmdomain.h
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+#ifndef DLMDOMAIN_H
+#define DLMDOMAIN_H
+
+extern spinlock_t dlm_domain_lock;
+extern struct list_head dlm_domains;
+
+int dlm_joined(struct dlm_ctxt *dlm);
+int dlm_shutting_down(struct dlm_ctxt *dlm);
+void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
+ int node_num);
+
+#endif
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
new file mode 100644
index 00000000000..dd2d24dc25e
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -0,0 +1,640 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmfs.c
+ *
+ * Code which implements the kernel side of a minimal userspace
+ * interface to our DLM. This file handles the virtual file system
+ * used for communication with userspace. Credit should go to ramfs,
+ * which was a template for the fs side of this module.
+ *
+ * Copyright (C) 2003, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+/* Simple VFS hooks based on: */
+/*
+ * Resizable simple ram filesystem for Linux.
+ *
+ * Copyright (C) 2000 Linus Torvalds.
+ * 2000 Transmeta Corp.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/smp_lock.h>
+#include <linux/backing-dev.h>
+
+#include <asm/uaccess.h>
+
+
+#include "cluster/nodemanager.h"
+#include "cluster/heartbeat.h"
+#include "cluster/tcp.h"
+
+#include "dlmapi.h"
+
+#include "userdlm.h"
+
+#include "dlmfsver.h"
+
+#define MLOG_MASK_PREFIX ML_DLMFS
+#include "cluster/masklog.h"
+
+static struct super_operations dlmfs_ops;
+static struct file_operations dlmfs_file_operations;
+static struct inode_operations dlmfs_dir_inode_operations;
+static struct inode_operations dlmfs_root_inode_operations;
+static struct inode_operations dlmfs_file_inode_operations;
+static kmem_cache_t *dlmfs_inode_cache;
+
+struct workqueue_struct *user_dlm_worker;
+
+/*
+ * decodes a set of open flags into a valid lock level and a set of flags.
+ * returns < 0 if we have invalid flags
+ * flags which mean something to us:
+ * O_RDONLY -> PRMODE level
+ * O_WRONLY -> EXMODE level
+ *
+ * O_NONBLOCK -> LKM_NOQUEUE
+ */
+static int dlmfs_decode_open_flags(int open_flags,
+ int *level,
+ int *flags)
+{
+ if (open_flags & (O_WRONLY|O_RDWR))
+ *level = LKM_EXMODE;
+ else
+ *level = LKM_PRMODE;
+
+ *flags = 0;
+ if (open_flags & O_NONBLOCK)
+ *flags |= LKM_NOQUEUE;
+
+ return 0;
+}
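A quick illustration of the mapping implemented above (illustration only, nothing beyond the function itself is implied):

/* Illustration only: decoding a non-blocking write open. */
int level, flags;

dlmfs_decode_open_flags(O_WRONLY | O_NONBLOCK, &level, &flags);
/* level == LKM_EXMODE, flags == LKM_NOQUEUE; a plain O_RDONLY
 * open would instead yield LKM_PRMODE with no extra flags. */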
+
+static int dlmfs_file_open(struct inode *inode,
+ struct file *file)
+{
+ int status, level, flags;
+ struct dlmfs_filp_private *fp = NULL;
+ struct dlmfs_inode_private *ip;
+
+ if (S_ISDIR(inode->i_mode))
+ BUG();
+
+ mlog(0, "open called on inode %lu, flags 0x%x\n", inode->i_ino,
+ file->f_flags);
+
+ status = dlmfs_decode_open_flags(file->f_flags, &level, &flags);
+ if (status < 0)
+ goto bail;
+
+ /* We don't want to honor O_APPEND at read/write time as it
+ * doesn't make sense for LVB writes. */
+ file->f_flags &= ~O_APPEND;
+
+ fp = kmalloc(sizeof(*fp), GFP_KERNEL);
+ if (!fp) {
+ status = -ENOMEM;
+ goto bail;
+ }
+ fp->fp_lock_level = level;
+
+ ip = DLMFS_I(inode);
+
+ status = user_dlm_cluster_lock(&ip->ip_lockres, level, flags);
+ if (status < 0) {
+ /* this is a strange error to return here but I want
+ * userspace to be able to distinguish a valid lock
+ * request from one that simply couldn't be granted. */
+ if (flags & LKM_NOQUEUE && status == -EAGAIN)
+ status = -ETXTBSY;
+ kfree(fp);
+ goto bail;
+ }
+
+ file->private_data = fp;
+bail:
+ return status;
+}
+
+static int dlmfs_file_release(struct inode *inode,
+ struct file *file)
+{
+ int level, status;
+ struct dlmfs_inode_private *ip = DLMFS_I(inode);
+ struct dlmfs_filp_private *fp =
+ (struct dlmfs_filp_private *) file->private_data;
+
+ if (S_ISDIR(inode->i_mode))
+ BUG();
+
+ mlog(0, "close called on inode %lu\n", inode->i_ino);
+
+ status = 0;
+ if (fp) {
+ level = fp->fp_lock_level;
+ if (level != LKM_IVMODE)
+ user_dlm_cluster_unlock(&ip->ip_lockres, level);
+
+ kfree(fp);
+ file->private_data = NULL;
+ }
+
+ return 0;
+}
+
+static ssize_t dlmfs_file_read(struct file *filp,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int bytes_left;
+ ssize_t readlen;
+ char *lvb_buf;
+ struct inode *inode = filp->f_dentry->d_inode;
+
+ mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
+ inode->i_ino, count, *ppos);
+
+ if (*ppos >= i_size_read(inode))
+ return 0;
+
+ if (!count)
+ return 0;
+
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
+
+ /* don't read past the lvb */
+ if ((count + *ppos) > i_size_read(inode))
+ readlen = i_size_read(inode) - *ppos;
+ else
+ readlen = count;
+
+ lvb_buf = kmalloc(readlen, GFP_KERNEL);
+ if (!lvb_buf)
+ return -ENOMEM;
+
+ user_dlm_read_lvb(inode, lvb_buf, readlen);
+ bytes_left = __copy_to_user(buf, lvb_buf, readlen);
+ readlen -= bytes_left;
+
+ kfree(lvb_buf);
+
+ *ppos = *ppos + readlen;
+
+ mlog(0, "read %zd bytes\n", readlen);
+ return readlen;
+}
+
+static ssize_t dlmfs_file_write(struct file *filp,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int bytes_left;
+ ssize_t writelen;
+ char *lvb_buf;
+ struct inode *inode = filp->f_dentry->d_inode;
+
+ mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
+ inode->i_ino, count, *ppos);
+
+ if (*ppos >= i_size_read(inode))
+ return -ENOSPC;
+
+ if (!count)
+ return 0;
+
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT;
+
+ /* don't write past the lvb */
+ if ((count + *ppos) > i_size_read(inode))
+ writelen = i_size_read(inode) - *ppos;
+ else
+ writelen = count;
+
+ lvb_buf = kmalloc(writelen, GFP_KERNEL);
+ if (!lvb_buf)
+ return -ENOMEM;
+
+ bytes_left = copy_from_user(lvb_buf, buf, writelen);
+ writelen -= bytes_left;
+ if (writelen)
+ user_dlm_write_lvb(inode, lvb_buf, writelen);
+
+ kfree(lvb_buf);
+
+ *ppos = *ppos + writelen;
+ mlog(0, "wrote %zd bytes\n", writelen);
+ return writelen;
+}
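Together, the open/read/write paths above define the userspace protocol: opening a lock file acquires the cluster lock at the decoded level, read()/write() transfer at most the lock value block, and close() drops the lock. A hedged userspace sketch follows; the mount point and lock names are examples only, and the buffer size assumes DLM_LVB_LEN is no larger than 64 bytes.

/* Userspace sketch (not part of the patch): take a shared lock
 * on a dlmfs file without blocking and read its LVB. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char lvb[64];		/* assumed >= DLM_LVB_LEN */
	ssize_t n;
	int fd;

	/* O_RDONLY -> PRMODE, O_NONBLOCK -> trylock (see open above);
	 * a busy lock surfaces as open() failing with ETXTBSY. */
	fd = open("/dlm/mydomain/mylock", O_RDONLY | O_NONBLOCK);
	if (fd < 0)
		return 1;

	n = read(fd, lvb, sizeof(lvb));
	if (n > 0)
		printf("read %zd LVB bytes\n", n);

	close(fd);		/* releases the cluster lock */
	return 0;
}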
+
+static void dlmfs_init_once(void *foo,
+ kmem_cache_t *cachep,
+ unsigned long flags)
+{
+ struct dlmfs_inode_private *ip =
+ (struct dlmfs_inode_private *) foo;
+
+ if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+ SLAB_CTOR_CONSTRUCTOR) {
+ ip->ip_dlm = NULL;
+ ip->ip_parent = NULL;
+
+ inode_init_once(&ip->ip_vfs_inode);
+ }
+}
+
+static struct inode *dlmfs_alloc_inode(struct super_block *sb)
+{
+ struct dlmfs_inode_private *ip;
+
+ ip = kmem_cache_alloc(dlmfs_inode_cache, SLAB_NOFS);
+ if (!ip)
+ return NULL;
+
+ return &ip->ip_vfs_inode;
+}
+
+static void dlmfs_destroy_inode(struct inode *inode)
+{
+ kmem_cache_free(dlmfs_inode_cache, DLMFS_I(inode));
+}
+
+static void dlmfs_clear_inode(struct inode *inode)
+{
+ int status;
+ struct dlmfs_inode_private *ip;
+
+ if (!inode)
+ return;
+
+ mlog(0, "inode %lu\n", inode->i_ino);
+
+ ip = DLMFS_I(inode);
+
+ if (S_ISREG(inode->i_mode)) {
+ status = user_dlm_destroy_lock(&ip->ip_lockres);
+ if (status < 0)
+ mlog_errno(status);
+ iput(ip->ip_parent);
+ goto clear_fields;
+ }
+
+ mlog(0, "we're a directory, ip->ip_dlm = 0x%p\n", ip->ip_dlm);
+ /* we must be a directory. If required, let's unregister the
+ * dlm context now. */
+ if (ip->ip_dlm)
+ user_dlm_unregister_context(ip->ip_dlm);
+clear_fields:
+ ip->ip_parent = NULL;
+ ip->ip_dlm = NULL;
+}
+
+static struct backing_dev_info dlmfs_backing_dev_info = {
+ .ra_pages = 0, /* No readahead */
+ .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
+};
+
+static struct inode *dlmfs_get_root_inode(struct super_block *sb)
+{
+ struct inode *inode = new_inode(sb);
+ int mode = S_IFDIR | 0755;
+ struct dlmfs_inode_private *ip;
+
+ if (inode) {
+ ip = DLMFS_I(inode);
+
+ inode->i_mode = mode;
+ inode->i_uid = current->fsuid;
+ inode->i_gid = current->fsgid;
+ inode->i_blksize = PAGE_CACHE_SIZE;
+ inode->i_blocks = 0;
+ inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ inode->i_nlink++;
+
+ inode->i_fop = &simple_dir_operations;
+ inode->i_op = &dlmfs_root_inode_operations;
+ }
+
+ return inode;
+}
+
+static struct inode *dlmfs_get_inode(struct inode *parent,
+ struct dentry *dentry,
+ int mode)
+{
+ struct super_block *sb = parent->i_sb;
+ struct inode * inode = new_inode(sb);
+ struct dlmfs_inode_private *ip;
+
+ if (!inode)
+ return NULL;
+
+ inode->i_mode = mode;
+ inode->i_uid = current->fsuid;
+ inode->i_gid = current->fsgid;
+ inode->i_blksize = PAGE_CACHE_SIZE;
+ inode->i_blocks = 0;
+ inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+
+ ip = DLMFS_I(inode);
+ ip->ip_dlm = DLMFS_I(parent)->ip_dlm;
+
+ switch (mode & S_IFMT) {
+ default:
+ /* for now we don't support anything other than
+ * directories and regular files. */
+ BUG();
+ break;
+ case S_IFREG:
+ inode->i_op = &dlmfs_file_inode_operations;
+ inode->i_fop = &dlmfs_file_operations;
+
+ i_size_write(inode, DLM_LVB_LEN);
+
+ user_dlm_lock_res_init(&ip->ip_lockres, dentry);
+
+ /* released at clear_inode time, this ensures that we
+ * get to drop the dlm reference on each lock *before*
+ * we call the unregister code for releasing parent
+ * directories. */
+ ip->ip_parent = igrab(parent);
+ BUG_ON(!ip->ip_parent);
+ break;
+ case S_IFDIR:
+ inode->i_op = &dlmfs_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+
+ /* directory inodes start off with i_nlink ==
+ * 2 (for "." entry) */
+ inode->i_nlink++;
+ break;
+ }
+
+ if (parent->i_mode & S_ISGID) {
+ inode->i_gid = parent->i_gid;
+ if (S_ISDIR(mode))
+ inode->i_mode |= S_ISGID;
+ }
+
+ return inode;
+}
+
+/*
+ * File creation. Allocate an inode, and we're done.
+ */
+/* SMP-safe */
+static int dlmfs_mkdir(struct inode * dir,
+ struct dentry * dentry,
+ int mode)
+{
+ int status;
+ struct inode *inode = NULL;
+ struct qstr *domain = &dentry->d_name;
+ struct dlmfs_inode_private *ip;
+ struct dlm_ctxt *dlm;
+
+ mlog(0, "mkdir %.*s\n", domain->len, domain->name);
+
+ /* verify that we have a proper domain */
+ if (domain->len >= O2NM_MAX_NAME_LEN) {
+ status = -EINVAL;
+ mlog(ML_ERROR, "invalid domain name for directory.\n");
+ goto bail;
+ }
+
+ inode = dlmfs_get_inode(dir, dentry, mode | S_IFDIR);
+ if (!inode) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ ip = DLMFS_I(inode);
+
+ dlm = user_dlm_register_context(domain);
+ if (IS_ERR(dlm)) {
+ status = PTR_ERR(dlm);
+ mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n",
+ status, domain->len, domain->name);
+ goto bail;
+ }
+ ip->ip_dlm = dlm;
+
+ dir->i_nlink++;
+ d_instantiate(dentry, inode);
+ dget(dentry); /* Extra count - pin the dentry in core */
+
+ status = 0;
+bail:
+ if (status < 0)
+ iput(inode);
+ return status;
+}
+
+static int dlmfs_create(struct inode *dir,
+ struct dentry *dentry,
+ int mode,
+ struct nameidata *nd)
+{
+ int status = 0;
+ struct inode *inode;
+ struct qstr *name = &dentry->d_name;
+
+ mlog(0, "create %.*s\n", name->len, name->name);
+
+ /* verify name is valid and doesn't contain any dlm reserved
+ * characters */
+ if (name->len >= USER_DLM_LOCK_ID_MAX_LEN ||
+ name->name[0] == '$') {
+ status = -EINVAL;
+ mlog(ML_ERROR, "invalid lock name, %.*s\n", name->len,
+ name->name);
+ goto bail;
+ }
+
+ inode = dlmfs_get_inode(dir, dentry, mode | S_IFREG);
+ if (!inode) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ d_instantiate(dentry, inode);
+ dget(dentry); /* Extra count - pin the dentry in core */
+bail:
+ return status;
+}
+
+static int dlmfs_unlink(struct inode *dir,
+ struct dentry *dentry)
+{
+ int status;
+ struct inode *inode = dentry->d_inode;
+
+ mlog(0, "unlink inode %lu\n", inode->i_ino);
+
+ /* if there are no current holders and none waiting to
+ * acquire a lock, this basically destroys our lockres. */
+ status = user_dlm_destroy_lock(&DLMFS_I(inode)->ip_lockres);
+ if (status < 0) {
+ mlog(ML_ERROR, "unlink %.*s, error %d from destroy\n",
+ dentry->d_name.len, dentry->d_name.name, status);
+ goto bail;
+ }
+ status = simple_unlink(dir, dentry);
+bail:
+ return status;
+}
+
+static int dlmfs_fill_super(struct super_block * sb,
+ void * data,
+ int silent)
+{
+ struct inode * inode;
+ struct dentry * root;
+
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = DLMFS_MAGIC;
+ sb->s_op = &dlmfs_ops;
+ inode = dlmfs_get_root_inode(sb);
+ if (!inode)
+ return -ENOMEM;
+
+ root = d_alloc_root(inode);
+ if (!root) {
+ iput(inode);
+ return -ENOMEM;
+ }
+ sb->s_root = root;
+ return 0;
+}
+
+static struct file_operations dlmfs_file_operations = {
+ .open = dlmfs_file_open,
+ .release = dlmfs_file_release,
+ .read = dlmfs_file_read,
+ .write = dlmfs_file_write,
+};
+
+static struct inode_operations dlmfs_dir_inode_operations = {
+ .create = dlmfs_create,
+ .lookup = simple_lookup,
+ .unlink = dlmfs_unlink,
+};
+
+/* this way we can restrict mkdir to only the toplevel of the fs. */
+static struct inode_operations dlmfs_root_inode_operations = {
+ .lookup = simple_lookup,
+ .mkdir = dlmfs_mkdir,
+ .rmdir = simple_rmdir,
+};
+
+static struct super_operations dlmfs_ops = {
+ .statfs = simple_statfs,
+ .alloc_inode = dlmfs_alloc_inode,
+ .destroy_inode = dlmfs_destroy_inode,
+ .clear_inode = dlmfs_clear_inode,
+ .drop_inode = generic_delete_inode,
+};
+
+static struct inode_operations dlmfs_file_inode_operations = {
+ .getattr = simple_getattr,
+};
+
+static struct super_block *dlmfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ return get_sb_nodev(fs_type, flags, data, dlmfs_fill_super);
+}
+
+static struct file_system_type dlmfs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "ocfs2_dlmfs",
+ .get_sb = dlmfs_get_sb,
+ .kill_sb = kill_litter_super,
+};
+
+static int __init init_dlmfs_fs(void)
+{
+ int status;
+ int cleanup_inode = 0, cleanup_worker = 0;
+
+ dlmfs_print_version();
+
+ dlmfs_inode_cache = kmem_cache_create("dlmfs_inode_cache",
+ sizeof(struct dlmfs_inode_private),
+ 0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT,
+ dlmfs_init_once, NULL);
+ if (!dlmfs_inode_cache)
+ return -ENOMEM;
+ cleanup_inode = 1;
+
+ user_dlm_worker = create_singlethread_workqueue("user_dlm");
+ if (!user_dlm_worker) {
+ status = -ENOMEM;
+ goto bail;
+ }
+ cleanup_worker = 1;
+
+ status = register_filesystem(&dlmfs_fs_type);
+bail:
+ if (status) {
+ if (cleanup_inode)
+ kmem_cache_destroy(dlmfs_inode_cache);
+ if (cleanup_worker)
+ destroy_workqueue(user_dlm_worker);
+ } else
+ printk("OCFS2 User DLM kernel interface loaded\n");
+ return status;
+}
+
+static void __exit exit_dlmfs_fs(void)
+{
+ unregister_filesystem(&dlmfs_fs_type);
+
+ flush_workqueue(user_dlm_worker);
+ destroy_workqueue(user_dlm_worker);
+
+ if (kmem_cache_destroy(dlmfs_inode_cache))
+ printk(KERN_INFO "dlmfs_inode_cache: not all structures "
+ "were freed\n");
+}
+
+MODULE_AUTHOR("Oracle");
+MODULE_LICENSE("GPL");
+
+module_init(init_dlmfs_fs)
+module_exit(exit_dlmfs_fs)
diff --git a/fs/ocfs2/dlm/dlmfsver.c b/fs/ocfs2/dlm/dlmfsver.c
new file mode 100644
index 00000000000..d2be3ad841f
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmfsver.c
@@ -0,0 +1,42 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmfsver.c
+ *
+ * version string
+ *
+ * Copyright (C) 2002, 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "dlmfsver.h"
+
+#define DLM_BUILD_VERSION "1.3.3"
+
+#define VERSION_STR "OCFS2 DLMFS " DLM_BUILD_VERSION
+
+void dlmfs_print_version(void)
+{
+ printk(KERN_INFO "%s\n", VERSION_STR);
+}
+
+MODULE_DESCRIPTION(VERSION_STR);
+
+MODULE_VERSION(DLM_BUILD_VERSION);
diff --git a/fs/ocfs2/dlm/dlmfsver.h b/fs/ocfs2/dlm/dlmfsver.h
new file mode 100644
index 00000000000..f35eadbed25
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmfsver.h
@@ -0,0 +1,31 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmfsver.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef DLMFS_VER_H
+#define DLMFS_VER_H
+
+void dlmfs_print_version(void);
+
+#endif /* DLMFS_VER_H */
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
new file mode 100644
index 00000000000..d1a0038557a
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -0,0 +1,676 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmlock.c
+ *
+ * underlying calls for lock creation
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/random.h>
+#include <linux/blkdev.h>
+#include <linux/socket.h>
+#include <linux/inet.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+
+
+#include "cluster/heartbeat.h"
+#include "cluster/nodemanager.h"
+#include "cluster/tcp.h"
+
+#include "dlmapi.h"
+#include "dlmcommon.h"
+
+#include "dlmconvert.h"
+
+#define MLOG_MASK_PREFIX ML_DLM
+#include "cluster/masklog.h"
+
+static spinlock_t dlm_cookie_lock = SPIN_LOCK_UNLOCKED;
+static u64 dlm_next_cookie = 1;
+
+static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags);
+static void dlm_init_lock(struct dlm_lock *newlock, int type,
+ u8 node, u64 cookie);
+static void dlm_lock_release(struct kref *kref);
+static void dlm_lock_detach_lockres(struct dlm_lock *lock);
+
+/* Tell us whether we can grant a new lock request.
+ * locking:
+ * caller needs: res->spinlock
+ * taken: none
+ * held on exit: none
+ * returns: 1 if the lock can be granted, 0 otherwise.
+ */
+static int dlm_can_grant_new_lock(struct dlm_lock_resource *res,
+ struct dlm_lock *lock)
+{
+ struct list_head *iter;
+ struct dlm_lock *tmplock;
+
+ list_for_each(iter, &res->granted) {
+ tmplock = list_entry(iter, struct dlm_lock, list);
+
+ if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
+ return 0;
+ }
+
+ list_for_each(iter, &res->converting) {
+ tmplock = list_entry(iter, struct dlm_lock, list);
+
+ if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
+ return 0;
+ }
+
+ return 1;
+}
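dlm_can_grant_new_lock() defers the actual mode check to dlm_lock_compatible(), which lives in dlmcommon.h and is not part of this hunk. For reference, a sketch of the conventional NL/PR/EX compatibility rule such a helper is expected to encode (a sketch only, not the definitive implementation):

/* Sketch of the usual compatibility rule for the three modes used
 * here: NL is compatible with everything, PR only with PR and NL,
 * EX only with NL. (The real helper is in dlmcommon.h.) */
static inline int example_lock_compatible(int existing, int request)
{
	if (existing == LKM_NLMODE || request == LKM_NLMODE)
		return 1;
	if (existing == LKM_PRMODE && request == LKM_PRMODE)
		return 1;
	return 0;
}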
+
+/* performs lock creation at the lockres master site
+ * locking:
+ * caller needs: none
+ * taken: takes and drops res->spinlock
+ * held on exit: none
+ * returns: DLM_NORMAL, DLM_NOTQUEUED
+ */
+static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags)
+{
+ int call_ast = 0, kick_thread = 0;
+ enum dlm_status status = DLM_NORMAL;
+
+ mlog_entry("type=%d\n", lock->ml.type);
+
+ spin_lock(&res->spinlock);
+ /* if called from dlm_create_lock_handler, need to
+ * ensure it will not sleep in dlm_wait_on_lockres */
+ status = __dlm_lockres_state_to_status(res);
+ if (status != DLM_NORMAL &&
+ lock->ml.node != dlm->node_num) {
+ /* erf. state changed after lock was dropped. */
+ spin_unlock(&res->spinlock);
+ dlm_error(status);
+ return status;
+ }
+ __dlm_wait_on_lockres(res);
+ __dlm_lockres_reserve_ast(res);
+
+ if (dlm_can_grant_new_lock(res, lock)) {
+ mlog(0, "I can grant this lock right away\n");
+ /* got it right away */
+ lock->lksb->status = DLM_NORMAL;
+ status = DLM_NORMAL;
+ dlm_lock_get(lock);
+ list_add_tail(&lock->list, &res->granted);
+
+ /* for the recovery lock, we can't allow the ast
+ * to be queued since the dlmthread is already
+ * frozen. but the recovery lock is always locked
+ * with LKM_NOQUEUE so we do not need the ast in
+ * this special case */
+ if (!dlm_is_recovery_lock(res->lockname.name,
+ res->lockname.len)) {
+ kick_thread = 1;
+ call_ast = 1;
+ }
+ } else {
+ /* for NOQUEUE request, unless we get the
+ * lock right away, return DLM_NOTQUEUED */
+ if (flags & LKM_NOQUEUE)
+ status = DLM_NOTQUEUED;
+ else {
+ dlm_lock_get(lock);
+ list_add_tail(&lock->list, &res->blocked);
+ kick_thread = 1;
+ }
+ }
+
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+
+ /* either queue the ast or release it */
+ if (call_ast)
+ dlm_queue_ast(dlm, lock);
+ else
+ dlm_lockres_release_ast(dlm, res);
+
+ dlm_lockres_calc_usage(dlm, res);
+ if (kick_thread)
+ dlm_kick_thread(dlm, res);
+
+ return status;
+}
+
+void dlm_revert_pending_lock(struct dlm_lock_resource *res,
+ struct dlm_lock *lock)
+{
+ /* remove from local queue if it failed */
+ list_del_init(&lock->list);
+ lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
+}
+
+
+/*
+ * locking:
+ * caller needs: none
+ * taken: takes and drops res->spinlock
+ * held on exit: none
+ * returns: DLM_DENIED, DLM_RECOVERING, or net status
+ */
+static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags)
+{
+ enum dlm_status status = DLM_DENIED;
+
+ mlog_entry("type=%d\n", lock->ml.type);
+ mlog(0, "lockres %.*s, flags = 0x%x\n", res->lockname.len,
+ res->lockname.name, flags);
+
+ spin_lock(&res->spinlock);
+
+ /* will exit this call with spinlock held */
+ __dlm_wait_on_lockres(res);
+ res->state |= DLM_LOCK_RES_IN_PROGRESS;
+
+ /* add lock to local (secondary) queue */
+ dlm_lock_get(lock);
+ list_add_tail(&lock->list, &res->blocked);
+ lock->lock_pending = 1;
+ spin_unlock(&res->spinlock);
+
+ /* spec seems to say that you will get DLM_NORMAL when the lock
+ * has been queued, meaning we need to wait for a reply here. */
+ status = dlm_send_remote_lock_request(dlm, res, lock, flags);
+
+ spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+ lock->lock_pending = 0;
+ if (status != DLM_NORMAL) {
+ if (status != DLM_NOTQUEUED)
+ dlm_error(status);
+ dlm_revert_pending_lock(res, lock);
+ dlm_lock_put(lock);
+ }
+ spin_unlock(&res->spinlock);
+
+ dlm_lockres_calc_usage(dlm, res);
+
+ wake_up(&res->wq);
+ return status;
+}
+
+
+/* for remote lock creation.
+ * locking:
+ * caller needs: none, but need res->state & DLM_LOCK_RES_IN_PROGRESS
+ * taken: none
+ * held on exit: none
+ * returns: DLM_NOLOCKMGR, or net status
+ */
+static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock, int flags)
+{
+ struct dlm_create_lock create;
+ int tmpret, status = 0;
+ enum dlm_status ret;
+
+ mlog_entry_void();
+
+ memset(&create, 0, sizeof(create));
+ create.node_idx = dlm->node_num;
+ create.requested_type = lock->ml.type;
+ create.cookie = lock->ml.cookie;
+ create.namelen = res->lockname.len;
+ create.flags = cpu_to_be32(flags);
+ memcpy(create.name, res->lockname.name, create.namelen);
+
+ tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
+ sizeof(create), res->owner, &status);
+ if (tmpret >= 0) {
+ // successfully sent and received
+ ret = status; // this is already a dlm_status
+ } else {
+ mlog_errno(tmpret);
+ if (dlm_is_host_down(tmpret)) {
+ ret = DLM_RECOVERING;
+ mlog(0, "node %u died so returning DLM_RECOVERING "
+ "from lock message!\n", res->owner);
+ } else {
+ ret = dlm_err_to_dlm_status(tmpret);
+ }
+ }
+
+ return ret;
+}
+
+void dlm_lock_get(struct dlm_lock *lock)
+{
+ kref_get(&lock->lock_refs);
+}
+
+void dlm_lock_put(struct dlm_lock *lock)
+{
+ kref_put(&lock->lock_refs, dlm_lock_release);
+}
+
+static void dlm_lock_release(struct kref *kref)
+{
+ struct dlm_lock *lock;
+
+ lock = container_of(kref, struct dlm_lock, lock_refs);
+
+ BUG_ON(!list_empty(&lock->list));
+ BUG_ON(!list_empty(&lock->ast_list));
+ BUG_ON(!list_empty(&lock->bast_list));
+ BUG_ON(lock->ast_pending);
+ BUG_ON(lock->bast_pending);
+
+ dlm_lock_detach_lockres(lock);
+
+ if (lock->lksb_kernel_allocated) {
+ mlog(0, "freeing kernel-allocated lksb\n");
+ kfree(lock->lksb);
+ }
+ kfree(lock);
+}
+
+/* associate a lock with its lockres, getting a ref on the lockres */
+void dlm_lock_attach_lockres(struct dlm_lock *lock,
+ struct dlm_lock_resource *res)
+{
+ dlm_lockres_get(res);
+ lock->lockres = res;
+}
+
+/* drop ref on lockres, if there is still one associated with lock */
+static void dlm_lock_detach_lockres(struct dlm_lock *lock)
+{
+ struct dlm_lock_resource *res;
+
+ res = lock->lockres;
+ if (res) {
+ lock->lockres = NULL;
+ mlog(0, "removing lock's lockres reference\n");
+ dlm_lockres_put(res);
+ }
+}
+
+static void dlm_init_lock(struct dlm_lock *newlock, int type,
+ u8 node, u64 cookie)
+{
+ INIT_LIST_HEAD(&newlock->list);
+ INIT_LIST_HEAD(&newlock->ast_list);
+ INIT_LIST_HEAD(&newlock->bast_list);
+ spin_lock_init(&newlock->spinlock);
+ newlock->ml.type = type;
+ newlock->ml.convert_type = LKM_IVMODE;
+ newlock->ml.highest_blocked = LKM_IVMODE;
+ newlock->ml.node = node;
+ newlock->ml.pad1 = 0;
+ newlock->ml.list = 0;
+ newlock->ml.flags = 0;
+ newlock->ast = NULL;
+ newlock->bast = NULL;
+ newlock->astdata = NULL;
+ newlock->ml.cookie = cpu_to_be64(cookie);
+ newlock->ast_pending = 0;
+ newlock->bast_pending = 0;
+ newlock->convert_pending = 0;
+ newlock->lock_pending = 0;
+ newlock->unlock_pending = 0;
+ newlock->cancel_pending = 0;
+ newlock->lksb_kernel_allocated = 0;
+
+ kref_init(&newlock->lock_refs);
+}
+
+struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
+ struct dlm_lockstatus *lksb)
+{
+ struct dlm_lock *lock;
+ int kernel_allocated = 0;
+
+ lock = kcalloc(1, sizeof(*lock), GFP_KERNEL);
+ if (!lock)
+ return NULL;
+
+ if (!lksb) {
+ /* zero memory only if kernel-allocated */
+ lksb = kcalloc(1, sizeof(*lksb), GFP_KERNEL);
+ if (!lksb) {
+ kfree(lock);
+ return NULL;
+ }
+ kernel_allocated = 1;
+ }
+
+ dlm_init_lock(lock, type, node, cookie);
+ if (kernel_allocated)
+ lock->lksb_kernel_allocated = 1;
+ lock->lksb = lksb;
+ lksb->lockid = lock;
+ return lock;
+}
+
+/* handler for lock creation net message
+ * locking:
+ * caller needs: none
+ * taken: takes and drops res->spinlock
+ * held on exit: none
+ * returns: DLM_NORMAL, DLM_SYSERR, DLM_IVLOCKID, DLM_NOTQUEUED
+ */
+int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_create_lock *create = (struct dlm_create_lock *)msg->buf;
+ struct dlm_lock_resource *res = NULL;
+ struct dlm_lock *newlock = NULL;
+ struct dlm_lockstatus *lksb = NULL;
+ enum dlm_status status = DLM_NORMAL;
+ char *name;
+ unsigned int namelen;
+
+ BUG_ON(!dlm);
+
+ mlog_entry_void();
+
+ if (!dlm_grab(dlm))
+ return DLM_REJECTED;
+
+ mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
+ "Domain %s not fully joined!\n", dlm->name);
+
+ name = create->name;
+ namelen = create->namelen;
+
+ status = DLM_IVBUFLEN;
+ if (namelen > DLM_LOCKID_NAME_MAX) {
+ dlm_error(status);
+ goto leave;
+ }
+
+ status = DLM_SYSERR;
+ newlock = dlm_new_lock(create->requested_type,
+ create->node_idx,
+ be64_to_cpu(create->cookie), NULL);
+ if (!newlock) {
+ dlm_error(status);
+ goto leave;
+ }
+
+ lksb = newlock->lksb;
+
+ if (be32_to_cpu(create->flags) & LKM_GET_LVB) {
+ lksb->flags |= DLM_LKSB_GET_LVB;
+ mlog(0, "set DLM_LKSB_GET_LVB flag\n");
+ }
+
+ status = DLM_IVLOCKID;
+ res = dlm_lookup_lockres(dlm, name, namelen);
+ if (!res) {
+ dlm_error(status);
+ goto leave;
+ }
+
+ spin_lock(&res->spinlock);
+ status = __dlm_lockres_state_to_status(res);
+ spin_unlock(&res->spinlock);
+
+ if (status != DLM_NORMAL) {
+ mlog(0, "lockres recovering/migrating/in-progress\n");
+ goto leave;
+ }
+
+ dlm_lock_attach_lockres(newlock, res);
+
+ status = dlmlock_master(dlm, res, newlock, be32_to_cpu(create->flags));
+leave:
+ if (status != DLM_NORMAL)
+ if (newlock)
+ dlm_lock_put(newlock);
+
+ if (res)
+ dlm_lockres_put(res);
+
+ dlm_put(dlm);
+
+ return status;
+}
+
+
+/* fetch next node-local (u8 nodenum + u56 cookie) into u64 */
+static inline void dlm_get_next_cookie(u8 node_num, u64 *cookie)
+{
+ u64 tmpnode = node_num;
+
+ /* shift single byte of node num into top 8 bits */
+ tmpnode <<= 56;
+
+ spin_lock(&dlm_cookie_lock);
+ *cookie = (dlm_next_cookie | tmpnode);
+ if (++dlm_next_cookie & 0xff00000000000000ull) {
+ mlog(0, "This node's cookie will now wrap!\n");
+ dlm_next_cookie = 1;
+ }
+ spin_unlock(&dlm_cookie_lock);
+}
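The cookie therefore packs the node number into the top byte and a per-node counter into the low 56 bits. A small illustration of the layout; the helper name is hypothetical and exists only to make the encoding explicit:

/* Cookie layout built above:
 *   bits 63..56 : node number
 *   bits 55..0  : per-node counter (wraps back to 1 before it
 *                 would spill into the node byte)
 * Hypothetical helper, for illustration only. */
static inline u8 example_cookie_to_node(u64 cookie)
{
	return (u8)(cookie >> 56);
}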
+
+enum dlm_status dlmlock(struct dlm_ctxt *dlm, int mode,
+ struct dlm_lockstatus *lksb, int flags,
+ const char *name, dlm_astlockfunc_t *ast, void *data,
+ dlm_bastlockfunc_t *bast)
+{
+ enum dlm_status status;
+ struct dlm_lock_resource *res = NULL;
+ struct dlm_lock *lock = NULL;
+ int convert = 0, recovery = 0;
+
+ /* yes this function is a mess.
+ * TODO: clean this up. lots of common code in the
+ * lock and convert paths, especially in the retry blocks */
+ if (!lksb) {
+ dlm_error(DLM_BADARGS);
+ return DLM_BADARGS;
+ }
+
+ status = DLM_BADPARAM;
+ if (mode != LKM_EXMODE && mode != LKM_PRMODE && mode != LKM_NLMODE) {
+ dlm_error(status);
+ goto error;
+ }
+
+ if (flags & ~LKM_VALID_FLAGS) {
+ dlm_error(status);
+ goto error;
+ }
+
+ convert = (flags & LKM_CONVERT);
+ recovery = (flags & LKM_RECOVERY);
+
+ if (recovery &&
+ (!dlm_is_recovery_lock(name, strlen(name)) || convert) ) {
+ dlm_error(status);
+ goto error;
+ }
+ if (convert && (flags & LKM_LOCAL)) {
+ mlog(ML_ERROR, "strange LOCAL convert request!\n");
+ goto error;
+ }
+
+ if (convert) {
+ /* CONVERT request */
+
+ /* if converting, must pass in a valid dlm_lock */
+ lock = lksb->lockid;
+ if (!lock) {
+ mlog(ML_ERROR, "NULL lock pointer in convert "
+ "request\n");
+ goto error;
+ }
+
+ res = lock->lockres;
+ if (!res) {
+ mlog(ML_ERROR, "NULL lockres pointer in convert "
+ "request\n");
+ goto error;
+ }
+ dlm_lockres_get(res);
+
+ /* XXX: for ocfs2 purposes, the ast/bast/astdata/lksb are
+ * static after the original lock call. convert requests will
+ * ensure that everything is the same, or return DLM_BADARGS.
+ * this means that DLM_DENIED_NOASTS will never be returned.
+ */
+ if (lock->lksb != lksb || lock->ast != ast ||
+ lock->bast != bast || lock->astdata != data) {
+ status = DLM_BADARGS;
+ mlog(ML_ERROR, "new args: lksb=%p, ast=%p, bast=%p, "
+ "astdata=%p\n", lksb, ast, bast, data);
+ mlog(ML_ERROR, "orig args: lksb=%p, ast=%p, bast=%p, "
+ "astdata=%p\n", lock->lksb, lock->ast,
+ lock->bast, lock->astdata);
+ goto error;
+ }
+retry_convert:
+ dlm_wait_for_recovery(dlm);
+
+ if (res->owner == dlm->node_num)
+ status = dlmconvert_master(dlm, res, lock, flags, mode);
+ else
+ status = dlmconvert_remote(dlm, res, lock, flags, mode);
+ if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
+ status == DLM_FORWARD) {
+ /* for now, see how this works without sleeping
+ * and just retry right away. I suspect the reco
+ * or migration will complete fast enough that
+ * no waiting will be necessary */
+ mlog(0, "retrying convert with migration/recovery/"
+ "in-progress\n");
+ msleep(100);
+ goto retry_convert;
+ }
+ } else {
+ u64 tmpcookie;
+
+ /* LOCK request */
+ status = DLM_BADARGS;
+ if (!name) {
+ dlm_error(status);
+ goto error;
+ }
+
+ status = DLM_IVBUFLEN;
+ if (strlen(name) > DLM_LOCKID_NAME_MAX || strlen(name) < 1) {
+ dlm_error(status);
+ goto error;
+ }
+
+ dlm_get_next_cookie(dlm->node_num, &tmpcookie);
+ lock = dlm_new_lock(mode, dlm->node_num, tmpcookie, lksb);
+ if (!lock) {
+ dlm_error(status);
+ goto error;
+ }
+
+ if (!recovery)
+ dlm_wait_for_recovery(dlm);
+
+ /* find or create the lock resource */
+ res = dlm_get_lock_resource(dlm, name, flags);
+ if (!res) {
+ status = DLM_IVLOCKID;
+ dlm_error(status);
+ goto error;
+ }
+
+ mlog(0, "type=%d, flags = 0x%x\n", mode, flags);
+ mlog(0, "creating lock: lock=%p res=%p\n", lock, res);
+
+ dlm_lock_attach_lockres(lock, res);
+ lock->ast = ast;
+ lock->bast = bast;
+ lock->astdata = data;
+
+retry_lock:
+ if (flags & LKM_VALBLK) {
+ mlog(0, "LKM_VALBLK passed by caller\n");
+
+ /* LVB requests for non PR, PW or EX locks are
+ * ignored. */
+ if (mode < LKM_PRMODE)
+ flags &= ~LKM_VALBLK;
+ else {
+ flags |= LKM_GET_LVB;
+ lock->lksb->flags |= DLM_LKSB_GET_LVB;
+ }
+ }
+
+ if (res->owner == dlm->node_num)
+ status = dlmlock_master(dlm, res, lock, flags);
+ else
+ status = dlmlock_remote(dlm, res, lock, flags);
+
+ if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
+ status == DLM_FORWARD) {
+ mlog(0, "retrying lock with migration/"
+ "recovery/in progress\n");
+ msleep(100);
+ dlm_wait_for_recovery(dlm);
+ goto retry_lock;
+ }
+
+ if (status != DLM_NORMAL) {
+ lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
+ if (status != DLM_NOTQUEUED)
+ dlm_error(status);
+ goto error;
+ }
+ }
+
+error:
+ if (status != DLM_NORMAL) {
+ if (lock && !convert)
+ dlm_lock_put(lock);
+ // this is kind of unnecessary
+ lksb->status = status;
+ }
+
+ /* put lockres ref from the convert path
+ * or from dlm_get_lock_resource */
+ if (res)
+ dlm_lockres_put(res);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(dlmlock);
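A hedged caller sketch of the exported dlmlock() entry point: an initial EX request with AST/BAST callbacks, followed by a down-convert on the same lksb via LKM_CONVERT. The callback bodies, lock name, and static storage are illustrative; the convert path above requires that the lksb, ast, bast, and astdata match the original request, which the sketch preserves.

/* Illustrative dlmlock() caller (not part of the patch). */
static void example_ast(void *astdata)
{
	/* lock granted or converted; astdata is the caller's cookie */
}

static void example_bast(void *astdata, int blocked_type)
{
	/* another node wants a conflicting mode; downconvert or unlock */
}

static struct dlm_lockstatus example_lksb;

static enum dlm_status example_take_lock(struct dlm_ctxt *dlm)
{
	enum dlm_status st;

	/* DLM_NORMAL means the request was accepted (granted or queued);
	 * the ast fires once the lock is actually granted. */
	st = dlmlock(dlm, LKM_EXMODE, &example_lksb, LKM_VALBLK,
		     "example-lockid", example_ast, NULL, example_bast);
	if (st != DLM_NORMAL)
		return st;

	/* later: downconvert to PR, reusing the same lksb/ast/bast */
	return dlmlock(dlm, LKM_PRMODE, &example_lksb,
		       LKM_CONVERT | LKM_VALBLK, "example-lockid",
		       example_ast, NULL, example_bast);
}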
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
new file mode 100644
index 00000000000..27e984f7e4c
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -0,0 +1,2664 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmmaster.c
+ *
+ * standalone DLM module
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/random.h>
+#include <linux/blkdev.h>
+#include <linux/socket.h>
+#include <linux/inet.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+
+
+#include "cluster/heartbeat.h"
+#include "cluster/nodemanager.h"
+#include "cluster/tcp.h"
+
+#include "dlmapi.h"
+#include "dlmcommon.h"
+#include "dlmdebug.h"
+#include "dlmdomain.h"
+
+#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
+#include "cluster/masklog.h"
+
+enum dlm_mle_type {
+ DLM_MLE_BLOCK,
+ DLM_MLE_MASTER,
+ DLM_MLE_MIGRATION
+};
+
+struct dlm_lock_name
+{
+ u8 len;
+ u8 name[DLM_LOCKID_NAME_MAX];
+};
+
+struct dlm_master_list_entry
+{
+ struct list_head list;
+ struct list_head hb_events;
+ struct dlm_ctxt *dlm;
+ spinlock_t spinlock;
+ wait_queue_head_t wq;
+ atomic_t woken;
+ struct kref mle_refs;
+ unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ u8 master;
+ u8 new_master;
+ enum dlm_mle_type type;
+ struct o2hb_callback_func mle_hb_up;
+ struct o2hb_callback_func mle_hb_down;
+ union {
+ struct dlm_lock_resource *res;
+ struct dlm_lock_name name;
+ } u;
+};
+
+static void dlm_mle_node_down(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry *mle,
+ struct o2nm_node *node,
+ int idx);
+static void dlm_mle_node_up(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry *mle,
+ struct o2nm_node *node,
+ int idx);
+
+static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
+static int dlm_do_assert_master(struct dlm_ctxt *dlm, const char *lockname,
+ unsigned int namelen, void *nodemap,
+ u32 flags);
+
+static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry *mle,
+ const char *name,
+ unsigned int namelen)
+{
+ struct dlm_lock_resource *res;
+
+ if (dlm != mle->dlm)
+ return 0;
+
+ if (mle->type == DLM_MLE_BLOCK ||
+ mle->type == DLM_MLE_MIGRATION) {
+ if (namelen != mle->u.name.len ||
+ memcmp(name, mle->u.name.name, namelen)!=0)
+ return 0;
+ } else {
+ res = mle->u.res;
+ if (namelen != res->lockname.len ||
+ memcmp(res->lockname.name, name, namelen) != 0)
+ return 0;
+ }
+ return 1;
+}
+
+#if 0
+/* Code here is included but defined out as it aids debugging */
+
+void dlm_print_one_mle(struct dlm_master_list_entry *mle)
+{
+ int i = 0, refs;
+ char *type;
+ char attached;
+ u8 master;
+ unsigned int namelen;
+ const char *name;
+ struct kref *k;
+
+ k = &mle->mle_refs;
+ if (mle->type == DLM_MLE_BLOCK)
+ type = "BLK";
+ else if (mle->type == DLM_MLE_MASTER)
+ type = "MAS";
+ else
+ type = "MIG";
+ refs = atomic_read(&k->refcount);
+ master = mle->master;
+ attached = (list_empty(&mle->hb_events) ? 'N' : 'Y');
+
+ if (mle->type != DLM_MLE_MASTER) {
+ namelen = mle->u.name.len;
+ name = mle->u.name.name;
+ } else {
+ namelen = mle->u.res->lockname.len;
+ name = mle->u.res->lockname.name;
+ }
+
+ mlog(ML_NOTICE, " #%3d: %3s %3d %3u %3u %c (%d)%.*s\n",
+ i, type, refs, master, mle->new_master, attached,
+ namelen, namelen, name);
+}
+
+static void dlm_dump_mles(struct dlm_ctxt *dlm)
+{
+ struct dlm_master_list_entry *mle;
+ struct list_head *iter;
+
+ mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name);
+ mlog(ML_NOTICE, " ####: type refs owner new events? lockname nodemap votemap respmap maybemap\n");
+ spin_lock(&dlm->master_lock);
+ list_for_each(iter, &dlm->master_list) {
+ mle = list_entry(iter, struct dlm_master_list_entry, list);
+ dlm_print_one_mle(mle);
+ }
+ spin_unlock(&dlm->master_lock);
+}
+
+int dlm_dump_all_mles(const char __user *data, unsigned int len)
+{
+ struct list_head *iter;
+ struct dlm_ctxt *dlm;
+
+ spin_lock(&dlm_domain_lock);
+ list_for_each(iter, &dlm_domains) {
+ dlm = list_entry (iter, struct dlm_ctxt, list);
+ mlog(ML_NOTICE, "found dlm: %p, name=%s\n", dlm, dlm->name);
+ dlm_dump_mles(dlm);
+ }
+ spin_unlock(&dlm_domain_lock);
+ return len;
+}
+EXPORT_SYMBOL_GPL(dlm_dump_all_mles);
+
+#endif /* 0 */
+
+
+static kmem_cache_t *dlm_mle_cache = NULL;
+
+
+static void dlm_mle_release(struct kref *kref);
+static void dlm_init_mle(struct dlm_master_list_entry *mle,
+ enum dlm_mle_type type,
+ struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ const char *name,
+ unsigned int namelen);
+static void dlm_put_mle(struct dlm_master_list_entry *mle);
+static void __dlm_put_mle(struct dlm_master_list_entry *mle);
+static int dlm_find_mle(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry **mle,
+ char *name, unsigned int namelen);
+
+static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to);
+
+
+static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_master_list_entry *mle,
+ int *blocked);
+static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_master_list_entry *mle,
+ int blocked);
+static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_master_list_entry *mle,
+ struct dlm_master_list_entry **oldmle,
+ const char *name, unsigned int namelen,
+ u8 new_master, u8 master);
+
+static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
+static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ u8 target);
+
+
+int dlm_is_host_down(int errno)
+{
+ switch (errno) {
+ case -EBADF:
+ case -ECONNREFUSED:
+ case -ENOTCONN:
+ case -ECONNRESET:
+ case -EPIPE:
+ case -EHOSTDOWN:
+ case -EHOSTUNREACH:
+ case -ETIMEDOUT:
+ case -ECONNABORTED:
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ case -ENETRESET:
+ case -ESHUTDOWN:
+ case -ENOPROTOOPT:
+ case -EINVAL: /* if returned from our tcp code,
+ this means there is no socket */
+ return 1;
+ }
+ return 0;
+}
+
+
+/*
+ * MASTER LIST FUNCTIONS
+ */
+
+
+/*
+ * regarding master list entries and heartbeat callbacks:
+ *
+ * in order to avoid sleeping and allocation that occurs in
+ * heartbeat, master list entries are simply attached to the
+ * dlm's established heartbeat callbacks. the mle is attached
+ * when it is created, and since the dlm->spinlock is held at
+ * that time, any heartbeat event will be properly discovered
+ * by the mle. the mle needs to be detached from the
+ * dlm->mle_hb_events list as soon as heartbeat events are no
+ * longer useful to the mle, and before the mle is freed.
+ *
+ * as a general rule, heartbeat events are no longer needed by
+ * the mle once an "answer" regarding the lock master has been
+ * received.
+ */
+static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry *mle)
+{
+ assert_spin_locked(&dlm->spinlock);
+
+ list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
+}
+
+
+static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry *mle)
+{
+ if (!list_empty(&mle->hb_events))
+ list_del_init(&mle->hb_events);
+}
+
+
+static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry *mle)
+{
+ spin_lock(&dlm->spinlock);
+ __dlm_mle_detach_hb_events(dlm, mle);
+ spin_unlock(&dlm->spinlock);
+}
+
+/* remove from list and free */
+static void __dlm_put_mle(struct dlm_master_list_entry *mle)
+{
+ struct dlm_ctxt *dlm;
+ dlm = mle->dlm;
+
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&dlm->master_lock);
+ BUG_ON(!atomic_read(&mle->mle_refs.refcount));
+
+ kref_put(&mle->mle_refs, dlm_mle_release);
+}
+
+
+/* must not have any spinlocks coming in */
+static void dlm_put_mle(struct dlm_master_list_entry *mle)
+{
+ struct dlm_ctxt *dlm;
+ dlm = mle->dlm;
+
+ spin_lock(&dlm->spinlock);
+ spin_lock(&dlm->master_lock);
+ __dlm_put_mle(mle);
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+}
+
+static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
+{
+ kref_get(&mle->mle_refs);
+}
+
+static void dlm_init_mle(struct dlm_master_list_entry *mle,
+ enum dlm_mle_type type,
+ struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ const char *name,
+ unsigned int namelen)
+{
+ assert_spin_locked(&dlm->spinlock);
+
+ mle->dlm = dlm;
+ mle->type = type;
+ INIT_LIST_HEAD(&mle->list);
+ INIT_LIST_HEAD(&mle->hb_events);
+ memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
+ spin_lock_init(&mle->spinlock);
+ init_waitqueue_head(&mle->wq);
+ atomic_set(&mle->woken, 0);
+ kref_init(&mle->mle_refs);
+ memset(mle->response_map, 0, sizeof(mle->response_map));
+ mle->master = O2NM_MAX_NODES;
+ mle->new_master = O2NM_MAX_NODES;
+
+ if (mle->type == DLM_MLE_MASTER) {
+ BUG_ON(!res);
+ mle->u.res = res;
+ } else if (mle->type == DLM_MLE_BLOCK) {
+ BUG_ON(!name);
+ memcpy(mle->u.name.name, name, namelen);
+ mle->u.name.len = namelen;
+ } else /* DLM_MLE_MIGRATION */ {
+ BUG_ON(!name);
+ memcpy(mle->u.name.name, name, namelen);
+ mle->u.name.len = namelen;
+ }
+
+ /* copy off the node_map and register hb callbacks on our copy */
+ memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
+ memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
+ clear_bit(dlm->node_num, mle->vote_map);
+ clear_bit(dlm->node_num, mle->node_map);
+
+ /* attach the mle to the domain node up/down events */
+ __dlm_mle_attach_hb_events(dlm, mle);
+}
+
+
+/* returns 1 if found, 0 if not */
+static int dlm_find_mle(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry **mle,
+ char *name, unsigned int namelen)
+{
+ struct dlm_master_list_entry *tmpmle;
+ struct list_head *iter;
+
+ assert_spin_locked(&dlm->master_lock);
+
+ list_for_each(iter, &dlm->master_list) {
+ tmpmle = list_entry(iter, struct dlm_master_list_entry, list);
+ if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
+ continue;
+ dlm_get_mle(tmpmle);
+ *mle = tmpmle;
+ return 1;
+ }
+ return 0;
+}
+
+void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
+{
+ struct dlm_master_list_entry *mle;
+ struct list_head *iter;
+
+ assert_spin_locked(&dlm->spinlock);
+
+ list_for_each(iter, &dlm->mle_hb_events) {
+ mle = list_entry(iter, struct dlm_master_list_entry,
+ hb_events);
+ if (node_up)
+ dlm_mle_node_up(dlm, mle, NULL, idx);
+ else
+ dlm_mle_node_down(dlm, mle, NULL, idx);
+ }
+}
+
+static void dlm_mle_node_down(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry *mle,
+ struct o2nm_node *node, int idx)
+{
+ spin_lock(&mle->spinlock);
+
+ if (!test_bit(idx, mle->node_map))
+ mlog(0, "node %u already removed from nodemap!\n", idx);
+ else
+ clear_bit(idx, mle->node_map);
+
+ spin_unlock(&mle->spinlock);
+}
+
+static void dlm_mle_node_up(struct dlm_ctxt *dlm,
+ struct dlm_master_list_entry *mle,
+ struct o2nm_node *node, int idx)
+{
+ spin_lock(&mle->spinlock);
+
+ if (test_bit(idx, mle->node_map))
+ mlog(0, "node %u already in node map!\n", idx);
+ else
+ set_bit(idx, mle->node_map);
+
+ spin_unlock(&mle->spinlock);
+}
+
+
+int dlm_init_mle_cache(void)
+{
+ dlm_mle_cache = kmem_cache_create("dlm_mle_cache",
+ sizeof(struct dlm_master_list_entry),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (dlm_mle_cache == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+void dlm_destroy_mle_cache(void)
+{
+ if (dlm_mle_cache)
+ kmem_cache_destroy(dlm_mle_cache);
+}
+
+static void dlm_mle_release(struct kref *kref)
+{
+ struct dlm_master_list_entry *mle;
+ struct dlm_ctxt *dlm;
+
+ mlog_entry_void();
+
+ mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
+ dlm = mle->dlm;
+
+ if (mle->type != DLM_MLE_MASTER) {
+ mlog(0, "calling mle_release for %.*s, type %d\n",
+ mle->u.name.len, mle->u.name.name, mle->type);
+ } else {
+ mlog(0, "calling mle_release for %.*s, type %d\n",
+ mle->u.res->lockname.len,
+ mle->u.res->lockname.name, mle->type);
+ }
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&dlm->master_lock);
+
+ /* remove from list if not already */
+ if (!list_empty(&mle->list))
+ list_del_init(&mle->list);
+
+ /* detach the mle from the domain node up/down events */
+ __dlm_mle_detach_hb_events(dlm, mle);
+
+ /* NOTE: kfree under spinlock here.
+ * if this is bad, we can move this to a freelist. */
+ kmem_cache_free(dlm_mle_cache, mle);
+}
+
+
+/*
+ * LOCK RESOURCE FUNCTIONS
+ */
+
+static void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ u8 owner)
+{
+ assert_spin_locked(&res->spinlock);
+
+ mlog_entry("%.*s, %u\n", res->lockname.len, res->lockname.name, owner);
+
+ if (owner == dlm->node_num)
+ atomic_inc(&dlm->local_resources);
+ else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN)
+ atomic_inc(&dlm->unknown_resources);
+ else
+ atomic_inc(&dlm->remote_resources);
+
+ res->owner = owner;
+}
+
+void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, u8 owner)
+{
+ assert_spin_locked(&res->spinlock);
+
+ if (owner == res->owner)
+ return;
+
+ if (res->owner == dlm->node_num)
+ atomic_dec(&dlm->local_resources);
+ else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN)
+ atomic_dec(&dlm->unknown_resources);
+ else
+ atomic_dec(&dlm->remote_resources);
+
+ dlm_set_lockres_owner(dlm, res, owner);
+}
+
+
+static void dlm_lockres_release(struct kref *kref)
+{
+ struct dlm_lock_resource *res;
+
+ res = container_of(kref, struct dlm_lock_resource, refs);
+
+ /* This should not happen -- every lockres has a name
+ * associated with it at init time. */
+ BUG_ON(!res->lockname.name);
+
+ mlog(0, "destroying lockres %.*s\n", res->lockname.len,
+ res->lockname.name);
+
+ /* By the time we're ready to blow this guy away, we shouldn't
+ * be on any lists. */
+ BUG_ON(!list_empty(&res->list));
+ BUG_ON(!list_empty(&res->granted));
+ BUG_ON(!list_empty(&res->converting));
+ BUG_ON(!list_empty(&res->blocked));
+ BUG_ON(!list_empty(&res->dirty));
+ BUG_ON(!list_empty(&res->recovering));
+ BUG_ON(!list_empty(&res->purge));
+
+ kfree(res->lockname.name);
+
+ kfree(res);
+}
+
+void dlm_lockres_get(struct dlm_lock_resource *res)
+{
+ kref_get(&res->refs);
+}
+
+void dlm_lockres_put(struct dlm_lock_resource *res)
+{
+ kref_put(&res->refs, dlm_lockres_release);
+}
+
+static void dlm_init_lockres(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ const char *name, unsigned int namelen)
+{
+ char *qname;
+
+ /* If we memset here, we lose our reference to the kmalloc'd
+ * res->lockname.name, so be sure to init every field
+ * correctly! */
+
+ qname = (char *) res->lockname.name;
+ memcpy(qname, name, namelen);
+
+ res->lockname.len = namelen;
+ res->lockname.hash = full_name_hash(name, namelen);
+
+ init_waitqueue_head(&res->wq);
+ spin_lock_init(&res->spinlock);
+ INIT_LIST_HEAD(&res->list);
+ INIT_LIST_HEAD(&res->granted);
+ INIT_LIST_HEAD(&res->converting);
+ INIT_LIST_HEAD(&res->blocked);
+ INIT_LIST_HEAD(&res->dirty);
+ INIT_LIST_HEAD(&res->recovering);
+ INIT_LIST_HEAD(&res->purge);
+ atomic_set(&res->asts_reserved, 0);
+ res->migration_pending = 0;
+
+ kref_init(&res->refs);
+
+ /* just for consistency */
+ spin_lock(&res->spinlock);
+ dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
+ spin_unlock(&res->spinlock);
+
+ res->state = DLM_LOCK_RES_IN_PROGRESS;
+
+ res->last_used = 0;
+
+ memset(res->lvb, 0, DLM_LVB_LEN);
+}
+
+struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
+ const char *name,
+ unsigned int namelen)
+{
+ struct dlm_lock_resource *res;
+
+ res = kmalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL);
+ if (!res)
+ return NULL;
+
+ res->lockname.name = kmalloc(namelen, GFP_KERNEL);
+ if (!res->lockname.name) {
+ kfree(res);
+ return NULL;
+ }
+
+ dlm_init_lockres(dlm, res, name, namelen);
+ return res;
+}
+
+/*
+ * look up a lock resource by name; it may already exist
+ * in the hashtable.  lockid is null terminated.
+ *
+ * if it is not found, allocate enough for the lockres and for
+ * the temporary structure used in doing the mastering.
+ *
+ * also, do a lookup in the dlm->master_list to see
+ * if another node has begun mastering the same lock.
+ * if so, there should be a block entry in there
+ * for this name, and we should *not* attempt to master
+ * the lock here.  instead, wait for that node
+ * to assert_master (or die).
+ */
+struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
+ const char *lockid,
+ int flags)
+{
+ struct dlm_lock_resource *tmpres=NULL, *res=NULL;
+ struct dlm_master_list_entry *mle = NULL;
+ struct dlm_master_list_entry *alloc_mle = NULL;
+ int blocked = 0;
+ int ret, nodenum;
+ struct dlm_node_iter iter;
+ unsigned int namelen;
+ int tries = 0;
+
+ BUG_ON(!lockid);
+
+ namelen = strlen(lockid);
+
+ mlog(0, "get lockres %s (len %d)\n", lockid, namelen);
+
+lookup:
+ spin_lock(&dlm->spinlock);
+ tmpres = __dlm_lookup_lockres(dlm, lockid, namelen);
+ if (tmpres) {
+ spin_unlock(&dlm->spinlock);
+ mlog(0, "found in hash!\n");
+ if (res)
+ dlm_lockres_put(res);
+ res = tmpres;
+ goto leave;
+ }
+
+ if (!res) {
+ spin_unlock(&dlm->spinlock);
+ mlog(0, "allocating a new resource\n");
+ /* nothing found and we need to allocate one. */
+ alloc_mle = (struct dlm_master_list_entry *)
+ kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL);
+ if (!alloc_mle)
+ goto leave;
+ res = dlm_new_lockres(dlm, lockid, namelen);
+ if (!res)
+ goto leave;
+ goto lookup;
+ }
+
+ mlog(0, "no lockres found, allocated our own: %p\n", res);
+
+ if (flags & LKM_LOCAL) {
+ /* caller knows it's safe to assume it's not mastered elsewhere
+ * DONE! return right away */
+ spin_lock(&res->spinlock);
+ dlm_change_lockres_owner(dlm, res, dlm->node_num);
+ __dlm_insert_lockres(dlm, res);
+ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->spinlock);
+ /* lockres still marked IN_PROGRESS */
+ goto wake_waiters;
+ }
+
+ /* check master list to see if another node has started mastering it */
+ spin_lock(&dlm->master_lock);
+
+ /* if we found a block, wait for lock to be mastered by another node */
+ blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
+ if (blocked) {
+ if (mle->type == DLM_MLE_MASTER) {
+ mlog(ML_ERROR, "master entry for nonexistent lock!\n");
+ BUG();
+ } else if (mle->type == DLM_MLE_MIGRATION) {
+ /* migration is in progress! */
+ /* the good news is that we now know the
+ * "current" master (mle->master). */
+
+ spin_unlock(&dlm->master_lock);
+ assert_spin_locked(&dlm->spinlock);
+
+ /* set the lockres owner and hash it */
+ spin_lock(&res->spinlock);
+ dlm_set_lockres_owner(dlm, res, mle->master);
+ __dlm_insert_lockres(dlm, res);
+ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->spinlock);
+
+ /* master is known, detach */
+ dlm_mle_detach_hb_events(dlm, mle);
+ dlm_put_mle(mle);
+ mle = NULL;
+ goto wake_waiters;
+ }
+ } else {
+ /* go ahead and try to master lock on this node */
+ mle = alloc_mle;
+ /* make sure this does not get freed below */
+ alloc_mle = NULL;
+ dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
+ set_bit(dlm->node_num, mle->maybe_map);
+ list_add(&mle->list, &dlm->master_list);
+ }
+
+ /* at this point there is either a DLM_MLE_BLOCK or a
+ * DLM_MLE_MASTER on the master list, so it's safe to add the
+ * lockres to the hashtable. anyone who finds the lock will
+ * still have to wait on the IN_PROGRESS. */
+
+ /* finally add the lockres to its hash bucket */
+ __dlm_insert_lockres(dlm, res);
+ /* get an extra ref on the mle in case this is a BLOCK
+ * if so, the creator of the BLOCK may try to put the last
+ * ref at this time in the assert master handler, so we
+ * need an extra one to keep from a bad ptr deref. */
+ dlm_get_mle(mle);
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+
+ /* must wait for lock to be mastered elsewhere */
+ if (blocked)
+ goto wait;
+
+redo_request:
+ ret = -EINVAL;
+ dlm_node_iter_init(mle->vote_map, &iter);
+ while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
+ ret = dlm_do_master_request(mle, nodenum);
+ if (ret < 0)
+ mlog_errno(ret);
+ if (mle->master != O2NM_MAX_NODES) {
+ /* found a master ! */
+ break;
+ }
+ }
+
+wait:
+ /* keep going until the response map includes all nodes */
+ ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
+ if (ret < 0) {
+ mlog(0, "%s:%.*s: node map changed, redo the "
+ "master request now, blocked=%d\n",
+ dlm->name, res->lockname.len,
+ res->lockname.name, blocked);
+ if (++tries > 20) {
+ mlog(ML_ERROR, "%s:%.*s: spinning on "
+ "dlm_wait_for_lock_mastery, blocked=%d\n",
+ dlm->name, res->lockname.len,
+ res->lockname.name, blocked);
+ dlm_print_one_lock_resource(res);
+ /* dlm_print_one_mle(mle); */
+ tries = 0;
+ }
+ goto redo_request;
+ }
+
+ mlog(0, "lockres mastered by %u\n", res->owner);
+ /* make sure we never continue without this */
+ BUG_ON(res->owner == O2NM_MAX_NODES);
+
+ /* master is known, detach if not already detached */
+ dlm_mle_detach_hb_events(dlm, mle);
+ dlm_put_mle(mle);
+ /* put the extra ref */
+ dlm_put_mle(mle);
+
+wake_waiters:
+ spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+
+leave:
+ /* need to free the unused mle */
+ if (alloc_mle)
+ kmem_cache_free(dlm_mle_cache, alloc_mle);
+
+ return res;
+}
+
+
+#define DLM_MASTERY_TIMEOUT_MS 5000
+
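+/* spin here until mastery of the lockres is resolved:
+ * - if the node map changed since we sent our requests, restart
+ *   the mastery process via dlm_restart_lock_mastery()
+ * - if another node asserted master, we are done
+ * - if every node has responded, we are not blocked, and our
+ *   node number is the lowest in the maybe_map, assert mastery
+ *   ourselves
+ * - otherwise sleep on mle->wq (up to DLM_MASTERY_TIMEOUT_MS)
+ *   and recheck.
+ * on success the lockres owner has been set. */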
+static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_master_list_entry *mle,
+ int *blocked)
+{
+ u8 m;
+ int ret, bit;
+ int map_changed, voting_done;
+ int assert, sleep;
+
+recheck:
+ ret = 0;
+ assert = 0;
+
+ /* check if another node has already become the owner */
+ spin_lock(&res->spinlock);
+ if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
+ spin_unlock(&res->spinlock);
+ goto leave;
+ }
+ spin_unlock(&res->spinlock);
+
+ spin_lock(&mle->spinlock);
+ m = mle->master;
+ map_changed = (memcmp(mle->vote_map, mle->node_map,
+ sizeof(mle->vote_map)) != 0);
+ voting_done = (memcmp(mle->vote_map, mle->response_map,
+ sizeof(mle->vote_map)) == 0);
+
+ /* restart if we hit any errors */
+ if (map_changed) {
+ int b;
+ mlog(0, "%s: %.*s: node map changed, restarting\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
+ b = (mle->type == DLM_MLE_BLOCK);
+ if ((*blocked && !b) || (!*blocked && b)) {
+ mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
+ dlm->name, res->lockname.len, res->lockname.name,
+ *blocked, b);
+ *blocked = b;
+ }
+ spin_unlock(&mle->spinlock);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto leave;
+ }
+ mlog(0, "%s:%.*s: restart lock mastery succeeded, "
+ "rechecking now\n", dlm->name, res->lockname.len,
+ res->lockname.name);
+ goto recheck;
+ }
+
+ if (m != O2NM_MAX_NODES) {
+ /* another node has done an assert!
+ * all done! */
+ sleep = 0;
+ } else {
+ sleep = 1;
+ /* have all nodes responded? */
+ if (voting_done && !*blocked) {
+ bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
+ if (dlm->node_num <= bit) {
+ /* my node number is lowest.
+ * now tell other nodes that I am
+ * mastering this. */
+ mle->master = dlm->node_num;
+ assert = 1;
+ sleep = 0;
+ }
+ /* if voting is done, but we have not received
+ * an assert master yet, we must sleep */
+ }
+ }
+
+ spin_unlock(&mle->spinlock);
+
+ /* sleep if we haven't finished voting yet */
+ if (sleep) {
+ unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);
+
+ /*
+ if (atomic_read(&mle->mle_refs.refcount) < 2)
+ mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
+ atomic_read(&mle->mle_refs.refcount),
+ res->lockname.len, res->lockname.name);
+ */
+ atomic_set(&mle->woken, 0);
+ (void)wait_event_timeout(mle->wq,
+ (atomic_read(&mle->woken) == 1),
+ timeo);
+ if (res->owner == O2NM_MAX_NODES) {
+ mlog(0, "waiting again\n");
+ goto recheck;
+ }
+ mlog(0, "done waiting, master is %u\n", res->owner);
+ ret = 0;
+ goto leave;
+ }
+
+ ret = 0; /* done */
+ if (assert) {
+ m = dlm->node_num;
+ mlog(0, "about to master %.*s here, this=%u\n",
+ res->lockname.len, res->lockname.name, m);
+ ret = dlm_do_assert_master(dlm, res->lockname.name,
+ res->lockname.len, mle->vote_map, 0);
+ if (ret) {
+ /* This is a failure in the network path,
+ * not in the response to the assert_master
+ * (any nonzero response is a BUG on this node).
+ * Most likely a socket just got disconnected
+ * due to node death. */
+ mlog_errno(ret);
+ }
+ /* no longer need to restart lock mastery.
+ * all living nodes have been contacted. */
+ ret = 0;
+ }
+
+ /* set the lockres owner */
+ spin_lock(&res->spinlock);
+ dlm_change_lockres_owner(dlm, res, m);
+ spin_unlock(&res->spinlock);
+
+leave:
+ return ret;
+}
+
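+/* helper for walking the difference between two node bitmaps.
+ * diff_bm holds the xor of the original and current maps;
+ * dlm_bitmap_diff_iter_next() returns each differing node along
+ * with whether it came up or went down relative to the original. */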
+struct dlm_bitmap_diff_iter
+{
+ int curnode;
+ unsigned long *orig_bm;
+ unsigned long *cur_bm;
+ unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
+};
+
+enum dlm_node_state_change
+{
+ NODE_DOWN = -1,
+ NODE_NO_CHANGE = 0,
+ NODE_UP
+};
+
+static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
+ unsigned long *orig_bm,
+ unsigned long *cur_bm)
+{
+ unsigned long p1, p2;
+ int i;
+
+ iter->curnode = -1;
+ iter->orig_bm = orig_bm;
+ iter->cur_bm = cur_bm;
+
+ for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
+ p1 = *(iter->orig_bm + i);
+ p2 = *(iter->cur_bm + i);
+ iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
+ }
+}
+
+static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
+ enum dlm_node_state_change *state)
+{
+ int bit;
+
+ if (iter->curnode >= O2NM_MAX_NODES)
+ return -ENOENT;
+
+ bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
+ iter->curnode+1);
+ if (bit >= O2NM_MAX_NODES) {
+ iter->curnode = O2NM_MAX_NODES;
+ return -ENOENT;
+ }
+
+ /* if it was there in the original then this node died */
+ if (test_bit(bit, iter->orig_bm))
+ *state = NODE_DOWN;
+ else
+ *state = NODE_UP;
+
+ iter->curnode = bit;
+ return bit;
+}
+
+
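+/* the node map changed while we were waiting for responses.
+ * with mle->spinlock held, walk the nodes that differ between
+ * vote_map and node_map: nodes that came up (and have a lower
+ * node number than us, when we are not blocked) are added back
+ * into vote_map so they get a fresh master request; dead nodes
+ * are purged from the maps, possibly converting a BLOCK mle into
+ * a MASTER mle if there is nothing left to block on.
+ * returns -EAGAIN when the caller must redo its master requests. */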
+static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_master_list_entry *mle,
+ int blocked)
+{
+ struct dlm_bitmap_diff_iter bdi;
+ enum dlm_node_state_change sc;
+ int node;
+ int ret = 0;
+
+ mlog(0, "something happened such that the "
+ "master process may need to be restarted!\n");
+
+ assert_spin_locked(&mle->spinlock);
+
+ dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
+ node = dlm_bitmap_diff_iter_next(&bdi, &sc);
+ while (node >= 0) {
+ if (sc == NODE_UP) {
+ /* a node came up. easy. might not even need
+ * to talk to it if its node number is higher
+ * or if we are already blocked. */
+ mlog(0, "node up! %d\n", node);
+ if (blocked)
+ goto next;
+
+ if (node > dlm->node_num) {
+ mlog(0, "node > this node. skipping.\n");
+ goto next;
+ }
+
+ /* redo the master request, but only for the new node */
+ mlog(0, "sending request to new node\n");
+ clear_bit(node, mle->response_map);
+ set_bit(node, mle->vote_map);
+ } else {
+ mlog(ML_ERROR, "node down! %d\n", node);
+
+ /* if the node wasn't involved in mastery skip it,
+ * but clear it out from the maps so that it will
+ * not affect mastery of this lockres */
+ clear_bit(node, mle->response_map);
+ clear_bit(node, mle->vote_map);
+ if (!test_bit(node, mle->maybe_map))
+ goto next;
+
+ /* if we're already blocked on lock mastery, and the
+ * dead node wasn't the expected master, or there is
+ * another node in the maybe_map, keep waiting */
+ if (blocked) {
+ int lowest = find_next_bit(mle->maybe_map,
+ O2NM_MAX_NODES, 0);
+
+ /* act like it was never there */
+ clear_bit(node, mle->maybe_map);
+
+ if (node != lowest)
+ goto next;
+
+ mlog(ML_ERROR, "expected master %u died while "
+ "this node was blocked waiting on it!\n",
+ node);
+ lowest = find_next_bit(mle->maybe_map,
+ O2NM_MAX_NODES,
+ lowest+1);
+ if (lowest < O2NM_MAX_NODES) {
+ mlog(0, "still blocked. waiting "
+ "on %u now\n", lowest);
+ goto next;
+ }
+
+ /* mle is an MLE_BLOCK, but there is now
+ * nothing left to block on. we need to return
+ * all the way back out and try again with
+ * an MLE_MASTER. dlm_do_local_recovery_cleanup
+ * has already run, so the mle refcount is ok */
+ mlog(0, "no longer blocking. we can "
+ "try to master this here\n");
+ mle->type = DLM_MLE_MASTER;
+ memset(mle->maybe_map, 0,
+ sizeof(mle->maybe_map));
+ memset(mle->response_map, 0,
+ sizeof(mle->response_map));
+ memcpy(mle->vote_map, mle->node_map,
+ sizeof(mle->node_map));
+ mle->u.res = res;
+ set_bit(dlm->node_num, mle->maybe_map);
+
+ ret = -EAGAIN;
+ goto next;
+ }
+
+ clear_bit(node, mle->maybe_map);
+ if (node > dlm->node_num)
+ goto next;
+
+ mlog(0, "dead node in map!\n");
+ /* yuck. go back and re-contact all nodes
+ * in the vote_map, removing this node. */
+ memset(mle->response_map, 0,
+ sizeof(mle->response_map));
+ }
+ ret = -EAGAIN;
+next:
+ node = dlm_bitmap_diff_iter_next(&bdi, &sc);
+ }
+ return ret;
+}
+
+
+/*
+ * DLM_MASTER_REQUEST_MSG
+ *
+ * returns: 0 on success,
+ * -errno on a network error
+ *
+ * on error, the caller should assume the target node is "dead"
+ *
+ */
+
+static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to)
+{
+ struct dlm_ctxt *dlm = mle->dlm;
+ struct dlm_master_request request;
+ int ret, response=0, resend;
+
+ memset(&request, 0, sizeof(request));
+ request.node_idx = dlm->node_num;
+
+ BUG_ON(mle->type == DLM_MLE_MIGRATION);
+
+ if (mle->type != DLM_MLE_MASTER) {
+ request.namelen = mle->u.name.len;
+ memcpy(request.name, mle->u.name.name, request.namelen);
+ } else {
+ request.namelen = mle->u.res->lockname.len;
+ memcpy(request.name, mle->u.res->lockname.name,
+ request.namelen);
+ }
+
+again:
+ ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
+ sizeof(request), to, &response);
+ if (ret < 0) {
+ if (ret == -ESRCH) {
+ /* should never happen */
+ mlog(ML_ERROR, "TCP stack not ready!\n");
+ BUG();
+ } else if (ret == -EINVAL) {
+ mlog(ML_ERROR, "bad args passed to o2net!\n");
+ BUG();
+ } else if (ret == -ENOMEM) {
+ mlog(ML_ERROR, "out of memory while trying to send "
+ "network message! retrying\n");
+ /* this is totally crude */
+ msleep(50);
+ goto again;
+ } else if (!dlm_is_host_down(ret)) {
+ /* not a network error. bad. */
+ mlog_errno(ret);
+ mlog(ML_ERROR, "unhandled error!");
+ BUG();
+ }
+ /* all other errors should be network errors,
+ * and likely indicate node death */
+ mlog(ML_ERROR, "link to %d went down!\n", to);
+ goto out;
+ }
+
+ ret = 0;
+ resend = 0;
+ spin_lock(&mle->spinlock);
+ switch (response) {
+ case DLM_MASTER_RESP_YES:
+ set_bit(to, mle->response_map);
+ mlog(0, "node %u is the master, response=YES\n", to);
+ mle->master = to;
+ break;
+ case DLM_MASTER_RESP_NO:
+ mlog(0, "node %u not master, response=NO\n", to);
+ set_bit(to, mle->response_map);
+ break;
+ case DLM_MASTER_RESP_MAYBE:
+ mlog(0, "node %u not master, response=MAYBE\n", to);
+ set_bit(to, mle->response_map);
+ set_bit(to, mle->maybe_map);
+ break;
+ case DLM_MASTER_RESP_ERROR:
+ mlog(0, "node %u hit an error, resending\n", to);
+ resend = 1;
+ response = 0;
+ break;
+ default:
+ mlog(ML_ERROR, "bad response! %u\n", response);
+ BUG();
+ }
+ spin_unlock(&mle->spinlock);
+ if (resend) {
+ /* this is also totally crude */
+ msleep(50);
+ goto again;
+ }
+
+out:
+ return ret;
+}
+
+/*
+ * locks that can be taken here:
+ * dlm->spinlock
+ * res->spinlock
+ * mle->spinlock
+ * dlm->master_list
+ *
+ * if possible, TRIM THIS DOWN!!!
+ */
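+/* reply semantics: YES means this node is (or is about to be)
+ * the master, NO means it definitely is not (blocked, or another
+ * owner is known), MAYBE means this node is itself still trying
+ * to master the lock, and ERROR asks the requester to retry
+ * (lockres recovering, or an mle could not be allocated). */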
+int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data)
+{
+ u8 response = DLM_MASTER_RESP_MAYBE;
+ struct dlm_ctxt *dlm = data;
+ struct dlm_lock_resource *res;
+ struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
+ struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
+ char *name;
+ unsigned int namelen;
+ int found, ret;
+ int set_maybe;
+
+ if (!dlm_grab(dlm))
+ return DLM_MASTER_RESP_NO;
+
+ if (!dlm_domain_fully_joined(dlm)) {
+ response = DLM_MASTER_RESP_NO;
+ goto send_response;
+ }
+
+ name = request->name;
+ namelen = request->namelen;
+
+ if (namelen > DLM_LOCKID_NAME_MAX) {
+ response = DLM_IVBUFLEN;
+ goto send_response;
+ }
+
+way_up_top:
+ spin_lock(&dlm->spinlock);
+ res = __dlm_lookup_lockres(dlm, name, namelen);
+ if (res) {
+ spin_unlock(&dlm->spinlock);
+
+ /* take care of the easy cases up front */
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_RECOVERING) {
+ spin_unlock(&res->spinlock);
+ mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
+ "being recovered\n");
+ response = DLM_MASTER_RESP_ERROR;
+ if (mle)
+ kmem_cache_free(dlm_mle_cache, mle);
+ goto send_response;
+ }
+
+ if (res->owner == dlm->node_num) {
+ u32 flags = DLM_ASSERT_MASTER_MLE_CLEANUP;
+ spin_unlock(&res->spinlock);
+ // mlog(0, "this node is the master\n");
+ response = DLM_MASTER_RESP_YES;
+ if (mle)
+ kmem_cache_free(dlm_mle_cache, mle);
+
+ /* this node is the owner.
+ * there is some extra work that needs to
+ * happen now. the requesting node has
+ * caused all nodes up to this one to
+ * create mles. this node now needs to
+ * go back and clean those up. */
+ mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
+ dlm->node_num, res->lockname.len, res->lockname.name);
+ ret = dlm_dispatch_assert_master(dlm, res, 1,
+ request->node_idx,
+ flags);
+ if (ret < 0) {
+ mlog(ML_ERROR, "failed to dispatch assert "
+ "master work\n");
+ response = DLM_MASTER_RESP_ERROR;
+ }
+ goto send_response;
+ } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
+ spin_unlock(&res->spinlock);
+ // mlog(0, "node %u is the master\n", res->owner);
+ response = DLM_MASTER_RESP_NO;
+ if (mle)
+ kmem_cache_free(dlm_mle_cache, mle);
+ goto send_response;
+ }
+
+ /* ok, there is no owner. either this node is
+ * being blocked, or it is actively trying to
+ * master this lock. */
+ if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
+ mlog(ML_ERROR, "lock with no owner should be "
+ "in-progress!\n");
+ BUG();
+ }
+
+ // mlog(0, "lockres is in progress...\n");
+ spin_lock(&dlm->master_lock);
+ found = dlm_find_mle(dlm, &tmpmle, name, namelen);
+ if (!found) {
+ mlog(ML_ERROR, "no mle found for this lock!\n");
+ BUG();
+ }
+ set_maybe = 1;
+ spin_lock(&tmpmle->spinlock);
+ if (tmpmle->type == DLM_MLE_BLOCK) {
+ // mlog(0, "this node is waiting for "
+ // "lockres to be mastered\n");
+ response = DLM_MASTER_RESP_NO;
+ } else if (tmpmle->type == DLM_MLE_MIGRATION) {
+ mlog(0, "node %u is master, but trying to migrate to "
+ "node %u.\n", tmpmle->master, tmpmle->new_master);
+ if (tmpmle->master == dlm->node_num) {
+ response = DLM_MASTER_RESP_YES;
+ mlog(ML_ERROR, "no owner on lockres, but this "
+ "node is trying to migrate it to %u?!\n",
+ tmpmle->new_master);
+ BUG();
+ } else {
+ /* the real master can respond on its own */
+ response = DLM_MASTER_RESP_NO;
+ }
+ } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
+ set_maybe = 0;
+ if (tmpmle->master == dlm->node_num)
+ response = DLM_MASTER_RESP_YES;
+ else
+ response = DLM_MASTER_RESP_NO;
+ } else {
+ // mlog(0, "this node is attempting to "
+ // "master lockres\n");
+ response = DLM_MASTER_RESP_MAYBE;
+ }
+ if (set_maybe)
+ set_bit(request->node_idx, tmpmle->maybe_map);
+ spin_unlock(&tmpmle->spinlock);
+
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&res->spinlock);
+
+ /* keep the mle attached to heartbeat events */
+ dlm_put_mle(tmpmle);
+ if (mle)
+ kmem_cache_free(dlm_mle_cache, mle);
+ goto send_response;
+ }
+
+ /*
+ * lockres doesn't exist on this node
+ * if there is an MLE_BLOCK, return NO
+ * if there is an MLE_MASTER, return MAYBE
+ * otherwise, add an MLE_BLOCK, return NO
+ */
+ spin_lock(&dlm->master_lock);
+ found = dlm_find_mle(dlm, &tmpmle, name, namelen);
+ if (!found) {
+ /* this lockid has never been seen on this node yet */
+ // mlog(0, "no mle found\n");
+ if (!mle) {
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+
+ mle = (struct dlm_master_list_entry *)
+ kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL);
+ if (!mle) {
+ // bad bad bad... this sucks.
+ response = DLM_MASTER_RESP_ERROR;
+ goto send_response;
+ }
+ spin_lock(&dlm->spinlock);
+ dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL,
+ name, namelen);
+ spin_unlock(&dlm->spinlock);
+ goto way_up_top;
+ }
+
+ // mlog(0, "this is second time thru, already allocated, "
+ // "add the block.\n");
+ set_bit(request->node_idx, mle->maybe_map);
+ list_add(&mle->list, &dlm->master_list);
+ response = DLM_MASTER_RESP_NO;
+ } else {
+ // mlog(0, "mle was found\n");
+ set_maybe = 1;
+ spin_lock(&tmpmle->spinlock);
+ if (tmpmle->type == DLM_MLE_BLOCK)
+ response = DLM_MASTER_RESP_NO;
+ else if (tmpmle->type == DLM_MLE_MIGRATION) {
+ mlog(0, "migration mle was found (%u->%u)\n",
+ tmpmle->master, tmpmle->new_master);
+ if (tmpmle->master == dlm->node_num) {
+ mlog(ML_ERROR, "no lockres, but migration mle "
+ "says that this node is master!\n");
+ BUG();
+ }
+ /* real master can respond on its own */
+ response = DLM_MASTER_RESP_NO;
+ } else {
+ if (tmpmle->master == dlm->node_num) {
+ response = DLM_MASTER_RESP_YES;
+ set_maybe = 0;
+ } else
+ response = DLM_MASTER_RESP_MAYBE;
+ }
+ if (set_maybe)
+ set_bit(request->node_idx, tmpmle->maybe_map);
+ spin_unlock(&tmpmle->spinlock);
+ }
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+
+ if (found) {
+ /* keep the mle attached to heartbeat events */
+ dlm_put_mle(tmpmle);
+ }
+send_response:
+ dlm_put(dlm);
+ return response;
+}
+
+/*
+ * DLM_ASSERT_MASTER_MSG
+ */
+
+
+/*
+ * NOTE: this can be used for debugging
+ * can periodically run all locks owned by this node
+ * and re-assert across the cluster...
+ */
+static int dlm_do_assert_master(struct dlm_ctxt *dlm, const char *lockname,
+ unsigned int namelen, void *nodemap,
+ u32 flags)
+{
+ struct dlm_assert_master assert;
+ int to, tmpret;
+ struct dlm_node_iter iter;
+ int ret = 0;
+
+ BUG_ON(namelen > O2NM_MAX_NAME_LEN);
+
+ /* note that if this nodemap is empty, it returns 0 */
+ dlm_node_iter_init(nodemap, &iter);
+ while ((to = dlm_node_iter_next(&iter)) >= 0) {
+ int r = 0;
+ mlog(0, "sending assert master to %d (%.*s)\n", to,
+ namelen, lockname);
+ memset(&assert, 0, sizeof(assert));
+ assert.node_idx = dlm->node_num;
+ assert.namelen = namelen;
+ memcpy(assert.name, lockname, namelen);
+ assert.flags = cpu_to_be32(flags);
+
+ tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
+ &assert, sizeof(assert), to, &r);
+ if (tmpret < 0) {
+ mlog(ML_ERROR, "assert_master returned %d!\n", tmpret);
+ if (!dlm_is_host_down(tmpret)) {
+ mlog(ML_ERROR, "unhandled error!\n");
+ BUG();
+ }
+ /* a node died. finish out the rest of the nodes. */
+ mlog(ML_ERROR, "link to %d went down!\n", to);
+ /* any nonzero status return will do */
+ ret = tmpret;
+ } else if (r < 0) {
+ /* ok, something is horribly messed up. kill thyself. */
+ mlog(ML_ERROR,"during assert master of %.*s to %u, "
+ "got %d.\n", namelen, lockname, to, r);
+ dlm_dump_lock_resources(dlm);
+ BUG();
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * locks that can be taken here:
+ * dlm->spinlock
+ * res->spinlock
+ * mle->spinlock
+ * dlm->master_list
+ *
+ * if possible, TRIM THIS DOWN!!!
+ */
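+/* validate an incoming assert_master against the local mle
+ * (the asserting node should be the lowest bit in maybe_map
+ * unless a cleanup or migration is in progress) and against the
+ * current lockres owner.  on success record the master in the
+ * mle, wake any waiters and, for a migration mle, complete the
+ * ownership handoff.  a bogus assert dumps state and returns
+ * -EINVAL, which makes the sender BUG. */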
+int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_master_list_entry *mle = NULL;
+ struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
+ struct dlm_lock_resource *res = NULL;
+ char *name;
+ unsigned int namelen;
+ u32 flags;
+
+ if (!dlm_grab(dlm))
+ return 0;
+
+ name = assert->name;
+ namelen = assert->namelen;
+ flags = be32_to_cpu(assert->flags);
+
+ if (namelen > DLM_LOCKID_NAME_MAX) {
+ mlog(ML_ERROR, "Invalid name length!");
+ goto done;
+ }
+
+ spin_lock(&dlm->spinlock);
+
+ if (flags)
+ mlog(0, "assert_master with flags: %u\n", flags);
+
+ /* find the MLE */
+ spin_lock(&dlm->master_lock);
+ if (!dlm_find_mle(dlm, &mle, name, namelen)) {
+ /* not an error, could be master just re-asserting */
+ mlog(0, "just got an assert_master from %u, but no "
+ "MLE for it! (%.*s)\n", assert->node_idx,
+ namelen, name);
+ } else {
+ int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
+ if (bit >= O2NM_MAX_NODES) {
+ /* not necessarily an error, though less likely.
+ * could be master just re-asserting. */
+ mlog(ML_ERROR, "no bits set in the maybe_map, but %u "
+ "is asserting! (%.*s)\n", assert->node_idx,
+ namelen, name);
+ } else if (bit != assert->node_idx) {
+ if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
+ mlog(0, "master %u was found, %u should "
+ "back off\n", assert->node_idx, bit);
+ } else {
+ /* with the fix for bug 569, a higher node
+ * number winning the mastery will respond
+ * YES to mastery requests, but this node
+ * had no way of knowing. let it pass. */
+ mlog(ML_ERROR, "%u is the lowest node, "
+ "%u is asserting. (%.*s) %u must "
+ "have begun after %u won.\n", bit,
+ assert->node_idx, namelen, name, bit,
+ assert->node_idx);
+ }
+ }
+ }
+ spin_unlock(&dlm->master_lock);
+
+ /* ok everything checks out with the MLE
+ * now check to see if there is a lockres */
+ res = __dlm_lookup_lockres(dlm, name, namelen);
+ if (res) {
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_RECOVERING) {
+ mlog(ML_ERROR, "%u asserting but %.*s is "
+ "RECOVERING!\n", assert->node_idx, namelen, name);
+ goto kill;
+ }
+ if (!mle) {
+ if (res->owner != assert->node_idx) {
+ mlog(ML_ERROR, "assert_master from "
+ "%u, but current owner is "
+ "%u! (%.*s)\n",
+ assert->node_idx, res->owner,
+ namelen, name);
+ goto kill;
+ }
+ } else if (mle->type != DLM_MLE_MIGRATION) {
+ if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
+ /* owner is just re-asserting */
+ if (res->owner == assert->node_idx) {
+ mlog(0, "owner %u re-asserting on "
+ "lock %.*s\n", assert->node_idx,
+ namelen, name);
+ goto ok;
+ }
+ mlog(ML_ERROR, "got assert_master from "
+ "node %u, but %u is the owner! "
+ "(%.*s)\n", assert->node_idx,
+ res->owner, namelen, name);
+ goto kill;
+ }
+ if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
+ mlog(ML_ERROR, "got assert from %u, but lock "
+ "with no owner should be "
+ "in-progress! (%.*s)\n",
+ assert->node_idx,
+ namelen, name);
+ goto kill;
+ }
+ } else /* mle->type == DLM_MLE_MIGRATION */ {
+ /* should only be getting an assert from new master */
+ if (assert->node_idx != mle->new_master) {
+ mlog(ML_ERROR, "got assert from %u, but "
+ "new master is %u, and old master "
+ "was %u (%.*s)\n",
+ assert->node_idx, mle->new_master,
+ mle->master, namelen, name);
+ goto kill;
+ }
+
+ }
+ok:
+ spin_unlock(&res->spinlock);
+ }
+ spin_unlock(&dlm->spinlock);
+
+ // mlog(0, "woo! got an assert_master from node %u!\n",
+ // assert->node_idx);
+ if (mle) {
+ int extra_ref;
+
+ spin_lock(&mle->spinlock);
+ extra_ref = !!(mle->type == DLM_MLE_BLOCK
+ || mle->type == DLM_MLE_MIGRATION);
+ mle->master = assert->node_idx;
+ atomic_set(&mle->woken, 1);
+ wake_up(&mle->wq);
+ spin_unlock(&mle->spinlock);
+
+ if (mle->type == DLM_MLE_MIGRATION && res) {
+ mlog(0, "finishing off migration of lockres %.*s, "
+ "from %u to %u\n",
+ res->lockname.len, res->lockname.name,
+ dlm->node_num, mle->new_master);
+ spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_MIGRATING;
+ dlm_change_lockres_owner(dlm, res, mle->new_master);
+ BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
+ spin_unlock(&res->spinlock);
+ }
+ /* master is known, detach if not already detached */
+ dlm_mle_detach_hb_events(dlm, mle);
+ dlm_put_mle(mle);
+
+ if (extra_ref) {
+ /* the assert master message now balances the extra
+ * ref given by the master / migration request message.
+ * if this is the last put, it will be removed
+ * from the list. */
+ dlm_put_mle(mle);
+ }
+ }
+
+done:
+ if (res)
+ dlm_lockres_put(res);
+ dlm_put(dlm);
+ return 0;
+
+kill:
+ /* kill the caller! */
+ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->spinlock);
+ dlm_lockres_put(res);
+ mlog(ML_ERROR, "Bad message received from another node. Dumping state "
+ "and killing the other node now! This node is OK and can continue.\n");
+ dlm_dump_lock_resources(dlm);
+ dlm_put(dlm);
+ return -EINVAL;
+}
+
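+/* queue an assert_master broadcast for this lockres on the
+ * domain work list so that dlm_assert_master_worker() runs it
+ * later in process context instead of in the network handler.
+ * takes an extra ref on the dlm for the work item. */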
+int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ int ignore_higher, u8 request_from, u32 flags)
+{
+ struct dlm_work_item *item;
+ item = kcalloc(1, sizeof(*item), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+
+
+ /* queue up work for dlm_assert_master_worker */
+ dlm_grab(dlm); /* get an extra ref for the work item */
+ dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
+ item->u.am.lockres = res; /* already have a ref */
+ /* can optionally ignore node numbers higher than this node */
+ item->u.am.ignore_higher = ignore_higher;
+ item->u.am.request_from = request_from;
+ item->u.am.flags = flags;
+
+ spin_lock(&dlm->work_lock);
+ list_add_tail(&item->list, &dlm->work_list);
+ spin_unlock(&dlm->work_lock);
+
+ schedule_work(&dlm->dispatched_work);
+ return 0;
+}
+
+static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
+{
+ struct dlm_ctxt *dlm = data;
+ int ret = 0;
+ struct dlm_lock_resource *res;
+ unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ int ignore_higher;
+ int bit;
+ u8 request_from;
+ u32 flags;
+
+ dlm = item->dlm;
+ res = item->u.am.lockres;
+ ignore_higher = item->u.am.ignore_higher;
+ request_from = item->u.am.request_from;
+ flags = item->u.am.flags;
+
+ spin_lock(&dlm->spinlock);
+ memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
+ spin_unlock(&dlm->spinlock);
+
+ clear_bit(dlm->node_num, nodemap);
+ if (ignore_higher) {
+ /* if this is just to clear up mles for nodes below
+ * this node, do not send the message to the original
+ * caller or any node number higher than this */
+ clear_bit(request_from, nodemap);
+ bit = dlm->node_num;
+ while (1) {
+ bit = find_next_bit(nodemap, O2NM_MAX_NODES,
+ bit+1);
+ if (bit >= O2NM_MAX_NODES)
+ break;
+ clear_bit(bit, nodemap);
+ }
+ }
+
+ /* this call now finishes out the nodemap
+ * even if one or more nodes die */
+ mlog(0, "worker about to master %.*s here, this=%u\n",
+ res->lockname.len, res->lockname.name, dlm->node_num);
+ ret = dlm_do_assert_master(dlm, res->lockname.name,
+ res->lockname.len,
+ nodemap, flags);
+ if (ret < 0) {
+ /* no need to restart, we are done */
+ mlog_errno(ret);
+ }
+
+ dlm_lockres_put(res);
+
+ mlog(0, "finished with dlm_assert_master_worker\n");
+}
+
+
+/*
+ * DLM_MIGRATE_LOCKRES
+ */
+
+
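+/* migrate mastery of a lockres to another node.  the steps are:
+ * refuse if we are not the owner or if any lock held by this
+ * node is still on the queues; preallocate the migratable
+ * lockres page and a migration mle; pick a live target; install
+ * the migration mle (-EEXIST if a migration is already under
+ * way); mark the lockres MIGRATING once all asts are flushed;
+ * send the full lock state to the target; wait for the target's
+ * assert_master; finally hand over ownership and drop the
+ * nonlocal lock structures. */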
+int dlm_migrate_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+ u8 target)
+{
+ struct dlm_master_list_entry *mle = NULL;
+ struct dlm_master_list_entry *oldmle = NULL;
+ struct dlm_migratable_lockres *mres = NULL;
+ int ret = -EINVAL;
+ const char *name;
+ unsigned int namelen;
+ int mle_added = 0;
+ struct list_head *queue, *iter;
+ int i;
+ struct dlm_lock *lock;
+ int empty = 1;
+
+ if (!dlm_grab(dlm))
+ return -EINVAL;
+
+ name = res->lockname.name;
+ namelen = res->lockname.len;
+
+ mlog(0, "migrating %.*s to %u\n", namelen, name, target);
+
+ /*
+ * ensure this lockres is a proper candidate for migration
+ */
+ spin_lock(&res->spinlock);
+ if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
+ mlog(0, "cannot migrate lockres with unknown owner!\n");
+ spin_unlock(&res->spinlock);
+ goto leave;
+ }
+ if (res->owner != dlm->node_num) {
+ mlog(0, "cannot migrate lockres this node doesn't own!\n");
+ spin_unlock(&res->spinlock);
+ goto leave;
+ }
+ mlog(0, "checking queues...\n");
+ queue = &res->granted;
+ for (i=0; i<3; i++) {
+ list_for_each(iter, queue) {
+ lock = list_entry (iter, struct dlm_lock, list);
+ empty = 0;
+ if (lock->ml.node == dlm->node_num) {
+ mlog(0, "found a lock owned by this node "
+ "still on the %s queue! will not "
+ "migrate this lockres\n",
+ i==0 ? "granted" :
+ (i==1 ? "converting" : "blocked"));
+ spin_unlock(&res->spinlock);
+ ret = -ENOTEMPTY;
+ goto leave;
+ }
+ }
+ queue++;
+ }
+ mlog(0, "all locks on this lockres are nonlocal. continuing\n");
+ spin_unlock(&res->spinlock);
+
+ /* no work to do */
+ if (empty) {
+ mlog(0, "no locks were found on this lockres! done!\n");
+ ret = 0;
+ goto leave;
+ }
+
+ /*
+ * preallocate up front
+ * if this fails, abort
+ */
+
+ ret = -ENOMEM;
+ mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_KERNEL);
+ if (!mres) {
+ mlog_errno(ret);
+ goto leave;
+ }
+
+ mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
+ GFP_KERNEL);
+ if (!mle) {
+ mlog_errno(ret);
+ goto leave;
+ }
+ ret = 0;
+
+ /*
+ * find a node to migrate the lockres to
+ */
+
+ mlog(0, "picking a migration node\n");
+ spin_lock(&dlm->spinlock);
+ /* pick a new node */
+ if (!test_bit(target, dlm->domain_map) ||
+ target >= O2NM_MAX_NODES) {
+ target = dlm_pick_migration_target(dlm, res);
+ }
+ mlog(0, "node %u chosen for migration\n", target);
+
+ if (target >= O2NM_MAX_NODES ||
+ !test_bit(target, dlm->domain_map)) {
+ /* target chosen is not alive */
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ spin_unlock(&dlm->spinlock);
+ goto fail;
+ }
+
+ mlog(0, "continuing with target = %u\n", target);
+
+ /*
+ * clear any existing master requests and
+ * add the migration mle to the list
+ */
+ spin_lock(&dlm->master_lock);
+ ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
+ namelen, target, dlm->node_num);
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+
+ if (ret == -EEXIST) {
+ mlog(0, "another process is already migrating it\n");
+ goto fail;
+ }
+ mle_added = 1;
+
+ /*
+ * set the MIGRATING flag and flush asts
+ * if we fail after this we need to re-dirty the lockres
+ */
+ if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
+ mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
+ "the target went down.\n", res->lockname.len,
+ res->lockname.name, target);
+ spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_MIGRATING;
+ spin_unlock(&res->spinlock);
+ ret = -EINVAL;
+ }
+
+fail:
+ if (oldmle) {
+ /* master is known, detach if not already detached */
+ dlm_mle_detach_hb_events(dlm, oldmle);
+ dlm_put_mle(oldmle);
+ }
+
+ if (ret < 0) {
+ if (mle_added) {
+ dlm_mle_detach_hb_events(dlm, mle);
+ dlm_put_mle(mle);
+ } else if (mle) {
+ kmem_cache_free(dlm_mle_cache, mle);
+ }
+ goto leave;
+ }
+
+ /*
+ * at this point, we have a migration target, an mle
+ * in the master list, and the MIGRATING flag set on
+ * the lockres
+ */
+
+
+ /* get an extra reference on the mle.
+ * otherwise the assert_master from the new
+ * master will destroy this.
+ * also, make sure that all callers of dlm_get_mle
+ * take both dlm->spinlock and dlm->master_lock */
+ spin_lock(&dlm->spinlock);
+ spin_lock(&dlm->master_lock);
+ dlm_get_mle(mle);
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+
+ /* notify new node and send all lock state */
+ /* call send_one_lockres with migration flag.
+ * this serves as notice to the target node that a
+ * migration is starting. */
+ ret = dlm_send_one_lockres(dlm, res, mres, target,
+ DLM_MRES_MIGRATION);
+
+ if (ret < 0) {
+ mlog(0, "migration to node %u failed with %d\n",
+ target, ret);
+ /* migration failed, detach and clean up mle */
+ dlm_mle_detach_hb_events(dlm, mle);
+ dlm_put_mle(mle);
+ dlm_put_mle(mle);
+ goto leave;
+ }
+
+ /* at this point, the target sends a message to all nodes,
+ * (using dlm_do_migrate_request). this node is skipped since
+ * we had to put an mle in the list to begin the process. this
+ * node now waits for target to do an assert master. this node
+ * will be the last one notified, ensuring that the migration
+ * is complete everywhere. if the target dies while this is
+ * going on, some nodes could potentially see the target as the
+ * master, so it is important that my recovery finds the migration
+ * mle and sets the master to UNKNOWN. */
+
+
+ /* wait for new node to assert master */
+ while (1) {
+ ret = wait_event_interruptible_timeout(mle->wq,
+ (atomic_read(&mle->woken) == 1),
+ msecs_to_jiffies(5000));
+
+ if (ret >= 0) {
+ if (atomic_read(&mle->woken) == 1 ||
+ res->owner == target)
+ break;
+
+ mlog(0, "timed out during migration\n");
+ }
+ if (ret == -ERESTARTSYS) {
+ /* migration failed, detach and clean up mle */
+ dlm_mle_detach_hb_events(dlm, mle);
+ dlm_put_mle(mle);
+ dlm_put_mle(mle);
+ goto leave;
+ }
+ /* TODO: if node died: stop, clean up, return error */
+ }
+
+ /* all done, set the owner, clear the flag */
+ spin_lock(&res->spinlock);
+ dlm_set_lockres_owner(dlm, res, target);
+ res->state &= ~DLM_LOCK_RES_MIGRATING;
+ dlm_remove_nonlocal_locks(dlm, res);
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+
+ /* master is known, detach if not already detached */
+ dlm_mle_detach_hb_events(dlm, mle);
+ dlm_put_mle(mle);
+ ret = 0;
+
+ dlm_lockres_calc_usage(dlm, res);
+
+leave:
+ /* re-dirty the lockres if we failed */
+ if (ret < 0)
+ dlm_kick_thread(dlm, res);
+
+ /* TODO: cleanup */
+ if (mres)
+ free_page((unsigned long)mres);
+
+ dlm_put(dlm);
+
+ mlog(0, "returning %d\n", ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dlm_migrate_lockres);
+
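+/* returns nonzero once this lock has no queued or pending basts,
+ * taking dlm->ast_lock and lock->spinlock for a stable answer */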
+int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
+{
+ int ret;
+ spin_lock(&dlm->ast_lock);
+ spin_lock(&lock->spinlock);
+ ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
+ spin_unlock(&lock->spinlock);
+ spin_unlock(&dlm->ast_lock);
+ return ret;
+}
+
+static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ u8 mig_target)
+{
+ int can_proceed;
+ spin_lock(&res->spinlock);
+ can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
+ spin_unlock(&res->spinlock);
+
+ /* target has died, so make the caller break out of the
+ * wait_event, but caller must recheck the domain_map */
+ spin_lock(&dlm->spinlock);
+ if (!test_bit(mig_target, dlm->domain_map))
+ can_proceed = 1;
+ spin_unlock(&dlm->spinlock);
+ return can_proceed;
+}
+
+int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
+{
+ int ret;
+ spin_lock(&res->spinlock);
+ ret = !!(res->state & DLM_LOCK_RES_DIRTY);
+ spin_unlock(&res->spinlock);
+ return ret;
+}
+
+
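+/* flag the lockres as MIGRATING once it has gone quiet.  we set
+ * migration_pending, reserve an extra ast, kick the dlm thread
+ * and wait for the lockres to be flushed; the final
+ * dlm_lockres_release_ast() then atomically sets
+ * DLM_LOCK_RES_MIGRATING.  returns -EHOSTDOWN if the target
+ * node dies while we wait. */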
+static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ u8 target)
+{
+ int ret = 0;
+
+ mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
+ res->lockname.len, res->lockname.name, dlm->node_num,
+ target);
+ /* need to set MIGRATING flag on lockres. this is done by
+ * ensuring that all asts have been flushed for this lockres. */
+ spin_lock(&res->spinlock);
+ BUG_ON(res->migration_pending);
+ res->migration_pending = 1;
+ /* strategy is to reserve an extra ast then release
+ * it below, letting the release do all of the work */
+ __dlm_lockres_reserve_ast(res);
+ spin_unlock(&res->spinlock);
+
+ /* now flush all the pending asts.. hang out for a bit */
+ dlm_kick_thread(dlm, res);
+ wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
+ dlm_lockres_release_ast(dlm, res);
+
+ mlog(0, "about to wait on migration_wq, dirty=%s\n",
+ res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
+ /* if the extra ref we just put was the final one, this
+ * will pass thru immediately. otherwise, we need to wait
+ * for the last ast to finish. */
+again:
+ ret = wait_event_interruptible_timeout(dlm->migration_wq,
+ dlm_migration_can_proceed(dlm, res, target),
+ msecs_to_jiffies(1000));
+ if (ret < 0) {
+ mlog(0, "woken again: migrating? %s, dead? %s\n",
+ res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
+ test_bit(target, dlm->domain_map) ? "no":"yes");
+ } else {
+ mlog(0, "all is well: migrating? %s, dead? %s\n",
+ res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
+ test_bit(target, dlm->domain_map) ? "no":"yes");
+ }
+ if (!dlm_migration_can_proceed(dlm, res, target)) {
+ mlog(0, "trying again...\n");
+ goto again;
+ }
+
+ /* did the target go down or die? */
+ spin_lock(&dlm->spinlock);
+ if (!test_bit(target, dlm->domain_map)) {
+ mlog(ML_ERROR, "aha. migration target %u just went down\n",
+ target);
+ ret = -EHOSTDOWN;
+ }
+ spin_unlock(&dlm->spinlock);
+
+ /*
+ * at this point:
+ *
+ * o the DLM_LOCK_RES_MIGRATING flag is set
+ * o there are no pending asts on this lockres
+ * o all processes trying to reserve an ast on this
+ * lockres must wait for the MIGRATING flag to clear
+ */
+ return ret;
+}
+
+/* last step in the migration process.
+ * original master calls this to free all of the dlm_lock
+ * structures that used to be for other nodes. */
+static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ struct list_head *iter, *iter2;
+ struct list_head *queue = &res->granted;
+ int i;
+ struct dlm_lock *lock;
+
+ assert_spin_locked(&res->spinlock);
+
+ BUG_ON(res->owner == dlm->node_num);
+
+ for (i=0; i<3; i++) {
+ list_for_each_safe(iter, iter2, queue) {
+ lock = list_entry (iter, struct dlm_lock, list);
+ if (lock->ml.node != dlm->node_num) {
+ mlog(0, "putting lock for node %u\n",
+ lock->ml.node);
+ /* be extra careful */
+ BUG_ON(!list_empty(&lock->ast_list));
+ BUG_ON(!list_empty(&lock->bast_list));
+ BUG_ON(lock->ast_pending);
+ BUG_ON(lock->bast_pending);
+ list_del_init(&lock->list);
+ dlm_lock_put(lock);
+ }
+ }
+ queue++;
+ }
+}
+
+/* for now this is not too intelligent. we will
+ * need stats to make this do the right thing.
+ * this just finds the first lock on one of the
+ * queues and uses that node as the target. */
+static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ int i;
+ struct list_head *queue = &res->granted;
+ struct list_head *iter;
+ struct dlm_lock *lock;
+ int nodenum;
+
+ assert_spin_locked(&dlm->spinlock);
+
+ spin_lock(&res->spinlock);
+ for (i=0; i<3; i++) {
+ list_for_each(iter, queue) {
+ /* up to the caller to make sure this node
+ * is alive */
+ lock = list_entry (iter, struct dlm_lock, list);
+ if (lock->ml.node != dlm->node_num) {
+ spin_unlock(&res->spinlock);
+ return lock->ml.node;
+ }
+ }
+ queue++;
+ }
+ spin_unlock(&res->spinlock);
+ mlog(0, "have not found a suitable target yet! checking domain map\n");
+
+ /* ok now we're getting desperate. pick anyone alive. */
+ nodenum = -1;
+ while (1) {
+ nodenum = find_next_bit(dlm->domain_map,
+ O2NM_MAX_NODES, nodenum+1);
+ mlog(0, "found %d in domain map\n", nodenum);
+ if (nodenum >= O2NM_MAX_NODES)
+ break;
+ if (nodenum != dlm->node_num) {
+ mlog(0, "picking %d\n", nodenum);
+ return nodenum;
+ }
+ }
+
+ mlog(0, "giving up. no master to migrate to\n");
+ return DLM_LOCK_RES_OWNER_UNKNOWN;
+}
+
+
+
+/* this is called by the new master once all lockres
+ * data has been received */
+static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ u8 master, u8 new_master,
+ struct dlm_node_iter *iter)
+{
+ struct dlm_migrate_request migrate;
+ int ret, status = 0;
+ int nodenum;
+
+ memset(&migrate, 0, sizeof(migrate));
+ migrate.namelen = res->lockname.len;
+ memcpy(migrate.name, res->lockname.name, migrate.namelen);
+ migrate.new_master = new_master;
+ migrate.master = master;
+
+ ret = 0;
+
+ /* send message to all nodes, except the master and myself */
+ while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
+ if (nodenum == master ||
+ nodenum == new_master)
+ continue;
+
+ ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
+ &migrate, sizeof(migrate), nodenum,
+ &status);
+ if (ret < 0)
+ mlog_errno(ret);
+ else if (status < 0) {
+ mlog(0, "migrate request (node %u) returned %d!\n",
+ nodenum, status);
+ ret = status;
+ }
+ }
+
+ if (ret < 0)
+ mlog_errno(ret);
+
+ mlog(0, "returning ret=%d\n", ret);
+ return ret;
+}
+
+
+/* if there is an existing mle for this lockres, we now know who the master is.
+ * (the one who sent us *this* message) we can clear it up right away.
+ * since the process that put the mle on the list still has a reference to it,
+ * we can unhash it now, set the master and wake the process. as a result,
+ * we will have no mle in the list to start with. now we can add an mle for
+ * the migration and this should be the only one found for those scanning the
+ * list. */
+int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_lock_resource *res = NULL;
+ struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
+ struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
+ const char *name;
+ unsigned int namelen;
+ int ret = 0;
+
+ if (!dlm_grab(dlm))
+ return -EINVAL;
+
+ name = migrate->name;
+ namelen = migrate->namelen;
+
+ /* preallocate.. if this fails, abort */
+ mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
+ GFP_KERNEL);
+
+ if (!mle) {
+ ret = -ENOMEM;
+ goto leave;
+ }
+
+ /* check for pre-existing lock */
+ spin_lock(&dlm->spinlock);
+ res = __dlm_lookup_lockres(dlm, name, namelen);
+ spin_lock(&dlm->master_lock);
+
+ if (res) {
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_RECOVERING) {
+ /* if all is working ok, this can only mean that we got
+ * a migrate request from a node that we now see as
+ * dead. what can we do here? drop it to the floor? */
+ spin_unlock(&res->spinlock);
+ mlog(ML_ERROR, "Got a migrate request, but the "
+ "lockres is marked as recovering!");
+ kmem_cache_free(dlm_mle_cache, mle);
+ ret = -EINVAL; /* need a better solution */
+ goto unlock;
+ }
+ res->state |= DLM_LOCK_RES_MIGRATING;
+ spin_unlock(&res->spinlock);
+ }
+
+ /* ignore status. only nonzero status would BUG. */
+ ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
+ name, namelen,
+ migrate->new_master,
+ migrate->master);
+
+unlock:
+ spin_unlock(&dlm->master_lock);
+ spin_unlock(&dlm->spinlock);
+
+ if (oldmle) {
+ /* master is known, detach if not already detached */
+ dlm_mle_detach_hb_events(dlm, oldmle);
+ dlm_put_mle(oldmle);
+ }
+
+ if (res)
+ dlm_lockres_put(res);
+leave:
+ dlm_put(dlm);
+ return ret;
+}
+
+/* must be holding dlm->spinlock and dlm->master_lock
+ * when adding a migration mle, we can clear any other mles
+ * in the master list because we know with certainty that
+ * the master is "master". so we remove any old mle from
+ * the list after setting its master field, and then add
+ * the new migration mle. this way we can keep to the rule
+ * of having only one mle for a given lock name at all times. */
+static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_master_list_entry *mle,
+ struct dlm_master_list_entry **oldmle,
+ const char *name, unsigned int namelen,
+ u8 new_master, u8 master)
+{
+ int found;
+ int ret = 0;
+
+ *oldmle = NULL;
+
+ mlog_entry_void();
+
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&dlm->master_lock);
+
+ /* caller is responsible for any ref taken here on oldmle */
+ found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
+ if (found) {
+ struct dlm_master_list_entry *tmp = *oldmle;
+ spin_lock(&tmp->spinlock);
+ if (tmp->type == DLM_MLE_MIGRATION) {
+ if (master == dlm->node_num) {
+ /* ah another process raced me to it */
+ mlog(0, "tried to migrate %.*s, but some "
+ "process beat me to it\n",
+ namelen, name);
+ ret = -EEXIST;
+ } else {
+ /* bad. 2 NODES are trying to migrate! */
+ mlog(ML_ERROR, "migration error mle: "
+ "master=%u new_master=%u // request: "
+ "master=%u new_master=%u // "
+ "lockres=%.*s\n",
+ tmp->master, tmp->new_master,
+ master, new_master,
+ namelen, name);
+ BUG();
+ }
+ } else {
+ /* this is essentially what assert_master does */
+ tmp->master = master;
+ atomic_set(&tmp->woken, 1);
+ wake_up(&tmp->wq);
+ /* remove it from the list so that only one
+ * mle will be found */
+ list_del_init(&tmp->list);
+ }
+ spin_unlock(&tmp->spinlock);
+ }
+
+ /* now add a migration mle to the tail of the list */
+ dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
+ mle->new_master = new_master;
+ mle->master = master;
+ /* do this for consistency with other mle types */
+ set_bit(new_master, mle->maybe_map);
+ list_add(&mle->list, &dlm->master_list);
+
+ return ret;
+}
+
+
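+/* run during recovery with dlm->spinlock held.  walk the master
+ * list and clean up after the dead node: MASTER mles are left
+ * for their local waiters to notice, BLOCK mles whose expected
+ * master died are woken and dropped, and MIGRATION mles that
+ * involved the dead node are unlinked, with any matching lockres
+ * set to OWNER_UNKNOWN and moved to the recovery list (restarting
+ * the scan, since dlm->master_lock must be dropped to do so). */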
+void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
+{
+ struct list_head *iter, *iter2;
+ struct dlm_master_list_entry *mle;
+ struct dlm_lock_resource *res;
+
+ mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
+top:
+ assert_spin_locked(&dlm->spinlock);
+
+ /* clean the master list */
+ spin_lock(&dlm->master_lock);
+ list_for_each_safe(iter, iter2, &dlm->master_list) {
+ mle = list_entry(iter, struct dlm_master_list_entry, list);
+
+ BUG_ON(mle->type != DLM_MLE_BLOCK &&
+ mle->type != DLM_MLE_MASTER &&
+ mle->type != DLM_MLE_MIGRATION);
+
+ /* MASTER mles are initiated locally. the waiting
+ * process will notice the node map change
+ * shortly. let that happen as normal. */
+ if (mle->type == DLM_MLE_MASTER)
+ continue;
+
+
+ /* BLOCK mles are initiated by other nodes.
+ * need to clean up if the dead node would have
+ * been the master. */
+ if (mle->type == DLM_MLE_BLOCK) {
+ int bit;
+
+ spin_lock(&mle->spinlock);
+ bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
+ if (bit != dead_node) {
+ mlog(0, "mle found, but dead node %u would "
+ "not have been master\n", dead_node);
+ spin_unlock(&mle->spinlock);
+ } else {
+ /* must drop the refcount by one since the
+ * assert_master will never arrive. this
+ * may result in the mle being unlinked and
+ * freed, but there may still be a process
+ * waiting in the dlmlock path which is fine. */
+ mlog(ML_ERROR, "node %u was expected master\n",
+ dead_node);
+ atomic_set(&mle->woken, 1);
+ spin_unlock(&mle->spinlock);
+ wake_up(&mle->wq);
+ /* final put will take care of list removal */
+ __dlm_put_mle(mle);
+ }
+ continue;
+ }
+
+ /* everything else is a MIGRATION mle */
+
+ /* the rule for MIGRATION mles is that the master
+ * becomes UNKNOWN if *either* the original or
+ * the new master dies. all UNKNOWN lockreses
+ * are sent to whichever node becomes the recovery
+ * master. the new master is responsible for
+ * determining if there is still a master for
+ * this lockres, or if he needs to take over
+ * mastery. either way, this node should expect
+ * another message to resolve this. */
+ if (mle->master != dead_node &&
+ mle->new_master != dead_node)
+ continue;
+
+ /* if we have reached this point, this mle needs to
+ * be removed from the list and freed. */
+
+ /* remove from the list early. NOTE: unlinking
+ * list_head while in list_for_each_safe */
+ spin_lock(&mle->spinlock);
+ list_del_init(&mle->list);
+ atomic_set(&mle->woken, 1);
+ spin_unlock(&mle->spinlock);
+ wake_up(&mle->wq);
+
+ mlog(0, "node %u died during migration from "
+ "%u to %u!\n", dead_node,
+ mle->master, mle->new_master);
+ /* if there is a lockres associated with this
+ * mle, find it and set its owner to UNKNOWN */
+ res = __dlm_lookup_lockres(dlm, mle->u.name.name,
+ mle->u.name.len);
+ if (res) {
+ /* unfortunately if we hit this rare case, our
+ * lock ordering is messed up. we need to drop
+ * the master lock so that we can take the
+ * lockres lock, meaning that we will have to
+ * restart from the head of list. */
+ spin_unlock(&dlm->master_lock);
+
+ /* move lockres onto recovery list */
+ spin_lock(&res->spinlock);
+ dlm_set_lockres_owner(dlm, res,
+ DLM_LOCK_RES_OWNER_UNKNOWN);
+ dlm_move_lockres_to_recovery_list(dlm, res);
+ spin_unlock(&res->spinlock);
+ dlm_lockres_put(res);
+
+ /* dump the mle */
+ spin_lock(&dlm->master_lock);
+ __dlm_put_mle(mle);
+ spin_unlock(&dlm->master_lock);
+
+ /* restart */
+ goto top;
+ }
+
+ /* this may be the last reference */
+ __dlm_put_mle(mle);
+ }
+ spin_unlock(&dlm->master_lock);
+}
+
+
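+/* final step on the new master once all lock state has arrived:
+ * tell the other nodes about the migration, assert mastery to
+ * everyone except the old master, then assert back to the old
+ * master, and finally take ownership, clear MIGRATING and
+ * re-dirty the lockres locally. */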
+int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+ u8 old_master)
+{
+ struct dlm_node_iter iter;
+ int ret = 0;
+
+ spin_lock(&dlm->spinlock);
+ dlm_node_iter_init(dlm->domain_map, &iter);
+ clear_bit(old_master, iter.node_map);
+ clear_bit(dlm->node_num, iter.node_map);
+ spin_unlock(&dlm->spinlock);
+
+ mlog(0, "now time to do a migrate request to other nodes\n");
+ ret = dlm_do_migrate_request(dlm, res, old_master,
+ dlm->node_num, &iter);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto leave;
+ }
+
+ mlog(0, "doing assert master of %.*s to all except the original node\n",
+ res->lockname.len, res->lockname.name);
+ /* this call now finishes out the nodemap
+ * even if one or more nodes die */
+ ret = dlm_do_assert_master(dlm, res->lockname.name,
+ res->lockname.len, iter.node_map,
+ DLM_ASSERT_MASTER_FINISH_MIGRATION);
+ if (ret < 0) {
+ /* no longer need to retry. all living nodes contacted. */
+ mlog_errno(ret);
+ ret = 0;
+ }
+
+ memset(iter.node_map, 0, sizeof(iter.node_map));
+ set_bit(old_master, iter.node_map);
+ mlog(0, "doing assert master of %.*s back to %u\n",
+ res->lockname.len, res->lockname.name, old_master);
+ ret = dlm_do_assert_master(dlm, res->lockname.name,
+ res->lockname.len, iter.node_map,
+ DLM_ASSERT_MASTER_FINISH_MIGRATION);
+ if (ret < 0) {
+ mlog(0, "assert master to original master failed "
+ "with %d.\n", ret);
+ /* the only nonzero status here would be because of
+ * a dead original node. we're done. */
+ ret = 0;
+ }
+
+ /* all done, set the owner, clear the flag */
+ spin_lock(&res->spinlock);
+ dlm_set_lockres_owner(dlm, res, dlm->node_num);
+ res->state &= ~DLM_LOCK_RES_MIGRATING;
+ spin_unlock(&res->spinlock);
+ /* re-dirty it on the new master */
+ dlm_kick_thread(dlm, res);
+ wake_up(&res->wq);
+leave:
+ return ret;
+}
+
+/*
+ * LOCKRES AST REFCOUNT
+ * this is integral to migration
+ */
+
+/* for future intent to call an ast, reserve one ahead of time.
+ * this should be called only after waiting on the lockres
+ * with dlm_wait_on_lockres, and while still holding the
+ * spinlock after the call. */
+void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
+{
+ assert_spin_locked(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_MIGRATING) {
+ __dlm_print_one_lock_resource(res);
+ }
+ BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
+
+ atomic_inc(&res->asts_reserved);
+}
+
+/*
+ * used to drop the reserved ast, either because it went unused,
+ * or because the ast/bast was actually called.
+ *
+ * also, if there is a pending migration on this lockres,
+ * and this was the last pending ast on the lockres,
+ * atomically set the MIGRATING flag before we drop the lock.
+ * this is how we ensure that migration can proceed with no
+ * asts in progress. note that it is ok if the state of the
+ * queues is such that a lock should be granted in the future
+ * or that a bast should be fired, because the new master will
+ * shuffle the lists on this lockres as soon as it is migrated.
+ */
+void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
+ return;
+
+ if (!res->migration_pending) {
+ spin_unlock(&res->spinlock);
+ return;
+ }
+
+ BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
+ res->migration_pending = 0;
+ res->state |= DLM_LOCK_RES_MIGRATING;
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+ wake_up(&dlm->migration_wq);
+}
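+
+/* rough usage sketch for the ast refcount above (illustrative only, not
+ * a new caller):
+ *
+ *    spin_lock(&res->spinlock);
+ *    __dlm_lockres_reserve_ast(res);
+ *    spin_unlock(&res->spinlock);
+ *    ...queue or deliver the ast/bast...
+ *    dlm_lockres_release_ast(dlm, res);
+ *
+ * the final release may atomically set DLM_LOCK_RES_MIGRATING if a
+ * migration was pending, which is what lets migration wait out all
+ * in-flight asts. */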
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
new file mode 100644
index 00000000000..0c8eb1093f0
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -0,0 +1,2132 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmrecovery.c
+ *
+ * recovery stuff: remastering locks owned by dead nodes
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ */
+
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/random.h>
+#include <linux/blkdev.h>
+#include <linux/socket.h>
+#include <linux/inet.h>
+#include <linux/timer.h>
+#include <linux/kthread.h>
+
+
+#include "cluster/heartbeat.h"
+#include "cluster/nodemanager.h"
+#include "cluster/tcp.h"
+
+#include "dlmapi.h"
+#include "dlmcommon.h"
+#include "dlmdomain.h"
+
+#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
+#include "cluster/masklog.h"
+
+static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);
+
+static int dlm_recovery_thread(void *data);
+void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
+int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
+static void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
+static int dlm_do_recovery(struct dlm_ctxt *dlm);
+
+static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
+static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
+static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
+static int dlm_request_all_locks(struct dlm_ctxt *dlm,
+ u8 request_from, u8 dead_node);
+static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
+
+static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
+static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
+ const char *lockname, int namelen,
+ int total_locks, u64 cookie,
+ u8 flags, u8 master);
+static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
+ struct dlm_migratable_lockres *mres,
+ u8 send_to,
+ struct dlm_lock_resource *res,
+ int total_locks);
+static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ u8 *real_master);
+static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_migratable_lockres *mres);
+static int dlm_do_master_requery(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ u8 nodenum, u8 *real_master);
+static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
+static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
+ u8 dead_node, u8 send_to);
+static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
+static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
+ struct list_head *list, u8 dead_node);
+static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
+ u8 dead_node, u8 new_master);
+static void dlm_reco_ast(void *astdata);
+static void dlm_reco_bast(void *astdata, int blocked_type);
+static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
+static void dlm_request_all_locks_worker(struct dlm_work_item *item,
+ void *data);
+static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
+
+static u64 dlm_get_next_mig_cookie(void);
+
+static spinlock_t dlm_reco_state_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t dlm_mig_cookie_lock = SPIN_LOCK_UNLOCKED;
+static u64 dlm_mig_cookie = 1;
+
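+/* a mig_cookie of 0 means "single message" (see dlm_send_one_lockres
+ * below, which only asks for a cookie when a lockres needs more than one
+ * network packet), so the generator skips 0 when it wraps. */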
+static u64 dlm_get_next_mig_cookie(void)
+{
+ u64 c;
+ spin_lock(&dlm_mig_cookie_lock);
+ c = dlm_mig_cookie;
+ if (dlm_mig_cookie == (~0ULL))
+ dlm_mig_cookie = 1;
+ else
+ dlm_mig_cookie++;
+ spin_unlock(&dlm_mig_cookie_lock);
+ return c;
+}
+
+static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
+{
+ spin_lock(&dlm->spinlock);
+ clear_bit(dlm->reco.dead_node, dlm->recovery_map);
+ dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
+ dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
+ spin_unlock(&dlm->spinlock);
+}
+
+/* Worker function used during recovery. */
+void dlm_dispatch_work(void *data)
+{
+ struct dlm_ctxt *dlm = (struct dlm_ctxt *)data;
+ LIST_HEAD(tmp_list);
+ struct list_head *iter, *iter2;
+ struct dlm_work_item *item;
+ dlm_workfunc_t *workfunc;
+
+ spin_lock(&dlm->work_lock);
+ list_splice_init(&dlm->work_list, &tmp_list);
+ spin_unlock(&dlm->work_lock);
+
+ list_for_each_safe(iter, iter2, &tmp_list) {
+ item = list_entry(iter, struct dlm_work_item, list);
+ workfunc = item->func;
+ list_del_init(&item->list);
+
+ /* already have ref on dlm to avoid having
+ * it disappear. just double-check. */
+ BUG_ON(item->dlm != dlm);
+
+ /* this is allowed to sleep and
+ * call network stuff */
+ workfunc(item, item->data);
+
+ dlm_put(dlm);
+ kfree(item);
+ }
+}
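+
+/* handlers that must not block (e.g. dlm_request_all_locks_handler and
+ * dlm_mig_lockres_handler below) hand their heavy lifting to this
+ * function by building a dlm_work_item with dlm_init_work_item(),
+ * adding it to dlm->work_list under dlm->work_lock and calling
+ * schedule_work(&dlm->dispatched_work). */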
+
+/*
+ * RECOVERY THREAD
+ */
+
+static void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
+{
+ /* wake the recovery thread
+ * this will wake the reco thread in one of three places
+ * 1) sleeping with no recovery happening
+ * 2) sleeping with recovery mastered elsewhere
+ * 3) recovery mastered here, waiting on reco data */
+
+ wake_up(&dlm->dlm_reco_thread_wq);
+}
+
+/* Launch the recovery thread */
+int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
+{
+ mlog(0, "starting dlm recovery thread...\n");
+
+ dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
+ "dlm_reco_thread");
+ if (IS_ERR(dlm->dlm_reco_thread_task)) {
+ mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
+ dlm->dlm_reco_thread_task = NULL;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
+{
+ if (dlm->dlm_reco_thread_task) {
+ mlog(0, "waiting for dlm recovery thread to exit\n");
+ kthread_stop(dlm->dlm_reco_thread_task);
+ dlm->dlm_reco_thread_task = NULL;
+ }
+}
+
+
+
+/*
+ * this is lame, but here's how recovery works...
+ * 1) all recovery threads cluster wide will work on recovering
+ * ONE node at a time
+ * 2) negotiate who will take over all the locks for the dead node.
+ * that's right... ALL the locks.
+ * 3) once a new master is chosen, everyone scans all locks
+ * and moves aside those mastered by the dead node
+ * 4) each of these locks should be locked until recovery is done
+ * 5) the new master collects up all of the secondary lock queue info
+ * one lock at a time, forcing each node to communicate back
+ * before continuing
+ * 6) each secondary lock queue responds with the full known lock info
+ * 7) once the new master has run all its locks, it sends an ALLDONE!
+ * message to everyone
+ * 8) upon receiving this message, the secondary queue node unlocks
+ * and responds to the ALLDONE
+ * 9) once the new master gets responses from everyone, it unlocks
+ * everything and recovery for this dead node is done
+ *10) go back to 2) while there are still dead nodes
+ *
+ */
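+
+/* a rough sketch of the message flow for one dead node, as implemented
+ * by the handlers in this file (new recovery master on the left):
+ *
+ *    DLM_BEGIN_RECO_MSG      ->  all live nodes
+ *    DLM_LOCK_REQUEST_MSG    ->  each live node
+ *    DLM_MIG_LOCKRES_MSG     <-  each node, one lockres per message
+ *    DLM_RECO_DATA_DONE_MSG  <-  each node once it has sent everything
+ *    DLM_FINALIZE_RECO_MSG   ->  all live nodes once all data is in
+ */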
+
+
+#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)
+
+static int dlm_recovery_thread(void *data)
+{
+ int status;
+ struct dlm_ctxt *dlm = data;
+ unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);
+
+ mlog(0, "dlm thread running for %s...\n", dlm->name);
+
+ while (!kthread_should_stop()) {
+ if (dlm_joined(dlm)) {
+ status = dlm_do_recovery(dlm);
+ if (status == -EAGAIN) {
+ /* do not sleep, recheck immediately. */
+ continue;
+ }
+ if (status < 0)
+ mlog_errno(status);
+ }
+
+ wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
+ kthread_should_stop(),
+ timeout);
+ }
+
+ mlog(0, "quitting DLM recovery thread\n");
+ return 0;
+}
+
+/* callers of the top-level api calls (dlmlock/dlmunlock) should
+ * block on the dlm->reco.event when recovery is in progress.
+ * the dlm recovery thread will set this state when it begins
+ * recovering a dead node (as the new master or not) and clear
+ * the state and wake as soon as all affected lock resources have
+ * been marked with the RECOVERY flag */
+static int dlm_in_recovery(struct dlm_ctxt *dlm)
+{
+ int in_recovery;
+ spin_lock(&dlm->spinlock);
+ in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
+ spin_unlock(&dlm->spinlock);
+ return in_recovery;
+}
+
+
+void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
+{
+ wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
+}
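+
+/* hypothetical caller-side sketch (the real callers are the top-level
+ * dlmlock/dlmunlock paths mentioned above):
+ *
+ *    dlm_wait_for_recovery(dlm);
+ *    ...safe to proceed, all affected lockres already marked RECOVERING...
+ */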
+
+static void dlm_begin_recovery(struct dlm_ctxt *dlm)
+{
+ spin_lock(&dlm->spinlock);
+ BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
+ dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
+ spin_unlock(&dlm->spinlock);
+}
+
+static void dlm_end_recovery(struct dlm_ctxt *dlm)
+{
+ spin_lock(&dlm->spinlock);
+ BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
+ dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
+ spin_unlock(&dlm->spinlock);
+ wake_up(&dlm->reco.event);
+}
+
+static int dlm_do_recovery(struct dlm_ctxt *dlm)
+{
+ int status = 0;
+
+ spin_lock(&dlm->spinlock);
+
+ /* check to see if the new master has died */
+ if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
+ test_bit(dlm->reco.new_master, dlm->recovery_map)) {
+ mlog(0, "new master %u died while recovering %u!\n",
+ dlm->reco.new_master, dlm->reco.dead_node);
+ /* unset the new_master, leave dead_node */
+ dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
+ }
+
+ /* select a target to recover */
+ if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
+ int bit;
+
+ bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0);
+ if (bit >= O2NM_MAX_NODES || bit < 0)
+ dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
+ else
+ dlm->reco.dead_node = bit;
+ } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
+ /* BUG? */
+ mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
+ dlm->reco.dead_node);
+ dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
+ }
+
+ if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
+ // mlog(0, "nothing to recover! sleeping now!\n");
+ spin_unlock(&dlm->spinlock);
+ /* return to main thread loop and sleep. */
+ return 0;
+ }
+ mlog(0, "recovery thread found node %u in the recovery map!\n",
+ dlm->reco.dead_node);
+ spin_unlock(&dlm->spinlock);
+
+ /* take write barrier */
+ /* (stops the list reshuffling thread, proxy ast handling) */
+ dlm_begin_recovery(dlm);
+
+ if (dlm->reco.new_master == dlm->node_num)
+ goto master_here;
+
+ if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
+ /* choose a new master */
+ if (!dlm_pick_recovery_master(dlm)) {
+ /* already notified everyone. go. */
+ dlm->reco.new_master = dlm->node_num;
+ goto master_here;
+ }
+ mlog(0, "another node will master this recovery session.\n");
+ }
+ mlog(0, "dlm=%s, new_master=%u, this node=%u, dead_node=%u\n",
+ dlm->name, dlm->reco.new_master,
+ dlm->node_num, dlm->reco.dead_node);
+
+ /* it is safe to start everything back up here
+ * because all of the dead node's lock resources
+ * have been marked as in-recovery */
+ dlm_end_recovery(dlm);
+
+ /* sleep out in main dlm_recovery_thread loop. */
+ return 0;
+
+master_here:
+ mlog(0, "mastering recovery of %s:%u here(this=%u)!\n",
+ dlm->name, dlm->reco.dead_node, dlm->node_num);
+
+ status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
+ if (status < 0) {
+ mlog(ML_ERROR, "error %d remastering locks for node %u, "
+ "retrying.\n", status, dlm->reco.dead_node);
+ } else {
+ /* success! see if any other nodes need recovery */
+ dlm_reset_recovery(dlm);
+ }
+ dlm_end_recovery(dlm);
+
+ /* continue and look for another dead node */
+ return -EAGAIN;
+}
+
+static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
+{
+ int status = 0;
+ struct dlm_reco_node_data *ndata;
+ struct list_head *iter;
+ int all_nodes_done;
+ int destroy = 0;
+ int pass = 0;
+
+ status = dlm_init_recovery_area(dlm, dead_node);
+ if (status < 0)
+ goto leave;
+
+ /* safe to access the node data list without a lock, since this
+ * process is the only one to change the list */
+ list_for_each(iter, &dlm->reco.node_data) {
+ ndata = list_entry (iter, struct dlm_reco_node_data, list);
+ BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
+ ndata->state = DLM_RECO_NODE_DATA_REQUESTING;
+
+ mlog(0, "requesting lock info from node %u\n",
+ ndata->node_num);
+
+ if (ndata->node_num == dlm->node_num) {
+ ndata->state = DLM_RECO_NODE_DATA_DONE;
+ continue;
+ }
+
+ status = dlm_request_all_locks(dlm, ndata->node_num, dead_node);
+ if (status < 0) {
+ mlog_errno(status);
+ if (dlm_is_host_down(status))
+ ndata->state = DLM_RECO_NODE_DATA_DEAD;
+ else {
+ destroy = 1;
+ goto leave;
+ }
+ }
+
+ switch (ndata->state) {
+ case DLM_RECO_NODE_DATA_INIT:
+ case DLM_RECO_NODE_DATA_FINALIZE_SENT:
+ case DLM_RECO_NODE_DATA_REQUESTED:
+ BUG();
+ break;
+ case DLM_RECO_NODE_DATA_DEAD:
+ mlog(0, "node %u died after requesting "
+ "recovery info for node %u\n",
+ ndata->node_num, dead_node);
+ // start all over
+ destroy = 1;
+ status = -EAGAIN;
+ goto leave;
+ case DLM_RECO_NODE_DATA_REQUESTING:
+ ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
+ mlog(0, "now receiving recovery data from "
+ "node %u for dead node %u\n",
+ ndata->node_num, dead_node);
+ break;
+ case DLM_RECO_NODE_DATA_RECEIVING:
+ mlog(0, "already receiving recovery data from "
+ "node %u for dead node %u\n",
+ ndata->node_num, dead_node);
+ break;
+ case DLM_RECO_NODE_DATA_DONE:
+ mlog(0, "already DONE receiving recovery data "
+ "from node %u for dead node %u\n",
+ ndata->node_num, dead_node);
+ break;
+ }
+ }
+
+ mlog(0, "done requesting all lock info\n");
+
+ /* nodes should be sending reco data now
+ * just need to wait */
+
+ while (1) {
+ /* check all the nodes now to see if we are
+ * done, or if anyone died */
+ all_nodes_done = 1;
+ spin_lock(&dlm_reco_state_lock);
+ list_for_each(iter, &dlm->reco.node_data) {
+ ndata = list_entry (iter, struct dlm_reco_node_data, list);
+
+ mlog(0, "checking recovery state of node %u\n",
+ ndata->node_num);
+ switch (ndata->state) {
+ case DLM_RECO_NODE_DATA_INIT:
+ case DLM_RECO_NODE_DATA_REQUESTING:
+ mlog(ML_ERROR, "bad ndata state for "
+ "node %u: state=%d\n",
+ ndata->node_num, ndata->state);
+ BUG();
+ break;
+ case DLM_RECO_NODE_DATA_DEAD:
+ mlog(0, "node %u died after "
+ "requesting recovery info for "
+ "node %u\n", ndata->node_num,
+ dead_node);
+ spin_unlock(&dlm_reco_state_lock);
+ // start all over
+ destroy = 1;
+ status = -EAGAIN;
+ goto leave;
+ case DLM_RECO_NODE_DATA_RECEIVING:
+ case DLM_RECO_NODE_DATA_REQUESTED:
+ all_nodes_done = 0;
+ break;
+ case DLM_RECO_NODE_DATA_DONE:
+ break;
+ case DLM_RECO_NODE_DATA_FINALIZE_SENT:
+ break;
+ }
+ }
+ spin_unlock(&dlm_reco_state_lock);
+
+ mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
+ all_nodes_done?"yes":"no");
+ if (all_nodes_done) {
+ int ret;
+
+ /* all nodes are now in DLM_RECO_NODE_DATA_DONE state
+ * just send a finalize message to everyone and
+ * clean up */
+ mlog(0, "all nodes are done! send finalize\n");
+ ret = dlm_send_finalize_reco_message(dlm);
+ if (ret < 0)
+ mlog_errno(ret);
+
+ spin_lock(&dlm->spinlock);
+ dlm_finish_local_lockres_recovery(dlm, dead_node,
+ dlm->node_num);
+ spin_unlock(&dlm->spinlock);
+ mlog(0, "should be done with recovery!\n");
+
+ mlog(0, "finishing recovery of %s at %lu, "
+ "dead=%u, this=%u, new=%u\n", dlm->name,
+ jiffies, dlm->reco.dead_node,
+ dlm->node_num, dlm->reco.new_master);
+ destroy = 1;
+ status = ret;
+ /* rescan everything marked dirty along the way */
+ dlm_kick_thread(dlm, NULL);
+ break;
+ }
+ /* wait to be signalled, with periodic timeout
+ * to check for node death */
+ wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
+ kthread_should_stop(),
+ msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));
+
+ }
+
+leave:
+ if (destroy)
+ dlm_destroy_recovery_area(dlm, dead_node);
+
+ mlog_exit(status);
+ return status;
+}
+
+static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
+{
+ int num=0;
+ struct dlm_reco_node_data *ndata;
+
+ spin_lock(&dlm->spinlock);
+ memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
+ /* nodes can only be removed (by dying) after dropping
+ * this lock, and death will be trapped later, so this should do */
+ spin_unlock(&dlm->spinlock);
+
+ while (1) {
+ num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
+ if (num >= O2NM_MAX_NODES) {
+ break;
+ }
+ BUG_ON(num == dead_node);
+
+ ndata = kcalloc(1, sizeof(*ndata), GFP_KERNEL);
+ if (!ndata) {
+ dlm_destroy_recovery_area(dlm, dead_node);
+ return -ENOMEM;
+ }
+ ndata->node_num = num;
+ ndata->state = DLM_RECO_NODE_DATA_INIT;
+ spin_lock(&dlm_reco_state_lock);
+ list_add_tail(&ndata->list, &dlm->reco.node_data);
+ spin_unlock(&dlm_reco_state_lock);
+ num++;
+ }
+
+ return 0;
+}
+
+static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
+{
+ struct list_head *iter, *iter2;
+ struct dlm_reco_node_data *ndata;
+ LIST_HEAD(tmplist);
+
+ spin_lock(&dlm_reco_state_lock);
+ list_splice_init(&dlm->reco.node_data, &tmplist);
+ spin_unlock(&dlm_reco_state_lock);
+
+ list_for_each_safe(iter, iter2, &tmplist) {
+ ndata = list_entry (iter, struct dlm_reco_node_data, list);
+ list_del_init(&ndata->list);
+ kfree(ndata);
+ }
+}
+
+static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
+ u8 dead_node)
+{
+ struct dlm_lock_request lr;
+ enum dlm_status ret;
+
+ mlog(0, "\n");
+
+
+ mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
+ "to %u\n", dead_node, request_from);
+
+ memset(&lr, 0, sizeof(lr));
+ lr.node_idx = dlm->node_num;
+ lr.dead_node = dead_node;
+
+ // send message
+ ret = DLM_NOLOCKMGR;
+ ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
+ &lr, sizeof(lr), request_from, NULL);
+
+ /* negative status is handled by caller */
+ if (ret < 0)
+ mlog_errno(ret);
+
+ // return from here, then
+ // sleep until all received or error
+ return ret;
+
+}
+
+int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
+ char *buf = NULL;
+ struct dlm_work_item *item = NULL;
+
+ if (!dlm_grab(dlm))
+ return -EINVAL;
+
+ BUG_ON(lr->dead_node != dlm->reco.dead_node);
+
+ item = kcalloc(1, sizeof(*item), GFP_KERNEL);
+ if (!item) {
+ dlm_put(dlm);
+ return -ENOMEM;
+ }
+
+ /* this will get freed by dlm_request_all_locks_worker */
+ buf = (char *) __get_free_page(GFP_KERNEL);
+ if (!buf) {
+ kfree(item);
+ dlm_put(dlm);
+ return -ENOMEM;
+ }
+
+ /* queue up work for dlm_request_all_locks_worker */
+ dlm_grab(dlm); /* get an extra ref for the work item */
+ dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
+ item->u.ral.reco_master = lr->node_idx;
+ item->u.ral.dead_node = lr->dead_node;
+ spin_lock(&dlm->work_lock);
+ list_add_tail(&item->list, &dlm->work_list);
+ spin_unlock(&dlm->work_lock);
+ schedule_work(&dlm->dispatched_work);
+
+ dlm_put(dlm);
+ return 0;
+}
+
+static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
+{
+ struct dlm_migratable_lockres *mres;
+ struct dlm_lock_resource *res;
+ struct dlm_ctxt *dlm;
+ LIST_HEAD(resources);
+ struct list_head *iter;
+ int ret;
+ u8 dead_node, reco_master;
+
+ dlm = item->dlm;
+ dead_node = item->u.ral.dead_node;
+ reco_master = item->u.ral.reco_master;
+ BUG_ON(dead_node != dlm->reco.dead_node);
+ BUG_ON(reco_master != dlm->reco.new_master);
+
+ mres = (struct dlm_migratable_lockres *)data;
+
+ /* lock resources should have already been moved to the
+ * dlm->reco.resources list. now move items from that list
+ * to a temp list if the dead owner matches. note that the
+ * whole cluster recovers only one node at a time, so we
+ * can safely move UNKNOWN lock resources for each recovery
+ * session. */
+ dlm_move_reco_locks_to_list(dlm, &resources, dead_node);
+
+ /* now we can begin blasting lockreses without the dlm lock */
+ list_for_each(iter, &resources) {
+ res = list_entry (iter, struct dlm_lock_resource, recovering);
+ ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
+ DLM_MRES_RECOVERY);
+ if (ret < 0)
+ mlog_errno(ret);
+ }
+
+ /* move the resources back to the list */
+ spin_lock(&dlm->spinlock);
+ list_splice_init(&resources, &dlm->reco.resources);
+ spin_unlock(&dlm->spinlock);
+
+ ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
+ if (ret < 0)
+ mlog_errno(ret);
+
+ free_page((unsigned long)data);
+}
+
+
+static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
+{
+ int ret, tmpret;
+ struct dlm_reco_data_done done_msg;
+
+ memset(&done_msg, 0, sizeof(done_msg));
+ done_msg.node_idx = dlm->node_num;
+ done_msg.dead_node = dead_node;
+ mlog(0, "sending DATA DONE message to %u, "
+ "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
+ done_msg.dead_node);
+
+ ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
+ sizeof(done_msg), send_to, &tmpret);
+ /* negative status is ignored by the caller */
+ if (ret >= 0)
+ ret = tmpret;
+ return ret;
+}
+
+
+int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
+ struct list_head *iter;
+ struct dlm_reco_node_data *ndata = NULL;
+ int ret = -EINVAL;
+
+ if (!dlm_grab(dlm))
+ return -EINVAL;
+
+ mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
+ "node_idx=%u, this node=%u\n", done->dead_node,
+ dlm->reco.dead_node, done->node_idx, dlm->node_num);
+ BUG_ON(done->dead_node != dlm->reco.dead_node);
+
+ spin_lock(&dlm_reco_state_lock);
+ list_for_each(iter, &dlm->reco.node_data) {
+ ndata = list_entry (iter, struct dlm_reco_node_data, list);
+ if (ndata->node_num != done->node_idx)
+ continue;
+
+ switch (ndata->state) {
+ case DLM_RECO_NODE_DATA_INIT:
+ case DLM_RECO_NODE_DATA_DEAD:
+ case DLM_RECO_NODE_DATA_DONE:
+ case DLM_RECO_NODE_DATA_FINALIZE_SENT:
+ mlog(ML_ERROR, "bad ndata state for node %u:"
+ " state=%d\n", ndata->node_num,
+ ndata->state);
+ BUG();
+ break;
+ case DLM_RECO_NODE_DATA_RECEIVING:
+ case DLM_RECO_NODE_DATA_REQUESTED:
+ case DLM_RECO_NODE_DATA_REQUESTING:
+ mlog(0, "node %u is DONE sending "
+ "recovery data!\n",
+ ndata->node_num);
+
+ ndata->state = DLM_RECO_NODE_DATA_DONE;
+ ret = 0;
+ break;
+ }
+ }
+ spin_unlock(&dlm_reco_state_lock);
+
+ /* wake the recovery thread, some node is done */
+ if (!ret)
+ dlm_kick_recovery_thread(dlm);
+
+ if (ret < 0)
+ mlog(ML_ERROR, "failed to find recovery node data for node "
+ "%u\n", done->node_idx);
+ dlm_put(dlm);
+
+ mlog(0, "leaving reco data done handler, ret=%d\n", ret);
+ return ret;
+}
+
+static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
+ struct list_head *list,
+ u8 dead_node)
+{
+ struct dlm_lock_resource *res;
+ struct list_head *iter, *iter2;
+
+ spin_lock(&dlm->spinlock);
+ list_for_each_safe(iter, iter2, &dlm->reco.resources) {
+ res = list_entry (iter, struct dlm_lock_resource, recovering);
+ if (dlm_is_recovery_lock(res->lockname.name,
+ res->lockname.len))
+ continue;
+ if (res->owner == dead_node) {
+ mlog(0, "found lockres owned by dead node while "
+ "doing recovery for node %u. sending it.\n",
+ dead_node);
+ list_del_init(&res->recovering);
+ list_add_tail(&res->recovering, list);
+ } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
+ mlog(0, "found UNKNOWN owner while doing recovery "
+ "for node %u. sending it.\n", dead_node);
+ list_del_init(&res->recovering);
+ list_add_tail(&res->recovering, list);
+ }
+ }
+ spin_unlock(&dlm->spinlock);
+}
+
+static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
+{
+ int total_locks = 0;
+ struct list_head *iter, *queue = &res->granted;
+ int i;
+
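+ /* the three queues (granted, converting, blocked) are laid out
+ * consecutively in struct dlm_lock_resource, so queue++ walks them
+ * in order (same trick as dlm_list_num_to_pointer below) */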
+ for (i=0; i<3; i++) {
+ list_for_each(iter, queue)
+ total_locks++;
+ queue++;
+ }
+ return total_locks;
+}
+
+
+static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
+ struct dlm_migratable_lockres *mres,
+ u8 send_to,
+ struct dlm_lock_resource *res,
+ int total_locks)
+{
+ u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
+ int mres_total_locks = be32_to_cpu(mres->total_locks);
+ int sz, ret = 0, status = 0;
+ u8 orig_flags = mres->flags,
+ orig_master = mres->master;
+
+ BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
+ if (!mres->num_locks)
+ return 0;
+
+ sz = sizeof(struct dlm_migratable_lockres) +
+ (mres->num_locks * sizeof(struct dlm_migratable_lock));
+
+ /* add an all-done flag if we reached the last lock */
+ orig_flags = mres->flags;
+ BUG_ON(total_locks > mres_total_locks);
+ if (total_locks == mres_total_locks)
+ mres->flags |= DLM_MRES_ALL_DONE;
+
+ /* send it */
+ ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
+ sz, send_to, &status);
+ if (ret < 0) {
+ /* XXX: negative status is not handled.
+ * this will end up killing this node. */
+ mlog_errno(ret);
+ } else {
+ /* might get an -ENOMEM back here */
+ ret = status;
+ if (ret < 0) {
+ mlog_errno(ret);
+
+ if (ret == -EFAULT) {
+ mlog(ML_ERROR, "node %u told me to kill "
+ "myself!\n", send_to);
+ BUG();
+ }
+ }
+ }
+
+ /* zero and reinit the message buffer */
+ dlm_init_migratable_lockres(mres, res->lockname.name,
+ res->lockname.len, mres_total_locks,
+ mig_cookie, orig_flags, orig_master);
+ return ret;
+}
+
+static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
+ const char *lockname, int namelen,
+ int total_locks, u64 cookie,
+ u8 flags, u8 master)
+{
+ /* mres here is one full page */
+ memset(mres, 0, PAGE_SIZE);
+ mres->lockname_len = namelen;
+ memcpy(mres->lockname, lockname, namelen);
+ mres->num_locks = 0;
+ mres->total_locks = cpu_to_be32(total_locks);
+ mres->mig_cookie = cpu_to_be64(cookie);
+ mres->flags = flags;
+ mres->master = master;
+}
+
+
+/* returns 1 if this lock fills the network structure,
+ * 0 otherwise */
+static int dlm_add_lock_to_array(struct dlm_lock *lock,
+ struct dlm_migratable_lockres *mres, int queue)
+{
+ struct dlm_migratable_lock *ml;
+ int lock_num = mres->num_locks;
+
+ ml = &(mres->ml[lock_num]);
+ ml->cookie = lock->ml.cookie;
+ ml->type = lock->ml.type;
+ ml->convert_type = lock->ml.convert_type;
+ ml->highest_blocked = lock->ml.highest_blocked;
+ ml->list = queue;
+ if (lock->lksb) {
+ ml->flags = lock->lksb->flags;
+ /* send our current lvb */
+ if (ml->type == LKM_EXMODE ||
+ ml->type == LKM_PRMODE) {
+ /* if it is already set, this had better be a PR
+ * and it has to match */
+ if (mres->lvb[0] && (ml->type == LKM_EXMODE ||
+ memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
+ mlog(ML_ERROR, "mismatched lvbs!\n");
+ __dlm_print_one_lock_resource(lock->lockres);
+ BUG();
+ }
+ memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
+ }
+ }
+ ml->node = lock->ml.node;
+ mres->num_locks++;
+ /* we reached the max, send this network message */
+ if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
+ return 1;
+ return 0;
+}
+
+
+int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
+ struct dlm_migratable_lockres *mres,
+ u8 send_to, u8 flags)
+{
+ struct list_head *queue, *iter;
+ int total_locks, i;
+ u64 mig_cookie = 0;
+ struct dlm_lock *lock;
+ int ret = 0;
+
+ BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
+
+ mlog(0, "sending to %u\n", send_to);
+
+ total_locks = dlm_num_locks_in_lockres(res);
+ if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
+ /* rare, but possible */
+ mlog(0, "argh. lockres has %d locks. this will "
+ "require more than one network packet to "
+ "migrate\n", total_locks);
+ mig_cookie = dlm_get_next_mig_cookie();
+ }
+
+ dlm_init_migratable_lockres(mres, res->lockname.name,
+ res->lockname.len, total_locks,
+ mig_cookie, flags, res->owner);
+
+ total_locks = 0;
+ for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
+ queue = dlm_list_idx_to_ptr(res, i);
+ list_for_each(iter, queue) {
+ lock = list_entry (iter, struct dlm_lock, list);
+
+ /* add another lock. */
+ total_locks++;
+ if (!dlm_add_lock_to_array(lock, mres, i))
+ continue;
+
+ /* this filled the lock message,
+ * we must send it immediately. */
+ ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
+ res, total_locks);
+ if (ret < 0) {
+ // TODO
+ mlog(ML_ERROR, "dlm_send_mig_lockres_msg "
+ "returned %d, TODO\n", ret);
+ BUG();
+ }
+ }
+ }
+ /* flush any remaining locks */
+ ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
+ if (ret < 0) {
+ // TODO
+ mlog(ML_ERROR, "dlm_send_mig_lockres_msg returned %d, "
+ "TODO\n", ret);
+ BUG();
+ }
+ return ret;
+}
+
+
+
+/*
+ * this message will contain no more than one page worth of
+ * recovery data, and it will work on only one lockres.
+ * there may be many locks in this page, and we may need to wait
+ * for additional packets to complete all the locks (rare, but
+ * possible).
+ */
+/*
+ * NOTE: the allocation error cases here are scary
+ * we really cannot afford to fail an alloc in recovery
+ * do we spin? returning an error only delays the problem really
+ */
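+
+/* wire format recap (from dlm_init_migratable_lockres and
+ * dlm_add_lock_to_array above): one struct dlm_migratable_lockres header
+ * followed by up to DLM_MAX_MIGRATABLE_LOCKS dlm_migratable_lock entries,
+ * all packed into a single page; a lockres with more locks is split
+ * across several messages tied together by mig_cookie, with
+ * DLM_MRES_ALL_DONE set on the last one. */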
+
+int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_migratable_lockres *mres =
+ (struct dlm_migratable_lockres *)msg->buf;
+ int ret = 0;
+ u8 real_master;
+ char *buf = NULL;
+ struct dlm_work_item *item = NULL;
+ struct dlm_lock_resource *res = NULL;
+
+ if (!dlm_grab(dlm))
+ return -EINVAL;
+
+ BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
+
+ real_master = mres->master;
+ if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
+ /* cannot migrate a lockres with no master */
+ BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
+ }
+
+ mlog(0, "%s message received from node %u\n",
+ (mres->flags & DLM_MRES_RECOVERY) ?
+ "recovery" : "migration", mres->master);
+ if (mres->flags & DLM_MRES_ALL_DONE)
+ mlog(0, "all done flag. all lockres data received!\n");
+
+ ret = -ENOMEM;
+ buf = kmalloc(be16_to_cpu(msg->data_len), GFP_KERNEL);
+ item = kcalloc(1, sizeof(*item), GFP_KERNEL);
+ if (!buf || !item)
+ goto leave;
+
+ /* lookup the lock to see if we have a secondary queue for this
+ * already... just add the locks in and this will have its owner
+ * and RECOVERY flag changed when it completes. */
+ res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
+ if (res) {
+ /* this will get a ref on res */
+ /* mark it as recovering/migrating and hash it */
+ spin_lock(&res->spinlock);
+ if (mres->flags & DLM_MRES_RECOVERY) {
+ res->state |= DLM_LOCK_RES_RECOVERING;
+ } else {
+ if (res->state & DLM_LOCK_RES_MIGRATING) {
+ /* this is at least the second
+ * lockres message */
+ mlog(0, "lock %.*s is already migrating\n",
+ mres->lockname_len,
+ mres->lockname);
+ } else if (res->state & DLM_LOCK_RES_RECOVERING) {
+ /* caller should BUG */
+ mlog(ML_ERROR, "node is attempting to migrate "
+ "lock %.*s, but marked as recovering!\n",
+ mres->lockname_len, mres->lockname);
+ ret = -EFAULT;
+ spin_unlock(&res->spinlock);
+ goto leave;
+ }
+ res->state |= DLM_LOCK_RES_MIGRATING;
+ }
+ spin_unlock(&res->spinlock);
+ } else {
+ /* need to allocate, just like if it was
+ * mastered here normally */
+ res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
+ if (!res)
+ goto leave;
+
+ /* to match the ref that we would have gotten if
+ * dlm_lookup_lockres had succeeded */
+ dlm_lockres_get(res);
+
+ /* mark it as recovering/migrating and hash it */
+ if (mres->flags & DLM_MRES_RECOVERY)
+ res->state |= DLM_LOCK_RES_RECOVERING;
+ else
+ res->state |= DLM_LOCK_RES_MIGRATING;
+
+ spin_lock(&dlm->spinlock);
+ __dlm_insert_lockres(dlm, res);
+ spin_unlock(&dlm->spinlock);
+
+ /* now that the new lockres is inserted,
+ * make it usable by other processes */
+ spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+ spin_unlock(&res->spinlock);
+
+ /* add an extra ref for just-allocated lockres
+ * otherwise the lockres will be purged immediately */
+ dlm_lockres_get(res);
+
+ }
+
+ /* at this point we have allocated everything we need,
+ * and we have a hashed lockres with an extra ref and
+ * the proper res->state flags. */
+ ret = 0;
+ if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
+ /* migration cannot have an unknown master */
+ BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
+ mlog(0, "recovery has passed me a lockres with an "
+ "unknown owner.. will need to requery: "
+ "%.*s\n", mres->lockname_len, mres->lockname);
+ } else {
+ spin_lock(&res->spinlock);
+ dlm_change_lockres_owner(dlm, res, dlm->node_num);
+ spin_unlock(&res->spinlock);
+ }
+
+ /* queue up work for dlm_mig_lockres_worker */
+ dlm_grab(dlm); /* get an extra ref for the work item */
+ memcpy(buf, msg->buf, be16_to_cpu(msg->data_len)); /* copy the whole message */
+ dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
+ item->u.ml.lockres = res; /* already have a ref */
+ item->u.ml.real_master = real_master;
+ spin_lock(&dlm->work_lock);
+ list_add_tail(&item->list, &dlm->work_list);
+ spin_unlock(&dlm->work_lock);
+ schedule_work(&dlm->dispatched_work);
+
+leave:
+ dlm_put(dlm);
+ if (ret < 0) {
+ if (buf)
+ kfree(buf);
+ if (item)
+ kfree(item);
+ }
+
+ mlog_exit(ret);
+ return ret;
+}
+
+
+static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_migratable_lockres *mres;
+ int ret = 0;
+ struct dlm_lock_resource *res;
+ u8 real_master;
+
+ dlm = item->dlm;
+ mres = (struct dlm_migratable_lockres *)data;
+
+ res = item->u.ml.lockres;
+ real_master = item->u.ml.real_master;
+
+ if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
+ /* this case is super-rare. only occurs if
+ * node death happens during migration. */
+again:
+ ret = dlm_lockres_master_requery(dlm, res, &real_master);
+ if (ret < 0) {
+ mlog(0, "dlm_lockres_master_requery failure: %d\n",
+ ret);
+ goto again;
+ }
+ if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
+ mlog(0, "lockres %.*s not claimed. "
+ "this node will take it.\n",
+ res->lockname.len, res->lockname.name);
+ } else {
+ mlog(0, "master needs to respond to sender "
+ "that node %u still owns %.*s\n",
+ real_master, res->lockname.len,
+ res->lockname.name);
+ /* cannot touch this lockres */
+ goto leave;
+ }
+ }
+
+ ret = dlm_process_recovery_data(dlm, res, mres);
+ if (ret < 0)
+ mlog(0, "dlm_process_recovery_data returned %d\n", ret);
+ else
+ mlog(0, "dlm_process_recovery_data succeeded\n");
+
+ if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
+ (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
+ ret = dlm_finish_migration(dlm, res, mres->master);
+ if (ret < 0)
+ mlog_errno(ret);
+ }
+
+leave:
+ kfree(data);
+ mlog_exit(ret);
+}
+
+
+
+static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ u8 *real_master)
+{
+ struct dlm_node_iter iter;
+ int nodenum;
+ int ret = 0;
+
+ *real_master = DLM_LOCK_RES_OWNER_UNKNOWN;
+
+ /* we only reach here if one of the two nodes in a
+ * migration died while the migration was in progress.
+ * at this point we need to requery the master. we
+ * know that the new_master got as far as creating
+ * an mle on at least one node, but we do not know
+ * if any nodes had actually cleared the mle and set
+ * the master to the new_master. the old master
+ * is supposed to set the owner to UNKNOWN in the
+ * event of a new_master death, so the only possible
+ * responses that we can get from nodes here are
+ * that the master is new_master, or that the master
+ * is UNKNOWN.
+ * if all nodes come back with UNKNOWN then we know
+ * the lock needs remastering here.
+ * if any node comes back with a valid master, check
+ * to see if that master is the one that we are
+ * recovering. if so, then the new_master died and
+ * we need to remaster this lock. if not, then the
+ * new_master survived and that node will respond to
+ * other nodes about the owner.
+ * if there is an owner, this node needs to dump this
+ * lockres and alert the sender that this lockres
+ * was rejected. */
+ spin_lock(&dlm->spinlock);
+ dlm_node_iter_init(dlm->domain_map, &iter);
+ spin_unlock(&dlm->spinlock);
+
+ while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
+ /* do not send to self */
+ if (nodenum == dlm->node_num)
+ continue;
+ ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
+ if (ret < 0) {
+ mlog_errno(ret);
+ BUG();
+ /* TODO: need to figure a way to restart this */
+ }
+ if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
+ mlog(0, "lock master is %u\n", *real_master);
+ break;
+ }
+ }
+ return ret;
+}
+
+
+static int dlm_do_master_requery(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ u8 nodenum, u8 *real_master)
+{
+ int ret = -EINVAL;
+ struct dlm_master_requery req;
+ int status = DLM_LOCK_RES_OWNER_UNKNOWN;
+
+ memset(&req, 0, sizeof(req));
+ req.node_idx = dlm->node_num;
+ req.namelen = res->lockname.len;
+ memcpy(req.name, res->lockname.name, res->lockname.len);
+
+ ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
+ &req, sizeof(req), nodenum, &status);
+ /* XXX: negative status not handled properly here. */
+ if (ret < 0)
+ mlog_errno(ret);
+ else {
+ BUG_ON(status < 0);
+ BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
+ *real_master = (u8) (status & 0xff);
+ mlog(0, "node %u responded to master requery with %u\n",
+ nodenum, *real_master);
+ ret = 0;
+ }
+ return ret;
+}
+
+
+/* this function cannot error, so unless the sending
+ * or receiving of the message failed, the owner can
+ * be trusted */
+int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
+ struct dlm_lock_resource *res = NULL;
+ int master = DLM_LOCK_RES_OWNER_UNKNOWN;
+ u32 flags = DLM_ASSERT_MASTER_REQUERY;
+
+ if (!dlm_grab(dlm)) {
+ /* since the domain has gone away on this
+ * node, the proper response is UNKNOWN */
+ return master;
+ }
+
+ spin_lock(&dlm->spinlock);
+ res = __dlm_lookup_lockres(dlm, req->name, req->namelen);
+ if (res) {
+ spin_lock(&res->spinlock);
+ master = res->owner;
+ if (master == dlm->node_num) {
+ int ret = dlm_dispatch_assert_master(dlm, res,
+ 0, 0, flags);
+ if (ret < 0) {
+ mlog_errno(-ENOMEM);
+ /* retry!? */
+ BUG();
+ }
+ }
+ spin_unlock(&res->spinlock);
+ }
+ spin_unlock(&dlm->spinlock);
+
+ dlm_put(dlm);
+ return master;
+}
+
+static inline struct list_head *
+dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
+{
+ struct list_head *ret;
+ BUG_ON(list_num < 0);
+ BUG_ON(list_num > 2);
+ ret = &(res->granted);
+ ret += list_num;
+ return ret;
+}
+/* TODO: do ast flush business
+ * TODO: do MIGRATING and RECOVERING spinning
+ */
+
+/*
+* NOTE about in-flight requests during migration:
+*
+* Before attempting the migrate, the master has marked the lockres as
+* MIGRATING and then flushed all of its pending ASTs. So any in-flight
+* requests either got queued before the MIGRATING flag got set, in which
+* case the lock data will reflect the change and a return message is on
+* the way, or the request failed to get in before MIGRATING got set. In
+* this case, the caller will be told to spin and wait for the MIGRATING
+* flag to be dropped, then recheck the master.
+* This holds true for the convert, cancel and unlock cases, and since lvb
+* updates are tied to these same messages, it applies to lvb updates as
+* well. For the lock case, there is no way a lock can be on the master
+* queue and not be on the secondary queue since the lock is always added
+* locally first. This means that the new target node will never be sent
+* a lock that he doesn't already have on the list.
+* In total, this means that the local lock is correct and should not be
+* updated to match the one sent by the master. Any messages sent back
+* from the master before the MIGRATING flag will bring the lock properly
+* up-to-date, and the change will be ordered properly for the waiter.
+* We will *not* attempt to modify the lock underneath the waiter.
+*/
+
+static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_migratable_lockres *mres)
+{
+ struct dlm_migratable_lock *ml;
+ struct list_head *queue;
+ struct dlm_lock *newlock = NULL;
+ struct dlm_lockstatus *lksb = NULL;
+ int ret = 0;
+ int i;
+ struct list_head *iter;
+ struct dlm_lock *lock = NULL;
+
+ mlog(0, "running %d locks for this lockres\n", mres->num_locks);
+ for (i=0; i<mres->num_locks; i++) {
+ ml = &(mres->ml[i]);
+ BUG_ON(ml->highest_blocked != LKM_IVMODE);
+ newlock = NULL;
+ lksb = NULL;
+
+ queue = dlm_list_num_to_pointer(res, ml->list);
+
+ /* if the lock is for the local node it needs to
+ * be moved to the proper location within the queue.
+ * do not allocate a new lock structure. */
+ if (ml->node == dlm->node_num) {
+ /* MIGRATION ONLY! */
+ BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
+
+ spin_lock(&res->spinlock);
+ list_for_each(iter, queue) {
+ lock = list_entry (iter, struct dlm_lock, list);
+ if (lock->ml.cookie != ml->cookie)
+ lock = NULL;
+ else
+ break;
+ }
+
+ /* lock is always created locally first, and
+ * destroyed locally last. it must be on the list */
+ if (!lock) {
+ mlog(ML_ERROR, "could not find local lock "
+ "with cookie %"MLFu64"!\n",
+ ml->cookie);
+ BUG();
+ }
+ BUG_ON(lock->ml.node != ml->node);
+
+ /* see NOTE above about why we do not update
+ * to match the master here */
+
+ /* move the lock to its proper place */
+ /* do not alter lock refcount. switching lists. */
+ list_del_init(&lock->list);
+ list_add_tail(&lock->list, queue);
+ spin_unlock(&res->spinlock);
+
+ mlog(0, "just reordered a local lock!\n");
+ continue;
+ }
+
+ /* lock is for another node. */
+ newlock = dlm_new_lock(ml->type, ml->node,
+ be64_to_cpu(ml->cookie), NULL);
+ if (!newlock) {
+ ret = -ENOMEM;
+ goto leave;
+ }
+ lksb = newlock->lksb;
+ dlm_lock_attach_lockres(newlock, res);
+
+ if (ml->convert_type != LKM_IVMODE) {
+ BUG_ON(queue != &res->converting);
+ newlock->ml.convert_type = ml->convert_type;
+ }
+ lksb->flags |= (ml->flags &
+ (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
+
+ if (mres->lvb[0]) {
+ if (lksb->flags & DLM_LKSB_PUT_LVB) {
+ /* other node was trying to update
+ * lvb when node died. recreate the
+ * lksb with the updated lvb. */
+ memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
+ } else {
+ /* otherwise, the node is sending its
+ * most recent valid lvb info */
+ BUG_ON(ml->type != LKM_EXMODE &&
+ ml->type != LKM_PRMODE);
+ if (res->lvb[0] && (ml->type == LKM_EXMODE ||
+ memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
+ mlog(ML_ERROR, "received bad lvb!\n");
+ __dlm_print_one_lock_resource(res);
+ BUG();
+ }
+ memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
+ }
+ }
+
+
+ /* NOTE:
+ * wrt lock queue ordering and recovery:
+ * 1. order of locks on granted queue is
+ * meaningless.
+ * 2. order of locks on converting queue is
+ * LOST with the node death. sorry charlie.
+ * 3. order of locks on the blocked queue is
+ * also LOST.
+ * order of locks does not affect integrity, it
+ * just means that a lock request may get pushed
+ * back in line as a result of the node death.
+ * also note that for a given node the lock order
+ * for its secondary queue locks is preserved
+ * relative to each other, but clearly *not*
+ * preserved relative to locks from other nodes.
+ */
+ spin_lock(&res->spinlock);
+ dlm_lock_get(newlock);
+ list_add_tail(&newlock->list, queue);
+ spin_unlock(&res->spinlock);
+ }
+ mlog(0, "done running all the locks\n");
+
+leave:
+ if (ret < 0) {
+ mlog_errno(ret);
+ if (newlock)
+ dlm_lock_put(newlock);
+ }
+
+ mlog_exit(ret);
+ return ret;
+}
+
+void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ int i;
+ struct list_head *queue, *iter, *iter2;
+ struct dlm_lock *lock;
+
+ res->state |= DLM_LOCK_RES_RECOVERING;
+ if (!list_empty(&res->recovering))
+ list_del_init(&res->recovering);
+ list_add_tail(&res->recovering, &dlm->reco.resources);
+
+ /* find any pending locks and put them back on proper list */
+ for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
+ queue = dlm_list_idx_to_ptr(res, i);
+ list_for_each_safe(iter, iter2, queue) {
+ lock = list_entry (iter, struct dlm_lock, list);
+ dlm_lock_get(lock);
+ if (lock->convert_pending) {
+ /* move converting lock back to granted */
+ BUG_ON(i != DLM_CONVERTING_LIST);
+ mlog(0, "node died with convert pending "
+ "on %.*s. move back to granted list.\n",
+ res->lockname.len, res->lockname.name);
+ dlm_revert_pending_convert(res, lock);
+ lock->convert_pending = 0;
+ } else if (lock->lock_pending) {
+ /* remove pending lock requests completely */
+ BUG_ON(i != DLM_BLOCKED_LIST);
+ mlog(0, "node died with lock pending "
+ "on %.*s. remove from blocked list and skip.\n",
+ res->lockname.len, res->lockname.name);
+ /* lock will be floating until ref in
+ * dlmlock_remote is freed after the network
+ * call returns. ok for it to not be on any
+ * list since no ast can be called
+ * (the master is dead). */
+ dlm_revert_pending_lock(res, lock);
+ lock->lock_pending = 0;
+ } else if (lock->unlock_pending) {
+ /* if an unlock was in progress, treat as
+ * if this had completed successfully
+ * before sending this lock state to the
+ * new master. note that the dlm_unlock
+ * call is still responsible for calling
+ * the unlockast. that will happen after
+ * the network call times out. for now,
+ * just move lists to prepare the new
+ * recovery master. */
+ BUG_ON(i != DLM_GRANTED_LIST);
+ mlog(0, "node died with unlock pending "
+ "on %.*s. remove from blocked list and skip.\n",
+ res->lockname.len, res->lockname.name);
+ dlm_commit_pending_unlock(res, lock);
+ lock->unlock_pending = 0;
+ } else if (lock->cancel_pending) {
+ /* if a cancel was in progress, treat as
+ * if this had completed successfully
+ * before sending this lock state to the
+ * new master */
+ BUG_ON(i != DLM_CONVERTING_LIST);
+ mlog(0, "node died with cancel pending "
+ "on %.*s. move back to granted list.\n",
+ res->lockname.len, res->lockname.name);
+ dlm_commit_pending_cancel(res, lock);
+ lock->cancel_pending = 0;
+ }
+ dlm_lock_put(lock);
+ }
+ }
+}
+
+
+
+/* removes all recovered locks from the recovery list.
+ * sets the res->owner to the new master.
+ * unsets the RECOVERY flag and wakes waiters. */
+static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
+ u8 dead_node, u8 new_master)
+{
+ int i;
+ struct list_head *iter, *iter2, *bucket;
+ struct dlm_lock_resource *res;
+
+ mlog_entry_void();
+
+ assert_spin_locked(&dlm->spinlock);
+
+ list_for_each_safe(iter, iter2, &dlm->reco.resources) {
+ res = list_entry (iter, struct dlm_lock_resource, recovering);
+ if (res->owner == dead_node) {
+ list_del_init(&res->recovering);
+ spin_lock(&res->spinlock);
+ dlm_change_lockres_owner(dlm, res, new_master);
+ res->state &= ~DLM_LOCK_RES_RECOVERING;
+ __dlm_dirty_lockres(dlm, res);
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+ }
+ }
+
+ /* this will become unnecessary eventually, but
+ * for now we need to run the whole hash, clear
+ * the RECOVERING state and set the owner
+ * if necessary */
+ for (i=0; i<DLM_HASH_SIZE; i++) {
+ bucket = &(dlm->resources[i]);
+ list_for_each(iter, bucket) {
+ res = list_entry (iter, struct dlm_lock_resource, list);
+ if (res->state & DLM_LOCK_RES_RECOVERING) {
+ if (res->owner == dead_node) {
+ mlog(0, "(this=%u) res %.*s owner=%u "
+ "was not on recovering list, but "
+ "clearing state anyway\n",
+ dlm->node_num, res->lockname.len,
+ res->lockname.name, new_master);
+ } else if (res->owner == dlm->node_num) {
+ mlog(0, "(this=%u) res %.*s owner=%u "
+ "was not on recovering list, "
+ "owner is THIS node, clearing\n",
+ dlm->node_num, res->lockname.len,
+ res->lockname.name, new_master);
+ } else
+ continue;
+
+ spin_lock(&res->spinlock);
+ dlm_change_lockres_owner(dlm, res, new_master);
+ res->state &= ~DLM_LOCK_RES_RECOVERING;
+ __dlm_dirty_lockres(dlm, res);
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+ }
+ }
+ }
+}
+
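+/* lvb invalidation helper used by dlm_revalidate_lvb below: for the
+ * master copy, a lock is grounds for blanking the lvb only if the dead
+ * node held it in EX; for a secondary copy, any local lock held below PR
+ * means the cached lvb can no longer be trusted. */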
+static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
+{
+ if (local) {
+ if (lock->ml.type != LKM_EXMODE &&
+ lock->ml.type != LKM_PRMODE)
+ return 1;
+ } else if (lock->ml.type == LKM_EXMODE)
+ return 1;
+ return 0;
+}
+
+static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, u8 dead_node)
+{
+ struct list_head *iter, *queue;
+ struct dlm_lock *lock;
+ int blank_lvb = 0, local = 0;
+ int i;
+ u8 search_node;
+
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&res->spinlock);
+
+ if (res->owner == dlm->node_num)
+ /* if this node owned the lockres, and if the dead node
+ * had an EX when he died, blank out the lvb */
+ search_node = dead_node;
+ else {
+ /* if this is a secondary lockres, and we had no EX or PR
+ * locks granted, we can no longer trust the lvb */
+ search_node = dlm->node_num;
+ local = 1; /* check local state for valid lvb */
+ }
+
+ for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
+ queue = dlm_list_idx_to_ptr(res, i);
+ list_for_each(iter, queue) {
+ lock = list_entry (iter, struct dlm_lock, list);
+ if (lock->ml.node == search_node) {
+ if (dlm_lvb_needs_invalidation(lock, local)) {
+ /* zero the lksb lvb and lockres lvb */
+ blank_lvb = 1;
+ memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
+ }
+ }
+ }
+ }
+
+ if (blank_lvb) {
+ mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
+ res->lockname.len, res->lockname.name, dead_node);
+ memset(res->lvb, 0, DLM_LVB_LEN);
+ }
+}
+
+static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res, u8 dead_node)
+{
+ struct list_head *iter, *tmpiter;
+ struct dlm_lock *lock;
+
+ /* this node is the lockres master:
+ * 1) remove any stale locks for the dead node
+ * 2) if the dead node had an EX when he died, blank out the lvb
+ */
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&res->spinlock);
+
+ /* TODO: check pending_asts, pending_basts here */
+ list_for_each_safe(iter, tmpiter, &res->granted) {
+ lock = list_entry (iter, struct dlm_lock, list);
+ if (lock->ml.node == dead_node) {
+ list_del_init(&lock->list);
+ dlm_lock_put(lock);
+ }
+ }
+ list_for_each_safe(iter, tmpiter, &res->converting) {
+ lock = list_entry (iter, struct dlm_lock, list);
+ if (lock->ml.node == dead_node) {
+ list_del_init(&lock->list);
+ dlm_lock_put(lock);
+ }
+ }
+ list_for_each_safe(iter, tmpiter, &res->blocked) {
+ lock = list_entry (iter, struct dlm_lock, list);
+ if (lock->ml.node == dead_node) {
+ list_del_init(&lock->list);
+ dlm_lock_put(lock);
+ }
+ }
+
+ /* do not kick thread yet */
+ __dlm_dirty_lockres(dlm, res);
+}
+
+/* if this node is the recovery master, and there are no
+ * locks for a given lockres owned by this node that are in
+ * either PR or EX mode, zero out the lvb before requesting.
+ *
+ */
+
+
+static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
+{
+ struct list_head *iter;
+ struct dlm_lock_resource *res;
+ int i;
+ struct list_head *bucket;
+
+
+ /* purge any stale mles */
+ dlm_clean_master_list(dlm, dead_node);
+
+ /*
+ * now clean up all lock resources. there are two rules:
+ *
+ * 1) if the dead node was the master, move the lockres
+ * to the recovering list. set the RECOVERING flag.
+ * this lockres needs to be cleaned up before it can
+ * be used further.
+ *
+ * 2) if this node was the master, remove all locks from
+ * each of the lockres queues that were owned by the
+ * dead node. once recovery finishes, the dlm thread
+ * can be kicked again to see if any ASTs or BASTs
+ * need to be fired as a result.
+ */
+ for (i=0; i<DLM_HASH_SIZE; i++) {
+ bucket = &(dlm->resources[i]);
+ list_for_each(iter, bucket) {
+ res = list_entry (iter, struct dlm_lock_resource, list);
+ if (dlm_is_recovery_lock(res->lockname.name,
+ res->lockname.len))
+ continue;
+
+ spin_lock(&res->spinlock);
+ /* zero the lvb if necessary */
+ dlm_revalidate_lvb(dlm, res, dead_node);
+ if (res->owner == dead_node)
+ dlm_move_lockres_to_recovery_list(dlm, res);
+ else if (res->owner == dlm->node_num) {
+ dlm_free_dead_locks(dlm, res, dead_node);
+ __dlm_lockres_calc_usage(dlm, res);
+ }
+ spin_unlock(&res->spinlock);
+ }
+ }
+
+}
+
+static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
+{
+ assert_spin_locked(&dlm->spinlock);
+
+ /* check to see if the node is already considered dead */
+ if (!test_bit(idx, dlm->live_nodes_map)) {
+ mlog(0, "for domain %s, node %d is already dead. "
+ "another node likely did recovery already.\n",
+ dlm->name, idx);
+ return;
+ }
+
+ /* check to see if we do not care about this node */
+ if (!test_bit(idx, dlm->domain_map)) {
+ /* This also catches the case that we get a node down
+ * but haven't joined the domain yet. */
+ mlog(0, "node %u already removed from domain!\n", idx);
+ return;
+ }
+
+ clear_bit(idx, dlm->live_nodes_map);
+
+ /* Clean up join state on node death. */
+ if (dlm->joining_node == idx) {
+ mlog(0, "Clearing join state for node %u\n", idx);
+ __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
+ }
+
+ /* make sure local cleanup occurs before the heartbeat events */
+ if (!test_bit(idx, dlm->recovery_map))
+ dlm_do_local_recovery_cleanup(dlm, idx);
+
+ /* notify anything attached to the heartbeat events */
+ dlm_hb_event_notify_attached(dlm, idx, 0);
+
+ mlog(0, "node %u being removed from domain map!\n", idx);
+ clear_bit(idx, dlm->domain_map);
+ /* wake up migration waiters if a node goes down.
+ * perhaps later we can genericize this for other waiters. */
+ wake_up(&dlm->migration_wq);
+
+ if (test_bit(idx, dlm->recovery_map))
+ mlog(0, "domain %s, node %u already added "
+ "to recovery map!\n", dlm->name, idx);
+ else
+ set_bit(idx, dlm->recovery_map);
+}
+
+void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
+{
+ struct dlm_ctxt *dlm = data;
+
+ if (!dlm_grab(dlm))
+ return;
+
+ spin_lock(&dlm->spinlock);
+ __dlm_hb_node_down(dlm, idx);
+ spin_unlock(&dlm->spinlock);
+
+ dlm_put(dlm);
+}
+
+void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
+{
+ struct dlm_ctxt *dlm = data;
+
+ if (!dlm_grab(dlm))
+ return;
+
+ spin_lock(&dlm->spinlock);
+
+ set_bit(idx, dlm->live_nodes_map);
+
+ /* notify any mles attached to the heartbeat events */
+ dlm_hb_event_notify_attached(dlm, idx, 1);
+
+ spin_unlock(&dlm->spinlock);
+
+ dlm_put(dlm);
+}
+
+static void dlm_reco_ast(void *astdata)
+{
+ struct dlm_ctxt *dlm = astdata;
+ mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
+ dlm->node_num, dlm->name);
+}
+static void dlm_reco_bast(void *astdata, int blocked_type)
+{
+ struct dlm_ctxt *dlm = astdata;
+ mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
+ dlm->node_num, dlm->name);
+}
+static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
+{
+ mlog(0, "unlockast for recovery lock fired!\n");
+}
+
+
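+/* recovery master election sketch (as implemented below): every node
+ * that notices the death races to take the DLM_RECOVERY_LOCK_NAME
+ * lockres in EX with LKM_NOQUEUE|LKM_RECOVERY. DLM_NORMAL means this
+ * node won and broadcasts DLM_BEGIN_RECO_MSG; DLM_NOTQUEUED means some
+ * other node won, so this node just waits for reco.new_master to be set
+ * by that node's begin-reco message. */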
+static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
+{
+ enum dlm_status ret;
+ struct dlm_lockstatus lksb;
+ int status = -EINVAL;
+
+ mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
+ dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
+retry:
+ memset(&lksb, 0, sizeof(lksb));
+
+ ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
+ DLM_RECOVERY_LOCK_NAME, dlm_reco_ast, dlm, dlm_reco_bast);
+
+ if (ret == DLM_NORMAL) {
+ mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
+ dlm->name, dlm->node_num);
+ /* I am master, send message to all nodes saying
+ * that I am beginning a recovery session */
+ status = dlm_send_begin_reco_message(dlm,
+ dlm->reco.dead_node);
+
+ /* recovery lock is a special case. ast will not get fired,
+ * so just go ahead and unlock it. */
+ ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
+ if (ret != DLM_NORMAL) {
+ /* this would really suck. this could only happen
+ * if there was a network error during the unlock
+ * because of node death. this means the unlock
+ * is actually "done" and the lock structure is
+ * even freed. we can continue, but only
+ * because this specific lock name is special. */
+ mlog(0, "dlmunlock returned %d\n", ret);
+ }
+
+ if (status < 0) {
+ mlog(0, "failed to send recovery message. "
+ "must retry with new node map.\n");
+ goto retry;
+ }
+ } else if (ret == DLM_NOTQUEUED) {
+ mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
+ dlm->name, dlm->node_num);
+ /* another node is master. wait on
+ * reco.new_master != O2NM_INVALID_NODE_NUM */
+ status = -EEXIST;
+ }
+
+ return status;
+}
+
+static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
+{
+ struct dlm_begin_reco br;
+ int ret = 0;
+ struct dlm_node_iter iter;
+ int nodenum;
+ int status;
+
+ mlog_entry("%u\n", dead_node);
+
+ mlog(0, "dead node is %u\n", dead_node);
+
+ spin_lock(&dlm->spinlock);
+ dlm_node_iter_init(dlm->domain_map, &iter);
+ spin_unlock(&dlm->spinlock);
+
+ clear_bit(dead_node, iter.node_map);
+
+ memset(&br, 0, sizeof(br));
+ br.node_idx = dlm->node_num;
+ br.dead_node = dead_node;
+
+ while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
+ ret = 0;
+ if (nodenum == dead_node) {
+ mlog(0, "not sending begin reco to dead node "
+ "%u\n", dead_node);
+ continue;
+ }
+ if (nodenum == dlm->node_num) {
+ mlog(0, "not sending begin reco to self\n");
+ continue;
+ }
+
+ ret = -EINVAL;
+ mlog(0, "attempting to send begin reco msg to %d\n",
+ nodenum);
+ ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
+ &br, sizeof(br), nodenum, &status);
+ /* negative status is handled ok by caller here */
+ if (ret >= 0)
+ ret = status;
+ if (ret < 0) {
+ struct dlm_lock_resource *res;
+ mlog_errno(ret);
+ mlog(ML_ERROR, "begin reco of dlm %s to node %u "
+ " returned %d\n", dlm->name, nodenum, ret);
+ res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
+ DLM_RECOVERY_LOCK_NAME_LEN);
+ if (res) {
+ dlm_print_one_lock_resource(res);
+ dlm_lockres_put(res);
+ } else {
+ mlog(ML_ERROR, "recovery lock not found\n");
+ }
+ break;
+ }
+ }
+
+ return ret;
+}
+
+int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;
+
+ /* ok to return 0, domain has gone away */
+ if (!dlm_grab(dlm))
+ return 0;
+
+ mlog(0, "node %u wants to recover node %u\n",
+ br->node_idx, br->dead_node);
+
+ dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);
+
+ spin_lock(&dlm->spinlock);
+ if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
+ mlog(0, "new_master already set to %u!\n",
+ dlm->reco.new_master);
+ }
+ if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
+ mlog(0, "dead_node already set to %u!\n",
+ dlm->reco.dead_node);
+ }
+ dlm->reco.new_master = br->node_idx;
+ dlm->reco.dead_node = br->dead_node;
+ if (!test_bit(br->dead_node, dlm->recovery_map)) {
+ mlog(ML_ERROR, "recovery master %u sees %u as dead, but this "
+ "node has not yet. marking %u as dead\n",
+ br->node_idx, br->dead_node, br->dead_node);
+ __dlm_hb_node_down(dlm, br->dead_node);
+ }
+ spin_unlock(&dlm->spinlock);
+
+ dlm_kick_recovery_thread(dlm);
+ dlm_put(dlm);
+ return 0;
+}
+
+static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
+{
+ int ret = 0;
+ struct dlm_finalize_reco fr;
+ struct dlm_node_iter iter;
+ int nodenum;
+ int status;
+
+ mlog(0, "finishing recovery for node %s:%u\n",
+ dlm->name, dlm->reco.dead_node);
+
+ spin_lock(&dlm->spinlock);
+ dlm_node_iter_init(dlm->domain_map, &iter);
+ spin_unlock(&dlm->spinlock);
+
+ memset(&fr, 0, sizeof(fr));
+ fr.node_idx = dlm->node_num;
+ fr.dead_node = dlm->reco.dead_node;
+
+ while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
+ if (nodenum == dlm->node_num)
+ continue;
+ ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
+ &fr, sizeof(fr), nodenum, &status);
+ if (ret >= 0) {
+ ret = status;
+ if (dlm_is_host_down(ret)) {
+ /* this has no effect on this recovery
+ * session, so set the status to zero to
+ * finish out the last recovery */
+ mlog(ML_ERROR, "node %u went down after this "
+ "node finished recovery.\n", nodenum);
+ ret = 0;
+ }
+ }
+ if (ret < 0) {
+ mlog_errno(ret);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
+
+ /* ok to return 0, domain has gone away */
+ if (!dlm_grab(dlm))
+ return 0;
+
+ mlog(0, "node %u finalizing recovery of node %u\n",
+ fr->node_idx, fr->dead_node);
+
+ spin_lock(&dlm->spinlock);
+
+ if (dlm->reco.new_master != fr->node_idx) {
+ mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
+ "%u is supposed to be the new master, dead=%u\n",
+ fr->node_idx, dlm->reco.new_master, fr->dead_node);
+ BUG();
+ }
+ if (dlm->reco.dead_node != fr->dead_node) {
+ mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
+ "node %u, but node %u is supposed to be dead\n",
+ fr->node_idx, fr->dead_node, dlm->reco.dead_node);
+ BUG();
+ }
+
+ dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
+
+ spin_unlock(&dlm->spinlock);
+
+ dlm_reset_recovery(dlm);
+
+ dlm_kick_recovery_thread(dlm);
+ dlm_put(dlm);
+ return 0;
+}
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
new file mode 100644
index 00000000000..5be9d14f12c
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -0,0 +1,692 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmthread.c
+ *
+ * standalone DLM module
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/random.h>
+#include <linux/blkdev.h>
+#include <linux/socket.h>
+#include <linux/inet.h>
+#include <linux/timer.h>
+#include <linux/kthread.h>
+
+
+#include "cluster/heartbeat.h"
+#include "cluster/nodemanager.h"
+#include "cluster/tcp.h"
+
+#include "dlmapi.h"
+#include "dlmcommon.h"
+#include "dlmdomain.h"
+
+#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD)
+#include "cluster/masklog.h"
+
+static int dlm_thread(void *data);
+
+static void dlm_flush_asts(struct dlm_ctxt *dlm);
+
+#define dlm_lock_is_remote(dlm, lock) ((lock)->ml.node != (dlm)->node_num)
+
+/* will exit holding res->spinlock, but may drop in function */
+/* waits until flags are cleared on res->state */
+void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags)
+{
+ DECLARE_WAITQUEUE(wait, current);
+
+ assert_spin_locked(&res->spinlock);
+
+ add_wait_queue(&res->wq, &wait);
+repeat:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (res->state & flags) {
+ spin_unlock(&res->spinlock);
+ schedule();
+ spin_lock(&res->spinlock);
+ goto repeat;
+ }
+ remove_wait_queue(&res->wq, &wait);
+ current->state = TASK_RUNNING;
+}
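+
+/*
+ * Illustrative only (not part of this patch): callers enter with
+ * res->spinlock held and get it back on return, roughly:
+ *
+ *	spin_lock(&res->spinlock);
+ *	__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_IN_PROGRESS);
+ *	(res->spinlock is held again here)
+ *	spin_unlock(&res->spinlock);
+ */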
+
+
+static int __dlm_lockres_unused(struct dlm_lock_resource *res)
+{
+ if (list_empty(&res->granted) &&
+ list_empty(&res->converting) &&
+ list_empty(&res->blocked) &&
+ list_empty(&res->dirty))
+ return 1;
+ return 0;
+}
+
+
+/* Call whenever you may have added or deleted something from one of
+ * the lockres queues. This will figure out whether it belongs on the
+ * unused list or not and does the appropriate thing. */
+void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
+
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&res->spinlock);
+
+ if (__dlm_lockres_unused(res)){
+ if (list_empty(&res->purge)) {
+ mlog(0, "putting lockres %.*s from purge list\n",
+ res->lockname.len, res->lockname.name);
+
+ res->last_used = jiffies;
+ list_add_tail(&res->purge, &dlm->purge_list);
+ dlm->purge_count++;
+ }
+ } else if (!list_empty(&res->purge)) {
+ mlog(0, "removing lockres %.*s from purge list\n",
+ res->lockname.len, res->lockname.name);
+
+ list_del_init(&res->purge);
+ dlm->purge_count--;
+ }
+}
+
+void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
+ spin_lock(&dlm->spinlock);
+ spin_lock(&res->spinlock);
+
+ __dlm_lockres_calc_usage(dlm, res);
+
+ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->spinlock);
+}
+
+/* TODO: Eventual API: Called with the dlm spinlock held, may drop it
+ * to do migration, but will re-acquire before exit. */
+void dlm_purge_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *lockres)
+{
+ int master;
+ int ret;
+
+ spin_lock(&lockres->spinlock);
+ master = lockres->owner == dlm->node_num;
+ spin_unlock(&lockres->spinlock);
+
+ mlog(0, "purging lockres %.*s, master = %d\n", lockres->lockname.len,
+ lockres->lockname.name, master);
+
+ /* Non master is the easy case -- no migration required, just
+ * quit. */
+ if (!master)
+ goto finish;
+
+ /* Wheee! Migrate lockres here! */
+ spin_unlock(&dlm->spinlock);
+again:
+
+ ret = dlm_migrate_lockres(dlm, lockres, O2NM_MAX_NODES);
+ if (ret == -ENOTEMPTY) {
+ mlog(ML_ERROR, "lockres %.*s still has local locks!\n",
+ lockres->lockname.len, lockres->lockname.name);
+
+ BUG();
+ } else if (ret < 0) {
+ mlog(ML_NOTICE, "lockres %.*s: migrate failed, retrying\n",
+ lockres->lockname.len, lockres->lockname.name);
+ goto again;
+ }
+
+ spin_lock(&dlm->spinlock);
+
+finish:
+ if (!list_empty(&lockres->purge)) {
+ list_del_init(&lockres->purge);
+ dlm->purge_count--;
+ }
+ __dlm_unhash_lockres(lockres);
+}
+
+static void dlm_run_purge_list(struct dlm_ctxt *dlm,
+ int purge_now)
+{
+ unsigned int run_max, unused;
+ unsigned long purge_jiffies;
+ struct dlm_lock_resource *lockres;
+
+ spin_lock(&dlm->spinlock);
+ run_max = dlm->purge_count;
+
+ while(run_max && !list_empty(&dlm->purge_list)) {
+ run_max--;
+
+ lockres = list_entry(dlm->purge_list.next,
+ struct dlm_lock_resource, purge);
+
+ /* Status of the lockres *might* change so double
+ * check. If the lockres is unused, holding the dlm
+ * spinlock will prevent people from getting any more
+ * refs on it -- there's no need to keep the lockres
+ * spinlock. */
+ spin_lock(&lockres->spinlock);
+ unused = __dlm_lockres_unused(lockres);
+ spin_unlock(&lockres->spinlock);
+
+ if (!unused)
+ continue;
+
+ purge_jiffies = lockres->last_used +
+ msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);
+
+ /* Make sure that we want to be processing this guy at
+ * this time. */
+ if (!purge_now && time_after(purge_jiffies, jiffies)) {
+ /* Since resources are added to the purge list
+ * in tail order, we can stop at the first
+ * unpurgeable resource -- anything added after
+ * it will have a greater last_used value */
+ break;
+ }
+
+ list_del_init(&lockres->purge);
+ dlm->purge_count--;
+
+ /* This may drop and reacquire the dlm spinlock if it
+ * has to do migration. */
+ mlog(0, "calling dlm_purge_lockres!\n");
+ dlm_purge_lockres(dlm, lockres);
+ mlog(0, "DONE calling dlm_purge_lockres!\n");
+
+ /* Avoid adding any scheduling latencies */
+ cond_resched_lock(&dlm->spinlock);
+ }
+
+ spin_unlock(&dlm->spinlock);
+}
+
+static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ struct dlm_lock *lock, *target;
+ struct list_head *iter;
+ struct list_head *head;
+ int can_grant = 1;
+
+ //mlog(0, "res->lockname.len=%d\n", res->lockname.len);
+ //mlog(0, "res->lockname.name=%p\n", res->lockname.name);
+ //mlog(0, "shuffle res %.*s\n", res->lockname.len,
+ // res->lockname.name);
+
+ /* because this function is called with the lockres
+ * spinlock held, and because we know that it is not migrating/
+ * recovering/in-progress, it is fine to reserve asts and
+ * basts right before queueing them all throughout */
+ assert_spin_locked(&res->spinlock);
+ BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
+ DLM_LOCK_RES_RECOVERING|
+ DLM_LOCK_RES_IN_PROGRESS)));
+
+converting:
+ if (list_empty(&res->converting))
+ goto blocked;
+ mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len,
+ res->lockname.name);
+
+ target = list_entry(res->converting.next, struct dlm_lock, list);
+ if (target->ml.convert_type == LKM_IVMODE) {
+ mlog(ML_ERROR, "%.*s: converting a lock with no "
+ "convert_type!\n", res->lockname.len, res->lockname.name);
+ BUG();
+ }
+ head = &res->granted;
+ list_for_each(iter, head) {
+ lock = list_entry(iter, struct dlm_lock, list);
+ if (lock==target)
+ continue;
+ if (!dlm_lock_compatible(lock->ml.type,
+ target->ml.convert_type)) {
+ can_grant = 0;
+ /* queue the BAST if not already */
+ if (lock->ml.highest_blocked == LKM_IVMODE) {
+ __dlm_lockres_reserve_ast(res);
+ dlm_queue_bast(dlm, lock);
+ }
+ /* update the highest_blocked if needed */
+ if (lock->ml.highest_blocked < target->ml.convert_type)
+ lock->ml.highest_blocked =
+ target->ml.convert_type;
+ }
+ }
+ head = &res->converting;
+ list_for_each(iter, head) {
+ lock = list_entry(iter, struct dlm_lock, list);
+ if (lock==target)
+ continue;
+ if (!dlm_lock_compatible(lock->ml.type,
+ target->ml.convert_type)) {
+ can_grant = 0;
+ if (lock->ml.highest_blocked == LKM_IVMODE) {
+ __dlm_lockres_reserve_ast(res);
+ dlm_queue_bast(dlm, lock);
+ }
+ if (lock->ml.highest_blocked < target->ml.convert_type)
+ lock->ml.highest_blocked =
+ target->ml.convert_type;
+ }
+ }
+
+ /* we can convert the lock */
+ if (can_grant) {
+ spin_lock(&target->spinlock);
+ BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
+
+ mlog(0, "calling ast for converting lock: %.*s, have: %d, "
+ "granting: %d, node: %u\n", res->lockname.len,
+ res->lockname.name, target->ml.type,
+ target->ml.convert_type, target->ml.node);
+
+ target->ml.type = target->ml.convert_type;
+ target->ml.convert_type = LKM_IVMODE;
+ list_del_init(&target->list);
+ list_add_tail(&target->list, &res->granted);
+
+ BUG_ON(!target->lksb);
+ target->lksb->status = DLM_NORMAL;
+
+ spin_unlock(&target->spinlock);
+
+ __dlm_lockres_reserve_ast(res);
+ dlm_queue_ast(dlm, target);
+ /* go back and check for more */
+ goto converting;
+ }
+
+blocked:
+ if (list_empty(&res->blocked))
+ goto leave;
+ target = list_entry(res->blocked.next, struct dlm_lock, list);
+
+ head = &res->granted;
+ list_for_each(iter, head) {
+ lock = list_entry(iter, struct dlm_lock, list);
+ if (lock==target)
+ continue;
+ if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
+ can_grant = 0;
+ if (lock->ml.highest_blocked == LKM_IVMODE) {
+ __dlm_lockres_reserve_ast(res);
+ dlm_queue_bast(dlm, lock);
+ }
+ if (lock->ml.highest_blocked < target->ml.type)
+ lock->ml.highest_blocked = target->ml.type;
+ }
+ }
+
+ head = &res->converting;
+ list_for_each(iter, head) {
+ lock = list_entry(iter, struct dlm_lock, list);
+ if (lock==target)
+ continue;
+ if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
+ can_grant = 0;
+ if (lock->ml.highest_blocked == LKM_IVMODE) {
+ __dlm_lockres_reserve_ast(res);
+ dlm_queue_bast(dlm, lock);
+ }
+ if (lock->ml.highest_blocked < target->ml.type)
+ lock->ml.highest_blocked = target->ml.type;
+ }
+ }
+
+ /* we can grant the blocked lock (only
+ * possible if converting list empty) */
+ if (can_grant) {
+ spin_lock(&target->spinlock);
+ BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
+
+ mlog(0, "calling ast for blocked lock: %.*s, granting: %d, "
+ "node: %u\n", res->lockname.len, res->lockname.name,
+ target->ml.type, target->ml.node);
+
+ // target->ml.type is already correct
+ list_del_init(&target->list);
+ list_add_tail(&target->list, &res->granted);
+
+ BUG_ON(!target->lksb);
+ target->lksb->status = DLM_NORMAL;
+
+ spin_unlock(&target->spinlock);
+
+ __dlm_lockres_reserve_ast(res);
+ dlm_queue_ast(dlm, target);
+ /* go back and check for more */
+ goto converting;
+ }
+
+leave:
+ return;
+}
+
+/* must have NO locks when calling this with res != NULL */
+void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
+{
+ mlog_entry("dlm=%p, res=%p\n", dlm, res);
+ if (res) {
+ spin_lock(&dlm->spinlock);
+ spin_lock(&res->spinlock);
+ __dlm_dirty_lockres(dlm, res);
+ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->spinlock);
+ }
+ wake_up(&dlm->dlm_thread_wq);
+}
+
+void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
+{
+ mlog_entry("dlm=%p, res=%p\n", dlm, res);
+
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&res->spinlock);
+
+ /* don't shuffle secondary queues */
+ if ((res->owner == dlm->node_num) &&
+ !(res->state & DLM_LOCK_RES_DIRTY)) {
+ list_add_tail(&res->dirty, &dlm->dirty_list);
+ res->state |= DLM_LOCK_RES_DIRTY;
+ }
+}
+
+
+/* Launch the DLM thread for this domain */
+int dlm_launch_thread(struct dlm_ctxt *dlm)
+{
+ mlog(0, "starting dlm thread...\n");
+
+ dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread");
+ if (IS_ERR(dlm->dlm_thread_task)) {
+ mlog_errno(PTR_ERR(dlm->dlm_thread_task));
+ dlm->dlm_thread_task = NULL;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void dlm_complete_thread(struct dlm_ctxt *dlm)
+{
+ if (dlm->dlm_thread_task) {
+ mlog(ML_KTHREAD, "waiting for dlm thread to exit\n");
+ kthread_stop(dlm->dlm_thread_task);
+ dlm->dlm_thread_task = NULL;
+ }
+}
+
+static int dlm_dirty_list_empty(struct dlm_ctxt *dlm)
+{
+ int empty;
+
+ spin_lock(&dlm->spinlock);
+ empty = list_empty(&dlm->dirty_list);
+ spin_unlock(&dlm->spinlock);
+
+ return empty;
+}
+
+static void dlm_flush_asts(struct dlm_ctxt *dlm)
+{
+ int ret;
+ struct dlm_lock *lock;
+ struct dlm_lock_resource *res;
+ u8 hi;
+
+ spin_lock(&dlm->ast_lock);
+ while (!list_empty(&dlm->pending_asts)) {
+ lock = list_entry(dlm->pending_asts.next,
+ struct dlm_lock, ast_list);
+ /* get an extra ref on lock */
+ dlm_lock_get(lock);
+ res = lock->lockres;
+ mlog(0, "delivering an ast for this lockres\n");
+
+ BUG_ON(!lock->ast_pending);
+
+ /* remove from list (including ref) */
+ list_del_init(&lock->ast_list);
+ dlm_lock_put(lock);
+ spin_unlock(&dlm->ast_lock);
+
+ if (lock->ml.node != dlm->node_num) {
+ ret = dlm_do_remote_ast(dlm, res, lock);
+ if (ret < 0)
+ mlog_errno(ret);
+ } else
+ dlm_do_local_ast(dlm, res, lock);
+
+ spin_lock(&dlm->ast_lock);
+
+ /* possible that another ast was queued while
+ * we were delivering the last one */
+ if (!list_empty(&lock->ast_list)) {
+ mlog(0, "aha another ast got queued while "
+ "we were finishing the last one. will "
+ "keep the ast_pending flag set.\n");
+ } else
+ lock->ast_pending = 0;
+
+ /* drop the extra ref.
+ * this may drop it completely. */
+ dlm_lock_put(lock);
+ dlm_lockres_release_ast(dlm, res);
+ }
+
+ while (!list_empty(&dlm->pending_basts)) {
+ lock = list_entry(dlm->pending_basts.next,
+ struct dlm_lock, bast_list);
+ /* get an extra ref on lock */
+ dlm_lock_get(lock);
+ res = lock->lockres;
+
+ BUG_ON(!lock->bast_pending);
+
+ /* get the highest blocked lock, and reset */
+ spin_lock(&lock->spinlock);
+ BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE);
+ hi = lock->ml.highest_blocked;
+ lock->ml.highest_blocked = LKM_IVMODE;
+ spin_unlock(&lock->spinlock);
+
+ /* remove from list (including ref) */
+ list_del_init(&lock->bast_list);
+ dlm_lock_put(lock);
+ spin_unlock(&dlm->ast_lock);
+
+ mlog(0, "delivering a bast for this lockres "
+ "(blocked = %d\n", hi);
+
+ if (lock->ml.node != dlm->node_num) {
+ ret = dlm_send_proxy_bast(dlm, res, lock, hi);
+ if (ret < 0)
+ mlog_errno(ret);
+ } else
+ dlm_do_local_bast(dlm, res, lock, hi);
+
+ spin_lock(&dlm->ast_lock);
+
+ /* possible that another bast was queued while
+ * we were delivering the last one */
+ if (!list_empty(&lock->bast_list)) {
+ mlog(0, "aha another bast got queued while "
+ "we were finishing the last one. will "
+ "keep the bast_pending flag set.\n");
+ } else
+ lock->bast_pending = 0;
+
+ /* drop the extra ref.
+ * this may drop it completely. */
+ dlm_lock_put(lock);
+ dlm_lockres_release_ast(dlm, res);
+ }
+ wake_up(&dlm->ast_wq);
+ spin_unlock(&dlm->ast_lock);
+}
+
+
+#define DLM_THREAD_TIMEOUT_MS (4 * 1000)
+#define DLM_THREAD_MAX_DIRTY 100
+#define DLM_THREAD_MAX_ASTS 10
+
+static int dlm_thread(void *data)
+{
+ struct dlm_lock_resource *res;
+ struct dlm_ctxt *dlm = data;
+ unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS);
+
+ mlog(0, "dlm thread running for %s...\n", dlm->name);
+
+ while (!kthread_should_stop()) {
+ int n = DLM_THREAD_MAX_DIRTY;
+
+ /* dlm_shutting_down is very point-in-time, but that
+ * doesn't matter as we'll just loop back around if we
+ * get false on the leading edge of a state
+ * transition. */
+ dlm_run_purge_list(dlm, dlm_shutting_down(dlm));
+
+ /* We really don't want to hold dlm->spinlock while
+ * calling dlm_shuffle_lists on each lockres that
+ * needs to have its queues adjusted and AST/BASTs
+ * run. So let's pull each entry off the dirty_list
+ * and drop dlm->spinlock ASAP. Once off the list,
+ * res->spinlock needs to be taken again to protect
+ * the queues while calling dlm_shuffle_lists. */
+ spin_lock(&dlm->spinlock);
+ while (!list_empty(&dlm->dirty_list)) {
+ int delay = 0;
+ res = list_entry(dlm->dirty_list.next,
+ struct dlm_lock_resource, dirty);
+
+ /* peel a lockres off, remove it from the list,
+ * unset the dirty flag and drop the dlm lock */
+ BUG_ON(!res);
+ dlm_lockres_get(res);
+
+ spin_lock(&res->spinlock);
+ res->state &= ~DLM_LOCK_RES_DIRTY;
+ list_del_init(&res->dirty);
+ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->spinlock);
+
+ /* lockres can be re-dirtied/re-added to the
+ * dirty_list in this gap, but that is ok */
+
+ spin_lock(&res->spinlock);
+ if (res->owner != dlm->node_num) {
+ __dlm_print_one_lock_resource(res);
+ mlog(ML_ERROR, "inprog:%s, mig:%s, reco:%s, dirty:%s\n",
+ res->state & DLM_LOCK_RES_IN_PROGRESS ? "yes" : "no",
+ res->state & DLM_LOCK_RES_MIGRATING ? "yes" : "no",
+ res->state & DLM_LOCK_RES_RECOVERING ? "yes" : "no",
+ res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
+ }
+ BUG_ON(res->owner != dlm->node_num);
+
+ /* it is now ok to move lockreses in these states
+ * to the dirty list, assuming that they will only be
+ * dirty for a short while. */
+ if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
+ DLM_LOCK_RES_MIGRATING |
+ DLM_LOCK_RES_RECOVERING)) {
+ /* move it to the tail and keep going */
+ spin_unlock(&res->spinlock);
+ mlog(0, "delaying list shuffling for in-"
+ "progress lockres %.*s, state=%d\n",
+ res->lockname.len, res->lockname.name,
+ res->state);
+ delay = 1;
+ goto in_progress;
+ }
+
+ /* at this point the lockres is not migrating/
+ * recovering/in-progress. we have the lockres
+ * spinlock and do NOT have the dlm lock.
+ * safe to reserve/queue asts and run the lists. */
+
+ mlog(0, "calling dlm_shuffle_lists with dlm=%p, "
+ "res=%p\n", dlm, res);
+
+ /* called while holding lockres lock */
+ dlm_shuffle_lists(dlm, res);
+ spin_unlock(&res->spinlock);
+
+ dlm_lockres_calc_usage(dlm, res);
+
+in_progress:
+
+ spin_lock(&dlm->spinlock);
+ /* if the lock was in-progress, stick
+ * it on the back of the list */
+ if (delay) {
+ spin_lock(&res->spinlock);
+ list_add_tail(&res->dirty, &dlm->dirty_list);
+ res->state |= DLM_LOCK_RES_DIRTY;
+ spin_unlock(&res->spinlock);
+ }
+ dlm_lockres_put(res);
+
+ /* unlikely, but we may need to give time to
+ * other tasks */
+ if (!--n) {
+ mlog(0, "throttling dlm_thread\n");
+ break;
+ }
+ }
+
+ spin_unlock(&dlm->spinlock);
+ dlm_flush_asts(dlm);
+
+ /* yield and continue right away if there is more work to do */
+ if (!n) {
+ yield();
+ continue;
+ }
+
+ wait_event_interruptible_timeout(dlm->dlm_thread_wq,
+ !dlm_dirty_list_empty(dlm) ||
+ kthread_should_stop(),
+ timeout);
+ }
+
+ mlog(0, "quitting DLM thread\n");
+ return 0;
+}
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
new file mode 100644
index 00000000000..cec2ce1cd31
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -0,0 +1,672 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmunlock.c
+ *
+ * underlying calls for unlocking locks
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ */
+
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/sysctl.h>
+#include <linux/random.h>
+#include <linux/blkdev.h>
+#include <linux/socket.h>
+#include <linux/inet.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+
+#include "cluster/heartbeat.h"
+#include "cluster/nodemanager.h"
+#include "cluster/tcp.h"
+
+#include "dlmapi.h"
+#include "dlmcommon.h"
+
+#define MLOG_MASK_PREFIX ML_DLM
+#include "cluster/masklog.h"
+
+#define DLM_UNLOCK_FREE_LOCK 0x00000001
+#define DLM_UNLOCK_CALL_AST 0x00000002
+#define DLM_UNLOCK_REMOVE_LOCK 0x00000004
+#define DLM_UNLOCK_REGRANT_LOCK 0x00000008
+#define DLM_UNLOCK_CLEAR_CONVERT_TYPE 0x00000010
+
+
+static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ struct dlm_lockstatus *lksb,
+ int *actions);
+static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ struct dlm_lockstatus *lksb,
+ int *actions);
+
+static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ struct dlm_lockstatus *lksb,
+ int flags,
+ u8 owner);
+
+
+/*
+ * according to the spec:
+ * http://opendlm.sourceforge.net/cvsmirror/opendlm/docs/dlmbook_final.pdf
+ *
+ * flags & LKM_CANCEL != 0: must be converting or blocked
+ * flags & LKM_CANCEL == 0: must be granted
+ *
+ * So to unlock a converting lock, you must first cancel the
+ * convert (passing LKM_CANCEL in flags), then call the unlock
+ * again (with no LKM_CANCEL in flags).
+ */
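+
+/*
+ * Illustrative sketch (not part of this patch): under the rules above,
+ * a caller tearing down a lock whose convert request is still queued
+ * would roughly do
+ *
+ *	status = dlmunlock(dlm, lksb, LKM_CANCEL, unlock_ast, data);
+ *	(wait for the unlock ast; the lock is back on the granted queue)
+ *	status = dlmunlock(dlm, lksb, 0, unlock_ast, data);
+ *
+ * with the real error handling (DLM_CANCELGRANT, DLM_FORWARD retries,
+ * and so on) left to the caller.
+ */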
+
+
+/*
+ * locking:
+ * caller needs: none
+ * taken: res->spinlock and lock->spinlock taken and dropped
+ * held on exit: none
+ * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network
+ * all callers should have taken an extra ref on lock coming in
+ */
+static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ struct dlm_lockstatus *lksb,
+ int flags, int *call_ast,
+ int master_node)
+{
+ enum dlm_status status;
+ int actions = 0;
+ int in_use;
+ u8 owner;
+
+ mlog(0, "master_node = %d, valblk = %d\n", master_node,
+ flags & LKM_VALBLK);
+
+ if (master_node)
+ BUG_ON(res->owner != dlm->node_num);
+ else
+ BUG_ON(res->owner == dlm->node_num);
+
+ spin_lock(&dlm->spinlock);
+ /* We want to be sure that we're not freeing a lock
+ * that still has ASTs pending... */
+ in_use = !list_empty(&lock->ast_list);
+ spin_unlock(&dlm->spinlock);
+ if (in_use) {
+ mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock "
+ "while waiting for an ast!", res->lockname.len,
+ res->lockname.name);
+ return DLM_BADPARAM;
+ }
+
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_IN_PROGRESS) {
+ if (master_node) {
+ mlog(ML_ERROR, "lockres in progress!\n");
+ spin_unlock(&res->spinlock);
+ return DLM_FORWARD;
+ }
+ /* ok for this to sleep if not in a network handler */
+ __dlm_wait_on_lockres(res);
+ res->state |= DLM_LOCK_RES_IN_PROGRESS;
+ }
+ spin_lock(&lock->spinlock);
+
+ if (res->state & DLM_LOCK_RES_RECOVERING) {
+ status = DLM_RECOVERING;
+ goto leave;
+ }
+
+
+ /* see above for what the spec says about
+ * LKM_CANCEL and the lock queue state */
+ if (flags & LKM_CANCEL)
+ status = dlm_get_cancel_actions(dlm, res, lock, lksb, &actions);
+ else
+ status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions);
+
+ if (status != DLM_NORMAL)
+ goto leave;
+
+ /* By now this has been masked out of cancel requests. */
+ if (flags & LKM_VALBLK) {
+ /* make the final update to the lvb */
+ if (master_node)
+ memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN);
+ else
+ flags |= LKM_PUT_LVB; /* let the send function
+ * handle it. */
+ }
+
+ if (!master_node) {
+ owner = res->owner;
+ /* drop locks and send message */
+ if (flags & LKM_CANCEL)
+ lock->cancel_pending = 1;
+ else
+ lock->unlock_pending = 1;
+ spin_unlock(&lock->spinlock);
+ spin_unlock(&res->spinlock);
+ status = dlm_send_remote_unlock_request(dlm, res, lock, lksb,
+ flags, owner);
+ spin_lock(&res->spinlock);
+ spin_lock(&lock->spinlock);
+ /* if the master told us the lock was already granted,
+ * let the ast handle all of these actions */
+ if (status == DLM_NORMAL &&
+ lksb->status == DLM_CANCELGRANT) {
+ actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
+ DLM_UNLOCK_REGRANT_LOCK|
+ DLM_UNLOCK_CLEAR_CONVERT_TYPE);
+ }
+ if (flags & LKM_CANCEL)
+ lock->cancel_pending = 0;
+ else
+ lock->unlock_pending = 0;
+
+ }
+
+ /* get an extra ref on lock. if we are just switching
+ * lists here, we don't want the lock to go away. */
+ dlm_lock_get(lock);
+
+ if (actions & DLM_UNLOCK_REMOVE_LOCK) {
+ list_del_init(&lock->list);
+ dlm_lock_put(lock);
+ }
+ if (actions & DLM_UNLOCK_REGRANT_LOCK) {
+ dlm_lock_get(lock);
+ list_add_tail(&lock->list, &res->granted);
+ }
+ if (actions & DLM_UNLOCK_CLEAR_CONVERT_TYPE) {
+ mlog(0, "clearing convert_type at %smaster node\n",
+ master_node ? "" : "non-");
+ lock->ml.convert_type = LKM_IVMODE;
+ }
+
+ /* remove the extra ref on lock */
+ dlm_lock_put(lock);
+
+leave:
+ res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+ if (!dlm_lock_on_list(&res->converting, lock))
+ BUG_ON(lock->ml.convert_type != LKM_IVMODE);
+ else
+ BUG_ON(lock->ml.convert_type == LKM_IVMODE);
+ spin_unlock(&lock->spinlock);
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+
+ /* let the caller's final dlm_lock_put handle the actual kfree */
+ if (actions & DLM_UNLOCK_FREE_LOCK) {
+ /* this should always be coupled with list removal */
+ BUG_ON(!(actions & DLM_UNLOCK_REMOVE_LOCK));
+ mlog(0, "lock %"MLFu64" should be gone now! refs=%d\n",
+ lock->ml.cookie, atomic_read(&lock->lock_refs.refcount)-1);
+ dlm_lock_put(lock);
+ }
+ if (actions & DLM_UNLOCK_CALL_AST)
+ *call_ast = 1;
+
+ /* if cancel or unlock succeeded, lvb work is done */
+ if (status == DLM_NORMAL)
+ lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB);
+
+ return status;
+}
+
+void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
+ struct dlm_lock *lock)
+{
+ /* leave DLM_LKSB_PUT_LVB on the lksb so any final
+ * update of the lvb will be sent to the new master */
+ list_del_init(&lock->list);
+}
+
+void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
+ struct dlm_lock *lock)
+{
+ list_del_init(&lock->list);
+ list_add_tail(&lock->list, &res->granted);
+ lock->ml.convert_type = LKM_IVMODE;
+}
+
+
+static inline enum dlm_status dlmunlock_master(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ struct dlm_lockstatus *lksb,
+ int flags,
+ int *call_ast)
+{
+ return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 1);
+}
+
+static inline enum dlm_status dlmunlock_remote(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ struct dlm_lockstatus *lksb,
+ int flags, int *call_ast)
+{
+ return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 0);
+}
+
+/*
+ * locking:
+ * caller needs: none
+ * taken: none
+ * held on exit: none
+ * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network
+ */
+static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ struct dlm_lockstatus *lksb,
+ int flags,
+ u8 owner)
+{
+ struct dlm_unlock_lock unlock;
+ int tmpret;
+ enum dlm_status ret;
+ int status = 0;
+ struct kvec vec[2];
+ size_t veclen = 1;
+
+ mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
+
+ memset(&unlock, 0, sizeof(unlock));
+ unlock.node_idx = dlm->node_num;
+ unlock.flags = cpu_to_be32(flags);
+ unlock.cookie = lock->ml.cookie;
+ unlock.namelen = res->lockname.len;
+ memcpy(unlock.name, res->lockname.name, unlock.namelen);
+
+ vec[0].iov_len = sizeof(struct dlm_unlock_lock);
+ vec[0].iov_base = &unlock;
+
+ if (flags & LKM_PUT_LVB) {
+ /* extra data to send if we are updating lvb */
+ vec[1].iov_len = DLM_LVB_LEN;
+ vec[1].iov_base = lock->lksb->lvb;
+ veclen++;
+ }
+
+ tmpret = o2net_send_message_vec(DLM_UNLOCK_LOCK_MSG, dlm->key,
+ vec, veclen, owner, &status);
+ if (tmpret >= 0) {
+ // successfully sent and received
+ if (status == DLM_CANCELGRANT)
+ ret = DLM_NORMAL;
+ else if (status == DLM_FORWARD) {
+ mlog(0, "master was in-progress. retry\n");
+ ret = DLM_FORWARD;
+ } else
+ ret = status;
+ lksb->status = status;
+ } else {
+ mlog_errno(tmpret);
+ if (dlm_is_host_down(tmpret)) {
+ /* NOTE: this seems strange, but it is what we want.
+ * when the master goes down during a cancel or
+ * unlock, the recovery code completes the operation
+ * as if the master had not died, then passes the
+ * updated state to the recovery master. this thread
+ * just needs to finish out the operation and call
+ * the unlockast. */
+ ret = DLM_NORMAL;
+ } else {
+ /* something bad. this will BUG in ocfs2 */
+ ret = dlm_err_to_dlm_status(tmpret);
+ }
+ lksb->status = ret;
+ }
+
+ return ret;
+}
+
+/*
+ * locking:
+ * caller needs: none
+ * taken: takes and drops res->spinlock
+ * held on exit: none
+ * returns: DLM_NORMAL, DLM_BADARGS, DLM_IVLOCKID,
+ * return value from dlmunlock_master
+ */
+int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data)
+{
+ struct dlm_ctxt *dlm = data;
+ struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf;
+ struct dlm_lock_resource *res = NULL;
+ struct list_head *iter;
+ struct dlm_lock *lock = NULL;
+ enum dlm_status status = DLM_NORMAL;
+ int found = 0, i;
+ struct dlm_lockstatus *lksb = NULL;
+ int ignore;
+ u32 flags;
+ struct list_head *queue;
+
+ flags = be32_to_cpu(unlock->flags);
+
+ if (flags & LKM_GET_LVB) {
+ mlog(ML_ERROR, "bad args! GET_LVB specified on unlock!\n");
+ return DLM_BADARGS;
+ }
+
+ if ((flags & (LKM_PUT_LVB|LKM_CANCEL)) == (LKM_PUT_LVB|LKM_CANCEL)) {
+ mlog(ML_ERROR, "bad args! cannot modify lvb on a CANCEL "
+ "request!\n");
+ return DLM_BADARGS;
+ }
+
+ if (unlock->namelen > DLM_LOCKID_NAME_MAX) {
+ mlog(ML_ERROR, "Invalid name length in unlock handler!\n");
+ return DLM_IVBUFLEN;
+ }
+
+ if (!dlm_grab(dlm))
+ return DLM_REJECTED;
+
+ mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
+ "Domain %s not fully joined!\n", dlm->name);
+
+ mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : "none");
+
+ res = dlm_lookup_lockres(dlm, unlock->name, unlock->namelen);
+ if (!res) {
+ /* We assume here that a missing lock resource simply means
+ * it was migrated away and destroyed before the other
+ * node could detect it. */
+ mlog(0, "returning DLM_FORWARD -- res no longer exists\n");
+ status = DLM_FORWARD;
+ goto not_found;
+ }
+
+ queue=&res->granted;
+ found = 0;
+ spin_lock(&res->spinlock);
+ if (res->state & DLM_LOCK_RES_RECOVERING) {
+ spin_unlock(&res->spinlock);
+ mlog(0, "returning DLM_RECOVERING\n");
+ status = DLM_RECOVERING;
+ goto leave;
+ }
+
+ if (res->state & DLM_LOCK_RES_MIGRATING) {
+ spin_unlock(&res->spinlock);
+ mlog(0, "returning DLM_MIGRATING\n");
+ status = DLM_MIGRATING;
+ goto leave;
+ }
+
+ if (res->owner != dlm->node_num) {
+ spin_unlock(&res->spinlock);
+ mlog(0, "returning DLM_FORWARD -- not master\n");
+ status = DLM_FORWARD;
+ goto leave;
+ }
+
+ for (i=0; i<3; i++) {
+ list_for_each(iter, queue) {
+ lock = list_entry(iter, struct dlm_lock, list);
+ if (lock->ml.cookie == unlock->cookie &&
+ lock->ml.node == unlock->node_idx) {
+ dlm_lock_get(lock);
+ found = 1;
+ break;
+ }
+ }
+ if (found)
+ break;
+ /* scan granted -> converting -> blocked queues */
+ queue++;
+ }
+ spin_unlock(&res->spinlock);
+ if (!found) {
+ status = DLM_IVLOCKID;
+ goto not_found;
+ }
+
+ /* lock was found on queue */
+ lksb = lock->lksb;
+ /* unlockast only called on originating node */
+ if (flags & LKM_PUT_LVB) {
+ lksb->flags |= DLM_LKSB_PUT_LVB;
+ memcpy(&lksb->lvb[0], &unlock->lvb[0], DLM_LVB_LEN);
+ }
+
+ /* if this is in-progress, propagate the DLM_FORWARD
+ * all the way back out */
+ status = dlmunlock_master(dlm, res, lock, lksb, flags, &ignore);
+ if (status == DLM_FORWARD)
+ mlog(0, "lockres is in progress\n");
+
+ if (flags & LKM_PUT_LVB)
+ lksb->flags &= ~DLM_LKSB_PUT_LVB;
+
+ dlm_lockres_calc_usage(dlm, res);
+ dlm_kick_thread(dlm, res);
+
+not_found:
+ if (!found)
+ mlog(ML_ERROR, "failed to find lock to unlock! "
+ "cookie=%"MLFu64"\n",
+ unlock->cookie);
+ else {
+ /* send the lksb->status back to the other node */
+ status = lksb->status;
+ dlm_lock_put(lock);
+ }
+
+leave:
+ if (res)
+ dlm_lockres_put(res);
+
+ dlm_put(dlm);
+
+ return status;
+}
+
+
+static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ struct dlm_lockstatus *lksb,
+ int *actions)
+{
+ enum dlm_status status;
+
+ if (dlm_lock_on_list(&res->blocked, lock)) {
+ /* cancel this outright */
+ lksb->status = DLM_NORMAL;
+ status = DLM_NORMAL;
+ *actions = (DLM_UNLOCK_CALL_AST |
+ DLM_UNLOCK_REMOVE_LOCK);
+ } else if (dlm_lock_on_list(&res->converting, lock)) {
+ /* cancel the request, put back on granted */
+ lksb->status = DLM_NORMAL;
+ status = DLM_NORMAL;
+ *actions = (DLM_UNLOCK_CALL_AST |
+ DLM_UNLOCK_REMOVE_LOCK |
+ DLM_UNLOCK_REGRANT_LOCK |
+ DLM_UNLOCK_CLEAR_CONVERT_TYPE);
+ } else if (dlm_lock_on_list(&res->granted, lock)) {
+ /* too late, already granted. DLM_CANCELGRANT */
+ lksb->status = DLM_CANCELGRANT;
+ status = DLM_NORMAL;
+ *actions = DLM_UNLOCK_CALL_AST;
+ } else {
+ mlog(ML_ERROR, "lock to cancel is not on any list!\n");
+ lksb->status = DLM_IVLOCKID;
+ status = DLM_IVLOCKID;
+ *actions = 0;
+ }
+ return status;
+}
+
+static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res,
+ struct dlm_lock *lock,
+ struct dlm_lockstatus *lksb,
+ int *actions)
+{
+ enum dlm_status status;
+
+ /* unlock request */
+ if (!dlm_lock_on_list(&res->granted, lock)) {
+ lksb->status = DLM_DENIED;
+ status = DLM_DENIED;
+ dlm_error(status);
+ *actions = 0;
+ } else {
+ /* unlock granted lock */
+ lksb->status = DLM_NORMAL;
+ status = DLM_NORMAL;
+ *actions = (DLM_UNLOCK_FREE_LOCK |
+ DLM_UNLOCK_CALL_AST |
+ DLM_UNLOCK_REMOVE_LOCK);
+ }
+ return status;
+}
+
+/* there seems to be no point in doing this async
+ * since (even for the remote case) there is really
+ * no work to queue up... so just do it and fire the
+ * unlockast by hand when done... */
+enum dlm_status dlmunlock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb,
+ int flags, dlm_astunlockfunc_t *unlockast, void *data)
+{
+ enum dlm_status status;
+ struct dlm_lock_resource *res;
+ struct dlm_lock *lock = NULL;
+ int call_ast, is_master;
+
+ mlog_entry_void();
+
+ if (!lksb) {
+ dlm_error(DLM_BADARGS);
+ return DLM_BADARGS;
+ }
+
+ if (flags & ~(LKM_CANCEL | LKM_VALBLK | LKM_INVVALBLK)) {
+ dlm_error(DLM_BADPARAM);
+ return DLM_BADPARAM;
+ }
+
+ if ((flags & (LKM_VALBLK | LKM_CANCEL)) == (LKM_VALBLK | LKM_CANCEL)) {
+ mlog(0, "VALBLK given with CANCEL: ignoring VALBLK\n");
+ flags &= ~LKM_VALBLK;
+ }
+
+ if (!lksb->lockid || !lksb->lockid->lockres) {
+ dlm_error(DLM_BADPARAM);
+ return DLM_BADPARAM;
+ }
+
+ lock = lksb->lockid;
+ BUG_ON(!lock);
+ dlm_lock_get(lock);
+
+ res = lock->lockres;
+ BUG_ON(!res);
+ dlm_lockres_get(res);
+retry:
+ call_ast = 0;
+ /* need to retry up here because owner may have changed */
+ mlog(0, "lock=%p res=%p\n", lock, res);
+
+ spin_lock(&res->spinlock);
+ is_master = (res->owner == dlm->node_num);
+ spin_unlock(&res->spinlock);
+
+ if (is_master) {
+ status = dlmunlock_master(dlm, res, lock, lksb, flags,
+ &call_ast);
+ mlog(0, "done calling dlmunlock_master: returned %d, "
+ "call_ast is %d\n", status, call_ast);
+ } else {
+ status = dlmunlock_remote(dlm, res, lock, lksb, flags,
+ &call_ast);
+ mlog(0, "done calling dlmunlock_remote: returned %d, "
+ "call_ast is %d\n", status, call_ast);
+ }
+
+ if (status == DLM_RECOVERING ||
+ status == DLM_MIGRATING ||
+ status == DLM_FORWARD) {
+ /* We want to go away for a tiny bit to allow recovery
+ * / migration to complete on this resource. I don't
+ * know of any wait queue we could sleep on as this
+ * may be happening on another node. Perhaps the
+ * proper solution is to queue up requests on the
+ * other end? */
+
+ /* do we want to yield(); ?? */
+ msleep(50);
+
+ mlog(0, "retrying unlock due to pending recovery/"
+ "migration/in-progress\n");
+ goto retry;
+ }
+
+ if (call_ast) {
+ mlog(0, "calling unlockast(%p, %d)\n", data, lksb->status);
+ if (is_master) {
+ /* it is possible that there is one last bast
+ * pending. make sure it is flushed, then
+ * call the unlockast.
+ * not an issue if this lock is mastered remotely,
+ * since this lock has been removed from the
+ * lockres queues and cannot be found. */
+ dlm_kick_thread(dlm, NULL);
+ wait_event(dlm->ast_wq,
+ dlm_lock_basts_flushed(dlm, lock));
+ }
+ (*unlockast)(data, lksb->status);
+ }
+
+ if (status == DLM_NORMAL) {
+ mlog(0, "kicking the thread\n");
+ dlm_kick_thread(dlm, res);
+ } else
+ dlm_error(status);
+
+ dlm_lockres_calc_usage(dlm, res);
+ dlm_lockres_put(res);
+ dlm_lock_put(lock);
+
+ mlog(0, "returning status=%d!\n", status);
+ return status;
+}
+EXPORT_SYMBOL_GPL(dlmunlock);
+
diff --git a/fs/ocfs2/dlm/dlmver.c b/fs/ocfs2/dlm/dlmver.c
new file mode 100644
index 00000000000..7ef2653f8f4
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmver.c
@@ -0,0 +1,42 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmver.c
+ *
+ * version string
+ *
+ * Copyright (C) 2002, 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "dlmver.h"
+
+#define DLM_BUILD_VERSION "1.3.3"
+
+#define VERSION_STR "OCFS2 DLM " DLM_BUILD_VERSION
+
+void dlm_print_version(void)
+{
+ printk(KERN_INFO "%s\n", VERSION_STR);
+}
+
+MODULE_DESCRIPTION(VERSION_STR);
+
+MODULE_VERSION(DLM_BUILD_VERSION);
diff --git a/fs/ocfs2/dlm/dlmver.h b/fs/ocfs2/dlm/dlmver.h
new file mode 100644
index 00000000000..f674aee77a1
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmver.h
@@ -0,0 +1,31 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmver.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef DLM_VER_H
+#define DLM_VER_H
+
+void dlm_print_version(void);
+
+#endif /* DLM_VER_H */
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c
new file mode 100644
index 00000000000..e1fdd288796
--- /dev/null
+++ b/fs/ocfs2/dlm/userdlm.c
@@ -0,0 +1,658 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * userdlm.c
+ *
+ * Code which implements the kernel side of a minimal userspace
+ * interface to our DLM.
+ *
+ * Many of the functions here are pared down versions of dlmglue.c
+ * functions.
+ *
+ * Copyright (C) 2003, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <asm/signal.h>
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/crc32.h>
+
+
+#include "cluster/nodemanager.h"
+#include "cluster/heartbeat.h"
+#include "cluster/tcp.h"
+
+#include "dlmapi.h"
+
+#include "userdlm.h"
+
+#define MLOG_MASK_PREFIX ML_DLMFS
+#include "cluster/masklog.h"
+
+static inline int user_check_wait_flag(struct user_lock_res *lockres,
+ int flag)
+{
+ int ret;
+
+ spin_lock(&lockres->l_lock);
+ ret = lockres->l_flags & flag;
+ spin_unlock(&lockres->l_lock);
+
+ return ret;
+}
+
+static inline void user_wait_on_busy_lock(struct user_lock_res *lockres)
+
+{
+ wait_event(lockres->l_event,
+ !user_check_wait_flag(lockres, USER_LOCK_BUSY));
+}
+
+static inline void user_wait_on_blocked_lock(struct user_lock_res *lockres)
+
+{
+ wait_event(lockres->l_event,
+ !user_check_wait_flag(lockres, USER_LOCK_BLOCKED));
+}
+
+/* I heart container_of... */
+static inline struct dlm_ctxt *
+dlm_ctxt_from_user_lockres(struct user_lock_res *lockres)
+{
+ struct dlmfs_inode_private *ip;
+
+ ip = container_of(lockres,
+ struct dlmfs_inode_private,
+ ip_lockres);
+ return ip->ip_dlm;
+}
+
+static struct inode *
+user_dlm_inode_from_user_lockres(struct user_lock_res *lockres)
+{
+ struct dlmfs_inode_private *ip;
+
+ ip = container_of(lockres,
+ struct dlmfs_inode_private,
+ ip_lockres);
+ return &ip->ip_vfs_inode;
+}
+
+static inline void user_recover_from_dlm_error(struct user_lock_res *lockres)
+{
+ spin_lock(&lockres->l_lock);
+ lockres->l_flags &= ~USER_LOCK_BUSY;
+ spin_unlock(&lockres->l_lock);
+}
+
+#define user_log_dlm_error(_func, _stat, _lockres) do { \
+ mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on " \
+ "resource %s: %s\n", dlm_errname(_stat), _func, \
+ _lockres->l_name, dlm_errmsg(_stat)); \
+} while (0)
+
+/* WARNING: This function lives in a world where the only three lock
+ * levels are EX, PR, and NL. It *will* have to be adjusted when more
+ * lock types are added. */
+static inline int user_highest_compat_lock_level(int level)
+{
+ int new_level = LKM_EXMODE;
+
+ if (level == LKM_EXMODE)
+ new_level = LKM_NLMODE;
+ else if (level == LKM_PRMODE)
+ new_level = LKM_PRMODE;
+ return new_level;
+}
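+
+/*
+ * Worked example of the mapping above (illustrative only): a blocking
+ * LKM_EXMODE request forces us all the way down to LKM_NLMODE, while a
+ * blocking LKM_PRMODE request only forces us down to LKM_PRMODE, so
+ * existing PR holders can keep running.
+ */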
+
+static void user_ast(void *opaque)
+{
+ struct user_lock_res *lockres = opaque;
+ struct dlm_lockstatus *lksb;
+
+ mlog(0, "AST fired for lockres %s\n", lockres->l_name);
+
+ spin_lock(&lockres->l_lock);
+
+ lksb = &(lockres->l_lksb);
+ if (lksb->status != DLM_NORMAL) {
+ mlog(ML_ERROR, "lksb status value of %u on lockres %s\n",
+ lksb->status, lockres->l_name);
+ spin_unlock(&lockres->l_lock);
+ return;
+ }
+
+ /* we're downconverting. */
+ if (lockres->l_requested < lockres->l_level) {
+ if (lockres->l_requested <=
+ user_highest_compat_lock_level(lockres->l_blocking)) {
+ lockres->l_blocking = LKM_NLMODE;
+ lockres->l_flags &= ~USER_LOCK_BLOCKED;
+ }
+ }
+
+ lockres->l_level = lockres->l_requested;
+ lockres->l_requested = LKM_IVMODE;
+ lockres->l_flags |= USER_LOCK_ATTACHED;
+ lockres->l_flags &= ~USER_LOCK_BUSY;
+
+ spin_unlock(&lockres->l_lock);
+
+ wake_up(&lockres->l_event);
+}
+
+static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres)
+{
+ struct inode *inode;
+ inode = user_dlm_inode_from_user_lockres(lockres);
+ if (!igrab(inode))
+ BUG();
+}
+
+static void user_dlm_unblock_lock(void *opaque);
+
+static void __user_dlm_queue_lockres(struct user_lock_res *lockres)
+{
+ if (!(lockres->l_flags & USER_LOCK_QUEUED)) {
+ user_dlm_grab_inode_ref(lockres);
+
+ INIT_WORK(&lockres->l_work, user_dlm_unblock_lock,
+ lockres);
+
+ queue_work(user_dlm_worker, &lockres->l_work);
+ lockres->l_flags |= USER_LOCK_QUEUED;
+ }
+}
+
+static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres)
+{
+ int queue = 0;
+
+ if (!(lockres->l_flags & USER_LOCK_BLOCKED))
+ return;
+
+ switch (lockres->l_blocking) {
+ case LKM_EXMODE:
+ if (!lockres->l_ex_holders && !lockres->l_ro_holders)
+ queue = 1;
+ break;
+ case LKM_PRMODE:
+ if (!lockres->l_ex_holders)
+ queue = 1;
+ break;
+ default:
+ BUG();
+ }
+
+ if (queue)
+ __user_dlm_queue_lockres(lockres);
+}
+
+static void user_bast(void *opaque, int level)
+{
+ struct user_lock_res *lockres = opaque;
+
+ mlog(0, "Blocking AST fired for lockres %s. Blocking level %d\n",
+ lockres->l_name, level);
+
+ spin_lock(&lockres->l_lock);
+ lockres->l_flags |= USER_LOCK_BLOCKED;
+ if (level > lockres->l_blocking)
+ lockres->l_blocking = level;
+
+ __user_dlm_queue_lockres(lockres);
+ spin_unlock(&lockres->l_lock);
+
+ wake_up(&lockres->l_event);
+}
+
+static void user_unlock_ast(void *opaque, enum dlm_status status)
+{
+ struct user_lock_res *lockres = opaque;
+
+ mlog(0, "UNLOCK AST called on lock %s\n", lockres->l_name);
+
+ if (status != DLM_NORMAL)
+ mlog(ML_ERROR, "Dlm returns status %d\n", status);
+
+ spin_lock(&lockres->l_lock);
+ if (lockres->l_flags & USER_LOCK_IN_TEARDOWN)
+ lockres->l_level = LKM_IVMODE;
+ else {
+ lockres->l_requested = LKM_IVMODE; /* cancel an
+ * upconvert
+ * request. */
+ lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
+ /* we want the unblock thread to look at it again
+ * now. */
+ __user_dlm_queue_lockres(lockres);
+ }
+
+ lockres->l_flags &= ~USER_LOCK_BUSY;
+ spin_unlock(&lockres->l_lock);
+
+ wake_up(&lockres->l_event);
+}
+
+static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
+{
+ struct inode *inode;
+ inode = user_dlm_inode_from_user_lockres(lockres);
+ iput(inode);
+}
+
+static void user_dlm_unblock_lock(void *opaque)
+{
+ int new_level, status;
+ struct user_lock_res *lockres = (struct user_lock_res *) opaque;
+ struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
+
+ mlog(0, "processing lockres %s\n", lockres->l_name);
+
+ spin_lock(&lockres->l_lock);
+
+ BUG_ON(!(lockres->l_flags & USER_LOCK_BLOCKED));
+ BUG_ON(!(lockres->l_flags & USER_LOCK_QUEUED));
+
+ /* notice that we don't clear USER_LOCK_BLOCKED here. That's
+ * for user_ast to do. */
+ lockres->l_flags &= ~USER_LOCK_QUEUED;
+
+ if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
+ mlog(0, "lock is in teardown so we do nothing\n");
+ spin_unlock(&lockres->l_lock);
+ goto drop_ref;
+ }
+
+ if (lockres->l_flags & USER_LOCK_BUSY) {
+ mlog(0, "BUSY flag detected...\n");
+ if (lockres->l_flags & USER_LOCK_IN_CANCEL) {
+ spin_unlock(&lockres->l_lock);
+ goto drop_ref;
+ }
+
+ lockres->l_flags |= USER_LOCK_IN_CANCEL;
+ spin_unlock(&lockres->l_lock);
+
+ status = dlmunlock(dlm,
+ &lockres->l_lksb,
+ LKM_CANCEL,
+ user_unlock_ast,
+ lockres);
+ if (status == DLM_CANCELGRANT) {
+ /* If we got this, then the ast was fired
+ * before we could cancel. We clean up our
+ * state and restart the function. */
+ spin_lock(&lockres->l_lock);
+ lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
+ spin_unlock(&lockres->l_lock);
+ } else if (status != DLM_NORMAL)
+ user_log_dlm_error("dlmunlock", status, lockres);
+ goto drop_ref;
+ }
+
+ /* If there are still incompat holders, we can exit safely
+ * without worrying about re-queueing this lock as that will
+ * happen on the last call to user_cluster_unlock. */
+ if ((lockres->l_blocking == LKM_EXMODE)
+ && (lockres->l_ex_holders || lockres->l_ro_holders)) {
+ spin_unlock(&lockres->l_lock);
+ mlog(0, "can't downconvert for ex: ro = %u, ex = %u\n",
+ lockres->l_ro_holders, lockres->l_ex_holders);
+ goto drop_ref;
+ }
+
+ if ((lockres->l_blocking == LKM_PRMODE)
+ && lockres->l_ex_holders) {
+ spin_unlock(&lockres->l_lock);
+ mlog(0, "can't downconvert for pr: ex = %u\n",
+ lockres->l_ex_holders);
+ goto drop_ref;
+ }
+
+ /* yay, we can downconvert now. */
+ new_level = user_highest_compat_lock_level(lockres->l_blocking);
+ lockres->l_requested = new_level;
+ lockres->l_flags |= USER_LOCK_BUSY;
+ mlog(0, "Downconvert lock from %d to %d\n",
+ lockres->l_level, new_level);
+ spin_unlock(&lockres->l_lock);
+
+ /* need to issue the downconvert request now... */
+ status = dlmlock(dlm,
+ new_level,
+ &lockres->l_lksb,
+ LKM_CONVERT|LKM_VALBLK,
+ lockres->l_name,
+ user_ast,
+ lockres,
+ user_bast);
+ if (status != DLM_NORMAL) {
+ user_log_dlm_error("dlmlock", status, lockres);
+ user_recover_from_dlm_error(lockres);
+ }
+
+drop_ref:
+ user_dlm_drop_inode_ref(lockres);
+}
+
+static inline void user_dlm_inc_holders(struct user_lock_res *lockres,
+ int level)
+{
+ switch(level) {
+ case LKM_EXMODE:
+ lockres->l_ex_holders++;
+ break;
+ case LKM_PRMODE:
+ lockres->l_ro_holders++;
+ break;
+ default:
+ BUG();
+ }
+}
+
+/* predict what lock level we'll be dropping down to on behalf
+ * of another node, and return true if the currently wanted
+ * level will be compatible with it. */
+static inline int
+user_may_continue_on_blocked_lock(struct user_lock_res *lockres,
+ int wanted)
+{
+ BUG_ON(!(lockres->l_flags & USER_LOCK_BLOCKED));
+
+ return wanted <= user_highest_compat_lock_level(lockres->l_blocking);
+}
+
+int user_dlm_cluster_lock(struct user_lock_res *lockres,
+ int level,
+ int lkm_flags)
+{
+ int status, local_flags;
+ struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
+
+ if (level != LKM_EXMODE &&
+ level != LKM_PRMODE) {
+ mlog(ML_ERROR, "lockres %s: invalid request!\n",
+ lockres->l_name);
+ status = -EINVAL;
+ goto bail;
+ }
+
+ mlog(0, "lockres %s: asking for %s lock, passed flags = 0x%x\n",
+ lockres->l_name,
+ (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE",
+ lkm_flags);
+
+again:
+ if (signal_pending(current)) {
+ status = -ERESTARTSYS;
+ goto bail;
+ }
+
+ spin_lock(&lockres->l_lock);
+
+ /* We only compare against the currently granted level
+ * here. If the lock is blocked waiting on a downconvert,
+ * we'll get caught below. */
+ if ((lockres->l_flags & USER_LOCK_BUSY) &&
+ (level > lockres->l_level)) {
+ /* is someone sitting in dlm_lock? If so, wait on
+ * them. */
+ spin_unlock(&lockres->l_lock);
+
+ user_wait_on_busy_lock(lockres);
+ goto again;
+ }
+
+ if ((lockres->l_flags & USER_LOCK_BLOCKED) &&
+ (!user_may_continue_on_blocked_lock(lockres, level))) {
+ /* is the lock currently blocked on behalf of
+ * another node? */
+ spin_unlock(&lockres->l_lock);
+
+ user_wait_on_blocked_lock(lockres);
+ goto again;
+ }
+
+ if (level > lockres->l_level) {
+ local_flags = lkm_flags | LKM_VALBLK;
+ if (lockres->l_level != LKM_IVMODE)
+ local_flags |= LKM_CONVERT;
+
+ lockres->l_requested = level;
+ lockres->l_flags |= USER_LOCK_BUSY;
+ spin_unlock(&lockres->l_lock);
+
+ BUG_ON(level == LKM_IVMODE);
+ BUG_ON(level == LKM_NLMODE);
+
+ mlog(0, "lock %s, get lock from %d to level = %d\n",
+ lockres->l_name, lockres->l_level, level);
+
+ /* call dlm_lock to upgrade lock now */
+ status = dlmlock(dlm,
+ level,
+ &lockres->l_lksb,
+ local_flags,
+ lockres->l_name,
+ user_ast,
+ lockres,
+ user_bast);
+ if (status != DLM_NORMAL) {
+ if ((lkm_flags & LKM_NOQUEUE) &&
+ (status == DLM_NOTQUEUED))
+ status = -EAGAIN;
+ else {
+ user_log_dlm_error("dlmlock", status, lockres);
+ status = -EINVAL;
+ }
+ user_recover_from_dlm_error(lockres);
+ goto bail;
+ }
+
+	mlog(0, "lock %s, successful return from dlmlock\n",
+ lockres->l_name);
+
+ user_wait_on_busy_lock(lockres);
+ goto again;
+ }
+
+ user_dlm_inc_holders(lockres, level);
+ spin_unlock(&lockres->l_lock);
+
+ mlog(0, "lockres %s: Got %s lock!\n", lockres->l_name,
+ (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE");
+
+ status = 0;
+bail:
+ return status;
+}
+
+static inline void user_dlm_dec_holders(struct user_lock_res *lockres,
+ int level)
+{
+ switch(level) {
+ case LKM_EXMODE:
+ BUG_ON(!lockres->l_ex_holders);
+ lockres->l_ex_holders--;
+ break;
+ case LKM_PRMODE:
+ BUG_ON(!lockres->l_ro_holders);
+ lockres->l_ro_holders--;
+ break;
+ default:
+ BUG();
+ }
+}
+
+void user_dlm_cluster_unlock(struct user_lock_res *lockres,
+ int level)
+{
+ if (level != LKM_EXMODE &&
+ level != LKM_PRMODE) {
+ mlog(ML_ERROR, "lockres %s: invalid request!\n", lockres->l_name);
+ return;
+ }
+
+ mlog(0, "lockres %s: dropping %s lock\n", lockres->l_name,
+ (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE");
+
+ spin_lock(&lockres->l_lock);
+ user_dlm_dec_holders(lockres, level);
+ __user_dlm_cond_queue_lockres(lockres);
+ spin_unlock(&lockres->l_lock);
+}
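+
+/* Illustrative usage sketch only -- the intended callers (the dlmfs
+ * file operations) are not shown in this file. A reader-side user
+ * would pair the two calls above roughly like:
+ *
+ *	status = user_dlm_cluster_lock(lockres, LKM_PRMODE, 0);
+ *	if (!status) {
+ *		... access the resource protected by lockres ...
+ *		user_dlm_cluster_unlock(lockres, LKM_PRMODE);
+ *	}
+ */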
+
+void user_dlm_write_lvb(struct inode *inode,
+ const char *val,
+ unsigned int len)
+{
+ struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
+ char *lvb = lockres->l_lksb.lvb;
+
+ BUG_ON(len > DLM_LVB_LEN);
+
+ spin_lock(&lockres->l_lock);
+
+ BUG_ON(lockres->l_level < LKM_EXMODE);
+ memcpy(lvb, val, len);
+
+ spin_unlock(&lockres->l_lock);
+}
+
+void user_dlm_read_lvb(struct inode *inode,
+ char *val,
+ unsigned int len)
+{
+ struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
+ char *lvb = lockres->l_lksb.lvb;
+
+ BUG_ON(len > DLM_LVB_LEN);
+
+ spin_lock(&lockres->l_lock);
+
+ BUG_ON(lockres->l_level < LKM_PRMODE);
+ memcpy(val, lvb, len);
+
+ spin_unlock(&lockres->l_lock);
+}
+
+void user_dlm_lock_res_init(struct user_lock_res *lockres,
+ struct dentry *dentry)
+{
+ memset(lockres, 0, sizeof(*lockres));
+
+ spin_lock_init(&lockres->l_lock);
+ init_waitqueue_head(&lockres->l_event);
+ lockres->l_level = LKM_IVMODE;
+ lockres->l_requested = LKM_IVMODE;
+ lockres->l_blocking = LKM_IVMODE;
+
+ /* should have been checked before getting here. */
+ BUG_ON(dentry->d_name.len >= USER_DLM_LOCK_ID_MAX_LEN);
+
+ memcpy(lockres->l_name,
+ dentry->d_name.name,
+ dentry->d_name.len);
+}
+
+int user_dlm_destroy_lock(struct user_lock_res *lockres)
+{
+ int status = -EBUSY;
+ struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);
+
+ mlog(0, "asked to destroy %s\n", lockres->l_name);
+
+ spin_lock(&lockres->l_lock);
+ while (lockres->l_flags & USER_LOCK_BUSY) {
+ spin_unlock(&lockres->l_lock);
+
+ mlog(0, "lock %s is busy\n", lockres->l_name);
+
+ user_wait_on_busy_lock(lockres);
+
+ spin_lock(&lockres->l_lock);
+ }
+
+ if (lockres->l_ro_holders || lockres->l_ex_holders) {
+ spin_unlock(&lockres->l_lock);
+ mlog(0, "lock %s has holders\n", lockres->l_name);
+ goto bail;
+ }
+
+ status = 0;
+ if (!(lockres->l_flags & USER_LOCK_ATTACHED)) {
+ spin_unlock(&lockres->l_lock);
+ mlog(0, "lock %s is not attached\n", lockres->l_name);
+ goto bail;
+ }
+
+ lockres->l_flags &= ~USER_LOCK_ATTACHED;
+ lockres->l_flags |= USER_LOCK_BUSY;
+ lockres->l_flags |= USER_LOCK_IN_TEARDOWN;
+ spin_unlock(&lockres->l_lock);
+
+ mlog(0, "unlocking lockres %s\n", lockres->l_name);
+ status = dlmunlock(dlm,
+ &lockres->l_lksb,
+ LKM_VALBLK,
+ user_unlock_ast,
+ lockres);
+ if (status != DLM_NORMAL) {
+ user_log_dlm_error("dlmunlock", status, lockres);
+ status = -EINVAL;
+ goto bail;
+ }
+
+ user_wait_on_busy_lock(lockres);
+
+ status = 0;
+bail:
+ return status;
+}
+
+struct dlm_ctxt *user_dlm_register_context(struct qstr *name)
+{
+ struct dlm_ctxt *dlm;
+ u32 dlm_key;
+ char *domain;
+
+ domain = kmalloc(name->len + 1, GFP_KERNEL);
+ if (!domain) {
+ mlog_errno(-ENOMEM);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dlm_key = crc32_le(0, name->name, name->len);
+
+ snprintf(domain, name->len + 1, "%.*s", name->len, name->name);
+
+ dlm = dlm_register_domain(domain, dlm_key);
+ if (IS_ERR(dlm))
+ mlog_errno(PTR_ERR(dlm));
+
+ kfree(domain);
+ return dlm;
+}
+
+void user_dlm_unregister_context(struct dlm_ctxt *dlm)
+{
+ dlm_unregister_domain(dlm);
+}
diff --git a/fs/ocfs2/dlm/userdlm.h b/fs/ocfs2/dlm/userdlm.h
new file mode 100644
index 00000000000..04178bc40b7
--- /dev/null
+++ b/fs/ocfs2/dlm/userdlm.h
@@ -0,0 +1,111 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * userdlm.h
+ *
+ * Userspace dlm defines
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+
+#ifndef USERDLM_H
+#define USERDLM_H
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+/* user_lock_res->l_flags flags. */
+#define USER_LOCK_ATTACHED (0x00000001) /* have we initialized
+ * the lvb */
+#define USER_LOCK_BUSY (0x00000002) /* we are currently in
+ * dlm_lock */
+#define USER_LOCK_BLOCKED (0x00000004) /* blocked waiting to
+ * downconvert*/
+#define USER_LOCK_IN_TEARDOWN (0x00000008) /* we're currently
+ * destroying this
+ * lock. */
+#define USER_LOCK_QUEUED (0x00000010) /* lock is on the
+ * workqueue */
+#define USER_LOCK_IN_CANCEL (0x00000020)
+
+struct user_lock_res {
+ spinlock_t l_lock;
+
+ int l_flags;
+
+#define USER_DLM_LOCK_ID_MAX_LEN 32
+ char l_name[USER_DLM_LOCK_ID_MAX_LEN];
+ int l_level;
+ unsigned int l_ro_holders;
+ unsigned int l_ex_holders;
+ struct dlm_lockstatus l_lksb;
+
+ int l_requested;
+ int l_blocking;
+
+ wait_queue_head_t l_event;
+
+ struct work_struct l_work;
+};
+
+extern struct workqueue_struct *user_dlm_worker;
+
+void user_dlm_lock_res_init(struct user_lock_res *lockres,
+ struct dentry *dentry);
+int user_dlm_destroy_lock(struct user_lock_res *lockres);
+int user_dlm_cluster_lock(struct user_lock_res *lockres,
+ int level,
+ int lkm_flags);
+void user_dlm_cluster_unlock(struct user_lock_res *lockres,
+ int level);
+void user_dlm_write_lvb(struct inode *inode,
+ const char *val,
+ unsigned int len);
+void user_dlm_read_lvb(struct inode *inode,
+ char *val,
+ unsigned int len);
+struct dlm_ctxt *user_dlm_register_context(struct qstr *name);
+void user_dlm_unregister_context(struct dlm_ctxt *dlm);
+
+struct dlmfs_inode_private {
+ struct dlm_ctxt *ip_dlm;
+
+ struct user_lock_res ip_lockres; /* unused for directories. */
+ struct inode *ip_parent;
+
+ struct inode ip_vfs_inode;
+};
+
+static inline struct dlmfs_inode_private *
+DLMFS_I(struct inode *inode)
+{
+ return container_of(inode,
+ struct dlmfs_inode_private,
+ ip_vfs_inode);
+}
+
+struct dlmfs_filp_private {
+ int fp_lock_level;
+};
+
+#define DLMFS_MAGIC 0x76a9f425
+
+#endif /* USERDLM_H */
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
new file mode 100644
index 00000000000..e971ec2f840
--- /dev/null
+++ b/fs/ocfs2/dlmglue.c
@@ -0,0 +1,2904 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmglue.c
+ *
+ * Code which implements an OCFS2 specific interface to our DLM.
+ *
+ * Copyright (C) 2003, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/smp_lock.h>
+#include <linux/crc32.h>
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <cluster/heartbeat.h>
+#include <cluster/nodemanager.h>
+#include <cluster/tcp.h>
+
+#include <dlm/dlmapi.h>
+
+#define MLOG_MASK_PREFIX ML_DLM_GLUE
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#include "alloc.h"
+#include "dlmglue.h"
+#include "extent_map.h"
+#include "heartbeat.h"
+#include "inode.h"
+#include "journal.h"
+#include "slot_map.h"
+#include "super.h"
+#include "uptodate.h"
+#include "vote.h"
+
+#include "buffer_head_io.h"
+
+struct ocfs2_mask_waiter {
+ struct list_head mw_item;
+ int mw_status;
+ struct completion mw_complete;
+ unsigned long mw_mask;
+ unsigned long mw_goal;
+};
+
+static void ocfs2_inode_ast_func(void *opaque);
+static void ocfs2_inode_bast_func(void *opaque,
+ int level);
+static void ocfs2_super_ast_func(void *opaque);
+static void ocfs2_super_bast_func(void *opaque,
+ int level);
+static void ocfs2_rename_ast_func(void *opaque);
+static void ocfs2_rename_bast_func(void *opaque,
+ int level);
+
+/* so far, all locks have gotten along with the same unlock ast */
+static void ocfs2_unlock_ast_func(void *opaque,
+ enum dlm_status status);
+static int ocfs2_do_unblock_meta(struct inode *inode,
+ int *requeue);
+static int ocfs2_unblock_meta(struct ocfs2_lock_res *lockres,
+ int *requeue);
+static int ocfs2_unblock_data(struct ocfs2_lock_res *lockres,
+ int *requeue);
+static int ocfs2_unblock_inode_lock(struct ocfs2_lock_res *lockres,
+ int *requeue);
+static int ocfs2_unblock_osb_lock(struct ocfs2_lock_res *lockres,
+ int *requeue);
+typedef void (ocfs2_convert_worker_t)(struct ocfs2_lock_res *, int);
+static int ocfs2_generic_unblock_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int *requeue,
+ ocfs2_convert_worker_t *worker);
+
+struct ocfs2_lock_res_ops {
+ void (*ast)(void *);
+ void (*bast)(void *, int);
+ void (*unlock_ast)(void *, enum dlm_status);
+ int (*unblock)(struct ocfs2_lock_res *, int *);
+};
+
+static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
+ .ast = ocfs2_inode_ast_func,
+ .bast = ocfs2_inode_bast_func,
+ .unlock_ast = ocfs2_unlock_ast_func,
+ .unblock = ocfs2_unblock_inode_lock,
+};
+
+static struct ocfs2_lock_res_ops ocfs2_inode_meta_lops = {
+ .ast = ocfs2_inode_ast_func,
+ .bast = ocfs2_inode_bast_func,
+ .unlock_ast = ocfs2_unlock_ast_func,
+ .unblock = ocfs2_unblock_meta,
+};
+
+static void ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
+ int blocking);
+
+static struct ocfs2_lock_res_ops ocfs2_inode_data_lops = {
+ .ast = ocfs2_inode_ast_func,
+ .bast = ocfs2_inode_bast_func,
+ .unlock_ast = ocfs2_unlock_ast_func,
+ .unblock = ocfs2_unblock_data,
+};
+
+static struct ocfs2_lock_res_ops ocfs2_super_lops = {
+ .ast = ocfs2_super_ast_func,
+ .bast = ocfs2_super_bast_func,
+ .unlock_ast = ocfs2_unlock_ast_func,
+ .unblock = ocfs2_unblock_osb_lock,
+};
+
+static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
+ .ast = ocfs2_rename_ast_func,
+ .bast = ocfs2_rename_bast_func,
+ .unlock_ast = ocfs2_unlock_ast_func,
+ .unblock = ocfs2_unblock_osb_lock,
+};
+
+static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
+{
+ return lockres->l_type == OCFS2_LOCK_TYPE_META ||
+ lockres->l_type == OCFS2_LOCK_TYPE_DATA ||
+ lockres->l_type == OCFS2_LOCK_TYPE_RW;
+}
+
+static inline int ocfs2_is_super_lock(struct ocfs2_lock_res *lockres)
+{
+ return lockres->l_type == OCFS2_LOCK_TYPE_SUPER;
+}
+
+static inline int ocfs2_is_rename_lock(struct ocfs2_lock_res *lockres)
+{
+ return lockres->l_type == OCFS2_LOCK_TYPE_RENAME;
+}
+
+static inline struct ocfs2_super *ocfs2_lock_res_super(struct ocfs2_lock_res *lockres)
+{
+ BUG_ON(!ocfs2_is_super_lock(lockres)
+ && !ocfs2_is_rename_lock(lockres));
+
+ return (struct ocfs2_super *) lockres->l_priv;
+}
+
+static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
+{
+ BUG_ON(!ocfs2_is_inode_lock(lockres));
+
+ return (struct inode *) lockres->l_priv;
+}
+
+static int ocfs2_lock_create(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int level,
+ int dlm_flags);
+static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
+ int wanted);
+static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int level);
+static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
+static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
+static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
+static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
+static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres);
+static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
+ int convert);
+#define ocfs2_log_dlm_error(_func, _stat, _lockres) do { \
+ mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on " \
+ "resource %s: %s\n", dlm_errname(_stat), _func, \
+ _lockres->l_name, dlm_errmsg(_stat)); \
+} while (0)
+static void ocfs2_vote_on_unlock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres);
+static int ocfs2_meta_lock_update(struct inode *inode,
+ struct buffer_head **bh);
+static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
+static inline int ocfs2_highest_compat_lock_level(int level);
+static inline int ocfs2_can_downconvert_meta_lock(struct inode *inode,
+ struct ocfs2_lock_res *lockres,
+ int new_level);
+
+static char *ocfs2_lock_type_strings[] = {
+ [OCFS2_LOCK_TYPE_META] = "Meta",
+ [OCFS2_LOCK_TYPE_DATA] = "Data",
+ [OCFS2_LOCK_TYPE_SUPER] = "Super",
+ [OCFS2_LOCK_TYPE_RENAME] = "Rename",
+	/* Need to differentiate from [R]ename.. serializing writes is the
+ * important job it does, anyway. */
+ [OCFS2_LOCK_TYPE_RW] = "Write/Read",
+};
+
+static char *ocfs2_lock_type_string(enum ocfs2_lock_type type)
+{
+ mlog_bug_on_msg(type >= OCFS2_NUM_LOCK_TYPES, "%d\n", type);
+ return ocfs2_lock_type_strings[type];
+}
+
+static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
+ u64 blkno,
+ u32 generation,
+ char *name)
+{
+ int len;
+
+ mlog_entry_void();
+
+ BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
+
+ len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016"MLFx64"%08x",
+ ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD, blkno,
+ generation);
+
+ BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));
+
+ mlog(0, "built lock resource with name: %s\n", name);
+
+ mlog_exit_void();
+}
+
+static spinlock_t ocfs2_dlm_tracking_lock = SPIN_LOCK_UNLOCKED;
+
+static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
+ struct ocfs2_dlm_debug *dlm_debug)
+{
+ mlog(0, "Add tracking for lockres %s\n", res->l_name);
+
+ spin_lock(&ocfs2_dlm_tracking_lock);
+ list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
+ spin_unlock(&ocfs2_dlm_tracking_lock);
+}
+
+static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
+{
+ spin_lock(&ocfs2_dlm_tracking_lock);
+ if (!list_empty(&res->l_debug_list))
+ list_del_init(&res->l_debug_list);
+ spin_unlock(&ocfs2_dlm_tracking_lock);
+}
+
+static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *res,
+ enum ocfs2_lock_type type,
+ u64 blkno,
+ u32 generation,
+ struct ocfs2_lock_res_ops *ops,
+ void *priv)
+{
+ ocfs2_build_lock_name(type, blkno, generation, res->l_name);
+
+ res->l_type = type;
+ res->l_ops = ops;
+ res->l_priv = priv;
+
+ res->l_level = LKM_IVMODE;
+ res->l_requested = LKM_IVMODE;
+ res->l_blocking = LKM_IVMODE;
+ res->l_action = OCFS2_AST_INVALID;
+ res->l_unlock_action = OCFS2_UNLOCK_INVALID;
+
+ res->l_flags = OCFS2_LOCK_INITIALIZED;
+
+ ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
+}
+
+void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
+{
+ /* This also clears out the lock status block */
+ memset(res, 0, sizeof(struct ocfs2_lock_res));
+ spin_lock_init(&res->l_lock);
+ init_waitqueue_head(&res->l_event);
+ INIT_LIST_HEAD(&res->l_blocked_list);
+ INIT_LIST_HEAD(&res->l_mask_waiters);
+}
+
+void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
+ enum ocfs2_lock_type type,
+ struct inode *inode)
+{
+ struct ocfs2_lock_res_ops *ops;
+
+ switch(type) {
+ case OCFS2_LOCK_TYPE_RW:
+ ops = &ocfs2_inode_rw_lops;
+ break;
+ case OCFS2_LOCK_TYPE_META:
+ ops = &ocfs2_inode_meta_lops;
+ break;
+ case OCFS2_LOCK_TYPE_DATA:
+ ops = &ocfs2_inode_data_lops;
+ break;
+ default:
+ mlog_bug_on_msg(1, "type: %d\n", type);
+ ops = NULL; /* thanks, gcc */
+ break;
+	}
+
+ ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type,
+ OCFS2_I(inode)->ip_blkno,
+ inode->i_generation, ops, inode);
+}
+
+static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
+ struct ocfs2_super *osb)
+{
+ /* Superblock lockres doesn't come from a slab so we call init
+ * once on it manually. */
+ ocfs2_lock_res_init_once(res);
+ ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
+ OCFS2_SUPER_BLOCK_BLKNO, 0,
+ &ocfs2_super_lops, osb);
+}
+
+static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
+ struct ocfs2_super *osb)
+{
+ /* Rename lockres doesn't come from a slab so we call init
+ * once on it manually. */
+ ocfs2_lock_res_init_once(res);
+ ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME, 0, 0,
+ &ocfs2_rename_lops, osb);
+}
+
+void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
+{
+ mlog_entry_void();
+
+ if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
+ return;
+
+ ocfs2_remove_lockres_tracking(res);
+
+ mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
+ "Lockres %s is on the blocked list\n",
+ res->l_name);
+ mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
+ "Lockres %s has mask waiters pending\n",
+ res->l_name);
+ mlog_bug_on_msg(spin_is_locked(&res->l_lock),
+ "Lockres %s is locked\n",
+ res->l_name);
+ mlog_bug_on_msg(res->l_ro_holders,
+ "Lockres %s has %u ro holders\n",
+ res->l_name, res->l_ro_holders);
+ mlog_bug_on_msg(res->l_ex_holders,
+ "Lockres %s has %u ex holders\n",
+ res->l_name, res->l_ex_holders);
+
+ /* Need to clear out the lock status block for the dlm */
+ memset(&res->l_lksb, 0, sizeof(res->l_lksb));
+
+ res->l_flags = 0UL;
+ mlog_exit_void();
+}
+
+static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
+ int level)
+{
+ mlog_entry_void();
+
+ BUG_ON(!lockres);
+
+ switch(level) {
+ case LKM_EXMODE:
+ lockres->l_ex_holders++;
+ break;
+ case LKM_PRMODE:
+ lockres->l_ro_holders++;
+ break;
+ default:
+ BUG();
+ }
+
+ mlog_exit_void();
+}
+
+static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
+ int level)
+{
+ mlog_entry_void();
+
+ BUG_ON(!lockres);
+
+ switch(level) {
+ case LKM_EXMODE:
+ BUG_ON(!lockres->l_ex_holders);
+ lockres->l_ex_holders--;
+ break;
+ case LKM_PRMODE:
+ BUG_ON(!lockres->l_ro_holders);
+ lockres->l_ro_holders--;
+ break;
+ default:
+ BUG();
+ }
+ mlog_exit_void();
+}
+
+/* WARNING: This function lives in a world where the only three lock
+ * levels are EX, PR, and NL. It *will* have to be adjusted when more
+ * lock types are added. */
+static inline int ocfs2_highest_compat_lock_level(int level)
+{
+ int new_level = LKM_EXMODE;
+
+ if (level == LKM_EXMODE)
+ new_level = LKM_NLMODE;
+ else if (level == LKM_PRMODE)
+ new_level = LKM_PRMODE;
+ return new_level;
+}
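+
+/* Concretely, the mapping implemented above is:
+ *	blocking level EX -> highest compatible level is NL
+ *	blocking level PR -> highest compatible level is PR
+ *	blocking level NL -> highest compatible level is EX
+ */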
+
+static void lockres_set_flags(struct ocfs2_lock_res *lockres,
+ unsigned long newflags)
+{
+ struct list_head *pos, *tmp;
+ struct ocfs2_mask_waiter *mw;
+
+ assert_spin_locked(&lockres->l_lock);
+
+ lockres->l_flags = newflags;
+
+ list_for_each_safe(pos, tmp, &lockres->l_mask_waiters) {
+ mw = list_entry(pos, struct ocfs2_mask_waiter, mw_item);
+ if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
+ continue;
+
+ list_del_init(&mw->mw_item);
+ mw->mw_status = 0;
+ complete(&mw->mw_complete);
+ }
+}
+static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
+{
+ lockres_set_flags(lockres, lockres->l_flags | or);
+}
+static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
+ unsigned long clear)
+{
+ lockres_set_flags(lockres, lockres->l_flags & ~clear);
+}
+
+static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
+{
+ mlog_entry_void();
+
+ BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
+ BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
+ BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
+ BUG_ON(lockres->l_blocking <= LKM_NLMODE);
+
+ lockres->l_level = lockres->l_requested;
+ if (lockres->l_level <=
+ ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
+ lockres->l_blocking = LKM_NLMODE;
+ lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
+ }
+ lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+
+ mlog_exit_void();
+}
+
+static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
+{
+ mlog_entry_void();
+
+ BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
+ BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
+
+	/* A convert from RO to EX doesn't really need anything as our
+	 * information is already up to date. A convert from NL to
+	 * *anything*, however, should mark us as needing an
+	 * update. */
+ if (lockres->l_level == LKM_NLMODE)
+ lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
+
+ lockres->l_level = lockres->l_requested;
+ lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+
+ mlog_exit_void();
+}
+
+static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
+{
+ mlog_entry_void();
+
+	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
+ BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
+
+ if (lockres->l_requested > LKM_NLMODE &&
+ !(lockres->l_flags & OCFS2_LOCK_LOCAL))
+ lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
+
+ lockres->l_level = lockres->l_requested;
+ lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
+ lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+
+ mlog_exit_void();
+}
+
+static void ocfs2_inode_ast_func(void *opaque)
+{
+ struct ocfs2_lock_res *lockres = opaque;
+ struct inode *inode;
+ struct dlm_lockstatus *lksb;
+ unsigned long flags;
+
+ mlog_entry_void();
+
+ inode = ocfs2_lock_res_inode(lockres);
+
+ mlog(0, "AST fired for inode %"MLFu64", l_action = %u, type = %s\n",
+ OCFS2_I(inode)->ip_blkno, lockres->l_action,
+ ocfs2_lock_type_string(lockres->l_type));
+
+ BUG_ON(!ocfs2_is_inode_lock(lockres));
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+
+ lksb = &(lockres->l_lksb);
+ if (lksb->status != DLM_NORMAL) {
+ mlog(ML_ERROR, "ocfs2_inode_ast_func: lksb status value of %u "
+ "on inode %"MLFu64"\n", lksb->status,
+ OCFS2_I(inode)->ip_blkno);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ mlog_exit_void();
+ return;
+ }
+
+ switch(lockres->l_action) {
+ case OCFS2_AST_ATTACH:
+ ocfs2_generic_handle_attach_action(lockres);
+ lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
+ break;
+ case OCFS2_AST_CONVERT:
+ ocfs2_generic_handle_convert_action(lockres);
+ break;
+ case OCFS2_AST_DOWNCONVERT:
+ ocfs2_generic_handle_downconvert_action(lockres);
+ break;
+ default:
+ mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u "
+ "lockres flags = 0x%lx, unlock action: %u\n",
+ lockres->l_name, lockres->l_action, lockres->l_flags,
+ lockres->l_unlock_action);
+
+ BUG();
+ }
+
+	/* Data and rw locks ignore the refresh flag for now. */
+ if (lockres->l_type != OCFS2_LOCK_TYPE_META)
+ lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
+
+ /* set it to something invalid so if we get called again we
+ * can catch it. */
+ lockres->l_action = OCFS2_AST_INVALID;
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ wake_up(&lockres->l_event);
+
+ mlog_exit_void();
+}
+
+static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
+ int level)
+{
+ int needs_downconvert = 0;
+ mlog_entry_void();
+
+ assert_spin_locked(&lockres->l_lock);
+
+ lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
+
+ if (level > lockres->l_blocking) {
+ /* only schedule a downconvert if we haven't already scheduled
+ * one that goes low enough to satisfy the level we're
+		 * blocking. This also catches the case where we get
+ * duplicate BASTs */
+ if (ocfs2_highest_compat_lock_level(level) <
+ ocfs2_highest_compat_lock_level(lockres->l_blocking))
+ needs_downconvert = 1;
+
+ lockres->l_blocking = level;
+ }
+
+ mlog_exit(needs_downconvert);
+ return needs_downconvert;
+}
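+
+/* Worked example of the scheduling check above: if we were already
+ * blocked at PR (highest compatible level PR) and a BAST now arrives
+ * for EX (highest compatible level NL), NL < PR so a further
+ * downconvert gets scheduled; a duplicate BAST at PR changes nothing. */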
+
+static void ocfs2_generic_bast_func(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int level)
+{
+ int needs_downconvert;
+ unsigned long flags;
+
+ mlog_entry_void();
+
+ BUG_ON(level <= LKM_NLMODE);
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
+ if (needs_downconvert)
+ ocfs2_schedule_blocked_lock(osb, lockres);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ ocfs2_kick_vote_thread(osb);
+
+ wake_up(&lockres->l_event);
+ mlog_exit_void();
+}
+
+static void ocfs2_inode_bast_func(void *opaque, int level)
+{
+ struct ocfs2_lock_res *lockres = opaque;
+ struct inode *inode;
+ struct ocfs2_super *osb;
+
+ mlog_entry_void();
+
+ BUG_ON(!ocfs2_is_inode_lock(lockres));
+
+ inode = ocfs2_lock_res_inode(lockres);
+ osb = OCFS2_SB(inode->i_sb);
+
+ mlog(0, "BAST fired for inode %"MLFu64", blocking = %d, level = %d "
+ "type = %s\n", OCFS2_I(inode)->ip_blkno, level,
+ lockres->l_level,
+ ocfs2_lock_type_string(lockres->l_type));
+
+ ocfs2_generic_bast_func(osb, lockres, level);
+
+ mlog_exit_void();
+}
+
+static void ocfs2_generic_ast_func(struct ocfs2_lock_res *lockres,
+ int ignore_refresh)
+{
+ struct dlm_lockstatus *lksb = &lockres->l_lksb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+
+ if (lksb->status != DLM_NORMAL) {
+ mlog(ML_ERROR, "lockres %s: lksb status value of %u!\n",
+ lockres->l_name, lksb->status);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ return;
+ }
+
+ switch(lockres->l_action) {
+ case OCFS2_AST_ATTACH:
+ ocfs2_generic_handle_attach_action(lockres);
+ break;
+ case OCFS2_AST_CONVERT:
+ ocfs2_generic_handle_convert_action(lockres);
+ break;
+ case OCFS2_AST_DOWNCONVERT:
+ ocfs2_generic_handle_downconvert_action(lockres);
+ break;
+ default:
+ BUG();
+ }
+
+ if (ignore_refresh)
+ lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
+
+ /* set it to something invalid so if we get called again we
+ * can catch it. */
+ lockres->l_action = OCFS2_AST_INVALID;
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ wake_up(&lockres->l_event);
+}
+
+static void ocfs2_super_ast_func(void *opaque)
+{
+ struct ocfs2_lock_res *lockres = opaque;
+
+ mlog_entry_void();
+ mlog(0, "Superblock AST fired\n");
+
+ BUG_ON(!ocfs2_is_super_lock(lockres));
+ ocfs2_generic_ast_func(lockres, 0);
+
+ mlog_exit_void();
+}
+
+static void ocfs2_super_bast_func(void *opaque,
+ int level)
+{
+ struct ocfs2_lock_res *lockres = opaque;
+ struct ocfs2_super *osb;
+
+ mlog_entry_void();
+ mlog(0, "Superblock BAST fired\n");
+
+ BUG_ON(!ocfs2_is_super_lock(lockres));
+ osb = ocfs2_lock_res_super(lockres);
+ ocfs2_generic_bast_func(osb, lockres, level);
+
+ mlog_exit_void();
+}
+
+static void ocfs2_rename_ast_func(void *opaque)
+{
+ struct ocfs2_lock_res *lockres = opaque;
+
+ mlog_entry_void();
+
+ mlog(0, "Rename AST fired\n");
+
+ BUG_ON(!ocfs2_is_rename_lock(lockres));
+
+ ocfs2_generic_ast_func(lockres, 1);
+
+ mlog_exit_void();
+}
+
+static void ocfs2_rename_bast_func(void *opaque,
+ int level)
+{
+ struct ocfs2_lock_res *lockres = opaque;
+ struct ocfs2_super *osb;
+
+ mlog_entry_void();
+
+ mlog(0, "Rename BAST fired\n");
+
+ BUG_ON(!ocfs2_is_rename_lock(lockres));
+
+ osb = ocfs2_lock_res_super(lockres);
+ ocfs2_generic_bast_func(osb, lockres, level);
+
+ mlog_exit_void();
+}
+
+static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
+ int convert)
+{
+ unsigned long flags;
+
+ mlog_entry_void();
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+ if (convert)
+ lockres->l_action = OCFS2_AST_INVALID;
+ else
+ lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ wake_up(&lockres->l_event);
+ mlog_exit_void();
+}
+
+/* Note: If we detect another process working on the lock (i.e.,
+ * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
+ * to do the right thing in that case.
+ */
+static int ocfs2_lock_create(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int level,
+ int dlm_flags)
+{
+ int ret = 0;
+ enum dlm_status status;
+ unsigned long flags;
+
+ mlog_entry_void();
+
+ mlog(0, "lock %s, level = %d, flags = %d\n", lockres->l_name, level,
+ dlm_flags);
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
+ (lockres->l_flags & OCFS2_LOCK_BUSY)) {
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ goto bail;
+ }
+
+ lockres->l_action = OCFS2_AST_ATTACH;
+ lockres->l_requested = level;
+ lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ status = dlmlock(osb->dlm,
+ level,
+ &lockres->l_lksb,
+ dlm_flags,
+ lockres->l_name,
+ lockres->l_ops->ast,
+ lockres,
+ lockres->l_ops->bast);
+ if (status != DLM_NORMAL) {
+ ocfs2_log_dlm_error("dlmlock", status, lockres);
+ ret = -EINVAL;
+ ocfs2_recover_from_dlm_error(lockres, 1);
+ }
+
+	mlog(0, "lock %s, successful return from dlmlock\n", lockres->l_name);
+
+bail:
+ mlog_exit(ret);
+ return ret;
+}
+
+static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
+ int flag)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ ret = lockres->l_flags & flag;
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ return ret;
+}
+
+static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
+{
+ wait_event(lockres->l_event,
+ !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
+}
+
+static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
+{
+ wait_event(lockres->l_event,
+ !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
+}
+
+/* predict what lock level we'll be dropping down to on behalf
+ * of another node, and return true if the currently wanted
+ * level will be compatible with it. */
+static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
+ int wanted)
+{
+ BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
+
+ return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
+}
+
+static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
+{
+ INIT_LIST_HEAD(&mw->mw_item);
+ init_completion(&mw->mw_complete);
+}
+
+static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
+{
+ wait_for_completion(&mw->mw_complete);
+ /* Re-arm the completion in case we want to wait on it again */
+ INIT_COMPLETION(mw->mw_complete);
+ return mw->mw_status;
+}
+
+static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
+ struct ocfs2_mask_waiter *mw,
+ unsigned long mask,
+ unsigned long goal)
+{
+ BUG_ON(!list_empty(&mw->mw_item));
+
+ assert_spin_locked(&lockres->l_lock);
+
+ list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
+ mw->mw_mask = mask;
+ mw->mw_goal = goal;
+}
+
+/* returns 0 if the mw that was removed was already satisfied, -EBUSY
+ * if the mask still hadn't reached its goal */
+static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
+ struct ocfs2_mask_waiter *mw)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ if (!list_empty(&mw->mw_item)) {
+ if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
+ ret = -EBUSY;
+
+ list_del_init(&mw->mw_item);
+ init_completion(&mw->mw_complete);
+ }
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ return ret;
+
+}
+
+static int ocfs2_cluster_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int level,
+ int lkm_flags,
+ int arg_flags)
+{
+ struct ocfs2_mask_waiter mw;
+ enum dlm_status status;
+ int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
+ int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
+ unsigned long flags;
+
+ mlog_entry_void();
+
+ ocfs2_init_mask_waiter(&mw);
+
+again:
+ wait = 0;
+
+ if (catch_signals && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ goto out;
+ }
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+
+ mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
+ "Cluster lock called on freeing lockres %s! flags "
+ "0x%lx\n", lockres->l_name, lockres->l_flags);
+
+ /* We only compare against the currently granted level
+ * here. If the lock is blocked waiting on a downconvert,
+ * we'll get caught below. */
+ if (lockres->l_flags & OCFS2_LOCK_BUSY &&
+ level > lockres->l_level) {
+ /* is someone sitting in dlm_lock? If so, wait on
+ * them. */
+ lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
+ wait = 1;
+ goto unlock;
+ }
+
+ if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
+ /* lock has not been created yet. */
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ ret = ocfs2_lock_create(osb, lockres, LKM_NLMODE, 0);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+ goto again;
+ }
+
+ if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
+ !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
+		/* the lock is currently blocked on behalf of
+		 * another node */
+ lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
+ wait = 1;
+ goto unlock;
+ }
+
+ if (level > lockres->l_level) {
+ if (lockres->l_action != OCFS2_AST_INVALID)
+ mlog(ML_ERROR, "lockres %s has action %u pending\n",
+ lockres->l_name, lockres->l_action);
+
+ lockres->l_action = OCFS2_AST_CONVERT;
+ lockres->l_requested = level;
+ lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ BUG_ON(level == LKM_IVMODE);
+ BUG_ON(level == LKM_NLMODE);
+
+ mlog(0, "lock %s, convert from %d to level = %d\n",
+ lockres->l_name, lockres->l_level, level);
+
+ /* call dlm_lock to upgrade lock now */
+ status = dlmlock(osb->dlm,
+ level,
+ &lockres->l_lksb,
+ lkm_flags|LKM_CONVERT|LKM_VALBLK,
+ lockres->l_name,
+ lockres->l_ops->ast,
+ lockres,
+ lockres->l_ops->bast);
+ if (status != DLM_NORMAL) {
+ if ((lkm_flags & LKM_NOQUEUE) &&
+ (status == DLM_NOTQUEUED))
+ ret = -EAGAIN;
+ else {
+ ocfs2_log_dlm_error("dlmlock", status,
+ lockres);
+ ret = -EINVAL;
+ }
+ ocfs2_recover_from_dlm_error(lockres, 1);
+ goto out;
+ }
+
+		mlog(0, "lock %s, successful return from dlmlock\n",
+ lockres->l_name);
+
+ /* At this point we've gone inside the dlm and need to
+ * complete our work regardless. */
+ catch_signals = 0;
+
+ /* wait for busy to clear and carry on */
+ goto again;
+ }
+
+ /* Ok, if we get here then we're good to go. */
+ ocfs2_inc_holders(lockres, level);
+
+ ret = 0;
+unlock:
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+out:
+ /*
+ * This is helping work around a lock inversion between the page lock
+ * and dlm locks. One path holds the page lock while calling aops
+ * which block acquiring dlm locks. The voting thread holds dlm
+ * locks while acquiring page locks while down converting data locks.
+ * This block is helping an aop path notice the inversion and back
+ * off to unlock its page lock before trying the dlm lock again.
+ */
+ if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
+ mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
+ wait = 0;
+ if (lockres_remove_mask_waiter(lockres, &mw))
+ ret = -EAGAIN;
+ else
+ goto again;
+ }
+ if (wait) {
+ ret = ocfs2_wait_for_mask(&mw);
+ if (ret == 0)
+ goto again;
+ mlog_errno(ret);
+ }
+
+ mlog_exit(ret);
+ return ret;
+}
+
+static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int level)
+{
+ unsigned long flags;
+
+ mlog_entry_void();
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ ocfs2_dec_holders(lockres, level);
+ ocfs2_vote_on_unlock(osb, lockres);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ mlog_exit_void();
+}
+
+static int ocfs2_create_new_inode_lock(struct inode *inode,
+ struct ocfs2_lock_res *lockres)
+{
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
+ lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ return ocfs2_lock_create(osb, lockres, LKM_EXMODE, LKM_LOCAL);
+}
+
+/* Grants us an EX lock on the data and metadata resources, skipping
+ * the normal cluster directory lookup. Use this ONLY on newly created
+ * inodes which other nodes can't possibly see, and which haven't been
+ * hashed in the inode hash yet. This can give us a good performance
+ * increase as it'll skip the network broadcast normally associated
+ * with creating a new lock resource. */
+int ocfs2_create_new_inode_locks(struct inode *inode)
+{
+ int ret;
+
+ BUG_ON(!inode);
+ BUG_ON(!ocfs2_inode_is_new(inode));
+
+ mlog_entry_void();
+
+ mlog(0, "Inode %"MLFu64"\n", OCFS2_I(inode)->ip_blkno);
+
+	/* Note that we don't increment any of the holder counts, nor
+ * do we add anything to a journal handle. Since this is
+ * supposed to be a new inode which the cluster doesn't know
+ * about yet, there is no need to. As far as the LVB handling
+ * is concerned, this is basically like acquiring an EX lock
+ * on a resource which has an invalid one -- we'll set it
+ * valid when we release the EX. */
+
+ ret = ocfs2_create_new_inode_lock(inode,
+ &OCFS2_I(inode)->ip_rw_lockres);
+ if (ret) {
+ mlog_errno(ret);
+ goto bail;
+ }
+
+ ret = ocfs2_create_new_inode_lock(inode,
+ &OCFS2_I(inode)->ip_meta_lockres);
+ if (ret) {
+ mlog_errno(ret);
+ goto bail;
+ }
+
+ ret = ocfs2_create_new_inode_lock(inode,
+ &OCFS2_I(inode)->ip_data_lockres);
+ if (ret) {
+ mlog_errno(ret);
+ goto bail;
+ }
+
+bail:
+ mlog_exit(ret);
+ return ret;
+}
+
+int ocfs2_rw_lock(struct inode *inode, int write)
+{
+ int status, level;
+ struct ocfs2_lock_res *lockres;
+
+ BUG_ON(!inode);
+
+ mlog_entry_void();
+
+ mlog(0, "inode %"MLFu64" take %s RW lock\n",
+ OCFS2_I(inode)->ip_blkno,
+ write ? "EXMODE" : "PRMODE");
+
+ lockres = &OCFS2_I(inode)->ip_rw_lockres;
+
+ level = write ? LKM_EXMODE : LKM_PRMODE;
+
+ status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
+ 0);
+ if (status < 0)
+ mlog_errno(status);
+
+ mlog_exit(status);
+ return status;
+}
+
+void ocfs2_rw_unlock(struct inode *inode, int write)
+{
+ int level = write ? LKM_EXMODE : LKM_PRMODE;
+ struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
+
+ mlog_entry_void();
+
+ mlog(0, "inode %"MLFu64" drop %s RW lock\n",
+ OCFS2_I(inode)->ip_blkno,
+ write ? "EXMODE" : "PRMODE");
+
+ ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
+
+ mlog_exit_void();
+}
+
+int ocfs2_data_lock_full(struct inode *inode,
+ int write,
+ int arg_flags)
+{
+ int status = 0, level;
+ struct ocfs2_lock_res *lockres;
+
+ BUG_ON(!inode);
+
+ mlog_entry_void();
+
+ mlog(0, "inode %"MLFu64" take %s DATA lock\n",
+ OCFS2_I(inode)->ip_blkno,
+ write ? "EXMODE" : "PRMODE");
+
+ /* We'll allow faking a readonly data lock for
+	 * read-only devices. */
+ if (ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb))) {
+ if (write) {
+ status = -EROFS;
+ mlog_errno(status);
+ }
+ goto out;
+ }
+
+ lockres = &OCFS2_I(inode)->ip_data_lockres;
+
+ level = write ? LKM_EXMODE : LKM_PRMODE;
+
+ status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level,
+ 0, arg_flags);
+ if (status < 0 && status != -EAGAIN)
+ mlog_errno(status);
+
+out:
+ mlog_exit(status);
+ return status;
+}
+
+/* see ocfs2_meta_lock_with_page() */
+int ocfs2_data_lock_with_page(struct inode *inode,
+ int write,
+ struct page *page)
+{
+ int ret;
+
+ ret = ocfs2_data_lock_full(inode, write, OCFS2_LOCK_NONBLOCK);
+ if (ret == -EAGAIN) {
+ unlock_page(page);
+ if (ocfs2_data_lock(inode, write) == 0)
+ ocfs2_data_unlock(inode, write);
+ ret = AOP_TRUNCATED_PAGE;
+ }
+
+ return ret;
+}
+
+static void ocfs2_vote_on_unlock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres)
+{
+ int kick = 0;
+
+ mlog_entry_void();
+
+ /* If we know that another node is waiting on our lock, kick
+	 * the vote thread pre-emptively when we reach a release
+ * condition. */
+ if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
+ switch(lockres->l_blocking) {
+ case LKM_EXMODE:
+ if (!lockres->l_ex_holders && !lockres->l_ro_holders)
+ kick = 1;
+ break;
+ case LKM_PRMODE:
+ if (!lockres->l_ex_holders)
+ kick = 1;
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ if (kick)
+ ocfs2_kick_vote_thread(osb);
+
+ mlog_exit_void();
+}
+
+void ocfs2_data_unlock(struct inode *inode,
+ int write)
+{
+ int level = write ? LKM_EXMODE : LKM_PRMODE;
+ struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_data_lockres;
+
+ mlog_entry_void();
+
+ mlog(0, "inode %"MLFu64" drop %s DATA lock\n",
+ OCFS2_I(inode)->ip_blkno,
+ write ? "EXMODE" : "PRMODE");
+
+ if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)))
+ ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
+
+ mlog_exit_void();
+}
+
+#define OCFS2_SEC_BITS 34
+#define OCFS2_SEC_SHIFT (64 - OCFS2_SEC_BITS)
+#define OCFS2_NSEC_MASK ((1ULL << OCFS2_SEC_SHIFT) - 1)
+
+/* LVB only has room for 64 bits of time here so we pack it for
+ * now. */
+static u64 ocfs2_pack_timespec(struct timespec *spec)
+{
+ u64 res;
+ u64 sec = spec->tv_sec;
+ u32 nsec = spec->tv_nsec;
+
+ res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);
+
+ return res;
+}
+
+/* Call this with the lockres locked. I am reasonably sure we don't
+ * need ip_lock in this function as anyone who would be changing those
+ * values is supposed to be blocked in ocfs2_meta_lock right now. */
+static void __ocfs2_stuff_meta_lvb(struct inode *inode)
+{
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_lock_res *lockres = &oi->ip_meta_lockres;
+ struct ocfs2_meta_lvb *lvb;
+
+ mlog_entry_void();
+
+ lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
+
+ lvb->lvb_version = cpu_to_be32(OCFS2_LVB_VERSION);
+ lvb->lvb_isize = cpu_to_be64(i_size_read(inode));
+ lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
+ lvb->lvb_iuid = cpu_to_be32(inode->i_uid);
+ lvb->lvb_igid = cpu_to_be32(inode->i_gid);
+ lvb->lvb_imode = cpu_to_be16(inode->i_mode);
+ lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
+ lvb->lvb_iatime_packed =
+ cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
+ lvb->lvb_ictime_packed =
+ cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
+ lvb->lvb_imtime_packed =
+ cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
+
+ mlog_meta_lvb(0, lockres);
+
+ mlog_exit_void();
+}
+
+static void ocfs2_unpack_timespec(struct timespec *spec,
+ u64 packed_time)
+{
+ spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
+ spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
+}
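+
+/* Worked example of the packing above: tv_sec = 1, tv_nsec = 5 packs to
+ * (1ULL << OCFS2_SEC_SHIFT) | 5 = 0x40000005 -- seconds in the top 34
+ * bits, nanoseconds in the low 30 -- and unpacks back to the same pair. */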
+
+static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
+{
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_lock_res *lockres = &oi->ip_meta_lockres;
+ struct ocfs2_meta_lvb *lvb;
+
+ mlog_entry_void();
+
+ mlog_meta_lvb(0, lockres);
+
+ lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
+
+ /* We're safe here without the lockres lock... */
+ spin_lock(&oi->ip_lock);
+ oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
+ i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
+
+ /* fast-symlinks are a special case */
+ if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
+ inode->i_blocks = 0;
+ else
+ inode->i_blocks =
+ ocfs2_align_bytes_to_sectors(i_size_read(inode));
+
+ inode->i_uid = be32_to_cpu(lvb->lvb_iuid);
+ inode->i_gid = be32_to_cpu(lvb->lvb_igid);
+ inode->i_mode = be16_to_cpu(lvb->lvb_imode);
+ inode->i_nlink = be16_to_cpu(lvb->lvb_inlink);
+ ocfs2_unpack_timespec(&inode->i_atime,
+ be64_to_cpu(lvb->lvb_iatime_packed));
+ ocfs2_unpack_timespec(&inode->i_mtime,
+ be64_to_cpu(lvb->lvb_imtime_packed));
+ ocfs2_unpack_timespec(&inode->i_ctime,
+ be64_to_cpu(lvb->lvb_ictime_packed));
+ spin_unlock(&oi->ip_lock);
+
+ mlog_exit_void();
+}
+
+static inline int ocfs2_meta_lvb_is_trustable(struct ocfs2_lock_res *lockres)
+{
+ struct ocfs2_meta_lvb *lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
+
+ if (be32_to_cpu(lvb->lvb_version) == OCFS2_LVB_VERSION)
+ return 1;
+ return 0;
+}
+
+/* Determine whether a lock resource needs to be refreshed, and
+ * arbitrate who gets to refresh it.
+ *
+ * 0 means no refresh needed.
+ *
+ * > 0 means you need to refresh this and you MUST call
+ * ocfs2_complete_lock_res_refresh afterwards. */
+static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
+{
+ unsigned long flags;
+ int status = 0;
+
+ mlog_entry_void();
+
+refresh_check:
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ goto bail;
+ }
+
+ if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ ocfs2_wait_on_refreshing_lock(lockres);
+ goto refresh_check;
+ }
+
+ /* Ok, I'll be the one to refresh this lock. */
+ lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ status = 1;
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+/* If status is non-zero, I'll mark it as not being in refresh
+ * anymore, but I won't clear the needs-refresh flag. */
+static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
+ int status)
+{
+ unsigned long flags;
+ mlog_entry_void();
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
+ if (!status)
+ lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ wake_up(&lockres->l_event);
+
+ mlog_exit_void();
+}
+
+/* may or may not return a bh if it went to disk. */
+static int ocfs2_meta_lock_update(struct inode *inode,
+ struct buffer_head **bh)
+{
+ int status = 0;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_lock_res *lockres;
+ struct ocfs2_dinode *fe;
+
+ mlog_entry_void();
+
+ spin_lock(&oi->ip_lock);
+ if (oi->ip_flags & OCFS2_INODE_DELETED) {
+ mlog(0, "Orphaned inode %"MLFu64" was deleted while we "
+ "were waiting on a lock. ip_flags = 0x%x\n",
+ oi->ip_blkno, oi->ip_flags);
+ spin_unlock(&oi->ip_lock);
+ status = -ENOENT;
+ goto bail;
+ }
+ spin_unlock(&oi->ip_lock);
+
+ lockres = &oi->ip_meta_lockres;
+
+ if (!ocfs2_should_refresh_lock_res(lockres))
+ goto bail;
+
+ /* This will discard any caching information we might have had
+ * for the inode metadata. */
+ ocfs2_metadata_cache_purge(inode);
+
+ /* will do nothing for inode types that don't use the extent
+ * map (directories, bitmap files, etc) */
+ ocfs2_extent_map_trunc(inode, 0);
+
+ if (ocfs2_meta_lvb_is_trustable(lockres)) {
+ mlog(0, "Trusting LVB on inode %"MLFu64"\n",
+ oi->ip_blkno);
+ ocfs2_refresh_inode_from_lvb(inode);
+ } else {
+ /* Boo, we have to go to disk. */
+ /* read bh, cast, ocfs2_refresh_inode */
+ status = ocfs2_read_block(OCFS2_SB(inode->i_sb), oi->ip_blkno,
+ bh, OCFS2_BH_CACHED, inode);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail_refresh;
+ }
+ fe = (struct ocfs2_dinode *) (*bh)->b_data;
+
+ /* This is a good chance to make sure we're not
+ * locking an invalid object.
+ *
+ * We bug on a stale inode here because we checked
+ * above whether it was wiped from disk. The wiping
+ * node provides a guarantee that we receive that
+ * message and can mark the inode before dropping any
+ * locks associated with it. */
+ if (!OCFS2_IS_VALID_DINODE(fe)) {
+ OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
+ status = -EIO;
+ goto bail_refresh;
+ }
+ mlog_bug_on_msg(inode->i_generation !=
+ le32_to_cpu(fe->i_generation),
+ "Invalid dinode %"MLFu64" disk generation: %u "
+ "inode->i_generation: %u\n",
+ oi->ip_blkno, le32_to_cpu(fe->i_generation),
+ inode->i_generation);
+ mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
+ !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
+ "Stale dinode %"MLFu64" dtime: %"MLFu64" "
+ "flags: 0x%x\n", oi->ip_blkno,
+ le64_to_cpu(fe->i_dtime),
+ le32_to_cpu(fe->i_flags));
+
+ ocfs2_refresh_inode(inode, fe);
+ }
+
+ status = 0;
+bail_refresh:
+ ocfs2_complete_lock_res_refresh(lockres, status);
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+static int ocfs2_assign_bh(struct inode *inode,
+ struct buffer_head **ret_bh,
+ struct buffer_head *passed_bh)
+{
+ int status;
+
+ if (passed_bh) {
+ /* Ok, the update went to disk for us, use the
+ * returned bh. */
+ *ret_bh = passed_bh;
+ get_bh(*ret_bh);
+
+ return 0;
+ }
+
+ status = ocfs2_read_block(OCFS2_SB(inode->i_sb),
+ OCFS2_I(inode)->ip_blkno,
+ ret_bh,
+ OCFS2_BH_CACHED,
+ inode);
+ if (status < 0)
+ mlog_errno(status);
+
+ return status;
+}
+
+/*
+ * returns < 0 error if the callback will never be called, otherwise
+ * the result of the lock will be communicated via the callback.
+ */
+int ocfs2_meta_lock_full(struct inode *inode,
+ struct ocfs2_journal_handle *handle,
+ struct buffer_head **ret_bh,
+ int ex,
+ int arg_flags)
+{
+ int status, level, dlm_flags, acquired;
+ struct ocfs2_lock_res *lockres;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct buffer_head *local_bh = NULL;
+
+ BUG_ON(!inode);
+
+ mlog_entry_void();
+
+ mlog(0, "inode %"MLFu64", take %s META lock\n",
+ OCFS2_I(inode)->ip_blkno,
+ ex ? "EXMODE" : "PRMODE");
+
+ status = 0;
+ acquired = 0;
+ /* We'll allow faking a readonly metadata lock for
+	 * read-only devices. */
+ if (ocfs2_is_hard_readonly(osb)) {
+ if (ex)
+ status = -EROFS;
+ goto bail;
+ }
+
+ if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
+ wait_event(osb->recovery_event,
+ ocfs2_node_map_is_empty(osb, &osb->recovery_map));
+
+ acquired = 0;
+ lockres = &OCFS2_I(inode)->ip_meta_lockres;
+ level = ex ? LKM_EXMODE : LKM_PRMODE;
+ dlm_flags = 0;
+ if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
+ dlm_flags |= LKM_NOQUEUE;
+
+ status = ocfs2_cluster_lock(osb, lockres, level, dlm_flags, arg_flags);
+ if (status < 0) {
+ if (status != -EAGAIN && status != -EIOCBRETRY)
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* Notify the error cleanup path to drop the cluster lock. */
+ acquired = 1;
+
+ /* We wait twice because a node may have died while we were in
+ * the lower dlm layers. The second time though, we've
+ * committed to owning this lock so we don't allow signals to
+ * abort the operation. */
+ if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
+ wait_event(osb->recovery_event,
+ ocfs2_node_map_is_empty(osb, &osb->recovery_map));
+
+ /* This is fun. The caller may want a bh back, or it may
+ * not. ocfs2_meta_lock_update definitely wants one in, but
+ * may or may not read one, depending on what's in the
+ * LVB. The result of all of this is that we've *only* gone to
+ * disk if we have to, so the complexity is worthwhile. */
+ status = ocfs2_meta_lock_update(inode, &local_bh);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ goto bail;
+ }
+
+ if (ret_bh) {
+ status = ocfs2_assign_bh(inode, ret_bh, local_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
+ if (handle) {
+ status = ocfs2_handle_add_lock(handle, inode);
+ if (status < 0)
+ mlog_errno(status);
+ }
+
+bail:
+ if (status < 0) {
+ if (ret_bh && (*ret_bh)) {
+ brelse(*ret_bh);
+ *ret_bh = NULL;
+ }
+ if (acquired)
+ ocfs2_meta_unlock(inode, ex);
+ }
+
+ if (local_bh)
+ brelse(local_bh);
+
+ mlog_exit(status);
+ return status;
+}
+
+/*
+ * This is working around a lock inversion between tasks acquiring DLM locks
+ * while holding a page lock and the vote thread, which blocks dlm lock
+ * acquisition while acquiring page locks.
+ *
+ * ** These _with_page variants are only intended to be called from aop
+ * methods that hold page locks and return a very specific *positive* error
+ * code that aop methods pass up to the VFS -- test for errors with != 0. **
+ *
+ * The DLM is called such that it returns -EAGAIN if it would have blocked
+ * waiting for the vote thread. In that case we unlock our page so the vote
+ * thread can make progress. Once we've done this we have to return
+ * AOP_TRUNCATED_PAGE so the aop method that called us can bubble that back up
+ * into the VFS who will then immediately retry the aop call.
+ *
+ * We do a blocking lock and immediate unlock before returning, though, so that
+ * the lock has a great chance of being cached on this node by the time the VFS
+ * calls back to retry the aop. This has a potential to livelock as nodes
+ * ping locks back and forth, but that's a risk we're willing to take to
+ * keep the lock inversion workaround simple.
+ */
+int ocfs2_meta_lock_with_page(struct inode *inode,
+ struct ocfs2_journal_handle *handle,
+ struct buffer_head **ret_bh,
+ int ex,
+ struct page *page)
+{
+ int ret;
+
+ ret = ocfs2_meta_lock_full(inode, handle, ret_bh, ex,
+ OCFS2_LOCK_NONBLOCK);
+ if (ret == -EAGAIN) {
+ unlock_page(page);
+ if (ocfs2_meta_lock(inode, handle, ret_bh, ex) == 0)
+ ocfs2_meta_unlock(inode, ex);
+ ret = AOP_TRUNCATED_PAGE;
+ }
+
+ return ret;
+}
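+
+/* Sketch of a hypothetical aop-style caller (no such caller appears in
+ * this file); the page is assumed to be locked on entry:
+ *
+ *	ret = ocfs2_meta_lock_with_page(inode, NULL, NULL, 0, page);
+ *	if (ret != 0)
+ *		return ret;	(possibly AOP_TRUNCATED_PAGE)
+ *	... operate on the inode under the meta lock ...
+ *	ocfs2_meta_unlock(inode, 0);
+ */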
+
+void ocfs2_meta_unlock(struct inode *inode,
+ int ex)
+{
+ int level = ex ? LKM_EXMODE : LKM_PRMODE;
+ struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_meta_lockres;
+
+ mlog_entry_void();
+
+ mlog(0, "inode %"MLFu64" drop %s META lock\n",
+ OCFS2_I(inode)->ip_blkno,
+ ex ? "EXMODE" : "PRMODE");
+
+ if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)))
+ ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
+
+ mlog_exit_void();
+}
+
+int ocfs2_super_lock(struct ocfs2_super *osb,
+ int ex)
+{
+ int status;
+ int level = ex ? LKM_EXMODE : LKM_PRMODE;
+ struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
+ struct buffer_head *bh;
+ struct ocfs2_slot_info *si = osb->slot_info;
+
+ mlog_entry_void();
+
+ if (ocfs2_is_hard_readonly(osb))
+ return -EROFS;
+
+ status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* The super block lock path is really in the best position to
+ * know when resources covered by the lock need to be
+ * refreshed, so we do it here. Of course, making sense of
+ * everything is up to the caller :) */
+ status = ocfs2_should_refresh_lock_res(lockres);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ if (status) {
+ bh = si->si_bh;
+ status = ocfs2_read_block(osb, bh->b_blocknr, &bh, 0,
+ si->si_inode);
+ if (status == 0)
+ ocfs2_update_slot_info(si);
+
+ ocfs2_complete_lock_res_refresh(lockres, status);
+
+ if (status < 0)
+ mlog_errno(status);
+ }
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+void ocfs2_super_unlock(struct ocfs2_super *osb,
+ int ex)
+{
+ int level = ex ? LKM_EXMODE : LKM_PRMODE;
+ struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
+
+ ocfs2_cluster_unlock(osb, lockres, level);
+}
+
+int ocfs2_rename_lock(struct ocfs2_super *osb)
+{
+ int status;
+ struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
+
+ if (ocfs2_is_hard_readonly(osb))
+ return -EROFS;
+
+ status = ocfs2_cluster_lock(osb, lockres, LKM_EXMODE, 0, 0);
+ if (status < 0)
+ mlog_errno(status);
+
+ return status;
+}
+
+void ocfs2_rename_unlock(struct ocfs2_super *osb)
+{
+ struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
+
+ ocfs2_cluster_unlock(osb, lockres, LKM_EXMODE);
+}
+
+/* Reference counting of the dlm debug structure. We want this because
+ * open references on the debug inodes can outlive the mount, so
+ * we can't rely on the ocfs2_super to always exist. */
+static void ocfs2_dlm_debug_free(struct kref *kref)
+{
+ struct ocfs2_dlm_debug *dlm_debug;
+
+ dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);
+
+ kfree(dlm_debug);
+}
+
+void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
+{
+ if (dlm_debug)
+ kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
+}
+
+static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
+{
+ kref_get(&debug->d_refcnt);
+}
+
+struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
+{
+ struct ocfs2_dlm_debug *dlm_debug;
+
+ dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
+ if (!dlm_debug) {
+ mlog_errno(-ENOMEM);
+ goto out;
+ }
+
+ kref_init(&dlm_debug->d_refcnt);
+ INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
+ dlm_debug->d_locking_state = NULL;
+out:
+ return dlm_debug;
+}
+
+/* Access to this is arbitrated for us via seq_file->sem. */
+struct ocfs2_dlm_seq_priv {
+ struct ocfs2_dlm_debug *p_dlm_debug;
+ struct ocfs2_lock_res p_iter_res;
+ struct ocfs2_lock_res p_tmp_res;
+};
+
+static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
+ struct ocfs2_dlm_seq_priv *priv)
+{
+ struct ocfs2_lock_res *iter, *ret = NULL;
+ struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;
+
+ assert_spin_locked(&ocfs2_dlm_tracking_lock);
+
+ list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
+ /* discover the head of the list */
+ if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
+ mlog(0, "End of list found, %p\n", ret);
+ break;
+ }
+
+ /* We track our "dummy" iteration lockres' by a NULL
+ * l_ops field. */
+ if (iter->l_ops != NULL) {
+ ret = iter;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
+{
+ struct ocfs2_dlm_seq_priv *priv = m->private;
+ struct ocfs2_lock_res *iter;
+
+ spin_lock(&ocfs2_dlm_tracking_lock);
+ iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
+ if (iter) {
+ /* Since lockres' have the lifetime of their container
+ * (which can be inodes, ocfs2_supers, etc) we want to
+ * copy this out to a temporary lockres while still
+ * under the spinlock. Obviously after this we can't
+ * trust any pointers on the copy returned, but that's
+ * ok as the information we want isn't typically held
+ * in them. */
+ priv->p_tmp_res = *iter;
+ iter = &priv->p_tmp_res;
+ }
+ spin_unlock(&ocfs2_dlm_tracking_lock);
+
+ return iter;
+}
+
+static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
+{
+}
+
+static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct ocfs2_dlm_seq_priv *priv = m->private;
+ struct ocfs2_lock_res *iter = v;
+ struct ocfs2_lock_res *dummy = &priv->p_iter_res;
+
+ spin_lock(&ocfs2_dlm_tracking_lock);
+ iter = ocfs2_dlm_next_res(iter, priv);
+ list_del_init(&dummy->l_debug_list);
+ if (iter) {
+ list_add(&dummy->l_debug_list, &iter->l_debug_list);
+ priv->p_tmp_res = *iter;
+ iter = &priv->p_tmp_res;
+ }
+ spin_unlock(&ocfs2_dlm_tracking_lock);
+
+ return iter;
+}
+
+/* So that debugfs.ocfs2 can determine which format is being used */
+#define OCFS2_DLM_DEBUG_STR_VERSION 1
+static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
+{
+ int i;
+ char *lvb;
+ struct ocfs2_lock_res *lockres = v;
+
+ if (!lockres)
+ return -EINVAL;
+
+ seq_printf(m, "0x%x\t"
+ "%.*s\t"
+ "%d\t"
+ "0x%lx\t"
+ "0x%x\t"
+ "0x%x\t"
+ "%u\t"
+ "%u\t"
+ "%d\t"
+ "%d\t",
+ OCFS2_DLM_DEBUG_STR_VERSION,
+ OCFS2_LOCK_ID_MAX_LEN, lockres->l_name,
+ lockres->l_level,
+ lockres->l_flags,
+ lockres->l_action,
+ lockres->l_unlock_action,
+ lockres->l_ro_holders,
+ lockres->l_ex_holders,
+ lockres->l_requested,
+ lockres->l_blocking);
+
+ /* Dump the raw LVB */
+ lvb = lockres->l_lksb.lvb;
+ for (i = 0; i < DLM_LVB_LEN; i++)
+ seq_printf(m, "0x%x\t", lvb[i]);
+
+ /* End the line */
+ seq_printf(m, "\n");
+ return 0;
+}
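+
+/*
+ * For reference (the values below are invented), a single record emitted by
+ * the show routine above is one tab-separated line of the form:
+ *
+ *	0x1	<lockname>	3	0x41	0x0	0x0	1	0	-1	-1	0x0 ... 0x0
+ *
+ * i.e. format version, lock name, level, flags, action, unlock action,
+ * ro and ex holder counts, requested and blocking levels, followed by the
+ * DLM_LVB_LEN raw lvb bytes.
+ */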
+
+static struct seq_operations ocfs2_dlm_seq_ops = {
+ .start = ocfs2_dlm_seq_start,
+ .stop = ocfs2_dlm_seq_stop,
+ .next = ocfs2_dlm_seq_next,
+ .show = ocfs2_dlm_seq_show,
+};
+
+static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = (struct seq_file *) file->private_data;
+ struct ocfs2_dlm_seq_priv *priv = seq->private;
+ struct ocfs2_lock_res *res = &priv->p_iter_res;
+
+ ocfs2_remove_lockres_tracking(res);
+ ocfs2_put_dlm_debug(priv->p_dlm_debug);
+ return seq_release_private(inode, file);
+}
+
+static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ struct ocfs2_dlm_seq_priv *priv;
+ struct seq_file *seq;
+ struct ocfs2_super *osb;
+
+ priv = kzalloc(sizeof(struct ocfs2_dlm_seq_priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+ osb = (struct ocfs2_super *) inode->u.generic_ip;
+ ocfs2_get_dlm_debug(osb->osb_dlm_debug);
+ priv->p_dlm_debug = osb->osb_dlm_debug;
+ INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
+
+ ret = seq_open(file, &ocfs2_dlm_seq_ops);
+ if (ret) {
+ kfree(priv);
+ mlog_errno(ret);
+ goto out;
+ }
+
+ seq = (struct seq_file *) file->private_data;
+ seq->private = priv;
+
+ ocfs2_add_lockres_tracking(&priv->p_iter_res,
+ priv->p_dlm_debug);
+
+out:
+ return ret;
+}
+
+static struct file_operations ocfs2_dlm_debug_fops = {
+ .open = ocfs2_dlm_debug_open,
+ .release = ocfs2_dlm_debug_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
+{
+ int ret = 0;
+ struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
+
+ dlm_debug->d_locking_state = debugfs_create_file("locking_state",
+ S_IFREG|S_IRUSR,
+ osb->osb_debug_root,
+ osb,
+ &ocfs2_dlm_debug_fops);
+ if (!dlm_debug->d_locking_state) {
+ ret = -EINVAL;
+ mlog(ML_ERROR,
+ "Unable to create locking state debugfs file.\n");
+ goto out;
+ }
+
+ ocfs2_get_dlm_debug(dlm_debug);
+out:
+ return ret;
+}
+
+static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
+{
+ struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
+
+ if (dlm_debug) {
+ debugfs_remove(dlm_debug->d_locking_state);
+ ocfs2_put_dlm_debug(dlm_debug);
+ }
+}
+
+int ocfs2_dlm_init(struct ocfs2_super *osb)
+{
+ int status;
+ u32 dlm_key;
+ struct dlm_ctxt *dlm;
+
+ mlog_entry_void();
+
+ status = ocfs2_dlm_init_debug(osb);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* launch vote thread */
+ osb->vote_task = kthread_run(ocfs2_vote_thread, osb, "ocfs2vote-%d",
+ osb->osb_id);
+ if (IS_ERR(osb->vote_task)) {
+ status = PTR_ERR(osb->vote_task);
+ osb->vote_task = NULL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* used by the dlm code to make message headers unique, each
+ * node in this domain must agree on this. */
+ dlm_key = crc32_le(0, osb->uuid_str, strlen(osb->uuid_str));
+
+ /* for now, uuid == domain */
+ dlm = dlm_register_domain(osb->uuid_str, dlm_key);
+ if (IS_ERR(dlm)) {
+ status = PTR_ERR(dlm);
+ mlog_errno(status);
+ goto bail;
+ }
+
+ ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
+ ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
+
+ dlm_register_eviction_cb(dlm, &osb->osb_eviction_cb);
+
+ osb->dlm = dlm;
+
+ status = 0;
+bail:
+ if (status < 0) {
+ ocfs2_dlm_shutdown_debug(osb);
+ if (osb->vote_task)
+ kthread_stop(osb->vote_task);
+ }
+
+ mlog_exit(status);
+ return status;
+}
+
+void ocfs2_dlm_shutdown(struct ocfs2_super *osb)
+{
+ mlog_entry_void();
+
+ dlm_unregister_eviction_cb(&osb->osb_eviction_cb);
+
+ ocfs2_drop_osb_locks(osb);
+
+ if (osb->vote_task) {
+ kthread_stop(osb->vote_task);
+ osb->vote_task = NULL;
+ }
+
+ ocfs2_lock_res_free(&osb->osb_super_lockres);
+ ocfs2_lock_res_free(&osb->osb_rename_lockres);
+
+ dlm_unregister_domain(osb->dlm);
+ osb->dlm = NULL;
+
+ ocfs2_dlm_shutdown_debug(osb);
+
+ mlog_exit_void();
+}
+
+static void ocfs2_unlock_ast_func(void *opaque, enum dlm_status status)
+{
+ struct ocfs2_lock_res *lockres = opaque;
+ unsigned long flags;
+
+ mlog_entry_void();
+
+ mlog(0, "UNLOCK AST called on lock %s, action = %d\n", lockres->l_name,
+ lockres->l_unlock_action);
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ /* We tried to cancel a convert request, but it was already
+ * granted. All we want to do here is clear our unlock
+ * state. The wake_up call done at the bottom is redundant
+ * (ocfs2_prepare_cancel_convert doesn't sleep on this) but doesn't
+ * hurt anything anyway */
+ if (status == DLM_CANCELGRANT &&
+ lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
+ mlog(0, "Got cancelgrant for %s\n", lockres->l_name);
+
+ /* We don't clear the busy flag in this case as it
+ * should have been cleared by the ast which the dlm
+ * has called. */
+ goto complete_unlock;
+ }
+
+ if (status != DLM_NORMAL) {
+ mlog(ML_ERROR, "Dlm passes status %d for lock %s, "
+ "unlock_action %d\n", status, lockres->l_name,
+ lockres->l_unlock_action);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ return;
+ }
+
+ switch (lockres->l_unlock_action) {
+ case OCFS2_UNLOCK_CANCEL_CONVERT:
+ mlog(0, "Cancel convert success for %s\n", lockres->l_name);
+ lockres->l_action = OCFS2_AST_INVALID;
+ break;
+ case OCFS2_UNLOCK_DROP_LOCK:
+ lockres->l_level = LKM_IVMODE;
+ break;
+ default:
+ BUG();
+ }
+
+ lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+complete_unlock:
+ lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ wake_up(&lockres->l_event);
+
+ mlog_exit_void();
+}
+
+typedef void (ocfs2_pre_drop_cb_t)(struct ocfs2_lock_res *, void *);
+
+struct drop_lock_cb {
+ ocfs2_pre_drop_cb_t *drop_func;
+ void *drop_data;
+};
+
+static int ocfs2_drop_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ struct drop_lock_cb *dcb)
+{
+ enum dlm_status status;
+ unsigned long flags;
+
+ /* We didn't get anywhere near actually using this lockres. */
+ if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
+ goto out;
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+
+ mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
+ "lockres %s, flags 0x%lx\n",
+ lockres->l_name, lockres->l_flags);
+
+ while (lockres->l_flags & OCFS2_LOCK_BUSY) {
+ mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
+ "%u, unlock_action = %u\n",
+ lockres->l_name, lockres->l_flags, lockres->l_action,
+ lockres->l_unlock_action);
+
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ /* XXX: Today we just wait on any busy
+ * locks... Perhaps we need to cancel converts in the
+ * future? */
+ ocfs2_wait_on_busy_lock(lockres);
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ }
+
+ if (dcb)
+ dcb->drop_func(lockres, dcb->drop_data);
+
+ if (lockres->l_flags & OCFS2_LOCK_BUSY)
+ mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
+ lockres->l_name);
+ if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
+ mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
+
+ if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ goto out;
+ }
+
+ lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
+
+ /* make sure we never get here while waiting for an ast to
+ * fire. */
+ BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
+
+ /* is this necessary? */
+ lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
+ lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ mlog(0, "lock %s\n", lockres->l_name);
+
+ status = dlmunlock(osb->dlm, &lockres->l_lksb, LKM_VALBLK,
+ lockres->l_ops->unlock_ast, lockres);
+ if (status != DLM_NORMAL) {
+ ocfs2_log_dlm_error("dlmunlock", status, lockres);
+ mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
+ dlm_print_one_lock(lockres->l_lksb.lockid);
+ BUG();
+ }
+ mlog(0, "lock %s, successful return from dlmunlock\n",
+ lockres->l_name);
+
+ ocfs2_wait_on_busy_lock(lockres);
+out:
+ mlog_exit(0);
+ return 0;
+}
+
+/* Mark the lockres as being dropped. It will no longer be
+ * queued if blocking, but we still may have to wait on it
+ * being dequeued from the vote thread before we can consider
+ * it safe to drop.
+ *
+ * You can *not* attempt to call cluster_lock on this lockres anymore. */
+void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
+{
+ int status;
+ struct ocfs2_mask_waiter mw;
+ unsigned long flags;
+
+ ocfs2_init_mask_waiter(&mw);
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ lockres->l_flags |= OCFS2_LOCK_FREEING;
+ while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
+ lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ mlog(0, "Waiting on lockres %s\n", lockres->l_name);
+
+ status = ocfs2_wait_for_mask(&mw);
+ if (status)
+ mlog_errno(status);
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ }
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+}
+
+static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
+{
+ int status;
+
+ mlog_entry_void();
+
+ ocfs2_mark_lockres_freeing(&osb->osb_super_lockres);
+
+ status = ocfs2_drop_lock(osb, &osb->osb_super_lockres, NULL);
+ if (status < 0)
+ mlog_errno(status);
+
+ ocfs2_mark_lockres_freeing(&osb->osb_rename_lockres);
+
+ status = ocfs2_drop_lock(osb, &osb->osb_rename_lockres, NULL);
+ if (status < 0)
+ mlog_errno(status);
+
+ mlog_exit(status);
+}
+
+static void ocfs2_meta_pre_drop(struct ocfs2_lock_res *lockres, void *data)
+{
+ struct inode *inode = data;
+
+ /* the metadata lock requires a bit more work as we have an
+ * LVB to worry about. */
+ if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
+ lockres->l_level == LKM_EXMODE &&
+ !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
+ __ocfs2_stuff_meta_lvb(inode);
+}
+
+int ocfs2_drop_inode_locks(struct inode *inode)
+{
+ int status, err;
+ struct drop_lock_cb meta_dcb = { ocfs2_meta_pre_drop, inode, };
+
+ mlog_entry_void();
+
+ /* No need to call ocfs2_mark_lockres_freeing here -
+ * ocfs2_clear_inode has done it for us. */
+
+ err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
+ &OCFS2_I(inode)->ip_data_lockres,
+ NULL);
+ if (err < 0)
+ mlog_errno(err);
+
+ status = err;
+
+ err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
+ &OCFS2_I(inode)->ip_meta_lockres,
+ &meta_dcb);
+ if (err < 0)
+ mlog_errno(err);
+ if (err < 0 && !status)
+ status = err;
+
+ err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
+ &OCFS2_I(inode)->ip_rw_lockres,
+ NULL);
+ if (err < 0)
+ mlog_errno(err);
+ if (err < 0 && !status)
+ status = err;
+
+ mlog_exit(status);
+ return status;
+}
+
+static void ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
+ int new_level)
+{
+ assert_spin_locked(&lockres->l_lock);
+
+ BUG_ON(lockres->l_blocking <= LKM_NLMODE);
+
+ if (lockres->l_level <= new_level) {
+ mlog(ML_ERROR, "lockres->l_level (%u) <= new_level (%u)\n",
+ lockres->l_level, new_level);
+ BUG();
+ }
+
+ mlog(0, "lock %s, new_level = %d, l_blocking = %d\n",
+ lockres->l_name, new_level, lockres->l_blocking);
+
+ lockres->l_action = OCFS2_AST_DOWNCONVERT;
+ lockres->l_requested = new_level;
+ lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
+}
+
+static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int new_level,
+ int lvb)
+{
+ int ret, dlm_flags = LKM_CONVERT;
+ enum dlm_status status;
+
+ mlog_entry_void();
+
+ if (lvb)
+ dlm_flags |= LKM_VALBLK;
+
+ status = dlmlock(osb->dlm,
+ new_level,
+ &lockres->l_lksb,
+ dlm_flags,
+ lockres->l_name,
+ lockres->l_ops->ast,
+ lockres,
+ lockres->l_ops->bast);
+ if (status != DLM_NORMAL) {
+ ocfs2_log_dlm_error("dlmlock", status, lockres);
+ ret = -EINVAL;
+ ocfs2_recover_from_dlm_error(lockres, 1);
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ mlog_exit(ret);
+ return ret;
+}
+
+/* returns 1 when the caller should unlock and call dlmunlock */
+static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres)
+{
+ assert_spin_locked(&lockres->l_lock);
+
+ mlog_entry_void();
+ mlog(0, "lock %s\n", lockres->l_name);
+
+ if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
+ /* If we're already trying to cancel a lock conversion
+ * then just drop the spinlock and allow the caller to
+ * requeue this lock. */
+
+ mlog(0, "Lockres %s, skip convert\n", lockres->l_name);
+ return 0;
+ }
+
+ /* were we in a convert when the bast fired? */
+ BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
+ lockres->l_action != OCFS2_AST_DOWNCONVERT);
+ /* set things up for the unlockast to know to just
+ * clear out the ast_action and unset busy, etc. */
+ lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
+
+ mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
+ "lock %s, invalid flags: 0x%lx\n",
+ lockres->l_name, lockres->l_flags);
+
+ return 1;
+}
+
+static int ocfs2_cancel_convert(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres)
+{
+ int ret;
+ enum dlm_status status;
+
+ mlog_entry_void();
+ mlog(0, "lock %s\n", lockres->l_name);
+
+ ret = 0;
+ status = dlmunlock(osb->dlm,
+ &lockres->l_lksb,
+ LKM_CANCEL,
+ lockres->l_ops->unlock_ast,
+ lockres);
+ if (status != DLM_NORMAL) {
+ ocfs2_log_dlm_error("dlmunlock", status, lockres);
+ ret = -EINVAL;
+ ocfs2_recover_from_dlm_error(lockres, 0);
+ }
+
+ mlog(0, "lock %s return from dlmunlock\n", lockres->l_name);
+
+ mlog_exit(ret);
+ return ret;
+}
+
+static inline int ocfs2_can_downconvert_meta_lock(struct inode *inode,
+ struct ocfs2_lock_res *lockres,
+ int new_level)
+{
+ int ret;
+
+ mlog_entry_void();
+
+ BUG_ON(new_level != LKM_NLMODE && new_level != LKM_PRMODE);
+
+ if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
+ ret = 0;
+ mlog(0, "lockres %s currently being refreshed -- backing "
+ "off!\n", lockres->l_name);
+ } else if (new_level == LKM_PRMODE)
+ ret = !lockres->l_ex_holders &&
+ ocfs2_inode_fully_checkpointed(inode);
+ else /* Must be NLMODE we're converting to. */
+ ret = !lockres->l_ro_holders && !lockres->l_ex_holders &&
+ ocfs2_inode_fully_checkpointed(inode);
+
+ mlog_exit(ret);
+ return ret;
+}
+
+static int ocfs2_do_unblock_meta(struct inode *inode,
+ int *requeue)
+{
+ int new_level;
+ int set_lvb = 0;
+ int ret = 0;
+ struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_meta_lockres;
+ unsigned long flags;
+
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ mlog_entry_void();
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+
+ BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
+
+ mlog(0, "l_level=%d, l_blocking=%d\n", lockres->l_level,
+ lockres->l_blocking);
+
+ BUG_ON(lockres->l_level != LKM_EXMODE &&
+ lockres->l_level != LKM_PRMODE);
+
+ if (lockres->l_flags & OCFS2_LOCK_BUSY) {
+ *requeue = 1;
+ ret = ocfs2_prepare_cancel_convert(osb, lockres);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ if (ret) {
+ ret = ocfs2_cancel_convert(osb, lockres);
+ if (ret < 0)
+ mlog_errno(ret);
+ }
+ goto leave;
+ }
+
+ new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
+
+ mlog(0, "l_level=%d, l_blocking=%d, new_level=%d\n",
+ lockres->l_level, lockres->l_blocking, new_level);
+
+ if (ocfs2_can_downconvert_meta_lock(inode, lockres, new_level)) {
+ if (lockres->l_level == LKM_EXMODE)
+ set_lvb = 1;
+
+ /* If the lock hasn't been refreshed yet (rare), then
+ * our in-memory inode values are old and we skip
+ * stuffing the lvb. There's no need to actually clear
+ * out the lvb here as its value is still valid. */
+ if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
+ if (set_lvb)
+ __ocfs2_stuff_meta_lvb(inode);
+ } else
+ mlog(0, "lockres %s: downconverting stale lock!\n",
+ lockres->l_name);
+
+ mlog(0, "calling ocfs2_downconvert_lock with l_level=%d, "
+ "l_blocking=%d, new_level=%d\n",
+ lockres->l_level, lockres->l_blocking, new_level);
+
+ ocfs2_prepare_downconvert(lockres, new_level);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb);
+ goto leave;
+ }
+ if (!ocfs2_inode_fully_checkpointed(inode))
+ ocfs2_start_checkpoint(osb);
+
+ *requeue = 1;
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ ret = 0;
+leave:
+ mlog_exit(ret);
+ return ret;
+}
+
+static int ocfs2_generic_unblock_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres,
+ int *requeue,
+ ocfs2_convert_worker_t *worker)
+{
+ unsigned long flags;
+ int blocking;
+ int new_level;
+ int ret = 0;
+
+ mlog_entry_void();
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+
+ BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
+
+recheck:
+ if (lockres->l_flags & OCFS2_LOCK_BUSY) {
+ *requeue = 1;
+ ret = ocfs2_prepare_cancel_convert(osb, lockres);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ if (ret) {
+ ret = ocfs2_cancel_convert(osb, lockres);
+ if (ret < 0)
+ mlog_errno(ret);
+ }
+ goto leave;
+ }
+
+ /* if we're blocking an exclusive and we have *any* holders,
+ * then requeue. */
+ if ((lockres->l_blocking == LKM_EXMODE)
+ && (lockres->l_ex_holders || lockres->l_ro_holders)) {
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ *requeue = 1;
+ ret = 0;
+ goto leave;
+ }
+
+ /* If it's a PR we're blocking, then only
+ * requeue if we've got any EX holders */
+ if (lockres->l_blocking == LKM_PRMODE &&
+ lockres->l_ex_holders) {
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ *requeue = 1;
+ ret = 0;
+ goto leave;
+ }
+
+ /* If we get here, then we know that there are no more
+ * incompatible holders (and anyone asking for an incompatible
+ * lock is blocked). We can now downconvert the lock */
+ if (!worker)
+ goto downconvert;
+
+ /* Some lockres types want to do a bit of work before
+ * downconverting a lock. Allow that here. The worker function
+ * may sleep, so we save off a copy of what we're blocking as
+ * it may change while we're not holding the spin lock. */
+ blocking = lockres->l_blocking;
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ worker(lockres, blocking);
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ if (blocking != lockres->l_blocking) {
+ /* If this changed underneath us, then we can't drop
+ * it just yet. */
+ goto recheck;
+ }
+
+downconvert:
+ *requeue = 0;
+ new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
+
+ ocfs2_prepare_downconvert(lockres, new_level);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ ret = ocfs2_downconvert_lock(osb, lockres, new_level, 0);
+leave:
+ mlog_exit(ret);
+ return ret;
+}
+
+static void ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
+ int blocking)
+{
+ struct inode *inode;
+ struct address_space *mapping;
+
+ mlog_entry_void();
+
+ inode = ocfs2_lock_res_inode(lockres);
+ mapping = inode->i_mapping;
+
+ if (filemap_fdatawrite(mapping)) {
+ mlog(ML_ERROR, "Could not sync inode %"MLFu64" for downconvert!\n",
+ OCFS2_I(inode)->ip_blkno);
+ }
+ sync_mapping_buffers(mapping);
+ if (blocking == LKM_EXMODE) {
+ truncate_inode_pages(mapping, 0);
+ unmap_mapping_range(mapping, 0, 0, 0);
+ } else {
+ /* We only need to wait on the I/O if we're not also
+ * truncating pages because truncate_inode_pages waits
+ * for us above. We don't truncate pages if we're
+ * blocking anything < EXMODE because we want to keep
+ * them around in that case. */
+ filemap_fdatawait(mapping);
+ }
+
+ mlog_exit_void();
+}
+
+int ocfs2_unblock_data(struct ocfs2_lock_res *lockres,
+ int *requeue)
+{
+ int status;
+ struct inode *inode;
+ struct ocfs2_super *osb;
+
+ mlog_entry_void();
+
+ inode = ocfs2_lock_res_inode(lockres);
+ osb = OCFS2_SB(inode->i_sb);
+
+ mlog(0, "unblock inode %"MLFu64"\n", OCFS2_I(inode)->ip_blkno);
+
+ status = ocfs2_generic_unblock_lock(osb,
+ lockres,
+ requeue,
+ ocfs2_data_convert_worker);
+ if (status < 0)
+ mlog_errno(status);
+
+ mlog(0, "inode %"MLFu64", requeue = %d\n",
+ OCFS2_I(inode)->ip_blkno, *requeue);
+
+ mlog_exit(status);
+ return status;
+}
+
+static int ocfs2_unblock_inode_lock(struct ocfs2_lock_res *lockres,
+ int *requeue)
+{
+ int status;
+ struct inode *inode;
+
+ mlog_entry_void();
+
+ mlog(0, "Unblock lockres %s\n", lockres->l_name);
+
+ inode = ocfs2_lock_res_inode(lockres);
+
+ status = ocfs2_generic_unblock_lock(OCFS2_SB(inode->i_sb),
+ lockres,
+ requeue,
+ NULL);
+ if (status < 0)
+ mlog_errno(status);
+
+ mlog_exit(status);
+ return status;
+}
+
+
+int ocfs2_unblock_meta(struct ocfs2_lock_res *lockres,
+ int *requeue)
+{
+ int status;
+ struct inode *inode;
+
+ mlog_entry_void();
+
+ inode = ocfs2_lock_res_inode(lockres);
+
+ mlog(0, "unblock inode %"MLFu64"\n", OCFS2_I(inode)->ip_blkno);
+
+ status = ocfs2_do_unblock_meta(inode, requeue);
+ if (status < 0)
+ mlog_errno(status);
+
+ mlog(0, "inode %"MLFu64", requeue = %d\n",
+ OCFS2_I(inode)->ip_blkno, *requeue);
+
+ mlog_exit(status);
+ return status;
+}
+
+/* Generic unblock function for any lockres whose private data is an
+ * ocfs2_super pointer. */
+static int ocfs2_unblock_osb_lock(struct ocfs2_lock_res *lockres,
+ int *requeue)
+{
+ int status;
+ struct ocfs2_super *osb;
+
+ mlog_entry_void();
+
+ mlog(0, "Unblock lockres %s\n", lockres->l_name);
+
+ osb = ocfs2_lock_res_super(lockres);
+
+ status = ocfs2_generic_unblock_lock(osb,
+ lockres,
+ requeue,
+ NULL);
+ if (status < 0)
+ mlog_errno(status);
+
+ mlog_exit(status);
+ return status;
+}
+
+void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres)
+{
+ int status;
+ int requeue = 0;
+ unsigned long flags;
+
+ /* Our reference to the lockres in this function can be
+ * considered valid until we remove the OCFS2_LOCK_QUEUED
+ * flag. */
+
+ mlog_entry_void();
+
+ BUG_ON(!lockres);
+ BUG_ON(!lockres->l_ops);
+ BUG_ON(!lockres->l_ops->unblock);
+
+ mlog(0, "lockres %s blocked.\n", lockres->l_name);
+
+ /* Detect whether a lock has been marked as going away while
+ * the vote thread was processing other things. A lock can
+ * still be marked with OCFS2_LOCK_FREEING after this check,
+ * but short-circuiting here will still save us some
+ * work. */
+ spin_lock_irqsave(&lockres->l_lock, flags);
+ if (lockres->l_flags & OCFS2_LOCK_FREEING)
+ goto unqueue;
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ status = lockres->l_ops->unblock(lockres, &requeue);
+ if (status < 0)
+ mlog_errno(status);
+
+ spin_lock_irqsave(&lockres->l_lock, flags);
+unqueue:
+ if (lockres->l_flags & OCFS2_LOCK_FREEING || !requeue) {
+ lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
+ } else
+ ocfs2_schedule_blocked_lock(osb, lockres);
+
+ mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name,
+ requeue ? "yes" : "no");
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+
+ mlog_exit_void();
+}
+
+static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres)
+{
+ mlog_entry_void();
+
+ assert_spin_locked(&lockres->l_lock);
+
+ if (lockres->l_flags & OCFS2_LOCK_FREEING) {
+ /* Do not schedule a lock for downconvert when it's on
+ * the way to destruction - any nodes wanting access
+ * to the resource will get it soon. */
+ mlog(0, "Lockres %s won't be scheduled: flags 0x%lx\n",
+ lockres->l_name, lockres->l_flags);
+ return;
+ }
+
+ lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
+
+ spin_lock(&osb->vote_task_lock);
+ if (list_empty(&lockres->l_blocked_list)) {
+ list_add_tail(&lockres->l_blocked_list,
+ &osb->blocked_lock_list);
+ osb->blocked_lock_count++;
+ }
+ spin_unlock(&osb->vote_task_lock);
+
+ mlog_exit_void();
+}
+
+/* This aids in debugging situations where a bad LVB might be involved. */
+void ocfs2_dump_meta_lvb_info(u64 level,
+ const char *function,
+ unsigned int line,
+ struct ocfs2_lock_res *lockres)
+{
+ struct ocfs2_meta_lvb *lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
+
+ mlog(level, "LVB information for %s (called from %s:%u):\n",
+ lockres->l_name, function, line);
+ mlog(level, "version: %u, clusters: %u\n",
+ be32_to_cpu(lvb->lvb_version), be32_to_cpu(lvb->lvb_iclusters));
+ mlog(level, "size: %"MLFu64", uid %u, gid %u, mode 0x%x\n",
+ be64_to_cpu(lvb->lvb_isize), be32_to_cpu(lvb->lvb_iuid),
+ be32_to_cpu(lvb->lvb_igid), be16_to_cpu(lvb->lvb_imode));
+ mlog(level, "nlink %u, atime_packed 0x%"MLFx64", "
+ "ctime_packed 0x%"MLFx64", mtime_packed 0x%"MLFx64"\n",
+ be16_to_cpu(lvb->lvb_inlink), be64_to_cpu(lvb->lvb_iatime_packed),
+ be64_to_cpu(lvb->lvb_ictime_packed),
+ be64_to_cpu(lvb->lvb_imtime_packed));
+}
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
new file mode 100644
index 00000000000..8f2d1db2d9e
--- /dev/null
+++ b/fs/ocfs2/dlmglue.h
@@ -0,0 +1,111 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * dlmglue.h
+ *
+ * description here
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+
+#ifndef DLMGLUE_H
+#define DLMGLUE_H
+
+#define OCFS2_LVB_VERSION 2
+
+struct ocfs2_meta_lvb {
+ __be32 lvb_version;
+ __be32 lvb_iclusters;
+ __be32 lvb_iuid;
+ __be32 lvb_igid;
+ __be64 lvb_iatime_packed;
+ __be64 lvb_ictime_packed;
+ __be64 lvb_imtime_packed;
+ __be64 lvb_isize;
+ __be16 lvb_imode;
+ __be16 lvb_inlink;
+ __be32 lvb_reserved[3];
+};
+
+/* ocfs2_meta_lock_full() and ocfs2_data_lock_full() 'arg_flags' flags */
+/* don't wait on recovery. */
+#define OCFS2_META_LOCK_RECOVERY (0x01)
+/* Instruct the dlm not to queue ourselves on the other node. */
+#define OCFS2_META_LOCK_NOQUEUE (0x02)
+/* don't block waiting for the vote thread, instead return -EAGAIN */
+#define OCFS2_LOCK_NONBLOCK (0x04)
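+
+/* Purely illustrative -- the flags above form a bitmask, so a caller that
+ * must neither wait on recovery nor queue behind other nodes could, in
+ * principle, pass something like:
+ *
+ *	ocfs2_meta_lock_full(inode, handle, &bh, 1,
+ *			     OCFS2_META_LOCK_RECOVERY |
+ *			     OCFS2_META_LOCK_NOQUEUE);
+ */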
+
+int ocfs2_dlm_init(struct ocfs2_super *osb);
+void ocfs2_dlm_shutdown(struct ocfs2_super *osb);
+void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res);
+void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
+ enum ocfs2_lock_type type,
+ struct inode *inode);
+void ocfs2_lock_res_free(struct ocfs2_lock_res *res);
+int ocfs2_create_new_inode_locks(struct inode *inode);
+int ocfs2_drop_inode_locks(struct inode *inode);
+int ocfs2_data_lock_full(struct inode *inode,
+ int write,
+ int arg_flags);
+#define ocfs2_data_lock(inode, write) ocfs2_data_lock_full(inode, write, 0)
+int ocfs2_data_lock_with_page(struct inode *inode,
+ int write,
+ struct page *page);
+void ocfs2_data_unlock(struct inode *inode,
+ int write);
+int ocfs2_rw_lock(struct inode *inode, int write);
+void ocfs2_rw_unlock(struct inode *inode, int write);
+int ocfs2_meta_lock_full(struct inode *inode,
+ struct ocfs2_journal_handle *handle,
+ struct buffer_head **ret_bh,
+ int ex,
+ int arg_flags);
+int ocfs2_meta_lock_with_page(struct inode *inode,
+ struct ocfs2_journal_handle *handle,
+ struct buffer_head **ret_bh,
+ int ex,
+ struct page *page);
+/* 99% of the time we don't want to supply any additional flags --
+ * those are for very specific cases only. */
+#define ocfs2_meta_lock(i, h, b, e) ocfs2_meta_lock_full(i, h, b, e, 0)
+void ocfs2_meta_unlock(struct inode *inode,
+ int ex);
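+/* A minimal usage sketch (mirroring the shared lock taken in export.c):
+ *
+ *	status = ocfs2_meta_lock(inode, NULL, NULL, 0);
+ *	if (status < 0)
+ *		return status;
+ *	... access metadata covered by the lock ...
+ *	ocfs2_meta_unlock(inode, 0);
+ */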
+int ocfs2_super_lock(struct ocfs2_super *osb,
+ int ex);
+void ocfs2_super_unlock(struct ocfs2_super *osb,
+ int ex);
+int ocfs2_rename_lock(struct ocfs2_super *osb);
+void ocfs2_rename_unlock(struct ocfs2_super *osb);
+void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres);
+
+/* for the vote thread */
+void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
+ struct ocfs2_lock_res *lockres);
+
+struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void);
+void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug);
+
+/* aids in debugging and tracking lvbs */
+void ocfs2_dump_meta_lvb_info(u64 level,
+ const char *function,
+ unsigned int line,
+ struct ocfs2_lock_res *lockres);
+#define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)
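+/* e.g. (illustrative): mlog_meta_lvb(0, &OCFS2_I(inode)->ip_meta_lockres); */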
+
+#endif /* DLMGLUE_H */
diff --git a/fs/ocfs2/endian.h b/fs/ocfs2/endian.h
new file mode 100644
index 00000000000..f226b220762
--- /dev/null
+++ b/fs/ocfs2/endian.h
@@ -0,0 +1,45 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_ENDIAN_H
+#define OCFS2_ENDIAN_H
+
+static inline void le16_add_cpu(__le16 *var, u16 val)
+{
+ *var = cpu_to_le16(le16_to_cpu(*var) + val);
+}
+
+static inline void le32_add_cpu(__le32 *var, u32 val)
+{
+ *var = cpu_to_le32(le32_to_cpu(*var) + val);
+}
+
+static inline void le32_and_cpu(__le32 *var, u32 val)
+{
+ *var = cpu_to_le32(le32_to_cpu(*var) & val);
+}
+
+static inline void be32_add_cpu(__be32 *var, u32 val)
+{
+ *var = cpu_to_be32(be32_to_cpu(*var) + val);
+}
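+
+/*
+ * Usage sketch, mirroring the later call in extent_map.c: extending an
+ * extent record's on-disk little-endian cluster count in place:
+ *
+ *	le32_add_cpu(&old->e_clusters, new_clusters);
+ */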
+
+#endif /* OCFS2_ENDIAN_H */
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
new file mode 100644
index 00000000000..5810160d92a
--- /dev/null
+++ b/fs/ocfs2/export.c
@@ -0,0 +1,248 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * export.c
+ *
+ * Functions to facilitate NFS exporting
+ *
+ * Copyright (C) 2002, 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/types.h>
+
+#define MLOG_MASK_PREFIX ML_EXPORT
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#include "dir.h"
+#include "dlmglue.h"
+#include "export.h"
+#include "inode.h"
+
+#include "buffer_head_io.h"
+
+struct ocfs2_inode_handle
+{
+ u64 ih_blkno;
+ u32 ih_generation;
+};
+
+static struct dentry *ocfs2_get_dentry(struct super_block *sb, void *vobjp)
+{
+ struct ocfs2_inode_handle *handle = vobjp;
+ struct inode *inode;
+ struct dentry *result;
+
+ mlog_entry("(0x%p, 0x%p)\n", sb, handle);
+
+ if (handle->ih_blkno == 0) {
+ mlog_errno(-ESTALE);
+ return ERR_PTR(-ESTALE);
+ }
+
+ inode = ocfs2_iget(OCFS2_SB(sb), handle->ih_blkno);
+
+ if (IS_ERR(inode)) {
+ mlog_errno(PTR_ERR(inode));
+ return (void *)inode;
+ }
+
+ if (handle->ih_generation != inode->i_generation) {
+ iput(inode);
+ mlog_errno(-ESTALE);
+ return ERR_PTR(-ESTALE);
+ }
+
+ result = d_alloc_anon(inode);
+
+ if (!result) {
+ iput(inode);
+ mlog_errno(-ENOMEM);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mlog_exit_ptr(result);
+ return result;
+}
+
+static struct dentry *ocfs2_get_parent(struct dentry *child)
+{
+ int status;
+ u64 blkno;
+ struct dentry *parent;
+ struct inode *inode;
+ struct inode *dir = child->d_inode;
+ struct buffer_head *dirent_bh = NULL;
+ struct ocfs2_dir_entry *dirent;
+
+ mlog_entry("(0x%p, '%.*s')\n", child,
+ child->d_name.len, child->d_name.name);
+
+ mlog(0, "find parent of directory %"MLFu64"\n",
+ OCFS2_I(dir)->ip_blkno);
+
+ status = ocfs2_meta_lock(dir, NULL, NULL, 0);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ parent = ERR_PTR(status);
+ goto bail;
+ }
+
+ status = ocfs2_find_files_on_disk("..", 2, &blkno, dir, &dirent_bh,
+ &dirent);
+ if (status < 0) {
+ parent = ERR_PTR(-ENOENT);
+ goto bail_unlock;
+ }
+
+ inode = ocfs2_iget(OCFS2_SB(dir->i_sb), blkno);
+ if (IS_ERR(inode)) {
+ mlog(ML_ERROR, "Unable to create inode %"MLFu64"\n", blkno);
+ parent = ERR_PTR(-EACCES);
+ goto bail_unlock;
+ }
+
+ parent = d_alloc_anon(inode);
+ if (!parent) {
+ iput(inode);
+ parent = ERR_PTR(-ENOMEM);
+ }
+
+bail_unlock:
+ ocfs2_meta_unlock(dir, 0);
+
+ if (dirent_bh)
+ brelse(dirent_bh);
+
+bail:
+ mlog_exit_ptr(parent);
+
+ return parent;
+}
+
+static int ocfs2_encode_fh(struct dentry *dentry, __be32 *fh, int *max_len,
+ int connectable)
+{
+ struct inode *inode = dentry->d_inode;
+ int len = *max_len;
+ int type = 1;
+ u64 blkno;
+ u32 generation;
+
+ mlog_entry("(0x%p, '%.*s', 0x%p, %d, %d)\n", dentry,
+ dentry->d_name.len, dentry->d_name.name,
+ fh, len, connectable);
+
+ if (len < 3 || (connectable && len < 6)) {
+ mlog(ML_ERROR, "fh buffer is too small for encoding\n");
+ type = 255;
+ goto bail;
+ }
+
+ blkno = OCFS2_I(inode)->ip_blkno;
+ generation = inode->i_generation;
+
+ mlog(0, "Encoding fh: blkno: %"MLFu64", generation: %u\n",
+ blkno, generation);
+
+ len = 3;
+ fh[0] = cpu_to_le32((u32)(blkno >> 32));
+ fh[1] = cpu_to_le32((u32)(blkno & 0xffffffff));
+ fh[2] = cpu_to_le32(generation);
+
+ if (connectable && !S_ISDIR(inode->i_mode)) {
+ struct inode *parent;
+
+ spin_lock(&dentry->d_lock);
+
+ parent = dentry->d_parent->d_inode;
+ blkno = OCFS2_I(parent)->ip_blkno;
+ generation = parent->i_generation;
+
+ fh[3] = cpu_to_le32((u32)(blkno >> 32));
+ fh[4] = cpu_to_le32((u32)(blkno & 0xffffffff));
+ fh[5] = cpu_to_le32(generation);
+
+ spin_unlock(&dentry->d_lock);
+
+ len = 6;
+ type = 2;
+
+ mlog(0, "Encoding parent: blkno: %"MLFu64", generation: %u\n",
+ blkno, generation);
+ }
+
+ *max_len = len;
+
+bail:
+ mlog_exit(type);
+ return type;
+}
+
+static struct dentry *ocfs2_decode_fh(struct super_block *sb, __be32 *fh,
+ int fh_len, int fileid_type,
+ int (*acceptable)(void *context,
+ struct dentry *de),
+ void *context)
+{
+ struct ocfs2_inode_handle handle, parent;
+ struct dentry *ret = NULL;
+
+ mlog_entry("(0x%p, 0x%p, %d, %d, 0x%p, 0x%p)\n",
+ sb, fh, fh_len, fileid_type, acceptable, context);
+
+ if (fh_len < 3 || fileid_type > 2)
+ goto bail;
+
+ if (fileid_type == 2) {
+ if (fh_len < 6)
+ goto bail;
+
+ parent.ih_blkno = (u64)le32_to_cpu(fh[3]) << 32;
+ parent.ih_blkno |= (u64)le32_to_cpu(fh[4]);
+ parent.ih_generation = le32_to_cpu(fh[5]);
+
+ mlog(0, "Decoding parent: blkno: %"MLFu64", generation: %u\n",
+ parent.ih_blkno, parent.ih_generation);
+ }
+
+ handle.ih_blkno = (u64)le32_to_cpu(fh[0]) << 32;
+ handle.ih_blkno |= (u64)le32_to_cpu(fh[1]);
+ handle.ih_generation = le32_to_cpu(fh[2]);
+
+ mlog(0, "Decoding fh: blkno: %"MLFu64", generation: %u\n",
+ handle.ih_blkno, handle.ih_generation);
+
+ ret = ocfs2_export_ops.find_exported_dentry(sb, &handle, &parent,
+ acceptable, context);
+
+bail:
+ mlog_exit_ptr(ret);
+ return ret;
+}
+
+struct export_operations ocfs2_export_ops = {
+ .decode_fh = ocfs2_decode_fh,
+ .encode_fh = ocfs2_encode_fh,
+
+ .get_parent = ocfs2_get_parent,
+ .get_dentry = ocfs2_get_dentry,
+};
diff --git a/fs/ocfs2/export.h b/fs/ocfs2/export.h
new file mode 100644
index 00000000000..5b77ee7866e
--- /dev/null
+++ b/fs/ocfs2/export.h
@@ -0,0 +1,31 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * export.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2002, 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_EXPORT_H
+#define OCFS2_EXPORT_H
+
+extern struct export_operations ocfs2_export_ops;
+
+#endif /* OCFS2_EXPORT_H */
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
new file mode 100644
index 00000000000..f2fb40cd296
--- /dev/null
+++ b/fs/ocfs2/extent_map.c
@@ -0,0 +1,994 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * extent_map.c
+ *
+ * In-memory extent map for OCFS2. Man, this code was prettier in
+ * the library.
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License, version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/rbtree.h>
+
+#define MLOG_MASK_PREFIX ML_EXTENT_MAP
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#include "extent_map.h"
+#include "inode.h"
+#include "super.h"
+
+#include "buffer_head_io.h"
+
+
+/*
+ * SUCK SUCK SUCK
+ * Our headers are so bad that struct ocfs2_extent_map is in ocfs.h
+ */
+
+struct ocfs2_extent_map_entry {
+ struct rb_node e_node;
+ int e_tree_depth;
+ struct ocfs2_extent_rec e_rec;
+};
+
+struct ocfs2_em_insert_context {
+ int need_left;
+ int need_right;
+ struct ocfs2_extent_map_entry *new_ent;
+ struct ocfs2_extent_map_entry *old_ent;
+ struct ocfs2_extent_map_entry *left_ent;
+ struct ocfs2_extent_map_entry *right_ent;
+};
+
+static kmem_cache_t *ocfs2_em_ent_cachep = NULL;
+
+
+static struct ocfs2_extent_map_entry *
+ocfs2_extent_map_lookup(struct ocfs2_extent_map *em,
+ u32 cpos, u32 clusters,
+ struct rb_node ***ret_p,
+ struct rb_node **ret_parent);
+static int ocfs2_extent_map_insert(struct inode *inode,
+ struct ocfs2_extent_rec *rec,
+ int tree_depth);
+static int ocfs2_extent_map_insert_entry(struct ocfs2_extent_map *em,
+ struct ocfs2_extent_map_entry *ent);
+static int ocfs2_extent_map_find_leaf(struct inode *inode,
+ u32 cpos, u32 clusters,
+ struct ocfs2_extent_list *el);
+static int ocfs2_extent_map_lookup_read(struct inode *inode,
+ u32 cpos, u32 clusters,
+ struct ocfs2_extent_map_entry **ret_ent);
+static int ocfs2_extent_map_try_insert(struct inode *inode,
+ struct ocfs2_extent_rec *rec,
+ int tree_depth,
+ struct ocfs2_em_insert_context *ctxt);
+
+/* returns 1 only if the rec contains all the given clusters -- that is,
+ * rec's cpos is <= the cluster cpos and the rec endpoint (cpos +
+ * clusters) is >= the argument's endpoint */
+static int ocfs2_extent_rec_contains_clusters(struct ocfs2_extent_rec *rec,
+ u32 cpos, u32 clusters)
+{
+ if (le32_to_cpu(rec->e_cpos) > cpos)
+ return 0;
+ if (cpos + clusters > le32_to_cpu(rec->e_cpos) +
+ le32_to_cpu(rec->e_clusters))
+ return 0;
+ return 1;
+}
+
+
+/*
+ * Find an entry in the tree that intersects the region passed in.
+ * Note that this will find straddled intervals, it is up to the
+ * callers to enforce any boundary conditions.
+ *
+ * Callers must hold ip_lock. This lookup is not guaranteed to return
+ * a tree_depth 0 match, and as such can race inserts if the lock
+ * were not held.
+ *
+ * The rb_node garbage lets insertion share the search. Trivial
+ * callers pass NULL.
+ */
+static struct ocfs2_extent_map_entry *
+ocfs2_extent_map_lookup(struct ocfs2_extent_map *em,
+ u32 cpos, u32 clusters,
+ struct rb_node ***ret_p,
+ struct rb_node **ret_parent)
+{
+ struct rb_node **p = &em->em_extents.rb_node;
+ struct rb_node *parent = NULL;
+ struct ocfs2_extent_map_entry *ent = NULL;
+
+ while (*p) {
+ parent = *p;
+ ent = rb_entry(parent, struct ocfs2_extent_map_entry,
+ e_node);
+ if ((cpos + clusters) <= le32_to_cpu(ent->e_rec.e_cpos)) {
+ p = &(*p)->rb_left;
+ ent = NULL;
+ } else if (cpos >= (le32_to_cpu(ent->e_rec.e_cpos) +
+ le32_to_cpu(ent->e_rec.e_clusters))) {
+ p = &(*p)->rb_right;
+ ent = NULL;
+ } else
+ break;
+ }
+
+ if (ret_p != NULL)
+ *ret_p = p;
+ if (ret_parent != NULL)
+ *ret_parent = parent;
+ return ent;
+}
+
+/*
+ * Find the leaf containing the interval we want. While we're on our
+ * way down the tree, fill in every record we see at any depth, because
+ * we might want it later.
+ *
+ * Note that this code is run without ip_lock. That's because it
+ * sleeps while reading. If someone is also filling the extent list at
+ * the same time we are, we might have to restart.
+ */
+static int ocfs2_extent_map_find_leaf(struct inode *inode,
+ u32 cpos, u32 clusters,
+ struct ocfs2_extent_list *el)
+{
+ int i, ret;
+ struct buffer_head *eb_bh = NULL;
+ u64 blkno;
+ u32 rec_end;
+ struct ocfs2_extent_block *eb;
+ struct ocfs2_extent_rec *rec;
+
+ /*
+ * The bh data containing the el cannot change here, because
+ * we hold alloc_sem. So we can do this without other
+ * locks.
+ */
+ while (el->l_tree_depth) {
+ blkno = 0;
+ for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
+ rec = &el->l_recs[i];
+ rec_end = (le32_to_cpu(rec->e_cpos) +
+ le32_to_cpu(rec->e_clusters));
+
+ ret = -EBADR;
+ if (rec_end > OCFS2_I(inode)->ip_clusters) {
+ mlog_errno(ret);
+ goto out_free;
+ }
+
+ if (rec_end <= cpos) {
+ ret = ocfs2_extent_map_insert(inode, rec,
+ le16_to_cpu(el->l_tree_depth));
+ if (ret && (ret != -EEXIST)) {
+ mlog_errno(ret);
+ goto out_free;
+ }
+ continue;
+ }
+ if ((cpos + clusters) <= le32_to_cpu(rec->e_cpos)) {
+ ret = ocfs2_extent_map_insert(inode, rec,
+ le16_to_cpu(el->l_tree_depth));
+ if (ret && (ret != -EEXIST)) {
+ mlog_errno(ret);
+ goto out_free;
+ }
+ continue;
+ }
+
+ /*
+ * We've found a record that matches our
+ * interval. We don't insert it because we're
+ * about to traverse it.
+ */
+
+ /* Check to see if we're straddling */
+ ret = -ESRCH;
+ if (!ocfs2_extent_rec_contains_clusters(rec,
+ cpos,
+ clusters)) {
+ mlog_errno(ret);
+ goto out_free;
+ }
+
+ /*
+ * If we've already found a record, the el has
+ * two records covering the same interval.
+ * EEEK!
+ */
+ ret = -EBADR;
+ if (blkno) {
+ mlog_errno(ret);
+ goto out_free;
+ }
+
+ blkno = le64_to_cpu(rec->e_blkno);
+ }
+
+ /*
+ * We don't support holes, and we're still up
+ * in the branches, so we'd better have found someone
+ */
+ ret = -EBADR;
+ if (!blkno) {
+ mlog_errno(ret);
+ goto out_free;
+ }
+
+ if (eb_bh) {
+ brelse(eb_bh);
+ eb_bh = NULL;
+ }
+ ret = ocfs2_read_block(OCFS2_SB(inode->i_sb),
+ blkno, &eb_bh, OCFS2_BH_CACHED,
+ inode);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_free;
+ }
+ eb = (struct ocfs2_extent_block *)eb_bh->b_data;
+ if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
+ OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
+ ret = -EIO;
+ goto out_free;
+ }
+ el = &eb->h_list;
+ }
+
+ BUG_ON(el->l_tree_depth);
+
+ for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
+ rec = &el->l_recs[i];
+ ret = ocfs2_extent_map_insert(inode, rec,
+ le16_to_cpu(el->l_tree_depth));
+ if (ret) {
+ mlog_errno(ret);
+ goto out_free;
+ }
+ }
+
+ ret = 0;
+
+out_free:
+ if (eb_bh)
+ brelse(eb_bh);
+
+ return ret;
+}
+
+/*
+ * This lookup actually will read from disk. It has one invariant:
+ * It will never re-traverse blocks. This means that all inserts should
+ * be new regions or more granular regions (both allowed by insert).
+ */
+static int ocfs2_extent_map_lookup_read(struct inode *inode,
+ u32 cpos,
+ u32 clusters,
+ struct ocfs2_extent_map_entry **ret_ent)
+{
+ int ret;
+ u64 blkno;
+ struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map;
+ struct ocfs2_extent_map_entry *ent;
+ struct buffer_head *bh = NULL;
+ struct ocfs2_extent_block *eb;
+ struct ocfs2_dinode *di;
+ struct ocfs2_extent_list *el;
+
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+ ent = ocfs2_extent_map_lookup(em, cpos, clusters, NULL, NULL);
+ if (ent) {
+ if (!ent->e_tree_depth) {
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+ *ret_ent = ent;
+ return 0;
+ }
+ blkno = le64_to_cpu(ent->e_rec.e_blkno);
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+
+ ret = ocfs2_read_block(OCFS2_SB(inode->i_sb), blkno, &bh,
+ OCFS2_BH_CACHED, inode);
+ if (ret) {
+ mlog_errno(ret);
+ if (bh)
+ brelse(bh);
+ return ret;
+ }
+ eb = (struct ocfs2_extent_block *)bh->b_data;
+ if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
+ OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
+ brelse(bh);
+ return -EIO;
+ }
+ el = &eb->h_list;
+ } else {
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+
+ ret = ocfs2_read_block(OCFS2_SB(inode->i_sb),
+ OCFS2_I(inode)->ip_blkno, &bh,
+ OCFS2_BH_CACHED, inode);
+ if (ret) {
+ mlog_errno(ret);
+ if (bh)
+ brelse(bh);
+ return ret;
+ }
+ di = (struct ocfs2_dinode *)bh->b_data;
+ if (!OCFS2_IS_VALID_DINODE(di)) {
+ brelse(bh);
+ OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, di);
+ return -EIO;
+ }
+ el = &di->id2.i_list;
+ }
+
+ ret = ocfs2_extent_map_find_leaf(inode, cpos, clusters, el);
+ brelse(bh);
+ if (ret) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ ent = ocfs2_extent_map_lookup(em, cpos, clusters, NULL, NULL);
+ if (!ent) {
+ ret = -ESRCH;
+ mlog_errno(ret);
+ return ret;
+ }
+
+ BUG_ON(ent->e_tree_depth); /* FIXME: Make sure this isn't a corruption */
+
+ *ret_ent = ent;
+
+ return 0;
+}
+
+/*
+ * Callers must hold ip_lock. This can insert pieces of the tree,
+ * thus racing lookup if the lock weren't held.
+ */
+static int ocfs2_extent_map_insert_entry(struct ocfs2_extent_map *em,
+ struct ocfs2_extent_map_entry *ent)
+{
+ struct rb_node **p, *parent;
+ struct ocfs2_extent_map_entry *old_ent;
+
+ old_ent = ocfs2_extent_map_lookup(em, le32_to_cpu(ent->e_rec.e_cpos),
+ le32_to_cpu(ent->e_rec.e_clusters),
+ &p, &parent);
+ if (old_ent)
+ return -EEXIST;
+
+ rb_link_node(&ent->e_node, parent, p);
+ rb_insert_color(&ent->e_node, &em->em_extents);
+
+ return 0;
+}
+
+
+/*
+ * Simple rule: on any return code other than -EAGAIN, anything left
+ * in the insert_context will be freed.
+ */
+static int ocfs2_extent_map_try_insert(struct inode *inode,
+ struct ocfs2_extent_rec *rec,
+ int tree_depth,
+ struct ocfs2_em_insert_context *ctxt)
+{
+ int ret;
+ struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map;
+ struct ocfs2_extent_map_entry *old_ent;
+
+ ctxt->need_left = 0;
+ ctxt->need_right = 0;
+ ctxt->old_ent = NULL;
+
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+ ret = ocfs2_extent_map_insert_entry(em, ctxt->new_ent);
+ if (!ret) {
+ ctxt->new_ent = NULL;
+ goto out_unlock;
+ }
+
+ old_ent = ocfs2_extent_map_lookup(em, le32_to_cpu(rec->e_cpos),
+ le32_to_cpu(rec->e_clusters), NULL,
+ NULL);
+
+ BUG_ON(!old_ent);
+
+ ret = -EEXIST;
+ if (old_ent->e_tree_depth < tree_depth)
+ goto out_unlock;
+
+ if (old_ent->e_tree_depth == tree_depth) {
+ if (!memcmp(rec, &old_ent->e_rec,
+ sizeof(struct ocfs2_extent_rec)))
+ ret = 0;
+
+ /* FIXME: Should this be ESRCH/EBADR??? */
+ goto out_unlock;
+ }
+
+ /*
+ * We do it in this order specifically so that no actual tree
+ * changes occur until we have all the pieces we need. We
+ * don't want malloc failures to leave an inconsistent tree.
+ * Whenever we drop the lock, another process could be
+ * inserting. Also note that, if another process just beat us
+ * to an insert, we might not need the same pieces we needed
+ * the first go round. In the end, the pieces we need will
+ * be used, and the pieces we don't will be freed.
+ */
+ ctxt->need_left = !!(le32_to_cpu(rec->e_cpos) >
+ le32_to_cpu(old_ent->e_rec.e_cpos));
+ ctxt->need_right = !!((le32_to_cpu(old_ent->e_rec.e_cpos) +
+ le32_to_cpu(old_ent->e_rec.e_clusters)) >
+ (le32_to_cpu(rec->e_cpos) + le32_to_cpu(rec->e_clusters)));
+ ret = -EAGAIN;
+ if (ctxt->need_left) {
+ if (!ctxt->left_ent)
+ goto out_unlock;
+ *(ctxt->left_ent) = *old_ent;
+ ctxt->left_ent->e_rec.e_clusters =
+ cpu_to_le32(le32_to_cpu(rec->e_cpos) -
+ le32_to_cpu(ctxt->left_ent->e_rec.e_cpos));
+ }
+ if (ctxt->need_right) {
+ if (!ctxt->right_ent)
+ goto out_unlock;
+ *(ctxt->right_ent) = *old_ent;
+ ctxt->right_ent->e_rec.e_cpos =
+ cpu_to_le32(le32_to_cpu(rec->e_cpos) +
+ le32_to_cpu(rec->e_clusters));
+ ctxt->right_ent->e_rec.e_clusters =
+ cpu_to_le32((le32_to_cpu(old_ent->e_rec.e_cpos) +
+ le32_to_cpu(old_ent->e_rec.e_clusters)) -
+ le32_to_cpu(ctxt->right_ent->e_rec.e_cpos));
+ }
+
+ rb_erase(&old_ent->e_node, &em->em_extents);
+ /* Now that it's erased, set it up for deletion */
+ ctxt->old_ent = old_ent;
+
+ if (ctxt->need_left) {
+ ret = ocfs2_extent_map_insert_entry(em,
+ ctxt->left_ent);
+ if (ret)
+ goto out_unlock;
+ ctxt->left_ent = NULL;
+ }
+
+ if (ctxt->need_right) {
+ ret = ocfs2_extent_map_insert_entry(em,
+ ctxt->right_ent);
+ if (ret)
+ goto out_unlock;
+ ctxt->right_ent = NULL;
+ }
+
+ ret = ocfs2_extent_map_insert_entry(em, ctxt->new_ent);
+
+ if (!ret)
+ ctxt->new_ent = NULL;
+
+out_unlock:
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+
+ return ret;
+}
+
+
+static int ocfs2_extent_map_insert(struct inode *inode,
+ struct ocfs2_extent_rec *rec,
+ int tree_depth)
+{
+ int ret;
+ struct ocfs2_em_insert_context ctxt = {0, };
+
+ if ((le32_to_cpu(rec->e_cpos) + le32_to_cpu(rec->e_clusters)) >
+ OCFS2_I(inode)->ip_map.em_clusters) {
+ ret = -EBADR;
+ mlog_errno(ret);
+ return ret;
+ }
+
+ /* Zero e_clusters means a truncated tail record. It better be EOF */
+ if (!rec->e_clusters) {
+ if ((le32_to_cpu(rec->e_cpos) + le32_to_cpu(rec->e_clusters)) !=
+ OCFS2_I(inode)->ip_map.em_clusters) {
+ ret = -EBADR;
+ mlog_errno(ret);
+ return ret;
+ }
+
+ /* Ignore the truncated tail */
+ return 0;
+ }
+
+ ret = -ENOMEM;
+ ctxt.new_ent = kmem_cache_alloc(ocfs2_em_ent_cachep,
+ GFP_KERNEL);
+ if (!ctxt.new_ent) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ ctxt.new_ent->e_rec = *rec;
+ ctxt.new_ent->e_tree_depth = tree_depth;
+
+ do {
+ ret = -ENOMEM;
+ if (ctxt.need_left && !ctxt.left_ent) {
+ ctxt.left_ent =
+ kmem_cache_alloc(ocfs2_em_ent_cachep,
+ GFP_KERNEL);
+ if (!ctxt.left_ent)
+ break;
+ }
+ if (ctxt.need_right && !ctxt.right_ent) {
+ ctxt.right_ent =
+ kmem_cache_alloc(ocfs2_em_ent_cachep,
+ GFP_KERNEL);
+ if (!ctxt.right_ent)
+ break;
+ }
+
+ ret = ocfs2_extent_map_try_insert(inode, rec,
+ tree_depth, &ctxt);
+ } while (ret == -EAGAIN);
+
+ if (ret < 0)
+ mlog_errno(ret);
+
+ if (ctxt.left_ent)
+ kmem_cache_free(ocfs2_em_ent_cachep, ctxt.left_ent);
+ if (ctxt.right_ent)
+ kmem_cache_free(ocfs2_em_ent_cachep, ctxt.right_ent);
+ if (ctxt.old_ent)
+ kmem_cache_free(ocfs2_em_ent_cachep, ctxt.old_ent);
+ if (ctxt.new_ent)
+ kmem_cache_free(ocfs2_em_ent_cachep, ctxt.new_ent);
+
+ return ret;
+}
+
+/*
+ * Append this record to the tail of the extent map. It must be
+ * tree_depth 0. The record might be an extension of an existing
+ * record, and that case needs to be handled. E.g.:
+ *
+ * Existing record in the extent map:
+ *
+ * cpos = 10, len = 10
+ * |---------|
+ *
+ * New Record:
+ *
+ * cpos = 10, len = 20
+ * |------------------|
+ *
+ * The passed record is the new on-disk record. The new_clusters value
+ * is how many clusters were added to the file. If the append is a
+ * contiguous append, the new_clusters has been added to
+ * rec->e_clusters. If the append is an entirely new extent, then
+ * rec->e_clusters is == new_clusters.
+ */
+int ocfs2_extent_map_append(struct inode *inode,
+ struct ocfs2_extent_rec *rec,
+ u32 new_clusters)
+{
+ int ret;
+ struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map;
+ struct ocfs2_extent_map_entry *ent;
+ struct ocfs2_extent_rec *old;
+
+ BUG_ON(!new_clusters);
+ BUG_ON(le32_to_cpu(rec->e_clusters) < new_clusters);
+
+ if (em->em_clusters < OCFS2_I(inode)->ip_clusters) {
+ /*
+ * Size changed underneath us on disk. Drop any
+ * straddling records and update our idea of
+ * i_clusters
+ */
+ ocfs2_extent_map_drop(inode, em->em_clusters - 1);
+ em->em_clusters = OCFS2_I(inode)->ip_clusters;
+ }
+
+ mlog_bug_on_msg((le32_to_cpu(rec->e_cpos) +
+ le32_to_cpu(rec->e_clusters)) !=
+ (em->em_clusters + new_clusters),
+ "Inode %"MLFu64":\n"
+ "rec->e_cpos = %u + rec->e_clusters = %u = %u\n"
+ "em->em_clusters = %u + new_clusters = %u = %u\n",
+ OCFS2_I(inode)->ip_blkno,
+ le32_to_cpu(rec->e_cpos), le32_to_cpu(rec->e_clusters),
+ le32_to_cpu(rec->e_cpos) + le32_to_cpu(rec->e_clusters),
+ em->em_clusters, new_clusters,
+ em->em_clusters + new_clusters);
+
+ em->em_clusters += new_clusters;
+
+ ret = -ENOENT;
+ if (le32_to_cpu(rec->e_clusters) > new_clusters) {
+ /* This is a contiguous append */
+ ent = ocfs2_extent_map_lookup(em, le32_to_cpu(rec->e_cpos), 1,
+ NULL, NULL);
+ if (ent) {
+ old = &ent->e_rec;
+ BUG_ON((le32_to_cpu(rec->e_cpos) +
+ le32_to_cpu(rec->e_clusters)) !=
+ (le32_to_cpu(old->e_cpos) +
+ le32_to_cpu(old->e_clusters) +
+ new_clusters));
+ if (ent->e_tree_depth == 0) {
+ BUG_ON(le32_to_cpu(old->e_cpos) !=
+ le32_to_cpu(rec->e_cpos));
+ BUG_ON(le64_to_cpu(old->e_blkno) !=
+ le64_to_cpu(rec->e_blkno));
+ ret = 0;
+ }
+ /*
+ * Let non-leafs fall through as -ENOENT to
+ * force insertion of the new leaf.
+ */
+ le32_add_cpu(&old->e_clusters, new_clusters);
+ }
+ }
+
+ if (ret == -ENOENT)
+ ret = ocfs2_extent_map_insert(inode, rec, 0);
+ if (ret < 0)
+ mlog_errno(ret);
+ return ret;
+}
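
As a rough illustration of the two append cases described in the comment above, here is a small user-space sketch; the record layout is simplified to host-endian fields and is not the on-disk struct ocfs2_extent_rec:

#include <stdint.h>
#include <stdio.h>

struct simple_rec {
        uint32_t cpos;          /* first cluster covered by the record */
        uint32_t clusters;      /* number of clusters covered */
};

/* Returns 1 when the new on-disk record extends an existing tail record,
 * 0 when it describes an entirely new extent. */
static int is_contiguous_append(const struct simple_rec *rec,
                                uint32_t new_clusters)
{
        return rec->clusters > new_clusters;
}

int main(void)
{
        struct simple_rec grown = { 10, 30 };   /* old length 10, 20 appended */
        struct simple_rec fresh = { 40, 20 };   /* brand new 20-cluster extent */

        printf("grown: %d, fresh: %d\n",
               is_contiguous_append(&grown, 20),
               is_contiguous_append(&fresh, 20));
        return 0;
}
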
+
+#if 0
+/* Code here is included but defined out as it completes the extent
+ * map api and may be used in the future. */
+
+/*
+ * Look up the record containing this cluster offset. This record is
+ * part of the extent map. Do not free it. Any changes you make to
+ * it will be reflected in the extent map. So, if your last extent
+ * is (cpos = 10, clusters = 10) and you truncate the file by 5
+ * clusters, you can do:
+ *
+ * ret = ocfs2_extent_map_get_rec(em, orig_size - 5, &rec);
+ * rec->e_clusters -= 5;
+ *
+ * The lookup does not read from disk. If the map isn't filled in for
+ * an entry, you won't find it.
+ *
+ * Also note that the returned record is valid until alloc_sem is
+ * dropped. After that, truncate and extend can happen. Caveat Emptor.
+ */
+int ocfs2_extent_map_get_rec(struct inode *inode, u32 cpos,
+ struct ocfs2_extent_rec **rec,
+ int *tree_depth)
+{
+ int ret = -ENOENT;
+ struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map;
+ struct ocfs2_extent_map_entry *ent;
+
+ *rec = NULL;
+
+ if (cpos >= OCFS2_I(inode)->ip_clusters)
+ return -EINVAL;
+
+ if (cpos >= em->em_clusters) {
+ /*
+ * Size changed underneath us on disk. Drop any
+ * straddling records and update our idea of
+ * i_clusters
+ */
+ ocfs2_extent_map_drop(inode, em->em_clusters - 1);
+ em->em_clusters = OCFS2_I(inode)->ip_clusters;
+ }
+
+ ent = ocfs2_extent_map_lookup(&OCFS2_I(inode)->ip_map, cpos, 1,
+ NULL, NULL);
+
+ if (ent) {
+ *rec = &ent->e_rec;
+ if (tree_depth)
+ *tree_depth = ent->e_tree_depth;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int ocfs2_extent_map_get_clusters(struct inode *inode,
+ u32 v_cpos, int count,
+ u32 *p_cpos, int *ret_count)
+{
+ int ret;
+ u32 coff, ccount;
+ struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map;
+ struct ocfs2_extent_map_entry *ent = NULL;
+
+ *p_cpos = ccount = 0;
+
+ if ((v_cpos + count) > OCFS2_I(inode)->ip_clusters)
+ return -EINVAL;
+
+ if ((v_cpos + count) > em->em_clusters) {
+ /*
+ * Size changed underneath us on disk. Drop any
+ * straddling records and update our idea of
+ * i_clusters
+ */
+ ocfs2_extent_map_drop(inode, em->em_clusters - 1);
+ em->em_clusters = OCFS2_I(inode)->ip_clusters;
+ }
+
+ ret = ocfs2_extent_map_lookup_read(inode, v_cpos, count, &ent);
+ if (ret)
+ return ret;
+
+ if (ent) {
+ /* We should never find ourselves straddling an interval */
+ if (!ocfs2_extent_rec_contains_clusters(&ent->e_rec,
+ v_cpos,
+ count))
+ return -ESRCH;
+
+ coff = v_cpos - le32_to_cpu(ent->e_rec.e_cpos);
+ *p_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
+ le64_to_cpu(ent->e_rec.e_blkno)) +
+ coff;
+
+ if (ret_count)
+ *ret_count = le32_to_cpu(ent->e_rec.e_clusters) - coff;
+
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+#endif /* 0 */
+
+int ocfs2_extent_map_get_blocks(struct inode *inode,
+ u64 v_blkno, int count,
+ u64 *p_blkno, int *ret_count)
+{
+ int ret;
+ u64 boff;
+ u32 cpos, clusters;
+ int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
+ struct ocfs2_extent_map_entry *ent = NULL;
+ struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map;
+ struct ocfs2_extent_rec *rec;
+
+ *p_blkno = 0;
+
+ cpos = ocfs2_blocks_to_clusters(inode->i_sb, v_blkno);
+ clusters = ocfs2_blocks_to_clusters(inode->i_sb,
+ (u64)count + bpc - 1);
+ if ((cpos + clusters) > OCFS2_I(inode)->ip_clusters) {
+ ret = -EINVAL;
+ mlog_errno(ret);
+ return ret;
+ }
+
+ if ((cpos + clusters) > em->em_clusters) {
+ /*
+ * Size changed underneath us on disk. Drop any
+ * straddling records and update our idea of
+ * i_clusters
+ */
+ ocfs2_extent_map_drop(inode, em->em_clusters - 1);
+ em->em_clusters = OCFS2_I(inode)->ip_clusters;
+ }
+
+ ret = ocfs2_extent_map_lookup_read(inode, cpos, clusters, &ent);
+ if (ret) {
+ mlog_errno(ret);
+ return ret;
+ }
+
+ if (ent) {
+ rec = &ent->e_rec;
+
+ /* We should never find ourselves straddling an interval */
+ if (!ocfs2_extent_rec_contains_clusters(rec, cpos, clusters)) {
+ ret = -ESRCH;
+ mlog_errno(ret);
+ return ret;
+ }
+
+ boff = ocfs2_clusters_to_blocks(inode->i_sb, cpos -
+ le32_to_cpu(rec->e_cpos));
+ boff += (v_blkno & (u64)(bpc - 1));
+ *p_blkno = le64_to_cpu(rec->e_blkno) + boff;
+
+ if (ret_count) {
+ *ret_count = ocfs2_clusters_to_blocks(inode->i_sb,
+ le32_to_cpu(rec->e_clusters)) - boff;
+ }
+
+ return 0;
+ }
+
+ return -ENOENT;
+}
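
The mapping above reduces to cluster/block arithmetic: find the virtual cluster, offset into the record, convert back to blocks, and add the block-within-cluster remainder. A user-space sketch of the same math, assuming a hypothetical geometry of 8 blocks per cluster:

#include <stdint.h>
#include <stdio.h>

#define BLOCKS_PER_CLUSTER 8ULL         /* hypothetical, e.g. 32KB / 4KB */

struct simple_rec {
        uint32_t cpos;          /* first virtual cluster covered */
        uint32_t clusters;      /* number of clusters covered */
        uint64_t blkno;         /* first physical block of the extent */
};

/* Map a virtual block number to a physical one through an extent record. */
static uint64_t map_block(const struct simple_rec *rec, uint64_t v_blkno)
{
        uint32_t cpos = (uint32_t)(v_blkno / BLOCKS_PER_CLUSTER);
        uint64_t boff = (uint64_t)(cpos - rec->cpos) * BLOCKS_PER_CLUSTER;

        boff += v_blkno & (BLOCKS_PER_CLUSTER - 1);     /* block within cluster */
        return rec->blkno + boff;
}

int main(void)
{
        struct simple_rec rec = { 10, 5, 4096 };  /* clusters 10..14 at block 4096 */

        /* Virtual block 85 is cluster 10, block 5 within it: 4096 + 5 = 4101. */
        printf("%llu\n", (unsigned long long)map_block(&rec, 85));
        return 0;
}
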
+
+int ocfs2_extent_map_init(struct inode *inode)
+{
+ struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map;
+
+ em->em_extents = RB_ROOT;
+ em->em_clusters = 0;
+
+ return 0;
+}
+
+/* Needs the lock */
+static void __ocfs2_extent_map_drop(struct inode *inode,
+ u32 new_clusters,
+ struct rb_node **free_head,
+ struct ocfs2_extent_map_entry **tail_ent)
+{
+ struct rb_node *node, *next;
+ struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map;
+ struct ocfs2_extent_map_entry *ent;
+
+ *free_head = NULL;
+
+ ent = NULL;
+ node = rb_last(&em->em_extents);
+ while (node) {
+ next = rb_prev(node);
+
+ ent = rb_entry(node, struct ocfs2_extent_map_entry,
+ e_node);
+ if (le32_to_cpu(ent->e_rec.e_cpos) < new_clusters)
+ break;
+
+ rb_erase(&ent->e_node, &em->em_extents);
+
+ node->rb_right = *free_head;
+ *free_head = node;
+
+ ent = NULL;
+ node = next;
+ }
+
+ /* Do we have an entry straddling new_clusters? */
+ if (tail_ent) {
+ if (ent &&
+ ((le32_to_cpu(ent->e_rec.e_cpos) +
+ le32_to_cpu(ent->e_rec.e_clusters)) > new_clusters))
+ *tail_ent = ent;
+ else
+ *tail_ent = NULL;
+ }
+}
+
+static void __ocfs2_extent_map_drop_cleanup(struct rb_node *free_head)
+{
+ struct rb_node *node;
+ struct ocfs2_extent_map_entry *ent;
+
+ while (free_head) {
+ node = free_head;
+ free_head = node->rb_right;
+
+ ent = rb_entry(node, struct ocfs2_extent_map_entry,
+ e_node);
+ kmem_cache_free(ocfs2_em_ent_cachep, ent);
+ }
+}
+
+/*
+ * Remove all entries past new_clusters, inclusive of an entry that
+ * contains new_clusters. This is effectively a cache forget.
+ *
+ * If you want to also clip the last extent by some number of clusters,
+ * you need to call ocfs2_extent_map_trunc().
+ * This code does not check or modify ip_clusters.
+ */
+int ocfs2_extent_map_drop(struct inode *inode, u32 new_clusters)
+{
+ struct rb_node *free_head = NULL;
+ struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map;
+ struct ocfs2_extent_map_entry *ent;
+
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+
+ __ocfs2_extent_map_drop(inode, new_clusters, &free_head, &ent);
+
+ if (ent) {
+ rb_erase(&ent->e_node, &em->em_extents);
+ ent->e_node.rb_right = free_head;
+ free_head = &ent->e_node;
+ }
+
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+
+ if (free_head)
+ __ocfs2_extent_map_drop_cleanup(free_head);
+
+ return 0;
+}
+
+/*
+ * Remove all entries past new_clusters and also clip any extent
+ * straddling new_clusters, if there is one. This does not check
+ * or modify ip_clusters
+ */
+int ocfs2_extent_map_trunc(struct inode *inode, u32 new_clusters)
+{
+ struct rb_node *free_head = NULL;
+ struct ocfs2_extent_map_entry *ent = NULL;
+
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+
+ __ocfs2_extent_map_drop(inode, new_clusters, &free_head, &ent);
+
+ if (ent)
+ ent->e_rec.e_clusters = cpu_to_le32(new_clusters -
+ le32_to_cpu(ent->e_rec.e_cpos));
+
+ OCFS2_I(inode)->ip_map.em_clusters = new_clusters;
+
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+
+ if (free_head)
+ __ocfs2_extent_map_drop_cleanup(free_head);
+
+ return 0;
+}
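
The difference between _drop() and _trunc() is only the clipping of a straddling record, which is plain arithmetic. A hedged user-space illustration with a simplified record:

#include <stdint.h>
#include <stdio.h>

struct simple_rec {
        uint32_t cpos;          /* first cluster covered */
        uint32_t clusters;      /* number of clusters covered */
};

/* Clip a record that straddles new_clusters so it ends exactly there. */
static void clip_tail(struct simple_rec *rec, uint32_t new_clusters)
{
        if (rec->cpos + rec->clusters > new_clusters)
                rec->clusters = new_clusters - rec->cpos;
}

int main(void)
{
        struct simple_rec tail = { 10, 10 };    /* covers clusters 10..19 */

        clip_tail(&tail, 15);                   /* truncate the file to 15 clusters */
        printf("cpos=%u clusters=%u\n", tail.cpos, tail.clusters);  /* 10, 5 */
        return 0;
}
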
+
+int __init init_ocfs2_extent_maps(void)
+{
+ ocfs2_em_ent_cachep =
+ kmem_cache_create("ocfs2_em_ent",
+ sizeof(struct ocfs2_extent_map_entry),
+ 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+ if (!ocfs2_em_ent_cachep)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void __exit exit_ocfs2_extent_maps(void)
+{
+ kmem_cache_destroy(ocfs2_em_ent_cachep);
+}
diff --git a/fs/ocfs2/extent_map.h b/fs/ocfs2/extent_map.h
new file mode 100644
index 00000000000..fa3745efa88
--- /dev/null
+++ b/fs/ocfs2/extent_map.h
@@ -0,0 +1,46 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * extent_map.h
+ *
+ * In-memory file extent mappings for OCFS2.
+ *
+ * Copyright (C) 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License, version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef _EXTENT_MAP_H
+#define _EXTENT_MAP_H
+
+int init_ocfs2_extent_maps(void);
+void exit_ocfs2_extent_maps(void);
+
+/*
+ * EVERY CALL here except _init, _trunc, and _drop expects alloc_sem
+ * to be held. The allocation cannot change at all while the map is
+ * in the process of being updated.
+ */
+int ocfs2_extent_map_init(struct inode *inode);
+int ocfs2_extent_map_append(struct inode *inode,
+ struct ocfs2_extent_rec *rec,
+ u32 new_clusters);
+int ocfs2_extent_map_get_blocks(struct inode *inode,
+ u64 v_blkno, int count,
+ u64 *p_blkno, int *ret_count);
+int ocfs2_extent_map_drop(struct inode *inode, u32 new_clusters);
+int ocfs2_extent_map_trunc(struct inode *inode, u32 new_clusters);
+
+#endif /* _EXTENT_MAP_H */
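
As a hedged sketch of the locking rule stated above (a fragment only, not a standalone compilable unit; error handling omitted), a lookup caller would take ip_alloc_sem shared around the call:

        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        u64 p_blkno;
        int count, ret;

        down_read(&oi->ip_alloc_sem);   /* allocation may not change under us */
        ret = ocfs2_extent_map_get_blocks(inode, v_blkno, 1, &p_blkno, &count);
        up_read(&oi->ip_alloc_sem);
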
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
new file mode 100644
index 00000000000..eaf33caa0a1
--- /dev/null
+++ b/fs/ocfs2/file.c
@@ -0,0 +1,1238 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * file.c
+ *
+ * File open, close, extend, truncate
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/capability.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/uio.h>
+
+#define MLOG_MASK_PREFIX ML_INODE
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#include "alloc.h"
+#include "aops.h"
+#include "dir.h"
+#include "dlmglue.h"
+#include "extent_map.h"
+#include "file.h"
+#include "sysfile.h"
+#include "inode.h"
+#include "journal.h"
+#include "mmap.h"
+#include "suballoc.h"
+#include "super.h"
+
+#include "buffer_head_io.h"
+
+static int ocfs2_sync_inode(struct inode *inode)
+{
+ filemap_fdatawrite(inode->i_mapping);
+ return sync_mapping_buffers(inode->i_mapping);
+}
+
+static int ocfs2_file_open(struct inode *inode, struct file *file)
+{
+ int status;
+ int mode = file->f_flags;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+
+ mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
+ file->f_dentry->d_name.len, file->f_dentry->d_name.name);
+
+ spin_lock(&oi->ip_lock);
+
+ /* Check that the inode hasn't been wiped from disk by another
+ * node. If it hasn't then we're safe as long as we hold the
+ * spin lock until our increment of open count. */
+ if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
+ spin_unlock(&oi->ip_lock);
+
+ status = -ENOENT;
+ goto leave;
+ }
+
+ if (mode & O_DIRECT)
+ oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;
+
+ oi->ip_open_count++;
+ spin_unlock(&oi->ip_lock);
+ status = 0;
+leave:
+ mlog_exit(status);
+ return status;
+}
+
+static int ocfs2_file_release(struct inode *inode, struct file *file)
+{
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+
+ mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
+ file->f_dentry->d_name.len,
+ file->f_dentry->d_name.name);
+
+ spin_lock(&oi->ip_lock);
+ if (!--oi->ip_open_count)
+ oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
+ spin_unlock(&oi->ip_lock);
+
+ mlog_exit(0);
+
+ return 0;
+}
+
+static int ocfs2_sync_file(struct file *file,
+ struct dentry *dentry,
+ int datasync)
+{
+ int err = 0;
+ journal_t *journal;
+ struct inode *inode = dentry->d_inode;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", file, dentry, datasync,
+ dentry->d_name.len, dentry->d_name.name);
+
+ err = ocfs2_sync_inode(dentry->d_inode);
+ if (err)
+ goto bail;
+
+ journal = osb->journal->j_journal;
+ err = journal_force_commit(journal);
+
+bail:
+ mlog_exit(err);
+
+ return (err < 0) ? -EIO : 0;
+}
+
+int ocfs2_set_inode_size(struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ u64 new_i_size)
+{
+ int status;
+
+ mlog_entry_void();
+ i_size_write(inode, new_i_size);
+ inode->i_blocks = ocfs2_align_bytes_to_sectors(new_i_size);
+ inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+
+ status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+static int ocfs2_simple_size_update(struct inode *inode,
+ struct buffer_head *di_bh,
+ u64 new_i_size)
+{
+ int ret;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_journal_handle *handle = NULL;
+
+ handle = ocfs2_start_trans(osb, NULL,
+ OCFS2_INODE_UPDATE_CREDITS);
+ if (handle == NULL) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_set_inode_size(handle, inode, di_bh,
+ new_i_size);
+ if (ret < 0)
+ mlog_errno(ret);
+
+ ocfs2_commit_trans(handle);
+out:
+ return ret;
+}
+
+static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ u64 new_i_size)
+{
+ int status;
+ struct ocfs2_journal_handle *handle;
+
+ mlog_entry_void();
+
+ /* TODO: This needs to actually orphan the inode in this
+ * transaction. */
+
+ handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS);
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ mlog_errno(status);
+ goto out;
+ }
+
+ status = ocfs2_set_inode_size(handle, inode, fe_bh, new_i_size);
+ if (status < 0)
+ mlog_errno(status);
+
+ ocfs2_commit_trans(handle);
+out:
+ mlog_exit(status);
+ return status;
+}
+
+static int ocfs2_truncate_file(struct inode *inode,
+ struct buffer_head *di_bh,
+ u64 new_i_size)
+{
+ int status = 0;
+ struct ocfs2_dinode *fe = NULL;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_truncate_context *tc = NULL;
+
+ mlog_entry("(inode = %"MLFu64", new_i_size = %"MLFu64"\n",
+ OCFS2_I(inode)->ip_blkno, new_i_size);
+
+ truncate_inode_pages(inode->i_mapping, new_i_size);
+
+ fe = (struct ocfs2_dinode *) di_bh->b_data;
+ if (!OCFS2_IS_VALID_DINODE(fe)) {
+ OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
+ status = -EIO;
+ goto bail;
+ }
+
+ mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
+ "Inode %"MLFu64", inode i_size = %lld != di "
+ "i_size = %"MLFu64", i_flags = 0x%x\n",
+ OCFS2_I(inode)->ip_blkno,
+ i_size_read(inode),
+ le64_to_cpu(fe->i_size), le32_to_cpu(fe->i_flags));
+
+ if (new_i_size > le64_to_cpu(fe->i_size)) {
+ mlog(0, "asked to truncate file with size (%"MLFu64") "
+ "to size (%"MLFu64")!\n",
+ le64_to_cpu(fe->i_size), new_i_size);
+ status = -EINVAL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ mlog(0, "inode %"MLFu64", i_size = %"MLFu64", new_i_size = %"MLFu64"\n",
+ le64_to_cpu(fe->i_blkno), le64_to_cpu(fe->i_size), new_i_size);
+
+ /* Let's handle the simple truncate cases before doing any more
+ * cluster locking. */
+ if (new_i_size == le64_to_cpu(fe->i_size))
+ goto bail;
+
+ if (le32_to_cpu(fe->i_clusters) ==
+ ocfs2_clusters_for_bytes(osb->sb, new_i_size)) {
+ mlog(0, "fe->i_clusters = %u, so we do a simple truncate\n",
+ fe->i_clusters);
+ /* No allocation change is required, so let's fast path
+ * this truncate. */
+ status = ocfs2_simple_size_update(inode, di_bh, new_i_size);
+ if (status < 0)
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* This forces other nodes to sync and drop their pages */
+ status = ocfs2_data_lock(inode, 1);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ ocfs2_data_unlock(inode, 1);
+
+ /* alright, we're going to need to do a full blown alloc size
+ * change. Orphan the inode so that recovery can complete the
+ * truncate if necessary. This does the task of marking
+ * i_size. */
+ status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_prepare_truncate(osb, inode, di_bh, &tc);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_commit_truncate(osb, inode, di_bh, tc);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* TODO: orphan dir cleanup here. */
+bail:
+
+ mlog_exit(status);
+ return status;
+}
+
+/*
+ * Extend the allocation only here. We'll update all the on-disk
+ * structures and oip->alloc_size.
+ *
+ * Expects everything to be locked, a transaction started, and enough
+ * data / metadata reservations in the contexts.
+ *
+ * Will return -EAGAIN, and a reason, if a restart is needed.
+ * If passed in, *reason_ret will always be set, even on error.
+ */
+int ocfs2_do_extend_allocation(struct ocfs2_super *osb,
+ struct inode *inode,
+ u32 clusters_to_add,
+ struct buffer_head *fe_bh,
+ struct ocfs2_journal_handle *handle,
+ struct ocfs2_alloc_context *data_ac,
+ struct ocfs2_alloc_context *meta_ac,
+ enum ocfs2_alloc_restarted *reason_ret)
+{
+ int status = 0;
+ int free_extents;
+ struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
+ enum ocfs2_alloc_restarted reason = RESTART_NONE;
+ u32 bit_off, num_bits;
+ u64 block;
+
+ BUG_ON(!clusters_to_add);
+
+ free_extents = ocfs2_num_free_extents(osb, inode, fe);
+ if (free_extents < 0) {
+ status = free_extents;
+ mlog_errno(status);
+ goto leave;
+ }
+
+ /* there are two cases which could cause us to EAGAIN in the
+ * we-need-more-metadata case:
+ * 1) we haven't reserved *any*
+ * 2) we are so fragmented, we've needed to add metadata too
+ * many times. */
+ if (!free_extents && !meta_ac) {
+ mlog(0, "we haven't reserved any metadata!\n");
+ status = -EAGAIN;
+ reason = RESTART_META;
+ goto leave;
+ } else if ((!free_extents)
+ && (ocfs2_alloc_context_bits_left(meta_ac)
+ < ocfs2_extend_meta_needed(fe))) {
+ mlog(0, "filesystem is really fragmented...\n");
+ status = -EAGAIN;
+ reason = RESTART_META;
+ goto leave;
+ }
+
+ status = ocfs2_claim_clusters(osb, handle, data_ac, 1,
+ &bit_off, &num_bits);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto leave;
+ }
+
+ BUG_ON(num_bits > clusters_to_add);
+
+ /* reserve our write early -- insert_extent may update the inode */
+ status = ocfs2_journal_access(handle, inode, fe_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
+ mlog(0, "Allocating %u clusters at block %u for inode %"MLFu64"\n",
+ num_bits, bit_off, OCFS2_I(inode)->ip_blkno);
+ status = ocfs2_insert_extent(osb, handle, inode, fe_bh, block,
+ num_bits, meta_ac);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ le32_add_cpu(&fe->i_clusters, num_bits);
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+ OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+
+ status = ocfs2_journal_dirty(handle, fe_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ clusters_to_add -= num_bits;
+
+ if (clusters_to_add) {
+ mlog(0, "need to alloc once more, clusters = %u, wanted = "
+ "%u\n", fe->i_clusters, clusters_to_add);
+ status = -EAGAIN;
+ reason = RESTART_TRANS;
+ }
+
+leave:
+ mlog_exit(status);
+ if (reason_ret)
+ *reason_ret = reason;
+ return status;
+}
+
+static int ocfs2_extend_allocation(struct inode *inode,
+ u32 clusters_to_add)
+{
+ int status = 0;
+ int restart_func = 0;
+ int drop_alloc_sem = 0;
+ int credits, num_free_extents;
+ u32 prev_clusters;
+ struct buffer_head *bh = NULL;
+ struct ocfs2_dinode *fe = NULL;
+ struct ocfs2_journal_handle *handle = NULL;
+ struct ocfs2_alloc_context *data_ac = NULL;
+ struct ocfs2_alloc_context *meta_ac = NULL;
+ enum ocfs2_alloc_restarted why;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ mlog_entry("(clusters_to_add = %u)\n", clusters_to_add);
+
+ status = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &bh,
+ OCFS2_BH_CACHED, inode);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ fe = (struct ocfs2_dinode *) bh->b_data;
+ if (!OCFS2_IS_VALID_DINODE(fe)) {
+ OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
+ status = -EIO;
+ goto leave;
+ }
+
+restart_all:
+ BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
+
+ mlog(0, "extend inode %"MLFu64", i_size = %lld, fe->i_clusters = %u, "
+ "clusters_to_add = %u\n",
+ OCFS2_I(inode)->ip_blkno, i_size_read(inode),
+ fe->i_clusters, clusters_to_add);
+
+ handle = ocfs2_alloc_handle(osb);
+ if (handle == NULL) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto leave;
+ }
+
+ num_free_extents = ocfs2_num_free_extents(osb,
+ inode,
+ fe);
+ if (num_free_extents < 0) {
+ status = num_free_extents;
+ mlog_errno(status);
+ goto leave;
+ }
+
+ if (!num_free_extents) {
+ status = ocfs2_reserve_new_metadata(osb,
+ handle,
+ fe,
+ &meta_ac);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto leave;
+ }
+ }
+
+ status = ocfs2_reserve_clusters(osb,
+ handle,
+ clusters_to_add,
+ &data_ac);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto leave;
+ }
+
+ /* Blocks people in read/write from reading our allocation
+ * until we're done changing it. We depend on i_mutex to block
+ * other extend/truncate calls while we're here. Ordering wrt
+ * start_trans is important here -- always do it before! */
+ down_write(&OCFS2_I(inode)->ip_alloc_sem);
+ drop_alloc_sem = 1;
+
+ credits = ocfs2_calc_extend_credits(osb->sb, fe, clusters_to_add);
+ handle = ocfs2_start_trans(osb, handle, credits);
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ handle = NULL;
+ mlog_errno(status);
+ goto leave;
+ }
+
+restarted_transaction:
+ /* reserve a write to the file entry early on - that way if we
+ * run out of credits in the allocation path, we can still
+ * update i_size. */
+ status = ocfs2_journal_access(handle, inode, bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ prev_clusters = OCFS2_I(inode)->ip_clusters;
+
+ status = ocfs2_do_extend_allocation(osb,
+ inode,
+ clusters_to_add,
+ bh,
+ handle,
+ data_ac,
+ meta_ac,
+ &why);
+ if ((status < 0) && (status != -EAGAIN)) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto leave;
+ }
+
+ status = ocfs2_journal_dirty(handle, bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+ clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+
+ if (why != RESTART_NONE && clusters_to_add) {
+ if (why == RESTART_META) {
+ mlog(0, "restarting function.\n");
+ restart_func = 1;
+ } else {
+ BUG_ON(why != RESTART_TRANS);
+
+ mlog(0, "restarting transaction.\n");
+ /* TODO: This can be more intelligent. */
+ credits = ocfs2_calc_extend_credits(osb->sb,
+ fe,
+ clusters_to_add);
+ status = ocfs2_extend_trans(handle, credits);
+ if (status < 0) {
+ /* handle still has to be committed at
+ * this point. */
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto leave;
+ }
+ goto restarted_transaction;
+ }
+ }
+
+ mlog(0, "fe: i_clusters = %u, i_size=%"MLFu64"\n",
+ fe->i_clusters, fe->i_size);
+ mlog(0, "inode: ip_clusters=%u, i_size=%lld\n",
+ OCFS2_I(inode)->ip_clusters, i_size_read(inode));
+
+leave:
+ if (drop_alloc_sem) {
+ up_write(&OCFS2_I(inode)->ip_alloc_sem);
+ drop_alloc_sem = 0;
+ }
+ if (handle) {
+ ocfs2_commit_trans(handle);
+ handle = NULL;
+ }
+ if (data_ac) {
+ ocfs2_free_alloc_context(data_ac);
+ data_ac = NULL;
+ }
+ if (meta_ac) {
+ ocfs2_free_alloc_context(meta_ac);
+ meta_ac = NULL;
+ }
+ if ((!status) && restart_func) {
+ restart_func = 0;
+ goto restart_all;
+ }
+ if (bh) {
+ brelse(bh);
+ bh = NULL;
+ }
+
+ mlog_exit(status);
+ return status;
+}
+
+/* Some parts of this taken from generic_cont_expand, which turned out
+ * to be too fragile to do exactly what we need without us having to
+ * worry about recursive locking in ->commit_write(). */
+static int ocfs2_write_zero_page(struct inode *inode,
+ u64 size)
+{
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+ unsigned long index;
+ unsigned int offset;
+ struct ocfs2_journal_handle *handle = NULL;
+ int ret;
+
+ offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
+ /* ugh. in prepare/commit_write, if from==to==start of block, we
+ ** skip the prepare. make sure we never send an offset for the start
+ ** of a block
+ */
+ if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
+ offset++;
+ }
+ index = size >> PAGE_CACHE_SHIFT;
+
+ page = grab_cache_page(mapping, index);
+ if (!page) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_prepare_write(NULL, page, offset, offset);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out_unlock;
+ }
+
+ if (ocfs2_should_order_data(inode)) {
+ handle = ocfs2_start_walk_page_trans(inode, page, offset,
+ offset);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ handle = NULL;
+ goto out_unlock;
+ }
+ }
+
+ /* must not update i_size! */
+ ret = block_commit_write(page, offset, offset);
+ if (ret < 0)
+ mlog_errno(ret);
+ else
+ ret = 0;
+
+ if (handle)
+ ocfs2_commit_trans(handle);
+out_unlock:
+ unlock_page(page);
+ page_cache_release(page);
+out:
+ return ret;
+}
+
+static int ocfs2_zero_extend(struct inode *inode,
+ u64 zero_to_size)
+{
+ int ret = 0;
+ u64 start_off;
+ struct super_block *sb = inode->i_sb;
+
+ start_off = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
+ while (start_off < zero_to_size) {
+ ret = ocfs2_write_zero_page(inode, start_off);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ start_off += sb->s_blocksize;
+ }
+
+out:
+ return ret;
+}
+
+static int ocfs2_extend_file(struct inode *inode,
+ struct buffer_head *di_bh,
+ u64 new_i_size)
+{
+ int ret = 0;
+ u32 clusters_to_add;
+
+ /* setattr sometimes calls us like this. */
+ if (new_i_size == 0)
+ goto out;
+
+ if (i_size_read(inode) == new_i_size)
+ goto out;
+ BUG_ON(new_i_size < i_size_read(inode));
+
+ clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size) -
+ OCFS2_I(inode)->ip_clusters;
+
+ if (clusters_to_add) {
+ ret = ocfs2_extend_allocation(inode, clusters_to_add);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_zero_extend(inode, new_i_size);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ /* No allocation required, we just use this helper to
+ * do a trivial update of i_size. */
+ ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+out:
+ return ret;
+}
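
The clusters_to_add computation above is just the new byte size rounded up to whole clusters, minus the clusters already allocated. A user-space sketch of that rounding, assuming a hypothetical 32KB cluster size:

#include <stdint.h>
#include <stdio.h>

#define CLUSTER_SIZE (32 * 1024ULL)     /* hypothetical cluster size */

/* Round a byte count up to whole clusters. */
static uint32_t clusters_for_bytes(uint64_t bytes)
{
        return (uint32_t)((bytes + CLUSTER_SIZE - 1) / CLUSTER_SIZE);
}

int main(void)
{
        uint64_t new_i_size = 100 * 1024;   /* extend the file to 100KB */
        uint32_t ip_clusters = 2;           /* clusters already allocated */
        uint32_t to_add = clusters_for_bytes(new_i_size) - ip_clusters;

        printf("need %u more clusters\n", to_add);  /* 100KB -> 4 clusters, 4 - 2 = 2 */
        return 0;
}
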
+
+int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ int status = 0, size_change;
+ struct inode *inode = dentry->d_inode;
+ struct super_block *sb = inode->i_sb;
+ struct ocfs2_super *osb = OCFS2_SB(sb);
+ struct buffer_head *bh = NULL;
+ struct ocfs2_journal_handle *handle = NULL;
+
+ mlog_entry("(0x%p, '%.*s')\n", dentry,
+ dentry->d_name.len, dentry->d_name.name);
+
+ if (attr->ia_valid & ATTR_MODE)
+ mlog(0, "mode change: %d\n", attr->ia_mode);
+ if (attr->ia_valid & ATTR_UID)
+ mlog(0, "uid change: %d\n", attr->ia_uid);
+ if (attr->ia_valid & ATTR_GID)
+ mlog(0, "gid change: %d\n", attr->ia_gid);
+ if (attr->ia_valid & ATTR_SIZE)
+ mlog(0, "size change...\n");
+ if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME))
+ mlog(0, "time change...\n");
+
+#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
+ | ATTR_GID | ATTR_UID | ATTR_MODE)
+ if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) {
+ mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid);
+ return 0;
+ }
+
+ status = inode_change_ok(inode, attr);
+ if (status)
+ return status;
+
+ size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
+ if (size_change) {
+ status = ocfs2_rw_lock(inode, 1);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
+ status = ocfs2_meta_lock(inode, NULL, &bh, 1);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ goto bail_unlock_rw;
+ }
+
+ if (size_change && attr->ia_size != i_size_read(inode)) {
+ if (i_size_read(inode) > attr->ia_size)
+ status = ocfs2_truncate_file(inode, bh, attr->ia_size);
+ else
+ status = ocfs2_extend_file(inode, bh, attr->ia_size);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ status = -ENOSPC;
+ goto bail_unlock;
+ }
+ }
+
+ handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS);
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ mlog_errno(status);
+ goto bail_unlock;
+ }
+
+ status = inode_setattr(inode, attr);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail_commit;
+ }
+
+ status = ocfs2_mark_inode_dirty(handle, inode, bh);
+ if (status < 0)
+ mlog_errno(status);
+
+bail_commit:
+ ocfs2_commit_trans(handle);
+bail_unlock:
+ ocfs2_meta_unlock(inode, 1);
+bail_unlock_rw:
+ if (size_change)
+ ocfs2_rw_unlock(inode, 1);
+bail:
+ if (bh)
+ brelse(bh);
+
+ mlog_exit(status);
+ return status;
+}
+
+int ocfs2_getattr(struct vfsmount *mnt,
+ struct dentry *dentry,
+ struct kstat *stat)
+{
+ struct inode *inode = dentry->d_inode;
+ struct super_block *sb = dentry->d_inode->i_sb;
+ struct ocfs2_super *osb = sb->s_fs_info;
+ int err;
+
+ mlog_entry_void();
+
+ err = ocfs2_inode_revalidate(dentry);
+ if (err) {
+ if (err != -ENOENT)
+ mlog_errno(err);
+ goto bail;
+ }
+
+ generic_fillattr(inode, stat);
+
+ /* We set the blksize from the cluster size for performance */
+ stat->blksize = osb->s_clustersize;
+
+bail:
+ mlog_exit(err);
+
+ return err;
+}
+
+static int ocfs2_write_remove_suid(struct inode *inode)
+{
+ int ret;
+ struct buffer_head *bh = NULL;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_journal_handle *handle;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_dinode *di;
+
+ mlog_entry("(Inode %"MLFu64", mode 0%o)\n", oi->ip_blkno,
+ inode->i_mode);
+
+ handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS);
+ if (handle == NULL) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_read_block(osb, oi->ip_blkno, &bh, OCFS2_BH_CACHED, inode);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out_trans;
+ }
+
+ ret = ocfs2_journal_access(handle, inode, bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out_bh;
+ }
+
+ inode->i_mode &= ~S_ISUID;
+ if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
+ inode->i_mode &= ~S_ISGID;
+
+ di = (struct ocfs2_dinode *) bh->b_data;
+ di->i_mode = cpu_to_le16(inode->i_mode);
+
+ ret = ocfs2_journal_dirty(handle, bh);
+ if (ret < 0)
+ mlog_errno(ret);
+out_bh:
+ brelse(bh);
+out_trans:
+ ocfs2_commit_trans(handle);
+out:
+ mlog_exit(ret);
+ return ret;
+}
+
+static inline int ocfs2_write_should_remove_suid(struct inode *inode)
+{
+ mode_t mode = inode->i_mode;
+
+ if (!capable(CAP_FSETID)) {
+ if (unlikely(mode & S_ISUID))
+ return 1;
+
+ if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
+ return 1;
+ }
+ return 0;
+}
+
+static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
+ const char __user *buf,
+ size_t count,
+ loff_t pos)
+{
+ struct iovec local_iov = { .iov_base = (void __user *)buf,
+ .iov_len = count };
+ int ret, rw_level = -1, meta_level = -1, have_alloc_sem = 0;
+ u32 clusters;
+ struct file *filp = iocb->ki_filp;
+ struct inode *inode = filp->f_dentry->d_inode;
+ loff_t newsize, saved_pos;
+#ifdef OCFS2_ORACORE_WORKAROUNDS
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+#endif
+
+ mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", filp, buf,
+ (unsigned int)count,
+ filp->f_dentry->d_name.len,
+ filp->f_dentry->d_name.name);
+
+ /* happy write of zero bytes */
+ if (count == 0)
+ return 0;
+
+ if (!inode) {
+ mlog(0, "bad inode\n");
+ return -EIO;
+ }
+
+#ifdef OCFS2_ORACORE_WORKAROUNDS
+ /* ugh, work around some applications which open everything O_DIRECT +
+ * O_APPEND and really don't mean to use O_DIRECT. */
+ if (osb->s_mount_opt & OCFS2_MOUNT_COMPAT_OCFS &&
+ (filp->f_flags & O_APPEND) && (filp->f_flags & O_DIRECT))
+ filp->f_flags &= ~O_DIRECT;
+#endif
+
+ mutex_lock(&inode->i_mutex);
+ /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
+ if (filp->f_flags & O_DIRECT) {
+ have_alloc_sem = 1;
+ down_read(&inode->i_alloc_sem);
+ }
+
+ /* concurrent O_DIRECT writes are allowed */
+ rw_level = (filp->f_flags & O_DIRECT) ? 0 : 1;
+ ret = ocfs2_rw_lock(inode, rw_level);
+ if (ret < 0) {
+ rw_level = -1;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /*
+ * We sample i_size under a read level meta lock to see if our write
+ * is extending the file, if it is we back off and get a write level
+ * meta lock.
+ */
+ meta_level = (filp->f_flags & O_APPEND) ? 1 : 0;
+ for (;;) {
+ ret = ocfs2_meta_lock(inode, NULL, NULL, meta_level);
+ if (ret < 0) {
+ meta_level = -1;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /* Clear suid / sgid if necessary. We do this here
+ * instead of later in the write path because
+ * remove_suid() calls ->setattr without any hint that
+ * we may have already done our cluster locking. Since
+ * ocfs2_setattr() *must* take cluster locks to
+ * proceed, this will lead us to recursively lock the
+ * inode. There's also the dinode i_size state which
+ * can be lost via setattr during extending writes (we
+ * set inode->i_size at the end of a write). */
+ if (ocfs2_write_should_remove_suid(inode)) {
+ if (meta_level == 0) {
+ ocfs2_meta_unlock(inode, meta_level);
+ meta_level = 1;
+ continue;
+ }
+
+ ret = ocfs2_write_remove_suid(inode);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+ }
+
+ /* work on a copy of ppos until we're sure that we won't have
+ * to recalculate it due to relocking. */
+ if (filp->f_flags & O_APPEND) {
+ saved_pos = i_size_read(inode);
+ mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos);
+ } else {
+ saved_pos = iocb->ki_pos;
+ }
+ newsize = count + saved_pos;
+
+ mlog(0, "pos=%lld newsize=%"MLFu64" cursize=%lld\n",
+ saved_pos, newsize, i_size_read(inode));
+
+ /* No need for a higher level metadata lock if we're
+ * never going past i_size. */
+ if (newsize <= i_size_read(inode))
+ break;
+
+ if (meta_level == 0) {
+ ocfs2_meta_unlock(inode, meta_level);
+ meta_level = 1;
+ continue;
+ }
+
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+ clusters = ocfs2_clusters_for_bytes(inode->i_sb, newsize) -
+ OCFS2_I(inode)->ip_clusters;
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+
+ mlog(0, "Writing at EOF, may need more allocation: "
+ "i_size = %lld, newsize = %"MLFu64", need %u clusters\n",
+ i_size_read(inode), newsize, clusters);
+
+ /* We only want to continue the rest of this loop if
+ * our extend will actually require more
+ * allocation. */
+ if (!clusters)
+ break;
+
+ ret = ocfs2_extend_allocation(inode, clusters);
+ if (ret < 0) {
+ if (ret != -ENOSPC)
+ mlog_errno(ret);
+ goto out;
+ }
+
+ /* Fill any holes which would've been created by this
+ * write. If we're O_APPEND, this will wind up
+ * (correctly) being a noop. */
+ ret = ocfs2_zero_extend(inode, (u64) newsize - count);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+ break;
+ }
+
+ /* ok, we're done with i_size and alloc work */
+ iocb->ki_pos = saved_pos;
+ ocfs2_meta_unlock(inode, meta_level);
+ meta_level = -1;
+
+ /* communicate with ocfs2_dio_end_io */
+ ocfs2_iocb_set_rw_locked(iocb);
+
+#ifdef OCFS2_ORACORE_WORKAROUNDS
+ if (osb->s_mount_opt & OCFS2_MOUNT_COMPAT_OCFS &&
+ filp->f_flags & O_DIRECT) {
+ unsigned int saved_flags = filp->f_flags;
+ int sector_size = 1 << osb->s_sectsize_bits;
+
+ if ((saved_pos & (sector_size - 1)) ||
+ (count & (sector_size - 1)) ||
+ ((unsigned long)buf & (sector_size - 1))) {
+ filp->f_flags |= O_SYNC;
+ filp->f_flags &= ~O_DIRECT;
+ }
+
+ ret = generic_file_aio_write_nolock(iocb, &local_iov, 1,
+ &iocb->ki_pos);
+
+ filp->f_flags = saved_flags;
+ } else
+#endif
+ ret = generic_file_aio_write_nolock(iocb, &local_iov, 1,
+ &iocb->ki_pos);
+
+ /* buffered aio wouldn't have proper lock coverage today */
+ BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
+
+ /*
+ * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in an ocfs2_dio_end_io
+ * function pointer which is called when o_direct io completes so that
+ * it can unlock our rw lock. (it's the clustered equivalent of
+ * i_alloc_sem; protects truncate from racing with pending ios).
+ * Unfortunately there are error cases which call end_io and others
+ * that don't. So we don't have to unlock the rw_lock if either an
+ * async dio is going to do it in the future or an end_io after an
+ * error has already done it.
+ */
+ if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
+ rw_level = -1;
+ have_alloc_sem = 0;
+ }
+
+out:
+ if (meta_level != -1)
+ ocfs2_meta_unlock(inode, meta_level);
+ if (have_alloc_sem)
+ up_read(&inode->i_alloc_sem);
+ if (rw_level != -1)
+ ocfs2_rw_unlock(inode, rw_level);
+ mutex_unlock(&inode->i_mutex);
+
+ mlog_exit(ret);
+ return ret;
+}
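
The meta lock handling in the loop above follows an optimistic upgrade pattern: sample i_size under a shared lock, and only when the write would extend the file drop it, retake it exclusively, and recheck. A user-space analogue of the same pattern using a pthread rwlock (purely illustrative; no OCFS2 API involved):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static long file_size = 100;

/* Extend file_size to at least want_size, taking the write lock only
 * when an extension is actually required. */
static void maybe_extend(long want_size)
{
        int exclusive = 0;

        for (;;) {
                if (exclusive)
                        pthread_rwlock_wrlock(&lock);
                else
                        pthread_rwlock_rdlock(&lock);

                if (want_size <= file_size)
                        break;                  /* shared lock was enough */

                if (!exclusive) {
                        /* Must upgrade: drop, retake exclusively, recheck. */
                        pthread_rwlock_unlock(&lock);
                        exclusive = 1;
                        continue;
                }

                file_size = want_size;          /* safe: we hold the write lock */
                break;
        }
        pthread_rwlock_unlock(&lock);
}

int main(void)
{
        maybe_extend(50);
        maybe_extend(200);
        printf("file_size = %ld\n", file_size);  /* 200 */
        return 0;
}
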
+
+static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
+ char __user *buf,
+ size_t count,
+ loff_t pos)
+{
+ int ret = 0, rw_level = -1, have_alloc_sem = 0;
+ struct file *filp = iocb->ki_filp;
+ struct inode *inode = filp->f_dentry->d_inode;
+#ifdef OCFS2_ORACORE_WORKAROUNDS
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+#endif
+
+ mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", filp, buf,
+ (unsigned int)count,
+ filp->f_dentry->d_name.len,
+ filp->f_dentry->d_name.name);
+
+ if (!inode) {
+ ret = -EINVAL;
+ mlog_errno(ret);
+ goto bail;
+ }
+
+#ifdef OCFS2_ORACORE_WORKAROUNDS
+ if (osb->s_mount_opt & OCFS2_MOUNT_COMPAT_OCFS) {
+ if (filp->f_flags & O_DIRECT) {
+ int sector_size = 1 << osb->s_sectsize_bits;
+
+ if ((pos & (sector_size - 1)) ||
+ (count & (sector_size - 1)) ||
+ ((unsigned long)buf & (sector_size - 1)) ||
+ (i_size_read(inode) & (sector_size -1))) {
+ filp->f_flags &= ~O_DIRECT;
+ }
+ }
+ }
+#endif
+
+ /*
+ * buffered reads protect themselves in ->readpage(). O_DIRECT reads
+ * need locks to protect pending reads from racing with truncate.
+ */
+ if (filp->f_flags & O_DIRECT) {
+ down_read(&inode->i_alloc_sem);
+ have_alloc_sem = 1;
+
+ ret = ocfs2_rw_lock(inode, 0);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto bail;
+ }
+ rw_level = 0;
+ /* communicate with ocfs2_dio_end_io */
+ ocfs2_iocb_set_rw_locked(iocb);
+ }
+
+ ret = generic_file_aio_read(iocb, buf, count, iocb->ki_pos);
+ if (ret == -EINVAL)
+ mlog(ML_ERROR, "generic_file_aio_read returned -EINVAL\n");
+
+ /* buffered aio wouldn't have proper lock coverage today */
+ BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
+
+ /* see ocfs2_file_aio_write */
+ if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
+ rw_level = -1;
+ have_alloc_sem = 0;
+ }
+
+bail:
+ if (have_alloc_sem)
+ up_read(&inode->i_alloc_sem);
+ if (rw_level != -1)
+ ocfs2_rw_unlock(inode, rw_level);
+ mlog_exit(ret);
+
+ return ret;
+}
+
+struct inode_operations ocfs2_file_iops = {
+ .setattr = ocfs2_setattr,
+ .getattr = ocfs2_getattr,
+};
+
+struct inode_operations ocfs2_special_file_iops = {
+ .setattr = ocfs2_setattr,
+ .getattr = ocfs2_getattr,
+};
+
+struct file_operations ocfs2_fops = {
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .sendfile = generic_file_sendfile,
+ .mmap = ocfs2_mmap,
+ .fsync = ocfs2_sync_file,
+ .release = ocfs2_file_release,
+ .open = ocfs2_file_open,
+ .aio_read = ocfs2_file_aio_read,
+ .aio_write = ocfs2_file_aio_write,
+};
+
+struct file_operations ocfs2_dops = {
+ .read = generic_read_dir,
+ .readdir = ocfs2_readdir,
+ .fsync = ocfs2_sync_file,
+};
diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h
new file mode 100644
index 00000000000..a5ea33b2406
--- /dev/null
+++ b/fs/ocfs2/file.h
@@ -0,0 +1,57 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * file.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_FILE_H
+#define OCFS2_FILE_H
+
+extern struct file_operations ocfs2_fops;
+extern struct file_operations ocfs2_dops;
+extern struct inode_operations ocfs2_file_iops;
+extern struct inode_operations ocfs2_special_file_iops;
+struct ocfs2_alloc_context;
+
+enum ocfs2_alloc_restarted {
+ RESTART_NONE = 0,
+ RESTART_TRANS,
+ RESTART_META
+};
+int ocfs2_do_extend_allocation(struct ocfs2_super *osb,
+ struct inode *inode,
+ u32 clusters_to_add,
+ struct buffer_head *fe_bh,
+ struct ocfs2_journal_handle *handle,
+ struct ocfs2_alloc_context *data_ac,
+ struct ocfs2_alloc_context *meta_ac,
+ enum ocfs2_alloc_restarted *reason);
+int ocfs2_setattr(struct dentry *dentry, struct iattr *attr);
+int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat);
+
+int ocfs2_set_inode_size(struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ u64 new_i_size);
+
+#endif /* OCFS2_FILE_H */
diff --git a/fs/ocfs2/heartbeat.c b/fs/ocfs2/heartbeat.c
new file mode 100644
index 00000000000..0bbd22f46c8
--- /dev/null
+++ b/fs/ocfs2/heartbeat.c
@@ -0,0 +1,378 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * heartbeat.c
+ *
+ * Register ourselves with the heartbeat service, keep our node maps
+ * up to date, and fire off recovery when needed.
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/kmod.h>
+
+#include <cluster/heartbeat.h>
+#include <cluster/nodemanager.h>
+
+#include <dlm/dlmapi.h>
+
+#define MLOG_MASK_PREFIX ML_SUPER
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#include "alloc.h"
+#include "heartbeat.h"
+#include "inode.h"
+#include "journal.h"
+#include "vote.h"
+
+#include "buffer_head_io.h"
+
+#define OCFS2_HB_NODE_DOWN_PRI (0x0000002)
+#define OCFS2_HB_NODE_UP_PRI OCFS2_HB_NODE_DOWN_PRI
+
+static inline void __ocfs2_node_map_set_bit(struct ocfs2_node_map *map,
+ int bit);
+static inline void __ocfs2_node_map_clear_bit(struct ocfs2_node_map *map,
+ int bit);
+static inline int __ocfs2_node_map_is_empty(struct ocfs2_node_map *map);
+static void __ocfs2_node_map_dup(struct ocfs2_node_map *target,
+ struct ocfs2_node_map *from);
+static void __ocfs2_node_map_set(struct ocfs2_node_map *target,
+ struct ocfs2_node_map *from);
+
+void ocfs2_init_node_maps(struct ocfs2_super *osb)
+{
+ spin_lock_init(&osb->node_map_lock);
+ ocfs2_node_map_init(&osb->mounted_map);
+ ocfs2_node_map_init(&osb->recovery_map);
+ ocfs2_node_map_init(&osb->umount_map);
+}
+
+static void ocfs2_do_node_down(int node_num,
+ struct ocfs2_super *osb)
+{
+ BUG_ON(osb->node_num == node_num);
+
+ mlog(0, "ocfs2: node down event for %d\n", node_num);
+
+ if (!osb->dlm) {
+ /*
+ * No DLM means we're not even ready to participate yet.
+ * We check the slots after the DLM comes up, so we will
+ * notice the node death then. We can safely ignore it
+ * here.
+ */
+ return;
+ }
+
+ if (ocfs2_node_map_test_bit(osb, &osb->umount_map, node_num)) {
+ /* If a node is in the umount map, then we've been
+ * expecting him to go down and we know ahead of time
+ * that recovery is not necessary. */
+ ocfs2_node_map_clear_bit(osb, &osb->umount_map, node_num);
+ return;
+ }
+
+ ocfs2_recovery_thread(osb, node_num);
+
+ ocfs2_remove_node_from_vote_queues(osb, node_num);
+}
+
+static void ocfs2_hb_node_down_cb(struct o2nm_node *node,
+ int node_num,
+ void *data)
+{
+ ocfs2_do_node_down(node_num, (struct ocfs2_super *) data);
+}
+
+/* Called from the dlm when it's about to evict a node. We may also
+ * get a heartbeat callback later. */
+static void ocfs2_dlm_eviction_cb(int node_num,
+ void *data)
+{
+ struct ocfs2_super *osb = (struct ocfs2_super *) data;
+ struct super_block *sb = osb->sb;
+
+ mlog(ML_NOTICE, "device (%u,%u): dlm has evicted node %d\n",
+ MAJOR(sb->s_dev), MINOR(sb->s_dev), node_num);
+
+ ocfs2_do_node_down(node_num, osb);
+}
+
+static void ocfs2_hb_node_up_cb(struct o2nm_node *node,
+ int node_num,
+ void *data)
+{
+ struct ocfs2_super *osb = data;
+
+ BUG_ON(osb->node_num == node_num);
+
+ mlog(0, "node up event for %d\n", node_num);
+ ocfs2_node_map_clear_bit(osb, &osb->umount_map, node_num);
+}
+
+void ocfs2_setup_hb_callbacks(struct ocfs2_super *osb)
+{
+ o2hb_setup_callback(&osb->osb_hb_down, O2HB_NODE_DOWN_CB,
+ ocfs2_hb_node_down_cb, osb,
+ OCFS2_HB_NODE_DOWN_PRI);
+
+ o2hb_setup_callback(&osb->osb_hb_up, O2HB_NODE_UP_CB,
+ ocfs2_hb_node_up_cb, osb, OCFS2_HB_NODE_UP_PRI);
+
+ /* Not exactly a heartbeat callback, but leads to essentially
+ * the same path so we set it up here. */
+ dlm_setup_eviction_cb(&osb->osb_eviction_cb,
+ ocfs2_dlm_eviction_cb,
+ osb);
+}
+
+/* Most functions here are just stubs for now... */
+int ocfs2_register_hb_callbacks(struct ocfs2_super *osb)
+{
+ int status;
+
+ status = o2hb_register_callback(&osb->osb_hb_down);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = o2hb_register_callback(&osb->osb_hb_up);
+ if (status < 0)
+ mlog_errno(status);
+
+bail:
+ return status;
+}
+
+void ocfs2_clear_hb_callbacks(struct ocfs2_super *osb)
+{
+ int status;
+
+ status = o2hb_unregister_callback(&osb->osb_hb_down);
+ if (status < 0)
+ mlog_errno(status);
+
+ status = o2hb_unregister_callback(&osb->osb_hb_up);
+ if (status < 0)
+ mlog_errno(status);
+}
+
+void ocfs2_stop_heartbeat(struct ocfs2_super *osb)
+{
+ int ret;
+ char *argv[5], *envp[3];
+
+ if (!osb->uuid_str) {
+ /* This can happen if we don't get far enough in mount... */
+ mlog(0, "No UUID with which to stop heartbeat!\n\n");
+ return;
+ }
+
+ argv[0] = (char *)o2nm_get_hb_ctl_path();
+ argv[1] = "-K";
+ argv[2] = "-u";
+ argv[3] = osb->uuid_str;
+ argv[4] = NULL;
+
+ mlog(0, "Run: %s %s %s %s\n", argv[0], argv[1], argv[2], argv[3]);
+
+ /* minimal command environment taken from cpu_run_sbin_hotplug */
+ envp[0] = "HOME=/";
+ envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+ envp[2] = NULL;
+
+ ret = call_usermodehelper(argv[0], argv, envp, 1);
+ if (ret < 0)
+ mlog_errno(ret);
+}
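
call_usermodehelper() above runs the hb_ctl binary with a hand-built argv and a minimal environment. The user-space equivalent of assembling and running such a command looks roughly like this (using /bin/echo as a stand-in binary so the sketch is runnable):

#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        char *argv[] = { "/bin/echo", "-K", "-u", "some-uuid", NULL };
        char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };
        pid_t pid = fork();

        if (pid == 0) {
                execve(argv[0], argv, envp);    /* minimal command environment */
                _exit(127);                     /* exec failed */
        }
        if (pid > 0)
                waitpid(pid, NULL, 0);
        return 0;
}
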
+
+/* special case -1 for now
+ * TODO: should *really* make sure the calling func never passes -1!! */
+void ocfs2_node_map_init(struct ocfs2_node_map *map)
+{
+ map->num_nodes = OCFS2_NODE_MAP_MAX_NODES;
+ memset(map->map, 0, BITS_TO_LONGS(OCFS2_NODE_MAP_MAX_NODES) *
+ sizeof(unsigned long));
+}
+
+static inline void __ocfs2_node_map_set_bit(struct ocfs2_node_map *map,
+ int bit)
+{
+ set_bit(bit, map->map);
+}
+
+void ocfs2_node_map_set_bit(struct ocfs2_super *osb,
+ struct ocfs2_node_map *map,
+ int bit)
+{
+ if (bit == -1)
+ return;
+ BUG_ON(bit >= map->num_nodes);
+ spin_lock(&osb->node_map_lock);
+ __ocfs2_node_map_set_bit(map, bit);
+ spin_unlock(&osb->node_map_lock);
+}
+
+static inline void __ocfs2_node_map_clear_bit(struct ocfs2_node_map *map,
+ int bit)
+{
+ clear_bit(bit, map->map);
+}
+
+void ocfs2_node_map_clear_bit(struct ocfs2_super *osb,
+ struct ocfs2_node_map *map,
+ int bit)
+{
+ if (bit == -1)
+ return;
+ BUG_ON(bit >= map->num_nodes);
+ spin_lock(&osb->node_map_lock);
+ __ocfs2_node_map_clear_bit(map, bit);
+ spin_unlock(&osb->node_map_lock);
+}
+
+int ocfs2_node_map_test_bit(struct ocfs2_super *osb,
+ struct ocfs2_node_map *map,
+ int bit)
+{
+ int ret;
+ if (bit >= map->num_nodes) {
+ mlog(ML_ERROR, "bit=%d map->num_nodes=%d\n", bit, map->num_nodes);
+ BUG();
+ }
+ spin_lock(&osb->node_map_lock);
+ ret = test_bit(bit, map->map);
+ spin_unlock(&osb->node_map_lock);
+ return ret;
+}
+
+static inline int __ocfs2_node_map_is_empty(struct ocfs2_node_map *map)
+{
+ int bit;
+ bit = find_next_bit(map->map, map->num_nodes, 0);
+ if (bit < map->num_nodes)
+ return 0;
+ return 1;
+}
+
+int ocfs2_node_map_is_empty(struct ocfs2_super *osb,
+ struct ocfs2_node_map *map)
+{
+ int ret;
+ BUG_ON(map->num_nodes == 0);
+ spin_lock(&osb->node_map_lock);
+ ret = __ocfs2_node_map_is_empty(map);
+ spin_unlock(&osb->node_map_lock);
+ return ret;
+}
+
+static void __ocfs2_node_map_dup(struct ocfs2_node_map *target,
+ struct ocfs2_node_map *from)
+{
+ BUG_ON(from->num_nodes == 0);
+ ocfs2_node_map_init(target);
+ __ocfs2_node_map_set(target, from);
+}
+
+/* returns 1 if bit is the only bit set in target, 0 otherwise */
+int ocfs2_node_map_is_only(struct ocfs2_super *osb,
+ struct ocfs2_node_map *target,
+ int bit)
+{
+ struct ocfs2_node_map temp;
+ int ret;
+
+ spin_lock(&osb->node_map_lock);
+ __ocfs2_node_map_dup(&temp, target);
+ __ocfs2_node_map_clear_bit(&temp, bit);
+ ret = __ocfs2_node_map_is_empty(&temp);
+ spin_unlock(&osb->node_map_lock);
+
+ return ret;
+}
+
+static void __ocfs2_node_map_set(struct ocfs2_node_map *target,
+ struct ocfs2_node_map *from)
+{
+ int num_longs, i;
+
+ BUG_ON(target->num_nodes != from->num_nodes);
+ BUG_ON(target->num_nodes == 0);
+
+ num_longs = BITS_TO_LONGS(target->num_nodes);
+ for (i = 0; i < num_longs; i++)
+ target->map[i] = from->map[i];
+}
+
+/* Returns whether the recovery bit was actually set - it may not be
+ * if a node is still marked as needing recovery */
+int ocfs2_recovery_map_set(struct ocfs2_super *osb,
+ int num)
+{
+ int set = 0;
+
+ spin_lock(&osb->node_map_lock);
+
+ __ocfs2_node_map_clear_bit(&osb->mounted_map, num);
+
+ if (!test_bit(num, osb->recovery_map.map)) {
+ __ocfs2_node_map_set_bit(&osb->recovery_map, num);
+ set = 1;
+ }
+
+ spin_unlock(&osb->node_map_lock);
+
+ return set;
+}
+
+void ocfs2_recovery_map_clear(struct ocfs2_super *osb,
+ int num)
+{
+ ocfs2_node_map_clear_bit(osb, &osb->recovery_map, num);
+}
+
+int ocfs2_node_map_iterate(struct ocfs2_super *osb,
+ struct ocfs2_node_map *map,
+ int idx)
+{
+ int i = idx;
+
+ idx = O2NM_INVALID_NODE_NUM;
+ spin_lock(&osb->node_map_lock);
+ if ((i != O2NM_INVALID_NODE_NUM) &&
+ (i >= 0) &&
+ (i < map->num_nodes)) {
+ while (i < map->num_nodes) {
+ if (test_bit(i, map->map)) {
+ idx = i;
+ break;
+ }
+ i++;
+ }
+ }
+ spin_unlock(&osb->node_map_lock);
+ return idx;
+}
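
ocfs2_node_map_iterate() returns the first set bit at or after idx, or O2NM_INVALID_NODE_NUM when no node past idx is set. A user-space sketch of the same walk over a simplified single-word bitmap:

#include <stdio.h>

#define INVALID_NODE (-1)
#define NUM_NODES    32

/* Return the first set bit at or after idx, or INVALID_NODE. */
static int next_node(unsigned long map, int idx)
{
        for (int i = idx; i < NUM_NODES; i++)
                if (map & (1UL << i))
                        return i;
        return INVALID_NODE;
}

int main(void)
{
        unsigned long mounted = (1UL << 3) | (1UL << 7);   /* nodes 3 and 7 up */

        for (int n = next_node(mounted, 0); n != INVALID_NODE;
             n = next_node(mounted, n + 1))
                printf("node %d\n", n);
        return 0;
}
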
diff --git a/fs/ocfs2/heartbeat.h b/fs/ocfs2/heartbeat.h
new file mode 100644
index 00000000000..e8fb079122e
--- /dev/null
+++ b/fs/ocfs2/heartbeat.h
@@ -0,0 +1,67 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * heartbeat.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_HEARTBEAT_H
+#define OCFS2_HEARTBEAT_H
+
+void ocfs2_init_node_maps(struct ocfs2_super *osb);
+
+void ocfs2_setup_hb_callbacks(struct ocfs2_super *osb);
+int ocfs2_register_hb_callbacks(struct ocfs2_super *osb);
+void ocfs2_clear_hb_callbacks(struct ocfs2_super *osb);
+void ocfs2_stop_heartbeat(struct ocfs2_super *osb);
+
+/* node map functions - used to keep track of mounted and in-recovery
+ * nodes. */
+void ocfs2_node_map_init(struct ocfs2_node_map *map);
+int ocfs2_node_map_is_empty(struct ocfs2_super *osb,
+ struct ocfs2_node_map *map);
+void ocfs2_node_map_set_bit(struct ocfs2_super *osb,
+ struct ocfs2_node_map *map,
+ int bit);
+void ocfs2_node_map_clear_bit(struct ocfs2_super *osb,
+ struct ocfs2_node_map *map,
+ int bit);
+int ocfs2_node_map_test_bit(struct ocfs2_super *osb,
+ struct ocfs2_node_map *map,
+ int bit);
+int ocfs2_node_map_iterate(struct ocfs2_super *osb,
+ struct ocfs2_node_map *map,
+ int idx);
+static inline int ocfs2_node_map_first_set_bit(struct ocfs2_super *osb,
+ struct ocfs2_node_map *map)
+{
+ return ocfs2_node_map_iterate(osb, map, 0);
+}
+int ocfs2_recovery_map_set(struct ocfs2_super *osb,
+ int num);
+void ocfs2_recovery_map_clear(struct ocfs2_super *osb,
+ int num);
+/* returns 1 if bit is the only bit set in target, 0 otherwise */
+int ocfs2_node_map_is_only(struct ocfs2_super *osb,
+ struct ocfs2_node_map *target,
+ int bit);
+
+#endif /* OCFS2_HEARTBEAT_H */
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
new file mode 100644
index 00000000000..d4ecc062771
--- /dev/null
+++ b/fs/ocfs2/inode.c
@@ -0,0 +1,1140 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * inode.c
+ *
+ * vfs' aops, fops, dops and iops
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+
+#include <asm/byteorder.h>
+
+#define MLOG_MASK_PREFIX ML_INODE
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#include "alloc.h"
+#include "dlmglue.h"
+#include "extent_map.h"
+#include "file.h"
+#include "inode.h"
+#include "journal.h"
+#include "namei.h"
+#include "suballoc.h"
+#include "super.h"
+#include "symlink.h"
+#include "sysfile.h"
+#include "uptodate.h"
+#include "vote.h"
+
+#include "buffer_head_io.h"
+
+#define OCFS2_FI_FLAG_NOWAIT 0x1
+#define OCFS2_FI_FLAG_DELETE 0x2
+struct ocfs2_find_inode_args
+{
+ u64 fi_blkno;
+ unsigned long fi_ino;
+ unsigned int fi_flags;
+};
+
+static int ocfs2_read_locked_inode(struct inode *inode,
+ struct ocfs2_find_inode_args *args);
+static int ocfs2_init_locked_inode(struct inode *inode, void *opaque);
+static int ocfs2_find_actor(struct inode *inode, void *opaque);
+static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
+ struct inode *inode,
+ struct buffer_head *fe_bh);
+
+struct inode *ocfs2_ilookup_for_vote(struct ocfs2_super *osb,
+ u64 blkno,
+ int delete_vote)
+{
+ struct ocfs2_find_inode_args args;
+
+ /* ocfs2_ilookup_for_vote should *only* be called from the
+ * vote thread */
+ BUG_ON(current != osb->vote_task);
+
+ args.fi_blkno = blkno;
+ args.fi_flags = OCFS2_FI_FLAG_NOWAIT;
+ if (delete_vote)
+ args.fi_flags |= OCFS2_FI_FLAG_DELETE;
+ args.fi_ino = ino_from_blkno(osb->sb, blkno);
+ return ilookup5(osb->sb, args.fi_ino, ocfs2_find_actor, &args);
+}
+
+struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno)
+{
+ struct inode *inode = NULL;
+ struct super_block *sb = osb->sb;
+ struct ocfs2_find_inode_args args;
+
+ mlog_entry("(blkno = %"MLFu64")\n", blkno);
+
+ /* Ok. By now we've either got the offsets passed to us by the
+ * caller, or we just pulled them off the bh. Let's do some
+ * sanity checks to make sure they're OK. */
+ if (blkno == 0) {
+ inode = ERR_PTR(-EINVAL);
+ mlog_errno(PTR_ERR(inode));
+ goto bail;
+ }
+
+ args.fi_blkno = blkno;
+ args.fi_flags = 0;
+ args.fi_ino = ino_from_blkno(sb, blkno);
+
+ inode = iget5_locked(sb, args.fi_ino, ocfs2_find_actor,
+ ocfs2_init_locked_inode, &args);
+ /* inode was *not* in the inode cache. 2.6.x requires
+ * us to do our own read_inode call and unlock it
+ * afterwards. */
+ if (inode && inode->i_state & I_NEW) {
+ mlog(0, "Inode was not in inode cache, reading it.\n");
+ ocfs2_read_locked_inode(inode, &args);
+ unlock_new_inode(inode);
+ }
+ if (inode == NULL) {
+ inode = ERR_PTR(-ENOMEM);
+ mlog_errno(PTR_ERR(inode));
+ goto bail;
+ }
+ if (is_bad_inode(inode)) {
+ iput(inode);
+ inode = ERR_PTR(-ESTALE);
+ mlog_errno(PTR_ERR(inode));
+ goto bail;
+ }
+
+bail:
+ if (!IS_ERR(inode)) {
+ mlog(0, "returning inode with number %"MLFu64"\n",
+ OCFS2_I(inode)->ip_blkno);
+ mlog_exit_ptr(inode);
+ } else
+ mlog_errno(PTR_ERR(inode));
+
+ return inode;
+}
+
+
+/*
+ * here's how inodes get read from disk:
+ * iget5_locked -> find_actor -> OCFS2_FIND_ACTOR
+ * found? : return the in-memory inode
+ * not found? : get_new_inode -> OCFS2_INIT_LOCKED_INODE
+ */
+
+static int ocfs2_find_actor(struct inode *inode, void *opaque)
+{
+ struct ocfs2_find_inode_args *args = NULL;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ int ret = 0;
+
+ mlog_entry("(0x%p, %lu, 0x%p)\n", inode, inode->i_ino, opaque);
+
+ args = opaque;
+
+ mlog_bug_on_msg(!inode, "No inode in find actor!\n");
+
+ if (oi->ip_blkno != args->fi_blkno)
+ goto bail;
+
+ /* OCFS2_FI_FLAG_NOWAIT is *only* set from
+ * ocfs2_ilookup_for_vote which won't create an inode for one
+ * that isn't found. The vote thread doesn't want to get an
+ * inode which is in the process of going away - otherwise
+ * the call to __wait_on_freeing_inode in find_inode_fast will
+ * cause it to deadlock on an inode which may be waiting on a
+ * vote (or lock release) in delete_inode */
+ if ((args->fi_flags & OCFS2_FI_FLAG_NOWAIT) &&
+ (inode->i_state & (I_FREEING|I_CLEAR))) {
+ /* As stated above, we're not going to return an
+ * inode. In the case of a delete vote, the voting
+ * code is going to signal the other node to go
+ * ahead. Mark that state here, so this freeing inode
+ * has the state when it gets to delete_inode. */
+ if (args->fi_flags & OCFS2_FI_FLAG_DELETE) {
+ spin_lock(&oi->ip_lock);
+ ocfs2_mark_inode_remotely_deleted(inode);
+ spin_unlock(&oi->ip_lock);
+ }
+ goto bail;
+ }
+
+ ret = 1;
+bail:
+ mlog_exit(ret);
+ return ret;
+}
+
+/*
+ * initialize the new inode, but don't do anything that would cause
+ * us to sleep.
+ * return 0 on success, 1 on failure
+ */
+static int ocfs2_init_locked_inode(struct inode *inode, void *opaque)
+{
+ struct ocfs2_find_inode_args *args = opaque;
+
+ mlog_entry("inode = %p, opaque = %p\n", inode, opaque);
+
+ inode->i_ino = args->fi_ino;
+ OCFS2_I(inode)->ip_blkno = args->fi_blkno;
+
+ mlog_exit(0);
+ return 0;
+}
+
+int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
+ int create_ino)
+{
+ struct super_block *sb;
+ struct ocfs2_super *osb;
+ int status = -EINVAL;
+
+ mlog_entry("(0x%p, size:%"MLFu64")\n", inode, fe->i_size);
+
+ sb = inode->i_sb;
+ osb = OCFS2_SB(sb);
+
+ /* this means that read_inode cannot create a superblock inode
+ * today. change if needed. */
+ if (!OCFS2_IS_VALID_DINODE(fe) ||
+ !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL))) {
+ mlog(ML_ERROR, "Invalid dinode: i_ino=%lu, i_blkno=%"MLFu64", "
+ "signature = %.*s, flags = 0x%x\n",
+ inode->i_ino, le64_to_cpu(fe->i_blkno), 7,
+ fe->i_signature, le32_to_cpu(fe->i_flags));
+ goto bail;
+ }
+
+ if (le32_to_cpu(fe->i_fs_generation) != osb->fs_generation) {
+ mlog(ML_ERROR, "file entry generation does not match "
+ "superblock! osb->fs_generation=%x, "
+ "fe->i_fs_generation=%x\n",
+ osb->fs_generation, le32_to_cpu(fe->i_fs_generation));
+ goto bail;
+ }
+
+ inode->i_version = 1;
+ inode->i_generation = le32_to_cpu(fe->i_generation);
+ inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev));
+ inode->i_mode = le16_to_cpu(fe->i_mode);
+ inode->i_uid = le32_to_cpu(fe->i_uid);
+ inode->i_gid = le32_to_cpu(fe->i_gid);
+ inode->i_blksize = (u32)osb->s_clustersize;
+
+ /* Fast symlinks will have i_size but no allocated clusters. */
+ if (S_ISLNK(inode->i_mode) && !fe->i_clusters)
+ inode->i_blocks = 0;
+ else
+ inode->i_blocks =
+ ocfs2_align_bytes_to_sectors(le64_to_cpu(fe->i_size));
+ inode->i_mapping->a_ops = &ocfs2_aops;
+ inode->i_flags |= S_NOATIME;
+ inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
+ inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
+ inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
+ inode->i_mtime.tv_nsec = le32_to_cpu(fe->i_mtime_nsec);
+ inode->i_ctime.tv_sec = le64_to_cpu(fe->i_ctime);
+ inode->i_ctime.tv_nsec = le32_to_cpu(fe->i_ctime_nsec);
+
+ if (OCFS2_I(inode)->ip_blkno != le64_to_cpu(fe->i_blkno))
+ mlog(ML_ERROR,
+ "ip_blkno %"MLFu64" != i_blkno %"MLFu64"!\n",
+ OCFS2_I(inode)->ip_blkno, fe->i_blkno);
+
+ OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
+ OCFS2_I(inode)->ip_orphaned_slot = OCFS2_INVALID_SLOT;
+
+ if (create_ino)
+ inode->i_ino = ino_from_blkno(inode->i_sb,
+ le64_to_cpu(fe->i_blkno));
+
+ mlog(0, "blkno = %"MLFu64", ino = %lu, create_ino = %s\n",
+ fe->i_blkno, inode->i_ino, create_ino ? "true" : "false");
+
+ inode->i_nlink = le16_to_cpu(fe->i_links_count);
+
+ if (fe->i_flags & cpu_to_le32(OCFS2_LOCAL_ALLOC_FL)) {
+ OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP;
+ mlog(0, "local alloc inode: i_ino=%lu\n", inode->i_ino);
+ } else if (fe->i_flags & cpu_to_le32(OCFS2_BITMAP_FL)) {
+ OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP;
+ } else if (fe->i_flags & cpu_to_le32(OCFS2_SUPER_BLOCK_FL)) {
+ mlog(0, "superblock inode: i_ino=%lu\n", inode->i_ino);
+ /* we can't actually hit this as read_inode can't
+ * handle superblocks today ;-) */
+ BUG();
+ }
+
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFREG:
+ inode->i_fop = &ocfs2_fops;
+ inode->i_op = &ocfs2_file_iops;
+ i_size_write(inode, le64_to_cpu(fe->i_size));
+ break;
+ case S_IFDIR:
+ inode->i_op = &ocfs2_dir_iops;
+ inode->i_fop = &ocfs2_dops;
+ i_size_write(inode, le64_to_cpu(fe->i_size));
+ break;
+ case S_IFLNK:
+ if (ocfs2_inode_is_fast_symlink(inode))
+ inode->i_op = &ocfs2_fast_symlink_inode_operations;
+ else
+ inode->i_op = &ocfs2_symlink_inode_operations;
+ i_size_write(inode, le64_to_cpu(fe->i_size));
+ break;
+ default:
+ inode->i_op = &ocfs2_special_file_iops;
+ init_special_inode(inode, inode->i_mode,
+ inode->i_rdev);
+ break;
+ }
+
+ ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_rw_lockres,
+ OCFS2_LOCK_TYPE_RW, inode);
+ ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_meta_lockres,
+ OCFS2_LOCK_TYPE_META, inode);
+ ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_data_lockres,
+ OCFS2_LOCK_TYPE_DATA, inode);
+
+ status = 0;
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+static int ocfs2_read_locked_inode(struct inode *inode,
+ struct ocfs2_find_inode_args *args)
+{
+ struct super_block *sb;
+ struct ocfs2_super *osb;
+ struct ocfs2_dinode *fe;
+ struct buffer_head *bh = NULL;
+ int status;
+ int sysfile = 0;
+
+ mlog_entry("(0x%p, 0x%p)\n", inode, args);
+
+ status = -EINVAL;
+ if (inode == NULL || inode->i_sb == NULL) {
+ mlog(ML_ERROR, "bad inode\n");
+ goto bail;
+ }
+ sb = inode->i_sb;
+ osb = OCFS2_SB(sb);
+
+ if (!args) {
+ mlog(ML_ERROR, "bad inode args\n");
+ make_bad_inode(inode);
+ goto bail;
+ }
+
+ /* Read the FE off disk. This is safe because the kernel only
+ * does one read_inode2 for a new inode, and if it doesn't
+ * exist yet then nobody can be working on it! */
+ status = ocfs2_read_block(osb, args->fi_blkno, &bh, 0, NULL);
+ if (status < 0) {
+ mlog_errno(status);
+ make_bad_inode(inode);
+ goto bail;
+ }
+
+ fe = (struct ocfs2_dinode *) bh->b_data;
+ if (!OCFS2_IS_VALID_DINODE(fe)) {
+ mlog(ML_ERROR, "Invalid dinode #%"MLFu64": signature = %.*s\n",
+ fe->i_blkno, 7, fe->i_signature);
+ make_bad_inode(inode);
+ goto bail;
+ }
+
+ if (fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL))
+ sysfile = 1;
+
+ if (S_ISCHR(le16_to_cpu(fe->i_mode)) ||
+ S_ISBLK(le16_to_cpu(fe->i_mode)))
+ inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev));
+
+ status = -EINVAL;
+ if (ocfs2_populate_inode(inode, fe, 0) < 0) {
+ mlog(ML_ERROR, "populate inode failed! i_blkno=%"MLFu64", "
+ "i_ino=%lu\n", fe->i_blkno, inode->i_ino);
+ make_bad_inode(inode);
+ goto bail;
+ }
+
+ BUG_ON(args->fi_blkno != le64_to_cpu(fe->i_blkno));
+
+ if (sysfile)
+ OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SYSTEM_FILE;
+
+ status = 0;
+
+bail:
+ if (args && bh)
+ brelse(bh);
+
+ mlog_exit(status);
+ return status;
+}
+
+void ocfs2_sync_blockdev(struct super_block *sb)
+{
+ sync_blockdev(sb->s_bdev);
+}
+
+static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
+ struct inode *inode,
+ struct buffer_head *fe_bh)
+{
+ int status = 0;
+ struct ocfs2_journal_handle *handle = NULL;
+ struct ocfs2_truncate_context *tc = NULL;
+ struct ocfs2_dinode *fe;
+
+ mlog_entry_void();
+
+ fe = (struct ocfs2_dinode *) fe_bh->b_data;
+
+ /* zero allocation, zero truncate :) */
+ if (!fe->i_clusters)
+ goto bail;
+
+ handle = ocfs2_start_trans(osb, handle, OCFS2_INODE_UPDATE_CREDITS);
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ handle = NULL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_set_inode_size(handle, inode, fe_bh, 0ULL);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ ocfs2_commit_trans(handle);
+ handle = NULL;
+
+ status = ocfs2_prepare_truncate(osb, inode, fe_bh, &tc);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_commit_truncate(osb, inode, fe_bh, tc);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+bail:
+ if (handle)
+ ocfs2_commit_trans(handle);
+
+ mlog_exit(status);
+ return status;
+}
+
+static int ocfs2_remove_inode(struct inode *inode,
+ struct buffer_head *di_bh,
+ struct inode *orphan_dir_inode,
+ struct buffer_head *orphan_dir_bh)
+{
+ int status;
+ struct inode *inode_alloc_inode = NULL;
+ struct buffer_head *inode_alloc_bh = NULL;
+ struct ocfs2_journal_handle *handle;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
+
+ inode_alloc_inode =
+ ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE,
+ le16_to_cpu(di->i_suballoc_slot));
+ if (!inode_alloc_inode) {
+ status = -EEXIST;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ mutex_lock(&inode_alloc_inode->i_mutex);
+ status = ocfs2_meta_lock(inode_alloc_inode, NULL, &inode_alloc_bh, 1);
+ if (status < 0) {
+ mutex_unlock(&inode_alloc_inode->i_mutex);
+
+ mlog_errno(status);
+ goto bail;
+ }
+
+ handle = ocfs2_start_trans(osb, NULL, OCFS2_DELETE_INODE_CREDITS);
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ mlog_errno(status);
+ goto bail_unlock;
+ }
+
+ status = ocfs2_orphan_del(osb, handle, orphan_dir_inode, inode,
+ orphan_dir_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail_commit;
+ }
+
+ /* set the inode's dtime */
+ status = ocfs2_journal_access(handle, inode, di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail_commit;
+ }
+
+ di->i_dtime = cpu_to_le64(CURRENT_TIME.tv_sec);
+ le32_and_cpu(&di->i_flags, ~(OCFS2_VALID_FL | OCFS2_ORPHANED_FL));
+
+ status = ocfs2_journal_dirty(handle, di_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail_commit;
+ }
+
+ ocfs2_remove_from_cache(inode, di_bh);
+
+ status = ocfs2_free_dinode(handle, inode_alloc_inode,
+ inode_alloc_bh, di);
+ if (status < 0)
+ mlog_errno(status);
+
+bail_commit:
+ ocfs2_commit_trans(handle);
+bail_unlock:
+ ocfs2_meta_unlock(inode_alloc_inode, 1);
+ mutex_unlock(&inode_alloc_inode->i_mutex);
+ brelse(inode_alloc_bh);
+bail:
+ iput(inode_alloc_inode);
+
+ return status;
+}
+
+static int ocfs2_wipe_inode(struct inode *inode,
+ struct buffer_head *di_bh)
+{
+ int status, orphaned_slot;
+ struct inode *orphan_dir_inode = NULL;
+ struct buffer_head *orphan_dir_bh = NULL;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ /* We've already voted on this so it should be readonly - no
+ * spinlock needed. */
+ orphaned_slot = OCFS2_I(inode)->ip_orphaned_slot;
+ orphan_dir_inode = ocfs2_get_system_file_inode(osb,
+ ORPHAN_DIR_SYSTEM_INODE,
+ orphaned_slot);
+ if (!orphan_dir_inode) {
+ status = -EEXIST;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* Lock the orphan dir. The lock will be held for the entire
+ * delete_inode operation. We do this now to avoid races with
+ * recovery completion on other nodes. */
+ mutex_lock(&orphan_dir_inode->i_mutex);
+ status = ocfs2_meta_lock(orphan_dir_inode, NULL, &orphan_dir_bh, 1);
+ if (status < 0) {
+ mutex_unlock(&orphan_dir_inode->i_mutex);
+
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* we do this while holding the orphan dir lock because we
+ * don't want recovery running on another node to vote for
+ * an inode delete on us -- that would result in two nodes
+ * truncating the same file! */
+ status = ocfs2_truncate_for_delete(osb, inode, di_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail_unlock_dir;
+ }
+
+ status = ocfs2_remove_inode(inode, di_bh, orphan_dir_inode,
+ orphan_dir_bh);
+ if (status < 0)
+ mlog_errno(status);
+
+bail_unlock_dir:
+ ocfs2_meta_unlock(orphan_dir_inode, 1);
+ mutex_unlock(&orphan_dir_inode->i_mutex);
+ brelse(orphan_dir_bh);
+bail:
+ iput(orphan_dir_inode);
+
+ return status;
+}
+
+/* There is a series of simple checks that should be done before a
+ * vote is even considered. Encapsulate those in this function. */
+static int ocfs2_inode_is_valid_to_delete(struct inode *inode)
+{
+ int ret = 0;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ /* We shouldn't be getting here for the root directory
+ * inode. */
+ if (inode == osb->root_inode) {
+ mlog(ML_ERROR, "Skipping delete of root inode.\n");
+ goto bail;
+ }
+
+ /* If we're coming from process_vote we can't go into our own
+ * voting [hello, deadlock city!], so unfortunately we just
+ * have to skip deleting this guy. That's OK though because
+ * the node who's doing the actual deleting should handle it
+ * anyway. */
+ if (current == osb->vote_task) {
+ mlog(0, "Skipping delete of %lu because we're currently "
+ "in process_vote\n", inode->i_ino);
+ goto bail;
+ }
+
+ spin_lock(&oi->ip_lock);
+ /* OCFS2 *never* deletes system files. We should technically
+ * never get here as system file inodes should always have a
+ * positive link count. */
+ if (oi->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
+ mlog(ML_ERROR, "Skipping delete of system file %"MLFu64".\n",
+ oi->ip_blkno);
+ goto bail_unlock;
+ }
+
+ /* If we have voted "yes" on the wipe of this inode for
+ * another node, it will be marked here so we can safely skip
+ * it. Recovery will clean up any inodes we might inadvertently
+ * skip here. */
+ if (oi->ip_flags & OCFS2_INODE_SKIP_DELETE) {
+ mlog(0, "Skipping delete of %lu because another node "
+ "has done this for us.\n", inode->i_ino);
+ goto bail_unlock;
+ }
+
+ ret = 1;
+bail_unlock:
+ spin_unlock(&oi->ip_lock);
+bail:
+ return ret;
+}
+
+/* Query the cluster to determine whether we should wipe an inode from
+ * disk or not.
+ *
+ * Requires the inode to have the cluster lock. */
+static int ocfs2_query_inode_wipe(struct inode *inode,
+ struct buffer_head *di_bh,
+ int *wipe)
+{
+ int status = 0;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_dinode *di;
+
+ *wipe = 0;
+
+ /* While we were waiting for the cluster lock in
+ * ocfs2_delete_inode, another node might have asked to delete
+ * the inode. Recheck our flags to catch this. */
+ if (!ocfs2_inode_is_valid_to_delete(inode)) {
+ mlog(0, "Skipping delete of %"MLFu64" because flags changed\n",
+ oi->ip_blkno);
+ goto bail;
+ }
+
+ /* Now that we have an up to date inode, we can double check
+ * the link count. */
+ if (inode->i_nlink) {
+ mlog(0, "Skipping delete of %"MLFu64" because nlink = %u\n",
+ oi->ip_blkno, inode->i_nlink);
+ goto bail;
+ }
+
+ /* Do some basic inode verification... */
+ di = (struct ocfs2_dinode *) di_bh->b_data;
+ if (!(di->i_flags & cpu_to_le32(OCFS2_ORPHANED_FL))) {
+ /* for lack of a better error? */
+ status = -EEXIST;
+ mlog(ML_ERROR,
+ "Inode %"MLFu64" (on-disk %"MLFu64") not orphaned! "
+ "Disk flags 0x%x, inode flags 0x%x\n",
+ oi->ip_blkno, di->i_blkno, di->i_flags, oi->ip_flags);
+ goto bail;
+ }
+
+ /* has someone already deleted us?! baaad... */
+ if (di->i_dtime) {
+ status = -EEXIST;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_request_delete_vote(inode);
+ /* -EBUSY means that other nodes are still using the
+ * inode. We're done here though, so avoid doing anything on
+ * disk and let them worry about deleting it. */
+ if (status == -EBUSY) {
+ status = 0;
+ mlog(0, "Skipping delete of %"MLFu64" because it is in use on"
+ "other nodes\n", oi->ip_blkno);
+ goto bail;
+ }
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ spin_lock(&oi->ip_lock);
+ if (oi->ip_orphaned_slot == OCFS2_INVALID_SLOT) {
+ /* Nobody knew which slot this inode was orphaned
+ * into. This may happen during node death, and recovery
+ * knows how to clean it up, so we can safely ignore this
+ * inode from now on. */
+ mlog(0, "Nobody knew where inode %"MLFu64" was orphaned!\n",
+ oi->ip_blkno);
+ } else {
+ *wipe = 1;
+
+ mlog(0, "Inode %"MLFu64" is ok to wipe from orphan dir %d\n",
+ oi->ip_blkno, oi->ip_orphaned_slot);
+ }
+ spin_unlock(&oi->ip_lock);
+
+bail:
+ return status;
+}
+
+/* Support function for ocfs2_delete_inode. Will help us keep the
+ * inode data in a consistent state for clear_inode. Always truncates
+ * pages, optionally syncing them first. */
+static void ocfs2_cleanup_delete_inode(struct inode *inode,
+ int sync_data)
+{
+ mlog(0, "Cleanup inode %"MLFu64", sync = %d\n",
+ OCFS2_I(inode)->ip_blkno, sync_data);
+ if (sync_data)
+ write_inode_now(inode, 1);
+ truncate_inode_pages(&inode->i_data, 0);
+}
+
+void ocfs2_delete_inode(struct inode *inode)
+{
+ int wipe, status;
+ sigset_t blocked, oldset;
+ struct buffer_head *di_bh = NULL;
+
+ mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino);
+
+ if (is_bad_inode(inode)) {
+ mlog(0, "Skipping delete of bad inode\n");
+ goto bail;
+ }
+
+ if (!ocfs2_inode_is_valid_to_delete(inode)) {
+ /* It's probably not necessary to truncate_inode_pages
+ * here but we do it for safety anyway (it will most
+ * likely be a no-op) */
+ ocfs2_cleanup_delete_inode(inode, 0);
+ goto bail;
+ }
+
+ /* We want to block signals in delete_inode as the lock and
+ * messaging paths may return us -ERESTARTSYS, which would
+ * cause us to exit early, resulting in inodes being orphaned
+ * forever. */
+ sigfillset(&blocked);
+ status = sigprocmask(SIG_BLOCK, &blocked, &oldset);
+ if (status < 0) {
+ mlog_errno(status);
+ ocfs2_cleanup_delete_inode(inode, 1);
+ goto bail;
+ }
+
+ /* Lock down the inode. This gives us an up to date view of
+ * its metadata (for verification), and allows us to
+ * serialize delete_inode votes.
+ *
+ * Even though we might be doing a truncate, we don't take the
+ * allocation lock here as it won't be needed - nobody will
+ * have the file open.
+ */
+ status = ocfs2_meta_lock(inode, NULL, &di_bh, 1);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ ocfs2_cleanup_delete_inode(inode, 0);
+ goto bail_unblock;
+ }
+
+ /* Query the cluster. This will be the final decision made
+ * before we go ahead and wipe the inode. */
+ status = ocfs2_query_inode_wipe(inode, di_bh, &wipe);
+ if (!wipe || status < 0) {
+ /* Error and inode busy vote both mean we won't be
+ * removing the inode, so they take almost the same
+ * path. */
+ if (status < 0)
+ mlog_errno(status);
+
+ /* Someone in the cluster has voted to not wipe this
+ * inode, or it was never completely orphaned. Write
+ * out the pages and exit now. */
+ ocfs2_cleanup_delete_inode(inode, 1);
+ goto bail_unlock_inode;
+ }
+
+ ocfs2_cleanup_delete_inode(inode, 0);
+
+ status = ocfs2_wipe_inode(inode, di_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail_unlock_inode;
+ }
+
+ /* Mark the inode as successfully deleted. This is important
+ * for ocfs2_clear_inode as it will check this flag and skip
+ * any checkpointing work. */
+ OCFS2_I(inode)->ip_flags |= OCFS2_INODE_DELETED;
+
+bail_unlock_inode:
+ ocfs2_meta_unlock(inode, 1);
+ brelse(di_bh);
+bail_unblock:
+ status = sigprocmask(SIG_SETMASK, &oldset, NULL);
+ if (status < 0)
+ mlog_errno(status);
+bail:
+ clear_inode(inode);
+ mlog_exit_void();
+}
+
+void ocfs2_clear_inode(struct inode *inode)
+{
+ int status;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+
+ mlog_entry_void();
+
+ if (!inode)
+ goto bail;
+
+ mlog(0, "Clearing inode: %"MLFu64", nlink = %u\n",
+ OCFS2_I(inode)->ip_blkno, inode->i_nlink);
+
+ mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL,
+ "Inode=%lu\n", inode->i_ino);
+
+ /* Do these before all the other work so that we don't bounce
+ * the vote thread while waiting to destroy the locks. */
+ ocfs2_mark_lockres_freeing(&oi->ip_rw_lockres);
+ ocfs2_mark_lockres_freeing(&oi->ip_meta_lockres);
+ ocfs2_mark_lockres_freeing(&oi->ip_data_lockres);
+
+ /* We very well may get a clear_inode before all of an inode's
+ * metadata has hit disk. Of course, we can't drop any cluster
+ * locks until the journal has finished with it. The only
+ * exceptions here are successfully wiped inodes - their
+ * metadata can now be considered part of the system inodes
+ * from which they came. */
+ if (!(OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED))
+ ocfs2_checkpoint_inode(inode);
+
+ mlog_bug_on_msg(!list_empty(&oi->ip_io_markers),
+ "Clear inode of %"MLFu64", inode has io markers\n",
+ oi->ip_blkno);
+
+ ocfs2_extent_map_drop(inode, 0);
+ ocfs2_extent_map_init(inode);
+
+ status = ocfs2_drop_inode_locks(inode);
+ if (status < 0)
+ mlog_errno(status);
+
+ ocfs2_lock_res_free(&oi->ip_rw_lockres);
+ ocfs2_lock_res_free(&oi->ip_meta_lockres);
+ ocfs2_lock_res_free(&oi->ip_data_lockres);
+
+ ocfs2_metadata_cache_purge(inode);
+
+ mlog_bug_on_msg(oi->ip_metadata_cache.ci_num_cached,
+ "Clear inode of %"MLFu64", inode has %u cache items\n",
+ oi->ip_blkno, oi->ip_metadata_cache.ci_num_cached);
+
+ mlog_bug_on_msg(!(oi->ip_flags & OCFS2_INODE_CACHE_INLINE),
+ "Clear inode of %"MLFu64", inode has a bad flag\n",
+ oi->ip_blkno);
+
+ mlog_bug_on_msg(spin_is_locked(&oi->ip_lock),
+ "Clear inode of %"MLFu64", inode is locked\n",
+ oi->ip_blkno);
+
+ mlog_bug_on_msg(down_trylock(&oi->ip_io_sem),
+ "Clear inode of %"MLFu64", io_sem is locked\n",
+ oi->ip_blkno);
+ up(&oi->ip_io_sem);
+
+ /*
+ * Note the inverted return conventions: down_trylock() returns
+ * 0 on success, while down_write_trylock() returns 1 on
+ * success.
+ */
+ mlog_bug_on_msg(!down_write_trylock(&oi->ip_alloc_sem),
+ "Clear inode of %"MLFu64", alloc_sem is locked\n",
+ oi->ip_blkno);
+ up_write(&oi->ip_alloc_sem);
+
+ mlog_bug_on_msg(oi->ip_open_count,
+ "Clear inode of %"MLFu64" has open count %d\n",
+ oi->ip_blkno, oi->ip_open_count);
+ mlog_bug_on_msg(!list_empty(&oi->ip_handle_list),
+ "Clear inode of %"MLFu64" has non empty handle list\n",
+ oi->ip_blkno);
+ mlog_bug_on_msg(oi->ip_handle,
+ "Clear inode of %"MLFu64" has non empty handle pointer\n",
+ oi->ip_blkno);
+
+ /* Clear all other flags. */
+ oi->ip_flags = OCFS2_INODE_CACHE_INLINE;
+ oi->ip_created_trans = 0;
+ oi->ip_last_trans = 0;
+ oi->ip_dir_start_lookup = 0;
+ oi->ip_blkno = 0ULL;
+
+bail:
+ mlog_exit_void();
+}
+
+/* Called under inode_lock, with no more references on the
+ * struct inode, so it's safe here to check the flags field
+ * and to manipulate i_nlink without any other locks. */
+void ocfs2_drop_inode(struct inode *inode)
+{
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+
+ mlog_entry_void();
+
+ mlog(0, "Drop inode %"MLFu64", nlink = %u, ip_flags = 0x%x\n",
+ oi->ip_blkno, inode->i_nlink, oi->ip_flags);
+
+ /* Testing ip_orphaned_slot here wouldn't work because we may
+ * not have gotten a delete_inode vote from any other nodes
+ * yet. */
+ if (oi->ip_flags & OCFS2_INODE_MAYBE_ORPHANED) {
+ mlog(0, "Inode was orphaned on another node, clearing nlink.\n");
+ inode->i_nlink = 0;
+ }
+
+ generic_drop_inode(inode);
+
+ mlog_exit_void();
+}
+
+/*
+ * TODO: this should probably be merged into ocfs2_get_block
+ *
+ * However, you now need to pay attention to the cont_prepare_write()
+ * stuff in ocfs2_get_block (that is, ocfs2_get_block pretty much
+ * expects never to extend).
+ */
+struct buffer_head *ocfs2_bread(struct inode *inode,
+ int block, int *err, int reada)
+{
+ struct buffer_head *bh = NULL;
+ int tmperr;
+ u64 p_blkno;
+ int readflags = OCFS2_BH_CACHED;
+
+#if 0
+ /* only turn this on if we know we can deal with read_block
+ * returning nothing */
+ if (reada)
+ readflags |= OCFS2_BH_READAHEAD;
+#endif
+
+ if (((u64)block << inode->i_sb->s_blocksize_bits) >=
+ i_size_read(inode)) {
+ BUG_ON(!reada);
+ return NULL;
+ }
+
+ tmperr = ocfs2_extent_map_get_blocks(inode, block, 1,
+ &p_blkno, NULL);
+ if (tmperr < 0) {
+ mlog_errno(tmperr);
+ goto fail;
+ }
+
+ tmperr = ocfs2_read_block(OCFS2_SB(inode->i_sb), p_blkno, &bh,
+ readflags, inode);
+ if (tmperr < 0)
+ goto fail;
+
+ tmperr = 0;
+
+ *err = 0;
+ return bh;
+
+fail:
+ if (bh) {
+ brelse(bh);
+ bh = NULL;
+ }
+ *err = -EIO;
+ return NULL;
+}
+
+/*
+ * This is called from our getattr.
+ */
+int ocfs2_inode_revalidate(struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+ int status = 0;
+
+ mlog_entry("(inode = 0x%p, ino = %"MLFu64")\n", inode,
+ inode ? OCFS2_I(inode)->ip_blkno : 0ULL);
+
+ if (!inode) {
+ mlog(0, "eep, no inode!\n");
+ status = -ENOENT;
+ goto bail;
+ }
+
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+ if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+ mlog(0, "inode deleted!\n");
+ status = -ENOENT;
+ goto bail;
+ }
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+
+ /* Let ocfs2_meta_lock do the work of updating our struct
+ * inode for us. */
+ status = ocfs2_meta_lock(inode, NULL, NULL, 0);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ goto bail;
+ }
+ ocfs2_meta_unlock(inode, 0);
+bail:
+ mlog_exit(status);
+
+ return status;
+}
+
+/*
+ * Updates a disk inode from a
+ * struct inode.
+ * Only takes ip_lock.
+ */
+int ocfs2_mark_inode_dirty(struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ struct buffer_head *bh)
+{
+ int status;
+ struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;
+
+ mlog_entry("(inode %"MLFu64")\n", OCFS2_I(inode)->ip_blkno);
+
+ status = ocfs2_journal_access(handle, inode, bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+ fe->i_clusters = cpu_to_le32(OCFS2_I(inode)->ip_clusters);
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+
+ fe->i_size = cpu_to_le64(i_size_read(inode));
+ fe->i_links_count = cpu_to_le16(inode->i_nlink);
+ fe->i_uid = cpu_to_le32(inode->i_uid);
+ fe->i_gid = cpu_to_le32(inode->i_gid);
+ fe->i_mode = cpu_to_le16(inode->i_mode);
+ fe->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
+ fe->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
+ fe->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
+ fe->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
+ fe->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
+ fe->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+
+ status = ocfs2_journal_dirty(handle, bh);
+ if (status < 0)
+ mlog_errno(status);
+
+ status = 0;
+leave:
+
+ mlog_exit(status);
+ return status;
+}
+
+/*
+ * Updates a struct inode from a disk inode.
+ * Does no I/O; only takes ip_lock.
+ */
+void ocfs2_refresh_inode(struct inode *inode,
+ struct ocfs2_dinode *fe)
+{
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+
+ OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
+ i_size_write(inode, le64_to_cpu(fe->i_size));
+ inode->i_nlink = le16_to_cpu(fe->i_links_count);
+ inode->i_uid = le32_to_cpu(fe->i_uid);
+ inode->i_gid = le32_to_cpu(fe->i_gid);
+ inode->i_mode = le16_to_cpu(fe->i_mode);
+ inode->i_blksize = (u32) osb->s_clustersize;
+ if (S_ISLNK(inode->i_mode) && le32_to_cpu(fe->i_clusters) == 0)
+ inode->i_blocks = 0;
+ else
+ inode->i_blocks = ocfs2_align_bytes_to_sectors(i_size_read(inode));
+ inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
+ inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
+ inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
+ inode->i_mtime.tv_nsec = le32_to_cpu(fe->i_mtime_nsec);
+ inode->i_ctime.tv_sec = le64_to_cpu(fe->i_ctime);
+ inode->i_ctime.tv_nsec = le32_to_cpu(fe->i_ctime_nsec);
+
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+}
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
new file mode 100644
index 00000000000..9b017743365
--- /dev/null
+++ b/fs/ocfs2/inode.h
@@ -0,0 +1,145 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * inode.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifndef OCFS2_INODE_H
+#define OCFS2_INODE_H
+
+/* OCFS2 Inode Private Data */
+struct ocfs2_inode_info
+{
+ u64 ip_blkno;
+
+ struct ocfs2_lock_res ip_rw_lockres;
+ struct ocfs2_lock_res ip_meta_lockres;
+ struct ocfs2_lock_res ip_data_lockres;
+
+ /* protects allocation changes on this inode. */
+ struct rw_semaphore ip_alloc_sem;
+
+ /* These fields are protected by ip_lock */
+ spinlock_t ip_lock;
+ u32 ip_open_count;
+ u32 ip_clusters;
+ struct ocfs2_extent_map ip_map;
+ struct list_head ip_io_markers;
+ int ip_orphaned_slot;
+
+ struct semaphore ip_io_sem;
+
+ /* Used by the journalling code to attach an inode to a
+ * handle. These are protected by ip_io_sem in order to lock
+ * out other I/O to the inode until we either commit or
+ * abort. */
+ struct list_head ip_handle_list;
+ struct ocfs2_journal_handle *ip_handle;
+
+ u32 ip_flags; /* see below */
+
+ /* protected by recovery_lock. */
+ struct inode *ip_next_orphan;
+
+ u32 ip_dir_start_lookup;
+
+ /* next two are protected by trans_inc_lock */
+ /* which transaction were we created on? Zero if none. */
+ unsigned long ip_created_trans;
+ /* last transaction we were a part of. */
+ unsigned long ip_last_trans;
+
+ struct ocfs2_caching_info ip_metadata_cache;
+
+ struct inode vfs_inode;
+};
+
+/*
+ * Flags for the ip_flags field
+ */
+/* System file inodes */
+#define OCFS2_INODE_SYSTEM_FILE 0x00000001
+#define OCFS2_INODE_JOURNAL 0x00000002
+#define OCFS2_INODE_BITMAP 0x00000004
+/* This inode has been wiped from disk */
+#define OCFS2_INODE_DELETED 0x00000008
+/* Another node is deleting, so our delete is a nop */
+#define OCFS2_INODE_SKIP_DELETE 0x00000010
+/* Has the inode been orphaned on another node?
+ *
+ * This hints to ocfs2_drop_inode that it should clear i_nlink before
+ * continuing.
+ *
+ * We *only* set this on unlink vote from another node. If the inode
+ * was locally orphaned, then we're sure of the state and don't need
+ * to twiddle i_nlink later - it's either zero or not depending on
+ * whether our unlink succeeded. Otherwise we got this from a node
+ * whose intention was to orphan the inode; however, it may have
+ * crashed, failed, etc., so we let ocfs2_drop_inode zero the value and
+ * rely on ocfs2_delete_inode to sort things out under the proper
+ * cluster locks.
+ */
+#define OCFS2_INODE_MAYBE_ORPHANED 0x00000020
+/* Does someone have the file open O_DIRECT */
+#define OCFS2_INODE_OPEN_DIRECT 0x00000040
+/* Indicates that the metadata cache should be used as an array. */
+#define OCFS2_INODE_CACHE_INLINE 0x00000080
+
+static inline struct ocfs2_inode_info *OCFS2_I(struct inode *inode)
+{
+ return container_of(inode, struct ocfs2_inode_info, vfs_inode);
+}
+
+#define INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags & OCFS2_INODE_JOURNAL)
+#define SET_INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags |= OCFS2_INODE_JOURNAL)
+
+extern kmem_cache_t *ocfs2_inode_cache;
+
+extern struct address_space_operations ocfs2_aops;
+
+struct buffer_head *ocfs2_bread(struct inode *inode, int block,
+ int *err, int reada);
+void ocfs2_clear_inode(struct inode *inode);
+void ocfs2_delete_inode(struct inode *inode);
+void ocfs2_drop_inode(struct inode *inode);
+struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 feoff);
+struct inode *ocfs2_ilookup_for_vote(struct ocfs2_super *osb,
+ u64 blkno,
+ int delete_vote);
+int ocfs2_inode_init_private(struct inode *inode);
+int ocfs2_inode_revalidate(struct dentry *dentry);
+int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
+ int create_ino);
+void ocfs2_read_inode(struct inode *inode);
+void ocfs2_read_inode2(struct inode *inode, void *opaque);
+ssize_t ocfs2_rw_direct(int rw, struct file *filp, char *buf,
+ size_t size, loff_t *offp);
+void ocfs2_sync_blockdev(struct super_block *sb);
+void ocfs2_refresh_inode(struct inode *inode,
+ struct ocfs2_dinode *fe);
+int ocfs2_mark_inode_dirty(struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ struct buffer_head *bh);
+int ocfs2_aio_read(struct file *file, struct kiocb *req, struct iocb *iocb);
+int ocfs2_aio_write(struct file *file, struct kiocb *req, struct iocb *iocb);
+
+#endif /* OCFS2_INODE_H */
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
new file mode 100644
index 00000000000..303c8d96457
--- /dev/null
+++ b/fs/ocfs2/journal.c
@@ -0,0 +1,1652 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * journal.c
+ *
+ * Defines functions of journalling api
+ *
+ * Copyright (C) 2003, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/kthread.h>
+
+#define MLOG_MASK_PREFIX ML_JOURNAL
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#include "alloc.h"
+#include "dlmglue.h"
+#include "extent_map.h"
+#include "heartbeat.h"
+#include "inode.h"
+#include "journal.h"
+#include "localalloc.h"
+#include "namei.h"
+#include "slot_map.h"
+#include "super.h"
+#include "vote.h"
+#include "sysfile.h"
+
+#include "buffer_head_io.h"
+
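+/* Protects ip_created_trans and ip_last_trans in struct
+ * ocfs2_inode_info (see the "protected by trans_inc_lock" note in
+ * inode.h). */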
+spinlock_t trans_inc_lock = SPIN_LOCK_UNLOCKED;
+
+static int ocfs2_force_read_journal(struct inode *inode);
+static int ocfs2_recover_node(struct ocfs2_super *osb,
+ int node_num);
+static int __ocfs2_recovery_thread(void *arg);
+static int ocfs2_commit_cache(struct ocfs2_super *osb);
+static int ocfs2_wait_on_mount(struct ocfs2_super *osb);
+static void ocfs2_handle_cleanup_locks(struct ocfs2_journal *journal,
+ struct ocfs2_journal_handle *handle);
+static void ocfs2_commit_unstarted_handle(struct ocfs2_journal_handle *handle);
+static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
+ int dirty);
+static int ocfs2_trylock_journal(struct ocfs2_super *osb,
+ int slot_num);
+static int ocfs2_recover_orphans(struct ocfs2_super *osb,
+ int slot);
+static int ocfs2_commit_thread(void *arg);
+
+static int ocfs2_commit_cache(struct ocfs2_super *osb)
+{
+ int status = 0;
+ unsigned int flushed;
+ unsigned long old_id;
+ struct ocfs2_journal *journal = NULL;
+
+ mlog_entry_void();
+
+ journal = osb->journal;
+
+ /* Flush all pending commits and checkpoint the journal. */
+ down_write(&journal->j_trans_barrier);
+
+ if (atomic_read(&journal->j_num_trans) == 0) {
+ up_write(&journal->j_trans_barrier);
+ mlog(0, "No transactions for me to flush!\n");
+ goto finally;
+ }
+
+ journal_lock_updates(journal->j_journal);
+ status = journal_flush(journal->j_journal);
+ journal_unlock_updates(journal->j_journal);
+ if (status < 0) {
+ up_write(&journal->j_trans_barrier);
+ mlog_errno(status);
+ goto finally;
+ }
+
+ old_id = ocfs2_inc_trans_id(journal);
+
+ flushed = atomic_read(&journal->j_num_trans);
+ atomic_set(&journal->j_num_trans, 0);
+ up_write(&journal->j_trans_barrier);
+
+ mlog(0, "commit_thread: flushed transaction %lu (%u handles)\n",
+ journal->j_trans_id, flushed);
+
+ ocfs2_kick_vote_thread(osb);
+ wake_up(&journal->j_checkpointed);
+finally:
+ mlog_exit(status);
+ return status;
+}
+
+struct ocfs2_journal_handle *ocfs2_alloc_handle(struct ocfs2_super *osb)
+{
+ struct ocfs2_journal_handle *retval = NULL;
+
+ retval = kcalloc(1, sizeof(*retval), GFP_KERNEL);
+ if (!retval) {
+ mlog(ML_ERROR, "Failed to allocate memory for journal "
+ "handle!\n");
+ return NULL;
+ }
+
+ retval->max_buffs = 0;
+ retval->num_locks = 0;
+ retval->k_handle = NULL;
+
+ INIT_LIST_HEAD(&retval->locks);
+ INIT_LIST_HEAD(&retval->inode_list);
+ retval->journal = osb->journal;
+
+ return retval;
+}
+
+/* pass it NULL and it will allocate a new handle object for you. If
+ * you pass it a handle, however, it may still return an error, in
+ * which case it has freed the passed handle for you. */
+struct ocfs2_journal_handle *ocfs2_start_trans(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ int max_buffs)
+{
+ int ret;
+ journal_t *journal = osb->journal->j_journal;
+
+ mlog_entry("(max_buffs = %d)\n", max_buffs);
+
+ if (!osb || !osb->journal->j_journal)
+ BUG();
+
+ if (ocfs2_is_hard_readonly(osb)) {
+ ret = -EROFS;
+ goto done_free;
+ }
+
+ BUG_ON(osb->journal->j_state == OCFS2_JOURNAL_FREE);
+ BUG_ON(max_buffs <= 0);
+
+ /* JBD might support this, but our journalling code doesn't yet. */
+ if (journal_current_handle()) {
+ mlog(ML_ERROR, "Recursive transaction attempted!\n");
+ BUG();
+ }
+
+ if (!handle)
+ handle = ocfs2_alloc_handle(osb);
+ if (!handle) {
+ ret = -ENOMEM;
+ mlog(ML_ERROR, "Failed to allocate memory for journal "
+ "handle!\n");
+ goto done_free;
+ }
+
+ handle->max_buffs = max_buffs;
+
+ down_read(&osb->journal->j_trans_barrier);
+
+ /* actually start the transaction now */
+ handle->k_handle = journal_start(journal, max_buffs);
+ if (IS_ERR(handle->k_handle)) {
+ up_read(&osb->journal->j_trans_barrier);
+
+ ret = PTR_ERR(handle->k_handle);
+ handle->k_handle = NULL;
+ mlog_errno(ret);
+
+ if (is_journal_aborted(journal)) {
+ ocfs2_abort(osb->sb, "Detected aborted journal");
+ ret = -EROFS;
+ }
+ goto done_free;
+ }
+
+ atomic_inc(&(osb->journal->j_num_trans));
+ handle->flags |= OCFS2_HANDLE_STARTED;
+
+ mlog_exit_ptr(handle);
+ return handle;
+
+done_free:
+ if (handle)
+ ocfs2_commit_unstarted_handle(handle); /* will kfree handle */
+
+ mlog_exit(ret);
+ return ERR_PTR(ret);
+}
+
+void ocfs2_handle_add_inode(struct ocfs2_journal_handle *handle,
+ struct inode *inode)
+{
+ BUG_ON(!handle);
+ BUG_ON(!inode);
+
+ atomic_inc(&inode->i_count);
+
+ /* we're obviously changing it... */
+ mutex_lock(&inode->i_mutex);
+
+ /* sanity check */
+ BUG_ON(OCFS2_I(inode)->ip_handle);
+ BUG_ON(!list_empty(&OCFS2_I(inode)->ip_handle_list));
+
+ OCFS2_I(inode)->ip_handle = handle;
+ list_del(&(OCFS2_I(inode)->ip_handle_list));
+ list_add_tail(&(OCFS2_I(inode)->ip_handle_list), &(handle->inode_list));
+}
+
+static void ocfs2_handle_unlock_inodes(struct ocfs2_journal_handle *handle)
+{
+ struct list_head *p, *n;
+ struct inode *inode;
+ struct ocfs2_inode_info *oi;
+
+ list_for_each_safe(p, n, &handle->inode_list) {
+ oi = list_entry(p, struct ocfs2_inode_info,
+ ip_handle_list);
+ inode = &oi->vfs_inode;
+
+ OCFS2_I(inode)->ip_handle = NULL;
+ list_del_init(&OCFS2_I(inode)->ip_handle_list);
+
+ mutex_unlock(&inode->i_mutex);
+ iput(inode);
+ }
+}
+
+/* This is trivial so we do it out of the main commit
+ * paths. Beware, it can be called from start_trans too! */
+static void ocfs2_commit_unstarted_handle(struct ocfs2_journal_handle *handle)
+{
+ mlog_entry_void();
+
+ BUG_ON(handle->flags & OCFS2_HANDLE_STARTED);
+
+ ocfs2_handle_unlock_inodes(handle);
+ /* You are allowed to add journal locks before the transaction
+ * has started. */
+ ocfs2_handle_cleanup_locks(handle->journal, handle);
+
+ kfree(handle);
+
+ mlog_exit_void();
+}
+
+void ocfs2_commit_trans(struct ocfs2_journal_handle *handle)
+{
+ handle_t *jbd_handle;
+ int retval;
+ struct ocfs2_journal *journal = handle->journal;
+
+ mlog_entry_void();
+
+ BUG_ON(!handle);
+
+ if (!(handle->flags & OCFS2_HANDLE_STARTED)) {
+ ocfs2_commit_unstarted_handle(handle);
+ mlog_exit_void();
+ return;
+ }
+
+ /* release inode semaphores we took during this transaction */
+ ocfs2_handle_unlock_inodes(handle);
+
+ /* ocfs2_extend_trans may have had to call journal_restart
+ * which will always commit the transaction, but may return
+ * error for any number of reasons. If this is the case, we
+ * clear k_handle as it's not valid any more. */
+ if (handle->k_handle) {
+ jbd_handle = handle->k_handle;
+
+ if (handle->flags & OCFS2_HANDLE_SYNC)
+ jbd_handle->h_sync = 1;
+ else
+ jbd_handle->h_sync = 0;
+
+ /* actually stop the transaction. if we've set h_sync,
+ * it'll have been committed when we return */
+ retval = journal_stop(jbd_handle);
+ if (retval < 0) {
+ mlog_errno(retval);
+ mlog(ML_ERROR, "Could not commit transaction\n");
+ BUG();
+ }
+
+ handle->k_handle = NULL; /* it's been free'd in journal_stop */
+ }
+
+ ocfs2_handle_cleanup_locks(journal, handle);
+
+ up_read(&journal->j_trans_barrier);
+
+ kfree(handle);
+ mlog_exit_void();
+}
+
+/*
+ * 'nblocks' is what you want to add to the current
+ * transaction. extend_trans will either extend the current handle by
+ * nblocks, or commit it and start a new one with nblocks credits.
+ *
+ * WARNING: This will not release any semaphores or disk locks taken
+ * during the transaction, so make sure they were taken *before*
+ * start_trans or we'll have ordering deadlocks.
+ *
+ * WARNING2: Note that we do *not* drop j_trans_barrier here. This is
+ * good because transaction ids haven't yet been recorded on the
+ * cluster locks associated with this handle.
+ */
+int ocfs2_extend_trans(struct ocfs2_journal_handle *handle,
+ int nblocks)
+{
+ int status;
+
+ BUG_ON(!handle);
+ BUG_ON(!(handle->flags & OCFS2_HANDLE_STARTED));
+ BUG_ON(!nblocks);
+
+ mlog_entry_void();
+
+ mlog(0, "Trying to extend transaction by %d blocks\n", nblocks);
+
+ status = journal_extend(handle->k_handle, nblocks);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ if (status > 0) {
+ mlog(0, "journal_extend failed, trying journal_restart\n");
+ status = journal_restart(handle->k_handle, nblocks);
+ if (status < 0) {
+ handle->k_handle = NULL;
+ mlog_errno(status);
+ goto bail;
+ }
+ handle->max_buffs = nblocks;
+ } else
+ handle->max_buffs += nblocks;
+
+ status = 0;
+bail:
+
+ mlog_exit(status);
+ return status;
+}
+
+int ocfs2_journal_access(struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ struct buffer_head *bh,
+ int type)
+{
+ int status;
+
+ BUG_ON(!inode);
+ BUG_ON(!handle);
+ BUG_ON(!bh);
+ BUG_ON(!(handle->flags & OCFS2_HANDLE_STARTED));
+
+ mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %hu\n",
+ (unsigned long long)bh->b_blocknr, type,
+ (type == OCFS2_JOURNAL_ACCESS_CREATE) ?
+ "OCFS2_JOURNAL_ACCESS_CREATE" :
+ "OCFS2_JOURNAL_ACCESS_WRITE",
+ bh->b_size);
+
+ /* we can safely remove this assertion after testing. */
+ if (!buffer_uptodate(bh)) {
+ mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n");
+ mlog(ML_ERROR, "b_blocknr=%llu\n",
+ (unsigned long long)bh->b_blocknr);
+ BUG();
+ }
+
+ /* Set the current transaction information on the inode so
+ * that the locking code knows whether it can drop its locks
+ * on this inode or not. We're protected from the commit
+ * thread updating the current transaction id until
+ * ocfs2_commit_trans() because ocfs2_start_trans() took
+ * j_trans_barrier for us. */
+ ocfs2_set_inode_lock_trans(OCFS2_SB(inode->i_sb)->journal, inode);
+
+ down(&OCFS2_I(inode)->ip_io_sem);
+ switch (type) {
+ case OCFS2_JOURNAL_ACCESS_CREATE:
+ case OCFS2_JOURNAL_ACCESS_WRITE:
+ status = journal_get_write_access(handle->k_handle, bh);
+ break;
+
+ case OCFS2_JOURNAL_ACCESS_UNDO:
+ status = journal_get_undo_access(handle->k_handle, bh);
+ break;
+
+ default:
+ status = -EINVAL;
+ mlog(ML_ERROR, "Uknown access type!\n");
+ }
+ up(&OCFS2_I(inode)->ip_io_sem);
+
+ if (status < 0)
+ mlog(ML_ERROR, "Error %d getting %d access to buffer!\n",
+ status, type);
+
+ mlog_exit(status);
+ return status;
+}
+
+int ocfs2_journal_dirty(struct ocfs2_journal_handle *handle,
+ struct buffer_head *bh)
+{
+ int status;
+
+ BUG_ON(!(handle->flags & OCFS2_HANDLE_STARTED));
+
+ mlog_entry("(bh->b_blocknr=%llu)\n",
+ (unsigned long long)bh->b_blocknr);
+
+ status = journal_dirty_metadata(handle->k_handle, bh);
+ if (status < 0)
+ mlog(ML_ERROR, "Could not dirty metadata buffer. "
+ "(bh->b_blocknr=%llu)\n",
+ (unsigned long long)bh->b_blocknr);
+
+ mlog_exit(status);
+ return status;
+}
+
+int ocfs2_journal_dirty_data(handle_t *handle,
+ struct buffer_head *bh)
+{
+ int err = journal_dirty_data(handle, bh);
+ if (err)
+ mlog_errno(err);
+ /* TODO: When we can handle it, abort the handle and go RO on
+ * error here. */
+
+ return err;
+}
+
+/* We always assume you're adding a metadata lock at level 'ex' */
+int ocfs2_handle_add_lock(struct ocfs2_journal_handle *handle,
+ struct inode *inode)
+{
+ int status;
+ struct ocfs2_journal_lock *lock;
+
+ BUG_ON(!inode);
+
+ lock = kmem_cache_alloc(ocfs2_lock_cache, GFP_NOFS);
+ if (!lock) {
+ status = -ENOMEM;
+ mlog_errno(-ENOMEM);
+ goto bail;
+ }
+
+ if (!igrab(inode))
+ BUG();
+ lock->jl_inode = inode;
+
+ list_add_tail(&(lock->jl_lock_list), &(handle->locks));
+ handle->num_locks++;
+
+ status = 0;
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+static void ocfs2_handle_cleanup_locks(struct ocfs2_journal *journal,
+ struct ocfs2_journal_handle *handle)
+{
+ struct list_head *p, *n;
+ struct ocfs2_journal_lock *lock;
+ struct inode *inode;
+
+ list_for_each_safe(p, n, &(handle->locks)) {
+ lock = list_entry(p, struct ocfs2_journal_lock,
+ jl_lock_list);
+ list_del(&lock->jl_lock_list);
+ handle->num_locks--;
+
+ inode = lock->jl_inode;
+ ocfs2_meta_unlock(inode, 1);
+ if (atomic_read(&inode->i_count) == 1)
+ mlog(ML_ERROR,
+ "Inode %"MLFu64", I'm doing a last iput for!",
+ OCFS2_I(inode)->ip_blkno);
+ iput(inode);
+ kmem_cache_free(ocfs2_lock_cache, lock);
+ }
+}
+
+#define OCFS2_DEFAULT_COMMIT_INTERVAL (HZ * 5)
+
+void ocfs2_set_journal_params(struct ocfs2_super *osb)
+{
+ journal_t *journal = osb->journal->j_journal;
+
+ spin_lock(&journal->j_state_lock);
+ journal->j_commit_interval = OCFS2_DEFAULT_COMMIT_INTERVAL;
+ if (osb->s_mount_opt & OCFS2_MOUNT_BARRIER)
+ journal->j_flags |= JFS_BARRIER;
+ else
+ journal->j_flags &= ~JFS_BARRIER;
+ spin_unlock(&journal->j_state_lock);
+}
+
+int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty)
+{
+ int status = -1;
+ struct inode *inode = NULL; /* the journal inode */
+ journal_t *j_journal = NULL;
+ struct ocfs2_dinode *di = NULL;
+ struct buffer_head *bh = NULL;
+ struct ocfs2_super *osb;
+ int meta_lock = 0;
+
+ mlog_entry_void();
+
+ BUG_ON(!journal);
+
+ osb = journal->j_osb;
+
+ /* already have the inode for our journal */
+ inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
+ osb->slot_num);
+ if (inode == NULL) {
+ status = -EACCES;
+ mlog_errno(status);
+ goto done;
+ }
+ if (is_bad_inode(inode)) {
+ mlog(ML_ERROR, "access error (bad inode)\n");
+ iput(inode);
+ inode = NULL;
+ status = -EACCES;
+ goto done;
+ }
+
+ SET_INODE_JOURNAL(inode);
+ OCFS2_I(inode)->ip_open_count++;
+
+ status = ocfs2_meta_lock(inode, NULL, &bh, 1);
+ if (status < 0) {
+ if (status != -ERESTARTSYS)
+ mlog(ML_ERROR, "Could not get lock on journal!\n");
+ goto done;
+ }
+
+ meta_lock = 1;
+ di = (struct ocfs2_dinode *)bh->b_data;
+
+ if (inode->i_size < OCFS2_MIN_JOURNAL_SIZE) {
+ mlog(ML_ERROR, "Journal file size (%lld) is too small!\n",
+ inode->i_size);
+ status = -EINVAL;
+ goto done;
+ }
+
+ mlog(0, "inode->i_size = %lld\n", inode->i_size);
+ mlog(0, "inode->i_blocks = %lu\n", inode->i_blocks);
+ mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters);
+
+ /* call the kernels journal init function now */
+ j_journal = journal_init_inode(inode);
+ if (j_journal == NULL) {
+ mlog(ML_ERROR, "Linux journal layer error\n");
+ status = -EINVAL;
+ goto done;
+ }
+
+ mlog(0, "Returned from journal_init_inode\n");
+ mlog(0, "j_journal->j_maxlen = %u\n", j_journal->j_maxlen);
+
+ *dirty = (le32_to_cpu(di->id1.journal1.ij_flags) &
+ OCFS2_JOURNAL_DIRTY_FL);
+
+ journal->j_journal = j_journal;
+ journal->j_inode = inode;
+ journal->j_bh = bh;
+
+ ocfs2_set_journal_params(osb);
+
+ journal->j_state = OCFS2_JOURNAL_LOADED;
+
+ status = 0;
+done:
+ if (status < 0) {
+ if (meta_lock)
+ ocfs2_meta_unlock(inode, 1);
+ if (bh != NULL)
+ brelse(bh);
+ if (inode) {
+ OCFS2_I(inode)->ip_open_count--;
+ iput(inode);
+ }
+ }
+
+ mlog_exit(status);
+ return status;
+}
+
+static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb,
+ int dirty)
+{
+ int status;
+ unsigned int flags;
+ struct ocfs2_journal *journal = osb->journal;
+ struct buffer_head *bh = journal->j_bh;
+ struct ocfs2_dinode *fe;
+
+ mlog_entry_void();
+
+ fe = (struct ocfs2_dinode *)bh->b_data;
+ if (!OCFS2_IS_VALID_DINODE(fe)) {
+ /* This is called from startup/shutdown which will
+ * handle the errors in a specific manner, so no need
+ * to call ocfs2_error() here. */
+ mlog(ML_ERROR, "Journal dinode %"MLFu64" has invalid "
+ "signature: %.*s", fe->i_blkno, 7, fe->i_signature);
+ status = -EIO;
+ goto out;
+ }
+
+ flags = le32_to_cpu(fe->id1.journal1.ij_flags);
+ if (dirty)
+ flags |= OCFS2_JOURNAL_DIRTY_FL;
+ else
+ flags &= ~OCFS2_JOURNAL_DIRTY_FL;
+ fe->id1.journal1.ij_flags = cpu_to_le32(flags);
+
+ status = ocfs2_write_block(osb, bh, journal->j_inode);
+ if (status < 0)
+ mlog_errno(status);
+
+out:
+ mlog_exit(status);
+ return status;
+}
+
+/*
+ * If the journal has been kmalloc'd it needs to be freed after this
+ * call.
+ */
+void ocfs2_journal_shutdown(struct ocfs2_super *osb)
+{
+ struct ocfs2_journal *journal = NULL;
+ int status = 0;
+ struct inode *inode = NULL;
+ int num_running_trans = 0;
+
+ mlog_entry_void();
+
+ if (!osb)
+ BUG();
+
+ journal = osb->journal;
+ if (!journal)
+ goto done;
+
+ inode = journal->j_inode;
+
+ if (journal->j_state != OCFS2_JOURNAL_LOADED)
+ goto done;
+
+ /* need to inc inode use count as journal_destroy will iput. */
+ if (!igrab(inode))
+ BUG();
+
+ num_running_trans = atomic_read(&(osb->journal->j_num_trans));
+ if (num_running_trans > 0)
+ mlog(0, "Shutting down journal: must wait on %d "
+ "running transactions!\n",
+ num_running_trans);
+
+ /* Do a commit_cache here. It will flush our journal, *and*
+ * release any locks that are still held.
+ * set the SHUTDOWN flag and release the trans lock.
+ * the commit thread will take the trans lock for us below. */
+ journal->j_state = OCFS2_JOURNAL_IN_SHUTDOWN;
+
+ /* The OCFS2_JOURNAL_IN_SHUTDOWN flag signals commit_cache not to
+ * drop the trans_lock (which we want to hold until we
+ * completely destroy the journal). */
+ if (osb->commit_task) {
+ /* Wait for the commit thread */
+ mlog(0, "Waiting for ocfs2commit to exit....\n");
+ kthread_stop(osb->commit_task);
+ osb->commit_task = NULL;
+ }
+
+ BUG_ON(atomic_read(&(osb->journal->j_num_trans)) != 0);
+
+ status = ocfs2_journal_toggle_dirty(osb, 0);
+ if (status < 0)
+ mlog_errno(status);
+
+ /* Shutdown the kernel journal system */
+ journal_destroy(journal->j_journal);
+
+ OCFS2_I(inode)->ip_open_count--;
+
+ /* unlock our journal */
+ ocfs2_meta_unlock(inode, 1);
+
+ brelse(journal->j_bh);
+ journal->j_bh = NULL;
+
+ journal->j_state = OCFS2_JOURNAL_FREE;
+
+// up_write(&journal->j_trans_barrier);
+done:
+ if (inode)
+ iput(inode);
+ mlog_exit_void();
+}
+
+static void ocfs2_clear_journal_error(struct super_block *sb,
+ journal_t *journal,
+ int slot)
+{
+ int olderr;
+
+ olderr = journal_errno(journal);
+ if (olderr) {
+ mlog(ML_ERROR, "File system error %d recorded in "
+ "journal %u.\n", olderr, slot);
+ mlog(ML_ERROR, "File system on device %s needs checking.\n",
+ sb->s_id);
+
+ journal_ack_err(journal);
+ journal_clear_err(journal);
+ }
+}
+
+int ocfs2_journal_load(struct ocfs2_journal *journal)
+{
+ int status = 0;
+ struct ocfs2_super *osb;
+
+ mlog_entry_void();
+
+ if (!journal)
+ BUG();
+
+ osb = journal->j_osb;
+
+ status = journal_load(journal->j_journal);
+ if (status < 0) {
+ mlog(ML_ERROR, "Failed to load journal!\n");
+ goto done;
+ }
+
+ ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num);
+
+ status = ocfs2_journal_toggle_dirty(osb, 1);
+ if (status < 0) {
+ mlog_errno(status);
+ goto done;
+ }
+
+ /* Launch the commit thread */
+ osb->commit_task = kthread_run(ocfs2_commit_thread, osb, "ocfs2cmt-%d",
+ osb->osb_id);
+ if (IS_ERR(osb->commit_task)) {
+ status = PTR_ERR(osb->commit_task);
+ osb->commit_task = NULL;
+ mlog(ML_ERROR, "unable to launch ocfs2commit thread, error=%d",
+ status);
+ goto done;
+ }
+
+done:
+ mlog_exit(status);
+ return status;
+}
+
+
+/* 'full' flag tells us whether we clear out all blocks or if we just
+ * mark the journal clean */
+int ocfs2_journal_wipe(struct ocfs2_journal *journal, int full)
+{
+ int status;
+
+ mlog_entry_void();
+
+ if (!journal)
+ BUG();
+
+ status = journal_wipe(journal->j_journal, full);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_journal_toggle_dirty(journal->j_osb, 0);
+ if (status < 0)
+ mlog_errno(status);
+
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+/*
+ * JBD might read a cached version of another node's journal file. We
+ * don't want this as this file changes often and we get no
+ * notification on those changes. The only way to be sure that we've
+ * got the most up to date version of those blocks then is to force
+ * read them off disk. Just searching through the buffer cache won't
+ * work as there may be pages backing this file which are still marked
+ * up to date. We know things can't change on this file underneath us
+ * as we have the lock by now :)
+ */
+static int ocfs2_force_read_journal(struct inode *inode)
+{
+ int status = 0;
+ int i, p_blocks;
+ u64 v_blkno, p_blkno;
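+/* Read the journal in chunks of up to this many buffer_heads per pass
+ * through the loop below. */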
+#define CONCURRENT_JOURNAL_FILL 32
+ struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL];
+
+ mlog_entry_void();
+
+ BUG_ON(inode->i_blocks !=
+ ocfs2_align_bytes_to_sectors(i_size_read(inode)));
+
+ memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL);
+
+ mlog(0, "Force reading %lu blocks\n",
+ (inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9)));
+
+ v_blkno = 0;
+ while (v_blkno <
+ (inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9))) {
+
+ status = ocfs2_extent_map_get_blocks(inode, v_blkno,
+ 1, &p_blkno,
+ &p_blocks);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ if (p_blocks > CONCURRENT_JOURNAL_FILL)
+ p_blocks = CONCURRENT_JOURNAL_FILL;
+
+ status = ocfs2_read_blocks(OCFS2_SB(inode->i_sb),
+ p_blkno, p_blocks, bhs, 0,
+ inode);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ for(i = 0; i < p_blocks; i++) {
+ brelse(bhs[i]);
+ bhs[i] = NULL;
+ }
+
+ v_blkno += p_blocks;
+ }
+
+bail:
+ for(i = 0; i < CONCURRENT_JOURNAL_FILL; i++)
+ if (bhs[i])
+ brelse(bhs[i]);
+ mlog_exit(status);
+ return status;
+}
+
+struct ocfs2_la_recovery_item {
+ struct list_head lri_list;
+ int lri_slot;
+ struct ocfs2_dinode *lri_la_dinode;
+ struct ocfs2_dinode *lri_tl_dinode;
+};
+
+/* Does the second half of the recovery process. By this point, the
+ * node is marked clean and can actually be considered recovered,
+ * hence it's no longer in the recovery map, but there's still some
+ * cleanup we can do which shouldn't happen within the recovery thread
+ * as locking in that context becomes very difficult if we are to take
+ * recovering nodes into account.
+ *
+ * NOTE: This function can and will sleep on recovery of other nodes
+ * during cluster locking, just like any other ocfs2 process.
+ */
+void ocfs2_complete_recovery(void *data)
+{
+ int ret;
+ struct ocfs2_super *osb = data;
+ struct ocfs2_journal *journal = osb->journal;
+ struct ocfs2_dinode *la_dinode, *tl_dinode;
+ struct ocfs2_la_recovery_item *item;
+ struct list_head *p, *n;
+ LIST_HEAD(tmp_la_list);
+
+ mlog_entry_void();
+
+ mlog(0, "completing recovery from keventd\n");
+
+ spin_lock(&journal->j_lock);
+ list_splice_init(&journal->j_la_cleanups, &tmp_la_list);
+ spin_unlock(&journal->j_lock);
+
+ list_for_each_safe(p, n, &tmp_la_list) {
+ item = list_entry(p, struct ocfs2_la_recovery_item, lri_list);
+ list_del_init(&item->lri_list);
+
+ mlog(0, "Complete recovery for slot %d\n", item->lri_slot);
+
+ la_dinode = item->lri_la_dinode;
+ if (la_dinode) {
+ mlog(0, "Clean up local alloc %"MLFu64"\n",
+ la_dinode->i_blkno);
+
+ ret = ocfs2_complete_local_alloc_recovery(osb,
+ la_dinode);
+ if (ret < 0)
+ mlog_errno(ret);
+
+ kfree(la_dinode);
+ }
+
+ tl_dinode = item->lri_tl_dinode;
+ if (tl_dinode) {
+ mlog(0, "Clean up truncate log %"MLFu64"\n",
+ tl_dinode->i_blkno);
+
+ ret = ocfs2_complete_truncate_log_recovery(osb,
+ tl_dinode);
+ if (ret < 0)
+ mlog_errno(ret);
+
+ kfree(tl_dinode);
+ }
+
+ ret = ocfs2_recover_orphans(osb, item->lri_slot);
+ if (ret < 0)
+ mlog_errno(ret);
+
+ kfree(item);
+ }
+
+ mlog(0, "Recovery completion\n");
+ mlog_exit_void();
+}
+
+/* NOTE: This function always eats your references to la_dinode and
+ * tl_dinode, either manually on error, or by passing them to
+ * ocfs2_complete_recovery */
+static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal,
+ int slot_num,
+ struct ocfs2_dinode *la_dinode,
+ struct ocfs2_dinode *tl_dinode)
+{
+ struct ocfs2_la_recovery_item *item;
+
+ item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_KERNEL);
+ if (!item) {
+ /* Though we wish to avoid it, we are in fact safe in
+ * skipping local alloc cleanup as fsck.ocfs2 is more
+ * than capable of reclaiming unused space. */
+ if (la_dinode)
+ kfree(la_dinode);
+
+ if (tl_dinode)
+ kfree(tl_dinode);
+
+ mlog_errno(-ENOMEM);
+ return;
+ }
+
+ INIT_LIST_HEAD(&item->lri_list);
+ item->lri_la_dinode = la_dinode;
+ item->lri_slot = slot_num;
+ item->lri_tl_dinode = tl_dinode;
+
+ spin_lock(&journal->j_lock);
+ list_add_tail(&item->lri_list, &journal->j_la_cleanups);
+ queue_work(ocfs2_wq, &journal->j_recovery_work);
+ spin_unlock(&journal->j_lock);
+}
+
+/* Called by the mount code to queue the last part of recovery for
+ * its own slot. */
+void ocfs2_complete_mount_recovery(struct ocfs2_super *osb)
+{
+ struct ocfs2_journal *journal = osb->journal;
+
+ if (osb->dirty) {
+ /* No need to queue up our truncate_log as regular
+ * cleanup will catch that. */
+ ocfs2_queue_recovery_completion(journal,
+ osb->slot_num,
+ osb->local_alloc_copy,
+ NULL);
+ ocfs2_schedule_truncate_log_flush(osb, 0);
+
+ osb->local_alloc_copy = NULL;
+ osb->dirty = 0;
+ }
+}
+
+static int __ocfs2_recovery_thread(void *arg)
+{
+ int status, node_num;
+ struct ocfs2_super *osb = arg;
+
+ mlog_entry_void();
+
+ status = ocfs2_wait_on_mount(osb);
+ if (status < 0) {
+ goto bail;
+ }
+
+restart:
+ status = ocfs2_super_lock(osb, 1);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ while(!ocfs2_node_map_is_empty(osb, &osb->recovery_map)) {
+ node_num = ocfs2_node_map_first_set_bit(osb,
+ &osb->recovery_map);
+ if (node_num == O2NM_INVALID_NODE_NUM) {
+ mlog(0, "Out of nodes to recover.\n");
+ break;
+ }
+
+ status = ocfs2_recover_node(osb, node_num);
+ if (status < 0) {
+ mlog(ML_ERROR,
+ "Error %d recovering node %d on device (%u,%u)!\n",
+ status, node_num,
+ MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
+ mlog(ML_ERROR, "Volume requires unmount.\n");
+ continue;
+ }
+
+ ocfs2_recovery_map_clear(osb, node_num);
+ }
+ ocfs2_super_unlock(osb, 1);
+
+ /* We always run recovery on our own orphan dir - the dead
+ * node(s) may have voted "no" on an inode delete earlier. A
+ * revote is therefore required. */
+ ocfs2_queue_recovery_completion(osb->journal, osb->slot_num, NULL,
+ NULL);
+
+bail:
+ down(&osb->recovery_lock);
+ if (!status &&
+ !ocfs2_node_map_is_empty(osb, &osb->recovery_map)) {
+ up(&osb->recovery_lock);
+ goto restart;
+ }
+
+ osb->recovery_thread_task = NULL;
+ mb(); /* sync with ocfs2_recovery_thread_running */
+ wake_up(&osb->recovery_event);
+
+ up(&osb->recovery_lock);
+
+ mlog_exit(status);
+ /* no one is calling kthread_stop() for us so the kthread() API
+ * requires that we call do_exit(). And it isn't exported, but
+ * complete_and_exit() seems to be a minimal wrapper around it. */
+ complete_and_exit(NULL, status);
+ return status;
+}
+
+void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
+{
+ mlog_entry("(node_num=%d, osb->node_num = %d)\n",
+ node_num, osb->node_num);
+
+ down(&osb->recovery_lock);
+ if (osb->disable_recovery)
+ goto out;
+
+ /* People waiting on recovery will wait on
+ * the recovery map to empty. */
+ if (!ocfs2_recovery_map_set(osb, node_num))
+ mlog(0, "node %d already be in recovery.\n", node_num);
+
+ mlog(0, "starting recovery thread...\n");
+
+ if (osb->recovery_thread_task)
+ goto out;
+
+ osb->recovery_thread_task = kthread_run(__ocfs2_recovery_thread, osb,
+ "ocfs2rec-%d", osb->osb_id);
+ if (IS_ERR(osb->recovery_thread_task)) {
+ mlog_errno((int)PTR_ERR(osb->recovery_thread_task));
+ osb->recovery_thread_task = NULL;
+ }
+
+out:
+ up(&osb->recovery_lock);
+ wake_up(&osb->recovery_event);
+
+ mlog_exit_void();
+}
+
+/* Does the actual journal replay and marks the journal inode as
+ * clean. Will only replay if the journal inode is marked dirty. */
+static int ocfs2_replay_journal(struct ocfs2_super *osb,
+ int node_num,
+ int slot_num)
+{
+ int status;
+ int got_lock = 0;
+ unsigned int flags;
+ struct inode *inode = NULL;
+ struct ocfs2_dinode *fe;
+ journal_t *journal = NULL;
+ struct buffer_head *bh = NULL;
+
+ inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
+ slot_num);
+ if (inode == NULL) {
+ status = -EACCES;
+ mlog_errno(status);
+ goto done;
+ }
+ if (is_bad_inode(inode)) {
+ status = -EACCES;
+ iput(inode);
+ inode = NULL;
+ mlog_errno(status);
+ goto done;
+ }
+ SET_INODE_JOURNAL(inode);
+
+ status = ocfs2_meta_lock_full(inode, NULL, &bh, 1,
+ OCFS2_META_LOCK_RECOVERY);
+ if (status < 0) {
+ mlog(0, "status returned from ocfs2_meta_lock=%d\n", status);
+ if (status != -ERESTARTSYS)
+ mlog(ML_ERROR, "Could not lock journal!\n");
+ goto done;
+ }
+ got_lock = 1;
+
+ fe = (struct ocfs2_dinode *) bh->b_data;
+
+ flags = le32_to_cpu(fe->id1.journal1.ij_flags);
+
+ if (!(flags & OCFS2_JOURNAL_DIRTY_FL)) {
+ mlog(0, "No recovery required for node %d\n", node_num);
+ goto done;
+ }
+
+ mlog(ML_NOTICE, "Recovering node %d from slot %d on device (%u,%u)\n",
+ node_num, slot_num,
+ MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
+
+ OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
+
+ status = ocfs2_force_read_journal(inode);
+ if (status < 0) {
+ mlog_errno(status);
+ goto done;
+ }
+
+ mlog(0, "calling journal_init_inode\n");
+ journal = journal_init_inode(inode);
+ if (journal == NULL) {
+ mlog(ML_ERROR, "Linux journal layer error\n");
+ status = -EIO;
+ goto done;
+ }
+
+ status = journal_load(journal);
+ if (status < 0) {
+ mlog_errno(status);
+ if (!igrab(inode))
+ BUG();
+ journal_destroy(journal);
+ goto done;
+ }
+
+ ocfs2_clear_journal_error(osb->sb, journal, slot_num);
+
+ /* wipe the journal */
+ mlog(0, "flushing the journal.\n");
+ journal_lock_updates(journal);
+ status = journal_flush(journal);
+ journal_unlock_updates(journal);
+ if (status < 0)
+ mlog_errno(status);
+
+ /* This will mark the node clean */
+ flags = le32_to_cpu(fe->id1.journal1.ij_flags);
+ flags &= ~OCFS2_JOURNAL_DIRTY_FL;
+ fe->id1.journal1.ij_flags = cpu_to_le32(flags);
+
+ status = ocfs2_write_block(osb, bh, inode);
+ if (status < 0)
+ mlog_errno(status);
+
+ if (!igrab(inode))
+ BUG();
+
+ journal_destroy(journal);
+
+done:
+ /* drop the lock on this node's journal */
+ if (got_lock)
+ ocfs2_meta_unlock(inode, 1);
+
+ if (inode)
+ iput(inode);
+
+ if (bh)
+ brelse(bh);
+
+ mlog_exit(status);
+ return status;
+}
+
+/*
+ * Do the most important parts of node recovery:
+ * - Replay its journal
+ * - Stamp a clean local allocator file
+ * - Stamp a clean truncate log
+ * - Mark the node clean
+ *
+ * If this function completes without error, a node in OCFS2 can be
+ * said to have been safely recovered. As a result, failure during the
+ * second part of a node's recovery process (local alloc recovery) is
+ * far less concerning.
+ */
+static int ocfs2_recover_node(struct ocfs2_super *osb,
+ int node_num)
+{
+ int status = 0;
+ int slot_num;
+ struct ocfs2_slot_info *si = osb->slot_info;
+ struct ocfs2_dinode *la_copy = NULL;
+ struct ocfs2_dinode *tl_copy = NULL;
+
+ mlog_entry("(node_num=%d, osb->node_num = %d)\n",
+ node_num, osb->node_num);
+
+ mlog(0, "checking node %d\n", node_num);
+
+ /* Should not ever be called to recover ourselves -- in that
+ * case we should've called ocfs2_journal_load instead. */
+ if (osb->node_num == node_num)
+ BUG();
+
+ slot_num = ocfs2_node_num_to_slot(si, node_num);
+ if (slot_num == OCFS2_INVALID_SLOT) {
+ status = 0;
+ mlog(0, "no slot for this node, so no recovery required.\n");
+ goto done;
+ }
+
+ mlog(0, "node %d was using slot %d\n", node_num, slot_num);
+
+ status = ocfs2_replay_journal(osb, node_num, slot_num);
+ if (status < 0) {
+ mlog_errno(status);
+ goto done;
+ }
+
+ /* Stamp a clean local alloc file AFTER recovering the journal... */
+ status = ocfs2_begin_local_alloc_recovery(osb, slot_num, &la_copy);
+ if (status < 0) {
+ mlog_errno(status);
+ goto done;
+ }
+
+ /* An error from begin_truncate_log_recovery is not
+ * serious enough to warrant halting the rest of
+ * recovery. */
+ status = ocfs2_begin_truncate_log_recovery(osb, slot_num, &tl_copy);
+ if (status < 0)
+ mlog_errno(status);
+
+ /* Likewise, this would be a strange but ultimately not so
+ * harmful place to get an error... */
+ ocfs2_clear_slot(si, slot_num);
+ status = ocfs2_update_disk_slots(osb, si);
+ if (status < 0)
+ mlog_errno(status);
+
+ /* This will kfree the memory pointed to by la_copy and tl_copy */
+ ocfs2_queue_recovery_completion(osb->journal, slot_num, la_copy,
+ tl_copy);
+
+ status = 0;
+done:
+
+ mlog_exit(status);
+ return status;
+}
+
+/* Test node liveness by trylocking its journal. If we get the lock,
+ * we drop it here. Return 0 if we got the lock, -EAGAIN if the node
+ * is still alive (we couldn't get the lock) and < 0 on error. */
+static int ocfs2_trylock_journal(struct ocfs2_super *osb,
+ int slot_num)
+{
+ int status, flags;
+ struct inode *inode = NULL;
+
+ inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE,
+ slot_num);
+ if (inode == NULL) {
+ mlog(ML_ERROR, "access error\n");
+ status = -EACCES;
+ goto bail;
+ }
+ if (is_bad_inode(inode)) {
+ mlog(ML_ERROR, "access error (bad inode)\n");
+ iput(inode);
+ inode = NULL;
+ status = -EACCES;
+ goto bail;
+ }
+ SET_INODE_JOURNAL(inode);
+
+ flags = OCFS2_META_LOCK_RECOVERY | OCFS2_META_LOCK_NOQUEUE;
+ status = ocfs2_meta_lock_full(inode, NULL, NULL, 1, flags);
+ if (status < 0) {
+ if (status != -EAGAIN)
+ mlog_errno(status);
+ goto bail;
+ }
+
+ ocfs2_meta_unlock(inode, 1);
+bail:
+ if (inode)
+ iput(inode);
+
+ return status;
+}
+
+/* Call this underneath ocfs2_super_lock. It also assumes that the
+ * slot info struct has been updated from disk. */
+int ocfs2_mark_dead_nodes(struct ocfs2_super *osb)
+{
+ int status, i, node_num;
+ struct ocfs2_slot_info *si = osb->slot_info;
+
+ /* This is called with the super block cluster lock, so we
+ * know that the slot map can't change underneath us. */
+
+ spin_lock(&si->si_lock);
+ for(i = 0; i < si->si_num_slots; i++) {
+ if (i == osb->slot_num)
+ continue;
+ if (ocfs2_is_empty_slot(si, i))
+ continue;
+
+ node_num = si->si_global_node_nums[i];
+ if (ocfs2_node_map_test_bit(osb, &osb->recovery_map, node_num))
+ continue;
+ spin_unlock(&si->si_lock);
+
+ /* Ok, we have a slot occupied by another node which
+ * is not in the recovery map. We trylock its journal
+ * file here to test whether it's alive. */
+ status = ocfs2_trylock_journal(osb, i);
+ if (!status) {
+ /* Since we're called from mount, we know that
+ * the recovery thread can't race us on
+ * setting / checking the recovery bits. */
+ ocfs2_recovery_thread(osb, node_num);
+ } else if ((status < 0) && (status != -EAGAIN)) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ spin_lock(&si->si_lock);
+ }
+ spin_unlock(&si->si_lock);
+
+ status = 0;
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+static int ocfs2_recover_orphans(struct ocfs2_super *osb,
+ int slot)
+{
+ int status = 0;
+ int have_disk_lock = 0;
+ struct inode *inode = NULL;
+ struct inode *iter;
+ struct inode *orphan_dir_inode = NULL;
+ unsigned long offset, blk, local;
+ struct buffer_head *bh = NULL;
+ struct ocfs2_dir_entry *de;
+ struct super_block *sb = osb->sb;
+ struct ocfs2_inode_info *oi;
+
+ mlog(0, "Recover inodes from orphan dir in slot %d\n", slot);
+
+ orphan_dir_inode = ocfs2_get_system_file_inode(osb,
+ ORPHAN_DIR_SYSTEM_INODE,
+ slot);
+ if (!orphan_dir_inode) {
+ status = -ENOENT;
+ mlog_errno(status);
+ goto out;
+ }
+
+ mutex_lock(&orphan_dir_inode->i_mutex);
+ status = ocfs2_meta_lock(orphan_dir_inode, NULL, NULL, 0);
+ if (status < 0) {
+ mutex_unlock(&orphan_dir_inode->i_mutex);
+ mlog_errno(status);
+ goto out;
+ }
+ have_disk_lock = 1;
+
+ offset = 0;
+ iter = NULL;
+ while(offset < i_size_read(orphan_dir_inode)) {
+ blk = offset >> sb->s_blocksize_bits;
+
+ bh = ocfs2_bread(orphan_dir_inode, blk, &status, 0);
+ if (!bh)
+ status = -EINVAL;
+ if (status < 0) {
+ mutex_unlock(&orphan_dir_inode->i_mutex);
+ if (bh)
+ brelse(bh);
+ mlog_errno(status);
+ goto out;
+ }
+
+ local = 0;
+ while(offset < i_size_read(orphan_dir_inode)
+ && local < sb->s_blocksize) {
+ de = (struct ocfs2_dir_entry *) (bh->b_data + local);
+
+ if (!ocfs2_check_dir_entry(orphan_dir_inode,
+ de, bh, local)) {
+ mutex_unlock(&orphan_dir_inode->i_mutex);
+ status = -EINVAL;
+ mlog_errno(status);
+ brelse(bh);
+ goto out;
+ }
+
+ local += le16_to_cpu(de->rec_len);
+ offset += le16_to_cpu(de->rec_len);
+
+ /* I guess we silently fail on no inode? */
+ if (!le64_to_cpu(de->inode))
+ continue;
+ if (de->file_type > OCFS2_FT_MAX) {
+ mlog(ML_ERROR,
+ "block %llu contains invalid de: "
+ "inode = %"MLFu64", rec_len = %u, "
+ "name_len = %u, file_type = %u, "
+ "name='%.*s'\n",
+ (unsigned long long)bh->b_blocknr,
+ le64_to_cpu(de->inode),
+ le16_to_cpu(de->rec_len),
+ de->name_len,
+ de->file_type,
+ de->name_len,
+ de->name);
+ continue;
+ }
+ if (de->name_len == 1 && !strncmp(".", de->name, 1))
+ continue;
+ if (de->name_len == 2 && !strncmp("..", de->name, 2))
+ continue;
+
+ iter = ocfs2_iget(osb, le64_to_cpu(de->inode));
+ if (IS_ERR(iter))
+ continue;
+
+ mlog(0, "queue orphan %"MLFu64"\n",
+ OCFS2_I(iter)->ip_blkno);
+ OCFS2_I(iter)->ip_next_orphan = inode;
+ inode = iter;
+ }
+ brelse(bh);
+ }
+ mutex_unlock(&orphan_dir_inode->i_mutex);
+
+ ocfs2_meta_unlock(orphan_dir_inode, 0);
+ have_disk_lock = 0;
+
+ iput(orphan_dir_inode);
+ orphan_dir_inode = NULL;
+
+ while (inode) {
+ oi = OCFS2_I(inode);
+ mlog(0, "iput orphan %"MLFu64"\n", oi->ip_blkno);
+
+ iter = oi->ip_next_orphan;
+
+ spin_lock(&oi->ip_lock);
+ /* Delete voting may have set these on the assumption
+ * that the other node would wipe them successfully.
+ * If they are still in the node's orphan dir, we need
+ * to reset that state. */
+ oi->ip_flags &= ~(OCFS2_INODE_DELETED|OCFS2_INODE_SKIP_DELETE);
+
+ /* Set the proper information to get us going into
+ * ocfs2_delete_inode. */
+ oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
+ oi->ip_orphaned_slot = slot;
+ spin_unlock(&oi->ip_lock);
+
+ iput(inode);
+
+ inode = iter;
+ }
+
+out:
+ if (have_disk_lock)
+ ocfs2_meta_unlock(orphan_dir_inode, 0);
+
+ if (orphan_dir_inode)
+ iput(orphan_dir_inode);
+
+ return status;
+}
+
+static int ocfs2_wait_on_mount(struct ocfs2_super *osb)
+{
+ /* This check is good because ocfs2 will wait on our recovery
+ * thread before changing it to something other than MOUNTED
+ * or DISABLED. */
+ wait_event(osb->osb_mount_event,
+ atomic_read(&osb->vol_state) == VOLUME_MOUNTED ||
+ atomic_read(&osb->vol_state) == VOLUME_DISABLED);
+
+ /* If there's an error on mount, then we may never get to the
+ * MOUNTED flag, but this is set right before
+ * dismount_volume() so we can trust it. */
+ if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) {
+ mlog(0, "mount error, exiting!\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int ocfs2_commit_thread(void *arg)
+{
+ int status;
+ struct ocfs2_super *osb = arg;
+ struct ocfs2_journal *journal = osb->journal;
+
+ /* we can trust j_num_trans here because _should_stop() is only set in
+ * shutdown and nobody other than ourselves should be able to start
+ * transactions. Committing on shutdown might take a few iterations
+ * as final transactions put deleted inodes on the list. */
+ while (!(kthread_should_stop() &&
+ atomic_read(&journal->j_num_trans) == 0)) {
+
+ wait_event_interruptible_timeout(osb->checkpoint_event,
+ atomic_read(&journal->j_num_trans)
+ || kthread_should_stop(),
+ OCFS2_CHECKPOINT_INTERVAL);
+
+ status = ocfs2_commit_cache(osb);
+ if (status < 0)
+ mlog_errno(status);
+
+ if (kthread_should_stop() && atomic_read(&journal->j_num_trans)){
+ mlog(ML_KTHREAD,
+ "commit_thread: %u transactions pending on "
+ "shutdown\n",
+ atomic_read(&journal->j_num_trans));
+ }
+ }
+
+ return 0;
+}
+
+/* Look for a dirty journal without taking any cluster locks. Used for
+ * hard readonly access to determine whether the file system journals
+ * require recovery. */
+int ocfs2_check_journals_nolocks(struct ocfs2_super *osb)
+{
+ int ret = 0;
+ unsigned int slot;
+ struct buffer_head *di_bh;
+ struct ocfs2_dinode *di;
+ struct inode *journal = NULL;
+
+ for(slot = 0; slot < osb->max_slots; slot++) {
+ journal = ocfs2_get_system_file_inode(osb,
+ JOURNAL_SYSTEM_INODE,
+ slot);
+ if (!journal || is_bad_inode(journal)) {
+ ret = -EACCES;
+ mlog_errno(ret);
+ goto out;
+ }
+
+ di_bh = NULL;
+ ret = ocfs2_read_block(osb, OCFS2_I(journal)->ip_blkno, &di_bh,
+ 0, journal);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ di = (struct ocfs2_dinode *) di_bh->b_data;
+
+ if (le32_to_cpu(di->id1.journal1.ij_flags) &
+ OCFS2_JOURNAL_DIRTY_FL)
+ ret = -EROFS;
+
+ brelse(di_bh);
+ if (ret)
+ break;
+ }
+
+out:
+ if (journal)
+ iput(journal);
+
+ return ret;
+}
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
new file mode 100644
index 00000000000..7d0a816184f
--- /dev/null
+++ b/fs/ocfs2/journal.h
@@ -0,0 +1,457 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * journal.h
+ *
+ * Defines journalling api and structures.
+ *
+ * Copyright (C) 2003, 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_JOURNAL_H
+#define OCFS2_JOURNAL_H
+
+#include <linux/fs.h>
+#include <linux/jbd.h>
+
+#define OCFS2_CHECKPOINT_INTERVAL (8 * HZ)
+
+enum ocfs2_journal_state {
+ OCFS2_JOURNAL_FREE = 0,
+ OCFS2_JOURNAL_LOADED,
+ OCFS2_JOURNAL_IN_SHUTDOWN,
+};
+
+struct ocfs2_super;
+struct ocfs2_dinode;
+struct ocfs2_journal_handle;
+
+struct ocfs2_journal {
+ enum ocfs2_journal_state j_state; /* Journal's current state */
+
+ journal_t *j_journal; /* The kernel's journal type */
+ struct inode *j_inode; /* Kernel inode pointing to
+ * this journal */
+ struct ocfs2_super *j_osb; /* pointer to the super
+ * block for the node
+ * we're currently
+ * running on -- not
+ * necessarily the super
+ * block from the node
+ * which we usually run
+ * from (recovery,
+ * etc) */
+ struct buffer_head *j_bh; /* Journal disk inode block */
+ atomic_t j_num_trans; /* Number of transactions
+ * currently in the system. */
+ unsigned long j_trans_id;
+ struct rw_semaphore j_trans_barrier;
+ wait_queue_head_t j_checkpointed;
+
+ spinlock_t j_lock;
+ struct list_head j_la_cleanups;
+ struct work_struct j_recovery_work;
+};
+
+extern spinlock_t trans_inc_lock;
+
+/* wrap j_trans_id so we never have it equal to zero. */
+static inline unsigned long ocfs2_inc_trans_id(struct ocfs2_journal *j)
+{
+ unsigned long old_id;
+ spin_lock(&trans_inc_lock);
+ old_id = j->j_trans_id++;
+ if (unlikely(!j->j_trans_id))
+ j->j_trans_id = 1;
+ spin_unlock(&trans_inc_lock);
+ return old_id;
+}
+
+static inline void ocfs2_set_inode_lock_trans(struct ocfs2_journal *journal,
+ struct inode *inode)
+{
+ spin_lock(&trans_inc_lock);
+ OCFS2_I(inode)->ip_last_trans = journal->j_trans_id;
+ spin_unlock(&trans_inc_lock);
+}
+
+/* Used to figure out whether it's safe to drop a metadata lock on an
+ * inode. Returns true if all the inode's changes have been
+ * checkpointed to disk. You should be holding the spinlock on the
+ * metadata lock while calling this to be sure that nobody can take
+ * the lock and put it on another transaction. */
+static inline int ocfs2_inode_fully_checkpointed(struct inode *inode)
+{
+ int ret;
+ struct ocfs2_journal *journal = OCFS2_SB(inode->i_sb)->journal;
+
+ spin_lock(&trans_inc_lock);
+ ret = time_after(journal->j_trans_id, OCFS2_I(inode)->ip_last_trans);
+ spin_unlock(&trans_inc_lock);
+ return ret;
+}
+
+/* Convenience function to check whether an inode is still new (has
+ * never hit disk). Will do you a favor and set created_trans = 0 when
+ * you've been checkpointed. Returns '1' if the inode is still new. */
+static inline int ocfs2_inode_is_new(struct inode *inode)
+{
+ int ret;
+
+ /* System files are never "new" as they're written out by
+ * mkfs. This helps us early during mount, before we have the
+ * journal open and j_trans_id could be junk. */
+ if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
+ return 0;
+ spin_lock(&trans_inc_lock);
+ ret = !(time_after(OCFS2_SB(inode->i_sb)->journal->j_trans_id,
+ OCFS2_I(inode)->ip_created_trans));
+ if (!ret)
+ OCFS2_I(inode)->ip_created_trans = 0;
+ spin_unlock(&trans_inc_lock);
+ return ret;
+}
+
+static inline void ocfs2_inode_set_new(struct ocfs2_super *osb,
+ struct inode *inode)
+{
+ spin_lock(&trans_inc_lock);
+ OCFS2_I(inode)->ip_created_trans = osb->journal->j_trans_id;
+ spin_unlock(&trans_inc_lock);
+}
+
+extern kmem_cache_t *ocfs2_lock_cache;
+
+struct ocfs2_journal_lock {
+ struct inode *jl_inode;
+ struct list_head jl_lock_list;
+};
+
+struct ocfs2_journal_handle {
+ handle_t *k_handle; /* kernel handle. */
+ struct ocfs2_journal *journal;
+ u32 flags; /* see flags below. */
+ int max_buffs; /* Buffs reserved by this handle */
+
+ /* The following two fields are for ocfs2_handle_add_lock */
+ int num_locks;
+ struct list_head locks; /* A bunch of locks to
+ * release on commit. This
+ * should be a list_head */
+
+ struct list_head inode_list;
+};
+
+#define OCFS2_HANDLE_STARTED 1
+/* should we sync-commit this handle? */
+#define OCFS2_HANDLE_SYNC 2
+static inline int ocfs2_handle_started(struct ocfs2_journal_handle *handle)
+{
+ return handle->flags & OCFS2_HANDLE_STARTED;
+}
+
+static inline void ocfs2_handle_set_sync(struct ocfs2_journal_handle *handle, int sync)
+{
+ if (sync)
+ handle->flags |= OCFS2_HANDLE_SYNC;
+ else
+ handle->flags &= ~OCFS2_HANDLE_SYNC;
+}
+
+/* Exported only for the journal struct init code in super.c. Do not call. */
+void ocfs2_complete_recovery(void *data);
+
+/*
+ * Journal Control:
+ * Initialize, Load, Shutdown, Wipe a journal.
+ *
+ * ocfs2_journal_init - Initialize journal structures in the OSB.
+ * ocfs2_journal_load - Load the given journal off disk. Replay it if
+ * there's transactions still in there.
+ * ocfs2_journal_shutdown - Shutdown a journal, this will flush all
+ * uncommitted, uncheckpointed transactions.
+ * ocfs2_journal_wipe - Wipe transactions from a journal. Optionally
+ * zero out each block.
+ * ocfs2_recovery_thread - Perform recovery on a node. osb is our own osb.
+ * ocfs2_mark_dead_nodes - Start recovery on nodes we won't get a heartbeat
+ * event on.
+ * ocfs2_start_checkpoint - Kick the commit thread to do a checkpoint.
+ */
+void ocfs2_set_journal_params(struct ocfs2_super *osb);
+int ocfs2_journal_init(struct ocfs2_journal *journal,
+ int *dirty);
+void ocfs2_journal_shutdown(struct ocfs2_super *osb);
+int ocfs2_journal_wipe(struct ocfs2_journal *journal,
+ int full);
+int ocfs2_journal_load(struct ocfs2_journal *journal);
+int ocfs2_check_journals_nolocks(struct ocfs2_super *osb);
+void ocfs2_recovery_thread(struct ocfs2_super *osb,
+ int node_num);
+int ocfs2_mark_dead_nodes(struct ocfs2_super *osb);
+void ocfs2_complete_mount_recovery(struct ocfs2_super *osb);
+
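+/*
+ * An illustrative, non-authoritative sketch of how the calls above fit
+ * together over a mount/unmount cycle (the mount code is the authority
+ * on the real ordering and error handling):
+ *
+ *	ocfs2_journal_init(osb->journal, &dirty);
+ *	ocfs2_journal_load(osb->journal);	(replays, starts the commit thread)
+ *	ocfs2_mark_dead_nodes(osb);		(kick recovery for dead slots)
+ *	ocfs2_complete_mount_recovery(osb);	(finish recovery of our own slot)
+ *	...
+ *	ocfs2_journal_shutdown(osb);		(flush and stop the commit thread)
+ */
+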
+static inline void ocfs2_start_checkpoint(struct ocfs2_super *osb)
+{
+ atomic_set(&osb->needs_checkpoint, 1);
+ wake_up(&osb->checkpoint_event);
+}
+
+static inline void ocfs2_checkpoint_inode(struct inode *inode)
+{
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ if (!ocfs2_inode_fully_checkpointed(inode)) {
+ /* WARNING: This only kicks off a single
+ * checkpoint. If someone races you and adds more
+ * metadata to the journal, you won't know, and will
+ * wind up waiting *a lot* longer than necessary. Right
+ * now we only use this in clear_inode so that's
+ * OK. */
+ ocfs2_start_checkpoint(osb);
+
+ wait_event(osb->journal->j_checkpointed,
+ ocfs2_inode_fully_checkpointed(inode));
+ }
+}
+
+/*
+ * Transaction Handling:
+ * Manage the lifetime of a transaction handle.
+ *
+ * ocfs2_alloc_handle - Only allocate a handle so we can start putting
+ * cluster locks on it. To actually change blocks,
+ * call ocfs2_start_trans with the handle returned
+ * from this function. You may call ocfs2_commit_trans
+ * at any time in the lifetime of a handle.
+ * ocfs2_start_trans - Begin a transaction. Give it an upper estimate of
+ * the number of blocks that will be changed during
+ * this handle.
+ * ocfs2_commit_trans - Complete a handle.
+ * ocfs2_extend_trans - Extend a handle by nblocks credits. This may
+ * commit the handle to disk in the process, but will
+ * not release any locks taken during the transaction.
+ * ocfs2_journal_access - Notify the handle that we want to journal this
+ * buffer. Will have to call ocfs2_journal_dirty once
+ * we've actually dirtied it. Type is one of the
+ * OCFS2_JOURNAL_ACCESS_* values defined below.
+ * ocfs2_journal_dirty - Mark a journalled buffer as having dirty data.
+ * ocfs2_journal_dirty_data - Indicate that a data buffer should go out before
+ * the current handle commits.
+ * ocfs2_handle_add_lock - Sometimes we need to delay lock release
+ * until after a transaction has been completed. Use
+ * ocfs2_handle_add_lock to indicate that a lock needs
+ * to be released at the end of that handle. Locks
+ * will be released in the order that they are added.
+ * ocfs2_handle_add_inode - Add a locked inode to a transaction.
+ */
+
+/* You must always start_trans with a number of buffs > 0, but it's
+ * perfectly legal to go through an entire transaction without having
+ * dirtied any buffers. */
+struct ocfs2_journal_handle *ocfs2_alloc_handle(struct ocfs2_super *osb);
+struct ocfs2_journal_handle *ocfs2_start_trans(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ int max_buffs);
+void ocfs2_commit_trans(struct ocfs2_journal_handle *handle);
+int ocfs2_extend_trans(struct ocfs2_journal_handle *handle,
+ int nblocks);
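+
+/*
+ * Hedged, illustrative sketch of a full handle lifecycle, mirroring the
+ * pattern used by callers such as ocfs2_shutdown_local_alloc() (error
+ * handling omitted; 'inode' and 'bh' stand in for whatever you're changing):
+ *
+ *	handle = ocfs2_alloc_handle(osb);
+ *	ocfs2_handle_add_inode(handle, inode);
+ *	<take any cluster locks against the handle>
+ *	handle = ocfs2_start_trans(osb, handle, OCFS2_INODE_UPDATE_CREDITS);
+ *	ocfs2_journal_access(handle, inode, bh, OCFS2_JOURNAL_ACCESS_WRITE);
+ *	<modify the bh>
+ *	ocfs2_journal_dirty(handle, bh);
+ *	ocfs2_commit_trans(handle);
+ */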
+
+/*
+ * Create access is for when we get a newly created buffer and we're
+ * not gonna read it off disk, but rather fill it ourselves. Right
+ * now, we don't do anything special with this (it turns into a write
+ * request), but this is a good placeholder in case we do...
+ *
+ * Write access is for when we read a block off disk and are going to
+ * modify it. This way the journalling layer knows it may need to make
+ * a copy of that block (if it's part of another, uncommitted
+ * transaction) before we do so.
+ */
+#define OCFS2_JOURNAL_ACCESS_CREATE 0
+#define OCFS2_JOURNAL_ACCESS_WRITE 1
+#define OCFS2_JOURNAL_ACCESS_UNDO 2
+
+int ocfs2_journal_access(struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ struct buffer_head *bh,
+ int type);
+/*
+ * A word about the journal_access/journal_dirty "dance". It is
+ * entirely legal to journal_access a buffer more than once (as long
+ * as the access type is the same -- I'm not sure what will happen if
+ * access type is different but this should never happen anyway). It is
+ * also legal to journal_dirty a buffer more than once. In fact, you
+ * can even journal_access a buffer after you've done a
+ * journal_access/journal_dirty pair. The only thing you cannot do
+ * however, is journal_dirty a buffer which you haven't yet passed to
+ * journal_access at least once.
+ *
+ * That said, 99% of the time this doesn't matter and this is what the
+ * path looks like:
+ *
+ * <read a bh>
+ * ocfs2_journal_access(handle, bh, OCFS2_JOURNAL_ACCESS_WRITE);
+ * <modify the bh>
+ * ocfs2_journal_dirty(handle, bh);
+ */
+int ocfs2_journal_dirty(struct ocfs2_journal_handle *handle,
+ struct buffer_head *bh);
+int ocfs2_journal_dirty_data(handle_t *handle,
+ struct buffer_head *bh);
+int ocfs2_handle_add_lock(struct ocfs2_journal_handle *handle,
+ struct inode *inode);
+/*
+ * Use this to protect from other processes reading buffer state while
+ * it's in flight.
+ */
+void ocfs2_handle_add_inode(struct ocfs2_journal_handle *handle,
+ struct inode *inode);
+
+/*
+ * Credit Macros:
+ * Convenience macros to calculate number of credits needed.
+ *
+ * For convenience's sake, I have a set of macros here which calculate
+ * the *maximum* number of sectors which will be changed for various
+ * metadata updates.
+ */
+
+/* simple file updates like chmod, etc. */
+#define OCFS2_INODE_UPDATE_CREDITS 1
+
+/* get one bit out of a suballocator: dinode + group descriptor +
+ * prev. group desc. if we relink. */
+#define OCFS2_SUBALLOC_ALLOC (3)
+
+/* dinode + group descriptor update. We don't relink on free yet. */
+#define OCFS2_SUBALLOC_FREE (2)
+
+#define OCFS2_TRUNCATE_LOG_UPDATE OCFS2_INODE_UPDATE_CREDITS
+#define OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC (OCFS2_SUBALLOC_FREE \
+ + OCFS2_TRUNCATE_LOG_UPDATE)
+
+/* data block for new dir/symlink, 2 for bitmap updates (bitmap fe +
+ * bitmap block for the new bit) */
+#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + 2)
+
+/* parent fe, parent block, new file entry, inode alloc fe, inode alloc
+ * group descriptor + mkdir/symlink blocks */
+#define OCFS2_MKNOD_CREDITS (3 + OCFS2_SUBALLOC_ALLOC \
+ + OCFS2_DIR_LINK_ADDITIONAL_CREDITS)
+
+/* local alloc metadata change + main bitmap updates */
+#define OCFS2_WINDOW_MOVE_CREDITS (OCFS2_INODE_UPDATE_CREDITS \
+ + OCFS2_SUBALLOC_ALLOC + OCFS2_SUBALLOC_FREE)
+
+/* used when we don't need an allocation change for a dir extend. One
+ * for the dinode, one for the new block. */
+#define OCFS2_SIMPLE_DIR_EXTEND_CREDITS (2)
+
+/* file update (nlink, etc) + dir entry block */
+#define OCFS2_LINK_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1)
+
+/* inode + dir inode (if we unlink a dir), + dir entry block + orphan
+ * dir inode link */
+#define OCFS2_UNLINK_CREDITS (2 * OCFS2_INODE_UPDATE_CREDITS + 1 \
+ + OCFS2_LINK_CREDITS)
+
+/* dinode + orphan dir dinode + inode alloc dinode + orphan dir entry +
+ * inode alloc group descriptor */
+#define OCFS2_DELETE_INODE_CREDITS (3 * OCFS2_INODE_UPDATE_CREDITS + 1 + 1)
+
+/* dinode update, old dir dinode update, new dir dinode update, old
+ * dir dir entry, new dir dir entry, dir entry update for renaming
+ * directory + target unlink */
+#define OCFS2_RENAME_CREDITS (3 * OCFS2_INODE_UPDATE_CREDITS + 3 \
+ + OCFS2_UNLINK_CREDITS)
+
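+/*
+ * A hedged illustration (not a rule from the macros' authors) of how
+ * these credit counts are consumed: a chmod-style inode update would
+ * reserve OCFS2_INODE_UPDATE_CREDITS, e.g.
+ *
+ *	handle = ocfs2_start_trans(osb, handle, OCFS2_INODE_UPDATE_CREDITS);
+ *
+ * while a rename passes OCFS2_RENAME_CREDITS, which already folds in
+ * OCFS2_UNLINK_CREDITS for a possible target unlink.
+ */
+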
+static inline int ocfs2_calc_extend_credits(struct super_block *sb,
+ struct ocfs2_dinode *fe,
+ u32 bits_wanted)
+{
+ int bitmap_blocks, sysfile_bitmap_blocks, dinode_blocks;
+
+ /* bitmap dinode, group desc. + relinked group. */
+ bitmap_blocks = OCFS2_SUBALLOC_ALLOC;
+
+ /* we might need to shift tree depth so let's assume an
+ * absolute worst case of complete fragmentation. Even with
+ * that, we only need one update for the dinode, and then
+ * however many metadata chunks needed * a remaining suballoc
+ * alloc. */
+ sysfile_bitmap_blocks = 1 +
+ (OCFS2_SUBALLOC_ALLOC - 1) * ocfs2_extend_meta_needed(fe);
+
+ /* this does not include *new* metadata blocks, which are
+ * accounted for in sysfile_bitmap_blocks. fe +
+ * prev. last_eb_blk + blocks along edge of tree.
+ * calc_symlink_credits passes because we just need 1
+ * credit for the dinode there. */
+ dinode_blocks = 1 + 1 + le16_to_cpu(fe->id2.i_list.l_tree_depth);
+
+ return bitmap_blocks + sysfile_bitmap_blocks + dinode_blocks;
+}
+
+static inline int ocfs2_calc_symlink_credits(struct super_block *sb)
+{
+ int blocks = OCFS2_MKNOD_CREDITS;
+
+ /* links can be longer than one block so we may update many
+ * within our single allocated extent. */
+ blocks += ocfs2_clusters_to_blocks(sb, 1);
+
+ return blocks;
+}
+
+static inline int ocfs2_calc_group_alloc_credits(struct super_block *sb,
+ unsigned int cpg)
+{
+ int blocks;
+ int bitmap_blocks = OCFS2_SUBALLOC_ALLOC + 1;
+ /* parent inode update + new block group header + bitmap inode update
+ + bitmap blocks affected */
+ blocks = 1 + 1 + 1 + bitmap_blocks;
+ return blocks;
+}
+
+static inline int ocfs2_calc_tree_trunc_credits(struct super_block *sb,
+ unsigned int clusters_to_del,
+ struct ocfs2_dinode *fe,
+ struct ocfs2_extent_list *last_el)
+{
+ /* for dinode + all headers in this pass + update to next leaf */
+ u16 next_free = le16_to_cpu(last_el->l_next_free_rec);
+ u16 tree_depth = le16_to_cpu(fe->id2.i_list.l_tree_depth);
+ int credits = 1 + tree_depth + 1;
+ int i;
+
+ i = next_free - 1;
+ BUG_ON(i < 0);
+
+ /* We may be deleting metadata blocks, so metadata alloc dinode +
+ one desc. block for each possible delete. */
+ if (tree_depth && next_free == 1 &&
+ le32_to_cpu(last_el->l_recs[i].e_clusters) == clusters_to_del)
+ credits += 1 + tree_depth;
+
+ /* update to the truncate log. */
+ credits += OCFS2_TRUNCATE_LOG_UPDATE;
+
+ return credits;
+}
+
+#endif /* OCFS2_JOURNAL_H */
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
new file mode 100644
index 00000000000..149b3518166
--- /dev/null
+++ b/fs/ocfs2/localalloc.c
@@ -0,0 +1,983 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * localalloc.c
+ *
+ * Node local data allocation
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/bitops.h>
+
+#define MLOG_MASK_PREFIX ML_DISK_ALLOC
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#include "alloc.h"
+#include "dlmglue.h"
+#include "inode.h"
+#include "journal.h"
+#include "localalloc.h"
+#include "suballoc.h"
+#include "super.h"
+#include "sysfile.h"
+
+#include "buffer_head_io.h"
+
+#define OCFS2_LOCAL_ALLOC(dinode) (&((dinode)->id2.i_lab))
+
+static inline int ocfs2_local_alloc_window_bits(struct ocfs2_super *osb);
+
+static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc);
+
+static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
+ struct ocfs2_dinode *alloc,
+ u32 numbits);
+
+static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc);
+
+static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct ocfs2_dinode *alloc,
+ struct inode *main_bm_inode,
+ struct buffer_head *main_bm_bh);
+
+static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct ocfs2_alloc_context **ac,
+ struct inode **bitmap_inode,
+ struct buffer_head **bitmap_bh);
+
+static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct ocfs2_alloc_context *ac);
+
+static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
+ struct inode *local_alloc_inode);
+
+/*
+ * Determine how large our local alloc window should be, in bits.
+ *
+ * These values (and the behavior in ocfs2_alloc_should_use_local) have
+ * been chosen so that most allocations, including new block groups go
+ * through local alloc.
+ */
+static inline int ocfs2_local_alloc_window_bits(struct ocfs2_super *osb)
+{
+ BUG_ON(osb->s_clustersize_bits < 12);
+
+ return 2048 >> (osb->s_clustersize_bits - 12);
+}
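+
+/* Worked example of the formula above (illustrative arithmetic only):
+ * with 4K clusters (s_clustersize_bits == 12) the window is 2048 bits,
+ * i.e. 2048 clusters or 8MB; with 1MB clusters (bits == 20) it is
+ * 2048 >> 8 = 8 clusters, which is again 8MB. The window thus covers
+ * roughly the same number of bytes regardless of cluster size. */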
+
+/*
+ * Tell us whether a given allocation should use the local alloc
+ * file. Otherwise, it has to go to the main bitmap.
+ */
+int ocfs2_alloc_should_use_local(struct ocfs2_super *osb, u64 bits)
+{
+ int la_bits = ocfs2_local_alloc_window_bits(osb);
+
+ if (osb->local_alloc_state != OCFS2_LA_ENABLED)
+ return 0;
+
+ /* la_bits should be at least twice the size (in clusters) of
+ * a new block group. We want to be sure block group
+ * allocations go through the local alloc, so allow an
+ * allocation to take up to half the bitmap. */
+ if (bits > (la_bits / 2))
+ return 0;
+
+ return 1;
+}
+
+int ocfs2_load_local_alloc(struct ocfs2_super *osb)
+{
+ int status = 0;
+ struct ocfs2_dinode *alloc = NULL;
+ struct buffer_head *alloc_bh = NULL;
+ u32 num_used;
+ struct inode *inode = NULL;
+ struct ocfs2_local_alloc *la;
+
+ mlog_entry_void();
+
+ /* read the alloc off disk */
+ inode = ocfs2_get_system_file_inode(osb, LOCAL_ALLOC_SYSTEM_INODE,
+ osb->slot_num);
+ if (!inode) {
+ status = -EINVAL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno,
+ &alloc_bh, 0, inode);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ alloc = (struct ocfs2_dinode *) alloc_bh->b_data;
+ la = OCFS2_LOCAL_ALLOC(alloc);
+
+ if (!(le32_to_cpu(alloc->i_flags) &
+ (OCFS2_LOCAL_ALLOC_FL|OCFS2_BITMAP_FL))) {
+ mlog(ML_ERROR, "Invalid local alloc inode, %"MLFu64"\n",
+ OCFS2_I(inode)->ip_blkno);
+ status = -EINVAL;
+ goto bail;
+ }
+
+ if ((la->la_size == 0) ||
+ (le16_to_cpu(la->la_size) > ocfs2_local_alloc_size(inode->i_sb))) {
+ mlog(ML_ERROR, "Local alloc size is invalid (la_size = %u)\n",
+ le16_to_cpu(la->la_size));
+ status = -EINVAL;
+ goto bail;
+ }
+
+ /* do a little verification. */
+ num_used = ocfs2_local_alloc_count_bits(alloc);
+
+ /* hopefully the local alloc has always been recovered before
+ * we load it. */
+ if (num_used
+ || alloc->id1.bitmap1.i_used
+ || alloc->id1.bitmap1.i_total
+ || la->la_bm_off)
+ mlog(ML_ERROR, "Local alloc hasn't been recovered!\n"
+ "found = %u, set = %u, taken = %u, off = %u\n",
+ num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
+ le32_to_cpu(alloc->id1.bitmap1.i_total),
+ OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
+
+ osb->local_alloc_bh = alloc_bh;
+ osb->local_alloc_state = OCFS2_LA_ENABLED;
+
+bail:
+ if (status < 0)
+ if (alloc_bh)
+ brelse(alloc_bh);
+ if (inode)
+ iput(inode);
+
+ mlog_exit(status);
+ return status;
+}
+
+/*
+ * return any unused bits to the bitmap and write out a clean
+ * local_alloc.
+ *
+ * local_alloc_bh is optional. If not passed, we will simply use the
+ * one off osb. If you do pass it however, be warned that it *will* be
+ * returned brelse'd and NULL'd out. */
+void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
+{
+ int status;
+ struct ocfs2_journal_handle *handle = NULL;
+ struct inode *local_alloc_inode = NULL;
+ struct buffer_head *bh = NULL;
+ struct buffer_head *main_bm_bh = NULL;
+ struct inode *main_bm_inode = NULL;
+ struct ocfs2_dinode *alloc_copy = NULL;
+ struct ocfs2_dinode *alloc = NULL;
+
+ mlog_entry_void();
+
+ if (osb->local_alloc_state == OCFS2_LA_UNUSED)
+ goto bail;
+
+ local_alloc_inode =
+ ocfs2_get_system_file_inode(osb,
+ LOCAL_ALLOC_SYSTEM_INODE,
+ osb->slot_num);
+ if (!local_alloc_inode) {
+ status = -ENOENT;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ osb->local_alloc_state = OCFS2_LA_DISABLED;
+
+ handle = ocfs2_alloc_handle(osb);
+ if (!handle) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ main_bm_inode = ocfs2_get_system_file_inode(osb,
+ GLOBAL_BITMAP_SYSTEM_INODE,
+ OCFS2_INVALID_SLOT);
+ if (!main_bm_inode) {
+ status = -EINVAL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ ocfs2_handle_add_inode(handle, main_bm_inode);
+ status = ocfs2_meta_lock(main_bm_inode, handle, &main_bm_bh, 1);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* WINDOW_MOVE_CREDITS is a bit heavy... */
+ handle = ocfs2_start_trans(osb, handle, OCFS2_WINDOW_MOVE_CREDITS);
+ if (IS_ERR(handle)) {
+ mlog_errno(PTR_ERR(handle));
+ handle = NULL;
+ goto bail;
+ }
+
+ bh = osb->local_alloc_bh;
+ alloc = (struct ocfs2_dinode *) bh->b_data;
+
+ alloc_copy = kmalloc(bh->b_size, GFP_KERNEL);
+ if (!alloc_copy) {
+ status = -ENOMEM;
+ goto bail;
+ }
+ memcpy(alloc_copy, alloc, bh->b_size);
+
+ status = ocfs2_journal_access(handle, local_alloc_inode, bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ ocfs2_clear_local_alloc(alloc);
+
+ status = ocfs2_journal_dirty(handle, bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ brelse(bh);
+ osb->local_alloc_bh = NULL;
+ osb->local_alloc_state = OCFS2_LA_UNUSED;
+
+ status = ocfs2_sync_local_to_main(osb, handle, alloc_copy,
+ main_bm_inode, main_bm_bh);
+ if (status < 0)
+ mlog_errno(status);
+
+bail:
+ if (handle)
+ ocfs2_commit_trans(handle);
+
+ if (main_bm_bh)
+ brelse(main_bm_bh);
+
+ if (main_bm_inode)
+ iput(main_bm_inode);
+
+ if (local_alloc_inode)
+ iput(local_alloc_inode);
+
+ if (alloc_copy)
+ kfree(alloc_copy);
+
+ mlog_exit_void();
+}
+
+/*
+ * We want to free the bitmap bits outside of any recovery context as
+ * we'll need a cluster lock to do so, but we must clear the local
+ * alloc before giving up the recovered node's journal. To solve this,
+ * we kmalloc a copy of the local alloc before it's changed, for the
+ * caller to process with ocfs2_complete_local_alloc_recovery.
+ */
+int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb,
+ int slot_num,
+ struct ocfs2_dinode **alloc_copy)
+{
+ int status = 0;
+ struct buffer_head *alloc_bh = NULL;
+ struct inode *inode = NULL;
+ struct ocfs2_dinode *alloc;
+
+ mlog_entry("(slot_num = %d)\n", slot_num);
+
+ *alloc_copy = NULL;
+
+ inode = ocfs2_get_system_file_inode(osb,
+ LOCAL_ALLOC_SYSTEM_INODE,
+ slot_num);
+ if (!inode) {
+ status = -EINVAL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ mutex_lock(&inode->i_mutex);
+
+ status = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno,
+ &alloc_bh, 0, inode);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ *alloc_copy = kmalloc(alloc_bh->b_size, GFP_KERNEL);
+ if (!(*alloc_copy)) {
+ status = -ENOMEM;
+ goto bail;
+ }
+ memcpy((*alloc_copy), alloc_bh->b_data, alloc_bh->b_size);
+
+ alloc = (struct ocfs2_dinode *) alloc_bh->b_data;
+ ocfs2_clear_local_alloc(alloc);
+
+ status = ocfs2_write_block(osb, alloc_bh, inode);
+ if (status < 0)
+ mlog_errno(status);
+
+bail:
+ if ((status < 0) && (*alloc_copy)) {
+ kfree(*alloc_copy);
+ *alloc_copy = NULL;
+ }
+
+ if (alloc_bh)
+ brelse(alloc_bh);
+
+ if (inode) {
+ mutex_unlock(&inode->i_mutex);
+ iput(inode);
+ }
+
+ mlog_exit(status);
+ return status;
+}
+
+/*
+ * Step 2: By now, we've completed the journal recovery, we've stamped
+ * a clean local alloc on disk and dropped the node out of the
+ * recovery map. Dlm locks will no longer stall, so lets clear out the
+ * main bitmap.
+ */
+int ocfs2_complete_local_alloc_recovery(struct ocfs2_super *osb,
+ struct ocfs2_dinode *alloc)
+{
+ int status;
+ struct ocfs2_journal_handle *handle = NULL;
+ struct buffer_head *main_bm_bh = NULL;
+ struct inode *main_bm_inode = NULL;
+
+ mlog_entry_void();
+
+ handle = ocfs2_alloc_handle(osb);
+ if (!handle) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ main_bm_inode = ocfs2_get_system_file_inode(osb,
+ GLOBAL_BITMAP_SYSTEM_INODE,
+ OCFS2_INVALID_SLOT);
+ if (!main_bm_inode) {
+ status = -EINVAL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ ocfs2_handle_add_inode(handle, main_bm_inode);
+ status = ocfs2_meta_lock(main_bm_inode, handle, &main_bm_bh, 1);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ handle = ocfs2_start_trans(osb, handle, OCFS2_WINDOW_MOVE_CREDITS);
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ handle = NULL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* we want the bitmap change to be recorded on disk asap */
+ ocfs2_handle_set_sync(handle, 1);
+
+ status = ocfs2_sync_local_to_main(osb, handle, alloc,
+ main_bm_inode, main_bm_bh);
+ if (status < 0)
+ mlog_errno(status);
+
+bail:
+ if (handle)
+ ocfs2_commit_trans(handle);
+
+ if (main_bm_bh)
+ brelse(main_bm_bh);
+
+ if (main_bm_inode)
+ iput(main_bm_inode);
+
+ mlog_exit(status);
+ return status;
+}
+
+/*
+ * make sure we've got at least bits_wanted contiguous bits in the
+ * local alloc. You lose them when you drop i_mutex.
+ *
+ * We will add ourselves to the transaction passed in, but may start
+ * our own in order to shift windows.
+ */
+int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *passed_handle,
+ u32 bits_wanted,
+ struct ocfs2_alloc_context *ac)
+{
+ int status;
+ struct ocfs2_dinode *alloc;
+ struct inode *local_alloc_inode;
+ unsigned int free_bits;
+
+ mlog_entry_void();
+
+ BUG_ON(!passed_handle);
+ BUG_ON(!ac);
+ BUG_ON(passed_handle->flags & OCFS2_HANDLE_STARTED);
+
+ local_alloc_inode =
+ ocfs2_get_system_file_inode(osb,
+ LOCAL_ALLOC_SYSTEM_INODE,
+ osb->slot_num);
+ if (!local_alloc_inode) {
+ status = -ENOENT;
+ mlog_errno(status);
+ goto bail;
+ }
+ ocfs2_handle_add_inode(passed_handle, local_alloc_inode);
+
+ if (osb->local_alloc_state != OCFS2_LA_ENABLED) {
+ status = -ENOSPC;
+ goto bail;
+ }
+
+ if (bits_wanted > ocfs2_local_alloc_window_bits(osb)) {
+ mlog(0, "Asking for more than my max window size!\n");
+ status = -ENOSPC;
+ goto bail;
+ }
+
+ alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
+
+ if (le32_to_cpu(alloc->id1.bitmap1.i_used) !=
+ ocfs2_local_alloc_count_bits(alloc)) {
+ ocfs2_error(osb->sb, "local alloc inode %"MLFu64" says it has "
+ "%u free bits, but a count shows %u",
+ le64_to_cpu(alloc->i_blkno),
+ le32_to_cpu(alloc->id1.bitmap1.i_used),
+ ocfs2_local_alloc_count_bits(alloc));
+ status = -EIO;
+ goto bail;
+ }
+
+ free_bits = le32_to_cpu(alloc->id1.bitmap1.i_total) -
+ le32_to_cpu(alloc->id1.bitmap1.i_used);
+ if (bits_wanted > free_bits) {
+ /* uhoh, window change time. */
+ status =
+ ocfs2_local_alloc_slide_window(osb, local_alloc_inode);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
+ ac->ac_inode = igrab(local_alloc_inode);
+ get_bh(osb->local_alloc_bh);
+ ac->ac_bh = osb->local_alloc_bh;
+ ac->ac_which = OCFS2_AC_USE_LOCAL;
+ status = 0;
+bail:
+ if (local_alloc_inode)
+ iput(local_alloc_inode);
+
+ mlog_exit(status);
+ return status;
+}
+
+int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct ocfs2_alloc_context *ac,
+ u32 min_bits,
+ u32 *bit_off,
+ u32 *num_bits)
+{
+ int status, start;
+ struct inode *local_alloc_inode;
+ u32 bits_wanted;
+ void *bitmap;
+ struct ocfs2_dinode *alloc;
+ struct ocfs2_local_alloc *la;
+
+ mlog_entry_void();
+ BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);
+
+ bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given;
+ local_alloc_inode = ac->ac_inode;
+ alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
+ la = OCFS2_LOCAL_ALLOC(alloc);
+
+ start = ocfs2_local_alloc_find_clear_bits(osb, alloc, bits_wanted);
+ if (start == -1) {
+ /* TODO: Shouldn't we just BUG here? */
+ status = -ENOSPC;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ bitmap = la->la_bitmap;
+ *bit_off = le32_to_cpu(la->la_bm_off) + start;
+ /* local alloc is always contiguous by nature -- we never
+ * delete bits from it! */
+ *num_bits = bits_wanted;
+
+ status = ocfs2_journal_access(handle, local_alloc_inode,
+ osb->local_alloc_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ while(bits_wanted--)
+ ocfs2_set_bit(start++, bitmap);
+
+ alloc->id1.bitmap1.i_used = cpu_to_le32(*num_bits +
+ le32_to_cpu(alloc->id1.bitmap1.i_used));
+
+ status = ocfs2_journal_dirty(handle, osb->local_alloc_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = 0;
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc)
+{
+ int i;
+ u8 *buffer;
+ u32 count = 0;
+ struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
+
+ mlog_entry_void();
+
+ buffer = la->la_bitmap;
+ for (i = 0; i < le16_to_cpu(la->la_size); i++)
+ count += hweight8(buffer[i]);
+
+ mlog_exit(count);
+ return count;
+}
+
+static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
+ struct ocfs2_dinode *alloc,
+ u32 numbits)
+{
+ int numfound, bitoff, left, startoff, lastzero;
+ void *bitmap = NULL;
+
+ mlog_entry("(numbits wanted = %u)\n", numbits);
+
+ if (!alloc->id1.bitmap1.i_total) {
+ mlog(0, "No bits in my window!\n");
+ bitoff = -1;
+ goto bail;
+ }
+
+ bitmap = OCFS2_LOCAL_ALLOC(alloc)->la_bitmap;
+
+ numfound = bitoff = startoff = 0;
+ lastzero = -1;
+ left = le32_to_cpu(alloc->id1.bitmap1.i_total);
+ while ((bitoff = ocfs2_find_next_zero_bit(bitmap, left, startoff)) != -1) {
+ if (bitoff == left) {
+ /* mlog(0, "bitoff (%d) == left", bitoff); */
+ break;
+ }
+ /* mlog(0, "Found a zero: bitoff = %d, startoff = %d, "
+ "numfound = %d\n", bitoff, startoff, numfound);*/
+
+ /* Ok, we found a zero bit... is it contig. or do we
+ * start over?*/
+ if (bitoff == startoff) {
+ /* we found a zero */
+ numfound++;
+ startoff++;
+ } else {
+ /* got a zero after some ones */
+ numfound = 1;
+ startoff = bitoff+1;
+ }
+ /* we got everything we needed */
+ if (numfound == numbits) {
+ /* mlog(0, "Found it all!\n"); */
+ break;
+ }
+ }
+
+ mlog(0, "Exiting loop, bitoff = %d, numfound = %d\n", bitoff,
+ numfound);
+
+ if (numfound == numbits)
+ bitoff = startoff - numfound;
+ else
+ bitoff = -1;
+
+bail:
+ mlog_exit(bitoff);
+ return bitoff;
+}
+
+static void ocfs2_clear_local_alloc(struct ocfs2_dinode *alloc)
+{
+ struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
+ int i;
+ mlog_entry_void();
+
+ alloc->id1.bitmap1.i_total = 0;
+ alloc->id1.bitmap1.i_used = 0;
+ la->la_bm_off = 0;
+ for(i = 0; i < le16_to_cpu(la->la_size); i++)
+ la->la_bitmap[i] = 0;
+
+ mlog_exit_void();
+}
+
+#if 0
+/* turn this on and uncomment below to aid debugging window shifts. */
+static void ocfs2_verify_zero_bits(unsigned long *bitmap,
+ unsigned int start,
+ unsigned int count)
+{
+ unsigned int tmp = count;
+ while(tmp--) {
+ if (ocfs2_test_bit(start + tmp, bitmap)) {
+ printk("ocfs2_verify_zero_bits: start = %u, count = "
+ "%u\n", start, count);
+ printk("ocfs2_verify_zero_bits: bit %u is set!",
+ start + tmp);
+ BUG();
+ }
+ }
+}
+#endif
+
+/*
+ * sync the local alloc to main bitmap.
+ *
+ * assumes you've already locked the main bitmap -- the bitmap inode
+ * passed is used for caching.
+ */
+static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct ocfs2_dinode *alloc,
+ struct inode *main_bm_inode,
+ struct buffer_head *main_bm_bh)
+{
+ int status = 0;
+ int bit_off, left, count, start;
+ u64 la_start_blk;
+ u64 blkno;
+ void *bitmap;
+ struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
+
+ mlog_entry("total = %u, COUNT = %u, used = %u\n",
+ le32_to_cpu(alloc->id1.bitmap1.i_total),
+ ocfs2_local_alloc_count_bits(alloc),
+ le32_to_cpu(alloc->id1.bitmap1.i_used));
+
+ if (!alloc->id1.bitmap1.i_total) {
+ mlog(0, "nothing to sync!\n");
+ goto bail;
+ }
+
+ if (le32_to_cpu(alloc->id1.bitmap1.i_used) ==
+ le32_to_cpu(alloc->id1.bitmap1.i_total)) {
+ mlog(0, "all bits were taken!\n");
+ goto bail;
+ }
+
+ la_start_blk = ocfs2_clusters_to_blocks(osb->sb,
+ le32_to_cpu(la->la_bm_off));
+ bitmap = la->la_bitmap;
+ start = count = bit_off = 0;
+ left = le32_to_cpu(alloc->id1.bitmap1.i_total);
+
+ while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start))
+ != -1) {
+ if ((bit_off < left) && (bit_off == start)) {
+ count++;
+ start++;
+ continue;
+ }
+ if (count) {
+ blkno = la_start_blk +
+ ocfs2_clusters_to_blocks(osb->sb,
+ start - count);
+
+ mlog(0, "freeing %u bits starting at local "
+ "alloc bit %u (la_start_blk = %"MLFu64", "
+ "blkno = %"MLFu64")\n", count, start - count,
+ la_start_blk, blkno);
+
+ status = ocfs2_free_clusters(handle, main_bm_inode,
+ main_bm_bh, blkno, count);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+ if (bit_off >= left)
+ break;
+ count = 1;
+ start = bit_off + 1;
+ }
+
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct ocfs2_alloc_context **ac,
+ struct inode **bitmap_inode,
+ struct buffer_head **bitmap_bh)
+{
+ int status;
+
+ *ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
+ if (!(*ac)) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ (*ac)->ac_handle = handle;
+ (*ac)->ac_bits_wanted = ocfs2_local_alloc_window_bits(osb);
+
+ status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto bail;
+ }
+
+ *bitmap_inode = (*ac)->ac_inode;
+ igrab(*bitmap_inode);
+ *bitmap_bh = (*ac)->ac_bh;
+ get_bh(*bitmap_bh);
+ status = 0;
+bail:
+ if ((status < 0) && *ac) {
+ ocfs2_free_alloc_context(*ac);
+ *ac = NULL;
+ }
+
+ mlog_exit(status);
+ return status;
+}
+
+/*
+ * pass it the bitmap lock in lock_bh if you have it.
+ */
+static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct ocfs2_alloc_context *ac)
+{
+ int status = 0;
+ u32 cluster_off, cluster_count;
+ struct ocfs2_dinode *alloc = NULL;
+ struct ocfs2_local_alloc *la;
+
+ mlog_entry_void();
+
+ alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
+ la = OCFS2_LOCAL_ALLOC(alloc);
+
+ if (alloc->id1.bitmap1.i_total)
+ mlog(0, "asking me to alloc a new window over a non-empty "
+ "one\n");
+
+ mlog(0, "Allocating %u clusters for a new window.\n",
+ ocfs2_local_alloc_window_bits(osb));
+ /* we used the generic suballoc reserve function, but we set
+ * everything up nicely, so there's no reason why we can't use
+ * the more specific cluster api to claim bits. */
+ status = ocfs2_claim_clusters(osb, handle, ac,
+ ocfs2_local_alloc_window_bits(osb),
+ &cluster_off, &cluster_count);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto bail;
+ }
+
+ la->la_bm_off = cpu_to_le32(cluster_off);
+ alloc->id1.bitmap1.i_total = cpu_to_le32(cluster_count);
+ /* just in case... In the future when we find space ourselves,
+ * we don't have to get all contiguous -- but we'll have to
+ * set all previously used bits in bitmap and update
+ * la_bits_set before setting the bits in the main bitmap. */
+ alloc->id1.bitmap1.i_used = 0;
+ memset(OCFS2_LOCAL_ALLOC(alloc)->la_bitmap, 0,
+ le16_to_cpu(la->la_size));
+
+ mlog(0, "New window allocated:\n");
+ mlog(0, "window la_bm_off = %u\n",
+ OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
+ mlog(0, "window bits = %u\n", le32_to_cpu(alloc->id1.bitmap1.i_total));
+
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+/* Note that we do *NOT* lock the local alloc inode here as
+ * it's been locked already for us. */
+static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
+ struct inode *local_alloc_inode)
+{
+ int status = 0;
+ struct buffer_head *main_bm_bh = NULL;
+ struct inode *main_bm_inode = NULL;
+ struct ocfs2_journal_handle *handle = NULL;
+ struct ocfs2_dinode *alloc;
+ struct ocfs2_dinode *alloc_copy = NULL;
+ struct ocfs2_alloc_context *ac = NULL;
+
+ mlog_entry_void();
+
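+ /* Sliding a window: reserve space in the main bitmap, snapshot and
+ * clear the current local alloc, return its unused bits to the main
+ * bitmap, then claim a fresh window. */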
+ handle = ocfs2_alloc_handle(osb);
+ if (!handle) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* This will lock the main bitmap for us. */
+ status = ocfs2_local_alloc_reserve_for_window(osb,
+ handle,
+ &ac,
+ &main_bm_inode,
+ &main_bm_bh);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto bail;
+ }
+
+ handle = ocfs2_start_trans(osb, handle, OCFS2_WINDOW_MOVE_CREDITS);
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ handle = NULL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
+
+ /* We want to clear the local alloc before doing anything
+ * else, so that if we error later during this operation,
+ * local alloc shutdown won't try to double free main bitmap
+ * bits. Make a copy so the sync function knows which bits to
+ * free. */
+ alloc_copy = kmalloc(osb->local_alloc_bh->b_size, GFP_KERNEL);
+ if (!alloc_copy) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+ memcpy(alloc_copy, alloc, osb->local_alloc_bh->b_size);
+
+ status = ocfs2_journal_access(handle, local_alloc_inode,
+ osb->local_alloc_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ ocfs2_clear_local_alloc(alloc);
+
+ status = ocfs2_journal_dirty(handle, osb->local_alloc_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_sync_local_to_main(osb, handle, alloc_copy,
+ main_bm_inode, main_bm_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_local_alloc_new_window(osb, handle, ac);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto bail;
+ }
+
+ atomic_inc(&osb->alloc_stats.moves);
+
+ status = 0;
+bail:
+ if (handle)
+ ocfs2_commit_trans(handle);
+
+ if (main_bm_bh)
+ brelse(main_bm_bh);
+
+ if (main_bm_inode)
+ iput(main_bm_inode);
+
+ if (alloc_copy)
+ kfree(alloc_copy);
+
+ if (ac)
+ ocfs2_free_alloc_context(ac);
+
+ mlog_exit(status);
+ return status;
+}
+
diff --git a/fs/ocfs2/localalloc.h b/fs/ocfs2/localalloc.h
new file mode 100644
index 00000000000..30f88ce14e4
--- /dev/null
+++ b/fs/ocfs2/localalloc.h
@@ -0,0 +1,56 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * localalloc.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_LOCALALLOC_H
+#define OCFS2_LOCALALLOC_H
+
+int ocfs2_load_local_alloc(struct ocfs2_super *osb);
+
+void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb);
+
+int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb,
+ int node_num,
+ struct ocfs2_dinode **alloc_copy);
+
+int ocfs2_complete_local_alloc_recovery(struct ocfs2_super *osb,
+ struct ocfs2_dinode *alloc);
+
+int ocfs2_alloc_should_use_local(struct ocfs2_super *osb,
+ u64 bits);
+
+struct ocfs2_alloc_context;
+int ocfs2_reserve_local_alloc_bits(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *passed_handle,
+ u32 bits_wanted,
+ struct ocfs2_alloc_context *ac);
+
+int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct ocfs2_alloc_context *ac,
+ u32 min_bits,
+ u32 *bit_off,
+ u32 *num_bits);
+
+#endif /* OCFS2_LOCALALLOC_H */
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
new file mode 100644
index 00000000000..843cf9ddefe
--- /dev/null
+++ b/fs/ocfs2/mmap.c
@@ -0,0 +1,98 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * mmap.c
+ *
+ * Code to deal with the mess that is clustered mmap.
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/uio.h>
+#include <linux/signal.h>
+#include <linux/rbtree.h>
+
+#define MLOG_MASK_PREFIX ML_FILE_IO
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#include "dlmglue.h"
+#include "file.h"
+#include "inode.h"
+#include "mmap.h"
+
+static struct page *ocfs2_nopage(struct vm_area_struct * area,
+ unsigned long address,
+ int *type)
+{
+ struct inode *inode = area->vm_file->f_dentry->d_inode;
+ struct page *page = NOPAGE_SIGBUS;
+ sigset_t blocked, oldset;
+ int ret;
+
+ mlog_entry("(inode %lu, address %lu)\n", inode->i_ino, address);
+
+ /* The best way to deal with signals in this path is
+ * to block them upfront, rather than allowing the
+ * locking paths to return -ERESTARTSYS. */
+ sigfillset(&blocked);
+
+ /* We should never get an error return from
+ * sigprocmask here. */
+ ret = sigprocmask(SIG_BLOCK, &blocked, &oldset);
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ page = filemap_nopage(area, address, type);
+
+ ret = sigprocmask(SIG_SETMASK, &oldset, NULL);
+ if (ret < 0)
+ mlog_errno(ret);
+out:
+ mlog_exit_ptr(page);
+ return page;
+}
+
+static struct vm_operations_struct ocfs2_file_vm_ops = {
+ .nopage = ocfs2_nopage,
+};
+
+int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ /* We don't want to support shared writable mappings yet. */
+ if (((vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_MAYSHARE))
+ && ((vma->vm_flags & VM_WRITE) || (vma->vm_flags & VM_MAYWRITE))) {
+ mlog(0, "disallow shared writable mmaps %lx\n", vma->vm_flags);
+ /* This is -EINVAL because generic_file_readonly_mmap
+ * returns it in a similar situation. */
+ return -EINVAL;
+ }
+
+ file_accessed(file);
+ vma->vm_ops = &ocfs2_file_vm_ops;
+ return 0;
+}
+
diff --git a/fs/ocfs2/mmap.h b/fs/ocfs2/mmap.h
new file mode 100644
index 00000000000..1274ee0f1fe
--- /dev/null
+++ b/fs/ocfs2/mmap.h
@@ -0,0 +1,6 @@
+#ifndef OCFS2_MMAP_H
+#define OCFS2_MMAP_H
+
+int ocfs2_mmap(struct file *file, struct vm_area_struct *vma);
+
+#endif /* OCFS2_MMAP_H */
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
new file mode 100644
index 00000000000..f6b77ff1d2b
--- /dev/null
+++ b/fs/ocfs2/namei.c
@@ -0,0 +1,2264 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * namei.c
+ *
+ * Create and rename file, directory, symlinks
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * Portions of this code from linux/fs/ext3/dir.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ * from
+ *
+ * linux/fs/minix/dir.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+
+#define MLOG_MASK_PREFIX ML_NAMEI
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#include "alloc.h"
+#include "dcache.h"
+#include "dir.h"
+#include "dlmglue.h"
+#include "extent_map.h"
+#include "file.h"
+#include "inode.h"
+#include "journal.h"
+#include "namei.h"
+#include "suballoc.h"
+#include "symlink.h"
+#include "sysfile.h"
+#include "uptodate.h"
+#include "vote.h"
+
+#include "buffer_head_io.h"
+
+#define NAMEI_RA_CHUNKS 2
+#define NAMEI_RA_BLOCKS 4
+#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
+#define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
+
+static int inline ocfs2_search_dirblock(struct buffer_head *bh,
+ struct inode *dir,
+ const char *name, int namelen,
+ unsigned long offset,
+ struct ocfs2_dir_entry **res_dir);
+
+static int ocfs2_delete_entry(struct ocfs2_journal_handle *handle,
+ struct inode *dir,
+ struct ocfs2_dir_entry *de_del,
+ struct buffer_head *bh);
+
+static int __ocfs2_add_entry(struct ocfs2_journal_handle *handle,
+ struct inode *dir,
+ const char *name, int namelen,
+ struct inode *inode, u64 blkno,
+ struct buffer_head *parent_fe_bh,
+ struct buffer_head *insert_bh);
+
+static int ocfs2_mknod_locked(struct ocfs2_super *osb,
+ struct inode *dir,
+ struct dentry *dentry, int mode,
+ dev_t dev,
+ struct buffer_head **new_fe_bh,
+ struct buffer_head *parent_fe_bh,
+ struct ocfs2_journal_handle *handle,
+ struct inode **ret_inode,
+ struct ocfs2_alloc_context *inode_ac);
+
+static int ocfs2_fill_new_dir(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *parent,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct ocfs2_alloc_context *data_ac);
+
+static int ocfs2_double_lock(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct buffer_head **bh1,
+ struct inode *inode1,
+ struct buffer_head **bh2,
+ struct inode *inode2);
+
+static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ char *name,
+ struct buffer_head **de_bh);
+
+static int ocfs2_orphan_add(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ struct ocfs2_dinode *fe,
+ char *name,
+ struct buffer_head *de_bh);
+
+static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ const char *symname);
+
+static inline int ocfs2_add_entry(struct ocfs2_journal_handle *handle,
+ struct dentry *dentry,
+ struct inode *inode, u64 blkno,
+ struct buffer_head *parent_fe_bh,
+ struct buffer_head *insert_bh)
+{
+ return __ocfs2_add_entry(handle, dentry->d_parent->d_inode,
+ dentry->d_name.name, dentry->d_name.len,
+ inode, blkno, parent_fe_bh, insert_bh);
+}
+
+/* An orphan dir name is an 8 byte value, printed as a hex string */
+#define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64)))
+
+static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry,
+ struct nameidata *nd)
+{
+ int status;
+ u64 blkno;
+ struct buffer_head *dirent_bh = NULL;
+ struct inode *inode = NULL;
+ struct dentry *ret;
+ struct ocfs2_dir_entry *dirent;
+ struct ocfs2_inode_info *oi;
+
+ mlog_entry("(0x%p, 0x%p, '%.*s')\n", dir, dentry,
+ dentry->d_name.len, dentry->d_name.name);
+
+ if (dentry->d_name.len > OCFS2_MAX_FILENAME_LEN) {
+ ret = ERR_PTR(-ENAMETOOLONG);
+ goto bail;
+ }
+
+ mlog(0, "find name %.*s in directory %"MLFu64"\n", dentry->d_name.len,
+ dentry->d_name.name, OCFS2_I(dir)->ip_blkno);
+
+ status = ocfs2_meta_lock(dir, NULL, NULL, 0);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ ret = ERR_PTR(status);
+ goto bail;
+ }
+
+ status = ocfs2_find_files_on_disk(dentry->d_name.name,
+ dentry->d_name.len, &blkno,
+ dir, &dirent_bh, &dirent);
+ if (status < 0)
+ goto bail_add;
+
+ inode = ocfs2_iget(OCFS2_SB(dir->i_sb), blkno);
+ if (IS_ERR(inode)) {
+ mlog(ML_ERROR, "Unable to create inode %"MLFu64"\n", blkno);
+ ret = ERR_PTR(-EACCES);
+ goto bail_unlock;
+ }
+
+ oi = OCFS2_I(inode);
+ /* Clear any orphaned state... If we were able to look up the
+ * inode from a directory, it certainly can't be orphaned. We
+ * might have the bad state from a node which intended to
+ * orphan this inode but crashed before it could commit the
+ * unlink. */
+ spin_lock(&oi->ip_lock);
+ oi->ip_flags &= ~OCFS2_INODE_MAYBE_ORPHANED;
+ oi->ip_orphaned_slot = OCFS2_INVALID_SLOT;
+ spin_unlock(&oi->ip_lock);
+
+bail_add:
+
+ dentry->d_op = &ocfs2_dentry_ops;
+ ret = d_splice_alias(inode, dentry);
+
+bail_unlock:
+ /* Don't drop the cluster lock until *after* the d_add --
+ * unlink on another node will message us to remove that
+ * dentry under this lock so otherwise we can race this with
+ * the vote thread and have a stale dentry. */
+ ocfs2_meta_unlock(dir, 0);
+
+bail:
+ if (dirent_bh)
+ brelse(dirent_bh);
+
+ mlog_exit_ptr(ret);
+
+ return ret;
+}
+
+static int ocfs2_fill_new_dir(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *parent,
+ struct inode *inode,
+ struct buffer_head *fe_bh,
+ struct ocfs2_alloc_context *data_ac)
+{
+ int status;
+ struct buffer_head *new_bh = NULL;
+ struct ocfs2_dir_entry *de = NULL;
+
+ mlog_entry_void();
+
+ status = ocfs2_do_extend_dir(osb->sb, handle, inode, fe_bh,
+ data_ac, NULL, &new_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ ocfs2_set_new_buffer_uptodate(inode, new_bh);
+
+ status = ocfs2_journal_access(handle, inode, new_bh,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ memset(new_bh->b_data, 0, osb->sb->s_blocksize);
+
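+ /* Write the "." and ".." entries by hand into the new directory's
+ * first block. */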
+ de = (struct ocfs2_dir_entry *) new_bh->b_data;
+ de->inode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
+ de->name_len = 1;
+ de->rec_len =
+ cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
+ strcpy(de->name, ".");
+ ocfs2_set_de_type(de, S_IFDIR);
+ de = (struct ocfs2_dir_entry *) ((char *)de + le16_to_cpu(de->rec_len));
+ de->inode = cpu_to_le64(OCFS2_I(parent)->ip_blkno);
+ de->rec_len = cpu_to_le16(inode->i_sb->s_blocksize -
+ OCFS2_DIR_REC_LEN(1));
+ de->name_len = 2;
+ strcpy(de->name, "..");
+ ocfs2_set_de_type(de, S_IFDIR);
+
+ status = ocfs2_journal_dirty(handle, new_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ i_size_write(inode, inode->i_sb->s_blocksize);
+ inode->i_nlink = 2;
+ inode->i_blocks = ocfs2_align_bytes_to_sectors(inode->i_sb->s_blocksize);
+ status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = 0;
+bail:
+ if (new_bh)
+ brelse(new_bh);
+
+ mlog_exit(status);
+ return status;
+}
+
+static int ocfs2_mknod(struct inode *dir,
+ struct dentry *dentry,
+ int mode,
+ dev_t dev)
+{
+ int status = 0;
+ struct buffer_head *parent_fe_bh = NULL;
+ struct ocfs2_journal_handle *handle = NULL;
+ struct ocfs2_super *osb;
+ struct ocfs2_dinode *dirfe;
+ struct buffer_head *new_fe_bh = NULL;
+ struct buffer_head *de_bh = NULL;
+ struct inode *inode = NULL;
+ struct ocfs2_alloc_context *inode_ac = NULL;
+ struct ocfs2_alloc_context *data_ac = NULL;
+
+ mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, mode,
+ (unsigned long)dev, dentry->d_name.len,
+ dentry->d_name.name);
+
+ /* get our super block */
+ osb = OCFS2_SB(dir->i_sb);
+
+ if (S_ISDIR(mode) && (dir->i_nlink >= OCFS2_LINK_MAX)) {
+ mlog(ML_ERROR, "inode %"MLFu64" has i_nlink of %u\n",
+ OCFS2_I(dir)->ip_blkno, dir->i_nlink);
+ status = -EMLINK;
+ goto leave;
+ }
+
+ handle = ocfs2_alloc_handle(osb);
+ if (handle == NULL) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto leave;
+ }
+
+ status = ocfs2_meta_lock(dir, handle, &parent_fe_bh, 1);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ goto leave;
+ }
+
+ dirfe = (struct ocfs2_dinode *) parent_fe_bh->b_data;
+ if (!dirfe->i_links_count) {
+ /* can't make a file in a deleted directory. */
+ status = -ENOENT;
+ goto leave;
+ }
+
+ status = ocfs2_check_dir_for_entry(dir, dentry->d_name.name,
+ dentry->d_name.len);
+ if (status)
+ goto leave;
+
+ /* get a spot inside the dir. */
+ status = ocfs2_prepare_dir_for_insert(osb, dir, parent_fe_bh,
+ dentry->d_name.name,
+ dentry->d_name.len, &de_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ /* reserve an inode spot */
+ status = ocfs2_reserve_new_inode(osb, handle, &inode_ac);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto leave;
+ }
+
+ /* are we making a directory? If so, reserve a cluster for its
+ * first extent. */
+ if (S_ISDIR(mode)) {
+ status = ocfs2_reserve_clusters(osb, handle, 1, &data_ac);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto leave;
+ }
+ }
+
+ handle = ocfs2_start_trans(osb, handle, OCFS2_MKNOD_CREDITS);
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ handle = NULL;
+ mlog_errno(status);
+ goto leave;
+ }
+
+ /* do the real work now. */
+ status = ocfs2_mknod_locked(osb, dir, dentry, mode, dev,
+ &new_fe_bh, parent_fe_bh, handle,
+ &inode, inode_ac);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ if (S_ISDIR(mode)) {
+ status = ocfs2_fill_new_dir(osb, handle, dir, inode,
+ new_fe_bh, data_ac);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ status = ocfs2_journal_access(handle, dir, parent_fe_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+ le16_add_cpu(&dirfe->i_links_count, 1);
+ status = ocfs2_journal_dirty(handle, parent_fe_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+ dir->i_nlink++;
+ }
+
+ status = ocfs2_add_entry(handle, dentry, inode,
+ OCFS2_I(inode)->ip_blkno, parent_fe_bh,
+ de_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ insert_inode_hash(inode);
+ dentry->d_op = &ocfs2_dentry_ops;
+ d_instantiate(dentry, inode);
+ status = 0;
+leave:
+ if (handle)
+ ocfs2_commit_trans(handle);
+
+ if (status == -ENOSPC)
+ mlog(0, "Disk is full\n");
+
+ if (new_fe_bh)
+ brelse(new_fe_bh);
+
+ if (de_bh)
+ brelse(de_bh);
+
+ if (parent_fe_bh)
+ brelse(parent_fe_bh);
+
+ if ((status < 0) && inode)
+ iput(inode);
+
+ if (inode_ac)
+ ocfs2_free_alloc_context(inode_ac);
+
+ if (data_ac)
+ ocfs2_free_alloc_context(data_ac);
+
+ mlog_exit(status);
+
+ return status;
+}
+
+static int ocfs2_mknod_locked(struct ocfs2_super *osb,
+ struct inode *dir,
+ struct dentry *dentry, int mode,
+ dev_t dev,
+ struct buffer_head **new_fe_bh,
+ struct buffer_head *parent_fe_bh,
+ struct ocfs2_journal_handle *handle,
+ struct inode **ret_inode,
+ struct ocfs2_alloc_context *inode_ac)
+{
+ int status = 0;
+ struct ocfs2_dinode *fe = NULL;
+ struct ocfs2_extent_list *fel;
+ u64 fe_blkno = 0;
+ u16 suballoc_bit;
+ struct inode *inode = NULL;
+
+ mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, mode,
+ (unsigned long)dev, dentry->d_name.len,
+ dentry->d_name.name);
+
+ *new_fe_bh = NULL;
+ *ret_inode = NULL;
+
+ status = ocfs2_claim_new_inode(osb, handle, inode_ac, &suballoc_bit,
+ &fe_blkno);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ inode = new_inode(dir->i_sb);
+ if (!inode) {
+ status = -ENOMEM;
+ mlog(ML_ERROR, "new_inode failed!\n");
+ goto leave;
+ }
+
+ /* populate as many fields early on as possible - many of
+ * these are used by the support functions here and in
+ * callers. */
+ inode->i_ino = ino_from_blkno(osb->sb, fe_blkno);
+ OCFS2_I(inode)->ip_blkno = fe_blkno;
+ if (S_ISDIR(mode))
+ inode->i_nlink = 2;
+ else
+ inode->i_nlink = 1;
+ inode->i_mode = mode;
+ spin_lock(&osb->osb_lock);
+ inode->i_generation = osb->s_next_generation++;
+ spin_unlock(&osb->osb_lock);
+
+ *new_fe_bh = sb_getblk(osb->sb, fe_blkno);
+ if (!*new_fe_bh) {
+ status = -EIO;
+ mlog_errno(status);
+ goto leave;
+ }
+ ocfs2_set_new_buffer_uptodate(inode, *new_fe_bh);
+
+ status = ocfs2_journal_access(handle, inode, *new_fe_bh,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ fe = (struct ocfs2_dinode *) (*new_fe_bh)->b_data;
+ memset(fe, 0, osb->sb->s_blocksize);
+
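+ /* Fill in the on-disk inode; multi-byte fields are stored in
+ * little-endian byte order. */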
+ fe->i_generation = cpu_to_le32(inode->i_generation);
+ fe->i_fs_generation = cpu_to_le32(osb->fs_generation);
+ fe->i_blkno = cpu_to_le64(fe_blkno);
+ fe->i_suballoc_bit = cpu_to_le16(suballoc_bit);
+ fe->i_suballoc_slot = cpu_to_le16(osb->slot_num);
+ fe->i_uid = cpu_to_le32(current->fsuid);
+ if (dir->i_mode & S_ISGID) {
+ fe->i_gid = cpu_to_le32(dir->i_gid);
+ if (S_ISDIR(mode))
+ mode |= S_ISGID;
+ } else
+ fe->i_gid = cpu_to_le32(current->fsgid);
+ fe->i_mode = cpu_to_le16(mode);
+ if (S_ISCHR(mode) || S_ISBLK(mode))
+ fe->id1.dev1.i_rdev = cpu_to_le64(huge_encode_dev(dev));
+
+ fe->i_links_count = cpu_to_le16(inode->i_nlink);
+
+ fe->i_last_eb_blk = 0;
+ strcpy(fe->i_signature, OCFS2_INODE_SIGNATURE);
+ le32_add_cpu(&fe->i_flags, OCFS2_VALID_FL);
+ fe->i_atime = fe->i_ctime = fe->i_mtime =
+ cpu_to_le64(CURRENT_TIME.tv_sec);
+ fe->i_mtime_nsec = fe->i_ctime_nsec = fe->i_atime_nsec =
+ cpu_to_le32(CURRENT_TIME.tv_nsec);
+ fe->i_dtime = 0;
+
+ fel = &fe->id2.i_list;
+ fel->l_tree_depth = 0;
+ fel->l_next_free_rec = 0;
+ fel->l_count = cpu_to_le16(ocfs2_extent_recs_per_inode(osb->sb));
+
+ status = ocfs2_journal_dirty(handle, *new_fe_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ if (ocfs2_populate_inode(inode, fe, 1) < 0) {
+ mlog(ML_ERROR, "populate inode failed! bh->b_blocknr=%llu, "
+ "i_blkno=%"MLFu64", i_ino=%lu\n",
+ (unsigned long long) (*new_fe_bh)->b_blocknr,
+ fe->i_blkno, inode->i_ino);
+ BUG();
+ }
+
+ ocfs2_inode_set_new(osb, inode);
+ status = ocfs2_create_new_inode_locks(inode);
+ if (status < 0)
+ mlog_errno(status);
+
+ status = 0; /* error in ocfs2_create_new_inode_locks is not
+ * critical */
+
+ *ret_inode = inode;
+leave:
+ if (status < 0) {
+ if (*new_fe_bh) {
+ brelse(*new_fe_bh);
+ *new_fe_bh = NULL;
+ }
+ if (inode)
+ iput(inode);
+ }
+
+ mlog_exit(status);
+ return status;
+}
+
+static int ocfs2_mkdir(struct inode *dir,
+ struct dentry *dentry,
+ int mode)
+{
+ int ret;
+
+ mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry, mode,
+ dentry->d_name.len, dentry->d_name.name);
+ ret = ocfs2_mknod(dir, dentry, mode | S_IFDIR, 0);
+ mlog_exit(ret);
+
+ return ret;
+}
+
+static int ocfs2_create(struct inode *dir,
+ struct dentry *dentry,
+ int mode,
+ struct nameidata *nd)
+{
+ int ret;
+
+ mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry, mode,
+ dentry->d_name.len, dentry->d_name.name);
+ ret = ocfs2_mknod(dir, dentry, mode | S_IFREG, 0);
+ mlog_exit(ret);
+
+ return ret;
+}
+
+static int ocfs2_link(struct dentry *old_dentry,
+ struct inode *dir,
+ struct dentry *dentry)
+{
+ struct ocfs2_journal_handle *handle = NULL;
+ struct inode *inode = old_dentry->d_inode;
+ int err;
+ struct buffer_head *fe_bh = NULL;
+ struct buffer_head *parent_fe_bh = NULL;
+ struct buffer_head *de_bh = NULL;
+ struct ocfs2_dinode *fe = NULL;
+ struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
+
+ mlog_entry("(inode=%lu, old='%.*s' new='%.*s')\n", inode->i_ino,
+ old_dentry->d_name.len, old_dentry->d_name.name,
+ dentry->d_name.len, dentry->d_name.name);
+
+ if (S_ISDIR(inode->i_mode)) {
+ err = -EPERM;
+ goto bail;
+ }
+
+ if (inode->i_nlink >= OCFS2_LINK_MAX) {
+ err = -EMLINK;
+ goto bail;
+ }
+
+ handle = ocfs2_alloc_handle(osb);
+ if (handle == NULL) {
+ err = -ENOMEM;
+ goto bail;
+ }
+
+ err = ocfs2_meta_lock(dir, handle, &parent_fe_bh, 1);
+ if (err < 0) {
+ if (err != -ENOENT)
+ mlog_errno(err);
+ goto bail;
+ }
+
+ err = ocfs2_check_dir_for_entry(dir, dentry->d_name.name,
+ dentry->d_name.len);
+ if (err)
+ goto bail;
+
+ err = ocfs2_prepare_dir_for_insert(osb, dir, parent_fe_bh,
+ dentry->d_name.name,
+ dentry->d_name.len, &de_bh);
+ if (err < 0) {
+ mlog_errno(err);
+ goto bail;
+ }
+
+ err = ocfs2_meta_lock(inode, handle, &fe_bh, 1);
+ if (err < 0) {
+ if (err != -ENOENT)
+ mlog_errno(err);
+ goto bail;
+ }
+
+ fe = (struct ocfs2_dinode *) fe_bh->b_data;
+ if (le16_to_cpu(fe->i_links_count) >= OCFS2_LINK_MAX) {
+ err = -EMLINK;
+ goto bail;
+ }
+
+ handle = ocfs2_start_trans(osb, handle, OCFS2_LINK_CREDITS);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ handle = NULL;
+ mlog_errno(err);
+ goto bail;
+ }
+
+ err = ocfs2_journal_access(handle, inode, fe_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (err < 0) {
+ mlog_errno(err);
+ goto bail;
+ }
+
+ inode->i_nlink++;
+ inode->i_ctime = CURRENT_TIME;
+ fe->i_links_count = cpu_to_le16(inode->i_nlink);
+ fe->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
+ fe->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
+
+ err = ocfs2_journal_dirty(handle, fe_bh);
+ if (err < 0) {
+ le16_add_cpu(&fe->i_links_count, -1);
+ inode->i_nlink--;
+ mlog_errno(err);
+ goto bail;
+ }
+
+ err = ocfs2_add_entry(handle, dentry, inode,
+ OCFS2_I(inode)->ip_blkno,
+ parent_fe_bh, de_bh);
+ if (err) {
+ le16_add_cpu(&fe->i_links_count, -1);
+ inode->i_nlink--;
+ mlog_errno(err);
+ goto bail;
+ }
+
+ atomic_inc(&inode->i_count);
+ dentry->d_op = &ocfs2_dentry_ops;
+ d_instantiate(dentry, inode);
+bail:
+ if (handle)
+ ocfs2_commit_trans(handle);
+ if (de_bh)
+ brelse(de_bh);
+ if (fe_bh)
+ brelse(fe_bh);
+ if (parent_fe_bh)
+ brelse(parent_fe_bh);
+
+ mlog_exit(err);
+
+ return err;
+}
+
+static int ocfs2_unlink(struct inode *dir,
+ struct dentry *dentry)
+{
+ int status;
+ unsigned int saved_nlink = 0;
+ struct inode *inode = dentry->d_inode;
+ struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
+ u64 blkno;
+ struct ocfs2_dinode *fe = NULL;
+ struct buffer_head *fe_bh = NULL;
+ struct buffer_head *parent_node_bh = NULL;
+ struct ocfs2_journal_handle *handle = NULL;
+ struct ocfs2_dir_entry *dirent = NULL;
+ struct buffer_head *dirent_bh = NULL;
+ char orphan_name[OCFS2_ORPHAN_NAMELEN + 1];
+ struct buffer_head *orphan_entry_bh = NULL;
+
+ mlog_entry("(0x%p, 0x%p, '%.*s')\n", dir, dentry,
+ dentry->d_name.len, dentry->d_name.name);
+
+ BUG_ON(dentry->d_parent->d_inode != dir);
+
+ mlog(0, "ino = %"MLFu64"\n", OCFS2_I(inode)->ip_blkno);
+
+ if (inode == osb->root_inode) {
+ mlog(0, "Cannot delete the root directory\n");
+ status = -EPERM;
+ goto leave;
+ }
+
+ handle = ocfs2_alloc_handle(osb);
+ if (handle == NULL) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto leave;
+ }
+
+ status = ocfs2_meta_lock(dir, handle, &parent_node_bh, 1);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ goto leave;
+ }
+
+ status = ocfs2_find_files_on_disk(dentry->d_name.name,
+ dentry->d_name.len, &blkno,
+ dir, &dirent_bh, &dirent);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ goto leave;
+ }
+
+ if (OCFS2_I(inode)->ip_blkno != blkno) {
+ status = -ENOENT;
+
+ mlog(0, "ip_blkno (%"MLFu64") != dirent blkno (%"MLFu64") "
+ "ip_flags = %x\n", OCFS2_I(inode)->ip_blkno, blkno,
+ OCFS2_I(inode)->ip_flags);
+ goto leave;
+ }
+
+ status = ocfs2_meta_lock(inode, handle, &fe_bh, 1);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ goto leave;
+ }
+
+ if (S_ISDIR(inode->i_mode)) {
+ if (!ocfs2_empty_dir(inode)) {
+ status = -ENOTEMPTY;
+ goto leave;
+ } else if (inode->i_nlink != 2) {
+ status = -ENOTEMPTY;
+ goto leave;
+ }
+ }
+
+ /* There are still a few steps left until we can consider the
+ * unlink to have succeeded. Save off nlink here before
+ * modification so we can set it back in case we hit an issue
+ * before commit. */
+ saved_nlink = inode->i_nlink;
+ if (S_ISDIR(inode->i_mode))
+ inode->i_nlink = 0;
+ else
+ inode->i_nlink--;
+
+ status = ocfs2_request_unlink_vote(inode, dentry,
+ (unsigned int) inode->i_nlink);
+ if (status < 0) {
+ /* This vote should succeed under all normal
+ * circumstances. */
+ mlog_errno(status);
+ goto leave;
+ }
+
+ if (!inode->i_nlink) {
+ status = ocfs2_prepare_orphan_dir(osb, handle, inode,
+ orphan_name,
+ &orphan_entry_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+ }
+
+ handle = ocfs2_start_trans(osb, handle, OCFS2_UNLINK_CREDITS);
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ handle = NULL;
+ mlog_errno(status);
+ goto leave;
+ }
+
+ status = ocfs2_journal_access(handle, inode, fe_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ fe = (struct ocfs2_dinode *) fe_bh->b_data;
+
+ if (!inode->i_nlink) {
+ status = ocfs2_orphan_add(osb, handle, inode, fe, orphan_name,
+ orphan_entry_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+ }
+
+ /* delete the name from the parent dir */
+ status = ocfs2_delete_entry(handle, dir, dirent, dirent_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ /* We can set nlink on the dinode now. clear the saved version
+ * so that it doesn't get set later. */
+ fe->i_links_count = cpu_to_le16(inode->i_nlink);
+ saved_nlink = 0;
+
+ status = ocfs2_journal_dirty(handle, fe_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ if (S_ISDIR(inode->i_mode)) {
+ dir->i_nlink--;
+ status = ocfs2_mark_inode_dirty(handle, dir,
+ parent_node_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ dir->i_nlink++;
+ }
+ }
+
+leave:
+ if (status < 0 && saved_nlink)
+ inode->i_nlink = saved_nlink;
+
+ if (handle)
+ ocfs2_commit_trans(handle);
+
+ if (fe_bh)
+ brelse(fe_bh);
+
+ if (dirent_bh)
+ brelse(dirent_bh);
+
+ if (parent_node_bh)
+ brelse(parent_node_bh);
+
+ if (orphan_entry_bh)
+ brelse(orphan_entry_bh);
+
+ mlog_exit(status);
+
+ return status;
+}
+
+/*
+ * The only place this should be used is rename!
+ * if they have the same id, then the 1st one is the only one locked.
+ */
+static int ocfs2_double_lock(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct buffer_head **bh1,
+ struct inode *inode1,
+ struct buffer_head **bh2,
+ struct inode *inode2)
+{
+ int status;
+ struct ocfs2_inode_info *oi1 = OCFS2_I(inode1);
+ struct ocfs2_inode_info *oi2 = OCFS2_I(inode2);
+ struct buffer_head **tmpbh;
+ struct inode *tmpinode;
+
+ mlog_entry("(inode1 = %"MLFu64", inode2 = %"MLFu64")\n",
+ oi1->ip_blkno, oi2->ip_blkno);
+
+ BUG_ON(!handle);
+
+ if (*bh1)
+ *bh1 = NULL;
+ if (*bh2)
+ *bh2 = NULL;
+
+ /* we always want to lock the one with the lower lockid first. */
+ if (oi1->ip_blkno != oi2->ip_blkno) {
+ if (oi1->ip_blkno < oi2->ip_blkno) {
+ /* switch id1 and id2 around */
+ mlog(0, "switching them around...\n");
+ tmpbh = bh2;
+ bh2 = bh1;
+ bh1 = tmpbh;
+
+ tmpinode = inode2;
+ inode2 = inode1;
+ inode1 = tmpinode;
+ }
+ /* lock id2 */
+ status = ocfs2_meta_lock(inode2, handle, bh2, 1);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+ /* lock id1 */
+ status = ocfs2_meta_lock(inode1, handle, bh1, 1);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ goto bail;
+ }
+bail:
+ mlog_exit(status);
+ return status;
+}
+
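+/*
+ * PARENT_INO(buffer) skips the first (".") entry in a directory block
+ * and evaluates to the inode field of the second ("..") entry.
+ */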
+#define PARENT_INO(buffer) \
+ ((struct ocfs2_dir_entry *) \
+ ((char *)buffer + \
+ le16_to_cpu(((struct ocfs2_dir_entry *)buffer)->rec_len)))->inode
+
+static int ocfs2_rename(struct inode *old_dir,
+ struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry)
+{
+ int status = 0, rename_lock = 0;
+ struct inode *old_inode = old_dentry->d_inode;
+ struct inode *new_inode = new_dentry->d_inode;
+ struct ocfs2_dinode *newfe = NULL;
+ char orphan_name[OCFS2_ORPHAN_NAMELEN + 1];
+ struct buffer_head *orphan_entry_bh = NULL;
+ struct buffer_head *newfe_bh = NULL;
+ struct buffer_head *insert_entry_bh = NULL;
+ struct ocfs2_super *osb = NULL;
+ u64 newfe_blkno;
+ struct ocfs2_journal_handle *handle = NULL;
+ struct buffer_head *old_dir_bh = NULL;
+ struct buffer_head *new_dir_bh = NULL;
+ struct ocfs2_dir_entry *old_de = NULL, *new_de = NULL; // dirent for old_dentry
+ // and new_dentry
+ struct buffer_head *new_de_bh = NULL, *old_de_bh = NULL; // bhs for above
+ struct buffer_head *old_inode_de_bh = NULL; // if old_dentry is a dir,
+ // this is the 1st dirent bh
+ nlink_t old_dir_nlink = old_dir->i_nlink, new_dir_nlink = new_dir->i_nlink;
+ unsigned int links_count;
+
+ /* At some point it might be nice to break this function up a
+ * bit. */
+
+ mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p, from='%.*s' to='%.*s')\n",
+ old_dir, old_dentry, new_dir, new_dentry,
+ old_dentry->d_name.len, old_dentry->d_name.name,
+ new_dentry->d_name.len, new_dentry->d_name.name);
+
+ osb = OCFS2_SB(old_dir->i_sb);
+
+ if (new_inode) {
+ if (!igrab(new_inode))
+ BUG();
+ }
+
+ if (atomic_read(&old_dentry->d_count) > 2) {
+ shrink_dcache_parent(old_dentry);
+ if (atomic_read(&old_dentry->d_count) > 2) {
+ status = -EBUSY;
+ goto bail;
+ }
+ }
+
+ /* Assume a directory hierarchy as follows:
+ * a/b/c
+ * a/d
+ * a,b,c, and d are all directories.
+ *
+ * from cwd of 'a' on both nodes:
+ * node1: mv b/c d
+ * node2: mv d b/c
+ *
+ * And that's why, just like the VFS, we need a file system
+ * rename lock. */
+ if (old_dentry != new_dentry) {
+ status = ocfs2_rename_lock(osb);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ rename_lock = 1;
+ }
+
+ handle = ocfs2_alloc_handle(osb);
+ if (handle == NULL) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* if old and new are the same, this'll just do one lock. */
+ status = ocfs2_double_lock(osb, handle,
+ &old_dir_bh, old_dir,
+ &new_dir_bh, new_dir);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* make sure both dirs have bhs
+ * get an extra ref on old_dir_bh if old==new */
+ if (!new_dir_bh) {
+ if (old_dir_bh) {
+ new_dir_bh = old_dir_bh;
+ get_bh(new_dir_bh);
+ } else {
+ mlog(ML_ERROR, "no old_dir_bh!\n");
+ status = -EIO;
+ goto bail;
+ }
+ }
+
+ if (S_ISDIR(old_inode->i_mode)) {
+ /* Directories actually require metadata updates to
+ * the directory info so we can't get away with not
+ * doing node locking on it. */
+ status = ocfs2_meta_lock(old_inode, handle, NULL, 1);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_request_rename_vote(old_inode, old_dentry);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = -EIO;
+ old_inode_de_bh = ocfs2_bread(old_inode, 0, &status, 0);
+ if (!old_inode_de_bh)
+ goto bail;
+
+ status = -EIO;
+ if (le64_to_cpu(PARENT_INO(old_inode_de_bh->b_data)) !=
+ OCFS2_I(old_dir)->ip_blkno)
+ goto bail;
+ status = -EMLINK;
+ if (!new_inode && new_dir!=old_dir &&
+ new_dir->i_nlink >= OCFS2_LINK_MAX)
+ goto bail;
+ } else {
+ /* Ah, the simple case - we're a file so just send a
+ * message. */
+ status = ocfs2_request_rename_vote(old_inode, old_dentry);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
+ status = -ENOENT;
+ old_de_bh = ocfs2_find_entry(old_dentry->d_name.name,
+ old_dentry->d_name.len,
+ old_dir, &old_de);
+ if (!old_de_bh)
+ goto bail;
+
+ /*
+ * The inode number check below is _not_ about possible IO errors.
+ * We might rmdir the source, keep it as pwd of some process
+ * and merrily kill the link to whatever was created under the
+ * same name. Goodbye sticky bit ;-<
+ */
+ if (le64_to_cpu(old_de->inode) != OCFS2_I(old_inode)->ip_blkno)
+ goto bail;
+
+ /* check if the target already exists (in which case we need
+ * to delete it */
+ status = ocfs2_find_files_on_disk(new_dentry->d_name.name,
+ new_dentry->d_name.len,
+ &newfe_blkno, new_dir, &new_de_bh,
+ &new_de);
+ /* The only error we allow here is -ENOENT because the new
+ * file not existing is perfectly valid. */
+ if ((status < 0) && (status != -ENOENT)) {
+ /* If we cannot find the file specified we should just */
+ /* return the error... */
+ mlog_errno(status);
+ goto bail;
+ }
+
+ if (!new_de && new_inode)
+ mlog(ML_ERROR, "inode %lu does not exist in it's parent "
+ "directory!", new_inode->i_ino);
+
+ /* In case we need to overwrite an existing file, we blow it
+ * away first */
+ if (new_de) {
+ /* VFS didn't think there existed an inode here, but
+ * someone else in the cluster must have raced our
+ * rename to create one. Today we error cleanly, in
+ * the future we should consider calling iget to build
+ * a new struct inode for this entry. */
+ if (!new_inode) {
+ status = -EACCES;
+
+ mlog(0, "We found an inode for name %.*s but VFS "
+ "didn't give us one.\n", new_dentry->d_name.len,
+ new_dentry->d_name.name);
+ goto bail;
+ }
+
+ if (OCFS2_I(new_inode)->ip_blkno != newfe_blkno) {
+ status = -EACCES;
+
+ mlog(0, "Inode blkno (%"MLFu64") and dir (%"MLFu64") "
+ "disagree. ip_flags = %x\n",
+ OCFS2_I(new_inode)->ip_blkno, newfe_blkno,
+ OCFS2_I(new_inode)->ip_flags);
+ goto bail;
+ }
+
+ status = ocfs2_meta_lock(new_inode, handle, &newfe_bh, 1);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ goto bail;
+ }
+
+ if (S_ISDIR(new_inode->i_mode))
+ links_count = 0;
+ else
+ links_count = (unsigned int) (new_inode->i_nlink - 1);
+
+ status = ocfs2_request_unlink_vote(new_inode, new_dentry,
+ links_count);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ newfe = (struct ocfs2_dinode *) newfe_bh->b_data;
+
+ mlog(0, "aha rename over existing... new_de=%p "
+ "new_blkno=%"MLFu64" newfebh=%p bhblocknr=%llu\n",
+ new_de, newfe_blkno, newfe_bh, newfe_bh ?
+ (unsigned long long)newfe_bh->b_blocknr : 0ULL);
+
+ if (S_ISDIR(new_inode->i_mode) || (new_inode->i_nlink == 1)) {
+ status = ocfs2_prepare_orphan_dir(osb, handle,
+ new_inode,
+ orphan_name,
+ &orphan_entry_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+ } else {
+ BUG_ON(new_dentry->d_parent->d_inode != new_dir);
+
+ status = ocfs2_check_dir_for_entry(new_dir,
+ new_dentry->d_name.name,
+ new_dentry->d_name.len);
+ if (status)
+ goto bail;
+
+ status = ocfs2_prepare_dir_for_insert(osb, new_dir, new_dir_bh,
+ new_dentry->d_name.name,
+ new_dentry->d_name.len,
+ &insert_entry_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
+ handle = ocfs2_start_trans(osb, handle, OCFS2_RENAME_CREDITS);
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ handle = NULL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ if (new_de) {
+ if (S_ISDIR(new_inode->i_mode)) {
+ if (!ocfs2_empty_dir(new_inode) ||
+ new_inode->i_nlink != 2) {
+ status = -ENOTEMPTY;
+ goto bail;
+ }
+ }
+ status = ocfs2_journal_access(handle, new_inode, newfe_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ if (S_ISDIR(new_inode->i_mode) ||
+ (newfe->i_links_count == cpu_to_le16(1))){
+ status = ocfs2_orphan_add(osb, handle, new_inode,
+ newfe, orphan_name,
+ orphan_entry_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
+ /* change the dirent to point to the correct inode */
+ status = ocfs2_journal_access(handle, new_dir, new_de_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ new_de->inode = cpu_to_le64(OCFS2_I(old_inode)->ip_blkno);
+ new_de->file_type = old_de->file_type;
+ new_dir->i_version++;
+ status = ocfs2_journal_dirty(handle, new_de_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ if (S_ISDIR(new_inode->i_mode))
+ newfe->i_links_count = 0;
+ else
+ le16_add_cpu(&newfe->i_links_count, -1);
+
+ status = ocfs2_journal_dirty(handle, newfe_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ } else {
+ /* if the name was not found in new_dir, add it now */
+ status = ocfs2_add_entry(handle, new_dentry, old_inode,
+ OCFS2_I(old_inode)->ip_blkno,
+ new_dir_bh, insert_entry_bh);
+ }
+
+ old_inode->i_ctime = CURRENT_TIME;
+ mark_inode_dirty(old_inode);
+
+ /* now that the name has been added to new_dir, remove the old name */
+ status = ocfs2_delete_entry(handle, old_dir, old_de, old_de_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ if (new_inode) {
+ new_inode->i_nlink--;
+ new_inode->i_ctime = CURRENT_TIME;
+ }
+ old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME;
+ if (old_inode_de_bh) {
+ status = ocfs2_journal_access(handle, old_inode,
+ old_inode_de_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ PARENT_INO(old_inode_de_bh->b_data) =
+ cpu_to_le64(OCFS2_I(new_dir)->ip_blkno);
+ status = ocfs2_journal_dirty(handle, old_inode_de_bh);
+ old_dir->i_nlink--;
+ if (new_inode) {
+ new_inode->i_nlink--;
+ } else {
+ new_dir->i_nlink++;
+ mark_inode_dirty(new_dir);
+ }
+ }
+ mark_inode_dirty(old_dir);
+ if (new_inode)
+ mark_inode_dirty(new_inode);
+
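+ /* If either directory's link count changed above, write the new
+ * value into its on-disk inode as well. */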
+ if (old_dir != new_dir)
+ if (new_dir_nlink != new_dir->i_nlink) {
+ if (!new_dir_bh) {
+ mlog(ML_ERROR, "need to change nlink for new "
+ "dir %"MLFu64" from %d to %d but bh is "
+ "NULL\n", OCFS2_I(new_dir)->ip_blkno,
+ (int)new_dir_nlink, new_dir->i_nlink);
+ } else {
+ struct ocfs2_dinode *fe;
+ status = ocfs2_journal_access(handle,
+ new_dir,
+ new_dir_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ fe = (struct ocfs2_dinode *) new_dir_bh->b_data;
+ fe->i_links_count = cpu_to_le16(new_dir->i_nlink);
+ status = ocfs2_journal_dirty(handle, new_dir_bh);
+ }
+ }
+
+ if (old_dir_nlink != old_dir->i_nlink) {
+ if (!old_dir_bh) {
+ mlog(ML_ERROR, "need to change nlink for old dir "
+ "%"MLFu64" from %d to %d but bh is NULL!\n",
+ OCFS2_I(old_dir)->ip_blkno,
+ (int)old_dir_nlink,
+ old_dir->i_nlink);
+ } else {
+ struct ocfs2_dinode *fe;
+ status = ocfs2_journal_access(handle, old_dir,
+ old_dir_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ fe = (struct ocfs2_dinode *) old_dir_bh->b_data;
+ fe->i_links_count = cpu_to_le16(old_dir->i_nlink);
+ status = ocfs2_journal_dirty(handle, old_dir_bh);
+ }
+ }
+
+ status = 0;
+bail:
+ if (rename_lock)
+ ocfs2_rename_unlock(osb);
+
+ if (handle)
+ ocfs2_commit_trans(handle);
+
+ if (new_inode)
+ sync_mapping_buffers(old_inode->i_mapping);
+
+ if (new_inode)
+ iput(new_inode);
+ if (newfe_bh)
+ brelse(newfe_bh);
+ if (old_dir_bh)
+ brelse(old_dir_bh);
+ if (new_dir_bh)
+ brelse(new_dir_bh);
+ if (new_de_bh)
+ brelse(new_de_bh);
+ if (old_de_bh)
+ brelse(old_de_bh);
+ if (old_inode_de_bh)
+ brelse(old_inode_de_bh);
+ if (orphan_entry_bh)
+ brelse(orphan_entry_bh);
+ if (insert_entry_bh)
+ brelse(insert_entry_bh);
+
+ mlog_exit(status);
+
+ return status;
+}
+
+/*
+ * we expect i_size = strlen(symname). Copy symname into the file
+ * data, including the null terminator.
+ */
+static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ const char *symname)
+{
+ struct buffer_head **bhs = NULL;
+ const char *c;
+ struct super_block *sb = osb->sb;
+ u64 p_blkno;
+ int p_blocks;
+ int virtual, blocks, status, i, bytes_left;
+
+ bytes_left = i_size_read(inode) + 1;
+ /* we can't trust i_blocks because we're actually going to
+ * write i_size + 1 bytes. */
+ blocks = (bytes_left + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
+
+ mlog_entry("i_blocks = %lu, i_size = %llu, blocks = %d\n",
+ inode->i_blocks, i_size_read(inode), blocks);
+
+ /* Sanity check -- make sure we're going to fit. */
+ if (bytes_left >
+ ocfs2_clusters_to_bytes(sb, OCFS2_I(inode)->ip_clusters)) {
+ status = -EIO;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ bhs = kcalloc(blocks, sizeof(struct buffer_head *), GFP_KERNEL);
+ if (!bhs) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_extent_map_get_blocks(inode, 0, 1, &p_blkno,
+ &p_blocks);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* links can never be larger than one cluster so we know this
+ * is all going to be contiguous, but do a sanity check
+ * anyway. */
+ if ((p_blocks << sb->s_blocksize_bits) < bytes_left) {
+ status = -EIO;
+ mlog_errno(status);
+ goto bail;
+ }
+
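+ /* Copy the target path one block at a time; the blocks were checked
+ * above to be physically contiguous, so p_blkno simply increments. */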
+ virtual = 0;
+ while(bytes_left > 0) {
+ c = &symname[virtual * sb->s_blocksize];
+
+ bhs[virtual] = sb_getblk(sb, p_blkno);
+ if (!bhs[virtual]) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+ ocfs2_set_new_buffer_uptodate(inode, bhs[virtual]);
+
+ status = ocfs2_journal_access(handle, inode, bhs[virtual],
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ memset(bhs[virtual]->b_data, 0, sb->s_blocksize);
+
+ memcpy(bhs[virtual]->b_data, c,
+ (bytes_left > sb->s_blocksize) ? sb->s_blocksize :
+ bytes_left);
+
+ status = ocfs2_journal_dirty(handle, bhs[virtual]);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ virtual++;
+ p_blkno++;
+ bytes_left -= sb->s_blocksize;
+ }
+
+ status = 0;
+bail:
+
+ if (bhs) {
+ for(i = 0; i < blocks; i++)
+ if (bhs[i])
+ brelse(bhs[i]);
+ kfree(bhs);
+ }
+
+ mlog_exit(status);
+ return status;
+}
+
+static int ocfs2_symlink(struct inode *dir,
+ struct dentry *dentry,
+ const char *symname)
+{
+ int status, l, credits;
+ u64 newsize;
+ struct ocfs2_super *osb = NULL;
+ struct inode *inode = NULL;
+ struct super_block *sb;
+ struct buffer_head *new_fe_bh = NULL;
+ struct buffer_head *de_bh = NULL;
+ struct buffer_head *parent_fe_bh = NULL;
+ struct ocfs2_dinode *fe = NULL;
+ struct ocfs2_dinode *dirfe;
+ struct ocfs2_journal_handle *handle = NULL;
+ struct ocfs2_alloc_context *inode_ac = NULL;
+ struct ocfs2_alloc_context *data_ac = NULL;
+
+ mlog_entry("(0x%p, 0x%p, symname='%s' actual='%.*s')\n", dir,
+ dentry, symname, dentry->d_name.len, dentry->d_name.name);
+
+ sb = dir->i_sb;
+ osb = OCFS2_SB(sb);
+
+ l = strlen(symname) + 1;
+
+ credits = ocfs2_calc_symlink_credits(sb);
+
+ handle = ocfs2_alloc_handle(osb);
+ if (handle == NULL) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* lock the parent directory */
+ status = ocfs2_meta_lock(dir, handle, &parent_fe_bh, 1);
+ if (status < 0) {
+ if (status != -ENOENT)
+ mlog_errno(status);
+ goto bail;
+ }
+
+ dirfe = (struct ocfs2_dinode *) parent_fe_bh->b_data;
+ if (!dirfe->i_links_count) {
+ /* can't make a file in a deleted directory. */
+ status = -ENOENT;
+ goto bail;
+ }
+
+ status = ocfs2_check_dir_for_entry(dir, dentry->d_name.name,
+ dentry->d_name.len);
+ if (status)
+ goto bail;
+
+ status = ocfs2_prepare_dir_for_insert(osb, dir, parent_fe_bh,
+ dentry->d_name.name,
+ dentry->d_name.len, &de_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_reserve_new_inode(osb, handle, &inode_ac);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* don't reserve bitmap space for fast symlinks. */
+ if (l > ocfs2_fast_symlink_chars(sb)) {
+ status = ocfs2_reserve_clusters(osb, handle, 1, &data_ac);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
+ handle = ocfs2_start_trans(osb, handle, credits);
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ handle = NULL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_mknod_locked(osb, dir, dentry,
+ S_IFLNK | S_IRWXUGO, 0,
+ &new_fe_bh, parent_fe_bh, handle,
+ &inode, inode_ac);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ fe = (struct ocfs2_dinode *) new_fe_bh->b_data;
+ inode->i_rdev = 0;
+ newsize = l - 1;
+ if (l > ocfs2_fast_symlink_chars(sb)) {
+ inode->i_op = &ocfs2_symlink_inode_operations;
+ status = ocfs2_do_extend_allocation(osb, inode, 1, new_fe_bh,
+ handle, data_ac, NULL,
+ NULL);
+ if (status < 0) {
+ if (status != -ENOSPC && status != -EINTR) {
+ mlog(ML_ERROR, "Failed to extend file to "
+ "%"MLFu64"\n",
+ newsize);
+ mlog_errno(status);
+ status = -ENOSPC;
+ }
+ goto bail;
+ }
+ i_size_write(inode, newsize);
+ inode->i_blocks = ocfs2_align_bytes_to_sectors(newsize);
+ } else {
+ inode->i_op = &ocfs2_fast_symlink_inode_operations;
+ memcpy((char *) fe->id2.i_symlink, symname, l);
+ i_size_write(inode, newsize);
+ inode->i_blocks = 0;
+ }
+
+ status = ocfs2_mark_inode_dirty(handle, inode, new_fe_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ if (!ocfs2_inode_is_fast_symlink(inode)) {
+ status = ocfs2_create_symlink_data(osb, handle, inode,
+ symname);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
+ status = ocfs2_add_entry(handle, dentry, inode,
+ le64_to_cpu(fe->i_blkno), parent_fe_bh,
+ de_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ insert_inode_hash(inode);
+ dentry->d_op = &ocfs2_dentry_ops;
+ d_instantiate(dentry, inode);
+bail:
+ if (handle)
+ ocfs2_commit_trans(handle);
+ if (new_fe_bh)
+ brelse(new_fe_bh);
+ if (parent_fe_bh)
+ brelse(parent_fe_bh);
+ if (de_bh)
+ brelse(de_bh);
+ if (inode_ac)
+ ocfs2_free_alloc_context(inode_ac);
+ if (data_ac)
+ ocfs2_free_alloc_context(data_ac);
+ if ((status < 0) && inode)
+ iput(inode);
+
+ mlog_exit(status);
+
+ return status;
+}
+
+int ocfs2_check_dir_entry(struct inode * dir,
+ struct ocfs2_dir_entry * de,
+ struct buffer_head * bh,
+ unsigned long offset)
+{
+ const char *error_msg = NULL;
+ const int rlen = le16_to_cpu(de->rec_len);
+
+ if (rlen < OCFS2_DIR_REC_LEN(1))
+ error_msg = "rec_len is smaller than minimal";
+ else if (rlen % 4 != 0)
+ error_msg = "rec_len % 4 != 0";
+ else if (rlen < OCFS2_DIR_REC_LEN(de->name_len))
+ error_msg = "rec_len is too small for name_len";
+ else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+ error_msg = "directory entry across blocks";
+
+ if (error_msg != NULL)
+ mlog(ML_ERROR, "bad entry in directory #%"MLFu64": %s - "
+ "offset=%lu, inode=%"MLFu64", rec_len=%d, name_len=%d\n",
+ OCFS2_I(dir)->ip_blkno, error_msg, offset,
+ le64_to_cpu(de->inode), rlen, de->name_len);
+ return error_msg == NULL ? 1 : 0;
+}
+
+/* we don't always have a dentry for what we want to add, so people
+ * like orphan dir can call this instead.
+ *
+ * If you pass me insert_bh, I'll skip the search of the other dir
+ * blocks and put the record in there.
+ */
+static int __ocfs2_add_entry(struct ocfs2_journal_handle *handle,
+ struct inode *dir,
+ const char *name, int namelen,
+ struct inode *inode, u64 blkno,
+ struct buffer_head *parent_fe_bh,
+ struct buffer_head *insert_bh)
+{
+ unsigned long offset;
+ unsigned short rec_len;
+ struct ocfs2_dir_entry *de, *de1;
+ struct super_block *sb;
+ int retval, status;
+
+ mlog_entry_void();
+
+ sb = dir->i_sb;
+
+ if (!namelen)
+ return -EINVAL;
+
+ rec_len = OCFS2_DIR_REC_LEN(namelen);
+ offset = 0;
+ de = (struct ocfs2_dir_entry *) insert_bh->b_data;
+ while (1) {
+ BUG_ON((char *)de >= sb->s_blocksize + insert_bh->b_data);
+ /* These checks should've already been passed by the
+ * prepare function, but I guess we can leave them
+ * here anyway. */
+ if (!ocfs2_check_dir_entry(dir, de, insert_bh, offset)) {
+ retval = -ENOENT;
+ goto bail;
+ }
+ if (ocfs2_match(namelen, name, de)) {
+ retval = -EEXIST;
+ goto bail;
+ }
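+ /* The new entry fits here if this slot is unused and large enough,
+ * or if a live entry has enough slack after its own record to be
+ * split in two. */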
+ if (((le64_to_cpu(de->inode) == 0) &&
+ (le16_to_cpu(de->rec_len) >= rec_len)) ||
+ (le16_to_cpu(de->rec_len) >=
+ (OCFS2_DIR_REC_LEN(de->name_len) + rec_len))) {
+ status = ocfs2_journal_access(handle, dir, insert_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ /* By now the buffer is marked for journaling */
+ offset += le16_to_cpu(de->rec_len);
+ if (le64_to_cpu(de->inode)) {
+ de1 = (struct ocfs2_dir_entry *)((char *) de +
+ OCFS2_DIR_REC_LEN(de->name_len));
+ de1->rec_len =
+ cpu_to_le16(le16_to_cpu(de->rec_len) -
+ OCFS2_DIR_REC_LEN(de->name_len));
+ de->rec_len = cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
+ de = de1;
+ }
+ de->file_type = OCFS2_FT_UNKNOWN;
+ if (blkno) {
+ de->inode = cpu_to_le64(blkno);
+ ocfs2_set_de_type(de, inode->i_mode);
+ } else
+ de->inode = 0;
+ de->name_len = namelen;
+ memcpy(de->name, name, namelen);
+
+ dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+ dir->i_version++;
+ status = ocfs2_journal_dirty(handle, insert_bh);
+ retval = 0;
+ goto bail;
+ }
+ offset += le16_to_cpu(de->rec_len);
+ de = (struct ocfs2_dir_entry *) ((char *) de + le16_to_cpu(de->rec_len));
+ }
+
+ /* when you think about it, the assert above should prevent us
+ * from ever getting here. */
+ retval = -ENOSPC;
+bail:
+
+ mlog_exit(retval);
+ return retval;
+}
+
+
+/*
+ * ocfs2_delete_entry deletes a directory entry by merging it with the
+ * previous entry
+ */
+static int ocfs2_delete_entry(struct ocfs2_journal_handle *handle,
+ struct inode *dir,
+ struct ocfs2_dir_entry *de_del,
+ struct buffer_head *bh)
+{
+ struct ocfs2_dir_entry *de, *pde;
+ int i, status = -ENOENT;
+
+ mlog_entry("(0x%p, 0x%p, 0x%p, 0x%p)\n", handle, dir, de_del, bh);
+
+ i = 0;
+ pde = NULL;
+ de = (struct ocfs2_dir_entry *) bh->b_data;
+ while (i < bh->b_size) {
+ if (!ocfs2_check_dir_entry(dir, de, bh, i)) {
+ status = -EIO;
+ mlog_errno(status);
+ goto bail;
+ }
+ if (de == de_del) {
+ status = ocfs2_journal_access(handle, dir, bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ status = -EIO;
+ mlog_errno(status);
+ goto bail;
+ }
+ if (pde)
+ pde->rec_len =
+ cpu_to_le16(le16_to_cpu(pde->rec_len) +
+ le16_to_cpu(de->rec_len));
+ else
+ de->inode = 0;
+ dir->i_version++;
+ status = ocfs2_journal_dirty(handle, bh);
+ goto bail;
+ }
+ i += le16_to_cpu(de->rec_len);
+ pde = de;
+ de = (struct ocfs2_dir_entry *)((char *)de + le16_to_cpu(de->rec_len));
+ }
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+/*
+ * Returns 0 if not found, -1 on failure, and 1 on success
+ */
+static int inline ocfs2_search_dirblock(struct buffer_head *bh,
+ struct inode *dir,
+ const char *name, int namelen,
+ unsigned long offset,
+ struct ocfs2_dir_entry **res_dir)
+{
+ struct ocfs2_dir_entry *de;
+ char *dlimit, *de_buf;
+ int de_len;
+ int ret = 0;
+
+ mlog_entry_void();
+
+ de_buf = bh->b_data;
+ dlimit = de_buf + dir->i_sb->s_blocksize;
+
+ while (de_buf < dlimit) {
+ /* this code is executed quadratically often */
+ /* do minimal checking `by hand' */
+
+ de = (struct ocfs2_dir_entry *) de_buf;
+
+ if (de_buf + namelen <= dlimit &&
+ ocfs2_match(namelen, name, de)) {
+ /* found a match - just to be sure, do a full check */
+ if (!ocfs2_check_dir_entry(dir, de, bh, offset)) {
+ ret = -1;
+ goto bail;
+ }
+ *res_dir = de;
+ ret = 1;
+ goto bail;
+ }
+
+ /* prevent looping on a bad block */
+ de_len = le16_to_cpu(de->rec_len);
+ if (de_len <= 0) {
+ ret = -1;
+ goto bail;
+ }
+
+ de_buf += de_len;
+ offset += de_len;
+ }
+
+bail:
+ mlog_exit(ret);
+ return ret;
+}
+
+struct buffer_head *ocfs2_find_entry(const char *name, int namelen,
+ struct inode *dir,
+ struct ocfs2_dir_entry **res_dir)
+{
+ struct super_block *sb;
+ struct buffer_head *bh_use[NAMEI_RA_SIZE];
+ struct buffer_head *bh, *ret = NULL;
+ unsigned long start, block, b;
+ int ra_max = 0; /* Number of bh's in the readahead
+ buffer, bh_use[] */
+ int ra_ptr = 0; /* Current index into readahead
+ buffer */
+ int num = 0;
+ int nblocks, i, err;
+
+ mlog_entry_void();
+
+ *res_dir = NULL;
+ sb = dir->i_sb;
+
+ nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
+ start = OCFS2_I(dir)->ip_dir_start_lookup;
+ if (start >= nblocks)
+ start = 0;
+ block = start;
+
+restart:
+ do {
+ /*
+ * We deal with the read-ahead logic here.
+ */
+ if (ra_ptr >= ra_max) {
+ /* Refill the readahead buffer */
+ ra_ptr = 0;
+ b = block;
+ for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
+ /*
+ * Terminate if we reach the end of the
+ * directory and must wrap, or if our
+ * search has finished at this block.
+ */
+ if (b >= nblocks || (num && block == start)) {
+ bh_use[ra_max] = NULL;
+ break;
+ }
+ num++;
+
+ /* XXX: questionable readahead stuff here */
+ bh = ocfs2_bread(dir, b++, &err, 1);
+ bh_use[ra_max] = bh;
+#if 0 // ???
+ if (bh)
+ ll_rw_block(READ, 1, &bh);
+#endif
+ }
+ }
+ if ((bh = bh_use[ra_ptr++]) == NULL)
+ goto next;
+ wait_on_buffer(bh);
+ if (!buffer_uptodate(bh)) {
+ /* read error, skip block & hope for the best */
+ brelse(bh);
+ goto next;
+ }
+ i = ocfs2_search_dirblock(bh, dir, name, namelen,
+ block << sb->s_blocksize_bits,
+ res_dir);
+ if (i == 1) {
+ OCFS2_I(dir)->ip_dir_start_lookup = block;
+ ret = bh;
+ goto cleanup_and_exit;
+ } else {
+ brelse(bh);
+ if (i < 0)
+ goto cleanup_and_exit;
+ }
+ next:
+ if (++block >= nblocks)
+ block = 0;
+ } while (block != start);
+
+ /*
+ * If the directory has grown while we were searching, then
+ * search the last part of the directory before giving up.
+ */
+ block = nblocks;
+ nblocks = i_size_read(dir) >> sb->s_blocksize_bits;
+ if (block < nblocks) {
+ start = 0;
+ goto restart;
+ }
+
+cleanup_and_exit:
+ /* Clean up the read-ahead blocks */
+ for (; ra_ptr < ra_max; ra_ptr++)
+ brelse(bh_use[ra_ptr]);
+
+ mlog_exit_ptr(ret);
+ return ret;
+}
+
+static int ocfs2_blkno_stringify(u64 blkno, char *name)
+{
+ int status, namelen;
+
+ mlog_entry_void();
+
+ namelen = snprintf(name, OCFS2_ORPHAN_NAMELEN + 1, "%016"MLFx64,
+ blkno);
+ if (namelen <= 0) {
+ if (namelen)
+ status = namelen;
+ else
+ status = -EINVAL;
+ mlog_errno(status);
+ goto bail;
+ }
+ if (namelen != OCFS2_ORPHAN_NAMELEN) {
+ status = -EINVAL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ mlog(0, "built filename '%s' for orphan dir (len=%d)\n", name,
+ namelen);
+
+ status = 0;
+bail:
+ mlog_exit(status);
+ return status;
+}
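+
+/*
+ * A rough sketch of the name built above, assuming MLFx64 expands to
+ * a hex conversion for a u64 (e.g. "llx"):
+ *
+ *   char name[OCFS2_ORPHAN_NAMELEN + 1];
+ *   ocfs2_blkno_stringify(0x1d2f3, name);
+ *
+ * name would then hold "000000000001d2f3", always 16 hex characters,
+ * which is why the OCFS2_ORPHAN_NAMELEN length check above can be
+ * exact.
+ */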
+
+static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ char *name,
+ struct buffer_head **de_bh)
+{
+ struct inode *orphan_dir_inode = NULL;
+ struct buffer_head *orphan_dir_bh = NULL;
+ int status = 0;
+
+ status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, name);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ orphan_dir_inode = ocfs2_get_system_file_inode(osb,
+ ORPHAN_DIR_SYSTEM_INODE,
+ osb->slot_num);
+ if (!orphan_dir_inode) {
+ status = -ENOENT;
+ mlog_errno(status);
+ goto leave;
+ }
+
+ ocfs2_handle_add_inode(handle, orphan_dir_inode);
+ status = ocfs2_meta_lock(orphan_dir_inode, handle, &orphan_dir_bh, 1);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ status = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode,
+ orphan_dir_bh, name,
+ OCFS2_ORPHAN_NAMELEN, de_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+leave:
+ if (orphan_dir_inode)
+ iput(orphan_dir_inode);
+
+ if (orphan_dir_bh)
+ brelse(orphan_dir_bh);
+
+ mlog_exit(status);
+ return status;
+}
+
+static int ocfs2_orphan_add(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *inode,
+ struct ocfs2_dinode *fe,
+ char *name,
+ struct buffer_head *de_bh)
+{
+ struct inode *orphan_dir_inode = NULL;
+ struct buffer_head *orphan_dir_bh = NULL;
+ int status = 0;
+ struct ocfs2_dinode *orphan_fe;
+
+ mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino);
+
+ orphan_dir_inode = ocfs2_get_system_file_inode(osb,
+ ORPHAN_DIR_SYSTEM_INODE,
+ osb->slot_num);
+ if (!orphan_dir_inode) {
+ status = -ENOENT;
+ mlog_errno(status);
+ goto leave;
+ }
+
+ status = ocfs2_read_block(osb,
+ OCFS2_I(orphan_dir_inode)->ip_blkno,
+ &orphan_dir_bh, OCFS2_BH_CACHED,
+ orphan_dir_inode);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ status = ocfs2_journal_access(handle, orphan_dir_inode, orphan_dir_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ /* we're a cluster filesystem, and nlink can change on disk from
+ * underneath us... */
+ orphan_fe = (struct ocfs2_dinode *) orphan_dir_bh->b_data;
+ if (S_ISDIR(inode->i_mode))
+ le16_add_cpu(&orphan_fe->i_links_count, 1);
+ orphan_dir_inode->i_nlink = le16_to_cpu(orphan_fe->i_links_count);
+
+ status = ocfs2_journal_dirty(handle, orphan_dir_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ status = __ocfs2_add_entry(handle, orphan_dir_inode, name,
+ OCFS2_ORPHAN_NAMELEN, inode,
+ OCFS2_I(inode)->ip_blkno,
+ orphan_dir_bh, de_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ le32_add_cpu(&fe->i_flags, OCFS2_ORPHANED_FL);
+
+ /* Record which orphan dir our inode now resides
+ * in. delete_inode will use this to determine which orphan
+ * dir to lock. */
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+ OCFS2_I(inode)->ip_orphaned_slot = osb->slot_num;
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+
+ mlog(0, "Inode %"MLFu64" orphaned in slot %d\n",
+ OCFS2_I(inode)->ip_blkno, osb->slot_num);
+
+leave:
+ if (orphan_dir_inode)
+ iput(orphan_dir_inode);
+
+ if (orphan_dir_bh)
+ brelse(orphan_dir_bh);
+
+ mlog_exit(status);
+ return status;
+}
+
+/* unlike orphan_add, we expect the orphan dir to already be locked here. */
+int ocfs2_orphan_del(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *orphan_dir_inode,
+ struct inode *inode,
+ struct buffer_head *orphan_dir_bh)
+{
+ char name[OCFS2_ORPHAN_NAMELEN + 1];
+ struct ocfs2_dinode *orphan_fe;
+ int status = 0;
+ struct buffer_head *target_de_bh = NULL;
+ struct ocfs2_dir_entry *target_de = NULL;
+
+ mlog_entry_void();
+
+ status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, name);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ mlog(0, "removing '%s' from orphan dir %"MLFu64" (namelen=%d)\n",
+ name, OCFS2_I(orphan_dir_inode)->ip_blkno, OCFS2_ORPHAN_NAMELEN);
+
+ /* find its spot in the orphan directory */
+ target_de_bh = ocfs2_find_entry(name, OCFS2_ORPHAN_NAMELEN,
+ orphan_dir_inode, &target_de);
+ if (!target_de_bh) {
+ status = -ENOENT;
+ mlog_errno(status);
+ goto leave;
+ }
+
+ /* remove it from the orphan directory */
+ status = ocfs2_delete_entry(handle, orphan_dir_inode, target_de,
+ target_de_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ status = ocfs2_journal_access(handle, orphan_dir_inode, orphan_dir_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ /* do the i_nlink dance! :) */
+ orphan_fe = (struct ocfs2_dinode *) orphan_dir_bh->b_data;
+ if (S_ISDIR(inode->i_mode))
+ le16_add_cpu(&orphan_fe->i_links_count, -1);
+ orphan_dir_inode->i_nlink = le16_to_cpu(orphan_fe->i_links_count);
+
+ status = ocfs2_journal_dirty(handle, orphan_dir_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+leave:
+ if (target_de_bh)
+ brelse(target_de_bh);
+
+ mlog_exit(status);
+ return status;
+}
+
+struct inode_operations ocfs2_dir_iops = {
+ .create = ocfs2_create,
+ .lookup = ocfs2_lookup,
+ .link = ocfs2_link,
+ .unlink = ocfs2_unlink,
+ .rmdir = ocfs2_unlink,
+ .symlink = ocfs2_symlink,
+ .mkdir = ocfs2_mkdir,
+ .mknod = ocfs2_mknod,
+ .rename = ocfs2_rename,
+ .setattr = ocfs2_setattr,
+ .getattr = ocfs2_getattr,
+};
diff --git a/fs/ocfs2/namei.h b/fs/ocfs2/namei.h
new file mode 100644
index 00000000000..deaaa97dbf0
--- /dev/null
+++ b/fs/ocfs2/namei.h
@@ -0,0 +1,58 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * namei.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_NAMEI_H
+#define OCFS2_NAMEI_H
+
+extern struct inode_operations ocfs2_dir_iops;
+
+struct dentry *ocfs2_get_parent(struct dentry *child);
+
+int ocfs2_check_dir_entry (struct inode *dir,
+ struct ocfs2_dir_entry *de,
+ struct buffer_head *bh,
+ unsigned long offset);
+struct buffer_head *ocfs2_find_entry(const char *name,
+ int namelen,
+ struct inode *dir,
+ struct ocfs2_dir_entry **res_dir);
+int ocfs2_orphan_del(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct inode *orphan_dir_inode,
+ struct inode *inode,
+ struct buffer_head *orphan_dir_bh);
+
+static inline int ocfs2_match(int len,
+ const char * const name,
+ struct ocfs2_dir_entry *de)
+{
+ if (len != de->name_len)
+ return 0;
+ if (!de->inode)
+ return 0;
+ return !memcmp(name, de->name, len);
+}
+
+#endif /* OCFS2_NAMEI_H */
diff --git a/fs/ocfs2/ocfs1_fs_compat.h b/fs/ocfs2/ocfs1_fs_compat.h
new file mode 100644
index 00000000000..0b499bccec5
--- /dev/null
+++ b/fs/ocfs2/ocfs1_fs_compat.h
@@ -0,0 +1,109 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * ocfs1_fs_compat.h
+ *
+ * OCFS1 volume header definitions. OCFS2 creates valid but unmountable
+ * OCFS1 volume headers on the first two sectors of an OCFS2 volume.
+ * This allows an OCFS1 volume to see the partition and cleanly fail to
+ * mount it.
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License, version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef _OCFS1_FS_COMPAT_H
+#define _OCFS1_FS_COMPAT_H
+
+#define OCFS1_MAX_VOL_SIGNATURE_LEN 128
+#define OCFS1_MAX_MOUNT_POINT_LEN 128
+#define OCFS1_MAX_VOL_ID_LENGTH 16
+#define OCFS1_MAX_VOL_LABEL_LEN 64
+#define OCFS1_MAX_CLUSTER_NAME_LEN 64
+
+#define OCFS1_MAJOR_VERSION (2)
+#define OCFS1_MINOR_VERSION (0)
+#define OCFS1_VOLUME_SIGNATURE "OracleCFS"
+
+/*
+ * OCFS1 superblock. Lives at sector 0.
+ */
+struct ocfs1_vol_disk_hdr
+{
+/*00*/ __u32 minor_version;
+ __u32 major_version;
+/*08*/ __u8 signature[OCFS1_MAX_VOL_SIGNATURE_LEN];
+/*88*/ __u8 mount_point[OCFS1_MAX_MOUNT_POINT_LEN];
+/*108*/ __u64 serial_num;
+/*110*/ __u64 device_size;
+ __u64 start_off;
+/*120*/ __u64 bitmap_off;
+ __u64 publ_off;
+/*130*/ __u64 vote_off;
+ __u64 root_bitmap_off;
+/*140*/ __u64 data_start_off;
+ __u64 root_bitmap_size;
+/*150*/ __u64 root_off;
+ __u64 root_size;
+/*160*/ __u64 cluster_size;
+ __u64 num_nodes;
+/*170*/ __u64 num_clusters;
+ __u64 dir_node_size;
+/*180*/ __u64 file_node_size;
+ __u64 internal_off;
+/*190*/ __u64 node_cfg_off;
+ __u64 node_cfg_size;
+/*1A0*/ __u64 new_cfg_off;
+ __u32 prot_bits;
+ __s32 excl_mount;
+/*1B0*/
+};
+
+
+struct ocfs1_disk_lock
+{
+/*00*/ __u32 curr_master;
+ __u8 file_lock;
+ __u8 compat_pad[3]; /* Not in original definition. Used to
+ make the already existing alignment
+ explicit */
+ __u64 last_write_time;
+/*10*/ __u64 last_read_time;
+ __u32 writer_node_num;
+ __u32 reader_node_num;
+/*20*/ __u64 oin_node_map;
+ __u64 dlock_seq_num;
+/*30*/
+};
+
+/*
+ * OCFS1 volume label. Lives at sector 1.
+ */
+struct ocfs1_vol_label
+{
+/*00*/ struct ocfs1_disk_lock disk_lock;
+/*30*/ __u8 label[OCFS1_MAX_VOL_LABEL_LEN];
+/*70*/ __u16 label_len;
+/*72*/ __u8 vol_id[OCFS1_MAX_VOL_ID_LENGTH];
+/*82*/ __u16 vol_id_len;
+/*84*/ __u8 cluster_name[OCFS1_MAX_CLUSTER_NAME_LEN];
+/*A4*/ __u16 cluster_name_len;
+/*A6*/
+};
+
+
+#endif /* _OCFS1_FS_COMPAT_H */
+
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
new file mode 100644
index 00000000000..f468c600cf9
--- /dev/null
+++ b/fs/ocfs2/ocfs2.h
@@ -0,0 +1,464 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * ocfs2.h
+ *
+ * Defines macros and structures used in OCFS2
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_H
+#define OCFS2_H
+
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/workqueue.h>
+#include <linux/kref.h>
+
+#include "cluster/nodemanager.h"
+#include "cluster/heartbeat.h"
+#include "cluster/tcp.h"
+
+#include "dlm/dlmapi.h"
+
+#include "ocfs2_fs.h"
+#include "endian.h"
+#include "ocfs2_lockid.h"
+
+struct ocfs2_extent_map {
+ u32 em_clusters;
+ struct rb_root em_extents;
+};
+
+/* Most user visible OCFS2 inodes will have very few pieces of
+ * metadata, but larger files (including bitmaps, etc) must be taken
+ * into account when designing an access scheme. We allow a small
+ * number of cached blocks to be stored in an array and grow the
+ * structure into an rb tree when necessary. */
+#define OCFS2_INODE_MAX_CACHE_ARRAY 2
+
+struct ocfs2_caching_info {
+ unsigned int ci_num_cached;
+ union {
+ sector_t ci_array[OCFS2_INODE_MAX_CACHE_ARRAY];
+ struct rb_root ci_tree;
+ } ci_cache;
+};
+
+/* this limits us to 256 nodes
+ * if we need more, we can do a kmalloc for the map */
+#define OCFS2_NODE_MAP_MAX_NODES 256
+struct ocfs2_node_map {
+ u16 num_nodes;
+ unsigned long map[BITS_TO_LONGS(OCFS2_NODE_MAP_MAX_NODES)];
+};
+
+enum ocfs2_ast_action {
+ OCFS2_AST_INVALID = 0,
+ OCFS2_AST_ATTACH,
+ OCFS2_AST_CONVERT,
+ OCFS2_AST_DOWNCONVERT,
+};
+
+/* actions for an unlockast function to take. */
+enum ocfs2_unlock_action {
+ OCFS2_UNLOCK_INVALID = 0,
+ OCFS2_UNLOCK_CANCEL_CONVERT,
+ OCFS2_UNLOCK_DROP_LOCK,
+};
+
+/* ocfs2_lock_res->l_flags flags. */
+#define OCFS2_LOCK_ATTACHED (0x00000001) /* have we initialized
+ * the lvb */
+#define OCFS2_LOCK_BUSY (0x00000002) /* we are currently in
+ * dlm_lock */
+#define OCFS2_LOCK_BLOCKED (0x00000004) /* blocked waiting to
+ * downconvert*/
+#define OCFS2_LOCK_LOCAL (0x00000008) /* newly created inode */
+#define OCFS2_LOCK_NEEDS_REFRESH (0x00000010)
+#define OCFS2_LOCK_REFRESHING (0x00000020)
+#define OCFS2_LOCK_INITIALIZED (0x00000040) /* track initialization
+ * for shutdown paths */
+#define OCFS2_LOCK_FREEING (0x00000080) /* help dlmglue track
+ * when to skip queueing
+ * a lock because it's
+ * about to be
+ * dropped. */
+#define OCFS2_LOCK_QUEUED (0x00000100) /* queued for downconvert */
+
+struct ocfs2_lock_res_ops;
+
+typedef void (*ocfs2_lock_callback)(int status, unsigned long data);
+
+struct ocfs2_lock_res {
+ void *l_priv;
+ struct ocfs2_lock_res_ops *l_ops;
+ spinlock_t l_lock;
+
+ struct list_head l_blocked_list;
+ struct list_head l_mask_waiters;
+
+ enum ocfs2_lock_type l_type;
+ unsigned long l_flags;
+ char l_name[OCFS2_LOCK_ID_MAX_LEN];
+ int l_level;
+ unsigned int l_ro_holders;
+ unsigned int l_ex_holders;
+ struct dlm_lockstatus l_lksb;
+
+ /* used from AST/BAST funcs. */
+ enum ocfs2_ast_action l_action;
+ enum ocfs2_unlock_action l_unlock_action;
+ int l_requested;
+ int l_blocking;
+
+ wait_queue_head_t l_event;
+
+ struct list_head l_debug_list;
+};
+
+struct ocfs2_dlm_debug {
+ struct kref d_refcnt;
+ struct dentry *d_locking_state;
+ struct list_head d_lockres_tracking;
+};
+
+enum ocfs2_vol_state
+{
+ VOLUME_INIT = 0,
+ VOLUME_MOUNTED,
+ VOLUME_DISMOUNTED,
+ VOLUME_DISABLED
+};
+
+struct ocfs2_alloc_stats
+{
+ atomic_t moves;
+ atomic_t local_data;
+ atomic_t bitmap_data;
+ atomic_t bg_allocs;
+ atomic_t bg_extends;
+};
+
+enum ocfs2_local_alloc_state
+{
+ OCFS2_LA_UNUSED = 0,
+ OCFS2_LA_ENABLED,
+ OCFS2_LA_DISABLED
+};
+
+enum ocfs2_mount_options
+{
+ OCFS2_MOUNT_HB_LOCAL = 1 << 0, /* Heartbeat started in local mode */
+ OCFS2_MOUNT_BARRIER = 1 << 1, /* Use block barriers */
+ OCFS2_MOUNT_NOINTR = 1 << 2, /* Don't catch signals */
+ OCFS2_MOUNT_ERRORS_PANIC = 1 << 3, /* Panic on errors */
+ OCFS2_MOUNT_DATA_WRITEBACK = 1 << 4, /* No data ordering */
+#ifdef OCFS2_ORACORE_WORKAROUNDS
+ OCFS2_MOUNT_COMPAT_OCFS = 1 << 30, /* ocfs1 compatibility mode */
+#endif
+};
+
+#define OCFS2_OSB_SOFT_RO 0x0001
+#define OCFS2_OSB_HARD_RO 0x0002
+#define OCFS2_OSB_ERROR_FS 0x0004
+
+struct ocfs2_journal;
+struct ocfs2_journal_handle;
+struct ocfs2_super
+{
+ u32 osb_id; /* id used by the proc interface */
+ struct task_struct *commit_task;
+ struct super_block *sb;
+ struct inode *root_inode;
+ struct inode *sys_root_inode;
+ struct inode *system_inodes[NUM_SYSTEM_INODES];
+
+ struct ocfs2_slot_info *slot_info;
+
+ spinlock_t node_map_lock;
+ struct ocfs2_node_map mounted_map;
+ struct ocfs2_node_map recovery_map;
+ struct ocfs2_node_map umount_map;
+
+ u32 num_clusters;
+ u64 root_blkno;
+ u64 system_dir_blkno;
+ u64 bitmap_blkno;
+ u32 bitmap_cpg;
+ u8 *uuid;
+ char *uuid_str;
+ u8 *vol_label;
+ u64 first_cluster_group_blkno;
+ u32 fs_generation;
+
+ u32 s_feature_compat;
+ u32 s_feature_incompat;
+ u32 s_feature_ro_compat;
+
+ /* Protects s_next_generation, osb_flags. Could protect more on
+ * osb as it's very short lived. */
+ spinlock_t osb_lock;
+ u32 s_next_generation;
+ unsigned long osb_flags;
+
+ unsigned long s_mount_opt;
+
+ u16 max_slots;
+ u16 num_nodes;
+ s16 node_num;
+ s16 slot_num;
+ int s_sectsize_bits;
+ int s_clustersize;
+ int s_clustersize_bits;
+ struct proc_dir_entry *proc_sub_dir; /* points to /proc/fs/ocfs2/<maj_min> */
+
+ atomic_t vol_state;
+ struct semaphore recovery_lock;
+ struct task_struct *recovery_thread_task;
+ int disable_recovery;
+ wait_queue_head_t checkpoint_event;
+ atomic_t needs_checkpoint;
+ struct ocfs2_journal *journal;
+
+ enum ocfs2_local_alloc_state local_alloc_state;
+ struct buffer_head *local_alloc_bh;
+
+ /* Next two fields are for local node slot recovery during
+ * mount. */
+ int dirty;
+ struct ocfs2_dinode *local_alloc_copy;
+
+ struct ocfs2_alloc_stats alloc_stats;
+ char dev_str[20]; /* "major,minor" of the device */
+
+ struct dlm_ctxt *dlm;
+ struct ocfs2_lock_res osb_super_lockres;
+ struct ocfs2_lock_res osb_rename_lockres;
+ struct dlm_eviction_cb osb_eviction_cb;
+ struct ocfs2_dlm_debug *osb_dlm_debug;
+
+ struct dentry *osb_debug_root;
+
+ wait_queue_head_t recovery_event;
+
+ spinlock_t vote_task_lock;
+ struct task_struct *vote_task;
+ wait_queue_head_t vote_event;
+ unsigned long vote_wake_sequence;
+ unsigned long vote_work_sequence;
+
+ struct list_head blocked_lock_list;
+ unsigned long blocked_lock_count;
+
+ struct list_head vote_list;
+ int vote_count;
+
+ u32 net_key;
+ spinlock_t net_response_lock;
+ unsigned int net_response_ids;
+ struct list_head net_response_list;
+
+ struct o2hb_callback_func osb_hb_up;
+ struct o2hb_callback_func osb_hb_down;
+
+ struct list_head osb_net_handlers;
+
+ wait_queue_head_t osb_mount_event;
+
+ /* Truncate log info */
+ struct inode *osb_tl_inode;
+ struct buffer_head *osb_tl_bh;
+ struct work_struct osb_truncate_log_wq;
+};
+
+#define OCFS2_SB(sb) ((struct ocfs2_super *)(sb)->s_fs_info)
+#define OCFS2_MAX_OSB_ID 65536
+
+static inline int ocfs2_should_order_data(struct inode *inode)
+{
+ if (!S_ISREG(inode->i_mode))
+ return 0;
+ if (OCFS2_SB(inode->i_sb)->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK)
+ return 0;
+ return 1;
+}
+
+/* set / clear functions because cluster events can make these happen
+ * in parallel so we want the transitions to be atomic. This also
+ * means that any future osb_flags flags must be protected by the
+ * spinlock too! */
+static inline void ocfs2_set_osb_flag(struct ocfs2_super *osb,
+ unsigned long flag)
+{
+ spin_lock(&osb->osb_lock);
+ osb->osb_flags |= flag;
+ spin_unlock(&osb->osb_lock);
+}
+
+static inline void ocfs2_set_ro_flag(struct ocfs2_super *osb,
+ int hard)
+{
+ spin_lock(&osb->osb_lock);
+ osb->osb_flags &= ~(OCFS2_OSB_SOFT_RO|OCFS2_OSB_HARD_RO);
+ if (hard)
+ osb->osb_flags |= OCFS2_OSB_HARD_RO;
+ else
+ osb->osb_flags |= OCFS2_OSB_SOFT_RO;
+ spin_unlock(&osb->osb_lock);
+}
+
+static inline int ocfs2_is_hard_readonly(struct ocfs2_super *osb)
+{
+ int ret;
+
+ spin_lock(&osb->osb_lock);
+ ret = osb->osb_flags & OCFS2_OSB_HARD_RO;
+ spin_unlock(&osb->osb_lock);
+
+ return ret;
+}
+
+static inline int ocfs2_is_soft_readonly(struct ocfs2_super *osb)
+{
+ int ret;
+
+ spin_lock(&osb->osb_lock);
+ ret = osb->osb_flags & OCFS2_OSB_SOFT_RO;
+ spin_unlock(&osb->osb_lock);
+
+ return ret;
+}
+
+#define OCFS2_IS_VALID_DINODE(ptr) \
+ (!strcmp((ptr)->i_signature, OCFS2_INODE_SIGNATURE))
+
+#define OCFS2_RO_ON_INVALID_DINODE(__sb, __di) do { \
+ typeof(__di) ____di = (__di); \
+ ocfs2_error((__sb), \
+ "Dinode # %"MLFu64" has bad signature %.*s", \
+ (____di)->i_blkno, 7, \
+ (____di)->i_signature); \
+} while (0)
+
+#define OCFS2_IS_VALID_EXTENT_BLOCK(ptr) \
+ (!strcmp((ptr)->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE))
+
+#define OCFS2_RO_ON_INVALID_EXTENT_BLOCK(__sb, __eb) do { \
+ typeof(__eb) ____eb = (__eb); \
+ ocfs2_error((__sb), \
+ "Extent Block # %"MLFu64" has bad signature %.*s", \
+ (____eb)->h_blkno, 7, \
+ (____eb)->h_signature); \
+} while (0)
+
+#define OCFS2_IS_VALID_GROUP_DESC(ptr) \
+ (!strcmp((ptr)->bg_signature, OCFS2_GROUP_DESC_SIGNATURE))
+
+#define OCFS2_RO_ON_INVALID_GROUP_DESC(__sb, __gd) do { \
+ typeof(__gd) ____gd = (__gd); \
+ ocfs2_error((__sb), \
+ "Group Descriptor # %"MLFu64" has bad signature %.*s", \
+ (____gd)->bg_blkno, 7, \
+ (____gd)->bg_signature); \
+} while (0)
+
+static inline unsigned long ino_from_blkno(struct super_block *sb,
+ u64 blkno)
+{
+ return (unsigned long)(blkno & (u64)ULONG_MAX);
+}
+
+static inline u64 ocfs2_clusters_to_blocks(struct super_block *sb,
+ u32 clusters)
+{
+ int c_to_b_bits = OCFS2_SB(sb)->s_clustersize_bits -
+ sb->s_blocksize_bits;
+
+ return (u64)clusters << c_to_b_bits;
+}
+
+static inline u32 ocfs2_blocks_to_clusters(struct super_block *sb,
+ u64 blocks)
+{
+ int b_to_c_bits = OCFS2_SB(sb)->s_clustersize_bits -
+ sb->s_blocksize_bits;
+
+ return (u32)(blocks >> b_to_c_bits);
+}
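+
+/*
+ * Illustrative numbers for the two conversions above, assuming a 4 KB
+ * cluster size and 512 byte blocks: the shift is 12 - 9 = 3, so
+ * cluster 10 starts at block 10 << 3 = 80, and block 80 maps back to
+ * cluster 80 >> 3 = 10.
+ */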
+
+static inline unsigned int ocfs2_clusters_for_bytes(struct super_block *sb,
+ u64 bytes)
+{
+ int cl_bits = OCFS2_SB(sb)->s_clustersize_bits;
+ unsigned int clusters;
+
+ bytes += OCFS2_SB(sb)->s_clustersize - 1;
+ /* OCFS2 just cannot have enough clusters to overflow this */
+ clusters = (unsigned int)(bytes >> cl_bits);
+
+ return clusters;
+}
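+
+/*
+ * For example, with a 4 KB cluster size a 5000 byte file needs
+ * (5000 + 4095) >> 12 = 2 clusters.
+ */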
+
+static inline u64 ocfs2_blocks_for_bytes(struct super_block *sb,
+ u64 bytes)
+{
+ bytes += sb->s_blocksize - 1;
+ return bytes >> sb->s_blocksize_bits;
+}
+
+static inline u64 ocfs2_clusters_to_bytes(struct super_block *sb,
+ u32 clusters)
+{
+ return (u64)clusters << OCFS2_SB(sb)->s_clustersize_bits;
+}
+
+static inline u64 ocfs2_align_bytes_to_clusters(struct super_block *sb,
+ u64 bytes)
+{
+ int cl_bits = OCFS2_SB(sb)->s_clustersize_bits;
+ unsigned int clusters;
+
+ clusters = ocfs2_clusters_for_bytes(sb, bytes);
+ return (u64)clusters << cl_bits;
+}
+
+static inline u64 ocfs2_align_bytes_to_blocks(struct super_block *sb,
+ u64 bytes)
+{
+ u64 blocks;
+
+ blocks = ocfs2_blocks_for_bytes(sb, bytes);
+ return blocks << sb->s_blocksize_bits;
+}
+
+static inline unsigned long ocfs2_align_bytes_to_sectors(u64 bytes)
+{
+ return (unsigned long)((bytes + 511) >> 9);
+}
+
+#define ocfs2_set_bit ext2_set_bit
+#define ocfs2_clear_bit ext2_clear_bit
+#define ocfs2_test_bit ext2_test_bit
+#define ocfs2_find_next_zero_bit ext2_find_next_zero_bit
+#endif /* OCFS2_H */
+
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
new file mode 100644
index 00000000000..dfb8a5bedfc
--- /dev/null
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -0,0 +1,638 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * ocfs2_fs.h
+ *
+ * On-disk structures for OCFS2.
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License, version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef _OCFS2_FS_H
+#define _OCFS2_FS_H
+
+/* Version */
+#define OCFS2_MAJOR_REV_LEVEL 0
+#define OCFS2_MINOR_REV_LEVEL 90
+
+/*
+ * An OCFS2 volume starts this way:
+ * Sector 0: Valid ocfs1_vol_disk_hdr that cleanly fails to mount OCFS.
+ * Sector 1: Valid ocfs1_vol_label that cleanly fails to mount OCFS.
+ * Block OCFS2_SUPER_BLOCK_BLKNO: OCFS2 superblock.
+ *
+ * All other structures are found from the superblock information.
+ *
+ * OCFS2_SUPER_BLOCK_BLKNO is in blocks, not sectors. E.g., for a
+ * blocksize of 2K, it is 4096 bytes into the disk.
+ */
+#define OCFS2_SUPER_BLOCK_BLKNO 2
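+
+/*
+ * In other words, the superblock's byte offset on disk is
+ * OCFS2_SUPER_BLOCK_BLKNO << s_blocksize_bits, e.g. 2 * 2048 = 4096
+ * bytes with 2 KB blocks and 2 * 4096 = 8192 bytes with 4 KB blocks.
+ */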
+
+/*
+ * Cluster size limits. The maximum is kept arbitrarily at 1 MB, and could
+ * grow if needed.
+ */
+#define OCFS2_MIN_CLUSTERSIZE 4096
+#define OCFS2_MAX_CLUSTERSIZE 1048576
+
+/*
+ * Blocks cannot be bigger than clusters, so the maximum blocksize is the
+ * minimum cluster size.
+ */
+#define OCFS2_MIN_BLOCKSIZE 512
+#define OCFS2_MAX_BLOCKSIZE OCFS2_MIN_CLUSTERSIZE
+
+/* Filesystem magic number */
+#define OCFS2_SUPER_MAGIC 0x7461636f
+
+/* Object signatures */
+#define OCFS2_SUPER_BLOCK_SIGNATURE "OCFSV2"
+#define OCFS2_INODE_SIGNATURE "INODE01"
+#define OCFS2_EXTENT_BLOCK_SIGNATURE "EXBLK01"
+#define OCFS2_GROUP_DESC_SIGNATURE "GROUP01"
+
+/* Compatibility flags */
+#define OCFS2_HAS_COMPAT_FEATURE(sb,mask) \
+ ( OCFS2_SB(sb)->s_feature_compat & (mask) )
+#define OCFS2_HAS_RO_COMPAT_FEATURE(sb,mask) \
+ ( OCFS2_SB(sb)->s_feature_ro_compat & (mask) )
+#define OCFS2_HAS_INCOMPAT_FEATURE(sb,mask) \
+ ( OCFS2_SB(sb)->s_feature_incompat & (mask) )
+#define OCFS2_SET_COMPAT_FEATURE(sb,mask) \
+ OCFS2_SB(sb)->s_feature_compat |= (mask)
+#define OCFS2_SET_RO_COMPAT_FEATURE(sb,mask) \
+ OCFS2_SB(sb)->s_feature_ro_compat |= (mask)
+#define OCFS2_SET_INCOMPAT_FEATURE(sb,mask) \
+ OCFS2_SB(sb)->s_feature_incompat |= (mask)
+#define OCFS2_CLEAR_COMPAT_FEATURE(sb,mask) \
+ OCFS2_SB(sb)->s_feature_compat &= ~(mask)
+#define OCFS2_CLEAR_RO_COMPAT_FEATURE(sb,mask) \
+ OCFS2_SB(sb)->s_feature_ro_compat &= ~(mask)
+#define OCFS2_CLEAR_INCOMPAT_FEATURE(sb,mask) \
+ OCFS2_SB(sb)->s_feature_incompat &= ~(mask)
+
+#define OCFS2_FEATURE_COMPAT_SUPP 0
+#define OCFS2_FEATURE_INCOMPAT_SUPP 0
+#define OCFS2_FEATURE_RO_COMPAT_SUPP 0
+
+/*
+ * Heartbeat-only devices are missing journals and other files. The
+ * filesystem driver can't load them, but the library can. Never put
+ * this in OCFS2_FEATURE_INCOMPAT_SUPP, *ever*.
+ */
+#define OCFS2_FEATURE_INCOMPAT_HEARTBEAT_DEV 0x0002
+
+
+/*
+ * Flags on ocfs2_dinode.i_flags
+ */
+#define OCFS2_VALID_FL (0x00000001) /* Inode is valid */
+#define OCFS2_UNUSED2_FL (0x00000002)
+#define OCFS2_ORPHANED_FL (0x00000004) /* On the orphan list */
+#define OCFS2_UNUSED3_FL (0x00000008)
+/* System inode flags */
+#define OCFS2_SYSTEM_FL (0x00000010) /* System inode */
+#define OCFS2_SUPER_BLOCK_FL (0x00000020) /* Super block */
+#define OCFS2_LOCAL_ALLOC_FL (0x00000040) /* Slot local alloc bitmap */
+#define OCFS2_BITMAP_FL (0x00000080) /* Allocation bitmap */
+#define OCFS2_JOURNAL_FL (0x00000100) /* Slot local journal */
+#define OCFS2_HEARTBEAT_FL (0x00000200) /* Heartbeat area */
+#define OCFS2_CHAIN_FL (0x00000400) /* Chain allocator */
+#define OCFS2_DEALLOC_FL (0x00000800) /* Truncate log */
+
+/*
+ * Journal Flags (ocfs2_dinode.id1.journal1.i_flags)
+ */
+#define OCFS2_JOURNAL_DIRTY_FL (0x00000001) /* Journal needs recovery */
+
+/*
+ * superblock s_state flags
+ */
+#define OCFS2_ERROR_FS (0x00000001) /* FS saw errors */
+
+/* Limit of space in ocfs2_dir_entry */
+#define OCFS2_MAX_FILENAME_LEN 255
+
+/* Maximum slots on an ocfs2 file system */
+#define OCFS2_MAX_SLOTS 255
+
+/* Slot map indicator for an empty slot */
+#define OCFS2_INVALID_SLOT -1
+
+#define OCFS2_VOL_UUID_LEN 16
+#define OCFS2_MAX_VOL_LABEL_LEN 64
+
+/* Journal limits (in bytes) */
+#define OCFS2_MIN_JOURNAL_SIZE (4 * 1024 * 1024)
+#define OCFS2_MAX_JOURNAL_SIZE (500 * 1024 * 1024)
+
+struct ocfs2_system_inode_info {
+ char *si_name;
+ int si_iflags;
+ int si_mode;
+};
+
+/* System file index */
+enum {
+ BAD_BLOCK_SYSTEM_INODE = 0,
+ GLOBAL_INODE_ALLOC_SYSTEM_INODE,
+ SLOT_MAP_SYSTEM_INODE,
+#define OCFS2_FIRST_ONLINE_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE
+ HEARTBEAT_SYSTEM_INODE,
+ GLOBAL_BITMAP_SYSTEM_INODE,
+#define OCFS2_LAST_GLOBAL_SYSTEM_INODE GLOBAL_BITMAP_SYSTEM_INODE
+ ORPHAN_DIR_SYSTEM_INODE,
+ EXTENT_ALLOC_SYSTEM_INODE,
+ INODE_ALLOC_SYSTEM_INODE,
+ JOURNAL_SYSTEM_INODE,
+ LOCAL_ALLOC_SYSTEM_INODE,
+ TRUNCATE_LOG_SYSTEM_INODE,
+ NUM_SYSTEM_INODES
+};
+
+static struct ocfs2_system_inode_info ocfs2_system_inodes[NUM_SYSTEM_INODES] = {
+ /* Global system inodes (single copy) */
+ /* The first two are only used from userspace mkfs/tunefs */
+ [BAD_BLOCK_SYSTEM_INODE] = { "bad_blocks", 0, S_IFREG | 0644 },
+ [GLOBAL_INODE_ALLOC_SYSTEM_INODE] = { "global_inode_alloc", OCFS2_BITMAP_FL | OCFS2_CHAIN_FL, S_IFREG | 0644 },
+
+ /* These are used by the running filesystem */
+ [SLOT_MAP_SYSTEM_INODE] = { "slot_map", 0, S_IFREG | 0644 },
+ [HEARTBEAT_SYSTEM_INODE] = { "heartbeat", OCFS2_HEARTBEAT_FL, S_IFREG | 0644 },
+ [GLOBAL_BITMAP_SYSTEM_INODE] = { "global_bitmap", 0, S_IFREG | 0644 },
+
+ /* Slot-specific system inodes (one copy per slot) */
+ [ORPHAN_DIR_SYSTEM_INODE] = { "orphan_dir:%04d", 0, S_IFDIR | 0755 },
+ [EXTENT_ALLOC_SYSTEM_INODE] = { "extent_alloc:%04d", OCFS2_BITMAP_FL | OCFS2_CHAIN_FL, S_IFREG | 0644 },
+ [INODE_ALLOC_SYSTEM_INODE] = { "inode_alloc:%04d", OCFS2_BITMAP_FL | OCFS2_CHAIN_FL, S_IFREG | 0644 },
+ [JOURNAL_SYSTEM_INODE] = { "journal:%04d", OCFS2_JOURNAL_FL, S_IFREG | 0644 },
+ [LOCAL_ALLOC_SYSTEM_INODE] = { "local_alloc:%04d", OCFS2_BITMAP_FL | OCFS2_LOCAL_ALLOC_FL, S_IFREG | 0644 },
+ [TRUNCATE_LOG_SYSTEM_INODE] = { "truncate_log:%04d", OCFS2_DEALLOC_FL, S_IFREG | 0644 }
+};
+
+/* Parameter passed from mount.ocfs2 to module */
+#define OCFS2_HB_NONE "heartbeat=none"
+#define OCFS2_HB_LOCAL "heartbeat=local"
+
+/*
+ * OCFS2 directory file types. Only the low 3 bits are used. The
+ * other bits are reserved for now.
+ */
+#define OCFS2_FT_UNKNOWN 0
+#define OCFS2_FT_REG_FILE 1
+#define OCFS2_FT_DIR 2
+#define OCFS2_FT_CHRDEV 3
+#define OCFS2_FT_BLKDEV 4
+#define OCFS2_FT_FIFO 5
+#define OCFS2_FT_SOCK 6
+#define OCFS2_FT_SYMLINK 7
+
+#define OCFS2_FT_MAX 8
+
+/*
+ * OCFS2_DIR_PAD defines the directory entry alignment boundary
+ *
+ * NOTE: It must be a multiple of 4
+ */
+#define OCFS2_DIR_PAD 4
+#define OCFS2_DIR_ROUND (OCFS2_DIR_PAD - 1)
+#define OCFS2_DIR_MEMBER_LEN offsetof(struct ocfs2_dir_entry, name)
+#define OCFS2_DIR_REC_LEN(name_len) (((name_len) + OCFS2_DIR_MEMBER_LEN + \
+ OCFS2_DIR_ROUND) & \
+ ~OCFS2_DIR_ROUND)
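+
+/*
+ * Worked example of the rounding above: the fixed header of struct
+ * ocfs2_dir_entry (inode, rec_len, name_len, file_type) is 12 bytes,
+ * so a 5 character name needs (5 + 12 + 3) & ~3 = 20 bytes and a 1
+ * character name needs (1 + 12 + 3) & ~3 = 16 bytes.
+ */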
+
+#define OCFS2_LINK_MAX 32000
+
+#define S_SHIFT 12
+static unsigned char ocfs2_type_by_mode[S_IFMT >> S_SHIFT] = {
+ [S_IFREG >> S_SHIFT] = OCFS2_FT_REG_FILE,
+ [S_IFDIR >> S_SHIFT] = OCFS2_FT_DIR,
+ [S_IFCHR >> S_SHIFT] = OCFS2_FT_CHRDEV,
+ [S_IFBLK >> S_SHIFT] = OCFS2_FT_BLKDEV,
+ [S_IFIFO >> S_SHIFT] = OCFS2_FT_FIFO,
+ [S_IFSOCK >> S_SHIFT] = OCFS2_FT_SOCK,
+ [S_IFLNK >> S_SHIFT] = OCFS2_FT_SYMLINK,
+};
+
+
+/*
+ * Convenience casts
+ */
+#define OCFS2_RAW_SB(dinode) (&((dinode)->id2.i_super))
+
+/*
+ * On disk extent record for OCFS2
+ * It describes a range of clusters on disk.
+ */
+struct ocfs2_extent_rec {
+/*00*/ __le32 e_cpos; /* Offset into the file, in clusters */
+ __le32 e_clusters; /* Clusters covered by this extent */
+ __le64 e_blkno; /* Physical disk offset, in blocks */
+/*10*/
+};
+
+struct ocfs2_chain_rec {
+ __le32 c_free; /* Number of free bits in this chain. */
+ __le32 c_total; /* Number of total bits in this chain */
+ __le64 c_blkno; /* Physical disk offset (blocks) of 1st group */
+};
+
+struct ocfs2_truncate_rec {
+ __le32 t_start; /* 1st cluster in this log */
+ __le32 t_clusters; /* Number of total clusters covered */
+};
+
+/*
+ * On disk extent list for OCFS2 (node in the tree). Note that this
+ * is contained inside ocfs2_dinode or ocfs2_extent_block, so the
+ * offsets are relative to ocfs2_dinode.id2.i_list or
+ * ocfs2_extent_block.h_list, respectively.
+ */
+struct ocfs2_extent_list {
+/*00*/ __le16 l_tree_depth; /* Extent tree depth from this
+ point. 0 means data extents
+ hang directly off this
+ header (a leaf) */
+ __le16 l_count; /* Number of extent records */
+ __le16 l_next_free_rec; /* Next unused extent slot */
+ __le16 l_reserved1;
+ __le64 l_reserved2; /* Pad to
+ sizeof(ocfs2_extent_rec) */
+/*10*/ struct ocfs2_extent_rec l_recs[0]; /* Extent records */
+};
+
+/*
+ * On disk allocation chain list for OCFS2. Note that this is
+ * contained inside ocfs2_dinode, so the offsets are relative to
+ * ocfs2_dinode.id2.i_chain.
+ */
+struct ocfs2_chain_list {
+/*00*/ __le16 cl_cpg; /* Clusters per Block Group */
+ __le16 cl_bpc; /* Bits per cluster */
+ __le16 cl_count; /* Total chains in this list */
+ __le16 cl_next_free_rec; /* Next unused chain slot */
+ __le64 cl_reserved1;
+/*10*/ struct ocfs2_chain_rec cl_recs[0]; /* Chain records */
+};
+
+/*
+ * On disk deallocation log for OCFS2. Note that this is
+ * contained inside ocfs2_dinode, so the offsets are relative to
+ * ocfs2_dinode.id2.i_dealloc.
+ */
+struct ocfs2_truncate_log {
+/*00*/ __le16 tl_count; /* Total records in this log */
+ __le16 tl_used; /* Number of records in use */
+ __le32 tl_reserved1;
+/*08*/ struct ocfs2_truncate_rec tl_recs[0]; /* Truncate records */
+};
+
+/*
+ * On disk extent block (indirect block) for OCFS2
+ */
+struct ocfs2_extent_block
+{
+/*00*/ __u8 h_signature[8]; /* Signature for verification */
+ __le64 h_reserved1;
+/*10*/ __le16 h_suballoc_slot; /* Slot suballocator this
+ extent_header belongs to */
+ __le16 h_suballoc_bit; /* Bit offset in suballocator
+ block group */
+ __le32 h_fs_generation; /* Must match super block */
+ __le64 h_blkno; /* Offset on disk, in blocks */
+/*20*/ __le64 h_reserved3;
+ __le64 h_next_leaf_blk; /* Offset on disk, in blocks,
+ of next leaf header pointing
+ to data */
+/*30*/ struct ocfs2_extent_list h_list; /* Extent record list */
+/* Actual on-disk size is one block */
+};
+
+/*
+ * On disk superblock for OCFS2
+ * Note that it is contained inside an ocfs2_dinode, so all offsets
+ * are relative to the start of ocfs2_dinode.id2.
+ */
+struct ocfs2_super_block {
+/*00*/ __le16 s_major_rev_level;
+ __le16 s_minor_rev_level;
+ __le16 s_mnt_count;
+ __le16 s_max_mnt_count;
+ __le16 s_state; /* File system state */
+ __le16 s_errors; /* Behaviour when detecting errors */
+ __le32 s_checkinterval; /* Max time between checks */
+/*10*/ __le64 s_lastcheck; /* Time of last check */
+ __le32 s_creator_os; /* OS */
+ __le32 s_feature_compat; /* Compatible feature set */
+/*20*/ __le32 s_feature_incompat; /* Incompatible feature set */
+ __le32 s_feature_ro_compat; /* Readonly-compatible feature set */
+ __le64 s_root_blkno; /* Offset, in blocks, of root directory
+ dinode */
+/*30*/ __le64 s_system_dir_blkno; /* Offset, in blocks, of system
+ directory dinode */
+ __le32 s_blocksize_bits; /* Blocksize for this fs */
+ __le32 s_clustersize_bits; /* Clustersize for this fs */
+/*40*/ __le16 s_max_slots; /* Max number of simultaneous mounts
+ before tunefs required */
+ __le16 s_reserved1;
+ __le32 s_reserved2;
+ __le64 s_first_cluster_group; /* Block offset of 1st cluster
+ * group header */
+/*50*/ __u8 s_label[OCFS2_MAX_VOL_LABEL_LEN]; /* Label for mounting, etc. */
+/*90*/ __u8 s_uuid[OCFS2_VOL_UUID_LEN]; /* 128-bit uuid */
+/*A0*/
+};
+
+/*
+ * Local allocation bitmap for OCFS2 slots
+ * Note that it exists inside an ocfs2_dinode, so all offsets are
+ * relative to the start of ocfs2_dinode.id2.
+ */
+struct ocfs2_local_alloc
+{
+/*00*/ __le32 la_bm_off; /* Starting bit offset in main bitmap */
+ __le16 la_size; /* Size of included bitmap, in bytes */
+ __le16 la_reserved1;
+ __le64 la_reserved2;
+/*10*/ __u8 la_bitmap[0];
+};
+
+/*
+ * On disk inode for OCFS2
+ */
+struct ocfs2_dinode {
+/*00*/ __u8 i_signature[8]; /* Signature for validation */
+ __le32 i_generation; /* Generation number */
+ __le16 i_suballoc_slot; /* Slot suballocator this inode
+ belongs to */
+ __le16 i_suballoc_bit; /* Bit offset in suballocator
+ block group */
+/*10*/ __le32 i_reserved0;
+ __le32 i_clusters; /* Cluster count */
+ __le32 i_uid; /* Owner UID */
+ __le32 i_gid; /* Owning GID */
+/*20*/ __le64 i_size; /* Size in bytes */
+ __le16 i_mode; /* File mode */
+ __le16 i_links_count; /* Links count */
+ __le32 i_flags; /* File flags */
+/*30*/ __le64 i_atime; /* Access time */
+ __le64 i_ctime; /* Creation time */
+/*40*/ __le64 i_mtime; /* Modification time */
+ __le64 i_dtime; /* Deletion time */
+/*50*/ __le64 i_blkno; /* Offset on disk, in blocks */
+ __le64 i_last_eb_blk; /* Pointer to last extent
+ block */
+/*60*/ __le32 i_fs_generation; /* Generation per fs-instance */
+ __le32 i_atime_nsec;
+ __le32 i_ctime_nsec;
+ __le32 i_mtime_nsec;
+/*70*/ __le64 i_reserved1[9];
+/*B8*/ union {
+ __le64 i_pad1; /* Generic way to refer to this
+ 64bit union */
+ struct {
+ __le64 i_rdev; /* Device number */
+ } dev1;
+ struct { /* Info for bitmap system
+ inodes */
+ __le32 i_used; /* Bits (ie, clusters) used */
+ __le32 i_total; /* Total bits (clusters)
+ available */
+ } bitmap1;
+ struct { /* Info for journal system
+ inodes */
+ __le32 ij_flags; /* Mounted, version, etc. */
+ __le32 ij_pad;
+ } journal1;
+ } id1; /* Inode type dependent 1 */
+/*C0*/ union {
+ struct ocfs2_super_block i_super;
+ struct ocfs2_local_alloc i_lab;
+ struct ocfs2_chain_list i_chain;
+ struct ocfs2_extent_list i_list;
+ struct ocfs2_truncate_log i_dealloc;
+ __u8 i_symlink[0];
+ } id2;
+/* Actual on-disk size is one block */
+};
+
+/*
+ * On-disk directory entry structure for OCFS2
+ *
+ * Packed as this structure could be accessed unaligned on 64-bit platforms
+ */
+struct ocfs2_dir_entry {
+/*00*/ __le64 inode; /* Inode number */
+ __le16 rec_len; /* Directory entry length */
+ __u8 name_len; /* Name length */
+ __u8 file_type;
+/*0C*/ char name[OCFS2_MAX_FILENAME_LEN]; /* File name */
+/* Actual on-disk length specified by rec_len */
+} __attribute__ ((packed));
+
+/*
+ * On disk allocator group structure for OCFS2
+ */
+struct ocfs2_group_desc
+{
+/*00*/ __u8 bg_signature[8]; /* Signature for validation */
+ __le16 bg_size; /* Size of included bitmap in
+ bytes. */
+ __le16 bg_bits; /* Bits represented by this
+ group. */
+ __le16 bg_free_bits_count; /* Free bits count */
+ __le16 bg_chain; /* What chain I am in. */
+/*10*/ __le32 bg_generation;
+ __le32 bg_reserved1;
+ __le64 bg_next_group; /* Next group in my list, in
+ blocks */
+/*20*/ __le64 bg_parent_dinode; /* dinode which owns me, in
+ blocks */
+ __le64 bg_blkno; /* Offset on disk, in blocks */
+/*30*/ __le64 bg_reserved2[2];
+/*40*/ __u8 bg_bitmap[0];
+};
+
+#ifdef __KERNEL__
+static inline int ocfs2_fast_symlink_chars(struct super_block *sb)
+{
+ return sb->s_blocksize -
+ offsetof(struct ocfs2_dinode, id2.i_symlink);
+}
+
+static inline int ocfs2_extent_recs_per_inode(struct super_block *sb)
+{
+ int size;
+
+ size = sb->s_blocksize -
+ offsetof(struct ocfs2_dinode, id2.i_list.l_recs);
+
+ return size / sizeof(struct ocfs2_extent_rec);
+}
+
+static inline int ocfs2_chain_recs_per_inode(struct super_block *sb)
+{
+ int size;
+
+ size = sb->s_blocksize -
+ offsetof(struct ocfs2_dinode, id2.i_chain.cl_recs);
+
+ return size / sizeof(struct ocfs2_chain_rec);
+}
+
+static inline u16 ocfs2_extent_recs_per_eb(struct super_block *sb)
+{
+ int size;
+
+ size = sb->s_blocksize -
+ offsetof(struct ocfs2_extent_block, h_list.l_recs);
+
+ return size / sizeof(struct ocfs2_extent_rec);
+}
+
+static inline u16 ocfs2_local_alloc_size(struct super_block *sb)
+{
+ u16 size;
+
+ size = sb->s_blocksize -
+ offsetof(struct ocfs2_dinode, id2.i_lab.la_bitmap);
+
+ return size;
+}
+
+static inline int ocfs2_group_bitmap_size(struct super_block *sb)
+{
+ int size;
+
+ size = sb->s_blocksize -
+ offsetof(struct ocfs2_group_desc, bg_bitmap);
+
+ return size;
+}
+
+static inline int ocfs2_truncate_recs_per_inode(struct super_block *sb)
+{
+ int size;
+
+ size = sb->s_blocksize -
+ offsetof(struct ocfs2_dinode, id2.i_dealloc.tl_recs);
+
+ return size / sizeof(struct ocfs2_truncate_rec);
+}
+#else
+static inline int ocfs2_fast_symlink_chars(int blocksize)
+{
+ return blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink);
+}
+
+static inline int ocfs2_extent_recs_per_inode(int blocksize)
+{
+ int size;
+
+ size = blocksize -
+ offsetof(struct ocfs2_dinode, id2.i_list.l_recs);
+
+ return size / sizeof(struct ocfs2_extent_rec);
+}
+
+static inline int ocfs2_chain_recs_per_inode(int blocksize)
+{
+ int size;
+
+ size = blocksize -
+ offsetof(struct ocfs2_dinode, id2.i_chain.cl_recs);
+
+ return size / sizeof(struct ocfs2_chain_rec);
+}
+
+static inline int ocfs2_extent_recs_per_eb(int blocksize)
+{
+ int size;
+
+ size = blocksize -
+ offsetof(struct ocfs2_extent_block, h_list.l_recs);
+
+ return size / sizeof(struct ocfs2_extent_rec);
+}
+
+static inline int ocfs2_local_alloc_size(int blocksize)
+{
+ int size;
+
+ size = blocksize -
+ offsetof(struct ocfs2_dinode, id2.i_lab.la_bitmap);
+
+ return size;
+}
+
+static inline int ocfs2_group_bitmap_size(int blocksize)
+{
+ int size;
+
+ size = blocksize -
+ offsetof(struct ocfs2_group_desc, bg_bitmap);
+
+ return size;
+}
+
+static inline int ocfs2_truncate_recs_per_inode(int blocksize)
+{
+ int size;
+
+ size = blocksize -
+ offsetof(struct ocfs2_dinode, id2.i_dealloc.tl_recs);
+
+ return size / sizeof(struct ocfs2_truncate_rec);
+}
+#endif /* __KERNEL__ */
+
+
+static inline int ocfs2_system_inode_is_global(int type)
+{
+ return ((type >= 0) &&
+ (type <= OCFS2_LAST_GLOBAL_SYSTEM_INODE));
+}
+
+static inline int ocfs2_sprintf_system_inode_name(char *buf, int len,
+ int type, int slot)
+{
+ int chars;
+
+ /*
+ * Global system inodes can only have one copy. Everything
+ * after OCFS2_LAST_GLOBAL_SYSTEM_INODE in the system inode
+ * list has a copy per slot.
+ */
+ if (type <= OCFS2_LAST_GLOBAL_SYSTEM_INODE)
+ chars = snprintf(buf, len,
+ ocfs2_system_inodes[type].si_name);
+ else
+ chars = snprintf(buf, len,
+ ocfs2_system_inodes[type].si_name,
+ slot);
+
+ return chars;
+}
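+
+/*
+ * For example, this builds "global_bitmap" for
+ * GLOBAL_BITMAP_SYSTEM_INODE regardless of slot, but
+ * "orphan_dir:0003" for ORPHAN_DIR_SYSTEM_INODE in slot 3, since the
+ * slot number is only substituted for the per-slot inodes.
+ */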
+
+static inline void ocfs2_set_de_type(struct ocfs2_dir_entry *de,
+ umode_t mode)
+{
+ de->file_type = ocfs2_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
+}
+
+#endif /* _OCFS2_FS_H */
+
diff --git a/fs/ocfs2/ocfs2_lockid.h b/fs/ocfs2/ocfs2_lockid.h
new file mode 100644
index 00000000000..7dd9e1e705b
--- /dev/null
+++ b/fs/ocfs2/ocfs2_lockid.h
@@ -0,0 +1,73 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * ocfs2_lockid.h
+ *
+ * Defines OCFS2 lockid bits.
+ *
+ * Copyright (C) 2002, 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_LOCKID_H
+#define OCFS2_LOCKID_H
+
+/* lock ids are made up in the following manner:
+ * name[0] --> type
+ * name[1-6] --> 6 pad characters, reserved for now
+ * name[7-22] --> block number, expressed in hex as 16 chars
+ * name[23-30] --> i_generation, expressed in hex 8 chars
+ * name[31] --> '\0' */
+#define OCFS2_LOCK_ID_MAX_LEN 32
+#define OCFS2_LOCK_ID_PAD "000000"
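+
+/* As a sketch of the layout above, a metadata lock on block 0x1c2 of
+ * an inode with generation 0x89ab3c would be named (in pieces)
+ * "M" "000000" "00000000000001c2" "0089ab3c", assuming the block and
+ * generation are printed as zero-padded lowercase hex. */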
+
+enum ocfs2_lock_type {
+ OCFS2_LOCK_TYPE_META = 0,
+ OCFS2_LOCK_TYPE_DATA,
+ OCFS2_LOCK_TYPE_SUPER,
+ OCFS2_LOCK_TYPE_RENAME,
+ OCFS2_LOCK_TYPE_RW,
+ OCFS2_NUM_LOCK_TYPES
+};
+
+static inline char ocfs2_lock_type_char(enum ocfs2_lock_type type)
+{
+ char c;
+ switch (type) {
+ case OCFS2_LOCK_TYPE_META:
+ c = 'M';
+ break;
+ case OCFS2_LOCK_TYPE_DATA:
+ c = 'D';
+ break;
+ case OCFS2_LOCK_TYPE_SUPER:
+ c = 'S';
+ break;
+ case OCFS2_LOCK_TYPE_RENAME:
+ c = 'R';
+ break;
+ case OCFS2_LOCK_TYPE_RW:
+ c = 'W';
+ break;
+ default:
+ c = '\0';
+ }
+
+ return c;
+}
+
+#endif /* OCFS2_LOCKID_H */
diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c
new file mode 100644
index 00000000000..871627961d6
--- /dev/null
+++ b/fs/ocfs2/slot_map.c
@@ -0,0 +1,303 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * slot_map.c
+ *
+ *
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/smp_lock.h>
+
+#define MLOG_MASK_PREFIX ML_SUPER
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#include "dlmglue.h"
+#include "extent_map.h"
+#include "heartbeat.h"
+#include "inode.h"
+#include "slot_map.h"
+#include "super.h"
+#include "sysfile.h"
+
+#include "buffer_head_io.h"
+
+static s16 __ocfs2_node_num_to_slot(struct ocfs2_slot_info *si,
+ s16 global);
+static void __ocfs2_fill_slot(struct ocfs2_slot_info *si,
+ s16 slot_num,
+ s16 node_num);
+
+/* Use the slot information we've collected to create a map of mounted
+ * nodes. Should be holding an EX lock on the super block. Assumes
+ * slot info is up to date. Note that we call this *after* we find a slot, so our
+ * own node should be set in the map too... */
+void ocfs2_populate_mounted_map(struct ocfs2_super *osb)
+{
+ int i;
+ struct ocfs2_slot_info *si = osb->slot_info;
+
+ spin_lock(&si->si_lock);
+
+ for (i = 0; i < si->si_size; i++)
+ if (si->si_global_node_nums[i] != OCFS2_INVALID_SLOT)
+ ocfs2_node_map_set_bit(osb, &osb->mounted_map,
+ si->si_global_node_nums[i]);
+
+ spin_unlock(&si->si_lock);
+}
+
+/* Copy the slot information on disk into our in-memory slot_info struct. */
+void ocfs2_update_slot_info(struct ocfs2_slot_info *si)
+{
+ int i;
+ __le16 *disk_info;
+
+ /* we don't read the slot block here as ocfs2_super_lock
+ * should've made sure we have the most recent copy. */
+ spin_lock(&si->si_lock);
+ disk_info = (__le16 *) si->si_bh->b_data;
+
+ for (i = 0; i < si->si_size; i++)
+ si->si_global_node_nums[i] = le16_to_cpu(disk_info[i]);
+
+ spin_unlock(&si->si_lock);
+}
+
+/* Copy our in-memory slot info into its destination bh and write it
+ * out to disk. */
+int ocfs2_update_disk_slots(struct ocfs2_super *osb,
+ struct ocfs2_slot_info *si)
+{
+ int status, i;
+ __le16 *disk_info = (__le16 *) si->si_bh->b_data;
+
+ spin_lock(&si->si_lock);
+ for (i = 0; i < si->si_size; i++)
+ disk_info[i] = cpu_to_le16(si->si_global_node_nums[i]);
+ spin_unlock(&si->si_lock);
+
+ status = ocfs2_write_block(osb, si->si_bh, si->si_inode);
+ if (status < 0)
+ mlog_errno(status);
+
+ return status;
+}
+
+/* try to find global node in the slot info. Returns
+ * OCFS2_INVALID_SLOT if nothing is found. */
+static s16 __ocfs2_node_num_to_slot(struct ocfs2_slot_info *si,
+ s16 global)
+{
+ int i;
+ s16 ret = OCFS2_INVALID_SLOT;
+
+ for(i = 0; i < si->si_num_slots; i++) {
+ if (global == si->si_global_node_nums[i]) {
+ ret = (s16) i;
+ break;
+ }
+ }
+ return ret;
+}
+
+static s16 __ocfs2_find_empty_slot(struct ocfs2_slot_info *si)
+{
+ int i;
+ s16 ret = OCFS2_INVALID_SLOT;
+
+ for(i = 0; i < si->si_num_slots; i++) {
+ if (OCFS2_INVALID_SLOT == si->si_global_node_nums[i]) {
+ ret = (s16) i;
+ break;
+ }
+ }
+ return ret;
+}
+
+s16 ocfs2_node_num_to_slot(struct ocfs2_slot_info *si,
+ s16 global)
+{
+ s16 ret;
+
+ spin_lock(&si->si_lock);
+ ret = __ocfs2_node_num_to_slot(si, global);
+ spin_unlock(&si->si_lock);
+ return ret;
+}
+
+static void __ocfs2_fill_slot(struct ocfs2_slot_info *si,
+ s16 slot_num,
+ s16 node_num)
+{
+ BUG_ON(slot_num == OCFS2_INVALID_SLOT);
+ BUG_ON(slot_num >= si->si_num_slots);
+ BUG_ON((node_num != O2NM_INVALID_NODE_NUM) &&
+ (node_num >= O2NM_MAX_NODES));
+
+ si->si_global_node_nums[slot_num] = node_num;
+}
+
+void ocfs2_clear_slot(struct ocfs2_slot_info *si,
+ s16 slot_num)
+{
+ spin_lock(&si->si_lock);
+ __ocfs2_fill_slot(si, slot_num, OCFS2_INVALID_SLOT);
+ spin_unlock(&si->si_lock);
+}
+
+int ocfs2_init_slot_info(struct ocfs2_super *osb)
+{
+ int status, i;
+ u64 blkno;
+ struct inode *inode = NULL;
+ struct buffer_head *bh = NULL;
+ struct ocfs2_slot_info *si;
+
+ si = kcalloc(1, sizeof(struct ocfs2_slot_info), GFP_KERNEL);
+ if (!si) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ spin_lock_init(&si->si_lock);
+ si->si_num_slots = osb->max_slots;
+ si->si_size = OCFS2_MAX_SLOTS;
+
+ for(i = 0; i < si->si_num_slots; i++)
+ si->si_global_node_nums[i] = OCFS2_INVALID_SLOT;
+
+ inode = ocfs2_get_system_file_inode(osb, SLOT_MAP_SYSTEM_INODE,
+ OCFS2_INVALID_SLOT);
+ if (!inode) {
+ status = -EINVAL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_extent_map_get_blocks(inode, 0ULL, 1, &blkno, NULL);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_read_block(osb, blkno, &bh, 0, inode);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ si->si_inode = inode;
+ si->si_bh = bh;
+ osb->slot_info = si;
+bail:
+ if (status < 0 && si)
+ ocfs2_free_slot_info(si);
+
+ return status;
+}
+
+void ocfs2_free_slot_info(struct ocfs2_slot_info *si)
+{
+ if (si->si_inode)
+ iput(si->si_inode);
+ if (si->si_bh)
+ brelse(si->si_bh);
+ kfree(si);
+}
+
+int ocfs2_find_slot(struct ocfs2_super *osb)
+{
+ int status;
+ s16 slot;
+ struct ocfs2_slot_info *si;
+
+ mlog_entry_void();
+
+ si = osb->slot_info;
+
+ ocfs2_update_slot_info(si);
+
+ spin_lock(&si->si_lock);
+ /* search for ourselves first and take the slot if it already
+ * exists. Perhaps we need to mark this in a variable for our
+ * own journal recovery? Possibly not, though we certainly
+ * need to warn the user. */
+ slot = __ocfs2_node_num_to_slot(si, osb->node_num);
+ if (slot == OCFS2_INVALID_SLOT) {
+ /* if no slot yet, then just take 1st available
+ * one. */
+ slot = __ocfs2_find_empty_slot(si);
+ if (slot == OCFS2_INVALID_SLOT) {
+ spin_unlock(&si->si_lock);
+ mlog(ML_ERROR, "no free slots available!\n");
+ status = -EINVAL;
+ goto bail;
+ }
+ } else
+ mlog(ML_NOTICE, "slot %d is already allocated to this node!\n",
+ slot);
+
+ __ocfs2_fill_slot(si, slot, osb->node_num);
+ osb->slot_num = slot;
+ spin_unlock(&si->si_lock);
+
+ mlog(ML_NOTICE, "taking node slot %d\n", osb->slot_num);
+
+ status = ocfs2_update_disk_slots(osb, si);
+ if (status < 0)
+ mlog_errno(status);
+
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+void ocfs2_put_slot(struct ocfs2_super *osb)
+{
+ int status;
+ struct ocfs2_slot_info *si = osb->slot_info;
+
+ if (!si)
+ return;
+
+ ocfs2_update_slot_info(si);
+
+ spin_lock(&si->si_lock);
+ __ocfs2_fill_slot(si, osb->slot_num, OCFS2_INVALID_SLOT);
+ osb->slot_num = OCFS2_INVALID_SLOT;
+ spin_unlock(&si->si_lock);
+
+ status = ocfs2_update_disk_slots(osb, si);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+bail:
+ osb->slot_info = NULL;
+ ocfs2_free_slot_info(si);
+}
+
diff --git a/fs/ocfs2/slot_map.h b/fs/ocfs2/slot_map.h
new file mode 100644
index 00000000000..d8c8ceed031
--- /dev/null
+++ b/fs/ocfs2/slot_map.h
@@ -0,0 +1,66 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * slot_map.h
+ *
+ * In-memory slot map structures and function prototypes
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+
+#ifndef SLOTMAP_H
+#define SLOTMAP_H
+
+struct ocfs2_slot_info {
+ spinlock_t si_lock;
+
+ struct inode *si_inode;
+ struct buffer_head *si_bh;
+ unsigned int si_num_slots;
+ unsigned int si_size;
+ s16 si_global_node_nums[OCFS2_MAX_SLOTS];
+};
+
+int ocfs2_init_slot_info(struct ocfs2_super *osb);
+void ocfs2_free_slot_info(struct ocfs2_slot_info *si);
+
+int ocfs2_find_slot(struct ocfs2_super *osb);
+void ocfs2_put_slot(struct ocfs2_super *osb);
+
+void ocfs2_update_slot_info(struct ocfs2_slot_info *si);
+int ocfs2_update_disk_slots(struct ocfs2_super *osb,
+ struct ocfs2_slot_info *si);
+
+s16 ocfs2_node_num_to_slot(struct ocfs2_slot_info *si,
+ s16 global);
+void ocfs2_clear_slot(struct ocfs2_slot_info *si,
+ s16 slot_num);
+
+void ocfs2_populate_mounted_map(struct ocfs2_super *osb);
+
+static inline int ocfs2_is_empty_slot(struct ocfs2_slot_info *si,
+ int slot_num)
+{
+ BUG_ON(slot_num == OCFS2_INVALID_SLOT);
+ assert_spin_locked(&si->si_lock);
+
+ return si->si_global_node_nums[slot_num] == OCFS2_INVALID_SLOT;
+}
+
+#endif
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
new file mode 100644
index 00000000000..c46c164aefb
--- /dev/null
+++ b/fs/ocfs2/suballoc.c
@@ -0,0 +1,1651 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * suballoc.c
+ *
+ * metadata alloc and free
+ * Inspired by ext3 block groups.
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+
+#define MLOG_MASK_PREFIX ML_DISK_ALLOC
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#include "alloc.h"
+#include "dlmglue.h"
+#include "inode.h"
+#include "journal.h"
+#include "localalloc.h"
+#include "suballoc.h"
+#include "super.h"
+#include "sysfile.h"
+#include "uptodate.h"
+
+#include "buffer_head_io.h"
+
+static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
+static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
+static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
+static int ocfs2_block_group_fill(struct ocfs2_journal_handle *handle,
+ struct inode *alloc_inode,
+ struct buffer_head *bg_bh,
+ u64 group_blkno,
+ u16 my_chain,
+ struct ocfs2_chain_list *cl);
+static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
+ struct inode *alloc_inode,
+ struct buffer_head *bh);
+
+static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
+ struct ocfs2_alloc_context *ac);
+
+static int ocfs2_cluster_group_search(struct inode *inode,
+ struct buffer_head *group_bh,
+ u32 bits_wanted, u32 min_bits,
+ u16 *bit_off, u16 *bits_found);
+static int ocfs2_block_group_search(struct inode *inode,
+ struct buffer_head *group_bh,
+ u32 bits_wanted, u32 min_bits,
+ u16 *bit_off, u16 *bits_found);
+static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
+ u32 bits_wanted,
+ u32 min_bits,
+ u16 *bit_off,
+ unsigned int *num_bits,
+ u64 *bg_blkno);
+static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
+ struct ocfs2_alloc_context *ac,
+ u32 bits_wanted,
+ u32 min_bits,
+ u16 *bit_off,
+ unsigned int *num_bits,
+ u64 *bg_blkno);
+static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
+ int nr);
+static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
+ struct buffer_head *bg_bh,
+ unsigned int bits_wanted,
+ u16 *bit_off,
+ u16 *bits_found);
+static inline int ocfs2_block_group_set_bits(struct ocfs2_journal_handle *handle,
+ struct inode *alloc_inode,
+ struct ocfs2_group_desc *bg,
+ struct buffer_head *group_bh,
+ unsigned int bit_off,
+ unsigned int num_bits);
+static inline int ocfs2_block_group_clear_bits(struct ocfs2_journal_handle *handle,
+ struct inode *alloc_inode,
+ struct ocfs2_group_desc *bg,
+ struct buffer_head *group_bh,
+ unsigned int bit_off,
+ unsigned int num_bits);
+
+static int ocfs2_relink_block_group(struct ocfs2_journal_handle *handle,
+ struct inode *alloc_inode,
+ struct buffer_head *fe_bh,
+ struct buffer_head *bg_bh,
+ struct buffer_head *prev_bg_bh,
+ u16 chain);
+static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg,
+ u32 wanted);
+static int ocfs2_free_suballoc_bits(struct ocfs2_journal_handle *handle,
+ struct inode *alloc_inode,
+ struct buffer_head *alloc_bh,
+ unsigned int start_bit,
+ u64 bg_blkno,
+ unsigned int count);
+static inline u64 ocfs2_which_suballoc_group(u64 block,
+ unsigned int bit);
+static inline u32 ocfs2_desc_bitmap_to_cluster_off(struct inode *inode,
+ u64 bg_blkno,
+ u16 bg_bit_off);
+static inline u64 ocfs2_which_cluster_group(struct inode *inode,
+ u32 cluster);
+static inline void ocfs2_block_to_cluster_group(struct inode *inode,
+ u64 data_blkno,
+ u64 *bg_blkno,
+ u16 *bg_bit_off);
+
+void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
+{
+ if (ac->ac_inode)
+ iput(ac->ac_inode);
+ if (ac->ac_bh)
+ brelse(ac->ac_bh);
+ kfree(ac);
+}
+
+static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
+{
+ return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc);
+}
+
+static int ocfs2_block_group_fill(struct ocfs2_journal_handle *handle,
+ struct inode *alloc_inode,
+ struct buffer_head *bg_bh,
+ u64 group_blkno,
+ u16 my_chain,
+ struct ocfs2_chain_list *cl)
+{
+ int status = 0;
+ struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
+ struct super_block * sb = alloc_inode->i_sb;
+
+ mlog_entry_void();
+
+ if (((unsigned long long) bg_bh->b_blocknr) != group_blkno) {
+ ocfs2_error(alloc_inode->i_sb, "group block (%"MLFu64") "
+ "!= b_blocknr (%llu)", group_blkno,
+ (unsigned long long) bg_bh->b_blocknr);
+ status = -EIO;
+ goto bail;
+ }
+
+ status = ocfs2_journal_access(handle,
+ alloc_inode,
+ bg_bh,
+ OCFS2_JOURNAL_ACCESS_CREATE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ memset(bg, 0, sb->s_blocksize);
+ strcpy(bg->bg_signature, OCFS2_GROUP_DESC_SIGNATURE);
+ bg->bg_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
+ bg->bg_size = cpu_to_le16(ocfs2_group_bitmap_size(sb));
+ bg->bg_bits = cpu_to_le16(ocfs2_bits_per_group(cl));
+ bg->bg_chain = cpu_to_le16(my_chain);
+ bg->bg_next_group = cl->cl_recs[my_chain].c_blkno;
+ bg->bg_parent_dinode = cpu_to_le64(OCFS2_I(alloc_inode)->ip_blkno);
+ bg->bg_blkno = cpu_to_le64(group_blkno);
+ /* set the 1st bit in the bitmap to account for the descriptor block */
+ ocfs2_set_bit(0, (unsigned long *)bg->bg_bitmap);
+ bg->bg_free_bits_count = cpu_to_le16(le16_to_cpu(bg->bg_bits) - 1);
+
+ status = ocfs2_journal_dirty(handle, bg_bh);
+ if (status < 0)
+ mlog_errno(status);
+
+ /* There is no need to zero out or otherwise initialize the
+ * other blocks in a group - All valid FS metadata in a block
+ * group stores the superblock fs_generation value at
+ * allocation time. */
+
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+static inline u16 ocfs2_find_smallest_chain(struct ocfs2_chain_list *cl)
+{
+ u16 curr, best;
+
+ best = curr = 0;
+ while (curr < le16_to_cpu(cl->cl_count)) {
+ if (le32_to_cpu(cl->cl_recs[best].c_total) >
+ le32_to_cpu(cl->cl_recs[curr].c_total))
+ best = curr;
+ curr++;
+ }
+ return best;
+}
+
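+/*
+ * Note: "smallest" here means the chain with the fewest total bits; new
+ * block groups are added to it so the chains stay roughly balanced.
+ * Contrast with ocfs2_find_victim_chain() further down, which picks the
+ * chain with the most free bits when allocating.
+ */
+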
+/*
+ * We expect the block group allocator to already be locked.
+ */
+static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
+ struct inode *alloc_inode,
+ struct buffer_head *bh)
+{
+ int status, credits;
+ struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;
+ struct ocfs2_chain_list *cl;
+ struct ocfs2_alloc_context *ac = NULL;
+ struct ocfs2_journal_handle *handle = NULL;
+ u32 bit_off, num_bits;
+ u16 alloc_rec;
+ u64 bg_blkno;
+ struct buffer_head *bg_bh = NULL;
+ struct ocfs2_group_desc *bg;
+
+ BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode));
+
+ mlog_entry_void();
+
+ handle = ocfs2_alloc_handle(osb);
+ if (!handle) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ cl = &fe->id2.i_chain;
+ status = ocfs2_reserve_clusters(osb,
+ handle,
+ le16_to_cpu(cl->cl_cpg),
+ &ac);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto bail;
+ }
+
+ credits = ocfs2_calc_group_alloc_credits(osb->sb,
+ le16_to_cpu(cl->cl_cpg));
+ handle = ocfs2_start_trans(osb, handle, credits);
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ handle = NULL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_claim_clusters(osb,
+ handle,
+ ac,
+ le16_to_cpu(cl->cl_cpg),
+ &bit_off,
+ &num_bits);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto bail;
+ }
+
+ alloc_rec = ocfs2_find_smallest_chain(cl);
+
+ /* setup the group */
+ bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
+ mlog(0, "new descriptor, record %u, at block %"MLFu64"\n",
+ alloc_rec, bg_blkno);
+
+ bg_bh = sb_getblk(osb->sb, bg_blkno);
+ if (!bg_bh) {
+ status = -EIO;
+ mlog_errno(status);
+ goto bail;
+ }
+ ocfs2_set_new_buffer_uptodate(alloc_inode, bg_bh);
+
+ status = ocfs2_block_group_fill(handle,
+ alloc_inode,
+ bg_bh,
+ bg_blkno,
+ alloc_rec,
+ cl);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ bg = (struct ocfs2_group_desc *) bg_bh->b_data;
+
+ status = ocfs2_journal_access(handle, alloc_inode,
+ bh, OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ le32_add_cpu(&cl->cl_recs[alloc_rec].c_free,
+ le16_to_cpu(bg->bg_free_bits_count));
+ le32_add_cpu(&cl->cl_recs[alloc_rec].c_total, le16_to_cpu(bg->bg_bits));
+ cl->cl_recs[alloc_rec].c_blkno = cpu_to_le64(bg_blkno);
+ if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
+ le16_add_cpu(&cl->cl_next_free_rec, 1);
+
+ le32_add_cpu(&fe->id1.bitmap1.i_used, le16_to_cpu(bg->bg_bits) -
+ le16_to_cpu(bg->bg_free_bits_count));
+ le32_add_cpu(&fe->id1.bitmap1.i_total, le16_to_cpu(bg->bg_bits));
+ le32_add_cpu(&fe->i_clusters, le16_to_cpu(cl->cl_cpg));
+
+ status = ocfs2_journal_dirty(handle, bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ spin_lock(&OCFS2_I(alloc_inode)->ip_lock);
+ OCFS2_I(alloc_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
+ fe->i_size = cpu_to_le64(ocfs2_clusters_to_bytes(alloc_inode->i_sb,
+ le32_to_cpu(fe->i_clusters)));
+ spin_unlock(&OCFS2_I(alloc_inode)->ip_lock);
+ i_size_write(alloc_inode, le64_to_cpu(fe->i_size));
+ alloc_inode->i_blocks =
+ ocfs2_align_bytes_to_sectors(i_size_read(alloc_inode));
+
+ status = 0;
+bail:
+ if (handle)
+ ocfs2_commit_trans(handle);
+
+ if (ac)
+ ocfs2_free_alloc_context(ac);
+
+ if (bg_bh)
+ brelse(bg_bh);
+
+ mlog_exit(status);
+ return status;
+}
+
+static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
+ struct ocfs2_alloc_context *ac)
+{
+ int status;
+ u32 bits_wanted = ac->ac_bits_wanted;
+ struct inode *alloc_inode = ac->ac_inode;
+ struct buffer_head *bh = NULL;
+ struct ocfs2_journal_handle *handle = ac->ac_handle;
+ struct ocfs2_dinode *fe;
+ u32 free_bits;
+
+ mlog_entry_void();
+
+ BUG_ON(handle->flags & OCFS2_HANDLE_STARTED);
+
+ ocfs2_handle_add_inode(handle, alloc_inode);
+ status = ocfs2_meta_lock(alloc_inode, handle, &bh, 1);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ fe = (struct ocfs2_dinode *) bh->b_data;
+ if (!OCFS2_IS_VALID_DINODE(fe)) {
+ OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe);
+ status = -EIO;
+ goto bail;
+ }
+ if (!(fe->i_flags & cpu_to_le32(OCFS2_CHAIN_FL))) {
+ ocfs2_error(alloc_inode->i_sb, "Invalid chain allocator "
+ "# %"MLFu64, le64_to_cpu(fe->i_blkno));
+ status = -EIO;
+ goto bail;
+ }
+
+ free_bits = le32_to_cpu(fe->id1.bitmap1.i_total) -
+ le32_to_cpu(fe->id1.bitmap1.i_used);
+
+ if (bits_wanted > free_bits) {
+ /* cluster bitmap never grows */
+ if (ocfs2_is_cluster_bitmap(alloc_inode)) {
+ mlog(0, "Disk Full: wanted=%u, free_bits=%u\n",
+ bits_wanted, free_bits);
+ status = -ENOSPC;
+ goto bail;
+ }
+
+ status = ocfs2_block_group_alloc(osb, alloc_inode, bh);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto bail;
+ }
+ atomic_inc(&osb->alloc_stats.bg_extends);
+
+ /* You should never ask for this much metadata */
+ BUG_ON(bits_wanted >
+ (le32_to_cpu(fe->id1.bitmap1.i_total)
+ - le32_to_cpu(fe->id1.bitmap1.i_used)));
+ }
+
+ get_bh(bh);
+ ac->ac_bh = bh;
+bail:
+ if (bh)
+ brelse(bh);
+
+ mlog_exit(status);
+ return status;
+}
+
+int ocfs2_reserve_new_metadata(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct ocfs2_dinode *fe,
+ struct ocfs2_alloc_context **ac)
+{
+ int status;
+ struct inode *alloc_inode = NULL;
+
+ *ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
+ if (!(*ac)) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ (*ac)->ac_bits_wanted = ocfs2_extend_meta_needed(fe);
+ (*ac)->ac_handle = handle;
+ (*ac)->ac_which = OCFS2_AC_USE_META;
+
+#ifndef OCFS2_USE_ALL_METADATA_SUBALLOCATORS
+ alloc_inode = ocfs2_get_system_file_inode(osb,
+ EXTENT_ALLOC_SYSTEM_INODE,
+ 0);
+#else
+ alloc_inode = ocfs2_get_system_file_inode(osb,
+ EXTENT_ALLOC_SYSTEM_INODE,
+ osb->slot_num);
+#endif
+ if (!alloc_inode) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ (*ac)->ac_inode = igrab(alloc_inode);
+ (*ac)->ac_group_search = ocfs2_block_group_search;
+
+ status = ocfs2_reserve_suballoc_bits(osb, (*ac));
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = 0;
+bail:
+ if ((status < 0) && *ac) {
+ ocfs2_free_alloc_context(*ac);
+ *ac = NULL;
+ }
+
+ if (alloc_inode)
+ iput(alloc_inode);
+
+ mlog_exit(status);
+ return status;
+}
+
+int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct ocfs2_alloc_context **ac)
+{
+ int status;
+ struct inode *alloc_inode = NULL;
+
+ *ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
+ if (!(*ac)) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ (*ac)->ac_bits_wanted = 1;
+ (*ac)->ac_handle = handle;
+ (*ac)->ac_which = OCFS2_AC_USE_INODE;
+
+ alloc_inode = ocfs2_get_system_file_inode(osb,
+ INODE_ALLOC_SYSTEM_INODE,
+ osb->slot_num);
+ if (!alloc_inode) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ (*ac)->ac_inode = igrab(alloc_inode);
+ (*ac)->ac_group_search = ocfs2_block_group_search;
+
+ status = ocfs2_reserve_suballoc_bits(osb, *ac);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = 0;
+bail:
+ if ((status < 0) && *ac) {
+ ocfs2_free_alloc_context(*ac);
+ *ac = NULL;
+ }
+
+ if (alloc_inode)
+ iput(alloc_inode);
+
+ mlog_exit(status);
+ return status;
+}
+
+/* local alloc code has to do the same thing, so rather than do this
+ * twice.. */
+int ocfs2_reserve_cluster_bitmap_bits(struct ocfs2_super *osb,
+ struct ocfs2_alloc_context *ac)
+{
+ int status;
+
+ ac->ac_inode = ocfs2_get_system_file_inode(osb,
+ GLOBAL_BITMAP_SYSTEM_INODE,
+ OCFS2_INVALID_SLOT);
+ if (!ac->ac_inode) {
+ status = -EINVAL;
+ mlog(ML_ERROR, "Could not get bitmap inode!\n");
+ goto bail;
+ }
+ ac->ac_which = OCFS2_AC_USE_MAIN;
+ ac->ac_group_search = ocfs2_cluster_group_search;
+
+ status = ocfs2_reserve_suballoc_bits(osb, ac);
+ if (status < 0 && status != -ENOSPC)
+ mlog_errno(status);
+bail:
+ return status;
+}
+
+/* Callers don't need to care which bitmap (local alloc or main) to
+ * use so we figure it out for them, but unfortunately this clutters
+ * things a bit. */
+int ocfs2_reserve_clusters(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ u32 bits_wanted,
+ struct ocfs2_alloc_context **ac)
+{
+ int status;
+
+ mlog_entry_void();
+
+ BUG_ON(!handle);
+
+ *ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
+ if (!(*ac)) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ (*ac)->ac_bits_wanted = bits_wanted;
+ (*ac)->ac_handle = handle;
+
+ status = -ENOSPC;
+ if (ocfs2_alloc_should_use_local(osb, bits_wanted)) {
+ status = ocfs2_reserve_local_alloc_bits(osb,
+ handle,
+ bits_wanted,
+ *ac);
+ if ((status < 0) && (status != -ENOSPC)) {
+ mlog_errno(status);
+ goto bail;
+ } else if (status == -ENOSPC) {
+ /* reserve_local_bits will return enospc with
+ * the local alloc inode still locked, so we
+ * can change this safely here. */
+ mlog(0, "Disabling local alloc\n");
+ /* We set to OCFS2_LA_DISABLED so that umount
+ * can clean up what's left of the local
+ * allocation */
+ osb->local_alloc_state = OCFS2_LA_DISABLED;
+ }
+ }
+
+ if (status == -ENOSPC) {
+ status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
+ status = 0;
+bail:
+ if ((status < 0) && *ac) {
+ ocfs2_free_alloc_context(*ac);
+ *ac = NULL;
+ }
+
+ mlog_exit(status);
+ return status;
+}
+
+/*
+ * More or less lifted from ext3. I'll leave their description below:
+ *
+ * "For ext3 allocations, we must not reuse any blocks which are
+ * allocated in the bitmap buffer's "last committed data" copy. This
+ * prevents deletes from freeing up the page for reuse until we have
+ * committed the delete transaction.
+ *
+ * If we didn't do this, then deleting something and reallocating it as
+ * data would allow the old block to be overwritten before the
+ * transaction committed (because we force data to disk before commit).
+ * This would lead to corruption if we crashed between overwriting the
+ * data and committing the delete.
+ *
+ * @@@ We may want to make this allocation behaviour conditional on
+ * data-writes at some point, and disable it for metadata allocations or
+ * sync-data inodes."
+ *
+ * Note: OCFS2 already does this differently for metadata vs data
+ * allocations, as those bitmaps are separate and undo access is never
+ * called on a metadata group descriptor.
+ */
+static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
+ int nr)
+{
+ struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
+
+ if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap))
+ return 0;
+ if (!buffer_jbd(bg_bh) || !bh2jh(bg_bh)->b_committed_data)
+ return 1;
+
+ bg = (struct ocfs2_group_desc *) bh2jh(bg_bh)->b_committed_data;
+ return !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
+}
+
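+/*
+ * A hedged illustration of the check above (the bit number is made up):
+ * suppose bit 12 was freed by a delete whose transaction has not yet
+ * committed.  The live bg_bitmap already shows the bit clear, but the
+ * journal's "last committed" copy in bh2jh(bg_bh)->b_committed_data
+ * still has it set, so ocfs2_test_bg_bit_allocatable() returns 0 and the
+ * searchers below skip the bit until the delete commits.
+ */
+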
+static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
+ struct buffer_head *bg_bh,
+ unsigned int bits_wanted,
+ u16 *bit_off,
+ u16 *bits_found)
+{
+ void *bitmap;
+ u16 best_offset, best_size;
+ int offset, start, found, status = 0;
+ struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
+
+ if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
+ OCFS2_RO_ON_INVALID_GROUP_DESC(osb->sb, bg);
+ return -EIO;
+ }
+
+ found = start = best_offset = best_size = 0;
+ bitmap = bg->bg_bitmap;
+
+ while((offset = ocfs2_find_next_zero_bit(bitmap,
+ le16_to_cpu(bg->bg_bits),
+ start)) != -1) {
+ if (offset == le16_to_cpu(bg->bg_bits))
+ break;
+
+ if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) {
+ /* We found a zero, but we can't use it as it
+ * hasn't been put to disk yet! */
+ found = 0;
+ start = offset + 1;
+ } else if (offset == start) {
+ /* we found a zero */
+ found++;
+ /* move start to the next bit to test */
+ start++;
+ } else {
+ /* got a zero after some ones */
+ found = 1;
+ start = offset + 1;
+ }
+ if (found > best_size) {
+ best_size = found;
+ best_offset = start - found;
+ }
+ /* we got everything we needed */
+ if (found == bits_wanted) {
+ /* mlog(0, "Found it all!\n"); */
+ break;
+ }
+ }
+
+ /* XXX: I think the first clause is equivalent to the second
+ * - jlbec */
+ if (found == bits_wanted) {
+ *bit_off = start - found;
+ *bits_found = found;
+ } else if (best_size) {
+ *bit_off = best_offset;
+ *bits_found = best_size;
+ } else {
+ status = -ENOSPC;
+ /* No error log here -- see the comment above
+ * ocfs2_test_bg_bit_allocatable */
+ }
+
+ return status;
+}
+
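+/*
+ * Example walk of the scan above, with a hypothetical bitmap: for bits
+ * 0-7 = 1,1,0,0,1,0,0,0 and bits_wanted = 3, the run at offsets 2-3 is
+ * remembered as best_offset = 2, best_size = 2; the run starting at
+ * offset 5 then reaches found = 3, so the loop breaks and the caller
+ * gets *bit_off = 5, *bits_found = 3.
+ */
+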
+static inline int ocfs2_block_group_set_bits(struct ocfs2_journal_handle *handle,
+ struct inode *alloc_inode,
+ struct ocfs2_group_desc *bg,
+ struct buffer_head *group_bh,
+ unsigned int bit_off,
+ unsigned int num_bits)
+{
+ int status;
+ void *bitmap = bg->bg_bitmap;
+ int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
+
+ mlog_entry_void();
+
+ if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
+ OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg);
+ status = -EIO;
+ goto bail;
+ }
+ BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits);
+
+ mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off,
+ num_bits);
+
+ if (ocfs2_is_cluster_bitmap(alloc_inode))
+ journal_type = OCFS2_JOURNAL_ACCESS_UNDO;
+
+ status = ocfs2_journal_access(handle,
+ alloc_inode,
+ group_bh,
+ journal_type);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ le16_add_cpu(&bg->bg_free_bits_count, -num_bits);
+
+ while(num_bits--)
+ ocfs2_set_bit(bit_off++, bitmap);
+
+ status = ocfs2_journal_dirty(handle,
+ group_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+/* find the one with the most empty bits */
+static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl)
+{
+ u16 curr, best;
+
+ BUG_ON(!cl->cl_next_free_rec);
+
+ best = curr = 0;
+ while (curr < le16_to_cpu(cl->cl_next_free_rec)) {
+ if (le32_to_cpu(cl->cl_recs[curr].c_free) >
+ le32_to_cpu(cl->cl_recs[best].c_free))
+ best = curr;
+ curr++;
+ }
+
+ BUG_ON(best >= le16_to_cpu(cl->cl_next_free_rec));
+ return best;
+}
+
+static int ocfs2_relink_block_group(struct ocfs2_journal_handle *handle,
+ struct inode *alloc_inode,
+ struct buffer_head *fe_bh,
+ struct buffer_head *bg_bh,
+ struct buffer_head *prev_bg_bh,
+ u16 chain)
+{
+ int status;
+ /* there is a really tiny chance the journal calls could fail,
+ * but we wouldn't want inconsistent blocks in *any* case. */
+ u64 fe_ptr, bg_ptr, prev_bg_ptr;
+ struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
+ struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
+ struct ocfs2_group_desc *prev_bg = (struct ocfs2_group_desc *) prev_bg_bh->b_data;
+
+ if (!OCFS2_IS_VALID_DINODE(fe)) {
+ OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe);
+ status = -EIO;
+ goto out;
+ }
+ if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
+ OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg);
+ status = -EIO;
+ goto out;
+ }
+ if (!OCFS2_IS_VALID_GROUP_DESC(prev_bg)) {
+ OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, prev_bg);
+ status = -EIO;
+ goto out;
+ }
+
+ mlog(0, "In suballoc %"MLFu64", chain %u, move group %"MLFu64" to "
+ "top, prev = %"MLFu64"\n",
+ fe->i_blkno, chain, bg->bg_blkno, prev_bg->bg_blkno);
+
+ fe_ptr = le64_to_cpu(fe->id2.i_chain.cl_recs[chain].c_blkno);
+ bg_ptr = le64_to_cpu(bg->bg_next_group);
+ prev_bg_ptr = le64_to_cpu(prev_bg->bg_next_group);
+
+ status = ocfs2_journal_access(handle, alloc_inode, prev_bg_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto out_rollback;
+ }
+
+ prev_bg->bg_next_group = bg->bg_next_group;
+
+ status = ocfs2_journal_dirty(handle, prev_bg_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto out_rollback;
+ }
+
+ status = ocfs2_journal_access(handle, alloc_inode, bg_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto out_rollback;
+ }
+
+ bg->bg_next_group = fe->id2.i_chain.cl_recs[chain].c_blkno;
+
+ status = ocfs2_journal_dirty(handle, bg_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto out_rollback;
+ }
+
+ status = ocfs2_journal_access(handle, alloc_inode, fe_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto out_rollback;
+ }
+
+ fe->id2.i_chain.cl_recs[chain].c_blkno = bg->bg_blkno;
+
+ status = ocfs2_journal_dirty(handle, fe_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto out_rollback;
+ }
+
+ status = 0;
+out_rollback:
+ if (status < 0) {
+ fe->id2.i_chain.cl_recs[chain].c_blkno = cpu_to_le64(fe_ptr);
+ bg->bg_next_group = cpu_to_le64(bg_ptr);
+ prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr);
+ }
+out:
+ mlog_exit(status);
+ return status;
+}
+
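+/*
+ * For illustration: if the chain reads HEAD -> ... -> PREV -> BG -> NEXT
+ * before the relink, the three journalled updates above leave it as
+ * BG -> HEAD -> ... -> PREV -> NEXT, i.e. the reasonably empty target
+ * group is moved to the front so the next search hits it first.  The
+ * saved fe_ptr/bg_ptr/prev_bg_ptr values undo the in-memory change if
+ * any of the journal calls fail.
+ */
+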
+static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg,
+ u32 wanted)
+{
+ return le16_to_cpu(bg->bg_free_bits_count) > wanted;
+}
+
+/* return 0 on success, -ENOSPC to keep searching and any other < 0
+ * value on error. */
+static int ocfs2_cluster_group_search(struct inode *inode,
+ struct buffer_head *group_bh,
+ u32 bits_wanted, u32 min_bits,
+ u16 *bit_off, u16 *bits_found)
+{
+ int search = -ENOSPC;
+ int ret;
+ struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) group_bh->b_data;
+ u16 tmp_off, tmp_found;
+
+ BUG_ON(!ocfs2_is_cluster_bitmap(inode));
+
+ if (bg->bg_free_bits_count) {
+ ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
+ group_bh, bits_wanted,
+ &tmp_off, &tmp_found);
+ if (ret)
+ return ret;
+
+ /* ocfs2_block_group_find_clear_bits() might
+ * return success, but we still want to return
+ * -ENOSPC unless it found the minimum number
+ * of bits. */
+ if (min_bits <= tmp_found) {
+ *bit_off = tmp_off;
+ *bits_found = tmp_found;
+ search = 0; /* success */
+ }
+ }
+
+ return search;
+}
+
+static int ocfs2_block_group_search(struct inode *inode,
+ struct buffer_head *group_bh,
+ u32 bits_wanted, u32 min_bits,
+ u16 *bit_off, u16 *bits_found)
+{
+ int ret = -ENOSPC;
+ struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) group_bh->b_data;
+
+ BUG_ON(min_bits != 1);
+ BUG_ON(ocfs2_is_cluster_bitmap(inode));
+
+ if (bg->bg_free_bits_count)
+ ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
+ group_bh, bits_wanted,
+ bit_off, bits_found);
+
+ return ret;
+}
+
+static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
+ u32 bits_wanted,
+ u32 min_bits,
+ u16 *bit_off,
+ unsigned int *num_bits,
+ u64 *bg_blkno)
+{
+ int status;
+ u16 chain, tmp_bits;
+ u32 tmp_used;
+ u64 next_group;
+ struct ocfs2_journal_handle *handle = ac->ac_handle;
+ struct inode *alloc_inode = ac->ac_inode;
+ struct buffer_head *group_bh = NULL;
+ struct buffer_head *prev_group_bh = NULL;
+ struct ocfs2_dinode *fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;
+ struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
+ struct ocfs2_group_desc *bg;
+
+ chain = ac->ac_chain;
+ mlog(0, "trying to alloc %u bits from chain %u, inode %"MLFu64"\n",
+ bits_wanted, chain, OCFS2_I(alloc_inode)->ip_blkno);
+
+ status = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb),
+ le64_to_cpu(cl->cl_recs[chain].c_blkno),
+ &group_bh, OCFS2_BH_CACHED, alloc_inode);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ bg = (struct ocfs2_group_desc *) group_bh->b_data;
+ if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
+ OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg);
+ status = -EIO;
+ goto bail;
+ }
+
+ status = -ENOSPC;
+ /* for now, the chain search is a bit simplistic. We just use
+ * the 1st group with any empty bits. */
+ while ((status = ac->ac_group_search(alloc_inode, group_bh,
+ bits_wanted, min_bits, bit_off,
+ &tmp_bits)) == -ENOSPC) {
+ if (!bg->bg_next_group)
+ break;
+
+ if (prev_group_bh) {
+ brelse(prev_group_bh);
+ prev_group_bh = NULL;
+ }
+ next_group = le64_to_cpu(bg->bg_next_group);
+ prev_group_bh = group_bh;
+ group_bh = NULL;
+ status = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb),
+ next_group, &group_bh,
+ OCFS2_BH_CACHED, alloc_inode);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ bg = (struct ocfs2_group_desc *) group_bh->b_data;
+ if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
+ OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg);
+ status = -EIO;
+ goto bail;
+ }
+ }
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto bail;
+ }
+
+ mlog(0, "alloc succeeds: we give %u bits from block group %"MLFu64"\n",
+ tmp_bits, bg->bg_blkno);
+
+ *num_bits = tmp_bits;
+
+ BUG_ON(*num_bits == 0);
+
+ /*
+ * Keep track of previous block descriptor read. When
+ * we find a target, if we have read more than X
+ * number of descriptors, and the target is reasonably
+ * empty, relink him to top of his chain.
+ *
+ * We've read 0 extra blocks and only send one more to
+ * the transaction, yet the next guy to search has a
+ * much easier time.
+ *
+ * Do this *after* figuring out how many bits we're taking out
+ * of our target group.
+ */
+ if (ac->ac_allow_chain_relink &&
+ (prev_group_bh) &&
+ (ocfs2_block_group_reasonably_empty(bg, *num_bits))) {
+ status = ocfs2_relink_block_group(handle, alloc_inode,
+ ac->ac_bh, group_bh,
+ prev_group_bh, chain);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
+ /* Ok, claim our bits now: set the info on dinode, chainlist
+ * and then the group */
+ status = ocfs2_journal_access(handle,
+ alloc_inode,
+ ac->ac_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
+ fe->id1.bitmap1.i_used = cpu_to_le32(*num_bits + tmp_used);
+ le32_add_cpu(&cl->cl_recs[chain].c_free, -(*num_bits));
+
+ status = ocfs2_journal_dirty(handle,
+ ac->ac_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_block_group_set_bits(handle,
+ alloc_inode,
+ bg,
+ group_bh,
+ *bit_off,
+ *num_bits);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ mlog(0, "Allocated %u bits from suballocator %"MLFu64"\n",
+ *num_bits, fe->i_blkno);
+
+ *bg_blkno = le64_to_cpu(bg->bg_blkno);
+bail:
+ if (group_bh)
+ brelse(group_bh);
+ if (prev_group_bh)
+ brelse(prev_group_bh);
+
+ mlog_exit(status);
+ return status;
+}
+
+/* will give out up to bits_wanted contiguous bits. */
+static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
+ struct ocfs2_alloc_context *ac,
+ u32 bits_wanted,
+ u32 min_bits,
+ u16 *bit_off,
+ unsigned int *num_bits,
+ u64 *bg_blkno)
+{
+ int status;
+ u16 victim, i;
+ struct ocfs2_chain_list *cl;
+ struct ocfs2_dinode *fe;
+
+ mlog_entry_void();
+
+ BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
+ BUG_ON(bits_wanted > (ac->ac_bits_wanted - ac->ac_bits_given));
+ BUG_ON(!ac->ac_bh);
+
+ fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;
+ if (!OCFS2_IS_VALID_DINODE(fe)) {
+ OCFS2_RO_ON_INVALID_DINODE(osb->sb, fe);
+ status = -EIO;
+ goto bail;
+ }
+ if (le32_to_cpu(fe->id1.bitmap1.i_used) >=
+ le32_to_cpu(fe->id1.bitmap1.i_total)) {
+ ocfs2_error(osb->sb, "Chain allocator dinode %"MLFu64" has %u"
+ "used bits but only %u total.",
+ le64_to_cpu(fe->i_blkno),
+ le32_to_cpu(fe->id1.bitmap1.i_used),
+ le32_to_cpu(fe->id1.bitmap1.i_total));
+ status = -EIO;
+ goto bail;
+ }
+
+ cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
+
+ victim = ocfs2_find_victim_chain(cl);
+ ac->ac_chain = victim;
+ ac->ac_allow_chain_relink = 1;
+
+ status = ocfs2_search_chain(ac, bits_wanted, min_bits, bit_off,
+ num_bits, bg_blkno);
+ if (!status)
+ goto bail;
+ if (status < 0 && status != -ENOSPC) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ mlog(0, "Search of victim chain %u came up with nothing, "
+ "trying all chains now.\n", victim);
+
+ /* If we didn't pick a good victim, then just default to
+ * searching each chain in order. Don't allow chain relinking
+ * because we only calculate enough journal credits for one
+ * relink per alloc. */
+ ac->ac_allow_chain_relink = 0;
+ for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) {
+ if (i == victim)
+ continue;
+ if (!cl->cl_recs[i].c_free)
+ continue;
+
+ ac->ac_chain = i;
+ status = ocfs2_search_chain(ac, bits_wanted, min_bits,
+ bit_off, num_bits,
+ bg_blkno);
+ if (!status)
+ break;
+ if (status < 0 && status != -ENOSPC) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+bail:
+
+ mlog_exit(status);
+ return status;
+}
+
+int ocfs2_claim_metadata(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct ocfs2_alloc_context *ac,
+ u32 bits_wanted,
+ u16 *suballoc_bit_start,
+ unsigned int *num_bits,
+ u64 *blkno_start)
+{
+ int status;
+ u64 bg_blkno;
+
+ BUG_ON(!ac);
+ BUG_ON(ac->ac_bits_wanted < (ac->ac_bits_given + bits_wanted));
+ BUG_ON(ac->ac_which != OCFS2_AC_USE_META);
+ BUG_ON(ac->ac_handle != handle);
+
+ status = ocfs2_claim_suballoc_bits(osb,
+ ac,
+ bits_wanted,
+ 1,
+ suballoc_bit_start,
+ num_bits,
+ &bg_blkno);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ atomic_inc(&osb->alloc_stats.bg_allocs);
+
+ *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
+ ac->ac_bits_given += (*num_bits);
+ status = 0;
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+int ocfs2_claim_new_inode(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct ocfs2_alloc_context *ac,
+ u16 *suballoc_bit,
+ u64 *fe_blkno)
+{
+ int status;
+ unsigned int num_bits;
+ u64 bg_blkno;
+
+ mlog_entry_void();
+
+ BUG_ON(!ac);
+ BUG_ON(ac->ac_bits_given != 0);
+ BUG_ON(ac->ac_bits_wanted != 1);
+ BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
+ BUG_ON(ac->ac_handle != handle);
+
+ status = ocfs2_claim_suballoc_bits(osb,
+ ac,
+ 1,
+ 1,
+ suballoc_bit,
+ &num_bits,
+ &bg_blkno);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ atomic_inc(&osb->alloc_stats.bg_allocs);
+
+ BUG_ON(num_bits != 1);
+
+ *fe_blkno = bg_blkno + (u64) (*suballoc_bit);
+ ac->ac_bits_given++;
+ status = 0;
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+/* translate a group desc. blkno and its bitmap offset into
+ * disk cluster offset. */
+static inline u32 ocfs2_desc_bitmap_to_cluster_off(struct inode *inode,
+ u64 bg_blkno,
+ u16 bg_bit_off)
+{
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ u32 cluster = 0;
+
+ BUG_ON(!ocfs2_is_cluster_bitmap(inode));
+
+ if (bg_blkno != osb->first_cluster_group_blkno)
+ cluster = ocfs2_blocks_to_clusters(inode->i_sb, bg_blkno);
+ cluster += (u32) bg_bit_off;
+ return cluster;
+}
+
+/* given a cluster offset, calculate which block group it belongs to
+ * and return that block offset. */
+static inline u64 ocfs2_which_cluster_group(struct inode *inode,
+ u32 cluster)
+{
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ u32 group_no;
+
+ BUG_ON(!ocfs2_is_cluster_bitmap(inode));
+
+ group_no = cluster / osb->bitmap_cpg;
+ if (!group_no)
+ return osb->first_cluster_group_blkno;
+ return ocfs2_clusters_to_blocks(inode->i_sb,
+ group_no * osb->bitmap_cpg);
+}
+
+/* given the block number of a cluster start, calculate which cluster
+ * group and descriptor bitmap offset that corresponds to. */
+static inline void ocfs2_block_to_cluster_group(struct inode *inode,
+ u64 data_blkno,
+ u64 *bg_blkno,
+ u16 *bg_bit_off)
+{
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ u32 data_cluster = ocfs2_blocks_to_clusters(osb->sb, data_blkno);
+
+ BUG_ON(!ocfs2_is_cluster_bitmap(inode));
+
+ *bg_blkno = ocfs2_which_cluster_group(inode,
+ data_cluster);
+
+ if (*bg_blkno == osb->first_cluster_group_blkno)
+ *bg_bit_off = (u16) data_cluster;
+ else
+ *bg_bit_off = (u16) ocfs2_blocks_to_clusters(osb->sb,
+ data_blkno - *bg_blkno);
+}
+
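+/*
+ * A worked example of the three helpers above (the numbers are
+ * hypothetical): with bitmap_cpg = 32256, cluster 70000 belongs to
+ * group_no = 70000 / 32256 = 2, so ocfs2_which_cluster_group() returns
+ * the block of cluster 2 * 32256 = 64512; ocfs2_block_to_cluster_group()
+ * reports bit 70000 - 64512 = 5488 within that descriptor, and
+ * ocfs2_desc_bitmap_to_cluster_off() maps the pair back to cluster
+ * 64512 + 5488 = 70000.  Only the first cluster group is special: its
+ * descriptor sits at first_cluster_group_blkno, so there the bit offset
+ * is the cluster number itself.
+ */
+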
+/*
+ * min_bits - the minimum contiguous chunk from this total allocation we
+ * can handle.  Set it to the amount originally asked for when the
+ * allocation must be fully contiguous, or to '1' to indicate we can deal
+ * with extents of any size.
+ */
+int ocfs2_claim_clusters(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct ocfs2_alloc_context *ac,
+ u32 min_clusters,
+ u32 *cluster_start,
+ u32 *num_clusters)
+{
+ int status;
+ unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given;
+ u64 bg_blkno;
+ u16 bg_bit_off;
+
+ mlog_entry_void();
+
+ BUG_ON(!ac);
+ BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
+
+ BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL
+ && ac->ac_which != OCFS2_AC_USE_MAIN);
+ BUG_ON(ac->ac_handle != handle);
+
+ if (ac->ac_which == OCFS2_AC_USE_LOCAL) {
+ status = ocfs2_claim_local_alloc_bits(osb,
+ handle,
+ ac,
+ bits_wanted,
+ cluster_start,
+ num_clusters);
+ if (!status)
+ atomic_inc(&osb->alloc_stats.local_data);
+ } else {
+ if (min_clusters > (osb->bitmap_cpg - 1)) {
+ /* The only paths asking for contiguousness
+ * should know about this already. */
+ mlog(ML_ERROR, "minimum allocation requested exceeds "
+ "group bitmap size!");
+ status = -ENOSPC;
+ goto bail;
+ }
+ /* clamp the current request down to a realistic size. */
+ if (bits_wanted > (osb->bitmap_cpg - 1))
+ bits_wanted = osb->bitmap_cpg - 1;
+
+ status = ocfs2_claim_suballoc_bits(osb,
+ ac,
+ bits_wanted,
+ min_clusters,
+ &bg_bit_off,
+ num_clusters,
+ &bg_blkno);
+ if (!status) {
+ *cluster_start =
+ ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
+ bg_blkno,
+ bg_bit_off);
+ atomic_inc(&osb->alloc_stats.bitmap_data);
+ }
+ }
+ if (status < 0) {
+ if (status != -ENOSPC)
+ mlog_errno(status);
+ goto bail;
+ }
+
+ ac->ac_bits_given += *num_clusters;
+
+bail:
+ mlog_exit(status);
+ return status;
+}
+
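+/*
+ * Typical caller pattern, sketched with the helpers from this file
+ * (error handling, credit calculation and locking elided):
+ *
+ * struct ocfs2_alloc_context *ac = NULL;
+ * u32 start, got;
+ *
+ * status = ocfs2_reserve_clusters(osb, handle, wanted, &ac);
+ * ... start the transaction ...
+ * status = ocfs2_claim_clusters(osb, handle, ac, 1, &start, &got);
+ *
+ * With min_clusters = 1 a fragmented bitmap may return got < wanted and
+ * the caller claims again for the remainder; passing min_clusters =
+ * wanted instead fails with -ENOSPC rather than returning a short
+ * extent.
+ */
+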
+static inline int ocfs2_block_group_clear_bits(struct ocfs2_journal_handle *handle,
+ struct inode *alloc_inode,
+ struct ocfs2_group_desc *bg,
+ struct buffer_head *group_bh,
+ unsigned int bit_off,
+ unsigned int num_bits)
+{
+ int status;
+ unsigned int tmp;
+ int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
+ struct ocfs2_group_desc *undo_bg = NULL;
+
+ mlog_entry_void();
+
+ if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
+ OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg);
+ status = -EIO;
+ goto bail;
+ }
+
+ mlog(0, "off = %u, num = %u\n", bit_off, num_bits);
+
+ if (ocfs2_is_cluster_bitmap(alloc_inode))
+ journal_type = OCFS2_JOURNAL_ACCESS_UNDO;
+
+ status = ocfs2_journal_access(handle, alloc_inode, group_bh,
+ journal_type);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ if (ocfs2_is_cluster_bitmap(alloc_inode))
+ undo_bg = (struct ocfs2_group_desc *) bh2jh(group_bh)->b_committed_data;
+
+ tmp = num_bits;
+ while(tmp--) {
+ ocfs2_clear_bit((bit_off + tmp),
+ (unsigned long *) bg->bg_bitmap);
+ if (ocfs2_is_cluster_bitmap(alloc_inode))
+ ocfs2_set_bit(bit_off + tmp,
+ (unsigned long *) undo_bg->bg_bitmap);
+ }
+ le16_add_cpu(&bg->bg_free_bits_count, num_bits);
+
+ status = ocfs2_journal_dirty(handle, group_bh);
+ if (status < 0)
+ mlog_errno(status);
+bail:
+ return status;
+}
+
+/*
+ * expects the suballoc inode to already be locked.
+ */
+static int ocfs2_free_suballoc_bits(struct ocfs2_journal_handle *handle,
+ struct inode *alloc_inode,
+ struct buffer_head *alloc_bh,
+ unsigned int start_bit,
+ u64 bg_blkno,
+ unsigned int count)
+{
+ int status = 0;
+ u32 tmp_used;
+ struct ocfs2_super *osb = OCFS2_SB(alloc_inode->i_sb);
+ struct ocfs2_dinode *fe = (struct ocfs2_dinode *) alloc_bh->b_data;
+ struct ocfs2_chain_list *cl = &fe->id2.i_chain;
+ struct buffer_head *group_bh = NULL;
+ struct ocfs2_group_desc *group;
+
+ mlog_entry_void();
+
+ if (!OCFS2_IS_VALID_DINODE(fe)) {
+ OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe);
+ status = -EIO;
+ goto bail;
+ }
+ BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl));
+
+ mlog(0, "suballocator %"MLFu64": freeing %u bits from group %"MLFu64
+ ", starting at %u\n",
+ OCFS2_I(alloc_inode)->ip_blkno, count, bg_blkno,
+ start_bit);
+
+ status = ocfs2_read_block(osb, bg_blkno, &group_bh, OCFS2_BH_CACHED,
+ alloc_inode);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ group = (struct ocfs2_group_desc *) group_bh->b_data;
+ if (!OCFS2_IS_VALID_GROUP_DESC(group)) {
+ OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, group);
+ status = -EIO;
+ goto bail;
+ }
+ BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits));
+
+ status = ocfs2_block_group_clear_bits(handle, alloc_inode,
+ group, group_bh,
+ start_bit, count);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_journal_access(handle, alloc_inode, alloc_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ le32_add_cpu(&cl->cl_recs[le16_to_cpu(group->bg_chain)].c_free,
+ count);
+ tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
+ fe->id1.bitmap1.i_used = cpu_to_le32(tmp_used - count);
+
+ status = ocfs2_journal_dirty(handle, alloc_bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+bail:
+ if (group_bh)
+ brelse(group_bh);
+
+ mlog_exit(status);
+ return status;
+}
+
+static inline u64 ocfs2_which_suballoc_group(u64 block, unsigned int bit)
+{
+ u64 group = block - (u64) bit;
+
+ return group;
+}
+
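+/*
+ * For illustration (hypothetical block numbers): a dinode allocated at
+ * block 5013 with i_suballoc_bit = 13 came from the group descriptor at
+ * block 5013 - 13 = 5000, which is where ocfs2_free_dinode() below hands
+ * the bit back.
+ */
+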
+int ocfs2_free_dinode(struct ocfs2_journal_handle *handle,
+ struct inode *inode_alloc_inode,
+ struct buffer_head *inode_alloc_bh,
+ struct ocfs2_dinode *di)
+{
+ u64 blk = le64_to_cpu(di->i_blkno);
+ u16 bit = le16_to_cpu(di->i_suballoc_bit);
+ u64 bg_blkno = ocfs2_which_suballoc_group(blk, bit);
+
+ return ocfs2_free_suballoc_bits(handle, inode_alloc_inode,
+ inode_alloc_bh, bit, bg_blkno, 1);
+}
+
+int ocfs2_free_extent_block(struct ocfs2_journal_handle *handle,
+ struct inode *eb_alloc_inode,
+ struct buffer_head *eb_alloc_bh,
+ struct ocfs2_extent_block *eb)
+{
+ u64 blk = le64_to_cpu(eb->h_blkno);
+ u16 bit = le16_to_cpu(eb->h_suballoc_bit);
+ u64 bg_blkno = ocfs2_which_suballoc_group(blk, bit);
+
+ return ocfs2_free_suballoc_bits(handle, eb_alloc_inode, eb_alloc_bh,
+ bit, bg_blkno, 1);
+}
+
+int ocfs2_free_clusters(struct ocfs2_journal_handle *handle,
+ struct inode *bitmap_inode,
+ struct buffer_head *bitmap_bh,
+ u64 start_blk,
+ unsigned int num_clusters)
+{
+ int status;
+ u16 bg_start_bit;
+ u64 bg_blkno;
+ struct ocfs2_dinode *fe;
+
+ /* You can't ever have a contiguous set of clusters
+ * bigger than a block group bitmap so we never have to worry
+ * about looping on them. */
+
+ mlog_entry_void();
+
+ /* This is expensive. We can safely remove once this stuff has
+ * gotten tested really well. */
+ BUG_ON(start_blk != ocfs2_clusters_to_blocks(bitmap_inode->i_sb, ocfs2_blocks_to_clusters(bitmap_inode->i_sb, start_blk)));
+
+ fe = (struct ocfs2_dinode *) bitmap_bh->b_data;
+
+ ocfs2_block_to_cluster_group(bitmap_inode, start_blk, &bg_blkno,
+ &bg_start_bit);
+
+ mlog(0, "want to free %u clusters starting at block %"MLFu64"\n",
+ num_clusters, start_blk);
+ mlog(0, "bg_blkno = %"MLFu64", bg_start_bit = %u\n",
+ bg_blkno, bg_start_bit);
+
+ status = ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh,
+ bg_start_bit, bg_blkno,
+ num_clusters);
+ if (status < 0)
+ mlog_errno(status);
+
+ mlog_exit(status);
+ return status;
+}
+
+static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg)
+{
+ printk("Block Group:\n");
+ printk("bg_signature: %s\n", bg->bg_signature);
+ printk("bg_size: %u\n", bg->bg_size);
+ printk("bg_bits: %u\n", bg->bg_bits);
+ printk("bg_free_bits_count: %u\n", bg->bg_free_bits_count);
+ printk("bg_chain: %u\n", bg->bg_chain);
+ printk("bg_generation: %u\n", le32_to_cpu(bg->bg_generation));
+ printk("bg_next_group: %"MLFu64"\n", bg->bg_next_group);
+ printk("bg_parent_dinode: %"MLFu64"\n", bg->bg_parent_dinode);
+ printk("bg_blkno: %"MLFu64"\n", bg->bg_blkno);
+}
+
+static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe)
+{
+ int i;
+
+ printk("Suballoc Inode %"MLFu64":\n", fe->i_blkno);
+ printk("i_signature: %s\n", fe->i_signature);
+ printk("i_size: %"MLFu64"\n", fe->i_size);
+ printk("i_clusters: %u\n", fe->i_clusters);
+ printk("i_generation: %u\n",
+ le32_to_cpu(fe->i_generation));
+ printk("id1.bitmap1.i_used: %u\n",
+ le32_to_cpu(fe->id1.bitmap1.i_used));
+ printk("id1.bitmap1.i_total: %u\n",
+ le32_to_cpu(fe->id1.bitmap1.i_total));
+ printk("id2.i_chain.cl_cpg: %u\n", fe->id2.i_chain.cl_cpg);
+ printk("id2.i_chain.cl_bpc: %u\n", fe->id2.i_chain.cl_bpc);
+ printk("id2.i_chain.cl_count: %u\n", fe->id2.i_chain.cl_count);
+ printk("id2.i_chain.cl_next_free_rec: %u\n",
+ fe->id2.i_chain.cl_next_free_rec);
+ for(i = 0; i < fe->id2.i_chain.cl_next_free_rec; i++) {
+ printk("fe->id2.i_chain.cl_recs[%d].c_free: %u\n", i,
+ fe->id2.i_chain.cl_recs[i].c_free);
+ printk("fe->id2.i_chain.cl_recs[%d].c_total: %u\n", i,
+ fe->id2.i_chain.cl_recs[i].c_total);
+ printk("fe->id2.i_chain.cl_recs[%d].c_blkno: %"MLFu64"\n", i,
+ fe->id2.i_chain.cl_recs[i].c_blkno);
+ }
+}
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
new file mode 100644
index 00000000000..a76c82a7cea
--- /dev/null
+++ b/fs/ocfs2/suballoc.h
@@ -0,0 +1,132 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * suballoc.h
+ *
+ * Defines the sub-allocator API
+ *
+ * Copyright (C) 2003, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef _CHAINALLOC_H_
+#define _CHAINALLOC_H_
+
+typedef int (group_search_t)(struct inode *,
+ struct buffer_head *,
+ u32,
+ u32,
+ u16 *,
+ u16 *);
+
+struct ocfs2_alloc_context {
+ struct inode *ac_inode; /* which bitmap are we allocating from? */
+ struct buffer_head *ac_bh; /* file entry bh */
+ u32 ac_bits_wanted;
+ u32 ac_bits_given;
+#define OCFS2_AC_USE_LOCAL 1
+#define OCFS2_AC_USE_MAIN 2
+#define OCFS2_AC_USE_INODE 3
+#define OCFS2_AC_USE_META 4
+ u32 ac_which;
+ struct ocfs2_journal_handle *ac_handle;
+
+ /* these are used by the chain search */
+ u16 ac_chain;
+ int ac_allow_chain_relink;
+ group_search_t *ac_group_search;
+};
+
+void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac);
+static inline int ocfs2_alloc_context_bits_left(struct ocfs2_alloc_context *ac)
+{
+ return ac->ac_bits_wanted - ac->ac_bits_given;
+}
+
+int ocfs2_reserve_new_metadata(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct ocfs2_dinode *fe,
+ struct ocfs2_alloc_context **ac);
+int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct ocfs2_alloc_context **ac);
+int ocfs2_reserve_clusters(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ u32 bits_wanted,
+ struct ocfs2_alloc_context **ac);
+
+int ocfs2_claim_metadata(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct ocfs2_alloc_context *ac,
+ u32 bits_wanted,
+ u16 *suballoc_bit_start,
+ u32 *num_bits,
+ u64 *blkno_start);
+int ocfs2_claim_new_inode(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct ocfs2_alloc_context *ac,
+ u16 *suballoc_bit,
+ u64 *fe_blkno);
+int ocfs2_claim_clusters(struct ocfs2_super *osb,
+ struct ocfs2_journal_handle *handle,
+ struct ocfs2_alloc_context *ac,
+ u32 min_clusters,
+ u32 *cluster_start,
+ u32 *num_clusters);
+
+int ocfs2_free_dinode(struct ocfs2_journal_handle *handle,
+ struct inode *inode_alloc_inode,
+ struct buffer_head *inode_alloc_bh,
+ struct ocfs2_dinode *di);
+int ocfs2_free_extent_block(struct ocfs2_journal_handle *handle,
+ struct inode *eb_alloc_inode,
+ struct buffer_head *eb_alloc_bh,
+ struct ocfs2_extent_block *eb);
+int ocfs2_free_clusters(struct ocfs2_journal_handle *handle,
+ struct inode *bitmap_inode,
+ struct buffer_head *bitmap_bh,
+ u64 start_blk,
+ unsigned int num_clusters);
+
+static inline u32 ocfs2_cluster_from_desc(struct ocfs2_super *osb,
+ u64 bg_blkno)
+{
+ /* This should work for all block group descriptors as only
+ * the 1st group descriptor of the cluster bitmap is
+ * different. */
+
+ if (bg_blkno == osb->first_cluster_group_blkno)
+ return 0;
+
+ /* the rest of the block groups are located at the beginning
+ * of their 1st cluster, so a direct translation just
+ * works. */
+ return ocfs2_blocks_to_clusters(osb->sb, bg_blkno);
+}
+
+static inline int ocfs2_is_cluster_bitmap(struct inode *inode)
+{
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ return osb->bitmap_blkno == OCFS2_I(inode)->ip_blkno;
+}
+
+/* This is for local alloc ONLY. Others should use the task-specific
+ * apis above. */
+int ocfs2_reserve_cluster_bitmap_bits(struct ocfs2_super *osb,
+ struct ocfs2_alloc_context *ac);
+
+#endif /* _CHAINALLOC_H_ */
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
new file mode 100644
index 00000000000..364d64bd5f1
--- /dev/null
+++ b/fs/ocfs2/super.c
@@ -0,0 +1,1733 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * super.c
+ *
+ * load/unload driver, mount/dismount volumes
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/random.h>
+#include <linux/statfs.h>
+#include <linux/moduleparam.h>
+#include <linux/blkdev.h>
+#include <linux/socket.h>
+#include <linux/inet.h>
+#include <linux/parser.h>
+#include <linux/crc32.h>
+#include <linux/debugfs.h>
+
+#include <cluster/nodemanager.h>
+
+#define MLOG_MASK_PREFIX ML_SUPER
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+/* this should be the only file to include a version 1 header */
+#include "ocfs1_fs_compat.h"
+
+#include "alloc.h"
+#include "dlmglue.h"
+#include "export.h"
+#include "extent_map.h"
+#include "heartbeat.h"
+#include "inode.h"
+#include "journal.h"
+#include "localalloc.h"
+#include "namei.h"
+#include "slot_map.h"
+#include "super.h"
+#include "sysfile.h"
+#include "uptodate.h"
+#include "ver.h"
+#include "vote.h"
+
+#include "buffer_head_io.h"
+
+/*
+ * Globals
+ */
+static spinlock_t ocfs2_globals_lock = SPIN_LOCK_UNLOCKED;
+
+static u32 osb_id; /* Keeps track of next available OSB Id */
+
+static kmem_cache_t *ocfs2_inode_cachep = NULL;
+
+kmem_cache_t *ocfs2_lock_cache = NULL;
+
+/* OCFS2 needs to schedule several different types of work which
+ * require cluster locking, disk I/O, recovery waits, etc. Since these
+ * types of work tend to be heavy we avoid using the kernel events
+ * workqueue and schedule on our own. */
+struct workqueue_struct *ocfs2_wq = NULL;
+
+static struct dentry *ocfs2_debugfs_root = NULL;
+
+MODULE_AUTHOR("Oracle");
+MODULE_LICENSE("GPL");
+
+static int ocfs2_parse_options(struct super_block *sb, char *options,
+ unsigned long *mount_opt, int is_remount);
+static void ocfs2_put_super(struct super_block *sb);
+static int ocfs2_mount_volume(struct super_block *sb);
+static int ocfs2_remount(struct super_block *sb, int *flags, char *data);
+static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err);
+static int ocfs2_initialize_mem_caches(void);
+static void ocfs2_free_mem_caches(void);
+static void ocfs2_delete_osb(struct ocfs2_super *osb);
+
+static int ocfs2_statfs(struct super_block *sb, struct kstatfs *buf);
+
+static int ocfs2_sync_fs(struct super_block *sb, int wait);
+
+static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb);
+static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb);
+static int ocfs2_release_system_inodes(struct ocfs2_super *osb);
+static int ocfs2_fill_local_node_info(struct ocfs2_super *osb);
+static int ocfs2_check_volume(struct ocfs2_super *osb);
+static int ocfs2_verify_volume(struct ocfs2_dinode *di,
+ struct buffer_head *bh,
+ u32 sectsize);
+static int ocfs2_initialize_super(struct super_block *sb,
+ struct buffer_head *bh,
+ int sector_size);
+static int ocfs2_get_sector(struct super_block *sb,
+ struct buffer_head **bh,
+ int block,
+ int sect_size);
+static void ocfs2_write_super(struct super_block *sb);
+static struct inode *ocfs2_alloc_inode(struct super_block *sb);
+static void ocfs2_destroy_inode(struct inode *inode);
+
+static unsigned long long ocfs2_max_file_offset(unsigned int blockshift);
+
+static struct super_operations ocfs2_sops = {
+ .statfs = ocfs2_statfs,
+ .alloc_inode = ocfs2_alloc_inode,
+ .destroy_inode = ocfs2_destroy_inode,
+ .drop_inode = ocfs2_drop_inode,
+ .clear_inode = ocfs2_clear_inode,
+ .delete_inode = ocfs2_delete_inode,
+ .sync_fs = ocfs2_sync_fs,
+ .write_super = ocfs2_write_super,
+ .put_super = ocfs2_put_super,
+ .remount_fs = ocfs2_remount,
+};
+
+enum {
+ Opt_barrier,
+ Opt_err_panic,
+ Opt_err_ro,
+ Opt_intr,
+ Opt_nointr,
+ Opt_hb_none,
+ Opt_hb_local,
+ Opt_data_ordered,
+ Opt_data_writeback,
+ Opt_err,
+};
+
+static match_table_t tokens = {
+ {Opt_barrier, "barrier=%u"},
+ {Opt_err_panic, "errors=panic"},
+ {Opt_err_ro, "errors=remount-ro"},
+ {Opt_intr, "intr"},
+ {Opt_nointr, "nointr"},
+ {Opt_hb_none, OCFS2_HB_NONE},
+ {Opt_hb_local, OCFS2_HB_LOCAL},
+ {Opt_data_ordered, "data=ordered"},
+ {Opt_data_writeback, "data=writeback"},
+ {Opt_err, NULL}
+};
+
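+/*
+ * The table above feeds ocfs2_parse_options(); as an example, a mount
+ * invocation along the lines of
+ *
+ * mount -t ocfs2 -o data=writeback,errors=panic /dev/sdb1 /ocfs2
+ *
+ * (device and mountpoint are illustrative) selects writeback data
+ * journalling and panic-on-error behaviour, while the heartbeat tokens
+ * take their strings from OCFS2_HB_NONE and OCFS2_HB_LOCAL defined
+ * elsewhere.
+ */
+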
+/*
+ * write_super and sync_fs ripped right out of ext3.
+ */
+static void ocfs2_write_super(struct super_block *sb)
+{
+ if (mutex_trylock(&sb->s_lock) != 0)
+ BUG();
+ sb->s_dirt = 0;
+}
+
+static int ocfs2_sync_fs(struct super_block *sb, int wait)
+{
+ int status = 0;
+ tid_t target;
+ struct ocfs2_super *osb = OCFS2_SB(sb);
+
+ sb->s_dirt = 0;
+
+ if (ocfs2_is_hard_readonly(osb))
+ return -EROFS;
+
+ if (wait) {
+ status = ocfs2_flush_truncate_log(osb);
+ if (status < 0)
+ mlog_errno(status);
+ } else {
+ ocfs2_schedule_truncate_log_flush(osb, 0);
+ }
+
+ if (journal_start_commit(OCFS2_SB(sb)->journal->j_journal, &target)) {
+ if (wait)
+ log_wait_commit(OCFS2_SB(sb)->journal->j_journal,
+ target);
+ }
+ return 0;
+}
+
+static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb)
+{
+ struct inode *new = NULL;
+ int status = 0;
+ int i;
+
+ mlog_entry_void();
+
+ new = ocfs2_iget(osb, osb->root_blkno);
+ if (IS_ERR(new)) {
+ status = PTR_ERR(new);
+ mlog_errno(status);
+ goto bail;
+ }
+ osb->root_inode = new;
+
+ new = ocfs2_iget(osb, osb->system_dir_blkno);
+ if (IS_ERR(new)) {
+ status = PTR_ERR(new);
+ mlog_errno(status);
+ goto bail;
+ }
+ osb->sys_root_inode = new;
+
+ for (i = OCFS2_FIRST_ONLINE_SYSTEM_INODE;
+ i <= OCFS2_LAST_GLOBAL_SYSTEM_INODE; i++) {
+ new = ocfs2_get_system_file_inode(osb, i, osb->slot_num);
+ if (!new) {
+ ocfs2_release_system_inodes(osb);
+ status = -EINVAL;
+ mlog_errno(status);
+ /* FIXME: Should ERROR_RO_FS */
+ mlog(ML_ERROR, "Unable to load system inode %d, "
+ "possibly corrupt fs?", i);
+ goto bail;
+ }
+ // the array now has one ref, so drop this one
+ iput(new);
+ }
+
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb)
+{
+ struct inode *new = NULL;
+ int status = 0;
+ int i;
+
+ mlog_entry_void();
+
+ for (i = OCFS2_LAST_GLOBAL_SYSTEM_INODE + 1;
+ i < NUM_SYSTEM_INODES;
+ i++) {
+ new = ocfs2_get_system_file_inode(osb, i, osb->slot_num);
+ if (!new) {
+ ocfs2_release_system_inodes(osb);
+ status = -EINVAL;
+ mlog(ML_ERROR, "status=%d, sysfile=%d, slot=%d\n",
+ status, i, osb->slot_num);
+ goto bail;
+ }
+ /* the array now has one ref, so drop this one */
+ iput(new);
+ }
+
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+static int ocfs2_release_system_inodes(struct ocfs2_super *osb)
+{
+ int status = 0, i;
+ struct inode *inode;
+
+ mlog_entry_void();
+
+ for (i = 0; i < NUM_SYSTEM_INODES; i++) {
+ inode = osb->system_inodes[i];
+ if (inode) {
+ iput(inode);
+ osb->system_inodes[i] = NULL;
+ }
+ }
+
+ inode = osb->sys_root_inode;
+ if (inode) {
+ iput(inode);
+ osb->sys_root_inode = NULL;
+ }
+
+ inode = osb->root_inode;
+ if (inode) {
+ iput(inode);
+ osb->root_inode = NULL;
+ }
+
+ mlog_exit(status);
+ return status;
+}
+
+/* We're allocating fs objects, use GFP_NOFS */
+static struct inode *ocfs2_alloc_inode(struct super_block *sb)
+{
+ struct ocfs2_inode_info *oi;
+
+ oi = kmem_cache_alloc(ocfs2_inode_cachep, SLAB_NOFS);
+ if (!oi)
+ return NULL;
+
+ return &oi->vfs_inode;
+}
+
+static void ocfs2_destroy_inode(struct inode *inode)
+{
+ kmem_cache_free(ocfs2_inode_cachep, OCFS2_I(inode));
+}
+
+/* From xfs_super.c:xfs_max_file_offset
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.
+ */
+static unsigned long long ocfs2_max_file_offset(unsigned int blockshift)
+{
+ unsigned int pagefactor = 1;
+ unsigned int bitshift = BITS_PER_LONG - 1;
+
+ /* Figure out maximum filesize, on Linux this can depend on
+ * the filesystem blocksize (on 32 bit platforms).
+ * __block_prepare_write does this in an [unsigned] long...
+ * page->index << (PAGE_CACHE_SHIFT - bbits)
+ * So, for page sized blocks (4K on 32 bit platforms),
+ * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
+ * (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+ * but for smaller blocksizes it is less (bbits = log2 bsize).
+ * Note1: get_block_t takes a long (implicit cast from above)
+ * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
+ * can optionally convert the [unsigned] long from above into
+ * an [unsigned] long long.
+ */
+
+#if BITS_PER_LONG == 32
+# if defined(CONFIG_LBD)
+ BUG_ON(sizeof(sector_t) != 8);
+ pagefactor = PAGE_CACHE_SIZE;
+ bitshift = BITS_PER_LONG;
+# else
+ pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
+# endif
+#endif
+
+ return (((unsigned long long)pagefactor) << bitshift) - 1;
+}
+
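+/*
+ * Worked examples, assuming 4K pages: on a 32-bit kernel without
+ * CONFIG_LBD a 4K blocksize gives pagefactor = 4096 and bitshift = 31,
+ * so the limit is (4096ULL << 31) - 1 = 2^43 - 1 (just under 8TB); a 1K
+ * blocksize drops pagefactor to 1024 and the limit to 2^41 - 1 (~2TB).
+ * On 64-bit kernels pagefactor stays 1 and bitshift is 63, so the limit
+ * is effectively 2^63 - 1.
+ */
+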
+static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
+{
+ int incompat_features;
+ int ret = 0;
+ unsigned long parsed_options;
+ struct ocfs2_super *osb = OCFS2_SB(sb);
+
+ if (!ocfs2_parse_options(sb, data, &parsed_options, 1)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if ((osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL) !=
+ (parsed_options & OCFS2_MOUNT_HB_LOCAL)) {
+ ret = -EINVAL;
+ mlog(ML_ERROR, "Cannot change heartbeat mode on remount\n");
+ goto out;
+ }
+
+ if ((osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK) !=
+ (parsed_options & OCFS2_MOUNT_DATA_WRITEBACK)) {
+ ret = -EINVAL;
+ mlog(ML_ERROR, "Cannot change data mode on remount\n");
+ goto out;
+ }
+
+ /* We're going to/from readonly mode. */
+ if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
+ /* Lock here so the check of HARD_RO and the potential
+ * setting of SOFT_RO is atomic. */
+ spin_lock(&osb->osb_lock);
+ if (osb->osb_flags & OCFS2_OSB_HARD_RO) {
+ mlog(ML_ERROR, "Remount on readonly device is forbidden.\n");
+ ret = -EROFS;
+ goto unlock_osb;
+ }
+
+ if (*flags & MS_RDONLY) {
+ mlog(0, "Going to ro mode.\n");
+ sb->s_flags |= MS_RDONLY;
+ osb->osb_flags |= OCFS2_OSB_SOFT_RO;
+ } else {
+ mlog(0, "Making ro filesystem writeable.\n");
+
+ if (osb->osb_flags & OCFS2_OSB_ERROR_FS) {
+ mlog(ML_ERROR, "Cannot remount RDWR "
+ "filesystem due to previous errors.\n");
+ ret = -EROFS;
+ goto unlock_osb;
+ }
+ incompat_features = OCFS2_HAS_RO_COMPAT_FEATURE(sb, ~OCFS2_FEATURE_RO_COMPAT_SUPP);
+ if (incompat_features) {
+ mlog(ML_ERROR, "Cannot remount RDWR because "
+ "of unsupported optional features "
+ "(%x).\n", incompat_features);
+ ret = -EINVAL;
+ goto unlock_osb;
+ }
+ sb->s_flags &= ~MS_RDONLY;
+ osb->osb_flags &= ~OCFS2_OSB_SOFT_RO;
+ }
+unlock_osb:
+ spin_unlock(&osb->osb_lock);
+ }
+
+ if (!ret) {
+ if (!ocfs2_is_hard_readonly(osb))
+ ocfs2_set_journal_params(osb);
+
+ /* Only save off the new mount options in case of a successful
+ * remount. */
+ osb->s_mount_opt = parsed_options;
+ }
+out:
+ return ret;
+}
+
+static int ocfs2_sb_probe(struct super_block *sb,
+ struct buffer_head **bh,
+ int *sector_size)
+{
+ int status = 0, tmpstat;
+ struct ocfs1_vol_disk_hdr *hdr;
+ struct ocfs2_dinode *di;
+ int blksize;
+
+ *bh = NULL;
+
+ /* may be > 512 */
+ *sector_size = bdev_hardsect_size(sb->s_bdev);
+ if (*sector_size > OCFS2_MAX_BLOCKSIZE) {
+ mlog(ML_ERROR, "Hardware sector size too large: %d (max=%d)\n",
+ *sector_size, OCFS2_MAX_BLOCKSIZE);
+ status = -EINVAL;
+ goto bail;
+ }
+
+ /* Can this really happen? */
+ if (*sector_size < OCFS2_MIN_BLOCKSIZE)
+ *sector_size = OCFS2_MIN_BLOCKSIZE;
+
+ /* check block zero for old format */
+ status = ocfs2_get_sector(sb, bh, 0, *sector_size);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ hdr = (struct ocfs1_vol_disk_hdr *) (*bh)->b_data;
+ if (hdr->major_version == OCFS1_MAJOR_VERSION) {
+ mlog(ML_ERROR, "incompatible version: %u.%u\n",
+ hdr->major_version, hdr->minor_version);
+ status = -EINVAL;
+ }
+ if (memcmp(hdr->signature, OCFS1_VOLUME_SIGNATURE,
+ strlen(OCFS1_VOLUME_SIGNATURE)) == 0) {
+ mlog(ML_ERROR, "incompatible volume signature: %8s\n",
+ hdr->signature);
+ status = -EINVAL;
+ }
+ brelse(*bh);
+ *bh = NULL;
+ if (status < 0) {
+ mlog(ML_ERROR, "This is an ocfs v1 filesystem which must be "
+ "upgraded before mounting with ocfs v2\n");
+ goto bail;
+ }
+
+ /*
+ * Now check at magic offset for 512, 1024, 2048, 4096
+ * blocksizes. 4096 is the maximum blocksize because it is
+ * the minimum clustersize.
+ */
+ status = -EINVAL;
+ for (blksize = *sector_size;
+ blksize <= OCFS2_MAX_BLOCKSIZE;
+ blksize <<= 1) {
+ tmpstat = ocfs2_get_sector(sb, bh,
+ OCFS2_SUPER_BLOCK_BLKNO,
+ blksize);
+ if (tmpstat < 0) {
+ status = tmpstat;
+ mlog_errno(status);
+ goto bail;
+ }
+ di = (struct ocfs2_dinode *) (*bh)->b_data;
+ status = ocfs2_verify_volume(di, *bh, blksize);
+ if (status >= 0)
+ goto bail;
+ brelse(*bh);
+ *bh = NULL;
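+		/* -EAGAIN from ocfs2_verify_volume() means there was no valid
+		 * superblock at this blocksize, so keep probing; any other
+		 * error indicates a corrupt superblock. */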
+ if (status != -EAGAIN)
+ break;
+ }
+
+bail:
+ return status;
+}
+
+static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct dentry *root;
+ int status, sector_size;
+ unsigned long parsed_opt;
+ struct inode *inode = NULL;
+ struct ocfs2_super *osb = NULL;
+ struct buffer_head *bh = NULL;
+
+ mlog_entry("%p, %p, %i", sb, data, silent);
+
+ /* for now we only have one cluster/node, make sure we see it
+ * in the heartbeat universe */
+ if (!o2hb_check_local_node_heartbeating()) {
+ status = -EINVAL;
+ goto read_super_error;
+ }
+
+ /* probe for superblock */
+ status = ocfs2_sb_probe(sb, &bh, &sector_size);
+ if (status < 0) {
+ mlog(ML_ERROR, "superblock probe failed!\n");
+ goto read_super_error;
+ }
+
+ status = ocfs2_initialize_super(sb, bh, sector_size);
+ osb = OCFS2_SB(sb);
+ if (status < 0) {
+ mlog_errno(status);
+ goto read_super_error;
+ }
+ brelse(bh);
+ bh = NULL;
+
+ if (!ocfs2_parse_options(sb, data, &parsed_opt, 0)) {
+ status = -EINVAL;
+ goto read_super_error;
+ }
+ osb->s_mount_opt = parsed_opt;
+
+ sb->s_magic = OCFS2_SUPER_MAGIC;
+
+ /* Hard readonly mode only if: bdev_read_only, MS_RDONLY,
+ * heartbeat=none */
+ if (bdev_read_only(sb->s_bdev)) {
+ if (!(sb->s_flags & MS_RDONLY)) {
+ status = -EACCES;
+ mlog(ML_ERROR, "Readonly device detected but readonly "
+ "mount was not specified.\n");
+ goto read_super_error;
+ }
+
+ /* You should not be able to start a local heartbeat
+ * on a readonly device. */
+ if (osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL) {
+ status = -EROFS;
+ mlog(ML_ERROR, "Local heartbeat specified on readonly "
+ "device.\n");
+ goto read_super_error;
+ }
+
+ status = ocfs2_check_journals_nolocks(osb);
+ if (status < 0) {
+ if (status == -EROFS)
+ mlog(ML_ERROR, "Recovery required on readonly "
+ "file system, but write access is "
+ "unavailable.\n");
+ else
+ mlog_errno(status);
+ goto read_super_error;
+ }
+
+ ocfs2_set_ro_flag(osb, 1);
+
+ printk(KERN_NOTICE "Readonly device detected. No cluster "
+ "services will be utilized for this mount. Recovery "
+ "will be skipped.\n");
+ }
+
+ if (!ocfs2_is_hard_readonly(osb)) {
+ /* If this isn't a hard readonly mount, then we need
+ * to make sure that heartbeat is in a valid state,
+		 * and that we mark ourselves soft readonly if -oro
+ * was specified. */
+ if (!(osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL)) {
+ mlog(ML_ERROR, "No heartbeat for device (%s)\n",
+ sb->s_id);
+ status = -EINVAL;
+ goto read_super_error;
+ }
+
+ if (sb->s_flags & MS_RDONLY)
+ ocfs2_set_ro_flag(osb, 0);
+ }
+
+ osb->osb_debug_root = debugfs_create_dir(osb->uuid_str,
+ ocfs2_debugfs_root);
+ if (!osb->osb_debug_root) {
+ status = -EINVAL;
+ mlog(ML_ERROR, "Unable to create per-mount debugfs root.\n");
+ goto read_super_error;
+ }
+
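+	/* Take our own reference on the root inode even if the mount only
+	 * partially succeeded, so the error path below can release it. */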
+ status = ocfs2_mount_volume(sb);
+ if (osb->root_inode)
+ inode = igrab(osb->root_inode);
+
+ if (status < 0)
+ goto read_super_error;
+
+ if (!inode) {
+ status = -EIO;
+ mlog_errno(status);
+ goto read_super_error;
+ }
+
+ root = d_alloc_root(inode);
+ if (!root) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto read_super_error;
+ }
+
+ sb->s_root = root;
+
+ ocfs2_complete_mount_recovery(osb);
+
+ printk("ocfs2: Mounting device (%u,%u) on (node %d, slot %d) with %s "
+ "data mode.\n",
+ MAJOR(sb->s_dev), MINOR(sb->s_dev), osb->node_num,
+ osb->slot_num,
+ osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK ? "writeback" :
+ "ordered");
+
+ atomic_set(&osb->vol_state, VOLUME_MOUNTED);
+ wake_up(&osb->osb_mount_event);
+
+ mlog_exit(status);
+ return status;
+
+read_super_error:
+ if (bh != NULL)
+ brelse(bh);
+
+ if (inode)
+ iput(inode);
+
+ if (osb) {
+ atomic_set(&osb->vol_state, VOLUME_DISABLED);
+ wake_up(&osb->osb_mount_event);
+ ocfs2_dismount_volume(sb, 1);
+ }
+
+ mlog_exit(status);
+ return status;
+}
+
+static struct super_block *ocfs2_get_sb(struct file_system_type *fs_type,
+ int flags,
+ const char *dev_name,
+ void *data)
+{
+ return get_sb_bdev(fs_type, flags, dev_name, data, ocfs2_fill_super);
+}
+
+static struct file_system_type ocfs2_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "ocfs2",
+ .get_sb = ocfs2_get_sb, /* is this called when we mount
+ * the fs? */
+ .kill_sb = kill_block_super, /* set to the generic one
+ * right now, but do we
+ * need to change that? */
+ .fs_flags = FS_REQUIRES_DEV,
+ .next = NULL
+};
+
+static int ocfs2_parse_options(struct super_block *sb,
+ char *options,
+ unsigned long *mount_opt,
+ int is_remount)
+{
+ int status;
+ char *p;
+
+ mlog_entry("remount: %d, options: \"%s\"\n", is_remount,
+ options ? options : "(none)");
+
+ *mount_opt = 0;
+
+ if (!options) {
+ status = 1;
+ goto bail;
+ }
+
+ while ((p = strsep(&options, ",")) != NULL) {
+ int token, option;
+ substring_t args[MAX_OPT_ARGS];
+
+ if (!*p)
+ continue;
+
+ token = match_token(p, tokens, args);
+ switch (token) {
+ case Opt_hb_local:
+ *mount_opt |= OCFS2_MOUNT_HB_LOCAL;
+ break;
+ case Opt_hb_none:
+ *mount_opt &= ~OCFS2_MOUNT_HB_LOCAL;
+ break;
+ case Opt_barrier:
+ if (match_int(&args[0], &option)) {
+ status = 0;
+ goto bail;
+ }
+ if (option)
+ *mount_opt |= OCFS2_MOUNT_BARRIER;
+ else
+ *mount_opt &= ~OCFS2_MOUNT_BARRIER;
+ break;
+ case Opt_intr:
+ *mount_opt &= ~OCFS2_MOUNT_NOINTR;
+ break;
+ case Opt_nointr:
+ *mount_opt |= OCFS2_MOUNT_NOINTR;
+ break;
+ case Opt_err_panic:
+ *mount_opt |= OCFS2_MOUNT_ERRORS_PANIC;
+ break;
+ case Opt_err_ro:
+ *mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC;
+ break;
+ case Opt_data_ordered:
+ *mount_opt &= ~OCFS2_MOUNT_DATA_WRITEBACK;
+ break;
+ case Opt_data_writeback:
+ *mount_opt |= OCFS2_MOUNT_DATA_WRITEBACK;
+ break;
+ default:
+ mlog(ML_ERROR,
+ "Unrecognized mount option \"%s\" "
+ "or missing value\n", p);
+ status = 0;
+ goto bail;
+ }
+ }
+
+ status = 1;
+
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+static int __init ocfs2_init(void)
+{
+ int status;
+
+ mlog_entry_void();
+
+ ocfs2_print_version();
+
+ if (init_ocfs2_extent_maps())
+ return -ENOMEM;
+
+ status = init_ocfs2_uptodate_cache();
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ status = ocfs2_initialize_mem_caches();
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ ocfs2_wq = create_singlethread_workqueue("ocfs2_wq");
+ if (!ocfs2_wq) {
+ status = -ENOMEM;
+ goto leave;
+ }
+
+ spin_lock(&ocfs2_globals_lock);
+ osb_id = 0;
+ spin_unlock(&ocfs2_globals_lock);
+
+ ocfs2_debugfs_root = debugfs_create_dir("ocfs2", NULL);
+ if (!ocfs2_debugfs_root) {
+ status = -EFAULT;
+ mlog(ML_ERROR, "Unable to create ocfs2 debugfs root.\n");
+ }
+
+leave:
+ if (status < 0) {
+ ocfs2_free_mem_caches();
+ exit_ocfs2_uptodate_cache();
+ exit_ocfs2_extent_maps();
+ }
+
+ mlog_exit(status);
+
+ if (status >= 0) {
+ return register_filesystem(&ocfs2_fs_type);
+ } else
+ return -1;
+}
+
+static void __exit ocfs2_exit(void)
+{
+ mlog_entry_void();
+
+ if (ocfs2_wq) {
+ flush_workqueue(ocfs2_wq);
+ destroy_workqueue(ocfs2_wq);
+ }
+
+ debugfs_remove(ocfs2_debugfs_root);
+
+ ocfs2_free_mem_caches();
+
+ unregister_filesystem(&ocfs2_fs_type);
+
+ exit_ocfs2_extent_maps();
+
+ exit_ocfs2_uptodate_cache();
+
+ mlog_exit_void();
+}
+
+static void ocfs2_put_super(struct super_block *sb)
+{
+ mlog_entry("(0x%p)\n", sb);
+
+ ocfs2_sync_blockdev(sb);
+ ocfs2_dismount_volume(sb, 0);
+
+ mlog_exit_void();
+}
+
+static int ocfs2_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+ struct ocfs2_super *osb;
+ u32 numbits, freebits;
+ int status;
+ struct ocfs2_dinode *bm_lock;
+ struct buffer_head *bh = NULL;
+ struct inode *inode = NULL;
+
+ mlog_entry("(%p, %p)\n", sb, buf);
+
+ osb = OCFS2_SB(sb);
+
+ inode = ocfs2_get_system_file_inode(osb,
+ GLOBAL_BITMAP_SYSTEM_INODE,
+ OCFS2_INVALID_SLOT);
+ if (!inode) {
+ mlog(ML_ERROR, "failed to get bitmap inode\n");
+ status = -EIO;
+ goto bail;
+ }
+
+ status = ocfs2_meta_lock(inode, NULL, &bh, 0);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ bm_lock = (struct ocfs2_dinode *) bh->b_data;
+
+ numbits = le32_to_cpu(bm_lock->id1.bitmap1.i_total);
+ freebits = numbits - le32_to_cpu(bm_lock->id1.bitmap1.i_used);
+
+ buf->f_type = OCFS2_SUPER_MAGIC;
+ buf->f_bsize = sb->s_blocksize;
+ buf->f_namelen = OCFS2_MAX_FILENAME_LEN;
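+	/* The global bitmap counts clusters, so scale by the number of
+	 * blocks per cluster to report sizes in filesystem blocks. */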
+ buf->f_blocks = ((sector_t) numbits) *
+ (osb->s_clustersize >> osb->sb->s_blocksize_bits);
+ buf->f_bfree = ((sector_t) freebits) *
+ (osb->s_clustersize >> osb->sb->s_blocksize_bits);
+ buf->f_bavail = buf->f_bfree;
+ buf->f_files = numbits;
+ buf->f_ffree = freebits;
+
+ brelse(bh);
+
+ ocfs2_meta_unlock(inode, 0);
+ status = 0;
+bail:
+ if (inode)
+ iput(inode);
+
+ mlog_exit(status);
+
+ return status;
+}
+
+static void ocfs2_inode_init_once(void *data,
+ kmem_cache_t *cachep,
+ unsigned long flags)
+{
+ struct ocfs2_inode_info *oi = data;
+
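+	/* Only initialize on a real construction pass, not on slab
+	 * debug verification. */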
+ if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+ SLAB_CTOR_CONSTRUCTOR) {
+ oi->ip_flags = 0;
+ oi->ip_open_count = 0;
+ spin_lock_init(&oi->ip_lock);
+ ocfs2_extent_map_init(&oi->vfs_inode);
+ INIT_LIST_HEAD(&oi->ip_handle_list);
+ INIT_LIST_HEAD(&oi->ip_io_markers);
+ oi->ip_handle = NULL;
+ oi->ip_created_trans = 0;
+ oi->ip_last_trans = 0;
+ oi->ip_dir_start_lookup = 0;
+
+ init_rwsem(&oi->ip_alloc_sem);
+ init_MUTEX(&(oi->ip_io_sem));
+
+ oi->ip_blkno = 0ULL;
+ oi->ip_clusters = 0;
+
+ ocfs2_lock_res_init_once(&oi->ip_rw_lockres);
+ ocfs2_lock_res_init_once(&oi->ip_meta_lockres);
+ ocfs2_lock_res_init_once(&oi->ip_data_lockres);
+
+ ocfs2_metadata_cache_init(&oi->vfs_inode);
+
+ inode_init_once(&oi->vfs_inode);
+ }
+}
+
+static int ocfs2_initialize_mem_caches(void)
+{
+ ocfs2_inode_cachep = kmem_cache_create("ocfs2_inode_cache",
+ sizeof(struct ocfs2_inode_info),
+ 0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT,
+ ocfs2_inode_init_once, NULL);
+ if (!ocfs2_inode_cachep)
+ return -ENOMEM;
+
+ ocfs2_lock_cache = kmem_cache_create("ocfs2_lock",
+ sizeof(struct ocfs2_journal_lock),
+ 0,
+ SLAB_NO_REAP|SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (!ocfs2_lock_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ocfs2_free_mem_caches(void)
+{
+ if (ocfs2_inode_cachep)
+ kmem_cache_destroy(ocfs2_inode_cachep);
+ if (ocfs2_lock_cache)
+ kmem_cache_destroy(ocfs2_lock_cache);
+
+ ocfs2_inode_cachep = NULL;
+ ocfs2_lock_cache = NULL;
+}
+
+static int ocfs2_get_sector(struct super_block *sb,
+ struct buffer_head **bh,
+ int block,
+ int sect_size)
+{
+ if (!sb_set_blocksize(sb, sect_size)) {
+ mlog(ML_ERROR, "unable to set blocksize\n");
+ return -EIO;
+ }
+
+ *bh = sb_getblk(sb, block);
+ if (!*bh) {
+ mlog_errno(-EIO);
+ return -EIO;
+ }
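+	/* Unless the buffer is dirty, drop its uptodate flag so that the
+	 * read below really goes to disk. */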
+ lock_buffer(*bh);
+ if (!buffer_dirty(*bh))
+ clear_buffer_uptodate(*bh);
+ unlock_buffer(*bh);
+ ll_rw_block(READ, 1, bh);
+ wait_on_buffer(*bh);
+ return 0;
+}
+
+/* ocfs2 1.0 only allows one cluster and node identity per kernel image. */
+static int ocfs2_fill_local_node_info(struct ocfs2_super *osb)
+{
+ int status;
+
+	/* XXX hold a ref on the node while mounted? easy enough, if
+ * desirable. */
+ osb->node_num = o2nm_this_node();
+ if (osb->node_num == O2NM_MAX_NODES) {
+ mlog(ML_ERROR, "could not find this host's node number\n");
+ status = -ENOENT;
+ goto bail;
+ }
+
+ mlog(ML_NOTICE, "I am node %d\n", osb->node_num);
+
+ status = 0;
+bail:
+ return status;
+}
+
+static int ocfs2_mount_volume(struct super_block *sb)
+{
+ int status = 0;
+ int unlock_super = 0;
+ struct ocfs2_super *osb = OCFS2_SB(sb);
+
+ mlog_entry_void();
+
+ if (ocfs2_is_hard_readonly(osb))
+ goto leave;
+
+ status = ocfs2_fill_local_node_info(osb);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ status = ocfs2_register_hb_callbacks(osb);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ status = ocfs2_dlm_init(osb);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ /* requires vote_thread to be running. */
+ status = ocfs2_register_net_handlers(osb);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ status = ocfs2_super_lock(osb, 1);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+ unlock_super = 1;
+
+ /* This will load up the node map and add ourselves to it. */
+ status = ocfs2_find_slot(osb);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ ocfs2_populate_mounted_map(osb);
+
+ /* load all node-local system inodes */
+ status = ocfs2_init_local_system_inodes(osb);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ status = ocfs2_check_volume(osb);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+ status = ocfs2_truncate_log_init(osb);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
+	/* This should be sent *after* we've recovered our journal as it
+ * will cause other nodes to unmark us as needing
+ * recovery. However, we need to send it *before* dropping the
+ * super block lock as otherwise their recovery threads might
+ * try to clean us up while we're live! */
+ status = ocfs2_request_mount_vote(osb);
+ if (status < 0)
+ mlog_errno(status);
+
+leave:
+ if (unlock_super)
+ ocfs2_super_unlock(osb, 1);
+
+ mlog_exit(status);
+ return status;
+}
+
+/* we can't grab the goofy sem lock from inside wait_event, so we use
+ * memory barriers to make sure that we'll see the null task before
+ * being woken up */
+static int ocfs2_recovery_thread_running(struct ocfs2_super *osb)
+{
+ mb();
+ return osb->recovery_thread_task != NULL;
+}
+
+static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
+{
+ int tmp;
+ struct ocfs2_super *osb = NULL;
+
+ mlog_entry("(0x%p)\n", sb);
+
+ BUG_ON(!sb);
+ osb = OCFS2_SB(sb);
+ BUG_ON(!osb);
+
+ ocfs2_shutdown_local_alloc(osb);
+
+ ocfs2_truncate_log_shutdown(osb);
+
+ /* disable any new recovery threads and wait for any currently
+ * running ones to exit. Do this before setting the vol_state. */
+ down(&osb->recovery_lock);
+ osb->disable_recovery = 1;
+ up(&osb->recovery_lock);
+ wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb));
+
+ /* At this point, we know that no more recovery threads can be
+ * launched, so wait for any recovery completion work to
+ * complete. */
+ flush_workqueue(ocfs2_wq);
+
+ ocfs2_journal_shutdown(osb);
+
+ ocfs2_sync_blockdev(sb);
+
+ /* No dlm means we've failed during mount, so skip all the
+ * steps which depended on that to complete. */
+ if (osb->dlm) {
+ tmp = ocfs2_super_lock(osb, 1);
+ if (tmp < 0) {
+ mlog_errno(tmp);
+ return;
+ }
+
+ tmp = ocfs2_request_umount_vote(osb);
+ if (tmp < 0)
+ mlog_errno(tmp);
+
+ if (osb->slot_num != OCFS2_INVALID_SLOT)
+ ocfs2_put_slot(osb);
+
+ ocfs2_super_unlock(osb, 1);
+ }
+
+ ocfs2_release_system_inodes(osb);
+
+ if (osb->dlm) {
+ ocfs2_unregister_net_handlers(osb);
+
+ ocfs2_dlm_shutdown(osb);
+ }
+
+ ocfs2_clear_hb_callbacks(osb);
+
+ debugfs_remove(osb->osb_debug_root);
+
+ if (!mnt_err)
+ ocfs2_stop_heartbeat(osb);
+
+ atomic_set(&osb->vol_state, VOLUME_DISMOUNTED);
+
+ printk("ocfs2: Unmounting device (%u,%u) on (node %d)\n",
+ MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev), osb->node_num);
+
+ ocfs2_delete_osb(osb);
+ kfree(osb);
+ sb->s_dev = 0;
+ sb->s_fs_info = NULL;
+}
+
+static int ocfs2_setup_osb_uuid(struct ocfs2_super *osb, const unsigned char *uuid,
+ unsigned uuid_bytes)
+{
+ int i, ret;
+ char *ptr;
+
+ BUG_ON(uuid_bytes != OCFS2_VOL_UUID_LEN);
+
+ osb->uuid_str = kcalloc(1, OCFS2_VOL_UUID_LEN * 2 + 1, GFP_KERNEL);
+ if (osb->uuid_str == NULL)
+ return -ENOMEM;
+
+ memcpy(osb->uuid, uuid, OCFS2_VOL_UUID_LEN);
+
+ for (i = 0, ptr = osb->uuid_str; i < OCFS2_VOL_UUID_LEN; i++) {
+ /* print with null */
+ ret = snprintf(ptr, 3, "%02X", uuid[i]);
+ if (ret != 2) /* drop super cleans up */
+ return -EINVAL;
+ /* then only advance past the last char */
+ ptr += 2;
+ }
+
+ return 0;
+}
+
+static int ocfs2_initialize_super(struct super_block *sb,
+ struct buffer_head *bh,
+ int sector_size)
+{
+ int status = 0;
+ int i;
+ struct ocfs2_dinode *di = NULL;
+ struct inode *inode = NULL;
+ struct buffer_head *bitmap_bh = NULL;
+ struct ocfs2_journal *journal;
+ __le32 uuid_net_key;
+ struct ocfs2_super *osb;
+
+ mlog_entry_void();
+
+ osb = kcalloc(1, sizeof(struct ocfs2_super), GFP_KERNEL);
+ if (!osb) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ sb->s_fs_info = osb;
+ sb->s_op = &ocfs2_sops;
+ sb->s_export_op = &ocfs2_export_ops;
+ sb->s_flags |= MS_NOATIME;
+ /* this is needed to support O_LARGEFILE */
+ sb->s_maxbytes = ocfs2_max_file_offset(sb->s_blocksize_bits);
+
+ osb->sb = sb;
+ /* Save off for ocfs2_rw_direct */
+ osb->s_sectsize_bits = blksize_bits(sector_size);
+ if (!osb->s_sectsize_bits)
+ BUG();
+
+ osb->net_response_ids = 0;
+ spin_lock_init(&osb->net_response_lock);
+ INIT_LIST_HEAD(&osb->net_response_list);
+
+ INIT_LIST_HEAD(&osb->osb_net_handlers);
+ init_waitqueue_head(&osb->recovery_event);
+ spin_lock_init(&osb->vote_task_lock);
+ init_waitqueue_head(&osb->vote_event);
+ osb->vote_work_sequence = 0;
+ osb->vote_wake_sequence = 0;
+ INIT_LIST_HEAD(&osb->blocked_lock_list);
+ osb->blocked_lock_count = 0;
+ INIT_LIST_HEAD(&osb->vote_list);
+ spin_lock_init(&osb->osb_lock);
+
+ atomic_set(&osb->alloc_stats.moves, 0);
+ atomic_set(&osb->alloc_stats.local_data, 0);
+ atomic_set(&osb->alloc_stats.bitmap_data, 0);
+ atomic_set(&osb->alloc_stats.bg_allocs, 0);
+ atomic_set(&osb->alloc_stats.bg_extends, 0);
+
+ ocfs2_init_node_maps(osb);
+
+ snprintf(osb->dev_str, sizeof(osb->dev_str), "%u,%u",
+ MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev));
+
+ init_MUTEX(&osb->recovery_lock);
+
+ osb->disable_recovery = 0;
+ osb->recovery_thread_task = NULL;
+
+ init_waitqueue_head(&osb->checkpoint_event);
+ atomic_set(&osb->needs_checkpoint, 0);
+
+ osb->node_num = O2NM_INVALID_NODE_NUM;
+ osb->slot_num = OCFS2_INVALID_SLOT;
+
+ osb->local_alloc_state = OCFS2_LA_UNUSED;
+ osb->local_alloc_bh = NULL;
+
+ ocfs2_setup_hb_callbacks(osb);
+
+ init_waitqueue_head(&osb->osb_mount_event);
+
+ osb->vol_label = kmalloc(OCFS2_MAX_VOL_LABEL_LEN, GFP_KERNEL);
+ if (!osb->vol_label) {
+ mlog(ML_ERROR, "unable to alloc vol label\n");
+ status = -ENOMEM;
+ goto bail;
+ }
+
+ osb->uuid = kmalloc(OCFS2_VOL_UUID_LEN, GFP_KERNEL);
+ if (!osb->uuid) {
+ mlog(ML_ERROR, "unable to alloc uuid\n");
+ status = -ENOMEM;
+ goto bail;
+ }
+
+ di = (struct ocfs2_dinode *)bh->b_data;
+
+ osb->max_slots = le16_to_cpu(di->id2.i_super.s_max_slots);
+ if (osb->max_slots > OCFS2_MAX_SLOTS || osb->max_slots == 0) {
+ mlog(ML_ERROR, "Invalid number of node slots (%u)\n",
+ osb->max_slots);
+ status = -EINVAL;
+ goto bail;
+ }
+ mlog(ML_NOTICE, "max_slots for this device: %u\n", osb->max_slots);
+
+ osb->s_feature_compat =
+ le32_to_cpu(OCFS2_RAW_SB(di)->s_feature_compat);
+ osb->s_feature_ro_compat =
+ le32_to_cpu(OCFS2_RAW_SB(di)->s_feature_ro_compat);
+ osb->s_feature_incompat =
+ le32_to_cpu(OCFS2_RAW_SB(di)->s_feature_incompat);
+
+ if ((i = OCFS2_HAS_INCOMPAT_FEATURE(osb->sb, ~OCFS2_FEATURE_INCOMPAT_SUPP))) {
+ mlog(ML_ERROR, "couldn't mount because of unsupported "
+ "optional features (%x).\n", i);
+ status = -EINVAL;
+ goto bail;
+ }
+ if (!(osb->sb->s_flags & MS_RDONLY) &&
+ (i = OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb, ~OCFS2_FEATURE_RO_COMPAT_SUPP))) {
+ mlog(ML_ERROR, "couldn't mount RDWR because of "
+ "unsupported optional features (%x).\n", i);
+ status = -EINVAL;
+ goto bail;
+ }
+
+ get_random_bytes(&osb->s_next_generation, sizeof(u32));
+
+ /* FIXME
+ * This should be done in ocfs2_journal_init(), but unknown
+ * ordering issues will cause the filesystem to crash.
+ * If anyone wants to figure out what part of the code
+ * refers to osb->journal before ocfs2_journal_init() is run,
+ * be my guest.
+ */
+ /* initialize our journal structure */
+
+ journal = kcalloc(1, sizeof(struct ocfs2_journal), GFP_KERNEL);
+ if (!journal) {
+ mlog(ML_ERROR, "unable to alloc journal\n");
+ status = -ENOMEM;
+ goto bail;
+ }
+ osb->journal = journal;
+ journal->j_osb = osb;
+
+ atomic_set(&journal->j_num_trans, 0);
+ init_rwsem(&journal->j_trans_barrier);
+ init_waitqueue_head(&journal->j_checkpointed);
+ spin_lock_init(&journal->j_lock);
+ journal->j_trans_id = (unsigned long) 1;
+ INIT_LIST_HEAD(&journal->j_la_cleanups);
+ INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery, osb);
+ journal->j_state = OCFS2_JOURNAL_FREE;
+
+ /* get some pseudo constants for clustersize bits */
+ osb->s_clustersize_bits =
+ le32_to_cpu(di->id2.i_super.s_clustersize_bits);
+ osb->s_clustersize = 1 << osb->s_clustersize_bits;
+ mlog(0, "clusterbits=%d\n", osb->s_clustersize_bits);
+
+ if (osb->s_clustersize < OCFS2_MIN_CLUSTERSIZE ||
+ osb->s_clustersize > OCFS2_MAX_CLUSTERSIZE) {
+ mlog(ML_ERROR, "Volume has invalid cluster size (%d)\n",
+ osb->s_clustersize);
+ status = -EINVAL;
+ goto bail;
+ }
+
+ if (ocfs2_clusters_to_blocks(osb->sb, le32_to_cpu(di->i_clusters) - 1)
+ > (u32)~0UL) {
+ mlog(ML_ERROR, "Volume might try to write to blocks beyond "
+ "what jbd can address in 32 bits.\n");
+ status = -EINVAL;
+ goto bail;
+ }
+
+ if (ocfs2_setup_osb_uuid(osb, di->id2.i_super.s_uuid,
+ sizeof(di->id2.i_super.s_uuid))) {
+ mlog(ML_ERROR, "Out of memory trying to setup our uuid.\n");
+ status = -ENOMEM;
+ goto bail;
+ }
+
+ memcpy(&uuid_net_key, &osb->uuid[i], sizeof(osb->net_key));
+ osb->net_key = le32_to_cpu(uuid_net_key);
+
+ strncpy(osb->vol_label, di->id2.i_super.s_label, 63);
+ osb->vol_label[63] = '\0';
+ osb->root_blkno = le64_to_cpu(di->id2.i_super.s_root_blkno);
+ osb->system_dir_blkno = le64_to_cpu(di->id2.i_super.s_system_dir_blkno);
+ osb->first_cluster_group_blkno =
+ le64_to_cpu(di->id2.i_super.s_first_cluster_group);
+ osb->fs_generation = le32_to_cpu(di->i_fs_generation);
+ mlog(0, "vol_label: %s\n", osb->vol_label);
+ mlog(0, "uuid: %s\n", osb->uuid_str);
+ mlog(0, "root_blkno=%"MLFu64", system_dir_blkno=%"MLFu64"\n",
+ osb->root_blkno, osb->system_dir_blkno);
+
+ osb->osb_dlm_debug = ocfs2_new_dlm_debug();
+ if (!osb->osb_dlm_debug) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ atomic_set(&osb->vol_state, VOLUME_INIT);
+
+ /* load root, system_dir, and all global system inodes */
+ status = ocfs2_init_global_system_inodes(osb);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /*
+ * global bitmap
+ */
+ inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE,
+ OCFS2_INVALID_SLOT);
+ if (!inode) {
+ status = -EINVAL;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ osb->bitmap_blkno = OCFS2_I(inode)->ip_blkno;
+
+ status = ocfs2_read_block(osb, osb->bitmap_blkno, &bitmap_bh, 0,
+ inode);
+ iput(inode);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ di = (struct ocfs2_dinode *) bitmap_bh->b_data;
+ osb->bitmap_cpg = le16_to_cpu(di->id2.i_chain.cl_cpg);
+ osb->num_clusters = le32_to_cpu(di->id1.bitmap1.i_total);
+ brelse(bitmap_bh);
+ mlog(0, "cluster bitmap inode: %"MLFu64", clusters per group: %u\n",
+ osb->bitmap_blkno, osb->bitmap_cpg);
+
+ status = ocfs2_init_slot_info(osb);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ /* Link this osb onto the global linked list of all osb structures. */
+	/* The global link list is maintained for the whole driver. */
+ spin_lock(&ocfs2_globals_lock);
+ osb->osb_id = osb_id;
+ if (osb_id < OCFS2_MAX_OSB_ID)
+ osb_id++;
+ else {
+ mlog(ML_ERROR, "Too many volumes mounted\n");
+ status = -ENOMEM;
+ }
+ spin_unlock(&ocfs2_globals_lock);
+
+bail:
+ mlog_exit(status);
+ return status;
+}
+
+/*
+ * will return: -EAGAIN if it is ok to keep searching for superblocks
+ * -EINVAL if there is a bad superblock
+ * 0 on success
+ */
+static int ocfs2_verify_volume(struct ocfs2_dinode *di,
+ struct buffer_head *bh,
+ u32 blksz)
+{
+ int status = -EAGAIN;
+
+ mlog_entry_void();
+
+ if (memcmp(di->i_signature, OCFS2_SUPER_BLOCK_SIGNATURE,
+ strlen(OCFS2_SUPER_BLOCK_SIGNATURE)) == 0) {
+ status = -EINVAL;
+ if ((1 << le32_to_cpu(di->id2.i_super.s_blocksize_bits)) != blksz) {
+ mlog(ML_ERROR, "found superblock with incorrect block "
+ "size: found %u, should be %u\n",
+ 1 << le32_to_cpu(di->id2.i_super.s_blocksize_bits),
+ blksz);
+ } else if (le16_to_cpu(di->id2.i_super.s_major_rev_level) !=
+ OCFS2_MAJOR_REV_LEVEL ||
+ le16_to_cpu(di->id2.i_super.s_minor_rev_level) !=
+ OCFS2_MINOR_REV_LEVEL) {
+ mlog(ML_ERROR, "found superblock with bad version: "
+ "found %u.%u, should be %u.%u\n",
+ le16_to_cpu(di->id2.i_super.s_major_rev_level),
+ le16_to_cpu(di->id2.i_super.s_minor_rev_level),
+ OCFS2_MAJOR_REV_LEVEL,
+ OCFS2_MINOR_REV_LEVEL);
+ } else if (bh->b_blocknr != le64_to_cpu(di->i_blkno)) {
+ mlog(ML_ERROR, "bad block number on superblock: "
+ "found %"MLFu64", should be %llu\n",
+ di->i_blkno, (unsigned long long)bh->b_blocknr);
+ } else if (le32_to_cpu(di->id2.i_super.s_clustersize_bits) < 12 ||
+ le32_to_cpu(di->id2.i_super.s_clustersize_bits) > 20) {
+ mlog(ML_ERROR, "bad cluster size found: %u\n",
+ 1 << le32_to_cpu(di->id2.i_super.s_clustersize_bits));
+ } else if (!le64_to_cpu(di->id2.i_super.s_root_blkno)) {
+ mlog(ML_ERROR, "bad root_blkno: 0\n");
+ } else if (!le64_to_cpu(di->id2.i_super.s_system_dir_blkno)) {
+ mlog(ML_ERROR, "bad system_dir_blkno: 0\n");
+ } else if (le16_to_cpu(di->id2.i_super.s_max_slots) > OCFS2_MAX_SLOTS) {
+ mlog(ML_ERROR,
+ "Superblock slots found greater than file system "
+ "maximum: found %u, max %u\n",
+ le16_to_cpu(di->id2.i_super.s_max_slots),
+ OCFS2_MAX_SLOTS);
+ } else {
+ /* found it! */
+ status = 0;
+ }
+ }
+
+ mlog_exit(status);
+ return status;
+}
+
+static int ocfs2_check_volume(struct ocfs2_super *osb)
+{
+ int status = 0;
+ int dirty;
+ struct ocfs2_dinode *local_alloc = NULL; /* only used if we
+ * recover
+ * ourselves. */
+
+ mlog_entry_void();
+
+ /* Init our journal object. */
+ status = ocfs2_journal_init(osb->journal, &dirty);
+ if (status < 0) {
+ mlog(ML_ERROR, "Could not initialize journal!\n");
+ goto finally;
+ }
+
+ /* If the journal was unmounted cleanly then we don't want to
+ * recover anything. Otherwise, journal_load will do that
+ * dirty work for us :) */
+ if (!dirty) {
+ status = ocfs2_journal_wipe(osb->journal, 0);
+ if (status < 0) {
+ mlog_errno(status);
+ goto finally;
+ }
+ } else {
+ mlog(ML_NOTICE, "File system was not unmounted cleanly, "
+ "recovering volume.\n");
+ }
+
+ /* will play back anything left in the journal. */
+ ocfs2_journal_load(osb->journal);
+
+ if (dirty) {
+ /* recover my local alloc if we didn't unmount cleanly. */
+ status = ocfs2_begin_local_alloc_recovery(osb,
+ osb->slot_num,
+ &local_alloc);
+ if (status < 0) {
+ mlog_errno(status);
+ goto finally;
+ }
+ /* we complete the recovery process after we've marked
+ * ourselves as mounted. */
+ }
+
+ mlog(0, "Journal loaded.\n");
+
+ status = ocfs2_load_local_alloc(osb);
+ if (status < 0) {
+ mlog_errno(status);
+ goto finally;
+ }
+
+ if (dirty) {
+ /* Recovery will be completed after we've mounted the
+ * rest of the volume. */
+ osb->dirty = 1;
+ osb->local_alloc_copy = local_alloc;
+ local_alloc = NULL;
+ }
+
+ /* go through each journal, trylock it and if you get the
+ * lock, and it's marked as dirty, set the bit in the recover
+ * map and launch a recovery thread for it. */
+ status = ocfs2_mark_dead_nodes(osb);
+ if (status < 0)
+ mlog_errno(status);
+
+finally:
+ if (local_alloc)
+ kfree(local_alloc);
+
+ mlog_exit(status);
+ return status;
+}
+
+/*
+ * This routine gets called from dismount or close whenever a dismount of
+ * the volume is requested and the osb open count becomes 1.
+ * It removes the osb from the global list and also frees up all the
+ * initialized resources and file objects.
+ */
+static void ocfs2_delete_osb(struct ocfs2_super *osb)
+{
+ mlog_entry_void();
+
+ /* This function assumes that the caller has the main osb resource */
+
+ if (osb->slot_info)
+ ocfs2_free_slot_info(osb->slot_info);
+
+ /* FIXME
+ * This belongs in journal shutdown, but because we have to
+	 * allocate osb->journal at the start of ocfs2_initialize_super(),
+ * we free it here.
+ */
+ kfree(osb->journal);
+ if (osb->local_alloc_copy)
+ kfree(osb->local_alloc_copy);
+ kfree(osb->uuid_str);
+ ocfs2_put_dlm_debug(osb->osb_dlm_debug);
+ memset(osb, 0, sizeof(struct ocfs2_super));
+
+ mlog_exit_void();
+}
+
+/* Put OCFS2 into a readonly state, or (if the user specifies it),
+ * panic(). We do not support continue-on-error operation. */
+static void ocfs2_handle_error(struct super_block *sb)
+{
+ struct ocfs2_super *osb = OCFS2_SB(sb);
+
+ if (osb->s_mount_opt & OCFS2_MOUNT_ERRORS_PANIC)
+ panic("OCFS2: (device %s): panic forced after error\n",
+ sb->s_id);
+
+ ocfs2_set_osb_flag(osb, OCFS2_OSB_ERROR_FS);
+
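+	/* If we're already read-only (and flagged as such), there's
+	 * nothing more to do. */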
+ if (sb->s_flags & MS_RDONLY &&
+ (ocfs2_is_soft_readonly(osb) ||
+ ocfs2_is_hard_readonly(osb)))
+ return;
+
+ printk(KERN_CRIT "File system is now read-only due to the potential "
+ "of on-disk corruption. Please run fsck.ocfs2 once the file "
+ "system is unmounted.\n");
+ sb->s_flags |= MS_RDONLY;
+ ocfs2_set_ro_flag(osb, 0);
+}
+
+static char error_buf[1024];
+
+void __ocfs2_error(struct super_block *sb,
+ const char *function,
+ const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ vsprintf(error_buf, fmt, args);
+ va_end(args);
+
+ /* Not using mlog here because we want to show the actual
+ * function the error came from. */
+ printk(KERN_CRIT "OCFS2: ERROR (device %s): %s: %s\n",
+ sb->s_id, function, error_buf);
+
+ ocfs2_handle_error(sb);
+}
+
+/* Handle critical errors. This is intentionally more drastic than
+ * ocfs2_handle_error, so we only use for things like journal errors,
+ * etc. */
+void __ocfs2_abort(struct super_block* sb,
+ const char *function,
+ const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ vsprintf(error_buf, fmt, args);
+ va_end(args);
+
+ printk(KERN_CRIT "OCFS2: abort (device %s): %s: %s\n",
+ sb->s_id, function, error_buf);
+
+ /* We don't have the cluster support yet to go straight to
+ * hard readonly in here. Until then, we want to keep
+ * ocfs2_abort() so that we can at least mark critical
+ * errors.
+ *
+ * TODO: This should abort the journal and alert other nodes
+ * that our slot needs recovery. */
+
+ /* Force a panic(). This stinks, but it's better than letting
+ * things continue without having a proper hard readonly
+ * here. */
+ OCFS2_SB(sb)->s_mount_opt |= OCFS2_MOUNT_ERRORS_PANIC;
+ ocfs2_handle_error(sb);
+}
+
+module_init(ocfs2_init);
+module_exit(ocfs2_exit);
diff --git a/fs/ocfs2/super.h b/fs/ocfs2/super.h
new file mode 100644
index 00000000000..c564177dfbd
--- /dev/null
+++ b/fs/ocfs2/super.h
@@ -0,0 +1,44 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * super.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_SUPER_H
+#define OCFS2_SUPER_H
+
+extern struct workqueue_struct *ocfs2_wq;
+
+int ocfs2_publish_get_mount_state(struct ocfs2_super *osb,
+ int node_num);
+
+void __ocfs2_error(struct super_block *sb,
+ const char *function,
+ const char *fmt, ...);
+#define ocfs2_error(sb, fmt, args...) __ocfs2_error(sb, __PRETTY_FUNCTION__, fmt, ##args)
+
+void __ocfs2_abort(struct super_block *sb,
+ const char *function,
+ const char *fmt, ...);
+#define ocfs2_abort(sb, fmt, args...) __ocfs2_abort(sb, __PRETTY_FUNCTION__, fmt, ##args)
+
+#endif /* OCFS2_SUPER_H */
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
new file mode 100644
index 00000000000..f6986bd79e7
--- /dev/null
+++ b/fs/ocfs2/symlink.c
@@ -0,0 +1,180 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * linux/cluster/ssi/cfs/symlink.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE
+ * or NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Questions/Comments/Bugfixes to ssic-linux-devel@lists.sourceforge.net
+ *
+ * Copyright (C) 1992 Rick Sladkey
+ *
+ * Optimization changes Copyright (C) 1994 Florian La Roche
+ *
+ * Jun 7 1999, cache symlink lookups in the page cache. -DaveM
+ *
+ * Portions Copyright (C) 2001 Compaq Computer Corporation
+ *
+ * ocfs2 symlink handling code.
+ *
+ * Copyright (C) 2004, 2005 Oracle.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/utsname.h>
+
+#define MLOG_MASK_PREFIX ML_NAMEI
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#include "alloc.h"
+#include "file.h"
+#include "inode.h"
+#include "journal.h"
+#include "symlink.h"
+
+#include "buffer_head_io.h"
+
+static char *ocfs2_page_getlink(struct dentry * dentry,
+ struct page **ppage);
+static char *ocfs2_fast_symlink_getlink(struct inode *inode,
+ struct buffer_head **bh);
+
+/* get the link contents into pagecache */
+static char *ocfs2_page_getlink(struct dentry * dentry,
+ struct page **ppage)
+{
+ struct page * page;
+ struct address_space *mapping = dentry->d_inode->i_mapping;
+ page = read_cache_page(mapping, 0,
+ (filler_t *)mapping->a_ops->readpage, NULL);
+ if (IS_ERR(page))
+ goto sync_fail;
+ wait_on_page_locked(page);
+ if (!PageUptodate(page))
+ goto async_fail;
+ *ppage = page;
+ return kmap(page);
+
+async_fail:
+ page_cache_release(page);
+ return ERR_PTR(-EIO);
+
+sync_fail:
+ return (char*)page;
+}
+
+static char *ocfs2_fast_symlink_getlink(struct inode *inode,
+ struct buffer_head **bh)
+{
+ int status;
+ char *link = NULL;
+ struct ocfs2_dinode *fe;
+
+ mlog_entry_void();
+
+ status = ocfs2_read_block(OCFS2_SB(inode->i_sb),
+ OCFS2_I(inode)->ip_blkno,
+ bh,
+ OCFS2_BH_CACHED,
+ inode);
+ if (status < 0) {
+ mlog_errno(status);
+ link = ERR_PTR(status);
+ goto bail;
+ }
+
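+	/* Fast symlinks store the target string in the dinode itself, so
+	 * we can return a pointer straight into the inode buffer. */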
+ fe = (struct ocfs2_dinode *) (*bh)->b_data;
+ link = (char *) fe->id2.i_symlink;
+bail:
+ mlog_exit(status);
+
+ return link;
+}
+
+static int ocfs2_readlink(struct dentry *dentry,
+ char __user *buffer,
+ int buflen)
+{
+ int ret;
+ char *link;
+ struct buffer_head *bh = NULL;
+ struct inode *inode = dentry->d_inode;
+
+ mlog_entry_void();
+
+ link = ocfs2_fast_symlink_getlink(inode, &bh);
+ if (IS_ERR(link)) {
+ ret = PTR_ERR(link);
+ goto out;
+ }
+
+ ret = vfs_readlink(dentry, buffer, buflen, link);
+
+ brelse(bh);
+out:
+ mlog_exit(ret);
+ return ret;
+}
+
+static void *ocfs2_follow_link(struct dentry *dentry,
+ struct nameidata *nd)
+{
+ int status;
+ char *link;
+ struct inode *inode = dentry->d_inode;
+ struct page *page = NULL;
+ struct buffer_head *bh = NULL;
+
+ if (ocfs2_inode_is_fast_symlink(inode))
+ link = ocfs2_fast_symlink_getlink(inode, &bh);
+ else
+ link = ocfs2_page_getlink(dentry, &page);
+ if (IS_ERR(link)) {
+ status = PTR_ERR(link);
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = vfs_follow_link(nd, link);
+ if (status)
+ mlog_errno(status);
+bail:
+ if (page) {
+ kunmap(page);
+ page_cache_release(page);
+ }
+ if (bh)
+ brelse(bh);
+
+ return ERR_PTR(status);
+}
+
+struct inode_operations ocfs2_symlink_inode_operations = {
+ .readlink = page_readlink,
+ .follow_link = ocfs2_follow_link,
+ .getattr = ocfs2_getattr,
+};
+struct inode_operations ocfs2_fast_symlink_inode_operations = {
+ .readlink = ocfs2_readlink,
+ .follow_link = ocfs2_follow_link,
+ .getattr = ocfs2_getattr,
+};
diff --git a/fs/ocfs2/symlink.h b/fs/ocfs2/symlink.h
new file mode 100644
index 00000000000..1ea9e4d9e9e
--- /dev/null
+++ b/fs/ocfs2/symlink.h
@@ -0,0 +1,42 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * symlink.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_SYMLINK_H
+#define OCFS2_SYMLINK_H
+
+extern struct inode_operations ocfs2_symlink_inode_operations;
+extern struct inode_operations ocfs2_fast_symlink_inode_operations;
+
+/*
+ * Test whether an inode is a fast symlink.
+ */
+static inline int ocfs2_inode_is_fast_symlink(struct inode *inode)
+{
+ return (S_ISLNK(inode->i_mode) &&
+ inode->i_blocks == 0);
+}
+
+
+#endif /* OCFS2_SYMLINK_H */
diff --git a/fs/ocfs2/sysfile.c b/fs/ocfs2/sysfile.c
new file mode 100644
index 00000000000..600a8bc5b54
--- /dev/null
+++ b/fs/ocfs2/sysfile.c
@@ -0,0 +1,131 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * sysfile.c
+ *
+ * Initialize, read, write, etc. system files.
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+
+#include "ocfs2.h"
+
+#define MLOG_MASK_PREFIX ML_INODE
+#include <cluster/masklog.h>
+
+#include "alloc.h"
+#include "dir.h"
+#include "inode.h"
+#include "journal.h"
+#include "sysfile.h"
+
+#include "buffer_head_io.h"
+
+static struct inode * _ocfs2_get_system_file_inode(struct ocfs2_super *osb,
+ int type,
+ u32 slot);
+
+static inline int is_global_system_inode(int type);
+static inline int is_in_system_inode_array(struct ocfs2_super *osb,
+ int type,
+ u32 slot);
+
+static inline int is_global_system_inode(int type)
+{
+ return type >= OCFS2_FIRST_ONLINE_SYSTEM_INODE &&
+ type <= OCFS2_LAST_GLOBAL_SYSTEM_INODE;
+}
+
+static inline int is_in_system_inode_array(struct ocfs2_super *osb,
+ int type,
+ u32 slot)
+{
+ return slot == osb->slot_num || is_global_system_inode(type);
+}
+
+struct inode *ocfs2_get_system_file_inode(struct ocfs2_super *osb,
+ int type,
+ u32 slot)
+{
+ struct inode *inode = NULL;
+ struct inode **arr = NULL;
+
+ /* avoid the lookup if cached in local system file array */
+ if (is_in_system_inode_array(osb, type, slot))
+ arr = &(osb->system_inodes[type]);
+
+ if (arr && ((inode = *arr) != NULL)) {
+ /* get a ref in addition to the array ref */
+ inode = igrab(inode);
+ if (!inode)
+ BUG();
+
+ return inode;
+ }
+
+ /* this gets one ref thru iget */
+ inode = _ocfs2_get_system_file_inode(osb, type, slot);
+
+ /* add one more if putting into array for first time */
+ if (arr && inode) {
+ *arr = igrab(inode);
+ if (!*arr)
+ BUG();
+ }
+ return inode;
+}
+
+static struct inode * _ocfs2_get_system_file_inode(struct ocfs2_super *osb,
+ int type,
+ u32 slot)
+{
+ char namebuf[40];
+ struct inode *inode = NULL;
+ u64 blkno;
+ struct buffer_head *dirent_bh = NULL;
+ struct ocfs2_dir_entry *de = NULL;
+ int status = 0;
+
+ ocfs2_sprintf_system_inode_name(namebuf,
+ sizeof(namebuf),
+ type, slot);
+
+ status = ocfs2_find_files_on_disk(namebuf, strlen(namebuf),
+ &blkno, osb->sys_root_inode,
+ &dirent_bh, &de);
+ if (status < 0) {
+ goto bail;
+ }
+
+ inode = ocfs2_iget(osb, blkno);
+ if (IS_ERR(inode)) {
+ mlog_errno(PTR_ERR(inode));
+ inode = NULL;
+ goto bail;
+ }
+bail:
+ if (dirent_bh)
+ brelse(dirent_bh);
+ return inode;
+}
+
diff --git a/fs/ocfs2/sysfile.h b/fs/ocfs2/sysfile.h
new file mode 100644
index 00000000000..cc9ea661ffc
--- /dev/null
+++ b/fs/ocfs2/sysfile.h
@@ -0,0 +1,33 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * sysfile.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_SYSFILE_H
+#define OCFS2_SYSFILE_H
+
+struct inode * ocfs2_get_system_file_inode(struct ocfs2_super *osb,
+ int type,
+ u32 slot);
+
+#endif /* OCFS2_SYSFILE_H */
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
new file mode 100644
index 00000000000..3a0458fd3e1
--- /dev/null
+++ b/fs/ocfs2/uptodate.c
@@ -0,0 +1,544 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * uptodate.c
+ *
+ * Tracking the up-to-date-ness of a local buffer_head with respect to
+ * the cluster.
+ *
+ * Copyright (C) 2002, 2004, 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ * Standard buffer head caching flags (uptodate, etc) are insufficient
+ * in a clustered environment - a buffer may be marked up to date on
+ * our local node but could have been modified by another cluster
+ * member. As a result an additional (and performant) caching scheme
+ * is required. A further requirement is that we consume as little
+ * memory as possible - we never pin buffer_head structures in order
+ * to cache them.
+ *
+ * We track the existence of up to date buffers on the inodes which
+ * are associated with them. Because we don't want to pin
+ * buffer_heads, this is only a (strong) hint and several other checks
+ * are made in the I/O path to ensure that we don't use a stale or
+ * invalid buffer without going to disk:
+ * - buffer_jbd is used liberally - if a bh is in the journal on
+ * this node then it *must* be up to date.
+ * - the standard buffer_uptodate() macro is used to detect buffers
+ * which may be invalid (even if we have an up to date tracking
+ * item for them)
+ *
+ * For a full understanding of how this code works together, one
+ * should read the callers in dlmglue.c, the I/O functions in
+ * buffer_head_io.c and ocfs2_journal_access in journal.c
+ */
+
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/buffer_head.h>
+#include <linux/rbtree.h>
+#include <linux/jbd.h>
+
+#define MLOG_MASK_PREFIX ML_UPTODATE
+
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#include "inode.h"
+#include "uptodate.h"
+
+struct ocfs2_meta_cache_item {
+ struct rb_node c_node;
+ sector_t c_block;
+};
+
+static kmem_cache_t *ocfs2_uptodate_cachep = NULL;
+
+void ocfs2_metadata_cache_init(struct inode *inode)
+{
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
+
+ oi->ip_flags |= OCFS2_INODE_CACHE_INLINE;
+ ci->ci_num_cached = 0;
+}
+
+/* No lock taken here as 'root' is not expected to be visible to other
+ * processes. */
+static unsigned int ocfs2_purge_copied_metadata_tree(struct rb_root *root)
+{
+ unsigned int purged = 0;
+ struct rb_node *node;
+ struct ocfs2_meta_cache_item *item;
+
+ while ((node = rb_last(root)) != NULL) {
+ item = rb_entry(node, struct ocfs2_meta_cache_item, c_node);
+
+ mlog(0, "Purge item %llu\n",
+ (unsigned long long) item->c_block);
+
+ rb_erase(&item->c_node, root);
+ kmem_cache_free(ocfs2_uptodate_cachep, item);
+
+ purged++;
+ }
+ return purged;
+}
+
+/* Called from locking and called from ocfs2_clear_inode. Dump the
+ * cache for a given inode.
+ *
+ * This function is a few lines longer than necessary due to some
+ * accounting done here, but I think it's worth tracking down those
+ * bugs sooner -- Mark */
+void ocfs2_metadata_cache_purge(struct inode *inode)
+{
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ unsigned int tree, to_purge, purged;
+ struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
+ struct rb_root root = RB_ROOT;
+
+ spin_lock(&oi->ip_lock);
+ tree = !(oi->ip_flags & OCFS2_INODE_CACHE_INLINE);
+ to_purge = ci->ci_num_cached;
+
+ mlog(0, "Purge %u %s items from Inode %"MLFu64"\n", to_purge,
+ tree ? "array" : "tree", oi->ip_blkno);
+
+ /* If we're a tree, save off the root so that we can safely
+ * initialize the cache. We do the work to free tree members
+ * without the spinlock. */
+ if (tree)
+ root = ci->ci_cache.ci_tree;
+
+ ocfs2_metadata_cache_init(inode);
+ spin_unlock(&oi->ip_lock);
+
+ purged = ocfs2_purge_copied_metadata_tree(&root);
+ /* If possible, track the number wiped so that we can more
+ * easily detect counting errors. Unfortunately, this is only
+ * meaningful for trees. */
+ if (tree && purged != to_purge)
+ mlog(ML_ERROR, "Inode %"MLFu64", count = %u, purged = %u\n",
+ oi->ip_blkno, to_purge, purged);
+}
+
+/* Returns the index in the cache array, -1 if not found.
+ * Requires ip_lock. */
+static int ocfs2_search_cache_array(struct ocfs2_caching_info *ci,
+ sector_t item)
+{
+ int i;
+
+ for (i = 0; i < ci->ci_num_cached; i++) {
+ if (item == ci->ci_cache.ci_array[i])
+ return i;
+ }
+
+ return -1;
+}
+
+/* Returns the cache item if found, otherwise NULL.
+ * Requires ip_lock. */
+static struct ocfs2_meta_cache_item *
+ocfs2_search_cache_tree(struct ocfs2_caching_info *ci,
+ sector_t block)
+{
+ struct rb_node * n = ci->ci_cache.ci_tree.rb_node;
+ struct ocfs2_meta_cache_item *item = NULL;
+
+ while (n) {
+ item = rb_entry(n, struct ocfs2_meta_cache_item, c_node);
+
+ if (block < item->c_block)
+ n = n->rb_left;
+ else if (block > item->c_block)
+ n = n->rb_right;
+ else
+ return item;
+ }
+
+ return NULL;
+}
+
+static int ocfs2_buffer_cached(struct ocfs2_inode_info *oi,
+ struct buffer_head *bh)
+{
+ int index = -1;
+ struct ocfs2_meta_cache_item *item = NULL;
+
+ spin_lock(&oi->ip_lock);
+
+ mlog(0, "Inode %"MLFu64", query block %llu (inline = %u)\n",
+ oi->ip_blkno, (unsigned long long) bh->b_blocknr,
+ !!(oi->ip_flags & OCFS2_INODE_CACHE_INLINE));
+
+ if (oi->ip_flags & OCFS2_INODE_CACHE_INLINE)
+ index = ocfs2_search_cache_array(&oi->ip_metadata_cache,
+ bh->b_blocknr);
+ else
+ item = ocfs2_search_cache_tree(&oi->ip_metadata_cache,
+ bh->b_blocknr);
+
+ spin_unlock(&oi->ip_lock);
+
+ mlog(0, "index = %d, item = %p\n", index, item);
+
+ return (index != -1) || (item != NULL);
+}
+
+/* Warning: even if it returns true, this does *not* guarantee that
+ * the block is stored in our inode metadata cache. */
+int ocfs2_buffer_uptodate(struct inode *inode,
+ struct buffer_head *bh)
+{
+ /* Doesn't matter if the bh is in our cache or not -- if it's
+ * not marked uptodate then we know it can't have correct
+ * data. */
+ if (!buffer_uptodate(bh))
+ return 0;
+
+ /* OCFS2 does not allow multiple nodes to be changing the same
+ * block at the same time. */
+ if (buffer_jbd(bh))
+ return 1;
+
+ /* Ok, locally the buffer is marked as up to date, now search
+ * our cache to see if we can trust that. */
+ return ocfs2_buffer_cached(OCFS2_I(inode), bh);
+}
+
+/* Requires ip_lock */
+static void ocfs2_append_cache_array(struct ocfs2_caching_info *ci,
+ sector_t block)
+{
+ BUG_ON(ci->ci_num_cached >= OCFS2_INODE_MAX_CACHE_ARRAY);
+
+ mlog(0, "block %llu takes position %u\n", (unsigned long long) block,
+ ci->ci_num_cached);
+
+ ci->ci_cache.ci_array[ci->ci_num_cached] = block;
+ ci->ci_num_cached++;
+}
+
+/* By now the caller should have checked that the item does *not*
+ * exist in the tree.
+ * Requires ip_lock. */
+static void __ocfs2_insert_cache_tree(struct ocfs2_caching_info *ci,
+ struct ocfs2_meta_cache_item *new)
+{
+ sector_t block = new->c_block;
+ struct rb_node *parent = NULL;
+ struct rb_node **p = &ci->ci_cache.ci_tree.rb_node;
+ struct ocfs2_meta_cache_item *tmp;
+
+ mlog(0, "Insert block %llu num = %u\n", (unsigned long long) block,
+ ci->ci_num_cached);
+
+ while(*p) {
+ parent = *p;
+
+ tmp = rb_entry(parent, struct ocfs2_meta_cache_item, c_node);
+
+ if (block < tmp->c_block)
+ p = &(*p)->rb_left;
+ else if (block > tmp->c_block)
+ p = &(*p)->rb_right;
+ else {
+ /* This should never happen! */
+ mlog(ML_ERROR, "Duplicate block %llu cached!\n",
+ (unsigned long long) block);
+ BUG();
+ }
+ }
+
+ rb_link_node(&new->c_node, parent, p);
+ rb_insert_color(&new->c_node, &ci->ci_cache.ci_tree);
+ ci->ci_num_cached++;
+}
+
+static inline int ocfs2_insert_can_use_array(struct ocfs2_inode_info *oi,
+ struct ocfs2_caching_info *ci)
+{
+ assert_spin_locked(&oi->ip_lock);
+
+ return (oi->ip_flags & OCFS2_INODE_CACHE_INLINE) &&
+ (ci->ci_num_cached < OCFS2_INODE_MAX_CACHE_ARRAY);
+}
+
+/* tree should be exactly OCFS2_INODE_MAX_CACHE_ARRAY wide. NULL the
+ * pointers in tree after we use them - this allows caller to detect
+ * when to free in case of error. */
+static void ocfs2_expand_cache(struct ocfs2_inode_info *oi,
+ struct ocfs2_meta_cache_item **tree)
+{
+ int i;
+ struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
+
+ mlog_bug_on_msg(ci->ci_num_cached != OCFS2_INODE_MAX_CACHE_ARRAY,
+ "Inode %"MLFu64", num cached = %u, should be %u\n",
+ oi->ip_blkno, ci->ci_num_cached,
+ OCFS2_INODE_MAX_CACHE_ARRAY);
+ mlog_bug_on_msg(!(oi->ip_flags & OCFS2_INODE_CACHE_INLINE),
+ "Inode %"MLFu64" not marked as inline anymore!\n",
+ oi->ip_blkno);
+ assert_spin_locked(&oi->ip_lock);
+
+ /* Be careful to initialize the tree members *first* because
+ * once the ci_tree is used, the array is junk... */
+ for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++)
+ tree[i]->c_block = ci->ci_cache.ci_array[i];
+
+ oi->ip_flags &= ~OCFS2_INODE_CACHE_INLINE;
+ ci->ci_cache.ci_tree = RB_ROOT;
+ /* this will be set again by __ocfs2_insert_cache_tree */
+ ci->ci_num_cached = 0;
+
+ for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) {
+ __ocfs2_insert_cache_tree(ci, tree[i]);
+ tree[i] = NULL;
+ }
+
+ mlog(0, "Expanded %"MLFu64" to a tree cache: flags 0x%x, num = %u\n",
+ oi->ip_blkno, oi->ip_flags, ci->ci_num_cached);
+}
+
+/* Slow path function - memory allocation is necessary. See the
+ * comment above ocfs2_set_buffer_uptodate for more information. */
+static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi,
+ sector_t block,
+ int expand_tree)
+{
+ int i;
+ struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
+ struct ocfs2_meta_cache_item *new = NULL;
+ struct ocfs2_meta_cache_item *tree[OCFS2_INODE_MAX_CACHE_ARRAY] =
+ { NULL, };
+
+ mlog(0, "Inode %"MLFu64", block %llu, expand = %d\n",
+ oi->ip_blkno, (unsigned long long) block, expand_tree);
+
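+	/* Allocate outside of ip_lock -- GFP_KERNEL allocations may sleep. */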
+ new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_KERNEL);
+ if (!new) {
+ mlog_errno(-ENOMEM);
+ return;
+ }
+ new->c_block = block;
+
+ if (expand_tree) {
+ /* Do *not* allocate an array here - the removal code
+ * has no way of tracking that. */
+ for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) {
+ tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep,
+ GFP_KERNEL);
+ if (!tree[i]) {
+ mlog_errno(-ENOMEM);
+ goto out_free;
+ }
+
+ /* These are initialized in ocfs2_expand_cache! */
+ }
+ }
+
+ spin_lock(&oi->ip_lock);
+ if (ocfs2_insert_can_use_array(oi, ci)) {
+ mlog(0, "Someone cleared the tree underneath us\n");
+ /* Ok, items were removed from the cache in between
+ * locks. Detect this and revert to the fast path */
+ ocfs2_append_cache_array(ci, block);
+ spin_unlock(&oi->ip_lock);
+ goto out_free;
+ }
+
+ if (expand_tree)
+ ocfs2_expand_cache(oi, tree);
+
+ __ocfs2_insert_cache_tree(ci, new);
+ spin_unlock(&oi->ip_lock);
+
+ new = NULL;
+out_free:
+ if (new)
+ kmem_cache_free(ocfs2_uptodate_cachep, new);
+
+ /* If these were used, then ocfs2_expand_cache re-set them to
+ * NULL for us. */
+ if (tree[0]) {
+ for (i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++)
+ if (tree[i])
+ kmem_cache_free(ocfs2_uptodate_cachep,
+ tree[i]);
+ }
+}
+
+/* Item insertion is guarded by ip_io_sem, so the insertion path takes
+ * advantage of this by not rechecking for a duplicate insert during
+ * the slow case. Additionally, if the cache needs to be bumped up to
+ * a tree, the code will not recheck after acquiring the lock --
+ * multiple paths cannot be expanding to a tree at the same time.
+ *
+ * The slow path takes into account that items can be removed
+ * (including the whole tree wiped and reset) while this process is out
+ * allocating memory. In those cases, it reverts to the fast
+ * path.
+ *
+ * Note that this function may actually fail to insert the block if
+ * memory cannot be allocated. This is not fatal; it may, however,
+ * result in a performance penalty. */
+void ocfs2_set_buffer_uptodate(struct inode *inode,
+ struct buffer_head *bh)
+{
+ int expand;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
+
+ /* The block may very well exist in our cache already, so avoid
+ * doing any more work in that case. */
+ if (ocfs2_buffer_cached(oi, bh))
+ return;
+
+ mlog(0, "Inode %"MLFu64", inserting block %llu\n", oi->ip_blkno,
+ (unsigned long long) bh->b_blocknr);
+
+ /* No need to recheck under spinlock - insertion is guarded by
+ * ip_io_sem */
+ spin_lock(&oi->ip_lock);
+ if (ocfs2_insert_can_use_array(oi, ci)) {
+ /* Fast case - it's an array and there's a free
+ * spot. */
+ ocfs2_append_cache_array(ci, bh->b_blocknr);
+ spin_unlock(&oi->ip_lock);
+ return;
+ }
+
+ expand = 0;
+ if (oi->ip_flags & OCFS2_INODE_CACHE_INLINE) {
+ /* We need to bump things up to a tree. */
+ expand = 1;
+ }
+ spin_unlock(&oi->ip_lock);
+
+ __ocfs2_set_buffer_uptodate(oi, bh->b_blocknr, expand);
+}
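
The split between ocfs2_set_buffer_uptodate() above and __ocfs2_set_buffer_uptodate() is the familiar optimistic pattern: check under the spinlock, drop the lock to allocate, then re-check after reacquiring it because the cache may have been emptied in the meantime. Here is a minimal user-space sketch of that pattern, with a pthread mutex standing in for the kernel spinlock and all names invented for the example.

	#include <pthread.h>
	#include <stdlib.h>

	#define CAP 8

	struct node { struct node *next; long value; };

	struct cache {
		pthread_mutex_t lock;
		int nr;			/* items in the inline fast-path array */
		long fast[CAP];		/* fast-path storage */
		struct node *slow;	/* slow-path overflow list */
	};

	static int cache_insert(struct cache *c, long value)
	{
		struct node *n;

		pthread_mutex_lock(&c->lock);
		if (c->nr < CAP) {			/* fast path: array has room */
			c->fast[c->nr++] = value;
			pthread_mutex_unlock(&c->lock);
			return 0;
		}
		pthread_mutex_unlock(&c->lock);

		/* Slow path: allocate with the lock dropped ... */
		n = malloc(sizeof(*n));
		if (!n)
			return -1;
		n->value = value;

		pthread_mutex_lock(&c->lock);
		/* ... and re-check: another thread may have emptied the array
		 * while we were allocating, so revert to the fast path and
		 * discard the allocation. */
		if (c->nr < CAP) {
			c->fast[c->nr++] = value;
			pthread_mutex_unlock(&c->lock);
			free(n);
			return 0;
		}
		n->next = c->slow;			/* stand-in for the rb-tree insert */
		c->slow = n;
		pthread_mutex_unlock(&c->lock);
		return 0;
	}
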
+
+/* Called against a newly allocated buffer. Most likely nobody should
+ * be able to read this sort of metadata while it's still being
+ * allocated, but this is careful to take ip_io_sem anyway. */
+void ocfs2_set_new_buffer_uptodate(struct inode *inode,
+ struct buffer_head *bh)
+{
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+
+ /* This should definitely *not* exist in our cache */
+ BUG_ON(ocfs2_buffer_cached(oi, bh));
+
+ set_buffer_uptodate(bh);
+
+ down(&oi->ip_io_sem);
+ ocfs2_set_buffer_uptodate(inode, bh);
+ up(&oi->ip_io_sem);
+}
+
+/* Requires ip_lock. */
+static void ocfs2_remove_metadata_array(struct ocfs2_caching_info *ci,
+ int index)
+{
+ sector_t *array = ci->ci_cache.ci_array;
+ int bytes;
+
+ BUG_ON(index < 0 || index >= OCFS2_INODE_MAX_CACHE_ARRAY);
+ BUG_ON(index >= ci->ci_num_cached);
+ BUG_ON(!ci->ci_num_cached);
+
+ mlog(0, "remove index %d (num_cached = %u\n", index,
+ ci->ci_num_cached);
+
+ ci->ci_num_cached--;
+
+ /* don't need to copy if the array is now empty, or if we
+ * removed at the tail */
+ if (ci->ci_num_cached && index < ci->ci_num_cached) {
+ bytes = sizeof(sector_t) * (ci->ci_num_cached - index);
+ memmove(&array[index], &array[index + 1], bytes);
+ }
+}
+
+/* Requires ip_lock. */
+static void ocfs2_remove_metadata_tree(struct ocfs2_caching_info *ci,
+ struct ocfs2_meta_cache_item *item)
+{
+ mlog(0, "remove block %llu from tree\n",
+ (unsigned long long) item->c_block);
+
+ rb_erase(&item->c_node, &ci->ci_cache.ci_tree);
+ ci->ci_num_cached--;
+}
+
+/* Called when we remove a chunk of metadata from an inode. We don't
+ * bother reverting things to an inlined array in the case of a remove
+ * which moves us back under the limit. */
+void ocfs2_remove_from_cache(struct inode *inode,
+ struct buffer_head *bh)
+{
+ int index;
+ sector_t block = bh->b_blocknr;
+ struct ocfs2_meta_cache_item *item = NULL;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+ struct ocfs2_caching_info *ci = &oi->ip_metadata_cache;
+
+ spin_lock(&oi->ip_lock);
+ mlog(0, "Inode %"MLFu64", remove %llu, items = %u, array = %u\n",
+ oi->ip_blkno, (unsigned long long) block, ci->ci_num_cached,
+ oi->ip_flags & OCFS2_INODE_CACHE_INLINE);
+
+ if (oi->ip_flags & OCFS2_INODE_CACHE_INLINE) {
+ index = ocfs2_search_cache_array(ci, block);
+ if (index != -1)
+ ocfs2_remove_metadata_array(ci, index);
+ } else {
+ item = ocfs2_search_cache_tree(ci, block);
+ if (item)
+ ocfs2_remove_metadata_tree(ci, item);
+ }
+ spin_unlock(&oi->ip_lock);
+
+ if (item)
+ kmem_cache_free(ocfs2_uptodate_cachep, item);
+}
+
+int __init init_ocfs2_uptodate_cache(void)
+{
+ ocfs2_uptodate_cachep = kmem_cache_create("ocfs2_uptodate",
+ sizeof(struct ocfs2_meta_cache_item),
+ 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+ if (!ocfs2_uptodate_cachep)
+ return -ENOMEM;
+
+ mlog(0, "%u inlined cache items per inode.\n",
+ OCFS2_INODE_MAX_CACHE_ARRAY);
+
+ return 0;
+}
+
+void __exit exit_ocfs2_uptodate_cache(void)
+{
+ if (ocfs2_uptodate_cachep)
+ kmem_cache_destroy(ocfs2_uptodate_cachep);
+}
diff --git a/fs/ocfs2/uptodate.h b/fs/ocfs2/uptodate.h
new file mode 100644
index 00000000000..e5aacdf4eab
--- /dev/null
+++ b/fs/ocfs2/uptodate.h
@@ -0,0 +1,44 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * uptodate.h
+ *
+ * Cluster uptodate tracking
+ *
+ * Copyright (C) 2002, 2004, 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_UPTODATE_H
+#define OCFS2_UPTODATE_H
+
+int __init init_ocfs2_uptodate_cache(void);
+void __exit exit_ocfs2_uptodate_cache(void);
+
+void ocfs2_metadata_cache_init(struct inode *inode);
+void ocfs2_metadata_cache_purge(struct inode *inode);
+
+int ocfs2_buffer_uptodate(struct inode *inode,
+ struct buffer_head *bh);
+void ocfs2_set_buffer_uptodate(struct inode *inode,
+ struct buffer_head *bh);
+void ocfs2_set_new_buffer_uptodate(struct inode *inode,
+ struct buffer_head *bh);
+void ocfs2_remove_from_cache(struct inode *inode,
+ struct buffer_head *bh);
+
+#endif /* OCFS2_UPTODATE_H */
diff --git a/fs/ocfs2/ver.c b/fs/ocfs2/ver.c
new file mode 100644
index 00000000000..5405ce121c9
--- /dev/null
+++ b/fs/ocfs2/ver.c
@@ -0,0 +1,43 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * ver.c
+ *
+ * version string
+ *
+ * Copyright (C) 2002, 2005 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+#include "ver.h"
+
+#define OCFS2_BUILD_VERSION "1.3.3"
+
+#define VERSION_STR "OCFS2 " OCFS2_BUILD_VERSION
+
+void ocfs2_print_version(void)
+{
+ printk(KERN_INFO "%s\n", VERSION_STR);
+}
+
+MODULE_DESCRIPTION(VERSION_STR);
+
+MODULE_VERSION(OCFS2_BUILD_VERSION);
diff --git a/fs/ocfs2/ver.h b/fs/ocfs2/ver.h
new file mode 100644
index 00000000000..d7395cb91d2
--- /dev/null
+++ b/fs/ocfs2/ver.h
@@ -0,0 +1,31 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * ver.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef OCFS2_VER_H
+#define OCFS2_VER_H
+
+void ocfs2_print_version(void);
+
+#endif /* OCFS2_VER_H */
diff --git a/fs/ocfs2/vote.c b/fs/ocfs2/vote.c
new file mode 100644
index 00000000000..021978e0576
--- /dev/null
+++ b/fs/ocfs2/vote.c
@@ -0,0 +1,1202 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * vote.c
+ *
+ * Vote and messaging handling across the cluster
+ *
+ * Copyright (C) 2003, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/smp_lock.h>
+#include <linux/kthread.h>
+
+#include <cluster/heartbeat.h>
+#include <cluster/nodemanager.h>
+#include <cluster/tcp.h>
+
+#include <dlm/dlmapi.h>
+
+#define MLOG_MASK_PREFIX ML_VOTE
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+
+#include "alloc.h"
+#include "dlmglue.h"
+#include "extent_map.h"
+#include "heartbeat.h"
+#include "inode.h"
+#include "journal.h"
+#include "slot_map.h"
+#include "vote.h"
+
+#include "buffer_head_io.h"
+
+#define OCFS2_MESSAGE_TYPE_VOTE (0x1)
+#define OCFS2_MESSAGE_TYPE_RESPONSE (0x2)
+struct ocfs2_msg_hdr
+{
+ __be32 h_response_id; /* used to lookup message handle on sending
+ * node. */
+ __be32 h_request;
+ __be64 h_blkno;
+ __be32 h_generation;
+ __be32 h_node_num; /* node sending this particular message. */
+};
+
+/* OCFS2_MAX_FILENAME_LEN is 255 characters, but we want to align this
+ * for the network. */
+#define OCFS2_VOTE_FILENAME_LEN 256
+struct ocfs2_vote_msg
+{
+ struct ocfs2_msg_hdr v_hdr;
+ union {
+ __be32 v_generic1;
+ __be32 v_orphaned_slot; /* Used during delete votes */
+ __be32 v_nlink; /* Used during unlink votes */
+ } md1; /* Message type dependent 1 */
+ __be32 v_unlink_namelen;
+ __be64 v_unlink_parent;
+ u8 v_unlink_dirent[OCFS2_VOTE_FILENAME_LEN];
+};
+
+/* Responses are given these values to maintain backwards
+ * compatibility with older ocfs2 versions */
+#define OCFS2_RESPONSE_OK (0)
+#define OCFS2_RESPONSE_BUSY (-16)
+#define OCFS2_RESPONSE_BAD_MSG (-22)
+
+struct ocfs2_response_msg
+{
+ struct ocfs2_msg_hdr r_hdr;
+ __be32 r_response;
+ __be32 r_orphaned_slot;
+};
+
+struct ocfs2_vote_work {
+ struct list_head w_list;
+ struct ocfs2_vote_msg w_msg;
+};
+
+enum ocfs2_vote_request {
+ OCFS2_VOTE_REQ_INVALID = 0,
+ OCFS2_VOTE_REQ_DELETE,
+ OCFS2_VOTE_REQ_UNLINK,
+ OCFS2_VOTE_REQ_RENAME,
+ OCFS2_VOTE_REQ_MOUNT,
+ OCFS2_VOTE_REQ_UMOUNT,
+ OCFS2_VOTE_REQ_LAST
+};
+
+static inline int ocfs2_is_valid_vote_request(int request)
+{
+ return OCFS2_VOTE_REQ_INVALID < request &&
+ request < OCFS2_VOTE_REQ_LAST;
+}
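
The __be32/__be64 members of the message structures above mean every field is converted to a fixed-width big-endian representation before it goes on the wire and converted back on receipt. A tiny user-space sketch of the same idea follows, with htonl()/ntohl() standing in for the kernel's cpu_to_be32()/be32_to_cpu(); the structure and helpers are invented for the example.

	#include <stdint.h>
	#include <arpa/inet.h>	/* htonl()/ntohl() */

	struct hdr_wire {
		uint32_t request;	/* big-endian on the wire */
		uint32_t node_num;	/* big-endian on the wire */
	};

	static void hdr_pack(struct hdr_wire *w, uint32_t request, uint32_t node)
	{
		w->request = htonl(request);	/* cpu_to_be32() in the kernel */
		w->node_num = htonl(node);
	}

	static uint32_t hdr_request(const struct hdr_wire *w)
	{
		return ntohl(w->request);	/* be32_to_cpu() in the kernel */
	}
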
+
+typedef void (*ocfs2_net_response_callback)(void *priv,
+ struct ocfs2_response_msg *resp);
+struct ocfs2_net_response_cb {
+ ocfs2_net_response_callback rc_cb;
+ void *rc_priv;
+};
+
+struct ocfs2_net_wait_ctxt {
+ struct list_head n_list;
+ u32 n_response_id;
+ wait_queue_head_t n_event;
+ struct ocfs2_node_map n_node_map;
+ int n_response; /* an aggregate response. 0 if
+ * all nodes are go, < 0 on any
+ * negative response from any
+ * node or network error. */
+ struct ocfs2_net_response_cb *n_callback;
+};
+
+static void ocfs2_process_mount_request(struct ocfs2_super *osb,
+ unsigned int node_num)
+{
+ mlog(0, "MOUNT vote from node %u\n", node_num);
+ /* The other node only sends us this message when it has an EX
+ * lock on the superblock, so our recovery threads (if any have
+ * been launched) are waiting on it. */
+ ocfs2_recovery_map_clear(osb, node_num);
+ ocfs2_node_map_set_bit(osb, &osb->mounted_map, node_num);
+
+ /* We clear the umount map here because a node may have been
+ * previously mounted, safely unmounted but never stopped
+ * heartbeating - in which case we'd have a stale entry. */
+ ocfs2_node_map_clear_bit(osb, &osb->umount_map, node_num);
+}
+
+static void ocfs2_process_umount_request(struct ocfs2_super *osb,
+ unsigned int node_num)
+{
+ mlog(0, "UMOUNT vote from node %u\n", node_num);
+ ocfs2_node_map_clear_bit(osb, &osb->mounted_map, node_num);
+ ocfs2_node_map_set_bit(osb, &osb->umount_map, node_num);
+}
+
+void ocfs2_mark_inode_remotely_deleted(struct inode *inode)
+{
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+
+ assert_spin_locked(&oi->ip_lock);
+ /* We set the SKIP_DELETE flag on the inode so we don't try to
+ * delete it in delete_inode ourselves, thus avoiding
+ * unnecessary lock pinging. If the other node failed to wipe
+ * the inode as a result of a crash, then recovery will pick
+ * up the slack. */
+ oi->ip_flags |= OCFS2_INODE_DELETED|OCFS2_INODE_SKIP_DELETE;
+}
+
+static int ocfs2_process_delete_request(struct inode *inode,
+ int *orphaned_slot)
+{
+ int response = OCFS2_RESPONSE_BUSY;
+
+ mlog(0, "DELETE vote on inode %lu, read lnk_cnt = %u, slot = %d\n",
+ inode->i_ino, inode->i_nlink, *orphaned_slot);
+
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+
+ /* Whatever our vote response is, we want to make sure that
+ * the orphaned slot is recorded properly on this node *and*
+ * on the requesting node. Technically, if the requesting node
+ * did not know which slot the inode is orphaned in but we
+ * respond with BUSY it doesn't actually need the orphaned
+ * slot, but it doesn't hurt to record it here anyway. */
+ if ((*orphaned_slot) != OCFS2_INVALID_SLOT) {
+ mlog_bug_on_msg(OCFS2_I(inode)->ip_orphaned_slot !=
+ OCFS2_INVALID_SLOT &&
+ OCFS2_I(inode)->ip_orphaned_slot !=
+ (*orphaned_slot),
+ "Inode %"MLFu64": This node thinks it's "
+ "orphaned in slot %d, messaged it's in %d\n",
+ OCFS2_I(inode)->ip_blkno,
+ OCFS2_I(inode)->ip_orphaned_slot,
+ *orphaned_slot);
+
+ mlog(0, "Setting orphaned slot for inode %"MLFu64" to %d\n",
+ OCFS2_I(inode)->ip_blkno, *orphaned_slot);
+
+ OCFS2_I(inode)->ip_orphaned_slot = *orphaned_slot;
+ } else {
+ mlog(0, "Sending back orphaned slot %d for inode %"MLFu64"\n",
+ OCFS2_I(inode)->ip_orphaned_slot,
+ OCFS2_I(inode)->ip_blkno);
+
+ *orphaned_slot = OCFS2_I(inode)->ip_orphaned_slot;
+ }
+
+ /* vote no if the file is still open. */
+ if (OCFS2_I(inode)->ip_open_count) {
+ mlog(0, "open count = %u\n",
+ OCFS2_I(inode)->ip_open_count);
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+ goto done;
+ }
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+
+ /* directories are a bit ugly... What if someone is sitting in
+ * it? We want to make sure the inode is removed completely as
+ * a result of the iput in process_vote. */
+ if (S_ISDIR(inode->i_mode) && (atomic_read(&inode->i_count) != 1)) {
+ mlog(0, "i_count = %u\n", atomic_read(&inode->i_count));
+ goto done;
+ }
+
+ if (filemap_fdatawrite(inode->i_mapping)) {
+ mlog(ML_ERROR, "Could not sync inode %"MLFu64" for delete!\n",
+ OCFS2_I(inode)->ip_blkno);
+ goto done;
+ }
+ sync_mapping_buffers(inode->i_mapping);
+ truncate_inode_pages(inode->i_mapping, 0);
+ ocfs2_extent_map_trunc(inode, 0);
+
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+ /* double check open count - someone might have raced this
+ * thread into ocfs2_file_open while we were writing out
+ * data. If we're to allow a wipe of this inode now, we *must*
+ * hold the spinlock until we've marked it. */
+ if (OCFS2_I(inode)->ip_open_count) {
+ mlog(0, "Raced to wipe! open count = %u\n",
+ OCFS2_I(inode)->ip_open_count);
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+ goto done;
+ }
+
+ /* Mark the inode as being wiped from disk. */
+ ocfs2_mark_inode_remotely_deleted(inode);
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+
+ /* Not sure this is necessary anymore. */
+ d_prune_aliases(inode);
+
+ /* If we get here, then we're voting 'yes', so commit the
+ * delete on our side. */
+ response = OCFS2_RESPONSE_OK;
+done:
+ return response;
+}
+
+static int ocfs2_match_dentry(struct dentry *dentry,
+ u64 parent_blkno,
+ unsigned int namelen,
+ const char *name)
+{
+ struct inode *parent;
+
+ if (!dentry->d_parent) {
+ mlog(0, "Detached from parent.\n");
+ return 0;
+ }
+
+ parent = dentry->d_parent->d_inode;
+ /* Negative parent dentry? */
+ if (!parent)
+ return 0;
+
+ /* Name is in a different directory. */
+ if (OCFS2_I(parent)->ip_blkno != parent_blkno)
+ return 0;
+
+ if (dentry->d_name.len != namelen)
+ return 0;
+
+ /* comparison above guarantees this is safe. */
+ if (memcmp(dentry->d_name.name, name, namelen))
+ return 0;
+
+ return 1;
+}
+
+static void ocfs2_process_dentry_request(struct inode *inode,
+ int rename,
+ unsigned int new_nlink,
+ u64 parent_blkno,
+ unsigned int namelen,
+ const char *name)
+{
+ struct dentry *dentry = NULL;
+ struct list_head *p;
+ struct ocfs2_inode_info *oi = OCFS2_I(inode);
+
+ mlog(0, "parent %"MLFu64", namelen = %u, name = %.*s\n", parent_blkno,
+ namelen, namelen, name);
+
+ spin_lock(&dcache_lock);
+
+ /* Another node is removing this name from the system. It is
+ * up to us to find the corresponding dentry and if it exists,
+ * unhash it from the dcache. */
+ list_for_each(p, &inode->i_dentry) {
+ dentry = list_entry(p, struct dentry, d_alias);
+
+ if (ocfs2_match_dentry(dentry, parent_blkno, namelen, name)) {
+ mlog(0, "dentry found: %.*s\n",
+ dentry->d_name.len, dentry->d_name.name);
+
+ dget_locked(dentry);
+ break;
+ }
+
+ dentry = NULL;
+ }
+
+ spin_unlock(&dcache_lock);
+
+ if (dentry) {
+ d_delete(dentry);
+ dput(dentry);
+ }
+
+ /* rename votes don't send link counts */
+ if (!rename) {
+ mlog(0, "new_nlink = %u\n", new_nlink);
+
+ /* We don't have the proper locks here to directly
+ * change i_nlink and besides, the vote is sent
+ * *before* the operation so it may have failed on the
+ * other node. This passes a hint to ocfs2_drop_inode
+ * to force ocfs2_delete_inode, which will take the
+ * proper cluster locks to sort things out. */
+ if (new_nlink == 0) {
+ spin_lock(&oi->ip_lock);
+ oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
+ spin_unlock(&oi->ip_lock);
+ }
+ }
+}
+
+static void ocfs2_process_vote(struct ocfs2_super *osb,
+ struct ocfs2_vote_msg *msg)
+{
+ int net_status, vote_response;
+ int orphaned_slot = 0;
+ int rename = 0;
+ unsigned int node_num, generation, new_nlink, namelen;
+ u64 blkno, parent_blkno;
+ enum ocfs2_vote_request request;
+ struct inode *inode = NULL;
+ struct ocfs2_msg_hdr *hdr = &msg->v_hdr;
+ struct ocfs2_response_msg response;
+
+ /* decode the network mumbo jumbo into local variables. */
+ request = be32_to_cpu(hdr->h_request);
+ blkno = be64_to_cpu(hdr->h_blkno);
+ generation = be32_to_cpu(hdr->h_generation);
+ node_num = be32_to_cpu(hdr->h_node_num);
+ if (request == OCFS2_VOTE_REQ_DELETE)
+ orphaned_slot = be32_to_cpu(msg->md1.v_orphaned_slot);
+
+ mlog(0, "processing vote: request = %u, blkno = %"MLFu64", "
+ "generation = %u, node_num = %u, priv1 = %u\n", request,
+ blkno, generation, node_num, be32_to_cpu(msg->md1.v_generic1));
+
+ if (!ocfs2_is_valid_vote_request(request)) {
+ mlog(ML_ERROR, "Invalid vote request %d from node %u\n",
+ request, node_num);
+ vote_response = OCFS2_RESPONSE_BAD_MSG;
+ goto respond;
+ }
+
+ vote_response = OCFS2_RESPONSE_OK;
+
+ switch (request) {
+ case OCFS2_VOTE_REQ_UMOUNT:
+ ocfs2_process_umount_request(osb, node_num);
+ goto respond;
+ case OCFS2_VOTE_REQ_MOUNT:
+ ocfs2_process_mount_request(osb, node_num);
+ goto respond;
+ default:
+ /* avoids a gcc warning */
+ break;
+ }
+
+ /* We cannot process the remaining message types before we're
+ * fully mounted. It's perfectly safe however to send a 'yes'
+ * response as we can't possibly have any of the state they're
+ * asking us to modify yet. */
+ if (atomic_read(&osb->vol_state) == VOLUME_INIT)
+ goto respond;
+
+ /* If we get here, then the request is against an inode. */
+ inode = ocfs2_ilookup_for_vote(osb, blkno,
+ request == OCFS2_VOTE_REQ_DELETE);
+
+ /* Not finding the inode is perfectly valid - it means we're
+ * not interested in what the other node is about to do to it
+ * so in those cases we automatically respond with an
+ * affirmative. Cluster locking ensures that we won't race
+ * interest in the inode with this vote request. */
+ if (!inode)
+ goto respond;
+
+ /* Check generation values. It's possible for us to get a
+ * request against a stale inode. If so then we proceed as if
+ * we had not found an inode in the first place. */
+ if (inode->i_generation != generation) {
+ mlog(0, "generation passed %u != inode generation = %u, "
+ "ip_flags = %x, ip_blkno = %"MLFu64", msg %"MLFu64", "
+ "i_count = %u, message type = %u\n",
+ generation, inode->i_generation, OCFS2_I(inode)->ip_flags,
+ OCFS2_I(inode)->ip_blkno, blkno,
+ atomic_read(&inode->i_count), request);
+ iput(inode);
+ inode = NULL;
+ goto respond;
+ }
+
+ switch (request) {
+ case OCFS2_VOTE_REQ_DELETE:
+ vote_response = ocfs2_process_delete_request(inode,
+ &orphaned_slot);
+ break;
+ case OCFS2_VOTE_REQ_RENAME:
+ rename = 1;
+ /* fall through */
+ case OCFS2_VOTE_REQ_UNLINK:
+ parent_blkno = be64_to_cpu(msg->v_unlink_parent);
+ namelen = be32_to_cpu(msg->v_unlink_namelen);
+ /* new_nlink will be ignored in case of a rename vote */
+ new_nlink = be32_to_cpu(msg->md1.v_nlink);
+ ocfs2_process_dentry_request(inode, rename, new_nlink,
+ parent_blkno, namelen,
+ msg->v_unlink_dirent);
+ break;
+ default:
+ mlog(ML_ERROR, "node %u, invalid request: %u\n",
+ node_num, request);
+ vote_response = OCFS2_RESPONSE_BAD_MSG;
+ }
+
+respond:
+ /* Response structure is small so we just put it on the stack
+ * and stuff it inline. */
+ memset(&response, 0, sizeof(struct ocfs2_response_msg));
+ response.r_hdr.h_response_id = hdr->h_response_id;
+ response.r_hdr.h_blkno = hdr->h_blkno;
+ response.r_hdr.h_generation = hdr->h_generation;
+ response.r_hdr.h_node_num = cpu_to_be32(osb->node_num);
+ response.r_response = cpu_to_be32(vote_response);
+ response.r_orphaned_slot = cpu_to_be32(orphaned_slot);
+
+ net_status = o2net_send_message(OCFS2_MESSAGE_TYPE_RESPONSE,
+ osb->net_key,
+ &response,
+ sizeof(struct ocfs2_response_msg),
+ node_num,
+ NULL);
+ /* We still want to print an error for ENOPROTOOPT here. The
+ * sending node shouldn't have unregistered its net handler
+ * without sending an unmount vote first. */
+ if (net_status < 0
+ && net_status != -ETIMEDOUT
+ && net_status != -ENOTCONN)
+ mlog(ML_ERROR, "message to node %u fails with error %d!\n",
+ node_num, net_status);
+
+ if (inode)
+ iput(inode);
+}
+
+static void ocfs2_vote_thread_do_work(struct ocfs2_super *osb)
+{
+ unsigned long processed;
+ struct ocfs2_lock_res *lockres;
+ struct ocfs2_vote_work *work;
+
+ mlog_entry_void();
+
+ spin_lock(&osb->vote_task_lock);
+ /* grab this early so we know to try again if a state change and
+ * wakeup happen part-way through our work */
+ osb->vote_work_sequence = osb->vote_wake_sequence;
+
+ processed = osb->blocked_lock_count;
+ while (processed) {
+ BUG_ON(list_empty(&osb->blocked_lock_list));
+
+ lockres = list_entry(osb->blocked_lock_list.next,
+ struct ocfs2_lock_res, l_blocked_list);
+ list_del_init(&lockres->l_blocked_list);
+ osb->blocked_lock_count--;
+ spin_unlock(&osb->vote_task_lock);
+
+ BUG_ON(!processed);
+ processed--;
+
+ ocfs2_process_blocked_lock(osb, lockres);
+
+ spin_lock(&osb->vote_task_lock);
+ }
+
+ while (osb->vote_count) {
+ BUG_ON(list_empty(&osb->vote_list));
+ work = list_entry(osb->vote_list.next,
+ struct ocfs2_vote_work, w_list);
+ list_del(&work->w_list);
+ osb->vote_count--;
+ spin_unlock(&osb->vote_task_lock);
+
+ ocfs2_process_vote(osb, &work->w_msg);
+ kfree(work);
+
+ spin_lock(&osb->vote_task_lock);
+ }
+ spin_unlock(&osb->vote_task_lock);
+
+ mlog_exit_void();
+}
+
+static int ocfs2_vote_thread_lists_empty(struct ocfs2_super *osb)
+{
+ int empty = 0;
+
+ spin_lock(&osb->vote_task_lock);
+ if (list_empty(&osb->blocked_lock_list) &&
+ list_empty(&osb->vote_list))
+ empty = 1;
+
+ spin_unlock(&osb->vote_task_lock);
+ return empty;
+}
+
+static int ocfs2_vote_thread_should_wake(struct ocfs2_super *osb)
+{
+ int should_wake = 0;
+
+ spin_lock(&osb->vote_task_lock);
+ if (osb->vote_work_sequence != osb->vote_wake_sequence)
+ should_wake = 1;
+ spin_unlock(&osb->vote_task_lock);
+
+ return should_wake;
+}
+
+int ocfs2_vote_thread(void *arg)
+{
+ int status = 0;
+ struct ocfs2_super *osb = arg;
+
+ /* only quit once we've been asked to stop and there is no more
+ * work available */
+ while (!(kthread_should_stop() &&
+ ocfs2_vote_thread_lists_empty(osb))) {
+
+ wait_event_interruptible(osb->vote_event,
+ ocfs2_vote_thread_should_wake(osb) ||
+ kthread_should_stop());
+
+ mlog(0, "vote_thread: awoken\n");
+
+ ocfs2_vote_thread_do_work(osb);
+ }
+
+ osb->vote_task = NULL;
+ return status;
+}
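
The wakeup scheme above relies on a pair of sequence counters: ocfs2_kick_vote_thread() (see vote.h below) bumps vote_wake_sequence, and the thread copies it into vote_work_sequence before draining the lists, so a kick that races with in-flight work still forces another pass. Here is a user-space sketch of that scheme, with a pthread condition variable in place of the kernel wait queue and all names invented for the example.

	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
	static unsigned long wake_seq, work_seq;

	static void kick(void)		/* ocfs2_kick_vote_thread() analogue */
	{
		pthread_mutex_lock(&lock);
		wake_seq++;
		pthread_mutex_unlock(&lock);
		pthread_cond_signal(&cond);
	}

	static void worker_iteration(void)
	{
		pthread_mutex_lock(&lock);
		while (work_seq == wake_seq)	/* nothing new since the last pass */
			pthread_cond_wait(&cond, &lock);
		work_seq = wake_seq;		/* claim everything queued so far */
		pthread_mutex_unlock(&lock);
		/* ... drain the work lists here ... */
	}
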
+
+static struct ocfs2_net_wait_ctxt *ocfs2_new_net_wait_ctxt(unsigned int response_id)
+{
+ struct ocfs2_net_wait_ctxt *w;
+
+ w = kcalloc(1, sizeof(*w), GFP_KERNEL);
+ if (!w) {
+ mlog_errno(-ENOMEM);
+ goto bail;
+ }
+
+ INIT_LIST_HEAD(&w->n_list);
+ init_waitqueue_head(&w->n_event);
+ ocfs2_node_map_init(&w->n_node_map);
+ w->n_response_id = response_id;
+ w->n_callback = NULL;
+bail:
+ return w;
+}
+
+static unsigned int ocfs2_new_response_id(struct ocfs2_super *osb)
+{
+ unsigned int ret;
+
+ spin_lock(&osb->net_response_lock);
+ ret = ++osb->net_response_ids;
+ spin_unlock(&osb->net_response_lock);
+
+ return ret;
+}
+
+static void ocfs2_dequeue_net_wait_ctxt(struct ocfs2_super *osb,
+ struct ocfs2_net_wait_ctxt *w)
+{
+ spin_lock(&osb->net_response_lock);
+ list_del(&w->n_list);
+ spin_unlock(&osb->net_response_lock);
+}
+
+static void ocfs2_queue_net_wait_ctxt(struct ocfs2_super *osb,
+ struct ocfs2_net_wait_ctxt *w)
+{
+ spin_lock(&osb->net_response_lock);
+ list_add_tail(&w->n_list,
+ &osb->net_response_list);
+ spin_unlock(&osb->net_response_lock);
+}
+
+static void __ocfs2_mark_node_responded(struct ocfs2_super *osb,
+ struct ocfs2_net_wait_ctxt *w,
+ int node_num)
+{
+ assert_spin_locked(&osb->net_response_lock);
+
+ ocfs2_node_map_clear_bit(osb, &w->n_node_map, node_num);
+ if (ocfs2_node_map_is_empty(osb, &w->n_node_map))
+ wake_up(&w->n_event);
+}
+
+/* Intended to be called from the node down callback; we remove the
+ * node from all our response contexts as if it had responded */
+void ocfs2_remove_node_from_vote_queues(struct ocfs2_super *osb,
+ int node_num)
+{
+ struct list_head *p;
+ struct ocfs2_net_wait_ctxt *w = NULL;
+
+ spin_lock(&osb->net_response_lock);
+
+ list_for_each(p, &osb->net_response_list) {
+ w = list_entry(p, struct ocfs2_net_wait_ctxt, n_list);
+
+ __ocfs2_mark_node_responded(osb, w, node_num);
+ }
+
+ spin_unlock(&osb->net_response_lock);
+}
+
+static int ocfs2_broadcast_vote(struct ocfs2_super *osb,
+ struct ocfs2_vote_msg *request,
+ unsigned int response_id,
+ int *response,
+ struct ocfs2_net_response_cb *callback)
+{
+ int status, i, remote_err;
+ struct ocfs2_net_wait_ctxt *w = NULL;
+ int dequeued = 0;
+
+ mlog_entry_void();
+
+ w = ocfs2_new_net_wait_ctxt(response_id);
+ if (!w) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+ w->n_callback = callback;
+
+ /* we're pretty much ready to go at this point, and this fills
+ * in n_response which we need anyway... */
+ ocfs2_queue_net_wait_ctxt(osb, w);
+
+ i = ocfs2_node_map_iterate(osb, &osb->mounted_map, 0);
+
+ while (i != O2NM_INVALID_NODE_NUM) {
+ if (i != osb->node_num) {
+ mlog(0, "trying to send request to node %i\n", i);
+ ocfs2_node_map_set_bit(osb, &w->n_node_map, i);
+
+ remote_err = 0;
+ status = o2net_send_message(OCFS2_MESSAGE_TYPE_VOTE,
+ osb->net_key,
+ request,
+ sizeof(*request),
+ i,
+ &remote_err);
+ if (status == -ETIMEDOUT) {
+ mlog(0, "remote node %d timed out!\n", i);
+ status = -EAGAIN;
+ goto bail;
+ }
+ if (remote_err < 0) {
+ status = remote_err;
+ mlog(0, "remote error %d on node %d!\n",
+ remote_err, i);
+ mlog_errno(status);
+ goto bail;
+ }
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+ i++;
+ i = ocfs2_node_map_iterate(osb, &osb->mounted_map, i);
+ mlog(0, "next is %d, i am %d\n", i, osb->node_num);
+ }
+ mlog(0, "done sending, now waiting on responses...\n");
+
+ wait_event(w->n_event, ocfs2_node_map_is_empty(osb, &w->n_node_map));
+
+ ocfs2_dequeue_net_wait_ctxt(osb, w);
+ dequeued = 1;
+
+ *response = w->n_response;
+ status = 0;
+bail:
+ if (w) {
+ if (!dequeued)
+ ocfs2_dequeue_net_wait_ctxt(osb, w);
+ kfree(w);
+ }
+
+ mlog_exit(status);
+ return status;
+}
+
+static struct ocfs2_vote_msg * ocfs2_new_vote_request(struct ocfs2_super *osb,
+ u64 blkno,
+ unsigned int generation,
+ enum ocfs2_vote_request type,
+ u32 priv)
+{
+ struct ocfs2_vote_msg *request;
+ struct ocfs2_msg_hdr *hdr;
+
+ BUG_ON(!ocfs2_is_valid_vote_request(type));
+
+ request = kcalloc(1, sizeof(*request), GFP_KERNEL);
+ if (!request) {
+ mlog_errno(-ENOMEM);
+ } else {
+ hdr = &request->v_hdr;
+ hdr->h_node_num = cpu_to_be32(osb->node_num);
+ hdr->h_request = cpu_to_be32(type);
+ hdr->h_blkno = cpu_to_be64(blkno);
+ hdr->h_generation = cpu_to_be32(generation);
+
+ request->md1.v_generic1 = cpu_to_be32(priv);
+ }
+
+ return request;
+}
+
+/* Complete the buildup of a new vote request and process the
+ * broadcast return value. */
+static int ocfs2_do_request_vote(struct ocfs2_super *osb,
+ struct ocfs2_vote_msg *request,
+ struct ocfs2_net_response_cb *callback)
+{
+ int status, response;
+ unsigned int response_id;
+ struct ocfs2_msg_hdr *hdr;
+
+ response_id = ocfs2_new_response_id(osb);
+
+ hdr = &request->v_hdr;
+ hdr->h_response_id = cpu_to_be32(response_id);
+
+ status = ocfs2_broadcast_vote(osb, request, response_id, &response,
+ callback);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = response;
+bail:
+
+ return status;
+}
+
+static int ocfs2_request_vote(struct inode *inode,
+ struct ocfs2_vote_msg *request,
+ struct ocfs2_net_response_cb *callback)
+{
+ int status;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+ if (ocfs2_inode_is_new(inode))
+ return 0;
+
+ status = -EAGAIN;
+ while (status == -EAGAIN) {
+ if (!(osb->s_mount_opt & OCFS2_MOUNT_NOINTR) &&
+ signal_pending(current))
+ return -ERESTARTSYS;
+
+ status = ocfs2_super_lock(osb, 0);
+ if (status < 0) {
+ mlog_errno(status);
+ break;
+ }
+
+ status = 0;
+ if (!ocfs2_node_map_is_only(osb, &osb->mounted_map,
+ osb->node_num))
+ status = ocfs2_do_request_vote(osb, request, callback);
+
+ ocfs2_super_unlock(osb, 0);
+ }
+ return status;
+}
+
+static void ocfs2_delete_response_cb(void *priv,
+ struct ocfs2_response_msg *resp)
+{
+ int orphaned_slot, node;
+ struct inode *inode = priv;
+
+ orphaned_slot = be32_to_cpu(resp->r_orphaned_slot);
+ node = be32_to_cpu(resp->r_hdr.h_node_num);
+ mlog(0, "node %d tells us that inode %"MLFu64" is orphaned in slot "
+ "%d\n", node, OCFS2_I(inode)->ip_blkno, orphaned_slot);
+
+ /* The other node may not actually know which slot the inode
+ * is orphaned in. */
+ if (orphaned_slot == OCFS2_INVALID_SLOT)
+ return;
+
+ /* Ok, the responding node knows which slot this inode is
+ * orphaned in. We verify that the information is correct and
+ * then record this in the inode. ocfs2_delete_inode will use
+ * this information to determine which lock to take. */
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+ mlog_bug_on_msg(OCFS2_I(inode)->ip_orphaned_slot != orphaned_slot &&
+ OCFS2_I(inode)->ip_orphaned_slot
+ != OCFS2_INVALID_SLOT, "Inode %"MLFu64": Node %d "
+ "says it's orphaned in slot %d, we think it's in %d\n",
+ OCFS2_I(inode)->ip_blkno,
+ be32_to_cpu(resp->r_hdr.h_node_num),
+ orphaned_slot, OCFS2_I(inode)->ip_orphaned_slot);
+
+ OCFS2_I(inode)->ip_orphaned_slot = orphaned_slot;
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+}
+
+int ocfs2_request_delete_vote(struct inode *inode)
+{
+ int orphaned_slot, status;
+ struct ocfs2_net_response_cb delete_cb;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_vote_msg *request;
+
+ spin_lock(&OCFS2_I(inode)->ip_lock);
+ orphaned_slot = OCFS2_I(inode)->ip_orphaned_slot;
+ spin_unlock(&OCFS2_I(inode)->ip_lock);
+
+ delete_cb.rc_cb = ocfs2_delete_response_cb;
+ delete_cb.rc_priv = inode;
+
+ mlog(0, "Inode %"MLFu64", we start thinking orphaned slot is %d\n",
+ OCFS2_I(inode)->ip_blkno, orphaned_slot);
+
+ status = -ENOMEM;
+ request = ocfs2_new_vote_request(osb, OCFS2_I(inode)->ip_blkno,
+ inode->i_generation,
+ OCFS2_VOTE_REQ_DELETE, orphaned_slot);
+ if (request) {
+ status = ocfs2_request_vote(inode, request, &delete_cb);
+
+ kfree(request);
+ }
+
+ return status;
+}
+
+static void ocfs2_setup_unlink_vote(struct ocfs2_vote_msg *request,
+ struct dentry *dentry)
+{
+ struct inode *parent = dentry->d_parent->d_inode;
+
+ /* We need some values which will uniquely identify a dentry
+ * on the other nodes so that they can find it and run
+ * d_delete against it. Parent directory block and full name
+ * should suffice. */
+
+ mlog(0, "unlink/rename request: parent: %"MLFu64" name: %.*s\n",
+ OCFS2_I(parent)->ip_blkno, dentry->d_name.len,
+ dentry->d_name.name);
+
+ request->v_unlink_parent = cpu_to_be64(OCFS2_I(parent)->ip_blkno);
+ request->v_unlink_namelen = cpu_to_be32(dentry->d_name.len);
+ memcpy(request->v_unlink_dirent, dentry->d_name.name,
+ dentry->d_name.len);
+}
+
+int ocfs2_request_unlink_vote(struct inode *inode,
+ struct dentry *dentry,
+ unsigned int nlink)
+{
+ int status;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_vote_msg *request;
+
+ if (dentry->d_name.len > OCFS2_VOTE_FILENAME_LEN)
+ return -ENAMETOOLONG;
+
+ status = -ENOMEM;
+ request = ocfs2_new_vote_request(osb, OCFS2_I(inode)->ip_blkno,
+ inode->i_generation,
+ OCFS2_VOTE_REQ_UNLINK, nlink);
+ if (request) {
+ ocfs2_setup_unlink_vote(request, dentry);
+
+ status = ocfs2_request_vote(inode, request, NULL);
+
+ kfree(request);
+ }
+ return status;
+}
+
+int ocfs2_request_rename_vote(struct inode *inode,
+ struct dentry *dentry)
+{
+ int status;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ struct ocfs2_vote_msg *request;
+
+ if (dentry->d_name.len > OCFS2_VOTE_FILENAME_LEN)
+ return -ENAMETOOLONG;
+
+ status = -ENOMEM;
+ request = ocfs2_new_vote_request(osb, OCFS2_I(inode)->ip_blkno,
+ inode->i_generation,
+ OCFS2_VOTE_REQ_RENAME, 0);
+ if (request) {
+ ocfs2_setup_unlink_vote(request, dentry);
+
+ status = ocfs2_request_vote(inode, request, NULL);
+
+ kfree(request);
+ }
+ return status;
+}
+
+int ocfs2_request_mount_vote(struct ocfs2_super *osb)
+{
+ int status;
+ struct ocfs2_vote_msg *request = NULL;
+
+ request = ocfs2_new_vote_request(osb, 0ULL, 0,
+ OCFS2_VOTE_REQ_MOUNT, 0);
+ if (!request) {
+ status = -ENOMEM;
+ goto bail;
+ }
+
+ status = -EAGAIN;
+ while (status == -EAGAIN) {
+ if (!(osb->s_mount_opt & OCFS2_MOUNT_NOINTR) &&
+ signal_pending(current)) {
+ status = -ERESTARTSYS;
+ goto bail;
+ }
+
+ if (ocfs2_node_map_is_only(osb, &osb->mounted_map,
+ osb->node_num)) {
+ status = 0;
+ goto bail;
+ }
+
+ status = ocfs2_do_request_vote(osb, request, NULL);
+ }
+
+bail:
+ if (request)
+ kfree(request);
+
+ return status;
+}
+
+int ocfs2_request_umount_vote(struct ocfs2_super *osb)
+{
+ int status;
+ struct ocfs2_vote_msg *request = NULL;
+
+ request = ocfs2_new_vote_request(osb, 0ULL, 0,
+ OCFS2_VOTE_REQ_UMOUNT, 0);
+ if (!request) {
+ status = -ENOMEM;
+ goto bail;
+ }
+
+ status = -EAGAIN;
+ while (status == -EAGAIN) {
+ /* Do not check signals on this vote... We really want
+ * this one to go all the way through. */
+
+ if (ocfs2_node_map_is_only(osb, &osb->mounted_map,
+ osb->node_num)) {
+ status = 0;
+ goto bail;
+ }
+
+ status = ocfs2_do_request_vote(osb, request, NULL);
+ }
+
+bail:
+ if (request)
+ kfree(request);
+
+ return status;
+}
+
+/* TODO: This should eventually be a hash table! */
+static struct ocfs2_net_wait_ctxt * __ocfs2_find_net_wait_ctxt(struct ocfs2_super *osb,
+ u32 response_id)
+{
+ struct list_head *p;
+ struct ocfs2_net_wait_ctxt *w = NULL;
+
+ list_for_each(p, &osb->net_response_list) {
+ w = list_entry(p, struct ocfs2_net_wait_ctxt, n_list);
+ if (response_id == w->n_response_id)
+ break;
+ w = NULL;
+ }
+
+ return w;
+}
+
+/* Translate response codes into local node errno values */
+static inline int ocfs2_translate_response(int response)
+{
+ int ret;
+
+ switch (response) {
+ case OCFS2_RESPONSE_OK:
+ ret = 0;
+ break;
+
+ case OCFS2_RESPONSE_BUSY:
+ ret = -EBUSY;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ocfs2_handle_response_message(struct o2net_msg *msg,
+ u32 len,
+ void *data)
+{
+ unsigned int response_id, node_num;
+ int response_status;
+ struct ocfs2_super *osb = data;
+ struct ocfs2_response_msg *resp;
+ struct ocfs2_net_wait_ctxt * w;
+ struct ocfs2_net_response_cb *resp_cb;
+
+ resp = (struct ocfs2_response_msg *) msg->buf;
+
+ response_id = be32_to_cpu(resp->r_hdr.h_response_id);
+ node_num = be32_to_cpu(resp->r_hdr.h_node_num);
+ response_status =
+ ocfs2_translate_response(be32_to_cpu(resp->r_response));
+
+ mlog(0, "received response message:\n");
+ mlog(0, "h_response_id = %u\n", response_id);
+ mlog(0, "h_request = %u\n", be32_to_cpu(resp->r_hdr.h_request));
+ mlog(0, "h_blkno = %"MLFu64"\n", be64_to_cpu(resp->r_hdr.h_blkno));
+ mlog(0, "h_generation = %u\n", be32_to_cpu(resp->r_hdr.h_generation));
+ mlog(0, "h_node_num = %u\n", node_num);
+ mlog(0, "r_response = %d\n", response_status);
+
+ spin_lock(&osb->net_response_lock);
+ w = __ocfs2_find_net_wait_ctxt(osb, response_id);
+ if (!w) {
+ mlog(0, "request not found!\n");
+ goto bail;
+ }
+ resp_cb = w->n_callback;
+
+ if (response_status && (!w->n_response)) {
+ /* we only really need one negative response so don't
+ * set it twice. */
+ w->n_response = response_status;
+ }
+
+ if (resp_cb) {
+ spin_unlock(&osb->net_response_lock);
+
+ resp_cb->rc_cb(resp_cb->rc_priv, resp);
+
+ spin_lock(&osb->net_response_lock);
+ }
+
+ __ocfs2_mark_node_responded(osb, w, node_num);
+bail:
+ spin_unlock(&osb->net_response_lock);
+
+ return 0;
+}
+
+static int ocfs2_handle_vote_message(struct o2net_msg *msg,
+ u32 len,
+ void *data)
+{
+ int status;
+ struct ocfs2_super *osb = data;
+ struct ocfs2_vote_work *work;
+
+ work = kmalloc(sizeof(struct ocfs2_vote_work), GFP_KERNEL);
+ if (!work) {
+ status = -ENOMEM;
+ mlog_errno(status);
+ goto bail;
+ }
+
+ INIT_LIST_HEAD(&work->w_list);
+ memcpy(&work->w_msg, msg->buf, sizeof(struct ocfs2_vote_msg));
+
+ mlog(0, "scheduling vote request:\n");
+ mlog(0, "h_response_id = %u\n",
+ be32_to_cpu(work->w_msg.v_hdr.h_response_id));
+ mlog(0, "h_request = %u\n", be32_to_cpu(work->w_msg.v_hdr.h_request));
+ mlog(0, "h_blkno = %"MLFu64"\n",
+ be64_to_cpu(work->w_msg.v_hdr.h_blkno));
+ mlog(0, "h_generation = %u\n",
+ be32_to_cpu(work->w_msg.v_hdr.h_generation));
+ mlog(0, "h_node_num = %u\n",
+ be32_to_cpu(work->w_msg.v_hdr.h_node_num));
+ mlog(0, "v_generic1 = %u\n", be32_to_cpu(work->w_msg.md1.v_generic1));
+
+ spin_lock(&osb->vote_task_lock);
+ list_add_tail(&work->w_list, &osb->vote_list);
+ osb->vote_count++;
+ spin_unlock(&osb->vote_task_lock);
+
+ ocfs2_kick_vote_thread(osb);
+
+ status = 0;
+bail:
+ return status;
+}
+
+void ocfs2_unregister_net_handlers(struct ocfs2_super *osb)
+{
+ if (!osb->net_key)
+ return;
+
+ o2net_unregister_handler_list(&osb->osb_net_handlers);
+
+ if (!list_empty(&osb->net_response_list))
+ mlog(ML_ERROR, "net response list not empty!\n");
+
+ osb->net_key = 0;
+}
+
+int ocfs2_register_net_handlers(struct ocfs2_super *osb)
+{
+ int status = 0;
+
+ status = o2net_register_handler(OCFS2_MESSAGE_TYPE_RESPONSE,
+ osb->net_key,
+ sizeof(struct ocfs2_response_msg),
+ ocfs2_handle_response_message,
+ osb, &osb->osb_net_handlers);
+ if (status) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = o2net_register_handler(OCFS2_MESSAGE_TYPE_VOTE,
+ osb->net_key,
+ sizeof(struct ocfs2_vote_msg),
+ ocfs2_handle_vote_message,
+ osb, &osb->osb_net_handlers);
+ if (status) {
+ mlog_errno(status);
+ goto bail;
+ }
+bail:
+ if (status < 0)
+ ocfs2_unregister_net_handlers(osb);
+
+ return status;
+}
diff --git a/fs/ocfs2/vote.h b/fs/ocfs2/vote.h
new file mode 100644
index 00000000000..9cce6070346
--- /dev/null
+++ b/fs/ocfs2/vote.h
@@ -0,0 +1,56 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * vote.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2002, 2004 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+
+#ifndef VOTE_H
+#define VOTE_H
+
+int ocfs2_vote_thread(void *arg);
+static inline void ocfs2_kick_vote_thread(struct ocfs2_super *osb)
+{
+ spin_lock(&osb->vote_task_lock);
+ /* make sure the voting thread gets a swipe at whatever changes
+ * the caller may have made to the voting state */
+ osb->vote_wake_sequence++;
+ spin_unlock(&osb->vote_task_lock);
+ wake_up(&osb->vote_event);
+}
+
+int ocfs2_request_delete_vote(struct inode *inode);
+int ocfs2_request_unlink_vote(struct inode *inode,
+ struct dentry *dentry,
+ unsigned int nlink);
+int ocfs2_request_rename_vote(struct inode *inode,
+ struct dentry *dentry);
+int ocfs2_request_mount_vote(struct ocfs2_super *osb);
+int ocfs2_request_umount_vote(struct ocfs2_super *osb);
+int ocfs2_register_net_handlers(struct ocfs2_super *osb);
+void ocfs2_unregister_net_handlers(struct ocfs2_super *osb);
+
+void ocfs2_mark_inode_remotely_deleted(struct inode *inode);
+
+void ocfs2_remove_node_from_vote_queues(struct ocfs2_super *osb,
+ int node_num);
+#endif
diff --git a/fs/open.c b/fs/open.c
index f53a5b9ffb7..8e20c1f3256 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -16,6 +16,7 @@
#include <linux/tty.h>
#include <linux/namei.h>
#include <linux/backing-dev.h>
+#include <linux/capability.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/vfs.h>
@@ -194,7 +195,8 @@ out:
return error;
}
-int do_truncate(struct dentry *dentry, loff_t length, struct file *filp)
+int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
+ struct file *filp)
{
int err;
struct iattr newattrs;
@@ -204,19 +206,19 @@ int do_truncate(struct dentry *dentry, loff_t length, struct file *filp)
return -EINVAL;
newattrs.ia_size = length;
- newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
+ newattrs.ia_valid = ATTR_SIZE | time_attrs;
if (filp) {
newattrs.ia_file = filp;
newattrs.ia_valid |= ATTR_FILE;
}
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&dentry->d_inode->i_mutex);
err = notify_change(dentry, &newattrs);
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
return err;
}
-static inline long do_sys_truncate(const char __user * path, loff_t length)
+static long do_sys_truncate(const char __user * path, loff_t length)
{
struct nameidata nd;
struct inode * inode;
@@ -266,7 +268,7 @@ static inline long do_sys_truncate(const char __user * path, loff_t length)
error = locks_verify_truncate(inode, NULL, length);
if (!error) {
DQUOT_INIT(inode);
- error = do_truncate(nd.dentry, length, NULL);
+ error = do_truncate(nd.dentry, length, 0, NULL);
}
put_write_access(inode);
@@ -282,7 +284,7 @@ asmlinkage long sys_truncate(const char __user * path, unsigned long length)
return do_sys_truncate(path, (long)length);
}
-static inline long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
+static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
{
struct inode * inode;
struct dentry *dentry;
@@ -318,7 +320,7 @@ static inline long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
error = locks_verify_truncate(inode, file, length);
if (!error)
- error = do_truncate(dentry, length, file);
+ error = do_truncate(dentry, length, 0, file);
out_putf:
fput(file);
out:
@@ -397,9 +399,9 @@ asmlinkage long sys_utime(char __user * filename, struct utimbuf __user * times)
(error = vfs_permission(&nd, MAY_WRITE)) != 0)
goto dput_and_out;
}
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
error = notify_change(nd.dentry, &newattrs);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
dput_and_out:
path_release(&nd);
out:
@@ -450,9 +452,9 @@ long do_utimes(char __user * filename, struct timeval * times)
(error = vfs_permission(&nd, MAY_WRITE)) != 0)
goto dput_and_out;
}
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
error = notify_change(nd.dentry, &newattrs);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
dput_and_out:
path_release(&nd);
out:
@@ -619,13 +621,13 @@ asmlinkage long sys_fchmod(unsigned int fd, mode_t mode)
err = -EPERM;
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
goto out_putf;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if (mode == (mode_t) -1)
mode = inode->i_mode;
newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
err = notify_change(dentry, &newattrs);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
out_putf:
fput(file);
@@ -653,13 +655,13 @@ asmlinkage long sys_chmod(const char __user * filename, mode_t mode)
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
goto dput_and_out;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if (mode == (mode_t) -1)
mode = inode->i_mode;
newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
error = notify_change(nd.dentry, &newattrs);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
dput_and_out:
path_release(&nd);
@@ -695,9 +697,9 @@ static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
}
if (!S_ISDIR(inode->i_mode))
newattrs.ia_valid |= ATTR_KILL_SUID|ATTR_KILL_SGID;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
error = notify_change(dentry, &newattrs);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
out:
return error;
}
@@ -970,7 +972,7 @@ out:
EXPORT_SYMBOL(get_unused_fd);
-static inline void __put_unused_fd(struct files_struct *files, unsigned int fd)
+static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
struct fdtable *fdt = files_fdtable(files);
__FD_CLR(fd, fdt->open_fds);
diff --git a/fs/partitions/Kconfig b/fs/partitions/Kconfig
index 656bc43431b..7490cc9208b 100644
--- a/fs/partitions/Kconfig
+++ b/fs/partitions/Kconfig
@@ -21,26 +21,30 @@ config ACORN_PARTITION
Support hard disks partitioned under Acorn operating systems.
config ACORN_PARTITION_CUMANA
- bool "Cumana partition support" if PARTITION_ADVANCED && ACORN_PARTITION
+ bool "Cumana partition support" if PARTITION_ADVANCED
default y if ARCH_ACORN
+ depends on ACORN_PARTITION
help
Say Y here if you would like to use hard disks under Linux which
were partitioned using the Cumana interface on Acorn machines.
config ACORN_PARTITION_EESOX
- bool "EESOX partition support" if PARTITION_ADVANCED && ACORN_PARTITION
+ bool "EESOX partition support" if PARTITION_ADVANCED
default y if ARCH_ACORN
+ depends on ACORN_PARTITION
config ACORN_PARTITION_ICS
- bool "ICS partition support" if PARTITION_ADVANCED && ACORN_PARTITION
+ bool "ICS partition support" if PARTITION_ADVANCED
default y if ARCH_ACORN
+ depends on ACORN_PARTITION
help
Say Y here if you would like to use hard disks under Linux which
were partitioned using the ICS interface on Acorn machines.
config ACORN_PARTITION_ADFS
- bool "Native filecore partition support" if PARTITION_ADVANCED && ACORN_PARTITION
+ bool "Native filecore partition support" if PARTITION_ADVANCED
default y if ARCH_ACORN
+ depends on ACORN_PARTITION
help
The Acorn Disc Filing System is the standard file system of the
RiscOS operating system which runs on Acorn's ARM-based Risc PC
@@ -48,15 +52,17 @@ config ACORN_PARTITION_ADFS
`Y' here, Linux will support disk partitions created under ADFS.
config ACORN_PARTITION_POWERTEC
- bool "PowerTec partition support" if PARTITION_ADVANCED && ACORN_PARTITION
+ bool "PowerTec partition support" if PARTITION_ADVANCED
default y if ARCH_ACORN
+ depends on ACORN_PARTITION
help
Support reading partition tables created on Acorn machines using
the PowerTec SCSI drive.
config ACORN_PARTITION_RISCIX
- bool "RISCiX partition support" if PARTITION_ADVANCED && ACORN_PARTITION
+ bool "RISCiX partition support" if PARTITION_ADVANCED
default y if ARCH_ACORN
+ depends on ACORN_PARTITION
help
Once upon a time, there was a native Unix port for the Acorn series
of machines called RISCiX. If you say 'Y' here, Linux will be able
@@ -85,7 +91,7 @@ config ATARI_PARTITION
config IBM_PARTITION
bool "IBM disk label and partition support"
- depends on PARTITION_ADVANCED && ARCH_S390
+ depends on PARTITION_ADVANCED && S390
help
Say Y here if you would like to be able to read the hard disk
partition table format used by IBM DASD disks operating under CMS.
@@ -224,5 +230,3 @@ config EFI_PARTITION
Say Y here if you would like to use hard disks under Linux which
were partitioned using EFI GPT. Presently only useful on the
IA-64 platform.
-
-# define_bool CONFIG_ACORN_PARTITION_CUMANA y
diff --git a/fs/partitions/ibm.c b/fs/partitions/ibm.c
index 6327bcb2d73..78010ad60e4 100644
--- a/fs/partitions/ibm.c
+++ b/fs/partitions/ibm.c
@@ -56,7 +56,10 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
struct hd_geometry *geo;
char type[5] = {0,};
char name[7] = {0,};
- struct vtoc_volume_label *vlabel;
+ union label_t {
+ struct vtoc_volume_label vol;
+ struct vtoc_cms_label cms;
+ } *label;
unsigned char *data;
Sector sect;
@@ -64,9 +67,8 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
goto out_noinfo;
if ((geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL)) == NULL)
goto out_nogeo;
- if ((vlabel = kmalloc(sizeof(struct vtoc_volume_label),
- GFP_KERNEL)) == NULL)
- goto out_novlab;
+ if ((label = kmalloc(sizeof(union label_t), GFP_KERNEL)) == NULL)
+ goto out_nolab;
if (ioctl_by_bdev(bdev, BIODASDINFO, (unsigned long)info) != 0 ||
ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0)
@@ -87,7 +89,7 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
strncpy(name, data + 8, 6);
else
strncpy(name, data + 4, 6);
- memcpy (vlabel, data, sizeof(struct vtoc_volume_label));
+ memcpy(label, data, sizeof(union label_t));
put_dev_sector(sect);
EBCASC(type, 4);
@@ -100,14 +102,12 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
/*
* VM style CMS1 labeled disk
*/
- int *label = (int *) vlabel;
-
- if (label[13] != 0) {
+ if (label->cms.disk_offset != 0) {
printk("CMS1/%8s(MDSK):", name);
/* disk is reserved minidisk */
- blocksize = label[3];
- offset = label[13];
- size = (label[7] - 1)*(blocksize >> 9);
+ blocksize = label->cms.block_size;
+ offset = label->cms.disk_offset;
+ size = (label->cms.block_count - 1) * (blocksize >> 9);
} else {
printk("CMS1/%8s:", name);
offset = (info->label_block + 1);
@@ -126,7 +126,7 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
printk("VOL1/%8s:", name);
/* get block number and read then go through format1 labels */
- blk = cchhb2blk(&vlabel->vtoc, geo) + 1;
+ blk = cchhb2blk(&label->vol.vtoc, geo) + 1;
counter = 0;
while ((data = read_dev_sector(bdev, blk*(blocksize/512),
&sect)) != NULL) {
@@ -174,7 +174,7 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
}
printk("\n");
- kfree(vlabel);
+ kfree(label);
kfree(geo);
kfree(info);
return 1;
@@ -182,8 +182,8 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
out_readerr:
out_badsect:
out_noioctl:
- kfree(vlabel);
-out_novlab:
+ kfree(label);
+out_nolab:
kfree(geo);
out_nogeo:
kfree(info);
diff --git a/fs/pipe.c b/fs/pipe.c
index 66aa0b938d6..eef0f29e86e 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -44,10 +44,10 @@ void pipe_wait(struct inode * inode)
* is considered a noninteractive wait:
*/
prepare_to_wait(PIPE_WAIT(*inode), &wait, TASK_INTERRUPTIBLE|TASK_NONINTERACTIVE);
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
schedule();
finish_wait(PIPE_WAIT(*inode), &wait);
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
}
static inline int
@@ -136,7 +136,7 @@ pipe_readv(struct file *filp, const struct iovec *_iov,
do_wakeup = 0;
ret = 0;
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
info = inode->i_pipe;
for (;;) {
int bufs = info->nrbufs;
@@ -200,7 +200,7 @@ pipe_readv(struct file *filp, const struct iovec *_iov,
}
pipe_wait(inode);
}
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
/* Signal writers asynchronously that there is more room. */
if (do_wakeup) {
wake_up_interruptible(PIPE_WAIT(*inode));
@@ -237,7 +237,7 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
do_wakeup = 0;
ret = 0;
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
info = inode->i_pipe;
if (!PIPE_READERS(*inode)) {
@@ -341,13 +341,13 @@ pipe_writev(struct file *filp, const struct iovec *_iov,
PIPE_WAITING_WRITERS(*inode)--;
}
out:
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
if (do_wakeup) {
wake_up_interruptible(PIPE_WAIT(*inode));
kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
}
if (ret > 0)
- inode_update_time(inode, 1); /* mtime and ctime */
+ file_update_time(filp);
return ret;
}
@@ -381,7 +381,7 @@ pipe_ioctl(struct inode *pino, struct file *filp,
switch (cmd) {
case FIONREAD:
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
info = inode->i_pipe;
count = 0;
buf = info->curbuf;
@@ -390,7 +390,7 @@ pipe_ioctl(struct inode *pino, struct file *filp,
count += info->bufs[buf].len;
buf = (buf+1) & (PIPE_BUFFERS-1);
}
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
return put_user(count, (int __user *)arg);
default:
return -EINVAL;
@@ -433,7 +433,7 @@ pipe_poll(struct file *filp, poll_table *wait)
static int
pipe_release(struct inode *inode, int decr, int decw)
{
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
PIPE_READERS(*inode) -= decr;
PIPE_WRITERS(*inode) -= decw;
if (!PIPE_READERS(*inode) && !PIPE_WRITERS(*inode)) {
@@ -443,7 +443,7 @@ pipe_release(struct inode *inode, int decr, int decw)
kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
}
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
return 0;
}
@@ -454,9 +454,9 @@ pipe_read_fasync(int fd, struct file *filp, int on)
struct inode *inode = filp->f_dentry->d_inode;
int retval;
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
retval = fasync_helper(fd, filp, on, PIPE_FASYNC_READERS(*inode));
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
if (retval < 0)
return retval;
@@ -471,9 +471,9 @@ pipe_write_fasync(int fd, struct file *filp, int on)
struct inode *inode = filp->f_dentry->d_inode;
int retval;
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
retval = fasync_helper(fd, filp, on, PIPE_FASYNC_WRITERS(*inode));
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
if (retval < 0)
return retval;
@@ -488,14 +488,14 @@ pipe_rdwr_fasync(int fd, struct file *filp, int on)
struct inode *inode = filp->f_dentry->d_inode;
int retval;
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
retval = fasync_helper(fd, filp, on, PIPE_FASYNC_READERS(*inode));
if (retval >= 0)
retval = fasync_helper(fd, filp, on, PIPE_FASYNC_WRITERS(*inode));
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
if (retval < 0)
return retval;
@@ -534,9 +534,9 @@ pipe_read_open(struct inode *inode, struct file *filp)
{
/* We could have perhaps used atomic_t, but this and friends
below are the only places. So it doesn't seem worthwhile. */
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
PIPE_READERS(*inode)++;
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
return 0;
}
@@ -544,9 +544,9 @@ pipe_read_open(struct inode *inode, struct file *filp)
static int
pipe_write_open(struct inode *inode, struct file *filp)
{
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
PIPE_WRITERS(*inode)++;
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
return 0;
}
@@ -554,12 +554,12 @@ pipe_write_open(struct inode *inode, struct file *filp)
static int
pipe_rdwr_open(struct inode *inode, struct file *filp)
{
- down(PIPE_SEM(*inode));
+ mutex_lock(PIPE_MUTEX(*inode));
if (filp->f_mode & FMODE_READ)
PIPE_READERS(*inode)++;
if (filp->f_mode & FMODE_WRITE)
PIPE_WRITERS(*inode)++;
- up(PIPE_SEM(*inode));
+ mutex_unlock(PIPE_MUTEX(*inode));
return 0;
}
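(Editor's note: the fs/pipe.c changes are a mechanical conversion of the inode's semaphore, used purely for mutual exclusion, to the new mutex primitive: every down()/up() pair becomes mutex_lock()/mutex_unlock(). Below is a rough userspace analogue of the before/after shape using POSIX primitives, not the kernel API; pipe_sem/pipe_mutex are illustrative names.)

	#include <pthread.h>
	#include <semaphore.h>
	#include <stdio.h>

	static int counter;

	/* "Before": a binary semaphore used only as a lock. */
	static sem_t pipe_sem;

	static void bump_with_sem(void)
	{
		sem_wait(&pipe_sem);		/* down()  */
		counter++;
		sem_post(&pipe_sem);		/* up()    */
	}

	/* "After": the same critical section under a real mutex, which makes
	 * the locking intent explicit and allows ownership/debug checks. */
	static pthread_mutex_t pipe_mutex = PTHREAD_MUTEX_INITIALIZER;

	static void bump_with_mutex(void)
	{
		pthread_mutex_lock(&pipe_mutex);	/* mutex_lock()   */
		counter++;
		pthread_mutex_unlock(&pipe_mutex);	/* mutex_unlock() */
	}

	int main(void)
	{
		sem_init(&pipe_sem, 0, 1);	/* initialised to 1, i.e. unlocked */
		bump_with_sem();
		bump_with_mutex();
		printf("counter = %d\n", counter);
		sem_destroy(&pipe_sem);
		return 0;
	}
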
diff --git a/fs/pnode.c b/fs/pnode.c
index aeeec8ba8dd..f1871f773f6 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -103,7 +103,7 @@ static struct vfsmount *propagation_next(struct vfsmount *m,
struct vfsmount *next;
struct vfsmount *master = m->mnt_master;
- if ( master == origin->mnt_master ) {
+ if (master == origin->mnt_master) {
next = next_peer(m);
return ((next == origin) ? NULL : next);
} else if (m->mnt_slave.next != &master->mnt_slave_list)
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 3e1239e4b30..7eb1bd7f800 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -308,7 +308,7 @@ int proc_pid_status(struct task_struct *task, char * buffer)
buffer = task_sig(task, buffer);
buffer = task_cap(task, buffer);
buffer = cpuset_task_status_allowed(task, buffer);
-#if defined(CONFIG_ARCH_S390)
+#if defined(CONFIG_S390)
buffer = task_show_regs(task, buffer);
#endif
return buffer - orig;
@@ -330,7 +330,7 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
unsigned long min_flt = 0, maj_flt = 0;
cputime_t cutime, cstime, utime, stime;
unsigned long rsslim = 0;
- unsigned long it_real_value = 0;
+ DEFINE_KTIME(it_real_value);
struct task_struct *t;
char tcomm[sizeof(task->comm)];
@@ -386,7 +386,7 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
utime = cputime_add(utime, task->signal->utime);
stime = cputime_add(stime, task->signal->stime);
}
- it_real_value = task->signal->it_real_value;
+ it_real_value = task->signal->real_timer.expires;
}
ppid = pid_alive(task) ? task->group_leader->real_parent->tgid : 0;
read_unlock(&tasklist_lock);
@@ -435,7 +435,7 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole)
priority,
nice,
num_threads,
- jiffies_to_clock_t(it_real_value),
+ (long) ktime_to_clock_t(it_real_value),
start_time,
vsize,
mm ? get_mm_rss(mm) : 0,
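(Editor's note: with the hrtimer conversion, the ITIMER_REAL expiry read by do_task_stat() is kept as a ktime in signal->real_timer.expires rather than as a jiffies count, so the /proc/<pid>/stat field is now scaled with ktime_to_clock_t() instead of jiffies_to_clock_t(). The sketch below only illustrates the arithmetic involved; it assumes USER_HZ = 100 and HZ = 1000, and the helper names are made up for the example.)

	#include <stdint.h>
	#include <stdio.h>

	#define USER_HZ		100		/* assumed /proc tick rate */
	#define NSEC_PER_SEC	1000000000LL

	/* Old shape: a jiffies count scaled down to USER_HZ ticks. */
	static long jiffies_to_ticks(unsigned long jiffies, long hz)
	{
		return (long)(jiffies / (hz / USER_HZ));
	}

	/* New shape: a ktime-style nanosecond value scaled to USER_HZ ticks. */
	static long ns_to_ticks(int64_t ns)
	{
		return (long)(ns / (NSEC_PER_SEC / USER_HZ));
	}

	int main(void)
	{
		/* 2.5 seconds expressed both ways gives the same tick count. */
		printf("%ld ticks (jiffies)\n", jiffies_to_ticks(2500, 1000));
		printf("%ld ticks (ktime ns)\n", ns_to_ticks(2500000000LL));
		return 0;
	}
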
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 634355e1698..20feb7568de 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -55,6 +55,7 @@
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
+#include <linux/capability.h>
#include <linux/file.h>
#include <linux/string.h>
#include <linux/seq_file.h>
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 72b431d0a0a..20e5c4509a4 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -21,6 +21,8 @@
#include <linux/bitops.h>
#include <asm/uaccess.h>
+#include "internal.h"
+
static ssize_t proc_file_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos);
static ssize_t proc_file_write(struct file *file, const char __user *buffer,
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index e6a818a93f3..6573f31f1fd 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -19,7 +19,7 @@
#include <asm/system.h>
#include <asm/uaccess.h>
-extern void free_proc_entry(struct proc_dir_entry *);
+#include "internal.h"
static inline struct proc_dir_entry * de_get(struct proc_dir_entry *de)
{
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 3e55198f980..95a1cf32b83 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -37,6 +37,10 @@ extern int proc_tgid_stat(struct task_struct *, char *);
extern int proc_pid_status(struct task_struct *, char *);
extern int proc_pid_statm(struct task_struct *, char *);
+void free_proc_entry(struct proc_dir_entry *de);
+
+int proc_init_inodecache(void);
+
static inline struct task_struct *proc_task(struct inode *inode)
{
return PROC_I(inode)->task;
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 1c7da988fcc..adc2cd95169 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -14,6 +14,7 @@
#include <linux/proc_fs.h>
#include <linux/user.h>
#include <linux/a.out.h>
+#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/vmalloc.h>
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index fb117b74809..9bdd077d6f5 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -81,6 +81,30 @@ void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop
__proc_device_tree_add_prop(pde, prop);
}
+void proc_device_tree_remove_prop(struct proc_dir_entry *pde,
+ struct property *prop)
+{
+ remove_proc_entry(prop->name, pde);
+}
+
+void proc_device_tree_update_prop(struct proc_dir_entry *pde,
+ struct property *newprop,
+ struct property *oldprop)
+{
+ struct proc_dir_entry *ent;
+
+ for (ent = pde->subdir; ent != NULL; ent = ent->next)
+ if (ent->data == oldprop)
+ break;
+ if (ent == NULL) {
+ printk(KERN_WARNING "device-tree: property \"%s\" "
+ " does not exist\n", oldprop->name);
+ } else {
+ ent->data = newprop;
+ ent->size = newprop->length;
+ }
+}
+
/*
* Process a node, adding entries for its children and its properties.
*/
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 5b6b0b6038a..63bf6c00fa0 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -323,6 +323,7 @@ static struct file_operations proc_modules_operations = {
};
#endif
+#ifdef CONFIG_SLAB
extern struct seq_operations slabinfo_op;
extern ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
static int slabinfo_open(struct inode *inode, struct file *file)
@@ -336,6 +337,7 @@ static struct file_operations proc_slabinfo_operations = {
.llseek = seq_lseek,
.release = seq_release,
};
+#endif
static int show_stat(struct seq_file *p, void *v)
{
@@ -600,7 +602,9 @@ void __init proc_misc_init(void)
create_seq_entry("partitions", 0, &proc_partitions_operations);
create_seq_entry("stat", 0, &proc_stat_operations);
create_seq_entry("interrupts", 0, &proc_interrupts_operations);
+#ifdef CONFIG_SLAB
create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
+#endif
create_seq_entry("buddyinfo",S_IRUGO, &fragmentation_file_operations);
create_seq_entry("vmstat",S_IRUGO, &proc_vmstat_file_operations);
create_seq_entry("zoneinfo",S_IRUGO, &proc_zoneinfo_file_operations);
diff --git a/fs/proc/root.c b/fs/proc/root.c
index aef148f099a..68896283c8a 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -18,6 +18,8 @@
#include <linux/bitops.h>
#include <linux/smp_lock.h>
+#include "internal.h"
+
struct proc_dir_entry *proc_net, *proc_net_stat, *proc_bus, *proc_root_fs, *proc_root_driver;
#ifdef CONFIG_SYSCTL
@@ -36,7 +38,6 @@ static struct file_system_type proc_fs_type = {
.kill_sb = kill_anon_super,
};
-extern int __init proc_init_inodecache(void);
void __init proc_root_init(void)
{
int err = proc_init_inodecache();
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 50bd5a8f044..0eaad41f465 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -390,129 +390,12 @@ struct seq_operations proc_pid_smaps_op = {
};
#ifdef CONFIG_NUMA
-
-struct numa_maps {
- unsigned long pages;
- unsigned long anon;
- unsigned long mapped;
- unsigned long mapcount_max;
- unsigned long node[MAX_NUMNODES];
-};
-
-/*
- * Calculate numa node maps for a vma
- */
-static struct numa_maps *get_numa_maps(struct vm_area_struct *vma)
-{
- int i;
- struct page *page;
- unsigned long vaddr;
- struct numa_maps *md = kmalloc(sizeof(struct numa_maps), GFP_KERNEL);
-
- if (!md)
- return NULL;
- md->pages = 0;
- md->anon = 0;
- md->mapped = 0;
- md->mapcount_max = 0;
- for_each_node(i)
- md->node[i] =0;
-
- for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
- page = follow_page(vma, vaddr, 0);
- if (page) {
- int count = page_mapcount(page);
-
- if (count)
- md->mapped++;
- if (count > md->mapcount_max)
- md->mapcount_max = count;
- md->pages++;
- if (PageAnon(page))
- md->anon++;
- md->node[page_to_nid(page)]++;
- }
- cond_resched();
- }
- return md;
-}
-
-static int show_numa_map(struct seq_file *m, void *v)
-{
- struct task_struct *task = m->private;
- struct vm_area_struct *vma = v;
- struct mempolicy *pol;
- struct numa_maps *md;
- struct zone **z;
- int n;
- int first;
-
- if (!vma->vm_mm)
- return 0;
-
- md = get_numa_maps(vma);
- if (!md)
- return 0;
-
- seq_printf(m, "%08lx", vma->vm_start);
- pol = get_vma_policy(task, vma, vma->vm_start);
- /* Print policy */
- switch (pol->policy) {
- case MPOL_PREFERRED:
- seq_printf(m, " prefer=%d", pol->v.preferred_node);
- break;
- case MPOL_BIND:
- seq_printf(m, " bind={");
- first = 1;
- for (z = pol->v.zonelist->zones; *z; z++) {
-
- if (!first)
- seq_putc(m, ',');
- else
- first = 0;
- seq_printf(m, "%d/%s", (*z)->zone_pgdat->node_id,
- (*z)->name);
- }
- seq_putc(m, '}');
- break;
- case MPOL_INTERLEAVE:
- seq_printf(m, " interleave={");
- first = 1;
- for_each_node(n) {
- if (node_isset(n, pol->v.nodes)) {
- if (!first)
- seq_putc(m,',');
- else
- first = 0;
- seq_printf(m, "%d",n);
- }
- }
- seq_putc(m, '}');
- break;
- default:
- seq_printf(m," default");
- break;
- }
- seq_printf(m, " MaxRef=%lu Pages=%lu Mapped=%lu",
- md->mapcount_max, md->pages, md->mapped);
- if (md->anon)
- seq_printf(m," Anon=%lu",md->anon);
-
- for_each_online_node(n) {
- if (md->node[n])
- seq_printf(m, " N%d=%lu", n, md->node[n]);
- }
- seq_putc(m, '\n');
- kfree(md);
- if (m->count < m->size) /* vma is copied successfully */
- m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
- return 0;
-}
+extern int show_numa_map(struct seq_file *m, void *v);
struct seq_operations proc_pid_numa_maps_op = {
- .start = m_start,
- .next = m_next,
- .stop = m_stop,
- .show = show_numa_map
+ .start = m_start,
+ .next = m_next,
+ .stop = m_stop,
+ .show = show_numa_map
};
#endif
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 3b2e7b69e63..4063fb32f78 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -14,7 +14,6 @@
#include <linux/a.out.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
-#include <linux/proc_fs.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/init.h>
@@ -35,11 +34,14 @@ static size_t elfcorebuf_sz;
/* Total size of vmcore file. */
static u64 vmcore_size;
+/* Stores the physical address of elf header of crash image. */
+unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
+
struct proc_dir_entry *proc_vmcore = NULL;
/* Reads a page from the oldmem device from given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
- loff_t *ppos, int userbuf)
+ u64 *ppos, int userbuf)
{
unsigned long pfn, offset;
size_t nr_bytes;
diff --git a/fs/quota.c b/fs/quota.c
index 612e04db4b9..ba9e0bf32f6 100644
--- a/fs/quota.c
+++ b/fs/quota.c
@@ -15,6 +15,7 @@
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
+#include <linux/capability.h>
#include <linux/quotaops.h>
/* Check validity of generic quotactl commands */
@@ -168,7 +169,7 @@ static void quota_sync_sb(struct super_block *sb, int type)
sync_blockdev(sb->s_bdev);
/* Now when everything is written we can discard the pagecache so
- * that userspace sees the changes. We need i_sem and so we could
+ * that userspace sees the changes. We need i_mutex and so we could
* not do it inside dqonoff_sem. Moreover we need to be carefull
* about races with quotaoff() (that is the reason why we have own
* reference to inode). */
@@ -184,9 +185,9 @@ static void quota_sync_sb(struct super_block *sb, int type)
up(&sb_dqopt(sb)->dqonoff_sem);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (discard[cnt]) {
- down(&discard[cnt]->i_sem);
+ mutex_lock(&discard[cnt]->i_mutex);
truncate_inode_pages(&discard[cnt]->i_data, 0);
- up(&discard[cnt]->i_sem);
+ mutex_unlock(&discard[cnt]->i_mutex);
iput(discard[cnt]);
}
}
diff --git a/fs/ramfs/Makefile b/fs/ramfs/Makefile
index f096f300709..5a0236e02ee 100644
--- a/fs/ramfs/Makefile
+++ b/fs/ramfs/Makefile
@@ -4,4 +4,6 @@
obj-$(CONFIG_RAMFS) += ramfs.o
-ramfs-objs := inode.o
+file-mmu-y := file-nommu.o
+file-mmu-$(CONFIG_MMU) := file-mmu.o
+ramfs-objs += inode.o $(file-mmu-y)
diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c
new file mode 100644
index 00000000000..2115383dcc8
--- /dev/null
+++ b/fs/ramfs/file-mmu.c
@@ -0,0 +1,57 @@
+/* file-mmu.c: ramfs MMU-based file operations
+ *
+ * Resizable simple ram filesystem for Linux.
+ *
+ * Copyright (C) 2000 Linus Torvalds.
+ * 2000 Transmeta Corp.
+ *
+ * Usage limits added by David Gibson, Linuxcare Australia.
+ * This file is released under the GPL.
+ */
+
+/*
+ * NOTE! This filesystem is probably most useful
+ * not as a real filesystem, but as an example of
+ * how virtual filesystems can be written.
+ *
+ * It doesn't get much simpler than this. Consider
+ * that this file implements the full semantics of
+ * a POSIX-compliant read-write filesystem.
+ *
+ * Note in particular how the filesystem does not
+ * need to implement any data structures of its own
+ * to keep track of the virtual data: using the VFS
+ * caches is sufficient.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/smp_lock.h>
+#include <linux/backing-dev.h>
+#include <linux/ramfs.h>
+
+#include <asm/uaccess.h>
+#include "internal.h"
+
+struct address_space_operations ramfs_aops = {
+ .readpage = simple_readpage,
+ .prepare_write = simple_prepare_write,
+ .commit_write = simple_commit_write
+};
+
+struct file_operations ramfs_file_operations = {
+ .read = generic_file_read,
+ .write = generic_file_write,
+ .mmap = generic_file_mmap,
+ .fsync = simple_sync_file,
+ .sendfile = generic_file_sendfile,
+ .llseek = generic_file_llseek,
+};
+
+struct inode_operations ramfs_file_inode_operations = {
+ .getattr = simple_getattr,
+};
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
new file mode 100644
index 00000000000..3f810acd0bf
--- /dev/null
+++ b/fs/ramfs/file-nommu.c
@@ -0,0 +1,292 @@
+/* file-nommu.c: no-MMU version of ramfs
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/smp_lock.h>
+#include <linux/backing-dev.h>
+#include <linux/ramfs.h>
+#include <linux/quotaops.h>
+#include <linux/pagevec.h>
+#include <linux/mman.h>
+
+#include <asm/uaccess.h>
+#include "internal.h"
+
+static int ramfs_nommu_setattr(struct dentry *, struct iattr *);
+
+struct address_space_operations ramfs_aops = {
+ .readpage = simple_readpage,
+ .prepare_write = simple_prepare_write,
+ .commit_write = simple_commit_write
+};
+
+struct file_operations ramfs_file_operations = {
+ .mmap = ramfs_nommu_mmap,
+ .get_unmapped_area = ramfs_nommu_get_unmapped_area,
+ .read = generic_file_read,
+ .write = generic_file_write,
+ .fsync = simple_sync_file,
+ .sendfile = generic_file_sendfile,
+ .llseek = generic_file_llseek,
+};
+
+struct inode_operations ramfs_file_inode_operations = {
+ .setattr = ramfs_nommu_setattr,
+ .getattr = simple_getattr,
+};
+
+/*****************************************************************************/
+/*
+ * add a contiguous set of pages into a ramfs inode when it's truncated from
+ * size 0 on the assumption that it's going to be used for an mmap of shared
+ * memory
+ */
+static int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
+{
+ struct pagevec lru_pvec;
+ unsigned long npages, xpages, loop, limit;
+ struct page *pages;
+ unsigned order;
+ void *data;
+ int ret;
+
+ /* make various checks */
+ order = get_order(newsize);
+ if (unlikely(order >= MAX_ORDER))
+ goto too_big;
+
+ limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+ if (limit != RLIM_INFINITY && newsize > limit)
+ goto fsize_exceeded;
+
+ if (newsize > inode->i_sb->s_maxbytes)
+ goto too_big;
+
+ i_size_write(inode, newsize);
+
+ /* allocate enough contiguous pages to be able to satisfy the
+ * request */
+ pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order);
+ if (!pages)
+ return -ENOMEM;
+
+ /* split the high-order page into an array of single pages */
+ xpages = 1UL << order;
+ npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ for (loop = 0; loop < npages; loop++)
+ set_page_count(pages + loop, 1);
+
+ /* trim off any pages we don't actually require */
+ for (loop = npages; loop < xpages; loop++)
+ __free_page(pages + loop);
+
+ /* clear the memory we allocated */
+ newsize = PAGE_SIZE * npages;
+ data = page_address(pages);
+ memset(data, 0, newsize);
+
+ /* attach all the pages to the inode's address space */
+ pagevec_init(&lru_pvec, 0);
+ for (loop = 0; loop < npages; loop++) {
+ struct page *page = pages + loop;
+
+ ret = add_to_page_cache(page, inode->i_mapping, loop, GFP_KERNEL);
+ if (ret < 0)
+ goto add_error;
+
+ if (!pagevec_add(&lru_pvec, page))
+ __pagevec_lru_add(&lru_pvec);
+
+ unlock_page(page);
+ }
+
+ pagevec_lru_add(&lru_pvec);
+ return 0;
+
+ fsize_exceeded:
+ send_sig(SIGXFSZ, current, 0);
+ too_big:
+ return -EFBIG;
+
+ add_error:
+ page_cache_release(pages + loop);
+ for (loop++; loop < npages; loop++)
+ __free_page(pages + loop);
+ return ret;
+}
+
+/*****************************************************************************/
+/*
+ * check that file shrinkage doesn't leave any VMAs dangling in midair
+ */
+static int ramfs_nommu_check_mappings(struct inode *inode,
+ size_t newsize, size_t size)
+{
+ struct vm_area_struct *vma;
+ struct prio_tree_iter iter;
+
+ /* search for VMAs that fall within the dead zone */
+ vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
+ newsize >> PAGE_SHIFT,
+ (size + PAGE_SIZE - 1) >> PAGE_SHIFT
+ ) {
+ /* found one - only interested if it's shared out of the page
+ * cache */
+ if (vma->vm_flags & VM_SHARED)
+ return -ETXTBSY; /* not quite true, but near enough */
+ }
+
+ return 0;
+}
+
+/*****************************************************************************/
+/*
+ *
+ */
+static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
+{
+ int ret;
+
+ /* assume a truncate from zero size is going to be for the purposes of
+ * shared mmap */
+ if (size == 0) {
+ if (unlikely(newsize >> 32))
+ return -EFBIG;
+
+ return ramfs_nommu_expand_for_mapping(inode, newsize);
+ }
+
+ /* check that a decrease in size doesn't cut off any shared mappings */
+ if (newsize < size) {
+ ret = ramfs_nommu_check_mappings(inode, newsize, size);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = vmtruncate(inode, size);
+
+ return ret;
+}
+
+/*****************************************************************************/
+/*
+ * handle a change of attributes
+ * - we're specifically interested in a change of size
+ */
+static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
+{
+ struct inode *inode = dentry->d_inode;
+ unsigned int old_ia_valid = ia->ia_valid;
+ int ret = 0;
+
+ /* by providing our own setattr() method, we skip this quotaism */
+ if ((old_ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) ||
+ (old_ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid))
+ ret = DQUOT_TRANSFER(inode, ia) ? -EDQUOT : 0;
+
+ /* pick out size-changing events */
+ if (ia->ia_valid & ATTR_SIZE) {
+ loff_t size = i_size_read(inode);
+ if (ia->ia_size != size) {
+ ret = ramfs_nommu_resize(inode, ia->ia_size, size);
+ if (ret < 0 || ia->ia_valid == ATTR_SIZE)
+ goto out;
+ } else {
+ /* we skipped the truncate but must still update
+ * timestamps
+ */
+ ia->ia_valid |= ATTR_MTIME|ATTR_CTIME;
+ }
+ }
+
+ ret = inode_setattr(inode, ia);
+ out:
+ ia->ia_valid = old_ia_valid;
+ return ret;
+}
+
+/*****************************************************************************/
+/*
+ * try to determine where a shared mapping can be made
+ * - we require that:
+ * - the pages to be mapped must exist
+ * - the pages be physically contiguous in sequence
+ */
+unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
+ unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags)
+{
+ unsigned long maxpages, lpages, nr, loop, ret;
+ struct inode *inode = file->f_dentry->d_inode;
+ struct page **pages = NULL, **ptr, *page;
+ loff_t isize;
+
+ if (!(flags & MAP_SHARED))
+ return addr;
+
+ /* the mapping mustn't extend beyond the EOF */
+ lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ isize = i_size_read(inode);
+
+ ret = -EINVAL;
+ maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ if (pgoff >= maxpages)
+ goto out;
+
+ if (maxpages - pgoff < lpages)
+ goto out;
+
+ /* gang-find the pages */
+ ret = -ENOMEM;
+ pages = kzalloc(lpages * sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ goto out;
+
+ nr = find_get_pages(inode->i_mapping, pgoff, lpages, pages);
+ if (nr != lpages)
+ goto out; /* leave if some pages were missing */
+
+ /* check the pages for physical adjacency */
+ ptr = pages;
+ page = *ptr++;
+ page++;
+ for (loop = lpages; loop > 1; loop--)
+ if (*ptr++ != page++)
+ goto out;
+
+ /* okay - all conditions fulfilled */
+ ret = (unsigned long) page_address(pages[0]);
+
+ out:
+ if (pages) {
+ ptr = pages;
+ for (loop = lpages; loop > 0; loop--)
+ put_page(*ptr++);
+ kfree(pages);
+ }
+
+ return ret;
+}
+
+/*****************************************************************************/
+/*
+ * set up a mapping
+ */
+int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ return 0;
+}
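(Editor's note: ramfs_nommu_get_unmapped_area() above gang-finds the backing pages and then walks the returned array checking that each struct page descriptor is the immediate successor of the previous one; on a flat mem_map, adjacent descriptors imply physically contiguous pages, which a no-MMU shared mapping requires. A minimal userspace sketch of the same pointer walk, with a stand-in fake_page type invented for the example.)

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for struct page: all the check relies on is that
	 * contiguous pages have adjacent descriptors in one array. */
	struct fake_page { int idx; };

	/* True if pages[0..n-1] point at consecutive descriptors,
	 * mirroring the "check the pages for physical adjacency" loop. */
	static bool pages_are_contiguous(struct fake_page **pages, unsigned long n)
	{
		struct fake_page **ptr = pages;
		struct fake_page *expect = *ptr++ + 1;
		unsigned long loop;

		for (loop = n; loop > 1; loop--)
			if (*ptr++ != expect++)
				return false;
		return true;
	}

	int main(void)
	{
		struct fake_page mem_map[4];
		struct fake_page *ok[3]  = { &mem_map[0], &mem_map[1], &mem_map[2] };
		struct fake_page *bad[3] = { &mem_map[0], &mem_map[2], &mem_map[3] };

		printf("ok:  %s\n", pages_are_contiguous(ok, 3) ? "contiguous" : "gap");
		printf("bad: %s\n", pages_are_contiguous(bad, 3) ? "contiguous" : "gap");
		return 0;
	}
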
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 0a88917605a..c66bd5e4c05 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -34,13 +34,12 @@
#include <linux/ramfs.h>
#include <asm/uaccess.h>
+#include "internal.h"
/* some random number */
#define RAMFS_MAGIC 0x858458f6
static struct super_operations ramfs_ops;
-static struct address_space_operations ramfs_aops;
-static struct inode_operations ramfs_file_inode_operations;
static struct inode_operations ramfs_dir_inode_operations;
static struct backing_dev_info ramfs_backing_dev_info = {
@@ -142,25 +141,6 @@ static int ramfs_symlink(struct inode * dir, struct dentry *dentry, const char *
return error;
}
-static struct address_space_operations ramfs_aops = {
- .readpage = simple_readpage,
- .prepare_write = simple_prepare_write,
- .commit_write = simple_commit_write
-};
-
-struct file_operations ramfs_file_operations = {
- .read = generic_file_read,
- .write = generic_file_write,
- .mmap = generic_file_mmap,
- .fsync = simple_sync_file,
- .sendfile = generic_file_sendfile,
- .llseek = generic_file_llseek,
-};
-
-static struct inode_operations ramfs_file_inode_operations = {
- .getattr = simple_getattr,
-};
-
static struct inode_operations ramfs_dir_inode_operations = {
.create = ramfs_create,
.lookup = simple_lookup,
diff --git a/fs/ramfs/internal.h b/fs/ramfs/internal.h
new file mode 100644
index 00000000000..272c8a7120b
--- /dev/null
+++ b/fs/ramfs/internal.h
@@ -0,0 +1,15 @@
+/* internal.h: ramfs internal definitions
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+
+extern struct address_space_operations ramfs_aops;
+extern struct file_operations ramfs_file_operations;
+extern struct inode_operations ramfs_file_inode_operations;
diff --git a/fs/read_write.c b/fs/read_write.c
index df3468a22fe..3f7a1a62165 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -33,7 +33,7 @@ loff_t generic_file_llseek(struct file *file, loff_t offset, int origin)
long long retval;
struct inode *inode = file->f_mapping->host;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
switch (origin) {
case 2:
offset += inode->i_size;
@@ -49,7 +49,7 @@ loff_t generic_file_llseek(struct file *file, loff_t offset, int origin)
}
retval = offset;
}
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return retval;
}
diff --git a/fs/readdir.c b/fs/readdir.c
index b03579bc021..b6109329b60 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -30,13 +30,13 @@ int vfs_readdir(struct file *file, filldir_t filler, void *buf)
if (res)
goto out;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
res = -ENOENT;
if (!IS_DEADDIR(inode)) {
res = file->f_op->readdir(file, buf, filler);
file_accessed(file);
}
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
out:
return res;
}
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 7892a865b58..ad6fa964b0e 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -49,7 +49,7 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
}
reiserfs_write_lock(inode->i_sb);
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
/* freeing preallocation only involves relogging blocks that
* are already in the current transaction. preallocation gets
* freed at the end of each transaction, so it is impossible for
@@ -100,7 +100,7 @@ static int reiserfs_file_release(struct inode *inode, struct file *filp)
err = reiserfs_truncate_file(inode, 0);
}
out:
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
reiserfs_write_unlock(inode->i_sb);
return err;
}
@@ -1342,7 +1342,7 @@ static ssize_t reiserfs_file_write(struct file *file, /* the file we are going t
if (unlikely(!access_ok(VERIFY_READ, buf, count)))
return -EFAULT;
- down(&inode->i_sem); // locks the entire file for just us
+ mutex_lock(&inode->i_mutex); // locks the entire file for just us
pos = *ppos;
@@ -1360,7 +1360,7 @@ static ssize_t reiserfs_file_write(struct file *file, /* the file we are going t
if (res)
goto out;
- inode_update_time(inode, 1); /* Both mtime and ctime */
+ file_update_time(file);
// Ok, we are done with all the checks.
@@ -1532,12 +1532,12 @@ static ssize_t reiserfs_file_write(struct file *file, /* the file we are going t
generic_osync_inode(inode, file->f_mapping,
OSYNC_METADATA | OSYNC_DATA);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
reiserfs_async_progress_wait(inode->i_sb);
return (already_written != 0) ? already_written : res;
out:
- up(&inode->i_sem); // unlock the file on exit.
+ mutex_unlock(&inode->i_mutex); // unlock the file on exit.
return res;
}
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index a5e3a0ddbe5..ffa34b861bd 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -40,12 +40,12 @@ void reiserfs_delete_inode(struct inode *inode)
/* The = 0 happens when we abort creating a new inode for some reason like lack of space.. */
if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) { /* also handles bad_inode case */
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
reiserfs_delete_xattrs(inode);
if (journal_begin(&th, inode->i_sb, jbegin_count)) {
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
goto out;
}
reiserfs_update_inode_transaction(inode);
@@ -59,11 +59,11 @@ void reiserfs_delete_inode(struct inode *inode)
DQUOT_FREE_INODE(inode);
if (journal_end(&th, inode->i_sb, jbegin_count)) {
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
goto out;
}
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
/* check return value from reiserfs_delete_object after
* ending the transaction
@@ -551,7 +551,7 @@ static int convert_tail_for_hole(struct inode *inode,
/* we don't have to make sure the conversion did not happen while
** we were locking the page because anyone that could convert
- ** must first take i_sem.
+ ** must first take i_mutex.
**
** We must fix the tail page for writing because it might have buffers
** that are mapped, but have a block number of 0. This indicates tail
@@ -586,7 +586,7 @@ static inline int _allocate_block(struct reiserfs_transaction_handle *th,
BUG_ON(!th->t_trans_id);
#ifdef REISERFS_PREALLOCATE
- if (!(flags & GET_BLOCK_NO_ISEM)) {
+ if (!(flags & GET_BLOCK_NO_IMUX)) {
return reiserfs_new_unf_blocknrs2(th, inode, allocated_block_nr,
path, block);
}
@@ -2318,7 +2318,7 @@ static int map_block_for_writepage(struct inode *inode,
/* this is where we fill in holes in the file. */
if (use_get_block) {
retval = reiserfs_get_block(inode, block, bh_result,
- GET_BLOCK_CREATE | GET_BLOCK_NO_ISEM
+ GET_BLOCK_CREATE | GET_BLOCK_NO_IMUX
| GET_BLOCK_NO_DANGLE);
if (!retval) {
if (!buffer_mapped(bh_result)
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 81fc00285f6..745c8810089 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -2,6 +2,7 @@
* Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
*/
+#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/reiserfs_fs.h>
#include <linux/time.h>
@@ -120,7 +121,7 @@ static int reiserfs_unpack(struct inode *inode, struct file *filp)
/* we need to make sure nobody is changing the file size beneath
** us
*/
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
write_from = inode->i_size & (blocksize - 1);
/* if we are on a block boundary, we are already unpacked. */
@@ -156,7 +157,7 @@ static int reiserfs_unpack(struct inode *inode, struct file *filp)
page_cache_release(page);
out:
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
reiserfs_write_unlock(inode->i_sb);
return retval;
}
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 3f17ef844fb..4491fcf2a0e 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -3925,10 +3925,13 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
flush = 1;
}
#ifdef REISERFS_PREALLOCATE
- /* quota ops might need to nest, setup the journal_info pointer for them */
+ /* quota ops might need to nest, setup the journal_info pointer for them
+ * and raise the refcount so that it is > 0. */
current->journal_info = th;
+ th->t_refcount++;
reiserfs_discard_all_prealloc(th); /* it should not involve new blocks into
* the transaction */
+ th->t_refcount--;
current->journal_info = th->t_handle_save;
#endif
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 42afb5bef11..397d9590c8f 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -2211,7 +2211,7 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
size_t towrite = len;
struct buffer_head tmp_bh, *bh;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
while (towrite > 0) {
tocopy = sb->s_blocksize - offset < towrite ?
sb->s_blocksize - offset : towrite;
@@ -2250,7 +2250,7 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
inode->i_version++;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(inode);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return len - towrite;
}
diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c
index c92e124f628..196e971c03c 100644
--- a/fs/reiserfs/tail_conversion.c
+++ b/fs/reiserfs/tail_conversion.c
@@ -205,7 +205,7 @@ int indirect2direct(struct reiserfs_transaction_handle *th, struct inode *p_s_in
1) * p_s_sb->s_blocksize;
pos1 = pos;
- // we are protected by i_sem. The tail can not disapper, not
+ // we are protected by i_mutex. The tail can not disapper, not
// append can be done either
// we are in truncate or packing tail in file_release
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 02091eaac0b..cc061bfd437 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -30,6 +30,7 @@
*/
#include <linux/reiserfs_fs.h>
+#include <linux/capability.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/errno.h>
@@ -67,11 +68,11 @@ static struct dentry *create_xa_root(struct super_block *sb)
goto out;
} else if (!xaroot->d_inode) {
int err;
- down(&privroot->d_inode->i_sem);
+ mutex_lock(&privroot->d_inode->i_mutex);
err =
privroot->d_inode->i_op->mkdir(privroot->d_inode, xaroot,
0700);
- up(&privroot->d_inode->i_sem);
+ mutex_unlock(&privroot->d_inode->i_mutex);
if (err) {
dput(xaroot);
@@ -219,7 +220,7 @@ static struct dentry *get_xa_file_dentry(const struct inode *inode,
} else if (flags & XATTR_REPLACE || flags & FL_READONLY) {
goto out;
} else {
- /* inode->i_sem is down, so nothing else can try to create
+ /* inode->i_mutex is down, so nothing else can try to create
* the same xattr */
err = xadir->d_inode->i_op->create(xadir->d_inode, xafile,
0700 | S_IFREG, NULL);
@@ -268,7 +269,7 @@ static struct file *open_xa_file(const struct inode *inode, const char *name,
* and don't mess with f->f_pos, but the idea is the same. Do some
* action on each and every entry in the directory.
*
- * we're called with i_sem held, so there are no worries about the directory
+ * we're called with i_mutex held, so there are no worries about the directory
* changing underneath us.
*/
static int __xattr_readdir(struct file *filp, void *dirent, filldir_t filldir)
@@ -426,7 +427,7 @@ int xattr_readdir(struct file *file, filldir_t filler, void *buf)
int res = -ENOTDIR;
if (!file->f_op || !file->f_op->readdir)
goto out;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
// down(&inode->i_zombie);
res = -ENOENT;
if (!IS_DEADDIR(inode)) {
@@ -435,7 +436,7 @@ int xattr_readdir(struct file *file, filldir_t filler, void *buf)
unlock_kernel();
}
// up(&inode->i_zombie);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
out:
return res;
}
@@ -480,7 +481,7 @@ static inline __u32 xattr_hash(const char *msg, int len)
/* Generic extended attribute operations that can be used by xa plugins */
/*
- * inode->i_sem: down
+ * inode->i_mutex: down
*/
int
reiserfs_xattr_set(struct inode *inode, const char *name, const void *buffer,
@@ -497,12 +498,6 @@ reiserfs_xattr_set(struct inode *inode, const char *name, const void *buffer,
struct iattr newattrs;
__u32 xahash = 0;
- if (IS_RDONLY(inode))
- return -EROFS;
-
- if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
- return -EPERM;
-
if (get_inode_sd_version(inode) == STAT_DATA_V1)
return -EOPNOTSUPP;
@@ -535,7 +530,7 @@ reiserfs_xattr_set(struct inode *inode, const char *name, const void *buffer,
/* Resize it so we're ok to write there */
newattrs.ia_size = buffer_size;
newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
- down(&xinode->i_sem);
+ mutex_lock(&xinode->i_mutex);
err = notify_change(fp->f_dentry, &newattrs);
if (err)
goto out_filp;
@@ -598,7 +593,7 @@ reiserfs_xattr_set(struct inode *inode, const char *name, const void *buffer,
}
out_filp:
- up(&xinode->i_sem);
+ mutex_unlock(&xinode->i_mutex);
fput(fp);
out:
@@ -606,7 +601,7 @@ reiserfs_xattr_set(struct inode *inode, const char *name, const void *buffer,
}
/*
- * inode->i_sem: down
+ * inode->i_mutex: down
*/
int
reiserfs_xattr_get(const struct inode *inode, const char *name, void *buffer,
@@ -758,9 +753,6 @@ int reiserfs_xattr_del(struct inode *inode, const char *name)
struct dentry *dir;
int err;
- if (IS_RDONLY(inode))
- return -EROFS;
-
dir = open_xa_dir(inode, FL_READONLY);
if (IS_ERR(dir)) {
err = PTR_ERR(dir);
@@ -793,7 +785,7 @@ reiserfs_delete_xattrs_filler(void *buf, const char *name, int namelen,
}
-/* This is called w/ inode->i_sem downed */
+/* This is called w/ inode->i_mutex downed */
int reiserfs_delete_xattrs(struct inode *inode)
{
struct file *fp;
@@ -946,7 +938,7 @@ int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs)
/*
* Inode operation getxattr()
- * Preliminary locking: we down dentry->d_inode->i_sem
+ * Preliminary locking: we down dentry->d_inode->i_mutex
*/
ssize_t
reiserfs_getxattr(struct dentry * dentry, const char *name, void *buffer,
@@ -970,7 +962,7 @@ reiserfs_getxattr(struct dentry * dentry, const char *name, void *buffer,
/*
* Inode operation setxattr()
*
- * dentry->d_inode->i_sem down
+ * dentry->d_inode->i_mutex down
*/
int
reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value,
@@ -984,12 +976,6 @@ reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value,
get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1)
return -EOPNOTSUPP;
- if (IS_RDONLY(dentry->d_inode))
- return -EROFS;
-
- if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode))
- return -EROFS;
-
reiserfs_write_lock_xattr_i(dentry->d_inode);
lock = !has_xattr_dir(dentry->d_inode);
if (lock)
@@ -1008,7 +994,7 @@ reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value,
/*
* Inode operation removexattr()
*
- * dentry->d_inode->i_sem down
+ * dentry->d_inode->i_mutex down
*/
int reiserfs_removexattr(struct dentry *dentry, const char *name)
{
@@ -1019,12 +1005,6 @@ int reiserfs_removexattr(struct dentry *dentry, const char *name)
get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1)
return -EOPNOTSUPP;
- if (IS_RDONLY(dentry->d_inode))
- return -EROFS;
-
- if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode))
- return -EPERM;
-
reiserfs_write_lock_xattr_i(dentry->d_inode);
reiserfs_read_lock_xattrs(dentry->d_sb);
@@ -1091,7 +1071,7 @@ reiserfs_listxattr_filler(void *buf, const char *name, int namelen,
/*
* Inode operation listxattr()
*
- * Preliminary locking: we down dentry->d_inode->i_sem
+ * Preliminary locking: we down dentry->d_inode->i_mutex
*/
ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size)
{
@@ -1289,9 +1269,9 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
if (!IS_ERR(dentry)) {
if (!(mount_flags & MS_RDONLY) && !dentry->d_inode) {
struct inode *inode = dentry->d_parent->d_inode;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
err = inode->i_op->mkdir(inode, dentry, 0700);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
if (err) {
dput(dentry);
dentry = NULL;
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index a47ac9aac8b..43de3ba8333 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -1,3 +1,4 @@
+#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/posix_acl.h>
#include <linux/reiserfs_fs.h>
@@ -174,7 +175,7 @@ static void *posix_acl_to_disk(const struct posix_acl *acl, size_t * size)
/*
* Inode operation get_posix_acl().
*
- * inode->i_sem: down
+ * inode->i_mutex: down
* BKL held [before 2.5.x]
*/
struct posix_acl *reiserfs_get_acl(struct inode *inode, int type)
@@ -237,7 +238,7 @@ struct posix_acl *reiserfs_get_acl(struct inode *inode, int type)
/*
* Inode operation set_posix_acl().
*
- * inode->i_sem: down
+ * inode->i_mutex: down
* BKL held [before 2.5.x]
*/
static int
@@ -312,7 +313,7 @@ reiserfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
return error;
}
-/* dir->i_sem: down,
+/* dir->i_mutex: locked,
* inode is new and not released into the wild yet */
int
reiserfs_inherit_default_acl(struct inode *dir, struct dentry *dentry,
diff --git a/fs/reiserfs/xattr_trusted.c b/fs/reiserfs/xattr_trusted.c
index 2501f7e66ab..024a938ca60 100644
--- a/fs/reiserfs/xattr_trusted.c
+++ b/fs/reiserfs/xattr_trusted.c
@@ -1,4 +1,5 @@
#include <linux/reiserfs_fs.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
diff --git a/fs/reiserfs/xattr_user.c b/fs/reiserfs/xattr_user.c
index 51458048ca6..073f39364b1 100644
--- a/fs/reiserfs/xattr_user.c
+++ b/fs/reiserfs/xattr_user.c
@@ -16,18 +16,10 @@ static int
user_get(struct inode *inode, const char *name, void *buffer, size_t size)
{
- int error;
-
if (strlen(name) < sizeof(XATTR_USER_PREFIX))
return -EINVAL;
-
if (!reiserfs_xattrs_user(inode->i_sb))
return -EOPNOTSUPP;
-
- error = reiserfs_permission_locked(inode, MAY_READ, NULL);
- if (error)
- return error;
-
return reiserfs_xattr_get(inode, name, buffer, size);
}
@@ -36,43 +28,21 @@ user_set(struct inode *inode, const char *name, const void *buffer,
size_t size, int flags)
{
- int error;
-
if (strlen(name) < sizeof(XATTR_USER_PREFIX))
return -EINVAL;
if (!reiserfs_xattrs_user(inode->i_sb))
return -EOPNOTSUPP;
-
- if (!S_ISREG(inode->i_mode) &&
- (!S_ISDIR(inode->i_mode) || inode->i_mode & S_ISVTX))
- return -EPERM;
-
- error = reiserfs_permission_locked(inode, MAY_WRITE, NULL);
- if (error)
- return error;
-
return reiserfs_xattr_set(inode, name, buffer, size, flags);
}
static int user_del(struct inode *inode, const char *name)
{
- int error;
-
if (strlen(name) < sizeof(XATTR_USER_PREFIX))
return -EINVAL;
if (!reiserfs_xattrs_user(inode->i_sb))
return -EOPNOTSUPP;
-
- if (!S_ISREG(inode->i_mode) &&
- (!S_ISDIR(inode->i_mode) || inode->i_mode & S_ISVTX))
- return -EPERM;
-
- error = reiserfs_permission_locked(inode, MAY_WRITE, NULL);
- if (error)
- return error;
-
return 0;
}
diff --git a/fs/relayfs/buffers.c b/fs/relayfs/buffers.c
index 84e21ffa5ca..10187812771 100644
--- a/fs/relayfs/buffers.c
+++ b/fs/relayfs/buffers.c
@@ -185,5 +185,6 @@ void relay_destroy_buf(struct rchan_buf *buf)
void relay_remove_buf(struct kref *kref)
{
struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref);
- relayfs_remove(buf->dentry);
+ buf->chan->cb->remove_buf_file(buf->dentry);
+ relay_destroy_buf(buf);
}
diff --git a/fs/relayfs/inode.c b/fs/relayfs/inode.c
index 0f7f88d067a..383523011aa 100644
--- a/fs/relayfs/inode.c
+++ b/fs/relayfs/inode.c
@@ -26,31 +26,22 @@
static struct vfsmount * relayfs_mount;
static int relayfs_mount_count;
-static kmem_cache_t * relayfs_inode_cachep;
static struct backing_dev_info relayfs_backing_dev_info = {
.ra_pages = 0, /* No readahead */
.capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
};
-static struct inode *relayfs_get_inode(struct super_block *sb, int mode,
- struct rchan *chan)
+static struct inode *relayfs_get_inode(struct super_block *sb,
+ int mode,
+ struct file_operations *fops,
+ void *data)
{
- struct rchan_buf *buf = NULL;
struct inode *inode;
- if (S_ISREG(mode)) {
- BUG_ON(!chan);
- buf = relay_create_buf(chan);
- if (!buf)
- return NULL;
- }
-
inode = new_inode(sb);
- if (!inode) {
- relay_destroy_buf(buf);
+ if (!inode)
return NULL;
- }
inode->i_mode = mode;
inode->i_uid = 0;
@@ -61,8 +52,9 @@ static struct inode *relayfs_get_inode(struct super_block *sb, int mode,
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
switch (mode & S_IFMT) {
case S_IFREG:
- inode->i_fop = &relayfs_file_operations;
- RELAYFS_I(inode)->buf = buf;
+ inode->i_fop = fops;
+ if (data)
+ inode->u.generic_ip = data;
break;
case S_IFDIR:
inode->i_op = &simple_dir_inode_operations;
@@ -83,7 +75,8 @@ static struct inode *relayfs_get_inode(struct super_block *sb, int mode,
* @name: the name of the file to create
* @parent: parent directory
* @mode: mode
- * @chan: relay channel associated with the file
+ * @fops: file operations to use for the file
+ * @data: user-associated data for this file
*
* Returns the new dentry, NULL on failure
*
@@ -92,7 +85,8 @@ static struct inode *relayfs_get_inode(struct super_block *sb, int mode,
static struct dentry *relayfs_create_entry(const char *name,
struct dentry *parent,
int mode,
- struct rchan *chan)
+ struct file_operations *fops,
+ void *data)
{
struct dentry *d;
struct inode *inode;
@@ -115,7 +109,7 @@ static struct dentry *relayfs_create_entry(const char *name,
}
parent = dget(parent);
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
d = lookup_one_len(name, parent, strlen(name));
if (IS_ERR(d)) {
d = NULL;
@@ -127,7 +121,7 @@ static struct dentry *relayfs_create_entry(const char *name,
goto release_mount;
}
- inode = relayfs_get_inode(parent->d_inode->i_sb, mode, chan);
+ inode = relayfs_get_inode(parent->d_inode->i_sb, mode, fops, data);
if (!inode) {
d = NULL;
goto release_mount;
@@ -145,7 +139,7 @@ release_mount:
simple_release_fs(&relayfs_mount, &relayfs_mount_count);
exit:
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
dput(parent);
return d;
}
@@ -155,20 +149,26 @@ exit:
* @name: the name of the file to create
* @parent: parent directory
* @mode: mode, if not specied the default perms are used
- * @chan: channel associated with the file
+ * @fops: file operations to use for the file
+ * @data: user-associated data for this file
*
* Returns file dentry if successful, NULL otherwise.
*
* The file will be created user r on behalf of current user.
*/
-struct dentry *relayfs_create_file(const char *name, struct dentry *parent,
- int mode, struct rchan *chan)
+struct dentry *relayfs_create_file(const char *name,
+ struct dentry *parent,
+ int mode,
+ struct file_operations *fops,
+ void *data)
{
+ BUG_ON(!fops);
+
if (!mode)
mode = S_IRUSR;
mode = (mode & S_IALLUGO) | S_IFREG;
- return relayfs_create_entry(name, parent, mode, chan);
+ return relayfs_create_entry(name, parent, mode, fops, data);
}
/**
@@ -183,7 +183,7 @@ struct dentry *relayfs_create_file(const char *name, struct dentry *parent,
struct dentry *relayfs_create_dir(const char *name, struct dentry *parent)
{
int mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
- return relayfs_create_entry(name, parent, mode, NULL);
+ return relayfs_create_entry(name, parent, mode, NULL, NULL);
}
/**
@@ -204,7 +204,7 @@ int relayfs_remove(struct dentry *dentry)
return -EINVAL;
parent = dget(parent);
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
if (dentry->d_inode) {
if (S_ISDIR(dentry->d_inode->i_mode))
error = simple_rmdir(parent->d_inode, dentry);
@@ -215,7 +215,7 @@ int relayfs_remove(struct dentry *dentry)
}
if (!error)
dput(dentry);
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
dput(parent);
if (!error)
@@ -225,6 +225,17 @@ int relayfs_remove(struct dentry *dentry)
}
/**
+ * relayfs_remove_file - remove a file from relay filesystem
+ * @dentry: directory dentry
+ *
+ * Returns 0 if successful, negative otherwise.
+ */
+int relayfs_remove_file(struct dentry *dentry)
+{
+ return relayfs_remove(dentry);
+}
+
+/**
* relayfs_remove_dir - remove a directory in the relay filesystem
* @dentry: directory dentry
*
@@ -236,45 +247,45 @@ int relayfs_remove_dir(struct dentry *dentry)
}
/**
- * relayfs_open - open file op for relayfs files
+ * relay_file_open - open file op for relay files
* @inode: the inode
* @filp: the file
*
* Increments the channel buffer refcount.
*/
-static int relayfs_open(struct inode *inode, struct file *filp)
+static int relay_file_open(struct inode *inode, struct file *filp)
{
- struct rchan_buf *buf = RELAYFS_I(inode)->buf;
+ struct rchan_buf *buf = inode->u.generic_ip;
kref_get(&buf->kref);
+ filp->private_data = buf;
return 0;
}
/**
- * relayfs_mmap - mmap file op for relayfs files
+ * relay_file_mmap - mmap file op for relay files
* @filp: the file
* @vma: the vma describing what to map
*
* Calls upon relay_mmap_buf to map the file into user space.
*/
-static int relayfs_mmap(struct file *filp, struct vm_area_struct *vma)
+static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct inode *inode = filp->f_dentry->d_inode;
- return relay_mmap_buf(RELAYFS_I(inode)->buf, vma);
+ struct rchan_buf *buf = filp->private_data;
+ return relay_mmap_buf(buf, vma);
}
/**
- * relayfs_poll - poll file op for relayfs files
+ * relay_file_poll - poll file op for relay files
* @filp: the file
* @wait: poll table
*
* Poll implemention.
*/
-static unsigned int relayfs_poll(struct file *filp, poll_table *wait)
+static unsigned int relay_file_poll(struct file *filp, poll_table *wait)
{
unsigned int mask = 0;
- struct inode *inode = filp->f_dentry->d_inode;
- struct rchan_buf *buf = RELAYFS_I(inode)->buf;
+ struct rchan_buf *buf = filp->private_data;
if (buf->finalized)
return POLLERR;
@@ -289,27 +300,27 @@ static unsigned int relayfs_poll(struct file *filp, poll_table *wait)
}
/**
- * relayfs_release - release file op for relayfs files
+ * relay_file_release - release file op for relay files
* @inode: the inode
* @filp: the file
*
* Decrements the channel refcount, as the filesystem is
* no longer using it.
*/
-static int relayfs_release(struct inode *inode, struct file *filp)
+static int relay_file_release(struct inode *inode, struct file *filp)
{
- struct rchan_buf *buf = RELAYFS_I(inode)->buf;
+ struct rchan_buf *buf = filp->private_data;
kref_put(&buf->kref, relay_remove_buf);
return 0;
}
/**
- * relayfs_read_consume - update the consumed count for the buffer
+ * relay_file_read_consume - update the consumed count for the buffer
*/
-static void relayfs_read_consume(struct rchan_buf *buf,
- size_t read_pos,
- size_t bytes_consumed)
+static void relay_file_read_consume(struct rchan_buf *buf,
+ size_t read_pos,
+ size_t bytes_consumed)
{
size_t subbuf_size = buf->chan->subbuf_size;
size_t n_subbufs = buf->chan->n_subbufs;
@@ -332,9 +343,9 @@ static void relayfs_read_consume(struct rchan_buf *buf,
}
/**
- * relayfs_read_avail - boolean, are there unconsumed bytes available?
+ * relay_file_read_avail - boolean, are there unconsumed bytes available?
*/
-static int relayfs_read_avail(struct rchan_buf *buf, size_t read_pos)
+static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
{
size_t bytes_produced, bytes_consumed, write_offset;
size_t subbuf_size = buf->chan->subbuf_size;
@@ -365,16 +376,16 @@ static int relayfs_read_avail(struct rchan_buf *buf, size_t read_pos)
if (bytes_produced == bytes_consumed)
return 0;
- relayfs_read_consume(buf, read_pos, 0);
+ relay_file_read_consume(buf, read_pos, 0);
return 1;
}
/**
- * relayfs_read_subbuf_avail - return bytes available in sub-buffer
+ * relay_file_read_subbuf_avail - return bytes available in sub-buffer
*/
-static size_t relayfs_read_subbuf_avail(size_t read_pos,
- struct rchan_buf *buf)
+static size_t relay_file_read_subbuf_avail(size_t read_pos,
+ struct rchan_buf *buf)
{
size_t padding, avail = 0;
size_t read_subbuf, read_offset, write_subbuf, write_offset;
@@ -396,14 +407,14 @@ static size_t relayfs_read_subbuf_avail(size_t read_pos,
}
/**
- * relayfs_read_start_pos - find the first available byte to read
+ * relay_file_read_start_pos - find the first available byte to read
*
* If the read_pos is in the middle of padding, return the
* position of the first actually available byte, otherwise
* return the original value.
*/
-static size_t relayfs_read_start_pos(size_t read_pos,
- struct rchan_buf *buf)
+static size_t relay_file_read_start_pos(size_t read_pos,
+ struct rchan_buf *buf)
{
size_t read_subbuf, padding, padding_start, padding_end;
size_t subbuf_size = buf->chan->subbuf_size;
@@ -422,11 +433,11 @@ static size_t relayfs_read_start_pos(size_t read_pos,
}
/**
- * relayfs_read_end_pos - return the new read position
+ * relay_file_read_end_pos - return the new read position
*/
-static size_t relayfs_read_end_pos(struct rchan_buf *buf,
- size_t read_pos,
- size_t count)
+static size_t relay_file_read_end_pos(struct rchan_buf *buf,
+ size_t read_pos,
+ size_t count)
{
size_t read_subbuf, padding, end_pos;
size_t subbuf_size = buf->chan->subbuf_size;
@@ -445,7 +456,7 @@ static size_t relayfs_read_end_pos(struct rchan_buf *buf,
}
/**
- * relayfs_read - read file op for relayfs files
+ * relay_file_read - read file op for relay files
* @filp: the file
* @buffer: the userspace buffer
* @count: number of bytes to read
@@ -454,23 +465,23 @@ static size_t relayfs_read_end_pos(struct rchan_buf *buf,
* Reads count bytes or the number of bytes available in the
* current sub-buffer being read, whichever is smaller.
*/
-static ssize_t relayfs_read(struct file *filp,
- char __user *buffer,
- size_t count,
- loff_t *ppos)
+static ssize_t relay_file_read(struct file *filp,
+ char __user *buffer,
+ size_t count,
+ loff_t *ppos)
{
+ struct rchan_buf *buf = filp->private_data;
struct inode *inode = filp->f_dentry->d_inode;
- struct rchan_buf *buf = RELAYFS_I(inode)->buf;
size_t read_start, avail;
ssize_t ret = 0;
void *from;
- down(&inode->i_sem);
- if(!relayfs_read_avail(buf, *ppos))
+ mutex_lock(&inode->i_mutex);
+ if(!relay_file_read_avail(buf, *ppos))
goto out;
- read_start = relayfs_read_start_pos(*ppos, buf);
- avail = relayfs_read_subbuf_avail(read_start, buf);
+ read_start = relay_file_read_start_pos(*ppos, buf);
+ avail = relay_file_read_subbuf_avail(read_start, buf);
if (!avail)
goto out;
@@ -480,58 +491,25 @@ static ssize_t relayfs_read(struct file *filp,
ret = -EFAULT;
goto out;
}
- relayfs_read_consume(buf, read_start, count);
- *ppos = relayfs_read_end_pos(buf, read_start, count);
+ relay_file_read_consume(buf, read_start, count);
+ *ppos = relay_file_read_end_pos(buf, read_start, count);
out:
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return ret;
}
-/**
- * relayfs alloc_inode() implementation
- */
-static struct inode *relayfs_alloc_inode(struct super_block *sb)
-{
- struct relayfs_inode_info *p = kmem_cache_alloc(relayfs_inode_cachep, SLAB_KERNEL);
- if (!p)
- return NULL;
- p->buf = NULL;
-
- return &p->vfs_inode;
-}
-
-/**
- * relayfs destroy_inode() implementation
- */
-static void relayfs_destroy_inode(struct inode *inode)
-{
- if (RELAYFS_I(inode)->buf)
- relay_destroy_buf(RELAYFS_I(inode)->buf);
-
- kmem_cache_free(relayfs_inode_cachep, RELAYFS_I(inode));
-}
-
-static void init_once(void *p, kmem_cache_t *cachep, unsigned long flags)
-{
- struct relayfs_inode_info *i = p;
- if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR)
- inode_init_once(&i->vfs_inode);
-}
-
-struct file_operations relayfs_file_operations = {
- .open = relayfs_open,
- .poll = relayfs_poll,
- .mmap = relayfs_mmap,
- .read = relayfs_read,
+struct file_operations relay_file_operations = {
+ .open = relay_file_open,
+ .poll = relay_file_poll,
+ .mmap = relay_file_mmap,
+ .read = relay_file_read,
.llseek = no_llseek,
- .release = relayfs_release,
+ .release = relay_file_release,
};
static struct super_operations relayfs_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
- .alloc_inode = relayfs_alloc_inode,
- .destroy_inode = relayfs_destroy_inode,
};
static int relayfs_fill_super(struct super_block * sb, void * data, int silent)
@@ -544,7 +522,7 @@ static int relayfs_fill_super(struct super_block * sb, void * data, int silent)
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = RELAYFS_MAGIC;
sb->s_op = &relayfs_ops;
- inode = relayfs_get_inode(sb, mode, NULL);
+ inode = relayfs_get_inode(sb, mode, NULL, NULL);
if (!inode)
return -ENOMEM;
@@ -575,33 +553,27 @@ static struct file_system_type relayfs_fs_type = {
static int __init init_relayfs_fs(void)
{
- int err;
-
- relayfs_inode_cachep = kmem_cache_create("relayfs_inode_cache",
- sizeof(struct relayfs_inode_info), 0,
- 0, init_once, NULL);
- if (!relayfs_inode_cachep)
- return -ENOMEM;
-
- err = register_filesystem(&relayfs_fs_type);
- if (err)
- kmem_cache_destroy(relayfs_inode_cachep);
-
- return err;
+ return register_filesystem(&relayfs_fs_type);
}
static void __exit exit_relayfs_fs(void)
{
+
+
+
+
+
unregister_filesystem(&relayfs_fs_type);
- kmem_cache_destroy(relayfs_inode_cachep);
}
module_init(init_relayfs_fs)
module_exit(exit_relayfs_fs)
-EXPORT_SYMBOL_GPL(relayfs_file_operations);
+EXPORT_SYMBOL_GPL(relay_file_operations);
EXPORT_SYMBOL_GPL(relayfs_create_dir);
EXPORT_SYMBOL_GPL(relayfs_remove_dir);
+EXPORT_SYMBOL_GPL(relayfs_create_file);
+EXPORT_SYMBOL_GPL(relayfs_remove_file);
MODULE_AUTHOR("Tom Zanussi <zanussi@us.ibm.com> and Karim Yaghmour <karim@opersys.com>");
MODULE_DESCRIPTION("Relay Filesystem");
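(Editor's note: this patch drops relayfs's private inode (alloc_inode/destroy_inode and relayfs_inode_info) in favour of stashing the buffer pointer in inode->u.generic_ip when the file is created and copying it into filp->private_data at open time, so every later file operation reaches its rchan_buf through the file alone. A schematic userspace sketch of that hand-off; the toy_* structures are hypothetical stand-ins, not the VFS types.)

	#include <stdio.h>

	/* Pared-down stand-ins: only the two pointer fields matter here. */
	struct toy_inode { void *generic_ip; };	/* set when the file is created */
	struct toy_file  { void *private_data; };	/* set at open()                */

	struct toy_buf   { int nrbufs; };		/* the per-file state           */

	/* open(): copy the creation-time pointer into the file, so later
	 * operations use file->private_data and never touch the inode. */
	static int toy_open(struct toy_inode *inode, struct toy_file *filp)
	{
		filp->private_data = inode->generic_ip;
		return 0;
	}

	static int toy_read(struct toy_file *filp)
	{
		struct toy_buf *buf = filp->private_data;
		return buf->nrbufs;
	}

	int main(void)
	{
		struct toy_buf buf = { .nrbufs = 8 };
		struct toy_inode inode = { .generic_ip = &buf };
		struct toy_file file = { 0 };

		toy_open(&inode, &file);
		printf("nrbufs = %d\n", toy_read(&file));
		return 0;
	}
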
diff --git a/fs/relayfs/relay.c b/fs/relayfs/relay.c
index 2a6f7f12b7f..abf3ceaace4 100644
--- a/fs/relayfs/relay.c
+++ b/fs/relayfs/relay.c
@@ -80,11 +80,34 @@ static void buf_unmapped_default_callback(struct rchan_buf *buf,
{
}
+/*
+ * create_buf_file() default callback. Creates file to represent buf.
+ */
+static struct dentry *create_buf_file_default_callback(const char *filename,
+ struct dentry *parent,
+ int mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ return relayfs_create_file(filename, parent, mode,
+ &relay_file_operations, buf);
+}
+
+/*
+ * remove_buf_file() default callback. Removes file representing relay buffer.
+ */
+static int remove_buf_file_default_callback(struct dentry *dentry)
+{
+ return relayfs_remove(dentry);
+}
+
/* relay channel default callbacks */
static struct rchan_callbacks default_channel_callbacks = {
.subbuf_start = subbuf_start_default_callback,
.buf_mapped = buf_mapped_default_callback,
.buf_unmapped = buf_unmapped_default_callback,
+ .create_buf_file = create_buf_file_default_callback,
+ .remove_buf_file = remove_buf_file_default_callback,
};
/**
@@ -148,14 +171,16 @@ static inline void __relay_reset(struct rchan_buf *buf, unsigned int init)
void relay_reset(struct rchan *chan)
{
unsigned int i;
+ struct rchan_buf *prev = NULL;
if (!chan)
return;
for (i = 0; i < NR_CPUS; i++) {
- if (!chan->buf[i])
- continue;
+ if (!chan->buf[i] || chan->buf[i] == prev)
+ break;
__relay_reset(chan->buf[i], 0);
+ prev = chan->buf[i];
}
}
@@ -166,17 +191,27 @@ void relay_reset(struct rchan *chan)
*/
static struct rchan_buf *relay_open_buf(struct rchan *chan,
const char *filename,
- struct dentry *parent)
+ struct dentry *parent,
+ int *is_global)
{
struct rchan_buf *buf;
struct dentry *dentry;
+ if (*is_global)
+ return chan->buf[0];
+
+ buf = relay_create_buf(chan);
+ if (!buf)
+ return NULL;
+
/* Create file in fs */
- dentry = relayfs_create_file(filename, parent, S_IRUSR, chan);
- if (!dentry)
+ dentry = chan->cb->create_buf_file(filename, parent, S_IRUSR,
+ buf, is_global);
+ if (!dentry) {
+ relay_destroy_buf(buf);
return NULL;
+ }
- buf = RELAYFS_I(dentry->d_inode)->buf;
buf->dentry = dentry;
__relay_reset(buf, 1);
@@ -214,6 +249,10 @@ static inline void setup_callbacks(struct rchan *chan,
cb->buf_mapped = buf_mapped_default_callback;
if (!cb->buf_unmapped)
cb->buf_unmapped = buf_unmapped_default_callback;
+ if (!cb->create_buf_file)
+ cb->create_buf_file = create_buf_file_default_callback;
+ if (!cb->remove_buf_file)
+ cb->remove_buf_file = remove_buf_file_default_callback;
chan->cb = cb;
}
@@ -241,6 +280,7 @@ struct rchan *relay_open(const char *base_filename,
unsigned int i;
struct rchan *chan;
char *tmpname;
+ int is_global = 0;
if (!base_filename)
return NULL;
@@ -265,7 +305,8 @@ struct rchan *relay_open(const char *base_filename,
for_each_online_cpu(i) {
sprintf(tmpname, "%s%d", base_filename, i);
- chan->buf[i] = relay_open_buf(chan, tmpname, parent);
+ chan->buf[i] = relay_open_buf(chan, tmpname, parent,
+ &is_global);
chan->buf[i]->cpu = i;
if (!chan->buf[i])
goto free_bufs;
@@ -279,6 +320,8 @@ free_bufs:
if (!chan->buf[i])
break;
relay_close_buf(chan->buf[i]);
+ if (is_global)
+ break;
}
kfree(tmpname);
@@ -388,14 +431,16 @@ void relay_destroy_channel(struct kref *kref)
void relay_close(struct rchan *chan)
{
unsigned int i;
+ struct rchan_buf *prev = NULL;
if (!chan)
return;
for (i = 0; i < NR_CPUS; i++) {
- if (!chan->buf[i])
- continue;
+ if (!chan->buf[i] || chan->buf[i] == prev)
+ break;
relay_close_buf(chan->buf[i]);
+ prev = chan->buf[i];
}
if (chan->last_toobig)
@@ -415,14 +460,16 @@ void relay_close(struct rchan *chan)
void relay_flush(struct rchan *chan)
{
unsigned int i;
+ struct rchan_buf *prev = NULL;
if (!chan)
return;
for (i = 0; i < NR_CPUS; i++) {
- if (!chan->buf[i])
- continue;
+ if (!chan->buf[i] || chan->buf[i] == prev)
+ break;
relay_switch_subbuf(chan->buf[i], 0);
+ prev = chan->buf[i];
}
}
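
The relay.c hunks above route buffer-file creation and removal through two new rchan_callbacks hooks, create_buf_file() and remove_buf_file(): the defaults simply wrap relayfs_create_file() and relayfs_remove(), and relay_open_buf() now allocates the rchan_buf itself before invoking the hook. A minimal sketch of a client overriding these hooks (the my_* names are hypothetical; the signatures follow the default callbacks shown above):

/* Hypothetical relay client callbacks, mirroring the defaults added above.
 * A callback that sets *is_global would make every CPU share chan->buf[0],
 * as handled by the new relay_open_buf() code. */
static struct dentry *my_create_buf_file(const char *filename,
					 struct dentry *parent,
					 int mode,
					 struct rchan_buf *buf,
					 int *is_global)
{
	return relayfs_create_file(filename, parent, mode,
				   &relay_file_operations, buf);
}

static int my_remove_buf_file(struct dentry *dentry)
{
	return relayfs_remove(dentry);
}

static struct rchan_callbacks my_callbacks = {
	.create_buf_file = my_create_buf_file,
	.remove_buf_file = my_remove_buf_file,
	/* members left NULL are filled in by setup_callbacks() */
};

With a shared (global) buffer, the NR_CPUS loops in relay_reset(), relay_close() and relay_flush() must not touch the same buffer more than once, which is what the new "prev" checks above take care of: they break out as soon as chan->buf[i] repeats.
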
diff --git a/fs/relayfs/relay.h b/fs/relayfs/relay.h
index 703503fa22b..0993d3e5753 100644
--- a/fs/relayfs/relay.h
+++ b/fs/relayfs/relay.h
@@ -1,10 +1,6 @@
#ifndef _RELAY_H
#define _RELAY_H
-struct dentry *relayfs_create_file(const char *name,
- struct dentry *parent,
- int mode,
- struct rchan *chan);
extern int relayfs_remove(struct dentry *dentry);
extern int relay_buf_empty(struct rchan_buf *buf);
extern void relay_destroy_channel(struct kref *kref);
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c
index c74f382dabb..0a13859fd57 100644
--- a/fs/romfs/inode.c
+++ b/fs/romfs/inode.c
@@ -418,7 +418,7 @@ static int
romfs_readpage(struct file *file, struct page * page)
{
struct inode *inode = page->mapping->host;
- unsigned long offset, avail, readlen;
+ loff_t offset, avail, readlen;
void *buf;
int result = -EIO;
@@ -429,8 +429,8 @@ romfs_readpage(struct file *file, struct page * page)
goto err_out;
/* 32 bit warning -- but not for us :) */
- offset = page->index << PAGE_CACHE_SHIFT;
- if (offset < inode->i_size) {
+ offset = page_offset(page);
+ if (offset < i_size_read(inode)) {
avail = inode->i_size-offset;
readlen = min_t(unsigned long, avail, PAGE_SIZE);
if (romfs_copyfrom(inode, buf, ROMFS_I(inode)->i_dataoffset+offset, readlen) == readlen) {
diff --git a/fs/smbfs/cache.c b/fs/smbfs/cache.c
index f3e6b81288a..74b86d9725a 100644
--- a/fs/smbfs/cache.c
+++ b/fs/smbfs/cache.c
@@ -66,7 +66,7 @@ smb_invalidate_dircache_entries(struct dentry *parent)
spin_lock(&dcache_lock);
next = parent->d_subdirs.next;
while (next != &parent->d_subdirs) {
- dentry = list_entry(next, struct dentry, d_child);
+ dentry = list_entry(next, struct dentry, d_u.d_child);
dentry->d_fsdata = NULL;
smb_age_dentry(server, dentry);
next = next->next;
@@ -100,7 +100,7 @@ smb_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos)
spin_lock(&dcache_lock);
next = parent->d_subdirs.next;
while (next != &parent->d_subdirs) {
- dent = list_entry(next, struct dentry, d_child);
+ dent = list_entry(next, struct dentry, d_u.d_child);
if ((unsigned long)dent->d_fsdata == fpos) {
if (dent->d_inode)
dget_locked(dent);
diff --git a/fs/smbfs/file.c b/fs/smbfs/file.c
index b4fcfa8b55a..7042e62726a 100644
--- a/fs/smbfs/file.c
+++ b/fs/smbfs/file.c
@@ -209,8 +209,8 @@ smb_updatepage(struct file *file, struct page *page, unsigned long offset,
{
struct dentry *dentry = file->f_dentry;
- DEBUG1("(%s/%s %d@%ld)\n", DENTRY_PATH(dentry),
- count, (page->index << PAGE_CACHE_SHIFT)+offset);
+ DEBUG1("(%s/%s %d@%lld)\n", DENTRY_PATH(dentry), count,
+ ((unsigned long long)page->index << PAGE_CACHE_SHIFT) + offset);
return smb_writepage_sync(dentry->d_inode, page, offset, count);
}
@@ -374,8 +374,7 @@ smb_file_release(struct inode *inode, struct file * file)
/* We must flush any dirty pages now as we won't be able to
write anything after close. mmap can trigger this.
"openers" should perhaps include mmap'ers ... */
- filemap_fdatawrite(inode->i_mapping);
- filemap_fdatawait(inode->i_mapping);
+ filemap_write_and_wait(inode->i_mapping);
smb_close(inode);
}
unlock_kernel();
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index 10b994428fe..6ec88bf59b2 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -697,8 +697,7 @@ smb_notify_change(struct dentry *dentry, struct iattr *attr)
DENTRY_PATH(dentry),
(long) inode->i_size, (long) attr->ia_size);
- filemap_fdatawrite(inode->i_mapping);
- filemap_fdatawait(inode->i_mapping);
+ filemap_write_and_wait(inode->i_mapping);
error = smb_open(dentry, O_WRONLY);
if (error)
diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
index 38ab558835c..b1b878b8173 100644
--- a/fs/smbfs/proc.c
+++ b/fs/smbfs/proc.c
@@ -8,6 +8,7 @@
*/
#include <linux/types.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/fs.h>
@@ -3113,7 +3114,7 @@ smb_proc_setattr_unix(struct dentry *d, struct iattr *attr,
LSET(data, 32, SMB_TIME_NO_CHANGE);
LSET(data, 40, SMB_UID_NO_CHANGE);
LSET(data, 48, SMB_GID_NO_CHANGE);
- LSET(data, 56, smb_filetype_from_mode(attr->ia_mode));
+ DSET(data, 56, smb_filetype_from_mode(attr->ia_mode));
LSET(data, 60, major);
LSET(data, 68, minor);
LSET(data, 76, 0);
diff --git a/fs/super.c b/fs/super.c
index 5a347a4f673..c177b92419c 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -72,7 +72,7 @@ static struct super_block *alloc_super(void)
INIT_HLIST_HEAD(&s->s_anon);
INIT_LIST_HEAD(&s->s_inodes);
init_rwsem(&s->s_umount);
- sema_init(&s->s_lock, 1);
+ mutex_init(&s->s_lock);
down_write(&s->s_umount);
s->s_count = S_BIAS;
atomic_set(&s->s_active, 1);
@@ -700,8 +700,7 @@ struct super_block *get_sb_bdev(struct file_system_type *fs_type,
s->s_flags = flags;
strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
- s->s_old_blocksize = block_size(bdev);
- sb_set_blocksize(s, s->s_old_blocksize);
+ sb_set_blocksize(s, block_size(bdev));
error = fill_super(s, data, flags & MS_VERBOSE ? 1 : 0);
if (error) {
up_write(&s->s_umount);
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index d3678038217..49bd219275d 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -99,7 +99,7 @@ static int create_dir(struct kobject * k, struct dentry * p,
int error;
umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
- down(&p->d_inode->i_sem);
+ mutex_lock(&p->d_inode->i_mutex);
*d = lookup_one_len(n, p, strlen(n));
if (!IS_ERR(*d)) {
error = sysfs_make_dirent(p->d_fsdata, *d, k, mode, SYSFS_DIR);
@@ -122,7 +122,7 @@ static int create_dir(struct kobject * k, struct dentry * p,
dput(*d);
} else
error = PTR_ERR(*d);
- up(&p->d_inode->i_sem);
+ mutex_unlock(&p->d_inode->i_mutex);
return error;
}
@@ -246,7 +246,7 @@ static void remove_dir(struct dentry * d)
struct dentry * parent = dget(d->d_parent);
struct sysfs_dirent * sd;
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
d_delete(d);
sd = d->d_fsdata;
list_del_init(&sd->s_sibling);
@@ -257,7 +257,7 @@ static void remove_dir(struct dentry * d)
pr_debug(" o %s removing done (%d)\n",d->d_name.name,
atomic_read(&d->d_count));
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
dput(parent);
}
@@ -286,7 +286,7 @@ void sysfs_remove_dir(struct kobject * kobj)
return;
pr_debug("sysfs %s: removing dir\n",dentry->d_name.name);
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&dentry->d_inode->i_mutex);
parent_sd = dentry->d_fsdata;
list_for_each_entry_safe(sd, tmp, &parent_sd->s_children, s_sibling) {
if (!sd->s_element || !(sd->s_type & SYSFS_NOT_PINNED))
@@ -295,7 +295,7 @@ void sysfs_remove_dir(struct kobject * kobj)
sysfs_drop_dentry(sd, dentry);
sysfs_put(sd);
}
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
remove_dir(dentry);
/**
@@ -318,7 +318,7 @@ int sysfs_rename_dir(struct kobject * kobj, const char *new_name)
down_write(&sysfs_rename_sem);
parent = kobj->parent->dentry;
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
new_dentry = lookup_one_len(new_name, parent, strlen(new_name));
if (!IS_ERR(new_dentry)) {
@@ -334,7 +334,7 @@ int sysfs_rename_dir(struct kobject * kobj, const char *new_name)
error = -EEXIST;
dput(new_dentry);
}
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
up_write(&sysfs_rename_sem);
return error;
@@ -345,9 +345,9 @@ static int sysfs_dir_open(struct inode *inode, struct file *file)
struct dentry * dentry = file->f_dentry;
struct sysfs_dirent * parent_sd = dentry->d_fsdata;
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&dentry->d_inode->i_mutex);
file->private_data = sysfs_new_dirent(parent_sd, NULL);
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
return file->private_data ? 0 : -ENOMEM;
@@ -358,9 +358,9 @@ static int sysfs_dir_close(struct inode *inode, struct file *file)
struct dentry * dentry = file->f_dentry;
struct sysfs_dirent * cursor = file->private_data;
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&dentry->d_inode->i_mutex);
list_del_init(&cursor->s_sibling);
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
release_sysfs_dirent(cursor);
@@ -436,7 +436,7 @@ static loff_t sysfs_dir_lseek(struct file * file, loff_t offset, int origin)
{
struct dentry * dentry = file->f_dentry;
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&dentry->d_inode->i_mutex);
switch (origin) {
case 1:
offset += file->f_pos;
@@ -444,7 +444,7 @@ static loff_t sysfs_dir_lseek(struct file * file, loff_t offset, int origin)
if (offset >= 0)
break;
default:
- up(&file->f_dentry->d_inode->i_sem);
+ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
return -EINVAL;
}
if (offset != file->f_pos) {
@@ -468,7 +468,7 @@ static loff_t sysfs_dir_lseek(struct file * file, loff_t offset, int origin)
list_add_tail(&cursor->s_sibling, p);
}
}
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
return offset;
}
@@ -483,4 +483,3 @@ struct file_operations sysfs_dir_operations = {
EXPORT_SYMBOL_GPL(sysfs_create_dir);
EXPORT_SYMBOL_GPL(sysfs_remove_dir);
EXPORT_SYMBOL_GPL(sysfs_rename_dir);
-
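
The sysfs hunks above, like the ufs quota and xattr hunks later in this patch, apply the same mechanical conversion from the per-inode semaphore to the new mutex. The before/after pattern in isolation, as a sketch:

	/* before: i_sem is a struct semaphore */
	down(&dir->d_inode->i_sem);
	/* ... create, remove or rename entries under dir ... */
	up(&dir->d_inode->i_sem);

	/* after: i_mutex is a struct mutex (see linux/mutex.h) */
	mutex_lock(&dir->d_inode->i_mutex);
	/* ... same critical section ... */
	mutex_unlock(&dir->d_inode->i_mutex);
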
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 4013d7905e8..d0e3d849516 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -364,9 +364,9 @@ int sysfs_add_file(struct dentry * dir, const struct attribute * attr, int type)
umode_t mode = (attr->mode & S_IALLUGO) | S_IFREG;
int error = 0;
- down(&dir->d_inode->i_sem);
+ mutex_lock(&dir->d_inode->i_mutex);
error = sysfs_make_dirent(parent_sd, NULL, (void *) attr, mode, type);
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
return error;
}
@@ -398,7 +398,7 @@ int sysfs_update_file(struct kobject * kobj, const struct attribute * attr)
struct dentry * victim;
int res = -ENOENT;
- down(&dir->d_inode->i_sem);
+ mutex_lock(&dir->d_inode->i_mutex);
victim = lookup_one_len(attr->name, dir, strlen(attr->name));
if (!IS_ERR(victim)) {
/* make sure dentry is really there */
@@ -420,7 +420,7 @@ int sysfs_update_file(struct kobject * kobj, const struct attribute * attr)
*/
dput(victim);
}
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
return res;
}
@@ -441,22 +441,22 @@ int sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, mode_t mode)
struct iattr newattrs;
int res = -ENOENT;
- down(&dir->d_inode->i_sem);
+ mutex_lock(&dir->d_inode->i_mutex);
victim = lookup_one_len(attr->name, dir, strlen(attr->name));
if (!IS_ERR(victim)) {
if (victim->d_inode &&
(victim->d_parent->d_inode == dir->d_inode)) {
inode = victim->d_inode;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
newattrs.ia_mode = (mode & S_IALLUGO) |
(inode->i_mode & ~S_IALLUGO);
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
res = notify_change(victim, &newattrs);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
}
dput(victim);
}
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
return res;
}
@@ -480,4 +480,3 @@ void sysfs_remove_file(struct kobject * kobj, const struct attribute * attr)
EXPORT_SYMBOL_GPL(sysfs_create_file);
EXPORT_SYMBOL_GPL(sysfs_remove_file);
EXPORT_SYMBOL_GPL(sysfs_update_file);
-
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 970a33f0329..689f7bcfaf3 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -11,6 +11,7 @@
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/backing-dev.h>
+#include <linux/capability.h>
#include "sysfs.h"
extern struct super_block * sysfs_sb;
@@ -201,7 +202,7 @@ const unsigned char * sysfs_get_name(struct sysfs_dirent *sd)
/*
* Unhashes the dentry corresponding to given sysfs_dirent
- * Called with parent inode's i_sem held.
+ * Called with parent inode's i_mutex held.
*/
void sysfs_drop_dentry(struct sysfs_dirent * sd, struct dentry * parent)
{
@@ -232,7 +233,7 @@ void sysfs_hash_and_remove(struct dentry * dir, const char * name)
/* no inode means this hasn't been made visible yet */
return;
- down(&dir->d_inode->i_sem);
+ mutex_lock(&dir->d_inode->i_mutex);
list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
if (!sd->s_element)
continue;
@@ -243,7 +244,5 @@ void sysfs_hash_and_remove(struct dentry * dir, const char * name)
break;
}
}
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
}
-
-
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index de402fa915f..e38d6338a20 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -86,9 +86,9 @@ int sysfs_create_link(struct kobject * kobj, struct kobject * target, const char
BUG_ON(!kobj || !kobj->dentry || !name);
- down(&dentry->d_inode->i_sem);
+ mutex_lock(&dentry->d_inode->i_mutex);
error = sysfs_add_link(dentry, name, target);
- up(&dentry->d_inode->i_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
return error;
}
@@ -177,4 +177,3 @@ struct inode_operations sysfs_symlink_inode_operations = {
EXPORT_SYMBOL_GPL(sysfs_create_link);
EXPORT_SYMBOL_GPL(sysfs_remove_link);
-
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index 69a085abad6..cce8b05cba5 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -103,7 +103,7 @@ static int sysv_readdir(struct file * filp, void * dirent, filldir_t filldir)
offset = (char *)de - kaddr;
over = filldir(dirent, name, strnlen(name,SYSV_NAMELEN),
- (n<<PAGE_CACHE_SHIFT) | offset,
+ ((loff_t)n<<PAGE_CACHE_SHIFT) | offset,
fs16_to_cpu(SYSV_SB(sb), de->inode),
DT_UNKNOWN);
if (over) {
@@ -115,7 +115,7 @@ static int sysv_readdir(struct file * filp, void * dirent, filldir_t filldir)
}
done:
- filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
+ filp->f_pos = ((loff_t)n << PAGE_CACHE_SHIFT) | offset;
unlock_kernel();
return 0;
}
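
Both sysv_readdir() changes above cast the page index to loff_t before shifting. Without the cast the shift is evaluated in the narrower type of n, so on 32-bit machines directory positions beyond 4 GB wrap before being widened for filldir() and f_pos; the smb_updatepage() DEBUG1 change earlier fixes the same class of problem in a debug print. The point in isolation, as a sketch:

	/* n is a page index inside a directory, offset a byte offset within
	 * that page; PAGE_CACHE_SHIFT is 12 with 4K pages. */
	unsigned long n = 0x100000;	/* index of a page past the 4 GB mark */
	unsigned int offset = 64;

	loff_t bad  = (n << PAGE_CACHE_SHIFT) | offset;		/* shifted as unsigned long: wraps to 64 on 32-bit */
	loff_t good = ((loff_t)n << PAGE_CACHE_SHIFT) | offset;	/* widened first: 0x100000040 as intended */
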
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 6598a5037ac..4fae57d9d11 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -41,7 +41,7 @@
#define uint(x) xuint(x)
#define xuint(x) __le ## x
-extern inline int find_next_one_bit (void * addr, int size, int offset)
+static inline int find_next_one_bit (void * addr, int size, int offset)
{
uintBPL_t * p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG);
int result = offset & ~(BITS_PER_LONG-1);
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 8a388289040..a6f2acc1f15 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -31,6 +31,7 @@
#include <asm/uaccess.h>
#include <linux/kernel.h>
#include <linux/string.h> /* memset */
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/pagemap.h>
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 4014f17d382..395e582ee54 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1957,11 +1957,6 @@ int8_t inode_bmap(struct inode *inode, int block, kernel_lb_addr *bloc, uint32_t
printk(KERN_ERR "udf: inode_bmap: block < 0\n");
return -1;
}
- if (!inode)
- {
- printk(KERN_ERR "udf: inode_bmap: NULL inode\n");
- return -1;
- }
*extoffset = 0;
*elen = 0;
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index faf1512173e..a9f4421ddb6 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -13,6 +13,7 @@
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
+#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index d0915fba155..7c10c68902a 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -491,7 +491,7 @@ int ufs_delete_entry (struct inode * inode, struct ufs_dir_entry * dir,
UFSD(("ino %u, reclen %u, namlen %u, name %s\n",
fs32_to_cpu(sb, de->d_ino),
- fs16to_cpu(sb, de->d_reclen),
+ fs16_to_cpu(sb, de->d_reclen),
ufs_get_de_namlen(sb, de), de->d_name))
while (i < bh->b_size) {
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 54828ebcf1b..e9a42c711a9 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -1275,7 +1275,7 @@ static ssize_t ufs_quota_write(struct super_block *sb, int type,
size_t towrite = len;
struct buffer_head *bh;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
while (towrite > 0) {
tocopy = sb->s_blocksize - offset < towrite ?
sb->s_blocksize - offset : towrite;
@@ -1296,14 +1296,16 @@ static ssize_t ufs_quota_write(struct super_block *sb, int type,
blk++;
}
out:
- if (len == towrite)
+ if (len == towrite) {
+ mutex_unlock(&inode->i_mutex);
return err;
+ }
if (inode->i_size < off+len-towrite)
i_size_write(inode, off+len-towrite);
inode->i_version++;
inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return len - towrite;
}
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index b2640076679..e45ad537987 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -255,8 +255,8 @@ extern void _ubh_memcpyubh_(struct ufs_sb_private_info *, struct ufs_buffer_head
((struct ufs_super_block_first *)((ubh)->bh[0]->b_data))
#define ubh_get_usb_second(ubh) \
- ((struct ufs_super_block_second *)(ubh)-> \
- bh[UFS_SECTOR_SIZE >> uspi->s_fshift]->b_data + (UFS_SECTOR_SIZE & ~uspi->s_fmask))
+ ((struct ufs_super_block_second *)((ubh)->\
+ bh[UFS_SECTOR_SIZE >> uspi->s_fshift]->b_data + (UFS_SECTOR_SIZE & ~uspi->s_fmask)))
#define ubh_get_usb_third(ubh) \
((struct ufs_super_block_third *)((ubh)-> \
diff --git a/fs/xattr.c b/fs/xattr.c
index bcc2156d4d2..80eca7d3d69 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -19,6 +19,149 @@
#include <linux/fsnotify.h>
#include <asm/uaccess.h>
+
+/*
+ * Check permissions for extended attribute access. This is a bit complicated
+ * because different namespaces have very different rules.
+ */
+static int
+xattr_permission(struct inode *inode, const char *name, int mask)
+{
+ /*
+ * We can never set or remove an extended attribute on a read-only
+ * filesystem or on an immutable / append-only inode.
+ */
+ if (mask & MAY_WRITE) {
+ if (IS_RDONLY(inode))
+ return -EROFS;
+ if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
+ return -EPERM;
+ }
+
+ /*
+ * No restriction for security.* and system.* from the VFS. Decision
+ * on these is left to the underlying filesystem / security module.
+ */
+ if (!strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) ||
+ !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ return 0;
+
+ /*
+ * The trusted.* namespace can only be accessed by a privileged user.
+ */
+ if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
+ return (capable(CAP_SYS_ADMIN) ? 0 : -EPERM);
+
+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
+ if (!S_ISREG(inode->i_mode) &&
+ (!S_ISDIR(inode->i_mode) || inode->i_mode & S_ISVTX))
+ return -EPERM;
+ }
+
+ return permission(inode, mask, NULL);
+}
+
+int
+vfs_setxattr(struct dentry *dentry, char *name, void *value,
+ size_t size, int flags)
+{
+ struct inode *inode = dentry->d_inode;
+ int error;
+
+ error = xattr_permission(inode, name, MAY_WRITE);
+ if (error)
+ return error;
+
+ mutex_lock(&inode->i_mutex);
+ error = security_inode_setxattr(dentry, name, value, size, flags);
+ if (error)
+ goto out;
+ error = -EOPNOTSUPP;
+ if (inode->i_op->setxattr) {
+ error = inode->i_op->setxattr(dentry, name, value, size, flags);
+ if (!error) {
+ fsnotify_xattr(dentry);
+ security_inode_post_setxattr(dentry, name, value,
+ size, flags);
+ }
+ } else if (!strncmp(name, XATTR_SECURITY_PREFIX,
+ XATTR_SECURITY_PREFIX_LEN)) {
+ const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
+ error = security_inode_setsecurity(inode, suffix, value,
+ size, flags);
+ if (!error)
+ fsnotify_xattr(dentry);
+ }
+out:
+ mutex_unlock(&inode->i_mutex);
+ return error;
+}
+EXPORT_SYMBOL_GPL(vfs_setxattr);
+
+ssize_t
+vfs_getxattr(struct dentry *dentry, char *name, void *value, size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+ int error;
+
+ error = xattr_permission(inode, name, MAY_READ);
+ if (error)
+ return error;
+
+ error = security_inode_getxattr(dentry, name);
+ if (error)
+ return error;
+
+ if (inode->i_op->getxattr)
+ error = inode->i_op->getxattr(dentry, name, value, size);
+ else
+ error = -EOPNOTSUPP;
+
+ if (!strncmp(name, XATTR_SECURITY_PREFIX,
+ XATTR_SECURITY_PREFIX_LEN)) {
+ const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
+ int ret = security_inode_getsecurity(inode, suffix, value,
+ size, error);
+ /*
+ * Only overwrite the return value if a security module
+ * is actually active.
+ */
+ if (ret != -EOPNOTSUPP)
+ error = ret;
+ }
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(vfs_getxattr);
+
+int
+vfs_removexattr(struct dentry *dentry, char *name)
+{
+ struct inode *inode = dentry->d_inode;
+ int error;
+
+ if (!inode->i_op->removexattr)
+ return -EOPNOTSUPP;
+
+ error = xattr_permission(inode, name, MAY_WRITE);
+ if (error)
+ return error;
+
+ error = security_inode_removexattr(dentry, name);
+ if (error)
+ return error;
+
+ mutex_lock(&inode->i_mutex);
+ error = inode->i_op->removexattr(dentry, name);
+ mutex_unlock(&inode->i_mutex);
+
+ if (!error)
+ fsnotify_xattr(dentry);
+ return error;
+}
+EXPORT_SYMBOL_GPL(vfs_removexattr);
+
+
/*
* Extended attribute SET operations
*/
@@ -51,29 +194,7 @@ setxattr(struct dentry *d, char __user *name, void __user *value,
}
}
- down(&d->d_inode->i_sem);
- error = security_inode_setxattr(d, kname, kvalue, size, flags);
- if (error)
- goto out;
- error = -EOPNOTSUPP;
- if (d->d_inode->i_op && d->d_inode->i_op->setxattr) {
- error = d->d_inode->i_op->setxattr(d, kname, kvalue,
- size, flags);
- if (!error) {
- fsnotify_xattr(d);
- security_inode_post_setxattr(d, kname, kvalue,
- size, flags);
- }
- } else if (!strncmp(kname, XATTR_SECURITY_PREFIX,
- sizeof XATTR_SECURITY_PREFIX - 1)) {
- const char *suffix = kname + sizeof XATTR_SECURITY_PREFIX - 1;
- error = security_inode_setsecurity(d->d_inode, suffix, kvalue,
- size, flags);
- if (!error)
- fsnotify_xattr(d);
- }
-out:
- up(&d->d_inode->i_sem);
+ error = vfs_setxattr(d, kname, kvalue, size, flags);
kfree(kvalue);
return error;
}
@@ -147,22 +268,7 @@ getxattr(struct dentry *d, char __user *name, void __user *value, size_t size)
return -ENOMEM;
}
- error = security_inode_getxattr(d, kname);
- if (error)
- goto out;
- error = -EOPNOTSUPP;
- if (d->d_inode->i_op && d->d_inode->i_op->getxattr)
- error = d->d_inode->i_op->getxattr(d, kname, kvalue, size);
-
- if (!strncmp(kname, XATTR_SECURITY_PREFIX,
- sizeof XATTR_SECURITY_PREFIX - 1)) {
- const char *suffix = kname + sizeof XATTR_SECURITY_PREFIX - 1;
- int rv = security_inode_getsecurity(d->d_inode, suffix, kvalue,
- size, error);
- /* Security module active: overwrite error value */
- if (rv != -EOPNOTSUPP)
- error = rv;
- }
+ error = vfs_getxattr(d, kname, kvalue, size);
if (error > 0) {
if (size && copy_to_user(value, kvalue, error))
error = -EFAULT;
@@ -171,7 +277,6 @@ getxattr(struct dentry *d, char __user *name, void __user *value, size_t size)
than XATTR_SIZE_MAX bytes. Not possible. */
error = -E2BIG;
}
-out:
kfree(kvalue);
return error;
}
@@ -318,19 +423,7 @@ removexattr(struct dentry *d, char __user *name)
if (error < 0)
return error;
- error = -EOPNOTSUPP;
- if (d->d_inode->i_op && d->d_inode->i_op->removexattr) {
- error = security_inode_removexattr(d, kname);
- if (error)
- goto out;
- down(&d->d_inode->i_sem);
- error = d->d_inode->i_op->removexattr(d, kname);
- up(&d->d_inode->i_sem);
- if (!error)
- fsnotify_xattr(d);
- }
-out:
- return error;
+ return vfs_removexattr(d, kname);
}
asmlinkage long
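
The xattr.c hunks pull the bodies of the set/get/remove syscalls into the exported helpers vfs_setxattr(), vfs_getxattr() and vfs_removexattr(), so the syscall wrappers shrink to argument copying plus a single call, and in-kernel users get the permission checks, i_mutex locking, security hooks and fsnotify events in one place. A minimal sketch of an in-kernel caller of the new exports (the function and attribute name are hypothetical):

/* Hypothetical user of the new helpers; error handling kept minimal. */
static ssize_t tag_dentry(struct dentry *dentry)
{
	char value[2] = "1";
	int err;

	/* takes i_mutex and runs the security/fsnotify hooks internally */
	err = vfs_setxattr(dentry, "user.tagged", value, sizeof(value), 0);
	if (err)
		return err;

	/* returns the attribute's size, or a negative errno */
	return vfs_getxattr(dentry, "user.tagged", value, sizeof(value));
}
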
diff --git a/fs/xfs/Kbuild b/fs/xfs/Kbuild
new file mode 100644
index 00000000000..2566e96706f
--- /dev/null
+++ b/fs/xfs/Kbuild
@@ -0,0 +1,6 @@
+#
+# The xfs people like to share the same Makefile between 2.6 and 2.4.
+# Use a file named Kbuild, which takes precedence over Makefile.
+#
+
+include $(srctree)/$(obj)/Makefile-linux-2.6
diff --git a/fs/xfs/linux-2.6/mutex.h b/fs/xfs/linux-2.6/mutex.h
index ce773d89a92..d3369b6ca16 100644
--- a/fs/xfs/linux-2.6/mutex.h
+++ b/fs/xfs/linux-2.6/mutex.h
@@ -19,7 +19,7 @@
#define __XFS_SUPPORT_MUTEX_H__
#include <linux/spinlock.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
/*
* Map the mutex'es from IRIX to Linux semaphores.
@@ -28,12 +28,8 @@
* callers.
*/
#define MUTEX_DEFAULT 0x0
-typedef struct semaphore mutex_t;
-#define mutex_init(lock, type, name) sema_init(lock, 1)
-#define mutex_destroy(lock) sema_init(lock, -99)
-#define mutex_lock(lock, num) down(lock)
-#define mutex_trylock(lock) (down_trylock(lock) ? 0 : 1)
-#define mutex_unlock(lock) up(lock)
+typedef struct mutex mutex_t;
+//#define mutex_destroy(lock) do{}while(0)
#endif /* __XFS_SUPPORT_MUTEX_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 94d3cdfbf9b..d1db8c17a74 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -40,11 +40,10 @@
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
+#include <linux/pagevec.h>
#include <linux/writeback.h>
STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
-STATIC void xfs_convert_page(struct inode *, struct page *, xfs_iomap_t *,
- struct writeback_control *wbc, void *, int, int);
#if defined(XFS_RW_TRACE)
void
@@ -55,17 +54,15 @@ xfs_page_trace(
int mask)
{
xfs_inode_t *ip;
- bhv_desc_t *bdp;
vnode_t *vp = LINVFS_GET_VP(inode);
loff_t isize = i_size_read(inode);
- loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+ loff_t offset = page_offset(page);
int delalloc = -1, unmapped = -1, unwritten = -1;
if (page_has_buffers(page))
xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
- bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
- ip = XFS_BHVTOI(bdp);
+ ip = xfs_vtoi(vp);
if (!ip->i_rwtrace)
return;
@@ -103,15 +100,56 @@ xfs_finish_ioend(
queue_work(xfsdatad_workqueue, &ioend->io_work);
}
+/*
+ * We're now finished for good with this ioend structure.
+ * Update the page state via the associated buffer_heads,
+ * release holds on the inode and bio, and finally free
+ * up memory. Do not use the ioend after this.
+ */
STATIC void
xfs_destroy_ioend(
xfs_ioend_t *ioend)
{
+ struct buffer_head *bh, *next;
+
+ for (bh = ioend->io_buffer_head; bh; bh = next) {
+ next = bh->b_private;
+ bh->b_end_io(bh, ioend->io_uptodate);
+ }
+
vn_iowake(ioend->io_vnode);
mempool_free(ioend, xfs_ioend_pool);
}
/*
+ * Buffered IO write completion for delayed allocate extents.
+ * TODO: Update ondisk isize now that we know the file data
+ * has been flushed (i.e. the notorious "NULL file" problem).
+ */
+STATIC void
+xfs_end_bio_delalloc(
+ void *data)
+{
+ xfs_ioend_t *ioend = data;
+
+ xfs_destroy_ioend(ioend);
+}
+
+/*
+ * Buffered IO write completion for regular, written extents.
+ */
+STATIC void
+xfs_end_bio_written(
+ void *data)
+{
+ xfs_ioend_t *ioend = data;
+
+ xfs_destroy_ioend(ioend);
+}
+
+/*
+ * IO write completion for unwritten extents.
+ *
* Issue transactions to convert a buffer range from unwritten
* to written extents.
*/
@@ -123,21 +161,10 @@ xfs_end_bio_unwritten(
vnode_t *vp = ioend->io_vnode;
xfs_off_t offset = ioend->io_offset;
size_t size = ioend->io_size;
- struct buffer_head *bh, *next;
int error;
if (ioend->io_uptodate)
VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
-
- /* ioend->io_buffer_head is only non-NULL for buffered I/O */
- for (bh = ioend->io_buffer_head; bh; bh = next) {
- next = bh->b_private;
-
- bh->b_end_io = NULL;
- clear_buffer_unwritten(bh);
- end_buffer_async_write(bh, ioend->io_uptodate);
- }
-
xfs_destroy_ioend(ioend);
}
@@ -149,7 +176,8 @@ xfs_end_bio_unwritten(
*/
STATIC xfs_ioend_t *
xfs_alloc_ioend(
- struct inode *inode)
+ struct inode *inode,
+ unsigned int type)
{
xfs_ioend_t *ioend;
@@ -162,45 +190,25 @@ xfs_alloc_ioend(
*/
atomic_set(&ioend->io_remaining, 1);
ioend->io_uptodate = 1; /* cleared if any I/O fails */
+ ioend->io_list = NULL;
+ ioend->io_type = type;
ioend->io_vnode = LINVFS_GET_VP(inode);
ioend->io_buffer_head = NULL;
+ ioend->io_buffer_tail = NULL;
atomic_inc(&ioend->io_vnode->v_iocount);
ioend->io_offset = 0;
ioend->io_size = 0;
- INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+ if (type == IOMAP_UNWRITTEN)
+ INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+ else if (type == IOMAP_DELAY)
+ INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
+ else
+ INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);
return ioend;
}
-void
-linvfs_unwritten_done(
- struct buffer_head *bh,
- int uptodate)
-{
- xfs_ioend_t *ioend = bh->b_private;
- static spinlock_t unwritten_done_lock = SPIN_LOCK_UNLOCKED;
- unsigned long flags;
-
- ASSERT(buffer_unwritten(bh));
- bh->b_end_io = NULL;
-
- if (!uptodate)
- ioend->io_uptodate = 0;
-
- /*
- * Deep magic here. We reuse b_private in the buffer_heads to build
- * a chain for completing the I/O from user context after we've issued
- * a transaction to convert the unwritten extent.
- */
- spin_lock_irqsave(&unwritten_done_lock, flags);
- bh->b_private = ioend->io_buffer_head;
- ioend->io_buffer_head = bh;
- spin_unlock_irqrestore(&unwritten_done_lock, flags);
-
- xfs_finish_ioend(ioend);
-}
-
STATIC int
xfs_map_blocks(
struct inode *inode,
@@ -218,138 +226,260 @@ xfs_map_blocks(
return -error;
}
+STATIC inline int
+xfs_iomap_valid(
+ xfs_iomap_t *iomapp,
+ loff_t offset)
+{
+ return offset >= iomapp->iomap_offset &&
+ offset < iomapp->iomap_offset + iomapp->iomap_bsize;
+}
+
/*
- * Finds the corresponding mapping in block @map array of the
- * given @offset within a @page.
+ * BIO completion handler for buffered IO.
*/
-STATIC xfs_iomap_t *
-xfs_offset_to_map(
+STATIC int
+xfs_end_bio(
+ struct bio *bio,
+ unsigned int bytes_done,
+ int error)
+{
+ xfs_ioend_t *ioend = bio->bi_private;
+
+ if (bio->bi_size)
+ return 1;
+
+ ASSERT(ioend);
+ ASSERT(atomic_read(&bio->bi_cnt) >= 1);
+
+ /* Toss bio and pass work off to an xfsdatad thread */
+ if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+ ioend->io_uptodate = 0;
+ bio->bi_private = NULL;
+ bio->bi_end_io = NULL;
+
+ bio_put(bio);
+ xfs_finish_ioend(ioend);
+ return 0;
+}
+
+STATIC void
+xfs_submit_ioend_bio(
+ xfs_ioend_t *ioend,
+ struct bio *bio)
+{
+ atomic_inc(&ioend->io_remaining);
+
+ bio->bi_private = ioend;
+ bio->bi_end_io = xfs_end_bio;
+
+ submit_bio(WRITE, bio);
+ ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
+ bio_put(bio);
+}
+
+STATIC struct bio *
+xfs_alloc_ioend_bio(
+ struct buffer_head *bh)
+{
+ struct bio *bio;
+ int nvecs = bio_get_nr_vecs(bh->b_bdev);
+
+ do {
+ bio = bio_alloc(GFP_NOIO, nvecs);
+ nvecs >>= 1;
+ } while (!bio);
+
+ ASSERT(bio->bi_private == NULL);
+ bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+ bio->bi_bdev = bh->b_bdev;
+ bio_get(bio);
+ return bio;
+}
+
+STATIC void
+xfs_start_buffer_writeback(
+ struct buffer_head *bh)
+{
+ ASSERT(buffer_mapped(bh));
+ ASSERT(buffer_locked(bh));
+ ASSERT(!buffer_delay(bh));
+ ASSERT(!buffer_unwritten(bh));
+
+ mark_buffer_async_write(bh);
+ set_buffer_uptodate(bh);
+ clear_buffer_dirty(bh);
+}
+
+STATIC void
+xfs_start_page_writeback(
struct page *page,
- xfs_iomap_t *iomapp,
- unsigned long offset)
+ struct writeback_control *wbc,
+ int clear_dirty,
+ int buffers)
+{
+ ASSERT(PageLocked(page));
+ ASSERT(!PageWriteback(page));
+ set_page_writeback(page);
+ if (clear_dirty)
+ clear_page_dirty(page);
+ unlock_page(page);
+ if (!buffers) {
+ end_page_writeback(page);
+ wbc->pages_skipped++; /* We didn't write this page */
+ }
+}
+
+static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
+{
+ return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
+}
+
+/*
+ * Submit all of the bios for all of the ioends we have saved up,
+ * covering the initial writepage page and also any probed pages.
+ */
+STATIC void
+xfs_submit_ioend(
+ xfs_ioend_t *ioend)
+{
+ xfs_ioend_t *next;
+ struct buffer_head *bh;
+ struct bio *bio;
+ sector_t lastblock = 0;
+
+ do {
+ next = ioend->io_list;
+ bio = NULL;
+
+ for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
+ xfs_start_buffer_writeback(bh);
+
+ if (!bio) {
+ retry:
+ bio = xfs_alloc_ioend_bio(bh);
+ } else if (bh->b_blocknr != lastblock + 1) {
+ xfs_submit_ioend_bio(ioend, bio);
+ goto retry;
+ }
+
+ if (bio_add_buffer(bio, bh) != bh->b_size) {
+ xfs_submit_ioend_bio(ioend, bio);
+ goto retry;
+ }
+
+ lastblock = bh->b_blocknr;
+ }
+ if (bio)
+ xfs_submit_ioend_bio(ioend, bio);
+ xfs_finish_ioend(ioend);
+ } while ((ioend = next) != NULL);
+}
+
+/*
+ * Cancel submission of all buffer_heads so far in this endio.
+ * Toss the endio too. Only ever called for the initial page
+ * in a writepage request, so only ever one page.
+ */
+STATIC void
+xfs_cancel_ioend(
+ xfs_ioend_t *ioend)
+{
+ xfs_ioend_t *next;
+ struct buffer_head *bh, *next_bh;
+
+ do {
+ next = ioend->io_list;
+ bh = ioend->io_buffer_head;
+ do {
+ next_bh = bh->b_private;
+ clear_buffer_async_write(bh);
+ unlock_buffer(bh);
+ } while ((bh = next_bh) != NULL);
+
+ vn_iowake(ioend->io_vnode);
+ mempool_free(ioend, xfs_ioend_pool);
+ } while ((ioend = next) != NULL);
+}
+
+/*
+ * Test to see if we've been building up a completion structure for
+ * earlier buffers -- if so, we try to append to this ioend if we
+ * can, otherwise we finish off any current ioend and start another.
+ * The current ioend is returned through *result.
+ */
+STATIC void
+xfs_add_to_ioend(
+ struct inode *inode,
+ struct buffer_head *bh,
+ xfs_off_t offset,
+ unsigned int type,
+ xfs_ioend_t **result,
+ int need_ioend)
{
- loff_t full_offset; /* offset from start of file */
+ xfs_ioend_t *ioend = *result;
- ASSERT(offset < PAGE_CACHE_SIZE);
+ if (!ioend || need_ioend || type != ioend->io_type) {
+ xfs_ioend_t *previous = *result;
- full_offset = page->index; /* NB: using 64bit number */
- full_offset <<= PAGE_CACHE_SHIFT; /* offset from file start */
- full_offset += offset; /* offset from page start */
+ ioend = xfs_alloc_ioend(inode, type);
+ ioend->io_offset = offset;
+ ioend->io_buffer_head = bh;
+ ioend->io_buffer_tail = bh;
+ if (previous)
+ previous->io_list = ioend;
+ *result = ioend;
+ } else {
+ ioend->io_buffer_tail->b_private = bh;
+ ioend->io_buffer_tail = bh;
+ }
- if (full_offset < iomapp->iomap_offset)
- return NULL;
- if (iomapp->iomap_offset + (iomapp->iomap_bsize -1) >= full_offset)
- return iomapp;
- return NULL;
+ bh->b_private = NULL;
+ ioend->io_size += bh->b_size;
}
STATIC void
xfs_map_at_offset(
- struct page *page,
struct buffer_head *bh,
- unsigned long offset,
+ loff_t offset,
int block_bits,
xfs_iomap_t *iomapp)
{
xfs_daddr_t bn;
- loff_t delta;
int sector_shift;
ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);
- delta = page->index;
- delta <<= PAGE_CACHE_SHIFT;
- delta += offset;
- delta -= iomapp->iomap_offset;
- delta >>= block_bits;
-
sector_shift = block_bits - BBSHIFT;
- bn = iomapp->iomap_bn >> sector_shift;
- bn += delta;
- BUG_ON(!bn && !(iomapp->iomap_flags & IOMAP_REALTIME));
+ bn = (iomapp->iomap_bn >> sector_shift) +
+ ((offset - iomapp->iomap_offset) >> block_bits);
+
+ ASSERT(bn || (iomapp->iomap_flags & IOMAP_REALTIME));
ASSERT((bn << sector_shift) >= iomapp->iomap_bn);
lock_buffer(bh);
bh->b_blocknr = bn;
- bh->b_bdev = iomapp->iomap_target->pbr_bdev;
+ bh->b_bdev = iomapp->iomap_target->bt_bdev;
set_buffer_mapped(bh);
clear_buffer_delay(bh);
+ clear_buffer_unwritten(bh);
}
/*
- * Look for a page at index which is unlocked and contains our
- * unwritten extent flagged buffers at its head. Returns page
- * locked and with an extra reference count, and length of the
- * unwritten extent component on this page that we can write,
- * in units of filesystem blocks.
- */
-STATIC struct page *
-xfs_probe_unwritten_page(
- struct address_space *mapping,
- pgoff_t index,
- xfs_iomap_t *iomapp,
- xfs_ioend_t *ioend,
- unsigned long max_offset,
- unsigned long *fsbs,
- unsigned int bbits)
-{
- struct page *page;
-
- page = find_trylock_page(mapping, index);
- if (!page)
- return NULL;
- if (PageWriteback(page))
- goto out;
-
- if (page->mapping && page_has_buffers(page)) {
- struct buffer_head *bh, *head;
- unsigned long p_offset = 0;
-
- *fsbs = 0;
- bh = head = page_buffers(page);
- do {
- if (!buffer_unwritten(bh) || !buffer_uptodate(bh))
- break;
- if (!xfs_offset_to_map(page, iomapp, p_offset))
- break;
- if (p_offset >= max_offset)
- break;
- xfs_map_at_offset(page, bh, p_offset, bbits, iomapp);
- set_buffer_unwritten_io(bh);
- bh->b_private = ioend;
- p_offset += bh->b_size;
- (*fsbs)++;
- } while ((bh = bh->b_this_page) != head);
-
- if (p_offset)
- return page;
- }
-
-out:
- unlock_page(page);
- return NULL;
-}
-
-/*
- * Look for a page at index which is unlocked and not mapped
- * yet - clustering for mmap write case.
+ * Look for a page at index that is suitable for clustering.
*/
STATIC unsigned int
-xfs_probe_unmapped_page(
- struct address_space *mapping,
- pgoff_t index,
- unsigned int pg_offset)
+xfs_probe_page(
+ struct page *page,
+ unsigned int pg_offset,
+ int mapped)
{
- struct page *page;
int ret = 0;
- page = find_trylock_page(mapping, index);
- if (!page)
- return 0;
if (PageWriteback(page))
- goto out;
+ return 0;
if (page->mapping && PageDirty(page)) {
if (page_has_buffers(page)) {
@@ -357,79 +487,101 @@ xfs_probe_unmapped_page(
bh = head = page_buffers(page);
do {
- if (buffer_mapped(bh) || !buffer_uptodate(bh))
+ if (!buffer_uptodate(bh))
+ break;
+ if (mapped != buffer_mapped(bh))
break;
ret += bh->b_size;
if (ret >= pg_offset)
break;
} while ((bh = bh->b_this_page) != head);
} else
- ret = PAGE_CACHE_SIZE;
+ ret = mapped ? 0 : PAGE_CACHE_SIZE;
}
-out:
- unlock_page(page);
return ret;
}
-STATIC unsigned int
-xfs_probe_unmapped_cluster(
+STATIC size_t
+xfs_probe_cluster(
struct inode *inode,
struct page *startpage,
struct buffer_head *bh,
- struct buffer_head *head)
+ struct buffer_head *head,
+ int mapped)
{
+ struct pagevec pvec;
pgoff_t tindex, tlast, tloff;
- unsigned int pg_offset, len, total = 0;
- struct address_space *mapping = inode->i_mapping;
+ size_t total = 0;
+ int done = 0, i;
/* First sum forwards in this page */
do {
- if (buffer_mapped(bh))
- break;
+ if (mapped != buffer_mapped(bh))
+ return total;
total += bh->b_size;
} while ((bh = bh->b_this_page) != head);
- /* If we reached the end of the page, sum forwards in
- * following pages.
- */
- if (bh == head) {
- tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
- /* Prune this back to avoid pathological behavior */
- tloff = min(tlast, startpage->index + 64);
- for (tindex = startpage->index + 1; tindex < tloff; tindex++) {
- len = xfs_probe_unmapped_page(mapping, tindex,
- PAGE_CACHE_SIZE);
- if (!len)
- return total;
+ /* if we reached the end of the page, sum forwards in following pages */
+ tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+ tindex = startpage->index + 1;
+
+ /* Prune this back to avoid pathological behavior */
+ tloff = min(tlast, startpage->index + 64);
+
+ pagevec_init(&pvec, 0);
+ while (!done && tindex <= tloff) {
+ unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
+
+ if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
+ break;
+
+ for (i = 0; i < pagevec_count(&pvec); i++) {
+ struct page *page = pvec.pages[i];
+ size_t pg_offset, len = 0;
+
+ if (tindex == tlast) {
+ pg_offset =
+ i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
+ if (!pg_offset) {
+ done = 1;
+ break;
+ }
+ } else
+ pg_offset = PAGE_CACHE_SIZE;
+
+ if (page->index == tindex && !TestSetPageLocked(page)) {
+ len = xfs_probe_page(page, pg_offset, mapped);
+ unlock_page(page);
+ }
+
+ if (!len) {
+ done = 1;
+ break;
+ }
+
total += len;
+ tindex++;
}
- if (tindex == tlast &&
- (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
- total += xfs_probe_unmapped_page(mapping,
- tindex, pg_offset);
- }
+
+ pagevec_release(&pvec);
+ cond_resched();
}
+
return total;
}
/*
- * Probe for a given page (index) in the inode and test if it is delayed
- * and without unwritten buffers. Returns page locked and with an extra
- * reference count.
+ * Test if a given page is suitable for writing as part of an unwritten
+ * or delayed allocate extent.
*/
-STATIC struct page *
-xfs_probe_delalloc_page(
- struct inode *inode,
- pgoff_t index)
+STATIC int
+xfs_is_delayed_page(
+ struct page *page,
+ unsigned int type)
{
- struct page *page;
-
- page = find_trylock_page(inode->i_mapping, index);
- if (!page)
- return NULL;
if (PageWriteback(page))
- goto out;
+ return 0;
if (page->mapping && page_has_buffers(page)) {
struct buffer_head *bh, *head;
@@ -437,243 +589,156 @@ xfs_probe_delalloc_page(
bh = head = page_buffers(page);
do {
- if (buffer_unwritten(bh)) {
- acceptable = 0;
+ if (buffer_unwritten(bh))
+ acceptable = (type == IOMAP_UNWRITTEN);
+ else if (buffer_delay(bh))
+ acceptable = (type == IOMAP_DELAY);
+ else if (buffer_mapped(bh))
+ acceptable = (type == 0);
+ else
break;
- } else if (buffer_delay(bh)) {
- acceptable = 1;
- }
} while ((bh = bh->b_this_page) != head);
if (acceptable)
- return page;
- }
-
-out:
- unlock_page(page);
- return NULL;
-}
-
-STATIC int
-xfs_map_unwritten(
- struct inode *inode,
- struct page *start_page,
- struct buffer_head *head,
- struct buffer_head *curr,
- unsigned long p_offset,
- int block_bits,
- xfs_iomap_t *iomapp,
- struct writeback_control *wbc,
- int startio,
- int all_bh)
-{
- struct buffer_head *bh = curr;
- xfs_iomap_t *tmp;
- xfs_ioend_t *ioend;
- loff_t offset;
- unsigned long nblocks = 0;
-
- offset = start_page->index;
- offset <<= PAGE_CACHE_SHIFT;
- offset += p_offset;
-
- ioend = xfs_alloc_ioend(inode);
-
- /* First map forwards in the page consecutive buffers
- * covering this unwritten extent
- */
- do {
- if (!buffer_unwritten(bh))
- break;
- tmp = xfs_offset_to_map(start_page, iomapp, p_offset);
- if (!tmp)
- break;
- xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
- set_buffer_unwritten_io(bh);
- bh->b_private = ioend;
- p_offset += bh->b_size;
- nblocks++;
- } while ((bh = bh->b_this_page) != head);
-
- atomic_add(nblocks, &ioend->io_remaining);
-
- /* If we reached the end of the page, map forwards in any
- * following pages which are also covered by this extent.
- */
- if (bh == head) {
- struct address_space *mapping = inode->i_mapping;
- pgoff_t tindex, tloff, tlast;
- unsigned long bs;
- unsigned int pg_offset, bbits = inode->i_blkbits;
- struct page *page;
-
- tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
- tloff = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
- tloff = min(tlast, tloff);
- for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
- page = xfs_probe_unwritten_page(mapping,
- tindex, iomapp, ioend,
- PAGE_CACHE_SIZE, &bs, bbits);
- if (!page)
- break;
- nblocks += bs;
- atomic_add(bs, &ioend->io_remaining);
- xfs_convert_page(inode, page, iomapp, wbc, ioend,
- startio, all_bh);
- /* stop if converting the next page might add
- * enough blocks that the corresponding byte
- * count won't fit in our ulong page buf length */
- if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
- goto enough;
- }
-
- if (tindex == tlast &&
- (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
- page = xfs_probe_unwritten_page(mapping,
- tindex, iomapp, ioend,
- pg_offset, &bs, bbits);
- if (page) {
- nblocks += bs;
- atomic_add(bs, &ioend->io_remaining);
- xfs_convert_page(inode, page, iomapp, wbc, ioend,
- startio, all_bh);
- if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
- goto enough;
- }
- }
+ return 1;
}
-enough:
- ioend->io_size = (xfs_off_t)nblocks << block_bits;
- ioend->io_offset = offset;
- xfs_finish_ioend(ioend);
return 0;
}
-STATIC void
-xfs_submit_page(
- struct page *page,
- struct writeback_control *wbc,
- struct buffer_head *bh_arr[],
- int bh_count,
- int probed_page,
- int clear_dirty)
-{
- struct buffer_head *bh;
- int i;
-
- BUG_ON(PageWriteback(page));
- if (bh_count)
- set_page_writeback(page);
- if (clear_dirty)
- clear_page_dirty(page);
- unlock_page(page);
-
- if (bh_count) {
- for (i = 0; i < bh_count; i++) {
- bh = bh_arr[i];
- mark_buffer_async_write(bh);
- if (buffer_unwritten(bh))
- set_buffer_unwritten_io(bh);
- set_buffer_uptodate(bh);
- clear_buffer_dirty(bh);
- }
-
- for (i = 0; i < bh_count; i++)
- submit_bh(WRITE, bh_arr[i]);
-
- if (probed_page && clear_dirty)
- wbc->nr_to_write--; /* Wrote an "extra" page */
- }
-}
-
/*
* Allocate & map buffers for page given the extent map. Write it out.
* except for the original page of a writepage, this is called on
* delalloc/unwritten pages only, for the original page it is possible
* that the page has no mapping at all.
*/
-STATIC void
+STATIC int
xfs_convert_page(
struct inode *inode,
struct page *page,
- xfs_iomap_t *iomapp,
+ loff_t tindex,
+ xfs_iomap_t *mp,
+ xfs_ioend_t **ioendp,
struct writeback_control *wbc,
- void *private,
int startio,
int all_bh)
{
- struct buffer_head *bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
- xfs_iomap_t *mp = iomapp, *tmp;
- unsigned long offset, end_offset;
- int index = 0;
+ struct buffer_head *bh, *head;
+ xfs_off_t end_offset;
+ unsigned long p_offset;
+ unsigned int type;
int bbits = inode->i_blkbits;
int len, page_dirty;
+ int count = 0, done = 0, uptodate = 1;
+ xfs_off_t offset = page_offset(page);
- end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1));
+ if (page->index != tindex)
+ goto fail;
+ if (TestSetPageLocked(page))
+ goto fail;
+ if (PageWriteback(page))
+ goto fail_unlock_page;
+ if (page->mapping != inode->i_mapping)
+ goto fail_unlock_page;
+ if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
+ goto fail_unlock_page;
/*
* page_dirty is initially a count of buffers on the page before
 * EOF and is decremented as we move each into a cleanable state.
+ *
+ * Derivation:
+ *
+ * End offset is the highest offset that this page should represent.
+ * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
+ * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
+ * hence give us the correct page_dirty count. On any other page,
+ * it will be zero and in that case we need page_dirty to be the
+ * count of buffers on the page.
*/
+ end_offset = min_t(unsigned long long,
+ (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
+ i_size_read(inode));
+
len = 1 << inode->i_blkbits;
- end_offset = max(end_offset, PAGE_CACHE_SIZE);
- end_offset = roundup(end_offset, len);
- page_dirty = end_offset / len;
+ p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
+ PAGE_CACHE_SIZE);
+ p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
+ page_dirty = p_offset / len;
- offset = 0;
bh = head = page_buffers(page);
do {
if (offset >= end_offset)
break;
- if (!(PageUptodate(page) || buffer_uptodate(bh)))
+ if (!buffer_uptodate(bh))
+ uptodate = 0;
+ if (!(PageUptodate(page) || buffer_uptodate(bh))) {
+ done = 1;
continue;
- if (buffer_mapped(bh) && all_bh &&
- !(buffer_unwritten(bh) || buffer_delay(bh))) {
+ }
+
+ if (buffer_unwritten(bh) || buffer_delay(bh)) {
+ if (buffer_unwritten(bh))
+ type = IOMAP_UNWRITTEN;
+ else
+ type = IOMAP_DELAY;
+
+ if (!xfs_iomap_valid(mp, offset)) {
+ done = 1;
+ continue;
+ }
+
+ ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
+ ASSERT(!(mp->iomap_flags & IOMAP_DELAY));
+
+ xfs_map_at_offset(bh, offset, bbits, mp);
if (startio) {
+ xfs_add_to_ioend(inode, bh, offset,
+ type, ioendp, done);
+ } else {
+ set_buffer_dirty(bh);
+ unlock_buffer(bh);
+ mark_buffer_dirty(bh);
+ }
+ page_dirty--;
+ count++;
+ } else {
+ type = 0;
+ if (buffer_mapped(bh) && all_bh && startio) {
lock_buffer(bh);
- bh_arr[index++] = bh;
+ xfs_add_to_ioend(inode, bh, offset,
+ type, ioendp, done);
+ count++;
page_dirty--;
+ } else {
+ done = 1;
}
- continue;
}
- tmp = xfs_offset_to_map(page, mp, offset);
- if (!tmp)
- continue;
- ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
- ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));
+ } while (offset += len, (bh = bh->b_this_page) != head);
- /* If this is a new unwritten extent buffer (i.e. one
- * that we haven't passed in private data for, we must
- * now map this buffer too.
- */
- if (buffer_unwritten(bh) && !bh->b_end_io) {
- ASSERT(tmp->iomap_flags & IOMAP_UNWRITTEN);
- xfs_map_unwritten(inode, page, head, bh, offset,
- bbits, tmp, wbc, startio, all_bh);
- } else if (! (buffer_unwritten(bh) && buffer_locked(bh))) {
- xfs_map_at_offset(page, bh, offset, bbits, tmp);
- if (buffer_unwritten(bh)) {
- set_buffer_unwritten_io(bh);
- bh->b_private = private;
- ASSERT(private);
+ if (uptodate && bh == head)
+ SetPageUptodate(page);
+
+ if (startio) {
+ if (count) {
+ struct backing_dev_info *bdi;
+
+ bdi = inode->i_mapping->backing_dev_info;
+ if (bdi_write_congested(bdi)) {
+ wbc->encountered_congestion = 1;
+ done = 1;
+ } else if (--wbc->nr_to_write <= 0) {
+ done = 1;
}
}
- if (startio) {
- bh_arr[index++] = bh;
- } else {
- set_buffer_dirty(bh);
- unlock_buffer(bh);
- mark_buffer_dirty(bh);
- }
- page_dirty--;
- } while (offset += len, (bh = bh->b_this_page) != head);
-
- if (startio && index) {
- xfs_submit_page(page, wbc, bh_arr, index, 1, !page_dirty);
- } else {
- unlock_page(page);
+ xfs_start_page_writeback(page, wbc, !page_dirty, count);
}
+
+ return done;
+ fail_unlock_page:
+ unlock_page(page);
+ fail:
+ return 1;
}
/*
@@ -685,19 +750,31 @@ xfs_cluster_write(
struct inode *inode,
pgoff_t tindex,
xfs_iomap_t *iomapp,
+ xfs_ioend_t **ioendp,
struct writeback_control *wbc,
int startio,
int all_bh,
pgoff_t tlast)
{
- struct page *page;
+ struct pagevec pvec;
+ int done = 0, i;
- for (; tindex <= tlast; tindex++) {
- page = xfs_probe_delalloc_page(inode, tindex);
- if (!page)
+ pagevec_init(&pvec, 0);
+ while (!done && tindex <= tlast) {
+ unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
+
+ if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
break;
- xfs_convert_page(inode, page, iomapp, wbc, NULL,
- startio, all_bh);
+
+ for (i = 0; i < pagevec_count(&pvec); i++) {
+ done = xfs_convert_page(inode, pvec.pages[i], tindex++,
+ iomapp, ioendp, wbc, startio, all_bh);
+ if (done)
+ break;
+ }
+
+ pagevec_release(&pvec);
+ cond_resched();
}
}
@@ -728,18 +805,22 @@ xfs_page_state_convert(
int startio,
int unmapped) /* also implies page uptodate */
{
- struct buffer_head *bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
- xfs_iomap_t *iomp, iomap;
+ struct buffer_head *bh, *head;
+ xfs_iomap_t iomap;
+ xfs_ioend_t *ioend = NULL, *iohead = NULL;
loff_t offset;
unsigned long p_offset = 0;
+ unsigned int type;
__uint64_t end_offset;
pgoff_t end_index, last_index, tlast;
- int len, err, i, cnt = 0, uptodate = 1;
- int flags;
- int page_dirty;
+ ssize_t size, len;
+ int flags, err, iomap_valid = 0, uptodate = 1;
+ int page_dirty, count = 0, trylock_flag = 0;
+ int all_bh = unmapped;
/* wait for other IO threads? */
- flags = (startio && wbc->sync_mode != WB_SYNC_NONE) ? 0 : BMAPI_TRYLOCK;
+ if (startio && (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking))
+ trylock_flag |= BMAPI_TRYLOCK;
/* Is this page beyond the end of the file? */
offset = i_size_read(inode);
@@ -754,161 +835,173 @@ xfs_page_state_convert(
}
}
- end_offset = min_t(unsigned long long,
- (loff_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
- offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
-
/*
* page_dirty is initially a count of buffers on the page before
 * EOF and is decremented as we move each into a cleanable state.
- */
+ *
+ * Derivation:
+ *
+ * End offset is the highest offset that this page should represent.
+ * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
+ * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
+ * hence give us the correct page_dirty count. On any other page,
+ * it will be zero and in that case we need page_dirty to be the
+ * count of buffers on the page.
+ */
+ end_offset = min_t(unsigned long long,
+ (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
len = 1 << inode->i_blkbits;
- p_offset = max(p_offset, PAGE_CACHE_SIZE);
- p_offset = roundup(p_offset, len);
+ p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
+ PAGE_CACHE_SIZE);
+ p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
page_dirty = p_offset / len;
- iomp = NULL;
- p_offset = 0;
bh = head = page_buffers(page);
+ offset = page_offset(page);
+ flags = -1;
+ type = 0;
+
+ /* TODO: cleanup count and page_dirty */
do {
if (offset >= end_offset)
break;
if (!buffer_uptodate(bh))
uptodate = 0;
- if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio)
+ if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
+ /*
+ * the iomap is actually still valid, but the ioend
+ * isn't. shouldn't happen too often.
+ */
+ iomap_valid = 0;
continue;
-
- if (iomp) {
- iomp = xfs_offset_to_map(page, &iomap, p_offset);
}
+ if (iomap_valid)
+ iomap_valid = xfs_iomap_valid(&iomap, offset);
+
/*
* First case, map an unwritten extent and prepare for
* extent state conversion transaction on completion.
- */
- if (buffer_unwritten(bh)) {
- if (!startio)
- continue;
- if (!iomp) {
- err = xfs_map_blocks(inode, offset, len, &iomap,
- BMAPI_WRITE|BMAPI_IGNSTATE);
- if (err) {
- goto error;
- }
- iomp = xfs_offset_to_map(page, &iomap,
- p_offset);
+ *
+ * Second case, allocate space for a delalloc buffer.
+ * We can return EAGAIN here in the release page case.
+ *
+ * Third case, an unmapped buffer was found, and we are
+ * in a path where we need to write the whole page out.
+ */
+ if (buffer_unwritten(bh) || buffer_delay(bh) ||
+ ((buffer_uptodate(bh) || PageUptodate(page)) &&
+ !buffer_mapped(bh) && (unmapped || startio))) {
+ /*
+ * Make sure we don't use a read-only iomap
+ */
+ if (flags == BMAPI_READ)
+ iomap_valid = 0;
+
+ if (buffer_unwritten(bh)) {
+ type = IOMAP_UNWRITTEN;
+ flags = BMAPI_WRITE|BMAPI_IGNSTATE;
+ } else if (buffer_delay(bh)) {
+ type = IOMAP_DELAY;
+ flags = BMAPI_ALLOCATE;
+ if (!startio)
+ flags |= trylock_flag;
+ } else {
+ type = IOMAP_NEW;
+ flags = BMAPI_WRITE|BMAPI_MMAP;
}
- if (iomp) {
- if (!bh->b_end_io) {
- err = xfs_map_unwritten(inode, page,
- head, bh, p_offset,
- inode->i_blkbits, iomp,
- wbc, startio, unmapped);
- if (err) {
- goto error;
- }
+
+ if (!iomap_valid) {
+ if (type == IOMAP_NEW) {
+ size = xfs_probe_cluster(inode,
+ page, bh, head, 0);
} else {
- set_bit(BH_Lock, &bh->b_state);
+ size = len;
}
- BUG_ON(!buffer_locked(bh));
- bh_arr[cnt++] = bh;
- page_dirty--;
- }
- /*
- * Second case, allocate space for a delalloc buffer.
- * We can return EAGAIN here in the release page case.
- */
- } else if (buffer_delay(bh)) {
- if (!iomp) {
- err = xfs_map_blocks(inode, offset, len, &iomap,
- BMAPI_ALLOCATE | flags);
- if (err) {
+
+ err = xfs_map_blocks(inode, offset, size,
+ &iomap, flags);
+ if (err)
goto error;
- }
- iomp = xfs_offset_to_map(page, &iomap,
- p_offset);
+ iomap_valid = xfs_iomap_valid(&iomap, offset);
}
- if (iomp) {
- xfs_map_at_offset(page, bh, p_offset,
- inode->i_blkbits, iomp);
+ if (iomap_valid) {
+ xfs_map_at_offset(bh, offset,
+ inode->i_blkbits, &iomap);
if (startio) {
- bh_arr[cnt++] = bh;
+ xfs_add_to_ioend(inode, bh, offset,
+ type, &ioend,
+ !iomap_valid);
} else {
set_buffer_dirty(bh);
unlock_buffer(bh);
mark_buffer_dirty(bh);
}
page_dirty--;
+ count++;
+ }
+ } else if (buffer_uptodate(bh) && startio) {
+ /*
+ * we got here because the buffer is already mapped.
+ * That means it must already have extents allocated
+ * underneath it. Map the extent by reading it.
+ */
+ if (!iomap_valid || type != 0) {
+ flags = BMAPI_READ;
+ size = xfs_probe_cluster(inode, page, bh,
+ head, 1);
+ err = xfs_map_blocks(inode, offset, size,
+ &iomap, flags);
+ if (err)
+ goto error;
+ iomap_valid = xfs_iomap_valid(&iomap, offset);
}
- } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
- (unmapped || startio)) {
- if (!buffer_mapped(bh)) {
- int size;
-
- /*
- * Getting here implies an unmapped buffer
- * was found, and we are in a path where we
- * need to write the whole page out.
- */
- if (!iomp) {
- size = xfs_probe_unmapped_cluster(
- inode, page, bh, head);
- err = xfs_map_blocks(inode, offset,
- size, &iomap,
- BMAPI_WRITE|BMAPI_MMAP);
- if (err) {
- goto error;
- }
- iomp = xfs_offset_to_map(page, &iomap,
- p_offset);
- }
- if (iomp) {
- xfs_map_at_offset(page,
- bh, p_offset,
- inode->i_blkbits, iomp);
- if (startio) {
- bh_arr[cnt++] = bh;
- } else {
- set_buffer_dirty(bh);
- unlock_buffer(bh);
- mark_buffer_dirty(bh);
- }
- page_dirty--;
- }
- } else if (startio) {
- if (buffer_uptodate(bh) &&
- !test_and_set_bit(BH_Lock, &bh->b_state)) {
- bh_arr[cnt++] = bh;
- page_dirty--;
- }
+ type = 0;
+ if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
+ ASSERT(buffer_mapped(bh));
+ if (iomap_valid)
+ all_bh = 1;
+ xfs_add_to_ioend(inode, bh, offset, type,
+ &ioend, !iomap_valid);
+ page_dirty--;
+ count++;
+ } else {
+ iomap_valid = 0;
}
+ } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
+ (unmapped || startio)) {
+ iomap_valid = 0;
}
- } while (offset += len, p_offset += len,
- ((bh = bh->b_this_page) != head));
+
+ if (!iohead)
+ iohead = ioend;
+
+ } while (offset += len, ((bh = bh->b_this_page) != head));
if (uptodate && bh == head)
SetPageUptodate(page);
- if (startio) {
- xfs_submit_page(page, wbc, bh_arr, cnt, 0, !page_dirty);
- }
+ if (startio)
+ xfs_start_page_writeback(page, wbc, 1, count);
- if (iomp) {
- offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
+ if (ioend && iomap_valid) {
+ offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
PAGE_CACHE_SHIFT;
tlast = min_t(pgoff_t, offset, last_index);
- xfs_cluster_write(inode, page->index + 1, iomp, wbc,
- startio, unmapped, tlast);
+ xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
+ wbc, startio, all_bh, tlast);
}
+ if (iohead)
+ xfs_submit_ioend(iohead);
+
return page_dirty;
error:
- for (i = 0; i < cnt; i++) {
- unlock_buffer(bh_arr[i]);
- }
+ if (iohead)
+ xfs_cancel_ioend(iohead);
/*
* If it's delalloc and we have nowhere to put it,
@@ -916,9 +1009,8 @@ error:
* us to try again.
*/
if (err != -EAGAIN) {
- if (!unmapped) {
+ if (!unmapped)
block_invalidatepage(page, 0);
- }
ClearPageUptodate(page);
}
return err;
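
The do/while loop above boils down to a three-way classification of each buffer_head before an iomap is requested. Below is a stand-alone sketch of that decision with the BMAPI flag choices noted in comments; the enum values, struct bstate and the classify() helper are illustrative stand-ins, and only the IOMAP_*/BMAPI_* names come from the patch itself.

/* Illustrative user-space restatement of the buffer classification above. */
#include <stdio.h>

enum iomap_type { IOMAP_READ = 0, IOMAP_UNWRITTEN, IOMAP_DELAY, IOMAP_NEW };

struct bstate { int unwritten, delay, mapped, uptodate; };

static enum iomap_type classify(struct bstate b, int startio, int unmapped)
{
	if (b.unwritten)
		return IOMAP_UNWRITTEN;	/* BMAPI_WRITE | BMAPI_IGNSTATE */
	if (b.delay)
		return IOMAP_DELAY;	/* BMAPI_ALLOCATE (+ trylock if !startio) */
	if (b.uptodate && !b.mapped && (unmapped || startio))
		return IOMAP_NEW;	/* BMAPI_WRITE | BMAPI_MMAP */
	/* simplified fallback: already-mapped, uptodate buffers take the
	 * BMAPI_READ path in the real loop */
	return IOMAP_READ;
}

int main(void)
{
	struct bstate delalloc = { .delay = 1, .uptodate = 1 };

	printf("%d\n", classify(delalloc, 1, 0));	/* prints 2 (IOMAP_DELAY) */
	return 0;
}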
@@ -982,7 +1074,7 @@ __linvfs_get_block(
}
/* If this is a realtime file, data might be on a new device */
- bh_result->b_bdev = iomap.iomap_target->pbr_bdev;
+ bh_result->b_bdev = iomap.iomap_target->bt_bdev;
/* If we previously allocated a block out beyond eof and
* we are now coming back to use it then we will need to
@@ -1094,10 +1186,10 @@ linvfs_direct_IO(
if (error)
return -error;
- iocb->private = xfs_alloc_ioend(inode);
+ iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
- iomap.iomap_target->pbr_bdev,
+ iomap.iomap_target->bt_bdev,
iov, offset, nr_segs,
linvfs_get_blocks_direct,
linvfs_end_io_direct);
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
index 4720758a9ad..55339dd5a30 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -23,14 +23,24 @@ extern mempool_t *xfs_ioend_pool;
typedef void (*xfs_ioend_func_t)(void *);
+/*
+ * xfs_ioend struct manages large extent writes for XFS.
+ * It can manage several multi-page bios at once.
+ */
typedef struct xfs_ioend {
+ struct xfs_ioend *io_list; /* next ioend in chain */
+ unsigned int io_type; /* delalloc / unwritten */
unsigned int io_uptodate; /* I/O status register */
atomic_t io_remaining; /* hold count */
struct vnode *io_vnode; /* file being written to */
struct buffer_head *io_buffer_head;/* buffer linked list head */
+ struct buffer_head *io_buffer_tail;/* buffer linked list tail */
size_t io_size; /* size of the extent */
xfs_off_t io_offset; /* offset in the file */
struct work_struct io_work; /* xfsdatad work queue */
} xfs_ioend_t;
+extern struct address_space_operations linvfs_aops;
+extern int linvfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
+
#endif /* __XFS_IOPS_H__ */
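
The new io_list and io_buffer_head/io_buffer_tail fields are what let the writeback path batch buffer_heads into ioends and chain several ioends per page. Here is a stand-alone, user-space sketch of that chaining; the struct and field names mirror the ones above, while ioend_alloc(), struct buf and the numeric type values are simplified stand-ins rather than the patch's actual helpers.

#include <stdio.h>
#include <stdlib.h>

struct buf { long offset; struct buf *next; };

struct ioend {
	struct ioend *io_list;		/* next ioend in chain */
	unsigned int  io_type;		/* delalloc / unwritten */
	struct buf   *io_buffer_head;	/* first buffer of the extent */
	struct buf   *io_buffer_tail;	/* last buffer, for O(1) append */
	long	      io_offset;
	long	      io_size;
};

static struct ioend *ioend_alloc(unsigned int type, long offset)
{
	struct ioend *io = calloc(1, sizeof(*io));	/* no error handling here */

	io->io_type = type;
	io->io_offset = offset;
	return io;
}

/* Append a buffer to the current ioend, starting a new one on a type
 * change and linking it through io_list, in the spirit of the
 * xfs_add_to_ioend() call sites in the xfs_aops.c hunk above. */
static void add_to_ioend(struct ioend **cur, struct buf *b,
			 unsigned int type, long len)
{
	if (!*cur || (*cur)->io_type != type) {
		struct ioend *io = ioend_alloc(type, b->offset);

		if (*cur)
			(*cur)->io_list = io;
		*cur = io;
	}
	b->next = NULL;
	if ((*cur)->io_buffer_tail)
		(*cur)->io_buffer_tail->next = b;
	else
		(*cur)->io_buffer_head = b;
	(*cur)->io_buffer_tail = b;
	(*cur)->io_size += len;
}

int main(void)
{
	struct ioend *ioend = NULL, *iohead = NULL;
	struct buf bufs[4] = { { 0 }, { 4096 }, { 8192 }, { 12288 } };

	add_to_ioend(&ioend, &bufs[0], 1, 4096);	/* unwritten */
	if (!iohead)
		iohead = ioend;
	add_to_ioend(&ioend, &bufs[1], 1, 4096);
	add_to_ioend(&ioend, &bufs[2], 2, 4096);	/* delalloc: new ioend */
	add_to_ioend(&ioend, &bufs[3], 2, 4096);

	/* "Submit" the whole chain: walk io_list, then each buffer list. */
	for (struct ioend *io = iohead; io; io = io->io_list) {
		printf("ioend type %u offset %ld size %ld:",
		       io->io_type, io->io_offset, io->io_size);
		for (struct buf *b = io->io_buffer_head; b; b = b->next)
			printf(" %ld", b->offset);
		printf("\n");
	}
	return 0;
}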
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 6fe21d2b884..e44b7c1a3a3 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -31,76 +31,77 @@
#include <linux/kthread.h>
#include "xfs_linux.h"
-STATIC kmem_cache_t *pagebuf_zone;
-STATIC kmem_shaker_t pagebuf_shake;
+STATIC kmem_zone_t *xfs_buf_zone;
+STATIC kmem_shaker_t xfs_buf_shake;
+STATIC int xfsbufd(void *);
STATIC int xfsbufd_wakeup(int, gfp_t);
-STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
+STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
STATIC struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;
-#ifdef PAGEBUF_TRACE
+#ifdef XFS_BUF_TRACE
void
-pagebuf_trace(
- xfs_buf_t *pb,
+xfs_buf_trace(
+ xfs_buf_t *bp,
char *id,
void *data,
void *ra)
{
- ktrace_enter(pagebuf_trace_buf,
- pb, id,
- (void *)(unsigned long)pb->pb_flags,
- (void *)(unsigned long)pb->pb_hold.counter,
- (void *)(unsigned long)pb->pb_sema.count.counter,
+ ktrace_enter(xfs_buf_trace_buf,
+ bp, id,
+ (void *)(unsigned long)bp->b_flags,
+ (void *)(unsigned long)bp->b_hold.counter,
+ (void *)(unsigned long)bp->b_sema.count.counter,
(void *)current,
data, ra,
- (void *)(unsigned long)((pb->pb_file_offset>>32) & 0xffffffff),
- (void *)(unsigned long)(pb->pb_file_offset & 0xffffffff),
- (void *)(unsigned long)pb->pb_buffer_length,
+ (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
+ (void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
+ (void *)(unsigned long)bp->b_buffer_length,
NULL, NULL, NULL, NULL, NULL);
}
-ktrace_t *pagebuf_trace_buf;
-#define PAGEBUF_TRACE_SIZE 4096
-#define PB_TRACE(pb, id, data) \
- pagebuf_trace(pb, id, (void *)data, (void *)__builtin_return_address(0))
+ktrace_t *xfs_buf_trace_buf;
+#define XFS_BUF_TRACE_SIZE 4096
+#define XB_TRACE(bp, id, data) \
+ xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
#else
-#define PB_TRACE(pb, id, data) do { } while (0)
+#define XB_TRACE(bp, id, data) do { } while (0)
#endif
-#ifdef PAGEBUF_LOCK_TRACKING
-# define PB_SET_OWNER(pb) ((pb)->pb_last_holder = current->pid)
-# define PB_CLEAR_OWNER(pb) ((pb)->pb_last_holder = -1)
-# define PB_GET_OWNER(pb) ((pb)->pb_last_holder)
+#ifdef XFS_BUF_LOCK_TRACKING
+# define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
+# define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
+# define XB_GET_OWNER(bp) ((bp)->b_last_holder)
#else
-# define PB_SET_OWNER(pb) do { } while (0)
-# define PB_CLEAR_OWNER(pb) do { } while (0)
-# define PB_GET_OWNER(pb) do { } while (0)
+# define XB_SET_OWNER(bp) do { } while (0)
+# define XB_CLEAR_OWNER(bp) do { } while (0)
+# define XB_GET_OWNER(bp) do { } while (0)
#endif
-#define pb_to_gfp(flags) \
- ((((flags) & PBF_READ_AHEAD) ? __GFP_NORETRY : \
- ((flags) & PBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
+#define xb_to_gfp(flags) \
+ ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
+ ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
-#define pb_to_km(flags) \
- (((flags) & PBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
+#define xb_to_km(flags) \
+ (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
-#define pagebuf_allocate(flags) \
- kmem_zone_alloc(pagebuf_zone, pb_to_km(flags))
-#define pagebuf_deallocate(pb) \
- kmem_zone_free(pagebuf_zone, (pb));
+#define xfs_buf_allocate(flags) \
+ kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
+#define xfs_buf_deallocate(bp) \
+ kmem_zone_free(xfs_buf_zone, (bp));
/*
- * Page Region interfaces.
+ * Page Region interfaces.
*
- * For pages in filesystems where the blocksize is smaller than the
- * pagesize, we use the page->private field (long) to hold a bitmap
- * of uptodate regions within the page.
+ * For pages in filesystems where the blocksize is smaller than the
+ * pagesize, we use the page->private field (long) to hold a bitmap
+ * of uptodate regions within the page.
*
- * Each such region is "bytes per page / bits per long" bytes long.
+ * Each such region is "bytes per page / bits per long" bytes long.
*
- * NBPPR == number-of-bytes-per-page-region
- * BTOPR == bytes-to-page-region (rounded up)
- * BTOPRT == bytes-to-page-region-truncated (rounded down)
+ * NBPPR == number-of-bytes-per-page-region
+ * BTOPR == bytes-to-page-region (rounded up)
+ * BTOPRT == bytes-to-page-region-truncated (rounded down)
*/
#if (BITS_PER_LONG == 32)
#define PRSHIFT (PAGE_CACHE_SHIFT - 5) /* (32 == 1<<5) */
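
The page-region comment above is easiest to see with numbers: with 4096-byte pages and 32-bit longs, each region covers 4096/32 = 128 bytes, so an uptodate byte range maps to a run of bits in page->private. The following stand-alone sketch shows that arithmetic; the btopr/btoprt helpers echo the BTOPR/BTOPRT macros above, but the simple bit loop is only illustrative and not the kernel's set_page_region().

#include <assert.h>
#include <stdio.h>

#define PAGE_CACHE_SIZE	4096UL
#define BITS_PER_LONG	32
#define NBPPR		(PAGE_CACHE_SIZE / BITS_PER_LONG)	/* 128 bytes per region */

/* bytes-to-page-region, rounded up / truncated (cf. BTOPR / BTOPRT) */
static unsigned long btopr(unsigned long bytes)  { return (bytes + NBPPR - 1) / NBPPR; }
static unsigned long btoprt(unsigned long bytes) { return bytes / NBPPR; }

/* Set the bit of every region touched by [offset, offset + len). */
static void set_region(unsigned long *priv, unsigned long offset, unsigned long len)
{
	unsigned long first = btoprt(offset);
	unsigned long final = btopr(offset + len);

	for (unsigned long i = first; i < final; i++)
		*priv |= 1UL << i;
}

int main(void)
{
	unsigned long priv = 0;

	set_region(&priv, 512, 1024);		/* bytes 512..1535 of the page */
	printf("bitmap: %#lx\n", priv);		/* regions 4..11 -> 0xff0 */
	assert(priv == 0xff0UL);
	return 0;
}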
@@ -159,7 +160,7 @@ test_page_region(
}
/*
- * Mapping of multi-page buffers into contiguous virtual space
+ * Mapping of multi-page buffers into contiguous virtual space
*/
typedef struct a_list {
@@ -172,7 +173,7 @@ STATIC int as_list_len;
STATIC DEFINE_SPINLOCK(as_lock);
/*
- * Try to batch vunmaps because they are costly.
+ * Try to batch vunmaps because they are costly.
*/
STATIC void
free_address(
@@ -215,83 +216,83 @@ purge_addresses(void)
}
/*
- * Internal pagebuf object manipulation
+ * Internal xfs_buf_t object manipulation
*/
STATIC void
-_pagebuf_initialize(
- xfs_buf_t *pb,
+_xfs_buf_initialize(
+ xfs_buf_t *bp,
xfs_buftarg_t *target,
- loff_t range_base,
+ xfs_off_t range_base,
size_t range_length,
- page_buf_flags_t flags)
+ xfs_buf_flags_t flags)
{
/*
- * We don't want certain flags to appear in pb->pb_flags.
+ * We don't want certain flags to appear in b_flags.
*/
- flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD);
-
- memset(pb, 0, sizeof(xfs_buf_t));
- atomic_set(&pb->pb_hold, 1);
- init_MUTEX_LOCKED(&pb->pb_iodonesema);
- INIT_LIST_HEAD(&pb->pb_list);
- INIT_LIST_HEAD(&pb->pb_hash_list);
- init_MUTEX_LOCKED(&pb->pb_sema); /* held, no waiters */
- PB_SET_OWNER(pb);
- pb->pb_target = target;
- pb->pb_file_offset = range_base;
+ flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
+
+ memset(bp, 0, sizeof(xfs_buf_t));
+ atomic_set(&bp->b_hold, 1);
+ init_MUTEX_LOCKED(&bp->b_iodonesema);
+ INIT_LIST_HEAD(&bp->b_list);
+ INIT_LIST_HEAD(&bp->b_hash_list);
+ init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
+ XB_SET_OWNER(bp);
+ bp->b_target = target;
+ bp->b_file_offset = range_base;
/*
* Set buffer_length and count_desired to the same value initially.
* I/O routines should use count_desired, which will be the same in
* most cases but may be reset (e.g. XFS recovery).
*/
- pb->pb_buffer_length = pb->pb_count_desired = range_length;
- pb->pb_flags = flags;
- pb->pb_bn = XFS_BUF_DADDR_NULL;
- atomic_set(&pb->pb_pin_count, 0);
- init_waitqueue_head(&pb->pb_waiters);
-
- XFS_STATS_INC(pb_create);
- PB_TRACE(pb, "initialize", target);
+ bp->b_buffer_length = bp->b_count_desired = range_length;
+ bp->b_flags = flags;
+ bp->b_bn = XFS_BUF_DADDR_NULL;
+ atomic_set(&bp->b_pin_count, 0);
+ init_waitqueue_head(&bp->b_waiters);
+
+ XFS_STATS_INC(xb_create);
+ XB_TRACE(bp, "initialize", target);
}
/*
- * Allocate a page array capable of holding a specified number
- * of pages, and point the page buf at it.
+ * Allocate a page array capable of holding a specified number
+ * of pages, and point the page buf at it.
*/
STATIC int
-_pagebuf_get_pages(
- xfs_buf_t *pb,
+_xfs_buf_get_pages(
+ xfs_buf_t *bp,
int page_count,
- page_buf_flags_t flags)
+ xfs_buf_flags_t flags)
{
/* Make sure that we have a page list */
- if (pb->pb_pages == NULL) {
- pb->pb_offset = page_buf_poff(pb->pb_file_offset);
- pb->pb_page_count = page_count;
- if (page_count <= PB_PAGES) {
- pb->pb_pages = pb->pb_page_array;
+ if (bp->b_pages == NULL) {
+ bp->b_offset = xfs_buf_poff(bp->b_file_offset);
+ bp->b_page_count = page_count;
+ if (page_count <= XB_PAGES) {
+ bp->b_pages = bp->b_page_array;
} else {
- pb->pb_pages = kmem_alloc(sizeof(struct page *) *
- page_count, pb_to_km(flags));
- if (pb->pb_pages == NULL)
+ bp->b_pages = kmem_alloc(sizeof(struct page *) *
+ page_count, xb_to_km(flags));
+ if (bp->b_pages == NULL)
return -ENOMEM;
}
- memset(pb->pb_pages, 0, sizeof(struct page *) * page_count);
+ memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
}
return 0;
}
/*
- * Frees pb_pages if it was malloced.
+ * Frees b_pages if it was allocated.
*/
STATIC void
-_pagebuf_free_pages(
+_xfs_buf_free_pages(
xfs_buf_t *bp)
{
- if (bp->pb_pages != bp->pb_page_array) {
- kmem_free(bp->pb_pages,
- bp->pb_page_count * sizeof(struct page *));
+ if (bp->b_pages != bp->b_page_array) {
+ kmem_free(bp->b_pages,
+ bp->b_page_count * sizeof(struct page *));
}
}
@@ -299,79 +300,79 @@ _pagebuf_free_pages(
* Releases the specified buffer.
*
* The modification state of any associated pages is left unchanged.
- * The buffer most not be on any hash - use pagebuf_rele instead for
+ * The buffer must not be on any hash - use xfs_buf_rele instead for
* hashed and refcounted buffers
*/
void
-pagebuf_free(
+xfs_buf_free(
xfs_buf_t *bp)
{
- PB_TRACE(bp, "free", 0);
+ XB_TRACE(bp, "free", 0);
- ASSERT(list_empty(&bp->pb_hash_list));
+ ASSERT(list_empty(&bp->b_hash_list));
- if (bp->pb_flags & _PBF_PAGE_CACHE) {
+ if (bp->b_flags & _XBF_PAGE_CACHE) {
uint i;
- if ((bp->pb_flags & PBF_MAPPED) && (bp->pb_page_count > 1))
- free_address(bp->pb_addr - bp->pb_offset);
+ if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
+ free_address(bp->b_addr - bp->b_offset);
- for (i = 0; i < bp->pb_page_count; i++)
- page_cache_release(bp->pb_pages[i]);
- _pagebuf_free_pages(bp);
- } else if (bp->pb_flags & _PBF_KMEM_ALLOC) {
+ for (i = 0; i < bp->b_page_count; i++)
+ page_cache_release(bp->b_pages[i]);
+ _xfs_buf_free_pages(bp);
+ } else if (bp->b_flags & _XBF_KMEM_ALLOC) {
/*
- * XXX(hch): bp->pb_count_desired might be incorrect (see
- * pagebuf_associate_memory for details), but fortunately
+ * XXX(hch): bp->b_count_desired might be incorrect (see
+ * xfs_buf_associate_memory for details), but fortunately
* the Linux version of kmem_free ignores the len argument..
*/
- kmem_free(bp->pb_addr, bp->pb_count_desired);
- _pagebuf_free_pages(bp);
+ kmem_free(bp->b_addr, bp->b_count_desired);
+ _xfs_buf_free_pages(bp);
}
- pagebuf_deallocate(bp);
+ xfs_buf_deallocate(bp);
}
/*
* Finds all pages for buffer in question and builds its page list.
*/
STATIC int
-_pagebuf_lookup_pages(
+_xfs_buf_lookup_pages(
xfs_buf_t *bp,
uint flags)
{
- struct address_space *mapping = bp->pb_target->pbr_mapping;
- size_t blocksize = bp->pb_target->pbr_bsize;
- size_t size = bp->pb_count_desired;
+ struct address_space *mapping = bp->b_target->bt_mapping;
+ size_t blocksize = bp->b_target->bt_bsize;
+ size_t size = bp->b_count_desired;
size_t nbytes, offset;
- gfp_t gfp_mask = pb_to_gfp(flags);
+ gfp_t gfp_mask = xb_to_gfp(flags);
unsigned short page_count, i;
pgoff_t first;
- loff_t end;
+ xfs_off_t end;
int error;
- end = bp->pb_file_offset + bp->pb_buffer_length;
- page_count = page_buf_btoc(end) - page_buf_btoct(bp->pb_file_offset);
+ end = bp->b_file_offset + bp->b_buffer_length;
+ page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
- error = _pagebuf_get_pages(bp, page_count, flags);
+ error = _xfs_buf_get_pages(bp, page_count, flags);
if (unlikely(error))
return error;
- bp->pb_flags |= _PBF_PAGE_CACHE;
+ bp->b_flags |= _XBF_PAGE_CACHE;
- offset = bp->pb_offset;
- first = bp->pb_file_offset >> PAGE_CACHE_SHIFT;
+ offset = bp->b_offset;
+ first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
- for (i = 0; i < bp->pb_page_count; i++) {
+ for (i = 0; i < bp->b_page_count; i++) {
struct page *page;
uint retries = 0;
retry:
page = find_or_create_page(mapping, first + i, gfp_mask);
if (unlikely(page == NULL)) {
- if (flags & PBF_READ_AHEAD) {
- bp->pb_page_count = i;
- for (i = 0; i < bp->pb_page_count; i++)
- unlock_page(bp->pb_pages[i]);
+ if (flags & XBF_READ_AHEAD) {
+ bp->b_page_count = i;
+ for (i = 0; i < bp->b_page_count; i++)
+ unlock_page(bp->b_pages[i]);
return -ENOMEM;
}
@@ -387,13 +388,13 @@ _pagebuf_lookup_pages(
"deadlock in %s (mode:0x%x)\n",
__FUNCTION__, gfp_mask);
- XFS_STATS_INC(pb_page_retries);
+ XFS_STATS_INC(xb_page_retries);
xfsbufd_wakeup(0, gfp_mask);
blk_congestion_wait(WRITE, HZ/50);
goto retry;
}
- XFS_STATS_INC(pb_page_found);
+ XFS_STATS_INC(xb_page_found);
nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
size -= nbytes;
@@ -401,27 +402,27 @@ _pagebuf_lookup_pages(
if (!PageUptodate(page)) {
page_count--;
if (blocksize >= PAGE_CACHE_SIZE) {
- if (flags & PBF_READ)
- bp->pb_locked = 1;
+ if (flags & XBF_READ)
+ bp->b_locked = 1;
} else if (!PagePrivate(page)) {
if (test_page_region(page, offset, nbytes))
page_count++;
}
}
- bp->pb_pages[i] = page;
+ bp->b_pages[i] = page;
offset = 0;
}
- if (!bp->pb_locked) {
- for (i = 0; i < bp->pb_page_count; i++)
- unlock_page(bp->pb_pages[i]);
+ if (!bp->b_locked) {
+ for (i = 0; i < bp->b_page_count; i++)
+ unlock_page(bp->b_pages[i]);
}
- if (page_count == bp->pb_page_count)
- bp->pb_flags |= PBF_DONE;
+ if (page_count == bp->b_page_count)
+ bp->b_flags |= XBF_DONE;
- PB_TRACE(bp, "lookup_pages", (long)page_count);
+ XB_TRACE(bp, "lookup_pages", (long)page_count);
return error;
}
@@ -429,23 +430,23 @@ _pagebuf_lookup_pages(
* Map buffer into kernel address-space if necessary.
*/
STATIC int
-_pagebuf_map_pages(
+_xfs_buf_map_pages(
xfs_buf_t *bp,
uint flags)
{
/* A single page buffer is always mappable */
- if (bp->pb_page_count == 1) {
- bp->pb_addr = page_address(bp->pb_pages[0]) + bp->pb_offset;
- bp->pb_flags |= PBF_MAPPED;
- } else if (flags & PBF_MAPPED) {
+ if (bp->b_page_count == 1) {
+ bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
+ bp->b_flags |= XBF_MAPPED;
+ } else if (flags & XBF_MAPPED) {
if (as_list_len > 64)
purge_addresses();
- bp->pb_addr = vmap(bp->pb_pages, bp->pb_page_count,
- VM_MAP, PAGE_KERNEL);
- if (unlikely(bp->pb_addr == NULL))
+ bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
+ VM_MAP, PAGE_KERNEL);
+ if (unlikely(bp->b_addr == NULL))
return -ENOMEM;
- bp->pb_addr += bp->pb_offset;
- bp->pb_flags |= PBF_MAPPED;
+ bp->b_addr += bp->b_offset;
+ bp->b_flags |= XBF_MAPPED;
}
return 0;
@@ -456,9 +457,7 @@ _pagebuf_map_pages(
*/
/*
- * _pagebuf_find
- *
- * Looks up, and creates if absent, a lockable buffer for
+ * Look up, and create if absent, a lockable buffer for
* a given range of an inode. The buffer is returned
* locked. If other overlapping buffers exist, they are
* released before the new buffer is created and locked,
@@ -466,55 +465,55 @@ _pagebuf_map_pages(
* are unlocked. No I/O is implied by this call.
*/
xfs_buf_t *
-_pagebuf_find(
+_xfs_buf_find(
xfs_buftarg_t *btp, /* block device target */
- loff_t ioff, /* starting offset of range */
+ xfs_off_t ioff, /* starting offset of range */
size_t isize, /* length of range */
- page_buf_flags_t flags, /* PBF_TRYLOCK */
- xfs_buf_t *new_pb)/* newly allocated buffer */
+ xfs_buf_flags_t flags,
+ xfs_buf_t *new_bp)
{
- loff_t range_base;
+ xfs_off_t range_base;
size_t range_length;
xfs_bufhash_t *hash;
- xfs_buf_t *pb, *n;
+ xfs_buf_t *bp, *n;
range_base = (ioff << BBSHIFT);
range_length = (isize << BBSHIFT);
/* Check for IOs smaller than the sector size / not sector aligned */
- ASSERT(!(range_length < (1 << btp->pbr_sshift)));
- ASSERT(!(range_base & (loff_t)btp->pbr_smask));
+ ASSERT(!(range_length < (1 << btp->bt_sshift)));
+ ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
spin_lock(&hash->bh_lock);
- list_for_each_entry_safe(pb, n, &hash->bh_list, pb_hash_list) {
- ASSERT(btp == pb->pb_target);
- if (pb->pb_file_offset == range_base &&
- pb->pb_buffer_length == range_length) {
+ list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
+ ASSERT(btp == bp->b_target);
+ if (bp->b_file_offset == range_base &&
+ bp->b_buffer_length == range_length) {
/*
- * If we look at something bring it to the
+ * If we look at something, bring it to the
* front of the list for next time.
*/
- atomic_inc(&pb->pb_hold);
- list_move(&pb->pb_hash_list, &hash->bh_list);
+ atomic_inc(&bp->b_hold);
+ list_move(&bp->b_hash_list, &hash->bh_list);
goto found;
}
}
/* No match found */
- if (new_pb) {
- _pagebuf_initialize(new_pb, btp, range_base,
+ if (new_bp) {
+ _xfs_buf_initialize(new_bp, btp, range_base,
range_length, flags);
- new_pb->pb_hash = hash;
- list_add(&new_pb->pb_hash_list, &hash->bh_list);
+ new_bp->b_hash = hash;
+ list_add(&new_bp->b_hash_list, &hash->bh_list);
} else {
- XFS_STATS_INC(pb_miss_locked);
+ XFS_STATS_INC(xb_miss_locked);
}
spin_unlock(&hash->bh_lock);
- return new_pb;
+ return new_bp;
found:
spin_unlock(&hash->bh_lock);
@@ -523,74 +522,72 @@ found:
* if this does not work then we need to drop the
* spinlock and do a hard attempt on the semaphore.
*/
- if (down_trylock(&pb->pb_sema)) {
- if (!(flags & PBF_TRYLOCK)) {
+ if (down_trylock(&bp->b_sema)) {
+ if (!(flags & XBF_TRYLOCK)) {
/* wait for buffer ownership */
- PB_TRACE(pb, "get_lock", 0);
- pagebuf_lock(pb);
- XFS_STATS_INC(pb_get_locked_waited);
+ XB_TRACE(bp, "get_lock", 0);
+ xfs_buf_lock(bp);
+ XFS_STATS_INC(xb_get_locked_waited);
} else {
/* We asked for a trylock and failed, no need
* to look at file offset and length here, we
- * know that this pagebuf at least overlaps our
- * pagebuf and is locked, therefore our buffer
- * either does not exist, or is this buffer
+ * know that this buffer at least overlaps our
+ * buffer and is locked, therefore our buffer
+ * either does not exist, or is this buffer.
*/
-
- pagebuf_rele(pb);
- XFS_STATS_INC(pb_busy_locked);
- return (NULL);
+ xfs_buf_rele(bp);
+ XFS_STATS_INC(xb_busy_locked);
+ return NULL;
}
} else {
/* trylock worked */
- PB_SET_OWNER(pb);
+ XB_SET_OWNER(bp);
}
- if (pb->pb_flags & PBF_STALE) {
- ASSERT((pb->pb_flags & _PBF_DELWRI_Q) == 0);
- pb->pb_flags &= PBF_MAPPED;
+ if (bp->b_flags & XBF_STALE) {
+ ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
+ bp->b_flags &= XBF_MAPPED;
}
- PB_TRACE(pb, "got_lock", 0);
- XFS_STATS_INC(pb_get_locked);
- return (pb);
+ XB_TRACE(bp, "got_lock", 0);
+ XFS_STATS_INC(xb_get_locked);
+ return bp;
}
/*
- * xfs_buf_get_flags assembles a buffer covering the specified range.
- *
+ * Assembles a buffer covering the specified range.
* Storage in memory for all portions of the buffer will be allocated,
* although backing storage may not be.
*/
xfs_buf_t *
-xfs_buf_get_flags( /* allocate a buffer */
+xfs_buf_get_flags(
xfs_buftarg_t *target,/* target for buffer */
- loff_t ioff, /* starting offset of range */
+ xfs_off_t ioff, /* starting offset of range */
size_t isize, /* length of range */
- page_buf_flags_t flags) /* PBF_TRYLOCK */
+ xfs_buf_flags_t flags)
{
- xfs_buf_t *pb, *new_pb;
+ xfs_buf_t *bp, *new_bp;
int error = 0, i;
- new_pb = pagebuf_allocate(flags);
- if (unlikely(!new_pb))
+ new_bp = xfs_buf_allocate(flags);
+ if (unlikely(!new_bp))
return NULL;
- pb = _pagebuf_find(target, ioff, isize, flags, new_pb);
- if (pb == new_pb) {
- error = _pagebuf_lookup_pages(pb, flags);
+ bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
+ if (bp == new_bp) {
+ error = _xfs_buf_lookup_pages(bp, flags);
if (error)
goto no_buffer;
} else {
- pagebuf_deallocate(new_pb);
- if (unlikely(pb == NULL))
+ xfs_buf_deallocate(new_bp);
+ if (unlikely(bp == NULL))
return NULL;
}
- for (i = 0; i < pb->pb_page_count; i++)
- mark_page_accessed(pb->pb_pages[i]);
+ for (i = 0; i < bp->b_page_count; i++)
+ mark_page_accessed(bp->b_pages[i]);
- if (!(pb->pb_flags & PBF_MAPPED)) {
- error = _pagebuf_map_pages(pb, flags);
+ if (!(bp->b_flags & XBF_MAPPED)) {
+ error = _xfs_buf_map_pages(bp, flags);
if (unlikely(error)) {
printk(KERN_WARNING "%s: failed to map pages\n",
__FUNCTION__);
@@ -598,97 +595,97 @@ xfs_buf_get_flags( /* allocate a buffer */
}
}
- XFS_STATS_INC(pb_get);
+ XFS_STATS_INC(xb_get);
/*
* Always fill in the block number now, the mapped cases can do
* their own overlay of this later.
*/
- pb->pb_bn = ioff;
- pb->pb_count_desired = pb->pb_buffer_length;
+ bp->b_bn = ioff;
+ bp->b_count_desired = bp->b_buffer_length;
- PB_TRACE(pb, "get", (unsigned long)flags);
- return pb;
+ XB_TRACE(bp, "get", (unsigned long)flags);
+ return bp;
no_buffer:
- if (flags & (PBF_LOCK | PBF_TRYLOCK))
- pagebuf_unlock(pb);
- pagebuf_rele(pb);
+ if (flags & (XBF_LOCK | XBF_TRYLOCK))
+ xfs_buf_unlock(bp);
+ xfs_buf_rele(bp);
return NULL;
}
xfs_buf_t *
xfs_buf_read_flags(
xfs_buftarg_t *target,
- loff_t ioff,
+ xfs_off_t ioff,
size_t isize,
- page_buf_flags_t flags)
+ xfs_buf_flags_t flags)
{
- xfs_buf_t *pb;
-
- flags |= PBF_READ;
-
- pb = xfs_buf_get_flags(target, ioff, isize, flags);
- if (pb) {
- if (!XFS_BUF_ISDONE(pb)) {
- PB_TRACE(pb, "read", (unsigned long)flags);
- XFS_STATS_INC(pb_get_read);
- pagebuf_iostart(pb, flags);
- } else if (flags & PBF_ASYNC) {
- PB_TRACE(pb, "read_async", (unsigned long)flags);
+ xfs_buf_t *bp;
+
+ flags |= XBF_READ;
+
+ bp = xfs_buf_get_flags(target, ioff, isize, flags);
+ if (bp) {
+ if (!XFS_BUF_ISDONE(bp)) {
+ XB_TRACE(bp, "read", (unsigned long)flags);
+ XFS_STATS_INC(xb_get_read);
+ xfs_buf_iostart(bp, flags);
+ } else if (flags & XBF_ASYNC) {
+ XB_TRACE(bp, "read_async", (unsigned long)flags);
/*
* Read ahead call which is already satisfied,
* drop the buffer
*/
goto no_buffer;
} else {
- PB_TRACE(pb, "read_done", (unsigned long)flags);
+ XB_TRACE(bp, "read_done", (unsigned long)flags);
/* We do not want read in the flags */
- pb->pb_flags &= ~PBF_READ;
+ bp->b_flags &= ~XBF_READ;
}
}
- return pb;
+ return bp;
no_buffer:
- if (flags & (PBF_LOCK | PBF_TRYLOCK))
- pagebuf_unlock(pb);
- pagebuf_rele(pb);
+ if (flags & (XBF_LOCK | XBF_TRYLOCK))
+ xfs_buf_unlock(bp);
+ xfs_buf_rele(bp);
return NULL;
}
/*
- * If we are not low on memory then do the readahead in a deadlock
- * safe manner.
+ * If we are not low on memory then do the readahead in a deadlock
+ * safe manner.
*/
void
-pagebuf_readahead(
+xfs_buf_readahead(
xfs_buftarg_t *target,
- loff_t ioff,
+ xfs_off_t ioff,
size_t isize,
- page_buf_flags_t flags)
+ xfs_buf_flags_t flags)
{
struct backing_dev_info *bdi;
- bdi = target->pbr_mapping->backing_dev_info;
+ bdi = target->bt_mapping->backing_dev_info;
if (bdi_read_congested(bdi))
return;
- flags |= (PBF_TRYLOCK|PBF_ASYNC|PBF_READ_AHEAD);
+ flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
xfs_buf_read_flags(target, ioff, isize, flags);
}
xfs_buf_t *
-pagebuf_get_empty(
+xfs_buf_get_empty(
size_t len,
xfs_buftarg_t *target)
{
- xfs_buf_t *pb;
+ xfs_buf_t *bp;
- pb = pagebuf_allocate(0);
- if (pb)
- _pagebuf_initialize(pb, target, 0, len, 0);
- return pb;
+ bp = xfs_buf_allocate(0);
+ if (bp)
+ _xfs_buf_initialize(bp, target, 0, len, 0);
+ return bp;
}
static inline struct page *
@@ -704,8 +701,8 @@ mem_to_page(
}
int
-pagebuf_associate_memory(
- xfs_buf_t *pb,
+xfs_buf_associate_memory(
+ xfs_buf_t *bp,
void *mem,
size_t len)
{
@@ -722,40 +719,40 @@ pagebuf_associate_memory(
page_count++;
/* Free any previous set of page pointers */
- if (pb->pb_pages)
- _pagebuf_free_pages(pb);
+ if (bp->b_pages)
+ _xfs_buf_free_pages(bp);
- pb->pb_pages = NULL;
- pb->pb_addr = mem;
+ bp->b_pages = NULL;
+ bp->b_addr = mem;
- rval = _pagebuf_get_pages(pb, page_count, 0);
+ rval = _xfs_buf_get_pages(bp, page_count, 0);
if (rval)
return rval;
- pb->pb_offset = offset;
+ bp->b_offset = offset;
ptr = (size_t) mem & PAGE_CACHE_MASK;
end = PAGE_CACHE_ALIGN((size_t) mem + len);
end_cur = end;
/* set up first page */
- pb->pb_pages[0] = mem_to_page(mem);
+ bp->b_pages[0] = mem_to_page(mem);
ptr += PAGE_CACHE_SIZE;
- pb->pb_page_count = ++i;
+ bp->b_page_count = ++i;
while (ptr < end) {
- pb->pb_pages[i] = mem_to_page((void *)ptr);
- pb->pb_page_count = ++i;
+ bp->b_pages[i] = mem_to_page((void *)ptr);
+ bp->b_page_count = ++i;
ptr += PAGE_CACHE_SIZE;
}
- pb->pb_locked = 0;
+ bp->b_locked = 0;
- pb->pb_count_desired = pb->pb_buffer_length = len;
- pb->pb_flags |= PBF_MAPPED;
+ bp->b_count_desired = bp->b_buffer_length = len;
+ bp->b_flags |= XBF_MAPPED;
return 0;
}
xfs_buf_t *
-pagebuf_get_no_daddr(
+xfs_buf_get_noaddr(
size_t len,
xfs_buftarg_t *target)
{
@@ -764,10 +761,10 @@ pagebuf_get_no_daddr(
void *data;
int error;
- bp = pagebuf_allocate(0);
+ bp = xfs_buf_allocate(0);
if (unlikely(bp == NULL))
goto fail;
- _pagebuf_initialize(bp, target, 0, len, 0);
+ _xfs_buf_initialize(bp, target, 0, len, 0);
try_again:
data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
@@ -776,78 +773,73 @@ pagebuf_get_no_daddr(
/* check whether alignment matches.. */
if ((__psunsigned_t)data !=
- ((__psunsigned_t)data & ~target->pbr_smask)) {
+ ((__psunsigned_t)data & ~target->bt_smask)) {
/* .. else double the size and try again */
kmem_free(data, malloc_len);
malloc_len <<= 1;
goto try_again;
}
- error = pagebuf_associate_memory(bp, data, len);
+ error = xfs_buf_associate_memory(bp, data, len);
if (error)
goto fail_free_mem;
- bp->pb_flags |= _PBF_KMEM_ALLOC;
+ bp->b_flags |= _XBF_KMEM_ALLOC;
- pagebuf_unlock(bp);
+ xfs_buf_unlock(bp);
- PB_TRACE(bp, "no_daddr", data);
+ XB_TRACE(bp, "no_daddr", data);
return bp;
fail_free_mem:
kmem_free(data, malloc_len);
fail_free_buf:
- pagebuf_free(bp);
+ xfs_buf_free(bp);
fail:
return NULL;
}
/*
- * pagebuf_hold
- *
* Increment reference count on buffer, to hold the buffer concurrently
* with another thread which may release (free) the buffer asynchronously.
- *
* Must hold the buffer already to call this function.
*/
void
-pagebuf_hold(
- xfs_buf_t *pb)
+xfs_buf_hold(
+ xfs_buf_t *bp)
{
- atomic_inc(&pb->pb_hold);
- PB_TRACE(pb, "hold", 0);
+ atomic_inc(&bp->b_hold);
+ XB_TRACE(bp, "hold", 0);
}
/*
- * pagebuf_rele
- *
- * pagebuf_rele releases a hold on the specified buffer. If the
- * the hold count is 1, pagebuf_rele calls pagebuf_free.
+ * Releases a hold on the specified buffer. If the
+ * hold count is 1, calls xfs_buf_free.
*/
void
-pagebuf_rele(
- xfs_buf_t *pb)
+xfs_buf_rele(
+ xfs_buf_t *bp)
{
- xfs_bufhash_t *hash = pb->pb_hash;
+ xfs_bufhash_t *hash = bp->b_hash;
- PB_TRACE(pb, "rele", pb->pb_relse);
+ XB_TRACE(bp, "rele", bp->b_relse);
- if (atomic_dec_and_lock(&pb->pb_hold, &hash->bh_lock)) {
- if (pb->pb_relse) {
- atomic_inc(&pb->pb_hold);
+ if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
+ if (bp->b_relse) {
+ atomic_inc(&bp->b_hold);
spin_unlock(&hash->bh_lock);
- (*(pb->pb_relse)) (pb);
- } else if (pb->pb_flags & PBF_FS_MANAGED) {
+ (*(bp->b_relse)) (bp);
+ } else if (bp->b_flags & XBF_FS_MANAGED) {
spin_unlock(&hash->bh_lock);
} else {
- ASSERT(!(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)));
- list_del_init(&pb->pb_hash_list);
+ ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
+ list_del_init(&bp->b_hash_list);
spin_unlock(&hash->bh_lock);
- pagebuf_free(pb);
+ xfs_buf_free(bp);
}
} else {
/*
* Catch reference count leaks
*/
- ASSERT(atomic_read(&pb->pb_hold) >= 0);
+ ASSERT(atomic_read(&bp->b_hold) >= 0);
}
}
@@ -863,168 +855,122 @@ pagebuf_rele(
*/
/*
- * pagebuf_cond_lock
- *
- * pagebuf_cond_lock locks a buffer object, if it is not already locked.
- * Note that this in no way
- * locks the underlying pages, so it is only useful for synchronizing
- * concurrent use of page buffer objects, not for synchronizing independent
- * access to the underlying pages.
+ * Locks a buffer object, if it is not already locked.
+ * Note that this in no way locks the underlying pages, so it is only
+ * useful for synchronizing concurrent use of buffer objects, not for
+ * synchronizing independent access to the underlying pages.
*/
int
-pagebuf_cond_lock( /* lock buffer, if not locked */
- /* returns -EBUSY if locked) */
- xfs_buf_t *pb)
+xfs_buf_cond_lock(
+ xfs_buf_t *bp)
{
int locked;
- locked = down_trylock(&pb->pb_sema) == 0;
+ locked = down_trylock(&bp->b_sema) == 0;
if (locked) {
- PB_SET_OWNER(pb);
+ XB_SET_OWNER(bp);
}
- PB_TRACE(pb, "cond_lock", (long)locked);
- return(locked ? 0 : -EBUSY);
+ XB_TRACE(bp, "cond_lock", (long)locked);
+ return locked ? 0 : -EBUSY;
}
#if defined(DEBUG) || defined(XFS_BLI_TRACE)
-/*
- * pagebuf_lock_value
- *
- * Return lock value for a pagebuf
- */
int
-pagebuf_lock_value(
- xfs_buf_t *pb)
+xfs_buf_lock_value(
+ xfs_buf_t *bp)
{
- return(atomic_read(&pb->pb_sema.count));
+ return atomic_read(&bp->b_sema.count);
}
#endif
/*
- * pagebuf_lock
- *
- * pagebuf_lock locks a buffer object. Note that this in no way
- * locks the underlying pages, so it is only useful for synchronizing
- * concurrent use of page buffer objects, not for synchronizing independent
- * access to the underlying pages.
+ * Locks a buffer object.
+ * Note that this in no way locks the underlying pages, so it is only
+ * useful for synchronizing concurrent use of buffer objects, not for
+ * synchronizing independent access to the underlying pages.
*/
-int
-pagebuf_lock(
- xfs_buf_t *pb)
+void
+xfs_buf_lock(
+ xfs_buf_t *bp)
{
- PB_TRACE(pb, "lock", 0);
- if (atomic_read(&pb->pb_io_remaining))
- blk_run_address_space(pb->pb_target->pbr_mapping);
- down(&pb->pb_sema);
- PB_SET_OWNER(pb);
- PB_TRACE(pb, "locked", 0);
- return 0;
+ XB_TRACE(bp, "lock", 0);
+ if (atomic_read(&bp->b_io_remaining))
+ blk_run_address_space(bp->b_target->bt_mapping);
+ down(&bp->b_sema);
+ XB_SET_OWNER(bp);
+ XB_TRACE(bp, "locked", 0);
}
/*
- * pagebuf_unlock
- *
- * pagebuf_unlock releases the lock on the buffer object created by
- * pagebuf_lock or pagebuf_cond_lock (not any pinning of underlying pages
- * created by pagebuf_pin).
- *
+ * Releases the lock on the buffer object.
* If the buffer is marked delwri but is not queued, do so before we
- * unlock the buffer as we need to set flags correctly. We also need to
+ * unlock the buffer as we need to set flags correctly. We also need to
* take a reference for the delwri queue because the unlocker is going to
* drop theirs and they don't know we just queued it.
*/
void
-pagebuf_unlock( /* unlock buffer */
- xfs_buf_t *pb) /* buffer to unlock */
+xfs_buf_unlock(
+ xfs_buf_t *bp)
{
- if ((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == PBF_DELWRI) {
- atomic_inc(&pb->pb_hold);
- pb->pb_flags |= PBF_ASYNC;
- pagebuf_delwri_queue(pb, 0);
+ if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
+ atomic_inc(&bp->b_hold);
+ bp->b_flags |= XBF_ASYNC;
+ xfs_buf_delwri_queue(bp, 0);
}
- PB_CLEAR_OWNER(pb);
- up(&pb->pb_sema);
- PB_TRACE(pb, "unlock", 0);
+ XB_CLEAR_OWNER(bp);
+ up(&bp->b_sema);
+ XB_TRACE(bp, "unlock", 0);
}
/*
* Pinning Buffer Storage in Memory
- */
-
-/*
- * pagebuf_pin
- *
- * pagebuf_pin locks all of the memory represented by a buffer in
- * memory. Multiple calls to pagebuf_pin and pagebuf_unpin, for
- * the same or different buffers affecting a given page, will
- * properly count the number of outstanding "pin" requests. The
- * buffer may be released after the pagebuf_pin and a different
- * buffer used when calling pagebuf_unpin, if desired.
- * pagebuf_pin should be used by the file system when it wants be
- * assured that no attempt will be made to force the affected
- * memory to disk. It does not assure that a given logical page
- * will not be moved to a different physical page.
+ * Ensure that no attempt to force a buffer to disk will succeed.
*/
void
-pagebuf_pin(
- xfs_buf_t *pb)
+xfs_buf_pin(
+ xfs_buf_t *bp)
{
- atomic_inc(&pb->pb_pin_count);
- PB_TRACE(pb, "pin", (long)pb->pb_pin_count.counter);
+ atomic_inc(&bp->b_pin_count);
+ XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
}
-/*
- * pagebuf_unpin
- *
- * pagebuf_unpin reverses the locking of memory performed by
- * pagebuf_pin. Note that both functions affected the logical
- * pages associated with the buffer, not the buffer itself.
- */
void
-pagebuf_unpin(
- xfs_buf_t *pb)
+xfs_buf_unpin(
+ xfs_buf_t *bp)
{
- if (atomic_dec_and_test(&pb->pb_pin_count)) {
- wake_up_all(&pb->pb_waiters);
- }
- PB_TRACE(pb, "unpin", (long)pb->pb_pin_count.counter);
+ if (atomic_dec_and_test(&bp->b_pin_count))
+ wake_up_all(&bp->b_waiters);
+ XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
}
int
-pagebuf_ispin(
- xfs_buf_t *pb)
+xfs_buf_ispin(
+ xfs_buf_t *bp)
{
- return atomic_read(&pb->pb_pin_count);
+ return atomic_read(&bp->b_pin_count);
}
-/*
- * pagebuf_wait_unpin
- *
- * pagebuf_wait_unpin waits until all of the memory associated
- * with the buffer is not longer locked in memory. It returns
- * immediately if none of the affected pages are locked.
- */
-static inline void
-_pagebuf_wait_unpin(
- xfs_buf_t *pb)
+STATIC void
+xfs_buf_wait_unpin(
+ xfs_buf_t *bp)
{
DECLARE_WAITQUEUE (wait, current);
- if (atomic_read(&pb->pb_pin_count) == 0)
+ if (atomic_read(&bp->b_pin_count) == 0)
return;
- add_wait_queue(&pb->pb_waiters, &wait);
+ add_wait_queue(&bp->b_waiters, &wait);
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
- if (atomic_read(&pb->pb_pin_count) == 0)
+ if (atomic_read(&bp->b_pin_count) == 0)
break;
- if (atomic_read(&pb->pb_io_remaining))
- blk_run_address_space(pb->pb_target->pbr_mapping);
+ if (atomic_read(&bp->b_io_remaining))
+ blk_run_address_space(bp->b_target->bt_mapping);
schedule();
}
- remove_wait_queue(&pb->pb_waiters, &wait);
+ remove_wait_queue(&bp->b_waiters, &wait);
set_current_state(TASK_RUNNING);
}
@@ -1032,241 +978,216 @@ _pagebuf_wait_unpin(
* Buffer Utility Routines
*/
-/*
- * pagebuf_iodone
- *
- * pagebuf_iodone marks a buffer for which I/O is in progress
- * done with respect to that I/O. The pb_iodone routine, if
- * present, will be called as a side-effect.
- */
STATIC void
-pagebuf_iodone_work(
+xfs_buf_iodone_work(
void *v)
{
xfs_buf_t *bp = (xfs_buf_t *)v;
- if (bp->pb_iodone)
- (*(bp->pb_iodone))(bp);
- else if (bp->pb_flags & PBF_ASYNC)
+ if (bp->b_iodone)
+ (*(bp->b_iodone))(bp);
+ else if (bp->b_flags & XBF_ASYNC)
xfs_buf_relse(bp);
}
void
-pagebuf_iodone(
- xfs_buf_t *pb,
+xfs_buf_ioend(
+ xfs_buf_t *bp,
int schedule)
{
- pb->pb_flags &= ~(PBF_READ | PBF_WRITE);
- if (pb->pb_error == 0)
- pb->pb_flags |= PBF_DONE;
+ bp->b_flags &= ~(XBF_READ | XBF_WRITE);
+ if (bp->b_error == 0)
+ bp->b_flags |= XBF_DONE;
- PB_TRACE(pb, "iodone", pb->pb_iodone);
+ XB_TRACE(bp, "iodone", bp->b_iodone);
- if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) {
+ if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
if (schedule) {
- INIT_WORK(&pb->pb_iodone_work, pagebuf_iodone_work, pb);
- queue_work(xfslogd_workqueue, &pb->pb_iodone_work);
+ INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp);
+ queue_work(xfslogd_workqueue, &bp->b_iodone_work);
} else {
- pagebuf_iodone_work(pb);
+ xfs_buf_iodone_work(bp);
}
} else {
- up(&pb->pb_iodonesema);
+ up(&bp->b_iodonesema);
}
}
-/*
- * pagebuf_ioerror
- *
- * pagebuf_ioerror sets the error code for a buffer.
- */
void
-pagebuf_ioerror( /* mark/clear buffer error flag */
- xfs_buf_t *pb, /* buffer to mark */
- int error) /* error to store (0 if none) */
+xfs_buf_ioerror(
+ xfs_buf_t *bp,
+ int error)
{
ASSERT(error >= 0 && error <= 0xffff);
- pb->pb_error = (unsigned short)error;
- PB_TRACE(pb, "ioerror", (unsigned long)error);
+ bp->b_error = (unsigned short)error;
+ XB_TRACE(bp, "ioerror", (unsigned long)error);
}
/*
- * pagebuf_iostart
- *
- * pagebuf_iostart initiates I/O on a buffer, based on the flags supplied.
- * If necessary, it will arrange for any disk space allocation required,
- * and it will break up the request if the block mappings require it.
- * The pb_iodone routine in the buffer supplied will only be called
+ * Initiate I/O on a buffer, based on the flags supplied.
+ * The b_iodone routine in the buffer supplied will only be called
* when all of the subsidiary I/O requests, if any, have been completed.
- * pagebuf_iostart calls the pagebuf_ioinitiate routine or
- * pagebuf_iorequest, if the former routine is not defined, to start
- * the I/O on a given low-level request.
*/
int
-pagebuf_iostart( /* start I/O on a buffer */
- xfs_buf_t *pb, /* buffer to start */
- page_buf_flags_t flags) /* PBF_LOCK, PBF_ASYNC, PBF_READ, */
- /* PBF_WRITE, PBF_DELWRI, */
- /* PBF_DONT_BLOCK */
+xfs_buf_iostart(
+ xfs_buf_t *bp,
+ xfs_buf_flags_t flags)
{
int status = 0;
- PB_TRACE(pb, "iostart", (unsigned long)flags);
+ XB_TRACE(bp, "iostart", (unsigned long)flags);
- if (flags & PBF_DELWRI) {
- pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC);
- pb->pb_flags |= flags & (PBF_DELWRI | PBF_ASYNC);
- pagebuf_delwri_queue(pb, 1);
+ if (flags & XBF_DELWRI) {
+ bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
+ bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
+ xfs_buf_delwri_queue(bp, 1);
return status;
}
- pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC | PBF_DELWRI | \
- PBF_READ_AHEAD | _PBF_RUN_QUEUES);
- pb->pb_flags |= flags & (PBF_READ | PBF_WRITE | PBF_ASYNC | \
- PBF_READ_AHEAD | _PBF_RUN_QUEUES);
+ bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
+ XBF_READ_AHEAD | _XBF_RUN_QUEUES);
+ bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
+ XBF_READ_AHEAD | _XBF_RUN_QUEUES);
- BUG_ON(pb->pb_bn == XFS_BUF_DADDR_NULL);
+ BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);
/* For writes allow an alternate strategy routine to precede
* the actual I/O request (which may not be issued at all in
* a shutdown situation, for example).
*/
- status = (flags & PBF_WRITE) ?
- pagebuf_iostrategy(pb) : pagebuf_iorequest(pb);
+ status = (flags & XBF_WRITE) ?
+ xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);
/* Wait for I/O if we are not an async request.
* Note: async I/O request completion will release the buffer,
* and that can already be done by this point. So using the
* buffer pointer from here on, after async I/O, is invalid.
*/
- if (!status && !(flags & PBF_ASYNC))
- status = pagebuf_iowait(pb);
+ if (!status && !(flags & XBF_ASYNC))
+ status = xfs_buf_iowait(bp);
return status;
}
-/*
- * Helper routine for pagebuf_iorequest
- */
-
STATIC __inline__ int
-_pagebuf_iolocked(
- xfs_buf_t *pb)
+_xfs_buf_iolocked(
+ xfs_buf_t *bp)
{
- ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE));
- if (pb->pb_flags & PBF_READ)
- return pb->pb_locked;
+ ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
+ if (bp->b_flags & XBF_READ)
+ return bp->b_locked;
return 0;
}
STATIC __inline__ void
-_pagebuf_iodone(
- xfs_buf_t *pb,
+_xfs_buf_ioend(
+ xfs_buf_t *bp,
int schedule)
{
- if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
- pb->pb_locked = 0;
- pagebuf_iodone(pb, schedule);
+ if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
+ bp->b_locked = 0;
+ xfs_buf_ioend(bp, schedule);
}
}
STATIC int
-bio_end_io_pagebuf(
+xfs_buf_bio_end_io(
struct bio *bio,
unsigned int bytes_done,
int error)
{
- xfs_buf_t *pb = (xfs_buf_t *)bio->bi_private;
- unsigned int blocksize = pb->pb_target->pbr_bsize;
+ xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
+ unsigned int blocksize = bp->b_target->bt_bsize;
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
if (bio->bi_size)
return 1;
if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
- pb->pb_error = EIO;
+ bp->b_error = EIO;
do {
struct page *page = bvec->bv_page;
- if (unlikely(pb->pb_error)) {
- if (pb->pb_flags & PBF_READ)
+ if (unlikely(bp->b_error)) {
+ if (bp->b_flags & XBF_READ)
ClearPageUptodate(page);
SetPageError(page);
- } else if (blocksize == PAGE_CACHE_SIZE) {
+ } else if (blocksize >= PAGE_CACHE_SIZE) {
SetPageUptodate(page);
} else if (!PagePrivate(page) &&
- (pb->pb_flags & _PBF_PAGE_CACHE)) {
+ (bp->b_flags & _XBF_PAGE_CACHE)) {
set_page_region(page, bvec->bv_offset, bvec->bv_len);
}
if (--bvec >= bio->bi_io_vec)
prefetchw(&bvec->bv_page->flags);
- if (_pagebuf_iolocked(pb)) {
+ if (_xfs_buf_iolocked(bp)) {
unlock_page(page);
}
} while (bvec >= bio->bi_io_vec);
- _pagebuf_iodone(pb, 1);
+ _xfs_buf_ioend(bp, 1);
bio_put(bio);
return 0;
}
STATIC void
-_pagebuf_ioapply(
- xfs_buf_t *pb)
+_xfs_buf_ioapply(
+ xfs_buf_t *bp)
{
int i, rw, map_i, total_nr_pages, nr_pages;
struct bio *bio;
- int offset = pb->pb_offset;
- int size = pb->pb_count_desired;
- sector_t sector = pb->pb_bn;
- unsigned int blocksize = pb->pb_target->pbr_bsize;
- int locking = _pagebuf_iolocked(pb);
+ int offset = bp->b_offset;
+ int size = bp->b_count_desired;
+ sector_t sector = bp->b_bn;
+ unsigned int blocksize = bp->b_target->bt_bsize;
+ int locking = _xfs_buf_iolocked(bp);
- total_nr_pages = pb->pb_page_count;
+ total_nr_pages = bp->b_page_count;
map_i = 0;
- if (pb->pb_flags & _PBF_RUN_QUEUES) {
- pb->pb_flags &= ~_PBF_RUN_QUEUES;
- rw = (pb->pb_flags & PBF_READ) ? READ_SYNC : WRITE_SYNC;
+ if (bp->b_flags & _XBF_RUN_QUEUES) {
+ bp->b_flags &= ~_XBF_RUN_QUEUES;
+ rw = (bp->b_flags & XBF_READ) ? READ_SYNC : WRITE_SYNC;
} else {
- rw = (pb->pb_flags & PBF_READ) ? READ : WRITE;
+ rw = (bp->b_flags & XBF_READ) ? READ : WRITE;
}
- if (pb->pb_flags & PBF_ORDERED) {
- ASSERT(!(pb->pb_flags & PBF_READ));
+ if (bp->b_flags & XBF_ORDERED) {
+ ASSERT(!(bp->b_flags & XBF_READ));
rw = WRITE_BARRIER;
}
- /* Special code path for reading a sub page size pagebuf in --
+ /* Special code path for reading a sub page size buffer in --
* we populate up the whole page, and hence the other metadata
* in the same page. This optimization is only valid when the
- * filesystem block size and the page size are equal.
+ * filesystem block size is not smaller than the page size.
*/
- if ((pb->pb_buffer_length < PAGE_CACHE_SIZE) &&
- (pb->pb_flags & PBF_READ) && locking &&
- (blocksize == PAGE_CACHE_SIZE)) {
+ if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
+ (bp->b_flags & XBF_READ) && locking &&
+ (blocksize >= PAGE_CACHE_SIZE)) {
bio = bio_alloc(GFP_NOIO, 1);
- bio->bi_bdev = pb->pb_target->pbr_bdev;
+ bio->bi_bdev = bp->b_target->bt_bdev;
bio->bi_sector = sector - (offset >> BBSHIFT);
- bio->bi_end_io = bio_end_io_pagebuf;
- bio->bi_private = pb;
+ bio->bi_end_io = xfs_buf_bio_end_io;
+ bio->bi_private = bp;
- bio_add_page(bio, pb->pb_pages[0], PAGE_CACHE_SIZE, 0);
+ bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
size = 0;
- atomic_inc(&pb->pb_io_remaining);
+ atomic_inc(&bp->b_io_remaining);
goto submit_io;
}
/* Lock down the pages which we need to for the request */
- if (locking && (pb->pb_flags & PBF_WRITE) && (pb->pb_locked == 0)) {
+ if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
for (i = 0; size; i++) {
int nbytes = PAGE_CACHE_SIZE - offset;
- struct page *page = pb->pb_pages[i];
+ struct page *page = bp->b_pages[i];
if (nbytes > size)
nbytes = size;
@@ -1276,30 +1197,30 @@ _pagebuf_ioapply(
size -= nbytes;
offset = 0;
}
- offset = pb->pb_offset;
- size = pb->pb_count_desired;
+ offset = bp->b_offset;
+ size = bp->b_count_desired;
}
next_chunk:
- atomic_inc(&pb->pb_io_remaining);
+ atomic_inc(&bp->b_io_remaining);
nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
if (nr_pages > total_nr_pages)
nr_pages = total_nr_pages;
bio = bio_alloc(GFP_NOIO, nr_pages);
- bio->bi_bdev = pb->pb_target->pbr_bdev;
+ bio->bi_bdev = bp->b_target->bt_bdev;
bio->bi_sector = sector;
- bio->bi_end_io = bio_end_io_pagebuf;
- bio->bi_private = pb;
+ bio->bi_end_io = xfs_buf_bio_end_io;
+ bio->bi_private = bp;
for (; size && nr_pages; nr_pages--, map_i++) {
- int nbytes = PAGE_CACHE_SIZE - offset;
+ int rbytes, nbytes = PAGE_CACHE_SIZE - offset;
if (nbytes > size)
nbytes = size;
- if (bio_add_page(bio, pb->pb_pages[map_i],
- nbytes, offset) < nbytes)
+ rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
+ if (rbytes < nbytes)
break;
offset = 0;
@@ -1315,107 +1236,102 @@ submit_io:
goto next_chunk;
} else {
bio_put(bio);
- pagebuf_ioerror(pb, EIO);
+ xfs_buf_ioerror(bp, EIO);
}
}
-/*
- * pagebuf_iorequest -- the core I/O request routine.
- */
int
-pagebuf_iorequest( /* start real I/O */
- xfs_buf_t *pb) /* buffer to convey to device */
+xfs_buf_iorequest(
+ xfs_buf_t *bp)
{
- PB_TRACE(pb, "iorequest", 0);
+ XB_TRACE(bp, "iorequest", 0);
- if (pb->pb_flags & PBF_DELWRI) {
- pagebuf_delwri_queue(pb, 1);
+ if (bp->b_flags & XBF_DELWRI) {
+ xfs_buf_delwri_queue(bp, 1);
return 0;
}
- if (pb->pb_flags & PBF_WRITE) {
- _pagebuf_wait_unpin(pb);
+ if (bp->b_flags & XBF_WRITE) {
+ xfs_buf_wait_unpin(bp);
}
- pagebuf_hold(pb);
+ xfs_buf_hold(bp);
/* Set the count to 1 initially, this will stop an I/O
* completion callout which happens before we have started
- * all the I/O from calling pagebuf_iodone too early.
+ * all the I/O from calling xfs_buf_ioend too early.
*/
- atomic_set(&pb->pb_io_remaining, 1);
- _pagebuf_ioapply(pb);
- _pagebuf_iodone(pb, 0);
+ atomic_set(&bp->b_io_remaining, 1);
+ _xfs_buf_ioapply(bp);
+ _xfs_buf_ioend(bp, 0);
- pagebuf_rele(pb);
+ xfs_buf_rele(bp);
return 0;
}
/*
- * pagebuf_iowait
- *
- * pagebuf_iowait waits for I/O to complete on the buffer supplied.
- * It returns immediately if no I/O is pending. In any case, it returns
- * the error code, if any, or 0 if there is no error.
+ * Waits for I/O to complete on the buffer supplied.
+ * It returns immediately if no I/O is pending.
+ * It returns the I/O error code, if any, or 0 if there was no error.
*/
int
-pagebuf_iowait(
- xfs_buf_t *pb)
+xfs_buf_iowait(
+ xfs_buf_t *bp)
{
- PB_TRACE(pb, "iowait", 0);
- if (atomic_read(&pb->pb_io_remaining))
- blk_run_address_space(pb->pb_target->pbr_mapping);
- down(&pb->pb_iodonesema);
- PB_TRACE(pb, "iowaited", (long)pb->pb_error);
- return pb->pb_error;
+ XB_TRACE(bp, "iowait", 0);
+ if (atomic_read(&bp->b_io_remaining))
+ blk_run_address_space(bp->b_target->bt_mapping);
+ down(&bp->b_iodonesema);
+ XB_TRACE(bp, "iowaited", (long)bp->b_error);
+ return bp->b_error;
}
-caddr_t
-pagebuf_offset(
- xfs_buf_t *pb,
+xfs_caddr_t
+xfs_buf_offset(
+ xfs_buf_t *bp,
size_t offset)
{
struct page *page;
- offset += pb->pb_offset;
+ if (bp->b_flags & XBF_MAPPED)
+ return XFS_BUF_PTR(bp) + offset;
- page = pb->pb_pages[offset >> PAGE_CACHE_SHIFT];
- return (caddr_t) page_address(page) + (offset & (PAGE_CACHE_SIZE - 1));
+ offset += bp->b_offset;
+ page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
+ return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
}
/*
- * pagebuf_iomove
- *
* Move data into or out of a buffer.
*/
void
-pagebuf_iomove(
- xfs_buf_t *pb, /* buffer to process */
+xfs_buf_iomove(
+ xfs_buf_t *bp, /* buffer to process */
size_t boff, /* starting buffer offset */
size_t bsize, /* length to copy */
caddr_t data, /* data address */
- page_buf_rw_t mode) /* read/write flag */
+ xfs_buf_rw_t mode) /* read/write/zero flag */
{
size_t bend, cpoff, csize;
struct page *page;
bend = boff + bsize;
while (boff < bend) {
- page = pb->pb_pages[page_buf_btoct(boff + pb->pb_offset)];
- cpoff = page_buf_poff(boff + pb->pb_offset);
+ page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
+ cpoff = xfs_buf_poff(boff + bp->b_offset);
csize = min_t(size_t,
- PAGE_CACHE_SIZE-cpoff, pb->pb_count_desired-boff);
+ PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
switch (mode) {
- case PBRW_ZERO:
+ case XBRW_ZERO:
memset(page_address(page) + cpoff, 0, csize);
break;
- case PBRW_READ:
+ case XBRW_READ:
memcpy(data, page_address(page) + cpoff, csize);
break;
- case PBRW_WRITE:
+ case XBRW_WRITE:
memcpy(page_address(page) + cpoff, data, csize);
}
@@ -1425,12 +1341,12 @@ pagebuf_iomove(
}
/*
- * Handling of buftargs.
+ * Handling of buffer targets (buftargs).
*/
/*
- * Wait for any bufs with callbacks that have been submitted but
- * have not yet returned... walk the hash list for the target.
+ * Wait for any bufs with callbacks that have been submitted but
+ * have not yet returned... walk the hash list for the target.
*/
void
xfs_wait_buftarg(
@@ -1444,15 +1360,15 @@ xfs_wait_buftarg(
hash = &btp->bt_hash[i];
again:
spin_lock(&hash->bh_lock);
- list_for_each_entry_safe(bp, n, &hash->bh_list, pb_hash_list) {
- ASSERT(btp == bp->pb_target);
- if (!(bp->pb_flags & PBF_FS_MANAGED)) {
+ list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
+ ASSERT(btp == bp->b_target);
+ if (!(bp->b_flags & XBF_FS_MANAGED)) {
spin_unlock(&hash->bh_lock);
/*
* Catch superblock reference count leaks
* immediately
*/
- BUG_ON(bp->pb_bn == 0);
+ BUG_ON(bp->b_bn == 0);
delay(100);
goto again;
}
@@ -1462,9 +1378,9 @@ again:
}
/*
- * Allocate buffer hash table for a given target.
- * For devices containing metadata (i.e. not the log/realtime devices)
- * we need to allocate a much larger hash table.
+ * Allocate buffer hash table for a given target.
+ * For devices containing metadata (i.e. not the log/realtime devices)
+ * we need to allocate a much larger hash table.
*/
STATIC void
xfs_alloc_bufhash(
@@ -1487,11 +1403,34 @@ STATIC void
xfs_free_bufhash(
xfs_buftarg_t *btp)
{
- kmem_free(btp->bt_hash,
- (1 << btp->bt_hashshift) * sizeof(xfs_bufhash_t));
+ kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t));
btp->bt_hash = NULL;
}
+/*
+ * buftarg list for delwrite queue processing
+ */
+STATIC LIST_HEAD(xfs_buftarg_list);
+STATIC DEFINE_SPINLOCK(xfs_buftarg_lock);
+
+STATIC void
+xfs_register_buftarg(
+ xfs_buftarg_t *btp)
+{
+ spin_lock(&xfs_buftarg_lock);
+ list_add(&btp->bt_list, &xfs_buftarg_list);
+ spin_unlock(&xfs_buftarg_lock);
+}
+
+STATIC void
+xfs_unregister_buftarg(
+ xfs_buftarg_t *btp)
+{
+ spin_lock(&xfs_buftarg_lock);
+ list_del(&btp->bt_list);
+ spin_unlock(&xfs_buftarg_lock);
+}
+
void
xfs_free_buftarg(
xfs_buftarg_t *btp,
@@ -1499,9 +1438,16 @@ xfs_free_buftarg(
{
xfs_flush_buftarg(btp, 1);
if (external)
- xfs_blkdev_put(btp->pbr_bdev);
+ xfs_blkdev_put(btp->bt_bdev);
xfs_free_bufhash(btp);
- iput(btp->pbr_mapping->host);
+ iput(btp->bt_mapping->host);
+
+ /* Unregister the buftarg first so that we don't get a
+ * wakeup finding a non-existent task
+ */
+ xfs_unregister_buftarg(btp);
+ kthread_stop(btp->bt_task);
+
kmem_free(btp, sizeof(*btp));
}
@@ -1512,11 +1458,11 @@ xfs_setsize_buftarg_flags(
unsigned int sectorsize,
int verbose)
{
- btp->pbr_bsize = blocksize;
- btp->pbr_sshift = ffs(sectorsize) - 1;
- btp->pbr_smask = sectorsize - 1;
+ btp->bt_bsize = blocksize;
+ btp->bt_sshift = ffs(sectorsize) - 1;
+ btp->bt_smask = sectorsize - 1;
- if (set_blocksize(btp->pbr_bdev, sectorsize)) {
+ if (set_blocksize(btp->bt_bdev, sectorsize)) {
printk(KERN_WARNING
"XFS: Cannot set_blocksize to %u on device %s\n",
sectorsize, XFS_BUFTARG_NAME(btp));
@@ -1536,10 +1482,10 @@ xfs_setsize_buftarg_flags(
}
/*
-* When allocating the initial buffer target we have not yet
-* read in the superblock, so don't know what sized sectors
-* are being used is at this early stage. Play safe.
-*/
+ * When allocating the initial buffer target we have not yet
+ * read in the superblock, so we don't know what size sectors
+ * are being used at this early stage. Play safe.
+ */
STATIC int
xfs_setsize_buftarg_early(
xfs_buftarg_t *btp,
@@ -1587,10 +1533,30 @@ xfs_mapping_buftarg(
mapping->a_ops = &mapping_aops;
mapping->backing_dev_info = bdi;
mapping_set_gfp_mask(mapping, GFP_NOFS);
- btp->pbr_mapping = mapping;
+ btp->bt_mapping = mapping;
return 0;
}
+STATIC int
+xfs_alloc_delwrite_queue(
+ xfs_buftarg_t *btp)
+{
+ int error = 0;
+
+ INIT_LIST_HEAD(&btp->bt_list);
+ INIT_LIST_HEAD(&btp->bt_delwrite_queue);
+ spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
+ btp->bt_flags = 0;
+ btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
+ if (IS_ERR(btp->bt_task)) {
+ error = PTR_ERR(btp->bt_task);
+ goto out_error;
+ }
+ xfs_register_buftarg(btp);
+out_error:
+ return error;
+}
+
xfs_buftarg_t *
xfs_alloc_buftarg(
struct block_device *bdev,
@@ -1600,12 +1566,14 @@ xfs_alloc_buftarg(
btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
- btp->pbr_dev = bdev->bd_dev;
- btp->pbr_bdev = bdev;
+ btp->bt_dev = bdev->bd_dev;
+ btp->bt_bdev = bdev;
if (xfs_setsize_buftarg_early(btp, bdev))
goto error;
if (xfs_mapping_buftarg(btp, bdev))
goto error;
+ if (xfs_alloc_delwrite_queue(btp))
+ goto error;
xfs_alloc_bufhash(btp, external);
return btp;
@@ -1616,83 +1584,81 @@ error:
/*
- * Pagebuf delayed write buffer handling
+ * Delayed write buffer handling
*/
-
-STATIC LIST_HEAD(pbd_delwrite_queue);
-STATIC DEFINE_SPINLOCK(pbd_delwrite_lock);
-
STATIC void
-pagebuf_delwri_queue(
- xfs_buf_t *pb,
+xfs_buf_delwri_queue(
+ xfs_buf_t *bp,
int unlock)
{
- PB_TRACE(pb, "delwri_q", (long)unlock);
- ASSERT((pb->pb_flags & (PBF_DELWRI|PBF_ASYNC)) ==
- (PBF_DELWRI|PBF_ASYNC));
+ struct list_head *dwq = &bp->b_target->bt_delwrite_queue;
+ spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
+
+ XB_TRACE(bp, "delwri_q", (long)unlock);
+ ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
- spin_lock(&pbd_delwrite_lock);
+ spin_lock(dwlk);
/* If already in the queue, dequeue and place at tail */
- if (!list_empty(&pb->pb_list)) {
- ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
- if (unlock) {
- atomic_dec(&pb->pb_hold);
- }
- list_del(&pb->pb_list);
+ if (!list_empty(&bp->b_list)) {
+ ASSERT(bp->b_flags & _XBF_DELWRI_Q);
+ if (unlock)
+ atomic_dec(&bp->b_hold);
+ list_del(&bp->b_list);
}
- pb->pb_flags |= _PBF_DELWRI_Q;
- list_add_tail(&pb->pb_list, &pbd_delwrite_queue);
- pb->pb_queuetime = jiffies;
- spin_unlock(&pbd_delwrite_lock);
+ bp->b_flags |= _XBF_DELWRI_Q;
+ list_add_tail(&bp->b_list, dwq);
+ bp->b_queuetime = jiffies;
+ spin_unlock(dwlk);
if (unlock)
- pagebuf_unlock(pb);
+ xfs_buf_unlock(bp);
}
void
-pagebuf_delwri_dequeue(
- xfs_buf_t *pb)
+xfs_buf_delwri_dequeue(
+ xfs_buf_t *bp)
{
+ spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
int dequeued = 0;
- spin_lock(&pbd_delwrite_lock);
- if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) {
- ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
- list_del_init(&pb->pb_list);
+ spin_lock(dwlk);
+ if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
+ ASSERT(bp->b_flags & _XBF_DELWRI_Q);
+ list_del_init(&bp->b_list);
dequeued = 1;
}
- pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
- spin_unlock(&pbd_delwrite_lock);
+ bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
+ spin_unlock(dwlk);
if (dequeued)
- pagebuf_rele(pb);
+ xfs_buf_rele(bp);
- PB_TRACE(pb, "delwri_dq", (long)dequeued);
+ XB_TRACE(bp, "delwri_dq", (long)dequeued);
}
STATIC void
-pagebuf_runall_queues(
+xfs_buf_runall_queues(
struct workqueue_struct *queue)
{
flush_workqueue(queue);
}
-/* Defines for pagebuf daemon */
-STATIC struct task_struct *xfsbufd_task;
-STATIC int xfsbufd_force_flush;
-STATIC int xfsbufd_force_sleep;
-
STATIC int
xfsbufd_wakeup(
int priority,
gfp_t mask)
{
- if (xfsbufd_force_sleep)
- return 0;
- xfsbufd_force_flush = 1;
- barrier();
- wake_up_process(xfsbufd_task);
+ xfs_buftarg_t *btp;
+
+ spin_lock(&xfs_buftarg_lock);
+ list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
+ if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
+ continue;
+ set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
+ wake_up_process(btp->bt_task);
+ }
+ spin_unlock(&xfs_buftarg_lock);
return 0;
}
@@ -1702,67 +1668,70 @@ xfsbufd(
{
struct list_head tmp;
unsigned long age;
- xfs_buftarg_t *target;
- xfs_buf_t *pb, *n;
+ xfs_buftarg_t *target = (xfs_buftarg_t *)data;
+ xfs_buf_t *bp, *n;
+ struct list_head *dwq = &target->bt_delwrite_queue;
+ spinlock_t *dwlk = &target->bt_delwrite_lock;
current->flags |= PF_MEMALLOC;
INIT_LIST_HEAD(&tmp);
do {
if (unlikely(freezing(current))) {
- xfsbufd_force_sleep = 1;
+ set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
refrigerator();
} else {
- xfsbufd_force_sleep = 0;
+ clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
}
schedule_timeout_interruptible(
xfs_buf_timer_centisecs * msecs_to_jiffies(10));
age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
- spin_lock(&pbd_delwrite_lock);
- list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
- PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb));
- ASSERT(pb->pb_flags & PBF_DELWRI);
-
- if (!pagebuf_ispin(pb) && !pagebuf_cond_lock(pb)) {
- if (!xfsbufd_force_flush &&
+ spin_lock(dwlk);
+ list_for_each_entry_safe(bp, n, dwq, b_list) {
+ XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
+ ASSERT(bp->b_flags & XBF_DELWRI);
+
+ if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
+ if (!test_bit(XBT_FORCE_FLUSH,
+ &target->bt_flags) &&
time_before(jiffies,
- pb->pb_queuetime + age)) {
- pagebuf_unlock(pb);
+ bp->b_queuetime + age)) {
+ xfs_buf_unlock(bp);
break;
}
- pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
- pb->pb_flags |= PBF_WRITE;
- list_move(&pb->pb_list, &tmp);
+ bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
+ bp->b_flags |= XBF_WRITE;
+ list_move(&bp->b_list, &tmp);
}
}
- spin_unlock(&pbd_delwrite_lock);
+ spin_unlock(dwlk);
while (!list_empty(&tmp)) {
- pb = list_entry(tmp.next, xfs_buf_t, pb_list);
- target = pb->pb_target;
+ bp = list_entry(tmp.next, xfs_buf_t, b_list);
+ ASSERT(target == bp->b_target);
- list_del_init(&pb->pb_list);
- pagebuf_iostrategy(pb);
+ list_del_init(&bp->b_list);
+ xfs_buf_iostrategy(bp);
- blk_run_address_space(target->pbr_mapping);
+ blk_run_address_space(target->bt_mapping);
}
if (as_list_len > 0)
purge_addresses();
- xfsbufd_force_flush = 0;
+ clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
} while (!kthread_should_stop());
return 0;
}
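
For context, the producer side of this per-target queue is the delayed-write submission path; a minimal sketch of how a dirty buffer would reach bt_delwrite_queue, mirroring the xfs_bdwrite() helper in xfs_buf.h (mp and bp are assumed placeholders for a mount point and a locked, modified buffer, not names taken from this change):

	int error;

	bp->b_strat = xfs_bdstrat_cb;	/* pre-write hook, as xfs_bdwrite() sets it */
	bp->b_fspriv3 = mp;		/* mount back-pointer for the strat routine */
	error = xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC);
	/*
	 * The buffer is then expected to sit on bp->b_target->bt_delwrite_queue
	 * until it ages past xfs_buf_age_centisecs or XBT_FORCE_FLUSH is set,
	 * at which point xfsbufd for that target writes it back.
	 */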
/*
- * Go through all incore buffers, and release buffers if they belong to
- * the given device. This is used in filesystem error handling to
- * preserve the consistency of its metadata.
+ * Go through all incore buffers, and release buffers if they belong to
+ * the given device. This is used in filesystem error handling to
+ * preserve the consistency of its metadata.
*/
int
xfs_flush_buftarg(
@@ -1770,73 +1739,72 @@ xfs_flush_buftarg(
int wait)
{
struct list_head tmp;
- xfs_buf_t *pb, *n;
+ xfs_buf_t *bp, *n;
int pincount = 0;
+ struct list_head *dwq = &target->bt_delwrite_queue;
+ spinlock_t *dwlk = &target->bt_delwrite_lock;
- pagebuf_runall_queues(xfsdatad_workqueue);
- pagebuf_runall_queues(xfslogd_workqueue);
+ xfs_buf_runall_queues(xfsdatad_workqueue);
+ xfs_buf_runall_queues(xfslogd_workqueue);
INIT_LIST_HEAD(&tmp);
- spin_lock(&pbd_delwrite_lock);
- list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
-
- if (pb->pb_target != target)
- continue;
-
- ASSERT(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q));
- PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb));
- if (pagebuf_ispin(pb)) {
+ spin_lock(dwlk);
+ list_for_each_entry_safe(bp, n, dwq, b_list) {
+ ASSERT(bp->b_target == target);
+ ASSERT(bp->b_flags & (XBF_DELWRI | _XBF_DELWRI_Q));
+ XB_TRACE(bp, "walkq2", (long)xfs_buf_ispin(bp));
+ if (xfs_buf_ispin(bp)) {
pincount++;
continue;
}
- list_move(&pb->pb_list, &tmp);
+ list_move(&bp->b_list, &tmp);
}
- spin_unlock(&pbd_delwrite_lock);
+ spin_unlock(dwlk);
/*
* Dropped the delayed write list lock, now walk the temporary list
*/
- list_for_each_entry_safe(pb, n, &tmp, pb_list) {
- pagebuf_lock(pb);
- pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
- pb->pb_flags |= PBF_WRITE;
+ list_for_each_entry_safe(bp, n, &tmp, b_list) {
+ xfs_buf_lock(bp);
+ bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
+ bp->b_flags |= XBF_WRITE;
if (wait)
- pb->pb_flags &= ~PBF_ASYNC;
+ bp->b_flags &= ~XBF_ASYNC;
else
- list_del_init(&pb->pb_list);
+ list_del_init(&bp->b_list);
- pagebuf_iostrategy(pb);
+ xfs_buf_iostrategy(bp);
}
/*
* Remaining list items must be flushed before returning
*/
while (!list_empty(&tmp)) {
- pb = list_entry(tmp.next, xfs_buf_t, pb_list);
+ bp = list_entry(tmp.next, xfs_buf_t, b_list);
- list_del_init(&pb->pb_list);
- xfs_iowait(pb);
- xfs_buf_relse(pb);
+ list_del_init(&bp->b_list);
+ xfs_iowait(bp);
+ xfs_buf_relse(bp);
}
if (wait)
- blk_run_address_space(target->pbr_mapping);
+ blk_run_address_space(target->bt_mapping);
return pincount;
}
int __init
-pagebuf_init(void)
+xfs_buf_init(void)
{
int error = -ENOMEM;
-#ifdef PAGEBUF_TRACE
- pagebuf_trace_buf = ktrace_alloc(PAGEBUF_TRACE_SIZE, KM_SLEEP);
+#ifdef XFS_BUF_TRACE
+ xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
#endif
- pagebuf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
- if (!pagebuf_zone)
+ xfs_buf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
+ if (!xfs_buf_zone)
goto out_free_trace_buf;
xfslogd_workqueue = create_workqueue("xfslogd");
@@ -1847,42 +1815,33 @@ pagebuf_init(void)
if (!xfsdatad_workqueue)
goto out_destroy_xfslogd_workqueue;
- xfsbufd_task = kthread_run(xfsbufd, NULL, "xfsbufd");
- if (IS_ERR(xfsbufd_task)) {
- error = PTR_ERR(xfsbufd_task);
+ xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
+ if (!xfs_buf_shake)
goto out_destroy_xfsdatad_workqueue;
- }
-
- pagebuf_shake = kmem_shake_register(xfsbufd_wakeup);
- if (!pagebuf_shake)
- goto out_stop_xfsbufd;
return 0;
- out_stop_xfsbufd:
- kthread_stop(xfsbufd_task);
out_destroy_xfsdatad_workqueue:
destroy_workqueue(xfsdatad_workqueue);
out_destroy_xfslogd_workqueue:
destroy_workqueue(xfslogd_workqueue);
out_free_buf_zone:
- kmem_zone_destroy(pagebuf_zone);
+ kmem_zone_destroy(xfs_buf_zone);
out_free_trace_buf:
-#ifdef PAGEBUF_TRACE
- ktrace_free(pagebuf_trace_buf);
+#ifdef XFS_BUF_TRACE
+ ktrace_free(xfs_buf_trace_buf);
#endif
return error;
}
void
-pagebuf_terminate(void)
+xfs_buf_terminate(void)
{
- kmem_shake_deregister(pagebuf_shake);
- kthread_stop(xfsbufd_task);
+ kmem_shake_deregister(xfs_buf_shake);
destroy_workqueue(xfsdatad_workqueue);
destroy_workqueue(xfslogd_workqueue);
- kmem_zone_destroy(pagebuf_zone);
-#ifdef PAGEBUF_TRACE
- ktrace_free(pagebuf_trace_buf);
+ kmem_zone_destroy(xfs_buf_zone);
+#ifdef XFS_BUF_TRACE
+ ktrace_free(xfs_buf_trace_buf);
#endif
}
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 237a35b915d..4dd6592d5a4 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -32,44 +32,47 @@
* Base types
*/
-#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL))
-
-#define page_buf_ctob(pp) ((pp) * PAGE_CACHE_SIZE)
-#define page_buf_btoc(dd) (((dd) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT)
-#define page_buf_btoct(dd) ((dd) >> PAGE_CACHE_SHIFT)
-#define page_buf_poff(aa) ((aa) & ~PAGE_CACHE_MASK)
-
-typedef enum page_buf_rw_e {
- PBRW_READ = 1, /* transfer into target memory */
- PBRW_WRITE = 2, /* transfer from target memory */
- PBRW_ZERO = 3 /* Zero target memory */
-} page_buf_rw_t;
-
-
-typedef enum page_buf_flags_e { /* pb_flags values */
- PBF_READ = (1 << 0), /* buffer intended for reading from device */
- PBF_WRITE = (1 << 1), /* buffer intended for writing to device */
- PBF_MAPPED = (1 << 2), /* buffer mapped (pb_addr valid) */
- PBF_ASYNC = (1 << 4), /* initiator will not wait for completion */
- PBF_DONE = (1 << 5), /* all pages in the buffer uptodate */
- PBF_DELWRI = (1 << 6), /* buffer has dirty pages */
- PBF_STALE = (1 << 7), /* buffer has been staled, do not find it */
- PBF_FS_MANAGED = (1 << 8), /* filesystem controls freeing memory */
- PBF_ORDERED = (1 << 11), /* use ordered writes */
- PBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead */
+#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL))
+
+#define xfs_buf_ctob(pp) ((pp) * PAGE_CACHE_SIZE)
+#define xfs_buf_btoc(dd) (((dd) + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT)
+#define xfs_buf_btoct(dd) ((dd) >> PAGE_CACHE_SHIFT)
+#define xfs_buf_poff(aa) ((aa) & ~PAGE_CACHE_MASK)
+
+typedef enum {
+ XBRW_READ = 1, /* transfer into target memory */
+ XBRW_WRITE = 2, /* transfer from target memory */
+ XBRW_ZERO = 3, /* Zero target memory */
+} xfs_buf_rw_t;
+
+typedef enum {
+ XBF_READ = (1 << 0), /* buffer intended for reading from device */
+ XBF_WRITE = (1 << 1), /* buffer intended for writing to device */
+ XBF_MAPPED = (1 << 2), /* buffer mapped (b_addr valid) */
+ XBF_ASYNC = (1 << 4), /* initiator will not wait for completion */
+ XBF_DONE = (1 << 5), /* all pages in the buffer uptodate */
+ XBF_DELWRI = (1 << 6), /* buffer has dirty pages */
+ XBF_STALE = (1 << 7), /* buffer has been staled, do not find it */
+ XBF_FS_MANAGED = (1 << 8), /* filesystem controls freeing memory */
+ XBF_ORDERED = (1 << 11), /* use ordered writes */
+ XBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead */
/* flags used only as arguments to access routines */
- PBF_LOCK = (1 << 14), /* lock requested */
- PBF_TRYLOCK = (1 << 15), /* lock requested, but do not wait */
- PBF_DONT_BLOCK = (1 << 16), /* do not block in current thread */
+ XBF_LOCK = (1 << 14), /* lock requested */
+ XBF_TRYLOCK = (1 << 15), /* lock requested, but do not wait */
+ XBF_DONT_BLOCK = (1 << 16), /* do not block in current thread */
/* flags used only internally */
- _PBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */
- _PBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc() */
- _PBF_RUN_QUEUES = (1 << 19),/* run block device task queue */
- _PBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */
-} page_buf_flags_t;
+ _XBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */
+ _XBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc() */
+ _XBF_RUN_QUEUES = (1 << 19),/* run block device task queue */
+ _XBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */
+} xfs_buf_flags_t;
+typedef enum {
+ XBT_FORCE_SLEEP = (0 << 1),
+ XBT_FORCE_FLUSH = (1 << 1),
+} xfs_buftarg_flags_t;
typedef struct xfs_bufhash {
struct list_head bh_list;
@@ -77,477 +80,350 @@ typedef struct xfs_bufhash {
} xfs_bufhash_t;
typedef struct xfs_buftarg {
- dev_t pbr_dev;
- struct block_device *pbr_bdev;
- struct address_space *pbr_mapping;
- unsigned int pbr_bsize;
- unsigned int pbr_sshift;
- size_t pbr_smask;
-
- /* per-device buffer hash table */
+ dev_t bt_dev;
+ struct block_device *bt_bdev;
+ struct address_space *bt_mapping;
+ unsigned int bt_bsize;
+ unsigned int bt_sshift;
+ size_t bt_smask;
+
+ /* per device buffer hash table */
uint bt_hashmask;
uint bt_hashshift;
xfs_bufhash_t *bt_hash;
+
+ /* per device delwri queue */
+ struct task_struct *bt_task;
+ struct list_head bt_list;
+ struct list_head bt_delwrite_queue;
+ spinlock_t bt_delwrite_lock;
+ unsigned long bt_flags;
} xfs_buftarg_t;
/*
- * xfs_buf_t: Buffer structure for page cache-based buffers
+ * xfs_buf_t: Buffer structure for pagecache-based buffers
+ *
+ * This buffer structure is used by the pagecache buffer management routines
+ * to refer to an assembly of pages forming a logical buffer.
*
- * This buffer structure is used by the page cache buffer management routines
- * to refer to an assembly of pages forming a logical buffer. The actual I/O
- * is performed with buffer_head structures, as required by drivers.
- *
- * The buffer structure is used on temporary basis only, and discarded when
- * released. The real data storage is recorded in the page cache. Metadata is
+ * The buffer structure is used on a temporary basis only, and discarded when
+ * released. The real data storage is recorded in the pagecache. Buffers are
* hashed to the block device on which the file system resides.
*/
struct xfs_buf;
+typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);
+typedef void (*xfs_buf_relse_t)(struct xfs_buf *);
+typedef int (*xfs_buf_bdstrat_t)(struct xfs_buf *);
-/* call-back function on I/O completion */
-typedef void (*page_buf_iodone_t)(struct xfs_buf *);
-/* call-back function on I/O completion */
-typedef void (*page_buf_relse_t)(struct xfs_buf *);
-/* pre-write function */
-typedef int (*page_buf_bdstrat_t)(struct xfs_buf *);
-
-#define PB_PAGES 2
+#define XB_PAGES 2
typedef struct xfs_buf {
- struct semaphore pb_sema; /* semaphore for lockables */
- unsigned long pb_queuetime; /* time buffer was queued */
- atomic_t pb_pin_count; /* pin count */
- wait_queue_head_t pb_waiters; /* unpin waiters */
- struct list_head pb_list;
- page_buf_flags_t pb_flags; /* status flags */
- struct list_head pb_hash_list; /* hash table list */
- xfs_bufhash_t *pb_hash; /* hash table list start */
- xfs_buftarg_t *pb_target; /* buffer target (device) */
- atomic_t pb_hold; /* reference count */
- xfs_daddr_t pb_bn; /* block number for I/O */
- loff_t pb_file_offset; /* offset in file */
- size_t pb_buffer_length; /* size of buffer in bytes */
- size_t pb_count_desired; /* desired transfer size */
- void *pb_addr; /* virtual address of buffer */
- struct work_struct pb_iodone_work;
- atomic_t pb_io_remaining;/* #outstanding I/O requests */
- page_buf_iodone_t pb_iodone; /* I/O completion function */
- page_buf_relse_t pb_relse; /* releasing function */
- page_buf_bdstrat_t pb_strat; /* pre-write function */
- struct semaphore pb_iodonesema; /* Semaphore for I/O waiters */
- void *pb_fspriv;
- void *pb_fspriv2;
- void *pb_fspriv3;
- unsigned short pb_error; /* error code on I/O */
- unsigned short pb_locked; /* page array is locked */
- unsigned int pb_page_count; /* size of page array */
- unsigned int pb_offset; /* page offset in first page */
- struct page **pb_pages; /* array of page pointers */
- struct page *pb_page_array[PB_PAGES]; /* inline pages */
-#ifdef PAGEBUF_LOCK_TRACKING
- int pb_last_holder;
+ struct semaphore b_sema; /* semaphore for lockables */
+ unsigned long b_queuetime; /* time buffer was queued */
+ atomic_t b_pin_count; /* pin count */
+ wait_queue_head_t b_waiters; /* unpin waiters */
+ struct list_head b_list;
+ xfs_buf_flags_t b_flags; /* status flags */
+ struct list_head b_hash_list; /* hash table list */
+ xfs_bufhash_t *b_hash; /* hash table list start */
+ xfs_buftarg_t *b_target; /* buffer target (device) */
+ atomic_t b_hold; /* reference count */
+ xfs_daddr_t b_bn; /* block number for I/O */
+ xfs_off_t b_file_offset; /* offset in file */
+ size_t b_buffer_length;/* size of buffer in bytes */
+ size_t b_count_desired;/* desired transfer size */
+ void *b_addr; /* virtual address of buffer */
+ struct work_struct b_iodone_work;
+ atomic_t b_io_remaining; /* #outstanding I/O requests */
+ xfs_buf_iodone_t b_iodone; /* I/O completion function */
+ xfs_buf_relse_t b_relse; /* releasing function */
+ xfs_buf_bdstrat_t b_strat; /* pre-write function */
+ struct semaphore b_iodonesema; /* Semaphore for I/O waiters */
+ void *b_fspriv;
+ void *b_fspriv2;
+ void *b_fspriv3;
+ unsigned short b_error; /* error code on I/O */
+ unsigned short b_locked; /* page array is locked */
+ unsigned int b_page_count; /* size of page array */
+ unsigned int b_offset; /* page offset in first page */
+ struct page **b_pages; /* array of page pointers */
+ struct page *b_page_array[XB_PAGES]; /* inline pages */
+#ifdef XFS_BUF_LOCK_TRACKING
+ int b_last_holder;
#endif
} xfs_buf_t;
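
As a rough illustration of the renamed interface declared below (a sketch only; mp, blkno, len and data are assumed placeholders, not names taken from this change), a metadata read now looks like:

	xfs_buf_t	*bp;

	/* locked, mapped read from the data device (XBF_LOCK | XBF_MAPPED implied) */
	bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, 0);
	if (!bp || XFS_BUF_GETERROR(bp)) {
		if (bp)
			xfs_buf_relse(bp);
		return XFS_ERROR(EIO);
	}

	memcpy(data, XFS_BUF_PTR(bp), XFS_BUF_COUNT(bp));

	/* unlock (no b_relse handler installed) and drop the hold */
	xfs_buf_relse(bp);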
/* Finding and Reading Buffers */
-
-extern xfs_buf_t *_pagebuf_find( /* find buffer for block if */
- /* the block is in memory */
- xfs_buftarg_t *, /* inode for block */
- loff_t, /* starting offset of range */
- size_t, /* length of range */
- page_buf_flags_t, /* PBF_LOCK */
- xfs_buf_t *); /* newly allocated buffer */
-
+extern xfs_buf_t *_xfs_buf_find(xfs_buftarg_t *, xfs_off_t, size_t,
+ xfs_buf_flags_t, xfs_buf_t *);
#define xfs_incore(buftarg,blkno,len,lockit) \
- _pagebuf_find(buftarg, blkno ,len, lockit, NULL)
-
-extern xfs_buf_t *xfs_buf_get_flags( /* allocate a buffer */
- xfs_buftarg_t *, /* inode for buffer */
- loff_t, /* starting offset of range */
- size_t, /* length of range */
- page_buf_flags_t); /* PBF_LOCK, PBF_READ, */
- /* PBF_ASYNC */
+ _xfs_buf_find(buftarg, blkno, len, lockit, NULL)
+extern xfs_buf_t *xfs_buf_get_flags(xfs_buftarg_t *, xfs_off_t, size_t,
+ xfs_buf_flags_t);
#define xfs_buf_get(target, blkno, len, flags) \
- xfs_buf_get_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED)
-
-extern xfs_buf_t *xfs_buf_read_flags( /* allocate and read a buffer */
- xfs_buftarg_t *, /* inode for buffer */
- loff_t, /* starting offset of range */
- size_t, /* length of range */
- page_buf_flags_t); /* PBF_LOCK, PBF_ASYNC */
+ xfs_buf_get_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
+extern xfs_buf_t *xfs_buf_read_flags(xfs_buftarg_t *, xfs_off_t, size_t,
+ xfs_buf_flags_t);
#define xfs_buf_read(target, blkno, len, flags) \
- xfs_buf_read_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED)
-
-extern xfs_buf_t *pagebuf_get_empty( /* allocate pagebuf struct with */
- /* no memory or disk address */
- size_t len,
- xfs_buftarg_t *); /* mount point "fake" inode */
-
-extern xfs_buf_t *pagebuf_get_no_daddr(/* allocate pagebuf struct */
- /* without disk address */
- size_t len,
- xfs_buftarg_t *); /* mount point "fake" inode */
-
-extern int pagebuf_associate_memory(
- xfs_buf_t *,
- void *,
- size_t);
-
-extern void pagebuf_hold( /* increment reference count */
- xfs_buf_t *); /* buffer to hold */
+ xfs_buf_read_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
-extern void pagebuf_readahead( /* read ahead into cache */
- xfs_buftarg_t *, /* target for buffer (or NULL) */
- loff_t, /* starting offset of range */
- size_t, /* length of range */
- page_buf_flags_t); /* additional read flags */
+extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *);
+extern xfs_buf_t *xfs_buf_get_noaddr(size_t, xfs_buftarg_t *);
+extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t);
+extern void xfs_buf_hold(xfs_buf_t *);
+extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t,
+ xfs_buf_flags_t);
/* Releasing Buffers */
-
-extern void pagebuf_free( /* deallocate a buffer */
- xfs_buf_t *); /* buffer to deallocate */
-
-extern void pagebuf_rele( /* release hold on a buffer */
- xfs_buf_t *); /* buffer to release */
+extern void xfs_buf_free(xfs_buf_t *);
+extern void xfs_buf_rele(xfs_buf_t *);
/* Locking and Unlocking Buffers */
-
-extern int pagebuf_cond_lock( /* lock buffer, if not locked */
- /* (returns -EBUSY if locked) */
- xfs_buf_t *); /* buffer to lock */
-
-extern int pagebuf_lock_value( /* return count on lock */
- xfs_buf_t *); /* buffer to check */
-
-extern int pagebuf_lock( /* lock buffer */
- xfs_buf_t *); /* buffer to lock */
-
-extern void pagebuf_unlock( /* unlock buffer */
- xfs_buf_t *); /* buffer to unlock */
+extern int xfs_buf_cond_lock(xfs_buf_t *);
+extern int xfs_buf_lock_value(xfs_buf_t *);
+extern void xfs_buf_lock(xfs_buf_t *);
+extern void xfs_buf_unlock(xfs_buf_t *);
/* Buffer Read and Write Routines */
-
-extern void pagebuf_iodone( /* mark buffer I/O complete */
- xfs_buf_t *, /* buffer to mark */
- int); /* run completion locally, or in
- * a helper thread. */
-
-extern void pagebuf_ioerror( /* mark buffer in error (or not) */
- xfs_buf_t *, /* buffer to mark */
- int); /* error to store (0 if none) */
-
-extern int pagebuf_iostart( /* start I/O on a buffer */
- xfs_buf_t *, /* buffer to start */
- page_buf_flags_t); /* PBF_LOCK, PBF_ASYNC, */
- /* PBF_READ, PBF_WRITE, */
- /* PBF_DELWRI */
-
-extern int pagebuf_iorequest( /* start real I/O */
- xfs_buf_t *); /* buffer to convey to device */
-
-extern int pagebuf_iowait( /* wait for buffer I/O done */
- xfs_buf_t *); /* buffer to wait on */
-
-extern void pagebuf_iomove( /* move data in/out of pagebuf */
- xfs_buf_t *, /* buffer to manipulate */
- size_t, /* starting buffer offset */
- size_t, /* length in buffer */
- caddr_t, /* data pointer */
- page_buf_rw_t); /* direction */
-
-static inline int pagebuf_iostrategy(xfs_buf_t *pb)
+extern void xfs_buf_ioend(xfs_buf_t *, int);
+extern void xfs_buf_ioerror(xfs_buf_t *, int);
+extern int xfs_buf_iostart(xfs_buf_t *, xfs_buf_flags_t);
+extern int xfs_buf_iorequest(xfs_buf_t *);
+extern int xfs_buf_iowait(xfs_buf_t *);
+extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, xfs_caddr_t,
+ xfs_buf_rw_t);
+
+static inline int xfs_buf_iostrategy(xfs_buf_t *bp)
{
- return pb->pb_strat ? pb->pb_strat(pb) : pagebuf_iorequest(pb);
+ return bp->b_strat ? bp->b_strat(bp) : xfs_buf_iorequest(bp);
}
-static inline int pagebuf_geterror(xfs_buf_t *pb)
+static inline int xfs_buf_geterror(xfs_buf_t *bp)
{
- return pb ? pb->pb_error : ENOMEM;
+ return bp ? bp->b_error : ENOMEM;
}
/* Buffer Utility Routines */
-
-extern caddr_t pagebuf_offset( /* pointer at offset in buffer */
- xfs_buf_t *, /* buffer to offset into */
- size_t); /* offset */
+extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);
/* Pinning Buffer Storage in Memory */
-
-extern void pagebuf_pin( /* pin buffer in memory */
- xfs_buf_t *); /* buffer to pin */
-
-extern void pagebuf_unpin( /* unpin buffered data */
- xfs_buf_t *); /* buffer to unpin */
-
-extern int pagebuf_ispin( /* check if buffer is pinned */
- xfs_buf_t *); /* buffer to check */
+extern void xfs_buf_pin(xfs_buf_t *);
+extern void xfs_buf_unpin(xfs_buf_t *);
+extern int xfs_buf_ispin(xfs_buf_t *);
/* Delayed Write Buffer Routines */
-
-extern void pagebuf_delwri_dequeue(xfs_buf_t *);
+extern void xfs_buf_delwri_dequeue(xfs_buf_t *);
/* Buffer Daemon Setup Routines */
+extern int xfs_buf_init(void);
+extern void xfs_buf_terminate(void);
-extern int pagebuf_init(void);
-extern void pagebuf_terminate(void);
-
-
-#ifdef PAGEBUF_TRACE
-extern ktrace_t *pagebuf_trace_buf;
-extern void pagebuf_trace(
- xfs_buf_t *, /* buffer being traced */
- char *, /* description of operation */
- void *, /* arbitrary diagnostic value */
- void *); /* return address */
+#ifdef XFS_BUF_TRACE
+extern ktrace_t *xfs_buf_trace_buf;
+extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
#else
-# define pagebuf_trace(pb, id, ptr, ra) do { } while (0)
+#define xfs_buf_trace(bp,id,ptr,ra) do { } while (0)
#endif
-#define pagebuf_target_name(target) \
- ({ char __b[BDEVNAME_SIZE]; bdevname((target)->pbr_bdev, __b); __b; })
+#define xfs_buf_target_name(target) \
+ ({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; })
+#define XFS_B_ASYNC XBF_ASYNC
+#define XFS_B_DELWRI XBF_DELWRI
+#define XFS_B_READ XBF_READ
+#define XFS_B_WRITE XBF_WRITE
+#define XFS_B_STALE XBF_STALE
-/* These are just for xfs_syncsub... it sets an internal variable
- * then passes it to VOP_FLUSH_PAGES or adds the flags to a newly gotten buf_t
- */
-#define XFS_B_ASYNC PBF_ASYNC
-#define XFS_B_DELWRI PBF_DELWRI
-#define XFS_B_READ PBF_READ
-#define XFS_B_WRITE PBF_WRITE
-#define XFS_B_STALE PBF_STALE
-
-#define XFS_BUF_TRYLOCK PBF_TRYLOCK
-#define XFS_INCORE_TRYLOCK PBF_TRYLOCK
-#define XFS_BUF_LOCK PBF_LOCK
-#define XFS_BUF_MAPPED PBF_MAPPED
-
-#define BUF_BUSY PBF_DONT_BLOCK
-
-#define XFS_BUF_BFLAGS(x) ((x)->pb_flags)
-#define XFS_BUF_ZEROFLAGS(x) \
- ((x)->pb_flags &= ~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_DELWRI))
-
-#define XFS_BUF_STALE(x) ((x)->pb_flags |= XFS_B_STALE)
-#define XFS_BUF_UNSTALE(x) ((x)->pb_flags &= ~XFS_B_STALE)
-#define XFS_BUF_ISSTALE(x) ((x)->pb_flags & XFS_B_STALE)
-#define XFS_BUF_SUPER_STALE(x) do { \
- XFS_BUF_STALE(x); \
- pagebuf_delwri_dequeue(x); \
- XFS_BUF_DONE(x); \
- } while (0)
+#define XFS_BUF_TRYLOCK XBF_TRYLOCK
+#define XFS_INCORE_TRYLOCK XBF_TRYLOCK
+#define XFS_BUF_LOCK XBF_LOCK
+#define XFS_BUF_MAPPED XBF_MAPPED
-#define XFS_BUF_MANAGE PBF_FS_MANAGED
-#define XFS_BUF_UNMANAGE(x) ((x)->pb_flags &= ~PBF_FS_MANAGED)
-
-#define XFS_BUF_DELAYWRITE(x) ((x)->pb_flags |= PBF_DELWRI)
-#define XFS_BUF_UNDELAYWRITE(x) pagebuf_delwri_dequeue(x)
-#define XFS_BUF_ISDELAYWRITE(x) ((x)->pb_flags & PBF_DELWRI)
-
-#define XFS_BUF_ERROR(x,no) pagebuf_ioerror(x,no)
-#define XFS_BUF_GETERROR(x) pagebuf_geterror(x)
-#define XFS_BUF_ISERROR(x) (pagebuf_geterror(x)?1:0)
-
-#define XFS_BUF_DONE(x) ((x)->pb_flags |= PBF_DONE)
-#define XFS_BUF_UNDONE(x) ((x)->pb_flags &= ~PBF_DONE)
-#define XFS_BUF_ISDONE(x) ((x)->pb_flags & PBF_DONE)
-
-#define XFS_BUF_BUSY(x) do { } while (0)
-#define XFS_BUF_UNBUSY(x) do { } while (0)
-#define XFS_BUF_ISBUSY(x) (1)
-
-#define XFS_BUF_ASYNC(x) ((x)->pb_flags |= PBF_ASYNC)
-#define XFS_BUF_UNASYNC(x) ((x)->pb_flags &= ~PBF_ASYNC)
-#define XFS_BUF_ISASYNC(x) ((x)->pb_flags & PBF_ASYNC)
-
-#define XFS_BUF_ORDERED(x) ((x)->pb_flags |= PBF_ORDERED)
-#define XFS_BUF_UNORDERED(x) ((x)->pb_flags &= ~PBF_ORDERED)
-#define XFS_BUF_ISORDERED(x) ((x)->pb_flags & PBF_ORDERED)
-
-#define XFS_BUF_SHUT(x) printk("XFS_BUF_SHUT not implemented yet\n")
-#define XFS_BUF_UNSHUT(x) printk("XFS_BUF_UNSHUT not implemented yet\n")
-#define XFS_BUF_ISSHUT(x) (0)
-
-#define XFS_BUF_HOLD(x) pagebuf_hold(x)
-#define XFS_BUF_READ(x) ((x)->pb_flags |= PBF_READ)
-#define XFS_BUF_UNREAD(x) ((x)->pb_flags &= ~PBF_READ)
-#define XFS_BUF_ISREAD(x) ((x)->pb_flags & PBF_READ)
-
-#define XFS_BUF_WRITE(x) ((x)->pb_flags |= PBF_WRITE)
-#define XFS_BUF_UNWRITE(x) ((x)->pb_flags &= ~PBF_WRITE)
-#define XFS_BUF_ISWRITE(x) ((x)->pb_flags & PBF_WRITE)
-
-#define XFS_BUF_ISUNINITIAL(x) (0)
-#define XFS_BUF_UNUNINITIAL(x) (0)
-
-#define XFS_BUF_BP_ISMAPPED(bp) 1
-
-#define XFS_BUF_IODONE_FUNC(buf) (buf)->pb_iodone
-#define XFS_BUF_SET_IODONE_FUNC(buf, func) \
- (buf)->pb_iodone = (func)
-#define XFS_BUF_CLR_IODONE_FUNC(buf) \
- (buf)->pb_iodone = NULL
-#define XFS_BUF_SET_BDSTRAT_FUNC(buf, func) \
- (buf)->pb_strat = (func)
-#define XFS_BUF_CLR_BDSTRAT_FUNC(buf) \
- (buf)->pb_strat = NULL
-
-#define XFS_BUF_FSPRIVATE(buf, type) \
- ((type)(buf)->pb_fspriv)
-#define XFS_BUF_SET_FSPRIVATE(buf, value) \
- (buf)->pb_fspriv = (void *)(value)
-#define XFS_BUF_FSPRIVATE2(buf, type) \
- ((type)(buf)->pb_fspriv2)
-#define XFS_BUF_SET_FSPRIVATE2(buf, value) \
- (buf)->pb_fspriv2 = (void *)(value)
-#define XFS_BUF_FSPRIVATE3(buf, type) \
- ((type)(buf)->pb_fspriv3)
-#define XFS_BUF_SET_FSPRIVATE3(buf, value) \
- (buf)->pb_fspriv3 = (void *)(value)
-#define XFS_BUF_SET_START(buf)
-
-#define XFS_BUF_SET_BRELSE_FUNC(buf, value) \
- (buf)->pb_relse = (value)
-
-#define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->pb_addr)
-
-static inline xfs_caddr_t xfs_buf_offset(xfs_buf_t *bp, size_t offset)
-{
- if (bp->pb_flags & PBF_MAPPED)
- return XFS_BUF_PTR(bp) + offset;
- return (xfs_caddr_t) pagebuf_offset(bp, offset);
-}
+#define BUF_BUSY XBF_DONT_BLOCK
+
+#define XFS_BUF_BFLAGS(bp) ((bp)->b_flags)
+#define XFS_BUF_ZEROFLAGS(bp) \
+ ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI))
+
+#define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE)
+#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE)
+#define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XFS_B_STALE)
+#define XFS_BUF_SUPER_STALE(bp) do { \
+ XFS_BUF_STALE(bp); \
+ xfs_buf_delwri_dequeue(bp); \
+ XFS_BUF_DONE(bp); \
+ } while (0)
-#define XFS_BUF_SET_PTR(bp, val, count) \
- pagebuf_associate_memory(bp, val, count)
-#define XFS_BUF_ADDR(bp) ((bp)->pb_bn)
-#define XFS_BUF_SET_ADDR(bp, blk) \
- ((bp)->pb_bn = (xfs_daddr_t)(blk))
-#define XFS_BUF_OFFSET(bp) ((bp)->pb_file_offset)
-#define XFS_BUF_SET_OFFSET(bp, off) \
- ((bp)->pb_file_offset = (off))
-#define XFS_BUF_COUNT(bp) ((bp)->pb_count_desired)
-#define XFS_BUF_SET_COUNT(bp, cnt) \
- ((bp)->pb_count_desired = (cnt))
-#define XFS_BUF_SIZE(bp) ((bp)->pb_buffer_length)
-#define XFS_BUF_SET_SIZE(bp, cnt) \
- ((bp)->pb_buffer_length = (cnt))
-#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)
-#define XFS_BUF_SET_VTYPE(bp, type)
-#define XFS_BUF_SET_REF(bp, ref)
-
-#define XFS_BUF_ISPINNED(bp) pagebuf_ispin(bp)
-
-#define XFS_BUF_VALUSEMA(bp) pagebuf_lock_value(bp)
-#define XFS_BUF_CPSEMA(bp) (pagebuf_cond_lock(bp) == 0)
-#define XFS_BUF_VSEMA(bp) pagebuf_unlock(bp)
-#define XFS_BUF_PSEMA(bp,x) pagebuf_lock(bp)
-#define XFS_BUF_V_IODONESEMA(bp) up(&bp->pb_iodonesema);
-
-/* setup the buffer target from a buftarg structure */
-#define XFS_BUF_SET_TARGET(bp, target) \
- (bp)->pb_target = (target)
-#define XFS_BUF_TARGET(bp) ((bp)->pb_target)
-#define XFS_BUFTARG_NAME(target) \
- pagebuf_target_name(target)
-
-#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)
-#define XFS_BUF_SET_VTYPE(bp, type)
-#define XFS_BUF_SET_REF(bp, ref)
-
-static inline int xfs_bawrite(void *mp, xfs_buf_t *bp)
+#define XFS_BUF_MANAGE XBF_FS_MANAGED
+#define XFS_BUF_UNMANAGE(bp) ((bp)->b_flags &= ~XBF_FS_MANAGED)
+
+#define XFS_BUF_DELAYWRITE(bp) ((bp)->b_flags |= XBF_DELWRI)
+#define XFS_BUF_UNDELAYWRITE(bp) xfs_buf_delwri_dequeue(bp)
+#define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI)
+
+#define XFS_BUF_ERROR(bp,no) xfs_buf_ioerror(bp,no)
+#define XFS_BUF_GETERROR(bp) xfs_buf_geterror(bp)
+#define XFS_BUF_ISERROR(bp) (xfs_buf_geterror(bp) ? 1 : 0)
+
+#define XFS_BUF_DONE(bp) ((bp)->b_flags |= XBF_DONE)
+#define XFS_BUF_UNDONE(bp) ((bp)->b_flags &= ~XBF_DONE)
+#define XFS_BUF_ISDONE(bp) ((bp)->b_flags & XBF_DONE)
+
+#define XFS_BUF_BUSY(bp) do { } while (0)
+#define XFS_BUF_UNBUSY(bp) do { } while (0)
+#define XFS_BUF_ISBUSY(bp) (1)
+
+#define XFS_BUF_ASYNC(bp) ((bp)->b_flags |= XBF_ASYNC)
+#define XFS_BUF_UNASYNC(bp) ((bp)->b_flags &= ~XBF_ASYNC)
+#define XFS_BUF_ISASYNC(bp) ((bp)->b_flags & XBF_ASYNC)
+
+#define XFS_BUF_ORDERED(bp) ((bp)->b_flags |= XBF_ORDERED)
+#define XFS_BUF_UNORDERED(bp) ((bp)->b_flags &= ~XBF_ORDERED)
+#define XFS_BUF_ISORDERED(bp) ((bp)->b_flags & XBF_ORDERED)
+
+#define XFS_BUF_SHUT(bp) do { } while (0)
+#define XFS_BUF_UNSHUT(bp) do { } while (0)
+#define XFS_BUF_ISSHUT(bp) (0)
+
+#define XFS_BUF_HOLD(bp) xfs_buf_hold(bp)
+#define XFS_BUF_READ(bp) ((bp)->b_flags |= XBF_READ)
+#define XFS_BUF_UNREAD(bp) ((bp)->b_flags &= ~XBF_READ)
+#define XFS_BUF_ISREAD(bp) ((bp)->b_flags & XBF_READ)
+
+#define XFS_BUF_WRITE(bp) ((bp)->b_flags |= XBF_WRITE)
+#define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE)
+#define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE)
+
+#define XFS_BUF_ISUNINITIAL(bp) (0)
+#define XFS_BUF_UNUNINITIAL(bp) (0)
+
+#define XFS_BUF_BP_ISMAPPED(bp) (1)
+
+#define XFS_BUF_IODONE_FUNC(bp) ((bp)->b_iodone)
+#define XFS_BUF_SET_IODONE_FUNC(bp, func) ((bp)->b_iodone = (func))
+#define XFS_BUF_CLR_IODONE_FUNC(bp) ((bp)->b_iodone = NULL)
+#define XFS_BUF_SET_BDSTRAT_FUNC(bp, func) ((bp)->b_strat = (func))
+#define XFS_BUF_CLR_BDSTRAT_FUNC(bp) ((bp)->b_strat = NULL)
+
+#define XFS_BUF_FSPRIVATE(bp, type) ((type)(bp)->b_fspriv)
+#define XFS_BUF_SET_FSPRIVATE(bp, val) ((bp)->b_fspriv = (void*)(val))
+#define XFS_BUF_FSPRIVATE2(bp, type) ((type)(bp)->b_fspriv2)
+#define XFS_BUF_SET_FSPRIVATE2(bp, val) ((bp)->b_fspriv2 = (void*)(val))
+#define XFS_BUF_FSPRIVATE3(bp, type) ((type)(bp)->b_fspriv3)
+#define XFS_BUF_SET_FSPRIVATE3(bp, val) ((bp)->b_fspriv3 = (void*)(val))
+#define XFS_BUF_SET_START(bp) do { } while (0)
+#define XFS_BUF_SET_BRELSE_FUNC(bp, func) ((bp)->b_relse = (func))
+
+#define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->b_addr)
+#define XFS_BUF_SET_PTR(bp, val, cnt) xfs_buf_associate_memory(bp, val, cnt)
+#define XFS_BUF_ADDR(bp) ((bp)->b_bn)
+#define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_bn = (xfs_daddr_t)(bno))
+#define XFS_BUF_OFFSET(bp) ((bp)->b_file_offset)
+#define XFS_BUF_SET_OFFSET(bp, off) ((bp)->b_file_offset = (off))
+#define XFS_BUF_COUNT(bp) ((bp)->b_count_desired)
+#define XFS_BUF_SET_COUNT(bp, cnt) ((bp)->b_count_desired = (cnt))
+#define XFS_BUF_SIZE(bp) ((bp)->b_buffer_length)
+#define XFS_BUF_SET_SIZE(bp, cnt) ((bp)->b_buffer_length = (cnt))
+
+#define XFS_BUF_SET_VTYPE_REF(bp, type, ref) do { } while (0)
+#define XFS_BUF_SET_VTYPE(bp, type) do { } while (0)
+#define XFS_BUF_SET_REF(bp, ref) do { } while (0)
+
+#define XFS_BUF_ISPINNED(bp) xfs_buf_ispin(bp)
+
+#define XFS_BUF_VALUSEMA(bp) xfs_buf_lock_value(bp)
+#define XFS_BUF_CPSEMA(bp) (xfs_buf_cond_lock(bp) == 0)
+#define XFS_BUF_VSEMA(bp) xfs_buf_unlock(bp)
+#define XFS_BUF_PSEMA(bp,x) xfs_buf_lock(bp)
+#define XFS_BUF_V_IODONESEMA(bp) up(&bp->b_iodonesema);
+
+#define XFS_BUF_SET_TARGET(bp, target) ((bp)->b_target = (target))
+#define XFS_BUF_TARGET(bp) ((bp)->b_target)
+#define XFS_BUFTARG_NAME(target) xfs_buf_target_name(target)
+
+static inline int xfs_bawrite(void *mp, xfs_buf_t *bp)
{
- bp->pb_fspriv3 = mp;
- bp->pb_strat = xfs_bdstrat_cb;
- pagebuf_delwri_dequeue(bp);
- return pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC | _PBF_RUN_QUEUES);
+ bp->b_fspriv3 = mp;
+ bp->b_strat = xfs_bdstrat_cb;
+ xfs_buf_delwri_dequeue(bp);
+ return xfs_buf_iostart(bp, XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES);
}
-static inline void xfs_buf_relse(xfs_buf_t *bp)
+static inline void xfs_buf_relse(xfs_buf_t *bp)
{
- if (!bp->pb_relse)
- pagebuf_unlock(bp);
- pagebuf_rele(bp);
+ if (!bp->b_relse)
+ xfs_buf_unlock(bp);
+ xfs_buf_rele(bp);
}
-#define xfs_bpin(bp) pagebuf_pin(bp)
-#define xfs_bunpin(bp) pagebuf_unpin(bp)
+#define xfs_bpin(bp) xfs_buf_pin(bp)
+#define xfs_bunpin(bp) xfs_buf_unpin(bp)
#define xfs_buftrace(id, bp) \
- pagebuf_trace(bp, id, NULL, (void *)__builtin_return_address(0))
+ xfs_buf_trace(bp, id, NULL, (void *)__builtin_return_address(0))
-#define xfs_biodone(pb) \
- pagebuf_iodone(pb, 0)
+#define xfs_biodone(bp) xfs_buf_ioend(bp, 0)
-#define xfs_biomove(pb, off, len, data, rw) \
- pagebuf_iomove((pb), (off), (len), (data), \
- ((rw) == XFS_B_WRITE) ? PBRW_WRITE : PBRW_READ)
+#define xfs_biomove(bp, off, len, data, rw) \
+ xfs_buf_iomove((bp), (off), (len), (data), \
+ ((rw) == XFS_B_WRITE) ? XBRW_WRITE : XBRW_READ)
-#define xfs_biozero(pb, off, len) \
- pagebuf_iomove((pb), (off), (len), NULL, PBRW_ZERO)
+#define xfs_biozero(bp, off, len) \
+ xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
-static inline int XFS_bwrite(xfs_buf_t *pb)
+static inline int XFS_bwrite(xfs_buf_t *bp)
{
- int iowait = (pb->pb_flags & PBF_ASYNC) == 0;
+ int iowait = (bp->b_flags & XBF_ASYNC) == 0;
int error = 0;
if (!iowait)
- pb->pb_flags |= _PBF_RUN_QUEUES;
+ bp->b_flags |= _XBF_RUN_QUEUES;
- pagebuf_delwri_dequeue(pb);
- pagebuf_iostrategy(pb);
+ xfs_buf_delwri_dequeue(bp);
+ xfs_buf_iostrategy(bp);
if (iowait) {
- error = pagebuf_iowait(pb);
- xfs_buf_relse(pb);
+ error = xfs_buf_iowait(bp);
+ xfs_buf_relse(bp);
}
return error;
}
-#define XFS_bdwrite(pb) \
- pagebuf_iostart(pb, PBF_DELWRI | PBF_ASYNC)
+#define XFS_bdwrite(bp) xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC)
static inline int xfs_bdwrite(void *mp, xfs_buf_t *bp)
{
- bp->pb_strat = xfs_bdstrat_cb;
- bp->pb_fspriv3 = mp;
-
- return pagebuf_iostart(bp, PBF_DELWRI | PBF_ASYNC);
+ bp->b_strat = xfs_bdstrat_cb;
+ bp->b_fspriv3 = mp;
+ return xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC);
}
-#define XFS_bdstrat(bp) pagebuf_iorequest(bp)
+#define XFS_bdstrat(bp) xfs_buf_iorequest(bp)
-#define xfs_iowait(pb) pagebuf_iowait(pb)
+#define xfs_iowait(bp) xfs_buf_iowait(bp)
#define xfs_baread(target, rablkno, ralen) \
- pagebuf_readahead((target), (rablkno), (ralen), PBF_DONT_BLOCK)
-
-#define xfs_buf_get_empty(len, target) pagebuf_get_empty((len), (target))
-#define xfs_buf_get_noaddr(len, target) pagebuf_get_no_daddr((len), (target))
-#define xfs_buf_free(bp) pagebuf_free(bp)
+ xfs_buf_readahead((target), (rablkno), (ralen), XBF_DONT_BLOCK)
/*
* Handling of buftargs.
*/
-
extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int);
extern void xfs_free_buftarg(xfs_buftarg_t *, int);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
-#define xfs_getsize_buftarg(buftarg) \
- block_size((buftarg)->pbr_bdev)
-#define xfs_readonly_buftarg(buftarg) \
- bdev_read_only((buftarg)->pbr_bdev)
-#define xfs_binval(buftarg) \
- xfs_flush_buftarg(buftarg, 1)
-#define XFS_bflush(buftarg) \
- xfs_flush_buftarg(buftarg, 1)
+#define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev)
+#define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev)
+
+#define xfs_binval(buftarg) xfs_flush_buftarg(buftarg, 1)
+#define XFS_bflush(buftarg) xfs_flush_buftarg(buftarg, 1)
#endif /* __XFS_BUF_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_cred.h b/fs/xfs/linux-2.6/xfs_cred.h
index 4af49102472..e7f3da61c6c 100644
--- a/fs/xfs/linux-2.6/xfs_cred.h
+++ b/fs/xfs/linux-2.6/xfs_cred.h
@@ -18,6 +18,8 @@
#ifndef __XFS_CRED_H__
#define __XFS_CRED_H__
+#include <linux/capability.h>
+
/*
* Credentials
*/
@@ -27,7 +29,7 @@ typedef struct cred {
extern struct cred *sys_cred;
-/* this is a hack.. (assums sys_cred is the only cred_t in the system) */
+/* this is a hack.. (assumes sys_cred is the only cred_t in the system) */
static __inline int capable_cred(cred_t *cr, int cid)
{
return (cr == sys_cred) ? 1 : capable(cid);
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index 06111d0bbae..ced4404339c 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -509,16 +509,14 @@ linvfs_open_exec(
vnode_t *vp = LINVFS_GET_VP(inode);
xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
int error = 0;
- bhv_desc_t *bdp;
xfs_inode_t *ip;
if (vp->v_vfsp->vfs_flag & VFS_DMI) {
- bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
- if (!bdp) {
+ ip = xfs_vtoi(vp);
+ if (!ip) {
error = -EINVAL;
goto open_exec_out;
}
- ip = XFS_BHVTOI(bdp);
if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ)) {
error = -XFS_SEND_DATA(mp, DM_EVENT_READ, vp,
0, 0, 0, NULL);
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index f89340c61bf..4fa4b1a5187 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -79,8 +79,7 @@ fs_flushinval_pages(
struct inode *ip = LINVFS_GET_IP(vp);
if (VN_CACHED(vp)) {
- filemap_fdatawrite(ip->i_mapping);
- filemap_fdatawait(ip->i_mapping);
+ filemap_write_and_wait(ip->i_mapping);
truncate_inode_pages(ip->i_mapping, first);
}
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index b78b5eb9e96..4db47790415 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -52,6 +52,7 @@
#include "xfs_dfrag.h"
#include "xfs_fsops.h"
+#include <linux/capability.h>
#include <linux/dcache.h>
#include <linux/mount.h>
#include <linux/namei.h>
@@ -145,13 +146,10 @@ xfs_find_handle(
if (cmd != XFS_IOC_PATH_TO_FSHANDLE) {
xfs_inode_t *ip;
- bhv_desc_t *bhv;
int lock_mode;
/* need to get access to the xfs_inode to read the generation */
- bhv = vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops);
- ASSERT(bhv);
- ip = XFS_BHVTOI(bhv);
+ ip = xfs_vtoi(vp);
ASSERT(ip);
lock_mode = xfs_ilock_map_shared(ip);
@@ -530,6 +528,8 @@ xfs_attrmulti_attr_set(
char *kbuf;
int error = EFAULT;
+ if (IS_RDONLY(&vp->v_inode))
+ return -EROFS;
if (IS_IMMUTABLE(&vp->v_inode) || IS_APPEND(&vp->v_inode))
return EPERM;
if (len > XATTR_SIZE_MAX)
@@ -557,6 +557,9 @@ xfs_attrmulti_attr_remove(
{
int error;
+
+ if (IS_RDONLY(&vp->v_inode))
+ return -EROFS;
if (IS_IMMUTABLE(&vp->v_inode) || IS_APPEND(&vp->v_inode))
return EPERM;
@@ -745,9 +748,8 @@ xfs_ioctl(
(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
mp->m_rtdev_targp : mp->m_ddev_targp;
- da.d_mem = da.d_miniosz = 1 << target->pbr_sshift;
- /* The size dio will do in one go */
- da.d_maxiosz = 64 * PAGE_CACHE_SIZE;
+ da.d_mem = da.d_miniosz = 1 << target->bt_sshift;
+ da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
if (copy_to_user(arg, &da, sizeof(da)))
return -XFS_ERROR(EFAULT);
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index c83ae15bb0e..a7c9ba1a9f7 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -19,7 +19,6 @@
#include <linux/compat.h>
#include <linux/init.h>
#include <linux/ioctl.h>
-#include <linux/ioctl32.h>
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/fs.h>
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 14215a7db59..4bd3d03b23e 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -51,8 +51,47 @@
#include "xfs_buf_item.h"
#include "xfs_utils.h"
+#include <linux/capability.h>
#include <linux/xattr.h>
#include <linux/namei.h>
+#include <linux/security.h>
+
+#define IS_NOATIME(inode) ((inode->i_sb->s_flags & MS_NOATIME) || \
+ (S_ISDIR(inode->i_mode) && inode->i_sb->s_flags & MS_NODIRATIME))
+
+/*
+ * Get a XFS inode from a given vnode.
+ */
+xfs_inode_t *
+xfs_vtoi(
+ struct vnode *vp)
+{
+ bhv_desc_t *bdp;
+
+ bdp = bhv_lookup_range(VN_BHV_HEAD(vp),
+ VNODE_POSITION_XFS, VNODE_POSITION_XFS);
+ if (unlikely(bdp == NULL))
+ return NULL;
+ return XFS_BHVTOI(bdp);
+}
+
+/*
+ * Bring the atime in the XFS inode uptodate.
+ * Used before logging the inode to disk or when the Linux inode goes away.
+ */
+void
+xfs_synchronize_atime(
+ xfs_inode_t *ip)
+{
+ vnode_t *vp;
+
+ vp = XFS_ITOV_NULL(ip);
+ if (vp) {
+ struct inode *inode = &vp->v_inode;
+ ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec;
+ ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec;
+ }
+}
/*
* Change the requested timestamp in the given inode.
@@ -73,23 +112,6 @@ xfs_ichgtime(
struct inode *inode = LINVFS_GET_IP(XFS_ITOV(ip));
timespec_t tv;
- /*
- * We're not supposed to change timestamps in readonly-mounted
- * filesystems. Throw it away if anyone asks us.
- */
- if (unlikely(IS_RDONLY(inode)))
- return;
-
- /*
- * Don't update access timestamps on reads if mounted "noatime".
- * Throw it away if anyone asks us.
- */
- if (unlikely(
- (ip->i_mount->m_flags & XFS_MOUNT_NOATIME || IS_NOATIME(inode)) &&
- (flags & (XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD|XFS_ICHGTIME_CHG)) ==
- XFS_ICHGTIME_ACC))
- return;
-
nanotime(&tv);
if (flags & XFS_ICHGTIME_MOD) {
inode->i_mtime = tv;
@@ -126,8 +148,6 @@ xfs_ichgtime(
* Variant on the above which avoids querying the system clock
* in situations where we know the Linux inode timestamps have
* just been updated (and so we can update our inode cheaply).
- * We also skip the readonly and noatime checks here, they are
- * also catered for already.
*/
void
xfs_ichgtime_fast(
@@ -138,20 +158,16 @@ xfs_ichgtime_fast(
timespec_t *tvp;
/*
- * We're not supposed to change timestamps in readonly-mounted
- * filesystems. Throw it away if anyone asks us.
+ * Atime updates for read() & friends are handled lazily now, and
+ * explicit updates must go through xfs_ichgtime()
*/
- if (unlikely(IS_RDONLY(inode)))
- return;
+ ASSERT((flags & XFS_ICHGTIME_ACC) == 0);
/*
- * Don't update access timestamps on reads if mounted "noatime".
- * Throw it away if anyone asks us.
+ * We're not supposed to change timestamps in readonly-mounted
+ * filesystems. Throw it away if anyone asks us.
*/
- if (unlikely(
- (ip->i_mount->m_flags & XFS_MOUNT_NOATIME || IS_NOATIME(inode)) &&
- ((flags & (XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD|XFS_ICHGTIME_CHG)) ==
- XFS_ICHGTIME_ACC)))
+ if (unlikely(IS_RDONLY(inode)))
return;
if (flags & XFS_ICHGTIME_MOD) {
@@ -159,11 +175,6 @@ xfs_ichgtime_fast(
ip->i_d.di_mtime.t_sec = (__int32_t)tvp->tv_sec;
ip->i_d.di_mtime.t_nsec = (__int32_t)tvp->tv_nsec;
}
- if (flags & XFS_ICHGTIME_ACC) {
- tvp = &inode->i_atime;
- ip->i_d.di_atime.t_sec = (__int32_t)tvp->tv_sec;
- ip->i_d.di_atime.t_nsec = (__int32_t)tvp->tv_nsec;
- }
if (flags & XFS_ICHGTIME_CHG) {
tvp = &inode->i_ctime;
ip->i_d.di_ctime.t_sec = (__int32_t)tvp->tv_sec;
@@ -203,13 +214,46 @@ validate_fields(
ip->i_nlink = va.va_nlink;
ip->i_blocks = va.va_nblocks;
- /* we're under i_sem so i_size can't change under us */
+ /* we're under i_mutex so i_size can't change under us */
if (i_size_read(ip) != va.va_size)
i_size_write(ip, va.va_size);
}
}
/*
+ * Hook in SELinux. This is not quite correct yet, what we really need
+ * here (as we do for default ACLs) is a mechanism by which creation of
+ * these attrs can be journalled at inode creation time (along with the
+ * inode, of course, such that log replay can't cause these to be lost).
+ */
+STATIC int
+linvfs_init_security(
+ struct vnode *vp,
+ struct inode *dir)
+{
+ struct inode *ip = LINVFS_GET_IP(vp);
+ size_t length;
+ void *value;
+ char *name;
+ int error;
+
+ error = security_inode_init_security(ip, dir, &name, &value, &length);
+ if (error) {
+ if (error == -EOPNOTSUPP)
+ return 0;
+ return -error;
+ }
+
+ VOP_ATTR_SET(vp, name, value, length, ATTR_SECURE, NULL, error);
+ if (!error)
+ VMODIFY(vp);
+
+ kfree(name);
+ kfree(value);
+ return error;
+}
+
+/*
* Determine whether a process has a valid fs_struct (kernel daemons
* like knfsd don't have an fs_struct).
*
@@ -274,6 +318,9 @@ linvfs_mknod(
break;
}
+ if (!error)
+ error = linvfs_init_security(vp, dir);
+
if (default_acl) {
if (!error) {
error = _ACL_INHERIT(vp, &va, default_acl);
@@ -290,8 +337,6 @@ linvfs_mknod(
teardown.d_inode = ip = LINVFS_GET_IP(vp);
teardown.d_name = dentry->d_name;
- vn_mark_bad(vp);
-
if (S_ISDIR(mode))
VOP_RMDIR(dvp, &teardown, NULL, err2);
else
@@ -502,7 +547,7 @@ linvfs_follow_link(
ASSERT(dentry);
ASSERT(nd);
- link = (char *)kmalloc(MAXNAMELEN+1, GFP_KERNEL);
+ link = (char *)kmalloc(MAXPATHLEN+1, GFP_KERNEL);
if (!link) {
nd_set_link(nd, ERR_PTR(-ENOMEM));
return NULL;
@@ -518,12 +563,12 @@ linvfs_follow_link(
vp = LINVFS_GET_VP(dentry->d_inode);
iov.iov_base = link;
- iov.iov_len = MAXNAMELEN;
+ iov.iov_len = MAXPATHLEN;
uio->uio_iov = &iov;
uio->uio_offset = 0;
uio->uio_segflg = UIO_SYSSPACE;
- uio->uio_resid = MAXNAMELEN;
+ uio->uio_resid = MAXPATHLEN;
uio->uio_iovcnt = 1;
VOP_READLINK(vp, uio, 0, NULL, error);
@@ -531,7 +576,7 @@ linvfs_follow_link(
kfree(link);
link = ERR_PTR(-error);
} else {
- link[MAXNAMELEN - uio->uio_resid] = '\0';
+ link[MAXPATHLEN - uio->uio_resid] = '\0';
}
kfree(uio);
diff --git a/fs/xfs/linux-2.6/xfs_iops.h b/fs/xfs/linux-2.6/xfs_iops.h
index ee784b63acb..6899a6b4a50 100644
--- a/fs/xfs/linux-2.6/xfs_iops.h
+++ b/fs/xfs/linux-2.6/xfs_iops.h
@@ -26,11 +26,6 @@ extern struct file_operations linvfs_file_operations;
extern struct file_operations linvfs_invis_file_operations;
extern struct file_operations linvfs_dir_operations;
-extern struct address_space_operations linvfs_aops;
-
-extern int linvfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
-extern void linvfs_unwritten_done(struct buffer_head *, int);
-
extern int xfs_ioctl(struct bhv_desc *, struct inode *, struct file *,
int, unsigned int, void __user *);
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index d8e21ba0ccc..67389b74552 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -110,10 +110,6 @@
* delalloc and these ondisk-uninitialised buffers.
*/
BUFFER_FNS(PrivateStart, unwritten);
-static inline void set_buffer_unwritten_io(struct buffer_head *bh)
-{
- bh->b_end_io = linvfs_unwritten_done;
-}
#define restricted_chown xfs_params.restrict_chown.val
#define irix_sgid_inherit xfs_params.sgid_inherit.val
@@ -232,7 +228,7 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh)
#define xfs_itruncate_data(ip, off) \
(-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off)))
#define xfs_statvfs_fsid(statp, mp) \
- ({ u64 id = huge_encode_dev((mp)->m_dev); \
+ ({ u64 id = huge_encode_dev((mp)->m_ddev_targp->bt_dev); \
__kernel_fsid_t *fsid = &(statp)->f_fsid; \
(fsid->val[0] = (u32)id, fsid->val[1] = (u32)(id >> 32)); })
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 279e9bc92ab..e0ab45fbfeb 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -233,8 +233,8 @@ xfs_read(
xfs_buftarg_t *target =
(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
mp->m_rtdev_targp : mp->m_ddev_targp;
- if ((*offset & target->pbr_smask) ||
- (size & target->pbr_smask)) {
+ if ((*offset & target->bt_smask) ||
+ (size & target->bt_smask)) {
if (*offset == ip->i_d.di_size) {
return (0);
}
@@ -254,7 +254,7 @@ xfs_read(
}
if (unlikely(ioflags & IO_ISDIRECT))
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
xfs_ilock(ip, XFS_IOLOCK_SHARED);
if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
@@ -281,12 +281,9 @@ xfs_read(
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
- if (likely(!(ioflags & IO_INVIS)))
- xfs_ichgtime_fast(ip, inode, XFS_ICHGTIME_ACC);
-
unlock_isem:
if (unlikely(ioflags & IO_ISDIRECT))
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return ret;
}
@@ -346,9 +343,6 @@ xfs_sendfile(
if (ret > 0)
XFS_STATS_ADD(xs_read_bytes, ret);
- if (likely(!(ioflags & IO_INVIS)))
- xfs_ichgtime_fast(ip, LINVFS_GET_IP(vp), XFS_ICHGTIME_ACC);
-
return ret;
}
@@ -362,7 +356,6 @@ STATIC int /* error (positive) */
xfs_zero_last_block(
struct inode *ip,
xfs_iocore_t *io,
- xfs_off_t offset,
xfs_fsize_t isize,
xfs_fsize_t end_size)
{
@@ -371,19 +364,16 @@ xfs_zero_last_block(
int nimaps;
int zero_offset;
int zero_len;
- int isize_fsb_offset;
int error = 0;
xfs_bmbt_irec_t imap;
loff_t loff;
- size_t lsize;
ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);
- ASSERT(offset > isize);
mp = io->io_mount;
- isize_fsb_offset = XFS_B_FSB_OFFSET(mp, isize);
- if (isize_fsb_offset == 0) {
+ zero_offset = XFS_B_FSB_OFFSET(mp, isize);
+ if (zero_offset == 0) {
/*
* There are no extra bytes in the last block on disk to
* zero, so return.
@@ -413,10 +403,8 @@ xfs_zero_last_block(
*/
XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);
loff = XFS_FSB_TO_B(mp, last_fsb);
- lsize = XFS_FSB_TO_B(mp, 1);
- zero_offset = isize_fsb_offset;
- zero_len = mp->m_sb.sb_blocksize - isize_fsb_offset;
+ zero_len = mp->m_sb.sb_blocksize - zero_offset;
error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size);
@@ -447,20 +435,17 @@ xfs_zero_eof(
struct inode *ip = LINVFS_GET_IP(vp);
xfs_fileoff_t start_zero_fsb;
xfs_fileoff_t end_zero_fsb;
- xfs_fileoff_t prev_zero_fsb;
xfs_fileoff_t zero_count_fsb;
xfs_fileoff_t last_fsb;
xfs_extlen_t buf_len_fsb;
- xfs_extlen_t prev_zero_count;
xfs_mount_t *mp;
int nimaps;
int error = 0;
xfs_bmbt_irec_t imap;
- loff_t loff;
- size_t lsize;
ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
+ ASSERT(offset > isize);
mp = io->io_mount;
@@ -468,7 +453,7 @@ xfs_zero_eof(
* First handle zeroing the block on which isize resides.
* We only zero a part of that block so it is handled specially.
*/
- error = xfs_zero_last_block(ip, io, offset, isize, end_size);
+ error = xfs_zero_last_block(ip, io, isize, end_size);
if (error) {
ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
@@ -496,8 +481,6 @@ xfs_zero_eof(
}
ASSERT(start_zero_fsb <= end_zero_fsb);
- prev_zero_fsb = NULLFILEOFF;
- prev_zero_count = 0;
while (start_zero_fsb <= end_zero_fsb) {
nimaps = 1;
zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
@@ -519,10 +502,7 @@ xfs_zero_eof(
* that sits on a hole and sets the page as P_HOLE
* and calls remapf if it is a mapped file.
*/
- prev_zero_fsb = NULLFILEOFF;
- prev_zero_count = 0;
- start_zero_fsb = imap.br_startoff +
- imap.br_blockcount;
+ start_zero_fsb = imap.br_startoff + imap.br_blockcount;
ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
continue;
}
@@ -543,17 +523,15 @@ xfs_zero_eof(
*/
XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
- loff = XFS_FSB_TO_B(mp, start_zero_fsb);
- lsize = XFS_FSB_TO_B(mp, buf_len_fsb);
-
- error = xfs_iozero(ip, loff, lsize, end_size);
+ error = xfs_iozero(ip,
+ XFS_FSB_TO_B(mp, start_zero_fsb),
+ XFS_FSB_TO_B(mp, buf_len_fsb),
+ end_size);
if (error) {
goto out_lock;
}
- prev_zero_fsb = start_zero_fsb;
- prev_zero_count = buf_len_fsb;
start_zero_fsb = imap.br_startoff + buf_len_fsb;
ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
@@ -640,7 +618,7 @@ xfs_write(
(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
mp->m_rtdev_targp : mp->m_ddev_targp;
- if ((pos & target->pbr_smask) || (count & target->pbr_smask))
+ if ((pos & target->bt_smask) || (count & target->bt_smask))
return XFS_ERROR(-EINVAL);
if (!VN_CACHED(vp) && pos < i_size_read(inode))
@@ -655,7 +633,7 @@ relock:
iolock = XFS_IOLOCK_EXCL;
locktype = VRWLOCK_WRITE;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
} else {
iolock = XFS_IOLOCK_SHARED;
locktype = VRWLOCK_WRITE_DIRECT;
@@ -686,7 +664,7 @@ start:
int dmflags = FILP_DELAY_FLAG(file);
if (need_isem)
- dmflags |= DM_FLAGS_ISEM;
+ dmflags |= DM_FLAGS_IMUX;
xfs_iunlock(xip, XFS_ILOCK_EXCL);
error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,
@@ -713,7 +691,7 @@ start:
}
if (likely(!(ioflags & IO_INVIS))) {
- inode_update_time(inode, 1);
+ file_update_time(file);
xfs_ichgtime_fast(xip, inode,
XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
}
@@ -772,7 +750,7 @@ retry:
if (need_isem) {
/* demote the lock now the cached pages are gone */
XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
iolock = XFS_IOLOCK_SHARED;
locktype = VRWLOCK_WRITE_DIRECT;
@@ -817,20 +795,24 @@ retry:
xfs_rwunlock(bdp, locktype);
if (need_isem)
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
0, 0, 0); /* Delay flag intentionally unused */
if (error)
goto out_nounlocks;
if (need_isem)
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
xfs_rwlock(bdp, locktype);
pos = xip->i_d.di_size;
ret = 0;
goto retry;
}
+ isize = i_size_read(inode);
+ if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
+ *offset = isize;
+
if (*offset > xip->i_d.di_size) {
xfs_ilock(xip, XFS_ILOCK_EXCL);
if (*offset > xip->i_d.di_size) {
@@ -926,7 +908,7 @@ retry:
xfs_rwunlock(bdp, locktype);
if (need_isem)
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
error = sync_page_range(inode, mapping, pos, ret);
if (!error)
@@ -938,7 +920,7 @@ retry:
xfs_rwunlock(bdp, locktype);
out_unlock_isem:
if (need_isem)
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
out_nounlocks:
return -error;
}
@@ -956,7 +938,7 @@ xfs_bdstrat_cb(struct xfs_buf *bp)
mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
if (!XFS_FORCED_SHUTDOWN(mp)) {
- pagebuf_iorequest(bp);
+ xfs_buf_iorequest(bp);
return 0;
} else {
xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
@@ -1009,7 +991,7 @@ xfsbdstrat(
* if (XFS_BUF_IS_GRIO(bp)) {
*/
- pagebuf_iorequest(bp);
+ xfs_buf_iorequest(bp);
return 0;
}
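
Note: the xfs_lrw.c hunks above convert xfs_write()'s serialization from the old inode semaphore to the new i_mutex. A minimal kernel-style sketch of the resulting pattern, assuming a hypothetical helper name (only the lock calls mirror the diff):

#include <linux/fs.h>
#include <linux/mutex.h>

static void xfs_example_serialise_write(struct inode *inode, int need_i_mutex)
{
	if (need_i_mutex)
		mutex_lock(&inode->i_mutex);	/* was: down(&inode->i_sem) */

	/* ... buffered write path runs under i_mutex here ... */

	if (need_i_mutex)
		mutex_unlock(&inode->i_mutex);	/* was: up(&inode->i_sem) */
}
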
diff --git a/fs/xfs/linux-2.6/xfs_stats.c b/fs/xfs/linux-2.6/xfs_stats.c
index 6c40a74be7c..8955720a2c6 100644
--- a/fs/xfs/linux-2.6/xfs_stats.c
+++ b/fs/xfs/linux-2.6/xfs_stats.c
@@ -34,7 +34,7 @@ xfs_read_xfsstats(
__uint64_t xs_write_bytes = 0;
__uint64_t xs_read_bytes = 0;
- static struct xstats_entry {
+ static const struct xstats_entry {
char *desc;
int endpoint;
} xstats[] = {
diff --git a/fs/xfs/linux-2.6/xfs_stats.h b/fs/xfs/linux-2.6/xfs_stats.h
index 50027c4a561..8ba7a2fa6c1 100644
--- a/fs/xfs/linux-2.6/xfs_stats.h
+++ b/fs/xfs/linux-2.6/xfs_stats.h
@@ -109,15 +109,15 @@ struct xfsstats {
__uint32_t vn_remove; /* # times vn_remove called */
__uint32_t vn_free; /* # times vn_free called */
#define XFSSTAT_END_BUF (XFSSTAT_END_VNODE_OPS+9)
- __uint32_t pb_get;
- __uint32_t pb_create;
- __uint32_t pb_get_locked;
- __uint32_t pb_get_locked_waited;
- __uint32_t pb_busy_locked;
- __uint32_t pb_miss_locked;
- __uint32_t pb_page_retries;
- __uint32_t pb_page_found;
- __uint32_t pb_get_read;
+ __uint32_t xb_get;
+ __uint32_t xb_create;
+ __uint32_t xb_get_locked;
+ __uint32_t xb_get_locked_waited;
+ __uint32_t xb_busy_locked;
+ __uint32_t xb_miss_locked;
+ __uint32_t xb_page_retries;
+ __uint32_t xb_page_found;
+ __uint32_t xb_get_read;
/* Extra precision counters */
__uint64_t xs_xstrat_bytes;
__uint64_t xs_write_bytes;
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 6116b5bf433..f22e426d9e4 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -306,13 +306,15 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp)
xfs_fs_cmn_err(CE_NOTE, mp,
"Disabling barriers, not supported with external log device");
mp->m_flags &= ~XFS_MOUNT_BARRIER;
+ return;
}
- if (mp->m_ddev_targp->pbr_bdev->bd_disk->queue->ordered ==
+ if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered ==
QUEUE_ORDERED_NONE) {
xfs_fs_cmn_err(CE_NOTE, mp,
"Disabling barriers, not supported by the underlying device");
mp->m_flags &= ~XFS_MOUNT_BARRIER;
+ return;
}
error = xfs_barrier_test(mp);
@@ -320,6 +322,7 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp)
xfs_fs_cmn_err(CE_NOTE, mp,
"Disabling barriers, trial barrier write failed");
mp->m_flags &= ~XFS_MOUNT_BARRIER;
+ return;
}
}
@@ -327,7 +330,7 @@ void
xfs_blkdev_issue_flush(
xfs_buftarg_t *buftarg)
{
- blkdev_issue_flush(buftarg->pbr_bdev, NULL);
+ blkdev_issue_flush(buftarg->bt_bdev, NULL);
}
STATIC struct inode *
@@ -576,7 +579,7 @@ xfssyncd(
timeleft = schedule_timeout_interruptible(timeleft);
/* swsusp */
try_to_freeze();
- if (kthread_should_stop())
+ if (kthread_should_stop() && list_empty(&vfsp->vfs_sync_list))
break;
spin_lock(&vfsp->vfs_sync_lock);
@@ -966,9 +969,9 @@ init_xfs_fs( void )
if (error < 0)
goto undo_zones;
- error = pagebuf_init();
+ error = xfs_buf_init();
if (error < 0)
- goto undo_pagebuf;
+ goto undo_buffers;
vn_init();
xfs_init();
@@ -982,9 +985,9 @@ init_xfs_fs( void )
return 0;
undo_register:
- pagebuf_terminate();
+ xfs_buf_terminate();
-undo_pagebuf:
+undo_buffers:
linvfs_destroy_zones();
undo_zones:
@@ -998,7 +1001,7 @@ exit_xfs_fs( void )
XFS_DM_EXIT(&xfs_fs_type);
unregister_filesystem(&xfs_fs_type);
xfs_cleanup();
- pagebuf_terminate();
+ xfs_buf_terminate();
linvfs_destroy_zones();
ktrace_uninit();
}
diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c
index e9bbcb4d624..260dd8415dd 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.c
+++ b/fs/xfs/linux-2.6/xfs_vnode.c
@@ -106,7 +106,6 @@ vn_revalidate_core(
inode->i_blocks = vap->va_nblocks;
inode->i_mtime = vap->va_mtime;
inode->i_ctime = vap->va_ctime;
- inode->i_atime = vap->va_atime;
inode->i_blksize = vap->va_blocksize;
if (vap->va_xflags & XFS_XFLAG_IMMUTABLE)
inode->i_flags |= S_IMMUTABLE;
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index f2bbb327c08..0fe2419461d 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -566,6 +566,25 @@ static inline int VN_BAD(struct vnode *vp)
}
/*
+ * Extracting atime values in various formats
+ */
+static inline void vn_atime_to_bstime(struct vnode *vp, xfs_bstime_t *bs_atime)
+{
+ bs_atime->tv_sec = vp->v_inode.i_atime.tv_sec;
+ bs_atime->tv_nsec = vp->v_inode.i_atime.tv_nsec;
+}
+
+static inline void vn_atime_to_timespec(struct vnode *vp, struct timespec *ts)
+{
+ *ts = vp->v_inode.i_atime;
+}
+
+static inline void vn_atime_to_time_t(struct vnode *vp, time_t *tt)
+{
+ *tt = vp->v_inode.i_atime.tv_sec;
+}
+
+/*
* Some useful predicates.
*/
#define VN_MAPPED(vp) mapping_mapped(LINVFS_GET_IP(vp)->i_mapping)
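
Note: the accessors added above centralize atime extraction from the Linux inode embedded in the vnode. An illustrative caller, with hypothetical function and variable names:

static void example_report_atime(struct vnode *vp)
{
	struct timespec ts;
	time_t sec;

	vn_atime_to_timespec(vp, &ts);	/* full second + nanosecond value */
	vn_atime_to_time_t(vp, &sec);	/* seconds only, e.g. for bulkstat */
}
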
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 00b5043dfa5..772ac48329e 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -104,7 +104,7 @@ xfs_qm_dqinit(
*/
if (brandnewdquot) {
dqp->dq_flnext = dqp->dq_flprev = dqp;
- mutex_init(&dqp->q_qlock, MUTEX_DEFAULT, "xdq");
+ mutex_init(&dqp->q_qlock);
initnsema(&dqp->q_flock, 1, "fdq");
sv_init(&dqp->q_pinwait, SV_DEFAULT, "pdq");
@@ -1382,7 +1382,7 @@ void
xfs_dqlock(
xfs_dquot_t *dqp)
{
- mutex_lock(&(dqp->q_qlock), PINOD);
+ mutex_lock(&(dqp->q_qlock));
}
void
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index 2f69822344e..2ec6b441849 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -239,7 +239,7 @@ xfs_qm_dquot_logitem_pushbuf(
* trying to duplicate our effort.
*/
ASSERT(qip->qli_pushbuf_flag != 0);
- ASSERT(qip->qli_push_owner == get_thread_id());
+ ASSERT(qip->qli_push_owner == current_pid());
/*
* If flushlock isn't locked anymore, chances are that the
@@ -333,7 +333,7 @@ xfs_qm_dquot_logitem_trylock(
qip->qli_pushbuf_flag = 1;
ASSERT(qip->qli_format.qlf_blkno == dqp->q_blkno);
#ifdef DEBUG
- qip->qli_push_owner = get_thread_id();
+ qip->qli_push_owner = current_pid();
#endif
/*
* The dquot is left locked.
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 5328a293712..7dcdd0640c3 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -167,7 +167,7 @@ xfs_Gqm_init(void)
xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO;
xqm->qm_nrefs = 0;
#ifdef DEBUG
- mutex_init(&qcheck_lock, MUTEX_DEFAULT, "qchk");
+ xfs_mutex_init(&qcheck_lock, MUTEX_DEFAULT, "qchk");
#endif
return xqm;
}
@@ -1166,7 +1166,7 @@ xfs_qm_init_quotainfo(
qinf->qi_dqreclaims = 0;
/* mutex used to serialize quotaoffs */
- mutex_init(&qinf->qi_quotaofflock, MUTEX_DEFAULT, "qoff");
+ mutex_init(&qinf->qi_quotaofflock);
/* Precalc some constants */
qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
@@ -1285,7 +1285,7 @@ xfs_qm_list_init(
char *str,
int n)
{
- mutex_init(&list->qh_lock, MUTEX_DEFAULT, str);
+ mutex_init(&list->qh_lock);
list->qh_next = NULL;
list->qh_version = 0;
list->qh_nelems = 0;
@@ -1392,11 +1392,12 @@ xfs_qm_qino_alloc(
{
xfs_trans_t *tp;
int error;
- unsigned long s;
+ unsigned long s;
cred_t zerocr;
+ xfs_inode_t zeroino;
int committed;
- tp = xfs_trans_alloc(mp,XFS_TRANS_QM_QINOCREATE);
+ tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
if ((error = xfs_trans_reserve(tp,
XFS_QM_QINOCREATE_SPACE_RES(mp),
XFS_CREATE_LOG_RES(mp), 0,
@@ -1406,8 +1407,9 @@ xfs_qm_qino_alloc(
return (error);
}
memset(&zerocr, 0, sizeof(zerocr));
+ memset(&zeroino, 0, sizeof(zeroino));
- if ((error = xfs_dir_ialloc(&tp, mp->m_rootip, S_IFREG, 1, 0,
+ if ((error = xfs_dir_ialloc(&tp, &zeroino, S_IFREG, 1, 0,
&zerocr, 0, 1, ip, &committed))) {
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
XFS_TRANS_ABORT);
@@ -1918,9 +1920,7 @@ xfs_qm_quotacheck(
* at this point (because we intentionally didn't in dqget_noattach).
*/
if (error) {
- xfs_qm_dqpurge_all(mp,
- XFS_QMOPT_UQUOTA|XFS_QMOPT_GQUOTA|
- XFS_QMOPT_PQUOTA|XFS_QMOPT_QUOTAOFF);
+ xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_QUOTAOFF);
goto error_return;
}
/*
@@ -2743,6 +2743,7 @@ xfs_qm_vop_dqattach_and_dqmod_newinode(
xfs_dqunlock(udqp);
ASSERT(ip->i_udquot == NULL);
ip->i_udquot = udqp;
+ ASSERT(XFS_IS_UQUOTA_ON(tp->t_mountp));
ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
}
@@ -2752,7 +2753,10 @@ xfs_qm_vop_dqattach_and_dqmod_newinode(
xfs_dqunlock(gdqp);
ASSERT(ip->i_gdquot == NULL);
ip->i_gdquot = gdqp;
- ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
+ ASSERT(XFS_IS_OQUOTA_ON(tp->t_mountp));
+ ASSERT((XFS_IS_GQUOTA_ON(tp->t_mountp) ?
+ ip->i_d.di_gid : ip->i_d.di_projid) ==
+ be32_to_cpu(gdqp->q_core.d_id));
xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
}
}
@@ -2762,7 +2766,7 @@ STATIC void
xfs_qm_freelist_init(xfs_frlist_t *ql)
{
ql->qh_next = ql->qh_prev = (xfs_dquot_t *) ql;
- mutex_init(&ql->qh_lock, MUTEX_DEFAULT, "dqf");
+ mutex_init(&ql->qh_lock);
ql->qh_version = 0;
ql->qh_nelems = 0;
}
@@ -2772,7 +2776,7 @@ xfs_qm_freelist_destroy(xfs_frlist_t *ql)
{
xfs_dquot_t *dqp, *nextdqp;
- mutex_lock(&ql->qh_lock, PINOD);
+ mutex_lock(&ql->qh_lock);
for (dqp = ql->qh_next;
dqp != (xfs_dquot_t *)ql; ) {
xfs_dqlock(dqp);
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
index 12da259f2fc..4568deb6da8 100644
--- a/fs/xfs/quota/xfs_qm.h
+++ b/fs/xfs/quota/xfs_qm.h
@@ -165,7 +165,7 @@ typedef struct xfs_dquot_acct {
#define XFS_QM_IWARNLIMIT 5
#define XFS_QM_RTBWARNLIMIT 5
-#define XFS_QM_LOCK(xqm) (mutex_lock(&xqm##_lock, PINOD))
+#define XFS_QM_LOCK(xqm) (mutex_lock(&xqm##_lock))
#define XFS_QM_UNLOCK(xqm) (mutex_unlock(&xqm##_lock))
#define XFS_QM_HOLD(xqm) ((xqm)->qm_nrefs++)
#define XFS_QM_RELE(xqm) ((xqm)->qm_nrefs--)
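
Note: several quota hunks above drop the IRIX-style mutex arguments (MUTEX_DEFAULT, a name string, PINOD) in favour of the generic Linux mutex API. A minimal sketch of the before/after pattern, using a hypothetical list structure:

#include <linux/mutex.h>

struct example_qlist {
	struct mutex	qh_lock;
	int		qh_nelems;
};

static void example_qlist_init(struct example_qlist *ql)
{
	mutex_init(&ql->qh_lock);	/* was: mutex_init(&ql->qh_lock, MUTEX_DEFAULT, str) */
	ql->qh_nelems = 0;
}

static void example_qlist_touch(struct example_qlist *ql)
{
	mutex_lock(&ql->qh_lock);	/* was: mutex_lock(&ql->qh_lock, PINOD) */
	ql->qh_nelems++;
	mutex_unlock(&ql->qh_lock);
}
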
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index d9d2993de43..90402a1c398 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -363,7 +363,7 @@ xfs_qm_init(void)
KERN_INFO "SGI XFS Quota Management subsystem\n";
printk(message);
- mutex_init(&xfs_Gqm_lock, MUTEX_DEFAULT, "xfs_qmlock");
+ mutex_init(&xfs_Gqm_lock);
vfs_bhv_set_custom(&xfs_qmops, &xfs_qmcore_xfs);
xfs_qm_init_procfs();
}
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 24690e1af65..676884394aa 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -15,6 +15,9 @@
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+
+#include <linux/capability.h>
+
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
@@ -233,7 +236,7 @@ xfs_qm_scall_quotaoff(
*/
ASSERT(mp->m_quotainfo);
if (mp->m_quotainfo)
- mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD);
+ mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
ASSERT(mp->m_quotainfo);
@@ -508,7 +511,7 @@ xfs_qm_scall_quotaon(
/*
* Switch on quota enforcement in core.
*/
- mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD);
+ mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
@@ -617,7 +620,7 @@ xfs_qm_scall_setqlim(
* a quotaoff from happening). (XXXThis doesn't currently happen
* because we take the vfslock before calling xfs_qm_sysent).
*/
- mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD);
+ mutex_lock(&(XFS_QI_QOFFLOCK(mp)));
/*
* Get the dquot (locked), and join it to the transaction.
@@ -1426,7 +1429,7 @@ xfs_qm_internalqcheck(
xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
XFS_bflush(mp->m_ddev_targp);
- mutex_lock(&qcheck_lock, PINOD);
+ mutex_lock(&qcheck_lock);
/* There should be absolutely no quota activity while this
is going on. */
qmtest_udqtab = kmem_zalloc(qmtest_hashmask *
diff --git a/fs/xfs/quota/xfs_quota_priv.h b/fs/xfs/quota/xfs_quota_priv.h
index 7a9f3beb818..b7ddd04aae3 100644
--- a/fs/xfs/quota/xfs_quota_priv.h
+++ b/fs/xfs/quota/xfs_quota_priv.h
@@ -51,7 +51,7 @@
#define XFS_QI_MPLNEXT(mp) ((mp)->m_quotainfo->qi_dqlist.qh_next)
#define XFS_QI_MPLNDQUOTS(mp) ((mp)->m_quotainfo->qi_dqlist.qh_nelems)
-#define XQMLCK(h) (mutex_lock(&((h)->qh_lock), PINOD))
+#define XQMLCK(h) (mutex_lock(&((h)->qh_lock)))
#define XQMUNLCK(h) (mutex_unlock(&((h)->qh_lock)))
#ifdef DEBUG
struct xfs_dqhash;
diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c
index bb6dc91ea26..b08b3d9345b 100644
--- a/fs/xfs/support/debug.c
+++ b/fs/xfs/support/debug.c
@@ -27,45 +27,12 @@ static DEFINE_SPINLOCK(xfs_err_lock);
/* Translate from CE_FOO to KERN_FOO, err_level(CE_FOO) == KERN_FOO */
#define XFS_MAX_ERR_LEVEL 7
#define XFS_ERR_MASK ((1 << 3) - 1)
-static char *err_level[XFS_MAX_ERR_LEVEL+1] =
+static const char * const err_level[XFS_MAX_ERR_LEVEL+1] =
{KERN_EMERG, KERN_ALERT, KERN_CRIT,
KERN_ERR, KERN_WARNING, KERN_NOTICE,
KERN_INFO, KERN_DEBUG};
void
-assfail(char *a, char *f, int l)
-{
- printk("XFS assertion failed: %s, file: %s, line: %d\n", a, f, l);
- BUG();
-}
-
-#if ((defined(DEBUG) || defined(INDUCE_IO_ERRROR)) && !defined(NO_WANT_RANDOM))
-
-unsigned long
-random(void)
-{
- static unsigned long RandomValue = 1;
- /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
- register long rv = RandomValue;
- register long lo;
- register long hi;
-
- hi = rv / 127773;
- lo = rv % 127773;
- rv = 16807 * lo - 2836 * hi;
- if( rv <= 0 ) rv += 2147483647;
- return( RandomValue = rv );
-}
-
-int
-get_thread_id(void)
-{
- return current->pid;
-}
-
-#endif /* DEBUG || INDUCE_IO_ERRROR || !NO_WANT_RANDOM */
-
-void
cmn_err(register int level, char *fmt, ...)
{
char *fp = fmt;
@@ -90,7 +57,6 @@ cmn_err(register int level, char *fmt, ...)
BUG();
}
-
void
icmn_err(register int level, char *fmt, va_list ap)
{
@@ -109,3 +75,27 @@ icmn_err(register int level, char *fmt, va_list ap)
if (level == CE_PANIC)
BUG();
}
+
+void
+assfail(char *expr, char *file, int line)
+{
+ printk("Assertion failed: %s, file: %s, line: %d\n", expr, file, line);
+ BUG();
+}
+
+#if ((defined(DEBUG) || defined(INDUCE_IO_ERRROR)) && !defined(NO_WANT_RANDOM))
+unsigned long random(void)
+{
+ static unsigned long RandomValue = 1;
+ /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
+ register long rv = RandomValue;
+ register long lo;
+ register long hi;
+
+ hi = rv / 127773;
+ lo = rv % 127773;
+ rv = 16807 * lo - 2836 * hi;
+ if (rv <= 0) rv += 2147483647;
+ return RandomValue = rv;
+}
+#endif /* DEBUG || INDUCE_IO_ERRROR || !NO_WANT_RANDOM */
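
Note: the random() helper moved above is the Park-Miller "minimal standard" generator, x' = 16807 * x mod (2^31 - 1), evaluated with Schrage's decomposition (127773 = m / 16807, 2836 = m % 16807) so no intermediate product overflows a signed 32-bit long. A standalone, runnable restatement:

#include <stdio.h>

static long example_minstd(long x)
{
	const long a = 16807, m = 2147483647;	/* m = 2^31 - 1 */
	const long q = 127773, r = 2836;	/* q = m / a, r = m % a */
	long hi = x / q, lo = x % q;

	x = a * lo - r * hi;			/* never exceeds m in magnitude */
	if (x <= 0)
		x += m;
	return x;				/* always in 1 .. 2^31 - 2 */
}

int main(void)
{
	long x = 1;
	for (int i = 0; i < 3; i++) {
		x = example_minstd(x);
		printf("%ld\n", x);		/* 16807, 282475249, 1622650073 */
	}
	return 0;
}
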
diff --git a/fs/xfs/support/debug.h b/fs/xfs/support/debug.h
index aff558664c3..e3bf58112e7 100644
--- a/fs/xfs/support/debug.h
+++ b/fs/xfs/support/debug.h
@@ -31,24 +31,23 @@ extern void icmn_err(int, char *, va_list)
__attribute__ ((format (printf, 2, 0)));
extern void cmn_err(int, char *, ...)
__attribute__ ((format (printf, 2, 3)));
+extern void assfail(char *expr, char *f, int l);
-#ifndef STATIC
-# define STATIC static
-#endif
+#define prdev(fmt,targ,args...) \
+ printk("Device %s - " fmt "\n", XFS_BUFTARG_NAME(targ), ## args)
-#ifdef DEBUG
-# define ASSERT(EX) ((EX) ? ((void)0) : assfail(#EX, __FILE__, __LINE__))
-#else
-# define ASSERT(x) ((void)0)
-#endif
+#define ASSERT_ALWAYS(expr) \
+ (unlikely((expr) != 0) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
-extern void assfail(char *, char *, int);
-#ifdef DEBUG
+#ifndef DEBUG
+# define ASSERT(expr) ((void)0)
+#else
+# define ASSERT(expr) ASSERT_ALWAYS(expr)
extern unsigned long random(void);
-extern int get_thread_id(void);
#endif
-#define ASSERT_ALWAYS(EX) ((EX)?((void)0):assfail(#EX, __FILE__, __LINE__))
-#define debug_stop_all_cpus(param) /* param is "cpumask_t *" */
+#ifndef STATIC
+# define STATIC static
+#endif
#endif /* __XFS_SUPPORT_DEBUG_H__ */
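
Note: debug.h now defines ASSERT_ALWAYS unconditionally and makes ASSERT either a no-op or an alias for it, depending on DEBUG. A userspace illustration of the same macro layout, with a stand-in assfail() that aborts instead of calling BUG():

#include <stdio.h>
#include <stdlib.h>

static void assfail(const char *expr, const char *file, int line)
{
	fprintf(stderr, "Assertion failed: %s, file: %s, line: %d\n",
		expr, file, line);
	abort();
}

#define ASSERT_ALWAYS(expr) \
	((expr) != 0 ? (void)0 : assfail(#expr, __FILE__, __LINE__))

#ifndef DEBUG
# define ASSERT(expr)	((void)0)
#else
# define ASSERT(expr)	ASSERT_ALWAYS(expr)
#endif

int main(void)
{
	ASSERT_ALWAYS(1 + 1 == 2);	/* checked in every build */
	ASSERT(1 + 1 == 2);		/* checked only with -DDEBUG */
	return 0;
}
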
diff --git a/fs/xfs/support/uuid.c b/fs/xfs/support/uuid.c
index 70ce40914c8..a3d565a6773 100644
--- a/fs/xfs/support/uuid.c
+++ b/fs/xfs/support/uuid.c
@@ -24,9 +24,19 @@ static uuid_t *uuid_table;
void
uuid_init(void)
{
- mutex_init(&uuid_monitor, MUTEX_DEFAULT, "uuid_monitor");
+ mutex_init(&uuid_monitor);
}
+
+/* IRIX interpretation of an uuid_t */
+typedef struct {
+ __be32 uu_timelow;
+ __be16 uu_timemid;
+ __be16 uu_timehi;
+ __be16 uu_clockseq;
+ __be16 uu_node[3];
+} xfs_uu_t;
+
/*
* uuid_getnodeuniq - obtain the node unique fields of a UUID.
*
@@ -36,16 +46,11 @@ uuid_init(void)
void
uuid_getnodeuniq(uuid_t *uuid, int fsid [2])
{
- char *uu = (char *)uuid;
-
- /* on IRIX, this function assumes big-endian fields within
- * the uuid, so we use INT_GET to get the same result on
- * little-endian systems
- */
+ xfs_uu_t *uup = (xfs_uu_t *)uuid;
- fsid[0] = (INT_GET(*(u_int16_t*)(uu+8), ARCH_CONVERT) << 16) +
- INT_GET(*(u_int16_t*)(uu+4), ARCH_CONVERT);
- fsid[1] = INT_GET(*(u_int32_t*)(uu ), ARCH_CONVERT);
+ fsid[0] = (be16_to_cpu(uup->uu_clockseq) << 16) |
+ be16_to_cpu(uup->uu_timemid);
+ fsid[1] = be16_to_cpu(uup->uu_timelow);
}
void
@@ -94,7 +99,7 @@ uuid_table_insert(uuid_t *uuid)
{
int i, hole;
- mutex_lock(&uuid_monitor, PVFS);
+ mutex_lock(&uuid_monitor);
for (i = 0, hole = -1; i < uuid_table_size; i++) {
if (uuid_is_nil(&uuid_table[i])) {
hole = i;
@@ -122,7 +127,7 @@ uuid_table_remove(uuid_t *uuid)
{
int i;
- mutex_lock(&uuid_monitor, PVFS);
+ mutex_lock(&uuid_monitor);
for (i = 0; i < uuid_table_size; i++) {
if (uuid_is_nil(&uuid_table[i]))
continue;
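
Note: uuid_getnodeuniq() now overlays the UUID with the big-endian xfs_uu_t layout instead of the open-coded INT_GET conversions. A standalone sketch of the byte interpretation, assuming the xfs_uu_t offsets shown above (time-low at byte 0, time-mid at byte 4, clock-seq at byte 8); the helper names are hypothetical, and fsid[1] here follows the original 32-bit INT_GET read:

#include <stdint.h>
#include <stdio.h>

static uint16_t get_be16(const unsigned char *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

static uint32_t get_be32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static void example_getnodeuniq(const unsigned char uuid[16], uint32_t fsid[2])
{
	/* fsid[0]: clock-seq (offset 8) in the high half, time-mid (offset 4) low */
	fsid[0] = ((uint32_t)get_be16(uuid + 8) << 16) | get_be16(uuid + 4);
	/* fsid[1]: the 32-bit time-low field (offset 0) */
	fsid[1] = get_be32(uuid + 0);
}

int main(void)
{
	unsigned char uuid[16] = { 0x00,0x11,0x22,0x33, 0x44,0x55, 0x66,0x77,
				   0x88,0x99, 0xaa,0xbb,0xcc,0xdd,0xee,0xff };
	uint32_t fsid[2];

	example_getnodeuniq(uuid, fsid);
	printf("fsid[0]=0x%08x fsid[1]=0x%08x\n", fsid[0], fsid[1]);
	/* fsid[0]=0x88994455, fsid[1]=0x00112233 */
	return 0;
}
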
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index cc9c91b9e77..4ff0f4e41c6 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -36,6 +36,7 @@
#include "xfs_mac.h"
#include "xfs_attr.h"
+#include <linux/capability.h>
#include <linux/posix_acl_xattr.h>
STATIC int xfs_acl_setmode(vnode_t *, xfs_acl_t *, int *);
diff --git a/fs/xfs/xfs_arch.h b/fs/xfs/xfs_arch.h
index 68e5051d8e2..c4836890b72 100644
--- a/fs/xfs/xfs_arch.h
+++ b/fs/xfs/xfs_arch.h
@@ -40,6 +40,22 @@
#undef XFS_NATIVE_HOST
#endif
+#ifdef XFS_NATIVE_HOST
+#define cpu_to_be16(val) ((__be16)(val))
+#define cpu_to_be32(val) ((__be32)(val))
+#define cpu_to_be64(val) ((__be64)(val))
+#define be16_to_cpu(val) ((__uint16_t)(val))
+#define be32_to_cpu(val) ((__uint32_t)(val))
+#define be64_to_cpu(val) ((__uint64_t)(val))
+#else
+#define cpu_to_be16(val) (__swab16((__uint16_t)(val)))
+#define cpu_to_be32(val) (__swab32((__uint32_t)(val)))
+#define cpu_to_be64(val) (__swab64((__uint64_t)(val)))
+#define be16_to_cpu(val) (__swab16((__be16)(val)))
+#define be32_to_cpu(val) (__swab32((__be32)(val)))
+#define be64_to_cpu(val) (__swab64((__be64)(val)))
+#endif
+
#endif /* __KERNEL__ */
/* do we need conversion? */
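
Note: the conditional macros added above make be*_to_cpu()/cpu_to_be*() plain casts on big-endian (XFS_NATIVE_HOST) builds and byte swaps elsewhere. A runnable userspace illustration of the little-endian case, with a local swab32() standing in for the kernel's __swab32():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t swab32(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
	       ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
}

int main(void)
{
	/* on-disk (big-endian) representation of the value 0x12345678 */
	unsigned char disk[4] = { 0x12, 0x34, 0x56, 0x78 };
	uint32_t raw;

	memcpy(&raw, disk, 4);
	/* a little-endian CPU must swap; a big-endian CPU uses raw as-is */
	printf("0x%08x\n", swab32(raw));	/* prints 0x12345678 on x86 */
	return 0;
}
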
@@ -186,7 +202,7 @@ static inline void be64_add(__be64 *a, __s64 b)
*/
#define XFS_GET_DIR_INO4(di) \
- (((u32)(di).i[0] << 24) | ((di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
+ (((__u32)(di).i[0] << 24) | ((di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
#define XFS_PUT_DIR_INO4(from, di) \
do { \
@@ -197,9 +213,9 @@ do { \
} while (0)
#define XFS_DI_HI(di) \
- (((u32)(di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
+ (((__u32)(di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
#define XFS_DI_LO(di) \
- (((u32)(di).i[4] << 24) | ((di).i[5] << 16) | ((di).i[6] << 8) | ((di).i[7]))
+ (((__u32)(di).i[4] << 24) | ((di).i[5] << 16) | ((di).i[6] << 8) | ((di).i[7]))
#define XFS_GET_DIR_INO8(di) \
(((xfs_ino_t)XFS_DI_LO(di) & 0xffffffffULL) | \
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index 5484eeb460c..e5e91e9c7e8 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -15,6 +15,9 @@
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+
+#include <linux/capability.h>
+
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
@@ -117,11 +120,6 @@ xfs_attr_fetch(xfs_inode_t *ip, const char *name, int namelen,
ip->i_d.di_anextents == 0))
return(ENOATTR);
- if (!(flags & (ATTR_KERNACCESS|ATTR_SECURE))) {
- if ((error = xfs_iaccess(ip, S_IRUSR, cred)))
- return(XFS_ERROR(error));
- }
-
/*
* Fill in the arg structure for this request.
*/
@@ -425,7 +423,7 @@ xfs_attr_set(bhv_desc_t *bdp, const char *name, char *value, int valuelen, int f
struct cred *cred)
{
xfs_inode_t *dp;
- int namelen, error;
+ int namelen;
namelen = strlen(name);
if (namelen >= MAXNAMELEN)
@@ -437,14 +435,6 @@ xfs_attr_set(bhv_desc_t *bdp, const char *name, char *value, int valuelen, int f
if (XFS_FORCED_SHUTDOWN(dp->i_mount))
return (EIO);
- xfs_ilock(dp, XFS_ILOCK_SHARED);
- if (!(flags & ATTR_SECURE) &&
- (error = xfs_iaccess(dp, S_IWUSR, cred))) {
- xfs_iunlock(dp, XFS_ILOCK_SHARED);
- return(XFS_ERROR(error));
- }
- xfs_iunlock(dp, XFS_ILOCK_SHARED);
-
return xfs_attr_set_int(dp, name, namelen, value, valuelen, flags);
}
@@ -579,7 +569,7 @@ int
xfs_attr_remove(bhv_desc_t *bdp, const char *name, int flags, struct cred *cred)
{
xfs_inode_t *dp;
- int namelen, error;
+ int namelen;
namelen = strlen(name);
if (namelen >= MAXNAMELEN)
@@ -592,11 +582,7 @@ xfs_attr_remove(bhv_desc_t *bdp, const char *name, int flags, struct cred *cred)
return (EIO);
xfs_ilock(dp, XFS_ILOCK_SHARED);
- if (!(flags & ATTR_SECURE) &&
- (error = xfs_iaccess(dp, S_IWUSR, cred))) {
- xfs_iunlock(dp, XFS_ILOCK_SHARED);
- return(XFS_ERROR(error));
- } else if (XFS_IFORK_Q(dp) == 0 ||
+ if (XFS_IFORK_Q(dp) == 0 ||
(dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
dp->i_d.di_anextents == 0)) {
xfs_iunlock(dp, XFS_ILOCK_SHARED);
@@ -668,12 +654,6 @@ xfs_attr_list(bhv_desc_t *bdp, char *buffer, int bufsize, int flags,
return (EIO);
xfs_ilock(dp, XFS_ILOCK_SHARED);
- if (!(flags & ATTR_SECURE) &&
- (error = xfs_iaccess(dp, S_IRUSR, cred))) {
- xfs_iunlock(dp, XFS_ILOCK_SHARED);
- return(XFS_ERROR(error));
- }
-
/*
* Decide on what work routines to call based on the inode size.
*/
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 1c7421840c1..fe91eac4e2a 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -128,7 +128,7 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
return (offset >= minforkoff) ? minforkoff : 0;
}
- if (unlikely(mp->m_flags & XFS_MOUNT_COMPAT_ATTR)) {
+ if (!(mp->m_flags & XFS_MOUNT_ATTR2)) {
if (bytes <= XFS_IFORK_ASIZE(dp))
return mp->m_attroffset >> 3;
return 0;
@@ -157,7 +157,7 @@ xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp)
{
unsigned long s;
- if (!(mp->m_flags & XFS_MOUNT_COMPAT_ATTR) &&
+ if ((mp->m_flags & XFS_MOUNT_ATTR2) &&
!(XFS_SB_VERSION_HASATTR2(&mp->m_sb))) {
s = XFS_SB_LOCK(mp);
if (!XFS_SB_VERSION_HASATTR2(&mp->m_sb)) {
@@ -311,7 +311,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
*/
totsize -= size;
if (totsize == sizeof(xfs_attr_sf_hdr_t) && !args->addname &&
- !(mp->m_flags & XFS_MOUNT_COMPAT_ATTR)) {
+ (mp->m_flags & XFS_MOUNT_ATTR2)) {
/*
* Last attribute now removed, revert to original
* inode format making all literal area available
@@ -330,7 +330,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
ASSERT(dp->i_d.di_forkoff);
ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) || args->addname ||
- (mp->m_flags & XFS_MOUNT_COMPAT_ATTR));
+ !(mp->m_flags & XFS_MOUNT_ATTR2));
dp->i_afp->if_ext_max =
XFS_IFORK_ASIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
dp->i_df.if_ext_max =
@@ -739,7 +739,7 @@ xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp)
+ name_loc->namelen
+ INT_GET(name_loc->valuelen, ARCH_CONVERT);
}
- if (!(dp->i_mount->m_flags & XFS_MOUNT_COMPAT_ATTR) &&
+ if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) &&
(bytes == sizeof(struct xfs_attr_sf_hdr)))
return(-1);
return(xfs_attr_shortform_bytesfit(dp, bytes));
@@ -778,7 +778,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
goto out;
if (forkoff == -1) {
- ASSERT(!(dp->i_mount->m_flags & XFS_MOUNT_COMPAT_ATTR));
+ ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2);
/*
* Last attribute was removed, revert to original
diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h
index f6143ff251a..541e34109bb 100644
--- a/fs/xfs/xfs_attr_leaf.h
+++ b/fs/xfs/xfs_attr_leaf.h
@@ -63,7 +63,7 @@ struct xfs_trans;
* the leaf_entry. The namespaces are independent only because we also look
* at the namespace bit when we are looking for a matching attribute name.
*
- * We also store a "incomplete" bit in the leaf_entry. It shows that an
+ * We also store an "incomplete" bit in the leaf_entry. It shows that an
* attribute is in the middle of being created and should not be shown to
* the user if we crash during the time that the bit is set. We clear the
* bit when we have finished setting up the attribute. We do this because
@@ -72,42 +72,48 @@ struct xfs_trans;
*/
#define XFS_ATTR_LEAF_MAPSIZE 3 /* how many freespace slots */
+typedef struct xfs_attr_leaf_map { /* RLE map of free bytes */
+ __uint16_t base; /* base of free region */
+ __uint16_t size; /* length of free region */
+} xfs_attr_leaf_map_t;
+
+typedef struct xfs_attr_leaf_hdr { /* constant-structure header block */
+ xfs_da_blkinfo_t info; /* block type, links, etc. */
+ __uint16_t count; /* count of active leaf_entry's */
+ __uint16_t usedbytes; /* num bytes of names/values stored */
+ __uint16_t firstused; /* first used byte in name area */
+ __uint8_t holes; /* != 0 if blk needs compaction */
+ __uint8_t pad1;
+ xfs_attr_leaf_map_t freemap[XFS_ATTR_LEAF_MAPSIZE];
+ /* N largest free regions */
+} xfs_attr_leaf_hdr_t;
+
+typedef struct xfs_attr_leaf_entry { /* sorted on key, not name */
+ xfs_dahash_t hashval; /* hash value of name */
+ __uint16_t nameidx; /* index into buffer of name/value */
+ __uint8_t flags; /* LOCAL/ROOT/SECURE/INCOMPLETE flag */
+ __uint8_t pad2; /* unused pad byte */
+} xfs_attr_leaf_entry_t;
+
+typedef struct xfs_attr_leaf_name_local {
+ __uint16_t valuelen; /* number of bytes in value */
+ __uint8_t namelen; /* length of name bytes */
+ __uint8_t nameval[1]; /* name/value bytes */
+} xfs_attr_leaf_name_local_t;
+
+typedef struct xfs_attr_leaf_name_remote {
+ xfs_dablk_t valueblk; /* block number of value bytes */
+ __uint32_t valuelen; /* number of bytes in value */
+ __uint8_t namelen; /* length of name bytes */
+ __uint8_t name[1]; /* name bytes */
+} xfs_attr_leaf_name_remote_t;
+
typedef struct xfs_attr_leafblock {
- struct xfs_attr_leaf_hdr { /* constant-structure header block */
- xfs_da_blkinfo_t info; /* block type, links, etc. */
- __uint16_t count; /* count of active leaf_entry's */
- __uint16_t usedbytes; /* num bytes of names/values stored */
- __uint16_t firstused; /* first used byte in name area */
- __uint8_t holes; /* != 0 if blk needs compaction */
- __uint8_t pad1;
- struct xfs_attr_leaf_map { /* RLE map of free bytes */
- __uint16_t base; /* base of free region */
- __uint16_t size; /* length of free region */
- } freemap[XFS_ATTR_LEAF_MAPSIZE]; /* N largest free regions */
- } hdr;
- struct xfs_attr_leaf_entry { /* sorted on key, not name */
- xfs_dahash_t hashval; /* hash value of name */
- __uint16_t nameidx; /* index into buffer of name/value */
- __uint8_t flags; /* LOCAL/ROOT/SECURE/INCOMPLETE flag */
- __uint8_t pad2; /* unused pad byte */
- } entries[1]; /* variable sized array */
- struct xfs_attr_leaf_name_local {
- __uint16_t valuelen; /* number of bytes in value */
- __uint8_t namelen; /* length of name bytes */
- __uint8_t nameval[1]; /* name/value bytes */
- } namelist; /* grows from bottom of buf */
- struct xfs_attr_leaf_name_remote {
- xfs_dablk_t valueblk; /* block number of value bytes */
- __uint32_t valuelen; /* number of bytes in value */
- __uint8_t namelen; /* length of name bytes */
- __uint8_t name[1]; /* name bytes */
- } valuelist; /* grows from bottom of buf */
+ xfs_attr_leaf_hdr_t hdr; /* constant-structure header block */
+ xfs_attr_leaf_entry_t entries[1]; /* sorted on key, not name */
+ xfs_attr_leaf_name_local_t namelist; /* grows from bottom of buf */
+ xfs_attr_leaf_name_remote_t valuelist; /* grows from bottom of buf */
} xfs_attr_leafblock_t;
-typedef struct xfs_attr_leaf_hdr xfs_attr_leaf_hdr_t;
-typedef struct xfs_attr_leaf_map xfs_attr_leaf_map_t;
-typedef struct xfs_attr_leaf_entry xfs_attr_leaf_entry_t;
-typedef struct xfs_attr_leaf_name_local xfs_attr_leaf_name_local_t;
-typedef struct xfs_attr_leaf_name_remote xfs_attr_leaf_name_remote_t;
/*
* Flags used in the leaf_entry[i].flags field.
@@ -150,7 +156,8 @@ xfs_attr_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)
(leafp))[INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT)];
}
-#define XFS_ATTR_LEAF_NAME(leafp,idx) xfs_attr_leaf_name(leafp,idx)
+#define XFS_ATTR_LEAF_NAME(leafp,idx) \
+ xfs_attr_leaf_name(leafp,idx)
static inline char *xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx)
{
return (&((char *)
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index e415a4698e9..70625e577c7 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -2146,13 +2146,176 @@ xfs_bmap_add_extent_hole_real(
 	return 0; /* keep gcc quiet */
}
+/*
+ * Adjust the size of the new extent based on di_extsize and rt extsize.
+ */
+STATIC int
+xfs_bmap_extsize_align(
+ xfs_mount_t *mp,
+ xfs_bmbt_irec_t *gotp, /* next extent pointer */
+ xfs_bmbt_irec_t *prevp, /* previous extent pointer */
+ xfs_extlen_t extsz, /* align to this extent size */
+ int rt, /* is this a realtime inode? */
+ int eof, /* is extent at end-of-file? */
+ int delay, /* creating delalloc extent? */
+ int convert, /* overwriting unwritten extent? */
+ xfs_fileoff_t *offp, /* in/out: aligned offset */
+ xfs_extlen_t *lenp) /* in/out: aligned length */
+{
+ xfs_fileoff_t orig_off; /* original offset */
+ xfs_extlen_t orig_alen; /* original length */
+ xfs_fileoff_t orig_end; /* original off+len */
+ xfs_fileoff_t nexto; /* next file offset */
+ xfs_fileoff_t prevo; /* previous file offset */
+ xfs_fileoff_t align_off; /* temp for offset */
+ xfs_extlen_t align_alen; /* temp for length */
+ xfs_extlen_t temp; /* temp for calculations */
+
+ if (convert)
+ return 0;
+
+ orig_off = align_off = *offp;
+ orig_alen = align_alen = *lenp;
+ orig_end = orig_off + orig_alen;
+
+ /*
+ * If this request overlaps an existing extent, then don't
+ * attempt to perform any additional alignment.
+ */
+ if (!delay && !eof &&
+ (orig_off >= gotp->br_startoff) &&
+ (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
+ return 0;
+ }
+
+ /*
+ * If the file offset is unaligned vs. the extent size
+ * we need to align it. This will be possible unless
+ * the file was previously written with a kernel that didn't
+ * perform this alignment, or if a truncate shot us in the
+ * foot.
+ */
+ temp = do_mod(orig_off, extsz);
+ if (temp) {
+ align_alen += temp;
+ align_off -= temp;
+ }
+ /*
+ * Same adjustment for the end of the requested area.
+ */
+ if ((temp = (align_alen % extsz))) {
+ align_alen += extsz - temp;
+ }
+ /*
+ * If the previous block overlaps with this proposed allocation
+ * then move the start forward without adjusting the length.
+ */
+ if (prevp->br_startoff != NULLFILEOFF) {
+ if (prevp->br_startblock == HOLESTARTBLOCK)
+ prevo = prevp->br_startoff;
+ else
+ prevo = prevp->br_startoff + prevp->br_blockcount;
+ } else
+ prevo = 0;
+ if (align_off != orig_off && align_off < prevo)
+ align_off = prevo;
+ /*
+ * If the next block overlaps with this proposed allocation
+ * then move the start back without adjusting the length,
+ * but not before offset 0.
+ * This may of course make the start overlap previous block,
+ * and if we hit the offset 0 limit then the next block
+ * can still overlap too.
+ */
+ if (!eof && gotp->br_startoff != NULLFILEOFF) {
+ if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
+ (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
+ nexto = gotp->br_startoff + gotp->br_blockcount;
+ else
+ nexto = gotp->br_startoff;
+ } else
+ nexto = NULLFILEOFF;
+ if (!eof &&
+ align_off + align_alen != orig_end &&
+ align_off + align_alen > nexto)
+ align_off = nexto > align_alen ? nexto - align_alen : 0;
+ /*
+ * If we're now overlapping the next or previous extent that
+ * means we can't fit an extsz piece in this hole. Just move
+ * the start forward to the first valid spot and set
+ * the length so we hit the end.
+ */
+ if (align_off != orig_off && align_off < prevo)
+ align_off = prevo;
+ if (align_off + align_alen != orig_end &&
+ align_off + align_alen > nexto &&
+ nexto != NULLFILEOFF) {
+ ASSERT(nexto > prevo);
+ align_alen = nexto - align_off;
+ }
+
+ /*
+ * If realtime, and the result isn't a multiple of the realtime
+ * extent size we need to remove blocks until it is.
+ */
+ if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
+ /*
+ * We're not covering the original request, or
+ * we won't be able to once we fix the length.
+ */
+ if (orig_off < align_off ||
+ orig_end > align_off + align_alen ||
+ align_alen - temp < orig_alen)
+ return XFS_ERROR(EINVAL);
+ /*
+ * Try to fix it by moving the start up.
+ */
+ if (align_off + temp <= orig_off) {
+ align_alen -= temp;
+ align_off += temp;
+ }
+ /*
+ * Try to fix it by moving the end in.
+ */
+ else if (align_off + align_alen - temp >= orig_end)
+ align_alen -= temp;
+ /*
+ * Set the start to the minimum then trim the length.
+ */
+ else {
+ align_alen -= orig_off - align_off;
+ align_off = orig_off;
+ align_alen -= align_alen % mp->m_sb.sb_rextsize;
+ }
+ /*
+ * Result doesn't cover the request, fail it.
+ */
+ if (orig_off < align_off || orig_end > align_off + align_alen)
+ return XFS_ERROR(EINVAL);
+ } else {
+ ASSERT(orig_off >= align_off);
+ ASSERT(orig_end <= align_off + align_alen);
+ }
+
+#ifdef DEBUG
+ if (!eof && gotp->br_startoff != NULLFILEOFF)
+ ASSERT(align_off + align_alen <= gotp->br_startoff);
+ if (prevp->br_startoff != NULLFILEOFF)
+ ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
+#endif
+
+ *lenp = align_alen;
+ *offp = align_off;
+ return 0;
+}
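
Note: a worked example of the rounding that opens xfs_bmap_extsize_align(), as a standalone sketch; the previous/next extent overlap handling and the realtime trimming above are deliberately omitted:

#include <stdio.h>

static void example_extsize_round(unsigned long long *off,
				  unsigned long long *len,
				  unsigned long long extsz)
{
	unsigned long long temp;

	temp = *off % extsz;		/* do_mod(orig_off, extsz) */
	if (temp) {
		*len += temp;		/* grow the request backwards... */
		*off -= temp;		/* ...so the start is aligned */
	}
	temp = *len % extsz;
	if (temp)
		*len += extsz - temp;	/* pad so the end is aligned too */
}

int main(void)
{
	unsigned long long off = 21, len = 10;	/* request covers blocks 21..30 */

	example_extsize_round(&off, &len, 16);
	printf("off=%llu len=%llu\n", off, len);	/* off=16 len=16, blocks 16..31 */
	return 0;
}
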
+
#define XFS_ALLOC_GAP_UNITS 4
/*
* xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
* It figures out where to ask the underlying allocator to put the new extent.
*/
-STATIC int /* error */
+STATIC int
xfs_bmap_alloc(
xfs_bmalloca_t *ap) /* bmap alloc argument struct */
{
@@ -2163,10 +2326,10 @@ xfs_bmap_alloc(
xfs_mount_t *mp; /* mount point structure */
int nullfb; /* true if ap->firstblock isn't set */
int rt; /* true if inode is realtime */
-#ifdef __KERNEL__
- xfs_extlen_t prod=0; /* product factor for allocators */
- xfs_extlen_t ralen=0; /* realtime allocation length */
-#endif
+ xfs_extlen_t prod = 0; /* product factor for allocators */
+ xfs_extlen_t ralen = 0; /* realtime allocation length */
+ xfs_extlen_t align; /* minimum allocation alignment */
+ xfs_rtblock_t rtx;
#define ISVALID(x,y) \
(rt ? \
@@ -2182,125 +2345,25 @@ xfs_bmap_alloc(
nullfb = ap->firstblock == NULLFSBLOCK;
rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
-#ifdef __KERNEL__
if (rt) {
- xfs_extlen_t extsz; /* file extent size for rt */
- xfs_fileoff_t nexto; /* next file offset */
- xfs_extlen_t orig_alen; /* original ap->alen */
- xfs_fileoff_t orig_end; /* original off+len */
- xfs_fileoff_t orig_off; /* original ap->off */
- xfs_extlen_t mod_off; /* modulus calculations */
- xfs_fileoff_t prevo; /* previous file offset */
- xfs_rtblock_t rtx; /* realtime extent number */
- xfs_extlen_t temp; /* temp for rt calculations */
-
- /*
- * Set prod to match the realtime extent size.
- */
- if (!(extsz = ap->ip->i_d.di_extsize))
- extsz = mp->m_sb.sb_rextsize;
- prod = extsz / mp->m_sb.sb_rextsize;
- orig_off = ap->off;
- orig_alen = ap->alen;
- orig_end = orig_off + orig_alen;
- /*
- * If the file offset is unaligned vs. the extent size
- * we need to align it. This will be possible unless
- * the file was previously written with a kernel that didn't
- * perform this alignment.
- */
- mod_off = do_mod(orig_off, extsz);
- if (mod_off) {
- ap->alen += mod_off;
- ap->off -= mod_off;
- }
- /*
- * Same adjustment for the end of the requested area.
- */
- if ((temp = (ap->alen % extsz)))
- ap->alen += extsz - temp;
- /*
- * If the previous block overlaps with this proposed allocation
- * then move the start forward without adjusting the length.
- */
- prevo =
- ap->prevp->br_startoff == NULLFILEOFF ?
- 0 :
- (ap->prevp->br_startoff +
- ap->prevp->br_blockcount);
- if (ap->off != orig_off && ap->off < prevo)
- ap->off = prevo;
- /*
- * If the next block overlaps with this proposed allocation
- * then move the start back without adjusting the length,
- * but not before offset 0.
- * This may of course make the start overlap previous block,
- * and if we hit the offset 0 limit then the next block
- * can still overlap too.
- */
- nexto = (ap->eof || ap->gotp->br_startoff == NULLFILEOFF) ?
- NULLFILEOFF : ap->gotp->br_startoff;
- if (!ap->eof &&
- ap->off + ap->alen != orig_end &&
- ap->off + ap->alen > nexto)
- ap->off = nexto > ap->alen ? nexto - ap->alen : 0;
- /*
- * If we're now overlapping the next or previous extent that
- * means we can't fit an extsz piece in this hole. Just move
- * the start forward to the first valid spot and set
- * the length so we hit the end.
- */
- if ((ap->off != orig_off && ap->off < prevo) ||
- (ap->off + ap->alen != orig_end &&
- ap->off + ap->alen > nexto)) {
- ap->off = prevo;
- ap->alen = nexto - prevo;
- }
- /*
- * If the result isn't a multiple of rtextents we need to
- * remove blocks until it is.
- */
- if ((temp = (ap->alen % mp->m_sb.sb_rextsize))) {
- /*
- * We're not covering the original request, or
- * we won't be able to once we fix the length.
- */
- if (orig_off < ap->off ||
- orig_end > ap->off + ap->alen ||
- ap->alen - temp < orig_alen)
- return XFS_ERROR(EINVAL);
- /*
- * Try to fix it by moving the start up.
- */
- if (ap->off + temp <= orig_off) {
- ap->alen -= temp;
- ap->off += temp;
- }
- /*
- * Try to fix it by moving the end in.
- */
- else if (ap->off + ap->alen - temp >= orig_end)
- ap->alen -= temp;
- /*
- * Set the start to the minimum then trim the length.
- */
- else {
- ap->alen -= orig_off - ap->off;
- ap->off = orig_off;
- ap->alen -= ap->alen % mp->m_sb.sb_rextsize;
- }
- /*
- * Result doesn't cover the request, fail it.
- */
- if (orig_off < ap->off || orig_end > ap->off + ap->alen)
- return XFS_ERROR(EINVAL);
- }
+ align = ap->ip->i_d.di_extsize ?
+ ap->ip->i_d.di_extsize : mp->m_sb.sb_rextsize;
+ /* Set prod to match the extent size */
+ prod = align / mp->m_sb.sb_rextsize;
+
+ error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
+ align, rt, ap->eof, 0,
+ ap->conv, &ap->off, &ap->alen);
+ if (error)
+ return error;
+ ASSERT(ap->alen);
ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0);
+
/*
* If the offset & length are not perfectly aligned
* then kill prod, it will just get us in trouble.
*/
- if (do_mod(ap->off, extsz) || ap->alen % extsz)
+ if (do_mod(ap->off, align) || ap->alen % align)
prod = 1;
/*
* Set ralen to be the actual requested length in rtextents.
@@ -2326,15 +2389,24 @@ xfs_bmap_alloc(
ap->rval = rtx * mp->m_sb.sb_rextsize;
} else
ap->rval = 0;
+ } else {
+ align = (ap->userdata && ap->ip->i_d.di_extsize &&
+ (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)) ?
+ ap->ip->i_d.di_extsize : 0;
+ if (unlikely(align)) {
+ error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
+ align, rt,
+ ap->eof, 0, ap->conv,
+ &ap->off, &ap->alen);
+ ASSERT(!error);
+ ASSERT(ap->alen);
+ }
+ if (nullfb)
+ ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
+ else
+ ap->rval = ap->firstblock;
}
-#else
- if (rt)
- ap->rval = 0;
-#endif /* __KERNEL__ */
- else if (nullfb)
- ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
- else
- ap->rval = ap->firstblock;
+
/*
* If allocating at eof, and there's a previous real block,
 	 * try to use its last block as our starting point.
@@ -2598,11 +2670,12 @@ xfs_bmap_alloc(
args.total = ap->total;
args.minlen = ap->minlen;
}
- if (ap->ip->i_d.di_extsize) {
+ if (unlikely(ap->userdata && ap->ip->i_d.di_extsize &&
+ (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE))) {
args.prod = ap->ip->i_d.di_extsize;
if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod)))
args.mod = (xfs_extlen_t)(args.prod - args.mod);
- } else if (mp->m_sb.sb_blocksize >= NBPP) {
+ } else if (unlikely(mp->m_sb.sb_blocksize >= NBPP)) {
args.prod = 1;
args.mod = 0;
} else {
@@ -3580,14 +3653,16 @@ xfs_bmap_search_extents(
ep = xfs_bmap_do_search_extents(base, lastx, nextents, bno, eofp,
lastxp, gotp, prevp);
- rt = ip->i_d.di_flags & XFS_DIFLAG_REALTIME;
- if(!rt && !gotp->br_startblock && (*lastxp != NULLEXTNUM)) {
+ rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
+ if (unlikely(!rt && !gotp->br_startblock && (*lastxp != NULLEXTNUM))) {
cmn_err(CE_PANIC,"Access to block zero: fs: <%s> inode: %lld "
"start_block : %llx start_off : %llx blkcnt : %llx "
"extent-state : %x \n",
- (ip->i_mount)->m_fsname,(long long)ip->i_ino,
- gotp->br_startblock, gotp->br_startoff,
- gotp->br_blockcount,gotp->br_state);
+ (ip->i_mount)->m_fsname, (long long)ip->i_ino,
+ (unsigned long long)gotp->br_startblock,
+ (unsigned long long)gotp->br_startoff,
+ (unsigned long long)gotp->br_blockcount,
+ gotp->br_state);
}
return ep;
}
@@ -3875,7 +3950,7 @@ xfs_bmap_add_attrfork(
ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
if (!ip->i_d.di_forkoff)
ip->i_d.di_forkoff = mp->m_attroffset >> 3;
- else if (!(mp->m_flags & XFS_MOUNT_COMPAT_ATTR))
+ else if (mp->m_flags & XFS_MOUNT_ATTR2)
version = 2;
break;
default:
@@ -4023,13 +4098,13 @@ xfs_bmap_compute_maxlevels(
*/
if (whichfork == XFS_DATA_FORK) {
maxleafents = MAXEXTNUM;
- sz = (mp->m_flags & XFS_MOUNT_COMPAT_ATTR) ?
- mp->m_attroffset : XFS_BMDR_SPACE_CALC(MINDBTPTRS);
+ sz = (mp->m_flags & XFS_MOUNT_ATTR2) ?
+ XFS_BMDR_SPACE_CALC(MINDBTPTRS) : mp->m_attroffset;
} else {
maxleafents = MAXAEXTNUM;
- sz = (mp->m_flags & XFS_MOUNT_COMPAT_ATTR) ?
- mp->m_sb.sb_inodesize - mp->m_attroffset :
- XFS_BMDR_SPACE_CALC(MINABTPTRS);
+ sz = (mp->m_flags & XFS_MOUNT_ATTR2) ?
+ XFS_BMDR_SPACE_CALC(MINABTPTRS) :
+ mp->m_sb.sb_inodesize - mp->m_attroffset;
}
maxrootrecs = (int)XFS_BTREE_BLOCK_MAXRECS(sz, xfs_bmdr, 0);
minleafrecs = mp->m_bmap_dmnr[0];
@@ -4418,8 +4493,8 @@ xfs_bmap_read_extents(
num_recs = be16_to_cpu(block->bb_numrecs);
if (unlikely(i + num_recs > room)) {
ASSERT(i + num_recs <= room);
- xfs_fs_cmn_err(CE_WARN, ip->i_mount,
- "corrupt dinode %Lu, (btree extents). Unmount and run xfs_repair.",
+ xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+ "corrupt dinode %Lu, (btree extents).",
(unsigned long long) ip->i_ino);
XFS_ERROR_REPORT("xfs_bmap_read_extents(1)",
XFS_ERRLEVEL_LOW,
@@ -4590,6 +4665,7 @@ xfs_bmapi(
char contig; /* allocation must be one extent */
char delay; /* this request is for delayed alloc */
char exact; /* don't do all of wasdelayed extent */
+ char convert; /* unwritten extent I/O completion */
xfs_bmbt_rec_t *ep; /* extent list entry pointer */
int error; /* error return */
xfs_bmbt_irec_t got; /* current extent list record */
@@ -4643,7 +4719,7 @@ xfs_bmapi(
}
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
- rt = XFS_IS_REALTIME_INODE(ip);
+ rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
ifp = XFS_IFORK_PTR(ip, whichfork);
ASSERT(ifp->if_ext_max ==
XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
@@ -4654,6 +4730,7 @@ xfs_bmapi(
delay = (flags & XFS_BMAPI_DELAY) != 0;
trim = (flags & XFS_BMAPI_ENTIRE) == 0;
userdata = (flags & XFS_BMAPI_METADATA) == 0;
+ convert = (flags & XFS_BMAPI_CONVERT) != 0;
exact = (flags & XFS_BMAPI_EXACT) != 0;
rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0;
contig = (flags & XFS_BMAPI_CONTIG) != 0;
@@ -4748,15 +4825,25 @@ xfs_bmapi(
}
minlen = contig ? alen : 1;
if (delay) {
- xfs_extlen_t extsz = 0;
+ xfs_extlen_t extsz;
/* Figure out the extent size, adjust alen */
if (rt) {
if (!(extsz = ip->i_d.di_extsize))
extsz = mp->m_sb.sb_rextsize;
- alen = roundup(alen, extsz);
- extsz = alen / mp->m_sb.sb_rextsize;
+ } else {
+ extsz = ip->i_d.di_extsize;
}
+ if (extsz) {
+ error = xfs_bmap_extsize_align(mp,
+ &got, &prev, extsz,
+ rt, eof, delay, convert,
+ &aoff, &alen);
+ ASSERT(!error);
+ }
+
+ if (rt)
+ extsz = alen / mp->m_sb.sb_rextsize;
/*
* Make a transaction-less quota reservation for
@@ -4785,32 +4872,33 @@ xfs_bmapi(
xfs_bmap_worst_indlen(ip, alen);
ASSERT(indlen > 0);
- if (rt)
+ if (rt) {
error = xfs_mod_incore_sb(mp,
XFS_SBS_FREXTENTS,
-(extsz), rsvd);
- else
+ } else {
error = xfs_mod_incore_sb(mp,
XFS_SBS_FDBLOCKS,
-(alen), rsvd);
+ }
if (!error) {
error = xfs_mod_incore_sb(mp,
XFS_SBS_FDBLOCKS,
-(indlen), rsvd);
- if (error && rt) {
- xfs_mod_incore_sb(ip->i_mount,
+ if (error && rt)
+ xfs_mod_incore_sb(mp,
XFS_SBS_FREXTENTS,
extsz, rsvd);
- } else if (error) {
- xfs_mod_incore_sb(ip->i_mount,
+ else if (error)
+ xfs_mod_incore_sb(mp,
XFS_SBS_FDBLOCKS,
alen, rsvd);
- }
}
if (error) {
- if (XFS_IS_QUOTA_ON(ip->i_mount))
+ if (XFS_IS_QUOTA_ON(mp))
/* unreserve the blocks now */
+ (void)
XFS_TRANS_UNRESERVE_QUOTA_NBLKS(
mp, NULL, ip,
(long)alen, 0, rt ?
@@ -4849,6 +4937,7 @@ xfs_bmapi(
bma.firstblock = *firstblock;
bma.alen = alen;
bma.off = aoff;
+ bma.conv = convert;
bma.wasdel = wasdelay;
bma.minlen = minlen;
bma.low = flist->xbf_low;
@@ -5270,8 +5359,7 @@ xfs_bunmapi(
return 0;
}
XFS_STATS_INC(xs_blk_unmap);
- isrt = (whichfork == XFS_DATA_FORK) &&
- (ip->i_d.di_flags & XFS_DIFLAG_REALTIME);
+ isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
start = bno;
bno = start + len - 1;
ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
@@ -5443,7 +5531,7 @@ xfs_bunmapi(
}
if (wasdel) {
ASSERT(STARTBLOCKVAL(del.br_startblock) > 0);
- /* Update realtim/data freespace, unreserve quota */
+ /* Update realtime/data freespace, unreserve quota */
if (isrt) {
xfs_filblks_t rtexts;
@@ -5451,14 +5539,14 @@ xfs_bunmapi(
do_div(rtexts, mp->m_sb.sb_rextsize);
xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
(int)rtexts, rsvd);
- XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, NULL, ip,
- -((long)del.br_blockcount), 0,
+ (void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
+ NULL, ip, -((long)del.br_blockcount), 0,
XFS_QMOPT_RES_RTBLKS);
} else {
xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
(int)del.br_blockcount, rsvd);
- XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, NULL, ip,
- -((long)del.br_blockcount), 0,
+ (void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
+ NULL, ip, -((long)del.br_blockcount), 0,
XFS_QMOPT_RES_REGBLKS);
}
ip->i_delayed_blks -= del.br_blockcount;
@@ -5652,7 +5740,9 @@ xfs_getbmap(
ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
return XFS_ERROR(EINVAL);
if (whichfork == XFS_DATA_FORK) {
- if (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC) {
+ if ((ip->i_d.di_extsize && (ip->i_d.di_flags &
+ (XFS_DIFLAG_REALTIME|XFS_DIFLAG_EXTSIZE))) ||
+ ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
prealloced = 1;
fixlen = XFS_MAXIOFFSET(mp);
} else {
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 2e0717a0130..12cc63dfc2c 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -62,6 +62,10 @@ typedef struct xfs_bmap_free
#define XFS_BMAPI_IGSTATE 0x200 /* Ignore state - */
/* combine contig. space */
#define XFS_BMAPI_CONTIG 0x400 /* must allocate only one extent */
+/* XFS_BMAPI_DIRECT_IO 0x800 */
+#define XFS_BMAPI_CONVERT 0x1000 /* unwritten extent conversion - */
+ /* need write cache flushing and no */
+ /* additional allocation alignments */
#define XFS_BMAPI_AFLAG(w) xfs_bmapi_aflag(w)
static inline int xfs_bmapi_aflag(int w)
@@ -101,7 +105,8 @@ typedef struct xfs_bmalloca {
char wasdel; /* replacing a delayed allocation */
char userdata;/* set if is user data */
char low; /* low on space, using seq'l ags */
- char aeof; /* allocated space at eof */
+ char aeof; /* allocated space at eof */
+ char conv; /* overwriting unwritten extents */
} xfs_bmalloca_t;
#ifdef __KERNEL__
diff --git a/fs/xfs/xfs_clnt.h b/fs/xfs/xfs_clnt.h
index 328a528b926..f57cc9ac875 100644
--- a/fs/xfs/xfs_clnt.h
+++ b/fs/xfs/xfs_clnt.h
@@ -57,7 +57,7 @@ struct xfs_mount_args {
/*
* XFS mount option flags -- args->flags1
*/
-#define XFSMNT_COMPAT_ATTR 0x00000001 /* do not use ATTR2 format */
+#define XFSMNT_ATTR2 0x00000001 /* allow ATTR2 EA format */
#define XFSMNT_WSYNC 0x00000002 /* safe mode nfs mount
* compatible */
#define XFSMNT_INO64 0x00000004 /* move inode numbers up
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index 070259a4254..c6191d00ad2 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -60,8 +60,6 @@ xfs_swapext(
xfs_bstat_t *sbp;
struct file *fp = NULL, *tfp = NULL;
vnode_t *vp, *tvp;
- bhv_desc_t *bdp, *tbdp;
- vn_bhv_head_t *bhp, *tbhp;
static uint lock_flags = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL;
int ilf_fields, tilf_fields;
int error = 0;
@@ -90,13 +88,10 @@ xfs_swapext(
goto error0;
}
- bhp = VN_BHV_HEAD(vp);
- bdp = vn_bhv_lookup(bhp, &xfs_vnodeops);
- if (bdp == NULL) {
+ ip = xfs_vtoi(vp);
+ if (ip == NULL) {
error = XFS_ERROR(EBADF);
goto error0;
- } else {
- ip = XFS_BHVTOI(bdp);
}
if (((tfp = fget((int)sxp->sx_fdtmp)) == NULL) ||
@@ -105,13 +100,10 @@ xfs_swapext(
goto error0;
}
- tbhp = VN_BHV_HEAD(tvp);
- tbdp = vn_bhv_lookup(tbhp, &xfs_vnodeops);
- if (tbdp == NULL) {
+ tip = xfs_vtoi(tvp);
+ if (tip == NULL) {
error = XFS_ERROR(EBADF);
goto error0;
- } else {
- tip = XFS_BHVTOI(tbdp);
}
if (ip->i_mount != tip->i_mount) {
diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h
index c5a0e537ff1..79d0d9e1fba 100644
--- a/fs/xfs/xfs_dinode.h
+++ b/fs/xfs/xfs_dinode.h
@@ -199,10 +199,16 @@ typedef enum xfs_dinode_fmt
#define XFS_DFORK_DSIZE(dip,mp) \
XFS_CFORK_DSIZE_DISK(&(dip)->di_core, mp)
+#define XFS_DFORK_DSIZE_HOST(dip,mp) \
+ XFS_CFORK_DSIZE(&(dip)->di_core, mp)
#define XFS_DFORK_ASIZE(dip,mp) \
XFS_CFORK_ASIZE_DISK(&(dip)->di_core, mp)
+#define XFS_DFORK_ASIZE_HOST(dip,mp) \
+ XFS_CFORK_ASIZE(&(dip)->di_core, mp)
#define XFS_DFORK_SIZE(dip,mp,w) \
XFS_CFORK_SIZE_DISK(&(dip)->di_core, mp, w)
+#define XFS_DFORK_SIZE_HOST(dip,mp,w) \
+ XFS_CFORK_SIZE(&(dip)->di_core, mp, w)
#define XFS_DFORK_Q(dip) XFS_CFORK_Q_DISK(&(dip)->di_core)
#define XFS_DFORK_BOFF(dip) XFS_CFORK_BOFF_DISK(&(dip)->di_core)
@@ -216,6 +222,7 @@ typedef enum xfs_dinode_fmt
#define XFS_CFORK_FMT_SET(dcp,w,n) \
((w) == XFS_DATA_FORK ? \
((dcp)->di_format = (n)) : ((dcp)->di_aformat = (n)))
+#define XFS_DFORK_FORMAT(dip,w) XFS_CFORK_FORMAT(&(dip)->di_core, w)
#define XFS_CFORK_NEXTENTS_DISK(dcp,w) \
((w) == XFS_DATA_FORK ? \
@@ -223,13 +230,13 @@ typedef enum xfs_dinode_fmt
INT_GET((dcp)->di_anextents, ARCH_CONVERT))
#define XFS_CFORK_NEXTENTS(dcp,w) \
((w) == XFS_DATA_FORK ? (dcp)->di_nextents : (dcp)->di_anextents)
+#define XFS_DFORK_NEXTENTS(dip,w) XFS_CFORK_NEXTENTS_DISK(&(dip)->di_core, w)
+#define XFS_DFORK_NEXTENTS_HOST(dip,w) XFS_CFORK_NEXTENTS(&(dip)->di_core, w)
#define XFS_CFORK_NEXT_SET(dcp,w,n) \
((w) == XFS_DATA_FORK ? \
((dcp)->di_nextents = (n)) : ((dcp)->di_anextents = (n)))
-#define XFS_DFORK_NEXTENTS(dip,w) XFS_CFORK_NEXTENTS_DISK(&(dip)->di_core, w)
-
#define XFS_BUF_TO_DINODE(bp) ((xfs_dinode_t *)XFS_BUF_PTR(bp))
/*
@@ -246,8 +253,10 @@ typedef enum xfs_dinode_fmt
#define XFS_DIFLAG_NOATIME_BIT 6 /* do not update atime */
#define XFS_DIFLAG_NODUMP_BIT 7 /* do not dump */
#define XFS_DIFLAG_RTINHERIT_BIT 8 /* create with realtime bit set */
-#define XFS_DIFLAG_PROJINHERIT_BIT 9 /* create with parents projid */
-#define XFS_DIFLAG_NOSYMLINKS_BIT 10 /* disallow symlink creation */
+#define XFS_DIFLAG_PROJINHERIT_BIT 9 /* create with parents projid */
+#define XFS_DIFLAG_NOSYMLINKS_BIT 10 /* disallow symlink creation */
+#define XFS_DIFLAG_EXTSIZE_BIT 11 /* inode extent size allocator hint */
+#define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */
#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT)
#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT)
#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT)
@@ -259,11 +268,14 @@ typedef enum xfs_dinode_fmt
#define XFS_DIFLAG_RTINHERIT (1 << XFS_DIFLAG_RTINHERIT_BIT)
#define XFS_DIFLAG_PROJINHERIT (1 << XFS_DIFLAG_PROJINHERIT_BIT)
#define XFS_DIFLAG_NOSYMLINKS (1 << XFS_DIFLAG_NOSYMLINKS_BIT)
+#define XFS_DIFLAG_EXTSIZE (1 << XFS_DIFLAG_EXTSIZE_BIT)
+#define XFS_DIFLAG_EXTSZINHERIT (1 << XFS_DIFLAG_EXTSZINHERIT_BIT)
#define XFS_DIFLAG_ANY \
(XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \
XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \
XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \
- XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS)
+ XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS | XFS_DIFLAG_EXTSIZE | \
+ XFS_DIFLAG_EXTSZINHERIT)
#endif /* __XFS_DINODE_H__ */
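
Note: illustrative only — how the new XFS_DIFLAG_EXTSIZE hint is intended to be tested, mirroring the checks added to xfs_bmap_alloc() earlier in this diff; the structure below is a hypothetical stand-in for the real inode core fields:

struct example_dinode_core {
	unsigned short	di_flags;
	unsigned int	di_extsize;
};

static int example_uses_extsize_hint(const struct example_dinode_core *d)
{
	/* the extent size hint applies only when the flag and a size are both set */
	return (d->di_flags & XFS_DIFLAG_EXTSIZE) && d->di_extsize != 0;
}
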
diff --git a/fs/xfs/xfs_dir.c b/fs/xfs/xfs_dir.c
index 3dd30391f55..bb87d2a700a 100644
--- a/fs/xfs/xfs_dir.c
+++ b/fs/xfs/xfs_dir.c
@@ -176,7 +176,7 @@ xfs_dir_mount(xfs_mount_t *mp)
uint shortcount, leafcount, count;
mp->m_dirversion = 1;
- if (mp->m_flags & XFS_MOUNT_COMPAT_ATTR) {
+ if (!(mp->m_flags & XFS_MOUNT_ATTR2)) {
shortcount = (mp->m_attroffset -
(uint)sizeof(xfs_dir_sf_hdr_t)) /
(uint)sizeof(xfs_dir_sf_entry_t);
diff --git a/fs/xfs/xfs_dir.h b/fs/xfs/xfs_dir.h
index 488defe86ba..8cc8afb9f6c 100644
--- a/fs/xfs/xfs_dir.h
+++ b/fs/xfs/xfs_dir.h
@@ -135,6 +135,8 @@ void xfs_dir_startup(void); /* called exactly once */
((mp)->m_dirops.xd_shortform_to_single(args))
#define XFS_DIR_IS_V1(mp) ((mp)->m_dirversion == 1)
+#define XFS_DIR_IS_V2(mp) ((mp)->m_dirversion == 2)
extern xfs_dirops_t xfsv1_dirops;
+extern xfs_dirops_t xfsv2_dirops;
#endif /* __XFS_DIR_H__ */
diff --git a/fs/xfs/xfs_dir2.h b/fs/xfs/xfs_dir2.h
index 7e24ffeda9e..3158f5dc431 100644
--- a/fs/xfs/xfs_dir2.h
+++ b/fs/xfs/xfs_dir2.h
@@ -72,9 +72,6 @@ typedef struct xfs_dir2_put_args {
struct uio *uio; /* uio control structure */
} xfs_dir2_put_args_t;
-#define XFS_DIR_IS_V2(mp) ((mp)->m_dirversion == 2)
-extern xfs_dirops_t xfsv2_dirops;
-
/*
* Other interfaces used by the rest of the dir v2 code.
*/
diff --git a/fs/xfs/xfs_dir_leaf.h b/fs/xfs/xfs_dir_leaf.h
index ab6b09eef9a..eb8cd9a4667 100644
--- a/fs/xfs/xfs_dir_leaf.h
+++ b/fs/xfs/xfs_dir_leaf.h
@@ -67,34 +67,38 @@ struct xfs_trans;
*/
#define XFS_DIR_LEAF_MAPSIZE 3 /* how many freespace slots */
+typedef struct xfs_dir_leaf_map { /* RLE map of free bytes */
+ __uint16_t base; /* base of free region */
+ __uint16_t size; /* run length of free region */
+} xfs_dir_leaf_map_t;
+
+typedef struct xfs_dir_leaf_hdr { /* constant-structure header block */
+ xfs_da_blkinfo_t info; /* block type, links, etc. */
+ __uint16_t count; /* count of active leaf_entry's */
+ __uint16_t namebytes; /* num bytes of name strings stored */
+ __uint16_t firstused; /* first used byte in name area */
+ __uint8_t holes; /* != 0 if blk needs compaction */
+ __uint8_t pad1;
+ xfs_dir_leaf_map_t freemap[XFS_DIR_LEAF_MAPSIZE];
+} xfs_dir_leaf_hdr_t;
+
+typedef struct xfs_dir_leaf_entry { /* sorted on key, not name */
+ xfs_dahash_t hashval; /* hash value of name */
+ __uint16_t nameidx; /* index into buffer of name */
+ __uint8_t namelen; /* length of name string */
+ __uint8_t pad2;
+} xfs_dir_leaf_entry_t;
+
+typedef struct xfs_dir_leaf_name {
+ xfs_dir_ino_t inumber; /* inode number for this key */
+ __uint8_t name[1]; /* name string itself */
+} xfs_dir_leaf_name_t;
+
typedef struct xfs_dir_leafblock {
- struct xfs_dir_leaf_hdr { /* constant-structure header block */
- xfs_da_blkinfo_t info; /* block type, links, etc. */
- __uint16_t count; /* count of active leaf_entry's */
- __uint16_t namebytes; /* num bytes of name strings stored */
- __uint16_t firstused; /* first used byte in name area */
- __uint8_t holes; /* != 0 if blk needs compaction */
- __uint8_t pad1;
- struct xfs_dir_leaf_map {/* RLE map of free bytes */
- __uint16_t base; /* base of free region */
- __uint16_t size; /* run length of free region */
- } freemap[XFS_DIR_LEAF_MAPSIZE]; /* N largest free regions */
- } hdr;
- struct xfs_dir_leaf_entry { /* sorted on key, not name */
- xfs_dahash_t hashval; /* hash value of name */
- __uint16_t nameidx; /* index into buffer of name */
- __uint8_t namelen; /* length of name string */
- __uint8_t pad2;
- } entries[1]; /* var sized array */
- struct xfs_dir_leaf_name {
- xfs_dir_ino_t inumber; /* inode number for this key */
- __uint8_t name[1]; /* name string itself */
- } namelist[1]; /* grows from bottom of buf */
+ xfs_dir_leaf_hdr_t hdr; /* constant-structure header block */
+ xfs_dir_leaf_entry_t entries[1]; /* var sized array */
+ xfs_dir_leaf_name_t namelist[1]; /* grows from bottom of buf */
} xfs_dir_leafblock_t;
-typedef struct xfs_dir_leaf_hdr xfs_dir_leaf_hdr_t;
-typedef struct xfs_dir_leaf_map xfs_dir_leaf_map_t;
-typedef struct xfs_dir_leaf_entry xfs_dir_leaf_entry_t;
-typedef struct xfs_dir_leaf_name xfs_dir_leaf_name_t;
/*
* Length of name for which a 512-byte block filesystem
@@ -126,11 +130,10 @@ typedef union {
#define XFS_PUT_COOKIE(c,mp,bno,entry,hash) \
((c).s.be = XFS_DA_MAKE_BNOENTRY(mp, bno, entry), (c).s.h = (hash))
-typedef struct xfs_dir_put_args
-{
+typedef struct xfs_dir_put_args {
xfs_dircook_t cook; /* cookie of (next) entry */
xfs_intino_t ino; /* inode number */
- struct xfs_dirent *dbp; /* buffer pointer */
+ struct xfs_dirent *dbp; /* buffer pointer */
char *name; /* directory entry name */
int namelen; /* length of name */
int done; /* output: set if value was stored */
@@ -138,7 +141,8 @@ typedef struct xfs_dir_put_args
struct uio *uio; /* uio control structure */
} xfs_dir_put_args_t;
-#define XFS_DIR_LEAF_ENTSIZE_BYNAME(len) xfs_dir_leaf_entsize_byname(len)
+#define XFS_DIR_LEAF_ENTSIZE_BYNAME(len) \
+ xfs_dir_leaf_entsize_byname(len)
static inline int xfs_dir_leaf_entsize_byname(int len)
{
return (uint)sizeof(xfs_dir_leaf_name_t)-1 + len;
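
Note: the hoisted declarations keep the usual variable-length entry idiom — each namelist element is a fixed prefix plus a name[1] array, and xfs_dir_leaf_entsize_byname() sizes an entry by adding the name length and subtracting the one byte the struct already counts. A standalone sketch of that arithmetic (dir_leaf_name below is an illustrative stand-in, not the kernel structure):

#include <stdint.h>
#include <stdio.h>

/* Standalone model of the on-disk name entry: a fixed prefix
 * followed by the name bytes, declared as name[1] so the struct
 * already accounts for exactly one byte of name. */
typedef struct dir_leaf_name {
	uint8_t inumber[8];	/* stands in for xfs_dir_ino_t */
	uint8_t name[1];	/* name string grows past the struct */
} dir_leaf_name_t;

/* Same arithmetic as xfs_dir_leaf_entsize_byname(): subtract the
 * one name byte counted in sizeof(), then add the real length. */
static inline int dir_leaf_entsize_byname(int len)
{
	return (int)sizeof(dir_leaf_name_t) - 1 + len;
}

int main(void)
{
	printf("entsize(\"foo\") = %d bytes\n", dir_leaf_entsize_byname(3));
	return 0;
}
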
diff --git a/fs/xfs/xfs_dmapi.h b/fs/xfs/xfs_dmapi.h
index 864bf695568..b4c7f2bc55a 100644
--- a/fs/xfs/xfs_dmapi.h
+++ b/fs/xfs/xfs_dmapi.h
@@ -152,7 +152,7 @@ typedef enum {
#define DM_FLAGS_NDELAY 0x001 /* return EAGAIN after dm_pending() */
#define DM_FLAGS_UNWANTED 0x002 /* event not in fsys dm_eventset_t */
-#define DM_FLAGS_ISEM 0x004 /* thread holds i_sem */
+#define DM_FLAGS_IMUX 0x004 /* thread holds i_mutex */
#define DM_FLAGS_IALLOCSEM_RD 0x010 /* thread holds i_alloc_sem rd */
#define DM_FLAGS_IALLOCSEM_WR 0x020 /* thread holds i_alloc_sem wr */
@@ -161,21 +161,21 @@ typedef enum {
*/
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \
- DM_FLAGS_ISEM : 0)
-#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_ISEM)
+ DM_FLAGS_IMUX : 0)
+#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_IMUX)
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,22))
#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \
- DM_FLAGS_IALLOCSEM_RD : DM_FLAGS_ISEM)
-#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_ISEM)
+ DM_FLAGS_IALLOCSEM_RD : DM_FLAGS_IMUX)
+#define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_IMUX)
#endif
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,21)
#define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \
- 0 : DM_FLAGS_ISEM)
-#define DM_SEM_FLAG_WR (DM_FLAGS_ISEM)
+ 0 : DM_FLAGS_IMUX)
+#define DM_SEM_FLAG_WR (DM_FLAGS_IMUX)
#endif
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index d7b6b5d1670..2a21c502401 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -54,7 +54,6 @@ xfs_error_trap(int e)
if (e != xfs_etrap[i])
continue;
cmn_err(CE_NOTE, "xfs_error_trap: error %d", e);
- debug_stop_all_cpus((void *)-1LL);
BUG();
break;
}
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index 06d8a8426c1..26b8e709a56 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -18,9 +18,6 @@
#ifndef __XFS_ERROR_H__
#define __XFS_ERROR_H__
-#define prdev(fmt,targ,args...) \
- printk("XFS: device %s - " fmt "\n", XFS_BUFTARG_NAME(targ), ## args)
-
#define XFS_ERECOVER 1 /* Failure to recover log */
#define XFS_ELOGSTAT 2 /* Failure to stat log in user space */
#define XFS_ENOLOGSPACE 3 /* Reservation too large */
@@ -182,8 +179,11 @@ extern int xfs_errortag_clearall_umount(int64_t fsid, char *fsname, int loud);
struct xfs_mount;
/* PRINTFLIKE4 */
extern void xfs_cmn_err(int panic_tag, int level, struct xfs_mount *mp,
- char *fmt, ...);
+ char *fmt, ...);
/* PRINTFLIKE3 */
extern void xfs_fs_cmn_err(int level, struct xfs_mount *mp, char *fmt, ...);
+#define xfs_fs_repair_cmn_err(level, mp, fmt, args...) \
+ xfs_fs_cmn_err(level, mp, fmt " Unmount and run xfs_repair.", ## args)
+
#endif /* __XFS_ERROR_H__ */
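
The new xfs_fs_repair_cmn_err() wrapper relies on compile-time concatenation of adjacent string literals, so every caller's format string picks up the trailing "Unmount and run xfs_repair." advice without repeating it at each call site. A minimal userspace model of the same trick (repair_err() and the fprintf() target are stand-ins, not the kernel's xfs_fs_cmn_err()):

#include <stdio.h>

/* GNU-style variadic macro, as used in the kernel header above:
 * the fmt literal and the suffix literal are pasted together by
 * the compiler, and the caller's arguments are forwarded. */
#define repair_err(fmt, args...) \
	fprintf(stderr, fmt "  Unmount and run xfs_repair.\n", ## args)

int main(void)
{
	repair_err("corrupt inode %llu (btree).", 42ULL);
	return 0;
}
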
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index ba096f80f48..14010f1fa82 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -3,15 +3,15 @@
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * GNU Lesser General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
+ * You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -65,6 +65,8 @@ struct fsxattr {
#define XFS_XFLAG_RTINHERIT 0x00000100 /* create with rt bit set */
#define XFS_XFLAG_PROJINHERIT 0x00000200 /* create with parents projid */
#define XFS_XFLAG_NOSYMLINKS 0x00000400 /* disallow symlink creation */
+#define XFS_XFLAG_EXTSIZE 0x00000800 /* extent size allocator hint */
+#define XFS_XFLAG_EXTSZINHERIT 0x00001000 /* inherit inode extent size */
#define XFS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */
/*
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index d1236d6f404..163031c1e39 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -540,6 +540,32 @@ xfs_reserve_blocks(
return(0);
}
+void
+xfs_fs_log_dummy(xfs_mount_t *mp)
+{
+ xfs_trans_t *tp;
+ xfs_inode_t *ip;
+
+
+ tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
+ atomic_inc(&mp->m_active_trans);
+ if (xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0)) {
+ xfs_trans_cancel(tp, 0);
+ return;
+ }
+
+ ip = mp->m_rootip;
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ xfs_trans_ihold(tp, ip);
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ xfs_trans_set_sync(tp);
+ xfs_trans_commit(tp, 0, NULL);
+
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+}
+
int
xfs_fs_goingdown(
xfs_mount_t *mp,
diff --git a/fs/xfs/xfs_fsops.h b/fs/xfs/xfs_fsops.h
index f32713f14f9..300d0c9d61a 100644
--- a/fs/xfs/xfs_fsops.h
+++ b/fs/xfs/xfs_fsops.h
@@ -25,5 +25,6 @@ extern int xfs_fs_counts(xfs_mount_t *mp, xfs_fsop_counts_t *cnt);
extern int xfs_reserve_blocks(xfs_mount_t *mp, __uint64_t *inval,
xfs_fsop_resblks_t *outval);
extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags);
+extern void xfs_fs_log_dummy(xfs_mount_t *mp);
#endif /* __XFS_FSOPS_H__ */
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index fc19eedbd11..8e380a1fb79 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -493,7 +493,6 @@ xfs_iget(
retry:
if ((inode = iget_locked(XFS_MTOVFS(mp)->vfs_super, ino))) {
- bhv_desc_t *bdp;
xfs_inode_t *ip;
vp = LINVFS_GET_VP(inode);
@@ -517,14 +516,12 @@ retry:
* to wait for the inode to go away.
*/
if (is_bad_inode(inode) ||
- ((bdp = vn_bhv_lookup(VN_BHV_HEAD(vp),
- &xfs_vnodeops)) == NULL)) {
+ ((ip = xfs_vtoi(vp)) == NULL)) {
iput(inode);
delay(1);
goto retry;
}
- ip = XFS_BHVTOI(bdp);
if (lock_flags != 0)
xfs_ilock(ip, lock_flags);
XFS_STATS_INC(xs_ig_found);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index df0d4572d70..1d7f5a7e063 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -404,9 +404,8 @@ xfs_iformat(
INT_GET(dip->di_core.di_nextents, ARCH_CONVERT) +
INT_GET(dip->di_core.di_anextents, ARCH_CONVERT) >
INT_GET(dip->di_core.di_nblocks, ARCH_CONVERT))) {
- xfs_fs_cmn_err(CE_WARN, ip->i_mount,
- "corrupt dinode %Lu, extent total = %d, nblocks = %Lu."
- " Unmount and run xfs_repair.",
+ xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+ "corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
(unsigned long long)ip->i_ino,
(int)(INT_GET(dip->di_core.di_nextents, ARCH_CONVERT)
+ INT_GET(dip->di_core.di_anextents, ARCH_CONVERT)),
@@ -418,9 +417,8 @@ xfs_iformat(
}
if (unlikely(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT) > ip->i_mount->m_sb.sb_inodesize)) {
- xfs_fs_cmn_err(CE_WARN, ip->i_mount,
- "corrupt dinode %Lu, forkoff = 0x%x."
- " Unmount and run xfs_repair.",
+ xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+ "corrupt dinode %Lu, forkoff = 0x%x.",
(unsigned long long)ip->i_ino,
(int)(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT)));
XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
@@ -451,8 +449,9 @@ xfs_iformat(
* no local regular files yet
*/
if (unlikely((INT_GET(dip->di_core.di_mode, ARCH_CONVERT) & S_IFMT) == S_IFREG)) {
- xfs_fs_cmn_err(CE_WARN, ip->i_mount,
- "corrupt inode (local format for regular file) %Lu. Unmount and run xfs_repair.",
+ xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+ "corrupt inode %Lu "
+ "(local format for regular file).",
(unsigned long long) ip->i_ino);
XFS_CORRUPTION_ERROR("xfs_iformat(4)",
XFS_ERRLEVEL_LOW,
@@ -462,8 +461,9 @@ xfs_iformat(
di_size = INT_GET(dip->di_core.di_size, ARCH_CONVERT);
if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
- xfs_fs_cmn_err(CE_WARN, ip->i_mount,
- "corrupt inode %Lu (bad size %Ld for local inode). Unmount and run xfs_repair.",
+ xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+ "corrupt inode %Lu "
+ "(bad size %Ld for local inode).",
(unsigned long long) ip->i_ino,
(long long) di_size);
XFS_CORRUPTION_ERROR("xfs_iformat(5)",
@@ -551,8 +551,9 @@ xfs_iformat_local(
* kmem_alloc() or memcpy() below.
*/
if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
- xfs_fs_cmn_err(CE_WARN, ip->i_mount,
- "corrupt inode %Lu (bad size %d for local fork, size = %d). Unmount and run xfs_repair.",
+ xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+ "corrupt inode %Lu "
+ "(bad size %d for local fork, size = %d).",
(unsigned long long) ip->i_ino, size,
XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
@@ -610,8 +611,8 @@ xfs_iformat_extents(
* kmem_alloc() or memcpy() below.
*/
if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
- xfs_fs_cmn_err(CE_WARN, ip->i_mount,
- "corrupt inode %Lu ((a)extents = %d). Unmount and run xfs_repair.",
+ xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+ "corrupt inode %Lu ((a)extents = %d).",
(unsigned long long) ip->i_ino, nex);
XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
ip->i_mount, dip);
@@ -692,8 +693,8 @@ xfs_iformat_btree(
|| XFS_BMDR_SPACE_CALC(nrecs) >
XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
|| XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
- xfs_fs_cmn_err(CE_WARN, ip->i_mount,
- "corrupt inode %Lu (btree). Unmount and run xfs_repair.",
+ xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+ "corrupt inode %Lu (btree).",
(unsigned long long) ip->i_ino);
XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
ip->i_mount);
@@ -809,6 +810,10 @@ _xfs_dic2xflags(
flags |= XFS_XFLAG_PROJINHERIT;
if (di_flags & XFS_DIFLAG_NOSYMLINKS)
flags |= XFS_XFLAG_NOSYMLINKS;
+ if (di_flags & XFS_DIFLAG_EXTSIZE)
+ flags |= XFS_XFLAG_EXTSIZE;
+ if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
+ flags |= XFS_XFLAG_EXTSZINHERIT;
}
return flags;
@@ -1192,11 +1197,19 @@ xfs_ialloc(
if ((mode & S_IFMT) == S_IFDIR) {
if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
di_flags |= XFS_DIFLAG_RTINHERIT;
- } else {
+ if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
+ di_flags |= XFS_DIFLAG_EXTSZINHERIT;
+ ip->i_d.di_extsize = pip->i_d.di_extsize;
+ }
+ } else if ((mode & S_IFMT) == S_IFREG) {
if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) {
di_flags |= XFS_DIFLAG_REALTIME;
ip->i_iocore.io_flags |= XFS_IOCORE_RT;
}
+ if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
+ di_flags |= XFS_DIFLAG_EXTSIZE;
+ ip->i_d.di_extsize = pip->i_d.di_extsize;
+ }
}
if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
xfs_inherit_noatime)
@@ -1262,7 +1275,7 @@ xfs_isize_check(
if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
return;
- if ( ip->i_d.di_flags & XFS_DIFLAG_REALTIME )
+ if (ip->i_d.di_flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_EXTSIZE))
return;
nimaps = 2;
@@ -1765,22 +1778,19 @@ xfs_igrow_start(
xfs_fsize_t new_size,
cred_t *credp)
{
- xfs_fsize_t isize;
int error;
ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
ASSERT(new_size > ip->i_d.di_size);
- error = 0;
- isize = ip->i_d.di_size;
/*
* Zero any pages that may have been created by
* xfs_write_file() beyond the end of the file
* and any blocks between the old and new file sizes.
*/
- error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size, isize,
- new_size);
+ error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size,
+ ip->i_d.di_size, new_size);
return error;
}
@@ -3355,6 +3365,11 @@ xfs_iflush_int(
ip->i_update_core = 0;
SYNCHRONIZE();
+ /*
+ * Make sure to get the latest atime from the Linux inode.
+ */
+ xfs_synchronize_atime(ip);
+
if (XFS_TEST_ERROR(INT_GET(dip->di_core.di_magic,ARCH_CONVERT) != XFS_DINODE_MAGIC,
mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 124d30e6143..1cfbcf18ce8 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -436,6 +436,10 @@ void xfs_ichgtime(xfs_inode_t *, int);
xfs_fsize_t xfs_file_last_byte(xfs_inode_t *);
void xfs_lock_inodes(xfs_inode_t **, int, int, uint);
+xfs_inode_t *xfs_vtoi(struct vnode *vp);
+
+void xfs_synchronize_atime(xfs_inode_t *);
+
#define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount))
#ifdef DEBUG
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 7f3363c621e..36aa1fcb90a 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -271,6 +271,11 @@ xfs_inode_item_format(
if (ip->i_update_size)
ip->i_update_size = 0;
+ /*
+ * Make sure to get the latest atime from the Linux inode.
+ */
+ xfs_synchronize_atime(ip);
+
vecp->i_addr = (xfs_caddr_t)&ip->i_d;
vecp->i_len = sizeof(xfs_dinode_core_t);
XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ICORE);
@@ -603,7 +608,7 @@ xfs_inode_item_trylock(
if (iip->ili_pushbuf_flag == 0) {
iip->ili_pushbuf_flag = 1;
#ifdef DEBUG
- iip->ili_push_owner = get_thread_id();
+ iip->ili_push_owner = current_pid();
#endif
/*
* Inode is left locked in shared mode.
@@ -782,7 +787,7 @@ xfs_inode_item_pushbuf(
* trying to duplicate our effort.
*/
ASSERT(iip->ili_pushbuf_flag != 0);
- ASSERT(iip->ili_push_owner == get_thread_id());
+ ASSERT(iip->ili_push_owner == current_pid());
/*
* If flushlock isn't locked anymore, chances are that the
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 45a77a3a6c0..788917f355c 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -40,7 +40,6 @@
#include "xfs_ialloc.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
-#include "xfs_bit.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
@@ -263,7 +262,7 @@ phase2:
case BMAPI_WRITE:
/* If we found an extent, return it */
if (nimaps &&
- (imap.br_startblock != HOLESTARTBLOCK) &&
+ (imap.br_startblock != HOLESTARTBLOCK) &&
(imap.br_startblock != DELAYSTARTBLOCK)) {
xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, io,
offset, count, iomapp, &imap, flags);
@@ -318,6 +317,58 @@ out:
}
STATIC int
+xfs_iomap_eof_align_last_fsb(
+ xfs_mount_t *mp,
+ xfs_iocore_t *io,
+ xfs_fsize_t isize,
+ xfs_extlen_t extsize,
+ xfs_fileoff_t *last_fsb)
+{
+ xfs_fileoff_t new_last_fsb = 0;
+ xfs_extlen_t align;
+ int eof, error;
+
+ if (io->io_flags & XFS_IOCORE_RT)
+ ;
+ /*
+ * If mounted with the "-o swalloc" option, roundup the allocation
+ * request to a stripe width boundary if the file size is >=
+ * stripe width and we are allocating past the allocation eof.
+ */
+ else if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC) &&
+ (isize >= XFS_FSB_TO_B(mp, mp->m_swidth)))
+ new_last_fsb = roundup_64(*last_fsb, mp->m_swidth);
+ /*
+ * Roundup the allocation request to a stripe unit (m_dalign) boundary
+ * if the file size is >= stripe unit size, and we are allocating past
+ * the allocation eof.
+ */
+ else if (mp->m_dalign && (isize >= XFS_FSB_TO_B(mp, mp->m_dalign)))
+ new_last_fsb = roundup_64(*last_fsb, mp->m_dalign);
+
+ /*
+ * Always round up the allocation request to an extent boundary
+ * (when file on a real-time subvolume or has di_extsize hint).
+ */
+ if (extsize) {
+ if (new_last_fsb)
+ align = roundup_64(new_last_fsb, extsize);
+ else
+ align = extsize;
+ new_last_fsb = roundup_64(*last_fsb, align);
+ }
+
+ if (new_last_fsb) {
+ error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
+ if (error)
+ return error;
+ if (eof)
+ *last_fsb = new_last_fsb;
+ }
+ return 0;
+}
+
+STATIC int
xfs_flush_space(
xfs_inode_t *ip,
int *fsynced,
@@ -363,19 +414,20 @@ xfs_iomap_write_direct(
xfs_iocore_t *io = &ip->i_iocore;
xfs_fileoff_t offset_fsb;
xfs_fileoff_t last_fsb;
- xfs_filblks_t count_fsb;
+ xfs_filblks_t count_fsb, resaligned;
xfs_fsblock_t firstfsb;
+ xfs_extlen_t extsz, temp;
+ xfs_fsize_t isize;
int nimaps;
- int error;
int bmapi_flag;
int quota_flag;
int rt;
xfs_trans_t *tp;
xfs_bmbt_irec_t imap;
xfs_bmap_free_t free_list;
- xfs_filblks_t qblocks, resblks;
+ uint qblocks, resblks, resrtextents;
int committed;
- int resrtextents;
+ int error;
/*
* Make sure that the dquots are there. This doesn't hold
@@ -385,38 +437,53 @@ xfs_iomap_write_direct(
if (error)
return XFS_ERROR(error);
- offset_fsb = XFS_B_TO_FSBT(mp, offset);
- last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
- count_fsb = last_fsb - offset_fsb;
- if (found && (ret_imap->br_startblock == HOLESTARTBLOCK)) {
- xfs_fileoff_t map_last_fsb;
-
- map_last_fsb = ret_imap->br_blockcount + ret_imap->br_startoff;
- if (map_last_fsb < last_fsb) {
- last_fsb = map_last_fsb;
- count_fsb = last_fsb - offset_fsb;
- }
- ASSERT(count_fsb > 0);
+ rt = XFS_IS_REALTIME_INODE(ip);
+ if (unlikely(rt)) {
+ if (!(extsz = ip->i_d.di_extsize))
+ extsz = mp->m_sb.sb_rextsize;
+ } else {
+ extsz = ip->i_d.di_extsize;
}
- /*
- * Determine if reserving space on the data or realtime partition.
- */
- if ((rt = XFS_IS_REALTIME_INODE(ip))) {
- xfs_extlen_t extsz;
+ isize = ip->i_d.di_size;
+ if (io->io_new_size > isize)
+ isize = io->io_new_size;
- if (!(extsz = ip->i_d.di_extsize))
- extsz = mp->m_sb.sb_rextsize;
- resrtextents = qblocks = (count_fsb + extsz - 1);
- do_div(resrtextents, mp->m_sb.sb_rextsize);
- resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
- quota_flag = XFS_QMOPT_RES_RTBLKS;
+ offset_fsb = XFS_B_TO_FSBT(mp, offset);
+ last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
+ if ((offset + count) > isize) {
+ error = xfs_iomap_eof_align_last_fsb(mp, io, isize, extsz,
+ &last_fsb);
+ if (error)
+ goto error_out;
} else {
- resrtextents = 0;
- resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, count_fsb);
- quota_flag = XFS_QMOPT_RES_REGBLKS;
+ if (found && (ret_imap->br_startblock == HOLESTARTBLOCK))
+ last_fsb = MIN(last_fsb, (xfs_fileoff_t)
+ ret_imap->br_blockcount +
+ ret_imap->br_startoff);
+ }
+ count_fsb = last_fsb - offset_fsb;
+ ASSERT(count_fsb > 0);
+
+ resaligned = count_fsb;
+ if (unlikely(extsz)) {
+ if ((temp = do_mod(offset_fsb, extsz)))
+ resaligned += temp;
+ if ((temp = do_mod(resaligned, extsz)))
+ resaligned += extsz - temp;
}
+ if (unlikely(rt)) {
+ resrtextents = qblocks = resaligned;
+ resrtextents /= mp->m_sb.sb_rextsize;
+ resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+ quota_flag = XFS_QMOPT_RES_RTBLKS;
+ } else {
+ resrtextents = 0;
+ resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
+ quota_flag = XFS_QMOPT_RES_REGBLKS;
+ }
+
/*
* Allocate and setup the transaction
*/
@@ -426,7 +493,6 @@ xfs_iomap_write_direct(
XFS_WRITE_LOG_RES(mp), resrtextents,
XFS_TRANS_PERM_LOG_RES,
XFS_WRITE_LOG_COUNT);
-
/*
* Check for running out of space, note: need lock to return
*/
@@ -436,20 +502,20 @@ xfs_iomap_write_direct(
if (error)
goto error_out;
- if (XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag)) {
- error = (EDQUOT);
+ error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip,
+ qblocks, 0, quota_flag);
+ if (error)
goto error1;
- }
- bmapi_flag = XFS_BMAPI_WRITE;
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
- if (!(flags & BMAPI_MMAP) && (offset < ip->i_d.di_size || rt))
+ bmapi_flag = XFS_BMAPI_WRITE;
+ if ((flags & BMAPI_DIRECT) && (offset < ip->i_d.di_size || extsz))
bmapi_flag |= XFS_BMAPI_PREALLOC;
/*
- * Issue the bmapi() call to allocate the blocks
+ * Issue the xfs_bmapi() call to allocate the blocks
*/
XFS_BMAP_INIT(&free_list, &firstfsb);
nimaps = 1;
@@ -484,8 +550,10 @@ xfs_iomap_write_direct(
"extent-state : %x \n",
(ip->i_mount)->m_fsname,
(long long)ip->i_ino,
- ret_imap->br_startblock, ret_imap->br_startoff,
- ret_imap->br_blockcount,ret_imap->br_state);
+ (unsigned long long)ret_imap->br_startblock,
+ (unsigned long long)ret_imap->br_startoff,
+ (unsigned long long)ret_imap->br_blockcount,
+ ret_imap->br_state);
}
return 0;
@@ -501,6 +569,63 @@ error_out:
return XFS_ERROR(error);
}
+/*
+ * If the caller is doing a write at the end of the file,
+ * then extend the allocation out to the file system's write
+ * iosize. We clean up any extra space left over when the
+ * file is closed in xfs_inactive().
+ *
+ * For sync writes, we are flushing delayed allocate space to
+ * try to make additional space available for allocation near
+ * the filesystem full boundary - preallocation hurts in that
+ * situation, of course.
+ */
+STATIC int
+xfs_iomap_eof_want_preallocate(
+ xfs_mount_t *mp,
+ xfs_iocore_t *io,
+ xfs_fsize_t isize,
+ xfs_off_t offset,
+ size_t count,
+ int ioflag,
+ xfs_bmbt_irec_t *imap,
+ int nimaps,
+ int *prealloc)
+{
+ xfs_fileoff_t start_fsb;
+ xfs_filblks_t count_fsb;
+ xfs_fsblock_t firstblock;
+ int n, error, imaps;
+
+ *prealloc = 0;
+ if ((ioflag & BMAPI_SYNC) || (offset + count) <= isize)
+ return 0;
+
+ /*
+ * If there are any real blocks past eof, then don't
+ * do any speculative allocation.
+ */
+ start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
+ count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
+ while (count_fsb > 0) {
+ imaps = nimaps;
+ firstblock = NULLFSBLOCK;
+ error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb,
+ 0, &firstblock, 0, imap, &imaps, NULL);
+ if (error)
+ return error;
+ for (n = 0; n < imaps; n++) {
+ if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
+ (imap[n].br_startblock != DELAYSTARTBLOCK))
+ return 0;
+ start_fsb += imap[n].br_blockcount;
+ count_fsb -= imap[n].br_blockcount;
+ }
+ }
+ *prealloc = 1;
+ return 0;
+}
+
int
xfs_iomap_write_delay(
xfs_inode_t *ip,
@@ -514,13 +639,15 @@ xfs_iomap_write_delay(
xfs_iocore_t *io = &ip->i_iocore;
xfs_fileoff_t offset_fsb;
xfs_fileoff_t last_fsb;
- xfs_fsize_t isize;
+ xfs_off_t aligned_offset;
+ xfs_fileoff_t ioalign;
xfs_fsblock_t firstblock;
+ xfs_extlen_t extsz;
+ xfs_fsize_t isize;
int nimaps;
- int error;
xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS];
- int aeof;
- int fsynced = 0;
+ int prealloc, fsynced = 0;
+ int error;
ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
@@ -528,152 +655,57 @@ xfs_iomap_write_delay(
* Make sure that the dquots are there. This doesn't hold
* the ilock across a disk read.
*/
-
error = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED);
if (error)
return XFS_ERROR(error);
+ if (XFS_IS_REALTIME_INODE(ip)) {
+ if (!(extsz = ip->i_d.di_extsize))
+ extsz = mp->m_sb.sb_rextsize;
+ } else {
+ extsz = ip->i_d.di_extsize;
+ }
+
+ offset_fsb = XFS_B_TO_FSBT(mp, offset);
+
retry:
isize = ip->i_d.di_size;
- if (io->io_new_size > isize) {
+ if (io->io_new_size > isize)
isize = io->io_new_size;
- }
- aeof = 0;
- offset_fsb = XFS_B_TO_FSBT(mp, offset);
- last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
- /*
- * If the caller is doing a write at the end of the file,
- * then extend the allocation (and the buffer used for the write)
- * out to the file system's write iosize. We clean up any extra
- * space left over when the file is closed in xfs_inactive().
- *
- * For sync writes, we are flushing delayed allocate space to
- * try to make additional space available for allocation near
- * the filesystem full boundary - preallocation hurts in that
- * situation, of course.
- */
- if (!(ioflag & BMAPI_SYNC) && ((offset + count) > ip->i_d.di_size)) {
- xfs_off_t aligned_offset;
- xfs_filblks_t count_fsb;
- unsigned int iosize;
- xfs_fileoff_t ioalign;
- int n;
- xfs_fileoff_t start_fsb;
+ error = xfs_iomap_eof_want_preallocate(mp, io, isize, offset, count,
+ ioflag, imap, XFS_WRITE_IMAPS, &prealloc);
+ if (error)
+ return error;
- /*
- * If there are any real blocks past eof, then don't
- * do any speculative allocation.
- */
- start_fsb = XFS_B_TO_FSBT(mp,
- ((xfs_ufsize_t)(offset + count - 1)));
- count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
- while (count_fsb > 0) {
- nimaps = XFS_WRITE_IMAPS;
- error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb,
- 0, &firstblock, 0, imap, &nimaps, NULL);
- if (error) {
- return error;
- }
- for (n = 0; n < nimaps; n++) {
- if ( !(io->io_flags & XFS_IOCORE_RT) &&
- !imap[n].br_startblock) {
- cmn_err(CE_PANIC,"Access to block "
- "zero: fs <%s> inode: %lld "
- "start_block : %llx start_off "
- ": %llx blkcnt : %llx "
- "extent-state : %x \n",
- (ip->i_mount)->m_fsname,
- (long long)ip->i_ino,
- imap[n].br_startblock,
- imap[n].br_startoff,
- imap[n].br_blockcount,
- imap[n].br_state);
- }
- if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
- (imap[n].br_startblock != DELAYSTARTBLOCK)) {
- goto write_map;
- }
- start_fsb += imap[n].br_blockcount;
- count_fsb -= imap[n].br_blockcount;
- }
- }
- iosize = mp->m_writeio_blocks;
+ if (prealloc) {
aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
- last_fsb = ioalign + iosize;
- aeof = 1;
+ last_fsb = ioalign + mp->m_writeio_blocks;
+ } else {
+ last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
}
-write_map:
- nimaps = XFS_WRITE_IMAPS;
- firstblock = NULLFSBLOCK;
- /*
- * If mounted with the "-o swalloc" option, roundup the allocation
- * request to a stripe width boundary if the file size is >=
- * stripe width and we are allocating past the allocation eof.
- */
- if (!(io->io_flags & XFS_IOCORE_RT) && mp->m_swidth
- && (mp->m_flags & XFS_MOUNT_SWALLOC)
- && (isize >= XFS_FSB_TO_B(mp, mp->m_swidth)) && aeof) {
- int eof;
- xfs_fileoff_t new_last_fsb;
-
- new_last_fsb = roundup_64(last_fsb, mp->m_swidth);
- error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
- if (error) {
- return error;
- }
- if (eof) {
- last_fsb = new_last_fsb;
- }
- /*
- * Roundup the allocation request to a stripe unit (m_dalign) boundary
- * if the file size is >= stripe unit size, and we are allocating past
- * the allocation eof.
- */
- } else if (!(io->io_flags & XFS_IOCORE_RT) && mp->m_dalign &&
- (isize >= XFS_FSB_TO_B(mp, mp->m_dalign)) && aeof) {
- int eof;
- xfs_fileoff_t new_last_fsb;
- new_last_fsb = roundup_64(last_fsb, mp->m_dalign);
- error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
- if (error) {
- return error;
- }
- if (eof) {
- last_fsb = new_last_fsb;
- }
- /*
- * Round up the allocation request to a real-time extent boundary
- * if the file is on the real-time subvolume.
- */
- } else if (io->io_flags & XFS_IOCORE_RT && aeof) {
- int eof;
- xfs_fileoff_t new_last_fsb;
-
- new_last_fsb = roundup_64(last_fsb, mp->m_sb.sb_rextsize);
- error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
- if (error) {
+ if (prealloc || extsz) {
+ error = xfs_iomap_eof_align_last_fsb(mp, io, isize, extsz,
+ &last_fsb);
+ if (error)
return error;
- }
- if (eof)
- last_fsb = new_last_fsb;
}
+
+ nimaps = XFS_WRITE_IMAPS;
+ firstblock = NULLFSBLOCK;
error = xfs_bmapi(NULL, ip, offset_fsb,
(xfs_filblks_t)(last_fsb - offset_fsb),
XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
&nimaps, NULL);
- /*
- * This can be EDQUOT, if nimaps == 0
- */
- if (error && (error != ENOSPC)) {
+ if (error && (error != ENOSPC))
return XFS_ERROR(error);
- }
+
/*
* If bmapi returned us nothing, and if we didn't get back EDQUOT,
- * then we must have run out of space.
+ * then we must have run out of space - flush delalloc, and retry..
*/
if (nimaps == 0) {
xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE,
@@ -685,17 +717,21 @@ write_map:
goto retry;
}
- *ret_imap = imap[0];
- *nmaps = 1;
- if ( !(io->io_flags & XFS_IOCORE_RT) && !ret_imap->br_startblock) {
+ if (!(io->io_flags & XFS_IOCORE_RT) && !ret_imap->br_startblock) {
cmn_err(CE_PANIC,"Access to block zero: fs <%s> inode: %lld "
"start_block : %llx start_off : %llx blkcnt : %llx "
"extent-state : %x \n",
(ip->i_mount)->m_fsname,
(long long)ip->i_ino,
- ret_imap->br_startblock, ret_imap->br_startoff,
- ret_imap->br_blockcount,ret_imap->br_state);
+ (unsigned long long)ret_imap->br_startblock,
+ (unsigned long long)ret_imap->br_startoff,
+ (unsigned long long)ret_imap->br_blockcount,
+ ret_imap->br_state);
}
+
+ *ret_imap = imap[0];
+ *nmaps = 1;
+
return 0;
}
@@ -821,17 +857,21 @@ xfs_iomap_write_allocate(
*/
for (i = 0; i < nimaps; i++) {
- if ( !(io->io_flags & XFS_IOCORE_RT) &&
- !imap[i].br_startblock) {
+ if (!(io->io_flags & XFS_IOCORE_RT) &&
+ !imap[i].br_startblock) {
cmn_err(CE_PANIC,"Access to block zero: "
"fs <%s> inode: %lld "
- "start_block : %llx start_off : %llx "
+ "start_block : %llx start_off : %llx "
"blkcnt : %llx extent-state : %x \n",
(ip->i_mount)->m_fsname,
(long long)ip->i_ino,
- imap[i].br_startblock,
- imap[i].br_startoff,
- imap[i].br_blockcount,imap[i].br_state);
+ (unsigned long long)
+ imap[i].br_startblock,
+ (unsigned long long)
+ imap[i].br_startoff,
+ (unsigned long long)
+ imap[i].br_blockcount,
+ imap[i].br_state);
}
if ((offset_fsb >= imap[i].br_startoff) &&
(offset_fsb < (imap[i].br_startoff +
@@ -868,17 +908,17 @@ xfs_iomap_write_unwritten(
{
xfs_mount_t *mp = ip->i_mount;
xfs_iocore_t *io = &ip->i_iocore;
- xfs_trans_t *tp;
xfs_fileoff_t offset_fsb;
xfs_filblks_t count_fsb;
xfs_filblks_t numblks_fsb;
- xfs_bmbt_irec_t imap;
+ xfs_fsblock_t firstfsb;
+ int nimaps;
+ xfs_trans_t *tp;
+ xfs_bmbt_irec_t imap;
+ xfs_bmap_free_t free_list;
+ uint resblks;
int committed;
int error;
- int nres;
- int nimaps;
- xfs_fsblock_t firstfsb;
- xfs_bmap_free_t free_list;
xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN,
&ip->i_iocore, offset, count);
@@ -887,9 +927,9 @@ xfs_iomap_write_unwritten(
count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);
- do {
- nres = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+ resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
+ do {
/*
* set up a transaction to convert the range of extents
* from unwritten to real. Do allocations in a loop until
@@ -897,7 +937,7 @@ xfs_iomap_write_unwritten(
*/
tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
- error = xfs_trans_reserve(tp, nres,
+ error = xfs_trans_reserve(tp, resblks,
XFS_WRITE_LOG_RES(mp), 0,
XFS_TRANS_PERM_LOG_RES,
XFS_WRITE_LOG_COUNT);
@@ -916,7 +956,7 @@ xfs_iomap_write_unwritten(
XFS_BMAP_INIT(&free_list, &firstfsb);
nimaps = 1;
error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
- XFS_BMAPI_WRITE, &firstfsb,
+ XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb,
1, &imap, &nimaps, &free_list);
if (error)
goto error_on_bmapi_transaction;
@@ -930,15 +970,17 @@ xfs_iomap_write_unwritten(
xfs_iunlock(ip, XFS_ILOCK_EXCL);
if (error)
goto error0;
-
+
if ( !(io->io_flags & XFS_IOCORE_RT) && !imap.br_startblock) {
cmn_err(CE_PANIC,"Access to block zero: fs <%s> "
"inode: %lld start_block : %llx start_off : "
"%llx blkcnt : %llx extent-state : %x \n",
(ip->i_mount)->m_fsname,
(long long)ip->i_ino,
- imap.br_startblock,imap.br_startoff,
- imap.br_blockcount,imap.br_state);
+ (unsigned long long)imap.br_startblock,
+ (unsigned long long)imap.br_startoff,
+ (unsigned long long)imap.br_blockcount,
+ imap.br_state);
}
if ((numblks_fsb = imap.br_blockcount) == 0) {
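
For reference, the rounding done by the new xfs_iomap_eof_align_last_fsb() is plain round-up-to-multiple arithmetic over stripe width, stripe unit and the extent-size hint. A simplified userspace sketch of the decision order follows; it drops the file-size thresholds, the swalloc mount check and the XFS_BMAP_EOF() probe, and every parameter value is made up:

#include <stdint.h>
#include <stdio.h>

/* Round x up to the next multiple of y (y > 0); the job that
 * roundup_64() performs in the kernel code above. */
static uint64_t roundup64(uint64_t x, uint64_t y)
{
	return ((x + y - 1) / y) * y;
}

/* Toy version of the alignment choice: stripe width first, then
 * stripe unit, and an extent-size hint is always honoured last. */
static uint64_t align_last_fsb(uint64_t last_fsb, uint64_t swidth,
			       uint64_t dalign, uint64_t extsize)
{
	uint64_t new_last = 0;

	if (swidth)
		new_last = roundup64(last_fsb, swidth);
	else if (dalign)
		new_last = roundup64(last_fsb, dalign);

	if (extsize) {
		uint64_t align = new_last ? roundup64(new_last, extsize)
					  : extsize;
		new_last = roundup64(last_fsb, align);
	}
	return new_last ? new_last : last_fsb;
}

int main(void)
{
	printf("%llu\n",
	       (unsigned long long)align_last_fsb(1000, 128, 32, 16));
	return 0;
}
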
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index f63646ead81..c59450e1be4 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -56,6 +56,7 @@ xfs_bulkstat_one_iget(
{
xfs_dinode_core_t *dic; /* dinode core info pointer */
xfs_inode_t *ip; /* incore inode pointer */
+ vnode_t *vp;
int error;
error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, bno);
@@ -72,6 +73,7 @@ xfs_bulkstat_one_iget(
goto out_iput;
}
+ vp = XFS_ITOV(ip);
dic = &ip->i_d;
/* xfs_iget returns the following without needing
@@ -84,8 +86,7 @@ xfs_bulkstat_one_iget(
buf->bs_uid = dic->di_uid;
buf->bs_gid = dic->di_gid;
buf->bs_size = dic->di_size;
- buf->bs_atime.tv_sec = dic->di_atime.t_sec;
- buf->bs_atime.tv_nsec = dic->di_atime.t_nsec;
+ vn_atime_to_bstime(vp, &buf->bs_atime);
buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 29af51275ca..3d9a36e7736 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -178,6 +178,83 @@ xlog_trace_iclog(xlog_in_core_t *iclog, uint state)
#define xlog_trace_iclog(iclog,state)
#endif /* XFS_LOG_TRACE */
+
+static void
+xlog_ins_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
+{
+ if (*qp) {
+ tic->t_next = (*qp);
+ tic->t_prev = (*qp)->t_prev;
+ (*qp)->t_prev->t_next = tic;
+ (*qp)->t_prev = tic;
+ } else {
+ tic->t_prev = tic->t_next = tic;
+ *qp = tic;
+ }
+
+ tic->t_flags |= XLOG_TIC_IN_Q;
+}
+
+static void
+xlog_del_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
+{
+ if (tic == tic->t_next) {
+ *qp = NULL;
+ } else {
+ *qp = tic->t_next;
+ tic->t_next->t_prev = tic->t_prev;
+ tic->t_prev->t_next = tic->t_next;
+ }
+
+ tic->t_next = tic->t_prev = NULL;
+ tic->t_flags &= ~XLOG_TIC_IN_Q;
+}
+
+static void
+xlog_grant_sub_space(struct log *log, int bytes)
+{
+ log->l_grant_write_bytes -= bytes;
+ if (log->l_grant_write_bytes < 0) {
+ log->l_grant_write_bytes += log->l_logsize;
+ log->l_grant_write_cycle--;
+ }
+
+ log->l_grant_reserve_bytes -= bytes;
+ if ((log)->l_grant_reserve_bytes < 0) {
+ log->l_grant_reserve_bytes += log->l_logsize;
+ log->l_grant_reserve_cycle--;
+ }
+
+}
+
+static void
+xlog_grant_add_space_write(struct log *log, int bytes)
+{
+ log->l_grant_write_bytes += bytes;
+ if (log->l_grant_write_bytes > log->l_logsize) {
+ log->l_grant_write_bytes -= log->l_logsize;
+ log->l_grant_write_cycle++;
+ }
+}
+
+static void
+xlog_grant_add_space_reserve(struct log *log, int bytes)
+{
+ log->l_grant_reserve_bytes += bytes;
+ if (log->l_grant_reserve_bytes > log->l_logsize) {
+ log->l_grant_reserve_bytes -= log->l_logsize;
+ log->l_grant_reserve_cycle++;
+ }
+}
+
+static inline void
+xlog_grant_add_space(struct log *log, int bytes)
+{
+ xlog_grant_add_space_write(log, bytes);
+ xlog_grant_add_space_reserve(log, bytes);
+}
+
+
/*
* NOTES:
*
@@ -428,7 +505,7 @@ xfs_log_mount(xfs_mount_t *mp,
if (readonly)
vfsp->vfs_flag &= ~VFS_RDONLY;
- error = xlog_recover(mp->m_log, readonly);
+ error = xlog_recover(mp->m_log);
if (readonly)
vfsp->vfs_flag |= VFS_RDONLY;
@@ -1320,8 +1397,7 @@ xlog_sync(xlog_t *log,
/* move grant heads by roundoff in sync */
s = GRANT_LOCK(log);
- XLOG_GRANT_ADD_SPACE(log, roundoff, 'w');
- XLOG_GRANT_ADD_SPACE(log, roundoff, 'r');
+ xlog_grant_add_space(log, roundoff);
GRANT_UNLOCK(log, s);
/* put cycle number in every block */
@@ -1515,7 +1591,6 @@ xlog_state_finish_copy(xlog_t *log,
* print out info relating to regions written which consume
* the reservation
*/
-#if defined(XFS_LOG_RES_DEBUG)
STATIC void
xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket)
{
@@ -1605,11 +1680,11 @@ xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket)
ticket->t_res_arr_sum, ticket->t_res_o_flow,
ticket->t_res_num_ophdrs, ophdr_spc,
ticket->t_res_arr_sum +
- ticket->t_res_o_flow + ophdr_spc,
+ ticket->t_res_o_flow + ophdr_spc,
ticket->t_res_num);
for (i = 0; i < ticket->t_res_num; i++) {
- uint r_type = ticket->t_res_arr[i].r_type;
+ uint r_type = ticket->t_res_arr[i].r_type;
cmn_err(CE_WARN,
"region[%u]: %s - %u bytes\n",
i,
@@ -1618,9 +1693,6 @@ xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket)
ticket->t_res_arr[i].r_len);
}
}
-#else
-#define xlog_print_tic_res(mp, ticket)
-#endif
/*
* Write some region out to in-core log
@@ -2389,7 +2461,7 @@ xlog_grant_log_space(xlog_t *log,
/* something is already sleeping; insert new transaction at end */
if (log->l_reserve_headq) {
- XLOG_INS_TICKETQ(log->l_reserve_headq, tic);
+ xlog_ins_ticketq(&log->l_reserve_headq, tic);
xlog_trace_loggrant(log, tic,
"xlog_grant_log_space: sleep 1");
/*
@@ -2422,7 +2494,7 @@ redo:
log->l_grant_reserve_bytes);
if (free_bytes < need_bytes) {
if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
- XLOG_INS_TICKETQ(log->l_reserve_headq, tic);
+ xlog_ins_ticketq(&log->l_reserve_headq, tic);
xlog_trace_loggrant(log, tic,
"xlog_grant_log_space: sleep 2");
XFS_STATS_INC(xs_sleep_logspace);
@@ -2439,11 +2511,10 @@ redo:
s = GRANT_LOCK(log);
goto redo;
} else if (tic->t_flags & XLOG_TIC_IN_Q)
- XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
+ xlog_del_ticketq(&log->l_reserve_headq, tic);
/* we've got enough space */
- XLOG_GRANT_ADD_SPACE(log, need_bytes, 'w');
- XLOG_GRANT_ADD_SPACE(log, need_bytes, 'r');
+ xlog_grant_add_space(log, need_bytes);
#ifdef DEBUG
tail_lsn = log->l_tail_lsn;
/*
@@ -2464,7 +2535,7 @@ redo:
error_return:
if (tic->t_flags & XLOG_TIC_IN_Q)
- XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
+ xlog_del_ticketq(&log->l_reserve_headq, tic);
xlog_trace_loggrant(log, tic, "xlog_grant_log_space: err_ret");
/*
* If we are failing, make sure the ticket doesn't have any
@@ -2533,7 +2604,7 @@ xlog_regrant_write_log_space(xlog_t *log,
if (ntic != log->l_write_headq) {
if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
- XLOG_INS_TICKETQ(log->l_write_headq, tic);
+ xlog_ins_ticketq(&log->l_write_headq, tic);
xlog_trace_loggrant(log, tic,
"xlog_regrant_write_log_space: sleep 1");
@@ -2565,7 +2636,7 @@ redo:
log->l_grant_write_bytes);
if (free_bytes < need_bytes) {
if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
- XLOG_INS_TICKETQ(log->l_write_headq, tic);
+ xlog_ins_ticketq(&log->l_write_headq, tic);
XFS_STATS_INC(xs_sleep_logspace);
sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
@@ -2581,9 +2652,10 @@ redo:
s = GRANT_LOCK(log);
goto redo;
} else if (tic->t_flags & XLOG_TIC_IN_Q)
- XLOG_DEL_TICKETQ(log->l_write_headq, tic);
+ xlog_del_ticketq(&log->l_write_headq, tic);
- XLOG_GRANT_ADD_SPACE(log, need_bytes, 'w'); /* we've got enough space */
+ /* we've got enough space */
+ xlog_grant_add_space_write(log, need_bytes);
#ifdef DEBUG
tail_lsn = log->l_tail_lsn;
if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) {
@@ -2600,7 +2672,7 @@ redo:
error_return:
if (tic->t_flags & XLOG_TIC_IN_Q)
- XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
+ xlog_del_ticketq(&log->l_reserve_headq, tic);
xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: err_ret");
/*
* If we are failing, make sure the ticket doesn't have any
@@ -2633,8 +2705,7 @@ xlog_regrant_reserve_log_space(xlog_t *log,
ticket->t_cnt--;
s = GRANT_LOCK(log);
- XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w');
- XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r');
+ xlog_grant_sub_space(log, ticket->t_curr_res);
ticket->t_curr_res = ticket->t_unit_res;
XLOG_TIC_RESET_RES(ticket);
xlog_trace_loggrant(log, ticket,
@@ -2647,7 +2718,7 @@ xlog_regrant_reserve_log_space(xlog_t *log,
return;
}
- XLOG_GRANT_ADD_SPACE(log, ticket->t_unit_res, 'r');
+ xlog_grant_add_space_reserve(log, ticket->t_unit_res);
xlog_trace_loggrant(log, ticket,
"xlog_regrant_reserve_log_space: exit");
xlog_verify_grant_head(log, 0);
@@ -2683,8 +2754,7 @@ xlog_ungrant_log_space(xlog_t *log,
s = GRANT_LOCK(log);
xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: enter");
- XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w');
- XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r');
+ xlog_grant_sub_space(log, ticket->t_curr_res);
xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: sub current");
@@ -2693,8 +2763,7 @@ xlog_ungrant_log_space(xlog_t *log,
*/
if (ticket->t_cnt > 0) {
ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
- XLOG_GRANT_SUB_SPACE(log, ticket->t_unit_res*ticket->t_cnt,'w');
- XLOG_GRANT_SUB_SPACE(log, ticket->t_unit_res*ticket->t_cnt,'r');
+ xlog_grant_sub_space(log, ticket->t_unit_res*ticket->t_cnt);
}
xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: exit");
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 158829ca56f..4b2ac88dbb8 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -30,13 +30,7 @@
* By comparing each compnent, we don't have to worry about extra
* endian issues in treating two 32 bit numbers as one 64 bit number
*/
-static
-#if defined(__GNUC__) && (__GNUC__ == 2) && ( (__GNUC_MINOR__ == 95) || (__GNUC_MINOR__ == 96))
-__attribute__((unused)) /* gcc 2.95, 2.96 miscompile this when inlined */
-#else
-__inline__
-#endif
-xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
+static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
{
if (CYCLE_LSN(lsn1) != CYCLE_LSN(lsn2))
return (CYCLE_LSN(lsn1)<CYCLE_LSN(lsn2))? -999 : 999;
@@ -102,7 +96,6 @@ xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
/* Region types for iovec's i_type */
-#if defined(XFS_LOG_RES_DEBUG)
#define XLOG_REG_TYPE_BFORMAT 1
#define XLOG_REG_TYPE_BCHUNK 2
#define XLOG_REG_TYPE_EFI_FORMAT 3
@@ -123,21 +116,13 @@ xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
#define XLOG_REG_TYPE_COMMIT 18
#define XLOG_REG_TYPE_TRANSHDR 19
#define XLOG_REG_TYPE_MAX 19
-#endif
-#if defined(XFS_LOG_RES_DEBUG)
#define XLOG_VEC_SET_TYPE(vecp, t) ((vecp)->i_type = (t))
-#else
-#define XLOG_VEC_SET_TYPE(vecp, t)
-#endif
-
typedef struct xfs_log_iovec {
xfs_caddr_t i_addr; /* beginning address of region */
int i_len; /* length in bytes of region */
-#if defined(XFS_LOG_RES_DEBUG)
- uint i_type; /* type of region */
-#endif
+ uint i_type; /* type of region */
} xfs_log_iovec_t;
typedef void* xfs_log_ticket_t;
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 4518b188ade..34bcbf50789 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -253,7 +253,6 @@ typedef __uint32_t xlog_tid_t;
/* Ticket reservation region accounting */
-#if defined(XFS_LOG_RES_DEBUG)
#define XLOG_TIC_LEN_MAX 15
#define XLOG_TIC_RESET_RES(t) ((t)->t_res_num = \
(t)->t_res_arr_sum = (t)->t_res_num_ophdrs = 0)
@@ -278,15 +277,9 @@ typedef __uint32_t xlog_tid_t;
* we don't care about.
*/
typedef struct xlog_res {
- uint r_len;
- uint r_type;
+ uint r_len; /* region length :4 */
+ uint r_type; /* region's transaction type :4 */
} xlog_res_t;
-#else
-#define XLOG_TIC_RESET_RES(t)
-#define XLOG_TIC_ADD_OPHDR(t)
-#define XLOG_TIC_ADD_REGION(t, len, type)
-#endif
-
typedef struct xlog_ticket {
sv_t t_sema; /* sleep on this semaphore : 20 */
@@ -301,14 +294,12 @@ typedef struct xlog_ticket {
char t_flags; /* properties of reservation : 1 */
uint t_trans_type; /* transaction type : 4 */
-#if defined (XFS_LOG_RES_DEBUG)
/* reservation array fields */
uint t_res_num; /* num in array : 4 */
- xlog_res_t t_res_arr[XLOG_TIC_LEN_MAX]; /* array of res : X */
uint t_res_num_ophdrs; /* num op hdrs : 4 */
uint t_res_arr_sum; /* array sum : 4 */
uint t_res_o_flow; /* sum overflow : 4 */
-#endif
+ xlog_res_t t_res_arr[XLOG_TIC_LEN_MAX]; /* array of res : 8 * 15 */
} xlog_ticket_t;
#endif
@@ -494,71 +485,13 @@ typedef struct log {
#define XLOG_FORCED_SHUTDOWN(log) ((log)->l_flags & XLOG_IO_ERROR)
-#define XLOG_GRANT_SUB_SPACE(log,bytes,type) \
- { \
- if (type == 'w') { \
- (log)->l_grant_write_bytes -= (bytes); \
- if ((log)->l_grant_write_bytes < 0) { \
- (log)->l_grant_write_bytes += (log)->l_logsize; \
- (log)->l_grant_write_cycle--; \
- } \
- } else { \
- (log)->l_grant_reserve_bytes -= (bytes); \
- if ((log)->l_grant_reserve_bytes < 0) { \
- (log)->l_grant_reserve_bytes += (log)->l_logsize;\
- (log)->l_grant_reserve_cycle--; \
- } \
- } \
- }
-#define XLOG_GRANT_ADD_SPACE(log,bytes,type) \
- { \
- if (type == 'w') { \
- (log)->l_grant_write_bytes += (bytes); \
- if ((log)->l_grant_write_bytes > (log)->l_logsize) { \
- (log)->l_grant_write_bytes -= (log)->l_logsize; \
- (log)->l_grant_write_cycle++; \
- } \
- } else { \
- (log)->l_grant_reserve_bytes += (bytes); \
- if ((log)->l_grant_reserve_bytes > (log)->l_logsize) { \
- (log)->l_grant_reserve_bytes -= (log)->l_logsize;\
- (log)->l_grant_reserve_cycle++; \
- } \
- } \
- }
-#define XLOG_INS_TICKETQ(q, tic) \
- { \
- if (q) { \
- (tic)->t_next = (q); \
- (tic)->t_prev = (q)->t_prev; \
- (q)->t_prev->t_next = (tic); \
- (q)->t_prev = (tic); \
- } else { \
- (tic)->t_prev = (tic)->t_next = (tic); \
- (q) = (tic); \
- } \
- (tic)->t_flags |= XLOG_TIC_IN_Q; \
- }
-#define XLOG_DEL_TICKETQ(q, tic) \
- { \
- if ((tic) == (tic)->t_next) { \
- (q) = NULL; \
- } else { \
- (q) = (tic)->t_next; \
- (tic)->t_next->t_prev = (tic)->t_prev; \
- (tic)->t_prev->t_next = (tic)->t_next; \
- } \
- (tic)->t_next = (tic)->t_prev = NULL; \
- (tic)->t_flags &= ~XLOG_TIC_IN_Q; \
- }
/* common routines */
extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
extern int xlog_find_tail(xlog_t *log,
xfs_daddr_t *head_blk,
- xfs_daddr_t *tail_blk,
- int readonly);
-extern int xlog_recover(xlog_t *log, int readonly);
+ xfs_daddr_t *tail_blk);
+extern int xlog_recover(xlog_t *log);
extern int xlog_recover_finish(xlog_t *log, int mfsi_flags);
extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
extern void xlog_recover_process_iunlinks(xlog_t *log);
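
The removed XLOG_GRANT_ADD_SPACE/XLOG_GRANT_SUB_SPACE macros (now the xlog_grant_*_space() helpers in xfs_log.c) track each grant head as a (cycle, bytes) pair that wraps modulo the log size. A small userspace model of that wraparound, with illustrative field names and sizes only:

#include <stdio.h>

struct grant_head {
	int cycle;
	int bytes;
};

static void grant_add_space(struct grant_head *h, int logsize, int bytes)
{
	h->bytes += bytes;
	if (h->bytes > logsize) {	/* wrapped: start a new cycle */
		h->bytes -= logsize;
		h->cycle++;
	}
}

static void grant_sub_space(struct grant_head *h, int logsize, int bytes)
{
	h->bytes -= bytes;
	if (h->bytes < 0) {		/* wrapped backwards */
		h->bytes += logsize;
		h->cycle--;
	}
}

int main(void)
{
	struct grant_head head = { .cycle = 1, .bytes = 900 };

	grant_add_space(&head, 1024, 200);	/* wraps into cycle 2 */
	grant_sub_space(&head, 1024, 50);
	printf("cycle %d, bytes %d\n", head.cycle, head.bytes);
	return 0;
}
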
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 8ab7df76806..7d46cbd6a07 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -783,8 +783,7 @@ int
xlog_find_tail(
xlog_t *log,
xfs_daddr_t *head_blk,
- xfs_daddr_t *tail_blk,
- int readonly)
+ xfs_daddr_t *tail_blk)
{
xlog_rec_header_t *rhead;
xlog_op_header_t *op_head;
@@ -2563,10 +2562,12 @@ xlog_recover_do_quotaoff_trans(
/*
* The logitem format's flag tells us if this was user quotaoff,
- * group quotaoff or both.
+ * group/project quotaoff or both.
*/
if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
log->l_quotaoffs_flag |= XFS_DQ_USER;
+ if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
+ log->l_quotaoffs_flag |= XFS_DQ_PROJ;
if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
log->l_quotaoffs_flag |= XFS_DQ_GROUP;
@@ -3890,14 +3891,13 @@ xlog_do_recover(
*/
int
xlog_recover(
- xlog_t *log,
- int readonly)
+ xlog_t *log)
{
xfs_daddr_t head_blk, tail_blk;
int error;
/* find the tail of the log */
- if ((error = xlog_find_tail(log, &head_blk, &tail_blk, readonly)))
+ if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
return error;
if (tail_blk != head_blk) {
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 541d5dd474b..6088e14f84e 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -51,7 +51,7 @@ STATIC int xfs_uuid_mount(xfs_mount_t *);
STATIC void xfs_uuid_unmount(xfs_mount_t *mp);
STATIC void xfs_unmountfs_wait(xfs_mount_t *);
-static struct {
+static const struct {
short offset;
short type; /* 0 = integer
* 1 = binary / string (no translation)
@@ -117,7 +117,7 @@ xfs_mount_init(void)
AIL_LOCKINIT(&mp->m_ail_lock, "xfs_ail");
spinlock_init(&mp->m_sb_lock, "xfs_sb");
- mutex_init(&mp->m_ilock, MUTEX_DEFAULT, "xfs_ilock");
+ mutex_init(&mp->m_ilock);
initnsema(&mp->m_growlock, 1, "xfs_grow");
/*
* Initialize the AIL.
@@ -1077,8 +1077,7 @@ xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
xfs_iflush_all(mp);
- XFS_QM_DQPURGEALL(mp,
- XFS_QMOPT_UQUOTA | XFS_QMOPT_GQUOTA | XFS_QMOPT_UMOUNTING);
+ XFS_QM_DQPURGEALL(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING);
/*
* Flush out the log synchronously so that we know for sure
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 08b2e0a5d80..cd3cf9613a0 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -308,7 +308,6 @@ typedef struct xfs_mount {
xfs_buftarg_t *m_ddev_targp; /* saves taking the address */
xfs_buftarg_t *m_logdev_targp;/* ptr to log device */
xfs_buftarg_t *m_rtdev_targp; /* ptr to rt device */
-#define m_dev m_ddev_targp->pbr_dev
__uint8_t m_dircook_elog; /* log d-cookie entry bits */
__uint8_t m_blkbit_log; /* blocklog + NBBY */
__uint8_t m_blkbb_log; /* blocklog - BBSHIFT */
@@ -393,7 +392,7 @@ typedef struct xfs_mount {
user */
#define XFS_MOUNT_NOALIGN (1ULL << 7) /* turn off stripe alignment
allocations */
-#define XFS_MOUNT_COMPAT_ATTR (1ULL << 8) /* do not use attr2 format */
+#define XFS_MOUNT_ATTR2 (1ULL << 8) /* allow use of attr2 format */
/* (1ULL << 9) -- currently unused */
#define XFS_MOUNT_NORECOVERY (1ULL << 10) /* no recovery - dirty fs */
#define XFS_MOUNT_SHARED (1ULL << 11) /* shared mount */
@@ -533,7 +532,7 @@ typedef struct xfs_mod_sb {
int msb_delta; /* Change to make to specified field */
} xfs_mod_sb_t;
-#define XFS_MOUNT_ILOCK(mp) mutex_lock(&((mp)->m_ilock), PINOD)
+#define XFS_MOUNT_ILOCK(mp) mutex_lock(&((mp)->m_ilock))
#define XFS_MOUNT_IUNLOCK(mp) mutex_unlock(&((mp)->m_ilock))
#define XFS_SB_LOCK(mp) mutex_spinlock(&(mp)->m_sb_lock)
#define XFS_SB_UNLOCK(mp,s) mutex_spinunlock(&(mp)->m_sb_lock,(s))
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
index 4d4e8f4e768..81a05cfd77d 100644
--- a/fs/xfs/xfs_rename.c
+++ b/fs/xfs/xfs_rename.c
@@ -243,7 +243,6 @@ xfs_rename(
xfs_inode_t *inodes[4];
int target_ip_dropped = 0; /* dropped target_ip link? */
vnode_t *src_dir_vp;
- bhv_desc_t *target_dir_bdp;
int spaceres;
int target_link_zero = 0;
int num_inodes;
@@ -260,14 +259,12 @@ xfs_rename(
* Find the XFS behavior descriptor for the target directory
* vnode since it was not handed to us.
*/
- target_dir_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(target_dir_vp),
- &xfs_vnodeops);
- if (target_dir_bdp == NULL) {
+ target_dp = xfs_vtoi(target_dir_vp);
+ if (target_dp == NULL) {
return XFS_ERROR(EXDEV);
}
src_dp = XFS_BHVTOI(src_dir_bdp);
- target_dp = XFS_BHVTOI(target_dir_bdp);
mp = src_dp->i_mount;
if (DM_EVENT_ENABLED(src_dir_vp->v_vfsp, src_dp, DM_EVENT_RENAME) ||
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index c4b20872f07..a59c102cf21 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -238,6 +238,7 @@ xfs_bioerror_relse(
}
return (EIO);
}
+
/*
* Prints out an ALERT message about I/O error.
*/
@@ -252,11 +253,9 @@ xfs_ioerror_alert(
"I/O error in filesystem (\"%s\") meta-data dev %s block 0x%llx"
" (\"%s\") error %d buf count %zd",
(!mp || !mp->m_fsname) ? "(fs name not set)" : mp->m_fsname,
- XFS_BUFTARG_NAME(bp->pb_target),
- (__uint64_t)blkno,
- func,
- XFS_BUF_GETERROR(bp),
- XFS_BUF_COUNT(bp));
+ XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)),
+ (__uint64_t)blkno, func,
+ XFS_BUF_GETERROR(bp), XFS_BUF_COUNT(bp));
}
/*
diff --git a/fs/xfs/xfs_sb.h b/fs/xfs/xfs_sb.h
index 4a17d335f89..bf168a91ddb 100644
--- a/fs/xfs/xfs_sb.h
+++ b/fs/xfs/xfs_sb.h
@@ -68,18 +68,6 @@ struct xfs_mount;
(XFS_SB_VERSION_NUMBITS | \
XFS_SB_VERSION_OKREALFBITS | \
XFS_SB_VERSION_OKSASHFBITS)
-#define XFS_SB_VERSION_MKFS(ia,dia,extflag,dirv2,na,sflag,morebits) \
- (((ia) || (dia) || (extflag) || (dirv2) || (na) || (sflag) || \
- (morebits)) ? \
- (XFS_SB_VERSION_4 | \
- ((ia) ? XFS_SB_VERSION_ALIGNBIT : 0) | \
- ((dia) ? XFS_SB_VERSION_DALIGNBIT : 0) | \
- ((extflag) ? XFS_SB_VERSION_EXTFLGBIT : 0) | \
- ((dirv2) ? XFS_SB_VERSION_DIRV2BIT : 0) | \
- ((na) ? XFS_SB_VERSION_LOGV2BIT : 0) | \
- ((sflag) ? XFS_SB_VERSION_SECTORBIT : 0) | \
- ((morebits) ? XFS_SB_VERSION_MOREBITSBIT : 0)) : \
- XFS_SB_VERSION_1)
/*
* There are two words to hold XFS "feature" bits: the original
@@ -105,11 +93,6 @@ struct xfs_mount;
(XFS_SB_VERSION2_OKREALFBITS | \
XFS_SB_VERSION2_OKSASHFBITS )
-/*
- * mkfs macro to set up sb_features2 word
- */
-#define XFS_SB_VERSION2_MKFS(resvd1, sbcntr) 0
-
typedef struct xfs_sb
{
__uint32_t sb_magicnum; /* magic number == XFS_SB_MAGIC */
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 279e043d732..d3d714e6b32 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1014,6 +1014,7 @@ xfs_trans_cancel(
xfs_log_item_t *lip;
int i;
#endif
+ xfs_mount_t *mp = tp->t_mountp;
/*
* See if the caller is being too lazy to figure out if
@@ -1026,9 +1027,10 @@ xfs_trans_cancel(
* filesystem. This happens in paths where we detect
* corruption and decide to give up.
*/
- if ((tp->t_flags & XFS_TRANS_DIRTY) &&
- !XFS_FORCED_SHUTDOWN(tp->t_mountp))
- xfs_force_shutdown(tp->t_mountp, XFS_CORRUPT_INCORE);
+ if ((tp->t_flags & XFS_TRANS_DIRTY) && !XFS_FORCED_SHUTDOWN(mp)) {
+ XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
+ xfs_force_shutdown(mp, XFS_CORRUPT_INCORE);
+ }
#ifdef DEBUG
if (!(flags & XFS_TRANS_ABORT)) {
licp = &(tp->t_items);
@@ -1040,7 +1042,7 @@ xfs_trans_cancel(
}
lip = lidp->lid_item;
- if (!XFS_FORCED_SHUTDOWN(tp->t_mountp))
+ if (!XFS_FORCED_SHUTDOWN(mp))
ASSERT(!(lip->li_type == XFS_LI_EFD));
}
licp = licp->lic_next;
@@ -1048,7 +1050,7 @@ xfs_trans_cancel(
}
#endif
xfs_trans_unreserve_and_mod_sb(tp);
- XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp);
+ XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp);
if (tp->t_ticket) {
if (flags & XFS_TRANS_RELEASE_LOG_RES) {
@@ -1057,7 +1059,7 @@ xfs_trans_cancel(
} else {
log_flags = 0;
}
- xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, log_flags);
+ xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
}
/* mark this thread as no longer being in a transaction */
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index a889963fdd1..d77901c07f6 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -973,7 +973,6 @@ void xfs_trans_bhold(xfs_trans_t *, struct xfs_buf *);
void xfs_trans_bhold_release(xfs_trans_t *, struct xfs_buf *);
void xfs_trans_binval(xfs_trans_t *, struct xfs_buf *);
void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *);
-void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *);
void xfs_trans_stale_inode_buf(xfs_trans_t *, struct xfs_buf *);
void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint);
void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *);
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
index fefe1d60377..34654ec6ae1 100644
--- a/fs/xfs/xfs_utils.c
+++ b/fs/xfs/xfs_utils.c
@@ -55,16 +55,13 @@ xfs_get_dir_entry(
xfs_inode_t **ipp)
{
vnode_t *vp;
- bhv_desc_t *bdp;
vp = VNAME_TO_VNODE(dentry);
- bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops);
- if (!bdp) {
- *ipp = NULL;
+
+ *ipp = xfs_vtoi(vp);
+ if (!*ipp)
return XFS_ERROR(ENOENT);
- }
VN_HOLD(vp);
- *ipp = XFS_BHVTOI(bdp);
return 0;
}
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index 7bdbd991ab1..b6ad370fab3 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -53,6 +53,7 @@
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_clnt.h"
+#include "xfs_fsops.h"
STATIC int xfs_sync(bhv_desc_t *, int, cred_t *);
@@ -290,8 +291,8 @@ xfs_start_flags(
mp->m_flags |= XFS_MOUNT_IDELETE;
if (ap->flags & XFSMNT_DIRSYNC)
mp->m_flags |= XFS_MOUNT_DIRSYNC;
- if (ap->flags & XFSMNT_COMPAT_ATTR)
- mp->m_flags |= XFS_MOUNT_COMPAT_ATTR;
+ if (ap->flags & XFSMNT_ATTR2)
+ mp->m_flags |= XFS_MOUNT_ATTR2;
if (ap->flags2 & XFSMNT2_COMPAT_IOSIZE)
mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
@@ -312,6 +313,8 @@ xfs_start_flags(
mp->m_flags |= XFS_MOUNT_NOUUID;
if (ap->flags & XFSMNT_BARRIER)
mp->m_flags |= XFS_MOUNT_BARRIER;
+ else
+ mp->m_flags &= ~XFS_MOUNT_BARRIER;
return 0;
}
@@ -330,10 +333,11 @@ xfs_finish_flags(
/* Fail a mount where the logbuf is smaller then the log stripe */
if (XFS_SB_VERSION_HASLOGV2(&mp->m_sb)) {
- if ((ap->logbufsize == -1) &&
+ if ((ap->logbufsize <= 0) &&
(mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE)) {
mp->m_logbsize = mp->m_sb.sb_logsunit;
- } else if (ap->logbufsize < mp->m_sb.sb_logsunit) {
+ } else if (ap->logbufsize > 0 &&
+ ap->logbufsize < mp->m_sb.sb_logsunit) {
cmn_err(CE_WARN,
"XFS: logbuf size must be greater than or equal to log stripe size");
return XFS_ERROR(EINVAL);
@@ -347,6 +351,10 @@ xfs_finish_flags(
}
}
+ if (XFS_SB_VERSION_HASATTR2(&mp->m_sb)) {
+ mp->m_flags |= XFS_MOUNT_ATTR2;
+ }
+
/*
* prohibit r/w mounts of read-only filesystems
*/
@@ -382,10 +390,6 @@ xfs_finish_flags(
return XFS_ERROR(EINVAL);
}
- if (XFS_SB_VERSION_HASATTR2(&mp->m_sb)) {
- mp->m_flags &= ~XFS_MOUNT_COMPAT_ATTR;
- }
-
return 0;
}
@@ -504,13 +508,13 @@ xfs_mount(
if (error)
goto error2;
+ if ((mp->m_flags & XFS_MOUNT_BARRIER) && !(vfsp->vfs_flag & VFS_RDONLY))
+ xfs_mountfs_check_barriers(mp);
+
error = XFS_IOINIT(vfsp, args, flags);
if (error)
goto error2;
- if ((args->flags & XFSMNT_BARRIER) &&
- !(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY))
- xfs_mountfs_check_barriers(mp);
return 0;
error2:
@@ -655,6 +659,11 @@ xfs_mntupdate(
else
mp->m_flags &= ~XFS_MOUNT_NOATIME;
+ if (args->flags & XFSMNT_BARRIER)
+ mp->m_flags |= XFS_MOUNT_BARRIER;
+ else
+ mp->m_flags &= ~XFS_MOUNT_BARRIER;
+
if ((vfsp->vfs_flag & VFS_RDONLY) &&
!(*flags & MS_RDONLY)) {
vfsp->vfs_flag &= ~VFS_RDONLY;
@@ -1634,6 +1643,7 @@ xfs_vget(
#define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */
#define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and
* unwritten extent conversion */
+#define MNTOPT_NOBARRIER "nobarrier" /* .. disable */
#define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */
#define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */
#define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */
@@ -1680,7 +1690,6 @@ xfs_parseargs(
int iosize;
args->flags2 |= XFSMNT2_COMPAT_IOSIZE;
- args->flags |= XFSMNT_COMPAT_ATTR;
#if 0 /* XXX: off by default, until some remaining issues ironed out */
args->flags |= XFSMNT_IDELETE; /* default to on */
@@ -1806,6 +1815,8 @@ xfs_parseargs(
args->flags |= XFSMNT_NOUUID;
} else if (!strcmp(this_char, MNTOPT_BARRIER)) {
args->flags |= XFSMNT_BARRIER;
+ } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
+ args->flags &= ~XFSMNT_BARRIER;
} else if (!strcmp(this_char, MNTOPT_IKEEP)) {
args->flags &= ~XFSMNT_IDELETE;
} else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
@@ -1815,9 +1826,9 @@ xfs_parseargs(
} else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
args->flags2 |= XFSMNT2_COMPAT_IOSIZE;
} else if (!strcmp(this_char, MNTOPT_ATTR2)) {
- args->flags &= ~XFSMNT_COMPAT_ATTR;
+ args->flags |= XFSMNT_ATTR2;
} else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
- args->flags |= XFSMNT_COMPAT_ATTR;
+ args->flags &= ~XFSMNT_ATTR2;
} else if (!strcmp(this_char, "osyncisdsync")) {
/* no-op, this is now the default */
printk("XFS: osyncisdsync is now the default, option is deprecated.\n");
@@ -1892,7 +1903,6 @@ xfs_showargs(
{ XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID },
{ XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY },
{ XFS_MOUNT_OSYNCISOSYNC, "," MNTOPT_OSYNCISOSYNC },
- { XFS_MOUNT_BARRIER, "," MNTOPT_BARRIER },
{ XFS_MOUNT_IDELETE, "," MNTOPT_NOIKEEP },
{ 0, NULL }
};
@@ -1914,33 +1924,28 @@ xfs_showargs(
if (mp->m_logbufs > 0)
seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
-
if (mp->m_logbsize > 0)
seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);
if (mp->m_logname)
seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
-
if (mp->m_rtname)
seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);
if (mp->m_dalign > 0)
seq_printf(m, "," MNTOPT_SUNIT "=%d",
(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
-
if (mp->m_swidth > 0)
seq_printf(m, "," MNTOPT_SWIDTH "=%d",
(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
- if (!(mp->m_flags & XFS_MOUNT_COMPAT_ATTR))
- seq_printf(m, "," MNTOPT_ATTR2);
-
if (!(mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE))
seq_printf(m, "," MNTOPT_LARGEIO);
+ if (mp->m_flags & XFS_MOUNT_BARRIER)
+ seq_printf(m, "," MNTOPT_BARRIER);
if (!(vfsp->vfs_flag & VFS_32BITINODES))
seq_printf(m, "," MNTOPT_64BITINODE);
-
if (vfsp->vfs_flag & VFS_GRPID)
seq_printf(m, "," MNTOPT_GRPID);
@@ -1959,6 +1964,7 @@ xfs_freeze(
/* Push the superblock and write an unmount record */
xfs_log_unmount_write(mp);
xfs_unmountfs_writesb(mp);
+ xfs_fs_log_dummy(mp);
}
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index e03fa2a3d5e..8076cc981e1 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -15,6 +15,9 @@
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+
+#include <linux/capability.h>
+
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
@@ -182,8 +185,7 @@ xfs_getattr(
break;
}
- vap->va_atime.tv_sec = ip->i_d.di_atime.t_sec;
- vap->va_atime.tv_nsec = ip->i_d.di_atime.t_nsec;
+ vn_atime_to_timespec(vp, &vap->va_atime);
vap->va_mtime.tv_sec = ip->i_d.di_mtime.t_sec;
vap->va_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
vap->va_ctime.tv_sec = ip->i_d.di_ctime.t_sec;
@@ -541,24 +543,6 @@ xfs_setattr(
}
/*
- * Can't set extent size unless the file is marked, or
- * about to be marked as a realtime file.
- *
- * This check will be removed when fixed size extents
- * with buffered data writes is implemented.
- *
- */
- if ((mask & XFS_AT_EXTSIZE) &&
- ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) !=
- vap->va_extsize) &&
- (!((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ||
- ((mask & XFS_AT_XFLAGS) &&
- (vap->va_xflags & XFS_XFLAG_REALTIME))))) {
- code = XFS_ERROR(EINVAL);
- goto error_return;
- }
-
- /*
* Can't change realtime flag if any extents are allocated.
*/
if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
@@ -820,13 +804,17 @@ xfs_setattr(
di_flags |= XFS_DIFLAG_RTINHERIT;
if (vap->va_xflags & XFS_XFLAG_NOSYMLINKS)
di_flags |= XFS_DIFLAG_NOSYMLINKS;
- } else {
+ if (vap->va_xflags & XFS_XFLAG_EXTSZINHERIT)
+ di_flags |= XFS_DIFLAG_EXTSZINHERIT;
+ } else if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
if (vap->va_xflags & XFS_XFLAG_REALTIME) {
di_flags |= XFS_DIFLAG_REALTIME;
ip->i_iocore.io_flags |= XFS_IOCORE_RT;
} else {
ip->i_iocore.io_flags &= ~XFS_IOCORE_RT;
}
+ if (vap->va_xflags & XFS_XFLAG_EXTSIZE)
+ di_flags |= XFS_DIFLAG_EXTSIZE;
}
ip->i_d.di_flags = di_flags;
}
@@ -996,10 +984,6 @@ xfs_readlink(
goto error_return;
}
- if (!(ioflags & IO_INVIS)) {
- xfs_ichgtime(ip, XFS_ICHGTIME_ACC);
- }
-
/*
* See if the symlink is stored inline.
*/
@@ -1231,7 +1215,8 @@ xfs_inactive_free_eofblocks(
xfs_iunlock(ip, XFS_ILOCK_SHARED);
if (!error && (nimaps != 0) &&
- (imap.br_startblock != HOLESTARTBLOCK)) {
+ (imap.br_startblock != HOLESTARTBLOCK ||
+ ip->i_delayed_blks)) {
/*
* Attach the dquots to the inode up front.
*/
@@ -1566,9 +1551,11 @@ xfs_release(
if (ip->i_d.di_nlink != 0) {
if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
- ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) &&
+ ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0 ||
+ ip->i_delayed_blks > 0)) &&
(ip->i_df.if_flags & XFS_IFEXTENTS)) &&
- (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)))) {
+ (!(ip->i_d.di_flags &
+ (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
if ((error = xfs_inactive_free_eofblocks(mp, ip)))
return (error);
/* Update linux inode block count after free above */
@@ -1625,7 +1612,8 @@ xfs_inactive(
* only one with a reference to the inode.
*/
truncate = ((ip->i_d.di_nlink == 0) &&
- ((ip->i_d.di_size != 0) || (ip->i_d.di_nextents > 0)) &&
+ ((ip->i_d.di_size != 0) || (ip->i_d.di_nextents > 0) ||
+ (ip->i_delayed_blks > 0)) &&
((ip->i_d.di_mode & S_IFMT) == S_IFREG));
mp = ip->i_mount;
@@ -1643,10 +1631,12 @@ xfs_inactive(
if (ip->i_d.di_nlink != 0) {
if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
- ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) &&
- (ip->i_df.if_flags & XFS_IFEXTENTS)) &&
- (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)) ||
- (ip->i_delayed_blks != 0))) {
+ ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0 ||
+ ip->i_delayed_blks > 0)) &&
+ (ip->i_df.if_flags & XFS_IFEXTENTS) &&
+ (!(ip->i_d.di_flags &
+ (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) ||
+ (ip->i_delayed_blks != 0)))) {
if ((error = xfs_inactive_free_eofblocks(mp, ip)))
return (VN_INACTIVE_CACHE);
/* Update linux inode block count after free above */
@@ -2590,7 +2580,6 @@ xfs_link(
int cancel_flags;
int committed;
vnode_t *target_dir_vp;
- bhv_desc_t *src_bdp;
int resblks;
char *target_name = VNAME(dentry);
int target_namelen;
@@ -2603,8 +2592,7 @@ xfs_link(
if (VN_ISDIR(src_vp))
return XFS_ERROR(EPERM);
- src_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(src_vp), &xfs_vnodeops);
- sip = XFS_BHVTOI(src_bdp);
+ sip = xfs_vtoi(src_vp);
tdp = XFS_BHVTOI(target_dir_bdp);
mp = tdp->i_mount;
if (XFS_FORCED_SHUTDOWN(mp))
@@ -3237,7 +3225,6 @@ xfs_readdir(
xfs_trans_t *tp = NULL;
int error = 0;
uint lock_mode;
- xfs_off_t start_offset;
vn_trace_entry(BHV_TO_VNODE(dir_bdp), __FUNCTION__,
(inst_t *)__return_address);
@@ -3248,11 +3235,7 @@ xfs_readdir(
}
lock_mode = xfs_ilock_map_shared(dp);
- start_offset = uiop->uio_offset;
error = XFS_DIR_GETDENTS(dp->i_mount, tp, dp, uiop, eofp);
- if (start_offset != uiop->uio_offset) {
- xfs_ichgtime(dp, XFS_ICHGTIME_ACC);
- }
xfs_iunlock_map_shared(dp, lock_mode);
return error;
}
@@ -3829,7 +3812,12 @@ xfs_reclaim(
vn_iowait(vp);
ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
- ASSERT(VN_CACHED(vp) == 0);
+
+ /*
+ * Make sure the atime in the XFS inode is correct before freeing the
+ * Linux inode.
+ */
+ xfs_synchronize_atime(ip);
/* If we have nothing to flush with this inode then complete the
* teardown now, otherwise break the link between the xfs inode
@@ -3999,42 +3987,36 @@ xfs_alloc_file_space(
int alloc_type,
int attr_flags)
{
+ xfs_mount_t *mp = ip->i_mount;
+ xfs_off_t count;
xfs_filblks_t allocated_fsb;
xfs_filblks_t allocatesize_fsb;
- int committed;
- xfs_off_t count;
- xfs_filblks_t datablocks;
- int error;
+ xfs_extlen_t extsz, temp;
+ xfs_fileoff_t startoffset_fsb;
xfs_fsblock_t firstfsb;
- xfs_bmap_free_t free_list;
- xfs_bmbt_irec_t *imapp;
- xfs_bmbt_irec_t imaps[1];
- xfs_mount_t *mp;
- int numrtextents;
- int reccount;
- uint resblks;
+ int nimaps;
+ int bmapi_flag;
+ int quota_flag;
int rt;
- int rtextsize;
- xfs_fileoff_t startoffset_fsb;
xfs_trans_t *tp;
- int xfs_bmapi_flags;
+ xfs_bmbt_irec_t imaps[1], *imapp;
+ xfs_bmap_free_t free_list;
+ uint qblocks, resblks, resrtextents;
+ int committed;
+ int error;
vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address);
- mp = ip->i_mount;
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
- /*
- * determine if this is a realtime file
- */
- if ((rt = XFS_IS_REALTIME_INODE(ip)) != 0) {
- if (ip->i_d.di_extsize)
- rtextsize = ip->i_d.di_extsize;
- else
- rtextsize = mp->m_sb.sb_rextsize;
- } else
- rtextsize = 0;
+ rt = XFS_IS_REALTIME_INODE(ip);
+ if (unlikely(rt)) {
+ if (!(extsz = ip->i_d.di_extsize))
+ extsz = mp->m_sb.sb_rextsize;
+ } else {
+ extsz = ip->i_d.di_extsize;
+ }
if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
return error;
@@ -4045,8 +4027,8 @@ xfs_alloc_file_space(
count = len;
error = 0;
imapp = &imaps[0];
- reccount = 1;
- xfs_bmapi_flags = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0);
+ nimaps = 1;
+ bmapi_flag = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0);
startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
allocatesize_fsb = XFS_B_TO_FSB(mp, count);
@@ -4067,43 +4049,51 @@ xfs_alloc_file_space(
}
/*
- * allocate file space until done or until there is an error
+ * Allocate file space until done or until there is an error
*/
retry:
while (allocatesize_fsb && !error) {
+ xfs_fileoff_t s, e;
+
/*
- * determine if reserving space on
- * the data or realtime partition.
+ * Determine space reservations for data/realtime.
*/
- if (rt) {
- xfs_fileoff_t s, e;
-
+ if (unlikely(extsz)) {
s = startoffset_fsb;
- do_div(s, rtextsize);
- s *= rtextsize;
- e = roundup_64(startoffset_fsb + allocatesize_fsb,
- rtextsize);
- numrtextents = (int)(e - s) / mp->m_sb.sb_rextsize;
- datablocks = 0;
+ do_div(s, extsz);
+ s *= extsz;
+ e = startoffset_fsb + allocatesize_fsb;
+ if ((temp = do_mod(startoffset_fsb, extsz)))
+ e += temp;
+ if ((temp = do_mod(e, extsz)))
+ e += extsz - temp;
} else {
- datablocks = allocatesize_fsb;
- numrtextents = 0;
+ s = 0;
+ e = allocatesize_fsb;
+ }
+
+ if (unlikely(rt)) {
+ resrtextents = qblocks = (uint)(e - s);
+ resrtextents /= mp->m_sb.sb_rextsize;
+ resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+ quota_flag = XFS_QMOPT_RES_RTBLKS;
+ } else {
+ resrtextents = 0;
+ resblks = qblocks = \
+ XFS_DIOSTRAT_SPACE_RES(mp, (uint)(e - s));
+ quota_flag = XFS_QMOPT_RES_REGBLKS;
}
/*
- * allocate and setup the transaction
+ * Allocate and setup the transaction.
*/
tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
- resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks);
- error = xfs_trans_reserve(tp,
- resblks,
- XFS_WRITE_LOG_RES(mp),
- numrtextents,
+ error = xfs_trans_reserve(tp, resblks,
+ XFS_WRITE_LOG_RES(mp), resrtextents,
XFS_TRANS_PERM_LOG_RES,
XFS_WRITE_LOG_COUNT);
-
/*
- * check for running out of space
+ * Check for running out of space
*/
if (error) {
/*
@@ -4114,8 +4104,8 @@ retry:
break;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
- error = XFS_TRANS_RESERVE_QUOTA(mp, tp,
- ip->i_udquot, ip->i_gdquot, resblks, 0, 0);
+ error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip,
+ qblocks, 0, quota_flag);
if (error)
goto error1;
@@ -4123,19 +4113,19 @@ retry:
xfs_trans_ihold(tp, ip);
/*
- * issue the bmapi() call to allocate the blocks
+ * Issue the xfs_bmapi() call to allocate the blocks
*/
XFS_BMAP_INIT(&free_list, &firstfsb);
error = xfs_bmapi(tp, ip, startoffset_fsb,
- allocatesize_fsb, xfs_bmapi_flags,
- &firstfsb, 0, imapp, &reccount,
+ allocatesize_fsb, bmapi_flag,
+ &firstfsb, 0, imapp, &nimaps,
&free_list);
if (error) {
goto error0;
}
/*
- * complete the transaction
+ * Complete the transaction
*/
error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
if (error) {
@@ -4150,7 +4140,7 @@ retry:
allocated_fsb = imapp->br_blockcount;
- if (reccount == 0) {
+ if (nimaps == 0) {
error = XFS_ERROR(ENOSPC);
break;
}
@@ -4173,9 +4163,11 @@ dmapi_enospc_check:
return error;
- error0:
+error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
xfs_bmap_cancel(&free_list);
- error1:
+ XFS_TRANS_UNRESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag);
+
+error1: /* Just cancel transaction */
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
goto dmapi_enospc_check;
@@ -4420,8 +4412,8 @@ xfs_free_file_space(
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
error = XFS_TRANS_RESERVE_QUOTA(mp, tp,
- ip->i_udquot, ip->i_gdquot, resblks, 0, rt ?
- XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
+ ip->i_udquot, ip->i_gdquot, resblks, 0,
+ XFS_QMOPT_RES_REGBLKS);
if (error)
goto error1;
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 6183eab006d..fc77f741308 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -176,6 +176,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
}
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_add_unless(v, a, u) \
({ \
@@ -216,4 +217,5 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
+#include <asm-generic/atomic.h>
#endif /* _ALPHA_ATOMIC_H */
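The new atomic_xchg() sits alongside atomic_cmpxchg() as the primitives the generic atomic helpers (and the new mutex code) build on. As a usage sketch, the classic compare-and-swap retry loop that atomic_add_unless() is conventionally built from looks roughly like this (illustration only, not part of the patch):

    /* Add 'a' to *v unless the counter equals 'u'; returns non-zero if
     * the add happened.  Retries whenever another CPU raced us. */
    static inline int add_unless_sketch(atomic_t *v, int a, int u)
    {
            int c, old;

            c = atomic_read(v);
            for (;;) {
                    if (unlikely(c == u))
                            break;
                    old = atomic_cmpxchg(v, c, c + a);
                    if (likely(old == c))
                            break;
                    c = old;
            }
            return c != u;
    }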
diff --git a/include/asm-alpha/cache.h b/include/asm-alpha/cache.h
index e69b29501a5..e6d4d1695e2 100644
--- a/include/asm-alpha/cache.h
+++ b/include/asm-alpha/cache.h
@@ -20,6 +20,5 @@
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
#define SMP_CACHE_BYTES L1_CACHE_BYTES
-#define L1_CACHE_SHIFT_MAX L1_CACHE_SHIFT
#endif
diff --git a/include/asm-alpha/compiler.h b/include/asm-alpha/compiler.h
index 0a4a8b40dfc..00c6f57ad9a 100644
--- a/include/asm-alpha/compiler.h
+++ b/include/asm-alpha/compiler.h
@@ -98,9 +98,7 @@
#undef inline
#undef __inline__
#undef __inline
-#if __GNUC__ == 3 && __GNUC_MINOR__ >= 1 || __GNUC__ > 3
#undef __always_inline
#define __always_inline inline __attribute__((always_inline))
-#endif
#endif /* __ALPHA_COMPILER_H */
diff --git a/include/asm-alpha/dma-mapping.h b/include/asm-alpha/dma-mapping.h
index 680f7ecbb28..9dc7256cf97 100644
--- a/include/asm-alpha/dma-mapping.h
+++ b/include/asm-alpha/dma-mapping.h
@@ -16,7 +16,7 @@
#define dma_free_coherent(dev, size, va, addr) \
pci_free_consistent(alpha_gendev_to_pci(dev), size, va, addr)
#define dma_map_page(dev, page, off, size, dir) \
- pci_map_single(alpha_gendev_to_pci(dev), page, off, size, dir)
+ pci_map_page(alpha_gendev_to_pci(dev), page, off, size, dir)
#define dma_unmap_page(dev, addr, size, dir) \
pci_unmap_page(alpha_gendev_to_pci(dev), addr, size, dir)
#define dma_map_sg(dev, sg, nents, dir) \
diff --git a/include/asm-alpha/futex.h b/include/asm-alpha/futex.h
index 9feff4ce142..6a332a9f099 100644
--- a/include/asm-alpha/futex.h
+++ b/include/asm-alpha/futex.h
@@ -1,53 +1,6 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
-#ifdef __KERNEL__
+#include <asm-generic/futex.h>
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- inc_preempt_count();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- dec_preempt_count();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-#endif
#endif
diff --git a/include/asm-alpha/hardirq.h b/include/asm-alpha/hardirq.h
index c0593f9b21e..7bb6a36c96a 100644
--- a/include/asm-alpha/hardirq.h
+++ b/include/asm-alpha/hardirq.h
@@ -13,6 +13,8 @@ typedef struct {
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+void ack_bad_irq(unsigned int irq);
+
#define HARDIRQ_BITS 12
/*
diff --git a/include/asm-alpha/mman.h b/include/asm-alpha/mman.h
index eb9c279045e..f6439532a26 100644
--- a/include/asm-alpha/mman.h
+++ b/include/asm-alpha/mman.h
@@ -42,6 +42,7 @@
#define MADV_WILLNEED 3 /* will need these pages */
#define MADV_SPACEAVAIL 5 /* ensure resources are available */
#define MADV_DONTNEED 6 /* don't need these pages */
+#define MADV_REMOVE 7 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index a714d0cdc20..6f92482cc96 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -156,7 +156,7 @@ ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
/* Always update the PCB ASN. Another thread may have allocated
a new mm->context (via flush_tlb_mm) without the ASN serial
number wrapping. We have no way to detect when this is needed. */
- next->thread_info->pcb.asn = mmc & HARDWARE_ASN_MASK;
+ task_thread_info(next)->pcb.asn = mmc & HARDWARE_ASN_MASK;
}
__EXTERN_INLINE void
@@ -235,7 +235,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
if (cpu_online(i))
mm->context[i] = 0;
if (tsk != current)
- tsk->thread_info->pcb.ptbr
+ task_thread_info(tsk)->pcb.ptbr
= ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
return 0;
}
@@ -249,7 +249,7 @@ destroy_context(struct mm_struct *mm)
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
- tsk->thread_info->pcb.ptbr
+ task_thread_info(tsk)->pcb.ptbr
= ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
}
diff --git a/include/asm-alpha/mutex.h b/include/asm-alpha/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/include/asm-alpha/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
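For context, the asm-generic/mutex-dec.h fastpath selected here reduces locking to one atomic decrement, with a slowpath call only when the count goes negative. A rough sketch of the idea (assumption: this mirrors the generic helper, it is not defined in this header):

    /* Lock fastpath: a 1 -> 0 transition means the mutex was free;
     * any negative result means contention, so punt to fail_fn(). */
    static inline void
    mutex_fastpath_lock_sketch(atomic_t *count, void (*fail_fn)(atomic_t *))
    {
            if (unlikely(atomic_dec_return(count) < 0))
                    fail_fn(count);
    }

The alternative mentioned in the TODO, asm-generic/mutex-xchg.h, implements the same contract with atomic_xchg() instead, which is why atomic_xchg() was added to asm-alpha/atomic.h above.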
diff --git a/include/asm-alpha/processor.h b/include/asm-alpha/processor.h
index 059780a7d3d..425b7b6d28c 100644
--- a/include/asm-alpha/processor.h
+++ b/include/asm-alpha/processor.h
@@ -52,19 +52,10 @@ extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
unsigned long get_wchan(struct task_struct *p);
-/* See arch/alpha/kernel/ptrace.c for details. */
-#define PT_REG(reg) \
- (PAGE_SIZE*2 - sizeof(struct pt_regs) + offsetof(struct pt_regs, reg))
-
-#define SW_REG(reg) \
- (PAGE_SIZE*2 - sizeof(struct pt_regs) - sizeof(struct switch_stack) \
- + offsetof(struct switch_stack, reg))
-
-#define KSTK_EIP(tsk) \
- (*(unsigned long *)(PT_REG(pc) + (unsigned long) ((tsk)->thread_info)))
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk) \
- ((tsk) == current ? rdusp() : (tsk)->thread_info->pcb.usp)
+ ((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)
#define cpu_relax() barrier()
@@ -77,7 +68,6 @@ unsigned long get_wchan(struct task_struct *p);
#define spin_lock_prefetch(lock) do { } while (0)
#endif
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
extern inline void prefetch(const void *ptr)
{
__builtin_prefetch(ptr, 0, 3);
@@ -95,24 +85,4 @@ extern inline void spin_lock_prefetch(const void *ptr)
}
#endif
-#else
-extern inline void prefetch(const void *ptr)
-{
- __asm__ ("ldl $31,%0" : : "m"(*(char *)ptr));
-}
-
-extern inline void prefetchw(const void *ptr)
-{
- __asm__ ("ldq $31,%0" : : "m"(*(char *)ptr));
-}
-
-#ifdef CONFIG_SMP
-extern inline void spin_lock_prefetch(const void *ptr)
-{
- __asm__ ("ldq $31,%0" : : "m"(*(char *)ptr));
-}
-#endif
-
-#endif /* GCC 3.1 */
-
#endif /* __ASM_ALPHA_PROCESSOR_H */
diff --git a/include/asm-alpha/ptrace.h b/include/asm-alpha/ptrace.h
index 072375c135b..9933b8b3612 100644
--- a/include/asm-alpha/ptrace.h
+++ b/include/asm-alpha/ptrace.h
@@ -75,10 +75,10 @@ struct switch_stack {
#define profile_pc(regs) instruction_pointer(regs)
extern void show_regs(struct pt_regs *);
-#define alpha_task_regs(task) \
- ((struct pt_regs *) ((long) (task)->thread_info + 2*PAGE_SIZE) - 1)
+#define task_pt_regs(task) \
+ ((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1)
-#define force_successful_syscall_return() (alpha_task_regs(current)->r0 = 0)
+#define force_successful_syscall_return() (task_pt_regs(current)->r0 = 0)
#endif
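task_pt_regs() now expresses the long-standing layout directly: the saved user registers sit at the top of the task's two-page kernel stack. A tiny usage sketch (not part of the patch) of the kind of accessor this enables:

    /* Read the saved user-mode PC of a (stopped) task. */
    static inline unsigned long task_user_pc_sketch(struct task_struct *tsk)
    {
            return task_pt_regs(tsk)->pc;
    }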
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index 050e86d1289..cc9c7e8cced 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -131,15 +131,25 @@ struct el_common_EV6_mcheck {
extern void halt(void) __attribute__((noreturn));
#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))
-#define switch_to(P,N,L) \
- do { \
- (L) = alpha_switch_to(virt_to_phys(&(N)->thread_info->pcb), (P)); \
- check_mmu_context(); \
+#define switch_to(P,N,L) \
+ do { \
+ (L) = alpha_switch_to(virt_to_phys(&task_thread_info(N)->pcb), (P)); \
+ check_mmu_context(); \
} while (0)
struct task_struct;
extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
#define imb() \
__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
diff --git a/include/asm-alpha/thread_info.h b/include/asm-alpha/thread_info.h
index d51491ed00b..69ffd93f8e2 100644
--- a/include/asm-alpha/thread_info.h
+++ b/include/asm-alpha/thread_info.h
@@ -54,8 +54,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
#define alloc_thread_info(tsk) \
((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#endif /* __ASSEMBLY__ */
diff --git a/include/asm-arm/arch-aaec2000/dma.h b/include/asm-arm/arch-aaec2000/dma.h
index 28c890b4a1d..e100b1e526f 100644
--- a/include/asm-arm/arch-aaec2000/dma.h
+++ b/include/asm-arm/arch-aaec2000/dma.h
@@ -7,11 +7,3 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
-#ifndef __ASM_ARCH_DMA_H
-#define __ASM_ARCH_DMA_H
-
-#define MAX_DMA_ADDRESS 0xffffffff
-#define MAX_DMA_CHANNELS 0
-
-#endif
diff --git a/include/asm-arm/arch-at91rm9200/at91rm9200.h b/include/asm-arm/arch-at91rm9200/at91rm9200.h
new file mode 100644
index 00000000000..58f40931a5c
--- /dev/null
+++ b/include/asm-arm/arch-at91rm9200/at91rm9200.h
@@ -0,0 +1,261 @@
+/*
+ * include/asm-arm/arch-at91rm9200/at91rm9200.h
+ *
+ * Copyright (C) 2005 Ivan Kokshaysky
+ * Copyright (C) SAN People
+ *
+ * Common definitions.
+ * Based on AT91RM9200 datasheet revision E.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef AT91RM9200_H
+#define AT91RM9200_H
+
+/*
+ * Peripheral identifiers/interrupts.
+ */
+#define AT91_ID_FIQ 0 /* Advanced Interrupt Controller (FIQ) */
+#define AT91_ID_SYS 1 /* System Peripheral */
+#define AT91_ID_PIOA 2 /* Parallel IO Controller A */
+#define AT91_ID_PIOB 3 /* Parallel IO Controller B */
+#define AT91_ID_PIOC 4 /* Parallel IO Controller C */
+#define AT91_ID_PIOD 5 /* Parallel IO Controller D */
+#define AT91_ID_US0 6 /* USART 0 */
+#define AT91_ID_US1 7 /* USART 1 */
+#define AT91_ID_US2 8 /* USART 2 */
+#define AT91_ID_US3 9 /* USART 3 */
+#define AT91_ID_MCI 10 /* Multimedia Card Interface */
+#define AT91_ID_UDP 11 /* USB Device Port */
+#define AT91_ID_TWI 12 /* Two-Wire Interface */
+#define AT91_ID_SPI 13 /* Serial Peripheral Interface */
+#define AT91_ID_SSC0 14 /* Serial Synchronous Controller 0 */
+#define AT91_ID_SSC1 15 /* Serial Synchronous Controller 1 */
+#define AT91_ID_SSC2 16 /* Serial Synchronous Controller 2 */
+#define AT91_ID_TC0 17 /* Timer Counter 0 */
+#define AT91_ID_TC1 18 /* Timer Counter 1 */
+#define AT91_ID_TC2 19 /* Timer Counter 2 */
+#define AT91_ID_TC3 20 /* Timer Counter 3 */
+#define AT91_ID_TC4 21 /* Timer Counter 4 */
+#define AT91_ID_TC5 22 /* Timer Counter 5 */
+#define AT91_ID_UHP 23 /* USB Host port */
+#define AT91_ID_EMAC 24 /* Ethernet MAC */
+#define AT91_ID_IRQ0 25 /* Advanced Interrupt Controller (IRQ0) */
+#define AT91_ID_IRQ1 26 /* Advanced Interrupt Controller (IRQ1) */
+#define AT91_ID_IRQ2 27 /* Advanced Interrupt Controller (IRQ2) */
+#define AT91_ID_IRQ3 28 /* Advanced Interrupt Controller (IRQ3) */
+#define AT91_ID_IRQ4 29 /* Advanced Interrupt Controller (IRQ4) */
+#define AT91_ID_IRQ5 30 /* Advanced Interrupt Controller (IRQ5) */
+#define AT91_ID_IRQ6 31 /* Advanced Interrupt Controller (IRQ6) */
+
+
+/*
+ * Peripheral physical base addresses.
+ */
+#define AT91_BASE_TCB0 0xfffa0000
+#define AT91_BASE_TC0 0xfffa0000
+#define AT91_BASE_TC1 0xfffa0040
+#define AT91_BASE_TC2 0xfffa0080
+#define AT91_BASE_TCB1 0xfffa4000
+#define AT91_BASE_TC3 0xfffa4000
+#define AT91_BASE_TC4 0xfffa4040
+#define AT91_BASE_TC5 0xfffa4080
+#define AT91_BASE_UDP 0xfffb0000
+#define AT91_BASE_MCI 0xfffb4000
+#define AT91_BASE_TWI 0xfffb8000
+#define AT91_BASE_EMAC 0xfffbc000
+#define AT91_BASE_US0 0xfffc0000
+#define AT91_BASE_US1 0xfffc4000
+#define AT91_BASE_US2 0xfffc8000
+#define AT91_BASE_US3 0xfffcc000
+#define AT91_BASE_SSC0 0xfffd0000
+#define AT91_BASE_SSC1 0xfffd4000
+#define AT91_BASE_SSC2 0xfffd8000
+#define AT91_BASE_SPI 0xfffe0000
+#define AT91_BASE_SYS 0xfffff000
+
+
+/*
+ * PIO pin definitions (peripheral A/B multiplexing).
+ */
+#define AT91_PA0_MISO (1 << 0) /* A: SPI Master-In Slave-Out */
+#define AT91_PA0_PCK3 (1 << 0) /* B: PMC Programmable Clock Output 3 */
+#define AT91_PA1_MOSI (1 << 1) /* A: SPI Master-Out Slave-In */
+#define AT91_PA1_PCK0 (1 << 1) /* B: PMC Programmable Clock Output 0 */
+#define AT91_PA2_SPCK (1 << 2) /* A: SPI Serial Clock */
+#define AT91_PA2_IRQ4 (1 << 2) /* B: External Interrupt 4 */
+#define AT91_PA3_NPCS0 (1 << 3) /* A: SPI Peripheral Chip Select 0 */
+#define AT91_PA3_IRQ5 (1 << 3) /* B: External Interrupt 5 */
+#define AT91_PA4_NPCS1 (1 << 4) /* A: SPI Peripheral Chip Select 1 */
+#define AT91_PA4_PCK1 (1 << 4) /* B: PMC Programmable Clock Output 1 */
+#define AT91_PA5_NPCS2 (1 << 5) /* A: SPI Peripheral Chip Select 2 */
+#define AT91_PA5_TXD3 (1 << 5) /* B: USART Transmit Data 3 */
+#define AT91_PA6_NPCS3 (1 << 6) /* A: SPI Peripheral Chip Select 3 */
+#define AT91_PA6_RXD3 (1 << 6) /* B: USART Receive Data 3 */
+#define AT91_PA7_ETXCK_EREFCK (1 << 7) /* A: Ethernet Reference Clock / Transmit Clock */
+#define AT91_PA7_PCK2 (1 << 7) /* B: PMC Programmable Clock Output 2 */
+#define AT91_PA8_ETXEN (1 << 8) /* A: Ethernet Transmit Enable */
+#define AT91_PA8_MCCDB (1 << 8) /* B: MMC Multimedia Card B Command */
+#define AT91_PA9_ETX0 (1 << 9) /* A: Ethernet Transmit Data 0 */
+#define AT91_PA9_MCDB0 (1 << 9) /* B: MMC Multimedia Card B Data 0 */
+#define AT91_PA10_ETX1 (1 << 10) /* A: Ethernet Transmit Data 1 */
+#define AT91_PA10_MCDB1 (1 << 10) /* B: MMC Multimedia Card B Data 1 */
+#define AT91_PA11_ECRS_ECRSDV (1 << 11) /* A: Ethernet Carrier Sense / Data Valid */
+#define AT91_PA11_MCDB2 (1 << 11) /* B: MMC Multimedia Card B Data 2 */
+#define AT91_PA12_ERX0 (1 << 12) /* A: Ethernet Receive Data 0 */
+#define AT91_PA12_MCDB3 (1 << 12) /* B: MMC Multimedia Card B Data 3 */
+#define AT91_PA13_ERX1 (1 << 13) /* A: Ethernet Receive Data 1 */
+#define AT91_PA13_TCLK0 (1 << 13) /* B: TC External Clock Input 0 */
+#define AT91_PA14_ERXER (1 << 14) /* A: Ethernet Receive Error */
+#define AT91_PA14_TCLK1 (1 << 14) /* B: TC External Clock Input 1 */
+#define AT91_PA15_EMDC (1 << 15) /* A: Ethernet Management Data Clock */
+#define AT91_PA15_TCLK2 (1 << 15) /* B: TC External Clock Input 2 */
+#define AT91_PA16_EMDIO (1 << 16) /* A: Ethernet Management Data I/O */
+#define AT91_PA16_IRQ6 (1 << 16) /* B: External Interrupt 6 */
+#define AT91_PA17_TXD0 (1 << 17) /* A: USART Transmit Data 0 */
+#define AT91_PA17_TIOA0 (1 << 17) /* B: TC I/O Line A 0 */
+#define AT91_PA18_RXD0 (1 << 18) /* A: USART Receive Data 0 */
+#define AT91_PA18_TIOB0 (1 << 18) /* B: TC I/O Line B 0 */
+#define AT91_PA19_SCK0 (1 << 19) /* A: USART Serial Clock 0 */
+#define AT91_PA19_TIOA1 (1 << 19) /* B: TC I/O Line A 1 */
+#define AT91_PA20_CTS0 (1 << 20) /* A: USART Clear To Send 0 */
+#define AT91_PA20_TIOB1 (1 << 20) /* B: TC I/O Line B 1 */
+#define AT91_PA21_RTS0 (1 << 21) /* A: USART Ready To Send 0 */
+#define AT91_PA21_TIOA2 (1 << 21) /* B: TC I/O Line A 2 */
+#define AT91_PA22_RXD2 (1 << 22) /* A: USART Receive Data 2 */
+#define AT91_PA22_TIOB2 (1 << 22) /* B: TC I/O Line B 2 */
+#define AT91_PA23_TXD2 (1 << 23) /* A: USART Transmit Data 2 */
+#define AT91_PA23_IRQ3 (1 << 23) /* B: External Interrupt 3 */
+#define AT91_PA24_SCK2 (1 << 24) /* A: USART Serial Clock 2 */
+#define AT91_PA24_PCK1 (1 << 24) /* B: PMC Programmable Clock Output 1 */
+#define AT91_PA25_TWD (1 << 25) /* A: TWI Two-wire Serial Data */
+#define AT91_PA25_IRQ2 (1 << 25) /* B: External Interrupt 2 */
+#define AT91_PA26_TWCK (1 << 26) /* A: TWI Two-wire Serial Clock */
+#define AT91_PA26_IRQ1 (1 << 26) /* B: External Interrupt 1 */
+#define AT91_PA27_MCCK (1 << 27) /* A: MMC Multimedia Card Clock */
+#define AT91_PA27_TCLK3 (1 << 27) /* B: TC External Clock Input 3 */
+#define AT91_PA28_MCCDA (1 << 28) /* A: MMC Multimedia Card A Command */
+#define AT91_PA28_TCLK4 (1 << 28) /* B: TC External Clock Input 4 */
+#define AT91_PA29_MCDA0 (1 << 29) /* A: MMC Multimedia Card A Data 0 */
+#define AT91_PA29_TCLK5 (1 << 29) /* B: TC External Clock Input 5 */
+#define AT91_PA30_DRXD (1 << 30) /* A: DBGU Receive Data */
+#define AT91_PA30_CTS2 (1 << 30) /* B: USART Clear To Send 2 */
+#define AT91_PA31_DTXD (1 << 31) /* A: DBGU Transmit Data */
+#define AT91_PA31_RTS2 (1 << 31) /* B: USART Ready To Send 2 */
+
+#define AT91_PB0_TF0 (1 << 0) /* A: SSC Transmit Frame Sync 0 */
+#define AT91_PB0_RTS3 (1 << 0) /* B: USART Ready To Send 3 */
+#define AT91_PB1_TK0 (1 << 1) /* A: SSC Transmit Clock 0 */
+#define AT91_PB1_CTS3 (1 << 1) /* B: USART Clear To Send 3 */
+#define AT91_PB2_TD0 (1 << 2) /* A: SSC Transmit Data 0 */
+#define AT91_PB2_SCK3 (1 << 2) /* B: USART Serial Clock 3 */
+#define AT91_PB3_RD0 (1 << 3) /* A: SSC Receive Data 0 */
+#define AT91_PB3_MCDA1 (1 << 3) /* B: MMC Multimedia Card A Data 1 */
+#define AT91_PB4_RK0 (1 << 4) /* A: SSC Receive Clock 0 */
+#define AT91_PB4_MCDA2 (1 << 4) /* B: MMC Multimedia Card A Data 2 */
+#define AT91_PB5_RF0 (1 << 5) /* A: SSC Receive Frame Sync 0 */
+#define AT91_PB5_MCDA3 (1 << 5) /* B: MMC Multimedia Card A Data 3 */
+#define AT91_PB6_TF1 (1 << 6) /* A: SSC Transmit Frame Sync 1 */
+#define AT91_PB6_TIOA3 (1 << 6) /* B: TC I/O Line A 3 */
+#define AT91_PB7_TK1 (1 << 7) /* A: SSC Transmit Clock 1 */
+#define AT91_PB7_TIOB3 (1 << 7) /* B: TC I/O Line B 3 */
+#define AT91_PB8_TD1 (1 << 8) /* A: SSC Transmit Data 1 */
+#define AT91_PB8_TIOA4 (1 << 8) /* B: TC I/O Line A 4 */
+#define AT91_PB9_RD1 (1 << 9) /* A: SSC Receive Data 1 */
+#define AT91_PB9_TIOB4 (1 << 9) /* B: TC I/O Line B 4 */
+#define AT91_PB10_RK1 (1 << 10) /* A: SSC Receive Clock 1 */
+#define AT91_PB10_TIOA5 (1 << 10) /* B: TC I/O Line A 5 */
+#define AT91_PB11_RF1 (1 << 11) /* A: SSC Receive Frame Sync 1 */
+#define AT91_PB11_TIOB5 (1 << 11) /* B: TC I/O Line B 5 */
+#define AT91_PB12_TF2 (1 << 12) /* A: SSC Transmit Frame Sync 2 */
+#define AT91_PB12_ETX2 (1 << 12) /* B: Ethernet Transmit Data 2 */
+#define AT91_PB13_TK2 (1 << 13) /* A: SSC Transmit Clock 2 */
+#define AT91_PB13_ETX3 (1 << 13) /* B: Ethernet Transmit Data 3 */
+#define AT91_PB14_TD2 (1 << 14) /* A: SSC Transmit Data 2 */
+#define AT91_PB14_ETXER (1 << 14) /* B: Ethernet Transmit Coding Error */
+#define AT91_PB15_RD2 (1 << 15) /* A: SSC Receive Data 2 */
+#define AT91_PB15_ERX2 (1 << 15) /* B: Ethernet Receive Data 2 */
+#define AT91_PB16_RK2 (1 << 16) /* A: SSC Receive Clock 2 */
+#define AT91_PB16_ERX3 (1 << 16) /* B: Ethernet Receive Data 3 */
+#define AT91_PB17_RF2 (1 << 17) /* A: SSC Receive Frame Sync 2 */
+#define AT91_PB17_ERXDV (1 << 17) /* B: Ethernet Receive Data Valid */
+#define AT91_PB18_RI1 (1 << 18) /* A: USART Ring Indicator 1 */
+#define AT91_PB18_ECOL (1 << 18) /* B: Ethernet Collision Detected */
+#define AT91_PB19_DTR1 (1 << 19) /* A: USART Data Terminal Ready 1 */
+#define AT91_PB19_ERXCK (1 << 19) /* B: Ethernet Receive Clock */
+#define AT91_PB20_TXD1 (1 << 20) /* A: USART Transmit Data 1 */
+#define AT91_PB21_RXD1 (1 << 21) /* A: USART Receive Data 1 */
+#define AT91_PB22_SCK1 (1 << 22) /* A: USART Serial Clock 1 */
+#define AT91_PB23_DCD1 (1 << 23) /* A: USART Data Carrier Detect 1 */
+#define AT91_PB24_CTS1 (1 << 24) /* A: USART Clear To Send 1 */
+#define AT91_PB25_DSR1 (1 << 25) /* A: USART Data Set Ready 1 */
+#define AT91_PB25_EF100 (1 << 25) /* B: Ethernet Force 100 Mbit */
+#define AT91_PB26_RTS1 (1 << 26) /* A: USART Ready To Send 1 */
+#define AT91_PB27_PCK0 (1 << 27) /* B: PMC Programmable Clock Output 0 */
+#define AT91_PB28_FIQ (1 << 28) /* A: Fast Interrupt */
+#define AT91_PB29_IRQ0 (1 << 29) /* A: External Interrupt 0 */
+
+#define AT91_PC0_BFCK (1 << 0) /* A: Burst Flash Clock */
+#define AT91_PC1_BFRDY_SMOE (1 << 1) /* A: Burst Flash Ready / SmartMedia Output Enable */
+#define AT91_PC2_BFAVD (1 << 2) /* A: Burst Flash Address Valid */
+#define AT91_PC3_BFBAA_SMWE (1 << 3) /* A: Burst Flash Address Advance / SmartMedia Write Enable */
+#define AT91_PC4_BFOE (1 << 4) /* A: Burst Flash Output Enable */
+#define AT91_PC5_BFWE (1 << 5) /* A: Burst Flash Write Enable */
+#define AT91_PC6_NWAIT (1 << 6) /* A: SMC Wait Signal */
+#define AT91_PC7_A23 (1 << 7) /* A: Address Bus 23 */
+#define AT91_PC8_A24 (1 << 8) /* A: Address Bus 24 */
+#define AT91_PC9_A25_CFRNW (1 << 9) /* A: Address Bus 25 / Compact Flash Read Not Write */
+#define AT91_PC10_NCS4_CFCS (1 << 10) /* A: SMC Chip Select 4 / Compact Flash Chip Select */
+#define AT91_PC11_NCS5_CFCE1 (1 << 11) /* A: SMC Chip Select 5 / Compact Flash Chip Enable 1 */
+#define AT91_PC12_NCS6_CFCE2 (1 << 12) /* A: SMC Chip Select 6 / Compact Flash Chip Enable 2 */
+#define AT91_PC13_NCS7 (1 << 13) /* A: Chip Select 7 */
+
+#define AT91_PD0_ETX0 (1 << 0) /* A: Ethernet Transmit Data 0 */
+#define AT91_PD1_ETX1 (1 << 1) /* A: Ethernet Transmit Data 1 */
+#define AT91_PD2_ETX2 (1 << 2) /* A: Ethernet Transmit Data 2 */
+#define AT91_PD3_ETX3 (1 << 3) /* A: Ethernet Transmit Data 3 */
+#define AT91_PD4_ETXEN (1 << 4) /* A: Ethernet Transmit Enable */
+#define AT91_PD5_ETXER (1 << 5) /* A: Ethernet Transmit Coding Error */
+#define AT91_PD6_DTXD (1 << 6) /* A: DBGU Transmit Data */
+#define AT91_PD7_PCK0 (1 << 7) /* A: PMC Programmable Clock Output 0 */
+#define AT91_PD7_TSYNC (1 << 7) /* B: ETM Trace Synchronization Signal */
+#define AT91_PD8_PCK1 (1 << 8) /* A: PMC Programmable Clock Output 1 */
+#define AT91_PD8_TCLK (1 << 8) /* B: ETM Trace Clock */
+#define AT91_PD9_PCK2 (1 << 9) /* A: PMC Programmable Clock Output 2 */
+#define AT91_PD9_TPS0 (1 << 9) /* B: ETM Trace ARM Pipeline Status 0 */
+#define AT91_PD10_PCK3 (1 << 10) /* A: PMC Programmable Clock Output 3 */
+#define AT91_PD10_TPS1 (1 << 10) /* B: ETM Trace ARM Pipeline Status 1 */
+#define AT91_PD11_TPS2 (1 << 11) /* B: ETM Trace ARM Pipeline Status 2 */
+#define AT91_PD12_TPK0 (1 << 12) /* B: ETM Trace Packet Port 0 */
+#define AT91_PD13_TPK1 (1 << 13) /* B: ETM Trace Packet Port 1 */
+#define AT91_PD14_TPK2 (1 << 14) /* B: ETM Trace Packet Port 2 */
+#define AT91_PD15_TD0 (1 << 15) /* A: SSC Transmit Data 0 */
+#define AT91_PD15_TPK3 (1 << 15) /* B: ETM Trace Packet Port 3 */
+#define AT91_PD16_TD1 (1 << 16) /* A: SSC Transmit Data 1 */
+#define AT91_PD16_TPK4 (1 << 16) /* B: ETM Trace Packet Port 4 */
+#define AT91_PD17_TD2 (1 << 17) /* A: SSC Transmit Data 2 */
+#define AT91_PD17_TPK5 (1 << 17) /* B: ETM Trace Packet Port 5 */
+#define AT91_PD18_NPCS1 (1 << 18) /* A: SPI Peripheral Chip Select 1 */
+#define AT91_PD18_TPK6 (1 << 18) /* B: ETM Trace Packet Port 6 */
+#define AT91_PD19_NPCS2 (1 << 19) /* A: SPI Peripheral Chip Select 2 */
+#define AT91_PD19_TPK7 (1 << 19) /* B: ETM Trace Packet Port 7 */
+#define AT91_PD20_NPCS3 (1 << 20) /* A: SPI Peripheral Chip Select 3 */
+#define AT91_PD20_TPK8 (1 << 20) /* B: ETM Trace Packet Port 8 */
+#define AT91_PD21_RTS0 (1 << 21) /* A: USART Ready To Send 0 */
+#define AT91_PD21_TPK9 (1 << 21) /* B: ETM Trace Packet Port 9 */
+#define AT91_PD22_RTS1 (1 << 22) /* A: USART Ready To Send 1 */
+#define AT91_PD22_TPK10 (1 << 22) /* B: ETM Trace Packet Port 10 */
+#define AT91_PD23_RTS2 (1 << 23) /* A: USART Ready To Send 2 */
+#define AT91_PD23_TPK11 (1 << 23) /* B: ETM Trace Packet Port 11 */
+#define AT91_PD24_RTS3 (1 << 24) /* A: USART Ready To Send 3 */
+#define AT91_PD24_TPK12 (1 << 24) /* B: ETM Trace Packet Port 12 */
+#define AT91_PD25_DTR1 (1 << 25) /* A: USART Data Terminal Ready 1 */
+#define AT91_PD25_TPK13 (1 << 25) /* B: ETM Trace Packet Port 13 */
+#define AT91_PD26_TPK14 (1 << 26) /* B: ETM Trace Packet Port 14 */
+#define AT91_PD27_TPK15 (1 << 27) /* B: ETM Trace Packet Port 15 */
+
+#endif
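A short usage sketch of the peripheral identifiers (not part of the header), assuming the usual AT91 convention that the peripheral ID doubles as the AIC interrupt number seen by Linux:

    #include <linux/interrupt.h>

    /* Hook the USART1 interrupt; AT91_ID_US1 is both the peripheral
     * identifier and the interrupt number under the assumption above. */
    static int attach_usart1_sketch(void *dev_id,
                    irqreturn_t (*isr)(int, void *, struct pt_regs *))
    {
            return request_irq(AT91_ID_US1, isr, 0, "at91_usart1", dev_id);
    }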
diff --git a/include/asm-arm/arch-at91rm9200/at91rm9200_pdc.h b/include/asm-arm/arch-at91rm9200/at91rm9200_pdc.h
new file mode 100644
index 00000000000..ce1150d4438
--- /dev/null
+++ b/include/asm-arm/arch-at91rm9200/at91rm9200_pdc.h
@@ -0,0 +1,36 @@
+/*
+ * include/asm-arm/arch-at91rm9200/at91rm9200_pdc.h
+ *
+ * Copyright (C) 2005 Ivan Kokshaysky
+ * Copyright (C) SAN People
+ *
+ * Peripheral Data Controller (PDC) registers.
+ * Based on AT91RM9200 datasheet revision E.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef AT91RM9200_PDC_H
+#define AT91RM9200_PDC_H
+
+#define AT91_PDC_RPR 0x100 /* Receive Pointer Register */
+#define AT91_PDC_RCR 0x104 /* Receive Counter Register */
+#define AT91_PDC_TPR 0x108 /* Transmit Pointer Register */
+#define AT91_PDC_TCR 0x10c /* Transmit Counter Register */
+#define AT91_PDC_RNPR 0x110 /* Receive Next Pointer Register */
+#define AT91_PDC_RNCR 0x114 /* Receive Next Counter Register */
+#define AT91_PDC_TNPR 0x118 /* Transmit Next Pointer Register */
+#define AT91_PDC_TNCR 0x11c /* Transmit Next Counter Register */
+
+#define AT91_PDC_PTCR 0x120 /* Transfer Control Register */
+#define AT91_PDC_RXTEN (1 << 0) /* Receiver Transfer Enable */
+#define AT91_PDC_RXTDIS (1 << 1) /* Receiver Transfer Disable */
+#define AT91_PDC_TXTEN (1 << 8) /* Transmitter Transfer Enable */
+#define AT91_PDC_TXTDIS (1 << 9) /* Transmitter Transfer Disable */
+
+#define AT91_PDC_PTSR 0x124 /* Transfer Status Register */
+
+#endif
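The PDC is the simple two-stage DMA engine shared by peripherals such as the USARTs, SPI, MCI and SSC; a transfer is started by loading a pointer/counter pair and then enabling the channel through PTCR. A hedged sketch, not part of the header, where 'base' and 'phys' are assumed to be an ioremap()ed peripheral base and a DMA-able buffer address:

    /* Arm a PDC receive of 'len' transfers into the buffer at 'phys'. */
    static void pdc_start_rx_sketch(void __iomem *base,
                                    dma_addr_t phys, unsigned int len)
    {
            __raw_writel(phys, base + AT91_PDC_RPR);                /* buffer address */
            __raw_writel(len,  base + AT91_PDC_RCR);                /* transfer count */
            __raw_writel(AT91_PDC_RXTEN, base + AT91_PDC_PTCR);     /* enable receiver */
    }

The RNPR/RNCR "next" registers let a driver queue a second buffer so reception continues without a gap when the first one fills.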
diff --git a/include/asm-arm/arch-at91rm9200/at91rm9200_sys.h b/include/asm-arm/arch-at91rm9200/at91rm9200_sys.h
new file mode 100644
index 00000000000..9bfffdbf1e0
--- /dev/null
+++ b/include/asm-arm/arch-at91rm9200/at91rm9200_sys.h
@@ -0,0 +1,328 @@
+/*
+ * include/asm-arm/arch-at91rm9200/at91rm9200_sys.h
+ *
+ * Copyright (C) 2005 Ivan Kokshaysky
+ * Copyright (C) SAN People
+ *
+ * System peripherals registers.
+ * Based on AT91RM9200 datasheet revision E.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef AT91RM9200_SYS_H
+#define AT91RM9200_SYS_H
+
+/*
+ * Advanced Interrupt Controller.
+ */
+#define AT91_AIC 0x000
+
+#define AT91_AIC_SMR(n) (AT91_AIC + ((n) * 4)) /* Source Mode Registers 0-31 */
+#define AT91_AIC_PRIOR (7 << 0) /* Priority Level */
+#define AT91_AIC_SRCTYPE (3 << 5) /* Interrupt Source Type */
+#define AT91_AIC_SRCTYPE_LOW (0 << 5)
+#define AT91_AIC_SRCTYPE_FALLING (1 << 5)
+#define AT91_AIC_SRCTYPE_HIGH (2 << 5)
+#define AT91_AIC_SRCTYPE_RISING (3 << 5)
+
+#define AT91_AIC_SVR(n) (AT91_AIC + 0x80 + ((n) * 4)) /* Source Vector Registers 0-31 */
+#define AT91_AIC_IVR (AT91_AIC + 0x100) /* Interrupt Vector Register */
+#define AT91_AIC_FVR (AT91_AIC + 0x104) /* Fast Interrupt Vector Register */
+#define AT91_AIC_ISR (AT91_AIC + 0x108) /* Interrupt Status Register */
+#define AT91_AIC_IRQID (0x1f << 0) /* Current Interrupt Identifier */
+
+#define AT91_AIC_IPR (AT91_AIC + 0x10c) /* Interrupt Pending Register */
+#define AT91_AIC_IMR (AT91_AIC + 0x110) /* Interrupt Mask Register */
+#define AT91_AIC_CISR (AT91_AIC + 0x114) /* Core Interrupt Status Register */
+#define AT91_AIC_NFIQ (1 << 0) /* nFIQ Status */
+#define AT91_AIC_NIRQ (1 << 1) /* nIRQ Status */
+
+#define AT91_AIC_IECR (AT91_AIC + 0x120) /* Interrupt Enable Command Register */
+#define AT91_AIC_IDCR (AT91_AIC + 0x124) /* Interrupt Disable Command Register */
+#define AT91_AIC_ICCR (AT91_AIC + 0x128) /* Interrupt Clear Command Register */
+#define AT91_AIC_ISCR (AT91_AIC + 0x12c) /* Interrupt Set Command Register */
+#define AT91_AIC_EOICR (AT91_AIC + 0x130) /* End of Interrupt Command Register */
+#define AT91_AIC_SPU (AT91_AIC + 0x134) /* Spurious Interrupt Vector Register */
+#define AT91_AIC_DCR (AT91_AIC + 0x138) /* Debug Control Register */
+#define AT91_AIC_DCR_PROT (1 << 0) /* Protection Mode */
+#define AT91_AIC_DCR_GMSK (1 << 1) /* General Mask */
+
+
+/*
+ * Debug Unit.
+ */
+#define AT91_DBGU 0x200
+
+#define AT91_DBGU_CR (AT91_DBGU + 0x00) /* Control Register */
+#define AT91_DBGU_MR (AT91_DBGU + 0x04) /* Mode Register */
+#define AT91_DBGU_IER (AT91_DBGU + 0x08) /* Interrupt Enable Register */
+#define AT91_DBGU_TXRDY (1 << 1) /* Transmitter Ready */
+#define AT91_DBGU_TXEMPTY (1 << 9) /* Transmitter Empty */
+#define AT91_DBGU_IDR (AT91_DBGU + 0x0c) /* Interrupt Disable Register */
+#define AT91_DBGU_IMR (AT91_DBGU + 0x10) /* Interrupt Mask Register */
+#define AT91_DBGU_SR (AT91_DBGU + 0x14) /* Status Register */
+#define AT91_DBGU_RHR (AT91_DBGU + 0x18) /* Receiver Holding Register */
+#define AT91_DBGU_THR (AT91_DBGU + 0x1c) /* Transmitter Holding Register */
+#define AT91_DBGU_BRGR (AT91_DBGU + 0x20) /* Baud Rate Generator Register */
+#define AT91_DBGU_CIDR (AT91_DBGU + 0x40) /* Chip ID Register */
+#define AT91_DBGU_EXID (AT91_DBGU + 0x44) /* Chip ID Extension Register */
+
+
+/*
+ * PIO Controllers.
+ */
+#define AT91_PIOA 0x400
+#define AT91_PIOB 0x600
+#define AT91_PIOC 0x800
+#define AT91_PIOD 0xa00
+
+#define PIO_PER 0x00 /* Enable Register */
+#define PIO_PDR 0x04 /* Disable Register */
+#define PIO_PSR 0x08 /* Status Register */
+#define PIO_OER 0x10 /* Output Enable Register */
+#define PIO_ODR 0x14 /* Output Disable Register */
+#define PIO_OSR 0x18 /* Output Status Register */
+#define PIO_IFER 0x20 /* Glitch Input Filter Enable */
+#define PIO_IFDR 0x24 /* Glitch Input Filter Disable */
+#define PIO_IFSR 0x28 /* Glitch Input Filter Status */
+#define PIO_SODR 0x30 /* Set Output Data Register */
+#define PIO_CODR 0x34 /* Clear Output Data Register */
+#define PIO_ODSR 0x38 /* Output Data Status Register */
+#define PIO_PDSR 0x3c /* Pin Data Status Register */
+#define PIO_IER 0x40 /* Interrupt Enable Register */
+#define PIO_IDR 0x44 /* Interrupt Disable Register */
+#define PIO_IMR 0x48 /* Interrupt Mask Register */
+#define PIO_ISR 0x4c /* Interrupt Status Register */
+#define PIO_MDER 0x50 /* Multi-driver Enable Register */
+#define PIO_MDDR 0x54 /* Multi-driver Disable Register */
+#define PIO_MDSR 0x58 /* Multi-driver Status Register */
+#define PIO_PUDR 0x60 /* Pull-up Disable Register */
+#define PIO_PUER 0x64 /* Pull-up Enable Register */
+#define PIO_PUSR 0x68 /* Pull-up Status Register */
+#define PIO_ASR 0x70 /* Peripheral A Select Register */
+#define PIO_BSR 0x74 /* Peripheral B Select Register */
+#define PIO_ABSR 0x78 /* AB Status Register */
+#define PIO_OWER 0xa0 /* Output Write Enable Register */
+#define PIO_OWDR 0xa4 /* Output Write Disable Register */
+#define PIO_OWSR 0xa8 /* Output Write Status Register */
+
+#define AT91_PIO_P(n) (1 << (n))
+
+
+/*
+ * Power Management Controller.
+ */
+#define AT91_PMC 0xc00
+
+#define AT91_PMC_SCER (AT91_PMC + 0x00) /* System Clock Enable Register */
+#define AT91_PMC_SCDR (AT91_PMC + 0x04) /* System Clock Disable Register */
+
+#define AT91_PMC_SCSR (AT91_PMC + 0x08) /* System Clock Status Register */
+#define AT91_PMC_PCK (1 << 0) /* Processor Clock */
+#define AT91_PMC_UDP (1 << 1) /* USB Device Port Clock */
+#define AT91_PMC_MCKUDP (1 << 2) /* USB Device Port Master Clock Automatic Disable on Suspend */
+#define AT91_PMC_UHP (1 << 4) /* USB Host Port Clock */
+#define AT91_PMC_PCK0 (1 << 8) /* Programmable Clock 0 */
+#define AT91_PMC_PCK1 (1 << 9) /* Programmable Clock 1 */
+#define AT91_PMC_PCK2 (1 << 10) /* Programmable Clock 2 */
+#define AT91_PMC_PCK3 (1 << 11) /* Programmable Clock 3 */
+
+#define AT91_PMC_PCER (AT91_PMC + 0x10) /* Peripheral Clock Enable Register */
+#define AT91_PMC_PCDR (AT91_PMC + 0x14) /* Peripheral Clock Disable Register */
+#define AT91_PMC_PCSR (AT91_PMC + 0x18) /* Peripheral Clock Status Register */
+
+#define AT91_CKGR_MOR (AT91_PMC + 0x20) /* Main Oscillator Register */
+#define AT91_PMC_MOSCEN (1 << 0) /* Main Oscillator Enable */
+#define AT91_PMC_OSCOUNT (0xff << 8) /* Main Oscillator Start-up Time */
+
+#define AT91_CKGR_MCFR (AT91_PMC + 0x24) /* Main Clock Frequency Register */
+#define AT91_PMC_MAINF (0xffff << 0) /* Main Clock Frequency */
+#define AT91_PMC_MAINRDY (1 << 16) /* Main Clock Ready */
+
+#define AT91_CKGR_PLLAR (AT91_PMC + 0x28) /* PLL A Register */
+#define AT91_CKGR_PLLBR (AT91_PMC + 0x2c) /* PLL B Register */
+#define AT91_PMC_DIV (0xff << 0) /* Divider */
+#define AT91_PMC_PLLCOUNT (0x3f << 8) /* PLL Counter */
+#define AT91_PMC_OUT (3 << 14) /* PLL Clock Frequency Range */
+#define AT91_PMC_MUL (0x7ff << 16) /* PLL Multiplier */
+#define AT91_PMC_USB96M (1 << 28) /* Divider by 2 Enable (PLLB only) */
+
+#define AT91_PMC_MCKR (AT91_PMC + 0x30) /* Master Clock Register */
+#define AT91_PMC_CSS (3 << 0) /* Master Clock Selection */
+#define AT91_PMC_CSS_SLOW (0 << 0)
+#define AT91_PMC_CSS_MAIN (1 << 0)
+#define AT91_PMC_CSS_PLLA (2 << 0)
+#define AT91_PMC_CSS_PLLB (3 << 0)
+#define AT91_PMC_PRES (7 << 2) /* Master Clock Prescaler */
+#define AT91_PMC_PRES_1 (0 << 2)
+#define AT91_PMC_PRES_2 (1 << 2)
+#define AT91_PMC_PRES_4 (2 << 2)
+#define AT91_PMC_PRES_8 (3 << 2)
+#define AT91_PMC_PRES_16 (4 << 2)
+#define AT91_PMC_PRES_32 (5 << 2)
+#define AT91_PMC_PRES_64 (6 << 2)
+#define AT91_PMC_MDIV (3 << 8) /* Master Clock Division */
+#define AT91_PMC_MDIV_1 (0 << 8)
+#define AT91_PMC_MDIV_2 (1 << 8)
+#define AT91_PMC_MDIV_3 (2 << 8)
+#define AT91_PMC_MDIV_4 (3 << 8)
+
+#define AT91_PMC_PCKR(n) (AT91_PMC + 0x40 + ((n) * 4)) /* Programmable Clock 0-3 Registers */
+#define AT91_PMC_IER (AT91_PMC + 0x60) /* Interrupt Enable Register */
+#define AT91_PMC_IDR (AT91_PMC + 0x64) /* Interrupt Disable Register */
+#define AT91_PMC_SR (AT91_PMC + 0x68) /* Status Register */
+#define AT91_PMC_MOSCS (1 << 0) /* MOSCS Flag */
+#define AT91_PMC_LOCKA (1 << 1) /* PLLA Lock */
+#define AT91_PMC_LOCKB (1 << 2) /* PLLB Lock */
+#define AT91_PMC_MCKRDY (1 << 3) /* Master Clock */
+#define AT91_PMC_PCK0RDY (1 << 8) /* Programmable Clock 0 */
+#define AT91_PMC_PCK1RDY (1 << 9) /* Programmable Clock 1 */
+#define AT91_PMC_PCK2RDY (1 << 10) /* Programmable Clock 2 */
+#define AT91_PMC_PCK3RDY (1 << 11) /* Programmable Clock 3 */
+#define AT91_PMC_IMR (AT91_PMC + 0x6c) /* Interrupt Mask Register */
+
+
+/*
+ * System Timer.
+ */
+#define AT91_ST 0xd00
+
+#define AT91_ST_CR (AT91_ST + 0x00) /* Control Register */
+#define AT91_ST_WDRST (1 << 0) /* Watchdog Timer Restart */
+#define AT91_ST_PIMR (AT91_ST + 0x04) /* Period Interval Mode Register */
+#define AT91_ST_PIV (0xffff << 0) /* Period Interval Value */
+#define AT91_ST_WDMR (AT91_ST + 0x08) /* Watchdog Mode Register */
+#define AT91_ST_WDV (0xffff << 0) /* Watchdog Counter Value */
+#define AT91_ST_RSTEN (1 << 16) /* Reset Enable */
+#define AT91_ST_EXTEN (1 << 17) /* External Signal Assertion Enable */
+#define AT91_ST_RTMR (AT91_ST + 0x0c) /* Real-time Mode Register */
+#define AT91_ST_RTPRES (0xffff << 0) /* Real-time Prescaler Value */
+#define AT91_ST_SR (AT91_ST + 0x10) /* Status Register */
+#define AT91_ST_PITS (1 << 0) /* Period Interval Timer Status */
+#define AT91_ST_WDOVF (1 << 1) /* Watchdog Overflow */
+#define AT91_ST_RTTINC (1 << 2) /* Real-time Timer Increment */
+#define AT91_ST_ALMS (1 << 3) /* Alarm Status */
+#define AT91_ST_IER (AT91_ST + 0x14) /* Interrupt Enable Register */
+#define AT91_ST_IDR (AT91_ST + 0x18) /* Interrupt Disable Register */
+#define AT91_ST_IMR (AT91_ST + 0x1c) /* Interrupt Mask Register */
+#define AT91_ST_RTAR (AT91_ST + 0x20) /* Real-time Alarm Register */
+#define AT91_ST_ALMV (0xfffff << 0) /* Alarm Value */
+#define AT91_ST_CRTR (AT91_ST + 0x24) /* Current Real-time Register */
+#define AT91_ST_CRTV (0xfffff << 0) /* Current Real-Time Value */
+
+
+/*
+ * Real-time Clock.
+ */
+#define AT91_RTC 0xe00
+
+#define AT91_RTC_CR (AT91_RTC + 0x00) /* Control Register */
+#define AT91_RTC_UPDTIM (1 << 0) /* Update Request Time Register */
+#define AT91_RTC_UPDCAL (1 << 1) /* Update Request Calendar Register */
+#define AT91_RTC_TIMEVSEL (3 << 8) /* Time Event Selection */
+#define AT91_RTC_TIMEVSEL_MINUTE (0 << 8)
+#define AT91_RTC_TIMEVSEL_HOUR (1 << 8)
+#define AT91_RTC_TIMEVSEL_DAY24 (2 << 8)
+#define AT91_RTC_TIMEVSEL_DAY12 (3 << 8)
+#define AT91_RTC_CALEVSEL (3 << 16) /* Calendar Event Selection */
+#define AT91_RTC_CALEVSEL_WEEK (0 << 16)
+#define AT91_RTC_CALEVSEL_MONTH (1 << 16)
+#define AT91_RTC_CALEVSEL_YEAR (2 << 16)
+
+#define AT91_RTC_MR (AT91_RTC + 0x04) /* Mode Register */
+#define AT91_RTC_HRMOD (1 << 0) /* 12/24 Hour Mode */
+
+#define AT91_RTC_TIMR (AT91_RTC + 0x08) /* Time Register */
+#define AT91_RTC_SEC (0x7f << 0) /* Current Second */
+#define AT91_RTC_MIN (0x7f << 8) /* Current Minute */
+#define AT91_RTC_HOUR (0x3f << 16) /* Current Hour */
+#define At91_RTC_AMPM (1 << 22) /* Ante Meridiem Post Meridiem Indicator */
+
+#define AT91_RTC_CALR (AT91_RTC + 0x0c) /* Calendar Register */
+#define AT91_RTC_CENT (0x7f << 0) /* Current Century */
+#define AT91_RTC_YEAR (0xff << 8) /* Current Year */
+#define AT91_RTC_MONTH (0x1f << 16) /* Current Month */
+#define AT91_RTC_DAY (7 << 21) /* Current Day */
+#define AT91_RTC_DATE (0x3f << 24) /* Current Date */
+
+#define AT91_RTC_TIMALR (AT91_RTC + 0x10) /* Time Alarm Register */
+#define AT91_RTC_SECEN (1 << 7) /* Second Alarm Enable */
+#define AT91_RTC_MINEN (1 << 15) /* Minute Alarm Enable */
+#define AT91_RTC_HOUREN (1 << 23) /* Hour Alarm Enable */
+
+#define AT91_RTC_CALALR (AT91_RTC + 0x14) /* Calendar Alarm Register */
+#define AT91_RTC_MTHEN (1 << 23) /* Month Alarm Enable */
+#define AT91_RTC_DATEEN (1 << 31) /* Date Alarm Enable */
+
+#define AT91_RTC_SR (AT91_RTC + 0x18) /* Status Register */
+#define AT91_RTC_ACKUPD (1 << 0) /* Acknowledge for Update */
+#define AT91_RTC_ALARM (1 << 1) /* Alarm Flag */
+#define AT91_RTC_SECEV (1 << 2) /* Second Event */
+#define AT91_RTC_TIMEV (1 << 3) /* Time Event */
+#define AT91_RTC_CALEV (1 << 4) /* Calendar Event */
+
+#define AT91_RTC_SCCR (AT91_RTC + 0x1c) /* Status Clear Command Register */
+#define AT91_RTC_IER (AT91_RTC + 0x20) /* Interrupt Enable Register */
+#define AT91_RTC_IDR (AT91_RTC + 0x24) /* Interrupt Disable Register */
+#define AT91_RTC_IMR (AT91_RTC + 0x28) /* Interrupt Mask Register */
+
+#define AT91_RTC_VER (AT91_RTC + 0x2c) /* Valid Entry Register */
+#define AT91_RTC_NVTIM (1 << 0) /* Non valid Time */
+#define AT91_RTC_NVCAL (1 << 1) /* Non valid Calendar */
+#define AT91_RTC_NVTIMALR (1 << 2) /* Non valid Time Alarm */
+#define AT91_RTC_NVCALALR (1 << 3) /* Non valid Calendar Alarm */
+
+
+/*
+ * Memory Controller.
+ */
+#define AT91_MC 0xf00
+
+#define AT91_MC_RCR (AT91_MC + 0x00) /* MC Remap Control Register */
+#define AT91_MC_RCB (1 << 0) /* Remap Command Bit */
+
+#define AT91_MC_ASR (AT91_MC + 0x04) /* MC Abort Status Register */
+#define AT91_MC_AASR (AT91_MC + 0x08) /* MC Abort Address Status Register */
+#define AT91_MC_MPR (AT91_MC + 0x0c) /* MC Master Priority Register */
+
+/* External Bus Interface (EBI) registers */
+#define AT91_EBI_CSA (AT91_MC + 0x60) /* Chip Select Assignment Register */
+#define AT91_EBI_CS0A (1 << 0) /* Chip Select 0 Assignment */
+#define AT91_EBI_CS0A_SMC (0 << 0)
+#define AT91_EBI_CS0A_BFC (1 << 0)
+#define AT91_EBI_CS1A (1 << 1) /* Chip Select 1 Assignment */
+#define AT91_EBI_CS1A_SMC (0 << 1)
+#define AT91_EBI_CS1A_SDRAMC (1 << 1)
+#define AT91_EBI_CS3A (1 << 3) /* Chip Select 3 Assignment */
+#define AT91_EBI_CS3A_SMC (0 << 3)
+#define AT91_EBI_CS3A_SMC_SMARTMEDIA (1 << 3)
+#define AT91_EBI_CS4A (1 << 4) /* Chip Select 4 Assignment */
+#define AT91_EBI_CS4A_SMC (0 << 4)
+#define AT91_EBI_CS4A_SMC_COMPACTFLASH (1 << 4)
+#define AT91_EBI_CFGR (AT91_MC + 0x64) /* Configuration Register */
+#define AT91_EBI_DBPUC (1 << 0) /* Data Bus Pull-Up Configuration */
+
+/* Static Memory Controller (SMC) registers */
+#define AT91_SMC_CSR(n) (AT91_MC + 0x70 + ((n) * 4))/* SMC Chip Select Register */
+#define AT91_SMC_NWS (0x7f << 0) /* Number of Wait States */
+#define AT91_SMC_WSEN (1 << 7) /* Wait State Enable */
+#define AT91_SMC_TDF (0xf << 8) /* Data Float Time */
+#define AT91_SMC_BAT (1 << 12) /* Byte Access Type */
+#define AT91_SMC_DBW (3 << 13) /* Data Bus Width */
+#define AT91_SMC_DBW_16 (1 << 13)
+#define AT91_SMC_DBW_8 (2 << 13)
+#define AT91_SMC_DPR (1 << 15) /* Data Read Protocol */
+#define AT91_SMC_ACSS (3 << 16) /* Address to Chip Select Setup */
+#define AT91_SMC_ACSS_STD (0 << 16)
+#define AT91_SMC_ACSS_1 (1 << 16)
+#define AT91_SMC_ACSS_2 (2 << 16)
+#define AT91_SMC_ACSS_3 (3 << 16)
+#define AT91_SMC_RWSETUP (7 << 24) /* Read & Write Signal Time Setup */
+#define AT91_SMC_RWHOLD (7 << 28) /* Read & Write Signal Hold Time */
+
+
+#endif
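As a usage sketch of the PIO and pin-multiplexing definitions (not part of the header; 'sys_base' is assumed to be the ioremap()ed AT91_BASE_SYS region), handing pin PA17 to its peripheral A function, USART0 TXD, takes two writes:

    /* Route PA17 to peripheral A (TXD0): select function A, then give
     * the pin to the peripheral by disabling PIO control of it. */
    static void select_txd0_sketch(void __iomem *sys_base)
    {
            __raw_writel(AT91_PA17_TXD0, sys_base + AT91_PIOA + PIO_ASR);
            __raw_writel(AT91_PA17_TXD0, sys_base + AT91_PIOA + PIO_PDR);
    }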
diff --git a/include/asm-arm/arch-at91rm9200/at91rm9200_usart.h b/include/asm-arm/arch-at91rm9200/at91rm9200_usart.h
new file mode 100644
index 00000000000..79f851e31b9
--- /dev/null
+++ b/include/asm-arm/arch-at91rm9200/at91rm9200_usart.h
@@ -0,0 +1,123 @@
+/*
+ * include/asm-arm/arch-at91rm9200/at91rm9200_usart.h
+ *
+ * Copyright (C) 2005 Ivan Kokshaysky
+ * Copyright (C) SAN People
+ *
+ * USART registers.
+ * Based on AT91RM9200 datasheet revision E.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef AT91RM9200_USART_H
+#define AT91RM9200_USART_H
+
+#define AT91_US_CR 0x00 /* Control Register */
+#define AT91_US_RSTRX (1 << 2) /* Reset Receiver */
+#define AT91_US_RSTTX (1 << 3) /* Reset Transmitter */
+#define AT91_US_RXEN (1 << 4) /* Receiver Enable */
+#define AT91_US_RXDIS (1 << 5) /* Receiver Disable */
+#define AT91_US_TXEN (1 << 6) /* Transmitter Enable */
+#define AT91_US_TXDIS (1 << 7) /* Transmitter Disable */
+#define AT91_US_RSTSTA (1 << 8) /* Reset Status Bits */
+#define AT91_US_STTBRK (1 << 9) /* Start Break */
+#define AT91_US_STPBRK (1 << 10) /* Stop Break */
+#define AT91_US_STTTO (1 << 11) /* Start Time-out */
+#define AT91_US_SENDA (1 << 12) /* Send Address */
+#define AT91_US_RSTIT (1 << 13) /* Reset Iterations */
+#define AT91_US_RSTNACK (1 << 14) /* Reset Non Acknowledge */
+#define AT91_US_RETTO (1 << 15) /* Rearm Time-out */
+#define AT91_US_DTREN (1 << 16) /* Data Terminal Ready Enable */
+#define AT91_US_DTRDIS (1 << 17) /* Data Terminal Ready Disable */
+#define AT91_US_RTSEN (1 << 18) /* Request To Send Enable */
+#define AT91_US_RTSDIS (1 << 19) /* Request To Send Disable */
+
+#define AT91_US_MR 0x04 /* Mode Register */
+#define AT91_US_USMODE (0xf << 0) /* Mode of the USART */
+#define AT91_US_USMODE_NORMAL 0
+#define AT91_US_USMODE_RS485 1
+#define AT91_US_USMODE_HWHS 2
+#define AT91_US_USMODE_MODEM 3
+#define AT91_US_USMODE_ISO7816_T0 4
+#define AT91_US_USMODE_ISO7816_T1 6
+#define AT91_US_USMODE_IRDA 8
+#define AT91_US_USCLKS (3 << 4) /* Clock Selection */
+#define AT91_US_CHRL (3 << 6) /* Character Length */
+#define AT91_US_CHRL_5 (0 << 6)
+#define AT91_US_CHRL_6 (1 << 6)
+#define AT91_US_CHRL_7 (2 << 6)
+#define AT91_US_CHRL_8 (3 << 6)
+#define AT91_US_SYNC (1 << 8) /* Synchronous Mode Select */
+#define AT91_US_PAR (7 << 9) /* Parity Type */
+#define AT91_US_PAR_EVEN (0 << 9)
+#define AT91_US_PAR_ODD (1 << 9)
+#define AT91_US_PAR_SPACE (2 << 9)
+#define AT91_US_PAR_MARK (3 << 9)
+#define AT91_US_PAR_NONE (4 << 9)
+#define AT91_US_PAR_MULTI_DROP (6 << 9)
+#define AT91_US_NBSTOP (3 << 12) /* Number of Stop Bits */
+#define AT91_US_NBSTOP_1 (0 << 12)
+#define AT91_US_NBSTOP_1_5 (1 << 12)
+#define AT91_US_NBSTOP_2 (2 << 12)
+#define AT91_US_CHMODE (3 << 14) /* Channel Mode */
+#define AT91_US_CHMODE_NORMAL (0 << 14)
+#define AT91_US_CHMODE_ECHO (1 << 14)
+#define AT91_US_CHMODE_LOC_LOOP (2 << 14)
+#define AT91_US_CHMODE_REM_LOOP (3 << 14)
+#define AT91_US_MSBF (1 << 16) /* Bit Order */
+#define AT91_US_MODE9 (1 << 17) /* 9-bit Character Length */
+#define AT91_US_CLKO (1 << 18) /* Clock Output Select */
+#define AT91_US_OVER (1 << 19) /* Oversampling Mode */
+#define AT91_US_INACK (1 << 20) /* Inhibit Non Acknowledge */
+#define AT91_US_DSNACK (1 << 21) /* Disable Successive NACK */
+#define AT91_US_MAX_ITER (7 << 24) /* Max Iterations */
+#define AT91_US_FILTER (1 << 28) /* Infrared Receive Line Filter */
+
+#define AT91_US_IER 0x08 /* Interrupt Enable Register */
+#define AT91_US_RXRDY (1 << 0) /* Receiver Ready */
+#define AT91_US_TXRDY (1 << 1) /* Transmitter Ready */
+#define AT91_US_RXBRK (1 << 2) /* Break Received / End of Break */
+#define AT91_US_ENDRX (1 << 3) /* End of Receiver Transfer */
+#define AT91_US_ENDTX (1 << 4) /* End of Transmitter Transfer */
+#define AT91_US_OVRE (1 << 5) /* Overrun Error */
+#define AT91_US_FRAME (1 << 6) /* Framing Error */
+#define AT91_US_PARE (1 << 7) /* Parity Error */
+#define AT91_US_TIMEOUT (1 << 8) /* Receiver Time-out */
+#define AT91_US_TXEMPTY (1 << 9) /* Transmitter Empty */
+#define AT91_US_ITERATION (1 << 10) /* Max number of Repetitions Reached */
+#define AT91_US_TXBUFE (1 << 11) /* Transmission Buffer Empty */
+#define AT91_US_RXBUFF (1 << 12) /* Reception Buffer Full */
+#define AT91_US_NACK (1 << 13) /* Non Acknowledge */
+#define AT91_US_RIIC (1 << 16) /* Ring Indicator Input Change */
+#define AT91_US_DSRIC (1 << 17) /* Data Set Ready Input Change */
+#define AT91_US_DCDIC (1 << 18) /* Data Carrier Detect Input Change */
+#define AT91_US_CTSIC (1 << 19) /* Clear to Send Input Change */
+#define AT91_US_RI (1 << 20) /* RI */
+#define AT91_US_DSR (1 << 21) /* DSR */
+#define AT91_US_DCD (1 << 22) /* DCD */
+#define AT91_US_CTS (1 << 23) /* CTS */
+
+#define AT91_US_IDR 0x0c /* Interrupt Disable Register */
+#define AT91_US_IMR 0x10 /* Interrupt Mask Register */
+#define AT91_US_CSR 0x14 /* Channel Status Register */
+#define AT91_US_RHR 0x18 /* Receiver Holding Register */
+#define AT91_US_THR 0x1c /* Transmitter Holding Register */
+
+#define AT91_US_BRGR 0x20 /* Baud Rate Generator Register */
+#define AT91_US_CD (0xffff << 0) /* Clock Divider */
+
+#define AT91_US_RTOR 0x24 /* Receiver Time-out Register */
+#define AT91_US_TO (0xffff << 0) /* Time-out Value */
+
+#define AT91_US_TTGR 0x28 /* Transmitter Timeguard Register */
+#define AT91_US_TG (0xff << 0) /* Timeguard Value */
+
+#define AT91_US_FIDI 0x40 /* FI DI Ratio Register */
+#define AT91_US_NER 0x44 /* Number of Errors Register */
+#define AT91_US_IF 0x4c /* IrDA Filter Register */
+
+#endif
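As a worked example of the US_MR field encodings above (illustrative only, not from the patch), an 8N1 asynchronous configuration is built by OR-ing one value from each field before writing it to the mode register:

	/* normal async mode, 8 data bits, no parity, 1 stop bit */
	unsigned int mr = AT91_US_USMODE_NORMAL | AT91_US_CHRL_8
			| AT91_US_PAR_NONE | AT91_US_NBSTOP_1;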
diff --git a/include/asm-arm/arch-at91rm9200/board.h b/include/asm-arm/arch-at91rm9200/board.h
new file mode 100644
index 00000000000..2e7d1139a79
--- /dev/null
+++ b/include/asm-arm/arch-at91rm9200/board.h
@@ -0,0 +1,80 @@
+/*
+ * include/asm-arm/arch-at91rm9200/board.h
+ *
+ * Copyright (C) 2005 HP Labs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * These are data structures found in platform_device.dev.platform_data,
+ * describing board-specific data needed by drivers. For example,
+ * which pin is used for a given GPIO role.
+ *
+ * In 2.6, drivers should strongly avoid board-specific knowledge so
+ * that supporting new boards normally won't require driver patches.
+ * Most board-specific knowledge should be in arch/.../board-*.c files.
+ */
+
+#ifndef __ASM_ARCH_BOARD_H
+#define __ASM_ARCH_BOARD_H
+
+ /* Clocks */
+extern unsigned long at91_master_clock;
+
+ /* Serial Port */
+extern int at91_serial_map[AT91_NR_UART];
+extern int at91_console_port;
+
+ /* USB Device */
+struct at91_udc_data {
+ u8 vbus_pin; /* high == host powering us */
+ u8 pullup_pin; /* high == D+ pulled up */
+};
+extern void __init at91_add_device_udc(struct at91_udc_data *data);
+
+ /* Compact Flash */
+struct at91_cf_data {
+ u8 irq_pin; /* I/O IRQ */
+ u8 det_pin; /* Card detect */
+ u8 vcc_pin; /* power switching */
+ u8 rst_pin; /* card reset */
+};
+extern void __init at91_add_device_cf(struct at91_cf_data *data);
+
+ /* MMC / SD */
+struct at91_mmc_data {
+ u8 det_pin; /* card detect IRQ */
+ unsigned is_b:1; /* uses B side (vs A) */
+ unsigned wire4:1; /* (SD) supports DAT0..DAT3 */
+ u8 wp_pin; /* (SD) writeprotect detect */
+ u8 vcc_pin; /* power switching (high == on) */
+};
+extern void __init at91_add_device_mmc(struct at91_mmc_data *data);
+
+ /* Ethernet */
+struct at91_eth_data {
+ u8 phy_irq_pin; /* PHY IRQ */
+ u8 is_rmii; /* using RMII interface? */
+};
+extern void __init at91_add_device_eth(struct at91_eth_data *data);
+
+ /* USB Host */
+struct at91_usbh_data {
+ u8 ports; /* number of ports on root hub */
+};
+extern void __init at91_add_device_usbh(struct at91_usbh_data *data);
+
+#endif
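For illustration only, a board-*.c file is expected to fill in one of these structures and pass it to the matching at91_add_device_*() helper during board init; the structure below and its pin choice (AT91_PIN_PC4, from gpio.h later in this patch) are invented for the example:

static struct at91_eth_data __initdata example_eth_data = {
	.phy_irq_pin	= AT91_PIN_PC4,	/* invented PHY interrupt pin */
	.is_rmii	= 0,		/* board wired for MII */
};

static void __init example_board_init(void)
{
	at91_add_device_eth(&example_eth_data);
}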
diff --git a/include/asm-arm/arch-at91rm9200/debug-macro.S b/include/asm-arm/arch-at91rm9200/debug-macro.S
new file mode 100644
index 00000000000..f496b54c4c3
--- /dev/null
+++ b/include/asm-arm/arch-at91rm9200/debug-macro.S
@@ -0,0 +1,38 @@
+/*
+ * include/asm-arm/arch-at91rm9200/debug-macro.S
+ *
+ * Copyright (C) 2003-2005 SAN People
+ *
+ * Debugging macro include header
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+*/
+
+#include <asm/hardware.h>
+
+ .macro addruart,rx
+ mrc p15, 0, \rx, c1, c0
+ tst \rx, #1 @ MMU enabled?
+ ldreq \rx, =AT91_BASE_SYS @ System peripherals (phys address)
+ ldrne \rx, =AT91_VA_BASE_SYS @ System peripherals (virt address)
+ .endm
+
+ .macro senduart,rd,rx
+ strb \rd, [\rx, #AT91_DBGU_THR] @ Write to Transmitter Holding Register
+ .endm
+
+ .macro waituart,rd,rx
+1001: ldr \rd, [\rx, #AT91_DBGU_SR] @ Read Status Register
+ tst \rd, #AT91_DBGU_TXRDY @ DBGU_TXRDY = 1 when ready to transmit
+ beq 1001b
+ .endm
+
+ .macro busyuart,rd,rx
+1001: ldr \rd, [\rx, #AT91_DBGU_SR] @ Read Status Register
+ tst \rd, #AT91_DBGU_TXEMPTY @ DBGU_TXEMPTY = 1 when transmission complete
+ beq 1001b
+ .endm
+
diff --git a/include/asm-arm/arch-epxa10db/param.h b/include/asm-arm/arch-at91rm9200/dma.h
index 783dedd71c8..22c1dfdd8da 100644
--- a/include/asm-arm/arch-epxa10db/param.h
+++ b/include/asm-arm/arch-at91rm9200/dma.h
@@ -1,7 +1,7 @@
/*
- * linux/include/asm-arm/arch-epxa10db/param.h
+ * include/asm-arm/arch-at91rm9200/dma.h
*
- * Copyright (C) 1999 ARM Limited
+ * Copyright (C) 2003 SAN People
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/asm-arm/arch-at91rm9200/entry-macro.S b/include/asm-arm/arch-at91rm9200/entry-macro.S
new file mode 100644
index 00000000000..61a326e9490
--- /dev/null
+++ b/include/asm-arm/arch-at91rm9200/entry-macro.S
@@ -0,0 +1,25 @@
+/*
+ * include/asm-arm/arch-at91rm9200/entry-macro.S
+ *
+ * Copyright (C) 2003-2005 SAN People
+ *
+ * Low-level IRQ helper macros for AT91RM9200 platforms
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <asm/hardware.h>
+
+ .macro disable_fiq
+ .endm
+
+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+ ldr \base, =(AT91_VA_BASE_SYS) @ base virtual address of SYS peripherals
+ ldr \irqnr, [\base, #AT91_AIC_IVR] @ read IRQ vector register: de-asserts nIRQ to processor (and clears interrupt)
+ ldr \irqstat, [\base, #AT91_AIC_ISR] @ read interrupt source number
+ teq \irqstat, #0 @ ISR is 0 when no current interrupt, or spurious interrupt
+ streq \tmp, [\base, #AT91_AIC_EOICR] @ not going to be handled further, then ACK it now.
+ .endm
+
diff --git a/include/asm-arm/arch-at91rm9200/gpio.h b/include/asm-arm/arch-at91rm9200/gpio.h
new file mode 100644
index 00000000000..0f0a61e2f12
--- /dev/null
+++ b/include/asm-arm/arch-at91rm9200/gpio.h
@@ -0,0 +1,193 @@
+/*
+ * include/asm-arm/arch-at91rm9200/gpio.h
+ *
+ * Copyright (C) 2005 HP Labs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef __ASM_ARCH_AT91RM9200_GPIO_H
+#define __ASM_ARCH_AT91RM9200_GPIO_H
+
+#define PIN_BASE NR_AIC_IRQS
+
+#define PQFP_GPIO_BANKS 3 /* PQFP package has 3 banks */
+#define BGA_GPIO_BANKS 4 /* BGA package has 4 banks */
+
+/* these pin numbers double as IRQ numbers, like AT91_ID_* values */
+
+#define AT91_PIN_PA0 (PIN_BASE + 0x00 + 0)
+#define AT91_PIN_PA1 (PIN_BASE + 0x00 + 1)
+#define AT91_PIN_PA2 (PIN_BASE + 0x00 + 2)
+#define AT91_PIN_PA3 (PIN_BASE + 0x00 + 3)
+#define AT91_PIN_PA4 (PIN_BASE + 0x00 + 4)
+
+#define AT91_PIN_PA5 (PIN_BASE + 0x00 + 5)
+#define AT91_PIN_PA6 (PIN_BASE + 0x00 + 6)
+#define AT91_PIN_PA7 (PIN_BASE + 0x00 + 7)
+#define AT91_PIN_PA8 (PIN_BASE + 0x00 + 8)
+#define AT91_PIN_PA9 (PIN_BASE + 0x00 + 9)
+
+#define AT91_PIN_PA10 (PIN_BASE + 0x00 + 10)
+#define AT91_PIN_PA11 (PIN_BASE + 0x00 + 11)
+#define AT91_PIN_PA12 (PIN_BASE + 0x00 + 12)
+#define AT91_PIN_PA13 (PIN_BASE + 0x00 + 13)
+#define AT91_PIN_PA14 (PIN_BASE + 0x00 + 14)
+
+#define AT91_PIN_PA15 (PIN_BASE + 0x00 + 15)
+#define AT91_PIN_PA16 (PIN_BASE + 0x00 + 16)
+#define AT91_PIN_PA17 (PIN_BASE + 0x00 + 17)
+#define AT91_PIN_PA18 (PIN_BASE + 0x00 + 18)
+#define AT91_PIN_PA19 (PIN_BASE + 0x00 + 19)
+
+#define AT91_PIN_PA20 (PIN_BASE + 0x00 + 20)
+#define AT91_PIN_PA21 (PIN_BASE + 0x00 + 21)
+#define AT91_PIN_PA22 (PIN_BASE + 0x00 + 22)
+#define AT91_PIN_PA23 (PIN_BASE + 0x00 + 23)
+#define AT91_PIN_PA24 (PIN_BASE + 0x00 + 24)
+
+#define AT91_PIN_PA25 (PIN_BASE + 0x00 + 25)
+#define AT91_PIN_PA26 (PIN_BASE + 0x00 + 26)
+#define AT91_PIN_PA27 (PIN_BASE + 0x00 + 27)
+#define AT91_PIN_PA28 (PIN_BASE + 0x00 + 28)
+#define AT91_PIN_PA29 (PIN_BASE + 0x00 + 29)
+
+#define AT91_PIN_PA30 (PIN_BASE + 0x00 + 30)
+#define AT91_PIN_PA31 (PIN_BASE + 0x00 + 31)
+
+#define AT91_PIN_PB0 (PIN_BASE + 0x20 + 0)
+#define AT91_PIN_PB1 (PIN_BASE + 0x20 + 1)
+#define AT91_PIN_PB2 (PIN_BASE + 0x20 + 2)
+#define AT91_PIN_PB3 (PIN_BASE + 0x20 + 3)
+#define AT91_PIN_PB4 (PIN_BASE + 0x20 + 4)
+
+#define AT91_PIN_PB5 (PIN_BASE + 0x20 + 5)
+#define AT91_PIN_PB6 (PIN_BASE + 0x20 + 6)
+#define AT91_PIN_PB7 (PIN_BASE + 0x20 + 7)
+#define AT91_PIN_PB8 (PIN_BASE + 0x20 + 8)
+#define AT91_PIN_PB9 (PIN_BASE + 0x20 + 9)
+
+#define AT91_PIN_PB10 (PIN_BASE + 0x20 + 10)
+#define AT91_PIN_PB11 (PIN_BASE + 0x20 + 11)
+#define AT91_PIN_PB12 (PIN_BASE + 0x20 + 12)
+#define AT91_PIN_PB13 (PIN_BASE + 0x20 + 13)
+#define AT91_PIN_PB14 (PIN_BASE + 0x20 + 14)
+
+#define AT91_PIN_PB15 (PIN_BASE + 0x20 + 15)
+#define AT91_PIN_PB16 (PIN_BASE + 0x20 + 16)
+#define AT91_PIN_PB17 (PIN_BASE + 0x20 + 17)
+#define AT91_PIN_PB18 (PIN_BASE + 0x20 + 18)
+#define AT91_PIN_PB19 (PIN_BASE + 0x20 + 19)
+
+#define AT91_PIN_PB20 (PIN_BASE + 0x20 + 20)
+#define AT91_PIN_PB21 (PIN_BASE + 0x20 + 21)
+#define AT91_PIN_PB22 (PIN_BASE + 0x20 + 22)
+#define AT91_PIN_PB23 (PIN_BASE + 0x20 + 23)
+#define AT91_PIN_PB24 (PIN_BASE + 0x20 + 24)
+
+#define AT91_PIN_PB25 (PIN_BASE + 0x20 + 25)
+#define AT91_PIN_PB26 (PIN_BASE + 0x20 + 26)
+#define AT91_PIN_PB27 (PIN_BASE + 0x20 + 27)
+#define AT91_PIN_PB28 (PIN_BASE + 0x20 + 28)
+#define AT91_PIN_PB29 (PIN_BASE + 0x20 + 29)
+
+#define AT91_PIN_PB30 (PIN_BASE + 0x20 + 30)
+#define AT91_PIN_PB31 (PIN_BASE + 0x20 + 31)
+
+#define AT91_PIN_PC0 (PIN_BASE + 0x40 + 0)
+#define AT91_PIN_PC1 (PIN_BASE + 0x40 + 1)
+#define AT91_PIN_PC2 (PIN_BASE + 0x40 + 2)
+#define AT91_PIN_PC3 (PIN_BASE + 0x40 + 3)
+#define AT91_PIN_PC4 (PIN_BASE + 0x40 + 4)
+
+#define AT91_PIN_PC5 (PIN_BASE + 0x40 + 5)
+#define AT91_PIN_PC6 (PIN_BASE + 0x40 + 6)
+#define AT91_PIN_PC7 (PIN_BASE + 0x40 + 7)
+#define AT91_PIN_PC8 (PIN_BASE + 0x40 + 8)
+#define AT91_PIN_PC9 (PIN_BASE + 0x40 + 9)
+
+#define AT91_PIN_PC10 (PIN_BASE + 0x40 + 10)
+#define AT91_PIN_PC11 (PIN_BASE + 0x40 + 11)
+#define AT91_PIN_PC12 (PIN_BASE + 0x40 + 12)
+#define AT91_PIN_PC13 (PIN_BASE + 0x40 + 13)
+#define AT91_PIN_PC14 (PIN_BASE + 0x40 + 14)
+
+#define AT91_PIN_PC15 (PIN_BASE + 0x40 + 15)
+#define AT91_PIN_PC16 (PIN_BASE + 0x40 + 16)
+#define AT91_PIN_PC17 (PIN_BASE + 0x40 + 17)
+#define AT91_PIN_PC18 (PIN_BASE + 0x40 + 18)
+#define AT91_PIN_PC19 (PIN_BASE + 0x40 + 19)
+
+#define AT91_PIN_PC20 (PIN_BASE + 0x40 + 20)
+#define AT91_PIN_PC21 (PIN_BASE + 0x40 + 21)
+#define AT91_PIN_PC22 (PIN_BASE + 0x40 + 22)
+#define AT91_PIN_PC23 (PIN_BASE + 0x40 + 23)
+#define AT91_PIN_PC24 (PIN_BASE + 0x40 + 24)
+
+#define AT91_PIN_PC25 (PIN_BASE + 0x40 + 25)
+#define AT91_PIN_PC26 (PIN_BASE + 0x40 + 26)
+#define AT91_PIN_PC27 (PIN_BASE + 0x40 + 27)
+#define AT91_PIN_PC28 (PIN_BASE + 0x40 + 28)
+#define AT91_PIN_PC29 (PIN_BASE + 0x40 + 29)
+
+#define AT91_PIN_PC30 (PIN_BASE + 0x40 + 30)
+#define AT91_PIN_PC31 (PIN_BASE + 0x40 + 31)
+
+#define AT91_PIN_PD0 (PIN_BASE + 0x60 + 0)
+#define AT91_PIN_PD1 (PIN_BASE + 0x60 + 1)
+#define AT91_PIN_PD2 (PIN_BASE + 0x60 + 2)
+#define AT91_PIN_PD3 (PIN_BASE + 0x60 + 3)
+#define AT91_PIN_PD4 (PIN_BASE + 0x60 + 4)
+
+#define AT91_PIN_PD5 (PIN_BASE + 0x60 + 5)
+#define AT91_PIN_PD6 (PIN_BASE + 0x60 + 6)
+#define AT91_PIN_PD7 (PIN_BASE + 0x60 + 7)
+#define AT91_PIN_PD8 (PIN_BASE + 0x60 + 8)
+#define AT91_PIN_PD9 (PIN_BASE + 0x60 + 9)
+
+#define AT91_PIN_PD10 (PIN_BASE + 0x60 + 10)
+#define AT91_PIN_PD11 (PIN_BASE + 0x60 + 11)
+#define AT91_PIN_PD12 (PIN_BASE + 0x60 + 12)
+#define AT91_PIN_PD13 (PIN_BASE + 0x60 + 13)
+#define AT91_PIN_PD14 (PIN_BASE + 0x60 + 14)
+
+#define AT91_PIN_PD15 (PIN_BASE + 0x60 + 15)
+#define AT91_PIN_PD16 (PIN_BASE + 0x60 + 16)
+#define AT91_PIN_PD17 (PIN_BASE + 0x60 + 17)
+#define AT91_PIN_PD18 (PIN_BASE + 0x60 + 18)
+#define AT91_PIN_PD19 (PIN_BASE + 0x60 + 19)
+
+#define AT91_PIN_PD20 (PIN_BASE + 0x60 + 20)
+#define AT91_PIN_PD21 (PIN_BASE + 0x60 + 21)
+#define AT91_PIN_PD22 (PIN_BASE + 0x60 + 22)
+#define AT91_PIN_PD23 (PIN_BASE + 0x60 + 23)
+#define AT91_PIN_PD24 (PIN_BASE + 0x60 + 24)
+
+#define AT91_PIN_PD25 (PIN_BASE + 0x60 + 25)
+#define AT91_PIN_PD26 (PIN_BASE + 0x60 + 26)
+#define AT91_PIN_PD27 (PIN_BASE + 0x60 + 27)
+#define AT91_PIN_PD28 (PIN_BASE + 0x60 + 28)
+#define AT91_PIN_PD29 (PIN_BASE + 0x60 + 29)
+
+#define AT91_PIN_PD30 (PIN_BASE + 0x60 + 30)
+#define AT91_PIN_PD31 (PIN_BASE + 0x60 + 31)
+
+#ifndef __ASSEMBLY__
+/* setup routines, called from board init or driver probe() */
+extern int at91_set_A_periph(unsigned pin, int use_pullup);
+extern int at91_set_B_periph(unsigned pin, int use_pullup);
+extern int at91_set_gpio_input(unsigned pin, int use_pullup);
+extern int at91_set_gpio_output(unsigned pin, int value);
+extern int at91_set_deglitch(unsigned pin, int is_on);
+
+/* callable at any time */
+extern int at91_set_gpio_value(unsigned pin, int value);
+extern int at91_get_gpio_value(unsigned pin);
+#endif
+
+#endif
+
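A short sketch of the intended GPIO usage (not part of the patch; PB27 is an arbitrary example pin): board init claims a pin with one of the setup routines, and drivers may then change its level at any time:

static void __init example_led_init(void)
{
	at91_set_gpio_output(AT91_PIN_PB27, 1);	/* claim the pin, drive it high */
}

static void example_led_set(int on)
{
	at91_set_gpio_value(AT91_PIN_PB27, on);	/* callable at any time */
}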
diff --git a/include/asm-arm/arch-at91rm9200/hardware.h b/include/asm-arm/arch-at91rm9200/hardware.h
new file mode 100644
index 00000000000..2646c01f8e9
--- /dev/null
+++ b/include/asm-arm/arch-at91rm9200/hardware.h
@@ -0,0 +1,92 @@
+/*
+ * include/asm-arm/arch-at91rm9200/hardware.h
+ *
+ * Copyright (C) 2003 SAN People
+ * Copyright (C) 2003 ATMEL
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef __ASM_ARCH_HARDWARE_H
+#define __ASM_ARCH_HARDWARE_H
+
+#include <asm/sizes.h>
+
+#include <asm/arch/at91rm9200.h>
+#include <asm/arch/at91rm9200_sys.h>
+
+/*
+ * Remap the peripherals from address 0xFFFA0000 .. 0xFFFFFFFF
+ * to 0xFEFA0000 .. 0xFF000000. (384Kb)
+ */
+#define AT91_IO_PHYS_BASE 0xFFFA0000
+#define AT91_IO_SIZE (0xFFFFFFFF - AT91_IO_PHYS_BASE + 1)
+#define AT91_IO_VIRT_BASE (0xFF000000 - AT91_IO_SIZE)
+
+ /* Convert a physical IO address to virtual IO address */
+#define AT91_IO_P2V(x) ((x) - AT91_IO_PHYS_BASE + AT91_IO_VIRT_BASE)
+
+/*
+ * Virtual to Physical Address mapping for IO devices.
+ */
+#define AT91_VA_BASE_SYS AT91_IO_P2V(AT91_BASE_SYS)
+#define AT91_VA_BASE_SPI AT91_IO_P2V(AT91_BASE_SPI)
+#define AT91_VA_BASE_SSC2 AT91_IO_P2V(AT91_BASE_SSC2)
+#define AT91_VA_BASE_SSC1 AT91_IO_P2V(AT91_BASE_SSC1)
+#define AT91_VA_BASE_SSC0 AT91_IO_P2V(AT91_BASE_SSC0)
+#define AT91_VA_BASE_US3 AT91_IO_P2V(AT91_BASE_US3)
+#define AT91_VA_BASE_US2 AT91_IO_P2V(AT91_BASE_US2)
+#define AT91_VA_BASE_US1 AT91_IO_P2V(AT91_BASE_US1)
+#define AT91_VA_BASE_US0 AT91_IO_P2V(AT91_BASE_US0)
+#define AT91_VA_BASE_EMAC AT91_IO_P2V(AT91_BASE_EMAC)
+#define AT91_VA_BASE_TWI AT91_IO_P2V(AT91_BASE_TWI)
+#define AT91_VA_BASE_MCI AT91_IO_P2V(AT91_BASE_MCI)
+#define AT91_VA_BASE_UDP AT91_IO_P2V(AT91_BASE_UDP)
+#define AT91_VA_BASE_TCB1 AT91_IO_P2V(AT91_BASE_TCB1)
+#define AT91_VA_BASE_TCB0 AT91_IO_P2V(AT91_BASE_TCB0)
+
+/* Internal SRAM */
+#define AT91_BASE_SRAM 0x00200000 /* Internal SRAM base address */
+#define AT91_SRAM_SIZE 0x00004000 /* Internal SRAM SIZE (16Kb) */
+
+/* Serial ports */
+#define AT91_NR_UART 5 /* 4 USARTs and one DBGU port */
+
+/* FLASH */
+#define AT91_FLASH_BASE 0x10000000 /* NCS0: Flash physical base address */
+
+/* SDRAM */
+#define AT91_SDRAM_BASE 0x20000000 /* NCS1: SDRAM physical base address */
+
+/* SmartMedia */
+#define AT91_SMARTMEDIA_BASE 0x40000000 /* NCS3: Smartmedia physical base address */
+
+/* Multi-Master Memory controller */
+#define AT91_UHP_BASE 0x00300000 /* USB Host controller */
+
+/* Clocks */
+#define AT91_SLOW_CLOCK 32768 /* slow clock */
+
+#ifndef __ASSEMBLY__
+#include <asm/io.h>
+
+static inline unsigned int at91_sys_read(unsigned int reg_offset)
+{
+ void __iomem *addr = (void __iomem *)AT91_VA_BASE_SYS;
+
+ return readl(addr + reg_offset);
+}
+
+static inline void at91_sys_write(unsigned int reg_offset, unsigned long value)
+{
+ void __iomem *addr = (void __iomem *)AT91_VA_BASE_SYS;
+
+ writel(value, addr + reg_offset);
+}
+#endif
+
+#endif
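A minimal sketch of the accessor pair in use (illustrative only), assuming the AT91_DBGU_SR offset and AT91_DBGU_TXRDY bit defined in at91rm9200_sys.h earlier in this patch:

static int example_dbgu_tx_ready(void)
{
	/* system-peripheral offsets are relative to AT91_VA_BASE_SYS,
	 * which at91_sys_read() adds internally */
	return (at91_sys_read(AT91_DBGU_SR) & AT91_DBGU_TXRDY) != 0;
}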
diff --git a/include/asm-arm/arch-epxa10db/dma.h b/include/asm-arm/arch-at91rm9200/io.h
index 5d97734d107..23e670d85c9 100644
--- a/include/asm-arm/arch-epxa10db/dma.h
+++ b/include/asm-arm/arch-at91rm9200/io.h
@@ -1,7 +1,7 @@
/*
- * linux/include/asm-arm/arch-camelot/dma.h
+ * include/asm-arm/arch-at91rm9200/io.h
*
- * Copyright (C) 1997,1998 Russell King
+ * Copyright (C) 2003 SAN People
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -17,12 +17,17 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef __ASM_ARCH_DMA_H
-#define __ASM_ARCH_DMA_H
-#define MAX_DMA_ADDRESS 0xffffffff
+#ifndef __ASM_ARCH_IO_H
+#define __ASM_ARCH_IO_H
-#define MAX_DMA_CHANNELS 0
+#include <asm/arch/at91rm9200.h>
+#include <asm/io.h>
-#endif /* _ASM_ARCH_DMA_H */
+#define IO_SPACE_LIMIT 0xFFFFFFFF
+#define __io(a) ((void __iomem *)(a))
+#define __mem_pci(a) (a)
+
+
+#endif
diff --git a/include/asm-arm/arch-at91rm9200/irqs.h b/include/asm-arm/arch-at91rm9200/irqs.h
new file mode 100644
index 00000000000..27b0497f1b3
--- /dev/null
+++ b/include/asm-arm/arch-at91rm9200/irqs.h
@@ -0,0 +1,52 @@
+/*
+ * include/asm-arm/arch-at91rm9200/irqs.h
+ *
+ * Copyright (C) 2004 SAN People
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARCH_IRQS_H
+#define __ASM_ARCH_IRQS_H
+
+#define NR_AIC_IRQS 32
+
+
+/*
+ * Acknowledge interrupt with AIC after interrupt has been handled.
+ * (by kernel/irq.c)
+ */
+#define irq_finish(irq) do { at91_sys_write(AT91_AIC_EOICR, 0); } while (0)
+
+
+/*
+ * IRQ interrupt symbols are the AT91_ID_* symbols in at91rm9200.h
+ * for IRQs handled directly through the AIC, or else the AT91_PIN_*
+ * symbols in gpio.h for ones handled indirectly as GPIOs.
+ * We make provision for 4 banks of GPIO.
+ */
+#include <asm/arch/gpio.h>
+
+#define NR_IRQS (NR_AIC_IRQS + (4 * 32))
+
+
+#ifndef __ASSEMBLY__
+/*
+ * Initialize the IRQ controller.
+ */
+extern void at91rm9200_init_irq(unsigned int priority[]);
+#endif
+
+#endif
diff --git a/include/asm-arm/arch-epxa10db/memory.h b/include/asm-arm/arch-at91rm9200/memory.h
index 999541b6a9f..462f1f0ad67 100644
--- a/include/asm-arm/arch-epxa10db/memory.h
+++ b/include/asm-arm/arch-at91rm9200/memory.h
@@ -1,7 +1,7 @@
/*
- * linux/include/asm-arm/arch-epxa10/memory.h
+ * include/asm-arm/arch-at91rm9200/memory.h
*
- * Copyright (C) 2001 Altera Corporation
+ * Copyright (C) 2004 SAN People
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -17,13 +17,14 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
-/*
- * Physical DRAM offset.
- */
-#define PHYS_OFFSET UL(0x00000000)
+#include <asm/arch/hardware.h>
+
+#define PHYS_OFFSET (AT91_SDRAM_BASE)
+
/*
* Virtual view <-> DMA view memory address translations
@@ -32,7 +33,9 @@
* bus_to_virt: Used to convert an address for DMA operations
* to an address that the kernel can use.
*/
-#define __virt_to_bus(x) (x - PAGE_OFFSET + /*SDRAM_BASE*/0)
-#define __bus_to_virt(x) (x - /*SDRAM_BASE*/0 + PAGE_OFFSET)
+#define __virt_to_bus__is_a_macro
+#define __virt_to_bus(x) __virt_to_phys(x)
+#define __bus_to_virt__is_a_macro
+#define __bus_to_virt(x) __phys_to_virt(x)
#endif
diff --git a/include/asm-arm/arch-epxa10db/vmalloc.h b/include/asm-arm/arch-at91rm9200/param.h
index 546fb7d2b6a..9480f844685 100644
--- a/include/asm-arm/arch-epxa10db/vmalloc.h
+++ b/include/asm-arm/arch-at91rm9200/param.h
@@ -1,7 +1,7 @@
/*
- * linux/include/asm-arm/arch-epxa10db/vmalloc.h
+ * include/asm-arm/arch-at91rm9200/param.h
*
- * Copyright (C) 2000 Russell King.
+ * Copyright (C) 2003 SAN People
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -17,4 +17,12 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#define VMALLOC_END (PAGE_OFFSET + 0x10000000)
+
+#ifndef __ASM_ARCH_PARAM_H
+#define __ASM_ARCH_PARAM_H
+
+/*
+ * We use default params
+ */
+
+#endif
diff --git a/include/asm-arm/arch-at91rm9200/pio.h b/include/asm-arm/arch-at91rm9200/pio.h
new file mode 100644
index 00000000000..a89501b4a70
--- /dev/null
+++ b/include/asm-arm/arch-at91rm9200/pio.h
@@ -0,0 +1,115 @@
+/*
+ * include/asm-arm/arch-at91rm9200/pio.h
+ *
+ * Copyright (C) 2003 SAN People
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef __ASM_ARCH_PIO_H
+#define __ASM_ARCH_PIO_H
+
+#include <asm/arch/hardware.h>
+
+static inline void AT91_CfgPIO_USART0(void) {
+ at91_sys_write(AT91_PIOA + PIO_PDR, AT91_PA17_TXD0 | AT91_PA18_RXD0 | AT91_PA20_CTS0);
+
+ /*
+ * Errata #39 - RTS0 is not internally connected to PA21. We need to drive
+ * the pin manually. Default is off (RTS is active low).
+ */
+ at91_sys_write(AT91_PIOA + PIO_PER, AT91_PA21_RTS0);
+ at91_sys_write(AT91_PIOA + PIO_OER, AT91_PA21_RTS0);
+ at91_sys_write(AT91_PIOA + PIO_SODR, AT91_PA21_RTS0);
+}
+
+static inline void AT91_CfgPIO_USART1(void) {
+ at91_sys_write(AT91_PIOB + PIO_PDR, AT91_PB18_RI1 | AT91_PB19_DTR1
+ | AT91_PB20_TXD1 | AT91_PB21_RXD1 | AT91_PB23_DCD1
+ | AT91_PB24_CTS1 | AT91_PB25_DSR1 | AT91_PB26_RTS1);
+}
+
+static inline void AT91_CfgPIO_USART2(void) {
+ at91_sys_write(AT91_PIOA + PIO_PDR, AT91_PA22_RXD2 | AT91_PA23_TXD2);
+}
+
+static inline void AT91_CfgPIO_USART3(void) {
+ at91_sys_write(AT91_PIOA + PIO_PDR, AT91_PA5_TXD3 | AT91_PA6_RXD3);
+ at91_sys_write(AT91_PIOA + PIO_BSR, AT91_PA5_TXD3 | AT91_PA6_RXD3);
+}
+
+static inline void AT91_CfgPIO_DBGU(void) {
+ at91_sys_write(AT91_PIOA + PIO_PDR, AT91_PA31_DTXD | AT91_PA30_DRXD);
+}
+
+/*
+ * Enable the Two-Wire interface.
+ */
+static inline void AT91_CfgPIO_TWI(void) {
+ at91_sys_write(AT91_PIOA + PIO_PDR, AT91_PA25_TWD | AT91_PA26_TWCK);
+ at91_sys_write(AT91_PIOA + PIO_ASR, AT91_PA25_TWD | AT91_PA26_TWCK);
+ at91_sys_write(AT91_PIOA + PIO_MDER, AT91_PA25_TWD | AT91_PA26_TWCK); /* open drain */
+}
+
+/*
+ * Enable the Serial Peripheral Interface.
+ */
+static inline void AT91_CfgPIO_SPI(void) {
+ at91_sys_write(AT91_PIOA + PIO_PDR, AT91_PA0_MISO | AT91_PA1_MOSI | AT91_PA2_SPCK);
+}
+
+static inline void AT91_CfgPIO_SPI_CS0(void) {
+ at91_sys_write(AT91_PIOA + PIO_PDR, AT91_PA3_NPCS0);
+}
+
+static inline void AT91_CfgPIO_SPI_CS1(void) {
+ at91_sys_write(AT91_PIOA + PIO_PDR, AT91_PA4_NPCS1);
+}
+
+static inline void AT91_CfgPIO_SPI_CS2(void) {
+ at91_sys_write(AT91_PIOA + PIO_PDR, AT91_PA5_NPCS2);
+}
+
+static inline void AT91_CfgPIO_SPI_CS3(void) {
+ at91_sys_write(AT91_PIOA + PIO_PDR, AT91_PA6_NPCS3);
+}
+
+/*
+ * Select the DataFlash card.
+ */
+static inline void AT91_CfgPIO_DataFlashCard(void) {
+ at91_sys_write(AT91_PIOB + PIO_PER, AT91_PIO_P(7));
+ at91_sys_write(AT91_PIOB + PIO_OER, AT91_PIO_P(7));
+ at91_sys_write(AT91_PIOB + PIO_CODR, AT91_PIO_P(7));
+}
+
+/*
+ * Enable NAND Flash (SmartMedia) interface.
+ */
+static inline void AT91_CfgPIO_SmartMedia(void) {
+ /* enable PC0=SMCE, PC1=SMOE, PC3=SMWE, A21=CLE, A22=ALE */
+ at91_sys_write(AT91_PIOC + PIO_ASR, AT91_PC0_BFCK | AT91_PC1_BFRDY_SMOE | AT91_PC3_BFBAA_SMWE);
+ at91_sys_write(AT91_PIOC + PIO_PDR, AT91_PC0_BFCK | AT91_PC1_BFRDY_SMOE | AT91_PC3_BFBAA_SMWE);
+
+ /* Configure PC2 as input (signal READY of the SmartMedia) */
+ at91_sys_write(AT91_PIOC + PIO_PER, AT91_PC2_BFAVD); /* take PIO control of the pin */
+ at91_sys_write(AT91_PIOC + PIO_ODR, AT91_PC2_BFAVD); /* disable output */
+
+ /* Configure PB1 as input (signal Card Detect of the SmartMedia) */
+ at91_sys_write(AT91_PIOB + PIO_PER, AT91_PIO_P(1)); /* take PIO control of the pin */
+ at91_sys_write(AT91_PIOB + PIO_ODR, AT91_PIO_P(1)); /* disable output */
+}
+
+static inline int AT91_PIO_SmartMedia_RDY(void) {
+ return (at91_sys_read(AT91_PIOC + PIO_PDSR) & AT91_PIO_P(2)) ? 1 : 0;
+}
+
+static inline int AT91_PIO_SmartMedia_CardDetect(void) {
+ return (at91_sys_read(AT91_PIOB + PIO_PDSR) & AT91_PIO_P(1)) ? 1 : 0;
+}
+
+#endif
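These helpers are meant to be called once from board or console setup before the corresponding driver starts; a hypothetical example (not part of the patch):

static void __init example_serial_pins_init(void)
{
	AT91_CfgPIO_DBGU();	/* hand PA30/PA31 to the debug unit */
	AT91_CfgPIO_USART1();	/* full modem-control pin set for USART1 */
}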
diff --git a/include/asm-arm/arch-epxa10db/system.h b/include/asm-arm/arch-at91rm9200/system.h
index 345b092a1ed..29c42655f05 100644
--- a/include/asm-arm/arch-epxa10db/system.h
+++ b/include/asm-arm/arch-at91rm9200/system.h
@@ -1,9 +1,7 @@
/*
- * linux/include/asm-arm/arch-epxa10db/system.h
+ * include/asm-arm/arch-at91rm9200/system.h
*
- * Copyright (C) 1999 ARM Limited
- * Copyright (C) 2000 Deep Blue Solutions Ltd
- * Copyright (C) 2001 Altera Corporation
+ * Copyright (C) 2003 SAN People
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,23 +17,35 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
-#include <asm/arch/platform.h>
+#include <asm/arch/hardware.h>
static inline void arch_idle(void)
{
/*
- * This should do all the clock switching
- * and wait for interrupt tricks
+ * Disable the processor clock. The processor will be automatically
+ * re-enabled by an interrupt or by a reset.
+ */
+// at91_sys_write(AT91_PMC_SCDR, AT91_PMC_PCK);
+
+ /*
+ * Set the processor (CP15) into 'Wait for Interrupt' mode.
+ * Unlike disabling the processor clock via the PMC (above)
+ * this allows the processor to be woken via JTAG.
*/
cpu_do_idle();
}
-extern __inline__ void arch_reset(char mode)
+static inline void arch_reset(char mode)
{
- /* Hmm... We can probably do something with the watchdog... */
+ /*
+ * Perform a hardware reset using the Watchdog timer.
+ */
+ at91_sys_write(AT91_ST_WDMR, AT91_ST_RSTEN | AT91_ST_EXTEN | 1);
+ at91_sys_write(AT91_ST_CR, AT91_ST_WDRST);
}
#endif
diff --git a/include/asm-arm/arch-epxa10db/timex.h b/include/asm-arm/arch-at91rm9200/timex.h
index b87a75fc958..3f112dd1258 100644
--- a/include/asm-arm/arch-epxa10db/timex.h
+++ b/include/asm-arm/arch-at91rm9200/timex.h
@@ -1,9 +1,7 @@
/*
- * linux/include/asm-arm/arch-epxa10db/timex.h
+ * include/asm-arm/arch-at91rm9200/timex.h
*
- * Excalibur timex specifications
- *
- * Copyright (C) 2001 Altera Corporation
+ * Copyright (C) 2003 SAN People
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -20,7 +18,11 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-/*
- * ??
- */
-#define CLOCK_TICK_RATE (50000000 / 16)
+#ifndef __ASM_ARCH_TIMEX_H
+#define __ASM_ARCH_TIMEX_H
+
+#include <asm/arch/hardware.h>
+
+#define CLOCK_TICK_RATE (AT91_SLOW_CLOCK)
+
+#endif
diff --git a/include/asm-arm/arch-at91rm9200/uncompress.h b/include/asm-arm/arch-at91rm9200/uncompress.h
new file mode 100644
index 00000000000..b30dd552071
--- /dev/null
+++ b/include/asm-arm/arch-at91rm9200/uncompress.h
@@ -0,0 +1,55 @@
+/*
+ * include/asm-arm/arch-at91rm9200/uncompress.h
+ *
+ * Copyright (C) 2003 SAN People
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARCH_UNCOMPRESS_H
+#define __ASM_ARCH_UNCOMPRESS_H
+
+#include <asm/arch/hardware.h>
+
+/*
+ * The following code assumes the serial port has already been
+ * initialized by the bootloader. We search for the first enabled
+ * port in the most probable order. If you didn't setup a port in
+ * your bootloader then nothing will appear (which might be desired).
+ *
+ * This does not append a newline
+ */
+static void putstr(const char *s)
+{
+ void __iomem *sys = (void __iomem *) AT91_BASE_SYS; /* physical address */
+
+ while (*s) {
+ while (!(__raw_readl(sys + AT91_DBGU_SR) & AT91_DBGU_TXRDY)) { barrier(); }
+ __raw_writel(*s, sys + AT91_DBGU_THR);
+ if (*s == '\n') {
+ while (!(__raw_readl(sys + AT91_DBGU_SR) & AT91_DBGU_TXRDY)) { barrier(); }
+ __raw_writel('\r', sys + AT91_DBGU_THR);
+ }
+ s++;
+ }
+ /* wait for transmission to complete */
+ while (!(__raw_readl(sys + AT91_DBGU_SR) & AT91_DBGU_TXEMPTY)) { barrier(); }
+}
+
+#define arch_decomp_setup()
+
+#define arch_decomp_wdog()
+
+#endif
diff --git a/arch/arm/mach-clps711x/dma.c b/include/asm-arm/arch-at91rm9200/vmalloc.h
index af5a4de38ea..34d9718feb9 100644
--- a/arch/arm/mach-clps711x/dma.c
+++ b/include/asm-arm/arch-at91rm9200/vmalloc.h
@@ -1,7 +1,7 @@
/*
- * linux/arch/arm/mach-clps711x/dma.c
+ * include/asm-arm/arch-at91rm9200/vmalloc.h
*
- * Copyright (C) 2000 Deep Blue Solutions Ltd
+ * Copyright (C) 2003 SAN People
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -17,11 +17,10 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/init.h>
-#include <asm/dma.h>
-#include <asm/mach/dma.h>
+#ifndef __ASM_ARCH_VMALLOC_H
+#define __ASM_ARCH_VMALLOC_H
-void __init arch_dma_init(dma_t *dma)
-{
-}
+#define VMALLOC_END (AT91_IO_VIRT_BASE & PGDIR_MASK)
+
+#endif
diff --git a/include/asm-arm/arch-cl7500/dma.h b/include/asm-arm/arch-cl7500/dma.h
index 1d6a8829d32..591ed255189 100644
--- a/include/asm-arm/arch-cl7500/dma.h
+++ b/include/asm-arm/arch-cl7500/dma.h
@@ -15,7 +15,6 @@
* bytes of RAM.
*/
#define MAX_DMA_ADDRESS 0xd0000000
-#define MAX_DMA_CHANNELS 0
#define DMA_S0 0
diff --git a/include/asm-arm/arch-cl7500/entry-macro.S b/include/asm-arm/arch-cl7500/entry-macro.S
index 686f413f82d..c9e5395e510 100644
--- a/include/asm-arm/arch-cl7500/entry-macro.S
+++ b/include/asm-arm/arch-cl7500/entry-macro.S
@@ -1,3 +1,3 @@
-
+#include <asm/hardware.h>
#include <asm/hardware/entry-macro-iomd.S>
diff --git a/include/asm-arm/arch-clps711x/dma.h b/include/asm-arm/arch-clps711x/dma.h
index 3c4c5c84325..61099793842 100644
--- a/include/asm-arm/arch-clps711x/dma.h
+++ b/include/asm-arm/arch-clps711x/dma.h
@@ -17,12 +17,3 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef __ASM_ARCH_DMA_H
-#define __ASM_ARCH_DMA_H
-
-#define MAX_DMA_ADDRESS 0xffffffff
-
-#define MAX_DMA_CHANNELS 0
-
-#endif /* _ASM_ARCH_DMA_H */
-
diff --git a/include/asm-arm/arch-clps711x/entry-macro.S b/include/asm-arm/arch-clps711x/entry-macro.S
index b31079a1d4a..21f6ee48581 100644
--- a/include/asm-arm/arch-clps711x/entry-macro.S
+++ b/include/asm-arm/arch-clps711x/entry-macro.S
@@ -7,6 +7,7 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
+#include <asm/hardware.h>
#include <asm/hardware/clps7111.h>
.macro disable_fiq
diff --git a/include/asm-arm/arch-clps711x/system.h b/include/asm-arm/arch-clps711x/system.h
index 2ab981fee37..11e1491535a 100644
--- a/include/asm-arm/arch-clps711x/system.h
+++ b/include/asm-arm/arch-clps711x/system.h
@@ -20,7 +20,9 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
+#include <asm/hardware.h>
#include <asm/hardware/clps7111.h>
+#include <asm/io.h>
static inline void arch_idle(void)
{
diff --git a/include/asm-arm/arch-ebsa110/dma.h b/include/asm-arm/arch-ebsa110/dma.h
index d491776ac1c..c52f9e2ab0b 100644
--- a/include/asm-arm/arch-ebsa110/dma.h
+++ b/include/asm-arm/arch-ebsa110/dma.h
@@ -9,11 +9,3 @@
*
* EBSA110 DMA definitions
*/
-#ifndef __ASM_ARCH_DMA_H
-#define __ASM_ARCH_DMA_H
-
-#define MAX_DMA_ADDRESS 0xffffffff
-#define MAX_DMA_CHANNELS 0
-
-#endif /* _ASM_ARCH_DMA_H */
-
diff --git a/include/asm-arm/arch-ebsa285/dma.h b/include/asm-arm/arch-ebsa285/dma.h
index c43046eb8bc..0259ad45d33 100644
--- a/include/asm-arm/arch-ebsa285/dma.h
+++ b/include/asm-arm/arch-ebsa285/dma.h
@@ -10,11 +10,6 @@
#define __ASM_ARCH_DMA_H
/*
- * This is the maximum DMA address that can be DMAd to.
- */
-#define MAX_DMA_ADDRESS 0xffffffff
-
-/*
* The 21285 has two internal DMA channels; we call these 8 and 9.
* On CATS hardware we have an additional eight ISA dma channels
* numbered 0..7.
diff --git a/include/asm-arm/arch-ebsa285/entry-macro.S b/include/asm-arm/arch-ebsa285/entry-macro.S
index db5729ff634..cf10ac96fdd 100644
--- a/include/asm-arm/arch-ebsa285/entry-macro.S
+++ b/include/asm-arm/arch-ebsa285/entry-macro.S
@@ -7,6 +7,8 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
+#include <asm/hardware.h>
+#include <asm/arch/irqs.h>
#include <asm/hardware/dec21285.h>
.macro disable_fiq
diff --git a/include/asm-arm/arch-epxa10db/debug-macro.S b/include/asm-arm/arch-epxa10db/debug-macro.S
deleted file mode 100644
index 1d11c51f498..00000000000
--- a/include/asm-arm/arch-epxa10db/debug-macro.S
+++ /dev/null
@@ -1,41 +0,0 @@
-/* linux/include/asm-arm/arch-epxa10db/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-*/
-
-#include <asm/arch/excalibur.h>
-#define UART00_TYPE
-#include <asm/arch/uart00.h>
-
- .macro addruart,rx
- mrc p15, 0, \rx, c1, c0
- tst \rx, #1 @ MMU enabled?
- ldr \rx, =EXC_UART00_BASE @ physical base address
- orrne \rx, \rx, #0xff000000 @ virtual base
- orrne \rx, \rx, #0x00f00000
- .endm
-
- .macro senduart,rd,rx
- str \rd, [\rx, #UART_TD(0)]
- .endm
-
- .macro waituart,rd,rx
-1001: ldr \rd, [\rx, #UART_TSR(0)]
- and \rd, \rd, #UART_TSR_TX_LEVEL_MSK
- cmp \rd, #15
- beq 1001b
- .endm
-
- .macro busyuart,rd,rx
-1001: ldr \rd, [\rx, #UART_TSR(0)]
- ands \rd, \rd, #UART_TSR_TX_LEVEL_MSK
- bne 1001b
- .endm
diff --git a/include/asm-arm/arch-epxa10db/entry-macro.S b/include/asm-arm/arch-epxa10db/entry-macro.S
deleted file mode 100644
index de6ae08334e..00000000000
--- a/include/asm-arm/arch-epxa10db/entry-macro.S
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * include/asm-arm/arch-epxa10db/entry-macro.S
- *
- * Low-level IRQ helper macros for epxa10db platform
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-#include <asm/arch/platform.h>
-#undef IRQ_MODE /* same name defined in asm/proc/ptrace.h */
-#include <asm/arch/int_ctrl00.h>
-
- .macro disable_fiq
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
-
- ldr \irqstat, =INT_ID(IO_ADDRESS(EXC_INT_CTRL00_BASE))
- ldr \irqnr,[\irqstat]
- cmp \irqnr,#0
- subne \irqnr,\irqnr,#1
-
- .endm
-
diff --git a/include/asm-arm/arch-epxa10db/ether00.h b/include/asm-arm/arch-epxa10db/ether00.h
deleted file mode 100644
index b737b8aabe2..00000000000
--- a/include/asm-arm/arch-epxa10db/ether00.h
+++ /dev/null
@@ -1,482 +0,0 @@
-#ifndef __ETHER00_H
-#define __ETHER00_H
-
-
-
-/*
- * Register definitions for the Ethernet MAC
- */
-
-/*
- * Copyright (c) Altera Corporation 2000.
- * All rights reserved.
- */
-
-/*
-* Structures for the DMA controller
-*/
-typedef struct fda_desc
- {
- struct fda_desc * FDNext;
- long FDSystem;
- long FDStat;
- short FDLength;
- short FDCtl;
- }FDA_DESC;
-
-typedef struct buf_desc
- {
- char * BuffData;
- short BuffLength;
- char BDStat;
- char BDCtl;
- }BUF_DESC;
-
-/*
-* Control masks for the DMA controller
-*/
-#define FDCTL_BDCOUNT_MSK (0x1F)
-#define FDCTL_BDCOUNT_OFST (0)
-#define FDCTL_FRMOPT_MSK (0x7C00)
-#define FDCTL_FRMOPT_OFST (10)
-#define FDCTL_COWNSFD_MSK (0x8000)
-#define FDCTL_COWNSFD_OFST (15)
-
-#define BDCTL_RXBDSEQN_MSK (0x7F)
-#define BDCTL_RXBDSEQN_OFST (0)
-#define BDCTL_COWNSBD_MSK (0x80)
-#define BDCTL_COWNSBD_OFST (7)
-
-#define FDNEXT_EOL_MSK (0x1)
-#define FDNEXT_EOL_OFST (0)
-#define FDNEXT_EOL_POINTER_MSK (0xFFFFFFF0)
-#define FDNEXT_EOL_POINTER_OFST (4)
-
-#define ETHER_ARC_SIZE (21)
-
-/*
-* Register definitions and masks
-*/
-#define ETHER_DMA_CTL(base) (ETHER00_TYPE (base + 0x100))
-#define ETHER_DMA_CTL_DMBURST_OFST (2)
-#define ETHER_DMA_CTL_DMBURST_MSK (0x1FC)
-#define ETHER_DMA_CTL_POWRMGMNT_OFST (11)
-#define ETHER_DMA_CTL_POWRMGMNT_MSK (0x1000)
-#define ETHER_DMA_CTL_TXBIGE_OFST (14)
-#define ETHER_DMA_CTL_TXBIGE_MSK (0x4000)
-#define ETHER_DMA_CTL_RXBIGE_OFST (15)
-#define ETHER_DMA_CTL_RXBIGE_MSK (0x8000)
-#define ETHER_DMA_CTL_TXWAKEUP_OFST (16)
-#define ETHER_DMA_CTL_TXWAKEUP_MSK (0x10000)
-#define ETHER_DMA_CTL_SWINTREQ_OFST (17)
-#define ETHER_DMA_CTL_SWINTREQ_MSK (0x20000)
-#define ETHER_DMA_CTL_INTMASK_OFST (18)
-#define ETHER_DMA_CTL_INTMASK_MSK (0x40000)
-#define ETHER_DMA_CTL_M66ENSTAT_OFST (19)
-#define ETHER_DMA_CTL_M66ENSTAT_MSK (0x80000)
-#define ETHER_DMA_CTL_RMTXINIT_OFST (20)
-#define ETHER_DMA_CTL_RMTXINIT_MSK (0x100000)
-#define ETHER_DMA_CTL_RMRXINIT_OFST (21)
-#define ETHER_DMA_CTL_RMRXINIT_MSK (0x200000)
-#define ETHER_DMA_CTL_RXALIGN_OFST (22)
-#define ETHER_DMA_CTL_RXALIGN_MSK (0xC00000)
-#define ETHER_DMA_CTL_RMSWRQ_OFST (24)
-#define ETHER_DMA_CTL_RMSWRQ_MSK (0x1000000)
-#define ETHER_DMA_CTL_RMEMBANK_OFST (25)
-#define ETHER_DMA_CTL_RMEMBANK_MSK (0x2000000)
-
-#define ETHER_TXFRMPTR(base) (ETHER00_TYPE (base + 0x104))
-
-#define ETHER_TXTHRSH(base) (ETHER00_TYPE (base + 0x308))
-
-#define ETHER_TXPOLLCTR(base) (ETHER00_TYPE (base + 0x30c))
-
-#define ETHER_BLFRMPTR(base) (ETHER00_TYPE (base + 0x110))
-#define ETHER_BLFFRMPTR_EOL_OFST (0)
-#define ETHER_BLFFRMPTR_EOL_MSK (0x1)
-#define ETHER_BLFFRMPTR_ADDRESS_OFST (4)
-#define ETHER_BLFFRMPTR_ADDRESS_MSK (0xFFFFFFF0)
-
-#define ETHER_RXFRAGSIZE(base) (ETHER00_TYPE (base + 0x114))
-#define ETHER_RXFRAGSIZE_MINFRAG_OFST (2)
-#define ETHER_RXFRAGSIZE_MINFRAG_MSK (0xFFC)
-#define ETHER_RXFRAGSIZE_ENPACK_OFST (15)
-#define ETHER_RXFRAGSIZE_ENPACK_MSK (0x8000)
-
-#define ETHER_INT_EN(base) (ETHER00_TYPE (base + 0x118))
-#define ETHER_INT_EN_FDAEXEN_OFST (0)
-#define ETHER_INT_EN_FDAEXEN_MSK (0x1)
-#define ETHER_INT_EN_BLEXEN_OFST (1)
-#define ETHER_INT_EN_BLEXN_MSK (0x2)
-#define ETHER_INT_EN_STARGABTEN_OFST (2)
-#define ETHER_INT_EN_STARGABTEN_MSK (0x4)
-#define ETHER_INT_EN_RTARGABTEN_OFST (3)
-#define ETHER_INT_EN_RTARGABTEN_MSK (0x8)
-#define ETHER_INT_EN_RMASABTEN_OFST (4)
-#define ETHER_INT_EN_RMASABTEN_MSK (0x10)
-#define ETHER_INT_EN_SSYSERREN_OFST (5)
-#define ETHER_INT_EN_SSYSERREN_MSK (0x20)
-#define ETHER_INT_EN_DPARERREN_OFST (6)
-#define ETHER_INT_EN_DPARERREN_MSK (0x40)
-#define ETHER_INT_EN_EARNOTEN_OFST (7)
-#define ETHER_INT_EN_EARNOTEN_MSK (0x80)
-#define ETHER_INT_EN_DPARDEN_OFST (8)
-#define ETHER_INT_EN_DPARDEN_MSK (0x100)
-#define ETHER_INT_EN_DMPARERREN_OFST (9)
-#define ETHER_INT_EN_DMPARERREN_MSK (0x200)
-#define ETHER_INT_EN_TXCTLCMPEN_OFST (10)
-#define ETHER_INT_EN_TXCTLCMPEN_MSK (0x400)
-#define ETHER_INT_EN_NRABTEN_OFST (11)
-#define ETHER_INT_EN_NRABTEN_MSK (0x800)
-
-#define ETHER_FDA_BAS(base) (ETHER00_TYPE (base + 0x11C))
-#define ETHER_FDA_BAS_ADDRESS_OFST (4)
-#define ETHER_FDA_BAS_ADDRESS_MSK (0xFFFFFFF0)
-
-#define ETHER_FDA_LIM(base) (ETHER00_TYPE (base + 0x120))
-#define ETHER_FDA_LIM_COUNT_OFST (4)
-#define ETHER_FDA_LIM_COUNT_MSK (0xFFF0)
-
-#define ETHER_INT_SRC(base) (ETHER00_TYPE (base + 0x124))
-#define ETHER_INT_SRC_INTMACTX_OFST (0)
-#define ETHER_INT_SRC_INTMACTX_MSK (0x1)
-#define ETHER_INT_SRC_INTMACRX_OFST (1)
-#define ETHER_INT_SRC_INTMACRX_MSK (0x2)
-#define ETHER_INT_SRC_INTSBUS_OFST (2)
-#define ETHER_INT_SRC_INTSBUS_MSK (0x4)
-#define ETHER_INT_SRC_INTFDAEX_OFST (3)
-#define ETHER_INT_SRC_INTFDAEX_MSK (0x8)
-#define ETHER_INT_SRC_INTBLEX_OFST (4)
-#define ETHER_INT_SRC_INTBLEX_MSK (0x10)
-#define ETHER_INT_SRC_SWINT_OFST (5)
-#define ETHER_INT_SRC_SWINT_MSK (0x20)
-#define ETHER_INT_SRC_INTEARNOT_OFST (6)
-#define ETHER_INT_SRC_INTEARNOT_MSK (0x40)
-#define ETHER_INT_SRC_DMPARERR_OFST (7)
-#define ETHER_INT_SRC_DMPARERR_MSK (0x80)
-#define ETHER_INT_SRC_INTEXBD_OFST (8)
-#define ETHER_INT_SRC_INTEXBD_MSK (0x100)
-#define ETHER_INT_SRC_INTTXCTLCMP_OFST (9)
-#define ETHER_INT_SRC_INTTXCTLCMP_MSK (0x200)
-#define ETHER_INT_SRC_INTNRABT_OFST (10)
-#define ETHER_INT_SRC_INTNRABT_MSK (0x400)
-#define ETHER_INT_SRC_FDAEX_OFST (11)
-#define ETHER_INT_SRC_FDAEX_MSK (0x800)
-#define ETHER_INT_SRC_BLEX_OFST (12)
-#define ETHER_INT_SRC_BLEX_MSK (0x1000)
-#define ETHER_INT_SRC_DMPARERRSTAT_OFST (13)
-#define ETHER_INT_SRC_DMPARERRSTAT_MSK (0x2000)
-#define ETHER_INT_SRC_NRABT_OFST (14)
-#define ETHER_INT_SRC_NRABT_MSK (0x4000)
-#define ETHER_INT_SRC_INTLINK_OFST (15)
-#define ETHER_INT_SRC_INTLINK_MSK (0x8000)
-#define ETHER_INT_SRC_INTEXDEFER_OFST (16)
-#define ETHER_INT_SRC_INTEXDEFER_MSK (0x10000)
-#define ETHER_INT_SRC_INTRMON_OFST (17)
-#define ETHER_INT_SRC_INTRMON_MSK (0x20000)
-#define ETHER_INT_SRC_IRQ_MSK (0x83FF)
-
-#define ETHER_PAUSECNT(base) (ETHER00_TYPE (base + 0x40))
-#define ETHER_PAUSECNT_COUNT_OFST (0)
-#define ETHER_PAUSECNT_COUNT_MSK (0xFFFF)
-
-#define ETHER_REMPAUCNT(base) (ETHER00_TYPE (base + 0x44))
-#define ETHER_REMPAUCNT_COUNT_OFST (0)
-#define ETHER_REMPAUCNT_COUNT_MSK (0xFFFF)
-
-#define ETHER_TXCONFRMSTAT(base) (ETHER00_TYPE (base + 0x348))
-#define ETHER_TXCONFRMSTAT_TS_STAT_VALUE_OFST (0)
-#define ETHER_TXCONFRMSTAT_TS_STAT_VALUE_MSK (0x3FFFFF)
-
-#define ETHER_MAC_CTL(base) (ETHER00_TYPE (base + 0))
-#define ETHER_MAC_CTL_HALTREQ_OFST (0)
-#define ETHER_MAC_CTL_HALTREQ_MSK (0x1)
-#define ETHER_MAC_CTL_HALTIMM_OFST (1)
-#define ETHER_MAC_CTL_HALTIMM_MSK (0x2)
-#define ETHER_MAC_CTL_RESET_OFST (2)
-#define ETHER_MAC_CTL_RESET_MSK (0x4)
-#define ETHER_MAC_CTL_FULLDUP_OFST (3)
-#define ETHER_MAC_CTL_FULLDUP_MSK (0x8)
-#define ETHER_MAC_CTL_MACLOOP_OFST (4)
-#define ETHER_MAC_CTL_MACLOOP_MSK (0x10)
-#define ETHER_MAC_CTL_CONN_OFST (5)
-#define ETHER_MAC_CTL_CONN_MSK (0x60)
-#define ETHER_MAC_CTL_LOOP10_OFST (7)
-#define ETHER_MAC_CTL_LOOP10_MSK (0x80)
-#define ETHER_MAC_CTL_LNKCHG_OFST (8)
-#define ETHER_MAC_CTL_LNKCHG_MSK (0x100)
-#define ETHER_MAC_CTL_MISSROLL_OFST (10)
-#define ETHER_MAC_CTL_MISSROLL_MSK (0x400)
-#define ETHER_MAC_CTL_ENMISSROLL_OFST (13)
-#define ETHER_MAC_CTL_ENMISSROLL_MSK (0x2000)
-#define ETHER_MAC_CTL_LINK10_OFST (15)
-#define ETHER_MAC_CTL_LINK10_MSK (0x8000)
-
-#define ETHER_ARC_CTL(base) (ETHER00_TYPE (base + 0x4))
-#define ETHER_ARC_CTL_STATIONACC_OFST (0)
-#define ETHER_ARC_CTL_STATIONACC_MSK (0x1)
-#define ETHER_ARC_CTL_GROUPACC_OFST (1)
-#define ETHER_ARC_CTL_GROUPACC_MSK (0x2)
-#define ETHER_ARC_CTL_BROADACC_OFST (2)
-#define ETHER_ARC_CTL_BROADACC_MSK (0x4)
-#define ETHER_ARC_CTL_NEGARC_OFST (3)
-#define ETHER_ARC_CTL_NEGARC_MSK (0x8)
-#define ETHER_ARC_CTL_COMPEN_OFST (4)
-#define ETHER_ARC_CTL_COMPEN_MSK (0x10)
-
-#define ETHER_TX_CTL(base) (ETHER00_TYPE (base + 0x8))
-#define ETHER_TX_CTL_TXEN_OFST (0)
-#define ETHER_TX_CTL_TXEN_MSK (0x1)
-#define ETHER_TX_CTL_TXHALT_OFST (1)
-#define ETHER_TX_CTL_TXHALT_MSK (0x2)
-#define ETHER_TX_CTL_NOPAD_OFST (2)
-#define ETHER_TX_CTL_NOPAD_MSK (0x4)
-#define ETHER_TX_CTL_NOCRC_OFST (3)
-#define ETHER_TX_CTL_NOCRC_MSK (0x8)
-#define ETHER_TX_CTL_FBACK_OFST (4)
-#define ETHER_TX_CTL_FBACK_MSK (0x10)
-#define ETHER_TX_CTL_NOEXDEF_OFST (5)
-#define ETHER_TX_CTL_NOEXDEF_MSK (0x20)
-#define ETHER_TX_CTL_SDPAUSE_OFST (6)
-#define ETHER_TX_CTL_SDPAUSE_MSK (0x40)
-#define ETHER_TX_CTL_MII10_OFST (7)
-#define ETHER_TX_CTL_MII10_MSK (0x80)
-#define ETHER_TX_CTL_ENUNDER_OFST (8)
-#define ETHER_TX_CTL_ENUNDER_MSK (0x100)
-#define ETHER_TX_CTL_ENEXDEFER_OFST (9)
-#define ETHER_TX_CTL_ENEXDEFER_MSK (0x200)
-#define ETHER_TX_CTL_ENLCARR_OFST (10)
-#define ETHER_TX_CTL_ENLCARR_MSK (0x400)
-#define ETHER_TX_CTL_ENEXCOLL_OFST (11)
-#define ETHER_TX_CTL_ENEXCOLL_MSK (0x800)
-#define ETHER_TX_CTL_ENLATECOLL_OFST (12)
-#define ETHER_TX_CTL_ENLATECOLL_MSK (0x1000)
-#define ETHER_TX_CTL_ENTXPAR_OFST (13)
-#define ETHER_TX_CTL_ENTXPAR_MSK (0x2000)
-#define ETHER_TX_CTL_ENCOMP_OFST (14)
-#define ETHER_TX_CTL_ENCOMP_MSK (0x4000)
-
-#define ETHER_TX_STAT(base) (ETHER00_TYPE (base + 0xc))
-#define ETHER_TX_STAT_TXCOLL_OFST (0)
-#define ETHER_TX_STAT_TXCOLL_MSK (0xF)
-#define ETHER_TX_STAT_EXCOLL_OFST (4)
-#define ETHER_TX_STAT_EXCOLL_MSK (0x10)
-#define ETHER_TX_STAT_TXDEFER_OFST (5)
-#define ETHER_TX_STAT_TXDEFER_MSK (0x20)
-#define ETHER_TX_STAT_PAUSED_OFST (6)
-#define ETHER_TX_STAT_PAUSED_MSK (0x40)
-#define ETHER_TX_STAT_INTTX_OFST (7)
-#define ETHER_TX_STAT_INTTX_MSK (0x80)
-#define ETHER_TX_STAT_UNDER_OFST (8)
-#define ETHER_TX_STAT_UNDER_MSK (0x100)
-#define ETHER_TX_STAT_EXDEFER_OFST (9)
-#define ETHER_TX_STAT_EXDEFER_MSK (0x200)
-#define ETHER_TX_STAT_LCARR_OFST (10)
-#define ETHER_TX_STAT_LCARR_MSK (0x400)
-#define ETHER_TX_STAT_TX10STAT_OFST (11)
-#define ETHER_TX_STAT_TX10STAT_MSK (0x800)
-#define ETHER_TX_STAT_LATECOLL_OFST (12)
-#define ETHER_TX_STAT_LATECOLL_MSK (0x1000)
-#define ETHER_TX_STAT_TXPAR_OFST (13)
-#define ETHER_TX_STAT_TXPAR_MSK (0x2000)
-#define ETHER_TX_STAT_COMP_OFST (14)
-#define ETHER_TX_STAT_COMP_MSK (0x4000)
-#define ETHER_TX_STAT_TXHALTED_OFST (15)
-#define ETHER_TX_STAT_TXHALTED_MSK (0x8000)
-#define ETHER_TX_STAT_SQERR_OFST (16)
-#define ETHER_TX_STAT_SQERR_MSK (0x10000)
-#define ETHER_TX_STAT_TXMCAST_OFST (17)
-#define ETHER_TX_STAT_TXMCAST_MSK (0x20000)
-#define ETHER_TX_STAT_TXBCAST_OFST (18)
-#define ETHER_TX_STAT_TXBCAST_MSK (0x40000)
-#define ETHER_TX_STAT_VLAN_OFST (19)
-#define ETHER_TX_STAT_VLAN_MSK (0x80000)
-#define ETHER_TX_STAT_MACC_OFST (20)
-#define ETHER_TX_STAT_MACC_MSK (0x100000)
-#define ETHER_TX_STAT_TXPAUSE_OFST (21)
-#define ETHER_TX_STAT_TXPAUSE_MSK (0x200000)
-
-#define ETHER_RX_CTL(base) (ETHER00_TYPE (base + 0x10))
-#define ETHER_RX_CTL_RXEN_OFST (0)
-#define ETHER_RX_CTL_RXEN_MSK (0x1)
-#define ETHER_RX_CTL_RXHALT_OFST (1)
-#define ETHER_RX_CTL_RXHALT_MSK (0x2)
-#define ETHER_RX_CTL_LONGEN_OFST (2)
-#define ETHER_RX_CTL_LONGEN_MSK (0x4)
-#define ETHER_RX_CTL_SHORTEN_OFST (3)
-#define ETHER_RX_CTL_SHORTEN_MSK (0x8)
-#define ETHER_RX_CTL_STRIPCRC_OFST (4)
-#define ETHER_RX_CTL_STRIPCRC_MSK (0x10)
-#define ETHER_RX_CTL_PASSCTL_OFST (5)
-#define ETHER_RX_CTL_PASSCTL_MSK (0x20)
-#define ETHER_RX_CTL_IGNORECRC_OFST (6)
-#define ETHER_RX_CTL_IGNORECRC_MSK (0x40)
-#define ETHER_RX_CTL_ENALIGN_OFST (8)
-#define ETHER_RX_CTL_ENALIGN_MSK (0x100)
-#define ETHER_RX_CTL_ENCRCERR_OFST (9)
-#define ETHER_RX_CTL_ENCRCERR_MSK (0x200)
-#define ETHER_RX_CTL_ENOVER_OFST (10)
-#define ETHER_RX_CTL_ENOVER_MSK (0x400)
-#define ETHER_RX_CTL_ENLONGERR_OFST (11)
-#define ETHER_RX_CTL_ENLONGERR_MSK (0x800)
-#define ETHER_RX_CTL_ENRXPAR_OFST (13)
-#define ETHER_RX_CTL_ENRXPAR_MSK (0x2000)
-#define ETHER_RX_CTL_ENGOOD_OFST (14)
-#define ETHER_RX_CTL_ENGOOD_MSK (0x4000)
-
-#define ETHER_RX_STAT(base) (ETHER00_TYPE (base + 0x14))
-#define ETHER_RX_STAT_LENERR_OFST (4)
-#define ETHER_RX_STAT_LENERR_MSK (0x10)
-#define ETHER_RX_STAT_CTLRECD_OFST (5)
-#define ETHER_RX_STAT_CTLRECD_MSK (0x20)
-#define ETHER_RX_STAT_INTRX_OFST (6)
-#define ETHER_RX_STAT_INTRX_MSK (0x40)
-#define ETHER_RX_STAT_RX10STAT_OFST (7)
-#define ETHER_RX_STAT_RX10STAT_MSK (0x80)
-#define ETHER_RX_STAT_ALIGNERR_OFST (8)
-#define ETHER_RX_STAT_ALIGNERR_MSK (0x100)
-#define ETHER_RX_STAT_CRCERR_OFST (9)
-#define ETHER_RX_STAT_CRCERR_MSK (0x200)
-#define ETHER_RX_STAT_OVERFLOW_OFST (10)
-#define ETHER_RX_STAT_OVERFLOW_MSK (0x400)
-#define ETHER_RX_STAT_LONGERR_OFST (11)
-#define ETHER_RX_STAT_LONGERR_MSK (0x800)
-#define ETHER_RX_STAT_RXPAR_OFST (13)
-#define ETHER_RX_STAT_RXPAR_MSK (0x2000)
-#define ETHER_RX_STAT_GOOD_OFST (14)
-#define ETHER_RX_STAT_GOOD_MSK (0x4000)
-#define ETHER_RX_STAT_RXHALTED_OFST (15)
-#define ETHER_RX_STAT_RXHALTED_MSK (0x8000)
-#define ETHER_RX_STAT_RXMCAST_OFST (17)
-#define ETHER_RX_STAT_RXMCAST_MSK (0x10000)
-#define ETHER_RX_STAT_RXBCAST_OFST (18)
-#define ETHER_RX_STAT_RXBCAST_MSK (0x20000)
-#define ETHER_RX_STAT_RXVLAN_OFST (19)
-#define ETHER_RX_STAT_RXVLAN_MSK (0x40000)
-#define ETHER_RX_STAT_RXPAUSE_OFST (20)
-#define ETHER_RX_STAT_RXPAUSE_MSK (0x80000)
-#define ETHER_RX_STAT_ARCSTATUS_OFST (21)
-#define ETHER_RX_STAT_ARCSTATUS_MSK (0xF00000)
-#define ETHER_RX_STAT_ARCENT_OFST (25)
-#define ETHER_RX_STAT_ARCENT_MSK (0x1F000000)
-
-#define ETHER_MD_DATA(base) (ETHER00_TYPE (base + 0x18))
-
-#define ETHER_MD_CA(base) (ETHER00_TYPE (base + 0x1c))
-#define ETHER_MD_CA_ADDR_OFST (0)
-#define ETHER_MD_CA_ADDR_MSK (0x1F)
-#define ETHER_MD_CA_PHY_OFST (5)
-#define ETHER_MD_CA_PHY_MSK (0x3E0)
-#define ETHER_MD_CA_WR_OFST (10)
-#define ETHER_MD_CA_WR_MSK (0x400)
-#define ETHER_MD_CA_BUSY_OFST (11)
-#define ETHER_MD_CA_BUSY_MSK (0x800)
-#define ETHER_MD_CA_PRESUPP_OFST (12)
-#define ETHER_MD_CA_PRESUPP_MSK (0x1000)
-
-#define ETHER_ARC_ADR(base) (ETHER00_TYPE (base + 0x160))
-#define ETHER_ARC_ADR_ARC_LOC_OFST (2)
-#define ETHER_ARC_ADR_ARC_LOC_MSK (0xFFC)
-
-#define ETHER_ARC_DATA(base) (ETHER00_TYPE (base + 0x364))
-
-#define ETHER_ARC_ENA(base) (ETHER00_TYPE (base + 0x28))
-#define ETHER_ARC_ENA_MSK (0x1FFFFF)
-
-#define ETHER_PROM_CTL(base) (ETHER00_TYPE (base + 0x2c))
-#define ETHER_PROM_CTL_PROM_ADDR_OFST (0)
-#define ETHER_PROM_CTL_PROM_ADDR_MSK (0x3F)
-#define ETHER_PROM_CTL_OPCODE_OFST (13)
-#define ETHER_PROM_CTL_OPCODE_MSK (0x6000)
-#define ETHER_PROM_CTL_OPCODE_READ_MSK (0x4000)
-#define ETHER_PROM_CTL_OPCODE_WRITE_MSK (0x2000)
-#define ETHER_PROM_CTL_OPCODE_ERASE_MSK (0x6000)
-#define ETHER_PROM_CTL_ENABLE_MSK (0x0030)
-#define ETHER_PROM_CTL_DISABLE_MSK (0x0000)
-#define ETHER_PROM_CTL_BUSY_OFST (15)
-#define ETHER_PROM_CTL_BUSY_MSK (0x8000)
-
-#define ETHER_PROM_DATA(base) (ETHER00_TYPE (base + 0x30))
-
-#define ETHER_MISS_CNT(base) (ETHER00_TYPE (base + 0x3c))
-#define ETHER_MISS_CNT_COUNT_OFST (0)
-#define ETHER_MISS_CNT_COUNT_MSK (0xFFFF)
-
-#define ETHER_CNTDATA(base) (ETHER00_TYPE (base + 0x80))
-
-#define ETHER_CNTACC(base) (ETHER00_TYPE (base + 0x84))
-#define ETHER_CNTACC_ADDR_OFST (0)
-#define ETHER_CNTACC_ADDR_MSK (0xFF)
-#define ETHER_CNTACC_WRRDN_OFST (8)
-#define ETHER_CNTACC_WRRDN_MSK (0x100)
-#define ETHER_CNTACC_CLEAR_OFST (9)
-#define ETHER_CNTACC_CLEAR_MSK (0x200)
-
-#define ETHER_TXRMINTEN(base) (ETHER00_TYPE (base + 0x88))
-#define ETHER_TXRMINTEN_MSK (0x3FFFFFFF)
-
-#define ETHER_RXRMINTEN(base) (ETHER00_TYPE (base + 0x8C))
-#define ETHER_RXRMINTEN_MSK (0xFFFFFF)
-
-/*
-* RMON Registers
-*/
-#define RMON_COLLISION0 0x0
-#define RMON_COLLISION1 0x1
-#define RMON_COLLISION2 0x2
-#define RMON_COLLISION3 0x3
-#define RMON_COLLISION4 0x4
-#define RMON_COLLISION5 0x5
-#define RMON_COLLISION6 0x6
-#define RMON_COLLISION7 0x7
-#define RMON_COLLISION8 0x8
-#define RMON_COLLISION9 0x9
-#define RMON_COLLISION10 0xa
-#define RMON_COLLISION11 0xb
-#define RMON_COLLISION12 0xc
-#define RMON_COLLISION13 0xd
-#define RMON_COLLISION14 0xe
-#define RMON_COLLISION15 0xf
-#define RMON_COLLISION16 0x10
-#define RMON_FRAMES_WITH_DEFERRED_XMISSIONS 0x11
-#define RMON_LATE_COLLISIONS 0x12
-#define RMON_FRAMES_LOST_DUE_TO_MAC_XMIT 0x13
-#define RMON_CARRIER_SENSE_ERRORS 0x14
-#define RMON_FRAMES_WITH_EXCESSIVE_DEFERAL 0x15
-#define RMON_UNICAST_FRAMES_TRANSMITTED_OK 0x16
-#define RMON_MULTICAST_FRAMES_XMITTED_OK 0x17
-#define RMON_BROADCAST_FRAMES_XMITTED_OK 0x18
-#define RMON_SQE_TEST_ERRORS 0x19
-#define RMON_PAUSE_MACCTRL_FRAMES_XMITTED 0x1A
-#define RMON_MACCTRL_FRAMES_XMITTED 0x1B
-#define RMON_VLAN_FRAMES_XMITTED 0x1C
-#define RMON_OCTETS_XMITTED_OK 0x1D
-#define RMON_OCTETS_XMITTED_OK_HI 0x1E
-
-#define RMON_RX_PACKET_SIZES0 0x40
-#define RMON_RX_PACKET_SIZES1 0x41
-#define RMON_RX_PACKET_SIZES2 0x42
-#define RMON_RX_PACKET_SIZES3 0x43
-#define RMON_RX_PACKET_SIZES4 0x44
-#define RMON_RX_PACKET_SIZES5 0x45
-#define RMON_RX_PACKET_SIZES6 0x46
-#define RMON_RX_PACKET_SIZES7 0x47
-#define RMON_FRAME_CHECK_SEQUENCE_ERRORS 0x48
-#define RMON_ALIGNMENT_ERRORS 0x49
-#define RMON_FRAGMENTS 0x4A
-#define RMON_JABBERS 0x4B
-#define RMON_FRAMES_LOST_TO_INTMACRCVERR 0x4C
-#define RMON_UNICAST_FRAMES_RCVD_OK 0x4D
-#define RMON_MULTICAST_FRAMES_RCVD_OK 0x4E
-#define RMON_BROADCAST_FRAMES_RCVD_OK 0x4F
-#define RMON_IN_RANGE_LENGTH_ERRORS 0x50
-#define RMON_OUT_OF_RANGE_LENGTH_ERRORS 0x51
-#define RMON_VLAN_FRAMES_RCVD 0x52
-#define RMON_PAUSE_MAC_CTRL_FRAMES_RCVD 0x53
-#define RMON_MAC_CTRL_FRAMES_RCVD 0x54
-#define RMON_OCTETS_RCVD_OK 0x55
-#define RMON_OCTETS_RCVD_OK_HI 0x56
-#define RMON_OCTETS_RCVD_OTHER 0x57
-#define RMON_OCTETS_RCVD_OTHER_HI 0x58
-
-#endif /* __ETHER00_H */
diff --git a/include/asm-arm/arch-epxa10db/excalibur.h b/include/asm-arm/arch-epxa10db/excalibur.h
deleted file mode 100644
index 5c91dd6d782..00000000000
--- a/include/asm-arm/arch-epxa10db/excalibur.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/* megafunction wizard: %ARM-Based Excalibur%
- GENERATION: STANDARD
- VERSION: WM1.0
- MODULE: ARM-Based Excalibur
- PROJECT: excalibur
- ============================================================
- File Name: v:\embedded\linux\bootldr\excalibur.h
- Megafunction Name(s): ARM-Based Excalibur
- ============================================================
-
- ************************************************************
- THIS IS A WIZARD-GENERATED FILE. DO NOT EDIT THIS FILE!
- ************************************************************/
-
-#ifndef EXCALIBUR_H_INCLUDED
-#define EXCALIBUR_H_INCLUDED
-
-#define EXC_DEFINE_PROCESSOR_LITTLE_ENDIAN
-#define EXC_DEFINE_BOOT_FROM_FLASH
-
-#define EXC_INPUT_CLK_FREQUENCY (50000000)
-#define EXC_AHB1_CLK_FREQUENCY (150000000)
-#define EXC_AHB2_CLK_FREQUENCY (75000000)
-#define EXC_SDRAM_CLK_FREQUENCY (75000000)
-
-/* Registers Block */
-#define EXC_REGISTERS_BASE (0x7fffc000)
-#define EXC_MODE_CTRL00_BASE (EXC_REGISTERS_BASE + 0x000)
-#define EXC_IO_CTRL00_BASE (EXC_REGISTERS_BASE + 0x040)
-#define EXC_MMAP00_BASE (EXC_REGISTERS_BASE + 0x080)
-#define EXC_PLD_CONFIG00_BASE (EXC_REGISTERS_BASE + 0x140)
-#define EXC_TIMER00_BASE (EXC_REGISTERS_BASE + 0x200)
-#define EXC_INT_CTRL00_BASE (EXC_REGISTERS_BASE + 0xc00)
-#define EXC_CLOCK_CTRL00_BASE (EXC_REGISTERS_BASE + 0x300)
-#define EXC_WATCHDOG00_BASE (EXC_REGISTERS_BASE + 0xa00)
-#define EXC_UART00_BASE (EXC_REGISTERS_BASE + 0x280)
-#define EXC_EBI00_BASE (EXC_REGISTERS_BASE + 0x380)
-#define EXC_SDRAM00_BASE (EXC_REGISTERS_BASE + 0x400)
-#define EXC_AHB12_BRIDGE_CTRL00_BASE (EXC_REGISTERS_BASE + 0x800)
-#define EXC_PLD_STRIPE_BRIDGE_CTRL00_BASE (EXC_REGISTERS_BASE + 0x100)
-#define EXC_STRIPE_PLD_BRIDGE_CTRL00_BASE (EXC_REGISTERS_BASE + 0x100)
-
-#define EXC_REGISTERS_SIZE (0x00004000)
-
-/* EBI Block(s) */
-#define EXC_EBI_BLOCK0_BASE (0x40000000)
-#define EXC_EBI_BLOCK0_SIZE (0x00400000)
-#define EXC_EBI_BLOCK0_WIDTH (8)
-#define EXC_EBI_BLOCK0_NON_CACHEABLE
-#define EXC_EBI_BLOCK1_BASE (0x40400000)
-#define EXC_EBI_BLOCK1_SIZE (0x00400000)
-#define EXC_EBI_BLOCK1_WIDTH (16)
-#define EXC_EBI_BLOCK1_NON_CACHEABLE
-#define EXC_EBI_BLOCK2_BASE (0x40800000)
-#define EXC_EBI_BLOCK2_SIZE (0x00400000)
-#define EXC_EBI_BLOCK2_WIDTH (16)
-#define EXC_EBI_BLOCK2_NON_CACHEABLE
-#define EXC_EBI_BLOCK3_BASE (0x40c00000)
-#define EXC_EBI_BLOCK3_SIZE (0x00400000)
-#define EXC_EBI_BLOCK3_WIDTH (16)
-#define EXC_EBI_BLOCK3_NON_CACHEABLE
-
-/* SDRAM Block(s) */
-#define EXC_SDRAM_BLOCK0_BASE (0x00000000)
-#define EXC_SDRAM_BLOCK0_SIZE (0x04000000)
-#define EXC_SDRAM_BLOCK0_WIDTH (32)
-#define EXC_SDRAM_BLOCK1_BASE (0x04000000)
-#define EXC_SDRAM_BLOCK1_SIZE (0x04000000)
-#define EXC_SDRAM_BLOCK1_WIDTH (32)
-
-/* Single Port SRAM Block(s) */
-#define EXC_SPSRAM_BLOCK0_BASE (0x08000000)
-#define EXC_SPSRAM_BLOCK0_SIZE (0x00020000)
-#define EXC_SPSRAM_BLOCK1_BASE (0x08020000)
-#define EXC_SPSRAM_BLOCK1_SIZE (0x00020000)
-
-/* PLD Block(s) */
-#define EXC_PLD_BLOCK0_BASE (0x80000000)
-#define EXC_PLD_BLOCK0_SIZE (0x00004000)
-#define EXC_PLD_BLOCK0_NON_CACHEABLE
-#define EXC_PLD_BLOCK1_BASE (0xf000000)
-#define EXC_PLD_BLOCK1_SIZE (0x00004000)
-#define EXC_PLD_BLOCK1_NON_CACHEABLE
-#define EXC_PLD_BLOCK2_BASE (0x80008000)
-#define EXC_PLD_BLOCK2_SIZE (0x00004000)
-#define EXC_PLD_BLOCK2_NON_CACHEABLE
-#define EXC_PLD_BLOCK3_BASE (0x8000c000)
-#define EXC_PLD_BLOCK3_SIZE (0x00004000)
-#define EXC_PLD_BLOCK3_NON_CACHEABLE
-
-#endif
diff --git a/include/asm-arm/arch-epxa10db/hardware.h b/include/asm-arm/arch-epxa10db/hardware.h
deleted file mode 100644
index b992c2924a7..00000000000
--- a/include/asm-arm/arch-epxa10db/hardware.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * linux/include/asm-arm/arch-epxa10/hardware.h
- *
- * This file contains the hardware definitions of the Integrator.
- *
- * Copyright (C) 1999 ARM Limited.
- * Copyright (C) 2001 Altera Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef __ASM_ARCH_HARDWARE_H
-#define __ASM_ARCH_HARDWARE_H
-
-#include <asm/arch/platform.h>
-
-/*
- * Where in virtual memory the IO devices (timers, system controllers
- * and so on)
- */
-#define IO_BASE 0xf0000000 // VA of IO
-#define IO_SIZE 0x10000000 // How much?
-#define IO_START EXC_REGISTERS_BASE // PA of IO
-/* macro to get at IO space when running virtually */
-#define IO_ADDRESS(x) ((x) | 0xf0000000)
-
-#define FLASH_VBASE 0xFE000000
-#define FLASH_SIZE 0x01000000
-#define FLASH_START EXC_EBI_BLOCK0_BASE
-#define FLASH_VADDR(x) ((x)|0xFE000000)
-/*
- * Similar to above, but for PCI addresses (memory, IO, Config and the
- * V3 chip itself). WARNING: this has to mirror definitions in platform.h
- */
-#if 0
-#define PCI_MEMORY_VADDR 0xe8000000
-#define PCI_CONFIG_VADDR 0xec000000
-#define PCI_V3_VADDR 0xed000000
-#define PCI_IO_VADDR 0xee000000
-
-#define PCIO_BASE PCI_IO_VADDR
-#define PCIMEM_BASE PCI_MEMORY_VADDR
-
-
-#define pcibios_assign_all_busses() 1
-
-#define PCIBIOS_MIN_IO 0x6000
-#define PCIBIOS_MIN_MEM 0x00100000
-#endif
-
-
-#endif
-
diff --git a/include/asm-arm/arch-epxa10db/int_ctrl00.h b/include/asm-arm/arch-epxa10db/int_ctrl00.h
deleted file mode 100644
index 23ec864c40b..00000000000
--- a/include/asm-arm/arch-epxa10db/int_ctrl00.h
+++ /dev/null
@@ -1,288 +0,0 @@
-/*
- *
- * This file contains the register definitions for the Excalibur
- * Timer TIMER00.
- *
- * Copyright (C) 2001 Altera Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __INT_CTRL00_H
-#define __INT_CTRL00_H
-
-#define INT_MS(base_addr) (INT_CTRL00_TYPE (base_addr + 0x00 ))
-#define INT_MS_FC_MSK (0x10000)
-#define INT_MS_FC_OFST (16)
-#define INT_MS_M1_MSK (0x8000)
-#define INT_MS_M1_OFST (15)
-#define INT_MS_M0_MSK (0x4000)
-#define INT_MS_M0_OFST (14)
-#define INT_MS_AE_MSK (0x2000)
-#define INT_MS_AE_OFST (13)
-#define INT_MS_PE_MSK (0x1000)
-#define INT_MS_PE_OFST (12)
-#define INT_MS_EE_MSK (0x0800)
-#define INT_MS_EE_OFST (11)
-#define INT_MS_PS_MSK (0x0400)
-#define INT_MS_PS_OFST (10)
-#define INT_MS_T1_MSK (0x0200)
-#define INT_MS_T1_OFST (9)
-#define INT_MS_T0_MSK (0x0100)
-#define INT_MS_T0_OFST (8)
-#define INT_MS_UA_MSK (0x0080)
-#define INT_MS_UA_OFST (7)
-#define INT_MS_IP_MSK (0x0040)
-#define INT_MS_IP_OFST (6)
-#define INT_MS_P5_MSK (0x0020)
-#define INT_MS_P5_OFST (5)
-#define INT_MS_P4_MSK (0x0010)
-#define INT_MS_P4_OFST (4)
-#define INT_MS_P3_MSK (0x0008)
-#define INT_MS_P3_OFST (3)
-#define INT_MS_P2_MSK (0x0004)
-#define INT_MS_P2_OFST (2)
-#define INT_MS_P1_MSK (0x0002)
-#define INT_MS_P1_OFST (1)
-#define INT_MS_P0_MSK (0x0001)
-#define INT_MS_P0_OFST (0)
-
-#define INT_MC(base_addr) (INT_CTRL00_TYPE (base_addr + 0x04 ))
-#define INT_MC_FC_MSK (0x10000)
-#define INT_MC_FC_OFST (16)
-#define INT_MC_M1_MSK (0x8000)
-#define INT_MC_M1_OFST (15)
-#define INT_MC_M0_MSK (0x4000)
-#define INT_MC_M0_OFST (14)
-#define INT_MC_AE_MSK (0x2000)
-#define INT_MC_AE_OFST (13)
-#define INT_MC_PE_MSK (0x1000)
-#define INT_MC_PE_OFST (12)
-#define INT_MC_EE_MSK (0x0800)
-#define INT_MC_EE_OFST (11)
-#define INT_MC_PS_MSK (0x0400)
-#define INT_MC_PS_OFST (10)
-#define INT_MC_T1_MSK (0x0200)
-#define INT_MC_T1_OFST (9)
-#define INT_MC_T0_MSK (0x0100)
-#define INT_MC_T0_OFST (8)
-#define INT_MC_UA_MSK (0x0080)
-#define INT_MC_UA_OFST (7)
-#define INT_MC_IP_MSK (0x0040)
-#define INT_MC_IP_OFST (6)
-#define INT_MC_P5_MSK (0x0020)
-#define INT_MC_P5_OFST (5)
-#define INT_MC_P4_MSK (0x0010)
-#define INT_MC_P4_OFST (4)
-#define INT_MC_P3_MSK (0x0008)
-#define INT_MC_P3_OFST (3)
-#define INT_MC_P2_MSK (0x0004)
-#define INT_MC_P2_OFST (2)
-#define INT_MC_P1_MSK (0x0002)
-#define INT_MC_P1_OFST (1)
-#define INT_MC_P0_MSK (0x0001)
-#define INT_MC_P0_OFST (0)
-
-#define INT_SS(base_addr) (INT_CTRL00_TYPE (base_addr + 0x08 ))
-#define INT_SS_FC_SSK (0x8000)
-#define INT_SS_FC_OFST (15)
-#define INT_SS_M1_SSK (0x8000)
-#define INT_SS_M1_OFST (15)
-#define INT_SS_M0_SSK (0x4000)
-#define INT_SS_M0_OFST (14)
-#define INT_SS_AE_SSK (0x2000)
-#define INT_SS_AE_OFST (13)
-#define INT_SS_PE_SSK (0x1000)
-#define INT_SS_PE_OFST (12)
-#define INT_SS_EE_SSK (0x0800)
-#define INT_SS_EE_OFST (11)
-#define INT_SS_PS_SSK (0x0400)
-#define INT_SS_PS_OFST (10)
-#define INT_SS_T1_SSK (0x0200)
-#define INT_SS_T1_OFST (9)
-#define INT_SS_T0_SSK (0x0100)
-#define INT_SS_T0_OFST (8)
-#define INT_SS_UA_SSK (0x0080)
-#define INT_SS_UA_OFST (7)
-#define INT_SS_IP_SSK (0x0040)
-#define INT_SS_IP_OFST (6)
-#define INT_SS_P5_SSK (0x0020)
-#define INT_SS_P5_OFST (5)
-#define INT_SS_P4_SSK (0x0010)
-#define INT_SS_P4_OFST (4)
-#define INT_SS_P3_SSK (0x0008)
-#define INT_SS_P3_OFST (3)
-#define INT_SS_P2_SSK (0x0004)
-#define INT_SS_P2_OFST (2)
-#define INT_SS_P1_SSK (0x0002)
-#define INT_SS_P1_OFST (1)
-#define INT_SS_P0_SSK (0x0001)
-#define INT_SS_P0_OFST (0)
-
-#define INT_RS(base_addr) (INT_CTRL00_TYPE (base_addr + 0x0C ))
-#define INT_RS_FC_RSK (0x10000)
-#define INT_RS_FC_OFST (16)
-#define INT_RS_M1_RSK (0x8000)
-#define INT_RS_M1_OFST (15)
-#define INT_RS_M0_RSK (0x4000)
-#define INT_RS_M0_OFST (14)
-#define INT_RS_AE_RSK (0x2000)
-#define INT_RS_AE_OFST (13)
-#define INT_RS_PE_RSK (0x1000)
-#define INT_RS_PE_OFST (12)
-#define INT_RS_EE_RSK (0x0800)
-#define INT_RS_EE_OFST (11)
-#define INT_RS_PS_RSK (0x0400)
-#define INT_RS_PS_OFST (10)
-#define INT_RS_T1_RSK (0x0200)
-#define INT_RS_T1_OFST (9)
-#define INT_RS_T0_RSK (0x0100)
-#define INT_RS_T0_OFST (8)
-#define INT_RS_UA_RSK (0x0080)
-#define INT_RS_UA_OFST (7)
-#define INT_RS_IP_RSK (0x0040)
-#define INT_RS_IP_OFST (6)
-#define INT_RS_P5_RSK (0x0020)
-#define INT_RS_P5_OFST (5)
-#define INT_RS_P4_RSK (0x0010)
-#define INT_RS_P4_OFST (4)
-#define INT_RS_P3_RSK (0x0008)
-#define INT_RS_P3_OFST (3)
-#define INT_RS_P2_RSK (0x0004)
-#define INT_RS_P2_OFST (2)
-#define INT_RS_P1_RSK (0x0002)
-#define INT_RS_P1_OFST (1)
-#define INT_RS_P0_RSK (0x0001)
-#define INT_RS_P0_OFST (0)
-
-#define INT_ID(base_addr) (INT_CTRL00_TYPE (base_addr + 0x10 ))
-#define INT_ID_ID_MSK (0x3F)
-#define INT_ID_ID_OFST (0)
-
-#define INT_PLD_PRIORITY(base_addr) (INT_CTRL00_TYPE (base_addr + 0x14 ))
-#define INT_PLD_PRIORITY_PRI_MSK (0x3F)
-#define INT_PLD_PRIORITY_PRI_OFST (0)
-#define INT_PLD_PRIORITY_GA_MSK (0x40)
-#define INT_PLD_PRIORITY_GA_OFST (6)
-
-#define INT_MODE(base_addr) (INT_CTRL00_TYPE (base_addr + 0x18 ))
-#define INT_MODE_MODE_MSK (0x3)
-#define INT_MODE_MODE_OFST (0)
-
-#define INT_PRIORITY_P0(base_addr) (INT_CTRL00_TYPE (base_addr + 0x80 ))
-#define INT_PRIORITY_P0_PRI_MSK (0x3F)
-#define INT_PRIORITY_P0_PRI_OFST (0)
-#define INT_PRIORITY_P0_FQ_MSK (0x40)
-#define INT_PRIORITY_P0_FQ_OFST (6)
-
-#define INT_PRIORITY_P1(base_addr) (INT_CTRL00_TYPE (base_addr + 0x84 ))
-#define INT_PRIORITY_P1_PRI_MSK (0x3F)
-#define INT_PRIORITY_P1_PRI_OFST (0)
-#define INT_PRIORITY_P1_FQ_MSK (0x40)
-#define INT_PRIORITY_P1_FQ_OFST (6)
-
-#define INT_PRIORITY_P2(base_addr) (INT_CTRL00_TYPE (base_addr + 0x88 ))
-#define INT_PRIORITY_P2_PRI_MSK (0x3F)
-#define INT_PRIORITY_P2_PRI_OFST (0)
-#define INT_PRIORITY_P2_FQ_MSK (0x40)
-#define INT_PRIORITY_P2_FQ_OFST (6)
-
-#define INT_PRIORITY_P3(base_addr) (INT_CTRL00_TYPE (base_addr + 0x8C ))
-#define INT_PRIORITY_P3_PRI_MSK (0x3F)
-#define INT_PRIORITY_P3_PRI_OFST (0)
-#define INT_PRIORITY_P3_FQ_MSK (0x40)
-#define INT_PRIORITY_P3_FQ_OFST (6)
-
-#define INT_PRIORITY_P4(base_addr) (INT_CTRL00_TYPE (base_addr + 0x90 ))
-#define INT_PRIORITY_P4_PRI_MSK (0x3F)
-#define INT_PRIORITY_P4_PRI_OFST (0)
-#define INT_PRIORITY_P4_FQ_MSK (0x40)
-#define INT_PRIORITY_P4_FQ_OFST (6)
-
-#define INT_PRIORITY_P5(base_addr) (INT_CTRL00_TYPE (base_addr + 0x94 ))
-#define INT_PRIORITY_P5_PRI_MSK (0x3F)
-#define INT_PRIORITY_P5_PRI_OFST (0)
-#define INT_PRIORITY_P5_FQ_MSK (0x40)
-#define INT_PRIORITY_P5_FQ_OFST (6)
-
-#define INT_PRIORITY_IP(base_addr) (INT_CTRL00_TYPE (base_addr + 0x94 ))
-#define INT_PRIORITY_IP_PRI_MSK (0x3F)
-#define INT_PRIORITY_IP_PRI_OFST (0)
-#define INT_PRIORITY_IP_FQ_MSK (0x40)
-#define INT_PRIORITY_IP_FQ_OFST (6)
-
-#define INT_PRIORITY_UA(base_addr) (INT_CTRL00_TYPE (base_addr + 0x9C ))
-#define INT_PRIORITY_UA_PRI_MSK (0x3F)
-#define INT_PRIORITY_UA_PRI_OFST (0)
-#define INT_PRIORITY_UA_FQ_MSK (0x40)
-#define INT_PRIORITY_UA_FQ_OFST (6)
-
-#define INT_PRIORITY_T0(base_addr) (INT_CTRL00_TYPE (base_addr + 0xA0 ))
-#define INT_PRIORITY_T0_PRI_MSK (0x3F)
-#define INT_PRIORITY_T0_PRI_OFST (0)
-#define INT_PRIORITY_T0_FQ_MSK (0x40)
-#define INT_PRIORITY_T0_FQ_OFST (6)
-
-#define INT_PRIORITY_T1(base_addr) (INT_CTRL00_TYPE (base_addr + 0xA4 ))
-#define INT_PRIORITY_T1_PRI_MSK (0x3F)
-#define INT_PRIORITY_T1_PRI_OFST (0)
-#define INT_PRIORITY_T1_FQ_MSK (0x40)
-#define INT_PRIORITY_T1_FQ_OFST (6)
-
-#define INT_PRIORITY_PS(base_addr) (INT_CTRL00_TYPE (base_addr + 0xA8 ))
-#define INT_PRIORITY_PS_PRI_MSK (0x3F)
-#define INT_PRIORITY_PS_PRI_OFST (0)
-#define INT_PRIORITY_PS_FQ_MSK (0x40)
-#define INT_PRIORITY_PS_FQ_OFST (6)
-
-#define INT_PRIORITY_EE(base_addr) (INT_CTRL00_TYPE (base_addr + 0xAC ))
-#define INT_PRIORITY_EE_PRI_MSK (0x3F)
-#define INT_PRIORITY_EE_PRI_OFST (0)
-#define INT_PRIORITY_EE_FQ_MSK (0x40)
-#define INT_PRIORITY_EE_FQ_OFST (6)
-
-#define INT_PRIORITY_PE(base_addr) (INT_CTRL00_TYPE (base_addr + 0xB0 ))
-#define INT_PRIORITY_PE_PRI_MSK (0x3F)
-#define INT_PRIORITY_PE_PRI_OFST (0)
-#define INT_PRIORITY_PE_FQ_MSK (0x40)
-#define INT_PRIORITY_PE_FQ_OFST (6)
-
-#define INT_PRIORITY_AE(base_addr) (INT_CTRL00_TYPE (base_addr + 0xB4 ))
-#define INT_PRIORITY_AE_PRI_MSK (0x3F)
-#define INT_PRIORITY_AE_PRI_OFST (0)
-#define INT_PRIORITY_AE_FQ_MSK (0x40)
-#define INT_PRIORITY_AE_FQ_OFST (6)
-
-#define INT_PRIORITY_M0(base_addr) (INT_CTRL00_TYPE (base_addr + 0xB8 ))
-#define INT_PRIORITY_M0_PRI_MSK (0x3F)
-#define INT_PRIORITY_M0_PRI_OFST (0)
-#define INT_PRIORITY_M0_FQ_MSK (0x40)
-#define INT_PRIORITY_M0_FQ_OFST (6)
-
-#define INT_PRIORITY_M1(base_addr) (INT_CTRL00_TYPE (base_addr + 0xBC ))
-#define INT_PRIORITY_M1_PRI_MSK (0x3F)
-#define INT_PRIORITY_M1_PRI_OFST (0)
-#define INT_PRIORITY_M1_FQ_MSK (0x40)
-#define INT_PRIORITY_M1_FQ_OFST (6)
-
-#define INT_PRIORITY_FC(base_addr) (INT_CTRL00_TYPE (base_addr + 0xC0 ))
-#define INT_PRIORITY_FC_PRI_MSK (0x3F)
-#define INT_PRIORITY_FC_PRI_OFST (0)
-#define INT_PRIORITY_FC_FQ_MSK (0x40)
-#define INT_PRIORITY_FC_FQ_OFST (6)
-
-#endif /* __INT_CTRL00_H */
-
-
diff --git a/include/asm-arm/arch-epxa10db/io.h b/include/asm-arm/arch-epxa10db/io.h
deleted file mode 100644
index 9fe100c9d6b..00000000000
--- a/include/asm-arm/arch-epxa10db/io.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * linux/include/asm-arm/arch-epxa10db/io.h
- *
- * Copyright (C) 1999 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef __ASM_ARM_ARCH_IO_H
-#define __ASM_ARM_ARCH_IO_H
-
-#include <asm/hardware.h>
-
-#define IO_SPACE_LIMIT 0xffff
-
-
-/*
- * Generic virtual read/write
- */
-/*#define outsw __arch_writesw
-#define outsl __arch_writesl
-#define outsb __arch_writesb
-#define insb __arch_readsb
-#define insw __arch_readsw
-#define insl __arch_readsl*/
-
-#define __io(a) ((void __iomem *)(a))
-#define __mem_pci(a) (a)
-
-#endif
diff --git a/include/asm-arm/arch-epxa10db/irqs.h b/include/asm-arm/arch-epxa10db/irqs.h
deleted file mode 100644
index c3758a3b5d9..00000000000
--- a/include/asm-arm/arch-epxa10db/irqs.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * linux/include/asm-arm/arch-camelot/irqs.h
- *
- * Copyright (C) 2001 Altera Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-/* Use the Excalibur chip definitions */
-#define INT_CTRL00_TYPE
-#include "asm/arch/int_ctrl00.h"
-
-
-#define IRQ_PLD0 INT_MS_P0_OFST
-#define IRQ_PLD1 INT_MS_P1_OFST
-#define IRQ_PLD2 INT_MS_P2_OFST
-#define IRQ_PLD3 INT_MS_P3_OFST
-#define IRQ_PLD4 INT_MS_P4_OFST
-#define IRQ_PLD5 INT_MS_P5_OFST
-#define IRQ_EXT INT_MS_IP_OFST
-#define IRQ_UART INT_MS_UA_OFST
-#define IRQ_TIMER0 INT_MS_T0_OFST
-#define IRQ_TIMER1 INT_MS_T1_OFST
-#define IRQ_PLL INT_MS_PLL_OFST
-#define IRQ_EBI INT_MS_EBI_OFST
-#define IRQ_STRIPE_BRIDGE INT_MS_PLL_OFST
-#define IRQ_AHB_BRIDGE INT_MS_PLL_OFST
-#define IRQ_COMMRX INT_MS_CR_OFST
-#define IRQ_COMMTX INT_MS_CT_OFST
-#define IRQ_FAST_COMM INT_MS_FC_OFST
-
-#define NR_IRQS (INT_MS_FC_OFST + 1)
-
diff --git a/include/asm-arm/arch-epxa10db/mode_ctrl00.h b/include/asm-arm/arch-epxa10db/mode_ctrl00.h
deleted file mode 100644
index d8a7efa12e1..00000000000
--- a/include/asm-arm/arch-epxa10db/mode_ctrl00.h
+++ /dev/null
@@ -1,80 +0,0 @@
-#ifndef __MODE_CTRL00_H
-#define __MODE_CTRL00_H
-
-/*
- * Register definitions for the reset and mode control
- */
-
-/*
- * Copyright (C) 2001 Altera Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-
-#define BOOT_CR(BASE_ADDR) (MODE_CTRL00_TYPE (BASE_ADDR ))
-#define BOOT_CR_BF_MSK (0x1)
-#define BOOT_CR_BF_OFST (0)
-#define BOOT_CR_HM_MSK (0x2)
-#define BOOT_CR_HM_OFST (1)
-#define BOOT_CR_RE_MSK (0x4)
-#define BOOT_CR_RE_OFST (2)
-
-#define RESET_SR(BASE_ADDR) (MODE_CTRL00_TYPE (BASE_ADDR + 0x4 ))
-#define RESET_SR_WR_MSK (0x1)
-#define RESET_SR_WR_OFST (0)
-#define RESET_SR_CR_MSK (0x2)
-#define RESET_SR_CR_OFST (1)
-#define RESET_SR_JT_MSK (0x4)
-#define RESET_SR_JT_OFST (2)
-#define RESET_SR_ER_MSK (0x8)
-#define RESET_SR_ER_OFST (3)
-
-#define ID_CODE(BASE_ADDR) (MODE_CTRL00_TYPE (BASE_ADDR + 0x08 ))
-
-#define SRAM0_SR(BASE_ADDR) (MODE_CTRL00_TYPE (BASE_ADDR + 0x20 ))
-#define SRAM0_SR_SIZE_MSK (0xFFFFF000)
-#define SRAM0_SR_SIZE_OFST (12)
-
-#define SRAM1_SR(BASE_ADDR) (MODE_CTRL00_TYPE (BASE_ADDR + 0x24 ))
-#define SRAM1_SR_SIZE_MSK (0xFFFFF000)
-#define SRAM1_SR_SIZE_OFST (12)
-
-#define DPSRAM0_SR(BASE_ADDR) (MODE_CTRL00_TYPE (BASE_ADDR + 0x30 ))
-
-#define DPSRAM0_SR_MODE_MSK (0xF)
-#define DPSRAM0_SR_MODE_OFST (0)
-#define DPSRAM0_SR_GLBL_MSK (0x30)
-#define DPSRAM0_SR_SIZE_MSK (0xFFFFF000)
-#define DPSRAM0_SR_SIZE_OFST (12)
-
-#define DPSRAM0_LCR(BASE_ADDR) (MODE_CTRL00_TYPE (BASE_ADDR + 0x34 ))
-#define DPSRAM0_LCR_LCKADDR_MSK (0x1FFE0)
-#define DPSRAM0_LCR_LCKADDR_OFST (4)
-
-#define DPSRAM1_SR(BASE_ADDR) (MODE_CTRL00_TYPE (BASE_ADDR + 0x38 ))
-#define DPSRAM1_SR_MODE_MSK (0xF)
-#define DPSRAM1_SR_MODE_OFST (0)
-#define DPSRAM1_SR_GLBL_MSK (0x30)
-#define DPSRAM1_SR_GLBL_OFST (4)
-#define DPSRAM1_SR_SIZE_MSK (0xFFFFF000)
-#define DPSRAM1_SR_SIZE_OFST (12)
-
-#define DPSRAM1_LCR(BASE_ADDR) (MODE_CTRL00_TYPE (BASE_ADDR + 0x3C ))
-#define DPSRAM1_LCR_LCKADDR_MSK (0x1FFE0)
-#define DPSRAM1_LCR_LCKADDR_OFST (4)
-
-#endif /* __MODE_CTRL00_H */
diff --git a/include/asm-arm/arch-epxa10db/platform.h b/include/asm-arm/arch-epxa10db/platform.h
deleted file mode 100644
index 129bb0f212a..00000000000
--- a/include/asm-arm/arch-epxa10db/platform.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef PLATFORM_H
-#define PLATFORM_H
-#include "excalibur.h"
-
-#define MAXIRQNUM 15
-#endif
-
diff --git a/include/asm-arm/arch-epxa10db/pld_conf00.h b/include/asm-arm/arch-epxa10db/pld_conf00.h
deleted file mode 100644
index 7af2c38dacc..00000000000
--- a/include/asm-arm/arch-epxa10db/pld_conf00.h
+++ /dev/null
@@ -1,73 +0,0 @@
-#ifndef __PLD_CONF00_H
-#define __PLD_CONF00_H
-
-/*
- * Register definitions for the PLD Configuration Logic
- */
-
-/*
- *
- * This file contains the register definitions for the Excalibur
- * Interrupt controller INT_CTRL00.
- *
- * Copyright (C) 2001 Altera Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#define CONFIG_CONTROL(BASE_ADDR) (PLD_CONF00_TYPE (BASE_ADDR))
-#define CONFIG_CONTROL_LK_MSK (0x1)
-#define CONFIG_CONTROL_LK_OFST (0)
-#define CONFIG_CONTROL_CO_MSK (0x2)
-#define CONFIG_CONTROL_CO_OFST (1)
-#define CONFIG_CONTROL_B_MSK (0x4)
-#define CONFIG_CONTROL_B_OFST (2)
-#define CONFIG_CONTROL_PC_MSK (0x8)
-#define CONFIG_CONTROL_PC_OFST (3)
-#define CONFIG_CONTROL_E_MSK (0x10)
-#define CONFIG_CONTROL_E_OFST (4)
-#define CONFIG_CONTROL_ES_MSK (0xE0)
-#define CONFIG_CONTROL_ES_OFST (5)
-#define CONFIG_CONTROL_ES_0_MSK (0x20)
-#define CONFIG_CONTROL_ES_1_MSK (0x40)
-#define CONFIG_CONTROL_ES_2_MSK (0x80)
-
-#define CONFIG_CONTROL_CLOCK(BASE_ADDR) (PLD_CONF00_TYPE (BASE_ADDR + 0x4 ))
-#define CONFIG_CONTROL_CLOCK_RATIO_MSK (0xFFFF)
-#define CONFIG_CONTROL_CLOCK_RATIO_OFST (0)
-
-#define CONFIG_CONTROL_DATA(BASE_ADDR) (PLD_CONF00_TYPE (BASE_ADDR + 0x8 ))
-#define CONFIG_CONTROL_DATA_MSK (0xFFFFFFFF)
-#define CONFIG_CONTROL_DATA_OFST (0)
-
-#define CONFIG_UNLOCK(BASE_ADDR) (PLD_CONF00_TYPE (BASE_ADDR + 0xC ))
-#define CONFIG_UNLOCK_MSK (0xFFFFFFFF)
-#define CONFIG_UNLOCK_OFST (0)
-
-#define CONFIG_UNLOCK_MAGIC (0x554E4C4B)
-
-#endif /* __PLD_CONF00_H */
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/include/asm-arm/arch-epxa10db/tdkphy.h b/include/asm-arm/arch-epxa10db/tdkphy.h
deleted file mode 100644
index 5e107bd4e10..00000000000
--- a/include/asm-arm/arch-epxa10db/tdkphy.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * linux/drivers/tdkphy.h
- *
- * Copyright (C) 2001 Altera Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __TDKPHY_H
-#define __TDKPHY_H
-
-/*
- * Register definitions for the TDK 78Q2120 PHY
- * which is on the Camelot board
- */
-
-/*
- * Copyright (c) Altera Corporation 2000.
- * All rights reserved.
- */
-#define PHY_CONTROL (0)
-#define PHY_CONTROL_COLT_MSK (0x80)
-#define PHY_CONTROL_COLT_OFST (7)
-#define PHY_CONTROL_DUPLEX_MSK (0x100)
-#define PHY_CONTROL_DUPLEX_OFST (8)
-#define PHY_CONTROL_RANEG_MSK (0x200)
-#define PHY_CONTROL_RANEG_OFST (9)
-#define PHY_CONTROL_ISO_MSK (0x400)
-#define PHY_CONTROL_ISO_OFST (10)
-#define PHY_CONTROL_PWRDN_MSK (0x800)
-#define PHY_CONTROL_PWRDN_OFST (11)
-#define PHY_CONTROL_ANEGEN_MSK (0x1000)
-#define PHY_CONTROL_ANEGEN_OFST (12)
-#define PHY_CONTROL_SPEEDSL_MSK (0x2000)
-#define PHY_CONTROL_SPEEDSL_OFST (13)
-#define PHY_CONTROL_LOOPBK_MSK (0x4000)
-#define PHY_CONTROL_LOOPBK_OFST (14)
-#define PHY_CONTROL_RESET_MSK (0x8000)
-#define PHY_CONTROL_RESET_OFST (15)
-
-#define PHY_STATUS (1)
-#define PHY_STATUS_ETXD_MSK (0x1)
-#define PHY_STATUS_EXTD_OFST (0)
-#define PHY_STATUS_JAB_MSK (0x2)
-#define PHY_STATUS_JAB_OFST (1)
-#define PHY_STATUS_LINK_MSK (0x4)
-#define PHY_STATUS_LINK_OFST (2)
-#define PHY_STATUS_ANEGA_MSK (0x8)
-#define PHY_STATUS_ANEGA_OFST (3)
-#define PHY_STATUS_RFAULT_MSK (0x10)
-#define PHY_STATUS_RFAULT_OFST (4)
-#define PHY_STATUS_ANEGC_MSK (0x20)
-#define PHY_STATUS_ANEGC_OFST (5)
-#define PHY_STATUS_10T_H_MSK (0x800)
-#define PHY_STATUS_10T_H_OFST (11)
-#define PHY_STATUS_10T_F_MSK (0x1000)
-#define PHY_STATUS_10T_F_OFST (12)
-#define PHY_STATUS_100_X_H_MSK (0x2000)
-#define PHY_STATUS_100_X_H_OFST (13)
-#define PHY_STATUS_100_X_F_MSK (0x4000)
-#define PHY_STATUS_100_X_F_OFST (14)
-#define PHY_STATUS_100T4_MSK (0x8000)
-#define PHY_STATUS_100T4_OFST (15)
-
-#define PHY_ID1 (2)
-#define PHY_ID1_OUI_MSK (0xFFFF)
-#define PHY_ID1_OUI_OFST (0)
-
-#define PHY_ID2 (3)
-#define PHY_ID2_RN_MSK (0xF)
-#define PHY_ID2_RN_OFST (0)
-#define PHY_ID2_MN_MSK (0x3F0)
-#define PHY_ID2_MN_OFST (4)
-#define PHY_ID2_OUI_MSK (0xFC00)
-#define PHY_ID2_OUI_OFST (10)
-
-#define PHY_AUTO_NEG_ADVERTISEMENT (4)
-#define PHY_AUTO_NEG_ADVERTISEMENT_SELECTOR_MSK (0x1F)
-#define PHY_AUTO_NEG_ADVERTISEMENT_SELECTOR_OFST (0)
-#define PHY_AUTO_NEG_ADVERTISEMENT_A0_MSK (0x20)
-#define PHY_AUTO_NEG_ADVERTISEMENT_A0_OFST (5)
-#define PHY_AUTO_NEG_ADVERTISEMENT_A1_MSK (0x40)
-#define PHY_AUTO_NEG_ADVERTISEMENT_A1_OFST (6)
-#define PHY_AUTO_NEG_ADVERTISEMENT_A2_MSK (0x80)
-#define PHY_AUTO_NEG_ADVERTISEMENT_A2_OFST (7)
-#define PHY_AUTO_NEG_ADVERTISEMENT_A3_MSK (0x100)
-#define PHY_AUTO_NEG_ADVERTISEMENT_A3_OFST (8)
-#define PHY_AUTO_NEG_ADVERTISEMENT_A4_MSK (0x200)
-#define PHY_AUTO_NEG_ADVERTISEMENT_A4_OFST (9)
-#define PHY_AUTO_NEG_ADVERTISEMENT_TAF_MSK (0x1FE0)
-#define PHY_AUTO_NEG_ADVERTISEMENT_TAF_OFST (5)
-#define PHY_AUTO_NEG_ADVERTISEMENT_RF_MSK (0x2000)
-#define PHY_AUTO_NEG_ADVERTISEMENT_RF_OFST (13)
-#define PHY_AUTO_NEG_ADVERTISEMENT_RSVD_MSK (0x4000)
-#define PHY_AUTO_NEG_ADVERTISEMENT_RVSD_OFST (14)
-#define PHY_AUTO_NEG_ADVERTISEMENT_NP_MSK (0x8000)
-#define PHY_AUTO_NEG_ADVERTISEMENT_NP_OFST (15)
-
-#define PHY_AUTO_NEG_LINK_PARTNER (5)
-#define PHY_AUTO_NEG_LINK_PARTNER_S4_MSK (0x1F)
-#define PHY_AUTO_NEG_LINK_PARTNER_S4_OFST (0)
-#define PHY_AUTO_NEG_LINK_PARTNER_A7_MSK (0x1FE0)
-#define PHY_AUTO_NEG_LINK_PARTNER_A7_OFST (5)
-#define PHY_AUTO_NEG_LINK_PARTNER_RF_MSK (0x2000)
-#define PHY_AUTO_NEG_LINK_PARTNER_RF_OFST (13)
-#define PHY_AUTO_NEG_LINK_PARTNER_ACK_MSK (0x4000)
-#define PHY_AUTO_NEG_LINK_PARTNER_ACK_OFST (14)
-#define PHY_AUTO_NEG_LINK_PARTNER_NP_MSK (0x8000)
-#define PHY_AUTO_NEG_LINK_PARTNER_NP_OFST (15)
-
-#define PHY_AUTO_NEG_EXPANSION (6)
-#define PHY_AUTO_NEG_EXPANSION_LPANEGA_MSK (0x1)
-#define PHY_AUTO_NEG_EXPANSION_LPANEGA_OFST (0)
-#define PHY_AUTO_NEG_EXPANSION_PRX_MSK (0x2)
-#define PHY_AUTO_NEG_EXPANSION_PRX_OFST (1)
-#define PHY_AUTO_NEG_EXPANSION_NPA_MSK (0x4)
-#define PHY_AUTO_NEG_EXPANSION_NPA_OFST (2)
-#define PHY_AUTO_NEG_EXPANSION_LPNPA_MSK (0x8)
-#define PHY_AUTO_NEG_EXPANSION_LPNPA_OFST (3)
-#define PHY_AUTO_NEG_EXPANSION_PDF_MSK (0x10)
-#define PHY_AUTO_NEG_EXPANSION_PDF_OFST (4)
-
-#define PHY_VENDOR_SPECIFIC (16)
-#define PHY_VENDOR_SPECIFIC_RXCC_MSK (0x1)
-#define PHY_VENDOR_SPECIFIC_RXCC_OFST (0)
-#define PHY_VENDOR_SPECIFIC_PCSBP_MSK (0x2)
-#define PHY_VENDOR_SPECIFIC_PCSBP_OFST (1)
-#define PHY_VENDOR_SPECIFIC_RVSPOL_MSK (0x10)
-#define PHY_VENDOR_SPECIFIC_RVSPOL_OFST (4)
-#define PHY_VENDOR_SPECIFIC_APOL_MSK (0x20)
-#define PHY_VENDOR_SPECIFIC_APOL_OFST (5)
-#define PHY_VENDOR_SPECIFIC_GPIO0_DIR_MSK (0x40)
-#define PHY_VENDOR_SPECIFIC_GPIO0_DIR_OFST (6)
-#define PHY_VENDOR_SPECIFIC_GPIO0_DAT_MSK (0x80)
-#define PHY_VENDOR_SPECIFIC_GPIO0_DAT_OFST (7)
-#define PHY_VENDOR_SPECIFIC_GPIO1_DIR_MSK (0x100)
-#define PHY_VENDOR_SPECIFIC_GPIO1_DIR_OFST (8)
-#define PHY_VENDOR_SPECIFIC_GPIO1_DAT_MSK (0x200)
-#define PHY_VENDOR_SPECIFIC_GPIO1_DAT_OFST (9)
-#define PHY_VENDOR_SPECIFIC_10BT_NATURAL_LOOPBACK_DAT_MSK (0x400)
-#define PHY_VENDOR_SPECIFIC_10BT_NATURAL_LOOPBACK_DAT_OFST (10)
-#define PHY_VENDOR_SPECIFIC_10BT_SQE_TEST_INHIBIT_MSK (0x800)
-#define PHY_VENDOR_SPECIFIC_10BT_SQE_TEST_INHIBIT_OFST (11)
-#define PHY_VENDOR_SPECIFIC_TXHIM_MSK (0x1000)
-#define PHY_VENDOR_SPECIFIC_TXHIM_OFST (12)
-#define PHY_VENDOR_SPECIFIC_INT_LEVEL_MSK (0x4000)
-#define PHY_VENDOR_SPECIFIC_INT_LEVEL_OFST (14)
-#define PHY_VENDOR_SPECIFIC_RPTR_MSK (0x8000)
-#define PHY_VENDOR_SPECIFIC_RPTR_OFST (15)
-
-#define PHY_IRQ_CONTROL (17)
-#define PHY_IRQ_CONTROL_ANEG_COMP_INT_MSK (0x1)
-#define PHY_IRQ_CONTROL_ANEG_COMP_INT_OFST (0)
-#define PHY_IRQ_CONTROL_RFAULT_INT_MSK (0x2)
-#define PHY_IRQ_CONTROL_RFAULT_INT_OFST (1)
-#define PHY_IRQ_CONTROL_LS_CHG_INT_MSK (0x4)
-#define PHY_IRQ_CONTROL_LS_CHG_INT_OFST (2)
-#define PHY_IRQ_CONTROL_LP_ACK_INT_MSK (0x8)
-#define PHY_IRQ_CONTROL_LP_ACK_INT_OFST (3)
-#define PHY_IRQ_CONTROL_PDF_INT_MSK (0x10)
-#define PHY_IRQ_CONTROL_PDF_INT_OFST (4)
-#define PHY_IRQ_CONTROL_PRX_INT_MSK (0x20)
-#define PHY_IRQ_CONTROL_PRX_INT_OFST (5)
-#define PHY_IRQ_CONTROL_RXER_INT_MSK (0x40)
-#define PHY_IRQ_CONTROL_RXER_INT_OFST (6)
-#define PHY_IRQ_CONTROL_JABBER_INT_MSK (0x80)
-#define PHY_IRQ_CONTROL_JABBER_INT_OFST (7)
-#define PHY_IRQ_CONTROL_ANEG_COMP_IE_MSK (0x100)
-#define PHY_IRQ_CONTROL_ANEG_COMP_IE_OFST (8)
-#define PHY_IRQ_CONTROL_RFAULT_IE_MSK (0x200)
-#define PHY_IRQ_CONTROL_RFAULT_IE_OFST (9)
-#define PHY_IRQ_CONTROL_LS_CHG_IE_MSK (0x400)
-#define PHY_IRQ_CONTROL_LS_CHG_IE_OFST (10)
-#define PHY_IRQ_CONTROL_LP_ACK_IE_MSK (0x800)
-#define PHY_IRQ_CONTROL_LP_ACK_IE_OFST (11)
-#define PHY_IRQ_CONTROL_PDF_IE_MSK (0x1000)
-#define PHY_IRQ_CONTROL_PDF_IE_OFST (12)
-#define PHY_IRQ_CONTROL_PRX_IE_MSK (0x2000)
-#define PHY_IRQ_CONTROL_PRX_IE_OFST (13)
-#define PHY_IRQ_CONTROL_RXER_IE_MSK (0x4000)
-#define PHY_IRQ_CONTROL_RXER_IE_OFST (14)
-#define PHY_IRQ_CONTROL_JABBER_IE_MSK (0x8000)
-#define PHY_IRQ_CONTROL_JABBER_IE_OFST (15)
-
-#define PHY_DIAGNOSTIC (18)
-#define PHY_DIAGNOSTIC_RX_LOCK_MSK (0x100)
-#define PHY_DIAGNOSTIC_RX_LOCK_OFST (8)
-#define PHY_DIAGNOSTIC_RX_PASS_MSK (0x200)
-#define PHY_DIAGNOSTIC_RX_PASS_OFST (9)
-#define PHY_DIAGNOSTIC_RATE_MSK (0x400)
-#define PHY_DIAGNOSTIC_RATE_OFST (10)
-#define PHY_DIAGNOSTIC_DPLX_MSK (0x800)
-#define PHY_DIAGNOSTIC_DPLX_OFST (11)
-#define PHY_DIAGNOSTIC_ANEGF_MSK (0x1000)
-#define PHY_DIAGNOSTIC_ANEGF_OFST (12)
-
-#endif /* __TDKPHY_H */
diff --git a/include/asm-arm/arch-epxa10db/timer00.h b/include/asm-arm/arch-epxa10db/timer00.h
deleted file mode 100644
index 52a3fb58b59..00000000000
--- a/include/asm-arm/arch-epxa10db/timer00.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- *
- * This file contains the register definitions for the Excalibur
- * Timer TIMER00.
- *
- * Copyright (C) 2001 Altera Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef __TIMER00_H
-#define __TIMER00_H
-
-/*
- * Register definitions for the timers
- */
-
-
-#define TIMER0_CR(BASE_ADDR) (TIMER00_TYPE (BASE_ADDR + 0x00 ))
-#define TIMER0_CR_B_MSK (0x20)
-#define TIMER0_CR_B_OFST (0x5)
-#define TIMER0_CR_S_MSK (0x10)
-#define TIMER0_CR_S_OFST (0x4)
-#define TIMER0_CR_CI_MSK (0x08)
-#define TIMER0_CR_CI_OFST (0x3)
-#define TIMER0_CR_IE_MSK (0x04)
-#define TIMER0_CR_IE_OFST (0x2)
-#define TIMER0_CR_MODE_MSK (0x3)
-#define TIMER0_CR_MODE_OFST (0)
-#define TIMER0_CR_MODE_FREE (0)
-#define TIMER0_CR_MODE_ONE (1)
-#define TIMER0_CR_MODE_INTVL (2)
-
-#define TIMER0_SR(BASE_ADDR) (TIMER00_TYPE (BASE_ADDR + 0x00 ))
-#define TIMER0_SR_B_MSK (0x20)
-#define TIMER0_SR_B_OFST (0x5)
-#define TIMER0_SR_S_MSK (0x10)
-#define TIMER0_SR_S_OFST (0x4)
-#define TIMER0_SR_CI_MSK (0x08)
-#define TIMER0_SR_CI_OFST (0x3)
-#define TIMER0_SR_IE_MSK (0x04)
-#define TIMER0_SR_IE_OFST (0x2)
-#define TIMER0_SR_MODE_MSK (0x3)
-#define TIMER0_SR_MODE_OFST (0)
-#define TIMER0_SR_MODE_FREE (0)
-#define TIMER0_SR_MODE_ONE (1)
-#define TIMER0_SR_MODE_INTVL (2)
-
-#define TIMER0_PRESCALE(BASE_ADDR) (TIMER00_TYPE (BASE_ADDR + 0x010 ))
-#define TIMER0_LIMIT(BASE_ADDR) (TIMER00_TYPE (BASE_ADDR + 0x020 ))
-#define TIMER0_READ(BASE_ADDR) (TIMER00_TYPE (BASE_ADDR + 0x030 ))
-
-#define TIMER1_CR(BASE_ADDR) (TIMER00_TYPE (BASE_ADDR + 0x40 ))
-#define TIMER1_CR_B_MSK (0x20)
-#define TIMER1_CR_B_OFST (0x5)
-#define TIMER1_CR_S_MSK (0x10)
-#define TIMER1_CR_S_OFST (0x4)
-#define TIMER1_CR_CI_MSK (0x08)
-#define TIMER1_CR_CI_OFST (0x3)
-#define TIMER1_CR_IE_MSK (0x04)
-#define TIMER1_CR_IE_OFST (0x2)
-#define TIMER1_CR_MODE_MSK (0x3)
-#define TIMER1_CR_MODE_OFST (0)
-#define TIMER1_CR_MODE_FREE (0)
-#define TIMER1_CR_MODE_ONE (1)
-#define TIMER1_CR_MODE_INTVL (2)
-
-#define TIMER1_SR(BASE_ADDR) (TIMER00_TYPE (BASE_ADDR + 0x40 ))
-#define TIMER1_SR_B_MSK (0x20)
-#define TIMER1_SR_B_OFST (0x5)
-#define TIMER1_SR_S_MSK (0x10)
-#define TIMER1_SR_S_OFST (0x4)
-#define TIMER1_SR_CI_MSK (0x08)
-#define TIMER1_SR_CI_OFST (0x3)
-#define TIMER1_SR_IE_MSK (0x04)
-#define TIMER1_SR_IE_OFST (0x2)
-#define TIMER1_SR_MODE_MSK (0x3)
-#define TIMER1_SR_MODE_OFST (0)
-#define TIMER1_SR_MODE_FREE (0)
-#define TIMER1_SR_MODE_ONE (1)
-#define TIMER1_SR_MODE_INTVL (2)
-
-#define TIMER1_PRESCALE(BASE_ADDR) (TIMER00_TYPE (BASE_ADDR + 0x050 ))
-#define TIMER1_LIMIT(BASE_ADDR) (TIMER00_TYPE (BASE_ADDR + 0x060 ))
-#define TIMER1_READ(BASE_ADDR) (TIMER00_TYPE (BASE_ADDR + 0x070 ))
-
-#endif /* __TIMER00_H */
diff --git a/include/asm-arm/arch-epxa10db/uart00.h b/include/asm-arm/arch-epxa10db/uart00.h
deleted file mode 100644
index 5abd8914d68..00000000000
--- a/include/asm-arm/arch-epxa10db/uart00.h
+++ /dev/null
@@ -1,181 +0,0 @@
-/* *
- * Copyright (C) 2001 Altera Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef __UART00_H
-#define __UART00_H
-
-/*
- * Register definitions for the UART
- */
-
-#define UART_TX_FIFO_SIZE (15)
-
-#define UART_RSR(BASE_ADDR) (UART00_TYPE (BASE_ADDR + 0x00 ))
-#define UART_RSR_RX_LEVEL_MSK (0x1f)
-#define UART_RSR_RX_LEVEL_OFST (0)
-#define UART_RSR_RE_MSK (0x80)
-#define UART_RSR_RE_OFST (7)
-
-#define UART_RDS(BASE_ADDR) (UART00_TYPE (BASE_ADDR + 0x04 ))
-#define UART_RDS_BI_MSK (0x8)
-#define UART_RDS_BI_OFST (4)
-#define UART_RDS_FE_MSK (0x4)
-#define UART_RDS_FE_OFST (2)
-#define UART_RDS_PE_MSK (0x2)
-#define UART_RDS_PE_OFST (1)
-#define UART_RDS_OE_MSK (0x1)
-#define UART_RDS_OE_OFST (0)
-
-#define UART_RD(BASE_ADDR) (UART00_TYPE (BASE_ADDR + 0x08 ))
-#define UART_RD_RX_DATA_MSK (0xff)
-#define UART_RD_RX_DATA_OFST (0)
-
-#define UART_TSR(BASE_ADDR) (UART00_TYPE (BASE_ADDR + 0x0c ))
-#define UART_TSR_TX_LEVEL_MSK (0x1f)
-#define UART_TSR_TX_LEVEL_OFST (0)
-#define UART_TSR_TXI_MSK (0x80)
-#define UART_TSR_TXI_OFST (7)
-
-#define UART_TD(BASE_ADDR) (UART00_TYPE (BASE_ADDR + 0x10 ))
-#define UART_TD_TX_DATA_MSK (0xff)
-#define UART_TD_TX_DATA_OFST (0)
-
-#define UART_FCR(BASE_ADDR) (UART00_TYPE (BASE_ADDR + 0x14 ))
-#define UART_FCR_RX_THR_MSK (0xd0)
-#define UART_FCR_RX_THR_OFST (5)
-#define UART_FCR_RX_THR_1 (0x00)
-#define UART_FCR_RX_THR_2 (0x20)
-#define UART_FCR_RX_THR_4 (0x40)
-#define UART_FCR_RX_THR_6 (0x60)
-#define UART_FCR_RX_THR_8 (0x80)
-#define UART_FCR_RX_THR_10 (0xa0)
-#define UART_FCR_RX_THR_12 (0xc0)
-#define UART_FCR_RX_THR_14 (0xd0)
-#define UART_FCR_TX_THR_MSK (0x1c)
-#define UART_FCR_TX_THR_OFST (2)
-#define UART_FCR_TX_THR_0 (0x00)
-#define UART_FCR_TX_THR_2 (0x04)
-#define UART_FCR_TX_THR_4 (0x08)
-#define UART_FCR_TX_THR_8 (0x0c)
-#define UART_FCR_TX_THR_10 (0x10)
-#define UART_FCR_TX_THR_12 (0x14)
-#define UART_FCR_TX_THR_14 (0x18)
-#define UART_FCR_TX_THR_15 (0x1c)
-#define UART_FCR_RC_MSK (0x02)
-#define UART_FCR_RC_OFST (1)
-#define UART_FCR_TC_MSK (0x01)
-#define UART_FCR_TC_OFST (0)
-
-#define UART_IES(BASE_ADDR) (UART00_TYPE (BASE_ADDR + 0x18 ))
-#define UART_IES_ME_MSK (0x8)
-#define UART_IES_ME_OFST (3)
-#define UART_IES_TIE_MSK (0x4)
-#define UART_IES_TIE_OFST (2)
-#define UART_IES_TE_MSK (0x2)
-#define UART_IES_TE_OFST (1)
-#define UART_IES_RE_MSK (0x1)
-#define UART_IES_RE_OFST (0)
-
-#define UART_IEC(BASE_ADDR) (UART00_TYPE (BASE_ADDR + 0x1c ))
-#define UART_IEC_ME_MSK (0x8)
-#define UART_IEC_ME_OFST (3)
-#define UART_IEC_TIE_MSK (0x4)
-#define UART_IEC_TIE_OFST (2)
-#define UART_IEC_TE_MSK (0x2)
-#define UART_IEC_TE_OFST (1)
-#define UART_IEC_RE_MSK (0x1)
-#define UART_IEC_RE_OFST (0)
-
-#define UART_ISR(BASE_ADDR) (UART00_TYPE (BASE_ADDR + 0x20 ))
-#define UART_ISR_MI_MSK (0x8)
-#define UART_ISR_MI_OFST (3)
-#define UART_ISR_TII_MSK (0x4)
-#define UART_ISR_TII_OFST (2)
-#define UART_ISR_TI_MSK (0x2)
-#define UART_ISR_TI_OFST (1)
-#define UART_ISR_RI_MSK (0x1)
-#define UART_ISR_RI_OFST (0)
-
-#define UART_IID(BASE_ADDR) (UART00_TYPE (BASE_ADDR + 0x24 ))
-#define UART_IID_IID_MSK (0x7)
-#define UART_IID_IID_OFST (0)
-
-#define UART_MC(BASE_ADDR) (UART00_TYPE (BASE_ADDR + 0x28 ))
-#define UART_MC_OE_MSK (0x40)
-#define UART_MC_OE_OFST (6)
-#define UART_MC_SP_MSK (0x20)
-#define UART_MC_SP_OFST (5)
-#define UART_MC_EP_MSK (0x10)
-#define UART_MC_EP_OFST (4)
-#define UART_MC_PE_MSK (0x08)
-#define UART_MC_PE_OFST (3)
-#define UART_MC_ST_MSK (0x04)
-#define UART_MC_ST_ONE (0x0)
-#define UART_MC_ST_TWO (0x04)
-#define UART_MC_ST_OFST (2)
-#define UART_MC_CLS_MSK (0x03)
-#define UART_MC_CLS_OFST (0)
-#define UART_MC_CLS_CHARLEN_5 (0)
-#define UART_MC_CLS_CHARLEN_6 (1)
-#define UART_MC_CLS_CHARLEN_7 (2)
-#define UART_MC_CLS_CHARLEN_8 (3)
-
-#define UART_MCR(BASE_ADDR) (UART00_TYPE (BASE_ADDR + 0x2c ))
-#define UART_MCR_AC_MSK (0x80)
-#define UART_MCR_AC_OFST (7)
-#define UART_MCR_AR_MSK (0x40)
-#define UART_MCR_AR_OFST (6)
-#define UART_MCR_BR_MSK (0x20)
-#define UART_MCR_BR_OFST (5)
-#define UART_MCR_LB_MSK (0x10)
-#define UART_MCR_LB_OFST (4)
-#define UART_MCR_DCD_MSK (0x08)
-#define UART_MCR_DCD_OFST (3)
-#define UART_MCR_RI_MSK (0x04)
-#define UART_MCR_RI_OFST (2)
-#define UART_MCR_DTR_MSK (0x02)
-#define UART_MCR_DTR_OFST (1)
-#define UART_MCR_RTS_MSK (0x01)
-#define UART_MCR_RTS_OFST (0)
-
-#define UART_MSR(BASE_ADDR) (UART00_TYPE (BASE_ADDR + 0x30 ))
-#define UART_MSR_DCD_MSK (0x80)
-#define UART_MSR_DCD_OFST (7)
-#define UART_MSR_RI_MSK (0x40)
-#define UART_MSR_RI_OFST (6)
-#define UART_MSR_DSR_MSK (0x20)
-#define UART_MSR_DSR_OFST (5)
-#define UART_MSR_CTS_MSK (0x10)
-#define UART_MSR_CTS_OFST (4)
-#define UART_MSR_DDCD_MSK (0x08)
-#define UART_MSR_DDCD_OFST (3)
-#define UART_MSR_TERI_MSK (0x04)
-#define UART_MSR_TERI_OFST (2)
-#define UART_MSR_DDSR_MSK (0x02)
-#define UART_MSR_DDSR_OFST (1)
-#define UART_MSR_DCTS_MSK (0x01)
-#define UART_MSR_DCTS_OFST (0)
-
-#define UART_DIV_LO(BASE_ADDR) (UART00_TYPE (BASE_ADDR + 0x34 ))
-#define UART_DIV_LO_DIV_MSK (0xff)
-#define UART_DIV_LO_DIV_OFST (0)
-
-#define UART_DIV_HI(BASE_ADDR) (UART00_TYPE (BASE_ADDR + 0x38 ))
-#define UART_DIV_HI_DIV_MSK (0xff)
-#define UART_DIV_HI_DIV_OFST (0)
-
-#endif /* __UART00_H */
diff --git a/include/asm-arm/arch-epxa10db/uncompress.h b/include/asm-arm/arch-epxa10db/uncompress.h
deleted file mode 100644
index fdfe0e6848f..00000000000
--- a/include/asm-arm/arch-epxa10db/uncompress.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * linux/include/asm-arm/arch-epxa10db/uncompress.h
- *
- * Copyright (C) 1999 ARM Limited
- * Copyright (C) 2001 Altera Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include "asm/arch/platform.h"
-#include "asm/hardware.h"
-#define UART00_TYPE (volatile unsigned int*)
-#include "asm/arch/uart00.h"
-
-/*
- * This does not append a newline
- */
-static void putstr(const char *s)
-{
- while (*s) {
- while ((*UART_TSR(EXC_UART00_BASE) &
- UART_TSR_TX_LEVEL_MSK)==15)
- barrier();
-
- *UART_TD(EXC_UART00_BASE) = *s;
-
- if (*s == '\n') {
- while ((*UART_TSR(EXC_UART00_BASE) &
- UART_TSR_TX_LEVEL_MSK)==15)
- barrier();
-
- *UART_TD(EXC_UART00_BASE) = '\r';
- }
- s++;
- }
-}
-
-/*
- * nothing to do
- */
-#define arch_decomp_setup()
-
-#define arch_decomp_wdog()
diff --git a/include/asm-arm/arch-imx/dma.h b/include/asm-arm/arch-imx/dma.h
index dbdc0178041..b45fa367d71 100644
--- a/include/asm-arm/arch-imx/dma.h
+++ b/include/asm-arm/arch-imx/dma.h
@@ -20,10 +20,6 @@
#ifndef __ASM_ARCH_DMA_H
#define __ASM_ARCH_DMA_H
-#define MAX_DMA_ADDRESS 0xffffffff
-
-#define MAX_DMA_CHANNELS 0
-
/*
* DMA registration
*/
diff --git a/include/asm-arm/arch-imx/entry-macro.S b/include/asm-arm/arch-imx/entry-macro.S
index b40ea7cf88e..3b9ef691462 100644
--- a/include/asm-arm/arch-imx/entry-macro.S
+++ b/include/asm-arm/arch-imx/entry-macro.S
@@ -7,6 +7,8 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
+#include <asm/hardware.h>
+
.macro disable_fiq
.endm
#define AITC_NIVECSR 0x40
diff --git a/include/asm-arm/arch-integrator/debug-macro.S b/include/asm-arm/arch-integrator/debug-macro.S
index 484a1aa4709..031d3094179 100644
--- a/include/asm-arm/arch-integrator/debug-macro.S
+++ b/include/asm-arm/arch-integrator/debug-macro.S
@@ -11,7 +11,7 @@
*
*/
-#include <asm/hardware/amba_serial.h>
+#include <linux/amba/serial.h>
.macro addruart,rx
mrc p15, 0, \rx, c1, c0
diff --git a/include/asm-arm/arch-integrator/dma.h b/include/asm-arm/arch-integrator/dma.h
index 7171792290b..83fd6bbaf9d 100644
--- a/include/asm-arm/arch-integrator/dma.h
+++ b/include/asm-arm/arch-integrator/dma.h
@@ -17,12 +17,3 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef __ASM_ARCH_DMA_H
-#define __ASM_ARCH_DMA_H
-
-#define MAX_DMA_ADDRESS 0xffffffff
-
-#define MAX_DMA_CHANNELS 0
-
-#endif /* _ASM_ARCH_DMA_H */
-
diff --git a/include/asm-arm/arch-integrator/entry-macro.S b/include/asm-arm/arch-integrator/entry-macro.S
index 44f7ee61319..69838d04f90 100644
--- a/include/asm-arm/arch-integrator/entry-macro.S
+++ b/include/asm-arm/arch-integrator/entry-macro.S
@@ -7,6 +7,8 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
+#include <asm/hardware.h>
+#include <asm/arch/irqs.h>
.macro disable_fiq
.endm
diff --git a/include/asm-arm/arch-iop3xx/dma.h b/include/asm-arm/arch-iop3xx/dma.h
index 797f9e6fc74..1e808db8af2 100644
--- a/include/asm-arm/arch-iop3xx/dma.h
+++ b/include/asm-arm/arch-iop3xx/dma.h
@@ -7,10 +7,3 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
-#ifndef _IOP3XX_DMA_H_P
-#define _IOP3XX_DMA_H_P
-
-#define MAX_DMA_ADDRESS 0xffffffff
-
-#endif /* _ASM_ARCH_DMA_H_P */
diff --git a/include/asm-arm/arch-iop3xx/entry-macro.S b/include/asm-arm/arch-iop3xx/entry-macro.S
index e2ce7f5467c..926668c098a 100644
--- a/include/asm-arm/arch-iop3xx/entry-macro.S
+++ b/include/asm-arm/arch-iop3xx/entry-macro.S
@@ -7,6 +7,7 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
+#include <asm/arch/irqs.h>
#if defined(CONFIG_ARCH_IOP321)
.macro disable_fiq
diff --git a/include/asm-arm/arch-ixp2000/dma.h b/include/asm-arm/arch-ixp2000/dma.h
index 0fb3568a98d..548d8dc507e 100644
--- a/include/asm-arm/arch-ixp2000/dma.h
+++ b/include/asm-arm/arch-ixp2000/dma.h
@@ -7,12 +7,3 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#ifndef __ASM_ARCH_DMA_H
-#define __ASM_ARCH_DMA_H
-
-#define MAX_DMA_ADDRESS 0xffffffff
-
-/* No DMA */
-#define MAX_DMA_CHANNELS 0
-
-#endif /* _ASM_ARCH_DMA_H */
diff --git a/include/asm-arm/arch-ixp2000/enp2611.h b/include/asm-arm/arch-ixp2000/enp2611.h
index 95128d9f502..42f3c28dc5c 100644
--- a/include/asm-arm/arch-ixp2000/enp2611.h
+++ b/include/asm-arm/arch-ixp2000/enp2611.h
@@ -36,5 +36,11 @@
#define ENP2611_GPIO_SCL 7
#define ENP2611_GPIO_SDA 6
+#define IRQ_ENP2611_THERMAL IRQ_IXP2000_GPIO4
+#define IRQ_ENP2611_OPTION_BOARD IRQ_IXP2000_GPIO3
+#define IRQ_ENP2611_CALEB IRQ_IXP2000_GPIO2
+#define IRQ_ENP2611_PM3386_1 IRQ_IXP2000_GPIO1
+#define IRQ_ENP2611_PM3386_0 IRQ_IXP2000_GPIO0
+
#endif
diff --git a/include/asm-arm/arch-ixp2000/entry-macro.S b/include/asm-arm/arch-ixp2000/entry-macro.S
index e3a4e412129..16e1e6124b3 100644
--- a/include/asm-arm/arch-ixp2000/entry-macro.S
+++ b/include/asm-arm/arch-ixp2000/entry-macro.S
@@ -7,6 +7,7 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
+#include <asm/arch/irqs.h>
.macro disable_fiq
.endm
diff --git a/include/asm-arm/arch-ixp2000/io.h b/include/asm-arm/arch-ixp2000/io.h
index 7fbcdf9931e..c0ff2c6c66e 100644
--- a/include/asm-arm/arch-ixp2000/io.h
+++ b/include/asm-arm/arch-ixp2000/io.h
@@ -131,102 +131,4 @@
#endif
-#ifdef CONFIG_ARCH_IXDP2X01
-/*
- * This is an ugly hack but the CS8900 on the 2x01's does not sit in any sort
- * of "I/O space" and is just direct mapped into a 32-bit-only addressable
- * bus. The address space for this bus is such that we can't really easily
- * make it contiguous to the PCI I/O address range, and it also does not
- * need swapping like PCI addresses do (IXDP2x01 is a BE platform).
- * B/C of this we can't use the standard in/out functions and need to
- * runtime check if the incoming address is a PCI address or for
- * the CS89x0.
- */
-#undef inw
-#undef outw
-#undef insw
-#undef outsw
-
-#include <asm/mach-types.h>
-
-static inline void insw(u32 ptr, void *buf, int length)
-{
- register volatile u32 *port = (volatile u32 *)ptr;
-
- /*
- * Is this cycle meant for the CS8900?
- */
- if ((machine_is_ixdp2401() || machine_is_ixdp2801()) &&
- (((u32)port >= (u32)IXDP2X01_CS8900_VIRT_BASE) &&
- ((u32)port <= (u32)IXDP2X01_CS8900_VIRT_END))) {
- u8 *buf8 = (u8*)buf;
- register u32 tmp32;
-
- do {
- tmp32 = *port;
- *buf8++ = (u8)tmp32;
- *buf8++ = (u8)(tmp32 >> 8);
- } while(--length);
-
- return;
- }
-
- __raw_readsw(alignw(___io(ptr)),buf,length);
-}
-
-static inline void outsw(u32 ptr, void *buf, int length)
-{
- register volatile u32 *port = (volatile u32 *)ptr;
-
- /*
- * Is this cycle meant for the CS8900?
- */
- if ((machine_is_ixdp2401() || machine_is_ixdp2801()) &&
- (((u32)port >= (u32)IXDP2X01_CS8900_VIRT_BASE) &&
- ((u32)port <= (u32)IXDP2X01_CS8900_VIRT_END))) {
- register u32 tmp32;
- u8 *buf8 = (u8*)buf;
- do {
- tmp32 = *buf8++;
- tmp32 |= (*buf8++) << 8;
- *port = tmp32;
- } while(--length);
- return;
- }
-
- __raw_writesw(alignw(___io(ptr)),buf,length);
-}
-
-
-static inline u16 inw(u32 ptr)
-{
- register volatile u32 *port = (volatile u32 *)ptr;
-
- /*
- * Is this cycle meant for the CS8900?
- */
- if ((machine_is_ixdp2401() || machine_is_ixdp2801()) &&
- (((u32)port >= (u32)IXDP2X01_CS8900_VIRT_BASE) &&
- ((u32)port <= (u32)IXDP2X01_CS8900_VIRT_END))) {
- return (u16)(*port);
- }
-
- return __raw_readw(alignw(___io(ptr)));
-}
-
-static inline void outw(u16 value, u32 ptr)
-{
- register volatile u32 *port = (volatile u32 *)ptr;
-
- if ((machine_is_ixdp2401() || machine_is_ixdp2801()) &&
- (((u32)port >= (u32)IXDP2X01_CS8900_VIRT_BASE) &&
- ((u32)port <= (u32)IXDP2X01_CS8900_VIRT_END))) {
- *port = value;
- return;
- }
-
- __raw_writew((value),alignw(___io(ptr)));
-}
-#endif /* IXDP2x01 */
-
#endif
diff --git a/include/asm-arm/arch-ixp2000/ixp2000-regs.h b/include/asm-arm/arch-ixp2000/ixp2000-regs.h
index fc5ac6aec4f..8cf70ff160a 100644
--- a/include/asm-arm/arch-ixp2000/ixp2000-regs.h
+++ b/include/asm-arm/arch-ixp2000/ixp2000-regs.h
@@ -156,6 +156,14 @@
#define IXP2000_IRQ_THD_RAW_STATUS_B_1 IXP2000_INTCTL_REG(0x84)
#define IXP2000_IRQ_THD_RAW_STATUS_B_2 IXP2000_INTCTL_REG(0x88)
#define IXP2000_IRQ_THD_RAW_STATUS_B_3 IXP2000_INTCTL_REG(0x8c)
+#define IXP2000_IRQ_THD_STATUS_A_0 IXP2000_INTCTL_REG(0xe0)
+#define IXP2000_IRQ_THD_STATUS_A_1 IXP2000_INTCTL_REG(0xe4)
+#define IXP2000_IRQ_THD_STATUS_A_2 IXP2000_INTCTL_REG(0xe8)
+#define IXP2000_IRQ_THD_STATUS_A_3 IXP2000_INTCTL_REG(0xec)
+#define IXP2000_IRQ_THD_STATUS_B_0 IXP2000_INTCTL_REG(0x100)
+#define IXP2000_IRQ_THD_STATUS_B_1 IXP2000_INTCTL_REG(0x104)
+#define IXP2000_IRQ_THD_STATUS_B_2 IXP2000_INTCTL_REG(0x108)
+#define IXP2000_IRQ_THD_STATUS_B_3 IXP2000_INTCTL_REG(0x10c)
#define IXP2000_IRQ_THD_ENABLE_SET_A_0 IXP2000_INTCTL_REG(0x160)
#define IXP2000_IRQ_THD_ENABLE_SET_A_1 IXP2000_INTCTL_REG(0x164)
#define IXP2000_IRQ_THD_ENABLE_SET_A_2 IXP2000_INTCTL_REG(0x168)
diff --git a/include/asm-arm/arch-ixp4xx/coyote.h b/include/asm-arm/arch-ixp4xx/coyote.h
index dd0c2d2d850..7ac9ba2c035 100644
--- a/include/asm-arm/arch-ixp4xx/coyote.h
+++ b/include/asm-arm/arch-ixp4xx/coyote.h
@@ -16,9 +16,6 @@
#error "Do not include this directly, instead #include <asm/hardware.h>"
#endif
-#define COYOTE_FLASH_BASE IXP4XX_EXP_BUS_CS0_BASE_PHYS
-#define COYOTE_FLASH_SIZE IXP4XX_EXP_BUS_CSX_REGION_SIZE * 2
-
/* PCI controller GPIO to IRQ pin mappings */
#define COYOTE_PCI_SLOT0_PIN 6
#define COYOTE_PCI_SLOT1_PIN 11
@@ -26,7 +23,7 @@
#define COYOTE_PCI_SLOT0_DEVID 14
#define COYOTE_PCI_SLOT1_DEVID 15
-#define COYOTE_IDE_BASE_PHYS IXP4XX_EXP_BUS_CS3_BASE_PHYS
+#define COYOTE_IDE_BASE_PHYS IXP4XX_EXP_BUS_BASE(3)
#define COYOTE_IDE_BASE_VIRT 0xFFFE1000
#define COYOTE_IDE_REGION_SIZE 0x1000
diff --git a/include/asm-arm/arch-ixp4xx/dma.h b/include/asm-arm/arch-ixp4xx/dma.h
index 312065dc0e7..b1a071ecebc 100644
--- a/include/asm-arm/arch-ixp4xx/dma.h
+++ b/include/asm-arm/arch-ixp4xx/dma.h
@@ -20,7 +20,4 @@
#define MAX_DMA_ADDRESS (PAGE_OFFSET + SZ_64M)
-/* No DMA */
-#define MAX_DMA_CHANNELS 0
-
#endif /* _ASM_ARCH_DMA_H */
diff --git a/include/asm-arm/arch-ixp4xx/entry-macro.S b/include/asm-arm/arch-ixp4xx/entry-macro.S
index 323b0bc4a39..27e124132e4 100644
--- a/include/asm-arm/arch-ixp4xx/entry-macro.S
+++ b/include/asm-arm/arch-ixp4xx/entry-macro.S
@@ -7,6 +7,7 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
+#include <asm/hardware.h>
.macro disable_fiq
.endm
diff --git a/include/asm-arm/arch-ixp4xx/gtwx5715.h b/include/asm-arm/arch-ixp4xx/gtwx5715.h
index fc460af7062..c3069d67c00 100644
--- a/include/asm-arm/arch-ixp4xx/gtwx5715.h
+++ b/include/asm-arm/arch-ixp4xx/gtwx5715.h
@@ -57,10 +57,6 @@
#define GTWX5715_GPIO13_IRQ IRQ_IXP4XX_SW_INT1
#define GTWX5715_GPIO14_IRQ IRQ_IXP4XX_SW_INT2
-
-#define GTWX5715_FLASH_BASE IXP4XX_EXP_BUS_CS0_BASE_PHYS
-#define GTWX5715_FLASH_SIZE (0x00800000)
-
/* PCI controller GPIO to IRQ pin mappings
INTA INTB
diff --git a/include/asm-arm/arch-ixp4xx/hardware.h b/include/asm-arm/arch-ixp4xx/hardware.h
index cfb413c845f..6acb69c95ef 100644
--- a/include/asm-arm/arch-ixp4xx/hardware.h
+++ b/include/asm-arm/arch-ixp4xx/hardware.h
@@ -45,5 +45,6 @@ extern unsigned int processor_id;
#include "coyote.h"
#include "prpmc1100.h"
#include "nslu2.h"
+#include "nas100d.h"
#endif /* _ASM_ARCH_HARDWARE_H */
diff --git a/include/asm-arm/arch-ixp4xx/irqs.h b/include/asm-arm/arch-ixp4xx/irqs.h
index 2cf4930372b..f24b763ca18 100644
--- a/include/asm-arm/arch-ixp4xx/irqs.h
+++ b/include/asm-arm/arch-ixp4xx/irqs.h
@@ -100,4 +100,13 @@
#define IRQ_NSLU2_PCI_INTB IRQ_IXP4XX_GPIO10
#define IRQ_NSLU2_PCI_INTC IRQ_IXP4XX_GPIO9
+/*
+ * NAS100D board IRQs
+ */
+#define IRQ_NAS100D_PCI_INTA IRQ_IXP4XX_GPIO11
+#define IRQ_NAS100D_PCI_INTB IRQ_IXP4XX_GPIO10
+#define IRQ_NAS100D_PCI_INTC IRQ_IXP4XX_GPIO9
+#define IRQ_NAS100D_PCI_INTD IRQ_IXP4XX_GPIO8
+#define IRQ_NAS100D_PCI_INTE IRQ_IXP4XX_GPIO7
+
#endif
diff --git a/include/asm-arm/arch-ixp4xx/ixdp425.h b/include/asm-arm/arch-ixp4xx/ixdp425.h
index 7d21bf94137..3d3820d7ba0 100644
--- a/include/asm-arm/arch-ixp4xx/ixdp425.h
+++ b/include/asm-arm/arch-ixp4xx/ixdp425.h
@@ -16,9 +16,6 @@
#error "Do not include this directly, instead #include <asm/hardware.h>"
#endif
-#define IXDP425_FLASH_BASE IXP4XX_EXP_BUS_CS0_BASE_PHYS
-#define IXDP425_FLASH_SIZE IXP4XX_EXP_BUS_CSX_REGION_SIZE
-
#define IXDP425_SDA_PIN 7
#define IXDP425_SCL_PIN 6
diff --git a/include/asm-arm/arch-ixp4xx/memory.h b/include/asm-arm/arch-ixp4xx/memory.h
index e024d0a1a66..ee211d28a3e 100644
--- a/include/asm-arm/arch-ixp4xx/memory.h
+++ b/include/asm-arm/arch-ixp4xx/memory.h
@@ -16,31 +16,10 @@
#ifndef __ASSEMBLY__
-/*
- * Only first 64MB of memory can be accessed via PCI.
- * We use GFP_DMA to allocate safe buffers to do map/unmap.
- * This is really ugly and we need a better way of specifying
- * DMA-capable regions of memory.
- */
-static inline void __arch_adjust_zones(int node, unsigned long *zone_size,
- unsigned long *zhole_size)
-{
- unsigned int sz = SZ_64M >> PAGE_SHIFT;
-
- /*
- * Only adjust if > 64M on current system
- */
- if (node || (zone_size[0] <= sz))
- return;
-
- zone_size[1] = zone_size[0] - sz;
- zone_size[0] = sz;
- zhole_size[1] = zhole_size[0];
- zhole_size[0] = 0;
-}
+void ixp4xx_adjust_zones(int node, unsigned long *size, unsigned long *holes);
#define arch_adjust_zones(node, size, holes) \
- __arch_adjust_zones(node, size, holes)
+ ixp4xx_adjust_zones(node, size, holes)
#define ISA_DMA_THRESHOLD (SZ_64M - 1)
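
For context on the hunk above: the zone-splitting logic has only moved out of line. A sketch of what the new ixp4xx_adjust_zones() is expected to contain, reconstructed purely from the deleted inline body (the authoritative definition presumably lives in the IXP4xx core code elsewhere in this series):

	/* Reconstructed sketch, not the authoritative implementation. */
	void ixp4xx_adjust_zones(int node, unsigned long *zone_size,
				 unsigned long *zhole_size)
	{
		unsigned int sz = SZ_64M >> PAGE_SHIFT;	/* PCI can only reach 64MB */

		if (node || zone_size[0] <= sz)
			return;

		/* Everything above the first 64MB goes into a second, non-DMA zone. */
		zone_size[1]  = zone_size[0] - sz;
		zone_size[0]  = sz;
		zhole_size[1] = zhole_size[0];
		zhole_size[0] = 0;
	}
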
diff --git a/include/asm-arm/arch-ixp4xx/nas100d.h b/include/asm-arm/arch-ixp4xx/nas100d.h
new file mode 100644
index 00000000000..51ac0180427
--- /dev/null
+++ b/include/asm-arm/arch-ixp4xx/nas100d.h
@@ -0,0 +1,72 @@
+/*
+ * include/asm-arm/arch-ixp4xx/nas100d.h
+ *
+ * NAS100D platform specific definitions
+ *
+ * Copyright (c) 2005 Tower Technologies
+ *
+ * Author: Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * based on ixdp425.h:
+ * Copyright 2004 (c) MontaVista, Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __ASM_ARCH_HARDWARE_H__
+#error "Do not include this directly, instead #include <asm/hardware.h>"
+#endif
+
+#define NAS100D_SDA_PIN 6
+#define NAS100D_SCL_PIN 5
+
+/*
+ * NAS100D PCI IRQs
+ */
+#define NAS100D_PCI_MAX_DEV 3
+#define NAS100D_PCI_IRQ_LINES 3
+
+
+/* PCI controller GPIO to IRQ pin mappings */
+#define NAS100D_PCI_INTA_PIN 11
+#define NAS100D_PCI_INTB_PIN 10
+#define NAS100D_PCI_INTC_PIN 9
+#define NAS100D_PCI_INTD_PIN 8
+#define NAS100D_PCI_INTE_PIN 7
+
+/* GPIO */
+
+#define NAS100D_GPIO0 0
+#define NAS100D_GPIO1 1
+#define NAS100D_GPIO2 2
+#define NAS100D_GPIO3 3
+#define NAS100D_GPIO4 4
+#define NAS100D_GPIO5 5
+#define NAS100D_GPIO6 6
+#define NAS100D_GPIO7 7
+#define NAS100D_GPIO8 8
+#define NAS100D_GPIO9 9
+#define NAS100D_GPIO10 10
+#define NAS100D_GPIO11 11
+#define NAS100D_GPIO12 12
+#define NAS100D_GPIO13 13
+#define NAS100D_GPIO14 14
+#define NAS100D_GPIO15 15
+
+
+/* Buttons */
+
+#define NAS100D_PB_GPIO NAS100D_GPIO14
+#define NAS100D_RB_GPIO NAS100D_GPIO4
+#define NAS100D_PO_GPIO NAS100D_GPIO12 /* power off */
+
+#define NAS100D_PB_IRQ IRQ_IXP4XX_GPIO14
+#define NAS100D_RB_IRQ IRQ_IXP4XX_GPIO4
+
+/*
+#define NAS100D_PB_BM (1L << NAS100D_PB_GPIO)
+#define NAS100D_PO_BM (1L << NAS100D_PO_GPIO)
+#define NAS100D_RB_BM (1L << NAS100D_RB_GPIO)
+*/
diff --git a/include/asm-arm/arch-ixp4xx/nslu2.h b/include/asm-arm/arch-ixp4xx/nslu2.h
index b8b347a559c..4281838873e 100644
--- a/include/asm-arm/arch-ixp4xx/nslu2.h
+++ b/include/asm-arm/arch-ixp4xx/nslu2.h
@@ -18,9 +18,6 @@
#error "Do not include this directly, instead #include <asm/hardware.h>"
#endif
-#define NSLU2_FLASH_BASE IXP4XX_EXP_BUS_CS0_BASE_PHYS
-#define NSLU2_FLASH_SIZE IXP4XX_EXP_BUS_CSX_REGION_SIZE
-
#define NSLU2_SDA_PIN 7
#define NSLU2_SCL_PIN 6
diff --git a/include/asm-arm/arch-ixp4xx/platform.h b/include/asm-arm/arch-ixp4xx/platform.h
index f14ed63590c..daf9790645c 100644
--- a/include/asm-arm/arch-ixp4xx/platform.h
+++ b/include/asm-arm/arch-ixp4xx/platform.h
@@ -26,16 +26,17 @@
*/
#define IXP4XX_EXP_BUS_BASE_PHYS (0x50000000)
-#define IXP4XX_EXP_BUS_CSX_REGION_SIZE (0x01000000)
+/*
+ * The expansion bus on the IXP4xx can be configured for either 16 or
+ * 32MB windows and the CS offset for each region changes based on the
+ * current configuration. This means that we cannot simply hardcode
+ * each offset. ixp4xx_sys_init() looks at the expansion bus configuration
+ * as set up by the bootloader to determine our window size.
+ */
+extern unsigned long ixp4xx_exp_bus_size;
-#define IXP4XX_EXP_BUS_CS0_BASE_PHYS (IXP4XX_EXP_BUS_BASE_PHYS + 0x00000000)
-#define IXP4XX_EXP_BUS_CS1_BASE_PHYS (IXP4XX_EXP_BUS_BASE_PHYS + 0x01000000)
-#define IXP4XX_EXP_BUS_CS2_BASE_PHYS (IXP4XX_EXP_BUS_BASE_PHYS + 0x02000000)
-#define IXP4XX_EXP_BUS_CS3_BASE_PHYS (IXP4XX_EXP_BUS_BASE_PHYS + 0x03000000)
-#define IXP4XX_EXP_BUS_CS4_BASE_PHYS (IXP4XX_EXP_BUS_BASE_PHYS + 0x04000000)
-#define IXP4XX_EXP_BUS_CS5_BASE_PHYS (IXP4XX_EXP_BUS_BASE_PHYS + 0x05000000)
-#define IXP4XX_EXP_BUS_CS6_BASE_PHYS (IXP4XX_EXP_BUS_BASE_PHYS + 0x06000000)
-#define IXP4XX_EXP_BUS_CS7_BASE_PHYS (IXP4XX_EXP_BUS_BASE_PHYS + 0x07000000)
+#define IXP4XX_EXP_BUS_BASE(region)\
+ (IXP4XX_EXP_BUS_BASE_PHYS + ((region) * ixp4xx_exp_bus_size))
#define IXP4XX_FLASH_WRITABLE (0x2)
#define IXP4XX_FLASH_DEFAULT (0xbcd23c40)
@@ -112,10 +113,5 @@ static inline void gpio_line_set(u8 line, int value)
*IXP4XX_GPIO_GPOUTR &= ~(1 << line);
}
-static inline void gpio_line_isr_clear(u8 line)
-{
- *IXP4XX_GPIO_GPISR = (1 << line);
-}
-
#endif // __ASSEMBLY__
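
Since the per-CS base constants are gone, board code that used them must switch to the run-time macro. A hypothetical board-setup fragment (names invented for illustration; assumes <linux/ioport.h>):

	static struct resource example_flash_resource = {
		.flags = IORESOURCE_MEM,
	};

	static void __init example_board_fixup_flash(void)
	{
		/* CS0 window: base and size are only known once
		 * ixp4xx_sys_init() has read the bootloader's configuration. */
		example_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
		example_flash_resource.end   = IXP4XX_EXP_BUS_BASE(0)
						+ ixp4xx_exp_bus_size - 1;
	}
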
diff --git a/include/asm-arm/arch-l7200/dma.h b/include/asm-arm/arch-l7200/dma.h
index 6595b386cfc..4c7eca63f03 100644
--- a/include/asm-arm/arch-l7200/dma.h
+++ b/include/asm-arm/arch-l7200/dma.h
@@ -17,7 +17,6 @@
* bytes of RAM.
*/
#define MAX_DMA_ADDRESS 0xd0000000
-#define MAX_DMA_CHANNELS 0
#define DMA_S0 0
diff --git a/include/asm-arm/arch-l7200/system.h b/include/asm-arm/arch-l7200/system.h
index cb4ff29059b..18825cf071b 100644
--- a/include/asm-arm/arch-l7200/system.h
+++ b/include/asm-arm/arch-l7200/system.h
@@ -12,6 +12,8 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
+#include <asm/hardware.h>
+
static inline void arch_idle(void)
{
*(unsigned long *)(IO_BASE + 0x50004) = 1; /* idle mode */
diff --git a/include/asm-arm/arch-lh7a40x/dma.h b/include/asm-arm/arch-lh7a40x/dma.h
index 5797f01e184..15492e3253f 100644
--- a/include/asm-arm/arch-lh7a40x/dma.h
+++ b/include/asm-arm/arch-lh7a40x/dma.h
@@ -7,11 +7,3 @@
* version 2 as published by the Free Software Foundation.
*
*/
-
-#ifndef __ASM_ARCH_DMA_H
-#define __ASM_ARCH_DMA_H
-
-#define MAX_DMA_ADDRESS 0xffffffff
-#define MAX_DMA_CHANNELS 0 /* All DMA is internal to CPU */
-
-#endif /* _ASM_ARCH_DMA_H */
diff --git a/include/asm-arm/arch-lh7a40x/entry-macro.S b/include/asm-arm/arch-lh7a40x/entry-macro.S
index 865f396aa63..a2f67c06d9c 100644
--- a/include/asm-arm/arch-lh7a40x/entry-macro.S
+++ b/include/asm-arm/arch-lh7a40x/entry-macro.S
@@ -7,6 +7,8 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
+#include <asm/hardware.h>
+#include <asm/arch/irqs.h>
# if defined (CONFIG_ARCH_LH7A400) && defined (CONFIG_ARCH_LH7A404)
# error "LH7A400 and LH7A404 are mutually exclusive"
diff --git a/include/asm-arm/arch-omap/dma.h b/include/asm-arm/arch-omap/dma.h
index ccbcb580a5c..d4e73efcb81 100644
--- a/include/asm-arm/arch-omap/dma.h
+++ b/include/asm-arm/arch-omap/dma.h
@@ -21,9 +21,6 @@
#ifndef __ASM_ARCH_DMA_H
#define __ASM_ARCH_DMA_H
-#define MAX_DMA_ADDRESS 0xffffffff
-#define MAX_DMA_CHANNELS 0
-
/* Hardware registers for omap1 */
#define OMAP_DMA_BASE (0xfffed800)
#define OMAP_DMA_GCR (OMAP_DMA_BASE + 0x400)
diff --git a/include/asm-arm/arch-omap/entry-macro.S b/include/asm-arm/arch-omap/entry-macro.S
index f8814a84910..0ffb1185f1a 100644
--- a/include/asm-arm/arch-omap/entry-macro.S
+++ b/include/asm-arm/arch-omap/entry-macro.S
@@ -7,6 +7,8 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
+#include <asm/hardware.h>
+#include <asm/arch/irqs.h>
#if defined(CONFIG_ARCH_OMAP1)
diff --git a/include/asm-arm/arch-omap/system.h b/include/asm-arm/arch-omap/system.h
index 9af415d2944..6724a81bd10 100644
--- a/include/asm-arm/arch-omap/system.h
+++ b/include/asm-arm/arch-omap/system.h
@@ -5,8 +5,9 @@
#ifndef __ASM_ARCH_SYSTEM_H
#define __ASM_ARCH_SYSTEM_H
#include <linux/config.h>
+#include <linux/clk.h>
+
#include <asm/mach-types.h>
-#include <asm/hardware/clock.h>
#include <asm/hardware.h>
#include <asm/arch/prcm.h>
diff --git a/include/asm-arm/arch-pxa/dma.h b/include/asm-arm/arch-pxa/dma.h
index 56db3d49bfc..3e88a2a02a0 100644
--- a/include/asm-arm/arch-pxa/dma.h
+++ b/include/asm-arm/arch-pxa/dma.h
@@ -12,11 +12,6 @@
#ifndef __ASM_ARCH_DMA_H
#define __ASM_ARCH_DMA_H
-#define MAX_DMA_ADDRESS 0xffffffff
-
-/* No DMA as the rest of the world see it */
-#define MAX_DMA_CHANNELS 0
-
/*
* Descriptor structure for PXA's DMA engine
* Note: this structure must always be aligned to a 16-byte boundary.
diff --git a/include/asm-arm/arch-pxa/entry-macro.S b/include/asm-arm/arch-pxa/entry-macro.S
index 2abfc8bb3ee..4985e33afc1 100644
--- a/include/asm-arm/arch-pxa/entry-macro.S
+++ b/include/asm-arm/arch-pxa/entry-macro.S
@@ -7,6 +7,8 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
+#include <asm/hardware.h>
+#include <asm/arch/irqs.h>
.macro disable_fiq
.endm
diff --git a/include/asm-arm/arch-pxa/pxa-regs.h b/include/asm-arm/arch-pxa/pxa-regs.h
index a75a2470f4f..dae138b9cac 100644
--- a/include/asm-arm/arch-pxa/pxa-regs.h
+++ b/include/asm-arm/arch-pxa/pxa-regs.h
@@ -2042,6 +2042,18 @@
#ifdef CONFIG_PXA27x
+#define ARB_CNTRL __REG(0x48000048) /* Arbiter Control Register */
+
+#define ARB_DMA_SLV_PARK (1<<31) /* Be parked with DMA slave when idle */
+#define ARB_CI_PARK (1<<30) /* Be parked with Camera Interface when idle */
+#define ARB_EX_MEM_PARK (1<<29) /* Be parked with external MEMC when idle */
+#define ARB_INT_MEM_PARK (1<<28) /* Be parked with internal MEMC when idle */
+#define ARB_USB_PARK (1<<27) /* Be parked with USB when idle */
+#define ARB_LCD_PARK (1<<26) /* Be parked with LCD when idle */
+#define ARB_DMA_PARK (1<<25) /* Be parked with DMA when idle */
+#define ARB_CORE_PARK (1<<24) /* Be parked with core when idle */
+#define ARB_LOCK_FLAG (1<<23) /* Only Locking masters gain access to the bus */
+
/*
* Keypad
*/
diff --git a/include/asm-arm/arch-realview/debug-macro.S b/include/asm-arm/arch-realview/debug-macro.S
index ed28bd01223..017ad996848 100644
--- a/include/asm-arm/arch-realview/debug-macro.S
+++ b/include/asm-arm/arch-realview/debug-macro.S
@@ -11,7 +11,7 @@
*
*/
-#include <asm/hardware/amba_serial.h>
+#include <linux/amba/serial.h>
.macro addruart,rx
mrc p15, 0, \rx, c1, c0
diff --git a/include/asm-arm/arch-realview/dma.h b/include/asm-arm/arch-realview/dma.h
index 744491a74bd..8342e3f9d6e 100644
--- a/include/asm-arm/arch-realview/dma.h
+++ b/include/asm-arm/arch-realview/dma.h
@@ -18,10 +18,3 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef __ASM_ARCH_DMA_H
-#define __ASM_ARCH_DMA_H
-
-#define MAX_DMA_ADDRESS 0xffffffff
-#define MAX_DMA_CHANNELS 0
-
-#endif /* _ASM_ARCH_DMA_H */
diff --git a/include/asm-arm/arch-realview/entry-macro.S b/include/asm-arm/arch-realview/entry-macro.S
index 6288fad0dc4..1a6eec86bd4 100644
--- a/include/asm-arm/arch-realview/entry-macro.S
+++ b/include/asm-arm/arch-realview/entry-macro.S
@@ -7,7 +7,7 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
-
+#include <asm/hardware.h>
#include <asm/hardware/gic.h>
.macro disable_fiq
diff --git a/include/asm-arm/arch-rpc/entry-macro.S b/include/asm-arm/arch-rpc/entry-macro.S
index 686f413f82d..c9e5395e510 100644
--- a/include/asm-arm/arch-rpc/entry-macro.S
+++ b/include/asm-arm/arch-rpc/entry-macro.S
@@ -1,3 +1,3 @@
-
+#include <asm/hardware.h>
#include <asm/hardware/entry-macro-iomd.S>
diff --git a/include/asm-arm/arch-s3c2410/dma.h b/include/asm-arm/arch-s3c2410/dma.h
index e830a40e573..b011e14f3bc 100644
--- a/include/asm-arm/arch-s3c2410/dma.h
+++ b/include/asm-arm/arch-s3c2410/dma.h
@@ -31,14 +31,6 @@
#define MAX_DMA_TRANSFER_SIZE 0x100000 /* Data Unit is half word */
-/* according to the samsung port, we cannot use the regular
- * dma channels... we must therefore provide our own interface
- * for DMA, and allow our drivers to use that.
- */
-
-#define MAX_DMA_CHANNELS 0
-
-
/* we have 4 dma channels */
#define S3C2410_DMA_CHANNELS (4)
diff --git a/include/asm-arm/arch-s3c2410/entry-macro.S b/include/asm-arm/arch-s3c2410/entry-macro.S
index b7d4d7f4422..cc06b1bd37b 100644
--- a/include/asm-arm/arch-s3c2410/entry-macro.S
+++ b/include/asm-arm/arch-s3c2410/entry-macro.S
@@ -10,6 +10,8 @@
* Modifications:
* 10-Mar-2005 LCVR Changed S3C2410_VA to S3C24XX_VA
*/
+#include <asm/hardware.h>
+#include <asm/arch/irqs.h>
.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
diff --git a/include/asm-arm/arch-sa1100/dma.h b/include/asm-arm/arch-sa1100/dma.h
index 3d60ed9f8c3..02575d72ac6 100644
--- a/include/asm-arm/arch-sa1100/dma.h
+++ b/include/asm-arm/arch-sa1100/dma.h
@@ -15,20 +15,6 @@
/*
- * This is the maximum DMA address that can be DMAd to.
- */
-#define MAX_DMA_ADDRESS 0xffffffff
-
-
-/*
- * The regular generic DMA interface is inappropriate for the
- * SA1100 DMA model. None of the SA1100 specific drivers using
- * DMA are portable anyway so it's pointless to try to twist the
- * regular DMA API to accommodate them.
- */
-#define MAX_DMA_CHANNELS 0
-
-/*
* The SA1100 has six internal DMA channels.
*/
#define SA1100_DMA_CHANNELS 6
diff --git a/include/asm-arm/arch-versatile/debug-macro.S b/include/asm-arm/arch-versatile/debug-macro.S
index 89e38ac1444..ef6167116db 100644
--- a/include/asm-arm/arch-versatile/debug-macro.S
+++ b/include/asm-arm/arch-versatile/debug-macro.S
@@ -11,7 +11,7 @@
*
*/
-#include <asm/hardware/amba_serial.h>
+#include <linux/amba/serial.h>
.macro addruart,rx
mrc p15, 0, \rx, c1, c0
diff --git a/include/asm-arm/arch-versatile/dma.h b/include/asm-arm/arch-versatile/dma.h
index dcc8ac26eac..64257734862 100644
--- a/include/asm-arm/arch-versatile/dma.h
+++ b/include/asm-arm/arch-versatile/dma.h
@@ -18,10 +18,3 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef __ASM_ARCH_DMA_H
-#define __ASM_ARCH_DMA_H
-
-#define MAX_DMA_ADDRESS 0xffffffff
-#define MAX_DMA_CHANNELS 0
-
-#endif /* _ASM_ARCH_DMA_H */
diff --git a/include/asm-arm/arch-versatile/entry-macro.S b/include/asm-arm/arch-versatile/entry-macro.S
index 90e4e970d25..58f0d71759f 100644
--- a/include/asm-arm/arch-versatile/entry-macro.S
+++ b/include/asm-arm/arch-versatile/entry-macro.S
@@ -7,7 +7,9 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
- .macro disable_fiq
+#include <asm/hardware.h>
+
+ .macro disable_fiq
.endm
.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index d586f65c822..3d7283d8440 100644
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -175,6 +175,8 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
#endif /* __LINUX_ARM_ARCH__ */
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
@@ -205,5 +207,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
+#include <asm-generic/atomic.h>
#endif
#endif
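
The newly added atomic_xchg() is simply xchg() applied to the counter word; a minimal hypothetical use (the helper name is illustrative, not from the kernel):

	/* Return non-zero only to the first caller that claims the flag. */
	static inline int example_claim_once(atomic_t *flag)
	{
		return atomic_xchg(flag, 1) == 0;
	}
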
diff --git a/include/asm-arm/byteorder.h b/include/asm-arm/byteorder.h
index d648a1915c3..17eaf8bdf09 100644
--- a/include/asm-arm/byteorder.h
+++ b/include/asm-arm/byteorder.h
@@ -15,9 +15,32 @@
#ifndef __ASM_ARM_BYTEORDER_H
#define __ASM_ARM_BYTEORDER_H
-
+#include <linux/compiler.h>
#include <asm/types.h>
+static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
+{
+ __u32 t;
+
+ if (__builtin_constant_p(x)) {
+ t = x ^ ((x << 16) | (x >> 16)); /* eor r1,r0,r0,ror #16 */
+ } else {
+ /*
+ * The compiler needs a bit of a hint here to always do the
+ * right thing and not screw it up to different degrees
+ * depending on the gcc version.
+ */
+ asm ("eor\t%0, %1, %1, ror #16" : "=r" (t) : "r" (x));
+ }
+ x = (x << 24) | (x >> 8); /* mov r0,r0,ror #8 */
+ t &= ~0x00FF0000; /* bic r1,r1,#0x00FF0000 */
+ x ^= (t >> 8); /* eor r0,r0,r1,lsr #8 */
+
+ return x;
+}
+
+#define __arch__swab32(x) ___arch__swab32(x)
+
#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
# define __BYTEORDER_HAS_U64__
# define __SWAB_64_THRU_32__
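
For readers less used to ARM rotate idioms, the non-constant branch above computes an ordinary 32-bit byte swap in three data-processing steps; a plain-C restatement of the same sequence (illustrative only, not part of the patch):

	static inline __u32 example_swab32(__u32 x)
	{
		__u32 t = x ^ ((x << 16) | (x >> 16));	/* t = x ^ ror(x, 16) */

		x  = (x << 24) | (x >> 8);		/* x = ror(x, 8) */
		t &= ~0x00FF0000;	/* drop the term that would disturb the
					 * byte ror(x, 8) already put in place */
		return x ^ (t >> 8);	/* swap the two remaining bytes */
	}
	/* example_swab32(0xAABBCCDD) == 0xDDCCBBAA */
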
diff --git a/include/asm-arm/cache.h b/include/asm-arm/cache.h
index 8d161f7c87f..31332c8ac04 100644
--- a/include/asm-arm/cache.h
+++ b/include/asm-arm/cache.h
@@ -7,9 +7,4 @@
#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-/*
- * largest L1 which this arch supports
- */
-#define L1_CACHE_SHIFT_MAX 5
-
#endif
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
index e81baff4f54..09e19a783a5 100644
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -14,7 +14,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
-#include <asm/mman.h>
#include <asm/glue.h>
#include <asm/shmparam.h>
diff --git a/include/asm-arm/dma.h b/include/asm-arm/dma.h
index ef41df43a58..49c01e2bf7c 100644
--- a/include/asm-arm/dma.h
+++ b/include/asm-arm/dma.h
@@ -10,6 +10,13 @@ typedef unsigned int dmach_t;
#include <asm/arch/dma.h>
/*
+ * This is the maximum virtual address which can be DMA'd from.
+ */
+#ifndef MAX_DMA_ADDRESS
+#define MAX_DMA_ADDRESS 0xffffffff
+#endif
+
+/*
* DMA modes
*/
typedef unsigned int dmamode_t;
@@ -91,7 +98,9 @@ extern void set_dma_sg(dmach_t channel, struct scatterlist *sg, int nr_sg);
* especially since some DMA architectures don't update the
* DMA address immediately, but defer it to the enable_dma().
*/
-extern void set_dma_addr(dmach_t channel, unsigned long physaddr);
+extern void __set_dma_addr(dmach_t channel, void *addr);
+#define set_dma_addr(channel, addr) \
+ __set_dma_addr(channel, bus_to_virt(addr))
/* Set the DMA byte count for this channel
*
diff --git a/include/asm-arm/futex.h b/include/asm-arm/futex.h
index 9feff4ce142..6a332a9f099 100644
--- a/include/asm-arm/futex.h
+++ b/include/asm-arm/futex.h
@@ -1,53 +1,6 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
-#ifdef __KERNEL__
+#include <asm-generic/futex.h>
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- inc_preempt_count();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- dec_preempt_count();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-#endif
#endif
diff --git a/include/asm-arm/hardware/sharpsl_pm.h b/include/asm-arm/hardware/sharpsl_pm.h
new file mode 100644
index 00000000000..36983e5f366
--- /dev/null
+++ b/include/asm-arm/hardware/sharpsl_pm.h
@@ -0,0 +1,94 @@
+/*
+ * SharpSL Battery/PM Driver
+ *
+ * Copyright (c) 2004-2005 Richard Purdie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/interrupt.h>
+
+struct sharpsl_charger_machinfo {
+ void (*init)(void);
+ void (*exit)(void);
+ int gpio_acin;
+ int gpio_batfull;
+ int gpio_batlock;
+ int gpio_fatal;
+ void (*discharge)(int);
+ void (*discharge1)(int);
+ void (*charge)(int);
+ void (*measure_temp)(int);
+ void (*presuspend)(void);
+ void (*postsuspend)(void);
+ unsigned long (*read_devdata)(int);
+#define SHARPSL_BATT_VOLT 1
+#define SHARPSL_BATT_TEMP 2
+#define SHARPSL_ACIN_VOLT 3
+#define SHARPSL_STATUS_ACIN 4
+#define SHARPSL_STATUS_LOCK 5
+#define SHARPSL_STATUS_CHRGFULL 6
+#define SHARPSL_STATUS_FATAL 7
+ unsigned long (*charger_wakeup)(void);
+ int (*should_wakeup)(unsigned int resume_on_alarm);
+ int bat_levels;
+ struct battery_thresh *bat_levels_noac;
+ struct battery_thresh *bat_levels_acin;
+ int status_high_acin;
+ int status_low_acin;
+ int status_high_noac;
+ int status_low_noac;
+};
+
+struct battery_thresh {
+ int voltage;
+ int percentage;
+};
+
+struct battery_stat {
+ int ac_status; /* APM AC Present/Not Present */
+ int mainbat_status; /* APM Main Battery Status */
+ int mainbat_percent; /* Main Battery Percentage Charge */
+ int mainbat_voltage; /* Main Battery Voltage */
+};
+
+struct sharpsl_pm_status {
+ struct device *dev;
+ struct timer_list ac_timer;
+ struct timer_list chrg_full_timer;
+
+ int charge_mode;
+#define CHRG_ERROR (-1)
+#define CHRG_OFF (0)
+#define CHRG_ON (1)
+#define CHRG_DONE (2)
+
+ unsigned int flags;
+#define SHARPSL_SUSPENDED (1 << 0) /* Device is Suspended */
+#define SHARPSL_ALARM_ACTIVE (1 << 1) /* Alarm is for charging event (not user) */
+#define SHARPSL_BL_LIMIT (1 << 2) /* Backlight Intensity Limited */
+#define SHARPSL_APM_QUEUED (1 << 3) /* APM Event Queued */
+#define SHARPSL_DO_OFFLINE_CHRG (1 << 4) /* Trigger the offline charger */
+
+ int full_count;
+ unsigned long charge_start_time;
+ struct sharpsl_charger_machinfo *machinfo;
+ struct battery_stat battstat;
+};
+
+extern struct sharpsl_pm_status sharpsl_pm;
+
+
+#define SHARPSL_LED_ERROR 2
+#define SHARPSL_LED_ON 1
+#define SHARPSL_LED_OFF 0
+
+void sharpsl_battery_kick(void);
+void sharpsl_pm_led(int val);
+irqreturn_t sharpsl_ac_isr(int irq, void *dev_id, struct pt_regs *fp);
+irqreturn_t sharpsl_chrg_full_isr(int irq, void *dev_id, struct pt_regs *fp);
+irqreturn_t sharpsl_fatal_isr(int irq, void *dev_id, struct pt_regs *fp);
+
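
To show how these hooks are meant to be consumed, here is a hypothetical machine-specific registration (all values and helper names are invented; a real board would supply its own GPIO numbers and ADC readers):

	static unsigned long example_read_devdata(int which)
	{
		switch (which) {
		case SHARPSL_BATT_VOLT:
			return 1600;	/* would normally query the battery ADC */
		case SHARPSL_STATUS_ACIN:
			return 1;	/* would normally sample gpio_acin */
		default:
			return 0;
		}
	}

	static struct sharpsl_charger_machinfo example_machinfo = {
		.gpio_acin	= 1,	/* board-specific GPIO numbers, made up */
		.gpio_batfull	= 2,
		.read_devdata	= example_read_devdata,
		.bat_levels	= 40,
	};
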
diff --git a/include/asm-arm/io.h b/include/asm-arm/io.h
index 0cf4d4f9960..fd0147e52db 100644
--- a/include/asm-arm/io.h
+++ b/include/asm-arm/io.h
@@ -56,7 +56,12 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
/*
* Architecture ioremap implementation.
+ *
+ * __ioremap takes a CPU physical address.
+ *
+ * __ioremap_pfn takes a Page Frame Number and an offset into that page
*/
+extern void __iomem * __ioremap_pfn(unsigned long, unsigned long, size_t, unsigned long);
extern void __iomem * __ioremap(unsigned long, size_t, unsigned long);
extern void __iounmap(void __iomem *addr);
@@ -261,6 +266,7 @@ out:
*
* ioremap takes a PCI memory address, as specified in
* Documentation/IO-mapping.txt.
+ *
*/
#ifndef __arch_ioremap
#define ioremap(cookie,size) __ioremap(cookie,size,0)
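
The comment above implies a simple relationship between the two entry points: a physical-address mapping can be expressed through the PFN-based call by splitting off the in-page offset. A sketch under that assumption (not the actual mm/ioremap.c code):

	static void __iomem *example_ioremap(unsigned long phys, size_t size,
					     unsigned long flags)
	{
		unsigned long offset = phys & ~PAGE_MASK;
		unsigned long pfn    = __phys_to_pfn(phys);

		return __ioremap_pfn(pfn, offset, size, flags);
	}
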
diff --git a/include/asm-arm/ioctl.h b/include/asm-arm/ioctl.h
index 2cbb7d0e9dc..b279fe06dfe 100644
--- a/include/asm-arm/ioctl.h
+++ b/include/asm-arm/ioctl.h
@@ -1,74 +1 @@
-/*
- * linux/ioctl.h for Linux by H.H. Bergman.
- */
-
-#ifndef _ASMARM_IOCTL_H
-#define _ASMARM_IOCTL_H
-
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and to avoid overwriting user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB -1 !
- */
-
-/*
- * The following is for compatibility across the various Linux
- * platforms. The i386 ioctl numbering scheme doesn't really enforce
- * a type field. De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here. Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS 8
-#define _IOC_TYPEBITS 8
-#define _IOC_SIZEBITS 14
-#define _IOC_DIRBITS 2
-
-#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT 0
-#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits.
- */
-#define _IOC_NONE 0U
-#define _IOC_WRITE 1U
-#define _IOC_READ 2U
-
-#define _IOC(dir,type,nr,size) \
- (((dir) << _IOC_DIRSHIFT) | \
- ((type) << _IOC_TYPESHIFT) | \
- ((nr) << _IOC_NRSHIFT) | \
- ((size) << _IOC_SIZESHIFT))
-
-/* used to create numbers */
-#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
-
-#endif /* _ASMARM_IOCTL_H */
+#include <asm-generic/ioctl.h>
diff --git a/include/asm-arm/irq.h b/include/asm-arm/irq.h
index 59975ee43cf..7772432d3fd 100644
--- a/include/asm-arm/irq.h
+++ b/include/asm-arm/irq.h
@@ -25,10 +25,14 @@ extern void disable_irq_nosync(unsigned int);
extern void disable_irq(unsigned int);
extern void enable_irq(unsigned int);
-#define __IRQT_FALEDGE (1 << 0)
-#define __IRQT_RISEDGE (1 << 1)
-#define __IRQT_LOWLVL (1 << 2)
-#define __IRQT_HIGHLVL (1 << 3)
+/*
+ * These correspond with the SA_TRIGGER_* defines, and therefore the
+ * IRQRESOURCE_IRQ_* defines.
+ */
+#define __IRQT_RISEDGE (1 << 0)
+#define __IRQT_FALEDGE (1 << 1)
+#define __IRQT_HIGHLVL (1 << 2)
+#define __IRQT_LOWLVL (1 << 3)
#define IRQT_NOEDGE (0)
#define IRQT_RISING (__IRQT_RISEDGE)
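
Because the bit positions now line up with the SA_TRIGGER_* flags, a trigger specification can be passed through without a translation table; a hypothetical helper illustrating that property (not part of the patch):

	static inline unsigned int example_sa_trigger_to_irqt(unsigned long sa_flags)
	{
		/* SA_TRIGGER_RISING/FALLING/HIGH/LOW occupy the same four
		 * low bits as __IRQT_RISEDGE/FALEDGE/HIGHLVL/LOWLVL. */
		return sa_flags & 0xf;
	}
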
diff --git a/include/asm-arm/mach/dma.h b/include/asm-arm/mach/dma.h
index 31bf716106e..e7c4a20aad5 100644
--- a/include/asm-arm/mach/dma.h
+++ b/include/asm-arm/mach/dma.h
@@ -25,13 +25,15 @@ struct dma_ops {
};
struct dma_struct {
+ void *addr; /* single DMA address */
+ unsigned long count; /* single DMA size */
struct scatterlist buf; /* single DMA */
int sgcount; /* number of DMA SG */
struct scatterlist *sg; /* DMA Scatter-Gather List */
unsigned int active:1; /* Transfer active */
unsigned int invalid:1; /* Address/Count changed */
- unsigned int using_sg:1; /* using scatter list? */
+
dmamode_t dma_mode; /* DMA mode */
int speed; /* DMA speed */
diff --git a/include/asm-arm/mach/map.h b/include/asm-arm/mach/map.h
index b338936bde4..3351b77fab3 100644
--- a/include/asm-arm/mach/map.h
+++ b/include/asm-arm/mach/map.h
@@ -27,9 +27,6 @@ struct meminfo;
#define MT_ROM 6
#define MT_IXP2000_DEVICE 7
-#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT)
-#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
-
extern void create_memmap_holes(struct meminfo *);
extern void memtable_init(struct meminfo *);
extern void iotable_init(struct map_desc *, int);
diff --git a/include/asm-arm/mach/serial_at91rm9200.h b/include/asm-arm/mach/serial_at91rm9200.h
new file mode 100644
index 00000000000..98f4b0cb883
--- /dev/null
+++ b/include/asm-arm/mach/serial_at91rm9200.h
@@ -0,0 +1,36 @@
+/*
+ * linux/include/asm-arm/mach/serial_at91rm9200.h
+ *
+ * Based on serial_sa1100.h by Nicolas Pitre
+ *
+ * Copyright (C) 2002 ATMEL Rousset
+ *
+ * Low level machine dependent UART functions.
+ */
+#include <linux/config.h>
+
+struct uart_port;
+
+/*
+ * This is a temporary structure for registering these
+ * functions; it is intended to be discarded after boot.
+ */
+struct at91rm9200_port_fns {
+ void (*set_mctrl)(struct uart_port *, u_int);
+ u_int (*get_mctrl)(struct uart_port *);
+ void (*enable_ms)(struct uart_port *);
+ void (*pm)(struct uart_port *, u_int, u_int);
+ int (*set_wake)(struct uart_port *, u_int);
+ int (*open)(struct uart_port *);
+ void (*close)(struct uart_port *);
+};
+
+#if defined(CONFIG_SERIAL_AT91)
+void at91_register_uart_fns(struct at91rm9200_port_fns *fns);
+void at91_register_uart(int idx, int port);
+#else
+#define at91_register_uart_fns(fns) do { } while (0)
+#define at91_register_uart(idx,port) do { } while (0)
+#endif
+
+
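
A hypothetical board-setup usage of the new registration hooks (the callback body and port numbers are invented for illustration):

	static void example_set_mctrl(struct uart_port *port, u_int mctrl)
	{
		/* drive board-specific RTS/DTR lines here */
	}

	static struct at91rm9200_port_fns example_port_fns = {
		.set_mctrl = example_set_mctrl,
	};

	static void __init example_board_map_uarts(void)
	{
		at91_register_uart_fns(&example_port_fns);
		at91_register_uart(0, 0);	/* expose USART 0 as the first port */
	}
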
diff --git a/include/asm-arm/memory.h b/include/asm-arm/memory.h
index 3e572364ee7..b4e1146ab68 100644
--- a/include/asm-arm/memory.h
+++ b/include/asm-arm/memory.h
@@ -25,6 +25,7 @@
#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/arch/memory.h>
+#include <asm/sizes.h>
#ifndef TASK_SIZE
/*
@@ -48,6 +49,14 @@
#endif
/*
+ * Size of DMA-consistent memory region. Must be a multiple of 2M,
+ * between 2MB and 14MB inclusive.
+ */
+#ifndef CONSISTENT_DMA_SIZE
+#define CONSISTENT_DMA_SIZE SZ_2M
+#endif
+
+/*
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
@@ -58,6 +67,12 @@
#endif
/*
+ * Convert a physical address to a Page Frame Number and back
+ */
+#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT)
+#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
+
+/*
* The module space lives between the addresses given by TASK_SIZE
* and PAGE_OFFSET - it must be within 32MB of the kernel text.
*/
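
Two small points worth spelling out about the additions above: the consistent-DMA window is a platform override, and the PFN helpers are plain shifts. Illustrative values only:

	/* A platform needing a bigger DMA-consistent area overrides the default
	 * in its <asm/arch/memory.h>; the value must stay a 2MB multiple. */
	#define CONSISTENT_DMA_SIZE	SZ_8M

	/* With 4K pages the PFN conversions round-trip exactly:
	 *   __phys_to_pfn(0xc0100000) == 0xc0100
	 *   __pfn_to_phys(0xc0100)    == 0xc0100000
	 */
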
diff --git a/include/asm-arm/mman.h b/include/asm-arm/mman.h
index 8e4f69c4fa5..f0bebca2ac2 100644
--- a/include/asm-arm/mman.h
+++ b/include/asm-arm/mman.h
@@ -35,6 +35,7 @@
#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
+#define MADV_REMOVE 0x5 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-arm/mutex.h b/include/asm-arm/mutex.h
new file mode 100644
index 00000000000..6caa59f1f59
--- /dev/null
+++ b/include/asm-arm/mutex.h
@@ -0,0 +1,128 @@
+/*
+ * include/asm-arm/mutex.h
+ *
+ * ARM optimized mutex locking primitives
+ *
+ * Please look into asm-generic/mutex-xchg.h for a formal definition.
+ */
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+#if __LINUX_ARM_ARCH__ < 6
+/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */
+# include <asm-generic/mutex-xchg.h>
+#else
+
+/*
+ * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
+ * atomic decrement (it is not a reliable atomic decrement but it satisfies
+ * the defined semantics for our purpose, while being smaller and faster
+ * than a real atomic decrement or atomic swap). The idea is to attempt
+ * decrementing the lock value only once. If once decremented it isn't zero,
+ * or if its store-back fails due to a dispute on the exclusive store, we
+ * simply bail out immediately through the slow path where the lock will be
+ * reattempted until it succeeds.
+ */
+#define __mutex_fastpath_lock(count, fail_fn) \
+do { \
+ int __ex_flag, __res; \
+ \
+ typecheck(atomic_t *, count); \
+ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ \
+ __asm__ ( \
+ "ldrex %0, [%2] \n" \
+ "sub %0, %0, #1 \n" \
+ "strex %1, %0, [%2] \n" \
+ \
+ : "=&r" (__res), "=&r" (__ex_flag) \
+ : "r" (&(count)->counter) \
+ : "cc","memory" ); \
+ \
+ if (unlikely(__res || __ex_flag)) \
+ fail_fn(count); \
+} while (0)
+
+#define __mutex_fastpath_lock_retval(count, fail_fn) \
+({ \
+ int __ex_flag, __res; \
+ \
+ typecheck(atomic_t *, count); \
+ typecheck_fn(fastcall int (*)(atomic_t *), fail_fn); \
+ \
+ __asm__ ( \
+ "ldrex %0, [%2] \n" \
+ "sub %0, %0, #1 \n" \
+ "strex %1, %0, [%2] \n" \
+ \
+ : "=&r" (__res), "=&r" (__ex_flag) \
+ : "r" (&(count)->counter) \
+ : "cc","memory" ); \
+ \
+ __res |= __ex_flag; \
+ if (unlikely(__res != 0)) \
+ __res = fail_fn(count); \
+ __res; \
+})
+
+/*
+ * The same trick is used for the unlock fast path. However, the original value,
+ * rather than the result, is used to test for success in order to have
+ * better generated assembly.
+ */
+#define __mutex_fastpath_unlock(count, fail_fn) \
+do { \
+ int __ex_flag, __res, __orig; \
+ \
+ typecheck(atomic_t *, count); \
+ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ \
+ __asm__ ( \
+ "ldrex %0, [%3] \n" \
+ "add %1, %0, #1 \n" \
+ "strex %2, %1, [%3] \n" \
+ \
+ : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag) \
+ : "r" (&(count)->counter) \
+ : "cc","memory" ); \
+ \
+ if (unlikely(__orig || __ex_flag)) \
+ fail_fn(count); \
+} while (0)
+
+/*
+ * If the unlock was done on a contended lock, or if the unlock simply fails
+ * then the mutex remains locked.
+ */
+#define __mutex_slowpath_needs_to_unlock() 1
+
+/*
+ * For __mutex_fastpath_trylock we use another construct which could be
+ * described as a "single value cmpxchg".
+ *
+ * This provides the needed trylock semantics like cmpxchg would, but it is
+ * lighter and less generic than a true cmpxchg implementation.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ int __ex_flag, __res, __orig;
+
+ __asm__ (
+
+ "1: ldrex %0, [%3] \n"
+ "subs %1, %0, #1 \n"
+ "strexeq %2, %1, [%3] \n"
+ "movlt %0, #0 \n"
+ "cmpeq %2, #0 \n"
+ "bgt 1b \n"
+
+ : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
+ : "r" (&count->counter)
+ : "cc", "memory" );
+
+ return __orig;
+}
+
+#endif
+#endif
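
For orientation, this is roughly how the generic mutex core consumes the fastpath hooks defined above: the counter starts at 1, the fastpath decrements it with the ldrex/strex sequence, and any contention or store failure falls through to the out-of-line slowpath. Simplified sketch only; the real kernel/mutex.c adds debug bookkeeping:

	/* Simplified sketch of the caller side, not the actual mutex.c text. */
	void example_mutex_lock(struct mutex *lock)
	{
		/* On success the decrement leaves count at 0 (locked);
		 * otherwise __mutex_lock_slowpath() queues the task. */
		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	}
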
diff --git a/include/asm-arm/processor.h b/include/asm-arm/processor.h
index 7d4118e0905..31290694648 100644
--- a/include/asm-arm/processor.h
+++ b/include/asm-arm/processor.h
@@ -85,9 +85,11 @@ unsigned long get_wchan(struct task_struct *p);
*/
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-#define KSTK_REGS(tsk) (((struct pt_regs *)(THREAD_START_SP + (unsigned long)(tsk)->thread_info)) - 1)
-#define KSTK_EIP(tsk) KSTK_REGS(tsk)->ARM_pc
-#define KSTK_ESP(tsk) KSTK_REGS(tsk)->ARM_sp
+#define task_pt_regs(p) \
+ ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
+
+#define KSTK_EIP(tsk) task_pt_regs(tsk)->ARM_pc
+#define KSTK_ESP(tsk) task_pt_regs(tsk)->ARM_sp
/*
* Prefetching support - only ARMv5.
diff --git a/include/asm-arm/scatterlist.h b/include/asm-arm/scatterlist.h
index 83b876fb04c..de2f65eb42e 100644
--- a/include/asm-arm/scatterlist.h
+++ b/include/asm-arm/scatterlist.h
@@ -9,7 +9,6 @@ struct scatterlist {
unsigned int offset; /* buffer offset */
dma_addr_t dma_address; /* dma address */
unsigned int length; /* length */
- char *__address; /* for set_dma_addr */
};
/*
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 5621d61ebc0..eb2de8c1051 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -168,10 +168,20 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
#define switch_to(prev,next,last) \
do { \
- last = __switch_to(prev,prev->thread_info,next->thread_info); \
+ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
* CPU interrupt mask handling.
*/
#if __LINUX_ARM_ARCH__ >= 6
diff --git a/include/asm-arm/thread_info.h b/include/asm-arm/thread_info.h
index 7c98557b717..33a33cbb632 100644
--- a/include/asm-arm/thread_info.h
+++ b/include/asm-arm/thread_info.h
@@ -96,13 +96,10 @@ static inline struct thread_info *current_thread_info(void)
extern struct thread_info *alloc_thread_info(struct task_struct *task);
extern void free_thread_info(struct thread_info *);
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
-
#define thread_saved_pc(tsk) \
- ((unsigned long)(pc_pointer((tsk)->thread_info->cpu_context.pc)))
+ ((unsigned long)(pc_pointer(task_thread_info(tsk)->cpu_context.pc)))
#define thread_saved_fp(tsk) \
- ((unsigned long)((tsk)->thread_info->cpu_context.fp))
+ ((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
extern void iwmmxt_task_disable(struct thread_info *);
extern void iwmmxt_task_copy(struct thread_info *, void *);
diff --git a/include/asm-arm26/atomic.h b/include/asm-arm26/atomic.h
index a47cadc5968..1552c865399 100644
--- a/include/asm-arm26/atomic.h
+++ b/include/asm-arm26/atomic.h
@@ -76,6 +76,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int ret;
@@ -118,5 +120,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
+#include <asm-generic/atomic.h>
#endif
#endif
diff --git a/include/asm-arm26/futex.h b/include/asm-arm26/futex.h
index 9feff4ce142..6a332a9f099 100644
--- a/include/asm-arm26/futex.h
+++ b/include/asm-arm26/futex.h
@@ -1,53 +1,6 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
-#ifdef __KERNEL__
+#include <asm-generic/futex.h>
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- inc_preempt_count();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- dec_preempt_count();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-#endif
#endif
diff --git a/include/asm-arm26/ioctl.h b/include/asm-arm26/ioctl.h
index 2cbb7d0e9dc..b279fe06dfe 100644
--- a/include/asm-arm26/ioctl.h
+++ b/include/asm-arm26/ioctl.h
@@ -1,74 +1 @@
-/*
- * linux/ioctl.h for Linux by H.H. Bergman.
- */
-
-#ifndef _ASMARM_IOCTL_H
-#define _ASMARM_IOCTL_H
-
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and to avoid overwriting user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB -1 !
- */
-
-/*
- * The following is for compatibility across the various Linux
- * platforms. The i386 ioctl numbering scheme doesn't really enforce
- * a type field. De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here. Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS 8
-#define _IOC_TYPEBITS 8
-#define _IOC_SIZEBITS 14
-#define _IOC_DIRBITS 2
-
-#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT 0
-#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits.
- */
-#define _IOC_NONE 0U
-#define _IOC_WRITE 1U
-#define _IOC_READ 2U
-
-#define _IOC(dir,type,nr,size) \
- (((dir) << _IOC_DIRSHIFT) | \
- ((type) << _IOC_TYPESHIFT) | \
- ((nr) << _IOC_NRSHIFT) | \
- ((size) << _IOC_SIZESHIFT))
-
-/* used to create numbers */
-#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
-
-#endif /* _ASMARM_IOCTL_H */
+#include <asm-generic/ioctl.h>
diff --git a/include/asm-arm26/mman.h b/include/asm-arm26/mman.h
index cc27b824026..0ed7780541f 100644
--- a/include/asm-arm26/mman.h
+++ b/include/asm-arm26/mman.h
@@ -35,6 +35,7 @@
#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
+#define MADV_REMOVE 0x5 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-arm26/system.h b/include/asm-arm26/system.h
index f23fac1938f..ca4ccfc4b57 100644
--- a/include/asm-arm26/system.h
+++ b/include/asm-arm26/system.h
@@ -111,10 +111,20 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
#define switch_to(prev,next,last) \
do { \
- last = __switch_to(prev,prev->thread_info,next->thread_info); \
+ last = __switch_to(prev,task_thread_info(prev),task_thread_info(next)); \
} while (0)
/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
* Save the current interrupt enable state & disable IRQs
*/
#define local_irq_save(x) \
diff --git a/include/asm-arm26/thread_info.h b/include/asm-arm26/thread_info.h
index aff3e5699c6..a65e58a0a76 100644
--- a/include/asm-arm26/thread_info.h
+++ b/include/asm-arm26/thread_info.h
@@ -82,18 +82,15 @@ static inline struct thread_info *current_thread_info(void)
/* FIXME - PAGE_SIZE < 32K */
#define THREAD_SIZE (8*32768) // FIXME - this needs attention (see kernel/fork.c which gets a nice div by zero if this is lower than 8*32768
-#define __get_user_regs(x) (((struct pt_regs *)((unsigned long)(x) + THREAD_SIZE - 8)) - 1)
+#define task_pt_regs(task) ((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE - 8) - 1)
extern struct thread_info *alloc_thread_info(struct task_struct *task);
extern void free_thread_info(struct thread_info *);
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
-
#define thread_saved_pc(tsk) \
- ((unsigned long)(pc_pointer((tsk)->thread_info->cpu_context.pc)))
+ ((unsigned long)(pc_pointer(task_thread_info(tsk)->cpu_context.pc)))
#define thread_saved_fp(tsk) \
- ((unsigned long)((tsk)->thread_info->cpu_context.fp))
+ ((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
#else /* !__ASSEMBLY__ */
diff --git a/include/asm-cris/arch-v10/cache.h b/include/asm-cris/arch-v10/cache.h
index 1d1d1ba65b1..aea27184d2d 100644
--- a/include/asm-cris/arch-v10/cache.h
+++ b/include/asm-cris/arch-v10/cache.h
@@ -4,6 +4,5 @@
/* Etrax 100LX have 32-byte cache-lines. */
#define L1_CACHE_BYTES 32
#define L1_CACHE_SHIFT 5
-#define L1_CACHE_SHIFT_MAX 5
#endif /* _ASM_ARCH_CACHE_H */
diff --git a/include/asm-cris/arch-v10/processor.h b/include/asm-cris/arch-v10/processor.h
index e23df8dc96e..cc692c7a066 100644
--- a/include/asm-cris/arch-v10/processor.h
+++ b/include/asm-cris/arch-v10/processor.h
@@ -40,7 +40,7 @@ struct thread_struct {
#define KSTK_EIP(tsk) \
({ \
unsigned long eip = 0; \
- unsigned long regs = (unsigned long)user_regs(tsk); \
+ unsigned long regs = (unsigned long)task_pt_regs(tsk); \
if (regs > PAGE_SIZE && \
virt_addr_valid(regs)) \
eip = ((struct pt_regs *)regs)->irp; \
diff --git a/include/asm-cris/arch-v32/cache.h b/include/asm-cris/arch-v32/cache.h
index 4fed8d62ccc..80b236b1531 100644
--- a/include/asm-cris/arch-v32/cache.h
+++ b/include/asm-cris/arch-v32/cache.h
@@ -4,6 +4,5 @@
/* A cache-line is 32 bytes. */
#define L1_CACHE_BYTES 32
#define L1_CACHE_SHIFT 5
-#define L1_CACHE_SHIFT_MAX 5
#endif /* _ASM_CRIS_ARCH_CACHE_H */
diff --git a/include/asm-cris/arch-v32/processor.h b/include/asm-cris/arch-v32/processor.h
index 8c939bf2798..32bf2e538ce 100644
--- a/include/asm-cris/arch-v32/processor.h
+++ b/include/asm-cris/arch-v32/processor.h
@@ -36,7 +36,7 @@ struct thread_struct {
#define KSTK_EIP(tsk) \
({ \
unsigned long eip = 0; \
- unsigned long regs = (unsigned long)user_regs(tsk); \
+ unsigned long regs = (unsigned long)task_pt_regs(tsk); \
if (regs > PAGE_SIZE && virt_addr_valid(regs)) \
eip = ((struct pt_regs *)regs)->erp; \
eip; \
diff --git a/include/asm-cris/atomic.h b/include/asm-cris/atomic.h
index 683b05a57d8..0b51a87e553 100644
--- a/include/asm-cris/atomic.h
+++ b/include/asm-cris/atomic.h
@@ -136,6 +136,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int ret;
@@ -156,4 +158,5 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
+#include <asm-generic/atomic.h>
#endif
diff --git a/include/asm-cris/dma-mapping.h b/include/asm-cris/dma-mapping.h
index 8eff51349ae..cbf1a98f012 100644
--- a/include/asm-cris/dma-mapping.h
+++ b/include/asm-cris/dma-mapping.h
@@ -153,7 +153,7 @@ dma_set_mask(struct device *dev, u64 mask)
static inline int
dma_get_cache_alignment(void)
{
- return (1 << L1_CACHE_SHIFT_MAX);
+ return (1 << INTERNODE_CACHE_SHIFT);
}
#define dma_is_consistent(d) (1)
diff --git a/include/asm-cris/futex.h b/include/asm-cris/futex.h
index 9feff4ce142..6a332a9f099 100644
--- a/include/asm-cris/futex.h
+++ b/include/asm-cris/futex.h
@@ -1,53 +1,6 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
-#ifdef __KERNEL__
+#include <asm-generic/futex.h>
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- inc_preempt_count();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- dec_preempt_count();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-#endif
#endif
diff --git a/include/asm-cris/ioctl.h b/include/asm-cris/ioctl.h
index be2d8f667a3..b279fe06dfe 100644
--- a/include/asm-cris/ioctl.h
+++ b/include/asm-cris/ioctl.h
@@ -1,83 +1 @@
-/*
- * linux/ioctl.h for Linux by H.H. Bergman.
- *
- * This is the same as the i386 version.
- */
-
-#ifndef _ASMCRIS_IOCTL_H
-#define _ASMCRIS_IOCTL_H
-
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and to avoid overwriting user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB -1 !
- */
-
-/*
- * The following is for compatibility across the various Linux
- * platforms. The i386 ioctl numbering scheme doesn't really enforce
- * a type field. De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here. Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS 8
-#define _IOC_TYPEBITS 8
-#define _IOC_SIZEBITS 14
-#define _IOC_DIRBITS 2
-
-#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT 0
-#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits.
- */
-#define _IOC_NONE 0U
-#define _IOC_WRITE 1U
-#define _IOC_READ 2U
-
-#define _IOC(dir,type,nr,size) \
- (((dir) << _IOC_DIRSHIFT) | \
- ((type) << _IOC_TYPESHIFT) | \
- ((nr) << _IOC_NRSHIFT) | \
- ((size) << _IOC_SIZESHIFT))
-
-/* provoke compile error for invalid uses of size argument */
-extern int __invalid_size_argument_for_IOC;
-#define _IOC_TYPECHECK(t) \
- ((sizeof(t) == sizeof(t[1]) && \
- sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
- sizeof(t) : __invalid_size_argument_for_IOC)
-
-/* used to create numbers */
-#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
-
-#endif /* _ASMCRIS_IOCTL_H */
+#include <asm-generic/ioctl.h>
diff --git a/include/asm-cris/mman.h b/include/asm-cris/mman.h
index 8570e72b950..5a382b8bf3f 100644
--- a/include/asm-cris/mman.h
+++ b/include/asm-cris/mman.h
@@ -37,6 +37,7 @@
#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
+#define MADV_REMOVE 0x5 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-cris/mutex.h b/include/asm-cris/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/include/asm-cris/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-cris/processor.h b/include/asm-cris/processor.h
index dce41009eeb..961e2bceadb 100644
--- a/include/asm-cris/processor.h
+++ b/include/asm-cris/processor.h
@@ -45,7 +45,8 @@ struct task_struct;
* Ditto, but for the currently running task
*/
-#define current_regs() user_regs(current->thread_info)
+#define task_pt_regs(task) user_regs(task_thread_info(task))
+#define current_regs() task_pt_regs(current)
static inline void prepare_to_copy(struct task_struct *tsk)
{
diff --git a/include/asm-cris/thread_info.h b/include/asm-cris/thread_info.h
index cef0140fc10..7ad853c3f74 100644
--- a/include/asm-cris/thread_info.h
+++ b/include/asm-cris/thread_info.h
@@ -69,8 +69,6 @@ struct thread_info {
/* thread information allocation */
#define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h
index f6539ff569c..a59f684b4f3 100644
--- a/include/asm-frv/atomic.h
+++ b/include/asm-frv/atomic.h
@@ -218,51 +218,12 @@ extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsig
__typeof__(*(ptr)) __xg_orig; \
\
switch (sizeof(__xg_orig)) { \
- case 1: \
- asm volatile( \
- "0: \n" \
- " orcc gr0,gr0,gr0,icc3 \n" \
- " ckeq icc3,cc7 \n" \
- " ldub.p %M0,%1 \n" \
- " orcr cc7,cc7,cc3 \n" \
- " cstb.p %2,%M0 ,cc3,#1 \n" \
- " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
- " beq icc3,#0,0b \n" \
- : "+U"(*__xg_ptr), "=&r"(__xg_orig) \
- : "r"(x) \
- : "memory", "cc7", "cc3", "icc3" \
- ); \
- break; \
- \
- case 2: \
- asm volatile( \
- "0: \n" \
- " orcc gr0,gr0,gr0,icc3 \n" \
- " ckeq icc3,cc7 \n" \
- " lduh.p %M0,%1 \n" \
- " orcr cc7,cc7,cc3 \n" \
- " csth.p %2,%M0 ,cc3,#1 \n" \
- " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
- " beq icc3,#0,0b \n" \
- : "+U"(*__xg_ptr), "=&r"(__xg_orig) \
- : "r"(x) \
- : "memory", "cc7", "cc3", "icc3" \
- ); \
- break; \
- \
case 4: \
asm volatile( \
- "0: \n" \
- " orcc gr0,gr0,gr0,icc3 \n" \
- " ckeq icc3,cc7 \n" \
- " ld.p %M0,%1 \n" \
- " orcr cc7,cc7,cc3 \n" \
- " cst.p %2,%M0 ,cc3,#1 \n" \
- " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
- " beq icc3,#0,0b \n" \
- : "+U"(*__xg_ptr), "=&r"(__xg_orig) \
+ "swap%I0 %2,%M0" \
+ : "+m"(*__xg_ptr), "=&r"(__xg_orig) \
: "r"(x) \
- : "memory", "cc7", "cc3", "icc3" \
+ : "memory" \
); \
break; \
\
@@ -277,8 +238,6 @@ extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsig
#else
-extern uint8_t __xchg_8 (uint8_t i, volatile void *v);
-extern uint16_t __xchg_16(uint16_t i, volatile void *v);
extern uint32_t __xchg_32(uint32_t i, volatile void *v);
#define xchg(ptr, x) \
@@ -287,8 +246,6 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
__typeof__(*(ptr)) __xg_orig; \
\
switch (sizeof(__xg_orig)) { \
- case 1: __xg_orig = (__typeof__(*(ptr))) __xchg_8 ((uint8_t) x, __xg_ptr); break; \
- case 2: __xg_orig = (__typeof__(*(ptr))) __xchg_16((uint16_t) x, __xg_ptr); break; \
case 4: __xg_orig = (__typeof__(*(ptr))) __xchg_32((uint32_t) x, __xg_ptr); break; \
default: \
__xg_orig = 0; \
@@ -318,46 +275,6 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
__typeof__(*(ptr)) __xg_new = (new); \
\
switch (sizeof(__xg_orig)) { \
- case 1: \
- asm volatile( \
- "0: \n" \
- " orcc gr0,gr0,gr0,icc3 \n" \
- " ckeq icc3,cc7 \n" \
- " ldub.p %M0,%1 \n" \
- " orcr cc7,cc7,cc3 \n" \
- " sub%I4 %1,%4,%2 \n" \
- " sllcc %2,#24,gr0,icc0 \n" \
- " bne icc0,#0,1f \n" \
- " cstb.p %3,%M0 ,cc3,#1 \n" \
- " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
- " beq icc3,#0,0b \n" \
- "1: \n" \
- : "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \
- : "r"(__xg_new), "NPr"(__xg_test) \
- : "memory", "cc7", "cc3", "icc3", "icc0" \
- ); \
- break; \
- \
- case 2: \
- asm volatile( \
- "0: \n" \
- " orcc gr0,gr0,gr0,icc3 \n" \
- " ckeq icc3,cc7 \n" \
- " lduh.p %M0,%1 \n" \
- " orcr cc7,cc7,cc3 \n" \
- " sub%I4 %1,%4,%2 \n" \
- " sllcc %2,#16,gr0,icc0 \n" \
- " bne icc0,#0,1f \n" \
- " csth.p %3,%M0 ,cc3,#1 \n" \
- " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
- " beq icc3,#0,0b \n" \
- "1: \n" \
- : "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \
- : "r"(__xg_new), "NPr"(__xg_test) \
- : "memory", "cc7", "cc3", "icc3", "icc0" \
- ); \
- break; \
- \
case 4: \
asm volatile( \
"0: \n" \
@@ -388,8 +305,6 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
#else
-extern uint8_t __cmpxchg_8 (uint8_t *v, uint8_t test, uint8_t new);
-extern uint16_t __cmpxchg_16(uint16_t *v, uint16_t test, uint16_t new);
extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
#define cmpxchg(ptr, test, new) \
@@ -400,8 +315,6 @@ extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
__typeof__(*(ptr)) __xg_new = (new); \
\
switch (sizeof(__xg_orig)) { \
- case 1: __xg_orig = __cmpxchg_8 (__xg_ptr, __xg_test, __xg_new); break; \
- case 2: __xg_orig = __cmpxchg_16(__xg_ptr, __xg_test, __xg_new); break; \
case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \
default: \
__xg_orig = 0; \
@@ -414,7 +327,8 @@ extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
#endif
-#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_add_unless(v, a, u) \
({ \
@@ -424,6 +338,8 @@ extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
c = old; \
c != (u); \
})
+
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+#include <asm-generic/atomic.h>
#endif /* _ASM_ATOMIC_H */
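
With atomic_cmpxchg() no longer force-cast to int and atomic_xchg() added, atomic_t users can write the usual compare-and-swap retry loop directly. A small sketch of that pattern (not part of this patch), in the style of the atomic_add_unless() seen above:

	#include <asm/atomic.h>

	/* Add @a to @v only while the result would stay below @limit;
	 * returns non-zero if the add was performed. */
	static inline int atomic_add_below(atomic_t *v, int a, int limit)
	{
		int old;

		for (;;) {
			old = atomic_read(v);
			if (old + a >= limit)
				return 0;
			/* retry if another CPU changed the counter meanwhile */
			if (atomic_cmpxchg(v, old, old + a) == old)
				return 1;
		}
	}
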
diff --git a/include/asm-frv/bug.h b/include/asm-frv/bug.h
index 074c0d5770e..451712cc306 100644
--- a/include/asm-frv/bug.h
+++ b/include/asm-frv/bug.h
@@ -12,6 +12,7 @@
#define _ASM_BUG_H
#include <linux/config.h>
+#include <linux/linkage.h>
#ifdef CONFIG_BUG
/*
diff --git a/include/asm-frv/dma-mapping.h b/include/asm-frv/dma-mapping.h
index 5003e017fd1..e9fc1d47797 100644
--- a/include/asm-frv/dma-mapping.h
+++ b/include/asm-frv/dma-mapping.h
@@ -23,7 +23,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t
* returns, or alternatively stop on the first sg_dma_len(sg) which
* is 0.
*/
-#define sg_dma_address(sg) ((unsigned long) (page_to_phys((sg)->page) + (sg)->offset))
+#define sg_dma_address(sg) ((sg)->dma_address)
#define sg_dma_len(sg) ((sg)->length)
/*
diff --git a/include/asm-frv/futex.h b/include/asm-frv/futex.h
index 9feff4ce142..fca9d90e32c 100644
--- a/include/asm-frv/futex.h
+++ b/include/asm-frv/futex.h
@@ -7,47 +7,7 @@
#include <asm/errno.h>
#include <asm/uaccess.h>
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- inc_preempt_count();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- dec_preempt_count();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
+extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr);
#endif
#endif
diff --git a/include/asm-frv/io.h b/include/asm-frv/io.h
index 48829f72724..075369b1a34 100644
--- a/include/asm-frv/io.h
+++ b/include/asm-frv/io.h
@@ -18,6 +18,7 @@
#ifdef __KERNEL__
#include <linux/config.h>
+#include <linux/types.h>
#include <asm/virtconvert.h>
#include <asm/string.h>
#include <asm/mb-regs.h>
@@ -104,6 +105,8 @@ static inline void __insl(unsigned long addr, void *buf, int len, int swap)
__insl_sw(addr, buf, len);
}
+#define mmiowb() mb()
+
/*
* make the short names macros so specific devices
* can override them as required
@@ -209,6 +212,10 @@ static inline uint32_t readl(const volatile void __iomem *addr)
return ret;
}
+#define readb_relaxed readb
+#define readw_relaxed readw
+#define readl_relaxed readl
+
static inline void writeb(uint8_t datum, volatile void __iomem *addr)
{
__builtin_write8((volatile uint8_t __force *) addr, datum);
@@ -268,11 +275,106 @@ static inline void __iomem *ioremap_fullcache(unsigned long physaddr, unsigned l
extern void iounmap(void __iomem *addr);
+static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ return (void __iomem *) port;
+}
+
+static inline void ioport_unmap(void __iomem *p)
+{
+}
+
static inline void flush_write_buffers(void)
{
__asm__ __volatile__ ("membar" : : :"memory");
}
+/*
+ * do appropriate I/O accesses for token type
+ */
+static inline unsigned int ioread8(void __iomem *p)
+{
+ return __builtin_read8(p);
+}
+
+static inline unsigned int ioread16(void __iomem *p)
+{
+ uint16_t ret = __builtin_read16(p);
+ if (__is_PCI_addr(p))
+ ret = _swapw(ret);
+ return ret;
+}
+
+static inline unsigned int ioread32(void __iomem *p)
+{
+ uint32_t ret = __builtin_read32(p);
+ if (__is_PCI_addr(p))
+ ret = _swapl(ret);
+ return ret;
+}
+
+static inline void iowrite8(u8 val, void __iomem *p)
+{
+ __builtin_write8(p, val);
+ if (__is_PCI_MEM(p))
+ __flush_PCI_writes();
+}
+
+static inline void iowrite16(u16 val, void __iomem *p)
+{
+ if (__is_PCI_addr(p))
+ val = _swapw(val);
+ __builtin_write16(p, val);
+ if (__is_PCI_MEM(p))
+ __flush_PCI_writes();
+}
+
+static inline void iowrite32(u32 val, void __iomem *p)
+{
+ if (__is_PCI_addr(p))
+ val = _swapl(val);
+ __builtin_write32(p, val);
+ if (__is_PCI_MEM(p))
+ __flush_PCI_writes();
+}
+
+static inline void ioread8_rep(void __iomem *p, void *dst, unsigned long count)
+{
+ io_insb((unsigned long) p, dst, count);
+}
+
+static inline void ioread16_rep(void __iomem *p, void *dst, unsigned long count)
+{
+ io_insw((unsigned long) p, dst, count);
+}
+
+static inline void ioread32_rep(void __iomem *p, void *dst, unsigned long count)
+{
+ __insl_ns((unsigned long) p, dst, count);
+}
+
+static inline void iowrite8_rep(void __iomem *p, const void *src, unsigned long count)
+{
+ io_outsb((unsigned long) p, src, count);
+}
+
+static inline void iowrite16_rep(void __iomem *p, const void *src, unsigned long count)
+{
+ io_outsw((unsigned long) p, src, count);
+}
+
+static inline void iowrite32_rep(void __iomem *p, const void *src, unsigned long count)
+{
+ __outsl_ns((unsigned long) p, src, count);
+}
+
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+struct pci_dev;
+extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+{
+}
+
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
@@ -285,6 +387,27 @@ static inline void flush_write_buffers(void)
*/
#define xlate_dev_kmem_ptr(p) p
+/*
+ * Check BIOS signature
+ */
+static inline int check_signature(volatile void __iomem *io_addr,
+ const unsigned char *signature, int length)
+{
+ int retval = 0;
+
+ do {
+ if (readb(io_addr) != *signature)
+ goto out;
+ io_addr++;
+ signature++;
+ length--;
+ } while (length);
+
+ retval = 1;
+out:
+ return retval;
+}
+
#endif /* __KERNEL__ */
#endif /* _ASM_IO_H */
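
The block above adds the portable iomap-style accessors (ioread*/iowrite*, their _rep variants, ioport_map() and pci_iomap()) to FRV, byte-swapping accesses that hit PCI space. A hedged driver-side sketch of how these are used; the BAR number and register offsets are made up for illustration:

	#include <linux/pci.h>
	#include <asm/io.h>

	#define MYDEV_REG_CTRL	0x00	/* hypothetical control register */
	#define MYDEV_REG_STAT	0x04	/* hypothetical status register */

	static int mydev_reset(struct pci_dev *pdev)
	{
		void __iomem *regs;
		unsigned int stat;

		regs = pci_iomap(pdev, 0, 0);		/* map all of BAR 0 */
		if (!regs)
			return -ENOMEM;

		iowrite32(1, regs + MYDEV_REG_CTRL);	/* assert reset */
		stat = ioread32(regs + MYDEV_REG_STAT);

		pci_iounmap(pdev, regs);
		return stat ? 0 : -EIO;
	}
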
diff --git a/include/asm-frv/ioctl.h b/include/asm-frv/ioctl.h
index 8aee7690554..b279fe06dfe 100644
--- a/include/asm-frv/ioctl.h
+++ b/include/asm-frv/ioctl.h
@@ -1,80 +1 @@
-/*
- * linux/ioctl.h for Linux by H.H. Bergman.
- */
-
-#ifndef _ASM_IOCTL_H
-#define _ASM_IOCTL_H
-
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and to avoid overwriting user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB -1 !
- */
-
-/*
- * I don't really have any idea about what this should look like, so
- * for the time being, this is heavily based on the PC definitions.
- */
-
-/*
- * The following is for compatibility across the various Linux
- * platforms. The i386 ioctl numbering scheme doesn't really enforce
- * a type field. De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here. Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS 8
-#define _IOC_TYPEBITS 8
-#define _IOC_SIZEBITS 14
-#define _IOC_DIRBITS 2
-
-#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT 0
-#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits.
- */
-#define _IOC_NONE 0U
-#define _IOC_WRITE 1U
-#define _IOC_READ 2U
-
-#define _IOC(dir,type,nr,size) \
- (((dir) << _IOC_DIRSHIFT) | \
- ((type) << _IOC_TYPESHIFT) | \
- ((nr) << _IOC_NRSHIFT) | \
- ((size) << _IOC_SIZESHIFT))
-
-/* used to create numbers */
-#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
-
-#endif /* _ASM_IOCTL_H */
-
+#include <asm-generic/ioctl.h>
diff --git a/include/asm-frv/mb-regs.h b/include/asm-frv/mb-regs.h
index c8f575fc42f..93fa732fb0c 100644
--- a/include/asm-frv/mb-regs.h
+++ b/include/asm-frv/mb-regs.h
@@ -68,6 +68,9 @@ do { \
#define __is_PCI_MEM(addr) \
((unsigned long)(addr) - __region_PCI_MEM < 0x08000000UL)
+#define __is_PCI_addr(addr) \
+ ((unsigned long)(addr) - __region_PCI_IO < 0x0c000000UL)
+
#define __get_CLKSW() ({ *(volatile unsigned long *)(__region_CS2 + 0x0130000cUL) & 0xffUL; })
#define __get_CLKIN() (__get_CLKSW() * 125U * 100000U / 24U)
@@ -149,6 +152,7 @@ do { \
#define __is_PCI_IO(addr) 0 /* no PCI */
#define __is_PCI_MEM(addr) 0
+#define __is_PCI_addr(addr) 0
#define __region_PCI_IO 0
#define __region_PCI_MEM 0
#define __flush_PCI_writes() do { } while(0)
diff --git a/include/asm-frv/mc146818rtc.h b/include/asm-frv/mc146818rtc.h
new file mode 100644
index 00000000000..90dfb7a633d
--- /dev/null
+++ b/include/asm-frv/mc146818rtc.h
@@ -0,0 +1,16 @@
+/* mc146818rtc.h: RTC defs
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_MC146818RTC_H
+#define _ASM_MC146818RTC_H
+
+
+#endif /* _ASM_MC146818RTC_H */
diff --git a/include/asm-frv/mman.h b/include/asm-frv/mman.h
index c684720dfbd..8af4a41c255 100644
--- a/include/asm-frv/mman.h
+++ b/include/asm-frv/mman.h
@@ -35,6 +35,7 @@
#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
+#define MADV_REMOVE 0x5 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-frv/module.h b/include/asm-frv/module.h
index 3223cfaef74..3d5c6360289 100644
--- a/include/asm-frv/module.h
+++ b/include/asm-frv/module.h
@@ -11,10 +11,18 @@
#ifndef _ASM_MODULE_H
#define _ASM_MODULE_H
-#define module_map(x) vmalloc(x)
-#define module_unmap(x) vfree(x)
-#define module_arch_init(x) (0)
-#define arch_init_modules(x) do { } while (0)
+struct mod_arch_specific
+{
+};
+
+#define Elf_Shdr Elf32_Shdr
+#define Elf_Sym Elf32_Sym
+#define Elf_Ehdr Elf32_Ehdr
+
+/*
+ * Include the architecture version.
+ */
+#define MODULE_ARCH_VERMAGIC __stringify(PROCESSOR_MODEL_NAME) " "
#endif /* _ASM_MODULE_H */
diff --git a/include/asm-frv/mutex.h b/include/asm-frv/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/include/asm-frv/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-frv/pci.h b/include/asm-frv/pci.h
index 1168451c275..598b0c6b695 100644
--- a/include/asm-frv/pci.h
+++ b/include/asm-frv/pci.h
@@ -57,6 +57,14 @@ extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
*/
#define PCI_DMA_BUS_IS_PHYS (1)
+/* pci_unmap_{page,single} is a nop so... */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
+#define pci_unmap_addr(PTR, ADDR_NAME) (0)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
+#define pci_unmap_len(PTR, LEN_NAME) (0)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
+
#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
enum pci_dma_burst_strategy *strat,
diff --git a/include/asm-frv/pgtable.h b/include/asm-frv/pgtable.h
index 844666377dc..d1c3b182c69 100644
--- a/include/asm-frv/pgtable.h
+++ b/include/asm-frv/pgtable.h
@@ -421,6 +421,11 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
}
/*
+ * Macro to mark a page protection value as "uncacheable"
+ */
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NOCACHE))
+
+/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
diff --git a/include/asm-frv/signal.h b/include/asm-frv/signal.h
index d407bde57ec..67366894780 100644
--- a/include/asm-frv/signal.h
+++ b/include/asm-frv/signal.h
@@ -151,7 +151,6 @@ typedef struct sigaltstack {
size_t ss_size;
} stack_t;
-extern int do_signal(struct pt_regs *regs, sigset_t *oldset);
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
#ifdef __KERNEL__
diff --git a/include/asm-frv/thread_info.h b/include/asm-frv/thread_info.h
index 60f6b2aee76..a5576e02dd1 100644
--- a/include/asm-frv/thread_info.h
+++ b/include/asm-frv/thread_info.h
@@ -110,8 +110,6 @@ register struct thread_info *__current_thread_info asm("gr15");
#endif
#define free_thread_info(info) kfree(info)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#else /* !__ASSEMBLY__ */
diff --git a/include/asm-frv/types.h b/include/asm-frv/types.h
index 50605df6d8a..2560f596a75 100644
--- a/include/asm-frv/types.h
+++ b/include/asm-frv/types.h
@@ -59,7 +59,6 @@ typedef unsigned int u32;
typedef signed long long s64;
typedef unsigned long long u64;
-typedef u64 u_quad_t;
/* Dma addresses are 32-bits wide. */
diff --git a/include/asm-frv/uaccess.h b/include/asm-frv/uaccess.h
index 991b50fbba2..b6bcbe01f6e 100644
--- a/include/asm-frv/uaccess.h
+++ b/include/asm-frv/uaccess.h
@@ -180,16 +180,16 @@ do { \
\
switch (sizeof(*(ptr))) { \
case 1: \
- __get_user_asm(__gu_err, __gu_val, ptr, "ub", "=r"); \
+ __get_user_asm(__gu_err, *(u8*)&__gu_val, ptr, "ub", "=r"); \
break; \
case 2: \
- __get_user_asm(__gu_err, __gu_val, ptr, "uh", "=r"); \
+ __get_user_asm(__gu_err, *(u16*)&__gu_val, ptr, "uh", "=r"); \
break; \
case 4: \
- __get_user_asm(__gu_err, __gu_val, ptr, "", "=r"); \
+ __get_user_asm(__gu_err, *(u32*)&__gu_val, ptr, "", "=r"); \
break; \
case 8: \
- __get_user_asm(__gu_err, __gu_val, ptr, "d", "=e"); \
+ __get_user_asm(__gu_err, *(u64*)&__gu_val, ptr, "d", "=e"); \
break; \
default: \
__gu_err = __get_user_bad(); \
diff --git a/include/asm-frv/unistd.h b/include/asm-frv/unistd.h
index 5cf989b448d..cde376a7a85 100644
--- a/include/asm-frv/unistd.h
+++ b/include/asm-frv/unistd.h
@@ -313,7 +313,7 @@ do { \
unsigned long __sr2 = (res); \
if (__builtin_expect(__sr2 >= (unsigned long)(-4095), 0)) { \
errno = (-__sr2); \
- __sr2 = ULONG_MAX; \
+ __sr2 = ~0UL; \
} \
return (type) __sr2; \
} while (0)
diff --git a/include/asm-frv/vga.h b/include/asm-frv/vga.h
new file mode 100644
index 00000000000..a702c800a22
--- /dev/null
+++ b/include/asm-frv/vga.h
@@ -0,0 +1,17 @@
+/* vga.h: VGA register stuff
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_VGA_H
+#define _ASM_VGA_H
+
+
+
+#endif /* _ASM_VGA_H */
diff --git a/include/asm-frv/xor.h b/include/asm-frv/xor.h
new file mode 100644
index 00000000000..c82eb12a5b1
--- /dev/null
+++ b/include/asm-frv/xor.h
@@ -0,0 +1 @@
+#include <asm-generic/xor.h>
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
new file mode 100644
index 00000000000..0fada8f16dc
--- /dev/null
+++ b/include/asm-generic/atomic.h
@@ -0,0 +1,117 @@
+#ifndef _ASM_GENERIC_ATOMIC_H
+#define _ASM_GENERIC_ATOMIC_H
+/*
+ * Copyright (C) 2005 Silicon Graphics, Inc.
+ * Christoph Lameter <clameter@sgi.com>
+ *
+ * Allows arch-independent atomic definitions to be provided without the need
+ * to edit every arch-specific atomic.h file.
+ */
+
+#include <asm/types.h>
+
+/*
+ * Support for atomic_long_t
+ *
+ * Casts for parameters are avoided for existing atomic functions in order to
+ * avoid issues with cast-as-lval under gcc 4.x and other limitations that the
+ * macros of a platform may have.
+ */
+
+#if BITS_PER_LONG == 64
+
+typedef atomic64_t atomic_long_t;
+
+#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
+
+static inline long atomic_long_read(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return (long)atomic64_read(v);
+}
+
+static inline void atomic_long_set(atomic_long_t *l, long i)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+	atomic64_set(v, i);
+}
+
+static inline void atomic_long_inc(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ atomic64_inc(v);
+}
+
+static inline void atomic_long_dec(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ atomic64_dec(v);
+}
+
+static inline void atomic_long_add(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ atomic64_add(i, v);
+}
+
+static inline void atomic_long_sub(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ atomic64_sub(i, v);
+}
+
+#else
+
+typedef atomic_t atomic_long_t;
+
+#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
+static inline long atomic_long_read(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_read(v);
+}
+
+static inline void atomic_long_set(atomic_long_t *l, long i)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_set(v, i);
+}
+
+static inline void atomic_long_inc(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_inc(v);
+}
+
+static inline void atomic_long_dec(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_dec(v);
+}
+
+static inline void atomic_long_add(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_add(i, v);
+}
+
+static inline void atomic_long_sub(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_sub(i, v);
+}
+
+#endif
+#endif
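
The point of the new header is that code can keep a long-sized counter in an atomic_long_t and have it map onto atomic_t on 32-bit and atomic64_t on 64-bit without touching every arch. A short usage sketch (the counter name is hypothetical):

	#include <asm/atomic.h>

	static atomic_long_t nr_items_scanned = ATOMIC_LONG_INIT(0);

	static void note_scanned(long n)
	{
		atomic_long_add(n, &nr_items_scanned);
	}

	static long read_scanned(void)
	{
		return atomic_long_read(&nr_items_scanned);
	}
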
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index 747d790295f..1b356207712 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -274,7 +274,7 @@ dma_get_cache_alignment(void)
{
/* no easy way to get cache size on all processors, so return
* the maximum possible, to be safe */
- return (1 << L1_CACHE_SHIFT_MAX);
+ return (1 << INTERNODE_CACHE_SHIFT);
}
static inline void
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
new file mode 100644
index 00000000000..3ae2c734754
--- /dev/null
+++ b/include/asm-generic/futex.h
@@ -0,0 +1,53 @@
+#ifndef _ASM_GENERIC_FUTEX_H
+#define _ASM_GENERIC_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <asm/errno.h>
+#include <asm/uaccess.h>
+
+static inline int
+futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ inc_preempt_count();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ case FUTEX_OP_ADD:
+ case FUTEX_OP_OR:
+ case FUTEX_OP_ANDN:
+ case FUTEX_OP_XOR:
+ default:
+ ret = -ENOSYS;
+ }
+
+ dec_preempt_count();
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+ case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+ case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+ case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+ case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+ case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+ default: ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
+
+#endif
+#endif
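
futex_atomic_op_inuser() unpacks a single 32-bit encoded_op: the operation in bits 28-31, the comparison in bits 24-27, and two sign-extended 12-bit arguments in bits 12-23 and 0-11. The generic version only validates the user pointer and returns -ENOSYS for the atomic part, so FUTEX_WAKE_OP degrades gracefully on these architectures. A sketch of how such an operand is packed, mirroring the decoding above (illustrative macro, not taken from this patch):

	#include <linux/futex.h>

	/* Pack op/cmp/oparg/cmparg the way futex_atomic_op_inuser() unpacks them. */
	#define EXAMPLE_FUTEX_OP(op, oparg, cmp, cmparg)		\
		(((op) << 28) | ((cmp) << 24) |				\
		 (((oparg) & 0xfff) << 12) | ((cmparg) & 0xfff))

	/* "add 1 to *uaddr and report whether the old value was > 0" */
	static const int example_encoded_op =
		EXAMPLE_FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0);
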
diff --git a/include/asm-generic/ioctl.h b/include/asm-generic/ioctl.h
new file mode 100644
index 00000000000..cd027298beb
--- /dev/null
+++ b/include/asm-generic/ioctl.h
@@ -0,0 +1,80 @@
+#ifndef _ASM_GENERIC_IOCTL_H
+#define _ASM_GENERIC_IOCTL_H
+
+/* ioctl command encoding: 32 bits total, command in lower 16 bits,
+ * size of the parameter structure in the lower 14 bits of the
+ * upper 16 bits.
+ * Encoding the size of the parameter structure in the ioctl request
+ * is useful for catching programs compiled with old versions
+ * and to avoid overwriting user space outside the user buffer area.
+ * The highest 2 bits are reserved for indicating the ``access mode''.
+ * NOTE: This limits the max parameter size to 16kB -1 !
+ */
+
+/*
+ * The following is for compatibility across the various Linux
+ * platforms. The generic ioctl numbering scheme doesn't really enforce
+ * a type field. De facto, however, the top 8 bits of the lower 16
+ * bits are indeed used as a type field, so we might just as well make
+ * this explicit here. Please be sure to use the decoding macros
+ * below from now on.
+ */
+#define _IOC_NRBITS 8
+#define _IOC_TYPEBITS 8
+#define _IOC_SIZEBITS 14
+#define _IOC_DIRBITS 2
+
+#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
+#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
+#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
+#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
+
+#define _IOC_NRSHIFT 0
+#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
+#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
+#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
+
+/*
+ * Direction bits.
+ */
+#define _IOC_NONE 0U
+#define _IOC_WRITE 1U
+#define _IOC_READ 2U
+
+#define _IOC(dir,type,nr,size) \
+ (((dir) << _IOC_DIRSHIFT) | \
+ ((type) << _IOC_TYPESHIFT) | \
+ ((nr) << _IOC_NRSHIFT) | \
+ ((size) << _IOC_SIZESHIFT))
+
+/* provoke compile error for invalid uses of size argument */
+extern unsigned int __invalid_size_argument_for_IOC;
+#define _IOC_TYPECHECK(t) \
+ ((sizeof(t) == sizeof(t[1]) && \
+ sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
+ sizeof(t) : __invalid_size_argument_for_IOC)
+
+/* used to create numbers */
+#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
+#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size)))
+#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
+#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
+#define _IOR_BAD(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
+#define _IOW_BAD(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
+#define _IOWR_BAD(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
+
+/* used to decode ioctl numbers.. */
+#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
+#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
+#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
+#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
+
+/* ...and for the drivers/sound files... */
+
+#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
+#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
+#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
+#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
+#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
+
+#endif /* _ASM_GENERIC_IOCTL_H */
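
With the encoding centralised here, drivers keep defining their command numbers with _IO/_IOR/_IOW/_IOWR and get the compile-time size check from _IOC_TYPECHECK() on every architecture that switches to the generic header. A small sketch of defining and decoding a command; the 'E' magic number and the structure are made up:

	#include <asm/ioctl.h>

	struct example_args {
		int	channel;
		int	value;
	};

	#define EXAMPLE_MAGIC		'E'
	#define EXAMPLE_GET_VALUE	_IOR(EXAMPLE_MAGIC, 0x01, struct example_args)
	#define EXAMPLE_SET_VALUE	_IOW(EXAMPLE_MAGIC, 0x02, struct example_args)

	/* For EXAMPLE_SET_VALUE:
	 *   _IOC_DIR()  == _IOC_WRITE
	 *   _IOC_TYPE() == 'E'
	 *   _IOC_NR()   == 0x02
	 *   _IOC_SIZE() == sizeof(struct example_args)
	 */
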
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
new file mode 100644
index 00000000000..40c6d1f8659
--- /dev/null
+++ b/include/asm-generic/mutex-dec.h
@@ -0,0 +1,110 @@
+/*
+ * asm-generic/mutex-dec.h
+ *
+ * Generic implementation of the mutex fastpath, based on atomic
+ * decrement/increment.
+ */
+#ifndef _ASM_GENERIC_MUTEX_DEC_H
+#define _ASM_GENERIC_MUTEX_DEC_H
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function MUST leave the value lower than
+ * 1 even when the "1" assertion wasn't true.
+ */
+#define __mutex_fastpath_lock(count, fail_fn) \
+do { \
+ if (unlikely(atomic_dec_return(count) < 0)) \
+ fail_fn(count); \
+ else \
+ smp_mb(); \
+} while (0)
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_dec_return(count) < 0))
+ return fail_fn(count);
+ else {
+ smp_mb();
+ return 0;
+ }
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the count from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value to
+ * 1, or to set it to a value lower than 1.
+ *
+ * If the implementation sets it to a value of lower than 1, then the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
+ * to return 0 otherwise.
+ */
+#define __mutex_fastpath_unlock(count, fail_fn) \
+do { \
+ smp_mb(); \
+ if (unlikely(atomic_inc_return(count) <= 0)) \
+ fail_fn(count); \
+} while (0)
+
+#define __mutex_slowpath_needs_to_unlock() 1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not leave
+ * it at 0 on failure.
+ *
+ * If the architecture has no effective trylock variant, it should call the
+ * <fail_fn> spinlock-based trylock variant unconditionally.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ /*
+ * We have two variants here. The cmpxchg based one is the best one
+	 * because it never induces a false contention state. It is included
+ * here because architectures using the inc/dec algorithms over the
+ * xchg ones are much more likely to support cmpxchg natively.
+ *
+ * If not we fall back to the spinlock based variant - that is
+ * just as efficient (and simpler) as a 'destructive' probing of
+ * the mutex state would be.
+ */
+#ifdef __HAVE_ARCH_CMPXCHG
+ if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
+ smp_mb();
+ return 1;
+ }
+ return 0;
+#else
+ return fail_fn(count);
+#endif
+}
+
+#endif
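
These helpers are not called by drivers directly; the generic mutex code invokes them with a slowpath function to fall back on, and each architecture merely decides which flavour its asm/mutex.h pulls in (mutex-dec.h here, mutex-xchg.h or a hand-written version elsewhere). A simplified sketch of the calling convention, loosely modelled on kernel/mutex.c rather than copied from it:

	#include <linux/mutex.h>
	#include <asm/mutex.h>

	static void example_lock_slowpath(atomic_t *lock_count)
	{
		/* put the caller on the wait list and sleep, omitted here */
	}

	static void example_mutex_lock(struct mutex *lock)
	{
		/* count == 1 means unlocked: take it by dropping the count below 1,
		 * otherwise fall into the contended slowpath. */
		__mutex_fastpath_lock(&lock->count, example_lock_slowpath);
	}
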
diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h
new file mode 100644
index 00000000000..5cf8b7ce0c4
--- /dev/null
+++ b/include/asm-generic/mutex-null.h
@@ -0,0 +1,24 @@
+/*
+ * asm-generic/mutex-null.h
+ *
+ * Generic implementation of the mutex fastpath, based on NOP :-)
+ *
+ * This is used by the mutex-debugging infrastructure, but it can also
+ * be used by architectures that (for whatever reason) want to use the
+ * spinlock based slowpath.
+ */
+#ifndef _ASM_GENERIC_MUTEX_NULL_H
+#define _ASM_GENERIC_MUTEX_NULL_H
+
+/* extra parameter only needed for mutex debugging: */
+#ifndef __IP__
+# define __IP__
+#endif
+
+#define __mutex_fastpath_lock(count, fail_fn) fail_fn(count __RET_IP__)
+#define __mutex_fastpath_lock_retval(count, fail_fn) fail_fn(count __RET_IP__)
+#define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count __RET_IP__)
+#define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count)
+#define __mutex_slowpath_needs_to_unlock() 1
+
+#endif
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
new file mode 100644
index 00000000000..1d24f47e6c4
--- /dev/null
+++ b/include/asm-generic/mutex-xchg.h
@@ -0,0 +1,117 @@
+/*
+ * asm-generic/mutex-xchg.h
+ *
+ * Generic implementation of the mutex fastpath, based on xchg().
+ *
+ * NOTE: An xchg based implementation is less optimal than an atomic
+ * decrement/increment based implementation. If your architecture
+ * has a reasonable atomic dec/inc then you should probably use
+ * asm-generic/mutex-dec.h instead, or you could open-code an
+ * optimized version in asm/mutex.h.
+ */
+#ifndef _ASM_GENERIC_MUTEX_XCHG_H
+#define _ASM_GENERIC_MUTEX_XCHG_H
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
+ * wasn't 1 originally. This function MUST leave the value lower than 1
+ * even when the "1" assertion wasn't true.
+ */
+#define __mutex_fastpath_lock(count, fail_fn) \
+do { \
+ if (unlikely(atomic_xchg(count, 0) != 1)) \
+ fail_fn(count); \
+ else \
+ smp_mb(); \
+} while (0)
+
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
+ * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_xchg(count, 0) != 1))
+ return fail_fn(count);
+ else {
+ smp_mb();
+ return 0;
+ }
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the mutex from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value to
+ * 1, or to set it to a value lower than one.
+ * If the implementation sets it to a value of lower than one, the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
+ * to return 0 otherwise.
+ */
+#define __mutex_fastpath_unlock(count, fail_fn) \
+do { \
+ smp_mb(); \
+ if (unlikely(atomic_xchg(count, 1) != 0)) \
+ fail_fn(count); \
+} while (0)
+
+#define __mutex_slowpath_needs_to_unlock() 0
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: spinlock based trylock implementation
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not leave
+ * it at 0 on failure.
+ *
+ * If the architecture has no effective trylock variant, it should call the
+ * <fail_fn> spinlock-based trylock variant unconditionally.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ int prev = atomic_xchg(count, 0);
+
+ if (unlikely(prev < 0)) {
+ /*
+ * The lock was marked contended so we must restore that
+ * state. If while doing so we get back a prev value of 1
+ * then we just own it.
+ *
+ * [ In the rare case of the mutex going to 1, to 0, to -1
+ * and then back to 0 in this few-instructions window,
+ * this has the potential to trigger the slowpath for the
+ * owner's unlock path needlessly, but that's not a problem
+ * in practice. ]
+ */
+ prev = atomic_xchg(count, prev);
+ if (prev < 0)
+ prev = 0;
+ }
+ smp_mb();
+
+ return prev;
+}
+
+#endif
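
The trylock above is the subtle one: xchg() cannot look without writing, so probing a contended mutex momentarily writes 0 and must then restore the old negative value. Assuming the usual count semantics (1 = unlocked, 0 = locked, negative = locked with waiters), the transitions are:

	/*   count == 1:  xchg(count, 0) returns 1  -> we own the mutex, return 1
	 *   count == 0:  xchg(count, 0) returns 0  -> no state changed, return 0
	 *   count == -1: xchg(count, 0) returns -1 -> write -1 back so the owner's
	 *                unlock still sees the contended state, return 0
	 *                (unless the owner unlocked in that window, in which case
	 *                the restoring xchg returns 1 and we own the mutex after all)
	 */
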
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 094d4917c1a..35de20cf8fa 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -10,6 +10,8 @@
#define ALIGN_FUNCTION() . = ALIGN(8)
#define RODATA \
+ . = ALIGN(4096); \
+ __start_rodata = .; \
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
*(.rodata) *(.rodata.*) \
*(__vermagic) /* Kernel version magic */ \
@@ -74,6 +76,8 @@
__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
*(__ksymtab_strings) \
} \
+ __end_rodata = .; \
+ . = ALIGN(4096); \
\
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
diff --git a/include/asm-h8300/atomic.h b/include/asm-h8300/atomic.h
index f23d86819ea..21f54428c86 100644
--- a/include/asm-h8300/atomic.h
+++ b/include/asm-h8300/atomic.h
@@ -95,6 +95,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int ret;
@@ -137,4 +139,5 @@ static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
+#include <asm-generic/atomic.h>
#endif /* __ARCH_H8300_ATOMIC __ */
diff --git a/include/asm-h8300/futex.h b/include/asm-h8300/futex.h
index 9feff4ce142..6a332a9f099 100644
--- a/include/asm-h8300/futex.h
+++ b/include/asm-h8300/futex.h
@@ -1,53 +1,6 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
-#ifdef __KERNEL__
+#include <asm-generic/futex.h>
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- inc_preempt_count();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- dec_preempt_count();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-#endif
#endif
diff --git a/include/asm-h8300/ioctl.h b/include/asm-h8300/ioctl.h
index 031c623478b..b279fe06dfe 100644
--- a/include/asm-h8300/ioctl.h
+++ b/include/asm-h8300/ioctl.h
@@ -1,80 +1 @@
-/* $Id: ioctl.h,v 1.1 2002/11/19 02:09:26 gerg Exp $
- *
- * linux/ioctl.h for Linux by H.H. Bergman.
- */
-
-#ifndef _H8300_IOCTL_H
-#define _H8300_IOCTL_H
-
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and to avoid overwriting user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB -1 !
- */
-
-/*
- * I don't really have any idea about what this should look like, so
- * for the time being, this is heavily based on the PC definitions.
- */
-
-/*
- * The following is for compatibility across the various Linux
- * platforms. The i386 ioctl numbering scheme doesn't really enforce
- * a type field. De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here. Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS 8
-#define _IOC_TYPEBITS 8
-#define _IOC_SIZEBITS 14
-#define _IOC_DIRBITS 2
-
-#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT 0
-#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits.
- */
-#define _IOC_NONE 0U
-#define _IOC_WRITE 1U
-#define _IOC_READ 2U
-
-#define _IOC(dir,type,nr,size) \
- (((dir) << _IOC_DIRSHIFT) | \
- ((type) << _IOC_TYPESHIFT) | \
- ((nr) << _IOC_NRSHIFT) | \
- ((size) << _IOC_SIZESHIFT))
-
-/* used to create numbers */
-#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
-
-#endif /* _H8300_IOCTL_H */
+#include <asm-generic/ioctl.h>
diff --git a/include/asm-h8300/irq.h b/include/asm-h8300/irq.h
index 5027181ed06..73065f5bda0 100644
--- a/include/asm-h8300/irq.h
+++ b/include/asm-h8300/irq.h
@@ -61,11 +61,6 @@ static __inline__ int irq_canonicalize(int irq)
extern void enable_irq(unsigned int);
extern void disable_irq(unsigned int);
-
-/*
- * Some drivers want these entry points
- */
-#define enable_irq_nosync(x) enable_irq(x)
#define disable_irq_nosync(x) disable_irq(x)
struct irqaction;
diff --git a/include/asm-h8300/mman.h b/include/asm-h8300/mman.h
index 63f727a5985..744a8fb485c 100644
--- a/include/asm-h8300/mman.h
+++ b/include/asm-h8300/mman.h
@@ -35,6 +35,7 @@
#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
+#define MADV_REMOVE 0x5 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-h8300/mutex.h b/include/asm-h8300/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/include/asm-h8300/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-h8300/page.h b/include/asm-h8300/page.h
index e8c02b8c2d9..cd35b1cc6cd 100644
--- a/include/asm-h8300/page.h
+++ b/include/asm-h8300/page.h
@@ -13,12 +13,6 @@
#include <asm/setup.h>
-#if !defined(CONFIG_SMALL_TASKS) && PAGE_SHIFT < 13
-#define KTHREAD_SIZE (8192)
-#else
-#define KTHREAD_SIZE PAGE_SIZE
-#endif
-
#ifndef __ASSEMBLY__
#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
diff --git a/include/asm-h8300/thread_info.h b/include/asm-h8300/thread_info.h
index bfcc755c3bb..45f09dc9caf 100644
--- a/include/asm-h8300/thread_info.h
+++ b/include/asm-h8300/thread_info.h
@@ -69,8 +69,6 @@ static inline struct thread_info *current_thread_info(void)
#define alloc_thread_info(tsk) ((struct thread_info *) \
__get_free_pages(GFP_KERNEL, 1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#endif /* __ASSEMBLY__ */
/*
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index 8c454aa58ac..d30b8571573 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -132,6 +132,11 @@ extern unsigned int nmi_watchdog;
extern int disable_timer_pin_1;
+void smp_send_timer_broadcast_ipi(struct pt_regs *regs);
+void switch_APIC_timer_to_ipi(void *cpumask);
+void switch_ipi_to_APIC_timer(void *cpumask);
+#define ARCH_APICTIMER_STOPS_ON_C3 1
+
#else /* !CONFIG_X86_LOCAL_APIC */
static inline void lapic_shutdown(void) { }
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index c68557aa04b..de649d3aa2d 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -216,6 +216,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
}
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
* atomic_add_unless - add unless the number is a given value
@@ -254,4 +255,5 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
+#include <asm-generic/atomic.h>
#endif
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index 4807aa1d2e3..fe0819fe9c6 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -43,7 +43,7 @@ static inline void set_bit(int nr, volatile unsigned long * addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btsl %1,%0"
- :"=m" (ADDR)
+ :"+m" (ADDR)
:"Ir" (nr));
}
@@ -60,7 +60,7 @@ static inline void __set_bit(int nr, volatile unsigned long * addr)
{
__asm__(
"btsl %1,%0"
- :"=m" (ADDR)
+ :"+m" (ADDR)
:"Ir" (nr));
}
@@ -78,7 +78,7 @@ static inline void clear_bit(int nr, volatile unsigned long * addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btrl %1,%0"
- :"=m" (ADDR)
+ :"+m" (ADDR)
:"Ir" (nr));
}
@@ -86,7 +86,7 @@ static inline void __clear_bit(int nr, volatile unsigned long * addr)
{
__asm__ __volatile__(
"btrl %1,%0"
- :"=m" (ADDR)
+ :"+m" (ADDR)
:"Ir" (nr));
}
#define smp_mb__before_clear_bit() barrier()
@@ -105,7 +105,7 @@ static inline void __change_bit(int nr, volatile unsigned long * addr)
{
__asm__ __volatile__(
"btcl %1,%0"
- :"=m" (ADDR)
+ :"+m" (ADDR)
:"Ir" (nr));
}
@@ -123,7 +123,7 @@ static inline void change_bit(int nr, volatile unsigned long * addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btcl %1,%0"
- :"=m" (ADDR)
+ :"+m" (ADDR)
:"Ir" (nr));
}
@@ -142,7 +142,7 @@ static inline int test_and_set_bit(int nr, volatile unsigned long * addr)
__asm__ __volatile__( LOCK_PREFIX
"btsl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
+ :"=r" (oldbit),"+m" (ADDR)
:"Ir" (nr) : "memory");
return oldbit;
}
@@ -162,7 +162,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long * addr)
__asm__(
"btsl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
+ :"=r" (oldbit),"+m" (ADDR)
:"Ir" (nr));
return oldbit;
}
@@ -182,7 +182,7 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long * addr)
__asm__ __volatile__( LOCK_PREFIX
"btrl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
+ :"=r" (oldbit),"+m" (ADDR)
:"Ir" (nr) : "memory");
return oldbit;
}
@@ -202,7 +202,7 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
__asm__(
"btrl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
+ :"=r" (oldbit),"+m" (ADDR)
:"Ir" (nr));
return oldbit;
}
@@ -214,7 +214,7 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
__asm__ __volatile__(
"btcl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
+ :"=r" (oldbit),"+m" (ADDR)
:"Ir" (nr) : "memory");
return oldbit;
}
@@ -233,7 +233,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long* addr)
__asm__ __volatile__( LOCK_PREFIX
"btcl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
+ :"=r" (oldbit),"+m" (ADDR)
:"Ir" (nr) : "memory");
return oldbit;
}
@@ -332,9 +332,9 @@ static inline unsigned long __ffs(unsigned long word)
* Returns the bit-number of the first set bit, not the number of the byte
* containing a bit.
*/
-static inline int find_first_bit(const unsigned long *addr, unsigned size)
+static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
{
- int x = 0;
+ unsigned x = 0;
while (x < size) {
unsigned long val = *addr++;
@@ -367,11 +367,6 @@ static inline unsigned long ffz(unsigned long word)
return word;
}
-/*
- * fls: find last bit set.
- */
-
-#define fls(x) generic_fls(x)
#define fls64(x) generic_fls64(x)
#ifdef __KERNEL__
@@ -415,6 +410,23 @@ static inline int ffs(int x)
}
/**
+ * fls - find last bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ */
+static inline int fls(int x)
+{
+ int r;
+
+ __asm__("bsrl %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movl $-1,%0\n"
+ "1:" : "=r" (r) : "rm" (x));
+ return r+1;
+}
+
+/**
 * hweightN - returns the hamming weight of an N-bit word
* @x: the word to weigh
*
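
The new inline fls() replaces the generic_fls() fallback with a single bsrl; the jnz/movl $-1 sequence keeps fls(0) == 0, which the generic version also guarantees. A few expected values as a quick sanity check (illustrative helper, not part of the patch):

	#include <linux/kernel.h>
	#include <linux/bitops.h>

	/* Returns non-zero if the new fls() behaves like the generic one. */
	static int fls_sanity_check(void)
	{
		return fls(0) == 0 && fls(1) == 1 &&
		       fls(0x20) == 6 && fls(0x80000000) == 32;
	}
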
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index ea54540638d..50233e0345f 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -8,9 +8,6 @@
* <rreilova@ececs.uc.edu>
* - Channing Corn (tests & fixes),
* - Andrew D. Balsa (code cleanup).
- *
- * Pentium III FXSR, SSE support
- * Gareth Hughes <gareth@valinux.com>, May 2000
*/
/*
@@ -76,25 +73,7 @@ static void __init check_fpu(void)
return;
}
-/* Enable FXSR and company _before_ testing for FP problems. */
- /*
- * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
- */
- if (offsetof(struct task_struct, thread.i387.fxsave) & 15) {
- extern void __buggy_fxsr_alignment(void);
- __buggy_fxsr_alignment();
- }
- if (cpu_has_fxsr) {
- printk(KERN_INFO "Enabling fast FPU save and restore... ");
- set_in_cr4(X86_CR4_OSFXSR);
- printk("done.\n");
- }
- if (cpu_has_xmm) {
- printk(KERN_INFO "Enabling unmasked SIMD FPU exception support... ");
- set_in_cr4(X86_CR4_OSXMMEXCPT);
- printk("done.\n");
- }
-
+/* trap_init() enabled FXSR and company _before_ testing for FP problems here. */
/* Test for the divl bug.. */
__asm__("fninit\n\t"
"fldl %1\n\t"
diff --git a/include/asm-i386/cache.h b/include/asm-i386/cache.h
index 849788710fe..615911e5bd2 100644
--- a/include/asm-i386/cache.h
+++ b/include/asm-i386/cache.h
@@ -10,6 +10,4 @@
#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
-
#endif
diff --git a/include/asm-i386/cacheflush.h b/include/asm-i386/cacheflush.h
index 2ea36dea37d..7199f7b326f 100644
--- a/include/asm-i386/cacheflush.h
+++ b/include/asm-i386/cacheflush.h
@@ -31,4 +31,8 @@ int change_page_attr(struct page *page, int numpages, pgprot_t prot);
void kernel_map_pages(struct page *page, int numpages, int enable);
#endif
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+#endif
+
#endif /* _I386_CACHEFLUSH_H */
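
mark_rodata_ro() is only declared here; its implementation walks the __start_rodata..__end_rodata range that the vmlinux.lds.h change above now page-aligns and exports, and remaps it read-only when CONFIG_DEBUG_RODATA is set. A rough sketch of the idea, simplified from the arch code rather than quoted from it:

	#include <linux/mm.h>
	#include <asm/cacheflush.h>

	extern char __start_rodata[], __end_rodata[];

	void mark_rodata_ro(void)
	{
		unsigned long addr = (unsigned long)__start_rodata;

		for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
			change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);

		/* flush stale, still-writable TLB entries on all CPUs */
		global_flush_tlb();
	}
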
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index ff1187e80c3..c4ec2a4d8fd 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -69,6 +69,7 @@
#define X86_FEATURE_K7 (3*32+ 5) /* Athlon */
#define X86_FEATURE_P3 (3*32+ 6) /* P3 */
#define X86_FEATURE_P4 (3*32+ 7) /* P4 */
+#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index 29b851a18c6..494e73bca09 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -15,9 +15,6 @@
#include <asm/mmu.h>
extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
-DECLARE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
-
-#define get_cpu_gdt_table(_cpu) (per_cpu(cpu_gdt_table,_cpu))
DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
@@ -29,6 +26,11 @@ struct Xgt_desc_struct {
extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
+static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
+{
+ return ((struct desc_struct *)cpu_gdt_descr[cpu].address);
+}
+
#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
diff --git a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h
index e56c335f8ef..9cf20cacf76 100644
--- a/include/asm-i386/dma-mapping.h
+++ b/include/asm-i386/dma-mapping.h
@@ -6,6 +6,7 @@
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
+#include <asm/bug.h>
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
@@ -20,7 +21,9 @@ static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction direction)
{
- BUG_ON(direction == DMA_NONE);
+ if (direction == DMA_NONE)
+ BUG();
+ WARN_ON(size == 0);
flush_write_buffers();
return virt_to_phys(ptr);
}
@@ -29,7 +32,8 @@ static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction)
{
- BUG_ON(direction == DMA_NONE);
+ if (direction == DMA_NONE)
+ BUG();
}
static inline int
@@ -38,7 +42,9 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
{
int i;
- BUG_ON(direction == DMA_NONE);
+ if (direction == DMA_NONE)
+ BUG();
+ WARN_ON(nents == 0 || sg[0].length == 0);
for (i = 0; i < nents; i++ ) {
BUG_ON(!sg[i].page);
@@ -150,7 +156,7 @@ dma_get_cache_alignment(void)
{
/* no easy way to get cache size on all x86, so return the
* maximum possible, to be safe */
- return (1 << L1_CACHE_SHIFT_MAX);
+ return (1 << INTERNODE_CACHE_SHIFT);
}
#define dma_is_consistent(d) (1)
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h
index 6747006743f..152d0baa576 100644
--- a/include/asm-i386/i387.h
+++ b/include/asm-i386/i387.h
@@ -49,19 +49,19 @@ static inline void __save_init_fpu( struct task_struct *tsk )
X86_FEATURE_FXSR,
"m" (tsk->thread.i387.fxsave)
:"memory");
- tsk->thread_info->status &= ~TS_USEDFPU;
+ task_thread_info(tsk)->status &= ~TS_USEDFPU;
}
#define __unlazy_fpu( tsk ) do { \
- if ((tsk)->thread_info->status & TS_USEDFPU) \
+ if (task_thread_info(tsk)->status & TS_USEDFPU) \
save_init_fpu( tsk ); \
} while (0)
#define __clear_fpu( tsk ) \
do { \
- if ((tsk)->thread_info->status & TS_USEDFPU) { \
+ if (task_thread_info(tsk)->status & TS_USEDFPU) { \
asm volatile("fnclex ; fwait"); \
- (tsk)->thread_info->status &= ~TS_USEDFPU; \
+ task_thread_info(tsk)->status &= ~TS_USEDFPU; \
stts(); \
} \
} while (0)
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index 7babb97a02e..03233c2ab82 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -131,6 +131,11 @@ extern void iounmap(volatile void __iomem *addr);
extern void *bt_ioremap(unsigned long offset, unsigned long size);
extern void bt_iounmap(void *addr, unsigned long size);
+/* Use early IO mappings for DMI because it's initialized early */
+#define dmi_ioremap bt_ioremap
+#define dmi_iounmap bt_iounmap
+#define dmi_alloc alloc_bootmem
+
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
diff --git a/include/asm-i386/ioctl.h b/include/asm-i386/ioctl.h
index 543f7843d55..b279fe06dfe 100644
--- a/include/asm-i386/ioctl.h
+++ b/include/asm-i386/ioctl.h
@@ -1,85 +1 @@
-/* $Id: ioctl.h,v 1.5 1993/07/19 21:53:50 root Exp root $
- *
- * linux/ioctl.h for Linux by H.H. Bergman.
- */
-
-#ifndef _ASMI386_IOCTL_H
-#define _ASMI386_IOCTL_H
-
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and to avoid overwriting user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB -1 !
- */
-
-/*
- * The following is for compatibility across the various Linux
- * platforms. The i386 ioctl numbering scheme doesn't really enforce
- * a type field. De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here. Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS 8
-#define _IOC_TYPEBITS 8
-#define _IOC_SIZEBITS 14
-#define _IOC_DIRBITS 2
-
-#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT 0
-#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits.
- */
-#define _IOC_NONE 0U
-#define _IOC_WRITE 1U
-#define _IOC_READ 2U
-
-#define _IOC(dir,type,nr,size) \
- (((dir) << _IOC_DIRSHIFT) | \
- ((type) << _IOC_TYPESHIFT) | \
- ((nr) << _IOC_NRSHIFT) | \
- ((size) << _IOC_SIZESHIFT))
-
-/* provoke compile error for invalid uses of size argument */
-extern unsigned int __invalid_size_argument_for_IOC;
-#define _IOC_TYPECHECK(t) \
- ((sizeof(t) == sizeof(t[1]) && \
- sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
- sizeof(t) : __invalid_size_argument_for_IOC)
-
-/* used to create numbers */
-#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOR_BAD(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW_BAD(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR_BAD(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
-
-#endif /* _ASMI386_IOCTL_H */
+#include <asm-generic/ioctl.h>
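For illustration, the ioctl number encoding stays available through <asm-generic/ioctl.h>; the 'E' magic and command names below are hypothetical:

	#define EXAMPLE_IOC_MAGIC	'E'
	#define EXAMPLE_GET_VALUE	_IOR(EXAMPLE_IOC_MAGIC, 0x01, int)	/* driver -> user */
	#define EXAMPLE_SET_VALUE	_IOW(EXAMPLE_IOC_MAGIC, 0x02, int)	/* user -> driver */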
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
index 270f1986b19..5169d7af456 100644
--- a/include/asm-i386/irq.h
+++ b/include/asm-i386/irq.h
@@ -21,8 +21,6 @@ static __inline__ int irq_canonicalize(int irq)
return ((irq == 2) ? 9 : irq);
}
-extern void release_vm86_irqs(struct task_struct *);
-
#ifdef CONFIG_X86_LOCAL_APIC
# define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */
#endif
diff --git a/include/asm-i386/kexec.h b/include/asm-i386/kexec.h
index 6ed2a03e37b..53f0e06672d 100644
--- a/include/asm-i386/kexec.h
+++ b/include/asm-i386/kexec.h
@@ -2,6 +2,8 @@
#define _I386_KEXEC_H
#include <asm/fixmap.h>
+#include <asm/ptrace.h>
+#include <asm/string.h>
/*
* KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
@@ -26,8 +28,49 @@
#define KEXEC_ARCH KEXEC_ARCH_386
#define MAX_NOTE_BYTES 1024
-typedef u32 note_buf_t[MAX_NOTE_BYTES/4];
-extern note_buf_t crash_notes[];
+/* The CPU does not save ss and esp on the stack if execution is already
+ * running in kernel mode at the time of NMI occurrence. This code
+ * fixes it.
+ */
+static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
+ struct pt_regs *oldregs)
+{
+ memcpy(newregs, oldregs, sizeof(*newregs));
+ newregs->esp = (unsigned long)&(oldregs->esp);
+ __asm__ __volatile__(
+ "xorl %%eax, %%eax\n\t"
+ "movw %%ss, %%ax\n\t"
+ :"=a"(newregs->xss));
+}
+
+/*
+ * This function is responsible for capturing register states if coming
+ * via panic; otherwise it just fixes up the ss and esp if coming via a
+ * kernel mode exception.
+ */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+ struct pt_regs *oldregs)
+{
+ if (oldregs)
+ crash_fixup_ss_esp(newregs, oldregs);
+ else {
+ __asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->ebx));
+ __asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->ecx));
+ __asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->edx));
+ __asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->esi));
+ __asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->edi));
+ __asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->ebp));
+ __asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->eax));
+ __asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->esp));
+ __asm__ __volatile__("movw %%ss, %%ax;" :"=a"(newregs->xss));
+ __asm__ __volatile__("movw %%cs, %%ax;" :"=a"(newregs->xcs));
+ __asm__ __volatile__("movw %%ds, %%ax;" :"=a"(newregs->xds));
+ __asm__ __volatile__("movw %%es, %%ax;" :"=a"(newregs->xes));
+ __asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->eflags));
+
+ newregs->eip = (unsigned long)current_text_addr();
+ }
+}
#endif /* _I386_KEXEC_H */
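For illustration, a minimal sketch of how a crash path might call crash_setup_regs(); the function name below is hypothetical and error handling is omitted:

	static void example_crash_path(struct pt_regs *oldregs)
	{
		struct pt_regs regs;

		/* oldregs != NULL: exception path, only ss/esp need fixing up;
		 * oldregs == NULL: panic path, the live registers are captured */
		crash_setup_regs(&regs, oldregs);
		/* regs would then be written into the per-cpu crash note */
	}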
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
index ca916a89287..27cac050a60 100644
--- a/include/asm-i386/kprobes.h
+++ b/include/asm-i386/kprobes.h
@@ -40,6 +40,7 @@ typedef u8 kprobe_opcode_t;
#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
#define ARCH_SUPPORTS_KRETPROBES
+#define arch_remove_kprobe(p) do {} while (0)
void kretprobe_trampoline(void);
@@ -76,14 +77,6 @@ static inline void restore_interrupts(struct pt_regs *regs)
local_irq_enable();
}
-#ifdef CONFIG_KPROBES
extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
-#else /* !CONFIG_KPROBES */
-static inline int kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data)
-{
- return 0;
-}
-#endif
#endif /* _ASM_KPROBES_H */
diff --git a/include/asm-i386/mach-bigsmp/mach_apic.h b/include/asm-i386/mach-bigsmp/mach_apic.h
index ba936d4daed..18b19a77344 100644
--- a/include/asm-i386/mach-bigsmp/mach_apic.h
+++ b/include/asm-i386/mach-bigsmp/mach_apic.h
@@ -1,17 +1,10 @@
#ifndef __ASM_MACH_APIC_H
#define __ASM_MACH_APIC_H
-#include <asm/smp.h>
-
-#define SEQUENTIAL_APICID
-#ifdef SEQUENTIAL_APICID
-#define xapic_phys_to_log_apicid(phys_apic) ( (1ul << ((phys_apic) & 0x3)) |\
- ((phys_apic<<2) & (~0xf)) )
-#elif CLUSTERED_APICID
-#define xapic_phys_to_log_apicid(phys_apic) ( (1ul << ((phys_apic) & 0x3)) |\
- ((phys_apic) & (~0xf)) )
-#endif
-
-#define NO_BALANCE_IRQ (1)
+
+
+extern u8 bios_cpu_apicid[];
+
+#define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu])
#define esr_disable (1)
static inline int apic_id_registered(void)
@@ -19,7 +12,6 @@ static inline int apic_id_registered(void)
return (1);
}
-#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
/* Round robin the irqs among the online cpus */
static inline cpumask_t target_cpus(void)
{
@@ -32,29 +24,34 @@ static inline cpumask_t target_cpus(void)
} while (cpu >= NR_CPUS);
return cpumask_of_cpu(cpu);
}
-#define TARGET_CPUS (target_cpus())
-#define INT_DELIVERY_MODE dest_Fixed
-#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
+#undef APIC_DEST_LOGICAL
+#define APIC_DEST_LOGICAL 0
+#define TARGET_CPUS (target_cpus())
+#define APIC_DFR_VALUE (APIC_DFR_FLAT)
+#define INT_DELIVERY_MODE (dest_Fixed)
+#define INT_DEST_MODE (0) /* phys delivery to target proc */
+#define NO_BALANCE_IRQ (0)
+#define WAKE_SECONDARY_VIA_INIT
+
static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
{
- return 0;
+ return (0);
}
-/* we don't use the phys_cpu_present_map to indicate apicid presence */
-static inline unsigned long check_apicid_present(int bit)
+static inline unsigned long check_apicid_present(int bit)
{
- return 1;
+ return (1);
}
-#define apicid_cluster(apicid) (apicid & 0xF0)
-
-static inline unsigned long calculate_ldr(unsigned long old)
+static inline unsigned long calculate_ldr(int cpu)
{
- unsigned long id;
- id = xapic_phys_to_log_apicid(hard_smp_processor_id());
- return ((old & ~APIC_LDR_MASK) | SET_APIC_LOGICAL_ID(id));
+ unsigned long val, id;
+ val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+ id = xapic_phys_to_log_apicid(cpu);
+ val |= SET_APIC_LOGICAL_ID(id);
+ return val;
}
/*
@@ -67,37 +64,35 @@ static inline unsigned long calculate_ldr(unsigned long old)
static inline void init_apic_ldr(void)
{
unsigned long val;
+ int cpu = smp_processor_id();
apic_write_around(APIC_DFR, APIC_DFR_VALUE);
- val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
- val = calculate_ldr(val);
+ val = calculate_ldr(cpu);
apic_write_around(APIC_LDR, val);
}
static inline void clustered_apic_check(void)
{
printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
- "Cluster", nr_ioapics);
+ "Physflat", nr_ioapics);
}
static inline int multi_timer_check(int apic, int irq)
{
- return 0;
+ return (0);
}
static inline int apicid_to_node(int logical_apicid)
{
- return 0;
+ return (0);
}
-extern u8 bios_cpu_apicid[];
-
static inline int cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < NR_CPUS)
- return (int)bios_cpu_apicid[mps_cpu];
- else
- return BAD_APICID;
+ return (int) bios_cpu_apicid[mps_cpu];
+
+ return BAD_APICID;
}
static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
@@ -109,10 +104,10 @@ extern u8 cpu_2_logical_apicid[];
/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
- if (cpu >= NR_CPUS)
- return BAD_APICID;
- return (int)cpu_2_logical_apicid[cpu];
- }
+ if (cpu >= NR_CPUS)
+ return BAD_APICID;
+ return cpu_physical_id(cpu);
+}
static inline int mpc_apic_id(struct mpc_config_processor *m,
struct mpc_config_translation *translation_record)
@@ -128,11 +123,9 @@ static inline int mpc_apic_id(struct mpc_config_processor *m,
static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
/* For clustered we don't have a good way to do this yet - hack */
- return physids_promote(0xFUL);
+ return physids_promote(0xFFL);
}
-#define WAKE_SECONDARY_VIA_INIT
-
static inline void setup_portio_remap(void)
{
}
diff --git a/include/asm-i386/mach-bigsmp/mach_apicdef.h b/include/asm-i386/mach-bigsmp/mach_apicdef.h
index 23e58b317c7..a58ab5a75c8 100644
--- a/include/asm-i386/mach-bigsmp/mach_apicdef.h
+++ b/include/asm-i386/mach-bigsmp/mach_apicdef.h
@@ -1,11 +1,11 @@
#ifndef __ASM_MACH_APICDEF_H
#define __ASM_MACH_APICDEF_H
-#define APIC_ID_MASK (0x0F<<24)
+#define APIC_ID_MASK (0xFF<<24)
static inline unsigned get_apic_id(unsigned long x)
{
- return (((x)>>24)&0x0F);
+ return (((x)>>24)&0xFF);
}
#define GET_APIC_ID(x) get_apic_id(x)
diff --git a/include/asm-i386/mach-default/mach_ipi.h b/include/asm-i386/mach-default/mach_ipi.h
index cc756a67cd6..a1d0072e36b 100644
--- a/include/asm-i386/mach-default/mach_ipi.h
+++ b/include/asm-i386/mach-default/mach_ipi.h
@@ -15,11 +15,9 @@ static inline void __local_send_IPI_allbutself(int vector)
{
if (no_broadcast) {
cpumask_t mask = cpu_online_map;
- int this_cpu = get_cpu();
- cpu_clear(this_cpu, mask);
+ cpu_clear(smp_processor_id(), mask);
send_IPI_mask(mask, vector);
- put_cpu();
} else
__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}
diff --git a/include/asm-i386/mman.h b/include/asm-i386/mman.h
index 196619a8385..ba4941e6f64 100644
--- a/include/asm-i386/mman.h
+++ b/include/asm-i386/mman.h
@@ -35,6 +35,7 @@
#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
+#define MADV_REMOVE 0x5 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
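For illustration, a minimal userspace sketch of the new MADV_REMOVE flag, assuming the standard madvise(2) prototype; addr and len are hypothetical:

	if (madvise(addr, len, MADV_REMOVE) == -1)
		perror("madvise(MADV_REMOVE)");	/* frees the backing store, e.g. on tmpfs */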
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
index 620a90641ea..74f595d8057 100644
--- a/include/asm-i386/mmzone.h
+++ b/include/asm-i386/mmzone.h
@@ -76,11 +76,6 @@ static inline int pfn_to_nid(unsigned long pfn)
 * Following are macros that each numa implementation must define.
*/
-/*
- * Given a kernel address, find the home node of the underlying memory.
- */
-#define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
-
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) \
({ \
diff --git a/include/asm-i386/module.h b/include/asm-i386/module.h
index eb7f2b4234a..424661d25bd 100644
--- a/include/asm-i386/module.h
+++ b/include/asm-i386/module.h
@@ -52,8 +52,10 @@ struct mod_arch_specific
#define MODULE_PROC_FAMILY "CYRIXIII "
#elif defined CONFIG_MVIAC3_2
#define MODULE_PROC_FAMILY "VIAC3-2 "
-#elif CONFIG_MGEODEGX1
+#elif defined CONFIG_MGEODEGX1
#define MODULE_PROC_FAMILY "GEODEGX1 "
+#elif defined CONFIG_MGEODE_LX
+#define MODULE_PROC_FAMILY "GEODE "
#else
#error unknown processor family
#endif
diff --git a/include/asm-i386/mpspec_def.h b/include/asm-i386/mpspec_def.h
index a961093dbf8..76feedf85a8 100644
--- a/include/asm-i386/mpspec_def.h
+++ b/include/asm-i386/mpspec_def.h
@@ -75,7 +75,7 @@ struct mpc_config_bus
{
unsigned char mpc_type;
unsigned char mpc_busid;
- unsigned char mpc_bustype[6] __attribute((packed));
+ unsigned char mpc_bustype[6];
};
/* List of Bus Type string values, Intel MP Spec. */
diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h
new file mode 100644
index 00000000000..9b2199e829f
--- /dev/null
+++ b/include/asm-i386/mutex.h
@@ -0,0 +1,136 @@
+/*
+ * Assembly implementation of the mutex fastpath, based on atomic
+ * decrement/increment.
+ *
+ * started by Ingo Molnar:
+ *
+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ */
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
+ * wasn't 1 originally. This function MUST leave the value lower than 1
+ * even when the "1" assertion wasn't true.
+ */
+#define __mutex_fastpath_lock(count, fail_fn) \
+do { \
+ unsigned int dummy; \
+ \
+ typecheck(atomic_t *, count); \
+ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ \
+ __asm__ __volatile__( \
+ LOCK " decl (%%eax) \n" \
+ " js 2f \n" \
+ "1: \n" \
+ \
+ LOCK_SECTION_START("") \
+ "2: call "#fail_fn" \n" \
+ " jmp 1b \n" \
+ LOCK_SECTION_END \
+ \
+ :"=a" (dummy) \
+ : "a" (count) \
+ : "memory", "ecx", "edx"); \
+} while (0)
+
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
+ * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count,
+ int fastcall (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_dec_return(count) < 0))
+ return fail_fn(count);
+ else
+ return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the mutex from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value
+ * to 1, or to set it to a value lower than 1.
+ *
+ * If the implementation sets it to a value of lower than 1, the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
+ * to return 0 otherwise.
+ */
+#define __mutex_fastpath_unlock(count, fail_fn) \
+do { \
+ unsigned int dummy; \
+ \
+ typecheck(atomic_t *, count); \
+ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ \
+ __asm__ __volatile__( \
+ LOCK " incl (%%eax) \n" \
+ " jle 2f \n" \
+ "1: \n" \
+ \
+ LOCK_SECTION_START("") \
+ "2: call "#fail_fn" \n" \
+ " jmp 1b \n" \
+ LOCK_SECTION_END \
+ \
+ :"=a" (dummy) \
+ : "a" (count) \
+ : "memory", "ecx", "edx"); \
+} while (0)
+
+#define __mutex_slowpath_needs_to_unlock() 1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not leave
+ * it at 0 on failure.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ /*
+ * We have two variants here. The cmpxchg based one is the best one
+ * because it never induces a false contention state. It is included
+ * here because architectures using the inc/dec algorithms over the
+ * xchg ones are much more likely to support cmpxchg natively.
+ *
+ * If not, we fall back to the spinlock based variant - that is
+ * just as efficient (and simpler) as a 'destructive' probing of
+ * the mutex state would be.
+ */
+#ifdef __HAVE_ARCH_CMPXCHG
+ if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+ return 1;
+ return 0;
+#else
+ return fail_fn(count);
+#endif
+}
+
+#endif
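For illustration, a rough sketch of how the generic mutex core is expected to drive the fastpath helpers above; the slowpath names follow kernel/mutex.c and the wrappers are simplified (no debug variant):

	void example_mutex_lock(struct mutex *lock)
	{
		/* falls into the slowpath if the count was not 1 */
		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	}

	void example_mutex_unlock(struct mutex *lock)
	{
		/* wakes waiters via the slowpath if the count was not 0 */
		__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
	}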
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 5c96cf6dcb3..feca5d961e2 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -61,9 +61,11 @@ struct cpuinfo_x86 {
int x86_cache_size; /* in KB - valid for CPUS which support this
call */
int x86_cache_alignment; /* In bytes */
- int fdiv_bug;
- int f00f_bug;
- int coma_bug;
+ char fdiv_bug;
+ char f00f_bug;
+ char coma_bug;
+ char pad0;
+ int x86_power;
unsigned long loops_per_jiffy;
unsigned char x86_max_cores; /* cpuid returned max cores value */
unsigned char booted_cores; /* number of cores as seen by OS */
@@ -279,9 +281,11 @@ static inline void clear_in_cr4 (unsigned long mask)
outb((data), 0x23); \
} while (0)
-static inline void serialize_cpu(void)
+/* Stop speculative execution */
+static inline void sync_core(void)
{
- __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
+ int tmp;
+ asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
}
static inline void __monitor(const void *eax, unsigned long ecx,
@@ -557,10 +561,20 @@ unsigned long get_wchan(struct task_struct *p);
(unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
})
+/*
+ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
+ * This is necessary to guarantee that the entire "struct pt_regs"
+ * is accessible even if the CPU hasn't stored the SS/ESP registers
+ * on the stack (the interrupt gate does not save these registers
+ * when switching to the same privilege ring).
+ * Therefore beware: accessing the xss/esp fields of the
+ * "struct pt_regs" is possible, but they may contain
+ * completely wrong values.
+ */
#define task_pt_regs(task) \
({ \
struct pt_regs *__regs__; \
- __regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info); \
+ __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
__regs__ - 1; \
})
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h
index 7e0f2945d17..f324c53b6f9 100644
--- a/include/asm-i386/ptrace.h
+++ b/include/asm-i386/ptrace.h
@@ -54,6 +54,9 @@ struct pt_regs {
#define PTRACE_GET_THREAD_AREA 25
#define PTRACE_SET_THREAD_AREA 26
+#define PTRACE_SYSEMU 31
+#define PTRACE_SYSEMU_SINGLESTEP 32
+
#ifdef __KERNEL__
#include <asm/vm86.h>
diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h
index bb5ff5b2c02..faf995307b9 100644
--- a/include/asm-i386/segment.h
+++ b/include/asm-i386/segment.h
@@ -91,6 +91,20 @@
#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1)
#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8)
+/* The PnP BIOS entries in the GDT */
+#define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0)
+#define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1)
+#define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2)
+#define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3)
+#define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4)
+
+/* The PnP BIOS selectors */
+#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */
+#define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */
+#define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */
+#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */
+#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */
+
/*
* The interrupt descriptor table has room for 256 idt's,
* the global descriptor table is dependent on the number
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 772f85da120..36a92ed6a9d 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -54,23 +54,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
); } while(0)
#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
-#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
-
-static inline unsigned long _get_base(char * addr)
-{
- unsigned long __base;
- __asm__("movb %3,%%dh\n\t"
- "movb %2,%%dl\n\t"
- "shll $16,%%edx\n\t"
- "movw %1,%%dx"
- :"=&d" (__base)
- :"m" (*((addr)+2)),
- "m" (*((addr)+4)),
- "m" (*((addr)+7)));
- return __base;
-}
-
-#define get_base(ldt) _get_base( ((char *)&(ldt)) )
+#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
/*
* Load a segment. Fall back on loading the zero
@@ -140,6 +124,19 @@ static inline unsigned long _get_base(char * addr)
:"=r" (__dummy)); \
__dummy; \
})
+
+#define read_cr4_safe() ({ \
+ unsigned int __dummy; \
+ /* This could fault if %cr4 does not exist */ \
+ __asm__("1: movl %%cr4, %0 \n" \
+ "2: \n" \
+ ".section __ex_table,\"a\" \n" \
+ ".long 1b,2b \n" \
+ ".previous \n" \
+ : "=r" (__dummy): "0" (0)); \
+ __dummy; \
+})
+
#define write_cr4(x) \
__asm__ __volatile__("movl %0,%%cr4": :"r" (x));
#define stts() write_cr0(8 | read_cr0())
@@ -551,6 +548,15 @@ void enable_hlt(void);
extern int es7000_plat;
void cpu_idle_wait(void);
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible:
+ */
+static inline void sched_cacheflush(void)
+{
+ wbinvd();
+}
+
extern unsigned long arch_align_stack(unsigned long sp);
#endif
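For illustration, a sketch of where read_cr4_safe() above is useful, assuming a caller that probes a CR4 feature bit on CPUs that may predate CR4; the variable names are hypothetical:

	unsigned int cr4 = read_cr4_safe();	/* reads as 0 on CPUs without CR4 */
	int pae_enabled = !!(cr4 & X86_CR4_PAE);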
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 8fbf791651b..2493e77e8c3 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -111,8 +111,6 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
#endif
#define free_thread_info(info) kfree(info)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#else /* !__ASSEMBLY__ */
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
index 0ec27c9e8e4..d7e19eb344b 100644
--- a/include/asm-i386/topology.h
+++ b/include/asm-i386/topology.h
@@ -72,7 +72,6 @@ static inline int node_to_first_cpu(int node)
.max_interval = 32, \
.busy_factor = 32, \
.imbalance_pct = 125, \
- .cache_hot_time = (10*1000000), \
.cache_nice_tries = 1, \
.busy_idx = 3, \
.idle_idx = 1, \
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index 0f92e78dfea..481c3c0ea72 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -256,7 +256,7 @@
#define __NR_io_submit 248
#define __NR_io_cancel 249
#define __NR_fadvise64 250
-#define __NR_set_zone_reclaim 251
+/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */
#define __NR_exit_group 252
#define __NR_lookup_dcookie 253
#define __NR_epoll_create 254
@@ -299,8 +299,9 @@
#define __NR_inotify_init 291
#define __NR_inotify_add_watch 292
#define __NR_inotify_rm_watch 293
+#define __NR_migrate_pages 294
-#define NR_syscalls 294
+#define NR_syscalls 295
/*
* user-visible error numbers are in the range -1 - -128: see
diff --git a/include/asm-i386/vm86.h b/include/asm-i386/vm86.h
index 40ec82c6914..952fd695738 100644
--- a/include/asm-i386/vm86.h
+++ b/include/asm-i386/vm86.h
@@ -16,7 +16,11 @@
#define IF_MASK 0x00000200
#define IOPL_MASK 0x00003000
#define NT_MASK 0x00004000
+#ifdef CONFIG_VM86
#define VM_MASK 0x00020000
+#else
+#define VM_MASK 0 /* ignored */
+#endif
#define AC_MASK 0x00040000
#define VIF_MASK 0x00080000 /* virtual interrupt flag */
#define VIP_MASK 0x00100000 /* virtual interrupt pending */
@@ -200,9 +204,25 @@ struct kernel_vm86_struct {
*/
};
+#ifdef CONFIG_VM86
+
void handle_vm86_fault(struct kernel_vm86_regs *, long);
int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
+struct task_struct;
+void release_vm86_irqs(struct task_struct *);
+
+#else
+
+#define handle_vm86_fault(a, b)
+#define release_vm86_irqs(a)
+
+static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) {
+ return 0;
+}
+
+#endif /* CONFIG_VM86 */
+
#endif /* __KERNEL__ */
#endif
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index 2fbebf85c31..d3e0dfa99e1 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -89,6 +89,7 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
}
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_add_unless(v, a, u) \
({ \
@@ -192,4 +193,5 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
+#include <asm-generic/atomic.h>
#endif /* _ASM_IA64_ATOMIC_H */
diff --git a/include/asm-ia64/bug.h b/include/asm-ia64/bug.h
index 3aa0a0a5474..823616b5020 100644
--- a/include/asm-ia64/bug.h
+++ b/include/asm-ia64/bug.h
@@ -2,11 +2,7 @@
#define _ASM_IA64_BUG_H
#ifdef CONFIG_BUG
-#if (__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
-# define ia64_abort() __builtin_trap()
-#else
-# define ia64_abort() (*(volatile int *) 0 = 0)
-#endif
+#define ia64_abort() __builtin_trap()
#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0)
/* should this BUG be made generic? */
diff --git a/include/asm-ia64/cache.h b/include/asm-ia64/cache.h
index 666d8f175cb..40dd25195d6 100644
--- a/include/asm-ia64/cache.h
+++ b/include/asm-ia64/cache.h
@@ -12,8 +12,6 @@
#define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
-
#ifdef CONFIG_SMP
# define SMP_CACHE_SHIFT L1_CACHE_SHIFT
# define SMP_CACHE_BYTES L1_CACHE_BYTES
diff --git a/include/asm-ia64/compat.h b/include/asm-ia64/compat.h
index aaf11f4e916..c0b19106665 100644
--- a/include/asm-ia64/compat.h
+++ b/include/asm-ia64/compat.h
@@ -192,7 +192,7 @@ compat_ptr (compat_uptr_t uptr)
static __inline__ void __user *
compat_alloc_user_space (long len)
{
- struct pt_regs *regs = ia64_task_regs(current);
+ struct pt_regs *regs = task_pt_regs(current);
return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
}
diff --git a/include/asm-ia64/futex.h b/include/asm-ia64/futex.h
index 9feff4ce142..6a332a9f099 100644
--- a/include/asm-ia64/futex.h
+++ b/include/asm-ia64/futex.h
@@ -1,53 +1,6 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
-#ifdef __KERNEL__
+#include <asm-generic/futex.h>
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- inc_preempt_count();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- dec_preempt_count();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-#endif
#endif
diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h
index cf772a67f85..b64fdb98549 100644
--- a/include/asm-ia64/io.h
+++ b/include/asm-ia64/io.h
@@ -89,6 +89,7 @@ phys_to_virt (unsigned long address)
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */
+extern int valid_mmap_phys_addr_range (unsigned long addr, size_t *count);
/*
* The following two macros are deprecated and scheduled for removal.
diff --git a/include/asm-ia64/ioctl.h b/include/asm-ia64/ioctl.h
index be9cc2403d2..b279fe06dfe 100644
--- a/include/asm-ia64/ioctl.h
+++ b/include/asm-ia64/ioctl.h
@@ -1,77 +1 @@
-#ifndef _ASM_IA64_IOCTL_H
-#define _ASM_IA64_IOCTL_H
-
-/*
- * Based on <asm-i386/ioctl.h>.
- *
- * Modified 1998, 1999
- * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
- */
-
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and to avoid overwriting user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB -1 !
- */
-
-/*
- * The following is for compatibility across the various Linux
- * platforms. The ia64 ioctl numbering scheme doesn't really enforce
- * a type field. De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here. Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS 8
-#define _IOC_TYPEBITS 8
-#define _IOC_SIZEBITS 14
-#define _IOC_DIRBITS 2
-
-#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT 0
-#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits.
- */
-#define _IOC_NONE 0U
-#define _IOC_WRITE 1U
-#define _IOC_READ 2U
-
-#define _IOC(dir,type,nr,size) \
- (((dir) << _IOC_DIRSHIFT) | \
- ((type) << _IOC_TYPESHIFT) | \
- ((nr) << _IOC_NRSHIFT) | \
- ((size) << _IOC_SIZESHIFT))
-
-/* used to create numbers */
-#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
-
-#endif /* _ASM_IA64_IOCTL_H */
+#include <asm-generic/ioctl.h>
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index 592abb000e2..a74b6810455 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -89,6 +89,7 @@ struct kprobe_ctlblk {
#define IP_RELATIVE_PREDICT_OPCODE (7)
#define LONG_BRANCH_OPCODE (0xC)
#define LONG_CALL_OPCODE (0xD)
+#define arch_remove_kprobe(p) do {} while (0)
typedef struct kprobe_opcode {
bundle_t bundle;
@@ -110,12 +111,6 @@ struct arch_specific_insn {
unsigned short target_br_reg;
};
-/* ia64 does not need this */
-static inline void arch_copy_kprobe(struct kprobe *p)
-{
-}
-
-#ifdef CONFIG_KPROBES
extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
@@ -124,11 +119,4 @@ static inline void jprobe_return(void)
{
}
-#else /* !CONFIG_KPROBES */
-static inline int kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data)
-{
- return 0;
-}
-#endif
#endif /* _ASM_KPROBES_H */
diff --git a/include/asm-ia64/mman.h b/include/asm-ia64/mman.h
index 1c0a73af146..828beb24a20 100644
--- a/include/asm-ia64/mman.h
+++ b/include/asm-ia64/mman.h
@@ -43,6 +43,7 @@
#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
+#define MADV_REMOVE 0x5 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-ia64/mutex.h b/include/asm-ia64/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/include/asm-ia64/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
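For illustration, the alternative fastpath an architecture could pick instead, per the TODO above (a one-line swap to the xchg-based generic header mentioned there):

	#include <asm-generic/mutex-xchg.h>	/* atomic_xchg() based fastpath variant */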
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 94e07e72739..8c648bf72bb 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -352,7 +352,7 @@ extern unsigned long get_wchan (struct task_struct *p);
/* Return instruction pointer of blocked task TSK. */
#define KSTK_EIP(tsk) \
({ \
- struct pt_regs *_regs = ia64_task_regs(tsk); \
+ struct pt_regs *_regs = task_pt_regs(tsk); \
_regs->cr_iip + ia64_psr(_regs)->ri; \
})
diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h
index 2c703d6e0c8..9471cdc3f4c 100644
--- a/include/asm-ia64/ptrace.h
+++ b/include/asm-ia64/ptrace.h
@@ -248,7 +248,7 @@ struct switch_stack {
})
/* given a pointer to a task_struct, return the user's pt_regs */
-# define ia64_task_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
+# define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
# define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr)
# define user_mode(regs) (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
# define user_stack(task,regs) ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
@@ -271,7 +271,7 @@ struct switch_stack {
*
* On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
*/
-# define force_successful_syscall_return() (ia64_task_regs(current)->r8 = 0)
+# define force_successful_syscall_return() (task_pt_regs(current)->r8 = 0)
struct task_struct; /* forward decl */
struct unw_frame_info; /* forward decl */
diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h
index 0c91a76c5ea..9e83210dc31 100644
--- a/include/asm-ia64/spinlock.h
+++ b/include/asm-ia64/spinlock.h
@@ -34,7 +34,7 @@ __raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags)
{
register volatile unsigned int *ptr asm ("r31") = &lock->lock;
-#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
+#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
# ifdef CONFIG_ITANIUM
/* don't use brl on Itanium... */
asm volatile ("{\n\t"
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 635235fa1e3..80c5a234e25 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -219,14 +219,14 @@ extern void ia64_load_extra (struct task_struct *task);
#define IA64_HAS_EXTRA_STATE(t) \
((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \
- || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
+ || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE())
#define __switch_to(prev,next,last) do { \
if (IA64_HAS_EXTRA_STATE(prev)) \
ia64_save_extra(prev); \
if (IA64_HAS_EXTRA_STATE(next)) \
ia64_load_extra(next); \
- ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \
+ ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \
(last) = ia64_switch_to((next)); \
} while (0)
@@ -238,8 +238,8 @@ extern void ia64_load_extra (struct task_struct *task);
* the latest fph state from another CPU. In other words: eager save, lazy restore.
*/
# define switch_to(prev,next,last) do { \
- if (ia64_psr(ia64_task_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \
- ia64_psr(ia64_task_regs(prev))->mfh = 0; \
+ if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \
+ ia64_psr(task_pt_regs(prev))->mfh = 0; \
(prev)->thread.flags |= IA64_THREAD_FPH_VALID; \
__ia64_save_fpu((prev)->thread.fph); \
} \
@@ -279,6 +279,7 @@ extern void ia64_load_extra (struct task_struct *task);
#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
void cpu_idle_wait(void);
+void sched_cacheflush(void);
#define arch_align_stack(x) (x)
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 171b2207bde..653bb7f9a75 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -57,11 +57,20 @@ struct thread_info {
/* how to get the thread information struct from C */
#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
#define alloc_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
#else
#define current_thread_info() ((struct thread_info *) 0)
#define alloc_thread_info(tsk) ((struct thread_info *) 0)
+#define task_thread_info(tsk) ((struct thread_info *) 0)
#endif
#define free_thread_info(ti) /* nothing */
+#define task_stack_page(tsk) ((void *)(tsk))
+
+#define __HAVE_THREAD_FUNCTIONS
+#define setup_thread_stack(p, org) \
+ *task_thread_info(p) = *task_thread_info(org); \
+ task_thread_info(p)->task = (p);
+#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#define alloc_task_struct() ((task_t *)__get_free_pages(GFP_KERNEL, KERNEL_STACK_SIZE_ORDER))
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h
index f7c330467e7..d8aae4da397 100644
--- a/include/asm-ia64/topology.h
+++ b/include/asm-ia64/topology.h
@@ -55,7 +55,6 @@ void build_cpu_to_node_map(void);
.max_interval = 4, \
.busy_factor = 64, \
.imbalance_pct = 125, \
- .cache_hot_time = (10*1000000), \
.per_cpu_gain = 100, \
.cache_nice_tries = 2, \
.busy_idx = 2, \
@@ -81,7 +80,6 @@ void build_cpu_to_node_map(void);
.max_interval = 8*(min(num_online_cpus(), 32)), \
.busy_factor = 64, \
.imbalance_pct = 125, \
- .cache_hot_time = (10*1000000), \
.cache_nice_tries = 2, \
.busy_idx = 3, \
.idle_idx = 2, \
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index 6d96a67439b..962f9bd1bdf 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -265,16 +265,17 @@
#define __NR_keyctl 1273
#define __NR_ioprio_set 1274
#define __NR_ioprio_get 1275
-#define __NR_set_zone_reclaim 1276
+/* 1276 is available for reuse (was briefly sys_set_zone_reclaim) */
#define __NR_inotify_init 1277
#define __NR_inotify_add_watch 1278
#define __NR_inotify_rm_watch 1279
+#define __NR_migrate_pages 1280
#ifdef __KERNEL__
#include <linux/config.h>
-#define NR_syscalls 256 /* length of syscall table */
+#define NR_syscalls 270 /* length of syscall table */
#define __ARCH_WANT_SYS_RT_SIGACTION
diff --git a/include/asm-m32r/assembler.h b/include/asm-m32r/assembler.h
index e1dff9d6baa..b7f4d8aaeb4 100644
--- a/include/asm-m32r/assembler.h
+++ b/include/asm-m32r/assembler.h
@@ -52,7 +52,7 @@
or3 \reg, \reg, #low(\x)
.endm
-#if !defined(CONFIG_CHIP_M32102)
+#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104))
#define STI(reg) STI_M reg
.macro STI_M reg
setpsw #0x40 -> nop
@@ -64,7 +64,7 @@
clrpsw #0x40 -> nop
; WORKAROUND: "-> nop" is a workaround for the M32700(TS1).
.endm
-#else /* CONFIG_CHIP_M32102 */
+#else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
#define STI(reg) STI_M reg
.macro STI_M reg
mvfc \reg, psw
@@ -191,12 +191,12 @@
and \reg, sp
.endm
-#if !defined(CONFIG_CHIP_M32102)
+#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104))
.macro SWITCH_TO_KERNEL_STACK
; switch to kernel stack (spi)
clrpsw #0x80 -> nop
.endm
-#else /* CONFIG_CHIP_M32102 */
+#else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
.macro SWITCH_TO_KERNEL_STACK
push r0 ; save r0 for working
mvfc r0, psw
@@ -218,7 +218,7 @@
.fillinsn
2:
.endm
-#endif /* CONFIG_CHIP_M32102 */
+#endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
#endif /* __ASSEMBLY__ */
diff --git a/include/asm-m32r/atomic.h b/include/asm-m32r/atomic.h
index ef1fb8ea472..3122fe106f0 100644
--- a/include/asm-m32r/atomic.h
+++ b/include/asm-m32r/atomic.h
@@ -243,6 +243,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
#define atomic_add_negative(i,v) (atomic_add_return((i), (v)) < 0)
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
* atomic_add_unless - add unless the number is a given value
@@ -313,4 +314,5 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
+#include <asm-generic/atomic.h>
#endif /* _ASM_M32R_ATOMIC_H */
diff --git a/include/asm-m32r/cache.h b/include/asm-m32r/cache.h
index 72482059698..9c2b2d9998b 100644
--- a/include/asm-m32r/cache.h
+++ b/include/asm-m32r/cache.h
@@ -7,6 +7,4 @@
#define L1_CACHE_SHIFT 4
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-#define L1_CACHE_SHIFT_MAX 4
-
#endif /* _ASM_M32R_CACHE_H */
diff --git a/include/asm-m32r/cacheflush.h b/include/asm-m32r/cacheflush.h
index 46fc4c32510..e57427b6e24 100644
--- a/include/asm-m32r/cacheflush.h
+++ b/include/asm-m32r/cacheflush.h
@@ -7,7 +7,7 @@
extern void _flush_cache_all(void);
extern void _flush_cache_copyback_all(void);
-#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP)
+#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
diff --git a/include/asm-m32r/futex.h b/include/asm-m32r/futex.h
index 9feff4ce142..6a332a9f099 100644
--- a/include/asm-m32r/futex.h
+++ b/include/asm-m32r/futex.h
@@ -1,53 +1,6 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
-#ifdef __KERNEL__
+#include <asm-generic/futex.h>
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- inc_preempt_count();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- dec_preempt_count();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-#endif
#endif
diff --git a/include/asm-m32r/ioctl.h b/include/asm-m32r/ioctl.h
index 87d8f7db6af..b279fe06dfe 100644
--- a/include/asm-m32r/ioctl.h
+++ b/include/asm-m32r/ioctl.h
@@ -1,78 +1 @@
-#ifndef _ASM_M32R_IOCTL_H
-#define _ASM_M32R_IOCTL_H
-
-/* $Id$ */
-
-/* orig : i386 2.4.18 */
-
-/*
- * linux/ioctl.h for Linux by H.H. Bergman.
- */
-
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and to avoid overwriting user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB -1 !
- */
-
-/*
- * The following is for compatibility across the various Linux
- * platforms. The i386 ioctl numbering scheme doesn't really enforce
- * a type field. De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here. Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS 8
-#define _IOC_TYPEBITS 8
-#define _IOC_SIZEBITS 14
-#define _IOC_DIRBITS 2
-
-#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT 0
-#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits.
- */
-#define _IOC_NONE 0U
-#define _IOC_WRITE 1U
-#define _IOC_READ 2U
-
-#define _IOC(dir,type,nr,size) \
- (((dir) << _IOC_DIRSHIFT) | \
- ((type) << _IOC_TYPESHIFT) | \
- ((nr) << _IOC_NRSHIFT) | \
- ((size) << _IOC_SIZESHIFT))
-
-/* used to create numbers */
-#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
-
-#endif /* _ASM_M32R_IOCTL_H */
+#include <asm-generic/ioctl.h>
diff --git a/include/asm-m32r/irq.h b/include/asm-m32r/irq.h
index 8ed77968ecb..ca943954572 100644
--- a/include/asm-m32r/irq.h
+++ b/include/asm-m32r/irq.h
@@ -65,6 +65,22 @@
#define NR_IRQS \
(OPSPUT_NUM_CPU_IRQ + OPSPUT_NUM_PLD_IRQ \
+ OPSPUT_NUM_LCD_PLD_IRQ + OPSPUT_NUM_LAN_PLD_IRQ)
+
+#elif defined(CONFIG_PLAT_M32104UT)
+/*
+ * IRQ definitions for M32104UT
+ * M32104 Chip: 64 interrupts
+ * ICU of M32104UT-on-board PLD: 32 interrupts cascaded to INT1# chip pin
+ */
+#define M32104UT_NUM_CPU_IRQ (64)
+#define M32104UT_NUM_PLD_IRQ (32)
+#define M32104UT_IRQ_BASE 0
+#define M32104UT_CPU_IRQ_BASE M32104UT_IRQ_BASE
+#define M32104UT_PLD_IRQ_BASE (M32104UT_CPU_IRQ_BASE + M32104UT_NUM_CPU_IRQ)
+
+#define NR_IRQS \
+ (M32104UT_NUM_CPU_IRQ + M32104UT_NUM_PLD_IRQ)
+
#else
#define NR_IRQS 64
#endif
diff --git a/include/asm-m32r/m32102.h b/include/asm-m32r/m32102.h
index cb98101f4f6..a1f0d1fe9eb 100644
--- a/include/asm-m32r/m32102.h
+++ b/include/asm-m32r/m32102.h
@@ -11,7 +11,11 @@
/*======================================================================*
* Special Function Register
*======================================================================*/
+#if !defined(CONFIG_CHIP_M32104)
#define M32R_SFR_OFFSET (0x00E00000) /* 0x00E00000-0x00EFFFFF 1[MB] */
+#else
+#define M32R_SFR_OFFSET (0x00700000) /* 0x00700000-0x007FFFFF 1[MB] */
+#endif
/*
* Clock and Power Management registers.
@@ -100,7 +104,7 @@
#define M32R_MFT5RLD_PORTL (0x0C+M32R_MFT5_OFFSET) /* MFT4 reload */
#define M32R_MFT5CMPRLD_PORTL (0x10+M32R_MFT5_OFFSET) /* MFT4 compare reload */
-#ifdef CONFIG_CHIP_M32700
+#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32104)
#define M32R_MFTCR_MFT0MSK (1UL<<31) /* b0 */
#define M32R_MFTCR_MFT1MSK (1UL<<30) /* b1 */
#define M32R_MFTCR_MFT2MSK (1UL<<29) /* b2 */
@@ -113,7 +117,7 @@
#define M32R_MFTCR_MFT3EN (1UL<<20) /* b11 */
#define M32R_MFTCR_MFT4EN (1UL<<19) /* b12 */
#define M32R_MFTCR_MFT5EN (1UL<<18) /* b13 */
-#else /* not CONFIG_CHIP_M32700 */
+#else /* not CONFIG_CHIP_M32700 && not CONFIG_CHIP_M32104 */
#define M32R_MFTCR_MFT0MSK (1UL<<15) /* b16 */
#define M32R_MFTCR_MFT1MSK (1UL<<14) /* b17 */
#define M32R_MFTCR_MFT2MSK (1UL<<13) /* b18 */
@@ -126,7 +130,7 @@
#define M32R_MFTCR_MFT3EN (1UL<<4) /* b27 */
#define M32R_MFTCR_MFT4EN (1UL<<3) /* b28 */
#define M32R_MFTCR_MFT5EN (1UL<<2) /* b29 */
-#endif /* not CONFIG_CHIP_M32700 */
+#endif /* not CONFIG_CHIP_M32700 && not CONFIG_CHIP_M32104 */
#define M32R_MFTMOD_CC_MASK (1UL<<15) /* b16 */
#define M32R_MFTMOD_TCCR (1UL<<13) /* b18 */
@@ -241,8 +245,24 @@
#define M32R_IRQ_MFT1 (17) /* MFT1 */
#define M32R_IRQ_MFT2 (18) /* MFT2 */
#define M32R_IRQ_MFT3 (19) /* MFT3 */
-#define M32R_IRQ_MFT4 (20) /* MFT4 */
-#define M32R_IRQ_MFT5 (21) /* MFT5 */
+#ifdef CONFIG_CHIP_M32104
+#define M32R_IRQ_MFTX0 (24) /* MFTX0 */
+#define M32R_IRQ_MFTX1 (25) /* MFTX1 */
+#define M32R_IRQ_DMA0 (32) /* DMA0 */
+#define M32R_IRQ_DMA1 (33) /* DMA1 */
+#define M32R_IRQ_DMA2 (34) /* DMA2 */
+#define M32R_IRQ_DMA3 (35) /* DMA3 */
+#define M32R_IRQ_SIO0_R (40) /* SIO0 send */
+#define M32R_IRQ_SIO0_S (41) /* SIO0 receive */
+#define M32R_IRQ_SIO1_R (42) /* SIO1 send */
+#define M32R_IRQ_SIO1_S (43) /* SIO1 receive */
+#define M32R_IRQ_SIO2_R (44) /* SIO2 send */
+#define M32R_IRQ_SIO2_S (45) /* SIO2 receive */
+#define M32R_IRQ_SIO3_R (46) /* SIO3 send */
+#define M32R_IRQ_SIO3_S (47) /* SIO3 receive */
+#define M32R_IRQ_ADC (56) /* ADC */
+#define M32R_IRQ_PC (57) /* PC */
+#else /* ! M32104 */
#define M32R_IRQ_DMA0 (32) /* DMA0 */
#define M32R_IRQ_DMA1 (33) /* DMA1 */
#define M32R_IRQ_SIO0_R (48) /* SIO0 send */
@@ -255,6 +275,7 @@
#define M32R_IRQ_SIO3_S (55) /* SIO3 receive */
#define M32R_IRQ_SIO4_R (56) /* SIO4 send */
#define M32R_IRQ_SIO4_S (57) /* SIO4 receive */
+#endif /* ! M32104 */
#ifdef CONFIG_SMP
#define M32R_IRQ_IPI0 (56)
@@ -281,15 +302,12 @@
#define M32R_FPGA_VERSION0_PORTL (0x30+M32R_FPGA_TOP)
#define M32R_FPGA_VERSION1_PORTL (0x34+M32R_FPGA_TOP)
+#endif /* CONFIG_SMP */
+
#ifndef __ASSEMBLY__
-/* For NETDEV WATCHDOG */
typedef struct {
unsigned long icucr; /* ICU Control Register */
} icu_data_t;
-
-extern icu_data_t icu_data[];
#endif
-#endif /* CONFIG_SMP */
-
#endif /* _M32102_H_ */
diff --git a/include/asm-m32r/m32104ut/m32104ut_pld.h b/include/asm-m32r/m32104ut/m32104ut_pld.h
new file mode 100644
index 00000000000..a4eac20553d
--- /dev/null
+++ b/include/asm-m32r/m32104ut/m32104ut_pld.h
@@ -0,0 +1,163 @@
+/*
+ * include/asm/m32104ut/m32104ut_pld.h
+ *
+ * Definitions for the Programmable Logic Device (PLD) on the M32104UT board.
+ * Based on m32700ut_pld.h
+ *
+ * Copyright (c) 2002 Takeo Takahashi
+ * Copyright (c) 2005 Naoto Sugai
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ */
+
+#ifndef _M32104UT_M32104UT_PLD_H
+#define _M32104UT_M32104UT_PLD_H
+
+#include <linux/config.h>
+
+#if defined(CONFIG_PLAT_M32104UT)
+#define PLD_PLAT_BASE 0x02c00000
+#else
+#error "no platform configuration"
+#endif
+
+#ifndef __ASSEMBLY__
+/*
+ * C functions use non-cached addresses.
+ */
+#define PLD_BASE (PLD_PLAT_BASE /* + NONCACHE_OFFSET */)
+#define __reg8 (volatile unsigned char *)
+#define __reg16 (volatile unsigned short *)
+#define __reg32 (volatile unsigned int *)
+#else
+#define PLD_BASE (PLD_PLAT_BASE + NONCACHE_OFFSET)
+#define __reg8
+#define __reg16
+#define __reg32
+#endif /* __ASSEMBLY__ */
+
+/* CFC */
+#define PLD_CFRSTCR __reg16(PLD_BASE + 0x0000)
+#define PLD_CFSTS __reg16(PLD_BASE + 0x0002)
+#define PLD_CFIMASK __reg16(PLD_BASE + 0x0004)
+#define PLD_CFBUFCR __reg16(PLD_BASE + 0x0006)
+
+/* MMC */
+#define PLD_MMCCR __reg16(PLD_BASE + 0x4000)
+#define PLD_MMCMOD __reg16(PLD_BASE + 0x4002)
+#define PLD_MMCSTS __reg16(PLD_BASE + 0x4006)
+#define PLD_MMCBAUR __reg16(PLD_BASE + 0x400a)
+#define PLD_MMCCMDBCUT __reg16(PLD_BASE + 0x400c)
+#define PLD_MMCCDTBCUT __reg16(PLD_BASE + 0x400e)
+#define PLD_MMCDET __reg16(PLD_BASE + 0x4010)
+#define PLD_MMCWP __reg16(PLD_BASE + 0x4012)
+#define PLD_MMCWDATA __reg16(PLD_BASE + 0x5000)
+#define PLD_MMCRDATA __reg16(PLD_BASE + 0x6000)
+#define PLD_MMCCMDDATA __reg16(PLD_BASE + 0x7000)
+#define PLD_MMCRSPDATA __reg16(PLD_BASE + 0x7006)
+
+/* ICU
+ * ICUISTS: status register
+ * ICUIREQ0: request register
+ * ICUIREQ1: request register
+ * ICUCR3: control register for CFIREQ# interrupt
+ * ICUCR4: control register for CFC Card insert interrupt
+ * ICUCR5: control register for CFC Card eject interrupt
+ * ICUCR6: control register for external interrupt
+ * ICUCR11: control register for MMC Card insert/eject interrupt
+ * ICUCR13: control register for SC error interrupt
+ * ICUCR14: control register for SC receive interrupt
+ * ICUCR15: control register for SC send interrupt
+ */
+
+#define PLD_IRQ_INT0 (M32104UT_PLD_IRQ_BASE + 0) /* None */
+#define PLD_IRQ_CFIREQ (M32104UT_PLD_IRQ_BASE + 3) /* CF IREQ */
+#define PLD_IRQ_CFC_INSERT (M32104UT_PLD_IRQ_BASE + 4) /* CF Insert */
+#define PLD_IRQ_CFC_EJECT (M32104UT_PLD_IRQ_BASE + 5) /* CF Eject */
+#define PLD_IRQ_EXINT (M32104UT_PLD_IRQ_BASE + 6) /* EXINT */
+#define PLD_IRQ_MMCCARD (M32104UT_PLD_IRQ_BASE + 11) /* MMC Insert/Eject */
+#define PLD_IRQ_SC_ERROR (M32104UT_PLD_IRQ_BASE + 13) /* SC error */
+#define PLD_IRQ_SC_RCV (M32104UT_PLD_IRQ_BASE + 14) /* SC receive */
+#define PLD_IRQ_SC_SND (M32104UT_PLD_IRQ_BASE + 15) /* SC send */
+
+#define PLD_ICUISTS __reg16(PLD_BASE + 0x8002)
+#define PLD_ICUISTS_VECB_MASK (0xf000)
+#define PLD_ICUISTS_VECB(x) ((x) & PLD_ICUISTS_VECB_MASK)
+#define PLD_ICUISTS_ISN_MASK (0x07c0)
+#define PLD_ICUISTS_ISN(x) ((x) & PLD_ICUISTS_ISN_MASK)
+#define PLD_ICUCR3 __reg16(PLD_BASE + 0x8104)
+#define PLD_ICUCR4 __reg16(PLD_BASE + 0x8106)
+#define PLD_ICUCR5 __reg16(PLD_BASE + 0x8108)
+#define PLD_ICUCR6 __reg16(PLD_BASE + 0x810a)
+#define PLD_ICUCR11 __reg16(PLD_BASE + 0x8114)
+#define PLD_ICUCR13 __reg16(PLD_BASE + 0x8118)
+#define PLD_ICUCR14 __reg16(PLD_BASE + 0x811a)
+#define PLD_ICUCR15 __reg16(PLD_BASE + 0x811c)
+#define PLD_ICUCR_IEN (0x1000)
+#define PLD_ICUCR_IREQ (0x0100)
+#define PLD_ICUCR_ISMOD00 (0x0000) /* Low edge */
+#define PLD_ICUCR_ISMOD01 (0x0010) /* Low level */
+#define PLD_ICUCR_ISMOD02 (0x0020) /* High edge */
+#define PLD_ICUCR_ISMOD03 (0x0030) /* High level */
+#define PLD_ICUCR_ILEVEL0 (0x0000)
+#define PLD_ICUCR_ILEVEL1 (0x0001)
+#define PLD_ICUCR_ILEVEL2 (0x0002)
+#define PLD_ICUCR_ILEVEL3 (0x0003)
+#define PLD_ICUCR_ILEVEL4 (0x0004)
+#define PLD_ICUCR_ILEVEL5 (0x0005)
+#define PLD_ICUCR_ILEVEL6 (0x0006)
+#define PLD_ICUCR_ILEVEL7 (0x0007)
+
+/* Power Control of MMC and CF */
+#define PLD_CPCR __reg16(PLD_BASE + 0x14000)
+#define PLD_CPCR_CDP 0x0001
+
+/* LED Control
+ *
+ * 1: DIP switch side
+ * 2: Reset switch side
+ */
+#define PLD_IOLEDCR __reg16(PLD_BASE + 0x14002)
+#define PLD_IOLED_1_ON 0x001
+#define PLD_IOLED_1_OFF 0x000
+#define PLD_IOLED_2_ON 0x002
+#define PLD_IOLED_2_OFF 0x000
+
+/* DIP Switch
+ * 0: Write-protect of Flash Memory (0:protected, 1:non-protected)
+ * 1: -
+ * 2: -
+ * 3: -
+ */
+#define PLD_IOSWSTS __reg16(PLD_BASE + 0x14004)
+#define PLD_IOSWSTS_IOSW2 0x0200
+#define PLD_IOSWSTS_IOSW1 0x0100
+#define PLD_IOSWSTS_IOWP0 0x0001
+
+/* CRC */
+#define PLD_CRC7DATA __reg16(PLD_BASE + 0x18000)
+#define PLD_CRC7INDATA __reg16(PLD_BASE + 0x18002)
+#define PLD_CRC16DATA __reg16(PLD_BASE + 0x18004)
+#define PLD_CRC16INDATA __reg16(PLD_BASE + 0x18006)
+#define PLD_CRC16ADATA __reg16(PLD_BASE + 0x18008)
+#define PLD_CRC16AINDATA __reg16(PLD_BASE + 0x1800a)
+
+/* RTC */
+#define PLD_RTCCR __reg16(PLD_BASE + 0x1c000)
+#define PLD_RTCBAUR __reg16(PLD_BASE + 0x1c002)
+#define PLD_RTCWRDATA __reg16(PLD_BASE + 0x1c004)
+#define PLD_RTCRDDATA __reg16(PLD_BASE + 0x1c006)
+#define PLD_RTCRSTODT __reg16(PLD_BASE + 0x1c008)
+
+/* SIM Card */
+#define PLD_SCCR __reg16(PLD_BASE + 0x38000)
+#define PLD_SCMOD __reg16(PLD_BASE + 0x38004)
+#define PLD_SCSTS __reg16(PLD_BASE + 0x38006)
+#define PLD_SCINTCR __reg16(PLD_BASE + 0x38008)
+#define PLD_SCBAUR __reg16(PLD_BASE + 0x3800a)
+#define PLD_SCTXB __reg16(PLD_BASE + 0x3800c)
+#define PLD_SCRXB __reg16(PLD_BASE + 0x3800e)
+
+#endif /* _M32104UT_M32104UT_PLD_H */
diff --git a/include/asm-m32r/m32r.h b/include/asm-m32r/m32r.h
index ec142be0086..b133ca61acf 100644
--- a/include/asm-m32r/m32r.h
+++ b/include/asm-m32r/m32r.h
@@ -14,7 +14,7 @@
#include <asm/m32r_mp_fpga.h>
#elif defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_XNUX2) \
|| defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_M32102) \
- || defined(CONFIG_CHIP_OPSP)
+ || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
#include <asm/m32102.h>
#endif
@@ -43,6 +43,10 @@
#include <asm/m32700ut/m32700ut_pld.h>
#endif
+#if defined(CONFIG_PLAT_M32104UT)
+#include <asm/m32104ut/m32104ut_pld.h>
+#endif /* CONFIG_PLAT_M32104UT */
+
/*
* M32R Register
*/
@@ -122,7 +126,7 @@
#include <asm/page.h>
#ifdef CONFIG_MMU
-#define NONCACHE_OFFSET __PAGE_OFFSET+0x20000000
+#define NONCACHE_OFFSET (__PAGE_OFFSET + 0x20000000)
#else
#define NONCACHE_OFFSET __PAGE_OFFSET
#endif /* CONFIG_MMU */
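For illustration only (not part of the patch): the added parentheses matter because the macro is plain textual substitution, so any surrounding operator regroups the unparenthesized sum. A hypothetical use that the old definition gets wrong:

	/* Converting a non-cached alias back to a cached address,
	 * for illustration only.
	 *   old macro:  cached = addr - __PAGE_OFFSET + 0x20000000
	 *               (the 0x20000000 term is added instead of subtracted)
	 *   new macro:  cached = addr - (__PAGE_OFFSET + 0x20000000)
	 *               (as intended)
	 */
	cached = addr - NONCACHE_OFFSET;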
diff --git a/include/asm-m32r/mman.h b/include/asm-m32r/mman.h
index 011f6d9ec5c..12e29747bc8 100644
--- a/include/asm-m32r/mman.h
+++ b/include/asm-m32r/mman.h
@@ -37,6 +37,7 @@
#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
+#define MADV_REMOVE 0x5 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-m32r/mutex.h b/include/asm-m32r/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/include/asm-m32r/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
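For illustration only (not part of the patch): the generic fastpath pulled in above is the decrement-based variant, while the TODO mentions the atomic_xchg() based alternative. A minimal sketch of both locking fastpaths, assuming the usual asm-generic semantics (count == 1 means unlocked; the second helper is renamed here only so the two can be shown side by side):

	/* Decrement-based fastpath (asm-generic/mutex-dec.h style). */
	static inline void
	__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
	{
		if (unlikely(atomic_dec_return(count) < 0))
			fail_fn(count);		/* contended: take the slowpath */
	}

	/* xchg-based fastpath (asm-generic/mutex-xchg.h style). */
	static inline void
	__mutex_fastpath_lock_xchg(atomic_t *count, void (*fail_fn)(atomic_t *))
	{
		if (unlikely(atomic_xchg(count, 0) != 1))
			fail_fn(count);		/* old value was not "unlocked" */
	}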
diff --git a/include/asm-m32r/ptrace.h b/include/asm-m32r/ptrace.h
index 55cd7ecfde4..0d058b2d844 100644
--- a/include/asm-m32r/ptrace.h
+++ b/include/asm-m32r/ptrace.h
@@ -163,6 +163,9 @@ extern void show_regs(struct pt_regs *);
extern void withdraw_debug_trap(struct pt_regs *regs);
+#define task_pt_regs(task) \
+ ((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE) - 1)
+
#endif /* __KERNEL__ */
#endif /* _ASM_M32R_PTRACE_H */
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 5eee832b73a..06c12a037cb 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -68,13 +68,23 @@
last = __last; \
} while(0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
/* Interrupt Control */
-#if !defined(CONFIG_CHIP_M32102)
+#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
#define local_irq_enable() \
__asm__ __volatile__ ("setpsw #0x40 -> nop": : :"memory")
#define local_irq_disable() \
__asm__ __volatile__ ("clrpsw #0x40 -> nop": : :"memory")
-#else /* CONFIG_CHIP_M32102 */
+#else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
static inline void local_irq_enable(void)
{
unsigned long tmpreg;
@@ -96,7 +106,7 @@ static inline void local_irq_disable(void)
"mvtc %0, psw \n\t"
: "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory");
}
-#endif /* CONFIG_CHIP_M32102 */
+#endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
#define local_save_flags(x) \
__asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */)
@@ -105,13 +115,13 @@ static inline void local_irq_disable(void)
__asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \
: "r" (x) : "cbit", "memory")
-#if !defined(CONFIG_CHIP_M32102)
+#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104))
#define local_irq_save(x) \
__asm__ __volatile__( \
"mvfc %0, psw; \n\t" \
"clrpsw #0x40 -> nop; \n\t" \
: "=r" (x) : /* no input */ : "memory")
-#else /* CONFIG_CHIP_M32102 */
+#else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
#define local_irq_save(x) \
({ \
unsigned long tmpreg; \
@@ -124,7 +134,7 @@ static inline void local_irq_disable(void)
: "=r" (x), "=&r" (tmpreg) \
: : "cbit", "memory"); \
})
-#endif /* CONFIG_CHIP_M32102 */
+#endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
#define irqs_disabled() \
({ \
diff --git a/include/asm-m32r/thread_info.h b/include/asm-m32r/thread_info.h
index 0f589363f61..22aff3222d2 100644
--- a/include/asm-m32r/thread_info.h
+++ b/include/asm-m32r/thread_info.h
@@ -110,8 +110,6 @@ static inline struct thread_info *current_thread_info(void)
#endif
#define free_thread_info(info) kfree(info)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#define TI_FLAG_FAULT_CODE_SHIFT 28
diff --git a/include/asm-m32r/unistd.h b/include/asm-m32r/unistd.h
index ac399e1f7bc..39be87ca2a5 100644
--- a/include/asm-m32r/unistd.h
+++ b/include/asm-m32r/unistd.h
@@ -319,7 +319,7 @@ type name(void) \
register long __scno __asm__ ("r7") = __NR_##name; \
register long __res __asm__("r0"); \
__asm__ __volatile__ (\
- "trap #" SYSCALL_VECTOR \
+ "trap #" SYSCALL_VECTOR "|| nop"\
: "=r" (__res) \
: "r" (__scno) \
: "memory"); \
@@ -332,7 +332,7 @@ type name(type1 arg1) \
register long __scno __asm__ ("r7") = __NR_##name; \
register long __res __asm__ ("r0") = (long)(arg1); \
__asm__ __volatile__ (\
- "trap #" SYSCALL_VECTOR \
+ "trap #" SYSCALL_VECTOR "|| nop"\
: "=r" (__res) \
: "r" (__scno), "0" (__res) \
: "memory"); \
@@ -346,7 +346,7 @@ register long __scno __asm__ ("r7") = __NR_##name; \
register long __arg2 __asm__ ("r1") = (long)(arg2); \
register long __res __asm__ ("r0") = (long)(arg1); \
__asm__ __volatile__ (\
- "trap #" SYSCALL_VECTOR \
+ "trap #" SYSCALL_VECTOR "|| nop"\
: "=r" (__res) \
: "r" (__scno), "0" (__res), "r" (__arg2) \
: "memory"); \
@@ -361,7 +361,7 @@ register long __arg3 __asm__ ("r2") = (long)(arg3); \
register long __arg2 __asm__ ("r1") = (long)(arg2); \
register long __res __asm__ ("r0") = (long)(arg1); \
__asm__ __volatile__ (\
- "trap #" SYSCALL_VECTOR \
+ "trap #" SYSCALL_VECTOR "|| nop"\
: "=r" (__res) \
: "r" (__scno), "0" (__res), "r" (__arg2), \
"r" (__arg3) \
@@ -378,7 +378,7 @@ register long __arg3 __asm__ ("r2") = (long)(arg3); \
register long __arg2 __asm__ ("r1") = (long)(arg2); \
register long __res __asm__ ("r0") = (long)(arg1); \
__asm__ __volatile__ (\
- "trap #" SYSCALL_VECTOR \
+ "trap #" SYSCALL_VECTOR "|| nop"\
: "=r" (__res) \
: "r" (__scno), "0" (__res), "r" (__arg2), \
"r" (__arg3), "r" (__arg4) \
@@ -397,7 +397,7 @@ register long __arg3 __asm__ ("r2") = (long)(arg3); \
register long __arg2 __asm__ ("r1") = (long)(arg2); \
register long __res __asm__ ("r0") = (long)(arg1); \
__asm__ __volatile__ (\
- "trap #" SYSCALL_VECTOR \
+ "trap #" SYSCALL_VECTOR "|| nop"\
: "=r" (__res) \
: "r" (__scno), "0" (__res), "r" (__arg2), \
"r" (__arg3), "r" (__arg4), "r" (__arg5) \
diff --git a/include/asm-m68k/amigahw.h b/include/asm-m68k/amigahw.h
index 3ae5d8d55ba..a16fe4e5a28 100644
--- a/include/asm-m68k/amigahw.h
+++ b/include/asm-m68k/amigahw.h
@@ -274,7 +274,7 @@ struct CIA {
#define ZTWO_VADDR(x) (((unsigned long)(x))+zTwoBase)
#define CUSTOM_PHYSADDR (0xdff000)
-#define custom ((*(volatile struct CUSTOM *)(zTwoBase+CUSTOM_PHYSADDR)))
+#define amiga_custom ((*(volatile struct CUSTOM *)(zTwoBase+CUSTOM_PHYSADDR)))
#define CIAA_PHYSADDR (0xbfe001)
#define CIAB_PHYSADDR (0xbfd000)
@@ -294,12 +294,12 @@ static inline void amifb_video_off(void)
{
if (amiga_chipset == CS_ECS || amiga_chipset == CS_AGA) {
/* program Denise/Lisa for a higher maximum play rate */
- custom.htotal = 113; /* 31 kHz */
- custom.vtotal = 223; /* 70 Hz */
- custom.beamcon0 = 0x4390; /* HARDDIS, VAR{BEAM,VSY,HSY,CSY}EN */
+ amiga_custom.htotal = 113; /* 31 kHz */
+ amiga_custom.vtotal = 223; /* 70 Hz */
+ amiga_custom.beamcon0 = 0x4390; /* HARDDIS, VAR{BEAM,VSY,HSY,CSY}EN */
/* suspend the monitor */
- custom.hsstrt = custom.hsstop = 116;
- custom.vsstrt = custom.vsstop = 226;
+ amiga_custom.hsstrt = amiga_custom.hsstop = 116;
+ amiga_custom.vsstrt = amiga_custom.vsstop = 226;
amiga_audio_min_period = 57;
}
}
diff --git a/include/asm-m68k/amigaints.h b/include/asm-m68k/amigaints.h
index 2aff4cfbf7b..aa968d014bb 100644
--- a/include/asm-m68k/amigaints.h
+++ b/include/asm-m68k/amigaints.h
@@ -109,8 +109,6 @@
extern void amiga_do_irq(int irq, struct pt_regs *fp);
extern void amiga_do_irq_list(int irq, struct pt_regs *fp);
-extern unsigned short amiga_intena_vals[];
-
/* CIA interrupt control register bits */
#define CIA_ICR_TA 0x01
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index e3c962eeabf..a4a84d5c65d 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -140,6 +140,7 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
}
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_add_unless(v, a, u) \
({ \
@@ -157,4 +158,5 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
+#include <asm-generic/atomic.h>
#endif /* __ARCH_M68K_ATOMIC __ */
diff --git a/include/asm-m68k/cache.h b/include/asm-m68k/cache.h
index 6161fd3d860..fed3fd30de7 100644
--- a/include/asm-m68k/cache.h
+++ b/include/asm-m68k/cache.h
@@ -8,6 +8,4 @@
#define L1_CACHE_SHIFT 4
#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
-#define L1_CACHE_SHIFT_MAX 4 /* largest L1 which this arch supports */
-
#endif
diff --git a/include/asm-m68k/checksum.h b/include/asm-m68k/checksum.h
index 78860c20db0..17280ef719f 100644
--- a/include/asm-m68k/checksum.h
+++ b/include/asm-m68k/checksum.h
@@ -25,7 +25,7 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
* better 64-bit) boundary
*/
-extern unsigned int csum_partial_copy_from_user(const unsigned char *src,
+extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
unsigned char *dst,
int len, int sum,
int *csum_err);
diff --git a/include/asm-m68k/dsp56k.h b/include/asm-m68k/dsp56k.h
index ab3dd33e23a..2d8c0c9f794 100644
--- a/include/asm-m68k/dsp56k.h
+++ b/include/asm-m68k/dsp56k.h
@@ -13,7 +13,7 @@
/* Used for uploading DSP binary code */
struct dsp56k_upload {
int len;
- char *bin;
+ char __user *bin;
};
/* For the DSP host flags */
diff --git a/include/asm-m68k/floppy.h b/include/asm-m68k/floppy.h
index c6e708dd9f6..63a05ed95c1 100644
--- a/include/asm-m68k/floppy.h
+++ b/include/asm-m68k/floppy.h
@@ -46,7 +46,7 @@ asmlinkage irqreturn_t floppy_hardint(int irq, void *dev_id,
static int virtual_dma_count=0;
static int virtual_dma_residue=0;
-static char *virtual_dma_addr=0;
+static char *virtual_dma_addr=NULL;
static int virtual_dma_mode=0;
static int doing_pdma=0;
diff --git a/include/asm-m68k/futex.h b/include/asm-m68k/futex.h
index 9feff4ce142..6a332a9f099 100644
--- a/include/asm-m68k/futex.h
+++ b/include/asm-m68k/futex.h
@@ -1,53 +1,6 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
-#ifdef __KERNEL__
+#include <asm-generic/futex.h>
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- inc_preempt_count();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- dec_preempt_count();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-#endif
#endif
diff --git a/include/asm-m68k/hardirq.h b/include/asm-m68k/hardirq.h
index 728318bf7f0..5e1c5826c83 100644
--- a/include/asm-m68k/hardirq.h
+++ b/include/asm-m68k/hardirq.h
@@ -14,13 +14,4 @@ typedef struct {
#define HARDIRQ_BITS 8
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
#endif
diff --git a/include/asm-m68k/io.h b/include/asm-m68k/io.h
index 6bb8b0d8f99..dcfaa352d34 100644
--- a/include/asm-m68k/io.h
+++ b/include/asm-m68k/io.h
@@ -24,6 +24,7 @@
#ifdef __KERNEL__
#include <linux/config.h>
+#include <linux/compiler.h>
#include <asm/raw_io.h>
#include <asm/virtconvert.h>
@@ -120,68 +121,68 @@ extern int isa_sex;
* be compiled in so the case statement will be optimised away
*/
-static inline u8 *isa_itb(unsigned long addr)
+static inline u8 __iomem *isa_itb(unsigned long addr)
{
switch(ISA_TYPE)
{
#ifdef CONFIG_Q40
- case Q40_ISA: return (u8 *)Q40_ISA_IO_B(addr);
+ case Q40_ISA: return (u8 __iomem *)Q40_ISA_IO_B(addr);
#endif
#ifdef CONFIG_GG2
- case GG2_ISA: return (u8 *)GG2_ISA_IO_B(addr);
+ case GG2_ISA: return (u8 __iomem *)GG2_ISA_IO_B(addr);
#endif
#ifdef CONFIG_AMIGA_PCMCIA
- case AG_ISA: return (u8 *)AG_ISA_IO_B(addr);
+ case AG_ISA: return (u8 __iomem *)AG_ISA_IO_B(addr);
#endif
- default: return 0; /* avoid warnings, just in case */
+ default: return NULL; /* avoid warnings, just in case */
}
}
-static inline u16 *isa_itw(unsigned long addr)
+static inline u16 __iomem *isa_itw(unsigned long addr)
{
switch(ISA_TYPE)
{
#ifdef CONFIG_Q40
- case Q40_ISA: return (u16 *)Q40_ISA_IO_W(addr);
+ case Q40_ISA: return (u16 __iomem *)Q40_ISA_IO_W(addr);
#endif
#ifdef CONFIG_GG2
- case GG2_ISA: return (u16 *)GG2_ISA_IO_W(addr);
+ case GG2_ISA: return (u16 __iomem *)GG2_ISA_IO_W(addr);
#endif
#ifdef CONFIG_AMIGA_PCMCIA
- case AG_ISA: return (u16 *)AG_ISA_IO_W(addr);
+ case AG_ISA: return (u16 __iomem *)AG_ISA_IO_W(addr);
#endif
- default: return 0; /* avoid warnings, just in case */
+ default: return NULL; /* avoid warnings, just in case */
}
}
-static inline u8 *isa_mtb(unsigned long addr)
+static inline u8 __iomem *isa_mtb(unsigned long addr)
{
switch(ISA_TYPE)
{
#ifdef CONFIG_Q40
- case Q40_ISA: return (u8 *)Q40_ISA_MEM_B(addr);
+ case Q40_ISA: return (u8 __iomem *)Q40_ISA_MEM_B(addr);
#endif
#ifdef CONFIG_GG2
- case GG2_ISA: return (u8 *)GG2_ISA_MEM_B(addr);
+ case GG2_ISA: return (u8 __iomem *)GG2_ISA_MEM_B(addr);
#endif
#ifdef CONFIG_AMIGA_PCMCIA
- case AG_ISA: return (u8 *)addr;
+ case AG_ISA: return (u8 __iomem *)addr;
#endif
- default: return 0; /* avoid warnings, just in case */
+ default: return NULL; /* avoid warnings, just in case */
}
}
-static inline u16 *isa_mtw(unsigned long addr)
+static inline u16 __iomem *isa_mtw(unsigned long addr)
{
switch(ISA_TYPE)
{
#ifdef CONFIG_Q40
- case Q40_ISA: return (u16 *)Q40_ISA_MEM_W(addr);
+ case Q40_ISA: return (u16 __iomem *)Q40_ISA_MEM_W(addr);
#endif
#ifdef CONFIG_GG2
- case GG2_ISA: return (u16 *)GG2_ISA_MEM_W(addr);
+ case GG2_ISA: return (u16 __iomem *)GG2_ISA_MEM_W(addr);
#endif
#ifdef CONFIG_AMIGA_PCMCIA
- case AG_ISA: return (u16 *)addr;
+ case AG_ISA: return (u16 __iomem *)addr;
#endif
- default: return 0; /* avoid warnings, just in case */
+ default: return NULL; /* avoid warnings, just in case */
}
}
@@ -326,20 +327,20 @@ static inline void isa_delay(void)
#define mmiowb()
-static inline void *ioremap(unsigned long physaddr, unsigned long size)
+static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
}
-static inline void *ioremap_nocache(unsigned long physaddr, unsigned long size)
+static inline void __iomem *ioremap_nocache(unsigned long physaddr, unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
}
-static inline void *ioremap_writethrough(unsigned long physaddr,
+static inline void __iomem *ioremap_writethrough(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
}
-static inline void *ioremap_fullcache(unsigned long physaddr,
+static inline void __iomem *ioremap_fullcache(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
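For context (not part of the patch): the __iomem annotations added to these helpers are sparse address-space markers, so a pointer returned by ioremap() can no longer be silently passed where a plain kernel pointer is expected. A hypothetical fragment showing the kind of misuse this catches:

	/* Hypothetical driver code, for illustration only. */
	static u32 read_status(unsigned long phys)
	{
		void __iomem *regs = ioremap(phys, 0x100);
		u32 val;

		if (!regs)
			return ~0U;
		val = in_be32(regs + 0x10);	/* fine: stays in __iomem space */
		/* memcpy(buffer, regs, 16) would now make sparse complain:
		 * an __iomem cookie leaks into a normal pointer argument. */
		iounmap(regs);
		return val;
	}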
diff --git a/include/asm-m68k/ioctl.h b/include/asm-m68k/ioctl.h
index fd68914ab29..b279fe06dfe 100644
--- a/include/asm-m68k/ioctl.h
+++ b/include/asm-m68k/ioctl.h
@@ -1,80 +1 @@
-/* $Id: ioctl.h,v 1.3 1997/04/16 15:10:07 jes Exp $
- *
- * linux/ioctl.h for Linux by H.H. Bergman.
- */
-
-#ifndef _M68K_IOCTL_H
-#define _M68K_IOCTL_H
-
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and to avoid overwriting user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB -1 !
- */
-
-/*
- * I don't really have any idea about what this should look like, so
- * for the time being, this is heavily based on the PC definitions.
- */
-
-/*
- * The following is for compatibility across the various Linux
- * platforms. The i386 ioctl numbering scheme doesn't really enforce
- * a type field. De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here. Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS 8
-#define _IOC_TYPEBITS 8
-#define _IOC_SIZEBITS 14
-#define _IOC_DIRBITS 2
-
-#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT 0
-#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits.
- */
-#define _IOC_NONE 0U
-#define _IOC_WRITE 1U
-#define _IOC_READ 2U
-
-#define _IOC(dir,type,nr,size) \
- (((dir) << _IOC_DIRSHIFT) | \
- ((type) << _IOC_TYPESHIFT) | \
- ((nr) << _IOC_NRSHIFT) | \
- ((size) << _IOC_SIZESHIFT))
-
-/* used to create numbers */
-#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
-
-#endif /* _M68K_IOCTL_H */
+#include <asm-generic/ioctl.h>
diff --git a/include/asm-m68k/irq.h b/include/asm-m68k/irq.h
index 1f569905cb7..325c86f8512 100644
--- a/include/asm-m68k/irq.h
+++ b/include/asm-m68k/irq.h
@@ -23,6 +23,15 @@
#endif
/*
+ * The hardirq mask has to be large enough to have
+ * space for potentially all IRQ sources in the system
+ * nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
+
+/*
* Interrupt source definitions
* General interrupt sources are the level 1-7.
* Adding an interrupt service routine for one of these sources
@@ -70,8 +79,6 @@ static __inline__ int irq_canonicalize(int irq)
extern void (*enable_irq)(unsigned int);
extern void (*disable_irq)(unsigned int);
-
-#define disable_irq_nosync disable_irq
#define enable_irq_nosync enable_irq
struct pt_regs;
diff --git a/include/asm-m68k/machdep.h b/include/asm-m68k/machdep.h
index a0dd5c47002..7d3fee34236 100644
--- a/include/asm-m68k/machdep.h
+++ b/include/asm-m68k/machdep.h
@@ -34,7 +34,6 @@ extern void (*mach_power_off)( void );
extern unsigned long (*mach_hd_init) (unsigned long, unsigned long);
extern void (*mach_hd_setup)(char *, int *);
extern long mach_max_dma_address;
-extern void (*mach_floppy_setup)(char *, int *);
extern void (*mach_heartbeat) (int);
extern void (*mach_l2_flush) (int);
extern void (*mach_beep) (unsigned int, unsigned int);
diff --git a/include/asm-m68k/mman.h b/include/asm-m68k/mman.h
index f831c4eeae6..ea262ab88b3 100644
--- a/include/asm-m68k/mman.h
+++ b/include/asm-m68k/mman.h
@@ -35,6 +35,7 @@
#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
+#define MADV_REMOVE 0x5 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-m68k/mutex.h b/include/asm-m68k/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/include/asm-m68k/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-m68k/raw_io.h b/include/asm-m68k/raw_io.h
index 041f0a87b25..5439bcaa57c 100644
--- a/include/asm-m68k/raw_io.h
+++ b/include/asm-m68k/raw_io.h
@@ -19,9 +19,9 @@
#define IOMAP_NOCACHE_NONSER 2
#define IOMAP_WRITETHROUGH 3
-extern void iounmap(void *addr);
+extern void iounmap(void __iomem *addr);
-extern void *__ioremap(unsigned long physaddr, unsigned long size,
+extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
int cacheflag);
extern void __iounmap(void *addr, unsigned long size);
@@ -30,21 +30,21 @@ extern void __iounmap(void *addr, unsigned long size);
* two accesses to memory, which may be undesirable for some devices.
*/
#define in_8(addr) \
- ({ u8 __v = (*(volatile u8 *) (addr)); __v; })
+ ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
#define in_be16(addr) \
- ({ u16 __v = (*(volatile u16 *) (addr)); __v; })
+ ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
#define in_be32(addr) \
- ({ u32 __v = (*(volatile u32 *) (addr)); __v; })
+ ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
#define in_le16(addr) \
- ({ u16 __v = le16_to_cpu(*(volatile u16 *) (addr)); __v; })
+ ({ u16 __v = le16_to_cpu(*(__force volatile u16 *) (addr)); __v; })
#define in_le32(addr) \
- ({ u32 __v = le32_to_cpu(*(volatile u32 *) (addr)); __v; })
+ ({ u32 __v = le32_to_cpu(*(__force volatile u32 *) (addr)); __v; })
-#define out_8(addr,b) (void)((*(volatile u8 *) (addr)) = (b))
-#define out_be16(addr,w) (void)((*(volatile u16 *) (addr)) = (w))
-#define out_be32(addr,l) (void)((*(volatile u32 *) (addr)) = (l))
-#define out_le16(addr,w) (void)((*(volatile u16 *) (addr)) = cpu_to_le16(w))
-#define out_le32(addr,l) (void)((*(volatile u32 *) (addr)) = cpu_to_le32(l))
+#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b))
+#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w))
+#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l))
+#define out_le16(addr,w) (void)((*(__force volatile u16 *) (addr)) = cpu_to_le16(w))
+#define out_le32(addr,l) (void)((*(__force volatile u32 *) (addr)) = cpu_to_le32(l))
#define raw_inb in_8
#define raw_inw in_be16
@@ -54,7 +54,7 @@ extern void __iounmap(void *addr, unsigned long size);
#define raw_outw(val,port) out_be16((port),(val))
#define raw_outl(val,port) out_be32((port),(val))
-static inline void raw_insb(volatile u8 *port, u8 *buf, unsigned int len)
+static inline void raw_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len)
{
unsigned int i;
@@ -62,7 +62,7 @@ static inline void raw_insb(volatile u8 *port, u8 *buf, unsigned int len)
*buf++ = in_8(port);
}
-static inline void raw_outsb(volatile u8 *port, const u8 *buf,
+static inline void raw_outsb(volatile u8 __iomem *port, const u8 *buf,
unsigned int len)
{
unsigned int i;
@@ -71,7 +71,7 @@ static inline void raw_outsb(volatile u8 *port, const u8 *buf,
out_8(port, *buf++);
}
-static inline void raw_insw(volatile u16 *port, u16 *buf, unsigned int nr)
+static inline void raw_insw(volatile u16 __iomem *port, u16 *buf, unsigned int nr)
{
unsigned int tmp;
@@ -110,7 +110,7 @@ static inline void raw_insw(volatile u16 *port, u16 *buf, unsigned int nr)
}
}
-static inline void raw_outsw(volatile u16 *port, const u16 *buf,
+static inline void raw_outsw(volatile u16 __iomem *port, const u16 *buf,
unsigned int nr)
{
unsigned int tmp;
@@ -150,7 +150,7 @@ static inline void raw_outsw(volatile u16 *port, const u16 *buf,
}
}
-static inline void raw_insl(volatile u32 *port, u32 *buf, unsigned int nr)
+static inline void raw_insl(volatile u32 __iomem *port, u32 *buf, unsigned int nr)
{
unsigned int tmp;
@@ -189,7 +189,7 @@ static inline void raw_insl(volatile u32 *port, u32 *buf, unsigned int nr)
}
}
-static inline void raw_outsl(volatile u32 *port, const u32 *buf,
+static inline void raw_outsl(volatile u32 __iomem *port, const u32 *buf,
unsigned int nr)
{
unsigned int tmp;
@@ -230,7 +230,7 @@ static inline void raw_outsl(volatile u32 *port, const u32 *buf,
}
-static inline void raw_insw_swapw(volatile u16 *port, u16 *buf,
+static inline void raw_insw_swapw(volatile u16 __iomem *port, u16 *buf,
unsigned int nr)
{
if ((nr) % 8)
@@ -283,7 +283,7 @@ static inline void raw_insw_swapw(volatile u16 *port, u16 *buf,
: "d0", "a0", "a1", "d6");
}
-static inline void raw_outsw_swapw(volatile u16 *port, const u16 *buf,
+static inline void raw_outsw_swapw(volatile u16 __iomem *port, const u16 *buf,
unsigned int nr)
{
if ((nr) % 8)
diff --git a/include/asm-m68k/signal.h b/include/asm-m68k/signal.h
index a0cdf908237..b7b7ea20caa 100644
--- a/include/asm-m68k/signal.h
+++ b/include/asm-m68k/signal.h
@@ -144,7 +144,7 @@ struct sigaction {
#endif /* __KERNEL__ */
typedef struct sigaltstack {
- void *ss_sp;
+ void __user *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
diff --git a/include/asm-m68k/sun3_pgtable.h b/include/asm-m68k/sun3_pgtable.h
index e974bb07204..5156a28a18d 100644
--- a/include/asm-m68k/sun3_pgtable.h
+++ b/include/asm-m68k/sun3_pgtable.h
@@ -211,7 +211,7 @@ static inline unsigned long pte_to_pgoff(pte_t pte)
return pte.pte & SUN3_PAGE_PGNUM_MASK;
}
-static inline pte_t pgoff_to_pte(inline unsigned off)
+static inline pte_t pgoff_to_pte(unsigned off)
{
pte_t pte = { off + SUN3_PAGE_ACCESSED };
return pte;
diff --git a/include/asm-m68k/sun3ints.h b/include/asm-m68k/sun3ints.h
index fd838eb1421..bd038fccb64 100644
--- a/include/asm-m68k/sun3ints.h
+++ b/include/asm-m68k/sun3ints.h
@@ -31,7 +31,6 @@ int sun3_request_irq(unsigned int irq,
);
extern void sun3_init_IRQ (void);
extern irqreturn_t (*sun3_default_handler[]) (int, void *, struct pt_regs *);
-extern irqreturn_t (*sun3_inthandler[]) (int, void *, struct pt_regs *);
extern void sun3_free_irq (unsigned int irq, void *dev_id);
extern void sun3_enable_interrupts (void);
extern void sun3_disable_interrupts (void);
diff --git a/include/asm-m68k/sun3xflop.h b/include/asm-m68k/sun3xflop.h
index fda1eccf10a..98a9f79dab2 100644
--- a/include/asm-m68k/sun3xflop.h
+++ b/include/asm-m68k/sun3xflop.h
@@ -208,7 +208,7 @@ static int sun3xflop_request_irq(void)
if(!once) {
once = 1;
- error = request_irq(FLOPPY_IRQ, sun3xflop_hardint, SA_INTERRUPT, "floppy", 0);
+ error = request_irq(FLOPPY_IRQ, sun3xflop_hardint, SA_INTERRUPT, "floppy", NULL);
return ((error == 0) ? 0 : -1);
} else return 0;
}
@@ -238,7 +238,7 @@ static int sun3xflop_init(void)
*sun3x_fdc.fcr_r = 0;
/* Success... */
- floppy_set_flags(0, 1, FD_BROKEN_DCL); // I don't know how to detect this.
+ floppy_set_flags(NULL, 1, FD_BROKEN_DCL); // I don't know how to detect this.
allowed_drive_mask = 0x01;
return (int) SUN3X_FDC;
}
diff --git a/include/asm-m68k/thread_info.h b/include/asm-m68k/thread_info.h
index 9532ca3c45c..c4d622a57df 100644
--- a/include/asm-m68k/thread_info.h
+++ b/include/asm-m68k/thread_info.h
@@ -37,6 +37,7 @@ struct thread_info {
#define init_stack (init_thread_union.stack)
#define task_thread_info(tsk) (&(tsk)->thread.info)
+#define task_stack_page(tsk) ((void *)(tsk)->thread_info)
#define current_thread_info() task_thread_info(current)
#define __HAVE_THREAD_FUNCTIONS
diff --git a/include/asm-m68k/uaccess.h b/include/asm-m68k/uaccess.h
index f5cedf19cf6..2ffd87b0a76 100644
--- a/include/asm-m68k/uaccess.h
+++ b/include/asm-m68k/uaccess.h
@@ -42,6 +42,7 @@ struct exception_table_entry
({ \
int __pu_err; \
typeof(*(ptr)) __pu_val = (x); \
+ __chk_user_ptr(ptr); \
switch (sizeof (*(ptr))) { \
case 1: \
__put_user_asm(__pu_err, __pu_val, ptr, b); \
@@ -91,6 +92,7 @@ __asm__ __volatile__ \
({ \
int __gu_err; \
typeof(*(ptr)) __gu_val; \
+ __chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: \
__get_user_asm(__gu_err, __gu_val, ptr, b, "=d"); \
@@ -105,7 +107,7 @@ __asm__ __volatile__ \
__gu_err = __constant_copy_from_user(&__gu_val, ptr, 8); \
break; \
default: \
- __gu_val = 0; \
+ __gu_val = (typeof(*(ptr)))0; \
__gu_err = __get_user_bad(); \
break; \
} \
@@ -134,7 +136,7 @@ __asm__ __volatile__ \
: "m"(*(ptr)), "i" (-EFAULT), "0"(0))
static inline unsigned long
-__generic_copy_from_user(void *to, const void *from, unsigned long n)
+__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long tmp;
__asm__ __volatile__
@@ -189,7 +191,7 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
}
static inline unsigned long
-__generic_copy_to_user(void *to, const void *from, unsigned long n)
+__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
{
unsigned long tmp;
__asm__ __volatile__
@@ -264,7 +266,7 @@ __generic_copy_to_user(void *to, const void *from, unsigned long n)
: "d0", "memory")
static inline unsigned long
-__constant_copy_from_user(void *to, const void *from, unsigned long n)
+__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
switch (n) {
case 0:
@@ -520,7 +522,7 @@ __constant_copy_from_user(void *to, const void *from, unsigned long n)
#define __copy_from_user_inatomic __copy_from_user
static inline unsigned long
-__constant_copy_to_user(void *to, const void *from, unsigned long n)
+__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
switch (n) {
case 0:
@@ -766,7 +768,7 @@ __constant_copy_to_user(void *to, const void *from, unsigned long n)
*/
static inline long
-strncpy_from_user(char *dst, const char *src, long count)
+strncpy_from_user(char *dst, const char __user *src, long count)
{
long res;
if (count == 0) return count;
@@ -799,11 +801,11 @@ strncpy_from_user(char *dst, const char *src, long count)
*
* Return 0 on exception, a value greater than N if too long
*/
-static inline long strnlen_user(const char *src, long n)
+static inline long strnlen_user(const char __user *src, long n)
{
long res;
- res = -(long)src;
+ res = -(unsigned long)src;
__asm__ __volatile__
("1:\n"
" tstl %2\n"
@@ -842,7 +844,7 @@ static inline long strnlen_user(const char *src, long n)
*/
static inline unsigned long
-clear_user(void *to, unsigned long n)
+clear_user(void __user *to, unsigned long n)
{
__asm__ __volatile__
(" tstl %1\n"
diff --git a/include/asm-m68k/zorro.h b/include/asm-m68k/zorro.h
index cf816588bed..5ce97c22b58 100644
--- a/include/asm-m68k/zorro.h
+++ b/include/asm-m68k/zorro.h
@@ -15,24 +15,24 @@
#define z_memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
#define z_memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
-static inline void *z_remap_nocache_ser(unsigned long physaddr,
+static inline void __iomem *z_remap_nocache_ser(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
}
-static inline void *z_remap_nocache_nonser(unsigned long physaddr,
+static inline void __iomem *z_remap_nocache_nonser(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_NOCACHE_NONSER);
}
-static inline void *z_remap_writethrough(unsigned long physaddr,
+static inline void __iomem *z_remap_writethrough(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
}
-static inline void *z_remap_fullcache(unsigned long physaddr,
+static inline void __iomem *z_remap_fullcache(unsigned long physaddr,
unsigned long size)
{
return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
diff --git a/include/asm-m68knommu/atomic.h b/include/asm-m68knommu/atomic.h
index 3c1cc153c41..6c4e4b63e45 100644
--- a/include/asm-m68knommu/atomic.h
+++ b/include/asm-m68knommu/atomic.h
@@ -129,6 +129,7 @@ static inline int atomic_sub_return(int i, atomic_t * v)
}
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_add_unless(v, a, u) \
({ \
@@ -143,4 +144,5 @@ static inline int atomic_sub_return(int i, atomic_t * v)
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))
+#include <asm-generic/atomic.h>
#endif /* __ARCH_M68KNOMMU_ATOMIC __ */
diff --git a/include/asm-m68knommu/bitops.h b/include/asm-m68knommu/bitops.h
index 4058dd086a0..25d8a3cfef9 100644
--- a/include/asm-m68knommu/bitops.h
+++ b/include/asm-m68knommu/bitops.h
@@ -290,7 +290,7 @@ static __inline__ int find_next_zero_bit (const void * addr, int size, int offse
tmp = *p;
found_first:
- tmp |= ~0UL >> size;
+ tmp |= ~0UL << size;
found_middle:
return result + ffz(tmp);
}
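For illustration only (not part of the patch): in this tail of find_next_zero_bit(), size is the number of valid bits remaining in the last word, so the bits above it must be forced to 1 before ffz() runs; that is exactly what shifting the all-ones mask left (rather than right) achieves. A worked case assuming 32-bit longs:

	/* size == 5: only bits 0..4 of tmp are inside the bitmap.
	 *   ~0UL << 5  == 0xffffffe0  -> bits 5..31 forced to 1 (fixed code)
	 *   ~0UL >> 5  == 0x07ffffff  -> bits 5..26 left searchable (old bug)
	 * so ffz(tmp | (~0UL << size)) can only report an in-range zero bit. */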
diff --git a/include/asm-m68knommu/futex.h b/include/asm-m68knommu/futex.h
index 9feff4ce142..6a332a9f099 100644
--- a/include/asm-m68knommu/futex.h
+++ b/include/asm-m68knommu/futex.h
@@ -1,53 +1,6 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
-#ifdef __KERNEL__
+#include <asm-generic/futex.h>
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- inc_preempt_count();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- dec_preempt_count();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-#endif
#endif
diff --git a/include/asm-m68knommu/ioctl.h b/include/asm-m68knommu/ioctl.h
index cff72f33350..b279fe06dfe 100644
--- a/include/asm-m68knommu/ioctl.h
+++ b/include/asm-m68knommu/ioctl.h
@@ -1 +1 @@
-#include <asm-m68k/ioctl.h>
+#include <asm-generic/ioctl.h>
diff --git a/include/asm-m68knommu/irq.h b/include/asm-m68knommu/irq.h
index a08fa9b958d..20c48ec858a 100644
--- a/include/asm-m68knommu/irq.h
+++ b/include/asm-m68knommu/irq.h
@@ -84,10 +84,8 @@ extern void (*mach_disable_irq)(unsigned int);
/*
* Some drivers want these entry points
*/
-#define enable_irq(x) (mach_enable_irq ? (*mach_enable_irq)(x) : 0)
-#define disable_irq(x) (mach_disable_irq ? (*mach_disable_irq)(x) : 0)
-
-#define enable_irq_nosync(x) enable_irq(x)
+#define enable_irq(x) 0
+#define disable_irq(x) do { } while (0)
#define disable_irq_nosync(x) disable_irq(x)
struct irqaction;
diff --git a/include/asm-m68knommu/machdep.h b/include/asm-m68knommu/machdep.h
index 5a9f9c297f7..27c90afd333 100644
--- a/include/asm-m68knommu/machdep.h
+++ b/include/asm-m68knommu/machdep.h
@@ -38,7 +38,6 @@ extern void (*mach_power_off)( void );
extern unsigned long (*mach_hd_init) (unsigned long, unsigned long);
extern void (*mach_hd_setup)(char *, int *);
extern long mach_max_dma_address;
-extern void (*mach_floppy_setup)(char *, int *);
extern void (*mach_floppy_eject)(void);
extern void (*mach_heartbeat) (int);
extern void (*mach_l2_flush) (int);
diff --git a/include/asm-m68knommu/mutex.h b/include/asm-m68knommu/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/include/asm-m68knommu/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-m68knommu/sigcontext.h b/include/asm-m68knommu/sigcontext.h
index 84bf36dc7a8..36c293fc133 100644
--- a/include/asm-m68knommu/sigcontext.h
+++ b/include/asm-m68knommu/sigcontext.h
@@ -8,6 +8,7 @@ struct sigcontext {
unsigned long sc_d1;
unsigned long sc_a0;
unsigned long sc_a1;
+ unsigned long sc_a5;
unsigned short sc_sr;
unsigned long sc_pc;
unsigned short sc_formatvec;
diff --git a/include/asm-m68knommu/thread_info.h b/include/asm-m68knommu/thread_info.h
index 7b9a3fa3af5..b8f009edf2b 100644
--- a/include/asm-m68knommu/thread_info.h
+++ b/include/asm-m68knommu/thread_info.h
@@ -75,8 +75,6 @@ static inline struct thread_info *current_thread_info(void)
#define alloc_thread_info(tsk) ((struct thread_info *) \
__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER))
#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_SIZE_ORDER)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#endif /* __ASSEMBLY__ */
#define PREEMPT_ACTIVE 0x4000000
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 55c37c106ef..654b97d3e13 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -24,10 +24,9 @@
#define _ASM_ATOMIC_H
#include <asm/cpu-features.h>
+#include <asm/interrupt.h>
#include <asm/war.h>
-extern spinlock_t atomic_lock;
-
typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i) { (i) }
@@ -85,9 +84,9 @@ static __inline__ void atomic_add(int i, atomic_t * v)
} else {
unsigned long flags;
- spin_lock_irqsave(&atomic_lock, flags);
+ local_irq_save(flags);
v->counter += i;
- spin_unlock_irqrestore(&atomic_lock, flags);
+ local_irq_restore(flags);
}
}
@@ -127,9 +126,9 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
} else {
unsigned long flags;
- spin_lock_irqsave(&atomic_lock, flags);
+ local_irq_save(flags);
v->counter -= i;
- spin_unlock_irqrestore(&atomic_lock, flags);
+ local_irq_restore(flags);
}
}
@@ -173,11 +172,11 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
} else {
unsigned long flags;
- spin_lock_irqsave(&atomic_lock, flags);
+ local_irq_save(flags);
result = v->counter;
result += i;
v->counter = result;
- spin_unlock_irqrestore(&atomic_lock, flags);
+ local_irq_restore(flags);
}
return result;
@@ -220,11 +219,11 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
} else {
unsigned long flags;
- spin_lock_irqsave(&atomic_lock, flags);
+ local_irq_save(flags);
result = v->counter;
result -= i;
v->counter = result;
- spin_unlock_irqrestore(&atomic_lock, flags);
+ local_irq_restore(flags);
}
return result;
@@ -277,18 +276,19 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
} else {
unsigned long flags;
- spin_lock_irqsave(&atomic_lock, flags);
+ local_irq_save(flags);
result = v->counter;
result -= i;
if (result >= 0)
v->counter = result;
- spin_unlock_irqrestore(&atomic_lock, flags);
+ local_irq_restore(flags);
}
return result;
}
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
* atomic_add_unless - add unless the number is a given value
@@ -432,9 +432,9 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
} else {
unsigned long flags;
- spin_lock_irqsave(&atomic_lock, flags);
+ local_irq_save(flags);
v->counter += i;
- spin_unlock_irqrestore(&atomic_lock, flags);
+ local_irq_restore(flags);
}
}
@@ -474,9 +474,9 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
} else {
unsigned long flags;
- spin_lock_irqsave(&atomic_lock, flags);
+ local_irq_save(flags);
v->counter -= i;
- spin_unlock_irqrestore(&atomic_lock, flags);
+ local_irq_restore(flags);
}
}
@@ -520,11 +520,11 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
} else {
unsigned long flags;
- spin_lock_irqsave(&atomic_lock, flags);
+ local_irq_save(flags);
result = v->counter;
result += i;
v->counter = result;
- spin_unlock_irqrestore(&atomic_lock, flags);
+ local_irq_restore(flags);
}
return result;
@@ -567,11 +567,11 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
} else {
unsigned long flags;
- spin_lock_irqsave(&atomic_lock, flags);
+ local_irq_save(flags);
result = v->counter;
result -= i;
v->counter = result;
- spin_unlock_irqrestore(&atomic_lock, flags);
+ local_irq_restore(flags);
}
return result;
@@ -624,12 +624,12 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
} else {
unsigned long flags;
- spin_lock_irqsave(&atomic_lock, flags);
+ local_irq_save(flags);
result = v->counter;
result -= i;
if (result >= 0)
v->counter = result;
- spin_unlock_irqrestore(&atomic_lock, flags);
+ local_irq_restore(flags);
}
return result;
@@ -713,4 +713,5 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
+#include <asm-generic/atomic.h>
#endif /* _ASM_ATOMIC_H */
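For context (not part of the patch): on MIPS cores without LL/SC the fallback path above now masks local interrupts instead of taking the deleted global atomic_lock spinlock; such cores are uniprocessor-only, so disabling interrupts on the local CPU already guarantees atomicity. A minimal sketch of the pattern, assuming a UP-only configuration:

	/* Non-LL/SC fallback, for illustration only. */
	static inline void atomic_add_fallback(int i, atomic_t *v)
	{
		unsigned long flags;

		local_irq_save(flags);	/* nothing else can run on a UP core */
		v->counter += i;
		local_irq_restore(flags);
	}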
diff --git a/include/asm-mips/cache.h b/include/asm-mips/cache.h
index 1a5d1a669db..55e19f2ff0e 100644
--- a/include/asm-mips/cache.h
+++ b/include/asm-mips/cache.h
@@ -15,7 +15,6 @@
#define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-#define L1_CACHE_SHIFT_MAX 6
#define SMP_CACHE_SHIFT L1_CACHE_SHIFT
#define SMP_CACHE_BYTES L1_CACHE_BYTES
diff --git a/include/asm-mips/cpu-features.h b/include/asm-mips/cpu-features.h
index 03627cfb3e4..78c9cc2735d 100644
--- a/include/asm-mips/cpu-features.h
+++ b/include/asm-mips/cpu-features.h
@@ -116,6 +116,27 @@
#endif
#endif
+# ifndef cpu_has_mips32r1
+# define cpu_has_mips32r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R1)
+# endif
+# ifndef cpu_has_mips32r2
+# define cpu_has_mips32r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2)
+# endif
+# ifndef cpu_has_mips64r1
+# define cpu_has_mips64r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1)
+# endif
+# ifndef cpu_has_mips64r2
+# define cpu_has_mips64r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2)
+# endif
+
+/*
+ * Shortcuts ...
+ */
+#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2)
+#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2)
+#define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
+#define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2)
+
#ifndef cpu_has_dsp
#define cpu_has_dsp (cpu_data[0].ases & MIPS_ASE_DSP)
#endif
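For illustration only (not part of the patch): the per-revision tests plus the combined shortcuts let generic code key off the ISA family without caring about the exact release. A hypothetical probe:

	/* Hypothetical use, not taken from the kernel. */
	static void report_isa(void)
	{
		if (cpu_has_mips_r2)		/* MIPS32 R2 or MIPS64 R2 */
			printk(KERN_INFO "Release 2 ISA\n");
		else if (cpu_has_mips32 || cpu_has_mips64)
			printk(KERN_INFO "Release 1 ISA\n");
	}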
diff --git a/include/asm-mips/cpu.h b/include/asm-mips/cpu.h
index 48eac296060..934e063e79f 100644
--- a/include/asm-mips/cpu.h
+++ b/include/asm-mips/cpu.h
@@ -204,16 +204,18 @@
*/
#define MIPS_CPU_ISA_I 0x00000001
#define MIPS_CPU_ISA_II 0x00000002
-#define MIPS_CPU_ISA_III 0x00008003
-#define MIPS_CPU_ISA_IV 0x00008004
-#define MIPS_CPU_ISA_V 0x00008005
-#define MIPS_CPU_ISA_M32 0x00000020
-#define MIPS_CPU_ISA_M64 0x00008040
+#define MIPS_CPU_ISA_III 0x00000003
+#define MIPS_CPU_ISA_IV 0x00000004
+#define MIPS_CPU_ISA_V 0x00000005
+#define MIPS_CPU_ISA_M32R1 0x00000020
+#define MIPS_CPU_ISA_M32R2 0x00000040
+#define MIPS_CPU_ISA_M64R1 0x00000080
+#define MIPS_CPU_ISA_M64R2 0x00000100
-/*
- * Bit 15 encodes if an ISA level supports 64-bit operations.
- */
-#define MIPS_CPU_ISA_64BIT 0x00008000
+#define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_I | MIPS_CPU_ISA_II | \
+ MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 )
+#define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \
+ MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)
/*
* CPU Option encodings
diff --git a/include/asm-mips/delay.h b/include/asm-mips/delay.h
index 48d00cccdaf..64dd45150f6 100644
--- a/include/asm-mips/delay.h
+++ b/include/asm-mips/delay.h
@@ -52,13 +52,11 @@ static inline void __udelay(unsigned long usecs, unsigned long lpj)
unsigned long lo;
/*
- * The common rates of 1000 and 128 are rounded wrongly by the
- * catchall case for 64-bit. Excessive precission? Probably ...
+ * The rate of 128 is rounded wrongly by the catchall case
+ * for 64-bit. Excessive precision? Probably ...
*/
#if defined(CONFIG_64BIT) && (HZ == 128)
usecs *= 0x0008637bd05af6c7UL; /* 2**64 / (1000000 / HZ) */
-#elif defined(CONFIG_64BIT) && (HZ == 1000)
- usecs *= 0x004189374BC6A7f0UL; /* 2**64 / (1000000 / HZ) */
#elif defined(CONFIG_64BIT)
usecs *= (0x8000000000000000UL / (500000 / HZ));
#else /* 32-bit junk follows here */
diff --git a/include/asm-mips/dsp.h b/include/asm-mips/dsp.h
index 50f556bb497..e9bfc0813c7 100644
--- a/include/asm-mips/dsp.h
+++ b/include/asm-mips/dsp.h
@@ -16,7 +16,7 @@
#include <asm/mipsregs.h>
#define DSP_DEFAULT 0x00000000
-#define DSP_MASK 0x1f
+#define DSP_MASK 0x3ff
#define __enable_dsp_hazard() \
do { \
@@ -48,6 +48,7 @@ do { \
tsk->thread.dsp.dspr[3] = mflo2(); \
tsk->thread.dsp.dspr[4] = mfhi3(); \
tsk->thread.dsp.dspr[5] = mflo3(); \
+ tsk->thread.dsp.dspcontrol = rddsp(DSP_MASK); \
} while (0)
#define save_dsp(tsk) \
@@ -64,6 +65,7 @@ do { \
mtlo2(tsk->thread.dsp.dspr[3]); \
mthi3(tsk->thread.dsp.dspr[4]); \
mtlo3(tsk->thread.dsp.dspr[5]); \
+ wrdsp(tsk->thread.dsp.dspcontrol, DSP_MASK); \
} while (0)
#define restore_dsp(tsk) \
diff --git a/include/asm-mips/elf.h b/include/asm-mips/elf.h
index d2c9a25f845..851f013adad 100644
--- a/include/asm-mips/elf.h
+++ b/include/asm-mips/elf.h
@@ -277,12 +277,12 @@ do { \
struct task_struct;
-extern void dump_regs(elf_greg_t *, struct pt_regs *regs);
+extern void elf_dump_regs(elf_greg_t *, struct pt_regs *regs);
extern int dump_task_regs (struct task_struct *, elf_gregset_t *);
extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
#define ELF_CORE_COPY_REGS(elf_regs, regs) \
- dump_regs((elf_greg_t *)&(elf_regs), regs);
+ elf_dump_regs((elf_greg_t *)&(elf_regs), regs);
#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) \
dump_task_fpu(tsk, elf_fpregs)
diff --git a/include/asm-mips/hazards.h b/include/asm-mips/hazards.h
index 7517189e469..2fc90632f88 100644
--- a/include/asm-mips/hazards.h
+++ b/include/asm-mips/hazards.h
@@ -233,15 +233,25 @@ __asm__(
#endif
#ifdef CONFIG_CPU_MIPSR2
+/*
+ * gcc has a tradition of miscompiling the previous construct, which used
+ * the address of a label as an argument to inline assembler.  Gas, on the
+ * other hand, has the annoying difference between la and dla, which are
+ * only usable for 32-bit resp. 64-bit code, so neither can be used without
+ * conditional compilation.  The alternative is switching the assembler to
+ * 64-bit mode, which happens to work right even for 32-bit code ...
+ */
#define instruction_hazard() \
do { \
-__label__ __next; \
+ unsigned long tmp; \
+ \
__asm__ __volatile__( \
+ " .set mips64r2 \n" \
+ " dla %0, 1f \n" \
" jr.hb %0 \n" \
- : \
- : "r" (&&__next)); \
-__next: \
- ; \
+ " .set mips0 \n" \
+ "1: \n" \
+ : "=r" (tmp)); \
} while (0)
#else
diff --git a/include/asm-mips/interrupt.h b/include/asm-mips/interrupt.h
index a5735761f5e..abdf54ee64c 100644
--- a/include/asm-mips/interrupt.h
+++ b/include/asm-mips/interrupt.h
@@ -93,6 +93,7 @@ __asm__ (
" .set noat \n"
#ifdef CONFIG_CPU_MIPSR2
" di \\result \n"
+ " andi \\result, 1 \n"
#else
" mfc0 \\result, $12 \n"
" ori $1, \\result, 1 \n"
diff --git a/include/asm-mips/mach-au1x00/au1000.h b/include/asm-mips/mach-au1x00/au1000.h
index 8327ec341c1..8e1d7ed7d8e 100644
--- a/include/asm-mips/mach-au1x00/au1000.h
+++ b/include/asm-mips/mach-au1x00/au1000.h
@@ -838,6 +838,7 @@ extern au1xxx_irq_map_t au1xxx_irq_map[];
#define UART3_ADDR 0xB1400000
#define USB_OHCI_BASE 0x14020000 // phys addr for ioremap
+#define USB_OHCI_LEN 0x00060000
#define USB_HOST_CONFIG 0xB4027ffc
#define AU1550_ETH0_BASE 0xB0500000
@@ -1017,10 +1018,12 @@ extern au1xxx_irq_map_t au1xxx_irq_map[];
#define I2S_CONTROL_D (1<<1)
#define I2S_CONTROL_CE (1<<0)
-#ifndef CONFIG_SOC_AU1200
-
/* USB Host Controller */
+#ifndef USB_OHCI_LEN
#define USB_OHCI_LEN 0x00100000
+#endif
+
+#ifndef CONFIG_SOC_AU1200
/* USB Device Controller */
#define USBD_EP0RD 0xB0200000
diff --git a/include/asm-mips/mach-ip22/cpu-feature-overrides.h b/include/asm-mips/mach-ip22/cpu-feature-overrides.h
index ab971466817..2a37bedb405 100644
--- a/include/asm-mips/mach-ip22/cpu-feature-overrides.h
+++ b/include/asm-mips/mach-ip22/cpu-feature-overrides.h
@@ -34,4 +34,9 @@
#define cpu_has_nofpuex 0
#define cpu_has_64bits 1
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
#endif /* __ASM_MACH_IP22_CPU_FEATURE_OVERRIDES_H */
diff --git a/include/asm-mips/mach-ip27/cpu-feature-overrides.h b/include/asm-mips/mach-ip27/cpu-feature-overrides.h
index 4c8a90051fd..2d2f5b91e47 100644
--- a/include/asm-mips/mach-ip27/cpu-feature-overrides.h
+++ b/include/asm-mips/mach-ip27/cpu-feature-overrides.h
@@ -37,4 +37,9 @@
#define cpu_icache_line_size() 64
#define cpu_scache_line_size() 128
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
#endif /* __ASM_MACH_IP27_CPU_FEATURE_OVERRIDES_H */
diff --git a/include/asm-mips/mach-ip27/topology.h b/include/asm-mips/mach-ip27/topology.h
index 82141c711c3..59d26b52ba3 100644
--- a/include/asm-mips/mach-ip27/topology.h
+++ b/include/asm-mips/mach-ip27/topology.h
@@ -27,7 +27,6 @@ extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
.max_interval = 32, \
.busy_factor = 32, \
.imbalance_pct = 125, \
- .cache_hot_time = (10*1000), \
.cache_nice_tries = 1, \
.per_cpu_gain = 100, \
.flags = SD_LOAD_BALANCE \
diff --git a/include/asm-mips/mach-ip32/cpu-feature-overrides.h b/include/asm-mips/mach-ip32/cpu-feature-overrides.h
index ab37fc1842b..b80c30725cf 100644
--- a/include/asm-mips/mach-ip32/cpu-feature-overrides.h
+++ b/include/asm-mips/mach-ip32/cpu-feature-overrides.h
@@ -39,4 +39,9 @@
#define cpu_has_ic_fills_f_dc 0
#define cpu_has_dsp 0
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
#endif /* __ASM_MACH_IP32_CPU_FEATURE_OVERRIDES_H */
diff --git a/include/asm-mips/mach-ja/cpu-feature-overrides.h b/include/asm-mips/mach-ja/cpu-feature-overrides.h
index a0fde405d4c..90ff087083b 100644
--- a/include/asm-mips/mach-ja/cpu-feature-overrides.h
+++ b/include/asm-mips/mach-ja/cpu-feature-overrides.h
@@ -37,4 +37,9 @@
#define cpu_icache_line_size() 32
#define cpu_scache_line_size() 32
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
#endif /* __ASM_MACH_JA_CPU_FEATURE_OVERRIDES_H */
diff --git a/include/asm-mips/mach-ocelot3/cpu-feature-overrides.h b/include/asm-mips/mach-ocelot3/cpu-feature-overrides.h
index 825c5f674df..782b986241d 100644
--- a/include/asm-mips/mach-ocelot3/cpu-feature-overrides.h
+++ b/include/asm-mips/mach-ocelot3/cpu-feature-overrides.h
@@ -40,4 +40,9 @@
#define cpu_icache_line_size() 32
#define cpu_scache_line_size() 32
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
#endif /* __ASM_MACH_JA_CPU_FEATURE_OVERRIDES_H */
diff --git a/include/asm-mips/mach-rm200/cpu-feature-overrides.h b/include/asm-mips/mach-rm200/cpu-feature-overrides.h
index 79f9b064c86..91e7cf5f2bf 100644
--- a/include/asm-mips/mach-rm200/cpu-feature-overrides.h
+++ b/include/asm-mips/mach-rm200/cpu-feature-overrides.h
@@ -40,4 +40,9 @@
#define cpu_icache_line_size() 32
#define cpu_scache_line_size() 0 /* No S-cache on R5000 I think ... */
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
#endif /* __ASM_MACH_RM200_CPU_FEATURE_OVERRIDES_H */
diff --git a/include/asm-mips/mach-yosemite/cpu-feature-overrides.h b/include/asm-mips/mach-yosemite/cpu-feature-overrides.h
index 463d051f468..3073542c93c 100644
--- a/include/asm-mips/mach-yosemite/cpu-feature-overrides.h
+++ b/include/asm-mips/mach-yosemite/cpu-feature-overrides.h
@@ -37,4 +37,9 @@
#define cpu_icache_line_size() 32
#define cpu_scache_line_size() 32
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
#endif /* __ASM_MACH_YOSEMITE_CPU_FEATURE_OVERRIDES_H */
diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h
index 80370e0a558..035ba0a9b0d 100644
--- a/include/asm-mips/mipsregs.h
+++ b/include/asm-mips/mipsregs.h
@@ -1059,7 +1059,7 @@ do { \
" .set noat \n" \
" move $1, %0 \n" \
" # wrdsp $1, %x1 \n" \
- " .word 0x7c2004f8 | (%x1 << 15) \n" \
+ " .word 0x7c2004f8 | (%x1 << 11) \n" \
" .set pop \n" \
: \
: "r" (val), "i" (mask)); \
diff --git a/include/asm-mips/mman.h b/include/asm-mips/mman.h
index 62060957ba9..dd17c8bd62a 100644
--- a/include/asm-mips/mman.h
+++ b/include/asm-mips/mman.h
@@ -65,6 +65,7 @@
#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
+#define MADV_REMOVE 0x5 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-mips/mutex.h b/include/asm-mips/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/include/asm-mips/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-mips/processor.h b/include/asm-mips/processor.h
index f1980c6c3bc..39d2bd50fec 100644
--- a/include/asm-mips/processor.h
+++ b/include/asm-mips/processor.h
@@ -103,7 +103,6 @@ typedef __u32 dspreg_t;
struct mips_dsp_state {
dspreg_t dspr[NUM_DSP_REGS];
unsigned int dspcontrol;
- unsigned short used_dsp;
};
#define INIT_DSP {{0,},}
@@ -201,11 +200,11 @@ extern void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long
unsigned long get_wchan(struct task_struct *p);
-#define __PT_REG(reg) ((long)&((struct pt_regs *)0)->reg - sizeof(struct pt_regs))
-#define __KSTK_TOS(tsk) ((unsigned long)(tsk->thread_info) + THREAD_SIZE - 32)
-#define KSTK_EIP(tsk) (*(unsigned long *)(__KSTK_TOS(tsk) + __PT_REG(cp0_epc)))
-#define KSTK_ESP(tsk) (*(unsigned long *)(__KSTK_TOS(tsk) + __PT_REG(regs[29])))
-#define KSTK_STATUS(tsk) (*(unsigned long *)(__KSTK_TOS(tsk) + __PT_REG(cp0_status)))
+#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + THREAD_SIZE - 32)
+#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk) - 1)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
+#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
#define cpu_relax() barrier()
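For context (not part of the patch): the rewritten KSTK_* macros rely on the convention that the user register frame sits at the top of the task's kernel stack, just below the 32-byte pad reserved at the very end. A sketch of the layout they decode:

	/*
	 *  task_stack_page(tsk)            task_pt_regs(tsk)    __KSTK_TOS(tsk)
	 *        |                                 |                  |
	 *        v                                 v                  v
	 *        +-------------+-- kernel stack ---+-----------------+------+
	 *        | thread_info |   (grows down)    | struct pt_regs  | 32B  |
	 *        +-------------+-------------------+-----------------+------+
	 *        |<-------------------- THREAD_SIZE ----------------------->|
	 *
	 * task_pt_regs(tsk) == (struct pt_regs *)__KSTK_TOS(tsk) - 1, so
	 * KSTK_EIP() and friends become plain structure reads instead of the
	 * old offset arithmetic against a null pt_regs pointer.
	 */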
diff --git a/include/asm-mips/riscos-syscall.h b/include/asm-mips/riscos-syscall.h
deleted file mode 100644
index 4d8eb15461e..00000000000
--- a/include/asm-mips/riscos-syscall.h
+++ /dev/null
@@ -1,979 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1995, 96, 97, 98, 99, 2000 by Ralf Baechle
- */
-#ifndef _ASM_RISCOS_SYSCALL_H
-#define _ASM_RISCOS_SYSCALL_H
-
-/*
- * The syscalls 0 - 3999 are reserved for a down to the root syscall
- * compatibility with RISC/os and IRIX. We'll see how to deal with the
- * various "real" BSD variants like Ultrix, NetBSD ...
- */
-
-/*
- * SVR4 syscalls are in the range from 1 to 999
- */
-#define __NR_SVR4 0
-#define __NR_SVR4_syscall (__NR_SVR4 + 0)
-#define __NR_SVR4_exit (__NR_SVR4 + 1)
-#define __NR_SVR4_fork (__NR_SVR4 + 2)
-#define __NR_SVR4_read (__NR_SVR4 + 3)
-#define __NR_SVR4_write (__NR_SVR4 + 4)
-#define __NR_SVR4_open (__NR_SVR4 + 5)
-#define __NR_SVR4_close (__NR_SVR4 + 6)
-#define __NR_SVR4_wait (__NR_SVR4 + 7)
-#define __NR_SVR4_creat (__NR_SVR4 + 8)
-#define __NR_SVR4_link (__NR_SVR4 + 9)
-#define __NR_SVR4_unlink (__NR_SVR4 + 10)
-#define __NR_SVR4_exec (__NR_SVR4 + 11)
-#define __NR_SVR4_chdir (__NR_SVR4 + 12)
-#define __NR_SVR4_gtime (__NR_SVR4 + 13)
-#define __NR_SVR4_mknod (__NR_SVR4 + 14)
-#define __NR_SVR4_chmod (__NR_SVR4 + 15)
-#define __NR_SVR4_chown (__NR_SVR4 + 16)
-#define __NR_SVR4_sbreak (__NR_SVR4 + 17)
-#define __NR_SVR4_stat (__NR_SVR4 + 18)
-#define __NR_SVR4_lseek (__NR_SVR4 + 19)
-#define __NR_SVR4_getpid (__NR_SVR4 + 20)
-#define __NR_SVR4_mount (__NR_SVR4 + 21)
-#define __NR_SVR4_umount (__NR_SVR4 + 22)
-#define __NR_SVR4_setuid (__NR_SVR4 + 23)
-#define __NR_SVR4_getuid (__NR_SVR4 + 24)
-#define __NR_SVR4_stime (__NR_SVR4 + 25)
-#define __NR_SVR4_ptrace (__NR_SVR4 + 26)
-#define __NR_SVR4_alarm (__NR_SVR4 + 27)
-#define __NR_SVR4_fstat (__NR_SVR4 + 28)
-#define __NR_SVR4_pause (__NR_SVR4 + 29)
-#define __NR_SVR4_utime (__NR_SVR4 + 30)
-#define __NR_SVR4_stty (__NR_SVR4 + 31)
-#define __NR_SVR4_gtty (__NR_SVR4 + 32)
-#define __NR_SVR4_access (__NR_SVR4 + 33)
-#define __NR_SVR4_nice (__NR_SVR4 + 34)
-#define __NR_SVR4_statfs (__NR_SVR4 + 35)
-#define __NR_SVR4_sync (__NR_SVR4 + 36)
-#define __NR_SVR4_kill (__NR_SVR4 + 37)
-#define __NR_SVR4_fstatfs (__NR_SVR4 + 38)
-#define __NR_SVR4_setpgrp (__NR_SVR4 + 39)
-#define __NR_SVR4_cxenix (__NR_SVR4 + 40)
-#define __NR_SVR4_dup (__NR_SVR4 + 41)
-#define __NR_SVR4_pipe (__NR_SVR4 + 42)
-#define __NR_SVR4_times (__NR_SVR4 + 43)
-#define __NR_SVR4_profil (__NR_SVR4 + 44)
-#define __NR_SVR4_plock (__NR_SVR4 + 45)
-#define __NR_SVR4_setgid (__NR_SVR4 + 46)
-#define __NR_SVR4_getgid (__NR_SVR4 + 47)
-#define __NR_SVR4_sig (__NR_SVR4 + 48)
-#define __NR_SVR4_msgsys (__NR_SVR4 + 49)
-#define __NR_SVR4_sysmips (__NR_SVR4 + 50)
-#define __NR_SVR4_sysacct (__NR_SVR4 + 51)
-#define __NR_SVR4_shmsys (__NR_SVR4 + 52)
-#define __NR_SVR4_semsys (__NR_SVR4 + 53)
-#define __NR_SVR4_ioctl (__NR_SVR4 + 54)
-#define __NR_SVR4_uadmin (__NR_SVR4 + 55)
-#define __NR_SVR4_exch (__NR_SVR4 + 56)
-#define __NR_SVR4_utssys (__NR_SVR4 + 57)
-#define __NR_SVR4_fsync (__NR_SVR4 + 58)
-#define __NR_SVR4_exece (__NR_SVR4 + 59)
-#define __NR_SVR4_umask (__NR_SVR4 + 60)
-#define __NR_SVR4_chroot (__NR_SVR4 + 61)
-#define __NR_SVR4_fcntl (__NR_SVR4 + 62)
-#define __NR_SVR4_ulimit (__NR_SVR4 + 63)
-#define __NR_SVR4_reserved1 (__NR_SVR4 + 64)
-#define __NR_SVR4_reserved2 (__NR_SVR4 + 65)
-#define __NR_SVR4_reserved3 (__NR_SVR4 + 66)
-#define __NR_SVR4_reserved4 (__NR_SVR4 + 67)
-#define __NR_SVR4_reserved5 (__NR_SVR4 + 68)
-#define __NR_SVR4_reserved6 (__NR_SVR4 + 69)
-#define __NR_SVR4_advfs (__NR_SVR4 + 70)
-#define __NR_SVR4_unadvfs (__NR_SVR4 + 71)
-#define __NR_SVR4_unused1 (__NR_SVR4 + 72)
-#define __NR_SVR4_unused2 (__NR_SVR4 + 73)
-#define __NR_SVR4_rfstart (__NR_SVR4 + 74)
-#define __NR_SVR4_unused3 (__NR_SVR4 + 75)
-#define __NR_SVR4_rdebug (__NR_SVR4 + 76)
-#define __NR_SVR4_rfstop (__NR_SVR4 + 77)
-#define __NR_SVR4_rfsys (__NR_SVR4 + 78)
-#define __NR_SVR4_rmdir (__NR_SVR4 + 79)
-#define __NR_SVR4_mkdir (__NR_SVR4 + 80)
-#define __NR_SVR4_getdents (__NR_SVR4 + 81)
-#define __NR_SVR4_libattach (__NR_SVR4 + 82)
-#define __NR_SVR4_libdetach (__NR_SVR4 + 83)
-#define __NR_SVR4_sysfs (__NR_SVR4 + 84)
-#define __NR_SVR4_getmsg (__NR_SVR4 + 85)
-#define __NR_SVR4_putmsg (__NR_SVR4 + 86)
-#define __NR_SVR4_poll (__NR_SVR4 + 87)
-#define __NR_SVR4_lstat (__NR_SVR4 + 88)
-#define __NR_SVR4_symlink (__NR_SVR4 + 89)
-#define __NR_SVR4_readlink (__NR_SVR4 + 90)
-#define __NR_SVR4_setgroups (__NR_SVR4 + 91)
-#define __NR_SVR4_getgroups (__NR_SVR4 + 92)
-#define __NR_SVR4_fchmod (__NR_SVR4 + 93)
-#define __NR_SVR4_fchown (__NR_SVR4 + 94)
-#define __NR_SVR4_sigprocmask (__NR_SVR4 + 95)
-#define __NR_SVR4_sigsuspend (__NR_SVR4 + 96)
-#define __NR_SVR4_sigaltstack (__NR_SVR4 + 97)
-#define __NR_SVR4_sigaction (__NR_SVR4 + 98)
-#define __NR_SVR4_sigpending (__NR_SVR4 + 99)
-#define __NR_SVR4_setcontext (__NR_SVR4 + 100)
-#define __NR_SVR4_evsys (__NR_SVR4 + 101)
-#define __NR_SVR4_evtrapret (__NR_SVR4 + 102)
-#define __NR_SVR4_statvfs (__NR_SVR4 + 103)
-#define __NR_SVR4_fstatvfs (__NR_SVR4 + 104)
-#define __NR_SVR4_reserved7 (__NR_SVR4 + 105)
-#define __NR_SVR4_nfssys (__NR_SVR4 + 106)
-#define __NR_SVR4_waitid (__NR_SVR4 + 107)
-#define __NR_SVR4_sigsendset (__NR_SVR4 + 108)
-#define __NR_SVR4_hrtsys (__NR_SVR4 + 109)
-#define __NR_SVR4_acancel (__NR_SVR4 + 110)
-#define __NR_SVR4_async (__NR_SVR4 + 111)
-#define __NR_SVR4_priocntlset (__NR_SVR4 + 112)
-#define __NR_SVR4_pathconf (__NR_SVR4 + 113)
-#define __NR_SVR4_mincore (__NR_SVR4 + 114)
-#define __NR_SVR4_mmap (__NR_SVR4 + 115)
-#define __NR_SVR4_mprotect (__NR_SVR4 + 116)
-#define __NR_SVR4_munmap (__NR_SVR4 + 117)
-#define __NR_SVR4_fpathconf (__NR_SVR4 + 118)
-#define __NR_SVR4_vfork (__NR_SVR4 + 119)
-#define __NR_SVR4_fchdir (__NR_SVR4 + 120)
-#define __NR_SVR4_readv (__NR_SVR4 + 121)
-#define __NR_SVR4_writev (__NR_SVR4 + 122)
-#define __NR_SVR4_xstat (__NR_SVR4 + 123)
-#define __NR_SVR4_lxstat (__NR_SVR4 + 124)
-#define __NR_SVR4_fxstat (__NR_SVR4 + 125)
-#define __NR_SVR4_xmknod (__NR_SVR4 + 126)
-#define __NR_SVR4_clocal (__NR_SVR4 + 127)
-#define __NR_SVR4_setrlimit (__NR_SVR4 + 128)
-#define __NR_SVR4_getrlimit (__NR_SVR4 + 129)
-#define __NR_SVR4_lchown (__NR_SVR4 + 130)
-#define __NR_SVR4_memcntl (__NR_SVR4 + 131)
-#define __NR_SVR4_getpmsg (__NR_SVR4 + 132)
-#define __NR_SVR4_putpmsg (__NR_SVR4 + 133)
-#define __NR_SVR4_rename (__NR_SVR4 + 134)
-#define __NR_SVR4_nuname (__NR_SVR4 + 135)
-#define __NR_SVR4_setegid (__NR_SVR4 + 136)
-#define __NR_SVR4_sysconf (__NR_SVR4 + 137)
-#define __NR_SVR4_adjtime (__NR_SVR4 + 138)
-#define __NR_SVR4_sysinfo (__NR_SVR4 + 139)
-#define __NR_SVR4_reserved8 (__NR_SVR4 + 140)
-#define __NR_SVR4_seteuid (__NR_SVR4 + 141)
-#define __NR_SVR4_PYRAMID_statis (__NR_SVR4 + 142)
-#define __NR_SVR4_PYRAMID_tuning (__NR_SVR4 + 143)
-#define __NR_SVR4_PYRAMID_forcerr (__NR_SVR4 + 144)
-#define __NR_SVR4_PYRAMID_mpcntl (__NR_SVR4 + 145)
-#define __NR_SVR4_reserved9 (__NR_SVR4 + 146)
-#define __NR_SVR4_reserved10 (__NR_SVR4 + 147)
-#define __NR_SVR4_reserved11 (__NR_SVR4 + 148)
-#define __NR_SVR4_reserved12 (__NR_SVR4 + 149)
-#define __NR_SVR4_reserved13 (__NR_SVR4 + 150)
-#define __NR_SVR4_reserved14 (__NR_SVR4 + 151)
-#define __NR_SVR4_reserved15 (__NR_SVR4 + 152)
-#define __NR_SVR4_reserved16 (__NR_SVR4 + 153)
-#define __NR_SVR4_reserved17 (__NR_SVR4 + 154)
-#define __NR_SVR4_reserved18 (__NR_SVR4 + 155)
-#define __NR_SVR4_reserved19 (__NR_SVR4 + 156)
-#define __NR_SVR4_reserved20 (__NR_SVR4 + 157)
-#define __NR_SVR4_reserved21 (__NR_SVR4 + 158)
-#define __NR_SVR4_reserved22 (__NR_SVR4 + 159)
-#define __NR_SVR4_reserved23 (__NR_SVR4 + 160)
-#define __NR_SVR4_reserved24 (__NR_SVR4 + 161)
-#define __NR_SVR4_reserved25 (__NR_SVR4 + 162)
-#define __NR_SVR4_reserved26 (__NR_SVR4 + 163)
-#define __NR_SVR4_reserved27 (__NR_SVR4 + 164)
-#define __NR_SVR4_reserved28 (__NR_SVR4 + 165)
-#define __NR_SVR4_reserved29 (__NR_SVR4 + 166)
-#define __NR_SVR4_reserved30 (__NR_SVR4 + 167)
-#define __NR_SVR4_reserved31 (__NR_SVR4 + 168)
-#define __NR_SVR4_reserved32 (__NR_SVR4 + 169)
-#define __NR_SVR4_reserved33 (__NR_SVR4 + 170)
-#define __NR_SVR4_reserved34 (__NR_SVR4 + 171)
-#define __NR_SVR4_reserved35 (__NR_SVR4 + 172)
-#define __NR_SVR4_reserved36 (__NR_SVR4 + 173)
-#define __NR_SVR4_reserved37 (__NR_SVR4 + 174)
-#define __NR_SVR4_reserved38 (__NR_SVR4 + 175)
-#define __NR_SVR4_reserved39 (__NR_SVR4 + 176)
-#define __NR_SVR4_reserved40 (__NR_SVR4 + 177)
-#define __NR_SVR4_reserved41 (__NR_SVR4 + 178)
-#define __NR_SVR4_reserved42 (__NR_SVR4 + 179)
-#define __NR_SVR4_reserved43 (__NR_SVR4 + 180)
-#define __NR_SVR4_reserved44 (__NR_SVR4 + 181)
-#define __NR_SVR4_reserved45 (__NR_SVR4 + 182)
-#define __NR_SVR4_reserved46 (__NR_SVR4 + 183)
-#define __NR_SVR4_reserved47 (__NR_SVR4 + 184)
-#define __NR_SVR4_reserved48 (__NR_SVR4 + 185)
-#define __NR_SVR4_reserved49 (__NR_SVR4 + 186)
-#define __NR_SVR4_reserved50 (__NR_SVR4 + 187)
-#define __NR_SVR4_reserved51 (__NR_SVR4 + 188)
-#define __NR_SVR4_reserved52 (__NR_SVR4 + 189)
-#define __NR_SVR4_reserved53 (__NR_SVR4 + 190)
-#define __NR_SVR4_reserved54 (__NR_SVR4 + 191)
-#define __NR_SVR4_reserved55 (__NR_SVR4 + 192)
-#define __NR_SVR4_reserved56 (__NR_SVR4 + 193)
-#define __NR_SVR4_reserved57 (__NR_SVR4 + 194)
-#define __NR_SVR4_reserved58 (__NR_SVR4 + 195)
-#define __NR_SVR4_reserved59 (__NR_SVR4 + 196)
-#define __NR_SVR4_reserved60 (__NR_SVR4 + 197)
-#define __NR_SVR4_reserved61 (__NR_SVR4 + 198)
-#define __NR_SVR4_reserved62 (__NR_SVR4 + 199)
-#define __NR_SVR4_reserved63 (__NR_SVR4 + 200)
-#define __NR_SVR4_aread (__NR_SVR4 + 201)
-#define __NR_SVR4_awrite (__NR_SVR4 + 202)
-#define __NR_SVR4_listio (__NR_SVR4 + 203)
-#define __NR_SVR4_mips_acancel (__NR_SVR4 + 204)
-#define __NR_SVR4_astatus (__NR_SVR4 + 205)
-#define __NR_SVR4_await (__NR_SVR4 + 206)
-#define __NR_SVR4_areadv (__NR_SVR4 + 207)
-#define __NR_SVR4_awritev (__NR_SVR4 + 208)
-#define __NR_SVR4_MIPS_reserved1 (__NR_SVR4 + 209)
-#define __NR_SVR4_MIPS_reserved2 (__NR_SVR4 + 210)
-#define __NR_SVR4_MIPS_reserved3 (__NR_SVR4 + 211)
-#define __NR_SVR4_MIPS_reserved4 (__NR_SVR4 + 212)
-#define __NR_SVR4_MIPS_reserved5 (__NR_SVR4 + 213)
-#define __NR_SVR4_MIPS_reserved6 (__NR_SVR4 + 214)
-#define __NR_SVR4_MIPS_reserved7 (__NR_SVR4 + 215)
-#define __NR_SVR4_MIPS_reserved8 (__NR_SVR4 + 216)
-#define __NR_SVR4_MIPS_reserved9 (__NR_SVR4 + 217)
-#define __NR_SVR4_MIPS_reserved10 (__NR_SVR4 + 218)
-#define __NR_SVR4_MIPS_reserved11 (__NR_SVR4 + 219)
-#define __NR_SVR4_MIPS_reserved12 (__NR_SVR4 + 220)
-#define __NR_SVR4_CDC_reserved1 (__NR_SVR4 + 221)
-#define __NR_SVR4_CDC_reserved2 (__NR_SVR4 + 222)
-#define __NR_SVR4_CDC_reserved3 (__NR_SVR4 + 223)
-#define __NR_SVR4_CDC_reserved4 (__NR_SVR4 + 224)
-#define __NR_SVR4_CDC_reserved5 (__NR_SVR4 + 225)
-#define __NR_SVR4_CDC_reserved6 (__NR_SVR4 + 226)
-#define __NR_SVR4_CDC_reserved7 (__NR_SVR4 + 227)
-#define __NR_SVR4_CDC_reserved8 (__NR_SVR4 + 228)
-#define __NR_SVR4_CDC_reserved9 (__NR_SVR4 + 229)
-#define __NR_SVR4_CDC_reserved10 (__NR_SVR4 + 230)
-#define __NR_SVR4_CDC_reserved11 (__NR_SVR4 + 231)
-#define __NR_SVR4_CDC_reserved12 (__NR_SVR4 + 232)
-#define __NR_SVR4_CDC_reserved13 (__NR_SVR4 + 233)
-#define __NR_SVR4_CDC_reserved14 (__NR_SVR4 + 234)
-#define __NR_SVR4_CDC_reserved15 (__NR_SVR4 + 235)
-#define __NR_SVR4_CDC_reserved16 (__NR_SVR4 + 236)
-#define __NR_SVR4_CDC_reserved17 (__NR_SVR4 + 237)
-#define __NR_SVR4_CDC_reserved18 (__NR_SVR4 + 238)
-#define __NR_SVR4_CDC_reserved19 (__NR_SVR4 + 239)
-#define __NR_SVR4_CDC_reserved20 (__NR_SVR4 + 240)
-
-/*
- * SYS V syscalls are in the range from 1000 to 1999
- */
-#define __NR_SYSV 1000
-#define __NR_SYSV_syscall (__NR_SYSV + 0)
-#define __NR_SYSV_exit (__NR_SYSV + 1)
-#define __NR_SYSV_fork (__NR_SYSV + 2)
-#define __NR_SYSV_read (__NR_SYSV + 3)
-#define __NR_SYSV_write (__NR_SYSV + 4)
-#define __NR_SYSV_open (__NR_SYSV + 5)
-#define __NR_SYSV_close (__NR_SYSV + 6)
-#define __NR_SYSV_wait (__NR_SYSV + 7)
-#define __NR_SYSV_creat (__NR_SYSV + 8)
-#define __NR_SYSV_link (__NR_SYSV + 9)
-#define __NR_SYSV_unlink (__NR_SYSV + 10)
-#define __NR_SYSV_execv (__NR_SYSV + 11)
-#define __NR_SYSV_chdir (__NR_SYSV + 12)
-#define __NR_SYSV_time (__NR_SYSV + 13)
-#define __NR_SYSV_mknod (__NR_SYSV + 14)
-#define __NR_SYSV_chmod (__NR_SYSV + 15)
-#define __NR_SYSV_chown (__NR_SYSV + 16)
-#define __NR_SYSV_brk (__NR_SYSV + 17)
-#define __NR_SYSV_stat (__NR_SYSV + 18)
-#define __NR_SYSV_lseek (__NR_SYSV + 19)
-#define __NR_SYSV_getpid (__NR_SYSV + 20)
-#define __NR_SYSV_mount (__NR_SYSV + 21)
-#define __NR_SYSV_umount (__NR_SYSV + 22)
-#define __NR_SYSV_setuid (__NR_SYSV + 23)
-#define __NR_SYSV_getuid (__NR_SYSV + 24)
-#define __NR_SYSV_stime (__NR_SYSV + 25)
-#define __NR_SYSV_ptrace (__NR_SYSV + 26)
-#define __NR_SYSV_alarm (__NR_SYSV + 27)
-#define __NR_SYSV_fstat (__NR_SYSV + 28)
-#define __NR_SYSV_pause (__NR_SYSV + 29)
-#define __NR_SYSV_utime (__NR_SYSV + 30)
-#define __NR_SYSV_stty (__NR_SYSV + 31)
-#define __NR_SYSV_gtty (__NR_SYSV + 32)
-#define __NR_SYSV_access (__NR_SYSV + 33)
-#define __NR_SYSV_nice (__NR_SYSV + 34)
-#define __NR_SYSV_statfs (__NR_SYSV + 35)
-#define __NR_SYSV_sync (__NR_SYSV + 36)
-#define __NR_SYSV_kill (__NR_SYSV + 37)
-#define __NR_SYSV_fstatfs (__NR_SYSV + 38)
-#define __NR_SYSV_setpgrp (__NR_SYSV + 39)
-#define __NR_SYSV_syssgi (__NR_SYSV + 40)
-#define __NR_SYSV_dup (__NR_SYSV + 41)
-#define __NR_SYSV_pipe (__NR_SYSV + 42)
-#define __NR_SYSV_times (__NR_SYSV + 43)
-#define __NR_SYSV_profil (__NR_SYSV + 44)
-#define __NR_SYSV_plock (__NR_SYSV + 45)
-#define __NR_SYSV_setgid (__NR_SYSV + 46)
-#define __NR_SYSV_getgid (__NR_SYSV + 47)
-#define __NR_SYSV_sig (__NR_SYSV + 48)
-#define __NR_SYSV_msgsys (__NR_SYSV + 49)
-#define __NR_SYSV_sysmips (__NR_SYSV + 50)
-#define __NR_SYSV_acct (__NR_SYSV + 51)
-#define __NR_SYSV_shmsys (__NR_SYSV + 52)
-#define __NR_SYSV_semsys (__NR_SYSV + 53)
-#define __NR_SYSV_ioctl (__NR_SYSV + 54)
-#define __NR_SYSV_uadmin (__NR_SYSV + 55)
-#define __NR_SYSV_sysmp (__NR_SYSV + 56)
-#define __NR_SYSV_utssys (__NR_SYSV + 57)
-#define __NR_SYSV_USG_reserved1 (__NR_SYSV + 58)
-#define __NR_SYSV_execve (__NR_SYSV + 59)
-#define __NR_SYSV_umask (__NR_SYSV + 60)
-#define __NR_SYSV_chroot (__NR_SYSV + 61)
-#define __NR_SYSV_fcntl (__NR_SYSV + 62)
-#define __NR_SYSV_ulimit (__NR_SYSV + 63)
-#define __NR_SYSV_SAFARI4_reserved1 (__NR_SYSV + 64)
-#define __NR_SYSV_SAFARI4_reserved2 (__NR_SYSV + 65)
-#define __NR_SYSV_SAFARI4_reserved3 (__NR_SYSV + 66)
-#define __NR_SYSV_SAFARI4_reserved4 (__NR_SYSV + 67)
-#define __NR_SYSV_SAFARI4_reserved5 (__NR_SYSV + 68)
-#define __NR_SYSV_SAFARI4_reserved6 (__NR_SYSV + 69)
-#define __NR_SYSV_advfs (__NR_SYSV + 70)
-#define __NR_SYSV_unadvfs (__NR_SYSV + 71)
-#define __NR_SYSV_rmount (__NR_SYSV + 72)
-#define __NR_SYSV_rumount (__NR_SYSV + 73)
-#define __NR_SYSV_rfstart (__NR_SYSV + 74)
-#define __NR_SYSV_getrlimit64 (__NR_SYSV + 75)
-#define __NR_SYSV_setrlimit64 (__NR_SYSV + 76)
-#define __NR_SYSV_nanosleep (__NR_SYSV + 77)
-#define __NR_SYSV_lseek64 (__NR_SYSV + 78)
-#define __NR_SYSV_rmdir (__NR_SYSV + 79)
-#define __NR_SYSV_mkdir (__NR_SYSV + 80)
-#define __NR_SYSV_getdents (__NR_SYSV + 81)
-#define __NR_SYSV_sginap (__NR_SYSV + 82)
-#define __NR_SYSV_sgikopt (__NR_SYSV + 83)
-#define __NR_SYSV_sysfs (__NR_SYSV + 84)
-#define __NR_SYSV_getmsg (__NR_SYSV + 85)
-#define __NR_SYSV_putmsg (__NR_SYSV + 86)
-#define __NR_SYSV_poll (__NR_SYSV + 87)
-#define __NR_SYSV_sigreturn (__NR_SYSV + 88)
-#define __NR_SYSV_accept (__NR_SYSV + 89)
-#define __NR_SYSV_bind (__NR_SYSV + 90)
-#define __NR_SYSV_connect (__NR_SYSV + 91)
-#define __NR_SYSV_gethostid (__NR_SYSV + 92)
-#define __NR_SYSV_getpeername (__NR_SYSV + 93)
-#define __NR_SYSV_getsockname (__NR_SYSV + 94)
-#define __NR_SYSV_getsockopt (__NR_SYSV + 95)
-#define __NR_SYSV_listen (__NR_SYSV + 96)
-#define __NR_SYSV_recv (__NR_SYSV + 97)
-#define __NR_SYSV_recvfrom (__NR_SYSV + 98)
-#define __NR_SYSV_recvmsg (__NR_SYSV + 99)
-#define __NR_SYSV_select (__NR_SYSV + 100)
-#define __NR_SYSV_send (__NR_SYSV + 101)
-#define __NR_SYSV_sendmsg (__NR_SYSV + 102)
-#define __NR_SYSV_sendto (__NR_SYSV + 103)
-#define __NR_SYSV_sethostid (__NR_SYSV + 104)
-#define __NR_SYSV_setsockopt (__NR_SYSV + 105)
-#define __NR_SYSV_shutdown (__NR_SYSV + 106)
-#define __NR_SYSV_socket (__NR_SYSV + 107)
-#define __NR_SYSV_gethostname (__NR_SYSV + 108)
-#define __NR_SYSV_sethostname (__NR_SYSV + 109)
-#define __NR_SYSV_getdomainname (__NR_SYSV + 110)
-#define __NR_SYSV_setdomainname (__NR_SYSV + 111)
-#define __NR_SYSV_truncate (__NR_SYSV + 112)
-#define __NR_SYSV_ftruncate (__NR_SYSV + 113)
-#define __NR_SYSV_rename (__NR_SYSV + 114)
-#define __NR_SYSV_symlink (__NR_SYSV + 115)
-#define __NR_SYSV_readlink (__NR_SYSV + 116)
-#define __NR_SYSV_lstat (__NR_SYSV + 117)
-#define __NR_SYSV_nfsmount (__NR_SYSV + 118)
-#define __NR_SYSV_nfssvc (__NR_SYSV + 119)
-#define __NR_SYSV_getfh (__NR_SYSV + 120)
-#define __NR_SYSV_async_daemon (__NR_SYSV + 121)
-#define __NR_SYSV_exportfs (__NR_SYSV + 122)
-#define __NR_SYSV_setregid (__NR_SYSV + 123)
-#define __NR_SYSV_setreuid (__NR_SYSV + 124)
-#define __NR_SYSV_getitimer (__NR_SYSV + 125)
-#define __NR_SYSV_setitimer (__NR_SYSV + 126)
-#define __NR_SYSV_adjtime (__NR_SYSV + 127)
-#define __NR_SYSV_BSD_getime (__NR_SYSV + 128)
-#define __NR_SYSV_sproc (__NR_SYSV + 129)
-#define __NR_SYSV_prctl (__NR_SYSV + 130)
-#define __NR_SYSV_procblk (__NR_SYSV + 131)
-#define __NR_SYSV_sprocsp (__NR_SYSV + 132)
-#define __NR_SYSV_sgigsc (__NR_SYSV + 133)
-#define __NR_SYSV_mmap (__NR_SYSV + 134)
-#define __NR_SYSV_munmap (__NR_SYSV + 135)
-#define __NR_SYSV_mprotect (__NR_SYSV + 136)
-#define __NR_SYSV_msync (__NR_SYSV + 137)
-#define __NR_SYSV_madvise (__NR_SYSV + 138)
-#define __NR_SYSV_pagelock (__NR_SYSV + 139)
-#define __NR_SYSV_getpagesize (__NR_SYSV + 140)
-#define __NR_SYSV_quotactl (__NR_SYSV + 141)
-#define __NR_SYSV_libdetach (__NR_SYSV + 142)
-#define __NR_SYSV_BSDgetpgrp (__NR_SYSV + 143)
-#define __NR_SYSV_BSDsetpgrp (__NR_SYSV + 144)
-#define __NR_SYSV_vhangup (__NR_SYSV + 145)
-#define __NR_SYSV_fsync (__NR_SYSV + 146)
-#define __NR_SYSV_fchdir (__NR_SYSV + 147)
-#define __NR_SYSV_getrlimit (__NR_SYSV + 148)
-#define __NR_SYSV_setrlimit (__NR_SYSV + 149)
-#define __NR_SYSV_cacheflush (__NR_SYSV + 150)
-#define __NR_SYSV_cachectl (__NR_SYSV + 151)
-#define __NR_SYSV_fchown (__NR_SYSV + 152)
-#define __NR_SYSV_fchmod (__NR_SYSV + 153)
-#define __NR_SYSV_wait3 (__NR_SYSV + 154)
-#define __NR_SYSV_socketpair (__NR_SYSV + 155)
-#define __NR_SYSV_sysinfo (__NR_SYSV + 156)
-#define __NR_SYSV_nuname (__NR_SYSV + 157)
-#define __NR_SYSV_xstat (__NR_SYSV + 158)
-#define __NR_SYSV_lxstat (__NR_SYSV + 159)
-#define __NR_SYSV_fxstat (__NR_SYSV + 160)
-#define __NR_SYSV_xmknod (__NR_SYSV + 161)
-#define __NR_SYSV_ksigaction (__NR_SYSV + 162)
-#define __NR_SYSV_sigpending (__NR_SYSV + 163)
-#define __NR_SYSV_sigprocmask (__NR_SYSV + 164)
-#define __NR_SYSV_sigsuspend (__NR_SYSV + 165)
-#define __NR_SYSV_sigpoll (__NR_SYSV + 166)
-#define __NR_SYSV_swapctl (__NR_SYSV + 167)
-#define __NR_SYSV_getcontext (__NR_SYSV + 168)
-#define __NR_SYSV_setcontext (__NR_SYSV + 169)
-#define __NR_SYSV_waitsys (__NR_SYSV + 170)
-#define __NR_SYSV_sigstack (__NR_SYSV + 171)
-#define __NR_SYSV_sigaltstack (__NR_SYSV + 172)
-#define __NR_SYSV_sigsendset (__NR_SYSV + 173)
-#define __NR_SYSV_statvfs (__NR_SYSV + 174)
-#define __NR_SYSV_fstatvfs (__NR_SYSV + 175)
-#define __NR_SYSV_getpmsg (__NR_SYSV + 176)
-#define __NR_SYSV_putpmsg (__NR_SYSV + 177)
-#define __NR_SYSV_lchown (__NR_SYSV + 178)
-#define __NR_SYSV_priocntl (__NR_SYSV + 179)
-#define __NR_SYSV_ksigqueue (__NR_SYSV + 180)
-#define __NR_SYSV_readv (__NR_SYSV + 181)
-#define __NR_SYSV_writev (__NR_SYSV + 182)
-#define __NR_SYSV_truncate64 (__NR_SYSV + 183)
-#define __NR_SYSV_ftruncate64 (__NR_SYSV + 184)
-#define __NR_SYSV_mmap64 (__NR_SYSV + 185)
-#define __NR_SYSV_dmi (__NR_SYSV + 186)
-#define __NR_SYSV_pread (__NR_SYSV + 187)
-#define __NR_SYSV_pwrite (__NR_SYSV + 188)
-
-/*
- * BSD 4.3 syscalls are in the range from 2000 to 2999
- */
-#define __NR_BSD43 2000
-#define __NR_BSD43_syscall (__NR_BSD43 + 0)
-#define __NR_BSD43_exit (__NR_BSD43 + 1)
-#define __NR_BSD43_fork (__NR_BSD43 + 2)
-#define __NR_BSD43_read (__NR_BSD43 + 3)
-#define __NR_BSD43_write (__NR_BSD43 + 4)
-#define __NR_BSD43_open (__NR_BSD43 + 5)
-#define __NR_BSD43_close (__NR_BSD43 + 6)
-#define __NR_BSD43_wait (__NR_BSD43 + 7)
-#define __NR_BSD43_creat (__NR_BSD43 + 8)
-#define __NR_BSD43_link (__NR_BSD43 + 9)
-#define __NR_BSD43_unlink (__NR_BSD43 + 10)
-#define __NR_BSD43_exec (__NR_BSD43 + 11)
-#define __NR_BSD43_chdir (__NR_BSD43 + 12)
-#define __NR_BSD43_time (__NR_BSD43 + 13)
-#define __NR_BSD43_mknod (__NR_BSD43 + 14)
-#define __NR_BSD43_chmod (__NR_BSD43 + 15)
-#define __NR_BSD43_chown (__NR_BSD43 + 16)
-#define __NR_BSD43_sbreak (__NR_BSD43 + 17)
-#define __NR_BSD43_oldstat (__NR_BSD43 + 18)
-#define __NR_BSD43_lseek (__NR_BSD43 + 19)
-#define __NR_BSD43_getpid (__NR_BSD43 + 20)
-#define __NR_BSD43_oldmount (__NR_BSD43 + 21)
-#define __NR_BSD43_umount (__NR_BSD43 + 22)
-#define __NR_BSD43_setuid (__NR_BSD43 + 23)
-#define __NR_BSD43_getuid (__NR_BSD43 + 24)
-#define __NR_BSD43_stime (__NR_BSD43 + 25)
-#define __NR_BSD43_ptrace (__NR_BSD43 + 26)
-#define __NR_BSD43_alarm (__NR_BSD43 + 27)
-#define __NR_BSD43_oldfstat (__NR_BSD43 + 28)
-#define __NR_BSD43_pause (__NR_BSD43 + 29)
-#define __NR_BSD43_utime (__NR_BSD43 + 30)
-#define __NR_BSD43_stty (__NR_BSD43 + 31)
-#define __NR_BSD43_gtty (__NR_BSD43 + 32)
-#define __NR_BSD43_access (__NR_BSD43 + 33)
-#define __NR_BSD43_nice (__NR_BSD43 + 34)
-#define __NR_BSD43_ftime (__NR_BSD43 + 35)
-#define __NR_BSD43_sync (__NR_BSD43 + 36)
-#define __NR_BSD43_kill (__NR_BSD43 + 37)
-#define __NR_BSD43_stat (__NR_BSD43 + 38)
-#define __NR_BSD43_oldsetpgrp (__NR_BSD43 + 39)
-#define __NR_BSD43_lstat (__NR_BSD43 + 40)
-#define __NR_BSD43_dup (__NR_BSD43 + 41)
-#define __NR_BSD43_pipe (__NR_BSD43 + 42)
-#define __NR_BSD43_times (__NR_BSD43 + 43)
-#define __NR_BSD43_profil (__NR_BSD43 + 44)
-#define __NR_BSD43_msgsys (__NR_BSD43 + 45)
-#define __NR_BSD43_setgid (__NR_BSD43 + 46)
-#define __NR_BSD43_getgid (__NR_BSD43 + 47)
-#define __NR_BSD43_ssig (__NR_BSD43 + 48)
-#define __NR_BSD43_reserved1 (__NR_BSD43 + 49)
-#define __NR_BSD43_reserved2 (__NR_BSD43 + 50)
-#define __NR_BSD43_sysacct (__NR_BSD43 + 51)
-#define __NR_BSD43_phys (__NR_BSD43 + 52)
-#define __NR_BSD43_lock (__NR_BSD43 + 53)
-#define __NR_BSD43_ioctl (__NR_BSD43 + 54)
-#define __NR_BSD43_reboot (__NR_BSD43 + 55)
-#define __NR_BSD43_mpxchan (__NR_BSD43 + 56)
-#define __NR_BSD43_symlink (__NR_BSD43 + 57)
-#define __NR_BSD43_readlink (__NR_BSD43 + 58)
-#define __NR_BSD43_execve (__NR_BSD43 + 59)
-#define __NR_BSD43_umask (__NR_BSD43 + 60)
-#define __NR_BSD43_chroot (__NR_BSD43 + 61)
-#define __NR_BSD43_fstat (__NR_BSD43 + 62)
-#define __NR_BSD43_reserved3 (__NR_BSD43 + 63)
-#define __NR_BSD43_getpagesize (__NR_BSD43 + 64)
-#define __NR_BSD43_mremap (__NR_BSD43 + 65)
-#define __NR_BSD43_vfork (__NR_BSD43 + 66)
-#define __NR_BSD43_vread (__NR_BSD43 + 67)
-#define __NR_BSD43_vwrite (__NR_BSD43 + 68)
-#define __NR_BSD43_sbrk (__NR_BSD43 + 69)
-#define __NR_BSD43_sstk (__NR_BSD43 + 70)
-#define __NR_BSD43_mmap (__NR_BSD43 + 71)
-#define __NR_BSD43_vadvise (__NR_BSD43 + 72)
-#define __NR_BSD43_munmap (__NR_BSD43 + 73)
-#define __NR_BSD43_mprotect (__NR_BSD43 + 74)
-#define __NR_BSD43_madvise (__NR_BSD43 + 75)
-#define __NR_BSD43_vhangup (__NR_BSD43 + 76)
-#define __NR_BSD43_vlimit (__NR_BSD43 + 77)
-#define __NR_BSD43_mincore (__NR_BSD43 + 78)
-#define __NR_BSD43_getgroups (__NR_BSD43 + 79)
-#define __NR_BSD43_setgroups (__NR_BSD43 + 80)
-#define __NR_BSD43_getpgrp (__NR_BSD43 + 81)
-#define __NR_BSD43_setpgrp (__NR_BSD43 + 82)
-#define __NR_BSD43_setitimer (__NR_BSD43 + 83)
-#define __NR_BSD43_wait3 (__NR_BSD43 + 84)
-#define __NR_BSD43_swapon (__NR_BSD43 + 85)
-#define __NR_BSD43_getitimer (__NR_BSD43 + 86)
-#define __NR_BSD43_gethostname (__NR_BSD43 + 87)
-#define __NR_BSD43_sethostname (__NR_BSD43 + 88)
-#define __NR_BSD43_getdtablesize (__NR_BSD43 + 89)
-#define __NR_BSD43_dup2 (__NR_BSD43 + 90)
-#define __NR_BSD43_getdopt (__NR_BSD43 + 91)
-#define __NR_BSD43_fcntl (__NR_BSD43 + 92)
-#define __NR_BSD43_select (__NR_BSD43 + 93)
-#define __NR_BSD43_setdopt (__NR_BSD43 + 94)
-#define __NR_BSD43_fsync (__NR_BSD43 + 95)
-#define __NR_BSD43_setpriority (__NR_BSD43 + 96)
-#define __NR_BSD43_socket (__NR_BSD43 + 97)
-#define __NR_BSD43_connect (__NR_BSD43 + 98)
-#define __NR_BSD43_oldaccept (__NR_BSD43 + 99)
-#define __NR_BSD43_getpriority (__NR_BSD43 + 100)
-#define __NR_BSD43_send (__NR_BSD43 + 101)
-#define __NR_BSD43_recv (__NR_BSD43 + 102)
-#define __NR_BSD43_sigreturn (__NR_BSD43 + 103)
-#define __NR_BSD43_bind (__NR_BSD43 + 104)
-#define __NR_BSD43_setsockopt (__NR_BSD43 + 105)
-#define __NR_BSD43_listen (__NR_BSD43 + 106)
-#define __NR_BSD43_vtimes (__NR_BSD43 + 107)
-#define __NR_BSD43_sigvec (__NR_BSD43 + 108)
-#define __NR_BSD43_sigblock (__NR_BSD43 + 109)
-#define __NR_BSD43_sigsetmask (__NR_BSD43 + 110)
-#define __NR_BSD43_sigpause (__NR_BSD43 + 111)
-#define __NR_BSD43_sigstack (__NR_BSD43 + 112)
-#define __NR_BSD43_oldrecvmsg (__NR_BSD43 + 113)
-#define __NR_BSD43_oldsendmsg (__NR_BSD43 + 114)
-#define __NR_BSD43_vtrace (__NR_BSD43 + 115)
-#define __NR_BSD43_gettimeofday (__NR_BSD43 + 116)
-#define __NR_BSD43_getrusage (__NR_BSD43 + 117)
-#define __NR_BSD43_getsockopt (__NR_BSD43 + 118)
-#define __NR_BSD43_reserved4 (__NR_BSD43 + 119)
-#define __NR_BSD43_readv (__NR_BSD43 + 120)
-#define __NR_BSD43_writev (__NR_BSD43 + 121)
-#define __NR_BSD43_settimeofday (__NR_BSD43 + 122)
-#define __NR_BSD43_fchown (__NR_BSD43 + 123)
-#define __NR_BSD43_fchmod (__NR_BSD43 + 124)
-#define __NR_BSD43_oldrecvfrom (__NR_BSD43 + 125)
-#define __NR_BSD43_setreuid (__NR_BSD43 + 126)
-#define __NR_BSD43_setregid (__NR_BSD43 + 127)
-#define __NR_BSD43_rename (__NR_BSD43 + 128)
-#define __NR_BSD43_truncate (__NR_BSD43 + 129)
-#define __NR_BSD43_ftruncate (__NR_BSD43 + 130)
-#define __NR_BSD43_flock (__NR_BSD43 + 131)
-#define __NR_BSD43_semsys (__NR_BSD43 + 132)
-#define __NR_BSD43_sendto (__NR_BSD43 + 133)
-#define __NR_BSD43_shutdown (__NR_BSD43 + 134)
-#define __NR_BSD43_socketpair (__NR_BSD43 + 135)
-#define __NR_BSD43_mkdir (__NR_BSD43 + 136)
-#define __NR_BSD43_rmdir (__NR_BSD43 + 137)
-#define __NR_BSD43_utimes (__NR_BSD43 + 138)
-#define __NR_BSD43_sigcleanup (__NR_BSD43 + 139)
-#define __NR_BSD43_adjtime (__NR_BSD43 + 140)
-#define __NR_BSD43_oldgetpeername (__NR_BSD43 + 141)
-#define __NR_BSD43_gethostid (__NR_BSD43 + 142)
-#define __NR_BSD43_sethostid (__NR_BSD43 + 143)
-#define __NR_BSD43_getrlimit (__NR_BSD43 + 144)
-#define __NR_BSD43_setrlimit (__NR_BSD43 + 145)
-#define __NR_BSD43_killpg (__NR_BSD43 + 146)
-#define __NR_BSD43_shmsys (__NR_BSD43 + 147)
-#define __NR_BSD43_quota (__NR_BSD43 + 148)
-#define __NR_BSD43_qquota (__NR_BSD43 + 149)
-#define __NR_BSD43_oldgetsockname (__NR_BSD43 + 150)
-#define __NR_BSD43_sysmips (__NR_BSD43 + 151)
-#define __NR_BSD43_cacheflush (__NR_BSD43 + 152)
-#define __NR_BSD43_cachectl (__NR_BSD43 + 153)
-#define __NR_BSD43_debug (__NR_BSD43 + 154)
-#define __NR_BSD43_reserved5 (__NR_BSD43 + 155)
-#define __NR_BSD43_reserved6 (__NR_BSD43 + 156)
-#define __NR_BSD43_nfs_mount (__NR_BSD43 + 157)
-#define __NR_BSD43_nfs_svc (__NR_BSD43 + 158)
-#define __NR_BSD43_getdirentries (__NR_BSD43 + 159)
-#define __NR_BSD43_statfs (__NR_BSD43 + 160)
-#define __NR_BSD43_fstatfs (__NR_BSD43 + 161)
-#define __NR_BSD43_unmount (__NR_BSD43 + 162)
-#define __NR_BSD43_async_daemon (__NR_BSD43 + 163)
-#define __NR_BSD43_nfs_getfh (__NR_BSD43 + 164)
-#define __NR_BSD43_getdomainname (__NR_BSD43 + 165)
-#define __NR_BSD43_setdomainname (__NR_BSD43 + 166)
-#define __NR_BSD43_pcfs_mount (__NR_BSD43 + 167)
-#define __NR_BSD43_quotactl (__NR_BSD43 + 168)
-#define __NR_BSD43_oldexportfs (__NR_BSD43 + 169)
-#define __NR_BSD43_smount (__NR_BSD43 + 170)
-#define __NR_BSD43_mipshwconf (__NR_BSD43 + 171)
-#define __NR_BSD43_exportfs (__NR_BSD43 + 172)
-#define __NR_BSD43_nfsfh_open (__NR_BSD43 + 173)
-#define __NR_BSD43_libattach (__NR_BSD43 + 174)
-#define __NR_BSD43_libdetach (__NR_BSD43 + 175)
-#define __NR_BSD43_accept (__NR_BSD43 + 176)
-#define __NR_BSD43_reserved7 (__NR_BSD43 + 177)
-#define __NR_BSD43_reserved8 (__NR_BSD43 + 178)
-#define __NR_BSD43_recvmsg (__NR_BSD43 + 179)
-#define __NR_BSD43_recvfrom (__NR_BSD43 + 180)
-#define __NR_BSD43_sendmsg (__NR_BSD43 + 181)
-#define __NR_BSD43_getpeername (__NR_BSD43 + 182)
-#define __NR_BSD43_getsockname (__NR_BSD43 + 183)
-#define __NR_BSD43_aread (__NR_BSD43 + 184)
-#define __NR_BSD43_awrite (__NR_BSD43 + 185)
-#define __NR_BSD43_listio (__NR_BSD43 + 186)
-#define __NR_BSD43_acancel (__NR_BSD43 + 187)
-#define __NR_BSD43_astatus (__NR_BSD43 + 188)
-#define __NR_BSD43_await (__NR_BSD43 + 189)
-#define __NR_BSD43_areadv (__NR_BSD43 + 190)
-#define __NR_BSD43_awritev (__NR_BSD43 + 191)
-
-/*
- * POSIX syscalls are in the range from 3000 to 3999
- */
-#define __NR_POSIX 3000
-#define __NR_POSIX_syscall (__NR_POSIX + 0)
-#define __NR_POSIX_exit (__NR_POSIX + 1)
-#define __NR_POSIX_fork (__NR_POSIX + 2)
-#define __NR_POSIX_read (__NR_POSIX + 3)
-#define __NR_POSIX_write (__NR_POSIX + 4)
-#define __NR_POSIX_open (__NR_POSIX + 5)
-#define __NR_POSIX_close (__NR_POSIX + 6)
-#define __NR_POSIX_wait (__NR_POSIX + 7)
-#define __NR_POSIX_creat (__NR_POSIX + 8)
-#define __NR_POSIX_link (__NR_POSIX + 9)
-#define __NR_POSIX_unlink (__NR_POSIX + 10)
-#define __NR_POSIX_exec (__NR_POSIX + 11)
-#define __NR_POSIX_chdir (__NR_POSIX + 12)
-#define __NR_POSIX_gtime (__NR_POSIX + 13)
-#define __NR_POSIX_mknod (__NR_POSIX + 14)
-#define __NR_POSIX_chmod (__NR_POSIX + 15)
-#define __NR_POSIX_chown (__NR_POSIX + 16)
-#define __NR_POSIX_sbreak (__NR_POSIX + 17)
-#define __NR_POSIX_stat (__NR_POSIX + 18)
-#define __NR_POSIX_lseek (__NR_POSIX + 19)
-#define __NR_POSIX_getpid (__NR_POSIX + 20)
-#define __NR_POSIX_mount (__NR_POSIX + 21)
-#define __NR_POSIX_umount (__NR_POSIX + 22)
-#define __NR_POSIX_setuid (__NR_POSIX + 23)
-#define __NR_POSIX_getuid (__NR_POSIX + 24)
-#define __NR_POSIX_stime (__NR_POSIX + 25)
-#define __NR_POSIX_ptrace (__NR_POSIX + 26)
-#define __NR_POSIX_alarm (__NR_POSIX + 27)
-#define __NR_POSIX_fstat (__NR_POSIX + 28)
-#define __NR_POSIX_pause (__NR_POSIX + 29)
-#define __NR_POSIX_utime (__NR_POSIX + 30)
-#define __NR_POSIX_stty (__NR_POSIX + 31)
-#define __NR_POSIX_gtty (__NR_POSIX + 32)
-#define __NR_POSIX_access (__NR_POSIX + 33)
-#define __NR_POSIX_nice (__NR_POSIX + 34)
-#define __NR_POSIX_statfs (__NR_POSIX + 35)
-#define __NR_POSIX_sync (__NR_POSIX + 36)
-#define __NR_POSIX_kill (__NR_POSIX + 37)
-#define __NR_POSIX_fstatfs (__NR_POSIX + 38)
-#define __NR_POSIX_getpgrp (__NR_POSIX + 39)
-#define __NR_POSIX_syssgi (__NR_POSIX + 40)
-#define __NR_POSIX_dup (__NR_POSIX + 41)
-#define __NR_POSIX_pipe (__NR_POSIX + 42)
-#define __NR_POSIX_times (__NR_POSIX + 43)
-#define __NR_POSIX_profil (__NR_POSIX + 44)
-#define __NR_POSIX_lock (__NR_POSIX + 45)
-#define __NR_POSIX_setgid (__NR_POSIX + 46)
-#define __NR_POSIX_getgid (__NR_POSIX + 47)
-#define __NR_POSIX_sig (__NR_POSIX + 48)
-#define __NR_POSIX_msgsys (__NR_POSIX + 49)
-#define __NR_POSIX_sysmips (__NR_POSIX + 50)
-#define __NR_POSIX_sysacct (__NR_POSIX + 51)
-#define __NR_POSIX_shmsys (__NR_POSIX + 52)
-#define __NR_POSIX_semsys (__NR_POSIX + 53)
-#define __NR_POSIX_ioctl (__NR_POSIX + 54)
-#define __NR_POSIX_uadmin (__NR_POSIX + 55)
-#define __NR_POSIX_exch (__NR_POSIX + 56)
-#define __NR_POSIX_utssys (__NR_POSIX + 57)
-#define __NR_POSIX_USG_reserved1 (__NR_POSIX + 58)
-#define __NR_POSIX_exece (__NR_POSIX + 59)
-#define __NR_POSIX_umask (__NR_POSIX + 60)
-#define __NR_POSIX_chroot (__NR_POSIX + 61)
-#define __NR_POSIX_fcntl (__NR_POSIX + 62)
-#define __NR_POSIX_ulimit (__NR_POSIX + 63)
-#define __NR_POSIX_SAFARI4_reserved1 (__NR_POSIX + 64)
-#define __NR_POSIX_SAFARI4_reserved2 (__NR_POSIX + 65)
-#define __NR_POSIX_SAFARI4_reserved3 (__NR_POSIX + 66)
-#define __NR_POSIX_SAFARI4_reserved4 (__NR_POSIX + 67)
-#define __NR_POSIX_SAFARI4_reserved5 (__NR_POSIX + 68)
-#define __NR_POSIX_SAFARI4_reserved6 (__NR_POSIX + 69)
-#define __NR_POSIX_advfs (__NR_POSIX + 70)
-#define __NR_POSIX_unadvfs (__NR_POSIX + 71)
-#define __NR_POSIX_rmount (__NR_POSIX + 72)
-#define __NR_POSIX_rumount (__NR_POSIX + 73)
-#define __NR_POSIX_rfstart (__NR_POSIX + 74)
-#define __NR_POSIX_reserved1 (__NR_POSIX + 75)
-#define __NR_POSIX_rdebug (__NR_POSIX + 76)
-#define __NR_POSIX_rfstop (__NR_POSIX + 77)
-#define __NR_POSIX_rfsys (__NR_POSIX + 78)
-#define __NR_POSIX_rmdir (__NR_POSIX + 79)
-#define __NR_POSIX_mkdir (__NR_POSIX + 80)
-#define __NR_POSIX_getdents (__NR_POSIX + 81)
-#define __NR_POSIX_sginap (__NR_POSIX + 82)
-#define __NR_POSIX_sgikopt (__NR_POSIX + 83)
-#define __NR_POSIX_sysfs (__NR_POSIX + 84)
-#define __NR_POSIX_getmsg (__NR_POSIX + 85)
-#define __NR_POSIX_putmsg (__NR_POSIX + 86)
-#define __NR_POSIX_poll (__NR_POSIX + 87)
-#define __NR_POSIX_sigreturn (__NR_POSIX + 88)
-#define __NR_POSIX_accept (__NR_POSIX + 89)
-#define __NR_POSIX_bind (__NR_POSIX + 90)
-#define __NR_POSIX_connect (__NR_POSIX + 91)
-#define __NR_POSIX_gethostid (__NR_POSIX + 92)
-#define __NR_POSIX_getpeername (__NR_POSIX + 93)
-#define __NR_POSIX_getsockname (__NR_POSIX + 94)
-#define __NR_POSIX_getsockopt (__NR_POSIX + 95)
-#define __NR_POSIX_listen (__NR_POSIX + 96)
-#define __NR_POSIX_recv (__NR_POSIX + 97)
-#define __NR_POSIX_recvfrom (__NR_POSIX + 98)
-#define __NR_POSIX_recvmsg (__NR_POSIX + 99)
-#define __NR_POSIX_select (__NR_POSIX + 100)
-#define __NR_POSIX_send (__NR_POSIX + 101)
-#define __NR_POSIX_sendmsg (__NR_POSIX + 102)
-#define __NR_POSIX_sendto (__NR_POSIX + 103)
-#define __NR_POSIX_sethostid (__NR_POSIX + 104)
-#define __NR_POSIX_setsockopt (__NR_POSIX + 105)
-#define __NR_POSIX_shutdown (__NR_POSIX + 106)
-#define __NR_POSIX_socket (__NR_POSIX + 107)
-#define __NR_POSIX_gethostname (__NR_POSIX + 108)
-#define __NR_POSIX_sethostname (__NR_POSIX + 109)
-#define __NR_POSIX_getdomainname (__NR_POSIX + 110)
-#define __NR_POSIX_setdomainname (__NR_POSIX + 111)
-#define __NR_POSIX_truncate (__NR_POSIX + 112)
-#define __NR_POSIX_ftruncate (__NR_POSIX + 113)
-#define __NR_POSIX_rename (__NR_POSIX + 114)
-#define __NR_POSIX_symlink (__NR_POSIX + 115)
-#define __NR_POSIX_readlink (__NR_POSIX + 116)
-#define __NR_POSIX_lstat (__NR_POSIX + 117)
-#define __NR_POSIX_nfs_mount (__NR_POSIX + 118)
-#define __NR_POSIX_nfs_svc (__NR_POSIX + 119)
-#define __NR_POSIX_nfs_getfh (__NR_POSIX + 120)
-#define __NR_POSIX_async_daemon (__NR_POSIX + 121)
-#define __NR_POSIX_exportfs (__NR_POSIX + 122)
-#define __NR_POSIX_SGI_setregid (__NR_POSIX + 123)
-#define __NR_POSIX_SGI_setreuid (__NR_POSIX + 124)
-#define __NR_POSIX_getitimer (__NR_POSIX + 125)
-#define __NR_POSIX_setitimer (__NR_POSIX + 126)
-#define __NR_POSIX_adjtime (__NR_POSIX + 127)
-#define __NR_POSIX_SGI_bsdgettime (__NR_POSIX + 128)
-#define __NR_POSIX_SGI_sproc (__NR_POSIX + 129)
-#define __NR_POSIX_SGI_prctl (__NR_POSIX + 130)
-#define __NR_POSIX_SGI_blkproc (__NR_POSIX + 131)
-#define __NR_POSIX_SGI_reserved1 (__NR_POSIX + 132)
-#define __NR_POSIX_SGI_sgigsc (__NR_POSIX + 133)
-#define __NR_POSIX_SGI_mmap (__NR_POSIX + 134)
-#define __NR_POSIX_SGI_munmap (__NR_POSIX + 135)
-#define __NR_POSIX_SGI_mprotect (__NR_POSIX + 136)
-#define __NR_POSIX_SGI_msync (__NR_POSIX + 137)
-#define __NR_POSIX_SGI_madvise (__NR_POSIX + 138)
-#define __NR_POSIX_SGI_mpin (__NR_POSIX + 139)
-#define __NR_POSIX_SGI_getpagesize (__NR_POSIX + 140)
-#define __NR_POSIX_SGI_libattach (__NR_POSIX + 141)
-#define __NR_POSIX_SGI_libdetach (__NR_POSIX + 142)
-#define __NR_POSIX_SGI_getpgrp (__NR_POSIX + 143)
-#define __NR_POSIX_SGI_setpgrp (__NR_POSIX + 144)
-#define __NR_POSIX_SGI_reserved2 (__NR_POSIX + 145)
-#define __NR_POSIX_SGI_reserved3 (__NR_POSIX + 146)
-#define __NR_POSIX_SGI_reserved4 (__NR_POSIX + 147)
-#define __NR_POSIX_SGI_reserved5 (__NR_POSIX + 148)
-#define __NR_POSIX_SGI_reserved6 (__NR_POSIX + 149)
-#define __NR_POSIX_cacheflush (__NR_POSIX + 150)
-#define __NR_POSIX_cachectl (__NR_POSIX + 151)
-#define __NR_POSIX_fchown (__NR_POSIX + 152)
-#define __NR_POSIX_fchmod (__NR_POSIX + 153)
-#define __NR_POSIX_wait3 (__NR_POSIX + 154)
-#define __NR_POSIX_mmap (__NR_POSIX + 155)
-#define __NR_POSIX_munmap (__NR_POSIX + 156)
-#define __NR_POSIX_madvise (__NR_POSIX + 157)
-#define __NR_POSIX_BSD_getpagesize (__NR_POSIX + 158)
-#define __NR_POSIX_setreuid (__NR_POSIX + 159)
-#define __NR_POSIX_setregid (__NR_POSIX + 160)
-#define __NR_POSIX_setpgid (__NR_POSIX + 161)
-#define __NR_POSIX_getgroups (__NR_POSIX + 162)
-#define __NR_POSIX_setgroups (__NR_POSIX + 163)
-#define __NR_POSIX_gettimeofday (__NR_POSIX + 164)
-#define __NR_POSIX_getrusage (__NR_POSIX + 165)
-#define __NR_POSIX_getrlimit (__NR_POSIX + 166)
-#define __NR_POSIX_setrlimit (__NR_POSIX + 167)
-#define __NR_POSIX_waitpid (__NR_POSIX + 168)
-#define __NR_POSIX_dup2 (__NR_POSIX + 169)
-#define __NR_POSIX_reserved2 (__NR_POSIX + 170)
-#define __NR_POSIX_reserved3 (__NR_POSIX + 171)
-#define __NR_POSIX_reserved4 (__NR_POSIX + 172)
-#define __NR_POSIX_reserved5 (__NR_POSIX + 173)
-#define __NR_POSIX_reserved6 (__NR_POSIX + 174)
-#define __NR_POSIX_reserved7 (__NR_POSIX + 175)
-#define __NR_POSIX_reserved8 (__NR_POSIX + 176)
-#define __NR_POSIX_reserved9 (__NR_POSIX + 177)
-#define __NR_POSIX_reserved10 (__NR_POSIX + 178)
-#define __NR_POSIX_reserved11 (__NR_POSIX + 179)
-#define __NR_POSIX_reserved12 (__NR_POSIX + 180)
-#define __NR_POSIX_reserved13 (__NR_POSIX + 181)
-#define __NR_POSIX_reserved14 (__NR_POSIX + 182)
-#define __NR_POSIX_reserved15 (__NR_POSIX + 183)
-#define __NR_POSIX_reserved16 (__NR_POSIX + 184)
-#define __NR_POSIX_reserved17 (__NR_POSIX + 185)
-#define __NR_POSIX_reserved18 (__NR_POSIX + 186)
-#define __NR_POSIX_reserved19 (__NR_POSIX + 187)
-#define __NR_POSIX_reserved20 (__NR_POSIX + 188)
-#define __NR_POSIX_reserved21 (__NR_POSIX + 189)
-#define __NR_POSIX_reserved22 (__NR_POSIX + 190)
-#define __NR_POSIX_reserved23 (__NR_POSIX + 191)
-#define __NR_POSIX_reserved24 (__NR_POSIX + 192)
-#define __NR_POSIX_reserved25 (__NR_POSIX + 193)
-#define __NR_POSIX_reserved26 (__NR_POSIX + 194)
-#define __NR_POSIX_reserved27 (__NR_POSIX + 195)
-#define __NR_POSIX_reserved28 (__NR_POSIX + 196)
-#define __NR_POSIX_reserved29 (__NR_POSIX + 197)
-#define __NR_POSIX_reserved30 (__NR_POSIX + 198)
-#define __NR_POSIX_reserved31 (__NR_POSIX + 199)
-#define __NR_POSIX_reserved32 (__NR_POSIX + 200)
-#define __NR_POSIX_reserved33 (__NR_POSIX + 201)
-#define __NR_POSIX_reserved34 (__NR_POSIX + 202)
-#define __NR_POSIX_reserved35 (__NR_POSIX + 203)
-#define __NR_POSIX_reserved36 (__NR_POSIX + 204)
-#define __NR_POSIX_reserved37 (__NR_POSIX + 205)
-#define __NR_POSIX_reserved38 (__NR_POSIX + 206)
-#define __NR_POSIX_reserved39 (__NR_POSIX + 207)
-#define __NR_POSIX_reserved40 (__NR_POSIX + 208)
-#define __NR_POSIX_reserved41 (__NR_POSIX + 209)
-#define __NR_POSIX_reserved42 (__NR_POSIX + 210)
-#define __NR_POSIX_reserved43 (__NR_POSIX + 211)
-#define __NR_POSIX_reserved44 (__NR_POSIX + 212)
-#define __NR_POSIX_reserved45 (__NR_POSIX + 213)
-#define __NR_POSIX_reserved46 (__NR_POSIX + 214)
-#define __NR_POSIX_reserved47 (__NR_POSIX + 215)
-#define __NR_POSIX_reserved48 (__NR_POSIX + 216)
-#define __NR_POSIX_reserved49 (__NR_POSIX + 217)
-#define __NR_POSIX_reserved50 (__NR_POSIX + 218)
-#define __NR_POSIX_reserved51 (__NR_POSIX + 219)
-#define __NR_POSIX_reserved52 (__NR_POSIX + 220)
-#define __NR_POSIX_reserved53 (__NR_POSIX + 221)
-#define __NR_POSIX_reserved54 (__NR_POSIX + 222)
-#define __NR_POSIX_reserved55 (__NR_POSIX + 223)
-#define __NR_POSIX_reserved56 (__NR_POSIX + 224)
-#define __NR_POSIX_reserved57 (__NR_POSIX + 225)
-#define __NR_POSIX_reserved58 (__NR_POSIX + 226)
-#define __NR_POSIX_reserved59 (__NR_POSIX + 227)
-#define __NR_POSIX_reserved60 (__NR_POSIX + 228)
-#define __NR_POSIX_reserved61 (__NR_POSIX + 229)
-#define __NR_POSIX_reserved62 (__NR_POSIX + 230)
-#define __NR_POSIX_reserved63 (__NR_POSIX + 231)
-#define __NR_POSIX_reserved64 (__NR_POSIX + 232)
-#define __NR_POSIX_reserved65 (__NR_POSIX + 233)
-#define __NR_POSIX_reserved66 (__NR_POSIX + 234)
-#define __NR_POSIX_reserved67 (__NR_POSIX + 235)
-#define __NR_POSIX_reserved68 (__NR_POSIX + 236)
-#define __NR_POSIX_reserved69 (__NR_POSIX + 237)
-#define __NR_POSIX_reserved70 (__NR_POSIX + 238)
-#define __NR_POSIX_reserved71 (__NR_POSIX + 239)
-#define __NR_POSIX_reserved72 (__NR_POSIX + 240)
-#define __NR_POSIX_reserved73 (__NR_POSIX + 241)
-#define __NR_POSIX_reserved74 (__NR_POSIX + 242)
-#define __NR_POSIX_reserved75 (__NR_POSIX + 243)
-#define __NR_POSIX_reserved76 (__NR_POSIX + 244)
-#define __NR_POSIX_reserved77 (__NR_POSIX + 245)
-#define __NR_POSIX_reserved78 (__NR_POSIX + 246)
-#define __NR_POSIX_reserved79 (__NR_POSIX + 247)
-#define __NR_POSIX_reserved80 (__NR_POSIX + 248)
-#define __NR_POSIX_reserved81 (__NR_POSIX + 249)
-#define __NR_POSIX_reserved82 (__NR_POSIX + 250)
-#define __NR_POSIX_reserved83 (__NR_POSIX + 251)
-#define __NR_POSIX_reserved84 (__NR_POSIX + 252)
-#define __NR_POSIX_reserved85 (__NR_POSIX + 253)
-#define __NR_POSIX_reserved86 (__NR_POSIX + 254)
-#define __NR_POSIX_reserved87 (__NR_POSIX + 255)
-#define __NR_POSIX_reserved88 (__NR_POSIX + 256)
-#define __NR_POSIX_reserved89 (__NR_POSIX + 257)
-#define __NR_POSIX_reserved90 (__NR_POSIX + 258)
-#define __NR_POSIX_reserved91 (__NR_POSIX + 259)
-#define __NR_POSIX_netboot (__NR_POSIX + 260)
-#define __NR_POSIX_netunboot (__NR_POSIX + 261)
-#define __NR_POSIX_rdump (__NR_POSIX + 262)
-#define __NR_POSIX_setsid (__NR_POSIX + 263)
-#define __NR_POSIX_getmaxsig (__NR_POSIX + 264)
-#define __NR_POSIX_sigpending (__NR_POSIX + 265)
-#define __NR_POSIX_sigprocmask (__NR_POSIX + 266)
-#define __NR_POSIX_sigsuspend (__NR_POSIX + 267)
-#define __NR_POSIX_sigaction (__NR_POSIX + 268)
-#define __NR_POSIX_MIPS_reserved1 (__NR_POSIX + 269)
-#define __NR_POSIX_MIPS_reserved2 (__NR_POSIX + 270)
-#define __NR_POSIX_MIPS_reserved3 (__NR_POSIX + 271)
-#define __NR_POSIX_MIPS_reserved4 (__NR_POSIX + 272)
-#define __NR_POSIX_MIPS_reserved5 (__NR_POSIX + 273)
-#define __NR_POSIX_MIPS_reserved6 (__NR_POSIX + 274)
-#define __NR_POSIX_MIPS_reserved7 (__NR_POSIX + 275)
-#define __NR_POSIX_MIPS_reserved8 (__NR_POSIX + 276)
-#define __NR_POSIX_MIPS_reserved9 (__NR_POSIX + 277)
-#define __NR_POSIX_MIPS_reserved10 (__NR_POSIX + 278)
-#define __NR_POSIX_MIPS_reserved11 (__NR_POSIX + 279)
-#define __NR_POSIX_TANDEM_reserved1 (__NR_POSIX + 280)
-#define __NR_POSIX_TANDEM_reserved2 (__NR_POSIX + 281)
-#define __NR_POSIX_TANDEM_reserved3 (__NR_POSIX + 282)
-#define __NR_POSIX_TANDEM_reserved4 (__NR_POSIX + 283)
-#define __NR_POSIX_TANDEM_reserved5 (__NR_POSIX + 284)
-#define __NR_POSIX_TANDEM_reserved6 (__NR_POSIX + 285)
-#define __NR_POSIX_TANDEM_reserved7 (__NR_POSIX + 286)
-#define __NR_POSIX_TANDEM_reserved8 (__NR_POSIX + 287)
-#define __NR_POSIX_TANDEM_reserved9 (__NR_POSIX + 288)
-#define __NR_POSIX_TANDEM_reserved10 (__NR_POSIX + 289)
-#define __NR_POSIX_TANDEM_reserved11 (__NR_POSIX + 290)
-#define __NR_POSIX_TANDEM_reserved12 (__NR_POSIX + 291)
-#define __NR_POSIX_TANDEM_reserved13 (__NR_POSIX + 292)
-#define __NR_POSIX_TANDEM_reserved14 (__NR_POSIX + 293)
-#define __NR_POSIX_TANDEM_reserved15 (__NR_POSIX + 294)
-#define __NR_POSIX_TANDEM_reserved16 (__NR_POSIX + 295)
-#define __NR_POSIX_TANDEM_reserved17 (__NR_POSIX + 296)
-#define __NR_POSIX_TANDEM_reserved18 (__NR_POSIX + 297)
-#define __NR_POSIX_TANDEM_reserved19 (__NR_POSIX + 298)
-#define __NR_POSIX_TANDEM_reserved20 (__NR_POSIX + 299)
-#define __NR_POSIX_SGI_reserved7 (__NR_POSIX + 300)
-#define __NR_POSIX_SGI_reserved8 (__NR_POSIX + 301)
-#define __NR_POSIX_SGI_reserved9 (__NR_POSIX + 302)
-#define __NR_POSIX_SGI_reserved10 (__NR_POSIX + 303)
-#define __NR_POSIX_SGI_reserved11 (__NR_POSIX + 304)
-#define __NR_POSIX_SGI_reserved12 (__NR_POSIX + 305)
-#define __NR_POSIX_SGI_reserved13 (__NR_POSIX + 306)
-#define __NR_POSIX_SGI_reserved14 (__NR_POSIX + 307)
-#define __NR_POSIX_SGI_reserved15 (__NR_POSIX + 308)
-#define __NR_POSIX_SGI_reserved16 (__NR_POSIX + 309)
-#define __NR_POSIX_SGI_reserved17 (__NR_POSIX + 310)
-#define __NR_POSIX_SGI_reserved18 (__NR_POSIX + 311)
-#define __NR_POSIX_SGI_reserved19 (__NR_POSIX + 312)
-#define __NR_POSIX_SGI_reserved20 (__NR_POSIX + 313)
-#define __NR_POSIX_SGI_reserved21 (__NR_POSIX + 314)
-#define __NR_POSIX_SGI_reserved22 (__NR_POSIX + 315)
-#define __NR_POSIX_SGI_reserved23 (__NR_POSIX + 316)
-#define __NR_POSIX_SGI_reserved24 (__NR_POSIX + 317)
-#define __NR_POSIX_SGI_reserved25 (__NR_POSIX + 318)
-#define __NR_POSIX_SGI_reserved26 (__NR_POSIX + 319)
-
-#endif /* _ASM_RISCOS_SYSCALL_H */
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 330c4e497af..e8e5d414337 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -159,11 +159,21 @@ struct task_struct;
do { \
if (cpu_has_dsp) \
__save_dsp(prev); \
- (last) = resume(prev, next, next->thread_info); \
+ (last) = resume(prev, next, task_thread_info(next)); \
if (cpu_has_dsp) \
__restore_dsp(current); \
} while(0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
__u32 retval;
diff --git a/include/asm-mips/thread_info.h b/include/asm-mips/thread_info.h
index e6c24472e03..1612b3fe108 100644
--- a/include/asm-mips/thread_info.h
+++ b/include/asm-mips/thread_info.h
@@ -97,8 +97,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
#endif
#define free_thread_info(info) kfree(info)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-mips/vr41xx/capcella.h b/include/asm-mips/vr41xx/capcella.h
index 5b55083c528..d10ffda50de 100644
--- a/include/asm-mips/vr41xx/capcella.h
+++ b/include/asm-mips/vr41xx/capcella.h
@@ -1,7 +1,7 @@
/*
* capcella.h, Include file for ZAO Networks Capcella.
*
- * Copyright (C) 2002-2004 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2002-2004 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/asm-mips/vr41xx/e55.h b/include/asm-mips/vr41xx/e55.h
index ea37b56fc66..558f2269bf3 100644
--- a/include/asm-mips/vr41xx/e55.h
+++ b/include/asm-mips/vr41xx/e55.h
@@ -1,7 +1,7 @@
/*
* e55.h, Include file for CASIO CASSIOPEIA E-10/15/55/65.
*
- * Copyright (C) 2002-2004 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2002-2004 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/asm-mips/vr41xx/giu.h b/include/asm-mips/vr41xx/giu.h
index 8590885a763..8109cda557d 100644
--- a/include/asm-mips/vr41xx/giu.h
+++ b/include/asm-mips/vr41xx/giu.h
@@ -1,7 +1,7 @@
/*
* Include file for NEC VR4100 series General-purpose I/O Unit.
*
- * Copyright (C) 2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/asm-mips/vr41xx/mpc30x.h b/include/asm-mips/vr41xx/mpc30x.h
index e6ac3c8e8ba..a6cbe4da666 100644
--- a/include/asm-mips/vr41xx/mpc30x.h
+++ b/include/asm-mips/vr41xx/mpc30x.h
@@ -1,7 +1,7 @@
/*
* mpc30x.h, Include file for Victor MP-C303/304.
*
- * Copyright (C) 2002-2004 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2002-2004 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/asm-mips/vr41xx/pci.h b/include/asm-mips/vr41xx/pci.h
index c473aa78d1d..6fc01ce1977 100644
--- a/include/asm-mips/vr41xx/pci.h
+++ b/include/asm-mips/vr41xx/pci.h
@@ -1,7 +1,7 @@
/*
* Include file for NEC VR4100 series PCI Control Unit.
*
- * Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2004-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/asm-mips/vr41xx/siu.h b/include/asm-mips/vr41xx/siu.h
index 865cc07ddd7..1fcf6e8082b 100644
--- a/include/asm-mips/vr41xx/siu.h
+++ b/include/asm-mips/vr41xx/siu.h
@@ -1,7 +1,7 @@
/*
* Include file for NEC VR4100 series Serial Interface Unit.
*
- * Copyright (C) 2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/asm-mips/vr41xx/tb0219.h b/include/asm-mips/vr41xx/tb0219.h
index 273c6392688..b318b9612a8 100644
--- a/include/asm-mips/vr41xx/tb0219.h
+++ b/include/asm-mips/vr41xx/tb0219.h
@@ -1,7 +1,7 @@
/*
* tb0219.h, Include file for TANBAC TB0219.
*
- * Copyright (C) 2002-2004 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2002-2004 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* Modified for TANBAC TB0219:
* Copyright (C) 2003 Megasolution Inc. <matsu@megasolution.jp>
diff --git a/include/asm-mips/vr41xx/tb0226.h b/include/asm-mips/vr41xx/tb0226.h
index 0ff9a60ecac..2513f450e2d 100644
--- a/include/asm-mips/vr41xx/tb0226.h
+++ b/include/asm-mips/vr41xx/tb0226.h
@@ -1,7 +1,7 @@
/*
* tb0226.h, Include file for TANBAC TB0226.
*
- * Copyright (C) 2002-2004 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2002-2004 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/asm-mips/vr41xx/vr41xx.h b/include/asm-mips/vr41xx/vr41xx.h
index bd2723c3090..70828d5fae9 100644
--- a/include/asm-mips/vr41xx/vr41xx.h
+++ b/include/asm-mips/vr41xx/vr41xx.h
@@ -7,7 +7,7 @@
* Copyright (C) 2001, 2002 Paul Mundt
* Copyright (C) 2002 MontaVista Software, Inc.
* Copyright (C) 2002 TimeSys Corp.
- * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/include/asm-mips/vr41xx/vrc4173.h b/include/asm-mips/vr41xx/vrc4173.h
index bb7a85c186e..4d41a9c091d 100644
--- a/include/asm-mips/vr41xx/vrc4173.h
+++ b/include/asm-mips/vr41xx/vrc4173.h
@@ -4,7 +4,7 @@
* Copyright (C) 2000 Michael R. McDonald
* Copyright (C) 2001-2003 Montavista Software Inc.
* Author: Yoichi Yuasa <yyuasa@mvista.com, or source@mvista.com>
- * Copyright (C) 2004 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2004 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
* Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
*
* This program is free software; you can redistribute it and/or modify
diff --git a/include/asm-mips/vr41xx/workpad.h b/include/asm-mips/vr41xx/workpad.h
index dfe01b43fb7..6bfa9c009a9 100644
--- a/include/asm-mips/vr41xx/workpad.h
+++ b/include/asm-mips/vr41xx/workpad.h
@@ -1,7 +1,7 @@
/*
* workpad.h, Include file for IBM WorkPad z50.
*
- * Copyright (C) 2002-2004 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ * Copyright (C) 2002-2004 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 983e9a2b604..2ca56d34aaa 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -165,6 +165,7 @@ static __inline__ int atomic_read(const atomic_t *v)
/* exported interface */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
* atomic_add_unless - add unless the number is a given value
@@ -216,4 +217,5 @@ static __inline__ int atomic_read(const atomic_t *v)
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
+#include <asm-generic/atomic.h>
#endif
diff --git a/include/asm-parisc/cache.h b/include/asm-parisc/cache.h
index 5da72e38bdd..93f179f13ce 100644
--- a/include/asm-parisc/cache.h
+++ b/include/asm-parisc/cache.h
@@ -28,16 +28,15 @@
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
#define SMP_CACHE_BYTES L1_CACHE_BYTES
-#define L1_CACHE_SHIFT_MAX 5 /* largest L1 which this arch supports */
-extern void flush_data_cache_local(void); /* flushes local data-cache only */
-extern void flush_instruction_cache_local(void); /* flushes local code-cache only */
+extern void flush_data_cache_local(void *); /* flushes local data-cache only */
+extern void flush_instruction_cache_local(void *); /* flushes local code-cache only */
#ifdef CONFIG_SMP
extern void flush_data_cache(void); /* flushes data-cache only (all processors) */
extern void flush_instruction_cache(void); /* flushes i-cache only (all processors) */
#else
-#define flush_data_cache flush_data_cache_local
-#define flush_instruction_cache flush_instruction_cache_local
+#define flush_data_cache() flush_data_cache_local(NULL)
+#define flush_instruction_cache() flush_instruction_cache_local(NULL)
#endif
extern void parisc_cache_init(void); /* initializes cache-flushing */
diff --git a/include/asm-parisc/futex.h b/include/asm-parisc/futex.h
index 9feff4ce142..6a332a9f099 100644
--- a/include/asm-parisc/futex.h
+++ b/include/asm-parisc/futex.h
@@ -1,53 +1,6 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
-#ifdef __KERNEL__
+#include <asm-generic/futex.h>
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- inc_preempt_count();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- dec_preempt_count();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-#endif
#endif
diff --git a/include/asm-parisc/io.h b/include/asm-parisc/io.h
index b9bb5946ecc..0db00adc942 100644
--- a/include/asm-parisc/io.h
+++ b/include/asm-parisc/io.h
@@ -41,7 +41,7 @@ extern void __raw_bad_addr(const volatile void __iomem *addr);
#define __raw_check_addr(addr) \
if (((unsigned long)addr >> NYBBLE_SHIFT) != 0xe) \
__raw_bad_addr(addr); \
- addr = (void *)((unsigned long)addr | (0xfUL << NYBBLE_SHIFT));
+ addr = (void __iomem *)((unsigned long)addr | (0xfUL << NYBBLE_SHIFT));
#else
#define gsc_check_addr(addr)
#define __raw_check_addr(addr)
diff --git a/include/asm-parisc/mman.h b/include/asm-parisc/mman.h
index e829607eb8b..736b0abcac0 100644
--- a/include/asm-parisc/mman.h
+++ b/include/asm-parisc/mman.h
@@ -38,6 +38,7 @@
#define MADV_SPACEAVAIL 5 /* insure that resources are reserved */
#define MADV_VPS_PURGE 6 /* Purge pages from VM page cache */
#define MADV_VPS_INHERIT 7 /* Inherit parents page size */
+#define MADV_REMOVE 8 /* remove these pages & resources */
/* The range 12-64 is reserved for page size specification. */
#define MADV_4K_PAGES 12 /* Use 4K pages */
diff --git a/include/asm-parisc/mutex.h b/include/asm-parisc/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/include/asm-parisc/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-parisc/page.h b/include/asm-parisc/page.h
index 44eae9f8274..4a6752b0afe 100644
--- a/include/asm-parisc/page.h
+++ b/include/asm-parisc/page.h
@@ -135,6 +135,13 @@ extern int npmem_ranges;
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#endif /* CONFIG_DISCONTIGMEM */
+#ifdef CONFIG_HUGETLB_PAGE
+#define HPAGE_SHIFT 22 /* 4MB (is this fixed?) */
+#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+#endif
+
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
diff --git a/include/asm-parisc/pci.h b/include/asm-parisc/pci.h
index fa39d07d49e..f277254159b 100644
--- a/include/asm-parisc/pci.h
+++ b/include/asm-parisc/pci.h
@@ -84,11 +84,17 @@ static __inline__ int pci_is_lmmio(struct pci_hba_data *hba, unsigned long a)
/*
** Convert between PCI (IO_VIEW) addresses and processor (PA_VIEW) addresses.
** See pcibios.c for more conversions used by Generic PCI code.
+**
+** Platform characteristics/firmware guarantee that
+** (1) PA_VIEW - IO_VIEW = lmmio_offset for both LMMIO and ELMMIO
+** (2) PA_VIEW == IO_VIEW for GMMIO
*/
#define PCI_BUS_ADDR(hba,a) (PCI_IS_LMMIO(hba,a) \
? ((a) - hba->lmmio_space_offset) /* mangle LMMIO */ \
: (a)) /* GMMIO */
-#define PCI_HOST_ADDR(hba,a) ((a) + hba->lmmio_space_offset)
+#define PCI_HOST_ADDR(hba,a) (((a) & PCI_F_EXTEND) == 0 \
+ ? (a) + hba->lmmio_space_offset \
+ : (a))
#else /* !CONFIG_64BIT */
diff --git a/include/asm-parisc/processor.h b/include/asm-parisc/processor.h
index aae40e8c3aa..89f2f1c16c1 100644
--- a/include/asm-parisc/processor.h
+++ b/include/asm-parisc/processor.h
@@ -144,16 +144,16 @@ struct thread_struct {
})
#define INIT_THREAD { \
- regs: { gr: { 0, }, \
- fr: { 0, }, \
- sr: { 0, }, \
- iasq: { 0, }, \
- iaoq: { 0, }, \
- cr27: 0, \
+ .regs = { .gr = { 0, }, \
+ .fr = { 0, }, \
+ .sr = { 0, }, \
+ .iasq = { 0, }, \
+ .iaoq = { 0, }, \
+ .cr27 = 0, \
}, \
- task_size: DEFAULT_TASK_SIZE, \
- map_base: DEFAULT_MAP_BASE, \
- flags: 0 \
+ .task_size = DEFAULT_TASK_SIZE, \
+ .map_base = DEFAULT_MAP_BASE, \
+ .flags = 0 \
}
/*
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h
index f3928d3a80c..a5a973c0c07 100644
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -49,6 +49,15 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
(last) = _switch_to(prev, next); \
} while(0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
/* interrupt control */
diff --git a/include/asm-parisc/thread_info.h b/include/asm-parisc/thread_info.h
index 57bbb76cb6c..ac32f140b83 100644
--- a/include/asm-parisc/thread_info.h
+++ b/include/asm-parisc/thread_info.h
@@ -43,9 +43,6 @@ struct thread_info {
#define alloc_thread_info(tsk) ((struct thread_info *) \
__get_free_pages(GFP_KERNEL, THREAD_ORDER))
#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
-
/* how to get the thread information struct from C */
#define current_thread_info() ((struct thread_info *)mfctl(30))
diff --git a/include/asm-parisc/tlbflush.h b/include/asm-parisc/tlbflush.h
index c9ec39c6fc6..825994a90e2 100644
--- a/include/asm-parisc/tlbflush.h
+++ b/include/asm-parisc/tlbflush.h
@@ -22,6 +22,7 @@ extern spinlock_t pa_tlb_lock;
#define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)
extern void flush_tlb_all(void);
+extern void flush_tlb_all_local(void *);
/*
* flush_tlb_mm()
diff --git a/include/asm-powerpc/abs_addr.h b/include/asm-powerpc/abs_addr.h
index 18415108fc5..c5c3259e0f8 100644
--- a/include/asm-powerpc/abs_addr.h
+++ b/include/asm-powerpc/abs_addr.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_ABS_ADDR_H
#define _ASM_POWERPC_ABS_ADDR_H
+#ifdef __KERNEL__
#include <linux/config.h>
@@ -70,4 +71,5 @@ static inline unsigned long phys_to_abs(unsigned long pa)
#define iseries_hv_addr(virtaddr) \
(0x8000000000000000 | virt_to_abs(virtaddr))
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ABS_ADDR_H */
diff --git a/include/asm-powerpc/agp.h b/include/asm-powerpc/agp.h
index 885b4631a6c..e5ccaca2f5a 100644
--- a/include/asm-powerpc/agp.h
+++ b/include/asm-powerpc/agp.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_AGP_H
#define _ASM_POWERPC_AGP_H
+#ifdef __KERNEL__
#include <asm/io.h>
@@ -18,4 +19,5 @@
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_AGP_H */
diff --git a/include/asm-powerpc/asm-compat.h b/include/asm-powerpc/asm-compat.h
index 8b133efc9f7..8e64be0cc47 100644
--- a/include/asm-powerpc/asm-compat.h
+++ b/include/asm-powerpc/asm-compat.h
@@ -1,7 +1,6 @@
#ifndef _ASM_POWERPC_ASM_COMPAT_H
#define _ASM_POWERPC_ASM_COMPAT_H
-#include <linux/config.h>
#include <asm/types.h>
#ifdef __ASSEMBLY__
@@ -41,6 +40,7 @@
#endif
+#ifdef __KERNEL__
#ifdef CONFIG_IBM405_ERR77
/* Erratum #77 on the 405 means we need a sync or dcbt before every
* stwcx. The old ATOMIC_SYNC_FIX covered some but not all of this.
@@ -51,5 +51,6 @@
#define PPC405_ERR77(ra,rb)
#define PPC405_ERR77_SYNC
#endif
+#endif
#endif /* _ASM_POWERPC_ASM_COMPAT_H */
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index ec4b1446895..147a38dcc76 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -36,7 +36,7 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
int t;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: lwarx %0,0,%2 # atomic_add_return\n\
add %0,%1,%0\n"
PPC405_ERR77(0,%2)
@@ -72,7 +72,7 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
int t;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: lwarx %0,0,%2 # atomic_sub_return\n\
subf %0,%1,%0\n"
PPC405_ERR77(0,%2)
@@ -106,7 +106,7 @@ static __inline__ int atomic_inc_return(atomic_t *v)
int t;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: lwarx %0,0,%1 # atomic_inc_return\n\
addic %0,%0,1\n"
PPC405_ERR77(0,%1)
@@ -150,7 +150,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
int t;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: lwarx %0,0,%1 # atomic_dec_return\n\
addic %0,%0,-1\n"
PPC405_ERR77(0,%1)
@@ -165,6 +165,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
}
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
* atomic_add_unless - add unless the number is a given value
@@ -175,19 +176,19 @@ static __inline__ int atomic_dec_return(atomic_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- for (;;) { \
- if (unlikely(c == (u))) \
- break; \
- old = atomic_cmpxchg((v), c, c + (a)); \
- if (likely(old == c)) \
- break; \
- c = old; \
- } \
- c != (u); \
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ for (;;) { \
+ if (unlikely(c == (u))) \
+ break; \
+ old = atomic_cmpxchg((v), c, c + (a)); \
+ if (likely(old == c)) \
+ break; \
+ c = old; \
+ } \
+ c != (u); \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
@@ -203,7 +204,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
int t;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
addic. %0,%0,-1\n\
blt- 2f\n"
@@ -252,7 +253,7 @@ static __inline__ long atomic64_add_return(long a, atomic64_t *v)
long t;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: ldarx %0,0,%2 # atomic64_add_return\n\
add %0,%1,%0\n\
stdcx. %0,0,%2 \n\
@@ -286,7 +287,7 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
long t;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: ldarx %0,0,%2 # atomic64_sub_return\n\
subf %0,%1,%0\n\
stdcx. %0,0,%2 \n\
@@ -318,7 +319,7 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
long t;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: ldarx %0,0,%1 # atomic64_inc_return\n\
addic %0,%0,1\n\
stdcx. %0,0,%1 \n\
@@ -360,7 +361,7 @@ static __inline__ long atomic64_dec_return(atomic64_t *v)
long t;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: ldarx %0,0,%1 # atomic64_dec_return\n\
addic %0,%0,-1\n\
stdcx. %0,0,%1\n\
@@ -385,7 +386,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
long t;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\
addic. %0,%0,-1\n\
blt- 2f\n\
@@ -402,5 +403,6 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
#endif /* __powerpc64__ */
+#include <asm-generic/atomic.h>
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */
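The reindented atomic_add_unless() above is a cmpxchg retry loop; as an illustrative sketch (not part of the patch), the same logic written as a plain function is:

static inline int atomic_add_unless_sketch(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (c == u)
			break;			/* hit the excluded value: do nothing */
		old = atomic_cmpxchg(v, c, c + a);
		if (old == c)
			break;			/* update succeeded */
		c = old;			/* raced with another CPU: retry */
	}
	return c != u;				/* non-zero if the add was performed */
}

atomic_inc_not_zero(v) is then just atomic_add_unless(v, 1, 0), which is how refcount users avoid resurrecting an object whose count has already reached zero.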
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
index 1996eaa8aea..bf6941a810b 100644
--- a/include/asm-powerpc/bitops.h
+++ b/include/asm-powerpc/bitops.h
@@ -112,7 +112,7 @@ static __inline__ int test_and_set_bit(unsigned long nr,
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1:" PPC_LLARX "%0,0,%3 # test_and_set_bit\n"
"or %1,%0,%2 \n"
PPC405_ERR77(0,%3)
@@ -134,7 +134,7 @@ static __inline__ int test_and_clear_bit(unsigned long nr,
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1:" PPC_LLARX "%0,0,%3 # test_and_clear_bit\n"
"andc %1,%0,%2 \n"
PPC405_ERR77(0,%3)
@@ -156,7 +156,7 @@ static __inline__ int test_and_change_bit(unsigned long nr,
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1:" PPC_LLARX "%0,0,%3 # test_and_change_bit\n"
"xor %1,%0,%2 \n"
PPC405_ERR77(0,%3)
diff --git a/include/asm-powerpc/bootx.h b/include/asm-powerpc/bootx.h
new file mode 100644
index 00000000000..57b82e3f89c
--- /dev/null
+++ b/include/asm-powerpc/bootx.h
@@ -0,0 +1,171 @@
+/*
+ * This file describes the structure passed from the BootX application
+ * (for MacOS) when it is used to boot Linux.
+ *
+ * Written by Benjamin Herrenschmidt.
+ */
+
+
+#ifndef __ASM_BOOTX_H__
+#define __ASM_BOOTX_H__
+
+#include <asm/types.h>
+
+#ifdef macintosh
+#include <Types.h>
+#include "linux_type_defs.h"
+#endif
+
+#ifdef macintosh
+/* All this requires PowerPC alignment */
+#pragma options align=power
+#endif
+
+/* On kernel entry:
+ *
+ * r3 = 0x426f6f58 ('BooX')
+ * r4 = pointer to boot_infos
+ * r5 = NULL
+ *
+ * Data and instruction translation disabled, interrupts
+ * disabled, kernel loaded at physical 0x00000000 on PCI
+ * machines (will be different on NuBus).
+ */
+
+#define BOOT_INFO_VERSION 5
+#define BOOT_INFO_COMPATIBLE_VERSION 1
+
+/* Bit in the architecture flag mask. More to be defined in
+ future versions. Note that either BOOT_ARCH_PCI or
+ BOOT_ARCH_NUBUS is set. The other BOOT_ARCH_NUBUS_xxx are
+ set additionally when BOOT_ARCH_NUBUS is set.
+ */
+#define BOOT_ARCH_PCI 0x00000001UL
+#define BOOT_ARCH_NUBUS 0x00000002UL
+#define BOOT_ARCH_NUBUS_PDM 0x00000010UL
+#define BOOT_ARCH_NUBUS_PERFORMA 0x00000020UL
+#define BOOT_ARCH_NUBUS_POWERBOOK 0x00000040UL
+
+/* Maximum number of ranges in phys memory map */
+#define MAX_MEM_MAP_SIZE 26
+
+/* This is the format of an element in the physical memory map. Note that
+ the map is optional and current BootX will only build it for pre-PCI
+ machines */
+typedef struct boot_info_map_entry
+{
+ __u32 physAddr; /* Physical starting address */
+ __u32 size; /* Size in bytes */
+} boot_info_map_entry_t;
+
+
+/* Here is the boot information that is passed to the bootstrap
+ * Note that the kernel arguments and the device tree are appended
+ * at the end of this structure. */
+typedef struct boot_infos
+{
+ /* Version of this structure */
+ __u32 version;
+ /* backward compatible down to version: */
+ __u32 compatible_version;
+
+ /* NEW (vers. 2) this holds the current _logical_ base addr of
+ the frame buffer (for use by early boot message) */
+ __u8* logicalDisplayBase;
+
+ /* NEW (vers. 4) Apple's machine identification */
+ __u32 machineID;
+
+ /* NEW (vers. 4) Detected hw architecture */
+ __u32 architecture;
+
+ /* The device tree (internal addresses relative to the beginning of the tree,
+ * device tree offset relative to the beginning of this structure).
+ * On pre-PCI macintosh (BOOT_ARCH_PCI bit set to 0 in architecture), this
+ * field is 0.
+ */
+ __u32 deviceTreeOffset; /* Device tree offset */
+ __u32 deviceTreeSize; /* Size of the device tree */
+
+ /* Some infos about the current MacOS display */
+ __u32 dispDeviceRect[4]; /* left,top,right,bottom */
+ __u32 dispDeviceDepth; /* (8, 16 or 32) */
+ __u8* dispDeviceBase; /* base address (physical) */
+ __u32 dispDeviceRowBytes; /* rowbytes (in bytes) */
+ __u32 dispDeviceColorsOffset; /* Colormap (8 bits only) or 0 (*) */
+ /* Optional offset in the registry to the current
+ * MacOS display. (Can be 0 when not detected) */
+ __u32 dispDeviceRegEntryOffset;
+
+ /* Optional pointer to boot ramdisk (offset from this structure) */
+ __u32 ramDisk;
+ __u32 ramDiskSize; /* size of ramdisk image */
+
+ /* Kernel command line arguments (offset from this structure) */
+ __u32 kernelParamsOffset;
+
+ /* ALL BELOW NEW (vers. 4) */
+
+ /* This defines the physical memory. Valid with BOOT_ARCH_NUBUS flag
+ (non-PCI) only. On PCI, memory is contiguous and its size is in the
+ device-tree. */
+ boot_info_map_entry_t
+ physMemoryMap[MAX_MEM_MAP_SIZE]; /* Where the phys memory is */
+ __u32 physMemoryMapSize; /* How many entries in map */
+
+
+ /* The framebuffer size (optional, currently 0) */
+ __u32 frameBufferSize; /* Represents a max size, can be 0. */
+
+ /* NEW (vers. 5) */
+
+ /* Total params size (args + colormap + device tree + ramdisk) */
+ __u32 totalParamsSize;
+
+} boot_infos_t;
+
+#ifdef __KERNEL__
+/* (*) The format of the colormap is 256 * 3 * 2 bytes. Each color index
+ * is represented by 3 short words containing a 16 bits (unsigned) color
+ * component. Later versions may contain the gamma table for direct-color
+ * devices here.
+ */
+#define BOOTX_COLORTABLE_SIZE (256UL*3UL*2UL)
+
+/* BootX passes the device-tree using a format that comes from earlier
+ * ppc32 kernels. This used to match what is in prom.h, but not anymore
+ * so we now define it here
+ */
+struct bootx_dt_prop {
+ u32 name;
+ int length;
+ u32 value;
+ u32 next;
+};
+
+struct bootx_dt_node {
+ u32 unused0;
+ u32 unused1;
+ u32 phandle; /* not really available */
+ u32 unused2;
+ u32 unused3;
+ u32 unused4;
+ u32 unused5;
+ u32 full_name;
+ u32 properties;
+ u32 parent;
+ u32 child;
+ u32 sibling;
+ u32 next;
+ u32 allnext;
+};
+
+extern void bootx_init(unsigned long r4, unsigned long phys);
+
+#endif /* __KERNEL__ */
+
+#ifdef macintosh
+#pragma options align=reset
+#endif
+
+#endif
diff --git a/include/asm-powerpc/btext.h b/include/asm-powerpc/btext.h
index 71cce36bc63..906f46e3100 100644
--- a/include/asm-powerpc/btext.h
+++ b/include/asm-powerpc/btext.h
@@ -7,21 +7,22 @@
#define __PPC_BTEXT_H
#ifdef __KERNEL__
-extern void btext_clearscreen(void);
-extern void btext_flushscreen(void);
-
-extern int boot_text_mapped;
-
-extern int btext_initialize(struct device_node *np);
-
-extern void map_boot_text(void);
-extern void init_boot_display(void);
+extern int btext_find_display(int allow_nonstdout);
extern void btext_update_display(unsigned long phys, int width, int height,
int depth, int pitch);
+extern void btext_setup_display(int width, int height, int depth, int pitch,
+ unsigned long address);
+extern void btext_prepare_BAT(void);
+extern void btext_unmap(void);
extern void btext_drawchar(char c);
extern void btext_drawstring(const char *str);
extern void btext_drawhex(unsigned long v);
+extern void btext_drawtext(const char *c, unsigned int len);
+
+extern void btext_clearscreen(void);
+extern void btext_flushscreen(void);
+extern void btext_flushline(void);
#endif /* __KERNEL__ */
#endif /* __PPC_BTEXT_H */
diff --git a/include/asm-powerpc/bug.h b/include/asm-powerpc/bug.h
index b001ecb3cd9..99817a802ca 100644
--- a/include/asm-powerpc/bug.h
+++ b/include/asm-powerpc/bug.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_BUG_H
#define _ASM_POWERPC_BUG_H
+#ifdef __KERNEL__
#include <asm/asm-compat.h>
/*
@@ -67,4 +68,5 @@ struct bug_entry *find_bug(unsigned long bugaddr);
#include <asm-generic/bug.h>
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BUG_H */
diff --git a/include/asm-powerpc/cache.h b/include/asm-powerpc/cache.h
index 26ce502e76e..6379c2df5c4 100644
--- a/include/asm-powerpc/cache.h
+++ b/include/asm-powerpc/cache.h
@@ -19,7 +19,6 @@
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define SMP_CACHE_BYTES L1_CACHE_BYTES
-#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
#if defined(__powerpc64__) && !defined(__ASSEMBLY__)
struct ppc64_caches {
diff --git a/include/asm-powerpc/checksum.h b/include/asm-powerpc/checksum.h
index d8354d8a49c..609ecbbd721 100644
--- a/include/asm-powerpc/checksum.h
+++ b/include/asm-powerpc/checksum.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_CHECKSUM_H
#define _ASM_POWERPC_CHECKSUM_H
+#ifdef __KERNEL__
/*
* This program is free software; you can redistribute it and/or
@@ -129,4 +130,5 @@ static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
}
#endif
+#endif /* __KERNEL__ */
#endif
diff --git a/include/asm-powerpc/compat.h b/include/asm-powerpc/compat.h
index 4db4360c4d4..accb80c9a33 100644
--- a/include/asm-powerpc/compat.h
+++ b/include/asm-powerpc/compat.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_COMPAT_H
#define _ASM_POWERPC_COMPAT_H
+#ifdef __KERNEL__
/*
* Architecture specific compatibility types
*/
@@ -202,4 +203,5 @@ struct compat_shmid64_ds {
compat_ulong_t __unused6;
};
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_COMPAT_H */
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index d1cfa3f515e..64210549f56 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -1,7 +1,6 @@
#ifndef __ASM_POWERPC_CPUTABLE_H
#define __ASM_POWERPC_CPUTABLE_H
-#include <linux/config.h>
#include <asm/asm-compat.h>
#define PPC_FEATURE_32 0x80000000
@@ -20,6 +19,7 @@
#define PPC_FEATURE_POWER5 0x00040000
#define PPC_FEATURE_POWER5_PLUS 0x00020000
#define PPC_FEATURE_CELL 0x00010000
+#define PPC_FEATURE_BOOKE 0x00008000
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
@@ -28,10 +28,17 @@
* via the mkdefs mechanism.
*/
struct cpu_spec;
-struct op_powerpc_model;
typedef void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
+enum powerpc_oprofile_type {
+ PPC_OPROFILE_INVALID = 0,
+ PPC_OPROFILE_RS64 = 1,
+ PPC_OPROFILE_POWER4 = 2,
+ PPC_OPROFILE_G4 = 3,
+ PPC_OPROFILE_BOOKE = 4,
+};
+
struct cpu_spec {
/* CPU is matched via (PVR & pvr_mask) == pvr_value */
unsigned int pvr_mask;
@@ -57,7 +64,10 @@ struct cpu_spec {
char *oprofile_cpu_type;
/* Processor specific oprofile operations */
- struct op_powerpc_model *oprofile_model;
+ enum powerpc_oprofile_type oprofile_type;
+
+ /* Name of processor class, for the ELF AT_PLATFORM entry */
+ char *platform;
};
extern struct cpu_spec *cur_cpu_spec;
@@ -106,6 +116,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
#define CPU_FTR_LOCKLESS_TLBIE ASM_CONST(0x0000040000000000)
#define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0000080000000000)
#define CPU_FTR_CI_LARGE_PAGE ASM_CONST(0x0000100000000000)
+#define CPU_FTR_PAUSE_ZERO ASM_CONST(0x0000200000000000)
#else
/* ensure on 32b processors the flags are available for compiling but
* don't do anything */
@@ -305,12 +316,18 @@ enum {
CPU_FTR_MMCRA_SIHV,
CPU_FTRS_CELL = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
- CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT,
+ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT |
+ CPU_FTR_CTRL | CPU_FTR_PAUSE_ZERO,
CPU_FTRS_COMPATIBLE = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2,
#endif
CPU_FTRS_POSSIBLE =
+#ifdef __powerpc64__
+ CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 |
+ CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_CELL |
+ CPU_FTR_CI_LARGE_PAGE |
+#else
#if CLASSIC_PPC
CPU_FTRS_PPC601 | CPU_FTRS_603 | CPU_FTRS_604 | CPU_FTRS_740_NOTAU |
CPU_FTRS_740 | CPU_FTRS_750 | CPU_FTRS_750FX1 |
@@ -344,14 +361,14 @@ enum {
#ifdef CONFIG_E500
CPU_FTRS_E500 | CPU_FTRS_E500_2 |
#endif
-#ifdef __powerpc64__
- CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 |
- CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_CELL |
- CPU_FTR_CI_LARGE_PAGE |
-#endif
+#endif /* __powerpc64__ */
0,
CPU_FTRS_ALWAYS =
+#ifdef __powerpc64__
+ CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 &
+ CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_CELL &
+#else
#if CLASSIC_PPC
CPU_FTRS_PPC601 & CPU_FTRS_603 & CPU_FTRS_604 & CPU_FTRS_740_NOTAU &
CPU_FTRS_740 & CPU_FTRS_750 & CPU_FTRS_750FX1 &
@@ -385,10 +402,7 @@ enum {
#ifdef CONFIG_E500
CPU_FTRS_E500 & CPU_FTRS_E500_2 &
#endif
-#ifdef __powerpc64__
- CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 &
- CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_CELL &
-#endif
+#endif /* __powerpc64__ */
CPU_FTRS_POSSIBLE,
};
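The reshuffled CPU_FTRS_POSSIBLE (OR of every selectable feature set) and CPU_FTRS_ALWAYS (AND of them) let the compiler resolve many feature tests at build time. A sketch of how cpu_has_feature() is typically built on these masks (assumed here, not shown in this hunk):

static inline int cpu_has_feature(unsigned long feature)
{
	/* If the feature is in every possible CPU for this config, this is
	 * a compile-time 1; if it is in none, a compile-time 0 and the
	 * guarded code is optimized away entirely. */
	return (CPU_FTRS_ALWAYS & feature) ||
	       (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature);
}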
diff --git a/include/asm-powerpc/current.h b/include/asm-powerpc/current.h
index 82cd4a9ca99..1938d6abd25 100644
--- a/include/asm-powerpc/current.h
+++ b/include/asm-powerpc/current.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_CURRENT_H
#define _ASM_POWERPC_CURRENT_H
+#ifdef __KERNEL__
/*
* This program is free software; you can redistribute it and/or
@@ -24,4 +25,5 @@ register struct task_struct *current asm ("r2");
#endif
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CURRENT_H */
diff --git a/include/asm-powerpc/delay.h b/include/asm-powerpc/delay.h
index 54fe1f4f8fd..057a6095547 100644
--- a/include/asm-powerpc/delay.h
+++ b/include/asm-powerpc/delay.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_DELAY_H
#define _ASM_POWERPC_DELAY_H
+#ifdef __KERNEL__
/*
* Copyright 1996, Paul Mackerras.
@@ -16,4 +17,5 @@
extern void __delay(unsigned long loops);
extern void udelay(unsigned long usecs);
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_DELAY_H */
diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index 59a80163f75..837756ab7dc 100644
--- a/include/asm-powerpc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -6,6 +6,7 @@
*/
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
+#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/types.h>
@@ -229,7 +230,7 @@ static inline int dma_get_cache_alignment(void)
#ifdef CONFIG_PPC64
/* no easy way to get cache size on all processors, so return
* the maximum possible, to be safe */
- return (1 << L1_CACHE_SHIFT_MAX);
+ return (1 << INTERNODE_CACHE_SHIFT);
#else
/*
* Each processor family will define its own L1_CACHE_SHIFT,
@@ -282,4 +283,5 @@ struct dma_mapping_ops {
int (*dac_dma_supported)(struct device *dev, u64 mask);
};
+#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */
diff --git a/include/asm-powerpc/dma.h b/include/asm-powerpc/dma.h
index 926378d2cd9..4bb57fe3709 100644
--- a/include/asm-powerpc/dma.h
+++ b/include/asm-powerpc/dma.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_DMA_H
#define _ASM_POWERPC_DMA_H
+#ifdef __KERNEL__
/*
* Defines for using and allocating dma channels.
@@ -387,4 +388,5 @@ extern int isa_dma_bridge_buggy;
#endif /* !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) */
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_DMA_H */
diff --git a/include/asm-powerpc/eeh.h b/include/asm-powerpc/eeh.h
index f8633aafe4b..b263fb2fa6e 100644
--- a/include/asm-powerpc/eeh.h
+++ b/include/asm-powerpc/eeh.h
@@ -19,6 +19,7 @@
#ifndef _PPC64_EEH_H
#define _PPC64_EEH_H
+#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/init.h>
@@ -33,9 +34,11 @@ struct device_node;
extern int eeh_subsystem_enabled;
/* Values for eeh_mode bits in device_node */
-#define EEH_MODE_SUPPORTED (1<<0)
-#define EEH_MODE_NOCHECK (1<<1)
-#define EEH_MODE_ISOLATED (1<<2)
+#define EEH_MODE_SUPPORTED (1<<0)
+#define EEH_MODE_NOCHECK (1<<1)
+#define EEH_MODE_ISOLATED (1<<2)
+#define EEH_MODE_RECOVERING (1<<3)
+#define EEH_MODE_IRQ_DISABLED (1<<4)
/* Max number of EEH freezes allowed before we consider the device
* to be permanently disabled. */
@@ -57,6 +60,7 @@ void __init pci_addr_cache_build(void);
* to finish the eeh setup for this device.
*/
void eeh_add_device_early(struct device_node *);
+void eeh_add_device_tree_early(struct device_node *);
void eeh_add_device_late(struct pci_dev *);
/**
@@ -72,6 +76,15 @@ void eeh_add_device_late(struct pci_dev *);
void eeh_remove_device(struct pci_dev *);
/**
+ * eeh_remove_bus_device - undo EEH for device & children.
+ * @dev: pci device to be removed
+ *
+ * As above, this removes the device; it also removes child
+ * pci devices as well.
+ */
+void eeh_remove_bus_device(struct pci_dev *);
+
+/**
* EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
*
* If this macro yields TRUE, the caller relays to eeh_check_failure()
@@ -107,6 +120,9 @@ static inline void eeh_add_device_late(struct pci_dev *dev) { }
static inline void eeh_remove_device(struct pci_dev *dev) { }
+static inline void eeh_add_device_tree_early(struct device_node *dn) { }
+
+static inline void eeh_remove_bus_device(struct pci_dev *dev) { }
#define EEH_POSSIBLE_ERROR(val, type) (0)
#define EEH_IO_ERROR_VALUE(size) (-1UL)
#endif /* CONFIG_EEH */
@@ -363,4 +379,5 @@ static inline void eeh_insl_ns(unsigned long port, void * buf, int nl)
eeh_check_failure((void __iomem *)(port), *(u32*)buf);
}
+#endif /* __KERNEL__ */
#endif /* _PPC64_EEH_H */
diff --git a/include/asm-powerpc/eeh_event.h b/include/asm-powerpc/eeh_event.h
index d168a30b386..93d55a2bebf 100644
--- a/include/asm-powerpc/eeh_event.h
+++ b/include/asm-powerpc/eeh_event.h
@@ -20,6 +20,7 @@
#ifndef ASM_PPC64_EEH_EVENT_H
#define ASM_PPC64_EEH_EVENT_H
+#ifdef __KERNEL__
/** EEH event -- structure holding pci controller data that describes
* a change in the isolation status of a PCI slot. A pointer
@@ -29,7 +30,7 @@ struct eeh_event {
struct list_head list;
struct device_node *dn; /* struct device node */
struct pci_dev *dev; /* affected device */
- int state;
+ enum pci_channel_state state; /* PCI bus state for the affected device */
int time_unavail; /* milliseconds until device might be available */
};
@@ -46,7 +47,11 @@ struct eeh_event {
*/
int eeh_send_failure_event (struct device_node *dn,
struct pci_dev *dev,
- int reset_state,
+ enum pci_channel_state state,
int time_unavail);
+/* Main recovery function */
+void handle_eeh_events (struct eeh_event *);
+
+#endif /* __KERNEL__ */
#endif /* ASM_PPC64_EEH_EVENT_H */
diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h
index 3dcd65edf97..94d228f9c6a 100644
--- a/include/asm-powerpc/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -1,7 +1,10 @@
#ifndef _ASM_POWERPC_ELF_H
#define _ASM_POWERPC_ELF_H
+#ifdef __KERNEL__
#include <linux/sched.h> /* for task_struct */
+#endif
+
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/cputable.h>
@@ -89,7 +92,6 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
-#include <asm/ptrace.h>
#define ELF_NGREG 48 /* includes nip, msr, lr, etc. */
#define ELF_NFPREG 33 /* includes fpscr */
@@ -219,20 +221,18 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
instruction set this cpu supports. This could be done in userspace,
but it's not easy, and we've already done it here. */
# define ELF_HWCAP (cur_cpu_spec->cpu_user_features)
-#ifdef __powerpc64__
-# define ELF_PLAT_INIT(_r, load_addr) do { \
- _r->gpr[2] = load_addr; \
-} while (0)
-#endif /* __powerpc64__ */
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
- intent than poking at uname or /proc/cpuinfo.
+ intent than poking at uname or /proc/cpuinfo. */
- For the moment, we have only optimizations for the Intel generations,
- but that could change... */
+#define ELF_PLATFORM (cur_cpu_spec->platform)
-#define ELF_PLATFORM (NULL)
+#ifdef __powerpc64__
+# define ELF_PLAT_INIT(_r, load_addr) do { \
+ _r->gpr[2] = load_addr; \
+} while (0)
+#endif /* __powerpc64__ */
#ifdef __KERNEL__
diff --git a/include/asm-powerpc/firmware.h b/include/asm-powerpc/firmware.h
index 12fabbcb04f..f804b34cf06 100644
--- a/include/asm-powerpc/firmware.h
+++ b/include/asm-powerpc/firmware.h
@@ -98,6 +98,12 @@ typedef struct {
extern firmware_feature_t firmware_features_table[];
#endif
+extern void system_reset_fwnmi(void);
+extern void machine_check_fwnmi(void);
+
+/* This is true if we are using the firmware NMI handler (typically LPAR) */
+extern int fwnmi_active;
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_FIRMWARE_H */
diff --git a/include/asm-powerpc/floppy.h b/include/asm-powerpc/floppy.h
index 64276a3f615..e258778ca42 100644
--- a/include/asm-powerpc/floppy.h
+++ b/include/asm-powerpc/floppy.h
@@ -9,6 +9,7 @@
*/
#ifndef __ASM_POWERPC_FLOPPY_H
#define __ASM_POWERPC_FLOPPY_H
+#ifdef __KERNEL__
#include <linux/config.h>
#include <asm/machdep.h>
@@ -102,4 +103,5 @@ static int FDC2 = -1;
#define EXTRA_FLOPPY_PARAMS
+#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_FLOPPY_H */
diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h
index f0319d50b12..39e85f320a7 100644
--- a/include/asm-powerpc/futex.h
+++ b/include/asm-powerpc/futex.h
@@ -11,7 +11,7 @@
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
__asm__ __volatile ( \
- SYNC_ON_SMP \
+ LWSYNC_ON_SMP \
"1: lwarx %0,0,%2\n" \
insn \
PPC405_ERR77(0, %2) \
diff --git a/include/asm-powerpc/grackle.h b/include/asm-powerpc/grackle.h
index 563c7a5e64c..bd7812a519d 100644
--- a/include/asm-powerpc/grackle.h
+++ b/include/asm-powerpc/grackle.h
@@ -1,3 +1,6 @@
+#ifndef _ASM_POWERPC_GRACKLE_H
+#define _ASM_POWERPC_GRACKLE_H
+#ifdef __KERNEL__
/*
* Functions for setting up and using a MPC106 northbridge
*/
@@ -5,3 +8,5 @@
#include <asm/pci-bridge.h>
extern void setup_grackle(struct pci_controller *hose);
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_GRACKLE_H */
diff --git a/include/asm-powerpc/hardirq.h b/include/asm-powerpc/hardirq.h
index 3b3e3b49ec1..288e14d53b7 100644
--- a/include/asm-powerpc/hardirq.h
+++ b/include/asm-powerpc/hardirq.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_HARDIRQ_H
#define _ASM_POWERPC_HARDIRQ_H
+#ifdef __KERNEL__
#include <asm/irq.h>
#include <asm/bug.h>
@@ -24,4 +25,5 @@ static inline void ack_bad_irq(int irq)
BUG();
}
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HARDIRQ_H */
diff --git a/include/asm-powerpc/heathrow.h b/include/asm-powerpc/heathrow.h
index 22ac179856b..93f54958a9d 100644
--- a/include/asm-powerpc/heathrow.h
+++ b/include/asm-powerpc/heathrow.h
@@ -1,3 +1,6 @@
+#ifndef _ASM_POWERPC_HEATHROW_H
+#define _ASM_POWERPC_HEATHROW_H
+#ifdef __KERNEL__
/*
* heathrow.h: definitions for using the "Heathrow" I/O controller chip.
*
@@ -60,3 +63,5 @@
/* Looks like Heathrow has some sort of GPIOs as well... */
#define HRW_GPIO_MODEM_RESET 0x6d
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_HEATHROW_H */
diff --git a/include/asm-powerpc/hvcall.h b/include/asm-powerpc/hvcall.h
index d36da61dbc5..38ca9ad6110 100644
--- a/include/asm-powerpc/hvcall.h
+++ b/include/asm-powerpc/hvcall.h
@@ -1,11 +1,15 @@
#ifndef _ASM_POWERPC_HVCALL_H
#define _ASM_POWERPC_HVCALL_H
+#ifdef __KERNEL__
#define HVSC .long 0x44000022
#define H_Success 0
#define H_Busy 1 /* Hardware busy -- retry later */
+#define H_Closed 2 /* Resource closed */
#define H_Constrained 4 /* Resource request constrained to max allowed */
+#define H_InProgress 14 /* Kind of like busy */
+#define H_Continue 18 /* Returned from H_Join on success */
#define H_LongBusyStartRange 9900 /* Start of long busy range */
#define H_LongBusyOrder1msec 9900 /* Long busy, hint that 1msec is a good time to retry */
#define H_LongBusyOrder10msec 9901 /* Long busy, hint that 10msec is a good time to retry */
@@ -113,6 +117,8 @@
#define H_REGISTER_VTERM 0x154
#define H_FREE_VTERM 0x158
#define H_POLL_PENDING 0x1D8
+#define H_JOIN 0x298
+#define H_ENABLE_CRQ 0x2B0
#ifndef __ASSEMBLY__
@@ -170,4 +176,5 @@ long plpar_hcall_4out(unsigned long opcode,
unsigned long *out4);
#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HVCALL_H */
diff --git a/include/asm-powerpc/hvconsole.h b/include/asm-powerpc/hvconsole.h
index 6da93ce74dc..34daf7b9b62 100644
--- a/include/asm-powerpc/hvconsole.h
+++ b/include/asm-powerpc/hvconsole.h
@@ -21,6 +21,7 @@
#ifndef _PPC64_HVCONSOLE_H
#define _PPC64_HVCONSOLE_H
+#ifdef __KERNEL__
/*
* This is the max number of console adapters that can/will be found as
@@ -46,4 +47,5 @@ extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int irq,
struct hv_ops *ops);
/* remove a vterm from hvc tty operation (modele_exit or hotplug remove) */
extern int __devexit hvc_remove(struct hvc_struct *hp);
+#endif /* __KERNEL__ */
#endif /* _PPC64_HVCONSOLE_H */
diff --git a/include/asm-powerpc/hvcserver.h b/include/asm-powerpc/hvcserver.h
index aecba966579..67d7da3a4da 100644
--- a/include/asm-powerpc/hvcserver.h
+++ b/include/asm-powerpc/hvcserver.h
@@ -21,6 +21,7 @@
#ifndef _PPC64_HVCSERVER_H
#define _PPC64_HVCSERVER_H
+#ifdef __KERNEL__
#include <linux/list.h>
@@ -54,4 +55,5 @@ extern int hvcs_register_connection(uint32_t unit_address,
uint32_t p_partition_ID, uint32_t p_unit_address);
extern int hvcs_free_connection(uint32_t unit_address);
+#endif /* __KERNEL__ */
#endif /* _PPC64_HVCSERVER_H */
diff --git a/include/asm-powerpc/i8259.h b/include/asm-powerpc/i8259.h
index fc4bfee124d..0392159e16e 100644
--- a/include/asm-powerpc/i8259.h
+++ b/include/asm-powerpc/i8259.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_I8259_H
#define _ASM_POWERPC_I8259_H
+#ifdef __KERNEL__
#include <linux/irq.h>
@@ -9,4 +10,5 @@ extern void i8259_init(unsigned long intack_addr, int offset);
extern int i8259_irq(struct pt_regs *regs);
extern int i8259_irq_cascade(struct pt_regs *regs, void *unused);
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_I8259_H */
diff --git a/include/asm-powerpc/ibmebus.h b/include/asm-powerpc/ibmebus.h
new file mode 100644
index 00000000000..7a42723d107
--- /dev/null
+++ b/include/asm-powerpc/ibmebus.h
@@ -0,0 +1,85 @@
+/*
+ * IBM PowerPC eBus Infrastructure Support.
+ *
+ * Copyright (c) 2005 IBM Corporation
+ * Heiko J Schick <schickhj@de.ibm.com>
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ASM_EBUS_H
+#define _ASM_EBUS_H
+#ifdef __KERNEL__
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
+#include <asm/of_device.h>
+
+extern struct dma_mapping_ops ibmebus_dma_ops;
+extern struct bus_type ibmebus_bus_type;
+
+struct ibmebus_dev {
+ char *name;
+ struct of_device ofdev;
+};
+
+struct ibmebus_driver {
+ char *name;
+ struct of_device_id *id_table;
+ int (*probe) (struct ibmebus_dev *dev, const struct of_device_id *id);
+ int (*remove) (struct ibmebus_dev *dev);
+ struct device_driver driver;
+};
+
+int ibmebus_register_driver(struct ibmebus_driver *drv);
+void ibmebus_unregister_driver(struct ibmebus_driver *drv);
+
+int ibmebus_request_irq(struct ibmebus_dev *dev,
+ u32 ist,
+ irqreturn_t (*handler)(int, void*, struct pt_regs *),
+ unsigned long irq_flags, const char * devname,
+ void *dev_id);
+void ibmebus_free_irq(struct ibmebus_dev *dev, u32 ist, void *dev_id);
+
+static inline struct ibmebus_driver *to_ibmebus_driver(struct device_driver *drv)
+{
+ return container_of(drv, struct ibmebus_driver, driver);
+}
+
+static inline struct ibmebus_dev *to_ibmebus_dev(struct device *dev)
+{
+ return container_of(dev, struct ibmebus_dev, ofdev.dev);
+}
+
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_EBUS_H */
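For context, a minimal driver skeleton against the new ibmebus interface could look as follows; the device name and callback bodies are hypothetical, only the types and registration calls come from the header above:

#include <linux/module.h>
#include <asm/ibmebus.h>

static struct of_device_id example_device_table[] = {
	{ .name = "example" },		/* hypothetical firmware node name */
	{},
};

static int example_probe(struct ibmebus_dev *dev,
			 const struct of_device_id *id)
{
	/* set up the device, e.g. via ibmebus_request_irq(dev, ist, ...) */
	return 0;
}

static int example_remove(struct ibmebus_dev *dev)
{
	/* undo whatever probe did */
	return 0;
}

static struct ibmebus_driver example_driver = {
	.name		= "example",
	.id_table	= example_device_table,
	.probe		= example_probe,
	.remove		= example_remove,
};

static int __init example_init(void)
{
	return ibmebus_register_driver(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	ibmebus_unregister_driver(&example_driver);
}
module_exit(example_exit);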
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index 48938d84d05..68efbea379c 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_IO_H
#define _ASM_POWERPC_IO_H
+#ifdef __KERNEL__
/*
* This program is free software; you can redistribute it and/or
@@ -186,7 +187,6 @@ extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, int nl);
#define IO_SPACE_LIMIT ~(0UL)
-#ifdef __KERNEL__
extern int __ioremap_explicit(unsigned long p_addr, unsigned long v_addr,
unsigned long size, unsigned long flags);
extern void __iomem *__ioremap(unsigned long address, unsigned long size,
@@ -256,8 +256,6 @@ static inline void * phys_to_virt(unsigned long address)
*/
#define BIO_VMERGE_BOUNDARY 0
-#endif /* __KERNEL__ */
-
static inline void iosync(void)
{
__asm__ __volatile__ ("sync" : : : "memory");
@@ -405,8 +403,6 @@ static inline void out_be64(volatile unsigned long __iomem *addr, unsigned long
#include <asm/eeh.h>
#endif
-#ifdef __KERNEL__
-
/**
* check_signature - find BIOS signatures
* @io_addr: mmio address to check
diff --git a/include/asm-powerpc/iommu.h b/include/asm-powerpc/iommu.h
index f89f0605089..d5677cbec20 100644
--- a/include/asm-powerpc/iommu.h
+++ b/include/asm-powerpc/iommu.h
@@ -20,6 +20,7 @@
#ifndef _ASM_IOMMU_H
#define _ASM_IOMMU_H
+#ifdef __KERNEL__
#include <linux/config.h>
#include <asm/types.h>
@@ -56,32 +57,13 @@ struct device_node;
/* Walks all buses and creates iommu tables */
extern void iommu_setup_pSeries(void);
-extern void iommu_setup_u3(void);
+extern void iommu_setup_dart(void);
/* Frees table for an individual device node */
extern void iommu_free_table(struct device_node *dn);
#endif /* CONFIG_PPC_MULTIPLATFORM */
-#ifdef CONFIG_PPC_PSERIES
-
-/* Creates table for an individual device node */
-extern void iommu_devnode_init_pSeries(struct device_node *dn);
-
-#endif /* CONFIG_PPC_PSERIES */
-
-#ifdef CONFIG_PPC_ISERIES
-
-/* Creates table for an individual device node */
-extern void iommu_devnode_init_iSeries(struct device_node *dn);
-/* Get table parameters from HV */
-extern void iommu_table_getparms_iSeries(unsigned long busno,
- unsigned char slotno,
- unsigned char virtbus,
- struct iommu_table* tbl);
-
-#endif /* CONFIG_PPC_ISERIES */
-
/* Initializes an iommu_table based in values set in the passed-in
* structure
*/
@@ -104,7 +86,7 @@ extern void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
extern void iommu_init_early_pSeries(void);
extern void iommu_init_early_iSeries(void);
-extern void iommu_init_early_u3(void);
+extern void iommu_init_early_dart(void);
#ifdef CONFIG_PCI
extern void pci_iommu_init(void);
@@ -113,6 +95,7 @@ extern void pci_direct_iommu_init(void);
static inline void pci_iommu_init(void) { }
#endif
-extern void alloc_u3_dart_table(void);
+extern void alloc_dart_table(void);
+#endif /* __KERNEL__ */
#endif /* _ASM_IOMMU_H */
diff --git a/include/asm-ppc/ipic.h b/include/asm-powerpc/ipic.h
index 0fe396a2b66..0fe396a2b66 100644
--- a/include/asm-ppc/ipic.h
+++ b/include/asm-powerpc/ipic.h
diff --git a/include/asm-powerpc/iseries/hv_call.h b/include/asm-powerpc/iseries/hv_call.h
index e9f831c9a5e..162d653ad51 100644
--- a/include/asm-powerpc/iseries/hv_call.h
+++ b/include/asm-powerpc/iseries/hv_call.h
@@ -1,5 +1,4 @@
/*
- * HvCall.h
* Copyright (C) 2001 Mike Corrigan IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
@@ -15,8 +14,7 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-/*
+ *
* This file contains the "hypervisor call" interface which is used to
* drive the hypervisor from the OS.
*/
diff --git a/include/asm-powerpc/iseries/hv_call_event.h b/include/asm-powerpc/iseries/hv_call_event.h
index 46763a30590..4cec4762076 100644
--- a/include/asm-powerpc/iseries/hv_call_event.h
+++ b/include/asm-powerpc/iseries/hv_call_event.h
@@ -1,5 +1,4 @@
/*
- * HvCallEvent.h
* Copyright (C) 2001 Mike Corrigan IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
@@ -15,8 +14,7 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-/*
+ *
* This file contains the "hypervisor call" interface which is used to
* drive the hypervisor from the OS.
*/
@@ -33,32 +31,9 @@ typedef u8 HvLpEvent_Type;
typedef u8 HvLpEvent_AckInd;
typedef u8 HvLpEvent_AckType;
-struct HvCallEvent_PackedParms {
- u8 xAckType:1;
- u8 xAckInd:1;
- u8 xRsvd:1;
- u8 xTargetLp:5;
- u8 xType;
- u16 xSubtype;
- HvLpInstanceId xSourceInstId;
- HvLpInstanceId xTargetInstId;
-};
-
typedef u8 HvLpDma_Direction;
typedef u8 HvLpDma_AddressType;
-struct HvCallEvent_PackedDmaParms {
- u8 xDirection:1;
- u8 xLocalAddrType:1;
- u8 xRemoteAddrType:1;
- u8 xRsvd1:5;
- HvLpIndex xRemoteLp;
- u8 xType;
- u8 xRsvd2;
- HvLpInstanceId xLocalInstId;
- HvLpInstanceId xRemoteInstId;
-};
-
typedef u64 HvLpEvent_Rc;
typedef u64 HvLpDma_Rc;
@@ -92,11 +67,8 @@ static inline void HvCallEvent_setInterLpQueueIndex(u8 queueIndex)
static inline void HvCallEvent_setLpEventStack(u8 queueIndex,
char *eventStackAddr, u32 eventStackSize)
{
- u64 abs_addr;
-
- abs_addr = virt_to_abs(eventStackAddr);
- HvCall3(HvCallEventSetLpEventStack, queueIndex, abs_addr,
- eventStackSize);
+ HvCall3(HvCallEventSetLpEventStack, queueIndex,
+ virt_to_abs(eventStackAddr), eventStackSize);
}
static inline void HvCallEvent_setLpEventQueueInterruptProc(u8 queueIndex,
@@ -108,14 +80,7 @@ static inline void HvCallEvent_setLpEventQueueInterruptProc(u8 queueIndex,
static inline HvLpEvent_Rc HvCallEvent_signalLpEvent(struct HvLpEvent *event)
{
- u64 abs_addr;
-
-#ifdef DEBUG_SENDEVENT
- printk("HvCallEvent_signalLpEvent: *event = %016lx\n ",
- (unsigned long)event);
-#endif
- abs_addr = virt_to_abs(event);
- return HvCall1(HvCallEventSignalLpEvent, abs_addr);
+ return HvCall1(HvCallEventSignalLpEvent, virt_to_abs(event));
}
static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp,
@@ -127,17 +92,21 @@ static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp,
{
/* Pack the misc bits into a single Dword to pass to PLIC */
union {
- struct HvCallEvent_PackedParms parms;
+ struct {
+ u8 ack_and_target;
+ u8 type;
+ u16 subtype;
+ HvLpInstanceId src_inst;
+ HvLpInstanceId target_inst;
+ } parms;
u64 dword;
} packed;
- packed.parms.xAckType = ackType;
- packed.parms.xAckInd = ackInd;
- packed.parms.xRsvd = 0;
- packed.parms.xTargetLp = targetLp;
- packed.parms.xType = type;
- packed.parms.xSubtype = subtype;
- packed.parms.xSourceInstId = sourceInstanceId;
- packed.parms.xTargetInstId = targetInstanceId;
+
+ packed.parms.ack_and_target = (ackType << 7) | (ackInd << 6) | targetLp;
+ packed.parms.type = type;
+ packed.parms.subtype = subtype;
+ packed.parms.src_inst = sourceInstanceId;
+ packed.parms.target_inst = targetInstanceId;
return HvCall7(HvCallEventSignalLpEventParms, packed.dword,
correlationToken, eventData1, eventData2,
@@ -146,18 +115,12 @@ static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp,
static inline HvLpEvent_Rc HvCallEvent_ackLpEvent(struct HvLpEvent *event)
{
- u64 abs_addr;
-
- abs_addr = virt_to_abs(event);
- return HvCall1(HvCallEventAckLpEvent, abs_addr);
+ return HvCall1(HvCallEventAckLpEvent, virt_to_abs(event));
}
static inline HvLpEvent_Rc HvCallEvent_cancelLpEvent(struct HvLpEvent *event)
{
- u64 abs_addr;
-
- abs_addr = virt_to_abs(event);
- return HvCall1(HvCallEventCancelLpEvent, abs_addr);
+ return HvCall1(HvCallEventCancelLpEvent, virt_to_abs(event));
}
static inline HvLpInstanceId HvCallEvent_getSourceLpInstanceId(
@@ -195,59 +158,34 @@ static inline HvLpDma_Rc HvCallEvent_dmaBufList(HvLpEvent_Type type,
{
/* Pack the misc bits into a single Dword to pass to PLIC */
union {
- struct HvCallEvent_PackedDmaParms parms;
+ struct {
+ u8 flags;
+ HvLpIndex remote;
+ u8 type;
+ u8 reserved;
+ HvLpInstanceId local_inst;
+ HvLpInstanceId remote_inst;
+ } parms;
u64 dword;
} packed;
- packed.parms.xDirection = direction;
- packed.parms.xLocalAddrType = localAddressType;
- packed.parms.xRemoteAddrType = remoteAddressType;
- packed.parms.xRsvd1 = 0;
- packed.parms.xRemoteLp = remoteLp;
- packed.parms.xType = type;
- packed.parms.xRsvd2 = 0;
- packed.parms.xLocalInstId = localInstanceId;
- packed.parms.xRemoteInstId = remoteInstanceId;
+ packed.parms.flags = (direction << 7) |
+ (localAddressType << 6) | (remoteAddressType << 5);
+ packed.parms.remote = remoteLp;
+ packed.parms.type = type;
+ packed.parms.reserved = 0;
+ packed.parms.local_inst = localInstanceId;
+ packed.parms.remote_inst = remoteInstanceId;
return HvCall4(HvCallEventDmaBufList, packed.dword, localBufList,
remoteBufList, transferLength);
}
-static inline HvLpDma_Rc HvCallEvent_dmaSingle(HvLpEvent_Type type,
- HvLpIndex remoteLp, HvLpDma_Direction direction,
- HvLpInstanceId localInstanceId,
- HvLpInstanceId remoteInstanceId,
- HvLpDma_AddressType localAddressType,
- HvLpDma_AddressType remoteAddressType,
- u64 localAddrOrTce, u64 remoteAddrOrTce, u32 transferLength)
-{
- /* Pack the misc bits into a single Dword to pass to PLIC */
- union {
- struct HvCallEvent_PackedDmaParms parms;
- u64 dword;
- } packed;
-
- packed.parms.xDirection = direction;
- packed.parms.xLocalAddrType = localAddressType;
- packed.parms.xRemoteAddrType = remoteAddressType;
- packed.parms.xRsvd1 = 0;
- packed.parms.xRemoteLp = remoteLp;
- packed.parms.xType = type;
- packed.parms.xRsvd2 = 0;
- packed.parms.xLocalInstId = localInstanceId;
- packed.parms.xRemoteInstId = remoteInstanceId;
-
- return (HvLpDma_Rc)HvCall4(HvCallEventDmaSingle, packed.dword,
- localAddrOrTce, remoteAddrOrTce, transferLength);
-}
-
static inline HvLpDma_Rc HvCallEvent_dmaToSp(void *local, u32 remote,
u32 length, HvLpDma_Direction dir)
{
- u64 abs_addr;
-
- abs_addr = virt_to_abs(local);
- return HvCall4(HvCallEventDmaToSp, abs_addr, remote, length, dir);
+ return HvCall4(HvCallEventDmaToSp, virt_to_abs(local), remote,
+ length, dir);
}
#endif /* _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H */
diff --git a/include/asm-powerpc/iseries/hv_call_sc.h b/include/asm-powerpc/iseries/hv_call_sc.h
index dec7e9d9ab7..f5d21095925 100644
--- a/include/asm-powerpc/iseries/hv_call_sc.h
+++ b/include/asm-powerpc/iseries/hv_call_sc.h
@@ -1,5 +1,4 @@
/*
- * HvCallSc.h
* Copyright (C) 2001 Mike Corrigan IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
diff --git a/include/asm-powerpc/iseries/hv_lp_config.h b/include/asm-powerpc/iseries/hv_lp_config.h
index bc00f036bca..df8b2073971 100644
--- a/include/asm-powerpc/iseries/hv_lp_config.h
+++ b/include/asm-powerpc/iseries/hv_lp_config.h
@@ -1,5 +1,4 @@
/*
- * HvLpConfig.h
* Copyright (C) 2001 Mike Corrigan IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
diff --git a/include/asm-powerpc/iseries/hv_lp_event.h b/include/asm-powerpc/iseries/hv_lp_event.h
index 499ab1ad018..4065a4de493 100644
--- a/include/asm-powerpc/iseries/hv_lp_event.h
+++ b/include/asm-powerpc/iseries/hv_lp_event.h
@@ -1,5 +1,4 @@
/*
- * HvLpEvent.h
* Copyright (C) 2001 Mike Corrigan IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
@@ -32,17 +31,8 @@
* partitions through PLIC.
*/
-struct HvEventFlags {
- u8 xValid:1; /* Indicates a valid request x00-x00 */
- u8 xRsvd1:4; /* Reserved ... */
- u8 xAckType:1; /* Immediate or deferred ... */
- u8 xAckInd:1; /* Indicates if ACK required ... */
- u8 xFunction:1; /* Interrupt or Acknowledge ... */
-};
-
-
struct HvLpEvent {
- struct HvEventFlags xFlags; /* Event flags x00-x00 */
+ u8 flags; /* Event flags x00-x00 */
u8 xType; /* Type of message x01-x01 */
u16 xSubtype; /* Subtype for event x02-x03 */
u8 xSourceLp; /* Source LP x04-x04 */
@@ -126,6 +116,11 @@ extern int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex);
#define HvLpEvent_AckType_ImmediateAck 0
#define HvLpEvent_AckType_DeferredAck 1
+#define HV_LP_EVENT_INT 0x01
+#define HV_LP_EVENT_DO_ACK 0x02
+#define HV_LP_EVENT_DEFERRED_ACK 0x04
+#define HV_LP_EVENT_VALID 0x80
+
#define HvLpDma_Direction_LocalToRemote 0
#define HvLpDma_Direction_RemoteToLocal 1
@@ -139,4 +134,29 @@ extern int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex);
#define HvLpDma_Rc_InvalidAddress 4
#define HvLpDma_Rc_InvalidLength 5
+static inline int hvlpevent_is_valid(struct HvLpEvent *h)
+{
+ return h->flags & HV_LP_EVENT_VALID;
+}
+
+static inline void hvlpevent_invalidate(struct HvLpEvent *h)
+{
+ h->flags &= ~ HV_LP_EVENT_VALID;
+}
+
+static inline int hvlpevent_is_int(struct HvLpEvent *h)
+{
+ return h->flags & HV_LP_EVENT_INT;
+}
+
+static inline int hvlpevent_is_ack(struct HvLpEvent *h)
+{
+ return !hvlpevent_is_int(h);
+}
+
+static inline int hvlpevent_need_ack(struct HvLpEvent *h)
+{
+ return h->flags & HV_LP_EVENT_DO_ACK;
+}
+
#endif /* _ASM_POWERPC_ISERIES_HV_LP_EVENT_H */
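With the HvEventFlags bitfield struct gone, event consumers test the flags byte through the new helpers. A hedged sketch of a handler using them (the signature follows the usual iSeries LpEventHandler form; the processing bodies are placeholders):

#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/hv_call_event.h>

static void example_handler(struct HvLpEvent *event, struct pt_regs *regs)
{
	if (event == NULL || !hvlpevent_is_valid(event))
		return;

	if (hvlpevent_is_int(event)) {
		/* a new event from the other partition: process it here */

		if (hvlpevent_need_ack(event))
			HvCallEvent_ackLpEvent(event);
	} else {
		/* an acknowledgement for an event this partition sent earlier */
	}
}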
diff --git a/include/asm-powerpc/iseries/hv_types.h b/include/asm-powerpc/iseries/hv_types.h
index c38f7e3d01d..c3e6d2a1d1c 100644
--- a/include/asm-powerpc/iseries/hv_types.h
+++ b/include/asm-powerpc/iseries/hv_types.h
@@ -1,5 +1,4 @@
/*
- * HvTypes.h
* Copyright (C) 2001 Mike Corrigan IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
diff --git a/include/asm-powerpc/iseries/iseries_io.h b/include/asm-powerpc/iseries/iseries_io.h
index 56b2113ff0f..496aa852b61 100644
--- a/include/asm-powerpc/iseries/iseries_io.h
+++ b/include/asm-powerpc/iseries/iseries_io.h
@@ -6,7 +6,7 @@
#ifdef CONFIG_PPC_ISERIES
#include <linux/types.h>
/*
- * File iSeries_io.h created by Allan Trautman on Thu Dec 28 2000.
+ * Created by Allan Trautman on Thu Dec 28 2000.
*
* Remaps the io.h for the iSeries Io
* Copyright (C) 2000 Allan H Trautman, IBM Corporation
@@ -32,6 +32,7 @@
* End Change Activity
*/
+#ifdef CONFIG_PCI
extern u8 iSeries_Read_Byte(const volatile void __iomem * IoAddress);
extern u16 iSeries_Read_Word(const volatile void __iomem * IoAddress);
extern u32 iSeries_Read_Long(const volatile void __iomem * IoAddress);
@@ -44,6 +45,17 @@ extern void iSeries_memcpy_toio(volatile void __iomem *dest, void *source,
size_t n);
extern void iSeries_memcpy_fromio(void *dest,
const volatile void __iomem *source, size_t n);
+#else
+static inline u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
+{
+ return 0xff;
+}
+
+static inline void iSeries_Write_Byte(u8 IoData,
+ volatile void __iomem *IoAddress)
+{
+}
+#endif /* CONFIG_PCI */
#endif /* CONFIG_PPC_ISERIES */
#endif /* _ASM_POWERPC_ISERIES_ISERIES_IO_H */
diff --git a/include/asm-powerpc/iseries/it_exp_vpd_panel.h b/include/asm-powerpc/iseries/it_exp_vpd_panel.h
index 66a17a230c5..304a609ae21 100644
--- a/include/asm-powerpc/iseries/it_exp_vpd_panel.h
+++ b/include/asm-powerpc/iseries/it_exp_vpd_panel.h
@@ -1,5 +1,4 @@
/*
- * ItExtVpdPanel.h
* Copyright (C) 2002 Dave Boutcher IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
diff --git a/include/asm-powerpc/iseries/it_lp_naca.h b/include/asm-powerpc/iseries/it_lp_naca.h
index c3ef1de45d8..4fdcf052927 100644
--- a/include/asm-powerpc/iseries/it_lp_naca.h
+++ b/include/asm-powerpc/iseries/it_lp_naca.h
@@ -1,5 +1,4 @@
/*
- * ItLpNaca.h
* Copyright (C) 2001 Mike Corrigan IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
@@ -37,17 +36,13 @@ struct ItLpNaca {
u8 xLpIndex; // LP Index x0B-x0B
u16 xMaxLpQueues; // Number of allocated queues x0C-x0D
u16 xLpQueueOffset; // Offset to start of LP queues x0E-x0F
- u8 xPirEnvironMode:8; // Piranha or hardware x10-x10
- u8 xPirConsoleMode:8; // Piranha console indicator x11-x11
- u8 xPirDasdMode:8; // Piranha dasd indicator x12-x12
+ u8 xPirEnvironMode; // Piranha or hardware x10-x10
+ u8 xPirConsoleMode; // Piranha console indicator x11-x11
+ u8 xPirDasdMode; // Piranha dasd indicator x12-x12
u8 xRsvd1_0[5]; // Reserved for Piranha related x13-x17
- u8 xLparInstalled:1; // Is LPAR installed on system x18-x1F
- u8 xSysPartitioned:1; // Is the system partitioned ...
- u8 xHwSyncedTBs:1; // Hardware synced TBs ...
- u8 xIntProcUtilHmt:1; // Utilize HMT for interrupts ...
- u8 xRsvd1_1:4; // Reserved ...
- u8 xSpVpdFormat:8; // VPD areas are in CSP format ...
- u8 xIntProcRatio:8; // Ratio of int procs to procs ...
+ u8 flags; // flags, see below x18-x1F
+ u8 xSpVpdFormat; // VPD areas are in CSP format ...
+ u8 xIntProcRatio; // Ratio of int procs to procs ...
u8 xRsvd1_2[5]; // Reserved ...
u16 xRsvd1_3; // Reserved x20-x21
u16 xPlicVrmIndex; // VRM index of PLIC x22-x23
@@ -77,4 +72,9 @@ struct ItLpNaca {
extern struct ItLpNaca itLpNaca;
+#define ITLPNACA_LPAR 0x80 /* Is LPAR installed on the system */
+#define ITLPNACA_PARTITIONED 0x40 /* Is the system partitioned */
+#define ITLPNACA_HWSYNCEDTBS 0x20 /* Hardware synced TBs */
+#define ITLPNACA_HMTINT 0x10 /* Utilize HMT for interrupts */
+
#endif /* _ASM_POWERPC_ISERIES_IT_LP_NACA_H */
diff --git a/include/asm-powerpc/iseries/it_lp_queue.h b/include/asm-powerpc/iseries/it_lp_queue.h
index a60d03afbf9..b7c6fc12cce 100644
--- a/include/asm-powerpc/iseries/it_lp_queue.h
+++ b/include/asm-powerpc/iseries/it_lp_queue.h
@@ -1,5 +1,4 @@
/*
- * ItLpQueue.h
* Copyright (C) 2001 Mike Corrigan IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
diff --git a/include/asm-powerpc/iseries/it_lp_reg_save.h b/include/asm-powerpc/iseries/it_lp_reg_save.h
index 288044b702d..5403b756f65 100644
--- a/include/asm-powerpc/iseries/it_lp_reg_save.h
+++ b/include/asm-powerpc/iseries/it_lp_reg_save.h
@@ -1,5 +1,4 @@
/*
- * ItLpRegSave.h
* Copyright (C) 2001 Mike Corrigan IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
@@ -81,4 +80,6 @@ struct ItLpRegSave {
u8 xRsvd3[176]; // Reserved 350-3FF
};
-#endif /* _ITLPREGSAVE_H */
+extern struct ItLpRegSave iseries_reg_save[];
+
+#endif /* _ASM_POWERPC_ISERIES_IT_LP_REG_SAVE_H */
diff --git a/include/asm-powerpc/iseries/lpar_map.h b/include/asm-powerpc/iseries/lpar_map.h
index 84fc321615b..2ec384d66ab 100644
--- a/include/asm-powerpc/iseries/lpar_map.h
+++ b/include/asm-powerpc/iseries/lpar_map.h
@@ -1,5 +1,4 @@
/*
- * LparMap.h
* Copyright (C) 2001 Mike Corrigan IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
diff --git a/include/asm-powerpc/iseries/mf.h b/include/asm-powerpc/iseries/mf.h
index e7bd57a03fb..857e5202fc7 100644
--- a/include/asm-powerpc/iseries/mf.h
+++ b/include/asm-powerpc/iseries/mf.h
@@ -1,5 +1,4 @@
/*
- * mf.h
* Copyright (C) 2001 Troy D. Armstrong IBM Corporation
* Copyright (C) 2004 Stephen Rothwell IBM Corporation
*
diff --git a/include/asm-powerpc/iseries/vio.h b/include/asm-powerpc/iseries/vio.h
index 7e3a469420d..72a97d37aac 100644
--- a/include/asm-powerpc/iseries/vio.h
+++ b/include/asm-powerpc/iseries/vio.h
@@ -1,5 +1,4 @@
/* -*- linux-c -*-
- * drivers/char/vio.h
*
* iSeries Virtual I/O Message Path header
*
diff --git a/include/asm-powerpc/kdebug.h b/include/asm-powerpc/kdebug.h
index 9dcbac67481..7c16265568e 100644
--- a/include/asm-powerpc/kdebug.h
+++ b/include/asm-powerpc/kdebug.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_KDEBUG_H
#define _ASM_POWERPC_KDEBUG_H
+#ifdef __KERNEL__
/* nearly identical to x86_64/i386 code */
@@ -39,4 +40,5 @@ static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,lon
return notifier_call_chain(&powerpc_die_chain, val, &args);
}
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_KDEBUG_H */
diff --git a/include/asm-powerpc/kdump.h b/include/asm-powerpc/kdump.h
new file mode 100644
index 00000000000..a87aed00d61
--- /dev/null
+++ b/include/asm-powerpc/kdump.h
@@ -0,0 +1,13 @@
+#ifndef _PPC64_KDUMP_H
+#define _PPC64_KDUMP_H
+
+/* How many bytes to reserve at zero for kdump. The reserve limit should
+ * be greater than or equal to the trampoline's end address. */
+#define KDUMP_RESERVE_LIMIT 0x8000
+
+#define KDUMP_TRAMPOLINE_START 0x0100
+#define KDUMP_TRAMPOLINE_END 0x3000
+
+extern void kdump_setup(void);
+
+#endif /* __PPC64_KDUMP_H */
diff --git a/include/asm-powerpc/kexec.h b/include/asm-powerpc/kexec.h
index c72ffc709ea..fffdf690b84 100644
--- a/include/asm-powerpc/kexec.h
+++ b/include/asm-powerpc/kexec.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_KEXEC_H
#define _ASM_POWERPC_KEXEC_H
+#ifdef __KERNEL__
/*
* Maximum page that is mapped directly into kernel memory.
@@ -30,21 +31,36 @@
#define KEXEC_ARCH KEXEC_ARCH_PPC
#endif
+#define HAVE_ARCH_COPY_OLDMEM_PAGE
+
#ifndef __ASSEMBLY__
-#define MAX_NOTE_BYTES 1024
-typedef u32 note_buf_t[MAX_NOTE_BYTES / sizeof(u32)];
+#ifdef CONFIG_KEXEC
-extern note_buf_t crash_notes[];
+#define MAX_NOTE_BYTES 1024
#ifdef __powerpc64__
extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
master to copy new code to 0 */
extern void __init kexec_setup(void);
-#else
+extern int crashing_cpu;
+extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
+#endif /* __powerpc64__ */
+
struct kimage;
-extern void machine_kexec_simple(struct kimage *image);
-#endif
+struct pt_regs;
+extern void default_machine_kexec(struct kimage *image);
+extern int default_machine_kexec_prepare(struct kimage *image);
+extern void default_machine_crash_shutdown(struct pt_regs *regs);
+
+#endif /* CONFIG_KEXEC */
+/*
+ * Provide a dummy definition to avoid build failures. Will remain
+ * empty till crash dump support is enabled.
+ */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+ struct pt_regs *oldregs) { }
#endif /* ! __ASSEMBLY__ */
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_KEXEC_H */
diff --git a/include/asm-powerpc/keylargo.h b/include/asm-powerpc/keylargo.h
index a669a3f0f5a..d8520ef121f 100644
--- a/include/asm-powerpc/keylargo.h
+++ b/include/asm-powerpc/keylargo.h
@@ -1,3 +1,6 @@
+#ifndef _ASM_POWERPC_KEYLARGO_H
+#define _ASM_POWERPC_KEYLARGO_H
+#ifdef __KERNEL__
/*
* keylargo.h: definitions for using the "KeyLargo" I/O controller chip.
*
@@ -232,10 +235,12 @@
#define K2_FCR1_I2S0_RESET 0x00000800
#define K2_FCR1_I2S0_CLK_ENABLE_BIT 0x00001000
#define K2_FCR1_I2S0_ENABLE 0x00002000
-
#define K2_FCR1_PCI1_CLK_ENABLE 0x00004000
#define K2_FCR1_FW_CLK_ENABLE 0x00008000
#define K2_FCR1_FW_RESET_N 0x00010000
+#define K2_FCR1_I2S1_CELL_ENABLE 0x00020000
+#define K2_FCR1_I2S1_CLK_ENABLE_BIT 0x00080000
+#define K2_FCR1_I2S1_ENABLE 0x00100000
#define K2_FCR1_GMAC_CLK_ENABLE 0x00400000
#define K2_FCR1_GMAC_POWER_DOWN 0x00800000
#define K2_FCR1_GMAC_RESET_N 0x01000000
@@ -246,3 +251,11 @@
#define K2_FCR1_UATA_RESET_N 0x40000000
#define K2_FCR1_UATA_CHOOSE_CLK66 0x80000000
+/* Shasta definitions */
+#define SH_FCR1_I2S2_CELL_ENABLE 0x00000010
+#define SH_FCR1_I2S2_CLK_ENABLE_BIT 0x00000040
+#define SH_FCR1_I2S2_ENABLE 0x00000080
+#define SH_FCR3_I2S2_CLK18_ENABLE 0x00008000
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_KEYLARGO_H */
diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h
index 6cd0a3bfa28..f466bc804f4 100644
--- a/include/asm-powerpc/kprobes.h
+++ b/include/asm-powerpc/kprobes.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_KPROBES_H
#define _ASM_POWERPC_KPROBES_H
+#ifdef __KERNEL__
/*
* Kernel Probes (KProbes)
*
@@ -29,7 +30,10 @@
#include <linux/ptrace.h>
#include <linux/percpu.h>
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+
struct pt_regs;
+struct kprobe;
typedef unsigned int kprobe_opcode_t;
#define BREAKPOINT_INSTRUCTION 0x7fe00008 /* trap */
@@ -47,6 +51,7 @@ typedef unsigned int kprobe_opcode_t;
#define ARCH_SUPPORTS_KRETPROBES
void kretprobe_trampoline(void);
+extern void arch_remove_kprobe(struct kprobe *p);
/* Architecture specific copy of original instruction */
struct arch_specific_insn {
@@ -68,14 +73,7 @@ struct kprobe_ctlblk {
struct prev_kprobe prev_kprobe;
};
-#ifdef CONFIG_KPROBES
extern int kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data);
-#else /* !CONFIG_KPROBES */
-static inline int kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data)
-{
- return 0;
-}
-#endif
+ unsigned long val, void *data);
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_KPROBES_H */
diff --git a/include/asm-powerpc/lmb.h b/include/asm-powerpc/lmb.h
index ea0afe34354..d3546c4c9f4 100644
--- a/include/asm-powerpc/lmb.h
+++ b/include/asm-powerpc/lmb.h
@@ -1,5 +1,6 @@
#ifndef _PPC64_LMB_H
#define _PPC64_LMB_H
+#ifdef __KERNEL__
/*
* Definitions for talking to the Open Firmware PROM on
@@ -78,4 +79,5 @@ lmb_end_pfn(struct lmb_region *type, unsigned long region_nr)
lmb_size_pages(type, region_nr);
}
+#endif /* __KERNEL__ */
#endif /* _PPC64_LMB_H */
diff --git a/include/asm-powerpc/lppaca.h b/include/asm-powerpc/lppaca.h
index c1bedab1515..cd9f11f1ef1 100644
--- a/include/asm-powerpc/lppaca.h
+++ b/include/asm-powerpc/lppaca.h
@@ -18,6 +18,7 @@
*/
#ifndef _ASM_POWERPC_LPPACA_H
#define _ASM_POWERPC_LPPACA_H
+#ifdef __KERNEL__
//=============================================================================
//
@@ -28,7 +29,9 @@
//----------------------------------------------------------------------------
#include <asm/types.h>
-struct lppaca {
+/* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k
+ * alignment is sufficient to prevent this */
+struct __attribute__((__aligned__(0x400))) lppaca {
//=============================================================================
// CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
// NOTE: The xDynXyz fields are fields that will be dynamically changed by
@@ -128,4 +131,7 @@ struct lppaca {
u8 pmc_save_area[256]; // PMC interrupt Area x00-xFF
};
+extern struct lppaca lppaca[];
+
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_LPPACA_H */
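
Since struct lppaca is 640 bytes and now carries a 1kB alignment, each element
of the lppaca[] array above fits entirely within one page, which is what the
hypervisor requires. A hypothetical compile-time check of that invariant:

	#include <linux/kernel.h>
	#include <asm/lppaca.h>

	/* Illustrative only: 640-byte struct, 0x400 alignment => never spans a page */
	static inline void lppaca_layout_check(void)
	{
		BUILD_BUG_ON(sizeof(struct lppaca) > 0x400);
		BUILD_BUG_ON(__alignof__(struct lppaca) < 0x400);
	}
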
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h
index c011abb8b60..5348b820788 100644
--- a/include/asm-powerpc/machdep.h
+++ b/include/asm-powerpc/machdep.h
@@ -27,6 +27,9 @@ struct device_node;
struct iommu_table;
struct rtc_time;
struct file;
+#ifdef CONFIG_KEXEC
+struct kimage;
+#endif
#ifdef CONFIG_SMP
struct smp_ops_t {
@@ -131,7 +134,7 @@ struct machdep_calls {
void (*nvram_sync)(void);
/* Exception handlers */
- void (*system_reset_exception)(struct pt_regs *regs);
+ int (*system_reset_exception)(struct pt_regs *regs);
int (*machine_check_exception)(struct pt_regs *regs);
/* Motherboard/chipset features. This is a kind of general purpose
@@ -207,19 +210,19 @@ struct machdep_calls {
/* this is for modules, since _machine can be a define -- Cort */
int ppc_machine;
+#endif /* CONFIG_PPC32 */
-#ifdef CONFIG_KEXEC
/* Called to shutdown machine specific hardware not already controlled
* by other drivers.
- * XXX Should we move this one out of kexec scope?
*/
void (*machine_shutdown)(void);
+#ifdef CONFIG_KEXEC
/* Called to do the minimal shutdown needed to run a kexec'd kernel
* to run successfully.
* XXX Should we move this one out of kexec scope?
*/
- void (*machine_crash_shutdown)(void);
+ void (*machine_crash_shutdown)(struct pt_regs *regs);
/* Called to do what every setup is needed on image and the
* reboot code buffer. Returns 0 on success.
@@ -237,7 +240,6 @@ struct machdep_calls {
*/
void (*machine_kexec)(struct kimage *image);
#endif /* CONFIG_KEXEC */
-#endif /* CONFIG_PPC32 */
};
extern void default_idle(void);
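
With machine_shutdown hoisted out of CONFIG_KEXEC and machine_crash_shutdown
now taking the crashing context's registers, a platform wires the hooks up
roughly as below. The example_* names are placeholders; the default_machine_*
helpers are the ones declared in the kexec.h change above, and ppc_md is the
global machdep_calls instance.

	#include <linux/init.h>
	#include <asm/machdep.h>
	#include <asm/kexec.h>

	static void example_machine_shutdown(void)	/* hypothetical platform hook */
	{
		/* stop DMA engines, park devices, etc. */
	}

	static void __init example_setup_md(void)
	{
		ppc_md.machine_shutdown       = example_machine_shutdown;
	#ifdef CONFIG_KEXEC
		ppc_md.machine_kexec_prepare  = default_machine_kexec_prepare;
		ppc_md.machine_kexec          = default_machine_kexec;
		ppc_md.machine_crash_shutdown = default_machine_crash_shutdown;
	#endif
	}
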
diff --git a/include/asm-powerpc/macio.h b/include/asm-powerpc/macio.h
index b553dd4b139..3a6cb1a513b 100644
--- a/include/asm-powerpc/macio.h
+++ b/include/asm-powerpc/macio.h
@@ -1,5 +1,6 @@
#ifndef __MACIO_ASIC_H__
#define __MACIO_ASIC_H__
+#ifdef __KERNEL__
#include <asm/of_device.h>
@@ -137,4 +138,5 @@ struct macio_driver
extern int macio_register_driver(struct macio_driver *);
extern void macio_unregister_driver(struct macio_driver *);
+#endif /* __KERNEL__ */
#endif /* __MACIO_ASIC_H__ */
diff --git a/include/asm-powerpc/mman.h b/include/asm-powerpc/mman.h
index f5e5342fcac..a2e34c21b44 100644
--- a/include/asm-powerpc/mman.h
+++ b/include/asm-powerpc/mman.h
@@ -44,6 +44,7 @@
#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
+#define MADV_REMOVE 0x5 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index 29b0bb0086d..d096d9e76ad 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_MMU_H_
#define _ASM_POWERPC_MMU_H_
+#ifdef __KERNEL__
#ifndef CONFIG_PPC64
#include <asm-ppc/mmu.h>
@@ -33,7 +34,8 @@
/* Location of cpu0's segment table */
#define STAB0_PAGE 0x6
-#define STAB0_PHYS_ADDR (STAB0_PAGE<<12)
+#define STAB0_OFFSET (STAB0_PAGE << 12)
+#define STAB0_PHYS_ADDR (STAB0_OFFSET + PHYSICAL_START)
#ifndef __ASSEMBLY__
extern char initial_stab[];
@@ -394,7 +396,12 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
#define VSID_SCRAMBLE(pvsid) (((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS)
#define KERNEL_VSID(ea) VSID_SCRAMBLE(GET_ESID(ea))
+/* Physical address used by some IO functions */
+typedef unsigned long phys_addr_t;
+
+
#endif /* __ASSEMBLY */
#endif /* CONFIG_PPC64 */
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MMU_H_ */
diff --git a/include/asm-powerpc/mmu_context.h b/include/asm-powerpc/mmu_context.h
index ea6798c7d5f..1b8a25fd48f 100644
--- a/include/asm-powerpc/mmu_context.h
+++ b/include/asm-powerpc/mmu_context.h
@@ -1,5 +1,6 @@
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
+#ifdef __KERNEL__
#ifndef CONFIG_PPC64
#include <asm-ppc/mmu_context.h>
@@ -86,4 +87,5 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
}
#endif /* CONFIG_PPC64 */
+#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
diff --git a/include/asm-powerpc/mmzone.h b/include/asm-powerpc/mmzone.h
index 54958d6cae0..88d70bae776 100644
--- a/include/asm-powerpc/mmzone.h
+++ b/include/asm-powerpc/mmzone.h
@@ -6,6 +6,7 @@
*/
#ifndef _ASM_MMZONE_H_
#define _ASM_MMZONE_H_
+#ifdef __KERNEL__
#include <linux/config.h>
@@ -47,4 +48,5 @@ extern unsigned long max_pfn;
extern int __init early_pfn_to_nid(unsigned long pfn);
#endif
+#endif /* __KERNEL__ */
#endif /* _ASM_MMZONE_H_ */
diff --git a/include/asm-powerpc/module.h b/include/asm-powerpc/module.h
index 7ecd05e0305..584fabfb4f0 100644
--- a/include/asm-powerpc/module.h
+++ b/include/asm-powerpc/module.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_MODULE_H
#define _ASM_POWERPC_MODULE_H
+#ifdef __KERNEL__
/*
* This program is free software; you can redistribute it and/or
@@ -74,4 +75,5 @@ struct exception_table_entry;
void sort_ex_table(struct exception_table_entry *start,
struct exception_table_entry *finish);
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MODULE_H */
diff --git a/include/asm-powerpc/mpic.h b/include/asm-powerpc/mpic.h
index 7083d1f7426..6b9e78142f4 100644
--- a/include/asm-powerpc/mpic.h
+++ b/include/asm-powerpc/mpic.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_MPIC_H
#define _ASM_POWERPC_MPIC_H
+#ifdef __KERNEL__
#include <linux/irq.h>
@@ -117,7 +118,9 @@ typedef int (*mpic_cascade_t)(struct pt_regs *regs, void *data);
struct mpic_irq_fixup
{
u8 __iomem *base;
- unsigned int irq;
+ u8 __iomem *applebase;
+ u32 data;
+ unsigned int index;
};
#endif /* CONFIG_MPIC_BROKEN_U3 */
@@ -284,4 +287,5 @@ extern int mpic_get_irq(struct pt_regs *regs);
/* global mpic for pSeries */
extern struct mpic *pSeries_mpic;
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MPIC_H */
diff --git a/include/asm-powerpc/mutex.h b/include/asm-powerpc/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/include/asm-powerpc/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-powerpc/numnodes.h b/include/asm-powerpc/numnodes.h
index 795533aca09..e138edae09d 100644
--- a/include/asm-powerpc/numnodes.h
+++ b/include/asm-powerpc/numnodes.h
@@ -1,7 +1,9 @@
#ifndef _ASM_POWERPC_MAX_NUMNODES_H
#define _ASM_POWERPC_MAX_NUMNODES_H
+#ifdef __KERNEL__
/* Max 16 Nodes */
#define NODES_SHIFT 4
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MAX_NUMNODES_H */
diff --git a/include/asm-powerpc/nvram.h b/include/asm-powerpc/nvram.h
index 24bd8c2388e..f3563e11e26 100644
--- a/include/asm-powerpc/nvram.h
+++ b/include/asm-powerpc/nvram.h
@@ -55,6 +55,7 @@ struct nvram_header {
char name[12];
};
+#ifdef __KERNEL__
struct nvram_partition {
struct list_head partition;
struct nvram_header header;
@@ -69,6 +70,7 @@ extern struct nvram_partition *nvram_find_partition(int sig, const char *name);
extern int pSeries_nvram_init(void);
extern int mmio_nvram_init(void);
+#endif /* __KERNEL__ */
/* PowerMac specific nvram stuffs */
@@ -78,6 +80,7 @@ enum {
pmac_nvram_NR /* MacOS Name Registry partition */
};
+#ifdef __KERNEL__
/* Return partition offset in nvram */
extern int pmac_get_partition(int partition);
@@ -91,6 +94,7 @@ extern void nvram_sync(void);
/* Normal access to NVRAM */
extern unsigned char nvram_read_byte(int i);
extern void nvram_write_byte(unsigned char c, int i);
+#endif
/* Some offsets in XPRAM */
#define PMAC_XPRAM_MACHINE_LOC 0xe4
diff --git a/include/asm-powerpc/of_device.h b/include/asm-powerpc/of_device.h
index ddb16aae0bd..6249a7c3963 100644
--- a/include/asm-powerpc/of_device.h
+++ b/include/asm-powerpc/of_device.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_OF_DEVICE_H
#define _ASM_POWERPC_OF_DEVICE_H
+#ifdef __KERNEL__
#include <linux/device.h>
#include <linux/mod_devicetable.h>
@@ -61,4 +62,5 @@ extern struct of_device *of_platform_device_create(struct device_node *np,
struct device *parent);
extern void of_release_dev(struct device *dev);
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_OF_DEVICE_H */
diff --git a/include/asm-powerpc/ohare.h b/include/asm-powerpc/ohare.h
index 023b5977223..0d030f9dea2 100644
--- a/include/asm-powerpc/ohare.h
+++ b/include/asm-powerpc/ohare.h
@@ -1,3 +1,6 @@
+#ifndef _ASM_POWERPC_OHARE_H
+#define _ASM_POWERPC_OHARE_H
+#ifdef __KERNEL__
/*
* ohare.h: definitions for using the "O'Hare" I/O controller chip.
*
@@ -46,3 +49,6 @@
* Contributed by Harry Eaton.
*/
#define STARMAX_FEATURES 0xbeff7a
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_OHARE_H */
diff --git a/include/asm-powerpc/oprofile_impl.h b/include/asm-powerpc/oprofile_impl.h
index 8013cd273ce..338e6a7cff4 100644
--- a/include/asm-powerpc/oprofile_impl.h
+++ b/include/asm-powerpc/oprofile_impl.h
@@ -11,6 +11,7 @@
#ifndef _ASM_POWERPC_OPROFILE_IMPL_H
#define _ASM_POWERPC_OPROFILE_IMPL_H
+#ifdef __KERNEL__
#define OP_MAX_COUNTER 8
@@ -22,24 +23,22 @@ struct op_counter_config {
unsigned long enabled;
unsigned long event;
unsigned long count;
+ /* Classic doesn't support per-counter user/kernel selection */
unsigned long kernel;
-#ifdef __powerpc64__
- /* We dont support per counter user/kernel selection */
-#endif
unsigned long user;
unsigned long unit_mask;
};
/* System-wide configuration as set via oprofilefs. */
struct op_system_config {
-#ifdef __powerpc64__
+#ifdef CONFIG_PPC64
unsigned long mmcr0;
unsigned long mmcr1;
unsigned long mmcra;
#endif
unsigned long enable_kernel;
unsigned long enable_user;
-#ifdef __powerpc64__
+#ifdef CONFIG_PPC64
unsigned long backtrace_spinlocks;
#endif
};
@@ -49,9 +48,7 @@ struct op_powerpc_model {
void (*reg_setup) (struct op_counter_config *,
struct op_system_config *,
int num_counters);
-#ifdef __powerpc64__
void (*cpu_setup) (void *);
-#endif
void (*start) (struct op_counter_config *);
void (*stop) (void);
void (*handle_interrupt) (struct pt_regs *,
@@ -59,10 +56,19 @@ struct op_powerpc_model {
int num_counters;
};
-#ifdef __powerpc64__
+#ifdef CONFIG_FSL_BOOKE
+extern struct op_powerpc_model op_model_fsl_booke;
+#else /* Otherwise, it's classic */
+
+#ifdef CONFIG_PPC64
extern struct op_powerpc_model op_model_rs64;
extern struct op_powerpc_model op_model_power4;
+#else /* Otherwise, CONFIG_PPC32 */
+extern struct op_powerpc_model op_model_7450;
+#endif
+
+/* All the classic PPC parts use these */
static inline unsigned int ctr_read(unsigned int i)
{
switch(i) {
@@ -78,10 +84,14 @@ static inline unsigned int ctr_read(unsigned int i)
return mfspr(SPRN_PMC5);
case 5:
return mfspr(SPRN_PMC6);
+
+/* No PPC32 chip has more than 6 so far */
+#ifdef CONFIG_PPC64
case 6:
return mfspr(SPRN_PMC7);
case 7:
return mfspr(SPRN_PMC8);
+#endif
default:
return 0;
}
@@ -108,16 +118,21 @@ static inline void ctr_write(unsigned int i, unsigned int val)
case 5:
mtspr(SPRN_PMC6, val);
break;
+
+/* No PPC32 chip has more than 6, yet */
+#ifdef CONFIG_PPC64
case 6:
mtspr(SPRN_PMC7, val);
break;
case 7:
mtspr(SPRN_PMC8, val);
break;
+#endif
default:
break;
}
}
-#endif /* __powerpc64__ */
+#endif /* !CONFIG_FSL_BOOKE */
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_OPROFILE_IMPL_H */
diff --git a/include/asm-powerpc/pSeries_reconfig.h b/include/asm-powerpc/pSeries_reconfig.h
index c0db1ea7f7d..ea6cfb8efb8 100644
--- a/include/asm-powerpc/pSeries_reconfig.h
+++ b/include/asm-powerpc/pSeries_reconfig.h
@@ -1,5 +1,6 @@
#ifndef _PPC64_PSERIES_RECONFIG_H
#define _PPC64_PSERIES_RECONFIG_H
+#ifdef __KERNEL__
#include <linux/notifier.h>
@@ -22,4 +23,5 @@ static inline int pSeries_reconfig_notifier_register(struct notifier_block *nb)
static inline void pSeries_reconfig_notifier_unregister(struct notifier_block *nb) { }
#endif /* CONFIG_PPC_PSERIES */
+#endif /* __KERNEL__ */
#endif /* _PPC64_PSERIES_RECONFIG_H */
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index 92c765c35bd..c9add8f1ad9 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -14,15 +14,16 @@
*/
#ifndef _ASM_POWERPC_PACA_H
#define _ASM_POWERPC_PACA_H
+#ifdef __KERNEL__
#include <linux/config.h>
#include <asm/types.h>
#include <asm/lppaca.h>
-#include <asm/iseries/it_lp_reg_save.h>
#include <asm/mmu.h>
register struct paca_struct *local_paca asm("r13");
#define get_paca() local_paca
+#define get_lppaca() (get_paca()->lppaca_ptr)
struct task_struct;
@@ -31,9 +32,9 @@ struct task_struct;
*
* This structure is not directly accessed by firmware or the service
* processor except for the first two pointers that point to the
- * lppaca area and the ItLpRegSave area for this CPU. Both the
- * lppaca and ItLpRegSave objects are currently contained within the
- * PACA but they do not need to be.
+ * lppaca area and the ItLpRegSave area for this CPU. The lppaca
+ * object is currently contained within the PACA but it doesn't need
+ * to be.
*/
struct paca_struct {
/*
@@ -48,7 +49,9 @@ struct paca_struct {
* accessed by the firmware
*/
struct lppaca *lppaca_ptr; /* Pointer to LpPaca for PLIC */
- struct ItLpRegSave *reg_save_ptr; /* Pointer to LpRegSave for PLIC */
+#ifdef CONFIG_PPC_ISERIES
+ void *reg_save_ptr; /* Pointer to LpRegSave for PLIC */
+#endif /* CONFIG_PPC_ISERIES */
/*
* MAGIC: the spinlock functions in arch/ppc64/lib/locks.c
@@ -59,11 +62,11 @@ struct paca_struct {
u16 lock_token; /* Constant 0x8000, used in locks */
u16 paca_index; /* Logical processor number */
- u32 default_decr; /* Default decrementer value */
u64 kernel_toc; /* Kernel TOC address */
u64 stab_real; /* Absolute address of segment table */
u64 stab_addr; /* Virtual address of segment table */
void *emergency_sp; /* pointer to emergency stack */
+ u64 data_offset; /* per cpu data offset */
s16 hw_cpu_id; /* Physical processor number */
u8 cpu_start; /* At startup, processor spins until */
/* this becomes non-zero. */
@@ -90,31 +93,12 @@ struct paca_struct {
struct task_struct *__current; /* Pointer to current */
u64 kstack; /* Saved Kernel stack addr */
u64 stab_rr; /* stab/slb round-robin counter */
- u64 next_jiffy_update_tb; /* TB value for next jiffy update */
u64 saved_r1; /* r1 save for RTAS calls */
u64 saved_msr; /* MSR saved here by enter_rtas */
u8 proc_enabled; /* irq soft-enable flag */
-
- /* not yet used */
- u64 exdsi[8]; /* used for linear mapping hash table misses */
-
- /*
- * iSeries structure which the hypervisor knows about -
- * this structure should not cross a page boundary.
- * The vpa_init/register_vpa call is now known to fail if the
- * lppaca structure crosses a page boundary.
- * The lppaca is also used on POWER5 pSeries boxes.
- * The lppaca is 640 bytes long, and cannot readily change
- * since the hypervisor knows its layout, so a 1kB
- * alignment will suffice to ensure that it doesn't
- * cross a page boundary.
- */
- struct lppaca lppaca __attribute__((__aligned__(0x400)));
-#ifdef CONFIG_PPC_ISERIES
- struct ItLpRegSave reg_save;
-#endif
};
extern struct paca_struct paca[];
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PACA_H */
diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h
index 18c1e5ee81a..0b82df483f7 100644
--- a/include/asm-powerpc/page.h
+++ b/include/asm-powerpc/page.h
@@ -37,8 +37,30 @@
*/
#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
+/*
+ * KERNELBASE is the virtual address of the start of the kernel, it's often
+ * the same as PAGE_OFFSET, but _might not be_.
+ *
+ * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
+ *
+ * To get a physical address from a virtual one you subtract PAGE_OFFSET,
+ * _not_ KERNELBASE.
+ *
+ * If you want to know something's offset from the start of the kernel you
+ * should subtract KERNELBASE.
+ *
+ * If you want to test if something's a kernel address, use is_kernel_addr().
+ */
+
+#ifdef CONFIG_CRASH_DUMP
+/* Kdump kernel runs at 32 MB, change at your peril. */
+#define PHYSICAL_START 0x2000000
+#else
+#define PHYSICAL_START 0x0
+#endif
+
#define PAGE_OFFSET ASM_CONST(CONFIG_KERNEL_START)
-#define KERNELBASE PAGE_OFFSET
+#define KERNELBASE (PAGE_OFFSET + PHYSICAL_START)
#ifdef CONFIG_DISCONTIGMEM
#define page_to_pfn(page) discontigmem_page_to_pfn(page)
@@ -56,7 +78,7 @@
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
+#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
/*
@@ -86,6 +108,12 @@
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
+/*
+ * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
+ * "kernelness", use is_kernel_addr() - it should do what you want.
+ */
+#define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
+
#ifndef __ASSEMBLY__
#undef STRICT_MM_TYPECHECKS
@@ -150,7 +178,7 @@ typedef unsigned long pmd_t;
#define pmd_val(x) (x)
#define __pmd(x) (x)
-#ifndef CONFIG_PPC_64K_PAGES
+#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC_64K_PAGES)
typedef unsigned long pud_t;
#define pud_val(x) (x)
#define __pud(x) (x)
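
In code, the distinction the new comment draws works out as follows; a purely
illustrative sketch, assuming phys is a valid linear-mapping physical address:

	#include <asm/page.h>

	static int page_macro_examples(unsigned long phys)
	{
		void *virt = __va(phys);		/* phys -> virt: add PAGE_OFFSET      */
		unsigned long back = __pa(virt);	/* virt -> phys: subtract PAGE_OFFSET */
		unsigned long koff = (unsigned long)virt - KERNELBASE; /* offset from kernel start */

		(void)back;
		(void)koff;
		/* test for "kernelness" with the helper, not by comparing with KERNELBASE */
		return is_kernel_addr((unsigned long)virt);
	}
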
diff --git a/include/asm-powerpc/page_32.h b/include/asm-powerpc/page_32.h
index 7259cfd85da..2677bad70f4 100644
--- a/include/asm-powerpc/page_32.h
+++ b/include/asm-powerpc/page_32.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_PAGE_32_H
#define _ASM_POWERPC_PAGE_32_H
+#ifdef __KERNEL__
#define VM_DATA_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS32
@@ -37,4 +38,5 @@ extern __inline__ int get_order(unsigned long size)
#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PAGE_32_H */
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
index 6642c012500..3fb061bab9e 100644
--- a/include/asm-powerpc/page_64.h
+++ b/include/asm-powerpc/page_64.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_PAGE_64_H
#define _ASM_POWERPC_PAGE_64_H
+#ifdef __KERNEL__
/*
* Copyright (C) 2001 PPC64 Team, IBM Corp
@@ -25,16 +26,6 @@
*/
#define PAGE_FACTOR (PAGE_SHIFT - HW_PAGE_SHIFT)
-#define REGION_SIZE 4UL
-#define REGION_SHIFT 60UL
-#define REGION_MASK (((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)
-
-#define VMALLOCBASE ASM_CONST(0xD000000000000000)
-#define VMALLOC_REGION_ID (VMALLOCBASE >> REGION_SHIFT)
-#define KERNEL_REGION_ID (KERNELBASE >> REGION_SHIFT)
-#define USER_REGION_ID (0UL)
-#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT)
-
/* Segment size */
#define SID_SHIFT 28
#define SID_MASK 0xfffffffffUL
@@ -180,4 +171,5 @@ extern unsigned int HPAGE_SHIFT;
#include <asm-generic/page.h>
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PAGE_64_H */
diff --git a/include/asm-powerpc/param.h b/include/asm-powerpc/param.h
index bdc724f7088..094f63d4d5c 100644
--- a/include/asm-powerpc/param.h
+++ b/include/asm-powerpc/param.h
@@ -1,8 +1,6 @@
#ifndef _ASM_POWERPC_PARAM_H
#define _ASM_POWERPC_PARAM_H
-#include <linux/config.h>
-
#ifdef __KERNEL__
#define HZ CONFIG_HZ /* internal kernel timer frequency */
#define USER_HZ 100 /* for user interfaces in "ticks" */
diff --git a/include/asm-powerpc/parport.h b/include/asm-powerpc/parport.h
index d86b410a6f8..3fca21ddf54 100644
--- a/include/asm-powerpc/parport.h
+++ b/include/asm-powerpc/parport.h
@@ -8,11 +8,37 @@
#ifndef _ASM_POWERPC_PARPORT_H
#define _ASM_POWERPC_PARPORT_H
+#ifdef __KERNEL__
+
+#include <asm/prom.h>
+
+extern struct parport *parport_pc_probe_port (unsigned long int base,
+ unsigned long int base_hi,
+ int irq, int dma,
+ struct pci_dev *dev);
-static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
{
- return parport_pc_find_isa_ports (autoirq, autodma);
+ struct device_node *np;
+ u32 *prop;
+ u32 io1, io2;
+ int propsize;
+ int count = 0;
+ for (np = NULL; (np = of_find_compatible_node(np,
+ "parallel",
+ "pnpPNP,400")) != NULL;) {
+ prop = (u32 *)get_property(np, "reg", &propsize);
+ if (!prop || propsize > 6*sizeof(u32))
+ continue;
+ io1 = prop[1]; io2 = prop[2];
+ prop = (u32 *)get_property(np, "interrupts", NULL);
+ if (!prop)
+ continue;
+ if (parport_pc_probe_port(io1, io2, prop[0], autodma, NULL) != NULL)
+ count++;
+ }
+ return count;
}
+#endif /* __KERNEL__ */
#endif /* !(_ASM_POWERPC_PARPORT_H) */
diff --git a/include/asm-powerpc/pci-bridge.h b/include/asm-powerpc/pci-bridge.h
index 223ec7bd81d..38de92d41a1 100644
--- a/include/asm-powerpc/pci-bridge.h
+++ b/include/asm-powerpc/pci-bridge.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_PCI_BRIDGE_H
#define _ASM_POWERPC_PCI_BRIDGE_H
+#ifdef __KERNEL__
#ifndef CONFIG_PPC64
#include <asm-ppc/pci-bridge.h>
@@ -60,16 +61,17 @@ struct pci_controller;
struct iommu_table;
struct pci_dn {
- int busno; /* for pci devices */
- int bussubno; /* for pci devices */
- int devfn; /* for pci devices */
+ int busno; /* pci bus number */
+ int bussubno; /* pci subordinate bus number */
+ int devfn; /* pci device and function number */
+ int class_code; /* pci device class */
#ifdef CONFIG_PPC_PSERIES
int eeh_mode; /* See eeh.h for possible EEH_MODEs */
int eeh_config_addr;
+ int eeh_pe_config_addr; /* new-style partition endpoint address */
int eeh_check_count; /* # times driver ignored error */
int eeh_freeze_count; /* # times this device froze up. */
- int eeh_is_bridge; /* device is pci-to-pci bridge */
#endif
int pci_ext_config_space; /* for pci devices */
struct pci_controller *phb; /* for pci devices */
@@ -125,12 +127,20 @@ static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
return bus->sysdata; /* Must be root bus (PHB) */
}
+/** Find the bus corresponding to the indicated device node */
+struct pci_bus * pcibios_find_pci_bus(struct device_node *dn);
+
extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
struct device_node *dev, int primary);
-extern int pcibios_remove_root_bus(struct pci_controller *phb);
+/** Remove all of the PCI devices under this bus */
+void pcibios_remove_pci_devices(struct pci_bus *bus);
-extern void phbs_remap_io(void);
+/** Discover new pci devices under this bus, and add them */
+void pcibios_add_pci_devices(struct pci_bus * bus);
+void pcibios_fixup_new_pci_devices(struct pci_bus *bus, int fix_bus);
+
+extern int pcibios_remove_root_bus(struct pci_controller *phb);
static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus)
{
@@ -140,14 +150,27 @@ static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus)
return PCI_DN(busdn)->phb;
}
+extern struct pci_controller*
+pci_find_hose_for_OF_device(struct device_node* node);
+
extern struct pci_controller *
pcibios_alloc_controller(struct device_node *dev);
extern void pcibios_free_controller(struct pci_controller *phb);
+#ifdef CONFIG_PCI
+extern unsigned long pci_address_to_pio(phys_addr_t address);
+#else
+static inline unsigned long pci_address_to_pio(phys_addr_t address)
+{
+ return (unsigned long)-1;
+}
+#endif
+
/* Return values for ppc_md.pci_probe_mode function */
#define PCI_PROBE_NONE -1 /* Don't look at this bus at all */
#define PCI_PROBE_NORMAL 0 /* Do normal PCI probing */
#define PCI_PROBE_DEVTREE 1 /* Instantiate from device tree */
#endif /* CONFIG_PPC64 */
+#endif /* __KERNEL__ */
#endif
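
Taken together, the new helpers give slot-level hotplug code a simple
remove/rescan sequence; a hypothetical caller might look like this:

	#include <asm/pci-bridge.h>

	/* Illustrative only: re-probe everything below the bus of a device node */
	static void example_replug(struct device_node *dn)
	{
		struct pci_bus *bus = pcibios_find_pci_bus(dn);

		if (bus == NULL)
			return;
		pcibios_remove_pci_devices(bus);	/* tear down what is currently there */
		pcibios_add_pci_devices(bus);		/* rescan and add the new devices */
	}
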
diff --git a/include/asm-powerpc/pci.h b/include/asm-powerpc/pci.h
index d5934a076bd..5d2c9e6c4be 100644
--- a/include/asm-powerpc/pci.h
+++ b/include/asm-powerpc/pci.h
@@ -216,6 +216,8 @@ extern int remap_bus_range(struct pci_bus *bus);
extern void pcibios_fixup_device_resources(struct pci_dev *dev,
struct pci_bus *bus);
+extern void pcibios_claim_one_bus(struct pci_bus *b);
+
extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
extern struct pci_dev *of_create_pci_dev(struct device_node *node,
diff --git a/include/asm-powerpc/percpu.h b/include/asm-powerpc/percpu.h
index 06a959d6723..e31922c50e5 100644
--- a/include/asm-powerpc/percpu.h
+++ b/include/asm-powerpc/percpu.h
@@ -1 +1,57 @@
+#ifndef _ASM_POWERPC_PERCPU_H_
+#define _ASM_POWERPC_PERCPU_H_
+#ifdef __powerpc64__
+#include <linux/compiler.h>
+
+/*
+ * Same as asm-generic/percpu.h, except that we store the per cpu offset
+ * in the paca. Based on the x86-64 implementation.
+ */
+
+#ifdef CONFIG_SMP
+
+#include <asm/paca.h>
+
+#define __per_cpu_offset(cpu) (paca[cpu].data_offset)
+#define __my_cpu_offset() get_paca()->data_offset
+
+/* Separate out the type, so (int[3], foo) works. */
+#define DEFINE_PER_CPU(type, name) \
+ __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
+
+/* var is in discarded region: offset to particular copy we want */
+#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
+#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
+
+/* A macro to avoid #include hell... */
+#define percpu_modcopy(pcpudst, src, size) \
+do { \
+ unsigned int __i; \
+ for (__i = 0; __i < NR_CPUS; __i++) \
+ if (cpu_possible(__i)) \
+ memcpy((pcpudst)+__per_cpu_offset(__i), \
+ (src), (size)); \
+} while (0)
+
+extern void setup_per_cpu_areas(void);
+
+#else /* ! SMP */
+
+#define DEFINE_PER_CPU(type, name) \
+ __typeof__(type) per_cpu__##name
+
+#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
+#define __get_cpu_var(var) per_cpu__##var
+
+#endif /* SMP */
+
+#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
+
+#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
+
+#else
#include <asm-generic/percpu.h>
+#endif
+
+#endif /* _ASM_POWERPC_PERCPU_H_ */
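
Usage is unchanged from the generic implementation; only the offset lookup
differs (get_paca()->data_offset for the local CPU, paca[cpu].data_offset for
a remote one). A short sketch with a made-up variable name:

	#include <linux/percpu.h>
	#include <linux/cpumask.h>

	static DEFINE_PER_CPU(unsigned long, example_counter);	/* hypothetical */

	static void example_touch_counters(void)
	{
		int cpu;

		__get_cpu_var(example_counter)++;		/* this CPU's copy */

		for_each_online_cpu(cpu)
			per_cpu(example_counter, cpu) = 0;	/* any CPU's copy */
	}
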
diff --git a/include/asm-powerpc/pgalloc.h b/include/asm-powerpc/pgalloc.h
index bfc2113b363..9f5b052784a 100644
--- a/include/asm-powerpc/pgalloc.h
+++ b/include/asm-powerpc/pgalloc.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_PGALLOC_H
#define _ASM_POWERPC_PGALLOC_H
+#ifdef __KERNEL__
#ifndef CONFIG_PPC64
#include <asm-ppc/pgalloc.h>
@@ -153,4 +154,5 @@ extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
#define check_pgt_cache() do { } while (0)
#endif /* CONFIG_PPC64 */
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGALLOC_H */
diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index 154f1840ece..653915014dc 100644
--- a/include/asm-powerpc/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
@@ -1,3 +1,7 @@
+#ifndef _ASM_POWERPC_PGTABLE_64K_H
+#define _ASM_POWERPC_PGTABLE_64K_H
+#ifdef __KERNEL__
+
#include <asm-generic/pgtable-nopud.h>
@@ -88,3 +92,5 @@
#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PGTABLE_64K_H */
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h
index 0303f57366c..e38931379a7 100644
--- a/include/asm-powerpc/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
+#ifdef __KERNEL__
#ifndef CONFIG_PPC64
#include <asm-ppc/pgtable.h>
@@ -58,6 +59,17 @@ struct mm_struct;
#define IMALLOC_END (VMALLOC_START + PGTABLE_RANGE)
/*
+ * Region IDs
+ */
+#define REGION_SHIFT 60UL
+#define REGION_MASK (0xfUL << REGION_SHIFT)
+#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT)
+
+#define VMALLOC_REGION_ID (REGION_ID(VMALLOC_START))
+#define KERNEL_REGION_ID (REGION_ID(PAGE_OFFSET))
+#define USER_REGION_ID (0UL)
+
+/*
* Common bits in a linux-style PTE. These match the bits in the
* (hardware-defined) PowerPC PTE as closely as possible. Additional
* bits may be defined in pgtable-*.h
@@ -521,4 +533,5 @@ void pgtable_cache_init(void);
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PPC64 */
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */
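
The region macros reproduce what the deleted page_64.h definitions provided,
now derived from VMALLOC_START and PAGE_OFFSET. A sketch of how an effective
address would be classified:

	#include <asm/pgtable.h>

	/* Illustrative only */
	static const char *example_region_name(unsigned long ea)
	{
		if (REGION_ID(ea) == KERNEL_REGION_ID)
			return "linear-mapped kernel";
		if (REGION_ID(ea) == VMALLOC_REGION_ID)
			return "vmalloc/ioremap";
		if (REGION_ID(ea) == USER_REGION_ID)
			return "user";
		return "other";
	}
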
diff --git a/include/asm-powerpc/pmac_feature.h b/include/asm-powerpc/pmac_feature.h
index e9683bcff19..3221628130c 100644
--- a/include/asm-powerpc/pmac_feature.h
+++ b/include/asm-powerpc/pmac_feature.h
@@ -121,6 +121,7 @@
#define PMAC_TYPE_IMAC_G5 0x152 /* iMac G5 */
#define PMAC_TYPE_XSERVE_G5 0x153 /* Xserve G5 */
#define PMAC_TYPE_UNKNOWN_K2 0x19f /* Any other K2 based */
+#define PMAC_TYPE_UNKNOWN_SHASTA 0x19e /* Any other Shasta based */
/*
* Motherboard flags
@@ -317,10 +318,6 @@ extern void pmac_register_agp_pm(struct pci_dev *bridge,
extern void pmac_suspend_agp_for_card(struct pci_dev *dev);
extern void pmac_resume_agp_for_card(struct pci_dev *dev);
-/* Used by the via-pmu driver for suspend/resume
- */
-extern void pmac_tweak_clock_spreading(int enable);
-
/*
* The part below is for use by macio_asic.c only, do not rely
* on the data structures or constants below in a normal driver
@@ -341,6 +338,7 @@ enum {
macio_pangea,
macio_intrepid,
macio_keylargo2,
+ macio_shasta,
};
struct macio_chip
@@ -376,5 +374,24 @@ extern struct macio_chip* macio_find(struct device_node* child, int type);
#define MACIO_IN8(r) (in_8(MACIO_FCR8(macio,r)))
#define MACIO_OUT8(r,v) (out_8(MACIO_FCR8(macio,r), (v)))
+/*
+ * These are exported by the pmac feature code for internal use by arch code
+ * only (such as the platform function callbacks); do not use directly in drivers
+ */
+extern spinlock_t feature_lock;
+extern struct device_node *uninorth_node;
+extern u32 __iomem *uninorth_base;
+
+/*
+ * Uninorth reg. access. Note that Uni-N regs are big endian
+ */
+
+#define UN_REG(r) (uninorth_base + ((r) >> 2))
+#define UN_IN(r) (in_be32(UN_REG(r)))
+#define UN_OUT(r,v) (out_be32(UN_REG(r), (v)))
+#define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v)))
+#define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v)))
+
+
#endif /* __PPC_ASM_PMAC_FEATURE_H */
#endif /* __KERNEL__ */
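
As the comment above says, the UN_* accessors are for arch code only; they are
meant to be used under feature_lock for read-modify-write cycles on Uni-N
registers, roughly as below (the register offset and bit values are made up
for illustration):

	#include <linux/spinlock.h>
	#include <asm/pmac_feature.h>

	static void example_uninorth_rmw(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&feature_lock, flags);
		UN_BIS(0x20, 0x00000002);	/* set a bit   */
		UN_BIC(0x20, 0x00000004);	/* clear a bit */
		spin_unlock_irqrestore(&feature_lock, flags);
	}
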
diff --git a/include/asm-powerpc/pmac_low_i2c.h b/include/asm-powerpc/pmac_low_i2c.h
index 809a5963d5e..131011bd7e7 100644
--- a/include/asm-powerpc/pmac_low_i2c.h
+++ b/include/asm-powerpc/pmac_low_i2c.h
@@ -11,33 +11,97 @@
*/
#ifndef __PMAC_LOW_I2C_H__
#define __PMAC_LOW_I2C_H__
+#ifdef __KERNEL__
/* i2c mode (based on the platform functions format) */
enum {
- pmac_low_i2c_mode_dumb = 1,
- pmac_low_i2c_mode_std = 2,
- pmac_low_i2c_mode_stdsub = 3,
- pmac_low_i2c_mode_combined = 4,
+ pmac_i2c_mode_dumb = 1,
+ pmac_i2c_mode_std = 2,
+ pmac_i2c_mode_stdsub = 3,
+ pmac_i2c_mode_combined = 4,
};
/* RW bit in address */
enum {
- pmac_low_i2c_read = 0x01,
- pmac_low_i2c_write = 0x00
+ pmac_i2c_read = 0x01,
+ pmac_i2c_write = 0x00
};
+/* i2c bus type */
+enum {
+ pmac_i2c_bus_keywest = 0,
+ pmac_i2c_bus_pmu = 1,
+ pmac_i2c_bus_smu = 2,
+};
+
+/* i2c bus features */
+enum {
+ /* can_largesub : supports >1 byte subaddresses (SMU only) */
+ pmac_i2c_can_largesub = 0x00000001u,
+
+ /* multibus : device node holds multiple busses, bus number is
+ * encoded in bits 0xff00 of "reg" of a given device
+ */
+ pmac_i2c_multibus = 0x00000002u,
+};
+
+/* i2c busses in the system */
+struct pmac_i2c_bus;
+struct i2c_adapter;
+
/* Init, called early during boot */
-extern void pmac_init_low_i2c(void);
+extern int pmac_i2c_init(void);
+
+/* Look up an i2c bus for a device node. The node can be either the bus
+ * node itself or a device below it. In the case of a multibus, the bus
+ * node itself is the controller node; otherwise, it's a child of the
+ * controller node.
+ */
+extern struct pmac_i2c_bus *pmac_i2c_find_bus(struct device_node *node);
+
+/* Get the address for an i2c device. This strips the bus number if
+ * necessary. The 7-bit address is returned 1 bit right shifted so that the
+ * direction bit can be ORed in directly.
+ */
+extern u8 pmac_i2c_get_dev_addr(struct device_node *device);
+
+/* Get infos about a bus */
+extern struct device_node *pmac_i2c_get_controller(struct pmac_i2c_bus *bus);
+extern struct device_node *pmac_i2c_get_bus_node(struct pmac_i2c_bus *bus);
+extern int pmac_i2c_get_type(struct pmac_i2c_bus *bus);
+extern int pmac_i2c_get_flags(struct pmac_i2c_bus *bus);
+extern int pmac_i2c_get_channel(struct pmac_i2c_bus *bus);
+
+/* i2c layer adapter attach/detach */
+extern void pmac_i2c_attach_adapter(struct pmac_i2c_bus *bus,
+ struct i2c_adapter *adapter);
+extern void pmac_i2c_detach_adapter(struct pmac_i2c_bus *bus,
+ struct i2c_adapter *adapter);
+extern struct i2c_adapter *pmac_i2c_get_adapter(struct pmac_i2c_bus *bus);
+extern struct pmac_i2c_bus *pmac_i2c_adapter_to_bus(struct i2c_adapter *adapter);
+
+/* Match a device or bus with an i2c adapter structure, to be used by drivers
+ * to match device-tree nodes with i2c adapters during adapter discovery
+ * callbacks
+ */
+extern int pmac_i2c_match_adapter(struct device_node *dev,
+ struct i2c_adapter *adapter);
+
-/* Locking functions exposed to i2c-keywest */
-int pmac_low_i2c_lock(struct device_node *np);
-int pmac_low_i2c_unlock(struct device_node *np);
+/* (legacy) Locking functions exposed to i2c-keywest */
+extern int pmac_low_i2c_lock(struct device_node *np);
+extern int pmac_low_i2c_unlock(struct device_node *np);
/* Access functions for platform code */
-int pmac_low_i2c_open(struct device_node *np, int channel);
-int pmac_low_i2c_close(struct device_node *np);
-int pmac_low_i2c_setmode(struct device_node *np, int mode);
-int pmac_low_i2c_xfer(struct device_node *np, u8 addrdir, u8 subaddr, u8 *data, int len);
+extern int pmac_i2c_open(struct pmac_i2c_bus *bus, int polled);
+extern void pmac_i2c_close(struct pmac_i2c_bus *bus);
+extern int pmac_i2c_setmode(struct pmac_i2c_bus *bus, int mode);
+extern int pmac_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
+ u32 subaddr, u8 *data, int len);
+/* Suspend/resume code called by via-pmu directly for now */
+extern void pmac_pfunc_i2c_suspend(void);
+extern void pmac_pfunc_i2c_resume(void);
+#endif /* __KERNEL__ */
#endif /* __PMAC_LOW_I2C_H__ */
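
A typical consumer of the new interface finds the bus from its device node,
opens it, selects a mode and transfers. A sketch with error handling trimmed;
the helper name is made up, addr is the device's 7-bit address, and the
addrdir byte is assumed to be the 7-bit address shifted left with the R/W bit
ORed into bit 0:

	#include <linux/errno.h>
	#include <asm/prom.h>
	#include <asm/pmac_low_i2c.h>

	static int example_i2c_read(struct device_node *np, u8 addr, u8 subaddr,
				    u8 *buf, int len)
	{
		struct pmac_i2c_bus *bus = pmac_i2c_find_bus(np);
		int rc;

		if (bus == NULL)
			return -ENODEV;
		rc = pmac_i2c_open(bus, 0);	/* 0 = normal (non-polled) operation */
		if (rc)
			return rc;
		rc = pmac_i2c_setmode(bus, pmac_i2c_mode_combined);
		if (rc == 0)
			rc = pmac_i2c_xfer(bus, (addr << 1) | pmac_i2c_read, 1,
					   subaddr, buf, len);
		pmac_i2c_close(bus);
		return rc;
	}
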
diff --git a/include/asm-powerpc/pmac_pfunc.h b/include/asm-powerpc/pmac_pfunc.h
new file mode 100644
index 00000000000..d9728c80f86
--- /dev/null
+++ b/include/asm-powerpc/pmac_pfunc.h
@@ -0,0 +1,253 @@
+#ifndef __PMAC_PFUNC_H__
+#define __PMAC_PFUNC_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+/* Flags in command lists */
+#define PMF_FLAGS_ON_INIT 0x80000000u
+#define PMF_FLAGS_ON_TERM 0x40000000u
+#define PMF_FLAGS_ON_SLEEP 0x20000000u
+#define PMF_FLAGS_ON_WAKE 0x10000000u
+#define PMF_FLAGS_ON_DEMAND 0x08000000u
+#define PMF_FLAGS_INT_GEN 0x04000000u
+#define PMF_FLAGS_HIGH_SPEED 0x02000000u
+#define PMF_FLAGS_LOW_SPEED 0x01000000u
+#define PMF_FLAGS_SIDE_EFFECTS 0x00800000u
+
+/*
+ * Arguments to a platform function call.
+ *
+ * NOTE: By convention, pointer arguments point to an u32
+ */
+struct pmf_args {
+ union {
+ u32 v;
+ u32 *p;
+ } u[4];
+ unsigned int count;
+};
+
+/*
+ * A driver capable of interpreting commands provides a handlers
+ * structure filled with whatever handlers are implemented by this
+ * driver. Unimplemented handlers are left NULL.
+ *
+ * PMF_STD_ARGS are the same arguments that are passed to the parser
+ * and that gets passed back to the various handlers.
+ *
+ * Interpreting a given function always starts with a begin() call which
+ * returns instance data to be passed to subsequent calls, and
+ * ends with an end() call. This allows the low level driver to implement
+ * locking policy or per-function instance data.
+ *
+ * For interrupt capable functions, irq_enable() is called when a client
+ * registers, and irq_disable() is called when the last client unregisters
+ * Note that irq_enable & irq_disable are called within a semaphore held
+ * by the core, thus you should not try to register yourself to some other
+ * pmf interrupt during those calls.
+ */
+
+#define PMF_STD_ARGS struct pmf_function *func, void *instdata, \
+ struct pmf_args *args
+
+struct pmf_function;
+
+struct pmf_handlers {
+ void * (*begin)(struct pmf_function *func, struct pmf_args *args);
+ void (*end)(struct pmf_function *func, void *instdata);
+
+ int (*irq_enable)(struct pmf_function *func);
+ int (*irq_disable)(struct pmf_function *func);
+
+ int (*write_gpio)(PMF_STD_ARGS, u8 value, u8 mask);
+ int (*read_gpio)(PMF_STD_ARGS, u8 mask, int rshift, u8 xor);
+
+ int (*write_reg32)(PMF_STD_ARGS, u32 offset, u32 value, u32 mask);
+ int (*read_reg32)(PMF_STD_ARGS, u32 offset);
+ int (*write_reg16)(PMF_STD_ARGS, u32 offset, u16 value, u16 mask);
+ int (*read_reg16)(PMF_STD_ARGS, u32 offset);
+ int (*write_reg8)(PMF_STD_ARGS, u32 offset, u8 value, u8 mask);
+ int (*read_reg8)(PMF_STD_ARGS, u32 offset);
+
+ int (*delay)(PMF_STD_ARGS, u32 duration);
+
+ int (*wait_reg32)(PMF_STD_ARGS, u32 offset, u32 value, u32 mask);
+ int (*wait_reg16)(PMF_STD_ARGS, u32 offset, u16 value, u16 mask);
+ int (*wait_reg8)(PMF_STD_ARGS, u32 offset, u8 value, u8 mask);
+
+ int (*read_i2c)(PMF_STD_ARGS, u32 len);
+ int (*write_i2c)(PMF_STD_ARGS, u32 len, const u8 *data);
+ int (*rmw_i2c)(PMF_STD_ARGS, u32 masklen, u32 valuelen, u32 totallen,
+ const u8 *maskdata, const u8 *valuedata);
+
+ int (*read_cfg)(PMF_STD_ARGS, u32 offset, u32 len);
+ int (*write_cfg)(PMF_STD_ARGS, u32 offset, u32 len, const u8 *data);
+ int (*rmw_cfg)(PMF_STD_ARGS, u32 offset, u32 masklen, u32 valuelen,
+ u32 totallen, const u8 *maskdata, const u8 *valuedata);
+
+ int (*read_i2c_sub)(PMF_STD_ARGS, u8 subaddr, u32 len);
+ int (*write_i2c_sub)(PMF_STD_ARGS, u8 subaddr, u32 len, const u8 *data);
+ int (*set_i2c_mode)(PMF_STD_ARGS, int mode);
+ int (*rmw_i2c_sub)(PMF_STD_ARGS, u8 subaddr, u32 masklen, u32 valuelen,
+ u32 totallen, const u8 *maskdata,
+ const u8 *valuedata);
+
+ int (*read_reg32_msrx)(PMF_STD_ARGS, u32 offset, u32 mask, u32 shift,
+ u32 xor);
+ int (*read_reg16_msrx)(PMF_STD_ARGS, u32 offset, u32 mask, u32 shift,
+ u32 xor);
+ int (*read_reg8_msrx)(PMF_STD_ARGS, u32 offset, u32 mask, u32 shift,
+ u32 xor);
+
+ int (*write_reg32_slm)(PMF_STD_ARGS, u32 offset, u32 shift, u32 mask);
+ int (*write_reg16_slm)(PMF_STD_ARGS, u32 offset, u32 shift, u32 mask);
+ int (*write_reg8_slm)(PMF_STD_ARGS, u32 offset, u32 shift, u32 mask);
+
+ int (*mask_and_compare)(PMF_STD_ARGS, u32 len, const u8 *maskdata,
+ const u8 *valuedata);
+
+ struct module *owner;
+};
+
+
+/*
+ * Drivers that expose platform functions register at init time; this
+ * causes the platform functions for that device node to be parsed in
+ * advance and associated with the device. The data structures are
+ * partially public so a driver can walk the list of platform functions
+ * and eventually inspect the flags
+ */
+struct pmf_device;
+
+struct pmf_function {
+ /* All functions for a given driver are linked */
+ struct list_head link;
+
+ /* Function node & driver data */
+ struct device_node *node;
+ void *driver_data;
+
+ /* For internal use by core */
+ struct pmf_device *dev;
+
+ /* The name is the "xxx" in "platform-do-xxx"; this is how
+ * platform functions are identified by this code. Some functions
+ * only operate for a given target, in which case the phandle is
+ * here (or 0 if the filter doesn't apply)
+ */
+ const char *name;
+ u32 phandle;
+
+ /* The flags for that function. You can have several functions
+ * with the same name and different flags
+ */
+ u32 flags;
+
+ /* The actual tokenized function blob */
+ const void *data;
+ unsigned int length;
+
+ /* Interrupt clients */
+ struct list_head irq_clients;
+
+ /* Refcounting */
+ struct kref ref;
+};
+
+/*
+ * For platform functions that are interrupts, one can register
+ * irq_client structures. You canNOT use the same structure twice
+ * as it contains a link member. Also, the callback is called with
+ * a spinlock held; you must not call back into any of the pmf_* functions
+ * from within that callback
+ */
+struct pmf_irq_client {
+ void (*handler)(void *data);
+ void *data;
+ struct module *owner;
+ struct list_head link;
+};
+
+
+/*
+ * Register/Unregister a function-capable driver and its handlers
+ */
+extern int pmf_register_driver(struct device_node *np,
+ struct pmf_handlers *handlers,
+ void *driverdata);
+
+extern void pmf_unregister_driver(struct device_node *np);
+
+
+/*
+ * Register/Unregister interrupt clients
+ */
+extern int pmf_register_irq_client(struct device_node *np,
+ const char *name,
+ struct pmf_irq_client *client);
+
+extern void pmf_unregister_irq_client(struct device_node *np,
+ const char *name,
+ struct pmf_irq_client *client);
+
+/*
+ * Called by the handlers when an irq happens
+ */
+extern void pmf_do_irq(struct pmf_function *func);
+
+
+/*
+ * Low level call to platform functions.
+ *
+ * The phandle can filter on the target object for functions that have
+ * multiple targets, the flags allow you to restrict the call to a given
+ * combination of flags.
+ *
+ * The args array contains as many arguments as is required by the function,
+ * this is dependent on the function you are calling; unfortunately Apple's
+ * mechanism provides no way to encode that, so you have to get it right at
+ * the call site. Some functions require no args, in which case, you can
+ * pass NULL.
+ *
+ * You can also pass NULL to the name. This will match any function that has
+ * the appropriate combination of flags & phandle or you can pass 0 to the
+ * phandle to match any
+ */
+extern int pmf_do_functions(struct device_node *np, const char *name,
+ u32 phandle, u32 flags, struct pmf_args *args);
+
+
+
+/*
+ * High level call to a platform function.
+ *
+ * This one looks for the platform-xxx function first, so you should call it
+ * on the actual target if any. It will fall back to platform-do-xxx if it
+ * can't find one. It will also exclusively target functions that have
+ * the "OnDemand" flag.
+ */
+
+extern int pmf_call_function(struct device_node *target, const char *name,
+ struct pmf_args *args);
+
+
+/*
+ * For low latency interrupt usage, you can lookup for on-demand functions
+ * using the functions below
+ */
+
+extern struct pmf_function *pmf_find_function(struct device_node *target,
+ const char *name);
+
+extern struct pmf_function * pmf_get_function(struct pmf_function *func);
+extern void pmf_put_function(struct pmf_function *func);
+
+extern int pmf_call_one(struct pmf_function *func, struct pmf_args *args);
+
+
+/* Suspend/resume code called by via-pmu directly for now */
+extern void pmac_pfunc_base_suspend(void);
+extern void pmac_pfunc_base_resume(void);
+
+#endif /* __PMAC_PFUNC_H__ */
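
On the consumer side, firing a named on-demand function against a device node
reduces to filling a pmf_args and calling pmf_call_function(); for instance
(the helper and the function name "example-function" are purely illustrative):

	#include <asm/prom.h>
	#include <asm/pmac_pfunc.h>

	static int example_do_platform_func(struct device_node *np, u32 value)
	{
		struct pmf_args args = { .count = 1 };

		args.u[0].v = value;
		return pmf_call_function(np, "example-function", &args);
	}
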
diff --git a/include/asm-powerpc/pmc.h b/include/asm-powerpc/pmc.h
index 5f41f3a2b29..07d6a427931 100644
--- a/include/asm-powerpc/pmc.h
+++ b/include/asm-powerpc/pmc.h
@@ -18,6 +18,7 @@
*/
#ifndef _POWERPC_PMC_H
#define _POWERPC_PMC_H
+#ifdef __KERNEL__
#include <asm/ptrace.h>
@@ -44,4 +45,5 @@ void dump_pmcs(void);
extern struct op_powerpc_model op_model_fsl_booke;
#endif
+#endif /* __KERNEL__ */
#endif /* _POWERPC_PMC_H */
diff --git a/include/asm-powerpc/ppc-pci.h b/include/asm-powerpc/ppc-pci.h
index 36cdc869e58..f80482c7231 100644
--- a/include/asm-powerpc/ppc-pci.h
+++ b/include/asm-powerpc/ppc-pci.h
@@ -8,6 +8,7 @@
*/
#ifndef _ASM_POWERPC_PPC_PCI_H
#define _ASM_POWERPC_PPC_PCI_H
+#ifdef __KERNEL__
#include <linux/pci.h>
#include <asm/pci-bridge.h>
@@ -51,6 +52,21 @@ extern unsigned long pci_probe_only;
/* ---- EEH internal-use-only related routines ---- */
#ifdef CONFIG_EEH
+
+void pci_addr_cache_insert_device(struct pci_dev *dev);
+void pci_addr_cache_remove_device(struct pci_dev *dev);
+void pci_addr_cache_build(void);
+struct pci_dev *pci_get_device_by_addr(unsigned long addr);
+
+/**
+ * eeh_slot_error_detail -- record an EEH error condition in the log
+ * @severity: 1 if temporary, 2 if permanent failure.
+ *
+ * Obtains the EEH error details from the RTAS subsystem,
+ * and then logs these details with the RTAS error log system.
+ */
+void eeh_slot_error_detail (struct pci_dn *pdn, int severity);
+
/**
* rtas_set_slot_reset -- unfreeze a frozen slot
*
@@ -58,8 +74,10 @@ extern unsigned long pci_probe_only;
* does this by asserting the PCI #RST line for 1/8th of
* a second; this routine will sleep while the adapter is
* being reset.
+ *
+ * Returns a non-zero value if the reset failed.
*/
-void rtas_set_slot_reset (struct pci_dn *);
+int rtas_set_slot_reset (struct pci_dn *);
/**
* eeh_restore_bars - Restore device configuration info.
@@ -83,6 +101,7 @@ void eeh_restore_bars(struct pci_dn *);
void rtas_configure_bridge(struct pci_dn *);
int rtas_write_config(struct pci_dn *, int where, int size, u32 val);
+int rtas_read_config(struct pci_dn *, int where, int size, u32 *val);
/**
* mark and clear slots: find "partition endpoint" PE and set or
@@ -91,6 +110,10 @@ int rtas_write_config(struct pci_dn *, int where, int size, u32 val);
void eeh_mark_slot (struct device_node *dn, int mode_flag);
void eeh_clear_slot (struct device_node *dn, int mode_flag);
+/* Find the associated "Partitionable Endpoint" PE */
+struct device_node * find_device_pe(struct device_node *dn);
+
#endif
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PPC_PCI_H */
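
A recovery path is expected to check the new return value of
rtas_set_slot_reset() and record the error details when the reset fails;
schematically (a sketch only, not the actual EEH recovery code):

	#include <linux/errno.h>
	#include <asm/ppc-pci.h>

	static int example_recover_slot(struct pci_dn *pdn)
	{
		if (rtas_set_slot_reset(pdn)) {
			eeh_slot_error_detail(pdn, 2);	/* 2 == permanent failure */
			return -EIO;
		}
		rtas_configure_bridge(pdn);
		eeh_restore_bars(pdn);
		return 0;
	}
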
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index c27baa0563f..ab8688d3902 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -94,6 +94,7 @@
#define RFDI .long 0x4c00004e /* rfdi instruction */
#define RFMCI .long 0x4c00004c /* rfmci instruction */
+#ifdef __KERNEL__
#ifdef CONFIG_PPC64
#define XGLUE(a,b) a##b
@@ -155,52 +156,56 @@ n:
#endif
/*
- * LOADADDR( rn, name )
- * loads the address of 'name' into 'rn'
+ * LOAD_REG_IMMEDIATE(rn, expr)
+ * Loads the value of the constant expression 'expr' into register 'rn'
+ * using immediate instructions only. Use this when it's important not
+ * to reference other data (i.e. on ppc64 when the TOC pointer is not
+ * valid).
*
- * LOADBASE( rn, name )
- * loads the address (possibly without the low 16 bits) of 'name' into 'rn'
- * suitable for base+disp addressing
+ * LOAD_REG_ADDR(rn, name)
+ * Loads the address of label 'name' into register 'rn'. Use this when
+ * you don't particularly need immediate instructions only, but you need
+ * the whole address in one register (e.g. it's a structure address and
+ * you want to access various offsets within it). On ppc32 this is
+ * identical to LOAD_REG_IMMEDIATE.
+ *
+ * LOAD_REG_ADDRBASE(rn, name)
+ * ADDROFF(name)
+ * LOAD_REG_ADDRBASE loads part of the address of label 'name' into
+ * register 'rn'. ADDROFF(name) returns the remainder of the address as
+ * a constant expression. ADDROFF(name) is a signed expression < 16 bits
+ * in size, so is suitable for use directly as an offset in load and store
+ * instructions. Use this when loading/storing a single word or less as:
+ * LOAD_REG_ADDRBASE(rX, name)
+ * ld rY,ADDROFF(name)(rX)
*/
#ifdef __powerpc64__
-#define LOADADDR(rn,name) \
- lis rn,name##@highest; \
- ori rn,rn,name##@higher; \
- rldicr rn,rn,32,31; \
- oris rn,rn,name##@h; \
- ori rn,rn,name##@l
-
-#define LOADBASE(rn,name) \
- ld rn,name@got(r2)
-
-#define OFF(name) 0
-
-#define SET_REG_TO_CONST(reg, value) \
- lis reg,(((value)>>48)&0xFFFF); \
- ori reg,reg,(((value)>>32)&0xFFFF); \
- rldicr reg,reg,32,31; \
- oris reg,reg,(((value)>>16)&0xFFFF); \
- ori reg,reg,((value)&0xFFFF);
-
-#define SET_REG_TO_LABEL(reg, label) \
- lis reg,(label)@highest; \
- ori reg,reg,(label)@higher; \
- rldicr reg,reg,32,31; \
- oris reg,reg,(label)@h; \
- ori reg,reg,(label)@l;
+#define LOAD_REG_IMMEDIATE(reg,expr) \
+ lis (reg),(expr)@highest; \
+ ori (reg),(reg),(expr)@higher; \
+ rldicr (reg),(reg),32,31; \
+ oris (reg),(reg),(expr)@h; \
+ ori (reg),(reg),(expr)@l;
+
+#define LOAD_REG_ADDR(reg,name) \
+ ld (reg),name@got(r2)
+
+#define LOAD_REG_ADDRBASE(reg,name) LOAD_REG_ADDR(reg,name)
+#define ADDROFF(name) 0
/* offsets for stack frame layout */
#define LRSAVE 16
#else /* 32-bit */
-#define LOADADDR(rn,name) \
- lis rn,name@ha; \
- addi rn,rn,name@l
-#define LOADBASE(rn,name) \
- lis rn,name@ha
+#define LOAD_REG_IMMEDIATE(reg,expr) \
+ lis (reg),(expr)@ha; \
+ addi (reg),(reg),(expr)@l;
-#define OFF(name) name@l
+#define LOAD_REG_ADDR(reg,name) LOAD_REG_IMMEDIATE(reg, name)
+
+#define LOAD_REG_ADDRBASE(reg, name) lis (reg),name@ha
+#define ADDROFF(name) name@l
/* offsets for stack frame layout */
#define LRSAVE 4
@@ -325,6 +330,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#define CLR_TOP32(r)
#endif
+#endif /* __KERNEL__ */
+
/* The boring bits... */
/* Condition Register Bit Fields */
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index d12382d292d..415fa393b00 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -10,7 +10,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/config.h>
#include <asm/reg.h>
#ifndef __ASSEMBLY__
@@ -50,6 +49,7 @@
#define _CHRP_IBM 0x05 /* IBM chrp, the longtrail and longtrail 2 */
#define _CHRP_Pegasos 0x06 /* Genesi/bplan's Pegasos and Pegasos2 */
+#ifdef __KERNEL__
#define platform_is_pseries() (_machine == PLATFORM_PSERIES || \
_machine == PLATFORM_PSERIES_LPAR)
#define platform_is_lpar() (!!(_machine & PLATFORM_LPAR))
@@ -68,7 +68,6 @@ extern int _chrp_type;
* vendor. Board revision is also made available. This will be moved
* elsewhere soon
*/
-extern unsigned char ucSystemType;
extern unsigned char ucBoardRev;
extern unsigned char ucBoardRevMaj, ucBoardRevMin;
@@ -82,7 +81,7 @@ extern unsigned char ucBoardRevMaj, ucBoardRevMin;
#else
#define _machine 0
#endif /* CONFIG_PPC_MULTIPLATFORM */
-
+#endif /* __KERNEL__ */
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
index f999df1c5c9..5b2bd4eefb0 100644
--- a/include/asm-powerpc/prom.h
+++ b/include/asm-powerpc/prom.h
@@ -65,49 +65,11 @@ struct boot_param_header
typedef u32 phandle;
typedef u32 ihandle;
-struct address_range {
- unsigned long space;
- unsigned long address;
- unsigned long size;
-};
-
struct interrupt_info {
int line;
int sense; /* +ve/-ve logic, edge or level, etc. */
};
-struct pci_address {
- u32 a_hi;
- u32 a_mid;
- u32 a_lo;
-};
-
-struct isa_address {
- u32 a_hi;
- u32 a_lo;
-};
-
-struct isa_range {
- struct isa_address isa_addr;
- struct pci_address pci_addr;
- unsigned int size;
-};
-
-struct reg_property {
- unsigned long address;
- unsigned long size;
-};
-
-struct reg_property32 {
- unsigned int address;
- unsigned int size;
-};
-
-struct reg_property64 {
- u64 address;
- u64 size;
-};
-
struct property {
char *name;
int length;
@@ -120,13 +82,12 @@ struct device_node {
char *type;
phandle node;
phandle linux_phandle;
- int n_addrs;
- struct address_range *addrs;
int n_intrs;
struct interrupt_info *intrs;
char *full_name;
struct property *properties;
+ struct property *deadprops; /* removed properties */
struct device_node *parent;
struct device_node *child;
struct device_node *sibling;
@@ -175,6 +136,9 @@ extern struct device_node *of_find_all_nodes(struct device_node *prev);
extern struct device_node *of_get_parent(const struct device_node *node);
extern struct device_node *of_get_next_child(const struct device_node *node,
struct device_node *prev);
+extern struct property *of_find_property(struct device_node *np,
+ const char *name,
+ int *lenp);
extern struct device_node *of_node_get(struct device_node *node);
extern void of_node_put(struct device_node *node);
@@ -204,6 +168,10 @@ extern int prom_n_size_cells(struct device_node* np);
extern int prom_n_intr_cells(struct device_node* np);
extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
extern int prom_add_property(struct device_node* np, struct property* prop);
+extern int prom_remove_property(struct device_node *np, struct property *prop);
+extern int prom_update_property(struct device_node *np,
+ struct property *newprop,
+ struct property *oldprop);
#ifdef CONFIG_PPC32
/*
@@ -223,5 +191,36 @@ extern struct resource *request_OF_resource(struct device_node* node,
int index, const char* name_postfix);
extern int release_OF_resource(struct device_node* node, int index);
+
+/*
+ * OF address retrieval & translation
+ */
+
+
+/* Translate an OF address block into a CPU physical address
+ */
+#define OF_BAD_ADDR ((u64)-1)
+extern u64 of_translate_address(struct device_node *np, u32 *addr);
+
+/* Extract an address from a device, returns the region size and
+ * the address space flags too. The PCI version uses a BAR number
+ * instead of an absolute index
+ */
+extern u32 *of_get_address(struct device_node *dev, int index,
+ u64 *size, unsigned int *flags);
+extern u32 *of_get_pci_address(struct device_node *dev, int bar_no,
+ u64 *size, unsigned int *flags);
+
+/* Get an address as a resource. Note that if your address is
+ * a PIO address, the conversion will fail if the physical address
+ * can't be internally converted to an IO token with
+ * pci_address_to_pio(); that is because it is either called too early
+ * or cannot be matched to any host bridge IO space
+ */
+extern int of_address_to_resource(struct device_node *dev, int index,
+ struct resource *r);
+extern int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r);
+
#endif /* __KERNEL__ */
#endif /* _POWERPC_PROM_H */
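For illustration only, not part of the patch: a minimal sketch of how platform
code might consume the new property and address helpers declared above. The
node pointer and the use of the "reg" property are assumptions; only the
function signatures come from this header.

#include <linux/errno.h>
#include <linux/ioport.h>	/* struct resource */
#include <linux/kernel.h>
#include <asm/prom.h>

static int example_map_device(struct device_node *np)
{
	struct resource res;
	struct property *prop;
	unsigned int flags;
	u64 size;
	u32 *addrp;
	int len;

	/* Raw property lookup, replacing direct walks of np->addrs. */
	prop = of_find_property(np, "reg", &len);
	if (prop == NULL)
		return -ENODEV;

	/* First address entry, with translated size and space flags. */
	addrp = of_get_address(np, 0, &size, &flags);
	if (addrp == NULL)
		return -ENODEV;
	printk(KERN_DEBUG "addr 0x%llx, size 0x%llx, flags 0x%x\n",
	       (unsigned long long)of_translate_address(np, addrp),
	       (unsigned long long)size, flags);

	/* Or go straight to a struct resource (assumed 0 on success). */
	if (of_address_to_resource(np, 0, &res) != 0)
		return -EINVAL;

	return 0;
}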
diff --git a/include/asm-powerpc/ptrace.h b/include/asm-powerpc/ptrace.h
index 1f7ecdb0b6c..9c550b31482 100644
--- a/include/asm-powerpc/ptrace.h
+++ b/include/asm-powerpc/ptrace.h
@@ -87,7 +87,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
#define force_successful_syscall_return() \
do { \
- current_thread_info()->syscall_noerror = 1; \
+ set_thread_flag(TIF_NOERROR); \
} while(0)
/*
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h
index eb392d038ed..12ecc9b9f28 100644
--- a/include/asm-powerpc/reg.h
+++ b/include/asm-powerpc/reg.h
@@ -145,6 +145,10 @@
#define SPRN_CTR 0x009 /* Count Register */
#define SPRN_CTRLF 0x088
#define SPRN_CTRLT 0x098
+#define CTRL_CT 0xc0000000 /* current thread */
+#define CTRL_CT0 0x80000000 /* thread 0 */
+#define CTRL_CT1 0x40000000 /* thread 1 */
+#define CTRL_TE 0x00c00000 /* thread enable */
#define CTRL_RUNLATCH 0x1
#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
#define DABR_TRANSLATION (1UL << 2)
@@ -257,11 +261,11 @@
#define SPRN_HID6 0x3F9 /* BE HID 6 */
#define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */
#define HID6_DLP (1<<20) /* Disable all large page modes (4K only) */
-#define SPRN_TSCR 0x399 /* Thread switch control on BE */
-#define SPRN_TTR 0x39A /* Thread switch timeout on BE */
-#define TSCR_DEC_ENABLE 0x200000 /* Decrementer Interrupt */
-#define TSCR_EE_ENABLE 0x100000 /* External Interrupt */
-#define TSCR_EE_BOOST 0x080000 /* External Interrupt Boost */
+#define SPRN_TSC_CELL 0x399 /* Thread switch control on Cell */
+#define TSC_CELL_DEC_ENABLE_0 0x400000 /* Decrementer Interrupt */
+#define TSC_CELL_DEC_ENABLE_1 0x200000 /* Decrementer Interrupt */
+#define TSC_CELL_EE_ENABLE 0x100000 /* External Interrupt */
+#define TSC_CELL_EE_BOOST 0x080000 /* External Interrupt Boost */
#define SPRN_TSC 0x3FD /* Thread switch control on others */
#define SPRN_TST 0x3FC /* Thread switch timeout on others */
#if !defined(SPRN_IAC1) && !defined(SPRN_IAC2)
@@ -375,6 +379,14 @@
#define SPRN_SPRG7 0x117 /* Special Purpose Register General 7 */
#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
+#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
+#define SRR1_WAKERESET 0x00380000 /* System reset */
+#define SRR1_WAKESYSERR 0x00300000 /* System error */
+#define SRR1_WAKEEE 0x00200000 /* External interrupt */
+#define SRR1_WAKEMT 0x00280000 /* mtctrl */
+#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
+#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */
+
#ifndef SPRN_SVR
#define SPRN_SVR 0x11E /* System Version Register */
#endif
@@ -443,12 +455,35 @@
#define SPRN_SDAR 781
#else /* 32-bit */
-#define SPRN_MMCR0 0x3B8 /* Monitor Mode Control Register 0 */
-#define SPRN_MMCR1 0x3BC /* Monitor Mode Control Register 1 */
-#define SPRN_PMC1 0x3B9 /* Performance Counter Register 1 */
-#define SPRN_PMC2 0x3BA /* Performance Counter Register 2 */
-#define SPRN_PMC3 0x3BD /* Performance Counter Register 3 */
-#define SPRN_PMC4 0x3BE /* Performance Counter Register 4 */
+#define SPRN_MMCR0 952 /* Monitor Mode Control Register 0 */
+#define MMCR0_FC 0x80000000UL /* freeze counters */
+#define MMCR0_FCS 0x40000000UL /* freeze in supervisor state */
+#define MMCR0_FCP 0x20000000UL /* freeze in problem state */
+#define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */
+#define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */
+#define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */
+#define MMCR0_FCECE 0x02000000UL /* freeze ctrs on enabled cond or event */
+#define MMCR0_TBEE 0x00400000UL /* time base exception enable */
+#define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/
+#define MMCR0_PMCnCE 0x00004000UL /* count enable for all but PMC 1*/
+#define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
+#define MMCR0_PMC1SEL 0x00001fc0UL /* PMC 1 Event */
+#define MMCR0_PMC2SEL 0x0000003fUL /* PMC 2 Event */
+
+#define SPRN_MMCR1 956
+#define MMCR1_PMC3SEL 0xf8000000UL /* PMC 3 Event */
+#define MMCR1_PMC4SEL 0x07c00000UL /* PMC 4 Event */
+#define MMCR1_PMC5SEL 0x003e0000UL /* PMC 5 Event */
+#define MMCR1_PMC6SEL 0x0001f800UL /* PMC 6 Event */
+#define SPRN_MMCR2 944
+#define SPRN_PMC1 953 /* Performance Counter Register 1 */
+#define SPRN_PMC2 954 /* Performance Counter Register 2 */
+#define SPRN_PMC3 957 /* Performance Counter Register 3 */
+#define SPRN_PMC4 958 /* Performance Counter Register 4 */
+#define SPRN_PMC5 945 /* Performance Counter Register 5 */
+#define SPRN_PMC6 946 /* Performance Counter Register 6 */
+
+#define SPRN_SIAR 955 /* Sampled Instruction Address Register */
/* Bit definitions for MMCR0 and PMC1 / PMC2. */
#define MMCR0_PMC1_CYCLES (1 << 7)
@@ -458,7 +493,6 @@
#define MMCR0_PMC2_CYCLES 0x1
#define MMCR0_PMC2_ITLB 0x7
#define MMCR0_PMC2_LOADMISSTIME 0x5
-#define MMCR0_PMXE (1 << 26)
#endif
/* Processor Version Register (PVR) field extraction */
diff --git a/include/asm-powerpc/rtas.h b/include/asm-powerpc/rtas.h
index d1bb611ea62..f43c6835e62 100644
--- a/include/asm-powerpc/rtas.h
+++ b/include/asm-powerpc/rtas.h
@@ -1,5 +1,6 @@
#ifndef _POWERPC_RTAS_H
#define _POWERPC_RTAS_H
+#ifdef __KERNEL__
#include <linux/spinlock.h>
#include <asm/page.h>
@@ -160,7 +161,6 @@ extern struct rtas_t rtas;
extern void enter_rtas(unsigned long);
extern int rtas_token(const char *service);
extern int rtas_call(int token, int, int, int *, ...);
-extern void call_rtas_display_status(unsigned char);
extern void rtas_restart(char *cmd);
extern void rtas_power_off(void);
extern void rtas_halt(void);
@@ -229,4 +229,5 @@ extern unsigned long rtas_rmo_buf;
#define GLOBAL_INTERRUPT_QUEUE 9005
+#endif /* __KERNEL__ */
#endif /* _POWERPC_RTAS_H */
diff --git a/include/asm-powerpc/seccomp.h b/include/asm-powerpc/seccomp.h
index 1e1cfe12882..853765eb1f6 100644
--- a/include/asm-powerpc/seccomp.h
+++ b/include/asm-powerpc/seccomp.h
@@ -1,6 +1,10 @@
#ifndef _ASM_POWERPC_SECCOMP_H
+#define _ASM_POWERPC_SECCOMP_H
+#ifdef __KERNEL__
#include <linux/thread_info.h>
+#endif
+
#include <linux/unistd.h>
#define __NR_seccomp_read __NR_read
diff --git a/include/asm-powerpc/sections.h b/include/asm-powerpc/sections.h
index 47be2ac2a92..916018e425c 100644
--- a/include/asm-powerpc/sections.h
+++ b/include/asm-powerpc/sections.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_SECTIONS_H
#define _ASM_POWERPC_SECTIONS_H
+#ifdef __KERNEL__
#include <asm-generic/sections.h>
@@ -17,4 +18,5 @@ static inline int in_kernel_text(unsigned long addr)
#endif
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SECTIONS_H */
diff --git a/include/asm-powerpc/serial.h b/include/asm-powerpc/serial.h
index b273d630b32..3e8589b43cb 100644
--- a/include/asm-powerpc/serial.h
+++ b/include/asm-powerpc/serial.h
@@ -15,4 +15,10 @@
/* Default baud base if not found in device-tree */
#define BASE_BAUD ( 1843200 / 16 )
+#ifdef CONFIG_PPC_UDBG_16550
+extern void find_legacy_serial_ports(void);
+#else
+#define find_legacy_serial_ports() do { } while (0)
+#endif
+
#endif /* _PPC64_SERIAL_H */
diff --git a/include/asm-powerpc/signal.h b/include/asm-powerpc/signal.h
index 694c8d2dab8..a4d8f864854 100644
--- a/include/asm-powerpc/signal.h
+++ b/include/asm-powerpc/signal.h
@@ -2,10 +2,13 @@
#define _ASM_POWERPC_SIGNAL_H
#include <linux/types.h>
-#include <linux/config.h>
#define _NSIG 64
-#define _NSIG_BPW BITS_PER_LONG
+#ifdef __powerpc64__
+#define _NSIG_BPW 64
+#else
+#define _NSIG_BPW 32
+#endif
#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
typedef unsigned long old_sigset_t; /* at least 32 bits */
diff --git a/include/asm-powerpc/smu.h b/include/asm-powerpc/smu.h
index 76c29a9784d..82ce4760777 100644
--- a/include/asm-powerpc/smu.h
+++ b/include/asm-powerpc/smu.h
@@ -4,9 +4,11 @@
/*
* Definitions for talking to the SMU chip in newer G5 PowerMacs
*/
-
+#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/list.h>
+#endif
+#include <linux/types.h>
/*
* Known SMU commands
@@ -20,7 +22,7 @@
/*
* Partition info commands
*
- * These commands are used to retreive the sdb-partition-XX datas from
+ * These commands are used to retrieve the sdb-partition-XX data from
 * the SMU. The length is always 2. First byte is the subcommand code
* and second byte is the partition ID.
*
@@ -223,7 +225,7 @@
*
* SMU_CMD_MISC_ee_GET_DATABLOCK_REC is used, among others, to
 * transfer blocks of data from the SMU. So far, I've decrypted its
- * usage to retreive partition data. In order to do that, you have to
+ * usage to retrieve partition data. In order to do that, you have to
* break your transfer in "chunks" since that command cannot transfer
* more than a chunk at a time. The chunk size used by OF is 0xe bytes,
* but it seems that the darwin driver will let you do 0x1e bytes if
@@ -356,6 +358,9 @@ extern unsigned long smu_cmdbuf_abs;
 * Kernel asynchronous i2c interface
*/
+#define SMU_I2C_READ_MAX 0x1d
+#define SMU_I2C_WRITE_MAX 0x15
+
/* SMU i2c header, exactly matches i2c header on wire */
struct smu_i2c_param
{
@@ -366,12 +371,9 @@ struct smu_i2c_param
u8 subaddr[3]; /* subaddress */
u8 caddr; /* combined address, filled by SMU driver */
u8 datalen; /* length of transfer */
- u8 data[7]; /* data */
+ u8 data[SMU_I2C_READ_MAX]; /* data */
};
-#define SMU_I2C_READ_MAX 0x0d
-#define SMU_I2C_WRITE_MAX 0x05
-
struct smu_i2c_cmd
{
/* public */
@@ -385,7 +387,7 @@ struct smu_i2c_cmd
int read;
int stage;
int retries;
- u8 pdata[0x10];
+ u8 pdata[32];
struct list_head link;
};
@@ -487,8 +489,8 @@ struct smu_sdbp_slotspow {
#define SMU_SDB_SENSORTREE_ID 0x25
struct smu_sdbp_sensortree {
- u8 model_id;
- u8 unknown[3];
+ __u8 model_id;
+ __u8 unknown[3];
};
/* This partition contains CPU thermal control PID information. So far
@@ -498,13 +500,13 @@ struct smu_sdbp_sensortree {
#define SMU_SDB_CPUPIDDATA_ID 0x17
struct smu_sdbp_cpupiddata {
- u8 unknown1;
- u8 target_temp_delta;
- u8 unknown2;
- u8 history_len;
- s16 power_adj;
- u16 max_power;
- s32 gp,gr,gd;
+ __u8 unknown1;
+ __u8 target_temp_delta;
+ __u8 unknown2;
+ __u8 history_len;
+ __s16 power_adj;
+ __u16 max_power;
+ __s32 gp,gr,gd;
};
@@ -517,7 +519,7 @@ struct smu_sdbp_cpupiddata {
* if not found. The data format is described below
*/
extern struct smu_sdbp_header *smu_get_sdb_partition(int id,
- unsigned int *size);
+ unsigned int *size);
#endif /* __KERNEL__ */
@@ -554,7 +556,7 @@ struct smu_user_cmd_hdr
__u32 cmdtype;
#define SMU_CMDTYPE_SMU 0 /* SMU command */
#define SMU_CMDTYPE_WANTS_EVENTS 1 /* switch fd to events mode */
-#define SMU_CMDTYPE_GET_PARTITION 2 /* retreive an sdb partition */
+#define SMU_CMDTYPE_GET_PARTITION 2 /* retrieve an sdb partition */
__u8 cmd; /* SMU command byte */
__u8 pad[3]; /* padding */
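A hedged usage sketch, not from the patch: reading an SMU sdb partition
through the interface declared above. Only smu_get_sdb_partition() and the
partition ID constant are taken from this header; the meaning of the returned
size is assumed to be the partition length.

#include <linux/errno.h>
#include <asm/smu.h>

static int example_read_sensortree(void)
{
	struct smu_sdbp_header *hdr;
	unsigned int len;

	/* NULL is assumed to mean "partition not present". */
	hdr = smu_get_sdb_partition(SMU_SDB_SENSORTREE_ID, &len);
	if (hdr == NULL)
		return -ENODEV;

	/* The struct smu_sdbp_sensortree payload follows the header. */
	return 0;
}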
diff --git a/include/asm-powerpc/sparsemem.h b/include/asm-powerpc/sparsemem.h
index ba1b34fdb96..38b1ea3b58f 100644
--- a/include/asm-powerpc/sparsemem.h
+++ b/include/asm-powerpc/sparsemem.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_SPARSEMEM_H
#define _ASM_POWERPC_SPARSEMEM_H 1
+#ifdef __KERNEL__
#ifdef CONFIG_SPARSEMEM
/*
@@ -13,8 +14,17 @@
#ifdef CONFIG_MEMORY_HOTPLUG
extern void create_section_mapping(unsigned long start, unsigned long end);
+#ifdef CONFIG_NUMA
+extern int hot_add_scn_to_nid(unsigned long scn_addr);
+#else
+static inline int hot_add_scn_to_nid(unsigned long scn_addr)
+{
+ return 0;
+}
+#endif /* CONFIG_NUMA */
#endif /* CONFIG_MEMORY_HOTPLUG */
#endif /* CONFIG_SPARSEMEM */
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SPARSEMEM_H */
diff --git a/include/asm-powerpc/spinlock.h b/include/asm-powerpc/spinlock.h
index caa4b14e0e9..895cb6d3a42 100644
--- a/include/asm-powerpc/spinlock.h
+++ b/include/asm-powerpc/spinlock.h
@@ -1,5 +1,6 @@
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
+#ifdef __KERNEL__
/*
* Simple spin lock operations.
@@ -45,7 +46,7 @@ static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
token = LOCK_TOKEN;
__asm__ __volatile__(
-"1: lwarx %0,0,%2 # __spin_trylock\n\
+"1: lwarx %0,0,%2\n\
cmpwi 0,%0,0\n\
bne- 2f\n\
stwcx. %1,0,%2\n\
@@ -79,7 +80,7 @@ static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc)
+#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
extern void __spin_yield(raw_spinlock_t *lock);
extern void __rw_yield(raw_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
@@ -123,8 +124,8 @@ static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long
static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
- __asm__ __volatile__(SYNC_ON_SMP" # __raw_spin_unlock"
- : : :"memory");
+ __asm__ __volatile__("# __raw_spin_unlock\n\t"
+ LWSYNC_ON_SMP: : :"memory");
lock->slock = 0;
}
@@ -166,7 +167,7 @@ static long __inline__ __read_trylock(raw_rwlock_t *rw)
long tmp;
__asm__ __volatile__(
-"1: lwarx %0,0,%1 # read_trylock\n"
+"1: lwarx %0,0,%1\n"
__DO_SIGN_EXTEND
" addic. %0,%0,1\n\
ble- 2f\n"
@@ -191,7 +192,7 @@ static __inline__ long __write_trylock(raw_rwlock_t *rw)
token = WRLOCK_TOKEN;
__asm__ __volatile__(
-"1: lwarx %0,0,%2 # write_trylock\n\
+"1: lwarx %0,0,%2\n\
cmpwi 0,%0,0\n\
bne- 2f\n"
PPC405_ERR77(0,%1)
@@ -248,8 +249,9 @@ static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
long tmp;
__asm__ __volatile__(
- "eieio # read_unlock\n\
-1: lwarx %0,0,%1\n\
+ "# read_unlock\n\t"
+ LWSYNC_ON_SMP
+"1: lwarx %0,0,%1\n\
addic %0,%0,-1\n"
PPC405_ERR77(0,%1)
" stwcx. %0,0,%1\n\
@@ -261,9 +263,10 @@ static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
- __asm__ __volatile__(SYNC_ON_SMP" # write_unlock"
- : : :"memory");
+ __asm__ __volatile__("# write_unlock\n\t"
+ LWSYNC_ON_SMP: : :"memory");
rw->lock = 0;
}
+#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
new file mode 100644
index 00000000000..38bacf2f6e0
--- /dev/null
+++ b/include/asm-powerpc/spu.h
@@ -0,0 +1,600 @@
+/*
+ * SPU core / file system interface and HW structures
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _SPU_H
+#define _SPU_H
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
+
+#define LS_SIZE (256 * 1024)
+#define LS_ADDR_MASK (LS_SIZE - 1)
+
+#define MFC_PUT_CMD 0x20
+#define MFC_PUTS_CMD 0x28
+#define MFC_PUTR_CMD 0x30
+#define MFC_PUTF_CMD 0x22
+#define MFC_PUTB_CMD 0x21
+#define MFC_PUTFS_CMD 0x2A
+#define MFC_PUTBS_CMD 0x29
+#define MFC_PUTRF_CMD 0x32
+#define MFC_PUTRB_CMD 0x31
+#define MFC_PUTL_CMD 0x24
+#define MFC_PUTRL_CMD 0x34
+#define MFC_PUTLF_CMD 0x26
+#define MFC_PUTLB_CMD 0x25
+#define MFC_PUTRLF_CMD 0x36
+#define MFC_PUTRLB_CMD 0x35
+
+#define MFC_GET_CMD 0x40
+#define MFC_GETS_CMD 0x48
+#define MFC_GETF_CMD 0x42
+#define MFC_GETB_CMD 0x41
+#define MFC_GETFS_CMD 0x4A
+#define MFC_GETBS_CMD 0x49
+#define MFC_GETL_CMD 0x44
+#define MFC_GETLF_CMD 0x46
+#define MFC_GETLB_CMD 0x45
+
+#define MFC_SDCRT_CMD 0x80
+#define MFC_SDCRTST_CMD 0x81
+#define MFC_SDCRZ_CMD 0x89
+#define MFC_SDCRS_CMD 0x8D
+#define MFC_SDCRF_CMD 0x8F
+
+#define MFC_GETLLAR_CMD 0xD0
+#define MFC_PUTLLC_CMD 0xB4
+#define MFC_PUTLLUC_CMD 0xB0
+#define MFC_PUTQLLUC_CMD 0xB8
+#define MFC_SNDSIG_CMD 0xA0
+#define MFC_SNDSIGB_CMD 0xA1
+#define MFC_SNDSIGF_CMD 0xA2
+#define MFC_BARRIER_CMD 0xC0
+#define MFC_EIEIO_CMD 0xC8
+#define MFC_SYNC_CMD 0xCC
+
+#define MFC_MIN_DMA_SIZE_SHIFT 4 /* 16 bytes */
+#define MFC_MAX_DMA_SIZE_SHIFT 14 /* 16384 bytes */
+#define MFC_MIN_DMA_SIZE (1 << MFC_MIN_DMA_SIZE_SHIFT)
+#define MFC_MAX_DMA_SIZE (1 << MFC_MAX_DMA_SIZE_SHIFT)
+#define MFC_MIN_DMA_SIZE_MASK (MFC_MIN_DMA_SIZE - 1)
+#define MFC_MAX_DMA_SIZE_MASK (MFC_MAX_DMA_SIZE - 1)
+#define MFC_MIN_DMA_LIST_SIZE 0x0008 /* 8 bytes */
+#define MFC_MAX_DMA_LIST_SIZE 0x4000 /* 16K bytes */
+
+#define MFC_TAGID_TO_TAGMASK(tag_id) (1 << (tag_id & 0x1F))
+
+/* Events for Channels 0-2 */
+#define MFC_DMA_TAG_STATUS_UPDATE_EVENT 0x00000001
+#define MFC_DMA_TAG_CMD_STALL_NOTIFY_EVENT 0x00000002
+#define MFC_DMA_QUEUE_AVAILABLE_EVENT 0x00000008
+#define MFC_SPU_MAILBOX_WRITTEN_EVENT 0x00000010
+#define MFC_DECREMENTER_EVENT 0x00000020
+#define MFC_PU_INT_MAILBOX_AVAILABLE_EVENT 0x00000040
+#define MFC_PU_MAILBOX_AVAILABLE_EVENT 0x00000080
+#define MFC_SIGNAL_2_EVENT 0x00000100
+#define MFC_SIGNAL_1_EVENT 0x00000200
+#define MFC_LLR_LOST_EVENT 0x00000400
+#define MFC_PRIV_ATTN_EVENT 0x00000800
+#define MFC_MULTI_SRC_EVENT 0x00001000
+
+/* Flags indicating progress during context switch. */
+#define SPU_CONTEXT_SWITCH_PENDING 0UL
+#define SPU_CONTEXT_SWITCH_ACTIVE 1UL
+
+struct spu_context;
+struct spu_runqueue;
+
+struct spu {
+ char *name;
+ unsigned long local_store_phys;
+ u8 *local_store;
+ struct spu_problem __iomem *problem;
+ struct spu_priv1 __iomem *priv1;
+ struct spu_priv2 __iomem *priv2;
+ struct list_head list;
+ struct list_head sched_list;
+ int number;
+ u32 isrc;
+ u32 node;
+ u64 flags;
+ u64 dar;
+ u64 dsisr;
+ struct kref kref;
+ size_t ls_size;
+ unsigned int slb_replace;
+ struct mm_struct *mm;
+ struct spu_context *ctx;
+ struct spu_runqueue *rq;
+ unsigned long long timestamp;
+ pid_t pid;
+ int prio;
+ int class_0_pending;
+ spinlock_t register_lock;
+
+ u32 stop_code;
+ void (* wbox_callback)(struct spu *spu);
+ void (* ibox_callback)(struct spu *spu);
+ void (* stop_callback)(struct spu *spu);
+
+ char irq_c0[8];
+ char irq_c1[8];
+ char irq_c2[8];
+};
+
+struct spu *spu_alloc(void);
+void spu_free(struct spu *spu);
+int spu_irq_class_0_bottom(struct spu *spu);
+int spu_irq_class_1_bottom(struct spu *spu);
+void spu_irq_setaffinity(struct spu *spu, int cpu);
+
+extern struct spufs_calls {
+ asmlinkage long (*create_thread)(const char __user *name,
+ unsigned int flags, mode_t mode);
+ asmlinkage long (*spu_run)(struct file *filp, __u32 __user *unpc,
+ __u32 __user *ustatus);
+ struct module *owner;
+} spufs_calls;
+
+#ifdef CONFIG_SPU_FS_MODULE
+int register_spu_syscalls(struct spufs_calls *calls);
+void unregister_spu_syscalls(struct spufs_calls *calls);
+#else
+static inline int register_spu_syscalls(struct spufs_calls *calls)
+{
+ return 0;
+}
+static inline void unregister_spu_syscalls(struct spufs_calls *calls)
+{
+}
+#endif /* MODULE */
+
+
+/* access to priv1 registers */
+void spu_int_mask_and(struct spu *spu, int class, u64 mask);
+void spu_int_mask_or(struct spu *spu, int class, u64 mask);
+void spu_int_mask_set(struct spu *spu, int class, u64 mask);
+u64 spu_int_mask_get(struct spu *spu, int class);
+void spu_int_stat_clear(struct spu *spu, int class, u64 stat);
+u64 spu_int_stat_get(struct spu *spu, int class);
+void spu_int_route_set(struct spu *spu, u64 route);
+u64 spu_mfc_dar_get(struct spu *spu);
+u64 spu_mfc_dsisr_get(struct spu *spu);
+void spu_mfc_dsisr_set(struct spu *spu, u64 dsisr);
+void spu_mfc_sdr_set(struct spu *spu, u64 sdr);
+void spu_mfc_sr1_set(struct spu *spu, u64 sr1);
+u64 spu_mfc_sr1_get(struct spu *spu);
+void spu_mfc_tclass_id_set(struct spu *spu, u64 tclass_id);
+u64 spu_mfc_tclass_id_get(struct spu *spu);
+void spu_tlb_invalidate(struct spu *spu);
+void spu_resource_allocation_groupID_set(struct spu *spu, u64 id);
+u64 spu_resource_allocation_groupID_get(struct spu *spu);
+void spu_resource_allocation_enable_set(struct spu *spu, u64 enable);
+u64 spu_resource_allocation_enable_get(struct spu *spu);
+
+
+/*
+ * This defines the Local Store, Problem Area and Privilege Area of an SPU.
+ */
+
+union mfc_tag_size_class_cmd {
+ struct {
+ u16 mfc_size;
+ u16 mfc_tag;
+ u8 pad;
+ u8 mfc_rclassid;
+ u16 mfc_cmd;
+ } u;
+ struct {
+ u32 mfc_size_tag32;
+ u32 mfc_class_cmd32;
+ } by32;
+ u64 all64;
+};
+
+struct mfc_cq_sr {
+ u64 mfc_cq_data0_RW;
+ u64 mfc_cq_data1_RW;
+ u64 mfc_cq_data2_RW;
+ u64 mfc_cq_data3_RW;
+};
+
+struct spu_problem {
+#define MS_SYNC_PENDING 1L
+ u64 spc_mssync_RW; /* 0x0000 */
+ u8 pad_0x0008_0x3000[0x3000 - 0x0008];
+
+ /* DMA Area */
+ u8 pad_0x3000_0x3004[0x4]; /* 0x3000 */
+ u32 mfc_lsa_W; /* 0x3004 */
+ u64 mfc_ea_W; /* 0x3008 */
+ union mfc_tag_size_class_cmd mfc_union_W; /* 0x3010 */
+ u8 pad_0x3018_0x3104[0xec]; /* 0x3018 */
+ u32 dma_qstatus_R; /* 0x3104 */
+ u8 pad_0x3108_0x3204[0xfc]; /* 0x3108 */
+ u32 dma_querytype_RW; /* 0x3204 */
+ u8 pad_0x3208_0x321c[0x14]; /* 0x3208 */
+ u32 dma_querymask_RW; /* 0x321c */
+ u8 pad_0x3220_0x322c[0xc]; /* 0x3220 */
+ u32 dma_tagstatus_R; /* 0x322c */
+#define DMA_TAGSTATUS_INTR_ANY 1u
+#define DMA_TAGSTATUS_INTR_ALL 2u
+ u8 pad_0x3230_0x4000[0x4000 - 0x3230]; /* 0x3230 */
+
+ /* SPU Control Area */
+ u8 pad_0x4000_0x4004[0x4]; /* 0x4000 */
+ u32 pu_mb_R; /* 0x4004 */
+ u8 pad_0x4008_0x400c[0x4]; /* 0x4008 */
+ u32 spu_mb_W; /* 0x400c */
+ u8 pad_0x4010_0x4014[0x4]; /* 0x4010 */
+ u32 mb_stat_R; /* 0x4014 */
+ u8 pad_0x4018_0x401c[0x4]; /* 0x4018 */
+ u32 spu_runcntl_RW; /* 0x401c */
+#define SPU_RUNCNTL_STOP 0L
+#define SPU_RUNCNTL_RUNNABLE 1L
+ u8 pad_0x4020_0x4024[0x4]; /* 0x4020 */
+ u32 spu_status_R; /* 0x4024 */
+#define SPU_STOP_STATUS_SHIFT 16
+#define SPU_STATUS_STOPPED 0x0
+#define SPU_STATUS_RUNNING 0x1
+#define SPU_STATUS_STOPPED_BY_STOP 0x2
+#define SPU_STATUS_STOPPED_BY_HALT 0x4
+#define SPU_STATUS_WAITING_FOR_CHANNEL 0x8
+#define SPU_STATUS_SINGLE_STEP 0x10
+#define SPU_STATUS_INVALID_INSTR 0x20
+#define SPU_STATUS_INVALID_CH 0x40
+#define SPU_STATUS_ISOLATED_STATE 0x80
+#define SPU_STATUS_ISOLATED_LOAD_STAUTUS 0x200
+#define SPU_STATUS_ISOLATED_EXIT_STAUTUS 0x400
+ u8 pad_0x4028_0x402c[0x4]; /* 0x4028 */
+ u32 spu_spe_R; /* 0x402c */
+ u8 pad_0x4030_0x4034[0x4]; /* 0x4030 */
+ u32 spu_npc_RW; /* 0x4034 */
+ u8 pad_0x4038_0x14000[0x14000 - 0x4038]; /* 0x4038 */
+
+ /* Signal Notification Area */
+ u8 pad_0x14000_0x1400c[0xc]; /* 0x14000 */
+ u32 signal_notify1; /* 0x1400c */
+ u8 pad_0x14010_0x1c00c[0x7ffc]; /* 0x14010 */
+ u32 signal_notify2; /* 0x1c00c */
+} __attribute__ ((aligned(0x20000)));
+
+/* SPU Privilege 2 State Area */
+struct spu_priv2 {
+ /* MFC Registers */
+ u8 pad_0x0000_0x1100[0x1100 - 0x0000]; /* 0x0000 */
+
+ /* SLB Management Registers */
+ u8 pad_0x1100_0x1108[0x8]; /* 0x1100 */
+ u64 slb_index_W; /* 0x1108 */
+#define SLB_INDEX_MASK 0x7L
+ u64 slb_esid_RW; /* 0x1110 */
+ u64 slb_vsid_RW; /* 0x1118 */
+#define SLB_VSID_SUPERVISOR_STATE (0x1ull << 11)
+#define SLB_VSID_SUPERVISOR_STATE_MASK (0x1ull << 11)
+#define SLB_VSID_PROBLEM_STATE (0x1ull << 10)
+#define SLB_VSID_PROBLEM_STATE_MASK (0x1ull << 10)
+#define SLB_VSID_EXECUTE_SEGMENT (0x1ull << 9)
+#define SLB_VSID_NO_EXECUTE_SEGMENT (0x1ull << 9)
+#define SLB_VSID_EXECUTE_SEGMENT_MASK (0x1ull << 9)
+#define SLB_VSID_4K_PAGE (0x0 << 8)
+#define SLB_VSID_LARGE_PAGE (0x1ull << 8)
+#define SLB_VSID_PAGE_SIZE_MASK (0x1ull << 8)
+#define SLB_VSID_CLASS_MASK (0x1ull << 7)
+#define SLB_VSID_VIRTUAL_PAGE_SIZE_MASK (0x1ull << 6)
+ u64 slb_invalidate_entry_W; /* 0x1120 */
+ u64 slb_invalidate_all_W; /* 0x1128 */
+ u8 pad_0x1130_0x2000[0x2000 - 0x1130]; /* 0x1130 */
+
+ /* Context Save / Restore Area */
+ struct mfc_cq_sr spuq[16]; /* 0x2000 */
+ struct mfc_cq_sr puq[8]; /* 0x2200 */
+ u8 pad_0x2300_0x3000[0x3000 - 0x2300]; /* 0x2300 */
+
+ /* MFC Control */
+ u64 mfc_control_RW; /* 0x3000 */
+#define MFC_CNTL_RESUME_DMA_QUEUE (0ull << 0)
+#define MFC_CNTL_SUSPEND_DMA_QUEUE (1ull << 0)
+#define MFC_CNTL_SUSPEND_DMA_QUEUE_MASK (1ull << 0)
+#define MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION (0ull << 8)
+#define MFC_CNTL_SUSPEND_IN_PROGRESS (1ull << 8)
+#define MFC_CNTL_SUSPEND_COMPLETE (3ull << 8)
+#define MFC_CNTL_SUSPEND_DMA_STATUS_MASK (3ull << 8)
+#define MFC_CNTL_DMA_QUEUES_EMPTY (1ull << 14)
+#define MFC_CNTL_DMA_QUEUES_EMPTY_MASK (1ull << 14)
+#define MFC_CNTL_PURGE_DMA_REQUEST (1ull << 15)
+#define MFC_CNTL_PURGE_DMA_IN_PROGRESS (1ull << 24)
+#define MFC_CNTL_PURGE_DMA_COMPLETE (3ull << 24)
+#define MFC_CNTL_PURGE_DMA_STATUS_MASK (3ull << 24)
+#define MFC_CNTL_RESTART_DMA_COMMAND (1ull << 32)
+#define MFC_CNTL_DMA_COMMAND_REISSUE_PENDING (1ull << 32)
+#define MFC_CNTL_DMA_COMMAND_REISSUE_STATUS_MASK (1ull << 32)
+#define MFC_CNTL_MFC_PRIVILEGE_STATE (2ull << 33)
+#define MFC_CNTL_MFC_PROBLEM_STATE (3ull << 33)
+#define MFC_CNTL_MFC_KEY_PROTECTION_STATE_MASK (3ull << 33)
+#define MFC_CNTL_DECREMENTER_HALTED (1ull << 35)
+#define MFC_CNTL_DECREMENTER_RUNNING (1ull << 40)
+#define MFC_CNTL_DECREMENTER_STATUS_MASK (1ull << 40)
+ u8 pad_0x3008_0x4000[0x4000 - 0x3008]; /* 0x3008 */
+
+ /* Interrupt Mailbox */
+ u64 puint_mb_R; /* 0x4000 */
+ u8 pad_0x4008_0x4040[0x4040 - 0x4008]; /* 0x4008 */
+
+ /* SPU Control */
+ u64 spu_privcntl_RW; /* 0x4040 */
+#define SPU_PRIVCNTL_MODE_NORMAL (0x0ull << 0)
+#define SPU_PRIVCNTL_MODE_SINGLE_STEP (0x1ull << 0)
+#define SPU_PRIVCNTL_MODE_MASK (0x1ull << 0)
+#define SPU_PRIVCNTL_NO_ATTENTION_EVENT (0x0ull << 1)
+#define SPU_PRIVCNTL_ATTENTION_EVENT (0x1ull << 1)
+#define SPU_PRIVCNTL_ATTENTION_EVENT_MASK (0x1ull << 1)
+#define SPU_PRIVCNT_LOAD_REQUEST_NORMAL (0x0ull << 2)
+#define SPU_PRIVCNT_LOAD_REQUEST_ENABLE_MASK (0x1ull << 2)
+ u8 pad_0x4048_0x4058[0x10]; /* 0x4048 */
+ u64 spu_lslr_RW; /* 0x4058 */
+ u64 spu_chnlcntptr_RW; /* 0x4060 */
+ u64 spu_chnlcnt_RW; /* 0x4068 */
+ u64 spu_chnldata_RW; /* 0x4070 */
+ u64 spu_cfg_RW; /* 0x4078 */
+ u8 pad_0x4080_0x5000[0x5000 - 0x4080]; /* 0x4080 */
+
+ /* PV2_ImplRegs: Implementation-specific privileged-state 2 regs */
+ u64 spu_pm_trace_tag_status_RW; /* 0x5000 */
+ u64 spu_tag_status_query_RW; /* 0x5008 */
+#define TAG_STATUS_QUERY_CONDITION_BITS (0x3ull << 32)
+#define TAG_STATUS_QUERY_MASK_BITS (0xffffffffull)
+ u64 spu_cmd_buf1_RW; /* 0x5010 */
+#define SPU_COMMAND_BUFFER_1_LSA_BITS (0x7ffffull << 32)
+#define SPU_COMMAND_BUFFER_1_EAH_BITS (0xffffffffull)
+ u64 spu_cmd_buf2_RW; /* 0x5018 */
+#define SPU_COMMAND_BUFFER_2_EAL_BITS ((0xffffffffull) << 32)
+#define SPU_COMMAND_BUFFER_2_TS_BITS (0xffffull << 16)
+#define SPU_COMMAND_BUFFER_2_TAG_BITS (0x3full)
+ u64 spu_atomic_status_RW; /* 0x5020 */
+} __attribute__ ((aligned(0x20000)));
+
+/* SPU Privilege 1 State Area */
+struct spu_priv1 {
+ /* Control and Configuration Area */
+ u64 mfc_sr1_RW; /* 0x000 */
+#define MFC_STATE1_LOCAL_STORAGE_DECODE_MASK 0x01ull
+#define MFC_STATE1_BUS_TLBIE_MASK 0x02ull
+#define MFC_STATE1_REAL_MODE_OFFSET_ENABLE_MASK 0x04ull
+#define MFC_STATE1_PROBLEM_STATE_MASK 0x08ull
+#define MFC_STATE1_RELOCATE_MASK 0x10ull
+#define MFC_STATE1_MASTER_RUN_CONTROL_MASK 0x20ull
+ u64 mfc_lpid_RW; /* 0x008 */
+ u64 spu_idr_RW; /* 0x010 */
+ u64 mfc_vr_RO; /* 0x018 */
+#define MFC_VERSION_BITS (0xffff << 16)
+#define MFC_REVISION_BITS (0xffff)
+#define MFC_GET_VERSION_BITS(vr) (((vr) & MFC_VERSION_BITS) >> 16)
+#define MFC_GET_REVISION_BITS(vr) ((vr) & MFC_REVISION_BITS)
+ u64 spu_vr_RO; /* 0x020 */
+#define SPU_VERSION_BITS (0xffff << 16)
+#define SPU_REVISION_BITS (0xffff)
+#define SPU_GET_VERSION_BITS(vr) (((vr) & SPU_VERSION_BITS) >> 16)
+#define SPU_GET_REVISION_BITS(vr) ((vr) & SPU_REVISION_BITS)
+ u8 pad_0x28_0x100[0x100 - 0x28]; /* 0x28 */
+
+
+ /* Interrupt Area */
+ u64 int_mask_RW[3]; /* 0x100 */
+#define CLASS0_ENABLE_DMA_ALIGNMENT_INTR 0x1L
+#define CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR 0x2L
+#define CLASS0_ENABLE_SPU_ERROR_INTR 0x4L
+#define CLASS0_ENABLE_MFC_FIR_INTR 0x8L
+#define CLASS1_ENABLE_SEGMENT_FAULT_INTR 0x1L
+#define CLASS1_ENABLE_STORAGE_FAULT_INTR 0x2L
+#define CLASS1_ENABLE_LS_COMPARE_SUSPEND_ON_GET_INTR 0x4L
+#define CLASS1_ENABLE_LS_COMPARE_SUSPEND_ON_PUT_INTR 0x8L
+#define CLASS2_ENABLE_MAILBOX_INTR 0x1L
+#define CLASS2_ENABLE_SPU_STOP_INTR 0x2L
+#define CLASS2_ENABLE_SPU_HALT_INTR 0x4L
+#define CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR 0x8L
+ u8 pad_0x118_0x140[0x28]; /* 0x118 */
+ u64 int_stat_RW[3]; /* 0x140 */
+ u8 pad_0x158_0x180[0x28]; /* 0x158 */
+ u64 int_route_RW; /* 0x180 */
+
+ /* Interrupt Routing */
+ u8 pad_0x188_0x200[0x200 - 0x188]; /* 0x188 */
+
+ /* Atomic Unit Control Area */
+ u64 mfc_atomic_flush_RW; /* 0x200 */
+#define mfc_atomic_flush_enable 0x1L
+ u8 pad_0x208_0x280[0x78]; /* 0x208 */
+ u64 resource_allocation_groupID_RW; /* 0x280 */
+ u64 resource_allocation_enable_RW; /* 0x288 */
+ u8 pad_0x290_0x3c8[0x3c8 - 0x290]; /* 0x290 */
+
+ /* SPU_Cache_ImplRegs: Implementation-dependent cache registers */
+
+ u64 smf_sbi_signal_sel; /* 0x3c8 */
+#define smf_sbi_mask_lsb 56
+#define smf_sbi_shift (63 - smf_sbi_mask_lsb)
+#define smf_sbi_mask (0x301LL << smf_sbi_shift)
+#define smf_sbi_bus0_bits (0x001LL << smf_sbi_shift)
+#define smf_sbi_bus2_bits (0x100LL << smf_sbi_shift)
+#define smf_sbi2_bus0_bits (0x201LL << smf_sbi_shift)
+#define smf_sbi2_bus2_bits (0x300LL << smf_sbi_shift)
+ u64 smf_ato_signal_sel; /* 0x3d0 */
+#define smf_ato_mask_lsb 35
+#define smf_ato_shift (63 - smf_ato_mask_lsb)
+#define smf_ato_mask (0x3LL << smf_ato_shift)
+#define smf_ato_bus0_bits (0x2LL << smf_ato_shift)
+#define smf_ato_bus2_bits (0x1LL << smf_ato_shift)
+ u8 pad_0x3d8_0x400[0x400 - 0x3d8]; /* 0x3d8 */
+
+ /* TLB Management Registers */
+ u64 mfc_sdr_RW; /* 0x400 */
+ u8 pad_0x408_0x500[0xf8]; /* 0x408 */
+ u64 tlb_index_hint_RO; /* 0x500 */
+ u64 tlb_index_W; /* 0x508 */
+ u64 tlb_vpn_RW; /* 0x510 */
+ u64 tlb_rpn_RW; /* 0x518 */
+ u8 pad_0x520_0x540[0x20]; /* 0x520 */
+ u64 tlb_invalidate_entry_W; /* 0x540 */
+ u64 tlb_invalidate_all_W; /* 0x548 */
+ u8 pad_0x550_0x580[0x580 - 0x550]; /* 0x550 */
+
+ /* SPU_MMU_ImplRegs: Implementation-dependent MMU registers */
+ u64 smm_hid; /* 0x580 */
+#define PAGE_SIZE_MASK 0xf000000000000000ull
+#define PAGE_SIZE_16MB_64KB 0x2000000000000000ull
+ u8 pad_0x588_0x600[0x600 - 0x588]; /* 0x588 */
+
+ /* MFC Status/Control Area */
+ u64 mfc_accr_RW; /* 0x600 */
+#define MFC_ACCR_EA_ACCESS_GET (1 << 0)
+#define MFC_ACCR_EA_ACCESS_PUT (1 << 1)
+#define MFC_ACCR_LS_ACCESS_GET (1 << 3)
+#define MFC_ACCR_LS_ACCESS_PUT (1 << 4)
+ u8 pad_0x608_0x610[0x8]; /* 0x608 */
+ u64 mfc_dsisr_RW; /* 0x610 */
+#define MFC_DSISR_PTE_NOT_FOUND (1 << 30)
+#define MFC_DSISR_ACCESS_DENIED (1 << 27)
+#define MFC_DSISR_ATOMIC (1 << 26)
+#define MFC_DSISR_ACCESS_PUT (1 << 25)
+#define MFC_DSISR_ADDR_MATCH (1 << 22)
+#define MFC_DSISR_LS (1 << 17)
+#define MFC_DSISR_L (1 << 16)
+#define MFC_DSISR_ADDRESS_OVERFLOW (1 << 0)
+ u8 pad_0x618_0x620[0x8]; /* 0x618 */
+ u64 mfc_dar_RW; /* 0x620 */
+ u8 pad_0x628_0x700[0x700 - 0x628]; /* 0x628 */
+
+ /* Replacement Management Table (RMT) Area */
+ u64 rmt_index_RW; /* 0x700 */
+ u8 pad_0x708_0x710[0x8]; /* 0x708 */
+ u64 rmt_data1_RW; /* 0x710 */
+ u8 pad_0x718_0x800[0x800 - 0x718]; /* 0x718 */
+
+ /* Control/Configuration Registers */
+ u64 mfc_dsir_R; /* 0x800 */
+#define MFC_DSIR_Q (1 << 31)
+#define MFC_DSIR_SPU_QUEUE MFC_DSIR_Q
+ u64 mfc_lsacr_RW; /* 0x808 */
+#define MFC_LSACR_COMPARE_MASK ((~0ull) << 32)
+#define MFC_LSACR_COMPARE_ADDR ((~0ull) >> 32)
+ u64 mfc_lscrr_R; /* 0x810 */
+#define MFC_LSCRR_Q (1 << 31)
+#define MFC_LSCRR_SPU_QUEUE MFC_LSCRR_Q
+#define MFC_LSCRR_QI_SHIFT 32
+#define MFC_LSCRR_QI_MASK ((~0ull) << MFC_LSCRR_QI_SHIFT)
+ u8 pad_0x818_0x820[0x8]; /* 0x818 */
+ u64 mfc_tclass_id_RW; /* 0x820 */
+#define MFC_TCLASS_ID_ENABLE (1L << 0L)
+#define MFC_TCLASS_SLOT2_ENABLE (1L << 5L)
+#define MFC_TCLASS_SLOT1_ENABLE (1L << 6L)
+#define MFC_TCLASS_SLOT0_ENABLE (1L << 7L)
+#define MFC_TCLASS_QUOTA_2_SHIFT 8L
+#define MFC_TCLASS_QUOTA_1_SHIFT 16L
+#define MFC_TCLASS_QUOTA_0_SHIFT 24L
+#define MFC_TCLASS_QUOTA_2_MASK (0x1FL << MFC_TCLASS_QUOTA_2_SHIFT)
+#define MFC_TCLASS_QUOTA_1_MASK (0x1FL << MFC_TCLASS_QUOTA_1_SHIFT)
+#define MFC_TCLASS_QUOTA_0_MASK (0x1FL << MFC_TCLASS_QUOTA_0_SHIFT)
+ u8 pad_0x828_0x900[0x900 - 0x828]; /* 0x828 */
+
+ /* Real Mode Support Registers */
+ u64 mfc_rm_boundary; /* 0x900 */
+ u8 pad_0x908_0x938[0x30]; /* 0x908 */
+ u64 smf_dma_signal_sel; /* 0x938 */
+#define mfc_dma1_mask_lsb 41
+#define mfc_dma1_shift (63 - mfc_dma1_mask_lsb)
+#define mfc_dma1_mask (0x3LL << mfc_dma1_shift)
+#define mfc_dma1_bits (0x1LL << mfc_dma1_shift)
+#define mfc_dma2_mask_lsb 43
+#define mfc_dma2_shift (63 - mfc_dma2_mask_lsb)
+#define mfc_dma2_mask (0x3LL << mfc_dma2_shift)
+#define mfc_dma2_bits (0x1LL << mfc_dma2_shift)
+ u8 pad_0x940_0xa38[0xf8]; /* 0x940 */
+ u64 smm_signal_sel; /* 0xa38 */
+#define smm_sig_mask_lsb 12
+#define smm_sig_shift (63 - smm_sig_mask_lsb)
+#define smm_sig_mask (0x3LL << smm_sig_shift)
+#define smm_sig_bus0_bits (0x2LL << smm_sig_shift)
+#define smm_sig_bus2_bits (0x1LL << smm_sig_shift)
+ u8 pad_0xa40_0xc00[0xc00 - 0xa40]; /* 0xa40 */
+
+ /* DMA Command Error Area */
+ u64 mfc_cer_R; /* 0xc00 */
+#define MFC_CER_Q (1 << 31)
+#define MFC_CER_SPU_QUEUE MFC_CER_Q
+ u8 pad_0xc08_0x1000[0x1000 - 0xc08]; /* 0xc08 */
+
+ /* PV1_ImplRegs: Implementation-dependent privileged-state 1 regs */
+ /* DMA Command Error Area */
+ u64 spu_ecc_cntl_RW; /* 0x1000 */
+#define SPU_ECC_CNTL_E (1ull << 0ull)
+#define SPU_ECC_CNTL_ENABLE SPU_ECC_CNTL_E
+#define SPU_ECC_CNTL_DISABLE (~SPU_ECC_CNTL_E & 1L)
+#define SPU_ECC_CNTL_S (1ull << 1ull)
+#define SPU_ECC_STOP_AFTER_ERROR SPU_ECC_CNTL_S
+#define SPU_ECC_CONTINUE_AFTER_ERROR (~SPU_ECC_CNTL_S & 2L)
+#define SPU_ECC_CNTL_B (1ull << 2ull)
+#define SPU_ECC_BACKGROUND_ENABLE SPU_ECC_CNTL_B
+#define SPU_ECC_BACKGROUND_DISABLE (~SPU_ECC_CNTL_B & 4L)
+#define SPU_ECC_CNTL_I_SHIFT 3ull
+#define SPU_ECC_CNTL_I_MASK (3ull << SPU_ECC_CNTL_I_SHIFT)
+#define SPU_ECC_WRITE_ALWAYS (~SPU_ECC_CNTL_I & 12L)
+#define SPU_ECC_WRITE_CORRECTABLE (1ull << SPU_ECC_CNTL_I_SHIFT)
+#define SPU_ECC_WRITE_UNCORRECTABLE (3ull << SPU_ECC_CNTL_I_SHIFT)
+#define SPU_ECC_CNTL_D (1ull << 5ull)
+#define SPU_ECC_DETECTION_ENABLE SPU_ECC_CNTL_D
+#define SPU_ECC_DETECTION_DISABLE (~SPU_ECC_CNTL_D & 32L)
+ u64 spu_ecc_stat_RW; /* 0x1008 */
+#define SPU_ECC_CORRECTED_ERROR (1ull << 0ul)
+#define SPU_ECC_UNCORRECTED_ERROR (1ull << 1ul)
+#define SPU_ECC_SCRUB_COMPLETE (1ull << 2ul)
+#define SPU_ECC_SCRUB_IN_PROGRESS (1ull << 3ul)
+#define SPU_ECC_INSTRUCTION_ERROR (1ull << 4ul)
+#define SPU_ECC_DATA_ERROR (1ull << 5ul)
+#define SPU_ECC_DMA_ERROR (1ull << 6ul)
+#define SPU_ECC_STATUS_CNT_MASK (256ull << 8)
+ u64 spu_ecc_addr_RW; /* 0x1010 */
+ u64 spu_err_mask_RW; /* 0x1018 */
+#define SPU_ERR_ILLEGAL_INSTR (1ull << 0ul)
+#define SPU_ERR_ILLEGAL_CHANNEL (1ull << 1ul)
+ u8 pad_0x1020_0x1028[0x1028 - 0x1020]; /* 0x1020 */
+
+ /* SPU Debug-Trace Bus (DTB) Selection Registers */
+ u64 spu_trig0_sel; /* 0x1028 */
+ u64 spu_trig1_sel; /* 0x1030 */
+ u64 spu_trig2_sel; /* 0x1038 */
+ u64 spu_trig3_sel; /* 0x1040 */
+ u64 spu_trace_sel; /* 0x1048 */
+#define spu_trace_sel_mask 0x1f1fLL
+#define spu_trace_sel_bus0_bits 0x1000LL
+#define spu_trace_sel_bus2_bits 0x0010LL
+ u64 spu_event0_sel; /* 0x1050 */
+ u64 spu_event1_sel; /* 0x1058 */
+ u64 spu_event2_sel; /* 0x1060 */
+ u64 spu_event3_sel; /* 0x1068 */
+ u64 spu_trace_cntl; /* 0x1070 */
+} __attribute__ ((aligned(0x2000)));
+
+#endif /* __KERNEL__ */
+#endif
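For illustration, assuming a module-side consumer of the registration hooks
declared above; the stub names and bodies are placeholders, not the real
spufs implementation.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <asm/spu.h>

static asmlinkage long stub_spu_create(const char __user *name,
				       unsigned int flags, mode_t mode)
{
	return -ENOSYS;		/* real code creates a spufs context */
}

static asmlinkage long stub_spu_run(struct file *filp, __u32 __user *unpc,
				    __u32 __user *ustatus)
{
	return -ENOSYS;		/* real code runs the SPU context */
}

static struct spufs_calls stub_calls = {
	.create_thread	= stub_spu_create,
	.spu_run	= stub_spu_run,
	.owner		= THIS_MODULE,
};

static int __init stub_init(void)
{
	return register_spu_syscalls(&stub_calls);
}

static void __exit stub_exit(void)
{
	unregister_spu_syscalls(&stub_calls);
}

module_init(stub_init);
module_exit(stub_exit);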
diff --git a/include/asm-powerpc/spu_csa.h b/include/asm-powerpc/spu_csa.h
new file mode 100644
index 00000000000..ba18d7d4dde
--- /dev/null
+++ b/include/asm-powerpc/spu_csa.h
@@ -0,0 +1,255 @@
+/*
+ * spu_csa.h: Definitions for SPU context save area (CSA).
+ *
+ * (C) Copyright IBM 2005
+ *
+ * Author: Mark Nutter <mnutter@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _SPU_CSA_H_
+#define _SPU_CSA_H_
+#ifdef __KERNEL__
+
+/*
+ * Total number of 128-bit registers.
+ */
+#define NR_SPU_GPRS 128
+#define NR_SPU_SPRS 9
+#define NR_SPU_REGS_PAD 7
+#define NR_SPU_SPILL_REGS 144 /* GPRS + SPRS + PAD */
+#define SIZEOF_SPU_SPILL_REGS (NR_SPU_SPILL_REGS * 16)
+
+#define SPU_SAVE_COMPLETE 0x3FFB
+#define SPU_RESTORE_COMPLETE 0x3FFC
+
+/*
+ * Definitions for various 'stopped' status conditions,
+ * to be recreated during context restore.
+ */
+#define SPU_STOPPED_STATUS_P 1
+#define SPU_STOPPED_STATUS_I 2
+#define SPU_STOPPED_STATUS_H 3
+#define SPU_STOPPED_STATUS_S 4
+#define SPU_STOPPED_STATUS_S_I 5
+#define SPU_STOPPED_STATUS_S_P 6
+#define SPU_STOPPED_STATUS_P_H 7
+#define SPU_STOPPED_STATUS_P_I 8
+#define SPU_STOPPED_STATUS_R 9
+
+#ifndef __ASSEMBLY__
+/**
+ * spu_reg128 - generic 128-bit register definition.
+ */
+struct spu_reg128 {
+ u32 slot[4];
+};
+
+/**
+ * struct spu_lscsa - Local Store Context Save Area.
+ * @gprs: Array of saved registers.
+ * @fpcr: Saved floating point status control register.
+ * @decr: Saved decrementer value.
+ * @decr_status: Indicates decrementer run status.
+ * @ppu_mb: Saved PPU mailbox data.
+ * @ppuint_mb: Saved PPU interrupting mailbox data.
+ * @tag_mask: Saved tag group mask.
+ * @event_mask: Saved event mask.
+ * @srr0: Saved SRR0.
+ * @stopped_status: Conditions to be recreated by restore.
+ * @ls: Saved contents of Local Storage Area.
+ *
+ * The LSCSA represents state that is primarily saved and
+ * restored by SPU-side code.
+ */
+struct spu_lscsa {
+ struct spu_reg128 gprs[128];
+ struct spu_reg128 fpcr;
+ struct spu_reg128 decr;
+ struct spu_reg128 decr_status;
+ struct spu_reg128 ppu_mb;
+ struct spu_reg128 ppuint_mb;
+ struct spu_reg128 tag_mask;
+ struct spu_reg128 event_mask;
+ struct spu_reg128 srr0;
+ struct spu_reg128 stopped_status;
+ struct spu_reg128 pad[119]; /* 'ls' must be page-aligned. */
+ unsigned char ls[LS_SIZE];
+};
+
+/*
+ * struct spu_problem_collapsed - condensed problem state area, w/o pads.
+ */
+struct spu_problem_collapsed {
+ u64 spc_mssync_RW;
+ u32 mfc_lsa_W;
+ u32 unused_pad0;
+ u64 mfc_ea_W;
+ union mfc_tag_size_class_cmd mfc_union_W;
+ u32 dma_qstatus_R;
+ u32 dma_querytype_RW;
+ u32 dma_querymask_RW;
+ u32 dma_tagstatus_R;
+ u32 pu_mb_R;
+ u32 spu_mb_W;
+ u32 mb_stat_R;
+ u32 spu_runcntl_RW;
+ u32 spu_status_R;
+ u32 spu_spc_R;
+ u32 spu_npc_RW;
+ u32 signal_notify1;
+ u32 signal_notify2;
+ u32 unused_pad1;
+};
+
+/*
+ * struct spu_priv1_collapsed - condensed privileged 1 area, w/o pads.
+ */
+struct spu_priv1_collapsed {
+ u64 mfc_sr1_RW;
+ u64 mfc_lpid_RW;
+ u64 spu_idr_RW;
+ u64 mfc_vr_RO;
+ u64 spu_vr_RO;
+ u64 int_mask_class0_RW;
+ u64 int_mask_class1_RW;
+ u64 int_mask_class2_RW;
+ u64 int_stat_class0_RW;
+ u64 int_stat_class1_RW;
+ u64 int_stat_class2_RW;
+ u64 int_route_RW;
+ u64 mfc_atomic_flush_RW;
+ u64 resource_allocation_groupID_RW;
+ u64 resource_allocation_enable_RW;
+ u64 mfc_fir_R;
+ u64 mfc_fir_status_or_W;
+ u64 mfc_fir_status_and_W;
+ u64 mfc_fir_mask_R;
+ u64 mfc_fir_mask_or_W;
+ u64 mfc_fir_mask_and_W;
+ u64 mfc_fir_chkstp_enable_RW;
+ u64 smf_sbi_signal_sel;
+ u64 smf_ato_signal_sel;
+ u64 mfc_sdr_RW;
+ u64 tlb_index_hint_RO;
+ u64 tlb_index_W;
+ u64 tlb_vpn_RW;
+ u64 tlb_rpn_RW;
+ u64 tlb_invalidate_entry_W;
+ u64 tlb_invalidate_all_W;
+ u64 smm_hid;
+ u64 mfc_accr_RW;
+ u64 mfc_dsisr_RW;
+ u64 mfc_dar_RW;
+ u64 rmt_index_RW;
+ u64 rmt_data1_RW;
+ u64 mfc_dsir_R;
+ u64 mfc_lsacr_RW;
+ u64 mfc_lscrr_R;
+ u64 mfc_tclass_id_RW;
+ u64 mfc_rm_boundary;
+ u64 smf_dma_signal_sel;
+ u64 smm_signal_sel;
+ u64 mfc_cer_R;
+ u64 pu_ecc_cntl_RW;
+ u64 pu_ecc_stat_RW;
+ u64 spu_ecc_addr_RW;
+ u64 spu_err_mask_RW;
+ u64 spu_trig0_sel;
+ u64 spu_trig1_sel;
+ u64 spu_trig2_sel;
+ u64 spu_trig3_sel;
+ u64 spu_trace_sel;
+ u64 spu_event0_sel;
+ u64 spu_event1_sel;
+ u64 spu_event2_sel;
+ u64 spu_event3_sel;
+ u64 spu_trace_cntl;
+};
+
+/*
+ * struct spu_priv2_collapsed - condensed privileged 2 area, w/o pads.
+ */
+struct spu_priv2_collapsed {
+ u64 slb_index_W;
+ u64 slb_esid_RW;
+ u64 slb_vsid_RW;
+ u64 slb_invalidate_entry_W;
+ u64 slb_invalidate_all_W;
+ struct mfc_cq_sr spuq[16];
+ struct mfc_cq_sr puq[8];
+ u64 mfc_control_RW;
+ u64 puint_mb_R;
+ u64 spu_privcntl_RW;
+ u64 spu_lslr_RW;
+ u64 spu_chnlcntptr_RW;
+ u64 spu_chnlcnt_RW;
+ u64 spu_chnldata_RW;
+ u64 spu_cfg_RW;
+ u64 spu_tag_status_query_RW;
+ u64 spu_cmd_buf1_RW;
+ u64 spu_cmd_buf2_RW;
+ u64 spu_atomic_status_RW;
+};
+
+/**
+ * struct spu_state
+ * @lscsa: Local Store Context Save Area.
+ * @prob: Collapsed Problem State Area, w/o pads.
+ * @priv1: Collapsed Privileged 1 Area, w/o pads.
+ * @priv2: Collapsed Privileged 2 Area, w/o pads.
+ * @spu_chnlcnt_RW: Array of saved channel counts.
+ * @spu_chnldata_RW: Array of saved channel data.
+ * @suspend_time: Time stamp when decrementer disabled.
+ * @slb_esid_RW: Array of saved SLB esid entries.
+ * @slb_vsid_RW: Array of saved SLB vsid entries.
+ *
+ * Structure representing the whole of the SPU
+ * context save area (CSA). This struct contains
+ * all of the state necessary to suspend and then
+ * later optionally resume execution of an SPU
+ * context.
+ *
+ * The @lscsa region is by far the largest, and is
+ * allocated separately so that it may either be
+ * pinned or mapped to/from application memory, as
+ * appropriate for the OS environment.
+ */
+struct spu_state {
+ struct spu_lscsa *lscsa;
+ struct spu_problem_collapsed prob;
+ struct spu_priv1_collapsed priv1;
+ struct spu_priv2_collapsed priv2;
+ u64 spu_chnlcnt_RW[32];
+ u64 spu_chnldata_RW[32];
+ u32 spu_mailbox_data[4];
+ u32 pu_mailbox_data[1];
+ unsigned long suspend_time;
+ u64 slb_esid_RW[8];
+ u64 slb_vsid_RW[8];
+ spinlock_t register_lock;
+};
+
+extern void spu_init_csa(struct spu_state *csa);
+extern void spu_fini_csa(struct spu_state *csa);
+extern int spu_save(struct spu_state *prev, struct spu *spu);
+extern int spu_restore(struct spu_state *new, struct spu *spu);
+extern int spu_switch(struct spu_state *prev, struct spu_state *new,
+ struct spu *spu);
+
+#endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY__ */
+#endif /* _SPU_CSA_H_ */
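A sketch of how scheduler-side code might drive a save/restore cycle with the
CSA declared above; only the function prototypes are taken from the header,
the sequencing is an assumption.

#include <asm/spu.h>
#include <asm/spu_csa.h>

static int example_switch_contexts(struct spu_state *prev,
				   struct spu_state *next,
				   struct spu *spu)
{
	int ret;

	ret = spu_save(prev, spu);	/* dump current state into prev */
	if (ret)
		return ret;
	return spu_restore(next, spu);	/* load next onto the same SPU */
}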
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h
index 4660c0394a7..c90d9d9aae7 100644
--- a/include/asm-powerpc/synch.h
+++ b/include/asm-powerpc/synch.h
@@ -1,7 +1,8 @@
#ifndef _ASM_POWERPC_SYNCH_H
#define _ASM_POWERPC_SYNCH_H
+#ifdef __KERNEL__
-#include <linux/config.h>
+#include <linux/stringify.h>
#ifdef __powerpc64__
#define __SUBARCH_HAS_LWSYNC
@@ -13,20 +14,12 @@
# define LWSYNC sync
#endif
-
-/*
- * Arguably the bitops and *xchg operations don't imply any memory barrier
- * or SMP ordering, but in fact a lot of drivers expect them to imply
- * both, since they do on x86 cpus.
- */
#ifdef CONFIG_SMP
-#define EIEIO_ON_SMP "eieio\n"
#define ISYNC_ON_SMP "\n\tisync"
-#define SYNC_ON_SMP __stringify(LWSYNC) "\n"
+#define LWSYNC_ON_SMP __stringify(LWSYNC) "\n"
#else
-#define EIEIO_ON_SMP
#define ISYNC_ON_SMP
-#define SYNC_ON_SMP
+#define LWSYNC_ON_SMP
#endif
static inline void eieio(void)
@@ -39,13 +32,5 @@ static inline void isync(void)
__asm__ __volatile__ ("isync" : : : "memory");
}
-#ifdef CONFIG_SMP
-#define eieio_on_smp() eieio()
-#define isync_on_smp() isync()
-#else
-#define eieio_on_smp() __asm__ __volatile__("": : :"memory")
-#define isync_on_smp() __asm__ __volatile__("": : :"memory")
-#endif
-
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYNCH_H */
-
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 5341b75c75c..d9bf53653b1 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -4,7 +4,6 @@
#ifndef _ASM_POWERPC_SYSTEM_H
#define _ASM_POWERPC_SYSTEM_H
-#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/hw_irq.h>
@@ -42,6 +41,7 @@
#define set_mb(var, value) do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+#ifdef __KERNEL__
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
@@ -54,7 +54,6 @@
#define smp_read_barrier_depends() do { } while(0)
#endif /* CONFIG_SMP */
-#ifdef __KERNEL__
struct task_struct;
struct pt_regs;
@@ -134,6 +133,14 @@ extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
extern void cvt_df(double *from, float *to, struct thread_struct *thread);
+#ifndef CONFIG_SMP
+extern void discard_lazy_cpu_state(void);
+#else
+static inline void discard_lazy_cpu_state(void)
+{
+}
+#endif
+
#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
@@ -176,6 +183,16 @@ struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
struct thread_struct *next);
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
extern unsigned int rtas_data;
extern int mem_init_done; /* set on boot once kmalloc can be called */
extern unsigned long memory_limit;
@@ -195,7 +212,7 @@ __xchg_u32(volatile void *p, unsigned long val)
unsigned long prev;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: lwarx %0,0,%2 \n"
PPC405_ERR77(0,%2)
" stwcx. %3,0,%2 \n\
@@ -215,7 +232,7 @@ __xchg_u64(volatile void *p, unsigned long val)
unsigned long prev;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: ldarx %0,0,%2 \n"
PPC405_ERR77(0,%2)
" stdcx. %3,0,%2 \n\
@@ -270,7 +287,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
unsigned int prev;
__asm__ __volatile__ (
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
cmpw 0,%0,%3\n\
bne- 2f\n"
@@ -294,7 +311,7 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
unsigned long prev;
__asm__ __volatile__ (
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
cmpd 0,%0,%3\n\
bne- 2f\n\
diff --git a/include/asm-powerpc/tce.h b/include/asm-powerpc/tce.h
index 980a094fd5a..6fa200ad7a7 100644
--- a/include/asm-powerpc/tce.h
+++ b/include/asm-powerpc/tce.h
@@ -20,6 +20,7 @@
#ifndef _ASM_POWERPC_TCE_H
#define _ASM_POWERPC_TCE_H
+#ifdef __KERNEL__
/*
* Tces come in two formats, one for the virtual bus and a different
@@ -61,4 +62,5 @@ union tce_entry {
};
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TCE_H */
diff --git a/include/asm-powerpc/thread_info.h b/include/asm-powerpc/thread_info.h
index e525f49bd17..7e09d7cda93 100644
--- a/include/asm-powerpc/thread_info.h
+++ b/include/asm-powerpc/thread_info.h
@@ -37,8 +37,7 @@ struct thread_info {
int preempt_count; /* 0 => preemptable,
<0 => BUG */
struct restart_block restart_block;
- /* set by force_successful_syscall_return */
- unsigned char syscall_noerror;
+ void *nvgprs_frame;
/* low level flags - has atomic operations done on it */
unsigned long flags ____cacheline_aligned_in_smp;
};
@@ -90,9 +89,6 @@ struct thread_info {
#endif /* THREAD_SHIFT < PAGE_SHIFT */
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
-
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
@@ -123,6 +119,9 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SINGLESTEP 9 /* singlestepping active */
#define TIF_MEMDIE 10
#define TIF_SECCOMP 11 /* secure computing */
+#define TIF_RESTOREALL 12 /* Restore all regs (implies NOERROR) */
+#define TIF_SAVE_NVGPRS 13 /* Save r14-r31 in signal frame */
+#define TIF_NOERROR 14 /* Force successful syscall return */
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -136,10 +135,14 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
+#define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
+#define _TIF_SAVE_NVGPRS (1<<TIF_SAVE_NVGPRS)
+#define _TIF_NOERROR (1<<TIF_NOERROR)
#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
#define _TIF_USER_WORK_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \
- _TIF_NEED_RESCHED)
+ _TIF_NEED_RESCHED | _TIF_RESTOREALL)
+#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR|_TIF_SAVE_NVGPRS)
#endif /* __KERNEL__ */
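A hedged C sketch of the semantics of the new per-syscall flags; in the patch
itself this handling lives in the syscall exit assembly, so the helper below
is purely illustrative.

#include <linux/thread_info.h>

static inline void example_handle_persyscall_flags(void)
{
	/* TIF_NOERROR: report success regardless of the return value. */
	if (test_and_clear_thread_flag(TIF_NOERROR)) {
		/* arch exit code would clear the CR0.SO error bit here */
	}

	/* TIF_RESTOREALL: restore every GPR, not just the volatiles. */
	if (test_and_clear_thread_flag(TIF_RESTOREALL)) {
		/* exit path takes the full-register-restore slow path */
	}
}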
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h
index d9b86a17271..baddc9ab57a 100644
--- a/include/asm-powerpc/time.h
+++ b/include/asm-powerpc/time.h
@@ -175,11 +175,10 @@ static inline void set_dec(int val)
set_dec_cpu6(val);
#else
#ifdef CONFIG_PPC_ISERIES
- struct paca_struct *lpaca = get_paca();
int cur_dec;
- if (lpaca->lppaca.shared_proc) {
- lpaca->lppaca.virtual_decr = val;
+ if (get_lppaca()->shared_proc) {
+ get_lppaca()->virtual_decr = val;
cur_dec = get_dec();
if (cur_dec > val)
HvCall_setVirtualDecr();
diff --git a/include/asm-powerpc/tlb.h b/include/asm-powerpc/tlb.h
index 56659f12177..601a53cf96d 100644
--- a/include/asm-powerpc/tlb.h
+++ b/include/asm-powerpc/tlb.h
@@ -11,6 +11,7 @@
*/
#ifndef _ASM_POWERPC_TLB_H
#define _ASM_POWERPC_TLB_H
+#ifdef __KERNEL__
#include <linux/config.h>
#ifndef __powerpc64__
@@ -67,4 +68,5 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
}
#endif
+#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_TLB_H */
diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h
index db8095cbe09..1e19cd00af2 100644
--- a/include/asm-powerpc/topology.h
+++ b/include/asm-powerpc/topology.h
@@ -1,5 +1,6 @@
#ifndef _ASM_POWERPC_TOPOLOGY_H
#define _ASM_POWERPC_TOPOLOGY_H
+#ifdef __KERNEL__
#include <linux/config.h>
@@ -38,7 +39,6 @@ static inline int node_to_first_cpu(int node)
.max_interval = 32, \
.busy_factor = 32, \
.imbalance_pct = 125, \
- .cache_hot_time = (10*1000000), \
.cache_nice_tries = 1, \
.per_cpu_gain = 100, \
.busy_idx = 3, \
@@ -55,10 +55,15 @@ static inline int node_to_first_cpu(int node)
.nr_balance_failed = 0, \
}
+extern void __init dump_numa_cpu_topology(void);
+
#else
+static inline void dump_numa_cpu_topology(void) {}
+
#include <asm-generic/topology.h>
#endif /* CONFIG_NUMA */
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TOPOLOGY_H */
diff --git a/include/asm-powerpc/udbg.h b/include/asm-powerpc/udbg.h
index a383383bc4d..5c4236c342b 100644
--- a/include/asm-powerpc/udbg.h
+++ b/include/asm-powerpc/udbg.h
@@ -1,5 +1,5 @@
/*
- * c 2001 PPC 64 Team, IBM Corp
+ * (c) 2001, 2006 IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -9,12 +9,13 @@
#ifndef _ASM_POWERPC_UDBG_H
#define _ASM_POWERPC_UDBG_H
+#ifdef __KERNEL__
#include <linux/compiler.h>
#include <linux/init.h>
-extern void (*udbg_putc)(unsigned char c);
-extern unsigned char (*udbg_getc)(void);
+extern void (*udbg_putc)(char c);
+extern int (*udbg_getc)(void);
extern int (*udbg_getc_poll)(void);
extern void udbg_puts(const char *s);
@@ -23,9 +24,24 @@ extern int udbg_read(char *buf, int buflen);
extern void register_early_udbg_console(void);
extern void udbg_printf(const char *fmt, ...);
+extern void udbg_progress(char *s, unsigned short hex);
-extern void udbg_init_uart(void __iomem *comport, unsigned int speed);
+extern void udbg_init_uart(void __iomem *comport, unsigned int speed,
+ unsigned int clock);
+extern unsigned int udbg_probe_uart_speed(void __iomem *comport,
+ unsigned int clock);
struct device_node;
-extern void udbg_init_scc(struct device_node *np);
+extern void udbg_scc_init(int force_scc);
+extern int udbg_adb_init(int force_btext);
+extern void udbg_adb_init_early(void);
+
+extern void __init udbg_early_init(void);
+extern void __init udbg_init_debug_lpar(void);
+extern void __init udbg_init_pmac_realmode(void);
+extern void __init udbg_init_maple_realmode(void);
+extern void __init udbg_init_iseries(void);
+extern void __init udbg_init_rtas(void);
+
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_UDBG_H */
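For illustration, a sketch of a platform wiring its early debug console into
the updated udbg_putc/udbg_getc pointer types; the board-specific bodies are
assumptions.

#include <linux/init.h>
#include <asm/udbg.h>

static void board_udbg_putc(char c)
{
	/* write c to a board-specific debug port */
}

static int board_udbg_getc(void)
{
	return -1;	/* no character available */
}

void __init board_udbg_early_init(void)
{
	udbg_putc = board_udbg_putc;
	udbg_getc = board_udbg_getc;
}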
diff --git a/include/asm-powerpc/unistd.h b/include/asm-powerpc/unistd.h
index 0991dfceef1..19eaac3fbbf 100644
--- a/include/asm-powerpc/unistd.h
+++ b/include/asm-powerpc/unistd.h
@@ -296,8 +296,10 @@
#define __NR_inotify_init 275
#define __NR_inotify_add_watch 276
#define __NR_inotify_rm_watch 277
+#define __NR_spu_run 278
+#define __NR_spu_create 279
-#define __NR_syscalls 278
+#define __NR_syscalls 280
#ifdef __KERNEL__
#define __NR__exit __NR_exit
diff --git a/include/asm-powerpc/vdso_datapage.h b/include/asm-powerpc/vdso_datapage.h
index 411832d5bbd..7aa92086c3f 100644
--- a/include/asm-powerpc/vdso_datapage.h
+++ b/include/asm-powerpc/vdso_datapage.h
@@ -1,5 +1,6 @@
#ifndef _VDSO_DATAPAGE_H
#define _VDSO_DATAPAGE_H
+#ifdef __KERNEL__
/*
* Copyright (C) 2002 Peter Bergner <bergner@vnet.ibm.com>, IBM
@@ -105,4 +106,5 @@ extern struct vdso_data *vdso_data;
#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
#endif /* _SYSTEMCFG_H */
diff --git a/include/asm-powerpc/vio.h b/include/asm-powerpc/vio.h
index e0ccf108277..0544ece5176 100644
--- a/include/asm-powerpc/vio.h
+++ b/include/asm-powerpc/vio.h
@@ -13,6 +13,7 @@
#ifndef _ASM_POWERPC_VIO_H
#define _ASM_POWERPC_VIO_H
+#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/init.h>
@@ -103,4 +104,5 @@ static inline struct vio_dev *to_vio_dev(struct device *dev)
return container_of(dev, struct vio_dev, dev);
}
+#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_VIO_H */
diff --git a/include/asm-ppc/bseip.h b/include/asm-ppc/bseip.h
deleted file mode 100644
index 691f4a52b0a..00000000000
--- a/include/asm-ppc/bseip.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * A collection of structures, addresses, and values associated with
- * the Bright Star Engineering ip-Engine board. Copied from the MBX stuff.
- *
- * Copyright (c) 1998 Dan Malek (dmalek@jlc.net)
- */
-#ifndef __MACH_BSEIP_DEFS
-#define __MACH_BSEIP_DEFS
-
-#ifndef __ASSEMBLY__
-/* A Board Information structure that is given to a program when
- * prom starts it up.
- */
-typedef struct bd_info {
- unsigned int bi_memstart; /* Memory start address */
- unsigned int bi_memsize; /* Memory (end) size in bytes */
- unsigned int bi_intfreq; /* Internal Freq, in Hz */
- unsigned int bi_busfreq; /* Bus Freq, in Hz */
- unsigned char bi_enetaddr[6];
- unsigned int bi_baudrate;
-} bd_t;
-
-extern bd_t m8xx_board_info;
-
-/* Memory map is configured by the PROM startup.
- * All we need to get started is the IMMR.
- */
-#define IMAP_ADDR ((uint)0xff000000)
-#define IMAP_SIZE ((uint)(64 * 1024))
-#define PCMCIA_MEM_ADDR ((uint)0x04000000)
-#define PCMCIA_MEM_SIZE ((uint)(64 * 1024))
-#endif /* !__ASSEMBLY__ */
-
-/* We don't use the 8259.
-*/
-#define NR_8259_INTS 0
-
-#endif
diff --git a/include/asm-ppc/btext.h b/include/asm-ppc/btext.h
index ccaefabe0bf..ed3630251b3 100644
--- a/include/asm-ppc/btext.h
+++ b/include/asm-ppc/btext.h
@@ -17,7 +17,7 @@ extern unsigned long disp_BAT[2];
extern boot_infos_t disp_bi;
extern int boot_text_mapped;
-extern void init_boot_display(void);
+extern void btext_init(boot_infos_t *bi);
extern void btext_welcome(void);
extern void btext_prepare_BAT(void);
extern void btext_setup_display(int width, int height, int depth, int pitch,
diff --git a/include/asm-ppc/ibm_ocp.h b/include/asm-ppc/ibm_ocp.h
index 9c21de1ff4e..ddce616f765 100644
--- a/include/asm-ppc/ibm_ocp.h
+++ b/include/asm-ppc/ibm_ocp.h
@@ -63,7 +63,6 @@ struct ocp_func_emac_data {
int wol_irq; /* WOL interrupt */
int mdio_idx; /* EMAC idx of MDIO master or -1 */
int tah_idx; /* TAH device index or -1 */
- int jumbo; /* Jumbo frames capable flag */
int phy_mode; /* PHY type or configurable mode */
u8 mac_addr[6]; /* EMAC mac address */
u32 phy_map; /* EMAC phy map */
diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h
index 84ac6e258ee..df9cf6ed189 100644
--- a/include/asm-ppc/io.h
+++ b/include/asm-ppc/io.h
@@ -27,6 +27,8 @@
#if defined(CONFIG_4xx)
#include <asm/ibm4xx.h>
+#elif defined(CONFIG_PPC_MPC52xx)
+#include <asm/mpc52xx.h>
#elif defined(CONFIG_8xx)
#include <asm/mpc8xx.h>
#elif defined(CONFIG_8260)
diff --git a/include/asm-ppc/machdep.h b/include/asm-ppc/machdep.h
index f01255bd1dc..39200def8d1 100644
--- a/include/asm-ppc/machdep.h
+++ b/include/asm-ppc/machdep.h
@@ -35,8 +35,10 @@ struct machdep_calls {
int (*get_irq)(struct pt_regs *);
/* A general init function, called by ppc_init in init/main.c.
- May be NULL. */
+ May be NULL. DEPRECATED ! */
void (*init)(void);
+ /* For compatibility with merged platforms */
+ void (*init_early)(void);
void (*restart)(char *cmd);
void (*power_off)(void);
diff --git a/include/asm-ppc/mpc52xx.h b/include/asm-ppc/mpc52xx.h
index e5f80c22fbf..a055e0756b9 100644
--- a/include/asm-ppc/mpc52xx.h
+++ b/include/asm-ppc/mpc52xx.h
@@ -29,6 +29,17 @@ struct pt_regs;
#endif /* __ASSEMBLY__ */
+#ifdef CONFIG_PCI
+#define _IO_BASE isa_io_base
+#define _ISA_MEM_BASE isa_mem_base
+#define PCI_DRAM_OFFSET pci_dram_offset
+#else
+#define _IO_BASE 0
+#define _ISA_MEM_BASE 0
+#define PCI_DRAM_OFFSET 0
+#endif
+
+
/* ======================================================================== */
/* PPC Sys devices definition */
/* ======================================================================== */
@@ -107,7 +118,7 @@ enum ppc_sys_devices {
#define MPC52xx_SDMA_IRQ_NUM 17
#define MPC52xx_PERP_IRQ_NUM 23
-#define MPC52xx_CRIT_IRQ_BASE 0
+#define MPC52xx_CRIT_IRQ_BASE 1
#define MPC52xx_MAIN_IRQ_BASE (MPC52xx_CRIT_IRQ_BASE + MPC52xx_CRIT_IRQ_NUM)
#define MPC52xx_SDMA_IRQ_BASE (MPC52xx_MAIN_IRQ_BASE + MPC52xx_MAIN_IRQ_NUM)
#define MPC52xx_PERP_IRQ_BASE (MPC52xx_SDMA_IRQ_BASE + MPC52xx_SDMA_IRQ_NUM)
diff --git a/include/asm-ppc/mpc85xx.h b/include/asm-ppc/mpc85xx.h
index 9d14baea3d7..c8a96aa44fb 100644
--- a/include/asm-ppc/mpc85xx.h
+++ b/include/asm-ppc/mpc85xx.h
@@ -37,6 +37,10 @@
#ifdef CONFIG_STX_GP3
#include <platforms/85xx/stx_gp3.h>
#endif
+#if defined(CONFIG_TQM8540) || defined(CONFIG_TQM8541) || \
+ defined(CONFIG_TQM8555) || defined(CONFIG_TQM8560)
+#include <platforms/85xx/tqm85xx.h>
+#endif
#define _IO_BASE isa_io_base
#define _ISA_MEM_BASE isa_mem_base
diff --git a/include/asm-ppc/pci-bridge.h b/include/asm-ppc/pci-bridge.h
index e58c78f90a5..9d5230689b3 100644
--- a/include/asm-ppc/pci-bridge.h
+++ b/include/asm-ppc/pci-bridge.h
@@ -137,5 +137,14 @@ static inline unsigned char bridge_swizzle(unsigned char pin,
*/
extern int pciauto_bus_scan(struct pci_controller *, int);
+#ifdef CONFIG_PCI
+extern unsigned long pci_address_to_pio(phys_addr_t address);
+#else
+static inline unsigned long pci_address_to_pio(phys_addr_t address)
+{
+ return (unsigned long)-1;
+}
+#endif
+
#endif
#endif /* __KERNEL__ */
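Both the real pci_address_to_pio() and the new !CONFIG_PCI stub report failure as (unsigned long)-1, so callers can be written once for either configuration. A hypothetical caller-side sketch ("phys" and "res" are placeholders):

	unsigned long pio = pci_address_to_pio(phys);

	if (pio == (unsigned long)-1)
		return -ENXIO;		/* not inside any host bridge IO window */
	res->start = pio;
	res->flags = IORESOURCE_IO;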
diff --git a/include/asm-ppc/prom.h b/include/asm-ppc/prom.h
index 3e39827ed56..eb317a0806e 100644
--- a/include/asm-ppc/prom.h
+++ b/include/asm-ppc/prom.h
@@ -136,5 +136,37 @@ extern unsigned long sub_reloc_offset(unsigned long);
#define PTRRELOC(x) ((typeof(x))add_reloc_offset((unsigned long)(x)))
#define PTRUNRELOC(x) ((typeof(x))sub_reloc_offset((unsigned long)(x)))
+
+/*
+ * OF address retrieval & translation
+ */
+
+
+/* Translate an OF address block into a CPU physical address
+ */
+#define OF_BAD_ADDR ((u64)-1)
+extern u64 of_translate_address(struct device_node *np, u32 *addr);
+
+/* Extract an address from a device, returns the region size and
+ * the address space flags too. The PCI version uses a BAR number
+ * instead of an absolute index
+ */
+extern u32 *of_get_address(struct device_node *dev, int index,
+ u64 *size, unsigned int *flags);
+extern u32 *of_get_pci_address(struct device_node *dev, int bar_no,
+ u64 *size, unsigned int *flags);
+
+/* Get an address as a resource. Note that if your address is
+ * a PIO address, the conversion will fail if the physical address
+ * can't be internally converted to an IO token with
+ * pci_address_to_pio(); that is because it is either called too early
+ * or it can't be matched to any host bridge IO space
+ */
+extern int of_address_to_resource(struct device_node *dev, int index,
+ struct resource *r);
+extern int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r);
+
+
#endif /* _PPC_PROM_H */
#endif /* __KERNEL__ */
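A short usage sketch for the new OF address helpers, subject to the comment's own caveat that PIO translation can fail; "np" is a hypothetical device node obtained elsewhere, and index 0 picks the first "reg" entry:

	struct resource r;
	void __iomem *regs;

	if (of_address_to_resource(np, 0, &r) != 0)
		return -ENODEV;		/* no usable address, or translation failed */
	regs = ioremap(r.start, r.end - r.start + 1);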
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index bd99cb53a19..fb49c0c49ea 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -4,7 +4,6 @@
#ifndef __PPC_SYSTEM_H
#define __PPC_SYSTEM_H
-#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/atomic.h>
@@ -39,7 +38,7 @@
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
-#define smp_wmb() wmb()
+#define smp_wmb() __asm__ __volatile__ ("eieio" : : : "memory")
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
@@ -74,6 +73,7 @@ extern void chrp_nvram_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
+extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
@@ -86,6 +86,14 @@ extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
extern void cvt_df(double *from, float *to, struct thread_struct *thread);
+#ifndef CONFIG_SMP
+extern void discard_lazy_cpu_state(void);
+#else
+static inline void discard_lazy_cpu_state(void)
+{
+}
+#endif
+
#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
@@ -123,6 +131,16 @@ extern struct task_struct *__switch_to(struct task_struct *,
struct task_struct *);
#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
struct thread_struct *next);
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index b3bd4f679f7..be6fefe223d 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -5,7 +5,7 @@
* include/asm-s390/atomic.h
*
* S390 version
- * Copyright (C) 1999-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Denis Joseph Barrow,
* Arnd Bergmann (arndb@de.ibm.com)
@@ -45,59 +45,59 @@ typedef struct {
#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
- __CS_LOOP(v, i, "ar");
-}
static __inline__ int atomic_add_return(int i, atomic_t * v)
{
return __CS_LOOP(v, i, "ar");
}
-static __inline__ int atomic_add_negative(int i, atomic_t * v)
-{
- return __CS_LOOP(v, i, "ar") < 0;
-}
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
- __CS_LOOP(v, i, "sr");
-}
+#define atomic_add(_i, _v) atomic_add_return(_i, _v)
+#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
+#define atomic_inc(_v) atomic_add_return(1, _v)
+#define atomic_inc_return(_v) atomic_add_return(1, _v)
+#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
+
static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
return __CS_LOOP(v, i, "sr");
}
-static __inline__ void atomic_inc(volatile atomic_t * v)
-{
- __CS_LOOP(v, 1, "ar");
-}
-static __inline__ int atomic_inc_return(volatile atomic_t * v)
-{
- return __CS_LOOP(v, 1, "ar");
-}
+#define atomic_sub(_i, _v) atomic_sub_return(_i, _v)
+#define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0)
+#define atomic_dec(_v) atomic_sub_return(1, _v)
+#define atomic_dec_return(_v) atomic_sub_return(1, _v)
+#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
-static __inline__ int atomic_inc_and_test(volatile atomic_t * v)
-{
- return __CS_LOOP(v, 1, "ar") == 0;
-}
-static __inline__ void atomic_dec(volatile atomic_t * v)
-{
- __CS_LOOP(v, 1, "sr");
-}
-static __inline__ int atomic_dec_return(volatile atomic_t * v)
-{
- return __CS_LOOP(v, 1, "sr");
-}
-static __inline__ int atomic_dec_and_test(volatile atomic_t * v)
-{
- return __CS_LOOP(v, 1, "sr") == 0;
-}
static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
{
__CS_LOOP(v, ~mask, "nr");
}
+
static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
{
__CS_LOOP(v, mask, "or");
}
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ __asm__ __volatile__(" cs %0,%3,0(%2)\n"
+ : "+d" (old), "=m" (v->counter)
+ : "a" (v), "d" (new), "m" (v->counter)
+ : "cc", "memory" );
+ return old;
+}
+
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+
+ c = atomic_read(v);
+ while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
+ c = old;
+ return c != u;
+}
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
#undef __CS_LOOP
#ifdef __s390x__
@@ -123,97 +123,67 @@ typedef struct {
#define atomic64_read(v) ((v)->counter)
#define atomic64_set(v,i) (((v)->counter) = (i))
-static __inline__ void atomic64_add(long long i, atomic64_t * v)
-{
- __CSG_LOOP(v, i, "agr");
-}
static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
{
return __CSG_LOOP(v, i, "agr");
}
-static __inline__ long long atomic64_add_negative(long long i, atomic64_t * v)
-{
- return __CSG_LOOP(v, i, "agr") < 0;
-}
-static __inline__ void atomic64_sub(long long i, atomic64_t * v)
-{
- __CSG_LOOP(v, i, "sgr");
-}
-static __inline__ void atomic64_inc(volatile atomic64_t * v)
-{
- __CSG_LOOP(v, 1, "agr");
-}
-static __inline__ long long atomic64_inc_return(volatile atomic64_t * v)
-{
- return __CSG_LOOP(v, 1, "agr");
-}
-static __inline__ long long atomic64_inc_and_test(volatile atomic64_t * v)
-{
- return __CSG_LOOP(v, 1, "agr") == 0;
-}
-static __inline__ void atomic64_dec(volatile atomic64_t * v)
-{
- __CSG_LOOP(v, 1, "sgr");
-}
-static __inline__ long long atomic64_dec_return(volatile atomic64_t * v)
-{
- return __CSG_LOOP(v, 1, "sgr");
-}
-static __inline__ long long atomic64_dec_and_test(volatile atomic64_t * v)
+#define atomic64_add(_i, _v) atomic64_add_return(_i, _v)
+#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
+#define atomic64_inc(_v) atomic64_add_return(1, _v)
+#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
+#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
+
+static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
{
- return __CSG_LOOP(v, 1, "sgr") == 0;
+ return __CSG_LOOP(v, i, "sgr");
}
+#define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v)
+#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
+#define atomic64_dec(_v) atomic64_sub_return(1, _v)
+#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
+#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
+
static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
{
__CSG_LOOP(v, ~mask, "ngr");
}
+
static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
{
__CSG_LOOP(v, mask, "ogr");
}
-#undef __CSG_LOOP
-#endif
-
-/*
- returns 0 if expected_oldval==value in *v ( swap was successful )
- returns 1 if unsuccessful.
+static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
+ long long old, long long new)
+{
+ __asm__ __volatile__(" csg %0,%3,0(%2)\n"
+ : "+d" (old), "=m" (v->counter)
+ : "a" (v), "d" (new), "m" (v->counter)
+ : "cc", "memory" );
+ return old;
+}
- This is non-portable, use bitops or spinlocks instead!
-*/
-static __inline__ int
-atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
+static __inline__ int atomic64_add_unless(atomic64_t *v,
+ long long a, long long u)
{
- int retval;
-
- __asm__ __volatile__(
- " lr %0,%3\n"
- " cs %0,%4,0(%2)\n"
- " ipm %0\n"
- " srl %0,28\n"
- "0:"
- : "=&d" (retval), "=m" (v->counter)
- : "a" (v), "d" (expected_oldval) , "d" (new_val),
- "m" (v->counter) : "cc", "memory" );
- return retval;
+ long long c, old;
+
+ c = atomic64_read(v);
+ while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c)
+ c = old;
+ return c != u;
}
-#define atomic_cmpxchg(v, o, n) (atomic_compare_and_swap((o), (n), &((v)->counter)))
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
- c = old; \
- c != (u); \
-})
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+#undef __CSG_LOOP
+#endif
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
+#include <asm-generic/atomic.h>
#endif /* __KERNEL__ */
#endif /* __ARCH_S390_ATOMIC__ */
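The hunk above rebuilds the increment/decrement family on top of the *_return primitives and adds atomic_cmpxchg()/atomic_add_unless(). The usual consumer of the derived atomic_inc_not_zero() is the "take a reference only while the object is still live" pattern; a generic sketch with a hypothetical refcounted object:

	struct my_obj {
		atomic_t refcount;
		/* ... payload ... */
	};

	struct my_obj *my_obj_get(struct my_obj *obj)
	{
		if (!atomic_inc_not_zero(&obj->refcount))
			return NULL;	/* refcount already hit zero: object is dying */
		return obj;
	}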
diff --git a/include/asm-s390/cache.h b/include/asm-s390/cache.h
index 29845378b20..e20cdd9074d 100644
--- a/include/asm-s390/cache.h
+++ b/include/asm-s390/cache.h
@@ -13,7 +13,6 @@
#define L1_CACHE_BYTES 256
#define L1_CACHE_SHIFT 8
-#define L1_CACHE_SHIFT_MAX 8 /* largest L1 which this arch supports */
#define ARCH_KMALLOC_MINALIGN 8
diff --git a/include/asm-s390/ccwdev.h b/include/asm-s390/ccwdev.h
index 3eb231af5d5..12456cb2f88 100644
--- a/include/asm-s390/ccwdev.h
+++ b/include/asm-s390/ccwdev.h
@@ -185,8 +185,5 @@ extern struct ccw_device *ccw_device_probe_console(void);
extern int _ccw_device_get_device_number(struct ccw_device *);
extern int _ccw_device_get_subchannel_number(struct ccw_device *);
-extern struct device *s390_root_dev_register(const char *);
-extern void s390_root_dev_unregister(struct device *);
-
extern void *ccw_device_get_chp_desc(struct ccw_device *, int);
#endif /* _S390_CCWDEV_H_ */
diff --git a/include/asm-s390/elf.h b/include/asm-s390/elf.h
index 372d51cccd5..710646e64f7 100644
--- a/include/asm-s390/elf.h
+++ b/include/asm-s390/elf.h
@@ -163,7 +163,7 @@ static inline int dump_regs(struct pt_regs *ptregs, elf_gregset_t *regs)
static inline int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
- struct pt_regs *ptregs = __KSTK_PTREGS(tsk);
+ struct pt_regs *ptregs = task_pt_regs(tsk);
memcpy(&regs->psw, &ptregs->psw, sizeof(regs->psw)+sizeof(regs->gprs));
memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs));
regs->orig_gpr2 = ptregs->orig_gpr2;
diff --git a/include/asm-s390/futex.h b/include/asm-s390/futex.h
index 9feff4ce142..6a332a9f099 100644
--- a/include/asm-s390/futex.h
+++ b/include/asm-s390/futex.h
@@ -1,53 +1,6 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
-#ifdef __KERNEL__
+#include <asm-generic/futex.h>
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- inc_preempt_count();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- dec_preempt_count();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-#endif
#endif
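The removed futex_atomic_op_inuser() (now provided by asm-generic/futex.h) decodes a packed operation word. For reference, a packing consistent with the shifts used above, as userspace would build it for FUTEX_WAKE_OP; the canonical macro and the FUTEX_OP_* constants live in <linux/futex.h>, and MY_FUTEX_OP here is only a sketch:

	#include <linux/futex.h>

	#define MY_FUTEX_OP(op, oparg, cmp, cmparg)			\
		((((op) & 0xf) << 28) | (((cmp) & 0xf) << 24) |	\
		 (((oparg) & 0xfff) << 12) | ((cmparg) & 0xfff))

	/* roughly: "add 1 at the second futex, wake its waiters if the old value was > 0" */
	int encoded = MY_FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0);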
diff --git a/include/asm-s390/ioctl.h b/include/asm-s390/ioctl.h
index df7394345ac..b279fe06dfe 100644
--- a/include/asm-s390/ioctl.h
+++ b/include/asm-s390/ioctl.h
@@ -1,88 +1 @@
-/*
- * include/asm-s390/ioctl.h
- *
- * S390 version
- *
- * Derived from "include/asm-i386/ioctl.h"
- */
-
-#ifndef _S390_IOCTL_H
-#define _S390_IOCTL_H
-
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and to avoid overwriting user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB -1 !
- */
-
-/*
- * The following is for compatibility across the various Linux
- * platforms. The i386 ioctl numbering scheme doesn't really enforce
- * a type field. De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here. Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS 8
-#define _IOC_TYPEBITS 8
-#define _IOC_SIZEBITS 14
-#define _IOC_DIRBITS 2
-
-#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT 0
-#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits.
- */
-#define _IOC_NONE 0U
-#define _IOC_WRITE 1U
-#define _IOC_READ 2U
-
-#define _IOC(dir,type,nr,size) \
- (((dir) << _IOC_DIRSHIFT) | \
- ((type) << _IOC_TYPESHIFT) | \
- ((nr) << _IOC_NRSHIFT) | \
- ((size) << _IOC_SIZESHIFT))
-
-/* provoke compile error for invalid uses of size argument */
-extern unsigned long __invalid_size_argument_for_IOC;
-#define _IOC_TYPECHECK(t) \
- ((sizeof(t) == sizeof(t[1]) && \
- sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
- sizeof(t) : __invalid_size_argument_for_IOC)
-
-/* used to create numbers */
-#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
-#define _IOR_BAD(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW_BAD(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR_BAD(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
-
-#endif /* _S390_IOCTL_H */
+#include <asm-generic/ioctl.h>
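The per-arch copies of the ioctl encoding macros collapse into <asm-generic/ioctl.h>; the encoding itself is unchanged. A driver-side sketch of how a command number is built (all names hypothetical):

	struct mydrv_param {
		int value;
	};

	#define MYDRV_IOC_MAGIC	'k'
	#define MYDRV_SET_PARAM	_IOW(MYDRV_IOC_MAGIC, 1, struct mydrv_param)
	/* = _IOC(_IOC_WRITE, 'k', 1, sizeof(struct mydrv_param)) */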
diff --git a/include/asm-s390/kexec.h b/include/asm-s390/kexec.h
index 54cf7d9f251..ce28ddda0f5 100644
--- a/include/asm-s390/kexec.h
+++ b/include/asm-s390/kexec.h
@@ -35,8 +35,9 @@
#define KEXEC_ARCH KEXEC_ARCH_S390
#define MAX_NOTE_BYTES 1024
-typedef u32 note_buf_t[MAX_NOTE_BYTES/4];
-extern note_buf_t crash_notes[];
+/* Provide a dummy definition to avoid build failures. */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+ struct pt_regs *oldregs) { }
#endif /*_S390_KEXEC_H */
diff --git a/include/asm-s390/mman.h b/include/asm-s390/mman.h
index ea86bd12204..c8d5409b5d5 100644
--- a/include/asm-s390/mman.h
+++ b/include/asm-s390/mman.h
@@ -43,6 +43,7 @@
#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
+#define MADV_REMOVE 0x5 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-s390/mutex.h b/include/asm-s390/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/include/asm-s390/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 4ec652ebb3b..c5cbc4bd841 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -191,10 +191,10 @@ extern void show_registers(struct pt_regs *regs);
extern void show_trace(struct task_struct *task, unsigned long *sp);
unsigned long get_wchan(struct task_struct *p);
-#define __KSTK_PTREGS(tsk) ((struct pt_regs *) \
- ((unsigned long) tsk->thread_info + THREAD_SIZE - sizeof(struct pt_regs)))
-#define KSTK_EIP(tsk) (__KSTK_PTREGS(tsk)->psw.addr)
-#define KSTK_ESP(tsk) (__KSTK_PTREGS(tsk)->gprs[15])
+#define task_pt_regs(tsk) ((struct pt_regs *) \
+ (task_stack_page(tsk) + THREAD_SIZE) - 1)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->psw.addr)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->gprs[15])
/*
* Give up the time slice of the virtual PU.
diff --git a/include/asm-s390/qdio.h b/include/asm-s390/qdio.h
index 0ddf0a8ef8d..7bc15f0231d 100644
--- a/include/asm-s390/qdio.h
+++ b/include/asm-s390/qdio.h
@@ -195,12 +195,14 @@ struct qdr {
/*
* queue information block (QIB)
*/
-#define QIB_AC_INBOUND_PCI_SUPPORTED 0x80
-#define QIB_AC_OUTBOUND_PCI_SUPPORTED 0x40
+#define QIB_AC_INBOUND_PCI_SUPPORTED 0x80
+#define QIB_AC_OUTBOUND_PCI_SUPPORTED 0x40
+#define QIB_RFLAGS_ENABLE_QEBSM 0x80
+
struct qib {
unsigned int qfmt : 8; /* queue format */
unsigned int pfmt : 8; /* impl. dep. parameter format */
- unsigned int res1 : 8; /* reserved */
+ unsigned int rflags : 8; /* QEBSM */
unsigned int ac : 8; /* adapter characteristics */
unsigned int res2; /* reserved */
#ifdef QDIO_32_BIT
diff --git a/include/asm-s390/s390_rdev.h b/include/asm-s390/s390_rdev.h
new file mode 100644
index 00000000000..3ad78f2b9c4
--- /dev/null
+++ b/include/asm-s390/s390_rdev.h
@@ -0,0 +1,15 @@
+/*
+ * include/asm-s390/ccwdev.h
+ *
+ * Copyright (C) 2002,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Cornelia Huck <cohuck@de.ibm.com>
+ * Carsten Otte <cotte@de.ibm.com>
+ *
+ * Interface for s390 root device
+ */
+
+#ifndef _S390_RDEV_H_
+#define _S390_RDEV_H_
+extern struct device *s390_root_dev_register(const char *);
+extern void s390_root_dev_unregister(struct device *);
+#endif /* _S390_RDEV_H_ */
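The root-device helpers move out of ccwdev.h (see the removal earlier in this series) into their own header. A usage sketch, with "mydrv" as a placeholder name and on the assumption, not guaranteed by the header itself, that failure is reported via the ERR_PTR convention:

	struct device *root;

	root = s390_root_dev_register("mydrv");
	if (IS_ERR(root))
		return PTR_ERR(root);	/* assumption: ERR_PTR on failure */
	/* ... register child devices under "root" ... */
	s390_root_dev_unregister(root);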
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 864cae7e1fd..c7c3a9ad593 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -104,6 +104,16 @@ static inline void restore_access_regs(unsigned int *acrs)
prev = __switch_to(prev,next); \
} while (0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_user_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *);
diff --git a/include/asm-s390/thread_info.h b/include/asm-s390/thread_info.h
index 6c18a3f2431..f3797a52c4e 100644
--- a/include/asm-s390/thread_info.h
+++ b/include/asm-s390/thread_info.h
@@ -81,8 +81,6 @@ static inline struct thread_info *current_thread_info(void)
#define alloc_thread_info(tsk) ((struct thread_info *) \
__get_free_pages(GFP_KERNEL,THREAD_ORDER))
#define free_thread_info(ti) free_pages((unsigned long) (ti),THREAD_ORDER)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#endif
diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h
index 10a619da476..be104f21c70 100644
--- a/include/asm-s390/uaccess.h
+++ b/include/asm-s390/uaccess.h
@@ -61,8 +61,10 @@
#define segment_eq(a,b) ((a).ar4 == (b).ar4)
-#define __access_ok(addr,size) (1)
-
+static inline int __access_ok(const void *addr, unsigned long size)
+{
+ return 1;
+}
#define access_ok(type,addr,size) __access_ok(addr,size)
/*
@@ -206,25 +208,25 @@ extern int __put_user_bad(void) __attribute__((noreturn));
case 1: { \
unsigned char __x; \
__get_user_asm(__x, ptr, __gu_err); \
- (x) = (__typeof__(*(ptr))) __x; \
+ (x) = *(__typeof__(*(ptr)) *) &__x; \
break; \
}; \
case 2: { \
unsigned short __x; \
__get_user_asm(__x, ptr, __gu_err); \
- (x) = (__typeof__(*(ptr))) __x; \
+ (x) = *(__typeof__(*(ptr)) *) &__x; \
break; \
}; \
case 4: { \
unsigned int __x; \
__get_user_asm(__x, ptr, __gu_err); \
- (x) = (__typeof__(*(ptr))) __x; \
+ (x) = *(__typeof__(*(ptr)) *) &__x; \
break; \
}; \
case 8: { \
unsigned long long __x; \
__get_user_asm(__x, ptr, __gu_err); \
- (x) = (__typeof__(*(ptr))) __x; \
+ (x) = *(__typeof__(*(ptr)) *) &__x; \
break; \
}; \
default: \
diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h
index f97d92691f1..2861cdc243a 100644
--- a/include/asm-s390/unistd.h
+++ b/include/asm-s390/unistd.h
@@ -539,7 +539,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
-# ifdef CONFIG_ARCH_S390_31
+# ifndef CONFIG_64BIT
# define __ARCH_WANT_STAT64
# define __ARCH_WANT_SYS_TIME
# endif
diff --git a/include/asm-s390/vtoc.h b/include/asm-s390/vtoc.h
index 41d369f38b0..d1de5b7ebb0 100644
--- a/include/asm-s390/vtoc.h
+++ b/include/asm-s390/vtoc.h
@@ -176,4 +176,28 @@ struct vtoc_format7_label
struct vtoc_cchhb DS7PTRDS; /* pointer to next FMT7 DSCB */
} __attribute__ ((packed));
+struct vtoc_cms_label {
+ u8 label_id[4]; /* Label identifier */
+ u8 vol_id[6]; /* Volid */
+ u16 version_id; /* Version identifier */
+ u32 block_size; /* Disk block size */
+ u32 origin_ptr; /* Disk origin pointer */
+ u32 usable_count; /* Number of usable cylinders/blocks */
+ u32 formatted_count; /* Maximum number of formatted cylinders/
+ * blocks */
+ u32 block_count; /* Disk size in CMS blocks */
+ u32 used_count; /* Number of CMS blocks in use */
+ u32 fst_size; /* File Status Table (FST) size */
+ u32 fst_count; /* Number of FSTs per CMS block */
+ u8 format_date[6]; /* Disk FORMAT date */
+ u8 reserved1[2];
+ u32 disk_offset; /* Disk offset when reserved*/
+ u32 map_block; /* Allocation Map Block with next hole */
+ u32 hblk_disp; /* Displacement into HBLK data of next hole */
+ u32 user_disp; /* Displacement into user part of Allocation
+ * map */
+ u8 reserved2[4];
+ u8 segment_name[8]; /* Name of shared segment */
+} __attribute__ ((packed));
+
#endif /* _ASM_S390_VTOC_H */
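A sketch of consuming the new CMS label layout once the label block has been read into a buffer; which block holds the label and the magic/EBCDIC handling are DASD driver details not shown by this hunk, and "buf" is a hypothetical buffer of at least sizeof(struct vtoc_cms_label) bytes:

	struct vtoc_cms_label *label = (struct vtoc_cms_label *) buf;

	printk(KERN_INFO "CMS volume: %u-byte blocks, %u of %u blocks in use\n",
	       label->block_size, label->used_count, label->block_count);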
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index aabfd334462..fb627de217f 100644
--- a/include/asm-sh/atomic.h
+++ b/include/asm-sh/atomic.h
@@ -101,6 +101,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int ret;
@@ -140,4 +142,5 @@ static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
+#include <asm-generic/atomic.h>
#endif /* __ASM_SH_ATOMIC_H */
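atomic_xchg() is newly wired to a plain xchg() on the counter; a common use is a one-shot flag that only the first caller wins. A sketch, where "triggered" and do_one_time_setup() are hypothetical:

	static atomic_t triggered = ATOMIC_INIT(0);

	void maybe_setup(void)
	{
		if (atomic_xchg(&triggered, 1) == 0)
			do_one_time_setup();	/* runs exactly once */
	}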
diff --git a/include/asm-sh/cache.h b/include/asm-sh/cache.h
index 9b4dd6d8212..656fdfe9e8b 100644
--- a/include/asm-sh/cache.h
+++ b/include/asm-sh/cache.h
@@ -22,8 +22,6 @@
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
-#define L1_CACHE_SHIFT_MAX 5 /* largest L1 which this arch supports */
-
struct cache_info {
unsigned int ways;
unsigned int sets;
diff --git a/include/asm-sh/futex.h b/include/asm-sh/futex.h
index 9feff4ce142..6a332a9f099 100644
--- a/include/asm-sh/futex.h
+++ b/include/asm-sh/futex.h
@@ -1,53 +1,6 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
-#ifdef __KERNEL__
+#include <asm-generic/futex.h>
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- inc_preempt_count();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- dec_preempt_count();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-#endif
#endif
diff --git a/include/asm-sh/ioctl.h b/include/asm-sh/ioctl.h
index 524700e84ac..b279fe06dfe 100644
--- a/include/asm-sh/ioctl.h
+++ b/include/asm-sh/ioctl.h
@@ -1,75 +1 @@
-/* $Id: ioctl.h,v 1.1.1.1 2001/10/15 20:45:09 mrbrown Exp $
- *
- * linux/ioctl.h for Linux by H.H. Bergman.
- */
-
-#ifndef __ASM_SH_IOCTL_H
-#define __ASM_SH_IOCTL_H
-
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and to avoid overwriting user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB -1 !
- */
-
-/*
- * The following is for compatibility across the various Linux
- * platforms. The i386 ioctl numbering scheme doesn't really enforce
- * a type field. De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here. Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS 8
-#define _IOC_TYPEBITS 8
-#define _IOC_SIZEBITS 14
-#define _IOC_DIRBITS 2
-
-#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT 0
-#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits.
- */
-#define _IOC_NONE 0U
-#define _IOC_WRITE 1U
-#define _IOC_READ 2U
-
-#define _IOC(dir,type,nr,size) \
- (((dir) << _IOC_DIRSHIFT) | \
- ((type) << _IOC_TYPESHIFT) | \
- ((nr) << _IOC_NRSHIFT) | \
- ((size) << _IOC_SIZESHIFT))
-
-/* used to create numbers */
-#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
-
-#endif /* __ASM_SH_IOCTL_H */
+#include <asm-generic/ioctl.h>
diff --git a/include/asm-sh/mman.h b/include/asm-sh/mman.h
index 3ebab5f79db..693bd55a371 100644
--- a/include/asm-sh/mman.h
+++ b/include/asm-sh/mman.h
@@ -35,6 +35,7 @@
#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
+#define MADV_REMOVE 0x5 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-sh/mutex.h b/include/asm-sh/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/include/asm-sh/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-sh/ptrace.h b/include/asm-sh/ptrace.h
index 0f75e16a741..792fc35bd62 100644
--- a/include/asm-sh/ptrace.h
+++ b/include/asm-sh/ptrace.h
@@ -91,6 +91,16 @@ struct pt_dspregs {
#define instruction_pointer(regs) ((regs)->pc)
extern void show_regs(struct pt_regs *);
+#ifdef CONFIG_SH_DSP
+#define task_pt_regs(task) \
+ ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \
+ - sizeof(struct pt_dspregs) - sizeof(unsigned long)) - 1)
+#else
+#define task_pt_regs(task) \
+ ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \
+ - sizeof(unsigned long)) - 1)
+#endif
+
static inline unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 28a3c2d8bcd..bb0330499bd 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -57,6 +57,16 @@
last = __last; \
} while (0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
#define nop() __asm__ __volatile__ ("nop")
diff --git a/include/asm-sh/thread_info.h b/include/asm-sh/thread_info.h
index 46080cefaff..85f0c11b431 100644
--- a/include/asm-sh/thread_info.h
+++ b/include/asm-sh/thread_info.h
@@ -60,8 +60,6 @@ static inline struct thread_info *current_thread_info(void)
#define THREAD_SIZE (2*PAGE_SIZE)
#define alloc_thread_info(ti) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#else /* !__ASSEMBLY__ */
diff --git a/include/asm-sh64/atomic.h b/include/asm-sh64/atomic.h
index 927a2bc27b3..28f2ea9b567 100644
--- a/include/asm-sh64/atomic.h
+++ b/include/asm-sh64/atomic.h
@@ -113,6 +113,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int ret;
@@ -152,4 +154,5 @@ static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
+#include <asm-generic/atomic.h>
#endif /* __ASM_SH64_ATOMIC_H */
diff --git a/include/asm-sh64/cache.h b/include/asm-sh64/cache.h
index f54e85e8a47..a4f36f0036e 100644
--- a/include/asm-sh64/cache.h
+++ b/include/asm-sh64/cache.h
@@ -20,8 +20,6 @@
#define L1_CACHE_ALIGN_MASK (~(L1_CACHE_BYTES - 1))
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES - 1)) & L1_CACHE_ALIGN_MASK)
#define L1_CACHE_SIZE_BYTES (L1_CACHE_BYTES << 10)
-/* Largest L1 which this arch supports */
-#define L1_CACHE_SHIFT_MAX 5
#ifdef MODULE
#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
diff --git a/include/asm-sh64/futex.h b/include/asm-sh64/futex.h
index 9feff4ce142..6a332a9f099 100644
--- a/include/asm-sh64/futex.h
+++ b/include/asm-sh64/futex.h
@@ -1,53 +1,6 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
-#ifdef __KERNEL__
+#include <asm-generic/futex.h>
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- inc_preempt_count();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- dec_preempt_count();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-#endif
#endif
diff --git a/include/asm-sh64/io.h b/include/asm-sh64/io.h
index cfafaa73b2b..dee4f77929a 100644
--- a/include/asm-sh64/io.h
+++ b/include/asm-sh64/io.h
@@ -143,12 +143,12 @@ extern unsigned long pciio_virt;
* Change virtual addresses to physical addresses and vv.
* These are trivial on the 1:1 Linux/SuperH mapping
*/
-extern __inline__ unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(volatile void * address)
{
return __pa(address);
}
-extern __inline__ void * phys_to_virt(unsigned long address)
+static inline void * phys_to_virt(unsigned long address)
{
return __va(address);
}
@@ -156,12 +156,12 @@ extern __inline__ void * phys_to_virt(unsigned long address)
extern void * __ioremap(unsigned long phys_addr, unsigned long size,
unsigned long flags);
-extern __inline__ void * ioremap(unsigned long phys_addr, unsigned long size)
+static inline void * ioremap(unsigned long phys_addr, unsigned long size)
{
return __ioremap(phys_addr, size, 1);
}
-extern __inline__ void * ioremap_nocache (unsigned long phys_addr, unsigned long size)
+static inline void * ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
return __ioremap(phys_addr, size, 0);
}
diff --git a/include/asm-sh64/ioctl.h b/include/asm-sh64/ioctl.h
index c089a6fb78e..b279fe06dfe 100644
--- a/include/asm-sh64/ioctl.h
+++ b/include/asm-sh64/ioctl.h
@@ -1,83 +1 @@
-#ifndef __ASM_SH64_IOCTL_H
-#define __ASM_SH64_IOCTL_H
-
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * include/asm-sh64/ioctl.h
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- *
- * linux/ioctl.h for Linux by H.H. Bergman.
- *
- */
-
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and to avoid overwriting user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB -1 !
- */
-
-/*
- * The following is for compatibility across the various Linux
- * platforms. The i386 ioctl numbering scheme doesn't really enforce
- * a type field. De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here. Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS 8
-#define _IOC_TYPEBITS 8
-#define _IOC_SIZEBITS 14
-#define _IOC_DIRBITS 2
-
-#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT 0
-#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits.
- */
-#define _IOC_NONE 0U
-#define _IOC_WRITE 1U
-#define _IOC_READ 2U
-
-#define _IOC(dir,type,nr,size) \
- (((dir) << _IOC_DIRSHIFT) | \
- ((type) << _IOC_TYPESHIFT) | \
- ((nr) << _IOC_NRSHIFT) | \
- ((size) << _IOC_SIZESHIFT))
-
-/* used to create numbers */
-#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
-
-#endif /* __ASM_SH64_IOCTL_H */
+#include <asm-generic/ioctl.h>
diff --git a/include/asm-sh64/mmu_context.h b/include/asm-sh64/mmu_context.h
index f062e151327..991cfda4cdf 100644
--- a/include/asm-sh64/mmu_context.h
+++ b/include/asm-sh64/mmu_context.h
@@ -50,7 +50,7 @@ extern pgd_t *mmu_pdtp_cache;
*/
#define MMU_VPN_MASK 0xfffff000
-extern __inline__ void
+static inline void
get_new_mmu_context(struct mm_struct *mm)
{
extern void flush_tlb_all(void);
diff --git a/include/asm-sh64/mutex.h b/include/asm-sh64/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/include/asm-sh64/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-sh64/pgalloc.h b/include/asm-sh64/pgalloc.h
index b25f5df5535..678251ac1db 100644
--- a/include/asm-sh64/pgalloc.h
+++ b/include/asm-sh64/pgalloc.h
@@ -38,14 +38,14 @@ static inline void pgd_init(unsigned long page)
* if any.
*/
-extern __inline__ pgd_t *get_pgd_slow(void)
+static inline pgd_t *get_pgd_slow(void)
{
unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
pgd_t *ret = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL);
return ret;
}
-extern __inline__ pgd_t *get_pgd_fast(void)
+static inline pgd_t *get_pgd_fast(void)
{
unsigned long *ret;
@@ -62,14 +62,14 @@ extern __inline__ pgd_t *get_pgd_fast(void)
return (pgd_t *)ret;
}
-extern __inline__ void free_pgd_fast(pgd_t *pgd)
+static inline void free_pgd_fast(pgd_t *pgd)
{
*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
pgd_quicklist = (unsigned long *) pgd;
pgtable_cache_size++;
}
-extern __inline__ void free_pgd_slow(pgd_t *pgd)
+static inline void free_pgd_slow(pgd_t *pgd)
{
kfree((void *)pgd);
}
@@ -77,7 +77,7 @@ extern __inline__ void free_pgd_slow(pgd_t *pgd)
extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);
-extern __inline__ pte_t *get_pte_fast(void)
+static inline pte_t *get_pte_fast(void)
{
unsigned long *ret;
@@ -89,7 +89,7 @@ extern __inline__ pte_t *get_pte_fast(void)
return (pte_t *)ret;
}
-extern __inline__ void free_pte_fast(pte_t *pte)
+static inline void free_pte_fast(pte_t *pte)
{
*(unsigned long *)pte = (unsigned long) pte_quicklist;
pte_quicklist = (unsigned long *) pte;
@@ -167,7 +167,7 @@ static __inline__ void pmd_free(pmd_t *pmd)
extern int do_check_pgt_cache(int, int);
-extern inline void set_pgdir(unsigned long address, pgd_t entry)
+static inline void set_pgdir(unsigned long address, pgd_t entry)
{
struct task_struct * p;
pgd_t *pgd;
diff --git a/include/asm-sh64/pgtable.h b/include/asm-sh64/pgtable.h
index a1906a772df..57af6b3eb27 100644
--- a/include/asm-sh64/pgtable.h
+++ b/include/asm-sh64/pgtable.h
@@ -421,18 +421,18 @@ static inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_WRITE; }
-extern inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_READ)); return pte; }
-extern inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
-extern inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_EXECUTE)); return pte; }
-extern inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
-extern inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
+static inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_READ)); return pte; }
+static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
+static inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_EXECUTE)); return pte; }
+static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
+static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
-extern inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_READ)); return pte; }
-extern inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
-extern inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_EXECUTE)); return pte; }
-extern inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
-extern inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
-extern inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
+static inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_READ)); return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
+static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_EXECUTE)); return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
/*
@@ -456,7 +456,7 @@ extern inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })
-extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
typedef pte_t *pte_addr_t;
diff --git a/include/asm-sh64/processor.h b/include/asm-sh64/processor.h
index a51bd41e6fb..1bf252dad82 100644
--- a/include/asm-sh64/processor.h
+++ b/include/asm-sh64/processor.h
@@ -228,7 +228,7 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
* FPU lazy state save handling.
*/
-extern __inline__ void release_fpu(void)
+static inline void release_fpu(void)
{
unsigned long long __dummy;
@@ -240,7 +240,7 @@ extern __inline__ void release_fpu(void)
: "r" (SR_FD));
}
-extern __inline__ void grab_fpu(void)
+static inline void grab_fpu(void)
{
unsigned long long __dummy;
diff --git a/include/asm-sh64/system.h b/include/asm-sh64/system.h
index 42510e496eb..3002e988180 100644
--- a/include/asm-sh64/system.h
+++ b/include/asm-sh64/system.h
@@ -132,7 +132,7 @@ static __inline__ void local_irq_disable(void)
(flags != 0); \
})
-extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
+static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
{
unsigned long flags, retval;
@@ -143,7 +143,7 @@ extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
return retval;
}
-extern __inline__ unsigned long xchg_u8(volatile unsigned char * m, unsigned long val)
+static inline unsigned long xchg_u8(volatile unsigned char * m, unsigned long val)
{
unsigned long flags, retval;
diff --git a/include/asm-sh64/thread_info.h b/include/asm-sh64/thread_info.h
index 10f024c6a2e..1f825cb163c 100644
--- a/include/asm-sh64/thread_info.h
+++ b/include/asm-sh64/thread_info.h
@@ -66,8 +66,6 @@ static inline struct thread_info *current_thread_info(void)
#define alloc_thread_info(ti) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#endif /* __ASSEMBLY__ */
diff --git a/include/asm-sh64/tlbflush.h b/include/asm-sh64/tlbflush.h
index 15c0719eecc..e45beadc29e 100644
--- a/include/asm-sh64/tlbflush.h
+++ b/include/asm-sh64/tlbflush.h
@@ -20,7 +20,7 @@ extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
-extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
}
diff --git a/include/asm-sh64/uaccess.h b/include/asm-sh64/uaccess.h
index 56aa3cf0f27..f4936d8fa61 100644
--- a/include/asm-sh64/uaccess.h
+++ b/include/asm-sh64/uaccess.h
@@ -287,7 +287,7 @@ __sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count);
*/
extern long __strnlen_user(const char *__s, long __n);
-extern __inline__ long strnlen_user(const char *s, long n)
+static inline long strnlen_user(const char *s, long n)
{
if (!__addr_ok(s))
return 0;
diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h
index 62bec7ad271..e1033170bd3 100644
--- a/include/asm-sparc/atomic.h
+++ b/include/asm-sparc/atomic.h
@@ -20,6 +20,7 @@ typedef struct { volatile int counter; } atomic_t;
extern int __atomic_add_return(int, atomic_t *);
extern int atomic_cmpxchg(atomic_t *, int, int);
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
extern int atomic_add_unless(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);
@@ -159,4 +160,5 @@ static inline int __atomic24_sub(int i, atomic24_t *v)
#endif /* !(__KERNEL__) */
+#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC_ATOMIC__) */
diff --git a/include/asm-sparc/cache.h b/include/asm-sparc/cache.h
index a10522cb21b..cb971e88aea 100644
--- a/include/asm-sparc/cache.h
+++ b/include/asm-sparc/cache.h
@@ -13,7 +13,6 @@
#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES 32
#define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
-#define L1_CACHE_SHIFT_MAX 5 /* largest L1 which this arch supports */
#define SMP_CACHE_BYTES 32
diff --git a/include/asm-sparc/futex.h b/include/asm-sparc/futex.h
index 9feff4ce142..6a332a9f099 100644
--- a/include/asm-sparc/futex.h
+++ b/include/asm-sparc/futex.h
@@ -1,53 +1,6 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
-#ifdef __KERNEL__
+#include <asm-generic/futex.h>
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- inc_preempt_count();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- dec_preempt_count();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-#endif
#endif
diff --git a/include/asm-sparc/mman.h b/include/asm-sparc/mman.h
index 138eb81dd70..98435ad8619 100644
--- a/include/asm-sparc/mman.h
+++ b/include/asm-sparc/mman.h
@@ -54,6 +54,7 @@
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
#define MADV_FREE 0x5 /* (Solaris) contents can be freed */
+#define MADV_REMOVE 0x6 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-sparc/mutex.h b/include/asm-sparc/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/include/asm-sparc/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
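For reference, the atomic_xchg() based alternative named in the TODO above amounts to very little code. A sketch along the lines of asm-generic/mutex-xchg.h, illustrative only, with the calling convention assumed from that header:

static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	/* 1 means unlocked; anything else means the lock is contended */
	if (unlikely(atomic_xchg(count, 0) != 1))
		fail_fn(count);		/* slowpath handled in kernel/mutex.c */
}

static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	/* restore 1; a non-zero old value means waiters may be queued */
	if (unlikely(atomic_xchg(count, 1) != 0))
		fail_fn(count);
}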
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 1f6b71f9e1b..58dd162927b 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -155,7 +155,7 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
"here:\n" \
: "=&r" (last) \
: "r" (&(current_set[hard_smp_processor_id()])), \
- "r" ((next)->thread_info), \
+ "r" (task_thread_info(next)), \
"i" (TI_KPSR), \
"i" (TI_KSP), \
"i" (TI_TASK) \
@@ -166,6 +166,16 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
} while(0)
/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
+/*
* Changing the IRQ level on the Sparc.
*/
extern void local_irq_restore(unsigned long);
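The empty sched_cacheflush() added above is the sparc placeholder. On architectures that have a write-back-and-invalidate instruction the same hook is a one-liner; for comparison (not part of this sparc change), the x86 flavour is roughly:

static inline void sched_cacheflush(void)
{
	wbinvd();	/* write back and invalidate all cache lines */
}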
diff --git a/include/asm-sparc/thread_info.h b/include/asm-sparc/thread_info.h
index ff6ccb3d24c..65f060b040a 100644
--- a/include/asm-sparc/thread_info.h
+++ b/include/asm-sparc/thread_info.h
@@ -92,9 +92,6 @@ BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info, void)
BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
#define free_thread_info(ti) BTFIXUP_CALL(free_thread_info)(ti)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
-
#endif /* __ASSEMBLY__ */
/*
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index 3789fe31599..25256bdc8aa 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -72,6 +72,7 @@ extern int atomic64_sub_ret(int, atomic64_t *);
#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_add_unless(v, a, u) \
({ \
@@ -96,4 +97,5 @@ extern int atomic64_sub_ret(int, atomic64_t *);
#define smp_mb__after_atomic_inc() barrier()
#endif
+#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC64_ATOMIC__) */
diff --git a/include/asm-sparc64/cache.h b/include/asm-sparc64/cache.h
index ade5ec3bfd5..f7d35a2ae9b 100644
--- a/include/asm-sparc64/cache.h
+++ b/include/asm-sparc64/cache.h
@@ -9,7 +9,6 @@
#define L1_CACHE_BYTES 32 /* Two 16-byte sub-blocks per line. */
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
-#define L1_CACHE_SHIFT_MAX 5 /* largest L1 which this arch supports */
#define SMP_CACHE_BYTES_SHIFT 6
#define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT) /* L2 cache line size. */
diff --git a/include/asm-sparc64/elf.h b/include/asm-sparc64/elf.h
index 91458118277..69539a8ab83 100644
--- a/include/asm-sparc64/elf.h
+++ b/include/asm-sparc64/elf.h
@@ -119,7 +119,7 @@ typedef struct {
#endif
#define ELF_CORE_COPY_TASK_REGS(__tsk, __elf_regs) \
- ({ ELF_CORE_COPY_REGS((*(__elf_regs)), (__tsk)->thread_info->kregs); 1; })
+ ({ ELF_CORE_COPY_REGS((*(__elf_regs)), task_pt_regs(__tsk)); 1; })
/*
* This is used to ensure we don't load something for the wrong architecture.
diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h
index 9feff4ce142..6a332a9f099 100644
--- a/include/asm-sparc64/futex.h
+++ b/include/asm-sparc64/futex.h
@@ -1,53 +1,6 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
-#ifdef __KERNEL__
+#include <asm-generic/futex.h>
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- inc_preempt_count();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- dec_preempt_count();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-#endif
#endif
diff --git a/include/asm-sparc64/kprobes.h b/include/asm-sparc64/kprobes.h
index 7ba845320f5..e4efe652b54 100644
--- a/include/asm-sparc64/kprobes.h
+++ b/include/asm-sparc64/kprobes.h
@@ -12,6 +12,7 @@ typedef u32 kprobe_opcode_t;
#define MAX_INSN_SIZE 2
#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
+#define arch_remove_kprobe(p) do {} while (0)
/* Architecture specific copy of original instruction*/
struct arch_specific_insn {
@@ -38,15 +39,6 @@ struct kprobe_ctlblk {
struct prev_kprobe prev_kprobe;
};
-#ifdef CONFIG_KPROBES
extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
-#else /* !CONFIG_KPROBES */
-static inline int kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data)
-{
- return 0;
-}
-#endif
-
#endif /* _SPARC64_KPROBES_H */
diff --git a/include/asm-sparc64/mman.h b/include/asm-sparc64/mman.h
index 01cecf54357..cb4b6156194 100644
--- a/include/asm-sparc64/mman.h
+++ b/include/asm-sparc64/mman.h
@@ -54,6 +54,7 @@
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
#define MADV_FREE 0x5 /* (Solaris) contents can be freed */
+#define MADV_REMOVE 0x6 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 08ba72d7722..57ee7b30618 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -60,7 +60,7 @@ do { \
register unsigned long pgd_cache asm("o4"); \
paddr = __pa((__mm)->pgd); \
pgd_cache = 0UL; \
- if ((__tsk)->thread_info->flags & _TIF_32BIT) \
+ if (task_thread_info(__tsk)->flags & _TIF_32BIT) \
pgd_cache = get_pgd_cache((__mm)->pgd); \
__asm__ __volatile__("wrpr %%g0, 0x494, %%pstate\n\t" \
"mov %3, %%g4\n\t" \
diff --git a/include/asm-sparc64/mutex.h b/include/asm-sparc64/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/include/asm-sparc64/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index 3169f3e2237..cd8d9b4c865 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -186,8 +186,9 @@ extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
extern unsigned long get_wchan(struct task_struct *task);
-#define KSTK_EIP(tsk) ((tsk)->thread_info->kregs->tpc)
-#define KSTK_ESP(tsk) ((tsk)->thread_info->kregs->u_regs[UREG_FP])
+#define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP])
#define cpu_relax() barrier()
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index b5417529f6f..af254e58183 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -193,11 +193,7 @@ do { \
 * not preserve its value. Hairy, but it lets us remove 2 loads
* and 2 stores in this critical code path. -DaveM
*/
-#if __GNUC__ >= 3
#define EXTRA_CLOBBER ,"%l1"
-#else
-#define EXTRA_CLOBBER
-#endif
#define switch_to(prev, next, last) \
do { if (test_thread_flag(TIF_PERFCTR)) { \
unsigned long __tmp; \
@@ -212,7 +208,7 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
/* If you are tempted to conditionalize the following */ \
/* so that ASI is only written if it changes, think again. */ \
__asm__ __volatile__("wr %%g0, %0, %%asi" \
- : : "r" (__thread_flag_byte_ptr(next->thread_info)[TI_FLAG_BYTE_CURRENT_DS]));\
+ : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
__asm__ __volatile__( \
"mov %%g4, %%g7\n\t" \
"wrpr %%g0, 0x95, %%pstate\n\t" \
@@ -242,7 +238,7 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
"b,a ret_from_syscall\n\t" \
"1:\n\t" \
: "=&r" (last) \
- : "0" (next->thread_info), \
+ : "0" (task_thread_info(next)), \
"i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
"i" (TI_CWP), "i" (TI_TASK) \
: "cc", \
@@ -257,6 +253,16 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
} \
} while(0)
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
unsigned long tmp1, tmp2;
diff --git a/include/asm-um/cache.h b/include/asm-um/cache.h
index a10602a5b2d..3d058707552 100644
--- a/include/asm-um/cache.h
+++ b/include/asm-um/cache.h
@@ -13,9 +13,6 @@
# define L1_CACHE_SHIFT 5
#endif
-/* XXX: this is valid for x86 and x86_64. */
-#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
-
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#endif
diff --git a/include/asm-um/futex.h b/include/asm-um/futex.h
index 142ee2d8e0f..6a332a9f099 100644
--- a/include/asm-um/futex.h
+++ b/include/asm-um/futex.h
@@ -1,12 +1,6 @@
-#ifndef __UM_FUTEX_H
-#define __UM_FUTEX_H
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/system.h>
-#include <asm/processor.h>
-#include <asm/uaccess.h>
-
-#include "asm/arch/futex.h"
+#include <asm-generic/futex.h>
#endif
diff --git a/include/asm-um/mutex.h b/include/asm-um/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/include/asm-um/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-um/processor-generic.h b/include/asm-um/processor-generic.h
index 075771c371f..da07a69ce82 100644
--- a/include/asm-um/processor-generic.h
+++ b/include/asm-um/processor-generic.h
@@ -89,7 +89,6 @@ extern struct task_struct *alloc_task_struct(void);
extern void release_thread(struct task_struct *);
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-extern void dump_thread(struct pt_regs *regs, struct user *u);
static inline void prepare_to_copy(struct task_struct *tsk)
{
diff --git a/include/asm-um/rwsem.h b/include/asm-um/rwsem.h
index 661c0e54702..b5fc449dc86 100644
--- a/include/asm-um/rwsem.h
+++ b/include/asm-um/rwsem.h
@@ -1,10 +1,6 @@
#ifndef __UM_RWSEM_H__
#define __UM_RWSEM_H__
-#if __GNUC__ < 2 || (__GNUC__ == 2 && __GNUC_MINOR__ < 96)
-#define __builtin_expect(exp,c) (exp)
-#endif
-
#include "asm/arch/rwsem.h"
#endif
diff --git a/include/asm-um/thread_info.h b/include/asm-um/thread_info.h
index 97267f059ef..705c71972c3 100644
--- a/include/asm-um/thread_info.h
+++ b/include/asm-um/thread_info.h
@@ -56,9 +56,6 @@ static inline struct thread_info *current_thread_info(void)
((struct thread_info *) kmalloc(THREAD_SIZE, GFP_KERNEL))
#define free_thread_info(ti) kfree(ti)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
-
#endif
#define PREEMPT_ACTIVE 0x10000000
diff --git a/include/asm-v850/atomic.h b/include/asm-v850/atomic.h
index bede3172ce7..166df00457e 100644
--- a/include/asm-v850/atomic.h
+++ b/include/asm-v850/atomic.h
@@ -104,6 +104,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int ret;
@@ -126,4 +128,5 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
+#include <asm-generic/atomic.h>
#endif /* __V850_ATOMIC_H__ */
diff --git a/include/asm-v850/cache.h b/include/asm-v850/cache.h
index cbf9096e851..8832c7ea324 100644
--- a/include/asm-v850/cache.h
+++ b/include/asm-v850/cache.h
@@ -23,6 +23,4 @@
#define L1_CACHE_SHIFT 4
#endif
-#define L1_CACHE_SHIFT_MAX L1_CACHE_SHIFT
-
#endif /* __V850_CACHE_H__ */
diff --git a/include/asm-v850/futex.h b/include/asm-v850/futex.h
index 9feff4ce142..6a332a9f099 100644
--- a/include/asm-v850/futex.h
+++ b/include/asm-v850/futex.h
@@ -1,53 +1,6 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
-#ifdef __KERNEL__
+#include <asm-generic/futex.h>
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- inc_preempt_count();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- dec_preempt_count();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-#endif
#endif
diff --git a/include/asm-v850/ioctl.h b/include/asm-v850/ioctl.h
index 1765df6c7b8..b279fe06dfe 100644
--- a/include/asm-v850/ioctl.h
+++ b/include/asm-v850/ioctl.h
@@ -1,80 +1 @@
-/* $Id: ioctl.h,v 1.1 2002/09/28 14:58:41 gerg Exp $
- *
- * linux/ioctl.h for Linux by H.H. Bergman.
- */
-
-#ifndef _V850_IOCTL_H
-#define _V850_IOCTL_H
-
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and to avoid overwriting user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB -1 !
- */
-
-/*
- * I don't really have any idea about what this should look like, so
- * for the time being, this is heavily based on the PC definitions.
- */
-
-/*
- * The following is for compatibility across the various Linux
- * platforms. The i386 ioctl numbering scheme doesn't really enforce
- * a type field. De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here. Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS 8
-#define _IOC_TYPEBITS 8
-#define _IOC_SIZEBITS 14
-#define _IOC_DIRBITS 2
-
-#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT 0
-#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits.
- */
-#define _IOC_NONE 0U
-#define _IOC_WRITE 1U
-#define _IOC_READ 2U
-
-#define _IOC(dir,type,nr,size) \
- (((dir) << _IOC_DIRSHIFT) | \
- ((type) << _IOC_TYPESHIFT) | \
- ((nr) << _IOC_NRSHIFT) | \
- ((size) << _IOC_SIZESHIFT))
-
-/* used to create numbers */
-#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
-
-#endif /* __V850_IOCTL_H__ */
+#include <asm-generic/ioctl.h>
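The removed v850 copy and the asm-generic/ioctl.h that replaces it pack requests identically, so a worked example of the encoding described above may help. For a hypothetical 16-byte struct foo:

/*
 * _IOWR('k', 3, struct foo) packs as:
 *   dir  = _IOC_READ|_IOC_WRITE = 3    -> bits 31..30
 *   size = sizeof(struct foo)   = 16   -> bits 29..16
 *   type = 'k'                  = 0x6b -> bits 15..8
 *   nr   = 3                           -> bits  7..0
 */
#define FOO_IOC_EXAMPLE	_IOWR('k', 3, struct foo)	/* == 0xc0106b03 */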
diff --git a/include/asm-v850/mman.h b/include/asm-v850/mman.h
index e2b90081b56..edc79965193 100644
--- a/include/asm-v850/mman.h
+++ b/include/asm-v850/mman.h
@@ -32,6 +32,7 @@
#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
+#define MADV_REMOVE 0x5 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-v850/mutex.h b/include/asm-v850/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/include/asm-v850/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-v850/processor.h b/include/asm-v850/processor.h
index 98f929427d3..2d31308935a 100644
--- a/include/asm-v850/processor.h
+++ b/include/asm-v850/processor.h
@@ -98,10 +98,10 @@ unsigned long get_wchan (struct task_struct *p);
/* Return some info about the user process TASK. */
-#define task_tos(task) ((unsigned long)(task)->thread_info + THREAD_SIZE)
-#define task_regs(task) ((struct pt_regs *)task_tos (task) - 1)
-#define task_sp(task) (task_regs (task)->gpr[GPR_SP])
-#define task_pc(task) (task_regs (task)->pc)
+#define task_tos(task) ((unsigned long)task_stack_page(task) + THREAD_SIZE)
+#define task_pt_regs(task) ((struct pt_regs *)task_tos (task) - 1)
+#define task_sp(task) (task_pt_regs (task)->gpr[GPR_SP])
+#define task_pc(task) (task_pt_regs (task)->pc)
/* Grotty old names for some. */
#define KSTK_EIP(task) task_pc (task)
#define KSTK_ESP(task) task_sp (task)
diff --git a/include/asm-v850/thread_info.h b/include/asm-v850/thread_info.h
index e4cfad94a55..82b8f284620 100644
--- a/include/asm-v850/thread_info.h
+++ b/include/asm-v850/thread_info.h
@@ -58,8 +58,6 @@ struct thread_info {
#define alloc_thread_info(tsk) ((struct thread_info *) \
__get_free_pages(GFP_KERNEL, 1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#endif /* __ASSEMBLY__ */
diff --git a/include/asm-v850/unistd.h b/include/asm-v850/unistd.h
index 5a86f8e976e..82460a7bb23 100644
--- a/include/asm-v850/unistd.h
+++ b/include/asm-v850/unistd.h
@@ -241,9 +241,6 @@
/* User programs sometimes end up including this header file
(indirectly, via uClibc header files), so I'm a bit nervous just
including <linux/compiler.h>. */
-#if !defined(__builtin_expect) && __GNUC__ == 2 && __GNUC_MINOR__ < 96
-#define __builtin_expect(x, expected_value) (x)
-#endif
#define __syscall_return(type, res) \
do { \
@@ -346,20 +343,6 @@ type name (atype a, btype b, ctype c, dtype d, etype e) \
__syscall_return (type, __ret); \
}
-#if __GNUC__ < 3
-/* In older versions of gcc, `asm' statements with more than 10
- input/output arguments produce a fatal error. To work around this
- problem, we use two versions, one for gcc-3.x and one for earlier
- versions of gcc (the `earlier gcc' version doesn't work with gcc-3.x
- because gcc-3.x doesn't allow clobbers to also be input arguments). */
-#define __SYSCALL6_TRAP(syscall, ret, a, b, c, d, e, f) \
- __asm__ __volatile__ ("trap " SYSCALL_LONG_TRAP \
- : "=r" (ret), "=r" (syscall) \
- : "1" (syscall), \
- "r" (a), "r" (b), "r" (c), "r" (d), \
- "r" (e), "r" (f) \
- : SYSCALL_CLOBBERS, SYSCALL_ARG4, SYSCALL_ARG5);
-#else /* __GNUC__ >= 3 */
#define __SYSCALL6_TRAP(syscall, ret, a, b, c, d, e, f) \
__asm__ __volatile__ ("trap " SYSCALL_LONG_TRAP \
: "=r" (ret), "=r" (syscall), \
@@ -368,7 +351,6 @@ type name (atype a, btype b, ctype c, dtype d, etype e) \
"r" (a), "r" (b), "r" (c), "r" (d), \
"2" (e), "3" (f) \
: SYSCALL_CLOBBERS);
-#endif
#define _syscall6(type, name, atype, a, btype, b, ctype, c, dtype, d, etype, e, ftype, f) \
type name (atype a, btype b, ctype c, dtype d, etype e, ftype f) \
diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
index 5647b7de174..4f6a4dc455b 100644
--- a/include/asm-x86_64/apic.h
+++ b/include/asm-x86_64/apic.h
@@ -42,11 +42,6 @@ static __inline void apic_write(unsigned long reg, unsigned int v)
*((volatile unsigned int *)(APIC_BASE+reg)) = v;
}
-static __inline void apic_write_atomic(unsigned long reg, unsigned int v)
-{
- xchg((volatile unsigned int *)(APIC_BASE+reg), v);
-}
-
static __inline unsigned int apic_read(unsigned long reg)
{
return *((volatile unsigned int *)(APIC_BASE+reg));
@@ -57,10 +52,6 @@ static __inline__ void apic_wait_icr_idle(void)
while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY );
}
-#define FORCE_READ_AROUND_WRITE 0
-#define apic_read_around(x)
-#define apic_write_around(x,y) apic_write((x),(y))
-
static inline void ack_APIC_irq(void)
{
/*
@@ -71,7 +62,7 @@ static inline void ack_APIC_irq(void)
*/
/* Docs say use 0 for future compatibility */
- apic_write_around(APIC_EOI, 0);
+ apic_write(APIC_EOI, 0);
}
extern int get_maxlvt (void);
@@ -113,6 +104,12 @@ extern int disable_timer_pin_1;
extern void setup_threshold_lvt(unsigned long lvt_off);
+void smp_send_timer_broadcast_ipi(void);
+void switch_APIC_timer_to_ipi(void *cpumask);
+void switch_ipi_to_APIC_timer(void *cpumask);
+
+#define ARCH_APICTIMER_STOPS_ON_C3 1
+
#endif /* CONFIG_X86_LOCAL_APIC */
extern unsigned boot_cpu_id;
diff --git a/include/asm-x86_64/apicdef.h b/include/asm-x86_64/apicdef.h
index fb1c99ac669..decaa2d540e 100644
--- a/include/asm-x86_64/apicdef.h
+++ b/include/asm-x86_64/apicdef.h
@@ -13,6 +13,7 @@
#define APIC_ID 0x20
#define APIC_ID_MASK (0xFFu<<24)
#define GET_APIC_ID(x) (((x)>>24)&0xFFu)
+#define SET_APIC_ID(x) (((x)<<24))
#define APIC_LVR 0x30
#define APIC_LVR_MASK 0xFF00FF
#define GET_APIC_VERSION(x) ((x)&0xFFu)
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 50db9f39274..4b5cd553e77 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -2,6 +2,7 @@
#define __ARCH_X86_64_ATOMIC__
#include <linux/config.h>
+#include <asm/types.h>
/* atomic_t should be 32 bit signed type */
@@ -389,6 +390,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
#define atomic64_dec_return(v) (atomic64_sub_return(1,v))
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
* atomic_add_unless - add unless the number is a given value
@@ -424,4 +426,5 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
+#include <asm-generic/atomic.h>
#endif
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h
index a4d5d090945..eb4df23e1e4 100644
--- a/include/asm-x86_64/bitops.h
+++ b/include/asm-x86_64/bitops.h
@@ -29,7 +29,7 @@ static __inline__ void set_bit(int nr, volatile void * addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btsl %1,%0"
- :"=m" (ADDR)
+ :"+m" (ADDR)
:"dIr" (nr) : "memory");
}
@@ -46,7 +46,7 @@ static __inline__ void __set_bit(int nr, volatile void * addr)
{
__asm__ volatile(
"btsl %1,%0"
- :"=m" (ADDR)
+ :"+m" (ADDR)
:"dIr" (nr) : "memory");
}
@@ -64,7 +64,7 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btrl %1,%0"
- :"=m" (ADDR)
+ :"+m" (ADDR)
:"dIr" (nr));
}
@@ -72,7 +72,7 @@ static __inline__ void __clear_bit(int nr, volatile void * addr)
{
__asm__ __volatile__(
"btrl %1,%0"
- :"=m" (ADDR)
+ :"+m" (ADDR)
:"dIr" (nr));
}
@@ -92,7 +92,7 @@ static __inline__ void __change_bit(int nr, volatile void * addr)
{
__asm__ __volatile__(
"btcl %1,%0"
- :"=m" (ADDR)
+ :"+m" (ADDR)
:"dIr" (nr));
}
@@ -109,7 +109,7 @@ static __inline__ void change_bit(int nr, volatile void * addr)
{
__asm__ __volatile__( LOCK_PREFIX
"btcl %1,%0"
- :"=m" (ADDR)
+ :"+m" (ADDR)
:"dIr" (nr));
}
@@ -127,7 +127,7 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr)
__asm__ __volatile__( LOCK_PREFIX
"btsl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
+ :"=r" (oldbit),"+m" (ADDR)
:"dIr" (nr) : "memory");
return oldbit;
}
@@ -147,7 +147,7 @@ static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
__asm__(
"btsl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
+ :"=r" (oldbit),"+m" (ADDR)
:"dIr" (nr));
return oldbit;
}
@@ -166,7 +166,7 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
__asm__ __volatile__( LOCK_PREFIX
"btrl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
+ :"=r" (oldbit),"+m" (ADDR)
:"dIr" (nr) : "memory");
return oldbit;
}
@@ -186,7 +186,7 @@ static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
__asm__(
"btrl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
+ :"=r" (oldbit),"+m" (ADDR)
:"dIr" (nr));
return oldbit;
}
@@ -198,7 +198,7 @@ static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
__asm__ __volatile__(
"btcl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
+ :"=r" (oldbit),"+m" (ADDR)
:"dIr" (nr) : "memory");
return oldbit;
}
@@ -217,7 +217,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr)
__asm__ __volatile__( LOCK_PREFIX
"btcl %2,%1\n\tsbbl %0,%0"
- :"=r" (oldbit),"=m" (ADDR)
+ :"=r" (oldbit),"+m" (ADDR)
:"dIr" (nr) : "memory");
return oldbit;
}
@@ -397,6 +397,22 @@ static __inline__ int fls64(__u64 x)
}
/**
+ * fls - find last bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ */
+static __inline__ int fls(int x)
+{
+ int r;
+
+ __asm__("bsrl %1,%0\n\t"
+ "cmovzl %2,%0"
+ : "=&r" (r) : "rm" (x), "rm" (-1));
+ return r+1;
+}
+
+/**
* hweightN - returns the hamming weight of a N-bit word
* @x: the word to weigh
*
@@ -434,9 +450,6 @@ static __inline__ int fls64(__u64 x)
#define minix_find_first_zero_bit(addr,size) \
find_first_zero_bit((void*)addr,size)
-/* find last set bit */
-#define fls(x) generic_fls(x)
-
#endif /* __KERNEL__ */
#endif /* _X86_64_BITOPS_H */
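The new inline fls() above keeps the semantics of the generic_fls() it replaces: bsrl leaves ZF set for a zero input, cmovzl then substitutes -1, and the final +1 yields 0. A hypothetical sanity check, not part of the patch, just to show the expected values:

static inline void fls_selftest_sketch(void)
{
	BUG_ON(fls(0) != 0);		/* no bit set */
	BUG_ON(fls(1) != 1);		/* bit 0 is the last set bit */
	BUG_ON(fls(0x80000000) != 32);	/* bit 31 is the last set bit */
}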
diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h
index 33e53424128..263f0a211ed 100644
--- a/include/asm-x86_64/cache.h
+++ b/include/asm-x86_64/cache.h
@@ -9,6 +9,17 @@
/* L1 cache line size */
#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
+
+#ifdef CONFIG_X86_VSMP
+
+/* vSMP Internode cacheline shift */
+#define INTERNODE_CACHE_SHIFT (12)
+#ifdef CONFIG_SMP
+#define __cacheline_aligned_in_smp \
+ __attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) \
+ __attribute__((__section__(".data.page_aligned")))
+#endif
+
+#endif
#endif
diff --git a/include/asm-x86_64/cacheflush.h b/include/asm-x86_64/cacheflush.h
index b3189fb229d..d32f7f58752 100644
--- a/include/asm-x86_64/cacheflush.h
+++ b/include/asm-x86_64/cacheflush.h
@@ -27,4 +27,8 @@ void global_flush_tlb(void);
int change_page_attr(struct page *page, int numpages, pgprot_t prot);
int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot);
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+#endif
+
#endif /* _X8664_CACHEFLUSH_H */
diff --git a/include/asm-x86_64/compat.h b/include/asm-x86_64/compat.h
index f0155c38f63..b37ab8218ef 100644
--- a/include/asm-x86_64/compat.h
+++ b/include/asm-x86_64/compat.h
@@ -198,8 +198,13 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
static __inline__ void __user *compat_alloc_user_space(long len)
{
- struct pt_regs *regs = (void *)current->thread.rsp0 - sizeof(struct pt_regs);
+ struct pt_regs *regs = task_pt_regs(current);
return (void __user *)regs->rsp - len;
}
+static inline int is_compat_task(void)
+{
+ return current_thread_info()->status & TS_COMPAT;
+}
+
#endif /* _ASM_X86_64_COMPAT_H */
diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h
index aea308c6570..41c0ac8559b 100644
--- a/include/asm-x86_64/cpufeature.h
+++ b/include/asm-x86_64/cpufeature.h
@@ -61,8 +61,9 @@
#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
-#define X86_FEATURE_K8_C (3*32+ 4) /* C stepping K8 */
+/* 4 free */
#define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */
+#define X86_FEATURE_SYNC_RDTSC (3*32+6) /* RDTSC syncs CPU core */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h
index 33764869387..eb7723a4679 100644
--- a/include/asm-x86_64/desc.h
+++ b/include/asm-x86_64/desc.h
@@ -25,7 +25,7 @@ struct n_desc_struct {
unsigned int a,b;
};
-extern struct desc_struct cpu_gdt_table[NR_CPUS][GDT_ENTRIES];
+extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
enum {
GATE_INTERRUPT = 0xE,
@@ -79,6 +79,9 @@ extern struct desc_struct default_ldt[];
extern struct gate_struct idt_table[];
extern struct desc_ptr cpu_gdt_descr[];
+/* the cpu gdt accessor */
+#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)
+
static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)
{
struct gate_struct s;
@@ -114,6 +117,11 @@ static inline void set_system_gate(int nr, void *func)
_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
}
+static inline void set_system_gate_ist(int nr, void *func, unsigned ist)
+{
+ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
+}
+
static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
unsigned size)
{
@@ -139,20 +147,20 @@ static inline void set_tss_desc(unsigned cpu, void *addr)
* -1? seg base+limit should be pointing to the address of the
* last valid byte
*/
- set_tssldt_descriptor(&cpu_gdt_table[cpu][GDT_ENTRY_TSS],
+ set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS],
(unsigned long)addr, DESC_TSS,
IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
}
static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
{
- set_tssldt_descriptor(&cpu_gdt_table[cpu][GDT_ENTRY_LDT], (unsigned long)addr,
+ set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_LDT], (unsigned long)addr,
DESC_LDT, size * 8 - 1);
}
static inline void set_seg_base(unsigned cpu, int entry, void *base)
{
- struct desc_struct *d = &cpu_gdt_table[cpu][entry];
+ struct desc_struct *d = &cpu_gdt(cpu)[entry];
u32 addr = (u32)(u64)base;
BUG_ON((u64)base >> 32);
d->base0 = addr & 0xffff;
@@ -194,7 +202,7 @@ static inline void set_seg_base(unsigned cpu, int entry, void *base)
static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
{
- u64 *gdt = (u64 *)(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN);
+ u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);
gdt[0] = t->tls_array[0];
gdt[1] = t->tls_array[1];
gdt[2] = t->tls_array[2];
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
index 36d16dfbac8..49a81a66516 100644
--- a/include/asm-x86_64/dma-mapping.h
+++ b/include/asm-x86_64/dma-mapping.h
@@ -12,155 +12,176 @@
#include <asm/io.h>
#include <asm/swiotlb.h>
-extern dma_addr_t bad_dma_address;
-#define dma_mapping_error(x) \
- (swiotlb ? swiotlb_dma_mapping_error(x) : ((x) == bad_dma_address))
-
-void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp);
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle);
+struct dma_mapping_ops {
+ int (*mapping_error)(dma_addr_t dma_addr);
+ void* (*alloc_coherent)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+ void (*free_coherent)(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+ dma_addr_t (*map_single)(struct device *hwdev, void *ptr,
+ size_t size, int direction);
+ /* like map_single, but doesn't check the device mask */
+ dma_addr_t (*map_simple)(struct device *hwdev, char *ptr,
+ size_t size, int direction);
+ void (*unmap_single)(struct device *dev, dma_addr_t addr,
+ size_t size, int direction);
+ void (*sync_single_for_cpu)(struct device *hwdev,
+ dma_addr_t dma_handle, size_t size,
+ int direction);
+ void (*sync_single_for_device)(struct device *hwdev,
+ dma_addr_t dma_handle, size_t size,
+ int direction);
+ void (*sync_single_range_for_cpu)(struct device *hwdev,
+ dma_addr_t dma_handle, unsigned long offset,
+ size_t size, int direction);
+ void (*sync_single_range_for_device)(struct device *hwdev,
+ dma_addr_t dma_handle, unsigned long offset,
+ size_t size, int direction);
+ void (*sync_sg_for_cpu)(struct device *hwdev,
+ struct scatterlist *sg, int nelems,
+ int direction);
+ void (*sync_sg_for_device)(struct device *hwdev,
+ struct scatterlist *sg, int nelems,
+ int direction);
+ int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
+ int nents, int direction);
+ void (*unmap_sg)(struct device *hwdev,
+ struct scatterlist *sg, int nents,
+ int direction);
+ int (*dma_supported)(struct device *hwdev, u64 mask);
+ int is_phys;
+};
-#ifdef CONFIG_GART_IOMMU
+extern dma_addr_t bad_dma_address;
+extern struct dma_mapping_ops* dma_ops;
+extern int iommu_merge;
-extern dma_addr_t dma_map_single(struct device *hwdev, void *ptr, size_t size,
- int direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
- int direction);
+static inline int dma_mapping_error(dma_addr_t dma_addr)
+{
+ if (dma_ops->mapping_error)
+ return dma_ops->mapping_error(dma_addr);
-#else
+ return (dma_addr == bad_dma_address);
+}
-/* No IOMMU */
+extern void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle);
-static inline dma_addr_t dma_map_single(struct device *hwdev, void *ptr,
- size_t size, int direction)
+static inline dma_addr_t
+dma_map_single(struct device *hwdev, void *ptr, size_t size,
+ int direction)
{
- dma_addr_t addr;
-
- if (direction == DMA_NONE)
- out_of_line_bug();
- addr = virt_to_bus(ptr);
-
- if ((addr+size) & ~*hwdev->dma_mask)
- out_of_line_bug();
- return addr;
+ return dma_ops->map_single(hwdev, ptr, size, direction);
}
-static inline void dma_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
- size_t size, int direction)
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
+ int direction)
{
- if (direction == DMA_NONE)
- out_of_line_bug();
- /* Nothing to do */
+ dma_ops->unmap_single(dev, addr, size, direction);
}
-#endif
-
#define dma_map_page(dev,page,offset,size,dir) \
dma_map_single((dev), page_address(page)+(offset), (size), (dir))
-static inline void dma_sync_single_for_cpu(struct device *hwdev,
- dma_addr_t dma_handle,
- size_t size, int direction)
-{
- if (direction == DMA_NONE)
- out_of_line_bug();
-
- if (swiotlb)
- return swiotlb_sync_single_for_cpu(hwdev,dma_handle,size,direction);
+#define dma_unmap_page dma_unmap_single
+static inline void
+dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+ if (dma_ops->sync_single_for_cpu)
+ dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
+ direction);
flush_write_buffers();
}
-static inline void dma_sync_single_for_device(struct device *hwdev,
- dma_addr_t dma_handle,
- size_t size, int direction)
+static inline void
+dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
+ size_t size, int direction)
{
- if (direction == DMA_NONE)
- out_of_line_bug();
-
- if (swiotlb)
- return swiotlb_sync_single_for_device(hwdev,dma_handle,size,direction);
-
+ if (dma_ops->sync_single_for_device)
+ dma_ops->sync_single_for_device(hwdev, dma_handle, size,
+ direction);
flush_write_buffers();
}
-static inline void dma_sync_single_range_for_cpu(struct device *hwdev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size, int direction)
+static inline void
+dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size, int direction)
{
- if (direction == DMA_NONE)
- out_of_line_bug();
-
- if (swiotlb)
- return swiotlb_sync_single_range_for_cpu(hwdev,dma_handle,offset,size,direction);
+ if (dma_ops->sync_single_range_for_cpu) {
+ dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
+ }
flush_write_buffers();
}
-static inline void dma_sync_single_range_for_device(struct device *hwdev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size, int direction)
+static inline void
+dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size, int direction)
{
- if (direction == DMA_NONE)
- out_of_line_bug();
-
- if (swiotlb)
- return swiotlb_sync_single_range_for_device(hwdev,dma_handle,offset,size,direction);
+ if (dma_ops->sync_single_range_for_device)
+ dma_ops->sync_single_range_for_device(hwdev, dma_handle,
+ offset, size, direction);
flush_write_buffers();
}
-static inline void dma_sync_sg_for_cpu(struct device *hwdev,
- struct scatterlist *sg,
- int nelems, int direction)
+static inline void
+dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
+ int nelems, int direction)
{
- if (direction == DMA_NONE)
- out_of_line_bug();
-
- if (swiotlb)
- return swiotlb_sync_sg_for_cpu(hwdev,sg,nelems,direction);
-
+ if (dma_ops->sync_sg_for_cpu)
+ dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
flush_write_buffers();
}
-static inline void dma_sync_sg_for_device(struct device *hwdev,
- struct scatterlist *sg,
- int nelems, int direction)
+static inline void
+dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+ int nelems, int direction)
{
- if (direction == DMA_NONE)
- out_of_line_bug();
-
- if (swiotlb)
- return swiotlb_sync_sg_for_device(hwdev,sg,nelems,direction);
+ if (dma_ops->sync_sg_for_device) {
+ dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+ }
flush_write_buffers();
}
-extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
- int nents, int direction);
-extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
- int nents, int direction);
+static inline int
+dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
+{
+ return dma_ops->map_sg(hwdev, sg, nents, direction);
+}
-#define dma_unmap_page dma_unmap_single
+static inline void
+dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
+ int direction)
+{
+ dma_ops->unmap_sg(hwdev, sg, nents, direction);
+}
extern int dma_supported(struct device *hwdev, u64 mask);
-extern int dma_get_cache_alignment(void);
-#define dma_is_consistent(h) 1
-static inline int dma_set_mask(struct device *dev, u64 mask)
+/* same for gart, swiotlb, and nommu */
+static inline int dma_get_cache_alignment(void)
{
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
- *dev->dma_mask = mask;
- return 0;
+ return boot_cpu_data.x86_clflush_size;
}
-static inline void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
+#define dma_is_consistent(h) 1
+
+extern int dma_set_mask(struct device *dev, u64 mask);
+
+static inline void
+dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
{
flush_write_buffers();
}
-#endif
+extern struct device fallback_dev;
+extern int panic_on_overflow;
+
+#endif /* _X8664_DMA_MAPPING_H */
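All of the wrappers above now dispatch through the global dma_ops table, so each backend (GART IOMMU, swiotlb, or the no-IOMMU path) only has to populate struct dma_mapping_ops and point dma_ops at it. A minimal sketch of a no-IOMMU backend, with function names assumed for illustration and map_sg/unmap_sg omitted:

static dma_addr_t nommu_map_single_sketch(struct device *hwdev, void *ptr,
					  size_t size, int direction)
{
	dma_addr_t addr = virt_to_bus(ptr);	/* 1:1 mapping, no IOMMU */

	if ((addr + size) & ~*hwdev->dma_mask)
		return bad_dma_address;		/* outside the device's reach */
	return addr;
}

static void nommu_unmap_single_sketch(struct device *dev, dma_addr_t addr,
				      size_t size, int direction)
{
	/* nothing to tear down without an IOMMU */
}

static struct dma_mapping_ops nommu_dma_ops_sketch = {
	.map_single	= nommu_map_single_sketch,
	.unmap_single	= nommu_unmap_single_sketch,
	.is_phys	= 1,
};

/* from the architecture's DMA init code: dma_ops = &nommu_dma_ops_sketch; */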
diff --git a/include/asm-x86_64/dwarf2.h b/include/asm-x86_64/dwarf2.h
index 582757fc036..07654bd155b 100644
--- a/include/asm-x86_64/dwarf2.h
+++ b/include/asm-x86_64/dwarf2.h
@@ -14,7 +14,7 @@
away for older version.
*/
-#ifdef CONFIG_DEBUG_INFO
+#ifdef CONFIG_UNWIND_INFO
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
@@ -28,6 +28,7 @@
#define CFI_RESTORE .cfi_restore
#define CFI_REMEMBER_STATE .cfi_remember_state
#define CFI_RESTORE_STATE .cfi_restore_state
+#define CFI_UNDEFINED .cfi_undefined
#else
@@ -44,6 +45,7 @@
#define CFI_RESTORE #
#define CFI_REMEMBER_STATE #
#define CFI_RESTORE_STATE #
+#define CFI_UNDEFINED #
#endif
diff --git a/include/asm-x86_64/e820.h b/include/asm-x86_64/e820.h
index e682edc24a6..8dcc3266524 100644
--- a/include/asm-x86_64/e820.h
+++ b/include/asm-x86_64/e820.h
@@ -55,6 +55,7 @@ extern unsigned long e820_hole_size(unsigned long start_pfn,
unsigned long end_pfn);
extern void __init parse_memopt(char *p, char **end);
+extern void __init parse_memmapopt(char *p, char **end);
extern struct e820map e820;
#endif/*!__ASSEMBLY__*/
diff --git a/include/asm-x86_64/gart-mapping.h b/include/asm-x86_64/gart-mapping.h
new file mode 100644
index 00000000000..ada497b0b55
--- /dev/null
+++ b/include/asm-x86_64/gart-mapping.h
@@ -0,0 +1,16 @@
+#ifndef _X8664_GART_MAPPING_H
+#define _X8664_GART_MAPPING_H 1
+
+#include <linux/types.h>
+#include <asm/types.h>
+
+struct device;
+
+extern void*
+gart_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+
+extern int
+gart_dma_supported(struct device *hwdev, u64 mask);
+
+#endif /* _X8664_GART_MAPPING_H */
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
index c14a8c7267a..0df1715dee7 100644
--- a/include/asm-x86_64/hw_irq.h
+++ b/include/asm-x86_64/hw_irq.h
@@ -46,18 +46,18 @@ struct hw_interrupt_type;
* some of the following vectors are 'rare', they are merged
* into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
* TLB, reschedule and local APIC vectors are performance-critical.
- *
- * Vectors 0xf0-0xf9 are free (reserved for future Linux use).
*/
#define SPURIOUS_APIC_VECTOR 0xff
#define ERROR_APIC_VECTOR 0xfe
#define RESCHEDULE_VECTOR 0xfd
#define CALL_FUNCTION_VECTOR 0xfc
-#define KDB_VECTOR 0xfb /* reserved for KDB */
+/* fb free - please don't re-add KDB here because it's useless
+ (hint - think what an NMI bit does to a vector) */
#define THERMAL_APIC_VECTOR 0xfa
#define THRESHOLD_APIC_VECTOR 0xf9
-#define INVALIDATE_TLB_VECTOR_END 0xf8
-#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f8 used for TLB flush */
+/* f8 free */
+#define INVALIDATE_TLB_VECTOR_END 0xf7
+#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
#define NUM_INVALIDATE_TLB_VECTORS 8
diff --git a/include/asm-x86_64/i387.h b/include/asm-x86_64/i387.h
index aa39cfd0e00..876eb9a2fe7 100644
--- a/include/asm-x86_64/i387.h
+++ b/include/asm-x86_64/i387.h
@@ -30,7 +30,7 @@ extern int save_i387(struct _fpstate __user *buf);
*/
#define unlazy_fpu(tsk) do { \
- if ((tsk)->thread_info->status & TS_USEDFPU) \
+ if (task_thread_info(tsk)->status & TS_USEDFPU) \
save_init_fpu(tsk); \
} while (0)
@@ -46,9 +46,9 @@ static inline void tolerant_fwait(void)
}
#define clear_fpu(tsk) do { \
- if ((tsk)->thread_info->status & TS_USEDFPU) { \
+ if (task_thread_info(tsk)->status & TS_USEDFPU) { \
tolerant_fwait(); \
- (tsk)->thread_info->status &= ~TS_USEDFPU; \
+ task_thread_info(tsk)->status &= ~TS_USEDFPU; \
stts(); \
} \
} while (0)
@@ -75,7 +75,8 @@ extern int set_fpregs(struct task_struct *tsk,
static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
{
int err;
- asm volatile("1: rex64 ; fxrstor (%[fx])\n\t"
+
+ asm volatile("1: rex64/fxrstor (%[fx])\n\t"
"2:\n"
".section .fixup,\"ax\"\n"
"3: movl $-1,%[err]\n"
@@ -86,7 +87,11 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
" .quad 1b,3b\n"
".previous"
: [err] "=r" (err)
- : [fx] "r" (fx), "0" (0));
+#if 0 /* See comment in __fxsave_clear() below. */
+ : [fx] "r" (fx), "m" (*fx), "0" (0));
+#else
+ : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
+#endif
if (unlikely(err))
init_fpu(current);
return err;
@@ -95,7 +100,8 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
{
int err;
- asm volatile("1: rex64 ; fxsave (%[fx])\n\t"
+
+ asm volatile("1: rex64/fxsave (%[fx])\n\t"
"2:\n"
".section .fixup,\"ax\"\n"
"3: movl $-1,%[err]\n"
@@ -105,20 +111,53 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
" .align 8\n"
" .quad 1b,3b\n"
".previous"
- : [err] "=r" (err)
- : [fx] "r" (fx), "0" (0));
+ : [err] "=r" (err), "=m" (*fx)
+#if 0 /* See comment in __fxsave_clear() below. */
+ : [fx] "r" (fx), "0" (0));
+#else
+ : [fx] "cdaSDb" (fx), "0" (0));
+#endif
if (unlikely(err))
__clear_user(fx, sizeof(struct i387_fxsave_struct));
return err;
}
+static inline void __fxsave_clear(struct task_struct *tsk)
+{
+ /* Using "rex64; fxsave %0" is broken because, if the memory operand
+ uses any extended registers for addressing, a second REX prefix
+ will be generated (to the assembler, rex64 followed by semicolon
+ is a separate instruction), and hence the 64-bitness is lost. */
+#if 0
+ /* Using "fxsaveq %0" would be the ideal choice, but is only supported
+ starting with gas 2.16. */
+ __asm__ __volatile__("fxsaveq %0"
+ : "=m" (tsk->thread.i387.fxsave));
+#elif 0
+ /* Using, as a workaround, the properly prefixed form below isn't
+ accepted by any binutils version so far released, complaining that
+ the same type of prefix is used twice if an extended register is
+ needed for addressing (fix submitted to mainline 2005-11-21). */
+ __asm__ __volatile__("rex64/fxsave %0"
+ : "=m" (tsk->thread.i387.fxsave));
+#else
+ /* This, however, we can work around by forcing the compiler to select
+ an addressing mode that doesn't require extended registers. */
+ __asm__ __volatile__("rex64/fxsave %P2(%1)"
+ : "=m" (tsk->thread.i387.fxsave)
+ : "cdaSDb" (tsk),
+ "i" (offsetof(__typeof__(*tsk),
+ thread.i387.fxsave)));
+#endif
+ __asm__ __volatile__("fnclex");
+}
+
static inline void kernel_fpu_begin(void)
{
struct thread_info *me = current_thread_info();
preempt_disable();
- if (me->status & TS_USEDFPU) {
- asm volatile("rex64 ; fxsave %0 ; fnclex"
- : "=m" (me->task->thread.i387.fxsave));
+ if (me->status & TS_USEDFPU) {
+ __fxsave_clear(me->task);
me->status &= ~TS_USEDFPU;
return;
}
@@ -131,11 +170,10 @@ static inline void kernel_fpu_end(void)
preempt_enable();
}
-static inline void save_init_fpu( struct task_struct *tsk )
+static inline void save_init_fpu(struct task_struct *tsk)
{
- asm volatile( "rex64 ; fxsave %0 ; fnclex"
- : "=m" (tsk->thread.i387.fxsave));
- tsk->thread_info->status &= ~TS_USEDFPU;
+ __fxsave_clear(tsk);
+ task_thread_info(tsk)->status &= ~TS_USEDFPU;
stts();
}
diff --git a/include/asm-x86_64/ia32_unistd.h b/include/asm-x86_64/ia32_unistd.h
index d5166ec3868..e8843362a6c 100644
--- a/include/asm-x86_64/ia32_unistd.h
+++ b/include/asm-x86_64/ia32_unistd.h
@@ -299,7 +299,8 @@
#define __NR_ia32_inotify_init 291
#define __NR_ia32_inotify_add_watch 292
#define __NR_ia32_inotify_rm_watch 293
+#define __NR_ia32_migrate_pages 294
-#define IA32_NR_syscalls 294 /* must be > than biggest syscall! */
+#define IA32_NR_syscalls 295 /* must be > than biggest syscall! */
#endif /* _ASM_X86_64_IA32_UNISTD_H_ */
diff --git a/include/asm-x86_64/idle.h b/include/asm-x86_64/idle.h
new file mode 100644
index 00000000000..6bd47dcf206
--- /dev/null
+++ b/include/asm-x86_64/idle.h
@@ -0,0 +1,14 @@
+#ifndef _ASM_X86_64_IDLE_H
+#define _ASM_X86_64_IDLE_H 1
+
+#define IDLE_START 1
+#define IDLE_END 2
+
+struct notifier_block;
+void idle_notifier_register(struct notifier_block *n);
+void idle_notifier_unregister(struct notifier_block *n);
+
+void enter_idle(void);
+void exit_idle(void);
+
+#endif
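The new idle notifier interface is consumed through an ordinary notifier_block; a hypothetical user (callback and variable names assumed) would look roughly like this:

static int my_idle_notify(struct notifier_block *nb, unsigned long event,
			  void *unused)
{
	if (event == IDLE_START) {
		/* the cpu is entering the idle loop */
	} else if (event == IDLE_END) {
		/* the cpu is leaving the idle loop */
	}
	return 0;
}

static struct notifier_block my_idle_nb = {
	.notifier_call = my_idle_notify,
};

/* in init code: idle_notifier_register(&my_idle_nb); */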
diff --git a/include/asm-x86_64/io.h b/include/asm-x86_64/io.h
index 52ff269fe05..9dac18db829 100644
--- a/include/asm-x86_64/io.h
+++ b/include/asm-x86_64/io.h
@@ -143,6 +143,11 @@ static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
extern void iounmap(volatile void __iomem *addr);
+/* Use normal IO mappings for DMI */
+#define dmi_ioremap ioremap
+#define dmi_iounmap(x,l) iounmap(x)
+#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
+
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
diff --git a/include/asm-x86_64/ioctl.h b/include/asm-x86_64/ioctl.h
index 609b663b6bf..b279fe06dfe 100644
--- a/include/asm-x86_64/ioctl.h
+++ b/include/asm-x86_64/ioctl.h
@@ -1,75 +1 @@
-/* $Id: ioctl.h,v 1.2 2001/07/04 09:08:13 ak Exp $
- *
- * linux/ioctl.h for Linux by H.H. Bergman.
- */
-
-#ifndef _ASMX8664_IOCTL_H
-#define _ASMX8664_IOCTL_H
-
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and to avoid overwriting user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB -1 !
- */
-
-/*
- * The following is for compatibility across the various Linux
- * platforms. The i386 ioctl numbering scheme doesn't really enforce
- * a type field. De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here. Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS 8
-#define _IOC_TYPEBITS 8
-#define _IOC_SIZEBITS 14
-#define _IOC_DIRBITS 2
-
-#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT 0
-#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits.
- */
-#define _IOC_NONE 0U
-#define _IOC_WRITE 1U
-#define _IOC_READ 2U
-
-#define _IOC(dir,type,nr,size) \
- (((dir) << _IOC_DIRSHIFT) | \
- ((type) << _IOC_TYPESHIFT) | \
- ((nr) << _IOC_NRSHIFT) | \
- ((size) << _IOC_SIZESHIFT))
-
-/* used to create numbers */
-#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
-
-#endif /* _ASMX8664_IOCTL_H */
+#include <asm-generic/ioctl.h>
diff --git a/include/asm-x86_64/ipi.h b/include/asm-x86_64/ipi.h
index 022e9d340ad..2a5c162b7d9 100644
--- a/include/asm-x86_64/ipi.h
+++ b/include/asm-x86_64/ipi.h
@@ -38,10 +38,6 @@ static inline unsigned int __prepare_ICR (unsigned int shortcut, int vector, uns
icr |= APIC_DM_FIXED | vector;
break;
case NMI_VECTOR:
- /*
- * Setup KDB IPI to be delivered as an NMI
- */
- case KDB_VECTOR:
icr |= APIC_DM_NMI;
break;
}
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index f604e84c530..b9ed4c0c878 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -35,9 +35,16 @@ enum die_val {
DIE_PAGE_FAULT,
};
-static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig)
-{
- struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig };
+static inline int notify_die(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig
+ };
return notifier_call_chain(&die_chain, val, &args);
}
diff --git a/include/asm-x86_64/kexec.h b/include/asm-x86_64/kexec.h
index 42d2ff15c59..ae28cd44bcd 100644
--- a/include/asm-x86_64/kexec.h
+++ b/include/asm-x86_64/kexec.h
@@ -3,6 +3,7 @@
#include <asm/page.h>
#include <asm/proto.h>
+#include <asm/ptrace.h>
/*
* KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
@@ -26,8 +27,40 @@
#define KEXEC_ARCH KEXEC_ARCH_X86_64
#define MAX_NOTE_BYTES 1024
-typedef u32 note_buf_t[MAX_NOTE_BYTES/4];
-extern note_buf_t crash_notes[];
+/*
+ * Saving the registers of the cpu on which panic occurred in
+ * crash_kexec to save a valid sp. The registers of the other cpus
+ * will be saved in machine_crash_shutdown while shooting them down.
+ */
+
+static inline void crash_setup_regs(struct pt_regs *newregs,
+ struct pt_regs *oldregs)
+{
+ if (oldregs)
+ memcpy(newregs, oldregs, sizeof(*newregs));
+ else {
+ __asm__ __volatile__("movq %%rbx,%0" : "=m"(newregs->rbx));
+ __asm__ __volatile__("movq %%rcx,%0" : "=m"(newregs->rcx));
+ __asm__ __volatile__("movq %%rdx,%0" : "=m"(newregs->rdx));
+ __asm__ __volatile__("movq %%rsi,%0" : "=m"(newregs->rsi));
+ __asm__ __volatile__("movq %%rdi,%0" : "=m"(newregs->rdi));
+ __asm__ __volatile__("movq %%rbp,%0" : "=m"(newregs->rbp));
+ __asm__ __volatile__("movq %%rax,%0" : "=m"(newregs->rax));
+ __asm__ __volatile__("movq %%rsp,%0" : "=m"(newregs->rsp));
+ __asm__ __volatile__("movq %%r8,%0" : "=m"(newregs->r8));
+ __asm__ __volatile__("movq %%r9,%0" : "=m"(newregs->r9));
+ __asm__ __volatile__("movq %%r10,%0" : "=m"(newregs->r10));
+ __asm__ __volatile__("movq %%r11,%0" : "=m"(newregs->r11));
+ __asm__ __volatile__("movq %%r12,%0" : "=m"(newregs->r12));
+ __asm__ __volatile__("movq %%r13,%0" : "=m"(newregs->r13));
+ __asm__ __volatile__("movq %%r14,%0" : "=m"(newregs->r14));
+ __asm__ __volatile__("movq %%r15,%0" : "=m"(newregs->r15));
+ __asm__ __volatile__("movl %%ss, %%eax;" :"=a"(newregs->ss));
+ __asm__ __volatile__("movl %%cs, %%eax;" :"=a"(newregs->cs));
+ __asm__ __volatile__("pushfq; popq %0" :"=m"(newregs->eflags));
+ newregs->rip = (unsigned long)current_text_addr();
+ }
+}
#endif /* _X86_64_KEXEC_H */
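crash_setup_regs() above is meant to be called from the panic path; when no pt_regs is handed in it snapshots the live registers of the panicking cpu. An illustrative call site, simplified and not the actual crash_kexec() body:

void crash_kexec_sketch(struct pt_regs *regs)
{
	struct pt_regs fixed_regs;

	/* a NULL regs means "capture this cpu's registers right here" */
	crash_setup_regs(&fixed_regs, regs);
	machine_crash_shutdown(&fixed_regs);	/* quiesce the other cpus */
	/* ...then jump into the pre-loaded crash kernel */
}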
diff --git a/include/asm-x86_64/kprobes.h b/include/asm-x86_64/kprobes.h
index 4dd7a7e148d..98a1e95ddb9 100644
--- a/include/asm-x86_64/kprobes.h
+++ b/include/asm-x86_64/kprobes.h
@@ -27,7 +27,10 @@
#include <linux/ptrace.h>
#include <linux/percpu.h>
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+
struct pt_regs;
+struct kprobe;
typedef u8 kprobe_opcode_t;
#define BREAKPOINT_INSTRUCTION 0xcc
@@ -42,6 +45,7 @@ typedef u8 kprobe_opcode_t;
#define ARCH_SUPPORTS_KRETPROBES
void kretprobe_trampoline(void);
+extern void arch_remove_kprobe(struct kprobe *p);
/* Architecture specific copy of original instruction*/
struct arch_specific_insn {
diff --git a/include/asm-x86_64/mman.h b/include/asm-x86_64/mman.h
index 78e60a4fd4e..d0e97b74f73 100644
--- a/include/asm-x86_64/mman.h
+++ b/include/asm-x86_64/mman.h
@@ -36,6 +36,7 @@
#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
+#define MADV_REMOVE 0x5 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-x86_64/mmu_context.h b/include/asm-x86_64/mmu_context.h
index b630d52bdfb..16e4be4de0c 100644
--- a/include/asm-x86_64/mmu_context.h
+++ b/include/asm-x86_64/mmu_context.h
@@ -15,18 +15,13 @@
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
-#ifdef CONFIG_SMP
-
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
+#ifdef CONFIG_SMP
if (read_pda(mmu_state) == TLBSTATE_OK)
write_pda(mmu_state, TLBSTATE_LAZY);
-}
-#else
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
#endif
+}
static inline void load_cr3(pgd_t *pgd)
{
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index 69baaa8a3ce..972c9359f7d 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -36,22 +36,12 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_DISCONTIGMEM
-
#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
#define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr))
-/* Requires pfn_valid(pfn) to be true */
-#define pfn_to_page(pfn) ({ \
- int nid = phys_to_nid(((unsigned long)(pfn)) << PAGE_SHIFT); \
- ((pfn) - node_start_pfn(nid)) + NODE_DATA(nid)->node_mem_map; \
-})
-
-#define page_to_pfn(page) \
- (long)(((page) - page_zone(page)->zone_mem_map) + page_zone(page)->zone_start_pfn)
-
-#define pfn_valid(pfn) ((pfn) >= num_physpages ? 0 : \
- ({ u8 nid__ = pfn_to_nid(pfn); \
- nid__ != 0xff && (pfn) >= node_start_pfn(nid__) && (pfn) < node_end_pfn(nid__); }))
+extern struct page *pfn_to_page(unsigned long pfn);
+extern unsigned long page_to_pfn(struct page *page);
+extern int pfn_valid(unsigned long pfn);
#endif
#define local_mapnr(kvaddr) \
diff --git a/include/asm-x86_64/mpspec.h b/include/asm-x86_64/mpspec.h
index 6f8a17d105a..10248a9a058 100644
--- a/include/asm-x86_64/mpspec.h
+++ b/include/asm-x86_64/mpspec.h
@@ -76,7 +76,7 @@ struct mpc_config_bus
{
unsigned char mpc_type;
unsigned char mpc_busid;
- unsigned char mpc_bustype[6] __attribute((packed));
+ unsigned char mpc_bustype[6];
};
/* List of Bus Type string values, Intel MP Spec. */
diff --git a/include/asm-x86_64/mutex.h b/include/asm-x86_64/mutex.h
new file mode 100644
index 00000000000..11fbee2bd6c
--- /dev/null
+++ b/include/asm-x86_64/mutex.h
@@ -0,0 +1,113 @@
+/*
+ * Assembly implementation of the mutex fastpath, based on atomic
+ * decrement/increment.
+ *
+ * started by Ingo Molnar:
+ *
+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ */
+#ifndef _ASM_MUTEX_H
+#define _ASM_MUTEX_H
+
+/**
+ * __mutex_fastpath_lock - decrement and call function if negative
+ * @v: pointer of type atomic_t
+ * @fail_fn: function to call if the result is negative
+ *
+ * Atomically decrements @v and calls <fail_fn> if the result is negative.
+ */
+#define __mutex_fastpath_lock(v, fail_fn) \
+do { \
+ unsigned long dummy; \
+ \
+ typecheck(atomic_t *, v); \
+ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ \
+ __asm__ __volatile__( \
+ LOCK " decl (%%rdi) \n" \
+ " js 2f \n" \
+ "1: \n" \
+ \
+ LOCK_SECTION_START("") \
+ "2: call "#fail_fn" \n" \
+ " jmp 1b \n" \
+ LOCK_SECTION_END \
+ \
+ :"=D" (dummy) \
+ : "D" (v) \
+ : "rax", "rsi", "rdx", "rcx", \
+ "r8", "r9", "r10", "r11", "memory"); \
+} while (0)
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or whatever the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count,
+ int fastcall (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_dec_return(count) < 0))
+ return fail_fn(count);
+ else
+ return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - increment and call function if nonpositive
+ * @v: pointer of type atomic_t
+ * @fail_fn: function to call if the result is nonpositive
+ *
+ * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
+ */
+#define __mutex_fastpath_unlock(v, fail_fn) \
+do { \
+ unsigned long dummy; \
+ \
+ typecheck(atomic_t *, v); \
+ typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
+ \
+ __asm__ __volatile__( \
+ LOCK " incl (%%rdi) \n" \
+ " jle 2f \n" \
+ "1: \n" \
+ \
+ LOCK_SECTION_START("") \
+ "2: call "#fail_fn" \n" \
+ " jmp 1b \n" \
+ LOCK_SECTION_END \
+ \
+ :"=D" (dummy) \
+ : "D" (v) \
+ : "rax", "rsi", "rdx", "rcx", \
+ "r8", "r9", "r10", "r11", "memory"); \
+} while (0)
+
+#define __mutex_slowpath_needs_to_unlock() 1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
+ *
+ * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
+ * if it wasn't 1 originally. [the fallback function is never used on
+ * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+ return 1;
+ else
+ return 0;
+}
+
+#endif
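Editor's note: the comments in this new header describe the mutex counting protocol (1 = unlocked, 0 = locked, negative = locked with possible waiters; decrement and take the slow path if the result is negative, increment and take the slow path if the result is nonpositive). A small userspace sketch of the same protocol, using GCC __atomic builtins instead of the kernel's atomic_t and stub slow paths, purely for illustration:

#include <stdio.h>

static int count = 1;                   /* 1 = unlocked, 0 = locked, <0 = contended */

static void slowpath_lock(int *c)   { printf("slow path: sleep until woken\n"); }
static void slowpath_unlock(int *c) { printf("slow path: wake one waiter\n"); }

static void lock(void)
{
        /* mirrors "decl; js slowpath" in the fastpath asm above */
        if (__atomic_sub_fetch(&count, 1, __ATOMIC_ACQUIRE) < 0)
                slowpath_lock(&count);
}

static void unlock(void)
{
        /* mirrors "incl; jle slowpath" */
        if (__atomic_add_fetch(&count, 1, __ATOMIC_RELEASE) <= 0)
                slowpath_unlock(&count);
}

int main(void)
{
        lock();         /* fast path: 1 -> 0 */
        unlock();       /* fast path: 0 -> 1 */
        return 0;
}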
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
index d51e56fdc3d..34e434ce326 100644
--- a/include/asm-x86_64/numa.h
+++ b/include/asm-x86_64/numa.h
@@ -20,6 +20,11 @@ extern int numa_off;
extern void numa_set_node(int cpu, int node);
extern unsigned char apicid_to_node[256];
+#ifdef CONFIG_NUMA
+extern void __init init_cpu_to_node(void);
+#else
+#define init_cpu_to_node() do {} while (0)
+#endif
#define NUMA_NO_NODE 0xff
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index 06e489f3247..dcbb4fcd9a1 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -14,13 +14,18 @@
#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK)
#define THREAD_ORDER 1
-#ifdef __ASSEMBLY__
-#define THREAD_SIZE (1 << (PAGE_SHIFT + THREAD_ORDER))
-#else
-#define THREAD_SIZE (1UL << (PAGE_SHIFT + THREAD_ORDER))
-#endif
+#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define CURRENT_MASK (~(THREAD_SIZE-1))
+#define EXCEPTION_STACK_ORDER 0
+#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
+
+#define DEBUG_STACK_ORDER EXCEPTION_STACK_ORDER
+#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
+
+#define IRQSTACK_ORDER 2
+#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
+
#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h
index eeb3088a1c9..fd03e15d7ea 100644
--- a/include/asm-x86_64/pci.h
+++ b/include/asm-x86_64/pci.h
@@ -42,18 +42,20 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
#include <asm/scatterlist.h>
#include <linux/string.h>
#include <asm/page.h>
+#include <linux/dma-mapping.h> /* for have_iommu */
extern int iommu_setup(char *opt);
-#ifdef CONFIG_GART_IOMMU
/* The PCI address space does equal the physical memory
* address space. The networking and block device layers use
* this boolean for bounce buffer decisions
*
- * On AMD64 it mostly equals, but we set it to zero to tell some subsystems
- * that an IOMMU is available.
+ * On AMD64 it mostly equals, but we set it to zero if a hardware
+ * IOMMU (gart) or software IOMMU (swiotlb) is available.
*/
-#define PCI_DMA_BUS_IS_PHYS (no_iommu ? 1 : 0)
+#define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
+
+#ifdef CONFIG_GART_IOMMU
/*
* x86-64 always supports DAC, but sometimes it is useful to force
@@ -79,7 +81,6 @@ extern int iommu_sac_force;
#else
/* No IOMMU */
-#define PCI_DMA_BUS_IS_PHYS 1
#define pci_dac_dma_supported(pci_dev, mask) 1
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h
index 8733ccfa442..c7ab38a601a 100644
--- a/include/asm-x86_64/pda.h
+++ b/include/asm-x86_64/pda.h
@@ -5,6 +5,7 @@
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/cache.h>
+#include <asm/page.h>
/* Per processor datastructure. %gs points to it while the kernel runs */
struct x8664_pda {
@@ -12,6 +13,9 @@ struct x8664_pda {
unsigned long data_offset; /* Per cpu data offset from linker address */
unsigned long kernelstack; /* top of kernel stack for current */
unsigned long oldrsp; /* user rsp for system call */
+#if DEBUG_STKSZ > EXCEPTION_STKSZ
+ unsigned long debugstack; /* #DB/#BP stack. */
+#endif
int irqcount; /* Irq nesting counter. Starts with -1 */
int cpunumber; /* Logical CPU number */
char *irqstackptr; /* top of irqstack */
@@ -23,11 +27,10 @@ struct x8664_pda {
unsigned apic_timer_irqs;
} ____cacheline_aligned_in_smp;
+extern struct x8664_pda *_cpu_pda[];
+extern struct x8664_pda boot_cpu_pda[];
-#define IRQSTACK_ORDER 2
-#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
-
-extern struct x8664_pda cpu_pda[];
+#define cpu_pda(i) (_cpu_pda[i])
/*
* There is no fast way to get the base address of the PDA, all the accesses
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 9c71855736f..29a6b0408f7 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -11,7 +11,7 @@
#include <asm/pda.h>
-#define __per_cpu_offset(cpu) (cpu_pda[cpu].data_offset)
+#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
#define __my_cpu_offset() read_pda(data_offset)
/* Separate out the type, so (int[3], foo) works. */
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index ecf58c7c165..8fbf4dd7211 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -122,6 +122,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
#define pte_same(a, b) ((a).pte == (b).pte)
+#define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
+
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PUD_SIZE (1UL << PUD_SHIFT)
@@ -265,25 +267,25 @@ static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
*/
#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
static inline int pte_user(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
-extern inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
-extern inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
-extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
-extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-extern inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
+static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
+static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_huge(pte_t pte) { return (pte_val(pte) & __LARGE_PTE) == __LARGE_PTE; }
-extern inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
-extern inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
-extern inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
-extern inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
-extern inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
-extern inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
-extern inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
-extern inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
-extern inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
-extern inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
-extern inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | __LARGE_PTE)); return pte; }
+static inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
+static inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
+static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
+static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
+static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
+static inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
+static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | __LARGE_PTE)); return pte; }
struct vm_area_struct;
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 4861246548f..87a282b1043 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -227,7 +227,13 @@ struct tss_struct {
extern struct cpuinfo_x86 boot_cpu_data;
DECLARE_PER_CPU(struct tss_struct,init_tss);
+#ifdef CONFIG_X86_VSMP
+#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
+#define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
+#else
#define ARCH_MIN_TASKALIGN 16
+#define ARCH_MIN_MMSTRUCT_ALIGN 0
+#endif
struct thread_struct {
unsigned long rsp0;
@@ -273,8 +279,6 @@ struct thread_struct {
#define DEBUG_STACK 4
#define MCE_STACK 5
#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
-#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
-#define EXCEPTION_STACK_ORDER 0
#define start_thread(regs,new_rip,new_rsp) do { \
asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
@@ -317,8 +321,8 @@ extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
extern unsigned long get_wchan(struct task_struct *p);
-#define KSTK_EIP(tsk) \
- (((struct pt_regs *)(tsk->thread.rsp0 - sizeof(struct pt_regs)))->rip)
+#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip)
#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index 34501086afe..115e496c613 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -67,8 +67,6 @@ extern void load_gs_index(unsigned gs);
extern unsigned long end_pfn_map;
-extern cpumask_t cpu_initialized;
-
extern void show_trace(unsigned long * rsp);
extern void show_registers(struct pt_regs *regs);
@@ -91,8 +89,12 @@ extern void check_efer(void);
extern int unhandled_signal(struct task_struct *tsk, int sig);
+extern int unsynchronized_tsc(void);
+
extern void select_idle_routine(const struct cpuinfo_x86 *c);
-extern void swiotlb_init(void);
+
+extern void gart_parse_options(char *);
+extern void __init no_iommu_init(void);
extern unsigned long table_start, table_end;
@@ -106,12 +108,17 @@ extern int skip_ioapic_setup;
extern int acpi_ht;
extern int acpi_disabled;
+#ifdef CONFIG_GART_IOMMU
extern int fallback_aper_order;
extern int fallback_aper_force;
extern int iommu_aperture;
-extern int iommu_aperture_disabled;
extern int iommu_aperture_allowed;
+extern int iommu_aperture_disabled;
extern int fix_aperture;
+#else
+#define iommu_aperture 0
+#define iommu_aperture_allowed 0
+#endif
extern int force_iommu;
extern int reboot_force;
diff --git a/include/asm-x86_64/segment.h b/include/asm-x86_64/segment.h
index 44adaf18c11..d4bed33fb32 100644
--- a/include/asm-x86_64/segment.h
+++ b/include/asm-x86_64/segment.h
@@ -19,15 +19,13 @@
#define __USER_DS 0x2b /* 5*8+3 */
#define __USER_CS 0x33 /* 6*8+3 */
#define __USER32_DS __USER_DS
-#define __KERNEL16_CS (GDT_ENTRY_KERNELCS16 * 8)
-#define __KERNEL_COMPAT32_CS 0x8
#define GDT_ENTRY_TLS 1
#define GDT_ENTRY_TSS 8 /* needs two entries */
#define GDT_ENTRY_LDT 10 /* needs two entries */
#define GDT_ENTRY_TLS_MIN 12
#define GDT_ENTRY_TLS_MAX 14
-#define GDT_ENTRY_KERNELCS16 15
+/* 15 free */
#define GDT_ENTRY_TLS_ENTRIES 3
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index d030409a8fb..9ccbb2cfd5c 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -35,6 +35,7 @@ extern cpumask_t cpu_present_mask;
extern cpumask_t cpu_possible_map;
extern cpumask_t cpu_online_map;
extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_initialized;
/*
* Private routines/data
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
index dddf1b21868..60757efd135 100644
--- a/include/asm-x86_64/swiotlb.h
+++ b/include/asm-x86_64/swiotlb.h
@@ -3,10 +3,14 @@
#include <linux/config.h>
+#include <asm/dma-mapping.h>
+
/* SWIOTLB interface */
-extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size,
- int dir);
+extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr,
+ size_t size, int dir);
+extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags);
extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
size_t size, int dir);
extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
@@ -34,10 +38,10 @@ extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
int nents, int direction);
extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
-extern void *swiotlb_alloc_coherent (struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags);
extern void swiotlb_free_coherent (struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle);
+extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
+extern void swiotlb_init(void);
#ifdef CONFIG_SWIOTLB
extern int swiotlb;
@@ -45,4 +49,6 @@ extern int swiotlb;
#define swiotlb 0
#endif
-#endif
+extern void pci_swiotlb_init(void);
+
+#endif /* _ASM_SWIOTLB_H */
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 85348e02ad2..0eacbefb7dd 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -20,8 +20,8 @@
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
/* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT "pushfq ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popfq\n\t"
+#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\n\t"
#define __EXTRA_CLOBBER \
,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
@@ -137,6 +137,21 @@ struct alt_instr {
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
".previous" :: "i" (feature), ##input)
+/* Like alternative_input, but with a single output argument */
+#define alternative_io(oldinstr, newinstr, feature, output, input...) \
+ asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+ ".section .altinstructions,\"a\"\n" \
+ " .align 8\n" \
+ " .quad 661b\n" /* label */ \
+ " .quad 663f\n" /* new instruction */ \
+ " .byte %c[feat]\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+ ".section .altinstr_replacement,\"ax\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous" : output : [feat] "i" (feature), ##input)
+
/*
* Clear and set 'TS' bit respectively
*/
@@ -178,6 +193,15 @@ static inline void write_cr4(unsigned long val)
#define wbinvd() \
__asm__ __volatile__ ("wbinvd": : :"memory");
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+static inline void sched_cacheflush(void)
+{
+ wbinvd();
+}
+
#endif /* __KERNEL__ */
#define nop() __asm__ __volatile__ ("nop")
@@ -311,10 +335,29 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
/* interrupt control.. */
#define local_save_flags(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
#define local_irq_restore(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
+
+#ifdef CONFIG_X86_VSMP
+/* Interrupt control for VSMP architecture */
+#define local_irq_disable() do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); } while (0)
+#define local_irq_enable() do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); } while (0)
+
+#define irqs_disabled() \
+({ \
+ unsigned long flags; \
+ local_save_flags(flags); \
+ (flags & (1<<18)) || !(flags & (1<<9)); \
+})
+
+/* For spinlocks etc */
+#define local_irq_save(x) do { local_save_flags(x); local_irq_restore((x & ~(1 << 9)) | (1 << 18)); } while (0)
+#else /* CONFIG_X86_VSMP */
#define local_irq_disable() __asm__ __volatile__("cli": : :"memory")
#define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
+
/* used in the idle loop; sti takes one instruction cycle to complete */
#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
+/* used when interrupts are already enabled or to shut down the processor */
+#define halt() __asm__ __volatile__("hlt": : :"memory")
#define irqs_disabled() \
({ \
@@ -325,16 +368,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
/* For spinlocks etc */
#define local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
+#endif
void cpu_idle_wait(void);
-/*
- * disable hlt during certain critical i/o operations
- */
-#define HAVE_DISABLE_HLT
-void disable_hlt(void);
-void enable_hlt(void);
-
extern unsigned long arch_align_stack(unsigned long sp);
#endif
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
index 08eb6e4f373..4ac0e0a3693 100644
--- a/include/asm-x86_64/thread_info.h
+++ b/include/asm-x86_64/thread_info.h
@@ -76,8 +76,6 @@ static inline struct thread_info *stack_thread_info(void)
#define alloc_thread_info(tsk) \
((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER))
#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#else /* !__ASSEMBLY__ */
@@ -138,6 +136,7 @@ static inline struct thread_info *stack_thread_info(void)
* have to worry about atomic accesses.
*/
#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
+#define TS_COMPAT 0x0002 /* 32bit syscall active */
#endif /* __KERNEL__ */
diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h
index f971f45d6d7..f18443fcdf0 100644
--- a/include/asm-x86_64/timex.h
+++ b/include/asm-x86_64/timex.h
@@ -10,6 +10,9 @@
#include <asm/msr.h>
#include <asm/vsyscall.h>
#include <asm/hpet.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+#include <linux/compiler.h>
#define CLOCK_TICK_RATE PIT_TICK_RATE /* Underlying HZ */
@@ -23,6 +26,19 @@ static inline cycles_t get_cycles (void)
return ret;
}
+/* Like get_cycles, but make sure the CPU is synchronized. */
+static __always_inline cycles_t get_cycles_sync(void)
+{
+ unsigned long long ret;
+ unsigned eax;
+ /* Don't do an additional sync on CPUs where we know
+ RDTSC is already synchronous. */
+ alternative_io(ASM_NOP2, "cpuid", X86_FEATURE_SYNC_RDTSC,
+ "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
+ rdtscll(ret);
+ return ret;
+}
+
extern unsigned int cpu_khz;
extern int read_current_timer(unsigned long *timer_value);
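Editor's note: get_cycles_sync() above serializes the instruction stream before RDTSC unless the CPU is known to have a synchronous TSC, in which case alternative_io() patches the cpuid away at boot. A userspace approximation of the same idea (hedged: it uses the compiler's cpuid/rdtsc intrinsics and always serializes, it is not the kernel's patched version, and it is x86-only):

#include <stdio.h>
#include <stdint.h>
#include <cpuid.h>              /* __get_cpuid() */
#include <x86intrin.h>          /* __rdtsc() */

static uint64_t cycles_sync(void)
{
        unsigned int a, b, c, d;

        /* cpuid is a serializing instruction: it prevents the rdtsc
         * below from being speculated ahead of earlier work. */
        __get_cpuid(0, &a, &b, &c, &d);
        return __rdtsc();
}

int main(void)
{
        uint64_t t0 = cycles_sync();
        uint64_t t1 = cycles_sync();
        printf("delta: %llu cycles\n", (unsigned long long)(t1 - t0));
        return 0;
}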
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index 7d82bc56b9f..2fa7f27381b 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -39,7 +39,6 @@ extern int __node_distance(int, int);
.max_interval = 32, \
.busy_factor = 32, \
.imbalance_pct = 125, \
- .cache_hot_time = (10*1000000), \
.cache_nice_tries = 2, \
.busy_idx = 3, \
.idle_idx = 2, \
diff --git a/include/asm-x86_64/uaccess.h b/include/asm-x86_64/uaccess.h
index 1bb8b8a2443..2892c4b7a28 100644
--- a/include/asm-x86_64/uaccess.h
+++ b/include/asm-x86_64/uaccess.h
@@ -348,6 +348,7 @@ static inline int __copy_in_user(void __user *dst, const void __user *src, unsig
long strncpy_from_user(char *dst, const char __user *src, long count);
long __strncpy_from_user(char *dst, const char __user *src, long count);
long strnlen_user(const char __user *str, long n);
+long __strnlen_user(const char __user *str, long n);
long strlen_user(const char __user *str);
unsigned long clear_user(void __user *mem, unsigned long len);
unsigned long __clear_user(void __user *mem, unsigned long len);
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 2c42150bce0..e6f896161c1 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -571,8 +571,10 @@ __SYSCALL(__NR_inotify_init, sys_inotify_init)
__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
#define __NR_inotify_rm_watch 255
__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
+#define __NR_migrate_pages 256
+__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
-#define __NR_syscall_max __NR_inotify_rm_watch
+#define __NR_syscall_max __NR_migrate_pages
#ifndef __NO_STUBS
/* user-visible error numbers are in the range -1 - -4095 */
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h
index 438a3f52f83..a85e16f56d7 100644
--- a/include/asm-x86_64/vsyscall.h
+++ b/include/asm-x86_64/vsyscall.h
@@ -36,8 +36,8 @@ struct vxtime_data {
int mode;
};
-#define hpet_readl(a) readl((void *)fix_to_virt(FIX_HPET_BASE) + a)
-#define hpet_writel(d,a) writel(d, (void *)fix_to_virt(FIX_HPET_BASE) + a)
+#define hpet_readl(a) readl((const void __iomem *)fix_to_virt(FIX_HPET_BASE) + a)
+#define hpet_writel(d,a) writel(d, (void __iomem *)fix_to_virt(FIX_HPET_BASE) + a)
/* vsyscall space (readonly) */
extern struct vxtime_data __vxtime;
diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h
index 3670cc7695d..fe105a12392 100644
--- a/include/asm-xtensa/atomic.h
+++ b/include/asm-xtensa/atomic.h
@@ -224,6 +224,7 @@ static inline int atomic_sub_return(int i, atomic_t * v)
#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
* atomic_add_unless - add unless the number is a given value
@@ -286,6 +287,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
+#include <asm-generic/atomic.h>
#endif /* __KERNEL__ */
#endif /* _XTENSA_ATOMIC_H */
diff --git a/include/asm-xtensa/ioctl.h b/include/asm-xtensa/ioctl.h
index 856c605d62b..b279fe06dfe 100644
--- a/include/asm-xtensa/ioctl.h
+++ b/include/asm-xtensa/ioctl.h
@@ -1,83 +1 @@
-/*
- * include/asm-xtensa/ioctl.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2003 - 2005 Tensilica Inc.
- *
- * Derived from "include/asm-i386/ioctl.h"
- */
-
-#ifndef _XTENSA_IOCTL_H
-#define _XTENSA_IOCTL_H
-
-
-/* ioctl command encoding: 32 bits total, command in lower 16 bits,
- * size of the parameter structure in the lower 14 bits of the
- * upper 16 bits.
- * Encoding the size of the parameter structure in the ioctl request
- * is useful for catching programs compiled with old versions
- * and to avoid overwriting user space outside the user buffer area.
- * The highest 2 bits are reserved for indicating the ``access mode''.
- * NOTE: This limits the max parameter size to 16kB -1 !
- */
-
-/*
- * The following is for compatibility across the various Linux
- * platforms. The i386 ioctl numbering scheme doesn't really enforce
- * a type field. De facto, however, the top 8 bits of the lower 16
- * bits are indeed used as a type field, so we might just as well make
- * this explicit here. Please be sure to use the decoding macros
- * below from now on.
- */
-#define _IOC_NRBITS 8
-#define _IOC_TYPEBITS 8
-#define _IOC_SIZEBITS 14
-#define _IOC_DIRBITS 2
-
-#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
-#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
-#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
-#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
-
-#define _IOC_NRSHIFT 0
-#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
-#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
-#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
-
-/*
- * Direction bits.
- */
-#define _IOC_NONE 0U
-#define _IOC_WRITE 1U
-#define _IOC_READ 2U
-
-#define _IOC(dir,type,nr,size) \
- (((dir) << _IOC_DIRSHIFT) | \
- ((type) << _IOC_TYPESHIFT) | \
- ((nr) << _IOC_NRSHIFT) | \
- ((size) << _IOC_SIZESHIFT))
-
-/* used to create numbers */
-#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
-#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
-#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
-#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
-
-/* used to decode ioctl numbers.. */
-#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
-#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
-#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
-#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
-
-/* ...and for the drivers/sound files... */
-
-#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
-#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
-#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
-#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
-#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
-
-#endif
+#include <asm-generic/ioctl.h>
diff --git a/include/asm-xtensa/mman.h b/include/asm-xtensa/mman.h
index 9a95a45df99..082a7504925 100644
--- a/include/asm-xtensa/mman.h
+++ b/include/asm-xtensa/mman.h
@@ -72,6 +72,7 @@
#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
+#define MADV_REMOVE 0x5 /* remove these pages & resources */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
diff --git a/include/asm-xtensa/mutex.h b/include/asm-xtensa/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/include/asm-xtensa/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-xtensa/processor.h b/include/asm-xtensa/processor.h
index 9cab5e4298b..d1d72ad36f0 100644
--- a/include/asm-xtensa/processor.h
+++ b/include/asm-xtensa/processor.h
@@ -184,12 +184,12 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
#define release_segments(mm) do { } while(0)
#define forget_segments() do { } while (0)
-#define thread_saved_pc(tsk) (xtensa_pt_regs(tsk)->pc)
+#define thread_saved_pc(tsk) (task_pt_regs(tsk)->pc)
extern unsigned long get_wchan(struct task_struct *p);
-#define KSTK_EIP(tsk) (xtensa_pt_regs(tsk)->pc)
-#define KSTK_ESP(tsk) (xtensa_pt_regs(tsk)->areg[1])
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->areg[1])
#define cpu_relax() do { } while (0)
diff --git a/include/asm-xtensa/ptrace.h b/include/asm-xtensa/ptrace.h
index aa4fd7fb3ce..a5ac71a5205 100644
--- a/include/asm-xtensa/ptrace.h
+++ b/include/asm-xtensa/ptrace.h
@@ -113,8 +113,8 @@ struct pt_regs {
};
#ifdef __KERNEL__
-# define xtensa_pt_regs(tsk) ((struct pt_regs*) \
- (((long)(tsk)->thread_info + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4)) - 1)
+# define task_pt_regs(tsk) ((struct pt_regs*) \
+ (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
# define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
# define instruction_pointer(regs) ((regs)->pc)
extern void show_regs(struct pt_regs *);
diff --git a/include/asm-xtensa/thread_info.h b/include/asm-xtensa/thread_info.h
index af208d41fd8..5ae34ab7159 100644
--- a/include/asm-xtensa/thread_info.h
+++ b/include/asm-xtensa/thread_info.h
@@ -93,8 +93,6 @@ static inline struct thread_info *current_thread_info(void)
/* thread information allocation */
#define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
#else /* !__ASSEMBLY__ */
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
index 26f6ec38577..a3dae1803f4 100644
--- a/include/keys/user-type.h
+++ b/include/keys/user-type.h
@@ -35,7 +35,6 @@ struct user_key_payload {
extern struct key_type key_type_user;
extern int user_instantiate(struct key *key, const void *data, size_t datalen);
-extern int user_duplicate(struct key *key, const struct key *source);
extern int user_update(struct key *key, const void *data, size_t datalen);
extern int user_match(const struct key *key, const void *criterion);
extern void user_destroy(struct key *key);
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 49fd37629ee..00c8efa95cc 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -94,26 +94,27 @@ struct kiocb {
ssize_t (*ki_retry)(struct kiocb *);
void (*ki_dtor)(struct kiocb *);
- struct list_head ki_list; /* the aio core uses this
- * for cancellation */
-
union {
void __user *user;
struct task_struct *tsk;
} ki_obj;
+
__u64 ki_user_data; /* user's data for completion */
+ wait_queue_t ki_wait;
loff_t ki_pos;
+
+ void *private;
/* State that we remember to be able to restart/retry */
unsigned short ki_opcode;
size_t ki_nbytes; /* copy of iocb->aio_nbytes */
char __user *ki_buf; /* remaining iocb->aio_buf */
size_t ki_left; /* remaining bytes */
- wait_queue_t ki_wait;
long ki_retried; /* just for testing */
long ki_kicked; /* just for testing */
long ki_queued; /* just for testing */
- void *private;
+ struct list_head ki_list; /* the aio core uses this
+ * for cancellation */
};
#define is_sync_kiocb(iocb) ((iocb)->ki_key == KIOCB_SYNC_KEY)
@@ -126,6 +127,7 @@ struct kiocb {
(x)->ki_filp = (filp); \
(x)->ki_ctx = NULL; \
(x)->ki_cancel = NULL; \
+ (x)->ki_retry = NULL; \
(x)->ki_dtor = NULL; \
(x)->ki_obj.tsk = tsk; \
(x)->ki_user_data = 0; \
diff --git a/include/asm-arm/hardware/amba.h b/include/linux/amba/bus.h
index 51e6e54b2aa..51e6e54b2aa 100644
--- a/include/asm-arm/hardware/amba.h
+++ b/include/linux/amba/bus.h
diff --git a/include/asm-arm/hardware/amba_clcd.h b/include/linux/amba/clcd.h
index 6b8d73dc1ab..6b8d73dc1ab 100644
--- a/include/asm-arm/hardware/amba_clcd.h
+++ b/include/linux/amba/clcd.h
diff --git a/include/asm-arm/hardware/amba_kmi.h b/include/linux/amba/kmi.h
index a39e5be751b..a39e5be751b 100644
--- a/include/asm-arm/hardware/amba_kmi.h
+++ b/include/linux/amba/kmi.h
diff --git a/include/asm-arm/hardware/amba_serial.h b/include/linux/amba/serial.h
index dc726ffcceb..dc726ffcceb 100644
--- a/include/asm-arm/hardware/amba_serial.h
+++ b/include/linux/amba/serial.h
diff --git a/include/linux/ata.h b/include/linux/ata.h
index d2873b732bb..94f77cce27f 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -129,6 +129,7 @@ enum {
ATA_CMD_READ_EXT = 0x25,
ATA_CMD_WRITE = 0xCA,
ATA_CMD_WRITE_EXT = 0x35,
+ ATA_CMD_WRITE_FUA_EXT = 0x3D,
ATA_CMD_PIO_READ = 0x20,
ATA_CMD_PIO_READ_EXT = 0x24,
ATA_CMD_PIO_WRITE = 0x30,
@@ -137,10 +138,13 @@ enum {
ATA_CMD_READ_MULTI_EXT = 0x29,
ATA_CMD_WRITE_MULTI = 0xC5,
ATA_CMD_WRITE_MULTI_EXT = 0x39,
+ ATA_CMD_WRITE_MULTI_FUA_EXT = 0xCE,
ATA_CMD_SET_FEATURES = 0xEF,
ATA_CMD_PACKET = 0xA0,
ATA_CMD_VERIFY = 0x40,
ATA_CMD_VERIFY_EXT = 0x42,
+ ATA_CMD_STANDBYNOW1 = 0xE0,
+ ATA_CMD_IDLEIMMEDIATE = 0xE1,
ATA_CMD_INIT_DEV_PARAMS = 0x91,
/* SETFEATURES stuff */
@@ -192,6 +196,7 @@ enum {
ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */
ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */
ATA_TFLAG_LBA = (1 << 4), /* enable LBA */
+ ATA_TFLAG_FUA = (1 << 5), /* enable FUA */
};
enum ata_tf_protocols {
@@ -245,7 +250,8 @@ struct ata_taskfile {
#define ata_id_is_sata(id) ((id)[93] == 0)
#define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6))
#define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5))
-#define ata_id_has_flush(id) ((id)[83] & (1 << 12))
+#define ata_id_has_fua(id) ((id)[84] & (1 << 6))
+#define ata_id_has_flush(id) ((id)[83] & (1 << 12))
#define ata_id_has_flush_ext(id) ((id)[83] & (1 << 13))
#define ata_id_has_lba48(id) ((id)[83] & (1 << 10))
#define ata_id_has_wcache(id) ((id)[82] & (1 << 5))
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
index 911c09cb9bf..6ba3aa8a81f 100644
--- a/include/linux/atalk.h
+++ b/include/linux/atalk.h
@@ -155,15 +155,15 @@ struct elapaarp {
#define AARP_REQUEST 1
#define AARP_REPLY 2
#define AARP_PROBE 3
- __u8 hw_src[ETH_ALEN] __attribute__ ((packed));
- __u8 pa_src_zero __attribute__ ((packed));
- __be16 pa_src_net __attribute__ ((packed));
- __u8 pa_src_node __attribute__ ((packed));
- __u8 hw_dst[ETH_ALEN] __attribute__ ((packed));
- __u8 pa_dst_zero __attribute__ ((packed));
- __be16 pa_dst_net __attribute__ ((packed));
- __u8 pa_dst_node __attribute__ ((packed));
-};
+ __u8 hw_src[ETH_ALEN];
+ __u8 pa_src_zero;
+ __be16 pa_src_net;
+ __u8 pa_src_node;
+ __u8 hw_dst[ETH_ALEN];
+ __u8 pa_dst_zero;
+ __be16 pa_dst_net;
+ __u8 pa_dst_node;
+} __attribute__ ((packed));
static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb)
{
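Editor's note: the atalk.h hunk above replaces per-member __attribute__((packed)) annotations with a single attribute on the whole struct. A standalone sketch of what the struct-level attribute does to layout (the field names and sizes here are illustrative, not taken from elapaarp):

#include <stdio.h>
#include <stdint.h>

struct unpacked_hdr {
        uint8_t  node;
        uint16_t net;           /* normally padded to a 2-byte boundary */
        uint8_t  zero;
};

struct packed_hdr {
        uint8_t  node;
        uint16_t net;           /* no padding: byte-aligned, wire layout */
        uint8_t  zero;
} __attribute__((packed));

int main(void)
{
        printf("unpacked: %zu bytes, packed: %zu bytes\n",
               sizeof(struct unpacked_hdr), sizeof(struct packed_hdr));
        return 0;       /* typically prints "unpacked: 6 bytes, packed: 4 bytes" */
}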
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
index 9a7b374c9fb..d2bc0d66e65 100644
--- a/include/linux/auxvec.h
+++ b/include/linux/auxvec.h
@@ -26,6 +26,6 @@
#define AT_SECURE 23 /* secure mode boolean */
-#define AT_VECTOR_SIZE 42 /* Size of auxiliary table. */
+#define AT_VECTOR_SIZE 44 /* Size of auxiliary table. */
#endif /* _LINUX_AUXVEC_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index a18500d196e..02a585faa62 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -102,7 +102,7 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc);
void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
struct request;
-typedef void (rq_end_io_fn)(struct request *);
+typedef void (rq_end_io_fn)(struct request *, int);
struct request_list {
int count[2];
@@ -118,9 +118,9 @@ struct request_list {
* try to put the fields that are referenced together in the same cacheline
*/
struct request {
- struct list_head queuelist; /* looking for ->queue? you must _not_
- * access it directly, use
- * blkdev_dequeue_request! */
+ struct list_head queuelist;
+ struct list_head donelist;
+
unsigned long flags; /* see REQ_ bits below */
/* Maintain bio traversal state for part by part I/O submission.
@@ -141,6 +141,7 @@ struct request {
struct bio *biotail;
void *elevator_private;
+ void *completion_data;
unsigned short ioprio;
@@ -207,6 +208,7 @@ enum rq_flag_bits {
__REQ_SORTED, /* elevator knows about this request */
__REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
__REQ_HARDBARRIER, /* may not be passed by drive either */
+ __REQ_FUA, /* forced unit access */
__REQ_CMD, /* is a regular fs rw request */
__REQ_NOMERGE, /* don't touch this for merging */
__REQ_STARTED, /* drive already may have started this one */
@@ -230,9 +232,7 @@ enum rq_flag_bits {
__REQ_PM_SUSPEND, /* suspend request */
__REQ_PM_RESUME, /* resume request */
__REQ_PM_SHUTDOWN, /* shutdown request */
- __REQ_BAR_PREFLUSH, /* barrier pre-flush done */
- __REQ_BAR_POSTFLUSH, /* barrier post-flush */
- __REQ_BAR_FLUSH, /* rq is the flush request */
+ __REQ_ORDERED_COLOR, /* is before or after barrier */
__REQ_NR_BITS, /* stops here */
};
@@ -241,6 +241,7 @@ enum rq_flag_bits {
#define REQ_SORTED (1 << __REQ_SORTED)
#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
+#define REQ_FUA (1 << __REQ_FUA)
#define REQ_CMD (1 << __REQ_CMD)
#define REQ_NOMERGE (1 << __REQ_NOMERGE)
#define REQ_STARTED (1 << __REQ_STARTED)
@@ -260,9 +261,7 @@ enum rq_flag_bits {
#define REQ_PM_SUSPEND (1 << __REQ_PM_SUSPEND)
#define REQ_PM_RESUME (1 << __REQ_PM_RESUME)
#define REQ_PM_SHUTDOWN (1 << __REQ_PM_SHUTDOWN)
-#define REQ_BAR_PREFLUSH (1 << __REQ_BAR_PREFLUSH)
-#define REQ_BAR_POSTFLUSH (1 << __REQ_BAR_POSTFLUSH)
-#define REQ_BAR_FLUSH (1 << __REQ_BAR_FLUSH)
+#define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR)
/*
* State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME
@@ -292,8 +291,8 @@ struct bio_vec;
typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
typedef void (activity_fn) (void *data, int rw);
typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
-typedef int (prepare_flush_fn) (request_queue_t *, struct request *);
-typedef void (end_flush_fn) (request_queue_t *, struct request *);
+typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
+typedef void (softirq_done_fn)(struct request *);
enum blk_queue_state {
Queue_down,
@@ -335,7 +334,7 @@ struct request_queue
activity_fn *activity_fn;
issue_flush_fn *issue_flush_fn;
prepare_flush_fn *prepare_flush_fn;
- end_flush_fn *end_flush_fn;
+ softirq_done_fn *softirq_done_fn;
/*
* Dispatch queue sorting
@@ -420,14 +419,11 @@ struct request_queue
/*
* reserved for flush operations
*/
- struct request *flush_rq;
- unsigned char ordered;
-};
-
-enum {
- QUEUE_ORDERED_NONE,
- QUEUE_ORDERED_TAG,
- QUEUE_ORDERED_FLUSH,
+ unsigned int ordered, next_ordered, ordseq;
+ int orderr, ordcolor;
+ struct request pre_flush_rq, bar_rq, post_flush_rq;
+ struct request *orig_bar_rq;
+ unsigned int bi_size;
};
#define RQ_INACTIVE (-1)
@@ -445,12 +441,51 @@ enum {
#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_FLUSH 9 /* doing barrier flush sequence */
+
+enum {
+ /*
+ * Hardbarrier is supported with one of the following methods.
+ *
+ * NONE : hardbarrier unsupported
+ * DRAIN : ordering by draining is enough
+ * DRAIN_FLUSH : ordering by draining w/ pre and post flushes
+ * DRAIN_FUA : ordering by draining w/ pre flush and FUA write
+ * TAG : ordering by tag is enough
+ * TAG_FLUSH : ordering by tag w/ pre and post flushes
+ * TAG_FUA : ordering by tag w/ pre flush and FUA write
+ */
+ QUEUE_ORDERED_NONE = 0x00,
+ QUEUE_ORDERED_DRAIN = 0x01,
+ QUEUE_ORDERED_TAG = 0x02,
+
+ QUEUE_ORDERED_PREFLUSH = 0x10,
+ QUEUE_ORDERED_POSTFLUSH = 0x20,
+ QUEUE_ORDERED_FUA = 0x40,
+
+ QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
+ QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
+ QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN |
+ QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+ QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG |
+ QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
+ QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG |
+ QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+
+ /*
+ * Ordered operation sequence
+ */
+ QUEUE_ORDSEQ_STARTED = 0x01, /* flushing in progress */
+ QUEUE_ORDSEQ_DRAIN = 0x02, /* waiting for the queue to be drained */
+ QUEUE_ORDSEQ_PREFLUSH = 0x04, /* pre-flushing in progress */
+ QUEUE_ORDSEQ_BAR = 0x08, /* original barrier req in progress */
+ QUEUE_ORDSEQ_POSTFLUSH = 0x10, /* post-flushing in progress */
+ QUEUE_ORDSEQ_DONE = 0x20,
+};
#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
-#define blk_queue_flushing(q) test_bit(QUEUE_FLAG_FLUSH, &(q)->queue_flags)
+#define blk_queue_flushing(q) ((q)->ordseq)
#define blk_fs_request(rq) ((rq)->flags & REQ_CMD)
#define blk_pc_request(rq) ((rq)->flags & REQ_BLOCK_PC)
@@ -466,8 +501,7 @@ enum {
#define blk_sorted_rq(rq) ((rq)->flags & REQ_SORTED)
#define blk_barrier_rq(rq) ((rq)->flags & REQ_HARDBARRIER)
-#define blk_barrier_preflush(rq) ((rq)->flags & REQ_BAR_PREFLUSH)
-#define blk_barrier_postflush(rq) ((rq)->flags & REQ_BAR_POSTFLUSH)
+#define blk_fua_rq(rq) ((rq)->flags & REQ_FUA)
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
@@ -560,8 +594,7 @@ extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(request_queue_t *, struct request *);
-extern void blk_end_sync_rq(struct request *rq);
-extern void blk_attempt_remerge(request_queue_t *, struct request *);
+extern void blk_end_sync_rq(struct request *rq, int error);
extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
extern void blk_requeue_request(request_queue_t *, struct request *);
@@ -582,8 +615,7 @@ extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_io
extern int blk_execute_rq(request_queue_t *, struct gendisk *,
struct request *, int);
extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
- struct request *, int,
- void (*done)(struct request *));
+ struct request *, int, rq_end_io_fn *);
static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
{
@@ -614,8 +646,19 @@ static inline void blk_run_address_space(struct address_space *mapping)
*/
extern int end_that_request_first(struct request *, int, int);
extern int end_that_request_chunk(struct request *, int, int);
-extern void end_that_request_last(struct request *);
+extern void end_that_request_last(struct request *, int);
extern void end_request(struct request *req, int uptodate);
+extern void blk_complete_request(struct request *);
+
+static inline int rq_all_done(struct request *rq, unsigned int nr_bytes)
+{
+ if (blk_fs_request(rq))
+ return (nr_bytes >= (rq->hard_nr_sectors << 9));
+ else if (blk_pc_request(rq))
+ return nr_bytes >= rq->data_len;
+
+ return 0;
+}
/*
* end_that_request_first/chunk() takes an uptodate argument. we account
@@ -664,12 +707,14 @@ extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(request_queue_t *, int);
+extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern void blk_queue_ordered(request_queue_t *, int);
+extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
-extern struct request *blk_start_pre_flush(request_queue_t *,struct request *);
-extern int blk_complete_barrier_rq(request_queue_t *, struct request *, int);
-extern int blk_complete_barrier_rq_locked(request_queue_t *, struct request *, int);
+extern int blk_do_ordered(request_queue_t *, struct request **);
+extern unsigned blk_ordered_cur_seq(request_queue_t *);
+extern unsigned blk_ordered_req_seq(struct request *);
+extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
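Editor's note: the QUEUE_ORDERED_* comment above explains how a driver picks a barrier method (draining vs. tagging, combined with pre/post flushes or a FUA write). A hedged sketch of the new registration call; the driver functions are hypothetical, while blk_queue_ordered() and the prepare_flush_fn prototype come from the declarations in this hunk:

/* Hypothetical driver init, sketching the new blk_queue_ordered() API. */
static void mydrv_prepare_flush(request_queue_t *q, struct request *rq)
{
        /* turn rq into a cache-flush command for the hardware */
}

static int mydrv_init_queue(request_queue_t *q, int has_fua, int has_flush)
{
        unsigned ordered;

        if (has_fua)
                ordered = QUEUE_ORDERED_DRAIN_FUA;      /* pre-flush + FUA write */
        else if (has_flush)
                ordered = QUEUE_ORDERED_DRAIN_FLUSH;    /* pre- and post-flush */
        else
                ordered = QUEUE_ORDERED_DRAIN;          /* draining is enough */

        return blk_queue_ordered(q, ordered, mydrv_prepare_flush);
}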
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 3b03b0b868d..993da8cc970 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -43,50 +43,38 @@ typedef struct bootmem_data {
extern unsigned long __init bootmem_bootmap_pages (unsigned long);
extern unsigned long __init init_bootmem (unsigned long addr, unsigned long memend);
extern void __init free_bootmem (unsigned long addr, unsigned long size);
-extern void * __init __alloc_bootmem_limit (unsigned long size, unsigned long align, unsigned long goal, unsigned long limit);
+extern void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal);
+extern void * __init __alloc_bootmem_low(unsigned long size,
+ unsigned long align,
+ unsigned long goal);
+extern void * __init __alloc_bootmem_low_node(pg_data_t *pgdat,
+ unsigned long size,
+ unsigned long align,
+ unsigned long goal);
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
extern void __init reserve_bootmem (unsigned long addr, unsigned long size);
#define alloc_bootmem(x) \
__alloc_bootmem((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low(x) \
- __alloc_bootmem((x), SMP_CACHE_BYTES, 0)
+ __alloc_bootmem_low((x), SMP_CACHE_BYTES, 0)
#define alloc_bootmem_pages(x) \
__alloc_bootmem((x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages(x) \
- __alloc_bootmem((x), PAGE_SIZE, 0)
-
-#define alloc_bootmem_limit(x, limit) \
- __alloc_bootmem_limit((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS), (limit))
-#define alloc_bootmem_low_limit(x, limit) \
- __alloc_bootmem_limit((x), SMP_CACHE_BYTES, 0, (limit))
-#define alloc_bootmem_pages_limit(x, limit) \
- __alloc_bootmem_limit((x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS), (limit))
-#define alloc_bootmem_low_pages_limit(x, limit) \
- __alloc_bootmem_limit((x), PAGE_SIZE, 0, (limit))
-
+ __alloc_bootmem_low((x), PAGE_SIZE, 0)
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
extern unsigned long __init free_all_bootmem (void);
-
+extern void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal);
extern unsigned long __init init_bootmem_node (pg_data_t *pgdat, unsigned long freepfn, unsigned long startpfn, unsigned long endpfn);
extern void __init reserve_bootmem_node (pg_data_t *pgdat, unsigned long physaddr, unsigned long size);
extern void __init free_bootmem_node (pg_data_t *pgdat, unsigned long addr, unsigned long size);
extern unsigned long __init free_all_bootmem_node (pg_data_t *pgdat);
-extern void * __init __alloc_bootmem_node_limit (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal, unsigned long limit);
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
#define alloc_bootmem_node(pgdat, x) \
__alloc_bootmem_node((pgdat), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages_node(pgdat, x) \
__alloc_bootmem_node((pgdat), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages_node(pgdat, x) \
- __alloc_bootmem_node((pgdat), (x), PAGE_SIZE, 0)
-
-#define alloc_bootmem_node_limit(pgdat, x, limit) \
- __alloc_bootmem_node_limit((pgdat), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS), (limit))
-#define alloc_bootmem_pages_node_limit(pgdat, x, limit) \
- __alloc_bootmem_node_limit((pgdat), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS), (limit))
-#define alloc_bootmem_low_pages_node_limit(pgdat, x, limit) \
- __alloc_bootmem_node_limit((pgdat), (x), PAGE_SIZE, 0, (limit))
-
+ __alloc_bootmem_low_node((pgdat), (x), PAGE_SIZE, 0)
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
@@ -123,15 +111,5 @@ extern void *__init alloc_large_system_hash(const char *tablename,
#endif
extern int __initdata hashdist; /* Distribute hashes across NUMA nodes? */
-static inline void *__alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal)
-{
- return __alloc_bootmem_limit(size, align, goal, 0);
-}
-
-static inline void *__alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align,
- unsigned long goal)
-{
- return __alloc_bootmem_node_limit(pgdat, size, align, goal, 0);
-}
#endif /* _LINUX_BOOTMEM_H */
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 1db061bb6b0..9f159baf153 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -197,7 +197,8 @@ int block_read_full_page(struct page*, get_block_t*);
int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
int cont_prepare_write(struct page*, unsigned, unsigned, get_block_t*,
loff_t *);
-int generic_cont_expand(struct inode *inode, loff_t size) ;
+int generic_cont_expand(struct inode *inode, loff_t size);
+int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_sync_page(struct page *);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h
index 04bd756efc6..e86e4a93837 100644
--- a/include/linux/byteorder/generic.h
+++ b/include/linux/byteorder/generic.h
@@ -156,7 +156,7 @@ extern __be32 htonl(__u32);
extern __u16 ntohs(__be16);
extern __be16 htons(__u16);
-#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__)
+#if defined(__GNUC__) && defined(__OPTIMIZE__)
#define ___htonl(x) __cpu_to_be32(x)
#define ___htons(x) __cpu_to_be16(x)
diff --git a/include/linux/byteorder/swab.h b/include/linux/byteorder/swab.h
index 2f1cb775125..25f7f32883e 100644
--- a/include/linux/byteorder/swab.h
+++ b/include/linux/byteorder/swab.h
@@ -110,7 +110,7 @@
/*
* Allow constant folding
*/
-#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__)
+#if defined(__GNUC__) && defined(__OPTIMIZE__)
# define __swab16(x) \
(__builtin_constant_p((__u16)(x)) ? \
___swab16((x)) : \
diff --git a/include/linux/byteorder/swabb.h b/include/linux/byteorder/swabb.h
index d5f2a320510..ae5e5f914bf 100644
--- a/include/linux/byteorder/swabb.h
+++ b/include/linux/byteorder/swabb.h
@@ -77,7 +77,7 @@
/*
* Allow constant folding
*/
-#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__)
+#if defined(__GNUC__) && defined(__OPTIMIZE__)
# define __swahw32(x) \
(__builtin_constant_p((__u32)(x)) ? \
___swahw32((x)) : \
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 0b7ecf3af78..d22e632f41f 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -13,7 +13,7 @@
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif
-#if defined(CONFIG_X86) || defined(CONFIG_SPARC64) || defined(CONFIG_IA64)
+#if defined(CONFIG_X86) || defined(CONFIG_SPARC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
#define __read_mostly __attribute__((__section__(".data.read_mostly")))
#else
#define __read_mostly
@@ -45,12 +45,21 @@
#endif /* CONFIG_SMP */
#endif
-#if !defined(____cacheline_maxaligned_in_smp)
+/*
+ * The maximum alignment needed for some critical structures
+ * These could be inter-node cacheline sizes/L3 cacheline
+ * size etc. Define this in asm/cache.h for your arch
+ */
+#ifndef INTERNODE_CACHE_SHIFT
+#define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT
+#endif
+
+#if !defined(____cacheline_internodealigned_in_smp)
#if defined(CONFIG_SMP)
-#define ____cacheline_maxaligned_in_smp \
- __attribute__((__aligned__(1 << (L1_CACHE_SHIFT_MAX))))
+#define ____cacheline_internodealigned_in_smp \
+ __attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))
#else
-#define ____cacheline_maxaligned_in_smp
+#define ____cacheline_internodealigned_in_smp
#endif
#endif
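Editor's note: a short sketch of how the renamed ____cacheline_internodealigned_in_smp attribute is meant to be used; the structure itself is illustrative, only the attribute comes from the hunk above.

/* Pad a hot structure out to the internode cacheline size so that
 * two nodes never false-share it (no-op on !SMP builds). */
struct node_stats {
        unsigned long hits;
        unsigned long misses;
} ____cacheline_internodealigned_in_smp;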
diff --git a/include/linux/calc64.h b/include/linux/calc64.h
new file mode 100644
index 00000000000..ebf4b8f38d8
--- /dev/null
+++ b/include/linux/calc64.h
@@ -0,0 +1,49 @@
+#ifndef _LINUX_CALC64_H
+#define _LINUX_CALC64_H
+
+#include <linux/types.h>
+#include <asm/div64.h>
+
+/*
+ * This is a generic macro which is used when the architecture
+ * specific div64.h does not provide an optimized one.
+ *
+ * The 64bit dividend is divided by the divisor (data type long), the
+ * result is returned and the remainder stored in the variable
+ * referenced by remainder (data type long *). In contrast to the
+ * do_div macro, the dividend is kept intact.
+ */
+#ifndef div_long_long_rem
+#define div_long_long_rem(dividend, divisor, remainder) \
+ do_div_llr((dividend), divisor, remainder)
+
+static inline unsigned long do_div_llr(const long long dividend,
+ const long divisor, long *remainder)
+{
+ u64 result = dividend;
+
+ *(remainder) = do_div(result, divisor);
+ return (unsigned long) result;
+}
+#endif
+
+/*
+ * Sign aware variation of the above. On some architectures a
+ * negative dividend leads to a divide overflow exception, which
+ * is avoided by the sign check.
+ */
+static inline long div_long_long_rem_signed(const long long dividend,
+ const long divisor, long *remainder)
+{
+ long res;
+
+ if (unlikely(dividend < 0)) {
+ res = -div_long_long_rem(-dividend, divisor, remainder);
+ *remainder = -(*remainder);
+ } else
+ res = div_long_long_rem(dividend, divisor, remainder);
+
+ return res;
+}
+
+#endif
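A minimal usage sketch under the semantics stated above (the values and the NSEC_PER_SEC divisor are only examples, not from the patch); unlike do_div(), the dividend stays intact:

	long long ns = 1234567890123LL;	/* hypothetical 64-bit nanosecond count */
	long rem;
	unsigned long sec;

	sec = div_long_long_rem(ns, NSEC_PER_SEC, &rem);
	/* ns is unchanged; sec = ns / NSEC_PER_SEC, rem = ns % NSEC_PER_SEC */

For a dividend that may be negative, div_long_long_rem_signed() performs the sign check first and negates the result and remainder.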
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 6b4618902d3..5a23ce75262 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -43,6 +43,7 @@ typedef struct __user_cap_data_struct {
#ifdef __KERNEL__
#include <linux/spinlock.h>
+#include <asm/current.h>
/* #define STRICT_CAP_T_TYPECHECKS */
@@ -356,6 +357,8 @@ static inline kernel_cap_t cap_invert(kernel_cap_t c)
#define cap_is_fs_cap(c) (CAP_TO_MASK(c) & CAP_FS_MASK)
+extern int capable(int cap);
+
#endif /* __KERNEL__ */
#endif /* !_LINUX_CAPABILITY_H */
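A hedged one-line sketch of the newly centralised declaration in use, as permission checks throughout the tree already do:

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

The extern declaration means such callers can rely on linux/capability.h itself rather than on another header declaring capable().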
diff --git a/include/asm-arm/hardware/clock.h b/include/linux/clk.h
index 19da861e523..12848f81bb3 100644
--- a/include/asm-arm/hardware/clock.h
+++ b/include/linux/clk.h
@@ -1,5 +1,5 @@
/*
- * linux/include/asm-arm/hardware/clock.h
+ * linux/include/linux/clk.h
*
* Copyright (C) 2004 ARM Limited.
* Written by Deep Blue Solutions Limited.
@@ -33,6 +33,8 @@ struct clk;
* uses @dev and @id to determine the clock consumer, and thereby
* the clock producer. (IOW, @id may be identical strings, but
* clk_get may return different clock producers depending on @dev.)
+ *
+ * Drivers must assume that the clock source is not enabled.
*/
struct clk *clk_get(struct device *dev, const char *id);
@@ -49,22 +51,16 @@ int clk_enable(struct clk *clk);
/**
* clk_disable - inform the system when the clock source is no longer required.
* @clk: clock source
- */
-void clk_disable(struct clk *clk);
-
-/**
- * clk_use - increment the use count
- * @clk: clock source
*
- * Returns success (0) or negative errno.
- */
-int clk_use(struct clk *clk);
-
-/**
- * clk_unuse - decrement the use count
- * @clk: clock source
+ * Inform the system that a clock source is no longer required by
+ * a driver and may be shut down.
+ *
+ * Implementation detail: if the clock source is shared between
+ * multiple drivers, clk_enable() calls must be balanced by the
+ * same number of clk_disable() calls for the clock source to be
+ * disabled.
*/
-void clk_unuse(struct clk *clk);
+void clk_disable(struct clk *clk);
/**
* clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
@@ -76,6 +72,10 @@ unsigned long clk_get_rate(struct clk *clk);
/**
* clk_put - "free" the clock source
* @clk: clock source
+ *
+ * Note: drivers must ensure that all clk_enable calls made on this
+ * clock source are balanced by clk_disable calls prior to calling
+ * this function.
*/
void clk_put(struct clk *clk);
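With clk_use()/clk_unuse() gone, a consumer balances clk_enable() with clk_disable() itself; a hedged sketch (the device pointer and clock id are placeholders, and the error convention is assumed to be ERR_PTR-based):

	struct clk *clk;
	int ret;

	clk = clk_get(dev, "uart");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_enable(clk);		/* the clock may be off after clk_get() */
	if (ret) {
		clk_put(clk);
		return ret;
	}

	/* ... program and use the peripheral ... */

	clk_disable(clk);		/* balances the clk_enable() above */
	clk_put(clk);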
diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h
index 119f9d064cc..8fad50f8e38 100644
--- a/include/linux/compat_ioctl.h
+++ b/include/linux/compat_ioctl.h
@@ -2,14 +2,6 @@
* compatible types passed or none at all... Please include
* only stuff that is compatible on *all architectures*.
*/
-#ifndef COMPATIBLE_IOCTL /* pointer to compatible structure or no argument */
-#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL((cmd),(ioctl_trans_handler_t)sys_ioctl)
-#endif
-
-#ifndef ULONG_IOCTL /* argument is an unsigned long integer, not a pointer */
-#define ULONG_IOCTL(cmd) HANDLE_IOCTL((cmd),(ioctl_trans_handler_t)sys_ioctl)
-#endif
-
COMPATIBLE_IOCTL(0x4B50) /* KDGHWCLK - not in the kernel, but don't complain */
COMPATIBLE_IOCTL(0x4B51) /* KDSHWCLK - not in the kernel, but don't complain */
@@ -218,32 +210,6 @@ COMPATIBLE_IOCTL(VT_RESIZE)
COMPATIBLE_IOCTL(VT_RESIZEX)
COMPATIBLE_IOCTL(VT_LOCKSWITCH)
COMPATIBLE_IOCTL(VT_UNLOCKSWITCH)
-/* Little v */
-/* Little v, the video4linux ioctls (conflict?) */
-COMPATIBLE_IOCTL(VIDIOCGCAP)
-COMPATIBLE_IOCTL(VIDIOCGCHAN)
-COMPATIBLE_IOCTL(VIDIOCSCHAN)
-COMPATIBLE_IOCTL(VIDIOCGPICT)
-COMPATIBLE_IOCTL(VIDIOCSPICT)
-COMPATIBLE_IOCTL(VIDIOCCAPTURE)
-COMPATIBLE_IOCTL(VIDIOCKEY)
-COMPATIBLE_IOCTL(VIDIOCGAUDIO)
-COMPATIBLE_IOCTL(VIDIOCSAUDIO)
-COMPATIBLE_IOCTL(VIDIOCSYNC)
-COMPATIBLE_IOCTL(VIDIOCMCAPTURE)
-COMPATIBLE_IOCTL(VIDIOCGMBUF)
-COMPATIBLE_IOCTL(VIDIOCGUNIT)
-COMPATIBLE_IOCTL(VIDIOCGCAPTURE)
-COMPATIBLE_IOCTL(VIDIOCSCAPTURE)
-/* BTTV specific... */
-COMPATIBLE_IOCTL(_IOW('v', BASE_VIDIOCPRIVATE+0, char [256]))
-COMPATIBLE_IOCTL(_IOR('v', BASE_VIDIOCPRIVATE+1, char [256]))
-COMPATIBLE_IOCTL(_IOR('v' , BASE_VIDIOCPRIVATE+2, unsigned int))
-COMPATIBLE_IOCTL(_IOW('v' , BASE_VIDIOCPRIVATE+3, char [16])) /* struct bttv_pll_info */
-COMPATIBLE_IOCTL(_IOR('v' , BASE_VIDIOCPRIVATE+4, int))
-COMPATIBLE_IOCTL(_IOR('v' , BASE_VIDIOCPRIVATE+5, int))
-COMPATIBLE_IOCTL(_IOR('v' , BASE_VIDIOCPRIVATE+6, int))
-COMPATIBLE_IOCTL(_IOR('v' , BASE_VIDIOCPRIVATE+7, int))
/* Little p (/dev/rtc, /dev/envctrl, etc.) */
COMPATIBLE_IOCTL(RTC_AIE_ON)
COMPATIBLE_IOCTL(RTC_AIE_OFF)
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 15273405540..6e1c44a935d 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -11,7 +11,22 @@
/* This macro obfuscates arithmetic on a variable address so that gcc
shouldn't recognize the original var, and make assumptions about it */
+/*
+ * Versions of the ppc64 compiler before 4.1 had a bug where use of
+ * RELOC_HIDE could trash r30. The bug can be worked around by changing
+ * the inline assembly constraint from =g to =r; in this particular
+ * case either is valid.
+ */
#define RELOC_HIDE(ptr, off) \
({ unsigned long __ptr; \
- __asm__ ("" : "=g"(__ptr) : "0"(ptr)); \
+ __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
(typeof(ptr)) (__ptr + (off)); })
+
+
+#define inline inline __attribute__((always_inline))
+#define __inline__ __inline__ __attribute__((always_inline))
+#define __inline __inline __attribute__((always_inline))
+#define __deprecated __attribute__((deprecated))
+#define noinline __attribute__((noinline))
+#define __attribute_pure__ __attribute__((pure))
+#define __attribute_const__ __attribute__((__const__))
diff --git a/include/linux/compiler-gcc2.h b/include/linux/compiler-gcc2.h
deleted file mode 100644
index ebed17660c5..00000000000
--- a/include/linux/compiler-gcc2.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* Never include this file directly. Include <linux/compiler.h> instead. */
-
-/* These definitions are for GCC v2.x. */
-
-/* Somewhere in the middle of the GCC 2.96 development cycle, we implemented
- a mechanism by which the user can annotate likely branch directions and
- expect the blocks to be reordered appropriately. Define __builtin_expect
- to nothing for earlier compilers. */
-#include <linux/compiler-gcc.h>
-
-#if __GNUC_MINOR__ < 96
-# define __builtin_expect(x, expected_value) (x)
-#endif
-
-#define __attribute_used__ __attribute__((__unused__))
-
-/*
- * The attribute `pure' is not implemented in GCC versions earlier
- * than 2.96.
- */
-#if __GNUC_MINOR__ >= 96
-# define __attribute_pure__ __attribute__((pure))
-# define __attribute_const__ __attribute__((__const__))
-#endif
-
-/* GCC 2.95.x/2.96 recognize __va_copy, but not va_copy. Actually later GCC's
- * define both va_copy and __va_copy, but the latter may go away, so limit this
- * to this header */
-#define va_copy __va_copy
diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h
index a6fa615afab..4209082ee93 100644
--- a/include/linux/compiler-gcc3.h
+++ b/include/linux/compiler-gcc3.h
@@ -3,29 +3,12 @@
/* These definitions are for GCC v3.x. */
#include <linux/compiler-gcc.h>
-#if __GNUC_MINOR__ >= 1
-# define inline inline __attribute__((always_inline))
-# define __inline__ __inline__ __attribute__((always_inline))
-# define __inline __inline __attribute__((always_inline))
-#endif
-
-#if __GNUC_MINOR__ > 0
-# define __deprecated __attribute__((deprecated))
-#endif
-
#if __GNUC_MINOR__ >= 3
# define __attribute_used__ __attribute__((__used__))
#else
# define __attribute_used__ __attribute__((__unused__))
#endif
-#define __attribute_pure__ __attribute__((pure))
-#define __attribute_const__ __attribute__((__const__))
-
-#if __GNUC_MINOR__ >= 1
-#define noinline __attribute__((noinline))
-#endif
-
#if __GNUC_MINOR__ >= 4
#define __must_check __attribute__((warn_unused_result))
#endif
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 53686c037a0..e913e9beaf6 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -3,14 +3,7 @@
/* These definitions are for GCC v4.x. */
#include <linux/compiler-gcc.h>
-#define inline inline __attribute__((always_inline))
-#define __inline__ __inline__ __attribute__((always_inline))
-#define __inline __inline __attribute__((always_inline))
-#define __deprecated __attribute__((deprecated))
#define __attribute_used__ __attribute__((__used__))
-#define __attribute_pure__ __attribute__((pure))
-#define __attribute_const__ __attribute__((__const__))
-#define noinline __attribute__((noinline))
#define __must_check __attribute__((warn_unused_result))
#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index d7378215b85..f23d3c6fc2c 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -42,8 +42,6 @@ extern void __chk_io_ptr(void __iomem *);
# include <linux/compiler-gcc4.h>
#elif __GNUC__ == 3
# include <linux/compiler-gcc3.h>
-#elif __GNUC__ == 2
-# include <linux/compiler-gcc2.h>
#else
# error Sorry, your compiler is too old/not recognized.
#endif
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
new file mode 100644
index 00000000000..acffb8c9073
--- /dev/null
+++ b/include/linux/configfs.h
@@ -0,0 +1,205 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * configfs.h - definitions for the device driver filesystem
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ * Based on sysfs:
+ * sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
+ *
+ * Based on kobject.h:
+ * Copyright (c) 2002-2003 Patrick Mochel
+ * Copyright (c) 2002-2003 Open Source Development Labs
+ *
+ * configfs Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * Please read Documentation/filesystems/configfs.txt before using the
+ * configfs interface, ESPECIALLY the parts about reference counts and
+ * item destructors.
+ */
+
+#ifndef _CONFIGFS_H_
+#define _CONFIGFS_H_
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/kref.h>
+
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
+
+#define CONFIGFS_ITEM_NAME_LEN 20
+
+struct module;
+
+struct configfs_item_operations;
+struct configfs_group_operations;
+struct configfs_attribute;
+struct configfs_subsystem;
+
+struct config_item {
+ char *ci_name;
+ char ci_namebuf[CONFIGFS_ITEM_NAME_LEN];
+ struct kref ci_kref;
+ struct list_head ci_entry;
+ struct config_item *ci_parent;
+ struct config_group *ci_group;
+ struct config_item_type *ci_type;
+ struct dentry *ci_dentry;
+};
+
+extern int config_item_set_name(struct config_item *, const char *, ...);
+
+static inline char *config_item_name(struct config_item * item)
+{
+ return item->ci_name;
+}
+
+extern void config_item_init(struct config_item *);
+extern void config_item_init_type_name(struct config_item *item,
+ const char *name,
+ struct config_item_type *type);
+extern void config_item_cleanup(struct config_item *);
+
+extern struct config_item * config_item_get(struct config_item *);
+extern void config_item_put(struct config_item *);
+
+struct config_item_type {
+ struct module *ct_owner;
+ struct configfs_item_operations *ct_item_ops;
+ struct configfs_group_operations *ct_group_ops;
+ struct configfs_attribute **ct_attrs;
+};
+
+
+/**
+ * group - a group of config_items of a specific type, belonging
+ * to a specific subsystem.
+ */
+
+struct config_group {
+ struct config_item cg_item;
+ struct list_head cg_children;
+ struct configfs_subsystem *cg_subsys;
+ struct config_group **default_groups;
+};
+
+
+extern void config_group_init(struct config_group *group);
+extern void config_group_init_type_name(struct config_group *group,
+ const char *name,
+ struct config_item_type *type);
+
+
+static inline struct config_group *to_config_group(struct config_item *item)
+{
+ return item ? container_of(item,struct config_group,cg_item) : NULL;
+}
+
+static inline struct config_group *config_group_get(struct config_group *group)
+{
+ return group ? to_config_group(config_item_get(&group->cg_item)) : NULL;
+}
+
+static inline void config_group_put(struct config_group *group)
+{
+ config_item_put(&group->cg_item);
+}
+
+extern struct config_item *config_group_find_obj(struct config_group *, const char *);
+
+
+struct configfs_attribute {
+ char *ca_name;
+ struct module *ca_owner;
+ mode_t ca_mode;
+};
+
+
+/*
+ * If allow_link() exists, the item can symlink(2) out to other
+ * items. If the item is a group, it may support mkdir(2).
+ * Groups supply one of make_group() and make_item(). If the
+ * group supports make_group(), one can create group children. If it
+ * supports make_item(), one can create config_item children. If it has
+ * default_groups on group->default_groups, it has automatically created
+ * group children. default_groups may coexist alongside make_group() or
+ * make_item(), but if the group wishes to have only default_groups
+ * children (disallowing mkdir(2)), it need not provide either function.
+ * If the group has commit(), it supports pending and committed (active)
+ * items.
+ */
+struct configfs_item_operations {
+ void (*release)(struct config_item *);
+ ssize_t (*show_attribute)(struct config_item *, struct configfs_attribute *,char *);
+ ssize_t (*store_attribute)(struct config_item *,struct configfs_attribute *,const char *, size_t);
+ int (*allow_link)(struct config_item *src, struct config_item *target);
+ int (*drop_link)(struct config_item *src, struct config_item *target);
+};
+
+struct configfs_group_operations {
+ struct config_item *(*make_item)(struct config_group *group, const char *name);
+ struct config_group *(*make_group)(struct config_group *group, const char *name);
+ int (*commit_item)(struct config_item *item);
+ void (*drop_item)(struct config_group *group, struct config_item *item);
+};
+
+
+
+/**
+ * Use these macros to make defining attributes easier. See include/linux/device.h
+ * for examples.
+ */
+
+#if 0
+#define __ATTR(_name,_mode,_show,_store) { \
+ .attr = {.ca_name = __stringify(_name), .ca_mode = _mode, .ca_owner = THIS_MODULE }, \
+ .show = _show, \
+ .store = _store, \
+}
+
+#define __ATTR_RO(_name) { \
+ .attr = { .ca_name = __stringify(_name), .ca_mode = 0444, .ca_owner = THIS_MODULE }, \
+ .show = _name##_show, \
+}
+
+#define __ATTR_NULL { .attr = { .name = NULL } }
+
+#define attr_name(_attr) (_attr).attr.name
+#endif
+
+
+struct configfs_subsystem {
+ struct config_group su_group;
+ struct semaphore su_sem;
+};
+
+static inline struct configfs_subsystem *to_configfs_subsystem(struct config_group *group)
+{
+ return group ?
+ container_of(group, struct configfs_subsystem, su_group) :
+ NULL;
+}
+
+int configfs_register_subsystem(struct configfs_subsystem *subsys);
+void configfs_unregister_subsystem(struct configfs_subsystem *subsys);
+
+#endif /* __KERNEL__ */
+
+#endif /* _CONFIGFS_H_ */
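For context only, a hedged sketch of the registration path this header exposes (my_subsys, my_group_type and the init function are hypothetical; see Documentation/filesystems/configfs/ for the real example code):

	static struct configfs_subsystem my_subsys = {
		.su_group = {
			.cg_item = {
				.ci_namebuf	= "my_subsys",
				.ci_type	= &my_group_type,	/* hypothetical config_item_type */
			},
		},
	};

	static int __init my_init(void)
	{
		config_group_init(&my_subsys.su_group);
		init_MUTEX(&my_subsys.su_sem);
		return configfs_register_subsystem(&my_subsys);
	}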
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 6e2deef96b3..c472f972bd6 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -14,22 +14,43 @@
#ifdef CONFIG_CPUSETS
+extern int number_of_cpusets; /* How many cpusets are defined in system? */
+
+extern int cpuset_init_early(void);
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_fork(struct task_struct *p);
extern void cpuset_exit(struct task_struct *p);
-extern cpumask_t cpuset_cpus_allowed(const struct task_struct *p);
+extern cpumask_t cpuset_cpus_allowed(struct task_struct *p);
+extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
void cpuset_init_current_mems_allowed(void);
-void cpuset_update_current_mems_allowed(void);
-void cpuset_restrict_to_mems_allowed(unsigned long *nodes);
+void cpuset_update_task_memory_state(void);
+#define cpuset_nodes_subset_current_mems_allowed(nodes) \
+ nodes_subset((nodes), current->mems_allowed)
int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl);
-extern int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask);
+
+extern int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask);
+static int inline cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
+{
+ return number_of_cpusets <= 1 || __cpuset_zone_allowed(z, gfp_mask);
+}
+
extern int cpuset_excl_nodes_overlap(const struct task_struct *p);
+
+#define cpuset_memory_pressure_bump() \
+ do { \
+ if (cpuset_memory_pressure_enabled) \
+ __cpuset_memory_pressure_bump(); \
+ } while (0)
+extern int cpuset_memory_pressure_enabled;
+extern void __cpuset_memory_pressure_bump(void);
+
extern struct file_operations proc_cpuset_operations;
extern char *cpuset_task_status_allowed(struct task_struct *task, char *buffer);
#else /* !CONFIG_CPUSETS */
+static inline int cpuset_init_early(void) { return 0; }
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
static inline void cpuset_fork(struct task_struct *p) {}
@@ -40,9 +61,14 @@ static inline cpumask_t cpuset_cpus_allowed(struct task_struct *p)
return cpu_possible_map;
}
+static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
+{
+ return node_possible_map;
+}
+
static inline void cpuset_init_current_mems_allowed(void) {}
-static inline void cpuset_update_current_mems_allowed(void) {}
-static inline void cpuset_restrict_to_mems_allowed(unsigned long *nodes) {}
+static inline void cpuset_update_task_memory_state(void) {}
+#define cpuset_nodes_subset_current_mems_allowed(nodes) (1)
static inline int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
{
@@ -59,6 +85,8 @@ static inline int cpuset_excl_nodes_overlap(const struct task_struct *p)
return 1;
}
+static inline void cpuset_memory_pressure_bump(void) {}
+
static inline char *cpuset_task_status_allowed(struct task_struct *task,
char *buffer)
{
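A hedged sketch of how an allocator-side caller benefits from the new inline fast path (zonelist and gfp_mask are placeholders):

	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		if (!cpuset_zone_allowed(*z, gfp_mask))
			continue;	/* zone is outside current's cpuset */
		/* ... try to allocate from *z ... */
	}

When number_of_cpusets <= 1 the check short-circuits without calling __cpuset_zone_allowed().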
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 3c89df6e776..d88bf8aa8b4 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -3,6 +3,7 @@
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright (c) 2002 David S. Miller (davem@redhat.com)
+ * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
*
* Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
* and Nettle, by Niels Möller.
@@ -126,7 +127,11 @@ struct crypto_alg {
unsigned int cra_blocksize;
unsigned int cra_ctxsize;
unsigned int cra_alignmask;
+
+ int cra_priority;
+
const char cra_name[CRYPTO_MAX_ALG_NAME];
+ const char cra_driver_name[CRYPTO_MAX_ALG_NAME];
union {
struct cipher_alg cipher;
diff --git a/include/linux/cycx_x25.h b/include/linux/cycx_x25.h
index b10a7f3a8ca..f7a90658346 100644
--- a/include/linux/cycx_x25.h
+++ b/include/linux/cycx_x25.h
@@ -38,11 +38,11 @@ extern unsigned int cycx_debug;
/* Data Structures */
/* X.25 Command Block. */
struct cycx_x25_cmd {
- u16 command PACKED;
- u16 link PACKED; /* values: 0 or 1 */
- u16 len PACKED; /* values: 0 thru 0x205 (517) */
- u32 buf PACKED;
-};
+ u16 command;
+ u16 link; /* values: 0 or 1 */
+ u16 len; /* values: 0 thru 0x205 (517) */
+ u32 buf;
+} PACKED;
/* Defines for the 'command' field. */
#define X25_CONNECT_REQUEST 0x4401
@@ -92,34 +92,34 @@ struct cycx_x25_cmd {
* @flags - see dosx25.doc, in portuguese, for details
*/
struct cycx_x25_config {
- u8 link PACKED;
- u8 speed PACKED;
- u8 clock PACKED;
- u8 n2 PACKED;
- u8 n2win PACKED;
- u8 n3win PACKED;
- u8 nvc PACKED;
- u8 pktlen PACKED;
- u8 locaddr PACKED;
- u8 remaddr PACKED;
- u16 t1 PACKED;
- u16 t2 PACKED;
- u8 t21 PACKED;
- u8 npvc PACKED;
- u8 t23 PACKED;
- u8 flags PACKED;
-};
+ u8 link;
+ u8 speed;
+ u8 clock;
+ u8 n2;
+ u8 n2win;
+ u8 n3win;
+ u8 nvc;
+ u8 pktlen;
+ u8 locaddr;
+ u8 remaddr;
+ u16 t1;
+ u16 t2;
+ u8 t21;
+ u8 npvc;
+ u8 t23;
+ u8 flags;
+} PACKED;
struct cycx_x25_stats {
- u16 rx_crc_errors PACKED;
- u16 rx_over_errors PACKED;
- u16 n2_tx_frames PACKED;
- u16 n2_rx_frames PACKED;
- u16 tx_timeouts PACKED;
- u16 rx_timeouts PACKED;
- u16 n3_tx_packets PACKED;
- u16 n3_rx_packets PACKED;
- u16 tx_aborts PACKED;
- u16 rx_aborts PACKED;
-};
+ u16 rx_crc_errors;
+ u16 rx_over_errors;
+ u16 n2_tx_frames;
+ u16 n2_rx_frames;
+ u16 tx_timeouts;
+ u16 rx_timeouts;
+ u16 n3_tx_packets;
+ u16 n3_rx_packets;
+ u16 tx_aborts;
+ u16 rx_aborts;
+} PACKED;
#endif /* _CYCX_X25_H */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 46a2ba61759..a3ed5e059d4 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -95,14 +95,19 @@ struct dentry {
struct qstr d_name;
struct list_head d_lru; /* LRU list */
- struct list_head d_child; /* child of parent list */
+ /*
+ * d_child and d_rcu can share memory
+ */
+ union {
+ struct list_head d_child; /* child of parent list */
+ struct rcu_head d_rcu;
+ } d_u;
struct list_head d_subdirs; /* our children */
struct list_head d_alias; /* inode alias list */
unsigned long d_time; /* used by d_revalidate */
struct dentry_operations *d_op;
struct super_block *d_sb; /* The root of the dentry tree */
void *d_fsdata; /* fs-specific data */
- struct rcu_head d_rcu;
struct dcookie_struct *d_cookie; /* cookie, if any */
int d_mounted;
unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
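Since d_child now lives inside the d_u union, tree walkers reference it through d_u; a hedged sketch (parent is a placeholder dentry):

	struct dentry *child;

	list_for_each_entry(child, &parent->d_subdirs, d_u.d_child) {
		/* d_child shares storage with d_rcu, which is only needed
		 * once the dentry is being freed via RCU */
	}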
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index f5eb6b6cd10..fa75ba0d635 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -272,9 +272,9 @@ typedef char ioctl_struct[308];
#define DM_TARGET_MSG _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 4
+#define DM_VERSION_MINOR 5
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2005-01-12)"
+#define DM_VERSION_EXTRA "-ioctl (2005-10-04)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@@ -301,8 +301,13 @@ typedef char ioctl_struct[308];
#define DM_BUFFER_FULL_FLAG (1 << 8) /* Out */
/*
- * Set this to improve performance when you aren't going to use open_count
+ * Set this to improve performance when you aren't going to use open_count.
*/
#define DM_SKIP_BDGET_FLAG (1 << 9) /* In */
+/*
+ * Set this to avoid attempting to freeze any filesystem when suspending.
+ */
+#define DM_SKIP_LOCKFS_FLAG (1 << 10) /* In */
+
#endif /* _LINUX_DM_IOCTL_H */
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index 05f4132622f..2e6bbe01415 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -2,6 +2,7 @@
#define __DMI_H__
#include <linux/list.h>
+#include <linux/config.h>
enum dmi_field {
DMI_NONE,
@@ -60,12 +61,14 @@ struct dmi_device {
void *device_data; /* Type specific data */
};
-#if defined(CONFIG_X86_32)
+#ifdef CONFIG_DMI
extern int dmi_check_system(struct dmi_system_id *list);
extern char * dmi_get_system_info(int field);
extern struct dmi_device * dmi_find_device(int type, const char *name,
struct dmi_device *from);
+extern void dmi_scan_machine(void);
+
#else
static inline int dmi_check_system(struct dmi_system_id *list) { return 0; }
diff --git a/include/linux/dvb/frontend.h b/include/linux/dvb/frontend.h
index d41df7047ed..c8cbd90ba37 100644
--- a/include/linux/dvb/frontend.h
+++ b/include/linux/dvb/frontend.h
@@ -240,6 +240,15 @@ struct dvb_frontend_event {
};
+/**
+ * When set, this flag will disable any zigzagging or other "normal" tuning
+ * behaviour. Additionally, there will be no automatic monitoring of the lock
+ * status, and hence no frontend events will be generated. If a frontend device
+ * is closed, this flag will be automatically turned off when the device is
+ * reopened read-write.
+ */
+#define FE_TUNE_MODE_ONESHOT 0x01
+
#define FE_GET_INFO _IOR('o', 61, struct dvb_frontend_info)
@@ -260,6 +269,7 @@ struct dvb_frontend_event {
#define FE_SET_FRONTEND _IOW('o', 76, struct dvb_frontend_parameters)
#define FE_GET_FRONTEND _IOR('o', 77, struct dvb_frontend_parameters)
+#define FE_SET_FRONTEND_TUNE_MODE _IO('o', 81) /* unsigned int */
#define FE_GET_EVENT _IOR('o', 78, struct dvb_frontend_event)
#define FE_DISHNETWORK_SEND_LEGACY_CMD _IO('o', 80) /* unsigned int */
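A hedged userspace sketch of the new ioctl (fd is assumed to be an already-open frontend device such as /dev/dvb/adapter0/frontend0):

	if (ioctl(fd, FE_SET_FRONTEND_TUNE_MODE, FE_TUNE_MODE_ONESHOT) < 0)
		perror("FE_SET_FRONTEND_TUNE_MODE");
	/* zigzagging and lock monitoring stay disabled until the device is
	 * reopened read-write */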
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index a74c27e460b..23fe746a1d5 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -66,7 +66,7 @@ struct elevator_type
};
/*
- * each queue has an elevator_queue assoicated with it
+ * each queue has an elevator_queue associated with it
*/
struct elevator_queue
{
@@ -114,8 +114,6 @@ extern ssize_t elv_iosched_store(request_queue_t *, const char *, size_t);
extern int elevator_init(request_queue_t *, char *);
extern void elevator_exit(elevator_t *);
extern int elv_rq_merge_ok(struct request *, struct bio *);
-extern int elv_try_merge(struct request *, struct bio *);
-extern int elv_try_last_merge(request_queue_t *, struct bio *);
/*
* Return values from elevator merger
@@ -130,6 +128,7 @@ extern int elv_try_last_merge(request_queue_t *, struct bio *);
#define ELEVATOR_INSERT_FRONT 1
#define ELEVATOR_INSERT_BACK 2
#define ELEVATOR_INSERT_SORT 3
+#define ELEVATOR_INSERT_REQUEUE 4
/*
* return values from elevator_may_queue_fn
diff --git a/include/linux/elf.h b/include/linux/elf.h
index ff955dbf510..d3bfacb2449 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -151,6 +151,8 @@ typedef __s64 Elf64_Sxword;
#define STT_FUNC 2
#define STT_SECTION 3
#define STT_FILE 4
+#define STT_COMMON 5
+#define STT_TLS 6
#define ELF_ST_BIND(x) ((x) >> 4)
#define ELF_ST_TYPE(x) (((unsigned int) x) & 0xf)
diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h
index 2914f7b0715..e71dd98dbca 100644
--- a/include/linux/ext3_fs_i.h
+++ b/include/linux/ext3_fs_i.h
@@ -87,7 +87,7 @@ struct ext3_inode_info {
#ifdef CONFIG_EXT3_FS_XATTR
/*
* Extended attributes can be read independently of the main file
- * data. Taking i_sem even when reading would cause contention
+ * data. Taking i_mutex even when reading would cause contention
* between readers of EAs and writers of regular file data, so
* instead we synchronize on xattr_sem when reading or changing
* EAs.
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 3ba843c4638..c6cb8f09508 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -143,7 +143,7 @@ static inline unsigned int sk_filter_len(struct sk_filter *fp)
struct sk_buff;
struct sock;
-extern int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen);
+extern unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen);
extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
extern int sk_chk_filter(struct sock_filter *filter, int flen);
#endif /* __KERNEL__ */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index cc35b6ac778..d1e370d25f7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -9,7 +9,6 @@
#include <linux/config.h>
#include <linux/limits.h>
#include <linux/ioctl.h>
-#include <linux/rcuref.h>
/*
* It's silly to have NR_OPEN bigger than NR_FILE, but you can change
@@ -104,19 +103,18 @@ extern int dir_notify_enable;
#define MS_MOVE 8192
#define MS_REC 16384
#define MS_VERBOSE 32768
+#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */
#define MS_UNBINDABLE (1<<17) /* change to unbindable */
#define MS_PRIVATE (1<<18) /* change to private */
#define MS_SLAVE (1<<19) /* change to slave */
#define MS_SHARED (1<<20) /* change to shared */
-#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */
#define MS_ACTIVE (1<<30)
#define MS_NOUSER (1<<31)
/*
* Superblock flags that can be altered by MS_REMOUNT
*/
-#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_NOATIME|\
- MS_NODIRATIME)
+#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK)
/*
* Old magic mount flag and mask
@@ -162,8 +160,6 @@ extern int dir_notify_enable;
#define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA)
#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
-#define IS_NOATIME(inode) (__IS_FLG(inode, MS_NOATIME) || ((inode)->i_flags & S_NOATIME))
-#define IS_NODIRATIME(inode) __IS_FLG(inode, MS_NODIRATIME)
#define IS_POSIXACL(inode) __IS_FLG(inode, MS_POSIXACL)
#define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
@@ -220,11 +216,13 @@ extern int dir_notify_enable;
#include <linux/prio_tree.h>
#include <linux/init.h>
#include <linux/sched.h>
+#include <linux/mutex.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>
+struct hd_geometry;
struct iovec;
struct nameidata;
struct kiocb;
@@ -234,9 +232,6 @@ struct kstatfs;
struct vm_area_struct;
struct vfsmount;
-/* Used to be a macro which just called the function, now just a function */
-extern void update_atime (struct inode *);
-
extern void __init inode_init(unsigned long);
extern void __init inode_init_early(void);
extern void __init mnt_init(unsigned long);
@@ -302,6 +297,37 @@ struct iattr {
*/
#include <linux/quota.h>
+/**
+ * enum positive_aop_returns - aop return codes with specific semantics
+ *
+ * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has
+ * completed, that the page is still locked, and
+ * should be considered active. The VM uses this hint
+ * to return the page to the active list -- it won't
+ * be a candidate for writeback again in the near
+ * future. Other callers must be careful to unlock
+ * the page if they get this return. Returned by
+ * writepage();
+ *
+ * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has
+ * unlocked it and the page might have been truncated.
+ * The caller should back up to acquiring a new page and
+ * trying again. The aop will be taking reasonable
+ * precautions not to livelock. If the caller held a page
+ * reference, it should drop it before retrying. Returned
+ * by readpage(), prepare_write(), and commit_write().
+ *
+ * address_space_operation functions return these large constants to indicate
+ * special semantics to the caller. These are much larger than the bytes in a
+ * page to allow for functions that return the number of bytes operated on in a
+ * given page.
+ */
+
+enum positive_aop_returns {
+ AOP_WRITEPAGE_ACTIVATE = 0x80000,
+ AOP_TRUNCATED_PAGE = 0x80001,
+};
+
/*
* oh the beauties of C type declarations.
*/
@@ -453,7 +479,7 @@ struct inode {
unsigned long i_blocks;
unsigned short i_bytes;
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
- struct semaphore i_sem;
+ struct mutex i_mutex;
struct rw_semaphore i_alloc_sem;
struct inode_operations *i_op;
struct file_operations *i_fop; /* former ->i_op->default_file_ops */
@@ -622,7 +648,7 @@ extern spinlock_t files_lock;
#define file_list_lock() spin_lock(&files_lock);
#define file_list_unlock() spin_unlock(&files_lock);
-#define get_file(x) rcuref_inc(&(x)->f_count)
+#define get_file(x) atomic_inc(&(x)->f_count)
#define file_count(x) atomic_read(&(x)->f_count)
#define MAX_NON_LFS ((1UL<<31) - 1)
@@ -729,7 +755,7 @@ extern struct file_lock *posix_test_lock(struct file *, struct file_lock *);
extern int posix_lock_file(struct file *, struct file_lock *);
extern int posix_lock_file_wait(struct file *, struct file_lock *);
extern void posix_block_lock(struct file_lock *, struct file_lock *);
-extern void posix_unblock_lock(struct file *, struct file_lock *);
+extern int posix_unblock_lock(struct file *, struct file_lock *);
extern int posix_locks_deadlock(struct file_lock *, struct file_lock *);
extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
extern int __break_lease(struct inode *inode, unsigned int flags);
@@ -777,7 +803,6 @@ struct super_block {
struct list_head s_list; /* Keep this first */
dev_t s_dev; /* search index; _not_ kdev_t */
unsigned long s_blocksize;
- unsigned long s_old_blocksize;
unsigned char s_blocksize_bits;
unsigned char s_dirt;
unsigned long long s_maxbytes; /* Max file size */
@@ -790,7 +815,7 @@ struct super_block {
unsigned long s_magic;
struct dentry *s_root;
struct rw_semaphore s_umount;
- struct semaphore s_lock;
+ struct mutex s_lock;
int s_count;
int s_syncing;
int s_need_sync_fs;
@@ -862,13 +887,13 @@ static inline int has_fs_excl(void)
static inline void lock_super(struct super_block * sb)
{
get_fs_excl();
- down(&sb->s_lock);
+ mutex_lock(&sb->s_lock);
}
static inline void unlock_super(struct super_block * sb)
{
put_fs_excl();
- up(&sb->s_lock);
+ mutex_unlock(&sb->s_lock);
}
/*
@@ -932,6 +957,7 @@ struct block_device_operations {
int (*direct_access) (struct block_device *, sector_t, unsigned long *);
int (*media_changed) (struct gendisk *);
int (*revalidate_disk) (struct gendisk *);
+ int (*getgeo)(struct block_device *, struct hd_geometry *);
struct module *owner;
};
@@ -1019,6 +1045,7 @@ struct inode_operations {
ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*removexattr) (struct dentry *, const char *);
+ void (*truncate_range)(struct inode *, loff_t, loff_t);
};
struct seq_file;
@@ -1085,12 +1112,7 @@ static inline void mark_inode_dirty_sync(struct inode *inode)
__mark_inode_dirty(inode, I_DIRTY_SYNC);
}
-static inline void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
-{
- /* per-mountpoint checks will go here */
- update_atime(dentry->d_inode);
-}
-
+extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry);
static inline void file_accessed(struct file *file)
{
if (!(file->f_flags & O_NOATIME))
@@ -1159,7 +1181,7 @@ int sync_inode(struct inode *inode, struct writeback_control *wbc);
* directory. The name should be stored in the @name (with the
* understanding that it is already pointing to a a %NAME_MAX+1 sized
* buffer. get_name() should return %0 on success, a negative error code
- * or error. @get_name will be called without @parent->i_sem held.
+ * or error. @get_name will be called without @parent->i_mutex held.
*
* get_parent:
* @get_parent should find the parent directory for the given @child which
@@ -1181,7 +1203,7 @@ int sync_inode(struct inode *inode, struct writeback_control *wbc);
* nfsd_find_fh_dentry() in either the @obj or @parent parameters.
*
* Locking rules:
- * get_parent is called with child->d_inode->i_sem down
+ * get_parent is called with child->d_inode->i_mutex down
* get_name is not (which is possibly inconsistent)
*/
@@ -1313,7 +1335,8 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
/* fs/open.c */
-extern int do_truncate(struct dentry *, loff_t start, struct file *filp);
+extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
+ struct file *filp);
extern long do_sys_open(const char __user *filename, int flags, int mode);
extern struct file *filp_open(const char *, int, int);
extern struct file * dentry_open(struct dentry *, struct vfsmount *, int);
@@ -1682,7 +1705,7 @@ extern ssize_t simple_read_from_buffer(void __user *, size_t, loff_t *, const vo
extern int inode_change_ok(struct inode *, struct iattr *);
extern int __must_check inode_setattr(struct inode *, struct iattr *);
-extern void inode_update_time(struct inode *inode, int ctime_too);
+extern void file_update_time(struct file *file);
static inline ino_t parent_ino(struct dentry *dentry)
{
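Reflecting the i_sem to i_mutex conversion above, directory serialisation in filesystem code becomes plain mutex usage; a hedged sketch (dir is a placeholder inode):

	mutex_lock(&dir->i_mutex);
	/* ... look up or create an entry under the directory ... */
	mutex_unlock(&dir->i_mutex);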
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 934aa9bda48..a9f1cfd096f 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -50,14 +50,12 @@ struct gianfar_platform_data {
/* board specific information */
u32 board_flags;
- const char *bus_id;
+ u32 bus_id;
+ u32 phy_id;
u8 mac_addr[6];
};
struct gianfar_mdio_data {
- /* device specific information */
- u32 paddr;
-
/* board specific information */
int irq[32];
};
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index b76b558b03d..528959c52f1 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -14,7 +14,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 3
+#define FUSE_KERNEL_MINOR_VERSION 5
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -53,6 +53,9 @@ struct fuse_kstatfs {
__u64 ffree;
__u32 bsize;
__u32 namelen;
+ __u32 frsize;
+ __u32 padding;
+ __u32 spare[6];
};
#define FATTR_MODE (1 << 0)
@@ -105,12 +108,8 @@ enum fuse_opcode {
FUSE_CREATE = 35
};
-/* Conservative buffer size for the client */
-#define FUSE_MAX_IN 8192
-
-#define FUSE_NAME_MAX 1024
-#define FUSE_SYMLINK_MAX 4096
-#define FUSE_XATTR_SIZE_MAX 4096
+/* The read buffer is required to be at least 8k, but may be much larger */
+#define FUSE_MIN_READ_BUFFER 8192
struct fuse_entry_out {
__u64 nodeid; /* Inode ID */
@@ -213,6 +212,8 @@ struct fuse_write_out {
__u32 padding;
};
+#define FUSE_COMPAT_STATFS_SIZE 48
+
struct fuse_statfs_out {
struct fuse_kstatfs st;
};
@@ -243,9 +244,16 @@ struct fuse_access_in {
__u32 padding;
};
-struct fuse_init_in_out {
+struct fuse_init_in {
+ __u32 major;
+ __u32 minor;
+};
+
+struct fuse_init_out {
__u32 major;
__u32 minor;
+ __u32 unused[3];
+ __u32 max_write;
};
struct fuse_in_header {
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 8b2eab90abb..20f9148e38d 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -57,6 +57,7 @@ struct vm_area_struct;
__GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
__GFP_NOMEMALLOC|__GFP_HARDWALL)
+/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC (__GFP_HIGH)
#define GFP_NOIO (__GFP_WAIT)
#define GFP_NOFS (__GFP_WAIT | __GFP_IO)
@@ -109,6 +110,10 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
if (unlikely(order >= MAX_ORDER))
return NULL;
+ /* Unknown node is current node */
+ if (nid < 0)
+ nid = numa_node_id();
+
return __alloc_pages(gfp_mask, order,
NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_mask));
}
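Given the new negative-nid handling, a caller without a node preference can simply pass -1; a hedged sketch:

	/* -1 now means "allocate on the current node" */
	struct page *page = alloc_pages_node(-1, GFP_KERNEL, 0);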
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
new file mode 100644
index 00000000000..089bfb1fa01
--- /dev/null
+++ b/include/linux/hrtimer.h
@@ -0,0 +1,139 @@
+/*
+ * include/linux/hrtimer.h
+ *
+ * hrtimers - High-resolution kernel timers
+ *
+ * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
+ *
+ * data type definitions, declarations, prototypes
+ *
+ * Started by: Thomas Gleixner and Ingo Molnar
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+#ifndef _LINUX_HRTIMER_H
+#define _LINUX_HRTIMER_H
+
+#include <linux/rbtree.h>
+#include <linux/ktime.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+
+/*
+ * Mode arguments of xxx_hrtimer functions:
+ */
+enum hrtimer_mode {
+ HRTIMER_ABS, /* Time value is absolute */
+ HRTIMER_REL, /* Time value is relative to now */
+};
+
+enum hrtimer_restart {
+ HRTIMER_NORESTART,
+ HRTIMER_RESTART,
+};
+
+/*
+ * Timer states:
+ */
+enum hrtimer_state {
+ HRTIMER_INACTIVE, /* Timer is inactive */
+ HRTIMER_EXPIRED, /* Timer is expired */
+ HRTIMER_PENDING, /* Timer is pending */
+};
+
+struct hrtimer_base;
+
+/**
+ * struct hrtimer - the basic hrtimer structure
+ *
+ * @node: red black tree node for time ordered insertion
+ * @expires: the absolute expiry time in the hrtimers internal
+ * representation. The time is related to the clock on
+ * which the timer is based.
+ * @state: state of the timer
+ * @function: timer expiry callback function
+ * @data: argument for the callback function
+ * @base: pointer to the timer base (per cpu and per clock)
+ *
+ * The hrtimer structure must be initialized by init_hrtimer_#CLOCKTYPE()
+ */
+struct hrtimer {
+ struct rb_node node;
+ ktime_t expires;
+ enum hrtimer_state state;
+ int (*function)(void *);
+ void *data;
+ struct hrtimer_base *base;
+};
+
+/**
+ * struct hrtimer_base - the timer base for a specific clock
+ *
+ * @index: clock type index for per_cpu support when moving a timer
+ * to a base on another cpu.
+ * @lock: lock protecting the base and associated timers
+ * @active: red black tree root node for the active timers
+ * @first: pointer to the timer node which expires first
+ * @resolution: the resolution of the clock, in nanoseconds
+ * @get_time: function to retrieve the current time of the clock
+ * @curr_timer: the timer which is executing a callback right now
+ */
+struct hrtimer_base {
+ clockid_t index;
+ spinlock_t lock;
+ struct rb_root active;
+ struct rb_node *first;
+ ktime_t resolution;
+ ktime_t (*get_time)(void);
+ struct hrtimer *curr_timer;
+};
+
+/*
+ * clock_was_set() is a NOP for non- high-resolution systems. The
+ * time-sorted order guarantees that a timer does not expire early and
+ * is expired in the next softirq when the clock was advanced.
+ */
+#define clock_was_set() do { } while (0)
+
+/* Exported timer functions: */
+
+/* Initialize timers: */
+extern void hrtimer_init(struct hrtimer *timer, const clockid_t which_clock);
+extern void hrtimer_rebase(struct hrtimer *timer, const clockid_t which_clock);
+
+
+/* Basic timer operations: */
+extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
+ const enum hrtimer_mode mode);
+extern int hrtimer_cancel(struct hrtimer *timer);
+extern int hrtimer_try_to_cancel(struct hrtimer *timer);
+
+#define hrtimer_restart(timer) hrtimer_start((timer), (timer)->expires, HRTIMER_ABS)
+
+/* Query timers: */
+extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
+extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
+
+static inline int hrtimer_active(const struct hrtimer *timer)
+{
+ return timer->state == HRTIMER_PENDING;
+}
+
+/* Forward a hrtimer so it expires after now: */
+extern unsigned long hrtimer_forward(struct hrtimer *timer, ktime_t interval);
+
+/* Precise sleep: */
+extern long hrtimer_nanosleep(struct timespec *rqtp,
+ struct timespec __user *rmtp,
+ const enum hrtimer_mode mode,
+ const clockid_t clockid);
+
+/* Soft interrupt function to run the hrtimer queues: */
+extern void hrtimer_run_queues(void);
+
+/* Bootup initialization: */
+extern void __init hrtimers_init(void);
+
+#endif
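A hedged sketch of the declared API in use (the callback, timer and helper names are hypothetical); note that hrtimer_init() as declared here takes only the timer and the clock id, and a one-shot callback signals completion by returning HRTIMER_NORESTART:

	static int my_timeout(void *data)
	{
		/* ... react to expiry ... */
		return HRTIMER_NORESTART;
	}

	static struct hrtimer my_timer;

	static void arm_my_timer(void)
	{
		hrtimer_init(&my_timer, CLOCK_MONOTONIC);
		my_timer.function = my_timeout;
		my_timer.data = NULL;
		hrtimer_start(&my_timer, ktime_set(1, 0), HRTIMER_REL);	/* fire in ~1s */
	}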
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 1056717ee50..68d82ad6b17 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -22,7 +22,7 @@ int hugetlb_report_meminfo(char *);
int hugetlb_report_node_meminfo(int, char *);
int is_hugepage_mem_enough(size_t);
unsigned long hugetlb_total_pages(void);
-struct page *alloc_huge_page(void);
+struct page *alloc_huge_page(struct vm_area_struct *, unsigned long);
void free_huge_page(struct page *);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, int write_access);
@@ -97,7 +97,7 @@ static inline unsigned long hugetlb_total_pages(void)
#define is_hugepage_only_range(mm, addr, len) 0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
do { } while (0)
-#define alloc_huge_page() ({ NULL; })
+#define alloc_huge_page(vma, addr) ({ NULL; })
#define free_huge_page(p) ({ (void)(p); BUG(); })
#define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; })
diff --git a/include/linux/hwmon-vid.h b/include/linux/hwmon-vid.h
index cd4b7a042b8..f346e4d5381 100644
--- a/include/linux/hwmon-vid.h
+++ b/include/linux/hwmon-vid.h
@@ -23,14 +23,14 @@
#ifndef _LINUX_HWMON_VID_H
#define _LINUX_HWMON_VID_H
-int vid_from_reg(int val, int vrm);
-int vid_which_vrm(void);
+int vid_from_reg(int val, u8 vrm);
+u8 vid_which_vrm(void);
/* vrm is the VRM/VRD document version multiplied by 10.
val is in mV to avoid floating point in the kernel.
Returned value is the 4-, 5- or 6-bit VID code.
Note that only VRM 9.x is supported for now. */
-static inline int vid_to_reg(int val, int vrm)
+static inline int vid_to_reg(int val, u8 vrm)
{
switch (vrm) {
case 91: /* VRM 9.1 */
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index 006c81ef4d5..6ff2d365895 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -25,12 +25,6 @@
/*
* ---- Driver types -----------------------------------------------------
- * device id name + number function description, i2c address(es)
- *
- * Range 1000-1999 range is defined in sensors/sensors.h
- * Range 0x100 - 0x1ff is for V4L2 Common Components
- * Range 0xf000 - 0xffff is reserved for local experimentation, and should
- * never be used in official drivers
*/
#define I2C_DRIVERID_MSP3400 1
@@ -109,14 +103,9 @@
#define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */
#define I2C_DRIVERID_AKITAIOEXP 74 /* IO Expander on Sharp SL-C1000 */
#define I2C_DRIVERID_INFRARED 75 /* I2C InfraRed on Video boards */
-
-#define I2C_DRIVERID_EXP0 0xF0 /* experimental use id's */
-#define I2C_DRIVERID_EXP1 0xF1
-#define I2C_DRIVERID_EXP2 0xF2
-#define I2C_DRIVERID_EXP3 0xF3
+#define I2C_DRIVERID_TVP5150 76 /* TVP5150 video decoder */
#define I2C_DRIVERID_I2CDEV 900
-#define I2C_DRIVERID_I2CPROC 901
#define I2C_DRIVERID_ARP 902 /* SMBus ARP Client */
#define I2C_DRIVERID_ALERT 903 /* SMBus Alert Responder Client */
@@ -131,15 +120,12 @@
#define I2C_DRIVERID_ADM1021 1008
#define I2C_DRIVERID_ADM9240 1009
#define I2C_DRIVERID_LTC1710 1010
-#define I2C_DRIVERID_SIS5595 1011
#define I2C_DRIVERID_ICSPLL 1012
#define I2C_DRIVERID_BT869 1013
#define I2C_DRIVERID_MAXILIFE 1014
#define I2C_DRIVERID_MATORB 1015
#define I2C_DRIVERID_GL520 1016
#define I2C_DRIVERID_THMC50 1017
-#define I2C_DRIVERID_DDCMON 1018
-#define I2C_DRIVERID_VIA686A 1019
#define I2C_DRIVERID_ADM1025 1020
#define I2C_DRIVERID_LM87 1021
#define I2C_DRIVERID_PCF8574 1022
@@ -151,21 +137,16 @@
#define I2C_DRIVERID_FSCPOS 1028
#define I2C_DRIVERID_FSCSCY 1029
#define I2C_DRIVERID_PCF8591 1030
-#define I2C_DRIVERID_SMSC47M1 1031
-#define I2C_DRIVERID_VT1211 1032
#define I2C_DRIVERID_LM92 1033
-#define I2C_DRIVERID_VT8231 1034
#define I2C_DRIVERID_SMARTBATT 1035
#define I2C_DRIVERID_BMCSENSORS 1036
#define I2C_DRIVERID_FS451 1037
-#define I2C_DRIVERID_W83627HF 1038
#define I2C_DRIVERID_LM85 1039
#define I2C_DRIVERID_LM83 1040
#define I2C_DRIVERID_LM90 1042
#define I2C_DRIVERID_ASB100 1043
#define I2C_DRIVERID_FSCHER 1046
#define I2C_DRIVERID_W83L785TS 1047
-#define I2C_DRIVERID_SMSC47B397 1050
/*
* ---- Adapter types ----------------------------------------------------
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 5e19a7ba69b..7863a59bd59 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -105,14 +105,14 @@ extern s32 i2c_smbus_read_i2c_block_data(struct i2c_client * client,
* A driver is capable of handling one or more physical devices present on
* I2C adapters. This information is used to inform the driver of adapter
* events.
+ *
+ * The driver.owner field should be set to the module owner of this driver.
+ * The driver.name field should be set to the name of this driver.
*/
struct i2c_driver {
- struct module *owner;
- char name[32];
int id;
unsigned int class;
- unsigned int flags; /* div., see below */
/* Notifies the driver that a new bus has appeared. This routine
* can be used by the driver to test if the bus meets its conditions
@@ -250,18 +250,7 @@ static inline void i2c_set_adapdata (struct i2c_adapter *dev, void *data)
dev_set_drvdata (&dev->dev, data);
}
-/*flags for the driver struct: */
-#define I2C_DF_NOTIFY 0x01 /* notify on bus (de/a)ttaches */
-#if 0
-/* this flag is gone -- there is a (optional) driver->detach_adapter
- * callback now which can be used instead */
-# define I2C_DF_DUMMY 0x02
-#endif
-
/*flags for the client struct: */
-#define I2C_CLIENT_ALLOW_USE 0x01 /* Client allows access */
-#define I2C_CLIENT_ALLOW_MULTIPLE_USE 0x02 /* Allow multiple access-locks */
- /* on an i2c_client */
#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
/* Must equal I2C_M_TEN below */
@@ -302,26 +291,20 @@ struct i2c_client_address_data {
extern int i2c_add_adapter(struct i2c_adapter *);
extern int i2c_del_adapter(struct i2c_adapter *);
-extern int i2c_add_driver(struct i2c_driver *);
+extern int i2c_register_driver(struct module *, struct i2c_driver *);
extern int i2c_del_driver(struct i2c_driver *);
+static inline int i2c_add_driver(struct i2c_driver *driver)
+{
+ return i2c_register_driver(THIS_MODULE, driver);
+}
+
extern int i2c_attach_client(struct i2c_client *);
extern int i2c_detach_client(struct i2c_client *);
-/* New function: This is to get an i2c_client-struct for controlling the
- client either by using i2c_control-function or having the
- client-module export functions that can be used with the i2c_client
- -struct. */
-extern struct i2c_client *i2c_get_client(int driver_id, int adapter_id,
- struct i2c_client *prev);
-
-/* Should be used with new function
- extern struct i2c_client *i2c_get_client(int,int,struct i2c_client *);
- to make sure that client-struct is valid and that it is okay to access
- the i2c-client.
- returns -EACCES if client doesn't allow use (default)
- returns -EBUSY if client doesn't allow multiple use (default) and
- usage_count >0 */
+/* Should be used to make sure that the client struct is valid and that it
+ is okay to access the i2c client.
+ Returns -ENODEV if the client has gone away in the meantime. */
extern int i2c_use_client(struct i2c_client *);
extern int i2c_release_client(struct i2c_client *);
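A hedged sketch of how a chip driver now registers, with the module owner supplied implicitly through the i2c_add_driver() wrapper (the chip name and callbacks are hypothetical):

	static struct i2c_driver mychip_driver = {
		.driver = {
			.name	= "mychip",
		},
		.attach_adapter	= mychip_attach_adapter,	/* hypothetical */
		.detach_client	= mychip_detach_client,		/* hypothetical */
	};

	static int __init mychip_init(void)
	{
		return i2c_add_driver(&mychip_driver);	/* passes THIS_MODULE as owner */
	}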
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index d79c8a4bc4f..9ba80679666 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -30,6 +30,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/workqueue.h> /* work_struct */
+#include <linux/mempool.h>
#include <asm/io.h>
#include <asm/semaphore.h> /* Needed for MUTEX init macros */
@@ -38,6 +39,355 @@
#define I2O_QUEUE_EMPTY 0xffffffff
/*
+ * Cache strategies
+ */
+
+/* The NULL strategy leaves everything up to the controller. This tends to be a
+ * pessimal but functional choice.
+ */
+#define CACHE_NULL 0
+/* Prefetch data when reading. We continually attempt to load the next 32 sectors
+ * into the controller cache.
+ */
+#define CACHE_PREFETCH 1
+/* Prefetch data when reading. We sometimes attempt to load the next 32 sectors
+ * into the controller cache. When an I/O is less <= 8K we assume its probably
+ * not sequential and don't prefetch (default)
+ */
+#define CACHE_SMARTFETCH 2
+/* Data is written to the cache and then out on to the disk. The I/O must be
+ * physically on the medium before the write is acknowledged (default without
+ * NVRAM)
+ */
+#define CACHE_WRITETHROUGH 17
+/* Data is written to the cache and then out on to the disk. The controller
+ * is permitted to write back the cache any way it wants. (default if battery
+ * backed NVRAM is present). It can be useful to set this for swap regardless of
+ * battery state.
+ */
+#define CACHE_WRITEBACK 18
+/* Optimise for underpowered controllers, especially on RAID1 and RAID0. We
+ * write large I/O's directly to disk bypassing the cache to avoid the extra
+ * memory copy hits. Small writes are writeback cached
+ */
+#define CACHE_SMARTBACK 19
+/* Optimise for underpowered controllers, especially on RAID1 and RAID0. We
+ * write large I/O's directly to disk bypassing the cache to avoid the extra
+ * memory copy hits. Small writes are writethrough cached. Suitable for devices
+ * lacking battery backup
+ */
+#define CACHE_SMARTTHROUGH 20
+
+/*
+ * Ioctl structures
+ */
+
+#define BLKI2OGRSTRAT _IOR('2', 1, int)
+#define BLKI2OGWSTRAT _IOR('2', 2, int)
+#define BLKI2OSRSTRAT _IOW('2', 3, int)
+#define BLKI2OSWSTRAT _IOW('2', 4, int)
+
+/*
+ * I2O Function codes
+ */
+
+/*
+ * Executive Class
+ */
+#define I2O_CMD_ADAPTER_ASSIGN 0xB3
+#define I2O_CMD_ADAPTER_READ 0xB2
+#define I2O_CMD_ADAPTER_RELEASE 0xB5
+#define I2O_CMD_BIOS_INFO_SET 0xA5
+#define I2O_CMD_BOOT_DEVICE_SET 0xA7
+#define I2O_CMD_CONFIG_VALIDATE 0xBB
+#define I2O_CMD_CONN_SETUP 0xCA
+#define I2O_CMD_DDM_DESTROY 0xB1
+#define I2O_CMD_DDM_ENABLE 0xD5
+#define I2O_CMD_DDM_QUIESCE 0xC7
+#define I2O_CMD_DDM_RESET 0xD9
+#define I2O_CMD_DDM_SUSPEND 0xAF
+#define I2O_CMD_DEVICE_ASSIGN 0xB7
+#define I2O_CMD_DEVICE_RELEASE 0xB9
+#define I2O_CMD_HRT_GET 0xA8
+#define I2O_CMD_ADAPTER_CLEAR 0xBE
+#define I2O_CMD_ADAPTER_CONNECT 0xC9
+#define I2O_CMD_ADAPTER_RESET 0xBD
+#define I2O_CMD_LCT_NOTIFY 0xA2
+#define I2O_CMD_OUTBOUND_INIT 0xA1
+#define I2O_CMD_PATH_ENABLE 0xD3
+#define I2O_CMD_PATH_QUIESCE 0xC5
+#define I2O_CMD_PATH_RESET 0xD7
+#define I2O_CMD_STATIC_MF_CREATE 0xDD
+#define I2O_CMD_STATIC_MF_RELEASE 0xDF
+#define I2O_CMD_STATUS_GET 0xA0
+#define I2O_CMD_SW_DOWNLOAD 0xA9
+#define I2O_CMD_SW_UPLOAD 0xAB
+#define I2O_CMD_SW_REMOVE 0xAD
+#define I2O_CMD_SYS_ENABLE 0xD1
+#define I2O_CMD_SYS_MODIFY 0xC1
+#define I2O_CMD_SYS_QUIESCE 0xC3
+#define I2O_CMD_SYS_TAB_SET 0xA3
+
+/*
+ * Utility Class
+ */
+#define I2O_CMD_UTIL_NOP 0x00
+#define I2O_CMD_UTIL_ABORT 0x01
+#define I2O_CMD_UTIL_CLAIM 0x09
+#define I2O_CMD_UTIL_RELEASE 0x0B
+#define I2O_CMD_UTIL_PARAMS_GET 0x06
+#define I2O_CMD_UTIL_PARAMS_SET 0x05
+#define I2O_CMD_UTIL_EVT_REGISTER 0x13
+#define I2O_CMD_UTIL_EVT_ACK 0x14
+#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10
+#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D
+#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F
+#define I2O_CMD_UTIL_LOCK 0x17
+#define I2O_CMD_UTIL_LOCK_RELEASE 0x19
+#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15
+
+/*
+ * SCSI Host Bus Adapter Class
+ */
+#define I2O_CMD_SCSI_EXEC 0x81
+#define I2O_CMD_SCSI_ABORT 0x83
+#define I2O_CMD_SCSI_BUSRESET 0x27
+
+/*
+ * Bus Adapter Class
+ */
+#define I2O_CMD_BUS_ADAPTER_RESET 0x85
+#define I2O_CMD_BUS_RESET 0x87
+#define I2O_CMD_BUS_SCAN 0x89
+#define I2O_CMD_BUS_QUIESCE 0x8b
+
+/*
+ * Random Block Storage Class
+ */
+#define I2O_CMD_BLOCK_READ 0x30
+#define I2O_CMD_BLOCK_WRITE 0x31
+#define I2O_CMD_BLOCK_CFLUSH 0x37
+#define I2O_CMD_BLOCK_MLOCK 0x49
+#define I2O_CMD_BLOCK_MUNLOCK 0x4B
+#define I2O_CMD_BLOCK_MMOUNT 0x41
+#define I2O_CMD_BLOCK_MEJECT 0x43
+#define I2O_CMD_BLOCK_POWER 0x70
+
+#define I2O_CMD_PRIVATE 0xFF
+
+/* Command status values */
+
+#define I2O_CMD_IN_PROGRESS 0x01
+#define I2O_CMD_REJECTED 0x02
+#define I2O_CMD_FAILED 0x03
+#define I2O_CMD_COMPLETED 0x04
+
+/* I2O API function return values */
+
+#define I2O_RTN_NO_ERROR 0
+#define I2O_RTN_NOT_INIT 1
+#define I2O_RTN_FREE_Q_EMPTY 2
+#define I2O_RTN_TCB_ERROR 3
+#define I2O_RTN_TRANSACTION_ERROR 4
+#define I2O_RTN_ADAPTER_ALREADY_INIT 5
+#define I2O_RTN_MALLOC_ERROR 6
+#define I2O_RTN_ADPTR_NOT_REGISTERED 7
+#define I2O_RTN_MSG_REPLY_TIMEOUT 8
+#define I2O_RTN_NO_STATUS 9
+#define I2O_RTN_NO_FIRM_VER 10
+#define I2O_RTN_NO_LINK_SPEED 11
+
+/* Reply message status defines for all messages */
+
+#define I2O_REPLY_STATUS_SUCCESS 0x00
+#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
+#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
+#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03
+#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04
+#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05
+#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06
+#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08
+#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09
+#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A
+#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B
+#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80
+
+/* Status codes and Error Information for Parameter functions */
+
+#define I2O_PARAMS_STATUS_SUCCESS 0x00
+#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01
+#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
+#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03
+#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04
+#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05
+#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06
+#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07
+#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08
+#define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09
+#define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A
+#define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B
+#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C
+#define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D
+#define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E
+#define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F
+#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10
+
+/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error
+ * messages: Table 3-2 Detailed Status Codes.*/
+
+#define I2O_DSC_SUCCESS 0x0000
+#define I2O_DSC_BAD_KEY 0x0002
+#define I2O_DSC_TCL_ERROR 0x0003
+#define I2O_DSC_REPLY_BUFFER_FULL 0x0004
+#define I2O_DSC_NO_SUCH_PAGE 0x0005
+#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006
+#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007
+#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009
+#define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A
+#define I2O_DSC_DEVICE_LOCKED 0x000B
+#define I2O_DSC_DEVICE_RESET 0x000C
+#define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D
+#define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E
+#define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F
+#define I2O_DSC_INVALID_OFFSET 0x0010
+#define I2O_DSC_INVALID_PARAMETER 0x0011
+#define I2O_DSC_INVALID_REQUEST 0x0012
+#define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013
+#define I2O_DSC_MESSAGE_TOO_LARGE 0x0014
+#define I2O_DSC_MESSAGE_TOO_SMALL 0x0015
+#define I2O_DSC_MISSING_PARAMETER 0x0016
+#define I2O_DSC_TIMEOUT 0x0017
+#define I2O_DSC_UNKNOWN_ERROR 0x0018
+#define I2O_DSC_UNKNOWN_FUNCTION 0x0019
+#define I2O_DSC_UNSUPPORTED_VERSION 0x001A
+#define I2O_DSC_DEVICE_BUSY 0x001B
+#define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C
+
+/* DetailedStatusCode defines for Block Storage Operation: Table 6-7 Detailed
+ Status Codes.*/
+
+#define I2O_BSA_DSC_SUCCESS 0x0000
+#define I2O_BSA_DSC_MEDIA_ERROR 0x0001
+#define I2O_BSA_DSC_ACCESS_ERROR 0x0002
+#define I2O_BSA_DSC_DEVICE_FAILURE 0x0003
+#define I2O_BSA_DSC_DEVICE_NOT_READY 0x0004
+#define I2O_BSA_DSC_MEDIA_NOT_PRESENT 0x0005
+#define I2O_BSA_DSC_MEDIA_LOCKED 0x0006
+#define I2O_BSA_DSC_MEDIA_FAILURE 0x0007
+#define I2O_BSA_DSC_PROTOCOL_FAILURE 0x0008
+#define I2O_BSA_DSC_BUS_FAILURE 0x0009
+#define I2O_BSA_DSC_ACCESS_VIOLATION 0x000A
+#define I2O_BSA_DSC_WRITE_PROTECTED 0x000B
+#define I2O_BSA_DSC_DEVICE_RESET 0x000C
+#define I2O_BSA_DSC_VOLUME_CHANGED 0x000D
+#define I2O_BSA_DSC_TIMEOUT 0x000E
+
+/* FailureStatusCodes, Table 3-3 Message Failure Codes */
+
+#define I2O_FSC_TRANSPORT_SERVICE_SUSPENDED 0x81
+#define I2O_FSC_TRANSPORT_SERVICE_TERMINATED 0x82
+#define I2O_FSC_TRANSPORT_CONGESTION 0x83
+#define I2O_FSC_TRANSPORT_FAILURE 0x84
+#define I2O_FSC_TRANSPORT_STATE_ERROR 0x85
+#define I2O_FSC_TRANSPORT_TIME_OUT 0x86
+#define I2O_FSC_TRANSPORT_ROUTING_FAILURE 0x87
+#define I2O_FSC_TRANSPORT_INVALID_VERSION 0x88
+#define I2O_FSC_TRANSPORT_INVALID_OFFSET 0x89
+#define I2O_FSC_TRANSPORT_INVALID_MSG_FLAGS 0x8A
+#define I2O_FSC_TRANSPORT_FRAME_TOO_SMALL 0x8B
+#define I2O_FSC_TRANSPORT_FRAME_TOO_LARGE 0x8C
+#define I2O_FSC_TRANSPORT_INVALID_TARGET_ID 0x8D
+#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_ID 0x8E
+#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_CONTEXT 0x8F
+#define I2O_FSC_TRANSPORT_UNKNOWN_FAILURE 0xFF
+
+/* Device Claim Types */
+#define I2O_CLAIM_PRIMARY 0x01000000
+#define I2O_CLAIM_MANAGEMENT 0x02000000
+#define I2O_CLAIM_AUTHORIZED 0x03000000
+#define I2O_CLAIM_SECONDARY 0x04000000
+
+/* Message header defines for VersionOffset */
+#define I2OVER15 0x0001
+#define I2OVER20 0x0002
+
+/* Default is 1.5 */
+#define I2OVERSION I2OVER15
+
+#define SGL_OFFSET_0 I2OVERSION
+#define SGL_OFFSET_4 (0x0040 | I2OVERSION)
+#define SGL_OFFSET_5 (0x0050 | I2OVERSION)
+#define SGL_OFFSET_6 (0x0060 | I2OVERSION)
+#define SGL_OFFSET_7 (0x0070 | I2OVERSION)
+#define SGL_OFFSET_8 (0x0080 | I2OVERSION)
+#define SGL_OFFSET_9 (0x0090 | I2OVERSION)
+#define SGL_OFFSET_10 (0x00A0 | I2OVERSION)
+#define SGL_OFFSET_11 (0x00B0 | I2OVERSION)
+#define SGL_OFFSET_12 (0x00C0 | I2OVERSION)
+#define SGL_OFFSET(x) (((x)<<4) | I2OVERSION)
+
+/* Transaction Reply Lists (TRL) Control Word structure */
+#define TRL_SINGLE_FIXED_LENGTH 0x00
+#define TRL_SINGLE_VARIABLE_LENGTH 0x40
+#define TRL_MULTIPLE_FIXED_LENGTH 0x80
+
+ /* msg header defines for MsgFlags */
+#define MSG_STATIC 0x0100
+#define MSG_64BIT_CNTXT 0x0200
+#define MSG_MULTI_TRANS 0x1000
+#define MSG_FAIL 0x2000
+#define MSG_FINAL 0x4000
+#define MSG_REPLY 0x8000
+
+ /* minimum size msg */
+#define THREE_WORD_MSG_SIZE 0x00030000
+#define FOUR_WORD_MSG_SIZE 0x00040000
+#define FIVE_WORD_MSG_SIZE 0x00050000
+#define SIX_WORD_MSG_SIZE 0x00060000
+#define SEVEN_WORD_MSG_SIZE 0x00070000
+#define EIGHT_WORD_MSG_SIZE 0x00080000
+#define NINE_WORD_MSG_SIZE 0x00090000
+#define TEN_WORD_MSG_SIZE 0x000A0000
+#define ELEVEN_WORD_MSG_SIZE 0x000B0000
+#define I2O_MESSAGE_SIZE(x) ((x)<<16)
+
+/* special TID assignments */
+#define ADAPTER_TID 0
+#define HOST_TID 1
+
+/* outbound queue defines */
+#define I2O_MAX_OUTBOUND_MSG_FRAMES 128
+#define I2O_OUTBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */
+
+/* inbound queue definitions */
+#define I2O_MSG_INPOOL_MIN 32
+#define I2O_INBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */
+
+#define I2O_POST_WAIT_OK 0
+#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT
+
+#define I2O_CONTEXT_LIST_MIN_LENGTH 15
+#define I2O_CONTEXT_LIST_USED 0x01
+#define I2O_CONTEXT_LIST_DELETED 0x02
+
+/* timeouts */
+#define I2O_TIMEOUT_INIT_OUTBOUND_QUEUE 15
+#define I2O_TIMEOUT_MESSAGE_GET 5
+#define I2O_TIMEOUT_RESET 30
+#define I2O_TIMEOUT_STATUS_GET 5
+#define I2O_TIMEOUT_LCT_GET 360
+#define I2O_TIMEOUT_SCSI_SCB_ABORT 240
+
+/* retries */
+#define I2O_HRT_GET_TRIES 3
+#define I2O_LCT_GET_TRIES 3
+
+/* defines for max_sectors and max_phys_segments */
+#define I2O_MAX_SECTORS 1024
+#define I2O_MAX_SECTORS_LIMITED 128
+#define I2O_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS
+
+/*
* Message structures
*/
struct i2o_message {
@@ -58,6 +408,12 @@ struct i2o_message {
u32 body[0];
};
+/* MFA and I2O message used by mempool */
+struct i2o_msg_mfa {
+ u32 mfa; /* MFA returned by the controller */
+ struct i2o_message msg; /* I2O message */
+};
+
/*
* Each I2O device entity has one of these. There is one per device.
*/
@@ -130,6 +486,15 @@ struct i2o_dma {
};
/*
+ * Contains slab cache and mempool information
+ */
+struct i2o_pool {
+ char *name;
+ kmem_cache_t *slab;
+ mempool_t *mempool;
+};
+
+/*
* Contains IO mapped address information
*/
struct i2o_io {
@@ -174,8 +539,6 @@ struct i2o_controller {
void __iomem *irq_status; /* Interrupt status register address */
void __iomem *irq_mask; /* Interrupt mask register address */
- /* Dynamic LCT related data */
-
struct i2o_dma status; /* IOP status block */
struct i2o_dma hrt; /* HW Resource Table */
@@ -188,6 +551,8 @@ struct i2o_controller {
struct i2o_io in_queue; /* inbound message queue Host->IOP */
struct i2o_dma out_queue; /* outbound message queue IOP->Host */
+ struct i2o_pool in_msg; /* mempool for inbound messages */
+
unsigned int battery:1; /* Has a battery backup */
unsigned int io_alloc:1; /* An I/O resource was allocated */
unsigned int mem_alloc:1; /* A memory resource was allocated */
@@ -196,7 +561,6 @@ struct i2o_controller {
struct resource mem_resource; /* Mem resource allocated to the IOP */
struct device device;
- struct class_device *classdev; /* I2O controller class device */
struct i2o_device *exec; /* Executive */
#if BITS_PER_LONG == 64
spinlock_t context_list_lock; /* lock for context_list */
@@ -247,16 +611,13 @@ struct i2o_sys_tbl {
extern struct list_head i2o_controllers;
/* Message functions */
-static inline u32 i2o_msg_get(struct i2o_controller *,
- struct i2o_message __iomem **);
-extern u32 i2o_msg_get_wait(struct i2o_controller *,
- struct i2o_message __iomem **, int);
-static inline void i2o_msg_post(struct i2o_controller *, u32);
-static inline int i2o_msg_post_wait(struct i2o_controller *, u32,
- unsigned long);
-extern int i2o_msg_post_wait_mem(struct i2o_controller *, u32, unsigned long,
- struct i2o_dma *);
-extern void i2o_msg_nop(struct i2o_controller *, u32);
+static inline struct i2o_message *i2o_msg_get(struct i2o_controller *);
+extern struct i2o_message *i2o_msg_get_wait(struct i2o_controller *, int);
+static inline void i2o_msg_post(struct i2o_controller *, struct i2o_message *);
+static inline int i2o_msg_post_wait(struct i2o_controller *,
+ struct i2o_message *, unsigned long);
+extern int i2o_msg_post_wait_mem(struct i2o_controller *, struct i2o_message *,
+ unsigned long, struct i2o_dma *);
static inline void i2o_flush_reply(struct i2o_controller *, u32);
/* IOP functions */
@@ -384,10 +745,10 @@ static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
size_t size,
enum dma_data_direction direction,
- u32 __iomem ** sg_ptr)
+ u32 ** sg_ptr)
{
u32 sg_flags;
- u32 __iomem *mptr = *sg_ptr;
+ u32 *mptr = *sg_ptr;
dma_addr_t dma_addr;
switch (direction) {
@@ -405,16 +766,16 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
if (!dma_mapping_error(dma_addr)) {
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
- writel(0x7C020002, mptr++);
- writel(PAGE_SIZE, mptr++);
+ *mptr++ = cpu_to_le32(0x7C020002);
+ *mptr++ = cpu_to_le32(PAGE_SIZE);
}
#endif
- writel(sg_flags | size, mptr++);
- writel(i2o_dma_low(dma_addr), mptr++);
+ *mptr++ = cpu_to_le32(sg_flags | size);
+ *mptr++ = cpu_to_le32(i2o_dma_low(dma_addr));
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support)
- writel(i2o_dma_high(dma_addr), mptr++);
+ *mptr++ = cpu_to_le32(i2o_dma_high(dma_addr));
#endif
*sg_ptr = mptr;
}
@@ -439,10 +800,10 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
static inline int i2o_dma_map_sg(struct i2o_controller *c,
struct scatterlist *sg, int sg_count,
enum dma_data_direction direction,
- u32 __iomem ** sg_ptr)
+ u32 ** sg_ptr)
{
u32 sg_flags;
- u32 __iomem *mptr = *sg_ptr;
+ u32 *mptr = *sg_ptr;
switch (direction) {
case DMA_TO_DEVICE:
@@ -461,19 +822,19 @@ static inline int i2o_dma_map_sg(struct i2o_controller *c,
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
- writel(0x7C020002, mptr++);
- writel(PAGE_SIZE, mptr++);
+ *mptr++ = cpu_to_le32(0x7C020002);
+ *mptr++ = cpu_to_le32(PAGE_SIZE);
}
#endif
while (sg_count-- > 0) {
if (!sg_count)
sg_flags |= 0xC0000000;
- writel(sg_flags | sg_dma_len(sg), mptr++);
- writel(i2o_dma_low(sg_dma_address(sg)), mptr++);
+ *mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg));
+ *mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg)));
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support)
- writel(i2o_dma_high(sg_dma_address(sg)), mptr++);
+ *mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
#endif
sg++;
}
@@ -563,6 +924,64 @@ static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
return 0;
};
+/*
+ * i2o_pool_alloc - Allocate a slab cache and mempool
+ * @pool: pointer to struct i2o_pool to write data into.
+ * @name: name which is used to identify cache
+ * @size: size of each object
+ * @min_nr: minimum number of objects
+ *
+ * First allocates a slab cache with name and size. Then allocates a
+ * mempool which uses the slab cache for allocation and freeing.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static inline int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
+ size_t size, int min_nr)
+{
+ pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL);
+ if (!pool->name)
+ goto exit;
+ strcpy(pool->name, name);
+
+ pool->slab =
+ kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL,
+ NULL);
+ if (!pool->slab)
+ goto free_name;
+
+ pool->mempool =
+ mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab,
+ pool->slab);
+ if (!pool->mempool)
+ goto free_slab;
+
+ return 0;
+
+ free_slab:
+ kmem_cache_destroy(pool->slab);
+
+ free_name:
+ kfree(pool->name);
+
+ exit:
+ return -ENOMEM;
+};
+
+/*
+ * i2o_pool_free - Free the slab cache and mempool
+ * @pool: pointer to struct i2o_pool which should be freed
+ *
+ * Note that you have to return all objects to the mempool again before
+ * calling i2o_pool_free().
+ */
+static inline void i2o_pool_free(struct i2o_pool *pool)
+{
+ mempool_destroy(pool->mempool);
+ kmem_cache_destroy(pool->slab);
+ kfree(pool->name);
+};
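
For orientation only (not part of the patch): a minimal sketch of how a controller's inbound pool might be paired with these two helpers. The function names, the pool name string and the object sizing are illustrative assumptions; the real core may size its objects differently.

	/* sketch: one object = the MFA word plus a full inbound message frame */
	static int example_setup_in_msg_pool(struct i2o_controller *c)
	{
		return i2o_pool_alloc(&c->in_msg, "i2o:in_msg",
				      sizeof(u32) + I2O_INBOUND_MSG_FRAME_SIZE * 4,
				      I2O_MSG_INPOOL_MIN);
	}

	static void example_teardown_in_msg_pool(struct i2o_controller *c)
	{
		/* all outstanding objects must have been returned first */
		i2o_pool_free(&c->in_msg);
	}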
+
/* I2O driver (OSM) functions */
extern int i2o_driver_register(struct i2o_driver *);
extern void i2o_driver_unregister(struct i2o_driver *);
@@ -638,39 +1057,89 @@ extern int i2o_exec_lct_get(struct i2o_controller *);
#define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj))
/**
+ * i2o_msg_out_to_virt - Turn an I2O message into a virtual address
+ * @c: controller
+ * @m: message engine value
+ *
+ * Turn a receive message from an I2O controller bus address into
+ * a Linux virtual address. The shared page frame is a linear block
+ * so we simply have to shift the offset. This function does not
+ * work for sender side messages as they are ioremap objects
+ * provided by the I2O controller.
+ */
+static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c,
+ u32 m)
+{
+ BUG_ON(m < c->out_queue.phys
+ || m >= c->out_queue.phys + c->out_queue.len);
+
+ return c->out_queue.virt + (m - c->out_queue.phys);
+};
+
+/**
+ * i2o_msg_in_to_virt - Turn an I2O message into a virtual address
+ * @c: controller
+ * @m: message engine value
+ *
+ * Turn a send message from an I2O controller bus address into
+ * a Linux virtual address. The shared page frame is a linear block
+ * so we simply have to shift the offset. This function does not
+ * work for receive side messages as they are kmalloc objects
+ * in a different pool.
+ */
+static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct
+ i2o_controller *c,
+ u32 m)
+{
+ return c->in_queue.virt + m;
+};
+
+/**
* i2o_msg_get - obtain an I2O message from the IOP
* @c: I2O controller
- * @msg: pointer to a I2O message pointer
*
- * This function tries to get a message slot. If no message slot is
+ * This function tries to get a message frame. If no message frame is
 * available, do not wait until one is available (see also i2o_msg_get_wait).
+ * The returned pointer to the message frame is not in I/O memory, it is
+ * allocated from a mempool. But because an MFA is also allocated from the
+ * controller, it is guaranteed that i2o_msg_post() will never fail.
*
- * On a success the message is returned and the pointer to the message is
- * set in msg. The returned message is the physical page frame offset
- * address from the read port (see the i2o spec). If no message is
- * available returns I2O_QUEUE_EMPTY and msg is leaved untouched.
+ * On success a pointer to the message frame is returned. If the message
+ * queue is empty, -EBUSY is returned; if no memory is available, -ENOMEM
+ * is returned.
*/
-static inline u32 i2o_msg_get(struct i2o_controller *c,
- struct i2o_message __iomem ** msg)
+static inline struct i2o_message *i2o_msg_get(struct i2o_controller *c)
{
- u32 m = readl(c->in_port);
-
- if (m != I2O_QUEUE_EMPTY)
- *msg = c->in_queue.virt + m;
+ struct i2o_msg_mfa *mmsg = mempool_alloc(c->in_msg.mempool, GFP_ATOMIC);
+ if (!mmsg)
+ return ERR_PTR(-ENOMEM);
+
+ mmsg->mfa = readl(c->in_port);
+ if (mmsg->mfa == I2O_QUEUE_EMPTY) {
+ mempool_free(mmsg, c->in_msg.mempool);
+ return ERR_PTR(-EBUSY);
+ }
- return m;
+ return &mmsg->msg;
};
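
For illustration (not part of the patch), a caller under the new calling convention might look like the sketch below; the controller pointer c and the cannot_proceed condition are placeholders.

	struct i2o_message *msg;

	msg = i2o_msg_get(c);
	if (IS_ERR(msg))		/* -EBUSY (queue empty) or -ENOMEM */
		return PTR_ERR(msg);

	if (cannot_proceed) {
		/* hand the MFA and the mempool object straight back */
		i2o_msg_nop(c, msg);
		return -EAGAIN;
	}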
/**
* i2o_msg_post - Post I2O message to I2O controller
 * @c: I2O controller to which the message should be sent
- * @m: the message identifier
+ * @msg: message returned by i2o_msg_get()
*
- * Post the message to the I2O controller.
+ * Post the message to the I2O controller and return immediately.
*/
-static inline void i2o_msg_post(struct i2o_controller *c, u32 m)
+static inline void i2o_msg_post(struct i2o_controller *c,
+ struct i2o_message *msg)
{
- writel(m, c->in_port);
+ struct i2o_msg_mfa *mmsg;
+
+ mmsg = container_of(msg, struct i2o_msg_mfa, msg);
+ memcpy_toio(i2o_msg_in_to_virt(c, mmsg->mfa), msg,
+ (le32_to_cpu(msg->u.head[0]) >> 16) << 2);
+ writel(mmsg->mfa, c->in_port);
+ mempool_free(mmsg, c->in_msg.mempool);
};
/**
@@ -685,62 +1154,66 @@ static inline void i2o_msg_post(struct i2o_controller *c, u32 m)
*
* Returns 0 on success or negative error code on failure.
*/
-static inline int i2o_msg_post_wait(struct i2o_controller *c, u32 m,
+static inline int i2o_msg_post_wait(struct i2o_controller *c,
+ struct i2o_message *msg,
unsigned long timeout)
{
- return i2o_msg_post_wait_mem(c, m, timeout, NULL);
+ return i2o_msg_post_wait_mem(c, msg, timeout, NULL);
};
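
A hedged sketch of the corresponding send path (again not from the patch itself): the command choice, the target TID and the timeout value are placeholders, and the initiator/transaction context words are assumed to be filled in by i2o_msg_post_wait_mem().

	struct i2o_message *msg;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] = cpu_to_le32(I2O_CMD_SYS_QUIESCE << 24 |
				     HOST_TID << 12 | ADAPTER_TID);

	return i2o_msg_post_wait(c, msg, 20);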
/**
- * i2o_flush_reply - Flush reply from I2O controller
- * @c: I2O controller
- * @m: the message identifier
+ * i2o_msg_nop_mfa - Returns a fetched MFA back to the controller
+ * @c: I2O controller from which the MFA was fetched
+ * @mfa: MFA which should be returned
*
- * The I2O controller must be informed that the reply message is not needed
- * anymore. If you forget to flush the reply, the message frame can't be
- * used by the controller anymore and is therefore lost.
+ * This function must be used for preserved messages, because i2o_msg_nop()
+ * also returns the allocated memory back to the in_msg mempool.
*/
-static inline void i2o_flush_reply(struct i2o_controller *c, u32 m)
+static inline void i2o_msg_nop_mfa(struct i2o_controller *c, u32 mfa)
{
- writel(m, c->out_port);
+ struct i2o_message __iomem *msg;
+ u32 nop[3] = {
+ THREE_WORD_MSG_SIZE | SGL_OFFSET_0,
+ I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
+ 0x00000000
+ };
+
+ msg = i2o_msg_in_to_virt(c, mfa);
+ memcpy_toio(msg, nop, sizeof(nop));
+ writel(mfa, c->in_port);
};
/**
- * i2o_out_to_virt - Turn an I2O message to a virtual address
- * @c: controller
- * @m: message engine value
+ * i2o_msg_nop - Returns a message which is not used
+ * @c: I2O controller from which the message was created
+ * @msg: message which should be returned
*
- * Turn a receive message from an I2O controller bus address into
- * a Linux virtual address. The shared page frame is a linear block
- * so we simply have to shift the offset. This function does not
- * work for sender side messages as they are ioremap objects
- * provided by the I2O controller.
+ * If you fetch a message via i2o_msg_get, and can't use it, you must
+ * return the message with this function. Otherwise the MFA is lost as well
+ * as the allocated memory from the mempool.
*/
-static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c,
- u32 m)
+static inline void i2o_msg_nop(struct i2o_controller *c,
+ struct i2o_message *msg)
{
- BUG_ON(m < c->out_queue.phys
- || m >= c->out_queue.phys + c->out_queue.len);
+ struct i2o_msg_mfa *mmsg;
+ mmsg = container_of(msg, struct i2o_msg_mfa, msg);
- return c->out_queue.virt + (m - c->out_queue.phys);
+ i2o_msg_nop_mfa(c, mmsg->mfa);
+ mempool_free(mmsg, c->in_msg.mempool);
};
/**
- * i2o_msg_in_to_virt - Turn an I2O message to a virtual address
- * @c: controller
- * @m: message engine value
+ * i2o_flush_reply - Flush reply from I2O controller
+ * @c: I2O controller
+ * @m: the message identifier
*
- * Turn a send message from an I2O controller bus address into
- * a Linux virtual address. The shared page frame is a linear block
- * so we simply have to shift the offset. This function does not
- * work for receive side messages as they are kmalloc objects
- * in a different pool.
+ * The I2O controller must be informed that the reply message is not needed
+ * anymore. If you forget to flush the reply, the message frame can't be
+ * used by the controller anymore and is therefore lost.
*/
-static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct
- i2o_controller *c,
- u32 m)
+static inline void i2o_flush_reply(struct i2o_controller *c, u32 m)
{
- return c->in_queue.virt + m;
+ writel(m, c->out_port);
};
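
Tying the receive side together, a reply-handler sketch (not part of the patch): the MFA m is assumed to have already been read from the outbound port by the interrupt path.

	struct i2o_message *reply = i2o_msg_out_to_virt(c, m);

	if (le32_to_cpu(reply->u.head[0]) & MSG_FAIL) {
		/* handle the failed-message frame here ... */
	}

	/* ... consume the reply payload ... */

	i2o_flush_reply(c, m);	/* give the frame back to the controller */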
/*
@@ -779,350 +1252,5 @@ extern void i2o_dump_message(struct i2o_message *);
extern void i2o_dump_hrt(struct i2o_controller *c);
extern void i2o_debug_state(struct i2o_controller *c);
-/*
- * Cache strategies
- */
-
-/* The NULL strategy leaves everything up to the controller. This tends to be a
- * pessimal but functional choice.
- */
-#define CACHE_NULL 0
-/* Prefetch data when reading. We continually attempt to load the next 32 sectors
- * into the controller cache.
- */
-#define CACHE_PREFETCH 1
-/* Prefetch data when reading. We sometimes attempt to load the next 32 sectors
- * into the controller cache. When an I/O is less <= 8K we assume its probably
- * not sequential and don't prefetch (default)
- */
-#define CACHE_SMARTFETCH 2
-/* Data is written to the cache and then out on to the disk. The I/O must be
- * physically on the medium before the write is acknowledged (default without
- * NVRAM)
- */
-#define CACHE_WRITETHROUGH 17
-/* Data is written to the cache and then out on to the disk. The controller
- * is permitted to write back the cache any way it wants. (default if battery
- * backed NVRAM is present). It can be useful to set this for swap regardless of
- * battery state.
- */
-#define CACHE_WRITEBACK 18
-/* Optimise for under powered controllers, especially on RAID1 and RAID0. We
- * write large I/O's directly to disk bypassing the cache to avoid the extra
- * memory copy hits. Small writes are writeback cached
- */
-#define CACHE_SMARTBACK 19
-/* Optimise for under powered controllers, especially on RAID1 and RAID0. We
- * write large I/O's directly to disk bypassing the cache to avoid the extra
- * memory copy hits. Small writes are writethrough cached. Suitable for devices
- * lacking battery backup
- */
-#define CACHE_SMARTTHROUGH 20
-
-/*
- * Ioctl structures
- */
-
-#define BLKI2OGRSTRAT _IOR('2', 1, int)
-#define BLKI2OGWSTRAT _IOR('2', 2, int)
-#define BLKI2OSRSTRAT _IOW('2', 3, int)
-#define BLKI2OSWSTRAT _IOW('2', 4, int)
-
-/*
- * I2O Function codes
- */
-
-/*
- * Executive Class
- */
-#define I2O_CMD_ADAPTER_ASSIGN 0xB3
-#define I2O_CMD_ADAPTER_READ 0xB2
-#define I2O_CMD_ADAPTER_RELEASE 0xB5
-#define I2O_CMD_BIOS_INFO_SET 0xA5
-#define I2O_CMD_BOOT_DEVICE_SET 0xA7
-#define I2O_CMD_CONFIG_VALIDATE 0xBB
-#define I2O_CMD_CONN_SETUP 0xCA
-#define I2O_CMD_DDM_DESTROY 0xB1
-#define I2O_CMD_DDM_ENABLE 0xD5
-#define I2O_CMD_DDM_QUIESCE 0xC7
-#define I2O_CMD_DDM_RESET 0xD9
-#define I2O_CMD_DDM_SUSPEND 0xAF
-#define I2O_CMD_DEVICE_ASSIGN 0xB7
-#define I2O_CMD_DEVICE_RELEASE 0xB9
-#define I2O_CMD_HRT_GET 0xA8
-#define I2O_CMD_ADAPTER_CLEAR 0xBE
-#define I2O_CMD_ADAPTER_CONNECT 0xC9
-#define I2O_CMD_ADAPTER_RESET 0xBD
-#define I2O_CMD_LCT_NOTIFY 0xA2
-#define I2O_CMD_OUTBOUND_INIT 0xA1
-#define I2O_CMD_PATH_ENABLE 0xD3
-#define I2O_CMD_PATH_QUIESCE 0xC5
-#define I2O_CMD_PATH_RESET 0xD7
-#define I2O_CMD_STATIC_MF_CREATE 0xDD
-#define I2O_CMD_STATIC_MF_RELEASE 0xDF
-#define I2O_CMD_STATUS_GET 0xA0
-#define I2O_CMD_SW_DOWNLOAD 0xA9
-#define I2O_CMD_SW_UPLOAD 0xAB
-#define I2O_CMD_SW_REMOVE 0xAD
-#define I2O_CMD_SYS_ENABLE 0xD1
-#define I2O_CMD_SYS_MODIFY 0xC1
-#define I2O_CMD_SYS_QUIESCE 0xC3
-#define I2O_CMD_SYS_TAB_SET 0xA3
-
-/*
- * Utility Class
- */
-#define I2O_CMD_UTIL_NOP 0x00
-#define I2O_CMD_UTIL_ABORT 0x01
-#define I2O_CMD_UTIL_CLAIM 0x09
-#define I2O_CMD_UTIL_RELEASE 0x0B
-#define I2O_CMD_UTIL_PARAMS_GET 0x06
-#define I2O_CMD_UTIL_PARAMS_SET 0x05
-#define I2O_CMD_UTIL_EVT_REGISTER 0x13
-#define I2O_CMD_UTIL_EVT_ACK 0x14
-#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10
-#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D
-#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F
-#define I2O_CMD_UTIL_LOCK 0x17
-#define I2O_CMD_UTIL_LOCK_RELEASE 0x19
-#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15
-
-/*
- * SCSI Host Bus Adapter Class
- */
-#define I2O_CMD_SCSI_EXEC 0x81
-#define I2O_CMD_SCSI_ABORT 0x83
-#define I2O_CMD_SCSI_BUSRESET 0x27
-
-/*
- * Bus Adapter Class
- */
-#define I2O_CMD_BUS_ADAPTER_RESET 0x85
-#define I2O_CMD_BUS_RESET 0x87
-#define I2O_CMD_BUS_SCAN 0x89
-#define I2O_CMD_BUS_QUIESCE 0x8b
-
-/*
- * Random Block Storage Class
- */
-#define I2O_CMD_BLOCK_READ 0x30
-#define I2O_CMD_BLOCK_WRITE 0x31
-#define I2O_CMD_BLOCK_CFLUSH 0x37
-#define I2O_CMD_BLOCK_MLOCK 0x49
-#define I2O_CMD_BLOCK_MUNLOCK 0x4B
-#define I2O_CMD_BLOCK_MMOUNT 0x41
-#define I2O_CMD_BLOCK_MEJECT 0x43
-#define I2O_CMD_BLOCK_POWER 0x70
-
-#define I2O_CMD_PRIVATE 0xFF
-
-/* Command status values */
-
-#define I2O_CMD_IN_PROGRESS 0x01
-#define I2O_CMD_REJECTED 0x02
-#define I2O_CMD_FAILED 0x03
-#define I2O_CMD_COMPLETED 0x04
-
-/* I2O API function return values */
-
-#define I2O_RTN_NO_ERROR 0
-#define I2O_RTN_NOT_INIT 1
-#define I2O_RTN_FREE_Q_EMPTY 2
-#define I2O_RTN_TCB_ERROR 3
-#define I2O_RTN_TRANSACTION_ERROR 4
-#define I2O_RTN_ADAPTER_ALREADY_INIT 5
-#define I2O_RTN_MALLOC_ERROR 6
-#define I2O_RTN_ADPTR_NOT_REGISTERED 7
-#define I2O_RTN_MSG_REPLY_TIMEOUT 8
-#define I2O_RTN_NO_STATUS 9
-#define I2O_RTN_NO_FIRM_VER 10
-#define I2O_RTN_NO_LINK_SPEED 11
-
-/* Reply message status defines for all messages */
-
-#define I2O_REPLY_STATUS_SUCCESS 0x00
-#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
-#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
-#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03
-#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04
-#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05
-#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06
-#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08
-#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09
-#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A
-#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B
-#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80
-
-/* Status codes and Error Information for Parameter functions */
-
-#define I2O_PARAMS_STATUS_SUCCESS 0x00
-#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01
-#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
-#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03
-#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04
-#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05
-#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06
-#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07
-#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08
-#define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09
-#define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A
-#define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B
-#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C
-#define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D
-#define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E
-#define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F
-#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10
-
-/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error
- * messages: Table 3-2 Detailed Status Codes.*/
-
-#define I2O_DSC_SUCCESS 0x0000
-#define I2O_DSC_BAD_KEY 0x0002
-#define I2O_DSC_TCL_ERROR 0x0003
-#define I2O_DSC_REPLY_BUFFER_FULL 0x0004
-#define I2O_DSC_NO_SUCH_PAGE 0x0005
-#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006
-#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007
-#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009
-#define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A
-#define I2O_DSC_DEVICE_LOCKED 0x000B
-#define I2O_DSC_DEVICE_RESET 0x000C
-#define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D
-#define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E
-#define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F
-#define I2O_DSC_INVALID_OFFSET 0x0010
-#define I2O_DSC_INVALID_PARAMETER 0x0011
-#define I2O_DSC_INVALID_REQUEST 0x0012
-#define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013
-#define I2O_DSC_MESSAGE_TOO_LARGE 0x0014
-#define I2O_DSC_MESSAGE_TOO_SMALL 0x0015
-#define I2O_DSC_MISSING_PARAMETER 0x0016
-#define I2O_DSC_TIMEOUT 0x0017
-#define I2O_DSC_UNKNOWN_ERROR 0x0018
-#define I2O_DSC_UNKNOWN_FUNCTION 0x0019
-#define I2O_DSC_UNSUPPORTED_VERSION 0x001A
-#define I2O_DSC_DEVICE_BUSY 0x001B
-#define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C
-
-/* DetailedStatusCode defines for Block Storage Operation: Table 6-7 Detailed
- Status Codes.*/
-
-#define I2O_BSA_DSC_SUCCESS 0x0000
-#define I2O_BSA_DSC_MEDIA_ERROR 0x0001
-#define I2O_BSA_DSC_ACCESS_ERROR 0x0002
-#define I2O_BSA_DSC_DEVICE_FAILURE 0x0003
-#define I2O_BSA_DSC_DEVICE_NOT_READY 0x0004
-#define I2O_BSA_DSC_MEDIA_NOT_PRESENT 0x0005
-#define I2O_BSA_DSC_MEDIA_LOCKED 0x0006
-#define I2O_BSA_DSC_MEDIA_FAILURE 0x0007
-#define I2O_BSA_DSC_PROTOCOL_FAILURE 0x0008
-#define I2O_BSA_DSC_BUS_FAILURE 0x0009
-#define I2O_BSA_DSC_ACCESS_VIOLATION 0x000A
-#define I2O_BSA_DSC_WRITE_PROTECTED 0x000B
-#define I2O_BSA_DSC_DEVICE_RESET 0x000C
-#define I2O_BSA_DSC_VOLUME_CHANGED 0x000D
-#define I2O_BSA_DSC_TIMEOUT 0x000E
-
-/* FailureStatusCodes, Table 3-3 Message Failure Codes */
-
-#define I2O_FSC_TRANSPORT_SERVICE_SUSPENDED 0x81
-#define I2O_FSC_TRANSPORT_SERVICE_TERMINATED 0x82
-#define I2O_FSC_TRANSPORT_CONGESTION 0x83
-#define I2O_FSC_TRANSPORT_FAILURE 0x84
-#define I2O_FSC_TRANSPORT_STATE_ERROR 0x85
-#define I2O_FSC_TRANSPORT_TIME_OUT 0x86
-#define I2O_FSC_TRANSPORT_ROUTING_FAILURE 0x87
-#define I2O_FSC_TRANSPORT_INVALID_VERSION 0x88
-#define I2O_FSC_TRANSPORT_INVALID_OFFSET 0x89
-#define I2O_FSC_TRANSPORT_INVALID_MSG_FLAGS 0x8A
-#define I2O_FSC_TRANSPORT_FRAME_TOO_SMALL 0x8B
-#define I2O_FSC_TRANSPORT_FRAME_TOO_LARGE 0x8C
-#define I2O_FSC_TRANSPORT_INVALID_TARGET_ID 0x8D
-#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_ID 0x8E
-#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_CONTEXT 0x8F
-#define I2O_FSC_TRANSPORT_UNKNOWN_FAILURE 0xFF
-
-/* Device Claim Types */
-#define I2O_CLAIM_PRIMARY 0x01000000
-#define I2O_CLAIM_MANAGEMENT 0x02000000
-#define I2O_CLAIM_AUTHORIZED 0x03000000
-#define I2O_CLAIM_SECONDARY 0x04000000
-
-/* Message header defines for VersionOffset */
-#define I2OVER15 0x0001
-#define I2OVER20 0x0002
-
-/* Default is 1.5 */
-#define I2OVERSION I2OVER15
-
-#define SGL_OFFSET_0 I2OVERSION
-#define SGL_OFFSET_4 (0x0040 | I2OVERSION)
-#define SGL_OFFSET_5 (0x0050 | I2OVERSION)
-#define SGL_OFFSET_6 (0x0060 | I2OVERSION)
-#define SGL_OFFSET_7 (0x0070 | I2OVERSION)
-#define SGL_OFFSET_8 (0x0080 | I2OVERSION)
-#define SGL_OFFSET_9 (0x0090 | I2OVERSION)
-#define SGL_OFFSET_10 (0x00A0 | I2OVERSION)
-#define SGL_OFFSET_11 (0x00B0 | I2OVERSION)
-#define SGL_OFFSET_12 (0x00C0 | I2OVERSION)
-#define SGL_OFFSET(x) (((x)<<4) | I2OVERSION)
-
-/* Transaction Reply Lists (TRL) Control Word structure */
-#define TRL_SINGLE_FIXED_LENGTH 0x00
-#define TRL_SINGLE_VARIABLE_LENGTH 0x40
-#define TRL_MULTIPLE_FIXED_LENGTH 0x80
-
- /* msg header defines for MsgFlags */
-#define MSG_STATIC 0x0100
-#define MSG_64BIT_CNTXT 0x0200
-#define MSG_MULTI_TRANS 0x1000
-#define MSG_FAIL 0x2000
-#define MSG_FINAL 0x4000
-#define MSG_REPLY 0x8000
-
- /* minimum size msg */
-#define THREE_WORD_MSG_SIZE 0x00030000
-#define FOUR_WORD_MSG_SIZE 0x00040000
-#define FIVE_WORD_MSG_SIZE 0x00050000
-#define SIX_WORD_MSG_SIZE 0x00060000
-#define SEVEN_WORD_MSG_SIZE 0x00070000
-#define EIGHT_WORD_MSG_SIZE 0x00080000
-#define NINE_WORD_MSG_SIZE 0x00090000
-#define TEN_WORD_MSG_SIZE 0x000A0000
-#define ELEVEN_WORD_MSG_SIZE 0x000B0000
-#define I2O_MESSAGE_SIZE(x) ((x)<<16)
-
-/* special TID assignments */
-#define ADAPTER_TID 0
-#define HOST_TID 1
-
-/* outbound queue defines */
-#define I2O_MAX_OUTBOUND_MSG_FRAMES 128
-#define I2O_OUTBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */
-
-#define I2O_POST_WAIT_OK 0
-#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT
-
-#define I2O_CONTEXT_LIST_MIN_LENGTH 15
-#define I2O_CONTEXT_LIST_USED 0x01
-#define I2O_CONTEXT_LIST_DELETED 0x02
-
-/* timeouts */
-#define I2O_TIMEOUT_INIT_OUTBOUND_QUEUE 15
-#define I2O_TIMEOUT_MESSAGE_GET 5
-#define I2O_TIMEOUT_RESET 30
-#define I2O_TIMEOUT_STATUS_GET 5
-#define I2O_TIMEOUT_LCT_GET 360
-#define I2O_TIMEOUT_SCSI_SCB_ABORT 240
-
-/* retries */
-#define I2O_HRT_GET_TRIES 3
-#define I2O_LCT_GET_TRIES 3
-
-/* defines for max_sectors and max_phys_segments */
-#define I2O_MAX_SECTORS 1024
-#define I2O_MAX_SECTORS_LIMITED 256
-#define I2O_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS
-
#endif /* __KERNEL__ */
#endif /* _I2O_H */
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 7b6a6a58e46..f2e1b5b2289 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -18,6 +18,7 @@
#include <linux/bio.h>
#include <linux/device.h>
#include <linux/pci.h>
+#include <linux/completion.h>
#include <asm/byteorder.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -638,7 +639,7 @@ typedef struct ide_drive_s {
int crc_count; /* crc counter to reduce drive speed */
struct list_head list;
struct device gendev;
- struct semaphore gendev_rel_sem; /* to deal with device release() */
+ struct completion gendev_rel_comp; /* to deal with device release() */
} ide_drive_t;
#define to_ide_device(dev)container_of(dev, ide_drive_t, gendev)
@@ -794,14 +795,14 @@ typedef struct hwif_s {
unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */
struct device gendev;
- struct semaphore gendev_rel_sem; /* To deal with device release() */
+ struct completion gendev_rel_comp; /* To deal with device release() */
void *hwif_data; /* extra hwif data */
unsigned dma;
void (*led_act)(void *data, int rw);
-} ____cacheline_maxaligned_in_smp ide_hwif_t;
+} ____cacheline_internodealigned_in_smp ide_hwif_t;
/*
* internal ide interrupt handler type
diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h
index 511999c7eed..395f0aad9cb 100644
--- a/include/linux/if_frad.h
+++ b/include/linux/if_frad.h
@@ -131,17 +131,17 @@ struct frad_conf
/* these are the fields of an RFC 1490 header */
struct frhdr
{
- unsigned char control __attribute__((packed));
+ unsigned char control;
/* for IP packets, this can be the NLPID */
- unsigned char pad __attribute__((packed));
+ unsigned char pad;
- unsigned char NLPID __attribute__((packed));
- unsigned char OUI[3] __attribute__((packed));
- unsigned short PID __attribute__((packed));
+ unsigned char NLPID;
+ unsigned char OUI[3];
+ unsigned short PID;
#define IP_NLPID pad
-};
+} __attribute__((packed));
/* see RFC 1490 for the definition of the following */
#define FRAD_I_UI 0x03
diff --git a/include/linux/inet.h b/include/linux/inet.h
index 3b5e9fdff87..6c5587af118 100644
--- a/include/linux/inet.h
+++ b/include/linux/inet.h
@@ -45,6 +45,6 @@
#ifdef __KERNEL__
#include <linux/types.h>
-extern __u32 in_aton(const char *str);
+extern __be32 in_aton(const char *str);
#endif
#endif /* _LINUX_INET_H */
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 41f150a3d2d..2c08fdc2bdf 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -79,7 +79,7 @@ static inline void __deprecated save_flags(unsigned long *x)
{
local_save_flags(*x);
}
-#define save_flags(x) save_flags(&x);
+#define save_flags(x) save_flags(&x)
static inline void __deprecated restore_flags(unsigned long x)
{
local_irq_restore(x);
@@ -112,7 +112,7 @@ enum
TIMER_SOFTIRQ,
NET_TX_SOFTIRQ,
NET_RX_SOFTIRQ,
- SCSI_SOFTIRQ,
+ BLOCK_SOFTIRQ,
TASKLET_SOFTIRQ
};
diff --git a/include/linux/ip.h b/include/linux/ip.h
index 9e2eb9a602e..4b55cf1df73 100644
--- a/include/linux/ip.h
+++ b/include/linux/ip.h
@@ -90,14 +90,14 @@ struct iphdr {
#error "Please fix <asm/byteorder.h>"
#endif
__u8 tos;
- __u16 tot_len;
- __u16 id;
- __u16 frag_off;
+ __be16 tot_len;
+ __be16 id;
+ __be16 frag_off;
__u8 ttl;
__u8 protocol;
__u16 check;
- __u32 saddr;
- __u32 daddr;
+ __be32 saddr;
+ __be32 daddr;
/*The options start here. */
};
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 93bbed5c6cf..9c8f4c9ed42 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -191,6 +191,10 @@ struct inet6_skb_parm {
__u16 srcrt;
__u16 dst1;
__u16 lastopt;
+ __u32 nhoff;
+ __u16 flags;
+
+#define IP6SKB_XFRM_TRANSFORMED 1
};
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
diff --git a/include/linux/irq.h b/include/linux/irq.h
index f04ba20712a..6c5d4c898cc 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -12,7 +12,7 @@
#include <linux/config.h>
#include <linux/smp.h>
-#if !defined(CONFIG_ARCH_S390)
+#if !defined(CONFIG_S390)
#include <linux/linkage.h>
#include <linux/cache.h>
@@ -221,6 +221,17 @@ extern void note_interrupt(unsigned int irq, irq_desc_t *desc,
extern int can_request_irq(unsigned int irq, unsigned long irqflags);
extern void init_irq_proc(void);
+
+#ifdef CONFIG_AUTO_IRQ_AFFINITY
+extern int select_smp_affinity(unsigned int irq);
+#else
+static inline int
+select_smp_affinity(unsigned int irq)
+{
+ return 1;
+}
+#endif
+
#endif
extern hw_irq_controller no_irq_type; /* needed in every arch ? */
diff --git a/include/linux/isdnif.h b/include/linux/isdnif.h
index 7a4eacd77cb..04e10f9f14f 100644
--- a/include/linux/isdnif.h
+++ b/include/linux/isdnif.h
@@ -282,43 +282,43 @@ typedef struct setup_parm {
typedef struct T30_s {
/* session parameters */
- __u8 resolution __attribute__ ((packed));
- __u8 rate __attribute__ ((packed));
- __u8 width __attribute__ ((packed));
- __u8 length __attribute__ ((packed));
- __u8 compression __attribute__ ((packed));
- __u8 ecm __attribute__ ((packed));
- __u8 binary __attribute__ ((packed));
- __u8 scantime __attribute__ ((packed));
- __u8 id[FAXIDLEN] __attribute__ ((packed));
+ __u8 resolution;
+ __u8 rate;
+ __u8 width;
+ __u8 length;
+ __u8 compression;
+ __u8 ecm;
+ __u8 binary;
+ __u8 scantime;
+ __u8 id[FAXIDLEN];
/* additional parameters */
- __u8 phase __attribute__ ((packed));
- __u8 direction __attribute__ ((packed));
- __u8 code __attribute__ ((packed));
- __u8 badlin __attribute__ ((packed));
- __u8 badmul __attribute__ ((packed));
- __u8 bor __attribute__ ((packed));
- __u8 fet __attribute__ ((packed));
- __u8 pollid[FAXIDLEN] __attribute__ ((packed));
- __u8 cq __attribute__ ((packed));
- __u8 cr __attribute__ ((packed));
- __u8 ctcrty __attribute__ ((packed));
- __u8 minsp __attribute__ ((packed));
- __u8 phcto __attribute__ ((packed));
- __u8 rel __attribute__ ((packed));
- __u8 nbc __attribute__ ((packed));
+ __u8 phase;
+ __u8 direction;
+ __u8 code;
+ __u8 badlin;
+ __u8 badmul;
+ __u8 bor;
+ __u8 fet;
+ __u8 pollid[FAXIDLEN];
+ __u8 cq;
+ __u8 cr;
+ __u8 ctcrty;
+ __u8 minsp;
+ __u8 phcto;
+ __u8 rel;
+ __u8 nbc;
/* remote station parameters */
- __u8 r_resolution __attribute__ ((packed));
- __u8 r_rate __attribute__ ((packed));
- __u8 r_width __attribute__ ((packed));
- __u8 r_length __attribute__ ((packed));
- __u8 r_compression __attribute__ ((packed));
- __u8 r_ecm __attribute__ ((packed));
- __u8 r_binary __attribute__ ((packed));
- __u8 r_scantime __attribute__ ((packed));
- __u8 r_id[FAXIDLEN] __attribute__ ((packed));
- __u8 r_code __attribute__ ((packed));
-} T30_s;
+ __u8 r_resolution;
+ __u8 r_rate;
+ __u8 r_width;
+ __u8 r_length;
+ __u8 r_compression;
+ __u8 r_ecm;
+ __u8 r_binary;
+ __u8 r_scantime;
+ __u8 r_id[FAXIDLEN];
+ __u8 r_code;
+} __attribute__((packed)) T30_s;
#define ISDN_TTY_FAX_CONN_IN 0
#define ISDN_TTY_FAX_CONN_OUT 1
diff --git a/include/linux/isicom.h b/include/linux/isicom.h
index 7c6eae7f6ed..45b3d48f097 100644
--- a/include/linux/isicom.h
+++ b/include/linux/isicom.h
@@ -4,47 +4,12 @@
/*#define ISICOM_DEBUG*/
/*#define ISICOM_DEBUG_DTR_RTS*/
-
-/*
- * Firmware Loader definitions ...
- */
-
-#define __MultiTech ('M'<<8)
-#define MIOCTL_LOAD_FIRMWARE (__MultiTech | 0x01)
-#define MIOCTL_READ_FIRMWARE (__MultiTech | 0x02)
-#define MIOCTL_XFER_CTRL (__MultiTech | 0x03)
-#define MIOCTL_RESET_CARD (__MultiTech | 0x04)
-
-#define DATA_SIZE 16
-
-typedef struct {
- unsigned short exec_segment;
- unsigned short exec_addr;
-} exec_record;
-
-typedef struct {
- int board; /* Board to load */
- unsigned short addr;
- unsigned short count;
-} bin_header;
-
-typedef struct {
- int board; /* Board to load */
- unsigned short addr;
- unsigned short count;
- unsigned short segment;
- unsigned char bin_data[DATA_SIZE];
-} bin_frame;
-
#ifdef __KERNEL__
#define YES 1
#define NO 0
-#define ISILOAD_MISC_MINOR 155 /* /dev/isctl */
-#define ISILOAD_NAME "ISILoad"
-
-/*
+/*
* ISICOM Driver definitions ...
*
*/
@@ -55,8 +20,8 @@ typedef struct {
* PCI definitions
*/
- #define DEVID_COUNT 9
- #define VENDOR_ID 0x10b5
+#define DEVID_COUNT 9
+#define VENDOR_ID 0x10b5
/*
* These are now officially allocated numbers
@@ -66,9 +31,9 @@ typedef struct {
#define ISICOM_CMAJOR 113 /* callout */
#define ISICOM_MAGIC (('M' << 8) | 'T')
-#define WAKEUP_CHARS 256 /* hard coded for now */
-#define TX_SIZE 254
-
+#define WAKEUP_CHARS 256 /* hard coded for now */
+#define TX_SIZE 254
+
#define BOARD_COUNT 4
#define PORT_COUNT (BOARD_COUNT*16)
@@ -98,18 +63,15 @@ typedef struct {
#define ISICOM_INITIATE_XONXOFF 0x04
#define ISICOM_RESPOND_XONXOFF 0x08
-#define InterruptTheCard(base) (outw(0,(base)+0xc))
-#define ClearInterrupt(base) (inw((base)+0x0a))
-
#define BOARD(line) (((line) >> 4) & 0x3)
/* isi kill queue bitmap */
-
+
#define ISICOM_KILLTX 0x01
#define ISICOM_KILLRX 0x02
/* isi_board status bitmap */
-
+
#define FIRMWARE_LOADED 0x0001
#define BOARD_ACTIVE 0x0002
@@ -123,9 +85,8 @@ typedef struct {
#define ISI_RTS 0x0200
-#define ISI_TXOK 0x0001
-
+#define ISI_TXOK 0x0001
+
#endif /* __KERNEL__ */
#endif /* ISICOM_H */
-
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index dcde7adfdce..558cb4c26ec 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -498,6 +498,12 @@ struct transaction_s
struct journal_head *t_checkpoint_list;
/*
+ * Doubly-linked circular list of all buffers submitted for IO while
+ * checkpointing. [j_list_lock]
+ */
+ struct journal_head *t_checkpoint_io_list;
+
+ /*
* Doubly-linked circular list of temporary buffers currently undergoing
* IO in the log [j_list_lock]
*/
@@ -843,7 +849,7 @@ extern void journal_commit_transaction(journal_t *);
/* Checkpoint list management */
int __journal_clean_checkpoint_list(journal_t *journal);
-void __journal_remove_checkpoint(struct journal_head *);
+int __journal_remove_checkpoint(struct journal_head *);
void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
/* Buffer IO */
diff --git a/include/linux/jffs2_fs_i.h b/include/linux/jffs2_fs_i.h
index ef85ab56302..ad565bf9dcc 100644
--- a/include/linux/jffs2_fs_i.h
+++ b/include/linux/jffs2_fs_i.h
@@ -8,11 +8,11 @@
#include <asm/semaphore.h>
struct jffs2_inode_info {
- /* We need an internal semaphore similar to inode->i_sem.
+ /* We need an internal mutex similar to inode->i_mutex.
 Unfortunately, we can't use the existing one, because
either the GC would deadlock, or we'd have to release it
before letting GC proceed. Or we'd have to put ugliness
- into the GC code so it didn't attempt to obtain the i_sem
+ into the GC code so it didn't attempt to obtain the i_mutex
for the inode(s) which are already locked */
struct semaphore sem;
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 6acfdbba734..99905e18053 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -1,21 +1,12 @@
#ifndef _LINUX_JIFFIES_H
#define _LINUX_JIFFIES_H
+#include <linux/calc64.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <asm/param.h> /* for HZ */
-#include <asm/div64.h>
-
-#ifndef div_long_long_rem
-#define div_long_long_rem(dividend,divisor,remainder) \
-({ \
- u64 result = dividend; \
- *remainder = do_div(result,divisor); \
- result; \
-})
-#endif
/*
* The following defines establish the engineering parameters of the PLL
@@ -373,8 +364,11 @@ jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
* one divide.
*/
u64 nsec = (u64)jiffies * TICK_NSEC;
- value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_usec);
- value->tv_usec /= NSEC_PER_USEC;
+ long tv_usec;
+
+ value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tv_usec);
+ tv_usec /= NSEC_PER_USEC;
+ value->tv_usec = tv_usec;
}
/*
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index 7428198111e..45f625d7d0b 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -151,7 +151,7 @@ extern unsigned int keymap_count;
static inline void con_schedule_flip(struct tty_struct *t)
{
- schedule_work(&t->flip.work);
+ schedule_work(&t->buf.work);
}
#endif
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index b1e407a4fbd..323924edb26 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -47,6 +47,8 @@ extern int console_printk[];
#define default_console_loglevel (console_printk[3])
struct completion;
+struct pt_regs;
+struct user;
/**
* might_sleep - annotation for functions that can sleep
@@ -123,6 +125,8 @@ extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
extern int session_of_pgrp(int pgrp);
+extern void dump_thread(struct pt_regs *regs, struct user *dump);
+
#ifdef CONFIG_PRINTK
asmlinkage int vprintk(const char *fmt, va_list args)
__attribute__ ((format (printf, 1, 0)));
@@ -212,6 +216,7 @@ extern void dump_stack(void);
((unsigned char *)&addr)[1], \
((unsigned char *)&addr)[2], \
((unsigned char *)&addr)[3]
+#define NIPQUAD_FMT "%u.%u.%u.%u"
#define NIP6(addr) \
ntohs((addr).s6_addr16[0]), \
@@ -222,6 +227,7 @@ extern void dump_stack(void);
ntohs((addr).s6_addr16[5]), \
ntohs((addr).s6_addr16[6]), \
ntohs((addr).s6_addr16[7])
+#define NIP6_FMT "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
#if defined(__LITTLE_ENDIAN)
#define HIPQUAD(addr) \
@@ -286,6 +292,15 @@ extern void dump_stack(void);
1; \
})
+/*
+ * Check at compile time that 'function' is a certain type, or is a pointer
+ * to that type (needs to use typedef for the function type.)
+ */
+#define typecheck_fn(type,function) \
+({ typeof(type) __tmp = function; \
+ (void)__tmp; \
+})
+
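
For example (illustrative only; the typedef and functions below are hypothetical), the macro can pin down a callback's type at compile time:

	typedef void (*work_fn_t)(void *data);

	static void my_work(void *data) { }

	static void check_types(void)
	{
		/* compiles cleanly; passing a function of a different type
		 * would trigger a compiler diagnostic on the assignment */
		typecheck_fn(work_fn_t, my_work);
	}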
#endif /* __KERNEL__ */
#define SI_LOAD_SHIFT 16
@@ -316,8 +331,6 @@ extern int randomize_va_space;
#endif
/* Trap pasters of __FUNCTION__ at compile-time */
-#if __GNUC__ > 2 || __GNUC_MINOR__ >= 95
#define __FUNCTION__ (__func__)
-#endif
#endif
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index c8468472aec..94abc07cb16 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -41,7 +41,7 @@ typedef unsigned long kimage_entry_t;
#define IND_DONE 0x4
#define IND_SOURCE 0x8
-#define KEXEC_SEGMENT_MAX 8
+#define KEXEC_SEGMENT_MAX 16
struct kexec_segment {
void __user *buf;
size_t bufsz;
@@ -125,6 +125,8 @@ extern struct kimage *kexec_image;
/* Location of a reserved region to hold the crash kernel.
*/
extern struct resource crashk_res;
+typedef u32 note_buf_t[MAX_NOTE_BYTES/4];
+extern note_buf_t *crash_notes;
#else /* !CONFIG_KEXEC */
struct pt_regs;
diff --git a/include/linux/key.h b/include/linux/key.h
index 53513a3be53..cbf464ad958 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -177,6 +177,8 @@ struct key {
/*
* kernel managed key type definition
*/
+typedef int (*request_key_actor_t)(struct key *key, struct key *authkey, const char *op);
+
struct key_type {
/* name of the type */
const char *name;
@@ -193,14 +195,6 @@ struct key_type {
*/
int (*instantiate)(struct key *key, const void *data, size_t datalen);
- /* duplicate a key of this type (optional)
- * - the source key will be locked against change
- * - the new description will be attached
- * - the quota will have been adjusted automatically from
- * source->quotalen
- */
- int (*duplicate)(struct key *key, const struct key *source);
-
/* update a key of this type (optional)
* - this method should call key_payload_reserve() to recalculate the
* quota consumption
@@ -226,6 +220,16 @@ struct key_type {
*/
long (*read)(const struct key *key, char __user *buffer, size_t buflen);
+ /* handle request_key() for this type instead of invoking
+ * /sbin/request-key (optional)
+ * - key is the key to instantiate
+ * - authkey is the authority to assume when instantiating this key
+ * - op is the operation to be done, usually "create"
+ * - the call must not return until the instantiation process has run
+ * its course
+ */
+ request_key_actor_t request_key;
+
/* internal fields */
struct list_head link; /* link in types list */
};
diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h
index 8d7c59a29e0..3365945640c 100644
--- a/include/linux/keyctl.h
+++ b/include/linux/keyctl.h
@@ -19,6 +19,7 @@
#define KEY_SPEC_USER_KEYRING -4 /* - key ID for UID-specific keyring */
#define KEY_SPEC_USER_SESSION_KEYRING -5 /* - key ID for UID-session keyring */
#define KEY_SPEC_GROUP_KEYRING -6 /* - key ID for GID-specific keyring */
+#define KEY_SPEC_REQKEY_AUTH_KEY -7 /* - key ID for assumed request_key auth key */
/* request-key default keyrings */
#define KEY_REQKEY_DEFL_NO_CHANGE -1
@@ -46,5 +47,7 @@
#define KEYCTL_INSTANTIATE 12 /* instantiate a partially constructed key */
#define KEYCTL_NEGATE 13 /* negate a partially constructed key */
#define KEYCTL_SET_REQKEY_KEYRING 14 /* set default request-key keyring */
+#define KEYCTL_SET_TIMEOUT 15 /* set key timeout */
+#define KEYCTL_ASSUME_AUTHORITY 16 /* assume request_key() authorisation */
#endif /* _LINUX_KEYCTL_H */
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index c03f2dc933d..669756bc20a 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -68,6 +68,9 @@ struct kprobe {
/* list of kprobes for multi-handler support */
struct list_head list;
+ /* Indicates that the corresponding module has been ref counted */
+ unsigned int mod_refcounted;
+
/*count the number of times this probe was temporarily disarmed */
unsigned long nmissed;
@@ -149,11 +152,10 @@ struct kretprobe_instance {
};
extern spinlock_t kretprobe_lock;
+extern struct semaphore kprobe_mutex;
extern int arch_prepare_kprobe(struct kprobe *p);
-extern void arch_copy_kprobe(struct kprobe *p);
extern void arch_arm_kprobe(struct kprobe *p);
extern void arch_disarm_kprobe(struct kprobe *p);
-extern void arch_remove_kprobe(struct kprobe *p);
extern int arch_init_kprobes(void);
extern void show_registers(struct pt_regs *regs);
extern kprobe_opcode_t *get_insn_slot(void);
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
new file mode 100644
index 00000000000..1bd6552cc34
--- /dev/null
+++ b/include/linux/ktime.h
@@ -0,0 +1,284 @@
+/*
+ * include/linux/ktime.h
+ *
+ * ktime_t - nanosecond-resolution time format.
+ *
+ * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
+ *
+ * data type definitions, declarations, prototypes and macros.
+ *
+ * Started by: Thomas Gleixner and Ingo Molnar
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+#ifndef _LINUX_KTIME_H
+#define _LINUX_KTIME_H
+
+#include <linux/time.h>
+#include <linux/jiffies.h>
+
+/*
+ * ktime_t:
+ *
+ * On 64-bit CPUs a single 64-bit variable is used to store the hrtimers
+ * internal representation of time values in scalar nanoseconds. The
+ * design plays out best on 64-bit CPUs, where most conversions are
+ * NOPs and most arithmetic ktime_t operations are plain arithmetic
+ * operations.
+ *
+ * On 32-bit CPUs an optimized representation of the timespec structure
+ * is used to avoid expensive conversions from and to timespecs. The
+ * endian-aware order of the tv struct members is chosen to allow
+ * mathematical operations on the tv64 member of the union too, which
+ * for certain operations produces better code.
+ *
+ * For architectures with efficient support for 64/32-bit conversions the
+ * plain scalar nanosecond based representation can be selected by the
+ * config switch CONFIG_KTIME_SCALAR.
+ */
+typedef union {
+ s64 tv64;
+#if BITS_PER_LONG != 64 && !defined(CONFIG_KTIME_SCALAR)
+ struct {
+# ifdef __BIG_ENDIAN
+ s32 sec, nsec;
+# else
+ s32 nsec, sec;
+# endif
+ } tv;
+#endif
+} ktime_t;
+
+#define KTIME_MAX (~((u64)1 << 63))
+
+/*
+ * ktime_t definitions when using the 64-bit scalar representation:
+ */
+
+#if (BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)
+
+/* Define a ktime_t variable and initialize it to zero: */
+#define DEFINE_KTIME(kt) ktime_t kt = { .tv64 = 0 }
+
+/**
+ * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
+ *
+ * @secs: seconds to set
+ * @nsecs: nanoseconds to set
+ *
+ * Return the ktime_t representation of the value
+ */
+static inline ktime_t ktime_set(const long secs, const unsigned long nsecs)
+{
+ return (ktime_t) { .tv64 = (s64)secs * NSEC_PER_SEC + (s64)nsecs };
+}
+
+/* Subtract two ktime_t variables. rem = lhs -rhs: */
+#define ktime_sub(lhs, rhs) \
+ ({ (ktime_t){ .tv64 = (lhs).tv64 - (rhs).tv64 }; })
+
+/* Add two ktime_t variables. res = lhs + rhs: */
+#define ktime_add(lhs, rhs) \
+ ({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; })
+
+/*
+ * Add a ktime_t variable and a scalar nanosecond value.
+ * res = kt + nsval:
+ */
+#define ktime_add_ns(kt, nsval) \
+ ({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; })
+
+/* convert a timespec to ktime_t format: */
+#define timespec_to_ktime(ts) ktime_set((ts).tv_sec, (ts).tv_nsec)
+
+/* convert a timeval to ktime_t format: */
+#define timeval_to_ktime(tv) ktime_set((tv).tv_sec, (tv).tv_usec * 1000)
+
+/* Map the ktime_t to timespec conversion to ns_to_timespec function */
+#define ktime_to_timespec(kt) ns_to_timespec((kt).tv64)
+
+/* Map the ktime_t to timeval conversion to ns_to_timeval function */
+#define ktime_to_timeval(kt) ns_to_timeval((kt).tv64)
+
+/* Map the ktime_t to clock_t conversion to the inline in jiffies.h: */
+#define ktime_to_clock_t(kt) nsec_to_clock_t((kt).tv64)
+
+/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
+#define ktime_to_ns(kt) ((kt).tv64)
+
+#else
+
+/*
+ * Helper macros/inlines to get the ktime_t math right in the timespec
+ * representation. The macros are sometimes ugly - their actual use is
+ * pretty okay-ish, given the circumstances. We do all this for
+ * performance reasons. The pure scalar nsec_t based code was nice and
+ * simple, but created too many 64-bit / 32-bit conversions and divisions.
+ *
+ * Be especially aware that negative values are represented in a way
+ * that the tv.sec field is negative and the tv.nsec field is greater
+ * or equal to zero but less than nanoseconds per second. This is the
+ * same representation which is used by timespecs.
+ *
+ * tv.sec < 0 and 0 <= tv.nsec < NSEC_PER_SEC
+ */
+
+/* Define a ktime_t variable and initialize it to zero: */
+#define DEFINE_KTIME(kt) ktime_t kt = { .tv64 = 0 }
+
+/* Set a ktime_t variable to a value in sec/nsec representation: */
+static inline ktime_t ktime_set(const long secs, const unsigned long nsecs)
+{
+ return (ktime_t) { .tv = { .sec = secs, .nsec = nsecs } };
+}
+
+/**
+ * ktime_sub - subtract two ktime_t variables
+ *
+ * @lhs: minuend
+ * @rhs: subtrahend
+ *
+ * Returns the result of the subtraction
+ */
+static inline ktime_t ktime_sub(const ktime_t lhs, const ktime_t rhs)
+{
+ ktime_t res;
+
+ res.tv64 = lhs.tv64 - rhs.tv64;
+ if (res.tv.nsec < 0)
+ res.tv.nsec += NSEC_PER_SEC;
+
+ return res;
+}
+
+/**
+ * ktime_add - add two ktime_t variables
+ *
+ * @add1: addend1
+ * @add2: addend2
+ *
+ * Returns the sum of addend1 and addend2
+ */
+static inline ktime_t ktime_add(const ktime_t add1, const ktime_t add2)
+{
+ ktime_t res;
+
+ res.tv64 = add1.tv64 + add2.tv64;
+ /*
+ * performance trick: (u32)-NSEC_PER_SEC equals 2^32 - NSEC_PER_SEC, so
+ * adding it subtracts NSEC_PER_SEC from the nsec field (lower 32 bits)
+ * and adds 1 to the sec field (upper 32 bits).
+ *
+ * it's equivalent to:
+ * tv.nsec -= NSEC_PER_SEC
+ * tv.sec ++;
+ */
+ if (res.tv.nsec >= NSEC_PER_SEC)
+ res.tv64 += (u32)-NSEC_PER_SEC;
+
+ return res;
+}
+
+/**
+ * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
+ *
+ * @kt: addend
+ * @nsec: the scalar nsec value to add
+ *
+ * Returns the sum of kt and nsec in ktime_t format
+ */
+extern ktime_t ktime_add_ns(const ktime_t kt, u64 nsec);
+
+/**
+ * timespec_to_ktime - convert a timespec to ktime_t format
+ *
+ * @ts: the timespec variable to convert
+ *
+ * Returns a ktime_t variable with the converted timespec value
+ */
+static inline ktime_t timespec_to_ktime(const struct timespec ts)
+{
+ return (ktime_t) { .tv = { .sec = (s32)ts.tv_sec,
+ .nsec = (s32)ts.tv_nsec } };
+}
+
+/**
+ * timeval_to_ktime - convert a timeval to ktime_t format
+ *
+ * @tv: the timeval variable to convert
+ *
+ * Returns a ktime_t variable with the converted timeval value
+ */
+static inline ktime_t timeval_to_ktime(const struct timeval tv)
+{
+ return (ktime_t) { .tv = { .sec = (s32)tv.tv_sec,
+ .nsec = (s32)tv.tv_usec * 1000 } };
+}
+
+/**
+ * ktime_to_timespec - convert a ktime_t variable to timespec format
+ *
+ * @kt: the ktime_t variable to convert
+ *
+ * Returns the timespec representation of the ktime value
+ */
+static inline struct timespec ktime_to_timespec(const ktime_t kt)
+{
+ return (struct timespec) { .tv_sec = (time_t) kt.tv.sec,
+ .tv_nsec = (long) kt.tv.nsec };
+}
+
+/**
+ * ktime_to_timeval - convert a ktime_t variable to timeval format
+ *
+ * @kt: the ktime_t variable to convert
+ *
+ * Returns the timeval representation of the ktime value
+ */
+static inline struct timeval ktime_to_timeval(const ktime_t kt)
+{
+ return (struct timeval) {
+ .tv_sec = (time_t) kt.tv.sec,
+ .tv_usec = (suseconds_t) (kt.tv.nsec / NSEC_PER_USEC) };
+}
+
+/**
+ * ktime_to_clock_t - convert a ktime_t variable to clock_t format
+ * @kt: the ktime_t variable to convert
+ *
+ * Returns a clock_t variable with the converted value
+ */
+static inline clock_t ktime_to_clock_t(const ktime_t kt)
+{
+ return nsec_to_clock_t( (u64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec);
+}
+
+/**
+ * ktime_to_ns - convert a ktime_t variable to scalar nanoseconds
+ * @kt: the ktime_t variable to convert
+ *
+ * Returns the scalar nanoseconds representation of kt
+ */
+static inline u64 ktime_to_ns(const ktime_t kt)
+{
+ return (u64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec;
+}
+
+#endif
+
+/*
+ * The resolution of the clocks. The resolution value is returned in
+ * the clock_getres() system call to give application programmers an
+ * idea of the (in)accuracy of timers. Timer values are rounded up to
+ * this resolution.
+ */
+#define KTIME_REALTIME_RES (ktime_t){ .tv64 = TICK_NSEC }
+#define KTIME_MONOTONIC_RES (ktime_t){ .tv64 = TICK_NSEC }
+
+/* Get the monotonic time in timespec format: */
+extern void ktime_get_ts(struct timespec *ts);
+
+/* Get the real (wall-) time in timespec format: */
+#define ktime_get_real_ts(ts) getnstimeofday(ts)
+
+#endif
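
A minimal usage sketch of the ktime_t helpers declared above (not part of the patch); it assumes a kernel context with <linux/ktime.h> available, and the function name is illustrative only.

#include <linux/ktime.h>

/* Sketch: measure an interval and report it in nanoseconds. */
static u64 example_elapsed_ns(struct timespec start, struct timespec end)
{
	ktime_t kt_start = timespec_to_ktime(start);
	ktime_t kt_end = timespec_to_ktime(end);

	/* ktime_sub() keeps the sec/nsec representation normalized. */
	return ktime_to_ns(ktime_sub(kt_end, kt_start));
}

The same code works with either storage format above, which is the point of the API: callers never touch tv64 or tv.sec/tv.nsec directly.
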
diff --git a/include/linux/libata.h b/include/linux/libata.h
index e828e172ccb..a43c95f8f96 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -124,6 +124,8 @@ enum {
ATA_FLAG_DEBUGMSG = (1 << 10),
ATA_FLAG_NO_ATAPI = (1 << 11), /* No ATAPI support */
+ ATA_FLAG_SUSPENDED = (1 << 12), /* port is suspended */
+
ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi layer */
ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */
ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */
@@ -436,6 +438,8 @@ extern void ata_std_ports(struct ata_ioports *ioaddr);
extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
unsigned int n_ports);
extern void ata_pci_remove_one (struct pci_dev *pdev);
+extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state);
+extern int ata_pci_device_resume(struct pci_dev *pdev);
#endif /* CONFIG_PCI */
extern int ata_device_add(const struct ata_probe_ent *ent);
extern void ata_host_set_remove(struct ata_host_set *host_set);
@@ -445,6 +449,10 @@ extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmn
extern int ata_scsi_error(struct Scsi_Host *host);
extern int ata_scsi_release(struct Scsi_Host *host);
extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
+extern int ata_scsi_device_resume(struct scsi_device *);
+extern int ata_scsi_device_suspend(struct scsi_device *);
+extern int ata_device_resume(struct ata_port *, struct ata_device *);
+extern int ata_device_suspend(struct ata_port *, struct ata_device *);
extern int ata_ratelimit(void);
/*
@@ -480,7 +488,8 @@ extern u8 ata_bmdma_status(struct ata_port *ap);
extern void ata_bmdma_irq_clear(struct ata_port *ap);
extern void ata_qc_complete(struct ata_queued_cmd *qc);
extern void ata_eng_timeout(struct ata_port *ap);
-extern void ata_scsi_simulate(u16 *id, struct scsi_cmnd *cmd,
+extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
+ struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *));
extern int ata_std_bios_param(struct scsi_device *sdev,
struct block_device *bdev,
diff --git a/include/linux/list.h b/include/linux/list.h
index 8e338828453..945daa1f13d 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -436,6 +436,20 @@ static inline void list_splice_init(struct list_head *list,
pos = n, n = list_entry(n->member.next, typeof(*n), member))
/**
+ * list_for_each_entry_safe_reverse - iterate backwards over list of given type, safe against removal of list entry
+ * @pos: the type * to use as a loop counter.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define list_for_each_entry_safe_reverse(pos, n, head, member) \
+ for (pos = list_entry((head)->prev, typeof(*pos), member), \
+ n = list_entry(pos->member.prev, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = n, n = list_entry(n->member.prev, typeof(*n), member))
+
+/**
* list_for_each_rcu - iterate over an rcu-protected list
* @pos: the &struct list_head to use as a loop counter.
* @head: the head for your list.
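
A short sketch of the new reverse-safe iterator (not part of the patch); struct item, its link member, and the helper name are assumptions for illustration.

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	int value;
	struct list_head link;
};

/* Sketch: tear a list down from the tail, freeing entries as we go. */
static void example_free_all_reverse(struct list_head *head)
{
	struct item *pos, *n;

	list_for_each_entry_safe_reverse(pos, n, head, link) {
		list_del(&pos->link);
		kfree(pos);
	}
}

Because @n caches the previous entry before the body runs, deleting @pos is safe, exactly as with the forward list_for_each_entry_safe().
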
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 16d4e5a08e1..95c8fea293b 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -172,7 +172,7 @@ extern struct nlm_host *nlm_find_client(void);
/*
* Server-side lock handling
*/
-int nlmsvc_async_call(struct nlm_rqst *, u32, rpc_action);
+int nlmsvc_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *);
u32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *,
struct nlm_lock *, int, struct nlm_cookie *);
u32 nlmsvc_unlock(struct nlm_file *, struct nlm_lock *);
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 40f63c9879d..f96506782eb 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -58,9 +58,9 @@ struct loop_device {
struct bio *lo_bio;
struct bio *lo_biotail;
int lo_state;
- struct semaphore lo_sem;
+ struct completion lo_done;
+ struct completion lo_bh_done;
struct semaphore lo_ctl_mutex;
- struct semaphore lo_bh_mutex;
int lo_pending;
request_queue_t *lo_queue;
diff --git a/include/linux/memory.h b/include/linux/memory.h
index dc4081b6f16..e251dc43d0f 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -70,21 +70,15 @@ static inline void unregister_memory_notifier(struct notifier_block *nb)
{
}
#else
-extern int register_memory(struct memory_block *, struct mem_section *section, struct node *);
extern int register_new_memory(struct mem_section *);
extern int unregister_memory_section(struct mem_section *);
extern int memory_dev_init(void);
-extern int register_memory_notifier(struct notifier_block *nb);
-extern void unregister_memory_notifier(struct notifier_block *nb);
+extern int remove_memory_block(unsigned long, struct mem_section *, int);
#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
-extern int invalidate_phys_mapping(unsigned long, unsigned long);
struct notifier_block;
-extern int register_memory_notifier(struct notifier_block *nb);
-extern void unregister_memory_notifier(struct notifier_block *nb);
-
#endif /* CONFIG_MEMORY_HOTPLUG */
#define hotplug_memory_notifier(fn, pri) { \
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 8b67cf837ca..c7ac77e873b 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -22,6 +22,9 @@
/* Flags for mbind */
#define MPOL_MF_STRICT (1<<0) /* Verify existing pages in the mapping */
+#define MPOL_MF_MOVE (1<<1) /* Move pages owned by this process to conform to mapping */
+#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to mapping */
+#define MPOL_MF_INTERNAL (1<<3) /* Internal flags start here */
#ifdef __KERNEL__
@@ -65,6 +68,7 @@ struct mempolicy {
nodemask_t nodes; /* interleave */
/* undefined for default */
} v;
+ nodemask_t cpuset_mems_allowed; /* mempolicy relative to these nodes */
};
/*
@@ -110,14 +114,6 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)
/*
- * Hugetlb policy. i386 hugetlb so far works with node numbers
- * instead of zone lists, so give it special interfaces for now.
- */
-extern int mpol_first_node(struct vm_area_struct *vma, unsigned long addr);
-extern int mpol_node_valid(int nid, struct vm_area_struct *vma,
- unsigned long addr);
-
-/*
* Tree of shared policies for a shared memory region.
* Maintain the policies in a pseudo mm that contains vmas. The vmas
* carry the policy. As a special twist the pseudo mm is indexed in pages, not
@@ -149,13 +145,37 @@ void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
unsigned long idx);
-struct mempolicy *get_vma_policy(struct task_struct *task,
- struct vm_area_struct *vma, unsigned long addr);
-
extern void numa_default_policy(void);
extern void numa_policy_init(void);
-extern void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new);
+extern void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *new);
+extern void mpol_rebind_task(struct task_struct *tsk,
+ const nodemask_t *new);
+extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
+#define set_cpuset_being_rebound(x) (cpuset_being_rebound = (x))
+
+#ifdef CONFIG_CPUSET
+#define current_cpuset_is_being_rebound() \
+ (cpuset_being_rebound == current->cpuset)
+#else
+#define current_cpuset_is_being_rebound() 0
+#endif
+
extern struct mempolicy default_policy;
+extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
+ unsigned long addr);
+
+extern int policy_zone;
+
+static inline void check_highest_zone(int k)
+{
+ if (k > policy_zone)
+ policy_zone = k;
+}
+
+int do_migrate_pages(struct mm_struct *mm,
+ const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
+
+extern void *cpuset_being_rebound; /* Trigger mpol_copy vma rebind */
#else
@@ -182,17 +202,6 @@ static inline struct mempolicy *mpol_copy(struct mempolicy *old)
return NULL;
}
-static inline int mpol_first_node(struct vm_area_struct *vma, unsigned long a)
-{
- return numa_node_id();
-}
-
-static inline int
-mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long a)
-{
- return 1;
-}
-
struct shared_policy {};
static inline int mpol_set_shared_policy(struct shared_policy *info,
@@ -227,11 +236,38 @@ static inline void numa_default_policy(void)
{
}
-static inline void numa_policy_rebind(const nodemask_t *old,
+static inline void mpol_rebind_policy(struct mempolicy *pol,
const nodemask_t *new)
{
}
+static inline void mpol_rebind_task(struct task_struct *tsk,
+ const nodemask_t *new)
+{
+}
+
+static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
+{
+}
+
+#define set_cpuset_being_rebound(x) do {} while (0)
+
+static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER);
+}
+
+static inline int do_migrate_pages(struct mm_struct *mm,
+ const nodemask_t *from_nodes,
+ const nodemask_t *to_nodes, int flags)
+{
+ return 0;
+}
+
+static inline void check_highest_zone(int k)
+{
+}
#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */
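
For the new MPOL_MF_MOVE flags above, a hedged userspace sketch; it assumes libnuma's <numaif.h> mbind() wrapper, and the helper name and node choice are made up.

#include <stdio.h>
#include <numaif.h>

/* Sketch: bind a mapping to node 0 and migrate pages already allocated
 * elsewhere onto that node. */
static void example_bind_and_move(void *addr, unsigned long len)
{
	unsigned long nodemask = 1UL << 0;	/* node 0 only */

	if (mbind(addr, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8,
		  MPOL_MF_STRICT | MPOL_MF_MOVE) != 0)
		perror("mbind");
}

MPOL_MF_MOVE only touches pages mapped solely by this process; MPOL_MF_MOVE_ALL also moves shared pages and requires extra privilege.
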
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a06a84d347f..c643016499a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3,6 +3,7 @@
#include <linux/sched.h>
#include <linux/errno.h>
+#include <linux/capability.h>
#ifdef __KERNEL__
@@ -13,6 +14,7 @@
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
#include <linux/fs.h>
+#include <linux/mutex.h>
struct mempolicy;
struct anon_vma;
@@ -223,24 +225,27 @@ struct page {
* & limit reverse map searches.
*/
union {
- unsigned long private; /* Mapping-private opaque data:
- * usually used for buffer_heads
- * if PagePrivate set; used for
- * swp_entry_t if PageSwapCache
- * When page is free, this indicates
- * order in the buddy system.
- */
+ struct {
+ unsigned long private; /* Mapping-private opaque data:
+ * usually used for buffer_heads
+ * if PagePrivate set; used for
+ * swp_entry_t if PageSwapCache.
+ * When page is free, this
+ * indicates order in the buddy
+ * system.
+ */
+ struct address_space *mapping; /* If low bit clear, points to
+ * inode address_space, or NULL.
+ * If page mapped as anonymous
+ * memory, low bit is set, and
+ * it points to anon_vma object:
+ * see PAGE_MAPPING_ANON below.
+ */
+ };
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
- spinlock_t ptl;
+ spinlock_t ptl;
#endif
- } u;
- struct address_space *mapping; /* If low bit clear, points to
- * inode address_space, or NULL.
- * If page mapped as anonymous
- * memory, low bit is set, and
- * it points to anon_vma object:
- * see PAGE_MAPPING_ANON below.
- */
+ };
pgoff_t index; /* Our offset within mapping. */
struct list_head lru; /* Pageout list, eg. active_list
* protected by zone->lru_lock !
@@ -261,8 +266,8 @@ struct page {
#endif /* WANT_PAGE_VIRTUAL */
};
-#define page_private(page) ((page)->u.private)
-#define set_page_private(page, v) ((page)->u.private = (v))
+#define page_private(page) ((page)->private)
+#define set_page_private(page, v) ((page)->private = (v))
/*
* FIXME: take this include out, include page-flags.h in
@@ -308,7 +313,7 @@ struct page {
*/
#define get_page_testone(p) atomic_inc_and_test(&(p)->_count)
-#define set_page_count(p,v) atomic_set(&(p)->_count, v - 1)
+#define set_page_count(p,v) atomic_set(&(p)->_count, (v) - 1)
#define __put_page(p) atomic_dec(&(p)->_count)
extern void FASTCALL(__page_cache_release(struct page *));
@@ -634,14 +639,38 @@ struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
int shmem_lock(struct file *file, int lock, struct user_struct *user);
#else
#define shmem_nopage filemap_nopage
-#define shmem_lock(a, b, c) ({0;}) /* always in memory, no need to lock */
-#define shmem_set_policy(a, b) (0)
-#define shmem_get_policy(a, b) (NULL)
+
+static inline int shmem_lock(struct file *file, int lock,
+ struct user_struct *user)
+{
+ return 0;
+}
+
+static inline int shmem_set_policy(struct vm_area_struct *vma,
+ struct mempolicy *new)
+{
+ return 0;
+}
+
+static inline struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ return NULL;
+}
#endif
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
+extern int shmem_mmap(struct file *file, struct vm_area_struct *vma);
int shmem_zero_setup(struct vm_area_struct *);
+#ifndef CONFIG_MMU
+extern unsigned long shmem_get_unmapped_area(struct file *file,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags);
+#endif
+
static inline int can_do_mlock(void)
{
if (capable(CAP_IPC_LOCK))
@@ -690,14 +719,31 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
}
extern int vmtruncate(struct inode * inode, loff_t offset);
+extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
-extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access);
-static inline int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access)
+#ifdef CONFIG_MMU
+extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma,
+ unsigned long address, int write_access);
+
+static inline int handle_mm_fault(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ int write_access)
{
- return __handle_mm_fault(mm, vma, address, write_access) & (~VM_FAULT_WRITE);
+ return __handle_mm_fault(mm, vma, address, write_access) &
+ (~VM_FAULT_WRITE);
}
+#else
+static inline int handle_mm_fault(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ int write_access)
+{
+ /* should never happen if there's no MMU */
+ BUG();
+ return VM_FAULT_SIGBUS;
+}
+#endif
extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
@@ -774,7 +820,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
* overflow into the next struct page (as it might with DEBUG_SPINLOCK).
* When freeing, reset page->mapping so free_pages_check won't complain.
*/
-#define __pte_lockptr(page) &((page)->u.ptl)
+#define __pte_lockptr(page) &((page)->ptl)
#define pte_lock_init(_page) do { \
spin_lock_init(__pte_lockptr(_page)); \
} while (0)
@@ -896,6 +942,8 @@ extern unsigned long do_brk(unsigned long, unsigned long);
/* filemap.c */
extern unsigned long page_unuse(struct page *);
extern void truncate_inode_pages(struct address_space *, loff_t);
+extern void truncate_inode_pages_range(struct address_space *,
+ loff_t lstart, loff_t lend);
/* generic vm_area_ops exported for stackable file systems */
extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *);
@@ -978,6 +1026,9 @@ static inline void vm_stat_account(struct mm_struct *mm,
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
+ if (!PageHighMem(page) && !enable)
+ mutex_debug_check_no_locks_freed(page_address(page),
+ numpages * PAGE_SIZE);
}
#endif
@@ -993,5 +1044,12 @@ int in_gate_area_no_task(unsigned long addr);
/* /proc/<pid>/oom_adj set to -17 protects from the oom-killer */
#define OOM_DISABLE -17
+int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
+ void __user *, size_t *, loff_t *);
+int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
+ unsigned long lru_pages);
+void drop_pagecache(void);
+void drop_slab(void);
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 47762ca695a..49cc68af01f 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -38,3 +38,25 @@ del_page_from_lru(struct zone *zone, struct page *page)
zone->nr_inactive--;
}
}
+
+/*
+ * Isolate one page from the LRU lists.
+ *
+ * - zone->lru_lock must be held
+ */
+static inline int __isolate_lru_page(struct page *page)
+{
+ if (unlikely(!TestClearPageLRU(page)))
+ return 0;
+
+ if (get_page_testone(page)) {
+ /*
+ * It is being freed elsewhere
+ */
+ __put_page(page);
+ SetPageLRU(page);
+ return -ENOENT;
+ }
+
+ return 1;
+}
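
A rough caller-side sketch of __isolate_lru_page() (not part of the patch), assuming zone->lru_lock is held as the comment requires; the function name and accounting are illustrative.

#include <linux/mm.h>
#include <linux/mm_inline.h>

/* Sketch: try to pull the tail page off a zone's inactive list. */
static struct page *example_grab_tail_page(struct zone *zone,
					   struct list_head *dst)
{
	struct page *page = NULL;

	spin_lock_irq(&zone->lru_lock);
	if (!list_empty(&zone->inactive_list)) {
		page = list_entry(zone->inactive_list.prev, struct page, lru);
		if (__isolate_lru_page(page) == 1) {
			list_move(&page->lru, dst);
			zone->nr_inactive--;
		} else {
			page = NULL;	/* not on the LRU, or being freed */
		}
	}
	spin_unlock_irq(&zone->lru_lock);
	return page;
}
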
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 18fc77f682d..30dd978c1ec 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -30,7 +30,12 @@ struct mmc_csd {
unsigned int tacc_ns;
unsigned int max_dtr;
unsigned int read_blkbits;
+ unsigned int write_blkbits;
unsigned int capacity;
+ unsigned int read_partial:1,
+ read_misalign:1,
+ write_partial:1,
+ write_misalign:1;
};
struct sd_scr {
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index aef6042f8f0..ccd3e13de1e 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -27,14 +27,15 @@ struct mmc_command {
#define MMC_RSP_MASK (3 << 0)
#define MMC_RSP_CRC (1 << 3) /* expect valid crc */
#define MMC_RSP_BUSY (1 << 4) /* card may send busy */
+#define MMC_RSP_OPCODE (1 << 5) /* response contains opcode */
/*
* These are the response types, and correspond to valid bit
* patterns of the above flags. One additional valid pattern
* is all zeros, which means we don't expect a response.
*/
-#define MMC_RSP_R1 (MMC_RSP_SHORT|MMC_RSP_CRC)
-#define MMC_RSP_R1B (MMC_RSP_SHORT|MMC_RSP_CRC|MMC_RSP_BUSY)
+#define MMC_RSP_R1 (MMC_RSP_SHORT|MMC_RSP_CRC|MMC_RSP_OPCODE)
+#define MMC_RSP_R1B (MMC_RSP_SHORT|MMC_RSP_CRC|MMC_RSP_OPCODE|MMC_RSP_BUSY)
#define MMC_RSP_R2 (MMC_RSP_LONG|MMC_RSP_CRC)
#define MMC_RSP_R3 (MMC_RSP_SHORT)
#define MMC_RSP_R6 (MMC_RSP_SHORT|MMC_RSP_CRC)
@@ -64,6 +65,7 @@ struct mmc_data {
#define MMC_DATA_WRITE (1 << 8)
#define MMC_DATA_READ (1 << 9)
#define MMC_DATA_STREAM (1 << 10)
+#define MMC_DATA_MULTI (1 << 11)
unsigned int bytes_xfered;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9f22090df7d..34cbefd2ebd 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -38,7 +38,7 @@ struct pglist_data;
#if defined(CONFIG_SMP)
struct zone_padding {
char x[0];
-} ____cacheline_maxaligned_in_smp;
+} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name) struct zone_padding name;
#else
#define ZONE_PADDING(name)
@@ -46,7 +46,6 @@ struct zone_padding {
struct per_cpu_pages {
int count; /* number of pages in the list */
- int low; /* low watermark, refill needed */
int high; /* high watermark, emptying needed */
int batch; /* chunk size for buddy add/remove */
struct list_head list; /* the list of pages */
@@ -99,7 +98,7 @@ struct per_cpu_pageset {
/*
* On machines where it is needed (eg PCs) we divide physical memory
- * into multiple physical zones. On a PC we have 4 zones:
+ * into multiple physical zones. On a 32-bit PC we have 4 zones:
*
* ZONE_DMA < 16 MB ISA DMA capable memory
* ZONE_DMA32 0 MB Empty
@@ -234,7 +233,7 @@ struct zone {
* rarely used fields:
*/
char *name;
-} ____cacheline_maxaligned_in_smp;
+} ____cacheline_internodealigned_in_smp;
/*
@@ -389,6 +388,11 @@ static inline struct zone *next_zone(struct zone *zone)
#define for_each_zone(zone) \
for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone))
+static inline int populated_zone(struct zone *zone)
+{
+ return (!!zone->present_pages);
+}
+
static inline int is_highmem_idx(int idx)
{
return (idx == ZONE_HIGHMEM);
@@ -398,6 +402,7 @@ static inline int is_normal_idx(int idx)
{
return (idx == ZONE_NORMAL);
}
+
/**
* is_highmem - helper function to quickly check if a struct zone is a
* highmem zone or not. This is an attempt to keep references
@@ -414,6 +419,16 @@ static inline int is_normal(struct zone *zone)
return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}
+static inline int is_dma32(struct zone *zone)
+{
+ return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
+}
+
+static inline int is_dma(struct zone *zone)
+{
+ return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
+}
+
/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
struct file;
@@ -422,6 +437,8 @@ int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
+int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
+ void __user *, size_t *, loff_t *);
#include <linux/topology.h>
/* Returns the number of the current Node. */
@@ -435,7 +452,6 @@ extern struct pglist_data contig_page_data;
#define NODE_DATA(nid) (&contig_page_data)
#define NODE_MEM_MAP(nid) mem_map
#define MAX_NODES_SHIFT 1
-#define pfn_to_nid(pfn) (0)
#else /* CONFIG_NEED_MULTIPLE_NODES */
@@ -470,6 +486,10 @@ extern struct pglist_data contig_page_data;
#define early_pfn_to_nid(nid) (0UL)
#endif
+#ifdef CONFIG_FLATMEM
+#define pfn_to_nid(pfn) (0)
+#endif
+
#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
@@ -564,11 +584,6 @@ static inline int valid_section_nr(unsigned long nr)
return valid_section(__nr_to_section(nr));
}
-/*
- * Given a kernel address, find the home node of the underlying memory.
- */
-#define kvaddr_to_nid(kaddr) pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)
-
static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
return __nr_to_section(pfn_to_section_nr(pfn));
@@ -598,13 +613,14 @@ static inline int pfn_valid(unsigned long pfn)
* this restriction.
*/
#ifdef CONFIG_NUMA
-#define pfn_to_nid early_pfn_to_nid
-#endif
-
-#define pfn_to_pgdat(pfn) \
+#define pfn_to_nid(pfn) \
({ \
- NODE_DATA(pfn_to_nid(pfn)); \
+ unsigned long __pfn_to_nid_pfn = (pfn); \
+ page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \
})
+#else
+#define pfn_to_nid(pfn) (0)
+#endif
#define early_pfn_valid(pfn) pfn_valid(pfn)
void sparse_init(void);
@@ -613,12 +629,6 @@ void sparse_init(void);
#define sparse_index_init(_sec, _nid) do {} while (0)
#endif /* CONFIG_SPARSEMEM */
-#ifdef CONFIG_NODES_SPAN_OTHER_NODES
-#define early_pfn_in_nid(pfn, nid) (early_pfn_to_nid(pfn) == (nid))
-#else
-#define early_pfn_in_nid(pfn, nid) (1)
-#endif
-
#ifndef early_pfn_valid
#define early_pfn_valid(pfn) (1)
#endif
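
A small sketch of the new populated_zone() helper in a zone walk (illustrative only; the function name is made up).

#include <linux/mmzone.h>

/* Sketch: visit every zone that actually contains pages. */
static void example_walk_populated_zones(void)
{
	struct zone *zone;

	for_each_zone(zone) {
		if (!populated_zone(zone))
			continue;
		/* ... work only on zones with present_pages != 0 ... */
	}
}
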
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 368ec8e45bd..b5c98c43779 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -10,7 +10,7 @@
#ifdef MODULE
#define MODULE_PARAM_PREFIX /* empty */
#else
-#define MODULE_PARAM_PREFIX __stringify(KBUILD_MODNAME) "."
+#define MODULE_PARAM_PREFIX KBUILD_MODNAME "."
#endif
#ifdef MODULE
diff --git a/include/linux/mount.h b/include/linux/mount.h
index dd4e83eba93..b7472ae91fa 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -20,9 +20,12 @@
#define MNT_NOSUID 0x01
#define MNT_NODEV 0x02
#define MNT_NOEXEC 0x04
-#define MNT_SHARED 0x10 /* if the vfsmount is a shared mount */
-#define MNT_UNBINDABLE 0x20 /* if the vfsmount is a unbindable mount */
-#define MNT_PNODE_MASK 0x30 /* propogation flag mask */
+#define MNT_NOATIME 0x08
+#define MNT_NODIRATIME 0x10
+
+#define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */
+#define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */
+#define MNT_PNODE_MASK 0x3000 /* propagation flag mask */
struct vfsmount {
struct list_head mnt_hash;
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index 941da5c016a..e933e2a355a 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -329,7 +329,8 @@ static inline void fatwchar_to16(__u8 *dst, const wchar_t *src, size_t len)
extern void fat_cache_inval_inode(struct inode *inode);
extern int fat_get_cluster(struct inode *inode, int cluster,
int *fclus, int *dclus);
-extern int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys);
+extern int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
+ unsigned long *mapped_blocks);
/* fat/dir.c */
extern struct file_operations fat_dir_operations;
diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h
new file mode 100644
index 00000000000..8b5769f0046
--- /dev/null
+++ b/include/linux/mutex-debug.h
@@ -0,0 +1,23 @@
+#ifndef __LINUX_MUTEX_DEBUG_H
+#define __LINUX_MUTEX_DEBUG_H
+
+#include <linux/linkage.h>
+
+/*
+ * Mutexes - debugging helpers:
+ */
+
+#define __DEBUG_MUTEX_INITIALIZER(lockname) \
+ , .held_list = LIST_HEAD_INIT(lockname.held_list), \
+ .name = #lockname , .magic = &lockname
+
+#define mutex_init(sem) __mutex_init(sem, __FUNCTION__)
+
+extern void FASTCALL(mutex_destroy(struct mutex *lock));
+
+extern void mutex_debug_show_all_locks(void);
+extern void mutex_debug_show_held_locks(struct task_struct *filter);
+extern void mutex_debug_check_no_locks_held(struct task_struct *task);
+extern void mutex_debug_check_no_locks_freed(const void *from, unsigned long len);
+
+#endif
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
new file mode 100644
index 00000000000..f1ac507fa20
--- /dev/null
+++ b/include/linux/mutex.h
@@ -0,0 +1,120 @@
+/*
+ * Mutexes: blocking mutual exclusion locks
+ *
+ * started by Ingo Molnar:
+ *
+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * This file contains the main data structure and API definitions.
+ */
+#ifndef __LINUX_MUTEX_H
+#define __LINUX_MUTEX_H
+
+#include <linux/list.h>
+#include <linux/spinlock_types.h>
+#include <linux/linkage.h>
+
+#include <asm/atomic.h>
+
+/*
+ * Simple, straightforward mutexes with strict semantics:
+ *
+ * - only one task can hold the mutex at a time
+ * - only the owner can unlock the mutex
+ * - multiple unlocks are not permitted
+ * - recursive locking is not permitted
+ * - a mutex object must be initialized via the API
+ * - a mutex object must not be initialized via memset or copying
+ * - task may not exit with mutex held
+ * - memory areas where held locks reside must not be freed
+ * - held mutexes must not be reinitialized
+ * - mutexes may not be used in irq contexts
+ *
+ * These semantics are fully enforced when DEBUG_MUTEXES is
+ * enabled. Furthermore, besides enforcing the above rules, the mutex
+ * debugging code also implements a number of additional features
+ * that make lock debugging easier and faster:
+ *
+ * - uses symbolic names of mutexes, whenever they are printed in debug output
+ * - point-of-acquire tracking, symbolic lookup of function names
+ * - list of all locks held in the system, printout of them
+ * - owner tracking
+ * - detects self-recursing locks and prints out all relevant info
+ * - detects multi-task circular deadlocks and prints out all affected
+ * locks and tasks (and only those tasks)
+ */
+struct mutex {
+ /* 1: unlocked, 0: locked, negative: locked, possible waiters */
+ atomic_t count;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_MUTEXES
+ struct thread_info *owner;
+ struct list_head held_list;
+ unsigned long acquire_ip;
+ const char *name;
+ void *magic;
+#endif
+};
+
+/*
+ * This is the control structure for tasks blocked on mutex,
+ * which resides on the blocked task's kernel stack:
+ */
+struct mutex_waiter {
+ struct list_head list;
+ struct task_struct *task;
+#ifdef CONFIG_DEBUG_MUTEXES
+ struct mutex *lock;
+ void *magic;
+#endif
+};
+
+#ifdef CONFIG_DEBUG_MUTEXES
+# include <linux/mutex-debug.h>
+#else
+# define __DEBUG_MUTEX_INITIALIZER(lockname)
+# define mutex_init(mutex) __mutex_init(mutex, NULL)
+# define mutex_destroy(mutex) do { } while (0)
+# define mutex_debug_show_all_locks() do { } while (0)
+# define mutex_debug_show_held_locks(p) do { } while (0)
+# define mutex_debug_check_no_locks_held(task) do { } while (0)
+# define mutex_debug_check_no_locks_freed(from, len) do { } while (0)
+#endif
+
+#define __MUTEX_INITIALIZER(lockname) \
+ { .count = ATOMIC_INIT(1) \
+ , .wait_lock = SPIN_LOCK_UNLOCKED \
+ , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
+ __DEBUG_MUTEX_INITIALIZER(lockname) }
+
+#define DEFINE_MUTEX(mutexname) \
+ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+extern void fastcall __mutex_init(struct mutex *lock, const char *name);
+
+/***
+ * mutex_is_locked - is the mutex locked
+ * @lock: the mutex to be queried
+ *
+ * Returns 1 if the mutex is locked, 0 if unlocked.
+ */
+static inline int fastcall mutex_is_locked(struct mutex *lock)
+{
+ return atomic_read(&lock->count) != 1;
+}
+
+/*
+ * See kernel/mutex.c for detailed documentation of these APIs.
+ * Also see Documentation/mutex-design.txt.
+ */
+extern void fastcall mutex_lock(struct mutex *lock);
+extern int fastcall mutex_lock_interruptible(struct mutex *lock);
+/*
+ * NOTE: mutex_trylock() follows the spin_trylock() convention,
+ * not the down_trylock() convention!
+ */
+extern int fastcall mutex_trylock(struct mutex *lock);
+extern void fastcall mutex_unlock(struct mutex *lock);
+
+#endif
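
A brief sketch of the mutex API defined above (not part of the patch); the lock name, counter, and helpers are illustrative. Note that mutex_trylock() follows the spin_trylock() convention: 1 on success, 0 on contention.

#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(example_lock);
static int example_counter;

/* Sketch: plain lock/unlock around shared state. */
static void example_update(void)
{
	mutex_lock(&example_lock);
	example_counter++;
	mutex_unlock(&example_lock);
}

/* Sketch: non-blocking attempt, honouring the trylock convention. */
static int example_try_update(void)
{
	if (!mutex_trylock(&example_lock))
		return -EBUSY;
	example_counter++;
	mutex_unlock(&example_lock);
	return 0;
}
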
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 455660eafba..b699e427c00 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -74,7 +74,7 @@ extern struct file *nameidata_to_filp(struct nameidata *nd, int flags);
extern void release_open_intent(struct nameidata *);
extern struct dentry * lookup_one_len(const char *, struct dentry *, int);
-extern struct dentry * lookup_hash(struct nameidata *);
+extern __deprecated_for_modules struct dentry * lookup_hash(struct nameidata *);
extern int follow_down(struct vfsmount **, struct dentry **);
extern int follow_up(struct vfsmount **, struct dentry **);
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
index 090e210e98f..f95d51fae73 100644
--- a/include/linux/nbd.h
+++ b/include/linux/nbd.h
@@ -37,18 +37,26 @@ enum {
/* userspace doesn't need the nbd_device structure */
#ifdef __KERNEL__
+#include <linux/wait.h>
+
/* values for flags field */
#define NBD_READ_ONLY 0x0001
#define NBD_WRITE_NOCHK 0x0002
+struct request;
+
struct nbd_device {
int flags;
int harderror; /* Code of hard error */
struct socket * sock;
struct file * file; /* If == NULL, device is not ready, yet */
int magic;
+
spinlock_t queue_lock;
struct list_head queue_head;/* Requests are added here... */
+ struct request *active_req;
+ wait_queue_head_t active_wq;
+
struct semaphore tx_lock;
struct gendisk *disk;
int blksize;
diff --git a/include/linux/ncp.h b/include/linux/ncp.h
index 99f77876b71..99f0adeeb3f 100644
--- a/include/linux/ncp.h
+++ b/include/linux/ncp.h
@@ -20,29 +20,29 @@
#define NCP_DEALLOC_SLOT_REQUEST (0x5555)
struct ncp_request_header {
- __u16 type __attribute__((packed));
- __u8 sequence __attribute__((packed));
- __u8 conn_low __attribute__((packed));
- __u8 task __attribute__((packed));
- __u8 conn_high __attribute__((packed));
- __u8 function __attribute__((packed));
- __u8 data[0] __attribute__((packed));
-};
+ __u16 type;
+ __u8 sequence;
+ __u8 conn_low;
+ __u8 task;
+ __u8 conn_high;
+ __u8 function;
+ __u8 data[0];
+} __attribute__((packed));
#define NCP_REPLY (0x3333)
#define NCP_WATCHDOG (0x3E3E)
#define NCP_POSITIVE_ACK (0x9999)
struct ncp_reply_header {
- __u16 type __attribute__((packed));
- __u8 sequence __attribute__((packed));
- __u8 conn_low __attribute__((packed));
- __u8 task __attribute__((packed));
- __u8 conn_high __attribute__((packed));
- __u8 completion_code __attribute__((packed));
- __u8 connection_state __attribute__((packed));
- __u8 data[0] __attribute__((packed));
-};
+ __u16 type;
+ __u8 sequence;
+ __u8 conn_low;
+ __u8 task;
+ __u8 conn_high;
+ __u8 completion_code;
+ __u8 connection_state;
+ __u8 data[0];
+} __attribute__((packed));
#define NCP_VOLNAME_LEN (16)
#define NCP_NUMBER_OF_VOLUMES (256)
@@ -128,37 +128,37 @@ struct nw_nfs_info {
};
struct nw_info_struct {
- __u32 spaceAlloc __attribute__((packed));
- __le32 attributes __attribute__((packed));
- __u16 flags __attribute__((packed));
- __le32 dataStreamSize __attribute__((packed));
- __le32 totalStreamSize __attribute__((packed));
- __u16 numberOfStreams __attribute__((packed));
- __le16 creationTime __attribute__((packed));
- __le16 creationDate __attribute__((packed));
- __u32 creatorID __attribute__((packed));
- __le16 modifyTime __attribute__((packed));
- __le16 modifyDate __attribute__((packed));
- __u32 modifierID __attribute__((packed));
- __le16 lastAccessDate __attribute__((packed));
- __u16 archiveTime __attribute__((packed));
- __u16 archiveDate __attribute__((packed));
- __u32 archiverID __attribute__((packed));
- __u16 inheritedRightsMask __attribute__((packed));
- __le32 dirEntNum __attribute__((packed));
- __le32 DosDirNum __attribute__((packed));
- __u32 volNumber __attribute__((packed));
- __u32 EADataSize __attribute__((packed));
- __u32 EAKeyCount __attribute__((packed));
- __u32 EAKeySize __attribute__((packed));
- __u32 NSCreator __attribute__((packed));
- __u8 nameLen __attribute__((packed));
- __u8 entryName[256] __attribute__((packed));
+ __u32 spaceAlloc;
+ __le32 attributes;
+ __u16 flags;
+ __le32 dataStreamSize;
+ __le32 totalStreamSize;
+ __u16 numberOfStreams;
+ __le16 creationTime;
+ __le16 creationDate;
+ __u32 creatorID;
+ __le16 modifyTime;
+ __le16 modifyDate;
+ __u32 modifierID;
+ __le16 lastAccessDate;
+ __u16 archiveTime;
+ __u16 archiveDate;
+ __u32 archiverID;
+ __u16 inheritedRightsMask;
+ __le32 dirEntNum;
+ __le32 DosDirNum;
+ __u32 volNumber;
+ __u32 EADataSize;
+ __u32 EAKeyCount;
+ __u32 EAKeySize;
+ __u32 NSCreator;
+ __u8 nameLen;
+ __u8 entryName[256];
/* libncp may depend on there being nothing after entryName */
#ifdef __KERNEL__
struct nw_nfs_info nfs;
#endif
-};
+} __attribute__((packed));
/* modify mask - use with MODIFY_DOS_INFO structure */
#define DM_ATTRIBUTES (cpu_to_le32(0x02))
@@ -176,26 +176,26 @@ struct nw_info_struct {
#define DM_MAXIMUM_SPACE (cpu_to_le32(0x2000))
struct nw_modify_dos_info {
- __le32 attributes __attribute__((packed));
- __le16 creationDate __attribute__((packed));
- __le16 creationTime __attribute__((packed));
- __u32 creatorID __attribute__((packed));
- __le16 modifyDate __attribute__((packed));
- __le16 modifyTime __attribute__((packed));
- __u32 modifierID __attribute__((packed));
- __u16 archiveDate __attribute__((packed));
- __u16 archiveTime __attribute__((packed));
- __u32 archiverID __attribute__((packed));
- __le16 lastAccessDate __attribute__((packed));
- __u16 inheritanceGrantMask __attribute__((packed));
- __u16 inheritanceRevokeMask __attribute__((packed));
- __u32 maximumSpace __attribute__((packed));
-};
+ __le32 attributes;
+ __le16 creationDate;
+ __le16 creationTime;
+ __u32 creatorID;
+ __le16 modifyDate;
+ __le16 modifyTime;
+ __u32 modifierID;
+ __u16 archiveDate;
+ __u16 archiveTime;
+ __u32 archiverID;
+ __le16 lastAccessDate;
+ __u16 inheritanceGrantMask;
+ __u16 inheritanceRevokeMask;
+ __u32 maximumSpace;
+} __attribute__((packed));
struct nw_search_sequence {
- __u8 volNumber __attribute__((packed));
- __u32 dirBase __attribute__((packed));
- __u32 sequence __attribute__((packed));
-};
+ __u8 volNumber;
+ __u32 dirBase;
+ __u32 sequence;
+} __attribute__((packed));
#endif /* _LINUX_NCP_H */
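
The hunks above move __attribute__((packed)) from every member onto the struct as a whole. For the fixed-width wire-format members in this header the two spellings produce the same byte layout; a reduced illustration (types trimmed, not taken from the file):

#include <linux/types.h>

/* Old spelling: the attribute repeated on each member. */
struct example_old_hdr {
	__u16 type __attribute__((packed));
	__u8 sequence __attribute__((packed));
};

/* New spelling: one attribute on the struct; same 3-byte layout here. */
struct example_new_hdr {
	__u16 type;
	__u8 sequence;
} __attribute__((packed));
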
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index be365e70ee9..4cf6088625c 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -168,6 +168,37 @@ void nf_log_packet(int pf,
const struct net_device *out,
struct nf_loginfo *li,
const char *fmt, ...);
+
+int nf_hook_slow(int pf, unsigned int hook, struct sk_buff **pskb,
+ struct net_device *indev, struct net_device *outdev,
+ int (*okfn)(struct sk_buff *), int thresh);
+
+/**
+ * nf_hook_thresh - call a netfilter hook
+ *
+ * Returns 1 if the hook has allowed the packet to pass. The function
+ * okfn must be invoked by the caller in this case. Any other return
+ * value indicates the packet has been consumed by the hook.
+ */
+static inline int nf_hook_thresh(int pf, unsigned int hook,
+ struct sk_buff **pskb,
+ struct net_device *indev,
+ struct net_device *outdev,
+ int (*okfn)(struct sk_buff *), int thresh)
+{
+#ifndef CONFIG_NETFILTER_DEBUG
+ if (list_empty(&nf_hooks[pf][hook]))
+ return 1;
+#endif
+ return nf_hook_slow(pf, hook, pskb, indev, outdev, okfn, thresh);
+}
+
+static inline int nf_hook(int pf, unsigned int hook, struct sk_buff **pskb,
+ struct net_device *indev, struct net_device *outdev,
+ int (*okfn)(struct sk_buff *))
+{
+ return nf_hook_thresh(pf, hook, pskb, indev, outdev, okfn, INT_MIN);
+}
/* Activate hook; either okfn or kfree_skb called, unless a hook
returns NF_STOLEN (in which case, it's up to the hook to deal with
@@ -188,35 +219,17 @@ void nf_log_packet(int pf,
/* This is gross, but inline doesn't cut it for avoiding the function
call in fast path: gcc doesn't inline (needs value tracking?). --RR */
-#ifdef CONFIG_NETFILTER_DEBUG
-#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) \
-({int __ret; \
-if ((__ret=nf_hook_slow(pf, hook, &(skb), indev, outdev, okfn, INT_MIN)) == 1) \
- __ret = (okfn)(skb); \
-__ret;})
-#define NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, thresh) \
-({int __ret; \
-if ((__ret=nf_hook_slow(pf, hook, &(skb), indev, outdev, okfn, thresh)) == 1) \
- __ret = (okfn)(skb); \
-__ret;})
-#else
-#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) \
-({int __ret; \
-if (list_empty(&nf_hooks[pf][hook]) || \
- (__ret=nf_hook_slow(pf, hook, &(skb), indev, outdev, okfn, INT_MIN)) == 1) \
- __ret = (okfn)(skb); \
-__ret;})
+
+/* HX: It's slightly less gross now. */
+
#define NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, thresh) \
({int __ret; \
-if (list_empty(&nf_hooks[pf][hook]) || \
- (__ret=nf_hook_slow(pf, hook, &(skb), indev, outdev, okfn, thresh)) == 1) \
+if ((__ret=nf_hook_thresh(pf, hook, &(skb), indev, outdev, okfn, thresh)) == 1)\
__ret = (okfn)(skb); \
__ret;})
-#endif
-int nf_hook_slow(int pf, unsigned int hook, struct sk_buff **pskb,
- struct net_device *indev, struct net_device *outdev,
- int (*okfn)(struct sk_buff *), int thresh);
+#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) \
+ NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, INT_MIN)
/* Call setsockopt() */
int nf_setsockopt(struct sock *sk, int pf, int optval, char __user *opt,
@@ -261,6 +274,20 @@ struct nf_queue_rerouter {
extern int nf_register_queue_rerouter(int pf, struct nf_queue_rerouter *rer);
extern int nf_unregister_queue_rerouter(int pf);
+#include <net/flow.h>
+extern void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *);
+
+static inline void
+nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, int family)
+{
+#ifdef CONFIG_IP_NF_NAT_NEEDED
+ void (*decodefn)(struct sk_buff *, struct flowi *);
+
+ if (family == AF_INET && (decodefn = ip_nat_decode_session) != NULL)
+ decodefn(skb, fl);
+#endif
+}
+
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
extern struct proc_dir_entry *proc_net_netfilter;
@@ -268,7 +295,24 @@ extern struct proc_dir_entry *proc_net_netfilter;
#else /* !CONFIG_NETFILTER */
#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb)
+static inline int nf_hook_thresh(int pf, unsigned int hook,
+ struct sk_buff **pskb,
+ struct net_device *indev,
+ struct net_device *outdev,
+ int (*okfn)(struct sk_buff *), int thresh)
+{
+ return okfn(*pskb);
+}
+static inline int nf_hook(int pf, unsigned int hook, struct sk_buff **pskb,
+ struct net_device *indev, struct net_device *outdev,
+ int (*okfn)(struct sk_buff *))
+{
+ return okfn(*pskb);
+}
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
+struct flowi;
+static inline void
+nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, int family) {}
#endif /*CONFIG_NETFILTER*/
#endif /*__KERNEL__*/
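
A caller-side sketch of the nf_hook_thresh() convention documented above (a return value of 1 means the caller must invoke okfn itself); the hook point, output function, and function name are assumptions for illustration, not part of the patch.

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>

/* Sketch: run the LOCAL_OUT hooks, then transmit if they let the packet pass. */
static int example_local_out(struct sk_buff *skb)
{
	int ret = nf_hook_thresh(PF_INET, NF_IP_LOCAL_OUT, &skb,
				 NULL, skb->dev, dev_queue_xmit, INT_MIN);

	if (ret == 1)		/* hooks allowed the packet */
		ret = dev_queue_xmit(skb);

	return ret;
}

The reworked NF_HOOK()/NF_HOOK_THRESH() macros wrap exactly this pattern.
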
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 6d39b518486..3ff88c87830 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -154,6 +154,9 @@ struct ip_conntrack_stat
unsigned int expect_delete;
};
+/* call to create an explicit dependency on nf_conntrack. */
+extern void need_conntrack(void);
+
#endif /* __KERNEL__ */
#endif /* _NF_CONNTRACK_COMMON_H */
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
new file mode 100644
index 00000000000..472f0483480
--- /dev/null
+++ b/include/linux/netfilter/x_tables.h
@@ -0,0 +1,224 @@
+#ifndef _X_TABLES_H
+#define _X_TABLES_H
+
+#define XT_FUNCTION_MAXNAMELEN 30
+#define XT_TABLE_MAXNAMELEN 32
+
+/* The argument to IPT_SO_GET_REVISION_*. Returns the highest revision
+ * the kernel supports, if >= revision. */
+struct xt_get_revision
+{
+ char name[XT_FUNCTION_MAXNAMELEN-1];
+
+ u_int8_t revision;
+};
+
+/* CONTINUE verdict for targets */
+#define XT_CONTINUE 0xFFFFFFFF
+
+/* For standard target */
+#define XT_RETURN (-NF_REPEAT - 1)
+
+#define XT_ALIGN(s) (((s) + (__alignof__(void *)-1)) & ~(__alignof__(void *)-1))
+
+/* Standard return verdict, or do jump. */
+#define XT_STANDARD_TARGET ""
+/* Error verdict. */
+#define XT_ERROR_TARGET "ERROR"
+
+/*
+ * New IP firewall options for [gs]etsockopt at the RAW IP level.
+ * Unlike BSD, Linux inherits IP options, so you don't have to use a raw
+ * socket for this. Instead we check rights in the calls.
+ */
+#define XT_BASE_CTL 64 /* base for firewall socket options */
+
+#define XT_SO_SET_REPLACE (XT_BASE_CTL)
+#define XT_SO_SET_ADD_COUNTERS (XT_BASE_CTL + 1)
+#define XT_SO_SET_MAX XT_SO_SET_ADD_COUNTERS
+
+#define XT_SO_GET_INFO (XT_BASE_CTL)
+#define XT_SO_GET_ENTRIES (XT_BASE_CTL + 1)
+#define XT_SO_GET_REVISION_MATCH (XT_BASE_CTL + 2)
+#define XT_SO_GET_REVISION_TARGET (XT_BASE_CTL + 3)
+#define XT_SO_GET_MAX XT_SO_GET_REVISION_TARGET
+
+#define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
+#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)
+
+struct xt_counters
+{
+ u_int64_t pcnt, bcnt; /* Packet and byte counters */
+};
+
+/* The argument to IPT_SO_ADD_COUNTERS. */
+struct xt_counters_info
+{
+ /* Which table. */
+ char name[XT_TABLE_MAXNAMELEN];
+
+ unsigned int num_counters;
+
+ /* The counters (actually `number' of these). */
+ struct xt_counters counters[0];
+};
+
+#define XT_INV_PROTO 0x40 /* Invert the sense of PROTO. */
+
+#ifdef __KERNEL__
+
+#include <linux/netdevice.h>
+
+#define ASSERT_READ_LOCK(x)
+#define ASSERT_WRITE_LOCK(x)
+#include <linux/netfilter_ipv4/listhelp.h>
+
+struct xt_match
+{
+ struct list_head list;
+
+ const char name[XT_FUNCTION_MAXNAMELEN-1];
+
+ u_int8_t revision;
+
+ /* Return true or false: return FALSE and set *hotdrop = 1 to
+ force immediate packet drop. */
+ /* Arguments changed since 2.6.9, as this must now handle
+ non-linear skb, using skb_header_pointer and
+ skb_ip_make_writable. */
+ int (*match)(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *matchinfo,
+ int offset,
+ unsigned int protoff,
+ int *hotdrop);
+
+ /* Called when user tries to insert an entry of this type. */
+ /* Should return true or false. */
+ int (*checkentry)(const char *tablename,
+ const void *ip,
+ void *matchinfo,
+ unsigned int matchinfosize,
+ unsigned int hook_mask);
+
+ /* Called when entry of this type deleted. */
+ void (*destroy)(void *matchinfo, unsigned int matchinfosize);
+
+ /* Set this to THIS_MODULE if you are a module, otherwise NULL */
+ struct module *me;
+};
+
+/* Registration hooks for targets. */
+struct xt_target
+{
+ struct list_head list;
+
+ const char name[XT_FUNCTION_MAXNAMELEN-1];
+
+ u_int8_t revision;
+
+ /* Returns verdict. Argument order changed since 2.6.9, as this
+ must now handle non-linear skbs, using skb_copy_bits and
+ skb_ip_make_writable. */
+ unsigned int (*target)(struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int hooknum,
+ const void *targinfo,
+ void *userdata);
+
+ /* Called when user tries to insert an entry of this type:
+ hook_mask is a bitmask of hooks from which it can be
+ called. */
+ /* Should return true or false. */
+ int (*checkentry)(const char *tablename,
+ const void *entry,
+ void *targinfo,
+ unsigned int targinfosize,
+ unsigned int hook_mask);
+
+ /* Called when entry of this type deleted. */
+ void (*destroy)(void *targinfo, unsigned int targinfosize);
+
+ /* Set this to THIS_MODULE if you are a module, otherwise NULL */
+ struct module *me;
+};
+
+/* Furniture shopping... */
+struct xt_table
+{
+ struct list_head list;
+
+ /* A unique name... */
+ char name[XT_TABLE_MAXNAMELEN];
+
+ /* What hooks you will enter on */
+ unsigned int valid_hooks;
+
+ /* Lock for the curtain */
+ rwlock_t lock;
+
+ /* Man behind the curtain... */
+ //struct ip6t_table_info *private;
+ void *private;
+
+ /* Set this to THIS_MODULE if you are a module, otherwise NULL */
+ struct module *me;
+
+ int af; /* address/protocol family */
+};
+
+#include <linux/netfilter_ipv4.h>
+
+/* The table itself */
+struct xt_table_info
+{
+ /* Size per table */
+ unsigned int size;
+ /* Number of entries: FIXME. --RR */
+ unsigned int number;
+ /* Initial number of entries. Needed for module usage count */
+ unsigned int initial_entries;
+
+ /* Entry points and underflows */
+ unsigned int hook_entry[NF_IP_NUMHOOKS];
+ unsigned int underflow[NF_IP_NUMHOOKS];
+
+ /* ipt_entry tables: one per CPU */
+ char *entries[NR_CPUS];
+};
+
+extern int xt_register_target(int af, struct xt_target *target);
+extern void xt_unregister_target(int af, struct xt_target *target);
+extern int xt_register_match(int af, struct xt_match *target);
+extern void xt_unregister_match(int af, struct xt_match *target);
+
+extern int xt_register_table(struct xt_table *table,
+ struct xt_table_info *bootstrap,
+ struct xt_table_info *newinfo);
+extern void *xt_unregister_table(struct xt_table *table);
+
+extern struct xt_table_info *xt_replace_table(struct xt_table *table,
+ unsigned int num_counters,
+ struct xt_table_info *newinfo,
+ int *error);
+
+extern struct xt_match *xt_find_match(int af, const char *name, u8 revision);
+extern struct xt_target *xt_find_target(int af, const char *name, u8 revision);
+extern struct xt_target *xt_request_find_target(int af, const char *name,
+ u8 revision);
+extern int xt_find_revision(int af, const char *name, u8 revision, int target,
+ int *err);
+
+extern struct xt_table *xt_find_table_lock(int af, const char *name);
+extern void xt_table_unlock(struct xt_table *t);
+
+extern int xt_proto_init(int af);
+extern void xt_proto_fini(int af);
+
+extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
+extern void xt_free_table_info(struct xt_table_info *info);
+
+#endif /* __KERNEL__ */
+
+#endif /* _X_TABLES_H */
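
A minimal registration sketch against the new x_tables core (not part of the patch); the match accepts every packet, and the names and module glue are illustrative.

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>

static int example_mt(const struct sk_buff *skb,
		      const struct net_device *in,
		      const struct net_device *out,
		      const void *matchinfo,
		      int offset, unsigned int protoff, int *hotdrop)
{
	return 1;	/* always match */
}

static struct xt_match example_match = {
	.name = "example",
	.match = example_mt,
	.me = THIS_MODULE,
};

static int __init example_init(void)
{
	return xt_register_match(AF_INET, &example_match);
}

static void __exit example_exit(void)
{
	xt_unregister_match(AF_INET, &example_match);
}

module_init(example_init);
module_exit(example_exit);

Targets follow the same pattern via xt_register_target(); tables go through xt_register_table() with a bootstrap xt_table_info.
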
diff --git a/include/linux/netfilter/xt_CLASSIFY.h b/include/linux/netfilter/xt_CLASSIFY.h
new file mode 100644
index 00000000000..58111355255
--- /dev/null
+++ b/include/linux/netfilter/xt_CLASSIFY.h
@@ -0,0 +1,8 @@
+#ifndef _XT_CLASSIFY_H
+#define _XT_CLASSIFY_H
+
+struct xt_classify_target_info {
+ u_int32_t priority;
+};
+
+#endif /*_XT_CLASSIFY_H */
diff --git a/include/linux/netfilter/xt_CONNMARK.h b/include/linux/netfilter/xt_CONNMARK.h
new file mode 100644
index 00000000000..9f744689fff
--- /dev/null
+++ b/include/linux/netfilter/xt_CONNMARK.h
@@ -0,0 +1,25 @@
+#ifndef _XT_CONNMARK_H_target
+#define _XT_CONNMARK_H_target
+
+/* Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
+ * by Henrik Nordstrom <hno@marasystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+enum {
+ XT_CONNMARK_SET = 0,
+ XT_CONNMARK_SAVE,
+ XT_CONNMARK_RESTORE
+};
+
+struct xt_connmark_target_info {
+ unsigned long mark;
+ unsigned long mask;
+ u_int8_t mode;
+};
+
+#endif /*_XT_CONNMARK_H_target*/
diff --git a/include/linux/netfilter/xt_MARK.h b/include/linux/netfilter/xt_MARK.h
new file mode 100644
index 00000000000..b021e93ee5d
--- /dev/null
+++ b/include/linux/netfilter/xt_MARK.h
@@ -0,0 +1,21 @@
+#ifndef _XT_MARK_H_target
+#define _XT_MARK_H_target
+
+/* Version 0 */
+struct xt_mark_target_info {
+ unsigned long mark;
+};
+
+/* Version 1 */
+enum {
+ XT_MARK_SET=0,
+ XT_MARK_AND,
+ XT_MARK_OR,
+};
+
+struct xt_mark_target_info_v1 {
+ unsigned long mark;
+ u_int8_t mode;
+};
+
+#endif /*_XT_MARK_H_target */
diff --git a/include/linux/netfilter/xt_NFQUEUE.h b/include/linux/netfilter/xt_NFQUEUE.h
new file mode 100644
index 00000000000..9a9af79f74d
--- /dev/null
+++ b/include/linux/netfilter/xt_NFQUEUE.h
@@ -0,0 +1,16 @@
+/* iptables module for using NFQUEUE mechanism
+ *
+ * (C) 2005 Harald Welte <laforge@netfilter.org>
+ *
+ * This software is distributed under GNU GPL v2, 1991
+ *
+*/
+#ifndef _XT_NFQ_TARGET_H
+#define _XT_NFQ_TARGET_H
+
+/* target info */
+struct xt_NFQ_info {
+ u_int16_t queuenum;
+};
+
+#endif /* _XT_NFQ_TARGET_H */
diff --git a/include/linux/netfilter/xt_comment.h b/include/linux/netfilter/xt_comment.h
new file mode 100644
index 00000000000..eacfedc6b5d
--- /dev/null
+++ b/include/linux/netfilter/xt_comment.h
@@ -0,0 +1,10 @@
+#ifndef _XT_COMMENT_H
+#define _XT_COMMENT_H
+
+#define XT_MAX_COMMENT_LEN 256
+
+struct xt_comment_info {
+ unsigned char comment[XT_MAX_COMMENT_LEN];
+};
+
+#endif /* XT_COMMENT_H */
diff --git a/include/linux/netfilter/xt_connbytes.h b/include/linux/netfilter/xt_connbytes.h
new file mode 100644
index 00000000000..c022c989754
--- /dev/null
+++ b/include/linux/netfilter/xt_connbytes.h
@@ -0,0 +1,25 @@
+#ifndef _XT_CONNBYTES_H
+#define _XT_CONNBYTES_H
+
+enum xt_connbytes_what {
+ XT_CONNBYTES_PKTS,
+ XT_CONNBYTES_BYTES,
+ XT_CONNBYTES_AVGPKT,
+};
+
+enum xt_connbytes_direction {
+ XT_CONNBYTES_DIR_ORIGINAL,
+ XT_CONNBYTES_DIR_REPLY,
+ XT_CONNBYTES_DIR_BOTH,
+};
+
+struct xt_connbytes_info
+{
+ struct {
+ aligned_u64 from; /* count to be matched */
+ aligned_u64 to; /* count to be matched */
+ } count;
+ u_int8_t what; /* ipt_connbytes_what */
+ u_int8_t direction; /* ipt_connbytes_direction */
+};
+#endif
diff --git a/include/linux/netfilter/xt_connmark.h b/include/linux/netfilter/xt_connmark.h
new file mode 100644
index 00000000000..c592f6ae088
--- /dev/null
+++ b/include/linux/netfilter/xt_connmark.h
@@ -0,0 +1,18 @@
+#ifndef _XT_CONNMARK_H
+#define _XT_CONNMARK_H
+
+/* Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
+ * by Henrik Nordstrom <hno@marasystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+struct xt_connmark_info {
+ unsigned long mark, mask;
+ u_int8_t invert;
+};
+
+#endif /*_XT_CONNMARK_H*/
diff --git a/include/linux/netfilter/xt_conntrack.h b/include/linux/netfilter/xt_conntrack.h
new file mode 100644
index 00000000000..34f63cf2e29
--- /dev/null
+++ b/include/linux/netfilter/xt_conntrack.h
@@ -0,0 +1,63 @@
+/* Header file for kernel module to match connection tracking information.
+ * GPL (C) 2001 Marc Boucher (marc@mbsi.ca).
+ */
+
+#ifndef _XT_CONNTRACK_H
+#define _XT_CONNTRACK_H
+
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <linux/in.h>
+
+#define XT_CONNTRACK_STATE_BIT(ctinfo) (1 << ((ctinfo)%IP_CT_IS_REPLY+1))
+#define XT_CONNTRACK_STATE_INVALID (1 << 0)
+
+#define XT_CONNTRACK_STATE_SNAT (1 << (IP_CT_NUMBER + 1))
+#define XT_CONNTRACK_STATE_DNAT (1 << (IP_CT_NUMBER + 2))
+#define XT_CONNTRACK_STATE_UNTRACKED (1 << (IP_CT_NUMBER + 3))
+
+/* flags, invflags: */
+#define XT_CONNTRACK_STATE 0x01
+#define XT_CONNTRACK_PROTO 0x02
+#define XT_CONNTRACK_ORIGSRC 0x04
+#define XT_CONNTRACK_ORIGDST 0x08
+#define XT_CONNTRACK_REPLSRC 0x10
+#define XT_CONNTRACK_REPLDST 0x20
+#define XT_CONNTRACK_STATUS 0x40
+#define XT_CONNTRACK_EXPIRES 0x80
+
+/* This is exposed to userspace, so remains frozen in time. */
+struct ip_conntrack_old_tuple
+{
+ struct {
+ __u32 ip;
+ union {
+ __u16 all;
+ } u;
+ } src;
+
+ struct {
+ __u32 ip;
+ union {
+ __u16 all;
+ } u;
+
+ /* The protocol. */
+ u16 protonum;
+ } dst;
+};
+
+struct xt_conntrack_info
+{
+ unsigned int statemask, statusmask;
+
+ struct ip_conntrack_old_tuple tuple[IP_CT_DIR_MAX];
+ struct in_addr sipmsk[IP_CT_DIR_MAX], dipmsk[IP_CT_DIR_MAX];
+
+ unsigned long expires_min, expires_max;
+
+ /* Flags word */
+ u_int8_t flags;
+ /* Inverse flags */
+ u_int8_t invflags;
+};
+#endif /*_XT_CONNTRACK_H*/
diff --git a/include/linux/netfilter/xt_dccp.h b/include/linux/netfilter/xt_dccp.h
new file mode 100644
index 00000000000..e0221b9d32c
--- /dev/null
+++ b/include/linux/netfilter/xt_dccp.h
@@ -0,0 +1,23 @@
+#ifndef _XT_DCCP_H_
+#define _XT_DCCP_H_
+
+#define XT_DCCP_SRC_PORTS 0x01
+#define XT_DCCP_DEST_PORTS 0x02
+#define XT_DCCP_TYPE 0x04
+#define XT_DCCP_OPTION 0x08
+
+#define XT_DCCP_VALID_FLAGS 0x0f
+
+struct xt_dccp_info {
+ u_int16_t dpts[2]; /* Min, Max */
+ u_int16_t spts[2]; /* Min, Max */
+
+ u_int16_t flags;
+ u_int16_t invflags;
+
+ u_int16_t typemask;
+ u_int8_t option;
+};
+
+#endif /* _XT_DCCP_H_ */
+
diff --git a/include/linux/netfilter/xt_helper.h b/include/linux/netfilter/xt_helper.h
new file mode 100644
index 00000000000..6b42763f999
--- /dev/null
+++ b/include/linux/netfilter/xt_helper.h
@@ -0,0 +1,8 @@
+#ifndef _XT_HELPER_H
+#define _XT_HELPER_H
+
+struct xt_helper_info {
+ int invert;
+ char name[30];
+};
+#endif /* _XT_HELPER_H */
diff --git a/include/linux/netfilter/xt_length.h b/include/linux/netfilter/xt_length.h
new file mode 100644
index 00000000000..7c2b439f73f
--- /dev/null
+++ b/include/linux/netfilter/xt_length.h
@@ -0,0 +1,9 @@
+#ifndef _XT_LENGTH_H
+#define _XT_LENGTH_H
+
+struct xt_length_info {
+ u_int16_t min, max;
+ u_int8_t invert;
+};
+
+#endif /*_XT_LENGTH_H*/
diff --git a/include/linux/netfilter/xt_limit.h b/include/linux/netfilter/xt_limit.h
new file mode 100644
index 00000000000..b3ce65375ec
--- /dev/null
+++ b/include/linux/netfilter/xt_limit.h
@@ -0,0 +1,21 @@
+#ifndef _XT_RATE_H
+#define _XT_RATE_H
+
+/* timings are in milliseconds. */
+#define XT_LIMIT_SCALE 10000
+
+/* 1/10,000 sec period => max of 10,000/sec. Min rate is then one packet
+   every 429490 seconds, or about once every 119 hours. */
+struct xt_rateinfo {
+ u_int32_t avg; /* Average secs between packets * scale */
+ u_int32_t burst; /* Period multiplier for upper limit. */
+
+ /* Used internally by the kernel */
+ unsigned long prev;
+ u_int32_t credit;
+ u_int32_t credit_cap, cost;
+
+ /* Ugly, ugly fucker. */
+ struct xt_rateinfo *master;
+};
+#endif /*_XT_RATE_H*/
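
A short sketch of how the XT_LIMIT_SCALE units above are meant to be used when filling in xt_rateinfo (illustrative only; the rate and burst values are made up).

#include <linux/netfilter/xt_limit.h>

/* Sketch: "5 packets per second, burst of 10". .avg is the average period
 * between packets in units of 1/XT_LIMIT_SCALE (= 1/10,000) seconds. */
static struct xt_rateinfo example_rate = {
	.avg = XT_LIMIT_SCALE / 5,	/* 2000 -> one packet every 0.2 s */
	.burst = 10,
};
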
diff --git a/include/linux/netfilter/xt_mac.h b/include/linux/netfilter/xt_mac.h
new file mode 100644
index 00000000000..b892cdc67e0
--- /dev/null
+++ b/include/linux/netfilter/xt_mac.h
@@ -0,0 +1,8 @@
+#ifndef _XT_MAC_H
+#define _XT_MAC_H
+
+struct xt_mac_info {
+ unsigned char srcaddr[ETH_ALEN];
+ int invert;
+};
+#endif /*_XT_MAC_H*/
diff --git a/include/linux/netfilter/xt_mark.h b/include/linux/netfilter/xt_mark.h
new file mode 100644
index 00000000000..802dd4842ca
--- /dev/null
+++ b/include/linux/netfilter/xt_mark.h
@@ -0,0 +1,9 @@
+#ifndef _XT_MARK_H
+#define _XT_MARK_H
+
+struct xt_mark_info {
+ unsigned long mark, mask;
+ u_int8_t invert;
+};
+
+#endif /*_XT_MARK_H*/
diff --git a/include/linux/netfilter/xt_physdev.h b/include/linux/netfilter/xt_physdev.h
new file mode 100644
index 00000000000..25a7a1815b5
--- /dev/null
+++ b/include/linux/netfilter/xt_physdev.h
@@ -0,0 +1,24 @@
+#ifndef _XT_PHYSDEV_H
+#define _XT_PHYSDEV_H
+
+#ifdef __KERNEL__
+#include <linux/if.h>
+#endif
+
+#define XT_PHYSDEV_OP_IN 0x01
+#define XT_PHYSDEV_OP_OUT 0x02
+#define XT_PHYSDEV_OP_BRIDGED 0x04
+#define XT_PHYSDEV_OP_ISIN 0x08
+#define XT_PHYSDEV_OP_ISOUT 0x10
+#define XT_PHYSDEV_OP_MASK (0x20 - 1)
+
+struct xt_physdev_info {
+ char physindev[IFNAMSIZ];
+ char in_mask[IFNAMSIZ];
+ char physoutdev[IFNAMSIZ];
+ char out_mask[IFNAMSIZ];
+ u_int8_t invert;
+ u_int8_t bitmask;
+};
+
+#endif /*_XT_PHYSDEV_H*/
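
A short sketch of the simplest use of the structure above: match any packet that traverses a bridge, without naming the ports (illustrative only; field semantics are inferred from the flag names, not taken from the physdev match source):

#include <linux/netfilter/xt_physdev.h>
#include <string.h>

/* Match bridged traffic regardless of which bridge port it uses. */
static void fill_physdev(struct xt_physdev_info *info)
{
	memset(info, 0, sizeof(*info));
	info->bitmask = XT_PHYSDEV_OP_BRIDGED;
	/* physindev/physoutdev and their masks stay zeroed: no name test */
}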
diff --git a/include/linux/netfilter/xt_pkttype.h b/include/linux/netfilter/xt_pkttype.h
new file mode 100644
index 00000000000..f265cf52fae
--- /dev/null
+++ b/include/linux/netfilter/xt_pkttype.h
@@ -0,0 +1,8 @@
+#ifndef _XT_PKTTYPE_H
+#define _XT_PKTTYPE_H
+
+struct xt_pkttype_info {
+ int pkttype;
+ int invert;
+};
+#endif /*_XT_PKTTYPE_H*/
diff --git a/include/linux/netfilter/xt_realm.h b/include/linux/netfilter/xt_realm.h
new file mode 100644
index 00000000000..220e8724571
--- /dev/null
+++ b/include/linux/netfilter/xt_realm.h
@@ -0,0 +1,10 @@
+#ifndef _XT_REALM_H
+#define _XT_REALM_H
+
+struct xt_realm_info {
+ u_int32_t id;
+ u_int32_t mask;
+ u_int8_t invert;
+};
+
+#endif /* _XT_REALM_H */
diff --git a/include/linux/netfilter/xt_sctp.h b/include/linux/netfilter/xt_sctp.h
new file mode 100644
index 00000000000..b157897e779
--- /dev/null
+++ b/include/linux/netfilter/xt_sctp.h
@@ -0,0 +1,107 @@
+#ifndef _XT_SCTP_H_
+#define _XT_SCTP_H_
+
+#define XT_SCTP_SRC_PORTS 0x01
+#define XT_SCTP_DEST_PORTS 0x02
+#define XT_SCTP_CHUNK_TYPES 0x04
+
+#define XT_SCTP_VALID_FLAGS 0x07
+
+#define ELEMCOUNT(x) (sizeof(x)/sizeof(x[0]))
+
+
+struct xt_sctp_flag_info {
+ u_int8_t chunktype;
+ u_int8_t flag;
+ u_int8_t flag_mask;
+};
+
+#define XT_NUM_SCTP_FLAGS 4
+
+struct xt_sctp_info {
+ u_int16_t dpts[2]; /* Min, Max */
+ u_int16_t spts[2]; /* Min, Max */
+
+ u_int32_t chunkmap[256 / sizeof (u_int32_t)]; /* Bit mask of chunks to be matched according to RFC 2960 */
+
+#define SCTP_CHUNK_MATCH_ANY 0x01 /* Match if any of the chunk types are present */
+#define SCTP_CHUNK_MATCH_ALL 0x02 /* Match if all of the chunk types are present */
+#define SCTP_CHUNK_MATCH_ONLY 0x04 /* Match if these are the only chunk types present */
+
+ u_int32_t chunk_match_type;
+ struct xt_sctp_flag_info flag_info[XT_NUM_SCTP_FLAGS];
+ int flag_count;
+
+ u_int32_t flags;
+ u_int32_t invflags;
+};
+
+#define bytes(type) (sizeof(type) * 8)
+
+#define SCTP_CHUNKMAP_SET(chunkmap, type) \
+ do { \
+ chunkmap[type / bytes(u_int32_t)] |= \
+ 1 << (type % bytes(u_int32_t)); \
+ } while (0)
+
+#define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \
+ do { \
+ chunkmap[type / bytes(u_int32_t)] &= \
+ ~(1 << (type % bytes(u_int32_t))); \
+ } while (0)
+
+#define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \
+({ \
+ (chunkmap[type / bytes (u_int32_t)] & \
+ (1 << (type % bytes (u_int32_t)))) ? 1: 0; \
+})
+
+#define SCTP_CHUNKMAP_RESET(chunkmap) \
+ do { \
+ int i; \
+ for (i = 0; i < ELEMCOUNT(chunkmap); i++) \
+ chunkmap[i] = 0; \
+ } while (0)
+
+#define SCTP_CHUNKMAP_SET_ALL(chunkmap) \
+ do { \
+ int i; \
+ for (i = 0; i < ELEMCOUNT(chunkmap); i++) \
+ chunkmap[i] = ~0; \
+ } while (0)
+
+#define SCTP_CHUNKMAP_COPY(destmap, srcmap) \
+ do { \
+ int i; \
+ for (i = 0; i < ELEMCOUNT(chunkmap); i++) \
+ destmap[i] = srcmap[i]; \
+ } while (0)
+
+#define SCTP_CHUNKMAP_IS_CLEAR(chunkmap) \
+({ \
+ int i; \
+ int flag = 1; \
+ for (i = 0; i < ELEMCOUNT(chunkmap); i++) { \
+ if (chunkmap[i]) { \
+ flag = 0; \
+ break; \
+ } \
+ } \
+ flag; \
+})
+
+#define SCTP_CHUNKMAP_IS_ALL_SET(chunkmap) \
+({ \
+ int i; \
+ int flag = 1; \
+ for (i = 0; i < ELEMCOUNT(chunkmap); i++) { \
+ if (chunkmap[i] != ~0) { \
+ flag = 0; \
+ break; \
+ } \
+ } \
+ flag; \
+})
+
+#endif /* _XT_SCTP_H_ */
+
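The chunkmap helpers above are plain bit operations on the 256-bit chunk-type map; a sketch of building a map that fires when an INIT or INIT-ACK chunk is present (the chunk type numbers 1 and 2 come from RFC 2960 and are an assumption of the example, not something this header defines):

#include <linux/netfilter/xt_sctp.h>
#include <string.h>

/* Match if an INIT (1) or INIT ACK (2) chunk is present. */
static void want_init_chunks(struct xt_sctp_info *info)
{
	memset(info, 0, sizeof(*info));
	SCTP_CHUNKMAP_RESET(info->chunkmap);
	SCTP_CHUNKMAP_SET(info->chunkmap, 1);	/* INIT */
	SCTP_CHUNKMAP_SET(info->chunkmap, 2);	/* INIT ACK */
	info->chunk_match_type = SCTP_CHUNK_MATCH_ANY;
	info->flags = XT_SCTP_CHUNK_TYPES;
}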
diff --git a/include/linux/netfilter/xt_state.h b/include/linux/netfilter/xt_state.h
new file mode 100644
index 00000000000..c06f32edee0
--- /dev/null
+++ b/include/linux/netfilter/xt_state.h
@@ -0,0 +1,13 @@
+#ifndef _XT_STATE_H
+#define _XT_STATE_H
+
+#define XT_STATE_BIT(ctinfo) (1 << ((ctinfo)%IP_CT_IS_REPLY+1))
+#define XT_STATE_INVALID (1 << 0)
+
+#define XT_STATE_UNTRACKED (1 << (IP_CT_NUMBER + 1))
+
+struct xt_state_info
+{
+ unsigned int statemask;
+};
+#endif /*_XT_STATE_H*/
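
XT_STATE_BIT simply maps a conntrack info value onto a statemask bit; with the usual enum ip_conntrack_info values (IP_CT_ESTABLISHED == 0, IP_CT_NEW == 2, IP_CT_IS_REPLY == 3, assumed from the conntrack headers rather than shown here) the classic NEW,ESTABLISHED state match comes out as:

#include <linux/netfilter/xt_state.h>

/* XT_STATE_BIT(IP_CT_ESTABLISHED) == 1 << 1
 * XT_STATE_BIT(IP_CT_NEW)         == 1 << 3 */
static void fill_state(struct xt_state_info *info)
{
	info->statemask = XT_STATE_BIT(IP_CT_NEW) |
			  XT_STATE_BIT(IP_CT_ESTABLISHED);
}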
diff --git a/include/linux/netfilter/xt_string.h b/include/linux/netfilter/xt_string.h
new file mode 100644
index 00000000000..3b3419f2637
--- /dev/null
+++ b/include/linux/netfilter/xt_string.h
@@ -0,0 +1,18 @@
+#ifndef _XT_STRING_H
+#define _XT_STRING_H
+
+#define XT_STRING_MAX_PATTERN_SIZE 128
+#define XT_STRING_MAX_ALGO_NAME_SIZE 16
+
+struct xt_string_info
+{
+ u_int16_t from_offset;
+ u_int16_t to_offset;
+ char algo[XT_STRING_MAX_ALGO_NAME_SIZE];
+ char pattern[XT_STRING_MAX_PATTERN_SIZE];
+ u_int8_t patlen;
+ u_int8_t invert;
+ struct ts_config __attribute__((aligned(8))) *config;
+};
+
+#endif /*_XT_STRING_H*/
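
A hedged sketch of filling the structure above to look for "GET " in the first 100 payload bytes ("bm" names the kernel's Boyer-Moore textsearch implementation; its availability is an assumption of the example, not something this header guarantees):

#include <linux/netfilter/xt_string.h>
#include <string.h>

static void fill_string_match(struct xt_string_info *info)
{
	memset(info, 0, sizeof(*info));
	info->from_offset = 0;
	info->to_offset   = 100;
	strcpy(info->algo, "bm");	/* textsearch algorithm name, assumed */
	memcpy(info->pattern, "GET ", 4);
	info->patlen = 4;
}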
diff --git a/include/linux/netfilter/xt_tcpmss.h b/include/linux/netfilter/xt_tcpmss.h
new file mode 100644
index 00000000000..e03274c4c79
--- /dev/null
+++ b/include/linux/netfilter/xt_tcpmss.h
@@ -0,0 +1,9 @@
+#ifndef _XT_TCPMSS_MATCH_H
+#define _XT_TCPMSS_MATCH_H
+
+struct xt_tcpmss_match_info {
+ u_int16_t mss_min, mss_max;
+ u_int8_t invert;
+};
+
+#endif /*_XT_TCPMSS_MATCH_H*/
diff --git a/include/linux/netfilter/xt_tcpudp.h b/include/linux/netfilter/xt_tcpudp.h
new file mode 100644
index 00000000000..78bc65f11ad
--- /dev/null
+++ b/include/linux/netfilter/xt_tcpudp.h
@@ -0,0 +1,36 @@
+#ifndef _XT_TCPUDP_H
+#define _XT_TCPUDP_H
+
+/* TCP matching stuff */
+struct xt_tcp
+{
+ u_int16_t spts[2]; /* Source port range. */
+ u_int16_t dpts[2]; /* Destination port range. */
+ u_int8_t option; /* TCP Option iff non-zero*/
+ u_int8_t flg_mask; /* TCP flags mask byte */
+ u_int8_t flg_cmp; /* TCP flags compare byte */
+ u_int8_t invflags; /* Inverse flags */
+};
+
+/* Values for "inv" field in struct ipt_tcp. */
+#define XT_TCP_INV_SRCPT 0x01 /* Invert the sense of source ports. */
+#define XT_TCP_INV_DSTPT 0x02 /* Invert the sense of dest ports. */
+#define XT_TCP_INV_FLAGS 0x04 /* Invert the sense of TCP flags. */
+#define XT_TCP_INV_OPTION 0x08 /* Invert the sense of option test. */
+#define XT_TCP_INV_MASK 0x0F /* All possible flags. */
+
+/* UDP matching stuff */
+struct xt_udp
+{
+ u_int16_t spts[2]; /* Source port range. */
+ u_int16_t dpts[2]; /* Destination port range. */
+ u_int8_t invflags; /* Inverse flags */
+};
+
+/* Values for "invflags" field in struct ipt_udp. */
+#define XT_UDP_INV_SRCPT 0x01 /* Invert the sense of source ports. */
+#define XT_UDP_INV_DSTPT 0x02 /* Invert the sense of dest ports. */
+#define XT_UDP_INV_MASK 0x03 /* All possible flags. */
+
+
+#endif
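
For completeness, a minimal sketch of the shared TCP structure in use: match destination port 22 while inverting the source-port test so ports 0-1023 are excluded (field meanings are taken from the comments above):

#include <linux/netfilter/xt_tcpudp.h>

static const struct xt_tcp ssh_not_from_lowports = {
	.spts     = { 0, 1023 },	/* inverted below */
	.dpts     = { 22, 22 },
	.invflags = XT_TCP_INV_SRCPT,
};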
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
index e98a870a20b..fd21796e513 100644
--- a/include/linux/netfilter_arp/arp_tables.h
+++ b/include/linux/netfilter_arp/arp_tables.h
@@ -19,8 +19,12 @@
#include <linux/compiler.h>
#include <linux/netfilter_arp.h>
-#define ARPT_FUNCTION_MAXNAMELEN 30
-#define ARPT_TABLE_MAXNAMELEN 32
+#include <linux/netfilter/x_tables.h>
+
+#define ARPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN
+#define ARPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN
+#define arpt_target xt_target
+#define arpt_table xt_table
#define ARPT_DEV_ADDR_LEN_MAX 16
@@ -91,11 +95,6 @@ struct arpt_standard_target
int verdict;
};
-struct arpt_counters
-{
- u_int64_t pcnt, bcnt; /* Packet and byte counters */
-};
-
/* Values for "flag" field in struct arpt_ip (general arp structure).
* No flags defined yet.
*/
@@ -130,7 +129,7 @@ struct arpt_entry
unsigned int comefrom;
/* Packet and byte counters. */
- struct arpt_counters counters;
+ struct xt_counters counters;
/* The matches (if any), then the target. */
unsigned char elems[0];
@@ -141,23 +140,24 @@ struct arpt_entry
* Unlike BSD Linux inherits IP options so you don't have to use a raw
* socket for this. Instead we check rights in the calls.
*/
-#define ARPT_BASE_CTL 96 /* base for firewall socket options */
+#define ARPT_CTL_OFFSET 32
+#define ARPT_BASE_CTL (XT_BASE_CTL+ARPT_CTL_OFFSET)
-#define ARPT_SO_SET_REPLACE (ARPT_BASE_CTL)
-#define ARPT_SO_SET_ADD_COUNTERS (ARPT_BASE_CTL + 1)
-#define ARPT_SO_SET_MAX ARPT_SO_SET_ADD_COUNTERS
+#define ARPT_SO_SET_REPLACE (XT_SO_SET_REPLACE+ARPT_CTL_OFFSET)
+#define ARPT_SO_SET_ADD_COUNTERS (XT_SO_SET_ADD_COUNTERS+ARPT_CTL_OFFSET)
+#define ARPT_SO_SET_MAX (XT_SO_SET_MAX+ARPT_CTL_OFFSET)
-#define ARPT_SO_GET_INFO (ARPT_BASE_CTL)
-#define ARPT_SO_GET_ENTRIES (ARPT_BASE_CTL + 1)
-/* #define ARPT_SO_GET_REVISION_MATCH (ARPT_BASE_CTL + 2)*/
-#define ARPT_SO_GET_REVISION_TARGET (ARPT_BASE_CTL + 3)
-#define ARPT_SO_GET_MAX ARPT_SO_GET_REVISION_TARGET
+#define ARPT_SO_GET_INFO (XT_SO_GET_INFO+ARPT_CTL_OFFSET)
+#define ARPT_SO_GET_ENTRIES (XT_SO_GET_ENTRIES+ARPT_CTL_OFFSET)
+/* #define ARPT_SO_GET_REVISION_MATCH XT_SO_GET_REVISION_MATCH */
+#define ARPT_SO_GET_REVISION_TARGET (XT_SO_GET_REVISION_TARGET+ARPT_CTL_OFFSET)
+#define ARPT_SO_GET_MAX (XT_SO_GET_REVISION_TARGET+ARPT_CTL_OFFSET)
/* CONTINUE verdict for targets */
-#define ARPT_CONTINUE 0xFFFFFFFF
+#define ARPT_CONTINUE XT_CONTINUE
/* For standard target */
-#define ARPT_RETURN (-NF_REPEAT - 1)
+#define ARPT_RETURN XT_RETURN
/* The argument to ARPT_SO_GET_INFO */
struct arpt_getinfo
@@ -208,23 +208,14 @@ struct arpt_replace
/* Number of counters (must be equal to current number of entries). */
unsigned int num_counters;
/* The old entries' counters. */
- struct arpt_counters __user *counters;
+ struct xt_counters __user *counters;
/* The entries (hang off end: not really an array). */
struct arpt_entry entries[0];
};
/* The argument to ARPT_SO_ADD_COUNTERS. */
-struct arpt_counters_info
-{
- /* Which table. */
- char name[ARPT_TABLE_MAXNAMELEN];
-
- unsigned int num_counters;
-
- /* The counters (actually `number' of these). */
- struct arpt_counters counters[0];
-};
+#define arpt_counters_info xt_counters_info
/* The argument to ARPT_SO_GET_ENTRIES. */
struct arpt_get_entries
@@ -239,19 +230,10 @@ struct arpt_get_entries
struct arpt_entry entrytable[0];
};
-/* The argument to ARPT_SO_GET_REVISION_*. Returns highest revision
- * kernel supports, if >= revision. */
-struct arpt_get_revision
-{
- char name[ARPT_FUNCTION_MAXNAMELEN-1];
-
- u_int8_t revision;
-};
-
/* Standard return verdict, or do jump. */
-#define ARPT_STANDARD_TARGET ""
+#define ARPT_STANDARD_TARGET XT_STANDARD_TARGET
/* Error verdict. */
-#define ARPT_ERROR_TARGET "ERROR"
+#define ARPT_ERROR_TARGET XT_ERROR_TARGET
/* Helper functions */
static __inline__ struct arpt_entry_target *arpt_get_target(struct arpt_entry *e)
@@ -281,63 +263,8 @@ static __inline__ struct arpt_entry_target *arpt_get_target(struct arpt_entry *e
*/
#ifdef __KERNEL__
-/* Registration hooks for targets. */
-struct arpt_target
-{
- struct list_head list;
-
- const char name[ARPT_FUNCTION_MAXNAMELEN-1];
-
- u_int8_t revision;
-
- /* Returns verdict. */
- unsigned int (*target)(struct sk_buff **pskb,
- unsigned int hooknum,
- const struct net_device *in,
- const struct net_device *out,
- const void *targinfo,
- void *userdata);
-
- /* Called when user tries to insert an entry of this type:
- hook_mask is a bitmask of hooks from which it can be
- called. */
- /* Should return true or false. */
- int (*checkentry)(const char *tablename,
- const struct arpt_entry *e,
- void *targinfo,
- unsigned int targinfosize,
- unsigned int hook_mask);
-
- /* Called when entry of this type deleted. */
- void (*destroy)(void *targinfo, unsigned int targinfosize);
-
- /* Set this to THIS_MODULE if you are a module, otherwise NULL */
- struct module *me;
-};
-
-extern int arpt_register_target(struct arpt_target *target);
-extern void arpt_unregister_target(struct arpt_target *target);
-
-/* Furniture shopping... */
-struct arpt_table
-{
- struct list_head list;
-
- /* A unique name... */
- char name[ARPT_TABLE_MAXNAMELEN];
-
- /* What hooks you will enter on */
- unsigned int valid_hooks;
-
- /* Lock for the curtain */
- rwlock_t lock;
-
- /* Man behind the curtain... */
- struct arpt_table_info *private;
-
- /* Set this to THIS_MODULE if you are a module, otherwise NULL */
- struct module *me;
-};
+#define arpt_register_target(tgt) xt_register_target(NF_ARP, tgt)
+#define arpt_unregister_target(tgt) xt_unregister_target(NF_ARP, tgt)
extern int arpt_register_table(struct arpt_table *table,
const struct arpt_replace *repl);
diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h
index b3432ab59a1..215765f043e 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack.h
@@ -199,9 +199,6 @@ ip_conntrack_put(struct ip_conntrack *ct)
nf_conntrack_put(&ct->ct_general);
}
-/* call to create an explicit dependency on ip_conntrack. */
-extern void need_ip_conntrack(void);
-
extern int invert_tuplepr(struct ip_conntrack_tuple *inverse,
const struct ip_conntrack_tuple *orig);
diff --git a/include/linux/netfilter_ipv4/ip_nat_protocol.h b/include/linux/netfilter_ipv4/ip_nat_protocol.h
index ef63aa991a0..612a43614e7 100644
--- a/include/linux/netfilter_ipv4/ip_nat_protocol.h
+++ b/include/linux/netfilter_ipv4/ip_nat_protocol.h
@@ -42,13 +42,6 @@ struct ip_nat_protocol
enum ip_nat_manip_type maniptype,
const struct ip_conntrack *conntrack);
- unsigned int (*print)(char *buffer,
- const struct ip_conntrack_tuple *match,
- const struct ip_conntrack_tuple *mask);
-
- unsigned int (*print_range)(char *buffer,
- const struct ip_nat_range *range);
-
int (*range_to_nfattr)(struct sk_buff *skb,
const struct ip_nat_range *range);
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index d19d65cf453..76ba24b6851 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -25,8 +25,14 @@
#include <linux/compiler.h>
#include <linux/netfilter_ipv4.h>
-#define IPT_FUNCTION_MAXNAMELEN 30
-#define IPT_TABLE_MAXNAMELEN 32
+#include <linux/netfilter/x_tables.h>
+
+#define IPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN
+#define IPT_TABLE_MAXNAMELEN XT_FUNCTION_MAXNAMELEN
+#define ipt_match xt_match
+#define ipt_target xt_target
+#define ipt_table xt_table
+#define ipt_get_revision xt_get_revision
/* Yes, Virginia, you have to zero the padding. */
struct ipt_ip {
@@ -102,10 +108,7 @@ struct ipt_standard_target
int verdict;
};
-struct ipt_counters
-{
- u_int64_t pcnt, bcnt; /* Packet and byte counters */
-};
+#define ipt_counters xt_counters
/* Values for "flag" field in struct ipt_ip (general ip structure). */
#define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */
@@ -119,7 +122,7 @@ struct ipt_counters
#define IPT_INV_SRCIP 0x08 /* Invert the sense of SRC IP. */
#define IPT_INV_DSTIP 0x10 /* Invert the sense of DST OP. */
#define IPT_INV_FRAG 0x20 /* Invert the sense of FRAG. */
-#define IPT_INV_PROTO 0x40 /* Invert the sense of PROTO. */
+#define IPT_INV_PROTO XT_INV_PROTO
#define IPT_INV_MASK 0x7F /* All possible flag bits mask. */
/* This structure defines each of the firewall rules. Consists of 3
@@ -141,7 +144,7 @@ struct ipt_entry
unsigned int comefrom;
/* Packet and byte counters. */
- struct ipt_counters counters;
+ struct xt_counters counters;
/* The matches (if any), then the target. */
unsigned char elems[0];
@@ -151,54 +154,34 @@ struct ipt_entry
* New IP firewall options for [gs]etsockopt at the RAW IP level.
* Unlike BSD Linux inherits IP options so you don't have to use a raw
* socket for this. Instead we check rights in the calls. */
-#define IPT_BASE_CTL 64 /* base for firewall socket options */
+#define IPT_BASE_CTL XT_BASE_CTL
-#define IPT_SO_SET_REPLACE (IPT_BASE_CTL)
-#define IPT_SO_SET_ADD_COUNTERS (IPT_BASE_CTL + 1)
-#define IPT_SO_SET_MAX IPT_SO_SET_ADD_COUNTERS
+#define IPT_SO_SET_REPLACE XT_SO_SET_REPLACE
+#define IPT_SO_SET_ADD_COUNTERS XT_SO_SET_ADD_COUNTERS
+#define IPT_SO_SET_MAX XT_SO_SET_MAX
-#define IPT_SO_GET_INFO (IPT_BASE_CTL)
-#define IPT_SO_GET_ENTRIES (IPT_BASE_CTL + 1)
-#define IPT_SO_GET_REVISION_MATCH (IPT_BASE_CTL + 2)
-#define IPT_SO_GET_REVISION_TARGET (IPT_BASE_CTL + 3)
-#define IPT_SO_GET_MAX IPT_SO_GET_REVISION_TARGET
+#define IPT_SO_GET_INFO XT_SO_GET_INFO
+#define IPT_SO_GET_ENTRIES XT_SO_GET_ENTRIES
+#define IPT_SO_GET_REVISION_MATCH XT_SO_GET_REVISION_MATCH
+#define IPT_SO_GET_REVISION_TARGET XT_SO_GET_REVISION_TARGET
+#define IPT_SO_GET_MAX XT_SO_GET_REVISION_TARGET
-/* CONTINUE verdict for targets */
-#define IPT_CONTINUE 0xFFFFFFFF
+#define IPT_CONTINUE XT_CONTINUE
+#define IPT_RETURN XT_RETURN
-/* For standard target */
-#define IPT_RETURN (-NF_REPEAT - 1)
+#include <linux/netfilter/xt_tcpudp.h>
+#define ipt_udp xt_udp
+#define ipt_tcp xt_tcp
-/* TCP matching stuff */
-struct ipt_tcp
-{
- u_int16_t spts[2]; /* Source port range. */
- u_int16_t dpts[2]; /* Destination port range. */
- u_int8_t option; /* TCP Option iff non-zero*/
- u_int8_t flg_mask; /* TCP flags mask byte */
- u_int8_t flg_cmp; /* TCP flags compare byte */
- u_int8_t invflags; /* Inverse flags */
-};
-
-/* Values for "inv" field in struct ipt_tcp. */
-#define IPT_TCP_INV_SRCPT 0x01 /* Invert the sense of source ports. */
-#define IPT_TCP_INV_DSTPT 0x02 /* Invert the sense of dest ports. */
-#define IPT_TCP_INV_FLAGS 0x04 /* Invert the sense of TCP flags. */
-#define IPT_TCP_INV_OPTION 0x08 /* Invert the sense of option test. */
-#define IPT_TCP_INV_MASK 0x0F /* All possible flags. */
-
-/* UDP matching stuff */
-struct ipt_udp
-{
- u_int16_t spts[2]; /* Source port range. */
- u_int16_t dpts[2]; /* Destination port range. */
- u_int8_t invflags; /* Inverse flags */
-};
+#define IPT_TCP_INV_SRCPT XT_TCP_INV_SRCPT
+#define IPT_TCP_INV_DSTPT XT_TCP_INV_DSTPT
+#define IPT_TCP_INV_FLAGS XT_TCP_INV_FLAGS
+#define IPT_TCP_INV_OPTION XT_TCP_INV_OPTION
+#define IPT_TCP_INV_MASK XT_TCP_INV_MASK
-/* Values for "invflags" field in struct ipt_udp. */
-#define IPT_UDP_INV_SRCPT 0x01 /* Invert the sense of source ports. */
-#define IPT_UDP_INV_DSTPT 0x02 /* Invert the sense of dest ports. */
-#define IPT_UDP_INV_MASK 0x03 /* All possible flags. */
+#define IPT_UDP_INV_SRCPT XT_UDP_INV_SRCPT
+#define IPT_UDP_INV_DSTPT XT_UDP_INV_DSTPT
+#define IPT_UDP_INV_MASK XT_UDP_INV_MASK
/* ICMP matching stuff */
struct ipt_icmp
@@ -260,23 +243,14 @@ struct ipt_replace
/* Number of counters (must be equal to current number of entries). */
unsigned int num_counters;
/* The old entries' counters. */
- struct ipt_counters __user *counters;
+ struct xt_counters __user *counters;
/* The entries (hang off end: not really an array). */
struct ipt_entry entries[0];
};
/* The argument to IPT_SO_ADD_COUNTERS. */
-struct ipt_counters_info
-{
- /* Which table. */
- char name[IPT_TABLE_MAXNAMELEN];
-
- unsigned int num_counters;
-
- /* The counters (actually `number' of these). */
- struct ipt_counters counters[0];
-};
+#define ipt_counters_info xt_counters_info
/* The argument to IPT_SO_GET_ENTRIES. */
struct ipt_get_entries
@@ -291,19 +265,10 @@ struct ipt_get_entries
struct ipt_entry entrytable[0];
};
-/* The argument to IPT_SO_GET_REVISION_*. Returns highest revision
- * kernel supports, if >= revision. */
-struct ipt_get_revision
-{
- char name[IPT_FUNCTION_MAXNAMELEN-1];
-
- u_int8_t revision;
-};
-
/* Standard return verdict, or do jump. */
-#define IPT_STANDARD_TARGET ""
+#define IPT_STANDARD_TARGET XT_STANDARD_TARGET
/* Error verdict. */
-#define IPT_ERROR_TARGET "ERROR"
+#define IPT_ERROR_TARGET XT_ERROR_TARGET
/* Helper functions */
static __inline__ struct ipt_entry_target *
@@ -356,103 +321,18 @@ ipt_get_target(struct ipt_entry *e)
#include <linux/init.h>
extern void ipt_init(void) __init;
-struct ipt_match
-{
- struct list_head list;
-
- const char name[IPT_FUNCTION_MAXNAMELEN-1];
-
- u_int8_t revision;
-
- /* Return true or false: return FALSE and set *hotdrop = 1 to
- force immediate packet drop. */
- /* Arguments changed since 2.4, as this must now handle
- non-linear skbs, using skb_copy_bits and
- skb_ip_make_writable. */
- int (*match)(const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const void *matchinfo,
- int offset,
- int *hotdrop);
-
- /* Called when user tries to insert an entry of this type. */
- /* Should return true or false. */
- int (*checkentry)(const char *tablename,
- const struct ipt_ip *ip,
- void *matchinfo,
- unsigned int matchinfosize,
- unsigned int hook_mask);
-
- /* Called when entry of this type deleted. */
- void (*destroy)(void *matchinfo, unsigned int matchinfosize);
-
- /* Set this to THIS_MODULE. */
- struct module *me;
-};
-
-/* Registration hooks for targets. */
-struct ipt_target
-{
- struct list_head list;
-
- const char name[IPT_FUNCTION_MAXNAMELEN-1];
-
- u_int8_t revision;
-
- /* Called when user tries to insert an entry of this type:
- hook_mask is a bitmask of hooks from which it can be
- called. */
- /* Should return true or false. */
- int (*checkentry)(const char *tablename,
- const struct ipt_entry *e,
- void *targinfo,
- unsigned int targinfosize,
- unsigned int hook_mask);
-
- /* Called when entry of this type deleted. */
- void (*destroy)(void *targinfo, unsigned int targinfosize);
-
- /* Returns verdict. Argument order changed since 2.4, as this
- must now handle non-linear skbs, using skb_copy_bits and
- skb_ip_make_writable. */
- unsigned int (*target)(struct sk_buff **pskb,
- const struct net_device *in,
- const struct net_device *out,
- unsigned int hooknum,
- const void *targinfo,
- void *userdata);
-
- /* Set this to THIS_MODULE. */
- struct module *me;
-};
+#define ipt_register_target(tgt) xt_register_target(AF_INET, tgt)
+#define ipt_unregister_target(tgt) xt_unregister_target(AF_INET, tgt)
-extern int ipt_register_target(struct ipt_target *target);
-extern void ipt_unregister_target(struct ipt_target *target);
+#define ipt_register_match(mtch) xt_register_match(AF_INET, mtch)
+#define ipt_unregister_match(mtch) xt_unregister_match(AF_INET, mtch)
-extern int ipt_register_match(struct ipt_match *match);
-extern void ipt_unregister_match(struct ipt_match *match);
+//#define ipt_register_table(tbl, repl) xt_register_table(AF_INET, tbl, repl)
+//#define ipt_unregister_table(tbl) xt_unregister_table(AF_INET, tbl)
-/* Furniture shopping... */
-struct ipt_table
-{
- struct list_head list;
-
- /* A unique name... */
- char name[IPT_TABLE_MAXNAMELEN];
-
- /* What hooks you will enter on */
- unsigned int valid_hooks;
-
- /* Lock for the curtain */
- rwlock_t lock;
-
- /* Man behind the curtain... */
- struct ipt_table_info *private;
-
- /* Set to THIS_MODULE. */
- struct module *me;
-};
+extern int ipt_register_table(struct ipt_table *table,
+ const struct ipt_replace *repl);
+extern void ipt_unregister_table(struct ipt_table *table);
/* net/sched/ipt.c: Gimme access to your targets! Gets target->me. */
extern struct ipt_target *ipt_find_target(const char *name, u8 revision);
@@ -476,9 +356,6 @@ struct ipt_error
struct ipt_error_target target;
};
-extern int ipt_register_table(struct ipt_table *table,
- const struct ipt_replace *repl);
-extern void ipt_unregister_table(struct ipt_table *table);
extern unsigned int ipt_do_table(struct sk_buff **pskb,
unsigned int hook,
const struct net_device *in,
@@ -486,6 +363,6 @@ extern unsigned int ipt_do_table(struct sk_buff **pskb,
struct ipt_table *table,
void *userdata);
-#define IPT_ALIGN(s) (((s) + (__alignof__(struct ipt_entry)-1)) & ~(__alignof__(struct ipt_entry)-1))
+#define IPT_ALIGN(s) XT_ALIGN(s)
#endif /*__KERNEL__*/
#endif /* _IPTABLES_H */
diff --git a/include/linux/netfilter_ipv4/ipt_CLASSIFY.h b/include/linux/netfilter_ipv4/ipt_CLASSIFY.h
index 7596e3dd00c..a46d511b5c3 100644
--- a/include/linux/netfilter_ipv4/ipt_CLASSIFY.h
+++ b/include/linux/netfilter_ipv4/ipt_CLASSIFY.h
@@ -1,8 +1,7 @@
#ifndef _IPT_CLASSIFY_H
#define _IPT_CLASSIFY_H
-struct ipt_classify_target_info {
- u_int32_t priority;
-};
+#include <linux/netfilter/xt_CLASSIFY.h>
+#define ipt_classify_target_info xt_classify_target_info
#endif /*_IPT_CLASSIFY_H */
diff --git a/include/linux/netfilter_ipv4/ipt_CONNMARK.h b/include/linux/netfilter_ipv4/ipt_CONNMARK.h
index d3c02536fc4..9ecfee0a9e3 100644
--- a/include/linux/netfilter_ipv4/ipt_CONNMARK.h
+++ b/include/linux/netfilter_ipv4/ipt_CONNMARK.h
@@ -9,17 +9,11 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
+#include <linux/netfilter/xt_CONNMARK.h>
+#define IPT_CONNMARK_SET XT_CONNMARK_SET
+#define IPT_CONNMARK_SAVE XT_CONNMARK_SAVE
+#define IPT_CONNMARK_RESTORE XT_CONNMARK_RESTORE
-enum {
- IPT_CONNMARK_SET = 0,
- IPT_CONNMARK_SAVE,
- IPT_CONNMARK_RESTORE
-};
-
-struct ipt_connmark_target_info {
- unsigned long mark;
- unsigned long mask;
- u_int8_t mode;
-};
+#define ipt_connmark_target_info xt_connmark_target_info
#endif /*_IPT_CONNMARK_H_target*/
diff --git a/include/linux/netfilter_ipv4/ipt_MARK.h b/include/linux/netfilter_ipv4/ipt_MARK.h
index f47485790ed..697a486a96d 100644
--- a/include/linux/netfilter_ipv4/ipt_MARK.h
+++ b/include/linux/netfilter_ipv4/ipt_MARK.h
@@ -1,20 +1,18 @@
#ifndef _IPT_MARK_H_target
#define _IPT_MARK_H_target
+/* Backwards compatibility for old userspace */
+
+#include <linux/netfilter/xt_MARK.h>
+
/* Version 0 */
-struct ipt_mark_target_info {
- unsigned long mark;
-};
+#define ipt_mark_target_info xt_mark_target_info
/* Version 1 */
-enum {
- IPT_MARK_SET=0,
- IPT_MARK_AND,
- IPT_MARK_OR
-};
+#define IPT_MARK_SET XT_MARK_SET
+#define IPT_MARK_AND XT_MARK_AND
+#define IPT_MARK_OR XT_MARK_OR
+
+#define ipt_mark_target_info_v1 xt_mark_target_info_v1
-struct ipt_mark_target_info_v1 {
- unsigned long mark;
- u_int8_t mode;
-};
#endif /*_IPT_MARK_H_target*/
diff --git a/include/linux/netfilter_ipv4/ipt_NFQUEUE.h b/include/linux/netfilter_ipv4/ipt_NFQUEUE.h
index b5b2943b0c6..97a2a7557cb 100644
--- a/include/linux/netfilter_ipv4/ipt_NFQUEUE.h
+++ b/include/linux/netfilter_ipv4/ipt_NFQUEUE.h
@@ -8,9 +8,9 @@
#ifndef _IPT_NFQ_TARGET_H
#define _IPT_NFQ_TARGET_H
-/* target info */
-struct ipt_NFQ_info {
- u_int16_t queuenum;
-};
+/* Backwards compatibility for old userspace */
+#include <linux/netfilter/xt_NFQUEUE.h>
+
+#define ipt_NFQ_info xt_NFQ_info
#endif /* _IPT_DSCP_TARGET_H */
diff --git a/include/linux/netfilter_ipv4/ipt_comment.h b/include/linux/netfilter_ipv4/ipt_comment.h
index 85c1123c29c..ae2afc2f748 100644
--- a/include/linux/netfilter_ipv4/ipt_comment.h
+++ b/include/linux/netfilter_ipv4/ipt_comment.h
@@ -1,10 +1,10 @@
#ifndef _IPT_COMMENT_H
#define _IPT_COMMENT_H
-#define IPT_MAX_COMMENT_LEN 256
+#include <linux/netfilter/xt_comment.h>
-struct ipt_comment_info {
- unsigned char comment[IPT_MAX_COMMENT_LEN];
-};
+#define IPT_MAX_COMMENT_LEN XT_MAX_COMMENT_LEN
+
+#define ipt_comment_info xt_comment_info
#endif /* _IPT_COMMENT_H */
diff --git a/include/linux/netfilter_ipv4/ipt_connbytes.h b/include/linux/netfilter_ipv4/ipt_connbytes.h
index 9e5532f8d8a..b04dfa3083c 100644
--- a/include/linux/netfilter_ipv4/ipt_connbytes.h
+++ b/include/linux/netfilter_ipv4/ipt_connbytes.h
@@ -1,25 +1,18 @@
#ifndef _IPT_CONNBYTES_H
#define _IPT_CONNBYTES_H
-enum ipt_connbytes_what {
- IPT_CONNBYTES_PKTS,
- IPT_CONNBYTES_BYTES,
- IPT_CONNBYTES_AVGPKT,
-};
+#include <net/netfilter/xt_connbytes.h>
+#define ipt_connbytes_what xt_connbytes_what
-enum ipt_connbytes_direction {
- IPT_CONNBYTES_DIR_ORIGINAL,
- IPT_CONNBYTES_DIR_REPLY,
- IPT_CONNBYTES_DIR_BOTH,
-};
+#define IPT_CONNBYTES_PKTS XT_CONNBYTES_PACKETS
+#define IPT_CONNBYTES_BYTES XT_CONNBYTES_BYTES
+#define IPT_CONNBYTES_AVGPKT XT_CONNBYTES_AVGPKT
+
+#define ipt_connbytes_direction xt_connbytes_direction
+#define IPT_CONNBYTES_DIR_ORIGINAL XT_CONNBYTES_DIR_ORIGINAL
+#define IPT_CONNBYTES_DIR_REPLY XT_CONNBYTES_DIR_REPLY
+#define IPT_CONNBYTES_DIR_BOTH XT_CONNBYTES_DIR_BOTH
+
+#define ipt_connbytes_info xt_connbytes_info
-struct ipt_connbytes_info
-{
- struct {
- aligned_u64 from; /* count to be matched */
- aligned_u64 to; /* count to be matched */
- } count;
- u_int8_t what; /* ipt_connbytes_what */
- u_int8_t direction; /* ipt_connbytes_direction */
-};
#endif
diff --git a/include/linux/netfilter_ipv4/ipt_connmark.h b/include/linux/netfilter_ipv4/ipt_connmark.h
index 46573270d9a..c7ba6560d44 100644
--- a/include/linux/netfilter_ipv4/ipt_connmark.h
+++ b/include/linux/netfilter_ipv4/ipt_connmark.h
@@ -1,18 +1,7 @@
#ifndef _IPT_CONNMARK_H
#define _IPT_CONNMARK_H
-/* Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
- * by Henrik Nordstrom <hno@marasystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-struct ipt_connmark_info {
- unsigned long mark, mask;
- u_int8_t invert;
-};
+#include <linux/netfilter/xt_connmark.h>
+#define ipt_connmark_info xt_connmark_info
#endif /*_IPT_CONNMARK_H*/
diff --git a/include/linux/netfilter_ipv4/ipt_conntrack.h b/include/linux/netfilter_ipv4/ipt_conntrack.h
index 413c5658bd3..cde6762949c 100644
--- a/include/linux/netfilter_ipv4/ipt_conntrack.h
+++ b/include/linux/netfilter_ipv4/ipt_conntrack.h
@@ -5,56 +5,24 @@
#ifndef _IPT_CONNTRACK_H
#define _IPT_CONNTRACK_H
-#define IPT_CONNTRACK_STATE_BIT(ctinfo) (1 << ((ctinfo)%IP_CT_IS_REPLY+1))
-#define IPT_CONNTRACK_STATE_INVALID (1 << 0)
+#include <linux/netfilter/xt_conntrack.h>
-#define IPT_CONNTRACK_STATE_SNAT (1 << (IP_CT_NUMBER + 1))
-#define IPT_CONNTRACK_STATE_DNAT (1 << (IP_CT_NUMBER + 2))
-#define IPT_CONNTRACK_STATE_UNTRACKED (1 << (IP_CT_NUMBER + 3))
+#define IPT_CONNTRACK_STATE_BIT(ctinfo) XT_CONNTRACK_STATE_BIT(ctinfo)
+#define IPT_CONNTRACK_STATE_INVALID XT_CONNTRACK_STATE_INVALID
-/* flags, invflags: */
-#define IPT_CONNTRACK_STATE 0x01
-#define IPT_CONNTRACK_PROTO 0x02
-#define IPT_CONNTRACK_ORIGSRC 0x04
-#define IPT_CONNTRACK_ORIGDST 0x08
-#define IPT_CONNTRACK_REPLSRC 0x10
-#define IPT_CONNTRACK_REPLDST 0x20
-#define IPT_CONNTRACK_STATUS 0x40
-#define IPT_CONNTRACK_EXPIRES 0x80
-
-/* This is exposed to userspace, so remains frozen in time. */
-struct ip_conntrack_old_tuple
-{
- struct {
- __u32 ip;
- union {
- __u16 all;
- } u;
- } src;
-
- struct {
- __u32 ip;
- union {
- __u16 all;
- } u;
-
- /* The protocol. */
- u16 protonum;
- } dst;
-};
+#define IPT_CONNTRACK_STATE_SNAT XT_CONNTRACK_STATE_SNAT
+#define IPT_CONNTRACK_STATE_DNAT XT_CONNTRACK_STATE_DNAT
+#define IPT_CONNTRACK_STATE_UNTRACKED XT_CONNTRACK_STATE_UNTRACKED
-struct ipt_conntrack_info
-{
- unsigned int statemask, statusmask;
-
- struct ip_conntrack_old_tuple tuple[IP_CT_DIR_MAX];
- struct in_addr sipmsk[IP_CT_DIR_MAX], dipmsk[IP_CT_DIR_MAX];
-
- unsigned long expires_min, expires_max;
-
- /* Flags word */
- u_int8_t flags;
- /* Inverse flags */
- u_int8_t invflags;
-};
+/* flags, invflags: */
+#define IPT_CONNTRACK_STATE XT_CONNTRACK_STATE
+#define IPT_CONNTRACK_PROTO XT_CONNTRACK_PROTO
+#define IPT_CONNTRACK_ORIGSRC XT_CONNTRACK_ORIGSRC
+#define IPT_CONNTRACK_ORIGDST XT_CONNTRACK_ORIGDST
+#define IPT_CONNTRACK_REPLSRC XT_CONNTRACK_REPLSRC
+#define IPT_CONNTRACK_REPLDST XT_CONNTRACK_REPLDST
+#define IPT_CONNTRACK_STATUS XT_CONNTRACK_STATUS
+#define IPT_CONNTRACK_EXPIRES XT_CONNTRACK_EXPIRES
+
+#define ipt_conntrack_info xt_conntrack_info
#endif /*_IPT_CONNTRACK_H*/
diff --git a/include/linux/netfilter_ipv4/ipt_dccp.h b/include/linux/netfilter_ipv4/ipt_dccp.h
index 3cb3a522e62..e70d11e1f53 100644
--- a/include/linux/netfilter_ipv4/ipt_dccp.h
+++ b/include/linux/netfilter_ipv4/ipt_dccp.h
@@ -1,23 +1,15 @@
#ifndef _IPT_DCCP_H_
#define _IPT_DCCP_H_
-#define IPT_DCCP_SRC_PORTS 0x01
-#define IPT_DCCP_DEST_PORTS 0x02
-#define IPT_DCCP_TYPE 0x04
-#define IPT_DCCP_OPTION 0x08
+#include <linux/netfilter/xt_dccp.h>
+#define IPT_DCCP_SRC_PORTS XT_DCCP_SRC_PORTS
+#define IPT_DCCP_DEST_PORTS XT_DCCP_DEST_PORTS
+#define IPT_DCCP_TYPE XT_DCCP_TYPE
+#define IPT_DCCP_OPTION XT_DCCP_OPTION
-#define IPT_DCCP_VALID_FLAGS 0x0f
+#define IPT_DCCP_VALID_FLAGS XT_DCCP_VALID_FLAGS
-struct ipt_dccp_info {
- u_int16_t dpts[2]; /* Min, Max */
- u_int16_t spts[2]; /* Min, Max */
-
- u_int16_t flags;
- u_int16_t invflags;
-
- u_int16_t typemask;
- u_int8_t option;
-};
+#define ipt_dccp_info xt_dccp_info
#endif /* _IPT_DCCP_H_ */
diff --git a/include/linux/netfilter_ipv4/ipt_helper.h b/include/linux/netfilter_ipv4/ipt_helper.h
index 6f12ecb8c93..80452c21855 100644
--- a/include/linux/netfilter_ipv4/ipt_helper.h
+++ b/include/linux/netfilter_ipv4/ipt_helper.h
@@ -1,8 +1,7 @@
#ifndef _IPT_HELPER_H
#define _IPT_HELPER_H
-struct ipt_helper_info {
- int invert;
- char name[30];
-};
+#include <linux/netfilter/xt_helper.h>
+#define ipt_helper_info xt_helper_info
+
#endif /* _IPT_HELPER_H */
diff --git a/include/linux/netfilter_ipv4/ipt_length.h b/include/linux/netfilter_ipv4/ipt_length.h
index 6e088522961..9b45206ffce 100644
--- a/include/linux/netfilter_ipv4/ipt_length.h
+++ b/include/linux/netfilter_ipv4/ipt_length.h
@@ -1,9 +1,7 @@
#ifndef _IPT_LENGTH_H
#define _IPT_LENGTH_H
-struct ipt_length_info {
- u_int16_t min, max;
- u_int8_t invert;
-};
+#include <linux/netfilter/xt_length.h>
+#define ipt_length_info xt_length_info
#endif /*_IPT_LENGTH_H*/
diff --git a/include/linux/netfilter_ipv4/ipt_limit.h b/include/linux/netfilter_ipv4/ipt_limit.h
index 256453409e2..92f5cd07bbc 100644
--- a/include/linux/netfilter_ipv4/ipt_limit.h
+++ b/include/linux/netfilter_ipv4/ipt_limit.h
@@ -1,21 +1,8 @@
#ifndef _IPT_RATE_H
#define _IPT_RATE_H
-/* timings are in milliseconds. */
-#define IPT_LIMIT_SCALE 10000
+#include <linux/netfilter/xt_limit.h>
+#define IPT_LIMIT_SCALE XT_LIMIT_SCALE
+#define ipt_rateinfo xt_rateinfo
-/* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490
- seconds, or one every 59 hours. */
-struct ipt_rateinfo {
- u_int32_t avg; /* Average secs between packets * scale */
- u_int32_t burst; /* Period multiplier for upper limit. */
-
- /* Used internally by the kernel */
- unsigned long prev;
- u_int32_t credit;
- u_int32_t credit_cap, cost;
-
- /* Ugly, ugly fucker. */
- struct ipt_rateinfo *master;
-};
#endif /*_IPT_RATE_H*/
diff --git a/include/linux/netfilter_ipv4/ipt_mac.h b/include/linux/netfilter_ipv4/ipt_mac.h
index f8d5b8e7ccd..b186008a3c4 100644
--- a/include/linux/netfilter_ipv4/ipt_mac.h
+++ b/include/linux/netfilter_ipv4/ipt_mac.h
@@ -1,8 +1,7 @@
#ifndef _IPT_MAC_H
#define _IPT_MAC_H
-struct ipt_mac_info {
- unsigned char srcaddr[ETH_ALEN];
- int invert;
-};
+#include <linux/netfilter/xt_mac.h>
+#define ipt_mac_info xt_mac_info
+
#endif /*_IPT_MAC_H*/
diff --git a/include/linux/netfilter_ipv4/ipt_mark.h b/include/linux/netfilter_ipv4/ipt_mark.h
index f3952b563d4..bfde67c6122 100644
--- a/include/linux/netfilter_ipv4/ipt_mark.h
+++ b/include/linux/netfilter_ipv4/ipt_mark.h
@@ -1,9 +1,9 @@
#ifndef _IPT_MARK_H
#define _IPT_MARK_H
-struct ipt_mark_info {
- unsigned long mark, mask;
- u_int8_t invert;
-};
+/* Backwards compatibility for old userspace */
+#include <linux/netfilter/xt_mark.h>
+
+#define ipt_mark_info xt_mark_info
#endif /*_IPT_MARK_H*/
diff --git a/include/linux/netfilter_ipv4/ipt_physdev.h b/include/linux/netfilter_ipv4/ipt_physdev.h
index 7538c8655ec..2400e7140f2 100644
--- a/include/linux/netfilter_ipv4/ipt_physdev.h
+++ b/include/linux/netfilter_ipv4/ipt_physdev.h
@@ -1,24 +1,17 @@
#ifndef _IPT_PHYSDEV_H
#define _IPT_PHYSDEV_H
-#ifdef __KERNEL__
-#include <linux/if.h>
-#endif
+/* Backwards compatibility for old userspace */
-#define IPT_PHYSDEV_OP_IN 0x01
-#define IPT_PHYSDEV_OP_OUT 0x02
-#define IPT_PHYSDEV_OP_BRIDGED 0x04
-#define IPT_PHYSDEV_OP_ISIN 0x08
-#define IPT_PHYSDEV_OP_ISOUT 0x10
-#define IPT_PHYSDEV_OP_MASK (0x20 - 1)
+#include <linux/netfilter/xt_physdev.h>
-struct ipt_physdev_info {
- char physindev[IFNAMSIZ];
- char in_mask[IFNAMSIZ];
- char physoutdev[IFNAMSIZ];
- char out_mask[IFNAMSIZ];
- u_int8_t invert;
- u_int8_t bitmask;
-};
+#define IPT_PHYSDEV_OP_IN XT_PHYSDEV_OP_IN
+#define IPT_PHYSDEV_OP_OUT XT_PHYSDEV_OP_OUT
+#define IPT_PHYSDEV_OP_BRIDGED XT_PHYSDEV_OP_BRIDGED
+#define IPT_PHYSDEV_OP_ISIN XT_PHYSDEV_OP_ISIN
+#define IPT_PHYSDEV_OP_ISOUT XT_PHYSDEV_OP_ISOUT
+#define IPT_PHYSDEV_OP_MASK XT_PHYSDEV_OP_MASK
+
+#define ipt_physdev_info xt_physdev_info
#endif /*_IPT_PHYSDEV_H*/
diff --git a/include/linux/netfilter_ipv4/ipt_pkttype.h b/include/linux/netfilter_ipv4/ipt_pkttype.h
index d53a6584868..ff1fbc949a0 100644
--- a/include/linux/netfilter_ipv4/ipt_pkttype.h
+++ b/include/linux/netfilter_ipv4/ipt_pkttype.h
@@ -1,8 +1,7 @@
#ifndef _IPT_PKTTYPE_H
#define _IPT_PKTTYPE_H
-struct ipt_pkttype_info {
- int pkttype;
- int invert;
-};
+#include <linux/netfilter/xt_pkttype.h>
+#define ipt_pkttype_info xt_pkttype_info
+
#endif /*_IPT_PKTTYPE_H*/
diff --git a/include/linux/netfilter_ipv4/ipt_policy.h b/include/linux/netfilter_ipv4/ipt_policy.h
new file mode 100644
index 00000000000..7fd1bec453f
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_policy.h
@@ -0,0 +1,52 @@
+#ifndef _IPT_POLICY_H
+#define _IPT_POLICY_H
+
+#define IPT_POLICY_MAX_ELEM 4
+
+enum ipt_policy_flags
+{
+ IPT_POLICY_MATCH_IN = 0x1,
+ IPT_POLICY_MATCH_OUT = 0x2,
+ IPT_POLICY_MATCH_NONE = 0x4,
+ IPT_POLICY_MATCH_STRICT = 0x8,
+};
+
+enum ipt_policy_modes
+{
+ IPT_POLICY_MODE_TRANSPORT,
+ IPT_POLICY_MODE_TUNNEL
+};
+
+struct ipt_policy_spec
+{
+ u_int8_t saddr:1,
+ daddr:1,
+ proto:1,
+ mode:1,
+ spi:1,
+ reqid:1;
+};
+
+struct ipt_policy_elem
+{
+ u_int32_t saddr;
+ u_int32_t smask;
+ u_int32_t daddr;
+ u_int32_t dmask;
+ u_int32_t spi;
+ u_int32_t reqid;
+ u_int8_t proto;
+ u_int8_t mode;
+
+ struct ipt_policy_spec match;
+ struct ipt_policy_spec invert;
+};
+
+struct ipt_policy_info
+{
+ struct ipt_policy_elem pol[IPT_POLICY_MAX_ELEM];
+ u_int16_t flags;
+ u_int16_t len;
+};
+
+#endif /* _IPT_POLICY_H */
diff --git a/include/linux/netfilter_ipv4/ipt_realm.h b/include/linux/netfilter_ipv4/ipt_realm.h
index a4d6698723a..b3996eaa018 100644
--- a/include/linux/netfilter_ipv4/ipt_realm.h
+++ b/include/linux/netfilter_ipv4/ipt_realm.h
@@ -1,10 +1,7 @@
#ifndef _IPT_REALM_H
#define _IPT_REALM_H
-struct ipt_realm_info {
- u_int32_t id;
- u_int32_t mask;
- u_int8_t invert;
-};
+#include <linux/netfilter/xt_realm.h>
+#define ipt_realm_info xt_realm_info
#endif /* _IPT_REALM_H */
diff --git a/include/linux/netfilter_ipv4/ipt_state.h b/include/linux/netfilter_ipv4/ipt_state.h
index 5df37868933..a44a99cc28c 100644
--- a/include/linux/netfilter_ipv4/ipt_state.h
+++ b/include/linux/netfilter_ipv4/ipt_state.h
@@ -1,13 +1,15 @@
#ifndef _IPT_STATE_H
#define _IPT_STATE_H
-#define IPT_STATE_BIT(ctinfo) (1 << ((ctinfo)%IP_CT_IS_REPLY+1))
-#define IPT_STATE_INVALID (1 << 0)
+/* Backwards compatibility for old userspace */
-#define IPT_STATE_UNTRACKED (1 << (IP_CT_NUMBER + 1))
+#include <linux/netfilter/xt_state.h>
+
+#define IPT_STATE_BIT XT_STATE_BIT
+#define IPT_STATE_INVALID XT_STATE_INVALID
+
+#define IPT_STATE_UNTRACKED XT_STATE_UNTRACKED
+
+#define ipt_state_info xt_state_info
-struct ipt_state_info
-{
- unsigned int statemask;
-};
#endif /*_IPT_STATE_H*/
diff --git a/include/linux/netfilter_ipv4/ipt_string.h b/include/linux/netfilter_ipv4/ipt_string.h
index a265f6e44ea..c26de305990 100644
--- a/include/linux/netfilter_ipv4/ipt_string.h
+++ b/include/linux/netfilter_ipv4/ipt_string.h
@@ -1,18 +1,10 @@
#ifndef _IPT_STRING_H
#define _IPT_STRING_H
-#define IPT_STRING_MAX_PATTERN_SIZE 128
-#define IPT_STRING_MAX_ALGO_NAME_SIZE 16
+#include <linux/netfilter/xt_string.h>
-struct ipt_string_info
-{
- u_int16_t from_offset;
- u_int16_t to_offset;
- char algo[IPT_STRING_MAX_ALGO_NAME_SIZE];
- char pattern[IPT_STRING_MAX_PATTERN_SIZE];
- u_int8_t patlen;
- u_int8_t invert;
- struct ts_config __attribute__((aligned(8))) *config;
-};
+#define IPT_STRING_MAX_PATTERN_SIZE XT_STRING_MAX_PATTERN_SIZE
+#define IPT_STRING_MAX_ALGO_NAME_SIZE XT_STRING_MAX_ALGO_NAME_SIZE
+#define ipt_string_info xt_string_info
#endif /*_IPT_STRING_H*/
diff --git a/include/linux/netfilter_ipv4/ipt_tcpmss.h b/include/linux/netfilter_ipv4/ipt_tcpmss.h
index e2b14397f70..18bbc8e8e00 100644
--- a/include/linux/netfilter_ipv4/ipt_tcpmss.h
+++ b/include/linux/netfilter_ipv4/ipt_tcpmss.h
@@ -1,9 +1,7 @@
#ifndef _IPT_TCPMSS_MATCH_H
#define _IPT_TCPMSS_MATCH_H
-struct ipt_tcpmss_match_info {
- u_int16_t mss_min, mss_max;
- u_int8_t invert;
-};
+#include <linux/netfilter/xt_tcpmss.h>
+#define ipt_tcpmss_match_info xt_tcpmss_match_info
#endif /*_IPT_TCPMSS_MATCH_H*/
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 53b2983f627..14f2bd01088 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -72,7 +72,12 @@ enum nf_ip6_hook_priorities {
NF_IP6_PRI_LAST = INT_MAX,
};
+#ifdef CONFIG_NETFILTER
extern int ipv6_netfilter_init(void);
extern void ipv6_netfilter_fini(void);
+#else /* CONFIG_NETFILTER */
+static inline int ipv6_netfilter_init(void) { return 0; }
+static inline void ipv6_netfilter_fini(void) { return; }
+#endif /* CONFIG_NETFILTER */
#endif /*__LINUX_IP6_NETFILTER_H*/
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index c163ba31aab..f249b574f0f 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -25,8 +25,15 @@
#include <linux/compiler.h>
#include <linux/netfilter_ipv6.h>
-#define IP6T_FUNCTION_MAXNAMELEN 30
-#define IP6T_TABLE_MAXNAMELEN 32
+#include <linux/netfilter/x_tables.h>
+
+#define IP6T_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN
+#define IP6T_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN
+
+#define ip6t_match xt_match
+#define ip6t_target xt_target
+#define ip6t_table xt_table
+#define ip6t_get_revision xt_get_revision
/* Yes, Virginia, you have to zero the padding. */
struct ip6t_ip6 {
@@ -104,10 +111,7 @@ struct ip6t_standard_target
int verdict;
};
-struct ip6t_counters
-{
- u_int64_t pcnt, bcnt; /* Packet and byte counters */
-};
+#define ip6t_counters xt_counters
/* Values for "flag" field in struct ip6t_ip6 (general ip6 structure). */
#define IP6T_F_PROTO 0x01 /* Set if rule cares about upper
@@ -123,7 +127,7 @@ struct ip6t_counters
#define IP6T_INV_SRCIP 0x08 /* Invert the sense of SRC IP. */
#define IP6T_INV_DSTIP 0x10 /* Invert the sense of DST OP. */
#define IP6T_INV_FRAG 0x20 /* Invert the sense of FRAG. */
-#define IP6T_INV_PROTO 0x40 /* Invert the sense of PROTO. */
+#define IP6T_INV_PROTO XT_INV_PROTO
#define IP6T_INV_MASK 0x7F /* All possible flag bits mask. */
/* This structure defines each of the firewall rules. Consists of 3
@@ -145,7 +149,7 @@ struct ip6t_entry
unsigned int comefrom;
/* Packet and byte counters. */
- struct ip6t_counters counters;
+ struct xt_counters counters;
/* The matches (if any), then the target. */
unsigned char elems[0];
@@ -155,54 +159,41 @@ struct ip6t_entry
* New IP firewall options for [gs]etsockopt at the RAW IP level.
* Unlike BSD Linux inherits IP options so you don't have to use
* a raw socket for this. Instead we check rights in the calls. */
-#define IP6T_BASE_CTL 64 /* base for firewall socket options */
+#define IP6T_BASE_CTL XT_BASE_CTL
-#define IP6T_SO_SET_REPLACE (IP6T_BASE_CTL)
-#define IP6T_SO_SET_ADD_COUNTERS (IP6T_BASE_CTL + 1)
-#define IP6T_SO_SET_MAX IP6T_SO_SET_ADD_COUNTERS
+#define IP6T_SO_SET_REPLACE XT_SO_SET_REPLACE
+#define IP6T_SO_SET_ADD_COUNTERS XT_SO_SET_ADD_COUNTERS
+#define IP6T_SO_SET_MAX XT_SO_SET_MAX
-#define IP6T_SO_GET_INFO (IP6T_BASE_CTL)
-#define IP6T_SO_GET_ENTRIES (IP6T_BASE_CTL + 1)
-#define IP6T_SO_GET_REVISION_MATCH (IP6T_BASE_CTL + 2)
-#define IP6T_SO_GET_REVISION_TARGET (IP6T_BASE_CTL + 3)
-#define IP6T_SO_GET_MAX IP6T_SO_GET_REVISION_TARGET
+#define IP6T_SO_GET_INFO XT_SO_GET_INFO
+#define IP6T_SO_GET_ENTRIES XT_SO_GET_ENTRIES
+#define IP6T_SO_GET_REVISION_MATCH XT_SO_GET_REVISION_MATCH
+#define IP6T_SO_GET_REVISION_TARGET XT_SO_GET_REVISION_TARGET
+#define IP6T_SO_GET_MAX XT_SO_GET_REVISION_TARGET
/* CONTINUE verdict for targets */
-#define IP6T_CONTINUE 0xFFFFFFFF
+#define IP6T_CONTINUE XT_CONTINUE
/* For standard target */
-#define IP6T_RETURN (-NF_REPEAT - 1)
+#define IP6T_RETURN XT_RETURN
-/* TCP matching stuff */
-struct ip6t_tcp
-{
- u_int16_t spts[2]; /* Source port range. */
- u_int16_t dpts[2]; /* Destination port range. */
- u_int8_t option; /* TCP Option iff non-zero*/
- u_int8_t flg_mask; /* TCP flags mask byte */
- u_int8_t flg_cmp; /* TCP flags compare byte */
- u_int8_t invflags; /* Inverse flags */
-};
+/* TCP/UDP matching stuff */
+#include <linux/netfilter/xt_tcpudp.h>
+
+#define ip6t_tcp xt_tcp
+#define ip6t_udp xt_udp
/* Values for "inv" field in struct ipt_tcp. */
-#define IP6T_TCP_INV_SRCPT 0x01 /* Invert the sense of source ports. */
-#define IP6T_TCP_INV_DSTPT 0x02 /* Invert the sense of dest ports. */
-#define IP6T_TCP_INV_FLAGS 0x04 /* Invert the sense of TCP flags. */
-#define IP6T_TCP_INV_OPTION 0x08 /* Invert the sense of option test. */
-#define IP6T_TCP_INV_MASK 0x0F /* All possible flags. */
-
-/* UDP matching stuff */
-struct ip6t_udp
-{
- u_int16_t spts[2]; /* Source port range. */
- u_int16_t dpts[2]; /* Destination port range. */
- u_int8_t invflags; /* Inverse flags */
-};
+#define IP6T_TCP_INV_SRCPT XT_TCP_INV_SRCPT
+#define IP6T_TCP_INV_DSTPT XT_TCP_INV_DSTPT
+#define IP6T_TCP_INV_FLAGS XT_TCP_INV_FLAGS
+#define IP6T_TCP_INV_OPTION XT_TCP_INV_OPTION
+#define IP6T_TCP_INV_MASK XT_TCP_INV_MASK
/* Values for "invflags" field in struct ipt_udp. */
-#define IP6T_UDP_INV_SRCPT 0x01 /* Invert the sense of source ports. */
-#define IP6T_UDP_INV_DSTPT 0x02 /* Invert the sense of dest ports. */
-#define IP6T_UDP_INV_MASK 0x03 /* All possible flags. */
+#define IP6T_UDP_INV_SRCPT XT_UDP_INV_SRCPT
+#define IP6T_UDP_INV_DSTPT XT_UDP_INV_DSTPT
+#define IP6T_UDP_INV_MASK XT_UDP_INV_MASK
/* ICMP matching stuff */
struct ip6t_icmp
@@ -264,23 +255,14 @@ struct ip6t_replace
/* Number of counters (must be equal to current number of entries). */
unsigned int num_counters;
/* The old entries' counters. */
- struct ip6t_counters __user *counters;
+ struct xt_counters __user *counters;
/* The entries (hang off end: not really an array). */
struct ip6t_entry entries[0];
};
/* The argument to IP6T_SO_ADD_COUNTERS. */
-struct ip6t_counters_info
-{
- /* Which table. */
- char name[IP6T_TABLE_MAXNAMELEN];
-
- unsigned int num_counters;
-
- /* The counters (actually `number' of these). */
- struct ip6t_counters counters[0];
-};
+#define ip6t_counters_info xt_counters_info
/* The argument to IP6T_SO_GET_ENTRIES. */
struct ip6t_get_entries
@@ -295,19 +277,10 @@ struct ip6t_get_entries
struct ip6t_entry entrytable[0];
};
-/* The argument to IP6T_SO_GET_REVISION_*. Returns highest revision
- * kernel supports, if >= revision. */
-struct ip6t_get_revision
-{
- char name[IP6T_FUNCTION_MAXNAMELEN-1];
-
- u_int8_t revision;
-};
-
/* Standard return verdict, or do jump. */
-#define IP6T_STANDARD_TARGET ""
+#define IP6T_STANDARD_TARGET XT_STANDARD_TARGET
/* Error verdict. */
-#define IP6T_ERROR_TARGET "ERROR"
+#define IP6T_ERROR_TARGET XT_ERROR_TARGET
/* Helper functions */
static __inline__ struct ip6t_entry_target *
@@ -361,104 +334,11 @@ ip6t_get_target(struct ip6t_entry *e)
#include <linux/init.h>
extern void ip6t_init(void) __init;
-struct ip6t_match
-{
- struct list_head list;
-
- const char name[IP6T_FUNCTION_MAXNAMELEN-1];
-
- u_int8_t revision;
-
- /* Return true or false: return FALSE and set *hotdrop = 1 to
- force immediate packet drop. */
- /* Arguments changed since 2.6.9, as this must now handle
- non-linear skb, using skb_header_pointer and
- skb_ip_make_writable. */
- int (*match)(const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const void *matchinfo,
- int offset,
- unsigned int protoff,
- int *hotdrop);
-
- /* Called when user tries to insert an entry of this type. */
- /* Should return true or false. */
- int (*checkentry)(const char *tablename,
- const struct ip6t_ip6 *ip,
- void *matchinfo,
- unsigned int matchinfosize,
- unsigned int hook_mask);
-
- /* Called when entry of this type deleted. */
- void (*destroy)(void *matchinfo, unsigned int matchinfosize);
-
- /* Set this to THIS_MODULE if you are a module, otherwise NULL */
- struct module *me;
-};
-
-/* Registration hooks for targets. */
-struct ip6t_target
-{
- struct list_head list;
-
- const char name[IP6T_FUNCTION_MAXNAMELEN-1];
-
- u_int8_t revision;
-
- /* Returns verdict. Argument order changed since 2.6.9, as this
- must now handle non-linear skbs, using skb_copy_bits and
- skb_ip_make_writable. */
- unsigned int (*target)(struct sk_buff **pskb,
- const struct net_device *in,
- const struct net_device *out,
- unsigned int hooknum,
- const void *targinfo,
- void *userdata);
-
- /* Called when user tries to insert an entry of this type:
- hook_mask is a bitmask of hooks from which it can be
- called. */
- /* Should return true or false. */
- int (*checkentry)(const char *tablename,
- const struct ip6t_entry *e,
- void *targinfo,
- unsigned int targinfosize,
- unsigned int hook_mask);
-
- /* Called when entry of this type deleted. */
- void (*destroy)(void *targinfo, unsigned int targinfosize);
-
- /* Set this to THIS_MODULE if you are a module, otherwise NULL */
- struct module *me;
-};
-
-extern int ip6t_register_target(struct ip6t_target *target);
-extern void ip6t_unregister_target(struct ip6t_target *target);
-
-extern int ip6t_register_match(struct ip6t_match *match);
-extern void ip6t_unregister_match(struct ip6t_match *match);
+#define ip6t_register_target(tgt) xt_register_target(AF_INET6, tgt)
+#define ip6t_unregister_target(tgt) xt_unregister_target(AF_INET6, tgt)
-/* Furniture shopping... */
-struct ip6t_table
-{
- struct list_head list;
-
- /* A unique name... */
- char name[IP6T_TABLE_MAXNAMELEN];
-
- /* What hooks you will enter on */
- unsigned int valid_hooks;
-
- /* Lock for the curtain */
- rwlock_t lock;
-
- /* Man behind the curtain... */
- struct ip6t_table_info *private;
-
- /* Set this to THIS_MODULE if you are a module, otherwise NULL */
- struct module *me;
-};
+#define ip6t_register_match(match) xt_register_match(AF_INET6, match)
+#define ip6t_unregister_match(match) xt_unregister_match(AF_INET6, match)
extern int ip6t_register_table(struct ip6t_table *table,
const struct ip6t_replace *repl);
diff --git a/include/linux/netfilter_ipv6/ip6t_MARK.h b/include/linux/netfilter_ipv6/ip6t_MARK.h
index 7ade8d8f524..7cf629a8ab9 100644
--- a/include/linux/netfilter_ipv6/ip6t_MARK.h
+++ b/include/linux/netfilter_ipv6/ip6t_MARK.h
@@ -1,8 +1,9 @@
#ifndef _IP6T_MARK_H_target
#define _IP6T_MARK_H_target
-struct ip6t_mark_target_info {
- unsigned long mark;
-};
+/* Backwards compatibility for old userspace */
+#include <linux/netfilter/xt_MARK.h>
-#endif /*_IPT_MARK_H_target*/
+#define ip6t_mark_target_info xt_mark_target_info
+
+#endif /*_IP6T_MARK_H_target*/
diff --git a/include/linux/netfilter_ipv6/ip6t_length.h b/include/linux/netfilter_ipv6/ip6t_length.h
index 7fc09f9f9d6..9e9689d03ed 100644
--- a/include/linux/netfilter_ipv6/ip6t_length.h
+++ b/include/linux/netfilter_ipv6/ip6t_length.h
@@ -1,10 +1,8 @@
#ifndef _IP6T_LENGTH_H
#define _IP6T_LENGTH_H
-struct ip6t_length_info {
- u_int16_t min, max;
- u_int8_t invert;
-};
+#include <linux/netfilter/xt_length.h>
+#define ip6t_length_info xt_length_info
#endif /*_IP6T_LENGTH_H*/
diff --git a/include/linux/netfilter_ipv6/ip6t_limit.h b/include/linux/netfilter_ipv6/ip6t_limit.h
index f2866e50f3b..487e5ea342c 100644
--- a/include/linux/netfilter_ipv6/ip6t_limit.h
+++ b/include/linux/netfilter_ipv6/ip6t_limit.h
@@ -1,21 +1,8 @@
#ifndef _IP6T_RATE_H
#define _IP6T_RATE_H
-/* timings are in milliseconds. */
-#define IP6T_LIMIT_SCALE 10000
+#include <linux/netfilter/xt_limit.h>
+#define IP6T_LIMIT_SCALE XT_LIMIT_SCALE
+#define ip6t_rateinfo xt_rateinfo
-/* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490
- seconds, or one every 59 hours. */
-struct ip6t_rateinfo {
- u_int32_t avg; /* Average secs between packets * scale */
- u_int32_t burst; /* Period multiplier for upper limit. */
-
- /* Used internally by the kernel */
- unsigned long prev;
- u_int32_t credit;
- u_int32_t credit_cap, cost;
-
- /* Ugly, ugly fucker. */
- struct ip6t_rateinfo *master;
-};
-#endif /*_IPT_RATE_H*/
+#endif /*_IP6T_RATE_H*/
diff --git a/include/linux/netfilter_ipv6/ip6t_mac.h b/include/linux/netfilter_ipv6/ip6t_mac.h
index 87c088c2184..ac58e83e942 100644
--- a/include/linux/netfilter_ipv6/ip6t_mac.h
+++ b/include/linux/netfilter_ipv6/ip6t_mac.h
@@ -1,8 +1,7 @@
#ifndef _IP6T_MAC_H
#define _IP6T_MAC_H
-struct ip6t_mac_info {
- unsigned char srcaddr[ETH_ALEN];
- int invert;
-};
-#endif /*_IPT_MAC_H*/
+#include <linux/netfilter/xt_mac.h>
+#define ip6t_mac_info xt_mac_info
+
+#endif /*_IP6T_MAC_H*/
diff --git a/include/linux/netfilter_ipv6/ip6t_mark.h b/include/linux/netfilter_ipv6/ip6t_mark.h
index a734441e1c1..ff204951ddc 100644
--- a/include/linux/netfilter_ipv6/ip6t_mark.h
+++ b/include/linux/netfilter_ipv6/ip6t_mark.h
@@ -1,9 +1,9 @@
#ifndef _IP6T_MARK_H
#define _IP6T_MARK_H
-struct ip6t_mark_info {
- unsigned long mark, mask;
- u_int8_t invert;
-};
+/* Backwards compatibility for old userspace */
+#include <linux/netfilter/xt_mark.h>
+
+#define ip6t_mark_info xt_mark_info
#endif /*_IPT_MARK_H*/
diff --git a/include/linux/netfilter_ipv6/ip6t_physdev.h b/include/linux/netfilter_ipv6/ip6t_physdev.h
index c234731cd66..c161c0a81b5 100644
--- a/include/linux/netfilter_ipv6/ip6t_physdev.h
+++ b/include/linux/netfilter_ipv6/ip6t_physdev.h
@@ -1,24 +1,17 @@
#ifndef _IP6T_PHYSDEV_H
#define _IP6T_PHYSDEV_H
-#ifdef __KERNEL__
-#include <linux/if.h>
-#endif
+/* Backwards compatibility for old userspace */
-#define IP6T_PHYSDEV_OP_IN 0x01
-#define IP6T_PHYSDEV_OP_OUT 0x02
-#define IP6T_PHYSDEV_OP_BRIDGED 0x04
-#define IP6T_PHYSDEV_OP_ISIN 0x08
-#define IP6T_PHYSDEV_OP_ISOUT 0x10
-#define IP6T_PHYSDEV_OP_MASK (0x20 - 1)
+#include <linux/netfilter/xt_physdev.h>
-struct ip6t_physdev_info {
- char physindev[IFNAMSIZ];
- char in_mask[IFNAMSIZ];
- char physoutdev[IFNAMSIZ];
- char out_mask[IFNAMSIZ];
- u_int8_t invert;
- u_int8_t bitmask;
-};
+#define IP6T_PHYSDEV_OP_IN XT_PHYSDEV_OP_IN
+#define IP6T_PHYSDEV_OP_OUT XT_PHYSDEV_OP_OUT
+#define IP6T_PHYSDEV_OP_BRIDGED XT_PHYSDEV_OP_BRIDGED
+#define IP6T_PHYSDEV_OP_ISIN XT_PHYSDEV_OP_ISIN
+#define IP6T_PHYSDEV_OP_ISOUT XT_PHYSDEV_OP_ISOUT
+#define IP6T_PHYSDEV_OP_MASK XT_PHYSDEV_OP_MASK
+
+#define ip6t_physdev_info xt_physdev_info
#endif /*_IP6T_PHYSDEV_H*/
diff --git a/include/linux/netfilter_ipv6/ip6t_policy.h b/include/linux/netfilter_ipv6/ip6t_policy.h
new file mode 100644
index 00000000000..5a93afcd2ff
--- /dev/null
+++ b/include/linux/netfilter_ipv6/ip6t_policy.h
@@ -0,0 +1,52 @@
+#ifndef _IP6T_POLICY_H
+#define _IP6T_POLICY_H
+
+#define IP6T_POLICY_MAX_ELEM 4
+
+enum ip6t_policy_flags
+{
+ IP6T_POLICY_MATCH_IN = 0x1,
+ IP6T_POLICY_MATCH_OUT = 0x2,
+ IP6T_POLICY_MATCH_NONE = 0x4,
+ IP6T_POLICY_MATCH_STRICT = 0x8,
+};
+
+enum ip6t_policy_modes
+{
+ IP6T_POLICY_MODE_TRANSPORT,
+ IP6T_POLICY_MODE_TUNNEL
+};
+
+struct ip6t_policy_spec
+{
+ u_int8_t saddr:1,
+ daddr:1,
+ proto:1,
+ mode:1,
+ spi:1,
+ reqid:1;
+};
+
+struct ip6t_policy_elem
+{
+ struct in6_addr saddr;
+ struct in6_addr smask;
+ struct in6_addr daddr;
+ struct in6_addr dmask;
+ u_int32_t spi;
+ u_int32_t reqid;
+ u_int8_t proto;
+ u_int8_t mode;
+
+ struct ip6t_policy_spec match;
+ struct ip6t_policy_spec invert;
+};
+
+struct ip6t_policy_info
+{
+ struct ip6t_policy_elem pol[IP6T_POLICY_MAX_ELEM];
+ u_int16_t flags;
+ u_int16_t len;
+};
+
+#endif /* _IP6T_POLICY_H */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 2516adeccec..547d649b274 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -38,9 +38,6 @@
# define NFS_DEBUG
#endif
-#define NFS_MAX_FILE_IO_BUFFER_SIZE 32768
-#define NFS_DEF_FILE_IO_BUFFER_SIZE 4096
-
/* Default timeout values */
#define NFS_MAX_UDP_TIMEOUT (60*HZ)
#define NFS_MAX_TCP_TIMEOUT (600*HZ)
@@ -65,6 +62,7 @@
#define FLUSH_STABLE 4 /* commit to stable storage */
#define FLUSH_LOWPRI 8 /* low priority background flush */
#define FLUSH_HIGHPRI 16 /* high priority memory reclaim flush */
+#define FLUSH_NOCOMMIT 32 /* Don't send the NFSv3/v4 COMMIT */
#ifdef __KERNEL__
@@ -394,6 +392,17 @@ extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_
extern struct inode_operations nfs_symlink_inode_operations;
/*
+ * linux/fs/nfs/sysctl.c
+ */
+#ifdef CONFIG_SYSCTL
+extern int nfs_register_sysctl(void);
+extern void nfs_unregister_sysctl(void);
+#else
+#define nfs_register_sysctl() do { } while(0)
+#define nfs_unregister_sysctl() do { } while(0)
+#endif
+
+/*
* linux/fs/nfs/unlink.c
*/
extern int nfs_async_unlink(struct dentry *);
@@ -406,10 +415,12 @@ extern int nfs_writepage(struct page *page, struct writeback_control *wbc);
extern int nfs_writepages(struct address_space *, struct writeback_control *);
extern int nfs_flush_incompatible(struct file *file, struct page *page);
extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
-extern void nfs_writeback_done(struct rpc_task *task);
+extern void nfs_writeback_done(struct rpc_task *task, void *data);
+extern void nfs_writedata_release(void *data);
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-extern void nfs_commit_done(struct rpc_task *);
+extern void nfs_commit_done(struct rpc_task *, void *data);
+extern void nfs_commit_release(void *data);
#endif
/*
@@ -460,18 +471,33 @@ static inline int nfs_wb_page(struct inode *inode, struct page* page)
*/
extern mempool_t *nfs_wdata_mempool;
-static inline struct nfs_write_data *nfs_writedata_alloc(void)
+static inline struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
{
struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS);
+
if (p) {
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
+ if (pagecount < NFS_PAGEVEC_SIZE)
+ p->pagevec = &p->page_array[0];
+ else {
+ size_t size = ++pagecount * sizeof(struct page *);
+ p->pagevec = kmalloc(size, GFP_NOFS);
+ if (p->pagevec) {
+ memset(p->pagevec, 0, size);
+ } else {
+ mempool_free(p, nfs_wdata_mempool);
+ p = NULL;
+ }
+ }
}
return p;
}
static inline void nfs_writedata_free(struct nfs_write_data *p)
{
+ if (p && (p->pagevec != &p->page_array[0]))
+ kfree(p->pagevec);
mempool_free(p, nfs_wdata_mempool);
}
@@ -481,28 +507,45 @@ static inline void nfs_writedata_free(struct nfs_write_data *p)
extern int nfs_readpage(struct file *, struct page *);
extern int nfs_readpages(struct file *, struct address_space *,
struct list_head *, unsigned);
-extern void nfs_readpage_result(struct rpc_task *);
+extern void nfs_readpage_result(struct rpc_task *, void *);
+extern void nfs_readdata_release(void *data);
+
/*
* Allocate and free nfs_read_data structures
*/
extern mempool_t *nfs_rdata_mempool;
-static inline struct nfs_read_data *nfs_readdata_alloc(void)
+static inline struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
{
struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, SLAB_NOFS);
- if (p)
+
+ if (p) {
memset(p, 0, sizeof(*p));
+ INIT_LIST_HEAD(&p->pages);
+ if (pagecount < NFS_PAGEVEC_SIZE)
+ p->pagevec = &p->page_array[0];
+ else {
+ size_t size = ++pagecount * sizeof(struct page *);
+ p->pagevec = kmalloc(size, GFP_NOFS);
+ if (p->pagevec) {
+ memset(p->pagevec, 0, size);
+ } else {
+ mempool_free(p, nfs_rdata_mempool);
+ p = NULL;
+ }
+ }
+ }
return p;
}
static inline void nfs_readdata_free(struct nfs_read_data *p)
{
+ if (p && (p->pagevec != &p->page_array[0]))
+ kfree(p->pagevec);
mempool_free(p, nfs_rdata_mempool);
}
-extern void nfs_readdata_release(struct rpc_task *task);
-
/*
* linux/fs/nfs3proc.c
*/
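A minimal sketch of the calling pattern for the reworked allocation helpers (the caller and "npages" are hypothetical; small requests use the embedded page_array, larger ones get a kmalloc'ed vector that the free routine releases):

	struct nfs_write_data *wdata;

	wdata = nfs_writedata_alloc(npages);	/* npages derived from the request */
	if (wdata == NULL)
		return -ENOMEM;
	/* ... fill wdata->pagevec[0..npages-1] and send the WRITE ... */
	nfs_writedata_free(wdata);		/* also frees an externally allocated pagevec */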
diff --git a/include/linux/nfs_idmap.h b/include/linux/nfs_idmap.h
index a0f1f25e0ea..102e5609429 100644
--- a/include/linux/nfs_idmap.h
+++ b/include/linux/nfs_idmap.h
@@ -71,6 +71,8 @@ int nfs_map_name_to_uid(struct nfs4_client *, const char *, size_t, __u32 *);
int nfs_map_group_to_gid(struct nfs4_client *, const char *, size_t, __u32 *);
int nfs_map_uid_to_name(struct nfs4_client *, __u32, char *);
int nfs_map_gid_to_group(struct nfs4_client *, __u32, char *);
+
+extern unsigned int nfs_idmap_cache_timeout;
#endif /* __KERNEL__ */
#endif /* NFS_IDMAP_H */
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index da2e077b65e..66e2ed65852 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -79,9 +79,7 @@ extern void nfs_clear_page_writeback(struct nfs_page *req);
static inline int
nfs_lock_request_dontget(struct nfs_page *req)
{
- if (test_and_set_bit(PG_BUSY, &req->wb_flags))
- return 0;
- return 1;
+ return !test_and_set_bit(PG_BUSY, &req->wb_flags);
}
/*
@@ -125,9 +123,7 @@ nfs_list_remove_request(struct nfs_page *req)
static inline int
nfs_defer_commit(struct nfs_page *req)
{
- if (test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags))
- return 0;
- return 1;
+ return !test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags);
}
static inline void
@@ -141,9 +137,7 @@ nfs_clear_commit(struct nfs_page *req)
static inline int
nfs_defer_reschedule(struct nfs_page *req)
{
- if (test_and_set_bit(PG_NEED_RESCHED, &req->wb_flags))
- return 0;
- return 1;
+ return !test_and_set_bit(PG_NEED_RESCHED, &req->wb_flags);
}
static inline void
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 40718669b9c..6d6f69ec567 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -4,6 +4,16 @@
#include <linux/sunrpc/xprt.h>
#include <linux/nfsacl.h>
+/*
+ * To change the maximum rsize and wsize supported by the NFS client, adjust
+ * NFS_MAX_FILE_IO_SIZE. 64KB is a typical maximum, but some servers can
+ * support a megabyte or more. The default is left at 4096 bytes, which is
+ * reasonable for NFS over UDP.
+ */
+#define NFS_MAX_FILE_IO_SIZE (1048576U)
+#define NFS_DEF_FILE_IO_SIZE (4096U)
+#define NFS_MIN_FILE_IO_SIZE (1024U)
+
struct nfs4_fsid {
__u64 major;
__u64 minor;
@@ -137,7 +147,7 @@ struct nfs_openres {
*/
struct nfs_open_confirmargs {
const struct nfs_fh * fh;
- nfs4_stateid stateid;
+ nfs4_stateid * stateid;
struct nfs_seqid * seqid;
};
@@ -165,66 +175,62 @@ struct nfs_closeres {
* * Arguments to the lock,lockt, and locku call.
* */
struct nfs_lowner {
- __u64 clientid;
- u32 id;
+ __u64 clientid;
+ u32 id;
};
-struct nfs_lock_opargs {
+struct nfs_lock_args {
+ struct nfs_fh * fh;
+ struct file_lock * fl;
struct nfs_seqid * lock_seqid;
nfs4_stateid * lock_stateid;
struct nfs_seqid * open_seqid;
nfs4_stateid * open_stateid;
- struct nfs_lowner lock_owner;
- __u32 reclaim;
- __u32 new_lock_owner;
+ struct nfs_lowner lock_owner;
+ unsigned char block : 1;
+ unsigned char reclaim : 1;
+ unsigned char new_lock_owner : 1;
};
-struct nfs_locku_opargs {
+struct nfs_lock_res {
+ nfs4_stateid stateid;
+};
+
+struct nfs_locku_args {
+ struct nfs_fh * fh;
+ struct file_lock * fl;
struct nfs_seqid * seqid;
nfs4_stateid * stateid;
};
-struct nfs_lockargs {
- struct nfs_fh * fh;
- __u32 type;
- __u64 offset;
- __u64 length;
- union {
- struct nfs_lock_opargs *lock; /* LOCK */
- struct nfs_lowner *lockt; /* LOCKT */
- struct nfs_locku_opargs *locku; /* LOCKU */
- } u;
+struct nfs_locku_res {
+ nfs4_stateid stateid;
};
-struct nfs_lock_denied {
- __u64 offset;
- __u64 length;
- __u32 type;
- struct nfs_lowner owner;
+struct nfs_lockt_args {
+ struct nfs_fh * fh;
+ struct file_lock * fl;
+ struct nfs_lowner lock_owner;
};
-struct nfs_lockres {
- union {
- nfs4_stateid stateid;/* LOCK success, LOCKU */
- struct nfs_lock_denied denied; /* LOCK failed, LOCKT success */
- } u;
- const struct nfs_server * server;
+struct nfs_lockt_res {
+ struct file_lock * denied; /* LOCK, LOCKT failed */
};
struct nfs4_delegreturnargs {
const struct nfs_fh *fhandle;
const nfs4_stateid *stateid;
+ const u32 * bitmask;
+};
+
+struct nfs4_delegreturnres {
+ struct nfs_fattr * fattr;
+ const struct nfs_server *server;
};
/*
* Arguments to the read call.
*/
-
-#define NFS_READ_MAXIOV (9U)
-#if (NFS_READ_MAXIOV > (MAX_IOVEC -2))
-#error "NFS_READ_MAXIOV is too large"
-#endif
-
struct nfs_readargs {
struct nfs_fh * fh;
struct nfs_open_context *context;
@@ -243,11 +249,6 @@ struct nfs_readres {
/*
* Arguments to the write call.
*/
-#define NFS_WRITE_MAXIOV (9U)
-#if (NFS_WRITE_MAXIOV > (MAX_IOVEC -2))
-#error "NFS_WRITE_MAXIOV is too large"
-#endif
-
struct nfs_writeargs {
struct nfs_fh * fh;
struct nfs_open_context *context;
@@ -678,6 +679,8 @@ struct nfs4_server_caps_res {
struct nfs_page;
+#define NFS_PAGEVEC_SIZE (8U)
+
struct nfs_read_data {
int flags;
struct rpc_task task;
@@ -686,13 +689,14 @@ struct nfs_read_data {
struct nfs_fattr fattr; /* fattr storage */
struct list_head pages; /* Coalesced read requests */
struct nfs_page *req; /* multi ops per nfs_page */
- struct page *pagevec[NFS_READ_MAXIOV];
+ struct page **pagevec;
struct nfs_readargs args;
struct nfs_readres res;
#ifdef CONFIG_NFS_V4
unsigned long timestamp; /* For lease renewal */
#endif
void (*complete) (struct nfs_read_data *, int);
+ struct page *page_array[NFS_PAGEVEC_SIZE + 1];
};
struct nfs_write_data {
@@ -704,13 +708,14 @@ struct nfs_write_data {
struct nfs_writeverf verf;
struct list_head pages; /* Coalesced requests we wish to flush */
struct nfs_page *req; /* multi ops per nfs_page */
- struct page *pagevec[NFS_WRITE_MAXIOV];
+ struct page **pagevec;
struct nfs_writeargs args; /* argument struct */
struct nfs_writeres res; /* result struct */
#ifdef CONFIG_NFS_V4
unsigned long timestamp; /* For lease renewal */
#endif
void (*complete) (struct nfs_write_data *, int);
+ struct page *page_array[NFS_PAGEVEC_SIZE + 1];
};
struct nfs_access_entry;
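As a rough illustration of how the new constants bound a mount-time rsize/wsize (the helper name is invented for this sketch):

	static unsigned int nfs_clamp_io_size(unsigned int size)
	{
		if (size < NFS_MIN_FILE_IO_SIZE)
			return NFS_MIN_FILE_IO_SIZE;
		if (size > NFS_MAX_FILE_IO_SIZE)
			return NFS_MAX_FILE_IO_SIZE;
		return size;
	}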
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h
index bb842ea4103..0798b7781a6 100644
--- a/include/linux/nfsd/nfsfh.h
+++ b/include/linux/nfsd/nfsfh.h
@@ -294,7 +294,7 @@ fill_post_wcc(struct svc_fh *fhp)
/*
* Lock a file handle/inode
* NOTE: both fh_lock and fh_unlock are done "by hand" in
- * vfs.c:nfsd_rename as it needs to grab 2 i_sem's at once
+ * vfs.c:nfsd_rename as it needs to grab 2 i_mutex's at once
* so, any changes here should be reflected there.
*/
static inline void
@@ -317,7 +317,7 @@ fh_lock(struct svc_fh *fhp)
}
inode = dentry->d_inode;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
fill_pre_wcc(fhp);
fhp->fh_locked = 1;
}
@@ -333,7 +333,7 @@ fh_unlock(struct svc_fh *fhp)
if (fhp->fh_locked) {
fill_post_wcc(fhp);
- up(&fhp->fh_dentry->d_inode->i_sem);
+ mutex_unlock(&fhp->fh_dentry->d_inode->i_mutex);
fhp->fh_locked = 0;
}
}
diff --git a/include/linux/nfsd/xdr.h b/include/linux/nfsd/xdr.h
index 130d4f588a3..3f4f7142bbe 100644
--- a/include/linux/nfsd/xdr.h
+++ b/include/linux/nfsd/xdr.h
@@ -88,10 +88,12 @@ struct nfsd_readdirargs {
struct nfsd_attrstat {
struct svc_fh fh;
+ struct kstat stat;
};
struct nfsd_diropres {
struct svc_fh fh;
+ struct kstat stat;
};
struct nfsd_readlinkres {
@@ -101,6 +103,7 @@ struct nfsd_readlinkres {
struct nfsd_readres {
struct svc_fh fh;
unsigned long count;
+ struct kstat stat;
};
struct nfsd_readdirres {
diff --git a/include/linux/nfsd/xdr3.h b/include/linux/nfsd/xdr3.h
index 3c2a71b43ba..a4322741f8b 100644
--- a/include/linux/nfsd/xdr3.h
+++ b/include/linux/nfsd/xdr3.h
@@ -126,6 +126,7 @@ struct nfsd3_setaclargs {
struct nfsd3_attrstat {
__u32 status;
struct svc_fh fh;
+ struct kstat stat;
};
/* LOOKUP, CREATE, MKDIR, SYMLINK, MKNOD */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 343083fec25..d52999c4333 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -79,13 +79,23 @@
/*
* Global page accounting. One instance per CPU. Only unsigned longs are
* allowed.
+ *
+ * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
+ * any time safely (which protects the instance from modification by
+ * interrupt).
+ * - The __xxx_page_state variants can be used safely when interrupts are
+ * disabled.
+ * - The __xxx_page_state variants can be used if the field is only
+ * modified from process context, or only modified from interrupt context.
+ * In this case, the field should be commented here.
*/
struct page_state {
unsigned long nr_dirty; /* Dirty writeable pages */
unsigned long nr_writeback; /* Pages under writeback */
unsigned long nr_unstable; /* NFS unstable pages */
unsigned long nr_page_table_pages;/* Pages used for pagetables */
- unsigned long nr_mapped; /* mapped into pagetables */
+ unsigned long nr_mapped; /* mapped into pagetables.
+ * only modified from process context */
unsigned long nr_slab; /* In slab */
#define GET_PAGE_STATE_LAST nr_slab
@@ -97,32 +107,40 @@ struct page_state {
unsigned long pgpgout; /* Disk writes */
unsigned long pswpin; /* swap reads */
unsigned long pswpout; /* swap writes */
- unsigned long pgalloc_high; /* page allocations */
+ unsigned long pgalloc_high; /* page allocations */
unsigned long pgalloc_normal;
+ unsigned long pgalloc_dma32;
unsigned long pgalloc_dma;
+
unsigned long pgfree; /* page freeings */
unsigned long pgactivate; /* pages moved inactive->active */
unsigned long pgdeactivate; /* pages moved active->inactive */
unsigned long pgfault; /* faults (major+minor) */
unsigned long pgmajfault; /* faults (major only) */
+
unsigned long pgrefill_high; /* inspected in refill_inactive_zone */
unsigned long pgrefill_normal;
+ unsigned long pgrefill_dma32;
unsigned long pgrefill_dma;
unsigned long pgsteal_high; /* total highmem pages reclaimed */
unsigned long pgsteal_normal;
+ unsigned long pgsteal_dma32;
unsigned long pgsteal_dma;
+
unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
unsigned long pgscan_kswapd_normal;
-
+ unsigned long pgscan_kswapd_dma32;
unsigned long pgscan_kswapd_dma;
+
unsigned long pgscan_direct_high;/* total highmem pages scanned */
unsigned long pgscan_direct_normal;
+ unsigned long pgscan_direct_dma32;
unsigned long pgscan_direct_dma;
- unsigned long pginodesteal; /* pages reclaimed via inode freeing */
+ unsigned long pginodesteal; /* pages reclaimed via inode freeing */
unsigned long slabs_scanned; /* slab objects scanned */
unsigned long kswapd_steal; /* pages reclaimed by kswapd */
unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
@@ -136,31 +154,54 @@ struct page_state {
extern void get_page_state(struct page_state *ret);
extern void get_page_state_node(struct page_state *ret, int node);
extern void get_full_page_state(struct page_state *ret);
-extern unsigned long __read_page_state(unsigned long offset);
-extern void __mod_page_state(unsigned long offset, unsigned long delta);
+extern unsigned long read_page_state_offset(unsigned long offset);
+extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
+extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
#define read_page_state(member) \
- __read_page_state(offsetof(struct page_state, member))
+ read_page_state_offset(offsetof(struct page_state, member))
#define mod_page_state(member, delta) \
- __mod_page_state(offsetof(struct page_state, member), (delta))
-
-#define inc_page_state(member) mod_page_state(member, 1UL)
-#define dec_page_state(member) mod_page_state(member, 0UL - 1)
-#define add_page_state(member,delta) mod_page_state(member, (delta))
-#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta))
-
-#define mod_page_state_zone(zone, member, delta) \
- do { \
- unsigned offset; \
- if (is_highmem(zone)) \
- offset = offsetof(struct page_state, member##_high); \
- else if (is_normal(zone)) \
- offset = offsetof(struct page_state, member##_normal); \
- else \
- offset = offsetof(struct page_state, member##_dma); \
- __mod_page_state(offset, (delta)); \
- } while (0)
+ mod_page_state_offset(offsetof(struct page_state, member), (delta))
+
+#define __mod_page_state(member, delta) \
+ __mod_page_state_offset(offsetof(struct page_state, member), (delta))
+
+#define inc_page_state(member) mod_page_state(member, 1UL)
+#define dec_page_state(member) mod_page_state(member, 0UL - 1)
+#define add_page_state(member,delta) mod_page_state(member, (delta))
+#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta))
+
+#define __inc_page_state(member) __mod_page_state(member, 1UL)
+#define __dec_page_state(member) __mod_page_state(member, 0UL - 1)
+#define __add_page_state(member,delta) __mod_page_state(member, (delta))
+#define __sub_page_state(member,delta) __mod_page_state(member, 0UL - (delta))
+
+#define page_state(member) (*__page_state(offsetof(struct page_state, member)))
+
+#define state_zone_offset(zone, member) \
+({ \
+ unsigned offset; \
+ if (is_highmem(zone)) \
+ offset = offsetof(struct page_state, member##_high); \
+ else if (is_normal(zone)) \
+ offset = offsetof(struct page_state, member##_normal); \
+ else if (is_dma32(zone)) \
+ offset = offsetof(struct page_state, member##_dma32); \
+ else \
+ offset = offsetof(struct page_state, member##_dma); \
+ offset; \
+})
+
+#define __mod_page_state_zone(zone, member, delta) \
+ do { \
+ __mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
+ } while (0)
+
+#define mod_page_state_zone(zone, member, delta) \
+ do { \
+ mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
+ } while (0)
/*
* Manipulation of page state flags
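A short sketch of the usage rules described above (not taken from the patch; "zone" is an assumed local variable):

	inc_page_state(pgfault);			/* safe from any context */

	local_irq_disable();
	__inc_page_state(pgmajfault);			/* cheaper variant, needs irqs off */
	__mod_page_state_zone(zone, pgalloc, 1UL);	/* picks pgalloc_dma/dma32/normal/high */
	local_irq_enable();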
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index def32c5715b..8eb7fa76c1d 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -5,6 +5,9 @@
* pages. A pagevec is a multipage container which is used for that.
*/
+#ifndef _LINUX_PAGEVEC_H
+#define _LINUX_PAGEVEC_H
+
/* 14 pointers + two long's align the pagevec structure to a power of two */
#define PAGEVEC_SIZE 14
@@ -83,3 +86,5 @@ static inline void pagevec_lru_add(struct pagevec *pvec)
if (pagevec_count(pvec))
__pagevec_lru_add(pvec);
}
+
+#endif /* _LINUX_PAGEVEC_H */
diff --git a/include/linux/parport.h b/include/linux/parport.h
index d2a4d9e1e6d..f67f838a3a1 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -236,7 +236,8 @@ struct pardevice {
/* IEEE1284 information */
-/* IEEE1284 phases */
+/* IEEE1284 phases. These are exposed to userland through ppdev IOCTL
+ * PP[GS]ETPHASE, so do not change existing values. */
enum ieee1284_phase {
IEEE1284_PH_FWD_DATA,
IEEE1284_PH_FWD_IDLE,
diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h
index c6f76247087..1cc0f6b1a49 100644
--- a/include/linux/parport_pc.h
+++ b/include/linux/parport_pc.h
@@ -79,13 +79,13 @@ static __inline__ unsigned char parport_pc_read_data(struct parport *p)
}
#ifdef DEBUG_PARPORT
-extern __inline__ void dump_parport_state (char *str, struct parport *p)
+static inline void dump_parport_state (char *str, struct parport *p)
{
/* here's hoping that reading these ports won't side-effect anything underneath */
unsigned char ecr = inb (ECONTROL (p));
unsigned char dcr = inb (CONTROL (p));
unsigned char dsr = inb (STATUS (p));
- static char *ecr_modes[] = {"SPP", "PS2", "PPFIFO", "ECP", "xXx", "yYy", "TST", "CFG"};
+ static const char *const ecr_modes[] = {"SPP", "PS2", "PPFIFO", "ECP", "xXx", "yYy", "TST", "CFG"};
const struct parport_pc_private *priv = p->physport->private_data;
int i;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index de690ca73d5..0a44072383e 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -78,6 +78,23 @@ typedef int __bitwise pci_power_t;
#define PCI_UNKNOWN ((pci_power_t __force) 5)
#define PCI_POWER_ERROR ((pci_power_t __force) -1)
+/** The pci_channel state describes connectivity between the CPU and
+ * the pci device. If some PCI bus between here and the pci device
+ * has crashed or locked up, this info is reflected here.
+ */
+typedef unsigned int __bitwise pci_channel_state_t;
+
+enum pci_channel_state {
+ /* I/O channel is in normal state */
+ pci_channel_io_normal = (__force pci_channel_state_t) 1,
+
+ /* I/O to channel is blocked */
+ pci_channel_io_frozen = (__force pci_channel_state_t) 2,
+
+ /* PCI card is dead */
+ pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
+};
+
/*
* The pci_dev structure is used to describe PCI devices.
*/
@@ -98,6 +115,7 @@ struct pci_dev {
unsigned int class; /* 3 bytes: (base,sub,prog-if) */
u8 hdr_type; /* PCI header type (`multi' flag masked out) */
u8 rom_base_reg; /* which config register controls the ROM */
+ u8 pin; /* which interrupt pin this device uses */
struct pci_driver *driver; /* which driver has allocated this device */
u64 dma_mask; /* Mask of the bits of bus address this
@@ -110,6 +128,7 @@ struct pci_dev {
this is D0-D3, D0 being fully functional,
and D3 being off. */
+ pci_channel_state_t error_state; /* current connectivity state */
struct device dev; /* Generic device interface */
/* device is compatible with these IDs */
@@ -232,6 +251,54 @@ struct pci_dynids {
unsigned int use_driver_data:1; /* pci_driver->driver_data is used */
};
+/* ---------------------------------------------------------------- */
+/** PCI Error Recovery System (PCI-ERS). If a PCI device driver provides
+ * a set of callbacks in struct pci_error_handlers, then that device driver
+ * will be notified of PCI bus errors, and will be driven to recovery
+ * when an error occurs.
+ */
+
+typedef unsigned int __bitwise pci_ers_result_t;
+
+enum pci_ers_result {
+ /* no result/none/not supported in device driver */
+ PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
+
+ /* Device driver can recover without slot reset */
+ PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
+
+ /* Device driver wants slot to be reset. */
+ PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
+
+ /* Device has completely failed, is unrecoverable */
+ PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
+
+ /* Device driver is fully recovered and operational */
+ PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
+};
+
+/* PCI bus error event callbacks */
+struct pci_error_handlers
+{
+ /* PCI bus error detected on this device */
+ pci_ers_result_t (*error_detected)(struct pci_dev *dev,
+ enum pci_channel_state error);
+
+ /* MMIO has been re-enabled, but not DMA */
+ pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
+
+ /* PCI Express link has been reset */
+ pci_ers_result_t (*link_reset)(struct pci_dev *dev);
+
+ /* PCI slot has been reset */
+ pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
+
+ /* Device driver may resume normal operations */
+ void (*resume)(struct pci_dev *dev);
+};
+
+/* ---------------------------------------------------------------- */
+
struct module;
struct pci_driver {
struct list_head node;
@@ -244,6 +311,7 @@ struct pci_driver {
int (*enable_wake) (struct pci_dev *dev, pci_power_t state, int enable); /* Enable wake event */
void (*shutdown) (struct pci_dev *dev);
+ struct pci_error_handlers *err_handler;
struct device_driver driver;
struct pci_dynids dynids;
};
@@ -448,6 +516,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass
void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
void *userdata);
+int pci_cfg_space_size(struct pci_dev *dev);
/* kmem_cache style wrapper around pci_alloc_consistent() */
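A minimal sketch of a driver opting in to the new recovery callbacks (the "foo" names are placeholders, not from the patch; callbacks a driver does not implement can simply be left NULL):

	static pci_ers_result_t foo_error_detected(struct pci_dev *dev,
						   enum pci_channel_state error)
	{
		/* stop I/O; give up if the device is permanently gone */
		if (error == pci_channel_io_perm_failure)
			return PCI_ERS_RESULT_DISCONNECT;
		return PCI_ERS_RESULT_NEED_RESET;
	}

	static pci_ers_result_t foo_slot_reset(struct pci_dev *dev)
	{
		/* re-initialize the device after the slot has been reset */
		return PCI_ERS_RESULT_RECOVERED;
	}

	static struct pci_error_handlers foo_err_handler = {
		.error_detected	= foo_error_detected,
		.slot_reset	= foo_slot_reset,
	};

	static struct pci_driver foo_driver = {
		.name		= "foo",
		.err_handler	= &foo_err_handler,
		/* .id_table, .probe, .remove, ... as usual */
	};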
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 4f01710485c..7fb397e3f2d 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -377,6 +377,7 @@
#define PCI_DEVICE_ID_NS_87560_USB 0x0012
#define PCI_DEVICE_ID_NS_83815 0x0020
#define PCI_DEVICE_ID_NS_83820 0x0022
+#define PCI_DEVICE_ID_NS_CS5535_ISA 0x002b
#define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d
#define PCI_DEVICE_ID_NS_CS5535_AUDIO 0x002e
#define PCI_DEVICE_ID_NS_CS5535_USB 0x002f
@@ -394,6 +395,13 @@
#define PCI_DEVICE_ID_NS_87410 0xd001
#define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d
+#define PCI_DEVICE_ID_NS_CS5535_HOST_BRIDGE 0x0028
+#define PCI_DEVICE_ID_NS_CS5535_ISA_BRIDGE 0x002b
+#define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d
+#define PCI_DEVICE_ID_NS_CS5535_AUDIO 0x002e
+#define PCI_DEVICE_ID_NS_CS5535_USB 0x002f
+#define PCI_DEVICE_ID_NS_CS5535_VIDEO 0x0030
+
#define PCI_VENDOR_ID_TSENG 0x100c
#define PCI_DEVICE_ID_TSENG_W32P_2 0x3202
#define PCI_DEVICE_ID_TSENG_W32P_b 0x3205
@@ -493,9 +501,20 @@
#define PCI_DEVICE_ID_AMD_8111_AUDIO 0x746d
#define PCI_DEVICE_ID_AMD_8151_0 0x7454
#define PCI_DEVICE_ID_AMD_8131_APIC 0x7450
+#define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090
+#define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091
+#define PCI_DEVICE_ID_AMD_CS5536_AUDIO 0x2093
+#define PCI_DEVICE_ID_AMD_CS5536_OHC 0x2094
+#define PCI_DEVICE_ID_AMD_CS5536_EHC 0x2095
+#define PCI_DEVICE_ID_AMD_CS5536_UDC 0x2096
+#define PCI_DEVICE_ID_AMD_CS5536_UOC 0x2097
+#define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A
#define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A
+#define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081
+#define PCI_DEVICE_ID_AMD_LX_AES 0x2082
+
#define PCI_VENDOR_ID_TRIDENT 0x1023
#define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000
#define PCI_DEVICE_ID_TRIDENT_4DWAVE_NX 0x2001
@@ -976,6 +995,7 @@
#define PCI_DEVICE_ID_NVIDIA_TNT_UNKNOWN 0x002a
#define PCI_DEVICE_ID_NVIDIA_VTNT2 0x002C
#define PCI_DEVICE_ID_NVIDIA_UVTNT2 0x002D
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SMBUS 0x0034
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE 0x0035
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA 0x0036
#define PCI_DEVICE_ID_NVIDIA_NVENET_10 0x0037
@@ -1030,6 +1050,11 @@
#define PCI_DEVICE_ID_NVIDIA_NVENET_6 0x00e6
#define PCI_DEVICE_ID_NVIDIA_CK8S_AUDIO 0x00ea
#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2 0x00ee
+#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_ALT1 0x00f0
+#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT1 0x00f1
+#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT2 0x00f2
+#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6200_ALT1 0x00f3
+#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x00f9
#define PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR 0x0100
#define PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR 0x0101
#define PCI_DEVICE_ID_NVIDIA_QUADRO 0x0103
@@ -1572,6 +1597,23 @@
#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_2_6 0x0009
#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH081101V1 0x000A
#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH041101V1 0x000B
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_20MHZ 0x000C
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_PTM 0x000D
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_NT960PCI 0x0100
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_2 0x0201
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_4 0x0202
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_232 0x0300
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_232 0x0301
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_232 0x0302
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_1_1 0x0310
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_2 0x0311
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4 0x0312
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2 0x0320
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4 0x0321
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8 0x0322
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_485 0x0330
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_485 0x0331
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_485 0x0332
#define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2
@@ -2110,6 +2152,9 @@
#define PCI_DEVICE_ID_INTEL_IXP2800 0x9004
#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152
+#define PCI_VENDOR_ID_SCALEMP 0x8686
+#define PCI_DEVICE_ID_SCALEMP_VSMP_CTL 0x1010
+
#define PCI_VENDOR_ID_COMPUTONE 0x8e0e
#define PCI_DEVICE_ID_COMPUTONE_IP2EX 0x0291
#define PCI_DEVICE_ID_COMPUTONE_PG 0x0302
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index e2a089b051e..d27a78b7129 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -196,6 +196,7 @@
#define PCI_CAP_ID_MSI 0x05 /* Message Signalled Interrupts */
#define PCI_CAP_ID_CHSWP 0x06 /* CompactPCI HotSwap */
#define PCI_CAP_ID_PCIX 0x07 /* PCI-X */
+#define PCI_CAP_ID_HT_IRQCONF 0x08 /* HyperTransport IRQ Configuration */
#define PCI_CAP_ID_SHPC 0x0C /* PCI Standard Hot-Plug Controller */
#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index fb8d2d24e4b..cb9039a21f2 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -19,7 +19,6 @@
struct percpu_data {
void *ptrs[NR_CPUS];
- void *blkp;
};
/*
@@ -33,14 +32,14 @@ struct percpu_data {
(__typeof__(ptr))__p->ptrs[(cpu)]; \
})
-extern void *__alloc_percpu(size_t size, size_t align);
+extern void *__alloc_percpu(size_t size);
extern void free_percpu(const void *);
#else /* CONFIG_SMP */
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
-static inline void *__alloc_percpu(size_t size, size_t align)
+static inline void *__alloc_percpu(size_t size)
{
void *ret = kmalloc(size, GFP_KERNEL);
if (ret)
@@ -55,7 +54,6 @@ static inline void free_percpu(const void *ptr)
#endif /* CONFIG_SMP */
/* Simple wrapper for the common case: zeros memory. */
-#define alloc_percpu(type) \
- ((type *)(__alloc_percpu(sizeof(type), __alignof__(type))))
+#define alloc_percpu(type) ((type *)(__alloc_percpu(sizeof(type))))
#endif /* __LINUX_PERCPU_H */
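Illustrative use of the simplified allocator (struct and field names are made up for this sketch; a real caller would pin the CPU or disable preemption around smp_processor_id()):

	struct foo_stats *stats = alloc_percpu(struct foo_stats);

	if (stats != NULL) {
		/* every per-CPU copy comes back zeroed */
		per_cpu_ptr(stats, smp_processor_id())->count++;
		free_percpu(stats);
	}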
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 92a9696fdeb..331521a10a2 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -53,6 +53,9 @@
#define PHY_MAX_ADDR 32
+/* Used when trying to connect to a specific phy (mii bus id:phy device id) */
+#define PHY_ID_FMT "%x:%02x"
+
/* The Bus class for PHYs. Devices which provide access to
* PHYs should register using this structure */
struct mii_bus {
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 1767073df26..b12e59c7575 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -37,7 +37,7 @@ struct pipe_inode_info {
memory allocation, whereas PIPE_BUF makes atomicity guarantees. */
#define PIPE_SIZE PAGE_SIZE
-#define PIPE_SEM(inode) (&(inode).i_sem)
+#define PIPE_MUTEX(inode) (&(inode).i_mutex)
#define PIPE_WAIT(inode) (&(inode).i_pipe->wait)
#define PIPE_READERS(inode) ((inode).i_pipe->readers)
#define PIPE_WRITERS(inode) ((inode).i_pipe->writers)
diff --git a/include/linux/pmu.h b/include/linux/pmu.h
index 373bd3b9b33..217d3daf733 100644
--- a/include/linux/pmu.h
+++ b/include/linux/pmu.h
@@ -140,7 +140,7 @@ extern int find_via_pmu(void);
extern int pmu_request(struct adb_request *req,
void (*done)(struct adb_request *), int nbytes, ...);
-
+extern int pmu_queue_request(struct adb_request *req);
extern void pmu_poll(void);
extern void pmu_poll_adb(void); /* For use by xmon */
extern void pmu_wait_complete(struct adb_request *req);
@@ -160,12 +160,6 @@ extern void pmu_unlock(void);
extern int pmu_present(void);
extern int pmu_get_model(void);
-extern int pmu_i2c_combined_read(int bus, int addr, int subaddr, u8* data, int len);
-extern int pmu_i2c_stdsub_write(int bus, int addr, int subaddr, u8* data, int len);
-extern int pmu_i2c_simple_read(int bus, int addr, u8* data, int len);
-extern int pmu_i2c_simple_write(int bus, int addr, u8* data, int len);
-
-
#ifdef CONFIG_PM
/*
* Stuff for putting the powerbook to sleep and waking it again.
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index f942e2bad8e..54faf5236da 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -42,7 +42,7 @@ struct k_itimer {
timer_t it_id; /* timer id */
int it_overrun; /* overrun on pending signal */
int it_overrun_last; /* overrun on last delivered signal */
- int it_requeue_pending; /* waiting to requeue this timer */
+ int it_requeue_pending; /* waiting to requeue this timer */
#define REQUEUE_PENDING 1
int it_sigev_notify; /* notify word of sigevent struct */
int it_sigev_signo; /* signo word of sigevent struct */
@@ -51,10 +51,8 @@ struct k_itimer {
struct sigqueue *sigq; /* signal queue entry. */
union {
struct {
- struct timer_list timer;
- struct list_head abs_timer_entry; /* clock abs_timer_list */
- struct timespec wall_to_prev; /* wall_to_monotonic used when set */
- unsigned long incr; /* interval in jiffies */
+ struct hrtimer timer;
+ ktime_t interval;
} real;
struct cpu_timer_list cpu;
struct {
@@ -66,18 +64,14 @@ struct k_itimer {
} it;
};
-struct k_clock_abs {
- struct list_head list;
- spinlock_t lock;
-};
struct k_clock {
- int res; /* in nano seconds */
- int (*clock_getres) (clockid_t which_clock, struct timespec *tp);
- struct k_clock_abs *abs_struct;
- int (*clock_set) (clockid_t which_clock, struct timespec * tp);
- int (*clock_get) (clockid_t which_clock, struct timespec * tp);
+ int res; /* in nanoseconds */
+ int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
+ int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
+ int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
int (*timer_create) (struct k_itimer *timer);
- int (*nsleep) (clockid_t which_clock, int flags, struct timespec *);
+ int (*nsleep) (const clockid_t which_clock, int flags,
+ struct timespec *, struct timespec __user *);
int (*timer_set) (struct k_itimer * timr, int flags,
struct itimerspec * new_setting,
struct itimerspec * old_setting);
@@ -87,53 +81,35 @@ struct k_clock {
struct itimerspec * cur_setting);
};
-void register_posix_clock(clockid_t clock_id, struct k_clock *new_clock);
+void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock);
-/* Error handlers for timer_create, nanosleep and settime */
+/* error handlers for timer_create, nanosleep and settime */
int do_posix_clock_notimer_create(struct k_itimer *timer);
-int do_posix_clock_nonanosleep(clockid_t, int flags, struct timespec *);
-int do_posix_clock_nosettime(clockid_t, struct timespec *tp);
+int do_posix_clock_nonanosleep(const clockid_t, int flags, struct timespec *,
+ struct timespec __user *);
+int do_posix_clock_nosettime(const clockid_t, struct timespec *tp);
/* function to call to trigger timer event */
int posix_timer_event(struct k_itimer *timr, int si_private);
-struct now_struct {
- unsigned long jiffies;
-};
-
-#define posix_get_now(now) (now)->jiffies = jiffies;
-#define posix_time_before(timer, now) \
- time_before((timer)->expires, (now)->jiffies)
-
-#define posix_bump_timer(timr, now) \
- do { \
- long delta, orun; \
- delta = now.jiffies - (timr)->it.real.timer.expires; \
- if (delta >= 0) { \
- orun = 1 + (delta / (timr)->it.real.incr); \
- (timr)->it.real.timer.expires += \
- orun * (timr)->it.real.incr; \
- (timr)->it_overrun += orun; \
- } \
- }while (0)
-
-int posix_cpu_clock_getres(clockid_t which_clock, struct timespec *);
-int posix_cpu_clock_get(clockid_t which_clock, struct timespec *);
-int posix_cpu_clock_set(clockid_t which_clock, const struct timespec *tp);
-int posix_cpu_timer_create(struct k_itimer *);
-int posix_cpu_nsleep(clockid_t, int, struct timespec *);
-int posix_cpu_timer_set(struct k_itimer *, int,
- struct itimerspec *, struct itimerspec *);
-int posix_cpu_timer_del(struct k_itimer *);
-void posix_cpu_timer_get(struct k_itimer *, struct itimerspec *);
-
-void posix_cpu_timer_schedule(struct k_itimer *);
-
-void run_posix_cpu_timers(struct task_struct *);
-void posix_cpu_timers_exit(struct task_struct *);
-void posix_cpu_timers_exit_group(struct task_struct *);
-
-void set_process_cpu_timer(struct task_struct *, unsigned int,
- cputime_t *, cputime_t *);
+int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *ts);
+int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *ts);
+int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *ts);
+int posix_cpu_timer_create(struct k_itimer *timer);
+int posix_cpu_nsleep(const clockid_t which_clock, int flags,
+ struct timespec *rqtp, struct timespec __user *rmtp);
+int posix_cpu_timer_set(struct k_itimer *timer, int flags,
+ struct itimerspec *new, struct itimerspec *old);
+int posix_cpu_timer_del(struct k_itimer *timer);
+void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp);
+
+void posix_cpu_timer_schedule(struct k_itimer *timer);
+
+void run_posix_cpu_timers(struct task_struct *task);
+void posix_cpu_timers_exit(struct task_struct *task);
+void posix_cpu_timers_exit_group(struct task_struct *task);
+
+void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
+ cputime_t *newval, cputime_t *oldval);
#endif
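A minimal sketch of a clock registration under the new prototypes (CLOCK_FOO and the getres/get helpers are placeholders; the stub handlers are the ones declared above):

	static struct k_clock clock_foo = {
		.res		= NSEC_PER_SEC / HZ,
		.clock_getres	= foo_clock_getres,
		.clock_get	= foo_clock_get,
		.clock_set	= do_posix_clock_nosettime,
		.nsleep		= do_posix_clock_nonanosleep,
		.timer_create	= do_posix_clock_notimer_create,
	};

	register_posix_clock(CLOCK_FOO, &clock_foo);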
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 74488e49166..aa6322d4519 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -146,6 +146,11 @@ struct property;
extern void proc_device_tree_init(void);
extern void proc_device_tree_add_node(struct device_node *, struct proc_dir_entry *);
extern void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop);
+extern void proc_device_tree_remove_prop(struct proc_dir_entry *pde,
+ struct property *prop);
+extern void proc_device_tree_update_prop(struct proc_dir_entry *pde,
+ struct property *newprop,
+ struct property *oldprop);
#endif /* CONFIG_PROC_DEVICETREE */
extern struct proc_dir_entry *proc_symlink(const char *,
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index b2b3dba1298..9d5cd106b34 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -20,8 +20,6 @@
#define PTRACE_DETACH 0x11
#define PTRACE_SYSCALL 24
-#define PTRACE_SYSEMU 31
-#define PTRACE_SYSEMU_SINGLESTEP 32
/* 0x4200-0x4300 are reserved for architecture-independent additions. */
#define PTRACE_SETOPTIONS 0x4200
@@ -80,6 +78,8 @@
extern long arch_ptrace(struct task_struct *child, long request, long addr, long data);
+extern struct task_struct *ptrace_get_task_struct(pid_t pid);
+extern int ptrace_traceme(void);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
extern int ptrace_attach(struct task_struct *tsk);
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 36e5d269612..c57ff2fcb30 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -19,6 +19,7 @@
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H
+#include <linux/sched.h>
#include <linux/preempt.h>
#include <linux/types.h>
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h
index 13e7c4b6236..b6e0bcad84e 100644
--- a/include/linux/raid/md.h
+++ b/include/linux/raid/md.h
@@ -71,8 +71,8 @@
*/
#define MD_PATCHLEVEL_VERSION 3
-extern int register_md_personality (int p_num, mdk_personality_t *p);
-extern int unregister_md_personality (int p_num);
+extern int register_md_personality (struct mdk_personality *p);
+extern int unregister_md_personality (struct mdk_personality *p);
extern mdk_thread_t * md_register_thread (void (*run) (mddev_t *mddev),
mddev_t *mddev, const char *name);
extern void md_unregister_thread (mdk_thread_t *thread);
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index 46629a275ba..617b9506c76 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -18,62 +18,19 @@
/* and dm-bio-list.h is not under include/linux because.... ??? */
#include "../../../drivers/md/dm-bio-list.h"
-#define MD_RESERVED 0UL
-#define LINEAR 1UL
-#define RAID0 2UL
-#define RAID1 3UL
-#define RAID5 4UL
-#define TRANSLUCENT 5UL
-#define HSM 6UL
-#define MULTIPATH 7UL
-#define RAID6 8UL
-#define RAID10 9UL
-#define FAULTY 10UL
-#define MAX_PERSONALITY 11UL
-
#define LEVEL_MULTIPATH (-4)
#define LEVEL_LINEAR (-1)
#define LEVEL_FAULTY (-5)
+/* we need a value for 'no level specified' and 0
+ * means 'raid0', so we need something else. This is
+ * for internal use only
+ */
+#define LEVEL_NONE (-1000000)
+
#define MaxSector (~(sector_t)0)
#define MD_THREAD_NAME_MAX 14
-static inline int pers_to_level (int pers)
-{
- switch (pers) {
- case FAULTY: return LEVEL_FAULTY;
- case MULTIPATH: return LEVEL_MULTIPATH;
- case HSM: return -3;
- case TRANSLUCENT: return -2;
- case LINEAR: return LEVEL_LINEAR;
- case RAID0: return 0;
- case RAID1: return 1;
- case RAID5: return 5;
- case RAID6: return 6;
- case RAID10: return 10;
- }
- BUG();
- return MD_RESERVED;
-}
-
-static inline int level_to_pers (int level)
-{
- switch (level) {
- case LEVEL_FAULTY: return FAULTY;
- case LEVEL_MULTIPATH: return MULTIPATH;
- case -3: return HSM;
- case -2: return TRANSLUCENT;
- case LEVEL_LINEAR: return LINEAR;
- case 0: return RAID0;
- case 1: return RAID1;
- case 4:
- case 5: return RAID5;
- case 6: return RAID6;
- case 10: return RAID10;
- }
- return MD_RESERVED;
-}
-
typedef struct mddev_s mddev_t;
typedef struct mdk_rdev_s mdk_rdev_t;
@@ -138,14 +95,16 @@ struct mdk_rdev_s
atomic_t read_errors; /* number of consecutive read errors that
* we have tried to ignore.
*/
+ atomic_t corrected_errors; /* number of corrected read errors,
+ * for reporting to userspace and storing
+ * in superblock.
+ */
};
-typedef struct mdk_personality_s mdk_personality_t;
-
struct mddev_s
{
void *private;
- mdk_personality_t *pers;
+ struct mdk_personality *pers;
dev_t unit;
int md_minor;
struct list_head disks;
@@ -164,6 +123,7 @@ struct mddev_s
int chunk_size;
time_t ctime, utime;
int level, layout;
+ char clevel[16];
int raid_disks;
int max_disks;
sector_t size; /* used size of component devices */
@@ -183,6 +143,11 @@ struct mddev_s
sector_t resync_mismatches; /* count of sectors where
* parity/replica mismatch found
*/
+ /* if zero, use the system-wide default */
+ int sync_speed_min;
+ int sync_speed_max;
+
+ int ok_start_degraded;
/* recovery/resync flags
* NEEDED: we might need to start a resync/recover
* RUNNING: a thread is running, or about to be started
@@ -265,9 +230,11 @@ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sect
atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}
-struct mdk_personality_s
+struct mdk_personality
{
char *name;
+ int level;
+ struct list_head list;
struct module *owner;
int (*make_request)(request_queue_t *q, struct bio *bio);
int (*run)(mddev_t *mddev);
@@ -305,8 +272,6 @@ static inline char * mdname (mddev_t * mddev)
return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}
-extern mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr);
-
/*
* iterates through some rdev ringlist. It's safe to remove the
* current 'rdev'. Dont touch 'tmp' though.
@@ -366,5 +331,10 @@ do { \
__wait_event_lock_irq(wq, condition, lock, cmd); \
} while (0)
+static inline void safe_put_page(struct page *p)
+{
+ if (p) put_page(p);
+}
+
#endif
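With personalities now keyed by level and kept on a list rather than indexed by a fixed personality number, a module registers roughly like this ("myraid" and its methods are stand-ins for this sketch):

	static struct mdk_personality myraid_personality = {
		.name		= "myraid",
		.level		= 4,		/* the level this personality implements */
		.owner		= THIS_MODULE,
		.make_request	= myraid_make_request,
		.run		= myraid_run,
		/* ... remaining methods ... */
	};

	static int __init myraid_init(void)
	{
		return register_md_personality(&myraid_personality);
	}

	static void __exit myraid_exit(void)
	{
		unregister_md_personality(&myraid_personality);
	}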
diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h
index 292b98f2b40..9d5494aaac0 100644
--- a/include/linux/raid/raid1.h
+++ b/include/linux/raid/raid1.h
@@ -45,6 +45,8 @@ struct r1_private_data_s {
spinlock_t resync_lock;
int nr_pending;
+ int nr_waiting;
+ int nr_queued;
int barrier;
sector_t next_resync;
int fullsync; /* set to 1 if a full sync is needed,
@@ -52,11 +54,12 @@ struct r1_private_data_s {
* Cleared when a sync completes.
*/
- wait_queue_head_t wait_idle;
- wait_queue_head_t wait_resume;
+ wait_queue_head_t wait_barrier;
struct pool_info *poolinfo;
+ struct page *tmppage;
+
mempool_t *r1bio_pool;
mempool_t *r1buf_pool;
};
@@ -106,6 +109,13 @@ struct r1bio_s {
/* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/
};
+/* when we get a read error on a read-only array, we redirect to another
+ * device without failing the first device, or trying to over-write to
+ * correct the read error. To keep track of bad blocks on a per-bio
+ * level, we store IO_BLOCKED in the appropriate 'bios' pointer
+ */
+#define IO_BLOCKED ((struct bio*)1)
+
/* bits for r1bio.state */
#define R1BIO_Uptodate 0
#define R1BIO_IsSync 1
diff --git a/include/linux/raid/raid10.h b/include/linux/raid/raid10.h
index 60708789c8f..b1103298a8c 100644
--- a/include/linux/raid/raid10.h
+++ b/include/linux/raid/raid10.h
@@ -35,18 +35,26 @@ struct r10_private_data_s {
sector_t chunk_mask;
struct list_head retry_list;
- /* for use when syncing mirrors: */
+ /* queue pending writes and submit them on unplug */
+ struct bio_list pending_bio_list;
+
spinlock_t resync_lock;
int nr_pending;
+ int nr_waiting;
+ int nr_queued;
int barrier;
sector_t next_resync;
+ int fullsync; /* set to 1 if a full sync is needed,
+ * (fresh device added).
+ * Cleared when a sync completes.
+ */
- wait_queue_head_t wait_idle;
- wait_queue_head_t wait_resume;
+ wait_queue_head_t wait_barrier;
mempool_t *r10bio_pool;
mempool_t *r10buf_pool;
+ struct page *tmppage;
};
typedef struct r10_private_data_s conf_t;
@@ -96,8 +104,16 @@ struct r10bio_s {
} devs[0];
};
+/* when we get a read error on a read-only array, we redirect to another
+ * device without failing the first device, or trying to over-write to
+ * correct the read error. To keep track of bad blocks on a per-bio
+ * level, we store IO_BLOCKED in the appropriate 'bios' pointer
+ */
+#define IO_BLOCKED ((struct bio*)1)
+
/* bits for r10bio.state */
#define R10BIO_Uptodate 0
#define R10BIO_IsSync 1
#define R10BIO_IsRecover 2
+#define R10BIO_Degraded 3
#endif
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
index f025ba6fb14..394da8207b3 100644
--- a/include/linux/raid/raid5.h
+++ b/include/linux/raid/raid5.h
@@ -126,7 +126,7 @@
*/
struct stripe_head {
- struct stripe_head *hash_next, **hash_pprev; /* hash pointers */
+ struct hlist_node hash;
struct list_head lru; /* inactive_list or handle_list */
struct raid5_private_data *raid_conf;
sector_t sector; /* sector of this row */
@@ -152,7 +152,6 @@ struct stripe_head {
#define R5_Insync 3 /* rdev && rdev->in_sync at start */
#define R5_Wantread 4 /* want to schedule a read */
#define R5_Wantwrite 5
-#define R5_Syncio 6 /* this io need to be accounted as resync io */
#define R5_Overlap 7 /* There is a pending overlapping request on this block */
#define R5_ReadError 8 /* seen a read error here recently */
#define R5_ReWrite 9 /* have tried to over-write the readerror */
@@ -205,7 +204,7 @@ struct disk_info {
};
struct raid5_private_data {
- struct stripe_head **stripe_hashtbl;
+ struct hlist_head *stripe_hashtbl;
mddev_t *mddev;
struct disk_info *spare;
int chunk_size, level, algorithm;
@@ -228,6 +227,8 @@ struct raid5_private_data {
* Cleared when a sync completes.
*/
+ struct page *spare_page; /* Used when checking P/Q in raid6 */
+
/*
* Free stripes pool
*/
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index e0a4faa9610..953b6df5d03 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -5,6 +5,16 @@ struct inode *ramfs_get_inode(struct super_block *sb, int mode, dev_t dev);
struct super_block *ramfs_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data);
+#ifndef CONFIG_MMU
+extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags);
+
+extern int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);
+#endif
+
extern struct file_operations ramfs_file_operations;
extern struct vm_operations_struct generic_file_vm_ops;
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index a471f3bb713..981f9aa4335 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -65,7 +65,11 @@ struct rcu_ctrlblk {
long cur; /* Current batch number. */
long completed; /* Number of the last completed batch */
int next_pending; /* Is the next batch already waiting? */
-} ____cacheline_maxaligned_in_smp;
+
+ spinlock_t lock ____cacheline_internodealigned_in_smp;
+ cpumask_t cpumask; /* CPUs that need to switch in order */
+ /* for current batch to proceed. */
+} ____cacheline_internodealigned_in_smp;
/* Is batch a before batch b ? */
static inline int rcu_batch_before(long a, long b)
@@ -125,36 +129,7 @@ static inline void rcu_bh_qsctr_inc(int cpu)
rdp->passed_quiesc = 1;
}
-static inline int __rcu_pending(struct rcu_ctrlblk *rcp,
- struct rcu_data *rdp)
-{
- /* This cpu has pending rcu entries and the grace period
- * for them has completed.
- */
- if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
- return 1;
-
- /* This cpu has no pending entries, but there are new entries */
- if (!rdp->curlist && rdp->nxtlist)
- return 1;
-
- /* This cpu has finished callbacks to invoke */
- if (rdp->donelist)
- return 1;
-
- /* The rcu core waits for a quiescent state from the cpu */
- if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
- return 1;
-
- /* nothing to do */
- return 0;
-}
-
-static inline int rcu_pending(int cpu)
-{
- return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
- __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
-}
+extern int rcu_pending(int cpu);
/**
* rcu_read_lock - mark the beginning of an RCU read-side critical section.
diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h
deleted file mode 100644
index e1adbba14b6..00000000000
--- a/include/linux/rcuref.h
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * rcuref.h
- *
- * Reference counting for elements of lists/arrays protected by
- * RCU.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2005
- *
- * Author: Dipankar Sarma <dipankar@in.ibm.com>
- * Ravikiran Thirumalai <kiran_th@gmail.com>
- *
- * See Documentation/RCU/rcuref.txt for detailed user guide.
- *
- */
-
-#ifndef _RCUREF_H_
-#define _RCUREF_H_
-
-#ifdef __KERNEL__
-
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-
-/*
- * These APIs work on traditional atomic_t counters used in the
- * kernel for reference counting. Under special circumstances
- * where a lock-free get() operation races with a put() operation
- * these APIs can be used. See Documentation/RCU/rcuref.txt.
- */
-
-#ifdef __HAVE_ARCH_CMPXCHG
-
-/**
- * rcuref_inc - increment refcount for object.
- * @rcuref: reference counter in the object in question.
- *
- * This should be used only for objects where we use RCU and
- * use the rcuref_inc_lf() api to acquire a reference
- * in a lock-free reader-side critical section.
- */
-static inline void rcuref_inc(atomic_t *rcuref)
-{
- atomic_inc(rcuref);
-}
-
-/**
- * rcuref_dec - decrement refcount for object.
- * @rcuref: reference counter in the object in question.
- *
- * This should be used only for objects where we use RCU and
- * use the rcuref_inc_lf() api to acquire a reference
- * in a lock-free reader-side critical section.
- */
-static inline void rcuref_dec(atomic_t *rcuref)
-{
- atomic_dec(rcuref);
-}
-
-/**
- * rcuref_dec_and_test - decrement refcount for object and test
- * @rcuref: reference counter in the object.
- * @release: pointer to the function that will clean up the object
- * when the last reference to the object is released.
- * This pointer is required.
- *
- * Decrement the refcount, and if 0, return 1. Else return 0.
- *
- * This should be used only for objects where we use RCU and
- * use the rcuref_inc_lf() api to acquire a reference
- * in a lock-free reader-side critical section.
- */
-static inline int rcuref_dec_and_test(atomic_t *rcuref)
-{
- return atomic_dec_and_test(rcuref);
-}
-
-/*
- * cmpxchg is needed on UP too, if deletions to the list/array can happen
- * in interrupt context.
- */
-
-/**
- * rcuref_inc_lf - Take reference to an object in a read-side
- * critical section protected by RCU.
- * @rcuref: reference counter in the object in question.
- *
- * Try and increment the refcount by 1. The increment might fail if
- * the reference counter has been through a 1 to 0 transition and
- * is no longer part of the lock-free list.
- * Returns non-zero on successful increment and zero otherwise.
- */
-static inline int rcuref_inc_lf(atomic_t *rcuref)
-{
- int c, old;
- c = atomic_read(rcuref);
- while (c && (old = cmpxchg(&rcuref->counter, c, c + 1)) != c)
- c = old;
- return c;
-}
-
-#else /* !__HAVE_ARCH_CMPXCHG */
-
-extern spinlock_t __rcuref_hash[];
-
-/*
- * Use a hash table of locks to protect the reference count
- * since cmpxchg is not available in this arch.
- */
-#ifdef CONFIG_SMP
-#define RCUREF_HASH_SIZE 4
-#define RCUREF_HASH(k) \
- (&__rcuref_hash[(((unsigned long)k)>>8) & (RCUREF_HASH_SIZE-1)])
-#else
-#define RCUREF_HASH_SIZE 1
-#define RCUREF_HASH(k) &__rcuref_hash[0]
-#endif /* CONFIG_SMP */
-
-/**
- * rcuref_inc - increment refcount for object.
- * @rcuref: reference counter in the object in question.
- *
- * This should be used only for objects where we use RCU and
- * use the rcuref_inc_lf() api to acquire a reference in a lock-free
- * reader-side critical section.
- */
-static inline void rcuref_inc(atomic_t *rcuref)
-{
- unsigned long flags;
- spin_lock_irqsave(RCUREF_HASH(rcuref), flags);
- rcuref->counter += 1;
- spin_unlock_irqrestore(RCUREF_HASH(rcuref), flags);
-}
-
-/**
- * rcuref_dec - decrement refcount for object.
- * @rcuref: reference counter in the object in question.
- *
- * This should be used only for objects where we use RCU and
- * use the rcuref_inc_lf() api to acquire a reference in a lock-free
- * reader-side critical section.
- */
-static inline void rcuref_dec(atomic_t *rcuref)
-{
- unsigned long flags;
- spin_lock_irqsave(RCUREF_HASH(rcuref), flags);
- rcuref->counter -= 1;
- spin_unlock_irqrestore(RCUREF_HASH(rcuref), flags);
-}
-
-/**
- * rcuref_dec_and_test - decrement refcount for object and test
- * @rcuref: reference counter in the object.
- * @release: pointer to the function that will clean up the object
- * when the last reference to the object is released.
- * This pointer is required.
- *
- * Decrement the refcount, and if 0, return 1. Else return 0.
- *
- * This should be used only for objects where we use RCU and
- * use the rcuref_inc_lf() api to acquire a reference in a lock-free
- * reader-side critical section.
- */
-static inline int rcuref_dec_and_test(atomic_t *rcuref)
-{
- unsigned long flags;
- spin_lock_irqsave(RCUREF_HASH(rcuref), flags);
- rcuref->counter--;
- if (!rcuref->counter) {
- spin_unlock_irqrestore(RCUREF_HASH(rcuref), flags);
- return 1;
- } else {
- spin_unlock_irqrestore(RCUREF_HASH(rcuref), flags);
- return 0;
- }
-}
-
-/**
- * rcuref_inc_lf - Take reference to an object of a lock-free collection
- * by traversing a lock-free list/array.
- * @rcuref: reference counter in the object in question.
- *
- * Try and increment the refcount by 1. The increment might fail if
- * the reference counter has been through a 1 to 0 transition and
- * object is no longer part of the lock-free list.
- * Returns non-zero on successful increment and zero otherwise.
- */
-static inline int rcuref_inc_lf(atomic_t *rcuref)
-{
- int ret;
- unsigned long flags;
- spin_lock_irqsave(RCUREF_HASH(rcuref), flags);
- if (rcuref->counter)
- ret = rcuref->counter++;
- else
- ret = 0;
- spin_unlock_irqrestore(RCUREF_HASH(rcuref), flags);
- return ret;
-}
-
-
-#endif /* !__HAVE_ARCH_CMPXCHG */
-
-#endif /* __KERNEL__ */
-#endif /* _RCUREF_H_ */
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 001ab82df05..e276c5ba2bb 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -1857,7 +1857,7 @@ void padd_item(char *item, int total_length, int length);
#define GET_BLOCK_CREATE 1 /* add anything you need to find block */
#define GET_BLOCK_NO_HOLE 2 /* return -ENOENT for file holes */
#define GET_BLOCK_READ_DIRECT 4 /* read the tail if indirect item not found */
-#define GET_BLOCK_NO_ISEM 8 /* i_sem is not held, don't preallocate */
+#define GET_BLOCK_NO_IMUX 8 /* i_mutex is not held, don't preallocate */
#define GET_BLOCK_NO_DANGLE 16 /* don't leave any transactions running */
int restart_transaction(struct reiserfs_transaction_handle *th,
diff --git a/include/linux/relayfs_fs.h b/include/linux/relayfs_fs.h
index fb7e8073732..7342e66247f 100644
--- a/include/linux/relayfs_fs.h
+++ b/include/linux/relayfs_fs.h
@@ -65,20 +65,6 @@ struct rchan
};
/*
- * Relayfs inode
- */
-struct relayfs_inode_info
-{
- struct inode vfs_inode;
- struct rchan_buf *buf;
-};
-
-static inline struct relayfs_inode_info *RELAYFS_I(struct inode *inode)
-{
- return container_of(inode, struct relayfs_inode_info, vfs_inode);
-}
-
-/*
* Relay channel client callbacks
*/
struct rchan_callbacks
@@ -124,6 +110,46 @@ struct rchan_callbacks
*/
void (*buf_unmapped)(struct rchan_buf *buf,
struct file *filp);
+ /*
+ * create_buf_file - create file to represent a relayfs channel buffer
+ * @filename: the name of the file to create
+ * @parent: the parent of the file to create
+ * @mode: the mode of the file to create
+ * @buf: the channel buffer
+ * @is_global: outparam - set non-zero if the buffer should be global
+ *
+ * Called during relay_open(), once for each per-cpu buffer,
+ * to allow the client to create a file to be used to
+ * represent the corresponding channel buffer. If the file is
+ * created outside of relayfs, the parent must also exist in
+ * that filesystem.
+ *
+ * The callback should return the dentry of the file created
+ * to represent the relay buffer.
+ *
+ * Setting the is_global outparam to a non-zero value will
+ * cause relay_open() to create a single global buffer rather
+ * than the default set of per-cpu buffers.
+ *
+ * See Documentation/filesystems/relayfs.txt for more info.
+ */
+ struct dentry *(*create_buf_file)(const char *filename,
+ struct dentry *parent,
+ int mode,
+ struct rchan_buf *buf,
+ int *is_global);
+
+ /*
+ * remove_buf_file - remove file representing a relayfs channel buffer
+ * @dentry: the dentry of the file to remove
+ *
+ * Called during relay_close(), once for each per-cpu buffer,
+ * to allow the client to remove a file used to represent a
+ * channel buffer.
+ *
+ * The callback should return 0 if successful, negative if not.
+ */
+ int (*remove_buf_file)(struct dentry *dentry);
};
/*
@@ -148,6 +174,12 @@ extern size_t relay_switch_subbuf(struct rchan_buf *buf,
extern struct dentry *relayfs_create_dir(const char *name,
struct dentry *parent);
extern int relayfs_remove_dir(struct dentry *dentry);
+extern struct dentry *relayfs_create_file(const char *name,
+ struct dentry *parent,
+ int mode,
+ struct file_operations *fops,
+ void *data);
+extern int relayfs_remove_file(struct dentry *dentry);
/**
* relay_write - write data into the channel
@@ -247,10 +279,9 @@ static inline void subbuf_start_reserve(struct rchan_buf *buf,
}
/*
- * exported relayfs file operations, fs/relayfs/inode.c
+ * exported relay file operations, fs/relayfs/inode.c
*/
-
-extern struct file_operations relayfs_file_operations;
+extern struct file_operations relay_file_operations;
#endif /* _LINUX_RELAYFS_FS_H */
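
Editor's note: a minimal client of the new create_buf_file()/remove_buf_file() hooks added above might look roughly like the following. This is a hedged sketch against this header only (the my_* names are made up); it simply delegates to the relayfs_create_file()/relayfs_remove_file() helpers and the relay_file_operations declared in the same hunk:

    static struct dentry *my_create_buf_file(const char *filename,
                                             struct dentry *parent,
                                             int mode,
                                             struct rchan_buf *buf,
                                             int *is_global)
    {
            /* Let relayfs create the per-cpu file and hand back its dentry. */
            return relayfs_create_file(filename, parent, mode,
                                       &relay_file_operations, buf);
    }

    static int my_remove_buf_file(struct dentry *dentry)
    {
            return relayfs_remove_file(dentry);
    }

    static struct rchan_callbacks my_relay_callbacks = {
            .create_buf_file        = my_create_buf_file,
            .remove_buf_file        = my_remove_buf_file,
    };
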
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index 3bd7cce19e2..f54772d0e7f 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -21,6 +21,7 @@
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/device.h>
+#include <linux/string.h>
#include <linux/rio.h>
extern int __rio_local_read_config_32(struct rio_mport *port, u32 offset,
@@ -336,8 +337,8 @@ static inline void rio_init_dbell_res(struct resource *res, u16 start, u16 end)
/**
* RIO_DEVICE - macro used to describe a specific RIO device
- * @vid: the 16 bit RIO vendor ID
- * @did: the 16 bit RIO device ID
+ * @dev: the 16 bit RIO device ID
+ * @ven: the 16 bit RIO vendor ID
*
* This macro is used to create a struct rio_device_id that matches a
* specific device. The assembly vendor and assembly device fields
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 33261f1d223..9d6fbeef210 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -71,6 +71,7 @@ void __anon_vma_link(struct vm_area_struct *);
* rmap interfaces called when adding or removing pte of page
*/
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
+void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index e1aaf1fac8e..0b2ba67ff13 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -11,6 +11,8 @@
#ifndef _LINUX_RTC_H_
#define _LINUX_RTC_H_
+#include <linux/interrupt.h>
+
/*
* The struct used to pass data via the following ioctl. Similar to the
* struct tm in <time.h>, but it needs to be here so that the kernel
@@ -102,6 +104,7 @@ int rtc_register(rtc_task_t *task);
int rtc_unregister(rtc_task_t *task);
int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg);
void rtc_get_rtc_time(struct rtc_time *rtc_tm);
+irqreturn_t rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs);
#endif /* __KERNEL__ */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b0ad6f30679..a72e1713542 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -34,6 +34,7 @@
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/seccomp.h>
+#include <linux/rcupdate.h>
#include <linux/auxvec.h> /* For AT_VECTOR_SIZE */
@@ -104,6 +105,7 @@ extern unsigned long nr_iowait(void);
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
+#include <linux/hrtimer.h>
#include <asm/processor.h>
@@ -254,25 +256,12 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
* The mm counters are not protected by its page_table_lock,
* so must be incremented atomically.
*/
-#ifdef ATOMIC64_INIT
-#define set_mm_counter(mm, member, value) atomic64_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic64_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic64_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic64_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic64_dec(&(mm)->_##member)
-typedef atomic64_t mm_counter_t;
-#else /* !ATOMIC64_INIT */
-/*
- * The counters wrap back to 0 at 2^32 * PAGE_SIZE,
- * that is, at 16TB if using 4kB page size.
- */
-#define set_mm_counter(mm, member, value) atomic_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic_dec(&(mm)->_##member)
-typedef atomic_t mm_counter_t;
-#endif /* !ATOMIC64_INIT */
+#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
+#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
+#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
+#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
+#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
+typedef atomic_long_t mm_counter_t;
#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
/*
@@ -363,8 +352,16 @@ struct sighand_struct {
atomic_t count;
struct k_sigaction action[_NSIG];
spinlock_t siglock;
+ struct rcu_head rcu;
};
+extern void sighand_free_cb(struct rcu_head *rhp);
+
+static inline void sighand_free(struct sighand_struct *sp)
+{
+ call_rcu(&sp->rcu, sighand_free_cb);
+}
+
/*
 * NOTE! "signal_struct" does not have its own
* locking, because a shared signal_struct always
@@ -402,8 +399,8 @@ struct signal_struct {
struct list_head posix_timers;
/* ITIMER_REAL timer for the process */
- struct timer_list real_timer;
- unsigned long it_real_value, it_real_incr;
+ struct hrtimer real_timer;
+ ktime_t it_real_incr;
/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
cputime_t it_prof_expires, it_virt_expires;
@@ -634,7 +631,14 @@ struct sched_domain {
extern void partition_sched_domains(cpumask_t *partition1,
cpumask_t *partition2);
-#endif /* CONFIG_SMP */
+
+/*
+ * Maximum cache size the migration-costs auto-tuning code will
+ * search from:
+ */
+extern unsigned int max_cache_size;
+
+#endif /* CONFIG_SMP */
struct io_context; /* See blkdev.h */
@@ -692,9 +696,12 @@ struct task_struct {
int lock_depth; /* BKL lock depth */
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
+ int last_waker_cpu; /* CPU that last woke this task up */
+#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
int oncpu;
#endif
+#endif
int prio, static_prio;
struct list_head run_list;
prio_array_t *array;
@@ -775,6 +782,7 @@ struct task_struct {
unsigned keep_capabilities:1;
struct user_struct *user;
#ifdef CONFIG_KEYS
+ struct key *request_key_auth; /* assumed request_key authority */
struct key *thread_keyring; /* keyring private to this thread */
unsigned char jit_keyring; /* default keyring to attach requested keys to */
#endif
@@ -820,6 +828,11 @@ struct task_struct {
/* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
spinlock_t proc_lock;
+#ifdef CONFIG_DEBUG_MUTEXES
+ /* mutex deadlock detection */
+ struct mutex_waiter *blocked_on;
+#endif
+
/* journalling filesystem info */
void *journal_info;
@@ -857,6 +870,7 @@ struct task_struct {
int cpuset_mems_generation;
#endif
atomic_t fs_excl; /* holding fs exclusive resources */
+ struct rcu_head rcu;
};
static inline pid_t process_group(struct task_struct *tsk)
@@ -880,8 +894,14 @@ static inline int pid_alive(struct task_struct *p)
extern void free_task(struct task_struct *tsk);
extern void __put_task_struct(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
-#define put_task_struct(tsk) \
-do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
+
+extern void __put_task_struct_cb(struct rcu_head *rhp);
+
+static inline void put_task_struct(struct task_struct *t)
+{
+ if (atomic_dec_and_test(&t->usage))
+ call_rcu(&t->rcu, __put_task_struct_cb);
+}
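
Editor's note: put_task_struct() now defers the actual free through call_rcu(), so a task_struct cannot disappear underneath an RCU reader that found it on a lock-free list. The callback side of such a scheme recovers the enclosing object from the rcu_head added above with container_of(); a simplified sketch of what a __put_task_struct_cb()-style callback does (illustrative, not the kernel's exact implementation):

    /* Runs after a grace period: every CPU has left its RCU read-side
     * critical section, so nobody can still hold a pointer to this task. */
    static void example_put_task_struct_cb(struct rcu_head *rhp)
    {
            struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

            __put_task_struct(tsk);         /* now safe to really free it */
    }
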
/*
* Per process flags
@@ -908,6 +928,7 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
#define PF_SYNCWRITE 0x00200000 /* I am doing a sync write */
#define PF_BORROWED_MM 0x00400000 /* I am a kthread doing use_mm */
#define PF_RANDOMIZE 0x00800000 /* randomize virtual address space */
+#define PF_SWAPWRITE 0x01000000 /* Allowed to write to swap */
/*
* Only the _current_ task can read/write to tsk->flags, but other
@@ -1101,21 +1122,6 @@ static inline int sas_ss_flags(unsigned long sp)
: on_sig_stack(sp) ? SS_ONSTACK : 0);
}
-
-#ifdef CONFIG_SECURITY
-/* code is in security.c */
-extern int capable(int cap);
-#else
-static inline int capable(int cap)
-{
- if (cap_raised(current->cap_effective, cap)) {
- current->flags |= PF_SUPERPRIV;
- return 1;
- }
- return 0;
-}
-#endif
-
/*
* Routines for handling mm_structs
*/
@@ -1234,6 +1240,7 @@ static inline void task_unlock(struct task_struct *p)
#ifndef __HAVE_THREAD_FUNCTIONS
#define task_thread_info(task) (task)->thread_info
+#define task_stack_page(task) ((void*)((task)->thread_info))
static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
@@ -1379,12 +1386,8 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
-#ifdef CONFIG_MAGIC_SYSRQ
-
extern void normalize_rt_tasks(void);
-#endif
-
#ifdef CONFIG_PM
/*
* Check if a process has been frozen
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
new file mode 100644
index 00000000000..6336987dae6
--- /dev/null
+++ b/include/linux/screen_info.h
@@ -0,0 +1,76 @@
+#ifndef _SCREEN_INFO_H
+#define _SCREEN_INFO_H
+
+#include <linux/types.h>
+
+/*
+ * These are set up by the setup-routine at boot-time:
+ */
+
+struct screen_info {
+ u8 orig_x; /* 0x00 */
+ u8 orig_y; /* 0x01 */
+ u16 dontuse1; /* 0x02 -- EXT_MEM_K sits here */
+ u16 orig_video_page; /* 0x04 */
+ u8 orig_video_mode; /* 0x06 */
+ u8 orig_video_cols; /* 0x07 */
+ u16 unused2; /* 0x08 */
+ u16 orig_video_ega_bx; /* 0x0a */
+ u16 unused3; /* 0x0c */
+ u8 orig_video_lines; /* 0x0e */
+ u8 orig_video_isVGA; /* 0x0f */
+ u16 orig_video_points; /* 0x10 */
+
+ /* VESA graphic mode -- linear frame buffer */
+ u16 lfb_width; /* 0x12 */
+ u16 lfb_height; /* 0x14 */
+ u16 lfb_depth; /* 0x16 */
+ u32 lfb_base; /* 0x18 */
+ u32 lfb_size; /* 0x1c */
+ u16 dontuse2, dontuse3; /* 0x20 -- CL_MAGIC and CL_OFFSET here */
+ u16 lfb_linelength; /* 0x24 */
+ u8 red_size; /* 0x26 */
+ u8 red_pos; /* 0x27 */
+ u8 green_size; /* 0x28 */
+ u8 green_pos; /* 0x29 */
+ u8 blue_size; /* 0x2a */
+ u8 blue_pos; /* 0x2b */
+ u8 rsvd_size; /* 0x2c */
+ u8 rsvd_pos; /* 0x2d */
+ u16 vesapm_seg; /* 0x2e */
+ u16 vesapm_off; /* 0x30 */
+ u16 pages; /* 0x32 */
+ u16 vesa_attributes; /* 0x34 */
+ /* 0x36 -- 0x3f reserved for future expansion */
+};
+
+extern struct screen_info screen_info;
+
+#define ORIG_X (screen_info.orig_x)
+#define ORIG_Y (screen_info.orig_y)
+#define ORIG_VIDEO_MODE (screen_info.orig_video_mode)
+#define ORIG_VIDEO_COLS (screen_info.orig_video_cols)
+#define ORIG_VIDEO_EGA_BX (screen_info.orig_video_ega_bx)
+#define ORIG_VIDEO_LINES (screen_info.orig_video_lines)
+#define ORIG_VIDEO_ISVGA (screen_info.orig_video_isVGA)
+#define ORIG_VIDEO_POINTS (screen_info.orig_video_points)
+
+#define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
+#define VIDEO_TYPE_CGA 0x11 /* CGA Display */
+#define VIDEO_TYPE_EGAM 0x20 /* EGA/VGA in Monochrome Mode */
+#define VIDEO_TYPE_EGAC 0x21 /* EGA in Color Mode */
+#define VIDEO_TYPE_VGAC 0x22 /* VGA+ in Color Mode */
+#define VIDEO_TYPE_VLFB 0x23 /* VESA VGA in graphic mode */
+
+#define VIDEO_TYPE_PICA_S3 0x30 /* ACER PICA-61 local S3 video */
+#define VIDEO_TYPE_MIPS_G364 0x31 /* MIPS Magnum 4000 G364 video */
+#define VIDEO_TYPE_SGI 0x33 /* Various SGI graphics hardware */
+
+#define VIDEO_TYPE_TGAC 0x40 /* DEC TGA */
+
+#define VIDEO_TYPE_SUN 0x50 /* Sun frame buffer. */
+#define VIDEO_TYPE_SUNPCI 0x51 /* Sun PCI based frame buffer. */
+
+#define VIDEO_TYPE_PMAC 0x60 /* PowerMacintosh frame buffer. */
+
+#endif /* _SCREEN_INFO_H */
diff --git a/include/linux/sdla.h b/include/linux/sdla.h
index 3b6afb8caa4..564acd3a71c 100644
--- a/include/linux/sdla.h
+++ b/include/linux/sdla.h
@@ -293,46 +293,46 @@ void sdla(void *cfg_info, char *dev, struct frad_conf *conf, int quiet);
#define SDLA_S508_INTEN 0x10
struct sdla_cmd {
- char opp_flag __attribute__((packed));
- char cmd __attribute__((packed));
- short length __attribute__((packed));
- char retval __attribute__((packed));
- short dlci __attribute__((packed));
- char flags __attribute__((packed));
- short rxlost_int __attribute__((packed));
- long rxlost_app __attribute__((packed));
- char reserve[2] __attribute__((packed));
- char data[SDLA_MAX_DATA] __attribute__((packed)); /* transfer data buffer */
-};
+ char opp_flag;
+ char cmd;
+ short length;
+ char retval;
+ short dlci;
+ char flags;
+ short rxlost_int;
+ long rxlost_app;
+ char reserve[2];
+ char data[SDLA_MAX_DATA]; /* transfer data buffer */
+} __attribute__((packed));
struct intr_info {
- char flags __attribute__((packed));
- short txlen __attribute__((packed));
- char irq __attribute__((packed));
- char flags2 __attribute__((packed));
- short timeout __attribute__((packed));
-};
+ char flags;
+ short txlen;
+ char irq;
+ char flags2;
+ short timeout;
+} __attribute__((packed));
/* found in the 508's control window at RXBUF_INFO */
struct buf_info {
- unsigned short rse_num __attribute__((packed));
- unsigned long rse_base __attribute__((packed));
- unsigned long rse_next __attribute__((packed));
- unsigned long buf_base __attribute__((packed));
- unsigned short reserved __attribute__((packed));
- unsigned long buf_top __attribute__((packed));
-};
+ unsigned short rse_num;
+ unsigned long rse_base;
+ unsigned long rse_next;
+ unsigned long buf_base;
+ unsigned short reserved;
+ unsigned long buf_top;
+} __attribute__((packed));
/* structure pointed to by rse_base in RXBUF_INFO struct */
struct buf_entry {
- char opp_flag __attribute__((packed));
- short length __attribute__((packed));
- short dlci __attribute__((packed));
- char flags __attribute__((packed));
- short timestamp __attribute__((packed));
- short reserved[2] __attribute__((packed));
- long buf_addr __attribute__((packed));
-};
+ char opp_flag;
+ short length;
+ short dlci;
+ char flags;
+ short timestamp;
+ short reserved[2];
+ long buf_addr;
+} __attribute__((packed));
#endif
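
Editor's note: the sdla.h change above moves __attribute__((packed)) from every member onto the struct definition itself, which expresses the same byte-exact wire layout in one place and avoids per-member "attribute ignored" warnings on some compilers. A small stand-alone illustration of what whole-struct packing does (example structs only, not the SDLA layouts):

    #include <stdio.h>

    struct padded {         /* natural alignment: compiler may insert padding */
            char  opp_flag;
            short length;
            long  buf_addr;
    };

    struct packed_all {     /* whole-struct packing: no padding at all */
            char  opp_flag;
            short length;
            long  buf_addr;
    } __attribute__((packed));

    int main(void)
    {
            printf("padded: %zu bytes\n", sizeof(struct padded));     /* e.g. 16 on x86-64 */
            printf("packed: %zu bytes\n", sizeof(struct packed_all)); /* 11 on x86-64 */
            return 0;
    }
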
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index dc89116bb1c..cd2773b29a6 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -26,11 +26,7 @@ static inline int has_secure_computing(struct thread_info *ti)
#else /* CONFIG_SECCOMP */
-#if (__GNUC__ > 2)
- typedef struct { } seccomp_t;
-#else
- typedef struct { int gcc_is_buggy; } seccomp_t;
-#endif
+typedef struct { } seccomp_t;
#define secure_computing(x) do { } while (0)
/* static inline to preserve typechecking */
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index e3710d7e260..a8187c3c8a7 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -67,6 +67,9 @@
/* Parisc type numbers. */
#define PORT_MUX 48
+/* Atmel AT91RM9200 SoC */
+#define PORT_AT91RM9200 49
+
/* Macintosh Zilog type numbers */
#define PORT_MAC_ZILOG 50 /* m68k : not yet implemented */
#define PORT_PMAC_ZILOG 51
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 5dd5f02c5c5..b7d093520bb 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -18,6 +18,19 @@
#define SA_PROBE SA_ONESHOT
#define SA_SAMPLE_RANDOM SA_RESTART
#define SA_SHIRQ 0x04000000
+/*
+ * As above, these correspond to the IORESOURCE_IRQ_* defines in
+ * linux/ioport.h to select the interrupt line behaviour. When
+ * requesting an interrupt without specifying a SA_TRIGGER, the
+ * setting should be assumed to be "as already configured", which
+ * may be as per machine or firmware initialisation.
+ */
+#define SA_TRIGGER_LOW 0x00000008
+#define SA_TRIGGER_HIGH 0x00000004
+#define SA_TRIGGER_FALLING 0x00000002
+#define SA_TRIGGER_RISING 0x00000001
+#define SA_TRIGGER_MASK (SA_TRIGGER_HIGH|SA_TRIGGER_LOW|\
+ SA_TRIGGER_RISING|SA_TRIGGER_FALLING)
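
Editor's note: with these flags a driver can request a specific interrupt line behaviour at request_irq() time instead of relying on board setup code having configured it. A hedged fragment of how a driver of this vintage might use them (the device name, handler and irq field are made up):

    /* Hypothetical probe fragment: falling-edge triggered, shared line. */
    ret = request_irq(dev->irq, example_interrupt,
                      SA_SHIRQ | SA_TRIGGER_FALLING,
                      "example-dev", dev);
    if (ret) {
            printk(KERN_ERR "example-dev: cannot claim IRQ %d\n", dev->irq);
            return ret;
    }
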
/*
* Real Time signals may be queued.
@@ -81,6 +94,23 @@ static inline int sigfindinword(unsigned long word)
#endif /* __HAVE_ARCH_SIG_BITOPS */
+static inline int sigisemptyset(sigset_t *set)
+{
+ extern void _NSIG_WORDS_is_unsupported_size(void);
+ switch (_NSIG_WORDS) {
+ case 4:
+ return (set->sig[3] | set->sig[2] |
+ set->sig[1] | set->sig[0]) == 0;
+ case 2:
+ return (set->sig[1] | set->sig[0]) == 0;
+ case 1:
+ return set->sig[0] == 0;
+ default:
+ _NSIG_WORDS_is_unsupported_size();
+ return 0;
+ }
+}
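
Editor's note: sigisemptyset() just ORs the words of the set together (with _NSIG = 64 on most architectures that is two words on 32-bit and one on 64-bit). A stand-alone illustration of the same word-OR emptiness test, using a mock two-word set rather than the kernel sigset_t:

    #include <stdio.h>

    struct mock_sigset {
            unsigned long sig[2];
    };

    static int mock_sigisemptyset(const struct mock_sigset *set)
    {
            return (set->sig[1] | set->sig[0]) == 0;
    }

    int main(void)
    {
            struct mock_sigset empty   = { { 0, 0 } };
            struct mock_sigset pending = { { 1UL << 9, 0 } };  /* some signal bit set */

            printf("%d %d\n", mock_sigisemptyset(&empty),
                              mock_sigisemptyset(&pending));   /* prints "1 0" */
            return 0;
    }
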
+
#define sigmask(sig) (1UL << ((sig) - 1))
#ifndef __HAVE_ARCH_SIG_SETOPS
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 483cfc47ec3..e5fd66c5650 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -251,7 +251,7 @@ struct sk_buff {
* want to keep them across layers you have to do a skb_clone()
* first. This is owned by whoever has the skb queued ATM.
*/
- char cb[40];
+ char cb[48];
unsigned int len,
data_len,
diff --git a/include/linux/slab.h b/include/linux/slab.h
index d1ea4051b99..1fb77a9cc14 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -53,6 +53,8 @@ typedef struct kmem_cache kmem_cache_t;
#define SLAB_CTOR_ATOMIC 0x002UL /* tell constructor it can't sleep */
#define SLAB_CTOR_VERIFY 0x004UL /* tell constructor it's a verify call */
+#ifndef CONFIG_SLOB
+
/* prototypes */
extern void __init kmem_cache_init(void);
@@ -134,6 +136,39 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
extern int FASTCALL(kmem_cache_reap(int));
extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
+#else /* CONFIG_SLOB */
+
+/* SLOB allocator routines */
+
+void kmem_cache_init(void);
+struct kmem_cache *kmem_find_general_cachep(size_t, gfp_t gfpflags);
+struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t,
+ unsigned long,
+ void (*)(void *, struct kmem_cache *, unsigned long),
+ void (*)(void *, struct kmem_cache *, unsigned long));
+int kmem_cache_destroy(struct kmem_cache *c);
+void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags);
+void kmem_cache_free(struct kmem_cache *c, void *b);
+const char *kmem_cache_name(struct kmem_cache *);
+void *kmalloc(size_t size, gfp_t flags);
+void *kzalloc(size_t size, gfp_t flags);
+void kfree(const void *m);
+unsigned int ksize(const void *m);
+unsigned int kmem_cache_size(struct kmem_cache *c);
+
+static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+{
+ return kzalloc(n * size, flags);
+}
+
+#define kmem_cache_shrink(d) (0)
+#define kmem_cache_reap(a)
+#define kmem_ptr_validate(a, b) (0)
+#define kmem_cache_alloc_node(c, f, n) kmem_cache_alloc(c, f)
+#define kmalloc_node(s, f, n) kmalloc(s, f)
+
+#endif /* CONFIG_SLOB */
+
/* System wide caches */
extern kmem_cache_t *vm_area_cachep;
extern kmem_cache_t *names_cachep;
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 9f4019156fd..b02dda4ee83 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -186,6 +186,7 @@ struct ucred {
#define AF_PPPOX 24 /* PPPoX sockets */
#define AF_WANPIPE 25 /* Wanpipe API Sockets */
#define AF_LLC 26 /* Linux LLC */
+#define AF_TIPC 30 /* TIPC sockets */
#define AF_BLUETOOTH 31 /* Bluetooth sockets */
#define AF_MAX 32 /* For now.. */
@@ -218,6 +219,7 @@ struct ucred {
#define PF_PPPOX AF_PPPOX
#define PF_WANPIPE AF_WANPIPE
#define PF_LLC AF_LLC
+#define PF_TIPC AF_TIPC
#define PF_BLUETOOTH AF_BLUETOOTH
#define PF_MAX AF_MAX
@@ -279,6 +281,7 @@ struct ucred {
#define SOL_LLC 268
#define SOL_DCCP 269
#define SOL_NETLINK 270
+#define SOL_TIPC 271
/* IPX options */
#define IPX_TYPE 1
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index def2d173a8d..04135b0e198 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -22,30 +22,16 @@ typedef struct {
#else
-/*
- * All gcc 2.95 versions and early versions of 2.96 have a nasty bug
- * with empty initializers.
- */
-#if (__GNUC__ > 2)
typedef struct { } raw_spinlock_t;
#define __RAW_SPIN_LOCK_UNLOCKED { }
-#else
-typedef struct { int gcc_is_buggy; } raw_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED (raw_spinlock_t) { 0 }
-#endif
#endif
-#if (__GNUC__ > 2)
typedef struct {
/* no debug version on UP */
} raw_rwlock_t;
#define __RAW_RW_LOCK_UNLOCKED { }
-#else
-typedef struct { int gcc_is_buggy; } raw_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED (raw_rwlock_t) { 0 }
-#endif
#endif /* __LINUX_SPINLOCK_TYPES_UP_H */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index ab151bbb66d..f147e6b8433 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -49,7 +49,6 @@ struct rpc_clnt {
unsigned int cl_softrtry : 1,/* soft timeouts */
cl_intr : 1,/* interruptible */
- cl_chatty : 1,/* be verbose */
cl_autobind : 1,/* use getport() */
cl_oneshot : 1,/* dispose after use */
cl_dead : 1;/* abandoned */
@@ -126,7 +125,8 @@ int rpc_register(u32, u32, int, unsigned short, int *);
void rpc_call_setup(struct rpc_task *, struct rpc_message *, int);
int rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg,
- int flags, rpc_action callback, void *clntdata);
+ int flags, const struct rpc_call_ops *tk_ops,
+ void *calldata);
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg,
int flags);
void rpc_restart_call(struct rpc_task *);
@@ -134,6 +134,7 @@ void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset);
void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset);
void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
size_t rpc_max_payload(struct rpc_clnt *);
+void rpc_force_rebind(struct rpc_clnt *);
int rpc_ping(struct rpc_clnt *clnt, int flags);
static __inline__
diff --git a/include/linux/sunrpc/gss_spkm3.h b/include/linux/sunrpc/gss_spkm3.h
index 0beb2cf00a8..336e218c278 100644
--- a/include/linux/sunrpc/gss_spkm3.h
+++ b/include/linux/sunrpc/gss_spkm3.h
@@ -48,7 +48,7 @@ u32 spkm3_read_token(struct spkm3_ctx *ctx, struct xdr_netobj *read_token, struc
#define CKSUMTYPE_RSA_MD5 0x0007
s32 make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
- struct xdr_netobj *cksum);
+ int body_offset, struct xdr_netobj *cksum);
void asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits);
int decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen,
int explen);
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 4d77e90d0b3..8b25629accd 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -27,6 +27,7 @@ struct rpc_message {
struct rpc_cred * rpc_cred; /* Credentials */
};
+struct rpc_call_ops;
struct rpc_wait_queue;
struct rpc_wait {
struct list_head list; /* wait queue links */
@@ -41,6 +42,7 @@ struct rpc_task {
#ifdef RPC_DEBUG
unsigned long tk_magic; /* 0xf00baa */
#endif
+ atomic_t tk_count; /* Reference count */
struct list_head tk_task; /* global list of tasks */
struct rpc_clnt * tk_client; /* RPC client */
struct rpc_rqst * tk_rqstp; /* RPC request */
@@ -50,8 +52,6 @@ struct rpc_task {
* RPC call state
*/
struct rpc_message tk_msg; /* RPC call info */
- __u32 * tk_buffer; /* XDR buffer */
- size_t tk_bufsize;
__u8 tk_garb_retry;
__u8 tk_cred_retry;
@@ -61,13 +61,12 @@ struct rpc_task {
* timeout_fn to be executed by timer bottom half
* callback to be executed after waking up
* action next procedure for async tasks
- * exit exit async task and report to caller
+ * tk_ops caller callbacks
*/
void (*tk_timeout_fn)(struct rpc_task *);
void (*tk_callback)(struct rpc_task *);
void (*tk_action)(struct rpc_task *);
- void (*tk_exit)(struct rpc_task *);
- void (*tk_release)(struct rpc_task *);
+ const struct rpc_call_ops *tk_ops;
void * tk_calldata;
/*
@@ -78,7 +77,6 @@ struct rpc_task {
struct timer_list tk_timer; /* kernel timer */
unsigned long tk_timeout; /* timeout for rpc_sleep() */
unsigned short tk_flags; /* misc flags */
- unsigned char tk_active : 1;/* Task has been activated */
unsigned char tk_priority : 2;/* Task priority */
unsigned long tk_runstate; /* Task run status */
struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could
@@ -111,6 +109,13 @@ struct rpc_task {
typedef void (*rpc_action)(struct rpc_task *);
+struct rpc_call_ops {
+ void (*rpc_call_prepare)(struct rpc_task *, void *);
+ void (*rpc_call_done)(struct rpc_task *, void *);
+ void (*rpc_release)(void *);
+};
+
+
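
Editor's note: the new rpc_call_ops structure replaces the old single tk_exit/tk_release pointers with a prepare/done/release triple, and is what rpc_call_async() and rpc_run_task() now take (see the changed prototypes in clnt.h earlier in this patch). A hedged sketch of an async caller (the example_* names and calldata struct are illustrative):

    struct example_req {
            int status;                     /* filled in when the call completes */
    };

    static void example_call_done(struct rpc_task *task, void *calldata)
    {
            struct example_req *req = calldata;

            req->status = task->tk_status;  /* final RPC status */
    }

    static void example_call_release(void *calldata)
    {
            kfree(calldata);                /* free the example_req the caller allocated */
    }

    static const struct rpc_call_ops example_call_ops = {
            .rpc_call_done  = example_call_done,
            .rpc_release    = example_call_release,
    };

    /* submission: rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, &example_call_ops, req); */
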
/*
* RPC task flags
*/
@@ -129,7 +134,6 @@ typedef void (*rpc_action)(struct rpc_task *);
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
#define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS)
#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED)
-#define RPC_IS_ACTIVATED(t) ((t)->tk_active)
#define RPC_DO_CALLBACK(t) ((t)->tk_callback != NULL)
#define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT)
#define RPC_TASK_UNINTERRUPTIBLE(t) ((t)->tk_flags & RPC_TASK_NOINTR)
@@ -138,6 +142,7 @@ typedef void (*rpc_action)(struct rpc_task *);
#define RPC_TASK_QUEUED 1
#define RPC_TASK_WAKEUP 2
#define RPC_TASK_HAS_TIMER 3
+#define RPC_TASK_ACTIVE 4
#define RPC_IS_RUNNING(t) (test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate))
#define rpc_set_running(t) (set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate))
@@ -168,6 +173,15 @@ typedef void (*rpc_action)(struct rpc_task *);
smp_mb__after_clear_bit(); \
} while (0)
+#define RPC_IS_ACTIVATED(t) (test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate))
+#define rpc_set_active(t) (set_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate))
+#define rpc_clear_active(t) \
+ do { \
+ smp_mb__before_clear_bit(); \
+ clear_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate); \
+ smp_mb__after_clear_bit(); \
+ } while(0)
+
/*
* Task priorities.
* Note: if you change these, you must also change
@@ -228,11 +242,16 @@ struct rpc_wait_queue {
/*
* Function prototypes
*/
-struct rpc_task *rpc_new_task(struct rpc_clnt *, rpc_action, int flags);
+struct rpc_task *rpc_new_task(struct rpc_clnt *, int flags,
+ const struct rpc_call_ops *ops, void *data);
+struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
+ const struct rpc_call_ops *ops, void *data);
struct rpc_task *rpc_new_child(struct rpc_clnt *, struct rpc_task *parent);
-void rpc_init_task(struct rpc_task *, struct rpc_clnt *,
- rpc_action exitfunc, int flags);
+void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt,
+ int flags, const struct rpc_call_ops *ops,
+ void *data);
void rpc_release_task(struct rpc_task *);
+void rpc_exit_task(struct rpc_task *);
void rpc_killall_tasks(struct rpc_clnt *);
int rpc_execute(struct rpc_task *);
void rpc_run_child(struct rpc_task *parent, struct rpc_task *child,
@@ -247,9 +266,11 @@ struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
void rpc_wake_up_status(struct rpc_wait_queue *, int);
void rpc_delay(struct rpc_task *, unsigned long);
void * rpc_malloc(struct rpc_task *, size_t);
+void rpc_free(struct rpc_task *);
int rpciod_up(void);
void rpciod_down(void);
void rpciod_wake_up(void);
+int __rpc_wait_for_completion_task(struct rpc_task *task, int (*)(void *));
#ifdef RPC_DEBUG
void rpc_show_tasks(void);
#endif
@@ -259,7 +280,12 @@ void rpc_destroy_mempool(void);
static inline void rpc_exit(struct rpc_task *task, int status)
{
task->tk_status = status;
- task->tk_action = NULL;
+ task->tk_action = rpc_exit_task;
+}
+
+static inline int rpc_wait_for_completion_task(struct rpc_task *task)
+{
+ return __rpc_wait_for_completion_task(task, NULL);
}
#ifdef RPC_DEBUG
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 5da968729cf..84c35d42d25 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -91,7 +91,6 @@ struct xdr_buf {
u32 * xdr_encode_opaque_fixed(u32 *p, const void *ptr, unsigned int len);
u32 * xdr_encode_opaque(u32 *p, const void *ptr, unsigned int len);
u32 * xdr_encode_string(u32 *p, const char *s);
-u32 * xdr_decode_string(u32 *p, char **sp, int *lenp, int maxlen);
u32 * xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen);
u32 * xdr_encode_netobj(u32 *p, const struct xdr_netobj *);
u32 * xdr_decode_netobj(u32 *p, struct xdr_netobj *);
@@ -135,11 +134,6 @@ xdr_adjust_iovec(struct kvec *iov, u32 *p)
}
/*
- * Maximum number of iov's we use.
- */
-#define MAX_IOVEC (12)
-
-/*
* XDR buffer helper functions
*/
extern void xdr_shift_buf(struct xdr_buf *, size_t);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 3b8b6e823c7..6ef99b14ff0 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -79,21 +79,19 @@ struct rpc_rqst {
void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
struct list_head rq_list;
+ __u32 * rq_buffer; /* XDR encode buffer */
+ size_t rq_bufsize;
+
struct xdr_buf rq_private_buf; /* The receive buffer
* used in the softirq.
*/
unsigned long rq_majortimeo; /* major timeout alarm */
unsigned long rq_timeout; /* Current timeout value */
unsigned int rq_retries; /* # of retries */
- /*
- * For authentication (e.g. auth_des)
- */
- u32 rq_creddata[2];
/*
* Partial send handling
*/
-
u32 rq_bytes_sent; /* Bytes we have sent */
unsigned long rq_xtime; /* when transmitted */
@@ -106,7 +104,10 @@ struct rpc_xprt_ops {
void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
int (*reserve_xprt)(struct rpc_task *task);
void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
+ void (*set_port)(struct rpc_xprt *xprt, unsigned short port);
void (*connect)(struct rpc_task *task);
+ void * (*buf_alloc)(struct rpc_task *task, size_t size);
+ void (*buf_free)(struct rpc_task *task);
int (*send_request)(struct rpc_task *task);
void (*set_retrans_timeout)(struct rpc_task *task);
void (*timer)(struct rpc_task *task);
@@ -253,6 +254,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to);
#define XPRT_LOCKED (0)
#define XPRT_CONNECTED (1)
#define XPRT_CONNECTING (2)
+#define XPRT_CLOSE_WAIT (3)
static inline void xprt_set_connected(struct rpc_xprt *xprt)
{
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index a61c04f804b..5dc94e777fa 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -14,11 +14,7 @@
typedef struct pbe {
unsigned long address; /* address of the copy */
unsigned long orig_address; /* original address of page */
- swp_entry_t swap_address;
-
- struct pbe *next; /* also used as scratch space at
- * end of page (see link, diskpage)
- */
+ struct pbe *next;
} suspend_pagedir_t;
#define for_each_pbe(pbe, pblist) \
@@ -77,6 +73,6 @@ unsigned long get_safe_page(gfp_t gfp_mask);
* XXX: We try to keep some more pages free so that I/O operations succeed
* without paging. Might this be more?
*/
-#define PAGES_FOR_IO 512
+#define PAGES_FOR_IO 1024
#endif /* _LINUX_SWSUSP_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 508668f840b..389d1c382e2 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -172,10 +172,16 @@ extern void swap_setup(void);
/* linux/mm/vmscan.c */
extern int try_to_free_pages(struct zone **, gfp_t);
-extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
extern int shrink_all_memory(int);
extern int vm_swappiness;
+#ifdef CONFIG_MIGRATION
+extern int isolate_lru_page(struct page *p);
+extern int putback_lru_pages(struct list_head *l);
+extern int migrate_pages(struct list_head *l, struct list_head *t,
+ struct list_head *moved, struct list_head *failed);
+#endif
+
#ifdef CONFIG_MMU
/* linux/mm/shmem.c */
extern int shmem_unuse(swp_entry_t entry, struct page *page);
@@ -193,7 +199,7 @@ extern int rw_swap_page_sync(int, swp_entry_t, struct page *);
extern struct address_space swapper_space;
#define total_swapcache_pages swapper_space.nrpages
extern void show_swap_cache_info(void);
-extern int add_to_swap(struct page *);
+extern int add_to_swap(struct page *, gfp_t);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern int move_to_swap_cache(struct page *, swp_entry_t);
@@ -210,6 +216,7 @@ extern unsigned int nr_swapfiles;
extern struct swap_info_struct swap_info[];
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
+extern swp_entry_t get_swap_page_of_type(int type);
extern int swap_duplicate(swp_entry_t);
extern int valid_swaphandles(swp_entry_t, unsigned long *);
extern void swap_free(swp_entry_t);
diff --git a/include/linux/synclink.h b/include/linux/synclink.h
index 763bd290f28..1b7cd8d1a71 100644
--- a/include/linux/synclink.h
+++ b/include/linux/synclink.h
@@ -1,7 +1,7 @@
/*
* SyncLink Multiprotocol Serial Adapter Driver
*
- * $Id: synclink.h,v 3.6 2002/02/20 21:58:20 paulkf Exp $
+ * $Id: synclink.h,v 3.10 2005/11/08 19:50:54 paulkf Exp $
*
* Copyright (C) 1998-2000 by Microgate Corporation
*
@@ -128,10 +128,14 @@
#define MGSL_BUS_TYPE_EISA 2
#define MGSL_BUS_TYPE_PCI 5
+#define MGSL_INTERFACE_MASK 0xf
#define MGSL_INTERFACE_DISABLE 0
#define MGSL_INTERFACE_RS232 1
#define MGSL_INTERFACE_V35 2
#define MGSL_INTERFACE_RS422 3
+#define MGSL_INTERFACE_RTS_EN 0x10
+#define MGSL_INTERFACE_LL 0x20
+#define MGSL_INTERFACE_RL 0x40
typedef struct _MGSL_PARAMS
{
@@ -163,6 +167,9 @@ typedef struct _MGSL_PARAMS
#define SYNCLINK_DEVICE_ID 0x0010
#define MGSCC_DEVICE_ID 0x0020
#define SYNCLINK_SCA_DEVICE_ID 0x0030
+#define SYNCLINK_GT_DEVICE_ID 0x0070
+#define SYNCLINK_GT4_DEVICE_ID 0x0080
+#define SYNCLINK_AC_DEVICE_ID 0x0090
#define MGSL_MAX_SERIAL_NUMBER 30
/*
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index c7007b1db91..3eed4734701 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -511,5 +511,12 @@ asmlinkage long sys_ioprio_set(int which, int who, int ioprio);
asmlinkage long sys_ioprio_get(int which, int who);
asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
unsigned long maxnode);
+asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
+ const unsigned long __user *from, const unsigned long __user *to);
+
+asmlinkage long sys_spu_run(int fd, __u32 __user *unpc,
+ __u32 __user *ustatus);
+asmlinkage long sys_spu_create(const char __user *name,
+ unsigned int flags, mode_t mode);
#endif
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index a9b80fc7f0f..7f472127b7b 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -180,6 +180,8 @@ enum
VM_VFS_CACHE_PRESSURE=26, /* dcache/icache reclaim pressure */
VM_LEGACY_VA_LAYOUT=27, /* legacy/compatibility virtual address space layout */
VM_SWAP_TOKEN_TIMEOUT=28, /* default time for token time out */
+ VM_DROP_PAGECACHE=29, /* int: nuke lots of pagecache */
+ VM_PERCPU_PAGELIST_FRACTION=30,/* int: fraction of pages in each percpu_pagelist */
};
diff --git a/include/linux/time.h b/include/linux/time.h
index 797ccd813bb..f2aca7ec632 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -4,7 +4,7 @@
#include <linux/types.h>
#ifdef __KERNEL__
-#include <linux/seqlock.h>
+# include <linux/seqlock.h>
#endif
#ifndef _STRUCT_TIMESPEC
@@ -13,7 +13,7 @@ struct timespec {
time_t tv_sec; /* seconds */
long tv_nsec; /* nanoseconds */
};
-#endif /* _STRUCT_TIMESPEC */
+#endif
struct timeval {
time_t tv_sec; /* seconds */
@@ -27,93 +27,103 @@ struct timezone {
#ifdef __KERNEL__
-/* Parameters used to convert the timespec values */
-#define MSEC_PER_SEC (1000L)
-#define USEC_PER_SEC (1000000L)
-#define NSEC_PER_SEC (1000000000L)
-#define NSEC_PER_USEC (1000L)
+/* Parameters used to convert the timespec values: */
+#define MSEC_PER_SEC 1000L
+#define USEC_PER_SEC 1000000L
+#define NSEC_PER_SEC 1000000000L
+#define NSEC_PER_USEC 1000L
-static __inline__ int timespec_equal(struct timespec *a, struct timespec *b)
-{
+static __inline__ int timespec_equal(struct timespec *a, struct timespec *b)
+{
return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
-}
+}
-/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
- * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
- * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
- *
- * [For the Julian calendar (which was used in Russia before 1917,
- * Britain & colonies before 1752, anywhere else before 1582,
- * and is still in use by some communities) leave out the
- * -year/100+year/400 terms, and add 10.]
- *
- * This algorithm was first published by Gauss (I think).
- *
- * WARNING: this function will overflow on 2106-02-07 06:28:16 on
- * machines were long is 32-bit! (However, as time_t is signed, we
- * will already get problems at other places on 2038-01-19 03:14:08)
+extern unsigned long mktime(const unsigned int year, const unsigned int mon,
+ const unsigned int day, const unsigned int hour,
+ const unsigned int min, const unsigned int sec);
+
+extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec);
+
+/*
+ * Returns true if the timespec is normalized, false if denormalized:
*/
-static inline unsigned long
-mktime (unsigned int year, unsigned int mon,
- unsigned int day, unsigned int hour,
- unsigned int min, unsigned int sec)
-{
- if (0 >= (int) (mon -= 2)) { /* 1..12 -> 11,12,1..10 */
- mon += 12; /* Puts Feb last since it has leap day */
- year -= 1;
- }
-
- return (((
- (unsigned long) (year/4 - year/100 + year/400 + 367*mon/12 + day) +
- year*365 - 719499
- )*24 + hour /* now have hours */
- )*60 + min /* now have minutes */
- )*60 + sec; /* finally seconds */
-}
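
Editor's note: the inline mktime() deleted above (now an extern, implemented once in kernel/time.c) is the classic Gauss day-counting formula. It is easy to check in user space; the sketch below reproduces the removed arithmetic verbatim and compares a known date against its Unix time:

    #include <stdio.h>

    /* Gregorian date -> seconds since 1970-01-01 00:00:00 UTC, exactly as in
     * the removed inline: February is treated as the last month of the
     * previous year so the leap day falls at the end. */
    static unsigned long my_mktime(unsigned int year, unsigned int mon,
                                   unsigned int day, unsigned int hour,
                                   unsigned int min, unsigned int sec)
    {
            if (0 >= (int)(mon -= 2)) {     /* 1..12 -> 11,12,1..10 */
                    mon += 12;
                    year -= 1;
            }

            return ((((unsigned long)(year/4 - year/100 + year/400 +
                                      367*mon/12 + day) +
                      year*365 - 719499) * 24 + hour) * 60 + min) * 60 + sec;
    }

    int main(void)
    {
            /* 2006-01-02 03:04:05 UTC == 1136171045 */
            printf("%lu\n", my_mktime(2006, 1, 2, 3, 4, 5));
            return 0;
    }
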
+#define timespec_valid(ts) \
+ (((ts)->tv_sec >= 0) && (((unsigned) (ts)->tv_nsec) < NSEC_PER_SEC))
+
+/*
+ * 64-bit nanosec type. Large enough to span 292+ years in nanosecond
+ * resolution. Ought to be enough for a while.
+ */
+typedef s64 nsec_t;
extern struct timespec xtime;
extern struct timespec wall_to_monotonic;
extern seqlock_t xtime_lock;
static inline unsigned long get_seconds(void)
-{
+{
return xtime.tv_sec;
}
struct timespec current_kernel_time(void);
-#define CURRENT_TIME (current_kernel_time())
-#define CURRENT_TIME_SEC ((struct timespec) { xtime.tv_sec, 0 })
+#define CURRENT_TIME (current_kernel_time())
+#define CURRENT_TIME_SEC ((struct timespec) { xtime.tv_sec, 0 })
extern void do_gettimeofday(struct timeval *tv);
extern int do_settimeofday(struct timespec *tv);
extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz);
-extern void clock_was_set(void); // call when ever the clock is set
-extern int do_posix_clock_monotonic_gettime(struct timespec *tp);
-extern long do_utimes(char __user * filename, struct timeval * times);
+#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
+extern long do_utimes(char __user *filename, struct timeval *times);
struct itimerval;
-extern int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue);
+extern int do_setitimer(int which, struct itimerval *value,
+ struct itimerval *ovalue);
extern int do_getitimer(int which, struct itimerval *value);
-extern void getnstimeofday (struct timespec *tv);
-extern void getnstimestamp(struct timespec *ts);
+extern void getnstimeofday(struct timespec *tv);
extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
-static inline void
-set_normalized_timespec (struct timespec *ts, time_t sec, long nsec)
+/**
+ * timespec_to_ns - Convert timespec to nanoseconds
+ * @ts: pointer to the timespec variable to be converted
+ *
+ * Returns the scalar nanosecond representation of the timespec
+ * parameter.
+ */
+static inline nsec_t timespec_to_ns(const struct timespec *ts)
{
- while (nsec >= NSEC_PER_SEC) {
- nsec -= NSEC_PER_SEC;
- ++sec;
- }
- while (nsec < 0) {
- nsec += NSEC_PER_SEC;
- --sec;
- }
- ts->tv_sec = sec;
- ts->tv_nsec = nsec;
+ return ((nsec_t) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
}
+/**
+ * timeval_to_ns - Convert timeval to nanoseconds
+ * @ts: pointer to the timeval variable to be converted
+ *
+ * Returns the scalar nanosecond representation of the timeval
+ * parameter.
+ */
+static inline nsec_t timeval_to_ns(const struct timeval *tv)
+{
+ return ((nsec_t) tv->tv_sec * NSEC_PER_SEC) +
+ tv->tv_usec * NSEC_PER_USEC;
+}
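
Editor's note: timespec_to_ns() and timeval_to_ns() above collapse a two-field time value into the new 64-bit nsec_t scalar. A quick stand-alone check of the same arithmetic (mock struct and typedef, not the kernel types):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC  1000000000L

    typedef int64_t nsec_t;

    struct ts { long tv_sec; long tv_nsec; };

    static nsec_t ts_to_ns(const struct ts *t)
    {
            return (nsec_t)t->tv_sec * NSEC_PER_SEC + t->tv_nsec;
    }

    int main(void)
    {
            struct ts t = { 2, 500000000 };                    /* 2.5 s */

            printf("%lld ns\n", (long long)ts_to_ns(&t));      /* 2500000000 ns */
            return 0;
    }
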
+
+/**
+ * ns_to_timespec - Convert nanoseconds to timespec
+ * @nsec: the nanoseconds value to be converted
+ *
+ * Returns the timespec representation of the nsec parameter.
+ */
+extern struct timespec ns_to_timespec(const nsec_t nsec);
+
+/**
+ * ns_to_timeval - Convert nanoseconds to timeval
+ * @nsec: the nanoseconds value to be converted
+ *
+ * Returns the timeval representation of the nsec parameter.
+ */
+extern struct timeval ns_to_timeval(const nsec_t nsec);
+
#endif /* __KERNEL__ */
#define NFDBITS __NFDBITS
@@ -126,49 +136,41 @@ set_normalized_timespec (struct timespec *ts, time_t sec, long nsec)
/*
* Names of the interval timers, and structure
- * defining a timer setting.
+ * defining a timer setting:
*/
-#define ITIMER_REAL 0
-#define ITIMER_VIRTUAL 1
-#define ITIMER_PROF 2
+#define ITIMER_REAL 0
+#define ITIMER_VIRTUAL 1
+#define ITIMER_PROF 2
-struct itimerspec {
- struct timespec it_interval; /* timer period */
- struct timespec it_value; /* timer expiration */
+struct itimerspec {
+ struct timespec it_interval; /* timer period */
+ struct timespec it_value; /* timer expiration */
};
-struct itimerval {
- struct timeval it_interval; /* timer interval */
- struct timeval it_value; /* current value */
+struct itimerval {
+ struct timeval it_interval; /* timer interval */
+ struct timeval it_value; /* current value */
};
-
/*
- * The IDs of the various system clocks (for POSIX.1b interval timers).
+ * The IDs of the various system clocks (for POSIX.1b interval timers):
*/
-#define CLOCK_REALTIME 0
-#define CLOCK_MONOTONIC 1
-#define CLOCK_PROCESS_CPUTIME_ID 2
-#define CLOCK_THREAD_CPUTIME_ID 3
-#define CLOCK_REALTIME_HR 4
-#define CLOCK_MONOTONIC_HR 5
+#define CLOCK_REALTIME 0
+#define CLOCK_MONOTONIC 1
+#define CLOCK_PROCESS_CPUTIME_ID 2
+#define CLOCK_THREAD_CPUTIME_ID 3
/*
- * The IDs of various hardware clocks
+ * The IDs of various hardware clocks:
*/
-
-
-#define CLOCK_SGI_CYCLE 10
-#define MAX_CLOCKS 16
-#define CLOCKS_MASK (CLOCK_REALTIME | CLOCK_MONOTONIC | \
- CLOCK_REALTIME_HR | CLOCK_MONOTONIC_HR)
-#define CLOCKS_MONO (CLOCK_MONOTONIC & CLOCK_MONOTONIC_HR)
+#define CLOCK_SGI_CYCLE 10
+#define MAX_CLOCKS 16
+#define CLOCKS_MASK (CLOCK_REALTIME | CLOCK_MONOTONIC)
+#define CLOCKS_MONO CLOCK_MONOTONIC
/*
- * The various flags for setting POSIX.1b interval timers.
+ * The various flags for setting POSIX.1b interval timers:
*/
-
-#define TIMER_ABSTIME 0x01
-
+#define TIMER_ABSTIME 0x01
#endif
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 72f3a778110..9b9877fd250 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -96,6 +96,6 @@ static inline void add_timer(struct timer_list *timer)
extern void init_timers(void);
extern void run_local_timers(void);
-extern void it_real_fn(unsigned long);
+extern int it_real_fn(void *);
#endif
diff --git a/include/linux/tipc.h b/include/linux/tipc.h
new file mode 100644
index 00000000000..243a15f5400
--- /dev/null
+++ b/include/linux/tipc.h
@@ -0,0 +1,212 @@
+/*
+ * include/linux/tipc.h: Include file for TIPC socket interface
+ *
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_TIPC_H_
+#define _LINUX_TIPC_H_
+
+#include <linux/types.h>
+
+/*
+ * TIPC addressing primitives
+ */
+
+struct tipc_portid {
+ __u32 ref;
+ __u32 node;
+};
+
+struct tipc_name {
+ __u32 type;
+ __u32 instance;
+};
+
+struct tipc_name_seq {
+ __u32 type;
+ __u32 lower;
+ __u32 upper;
+};
+
+static inline __u32 tipc_addr(unsigned int zone,
+ unsigned int cluster,
+ unsigned int node)
+{
+ return (zone << 24) | (cluster << 12) | node;
+}
+
+static inline unsigned int tipc_zone(__u32 addr)
+{
+ return addr >> 24;
+}
+
+static inline unsigned int tipc_cluster(__u32 addr)
+{
+ return (addr >> 12) & 0xfff;
+}
+
+static inline unsigned int tipc_node(__u32 addr)
+{
+ return addr & 0xfff;
+}
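
Editor's note: a TIPC network address packs <zone.cluster.node> into 8, 12 and 12 bits of a single __u32, so the helpers above are just shifts and masks. A stand-alone round-trip check in plain C:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t tipc_addr(unsigned zone, unsigned cluster, unsigned node)
    {
            return (zone << 24) | (cluster << 12) | node;
    }

    int main(void)
    {
            uint32_t a = tipc_addr(1, 1, 10);   /* address <1.1.10> */

            printf("addr=0x%08x zone=%u cluster=%u node=%u\n",
                   a, a >> 24, (a >> 12) & 0xfff, a & 0xfff);
            return 0;
    }
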
+
+/*
+ * Application-accessible port name types
+ */
+
+#define TIPC_CFG_SRV 0 /* configuration service name type */
+#define TIPC_TOP_SRV 1 /* topology service name type */
+#define TIPC_RESERVED_TYPES 64 /* lowest user-publishable name type */
+
+/*
+ * Publication scopes when binding port names and port name sequences
+ */
+
+#define TIPC_ZONE_SCOPE 1
+#define TIPC_CLUSTER_SCOPE 2
+#define TIPC_NODE_SCOPE 3
+
+/*
+ * Limiting values for messages
+ */
+
+#define TIPC_MAX_USER_MSG_SIZE 66000
+
+/*
+ * Message importance levels
+ */
+
+#define TIPC_LOW_IMPORTANCE 0 /* default */
+#define TIPC_MEDIUM_IMPORTANCE 1
+#define TIPC_HIGH_IMPORTANCE 2
+#define TIPC_CRITICAL_IMPORTANCE 3
+
+/*
+ * Msg rejection/connection shutdown reasons
+ */
+
+#define TIPC_OK 0
+#define TIPC_ERR_NO_NAME 1
+#define TIPC_ERR_NO_PORT 2
+#define TIPC_ERR_NO_NODE 3
+#define TIPC_ERR_OVERLOAD 4
+#define TIPC_CONN_SHUTDOWN 5
+
+/*
+ * TIPC topology subscription service definitions
+ */
+
+#define TIPC_SUB_PORTS 0x01 /* filter for port availability */
+#define TIPC_SUB_SERVICE 0x02 /* filter for service availability */
+#if 0
+/* The following filter options are not currently implemented */
+#define TIPC_SUB_NO_BIND_EVTS 0x04 /* filter out "publish" events */
+#define TIPC_SUB_NO_UNBIND_EVTS 0x08 /* filter out "withdraw" events */
+#define TIPC_SUB_SINGLE_EVT 0x10 /* expire after first event */
+#endif
+
+#define TIPC_WAIT_FOREVER ~0 /* timeout for permanent subscription */
+
+struct tipc_subscr {
+ struct tipc_name_seq seq; /* name sequence of interest */
+ __u32 timeout; /* subscription duration (in ms) */
+ __u32 filter; /* bitmask of filter options */
+ char usr_handle[8]; /* available for subscriber use */
+};
+
+#define TIPC_PUBLISHED 1 /* publication event */
+#define TIPC_WITHDRAWN 2 /* withdraw event */
+#define TIPC_SUBSCR_TIMEOUT 3 /* subscription timeout event */
+
+struct tipc_event {
+ __u32 event; /* event type */
+ __u32 found_lower; /* matching name seq instances */
+ __u32 found_upper; /* " " " " */
+ struct tipc_portid port; /* associated port */
+ struct tipc_subscr s; /* associated subscription */
+};
+
+/*
+ * Socket API
+ */
+
+#ifndef AF_TIPC
+#define AF_TIPC 30
+#endif
+
+#ifndef PF_TIPC
+#define PF_TIPC AF_TIPC
+#endif
+
+#ifndef SOL_TIPC
+#define SOL_TIPC 271
+#endif
+
+#define TIPC_ADDR_NAMESEQ 1
+#define TIPC_ADDR_MCAST 1
+#define TIPC_ADDR_NAME 2
+#define TIPC_ADDR_ID 3
+
+struct sockaddr_tipc {
+ unsigned short family;
+ unsigned char addrtype;
+ signed char scope;
+ union {
+ struct tipc_portid id;
+ struct tipc_name_seq nameseq;
+ struct {
+ struct tipc_name name;
+ __u32 domain; /* 0: own zone */
+ } name;
+ } addr;
+};
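
Editor's note: putting the pieces together, binding a name sequence to a TIPC socket fills a sockaddr_tipc with addrtype TIPC_ADDR_NAMESEQ. A hedged user-space sketch against this header (the service type 18888, the instance range and the SOCK_RDM choice are arbitrary examples):

    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/tipc.h>

    /* Returns a bound TIPC socket descriptor, or -1 on error. */
    int bind_example_service(void)
    {
            struct sockaddr_tipc addr;
            int sd = socket(AF_TIPC, SOCK_RDM, 0);

            if (sd < 0)
                    return -1;

            memset(&addr, 0, sizeof(addr));
            addr.family = AF_TIPC;
            addr.addrtype = TIPC_ADDR_NAMESEQ;
            addr.scope = TIPC_NODE_SCOPE;
            addr.addr.nameseq.type = 18888;     /* made-up service type */
            addr.addr.nameseq.lower = 0;
            addr.addr.nameseq.upper = 99;       /* publish instances 0..99 */

            if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                    close(sd);
                    return -1;
            }
            return sd;
    }
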
+
+/*
+ * Ancillary data objects supported by recvmsg()
+ */
+
+#define TIPC_ERRINFO 1 /* error info */
+#define TIPC_RETDATA 2 /* returned data */
+#define TIPC_DESTNAME 3 /* destination name */
+
+/*
+ * TIPC-specific socket option values
+ */
+
+#define TIPC_IMPORTANCE 127 /* Default: TIPC_LOW_IMPORTANCE */
+#define TIPC_SRC_DROPPABLE 128 /* Default: 0 (resend congested msg) */
+#define TIPC_DEST_DROPPABLE 129 /* Default: based on socket type */
+#define TIPC_CONN_TIMEOUT 130 /* Default: 8000 (ms) */
+
+#endif
diff --git a/include/linux/tipc_config.h b/include/linux/tipc_config.h
new file mode 100644
index 00000000000..a52c8c64a5a
--- /dev/null
+++ b/include/linux/tipc_config.h
@@ -0,0 +1,407 @@
+/*
+ * include/linux/tipc_config.h: Include file for TIPC configuration interface
+ *
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_TIPC_CONFIG_H_
+#define _LINUX_TIPC_CONFIG_H_
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <asm/byteorder.h>
+
+/*
+ * Configuration
+ *
+ * All configuration management messaging involves sending a request message
+ * to the TIPC configuration service on a node, which sends a reply message
+ * back. (In the future multi-message replies may be supported.)
+ *
+ * Both request and reply messages consist of a transport header and payload.
+ * The transport header contains info about the desired operation;
+ * the payload consists of zero or more type/length/value (TLV) items
+ * which specify parameters or results for the operation.
+ *
+ * For many operations, the request and reply messages have a fixed number
+ * of TLVs (usually zero or one); however, some reply messages may return
+ * a variable number of TLVs. A failed request is denoted by the presence
+ * of an "error string" TLV in the reply message instead of the TLV(s) the
+ * reply should contain if the request succeeds.
+ */
+
+/*
+ * Public commands:
+ * May be issued by any process.
+ * Accepted by own node, or by remote node only if remote management enabled.
+ */
+
+#define TIPC_CMD_NOOP 0x0000 /* tx none, rx none */
+#define TIPC_CMD_GET_NODES 0x0001 /* tx net_addr, rx node_info(s) */
+#define TIPC_CMD_GET_MEDIA_NAMES 0x0002 /* tx none, rx media_name(s) */
+#define TIPC_CMD_GET_BEARER_NAMES 0x0003 /* tx none, rx bearer_name(s) */
+#define TIPC_CMD_GET_LINKS 0x0004 /* tx net_addr, rx link_info(s) */
+#define TIPC_CMD_SHOW_NAME_TABLE 0x0005 /* tx name_tbl_query, rx ultra_string */
+#define TIPC_CMD_SHOW_PORTS 0x0006 /* tx none, rx ultra_string */
+#define TIPC_CMD_SHOW_LINK_STATS 0x000B /* tx link_name, rx ultra_string */
+
+#if 0
+#define TIPC_CMD_SHOW_PORT_STATS 0x0008 /* tx port_ref, rx ultra_string */
+#define TIPC_CMD_RESET_PORT_STATS 0x0009 /* tx port_ref, rx none */
+#define TIPC_CMD_GET_ROUTES 0x000A /* tx ?, rx ? */
+#define TIPC_CMD_GET_LINK_PEER 0x000D /* tx link_name, rx ? */
+#endif
+
+/*
+ * Protected commands:
+ * May only be issued by "network administration capable" process.
+ * Accepted by own node, or by remote node only if remote management enabled
+ * and this node is zone manager.
+ */
+
+#define TIPC_CMD_GET_REMOTE_MNG 0x4003 /* tx none, rx unsigned */
+#define TIPC_CMD_GET_MAX_PORTS 0x4004 /* tx none, rx unsigned */
+#define TIPC_CMD_GET_MAX_PUBL 0x4005 /* tx none, rx unsigned */
+#define TIPC_CMD_GET_MAX_SUBSCR 0x4006 /* tx none, rx unsigned */
+#define TIPC_CMD_GET_MAX_ZONES 0x4007 /* tx none, rx unsigned */
+#define TIPC_CMD_GET_MAX_CLUSTERS 0x4008 /* tx none, rx unsigned */
+#define TIPC_CMD_GET_MAX_NODES 0x4009 /* tx none, rx unsigned */
+#define TIPC_CMD_GET_MAX_SLAVES 0x400A /* tx none, rx unsigned */
+#define TIPC_CMD_GET_NETID 0x400B /* tx none, rx unsigned */
+
+#define TIPC_CMD_ENABLE_BEARER 0x4101 /* tx bearer_config, rx none */
+#define TIPC_CMD_DISABLE_BEARER 0x4102 /* tx bearer_name, rx none */
+#define TIPC_CMD_SET_LINK_TOL 0x4107 /* tx link_config, rx none */
+#define TIPC_CMD_SET_LINK_PRI 0x4108 /* tx link_config, rx none */
+#define TIPC_CMD_SET_LINK_WINDOW 0x4109 /* tx link_config, rx none */
+#define TIPC_CMD_SET_LOG_SIZE 0x410A /* tx unsigned, rx none */
+#define TIPC_CMD_DUMP_LOG 0x410B /* tx none, rx ultra_string */
+#define TIPC_CMD_RESET_LINK_STATS 0x410C /* tx link_name, rx none */
+
+#if 0
+#define TIPC_CMD_CREATE_LINK 0x4103 /* tx link_create, rx none */
+#define TIPC_CMD_REMOVE_LINK 0x4104 /* tx link_name, rx none */
+#define TIPC_CMD_BLOCK_LINK 0x4105 /* tx link_name, rx none */
+#define TIPC_CMD_UNBLOCK_LINK 0x4106 /* tx link_name, rx none */
+#endif
+
+/*
+ * Private commands:
+ * May only be issued by "network administration capable" process.
+ * Accepted by own node only; cannot be used on a remote node.
+ */
+
+#define TIPC_CMD_SET_NODE_ADDR 0x8001 /* tx net_addr, rx none */
+#if 0
+#define TIPC_CMD_SET_ZONE_MASTER 0x8002 /* tx none, rx none */
+#endif
+#define TIPC_CMD_SET_REMOTE_MNG 0x8003 /* tx unsigned, rx none */
+#define TIPC_CMD_SET_MAX_PORTS 0x8004 /* tx unsigned, rx none */
+#define TIPC_CMD_SET_MAX_PUBL 0x8005 /* tx unsigned, rx none */
+#define TIPC_CMD_SET_MAX_SUBSCR 0x8006 /* tx unsigned, rx none */
+#define TIPC_CMD_SET_MAX_ZONES 0x8007 /* tx unsigned, rx none */
+#define TIPC_CMD_SET_MAX_CLUSTERS 0x8008 /* tx unsigned, rx none */
+#define TIPC_CMD_SET_MAX_NODES 0x8009 /* tx unsigned, rx none */
+#define TIPC_CMD_SET_MAX_SLAVES 0x800A /* tx unsigned, rx none */
+#define TIPC_CMD_SET_NETID 0x800B /* tx unsigned, rx none */
+
+/*
+ * TLV types defined for TIPC
+ */
+
+#define TIPC_TLV_NONE 0 /* no TLV present */
+#define TIPC_TLV_VOID 1 /* empty TLV (0 data bytes)*/
+#define TIPC_TLV_UNSIGNED 2 /* 32-bit integer */
+#define TIPC_TLV_STRING 3 /* char[128] (max) */
+#define TIPC_TLV_LARGE_STRING 4 /* char[2048] (max) */
+#define TIPC_TLV_ULTRA_STRING 5 /* char[32768] (max) */
+
+#define TIPC_TLV_ERROR_STRING 16 /* char[128] containing "error code" */
+#define TIPC_TLV_NET_ADDR 17 /* 32-bit integer denoting <Z.C.N> */
+#define TIPC_TLV_MEDIA_NAME 18 /* char[TIPC_MAX_MEDIA_NAME] */
+#define TIPC_TLV_BEARER_NAME 19 /* char[TIPC_MAX_BEARER_NAME] */
+#define TIPC_TLV_LINK_NAME 20 /* char[TIPC_MAX_LINK_NAME] */
+#define TIPC_TLV_NODE_INFO 21 /* struct tipc_node_info */
+#define TIPC_TLV_LINK_INFO 22 /* struct tipc_link_info */
+#define TIPC_TLV_BEARER_CONFIG 23 /* struct tipc_bearer_config */
+#define TIPC_TLV_LINK_CONFIG 24 /* struct tipc_link_config */
+#define TIPC_TLV_NAME_TBL_QUERY 25 /* struct tipc_name_table_query */
+#define TIPC_TLV_PORT_REF 26 /* 32-bit port reference */
+
+/*
+ * Maximum sizes of TIPC bearer-related names (including terminating NUL)
+ */
+
+#define TIPC_MAX_MEDIA_NAME 16 /* format = media */
+#define TIPC_MAX_IF_NAME 16 /* format = interface */
+#define TIPC_MAX_BEARER_NAME 32 /* format = media:interface */
+#define TIPC_MAX_LINK_NAME 60 /* format = Z.C.N:interface-Z.C.N:interface */
+
+/*
+ * Link priority limits (range from 0 to # priorities - 1)
+ */
+
+#define TIPC_NUM_LINK_PRI 32
+
+/*
+ * Link tolerance limits (min, default, max), in ms
+ */
+
+#define TIPC_MIN_LINK_TOL 50
+#define TIPC_DEF_LINK_TOL 1500
+#define TIPC_MAX_LINK_TOL 30000
+
+/*
+ * Link window limits (min, default, max), in packets
+ */
+
+#define TIPC_MIN_LINK_WIN 16
+#define TIPC_DEF_LINK_WIN 50
+#define TIPC_MAX_LINK_WIN 150
+
+
+struct tipc_node_info {
+ __u32 addr; /* network address of node */
+ __u32 up; /* 0=down, 1= up */
+};
+
+struct tipc_link_info {
+ __u32 dest; /* network address of peer node */
+ __u32 up; /* 0=down, 1=up */
+ char str[TIPC_MAX_LINK_NAME]; /* link name */
+};
+
+struct tipc_bearer_config {
+ __u32 priority; /* Range [1,31]. Override per link */
+ __u32 detect_scope;
+ char name[TIPC_MAX_BEARER_NAME];
+};
+
+struct tipc_link_config {
+ __u32 value;
+ char name[TIPC_MAX_LINK_NAME];
+};
+
+#define TIPC_NTQ_ALLTYPES 0x80000000
+
+struct tipc_name_table_query {
+ __u32 depth; /* 1:type, 2:+name info, 3:+port info, 4+:+debug info */
+ __u32 type; /* {t,l,u} info ignored if high bit of "depth" is set */
+ __u32 lowbound; /* (i.e. displays all entries of name table) */
+ __u32 upbound;
+};
+
+/*
+ * The error string TLV is a null-terminated string describing the cause
+ * of the request failure. To simplify error processing (and to save space)
+ * the first character of the string can be a special error code character
+ * (lying within the range 0x80 to 0xFF) which represents a pre-defined reason.
+ */
+
+#define TIPC_CFG_TLV_ERROR "\x80" /* request contains incorrect TLV(s) */
+#define TIPC_CFG_NOT_NET_ADMIN "\x81" /* must be network administrator */
+#define TIPC_CFG_NOT_ZONE_MSTR "\x82" /* must be zone master */
+#define TIPC_CFG_NO_REMOTE "\x83" /* remote management not enabled */
+#define TIPC_CFG_NOT_SUPPORTED "\x84" /* request is not supported by TIPC */
+#define TIPC_CFG_INVALID_VALUE "\x85" /* request has invalid argument value */
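
As an aside, a minimal sketch (not part of the patch) of how a receiver might interpret this TLV; the tlv/space arguments and handle_error_code() are hypothetical, and TLV_CHECK()/TLV_DATA() are defined further down in this header:

	static void example_handle_error_tlv(const void *tlv, __u16 space)
	{
		if (TLV_CHECK(tlv, space, TIPC_TLV_ERROR_STRING)) {
			const char *s = TLV_DATA(tlv);

			if ((unsigned char)s[0] >= 0x80)
				/* pre-defined reason, e.g. TIPC_CFG_NOT_NET_ADMIN[0] */
				handle_error_code((unsigned char)s[0]);
			else
				/* free-form, null-terminated explanation */
				printk(KERN_WARNING "TIPC config error: %s\n", s);
		}
	}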
+
+#if 0
+/* prototype TLV structures for proposed commands */
+struct tipc_link_create {
+ __u32 domain;
+ struct tipc_media_addr peer_addr;
+ char bearer_name[TIPC_MAX_BEARER_NAME];
+};
+
+struct tipc_route_info {
+ __u32 dest;
+ __u32 router;
+};
+#endif
+
+/*
+ * A TLV consists of a descriptor, followed by the TLV value.
+ * TLV descriptor fields are stored in network byte order;
+ * TLV values must also be stored in network byte order (where applicable).
+ * TLV descriptors must be aligned to addresses which are multiple of 4,
+ * so up to 3 bytes of padding may exist at the end of the TLV value area.
+ * There must not be any padding between the TLV descriptor and its value.
+ */
+
+struct tlv_desc {
+ __u16 tlv_len; /* TLV length (descriptor + value) */
+ __u16 tlv_type; /* TLV identifier */
+};
+
+#define TLV_ALIGNTO 4
+
+#define TLV_ALIGN(datalen) (((datalen)+(TLV_ALIGNTO-1)) & ~(TLV_ALIGNTO-1))
+#define TLV_LENGTH(datalen) (sizeof(struct tlv_desc) + (datalen))
+#define TLV_SPACE(datalen) (TLV_ALIGN(TLV_LENGTH(datalen)))
+#define TLV_DATA(tlv) ((void *)((char *)(tlv) + TLV_LENGTH(0)))
+
+static inline int TLV_OK(const void *tlv, __u16 space)
+{
+ /*
+ * Would also like to check that "tlv" is a multiple of 4,
+ * but don't know how to do this in a portable way.
+ * - Tried doing (!(tlv & (TLV_ALIGNTO-1))), but GCC compiler
+ * won't allow binary "&" with a pointer.
+ * - Tried casting "tlv" to integer type, but causes warning about size
+ * mismatch when pointer is bigger than chosen type (int, long, ...).
+ */
+
+ return (space >= TLV_SPACE(0)) &&
+ (ntohs(((struct tlv_desc *)tlv)->tlv_len) <= space);
+}
+
+static inline int TLV_CHECK(const void *tlv, __u16 space, __u16 exp_type)
+{
+ return TLV_OK(tlv, space) &&
+ (ntohs(((struct tlv_desc *)tlv)->tlv_type) == exp_type);
+}
+
+static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len)
+{
+ struct tlv_desc *tlv_ptr;
+ int tlv_len;
+
+ tlv_len = TLV_LENGTH(len);
+ tlv_ptr = (struct tlv_desc *)tlv;
+ tlv_ptr->tlv_type = htons(type);
+ tlv_ptr->tlv_len = htons(tlv_len);
+ if (len && data)
+ memcpy(TLV_DATA(tlv_ptr), data, len);
+ return TLV_SPACE(len);
+}
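
A minimal usage sketch (not part of the patch) tying TLV_SET() and TLV_CHECK() together; the buffer and link name are hypothetical, the name format follows TIPC_MAX_LINK_NAME above:

	static void example_link_name_tlv(void)
	{
		char buf[TLV_SPACE(TIPC_MAX_LINK_NAME)];
		char link_name[TIPC_MAX_LINK_NAME] = "1.1.1:eth0-1.1.2:eth0";
		int space;

		/* encode: descriptor in network byte order, value copied behind it */
		space = TLV_SET(buf, TIPC_TLV_LINK_NAME, link_name, sizeof(link_name));

		/* decode: verify type and bounds before touching the value */
		if (TLV_CHECK(buf, space, TIPC_TLV_LINK_NAME))
			printk(KERN_INFO "link name TLV: %s\n", (char *)TLV_DATA(buf));
	}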
+
+/*
+ * A TLV list descriptor simplifies processing of messages
+ * containing multiple TLVs.
+ */
+
+struct tlv_list_desc {
+ struct tlv_desc *tlv_ptr; /* ptr to current TLV */
+ __u32 tlv_space; /* # bytes from curr TLV to list end */
+};
+
+static inline void TLV_LIST_INIT(struct tlv_list_desc *list,
+ void *data, __u32 space)
+{
+ list->tlv_ptr = (struct tlv_desc *)data;
+ list->tlv_space = space;
+}
+
+static inline int TLV_LIST_EMPTY(struct tlv_list_desc *list)
+{
+ return (list->tlv_space == 0);
+}
+
+static inline int TLV_LIST_CHECK(struct tlv_list_desc *list, __u16 exp_type)
+{
+ return TLV_CHECK(list->tlv_ptr, list->tlv_space, exp_type);
+}
+
+static inline void *TLV_LIST_DATA(struct tlv_list_desc *list)
+{
+ return TLV_DATA(list->tlv_ptr);
+}
+
+static inline void TLV_LIST_STEP(struct tlv_list_desc *list)
+{
+ __u16 tlv_space = TLV_ALIGN(ntohs(list->tlv_ptr->tlv_len));
+
+ list->tlv_ptr = (struct tlv_desc *)((char *)list->tlv_ptr + tlv_space);
+ list->tlv_space -= tlv_space;
+}
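
A sketch (not part of the patch) of the intended iteration pattern when a reply carries several link_info TLVs; msg, msg_len and process_link() are hypothetical:

	static void example_walk_links(void *msg, __u32 msg_len)
	{
		struct tlv_list_desc list;

		TLV_LIST_INIT(&list, msg, msg_len);
		while (!TLV_LIST_EMPTY(&list)) {
			if (!TLV_LIST_CHECK(&list, TIPC_TLV_LINK_INFO))
				break;		/* unexpected TLV type */
			process_link((struct tipc_link_info *)TLV_LIST_DATA(&list));
			TLV_LIST_STEP(&list);
		}
	}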
+
+/*
+ * Configuration messages exchanged via NETLINK_GENERIC use the following
+ * family id, name, version and command.
+ */
+#define TIPC_GENL_NAME "TIPC"
+#define TIPC_GENL_VERSION 0x1
+#define TIPC_GENL_CMD 0x1
+
+/*
+ * TIPC specific header used in NETLINK_GENERIC requests.
+ */
+struct tipc_genlmsghdr {
+ __u32 dest; /* Destination address */
+ __u16 cmd; /* Command */
+ __u16 reserved; /* Unused */
+};
+
+#define TIPC_GENL_HDRLEN NLMSG_ALIGN(sizeof(struct tipc_genlmsghdr))
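
For orientation (not part of the patch): a NETLINK_GENERIC request to the family named TIPC_GENL_NAME carries this header at the start of its payload, followed by the command's TLVs. The values below are purely illustrative:

	static struct tipc_genlmsghdr example_req_hdr = {
		.dest = 0,			/* destination node as a 32-bit <Z.C.N> value */
		.cmd  = TIPC_CMD_GET_LINKS,	/* the net_addr TLV follows this header */
	};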
+
+/*
+ * Configuration messages exchanged via TIPC sockets use the TIPC configuration
+ * message header, which is defined below. This structure is analogous
+ * to the Netlink message header, but fields are stored in network byte order
+ * and no padding is permitted between the header and the message data
+ * that follows.
+ */
+
+struct tipc_cfg_msg_hdr
+{
+ __u32 tcm_len; /* Message length (including header) */
+ __u16 tcm_type; /* Command type */
+ __u16 tcm_flags; /* Additional flags */
+ char tcm_reserved[8]; /* Unused */
+};
+
+#define TCM_F_REQUEST 0x1 /* Flag: Request message */
+#define TCM_F_MORE 0x2 /* Flag: Message to be continued */
+
+#define TCM_ALIGN(datalen) (((datalen)+3) & ~3)
+#define TCM_LENGTH(datalen) (sizeof(struct tipc_cfg_msg_hdr) + datalen)
+#define TCM_SPACE(datalen) (TCM_ALIGN(TCM_LENGTH(datalen)))
+#define TCM_DATA(tcm_hdr) ((void *)((char *)(tcm_hdr) + TCM_LENGTH(0)))
+
+static inline int TCM_SET(void *msg, __u16 cmd, __u16 flags,
+ void *data, __u16 data_len)
+{
+ struct tipc_cfg_msg_hdr *tcm_hdr;
+ int msg_len;
+
+ msg_len = TCM_LENGTH(data_len);
+ tcm_hdr = (struct tipc_cfg_msg_hdr *)msg;
+ tcm_hdr->tcm_len = htonl(msg_len);
+ tcm_hdr->tcm_type = htons(cmd);
+ tcm_hdr->tcm_flags = htons(flags);
+ if (data_len && data)
+ memcpy(TCM_DATA(msg), data, data_len);
+ return TCM_SPACE(data_len);
+}
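
A sketch (not part of the patch) of composing a complete request for the TIPC configuration socket: a TCM header wrapping one link_name TLV. The link name is hypothetical, and the caller's req buffer is assumed to hold at least TCM_SPACE(TLV_SPACE(TIPC_MAX_LINK_NAME)) bytes:

	static int example_build_reset_stats_req(void *req)
	{
		char tlv_buf[TLV_SPACE(TIPC_MAX_LINK_NAME)];
		char link_name[TIPC_MAX_LINK_NAME] = "1.1.1:eth0-1.1.2:eth0";
		int tlv_space;

		tlv_space = TLV_SET(tlv_buf, TIPC_TLV_LINK_NAME,
				    link_name, sizeof(link_name));
		/* returns the total request length; caller sends that many bytes */
		return TCM_SET(req, TIPC_CMD_RESET_LINK_STATS, TCM_F_REQUEST,
			       tlv_buf, tlv_space);
	}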
+
+#endif
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 3df1d474e5c..315a5163d6a 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -86,7 +86,6 @@
.max_interval = 2, \
.busy_factor = 8, \
.imbalance_pct = 110, \
- .cache_hot_time = 0, \
.cache_nice_tries = 0, \
.per_cpu_gain = 25, \
.busy_idx = 0, \
@@ -117,7 +116,6 @@
.max_interval = 4, \
.busy_factor = 64, \
.imbalance_pct = 125, \
- .cache_hot_time = (5*1000000/2), \
.cache_nice_tries = 1, \
.per_cpu_gain = 100, \
.busy_idx = 2, \
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 1267f88ece6..3787102e4b1 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -23,6 +23,7 @@
#include <linux/workqueue.h>
#include <linux/tty_driver.h>
#include <linux/tty_ldisc.h>
+#include <linux/screen_info.h>
#include <asm/system.h>
@@ -37,77 +38,6 @@
#define NR_LDISCS 16
/*
- * These are set up by the setup-routine at boot-time:
- */
-
-struct screen_info {
- u8 orig_x; /* 0x00 */
- u8 orig_y; /* 0x01 */
- u16 dontuse1; /* 0x02 -- EXT_MEM_K sits here */
- u16 orig_video_page; /* 0x04 */
- u8 orig_video_mode; /* 0x06 */
- u8 orig_video_cols; /* 0x07 */
- u16 unused2; /* 0x08 */
- u16 orig_video_ega_bx; /* 0x0a */
- u16 unused3; /* 0x0c */
- u8 orig_video_lines; /* 0x0e */
- u8 orig_video_isVGA; /* 0x0f */
- u16 orig_video_points; /* 0x10 */
-
- /* VESA graphic mode -- linear frame buffer */
- u16 lfb_width; /* 0x12 */
- u16 lfb_height; /* 0x14 */
- u16 lfb_depth; /* 0x16 */
- u32 lfb_base; /* 0x18 */
- u32 lfb_size; /* 0x1c */
- u16 dontuse2, dontuse3; /* 0x20 -- CL_MAGIC and CL_OFFSET here */
- u16 lfb_linelength; /* 0x24 */
- u8 red_size; /* 0x26 */
- u8 red_pos; /* 0x27 */
- u8 green_size; /* 0x28 */
- u8 green_pos; /* 0x29 */
- u8 blue_size; /* 0x2a */
- u8 blue_pos; /* 0x2b */
- u8 rsvd_size; /* 0x2c */
- u8 rsvd_pos; /* 0x2d */
- u16 vesapm_seg; /* 0x2e */
- u16 vesapm_off; /* 0x30 */
- u16 pages; /* 0x32 */
- u16 vesa_attributes; /* 0x34 */
- u32 capabilities; /* 0x36 */
- /* 0x3a -- 0x3f reserved for future expansion */
-};
-
-extern struct screen_info screen_info;
-
-#define ORIG_X (screen_info.orig_x)
-#define ORIG_Y (screen_info.orig_y)
-#define ORIG_VIDEO_MODE (screen_info.orig_video_mode)
-#define ORIG_VIDEO_COLS (screen_info.orig_video_cols)
-#define ORIG_VIDEO_EGA_BX (screen_info.orig_video_ega_bx)
-#define ORIG_VIDEO_LINES (screen_info.orig_video_lines)
-#define ORIG_VIDEO_ISVGA (screen_info.orig_video_isVGA)
-#define ORIG_VIDEO_POINTS (screen_info.orig_video_points)
-
-#define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
-#define VIDEO_TYPE_CGA 0x11 /* CGA Display */
-#define VIDEO_TYPE_EGAM 0x20 /* EGA/VGA in Monochrome Mode */
-#define VIDEO_TYPE_EGAC 0x21 /* EGA in Color Mode */
-#define VIDEO_TYPE_VGAC 0x22 /* VGA+ in Color Mode */
-#define VIDEO_TYPE_VLFB 0x23 /* VESA VGA in graphic mode */
-
-#define VIDEO_TYPE_PICA_S3 0x30 /* ACER PICA-61 local S3 video */
-#define VIDEO_TYPE_MIPS_G364 0x31 /* MIPS Magnum 4000 G364 video */
-#define VIDEO_TYPE_SGI 0x33 /* Various SGI graphics hardware */
-
-#define VIDEO_TYPE_TGAC 0x40 /* DEC TGA */
-
-#define VIDEO_TYPE_SUN 0x50 /* Sun frame buffer. */
-#define VIDEO_TYPE_SUNPCI 0x51 /* Sun PCI based frame buffer. */
-
-#define VIDEO_TYPE_PMAC 0x60 /* PowerMacintosh frame buffer. */
-
-/*
* This character is the same as _POSIX_VDISABLE: it cannot be used as
* a c_cc[] character, but indicates that a particular special character
* isn't in use (eg VINTR has no character etc)
@@ -121,16 +51,22 @@ extern struct screen_info screen_info;
*/
#define TTY_FLIPBUF_SIZE 512
-struct tty_flip_buffer {
+struct tty_buffer {
+ struct tty_buffer *next;
+ char *char_buf_ptr;
+ unsigned char *flag_buf_ptr;
+ int used;
+ int size;
+ /* Data points here */
+ unsigned long data[0];
+};
+
+struct tty_bufhead {
struct work_struct work;
struct semaphore pty_sem;
- char *char_buf_ptr;
- unsigned char *flag_buf_ptr;
- int count;
- int buf_num;
- unsigned char char_buf[2*TTY_FLIPBUF_SIZE];
- char flag_buf[2*TTY_FLIPBUF_SIZE];
- unsigned char slop[4]; /* N.B. bug overwrites buffer by 1 */
+ struct tty_buffer *head; /* Queue head */
+ struct tty_buffer *tail; /* Active buffer */
+ struct tty_buffer *free; /* Free queue head */
};
/*
* The pty uses char_buf and flag_buf as a contiguous buffer
@@ -256,10 +192,11 @@ struct tty_struct {
unsigned char stopped:1, hw_stopped:1, flow_stopped:1, packet:1;
unsigned char low_latency:1, warned:1;
unsigned char ctrl_status;
+ unsigned int receive_room; /* Bytes free for queue */
struct tty_struct *link;
struct fasync_struct *fasync;
- struct tty_flip_buffer flip;
+ struct tty_bufhead buf;
int max_flip_cnt;
int alt_speed; /* For magic substitution of 38400 bps */
wait_queue_head_t write_wait;
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index abe9bfcf226..be1400e8248 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -1,25 +1,33 @@
#ifndef _LINUX_TTY_FLIP_H
#define _LINUX_TTY_FLIP_H
+extern int tty_buffer_request_room(struct tty_struct *tty, size_t size);
+extern int tty_insert_flip_string(struct tty_struct *tty, unsigned char *chars, size_t size);
+extern int tty_insert_flip_string_flags(struct tty_struct *tty, unsigned char *chars, char *flags, size_t size);
+extern int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size);
+extern int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size);
+
#ifdef INCLUDE_INLINE_FUNCS
#define _INLINE_ extern
#else
#define _INLINE_ static __inline__
#endif
-_INLINE_ void tty_insert_flip_char(struct tty_struct *tty,
+_INLINE_ int tty_insert_flip_char(struct tty_struct *tty,
unsigned char ch, char flag)
{
- if (tty->flip.count < TTY_FLIPBUF_SIZE) {
- tty->flip.count++;
- *tty->flip.flag_buf_ptr++ = flag;
- *tty->flip.char_buf_ptr++ = ch;
+ struct tty_buffer *tb = tty->buf.tail;
+ if (tb && tb->used < tb->size) {
+ tb->flag_buf_ptr[tb->used] = flag;
+ tb->char_buf_ptr[tb->used++] = ch;
+ return 1;
}
+ return tty_insert_flip_string_flags(tty, &ch, &flag, 1);
}
_INLINE_ void tty_schedule_flip(struct tty_struct *tty)
{
- schedule_delayed_work(&tty->flip.work, 1);
+ schedule_delayed_work(&tty->buf.work, 1);
}
#undef _INLINE_
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 6066afde5ce..83c6e6c10eb 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -81,14 +81,6 @@
* pointer of flag bytes which indicate whether a character was
* received with a parity error, etc.
*
- * int (*receive_room)(struct tty_struct *);
- *
- * This function is called by the low-level tty driver to
- * determine how many characters the line discpline can accept.
- * The low-level driver must not send more characters than was
- * indicated by receive_room, or the line discpline may drop
- * those characters.
- *
* void (*write_wakeup)(struct tty_struct *);
*
* This function is called by the low-level tty driver to signal
@@ -136,7 +128,6 @@ struct tty_ldisc {
*/
void (*receive_buf)(struct tty_struct *, const unsigned char *cp,
char *fp, int count);
- int (*receive_room)(struct tty_struct *);
void (*write_wakeup)(struct tty_struct *);
struct module *owner;
diff --git a/include/linux/video_decoder.h b/include/linux/video_decoder.h
index 0e9e48b83e3..121e26da2c1 100644
--- a/include/linux/video_decoder.h
+++ b/include/linux/video_decoder.h
@@ -1,6 +1,8 @@
#ifndef _LINUX_VIDEO_DECODER_H
#define _LINUX_VIDEO_DECODER_H
+#define HAVE_VIDEO_DECODER 1
+
struct video_decoder_capability { /* this name is too long */
__u32 flags;
#define VIDEO_DECODER_PAL 1 /* can decode PAL signal */
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 1cded681eb6..ce40675324b 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -642,6 +642,12 @@ typedef __u64 v4l2_std_id;
#define V4L2_STD_ATSC_8_VSB ((v4l2_std_id)0x01000000)
#define V4L2_STD_ATSC_16_VSB ((v4l2_std_id)0x02000000)
+/* some merged standards */
+#define V4L2_STD_MN (V4L2_STD_PAL_M|V4L2_STD_PAL_N|V4L2_STD_PAL_Nc|V4L2_STD_NTSC)
+#define V4L2_STD_B (V4L2_STD_PAL_B|V4L2_STD_PAL_B1|V4L2_STD_SECAM_B)
+#define V4L2_STD_GH (V4L2_STD_PAL_G|V4L2_STD_PAL_H|V4L2_STD_SECAM_G|V4L2_STD_SECAM_H)
+#define V4L2_STD_DK (V4L2_STD_PAL_DK|V4L2_STD_SECAM_DK)
+
/* some common needed stuff */
#define V4L2_STD_PAL_BG (V4L2_STD_PAL_B |\
V4L2_STD_PAL_B1 |\
@@ -662,7 +668,8 @@ typedef __u64 v4l2_std_id;
V4L2_STD_SECAM_G |\
V4L2_STD_SECAM_H |\
V4L2_STD_SECAM_DK |\
- V4L2_STD_SECAM_L)
+ V4L2_STD_SECAM_L |\
+ V4L2_STD_SECAM_LC)
#define V4L2_STD_525_60 (V4L2_STD_PAL_M |\
V4L2_STD_PAL_60 |\
@@ -888,7 +895,6 @@ struct v4l2_audio
/* Flags for the 'mode' field */
#define V4L2_AUDMODE_AVL 0x00001
-#define V4L2_AUDMODE_32BITS 0x00002
struct v4l2_audioout
{
@@ -1110,7 +1116,6 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority *local);
/* names for fancy debug output */
extern char *v4l2_field_names[];
extern char *v4l2_type_names[];
-extern char *v4l2_ioctl_names[];
/* Compatibility layer interface -- v4l1-compat module */
typedef int (*v4l2_kioctl)(struct inode *inode, struct file *file,
@@ -1118,6 +1123,11 @@ typedef int (*v4l2_kioctl)(struct inode *inode, struct file *file,
int v4l_compat_translate_ioctl(struct inode *inode, struct file *file,
int cmd, void *arg, v4l2_kioctl driver_ioctl);
+/* 32 Bits compatibility layer for 64 bits processors */
+extern long v4l_compat_ioctl32(struct file *file, unsigned int cmd,
+ unsigned long arg);
+
+
#endif /* __KERNEL__ */
#endif /* __LINUX_VIDEODEV2_H */
diff --git a/include/linux/wavefront.h b/include/linux/wavefront.h
index 61bd0fd3524..51ab3c933ac 100644
--- a/include/linux/wavefront.h
+++ b/include/linux/wavefront.h
@@ -434,22 +434,22 @@ typedef struct wf_multisample {
} wavefront_multisample;
typedef struct wf_alias {
- INT16 OriginalSample __attribute__ ((packed));
-
- struct wf_sample_offset sampleStartOffset __attribute__ ((packed));
- struct wf_sample_offset loopStartOffset __attribute__ ((packed));
- struct wf_sample_offset sampleEndOffset __attribute__ ((packed));
- struct wf_sample_offset loopEndOffset __attribute__ ((packed));
-
- INT16 FrequencyBias __attribute__ ((packed));
-
- UCHAR8 SampleResolution:2 __attribute__ ((packed));
- UCHAR8 Unused1:1 __attribute__ ((packed));
- UCHAR8 Loop:1 __attribute__ ((packed));
- UCHAR8 Bidirectional:1 __attribute__ ((packed));
- UCHAR8 Unused2:1 __attribute__ ((packed));
- UCHAR8 Reverse:1 __attribute__ ((packed));
- UCHAR8 Unused3:1 __attribute__ ((packed));
+ INT16 OriginalSample;
+
+ struct wf_sample_offset sampleStartOffset;
+ struct wf_sample_offset loopStartOffset;
+ struct wf_sample_offset sampleEndOffset;
+ struct wf_sample_offset loopEndOffset;
+
+ INT16 FrequencyBias;
+
+ UCHAR8 SampleResolution:2;
+ UCHAR8 Unused1:1;
+ UCHAR8 Loop:1;
+ UCHAR8 Bidirectional:1;
+ UCHAR8 Unused2:1;
+ UCHAR8 Reverse:1;
+ UCHAR8 Unused3:1;
/* This structure is meant to be padded only to 16 bits on their
original. Of course, whoever wrote their documentation didn't
@@ -460,8 +460,8 @@ typedef struct wf_alias {
standard 16->32 bit issues.
*/
- UCHAR8 sixteen_bit_padding __attribute__ ((packed));
-} wavefront_alias;
+ UCHAR8 sixteen_bit_padding;
+} __attribute__((packed)) wavefront_alias;
typedef struct wf_drum {
UCHAR8 PatchNumber;
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index ac39d04d027..86b11130023 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -65,6 +65,7 @@ extern int FASTCALL(schedule_work(struct work_struct *work));
extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay));
extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay);
+extern int schedule_on_each_cpu(void (*func)(void *info), void *info);
extern void flush_scheduled_work(void);
extern int current_is_keventd(void);
extern int keventd_up(void);
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 343d883d69c..beaef5c7a0e 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -53,19 +53,14 @@ struct writeback_control {
loff_t start;
loff_t end;
- unsigned nonblocking:1; /* Don't get stuck on request queues */
- unsigned encountered_congestion:1; /* An output: a queue is full */
- unsigned for_kupdate:1; /* A kupdate writeback */
- unsigned for_reclaim:1; /* Invoked from the page allocator */
+ unsigned nonblocking:1; /* Don't get stuck on request queues */
+ unsigned encountered_congestion:1; /* An output: a queue is full */
+ unsigned for_kupdate:1; /* A kupdate writeback */
+ unsigned for_reclaim:1; /* Invoked from the page allocator */
+ unsigned for_writepages:1; /* This is a writepages() call */
};
/*
- * ->writepage() return values (make these much larger than a pagesize, in
- * case some fs is returning number-of-bytes-written from writepage)
- */
-#define WRITEPAGE_ACTIVATE 0x80000 /* IO was not started: activate page */
-
-/*
* fs/fs-writeback.c
*/
void writeback_inodes(struct writeback_control *wbc);
@@ -108,7 +103,9 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping);
int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
int sync_page_range(struct inode *inode, struct address_space *mapping,
- loff_t pos, size_t count);
+ loff_t pos, loff_t count);
+int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
+ loff_t pos, loff_t count);
/* pdflush.c */
extern int nr_pdflush_threads; /* Global so it can be exported to sysctl
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 23f9c61d954..cda8a96e2fa 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -13,7 +13,22 @@
#define XATTR_CREATE 0x1 /* set value, fail if attr already exists */
#define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */
+/* Namespaces */
+#define XATTR_OS2_PREFIX "os2."
+#define XATTR_OS2_PREFIX_LEN (sizeof (XATTR_OS2_PREFIX) - 1)
+
#define XATTR_SECURITY_PREFIX "security."
+#define XATTR_SECURITY_PREFIX_LEN (sizeof (XATTR_SECURITY_PREFIX) - 1)
+
+#define XATTR_SYSTEM_PREFIX "system."
+#define XATTR_SYSTEM_PREFIX_LEN (sizeof (XATTR_SYSTEM_PREFIX) - 1)
+
+#define XATTR_TRUSTED_PREFIX "trusted."
+#define XATTR_TRUSTED_PREFIX_LEN (sizeof (XATTR_TRUSTED_PREFIX) - 1)
+
+#define XATTR_USER_PREFIX "user."
+#define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
+
struct xattr_handler {
char *prefix;
@@ -25,6 +40,10 @@ struct xattr_handler {
size_t size, int flags);
};
+ssize_t vfs_getxattr(struct dentry *, char *, void *, size_t);
+int vfs_setxattr(struct dentry *, char *, void *, size_t, int);
+int vfs_removexattr(struct dentry *, char *);
+
ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size);
ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
int generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags);
diff --git a/include/linux/zlib.h b/include/linux/zlib.h
index 74f7b78c22d..4fa32f0d4df 100644
--- a/include/linux/zlib.h
+++ b/include/linux/zlib.h
@@ -442,9 +442,11 @@ extern int deflateInit2 (z_streamp strm,
not perform any compression: this will be done by deflate().
*/
+#if 0
extern int zlib_deflateSetDictionary (z_streamp strm,
const Byte *dictionary,
uInt dictLength);
+#endif
/*
Initializes the compression dictionary from the given byte sequence
without producing any compressed output. This function must be called
@@ -478,7 +480,10 @@ extern int zlib_deflateSetDictionary (z_streamp strm,
perform any compression: this will be done by deflate().
*/
+#if 0
extern int zlib_deflateCopy (z_streamp dest, z_streamp source);
+#endif
+
/*
Sets the destination stream as a complete copy of the source stream.
@@ -511,7 +516,9 @@ static inline unsigned long deflateBound(unsigned long s)
return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
}
+#if 0
extern int zlib_deflateParams (z_streamp strm, int level, int strategy);
+#endif
/*
Dynamically update the compression level and compression strategy. The
interpretation of level and strategy is as in deflateInit2. This can be
@@ -571,7 +578,9 @@ extern int zlib_inflateSetDictionary (z_streamp strm,
inflate().
*/
+#if 0
extern int zlib_inflateSync (z_streamp strm);
+#endif
/*
Skips invalid compressed data until a full flush point (see above the
description of deflate with Z_FULL_FLUSH) can be found, or until all
@@ -636,7 +645,9 @@ extern int zlib_inflateInit2_ (z_streamp strm, int windowBits,
#endif
extern const char * zlib_zError (int err);
+#if 0
extern int zlib_inflateSyncPoint (z_streamp z);
+#endif
extern const uLong * zlib_get_crc_table (void);
#endif /* _ZLIB_H */
diff --git a/include/media/audiochip.h b/include/media/audiochip.h
index b7d4b093040..295d256ee81 100644
--- a/include/media/audiochip.h
+++ b/include/media/audiochip.h
@@ -23,11 +23,6 @@ enum audiochip {
/* ---------------------------------------------------------------------- */
-/* v4l device was opened in Radio mode */
-#define AUDC_SET_RADIO _IO('m',2)
-/* select from TV,radio,extern,MUTE */
-#define AUDC_SET_INPUT _IOW('m',17,int)
-
/* audio inputs */
#define AUDIO_TUNER 0x00
#define AUDIO_RADIO 0x01
@@ -40,15 +35,4 @@ enum audiochip {
#define AUDIO_MUTE 0x80
#define AUDIO_UNMUTE 0x81
-/* all the stuff below is obsolete and just here for reference. I'll
- * remove it once the driver is tested and works fine.
- *
- * Instead creating alot of tiny API's for all kinds of different
- * chips, we'll just pass throuth the v4l ioctl structs (v4l2 not
- * yet...). It is a bit less flexible, but most/all used i2c chips
- * make sense in v4l context only. So I think that's acceptable...
- */
-
-/* misc stuff to pass around config info to i2c chips */
-#define AUDC_CONFIG_PINNACLE _IOW('m',32,int)
#endif /* AUDIOCHIP_H */
diff --git a/include/media/saa7146.h b/include/media/saa7146.h
index e5be2b9b846..2bc634fcb7b 100644
--- a/include/media/saa7146.h
+++ b/include/media/saa7146.h
@@ -21,14 +21,14 @@
extern unsigned int saa7146_debug;
-//#define DEBUG_PROLOG printk("(0x%08x)(0x%08x) %s: %s(): ",(dev==0?-1:(dev->mem==0?-1:saa7146_read(dev,RPS_ADDR0))),(dev==0?-1:(dev->mem==0?-1:saa7146_read(dev,IER))),__stringify(KBUILD_MODNAME),__FUNCTION__)
+//#define DEBUG_PROLOG printk("(0x%08x)(0x%08x) %s: %s(): ",(dev==0?-1:(dev->mem==0?-1:saa7146_read(dev,RPS_ADDR0))),(dev==0?-1:(dev->mem==0?-1:saa7146_read(dev,IER))),KBUILD_MODNAME,__FUNCTION__)
#ifndef DEBUG_VARIABLE
#define DEBUG_VARIABLE saa7146_debug
#endif
-#define DEBUG_PROLOG printk("%s: %s(): ",__stringify(KBUILD_MODNAME),__FUNCTION__)
-#define INFO(x) { printk("%s: ",__stringify(KBUILD_MODNAME)); printk x; }
+#define DEBUG_PROLOG printk("%s: %s(): ",KBUILD_MODNAME,__FUNCTION__)
+#define INFO(x) { printk("%s: ",KBUILD_MODNAME); printk x; }
#define ERR(x) { DEBUG_PROLOG; printk x; }
diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
index 16af9299315..e5e749e984e 100644
--- a/include/media/saa7146_vv.h
+++ b/include/media/saa7146_vv.h
@@ -178,6 +178,8 @@ struct saa7146_ext_vv
struct saa7146_extension_ioctls *ioctls;
int (*ioctl)(struct saa7146_fh*, unsigned int cmd, void *arg);
+
+ struct file_operations vbi_fops;
};
struct saa7146_use_ops {
diff --git a/include/media/tuner.h b/include/media/tuner.h
index faa0f8e3091..27cbf08c931 100644
--- a/include/media/tuner.h
+++ b/include/media/tuner.h
@@ -82,9 +82,9 @@
#define TUNER_PHILIPS_FM1236_MK3 43
#define TUNER_PHILIPS_4IN1 44 /* ATI TV Wonder Pro - Conexant */
-/* Microtune mergeged with Temic 12/31/1999 partially financed by Alps - these may be similar to Temic */
+/* Microtune merged with Temic 12/31/1999 partially financed by Alps - these may be similar to Temic */
#define TUNER_MICROTUNE_4049FM5 45
-#define TUNER_MICROTUNE_4042_FI5 46
+#define TUNER_PANASONIC_VP27 46
#define TUNER_LG_NTSC_TAPE 47
#define TUNER_TNF_8831BGFF 48
@@ -102,7 +102,7 @@
#define TUNER_YMEC_TVF_8531MF 58
#define TUNER_YMEC_TVF_5533MF 59 /* Pixelview Pro Ultra NTSC */
-#define TUNER_THOMSON_DTT7611 60 /* DViCO FusionHDTV 3 Gold-T */
+#define TUNER_THOMSON_DTT761X 60 /* DTT 7611 7611A 7612 7613 7613A 7614 7615 7615A */
#define TUNER_TENA_9533_DI 61
#define TUNER_TEA5767 62 /* Only FM Radio Tuner */
#define TUNER_PHILIPS_FMD1216ME_MK3 63
@@ -115,47 +115,26 @@
#define TUNER_PHILIPS_TUV1236D 68 /* ATI HDTV Wonder */
#define TUNER_TNF_5335MF 69 /* Sabrent Bt848 */
-#define NOTUNER 0
-#define PAL 1 /* PAL_BG */
-#define PAL_I 2
-#define NTSC 3
-#define SECAM 4
-#define ATSC 5
-#define RADIO 6
-
-#define NoTuner 0
-#define Philips 1
-#define TEMIC 2
-#define Sony 3
-#define Alps 4
-#define LGINNOTEK 5
-#define SHARP 6
-#define Samsung 7
-#define Microtune 8
-#define HITACHI 9
-#define Panasonic 10
-#define TCL 11
-#define THOMSON 12
-
-#define TUNER_SET_TYPE_ADDR _IOW('T',3,int)
-#define TUNER_SET_STANDBY _IOW('T',4,int)
-#define TDA9887_SET_CONFIG _IOW('t',5,int)
-
/* tv card specific */
-# define TDA9887_PRESENT (1<<0)
-# define TDA9887_PORT1_INACTIVE (1<<1)
-# define TDA9887_PORT2_INACTIVE (1<<2)
-# define TDA9887_QSS (1<<3)
-# define TDA9887_INTERCARRIER (1<<4)
-# define TDA9887_PORT1_ACTIVE (1<<5)
-# define TDA9887_PORT2_ACTIVE (1<<6)
-# define TDA9887_INTERCARRIER_NTSC (1<<7)
+#define TDA9887_PRESENT (1<<0)
+#define TDA9887_PORT1_INACTIVE (1<<1)
+#define TDA9887_PORT2_INACTIVE (1<<2)
+#define TDA9887_QSS (1<<3)
+#define TDA9887_INTERCARRIER (1<<4)
+#define TDA9887_PORT1_ACTIVE (1<<5)
+#define TDA9887_PORT2_ACTIVE (1<<6)
+#define TDA9887_INTERCARRIER_NTSC (1<<7)
+/* Tuner takeover point adjustment, in dB, -16 <= top <= 15 */
+#define TDA9887_TOP_MASK (0x3f << 8)
+#define TDA9887_TOP_SET (1 << 13)
+#define TDA9887_TOP(top) (TDA9887_TOP_SET | (((16 + (top)) & 0x1f) << 8))
+
/* config options */
-# define TDA9887_DEEMPHASIS_MASK (3<<16)
-# define TDA9887_DEEMPHASIS_NONE (1<<16)
-# define TDA9887_DEEMPHASIS_50 (2<<16)
-# define TDA9887_DEEMPHASIS_75 (3<<16)
-# define TDA9887_AUTOMUTE (1<<18)
+#define TDA9887_DEEMPHASIS_MASK (3<<16)
+#define TDA9887_DEEMPHASIS_NONE (1<<16)
+#define TDA9887_DEEMPHASIS_50 (2<<16)
+#define TDA9887_DEEMPHASIS_75 (3<<16)
+#define TDA9887_AUTOMUTE (1<<18)
#ifdef __KERNEL__
@@ -167,10 +146,26 @@ enum tuner_mode {
T_STANDBY = 1 << 31
};
+/* Older boards only had a single tuner device. Nowadays multiple tuner
+ devices may be present on a single board. By using TUNER_SET_TYPE_ADDR
+ to pass a tuner_setup structure, each tuner device can be set up in
+ turn.
+
+ Since multiple devices may be present it is no longer sufficient to
+ send a command to a single i2c device. Instead you should broadcast
+ the command to all i2c devices.
+
+ By setting the mode_mask correctly you can select which commands are
+ accepted by a specific tuner device. For example, set mode_mask to
+ T_RADIO if the device is a radio-only tuner. That specific tuner will
+ only accept commands when the tuner is in radio mode and ignore them
+ when the tuner is set to TV mode.
+ */
+
struct tuner_setup {
- unsigned short addr;
- unsigned int type;
- unsigned int mode_mask;
+ unsigned short addr; /* I2C address */
+ unsigned int type; /* Tuner type */
+ unsigned int mode_mask; /* Allowed tuner modes */
};
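
A sketch (not part of the patch) of how a card driver might use this, assuming the usual i2c_clients_command() broadcast helper; the adapter pointer, I2C address and tuner type here are hypothetical:

	static void example_setup_tuner(struct i2c_adapter *adapter)
	{
		struct tuner_setup tun_setup = {
			.addr      = 0x61,			/* probed I2C address */
			.type      = TUNER_PHILIPS_FM1236_MK3,
			.mode_mask = T_ANALOG_TV | T_RADIO,	/* modes in which commands are accepted */
		};

		i2c_clients_command(adapter, TUNER_SET_TYPE_ADDR, &tun_setup);
	}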
struct tuner {
@@ -207,7 +202,6 @@ struct tuner {
void (*standby)(struct i2c_client *c);
};
-extern unsigned int tuner_debug;
extern unsigned const int tuner_count;
extern int microtune_init(struct i2c_client *c);
@@ -218,15 +212,16 @@ extern int default_tuner_init(struct i2c_client *c);
extern int tea5767_autodetection(struct i2c_client *c);
#define tuner_warn(fmt, arg...) do {\
- printk(KERN_WARNING "%s %d-%04x: " fmt, t->i2c.driver->name, \
- t->i2c.adapter->nr, t->i2c.addr , ##arg); } while (0)
+ printk(KERN_WARNING "%s %d-%04x: " fmt, t->i2c.driver->driver.name, \
+ i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0)
#define tuner_info(fmt, arg...) do {\
- printk(KERN_INFO "%s %d-%04x: " fmt, t->i2c.driver->name, \
- t->i2c.adapter->nr, t->i2c.addr , ##arg); } while (0)
+ printk(KERN_INFO "%s %d-%04x: " fmt, t->i2c.driver->driver.name, \
+ i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0)
#define tuner_dbg(fmt, arg...) do {\
+ extern int tuner_debug; \
if (tuner_debug) \
- printk(KERN_DEBUG "%s %d-%04x: " fmt, t->i2c.driver->name, \
- t->i2c.adapter->nr, t->i2c.addr , ##arg); } while (0)
+ printk(KERN_DEBUG "%s %d-%04x: " fmt, t->i2c.driver->driver.name, \
+ i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0)
#endif /* __KERNEL__ */
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index d3fd48157eb..c74052abb18 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -26,12 +26,56 @@
#ifndef V4L2_COMMON_H_
#define V4L2_COMMON_H_
-/* VIDIOC_INT_AUDIO_CLOCK_FREQ */
-enum v4l2_audio_clock_freq {
- V4L2_AUDCLK_32_KHZ = 32000,
- V4L2_AUDCLK_441_KHZ = 44100,
- V4L2_AUDCLK_48_KHZ = 48000,
-};
+/* v4l debugging and diagnostics */
+
+/* Common printk constructs for v4l-i2c drivers. These macros create a unique
+ prefix consisting of the driver name, the adapter number and the i2c
+ address. */
+#define v4l_printk(level, name, adapter, addr, fmt, arg...) \
+ printk(level "%s %d-%04x: " fmt, name, i2c_adapter_id(adapter), addr , ## arg)
+
+#define v4l_client_printk(level, client, fmt, arg...) \
+ v4l_printk(level, (client)->driver->driver.name, (client)->adapter, \
+ (client)->addr, fmt , ## arg)
+
+#define v4l_err(client, fmt, arg...) \
+ v4l_client_printk(KERN_ERR, client, fmt , ## arg)
+
+#define v4l_warn(client, fmt, arg...) \
+ v4l_client_printk(KERN_WARNING, client, fmt , ## arg)
+
+#define v4l_info(client, fmt, arg...) \
+ v4l_client_printk(KERN_INFO, client, fmt , ## arg)
+
+/* These three macros assume that the debug level is set with a module
+ parameter called 'debug'. */
+#define v4l_dbg(level, debug, client, fmt, arg...) \
+ do { \
+ if (debug >= (level)) \
+ v4l_client_printk(KERN_DEBUG, client, fmt , ## arg); \
+ } while (0)
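
A sketch (not part of the patch) of typical use in an i2c chip driver; the module parameter name "debug" is the convention these macros assume, everything else is hypothetical:

	static int debug;			/* debug verbosity, 0 = quiet */
	module_param(debug, int, 0644);

	static void example_report(struct i2c_client *client)
	{
		v4l_info(client, "chip found @ 0x%02x\n", client->addr << 1);
		v4l_dbg(1, debug, client, "initializing registers\n");
	}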
+
+/* Prints the ioctl in a human-readable format */
+extern void v4l_printk_ioctl(unsigned int cmd);
+
+/* Use this macro for non-I2C drivers. Pass the driver name as the first arg. */
+#define v4l_print_ioctl(name, cmd) \
+ do { \
+ printk(KERN_DEBUG "%s: ", name); \
+ v4l_printk_ioctl(cmd); \
+ } while (0)
+
+/* Use this macro in I2C drivers where 'client' is the struct i2c_client
+ pointer */
+#define v4l_i2c_print_ioctl(client, cmd) \
+ do { \
+ v4l_client_printk(KERN_DEBUG, client, ""); \
+ v4l_printk_ioctl(cmd); \
+ } while (0)
+
+/* ------------------------------------------------------------------------- */
+
+/* Internal ioctls */
/* VIDIOC_INT_G_REGISTER and VIDIOC_INT_S_REGISTER */
struct v4l2_register {
@@ -70,6 +114,20 @@ enum v4l2_chip_ident {
V4L2_IDENT_CX25843 = 243,
};
+/* audio ioctls */
+/* v4l device was opened in Radio mode */
+#define AUDC_SET_RADIO _IO('d',88)
+/* select from TV,radio,extern,MUTE */
+#define AUDC_SET_INPUT _IOW('d',89,int)
+
+/* tuner ioctls */
+/* Sets tuner type and its I2C addr */
+#define TUNER_SET_TYPE_ADDR _IOW('d',90,int)
+/* Puts the tuner into a power-saving state, disabling it except for i2c */
+#define TUNER_SET_STANDBY _IOW('d',91,int)
+/* Sets tda9887 specific stuff, like port1, port2 and qss */
+#define TDA9887_SET_CONFIG _IOW('d',92,int)
+
/* only implemented if CONFIG_VIDEO_ADV_DEBUG is defined */
#define VIDIOC_INT_S_REGISTER _IOR ('d', 100, struct v4l2_register)
#define VIDIOC_INT_G_REGISTER _IOWR('d', 101, struct v4l2_register)
@@ -77,10 +135,12 @@ enum v4l2_chip_ident {
/* Reset the I2C chip */
#define VIDIOC_INT_RESET _IO ('d', 102)
-/* Set the frequency of the audio clock output.
+/* Set the frequency (in Hz) of the audio clock output.
Used to slave an audio processor to the video decoder, ensuring that audio
- and video remain synchronized. */
-#define VIDIOC_INT_AUDIO_CLOCK_FREQ _IOR ('d', 103, enum v4l2_audio_clock_freq)
+ and video remain synchronized.
+ Usual values for the frequency are 48000, 44100 or 32000 Hz.
+ If the frequency is not supported, then -EINVAL is returned. */
+#define VIDIOC_INT_AUDIO_CLOCK_FREQ _IOW ('d', 103, u32)
/* Video decoders that support sliced VBI need to implement this ioctl.
Field p of the v4l2_sliced_vbi_line struct is set to the start of the VBI
@@ -107,4 +167,10 @@ enum v4l2_chip_ident {
be made. */
#define VIDIOC_INT_G_CHIP_IDENT _IOR ('d', 107, enum v4l2_chip_ident *)
+/* Sets I2S speed in bps. This provides a standard way to select the I2S
+ clock used for driving digital audio streams on some board designs.
+ Usual values for the frequency are 1024000 and 2048000.
+ If the frequency is not supported, then -EINVAL is returned. */
+#define VIDIOC_INT_I2S_CLOCK_FREQ _IOW ('d', 108, u32)
+
#endif /* V4L2_COMMON_H_ */
diff --git a/include/net/act_api.h b/include/net/act_api.h
index b55eb7c7f03..11e9eaf79f5 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -63,7 +63,7 @@ struct tc_action_ops
__u32 type; /* TBD to match kind */
__u32 capab; /* capabilities includes 4 bit version */
struct module *owner;
- int (*act)(struct sk_buff **, struct tc_action *, struct tcf_result *);
+ int (*act)(struct sk_buff *, struct tc_action *, struct tcf_result *);
int (*get_stats)(struct sk_buff *, struct tc_action *);
int (*dump)(struct sk_buff *, struct tc_action *,int , int);
int (*cleanup)(struct tc_action *, int bind);
diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h
index 86e8e86e624..5a86e78081b 100644
--- a/include/net/dn_dev.h
+++ b/include/net/dn_dev.h
@@ -88,8 +88,8 @@ struct dn_dev {
struct net_device *dev;
struct dn_dev_parms parms;
char use_long;
- struct timer_list timer;
- unsigned long t3;
+ struct timer_list timer;
+ unsigned long t3;
struct neigh_parms *neigh_parms;
unsigned char addr[ETH_ALEN];
struct neighbour *router; /* Default router on circuit */
@@ -99,57 +99,57 @@ struct dn_dev {
struct dn_short_packet
{
- unsigned char msgflg __attribute__((packed));
- unsigned short dstnode __attribute__((packed));
- unsigned short srcnode __attribute__((packed));
- unsigned char forward __attribute__((packed));
-};
+ unsigned char msgflg;
+ unsigned short dstnode;
+ unsigned short srcnode;
+ unsigned char forward;
+} __attribute__((packed));
struct dn_long_packet
{
- unsigned char msgflg __attribute__((packed));
- unsigned char d_area __attribute__((packed));
- unsigned char d_subarea __attribute__((packed));
- unsigned char d_id[6] __attribute__((packed));
- unsigned char s_area __attribute__((packed));
- unsigned char s_subarea __attribute__((packed));
- unsigned char s_id[6] __attribute__((packed));
- unsigned char nl2 __attribute__((packed));
- unsigned char visit_ct __attribute__((packed));
- unsigned char s_class __attribute__((packed));
- unsigned char pt __attribute__((packed));
-};
+ unsigned char msgflg;
+ unsigned char d_area;
+ unsigned char d_subarea;
+ unsigned char d_id[6];
+ unsigned char s_area;
+ unsigned char s_subarea;
+ unsigned char s_id[6];
+ unsigned char nl2;
+ unsigned char visit_ct;
+ unsigned char s_class;
+ unsigned char pt;
+} __attribute__((packed));
/*------------------------- DRP - Routing messages ---------------------*/
struct endnode_hello_message
{
- unsigned char msgflg __attribute__((packed));
- unsigned char tiver[3] __attribute__((packed));
- unsigned char id[6] __attribute__((packed));
- unsigned char iinfo __attribute__((packed));
- unsigned short blksize __attribute__((packed));
- unsigned char area __attribute__((packed));
- unsigned char seed[8] __attribute__((packed));
- unsigned char neighbor[6] __attribute__((packed));
- unsigned short timer __attribute__((packed));
- unsigned char mpd __attribute__((packed));
- unsigned char datalen __attribute__((packed));
- unsigned char data[2] __attribute__((packed));
-};
+ unsigned char msgflg;
+ unsigned char tiver[3];
+ unsigned char id[6];
+ unsigned char iinfo;
+ unsigned short blksize;
+ unsigned char area;
+ unsigned char seed[8];
+ unsigned char neighbor[6];
+ unsigned short timer;
+ unsigned char mpd;
+ unsigned char datalen;
+ unsigned char data[2];
+} __attribute__((packed));
struct rtnode_hello_message
{
- unsigned char msgflg __attribute__((packed));
- unsigned char tiver[3] __attribute__((packed));
- unsigned char id[6] __attribute__((packed));
- unsigned char iinfo __attribute__((packed));
- unsigned short blksize __attribute__((packed));
- unsigned char priority __attribute__((packed));
- unsigned char area __attribute__((packed));
- unsigned short timer __attribute__((packed));
- unsigned char mpd __attribute__((packed));
-};
+ unsigned char msgflg;
+ unsigned char tiver[3];
+ unsigned char id[6];
+ unsigned char iinfo;
+ unsigned short blksize;
+ unsigned char priority;
+ unsigned char area;
+ unsigned short timer;
+ unsigned char mpd;
+} __attribute__((packed));
extern void dn_dev_init(void);
diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h
index 1ba03be0af3..e6182b86262 100644
--- a/include/net/dn_nsp.h
+++ b/include/net/dn_nsp.h
@@ -72,78 +72,78 @@ extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int nobl
struct nsp_data_seg_msg
{
- unsigned char msgflg __attribute__((packed));
- unsigned short dstaddr __attribute__((packed));
- unsigned short srcaddr __attribute__((packed));
-};
+ unsigned char msgflg;
+ unsigned short dstaddr;
+ unsigned short srcaddr;
+} __attribute__((packed));
struct nsp_data_opt_msg
{
- unsigned short acknum __attribute__((packed));
- unsigned short segnum __attribute__((packed));
- unsigned short lsflgs __attribute__((packed));
-};
+ unsigned short acknum;
+ unsigned short segnum;
+ unsigned short lsflgs;
+} __attribute__((packed));
struct nsp_data_opt_msg1
{
- unsigned short acknum __attribute__((packed));
- unsigned short segnum __attribute__((packed));
-};
+ unsigned short acknum;
+ unsigned short segnum;
+} __attribute__((packed));
/* Acknowledgment Message (data/other data) */
struct nsp_data_ack_msg
{
- unsigned char msgflg __attribute__((packed));
- unsigned short dstaddr __attribute__((packed));
- unsigned short srcaddr __attribute__((packed));
- unsigned short acknum __attribute__((packed));
-};
+ unsigned char msgflg;
+ unsigned short dstaddr;
+ unsigned short srcaddr;
+ unsigned short acknum;
+} __attribute__((packed));
/* Connect Acknowledgment Message */
struct nsp_conn_ack_msg
{
- unsigned char msgflg __attribute__((packed));
- unsigned short dstaddr __attribute__((packed));
-};
+ unsigned char msgflg;
+ unsigned short dstaddr;
+} __attribute__((packed));
/* Connect Initiate/Retransmit Initiate/Connect Confirm */
struct nsp_conn_init_msg
{
- unsigned char msgflg __attribute__((packed));
+ unsigned char msgflg;
#define NSP_CI 0x18 /* Connect Initiate */
#define NSP_RCI 0x68 /* Retrans. Conn Init */
- unsigned short dstaddr __attribute__((packed));
- unsigned short srcaddr __attribute__((packed));
- unsigned char services __attribute__((packed));
+ unsigned short dstaddr;
+ unsigned short srcaddr;
+ unsigned char services;
#define NSP_FC_NONE 0x00 /* Flow Control None */
#define NSP_FC_SRC 0x04 /* Seg Req. Count */
#define NSP_FC_SCMC 0x08 /* Sess. Control Mess */
#define NSP_FC_MASK 0x0c /* FC type mask */
- unsigned char info __attribute__((packed));
- unsigned short segsize __attribute__((packed));
-};
+ unsigned char info;
+ unsigned short segsize;
+} __attribute__((packed));
/* Disconnect Initiate/Disconnect Confirm */
struct nsp_disconn_init_msg
{
- unsigned char msgflg __attribute__((packed));
- unsigned short dstaddr __attribute__((packed));
- unsigned short srcaddr __attribute__((packed));
- unsigned short reason __attribute__((packed));
-};
+ unsigned char msgflg;
+ unsigned short dstaddr;
+ unsigned short srcaddr;
+ unsigned short reason;
+} __attribute__((packed));
struct srcobj_fmt
{
- char format __attribute__((packed));
- unsigned char task __attribute__((packed));
- unsigned short grpcode __attribute__((packed));
- unsigned short usrcode __attribute__((packed));
- char dlen __attribute__((packed));
-};
+ char format;
+ unsigned char task;
+ unsigned short grpcode;
+ unsigned short usrcode;
+ char dlen;
+} __attribute__((packed));
/*
* A collection of functions for manipulating the sequence
diff --git a/include/net/dst.h b/include/net/dst.h
index bee8b84d329..5161e89017f 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -225,16 +225,7 @@ static inline void dst_set_expires(struct dst_entry *dst, int timeout)
/* Output packet to network from transport. */
static inline int dst_output(struct sk_buff *skb)
{
- int err;
-
- for (;;) {
- err = skb->dst->output(skb);
-
- if (likely(err == 0))
- return err;
- if (unlikely(err != NET_XMIT_BYPASS))
- return err;
- }
+ return skb->dst->output(skb);
}
/* Input packet from network to transport. */
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index c5b96b2b815..805de50df00 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -22,7 +22,6 @@ struct genl_family
char name[GENL_NAMSIZ];
unsigned int version;
unsigned int maxattr;
- struct module * owner;
struct nlattr ** attrbuf; /* private */
struct list_head ops_list; /* private */
struct list_head family_list; /* private */
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h
index cde2f4f4f50..df05f468fa5 100644
--- a/include/net/ieee80211.h
+++ b/include/net/ieee80211.h
@@ -363,8 +363,9 @@ enum ieee80211_reasoncode {
#define IEEE80211_OFDM_SHIFT_MASK_A 4
/* NOTE: This data is for statistical purposes; not all hardware provides this
- * information for frames received. Not setting these will not cause
- * any adverse affects. */
+ * information for frames received.
+ * For ieee80211_rx_mgt, you need to set at least the 'len' parameter.
+ */
struct ieee80211_rx_stats {
u32 mac_time;
s8 rssi;
@@ -1088,6 +1089,7 @@ extern int ieee80211_tx_frame(struct ieee80211_device *ieee,
/* ieee80211_rx.c */
extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
struct ieee80211_rx_stats *rx_stats);
+/* make sure to set stats->len */
extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
struct ieee80211_hdr_4addr *header,
struct ieee80211_rx_stats *stats);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 50234fa56a6..fa587c94e9d 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -83,8 +83,8 @@ struct inet_connection_sock {
struct timer_list icsk_delack_timer;
__u32 icsk_rto;
__u32 icsk_pmtu_cookie;
- struct tcp_congestion_ops *icsk_ca_ops;
- struct inet_connection_sock_af_ops *icsk_af_ops;
+ const struct tcp_congestion_ops *icsk_ca_ops;
+ const struct inet_connection_sock_af_ops *icsk_af_ops;
unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
__u8 icsk_ca_state;
__u8 icsk_retransmits;
diff --git a/include/net/ip.h b/include/net/ip.h
index 7bb5804847f..8de0697b364 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -37,11 +37,10 @@ struct inet_skb_parm
struct ip_options opt; /* Compiled IP options */
unsigned char flags;
-#define IPSKB_MASQUERADED 1
-#define IPSKB_TRANSLATED 2
-#define IPSKB_FORWARDED 4
-#define IPSKB_XFRM_TUNNEL_SIZE 8
-#define IPSKB_FRAG_COMPLETE 16
+#define IPSKB_FORWARDED 1
+#define IPSKB_XFRM_TUNNEL_SIZE 2
+#define IPSKB_XFRM_TRANSFORMED 4
+#define IPSKB_FRAG_COMPLETE 8
};
struct ipcm_cookie
@@ -95,7 +94,6 @@ extern int ip_local_deliver(struct sk_buff *skb);
extern int ip_mr_input(struct sk_buff *skb);
extern int ip_output(struct sk_buff *skb);
extern int ip_mc_output(struct sk_buff *skb);
-extern int ip_fragment(struct sk_buff *skb, int (*out)(struct sk_buff*));
extern int ip_do_nat(struct sk_buff *skb);
extern void ip_send_check(struct iphdr *ip);
extern int ip_queue_xmit(struct sk_buff *skb, int ipfragok);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 860bbac4c4e..3b1d963d396 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -418,6 +418,8 @@ extern int ipv6_rcv(struct sk_buff *skb,
struct packet_type *pt,
struct net_device *orig_dev);
+extern int ip6_rcv_finish(struct sk_buff *skb);
+
/*
* upper-layer output functions
*/
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 25b081a730e..91684436af8 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -37,7 +37,4 @@ struct nf_conntrack_ipv4 {
struct sk_buff *
nf_ct_ipv4_ct_gather_frags(struct sk_buff *skb);
-/* call to create an explicit dependency on nf_conntrack_l3proto_ipv4. */
-extern void need_ip_conntrack(void);
-
#endif /*_NF_CONNTRACK_IPV4_H*/
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 64b82b74a65..6d075ca16e6 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -221,9 +221,6 @@ extern void nf_ct_helper_put(struct nf_conntrack_helper *helper);
extern struct nf_conntrack_helper *
__nf_conntrack_helper_find_byname(const char *name);
-/* call to create an explicit dependency on nf_conntrack. */
-extern void need_nf_conntrack(void);
-
extern int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig);
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h
index 14ce790e5c6..530ef1f7528 100644
--- a/include/net/netfilter/nf_conntrack_tuple.h
+++ b/include/net/netfilter/nf_conntrack_tuple.h
@@ -111,7 +111,7 @@ struct nf_conntrack_tuple
#ifdef __KERNEL__
#define NF_CT_DUMP_TUPLE(tp) \
-DEBUGP("tuple %p: %u %u %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x %hu -> %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x %hu\n", \
+DEBUGP("tuple %p: %u %u " NIP6_FMT " %hu -> " NIP6_FMT " %hu\n", \
(tp), (tp)->src.l3num, (tp)->dst.protonum, \
NIP6(*(struct in6_addr *)(tp)->src.u3.all), ntohs((tp)->src.u.all), \
NIP6(*(struct in6_addr *)(tp)->dst.u3.all), ntohs((tp)->dst.u.all))
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 6492e7363d8..b94d1ad92c4 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -1,6 +1,7 @@
#ifndef __NET_PKT_SCHED_H
#define __NET_PKT_SCHED_H
+#include <linux/jiffies.h>
#include <net/sch_generic.h>
struct qdisc_walker
@@ -59,8 +60,8 @@ typedef struct timeval psched_time_t;
typedef long psched_tdiff_t;
#define PSCHED_GET_TIME(stamp) do_gettimeofday(&(stamp))
-#define PSCHED_US2JIFFIE(usecs) (((usecs)+(1000000/HZ-1))/(1000000/HZ))
-#define PSCHED_JIFFIE2US(delay) ((delay)*(1000000/HZ))
+#define PSCHED_US2JIFFIE(usecs) usecs_to_jiffies(usecs)
+#define PSCHED_JIFFIE2US(delay) jiffies_to_usecs(delay)
#else /* !CONFIG_NET_SCH_CLK_GETTIMEOFDAY */
@@ -123,9 +124,9 @@ do { \
default: \
__delta = 0; \
case 2: \
- __delta += 1000000; \
+ __delta += USEC_PER_SEC; \
case 1: \
- __delta += 1000000; \
+ __delta += USEC_PER_SEC; \
} \
} \
__delta; \
@@ -136,9 +137,9 @@ psched_tod_diff(int delta_sec, int bound)
{
int delta;
- if (bound <= 1000000 || delta_sec > (0x7FFFFFFF/1000000)-1)
+ if (bound <= USEC_PER_SEC || delta_sec > (0x7FFFFFFF/USEC_PER_SEC)-1)
return bound;
- delta = delta_sec * 1000000;
+ delta = delta_sec * USEC_PER_SEC;
if (delta > bound || delta < 0)
delta = bound;
return delta;
@@ -152,9 +153,9 @@ psched_tod_diff(int delta_sec, int bound)
default: \
__delta = psched_tod_diff(__delta_sec, bound); break; \
case 2: \
- __delta += 1000000; \
+ __delta += USEC_PER_SEC; \
case 1: \
- __delta += 1000000; \
+ __delta += USEC_PER_SEC; \
case 0: \
if (__delta > bound || __delta < 0) \
__delta = bound; \
@@ -170,15 +171,15 @@ psched_tod_diff(int delta_sec, int bound)
({ \
int __delta = (tv).tv_usec + (delta); \
(tv_res).tv_sec = (tv).tv_sec; \
- if (__delta > 1000000) { (tv_res).tv_sec++; __delta -= 1000000; } \
+ if (__delta > USEC_PER_SEC) { (tv_res).tv_sec++; __delta -= USEC_PER_SEC; } \
(tv_res).tv_usec = __delta; \
})
#define PSCHED_TADD(tv, delta) \
({ \
(tv).tv_usec += (delta); \
- if ((tv).tv_usec > 1000000) { (tv).tv_sec++; \
- (tv).tv_usec -= 1000000; } \
+ if ((tv).tv_usec > USEC_PER_SEC) { (tv).tv_sec++; \
+ (tv).tv_usec -= USEC_PER_SEC; } \
})
/* Set/check that time is in the "past perfect";
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 63f7db99c2a..6dc5970612d 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -43,7 +43,7 @@ struct net_protocol {
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
struct inet6_protocol
{
- int (*handler)(struct sk_buff **skb, unsigned int *nhoffp);
+ int (*handler)(struct sk_buff **skb);
void (*err_handler)(struct sk_buff *skb,
struct inet6_skb_parm *opt,
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 8f241216f46..a553f39f6ae 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -225,13 +225,13 @@ extern int sctp_debug_flag;
if (sctp_debug_flag) { \
if (saddr->sa.sa_family == AF_INET6) { \
printk(KERN_DEBUG \
- lead "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x" trail, \
+ lead NIP6_FMT trail, \
leadparm, \
NIP6(saddr->v6.sin6_addr), \
otherparms); \
} else { \
printk(KERN_DEBUG \
- lead "%u.%u.%u.%u" trail, \
+ lead NIPQUAD_FMT trail, \
leadparm, \
NIPQUAD(saddr->v4.sin_addr.s_addr), \
otherparms); \
diff --git a/include/net/sock.h b/include/net/sock.h
index 6961700ff3a..1806e5b6141 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -856,8 +856,8 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock)
filter = sk->sk_filter;
if (filter) {
- int pkt_len = sk_run_filter(skb, filter->insns,
- filter->len);
+ unsigned int pkt_len = sk_run_filter(skb, filter->insns,
+ filter->len);
if (!pkt_len)
err = -EPERM;
else
diff --git a/include/net/tipc/tipc.h b/include/net/tipc/tipc.h
new file mode 100644
index 00000000000..9566608c88c
--- /dev/null
+++ b/include/net/tipc/tipc.h
@@ -0,0 +1,257 @@
+/*
+ * include/net/tipc/tipc.h: Main include file for TIPC users
+ *
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NET_TIPC_H_
+#define _NET_TIPC_H_
+
+#ifdef __KERNEL__
+
+#include <linux/tipc.h>
+#include <linux/skbuff.h>
+
+/*
+ * Native API
+ */
+
+/*
+ * TIPC operating mode routines
+ */
+
+u32 tipc_get_addr(void);
+
+#define TIPC_NOT_RUNNING 0
+#define TIPC_NODE_MODE 1
+#define TIPC_NET_MODE 2
+
+typedef void (*tipc_mode_event)(void *usr_handle, int mode, u32 addr);
+
+int tipc_attach(unsigned int *userref, tipc_mode_event, void *usr_handle);
+
+void tipc_detach(unsigned int userref);
+
+int tipc_get_mode(void);
+
+/*
+ * TIPC port manipulation routines
+ */
+
+typedef void (*tipc_msg_err_event) (void *usr_handle,
+ u32 portref,
+ struct sk_buff **buf,
+ unsigned char const *data,
+ unsigned int size,
+ int reason,
+ struct tipc_portid const *attmpt_destid);
+
+typedef void (*tipc_named_msg_err_event) (void *usr_handle,
+ u32 portref,
+ struct sk_buff **buf,
+ unsigned char const *data,
+ unsigned int size,
+ int reason,
+ struct tipc_name_seq const *attmpt_dest);
+
+typedef void (*tipc_conn_shutdown_event) (void *usr_handle,
+ u32 portref,
+ struct sk_buff **buf,
+ unsigned char const *data,
+ unsigned int size,
+ int reason);
+
+typedef void (*tipc_msg_event) (void *usr_handle,
+ u32 portref,
+ struct sk_buff **buf,
+ unsigned char const *data,
+ unsigned int size,
+ unsigned int importance,
+ struct tipc_portid const *origin);
+
+typedef void (*tipc_named_msg_event) (void *usr_handle,
+ u32 portref,
+ struct sk_buff **buf,
+ unsigned char const *data,
+ unsigned int size,
+ unsigned int importance,
+ struct tipc_portid const *orig,
+ struct tipc_name_seq const *dest);
+
+typedef void (*tipc_conn_msg_event) (void *usr_handle,
+ u32 portref,
+ struct sk_buff **buf,
+ unsigned char const *data,
+ unsigned int size);
+
+typedef void (*tipc_continue_event) (void *usr_handle,
+ u32 portref);
+
+int tipc_createport(unsigned int tipc_user,
+ void *usr_handle,
+ unsigned int importance,
+ tipc_msg_err_event error_cb,
+ tipc_named_msg_err_event named_error_cb,
+ tipc_conn_shutdown_event conn_error_cb,
+ tipc_msg_event message_cb,
+ tipc_named_msg_event named_message_cb,
+ tipc_conn_msg_event conn_message_cb,
+ tipc_continue_event continue_event_cb,/* May be zero */
+ u32 *portref);
+
+int tipc_deleteport(u32 portref);
+
+int tipc_ownidentity(u32 portref, struct tipc_portid *port);
+
+int tipc_portimportance(u32 portref, unsigned int *importance);
+int tipc_set_portimportance(u32 portref, unsigned int importance);
+
+int tipc_portunreliable(u32 portref, unsigned int *isunreliable);
+int tipc_set_portunreliable(u32 portref, unsigned int isunreliable);
+
+int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable);
+int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable);
+
+int tipc_publish(u32 portref, unsigned int scope,
+ struct tipc_name_seq const *name_seq);
+int tipc_withdraw(u32 portref, unsigned int scope,
+ struct tipc_name_seq const *name_seq); /* 0: all */
+
+int tipc_connect2port(u32 portref, struct tipc_portid const *port);
+
+int tipc_disconnect(u32 portref);
+
+int tipc_shutdown(u32 ref); /* Sends SHUTDOWN msg */
+
+int tipc_isconnected(u32 portref, int *isconnected);
+
+int tipc_peer(u32 portref, struct tipc_portid *peer);
+
+int tipc_ref_valid(u32 portref);
+
+/*
+ * TIPC messaging routines
+ */
+
+#define TIPC_PORT_IMPORTANCE 100 /* send using current port setting */
+
+
+int tipc_send(u32 portref,
+ unsigned int num_sect,
+ struct iovec const *msg_sect);
+
+int tipc_send_buf(u32 portref,
+ struct sk_buff *buf,
+ unsigned int dsz);
+
+int tipc_send2name(u32 portref,
+ struct tipc_name const *name,
+ u32 domain, /* 0:own zone */
+ unsigned int num_sect,
+ struct iovec const *msg_sect);
+
+int tipc_send_buf2name(u32 portref,
+ struct tipc_name const *name,
+ u32 domain,
+ struct sk_buff *buf,
+ unsigned int dsz);
+
+int tipc_forward2name(u32 portref,
+ struct tipc_name const *name,
+ u32 domain, /*0: own zone */
+ unsigned int section_count,
+ struct iovec const *msg_sect,
+ struct tipc_portid const *origin,
+ unsigned int importance);
+
+int tipc_forward_buf2name(u32 portref,
+ struct tipc_name const *name,
+ u32 domain,
+ struct sk_buff *buf,
+ unsigned int dsz,
+ struct tipc_portid const *orig,
+ unsigned int importance);
+
+int tipc_send2port(u32 portref,
+ struct tipc_portid const *dest,
+ unsigned int num_sect,
+ struct iovec const *msg_sect);
+
+int tipc_send_buf2port(u32 portref,
+ struct tipc_portid const *dest,
+ struct sk_buff *buf,
+ unsigned int dsz);
+
+int tipc_forward2port(u32 portref,
+ struct tipc_portid const *dest,
+ unsigned int num_sect,
+ struct iovec const *msg_sect,
+ struct tipc_portid const *origin,
+ unsigned int importance);
+
+int tipc_forward_buf2port(u32 portref,
+ struct tipc_portid const *dest,
+ struct sk_buff *buf,
+ unsigned int dsz,
+ struct tipc_portid const *orig,
+ unsigned int importance);
+
+int tipc_multicast(u32 portref,
+ struct tipc_name_seq const *seq,
+ u32 domain, /* 0:own zone */
+ unsigned int section_count,
+ struct iovec const *msg);
+
+#if 0
+int tipc_multicast_buf(u32 portref,
+ struct tipc_name_seq const *seq,
+ u32 domain, /* 0:own zone */
+ void *buf,
+ unsigned int size);
+#endif
+
+/*
+ * TIPC subscription routines
+ */
+
+int tipc_ispublished(struct tipc_name const *name);
+
+/*
+ * Get number of available nodes within specified domain (excluding own node)
+ */
+
+unsigned int tipc_available_nodes(const u32 domain);
+
+#endif
+
+#endif
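A minimal usage sketch of the native API declared above, not part of the patch itself: a kernel-space user attaches, creates a port with a named-message callback, publishes a name and sends to that name. The callback body, the type/instance values (1000/1) and identifiers such as my_named_msg_cb are invented for illustration, and TIPC_LOW_IMPORTANCE / TIPC_CLUSTER_SCOPE are assumed to come from <linux/tipc.h>.

/* Illustrative sketch of the native TIPC API declared above. */
static void my_named_msg_cb(void *usr_handle, u32 portref,
			    struct sk_buff **buf,
			    unsigned char const *data, unsigned int size,
			    unsigned int importance,
			    struct tipc_portid const *orig,
			    struct tipc_name_seq const *dest)
{
	/* "size" bytes of payload are available at "data" */
}

static int my_demo(void)
{
	unsigned int my_user;
	u32 my_port;
	struct tipc_name_seq seq = { .type = 1000, .lower = 1, .upper = 1 };
	struct tipc_name name = { .type = 1000, .instance = 1 };
	struct iovec iov = { .iov_base = "hello", .iov_len = 5 };
	int err;

	err = tipc_attach(&my_user, NULL, NULL);	/* no mode-event callback */
	if (err)
		return err;

	err = tipc_createport(my_user, NULL, TIPC_LOW_IMPORTANCE,
			      NULL, NULL, NULL,		/* error callbacks unused */
			      NULL, my_named_msg_cb,	/* named messages only */
			      NULL, NULL, &my_port);
	if (err)
		return err;

	tipc_publish(my_port, TIPC_CLUSTER_SCOPE, &seq);
	return tipc_send2name(my_port, &name, 0 /* own zone */, 1, &iov);
}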
diff --git a/include/net/tipc/tipc_bearer.h b/include/net/tipc/tipc_bearer.h
new file mode 100644
index 00000000000..098607cd4b7
--- /dev/null
+++ b/include/net/tipc/tipc_bearer.h
@@ -0,0 +1,121 @@
+/*
+ * include/net/tipc/tipc_bearer.h: Include file for privileged access to TIPC bearers
+ *
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NET_TIPC_BEARER_H_
+#define _NET_TIPC_BEARER_H_
+
+#ifdef __KERNEL__
+
+#include <linux/tipc_config.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+/*
+ * Identifiers of supported TIPC media types
+ */
+
+#define TIPC_MEDIA_TYPE_ETH 1
+
+struct tipc_media_addr {
+ __u32 type;
+ union {
+ __u8 eth_addr[6]; /* Ethernet bearer */
+#if 0
+ /* Prototypes for other possible bearer types */
+
+ struct {
+ __u16 sin_family;
+ __u16 sin_port;
+ struct {
+ __u32 s_addr;
+ } sin_addr;
+ char pad[4];
+ } addr_in; /* IP-based bearer */
+ __u16 sock_descr; /* generic socket bearer */
+#endif
+ } dev_addr;
+};
+
+/**
+ * struct tipc_bearer - TIPC bearer info available to privileged users
+ * @usr_handle: pointer to additional user-defined information about bearer
+ * @mtu: max packet size bearer can support
+ * @blocked: non-zero if bearer is blocked
+ * @lock: spinlock for controlling access to bearer
+ * @addr: media-specific address associated with bearer
+ * @name: bearer name (format = media:interface)
+ *
+ * Note: TIPC initializes the "name" and "lock" fields; the user is responsible
+ * for initializing all other fields when a bearer is enabled.

+ */
+
+struct tipc_bearer {
+ void *usr_handle;
+ u32 mtu;
+ int blocked;
+ spinlock_t lock;
+ struct tipc_media_addr addr;
+ char name[TIPC_MAX_BEARER_NAME];
+};
+
+
+int tipc_register_media(u32 media_type,
+ char *media_name,
+ int (*enable)(struct tipc_bearer *),
+ void (*disable)(struct tipc_bearer *),
+ int (*send_msg)(struct sk_buff *,
+ struct tipc_bearer *,
+ struct tipc_media_addr *),
+ char *(*addr2str)(struct tipc_media_addr *a,
+ char *str_buf,
+ int str_size),
+ struct tipc_media_addr *bcast_addr,
+ const u32 bearer_priority,
+ const u32 link_tolerance, /* [ms] */
+ const u32 send_window_limit);
+
+void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
+
+int tipc_block_bearer(const char *name);
+void tipc_continue(struct tipc_bearer *tb_ptr);
+
+int tipc_enable_bearer(const char *bearer_name, u32 bcast_scope, u32 priority);
+int tipc_disable_bearer(const char *name);
+
+
+#endif
+
+#endif
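The registration entry point above takes the media callbacks as separate function pointers. As a rough sketch, not part of the patch, an Ethernet-type media driver might register itself as follows; the callback bodies and the literal priority/tolerance/window values are placeholders rather than values mandated by TIPC.

/* Illustrative sketch of registering a TIPC media type. */
static struct tipc_media_addr my_bcast = {
	.type = TIPC_MEDIA_TYPE_ETH,
	.dev_addr = { .eth_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } },
};

static int my_enable(struct tipc_bearer *tb)
{
	/* bind to the underlying device, then fill in tb->mtu, tb->addr, ... */
	return 0;
}

static void my_disable(struct tipc_bearer *tb)
{
}

static int my_send(struct sk_buff *skb, struct tipc_bearer *tb,
		   struct tipc_media_addr *dest)
{
	/* hand "skb" to the underlying device, addressed to "dest" */
	return 0;
}

static char *my_addr2str(struct tipc_media_addr *a, char *buf, int sz)
{
	snprintf(buf, sz, "%02x:...", a->dev_addr.eth_addr[0]);
	return buf;
}

static int my_media_init(void)
{
	return tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth",
				   my_enable, my_disable, my_send, my_addr2str,
				   &my_bcast,
				   10,		/* bearer priority (placeholder) */
				   1500,	/* link tolerance [ms] (placeholder) */
				   50);		/* send window limit (placeholder) */
}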
diff --git a/include/net/tipc/tipc_msg.h b/include/net/tipc/tipc_msg.h
new file mode 100644
index 00000000000..4d096eebc93
--- /dev/null
+++ b/include/net/tipc/tipc_msg.h
@@ -0,0 +1,223 @@
+/*
+ * include/net/tipc/tipc_msg.h: Include file for privileged access to TIPC message headers
+ *
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NET_TIPC_MSG_H_
+#define _NET_TIPC_MSG_H_
+
+#ifdef __KERNEL__
+
+struct tipc_msg {
+ u32 hdr[15];
+};
+
+
+/*
+ TIPC user data message header format, version 2:
+
+
+ 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ w0:|vers | user |hdr sz |n|d|s|-| message size |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ w1:|mstyp| error |rer cnt|lsc|opt p| broadcast ack no |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ w2:| link level ack no | broadcast/link level seq no |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ w3:| previous node |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ w4:| originating port |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ w5:| destination port |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ w6:| originating node |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ w7:| destination node |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ w8:| name type / transport sequence number |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ w9:| name instance/multicast lower bound |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ wA:| multicast upper bound |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ / /
+ \ options \
+ / /
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+*/
+
+#define TIPC_CONN_MSG 0
+#define TIPC_MCAST_MSG 1
+#define TIPC_NAMED_MSG 2
+#define TIPC_DIRECT_MSG 3
+
+
+static inline u32 msg_word(struct tipc_msg *m, u32 pos)
+{
+ return ntohl(m->hdr[pos]);
+}
+
+static inline u32 msg_bits(struct tipc_msg *m, u32 w, u32 pos, u32 mask)
+{
+ return (msg_word(m, w) >> pos) & mask;
+}
+
+static inline u32 msg_importance(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 25, 0xf);
+}
+
+static inline u32 msg_hdr_sz(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 21, 0xf) << 2;
+}
+
+static inline int msg_short(struct tipc_msg *m)
+{
+ return (msg_hdr_sz(m) == 24);
+}
+
+static inline u32 msg_size(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 0, 0x1ffff);
+}
+
+static inline u32 msg_data_sz(struct tipc_msg *m)
+{
+ return (msg_size(m) - msg_hdr_sz(m));
+}
+
+static inline unchar *msg_data(struct tipc_msg *m)
+{
+ return ((unchar *)m) + msg_hdr_sz(m);
+}
+
+static inline u32 msg_type(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 29, 0x7);
+}
+
+static inline u32 msg_direct(struct tipc_msg *m)
+{
+ return (msg_type(m) == TIPC_DIRECT_MSG);
+}
+
+static inline u32 msg_named(struct tipc_msg *m)
+{
+ return (msg_type(m) == TIPC_NAMED_MSG);
+}
+
+static inline u32 msg_mcast(struct tipc_msg *m)
+{
+ return (msg_type(m) == TIPC_MCAST_MSG);
+}
+
+static inline u32 msg_connected(struct tipc_msg *m)
+{
+ return (msg_type(m) == TIPC_CONN_MSG);
+}
+
+static inline u32 msg_errcode(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 25, 0xf);
+}
+
+static inline u32 msg_prevnode(struct tipc_msg *m)
+{
+ return msg_word(m, 3);
+}
+
+static inline u32 msg_origport(struct tipc_msg *m)
+{
+ return msg_word(m, 4);
+}
+
+static inline u32 msg_destport(struct tipc_msg *m)
+{
+ return msg_word(m, 5);
+}
+
+static inline u32 msg_mc_netid(struct tipc_msg *m)
+{
+ return msg_word(m, 5);
+}
+
+static inline u32 msg_orignode(struct tipc_msg *m)
+{
+ if (likely(msg_short(m)))
+ return msg_prevnode(m);
+ return msg_word(m, 6);
+}
+
+static inline u32 msg_destnode(struct tipc_msg *m)
+{
+ return msg_word(m, 7);
+}
+
+static inline u32 msg_nametype(struct tipc_msg *m)
+{
+ return msg_word(m, 8);
+}
+
+static inline u32 msg_nameinst(struct tipc_msg *m)
+{
+ return msg_word(m, 9);
+}
+
+static inline u32 msg_namelower(struct tipc_msg *m)
+{
+ return msg_nameinst(m);
+}
+
+static inline u32 msg_nameupper(struct tipc_msg *m)
+{
+ return msg_word(m, 10);
+}
+
+static inline char *msg_options(struct tipc_msg *m, u32 *len)
+{
+ u32 pos = msg_bits(m, 1, 16, 0x7);
+
+ if (!pos)
+ return 0;
+ pos = (pos * 4) + 28;
+ *len = msg_hdr_sz(m) - pos;
+ return (char *)&m->hdr[pos/4];
+}
+
+#endif
+
+#endif
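Since every accessor above is a thin wrapper around msg_bits(), a short worked example may help. The sample word-0 value below is invented purely to show how the fields fall out (htonl() mirrors the ntohl() used by msg_word()); it is not part of the patch.

/* Worked example: decoding word 0 with the accessors above. */
static void my_hdr_demo(struct tipc_msg *m)
{
	m->hdr[0] = htonl((2u << 29) |	/* version  (bits 31-29)             */
			  (6u << 21) |	/* hdr sz   (bits 24-21) = 6 words   */
			  100u);	/* msg size (bits 16-0)  = 100 bytes */

	/* msg_hdr_sz(m)  = msg_bits(m, 0, 21, 0xf) << 2 = 6 << 2 = 24   */
	/* msg_short(m)   = 1, since 24 bytes is the short header form   */
	/* msg_data_sz(m) = msg_size(m) - msg_hdr_sz(m) = 100 - 24 = 76  */
}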
diff --git a/include/net/tipc/tipc_port.h b/include/net/tipc/tipc_port.h
new file mode 100644
index 00000000000..333bba6dc52
--- /dev/null
+++ b/include/net/tipc/tipc_port.h
@@ -0,0 +1,108 @@
+/*
+ * include/net/tipc/tipc_port.h: Include file for privileged access to TIPC ports
+ *
+ * Copyright (c) 1994-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NET_TIPC_PORT_H_
+#define _NET_TIPC_PORT_H_
+
+#ifdef __KERNEL__
+
+#include <linux/tipc.h>
+#include <linux/skbuff.h>
+#include <net/tipc/tipc_msg.h>
+
+#define TIPC_FLOW_CONTROL_WIN 512
+
+/**
+ * struct tipc_port - native TIPC port info available to privileged users
+ * @usr_handle: pointer to additional user-defined information about port
+ * @lock: pointer to spinlock for controlling access to port
+ * @connected: non-zero if port is currently connected to a peer port
+ * @conn_type: TIPC type used when connection was established
+ * @conn_instance: TIPC instance used when connection was established
+ * @conn_unacked: number of unacknowledged messages received from peer port
+ * @published: non-zero if port has one or more associated names
+ * @congested: non-zero if cannot send because of link or port congestion
+ * @ref: unique reference to port in TIPC object registry
+ * @phdr: preformatted message header used when sending messages
+ */
+
+struct tipc_port {
+ void *usr_handle;
+ spinlock_t *lock;
+ int connected;
+ u32 conn_type;
+ u32 conn_instance;
+ u32 conn_unacked;
+ int published;
+ u32 congested;
+ u32 ref;
+ struct tipc_msg phdr;
+};
+
+
+/**
+ * tipc_createport_raw - create a native TIPC port and return its reference
+ *
+ * Note: 'dispatcher' and 'wakeup' deliver a locked port.
+ */
+
+u32 tipc_createport_raw(void *usr_handle,
+ u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
+ void (*wakeup)(struct tipc_port *),
+ const u32 importance);
+
+/*
+ * tipc_set_msg_option(): port must be locked.
+ */
+int tipc_set_msg_option(struct tipc_port *tp_ptr,
+ const char *opt,
+ const u32 len);
+
+int tipc_reject_msg(struct sk_buff *buf, u32 err);
+
+int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode);
+
+void tipc_acknowledge(u32 port_ref,u32 ack);
+
+struct tipc_port *tipc_get_port(const u32 ref);
+
+void *tipc_get_handle(const u32 ref);
+
+
+#endif
+
+#endif
+
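A minimal sketch, not part of the patch, of creating a raw port with the call above; both callbacks run with the port locked, as the note says. The identifiers, the use of TIPC_LOW_IMPORTANCE and TIPC_ERR_NO_PORT from <linux/tipc.h>, and the assumption that a zero return denotes failure are illustrative only.

/* Illustrative sketch of tipc_createport_raw(). */
static u32 my_dispatcher(struct tipc_port *p, struct sk_buff *buf)
{
	/* port is locked here; bounce everything back to the sender */
	return tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
}

static void my_wakeup(struct tipc_port *p)
{
	/* port is locked here; called once congestion has cleared */
}

static u32 my_raw_port(void *handle)
{
	u32 ref = tipc_createport_raw(handle, my_dispatcher, my_wakeup,
				      TIPC_LOW_IMPORTANCE);
	return ref;		/* assumed: 0 means creation failed */
}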
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 07d7b50cdd7..d09ca0e7d13 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -668,7 +668,7 @@ static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *s
return xfrm_policy_check(sk, dir, skb, AF_INET6);
}
-
+extern int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family);
extern int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
@@ -831,7 +831,7 @@ struct xfrm_tunnel {
};
struct xfrm6_tunnel {
- int (*handler)(struct sk_buff **pskb, unsigned int *nhoffp);
+ int (*handler)(struct sk_buff **pskb);
void (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
int type, int code, int offset, __u32 info);
};
@@ -866,10 +866,11 @@ extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
extern int xfrm_init_state(struct xfrm_state *x);
extern int xfrm4_rcv(struct sk_buff *skb);
extern int xfrm4_output(struct sk_buff *skb);
+extern int xfrm4_output_finish(struct sk_buff *skb);
extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler);
extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler);
-extern int xfrm6_rcv_spi(struct sk_buff **pskb, unsigned int *nhoffp, u32 spi);
-extern int xfrm6_rcv(struct sk_buff **pskb, unsigned int *nhoffp);
+extern int xfrm6_rcv_spi(struct sk_buff **pskb, u32 spi);
+extern int xfrm6_rcv(struct sk_buff **pskb);
extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler);
extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler);
extern u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr);
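For reference, a tunnel handler written against the new prototype simply drops the nhoffp argument; a bare-bones sketch, not part of the patch and with the packet handling elided, looks like this.

/* Illustrative sketch: xfrm6 tunnel handler with the new prototype. */
static int my_xfrm6_rcv(struct sk_buff **pskb)
{
	/* process the tunnelled packet here */
	return 0;
}

static void my_xfrm6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			 int type, int code, int offset, __u32 info)
{
}

static struct xfrm6_tunnel my_xfrm6_tunnel = {
	.handler     = my_xfrm6_rcv,
	.err_handler = my_xfrm6_err,
};

/* registered at init time with xfrm6_tunnel_register(&my_xfrm6_tunnel) */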
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index a7f4c355a91..22fc886b969 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -88,7 +88,6 @@ enum ib_atomic_cap {
struct ib_device_attr {
u64 fw_ver;
- __be64 node_guid;
__be64 sys_image_guid;
u64 max_mr_size;
u64 page_size_cap;
@@ -951,6 +950,7 @@ struct ib_device {
u64 uverbs_cmd_mask;
int uverbs_abi_ver;
+ __be64 node_guid;
u8 node_type;
u8 phys_port_cnt;
};
diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
index 850dfa877fd..02e26c1672b 100644
--- a/include/scsi/scsi_driver.h
+++ b/include/scsi/scsi_driver.h
@@ -15,7 +15,6 @@ struct scsi_driver {
void (*rescan)(struct device *);
int (*issue_flush)(struct device *, sector_t *);
int (*prepare_flush)(struct request_queue *, struct request *);
- void (*end_flush)(struct request_queue *, struct request *);
};
#define to_scsi_driver(drv) \
container_of((drv), struct scsi_driver, gendrv)
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 6cbb1982ed0..230bc55c0bf 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -296,6 +296,12 @@ struct scsi_host_template {
int (*proc_info)(struct Scsi_Host *, char *, char **, off_t, int, int);
/*
+ * suspend support
+ */
+ int (*resume)(struct scsi_device *);
+ int (*suspend)(struct scsi_device *);
+
+ /*
* Name of proc directory
*/
char *proc_name;
@@ -392,7 +398,6 @@ struct scsi_host_template {
/*
* ordered write support
*/
- unsigned ordered_flush:1;
unsigned ordered_tag:1;
/*
diff --git a/include/sound/core.h b/include/sound/core.h
index 90ac6132ea3..3093e3ddcf3 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -317,7 +317,7 @@ void snd_verbose_printd(const char *file, int line, const char *format, ...)
#ifdef CONFIG_SND_VERBOSE_PRINTK
/**
* snd_printd - debug printk
- * @format: format string
+ * @fmt: format string
*
 * Works like snd_printk() for debugging purposes.
* Ignored when CONFIG_SND_DEBUG is not set.
@@ -331,7 +331,6 @@ void snd_verbose_printd(const char *file, int line, const char *format, ...)
/**
* snd_assert - run-time assertion macro
* @expr: expression
- * @args...: the action
*
* This macro checks the expression in run-time and invokes the commands
* given in the rest arguments if the assertion is failed.
diff --git a/include/sound/wavefront.h b/include/sound/wavefront.h
index 9e572aed243..15d82e594b5 100644
--- a/include/sound/wavefront.h
+++ b/include/sound/wavefront.h
@@ -454,22 +454,22 @@ typedef struct wf_multisample {
} wavefront_multisample;
typedef struct wf_alias {
- s16 OriginalSample __attribute__ ((packed));
-
- struct wf_sample_offset sampleStartOffset __attribute__ ((packed));
- struct wf_sample_offset loopStartOffset __attribute__ ((packed));
- struct wf_sample_offset sampleEndOffset __attribute__ ((packed));
- struct wf_sample_offset loopEndOffset __attribute__ ((packed));
-
- s16 FrequencyBias __attribute__ ((packed));
-
- u8 SampleResolution:2 __attribute__ ((packed));
- u8 Unused1:1 __attribute__ ((packed));
- u8 Loop:1 __attribute__ ((packed));
- u8 Bidirectional:1 __attribute__ ((packed));
- u8 Unused2:1 __attribute__ ((packed));
- u8 Reverse:1 __attribute__ ((packed));
- u8 Unused3:1 __attribute__ ((packed));
+ s16 OriginalSample;
+
+ struct wf_sample_offset sampleStartOffset;
+ struct wf_sample_offset loopStartOffset;
+ struct wf_sample_offset sampleEndOffset;
+ struct wf_sample_offset loopEndOffset;
+
+ s16 FrequencyBias;
+
+ u8 SampleResolution:2;
+ u8 Unused1:1;
+ u8 Loop:1;
+ u8 Bidirectional:1;
+ u8 Unused2:1;
+ u8 Reverse:1;
+ u8 Unused3:1;
/* This structure is meant to be padded only to 16 bits on their
original. Of course, whoever wrote their documentation didn't
@@ -480,8 +480,8 @@ typedef struct wf_alias {
standard 16->32 bit issues.
*/
- u8 sixteen_bit_padding __attribute__ ((packed));
-} wavefront_alias;
+ u8 sixteen_bit_padding;
+} __attribute__((packed)) wavefront_alias;
typedef struct wf_drum {
u8 PatchNumber;
diff --git a/include/video/cyblafb.h b/include/video/cyblafb.h
index a9948232b13..71744057538 100644
--- a/include/video/cyblafb.h
+++ b/include/video/cyblafb.h
@@ -153,6 +153,10 @@
#define GE04 (GEBase+0x04) // source 2, p 111
#define GE08 (GEBase+0x08) // destination 1, p 111
#define GE0C (GEBase+0x0C) // destination 2, p 112
+#define GE10 (GEBase+0x10) // right view base & enable, p 112
+#define GE13 (GEBase+0x13) // left view base & enable, p 112
+#define GE18 (GEBase+0x18) // block write start address, p 112
+#define GE1C (GEBase+0x1C) // block write end address, p 112
#define GE20 (GEBase+0x20) // engine status, p 113
#define GE24 (GEBase+0x24) // reset all GE pointers
#define GE44 (GEBase+0x44) // command register, p 126
diff --git a/include/video/kyro.h b/include/video/kyro.h
index 1bed37cfa68..dba7de2ee4a 100644
--- a/include/video/kyro.h
+++ b/include/video/kyro.h
@@ -15,6 +15,7 @@
struct kyrofb_info {
void __iomem *regbase;
+ u32 palette[16];
u32 HTot; /* Hor Total Time */
u32 HFP; /* Hor Front Porch */
u32 HST; /* Hor Sync Time */
diff --git a/include/video/neomagic.h b/include/video/neomagic.h
index bdaee70868d..1d69049bd4c 100644
--- a/include/video/neomagic.h
+++ b/include/video/neomagic.h
@@ -196,6 +196,7 @@ struct neofb_par {
int internal_display;
int external_display;
int libretto;
+ u32 palette[16];
};
typedef struct {
diff --git a/include/video/newport.h b/include/video/newport.h
index 812dac5b55f..1f5ebeaa818 100644
--- a/include/video/newport.h
+++ b/include/video/newport.h
@@ -382,7 +382,8 @@ typedef struct {
#define VC2_IREG_CONTROL 0x10
#define VC2_IREG_CONFIG 0x20
-extern __inline__ void newport_vc2_set(struct newport_regs *regs, unsigned char vc2ireg,
+static inline void newport_vc2_set(struct newport_regs *regs,
+ unsigned char vc2ireg,
unsigned short val)
{
regs->set.dcbmode = (NPORT_DMODE_AVC2 | VC2_REGADDR_INDEX | NPORT_DMODE_W3 |
@@ -390,7 +391,7 @@ extern __inline__ void newport_vc2_set(struct newport_regs *regs, unsigned char
regs->set.dcbdata0.byword = (vc2ireg << 24) | (val << 8);
}
-extern __inline__ unsigned short newport_vc2_get(struct newport_regs *regs,
+static inline unsigned short newport_vc2_get(struct newport_regs *regs,
unsigned char vc2ireg)
{
regs->set.dcbmode = (NPORT_DMODE_AVC2 | VC2_REGADDR_INDEX | NPORT_DMODE_W1 |
diff --git a/include/video/sstfb.h b/include/video/sstfb.h
index 0d77b520537..3570f9c9b11 100644
--- a/include/video/sstfb.h
+++ b/include/video/sstfb.h
@@ -334,6 +334,7 @@ struct sst_spec {
};
struct sstfb_par {
+ u32 palette[16];
unsigned int yDim;
unsigned int hSyncOn; /* hsync_len */
unsigned int hSyncOff; /* left_margin + xres + right_margin */
diff --git a/include/video/tdfx.h b/include/video/tdfx.h
index 04237676b17..c1cc94ba3fd 100644
--- a/include/video/tdfx.h
+++ b/include/video/tdfx.h
@@ -140,52 +140,52 @@
#ifdef __KERNEL__
struct banshee_reg {
- /* VGA rubbish */
- unsigned char att[21];
- unsigned char crt[25];
- unsigned char gra[ 9];
- unsigned char misc[1];
- unsigned char seq[ 5];
-
- /* Banshee extensions */
- unsigned char ext[2];
- unsigned long vidcfg;
- unsigned long vidpll;
- unsigned long mempll;
- unsigned long gfxpll;
- unsigned long dacmode;
- unsigned long vgainit0;
- unsigned long vgainit1;
- unsigned long screensize;
- unsigned long stride;
- unsigned long cursloc;
- unsigned long curspataddr;
- unsigned long cursc0;
- unsigned long cursc1;
- unsigned long startaddr;
- unsigned long clip0min;
- unsigned long clip0max;
- unsigned long clip1min;
- unsigned long clip1max;
- unsigned long srcbase;
- unsigned long dstbase;
- unsigned long miscinit0;
+ /* VGA rubbish */
+ unsigned char att[21];
+ unsigned char crt[25];
+ unsigned char gra[ 9];
+ unsigned char misc[1];
+ unsigned char seq[ 5];
+
+ /* Banshee extensions */
+ unsigned char ext[2];
+ unsigned long vidcfg;
+ unsigned long vidpll;
+ unsigned long mempll;
+ unsigned long gfxpll;
+ unsigned long dacmode;
+ unsigned long vgainit0;
+ unsigned long vgainit1;
+ unsigned long screensize;
+ unsigned long stride;
+ unsigned long cursloc;
+ unsigned long curspataddr;
+ unsigned long cursc0;
+ unsigned long cursc1;
+ unsigned long startaddr;
+ unsigned long clip0min;
+ unsigned long clip0max;
+ unsigned long clip1min;
+ unsigned long clip1max;
+ unsigned long srcbase;
+ unsigned long dstbase;
+ unsigned long miscinit0;
};
struct tdfx_par {
- u32 max_pixclock;
-
- void __iomem *regbase_virt;
- unsigned long iobase;
- u32 baseline;
-
- struct {
- int w,u,d;
- unsigned long enable,disable;
- struct timer_list timer;
- } hwcursor;
-
- spinlock_t DAClock;
+ u32 max_pixclock;
+ u32 palette[16];
+ void __iomem *regbase_virt;
+ unsigned long iobase;
+ u32 baseline;
+
+ struct {
+ int w,u,d;
+ unsigned long enable,disable;
+ struct timer_list timer;
+ } hwcursor;
+
+ spinlock_t DAClock;
};
#endif /* __KERNEL__ */
diff --git a/init/Kconfig b/init/Kconfig
index ce737e02c5a..25f4d74adf7 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -105,7 +105,6 @@ config SWAP
config SYSVIPC
bool "System V IPC"
- depends on MMU
---help---
Inter Process Communication is a suite of library functions and
system calls which let processes (running programs) synchronize and
@@ -190,7 +189,7 @@ config AUDIT
config AUDITSYSCALL
bool "Enable system-call auditing support"
- depends on AUDIT && (X86 || PPC || PPC64 || ARCH_S390 || IA64 || UML || SPARC64)
+ depends on AUDIT && (X86 || PPC || PPC64 || S390 || IA64 || UML || SPARC64)
default y if SECURITY_SELINUX
help
Enable low-overhead system-call auditing infrastructure that
@@ -229,6 +228,23 @@ config CPUSETS
source "usr/Kconfig"
+config UID16
+ bool "Enable 16-bit UID system calls" if EMBEDDED
+ depends on ARM || CRIS || FRV || H8300 || X86_32 || M68K || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && SPARC32_COMPAT) || UML || (X86_64 && IA32_EMULATION)
+ default y
+ help
+ This enables the legacy 16-bit UID syscall wrappers.
+
+config VM86
+ depends X86
+ default y
+ bool "Enable VM86 support" if EMBEDDED
+ help
+ This option is required by programs like DOSEMU to run 16-bit legacy
+ code on X86 processors. It also may be needed by software like
+ XFree86 to initialize some video cards via BIOS. Disabling this
+ option saves about 6k.
+
config CC_OPTIMIZE_FOR_SIZE
bool "Optimize for size (Look out for broken compilers!)"
default y
@@ -310,6 +326,12 @@ config BUG
option for embedded systems with no facilities for reporting errors.
Just say Y.
+config ELF_CORE
+ default y
+ bool "Enable ELF core dumps" if EMBEDDED
+ help
+ Enable support for generating core dumps. Disabling saves about 4k.
+
config BASE_FULL
default y
bool "Enable full-sized data structures for core" if EMBEDDED
@@ -381,6 +403,15 @@ config CC_ALIGN_JUMPS
no dummy operations need be executed.
Zero means use compiler's default.
+config SLAB
+ default y
+ bool "Use full SLAB allocator" if EMBEDDED
+ help
+ Disabling this replaces the advanced SLAB allocator and
+ kmalloc support with the drastically simpler SLOB allocator.
+ SLOB is more space efficient but does not scale well and is
+ more susceptible to fragmentation.
+
endmenu # General setup
config TINY_SHMEM
@@ -392,6 +423,10 @@ config BASE_SMALL
default 0 if BASE_FULL
default 1 if !BASE_FULL
+config SLOB
+ default !SLAB
+ bool
+
menu "Loadable module support"
config MODULES
diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
index 3fbc3555ce9..f6f36806f84 100644
--- a/init/do_mounts_md.c
+++ b/init/do_mounts_md.c
@@ -17,7 +17,7 @@ static int __initdata raid_noautodetect, raid_autopart;
static struct {
int minor;
int partitioned;
- int pers;
+ int level;
int chunk;
char *device_names;
} md_setup_args[MAX_MD_DEVS] __initdata;
@@ -47,7 +47,7 @@ extern int mdp_major;
*/
static int __init md_setup(char *str)
{
- int minor, level, factor, fault, pers, partitioned = 0;
+ int minor, level, factor, fault, partitioned = 0;
char *pername = "";
char *str1;
int ent;
@@ -78,7 +78,7 @@ static int __init md_setup(char *str)
}
if (ent >= md_setup_ents)
md_setup_ents++;
- switch (get_option(&str, &level)) { /* RAID Personality */
+ switch (get_option(&str, &level)) { /* RAID level */
case 2: /* could be 0 or -1.. */
if (level == 0 || level == LEVEL_LINEAR) {
if (get_option(&str, &factor) != 2 || /* Chunk Size */
@@ -86,16 +86,12 @@ static int __init md_setup(char *str)
printk(KERN_WARNING "md: Too few arguments supplied to md=.\n");
return 0;
}
- md_setup_args[ent].pers = level;
+ md_setup_args[ent].level = level;
md_setup_args[ent].chunk = 1 << (factor+12);
- if (level == LEVEL_LINEAR) {
- pers = LINEAR;
+ if (level == LEVEL_LINEAR)
pername = "linear";
- } else {
- pers = RAID0;
+ else
pername = "raid0";
- }
- md_setup_args[ent].pers = pers;
break;
}
/* FALL THROUGH */
@@ -103,7 +99,7 @@ static int __init md_setup(char *str)
str = str1;
/* FALL THROUGH */
case 0:
- md_setup_args[ent].pers = 0;
+ md_setup_args[ent].level = LEVEL_NONE;
pername="super-block";
}
@@ -190,10 +186,10 @@ static void __init md_setup_drive(void)
continue;
}
- if (md_setup_args[ent].pers) {
+ if (md_setup_args[ent].level != LEVEL_NONE) {
/* non-persistent */
mdu_array_info_t ainfo;
- ainfo.level = pers_to_level(md_setup_args[ent].pers);
+ ainfo.level = md_setup_args[ent].level;
ainfo.size = 0;
ainfo.nr_disks =0;
ainfo.raid_disks =0;
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index c10b08a8098..c2683fcd792 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -145,7 +145,7 @@ int __init rd_load_image(char *from)
int nblocks, i, disk;
char *buf = NULL;
unsigned short rotate = 0;
-#if !defined(CONFIG_ARCH_S390) && !defined(CONFIG_PPC_ISERIES)
+#if !defined(CONFIG_S390) && !defined(CONFIG_PPC_ISERIES)
char rotator[4] = { '|' , '/' , '-' , '\\' };
#endif
@@ -237,7 +237,7 @@ int __init rd_load_image(char *from)
}
sys_read(in_fd, buf, BLOCK_SIZE);
sys_write(out_fd, buf, BLOCK_SIZE);
-#if !defined(CONFIG_ARCH_S390) && !defined(CONFIG_PPC_ISERIES)
+#if !defined(CONFIG_S390) && !defined(CONFIG_PPC_ISERIES)
if (!(i % 16)) {
printk("%c\b", rotator[rotate & 0x3]);
rotate++;
diff --git a/init/main.c b/init/main.c
index 54aaf561cf6..e092b1979a9 100644
--- a/init/main.c
+++ b/init/main.c
@@ -52,16 +52,12 @@
#include <asm/bugs.h>
#include <asm/setup.h>
#include <asm/sections.h>
+#include <asm/cacheflush.h>
/*
* This is one of the first .c files built. Error out early
* if we have compiler trouble..
*/
-#if __GNUC__ == 2 && __GNUC_MINOR__ == 96
-#ifdef CONFIG_FRAME_POINTER
-#error This compiler cannot compile correctly with frame pointers enabled
-#endif
-#endif
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/smp.h>
@@ -73,7 +69,7 @@
* To avoid associated bogus bug reports, we flatly refuse to compile
* with a gcc that is known to be too old from the very beginning.
*/
-#if __GNUC__ < 2 || (__GNUC__ == 2 && __GNUC_MINOR__ < 95)
+#if (__GNUC__ < 3) || (__GNUC__ == 3 && __GNUC_MINOR__ < 2)
#error Sorry, your GCC is too old. It builds incorrect kernels.
#endif
@@ -99,6 +95,9 @@ extern void acpi_early_init(void);
#else
static inline void acpi_early_init(void) { }
#endif
+#ifndef CONFIG_DEBUG_RODATA
+static inline void mark_rodata_ro(void) { }
+#endif
#ifdef CONFIG_TC
extern void tc_init(void);
@@ -486,6 +485,7 @@ asmlinkage void __init start_kernel(void)
init_IRQ();
pidhash_init();
init_timers();
+ hrtimers_init();
softirq_init();
time_init();
@@ -508,6 +508,7 @@ asmlinkage void __init start_kernel(void)
}
#endif
vfs_caches_init_early();
+ cpuset_init_early();
mem_init();
kmem_cache_init();
setup_per_cpu_pageset();
@@ -708,6 +709,7 @@ static int init(void * unused)
*/
free_initmem();
unlock_kernel();
+ mark_rodata_ro();
system_state = SYSTEM_RUNNING;
numa_default_policy();
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index c8943b53d8e..4e776f9c80e 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -11,6 +11,7 @@
* This file is released under the GPL.
*/
+#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
@@ -660,7 +661,7 @@ asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode,
if (fd < 0)
goto out_putname;
- down(&mqueue_mnt->mnt_root->d_inode->i_sem);
+ mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
if (IS_ERR(dentry)) {
error = PTR_ERR(dentry);
@@ -697,7 +698,7 @@ out_putfd:
out_err:
fd = error;
out_upsem:
- up(&mqueue_mnt->mnt_root->d_inode->i_sem);
+ mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
out_putname:
putname(name);
return fd;
@@ -714,7 +715,7 @@ asmlinkage long sys_mq_unlink(const char __user *u_name)
if (IS_ERR(name))
return PTR_ERR(name);
- down(&mqueue_mnt->mnt_root->d_inode->i_sem);
+ mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
@@ -735,7 +736,7 @@ out_err:
dput(dentry);
out_unlock:
- up(&mqueue_mnt->mnt_root->d_inode->i_sem);
+ mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
putname(name);
if (inode)
iput(inode);
diff --git a/ipc/msg.c b/ipc/msg.c
index d035bd2aba9..a91b64763b8 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -15,6 +15,7 @@
* (c) 1999 Manfred Spraul <manfreds@colorfullife.com>
*/
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/msg.h>
diff --git a/ipc/sem.c b/ipc/sem.c
index cb5bb2a5df9..46bb8a678de 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -73,6 +73,7 @@
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
+#include <linux/capability.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include "util.h"
diff --git a/ipc/shm.c b/ipc/shm.c
index 587d836d80d..4c28d2d8e30 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -27,6 +27,7 @@
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
+#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
@@ -34,8 +35,6 @@
#include "util.h"
-#define shm_flags shm_perm.mode
-
static struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;
@@ -148,7 +147,7 @@ static void shm_close (struct vm_area_struct *shmd)
shp->shm_dtim = get_seconds();
shp->shm_nattch--;
if(shp->shm_nattch == 0 &&
- shp->shm_flags & SHM_DEST)
+ shp->shm_perm.mode & SHM_DEST)
shm_destroy (shp);
else
shm_unlock(shp);
@@ -157,14 +156,22 @@ static void shm_close (struct vm_area_struct *shmd)
static int shm_mmap(struct file * file, struct vm_area_struct * vma)
{
- file_accessed(file);
- vma->vm_ops = &shm_vm_ops;
- shm_inc(file->f_dentry->d_inode->i_ino);
- return 0;
+ int ret;
+
+ ret = shmem_mmap(file, vma);
+ if (ret == 0) {
+ vma->vm_ops = &shm_vm_ops;
+ shm_inc(file->f_dentry->d_inode->i_ino);
+ }
+
+ return ret;
}
static struct file_operations shm_file_operations = {
- .mmap = shm_mmap
+ .mmap = shm_mmap,
+#ifndef CONFIG_MMU
+ .get_unmapped_area = shmem_get_unmapped_area,
+#endif
};
static struct vm_operations_struct shm_vm_ops = {
@@ -197,7 +204,7 @@ static int newseg (key_t key, int shmflg, size_t size)
return -ENOMEM;
shp->shm_perm.key = key;
- shp->shm_flags = (shmflg & S_IRWXUGO);
+ shp->shm_perm.mode = (shmflg & S_IRWXUGO);
shp->mlock_user = NULL;
shp->shm_perm.security = NULL;
@@ -337,7 +344,7 @@ static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __
out->uid = tbuf.shm_perm.uid;
out->gid = tbuf.shm_perm.gid;
- out->mode = tbuf.shm_flags;
+ out->mode = tbuf.shm_perm.mode;
return 0;
}
@@ -350,7 +357,7 @@ static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __
out->uid = tbuf_old.shm_perm.uid;
out->gid = tbuf_old.shm_perm.gid;
- out->mode = tbuf_old.shm_flags;
+ out->mode = tbuf_old.shm_perm.mode;
return 0;
}
@@ -552,13 +559,13 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
if (!is_file_hugepages(shp->shm_file)) {
err = shmem_lock(shp->shm_file, 1, user);
if (!err) {
- shp->shm_flags |= SHM_LOCKED;
+ shp->shm_perm.mode |= SHM_LOCKED;
shp->mlock_user = user;
}
}
} else if (!is_file_hugepages(shp->shm_file)) {
shmem_lock(shp->shm_file, 0, shp->mlock_user);
- shp->shm_flags &= ~SHM_LOCKED;
+ shp->shm_perm.mode &= ~SHM_LOCKED;
shp->mlock_user = NULL;
}
shm_unlock(shp);
@@ -597,7 +604,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
goto out_unlock_up;
if (shp->shm_nattch){
- shp->shm_flags |= SHM_DEST;
+ shp->shm_perm.mode |= SHM_DEST;
/* Do not find it any more */
shp->shm_perm.key = IPC_PRIVATE;
shm_unlock(shp);
@@ -636,7 +643,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
shp->shm_perm.uid = setbuf.uid;
shp->shm_perm.gid = setbuf.gid;
- shp->shm_flags = (shp->shm_flags & ~S_IRWXUGO)
+ shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
| (setbuf.mode & S_IRWXUGO);
shp->shm_ctim = get_seconds();
break;
@@ -769,7 +776,7 @@ invalid:
BUG();
shp->shm_nattch--;
if(shp->shm_nattch == 0 &&
- shp->shm_flags & SHM_DEST)
+ shp->shm_perm.mode & SHM_DEST)
shm_destroy (shp);
else
shm_unlock(shp);
@@ -894,7 +901,7 @@ static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
return seq_printf(s, format,
shp->shm_perm.key,
shp->id,
- shp->shm_flags,
+ shp->shm_perm.mode,
shp->shm_segsz,
shp->shm_cprid,
shp->shm_lprid,
diff --git a/ipc/util.c b/ipc/util.c
index 23f1cec150c..38b9a0af3bd 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -20,6 +20,7 @@
#include <linux/smp_lock.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/capability.h>
#include <linux/highuid.h>
#include <linux/security.h>
#include <linux/rcupdate.h>
diff --git a/kernel/Makefile b/kernel/Makefile
index 4f5a1453093..355126606d1 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -7,8 +7,10 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
sysctl.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o pid.o \
rcupdate.o intermodule.o extable.o params.o posix-timers.o \
- kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o
+ kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
+ hrtimer.o
+obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
obj-$(CONFIG_FUTEX) += futex.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_SMP) += cpu.o spinlock.o
@@ -29,7 +31,6 @@ obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_SYSFS) += ksysfs.o
obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o
obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_SECCOMP) += seccomp.o
obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
diff --git a/kernel/acct.c b/kernel/acct.c
index 6312d6bd43e..065d8b4e51e 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -47,6 +47,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/acct.h>
+#include <linux/capability.h>
#include <linux/file.h>
#include <linux/tty.h>
#include <linux/security.h>
@@ -427,6 +428,7 @@ static void do_acct_process(long exitcode, struct file *file)
u64 elapsed;
u64 run_time;
struct timespec uptime;
+ unsigned long jiffies;
/*
* First check to see if there is enough free_space to continue
@@ -467,12 +469,12 @@ static void do_acct_process(long exitcode, struct file *file)
#endif
do_div(elapsed, AHZ);
ac.ac_btime = xtime.tv_sec - elapsed;
- ac.ac_utime = encode_comp_t(jiffies_to_AHZ(
- current->signal->utime +
- current->group_leader->utime));
- ac.ac_stime = encode_comp_t(jiffies_to_AHZ(
- current->signal->stime +
- current->group_leader->stime));
+ jiffies = cputime_to_jiffies(cputime_add(current->group_leader->utime,
+ current->signal->utime));
+ ac.ac_utime = encode_comp_t(jiffies_to_AHZ(jiffies));
+ jiffies = cputime_to_jiffies(cputime_add(current->group_leader->stime,
+ current->signal->stime));
+ ac.ac_stime = encode_comp_t(jiffies_to_AHZ(jiffies));
/* we really need to bite the bullet and change layout */
ac.ac_uid = current->uid;
ac.ac_gid = current->gid;
@@ -580,7 +582,8 @@ void acct_process(long exitcode)
void acct_update_integrals(struct task_struct *tsk)
{
if (likely(tsk->mm)) {
- long delta = tsk->stime - tsk->acct_stimexpd;
+ long delta =
+ cputime_to_jiffies(tsk->stime) - tsk->acct_stimexpd;
if (delta == 0)
return;
diff --git a/kernel/audit.c b/kernel/audit.c
index 32fa03ad198..d13ab7d2d89 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -267,7 +267,7 @@ static int audit_set_failure(int state, uid_t loginuid)
return old;
}
-int kauditd_thread(void *dummy)
+static int kauditd_thread(void *dummy)
{
struct sk_buff *skb;
diff --git a/kernel/capability.c b/kernel/capability.c
index 8986a37a67e..bfa3c92e16f 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -7,6 +7,7 @@
* 30 May 2002: Cleanup, Robert M. Love <rml@tech9.net>
*/
+#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/security.h>
diff --git a/kernel/compat.c b/kernel/compat.c
index 102296e21ea..256e5d9f064 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -514,6 +514,24 @@ static int put_compat_itimerspec(struct compat_itimerspec __user *dst,
return 0;
}
+long compat_sys_timer_create(clockid_t which_clock,
+ struct compat_sigevent __user *timer_event_spec,
+ timer_t __user *created_timer_id)
+{
+ struct sigevent __user *event = NULL;
+
+ if (timer_event_spec) {
+ struct sigevent kevent;
+
+ event = compat_alloc_user_space(sizeof(*event));
+ if (get_compat_sigevent(&kevent, timer_event_spec) ||
+ copy_to_user(event, &kevent, sizeof(*event)))
+ return -EFAULT;
+ }
+
+ return sys_timer_create(which_clock, event, created_timer_id);
+}
+
long compat_sys_timer_settime(timer_t timer_id, int flags,
struct compat_itimerspec __user *new,
struct compat_itimerspec __user *old)
@@ -649,8 +667,6 @@ int get_compat_sigevent(struct sigevent *event,
? -EFAULT : 0;
}
-/* timer_create is architecture specific because it needs sigevent conversion */
-
long compat_get_bitmap(unsigned long *mask, compat_ulong_t __user *umask,
unsigned long bitmap_size)
{
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 7430640f981..2a75e44e1a4 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -39,6 +39,7 @@
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
+#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -54,7 +55,23 @@
#include <asm/atomic.h>
#include <asm/semaphore.h>
-#define CPUSET_SUPER_MAGIC 0x27e0eb
+#define CPUSET_SUPER_MAGIC 0x27e0eb
+
+/*
+ * Tracks how many cpusets are currently defined in the system.
+ * When there is only one cpuset (the root cpuset) we can
+ * short circuit some hooks.
+ */
+int number_of_cpusets __read_mostly;
+
+/* See "Frequency meter" comments, below. */
+
+struct fmeter {
+ int cnt; /* unprocessed events count */
+ int val; /* most recent output value */
+ time_t time; /* clock (secs) when val computed */
+ spinlock_t lock; /* guards read or write of above */
+};
struct cpuset {
unsigned long flags; /* "unsigned long" so bitops work */
@@ -80,13 +97,16 @@ struct cpuset {
* Copy of global cpuset_mems_generation as of the most
* recent time this cpuset changed its mems_allowed.
*/
- int mems_generation;
+ int mems_generation;
+
+ struct fmeter fmeter; /* memory_pressure filter */
};
/* bits in struct cpuset flags field */
typedef enum {
CS_CPU_EXCLUSIVE,
CS_MEM_EXCLUSIVE,
+ CS_MEMORY_MIGRATE,
CS_REMOVED,
CS_NOTIFY_ON_RELEASE
} cpuset_flagbits_t;
@@ -112,6 +132,11 @@ static inline int notify_on_release(const struct cpuset *cs)
return !!test_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
}
+static inline int is_memory_migrate(const struct cpuset *cs)
+{
+ return !!test_bit(CS_MEMORY_MIGRATE, &cs->flags);
+}
+
/*
 * Increment this atomic integer every time any cpuset changes its
* mems_allowed value. Users of cpusets can track this generation
@@ -137,13 +162,10 @@ static struct cpuset top_cpuset = {
.count = ATOMIC_INIT(0),
.sibling = LIST_HEAD_INIT(top_cpuset.sibling),
.children = LIST_HEAD_INIT(top_cpuset.children),
- .parent = NULL,
- .dentry = NULL,
- .mems_generation = 0,
};
static struct vfsmount *cpuset_mount;
-static struct super_block *cpuset_sb = NULL;
+static struct super_block *cpuset_sb;
/*
* We have two global cpuset semaphores below. They can nest.
@@ -227,6 +249,11 @@ static struct super_block *cpuset_sb = NULL;
* a tasks cpuset pointer we use task_lock(), which acts on a spinlock
* (task->alloc_lock) already in the task_struct routinely used for
* such matters.
+ *
+ * P.S. One more locking exception. RCU is used to guard the
+ * update of a tasks cpuset pointer by attach_task() and the
+ * access of task->cpuset->mems_generation via that pointer in
+ * the routine cpuset_update_task_memory_state().
*/
static DECLARE_MUTEX(manage_sem);
@@ -304,7 +331,7 @@ static void cpuset_d_remove_dir(struct dentry *dentry)
spin_lock(&dcache_lock);
node = dentry->d_subdirs.next;
while (node != &dentry->d_subdirs) {
- struct dentry *d = list_entry(node, struct dentry, d_child);
+ struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
list_del_init(node);
if (d->d_inode) {
d = dget_locked(d);
@@ -316,7 +343,7 @@ static void cpuset_d_remove_dir(struct dentry *dentry)
}
node = dentry->d_subdirs.next;
}
- list_del_init(&dentry->d_child);
+ list_del_init(&dentry->d_u.d_child);
spin_unlock(&dcache_lock);
remove_dir(dentry);
}
@@ -570,20 +597,43 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
BUG_ON(!nodes_intersects(*pmask, node_online_map));
}
-/*
- * Refresh current tasks mems_allowed and mems_generation from current
- * tasks cpuset.
+/**
+ * cpuset_update_task_memory_state - update task memory placement
*
- * Call without callback_sem or task_lock() held. May be called with
- * or without manage_sem held. Will acquire task_lock() and might
- * acquire callback_sem during call.
+ * If the current tasks cpusets mems_allowed changed behind our
+ * backs, update current->mems_allowed, mems_generation and task NUMA
+ * mempolicy to the new value.
+ *
+ * Task mempolicy is updated by rebinding it relative to the
+ * current->cpuset if a task has its memory placement changed.
+ * Do not call this routine if in_interrupt().
*
- * The task_lock() is required to dereference current->cpuset safely.
- * Without it, we could pick up the pointer value of current->cpuset
- * in one instruction, and then attach_task could give us a different
- * cpuset, and then the cpuset we had could be removed and freed,
- * and then on our next instruction, we could dereference a no longer
- * valid cpuset pointer to get its mems_generation field.
+ * Call without callback_sem or task_lock() held. May be called
+ * with or without manage_sem held. Doesn't need task_lock to guard
+ * against another task changing a non-NULL cpuset pointer to NULL,
+ * as that is only done by a task on itself, and if the current task
+ * is here, it is not simultaneously in the exit code NULL'ing its
+ * cpuset pointer. This routine also might acquire callback_sem and
+ * current->mm->mmap_sem during call.
+ *
+ * Reading current->cpuset->mems_generation doesn't need task_lock
+ * to guard the current->cpuset dereference, because it is guarded
+ * from concurrent freeing of current->cpuset by attach_task(),
+ * using RCU.
+ *
+ * The rcu_dereference() is technically probably not needed,
+ * as I don't actually mind if I see a new cpuset pointer but
+ * an old value of mems_generation. However this really only
+ * matters on alpha systems using cpusets heavily. If I dropped
+ * that rcu_dereference(), it would save them a memory barrier.
+ * For all other arch's, rcu_dereference is a no-op anyway, and for
+ * alpha systems not using cpusets, another planned optimization,
+ * avoiding the rcu critical section for tasks in the root cpuset
+ * which is statically allocated, so can't vanish, will make this
+ * irrelevant. Better to use RCU as intended, than to engage in
+ * some cute trick to save a memory barrier that is impossible to
+ * test, for alpha systems using cpusets heavily, which might not
+ * even exist.
*
* This routine is needed to update the per-task mems_allowed data,
* within the tasks context, when it is trying to allocate memory
@@ -591,27 +641,31 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
* task has been modifying its cpuset.
*/
-static void refresh_mems(void)
+void cpuset_update_task_memory_state()
{
int my_cpusets_mem_gen;
+ struct task_struct *tsk = current;
+ struct cpuset *cs;
- task_lock(current);
- my_cpusets_mem_gen = current->cpuset->mems_generation;
- task_unlock(current);
-
- if (current->cpuset_mems_generation != my_cpusets_mem_gen) {
- struct cpuset *cs;
- nodemask_t oldmem = current->mems_allowed;
+ if (tsk->cpuset == &top_cpuset) {
+ /* Don't need rcu for top_cpuset. It's never freed. */
+ my_cpusets_mem_gen = top_cpuset.mems_generation;
+ } else {
+ rcu_read_lock();
+ cs = rcu_dereference(tsk->cpuset);
+ my_cpusets_mem_gen = cs->mems_generation;
+ rcu_read_unlock();
+ }
+ if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
down(&callback_sem);
- task_lock(current);
- cs = current->cpuset;
- guarantee_online_mems(cs, &current->mems_allowed);
- current->cpuset_mems_generation = cs->mems_generation;
- task_unlock(current);
+ task_lock(tsk);
+ cs = tsk->cpuset; /* Maybe changed when task not locked */
+ guarantee_online_mems(cs, &tsk->mems_allowed);
+ tsk->cpuset_mems_generation = cs->mems_generation;
+ task_unlock(tsk);
up(&callback_sem);
- if (!nodes_equal(oldmem, current->mems_allowed))
- numa_policy_rebind(&oldmem, &current->mems_allowed);
+ mpol_rebind_task(tsk, &tsk->mems_allowed);
}
}
@@ -766,36 +820,150 @@ static int update_cpumask(struct cpuset *cs, char *buf)
}
/*
+ * Handle user request to change the 'mems' memory placement
+ * of a cpuset. Needs to validate the request, update the
+ * cpusets mems_allowed and mems_generation, and for each
+ * task in the cpuset, rebind any vma mempolicies and if
+ * the cpuset is marked 'memory_migrate', migrate the tasks
+ * pages to the new memory.
+ *
* Call with manage_sem held. May take callback_sem during call.
+ * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
+ * lock each such tasks mm->mmap_sem, scan its vma's and rebind
+ * their mempolicies to the cpusets new mems_allowed.
*/
static int update_nodemask(struct cpuset *cs, char *buf)
{
struct cpuset trialcs;
+ nodemask_t oldmem;
+ struct task_struct *g, *p;
+ struct mm_struct **mmarray;
+ int i, n, ntasks;
+ int migrate;
+ int fudge;
int retval;
trialcs = *cs;
retval = nodelist_parse(buf, trialcs.mems_allowed);
if (retval < 0)
- return retval;
+ goto done;
nodes_and(trialcs.mems_allowed, trialcs.mems_allowed, node_online_map);
- if (nodes_empty(trialcs.mems_allowed))
- return -ENOSPC;
+ oldmem = cs->mems_allowed;
+ if (nodes_equal(oldmem, trialcs.mems_allowed)) {
+ retval = 0; /* Too easy - nothing to do */
+ goto done;
+ }
+ if (nodes_empty(trialcs.mems_allowed)) {
+ retval = -ENOSPC;
+ goto done;
+ }
retval = validate_change(cs, &trialcs);
- if (retval == 0) {
- down(&callback_sem);
- cs->mems_allowed = trialcs.mems_allowed;
- atomic_inc(&cpuset_mems_generation);
- cs->mems_generation = atomic_read(&cpuset_mems_generation);
- up(&callback_sem);
+ if (retval < 0)
+ goto done;
+
+ down(&callback_sem);
+ cs->mems_allowed = trialcs.mems_allowed;
+ atomic_inc(&cpuset_mems_generation);
+ cs->mems_generation = atomic_read(&cpuset_mems_generation);
+ up(&callback_sem);
+
+ set_cpuset_being_rebound(cs); /* causes mpol_copy() rebind */
+
+ fudge = 10; /* spare mmarray[] slots */
+ fudge += cpus_weight(cs->cpus_allowed); /* imagine one fork-bomb/cpu */
+ retval = -ENOMEM;
+
+ /*
+ * Allocate mmarray[] to hold mm reference for each task
+ * in cpuset cs. Can't kmalloc GFP_KERNEL while holding
+ * tasklist_lock. We could use GFP_ATOMIC, but with a
+ * few more lines of code, we can retry until we get a big
+ * enough mmarray[] w/o using GFP_ATOMIC.
+ */
+ while (1) {
+ ntasks = atomic_read(&cs->count); /* guess */
+ ntasks += fudge;
+ mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
+ if (!mmarray)
+ goto done;
+ write_lock_irq(&tasklist_lock); /* block fork */
+ if (atomic_read(&cs->count) <= ntasks)
+ break; /* got enough */
+ write_unlock_irq(&tasklist_lock); /* try again */
+ kfree(mmarray);
}
+
+ n = 0;
+
+ /* Load up mmarray[] with mm reference for each task in cpuset. */
+ do_each_thread(g, p) {
+ struct mm_struct *mm;
+
+ if (n >= ntasks) {
+ printk(KERN_WARNING
+ "Cpuset mempolicy rebind incomplete.\n");
+ continue;
+ }
+ if (p->cpuset != cs)
+ continue;
+ mm = get_task_mm(p);
+ if (!mm)
+ continue;
+ mmarray[n++] = mm;
+ } while_each_thread(g, p);
+ write_unlock_irq(&tasklist_lock);
+
+ /*
+ * Now that we've dropped the tasklist spinlock, we can
+ * rebind the vma mempolicies of each mm in mmarray[] to their
+ * new cpuset, and release that mm. The mpol_rebind_mm()
+ * call takes mmap_sem, which we couldn't take while holding
+ * tasklist_lock. Forks can happen again now - the mpol_copy()
+ * cpuset_being_rebound check will catch such forks, and rebind
+ * their vma mempolicies too. Because we still hold the global
+ * cpuset manage_sem, we know that no other rebind effort will
+ * be contending for the global variable cpuset_being_rebound.
+ * It's ok if we rebind the same mm twice; mpol_rebind_mm()
+ * is idempotent. Also migrate pages in each mm to new nodes.
+ */
+ migrate = is_memory_migrate(cs);
+ for (i = 0; i < n; i++) {
+ struct mm_struct *mm = mmarray[i];
+
+ mpol_rebind_mm(mm, &cs->mems_allowed);
+ if (migrate) {
+ do_migrate_pages(mm, &oldmem, &cs->mems_allowed,
+ MPOL_MF_MOVE_ALL);
+ }
+ mmput(mm);
+ }
+
+ /* We're done rebinding vmas to this cpuset's new mems_allowed. */
+ kfree(mmarray);
+ set_cpuset_being_rebound(NULL);
+ retval = 0;
+done:
return retval;
}
/*
+ * Call with manage_sem held.
+ */
+
+static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
+{
+ if (simple_strtoul(buf, NULL, 10) != 0)
+ cpuset_memory_pressure_enabled = 1;
+ else
+ cpuset_memory_pressure_enabled = 0;
+ return 0;
+}
+
+/*
* update_flag - read a 0 or a 1 in a file and update associated flag
* bit: the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
- * CS_NOTIFY_ON_RELEASE)
+ * CS_NOTIFY_ON_RELEASE, CS_MEMORY_MIGRATE)
* cs: the cpuset to update
* buf: the buffer where we read the 0 or 1
*
@@ -834,6 +1002,104 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
}
/*
+ * Frequency meter - How fast is some event occurring?
+ *
+ * These routines manage a digitally filtered, constant time based,
+ * event frequency meter. There are four routines:
+ * fmeter_init() - initialize a frequency meter.
+ * fmeter_markevent() - called each time the event happens.
+ * fmeter_getrate() - returns the recent rate of such events.
+ * fmeter_update() - internal routine used to update fmeter.
+ *
+ * A common data structure is passed to each of these routines,
+ * which is used to keep track of the state required to manage the
+ * frequency meter and its digital filter.
+ *
+ * The filter works on the number of events marked per unit time.
+ * The filter is single-pole low-pass recursive (IIR). The time unit
+ * is 1 second. Arithmetic is done using 32-bit integers scaled to
+ * simulate 3 decimal digits of precision (multiplied by 1000).
+ *
+ * With an FM_COEF of 933, and a time base of 1 second, the filter
+ * has a half-life of 10 seconds, meaning that if the events quit
+ * happening, then the rate returned from the fmeter_getrate()
+ * will be cut in half each 10 seconds, until it converges to zero.
+ *
+ * It is not worth doing a real infinitely recursive filter. If more
+ * than FM_MAXTICKS ticks have elapsed since the last filter event,
+ * just compute FM_MAXTICKS ticks worth, by which point the level
+ * will be stable.
+ *
+ * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
+ * arithmetic overflow in the fmeter_update() routine.
+ *
+ * Given the simple 32 bit integer arithmetic used, this meter works
+ * best for reporting rates between one per millisecond (msec) and
+ * one per 32 (approx) seconds. At constant rates faster than one
+ * per msec it maxes out at values just under 1,000,000. At constant
+ * rates between one per msec, and one per second it will stabilize
+ * to a value N*1000, where N is the rate of events per second.
+ * At constant rates between one per second and one per 32 seconds,
+ * it will be choppy, moving up on the seconds that have an event,
+ * and then decaying until the next event. At rates slower than
+ * about one in 32 seconds, it decays all the way back to zero between
+ * each event.
+ */
+
+#define FM_COEF 933 /* coefficient for half-life of 10 secs */
+#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
+#define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */
+#define FM_SCALE 1000 /* faux fixed point scale */
+
+/* Initialize a frequency meter */
+static void fmeter_init(struct fmeter *fmp)
+{
+ fmp->cnt = 0;
+ fmp->val = 0;
+ fmp->time = 0;
+ spin_lock_init(&fmp->lock);
+}
+
+/* Internal meter update - process cnt events and update value */
+static void fmeter_update(struct fmeter *fmp)
+{
+ time_t now = get_seconds();
+ time_t ticks = now - fmp->time;
+
+ if (ticks == 0)
+ return;
+
+ ticks = min(FM_MAXTICKS, ticks);
+ while (ticks-- > 0)
+ fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
+ fmp->time = now;
+
+ fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
+ fmp->cnt = 0;
+}
+
+/* Process any previous ticks, then bump cnt by one (times scale). */
+static void fmeter_markevent(struct fmeter *fmp)
+{
+ spin_lock(&fmp->lock);
+ fmeter_update(fmp);
+ fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
+ spin_unlock(&fmp->lock);
+}
+
+/* Process any previous ticks, then return current value. */
+static int fmeter_getrate(struct fmeter *fmp)
+{
+ int val;
+
+ spin_lock(&fmp->lock);
+ fmeter_update(fmp);
+ val = fmp->val;
+ spin_unlock(&fmp->lock);
+ return val;
+}
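
The filter arithmetic above is easy to sanity-check outside the kernel. A small standalone sketch (illustrative only; it reuses the FM_COEF/FM_SCALE values from this hunk, everything else is hypothetical) feeds a constant five events per second into the same recurrence, which settles near 5*1000 and then decays to roughly half after ten idle seconds, matching the half-life claim:

/* Userspace sketch of the fmeter recurrence; illustrative, not kernel code. */
#include <stdio.h>

#define FM_COEF  933	/* coefficient for half-life of 10 secs */
#define FM_SCALE 1000	/* faux fixed point scale */

int main(void)
{
	int val = 0, sec;

	/* 60 seconds at 5 events/second: val should settle near 5000 */
	for (sec = 0; sec < 60; sec++) {
		int cnt = 5 * FM_SCALE;			/* 5 events marked this second */
		val = (FM_COEF * val) / FM_SCALE;	/* one 1-second decay tick */
		val += ((FM_SCALE - FM_COEF) * cnt) / FM_SCALE;
	}
	printf("steady state: %d (expect about 5000)\n", val);

	/* 10 idle seconds: val should drop to roughly half */
	for (sec = 0; sec < 10; sec++)
		val = (FM_COEF * val) / FM_SCALE;
	printf("after 10 idle seconds: %d (expect about 2500)\n", val);
	return 0;
}
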
+
+/*
 * Attach task specified by pid in 'pidbuf' to cpuset 'cs', possibly
* writing the path of the old cpuset in 'ppathbuf' if it needs to be
* notified on release.
@@ -848,6 +1114,8 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
struct task_struct *tsk;
struct cpuset *oldcs;
cpumask_t cpus;
+ nodemask_t from, to;
+ struct mm_struct *mm;
if (sscanf(pidbuf, "%d", &pid) != 1)
return -EIO;
@@ -887,14 +1155,27 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
return -ESRCH;
}
atomic_inc(&cs->count);
- tsk->cpuset = cs;
+ rcu_assign_pointer(tsk->cpuset, cs);
task_unlock(tsk);
guarantee_online_cpus(cs, &cpus);
set_cpus_allowed(tsk, cpus);
+ from = oldcs->mems_allowed;
+ to = cs->mems_allowed;
+
up(&callback_sem);
+
+ mm = get_task_mm(tsk);
+ if (mm) {
+ mpol_rebind_mm(mm, &to);
+ mmput(mm);
+ }
+
+ if (is_memory_migrate(cs))
+ do_migrate_pages(tsk->mm, &from, &to, MPOL_MF_MOVE_ALL);
put_task_struct(tsk);
+ synchronize_rcu();
if (atomic_dec_and_test(&oldcs->count))
check_for_release(oldcs, ppathbuf);
return 0;
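
The rcu_assign_pointer()/synchronize_rcu() pairing above implies that lockless readers of tsk->cpuset bracket the dereference with rcu_read_lock() rather than task_lock(). A minimal sketch of such a reader under that assumption (the helper name is made up for illustration and is not part of this patch):

/* Illustrative lockless reader of tsk->cpuset; hypothetical helper. */
static int example_cpuset_mems_generation(struct task_struct *tsk)
{
	struct cpuset *cs;
	int gen = 0;

	rcu_read_lock();
	cs = rcu_dereference(tsk->cpuset);	/* pairs with rcu_assign_pointer() */
	if (cs)
		gen = cs->mems_generation;
	rcu_read_unlock();
	return gen;
}
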
@@ -905,11 +1186,14 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
typedef enum {
FILE_ROOT,
FILE_DIR,
+ FILE_MEMORY_MIGRATE,
FILE_CPULIST,
FILE_MEMLIST,
FILE_CPU_EXCLUSIVE,
FILE_MEM_EXCLUSIVE,
FILE_NOTIFY_ON_RELEASE,
+ FILE_MEMORY_PRESSURE_ENABLED,
+ FILE_MEMORY_PRESSURE,
FILE_TASKLIST,
} cpuset_filetype_t;
@@ -960,6 +1244,15 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us
case FILE_NOTIFY_ON_RELEASE:
retval = update_flag(CS_NOTIFY_ON_RELEASE, cs, buffer);
break;
+ case FILE_MEMORY_MIGRATE:
+ retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
+ break;
+ case FILE_MEMORY_PRESSURE_ENABLED:
+ retval = update_memory_pressure_enabled(cs, buffer);
+ break;
+ case FILE_MEMORY_PRESSURE:
+ retval = -EACCES;
+ break;
case FILE_TASKLIST:
retval = attach_task(cs, buffer, &pathbuf);
break;
@@ -1060,6 +1353,15 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
case FILE_NOTIFY_ON_RELEASE:
*s++ = notify_on_release(cs) ? '1' : '0';
break;
+ case FILE_MEMORY_MIGRATE:
+ *s++ = is_memory_migrate(cs) ? '1' : '0';
+ break;
+ case FILE_MEMORY_PRESSURE_ENABLED:
+ *s++ = cpuset_memory_pressure_enabled ? '1' : '0';
+ break;
+ case FILE_MEMORY_PRESSURE:
+ s += sprintf(s, "%d", fmeter_getrate(&cs->fmeter));
+ break;
default:
retval = -EINVAL;
goto out;
@@ -1178,7 +1480,7 @@ static int cpuset_create_file(struct dentry *dentry, int mode)
/*
* cpuset_create_dir - create a directory for an object.
- * cs: the cpuset we create the directory for.
+ * cs: the cpuset we create the directory for.
* It must have a valid ->parent field
* And we are going to fill its ->dentry field.
* name: The name to give to the cpuset directory. Will be copied.
@@ -1211,7 +1513,7 @@ static int cpuset_add_file(struct dentry *dir, const struct cftype *cft)
struct dentry *dentry;
int error;
- down(&dir->d_inode->i_sem);
+ mutex_lock(&dir->d_inode->i_mutex);
dentry = cpuset_get_dentry(dir, cft->name);
if (!IS_ERR(dentry)) {
error = cpuset_create_file(dentry, 0644 | S_IFREG);
@@ -1220,7 +1522,7 @@ static int cpuset_add_file(struct dentry *dir, const struct cftype *cft)
dput(dentry);
} else
error = PTR_ERR(dentry);
- up(&dir->d_inode->i_sem);
+ mutex_unlock(&dir->d_inode->i_mutex);
return error;
}
@@ -1408,6 +1710,21 @@ static struct cftype cft_notify_on_release = {
.private = FILE_NOTIFY_ON_RELEASE,
};
+static struct cftype cft_memory_migrate = {
+ .name = "memory_migrate",
+ .private = FILE_MEMORY_MIGRATE,
+};
+
+static struct cftype cft_memory_pressure_enabled = {
+ .name = "memory_pressure_enabled",
+ .private = FILE_MEMORY_PRESSURE_ENABLED,
+};
+
+static struct cftype cft_memory_pressure = {
+ .name = "memory_pressure",
+ .private = FILE_MEMORY_PRESSURE,
+};
+
static int cpuset_populate_dir(struct dentry *cs_dentry)
{
int err;
@@ -1422,6 +1739,10 @@ static int cpuset_populate_dir(struct dentry *cs_dentry)
return err;
if ((err = cpuset_add_file(cs_dentry, &cft_notify_on_release)) < 0)
return err;
+ if ((err = cpuset_add_file(cs_dentry, &cft_memory_migrate)) < 0)
+ return err;
+ if ((err = cpuset_add_file(cs_dentry, &cft_memory_pressure)) < 0)
+ return err;
if ((err = cpuset_add_file(cs_dentry, &cft_tasks)) < 0)
return err;
return 0;
@@ -1446,7 +1767,7 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
return -ENOMEM;
down(&manage_sem);
- refresh_mems();
+ cpuset_update_task_memory_state();
cs->flags = 0;
if (notify_on_release(parent))
set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
@@ -1457,11 +1778,13 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
INIT_LIST_HEAD(&cs->children);
atomic_inc(&cpuset_mems_generation);
cs->mems_generation = atomic_read(&cpuset_mems_generation);
+ fmeter_init(&cs->fmeter);
cs->parent = parent;
down(&callback_sem);
list_add(&cs->sibling, &cs->parent->children);
+ number_of_cpusets++;
up(&callback_sem);
err = cpuset_create_dir(cs, name, mode);
@@ -1470,7 +1793,7 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
/*
* Release manage_sem before cpuset_populate_dir() because it
- * will down() this new directory's i_sem and if we race with
+ * will down() this new directory's i_mutex and if we race with
* another mkdir, we might deadlock.
*/
up(&manage_sem);
@@ -1489,7 +1812,7 @@ static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
struct cpuset *c_parent = dentry->d_parent->d_fsdata;
- /* the vfs holds inode->i_sem already */
+ /* the vfs holds inode->i_mutex already */
return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);
}
@@ -1500,10 +1823,10 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
struct cpuset *parent;
char *pathbuf = NULL;
- /* the vfs holds both inode->i_sem already */
+ /* the vfs holds both inode->i_mutex already */
down(&manage_sem);
- refresh_mems();
+ cpuset_update_task_memory_state();
if (atomic_read(&cs->count) > 0) {
up(&manage_sem);
return -EBUSY;
@@ -1524,6 +1847,7 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
spin_unlock(&d->d_lock);
cpuset_d_remove_dir(d);
dput(d);
+ number_of_cpusets--;
up(&callback_sem);
if (list_empty(&parent->children))
check_for_release(parent, &pathbuf);
@@ -1532,6 +1856,21 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
return 0;
}
+/*
+ * cpuset_init_early - just enough so that the calls to
+ * cpuset_update_task_memory_state() in early init code
+ * are harmless.
+ */
+
+int __init cpuset_init_early(void)
+{
+ struct task_struct *tsk = current;
+
+ tsk->cpuset = &top_cpuset;
+ tsk->cpuset->mems_generation = atomic_read(&cpuset_mems_generation);
+ return 0;
+}
+
/**
* cpuset_init - initialize cpusets at system boot
*
@@ -1546,6 +1885,7 @@ int __init cpuset_init(void)
top_cpuset.cpus_allowed = CPU_MASK_ALL;
top_cpuset.mems_allowed = NODE_MASK_ALL;
+ fmeter_init(&top_cpuset.fmeter);
atomic_inc(&cpuset_mems_generation);
top_cpuset.mems_generation = atomic_read(&cpuset_mems_generation);
@@ -1566,7 +1906,11 @@ int __init cpuset_init(void)
root->d_inode->i_nlink++;
top_cpuset.dentry = root;
root->d_inode->i_op = &cpuset_dir_inode_operations;
+ number_of_cpusets = 1;
err = cpuset_populate_dir(root);
+ /* memory_pressure_enabled is in root cpuset only */
+ if (err == 0)
+ err = cpuset_add_file(root, &cft_memory_pressure_enabled);
out:
return err;
}
@@ -1632,15 +1976,13 @@ void cpuset_fork(struct task_struct *child)
*
* We don't need to task_lock() this reference to tsk->cpuset,
* because tsk is already marked PF_EXITING, so attach_task() won't
- * mess with it.
+ * mess with it, or the task is a failed fork that was never visible to attach_task().
**/
void cpuset_exit(struct task_struct *tsk)
{
struct cpuset *cs;
- BUG_ON(!(tsk->flags & PF_EXITING));
-
cs = tsk->cpuset;
tsk->cpuset = NULL;
@@ -1667,14 +2009,14 @@ void cpuset_exit(struct task_struct *tsk)
* tasks cpuset.
**/
-cpumask_t cpuset_cpus_allowed(const struct task_struct *tsk)
+cpumask_t cpuset_cpus_allowed(struct task_struct *tsk)
{
cpumask_t mask;
down(&callback_sem);
- task_lock((struct task_struct *)tsk);
+ task_lock(tsk);
guarantee_online_cpus(tsk->cpuset, &mask);
- task_unlock((struct task_struct *)tsk);
+ task_unlock(tsk);
up(&callback_sem);
return mask;
@@ -1686,43 +2028,26 @@ void cpuset_init_current_mems_allowed(void)
}
/**
- * cpuset_update_current_mems_allowed - update mems parameters to new values
- *
- * If the current tasks cpusets mems_allowed changed behind our backs,
- * update current->mems_allowed and mems_generation to the new value.
- * Do not call this routine if in_interrupt().
+ * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
+ * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
*
- * Call without callback_sem or task_lock() held. May be called
- * with or without manage_sem held. Unless exiting, it will acquire
- * task_lock(). Also might acquire callback_sem during call to
- * refresh_mems().
- */
+ * Description: Returns the nodemask_t mems_allowed of the cpuset
+ * attached to the specified @tsk. Guaranteed to return some non-empty
+ * subset of node_online_map, even if this means going outside the
+ * task's cpuset.
+ **/
-void cpuset_update_current_mems_allowed(void)
+nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
{
- struct cpuset *cs;
- int need_to_refresh = 0;
+ nodemask_t mask;
- task_lock(current);
- cs = current->cpuset;
- if (!cs)
- goto done;
- if (current->cpuset_mems_generation != cs->mems_generation)
- need_to_refresh = 1;
-done:
- task_unlock(current);
- if (need_to_refresh)
- refresh_mems();
-}
+ down(&callback_sem);
+ task_lock(tsk);
+ guarantee_online_mems(tsk->cpuset, &mask);
+ task_unlock(tsk);
+ up(&callback_sem);
-/**
- * cpuset_restrict_to_mems_allowed - limit nodes to current mems_allowed
- * @nodes: pointer to a node bitmap that is and-ed with mems_allowed
- */
-void cpuset_restrict_to_mems_allowed(unsigned long *nodes)
-{
- bitmap_and(nodes, nodes, nodes_addr(current->mems_allowed),
- MAX_NUMNODES);
+ return mask;
}
/**
@@ -1795,7 +2120,7 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
* GFP_USER - only nodes in current tasks mems allowed ok.
**/
-int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
+int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
int node; /* node that zone z is on */
const struct cpuset *cs; /* current cpuset ancestors */
@@ -1867,6 +2192,42 @@ done:
}
/*
+ * Collection of memory_pressure is suppressed unless
+ * this flag is enabled by writing "1" to the special
+ * cpuset file 'memory_pressure_enabled' in the root cpuset.
+ */
+
+int cpuset_memory_pressure_enabled __read_mostly;
+
+/**
+ * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
+ *
+ * Keep a running average of the rate of synchronous (direct)
+ * page reclaim efforts initiated by tasks in each cpuset.
+ *
+ * This represents the rate at which some task in the cpuset
+ * ran low on memory on all nodes it was allowed to use, and
+ * had to enter the kernel's page reclaim code in an effort to
+ * create more free memory by tossing clean pages or swapping
+ * or writing dirty pages.
+ *
+ * Display to user space in the per-cpuset read-only file
+ * "memory_pressure". Value displayed is an integer
+ * representing the recent rate of entry into the synchronous
+ * (direct) page reclaim by any task attached to the cpuset.
+ **/
+
+void __cpuset_memory_pressure_bump(void)
+{
+ struct cpuset *cs;
+
+ task_lock(current);
+ cs = current->cpuset;
+ fmeter_markevent(&cs->fmeter);
+ task_unlock(current);
+}
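
Callers in the synchronous reclaim path are presumably expected to reach this through a cheap wrapper that tests cpuset_memory_pressure_enabled before taking task_lock(). A sketch of the assumed header-side helper (the actual declaration is not shown in this hunk):

/* Assumed include/linux/cpuset.h-style wrapper; sketch only. */
#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
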
+
+/*
* proc_cpuset_show()
* - Print tasks cpuset path into seq_file.
* - Used for /proc/<pid>/cpuset.
diff --git a/kernel/exit.c b/kernel/exit.c
index ee515683b92..f8e609ff189 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
+#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
@@ -29,6 +30,7 @@
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
+#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -72,7 +74,6 @@ repeat:
__ptrace_unlink(p);
BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
__exit_signal(p);
- __exit_sighand(p);
/*
* Note that the fastpath in sys_times depends on __exit_signal having
* updated the counters before a task is removed from the tasklist of
@@ -258,7 +259,7 @@ static inline void reparent_to_init(void)
void __set_special_pids(pid_t session, pid_t pgrp)
{
- struct task_struct *curr = current;
+ struct task_struct *curr = current->group_leader;
if (curr->signal->session != session) {
detach_pid(curr, PIDTYPE_SID);
@@ -842,7 +843,7 @@ fastcall NORET_TYPE void do_exit(long code)
}
group_dead = atomic_dec_and_test(&tsk->signal->live);
if (group_dead) {
- del_timer_sync(&tsk->signal->real_timer);
+ hrtimer_cancel(&tsk->signal->real_timer);
exit_itimers(tsk->signal);
acct_process(code);
}
@@ -870,6 +871,10 @@ fastcall NORET_TYPE void do_exit(long code)
mpol_free(tsk->mempolicy);
tsk->mempolicy = NULL;
#endif
+ /*
+ * If DEBUG_MUTEXES is on, make sure we are holding no locks:
+ */
+ mutex_debug_check_no_locks_held(tsk);
/* PF_DEAD causes final put_task_struct after we schedule. */
preempt_disable();
@@ -926,7 +931,6 @@ do_group_exit(int exit_code)
/* Another thread got here before we took the lock. */
exit_code = sig->group_exit_code;
else {
- sig->flags = SIGNAL_GROUP_EXIT;
sig->group_exit_code = exit_code;
zap_other_threads(current);
}
@@ -1068,6 +1072,9 @@ static int wait_task_zombie(task_t *p, int noreap,
}
if (likely(p->real_parent == p->parent) && likely(p->signal)) {
+ struct signal_struct *psig;
+ struct signal_struct *sig;
+
/*
* The resource counters for the group leader are in its
* own task_struct. Those for dead threads in the group
@@ -1084,24 +1091,26 @@ static int wait_task_zombie(task_t *p, int noreap,
* here reaping other children at the same time.
*/
spin_lock_irq(&p->parent->sighand->siglock);
- p->parent->signal->cutime =
- cputime_add(p->parent->signal->cutime,
+ psig = p->parent->signal;
+ sig = p->signal;
+ psig->cutime =
+ cputime_add(psig->cutime,
cputime_add(p->utime,
- cputime_add(p->signal->utime,
- p->signal->cutime)));
- p->parent->signal->cstime =
- cputime_add(p->parent->signal->cstime,
+ cputime_add(sig->utime,
+ sig->cutime)));
+ psig->cstime =
+ cputime_add(psig->cstime,
cputime_add(p->stime,
- cputime_add(p->signal->stime,
- p->signal->cstime)));
- p->parent->signal->cmin_flt +=
- p->min_flt + p->signal->min_flt + p->signal->cmin_flt;
- p->parent->signal->cmaj_flt +=
- p->maj_flt + p->signal->maj_flt + p->signal->cmaj_flt;
- p->parent->signal->cnvcsw +=
- p->nvcsw + p->signal->nvcsw + p->signal->cnvcsw;
- p->parent->signal->cnivcsw +=
- p->nivcsw + p->signal->nivcsw + p->signal->cnivcsw;
+ cputime_add(sig->stime,
+ sig->cstime)));
+ psig->cmin_flt +=
+ p->min_flt + sig->min_flt + sig->cmin_flt;
+ psig->cmaj_flt +=
+ p->maj_flt + sig->maj_flt + sig->cmaj_flt;
+ psig->cnvcsw +=
+ p->nvcsw + sig->nvcsw + sig->cnvcsw;
+ psig->cnivcsw +=
+ p->nivcsw + sig->nivcsw + sig->cnivcsw;
spin_unlock_irq(&p->parent->sighand->siglock);
}
diff --git a/kernel/fork.c b/kernel/fork.c
index fb8572a4229..4ae8cfc1c89 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -28,6 +28,7 @@
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/fs.h>
+#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/security.h>
@@ -743,6 +744,14 @@ int unshare_files(void)
EXPORT_SYMBOL(unshare_files);
+void sighand_free_cb(struct rcu_head *rhp)
+{
+ struct sighand_struct *sp;
+
+ sp = container_of(rhp, struct sighand_struct, rcu);
+ kmem_cache_free(sighand_cachep, sp);
+}
+
static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{
struct sighand_struct *sig;
@@ -752,7 +761,7 @@ static inline int copy_sighand(unsigned long clone_flags, struct task_struct * t
return 0;
}
sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
- tsk->sighand = sig;
+ rcu_assign_pointer(tsk->sighand, sig);
if (!sig)
return -ENOMEM;
spin_lock_init(&sig->siglock);
@@ -793,19 +802,16 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
init_sigpending(&sig->shared_pending);
INIT_LIST_HEAD(&sig->posix_timers);
- sig->it_real_value = sig->it_real_incr = 0;
+ hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC);
+ sig->it_real_incr.tv64 = 0;
sig->real_timer.function = it_real_fn;
- sig->real_timer.data = (unsigned long) tsk;
- init_timer(&sig->real_timer);
+ sig->real_timer.data = tsk;
sig->it_virt_expires = cputime_zero;
sig->it_virt_incr = cputime_zero;
sig->it_prof_expires = cputime_zero;
sig->it_prof_incr = cputime_zero;
- sig->tty = current->signal->tty;
- sig->pgrp = process_group(current);
- sig->session = current->signal->session;
sig->leader = 0; /* session leadership doesn't inherit */
sig->tty_old_pgrp = 0;
@@ -964,15 +970,20 @@ static task_t *copy_process(unsigned long clone_flags,
p->io_context = NULL;
p->io_wait = NULL;
p->audit_context = NULL;
+ cpuset_fork(p);
#ifdef CONFIG_NUMA
p->mempolicy = mpol_copy(p->mempolicy);
if (IS_ERR(p->mempolicy)) {
retval = PTR_ERR(p->mempolicy);
p->mempolicy = NULL;
- goto bad_fork_cleanup;
+ goto bad_fork_cleanup_cpuset;
}
#endif
+#ifdef CONFIG_DEBUG_MUTEXES
+ p->blocked_on = NULL; /* not blocked yet */
+#endif
+
p->tgid = p->pid;
if (clone_flags & CLONE_THREAD)
p->tgid = current->tgid;
@@ -1127,25 +1138,19 @@ static task_t *copy_process(unsigned long clone_flags,
attach_pid(p, PIDTYPE_PID, p->pid);
attach_pid(p, PIDTYPE_TGID, p->tgid);
if (thread_group_leader(p)) {
+ p->signal->tty = current->signal->tty;
+ p->signal->pgrp = process_group(current);
+ p->signal->session = current->signal->session;
attach_pid(p, PIDTYPE_PGID, process_group(p));
attach_pid(p, PIDTYPE_SID, p->signal->session);
if (p->pid)
__get_cpu_var(process_counts)++;
}
- if (!current->signal->tty && p->signal->tty)
- p->signal->tty = NULL;
-
nr_threads++;
total_forks++;
write_unlock_irq(&tasklist_lock);
proc_fork_connector(p);
- cpuset_fork(p);
- retval = 0;
-
-fork_out:
- if (retval)
- return ERR_PTR(retval);
return p;
bad_fork_cleanup_namespace:
@@ -1172,7 +1177,9 @@ bad_fork_cleanup_security:
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
mpol_free(p->mempolicy);
+bad_fork_cleanup_cpuset:
#endif
+ cpuset_exit(p);
bad_fork_cleanup:
if (p->binfmt)
module_put(p->binfmt->module);
@@ -1184,7 +1191,8 @@ bad_fork_cleanup_count:
free_uid(p->user);
bad_fork_free:
free_task(p);
- goto fork_out;
+fork_out:
+ return ERR_PTR(retval);
}
struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
@@ -1290,6 +1298,10 @@ long do_fork(unsigned long clone_flags,
return pid;
}
+#ifndef ARCH_MIN_MMSTRUCT_ALIGN
+#define ARCH_MIN_MMSTRUCT_ALIGN 0
+#endif
+
void __init proc_caches_init(void)
{
sighand_cachep = kmem_cache_create("sighand_cache",
@@ -1308,6 +1320,6 @@ void __init proc_caches_init(void)
sizeof(struct vm_area_struct), 0,
SLAB_PANIC, NULL, NULL);
mm_cachep = kmem_cache_create("mm_struct",
- sizeof(struct mm_struct), 0,
+ sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
}
diff --git a/kernel/futex.c b/kernel/futex.c
index 5e71a6bf6f6..5efa2f97803 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -356,6 +356,13 @@ retry:
if (bh1 != bh2)
spin_unlock(&bh2->lock);
+#ifndef CONFIG_MMU
+ /* we don't get EFAULT from MMU faults if we don't have an MMU,
+ * but we might get them from range checking */
+ ret = op_ret;
+ goto out;
+#endif
+
if (unlikely(op_ret != -EFAULT)) {
ret = op_ret;
goto out;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
new file mode 100644
index 00000000000..04ccab099e8
--- /dev/null
+++ b/kernel/hrtimer.c
@@ -0,0 +1,825 @@
+/*
+ * linux/kernel/hrtimer.c
+ *
+ * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
+ *
+ * High-resolution kernel timers
+ *
+ * In contrast to the low-resolution timeout API implemented in
+ * kernel/timer.c, hrtimers provide finer resolution and accuracy
+ * depending on system configuration and capabilities.
+ *
+ * These timers are currently used for:
+ * - itimers
+ * - POSIX timers
+ * - nanosleep
+ * - precise in-kernel timing
+ *
+ * Started by: Thomas Gleixner and Ingo Molnar
+ *
+ * Credits:
+ * based on kernel/timer.c
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/hrtimer.h>
+#include <linux/notifier.h>
+#include <linux/syscalls.h>
+#include <linux/interrupt.h>
+
+#include <asm/uaccess.h>
+
+/**
+ * ktime_get - get the monotonic time in ktime_t format
+ *
+ * returns the time in ktime_t format
+ */
+static ktime_t ktime_get(void)
+{
+ struct timespec now;
+
+ ktime_get_ts(&now);
+
+ return timespec_to_ktime(now);
+}
+
+/**
+ * ktime_get_real - get the real (wall-) time in ktime_t format
+ *
+ * returns the time in ktime_t format
+ */
+static ktime_t ktime_get_real(void)
+{
+ struct timespec now;
+
+ getnstimeofday(&now);
+
+ return timespec_to_ktime(now);
+}
+
+EXPORT_SYMBOL_GPL(ktime_get_real);
+
+/*
+ * The timer bases:
+ */
+
+#define MAX_HRTIMER_BASES 2
+
+static DEFINE_PER_CPU(struct hrtimer_base, hrtimer_bases[MAX_HRTIMER_BASES]) =
+{
+ {
+ .index = CLOCK_REALTIME,
+ .get_time = &ktime_get_real,
+ .resolution = KTIME_REALTIME_RES,
+ },
+ {
+ .index = CLOCK_MONOTONIC,
+ .get_time = &ktime_get,
+ .resolution = KTIME_MONOTONIC_RES,
+ },
+};
+
+/**
+ * ktime_get_ts - get the monotonic clock in timespec format
+ *
+ * @ts: pointer to timespec variable
+ *
+ * The function calculates the monotonic clock from the realtime
+ * clock and the wall_to_monotonic offset and stores the result
+ * in normalized timespec format in the variable pointed to by ts.
+ */
+void ktime_get_ts(struct timespec *ts)
+{
+ struct timespec tomono;
+ unsigned long seq;
+
+ do {
+ seq = read_seqbegin(&xtime_lock);
+ getnstimeofday(ts);
+ tomono = wall_to_monotonic;
+
+ } while (read_seqretry(&xtime_lock, seq));
+
+ set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
+ ts->tv_nsec + tomono.tv_nsec);
+}
+EXPORT_SYMBOL_GPL(ktime_get_ts);
+
+/*
+ * Functions and macros which are different for UP/SMP systems are kept in a
+ * single place
+ */
+#ifdef CONFIG_SMP
+
+#define set_curr_timer(b, t) do { (b)->curr_timer = (t); } while (0)
+
+/*
+ * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
+ * means that all timers which are tied to this base via timer->base are
+ * locked, and the base itself is locked too.
+ *
+ * So __run_timers/migrate_timers can safely modify all timers which could
+ * be found on the lists/queues.
+ *
+ * When the timer's base is locked, and the timer removed from list, it is
+ * possible to set timer->base = NULL and drop the lock: the timer remains
+ * locked.
+ */
+static struct hrtimer_base *lock_hrtimer_base(const struct hrtimer *timer,
+ unsigned long *flags)
+{
+ struct hrtimer_base *base;
+
+ for (;;) {
+ base = timer->base;
+ if (likely(base != NULL)) {
+ spin_lock_irqsave(&base->lock, *flags);
+ if (likely(base == timer->base))
+ return base;
+ /* The timer has migrated to another CPU: */
+ spin_unlock_irqrestore(&base->lock, *flags);
+ }
+ cpu_relax();
+ }
+}
+
+/*
+ * Switch the timer base to the current CPU when possible.
+ */
+static inline struct hrtimer_base *
+switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_base *base)
+{
+ struct hrtimer_base *new_base;
+
+ new_base = &__get_cpu_var(hrtimer_bases[base->index]);
+
+ if (base != new_base) {
+ /*
+ * We are trying to schedule the timer on the local CPU.
+ * However we can't change timer's base while it is running,
+ * so we keep it on the same CPU. No hassle vs. reprogramming
+ * the event source in the high resolution case. The softirq
+ * code will take care of this when the timer function has
+ * completed. There is no conflict as we hold the lock until
+ * the timer is enqueued.
+ */
+ if (unlikely(base->curr_timer == timer))
+ return base;
+
+ /* See the comment in lock_hrtimer_base() */
+ timer->base = NULL;
+ spin_unlock(&base->lock);
+ spin_lock(&new_base->lock);
+ timer->base = new_base;
+ }
+ return new_base;
+}
+
+#else /* CONFIG_SMP */
+
+#define set_curr_timer(b, t) do { } while (0)
+
+static inline struct hrtimer_base *
+lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
+{
+ struct hrtimer_base *base = timer->base;
+
+ spin_lock_irqsave(&base->lock, *flags);
+
+ return base;
+}
+
+#define switch_hrtimer_base(t, b) (b)
+
+#endif /* !CONFIG_SMP */
+
+/*
+ * Functions for the union type storage format of ktime_t which are
+ * too large for inlining:
+ */
+#if BITS_PER_LONG < 64
+# ifndef CONFIG_KTIME_SCALAR
+/**
+ * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
+ *
+ * @kt: addend
+ * @nsec: the scalar nsec value to add
+ *
+ * Returns the sum of kt and nsec in ktime_t format
+ */
+ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
+{
+ ktime_t tmp;
+
+ if (likely(nsec < NSEC_PER_SEC)) {
+ tmp.tv64 = nsec;
+ } else {
+ unsigned long rem = do_div(nsec, NSEC_PER_SEC);
+
+ tmp = ktime_set((long)nsec, rem);
+ }
+
+ return ktime_add(kt, tmp);
+}
+
+#else /* CONFIG_KTIME_SCALAR */
+
+# endif /* !CONFIG_KTIME_SCALAR */
+
+/*
+ * Divide a ktime value by a nanosecond value
+ */
+static unsigned long ktime_divns(const ktime_t kt, nsec_t div)
+{
+ u64 dclc, inc, dns;
+ int sft = 0;
+
+ dclc = dns = ktime_to_ns(kt);
+ inc = div;
+ /* Make sure the divisor is less than 2^32: */
+ while (div >> 32) {
+ sft++;
+ div >>= 1;
+ }
+ dclc >>= sft;
+ do_div(dclc, (unsigned long) div);
+
+ return (unsigned long) dclc;
+}
+
+#else /* BITS_PER_LONG < 64 */
+# define ktime_divns(kt, div) (unsigned long)((kt).tv64 / (div))
+#endif /* BITS_PER_LONG >= 64 */
+
+/*
+ * Counterpart to lock_hrtimer_base() above:
+ */
+static inline
+void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
+{
+ spin_unlock_irqrestore(&timer->base->lock, *flags);
+}
+
+/**
+ * hrtimer_forward - forward the timer expiry
+ *
+ * @timer: hrtimer to forward
+ * @interval: the interval to forward
+ *
+ * Forward the timer expiry so it will expire in the future.
+ * The number of overruns is added to the overrun field.
+ */
+unsigned long
+hrtimer_forward(struct hrtimer *timer, ktime_t interval)
+{
+ unsigned long orun = 1;
+ ktime_t delta, now;
+
+ now = timer->base->get_time();
+
+ delta = ktime_sub(now, timer->expires);
+
+ if (delta.tv64 < 0)
+ return 0;
+
+ if (interval.tv64 < timer->base->resolution.tv64)
+ interval.tv64 = timer->base->resolution.tv64;
+
+ if (unlikely(delta.tv64 >= interval.tv64)) {
+ nsec_t incr = ktime_to_ns(interval);
+
+ orun = ktime_divns(delta, incr);
+ timer->expires = ktime_add_ns(timer->expires, incr * orun);
+ if (timer->expires.tv64 > now.tv64)
+ return orun;
+ /*
+ * This (and the ktime_add() below) is the
+ * correction for exact:
+ */
+ orun++;
+ }
+ timer->expires = ktime_add(timer->expires, interval);
+
+ return orun;
+}
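
A periodic user of this interface would typically re-arm itself from its callback by forwarding the expiry and returning HRTIMER_RESTART, exactly as the itimer conversion later in this patch does. A minimal sketch using only the callback convention defined in this file (the timer, interval and handler names are illustrative):

/* Illustrative periodic callback; my_timer/my_interval are hypothetical. */
static struct hrtimer my_timer;
static ktime_t my_interval;

static int my_periodic_fn(void *data)
{
	/* ... do the periodic work for 'data' ... */
	hrtimer_forward(&my_timer, my_interval);	/* push expiry into the future */
	return HRTIMER_RESTART;				/* softirq re-enqueues the timer */
}
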
+
+/*
+ * enqueue_hrtimer - internal function to (re)start a timer
+ *
+ * The timer is inserted in expiry order. Insertion into the
+ * red black tree is O(log(n)). Must hold the base lock.
+ */
+static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
+{
+ struct rb_node **link = &base->active.rb_node;
+ struct rb_node *parent = NULL;
+ struct hrtimer *entry;
+
+ /*
+ * Find the right place in the rbtree:
+ */
+ while (*link) {
+ parent = *link;
+ entry = rb_entry(parent, struct hrtimer, node);
+ /*
+ * We don't care about collisions. Nodes with
+ * the same expiry time stay together.
+ */
+ if (timer->expires.tv64 < entry->expires.tv64)
+ link = &(*link)->rb_left;
+ else
+ link = &(*link)->rb_right;
+ }
+
+ /*
+ * Insert the timer to the rbtree and check whether it
+ * replaces the first pending timer
+ */
+ rb_link_node(&timer->node, parent, link);
+ rb_insert_color(&timer->node, &base->active);
+
+ timer->state = HRTIMER_PENDING;
+
+ if (!base->first || timer->expires.tv64 <
+ rb_entry(base->first, struct hrtimer, node)->expires.tv64)
+ base->first = &timer->node;
+}
+
+/*
+ * __remove_hrtimer - internal function to remove a timer
+ *
+ * Caller must hold the base lock.
+ */
+static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
+{
+ /*
+ * Remove the timer from the rbtree and replace the
+ * first entry pointer if necessary.
+ */
+ if (base->first == &timer->node)
+ base->first = rb_next(&timer->node);
+ rb_erase(&timer->node, &base->active);
+}
+
+/*
+ * remove hrtimer, called with base lock held
+ */
+static inline int
+remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
+{
+ if (hrtimer_active(timer)) {
+ __remove_hrtimer(timer, base);
+ timer->state = HRTIMER_INACTIVE;
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * hrtimer_start - (re)start a relative timer on the current CPU
+ *
+ * @timer: the timer to be added
+ * @tim: expiry time
+ * @mode: expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
+ *
+ * Returns:
+ * 0 on success
+ * 1 when the timer was active
+ */
+int
+hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
+{
+ struct hrtimer_base *base, *new_base;
+ unsigned long flags;
+ int ret;
+
+ base = lock_hrtimer_base(timer, &flags);
+
+ /* Remove an active timer from the queue: */
+ ret = remove_hrtimer(timer, base);
+
+ /* Switch the timer base, if necessary: */
+ new_base = switch_hrtimer_base(timer, base);
+
+ if (mode == HRTIMER_REL)
+ tim = ktime_add(tim, new_base->get_time());
+ timer->expires = tim;
+
+ enqueue_hrtimer(timer, new_base);
+
+ unlock_hrtimer_base(timer, &flags);
+
+ return ret;
+}
+
+/**
+ * hrtimer_try_to_cancel - try to deactivate a timer
+ *
+ * @timer: hrtimer to stop
+ *
+ * Returns:
+ * 0 when the timer was not active
+ * 1 when the timer was active
+ * -1 when the timer is currently executing the callback function and
+ * cannot be stopped
+ */
+int hrtimer_try_to_cancel(struct hrtimer *timer)
+{
+ struct hrtimer_base *base;
+ unsigned long flags;
+ int ret = -1;
+
+ base = lock_hrtimer_base(timer, &flags);
+
+ if (base->curr_timer != timer)
+ ret = remove_hrtimer(timer, base);
+
+ unlock_hrtimer_base(timer, &flags);
+
+ return ret;
+
+}
+
+/**
+ * hrtimer_cancel - cancel a timer and wait for the handler to finish.
+ *
+ * @timer: the timer to be cancelled
+ *
+ * Returns:
+ * 0 when the timer was not active
+ * 1 when the timer was active
+ */
+int hrtimer_cancel(struct hrtimer *timer)
+{
+ for (;;) {
+ int ret = hrtimer_try_to_cancel(timer);
+
+ if (ret >= 0)
+ return ret;
+ }
+}
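
Putting the pieces together, a one-shot user initializes the timer against a clock base, points ->function/->data at a handler, starts it relative to now, and uses hrtimer_cancel() for synchronous teardown. A hedged sketch of that pattern (the names and the 100ms expiry are illustrative, not taken from this patch):

/* Illustrative one-shot use of the interface introduced above. */
static struct hrtimer demo_timer;

static int demo_timeout(void *data)
{
	/* ... handle the timeout for 'data' ... */
	return HRTIMER_NORESTART;		/* one-shot: do not re-enqueue */
}

static void demo_arm(void *cookie)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC);
	demo_timer.function = demo_timeout;
	demo_timer.data = cookie;
	/* expire 100ms from now; HRTIMER_REL adds base->get_time() internally */
	hrtimer_start(&demo_timer, ktime_set(0, 100000000), HRTIMER_REL);
}

static void demo_teardown(void)
{
	hrtimer_cancel(&demo_timer);		/* waits for any running callback */
}
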
+
+/**
+ * hrtimer_get_remaining - get remaining time for the timer
+ *
+ * @timer: the timer to read
+ */
+ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
+{
+ struct hrtimer_base *base;
+ unsigned long flags;
+ ktime_t rem;
+
+ base = lock_hrtimer_base(timer, &flags);
+ rem = ktime_sub(timer->expires, timer->base->get_time());
+ unlock_hrtimer_base(timer, &flags);
+
+ return rem;
+}
+
+/**
+ * hrtimer_rebase - rebase an initialized hrtimer to a different base
+ *
+ * @timer: the timer to be rebased
+ * @clock_id: the clock to be used
+ */
+void hrtimer_rebase(struct hrtimer *timer, const clockid_t clock_id)
+{
+ struct hrtimer_base *bases;
+
+ bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
+ timer->base = &bases[clock_id];
+}
+
+/**
+ * hrtimer_init - initialize a timer to the given clock
+ *
+ * @timer: the timer to be initialized
+ * @clock_id: the clock to be used
+ */
+void hrtimer_init(struct hrtimer *timer, const clockid_t clock_id)
+{
+ memset(timer, 0, sizeof(struct hrtimer));
+ hrtimer_rebase(timer, clock_id);
+}
+
+/**
+ * hrtimer_get_res - get the timer resolution for a clock
+ *
+ * @which_clock: which clock to query
+ * @tp: pointer to timespec variable to store the resolution
+ *
+ * Store the resolution of the clock selected by which_clock in the
+ * variable pointed to by tp.
+ */
+int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
+{
+ struct hrtimer_base *bases;
+
+ bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
+ *tp = ktime_to_timespec(bases[which_clock].resolution);
+
+ return 0;
+}
+
+/*
+ * Expire the per base hrtimer-queue:
+ */
+static inline void run_hrtimer_queue(struct hrtimer_base *base)
+{
+ ktime_t now = base->get_time();
+ struct rb_node *node;
+
+ spin_lock_irq(&base->lock);
+
+ while ((node = base->first)) {
+ struct hrtimer *timer;
+ int (*fn)(void *);
+ int restart;
+ void *data;
+
+ timer = rb_entry(node, struct hrtimer, node);
+ if (now.tv64 <= timer->expires.tv64)
+ break;
+
+ fn = timer->function;
+ data = timer->data;
+ set_curr_timer(base, timer);
+ __remove_hrtimer(timer, base);
+ spin_unlock_irq(&base->lock);
+
+ /*
+ * fn == NULL is a special case for the simplest timer
+ * variant - wake up process and do not restart:
+ */
+ if (!fn) {
+ wake_up_process(data);
+ restart = HRTIMER_NORESTART;
+ } else
+ restart = fn(data);
+
+ spin_lock_irq(&base->lock);
+
+ if (restart == HRTIMER_RESTART)
+ enqueue_hrtimer(timer, base);
+ else
+ timer->state = HRTIMER_EXPIRED;
+ }
+ set_curr_timer(base, NULL);
+ spin_unlock_irq(&base->lock);
+}
+
+/*
+ * Called from timer softirq every jiffy, expire hrtimers:
+ */
+void hrtimer_run_queues(void)
+{
+ struct hrtimer_base *base = __get_cpu_var(hrtimer_bases);
+ int i;
+
+ for (i = 0; i < MAX_HRTIMER_BASES; i++)
+ run_hrtimer_queue(&base[i]);
+}
+
+/*
+ * Sleep related functions:
+ */
+
+/**
+ * schedule_hrtimer - sleep until timeout
+ *
+ * @timer: hrtimer variable initialized with the correct clock base
+ * @mode: timeout value is abs/rel
+ *
+ * Make the current task sleep until the timer expires.
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least the requested time is guaranteed to
+ * pass before the routine returns. The routine will return 0
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task. In this case the remaining time
+ * will be returned
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ */
+static ktime_t __sched
+schedule_hrtimer(struct hrtimer *timer, const enum hrtimer_mode mode)
+{
+ /* fn stays NULL, meaning single-shot wakeup: */
+ timer->data = current;
+
+ hrtimer_start(timer, timer->expires, mode);
+
+ schedule();
+ hrtimer_cancel(timer);
+
+ /* Return the remaining time: */
+ if (timer->state != HRTIMER_EXPIRED)
+ return ktime_sub(timer->expires, timer->base->get_time());
+ else
+ return (ktime_t) {.tv64 = 0 };
+}
+
+static inline ktime_t __sched
+schedule_hrtimer_interruptible(struct hrtimer *timer,
+ const enum hrtimer_mode mode)
+{
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ return schedule_hrtimer(timer, mode);
+}
+
+static long __sched
+nanosleep_restart(struct restart_block *restart, clockid_t clockid)
+{
+ struct timespec __user *rmtp, tu;
+ void *rfn_save = restart->fn;
+ struct hrtimer timer;
+ ktime_t rem;
+
+ restart->fn = do_no_restart_syscall;
+
+ hrtimer_init(&timer, clockid);
+
+ timer.expires.tv64 = ((u64)restart->arg1 << 32) | (u64) restart->arg0;
+
+ rem = schedule_hrtimer_interruptible(&timer, HRTIMER_ABS);
+
+ if (rem.tv64 <= 0)
+ return 0;
+
+ rmtp = (struct timespec __user *) restart->arg2;
+ tu = ktime_to_timespec(rem);
+ if (rmtp && copy_to_user(rmtp, &tu, sizeof(tu)))
+ return -EFAULT;
+
+ restart->fn = rfn_save;
+
+ /* The other values in restart are already filled in */
+ return -ERESTART_RESTARTBLOCK;
+}
+
+static long __sched nanosleep_restart_mono(struct restart_block *restart)
+{
+ return nanosleep_restart(restart, CLOCK_MONOTONIC);
+}
+
+static long __sched nanosleep_restart_real(struct restart_block *restart)
+{
+ return nanosleep_restart(restart, CLOCK_REALTIME);
+}
+
+long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
+ const enum hrtimer_mode mode, const clockid_t clockid)
+{
+ struct restart_block *restart;
+ struct hrtimer timer;
+ struct timespec tu;
+ ktime_t rem;
+
+ hrtimer_init(&timer, clockid);
+
+ timer.expires = timespec_to_ktime(*rqtp);
+
+ rem = schedule_hrtimer_interruptible(&timer, mode);
+ if (rem.tv64 <= 0)
+ return 0;
+
+ /* Absolute timers do not update the rmtp value: */
+ if (mode == HRTIMER_ABS)
+ return -ERESTARTNOHAND;
+
+ tu = ktime_to_timespec(rem);
+
+ if (rmtp && copy_to_user(rmtp, &tu, sizeof(tu)))
+ return -EFAULT;
+
+ restart = &current_thread_info()->restart_block;
+ restart->fn = (clockid == CLOCK_MONOTONIC) ?
+ nanosleep_restart_mono : nanosleep_restart_real;
+ restart->arg0 = timer.expires.tv64 & 0xFFFFFFFF;
+ restart->arg1 = timer.expires.tv64 >> 32;
+ restart->arg2 = (unsigned long) rmtp;
+
+ return -ERESTART_RESTARTBLOCK;
+}
+
+asmlinkage long
+sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
+{
+ struct timespec tu;
+
+ if (copy_from_user(&tu, rqtp, sizeof(tu)))
+ return -EFAULT;
+
+ if (!timespec_valid(&tu))
+ return -EINVAL;
+
+ return hrtimer_nanosleep(&tu, rmtp, HRTIMER_REL, CLOCK_MONOTONIC);
+}
+
+/*
+ * Functions related to boot-time initialization:
+ */
+static void __devinit init_hrtimers_cpu(int cpu)
+{
+ struct hrtimer_base *base = per_cpu(hrtimer_bases, cpu);
+ int i;
+
+ for (i = 0; i < MAX_HRTIMER_BASES; i++) {
+ spin_lock_init(&base->lock);
+ base++;
+ }
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void migrate_hrtimer_list(struct hrtimer_base *old_base,
+ struct hrtimer_base *new_base)
+{
+ struct hrtimer *timer;
+ struct rb_node *node;
+
+ while ((node = rb_first(&old_base->active))) {
+ timer = rb_entry(node, struct hrtimer, node);
+ __remove_hrtimer(timer, old_base);
+ timer->base = new_base;
+ enqueue_hrtimer(timer, new_base);
+ }
+}
+
+static void migrate_hrtimers(int cpu)
+{
+ struct hrtimer_base *old_base, *new_base;
+ int i;
+
+ BUG_ON(cpu_online(cpu));
+ old_base = per_cpu(hrtimer_bases, cpu);
+ new_base = get_cpu_var(hrtimer_bases);
+
+ local_irq_disable();
+
+ for (i = 0; i < MAX_HRTIMER_BASES; i++) {
+
+ spin_lock(&new_base->lock);
+ spin_lock(&old_base->lock);
+
+ BUG_ON(old_base->curr_timer);
+
+ migrate_hrtimer_list(old_base, new_base);
+
+ spin_unlock(&old_base->lock);
+ spin_unlock(&new_base->lock);
+ old_base++;
+ new_base++;
+ }
+
+ local_irq_enable();
+ put_cpu_var(hrtimer_bases);
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static int __devinit hrtimer_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ long cpu = (long)hcpu;
+
+ switch (action) {
+
+ case CPU_UP_PREPARE:
+ init_hrtimers_cpu(cpu);
+ break;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ case CPU_DEAD:
+ migrate_hrtimers(cpu);
+ break;
+#endif
+
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __devinitdata hrtimers_nb = {
+ .notifier_call = hrtimer_cpu_notify,
+};
+
+void __init hrtimers_init(void)
+{
+ hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
+ (void *)(long)smp_processor_id());
+ register_cpu_notifier(&hrtimers_nb);
+}
+
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 81c49a4d679..97d5559997d 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -366,6 +366,8 @@ int request_irq(unsigned int irq,
action->next = NULL;
action->dev_id = dev_id;
+ select_smp_affinity(irq);
+
retval = setup_irq(irq, action);
if (retval)
kfree(action);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index f26e534c658..d03b5eef8ce 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -10,6 +10,8 @@
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
+#include "internals.h"
+
static struct proc_dir_entry *root_irq_dir, *irq_dir[NR_IRQS];
#ifdef CONFIG_SMP
@@ -68,7 +70,9 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
*/
cpus_and(tmp, new_value, cpu_online_map);
if (cpus_empty(tmp))
- return -EINVAL;
+ /* Special case for empty set - allow the architecture
+ code to set default SMP affinity. */
+ return select_smp_affinity(irq) ? -EINVAL : full_count;
proc_set_irq_affinity(irq, new_value);
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 7c1b25e25e4..c2c05c4ff28 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -12,36 +12,46 @@
#include <linux/syscalls.h>
#include <linux/time.h>
#include <linux/posix-timers.h>
+#include <linux/hrtimer.h>
#include <asm/uaccess.h>
-static unsigned long it_real_value(struct signal_struct *sig)
+/**
+ * itimer_get_remtime - get remaining time for the timer
+ *
+ * @timer: the timer to read
+ *
+ * Returns the delta between the expiry time and now, which can be
+ * less than zero or 1usec for a pending expired timer
+ */
+static struct timeval itimer_get_remtime(struct hrtimer *timer)
{
- unsigned long val = 0;
- if (timer_pending(&sig->real_timer)) {
- val = sig->real_timer.expires - jiffies;
+ ktime_t rem = hrtimer_get_remaining(timer);
- /* look out for negative/zero itimer.. */
- if ((long) val <= 0)
- val = 1;
- }
- return val;
+ /*
+ * Racy but safe: if the itimer expires after the above
+ * hrtimer_get_remaining() call but before this condition
+ * then we return 0 - which is correct.
+ */
+ if (hrtimer_active(timer)) {
+ if (rem.tv64 <= 0)
+ rem.tv64 = NSEC_PER_USEC;
+ } else
+ rem.tv64 = 0;
+
+ return ktime_to_timeval(rem);
}
int do_getitimer(int which, struct itimerval *value)
{
struct task_struct *tsk = current;
- unsigned long interval, val;
cputime_t cinterval, cval;
switch (which) {
case ITIMER_REAL:
- spin_lock_irq(&tsk->sighand->siglock);
- interval = tsk->signal->it_real_incr;
- val = it_real_value(tsk->signal);
- spin_unlock_irq(&tsk->sighand->siglock);
- jiffies_to_timeval(val, &value->it_value);
- jiffies_to_timeval(interval, &value->it_interval);
+ value->it_value = itimer_get_remtime(&tsk->signal->real_timer);
+ value->it_interval =
+ ktime_to_timeval(tsk->signal->it_real_incr);
break;
case ITIMER_VIRTUAL:
read_lock(&tasklist_lock);
@@ -113,59 +123,45 @@ asmlinkage long sys_getitimer(int which, struct itimerval __user *value)
}
-void it_real_fn(unsigned long __data)
+/*
+ * The timer is automagically restarted when interval != 0
+ */
+int it_real_fn(void *data)
{
- struct task_struct * p = (struct task_struct *) __data;
- unsigned long inc = p->signal->it_real_incr;
+ struct task_struct *tsk = (struct task_struct *) data;
- send_group_sig_info(SIGALRM, SEND_SIG_PRIV, p);
+ send_group_sig_info(SIGALRM, SEND_SIG_PRIV, tsk);
- /*
- * Now restart the timer if necessary. We don't need any locking
- * here because do_setitimer makes sure we have finished running
- * before it touches anything.
- * Note, we KNOW we are (or should be) at a jiffie edge here so
- * we don't need the +1 stuff. Also, we want to use the prior
- * expire value so as to not "slip" a jiffie if we are late.
- * Deal with requesting a time prior to "now" here rather than
- * in add_timer.
- */
- if (!inc)
- return;
- while (time_before_eq(p->signal->real_timer.expires, jiffies))
- p->signal->real_timer.expires += inc;
- add_timer(&p->signal->real_timer);
+ if (tsk->signal->it_real_incr.tv64 != 0) {
+ hrtimer_forward(&tsk->signal->real_timer,
+ tsk->signal->it_real_incr);
+
+ return HRTIMER_RESTART;
+ }
+ return HRTIMER_NORESTART;
}
int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
{
struct task_struct *tsk = current;
- unsigned long val, interval, expires;
+ struct hrtimer *timer;
+ ktime_t expires;
cputime_t cval, cinterval, nval, ninterval;
switch (which) {
case ITIMER_REAL:
-again:
- spin_lock_irq(&tsk->sighand->siglock);
- interval = tsk->signal->it_real_incr;
- val = it_real_value(tsk->signal);
- /* We are sharing ->siglock with it_real_fn() */
- if (try_to_del_timer_sync(&tsk->signal->real_timer) < 0) {
- spin_unlock_irq(&tsk->sighand->siglock);
- goto again;
- }
- tsk->signal->it_real_incr =
- timeval_to_jiffies(&value->it_interval);
- expires = timeval_to_jiffies(&value->it_value);
- if (expires)
- mod_timer(&tsk->signal->real_timer,
- jiffies + 1 + expires);
- spin_unlock_irq(&tsk->sighand->siglock);
+ timer = &tsk->signal->real_timer;
+ hrtimer_cancel(timer);
if (ovalue) {
- jiffies_to_timeval(val, &ovalue->it_value);
- jiffies_to_timeval(interval,
- &ovalue->it_interval);
+ ovalue->it_value = itimer_get_remtime(timer);
+ ovalue->it_interval
+ = ktime_to_timeval(tsk->signal->it_real_incr);
}
+ tsk->signal->it_real_incr =
+ timeval_to_ktime(value->it_interval);
+ expires = timeval_to_ktime(value->it_value);
+ if (expires.tv64 != 0)
+ hrtimer_start(timer, expires, HRTIMER_REL);
break;
case ITIMER_VIRTUAL:
nval = timeval_to_cputime(&value->it_value);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 2c95848fbce..bf39d28e4c0 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -6,6 +6,7 @@
* Version 2. See the file COPYING for more details.
*/
+#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
@@ -26,6 +27,9 @@
#include <asm/system.h>
#include <asm/semaphore.h>
+/* Per cpu memory for storing cpu states in case of system crash. */
+note_buf_t* crash_notes;
+
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
.name = "Crash kernel",
@@ -1054,9 +1058,24 @@ void crash_kexec(struct pt_regs *regs)
if (!locked) {
image = xchg(&kexec_crash_image, NULL);
if (image) {
- machine_crash_shutdown(regs);
+ struct pt_regs fixed_regs;
+ crash_setup_regs(&fixed_regs, regs);
+ machine_crash_shutdown(&fixed_regs);
machine_kexec(image);
}
xchg(&kexec_lock, 0);
}
}
+
+static int __init crash_notes_memory_init(void)
+{
+ /* Allocate memory for saving cpu registers. */
+ crash_notes = alloc_percpu(note_buf_t);
+ if (!crash_notes) {
+ printk("Kexec: Memory allocation for saving cpu register"
+ " states failed\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+module_init(crash_notes_memory_init)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 3bb71e63a37..3ea6325228d 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -48,10 +48,11 @@
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
-static DEFINE_SPINLOCK(kprobe_lock); /* Protects kprobe_table */
+DECLARE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
+#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
* kprobe->ainsn.insn points to the copy of the instruction to be
* single-stepped. x86_64, POWER4 and above have no-exec support and
@@ -151,6 +152,7 @@ void __kprobes free_insn_slot(kprobe_opcode_t *slot)
}
}
}
+#endif
/* We have preemption disabled.. so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
@@ -165,7 +167,7 @@ static inline void reset_kprobe_instance(void)
/*
* This routine is called either:
- * - under the kprobe_lock spinlock - during kprobe_[un]register()
+ * - under the kprobe_mutex - during kprobe_[un]register()
* OR
* - with preemption disabled - from arch/xxx/kernel/kprobes.c
*/
@@ -418,7 +420,6 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
/*
* This is the second or subsequent kprobe at the address - handle
* the intricacies
- * TODO: Move kcalloc outside the spin_lock
*/
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
struct kprobe *p)
@@ -430,7 +431,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
copy_kprobe(old_p, p);
ret = add_new_kprobe(old_p, p);
} else {
- ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
+ ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
if (!ap)
return -ENOMEM;
add_aggr_kprobe(ap, old_p);
@@ -440,25 +441,6 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
return ret;
}
-/* kprobe removal house-keeping routines */
-static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
-{
- arch_disarm_kprobe(p);
- hlist_del_rcu(&p->hlist);
- spin_unlock_irqrestore(&kprobe_lock, flags);
- arch_remove_kprobe(p);
-}
-
-static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
- struct kprobe *p, unsigned long flags)
-{
- list_del_rcu(&p->list);
- if (list_empty(&old_p->list))
- cleanup_kprobe(old_p, flags);
- else
- spin_unlock_irqrestore(&kprobe_lock, flags);
-}
-
static int __kprobes in_kprobes_functions(unsigned long addr)
{
if (addr >= (unsigned long)__kprobes_text_start
@@ -467,33 +449,44 @@ static int __kprobes in_kprobes_functions(unsigned long addr)
return 0;
}
-int __kprobes register_kprobe(struct kprobe *p)
+static int __kprobes __register_kprobe(struct kprobe *p,
+ unsigned long called_from)
{
int ret = 0;
- unsigned long flags = 0;
struct kprobe *old_p;
- struct module *mod;
+ struct module *probed_mod;
if ((!kernel_text_address((unsigned long) p->addr)) ||
in_kprobes_functions((unsigned long) p->addr))
return -EINVAL;
- if ((mod = module_text_address((unsigned long) p->addr)) &&
- (unlikely(!try_module_get(mod))))
- return -EINVAL;
-
- if ((ret = arch_prepare_kprobe(p)) != 0)
- goto rm_kprobe;
+ p->mod_refcounted = 0;
+ /* Check whether we are probing a module */
+ if ((probed_mod = module_text_address((unsigned long) p->addr))) {
+ struct module *calling_mod = module_text_address(called_from);
+ /* We must allow modules to probe themselves and
+ * in this case avoid incrementing the module refcount,
+ * so as to allow unloading of self-probing modules.
+ */
+ if (calling_mod && (calling_mod != probed_mod)) {
+ if (unlikely(!try_module_get(probed_mod)))
+ return -EINVAL;
+ p->mod_refcounted = 1;
+ } else
+ probed_mod = NULL;
+ }
p->nmissed = 0;
- spin_lock_irqsave(&kprobe_lock, flags);
+ down(&kprobe_mutex);
old_p = get_kprobe(p->addr);
if (old_p) {
ret = register_aggr_kprobe(old_p, p);
goto out;
}
- arch_copy_kprobe(p);
+ if ((ret = arch_prepare_kprobe(p)) != 0)
+ goto out;
+
INIT_HLIST_NODE(&p->hlist);
hlist_add_head_rcu(&p->hlist,
&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
@@ -501,40 +494,66 @@ int __kprobes register_kprobe(struct kprobe *p)
arch_arm_kprobe(p);
out:
- spin_unlock_irqrestore(&kprobe_lock, flags);
-rm_kprobe:
- if (ret == -EEXIST)
- arch_remove_kprobe(p);
- if (ret && mod)
- module_put(mod);
+ up(&kprobe_mutex);
+
+ if (ret && probed_mod)
+ module_put(probed_mod);
return ret;
}
+int __kprobes register_kprobe(struct kprobe *p)
+{
+ return __register_kprobe(p,
+ (unsigned long)__builtin_return_address(0));
+}
+
void __kprobes unregister_kprobe(struct kprobe *p)
{
- unsigned long flags;
- struct kprobe *old_p;
struct module *mod;
+ struct kprobe *old_p, *list_p;
+ int cleanup_p;
- spin_lock_irqsave(&kprobe_lock, flags);
+ down(&kprobe_mutex);
old_p = get_kprobe(p->addr);
- if (old_p) {
- /* cleanup_*_kprobe() does the spin_unlock_irqrestore */
- if (old_p->pre_handler == aggr_pre_handler)
- cleanup_aggr_kprobe(old_p, p, flags);
- else
- cleanup_kprobe(p, flags);
+ if (unlikely(!old_p)) {
+ up(&kprobe_mutex);
+ return;
+ }
+ if (p != old_p) {
+ list_for_each_entry_rcu(list_p, &old_p->list, list)
+ if (list_p == p)
+ /* kprobe p is a valid probe */
+ goto valid_p;
+ up(&kprobe_mutex);
+ return;
+ }
+valid_p:
+ if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
+ (p->list.next == &old_p->list) &&
+ (p->list.prev == &old_p->list))) {
+ /* Only probe on the hash list */
+ arch_disarm_kprobe(p);
+ hlist_del_rcu(&old_p->hlist);
+ cleanup_p = 1;
+ } else {
+ list_del_rcu(&p->list);
+ cleanup_p = 0;
+ }
- synchronize_sched();
+ up(&kprobe_mutex);
- if ((mod = module_text_address((unsigned long)p->addr)))
- module_put(mod);
+ synchronize_sched();
+ if (p->mod_refcounted &&
+ (mod = module_text_address((unsigned long)p->addr)))
+ module_put(mod);
- if (old_p->pre_handler == aggr_pre_handler &&
- list_empty(&old_p->list))
+ if (cleanup_p) {
+ if (p != old_p) {
+ list_del_rcu(&p->list);
kfree(old_p);
- } else
- spin_unlock_irqrestore(&kprobe_lock, flags);
+ }
+ arch_remove_kprobe(p);
+ }
}
static struct notifier_block kprobe_exceptions_nb = {
@@ -548,7 +567,8 @@ int __kprobes register_jprobe(struct jprobe *jp)
jp->kp.pre_handler = setjmp_pre_handler;
jp->kp.break_handler = longjmp_break_handler;
- return register_kprobe(&jp->kp);
+ return __register_kprobe(&jp->kp,
+ (unsigned long)__builtin_return_address(0));
}
void __kprobes unregister_jprobe(struct jprobe *jp)
@@ -588,7 +608,8 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
rp->nmissed = 0;
/* Establish function entry probe point */
- if ((ret = register_kprobe(&rp->kp)) != 0)
+ if ((ret = __register_kprobe(&rp->kp,
+ (unsigned long)__builtin_return_address(0))) != 0)
free_rp_inst(rp);
return ret;
}
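
The registration rework above threads the caller's return address into __register_kprobe(), so a module that probes its own text is recognized (calling_mod == probed_mod) and takes no reference on itself; that is what keeps self-probing modules unloadable. A rough sketch of such a self-probing module against this 2.6-era API follows; the module, function and message names are illustrative, not part of the patch:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

static noinline int watched_fn(int x)
{
	return x + 1;
}

/* pre_handler: runs just before the probed instruction executes */
static int selfprobe_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "watched_fn() hit\n");
	return 0;
}

static struct kprobe kp = {
	.pre_handler = selfprobe_pre,
};

static int __init selfprobe_init(void)
{
	int ret;

	kp.addr = (kprobe_opcode_t *) watched_fn;
	/* register_kprobe() records __builtin_return_address(0); the caller
	 * and kp.addr live in the same module, so mod_refcounted stays 0 and
	 * rmmod of this module keeps working. */
	ret = register_kprobe(&kp);
	if (!ret)
		printk(KERN_INFO "watched_fn(1) = %d\n", watched_fn(1));
	return ret;
}

static void __exit selfprobe_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(selfprobe_init);
module_exit(selfprobe_exit);
MODULE_LICENSE("GPL");
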
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 99af8b05eea..d5eeae0fa5b 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -51,16 +51,6 @@ static ssize_t uevent_helper_store(struct subsystem *subsys, const char *page, s
KERNEL_ATTR_RW(uevent_helper);
#endif
-#ifdef CONFIG_KEXEC
-#include <asm/kexec.h>
-
-static ssize_t crash_notes_show(struct subsystem *subsys, char *page)
-{
- return sprintf(page, "%p\n", (void *)crash_notes);
-}
-KERNEL_ATTR_RO(crash_notes);
-#endif
-
decl_subsys(kernel, NULL, NULL);
EXPORT_SYMBOL_GPL(kernel_subsys);
@@ -69,9 +59,6 @@ static struct attribute * kernel_attrs[] = {
&uevent_seqnum_attr.attr,
&uevent_helper_attr.attr,
#endif
-#ifdef CONFIG_KEXEC
- &crash_notes_attr.attr,
-#endif
NULL
};
diff --git a/kernel/module.c b/kernel/module.c
index 2ea929d51ad..618ed6e23ec 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -28,6 +28,7 @@
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
+#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
@@ -496,15 +497,15 @@ static void module_unload_free(struct module *mod)
}
#ifdef CONFIG_MODULE_FORCE_UNLOAD
-static inline int try_force(unsigned int flags)
+static inline int try_force_unload(unsigned int flags)
{
int ret = (flags & O_TRUNC);
if (ret)
- add_taint(TAINT_FORCED_MODULE);
+ add_taint(TAINT_FORCED_RMMOD);
return ret;
}
#else
-static inline int try_force(unsigned int flags)
+static inline int try_force_unload(unsigned int flags)
{
return 0;
}
@@ -524,7 +525,7 @@ static int __try_stop_module(void *_sref)
/* If it's not unused, quit unless we are told to block. */
if ((sref->flags & O_NONBLOCK) && module_refcount(sref->mod) != 0) {
- if (!(*sref->forced = try_force(sref->flags)))
+ if (!(*sref->forced = try_force_unload(sref->flags)))
return -EWOULDBLOCK;
}
@@ -609,7 +610,7 @@ sys_delete_module(const char __user *name_user, unsigned int flags)
/* If it has an init func, it must have an exit func to unload */
if ((mod->init != NULL && mod->exit == NULL)
|| mod->unsafe) {
- forced = try_force(flags);
+ forced = try_force_unload(flags);
if (!forced) {
/* This module can't be removed */
ret = -EBUSY;
@@ -958,7 +959,6 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
unsigned long ret;
const unsigned long *crc;
- spin_lock_irq(&modlist_lock);
ret = __find_symbol(name, &owner, &crc, mod->license_gplok);
if (ret) {
/* use_module can fail due to OOM, or module unloading */
@@ -966,7 +966,6 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
!use_module(mod, owner))
ret = 0;
}
- spin_unlock_irq(&modlist_lock);
return ret;
}
@@ -1204,6 +1203,39 @@ void *__symbol_get(const char *symbol)
}
EXPORT_SYMBOL_GPL(__symbol_get);
+/*
+ * Ensure that an exported symbol [global namespace] does not already exist
+ * in the kernel or in some other module's exported symbol table.
+ */
+static int verify_export_symbols(struct module *mod)
+{
+ const char *name = NULL;
+ unsigned long i, ret = 0;
+ struct module *owner;
+ const unsigned long *crc;
+
+ for (i = 0; i < mod->num_syms; i++)
+ if (__find_symbol(mod->syms[i].name, &owner, &crc, 1)) {
+ name = mod->syms[i].name;
+ ret = -ENOEXEC;
+ goto dup;
+ }
+
+ for (i = 0; i < mod->num_gpl_syms; i++)
+ if (__find_symbol(mod->gpl_syms[i].name, &owner, &crc, 1)) {
+ name = mod->gpl_syms[i].name;
+ ret = -ENOEXEC;
+ goto dup;
+ }
+
+dup:
+ if (ret)
+ printk(KERN_ERR "%s: exports duplicate symbol %s (owned by %s)\n",
+ mod->name, name, module_name(owner));
+
+ return ret;
+}
+
/* Change all symbols so that sh_value encodes the pointer directly. */
static int simplify_symbols(Elf_Shdr *sechdrs,
unsigned int symindex,
@@ -1715,6 +1747,11 @@ static struct module *load_module(void __user *umod,
/* Set up license info based on the info section */
set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
+ if (strcmp(mod->name, "ndiswrapper") == 0)
+ add_taint(TAINT_PROPRIETARY_MODULE);
+ if (strcmp(mod->name, "driverloader") == 0)
+ add_taint(TAINT_PROPRIETARY_MODULE);
+
#ifdef CONFIG_MODULE_UNLOAD
/* Set up MODINFO_ATTR fields */
setup_modinfo(mod, sechdrs, infoindex);
@@ -1767,6 +1804,12 @@ static struct module *load_module(void __user *umod,
goto cleanup;
}
+ /* Find duplicate symbols */
+ err = verify_export_symbols(mod);
+
+ if (err < 0)
+ goto cleanup;
+
/* Set up and sort exception table */
mod->num_exentries = sechdrs[exindex].sh_size / sizeof(*mod->extable);
mod->extable = extable = (void *)sechdrs[exindex].sh_addr;
@@ -1854,8 +1897,7 @@ static struct module *load_module(void __user *umod,
kfree(args);
free_hdr:
vfree(hdr);
- if (err < 0) return ERR_PTR(err);
- else return ptr;
+ return ERR_PTR(err);
truncated:
printk(KERN_ERR "Module len %lu truncated\n", len);
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
new file mode 100644
index 00000000000..f4913c37695
--- /dev/null
+++ b/kernel/mutex-debug.c
@@ -0,0 +1,462 @@
+/*
+ * kernel/mutex-debug.c
+ *
+ * Debugging code for mutexes
+ *
+ * Started by Ingo Molnar:
+ *
+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * lock debugging, locking tree, deadlock detection started by:
+ *
+ * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
+ * Released under the General Public License (GPL).
+ */
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include <linux/interrupt.h>
+
+#include "mutex-debug.h"
+
+/*
+ * We need a global lock when we walk through the multi-process
+ * lock tree. Only used in the deadlock-debugging case.
+ */
+DEFINE_SPINLOCK(debug_mutex_lock);
+
+/*
+ * All locks held by all tasks, in a single global list:
+ */
+LIST_HEAD(debug_mutex_held_locks);
+
+/*
+ * In the debug case we carry the caller's instruction pointer into
+ * other functions, but we don't want the function argument overhead
+ * in the non-debug case - hence these macros:
+ */
+#define __IP_DECL__ , unsigned long ip
+#define __IP__ , ip
+#define __RET_IP__ , (unsigned long)__builtin_return_address(0)
+
+/*
+ * "mutex debugging enabled" flag. We turn it off when we detect
+ * the first problem because we don't want to recurse back
+ * into the tracing code when doing error printk or
+ * executing a BUG():
+ */
+int debug_mutex_on = 1;
+
+static void printk_task(struct task_struct *p)
+{
+ if (p)
+ printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio);
+ else
+ printk("<none>");
+}
+
+static void printk_ti(struct thread_info *ti)
+{
+ if (ti)
+ printk_task(ti->task);
+ else
+ printk("<none>");
+}
+
+static void printk_task_short(struct task_struct *p)
+{
+ if (p)
+ printk("%s/%d [%p, %3d]", p->comm, p->pid, p, p->prio);
+ else
+ printk("<none>");
+}
+
+static void printk_lock(struct mutex *lock, int print_owner)
+{
+ printk(" [%p] {%s}\n", lock, lock->name);
+
+ if (print_owner && lock->owner) {
+ printk(".. held by: ");
+ printk_ti(lock->owner);
+ printk("\n");
+ }
+ if (lock->owner) {
+ printk("... acquired at: ");
+ print_symbol("%s\n", lock->acquire_ip);
+ }
+}
+
+/*
+ * printk locks held by a task:
+ */
+static void show_task_locks(struct task_struct *p)
+{
+ switch (p->state) {
+ case TASK_RUNNING: printk("R"); break;
+ case TASK_INTERRUPTIBLE: printk("S"); break;
+ case TASK_UNINTERRUPTIBLE: printk("D"); break;
+ case TASK_STOPPED: printk("T"); break;
+ case EXIT_ZOMBIE: printk("Z"); break;
+ case EXIT_DEAD: printk("X"); break;
+ default: printk("?"); break;
+ }
+ printk_task(p);
+ if (p->blocked_on) {
+ struct mutex *lock = p->blocked_on->lock;
+
+ printk(" blocked on mutex:");
+ printk_lock(lock, 1);
+ } else
+ printk(" (not blocked on mutex)\n");
+}
+
+/*
+ * printk all locks held in the system (if filter == NULL),
+ * or all locks belonging to a single task (if filter != NULL):
+ */
+void show_held_locks(struct task_struct *filter)
+{
+ struct list_head *curr, *cursor = NULL;
+ struct mutex *lock;
+ struct thread_info *t;
+ unsigned long flags;
+ int count = 0;
+
+ if (filter) {
+ printk("------------------------------\n");
+ printk("| showing all locks held by: | (");
+ printk_task_short(filter);
+ printk("):\n");
+ printk("------------------------------\n");
+ } else {
+ printk("---------------------------\n");
+ printk("| showing all locks held: |\n");
+ printk("---------------------------\n");
+ }
+
+ /*
+ * Play safe and acquire the global trace lock. We
+ * cannot printk with that lock held so we iterate
+ * very carefully:
+ */
+next:
+ debug_spin_lock_save(&debug_mutex_lock, flags);
+ list_for_each(curr, &debug_mutex_held_locks) {
+ if (cursor && curr != cursor)
+ continue;
+ lock = list_entry(curr, struct mutex, held_list);
+ t = lock->owner;
+ if (filter && (t != filter->thread_info))
+ continue;
+ count++;
+ cursor = curr->next;
+ debug_spin_lock_restore(&debug_mutex_lock, flags);
+
+ printk("\n#%03d: ", count);
+ printk_lock(lock, filter ? 0 : 1);
+ goto next;
+ }
+ debug_spin_lock_restore(&debug_mutex_lock, flags);
+ printk("\n");
+}
+
+void mutex_debug_show_all_locks(void)
+{
+ struct task_struct *g, *p;
+ int count = 10;
+ int unlock = 1;
+
+ printk("\nShowing all blocking locks in the system:\n");
+
+ /*
+ * Here we try to get the tasklist_lock as hard as possible;
+ * if not successful after 2 seconds we ignore it (but keep
+ * trying). This is to enable a debug printout even if a
+ * tasklist_lock-holding task deadlocks or crashes.
+ */
+retry:
+ if (!read_trylock(&tasklist_lock)) {
+ if (count == 10)
+ printk("hm, tasklist_lock locked, retrying... ");
+ if (count) {
+ count--;
+ printk(" #%d", 10-count);
+ mdelay(200);
+ goto retry;
+ }
+ printk(" ignoring it.\n");
+ unlock = 0;
+ }
+ if (count != 10)
+ printk(" locked it.\n");
+
+ do_each_thread(g, p) {
+ show_task_locks(p);
+ if (!unlock)
+ if (read_trylock(&tasklist_lock))
+ unlock = 1;
+ } while_each_thread(g, p);
+
+ printk("\n");
+ show_held_locks(NULL);
+ printk("=============================================\n\n");
+
+ if (unlock)
+ read_unlock(&tasklist_lock);
+}
+
+static void report_deadlock(struct task_struct *task, struct mutex *lock,
+ struct mutex *lockblk, unsigned long ip)
+{
+ printk("\n%s/%d is trying to acquire this lock:\n",
+ current->comm, current->pid);
+ printk_lock(lock, 1);
+ printk("... trying at: ");
+ print_symbol("%s\n", ip);
+ show_held_locks(current);
+
+ if (lockblk) {
+ printk("but %s/%d is deadlocking current task %s/%d!\n\n",
+ task->comm, task->pid, current->comm, current->pid);
+ printk("\n%s/%d is blocked on this lock:\n",
+ task->comm, task->pid);
+ printk_lock(lockblk, 1);
+
+ show_held_locks(task);
+
+ printk("\n%s/%d's [blocked] stackdump:\n\n",
+ task->comm, task->pid);
+ show_stack(task, NULL);
+ }
+
+ printk("\n%s/%d's [current] stackdump:\n\n",
+ current->comm, current->pid);
+ dump_stack();
+ mutex_debug_show_all_locks();
+ printk("[ turning off deadlock detection. Please report this. ]\n\n");
+ local_irq_disable();
+}
+
+/*
+ * Recursively check for mutex deadlocks:
+ */
+static int check_deadlock(struct mutex *lock, int depth,
+ struct thread_info *ti, unsigned long ip)
+{
+ struct mutex *lockblk;
+ struct task_struct *task;
+
+ if (!debug_mutex_on)
+ return 0;
+
+ ti = lock->owner;
+ if (!ti)
+ return 0;
+
+ task = ti->task;
+ lockblk = NULL;
+ if (task->blocked_on)
+ lockblk = task->blocked_on->lock;
+
+ /* Self-deadlock: */
+ if (current == task) {
+ DEBUG_OFF();
+ if (depth)
+ return 1;
+ printk("\n==========================================\n");
+ printk( "[ BUG: lock recursion deadlock detected! |\n");
+ printk( "------------------------------------------\n");
+ report_deadlock(task, lock, NULL, ip);
+ return 0;
+ }
+
+ /* Ugh, something corrupted the lock data structure? */
+ if (depth > 20) {
+ DEBUG_OFF();
+ printk("\n===========================================\n");
+ printk( "[ BUG: infinite lock dependency detected!? |\n");
+ printk( "-------------------------------------------\n");
+ report_deadlock(task, lock, lockblk, ip);
+ return 0;
+ }
+
+ /* Recursively check for dependencies: */
+ if (lockblk && check_deadlock(lockblk, depth+1, ti, ip)) {
+ printk("\n============================================\n");
+ printk( "[ BUG: circular locking deadlock detected! ]\n");
+ printk( "--------------------------------------------\n");
+ report_deadlock(task, lock, lockblk, ip);
+ return 0;
+ }
+ return 0;
+}
+
+/*
+ * Called when a task exits, this function checks whether the
+ * task is holding any locks, and reports the first one if so:
+ */
+void mutex_debug_check_no_locks_held(struct task_struct *task)
+{
+ struct list_head *curr, *next;
+ struct thread_info *t;
+ unsigned long flags;
+ struct mutex *lock;
+
+ if (!debug_mutex_on)
+ return;
+
+ debug_spin_lock_save(&debug_mutex_lock, flags);
+ list_for_each_safe(curr, next, &debug_mutex_held_locks) {
+ lock = list_entry(curr, struct mutex, held_list);
+ t = lock->owner;
+ if (t != task->thread_info)
+ continue;
+ list_del_init(curr);
+ DEBUG_OFF();
+ debug_spin_lock_restore(&debug_mutex_lock, flags);
+
+ printk("BUG: %s/%d, lock held at task exit time!\n",
+ task->comm, task->pid);
+ printk_lock(lock, 1);
+ if (lock->owner != task->thread_info)
+ printk("exiting task is not even the owner??\n");
+ return;
+ }
+ debug_spin_lock_restore(&debug_mutex_lock, flags);
+}
+
+/*
+ * Called when kernel memory is freed (or unmapped), or if a mutex
+ * is destroyed or reinitialized - this code checks whether there is
+ * any held lock in the memory range of <from> to <to>:
+ */
+void mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
+{
+ struct list_head *curr, *next;
+ const void *to = from + len;
+ unsigned long flags;
+ struct mutex *lock;
+ void *lock_addr;
+
+ if (!debug_mutex_on)
+ return;
+
+ debug_spin_lock_save(&debug_mutex_lock, flags);
+ list_for_each_safe(curr, next, &debug_mutex_held_locks) {
+ lock = list_entry(curr, struct mutex, held_list);
+ lock_addr = lock;
+ if (lock_addr < from || lock_addr >= to)
+ continue;
+ list_del_init(curr);
+ DEBUG_OFF();
+ debug_spin_lock_restore(&debug_mutex_lock, flags);
+
+ printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
+ current->comm, current->pid, lock, from, to);
+ dump_stack();
+ printk_lock(lock, 1);
+ if (lock->owner != current_thread_info())
+ printk("freeing task is not even the owner??\n");
+ return;
+ }
+ debug_spin_lock_restore(&debug_mutex_lock, flags);
+}
+
+/*
+ * Must be called with lock->wait_lock held.
+ */
+void debug_mutex_set_owner(struct mutex *lock,
+ struct thread_info *new_owner __IP_DECL__)
+{
+ lock->owner = new_owner;
+ DEBUG_WARN_ON(!list_empty(&lock->held_list));
+ if (debug_mutex_on) {
+ list_add_tail(&lock->held_list, &debug_mutex_held_locks);
+ lock->acquire_ip = ip;
+ }
+}
+
+void debug_mutex_init_waiter(struct mutex_waiter *waiter)
+{
+ memset(waiter, 0x11, sizeof(*waiter));
+ waiter->magic = waiter;
+ INIT_LIST_HEAD(&waiter->list);
+}
+
+void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
+{
+ SMP_DEBUG_WARN_ON(!spin_is_locked(&lock->wait_lock));
+ DEBUG_WARN_ON(list_empty(&lock->wait_list));
+ DEBUG_WARN_ON(waiter->magic != waiter);
+ DEBUG_WARN_ON(list_empty(&waiter->list));
+}
+
+void debug_mutex_free_waiter(struct mutex_waiter *waiter)
+{
+ DEBUG_WARN_ON(!list_empty(&waiter->list));
+ memset(waiter, 0x22, sizeof(*waiter));
+}
+
+void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+ struct thread_info *ti __IP_DECL__)
+{
+ SMP_DEBUG_WARN_ON(!spin_is_locked(&lock->wait_lock));
+ check_deadlock(lock, 0, ti, ip);
+ /* Mark the current thread as blocked on the lock: */
+ ti->task->blocked_on = waiter;
+ waiter->lock = lock;
+}
+
+void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+ struct thread_info *ti)
+{
+ DEBUG_WARN_ON(list_empty(&waiter->list));
+ DEBUG_WARN_ON(waiter->task != ti->task);
+ DEBUG_WARN_ON(ti->task->blocked_on != waiter);
+ ti->task->blocked_on = NULL;
+
+ list_del_init(&waiter->list);
+ waiter->task = NULL;
+}
+
+void debug_mutex_unlock(struct mutex *lock)
+{
+ DEBUG_WARN_ON(lock->magic != lock);
+ DEBUG_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
+ DEBUG_WARN_ON(lock->owner != current_thread_info());
+ if (debug_mutex_on) {
+ DEBUG_WARN_ON(list_empty(&lock->held_list));
+ list_del_init(&lock->held_list);
+ }
+}
+
+void debug_mutex_init(struct mutex *lock, const char *name)
+{
+ /*
+ * Make sure we are not reinitializing a held lock:
+ */
+ mutex_debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+ lock->owner = NULL;
+ INIT_LIST_HEAD(&lock->held_list);
+ lock->name = name;
+ lock->magic = lock;
+}
+
+/***
+ * mutex_destroy - mark a mutex unusable
+ * @lock: the mutex to be destroyed
+ *
+ * This function marks the mutex uninitialized, and any subsequent
+ * use of the mutex is forbidden. The mutex must not be locked when
+ * this function is called.
+ */
+void fastcall mutex_destroy(struct mutex *lock)
+{
+ DEBUG_WARN_ON(mutex_is_locked(lock));
+ lock->magic = NULL;
+}
+
+EXPORT_SYMBOL_GPL(mutex_destroy);
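
The __IP_DECL__/__IP__/__RET_IP__ trick above keeps the caller-IP plumbing free of cost when debugging is off: kernel/mutex.h (added later in this patch) defines all three as empty. As a sketch of the resulting preprocessor output (not extra code in the patch), the common slowpath prototype expands differently in the two configurations:

/* Written once in the source: */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state __IP_DECL__);

/* With CONFIG_DEBUG_MUTEXES (macros from this file) this becomes: */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned long ip);

/* Without it (kernel/mutex.h defines __IP_DECL__, __IP__ and __RET_IP__
 * as empty), the extra parameter vanishes entirely: */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state);

/* Call sites pass __RET_IP__, i.e.
 *   ", (unsigned long)__builtin_return_address(0)"
 * in the debug case and nothing at all otherwise. */
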
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
new file mode 100644
index 00000000000..fd384050acb
--- /dev/null
+++ b/kernel/mutex-debug.h
@@ -0,0 +1,134 @@
+/*
+ * Mutexes: blocking mutual exclusion locks
+ *
+ * started by Ingo Molnar:
+ *
+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * This file contains mutex debugging related internal declarations,
+ * prototypes and inline functions, for the CONFIG_DEBUG_MUTEXES case.
+ * More details are in kernel/mutex-debug.c.
+ */
+
+extern spinlock_t debug_mutex_lock;
+extern struct list_head debug_mutex_held_locks;
+extern int debug_mutex_on;
+
+/*
+ * In the debug case we carry the caller's instruction pointer into
+ * other functions, but we don't want the function argument overhead
+ * in the non-debug case - hence these macros:
+ */
+#define __IP_DECL__ , unsigned long ip
+#define __IP__ , ip
+#define __RET_IP__ , (unsigned long)__builtin_return_address(0)
+
+/*
+ * This must be called with lock->wait_lock held.
+ */
+extern void debug_mutex_set_owner(struct mutex *lock,
+ struct thread_info *new_owner __IP_DECL__);
+
+static inline void debug_mutex_clear_owner(struct mutex *lock)
+{
+ lock->owner = NULL;
+}
+
+extern void debug_mutex_init_waiter(struct mutex_waiter *waiter);
+extern void debug_mutex_wake_waiter(struct mutex *lock,
+ struct mutex_waiter *waiter);
+extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
+extern void debug_mutex_add_waiter(struct mutex *lock,
+ struct mutex_waiter *waiter,
+ struct thread_info *ti __IP_DECL__);
+extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+ struct thread_info *ti);
+extern void debug_mutex_unlock(struct mutex *lock);
+extern void debug_mutex_init(struct mutex *lock, const char *name);
+
+#define debug_spin_lock(lock) \
+ do { \
+ local_irq_disable(); \
+ if (debug_mutex_on) \
+ spin_lock(lock); \
+ } while (0)
+
+#define debug_spin_unlock(lock) \
+ do { \
+ if (debug_mutex_on) \
+ spin_unlock(lock); \
+ local_irq_enable(); \
+ preempt_check_resched(); \
+ } while (0)
+
+#define debug_spin_lock_save(lock, flags) \
+ do { \
+ local_irq_save(flags); \
+ if (debug_mutex_on) \
+ spin_lock(lock); \
+ } while (0)
+
+#define debug_spin_lock_restore(lock, flags) \
+ do { \
+ if (debug_mutex_on) \
+ spin_unlock(lock); \
+ local_irq_restore(flags); \
+ preempt_check_resched(); \
+ } while (0)
+
+#define spin_lock_mutex(lock) \
+ do { \
+ struct mutex *l = container_of(lock, struct mutex, wait_lock); \
+ \
+ DEBUG_WARN_ON(in_interrupt()); \
+ debug_spin_lock(&debug_mutex_lock); \
+ spin_lock(lock); \
+ DEBUG_WARN_ON(l->magic != l); \
+ } while (0)
+
+#define spin_unlock_mutex(lock) \
+ do { \
+ spin_unlock(lock); \
+ debug_spin_unlock(&debug_mutex_lock); \
+ } while (0)
+
+#define DEBUG_OFF() \
+do { \
+ if (debug_mutex_on) { \
+ debug_mutex_on = 0; \
+ console_verbose(); \
+ if (spin_is_locked(&debug_mutex_lock)) \
+ spin_unlock(&debug_mutex_lock); \
+ } \
+} while (0)
+
+#define DEBUG_BUG() \
+do { \
+ if (debug_mutex_on) { \
+ DEBUG_OFF(); \
+ BUG(); \
+ } \
+} while (0)
+
+#define DEBUG_WARN_ON(c) \
+do { \
+ if (unlikely(c && debug_mutex_on)) { \
+ DEBUG_OFF(); \
+ WARN_ON(1); \
+ } \
+} while (0)
+
+# define DEBUG_BUG_ON(c) \
+do { \
+ if (unlikely(c)) \
+ DEBUG_BUG(); \
+} while (0)
+
+#ifdef CONFIG_SMP
+# define SMP_DEBUG_WARN_ON(c) DEBUG_WARN_ON(c)
+# define SMP_DEBUG_BUG_ON(c) DEBUG_BUG_ON(c)
+#else
+# define SMP_DEBUG_WARN_ON(c) do { } while (0)
+# define SMP_DEBUG_BUG_ON(c) do { } while (0)
+#endif
+
diff --git a/kernel/mutex.c b/kernel/mutex.c
new file mode 100644
index 00000000000..5449b210d9e
--- /dev/null
+++ b/kernel/mutex.c
@@ -0,0 +1,315 @@
+/*
+ * kernel/mutex.c
+ *
+ * Mutexes: blocking mutual exclusion locks
+ *
+ * Started by Ingo Molnar:
+ *
+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
+ * David Howells for suggestions and improvements.
+ *
+ * Also see Documentation/mutex-design.txt.
+ */
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+
+/*
+ * In the DEBUG case we are using the "NULL fastpath" for mutexes,
+ * which forces all calls into the slowpath:
+ */
+#ifdef CONFIG_DEBUG_MUTEXES
+# include "mutex-debug.h"
+# include <asm-generic/mutex-null.h>
+#else
+# include "mutex.h"
+# include <asm/mutex.h>
+#endif
+
+/***
+ * mutex_init - initialize the mutex
+ * @lock: the mutex to be initialized
+ *
+ * Initialize the mutex to unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
+void fastcall __mutex_init(struct mutex *lock, const char *name)
+{
+ atomic_set(&lock->count, 1);
+ spin_lock_init(&lock->wait_lock);
+ INIT_LIST_HEAD(&lock->wait_list);
+
+ debug_mutex_init(lock, name);
+}
+
+EXPORT_SYMBOL(__mutex_init);
+
+/*
+ * We split the mutex lock/unlock logic into separate fastpath and
+ * slowpath functions, to reduce the register pressure on the fastpath.
+ * We also put the fastpath first in the kernel image, to make sure the
+ * branch is predicted by the CPU as default-untaken.
+ */
+static void fastcall noinline __sched
+__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);
+
+/***
+ * mutex_lock - acquire the mutex
+ * @lock: the mutex to be acquired
+ *
+ * Lock the mutex exclusively for this task. If the mutex is not
+ * available right now, it will sleep until it can get it.
+ *
+ * The mutex must later on be released by the same task that
+ * acquired it. Recursive locking is not allowed. The task
+ * may not exit without first unlocking the mutex. Also, kernel
+ * memory where the mutex resides must not be freed with
+ * the mutex still locked. The mutex must first be initialized
+ * (or statically defined) before it can be locked. memset()-ing
+ * the mutex to 0 is not allowed.
+ *
+ * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
+ * checks that will enforce the restrictions and will also do
+ * deadlock debugging. )
+ *
+ * This function is similar to (but not equivalent to) down().
+ */
+void fastcall __sched mutex_lock(struct mutex *lock)
+{
+ might_sleep();
+ /*
+ * The locking fastpath is the 1->0 transition from
+ * 'unlocked' into 'locked' state.
+ */
+ __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
+}
+
+EXPORT_SYMBOL(mutex_lock);
+
+static void fastcall noinline __sched
+__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__);
+
+/***
+ * mutex_unlock - release the mutex
+ * @lock: the mutex to be released
+ *
+ * Unlock a mutex that has been locked by this task previously.
+ *
+ * This function must not be used in interrupt context. Unlocking
+ * a mutex that is not locked is not allowed.
+ *
+ * This function is similar to (but not equivalent to) up().
+ */
+void fastcall __sched mutex_unlock(struct mutex *lock)
+{
+ /*
+ * The unlocking fastpath is the 0->1 transition from 'locked'
+ * into 'unlocked' state:
+ */
+ __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
+}
+
+EXPORT_SYMBOL(mutex_unlock);
+
+/*
+ * Lock a mutex (possibly interruptible), slowpath:
+ */
+static inline int __sched
+__mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
+{
+ struct task_struct *task = current;
+ struct mutex_waiter waiter;
+ unsigned int old_val;
+
+ debug_mutex_init_waiter(&waiter);
+
+ spin_lock_mutex(&lock->wait_lock);
+
+ debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);
+
+ /* add waiting tasks to the end of the waitqueue (FIFO): */
+ list_add_tail(&waiter.list, &lock->wait_list);
+ waiter.task = task;
+
+ for (;;) {
+ /*
+ * Let's try to take the lock again - this is needed even if
+ * we get here for the first time (shortly after failing to
+ * acquire the lock), to make sure that we get a wakeup once
+ * it's unlocked. Later on, if we sleep, this is the
+ * operation that gives us the lock. We xchg it to -1, so
+ * that when we release the lock, we properly wake up the
+ * other waiters:
+ */
+ old_val = atomic_xchg(&lock->count, -1);
+ if (old_val == 1)
+ break;
+
+ /*
+ * got a signal? (This code gets eliminated in the
+ * TASK_UNINTERRUPTIBLE case.)
+ */
+ if (unlikely(state == TASK_INTERRUPTIBLE &&
+ signal_pending(task))) {
+ mutex_remove_waiter(lock, &waiter, task->thread_info);
+ spin_unlock_mutex(&lock->wait_lock);
+
+ debug_mutex_free_waiter(&waiter);
+ return -EINTR;
+ }
+ __set_task_state(task, state);
+
+ /* didn't get the lock, go to sleep: */
+ spin_unlock_mutex(&lock->wait_lock);
+ schedule();
+ spin_lock_mutex(&lock->wait_lock);
+ }
+
+ /* got the lock - rejoice! */
+ mutex_remove_waiter(lock, &waiter, task->thread_info);
+ debug_mutex_set_owner(lock, task->thread_info __IP__);
+
+ /* set it to 0 if there are no waiters left: */
+ if (likely(list_empty(&lock->wait_list)))
+ atomic_set(&lock->count, 0);
+
+ spin_unlock_mutex(&lock->wait_lock);
+
+ debug_mutex_free_waiter(&waiter);
+
+ DEBUG_WARN_ON(list_empty(&lock->held_list));
+ DEBUG_WARN_ON(lock->owner != task->thread_info);
+
+ return 0;
+}
+
+static void fastcall noinline __sched
+__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__)
+{
+ struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+ __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE __IP__);
+}
+
+/*
+ * Release the lock, slowpath:
+ */
+static fastcall noinline void
+__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
+{
+ struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+ DEBUG_WARN_ON(lock->owner != current_thread_info());
+
+ spin_lock_mutex(&lock->wait_lock);
+
+ /*
+ * some architectures leave the lock unlocked in the fastpath failure
+ * case, others need to leave it locked. In the latter case we have to
+ * unlock it here.
+ */
+ if (__mutex_slowpath_needs_to_unlock())
+ atomic_set(&lock->count, 1);
+
+ debug_mutex_unlock(lock);
+
+ if (!list_empty(&lock->wait_list)) {
+ /* get the first entry from the wait-list: */
+ struct mutex_waiter *waiter =
+ list_entry(lock->wait_list.next,
+ struct mutex_waiter, list);
+
+ debug_mutex_wake_waiter(lock, waiter);
+
+ wake_up_process(waiter->task);
+ }
+
+ debug_mutex_clear_owner(lock);
+
+ spin_unlock_mutex(&lock->wait_lock);
+}
+
+/*
+ * Here come the less common (and hence less performance-critical) APIs:
+ * mutex_lock_interruptible() and mutex_trylock().
+ */
+static int fastcall noinline __sched
+__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);
+
+/***
+ * mutex_lock_interruptible - acquire the mutex, interruptible
+ * @lock: the mutex to be acquired
+ *
+ * Lock the mutex like mutex_lock(), and return 0 if the mutex has
+ * been acquired or sleep until the mutex becomes available. If a
+ * signal arrives while waiting for the lock then this function
+ * returns -EINTR.
+ *
+ * This function is similar to (but not equivalent to) down_interruptible().
+ */
+int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
+{
+ might_sleep();
+ return __mutex_fastpath_lock_retval
+ (&lock->count, __mutex_lock_interruptible_slowpath);
+}
+
+EXPORT_SYMBOL(mutex_lock_interruptible);
+
+static int fastcall noinline __sched
+__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
+{
+ struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+ return __mutex_lock_common(lock, TASK_INTERRUPTIBLE __IP__);
+}
+
+/*
+ * Spinlock based trylock, we take the spinlock and check whether we
+ * can get the lock:
+ */
+static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
+{
+ struct mutex *lock = container_of(lock_count, struct mutex, count);
+ int prev;
+
+ spin_lock_mutex(&lock->wait_lock);
+
+ prev = atomic_xchg(&lock->count, -1);
+ if (likely(prev == 1))
+ debug_mutex_set_owner(lock, current_thread_info() __RET_IP__);
+ /* Set it back to 0 if there are no waiters: */
+ if (likely(list_empty(&lock->wait_list)))
+ atomic_set(&lock->count, 0);
+
+ spin_unlock_mutex(&lock->wait_lock);
+
+ return prev == 1;
+}
+
+/***
+ * mutex_trylock - try to acquire the mutex, without waiting
+ * @lock: the mutex to be acquired
+ *
+ * Try to acquire the mutex atomically. Returns 1 if the mutex
+ * has been acquired successfully, and 0 on contention.
+ *
+ * NOTE: this function follows the spin_trylock() convention, so
+ * its return value is the opposite of down_trylock()'s! Be careful
+ * about this when converting semaphore users to mutexes.
+ *
+ * This function must not be used in interrupt context. The
+ * mutex must be released by the same task that acquired it.
+ */
+int fastcall mutex_trylock(struct mutex *lock)
+{
+ return __mutex_fastpath_trylock(&lock->count,
+ __mutex_trylock_slowpath);
+}
+
+EXPORT_SYMBOL(mutex_trylock);
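
For reference, typical use of the new API looks like the sketch below. It assumes the DEFINE_MUTEX() static initializer from <linux/mutex.h>, which is not part of this hunk, together with the lock/unlock, trylock and interruptible variants documented above. Note in particular the mutex_trylock() return convention (1 on success), the opposite of down_trylock():

#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(io_lock);		/* statically initialized, unlocked */

static int update_state(void)
{
	/* may sleep - must not be called from interrupt context */
	if (mutex_lock_interruptible(&io_lock))
		return -EINTR;		/* a signal interrupted the wait */

	/* ... critical section ... */

	mutex_unlock(&io_lock);		/* must be the task that locked it */
	return 0;
}

static int poll_state(void)
{
	/* returns 1 on success, 0 on contention - opposite of down_trylock() */
	if (!mutex_trylock(&io_lock))
		return -EBUSY;

	/* ... critical section ... */

	mutex_unlock(&io_lock);
	return 0;
}
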
diff --git a/kernel/mutex.h b/kernel/mutex.h
new file mode 100644
index 00000000000..00fe84e7b67
--- /dev/null
+++ b/kernel/mutex.h
@@ -0,0 +1,35 @@
+/*
+ * Mutexes: blocking mutual exclusion locks
+ *
+ * started by Ingo Molnar:
+ *
+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * This file contains mutex debugging related internal prototypes, for the
+ * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
+ */
+
+#define spin_lock_mutex(lock) spin_lock(lock)
+#define spin_unlock_mutex(lock) spin_unlock(lock)
+#define mutex_remove_waiter(lock, waiter, ti) \
+ __list_del((waiter)->list.prev, (waiter)->list.next)
+
+#define DEBUG_WARN_ON(c) do { } while (0)
+#define debug_mutex_set_owner(lock, new_owner) do { } while (0)
+#define debug_mutex_clear_owner(lock) do { } while (0)
+#define debug_mutex_init_waiter(waiter) do { } while (0)
+#define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
+#define debug_mutex_free_waiter(waiter) do { } while (0)
+#define debug_mutex_add_waiter(lock, waiter, ti, ip) do { } while (0)
+#define debug_mutex_unlock(lock) do { } while (0)
+#define debug_mutex_init(lock, name) do { } while (0)
+
+/*
+ * Return-address parameters/declarations. They are very useful for
+ * debugging, but add overhead in the !DEBUG case - so we go to the
+ * trouble of using this not-too-elegant but zero-cost solution:
+ */
+#define __IP_DECL__
+#define __IP__
+#define __RET_IP__
+
diff --git a/kernel/panic.c b/kernel/panic.c
index aabc5f86fa3..c5c4ab25583 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -60,7 +60,7 @@ NORET_TYPE void panic(const char * fmt, ...)
long i;
static char buf[1024];
va_list args;
-#if defined(CONFIG_ARCH_S390)
+#if defined(CONFIG_S390)
unsigned long caller = (unsigned long) __builtin_return_address(0);
#endif
@@ -125,7 +125,7 @@ NORET_TYPE void panic(const char * fmt, ...)
printk(KERN_EMERG "Press Stop-A (L1-A) to return to the boot prom\n");
}
#endif
-#if defined(CONFIG_ARCH_S390)
+#if defined(CONFIG_S390)
disabled_wait(caller);
#endif
local_irq_enable();
diff --git a/kernel/pid.c b/kernel/pid.c
index edba31c681a..1acc0724699 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -136,7 +136,7 @@ struct pid * fastcall find_pid(enum pid_type type, int nr)
struct hlist_node *elem;
struct pid *pid;
- hlist_for_each_entry(pid, elem,
+ hlist_for_each_entry_rcu(pid, elem,
&pid_hash[type][pid_hashfn(nr)], pid_chain) {
if (pid->nr == nr)
return pid;
@@ -150,15 +150,15 @@ int fastcall attach_pid(task_t *task, enum pid_type type, int nr)
task_pid = &task->pids[type];
pid = find_pid(type, nr);
+ task_pid->nr = nr;
if (pid == NULL) {
- hlist_add_head(&task_pid->pid_chain,
- &pid_hash[type][pid_hashfn(nr)]);
INIT_LIST_HEAD(&task_pid->pid_list);
+ hlist_add_head_rcu(&task_pid->pid_chain,
+ &pid_hash[type][pid_hashfn(nr)]);
} else {
INIT_HLIST_NODE(&task_pid->pid_chain);
- list_add_tail(&task_pid->pid_list, &pid->pid_list);
+ list_add_tail_rcu(&task_pid->pid_list, &pid->pid_list);
}
- task_pid->nr = nr;
return 0;
}
@@ -170,20 +170,20 @@ static fastcall int __detach_pid(task_t *task, enum pid_type type)
pid = &task->pids[type];
if (!hlist_unhashed(&pid->pid_chain)) {
- hlist_del(&pid->pid_chain);
- if (list_empty(&pid->pid_list))
+ if (list_empty(&pid->pid_list)) {
nr = pid->nr;
- else {
+ hlist_del_rcu(&pid->pid_chain);
+ } else {
pid_next = list_entry(pid->pid_list.next,
struct pid, pid_list);
/* insert next pid from pid_list to hash */
- hlist_add_head(&pid_next->pid_chain,
- &pid_hash[type][pid_hashfn(pid_next->nr)]);
+ hlist_replace_rcu(&pid->pid_chain,
+ &pid_next->pid_chain);
}
}
- list_del(&pid->pid_list);
+ list_del_rcu(&pid->pid_list);
pid->nr = 0;
return nr;
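
Because attach_pid()/detach_pid() now manipulate the pid hash with the _rcu list primitives and find_pid() walks it with hlist_for_each_entry_rcu(), lookups may run under rcu_read_lock() instead of taking tasklist_lock. A sketch of the read side follows; the exact locking rules callers must obey are an assumption here, not spelled out in this hunk:

#include <linux/pid.h>
#include <linux/rcupdate.h>

static int pid_is_in_use(int nr)
{
	struct pid *pid;
	int in_use;

	rcu_read_lock();
	/* find_pid() walks the hash with hlist_for_each_entry_rcu() */
	pid = find_pid(PIDTYPE_PID, nr);
	in_use = (pid != NULL);
	/* the entry is only guaranteed to stay valid inside the
	 * read-side critical section */
	rcu_read_unlock();

	return in_use;
}
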
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index cae4f572899..520f6c59948 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -7,7 +7,7 @@
#include <asm/uaccess.h>
#include <linux/errno.h>
-static int check_clock(clockid_t which_clock)
+static int check_clock(const clockid_t which_clock)
{
int error = 0;
struct task_struct *p;
@@ -31,7 +31,7 @@ static int check_clock(clockid_t which_clock)
}
static inline union cpu_time_count
-timespec_to_sample(clockid_t which_clock, const struct timespec *tp)
+timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
union cpu_time_count ret;
ret.sched = 0; /* high half always zero when .cpu used */
@@ -43,7 +43,7 @@ timespec_to_sample(clockid_t which_clock, const struct timespec *tp)
return ret;
}
-static void sample_to_timespec(clockid_t which_clock,
+static void sample_to_timespec(const clockid_t which_clock,
union cpu_time_count cpu,
struct timespec *tp)
{
@@ -55,7 +55,7 @@ static void sample_to_timespec(clockid_t which_clock,
}
}
-static inline int cpu_time_before(clockid_t which_clock,
+static inline int cpu_time_before(const clockid_t which_clock,
union cpu_time_count now,
union cpu_time_count then)
{
@@ -65,7 +65,7 @@ static inline int cpu_time_before(clockid_t which_clock,
return cputime_lt(now.cpu, then.cpu);
}
}
-static inline void cpu_time_add(clockid_t which_clock,
+static inline void cpu_time_add(const clockid_t which_clock,
union cpu_time_count *acc,
union cpu_time_count val)
{
@@ -75,7 +75,7 @@ static inline void cpu_time_add(clockid_t which_clock,
acc->cpu = cputime_add(acc->cpu, val.cpu);
}
}
-static inline union cpu_time_count cpu_time_sub(clockid_t which_clock,
+static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
union cpu_time_count a,
union cpu_time_count b)
{
@@ -151,7 +151,7 @@ static inline unsigned long long sched_ns(struct task_struct *p)
return (p == current) ? current_sched_time(p) : p->sched_time;
}
-int posix_cpu_clock_getres(clockid_t which_clock, struct timespec *tp)
+int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
int error = check_clock(which_clock);
if (!error) {
@@ -169,7 +169,7 @@ int posix_cpu_clock_getres(clockid_t which_clock, struct timespec *tp)
return error;
}
-int posix_cpu_clock_set(clockid_t which_clock, const struct timespec *tp)
+int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
/*
* You can never reset a CPU clock, but we check for other errors
@@ -186,7 +186,7 @@ int posix_cpu_clock_set(clockid_t which_clock, const struct timespec *tp)
/*
* Sample a per-thread clock for the given task.
*/
-static int cpu_clock_sample(clockid_t which_clock, struct task_struct *p,
+static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
union cpu_time_count *cpu)
{
switch (CPUCLOCK_WHICH(which_clock)) {
@@ -238,18 +238,7 @@ static int cpu_clock_sample_group_locked(unsigned int clock_idx,
while ((t = next_thread(t)) != p) {
cpu->sched += t->sched_time;
}
- if (p->tgid == current->tgid) {
- /*
- * We're sampling ourselves, so include the
- * cycles not yet banked. We still omit
- * other threads running on other CPUs,
- * so the total can always be behind as
- * much as max(nthreads-1,ncpus) * (NSEC_PER_SEC/HZ).
- */
- cpu->sched += current_sched_time(current);
- } else {
- cpu->sched += p->sched_time;
- }
+ cpu->sched += sched_ns(p);
break;
}
return 0;
@@ -259,7 +248,7 @@ static int cpu_clock_sample_group_locked(unsigned int clock_idx,
* Sample a process (thread group) clock for the given group_leader task.
* Must be called with tasklist_lock held for reading.
*/
-static int cpu_clock_sample_group(clockid_t which_clock,
+static int cpu_clock_sample_group(const clockid_t which_clock,
struct task_struct *p,
union cpu_time_count *cpu)
{
@@ -273,7 +262,7 @@ static int cpu_clock_sample_group(clockid_t which_clock,
}
-int posix_cpu_clock_get(clockid_t which_clock, struct timespec *tp)
+int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
const pid_t pid = CPUCLOCK_PID(which_clock);
int error = -EINVAL;
@@ -1410,8 +1399,8 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
static long posix_cpu_clock_nanosleep_restart(struct restart_block *);
-int posix_cpu_nsleep(clockid_t which_clock, int flags,
- struct timespec *rqtp)
+int posix_cpu_nsleep(const clockid_t which_clock, int flags,
+ struct timespec *rqtp, struct timespec __user *rmtp)
{
struct restart_block *restart_block =
&current_thread_info()->restart_block;
@@ -1436,7 +1425,6 @@ int posix_cpu_nsleep(clockid_t which_clock, int flags,
error = posix_cpu_timer_create(&timer);
timer.it_process = current;
if (!error) {
- struct timespec __user *rmtp;
static struct itimerspec zero_it;
struct itimerspec it = { .it_value = *rqtp,
.it_interval = {} };
@@ -1483,7 +1471,6 @@ int posix_cpu_nsleep(clockid_t which_clock, int flags,
/*
* Report back to the user the time still remaining.
*/
- rmtp = (struct timespec __user *) restart_block->arg1;
if (rmtp != NULL && !(flags & TIMER_ABSTIME) &&
copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
return -EFAULT;
@@ -1491,6 +1478,7 @@ int posix_cpu_nsleep(clockid_t which_clock, int flags,
restart_block->fn = posix_cpu_clock_nanosleep_restart;
/* Caller already set restart_block->arg1 */
restart_block->arg0 = which_clock;
+ restart_block->arg1 = (unsigned long) rmtp;
restart_block->arg2 = rqtp->tv_sec;
restart_block->arg3 = rqtp->tv_nsec;
@@ -1504,21 +1492,28 @@ static long
posix_cpu_clock_nanosleep_restart(struct restart_block *restart_block)
{
clockid_t which_clock = restart_block->arg0;
- struct timespec t = { .tv_sec = restart_block->arg2,
- .tv_nsec = restart_block->arg3 };
+ struct timespec __user *rmtp;
+ struct timespec t;
+
+ rmtp = (struct timespec __user *) restart_block->arg1;
+ t.tv_sec = restart_block->arg2;
+ t.tv_nsec = restart_block->arg3;
+
restart_block->fn = do_no_restart_syscall;
- return posix_cpu_nsleep(which_clock, TIMER_ABSTIME, &t);
+ return posix_cpu_nsleep(which_clock, TIMER_ABSTIME, &t, rmtp);
}
#define PROCESS_CLOCK MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
-static int process_cpu_clock_getres(clockid_t which_clock, struct timespec *tp)
+static int process_cpu_clock_getres(const clockid_t which_clock,
+ struct timespec *tp)
{
return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
-static int process_cpu_clock_get(clockid_t which_clock, struct timespec *tp)
+static int process_cpu_clock_get(const clockid_t which_clock,
+ struct timespec *tp)
{
return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
@@ -1527,16 +1522,19 @@ static int process_cpu_timer_create(struct k_itimer *timer)
timer->it_clock = PROCESS_CLOCK;
return posix_cpu_timer_create(timer);
}
-static int process_cpu_nsleep(clockid_t which_clock, int flags,
- struct timespec *rqtp)
+static int process_cpu_nsleep(const clockid_t which_clock, int flags,
+ struct timespec *rqtp,
+ struct timespec __user *rmtp)
{
- return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
+ return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
-static int thread_cpu_clock_getres(clockid_t which_clock, struct timespec *tp)
+static int thread_cpu_clock_getres(const clockid_t which_clock,
+ struct timespec *tp)
{
return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
-static int thread_cpu_clock_get(clockid_t which_clock, struct timespec *tp)
+static int thread_cpu_clock_get(const clockid_t which_clock,
+ struct timespec *tp)
{
return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
@@ -1545,8 +1543,8 @@ static int thread_cpu_timer_create(struct k_itimer *timer)
timer->it_clock = THREAD_CLOCK;
return posix_cpu_timer_create(timer);
}
-static int thread_cpu_nsleep(clockid_t which_clock, int flags,
- struct timespec *rqtp)
+static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
+ struct timespec *rqtp, struct timespec __user *rmtp)
{
return -EINVAL;
}
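
Besides making the clockid arguments const, this hunk passes the user-space rmtp pointer explicitly into posix_cpu_nsleep() and parks it in restart_block->arg1, so a sleep that is interrupted and later restarted can still copy the remaining time back to the caller. The user-visible contract it serves is the ordinary clock_nanosleep() one; a small user-space illustration (plain POSIX code, not kernel code):

#include <time.h>
#include <errno.h>

int main(void)
{
	struct timespec req = { .tv_sec = 5, .tv_nsec = 0 };
	struct timespec rem;
	int err;

	/* relative sleep: on EINTR the kernel has copied the unslept
	 * time into 'rem' via the rmtp pointer */
	while ((err = clock_nanosleep(CLOCK_MONOTONIC, 0, &req, &rem)) == EINTR)
		req = rem;

	return err ? 1 : 0;
}
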
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 5870efb3e20..9e66e614862 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -48,21 +48,6 @@
#include <linux/workqueue.h>
#include <linux/module.h>
-#ifndef div_long_long_rem
-#include <asm/div64.h>
-
-#define div_long_long_rem(dividend,divisor,remainder) ({ \
- u64 result = dividend; \
- *remainder = do_div(result,divisor); \
- result; })
-
-#endif
-#define CLOCK_REALTIME_RES TICK_NSEC /* In nano seconds. */
-
-static inline u64 mpy_l_X_l_ll(unsigned long mpy1,unsigned long mpy2)
-{
- return (u64)mpy1 * mpy2;
-}
/*
* Management arrays for POSIX timers. Timers are kept in slab memory
* Timer ids are allocated by an external routine that keeps track of the
@@ -148,18 +133,18 @@ static DEFINE_SPINLOCK(idr_lock);
*/
static struct k_clock posix_clocks[MAX_CLOCKS];
+
/*
- * We only have one real clock that can be set so we need only one abs list,
- * even if we should want to have several clocks with differing resolutions.
+ * These ones are defined below.
*/
-static struct k_clock_abs abs_list = {.list = LIST_HEAD_INIT(abs_list.list),
- .lock = SPIN_LOCK_UNLOCKED};
+static int common_nsleep(const clockid_t, int flags, struct timespec *t,
+ struct timespec __user *rmtp);
+static void common_timer_get(struct k_itimer *, struct itimerspec *);
+static int common_timer_set(struct k_itimer *, int,
+ struct itimerspec *, struct itimerspec *);
+static int common_timer_del(struct k_itimer *timer);
-static void posix_timer_fn(unsigned long);
-static u64 do_posix_clock_monotonic_gettime_parts(
- struct timespec *tp, struct timespec *mo);
-int do_posix_clock_monotonic_gettime(struct timespec *tp);
-static int do_posix_clock_monotonic_get(clockid_t, struct timespec *tp);
+static int posix_timer_fn(void *data);
static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags);
@@ -184,7 +169,7 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
* the function pointer CALL in struct k_clock.
*/
-static inline int common_clock_getres(clockid_t which_clock,
+static inline int common_clock_getres(const clockid_t which_clock,
struct timespec *tp)
{
tp->tv_sec = 0;
@@ -192,39 +177,33 @@ static inline int common_clock_getres(clockid_t which_clock,
return 0;
}
-static inline int common_clock_get(clockid_t which_clock, struct timespec *tp)
+/*
+ * Get real time for posix timers
+ */
+static int common_clock_get(clockid_t which_clock, struct timespec *tp)
{
- getnstimeofday(tp);
+ ktime_get_real_ts(tp);
return 0;
}
-static inline int common_clock_set(clockid_t which_clock, struct timespec *tp)
+static inline int common_clock_set(const clockid_t which_clock,
+ struct timespec *tp)
{
return do_sys_settimeofday(tp, NULL);
}
static inline int common_timer_create(struct k_itimer *new_timer)
{
- INIT_LIST_HEAD(&new_timer->it.real.abs_timer_entry);
- init_timer(&new_timer->it.real.timer);
- new_timer->it.real.timer.data = (unsigned long) new_timer;
+ hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock);
+ new_timer->it.real.timer.data = new_timer;
new_timer->it.real.timer.function = posix_timer_fn;
return 0;
}
/*
- * These ones are defined below.
- */
-static int common_nsleep(clockid_t, int flags, struct timespec *t);
-static void common_timer_get(struct k_itimer *, struct itimerspec *);
-static int common_timer_set(struct k_itimer *, int,
- struct itimerspec *, struct itimerspec *);
-static int common_timer_del(struct k_itimer *timer);
-
-/*
- * Return nonzero iff we know a priori this clockid_t value is bogus.
+ * Return nonzero if we know a priori this clockid_t value is bogus.
*/
-static inline int invalid_clockid(clockid_t which_clock)
+static inline int invalid_clockid(const clockid_t which_clock)
{
if (which_clock < 0) /* CPU clock, posix_cpu_* will check it */
return 0;
@@ -232,26 +211,32 @@ static inline int invalid_clockid(clockid_t which_clock)
return 1;
if (posix_clocks[which_clock].clock_getres != NULL)
return 0;
-#ifndef CLOCK_DISPATCH_DIRECT
if (posix_clocks[which_clock].res != 0)
return 0;
-#endif
return 1;
}
+/*
+ * Get monotonic time for posix timers
+ */
+static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
+{
+ ktime_get_ts(tp);
+ return 0;
+}
/*
* Initialize everything, well, just everything in Posix clocks/timers ;)
*/
static __init int init_posix_timers(void)
{
- struct k_clock clock_realtime = {.res = CLOCK_REALTIME_RES,
- .abs_struct = &abs_list
+ struct k_clock clock_realtime = {
+ .clock_getres = hrtimer_get_res,
};
- struct k_clock clock_monotonic = {.res = CLOCK_REALTIME_RES,
- .abs_struct = NULL,
- .clock_get = do_posix_clock_monotonic_get,
- .clock_set = do_posix_clock_nosettime
+ struct k_clock clock_monotonic = {
+ .clock_getres = hrtimer_get_res,
+ .clock_get = posix_ktime_get_ts,
+ .clock_set = do_posix_clock_nosettime,
};
register_posix_clock(CLOCK_REALTIME, &clock_realtime);
@@ -265,117 +250,17 @@ static __init int init_posix_timers(void)
__initcall(init_posix_timers);
-static void tstojiffie(struct timespec *tp, int res, u64 *jiff)
-{
- long sec = tp->tv_sec;
- long nsec = tp->tv_nsec + res - 1;
-
- if (nsec >= NSEC_PER_SEC) {
- sec++;
- nsec -= NSEC_PER_SEC;
- }
-
- /*
- * The scaling constants are defined in <linux/time.h>
- * The difference between there and here is that we do the
- * res rounding and compute a 64-bit result (well so does that
- * but it then throws away the high bits).
- */
- *jiff = (mpy_l_X_l_ll(sec, SEC_CONVERSION) +
- (mpy_l_X_l_ll(nsec, NSEC_CONVERSION) >>
- (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
-}
-
-/*
- * This function adjusts the timer as needed as a result of the clock
- * being set. It should only be called for absolute timers, and then
- * under the abs_list lock. It computes the time difference and sets
- * the new jiffies value in the timer. It also updates the timers
- * reference wall_to_monotonic value. It is complicated by the fact
- * that tstojiffies() only handles positive times and it needs to work
- * with both positive and negative times. Also, for negative offsets,
- * we need to defeat the res round up.
- *
- * Return is true if there is a new time, else false.
- */
-static long add_clockset_delta(struct k_itimer *timr,
- struct timespec *new_wall_to)
-{
- struct timespec delta;
- int sign = 0;
- u64 exp;
-
- set_normalized_timespec(&delta,
- new_wall_to->tv_sec -
- timr->it.real.wall_to_prev.tv_sec,
- new_wall_to->tv_nsec -
- timr->it.real.wall_to_prev.tv_nsec);
- if (likely(!(delta.tv_sec | delta.tv_nsec)))
- return 0;
- if (delta.tv_sec < 0) {
- set_normalized_timespec(&delta,
- -delta.tv_sec,
- 1 - delta.tv_nsec -
- posix_clocks[timr->it_clock].res);
- sign++;
- }
- tstojiffie(&delta, posix_clocks[timr->it_clock].res, &exp);
- timr->it.real.wall_to_prev = *new_wall_to;
- timr->it.real.timer.expires += (sign ? -exp : exp);
- return 1;
-}
-
-static void remove_from_abslist(struct k_itimer *timr)
-{
- if (!list_empty(&timr->it.real.abs_timer_entry)) {
- spin_lock(&abs_list.lock);
- list_del_init(&timr->it.real.abs_timer_entry);
- spin_unlock(&abs_list.lock);
- }
-}
-
static void schedule_next_timer(struct k_itimer *timr)
{
- struct timespec new_wall_to;
- struct now_struct now;
- unsigned long seq;
-
- /*
- * Set up the timer for the next interval (if there is one).
- * Note: this code uses the abs_timer_lock to protect
- * it.real.wall_to_prev and must hold it until exp is set, not exactly
- * obvious...
-
- * This function is used for CLOCK_REALTIME* and
- * CLOCK_MONOTONIC* timers. If we ever want to handle other
- * CLOCKs, the calling code (do_schedule_next_timer) would need
- * to pull the "clock" info from the timer and dispatch the
- * "other" CLOCKs "next timer" code (which, I suppose should
- * also be added to the k_clock structure).
- */
- if (!timr->it.real.incr)
+ if (timr->it.real.interval.tv64 == 0)
return;
- do {
- seq = read_seqbegin(&xtime_lock);
- new_wall_to = wall_to_monotonic;
- posix_get_now(&now);
- } while (read_seqretry(&xtime_lock, seq));
-
- if (!list_empty(&timr->it.real.abs_timer_entry)) {
- spin_lock(&abs_list.lock);
- add_clockset_delta(timr, &new_wall_to);
-
- posix_bump_timer(timr, now);
-
- spin_unlock(&abs_list.lock);
- } else {
- posix_bump_timer(timr, now);
- }
+ timr->it_overrun += hrtimer_forward(&timr->it.real.timer,
+ timr->it.real.interval);
timr->it_overrun_last = timr->it_overrun;
timr->it_overrun = -1;
++timr->it_requeue_pending;
- add_timer(&timr->it.real.timer);
+ hrtimer_restart(&timr->it.real.timer);
}
/*
@@ -396,31 +281,23 @@ void do_schedule_next_timer(struct siginfo *info)
timr = lock_timer(info->si_tid, &flags);
- if (!timr || timr->it_requeue_pending != info->si_sys_private)
- goto exit;
+ if (timr && timr->it_requeue_pending == info->si_sys_private) {
+ if (timr->it_clock < 0)
+ posix_cpu_timer_schedule(timr);
+ else
+ schedule_next_timer(timr);
- if (timr->it_clock < 0) /* CPU clock */
- posix_cpu_timer_schedule(timr);
- else
- schedule_next_timer(timr);
- info->si_overrun = timr->it_overrun_last;
-exit:
- if (timr)
- unlock_timer(timr, flags);
+ info->si_overrun = timr->it_overrun_last;
+ }
+
+ unlock_timer(timr, flags);
}
int posix_timer_event(struct k_itimer *timr,int si_private)
{
memset(&timr->sigq->info, 0, sizeof(siginfo_t));
timr->sigq->info.si_sys_private = si_private;
- /*
- * Send signal to the process that owns this timer.
-
- * This code assumes that all the possible abs_lists share the
- * same lock (there is only one list at this time). If this is
- * not the case, the CLOCK info would need to be used to find
- * the proper abs list lock.
- */
+ /* Send signal to the process that owns this timer.*/
timr->sigq->info.si_signo = timr->it_sigev_signo;
timr->sigq->info.si_errno = 0;
@@ -454,64 +331,35 @@ EXPORT_SYMBOL_GPL(posix_timer_event);
* This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
*/
-static void posix_timer_fn(unsigned long __data)
+static int posix_timer_fn(void *data)
{
- struct k_itimer *timr = (struct k_itimer *) __data;
+ struct k_itimer *timr = data;
unsigned long flags;
- unsigned long seq;
- struct timespec delta, new_wall_to;
- u64 exp = 0;
- int do_notify = 1;
+ int si_private = 0;
+ int ret = HRTIMER_NORESTART;
spin_lock_irqsave(&timr->it_lock, flags);
- if (!list_empty(&timr->it.real.abs_timer_entry)) {
- spin_lock(&abs_list.lock);
- do {
- seq = read_seqbegin(&xtime_lock);
- new_wall_to = wall_to_monotonic;
- } while (read_seqretry(&xtime_lock, seq));
- set_normalized_timespec(&delta,
- new_wall_to.tv_sec -
- timr->it.real.wall_to_prev.tv_sec,
- new_wall_to.tv_nsec -
- timr->it.real.wall_to_prev.tv_nsec);
- if (likely((delta.tv_sec | delta.tv_nsec ) == 0)) {
- /* do nothing, timer is on time */
- } else if (delta.tv_sec < 0) {
- /* do nothing, timer is already late */
- } else {
- /* timer is early due to a clock set */
- tstojiffie(&delta,
- posix_clocks[timr->it_clock].res,
- &exp);
- timr->it.real.wall_to_prev = new_wall_to;
- timr->it.real.timer.expires += exp;
- add_timer(&timr->it.real.timer);
- do_notify = 0;
- }
- spin_unlock(&abs_list.lock);
- }
- if (do_notify) {
- int si_private=0;
+ if (timr->it.real.interval.tv64 != 0)
+ si_private = ++timr->it_requeue_pending;
- if (timr->it.real.incr)
- si_private = ++timr->it_requeue_pending;
- else {
- remove_from_abslist(timr);
+ if (posix_timer_event(timr, si_private)) {
+ /*
+ * signal was not sent because of sig_ignor
+ * we will not get a call back to restart it AND
+ * it should be restarted.
+ */
+ if (timr->it.real.interval.tv64 != 0) {
+ timr->it_overrun +=
+ hrtimer_forward(&timr->it.real.timer,
+ timr->it.real.interval);
+ ret = HRTIMER_RESTART;
}
-
- if (posix_timer_event(timr, si_private))
- /*
- * signal was not sent because of sig_ignor
- * we will not get a call back to restart it AND
- * it should be restarted.
- */
- schedule_next_timer(timr);
}
- unlock_timer(timr, flags); /* hold thru abs lock to keep irq off */
-}
+ unlock_timer(timr, flags);
+ return ret;
+}
static inline struct task_struct * good_sigevent(sigevent_t * event)
{
@@ -530,7 +378,7 @@ static inline struct task_struct * good_sigevent(sigevent_t * event)
return rtn;
}
-void register_posix_clock(clockid_t clock_id, struct k_clock *new_clock)
+void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
{
if ((unsigned) clock_id >= MAX_CLOCKS) {
printk("POSIX clock register failed for clock_id %d\n",
@@ -576,7 +424,7 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
/* Create a POSIX.1b interval timer. */
asmlinkage long
-sys_timer_create(clockid_t which_clock,
+sys_timer_create(const clockid_t which_clock,
struct sigevent __user *timer_event_spec,
timer_t __user * created_timer_id)
{
@@ -602,8 +450,7 @@ sys_timer_create(clockid_t which_clock,
goto out;
}
spin_lock_irq(&idr_lock);
- error = idr_get_new(&posix_timers_id,
- (void *) new_timer,
+ error = idr_get_new(&posix_timers_id, (void *) new_timer,
&new_timer_id);
spin_unlock_irq(&idr_lock);
if (error == -EAGAIN)
@@ -704,27 +551,6 @@ out:
}
/*
- * good_timespec
- *
- * This function checks the elements of a timespec structure.
- *
- * Arguments:
- * ts : Pointer to the timespec structure to check
- *
- * Return value:
- * If a NULL pointer was passed in, or the tv_nsec field was less than 0
- * or greater than NSEC_PER_SEC, or the tv_sec field was less than 0,
- * this function returns 0. Otherwise it returns 1.
- */
-static int good_timespec(const struct timespec *ts)
-{
- if ((!ts) || (ts->tv_sec < 0) ||
- ((unsigned) ts->tv_nsec >= NSEC_PER_SEC))
- return 0;
- return 1;
-}
-
-/*
* Locking issues: We need to protect the result of the id look up until
* we get the timer locked down so it is not deleted under us. The
* removal is done under the idr spinlock so we use that here to bridge
@@ -776,39 +602,39 @@ static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
static void
common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{
- unsigned long expires;
- struct now_struct now;
-
- do
- expires = timr->it.real.timer.expires;
- while ((volatile long) (timr->it.real.timer.expires) != expires);
-
- posix_get_now(&now);
-
- if (expires &&
- ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) &&
- !timr->it.real.incr &&
- posix_time_before(&timr->it.real.timer, &now))
- timr->it.real.timer.expires = expires = 0;
- if (expires) {
- if (timr->it_requeue_pending & REQUEUE_PENDING ||
- (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
- posix_bump_timer(timr, now);
- expires = timr->it.real.timer.expires;
- }
- else
- if (!timer_pending(&timr->it.real.timer))
- expires = 0;
- if (expires)
- expires -= now.jiffies;
- }
- jiffies_to_timespec(expires, &cur_setting->it_value);
- jiffies_to_timespec(timr->it.real.incr, &cur_setting->it_interval);
+ ktime_t remaining;
+ struct hrtimer *timer = &timr->it.real.timer;
+
+ memset(cur_setting, 0, sizeof(struct itimerspec));
+ remaining = hrtimer_get_remaining(timer);
- if (cur_setting->it_value.tv_sec < 0) {
+ /* Time left? Or timer pending? */
+ if (remaining.tv64 > 0 || hrtimer_active(timer))
+ goto calci;
+ /* interval timer ? */
+ if (timr->it.real.interval.tv64 == 0)
+ return;
+ /*
+ * When a requeue is pending or this is a SIGEV_NONE timer
+ * move the expiry time forward by intervals, so expiry is >
+ * now.
+ */
+ if (timr->it_requeue_pending & REQUEUE_PENDING ||
+ (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
+ timr->it_overrun +=
+ hrtimer_forward(timer, timr->it.real.interval);
+ remaining = hrtimer_get_remaining(timer);
+ }
+ calci:
+ /* interval timer ? */
+ if (timr->it.real.interval.tv64 != 0)
+ cur_setting->it_interval =
+ ktime_to_timespec(timr->it.real.interval);
+ /* Return 0 only when the timer is expired and not pending */
+ if (remaining.tv64 <= 0)
cur_setting->it_value.tv_nsec = 1;
- cur_setting->it_value.tv_sec = 0;
- }
+ else
+ cur_setting->it_value = ktime_to_timespec(remaining);
}
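
From user space, the conversion done in common_timer_get() above is what timer_gettime() reports: the remaining time in it_value and the period in it_interval. A minimal sketch using the standard POSIX timer API (on older glibc, link with -lrt):

#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	timer_t tid;
	struct sigevent sev = { .sigev_notify = SIGEV_NONE };
	struct itimerspec its = { { 2, 0 }, { 5, 0 } };	/* 2 s period, 5 s initial */
	struct itimerspec cur;

	if (timer_create(CLOCK_MONOTONIC, &sev, &tid))
		return 1;
	timer_settime(tid, 0, &its, NULL);
	timer_gettime(tid, &cur);
	printf("value=%ld.%09lds interval=%ld.%09lds\n",
	       cur.it_value.tv_sec, cur.it_value.tv_nsec,
	       cur.it_interval.tv_sec, cur.it_interval.tv_nsec);
	timer_delete(tid);
	return 0;
}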
/* Get the time remaining on a POSIX.1b interval timer. */
@@ -832,6 +658,7 @@ sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting)
return 0;
}
+
/*
* Get the number of overruns of a POSIX.1b interval timer. This is to
* be the overrun of the timer last delivered. At the same time we are
@@ -841,7 +668,6 @@ sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting)
* the call back to do_schedule_next_timer(). So all we need to do is
* to pick up the frozen overrun.
*/
-
asmlinkage long
sys_timer_getoverrun(timer_t timer_id)
{
@@ -858,84 +684,6 @@ sys_timer_getoverrun(timer_t timer_id)
return overrun;
}
-/*
- * Adjust for absolute time
- *
- * If absolute time is given and it is not CLOCK_MONOTONIC, we need to
- * adjust for the offset between the timer clock (CLOCK_MONOTONIC) and
- * what ever clock he is using.
- *
- * If it is relative time, we need to add the current (CLOCK_MONOTONIC)
- * time to it to get the proper time for the timer.
- */
-static int adjust_abs_time(struct k_clock *clock, struct timespec *tp,
- int abs, u64 *exp, struct timespec *wall_to)
-{
- struct timespec now;
- struct timespec oc = *tp;
- u64 jiffies_64_f;
- int rtn =0;
-
- if (abs) {
- /*
- * The mask pick up the 4 basic clocks
- */
- if (!((clock - &posix_clocks[0]) & ~CLOCKS_MASK)) {
- jiffies_64_f = do_posix_clock_monotonic_gettime_parts(
- &now, wall_to);
- /*
- * If we are doing a MONOTONIC clock
- */
- if((clock - &posix_clocks[0]) & CLOCKS_MONO){
- now.tv_sec += wall_to->tv_sec;
- now.tv_nsec += wall_to->tv_nsec;
- }
- } else {
- /*
- * Not one of the basic clocks
- */
- clock->clock_get(clock - posix_clocks, &now);
- jiffies_64_f = get_jiffies_64();
- }
- /*
- * Take away now to get delta and normalize
- */
- set_normalized_timespec(&oc, oc.tv_sec - now.tv_sec,
- oc.tv_nsec - now.tv_nsec);
- }else{
- jiffies_64_f = get_jiffies_64();
- }
- /*
- * Check if the requested time is prior to now (if so set now)
- */
- if (oc.tv_sec < 0)
- oc.tv_sec = oc.tv_nsec = 0;
-
- if (oc.tv_sec | oc.tv_nsec)
- set_normalized_timespec(&oc, oc.tv_sec,
- oc.tv_nsec + clock->res);
- tstojiffie(&oc, clock->res, exp);
-
- /*
- * Check if the requested time is more than the timer code
- * can handle (if so we error out but return the value too).
- */
- if (*exp > ((u64)MAX_JIFFY_OFFSET))
- /*
- * This is a considered response, not exactly in
- * line with the standard (in fact it is silent on
- * possible overflows). We assume such a large
- * value is ALMOST always a programming error and
- * try not to compound it by setting a really dumb
- * value.
- */
- rtn = -EINVAL;
- /*
- * return the actual jiffies expire time, full 64 bits
- */
- *exp += jiffies_64_f;
- return rtn;
-}
/* Set a POSIX.1b interval timer. */
/* timr->it_lock is taken. */
@@ -943,68 +691,48 @@ static inline int
common_timer_set(struct k_itimer *timr, int flags,
struct itimerspec *new_setting, struct itimerspec *old_setting)
{
- struct k_clock *clock = &posix_clocks[timr->it_clock];
- u64 expire_64;
+ struct hrtimer *timer = &timr->it.real.timer;
if (old_setting)
common_timer_get(timr, old_setting);
/* disable the timer */
- timr->it.real.incr = 0;
+ timr->it.real.interval.tv64 = 0;
/*
* careful here. If smp we could be in the "fire" routine which will
* be spinning as we hold the lock. But this is ONLY an SMP issue.
*/
- if (try_to_del_timer_sync(&timr->it.real.timer) < 0) {
-#ifdef CONFIG_SMP
- /*
- * It can only be active if on an other cpu. Since
- * we have cleared the interval stuff above, it should
- * clear once we release the spin lock. Of course once
- * we do that anything could happen, including the
- * complete melt down of the timer. So return with
- * a "retry" exit status.
- */
+ if (hrtimer_try_to_cancel(timer) < 0)
return TIMER_RETRY;
-#endif
- }
-
- remove_from_abslist(timr);
timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
~REQUEUE_PENDING;
timr->it_overrun_last = 0;
- timr->it_overrun = -1;
- /*
- *switch off the timer when it_value is zero
- */
- if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec) {
- timr->it.real.timer.expires = 0;
- return 0;
- }
- if (adjust_abs_time(clock,
- &new_setting->it_value, flags & TIMER_ABSTIME,
- &expire_64, &(timr->it.real.wall_to_prev))) {
- return -EINVAL;
- }
- timr->it.real.timer.expires = (unsigned long)expire_64;
- tstojiffie(&new_setting->it_interval, clock->res, &expire_64);
- timr->it.real.incr = (unsigned long)expire_64;
+ /* switch off the timer when it_value is zero */
+ if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
+ return 0;
- /*
- * We do not even queue SIGEV_NONE timers! But we do put them
- * in the abs list so we can do that right.
+ /* Posix madness. Only absolute CLOCK_REALTIME timers
+ * are affected by clock sets. So we must reinitialize
+ * the timer.
*/
- if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE))
- add_timer(&timr->it.real.timer);
-
- if (flags & TIMER_ABSTIME && clock->abs_struct) {
- spin_lock(&clock->abs_struct->lock);
- list_add_tail(&(timr->it.real.abs_timer_entry),
- &(clock->abs_struct->list));
- spin_unlock(&clock->abs_struct->lock);
- }
+ if (timr->it_clock == CLOCK_REALTIME && (flags & TIMER_ABSTIME))
+ hrtimer_rebase(timer, CLOCK_REALTIME);
+ else
+ hrtimer_rebase(timer, CLOCK_MONOTONIC);
+
+ timer->expires = timespec_to_ktime(new_setting->it_value);
+
+ /* Convert interval */
+ timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);
+
+ /* SIGEV_NONE timers are not queued! See common_timer_get() */
+ if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
+ return 0;
+
+ hrtimer_start(timer, timer->expires, (flags & TIMER_ABSTIME) ?
+ HRTIMER_ABS : HRTIMER_REL);
return 0;
}
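
The rebase above encodes a user-visible rule: only an absolute CLOCK_REALTIME timer stays tied to the wall clock (and is therefore affected by clock_settime()); a relative timer behaves as if it were monotonic. A minimal user-space sketch of the two arming modes (standard POSIX timer API; on older glibc, link with -lrt):

#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	timer_t tid;
	struct sigevent sev = { .sigev_notify = SIGEV_NONE };
	struct itimerspec its = { { 0, 0 }, { 0, 0 } };

	if (timer_create(CLOCK_REALTIME, &sev, &tid))
		return 1;

	/* Absolute arming: expiry is a point on the wall clock, so a later
	 * clock_settime() moves it relative to "now". */
	clock_gettime(CLOCK_REALTIME, &its.it_value);
	its.it_value.tv_sec += 5;
	timer_settime(tid, TIMER_ABSTIME, &its, NULL);

	/* Relative arming: 5 seconds from now, unaffected by clock sets. */
	its.it_value.tv_sec = 5;
	its.it_value.tv_nsec = 0;
	timer_settime(tid, 0, &its, NULL);

	printf("timer re-armed relatively for 5 s\n");
	timer_delete(tid);
	return 0;
}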
@@ -1026,8 +754,8 @@ sys_timer_settime(timer_t timer_id, int flags,
if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
return -EFAULT;
- if ((!good_timespec(&new_spec.it_interval)) ||
- (!good_timespec(&new_spec.it_value)))
+ if (!timespec_valid(&new_spec.it_interval) ||
+ !timespec_valid(&new_spec.it_value))
return -EINVAL;
retry:
timr = lock_timer(timer_id, &flag);
@@ -1043,8 +771,8 @@ retry:
goto retry;
}
- if (old_setting && !error && copy_to_user(old_setting,
- &old_spec, sizeof (old_spec)))
+ if (old_setting && !error &&
+ copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
error = -EFAULT;
return error;
@@ -1052,24 +780,10 @@ retry:
static inline int common_timer_del(struct k_itimer *timer)
{
- timer->it.real.incr = 0;
+ timer->it.real.interval.tv64 = 0;
- if (try_to_del_timer_sync(&timer->it.real.timer) < 0) {
-#ifdef CONFIG_SMP
- /*
- * It can only be active if on an other cpu. Since
- * we have cleared the interval stuff above, it should
- * clear once we release the spin lock. Of course once
- * we do that anything could happen, including the
- * complete melt down of the timer. So return with
- * a "retry" exit status.
- */
+ if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
return TIMER_RETRY;
-#endif
- }
-
- remove_from_abslist(timer);
-
return 0;
}
@@ -1085,24 +799,16 @@ sys_timer_delete(timer_t timer_id)
struct k_itimer *timer;
long flags;
-#ifdef CONFIG_SMP
- int error;
retry_delete:
-#endif
timer = lock_timer(timer_id, &flags);
if (!timer)
return -EINVAL;
-#ifdef CONFIG_SMP
- error = timer_delete_hook(timer);
-
- if (error == TIMER_RETRY) {
+ if (timer_delete_hook(timer) == TIMER_RETRY) {
unlock_timer(timer, flags);
goto retry_delete;
}
-#else
- timer_delete_hook(timer);
-#endif
+
spin_lock(&current->sighand->siglock);
list_del(&timer->list);
spin_unlock(&current->sighand->siglock);
@@ -1119,6 +825,7 @@ retry_delete:
release_posix_timer(timer, IT_ID_SET);
return 0;
}
+
/*
* return timer owned by the process, used by exit_itimers
*/
@@ -1126,22 +833,13 @@ static inline void itimer_delete(struct k_itimer *timer)
{
unsigned long flags;
-#ifdef CONFIG_SMP
- int error;
retry_delete:
-#endif
spin_lock_irqsave(&timer->it_lock, flags);
-#ifdef CONFIG_SMP
- error = timer_delete_hook(timer);
-
- if (error == TIMER_RETRY) {
+ if (timer_delete_hook(timer) == TIMER_RETRY) {
unlock_timer(timer, flags);
goto retry_delete;
}
-#else
- timer_delete_hook(timer);
-#endif
list_del(&timer->list);
/*
* This keeps any tasks waiting on the spin lock from thinking
@@ -1170,57 +868,8 @@ void exit_itimers(struct signal_struct *sig)
}
}
-/*
- * And now for the "clock" calls
- *
- * These functions are called both from timer functions (with the timer
- * spin_lock_irq() held and from clock calls with no locking. They must
- * use the save flags versions of locks.
- */
-
-/*
- * We do ticks here to avoid the irq lock ( they take sooo long).
- * The seqlock is great here. Since we a reader, we don't really care
- * if we are interrupted since we don't take lock that will stall us or
- * any other cpu. Voila, no irq lock is needed.
- *
- */
-
-static u64 do_posix_clock_monotonic_gettime_parts(
- struct timespec *tp, struct timespec *mo)
-{
- u64 jiff;
- unsigned int seq;
-
- do {
- seq = read_seqbegin(&xtime_lock);
- getnstimeofday(tp);
- *mo = wall_to_monotonic;
- jiff = jiffies_64;
-
- } while(read_seqretry(&xtime_lock, seq));
-
- return jiff;
-}
-
-static int do_posix_clock_monotonic_get(clockid_t clock, struct timespec *tp)
-{
- struct timespec wall_to_mono;
-
- do_posix_clock_monotonic_gettime_parts(tp, &wall_to_mono);
-
- set_normalized_timespec(tp, tp->tv_sec + wall_to_mono.tv_sec,
- tp->tv_nsec + wall_to_mono.tv_nsec);
-
- return 0;
-}
-
-int do_posix_clock_monotonic_gettime(struct timespec *tp)
-{
- return do_posix_clock_monotonic_get(CLOCK_MONOTONIC, tp);
-}
-
-int do_posix_clock_nosettime(clockid_t clockid, struct timespec *tp)
+/* Not available / possible... functions */
+int do_posix_clock_nosettime(const clockid_t clockid, struct timespec *tp)
{
return -EINVAL;
}
@@ -1232,7 +881,8 @@ int do_posix_clock_notimer_create(struct k_itimer *timer)
}
EXPORT_SYMBOL_GPL(do_posix_clock_notimer_create);
-int do_posix_clock_nonanosleep(clockid_t clock, int flags, struct timespec *t)
+int do_posix_clock_nonanosleep(const clockid_t clock, int flags,
+ struct timespec *t, struct timespec __user *r)
{
#ifndef ENOTSUP
return -EOPNOTSUPP; /* aka ENOTSUP in userland for POSIX */
@@ -1242,8 +892,8 @@ int do_posix_clock_nonanosleep(clockid_t clock, int flags, struct timespec *t)
}
EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);
-asmlinkage long
-sys_clock_settime(clockid_t which_clock, const struct timespec __user *tp)
+asmlinkage long sys_clock_settime(const clockid_t which_clock,
+ const struct timespec __user *tp)
{
struct timespec new_tp;
@@ -1256,7 +906,7 @@ sys_clock_settime(clockid_t which_clock, const struct timespec __user *tp)
}
asmlinkage long
-sys_clock_gettime(clockid_t which_clock, struct timespec __user *tp)
+sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp)
{
struct timespec kernel_tp;
int error;
@@ -1273,7 +923,7 @@ sys_clock_gettime(clockid_t which_clock, struct timespec __user *tp)
}
asmlinkage long
-sys_clock_getres(clockid_t which_clock, struct timespec __user *tp)
+sys_clock_getres(const clockid_t which_clock, struct timespec __user *tp)
{
struct timespec rtn_tp;
int error;
@@ -1292,117 +942,34 @@ sys_clock_getres(clockid_t which_clock, struct timespec __user *tp)
}
/*
- * The standard says that an absolute nanosleep call MUST wake up at
- * the requested time in spite of clock settings. Here is what we do:
- * For each nanosleep call that needs it (only absolute and not on
- * CLOCK_MONOTONIC* (as it can not be set)) we thread a little structure
- * into the "nanosleep_abs_list". All we need is the task_struct pointer.
- * When ever the clock is set we just wake up all those tasks. The rest
- * is done by the while loop in clock_nanosleep().
- *
- * On locking, clock_was_set() is called from update_wall_clock which
- * holds (or has held for it) a write_lock_irq( xtime_lock) and is
- * called from the timer bh code. Thus we need the irq save locks.
- *
- * Also, on the call from update_wall_clock, that is done as part of a
- * softirq thing. We don't want to delay the system that much (possibly
- * long list of timers to fix), so we defer that work to keventd.
+ * nanosleep for monotonic and realtime clocks
*/
-
-static DECLARE_WAIT_QUEUE_HEAD(nanosleep_abs_wqueue);
-static DECLARE_WORK(clock_was_set_work, (void(*)(void*))clock_was_set, NULL);
-
-static DECLARE_MUTEX(clock_was_set_lock);
-
-void clock_was_set(void)
-{
- struct k_itimer *timr;
- struct timespec new_wall_to;
- LIST_HEAD(cws_list);
- unsigned long seq;
-
-
- if (unlikely(in_interrupt())) {
- schedule_work(&clock_was_set_work);
- return;
+static int common_nsleep(const clockid_t which_clock, int flags,
+ struct timespec *tsave, struct timespec __user *rmtp)
+{
+ int mode = flags & TIMER_ABSTIME ? HRTIMER_ABS : HRTIMER_REL;
+ int clockid = which_clock;
+
+ switch (which_clock) {
+ case CLOCK_REALTIME:
+ /* Posix madness. Only absolute timers on CLOCK_REALTIME
+ are affected by clock sets. */
+ if (mode != HRTIMER_ABS)
+ clockid = CLOCK_MONOTONIC;
+ case CLOCK_MONOTONIC:
+ break;
+ default:
+ return -EINVAL;
}
- wake_up_all(&nanosleep_abs_wqueue);
-
- /*
- * Check if there exist TIMER_ABSTIME timers to correct.
- *
- * Notes on locking: This code is run in task context with irq
- * on. We CAN be interrupted! All other usage of the abs list
- * lock is under the timer lock which holds the irq lock as
- * well. We REALLY don't want to scan the whole list with the
- * interrupt system off, AND we would like a sequence lock on
- * this code as well. Since we assume that the clock will not
- * be set often, it seems ok to take and release the irq lock
- * for each timer. In fact add_timer will do this, so this is
- * not an issue. So we know when we are done, we will move the
- * whole list to a new location. Then as we process each entry,
- * we will move it to the actual list again. This way, when our
- * copy is empty, we are done. We are not all that concerned
- * about preemption so we will use a semaphore lock to protect
- * aginst reentry. This way we will not stall another
- * processor. It is possible that this may delay some timers
- * that should have expired, given the new clock, but even this
- * will be minimal as we will always update to the current time,
- * even if it was set by a task that is waiting for entry to
- * this code. Timers that expire too early will be caught by
- * the expire code and restarted.
-
- * Absolute timers that repeat are left in the abs list while
- * waiting for the task to pick up the signal. This means we
- * may find timers that are not in the "add_timer" list, but are
- * in the abs list. We do the same thing for these, save
- * putting them back in the "add_timer" list. (Note, these are
- * left in the abs list mainly to indicate that they are
- * ABSOLUTE timers, a fact that is used by the re-arm code, and
- * for which we have no other flag.)
-
- */
-
- down(&clock_was_set_lock);
- spin_lock_irq(&abs_list.lock);
- list_splice_init(&abs_list.list, &cws_list);
- spin_unlock_irq(&abs_list.lock);
- do {
- do {
- seq = read_seqbegin(&xtime_lock);
- new_wall_to = wall_to_monotonic;
- } while (read_seqretry(&xtime_lock, seq));
-
- spin_lock_irq(&abs_list.lock);
- if (list_empty(&cws_list)) {
- spin_unlock_irq(&abs_list.lock);
- break;
- }
- timr = list_entry(cws_list.next, struct k_itimer,
- it.real.abs_timer_entry);
-
- list_del_init(&timr->it.real.abs_timer_entry);
- if (add_clockset_delta(timr, &new_wall_to) &&
- del_timer(&timr->it.real.timer)) /* timer run yet? */
- add_timer(&timr->it.real.timer);
- list_add(&timr->it.real.abs_timer_entry, &abs_list.list);
- spin_unlock_irq(&abs_list.lock);
- } while (1);
-
- up(&clock_was_set_lock);
+ return hrtimer_nanosleep(tsave, rmtp, mode, clockid);
}
-long clock_nanosleep_restart(struct restart_block *restart_block);
-
asmlinkage long
-sys_clock_nanosleep(clockid_t which_clock, int flags,
+sys_clock_nanosleep(const clockid_t which_clock, int flags,
const struct timespec __user *rqtp,
struct timespec __user *rmtp)
{
struct timespec t;
- struct restart_block *restart_block =
- &(current_thread_info()->restart_block);
- int ret;
if (invalid_clockid(which_clock))
return -EINVAL;
@@ -1410,125 +977,9 @@ sys_clock_nanosleep(clockid_t which_clock, int flags,
if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
return -EFAULT;
- if ((unsigned) t.tv_nsec >= NSEC_PER_SEC || t.tv_sec < 0)
+ if (!timespec_valid(&t))
return -EINVAL;
- /*
- * Do this here as nsleep function does not have the real address.
- */
- restart_block->arg1 = (unsigned long)rmtp;
-
- ret = CLOCK_DISPATCH(which_clock, nsleep, (which_clock, flags, &t));
-
- if ((ret == -ERESTART_RESTARTBLOCK) && rmtp &&
- copy_to_user(rmtp, &t, sizeof (t)))
- return -EFAULT;
- return ret;
-}
-
-
-static int common_nsleep(clockid_t which_clock,
- int flags, struct timespec *tsave)
-{
- struct timespec t, dum;
- DECLARE_WAITQUEUE(abs_wqueue, current);
- u64 rq_time = (u64)0;
- s64 left;
- int abs;
- struct restart_block *restart_block =
- &current_thread_info()->restart_block;
-
- abs_wqueue.flags = 0;
- abs = flags & TIMER_ABSTIME;
-
- if (restart_block->fn == clock_nanosleep_restart) {
- /*
- * Interrupted by a non-delivered signal, pick up remaining
- * time and continue. Remaining time is in arg2 & 3.
- */
- restart_block->fn = do_no_restart_syscall;
-
- rq_time = restart_block->arg3;
- rq_time = (rq_time << 32) + restart_block->arg2;
- if (!rq_time)
- return -EINTR;
- left = rq_time - get_jiffies_64();
- if (left <= (s64)0)
- return 0; /* Already passed */
- }
-
- if (abs && (posix_clocks[which_clock].clock_get !=
- posix_clocks[CLOCK_MONOTONIC].clock_get))
- add_wait_queue(&nanosleep_abs_wqueue, &abs_wqueue);
-
- do {
- t = *tsave;
- if (abs || !rq_time) {
- adjust_abs_time(&posix_clocks[which_clock], &t, abs,
- &rq_time, &dum);
- }
-
- left = rq_time - get_jiffies_64();
- if (left >= (s64)MAX_JIFFY_OFFSET)
- left = (s64)MAX_JIFFY_OFFSET;
- if (left < (s64)0)
- break;
-
- schedule_timeout_interruptible(left);
-
- left = rq_time - get_jiffies_64();
- } while (left > (s64)0 && !test_thread_flag(TIF_SIGPENDING));
-
- if (abs_wqueue.task_list.next)
- finish_wait(&nanosleep_abs_wqueue, &abs_wqueue);
-
- if (left > (s64)0) {
-
- /*
- * Always restart abs calls from scratch to pick up any
- * clock shifting that happened while we are away.
- */
- if (abs)
- return -ERESTARTNOHAND;
-
- left *= TICK_NSEC;
- tsave->tv_sec = div_long_long_rem(left,
- NSEC_PER_SEC,
- &tsave->tv_nsec);
- /*
- * Restart works by saving the time remaing in
- * arg2 & 3 (it is 64-bits of jiffies). The other
- * info we need is the clock_id (saved in arg0).
- * The sys_call interface needs the users
- * timespec return address which _it_ saves in arg1.
- * Since we have cast the nanosleep call to a clock_nanosleep
- * both can be restarted with the same code.
- */
- restart_block->fn = clock_nanosleep_restart;
- restart_block->arg0 = which_clock;
- /*
- * Caller sets arg1
- */
- restart_block->arg2 = rq_time & 0xffffffffLL;
- restart_block->arg3 = rq_time >> 32;
-
- return -ERESTART_RESTARTBLOCK;
- }
-
- return 0;
-}
-/*
- * This will restart clock_nanosleep.
- */
-long
-clock_nanosleep_restart(struct restart_block *restart_block)
-{
- struct timespec t;
- int ret = common_nsleep(restart_block->arg0, 0, &t);
-
- if ((ret == -ERESTART_RESTARTBLOCK) && restart_block->arg1 &&
- copy_to_user((struct timespec __user *)(restart_block->arg1), &t,
- sizeof (t)))
- return -EFAULT;
- return ret;
+ return CLOCK_DISPATCH(which_clock, nsleep,
+ (which_clock, flags, &t, rmtp));
}
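
For reference, the two sleep modes dispatched above as seen from user space: an absolute CLOCK_REALTIME sleep tracks clock sets, a relative one does not. A minimal sketch using the standard clock_nanosleep() interface:

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec rel = { 1, 0 };
	struct timespec abs;

	/* Relative: one second from now, immune to clock_settime(). */
	clock_nanosleep(CLOCK_MONOTONIC, 0, &rel, NULL);

	/* Absolute: sleep until a fixed point on the wall clock. */
	clock_gettime(CLOCK_REALTIME, &abs);
	abs.tv_sec += 1;
	clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &abs, NULL);

	printf("both sleeps completed\n");
	return 0;
}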
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 027322a564f..e24446f8d8c 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -24,10 +24,11 @@
extern suspend_disk_method_t pm_disk_mode;
+extern int swsusp_shrink_memory(void);
extern int swsusp_suspend(void);
-extern int swsusp_write(void);
+extern int swsusp_write(struct pbe *pblist, unsigned int nr_pages);
extern int swsusp_check(void);
-extern int swsusp_read(void);
+extern int swsusp_read(struct pbe **pblist_ptr);
extern void swsusp_close(void);
extern int swsusp_resume(void);
@@ -73,31 +74,6 @@ static void power_down(suspend_disk_method_t mode)
static int in_suspend __nosavedata = 0;
-/**
- * free_some_memory - Try to free as much memory as possible
- *
- * ... but do not OOM-kill anyone
- *
- * Notice: all userland should be stopped at this point, or
- * livelock is possible.
- */
-
-static void free_some_memory(void)
-{
- unsigned int i = 0;
- unsigned int tmp;
- unsigned long pages = 0;
- char *p = "-\\|/";
-
- printk("Freeing memory... ");
- while ((tmp = shrink_all_memory(10000))) {
- pages += tmp;
- printk("\b%c", p[i++ % 4]);
- }
- printk("\bdone (%li pages freed)\n", pages);
-}
-
-
static inline void platform_finish(void)
{
if (pm_disk_mode == PM_DISK_PLATFORM) {
@@ -127,8 +103,8 @@ static int prepare_processes(void)
}
/* Free memory before shutting down devices. */
- free_some_memory();
- return 0;
+ if (!(error = swsusp_shrink_memory()))
+ return 0;
thaw:
thaw_processes();
enable_nonboot_cpus();
@@ -176,7 +152,7 @@ int pm_suspend_disk(void)
if (in_suspend) {
device_resume();
pr_debug("PM: writing image.\n");
- error = swsusp_write();
+ error = swsusp_write(pagedir_nosave, nr_copy_pages);
if (!error)
power_down(pm_disk_mode);
else {
@@ -247,7 +223,7 @@ static int software_resume(void)
pr_debug("PM: Reading swsusp image.\n");
- if ((error = swsusp_read())) {
+ if ((error = swsusp_read(&pagedir_nosave))) {
swsusp_free();
goto Thaw;
}
@@ -363,37 +339,55 @@ static ssize_t resume_show(struct subsystem * subsys, char *buf)
MINOR(swsusp_resume_device));
}
-static ssize_t resume_store(struct subsystem * subsys, const char * buf, size_t n)
+static ssize_t resume_store(struct subsystem *subsys, const char *buf, size_t n)
{
- int len;
- char *p;
unsigned int maj, min;
- int error = -EINVAL;
dev_t res;
+ int ret = -EINVAL;
- p = memchr(buf, '\n', n);
- len = p ? p - buf : n;
+ if (sscanf(buf, "%u:%u", &maj, &min) != 2)
+ goto out;
- if (sscanf(buf, "%u:%u", &maj, &min) == 2) {
- res = MKDEV(maj,min);
- if (maj == MAJOR(res) && min == MINOR(res)) {
- down(&pm_sem);
- swsusp_resume_device = res;
- up(&pm_sem);
- printk("Attempting manual resume\n");
- noresume = 0;
- software_resume();
- }
- }
+ res = MKDEV(maj,min);
+ if (maj != MAJOR(res) || min != MINOR(res))
+ goto out;
- return error >= 0 ? n : error;
+ down(&pm_sem);
+ swsusp_resume_device = res;
+ up(&pm_sem);
+ printk("Attempting manual resume\n");
+ noresume = 0;
+ software_resume();
+ ret = n;
+out:
+ return ret;
}
power_attr(resume);
+static ssize_t image_size_show(struct subsystem * subsys, char *buf)
+{
+ return sprintf(buf, "%u\n", image_size);
+}
+
+static ssize_t image_size_store(struct subsystem * subsys, const char * buf, size_t n)
+{
+ unsigned int size;
+
+ if (sscanf(buf, "%u", &size) == 1) {
+ image_size = size;
+ return n;
+ }
+
+ return -EINVAL;
+}
+
+power_attr(image_size);
+
static struct attribute * g[] = {
&disk_attr.attr,
&resume_attr.attr,
+ &image_size_attr.attr,
NULL,
};
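
The image_size attribute registered above is the user-space side of the swsusp_shrink_memory() change later in this patch. A minimal sketch of setting it (assumes the file exists at /sys/power/image_size and, per the comment in kernel/power/swsusp.c below, takes the preferred image size in MB; run as root):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/power/image_size", "w");

	if (!f) {
		perror("/sys/power/image_size");
		return 1;
	}
	/* Ask swsusp to aim for a suspend image no larger than ~300 MB. */
	fprintf(f, "300\n");
	fclose(f);
	return 0;
}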
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 6c042b5ee14..7e8492fd142 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -9,19 +9,13 @@
#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
#endif
-#define MAX_PBES ((PAGE_SIZE - sizeof(struct new_utsname) \
- - 4 - 3*sizeof(unsigned long) - sizeof(int) \
- - sizeof(void *)) / sizeof(swp_entry_t))
-
struct swsusp_info {
struct new_utsname uts;
u32 version_code;
unsigned long num_physpages;
int cpus;
unsigned long image_pages;
- unsigned long pagedir_pages;
- suspend_pagedir_t * suspend_pagedir;
- swp_entry_t pagedir[MAX_PBES];
+ unsigned long pages;
} __attribute__((aligned(PAGE_SIZE)));
@@ -48,25 +42,27 @@ static struct subsys_attribute _name##_attr = { \
extern struct subsystem power_subsys;
-extern int freeze_processes(void);
-extern void thaw_processes(void);
-
extern int pm_prepare_console(void);
extern void pm_restore_console(void);
-
/* References to section boundaries */
extern const void __nosave_begin, __nosave_end;
extern unsigned int nr_copy_pages;
-extern suspend_pagedir_t *pagedir_nosave;
-extern suspend_pagedir_t *pagedir_save;
+extern struct pbe *pagedir_nosave;
+
+/* Preferred image size in MB (default 500) */
+extern unsigned int image_size;
extern asmlinkage int swsusp_arch_suspend(void);
extern asmlinkage int swsusp_arch_resume(void);
+extern unsigned int count_data_pages(void);
extern void free_pagedir(struct pbe *pblist);
+extern void release_eaten_pages(void);
extern struct pbe *alloc_pagedir(unsigned nr_pages, gfp_t gfp_mask, int safe_needed);
-extern void create_pbe_list(struct pbe *pblist, unsigned nr_pages);
extern void swsusp_free(void);
extern int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed);
+extern unsigned int snapshot_nr_pages(void);
+extern struct pbe *snapshot_pblist(void);
+extern void snapshot_pblist_set(struct pbe *pblist);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 4a6dbcefd37..41f66365f0d 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -33,7 +33,35 @@
#include "power.h"
+struct pbe *pagedir_nosave;
+unsigned int nr_copy_pages;
+
#ifdef CONFIG_HIGHMEM
+unsigned int count_highmem_pages(void)
+{
+ struct zone *zone;
+ unsigned long zone_pfn;
+ unsigned int n = 0;
+
+ for_each_zone (zone)
+ if (is_highmem(zone)) {
+ mark_free_pages(zone);
+ for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
+ struct page *page;
+ unsigned long pfn = zone_pfn + zone->zone_start_pfn;
+ if (!pfn_valid(pfn))
+ continue;
+ page = pfn_to_page(pfn);
+ if (PageReserved(page))
+ continue;
+ if (PageNosaveFree(page))
+ continue;
+ n++;
+ }
+ }
+ return n;
+}
+
struct highmem_page {
char *data;
struct page *page;
@@ -149,17 +177,15 @@ static int saveable(struct zone *zone, unsigned long *zone_pfn)
BUG_ON(PageReserved(page) && PageNosave(page));
if (PageNosave(page))
return 0;
- if (PageReserved(page) && pfn_is_nosave(pfn)) {
- pr_debug("[nosave pfn 0x%lx]", pfn);
+ if (PageReserved(page) && pfn_is_nosave(pfn))
return 0;
- }
if (PageNosaveFree(page))
return 0;
return 1;
}
-static unsigned count_data_pages(void)
+unsigned int count_data_pages(void)
{
struct zone *zone;
unsigned long zone_pfn;
@@ -244,7 +270,7 @@ static inline void fill_pb_page(struct pbe *pbpage)
* of memory pages allocated with alloc_pagedir()
*/
-void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
+static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
{
struct pbe *pbpage, *p;
unsigned int num = PBES_PER_PAGE;
@@ -261,7 +287,35 @@ void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
p->next = p + 1;
p->next = NULL;
}
- pr_debug("create_pbe_list(): initialized %d PBEs\n", num);
+}
+
+/**
+ * On resume it is necessary to keep track of, and later free, the
+ * unsafe pages that have been allocated, because they are needed for
+ * I/O (on x86-64 we will likely "eat" these pages once again while
+ * creating the temporary page translation tables).
+ */
+
+struct eaten_page {
+ struct eaten_page *next;
+ char padding[PAGE_SIZE - sizeof(void *)];
+};
+
+static struct eaten_page *eaten_pages = NULL;
+
+void release_eaten_pages(void)
+{
+ struct eaten_page *p, *q;
+
+ p = eaten_pages;
+ while (p) {
+ q = p->next;
+ /* We don't want swsusp_free() to free this page again */
+ ClearPageNosave(virt_to_page(p));
+ free_page((unsigned long)p);
+ p = q;
+ }
+ eaten_pages = NULL;
}
/**
@@ -282,9 +336,12 @@ static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
if (safe_needed)
do {
res = (void *)get_zeroed_page(gfp_mask);
- if (res && PageNosaveFree(virt_to_page(res)))
+ if (res && PageNosaveFree(virt_to_page(res))) {
/* This is for swsusp_free() */
SetPageNosave(virt_to_page(res));
+ ((struct eaten_page *)res)->next = eaten_pages;
+ eaten_pages = res;
+ }
} while (res && PageNosaveFree(virt_to_page(res)));
else
res = (void *)get_zeroed_page(gfp_mask);
@@ -332,7 +389,8 @@ struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed
if (!pbe) { /* get_zeroed_page() failed */
free_pagedir(pblist);
pblist = NULL;
- }
+ } else
+ create_pbe_list(pblist, nr_pages);
return pblist;
}
@@ -370,8 +428,14 @@ void swsusp_free(void)
static int enough_free_mem(unsigned int nr_pages)
{
- pr_debug("swsusp: available memory: %u pages\n", nr_free_pages());
- return nr_free_pages() > (nr_pages + PAGES_FOR_IO +
+ struct zone *zone;
+ unsigned int n = 0;
+
+ for_each_zone (zone)
+ if (!is_highmem(zone))
+ n += zone->free_pages;
+ pr_debug("swsusp: available memory: %u pages\n", n);
+ return n > (nr_pages + PAGES_FOR_IO +
(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
}
@@ -395,7 +459,6 @@ static struct pbe *swsusp_alloc(unsigned int nr_pages)
printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
return NULL;
}
- create_pbe_list(pblist, nr_pages);
if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) {
printk(KERN_ERR "suspend: Allocating image pages failed.\n");
@@ -421,10 +484,6 @@ asmlinkage int swsusp_save(void)
(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE,
PAGES_FOR_IO, nr_free_pages());
- /* This is needed because of the fixed size of swsusp_info */
- if (MAX_PBES < (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE)
- return -ENOSPC;
-
if (!enough_free_mem(nr_pages)) {
printk(KERN_ERR "swsusp: Not enough free memory\n");
return -ENOMEM;
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index c05f46e7348..55a18d26abe 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -30,8 +30,8 @@
* Alex Badea <vampire@go.ro>:
* Fixed runaway init
*
- * Andreas Steinmetz <ast@domdv.de>:
- * Added encrypted suspend option
+ * Rafael J. Wysocki <rjw@sisk.pl>
+ * Added the swap map data structure and reworked the handling of swap
*
* More state savers are welcome. Especially for the scsi layer...
*
@@ -67,44 +67,33 @@
#include <asm/tlbflush.h>
#include <asm/io.h>
-#include <linux/random.h>
-#include <linux/crypto.h>
-#include <asm/scatterlist.h>
-
#include "power.h"
+/*
+ * Preferred image size in MB (tunable via /sys/power/image_size).
+ * When it is set to N, swsusp will do its best to ensure the image
+ * size will not exceed N MB, but if that is impossible, it will
+ * try to create the smallest image possible.
+ */
+unsigned int image_size = 500;
+
#ifdef CONFIG_HIGHMEM
+unsigned int count_highmem_pages(void);
int save_highmem(void);
int restore_highmem(void);
#else
static int save_highmem(void) { return 0; }
static int restore_highmem(void) { return 0; }
+static unsigned int count_highmem_pages(void) { return 0; }
#endif
-#define CIPHER "aes"
-#define MAXKEY 32
-#define MAXIV 32
-
extern char resume_file[];
-/* Local variables that should not be affected by save */
-unsigned int nr_copy_pages __nosavedata = 0;
-
-/* Suspend pagedir is allocated before final copy, therefore it
- must be freed after resume
-
- Warning: this is even more evil than it seems. Pagedirs this file
- talks about are completely different from page directories used by
- MMU hardware.
- */
-suspend_pagedir_t *pagedir_nosave __nosavedata = NULL;
-
#define SWSUSP_SIG "S1SUSPEND"
static struct swsusp_header {
- char reserved[PAGE_SIZE - 20 - MAXKEY - MAXIV - sizeof(swp_entry_t)];
- u8 key_iv[MAXKEY+MAXIV];
- swp_entry_t swsusp_info;
+ char reserved[PAGE_SIZE - 20 - sizeof(swp_entry_t)];
+ swp_entry_t image;
char orig_sig[10];
char sig[10];
} __attribute__((packed, aligned(PAGE_SIZE))) swsusp_header;
@@ -115,140 +104,9 @@ static struct swsusp_info swsusp_info;
* Saving part...
*/
-/* We memorize in swapfile_used what swap devices are used for suspension */
-#define SWAPFILE_UNUSED 0
-#define SWAPFILE_SUSPEND 1 /* This is the suspending device */
-#define SWAPFILE_IGNORED 2 /* Those are other swap devices ignored for suspension */
-
-static unsigned short swapfile_used[MAX_SWAPFILES];
-static unsigned short root_swap;
-
-static int write_page(unsigned long addr, swp_entry_t *loc);
-static int bio_read_page(pgoff_t page_off, void *page);
-
-static u8 key_iv[MAXKEY+MAXIV];
-
-#ifdef CONFIG_SWSUSP_ENCRYPT
-
-static int crypto_init(int mode, void **mem)
-{
- int error = 0;
- int len;
- char *modemsg;
- struct crypto_tfm *tfm;
-
- modemsg = mode ? "suspend not possible" : "resume not possible";
-
- tfm = crypto_alloc_tfm(CIPHER, CRYPTO_TFM_MODE_CBC);
- if(!tfm) {
- printk(KERN_ERR "swsusp: no tfm, %s\n", modemsg);
- error = -EINVAL;
- goto out;
- }
-
- if(MAXKEY < crypto_tfm_alg_min_keysize(tfm)) {
- printk(KERN_ERR "swsusp: key buffer too small, %s\n", modemsg);
- error = -ENOKEY;
- goto fail;
- }
-
- if (mode)
- get_random_bytes(key_iv, MAXKEY+MAXIV);
-
- len = crypto_tfm_alg_max_keysize(tfm);
- if (len > MAXKEY)
- len = MAXKEY;
-
- if (crypto_cipher_setkey(tfm, key_iv, len)) {
- printk(KERN_ERR "swsusp: key setup failure, %s\n", modemsg);
- error = -EKEYREJECTED;
- goto fail;
- }
-
- len = crypto_tfm_alg_ivsize(tfm);
-
- if (MAXIV < len) {
- printk(KERN_ERR "swsusp: iv buffer too small, %s\n", modemsg);
- error = -EOVERFLOW;
- goto fail;
- }
-
- crypto_cipher_set_iv(tfm, key_iv+MAXKEY, len);
-
- *mem=(void *)tfm;
-
- goto out;
-
-fail: crypto_free_tfm(tfm);
-out: return error;
-}
-
-static __inline__ void crypto_exit(void *mem)
-{
- crypto_free_tfm((struct crypto_tfm *)mem);
-}
-
-static __inline__ int crypto_write(struct pbe *p, void *mem)
-{
- int error = 0;
- struct scatterlist src, dst;
-
- src.page = virt_to_page(p->address);
- src.offset = 0;
- src.length = PAGE_SIZE;
- dst.page = virt_to_page((void *)&swsusp_header);
- dst.offset = 0;
- dst.length = PAGE_SIZE;
-
- error = crypto_cipher_encrypt((struct crypto_tfm *)mem, &dst, &src,
- PAGE_SIZE);
-
- if (!error)
- error = write_page((unsigned long)&swsusp_header,
- &(p->swap_address));
- return error;
-}
-
-static __inline__ int crypto_read(struct pbe *p, void *mem)
-{
- int error = 0;
- struct scatterlist src, dst;
-
- error = bio_read_page(swp_offset(p->swap_address), (void *)p->address);
- if (!error) {
- src.offset = 0;
- src.length = PAGE_SIZE;
- dst.offset = 0;
- dst.length = PAGE_SIZE;
- src.page = dst.page = virt_to_page((void *)p->address);
-
- error = crypto_cipher_decrypt((struct crypto_tfm *)mem, &dst,
- &src, PAGE_SIZE);
- }
- return error;
-}
-#else
-static __inline__ int crypto_init(int mode, void *mem)
-{
- return 0;
-}
-
-static __inline__ void crypto_exit(void *mem)
-{
-}
-
-static __inline__ int crypto_write(struct pbe *p, void *mem)
-{
- return write_page(p->address, &(p->swap_address));
-}
+static unsigned short root_swap = 0xffff;
-static __inline__ int crypto_read(struct pbe *p, void *mem)
-{
- return bio_read_page(swp_offset(p->swap_address), (void *)p->address);
-}
-#endif
-
-static int mark_swapfiles(swp_entry_t prev)
+static int mark_swapfiles(swp_entry_t start)
{
int error;
@@ -259,8 +117,7 @@ static int mark_swapfiles(swp_entry_t prev)
!memcmp("SWAPSPACE2",swsusp_header.sig, 10)) {
memcpy(swsusp_header.orig_sig,swsusp_header.sig, 10);
memcpy(swsusp_header.sig,SWSUSP_SIG, 10);
- memcpy(swsusp_header.key_iv, key_iv, MAXKEY+MAXIV);
- swsusp_header.swsusp_info = prev;
+ swsusp_header.image = start;
error = rw_swap_page_sync(WRITE,
swp_entry(root_swap, 0),
virt_to_page((unsigned long)
@@ -283,7 +140,7 @@ static int mark_swapfiles(swp_entry_t prev)
* devfs, since the resume code can only recognize the form /dev/hda4,
* but the suspend code would see the long name.)
*/
-static int is_resume_device(const struct swap_info_struct *swap_info)
+static inline int is_resume_device(const struct swap_info_struct *swap_info)
{
struct file *file = swap_info->swap_file;
struct inode *inode = file->f_dentry->d_inode;
@@ -294,54 +151,22 @@ static int is_resume_device(const struct swap_info_struct *swap_info)
static int swsusp_swap_check(void) /* This is called before saving image */
{
- int i, len;
-
- len=strlen(resume_file);
- root_swap = 0xFFFF;
-
- spin_lock(&swap_lock);
- for (i=0; i<MAX_SWAPFILES; i++) {
- if (!(swap_info[i].flags & SWP_WRITEOK)) {
- swapfile_used[i]=SWAPFILE_UNUSED;
- } else {
- if (!len) {
- printk(KERN_WARNING "resume= option should be used to set suspend device" );
- if (root_swap == 0xFFFF) {
- swapfile_used[i] = SWAPFILE_SUSPEND;
- root_swap = i;
- } else
- swapfile_used[i] = SWAPFILE_IGNORED;
- } else {
- /* we ignore all swap devices that are not the resume_file */
- if (is_resume_device(&swap_info[i])) {
- swapfile_used[i] = SWAPFILE_SUSPEND;
- root_swap = i;
- } else {
- swapfile_used[i] = SWAPFILE_IGNORED;
- }
- }
- }
- }
- spin_unlock(&swap_lock);
- return (root_swap != 0xffff) ? 0 : -ENODEV;
-}
-
-/**
- * This is called after saving image so modification
- * will be lost after resume... and that's what we want.
- * we make the device unusable. A new call to
- * lock_swapdevices can unlock the devices.
- */
-static void lock_swapdevices(void)
-{
int i;
+ if (!swsusp_resume_device)
+ return -ENODEV;
spin_lock(&swap_lock);
- for (i = 0; i< MAX_SWAPFILES; i++)
- if (swapfile_used[i] == SWAPFILE_IGNORED) {
- swap_info[i].flags ^= SWP_WRITEOK;
+ for (i = 0; i < MAX_SWAPFILES; i++) {
+ if (!(swap_info[i].flags & SWP_WRITEOK))
+ continue;
+ if (is_resume_device(swap_info + i)) {
+ spin_unlock(&swap_lock);
+ root_swap = i;
+ return 0;
}
+ }
spin_unlock(&swap_lock);
+ return -ENODEV;
}
/**
@@ -359,72 +184,217 @@ static void lock_swapdevices(void)
static int write_page(unsigned long addr, swp_entry_t *loc)
{
swp_entry_t entry;
- int error = 0;
+ int error = -ENOSPC;
- entry = get_swap_page();
- if (swp_offset(entry) &&
- swapfile_used[swp_type(entry)] == SWAPFILE_SUSPEND) {
- error = rw_swap_page_sync(WRITE, entry,
- virt_to_page(addr));
- if (error == -EIO)
- error = 0;
- if (!error)
+ entry = get_swap_page_of_type(root_swap);
+ if (swp_offset(entry)) {
+ error = rw_swap_page_sync(WRITE, entry, virt_to_page(addr));
+ if (!error || error == -EIO)
*loc = entry;
- } else
- error = -ENOSPC;
+ }
return error;
}
/**
- * data_free - Free the swap entries used by the saved image.
+ * Swap map-handling functions
+ *
+ * The swap map is a data structure used for keeping track of each page
+ * written to swap. It consists of many swap_map_page structures,
+ * each of which contains an array of MAP_PAGE_SIZE swap entries.
+ * These structures are linked together with the help of either the
+ * .next (in memory) or the .next_swap (in swap) member.
*
- * Walk the list of used swap entries and free each one.
- * This is only used for cleanup when suspend fails.
+ * The swap map is created during suspend. At that time we need to keep
+ * it in memory, because we have to free all of the allocated swap
+ * entries if an error occurs. The memory needed is preallocated
+ * so that we know in advance if there's enough of it.
+ *
+ * The first swap_map_page structure is filled with the swap entries that
+ * correspond to the first MAP_PAGE_SIZE data pages written to swap and
+ * so on. After all of the data pages have been written, the order
+ * of the swap_map_page structures in the map is reversed so that they
+ * can be read from swap in the original order. This causes the data
+ * pages to be loaded in exactly the same order in which they have been
+ * saved.
+ *
+ * During resume we only need to use one swap_map_page structure
+ * at a time, which means that we only need to use two memory pages for
+ * reading the image - one for reading the swap_map_page structures
+ * and the second for reading the data pages from swap.
*/
-static void data_free(void)
+
+#define MAP_PAGE_SIZE ((PAGE_SIZE - sizeof(swp_entry_t) - sizeof(void *)) \
+ / sizeof(swp_entry_t))
+
+struct swap_map_page {
+ swp_entry_t entries[MAP_PAGE_SIZE];
+ swp_entry_t next_swap;
+ struct swap_map_page *next;
+};
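
For a feel of the sizes involved: with a 4096-byte page and 8-byte swp_entry_t and pointer fields (both architecture dependent, so treat the numbers as an example), one swap_map_page holds 510 entries. A small stand-alone computation of the MAP_PAGE_SIZE expression above:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long entry_size = 8;	/* sizeof(swp_entry_t), assumed */
	unsigned long ptr_size = 8;	/* sizeof(void *), assumed */
	unsigned long map_page_size =
		(page_size - entry_size - ptr_size) / entry_size;

	printf("swap entries per swap_map_page: %lu\n", map_page_size);
	return 0;
}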
+
+static inline void free_swap_map(struct swap_map_page *swap_map)
{
- swp_entry_t entry;
- struct pbe *p;
+ struct swap_map_page *swp;
- for_each_pbe (p, pagedir_nosave) {
- entry = p->swap_address;
- if (entry.val)
- swap_free(entry);
- else
- break;
+ while (swap_map) {
+ swp = swap_map->next;
+ free_page((unsigned long)swap_map);
+ swap_map = swp;
}
}
+static struct swap_map_page *alloc_swap_map(unsigned int nr_pages)
+{
+ struct swap_map_page *swap_map, *swp;
+ unsigned n = 0;
+
+ if (!nr_pages)
+ return NULL;
+
+ pr_debug("alloc_swap_map(): nr_pages = %d\n", nr_pages);
+ swap_map = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC);
+ swp = swap_map;
+ for (n = MAP_PAGE_SIZE; n < nr_pages; n += MAP_PAGE_SIZE) {
+ swp->next = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC);
+ swp = swp->next;
+ if (!swp) {
+ free_swap_map(swap_map);
+ return NULL;
+ }
+ }
+ return swap_map;
+}
+
/**
- * data_write - Write saved image to swap.
- *
- * Walk the list of pages in the image and sync each one to swap.
+ * reverse_swap_map - reverse the order of pages in the swap map
+ * @swap_map
*/
-static int data_write(void)
+
+static inline struct swap_map_page *reverse_swap_map(struct swap_map_page *swap_map)
{
- int error = 0, i = 0;
- unsigned int mod = nr_copy_pages / 100;
- struct pbe *p;
- void *tfm;
+ struct swap_map_page *prev, *next;
+
+ prev = NULL;
+ while (swap_map) {
+ next = swap_map->next;
+ swap_map->next = prev;
+ prev = swap_map;
+ swap_map = next;
+ }
+ return prev;
+}
- if ((error = crypto_init(1, &tfm)))
- return error;
+/**
+ * free_swap_map_entries - free the swap entries allocated to store
+ * the swap map @swap_map (this is only called in case of an error)
+ */
+static inline void free_swap_map_entries(struct swap_map_page *swap_map)
+{
+ while (swap_map) {
+ if (swap_map->next_swap.val)
+ swap_free(swap_map->next_swap);
+ swap_map = swap_map->next;
+ }
+}
- if (!mod)
- mod = 1;
+/**
+ * save_swap_map - save the swap map used for tracking the data pages
+ * stored in swap
+ */
- printk( "Writing data to swap (%d pages)... ", nr_copy_pages );
- for_each_pbe (p, pagedir_nosave) {
- if (!(i%mod))
- printk( "\b\b\b\b%3d%%", i / mod );
- if ((error = crypto_write(p, tfm))) {
- crypto_exit(tfm);
+static int save_swap_map(struct swap_map_page *swap_map, swp_entry_t *start)
+{
+ swp_entry_t entry = (swp_entry_t){0};
+ int error;
+
+ while (swap_map) {
+ swap_map->next_swap = entry;
+ if ((error = write_page((unsigned long)swap_map, &entry)))
return error;
- }
- i++;
+ swap_map = swap_map->next;
}
- printk("\b\b\b\bdone\n");
- crypto_exit(tfm);
+ *start = entry;
+ return 0;
+}
+
+/**
+ * free_image_entries - free the swap entries allocated to store
+ * the image data pages (this is only called in case of an error)
+ */
+
+static inline void free_image_entries(struct swap_map_page *swp)
+{
+ unsigned k;
+
+ while (swp) {
+ for (k = 0; k < MAP_PAGE_SIZE; k++)
+ if (swp->entries[k].val)
+ swap_free(swp->entries[k]);
+ swp = swp->next;
+ }
+}
+
+/**
+ * The swap_map_handle structure is used for handling the swap map in
+ * a file-like way
+ */
+
+struct swap_map_handle {
+ struct swap_map_page *cur;
+ unsigned int k;
+};
+
+static inline void init_swap_map_handle(struct swap_map_handle *handle,
+ struct swap_map_page *map)
+{
+ handle->cur = map;
+ handle->k = 0;
+}
+
+static inline int swap_map_write_page(struct swap_map_handle *handle,
+ unsigned long addr)
+{
+ int error;
+
+ error = write_page(addr, handle->cur->entries + handle->k);
+ if (error)
+ return error;
+ if (++handle->k >= MAP_PAGE_SIZE) {
+ handle->cur = handle->cur->next;
+ handle->k = 0;
+ }
+ return 0;
+}
+
+/**
+ * save_image_data - save the data pages pointed to by the PBEs
+ * from the list @pblist using the swap map handle @handle
+ * (assume there are @nr_pages data pages to save)
+ */
+
+static int save_image_data(struct pbe *pblist,
+ struct swap_map_handle *handle,
+ unsigned int nr_pages)
+{
+ unsigned int m;
+ struct pbe *p;
+ int error = 0;
+
+ printk("Saving image data pages (%u pages) ... ", nr_pages);
+ m = nr_pages / 100;
+ if (!m)
+ m = 1;
+ nr_pages = 0;
+ for_each_pbe (p, pblist) {
+ error = swap_map_write_page(handle, p->address);
+ if (error)
+ break;
+ if (!(nr_pages % m))
+ printk("\b\b\b\b%3d%%", nr_pages / m);
+ nr_pages++;
+ }
+ if (!error)
+ printk("\b\b\b\bdone\n");
return error;
}
@@ -440,70 +410,70 @@ static void dump_info(void)
pr_debug(" swsusp: UTS Domain: %s\n",swsusp_info.uts.domainname);
pr_debug(" swsusp: CPUs: %d\n",swsusp_info.cpus);
pr_debug(" swsusp: Image: %ld Pages\n",swsusp_info.image_pages);
- pr_debug(" swsusp: Pagedir: %ld Pages\n",swsusp_info.pagedir_pages);
+ pr_debug(" swsusp: Total: %ld Pages\n", swsusp_info.pages);
}
-static void init_header(void)
+static void init_header(unsigned int nr_pages)
{
memset(&swsusp_info, 0, sizeof(swsusp_info));
swsusp_info.version_code = LINUX_VERSION_CODE;
swsusp_info.num_physpages = num_physpages;
memcpy(&swsusp_info.uts, &system_utsname, sizeof(system_utsname));
- swsusp_info.suspend_pagedir = pagedir_nosave;
swsusp_info.cpus = num_online_cpus();
- swsusp_info.image_pages = nr_copy_pages;
-}
-
-static int close_swap(void)
-{
- swp_entry_t entry;
- int error;
-
- dump_info();
- error = write_page((unsigned long)&swsusp_info, &entry);
- if (!error) {
- printk( "S" );
- error = mark_swapfiles(entry);
- printk( "|\n" );
- }
- return error;
+ swsusp_info.image_pages = nr_pages;
+ swsusp_info.pages = nr_pages +
+ ((nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
}
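
The pages value computed above is the data page count plus the metadata pages needed to hold one orig_address per data page (PAGE_SIZE/sizeof(long) addresses per page) plus one page for the header itself. A small worked example, assuming 4096-byte pages and 8-byte longs:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096, long_size = 8;	/* assumed sizes */
	unsigned long nr_pages = 100000;		/* example image */
	unsigned long meta = (nr_pages * long_size + page_size - 1) / page_size;
	unsigned long total = nr_pages + meta + 1;	/* +1 for the header */

	printf("data=%lu metadata=%lu total=%lu\n", nr_pages, meta, total);
	return 0;
}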
/**
- * free_pagedir_entries - Free pages used by the page directory.
- *
- * This is used during suspend for error recovery.
+ * pack_orig_addresses - the .orig_address fields of the PBEs from the
+ * list starting at @pbe are stored in the array @buf[] (1 page)
*/
-static void free_pagedir_entries(void)
+static inline struct pbe *pack_orig_addresses(unsigned long *buf,
+ struct pbe *pbe)
{
- int i;
+ int j;
- for (i = 0; i < swsusp_info.pagedir_pages; i++)
- swap_free(swsusp_info.pagedir[i]);
+ for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
+ buf[j] = pbe->orig_address;
+ pbe = pbe->next;
+ }
+ if (!pbe)
+ for (; j < PAGE_SIZE / sizeof(long); j++)
+ buf[j] = 0;
+ return pbe;
}
-
/**
- * write_pagedir - Write the array of pages holding the page directory.
- * @last: Last swap entry we write (needed for header).
+ * save_image_metadata - save the .orig_address fields of the PBEs
+ * from the list @pblist using the swap map handle @handle
*/
-static int write_pagedir(void)
+static int save_image_metadata(struct pbe *pblist,
+ struct swap_map_handle *handle)
{
- int error = 0;
+ unsigned long *buf;
unsigned int n = 0;
- struct pbe *pbe;
+ struct pbe *p;
+ int error = 0;
- printk( "Writing pagedir...");
- for_each_pb_page (pbe, pagedir_nosave) {
- if ((error = write_page((unsigned long)pbe, &swsusp_info.pagedir[n++])))
- return error;
+ printk("Saving image metadata ... ");
+ buf = (unsigned long *)get_zeroed_page(GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+ p = pblist;
+ while (p) {
+ p = pack_orig_addresses(buf, p);
+ error = swap_map_write_page(handle, (unsigned long)buf);
+ if (error)
+ break;
+ n++;
}
-
- swsusp_info.pagedir_pages = n;
- printk("done (%u pages)\n", n);
+ free_page((unsigned long)buf);
+ if (!error)
+ printk("done (%u pages saved)\n", n);
return error;
}
@@ -511,75 +481,125 @@ static int write_pagedir(void)
* enough_swap - Make sure we have enough swap to save the image.
*
* Returns TRUE or FALSE after checking the total amount of swap
- * space avaiable.
- *
- * FIXME: si_swapinfo(&i) returns all swap devices information.
- * We should only consider resume_device.
+ * space available from the resume partition.
*/
static int enough_swap(unsigned int nr_pages)
{
- struct sysinfo i;
+ unsigned int free_swap = swap_info[root_swap].pages -
+ swap_info[root_swap].inuse_pages;
- si_swapinfo(&i);
- pr_debug("swsusp: available swap: %lu pages\n", i.freeswap);
- return i.freeswap > (nr_pages + PAGES_FOR_IO +
+ pr_debug("swsusp: free swap pages: %u\n", free_swap);
+ return free_swap > (nr_pages + PAGES_FOR_IO +
(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
}
/**
- * write_suspend_image - Write entire image and metadata.
+ * swsusp_write - Write entire image and metadata.
*
+ * It is important _NOT_ to umount filesystems at this point. We want
+ * them synced (in case something goes wrong) but we do NOT want to mark
+ * the filesystem clean: it is not. (And it does not matter; if we resume
+ * correctly, we'll mark the system clean anyway.)
*/
-static int write_suspend_image(void)
+
+int swsusp_write(struct pbe *pblist, unsigned int nr_pages)
{
+ struct swap_map_page *swap_map;
+ struct swap_map_handle handle;
+ swp_entry_t start;
int error;
- if (!enough_swap(nr_copy_pages)) {
+ if ((error = swsusp_swap_check())) {
+ printk(KERN_ERR "swsusp: Cannot find swap device, try swapon -a.\n");
+ return error;
+ }
+ if (!enough_swap(nr_pages)) {
printk(KERN_ERR "swsusp: Not enough free swap\n");
return -ENOSPC;
}
- init_header();
- if ((error = data_write()))
- goto FreeData;
+ init_header(nr_pages);
+ swap_map = alloc_swap_map(swsusp_info.pages);
+ if (!swap_map)
+ return -ENOMEM;
+ init_swap_map_handle(&handle, swap_map);
+
+ error = swap_map_write_page(&handle, (unsigned long)&swsusp_info);
+ if (!error)
+ error = save_image_metadata(pblist, &handle);
+ if (!error)
+ error = save_image_data(pblist, &handle, nr_pages);
+ if (error)
+ goto Free_image_entries;
- if ((error = write_pagedir()))
- goto FreePagedir;
+ swap_map = reverse_swap_map(swap_map);
+ error = save_swap_map(swap_map, &start);
+ if (error)
+ goto Free_map_entries;
- if ((error = close_swap()))
- goto FreePagedir;
- Done:
- memset(key_iv, 0, MAXKEY+MAXIV);
+ dump_info();
+ printk( "S" );
+ error = mark_swapfiles(start);
+ printk( "|\n" );
+ if (error)
+ goto Free_map_entries;
+
+Free_swap_map:
+ free_swap_map(swap_map);
return error;
- FreePagedir:
- free_pagedir_entries();
- FreeData:
- data_free();
- goto Done;
+
+Free_map_entries:
+ free_swap_map_entries(swap_map);
+Free_image_entries:
+ free_image_entries(swap_map);
+ goto Free_swap_map;
}
-/* It is important _NOT_ to umount filesystems at this point. We want
- * them synced (in case something goes wrong) but we DO not want to mark
- * filesystem clean: it is not. (And it does not matter, if we resume
- * correctly, we'll mark system clean, anyway.)
+/**
+ * swsusp_shrink_memory - Try to free as much memory as needed
+ *
+ * ... but do not OOM-kill anyone
+ *
+ * Notice: all userland should be stopped before it is called, or
+ * livelock is possible.
*/
-int swsusp_write(void)
-{
- int error;
- if ((error = swsusp_swap_check())) {
- printk(KERN_ERR "swsusp: cannot find swap device, try swapon -a.\n");
- return error;
- }
- lock_swapdevices();
- error = write_suspend_image();
- /* This will unlock ignored swap devices since writing is finished */
- lock_swapdevices();
- return error;
-}
+#define SHRINK_BITE 10000
+int swsusp_shrink_memory(void)
+{
+ long size, tmp;
+ struct zone *zone;
+ unsigned long pages = 0;
+ unsigned int i = 0;
+ char *p = "-\\|/";
+
+ printk("Shrinking memory... ");
+ do {
+ size = 2 * count_highmem_pages();
+ size += size / 50 + count_data_pages();
+ size += (size + PBES_PER_PAGE - 1) / PBES_PER_PAGE +
+ PAGES_FOR_IO;
+ tmp = size;
+ for_each_zone (zone)
+ if (!is_highmem(zone))
+ tmp -= zone->free_pages;
+ if (tmp > 0) {
+ tmp = shrink_all_memory(SHRINK_BITE);
+ if (!tmp)
+ return -ENOMEM;
+ pages += tmp;
+ } else if (size > (image_size * 1024 * 1024) / PAGE_SIZE) {
+ tmp = shrink_all_memory(SHRINK_BITE);
+ pages += tmp;
+ }
+ printk("\b%c", p[i++%4]);
+ } while (tmp > 0);
+ printk("\bdone (%lu pages freed)\n", pages);
+ return 0;
+}
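
The loop above keeps reclaiming memory until the estimated image fits into free low memory and, as long as progress is still being made, while the estimate exceeds image_size. A small sketch of the per-iteration size estimate, with made-up page counts and placeholder values for PAGES_FOR_IO and PBES_PER_PAGE:

#include <stdio.h>

#define PAGES_FOR_IO	1024	/* placeholder for the kernel constant */
#define PBES_PER_PAGE	254	/* placeholder for the kernel constant */

int main(void)
{
	long highmem_pages = 20000;	/* example counts */
	long data_pages = 100000;
	long size;

	size = 2 * highmem_pages;		/* as in the loop above */
	size += size / 50 + data_pages;		/* ~2% slack plus data pages */
	size += (size + PBES_PER_PAGE - 1) / PBES_PER_PAGE + PAGES_FOR_IO;

	printf("estimated pages needed for the image: %ld\n", size);
	return 0;
}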
int swsusp_suspend(void)
{
@@ -677,7 +697,6 @@ static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
/* We assume both lists contain the same number of elements */
while (src) {
dst->orig_address = src->orig_address;
- dst->swap_address = src->swap_address;
dst = dst->next;
src = src->next;
}
@@ -757,198 +776,224 @@ static int bio_write_page(pgoff_t page_off, void *page)
return submit(WRITE, page_off, page);
}
-/*
- * Sanity check if this image makes sense with this kernel/swap context
- * I really don't think that it's foolproof but more than nothing..
+/**
+ * The following functions allow us to read data using a swap map
+ * in a file-like way
*/
-static const char *sanity_check(void)
+static inline void release_swap_map_reader(struct swap_map_handle *handle)
{
- dump_info();
- if (swsusp_info.version_code != LINUX_VERSION_CODE)
- return "kernel version";
- if (swsusp_info.num_physpages != num_physpages)
- return "memory size";
- if (strcmp(swsusp_info.uts.sysname,system_utsname.sysname))
- return "system type";
- if (strcmp(swsusp_info.uts.release,system_utsname.release))
- return "kernel release";
- if (strcmp(swsusp_info.uts.version,system_utsname.version))
- return "version";
- if (strcmp(swsusp_info.uts.machine,system_utsname.machine))
- return "machine";
-#if 0
- /* We can't use number of online CPUs when we use hotplug to remove them ;-))) */
- if (swsusp_info.cpus != num_possible_cpus())
- return "number of cpus";
-#endif
- return NULL;
+ if (handle->cur)
+ free_page((unsigned long)handle->cur);
+ handle->cur = NULL;
}
-
-static int check_header(void)
+static inline int get_swap_map_reader(struct swap_map_handle *handle,
+ swp_entry_t start)
{
- const char *reason = NULL;
int error;
- if ((error = bio_read_page(swp_offset(swsusp_header.swsusp_info), &swsusp_info)))
+ if (!swp_offset(start))
+ return -EINVAL;
+ handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC);
+ if (!handle->cur)
+ return -ENOMEM;
+ error = bio_read_page(swp_offset(start), handle->cur);
+ if (error) {
+ release_swap_map_reader(handle);
return error;
-
- /* Is this same machine? */
- if ((reason = sanity_check())) {
- printk(KERN_ERR "swsusp: Resume mismatch: %s\n",reason);
- return -EPERM;
}
- nr_copy_pages = swsusp_info.image_pages;
- return error;
+ handle->k = 0;
+ return 0;
}
-static int check_sig(void)
+static inline int swap_map_read_page(struct swap_map_handle *handle, void *buf)
{
+ unsigned long offset;
int error;
- memset(&swsusp_header, 0, sizeof(swsusp_header));
- if ((error = bio_read_page(0, &swsusp_header)))
- return error;
- if (!memcmp(SWSUSP_SIG, swsusp_header.sig, 10)) {
- memcpy(swsusp_header.sig, swsusp_header.orig_sig, 10);
- memcpy(key_iv, swsusp_header.key_iv, MAXKEY+MAXIV);
- memset(swsusp_header.key_iv, 0, MAXKEY+MAXIV);
-
- /*
- * Reset swap signature now.
- */
- error = bio_write_page(0, &swsusp_header);
- } else {
+ if (!handle->cur)
+ return -EINVAL;
+ offset = swp_offset(handle->cur->entries[handle->k]);
+ if (!offset)
return -EINVAL;
+ error = bio_read_page(offset, buf);
+ if (error)
+ return error;
+ if (++handle->k >= MAP_PAGE_SIZE) {
+ handle->k = 0;
+ offset = swp_offset(handle->cur->next_swap);
+ if (!offset)
+ release_swap_map_reader(handle);
+ else
+ error = bio_read_page(offset, handle->cur);
}
- if (!error)
- pr_debug("swsusp: Signature found, resuming\n");
return error;
}
-/**
- * data_read - Read image pages from swap.
- *
- * You do not need to check for overlaps, check_pagedir()
- * already did that.
- */
-
-static int data_read(struct pbe *pblist)
+static int check_header(void)
{
- struct pbe *p;
- int error = 0;
- int i = 0;
- int mod = swsusp_info.image_pages / 100;
- void *tfm;
-
- if ((error = crypto_init(0, &tfm)))
- return error;
-
- if (!mod)
- mod = 1;
-
- printk("swsusp: Reading image data (%lu pages): ",
- swsusp_info.image_pages);
-
- for_each_pbe (p, pblist) {
- if (!(i % mod))
- printk("\b\b\b\b%3d%%", i / mod);
+ char *reason = NULL;
- if ((error = crypto_read(p, tfm))) {
- crypto_exit(tfm);
- return error;
- }
-
- i++;
+ dump_info();
+ if (swsusp_info.version_code != LINUX_VERSION_CODE)
+ reason = "kernel version";
+ if (swsusp_info.num_physpages != num_physpages)
+ reason = "memory size";
+ if (strcmp(swsusp_info.uts.sysname,system_utsname.sysname))
+ reason = "system type";
+ if (strcmp(swsusp_info.uts.release,system_utsname.release))
+ reason = "kernel release";
+ if (strcmp(swsusp_info.uts.version,system_utsname.version))
+ reason = "version";
+ if (strcmp(swsusp_info.uts.machine,system_utsname.machine))
+ reason = "machine";
+ if (reason) {
+ printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
+ return -EPERM;
}
- printk("\b\b\b\bdone\n");
- crypto_exit(tfm);
- return error;
+ return 0;
}
/**
- * read_pagedir - Read page backup list pages from swap
+ * load_image_data - load the image data using the swap map handle
+ * @handle and store them using the page backup list @pblist
+ * (assume there are @nr_pages pages to load)
*/
-static int read_pagedir(struct pbe *pblist)
+static int load_image_data(struct pbe *pblist,
+ struct swap_map_handle *handle,
+ unsigned int nr_pages)
{
- struct pbe *pbpage, *p;
- unsigned int i = 0;
int error;
+ unsigned int m;
+ struct pbe *p;
if (!pblist)
- return -EFAULT;
-
- printk("swsusp: Reading pagedir (%lu pages)\n",
- swsusp_info.pagedir_pages);
-
- for_each_pb_page (pbpage, pblist) {
- unsigned long offset = swp_offset(swsusp_info.pagedir[i++]);
-
- error = -EFAULT;
- if (offset) {
- p = (pbpage + PB_PAGE_SKIP)->next;
- error = bio_read_page(offset, (void *)pbpage);
- (pbpage + PB_PAGE_SKIP)->next = p;
- }
+ return -EINVAL;
+ printk("Loading image data pages (%u pages) ... ", nr_pages);
+ m = nr_pages / 100;
+ if (!m)
+ m = 1;
+ nr_pages = 0;
+ p = pblist;
+ while (p) {
+ error = swap_map_read_page(handle, (void *)p->address);
if (error)
break;
+ p = p->next;
+ if (!(nr_pages % m))
+ printk("\b\b\b\b%3d%%", nr_pages / m);
+ nr_pages++;
}
-
if (!error)
- BUG_ON(i != swsusp_info.pagedir_pages);
-
+ printk("\b\b\b\bdone\n");
return error;
}
+/**
+ * unpack_orig_addresses - copy the elements of @buf[] (1 page) to
+ * the PBEs in the list starting at @pbe
+ */
-static int check_suspend_image(void)
+static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
+ struct pbe *pbe)
{
- int error = 0;
+ int j;
- if ((error = check_sig()))
- return error;
-
- if ((error = check_header()))
- return error;
-
- return 0;
+ for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
+ pbe->orig_address = buf[j];
+ pbe = pbe->next;
+ }
+ return pbe;
}
-static int read_suspend_image(void)
+/**
+ * load_image_metadata - load the image metadata using the swap map
+ * handle @handle and put them into the PBEs in the list @pblist
+ */
+
+static int load_image_metadata(struct pbe *pblist, struct swap_map_handle *handle)
{
- int error = 0;
struct pbe *p;
+ unsigned long *buf;
+ unsigned int n = 0;
+ int error = 0;
- if (!(p = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 0)))
+ printk("Loading image metadata ... ");
+ buf = (unsigned long *)get_zeroed_page(GFP_ATOMIC);
+ if (!buf)
return -ENOMEM;
-
- if ((error = read_pagedir(p)))
- return error;
- create_pbe_list(p, nr_copy_pages);
- mark_unsafe_pages(p);
- pagedir_nosave = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 1);
- if (pagedir_nosave) {
- create_pbe_list(pagedir_nosave, nr_copy_pages);
- copy_page_backup_list(pagedir_nosave, p);
+ p = pblist;
+ while (p) {
+ error = swap_map_read_page(handle, buf);
+ if (error)
+ break;
+ p = unpack_orig_addresses(buf, p);
+ n++;
}
- free_pagedir(p);
- if (!pagedir_nosave)
- return -ENOMEM;
+ free_page((unsigned long)buf);
+ if (!error)
+ printk("done (%u pages loaded)\n", n);
+ return error;
+}
- /* Allocate memory for the image and read the data from swap */
+int swsusp_read(struct pbe **pblist_ptr)
+{
+ int error;
+ struct pbe *p, *pblist;
+ struct swap_map_handle handle;
+ unsigned int nr_pages;
- error = alloc_data_pages(pagedir_nosave, GFP_ATOMIC, 1);
+ if (IS_ERR(resume_bdev)) {
+ pr_debug("swsusp: block device not initialised\n");
+ return PTR_ERR(resume_bdev);
+ }
+ error = get_swap_map_reader(&handle, swsusp_header.image);
if (!error)
- error = data_read(pagedir_nosave);
+ error = swap_map_read_page(&handle, &swsusp_info);
+ if (!error)
+ error = check_header();
+ if (error)
+ return error;
+ nr_pages = swsusp_info.image_pages;
+ p = alloc_pagedir(nr_pages, GFP_ATOMIC, 0);
+ if (!p)
+ return -ENOMEM;
+ error = load_image_metadata(p, &handle);
+ if (!error) {
+ mark_unsafe_pages(p);
+ pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1);
+ if (pblist)
+ copy_page_backup_list(pblist, p);
+ free_pagedir(p);
+ if (!pblist)
+ error = -ENOMEM;
+
+ /* Allocate memory for the image and read the data from swap */
+ if (!error)
+ error = alloc_data_pages(pblist, GFP_ATOMIC, 1);
+ if (!error) {
+ release_eaten_pages();
+ error = load_image_data(pblist, &handle, nr_pages);
+ }
+ if (!error)
+ *pblist_ptr = pblist;
+ }
+ release_swap_map_reader(&handle);
+ blkdev_put(resume_bdev);
+
+ if (!error)
+ pr_debug("swsusp: Reading resume file was successful\n");
+ else
+ pr_debug("swsusp: Error %d resuming\n", error);
return error;
}
/**
- * swsusp_check - Check for saved image in swap
+ * swsusp_check - Check for swsusp signature in the resume device
*/
int swsusp_check(void)
@@ -958,40 +1003,27 @@ int swsusp_check(void)
resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
if (!IS_ERR(resume_bdev)) {
set_blocksize(resume_bdev, PAGE_SIZE);
- error = check_suspend_image();
+ memset(&swsusp_header, 0, sizeof(swsusp_header));
+ if ((error = bio_read_page(0, &swsusp_header)))
+ return error;
+ if (!memcmp(SWSUSP_SIG, swsusp_header.sig, 10)) {
+ memcpy(swsusp_header.sig, swsusp_header.orig_sig, 10);
+ /* Reset swap signature now */
+ error = bio_write_page(0, &swsusp_header);
+ } else {
+ return -EINVAL;
+ }
if (error)
- blkdev_put(resume_bdev);
- } else
+ blkdev_put(resume_bdev);
+ else
+ pr_debug("swsusp: Signature found, resuming\n");
+ } else {
error = PTR_ERR(resume_bdev);
-
- if (!error)
- pr_debug("swsusp: resume file found\n");
- else
- pr_debug("swsusp: Error %d check for resume file\n", error);
- return error;
-}
-
-/**
- * swsusp_read - Read saved image from swap.
- */
-
-int swsusp_read(void)
-{
- int error;
-
- if (IS_ERR(resume_bdev)) {
- pr_debug("swsusp: block device not initialised\n");
- return PTR_ERR(resume_bdev);
}
- error = read_suspend_image();
- blkdev_put(resume_bdev);
- memset(key_iv, 0, MAXKEY+MAXIV);
+ if (error)
+ pr_debug("swsusp: Error %d check for resume file\n", error);
- if (!error)
- pr_debug("swsusp: Reading resume file was successful\n");
- else
- pr_debug("swsusp: Error %d resuming\n", error);
return error;
}
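
/*
 * Illustration only: the swap-map reader introduced above replaces the old
 * pagedir scheme -- each map page is an array of swap offsets plus a link to
 * the next map page, so the image can be read back sequentially.  This is a
 * minimal, non-kernel sketch of that access pattern; MAP_ENTRIES and
 * read_page() are hypothetical stand-ins for MAP_PAGE_SIZE and bio_read_page().
 */
#define MAP_ENTRIES 511                 /* offsets per map page (assumed) */

struct swap_map_page {
    unsigned long entries[MAP_ENTRIES]; /* offsets of the image's data pages */
    unsigned long next_swap;            /* offset of the next map page, 0 = end */
};

struct swap_map_handle {
    struct swap_map_page *cur;          /* current map page, read into memory */
    unsigned int k;                     /* next entry to consume */
};

extern int read_page(unsigned long offset, void *buf);   /* assumed backend */

/* Read the next data page; chain to the next map page when this one is used up. */
static int swap_map_read_next(struct swap_map_handle *h, void *buf)
{
    unsigned long offset = h->cur->entries[h->k];
    int error;

    if (!offset)
        return -1;                      /* no more data pages */
    error = read_page(offset, buf);
    if (error)
        return error;
    if (++h->k >= MAP_ENTRIES) {
        h->k = 0;
        if (h->cur->next_swap)          /* pull in the next map page, if any */
            error = read_page(h->cur->next_swap, h->cur);
    }
    return error;
}
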
diff --git a/kernel/printk.c b/kernel/printk.c
index 5287be83e3e..2251be80cd2 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -569,7 +569,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
p[1] <= '7' && p[2] == '>') {
loglev_char = p[1];
p += 3;
- printed_len += 3;
+ printed_len -= 3;
} else {
loglev_char = default_message_loglevel
+ '0';
@@ -584,7 +584,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
for (tp = tbuf; tp < tbuf + tlen; tp++)
emit_log_char(*tp);
- printed_len += tlen - 3;
+ printed_len += tlen;
} else {
if (p[0] != '<' || p[1] < '0' ||
p[1] > '7' || p[2] != '>') {
@@ -592,8 +592,8 @@ asmlinkage int vprintk(const char *fmt, va_list args)
emit_log_char(default_message_loglevel
+ '0');
emit_log_char('>');
+ printed_len += 3;
}
- printed_len += 3;
}
log_level_unknown = 0;
if (!*p)
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 656476eedb1..5f33cdb6fff 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -7,6 +7,7 @@
* to continually duplicate across every architecture.
*/
+#include <linux/capability.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
@@ -408,54 +409,62 @@ int ptrace_request(struct task_struct *child, long request,
return ret;
}
-#ifndef __ARCH_SYS_PTRACE
-static int ptrace_get_task_struct(long request, long pid,
- struct task_struct **childp)
+/**
+ * ptrace_traceme -- helper for PTRACE_TRACEME
+ *
+ * Performs checks and sets PT_PTRACED.
+ * Should be used by all ptrace implementations for PTRACE_TRACEME.
+ */
+int ptrace_traceme(void)
{
- struct task_struct *child;
int ret;
/*
- * Callers use child == NULL as an indication to exit early even
- * when the return value is 0, so make sure it is non-NULL here.
+ * Are we already being traced?
+ */
+ if (current->ptrace & PT_PTRACED)
+ return -EPERM;
+ ret = security_ptrace(current->parent, current);
+ if (ret)
+ return -EPERM;
+ /*
+ * Set the ptrace bit in the process ptrace flags.
*/
- *childp = NULL;
+ current->ptrace |= PT_PTRACED;
+ return 0;
+}
- if (request == PTRACE_TRACEME) {
- /*
- * Are we already being traced?
- */
- if (current->ptrace & PT_PTRACED)
- return -EPERM;
- ret = security_ptrace(current->parent, current);
- if (ret)
- return -EPERM;
- /*
- * Set the ptrace bit in the process ptrace flags.
- */
- current->ptrace |= PT_PTRACED;
- return 0;
- }
+/**
+ * ptrace_get_task_struct -- grab a task struct reference for ptrace
+ * @pid: process id to grab a task_struct reference of
+ *
+ * This function is a helper for ptrace implementations. It checks
+ * permissions and then grabs a task struct for use of the actual
+ * ptrace implementation.
+ *
+ * Returns the task_struct for @pid or an ERR_PTR() on failure.
+ */
+struct task_struct *ptrace_get_task_struct(pid_t pid)
+{
+ struct task_struct *child;
/*
- * You may not mess with init
+ * Tracing init is not allowed.
*/
if (pid == 1)
- return -EPERM;
+ return ERR_PTR(-EPERM);
- ret = -ESRCH;
read_lock(&tasklist_lock);
child = find_task_by_pid(pid);
if (child)
get_task_struct(child);
read_unlock(&tasklist_lock);
if (!child)
- return -ESRCH;
-
- *childp = child;
- return 0;
+ return ERR_PTR(-ESRCH);
+ return child;
}
+#ifndef __ARCH_SYS_PTRACE
asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
struct task_struct *child;
@@ -465,9 +474,16 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
* This lock_kernel fixes a subtle race with suid exec
*/
lock_kernel();
- ret = ptrace_get_task_struct(request, pid, &child);
- if (!child)
+ if (request == PTRACE_TRACEME) {
+ ret = ptrace_traceme();
goto out;
+ }
+
+ child = ptrace_get_task_struct(pid);
+ if (IS_ERR(child)) {
+ ret = PTR_ERR(child);
+ goto out;
+ }
if (request == PTRACE_ATTACH) {
ret = ptrace_attach(child);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 48d3bce465b..0cf8146bd58 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -35,6 +35,7 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
+#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
@@ -45,26 +46,21 @@
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
-#include <linux/rcuref.h>
#include <linux/cpu.h>
/* Definition for rcupdate control block. */
-struct rcu_ctrlblk rcu_ctrlblk =
- { .cur = -300, .completed = -300 };
-struct rcu_ctrlblk rcu_bh_ctrlblk =
- { .cur = -300, .completed = -300 };
-
-/* Bookkeeping of the progress of the grace period */
-struct rcu_state {
- spinlock_t lock; /* Guard this struct and writes to rcu_ctrlblk */
- cpumask_t cpumask; /* CPUs that need to switch in order */
- /* for current batch to proceed. */
+struct rcu_ctrlblk rcu_ctrlblk = {
+ .cur = -300,
+ .completed = -300,
+ .lock = SPIN_LOCK_UNLOCKED,
+ .cpumask = CPU_MASK_NONE,
+};
+struct rcu_ctrlblk rcu_bh_ctrlblk = {
+ .cur = -300,
+ .completed = -300,
+ .lock = SPIN_LOCK_UNLOCKED,
+ .cpumask = CPU_MASK_NONE,
};
-
-static struct rcu_state rcu_state ____cacheline_maxaligned_in_smp =
- {.lock = SPIN_LOCK_UNLOCKED, .cpumask = CPU_MASK_NONE };
-static struct rcu_state rcu_bh_state ____cacheline_maxaligned_in_smp =
- {.lock = SPIN_LOCK_UNLOCKED, .cpumask = CPU_MASK_NONE };
DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
@@ -73,19 +69,6 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
static int maxbatch = 10000;
-#ifndef __HAVE_ARCH_CMPXCHG
-/*
- * We use an array of spinlocks for the rcurefs -- similar to ones in sparc
- * 32 bit atomic_t implementations, and a hash function similar to that
- * for our refcounting needs.
- * Can't help multiprocessors which donot have cmpxchg :(
- */
-
-spinlock_t __rcuref_hash[RCUREF_HASH_SIZE] = {
- [0 ... (RCUREF_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
-};
-#endif
-
/**
* call_rcu - Queue an RCU callback for invocation after a grace period.
* @head: structure to be used for queueing the RCU updates.
@@ -233,13 +216,13 @@ static void rcu_do_batch(struct rcu_data *rdp)
* This is done by rcu_start_batch. The start is not broadcasted to
* all cpus, they must pick this up by comparing rcp->cur with
* rdp->quiescbatch. All cpus are recorded in the
- * rcu_state.cpumask bitmap.
+ * rcu_ctrlblk.cpumask bitmap.
* - All cpus must go through a quiescent state.
* Since the start of the grace period is not broadcasted, at least two
* calls to rcu_check_quiescent_state are required:
* The first call just notices that a new grace period is running. The
* following calls check if there was a quiescent state since the beginning
- * of the grace period. If so, it updates rcu_state.cpumask. If
+ * of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
* the bitmap is empty, then the grace period is completed.
* rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace
* period (if necessary).
@@ -247,14 +230,10 @@ static void rcu_do_batch(struct rcu_data *rdp)
/*
* Register a new batch of callbacks, and start it up if there is currently no
* active batch and the batch to be registered has not already occurred.
- * Caller must hold rcu_state.lock.
+ * Caller must hold rcu_ctrlblk.lock.
*/
-static void rcu_start_batch(struct rcu_ctrlblk *rcp, struct rcu_state *rsp,
- int next_pending)
+static void rcu_start_batch(struct rcu_ctrlblk *rcp)
{
- if (next_pending)
- rcp->next_pending = 1;
-
if (rcp->next_pending &&
rcp->completed == rcp->cur) {
rcp->next_pending = 0;
@@ -268,11 +247,11 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp, struct rcu_state *rsp,
/*
* Accessing nohz_cpu_mask before incrementing rcp->cur needs a
* Barrier Otherwise it can cause tickless idle CPUs to be
- * included in rsp->cpumask, which will extend graceperiods
+ * included in rcp->cpumask, which will extend graceperiods
* unnecessarily.
*/
smp_mb();
- cpus_andnot(rsp->cpumask, cpu_online_map, nohz_cpu_mask);
+ cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);
}
}
@@ -282,13 +261,13 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp, struct rcu_state *rsp,
* Clear it from the cpu mask and complete the grace period if it was the last
* cpu. Start another grace period if someone has further entries pending
*/
-static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp, struct rcu_state *rsp)
+static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
{
- cpu_clear(cpu, rsp->cpumask);
- if (cpus_empty(rsp->cpumask)) {
+ cpu_clear(cpu, rcp->cpumask);
+ if (cpus_empty(rcp->cpumask)) {
/* batch completed ! */
rcp->completed = rcp->cur;
- rcu_start_batch(rcp, rsp, 0);
+ rcu_start_batch(rcp);
}
}
@@ -298,7 +277,7 @@ static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp, struct rcu_state *rsp)
* quiescent cycle, then indicate that it has done so.
*/
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
- struct rcu_state *rsp, struct rcu_data *rdp)
+ struct rcu_data *rdp)
{
if (rdp->quiescbatch != rcp->cur) {
/* start new grace period: */
@@ -323,15 +302,15 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
return;
rdp->qs_pending = 0;
- spin_lock(&rsp->lock);
+ spin_lock(&rcp->lock);
/*
* rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
* during cpu startup. Ignore the quiescent state.
*/
if (likely(rdp->quiescbatch == rcp->cur))
- cpu_quiet(rdp->cpu, rcp, rsp);
+ cpu_quiet(rdp->cpu, rcp);
- spin_unlock(&rsp->lock);
+ spin_unlock(&rcp->lock);
}
@@ -352,28 +331,29 @@ static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
}
static void __rcu_offline_cpu(struct rcu_data *this_rdp,
- struct rcu_ctrlblk *rcp, struct rcu_state *rsp, struct rcu_data *rdp)
+ struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
/* if the cpu going offline owns the grace period
* we can block indefinitely waiting for it, so flush
* it here
*/
- spin_lock_bh(&rsp->lock);
+ spin_lock_bh(&rcp->lock);
if (rcp->cur != rcp->completed)
- cpu_quiet(rdp->cpu, rcp, rsp);
- spin_unlock_bh(&rsp->lock);
+ cpu_quiet(rdp->cpu, rcp);
+ spin_unlock_bh(&rcp->lock);
rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
-
+ rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
}
+
static void rcu_offline_cpu(int cpu)
{
struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);
- __rcu_offline_cpu(this_rdp, &rcu_ctrlblk, &rcu_state,
+ __rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
&per_cpu(rcu_data, cpu));
- __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk, &rcu_bh_state,
+ __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
&per_cpu(rcu_bh_data, cpu));
put_cpu_var(rcu_data);
put_cpu_var(rcu_bh_data);
@@ -392,7 +372,7 @@ static void rcu_offline_cpu(int cpu)
* This does the RCU processing work from tasklet context.
*/
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
- struct rcu_state *rsp, struct rcu_data *rdp)
+ struct rcu_data *rdp)
{
if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
*rdp->donetail = rdp->curlist;
@@ -422,24 +402,53 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
if (!rcp->next_pending) {
/* and start it/schedule start if it's a new batch */
- spin_lock(&rsp->lock);
- rcu_start_batch(rcp, rsp, 1);
- spin_unlock(&rsp->lock);
+ spin_lock(&rcp->lock);
+ rcp->next_pending = 1;
+ rcu_start_batch(rcp);
+ spin_unlock(&rcp->lock);
}
} else {
local_irq_enable();
}
- rcu_check_quiescent_state(rcp, rsp, rdp);
+ rcu_check_quiescent_state(rcp, rdp);
if (rdp->donelist)
rcu_do_batch(rdp);
}
static void rcu_process_callbacks(unsigned long unused)
{
- __rcu_process_callbacks(&rcu_ctrlblk, &rcu_state,
- &__get_cpu_var(rcu_data));
- __rcu_process_callbacks(&rcu_bh_ctrlblk, &rcu_bh_state,
- &__get_cpu_var(rcu_bh_data));
+ __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
+ __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
+}
+
+static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
+{
+ /* This cpu has pending rcu entries and the grace period
+ * for them has completed.
+ */
+ if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
+ return 1;
+
+ /* This cpu has no pending entries, but there are new entries */
+ if (!rdp->curlist && rdp->nxtlist)
+ return 1;
+
+ /* This cpu has finished callbacks to invoke */
+ if (rdp->donelist)
+ return 1;
+
+ /* The rcu core waits for a quiescent state from the cpu */
+ if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
+ return 1;
+
+ /* nothing to do */
+ return 0;
+}
+
+int rcu_pending(int cpu)
+{
+ return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
+ __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
}
void rcu_check_callbacks(int cpu, int user)
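
/*
 * Hedged sketch of the intended consumer of the new rcu_pending() helper:
 * a per-CPU tick path can now ask cheaply whether either RCU flavor has work
 * before invoking the heavier machinery.  The actual call site (in the timer
 * tick) is not part of this hunk; the wrapper name below is made up.
 */
extern int rcu_pending(int cpu);
extern void rcu_check_callbacks(int cpu, int user);

static inline void tick_poke_rcu(int cpu, int user_tick)
{
    if (rcu_pending(cpu))                      /* any callbacks, batches or QS owed? */
        rcu_check_callbacks(cpu, user_tick);
}
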
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 49fbbeff201..773219907dd 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -39,7 +39,6 @@
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
-#include <linux/rcuref.h>
#include <linux/cpu.h>
#include <linux/random.h>
#include <linux/delay.h>
@@ -49,9 +48,11 @@
MODULE_LICENSE("GPL");
static int nreaders = -1; /* # reader threads, defaults to 4*ncpus */
-static int stat_interval = 0; /* Interval between stats, in seconds. */
+static int stat_interval; /* Interval between stats, in seconds. */
/* Defaults to "only at end of test". */
-static int verbose = 0; /* Print more debug info. */
+static int verbose; /* Print more debug info. */
+static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
+static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/
MODULE_PARM(nreaders, "i");
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
@@ -59,6 +60,10 @@ MODULE_PARM(stat_interval, "i");
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
MODULE_PARM(verbose, "i");
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
+MODULE_PARM(test_no_idle_hz, "i");
+MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
+MODULE_PARM(shuffle_interval, "i");
+MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
#define TORTURE_FLAG "rcutorture: "
#define PRINTK_STRING(s) \
do { printk(KERN_ALERT TORTURE_FLAG s "\n"); } while (0)
@@ -73,6 +78,7 @@ static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
+static struct task_struct *shuffler_task;
#define RCU_TORTURE_PIPE_LEN 10
@@ -103,7 +109,7 @@ atomic_t n_rcu_torture_error;
/*
* Allocate an element from the rcu_tortures pool.
*/
-struct rcu_torture *
+static struct rcu_torture *
rcu_torture_alloc(void)
{
struct list_head *p;
@@ -376,12 +382,77 @@ rcu_torture_stats(void *arg)
return 0;
}
+static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
+
+/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
+ * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
+ */
+void rcu_torture_shuffle_tasks(void)
+{
+ cpumask_t tmp_mask = CPU_MASK_ALL;
+ int i;
+
+ lock_cpu_hotplug();
+
+ /* No point in shuffling if there is only one online CPU (ex: UP) */
+ if (num_online_cpus() == 1) {
+ unlock_cpu_hotplug();
+ return;
+ }
+
+ if (rcu_idle_cpu != -1)
+ cpu_clear(rcu_idle_cpu, tmp_mask);
+
+ set_cpus_allowed(current, tmp_mask);
+
+ if (reader_tasks != NULL) {
+ for (i = 0; i < nrealreaders; i++)
+ if (reader_tasks[i])
+ set_cpus_allowed(reader_tasks[i], tmp_mask);
+ }
+
+ if (writer_task)
+ set_cpus_allowed(writer_task, tmp_mask);
+
+ if (stats_task)
+ set_cpus_allowed(stats_task, tmp_mask);
+
+ if (rcu_idle_cpu == -1)
+ rcu_idle_cpu = num_online_cpus() - 1;
+ else
+ rcu_idle_cpu--;
+
+ unlock_cpu_hotplug();
+}
+
+/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
+ * system to become idle at a time and cut off its timer ticks. This is meant
+ * to test the support for such tickless idle CPUs in RCU.
+ */
+static int
+rcu_torture_shuffle(void *arg)
+{
+ VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
+ do {
+ schedule_timeout_interruptible(shuffle_interval * HZ);
+ rcu_torture_shuffle_tasks();
+ } while (!kthread_should_stop());
+ VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
+ return 0;
+}
+
static void
rcu_torture_cleanup(void)
{
int i;
fullstop = 1;
+ if (shuffler_task != NULL) {
+ VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
+ kthread_stop(shuffler_task);
+ }
+ shuffler_task = NULL;
+
if (writer_task != NULL) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
kthread_stop(writer_task);
@@ -430,9 +501,11 @@ rcu_torture_init(void)
nrealreaders = nreaders;
else
nrealreaders = 2 * num_online_cpus();
- printk(KERN_ALERT TORTURE_FLAG
- "--- Start of test: nreaders=%d stat_interval=%d verbose=%d\n",
- nrealreaders, stat_interval, verbose);
+ printk(KERN_ALERT TORTURE_FLAG "--- Start of test: nreaders=%d "
+ "stat_interval=%d verbose=%d test_no_idle_hz=%d "
+ "shuffle_interval = %d\n",
+ nrealreaders, stat_interval, verbose, test_no_idle_hz,
+ shuffle_interval);
fullstop = 0;
/* Set up the freelist. */
@@ -502,6 +575,18 @@ rcu_torture_init(void)
goto unwind;
}
}
+ if (test_no_idle_hz) {
+ rcu_idle_cpu = num_online_cpus() - 1;
+ /* Create the shuffler thread */
+ shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
+ "rcu_torture_shuffle");
+ if (IS_ERR(shuffler_task)) {
+ firsterr = PTR_ERR(shuffler_task);
+ VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
+ shuffler_task = NULL;
+ goto unwind;
+ }
+ }
return 0;
unwind:
diff --git a/kernel/resource.c b/kernel/resource.c
index 92285d822de..e3080fcc66a 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -464,7 +464,7 @@ struct resource * __request_region(struct resource *parent, unsigned long start,
EXPORT_SYMBOL(__request_region);
-int __deprecated __check_region(struct resource *parent, unsigned long start, unsigned long n)
+int __check_region(struct resource *parent, unsigned long start, unsigned long n)
{
struct resource * res;
diff --git a/kernel/sched.c b/kernel/sched.c
index 6f46c94cc29..c9dec2aa197 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -27,12 +27,14 @@
#include <linux/smp_lock.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
+#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/suspend.h>
+#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/smp.h>
@@ -176,6 +178,13 @@ static unsigned int task_timeslice(task_t *p)
#define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \
< (long long) (sd)->cache_hot_time)
+void __put_task_struct_cb(struct rcu_head *rhp)
+{
+ __put_task_struct(container_of(rhp, struct task_struct, rcu));
+}
+
+EXPORT_SYMBOL_GPL(__put_task_struct_cb);
+
/*
* These are the runqueue data structures:
*/
@@ -1281,6 +1290,9 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync)
}
}
+ if (p->last_waker_cpu != this_cpu)
+ goto out_set_cpu;
+
if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
goto out_set_cpu;
@@ -1351,6 +1363,8 @@ out_set_cpu:
cpu = task_cpu(p);
}
+ p->last_waker_cpu = this_cpu;
+
out_activate:
#endif /* CONFIG_SMP */
if (old_state == TASK_UNINTERRUPTIBLE) {
@@ -1432,9 +1446,12 @@ void fastcall sched_fork(task_t *p, int clone_flags)
#ifdef CONFIG_SCHEDSTATS
memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
+ p->last_waker_cpu = cpu;
+#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
p->oncpu = 0;
#endif
+#endif
#ifdef CONFIG_PREEMPT
/* Want to start with kernel preemption disabled. */
task_thread_info(p)->preempt_count = 1;
@@ -3972,12 +3989,12 @@ asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
* method, such as ACPI for e.g.
*/
-cpumask_t cpu_present_map;
+cpumask_t cpu_present_map __read_mostly;
EXPORT_SYMBOL(cpu_present_map);
#ifndef CONFIG_SMP
-cpumask_t cpu_online_map = CPU_MASK_ALL;
-cpumask_t cpu_possible_map = CPU_MASK_ALL;
+cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
+cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
#endif
long sched_getaffinity(pid_t pid, cpumask_t *mask)
@@ -4379,6 +4396,7 @@ void show_state(void)
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
+ mutex_debug_show_all_locks();
}
/**
@@ -5073,7 +5091,470 @@ static void init_sched_build_groups(struct sched_group groups[], cpumask_t span,
#define SD_NODES_PER_DOMAIN 16
+/*
+ * Self-tuning task migration cost measurement between source and target CPUs.
+ *
+ * This is done by measuring the cost of manipulating buffers of varying
+ * sizes. For a given buffer-size here are the steps that are taken:
+ *
+ * 1) the source CPU reads+dirties a shared buffer
+ * 2) the target CPU reads+dirties the same shared buffer
+ *
+ * We measure how long they take, in the following 4 scenarios:
+ *
+ * - source: CPU1, target: CPU2 | cost1
+ * - source: CPU2, target: CPU1 | cost2
+ * - source: CPU1, target: CPU1 | cost3
+ * - source: CPU2, target: CPU2 | cost4
+ *
+ * We then calculate the cost3+cost4-cost1-cost2 difference - this is
+ * the cost of migration.
+ *
+ * We then start off from a small buffer-size and iterate up to larger
+ * buffer sizes, in 5% steps - measuring each buffer-size separately, and
+ * doing a maximum search for the cost. (The maximum cost for a migration
+ * normally occurs when the working set size is around the effective cache
+ * size.)
+ */
+#define SEARCH_SCOPE 2
+#define MIN_CACHE_SIZE (64*1024U)
+#define DEFAULT_CACHE_SIZE (5*1024*1024U)
+#define ITERATIONS 2
+#define SIZE_THRESH 130
+#define COST_THRESH 130
+
+/*
+ * The migration cost is a function of 'domain distance'. Domain
+ * distance is the number of steps a CPU has to iterate down its
+ * domain tree to share a domain with the other CPU. The farther
+ * two CPUs are from each other, the larger the distance gets.
+ *
+ * Note that we use the distance only to cache measurement results,
+ * the distance value is not used numerically otherwise. When two
+ * CPUs have the same distance it is assumed that the migration
+ * cost is the same. (this is a simplification but quite practical)
+ */
+#define MAX_DOMAIN_DISTANCE 32
+
+static unsigned long long migration_cost[MAX_DOMAIN_DISTANCE] =
+ { [ 0 ... MAX_DOMAIN_DISTANCE-1 ] = -1LL };
+
+/*
+ * Allow override of migration cost - in units of microseconds.
+ * E.g. migration_cost=1000,2000,3000 will set up a level-1 cost
+ * of 1 msec, level-2 cost of 2 msecs and level3 cost of 3 msecs:
+ */
+static int __init migration_cost_setup(char *str)
+{
+ int ints[MAX_DOMAIN_DISTANCE+1], i;
+
+ str = get_options(str, ARRAY_SIZE(ints), ints);
+
+ printk("#ints: %d\n", ints[0]);
+ for (i = 1; i <= ints[0]; i++) {
+ migration_cost[i-1] = (unsigned long long)ints[i]*1000;
+ printk("migration_cost[%d]: %Ld\n", i-1, migration_cost[i-1]);
+ }
+ return 1;
+}
+
+__setup ("migration_cost=", migration_cost_setup);
+
+/*
+ * Global multiplier (divisor) for migration-cutoff values,
+ * in percentiles. E.g. use a value of 150 to get 1.5 times
+ * longer cache-hot cutoff times.
+ *
+ * (We scale it from 100 to 128 to make long long handling easier.)
+ */
+
+#define MIGRATION_FACTOR_SCALE 128
+
+static unsigned int migration_factor = MIGRATION_FACTOR_SCALE;
+
+static int __init setup_migration_factor(char *str)
+{
+ get_option(&str, &migration_factor);
+ migration_factor = migration_factor * MIGRATION_FACTOR_SCALE / 100;
+ return 1;
+}
+
+__setup("migration_factor=", setup_migration_factor);
+
+/*
+ * Estimated distance of two CPUs, measured via the number of domains
+ * we have to pass for the two CPUs to be in the same span:
+ */
+static unsigned long domain_distance(int cpu1, int cpu2)
+{
+ unsigned long distance = 0;
+ struct sched_domain *sd;
+
+ for_each_domain(cpu1, sd) {
+ WARN_ON(!cpu_isset(cpu1, sd->span));
+ if (cpu_isset(cpu2, sd->span))
+ return distance;
+ distance++;
+ }
+ if (distance >= MAX_DOMAIN_DISTANCE) {
+ WARN_ON(1);
+ distance = MAX_DOMAIN_DISTANCE-1;
+ }
+
+ return distance;
+}
+
+static unsigned int migration_debug;
+
+static int __init setup_migration_debug(char *str)
+{
+ get_option(&str, &migration_debug);
+ return 1;
+}
+
+__setup("migration_debug=", setup_migration_debug);
+
+/*
+ * Maximum cache-size that the scheduler should try to measure.
+ * Architectures with larger caches should tune this up during
+ * bootup. Gets used in the domain-setup code (i.e. during SMP
+ * bootup).
+ */
+unsigned int max_cache_size;
+
+static int __init setup_max_cache_size(char *str)
+{
+ get_option(&str, &max_cache_size);
+ return 1;
+}
+
+__setup("max_cache_size=", setup_max_cache_size);
+
+/*
+ * Dirty a big buffer in a hard-to-predict (for the L2 cache) way. This
+ * is the operation that is timed, so we try to generate unpredictable
+ * cachemisses that still end up filling the L2 cache:
+ */
+static void touch_cache(void *__cache, unsigned long __size)
+{
+ unsigned long size = __size/sizeof(long), chunk1 = size/3,
+ chunk2 = 2*size/3;
+ unsigned long *cache = __cache;
+ int i;
+
+ for (i = 0; i < size/6; i += 8) {
+ switch (i % 6) {
+ case 0: cache[i]++;
+ case 1: cache[size-1-i]++;
+ case 2: cache[chunk1-i]++;
+ case 3: cache[chunk1+i]++;
+ case 4: cache[chunk2-i]++;
+ case 5: cache[chunk2+i]++;
+ }
+ }
+}
+
+/*
+ * Measure the cache-cost of one task migration. Returns in units of nsec.
+ */
+static unsigned long long measure_one(void *cache, unsigned long size,
+ int source, int target)
+{
+ cpumask_t mask, saved_mask;
+ unsigned long long t0, t1, t2, t3, cost;
+
+ saved_mask = current->cpus_allowed;
+
+ /*
+ * Flush source caches to RAM and invalidate them:
+ */
+ sched_cacheflush();
+
+ /*
+ * Migrate to the source CPU:
+ */
+ mask = cpumask_of_cpu(source);
+ set_cpus_allowed(current, mask);
+ WARN_ON(smp_processor_id() != source);
+
+ /*
+ * Dirty the working set:
+ */
+ t0 = sched_clock();
+ touch_cache(cache, size);
+ t1 = sched_clock();
+
+ /*
+ * Migrate to the target CPU, dirty the L2 cache and access
+ * the shared buffer. (which represents the working set
+ * of a migrated task.)
+ */
+ mask = cpumask_of_cpu(target);
+ set_cpus_allowed(current, mask);
+ WARN_ON(smp_processor_id() != target);
+
+ t2 = sched_clock();
+ touch_cache(cache, size);
+ t3 = sched_clock();
+
+ cost = t1-t0 + t3-t2;
+
+ if (migration_debug >= 2)
+ printk("[%d->%d]: %8Ld %8Ld %8Ld => %10Ld.\n",
+ source, target, t1-t0, t1-t0, t3-t2, cost);
+ /*
+ * Flush target caches to RAM and invalidate them:
+ */
+ sched_cacheflush();
+
+ set_cpus_allowed(current, saved_mask);
+
+ return cost;
+}
+
+/*
+ * Measure a series of task migrations and return the average
+ * result. Since this code runs early during bootup the system
+ * is 'undisturbed' and the average latency makes sense.
+ *
+ * The algorithm in essence auto-detects the relevant cache-size,
+ * so it will properly detect different cachesizes for different
+ * cache-hierarchies, depending on how the CPUs are connected.
+ *
+ * Architectures can prime the upper limit of the search range via
+ * max_cache_size, otherwise the search range defaults to 20MB...64K.
+ */
+static unsigned long long
+measure_cost(int cpu1, int cpu2, void *cache, unsigned int size)
+{
+ unsigned long long cost1, cost2;
+ int i;
+
+ /*
+ * Measure the migration cost of 'size' bytes, over an
+ * average of 10 runs:
+ *
+ * (We perturb the cache size by a small (0..4k)
+ * value to compensate size/alignment related artifacts.
+ * We also subtract the cost of the operation done on
+ * the same CPU.)
+ */
+ cost1 = 0;
+
+ /*
+ * dry run, to make sure we start off cache-cold on cpu1,
+ * and to get any vmalloc pagefaults in advance:
+ */
+ measure_one(cache, size, cpu1, cpu2);
+ for (i = 0; i < ITERATIONS; i++)
+ cost1 += measure_one(cache, size - i*1024, cpu1, cpu2);
+
+ measure_one(cache, size, cpu2, cpu1);
+ for (i = 0; i < ITERATIONS; i++)
+ cost1 += measure_one(cache, size - i*1024, cpu2, cpu1);
+
+ /*
+ * (We measure the non-migrating [cached] cost on both
+ * cpu1 and cpu2, to handle CPUs with different speeds)
+ */
+ cost2 = 0;
+
+ measure_one(cache, size, cpu1, cpu1);
+ for (i = 0; i < ITERATIONS; i++)
+ cost2 += measure_one(cache, size - i*1024, cpu1, cpu1);
+
+ measure_one(cache, size, cpu2, cpu2);
+ for (i = 0; i < ITERATIONS; i++)
+ cost2 += measure_one(cache, size - i*1024, cpu2, cpu2);
+
+ /*
+ * Get the per-iteration migration cost:
+ */
+ do_div(cost1, 2*ITERATIONS);
+ do_div(cost2, 2*ITERATIONS);
+
+ return cost1 - cost2;
+}
+
+static unsigned long long measure_migration_cost(int cpu1, int cpu2)
+{
+ unsigned long long max_cost = 0, fluct = 0, avg_fluct = 0;
+ unsigned int max_size, size, size_found = 0;
+ long long cost = 0, prev_cost;
+ void *cache;
+
+ /*
+ * Search from max_cache_size*5 down to 64K - the real relevant
+ * cachesize has to lie somewhere inbetween.
+ */
+ if (max_cache_size) {
+ max_size = max(max_cache_size * SEARCH_SCOPE, MIN_CACHE_SIZE);
+ size = max(max_cache_size / SEARCH_SCOPE, MIN_CACHE_SIZE);
+ } else {
+ /*
+ * Since we have no estimation about the relevant
+ * search range
+ */
+ max_size = DEFAULT_CACHE_SIZE * SEARCH_SCOPE;
+ size = MIN_CACHE_SIZE;
+ }
+
+ if (!cpu_online(cpu1) || !cpu_online(cpu2)) {
+ printk("cpu %d and %d not both online!\n", cpu1, cpu2);
+ return 0;
+ }
+
+ /*
+ * Allocate the working set:
+ */
+ cache = vmalloc(max_size);
+ if (!cache) {
+ printk("could not vmalloc %d bytes for cache!\n", 2*max_size);
+ return 1000000; // return 1 msec on very small boxen
+ }
+
+ while (size <= max_size) {
+ prev_cost = cost;
+ cost = measure_cost(cpu1, cpu2, cache, size);
+
+ /*
+ * Update the max:
+ */
+ if (cost > 0) {
+ if (max_cost < cost) {
+ max_cost = cost;
+ size_found = size;
+ }
+ }
+ /*
+ * Calculate average fluctuation, we use this to prevent
+ * noise from triggering an early break out of the loop:
+ */
+ fluct = abs(cost - prev_cost);
+ avg_fluct = (avg_fluct + fluct)/2;
+
+ if (migration_debug)
+ printk("-> [%d][%d][%7d] %3ld.%ld [%3ld.%ld] (%ld): (%8Ld %8Ld)\n",
+ cpu1, cpu2, size,
+ (long)cost / 1000000,
+ ((long)cost / 100000) % 10,
+ (long)max_cost / 1000000,
+ ((long)max_cost / 100000) % 10,
+ domain_distance(cpu1, cpu2),
+ cost, avg_fluct);
+
+ /*
+ * If we iterated at least 20% past the previous maximum,
+ * and the cost has dropped by more than 20% already,
+ * (taking fluctuations into account) then we assume to
+ * have found the maximum and break out of the loop early:
+ */
+ if (size_found && (size*100 > size_found*SIZE_THRESH))
+ if (cost+avg_fluct <= 0 ||
+ max_cost*100 > (cost+avg_fluct)*COST_THRESH) {
+
+ if (migration_debug)
+ printk("-> found max.\n");
+ break;
+ }
+ /*
+ * Increase the cachesize in 5% steps:
+ */
+ size = size * 20 / 19;
+ }
+
+ if (migration_debug)
+ printk("[%d][%d] working set size found: %d, cost: %Ld\n",
+ cpu1, cpu2, size_found, max_cost);
+
+ vfree(cache);
+
+ /*
+ * A task is considered 'cache cold' if at least 2 times
+ * the worst-case cost of migration has passed.
+ *
+ * (this limit is only listened to if the load-balancing
+ * situation is 'nice' - if there is a large imbalance we
+ * ignore it for the sake of CPU utilization and
+ * processing fairness.)
+ */
+ return 2 * max_cost * migration_factor / MIGRATION_FACTOR_SCALE;
+}
+
+static void calibrate_migration_costs(const cpumask_t *cpu_map)
+{
+ int cpu1 = -1, cpu2 = -1, cpu, orig_cpu = raw_smp_processor_id();
+ unsigned long j0, j1, distance, max_distance = 0;
+ struct sched_domain *sd;
+
+ j0 = jiffies;
+
+ /*
+ * First pass - calculate the cacheflush times:
+ */
+ for_each_cpu_mask(cpu1, *cpu_map) {
+ for_each_cpu_mask(cpu2, *cpu_map) {
+ if (cpu1 == cpu2)
+ continue;
+ distance = domain_distance(cpu1, cpu2);
+ max_distance = max(max_distance, distance);
+ /*
+ * No result cached yet?
+ */
+ if (migration_cost[distance] == -1LL)
+ migration_cost[distance] =
+ measure_migration_cost(cpu1, cpu2);
+ }
+ }
+ /*
+ * Second pass - update the sched domain hierarchy with
+ * the new cache-hot-time estimations:
+ */
+ for_each_cpu_mask(cpu, *cpu_map) {
+ distance = 0;
+ for_each_domain(cpu, sd) {
+ sd->cache_hot_time = migration_cost[distance];
+ distance++;
+ }
+ }
+ /*
+ * Print the matrix:
+ */
+ if (migration_debug)
+ printk("migration: max_cache_size: %d, cpu: %d MHz:\n",
+ max_cache_size,
+#ifdef CONFIG_X86
+ cpu_khz/1000
+#else
+ -1
+#endif
+ );
+ printk("migration_cost=");
+ for (distance = 0; distance <= max_distance; distance++) {
+ if (distance)
+ printk(",");
+ printk("%ld", (long)migration_cost[distance] / 1000);
+ }
+ printk("\n");
+ j1 = jiffies;
+ if (migration_debug)
+ printk("migration: %ld seconds\n", (j1-j0)/HZ);
+
+ /*
+ * Move back to the original CPU. NUMA-Q gets confused
+ * if we migrate to another quad during bootup.
+ */
+ if (raw_smp_processor_id() != orig_cpu) {
+ cpumask_t mask = cpumask_of_cpu(orig_cpu),
+ saved_mask = current->cpus_allowed;
+
+ set_cpus_allowed(current, mask);
+ set_cpus_allowed(current, saved_mask);
+ }
+}
+
#ifdef CONFIG_NUMA
+
/**
* find_next_best_node - find the next node to include in a sched_domain
* @node: node whose sched_domain we're building
@@ -5439,6 +5920,10 @@ next_sg:
#endif
cpu_attach_domain(sd, i);
}
+ /*
+ * Tune cache-hot values:
+ */
+ calibrate_migration_costs(cpu_map);
}
/*
* Set up scheduler domains and groups. Callers must hold the hotplug lock.
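
/*
 * Toy restatement (not kernel code) of the arithmetic measure_cost() above
 * performs for one buffer size: average the cross-CPU (migrated) touch cost
 * over 2*ITERATIONS runs, do the same for the same-CPU (cached) runs, and take
 * the difference as the migration cost.  The timings in main() are made up
 * purely for illustration.
 */
#include <stdio.h>

#define ITERATIONS 2    /* mirrors the constant used above */

static long long per_iteration_migration_cost(long long cross_sum,
                                               long long same_sum)
{
    /* cross_sum: sum of 2*ITERATIONS cross-CPU measurements (CPU1<->CPU2)
     * same_sum:  sum of 2*ITERATIONS same-CPU measurements (CPU1->CPU1, CPU2->CPU2) */
    return cross_sum / (2 * ITERATIONS) - same_sum / (2 * ITERATIONS);
}

int main(void)
{
    long long cross = 4 * 1200000LL;   /* e.g. 1.2 ms per run when migrating */
    long long same  = 4 *  800000LL;   /* e.g. 0.8 ms per run when cache-hot */

    printf("migration cost: %lld ns per iteration\n",
           per_iteration_migration_cost(cross, same));
    return 0;
}
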
diff --git a/kernel/signal.c b/kernel/signal.c
index d7611f189ef..1da2e74beb9 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -25,6 +25,7 @@
#include <linux/posix-timers.h>
#include <linux/signal.h>
#include <linux/audit.h>
+#include <linux/capability.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -329,13 +330,20 @@ void __exit_sighand(struct task_struct *tsk)
/* Ok, we're done with the signal handlers */
tsk->sighand = NULL;
if (atomic_dec_and_test(&sighand->count))
- kmem_cache_free(sighand_cachep, sighand);
+ sighand_free(sighand);
}
void exit_sighand(struct task_struct *tsk)
{
write_lock_irq(&tasklist_lock);
- __exit_sighand(tsk);
+ rcu_read_lock();
+ if (tsk->sighand != NULL) {
+ struct sighand_struct *sighand = rcu_dereference(tsk->sighand);
+ spin_lock(&sighand->siglock);
+ __exit_sighand(tsk);
+ spin_unlock(&sighand->siglock);
+ }
+ rcu_read_unlock();
write_unlock_irq(&tasklist_lock);
}
@@ -345,19 +353,20 @@ void exit_sighand(struct task_struct *tsk)
void __exit_signal(struct task_struct *tsk)
{
struct signal_struct * sig = tsk->signal;
- struct sighand_struct * sighand = tsk->sighand;
+ struct sighand_struct * sighand;
if (!sig)
BUG();
if (!atomic_read(&sig->count))
BUG();
+ rcu_read_lock();
+ sighand = rcu_dereference(tsk->sighand);
spin_lock(&sighand->siglock);
posix_cpu_timers_exit(tsk);
if (atomic_dec_and_test(&sig->count)) {
posix_cpu_timers_exit_group(tsk);
- if (tsk == sig->curr_target)
- sig->curr_target = next_thread(tsk);
tsk->signal = NULL;
+ __exit_sighand(tsk);
spin_unlock(&sighand->siglock);
flush_sigqueue(&sig->shared_pending);
} else {
@@ -389,9 +398,11 @@ void __exit_signal(struct task_struct *tsk)
sig->nvcsw += tsk->nvcsw;
sig->nivcsw += tsk->nivcsw;
sig->sched_time += tsk->sched_time;
+ __exit_sighand(tsk);
spin_unlock(&sighand->siglock);
sig = NULL; /* Marker for below. */
}
+ rcu_read_unlock();
clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
flush_sigqueue(&tsk->pending);
if (sig) {
@@ -613,6 +624,33 @@ void signal_wake_up(struct task_struct *t, int resume)
* Returns 1 if any signals were found.
*
* All callers must be holding the siglock.
+ *
+ * This version takes a sigset mask and looks at all signals,
+ * not just those in the first mask word.
+ */
+static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
+{
+ struct sigqueue *q, *n;
+ sigset_t m;
+
+ sigandsets(&m, mask, &s->signal);
+ if (sigisemptyset(&m))
+ return 0;
+
+ signandsets(&s->signal, &s->signal, mask);
+ list_for_each_entry_safe(q, n, &s->list, list) {
+ if (sigismember(mask, q->info.si_signo)) {
+ list_del_init(&q->list);
+ __sigqueue_free(q);
+ }
+ }
+ return 1;
+}
+/*
+ * Remove signals in mask from the pending set and queue.
+ * Returns 1 if any signals were found.
+ *
+ * All callers must be holding the siglock.
*/
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
@@ -1080,18 +1118,29 @@ void zap_other_threads(struct task_struct *p)
}
/*
- * Must be called with the tasklist_lock held for reading!
+ * Must be called under rcu_read_lock() or with tasklist_lock read-held.
*/
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
unsigned long flags;
+ struct sighand_struct *sp;
int ret;
+retry:
ret = check_kill_permission(sig, info, p);
- if (!ret && sig && p->sighand) {
- spin_lock_irqsave(&p->sighand->siglock, flags);
+ if (!ret && sig && (sp = rcu_dereference(p->sighand))) {
+ spin_lock_irqsave(&sp->siglock, flags);
+ if (p->sighand != sp) {
+ spin_unlock_irqrestore(&sp->siglock, flags);
+ goto retry;
+ }
+ if ((atomic_read(&sp->count) == 0) ||
+ (atomic_read(&p->usage) == 0)) {
+ spin_unlock_irqrestore(&sp->siglock, flags);
+ return -ESRCH;
+ }
ret = __group_send_sig_info(sig, info, p);
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
+ spin_unlock_irqrestore(&sp->siglock, flags);
}
return ret;
@@ -1136,14 +1185,21 @@ int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
int error;
+ int acquired_tasklist_lock = 0;
struct task_struct *p;
- read_lock(&tasklist_lock);
+ rcu_read_lock();
+ if (unlikely(sig_kernel_stop(sig) || sig == SIGCONT)) {
+ read_lock(&tasklist_lock);
+ acquired_tasklist_lock = 1;
+ }
p = find_task_by_pid(pid);
error = -ESRCH;
if (p)
error = group_send_sig_info(sig, info, p);
- read_unlock(&tasklist_lock);
+ if (unlikely(acquired_tasklist_lock))
+ read_unlock(&tasklist_lock);
+ rcu_read_unlock();
return error;
}
@@ -1163,8 +1219,7 @@ int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
ret = -ESRCH;
goto out_unlock;
}
- if ((!info || ((unsigned long)info != 1 &&
- (unsigned long)info != 2 && SI_FROMUSER(info)))
+ if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
&& (euid != p->suid) && (euid != p->uid)
&& (uid != p->suid) && (uid != p->uid)) {
ret = -EPERM;
@@ -1355,16 +1410,54 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
unsigned long flags;
int ret = 0;
+ struct sighand_struct *sh;
BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
- read_lock(&tasklist_lock);
+
+ /*
+ * The rcu based delayed sighand destroy makes it possible to
+ * run this without tasklist lock held. The task struct itself
+ * cannot go away as create_timer did get_task_struct().
+ *
+ * We return -1, when the task is marked exiting, so
+ * posix_timer_event can redirect it to the group leader
+ */
+ rcu_read_lock();
if (unlikely(p->flags & PF_EXITING)) {
ret = -1;
goto out_err;
}
- spin_lock_irqsave(&p->sighand->siglock, flags);
+retry:
+ sh = rcu_dereference(p->sighand);
+
+ spin_lock_irqsave(&sh->siglock, flags);
+ if (p->sighand != sh) {
+ /* We raced with exec() in a multithreaded process... */
+ spin_unlock_irqrestore(&sh->siglock, flags);
+ goto retry;
+ }
+
+ /*
+ * We do the check here again to handle the following scenario:
+ *
+ * CPU 0 CPU 1
+ * send_sigqueue
+ * check PF_EXITING
+ * interrupt exit code running
+ * __exit_signal
+ * lock sighand->siglock
+ * unlock sighand->siglock
+ * lock sh->siglock
+ * add(tsk->pending) flush_sigqueue(tsk->pending)
+ *
+ */
+
+ if (unlikely(p->flags & PF_EXITING)) {
+ ret = -1;
+ goto out;
+ }
if (unlikely(!list_empty(&q->list))) {
/*
@@ -1388,9 +1481,9 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
signal_wake_up(p, sig == SIGKILL);
out:
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
+ spin_unlock_irqrestore(&sh->siglock, flags);
out_err:
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
return ret;
}
@@ -1402,7 +1495,9 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
int ret = 0;
BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
+
read_lock(&tasklist_lock);
+ /* Since it_lock is held, p->sighand cannot be NULL. */
spin_lock_irqsave(&p->sighand->siglock, flags);
handle_stop_signal(sig, p);
@@ -1436,7 +1531,7 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
out:
spin_unlock_irqrestore(&p->sighand->siglock, flags);
read_unlock(&tasklist_lock);
- return(ret);
+ return ret;
}
/*
@@ -2338,6 +2433,7 @@ int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
struct k_sigaction *k;
+ sigset_t mask;
if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
return -EINVAL;
@@ -2385,9 +2481,11 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
*k = *act;
sigdelsetmask(&k->sa.sa_mask,
sigmask(SIGKILL) | sigmask(SIGSTOP));
- rm_from_queue(sigmask(sig), &t->signal->shared_pending);
+ sigemptyset(&mask);
+ sigaddset(&mask, sig);
+ rm_from_queue_full(&mask, &t->signal->shared_pending);
do {
- rm_from_queue(sigmask(sig), &t->pending);
+ rm_from_queue_full(&mask, &t->pending);
recalc_sigpending_tsk(t);
t = next_thread(t);
} while (t != current);
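
/*
 * group_send_sig_info() and send_sigqueue() above share one RCU idiom:
 * dereference ->sighand under rcu_read_lock(), take its siglock, then
 * re-check that the task still points at the same sighand (an exec() may
 * have swapped it), retrying otherwise.  This is a generic, non-kernel
 * sketch of that lock-and-revalidate pattern; the types and the lock(),
 * unlock() and rcu_fetch() helpers are made-up stand-ins for spin_lock(),
 * spin_unlock() and rcu_dereference().
 */
struct obj   { int locked; };           /* stand-in for sighand_struct + siglock */
struct owner { struct obj *cur; };      /* stand-in for task_struct->sighand     */

extern void lock(struct obj *o);
extern void unlock(struct obj *o);
extern struct obj *rcu_fetch(struct owner *w);   /* RCU-safe pointer load */

static struct obj *lock_current_obj(struct owner *w)
{
    struct obj *o;

    for (;;) {
        o = rcu_fetch(w);
        if (!o)
            return NULL;                /* owner has already torn it down */
        lock(o);
        if (rcu_fetch(w) == o)          /* still the owner's object? */
            return o;                   /* locked and validated */
        unlock(o);                      /* raced with a swap; try again */
    }
}
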
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index b3d4dc858e3..dcfb5d73146 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -87,13 +87,9 @@ static int stop_machine(void)
{
int i, ret = 0;
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
- mm_segment_t old_fs = get_fs();
/* One high-prio thread per cpu. We'll do this one. */
- set_fs(KERNEL_DS);
- sys_sched_setscheduler(current->pid, SCHED_FIFO,
- (struct sched_param __user *)&param);
- set_fs(old_fs);
+ sched_setscheduler(current, SCHED_FIFO, &param);
atomic_set(&stopmachine_thread_ack, 0);
stopmachine_num_threads = 0;
diff --git a/kernel/sys.c b/kernel/sys.c
index eecf84526af..d09cac23fdf 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
+#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
@@ -223,6 +224,18 @@ int unregister_reboot_notifier(struct notifier_block * nb)
EXPORT_SYMBOL(unregister_reboot_notifier);
+#ifndef CONFIG_SECURITY
+int capable(int cap)
+{
+ if (cap_raised(current->cap_effective, cap)) {
+ current->flags |= PF_SUPERPRIV;
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(capable);
+#endif
+
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
int no_nice;
@@ -489,6 +502,12 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
magic2 != LINUX_REBOOT_MAGIC2C))
return -EINVAL;
+ /* Instead of trying to make the power_off code look like
+ * halt when pm_power_off is not set do it the easy way.
+ */
+ if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
+ cmd = LINUX_REBOOT_CMD_HALT;
+
lock_kernel();
switch (cmd) {
case LINUX_REBOOT_CMD_RESTART:
@@ -1084,10 +1103,11 @@ asmlinkage long sys_times(struct tms __user * tbuf)
asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
struct task_struct *p;
+ struct task_struct *group_leader = current->group_leader;
int err = -EINVAL;
if (!pid)
- pid = current->pid;
+ pid = group_leader->pid;
if (!pgid)
pgid = pid;
if (pgid < 0)
@@ -1107,16 +1127,16 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
if (!thread_group_leader(p))
goto out;
- if (p->parent == current || p->real_parent == current) {
+ if (p->real_parent == group_leader) {
err = -EPERM;
- if (p->signal->session != current->signal->session)
+ if (p->signal->session != group_leader->signal->session)
goto out;
err = -EACCES;
if (p->did_exec)
goto out;
} else {
err = -ESRCH;
- if (p != current)
+ if (p != group_leader)
goto out;
}
@@ -1128,7 +1148,7 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
struct task_struct *p;
do_each_task_pid(pgid, PIDTYPE_PGID, p) {
- if (p->signal->session == current->signal->session)
+ if (p->signal->session == group_leader->signal->session)
goto ok_pgid;
} while_each_task_pid(pgid, PIDTYPE_PGID, p);
goto out;
@@ -1208,24 +1228,22 @@ asmlinkage long sys_getsid(pid_t pid)
asmlinkage long sys_setsid(void)
{
+ struct task_struct *group_leader = current->group_leader;
struct pid *pid;
int err = -EPERM;
- if (!thread_group_leader(current))
- return -EINVAL;
-
down(&tty_sem);
write_lock_irq(&tasklist_lock);
- pid = find_pid(PIDTYPE_PGID, current->pid);
+ pid = find_pid(PIDTYPE_PGID, group_leader->pid);
if (pid)
goto out;
- current->signal->leader = 1;
- __set_special_pids(current->pid, current->pid);
- current->signal->tty = NULL;
- current->signal->tty_old_pgrp = 0;
- err = process_group(current);
+ group_leader->signal->leader = 1;
+ __set_special_pids(group_leader->pid, group_leader->pid);
+ group_leader->signal->tty = NULL;
+ group_leader->signal->tty_old_pgrp = 0;
+ err = process_group(group_leader);
out:
write_unlock_irq(&tasklist_lock);
up(&tty_sem);
@@ -1687,7 +1705,10 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
if (unlikely(!p->signal))
return;
+ utime = stime = cputime_zero;
+
switch (who) {
+ case RUSAGE_BOTH:
case RUSAGE_CHILDREN:
spin_lock_irqsave(&p->sighand->siglock, flags);
utime = p->signal->cutime;
@@ -1697,22 +1718,11 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
r->ru_minflt = p->signal->cmin_flt;
r->ru_majflt = p->signal->cmaj_flt;
spin_unlock_irqrestore(&p->sighand->siglock, flags);
- cputime_to_timeval(utime, &r->ru_utime);
- cputime_to_timeval(stime, &r->ru_stime);
- break;
+
+ if (who == RUSAGE_CHILDREN)
+ break;
+
case RUSAGE_SELF:
- spin_lock_irqsave(&p->sighand->siglock, flags);
- utime = stime = cputime_zero;
- goto sum_group;
- case RUSAGE_BOTH:
- spin_lock_irqsave(&p->sighand->siglock, flags);
- utime = p->signal->cutime;
- stime = p->signal->cstime;
- r->ru_nvcsw = p->signal->cnvcsw;
- r->ru_nivcsw = p->signal->cnivcsw;
- r->ru_minflt = p->signal->cmin_flt;
- r->ru_majflt = p->signal->cmaj_flt;
- sum_group:
utime = cputime_add(utime, p->signal->utime);
stime = cputime_add(stime, p->signal->stime);
r->ru_nvcsw += p->signal->nvcsw;
@@ -1729,13 +1739,14 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
r->ru_majflt += t->maj_flt;
t = next_thread(t);
} while (t != p);
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
- cputime_to_timeval(utime, &r->ru_utime);
- cputime_to_timeval(stime, &r->ru_stime);
break;
+
default:
BUG();
}
+
+ cputime_to_timeval(utime, &r->ru_utime);
+ cputime_to_timeval(stime, &r->ru_stime);
}
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 1ab2370e2ef..17313b99e53 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -82,6 +82,28 @@ cond_syscall(compat_sys_socketcall);
cond_syscall(sys_inotify_init);
cond_syscall(sys_inotify_add_watch);
cond_syscall(sys_inotify_rm_watch);
+cond_syscall(sys_migrate_pages);
+cond_syscall(sys_chown16);
+cond_syscall(sys_fchown16);
+cond_syscall(sys_getegid16);
+cond_syscall(sys_geteuid16);
+cond_syscall(sys_getgid16);
+cond_syscall(sys_getgroups16);
+cond_syscall(sys_getresgid16);
+cond_syscall(sys_getresuid16);
+cond_syscall(sys_getuid16);
+cond_syscall(sys_lchown16);
+cond_syscall(sys_setfsgid16);
+cond_syscall(sys_setfsuid16);
+cond_syscall(sys_setgid16);
+cond_syscall(sys_setgroups16);
+cond_syscall(sys_setregid16);
+cond_syscall(sys_setresgid16);
+cond_syscall(sys_setresuid16);
+cond_syscall(sys_setreuid16);
+cond_syscall(sys_setuid16);
+cond_syscall(sys_vm86old);
+cond_syscall(sys_vm86);
/* arch-specific weak syscall entries */
cond_syscall(sys_pciconfig_read);
@@ -90,3 +112,5 @@ cond_syscall(sys_pciconfig_iobase);
cond_syscall(sys32_ipc);
cond_syscall(sys32_sysctl);
cond_syscall(ppc_rtas);
+cond_syscall(sys_spu_run);
+cond_syscall(sys_spu_create);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 345f4a1d533..62d4d956687 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
+#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/utsname.h>
#include <linux/capability.h>
@@ -68,6 +69,8 @@ extern int min_free_kbytes;
extern int printk_ratelimit_jiffies;
extern int printk_ratelimit_burst;
extern int pid_max_min, pid_max_max;
+extern int sysctl_drop_caches;
+extern int percpu_pagelist_fraction;
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86)
int unknown_nmi_panic;
@@ -78,6 +81,7 @@ extern int proc_unknown_nmi_panic(ctl_table *, int, struct file *,
/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
static int maxolduid = 65535;
static int minolduid;
+static int min_percpu_pagelist_fract = 8;
static int ngroups_max = NGROUPS_MAX;
@@ -108,7 +112,7 @@ extern int pwrsw_enabled;
extern int unaligned_enabled;
#endif
-#ifdef CONFIG_ARCH_S390
+#ifdef CONFIG_S390
#ifdef CONFIG_MATHEMU
extern int sysctl_ieee_emulation_warnings;
#endif
@@ -542,7 +546,7 @@ static ctl_table kern_table[] = {
.extra1 = &minolduid,
.extra2 = &maxolduid,
},
-#ifdef CONFIG_ARCH_S390
+#ifdef CONFIG_S390
#ifdef CONFIG_MATHEMU
{
.ctl_name = KERN_IEEE_EMULATION_WARNINGS,
@@ -644,7 +648,7 @@ static ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
-#if defined(CONFIG_ARCH_S390)
+#if defined(CONFIG_S390)
{
.ctl_name = KERN_SPIN_RETRY,
.procname = "spin_retry",
@@ -775,6 +779,15 @@ static ctl_table vm_table[] = {
.strategy = &sysctl_intvec,
},
{
+ .ctl_name = VM_DROP_PAGECACHE,
+ .procname = "drop_caches",
+ .data = &sysctl_drop_caches,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = drop_caches_sysctl_handler,
+ .strategy = &sysctl_intvec,
+ },
+ {
.ctl_name = VM_MIN_FREE_KBYTES,
.procname = "min_free_kbytes",
.data = &min_free_kbytes,
@@ -784,6 +797,16 @@ static ctl_table vm_table[] = {
.strategy = &sysctl_intvec,
.extra1 = &zero,
},
+ {
+ .ctl_name = VM_PERCPU_PAGELIST_FRACTION,
+ .procname = "percpu_pagelist_fraction",
+ .data = &percpu_pagelist_fraction,
+ .maxlen = sizeof(percpu_pagelist_fraction),
+ .mode = 0644,
+ .proc_handler = &percpu_pagelist_fraction_sysctl_handler,
+ .strategy = &sysctl_intvec,
+ .extra1 = &min_percpu_pagelist_fract,
+ },
#ifdef CONFIG_MMU
{
.ctl_name = VM_MAX_MAP_COUNT,
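The two new entries surface as /proc/sys/vm/drop_caches and
/proc/sys/vm/percpu_pagelist_fraction. A minimal userspace sketch of driving
drop_caches; the value semantics (1 drops the pagecache, 2 drops dentries and
inodes, 3 drops both) follow the handler added elsewhere in this series and
are stated here as an assumption:

	#include <fcntl.h>
	#include <unistd.h>

	static int drop_caches(const char *val)	/* "1", "2" or "3" */
	{
		int fd = open("/proc/sys/vm/drop_caches", O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;		/* file is 0644, root-owned */
		n = write(fd, val, 1);
		close(fd);
		return n == 1 ? 0 : -1;
	}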
diff --git a/kernel/time.c b/kernel/time.c
index b94bfa8c03e..7477b1d2079 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/timex.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/syscalls.h>
@@ -154,6 +155,9 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
static int firsttime = 1;
int error = 0;
+ if (!timespec_valid(tv))
+ return -EINVAL;
+
error = security_settime(tv, tz);
if (error)
return error;
@@ -561,27 +565,107 @@ void getnstimeofday(struct timespec *tv)
EXPORT_SYMBOL_GPL(getnstimeofday);
#endif
-void getnstimestamp(struct timespec *ts)
+/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
+ * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
+ * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
+ *
+ * [For the Julian calendar (which was used in Russia before 1917,
+ * Britain & colonies before 1752, anywhere else before 1582,
+ * and is still in use by some communities) leave out the
+ * -year/100+year/400 terms, and add 10.]
+ *
+ * This algorithm was first published by Gauss (I think).
+ *
+ * WARNING: this function will overflow on 2106-02-07 06:28:16 on
+ * machines where long is 32-bit! (However, as time_t is signed, we
+ * will already get problems at other places on 2038-01-19 03:14:08)
+ */
+unsigned long
+mktime(const unsigned int year0, const unsigned int mon0,
+ const unsigned int day, const unsigned int hour,
+ const unsigned int min, const unsigned int sec)
{
- unsigned int seq;
- struct timespec wall2mono;
+ unsigned int mon = mon0, year = year0;
- /* synchronize with settimeofday() changes */
- do {
- seq = read_seqbegin(&xtime_lock);
- getnstimeofday(ts);
- wall2mono = wall_to_monotonic;
- } while(unlikely(read_seqretry(&xtime_lock, seq)));
-
- /* adjust to monotonicaly-increasing values */
- ts->tv_sec += wall2mono.tv_sec;
- ts->tv_nsec += wall2mono.tv_nsec;
- while (unlikely(ts->tv_nsec >= NSEC_PER_SEC)) {
- ts->tv_nsec -= NSEC_PER_SEC;
- ts->tv_sec++;
+ /* 1..12 -> 11,12,1..10 */
+ if (0 >= (int) (mon -= 2)) {
+ mon += 12; /* Puts Feb last since it has leap day */
+ year -= 1;
}
+
+ return ((((unsigned long)
+ (year/4 - year/100 + year/400 + 367*mon/12 + day) +
+ year*365 - 719499
+ )*24 + hour /* now have hours */
+ )*60 + min /* now have minutes */
+ )*60 + sec; /* finally seconds */
+}
+
+EXPORT_SYMBOL(mktime);
+
+/**
+ * set_normalized_timespec - set timespec sec and nsec parts and normalize
+ *
+ * @ts: pointer to timespec variable to be set
+ * @sec: seconds to set
+ * @nsec: nanoseconds to set
+ *
+ * Set seconds and nanoseconds field of a timespec variable and
+ * normalize to the timespec storage format
+ *
+ * Note: The tv_nsec part is always in the range of
+ * 0 <= tv_nsec < NSEC_PER_SEC
+ * For negative values only the tv_sec field is negative !
+ */
+void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
+{
+ while (nsec >= NSEC_PER_SEC) {
+ nsec -= NSEC_PER_SEC;
+ ++sec;
+ }
+ while (nsec < 0) {
+ nsec += NSEC_PER_SEC;
+ --sec;
+ }
+ ts->tv_sec = sec;
+ ts->tv_nsec = nsec;
+}
+
+/**
+ * ns_to_timespec - Convert nanoseconds to timespec
+ * @nsec: the nanoseconds value to be converted
+ *
+ * Returns the timespec representation of the nsec parameter.
+ */
+inline struct timespec ns_to_timespec(const nsec_t nsec)
+{
+ struct timespec ts;
+
+ if (nsec)
+ ts.tv_sec = div_long_long_rem_signed(nsec, NSEC_PER_SEC,
+ &ts.tv_nsec);
+ else
+ ts.tv_sec = ts.tv_nsec = 0;
+
+ return ts;
+}
+
+/**
+ * ns_to_timeval - Convert nanoseconds to timeval
+ * @nsec: the nanoseconds value to be converted
+ *
+ * Returns the timeval representation of the nsec parameter.
+ */
+struct timeval ns_to_timeval(const nsec_t nsec)
+{
+ struct timespec ts = ns_to_timespec(nsec);
+ struct timeval tv;
+
+ tv.tv_sec = ts.tv_sec;
+ tv.tv_usec = (suseconds_t) ts.tv_nsec / 1000;
+
+ return tv;
}
-EXPORT_SYMBOL_GPL(getnstimestamp);
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
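The mktime() comment above is easiest to trust with a couple of worked
values. Nothing below is part of the patch; the constants simply follow from
the formula: 1970-01-01 00:00:00 maps to 0, and 2000-03-01 00:00:00 is 11017
days, i.e. 951868800 seconds, after the epoch:

	/* hypothetical self-test, e.g. from an init routine */
	BUG_ON(mktime(1970, 1, 1, 0, 0, 0) != 0);
	BUG_ON(mktime(2000, 3, 1, 0, 0, 0) != 951868800UL);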
diff --git a/kernel/timer.c b/kernel/timer.c
index fd74268d866..4f1cb0ab525 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -33,6 +33,7 @@
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
+#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -857,6 +858,7 @@ static void run_timer_softirq(struct softirq_action *h)
{
tvec_base_t *base = &__get_cpu_var(tvec_bases);
+ hrtimer_run_queues();
if (time_after_eq(jiffies, base->timer_jiffies))
__run_timers(base);
}
@@ -1118,62 +1120,6 @@ asmlinkage long sys_gettid(void)
return current->pid;
}
-static long __sched nanosleep_restart(struct restart_block *restart)
-{
- unsigned long expire = restart->arg0, now = jiffies;
- struct timespec __user *rmtp = (struct timespec __user *) restart->arg1;
- long ret;
-
- /* Did it expire while we handled signals? */
- if (!time_after(expire, now))
- return 0;
-
- expire = schedule_timeout_interruptible(expire - now);
-
- ret = 0;
- if (expire) {
- struct timespec t;
- jiffies_to_timespec(expire, &t);
-
- ret = -ERESTART_RESTARTBLOCK;
- if (rmtp && copy_to_user(rmtp, &t, sizeof(t)))
- ret = -EFAULT;
- /* The 'restart' block is already filled in */
- }
- return ret;
-}
-
-asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
-{
- struct timespec t;
- unsigned long expire;
- long ret;
-
- if (copy_from_user(&t, rqtp, sizeof(t)))
- return -EFAULT;
-
- if ((t.tv_nsec >= 1000000000L) || (t.tv_nsec < 0) || (t.tv_sec < 0))
- return -EINVAL;
-
- expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
- expire = schedule_timeout_interruptible(expire);
-
- ret = 0;
- if (expire) {
- struct restart_block *restart;
- jiffies_to_timespec(expire, &t);
- if (rmtp && copy_to_user(rmtp, &t, sizeof(t)))
- return -EFAULT;
-
- restart = &current_thread_info()->restart_block;
- restart->fn = nanosleep_restart;
- restart->arg0 = jiffies + expire;
- restart->arg1 = (unsigned long) rmtp;
- ret = -ERESTART_RESTARTBLOCK;
- }
- return ret;
-}
-
/*
* sys_sysinfo - fill in sysinfo struct
*/
diff --git a/kernel/uid16.c b/kernel/uid16.c
index f669941e8b2..aa25605027c 100644
--- a/kernel/uid16.c
+++ b/kernel/uid16.c
@@ -10,6 +10,7 @@
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
+#include <linux/capability.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/security.h>
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2bd5aee1c73..82c4fa70595 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -29,7 +29,8 @@
#include <linux/kthread.h>
/*
- * The per-CPU workqueue (if single thread, we always use cpu 0's).
+ * The per-CPU workqueue (if single thread, we always use the first
+ * possible cpu).
*
* The sequence counters are for flush_scheduled_work(). It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
@@ -69,6 +70,8 @@ struct workqueue_struct {
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
+static int singlethread_cpu;
+
/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
@@ -102,7 +105,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
if (!test_and_set_bit(0, &work->pending)) {
if (unlikely(is_single_threaded(wq)))
- cpu = any_online_cpu(cpu_online_map);
+ cpu = singlethread_cpu;
BUG_ON(!list_empty(&work->entry));
__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
ret = 1;
@@ -118,7 +121,7 @@ static void delayed_work_timer_fn(unsigned long __data)
int cpu = smp_processor_id();
if (unlikely(is_single_threaded(wq)))
- cpu = any_online_cpu(cpu_online_map);
+ cpu = singlethread_cpu;
__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
@@ -267,7 +270,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
if (is_single_threaded(wq)) {
/* Always use first cpu's area. */
- flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, any_online_cpu(cpu_online_map)));
+ flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
} else {
int cpu;
@@ -315,12 +318,17 @@ struct workqueue_struct *__create_workqueue(const char *name,
return NULL;
wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
+ if (!wq->cpu_wq) {
+ kfree(wq);
+ return NULL;
+ }
+
wq->name = name;
/* We don't need the distraction of CPUs appearing and vanishing. */
lock_cpu_hotplug();
if (singlethread) {
INIT_LIST_HEAD(&wq->list);
- p = create_workqueue_thread(wq, any_online_cpu(cpu_online_map));
+ p = create_workqueue_thread(wq, singlethread_cpu);
if (!p)
destroy = 1;
else
@@ -374,7 +382,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
/* We don't need the distraction of CPUs appearing and vanishing. */
lock_cpu_hotplug();
if (is_single_threaded(wq))
- cleanup_workqueue_thread(wq, any_online_cpu(cpu_online_map));
+ cleanup_workqueue_thread(wq, singlethread_cpu);
else {
for_each_online_cpu(cpu)
cleanup_workqueue_thread(wq, cpu);
@@ -419,6 +427,25 @@ int schedule_delayed_work_on(int cpu,
return ret;
}
+int schedule_on_each_cpu(void (*func) (void *info), void *info)
+{
+ int cpu;
+ struct work_struct *work;
+
+ work = kmalloc(NR_CPUS * sizeof(struct work_struct), GFP_KERNEL);
+
+ if (!work)
+ return -ENOMEM;
+ for_each_online_cpu(cpu) {
+ INIT_WORK(work + cpu, func, info);
+ __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
+ work + cpu);
+ }
+ flush_workqueue(keventd_wq);
+ kfree(work);
+ return 0;
+}
+
void flush_scheduled_work(void)
{
flush_workqueue(keventd_wq);
@@ -543,6 +570,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
void init_workqueues(void)
{
+ singlethread_cpu = first_cpu(cpu_possible_map);
hotcpu_notifier(workqueue_cpu_callback, 0);
keventd_wq = create_workqueue("events");
BUG_ON(!keventd_wq);
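schedule_on_each_cpu() queues one work item per online CPU on keventd and
then flush_workqueue()s, so it may sleep and must be called from process
context. A minimal usage sketch; the callback and counter are invented for
illustration:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>
	#include <asm/atomic.h>

	static atomic_t visits = ATOMIC_INIT(0);

	static void count_visit(void *info)
	{
		atomic_inc((atomic_t *)info);	/* runs once on each online CPU */
	}

	/* somewhere in process context: */
	if (schedule_on_each_cpu(count_visit, &visits))
		printk(KERN_WARNING "per-cpu walk failed: -ENOMEM\n");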
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 156822e3cc7..a609235a517 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -9,15 +9,9 @@ config PRINTK_TIME
in kernel startup.
-config DEBUG_KERNEL
- bool "Kernel debugging"
- help
- Say Y here if you are developing drivers or trying to debug and
- identify kernel problems.
-
config MAGIC_SYSRQ
bool "Magic SysRq key"
- depends on DEBUG_KERNEL && !UML
+ depends on !UML
help
If you say Y here, you will have some control over the system even
if the system crashes for example during kernel debugging (e.g., you
@@ -29,10 +23,16 @@ config MAGIC_SYSRQ
keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
unless you really know what this hack does.
+config DEBUG_KERNEL
+ bool "Kernel debugging"
+ help
+ Say Y here if you are developing drivers or trying to debug and
+ identify kernel problems.
+
config LOG_BUF_SHIFT
int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" if DEBUG_KERNEL
range 12 21
- default 17 if ARCH_S390
+ default 17 if S390
default 16 if X86_NUMAQ || IA64
default 15 if SMP
default 14
@@ -79,7 +79,7 @@ config SCHEDSTATS
config DEBUG_SLAB
bool "Debug memory allocations"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL && SLAB
help
Say Y here to have the kernel do limited verification on memory
allocation as well as poisoning memory on free to catch use of freed
@@ -95,6 +95,14 @@ config DEBUG_PREEMPT
if kernel code uses it in a preemption-unsafe way. Also, the kernel
will detect preemption count underflows.
+config DEBUG_MUTEXES
+ bool "Mutex debugging, deadlock detection"
+ default y
+ depends on DEBUG_KERNEL
+ help
+ This allows mutex semantics violations and mutex related deadlocks
+ (lockups) to be detected and reported automatically.
+
config DEBUG_SPINLOCK
bool "Spinlock debugging"
depends on DEBUG_KERNEL
@@ -172,7 +180,8 @@ config DEBUG_VM
bool "Debug VM"
depends on DEBUG_KERNEL
help
- Enable this to debug the virtual-memory system.
+ Enable this to turn on extended checks in the virtual-memory system
+ that may impact performance.
If unsure, say N.
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 23d3b1147fe..48e708381d4 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -519,7 +519,7 @@ EXPORT_SYMBOL(bitmap_parselist);
*
* Map the bit at position @pos in @buf (of length @bits) to the
* ordinal of which set bit it is. If it is not set or if @pos
- * is not a valid bit position, map to zero (0).
+ * is not a valid bit position, map to -1.
*
* If for example, just bits 4 through 7 are set in @buf, then @pos
* values 4 through 7 will get mapped to 0 through 3, respectively,
@@ -531,18 +531,19 @@ EXPORT_SYMBOL(bitmap_parselist);
*/
static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
{
- int ord = 0;
+ int i, ord;
- if (pos >= 0 && pos < bits) {
- int i;
+ if (pos < 0 || pos >= bits || !test_bit(pos, buf))
+ return -1;
- for (i = find_first_bit(buf, bits);
- i < pos;
- i = find_next_bit(buf, bits, i + 1))
- ord++;
- if (i > pos)
- ord = 0;
+ i = find_first_bit(buf, bits);
+ ord = 0;
+ while (i < pos) {
+ i = find_next_bit(buf, bits, i + 1);
+ ord++;
}
+ BUG_ON(i != pos);
+
return ord;
}
@@ -553,11 +554,12 @@ static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
* @bits: number of valid bit positions in @buf
*
* Map the ordinal offset of bit @ord in @buf to its position in @buf.
- * If @ord is not the ordinal offset of a set bit in @buf, map to zero (0).
+ * Value of @ord should be in range 0 <= @ord < weight(buf), else
+ * results are undefined.
*
* If for example, just bits 4 through 7 are set in @buf, then @ord
* values 0 through 3 will get mapped to 4 through 7, respectively,
- * and all other @ord valuds will get mapped to 0. When @ord value 3
+ * and all other @ord values return undefined values. When @ord value 3
* gets mapped to (returns) @pos value 7 in this example, that means
* that the 3rd set bit (starting with 0th) is at position 7 in @buf.
*
@@ -583,8 +585,8 @@ static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
/**
* bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap
- * @src: subset to be remapped
* @dst: remapped result
+ * @src: subset to be remapped
* @old: defines domain of map
* @new: defines range of map
* @bits: number of bits in each of these bitmaps
@@ -596,49 +598,42 @@ static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
* weight of @old, map the position of the n-th set bit in @old to
* the position of the m-th set bit in @new, where m == n % w.
*
- * If either of the @old and @new bitmaps are empty, or if@src and @dst
- * point to the same location, then this routine does nothing.
+ * If either of the @old and @new bitmaps are empty, or if @src and
+ * @dst point to the same location, then this routine copies @src
+ * to @dst.
*
- * The positions of unset bits in @old are mapped to the position of
- * the first set bit in @new.
+ * The positions of unset bits in @old are mapped to themselves
+ * (the identity map).
*
* Apply the above specified mapping to @src, placing the result in
* @dst, clearing any bits previously set in @dst.
*
- * The resulting value of @dst will have either the same weight as
- * @src, or less weight in the general case that the mapping wasn't
- * injective due to the weight of @new being less than that of @old.
- * The resulting value of @dst will never have greater weight than
- * that of @src, except perhaps in the case that one of the above
- * conditions was not met and this routine just returned.
- *
* For example, lets say that @old has bits 4 through 7 set, and
* @new has bits 12 through 15 set. This defines the mapping of bit
* position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
- * bit positions to 12 (the first set bit in @new. So if say @src
- * comes into this routine with bits 1, 5 and 7 set, then @dst should
- * leave with bits 12, 13 and 15 set.
+ * bit positions unchanged. So if say @src comes into this routine
+ * with bits 1, 5 and 7 set, then @dst should leave with bits 1,
+ * 13 and 15 set.
*/
void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new,
int bits)
{
- int s;
+ int oldbit, w;
- if (bitmap_weight(old, bits) == 0)
- return;
- if (bitmap_weight(new, bits) == 0)
- return;
if (dst == src) /* following doesn't handle inplace remaps */
return;
-
bitmap_zero(dst, bits);
- for (s = find_first_bit(src, bits);
- s < bits;
- s = find_next_bit(src, bits, s + 1)) {
- int x = bitmap_pos_to_ord(old, s, bits);
- int y = bitmap_ord_to_pos(new, x, bits);
- set_bit(y, dst);
+
+ w = bitmap_weight(new, bits);
+ for (oldbit = find_first_bit(src, bits);
+ oldbit < bits;
+ oldbit = find_next_bit(src, bits, oldbit + 1)) {
+ int n = bitmap_pos_to_ord(old, oldbit, bits);
+ if (n < 0 || w == 0)
+ set_bit(oldbit, dst); /* identity map */
+ else
+ set_bit(bitmap_ord_to_pos(new, n % w, bits), dst);
}
}
EXPORT_SYMBOL(bitmap_remap);
@@ -657,8 +652,8 @@ EXPORT_SYMBOL(bitmap_remap);
* weight of @old, map the position of the n-th set bit in @old to
* the position of the m-th set bit in @new, where m == n % w.
*
- * The positions of unset bits in @old are mapped to the position of
- * the first set bit in @new.
+ * The positions of unset bits in @old are mapped to themselves
+ * (the identity map).
*
* Apply the above specified mapping to bit position @oldbit, returning
* the new bit position.
@@ -666,14 +661,18 @@ EXPORT_SYMBOL(bitmap_remap);
* For example, lets say that @old has bits 4 through 7 set, and
* @new has bits 12 through 15 set. This defines the mapping of bit
* position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
- * bit positions to 12 (the first set bit in @new. So if say @oldbit
- * is 5, then this routine returns 13.
+ * bit positions unchanged. So if say @oldbit is 5, then this routine
+ * returns 13.
*/
int bitmap_bitremap(int oldbit, const unsigned long *old,
const unsigned long *new, int bits)
{
- int x = bitmap_pos_to_ord(old, oldbit, bits);
- return bitmap_ord_to_pos(new, x, bits);
+ int w = bitmap_weight(new, bits);
+ int n = bitmap_pos_to_ord(old, oldbit, bits);
+ if (n < 0 || w == 0)
+ return oldbit;
+ else
+ return bitmap_ord_to_pos(new, n % w, bits);
}
EXPORT_SYMBOL(bitmap_bitremap);
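The new remap semantics are easiest to see with the example from the comment
turned into code. A small sketch reusing the @old = bits 4-7, @new = bits
12-15 case described above; the buffer size and demo function are
illustrative:

	#include <linux/bitmap.h>
	#include <linux/bitops.h>

	static void bitmap_remap_demo(void)
	{
		DECLARE_BITMAP(src, 32) = { 0 };
		DECLARE_BITMAP(dst, 32) = { 0 };
		DECLARE_BITMAP(old, 32) = { 0 };
		DECLARE_BITMAP(new, 32) = { 0 };
		int i;

		__set_bit(1, src); __set_bit(5, src); __set_bit(7, src);
		for (i = 4; i <= 7; i++)
			__set_bit(i, old);
		for (i = 12; i <= 15; i++)
			__set_bit(i, new);

		bitmap_remap(dst, src, old, new, 32);
		/* dst now has bits 1, 13 and 15 set: bit 1 is outside @old and
		 * maps to itself; bits 5 and 7 are the 2nd and 4th set bits of
		 * @old and land on the 2nd and 4th set bits of @new. */
	}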
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 305a9663aee..a65c3145554 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -1,47 +1,11 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
-#include <asm/system.h>
-#ifdef __HAVE_ARCH_CMPXCHG
/*
* This is an implementation of the notion of "decrement a
* reference count, and return locked if it decremented to zero".
*
- * This implementation can be used on any architecture that
- * has a cmpxchg, and where atomic->value is an int holding
- * the value of the atomic (i.e. the high bits aren't used
- * for a lock or anything like that).
- */
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
- int counter;
- int newcount;
-
- for (;;) {
- counter = atomic_read(atomic);
- newcount = counter - 1;
- if (!newcount)
- break; /* do it the slow way */
-
- newcount = cmpxchg(&atomic->counter, counter, newcount);
- if (newcount == counter)
- return 0;
- }
-
- spin_lock(lock);
- if (atomic_dec_and_test(atomic))
- return 1;
- spin_unlock(lock);
- return 0;
-}
-#else
-/*
- * This is an architecture-neutral, but slow,
- * implementation of the notion of "decrement
- * a reference count, and return locked if it
- * decremented to zero".
- *
* NOTE NOTE NOTE! This is _not_ equivalent to
*
* if (atomic_dec_and_test(&atomic)) {
@@ -52,21 +16,20 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
*
* because the spin-lock and the decrement must be
* "atomic".
- *
- * This slow version gets the spinlock unconditionally,
- * and releases it if it isn't needed. Architectures
- * are encouraged to come up with better approaches,
- * this is trivially done efficiently using a load-locked
- * store-conditional approach, for example.
*/
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
+#ifdef CONFIG_SMP
+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+ if (atomic_add_unless(atomic, -1, 1))
+ return 0;
+#endif
+ /* Otherwise do it the slow way */
spin_lock(lock);
if (atomic_dec_and_test(atomic))
return 1;
spin_unlock(lock);
return 0;
}
-#endif
EXPORT_SYMBOL(_atomic_dec_and_lock);
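Callers normally reach this via the atomic_dec_and_lock() wrapper; the
contract is unchanged by the rewrite: it returns 1 with the lock held only
when the count really dropped to zero. A typical teardown pattern, with obj
and obj_list_lock standing in for a hypothetical refcounted object and its
list lock:

	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
		/* last reference: unlink under the lock, then free */
		list_del(&obj->node);
		spin_unlock(&obj_list_lock);
		kfree(obj);
	}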
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index d08302d2a42..c05b4b19cf6 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -10,6 +10,7 @@
*/
#include <linux/bitops.h>
+#include <linux/module.h>
int find_next_bit(const unsigned long *addr, int size, int offset)
{
@@ -53,3 +54,5 @@ int find_next_bit(const unsigned long *addr, int size, int offset)
return offset;
}
+
+EXPORT_SYMBOL(find_next_bit);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 88511c3805a..c0bd4a91480 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -137,18 +137,31 @@ out:
static inline void tag_set(struct radix_tree_node *node, int tag, int offset)
{
- if (!test_bit(offset, &node->tags[tag][0]))
- __set_bit(offset, &node->tags[tag][0]);
+ __set_bit(offset, node->tags[tag]);
}
static inline void tag_clear(struct radix_tree_node *node, int tag, int offset)
{
- __clear_bit(offset, &node->tags[tag][0]);
+ __clear_bit(offset, node->tags[tag]);
}
static inline int tag_get(struct radix_tree_node *node, int tag, int offset)
{
- return test_bit(offset, &node->tags[tag][0]);
+ return test_bit(offset, node->tags[tag]);
+}
+
+/*
+ * Returns 1 if any slot in the node has this tag set.
+ * Otherwise returns 0.
+ */
+static inline int any_tag_set(struct radix_tree_node *node, int tag)
+{
+ int idx;
+ for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
+ if (node->tags[tag][idx])
+ return 1;
+ }
+ return 0;
}
/*
@@ -185,15 +198,9 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
* into the newly-pushed top-level node(s)
*/
for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
- int idx;
-
tags[tag] = 0;
- for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
- if (root->rnode->tags[tag][idx]) {
- tags[tag] = 1;
- break;
- }
- }
+ if (any_tag_set(root->rnode, tag))
+ tags[tag] = 1;
}
do {
@@ -246,7 +253,7 @@ int radix_tree_insert(struct radix_tree_root *root,
shift = (height-1) * RADIX_TREE_MAP_SHIFT;
offset = 0; /* uninitialised var warning */
- while (height > 0) {
+ do {
if (slot == NULL) {
/* Have to add a child node. */
if (!(slot = radix_tree_node_alloc(root)))
@@ -264,18 +271,16 @@ int radix_tree_insert(struct radix_tree_root *root,
slot = node->slots[offset];
shift -= RADIX_TREE_MAP_SHIFT;
height--;
- }
+ } while (height > 0);
if (slot != NULL)
return -EEXIST;
- if (node) {
- node->count++;
- node->slots[offset] = item;
- BUG_ON(tag_get(node, 0, offset));
- BUG_ON(tag_get(node, 1, offset));
- } else
- root->rnode = item;
+ BUG_ON(!node);
+ node->count++;
+ node->slots[offset] = item;
+ BUG_ON(tag_get(node, 0, offset));
+ BUG_ON(tag_get(node, 1, offset));
return 0;
}
@@ -367,7 +372,8 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
int offset;
offset = (index >> shift) & RADIX_TREE_MAP_MASK;
- tag_set(slot, tag, offset);
+ if (!tag_get(slot, tag, offset))
+ tag_set(slot, tag, offset);
slot = slot->slots[offset];
BUG_ON(slot == NULL);
shift -= RADIX_TREE_MAP_SHIFT;
@@ -427,13 +433,11 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
goto out;
do {
- int idx;
-
+ if (!tag_get(pathp->node, tag, pathp->offset))
+ goto out;
tag_clear(pathp->node, tag, pathp->offset);
- for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
- if (pathp->node->tags[tag][idx])
- goto out;
- }
+ if (any_tag_set(pathp->node, tag))
+ goto out;
pathp--;
} while (pathp->node);
out:
@@ -674,6 +678,29 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
/**
+ * radix_tree_shrink - shrink height of a radix tree to minimal
+ * @root radix tree root
+ */
+static inline void radix_tree_shrink(struct radix_tree_root *root)
+{
+ /* try to shrink tree height */
+ while (root->height > 1 &&
+ root->rnode->count == 1 &&
+ root->rnode->slots[0]) {
+ struct radix_tree_node *to_free = root->rnode;
+
+ root->rnode = to_free->slots[0];
+ root->height--;
+ /* must only free zeroed nodes into the slab */
+ tag_clear(to_free, 0, 0);
+ tag_clear(to_free, 1, 0);
+ to_free->slots[0] = NULL;
+ to_free->count = 0;
+ radix_tree_node_free(to_free);
+ }
+}
+
+/**
* radix_tree_delete - delete an item from a radix tree
* @root: radix tree root
* @index: index key
@@ -691,6 +718,8 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
void *ret = NULL;
char tags[RADIX_TREE_TAGS];
int nr_cleared_tags;
+ int tag;
+ int offset;
height = root->height;
if (index > radix_tree_maxindex(height))
@@ -701,16 +730,14 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
slot = root->rnode;
for ( ; height > 0; height--) {
- int offset;
-
if (slot == NULL)
goto out;
+ pathp++;
offset = (index >> shift) & RADIX_TREE_MAP_MASK;
- pathp[1].offset = offset;
- pathp[1].node = slot;
+ pathp->offset = offset;
+ pathp->node = slot;
slot = slot->slots[offset];
- pathp++;
shift -= RADIX_TREE_MAP_SHIFT;
}
@@ -723,35 +750,39 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
/*
* Clear all tags associated with the just-deleted item
*/
- memset(tags, 0, sizeof(tags));
- do {
- int tag;
+ nr_cleared_tags = 0;
+ for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
+ if (tag_get(pathp->node, tag, pathp->offset)) {
+ tag_clear(pathp->node, tag, pathp->offset);
+ tags[tag] = 0;
+ nr_cleared_tags++;
+ } else
+ tags[tag] = 1;
+ }
- nr_cleared_tags = RADIX_TREE_TAGS;
+ for (pathp--; nr_cleared_tags && pathp->node; pathp--) {
for (tag = 0; tag < RADIX_TREE_TAGS; tag++) {
- int idx;
-
if (tags[tag])
continue;
tag_clear(pathp->node, tag, pathp->offset);
-
- for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
- if (pathp->node->tags[tag][idx]) {
- tags[tag] = 1;
- nr_cleared_tags--;
- break;
- }
+ if (any_tag_set(pathp->node, tag)) {
+ tags[tag] = 1;
+ nr_cleared_tags--;
}
}
- pathp--;
- } while (pathp->node && nr_cleared_tags);
+ }
/* Now free the nodes we do not need anymore */
for (pathp = orig_pathp; pathp->node; pathp--) {
pathp->node->slots[pathp->offset] = NULL;
- if (--pathp->node->count)
+ pathp->node->count--;
+
+ if (pathp->node->count) {
+ if (pathp->node == root->rnode)
+ radix_tree_shrink(root);
goto out;
+ }
/* Node with zero slots in use so free it */
radix_tree_node_free(pathp->node);
@@ -770,15 +801,11 @@ EXPORT_SYMBOL(radix_tree_delete);
*/
int radix_tree_tagged(struct radix_tree_root *root, int tag)
{
- int idx;
-
- if (!root->rnode)
- return 0;
- for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
- if (root->rnode->tags[tag][idx])
- return 1;
- }
- return 0;
+ struct radix_tree_node *rnode;
+ rnode = root->rnode;
+ if (!rnode)
+ return 0;
+ return any_tag_set(rnode, tag);
}
EXPORT_SYMBOL(radix_tree_tagged);
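One visible effect of radix_tree_shrink() is that deleting the high-index
entries lets a tree collapse back to its minimum height instead of staying
tall forever. A sketch, assuming the usual RADIX_TREE() declaration macro and
64-slot nodes; item_a and item_b are arbitrary non-NULL pointers and error
handling is omitted:

	#include <linux/radix-tree.h>

	static void shrink_demo(void *item_a, void *item_b)
	{
		RADIX_TREE(tree, GFP_KERNEL);

		radix_tree_insert(&tree, 0, item_a);	/* height 1 */
		radix_tree_insert(&tree, 1000, item_b);	/* height grows to 2 */
		radix_tree_delete(&tree, 1000);		/* only slot 0 of the root
							 * remains, so the height
							 * drops back to 1 */
	}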
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index dcd4be9bd4e..c8bb8cc899d 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -19,10 +19,11 @@ static void spin_bug(spinlock_t *lock, const char *msg)
if (xchg(&print_once, 0)) {
if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
owner = lock->owner;
- printk("BUG: spinlock %s on CPU#%d, %s/%d\n",
+ printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
msg, raw_smp_processor_id(),
current->comm, current->pid);
- printk(" lock: %p, .magic: %08x, .owner: %s/%d, .owner_cpu: %d\n",
+ printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
+ ".owner_cpu: %d\n",
lock, lock->magic,
owner ? owner->comm : "<none>",
owner ? owner->pid : -1,
@@ -78,7 +79,8 @@ static void __spin_lock_debug(spinlock_t *lock)
/* lockup suspected: */
if (print_once) {
print_once = 0;
- printk("BUG: spinlock lockup on CPU#%d, %s/%d, %p\n",
+ printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
+ "%s/%d, %p\n",
raw_smp_processor_id(), current->comm,
current->pid, lock);
dump_stack();
@@ -120,8 +122,8 @@ static void rwlock_bug(rwlock_t *lock, const char *msg)
static long print_once = 1;
if (xchg(&print_once, 0)) {
- printk("BUG: rwlock %s on CPU#%d, %s/%d, %p\n", msg,
- raw_smp_processor_id(), current->comm,
+ printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
+ msg, raw_smp_processor_id(), current->comm,
current->pid, lock);
dump_stack();
#ifdef CONFIG_SMP
@@ -149,7 +151,8 @@ static void __read_lock_debug(rwlock_t *lock)
/* lockup suspected: */
if (print_once) {
print_once = 0;
- printk("BUG: read-lock lockup on CPU#%d, %s/%d, %p\n",
+ printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
+ "%s/%d, %p\n",
raw_smp_processor_id(), current->comm,
current->pid, lock);
dump_stack();
@@ -221,7 +224,8 @@ static void __write_lock_debug(rwlock_t *lock)
/* lockup suspected: */
if (print_once) {
print_once = 0;
- printk("BUG: write-lock lockup on CPU#%d, %s/%d, %p\n",
+ printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
+ "%s/%d, %p\n",
raw_smp_processor_id(), current->comm,
current->pid, lock);
dump_stack();
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 1ff8dcebf7c..0af497b6b9a 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -142,8 +142,7 @@ swiotlb_init_with_default_size (size_t default_size)
/*
* Get IO TLB memory from the low pages
*/
- io_tlb_start = alloc_bootmem_low_pages_limit(io_tlb_nslabs *
- (1 << IO_TLB_SHIFT), 0x100000000);
+ io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
if (!io_tlb_start)
panic("Cannot allocate SWIOTLB buffer");
io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
@@ -464,7 +463,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
*/
dma_addr_t handle;
handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
- if (dma_mapping_error(handle))
+ if (swiotlb_dma_mapping_error(handle))
return NULL;
ret = phys_to_virt(handle);
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
index ad9a1bf4fc6..1653dd9bb01 100644
--- a/lib/zlib_deflate/deflate.c
+++ b/lib/zlib_deflate/deflate.c
@@ -255,6 +255,7 @@ int zlib_deflateInit2_(
}
/* ========================================================================= */
+#if 0
int zlib_deflateSetDictionary(
z_streamp strm,
const Byte *dictionary,
@@ -297,6 +298,7 @@ int zlib_deflateSetDictionary(
if (hash_head) hash_head = 0; /* to make compiler happy */
return Z_OK;
}
+#endif /* 0 */
/* ========================================================================= */
int zlib_deflateReset(
@@ -330,6 +332,7 @@ int zlib_deflateReset(
}
/* ========================================================================= */
+#if 0
int zlib_deflateParams(
z_streamp strm,
int level,
@@ -365,6 +368,7 @@ int zlib_deflateParams(
s->strategy = strategy;
return err;
}
+#endif /* 0 */
/* =========================================================================
* Put a short in the pending buffer. The 16-bit value is put in MSB order.
@@ -572,6 +576,7 @@ int zlib_deflateEnd(
/* =========================================================================
* Copy the source state to the destination state.
*/
+#if 0
int zlib_deflateCopy (
z_streamp dest,
z_streamp source
@@ -624,6 +629,7 @@ int zlib_deflateCopy (
return Z_OK;
#endif
}
+#endif /* 0 */
/* ===========================================================================
* Read a new buffer from the current input stream, update the adler32
diff --git a/lib/zlib_deflate/deflate_syms.c b/lib/zlib_deflate/deflate_syms.c
index 5985b28c8e3..767b573d1ef 100644
--- a/lib/zlib_deflate/deflate_syms.c
+++ b/lib/zlib_deflate/deflate_syms.c
@@ -16,6 +16,4 @@ EXPORT_SYMBOL(zlib_deflateInit_);
EXPORT_SYMBOL(zlib_deflateInit2_);
EXPORT_SYMBOL(zlib_deflateEnd);
EXPORT_SYMBOL(zlib_deflateReset);
-EXPORT_SYMBOL(zlib_deflateCopy);
-EXPORT_SYMBOL(zlib_deflateParams);
MODULE_LICENSE("GPL");
diff --git a/lib/zlib_inflate/infblock.c b/lib/zlib_inflate/infblock.c
index 50f21ca4ef7..c16cdeff51a 100644
--- a/lib/zlib_inflate/infblock.c
+++ b/lib/zlib_inflate/infblock.c
@@ -338,6 +338,7 @@ int zlib_inflate_blocks_free(
}
+#if 0
void zlib_inflate_set_dictionary(
inflate_blocks_statef *s,
const Byte *d,
@@ -347,15 +348,18 @@ void zlib_inflate_set_dictionary(
memcpy(s->window, d, n);
s->read = s->write = s->window + n;
}
+#endif /* 0 */
/* Returns true if inflate is currently at the end of a block generated
* by Z_SYNC_FLUSH or Z_FULL_FLUSH.
* IN assertion: s != NULL
*/
+#if 0
int zlib_inflate_blocks_sync_point(
inflate_blocks_statef *s
)
{
return s->mode == LENS;
}
+#endif /* 0 */
diff --git a/lib/zlib_inflate/infblock.h b/lib/zlib_inflate/infblock.h
index f5221ddf605..ceee60b5107 100644
--- a/lib/zlib_inflate/infblock.h
+++ b/lib/zlib_inflate/infblock.h
@@ -33,12 +33,16 @@ extern int zlib_inflate_blocks_free (
inflate_blocks_statef *,
z_streamp);
+#if 0
extern void zlib_inflate_set_dictionary (
inflate_blocks_statef *s,
const Byte *d, /* dictionary */
uInt n); /* dictionary length */
+#endif /* 0 */
+#if 0
extern int zlib_inflate_blocks_sync_point (
inflate_blocks_statef *s);
+#endif /* 0 */
#endif /* _INFBLOCK_H */
diff --git a/lib/zlib_inflate/inflate_syms.c b/lib/zlib_inflate/inflate_syms.c
index aa1b0818912..ef49738f57e 100644
--- a/lib/zlib_inflate/inflate_syms.c
+++ b/lib/zlib_inflate/inflate_syms.c
@@ -15,8 +15,6 @@ EXPORT_SYMBOL(zlib_inflate);
EXPORT_SYMBOL(zlib_inflateInit_);
EXPORT_SYMBOL(zlib_inflateInit2_);
EXPORT_SYMBOL(zlib_inflateEnd);
-EXPORT_SYMBOL(zlib_inflateSync);
EXPORT_SYMBOL(zlib_inflateReset);
-EXPORT_SYMBOL(zlib_inflateSyncPoint);
EXPORT_SYMBOL(zlib_inflateIncomp);
MODULE_LICENSE("GPL");
diff --git a/lib/zlib_inflate/inflate_sync.c b/lib/zlib_inflate/inflate_sync.c
index e07bdb21f55..61411ff89d6 100644
--- a/lib/zlib_inflate/inflate_sync.c
+++ b/lib/zlib_inflate/inflate_sync.c
@@ -7,6 +7,7 @@
#include "infblock.h"
#include "infutil.h"
+#if 0
int zlib_inflateSync(
z_streamp z
)
@@ -57,6 +58,7 @@ int zlib_inflateSync(
z->state->mode = BLOCKS;
return Z_OK;
}
+#endif /* 0 */
/* Returns true if inflate is currently at the end of a block generated
@@ -66,6 +68,7 @@ int zlib_inflateSync(
* decompressing, PPP checks that at the end of input packet, inflate is
* waiting for these length bytes.
*/
+#if 0
int zlib_inflateSyncPoint(
z_streamp z
)
@@ -74,6 +77,7 @@ int zlib_inflateSyncPoint(
return Z_STREAM_ERROR;
return zlib_inflate_blocks_sync_point(z->state->blocks);
}
+#endif /* 0 */
/*
* This subroutine adds the data at next_in/avail_in to the output history
diff --git a/mm/Kconfig b/mm/Kconfig
index 21eb51d4da8..a9cb80ae640 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -11,7 +11,7 @@ choice
config FLATMEM_MANUAL
bool "Flat Memory"
- depends on !ARCH_DISCONTIGMEM_ENABLE || ARCH_FLATMEM_ENABLE
+ depends on !(ARCH_DISCONTIGMEM_ENABLE || ARCH_SPARSEMEM_ENABLE) || ARCH_FLATMEM_ENABLE
help
This option allows you to change some of the ways that
Linux manages its memory internally. Most users will
@@ -132,3 +132,10 @@ config SPLIT_PTLOCK_CPUS
default "4096" if ARM && !CPU_CACHE_VIPT
default "4096" if PARISC && !PA20
default "4"
+
+#
+# support for page migration
+#
+config MIGRATION
+ def_bool y if NUMA || SPARSEMEM || DISCONTIGMEM
+ depends on SWAP
diff --git a/mm/Makefile b/mm/Makefile
index 2fa6d2ca9f2..9aa03fa1dcc 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -9,8 +9,8 @@ mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \
obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
page_alloc.o page-writeback.o pdflush.o \
- readahead.o slab.o swap.o truncate.o vmscan.o \
- prio_tree.o $(mmu-y)
+ readahead.o swap.o truncate.o vmscan.o \
+ prio_tree.o util.o $(mmu-y)
obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
obj-$(CONFIG_HUGETLBFS) += hugetlb.o
@@ -18,5 +18,7 @@ obj-$(CONFIG_NUMA) += mempolicy.o
obj-$(CONFIG_SPARSEMEM) += sparse.o
obj-$(CONFIG_SHMEM) += shmem.o
obj-$(CONFIG_TINY_SHMEM) += tiny-shmem.o
+obj-$(CONFIG_SLOB) += slob.o
+obj-$(CONFIG_SLAB) += slab.o
obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
obj-$(CONFIG_FS_XIP) += filemap_xip.o
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 16b9465eb4e..35c32290f71 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -296,20 +296,12 @@ static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
unsigned long v = ~map[i / BITS_PER_LONG];
if (gofast && v == ~0UL) {
- int j, order;
+ int order;
page = pfn_to_page(pfn);
count += BITS_PER_LONG;
- __ClearPageReserved(page);
order = ffs(BITS_PER_LONG) - 1;
- set_page_refs(page, order);
- for (j = 1; j < BITS_PER_LONG; j++) {
- if (j + 16 < BITS_PER_LONG)
- prefetchw(page + j + 16);
- __ClearPageReserved(page + j);
- set_page_count(page + j, 0);
- }
- __free_pages(page, order);
+ __free_pages_bootmem(page, order);
i += BITS_PER_LONG;
page += BITS_PER_LONG;
} else if (v) {
@@ -319,9 +311,7 @@ static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
for (m = 1; m && i < idx; m<<=1, page++, i++) {
if (v & m) {
count++;
- __ClearPageReserved(page);
- set_page_refs(page, 0);
- __free_page(page);
+ __free_pages_bootmem(page, 0);
}
}
} else {
@@ -339,9 +329,7 @@ static unsigned long __init free_all_bootmem_core(pg_data_t *pgdat)
count = 0;
for (i = 0; i < ((bdata->node_low_pfn-(bdata->node_boot_start >> PAGE_SHIFT))/8 + PAGE_SIZE-1)/PAGE_SIZE; i++,page++) {
count++;
- __ClearPageReserved(page);
- set_page_count(page, 1);
- __free_page(page);
+ __free_pages_bootmem(page, 0);
}
total += count;
bdata->node_bootmem_map = NULL;
@@ -393,15 +381,14 @@ unsigned long __init free_all_bootmem (void)
return(free_all_bootmem_core(NODE_DATA(0)));
}
-void * __init __alloc_bootmem_limit (unsigned long size, unsigned long align, unsigned long goal,
- unsigned long limit)
+void * __init __alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal)
{
pg_data_t *pgdat = pgdat_list;
void *ptr;
for_each_pgdat(pgdat)
if ((ptr = __alloc_bootmem_core(pgdat->bdata, size,
- align, goal, limit)))
+ align, goal, 0)))
return(ptr);
/*
@@ -413,15 +400,40 @@ void * __init __alloc_bootmem_limit (unsigned long size, unsigned long align, un
}
-void * __init __alloc_bootmem_node_limit (pg_data_t *pgdat, unsigned long size, unsigned long align,
- unsigned long goal, unsigned long limit)
+void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigned long align,
+ unsigned long goal)
{
void *ptr;
- ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal, limit);
+ ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
if (ptr)
return (ptr);
- return __alloc_bootmem_limit(size, align, goal, limit);
+ return __alloc_bootmem(size, align, goal);
+}
+
+#define LOW32LIMIT 0xffffffff
+
+void * __init __alloc_bootmem_low(unsigned long size, unsigned long align, unsigned long goal)
+{
+ pg_data_t *pgdat = pgdat_list;
+ void *ptr;
+
+ for_each_pgdat(pgdat)
+ if ((ptr = __alloc_bootmem_core(pgdat->bdata, size,
+ align, goal, LOW32LIMIT)))
+ return(ptr);
+
+ /*
+ * Whoops, we cannot satisfy the allocation request.
+ */
+ printk(KERN_ALERT "low bootmem alloc of %lu bytes failed!\n", size);
+ panic("Out of low memory");
+ return NULL;
}
+void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
+ unsigned long align, unsigned long goal)
+{
+ return __alloc_bootmem_core(pgdat->bdata, size, align, goal, LOW32LIMIT);
+}
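The *_limit variants disappear in favour of explicit low-memory entry points
capped at LOW32LIMIT, i.e. the first 4GB. Callers normally go through the
wrapper macros in include/linux/bootmem.h; the SMP_CACHE_BYTES alignment and
zero goal below are the assumed defaults of those wrappers, not something
this patch introduces:

	/* roughly what alloc_bootmem_low(size) expands to */
	void *buf = __alloc_bootmem_low(size, SMP_CACHE_BYTES, 0);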
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 5f19e87bc5a..d257c89e770 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -37,6 +37,11 @@ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
if (!file)
return -EBADF;
+ if (S_ISFIFO(file->f_dentry->d_inode->i_mode)) {
+ ret = -ESPIPE;
+ goto out;
+ }
+
mapping = file->f_mapping;
if (!mapping || len < 0) {
ret = -EINVAL;
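The new S_ISFIFO check makes fadvise on a pipe fail explicitly instead of
falling through to the pagecache paths below. A userspace sketch of what that
looks like, noting that posix_fadvise() returns the error number directly
rather than setting errno:

	#include <fcntl.h>
	#include <unistd.h>

	static int fadvise_pipe_demo(void)
	{
		int pfd[2];

		if (pipe(pfd))
			return -1;
		/* returns ESPIPE once this check is in place */
		return posix_fadvise(pfd[0], 0, 0, POSIX_FADV_WILLNEED);
	}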
diff --git a/mm/filemap.c b/mm/filemap.c
index 33a28bfde15..a965b6b35f2 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -15,6 +15,7 @@
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/aio.h>
+#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/swap.h>
@@ -61,7 +62,7 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
* ->swap_lock (exclusive_swap_page, others)
* ->mapping->tree_lock
*
- * ->i_sem
+ * ->i_mutex
* ->i_mmap_lock (truncate->unmap_mapping_range)
*
* ->mmap_sem
@@ -73,9 +74,9 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
* ->lock_page (access_process_vm)
*
* ->mmap_sem
- * ->i_sem (msync)
+ * ->i_mutex (msync)
*
- * ->i_sem
+ * ->i_mutex
* ->i_alloc_sem (various)
*
* ->inode_lock
@@ -276,11 +277,11 @@ static int wait_on_page_writeback_range(struct address_space *mapping,
* integrity" operation. It waits upon in-flight writeout before starting and
* waiting upon new writeout. If there was an IO error, return it.
*
- * We need to re-take i_sem during the generic_osync_inode list walk because
+ * We need to re-take i_mutex during the generic_osync_inode list walk because
* it is otherwise livelockable.
*/
int sync_page_range(struct inode *inode, struct address_space *mapping,
- loff_t pos, size_t count)
+ loff_t pos, loff_t count)
{
pgoff_t start = pos >> PAGE_CACHE_SHIFT;
pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
@@ -290,9 +291,9 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
return 0;
ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
if (ret == 0) {
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
}
if (ret == 0)
ret = wait_on_page_writeback_range(mapping, start, end);
@@ -301,13 +302,12 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
EXPORT_SYMBOL(sync_page_range);
/*
- * Note: Holding i_sem across sync_page_range_nolock is not a good idea
+ * Note: Holding i_mutex across sync_page_range_nolock is not a good idea
* as it forces O_SYNC writers to different parts of the same file
* to be serialised right until io completion.
*/
-static int sync_page_range_nolock(struct inode *inode,
- struct address_space *mapping,
- loff_t pos, size_t count)
+int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
+ loff_t pos, loff_t count)
{
pgoff_t start = pos >> PAGE_CACHE_SHIFT;
pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
@@ -322,6 +322,7 @@ static int sync_page_range_nolock(struct inode *inode,
ret = wait_on_page_writeback_range(mapping, start, end);
return ret;
}
+EXPORT_SYMBOL(sync_page_range_nolock);
/**
* filemap_fdatawait - walk the list of under-writeback pages of the given
@@ -343,30 +344,44 @@ EXPORT_SYMBOL(filemap_fdatawait);
int filemap_write_and_wait(struct address_space *mapping)
{
- int retval = 0;
+ int err = 0;
if (mapping->nrpages) {
- retval = filemap_fdatawrite(mapping);
- if (retval == 0)
- retval = filemap_fdatawait(mapping);
+ err = filemap_fdatawrite(mapping);
+ /*
+ * Even if the above returned an error, the pages may have been
+ * partially written (e.g. -ENOSPC), so we wait for writeback anyway.
+ * But -EIO is a special case: it may indicate that something has gone
+ * badly wrong (e.g. a bug), so we avoid waiting after -EIO.
+ */
+ if (err != -EIO) {
+ int err2 = filemap_fdatawait(mapping);
+ if (!err)
+ err = err2;
+ }
}
- return retval;
+ return err;
}
+EXPORT_SYMBOL(filemap_write_and_wait);
int filemap_write_and_wait_range(struct address_space *mapping,
loff_t lstart, loff_t lend)
{
- int retval = 0;
+ int err = 0;
if (mapping->nrpages) {
- retval = __filemap_fdatawrite_range(mapping, lstart, lend,
- WB_SYNC_ALL);
- if (retval == 0)
- retval = wait_on_page_writeback_range(mapping,
- lstart >> PAGE_CACHE_SHIFT,
- lend >> PAGE_CACHE_SHIFT);
+ err = __filemap_fdatawrite_range(mapping, lstart, lend,
+ WB_SYNC_ALL);
+ /* See comment of filemap_write_and_wait() */
+ if (err != -EIO) {
+ int err2 = wait_on_page_writeback_range(mapping,
+ lstart >> PAGE_CACHE_SHIFT,
+ lend >> PAGE_CACHE_SHIFT);
+ if (!err)
+ err = err2;
+ }
}
- return retval;
+ return err;
}
/*
@@ -555,11 +570,12 @@ repeat:
page_cache_get(page);
if (TestSetPageLocked(page)) {
read_unlock_irq(&mapping->tree_lock);
- lock_page(page);
+ __lock_page(page);
read_lock_irq(&mapping->tree_lock);
/* Has the page been truncated while we slept? */
- if (page->mapping != mapping || page->index != offset) {
+ if (unlikely(page->mapping != mapping ||
+ page->index != offset)) {
unlock_page(page);
page_cache_release(page);
goto repeat;
@@ -831,8 +847,13 @@ readpage:
/* Start the actual read. The read will unlock the page. */
error = mapping->a_ops->readpage(filp, page);
- if (unlikely(error))
+ if (unlikely(error)) {
+ if (error == AOP_TRUNCATED_PAGE) {
+ page_cache_release(page);
+ goto find_page;
+ }
goto readpage_error;
+ }
if (!PageUptodate(page)) {
lock_page(page);
@@ -1152,26 +1173,24 @@ static int fastcall page_cache_read(struct file * file, unsigned long offset)
{
struct address_space *mapping = file->f_mapping;
struct page *page;
- int error;
+ int ret;
- page = page_cache_alloc_cold(mapping);
- if (!page)
- return -ENOMEM;
+ do {
+ page = page_cache_alloc_cold(mapping);
+ if (!page)
+ return -ENOMEM;
+
+ ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
+ if (ret == 0)
+ ret = mapping->a_ops->readpage(file, page);
+ else if (ret == -EEXIST)
+ ret = 0; /* losing race to add is OK */
- error = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
- if (!error) {
- error = mapping->a_ops->readpage(file, page);
page_cache_release(page);
- return error;
- }
- /*
- * We arrive here in the unlikely event that someone
- * raced with us and added our page to the cache first
- * or we are out of memory for radix-tree nodes.
- */
- page_cache_release(page);
- return error == -EEXIST ? 0 : error;
+ } while (ret == AOP_TRUNCATED_PAGE);
+
+ return ret;
}
#define MMAP_LOTSAMISS (100)
@@ -1331,10 +1350,14 @@ page_not_uptodate:
goto success;
}
- if (!mapping->a_ops->readpage(file, page)) {
+ error = mapping->a_ops->readpage(file, page);
+ if (!error) {
wait_on_page_locked(page);
if (PageUptodate(page))
goto success;
+ } else if (error == AOP_TRUNCATED_PAGE) {
+ page_cache_release(page);
+ goto retry_find;
}
/*
@@ -1358,10 +1381,14 @@ page_not_uptodate:
goto success;
}
ClearPageError(page);
- if (!mapping->a_ops->readpage(file, page)) {
+ error = mapping->a_ops->readpage(file, page);
+ if (!error) {
wait_on_page_locked(page);
if (PageUptodate(page))
goto success;
+ } else if (error == AOP_TRUNCATED_PAGE) {
+ page_cache_release(page);
+ goto retry_find;
}
/*
@@ -1444,10 +1471,14 @@ page_not_uptodate:
goto success;
}
- if (!mapping->a_ops->readpage(file, page)) {
+ error = mapping->a_ops->readpage(file, page);
+ if (!error) {
wait_on_page_locked(page);
if (PageUptodate(page))
goto success;
+ } else if (error == AOP_TRUNCATED_PAGE) {
+ page_cache_release(page);
+ goto retry_find;
}
/*
@@ -1470,10 +1501,14 @@ page_not_uptodate:
}
ClearPageError(page);
- if (!mapping->a_ops->readpage(file, page)) {
+ error = mapping->a_ops->readpage(file, page);
+ if (!error) {
wait_on_page_locked(page);
if (PageUptodate(page))
goto success;
+ } else if (error == AOP_TRUNCATED_PAGE) {
+ page_cache_release(page);
+ goto retry_find;
}
/*
@@ -1858,7 +1893,7 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
/*
* Sync the fs metadata but not the minor inode changes and
* of course not the data as we did direct DMA for the IO.
- * i_sem is held, which protects generic_osync_inode() from
+ * i_mutex is held, which protects generic_osync_inode() from
* livelocking.
*/
if (written >= 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
@@ -1934,12 +1969,16 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
status = a_ops->prepare_write(file, page, offset, offset+bytes);
if (unlikely(status)) {
loff_t isize = i_size_read(inode);
+
+ if (status != AOP_TRUNCATED_PAGE)
+ unlock_page(page);
+ page_cache_release(page);
+ if (status == AOP_TRUNCATED_PAGE)
+ continue;
/*
* prepare_write() may have instantiated a few blocks
* outside i_size. Trim these off again.
*/
- unlock_page(page);
- page_cache_release(page);
if (pos + bytes > isize)
vmtruncate(inode, isize);
break;
@@ -1952,6 +1991,10 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
cur_iov, iov_base, bytes);
flush_dcache_page(page);
status = a_ops->commit_write(file, page, offset, offset+bytes);
+ if (status == AOP_TRUNCATED_PAGE) {
+ page_cache_release(page);
+ continue;
+ }
if (likely(copied > 0)) {
if (!status)
status = copied;
@@ -2066,7 +2109,7 @@ __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
if (err)
goto out;
- inode_update_time(inode, 1);
+ file_update_time(file);
/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
if (unlikely(file->f_flags & O_DIRECT)) {
@@ -2153,10 +2196,10 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const char __user *buf,
BUG_ON(iocb->ki_pos != pos);
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
ret = __generic_file_aio_write_nolock(iocb, &local_iov, 1,
&iocb->ki_pos);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
ssize_t err;
@@ -2178,9 +2221,9 @@ ssize_t generic_file_write(struct file *file, const char __user *buf,
struct iovec local_iov = { .iov_base = (void __user *)buf,
.iov_len = count };
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
ret = __generic_file_write_nolock(file, &local_iov, 1, ppos);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
ssize_t err;
@@ -2214,9 +2257,9 @@ ssize_t generic_file_writev(struct file *file, const struct iovec *iov,
struct inode *inode = mapping->host;
ssize_t ret;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
ret = __generic_file_write_nolock(file, iov, nr_segs, ppos);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
int err;
@@ -2230,7 +2273,7 @@ ssize_t generic_file_writev(struct file *file, const struct iovec *iov,
EXPORT_SYMBOL(generic_file_writev);
/*
- * Called under i_sem for writes to S_ISREG files. Returns -EIO if something
+ * Called under i_mutex for writes to S_ISREG files. Returns -EIO if something
* went wrong during pagecache shootdown.
*/
static ssize_t
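AOP_TRUNCATED_PAGE is the common thread in most of these hunks: an
address_space operation may now drop the page it was handed, unlock it, and
ask the caller to start over because the page was truncated underneath it.
The caller-side idiom, as the write path above adopts it, is roughly the
following sketch; page, file, offset and bytes are whatever the surrounding
loop already holds:

	status = a_ops->prepare_write(file, page, offset, offset + bytes);
	if (status == AOP_TRUNCATED_PAGE) {
		/* the aop unlocked the page for us; drop our ref and retry */
		page_cache_release(page);
		continue;
	}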
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 9cf687e4a29..b960ac8e591 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -338,7 +338,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
*ppos = pos;
/*
* No need to use i_size_read() here, the i_size
- * cannot change under us because we hold i_sem.
+ * cannot change under us because we hold i_mutex.
*/
if (pos > inode->i_size) {
i_size_write(inode, pos);
@@ -358,7 +358,7 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
loff_t pos;
ssize_t ret;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if (!access_ok(VERIFY_READ, buf, len)) {
ret=-EFAULT;
@@ -383,14 +383,14 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
if (ret)
goto out_backing;
- inode_update_time(inode, 1);
+ file_update_time(filp);
ret = __xip_file_write (filp, buf, count, pos, ppos);
out_backing:
current->backing_dev_info = NULL;
out_up:
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3e52df7c471..b21d78c941b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -11,6 +11,9 @@
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
+#include <linux/mempolicy.h>
+#include <linux/cpuset.h>
+
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -36,18 +39,22 @@ static void enqueue_huge_page(struct page *page)
free_huge_pages_node[nid]++;
}
-static struct page *dequeue_huge_page(void)
+static struct page *dequeue_huge_page(struct vm_area_struct *vma,
+ unsigned long address)
{
int nid = numa_node_id();
struct page *page = NULL;
+ struct zonelist *zonelist = huge_zonelist(vma, address);
+ struct zone **z;
- if (list_empty(&hugepage_freelists[nid])) {
- for (nid = 0; nid < MAX_NUMNODES; ++nid)
- if (!list_empty(&hugepage_freelists[nid]))
- break;
+ for (z = zonelist->zones; *z; z++) {
+ nid = (*z)->zone_pgdat->node_id;
+ if (cpuset_zone_allowed(*z, GFP_HIGHUSER) &&
+ !list_empty(&hugepage_freelists[nid]))
+ break;
}
- if (nid >= 0 && nid < MAX_NUMNODES &&
- !list_empty(&hugepage_freelists[nid])) {
+
+ if (*z) {
page = list_entry(hugepage_freelists[nid].next,
struct page, lru);
list_del(&page->lru);
@@ -85,13 +92,13 @@ void free_huge_page(struct page *page)
spin_unlock(&hugetlb_lock);
}
-struct page *alloc_huge_page(void)
+struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
{
struct page *page;
int i;
spin_lock(&hugetlb_lock);
- page = dequeue_huge_page();
+ page = dequeue_huge_page(vma, addr);
if (!page) {
spin_unlock(&hugetlb_lock);
return NULL;
@@ -194,7 +201,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
spin_lock(&hugetlb_lock);
try_to_free_low(count);
while (count < nr_huge_pages) {
- struct page *page = dequeue_huge_page();
+ struct page *page = dequeue_huge_page(NULL, 0);
if (!page)
break;
update_and_free_page(page);
@@ -261,11 +268,12 @@ struct vm_operations_struct hugetlb_vm_ops = {
.nopage = hugetlb_nopage,
};
-static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page)
+static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
+ int writable)
{
pte_t entry;
- if (vma->vm_flags & VM_WRITE) {
+ if (writable) {
entry =
pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
} else {
@@ -277,12 +285,27 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page)
return entry;
}
+static void set_huge_ptep_writable(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ pte_t entry;
+
+ entry = pte_mkwrite(pte_mkdirty(*ptep));
+ ptep_set_access_flags(vma, address, ptep, entry, 1);
+ update_mmu_cache(vma, address, entry);
+ lazy_mmu_prot_update(entry);
+}
+
+
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma)
{
pte_t *src_pte, *dst_pte, entry;
struct page *ptepage;
unsigned long addr;
+ int cow;
+
+ cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
src_pte = huge_pte_offset(src, addr);
@@ -294,6 +317,8 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
spin_lock(&dst->page_table_lock);
spin_lock(&src->page_table_lock);
if (!pte_none(*src_pte)) {
+ if (cow)
+ ptep_set_wrprotect(src, addr, src_pte);
entry = *src_pte;
ptepage = pte_page(entry);
get_page(ptepage);
@@ -345,57 +370,63 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
flush_tlb_range(vma, start, end);
}
-static struct page *find_lock_huge_page(struct address_space *mapping,
- unsigned long idx)
+static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep, pte_t pte)
{
- struct page *page;
- int err;
- struct inode *inode = mapping->host;
- unsigned long size;
+ struct page *old_page, *new_page;
+ int i, avoidcopy;
-retry:
- page = find_lock_page(mapping, idx);
- if (page)
- goto out;
+ old_page = pte_page(pte);
- /* Check to make sure the mapping hasn't been truncated */
- size = i_size_read(inode) >> HPAGE_SHIFT;
- if (idx >= size)
- goto out;
+ /* If no-one else is actually using this page, avoid the copy
+ * and just make the page writable */
+ avoidcopy = (page_count(old_page) == 1);
+ if (avoidcopy) {
+ set_huge_ptep_writable(vma, address, ptep);
+ return VM_FAULT_MINOR;
+ }
- if (hugetlb_get_quota(mapping))
- goto out;
- page = alloc_huge_page();
- if (!page) {
- hugetlb_put_quota(mapping);
- goto out;
+ page_cache_get(old_page);
+ new_page = alloc_huge_page(vma, address);
+
+ if (!new_page) {
+ page_cache_release(old_page);
+
+ /* Logically this is OOM, not a SIGBUS, but an OOM
+ * could cause the kernel to go killing other
+ * processes which won't help the hugepage situation
+ * at all (?) */
+ return VM_FAULT_SIGBUS;
}
- err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
- if (err) {
- put_page(page);
- hugetlb_put_quota(mapping);
- if (err == -EEXIST)
- goto retry;
- page = NULL;
+ spin_unlock(&mm->page_table_lock);
+ for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++)
+ copy_user_highpage(new_page + i, old_page + i,
+ address + i*PAGE_SIZE);
+ spin_lock(&mm->page_table_lock);
+
+ ptep = huge_pte_offset(mm, address & HPAGE_MASK);
+ if (likely(pte_same(*ptep, pte))) {
+ /* Break COW */
+ set_huge_pte_at(mm, address, ptep,
+ make_huge_pte(vma, new_page, 1));
+ /* Make the old page be freed below */
+ new_page = old_page;
}
-out:
- return page;
+ page_cache_release(new_page);
+ page_cache_release(old_page);
+ return VM_FAULT_MINOR;
}
-int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long address, int write_access)
+int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep, int write_access)
{
int ret = VM_FAULT_SIGBUS;
unsigned long idx;
unsigned long size;
- pte_t *pte;
struct page *page;
struct address_space *mapping;
-
- pte = huge_pte_alloc(mm, address);
- if (!pte)
- goto out;
+ pte_t new_pte;
mapping = vma->vm_file->f_mapping;
idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
@@ -405,9 +436,31 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* Use page lock to guard against racing truncation
* before we get page_table_lock.
*/
- page = find_lock_huge_page(mapping, idx);
- if (!page)
- goto out;
+retry:
+ page = find_lock_page(mapping, idx);
+ if (!page) {
+ if (hugetlb_get_quota(mapping))
+ goto out;
+ page = alloc_huge_page(vma, address);
+ if (!page) {
+ hugetlb_put_quota(mapping);
+ goto out;
+ }
+
+ if (vma->vm_flags & VM_SHARED) {
+ int err;
+
+ err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+ if (err) {
+ put_page(page);
+ hugetlb_put_quota(mapping);
+ if (err == -EEXIST)
+ goto retry;
+ goto out;
+ }
+ } else
+ lock_page(page);
+ }
spin_lock(&mm->page_table_lock);
size = i_size_read(mapping->host) >> HPAGE_SHIFT;
@@ -415,11 +468,19 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
goto backout;
ret = VM_FAULT_MINOR;
- if (!pte_none(*pte))
+ if (!pte_none(*ptep))
goto backout;
add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
- set_huge_pte_at(mm, address, pte, make_huge_pte(vma, page));
+ new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
+ && (vma->vm_flags & VM_SHARED)));
+ set_huge_pte_at(mm, address, ptep, new_pte);
+
+ if (write_access && !(vma->vm_flags & VM_SHARED)) {
+ /* Optimization, do the COW without a second fault */
+ ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
+ }
+
spin_unlock(&mm->page_table_lock);
unlock_page(page);
out:
@@ -433,6 +494,33 @@ backout:
goto out;
}
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, int write_access)
+{
+ pte_t *ptep;
+ pte_t entry;
+ int ret;
+
+ ptep = huge_pte_alloc(mm, address);
+ if (!ptep)
+ return VM_FAULT_OOM;
+
+ entry = *ptep;
+ if (pte_none(entry))
+ return hugetlb_no_page(mm, vma, address, ptep, write_access);
+
+ ret = VM_FAULT_MINOR;
+
+ spin_lock(&mm->page_table_lock);
+ /* Check for a racing update before calling hugetlb_cow */
+ if (likely(pte_same(entry, *ptep)))
+ if (write_access && !pte_write(entry))
+ ret = hugetlb_cow(mm, vma, address, ptep, entry);
+ spin_unlock(&mm->page_table_lock);
+
+ return ret;
+}
+
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page **pages, struct vm_area_struct **vmas,
unsigned long *position, int *length, int i)
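For orientation, the private-mapping COW path added above is reachable from userspace with nothing more than a MAP_PRIVATE mapping of a hugetlbfs file plus a fork; a minimal sketch follows (the /mnt/huge mount point and the 2MB huge page size are illustrative assumptions, not anything defined by this patch):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#define HPAGE_SZ	(2UL * 1024 * 1024)	/* assumed 2MB huge pages */

int main(void)
{
	int fd = open("/mnt/huge/cow-test", O_CREAT | O_RDWR, 0600);
	char *p;

	if (fd < 0) {
		perror("open hugetlbfs file");
		return 1;
	}
	p = mmap(NULL, HPAGE_SZ, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	p[0] = 1;	/* first touch: hugetlb_no_page(), then made writable in place */
	if (fork() == 0) {
		p[0] = 2;	/* write after fork: hugetlb_cow() copies the huge page */
		_exit(0);
	}
	wait(NULL);
	return 0;
}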
diff --git a/mm/internal.h b/mm/internal.h
index 6bf134e8fb3..17256bb2f4e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -9,5 +9,22 @@
* 2 of the License, or (at your option) any later version.
*/
-/* page_alloc.c */
-extern void set_page_refs(struct page *page, int order);
+static inline void set_page_refs(struct page *page, int order)
+{
+#ifdef CONFIG_MMU
+ set_page_count(page, 1);
+#else
+ int i;
+
+ /*
+ * We need to reference all the pages for this order, otherwise if
+ * anyone accesses one of the pages with (get/put) it will be freed.
+ * - eg: access_process_vm()
+ */
+ for (i = 0; i < (1 << order); i++)
+ set_page_count(page + i, 1);
+#endif /* CONFIG_MMU */
+}
+
+extern void fastcall __init __free_pages_bootmem(struct page *page,
+ unsigned int order);
diff --git a/mm/madvise.c b/mm/madvise.c
index 2b7cf0400a2..ae0ae3ea299 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -140,6 +140,36 @@ static long madvise_dontneed(struct vm_area_struct * vma,
return 0;
}
+/*
+ * Application wants to free up the pages and associated backing store.
+ * This is effectively punching a hole into the middle of a file.
+ *
+ * NOTE: Currently, only shmfs/tmpfs is supported for this operation.
+ * Other filesystems return -ENOSYS.
+ */
+static long madvise_remove(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ struct address_space *mapping;
+ loff_t offset, endoff;
+
+ if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
+ return -EINVAL;
+
+ if (!vma->vm_file || !vma->vm_file->f_mapping
+ || !vma->vm_file->f_mapping->host) {
+ return -EINVAL;
+ }
+
+ mapping = vma->vm_file->f_mapping;
+
+ offset = (loff_t)(start - vma->vm_start)
+ + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+ endoff = (loff_t)(end - vma->vm_start - 1)
+ + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+ return vmtruncate_range(mapping->host, offset, endoff);
+}
+
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
unsigned long start, unsigned long end, int behavior)
@@ -152,6 +182,9 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
case MADV_RANDOM:
error = madvise_behavior(vma, prev, start, end, behavior);
break;
+ case MADV_REMOVE:
+ error = madvise_remove(vma, start, end);
+ break;
case MADV_WILLNEED:
error = madvise_willneed(vma, prev, start, end);
@@ -190,6 +223,8 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
* some pages ahead.
* MADV_DONTNEED - the application is finished with the given range,
* so the kernel can free resources associated with it.
+ * MADV_REMOVE - the application wants to free up the given range of
+ * pages and associated backing store.
*
* return values:
* zero - success
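A minimal userspace sketch of the new operation follows, assuming the installed headers define MADV_REMOVE (the value is deliberately not hard-coded here); a MAP_SHARED|MAP_ANONYMOUS mapping is shmfs/tmpfs-backed and therefore falls in the supported case:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 16 * page;
	/* Shared anonymous memory is shmfs/tmpfs-backed: the supported case. */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0xff, len);		/* instantiate the backing pages */
#ifdef MADV_REMOVE
	/* Punch out pages 4..7; unsupported filesystems get -ENOSYS. */
	if (madvise(p + 4 * page, 4 * page, MADV_REMOVE) != 0)
		perror("madvise(MADV_REMOVE)");
#endif
	return 0;
}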
diff --git a/mm/memory.c b/mm/memory.c
index d8dde07a365..7a11ddd5060 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1498,7 +1498,7 @@ gotten:
update_mmu_cache(vma, address, entry);
lazy_mmu_prot_update(entry);
lru_cache_add_active(new_page);
- page_add_anon_rmap(new_page, vma, address);
+ page_add_new_anon_rmap(new_page, vma, address);
/* Free the old page.. */
new_page = old_page;
@@ -1770,9 +1770,32 @@ out_big:
out_busy:
return -ETXTBSY;
}
-
EXPORT_SYMBOL(vmtruncate);
+int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
+{
+ struct address_space *mapping = inode->i_mapping;
+
+ /*
+ * If the underlying filesystem is not going to provide
+ * a way to truncate a range of blocks (punch a hole) -
+ * we should return failure right now.
+ */
+ if (!inode->i_op || !inode->i_op->truncate_range)
+ return -ENOSYS;
+
+ mutex_lock(&inode->i_mutex);
+ down_write(&inode->i_alloc_sem);
+ unmap_mapping_range(mapping, offset, (end - offset), 1);
+ truncate_inode_pages_range(mapping, offset, end);
+ inode->i_op->truncate_range(inode, offset, end);
+ up_write(&inode->i_alloc_sem);
+ mutex_unlock(&inode->i_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(vmtruncate_range);
+
/*
* Primitive swap readahead code. We simply read an aligned block of
* (1 << page_cluster) entries in the swap area. This method is chosen
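vmtruncate_range() refuses with -ENOSYS unless the filesystem opts in; a filesystem that can punch holes would advertise the hook roughly as sketched below (a hypothetical example, not in-tree code; tmpfs is the expected real user of this operation):

#include <linux/fs.h>

/* sketch: hypothetical filesystem support for the new truncate_range hook */
static void examplefs_truncate_range(struct inode *inode, loff_t start,
				     loff_t end)
{
	/* release whatever blocks back the byte range [start, end] */
}

static struct inode_operations examplefs_inode_operations = {
	.truncate_range	= examplefs_truncate_range,
};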
@@ -1954,8 +1977,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto release;
inc_mm_counter(mm, anon_rss);
lru_cache_add_active(page);
- SetPageReferenced(page);
- page_add_anon_rmap(page, vma, address);
+ page_add_new_anon_rmap(page, vma, address);
} else {
/* Map the ZERO_PAGE - vm_page_prot is readonly */
page = ZERO_PAGE(address);
@@ -2086,7 +2108,7 @@ retry:
if (anon) {
inc_mm_counter(mm, anon_rss);
lru_cache_add_active(new_page);
- page_add_anon_rmap(new_page, vma, address);
+ page_add_new_anon_rmap(new_page, vma, address);
} else {
inc_mm_counter(mm, file_rss);
page_add_file_rmap(new_page);
@@ -2245,6 +2267,8 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
}
+EXPORT_SYMBOL_GPL(__handle_mm_fault);
+
#ifndef __PAGETABLE_PUD_FOLDED
/*
* Allocate page upper directory.
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index f6d4af8af8a..a918f77f02f 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -42,7 +42,6 @@ extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
int nr_pages);
static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
- struct pglist_data *pgdat = zone->zone_pgdat;
int nr_pages = PAGES_PER_SECTION;
int ret;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 72f402cc9c9..b62cab575a8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -83,9 +83,18 @@
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/mempolicy.h>
+#include <linux/swap.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
+/* Internal flags */
+#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
+#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
+#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2) /* Gather statistics */
+
static kmem_cache_t *policy_cache;
static kmem_cache_t *sn_cache;
@@ -93,7 +102,7 @@ static kmem_cache_t *sn_cache;
/* Highest zone. A specific allocation for a zone below that is not
policied. */
-static int policy_zone;
+int policy_zone = ZONE_DMA;
struct mempolicy default_policy = {
.refcnt = ATOMIC_INIT(1), /* never free it */
@@ -131,17 +140,8 @@ static struct zonelist *bind_zonelist(nodemask_t *nodes)
if (!zl)
return NULL;
num = 0;
- for_each_node_mask(nd, *nodes) {
- int k;
- for (k = MAX_NR_ZONES-1; k >= 0; k--) {
- struct zone *z = &NODE_DATA(nd)->node_zones[k];
- if (!z->present_pages)
- continue;
- zl->zones[num++] = z;
- if (k > policy_zone)
- policy_zone = k;
- }
- }
+ for_each_node_mask(nd, *nodes)
+ zl->zones[num++] = &NODE_DATA(nd)->node_zones[policy_zone];
zl->zones[num] = NULL;
return zl;
}
@@ -180,12 +180,19 @@ static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
break;
}
policy->policy = mode;
+ policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
return policy;
}
-/* Ensure all existing pages follow the policy. */
+static void gather_stats(struct page *, void *);
+static void migrate_page_add(struct vm_area_struct *vma,
+ struct page *page, struct list_head *pagelist, unsigned long flags);
+
+/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, unsigned long end, nodemask_t *nodes)
+ unsigned long addr, unsigned long end,
+ const nodemask_t *nodes, unsigned long flags,
+ void *private)
{
pte_t *orig_pte;
pte_t *pte;
@@ -201,8 +208,20 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
page = vm_normal_page(vma, addr, *pte);
if (!page)
continue;
+ if (PageReserved(page))
+ continue;
nid = page_to_nid(page);
- if (!node_isset(nid, *nodes))
+ if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
+ continue;
+
+ if (flags & MPOL_MF_STATS)
+ gather_stats(page, private);
+ else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+ spin_unlock(ptl);
+ migrate_page_add(vma, page, private, flags);
+ spin_lock(ptl);
+ }
+ else
break;
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_unmap_unlock(orig_pte, ptl);
@@ -210,7 +229,9 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
}
static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
- unsigned long addr, unsigned long end, nodemask_t *nodes)
+ unsigned long addr, unsigned long end,
+ const nodemask_t *nodes, unsigned long flags,
+ void *private)
{
pmd_t *pmd;
unsigned long next;
@@ -220,14 +241,17 @@ static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
next = pmd_addr_end(addr, end);
if (pmd_none_or_clear_bad(pmd))
continue;
- if (check_pte_range(vma, pmd, addr, next, nodes))
+ if (check_pte_range(vma, pmd, addr, next, nodes,
+ flags, private))
return -EIO;
} while (pmd++, addr = next, addr != end);
return 0;
}
static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
- unsigned long addr, unsigned long end, nodemask_t *nodes)
+ unsigned long addr, unsigned long end,
+ const nodemask_t *nodes, unsigned long flags,
+ void *private)
{
pud_t *pud;
unsigned long next;
@@ -237,14 +261,17 @@ static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
continue;
- if (check_pmd_range(vma, pud, addr, next, nodes))
+ if (check_pmd_range(vma, pud, addr, next, nodes,
+ flags, private))
return -EIO;
} while (pud++, addr = next, addr != end);
return 0;
}
static inline int check_pgd_range(struct vm_area_struct *vma,
- unsigned long addr, unsigned long end, nodemask_t *nodes)
+ unsigned long addr, unsigned long end,
+ const nodemask_t *nodes, unsigned long flags,
+ void *private)
{
pgd_t *pgd;
unsigned long next;
@@ -254,16 +281,30 @@ static inline int check_pgd_range(struct vm_area_struct *vma,
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
- if (check_pud_range(vma, pgd, addr, next, nodes))
+ if (check_pud_range(vma, pgd, addr, next, nodes,
+ flags, private))
return -EIO;
} while (pgd++, addr = next, addr != end);
return 0;
}
-/* Step 1: check the range */
+/* Check if a vma is migratable */
+static inline int vma_migratable(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & (
+ VM_LOCKED|VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
+ return 0;
+ return 1;
+}
+
+/*
+ * Check if all pages in a range are on a set of nodes.
+ * If pagelist != NULL then isolate pages from the LRU and
+ * put them on the pagelist.
+ */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
- nodemask_t *nodes, unsigned long flags)
+ const nodemask_t *nodes, unsigned long flags, void *private)
{
int err;
struct vm_area_struct *first, *vma, *prev;
@@ -273,17 +314,24 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
return ERR_PTR(-EFAULT);
prev = NULL;
for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
- if (!vma->vm_next && vma->vm_end < end)
- return ERR_PTR(-EFAULT);
- if (prev && prev->vm_end < vma->vm_start)
- return ERR_PTR(-EFAULT);
- if ((flags & MPOL_MF_STRICT) && !is_vm_hugetlb_page(vma)) {
+ if (!(flags & MPOL_MF_DISCONTIG_OK)) {
+ if (!vma->vm_next && vma->vm_end < end)
+ return ERR_PTR(-EFAULT);
+ if (prev && prev->vm_end < vma->vm_start)
+ return ERR_PTR(-EFAULT);
+ }
+ if (!is_vm_hugetlb_page(vma) &&
+ ((flags & MPOL_MF_STRICT) ||
+ ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
+ vma_migratable(vma)))) {
unsigned long endvma = vma->vm_end;
+
if (endvma > end)
endvma = end;
if (vma->vm_start > start)
start = vma->vm_start;
- err = check_pgd_range(vma, start, endvma, nodes);
+ err = check_pgd_range(vma, start, endvma, nodes,
+ flags, private);
if (err) {
first = ERR_PTR(err);
break;
@@ -342,51 +390,10 @@ static int contextualize_policy(int mode, nodemask_t *nodes)
if (!nodes)
return 0;
- /* Update current mems_allowed */
- cpuset_update_current_mems_allowed();
- /* Ignore nodes not set in current->mems_allowed */
- cpuset_restrict_to_mems_allowed(nodes->bits);
- return mpol_check_policy(mode, nodes);
-}
-
-long do_mbind(unsigned long start, unsigned long len,
- unsigned long mode, nodemask_t *nmask, unsigned long flags)
-{
- struct vm_area_struct *vma;
- struct mm_struct *mm = current->mm;
- struct mempolicy *new;
- unsigned long end;
- int err;
-
- if ((flags & ~(unsigned long)(MPOL_MF_STRICT)) || mode > MPOL_MAX)
- return -EINVAL;
- if (start & ~PAGE_MASK)
- return -EINVAL;
- if (mode == MPOL_DEFAULT)
- flags &= ~MPOL_MF_STRICT;
- len = (len + PAGE_SIZE - 1) & PAGE_MASK;
- end = start + len;
- if (end < start)
- return -EINVAL;
- if (end == start)
- return 0;
- if (mpol_check_policy(mode, nmask))
+ cpuset_update_task_memory_state();
+ if (!cpuset_nodes_subset_current_mems_allowed(*nodes))
return -EINVAL;
- new = mpol_new(mode, nmask);
- if (IS_ERR(new))
- return PTR_ERR(new);
-
- PDprintk("mbind %lx-%lx mode:%ld nodes:%lx\n",start,start+len,
- mode,nodes_addr(nodes)[0]);
-
- down_write(&mm->mmap_sem);
- vma = check_range(mm, start, end, nmask, flags);
- err = PTR_ERR(vma);
- if (!IS_ERR(vma))
- err = mbind_range(vma, start, end, new);
- up_write(&mm->mmap_sem);
- mpol_free(new);
- return err;
+ return mpol_check_policy(mode, nodes);
}
/* Set the process memory policy */
@@ -457,7 +464,7 @@ long do_get_mempolicy(int *policy, nodemask_t *nmask,
struct vm_area_struct *vma = NULL;
struct mempolicy *pol = current->mempolicy;
- cpuset_update_current_mems_allowed();
+ cpuset_update_task_memory_state();
if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR))
return -EINVAL;
if (flags & MPOL_F_ADDR) {
@@ -509,11 +516,177 @@ long do_get_mempolicy(int *policy, nodemask_t *nmask,
}
/*
+ * page migration
+ */
+
+/* Check if we are the only process mapping the page in question */
+static inline int single_mm_mapping(struct mm_struct *mm,
+ struct address_space *mapping)
+{
+ struct vm_area_struct *vma;
+ struct prio_tree_iter iter;
+ int rc = 1;
+
+ spin_lock(&mapping->i_mmap_lock);
+ vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
+ if (mm != vma->vm_mm) {
+ rc = 0;
+ goto out;
+ }
+ list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
+ if (mm != vma->vm_mm) {
+ rc = 0;
+ goto out;
+ }
+out:
+ spin_unlock(&mapping->i_mmap_lock);
+ return rc;
+}
+
+/*
+ * Add a page to be migrated to the pagelist
+ */
+static void migrate_page_add(struct vm_area_struct *vma,
+ struct page *page, struct list_head *pagelist, unsigned long flags)
+{
+ /*
+ * Avoid migrating a page that is shared by others and not writable.
+ */
+ if ((flags & MPOL_MF_MOVE_ALL) || !page->mapping || PageAnon(page) ||
+ mapping_writably_mapped(page->mapping) ||
+ single_mm_mapping(vma->vm_mm, page->mapping)) {
+ int rc = isolate_lru_page(page);
+
+ if (rc == 1)
+ list_add(&page->lru, pagelist);
+ /*
+ * If the isolate attempt was not successful then we just
+ * encountered an unswappable page. Something must be wrong.
+ */
+ WARN_ON(rc == 0);
+ }
+}
+
+static int swap_pages(struct list_head *pagelist)
+{
+ LIST_HEAD(moved);
+ LIST_HEAD(failed);
+ int n;
+
+ n = migrate_pages(pagelist, NULL, &moved, &failed);
+ putback_lru_pages(&failed);
+ putback_lru_pages(&moved);
+
+ return n;
+}
+
+/*
+ * For now migrate_pages simply swaps out the pages from nodes that are in
+ * the source set but not in the target set. In the future, we would
+ * want a function that moves pages between the two nodesets in such
+ * a way as to preserve the physical layout as much as possible.
+ *
+ * Returns the number of pages that could not be moved.
+ */
+int do_migrate_pages(struct mm_struct *mm,
+ const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
+{
+ LIST_HEAD(pagelist);
+ int count = 0;
+ nodemask_t nodes;
+
+ nodes_andnot(nodes, *from_nodes, *to_nodes);
+
+ down_read(&mm->mmap_sem);
+ check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nodes,
+ flags | MPOL_MF_DISCONTIG_OK, &pagelist);
+
+ if (!list_empty(&pagelist)) {
+ count = swap_pages(&pagelist);
+ putback_lru_pages(&pagelist);
+ }
+
+ up_read(&mm->mmap_sem);
+ return count;
+}
+
+long do_mbind(unsigned long start, unsigned long len,
+ unsigned long mode, nodemask_t *nmask, unsigned long flags)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ struct mempolicy *new;
+ unsigned long end;
+ int err;
+ LIST_HEAD(pagelist);
+
+ if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
+ MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+ || mode > MPOL_MAX)
+ return -EINVAL;
+ if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_RESOURCE))
+ return -EPERM;
+
+ if (start & ~PAGE_MASK)
+ return -EINVAL;
+
+ if (mode == MPOL_DEFAULT)
+ flags &= ~MPOL_MF_STRICT;
+
+ len = (len + PAGE_SIZE - 1) & PAGE_MASK;
+ end = start + len;
+
+ if (end < start)
+ return -EINVAL;
+ if (end == start)
+ return 0;
+
+ if (mpol_check_policy(mode, nmask))
+ return -EINVAL;
+
+ new = mpol_new(mode, nmask);
+ if (IS_ERR(new))
+ return PTR_ERR(new);
+
+ /*
+ * If we are using the default policy then operating
+ * on discontinuous address spaces is okay after all.
+ */
+ if (!new)
+ flags |= MPOL_MF_DISCONTIG_OK;
+
+ PDprintk("mbind %lx-%lx mode:%ld nodes:%lx\n",start,start+len,
+ mode,nodes_addr(nodes)[0]);
+
+ down_write(&mm->mmap_sem);
+ vma = check_range(mm, start, end, nmask,
+ flags | MPOL_MF_INVERT, &pagelist);
+
+ err = PTR_ERR(vma);
+ if (!IS_ERR(vma)) {
+ int nr_failed = 0;
+
+ err = mbind_range(vma, start, end, new);
+ if (!list_empty(&pagelist))
+ nr_failed = swap_pages(&pagelist);
+
+ if (!err && nr_failed && (flags & MPOL_MF_STRICT))
+ err = -EIO;
+ }
+ if (!list_empty(&pagelist))
+ putback_lru_pages(&pagelist);
+
+ up_write(&mm->mmap_sem);
+ mpol_free(new);
+ return err;
+}
+
+/*
* User space interface with variable sized bitmaps for nodelists.
*/
/* Copy a node mask from user space. */
-static int get_nodes(nodemask_t *nodes, unsigned long __user *nmask,
+static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
unsigned long maxnode)
{
unsigned long k;
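From userspace, the new MPOL_MF_MOVE flag turns mbind() from a pure policy change into policy-plus-migration of pages already resident on the wrong nodes. A hedged sketch, assuming a libnuma <numaif.h> recent enough to declare mbind() and the MPOL_MF_MOVE / MPOL_MF_STRICT constants:

#define _GNU_SOURCE
#include <numaif.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 64 * page;
	unsigned long nodemask = 1UL << 1;	/* target: node 1 only */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	memset(p, 0, len);	/* fault pages in, possibly on other nodes */

	/* Bind to node 1 and migrate anything already allocated elsewhere. */
	if (mbind(p, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8,
		  MPOL_MF_MOVE | MPOL_MF_STRICT) != 0)
		perror("mbind");
	return 0;
}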
@@ -602,6 +775,65 @@ asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
return do_set_mempolicy(mode, &nodes);
}
+asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
+ const unsigned long __user *old_nodes,
+ const unsigned long __user *new_nodes)
+{
+ struct mm_struct *mm;
+ struct task_struct *task;
+ nodemask_t old;
+ nodemask_t new;
+ nodemask_t task_nodes;
+ int err;
+
+ err = get_nodes(&old, old_nodes, maxnode);
+ if (err)
+ return err;
+
+ err = get_nodes(&new, new_nodes, maxnode);
+ if (err)
+ return err;
+
+ /* Find the mm_struct */
+ read_lock(&tasklist_lock);
+ task = pid ? find_task_by_pid(pid) : current;
+ if (!task) {
+ read_unlock(&tasklist_lock);
+ return -ESRCH;
+ }
+ mm = get_task_mm(task);
+ read_unlock(&tasklist_lock);
+
+ if (!mm)
+ return -EINVAL;
+
+ /*
+ * Check if this process has the right to modify the specified
+ * process. The right exists if the process has administrative
+ * capabilities, superuser privileges or the same
+ * userid as the target process.
+ */
+ if ((current->euid != task->suid) && (current->euid != task->uid) &&
+ (current->uid != task->suid) && (current->uid != task->uid) &&
+ !capable(CAP_SYS_ADMIN)) {
+ err = -EPERM;
+ goto out;
+ }
+
+ task_nodes = cpuset_mems_allowed(task);
+ /* Is the user allowed to access the target nodes? */
+ if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_ADMIN)) {
+ err = -EPERM;
+ goto out;
+ }
+
+ err = do_migrate_pages(mm, &old, &new, MPOL_MF_MOVE);
+out:
+ mmput(mm);
+ return err;
+}
+
+
/* Retrieve NUMA policy */
asmlinkage long sys_get_mempolicy(int __user *policy,
unsigned long __user *nmask,
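The new syscall takes (pid, maxnode, old_nodes, new_nodes); there is no library wrapper yet, so a caller would go through syscall() directly, assuming the architecture defines __NR_migrate_pages (an assumption here, not supplied by this hunk):

#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* Sketch: move every page of `pid` that sits on node 0 over to node 2. */
static long move_node0_to_node2(pid_t pid)
{
	unsigned long old_nodes = 1UL << 0;	/* from: node 0 */
	unsigned long new_nodes = 1UL << 2;	/* to:   node 2 */
	unsigned long maxnode = sizeof(old_nodes) * 8;

	return syscall(__NR_migrate_pages, pid, maxnode,
		       &old_nodes, &new_nodes);
}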
@@ -708,8 +940,8 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
#endif
/* Return effective policy for a VMA */
-struct mempolicy *
-get_vma_policy(struct task_struct *task, struct vm_area_struct *vma, unsigned long addr)
+static struct mempolicy * get_vma_policy(struct task_struct *task,
+ struct vm_area_struct *vma, unsigned long addr)
{
struct mempolicy *pol = task->mempolicy;
@@ -785,6 +1017,34 @@ static unsigned offset_il_node(struct mempolicy *pol,
return nid;
}
+/* Determine a node number for interleave */
+static inline unsigned interleave_nid(struct mempolicy *pol,
+ struct vm_area_struct *vma, unsigned long addr, int shift)
+{
+ if (vma) {
+ unsigned long off;
+
+ off = vma->vm_pgoff;
+ off += (addr - vma->vm_start) >> shift;
+ return offset_il_node(pol, vma, off);
+ } else
+ return interleave_nodes(pol);
+}
+
+/* Return a zonelist suitable for a huge page allocation. */
+struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
+{
+ struct mempolicy *pol = get_vma_policy(current, vma, addr);
+
+ if (pol->policy == MPOL_INTERLEAVE) {
+ unsigned nid;
+
+ nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
+ return NODE_DATA(nid)->node_zonelists + gfp_zone(GFP_HIGHUSER);
+ }
+ return zonelist_policy(GFP_HIGHUSER, pol);
+}
+
/* Allocate a page in interleaved policy.
Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
@@ -829,19 +1089,12 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
{
struct mempolicy *pol = get_vma_policy(current, vma, addr);
- cpuset_update_current_mems_allowed();
+ cpuset_update_task_memory_state();
if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
unsigned nid;
- if (vma) {
- unsigned long off;
- off = vma->vm_pgoff;
- off += (addr - vma->vm_start) >> PAGE_SHIFT;
- nid = offset_il_node(pol, vma, off);
- } else {
- /* fall back to process interleaving */
- nid = interleave_nodes(pol);
- }
+
+ nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
return alloc_page_interleave(gfp, 0, nid);
}
return __alloc_pages(gfp, 0, zonelist_policy(gfp, pol));
@@ -862,7 +1115,7 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
* interrupt context and apply the current process NUMA policy.
* Returns NULL when no page can be allocated.
*
- * Don't call cpuset_update_current_mems_allowed() unless
+ * Don't call cpuset_update_task_memory_state() unless
* 1) it's ok to take cpuset_sem (can WAIT), and
* 2) allocating for current task (not interrupt).
*/
@@ -871,7 +1124,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
struct mempolicy *pol = current->mempolicy;
if ((gfp & __GFP_WAIT) && !in_interrupt())
- cpuset_update_current_mems_allowed();
+ cpuset_update_task_memory_state();
if (!pol || in_interrupt())
pol = &default_policy;
if (pol->policy == MPOL_INTERLEAVE)
@@ -880,6 +1133,15 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
}
EXPORT_SYMBOL(alloc_pages_current);
+/*
+ * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
+ * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
+ * with the mems_allowed returned by cpuset_mems_allowed(). This
+ * keeps mempolicies cpuset-relative after their cpuset moves. See
+ * also kernel/cpuset.c update_nodemask().
+ */
+void *cpuset_being_rebound;
+
/* Slow path of a mempolicy copy */
struct mempolicy *__mpol_copy(struct mempolicy *old)
{
@@ -887,6 +1149,10 @@ struct mempolicy *__mpol_copy(struct mempolicy *old)
if (!new)
return ERR_PTR(-ENOMEM);
+ if (current_cpuset_is_being_rebound()) {
+ nodemask_t mems = cpuset_mems_allowed(current);
+ mpol_rebind_policy(old, &mems);
+ }
*new = *old;
atomic_set(&new->refcnt, 1);
if (new->policy == MPOL_BIND) {
@@ -940,54 +1206,6 @@ void __mpol_free(struct mempolicy *p)
}
/*
- * Hugetlb policy. Same as above, just works with node numbers instead of
- * zonelists.
- */
-
-/* Find first node suitable for an allocation */
-int mpol_first_node(struct vm_area_struct *vma, unsigned long addr)
-{
- struct mempolicy *pol = get_vma_policy(current, vma, addr);
-
- switch (pol->policy) {
- case MPOL_DEFAULT:
- return numa_node_id();
- case MPOL_BIND:
- return pol->v.zonelist->zones[0]->zone_pgdat->node_id;
- case MPOL_INTERLEAVE:
- return interleave_nodes(pol);
- case MPOL_PREFERRED:
- return pol->v.preferred_node >= 0 ?
- pol->v.preferred_node : numa_node_id();
- }
- BUG();
- return 0;
-}
-
-/* Find secondary valid nodes for an allocation */
-int mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long addr)
-{
- struct mempolicy *pol = get_vma_policy(current, vma, addr);
-
- switch (pol->policy) {
- case MPOL_PREFERRED:
- case MPOL_DEFAULT:
- case MPOL_INTERLEAVE:
- return 1;
- case MPOL_BIND: {
- struct zone **z;
- for (z = pol->v.zonelist->zones; *z; z++)
- if ((*z)->zone_pgdat->node_id == nid)
- return 1;
- return 0;
- }
- default:
- BUG();
- return 0;
- }
-}
-
-/*
* Shared memory backing store policy support.
*
* Remember policies even when nobody has shared memory mapped.
@@ -1209,25 +1427,31 @@ void numa_default_policy(void)
}
/* Migrate a policy to a different set of nodes */
-static void rebind_policy(struct mempolicy *pol, const nodemask_t *old,
- const nodemask_t *new)
+void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
+ nodemask_t *mpolmask;
nodemask_t tmp;
if (!pol)
return;
+ mpolmask = &pol->cpuset_mems_allowed;
+ if (nodes_equal(*mpolmask, *newmask))
+ return;
switch (pol->policy) {
case MPOL_DEFAULT:
break;
case MPOL_INTERLEAVE:
- nodes_remap(tmp, pol->v.nodes, *old, *new);
+ nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
pol->v.nodes = tmp;
- current->il_next = node_remap(current->il_next, *old, *new);
+ *mpolmask = *newmask;
+ current->il_next = node_remap(current->il_next,
+ *mpolmask, *newmask);
break;
case MPOL_PREFERRED:
pol->v.preferred_node = node_remap(pol->v.preferred_node,
- *old, *new);
+ *mpolmask, *newmask);
+ *mpolmask = *newmask;
break;
case MPOL_BIND: {
nodemask_t nodes;
@@ -1237,7 +1461,7 @@ static void rebind_policy(struct mempolicy *pol, const nodemask_t *old,
nodes_clear(nodes);
for (z = pol->v.zonelist->zones; *z; z++)
node_set((*z)->zone_pgdat->node_id, nodes);
- nodes_remap(tmp, nodes, *old, *new);
+ nodes_remap(tmp, nodes, *mpolmask, *newmask);
nodes = tmp;
zonelist = bind_zonelist(&nodes);
@@ -1252,6 +1476,7 @@ static void rebind_policy(struct mempolicy *pol, const nodemask_t *old,
kfree(pol->v.zonelist);
pol->v.zonelist = zonelist;
}
+ *mpolmask = *newmask;
break;
}
default:
@@ -1261,12 +1486,156 @@ static void rebind_policy(struct mempolicy *pol, const nodemask_t *old,
}
/*
- * Someone moved this task to different nodes. Fixup mempolicies.
+ * Wrapper for mpol_rebind_policy() that just requires a task
+ * pointer, and updates the task's mempolicy.
+ */
+
+void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
+{
+ mpol_rebind_policy(tsk->mempolicy, new);
+}
+
+/*
+ * Rebind each vma in mm to new nodemask.
*
- * TODO - fixup current->mm->vma and shmfs/tmpfs/hugetlbfs policies as well,
- * once we have a cpuset mechanism to mark which cpuset subtree is migrating.
+ * Call holding a reference to mm. Takes mm->mmap_sem during call.
*/
-void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new)
+
+void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
- rebind_policy(current->mempolicy, old, new);
+ struct vm_area_struct *vma;
+
+ down_write(&mm->mmap_sem);
+ for (vma = mm->mmap; vma; vma = vma->vm_next)
+ mpol_rebind_policy(vma->vm_policy, new);
+ up_write(&mm->mmap_sem);
}
+
+/*
+ * Display pages allocated per node and memory policy via /proc.
+ */
+
+static const char *policy_types[] = { "default", "prefer", "bind",
+ "interleave" };
+
+/*
+ * Convert a mempolicy into a string.
+ * Returns the number of characters in buffer (if positive)
+ * or an error (negative)
+ */
+static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
+{
+ char *p = buffer;
+ int l;
+ nodemask_t nodes;
+ int mode = pol ? pol->policy : MPOL_DEFAULT;
+
+ switch (mode) {
+ case MPOL_DEFAULT:
+ nodes_clear(nodes);
+ break;
+
+ case MPOL_PREFERRED:
+ nodes_clear(nodes);
+ node_set(pol->v.preferred_node, nodes);
+ break;
+
+ case MPOL_BIND:
+ get_zonemask(pol, &nodes);
+ break;
+
+ case MPOL_INTERLEAVE:
+ nodes = pol->v.nodes;
+ break;
+
+ default:
+ BUG();
+ return -EFAULT;
+ }
+
+ l = strlen(policy_types[mode]);
+ if (buffer + maxlen < p + l + 1)
+ return -ENOSPC;
+
+ strcpy(p, policy_types[mode]);
+ p += l;
+
+ if (!nodes_empty(nodes)) {
+ if (buffer + maxlen < p + 2)
+ return -ENOSPC;
+ *p++ = '=';
+ p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
+ }
+ return p - buffer;
+}
+
+struct numa_maps {
+ unsigned long pages;
+ unsigned long anon;
+ unsigned long mapped;
+ unsigned long mapcount_max;
+ unsigned long node[MAX_NUMNODES];
+};
+
+static void gather_stats(struct page *page, void *private)
+{
+ struct numa_maps *md = private;
+ int count = page_mapcount(page);
+
+ if (count)
+ md->mapped++;
+
+ if (count > md->mapcount_max)
+ md->mapcount_max = count;
+
+ md->pages++;
+
+ if (PageAnon(page))
+ md->anon++;
+
+ md->node[page_to_nid(page)]++;
+ cond_resched();
+}
+
+int show_numa_map(struct seq_file *m, void *v)
+{
+ struct task_struct *task = m->private;
+ struct vm_area_struct *vma = v;
+ struct numa_maps *md;
+ int n;
+ char buffer[50];
+
+ if (!vma->vm_mm)
+ return 0;
+
+ md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
+ if (!md)
+ return 0;
+
+ check_pgd_range(vma, vma->vm_start, vma->vm_end,
+ &node_online_map, MPOL_MF_STATS, md);
+
+ if (md->pages) {
+ mpol_to_str(buffer, sizeof(buffer),
+ get_vma_policy(task, vma, vma->vm_start));
+
+ seq_printf(m, "%08lx %s pages=%lu mapped=%lu maxref=%lu",
+ vma->vm_start, buffer, md->pages,
+ md->mapped, md->mapcount_max);
+
+ if (md->anon)
+ seq_printf(m," anon=%lu",md->anon);
+
+ for_each_online_node(n)
+ if (md->node[n])
+ seq_printf(m, " N%d=%lu", n, md->node[n]);
+
+ seq_putc(m, '\n');
+ }
+ kfree(md);
+
+ if (m->count < m->size)
+ m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
+ return 0;
+}
+
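Putting mpol_to_str() and gather_stats() together, a /proc/<pid>/numa_maps line produced by show_numa_map() would read roughly like the following (numbers invented for illustration; the layout follows the seq_printf() calls above):

    2aaaaaaab000 interleave=0-3 pages=512 mapped=512 maxref=1 anon=512 N0=128 N1=128 N2=128 N3=128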
diff --git a/mm/mlock.c b/mm/mlock.c
index 4ae3a46ff76..b90c59573ab 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -5,6 +5,7 @@
* (C) Copyright 2002 Christoph Hellwig
*/
+#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/mempolicy.h>
diff --git a/mm/mmap.c b/mm/mmap.c
index 64ba4dbcb7d..47556d2b3e9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -13,6 +13,7 @@
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
+#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
diff --git a/mm/mremap.c b/mm/mremap.c
index ddaeee9a0b6..1903bdf65e4 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -13,6 +13,7 @@
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/swap.h>
+#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
diff --git a/mm/msync.c b/mm/msync.c
index 1b5b6f662dc..3563a56e1a5 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -137,7 +137,7 @@ static int msync_interval(struct vm_area_struct *vma,
ret = filemap_fdatawrite(mapping);
if (file->f_op && file->f_op->fsync) {
/*
- * We don't take i_sem here because mmap_sem
+ * We don't take i_mutex here because mmap_sem
* is already held.
*/
err = file->f_op->fsync(file,file->f_dentry,1);
diff --git a/mm/nommu.c b/mm/nommu.c
index c1196812876..c10262d6823 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1177,3 +1177,10 @@ int in_gate_area_no_task(unsigned long addr)
{
return 0;
}
+
+struct page *filemap_nopage(struct vm_area_struct *area,
+ unsigned long address, int *type)
+{
+ BUG();
+ return NULL;
+}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index d348b903595..4748b906aff 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -298,7 +298,8 @@ retry:
/*
* Give "p" a good chance of killing itself before we
- * retry to allocate memory.
+ * retry to allocate memory unless "p" is current
*/
- schedule_timeout_interruptible(1);
+ if (!test_thread_flag(TIF_MEMDIE))
+ schedule_timeout_interruptible(1);
}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0166ea15c9e..5240e426c1f 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -550,11 +550,17 @@ void __init page_writeback_init(void)
int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
+ int ret;
+
if (wbc->nr_to_write <= 0)
return 0;
+ wbc->for_writepages = 1;
if (mapping->a_ops->writepages)
- return mapping->a_ops->writepages(mapping, wbc);
- return generic_writepages(mapping, wbc);
+ ret = mapping->a_ops->writepages(mapping, wbc);
+ else
+ ret = generic_writepages(mapping, wbc);
+ wbc->for_writepages = 0;
+ return ret;
}
/**
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fe14a8c87fc..8c960b46959 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -36,6 +36,7 @@
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
+#include <linux/mempolicy.h>
#include <asm/tlbflush.h>
#include "internal.h"
@@ -52,6 +53,9 @@ struct pglist_data *pgdat_list __read_mostly;
unsigned long totalram_pages __read_mostly;
unsigned long totalhigh_pages __read_mostly;
long nr_swap_pages;
+int percpu_pagelist_fraction;
+
+static void fastcall free_hot_cold_page(struct page *page, int cold);
/*
* results with 256, 32 in the lowmem_reserve sysctl:
@@ -81,6 +85,7 @@ int min_free_kbytes = 1024;
unsigned long __initdata nr_kernel_pages;
unsigned long __initdata nr_all_pages;
+#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
int ret = 0;
@@ -122,16 +127,23 @@ static int bad_range(struct zone *zone, struct page *page)
return 0;
}
-static void bad_page(const char *function, struct page *page)
+#else
+static inline int bad_range(struct zone *zone, struct page *page)
{
- printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n",
- function, current->comm, page);
- printk(KERN_EMERG "flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
- (int)(2*sizeof(unsigned long)), (unsigned long)page->flags,
- page->mapping, page_mapcount(page), page_count(page));
- printk(KERN_EMERG "Backtrace:\n");
+ return 0;
+}
+#endif
+
+static void bad_page(struct page *page)
+{
+ printk(KERN_EMERG "Bad page state in process '%s'\n"
+ KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
+ KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
+ KERN_EMERG "Backtrace:\n",
+ current->comm, page, (int)(2*sizeof(unsigned long)),
+ (unsigned long)page->flags, page->mapping,
+ page_mapcount(page), page_count(page));
dump_stack();
- printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n");
page->flags &= ~(1 << PG_lru |
1 << PG_private |
1 << PG_locked |
@@ -184,19 +196,15 @@ static void destroy_compound_page(struct page *page, unsigned long order)
int i;
int nr_pages = 1 << order;
- if (!PageCompound(page))
- return;
-
- if (page[1].index != order)
- bad_page(__FUNCTION__, page);
+ if (unlikely(page[1].index != order))
+ bad_page(page);
for (i = 0; i < nr_pages; i++) {
struct page *p = page + i;
- if (!PageCompound(p))
- bad_page(__FUNCTION__, page);
- if (page_private(p) != (unsigned long)page)
- bad_page(__FUNCTION__, page);
+ if (unlikely(!PageCompound(p) |
+ (page_private(p) != (unsigned long)page)))
+ bad_page(page);
ClearPageCompound(p);
}
}
@@ -255,14 +263,20 @@ __find_combined_index(unsigned long page_idx, unsigned int order)
/*
* This function checks whether a page is free && is the buddy
* we can coalesce a page and its buddy if
- * (a) the buddy is free &&
- * (b) the buddy is on the buddy system &&
- * (c) a page and its buddy have the same order.
+ * (a) the buddy is not in a hole &&
+ * (b) the buddy is free &&
+ * (c) the buddy is on the buddy system &&
+ * (d) a page and its buddy have the same order.
* for recording page's order, we use page_private(page) and PG_private.
*
*/
static inline int page_is_buddy(struct page *page, int order)
{
+#ifdef CONFIG_HOLES_IN_ZONE
+ if (!pfn_valid(page_to_pfn(page)))
+ return 0;
+#endif
+
if (PagePrivate(page) &&
(page_order(page) == order) &&
page_count(page) == 0)
@@ -294,13 +308,13 @@ static inline int page_is_buddy(struct page *page, int order)
* -- wli
*/
-static inline void __free_pages_bulk (struct page *page,
+static inline void __free_one_page(struct page *page,
struct zone *zone, unsigned int order)
{
unsigned long page_idx;
int order_size = 1 << order;
- if (unlikely(order))
+ if (unlikely(PageCompound(page)))
destroy_compound_page(page, order);
page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
@@ -314,17 +328,15 @@ static inline void __free_pages_bulk (struct page *page,
struct free_area *area;
struct page *buddy;
- combined_idx = __find_combined_index(page_idx, order);
buddy = __page_find_buddy(page, page_idx, order);
-
- if (bad_range(zone, buddy))
- break;
if (!page_is_buddy(buddy, order))
break; /* Move the buddy up one level. */
+
list_del(&buddy->lru);
area = zone->free_area + order;
area->nr_free--;
rmv_page_order(buddy);
+ combined_idx = __find_combined_index(page_idx, order);
page = page + (combined_idx - page_idx);
page_idx = combined_idx;
order++;
@@ -334,11 +346,11 @@ static inline void __free_pages_bulk (struct page *page,
zone->free_area[order].nr_free++;
}
-static inline int free_pages_check(const char *function, struct page *page)
+static inline int free_pages_check(struct page *page)
{
- if ( page_mapcount(page) ||
- page->mapping != NULL ||
- page_count(page) != 0 ||
+ if (unlikely(page_mapcount(page) |
+ (page->mapping != NULL) |
+ (page_count(page) != 0) |
(page->flags & (
1 << PG_lru |
1 << PG_private |
@@ -348,8 +360,8 @@ static inline int free_pages_check(const char *function, struct page *page)
1 << PG_slab |
1 << PG_swapcache |
1 << PG_writeback |
- 1 << PG_reserved )))
- bad_page(function, page);
+ 1 << PG_reserved ))))
+ bad_page(page);
if (PageDirty(page))
__ClearPageDirty(page);
/*
@@ -371,51 +383,90 @@ static inline int free_pages_check(const char *function, struct page *page)
* And clear the zone's pages_scanned counter, to hold off the "all pages are
* pinned" detection logic.
*/
-static int
-free_pages_bulk(struct zone *zone, int count,
- struct list_head *list, unsigned int order)
+static void free_pages_bulk(struct zone *zone, int count,
+ struct list_head *list, int order)
{
- unsigned long flags;
- struct page *page = NULL;
- int ret = 0;
-
- spin_lock_irqsave(&zone->lock, flags);
+ spin_lock(&zone->lock);
zone->all_unreclaimable = 0;
zone->pages_scanned = 0;
- while (!list_empty(list) && count--) {
+ while (count--) {
+ struct page *page;
+
+ BUG_ON(list_empty(list));
page = list_entry(list->prev, struct page, lru);
- /* have to delete it as __free_pages_bulk list manipulates */
+ /* have to delete it as __free_one_page list manipulates */
list_del(&page->lru);
- __free_pages_bulk(page, zone, order);
- ret++;
+ __free_one_page(page, zone, order);
}
- spin_unlock_irqrestore(&zone->lock, flags);
- return ret;
+ spin_unlock(&zone->lock);
}
-void __free_pages_ok(struct page *page, unsigned int order)
+static void free_one_page(struct zone *zone, struct page *page, int order)
{
LIST_HEAD(list);
+ list_add(&page->lru, &list);
+ free_pages_bulk(zone, 1, &list, order);
+}
+
+static void __free_pages_ok(struct page *page, unsigned int order)
+{
+ unsigned long flags;
int i;
int reserved = 0;
arch_free_page(page, order);
+ if (!PageHighMem(page))
+ mutex_debug_check_no_locks_freed(page_address(page),
+ PAGE_SIZE<<order);
#ifndef CONFIG_MMU
- if (order > 0)
- for (i = 1 ; i < (1 << order) ; ++i)
- __put_page(page + i);
+ for (i = 1 ; i < (1 << order) ; ++i)
+ __put_page(page + i);
#endif
for (i = 0 ; i < (1 << order) ; ++i)
- reserved += free_pages_check(__FUNCTION__, page + i);
+ reserved += free_pages_check(page + i);
if (reserved)
return;
- list_add(&page->lru, &list);
- mod_page_state(pgfree, 1 << order);
- kernel_map_pages(page, 1<<order, 0);
- free_pages_bulk(page_zone(page), 1, &list, order);
+ kernel_map_pages(page, 1 << order, 0);
+ local_irq_save(flags);
+ __mod_page_state(pgfree, 1 << order);
+ free_one_page(page_zone(page), page, order);
+ local_irq_restore(flags);
+}
+
+/*
+ * permit the bootmem allocator to evade page validation on high-order frees
+ */
+void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
+{
+ if (order == 0) {
+ __ClearPageReserved(page);
+ set_page_count(page, 0);
+
+ free_hot_cold_page(page, 0);
+ } else {
+ LIST_HEAD(list);
+ int loop;
+
+ for (loop = 0; loop < BITS_PER_LONG; loop++) {
+ struct page *p = &page[loop];
+
+ if (loop + 16 < BITS_PER_LONG)
+ prefetchw(p + 16);
+ __ClearPageReserved(p);
+ set_page_count(p, 0);
+ }
+
+ arch_free_page(page, order);
+
+ mod_page_state(pgfree, 1 << order);
+
+ list_add(&page->lru, &list);
+ kernel_map_pages(page, 1 << order, 0);
+ free_pages_bulk(page_zone(page), 1, &list, order);
+ }
}
@@ -433,8 +484,7 @@ void __free_pages_ok(struct page *page, unsigned int order)
*
* -- wli
*/
-static inline struct page *
-expand(struct zone *zone, struct page *page,
+static inline void expand(struct zone *zone, struct page *page,
int low, int high, struct free_area *area)
{
unsigned long size = 1 << high;
@@ -448,24 +498,6 @@ expand(struct zone *zone, struct page *page,
area->nr_free++;
set_page_order(&page[size], high);
}
- return page;
-}
-
-void set_page_refs(struct page *page, int order)
-{
-#ifdef CONFIG_MMU
- set_page_count(page, 1);
-#else
- int i;
-
- /*
- * We need to reference all the pages for this order, otherwise if
- * anyone accesses one of the pages with (get/put) it will be freed.
- * - eg: access_process_vm()
- */
- for (i = 0; i < (1 << order); i++)
- set_page_count(page + i, 1);
-#endif /* CONFIG_MMU */
}
/*
@@ -473,9 +505,9 @@ void set_page_refs(struct page *page, int order)
*/
static int prep_new_page(struct page *page, int order)
{
- if ( page_mapcount(page) ||
- page->mapping != NULL ||
- page_count(page) != 0 ||
+ if (unlikely(page_mapcount(page) |
+ (page->mapping != NULL) |
+ (page_count(page) != 0) |
(page->flags & (
1 << PG_lru |
1 << PG_private |
@@ -486,8 +518,8 @@ static int prep_new_page(struct page *page, int order)
1 << PG_slab |
1 << PG_swapcache |
1 << PG_writeback |
- 1 << PG_reserved )))
- bad_page(__FUNCTION__, page);
+ 1 << PG_reserved ))))
+ bad_page(page);
/*
* For now, we report if PG_reserved was found set, but do not
@@ -525,7 +557,8 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order)
rmv_page_order(page);
area->nr_free--;
zone->free_pages -= 1UL << order;
- return expand(zone, page, order, current_order, area);
+ expand(zone, page, order, current_order, area);
+ return page;
}
return NULL;
@@ -539,21 +572,17 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order)
static int rmqueue_bulk(struct zone *zone, unsigned int order,
unsigned long count, struct list_head *list)
{
- unsigned long flags;
int i;
- int allocated = 0;
- struct page *page;
- spin_lock_irqsave(&zone->lock, flags);
+ spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
- page = __rmqueue(zone, order);
- if (page == NULL)
+ struct page *page = __rmqueue(zone, order);
+ if (unlikely(page == NULL))
break;
- allocated++;
list_add_tail(&page->lru, list);
}
- spin_unlock_irqrestore(&zone->lock, flags);
- return allocated;
+ spin_unlock(&zone->lock);
+ return i;
}
#ifdef CONFIG_NUMA
@@ -572,14 +601,13 @@ void drain_remote_pages(void)
if (zone->zone_pgdat->node_id == numa_node_id())
continue;
- pset = zone->pageset[smp_processor_id()];
+ pset = zone_pcp(zone, smp_processor_id());
for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
struct per_cpu_pages *pcp;
pcp = &pset->pcp[i];
- if (pcp->count)
- pcp->count -= free_pages_bulk(zone, pcp->count,
- &pcp->list, 0);
+ free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+ pcp->count = 0;
}
}
local_irq_restore(flags);
@@ -589,6 +617,7 @@ void drain_remote_pages(void)
#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
static void __drain_pages(unsigned int cpu)
{
+ unsigned long flags;
struct zone *zone;
int i;
@@ -600,8 +629,10 @@ static void __drain_pages(unsigned int cpu)
struct per_cpu_pages *pcp;
pcp = &pset->pcp[i];
- pcp->count -= free_pages_bulk(zone, pcp->count,
- &pcp->list, 0);
+ local_irq_save(flags);
+ free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+ pcp->count = 0;
+ local_irq_restore(flags);
}
}
}
@@ -647,18 +678,14 @@ void drain_local_pages(void)
}
#endif /* CONFIG_PM */
-static void zone_statistics(struct zonelist *zonelist, struct zone *z)
+static void zone_statistics(struct zonelist *zonelist, struct zone *z, int cpu)
{
#ifdef CONFIG_NUMA
- unsigned long flags;
- int cpu;
pg_data_t *pg = z->zone_pgdat;
pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
struct per_cpu_pageset *p;
- local_irq_save(flags);
- cpu = smp_processor_id();
- p = zone_pcp(z,cpu);
+ p = zone_pcp(z, cpu);
if (pg == orig) {
p->numa_hit++;
} else {
@@ -669,14 +696,12 @@ static void zone_statistics(struct zonelist *zonelist, struct zone *z)
p->local_node++;
else
p->other_node++;
- local_irq_restore(flags);
#endif
}
/*
* Free a 0-order page
*/
-static void FASTCALL(free_hot_cold_page(struct page *page, int cold));
static void fastcall free_hot_cold_page(struct page *page, int cold)
{
struct zone *zone = page_zone(page);
@@ -687,18 +712,20 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
if (PageAnon(page))
page->mapping = NULL;
- if (free_pages_check(__FUNCTION__, page))
+ if (free_pages_check(page))
return;
- inc_page_state(pgfree);
kernel_map_pages(page, 1, 0);
pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
local_irq_save(flags);
+ __inc_page_state(pgfree);
list_add(&page->lru, &pcp->list);
pcp->count++;
- if (pcp->count >= pcp->high)
- pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+ if (pcp->count >= pcp->high) {
+ free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+ pcp->count -= pcp->batch;
+ }
local_irq_restore(flags);
put_cpu();
}
@@ -727,49 +754,58 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
* we cheat by calling it from here, in the order > 0 path. Saves a branch
* or two.
*/
-static struct page *
-buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
+static struct page *buffered_rmqueue(struct zonelist *zonelist,
+ struct zone *zone, int order, gfp_t gfp_flags)
{
unsigned long flags;
struct page *page;
int cold = !!(gfp_flags & __GFP_COLD);
+ int cpu;
again:
- if (order == 0) {
+ cpu = get_cpu();
+ if (likely(order == 0)) {
struct per_cpu_pages *pcp;
- page = NULL;
- pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
+ pcp = &zone_pcp(zone, cpu)->pcp[cold];
local_irq_save(flags);
- if (pcp->count <= pcp->low)
+ if (!pcp->count) {
pcp->count += rmqueue_bulk(zone, 0,
pcp->batch, &pcp->list);
- if (pcp->count) {
- page = list_entry(pcp->list.next, struct page, lru);
- list_del(&page->lru);
- pcp->count--;
+ if (unlikely(!pcp->count))
+ goto failed;
}
- local_irq_restore(flags);
- put_cpu();
+ page = list_entry(pcp->list.next, struct page, lru);
+ list_del(&page->lru);
+ pcp->count--;
} else {
spin_lock_irqsave(&zone->lock, flags);
page = __rmqueue(zone, order);
- spin_unlock_irqrestore(&zone->lock, flags);
+ spin_unlock(&zone->lock);
+ if (!page)
+ goto failed;
}
- if (page != NULL) {
- BUG_ON(bad_range(zone, page));
- mod_page_state_zone(zone, pgalloc, 1 << order);
- if (prep_new_page(page, order))
- goto again;
+ __mod_page_state_zone(zone, pgalloc, 1 << order);
+ zone_statistics(zonelist, zone, cpu);
+ local_irq_restore(flags);
+ put_cpu();
- if (gfp_flags & __GFP_ZERO)
- prep_zero_page(page, order, gfp_flags);
+ BUG_ON(bad_range(zone, page));
+ if (prep_new_page(page, order))
+ goto again;
- if (order && (gfp_flags & __GFP_COMP))
- prep_compound_page(page, order);
- }
+ if (gfp_flags & __GFP_ZERO)
+ prep_zero_page(page, order, gfp_flags);
+
+ if (order && (gfp_flags & __GFP_COMP))
+ prep_compound_page(page, order);
return page;
+
+failed:
+ local_irq_restore(flags);
+ put_cpu();
+ return NULL;
}
#define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */
@@ -845,9 +881,8 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
continue;
}
- page = buffered_rmqueue(*z, order, gfp_mask);
+ page = buffered_rmqueue(zonelist, *z, order, gfp_mask);
if (page) {
- zone_statistics(zonelist, *z);
break;
}
} while (*(++z) != NULL);
@@ -896,15 +931,15 @@ restart:
*
* The caller may dip into page reserves a bit more if the caller
* cannot run direct reclaim, or if the caller has realtime scheduling
- * policy.
+ * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
+ * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
*/
alloc_flags = ALLOC_WMARK_MIN;
if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
alloc_flags |= ALLOC_HARDER;
if (gfp_mask & __GFP_HIGH)
alloc_flags |= ALLOC_HIGH;
- if (wait)
- alloc_flags |= ALLOC_CPUSET;
+ alloc_flags |= ALLOC_CPUSET;
/*
* Go through the zonelist again. Let __GFP_HIGH and allocations
@@ -926,7 +961,7 @@ restart:
nofail_alloc:
/* go through the zonelist yet again, ignoring mins */
page = get_page_from_freelist(gfp_mask, order,
- zonelist, ALLOC_NO_WATERMARKS|ALLOC_CPUSET);
+ zonelist, ALLOC_NO_WATERMARKS);
if (page)
goto got_pg;
if (gfp_mask & __GFP_NOFAIL) {
@@ -945,6 +980,7 @@ rebalance:
cond_resched();
/* We now go into synchronous reclaim */
+ cpuset_memory_pressure_bump();
p->flags |= PF_MEMALLOC;
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
@@ -1171,7 +1207,7 @@ EXPORT_SYMBOL(nr_pagecache);
DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
#endif
-void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
+static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
{
int cpu = 0;
@@ -1224,7 +1260,7 @@ void get_full_page_state(struct page_state *ret)
__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
}
-unsigned long __read_page_state(unsigned long offset)
+unsigned long read_page_state_offset(unsigned long offset)
{
unsigned long ret = 0;
int cpu;
@@ -1238,18 +1274,26 @@ unsigned long __read_page_state(unsigned long offset)
return ret;
}
-void __mod_page_state(unsigned long offset, unsigned long delta)
+void __mod_page_state_offset(unsigned long offset, unsigned long delta)
+{
+ void *ptr;
+
+ ptr = &__get_cpu_var(page_states);
+ *(unsigned long *)(ptr + offset) += delta;
+}
+EXPORT_SYMBOL(__mod_page_state_offset);
+
+void mod_page_state_offset(unsigned long offset, unsigned long delta)
{
unsigned long flags;
- void* ptr;
+ void *ptr;
local_irq_save(flags);
ptr = &__get_cpu_var(page_states);
- *(unsigned long*)(ptr + offset) += delta;
+ *(unsigned long *)(ptr + offset) += delta;
local_irq_restore(flags);
}
-
-EXPORT_SYMBOL(__mod_page_state);
+EXPORT_SYMBOL(mod_page_state_offset);
void __get_zone_counts(unsigned long *active, unsigned long *inactive,
unsigned long *free, struct pglist_data *pgdat)
@@ -1335,7 +1379,7 @@ void show_free_areas(void)
show_node(zone);
printk("%s per-cpu:", zone->name);
- if (!zone->present_pages) {
+ if (!populated_zone(zone)) {
printk(" empty\n");
continue;
} else
@@ -1347,10 +1391,9 @@ void show_free_areas(void)
pageset = zone_pcp(zone, cpu);
for (temperature = 0; temperature < 2; temperature++)
- printk("cpu %d %s: low %d, high %d, batch %d used:%d\n",
+ printk("cpu %d %s: high %d, batch %d used:%d\n",
cpu,
temperature ? "cold" : "hot",
- pageset->pcp[temperature].low,
pageset->pcp[temperature].high,
pageset->pcp[temperature].batch,
pageset->pcp[temperature].count);
@@ -1413,7 +1456,7 @@ void show_free_areas(void)
show_node(zone);
printk("%s: ", zone->name);
- if (!zone->present_pages) {
+ if (!populated_zone(zone)) {
printk("empty\n");
continue;
}
@@ -1433,36 +1476,29 @@ void show_free_areas(void)
/*
* Builds allocation fallback zone lists.
+ *
+ * Add all populated zones of a node to the zonelist.
*/
-static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, int j, int k)
-{
- switch (k) {
- struct zone *zone;
- default:
- BUG();
- case ZONE_HIGHMEM:
- zone = pgdat->node_zones + ZONE_HIGHMEM;
- if (zone->present_pages) {
+static int __init build_zonelists_node(pg_data_t *pgdat,
+ struct zonelist *zonelist, int nr_zones, int zone_type)
+{
+ struct zone *zone;
+
+ BUG_ON(zone_type > ZONE_HIGHMEM);
+
+ do {
+ zone = pgdat->node_zones + zone_type;
+ if (populated_zone(zone)) {
#ifndef CONFIG_HIGHMEM
- BUG();
+ BUG_ON(zone_type > ZONE_NORMAL);
#endif
- zonelist->zones[j++] = zone;
+ zonelist->zones[nr_zones++] = zone;
+ check_highest_zone(zone_type);
}
- case ZONE_NORMAL:
- zone = pgdat->node_zones + ZONE_NORMAL;
- if (zone->present_pages)
- zonelist->zones[j++] = zone;
- case ZONE_DMA32:
- zone = pgdat->node_zones + ZONE_DMA32;
- if (zone->present_pages)
- zonelist->zones[j++] = zone;
- case ZONE_DMA:
- zone = pgdat->node_zones + ZONE_DMA;
- if (zone->present_pages)
- zonelist->zones[j++] = zone;
- }
+ zone_type--;
- return j;
+ } while (zone_type >= 0);
+ return nr_zones;
}
static inline int highest_zone(int zone_bits)
@@ -1706,11 +1742,9 @@ void __devinit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
unsigned long end_pfn = start_pfn + size;
unsigned long pfn;
- for (pfn = start_pfn; pfn < end_pfn; pfn++, page++) {
+ for (pfn = start_pfn; pfn < end_pfn; pfn++) {
if (!early_pfn_valid(pfn))
continue;
- if (!early_pfn_in_nid(pfn, nid))
- continue;
page = pfn_to_page(pfn);
set_page_links(page, zone, nid, pfn);
set_page_count(page, 1);
@@ -1794,19 +1828,35 @@ inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
pcp = &p->pcp[0]; /* hot */
pcp->count = 0;
- pcp->low = 0;
pcp->high = 6 * batch;
pcp->batch = max(1UL, 1 * batch);
INIT_LIST_HEAD(&pcp->list);
pcp = &p->pcp[1]; /* cold*/
pcp->count = 0;
- pcp->low = 0;
pcp->high = 2 * batch;
pcp->batch = max(1UL, batch/2);
INIT_LIST_HEAD(&pcp->list);
}
+/*
+ * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
+ * to the value high for the pageset p.
+ */
+
+static void setup_pagelist_highmark(struct per_cpu_pageset *p,
+ unsigned long high)
+{
+ struct per_cpu_pages *pcp;
+
+ pcp = &p->pcp[0]; /* hot list */
+ pcp->high = high;
+ pcp->batch = max(1UL, high/4);
+ if ((high/4) > (PAGE_SHIFT * 8))
+ pcp->batch = PAGE_SHIFT * 8;
+}
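setup_pagelist_highmark() derives the batch size from the requested high mark and clamps large batches. A small sketch of that arithmetic; PAGE_SHIFT is assumed to be 12 (4 KiB pages), which is only an illustrative choice:

/* Illustrative only: recomputes high/batch the way the patch does. */
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

static void pagelist_highmark(unsigned long high,
			      unsigned long *out_high, unsigned long *out_batch)
{
	unsigned long batch = high / 4 ? high / 4 : 1;	/* max(1, high/4) */

	if (high / 4 > PAGE_SHIFT * 8)			/* clamp large batches */
		batch = PAGE_SHIFT * 8;
	*out_high = high;
	*out_batch = batch;
}

int main(void)
{
	unsigned long high, batch;

	pagelist_highmark(1024, &high, &batch);
	printf("high=%lu batch=%lu\n", high, batch);	/* 1024, 96 */
	return 0;
}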
+
+
#ifdef CONFIG_NUMA
/*
* Boot pageset table. One per cpu which is going to be used for all
@@ -1838,12 +1888,16 @@ static int __devinit process_zones(int cpu)
for_each_zone(zone) {
- zone->pageset[cpu] = kmalloc_node(sizeof(struct per_cpu_pageset),
+ zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
GFP_KERNEL, cpu_to_node(cpu));
- if (!zone->pageset[cpu])
+ if (!zone_pcp(zone, cpu))
goto bad;
- setup_pageset(zone->pageset[cpu], zone_batchsize(zone));
+ setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
+
+ if (percpu_pagelist_fraction)
+ setup_pagelist_highmark(zone_pcp(zone, cpu),
+ (zone->present_pages / percpu_pagelist_fraction));
}
return 0;
@@ -1851,15 +1905,14 @@ bad:
for_each_zone(dzone) {
if (dzone == zone)
break;
- kfree(dzone->pageset[cpu]);
- dzone->pageset[cpu] = NULL;
+ kfree(zone_pcp(dzone, cpu));
+ zone_pcp(dzone, cpu) = NULL;
}
return -ENOMEM;
}
static inline void free_zone_pagesets(int cpu)
{
-#ifdef CONFIG_NUMA
struct zone *zone;
for_each_zone(zone) {
@@ -1868,7 +1921,6 @@ static inline void free_zone_pagesets(int cpu)
zone_pcp(zone, cpu) = NULL;
kfree(pset);
}
-#endif
}
static int __devinit pageset_cpuup_callback(struct notifier_block *nfb,
@@ -1939,7 +1991,7 @@ static __devinit void zone_pcp_init(struct zone *zone)
for (cpu = 0; cpu < NR_CPUS; cpu++) {
#ifdef CONFIG_NUMA
/* Early boot. Slab allocator not functional yet */
- zone->pageset[cpu] = &boot_pageset[cpu];
+ zone_pcp(zone, cpu) = &boot_pageset[cpu];
setup_pageset(&boot_pageset[cpu],0);
#else
setup_pageset(zone_pcp(zone,cpu), batch);
@@ -2116,7 +2168,7 @@ static int frag_show(struct seq_file *m, void *arg)
int order;
for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
- if (!zone->present_pages)
+ if (!populated_zone(zone))
continue;
spin_lock_irqsave(&zone->lock, flags);
@@ -2149,7 +2201,7 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
int i;
- if (!zone->present_pages)
+ if (!populated_zone(zone))
continue;
spin_lock_irqsave(&zone->lock, flags);
@@ -2182,7 +2234,7 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
seq_printf(m,
")"
"\n pagesets");
- for (i = 0; i < ARRAY_SIZE(zone->pageset); i++) {
+ for_each_online_cpu(i) {
struct per_cpu_pageset *pageset;
int j;
@@ -2197,12 +2249,10 @@ static int zoneinfo_show(struct seq_file *m, void *arg)
seq_printf(m,
"\n cpu: %i pcp: %i"
"\n count: %i"
- "\n low: %i"
"\n high: %i"
"\n batch: %i",
i, j,
pageset->pcp[j].count,
- pageset->pcp[j].low,
pageset->pcp[j].high,
pageset->pcp[j].batch);
}
@@ -2257,32 +2307,40 @@ static char *vmstat_text[] = {
"pgpgout",
"pswpin",
"pswpout",
- "pgalloc_high",
+ "pgalloc_high",
"pgalloc_normal",
+ "pgalloc_dma32",
"pgalloc_dma",
+
"pgfree",
"pgactivate",
"pgdeactivate",
"pgfault",
"pgmajfault",
+
"pgrefill_high",
"pgrefill_normal",
+ "pgrefill_dma32",
"pgrefill_dma",
"pgsteal_high",
"pgsteal_normal",
+ "pgsteal_dma32",
"pgsteal_dma",
+
"pgscan_kswapd_high",
"pgscan_kswapd_normal",
-
+ "pgscan_kswapd_dma32",
"pgscan_kswapd_dma",
+
"pgscan_direct_high",
"pgscan_direct_normal",
+ "pgscan_direct_dma32",
"pgscan_direct_dma",
- "pginodesteal",
+ "pginodesteal",
"slabs_scanned",
"kswapd_steal",
"kswapd_inodesteal",
@@ -2539,6 +2597,32 @@ int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
return 0;
}
+/*
+ * percpu_pagelist_fraction - changes pcp->high for each zone on each cpu.
+ * It is the fraction of the total pages in each zone that a hot per-cpu
+ * pagelist can hold before it is flushed back to the buddy allocator.
+ */
+
+int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
+ struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+{
+ struct zone *zone;
+ unsigned int cpu;
+ int ret;
+
+ ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+ if (!write || (ret == -EINVAL))
+ return ret;
+ for_each_zone(zone) {
+ for_each_online_cpu(cpu) {
+ unsigned long high;
+ high = zone->present_pages / percpu_pagelist_fraction;
+ setup_pagelist_highmark(zone_pcp(zone, cpu), high);
+ }
+ }
+ return 0;
+}
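The handler simply recomputes high = present_pages / fraction for every zone on every online cpu. The effect of a given fraction can be estimated as below; the zone sizes are hypothetical and only the sysctl name comes from this patch:

/* Sketch: what a percpu_pagelist_fraction of 8 means for pcp->high. */
#include <stdio.h>

int main(void)
{
	unsigned long present_pages[] = { 4096, 225280, 0 };	/* hypothetical zones */
	unsigned long fraction = 8;	/* e.g. echo 8 > /proc/sys/vm/percpu_pagelist_fraction */

	for (int i = 0; i < 3; i++)
		printf("zone %d: pcp high = %lu pages\n",
		       i, present_pages[i] / fraction);
	return 0;
}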
+
__initdata int hashdist = HASHDIST_DEFAULT;
#ifdef CONFIG_NUMA
diff --git a/mm/pdflush.c b/mm/pdflush.c
index 52822c98c48..c4b6d0afd73 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -90,7 +90,7 @@ struct pdflush_work {
static int __pdflush(struct pdflush_work *my_work)
{
- current->flags |= PF_FLUSHER;
+ current->flags |= PF_FLUSHER | PF_SWAPWRITE;
my_work->fn = NULL;
my_work->who = current;
INIT_LIST_HEAD(&my_work->list);
diff --git a/mm/readahead.c b/mm/readahead.c
index 72e7adbb87c..8d6eeaaa629 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -158,7 +158,7 @@ static int read_pages(struct address_space *mapping, struct file *filp,
{
unsigned page_idx;
struct pagevec lru_pvec;
- int ret = 0;
+ int ret;
if (mapping->a_ops->readpages) {
ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
@@ -171,14 +171,17 @@ static int read_pages(struct address_space *mapping, struct file *filp,
list_del(&page->lru);
if (!add_to_page_cache(page, mapping,
page->index, GFP_KERNEL)) {
- mapping->a_ops->readpage(filp, page);
- if (!pagevec_add(&lru_pvec, page))
- __pagevec_lru_add(&lru_pvec);
- } else {
- page_cache_release(page);
+ ret = mapping->a_ops->readpage(filp, page);
+ if (ret != AOP_TRUNCATED_PAGE) {
+ if (!pagevec_add(&lru_pvec, page))
+ __pagevec_lru_add(&lru_pvec);
+ continue;
+ } /* else fall through to release */
}
+ page_cache_release(page);
}
pagevec_lru_add(&lru_pvec);
+ ret = 0;
out:
return ret;
}
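The rework makes read_pages() honour ->readpage() returning AOP_TRUNCATED_PAGE by falling through to the release path instead of unconditionally queueing the page for the LRU. A condensed user-space model of the new control flow; the helper name, the constant value and the always-successful cache add are stand-ins, not the kernel API:

/* Sketch of the new control flow; constants are placeholders. */
#include <stdio.h>

#define AOP_TRUNCATED_PAGE 1

static int fake_readpage(int idx) { return idx == 2 ? AOP_TRUNCATED_PAGE : 0; }

int main(void)
{
	for (int idx = 0; idx < 4; idx++) {
		int added_to_cache = 1;	/* pretend add_to_page_cache() succeeded */

		if (added_to_cache) {
			if (fake_readpage(idx) != AOP_TRUNCATED_PAGE) {
				printf("page %d: queued for LRU\n", idx);
				continue;	/* keep the reference */
			}
			/* else fall through to release, as in the patch */
		}
		printf("page %d: released (truncated or cache add failed)\n", idx);
	}
	return 0;
}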
diff --git a/mm/rmap.c b/mm/rmap.c
index f853c6def15..dfbb89f99a1 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -20,13 +20,13 @@
/*
* Lock ordering in mm:
*
- * inode->i_sem (while writing or truncating, not reading or faulting)
+ * inode->i_mutex (while writing or truncating, not reading or faulting)
* inode->i_alloc_sem
*
* When a page fault occurs in writing from user to file, down_read
- * of mmap_sem nests within i_sem; in sys_msync, i_sem nests within
- * down_read of mmap_sem; i_sem and down_write of mmap_sem are never
- * taken together; in truncation, i_sem is taken outermost.
+ * of mmap_sem nests within i_mutex; in sys_msync, i_mutex nests within
+ * down_read of mmap_sem; i_mutex and down_write of mmap_sem are never
+ * taken together; in truncation, i_mutex is taken outermost.
*
* mm->mmap_sem
* page->flags PG_locked (lock_page)
@@ -435,6 +435,30 @@ int page_referenced(struct page *page, int is_locked)
}
/**
+ * __page_set_anon_rmap - set up new anonymous rmap
+ * @page: the page to add the mapping to
+ * @vma: the vm area in which the mapping is added
+ * @address: the user virtual address mapped
+ */
+static void __page_set_anon_rmap(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ struct anon_vma *anon_vma = vma->anon_vma;
+
+ BUG_ON(!anon_vma);
+ anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+ page->mapping = (struct address_space *) anon_vma;
+
+ page->index = linear_page_index(vma, address);
+
+ /*
+ * nr_mapped state can be updated without turning off
+ * interrupts because it is not modified via interrupt.
+ */
+ __inc_page_state(nr_mapped);
+}
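__page_set_anon_rmap() stores the anon_vma pointer in page->mapping with the low PAGE_MAPPING_ANON bit set, so the same field can be told apart from a struct address_space pointer. A stand-alone illustration of that tagging; the struct is a dummy and only the bit value mirrors the kernel's convention:

/* Pointer-tagging sketch; struct layout is a placeholder. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_MAPPING_ANON 1

struct anon_vma { int dummy; };

int main(void)
{
	static struct anon_vma av;
	void *mapping = (void *)((uintptr_t)&av + PAGE_MAPPING_ANON);

	int is_anon = (uintptr_t)mapping & PAGE_MAPPING_ANON;	/* PageAnon()-style test */
	struct anon_vma *back =
		(struct anon_vma *)((uintptr_t)mapping - PAGE_MAPPING_ANON);

	printf("anon=%d, round-trip ok=%d\n", is_anon, back == &av);
	return 0;
}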
+
+/**
* page_add_anon_rmap - add pte mapping to an anonymous page
* @page: the page to add the mapping to
* @vma: the vm area in which the mapping is added
@@ -445,20 +469,27 @@ int page_referenced(struct page *page, int is_locked)
void page_add_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
- if (atomic_inc_and_test(&page->_mapcount)) {
- struct anon_vma *anon_vma = vma->anon_vma;
-
- BUG_ON(!anon_vma);
- anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
- page->mapping = (struct address_space *) anon_vma;
-
- page->index = linear_page_index(vma, address);
-
- inc_page_state(nr_mapped);
- }
+ if (atomic_inc_and_test(&page->_mapcount))
+ __page_set_anon_rmap(page, vma, address);
/* else checking page index and mapping is racy */
}
+/**
+ * page_add_new_anon_rmap - add pte mapping to a new anonymous page
+ * @page: the page to add the mapping to
+ * @vma: the vm area in which the mapping is added
+ * @address: the user virtual address mapped
+ *
+ * Same as page_add_anon_rmap but must only be called on *new* pages.
+ * This means the inc-and-test can be bypassed.
+ */
+void page_add_new_anon_rmap(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
+ __page_set_anon_rmap(page, vma, address);
+}
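page->_mapcount starts at -1, so atomic_inc_and_test() fires exactly on the first mapping; for brand-new pages the patch skips that test and sets the count to 0 directly. A tiny model of the convention, with plain ints standing in for the atomic ops:

/* Model of the _mapcount convention; not the kernel's atomic types. */
#include <stdio.h>

int main(void)
{
	int mapcount = -1;			/* freshly allocated page */

	int first = (++mapcount == 0);		/* atomic_inc_and_test() analogue */
	printf("first mapping detected: %d\n", first);

	mapcount = 0;				/* page_add_new_anon_rmap(): set directly */
	printf("mapcount after new-anon fast path: %d\n", mapcount);
	return 0;
}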
+
/**
* page_add_file_rmap - add pte mapping to a file page
* @page: the page to add the mapping to
@@ -471,7 +502,7 @@ void page_add_file_rmap(struct page *page)
BUG_ON(!pfn_valid(page_to_pfn(page)));
if (atomic_inc_and_test(&page->_mapcount))
- inc_page_state(nr_mapped);
+ __inc_page_state(nr_mapped);
}
/**
@@ -483,6 +514,13 @@ void page_add_file_rmap(struct page *page)
void page_remove_rmap(struct page *page)
{
if (atomic_add_negative(-1, &page->_mapcount)) {
+ if (page_mapcount(page) < 0) {
+ printk(KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
+ printk(KERN_EMERG " page->flags = %lx\n", page->flags);
+ printk(KERN_EMERG " page->count = %x\n", page_count(page));
+ printk(KERN_EMERG " page->mapping = %p\n", page->mapping);
+ }
+
BUG_ON(page_mapcount(page) < 0);
/*
* It would be tidy to reset the PageAnon mapping here,
@@ -495,7 +533,7 @@ void page_remove_rmap(struct page *page)
*/
if (page_test_and_clear_dirty(page))
set_page_dirty(page);
- dec_page_state(nr_mapped);
+ __dec_page_state(nr_mapped);
}
}
diff --git a/mm/shmem.c b/mm/shmem.c
index dc25565a61e..343b3c0937e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -457,7 +457,7 @@ static void shmem_free_pages(struct list_head *next)
} while (next);
}
-static void shmem_truncate(struct inode *inode)
+static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
struct shmem_inode_info *info = SHMEM_I(inode);
unsigned long idx;
@@ -475,18 +475,27 @@ static void shmem_truncate(struct inode *inode)
long nr_swaps_freed = 0;
int offset;
int freed;
+ int punch_hole = 0;
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
- idx = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
if (idx >= info->next_index)
return;
spin_lock(&info->lock);
info->flags |= SHMEM_TRUNCATE;
- limit = info->next_index;
- info->next_index = idx;
+ if (likely(end == (loff_t) -1)) {
+ limit = info->next_index;
+ info->next_index = idx;
+ } else {
+ limit = (end + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ if (limit > info->next_index)
+ limit = info->next_index;
+ punch_hole = 1;
+ }
+
topdir = info->i_indirect;
- if (topdir && idx <= SHMEM_NR_DIRECT) {
+ if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
info->i_indirect = NULL;
nr_pages_to_free++;
list_add(&topdir->lru, &pages_to_free);
@@ -573,11 +582,12 @@ static void shmem_truncate(struct inode *inode)
set_page_private(subdir, page_private(subdir) - freed);
if (offset)
spin_unlock(&info->lock);
- BUG_ON(page_private(subdir) > offset);
+ if (!punch_hole)
+ BUG_ON(page_private(subdir) > offset);
}
if (offset)
offset = 0;
- else if (subdir) {
+ else if (subdir && !page_private(subdir)) {
dir[diroff] = NULL;
nr_pages_to_free++;
list_add(&subdir->lru, &pages_to_free);
@@ -594,7 +604,7 @@ done2:
* Also, though shmem_getpage checks i_size before adding to
* cache, no recheck after: so fix the narrow window there too.
*/
- truncate_inode_pages(inode->i_mapping, inode->i_size);
+ truncate_inode_pages_range(inode->i_mapping, start, end);
}
spin_lock(&info->lock);
@@ -614,6 +624,11 @@ done2:
}
}
+static void shmem_truncate(struct inode *inode)
+{
+ shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
+}
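shmem_truncate_range() converts the byte range into page-cache indices: the start rounds up to the next page index, and a finite end turns on punch_hole and bounds limit. A small sketch of that index arithmetic, assuming 4 KiB page-cache pages and hypothetical offsets:

/* Index math sketch; 4 KiB page-cache pages assumed. */
#include <stdio.h>

#define PAGE_CACHE_SHIFT 12
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

int main(void)
{
	unsigned long long start = 5000, end = 20000;	/* hypothetical hole */
	unsigned long next_index = 100;			/* pages currently allocated */

	unsigned long idx   = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	unsigned long limit = (end + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	int punch_hole = (end != (unsigned long long)-1);

	if (limit > next_index)
		limit = next_index;

	printf("idx=%lu limit=%lu punch_hole=%d\n", idx, limit, punch_hole);	/* 2, 5, 1 */
	return 0;
}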
+
static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
@@ -855,7 +870,7 @@ unlock:
swap_free(swap);
redirty:
set_page_dirty(page);
- return WRITEPAGE_ACTIVATE; /* Return with the page locked */
+ return AOP_WRITEPAGE_ACTIVATE; /* Return with the page locked */
}
#ifdef CONFIG_NUMA
@@ -1255,7 +1270,7 @@ out_nomem:
return retval;
}
-static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
+int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
file_accessed(file);
vma->vm_ops = &shmem_vm_ops;
@@ -1355,7 +1370,7 @@ shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
pos = *ppos;
written = 0;
@@ -1440,7 +1455,7 @@ shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t
if (written)
err = written;
out:
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return err;
}
@@ -1476,7 +1491,7 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
/*
* We must evaluate after, since reads (unlike writes)
- * are called without i_sem protection against truncate
+ * are called without i_mutex protection against truncate
*/
nr = PAGE_CACHE_SIZE;
i_size = i_size_read(inode);
@@ -2083,6 +2098,7 @@ static struct file_operations shmem_file_operations = {
static struct inode_operations shmem_inode_operations = {
.truncate = shmem_truncate,
.setattr = shmem_notify_change,
+ .truncate_range = shmem_truncate_range,
};
static struct inode_operations shmem_dir_inode_operations = {
diff --git a/mm/slab.c b/mm/slab.c
index e5ec26e0c46..9374293a301 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -130,7 +130,6 @@
#define FORCED_DEBUG 0
#endif
-
/* Shouldn't this be in a header file somewhere? */
#define BYTES_PER_WORD sizeof(void *)
@@ -217,12 +216,12 @@ static unsigned long offslab_limit;
* Slabs are chained into three list: fully used, partial, fully free slabs.
*/
struct slab {
- struct list_head list;
- unsigned long colouroff;
- void *s_mem; /* including colour offset */
- unsigned int inuse; /* num of objs active in slab */
- kmem_bufctl_t free;
- unsigned short nodeid;
+ struct list_head list;
+ unsigned long colouroff;
+ void *s_mem; /* including colour offset */
+ unsigned int inuse; /* num of objs active in slab */
+ kmem_bufctl_t free;
+ unsigned short nodeid;
};
/*
@@ -242,9 +241,9 @@ struct slab {
* We assume struct slab_rcu can overlay struct slab when destroying.
*/
struct slab_rcu {
- struct rcu_head head;
- kmem_cache_t *cachep;
- void *addr;
+ struct rcu_head head;
+ kmem_cache_t *cachep;
+ void *addr;
};
/*
@@ -279,23 +278,23 @@ struct array_cache {
#define BOOT_CPUCACHE_ENTRIES 1
struct arraycache_init {
struct array_cache cache;
- void * entries[BOOT_CPUCACHE_ENTRIES];
+ void *entries[BOOT_CPUCACHE_ENTRIES];
};
/*
* The slab lists for all objects.
*/
struct kmem_list3 {
- struct list_head slabs_partial; /* partial list first, better asm code */
- struct list_head slabs_full;
- struct list_head slabs_free;
- unsigned long free_objects;
- unsigned long next_reap;
- int free_touched;
- unsigned int free_limit;
- spinlock_t list_lock;
- struct array_cache *shared; /* shared per node */
- struct array_cache **alien; /* on other nodes */
+ struct list_head slabs_partial; /* partial list first, better asm code */
+ struct list_head slabs_full;
+ struct list_head slabs_free;
+ unsigned long free_objects;
+ unsigned long next_reap;
+ int free_touched;
+ unsigned int free_limit;
+ spinlock_t list_lock;
+ struct array_cache *shared; /* shared per node */
+ struct array_cache **alien; /* on other nodes */
};
/*
@@ -367,63 +366,63 @@ static inline void kmem_list3_init(struct kmem_list3 *parent)
*
* manages a cache.
*/
-
+
struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
- struct array_cache *array[NR_CPUS];
- unsigned int batchcount;
- unsigned int limit;
- unsigned int shared;
- unsigned int objsize;
+ struct array_cache *array[NR_CPUS];
+ unsigned int batchcount;
+ unsigned int limit;
+ unsigned int shared;
+ unsigned int objsize;
/* 2) touched by every alloc & free from the backend */
- struct kmem_list3 *nodelists[MAX_NUMNODES];
- unsigned int flags; /* constant flags */
- unsigned int num; /* # of objs per slab */
- spinlock_t spinlock;
+ struct kmem_list3 *nodelists[MAX_NUMNODES];
+ unsigned int flags; /* constant flags */
+ unsigned int num; /* # of objs per slab */
+ spinlock_t spinlock;
/* 3) cache_grow/shrink */
/* order of pgs per slab (2^n) */
- unsigned int gfporder;
+ unsigned int gfporder;
/* force GFP flags, e.g. GFP_DMA */
- gfp_t gfpflags;
+ gfp_t gfpflags;
- size_t colour; /* cache colouring range */
- unsigned int colour_off; /* colour offset */
- unsigned int colour_next; /* cache colouring */
- kmem_cache_t *slabp_cache;
- unsigned int slab_size;
- unsigned int dflags; /* dynamic flags */
+ size_t colour; /* cache colouring range */
+ unsigned int colour_off; /* colour offset */
+ unsigned int colour_next; /* cache colouring */
+ kmem_cache_t *slabp_cache;
+ unsigned int slab_size;
+ unsigned int dflags; /* dynamic flags */
/* constructor func */
- void (*ctor)(void *, kmem_cache_t *, unsigned long);
+ void (*ctor) (void *, kmem_cache_t *, unsigned long);
/* de-constructor func */
- void (*dtor)(void *, kmem_cache_t *, unsigned long);
+ void (*dtor) (void *, kmem_cache_t *, unsigned long);
/* 4) cache creation/removal */
- const char *name;
- struct list_head next;
+ const char *name;
+ struct list_head next;
/* 5) statistics */
#if STATS
- unsigned long num_active;
- unsigned long num_allocations;
- unsigned long high_mark;
- unsigned long grown;
- unsigned long reaped;
- unsigned long errors;
- unsigned long max_freeable;
- unsigned long node_allocs;
- unsigned long node_frees;
- atomic_t allochit;
- atomic_t allocmiss;
- atomic_t freehit;
- atomic_t freemiss;
+ unsigned long num_active;
+ unsigned long num_allocations;
+ unsigned long high_mark;
+ unsigned long grown;
+ unsigned long reaped;
+ unsigned long errors;
+ unsigned long max_freeable;
+ unsigned long node_allocs;
+ unsigned long node_frees;
+ atomic_t allochit;
+ atomic_t allocmiss;
+ atomic_t freehit;
+ atomic_t freemiss;
#endif
#if DEBUG
- int dbghead;
- int reallen;
+ int dbghead;
+ int reallen;
#endif
};
@@ -523,14 +522,15 @@ static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
if (cachep->flags & SLAB_STORE_USER)
- return (unsigned long*) (objp+cachep->objsize-2*BYTES_PER_WORD);
- return (unsigned long*) (objp+cachep->objsize-BYTES_PER_WORD);
+ return (unsigned long *)(objp + cachep->objsize -
+ 2 * BYTES_PER_WORD);
+ return (unsigned long *)(objp + cachep->objsize - BYTES_PER_WORD);
}
static void **dbg_userword(kmem_cache_t *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_STORE_USER));
- return (void**)(objp+cachep->objsize-BYTES_PER_WORD);
+ return (void **)(objp + cachep->objsize - BYTES_PER_WORD);
}
#else
@@ -607,31 +607,31 @@ struct cache_names {
static struct cache_names __initdata cache_names[] = {
#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
#include <linux/kmalloc_sizes.h>
- { NULL, }
+ {NULL,}
#undef CACHE
};
static struct arraycache_init initarray_cache __initdata =
- { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
+ { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
static struct arraycache_init initarray_generic =
- { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
+ { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
/* internal cache of cache description objs */
static kmem_cache_t cache_cache = {
- .batchcount = 1,
- .limit = BOOT_CPUCACHE_ENTRIES,
- .shared = 1,
- .objsize = sizeof(kmem_cache_t),
- .flags = SLAB_NO_REAP,
- .spinlock = SPIN_LOCK_UNLOCKED,
- .name = "kmem_cache",
+ .batchcount = 1,
+ .limit = BOOT_CPUCACHE_ENTRIES,
+ .shared = 1,
+ .objsize = sizeof(kmem_cache_t),
+ .flags = SLAB_NO_REAP,
+ .spinlock = SPIN_LOCK_UNLOCKED,
+ .name = "kmem_cache",
#if DEBUG
- .reallen = sizeof(kmem_cache_t),
+ .reallen = sizeof(kmem_cache_t),
#endif
};
/* Guard access to the cache-chain. */
-static struct semaphore cache_chain_sem;
+static struct semaphore cache_chain_sem;
static struct list_head cache_chain;
/*
@@ -655,9 +655,9 @@ static enum {
static DEFINE_PER_CPU(struct work_struct, reap_work);
-static void free_block(kmem_cache_t* cachep, void** objpp, int len, int node);
-static void enable_cpucache (kmem_cache_t *cachep);
-static void cache_reap (void *unused);
+static void free_block(kmem_cache_t *cachep, void **objpp, int len, int node);
+static void enable_cpucache(kmem_cache_t *cachep);
+static void cache_reap(void *unused);
static int __node_shrink(kmem_cache_t *cachep, int node);
static inline struct array_cache *ac_data(kmem_cache_t *cachep)
@@ -671,9 +671,9 @@ static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
#if DEBUG
/* This happens if someone tries to call
- * kmem_cache_create(), or __kmalloc(), before
- * the generic caches are initialized.
- */
+ * kmem_cache_create(), or __kmalloc(), before
+ * the generic caches are initialized.
+ */
BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
while (size > csizep->cs_size)
@@ -697,10 +697,10 @@ EXPORT_SYMBOL(kmem_find_general_cachep);
/* Cal the num objs, wastage, and bytes left over for a given slab size. */
static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
- int flags, size_t *left_over, unsigned int *num)
+ int flags, size_t *left_over, unsigned int *num)
{
int i;
- size_t wastage = PAGE_SIZE<<gfporder;
+ size_t wastage = PAGE_SIZE << gfporder;
size_t extra = 0;
size_t base = 0;
@@ -709,7 +709,7 @@ static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
extra = sizeof(kmem_bufctl_t);
}
i = 0;
- while (i*size + ALIGN(base+i*extra, align) <= wastage)
+ while (i * size + ALIGN(base + i * extra, align) <= wastage)
i++;
if (i > 0)
i--;
@@ -718,8 +718,8 @@ static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
i = SLAB_LIMIT;
*num = i;
- wastage -= i*size;
- wastage -= ALIGN(base+i*extra, align);
+ wastage -= i * size;
+ wastage -= ALIGN(base + i * extra, align);
*left_over = wastage;
}
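cache_estimate() answers "how many objects of this size fit in 2^order pages, and how much is left over". A compact user-space restatement of the same fit calculation; the base/extra values below model on-slab management data and are illustrative, and the off/on-slab distinction of the real function is folded into the arguments:

/* Rough re-statement of the fit calculation; off-slab bookkeeping omitted. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long align_up(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

static void estimate(unsigned long order, unsigned long size, unsigned long align,
		     unsigned long extra, unsigned long base,
		     unsigned long *left_over, unsigned int *num)
{
	unsigned long wastage = PAGE_SIZE << order;
	unsigned long i = 0;

	while (i * size + align_up(base + i * extra, align) <= wastage)
		i++;
	if (i > 0)
		i--;
	*num = i;
	wastage -= i * size;
	wastage -= align_up(base + i * extra, align);
	*left_over = wastage;
}

int main(void)
{
	unsigned long left;
	unsigned int num;

	/* e.g. 128-byte objects with ~4 bytes of on-slab bookkeeping each */
	estimate(0, 128, sizeof(void *), 4, 32, &left, &num);
	printf("objects per slab: %u, bytes left over: %lu\n", num, left);
	return 0;
}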
@@ -728,7 +728,7 @@ static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
static void __slab_error(const char *function, kmem_cache_t *cachep, char *msg)
{
printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
- function, cachep->name, msg);
+ function, cachep->name, msg);
dump_stack();
}
@@ -755,9 +755,9 @@ static void __devinit start_cpu_timer(int cpu)
}
static struct array_cache *alloc_arraycache(int node, int entries,
- int batchcount)
+ int batchcount)
{
- int memsize = sizeof(void*)*entries+sizeof(struct array_cache);
+ int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
struct array_cache *nc = NULL;
nc = kmalloc_node(memsize, GFP_KERNEL, node);
@@ -775,7 +775,7 @@ static struct array_cache *alloc_arraycache(int node, int entries,
static inline struct array_cache **alloc_alien_cache(int node, int limit)
{
struct array_cache **ac_ptr;
- int memsize = sizeof(void*)*MAX_NUMNODES;
+ int memsize = sizeof(void *) * MAX_NUMNODES;
int i;
if (limit > 1)
@@ -789,7 +789,7 @@ static inline struct array_cache **alloc_alien_cache(int node, int limit)
}
ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
if (!ac_ptr[i]) {
- for (i--; i <=0; i--)
+ for (i--; i <= 0; i--)
kfree(ac_ptr[i]);
kfree(ac_ptr);
return NULL;
@@ -807,12 +807,13 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
return;
for_each_node(i)
- kfree(ac_ptr[i]);
+ kfree(ac_ptr[i]);
kfree(ac_ptr);
}
-static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache *ac, int node)
+static inline void __drain_alien_cache(kmem_cache_t *cachep,
+ struct array_cache *ac, int node)
{
struct kmem_list3 *rl3 = cachep->nodelists[node];
@@ -826,7 +827,7 @@ static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache
static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3)
{
- int i=0;
+ int i = 0;
struct array_cache *ac;
unsigned long flags;
@@ -846,14 +847,13 @@ static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3)
#endif
static int __devinit cpuup_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+ unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
- kmem_cache_t* cachep;
+ kmem_cache_t *cachep;
struct kmem_list3 *l3 = NULL;
int node = cpu_to_node(cpu);
int memsize = sizeof(struct kmem_list3);
- struct array_cache *nc = NULL;
switch (action) {
case CPU_UP_PREPARE:
@@ -871,27 +871,29 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
*/
if (!cachep->nodelists[node]) {
if (!(l3 = kmalloc_node(memsize,
- GFP_KERNEL, node)))
+ GFP_KERNEL, node)))
goto bad;
kmem_list3_init(l3);
l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
- ((unsigned long)cachep)%REAPTIMEOUT_LIST3;
+ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
cachep->nodelists[node] = l3;
}
spin_lock_irq(&cachep->nodelists[node]->list_lock);
cachep->nodelists[node]->free_limit =
- (1 + nr_cpus_node(node)) *
- cachep->batchcount + cachep->num;
+ (1 + nr_cpus_node(node)) *
+ cachep->batchcount + cachep->num;
spin_unlock_irq(&cachep->nodelists[node]->list_lock);
}
/* Now we can go ahead with allocating the shared array's
- & array cache's */
+ & array cache's */
list_for_each_entry(cachep, &cache_chain, next) {
+ struct array_cache *nc;
+
nc = alloc_arraycache(node, cachep->limit,
- cachep->batchcount);
+ cachep->batchcount);
if (!nc)
goto bad;
cachep->array[cpu] = nc;
@@ -900,12 +902,13 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
BUG_ON(!l3);
if (!l3->shared) {
if (!(nc = alloc_arraycache(node,
- cachep->shared*cachep->batchcount,
- 0xbaadf00d)))
- goto bad;
+ cachep->shared *
+ cachep->batchcount,
+ 0xbaadf00d)))
+ goto bad;
/* we are serialised from CPU_DEAD or
- CPU_UP_CANCELLED by the cpucontrol lock */
+ CPU_UP_CANCELLED by the cpucontrol lock */
l3->shared = nc;
}
}
@@ -942,13 +945,13 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
free_block(cachep, nc->entry, nc->avail, node);
if (!cpus_empty(mask)) {
- spin_unlock(&l3->list_lock);
- goto unlock_cache;
- }
+ spin_unlock(&l3->list_lock);
+ goto unlock_cache;
+ }
if (l3->shared) {
free_block(cachep, l3->shared->entry,
- l3->shared->avail, node);
+ l3->shared->avail, node);
kfree(l3->shared);
l3->shared = NULL;
}
@@ -966,7 +969,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
} else {
spin_unlock(&l3->list_lock);
}
-unlock_cache:
+ unlock_cache:
spin_unlock_irq(&cachep->spinlock);
kfree(nc);
}
@@ -975,7 +978,7 @@ unlock_cache:
#endif
}
return NOTIFY_OK;
-bad:
+ bad:
up(&cache_chain_sem);
return NOTIFY_BAD;
}
@@ -985,8 +988,7 @@ static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
/*
* swap the static kmem_list3 with kmalloced memory
*/
-static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list,
- int nodeid)
+static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list, int nodeid)
{
struct kmem_list3 *ptr;
@@ -1055,14 +1057,14 @@ void __init kmem_cache_init(void)
cache_cache.objsize = ALIGN(cache_cache.objsize, cache_line_size());
cache_estimate(0, cache_cache.objsize, cache_line_size(), 0,
- &left_over, &cache_cache.num);
+ &left_over, &cache_cache.num);
if (!cache_cache.num)
BUG();
- cache_cache.colour = left_over/cache_cache.colour_off;
+ cache_cache.colour = left_over / cache_cache.colour_off;
cache_cache.colour_next = 0;
- cache_cache.slab_size = ALIGN(cache_cache.num*sizeof(kmem_bufctl_t) +
- sizeof(struct slab), cache_line_size());
+ cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
+ sizeof(struct slab), cache_line_size());
/* 2+3) create the kmalloc caches */
sizes = malloc_sizes;
@@ -1074,14 +1076,18 @@ void __init kmem_cache_init(void)
*/
sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
- sizes[INDEX_AC].cs_size, ARCH_KMALLOC_MINALIGN,
- (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
+ sizes[INDEX_AC].cs_size,
+ ARCH_KMALLOC_MINALIGN,
+ (ARCH_KMALLOC_FLAGS |
+ SLAB_PANIC), NULL, NULL);
if (INDEX_AC != INDEX_L3)
sizes[INDEX_L3].cs_cachep =
- kmem_cache_create(names[INDEX_L3].name,
- sizes[INDEX_L3].cs_size, ARCH_KMALLOC_MINALIGN,
- (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
+ kmem_cache_create(names[INDEX_L3].name,
+ sizes[INDEX_L3].cs_size,
+ ARCH_KMALLOC_MINALIGN,
+ (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL,
+ NULL);
while (sizes->cs_size != ULONG_MAX) {
/*
@@ -1091,35 +1097,41 @@ void __init kmem_cache_init(void)
* Note for systems short on memory removing the alignment will
* allow tighter packing of the smaller caches.
*/
- if(!sizes->cs_cachep)
+ if (!sizes->cs_cachep)
sizes->cs_cachep = kmem_cache_create(names->name,
- sizes->cs_size, ARCH_KMALLOC_MINALIGN,
- (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL);
+ sizes->cs_size,
+ ARCH_KMALLOC_MINALIGN,
+ (ARCH_KMALLOC_FLAGS
+ | SLAB_PANIC),
+ NULL, NULL);
/* Inc off-slab bufctl limit until the ceiling is hit. */
if (!(OFF_SLAB(sizes->cs_cachep))) {
- offslab_limit = sizes->cs_size-sizeof(struct slab);
+ offslab_limit = sizes->cs_size - sizeof(struct slab);
offslab_limit /= sizeof(kmem_bufctl_t);
}
sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
- sizes->cs_size, ARCH_KMALLOC_MINALIGN,
- (ARCH_KMALLOC_FLAGS | SLAB_CACHE_DMA | SLAB_PANIC),
- NULL, NULL);
+ sizes->cs_size,
+ ARCH_KMALLOC_MINALIGN,
+ (ARCH_KMALLOC_FLAGS |
+ SLAB_CACHE_DMA |
+ SLAB_PANIC), NULL,
+ NULL);
sizes++;
names++;
}
/* 4) Replace the bootstrap head arrays */
{
- void * ptr;
+ void *ptr;
ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
local_irq_disable();
BUG_ON(ac_data(&cache_cache) != &initarray_cache.cache);
memcpy(ptr, ac_data(&cache_cache),
- sizeof(struct arraycache_init));
+ sizeof(struct arraycache_init));
cache_cache.array[smp_processor_id()] = ptr;
local_irq_enable();
@@ -1127,11 +1139,11 @@ void __init kmem_cache_init(void)
local_irq_disable();
BUG_ON(ac_data(malloc_sizes[INDEX_AC].cs_cachep)
- != &initarray_generic.cache);
+ != &initarray_generic.cache);
memcpy(ptr, ac_data(malloc_sizes[INDEX_AC].cs_cachep),
- sizeof(struct arraycache_init));
+ sizeof(struct arraycache_init));
malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
- ptr;
+ ptr;
local_irq_enable();
}
/* 5) Replace the bootstrap kmem_list3's */
@@ -1139,16 +1151,16 @@ void __init kmem_cache_init(void)
int node;
/* Replace the static kmem_list3 structures for the boot cpu */
init_list(&cache_cache, &initkmem_list3[CACHE_CACHE],
- numa_node_id());
+ numa_node_id());
for_each_online_node(node) {
init_list(malloc_sizes[INDEX_AC].cs_cachep,
- &initkmem_list3[SIZE_AC+node], node);
+ &initkmem_list3[SIZE_AC + node], node);
if (INDEX_AC != INDEX_L3) {
init_list(malloc_sizes[INDEX_L3].cs_cachep,
- &initkmem_list3[SIZE_L3+node],
- node);
+ &initkmem_list3[SIZE_L3 + node],
+ node);
}
}
}
@@ -1158,7 +1170,7 @@ void __init kmem_cache_init(void)
kmem_cache_t *cachep;
down(&cache_chain_sem);
list_for_each_entry(cachep, &cache_chain, next)
- enable_cpucache(cachep);
+ enable_cpucache(cachep);
up(&cache_chain_sem);
}
@@ -1184,7 +1196,7 @@ static int __init cpucache_init(void)
* pages to gfp.
*/
for_each_online_cpu(cpu)
- start_cpu_timer(cpu);
+ start_cpu_timer(cpu);
return 0;
}
@@ -1226,7 +1238,7 @@ static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
*/
static void kmem_freepages(kmem_cache_t *cachep, void *addr)
{
- unsigned long i = (1<<cachep->gfporder);
+ unsigned long i = (1 << cachep->gfporder);
struct page *page = virt_to_page(addr);
const unsigned long nr_freed = i;
@@ -1239,13 +1251,13 @@ static void kmem_freepages(kmem_cache_t *cachep, void *addr)
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += nr_freed;
free_pages((unsigned long)addr, cachep->gfporder);
- if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
- atomic_sub(1<<cachep->gfporder, &slab_reclaim_pages);
+ if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
+ atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages);
}
static void kmem_rcu_free(struct rcu_head *head)
{
- struct slab_rcu *slab_rcu = (struct slab_rcu *) head;
+ struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
kmem_cache_t *cachep = slab_rcu->cachep;
kmem_freepages(cachep, slab_rcu->addr);
@@ -1257,19 +1269,19 @@ static void kmem_rcu_free(struct rcu_head *head)
#ifdef CONFIG_DEBUG_PAGEALLOC
static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
- unsigned long caller)
+ unsigned long caller)
{
int size = obj_reallen(cachep);
- addr = (unsigned long *)&((char*)addr)[obj_dbghead(cachep)];
+ addr = (unsigned long *)&((char *)addr)[obj_dbghead(cachep)];
- if (size < 5*sizeof(unsigned long))
+ if (size < 5 * sizeof(unsigned long))
return;
- *addr++=0x12345678;
- *addr++=caller;
- *addr++=smp_processor_id();
- size -= 3*sizeof(unsigned long);
+ *addr++ = 0x12345678;
+ *addr++ = caller;
+ *addr++ = smp_processor_id();
+ size -= 3 * sizeof(unsigned long);
{
unsigned long *sptr = &caller;
unsigned long svalue;
@@ -1277,7 +1289,7 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
while (!kstack_end(sptr)) {
svalue = *sptr++;
if (kernel_text_address(svalue)) {
- *addr++=svalue;
+ *addr++ = svalue;
size -= sizeof(unsigned long);
if (size <= sizeof(unsigned long))
break;
@@ -1285,25 +1297,25 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
}
}
- *addr++=0x87654321;
+ *addr++ = 0x87654321;
}
#endif
static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val)
{
int size = obj_reallen(cachep);
- addr = &((char*)addr)[obj_dbghead(cachep)];
+ addr = &((char *)addr)[obj_dbghead(cachep)];
memset(addr, val, size);
- *(unsigned char *)(addr+size-1) = POISON_END;
+ *(unsigned char *)(addr + size - 1) = POISON_END;
}
static void dump_line(char *data, int offset, int limit)
{
int i;
printk(KERN_ERR "%03x:", offset);
- for (i=0;i<limit;i++) {
- printk(" %02x", (unsigned char)data[offset+i]);
+ for (i = 0; i < limit; i++) {
+ printk(" %02x", (unsigned char)data[offset + i]);
}
printk("\n");
}
@@ -1318,24 +1330,24 @@ static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
if (cachep->flags & SLAB_RED_ZONE) {
printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n",
- *dbg_redzone1(cachep, objp),
- *dbg_redzone2(cachep, objp));
+ *dbg_redzone1(cachep, objp),
+ *dbg_redzone2(cachep, objp));
}
if (cachep->flags & SLAB_STORE_USER) {
printk(KERN_ERR "Last user: [<%p>]",
- *dbg_userword(cachep, objp));
+ *dbg_userword(cachep, objp));
print_symbol("(%s)",
- (unsigned long)*dbg_userword(cachep, objp));
+ (unsigned long)*dbg_userword(cachep, objp));
printk("\n");
}
- realobj = (char*)objp+obj_dbghead(cachep);
+ realobj = (char *)objp + obj_dbghead(cachep);
size = obj_reallen(cachep);
- for (i=0; i<size && lines;i+=16, lines--) {
+ for (i = 0; i < size && lines; i += 16, lines--) {
int limit;
limit = 16;
- if (i+limit > size)
- limit = size-i;
+ if (i + limit > size)
+ limit = size - i;
dump_line(realobj, i, limit);
}
}
@@ -1346,27 +1358,28 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
int size, i;
int lines = 0;
- realobj = (char*)objp+obj_dbghead(cachep);
+ realobj = (char *)objp + obj_dbghead(cachep);
size = obj_reallen(cachep);
- for (i=0;i<size;i++) {
+ for (i = 0; i < size; i++) {
char exp = POISON_FREE;
- if (i == size-1)
+ if (i == size - 1)
exp = POISON_END;
if (realobj[i] != exp) {
int limit;
/* Mismatch ! */
/* Print header */
if (lines == 0) {
- printk(KERN_ERR "Slab corruption: start=%p, len=%d\n",
- realobj, size);
+ printk(KERN_ERR
+ "Slab corruption: start=%p, len=%d\n",
+ realobj, size);
print_objinfo(cachep, objp, 0);
}
/* Hexdump the affected line */
- i = (i/16)*16;
+ i = (i / 16) * 16;
limit = 16;
- if (i+limit > size)
- limit = size-i;
+ if (i + limit > size)
+ limit = size - i;
dump_line(realobj, i, limit);
i += 16;
lines++;
@@ -1382,19 +1395,19 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
struct slab *slabp = page_get_slab(virt_to_page(objp));
int objnr;
- objnr = (objp-slabp->s_mem)/cachep->objsize;
+ objnr = (objp - slabp->s_mem) / cachep->objsize;
if (objnr) {
- objp = slabp->s_mem+(objnr-1)*cachep->objsize;
- realobj = (char*)objp+obj_dbghead(cachep);
+ objp = slabp->s_mem + (objnr - 1) * cachep->objsize;
+ realobj = (char *)objp + obj_dbghead(cachep);
printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
- realobj, size);
+ realobj, size);
print_objinfo(cachep, objp, 2);
}
- if (objnr+1 < cachep->num) {
- objp = slabp->s_mem+(objnr+1)*cachep->objsize;
- realobj = (char*)objp+obj_dbghead(cachep);
+ if (objnr + 1 < cachep->num) {
+ objp = slabp->s_mem + (objnr + 1) * cachep->objsize;
+ realobj = (char *)objp + obj_dbghead(cachep);
printk(KERN_ERR "Next obj: start=%p, len=%d\n",
- realobj, size);
+ realobj, size);
print_objinfo(cachep, objp, 2);
}
}
@@ -1405,7 +1418,7 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
* Before calling the slab must have been unlinked from the cache.
* The cache-lock is not held/needed.
*/
-static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
+static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
{
void *addr = slabp->s_mem - slabp->colouroff;
@@ -1416,8 +1429,11 @@ static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
- if ((cachep->objsize%PAGE_SIZE)==0 && OFF_SLAB(cachep))
- kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE,1);
+ if ((cachep->objsize % PAGE_SIZE) == 0
+ && OFF_SLAB(cachep))
+ kernel_map_pages(virt_to_page(objp),
+ cachep->objsize / PAGE_SIZE,
+ 1);
else
check_poison_obj(cachep, objp);
#else
@@ -1427,20 +1443,20 @@ static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
slab_error(cachep, "start of a freed object "
- "was overwritten");
+ "was overwritten");
if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
slab_error(cachep, "end of a freed object "
- "was overwritten");
+ "was overwritten");
}
if (cachep->dtor && !(cachep->flags & SLAB_POISON))
- (cachep->dtor)(objp+obj_dbghead(cachep), cachep, 0);
+ (cachep->dtor) (objp + obj_dbghead(cachep), cachep, 0);
}
#else
if (cachep->dtor) {
int i;
for (i = 0; i < cachep->num; i++) {
- void* objp = slabp->s_mem+cachep->objsize*i;
- (cachep->dtor)(objp, cachep, 0);
+ void *objp = slabp->s_mem + cachep->objsize * i;
+ (cachep->dtor) (objp, cachep, 0);
}
}
#endif
@@ -1448,7 +1464,7 @@ static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
struct slab_rcu *slab_rcu;
- slab_rcu = (struct slab_rcu *) slabp;
+ slab_rcu = (struct slab_rcu *)slabp;
slab_rcu->cachep = cachep;
slab_rcu->addr = addr;
call_rcu(&slab_rcu->head, kmem_rcu_free);
@@ -1466,11 +1482,58 @@ static inline void set_up_list3s(kmem_cache_t *cachep, int index)
int node;
for_each_online_node(node) {
- cachep->nodelists[node] = &initkmem_list3[index+node];
+ cachep->nodelists[node] = &initkmem_list3[index + node];
cachep->nodelists[node]->next_reap = jiffies +
- REAPTIMEOUT_LIST3 +
- ((unsigned long)cachep)%REAPTIMEOUT_LIST3;
+ REAPTIMEOUT_LIST3 +
+ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+ }
+}
+
+/**
+ * calculate_slab_order - calculate size (page order) of slabs and the number
+ * of objects per slab.
+ *
+ * This could be made much more intelligent. For now, try to avoid using
+ * high order pages for slabs. When the gfp() functions are more friendly
+ * towards high-order requests, this should be changed.
+ */
+static inline size_t calculate_slab_order(kmem_cache_t *cachep, size_t size,
+ size_t align, gfp_t flags)
+{
+ size_t left_over = 0;
+
+ for (;; cachep->gfporder++) {
+ unsigned int num;
+ size_t remainder;
+
+ if (cachep->gfporder > MAX_GFP_ORDER) {
+ cachep->num = 0;
+ break;
+ }
+
+ cache_estimate(cachep->gfporder, size, align, flags,
+ &remainder, &num);
+ if (!num)
+ continue;
+ /* More than offslab_limit objects will cause problems */
+ if (flags & CFLGS_OFF_SLAB && cachep->num > offslab_limit)
+ break;
+
+ cachep->num = num;
+ left_over = remainder;
+
+ /*
+ * Large number of objects is good, but very large slabs are
+ * currently bad for the gfp()s.
+ */
+ if (cachep->gfporder >= slab_break_gfp_order)
+ break;
+
+ if ((left_over * 8) <= (PAGE_SIZE << cachep->gfporder))
+ /* Acceptable internal fragmentation */
+ break;
}
+ return left_over;
}
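The new calculate_slab_order() keeps growing gfporder until an object fits, the break order is reached, or internal fragmentation falls to at most 1/8 of the slab. A stripped-down model of that selection loop; estimate() here is a simplified stand-in for cache_estimate(), and the constants are illustrative:

/* Order-selection sketch; estimate() is a simplified stand-in. */
#include <stdio.h>

#define PAGE_SIZE            4096UL
#define MAX_GFP_ORDER        5
#define SLAB_BREAK_GFP_ORDER 1

static void estimate(unsigned int order, unsigned long size,
		     unsigned long *left_over, unsigned int *num)
{
	unsigned long bytes = PAGE_SIZE << order;

	*num = bytes / size;
	*left_over = bytes - *num * size;
}

int main(void)
{
	unsigned long size = 3000, left = 0;	/* awkward size: poor fit at order 0 */
	unsigned int order, num = 0;

	for (order = 0; order <= MAX_GFP_ORDER; order++) {
		estimate(order, size, &left, &num);
		if (!num)
			continue;			/* object does not fit yet */
		if (order >= SLAB_BREAK_GFP_ORDER)
			break;				/* don't chase huge slabs */
		if (left * 8 <= (PAGE_SIZE << order))
			break;				/* fragmentation acceptable */
	}
	printf("order=%u objs=%u left_over=%lu\n", order, num, left);
	return 0;
}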
/**
@@ -1519,14 +1582,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* Sanity checks... these are all serious usage bugs.
*/
if ((!name) ||
- in_interrupt() ||
- (size < BYTES_PER_WORD) ||
- (size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) ||
- (dtor && !ctor)) {
- printk(KERN_ERR "%s: Early error in slab %s\n",
- __FUNCTION__, name);
- BUG();
- }
+ in_interrupt() ||
+ (size < BYTES_PER_WORD) ||
+ (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
+ printk(KERN_ERR "%s: Early error in slab %s\n",
+ __FUNCTION__, name);
+ BUG();
+ }
down(&cache_chain_sem);
@@ -1546,11 +1608,11 @@ kmem_cache_create (const char *name, size_t size, size_t align,
set_fs(old_fs);
if (res) {
printk("SLAB: cache with size %d has lost its name\n",
- pc->objsize);
+ pc->objsize);
continue;
}
- if (!strcmp(pc->name,name)) {
+ if (!strcmp(pc->name, name)) {
printk("kmem_cache_create: duplicate cache %s\n", name);
dump_stack();
goto oops;
@@ -1562,10 +1624,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
/* No constructor, but initial state check requested */
printk(KERN_ERR "%s: No con, but init state check "
- "requested - %s\n", __FUNCTION__, name);
+ "requested - %s\n", __FUNCTION__, name);
flags &= ~SLAB_DEBUG_INITIAL;
}
-
#if FORCED_DEBUG
/*
* Enable redzoning and last user accounting, except for caches with
@@ -1573,8 +1634,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* above the next power of two: caches with object sizes just above a
* power of two have a significant amount of internal fragmentation.
*/
- if ((size < 4096 || fls(size-1) == fls(size-1+3*BYTES_PER_WORD)))
- flags |= SLAB_RED_ZONE|SLAB_STORE_USER;
+ if ((size < 4096
+ || fls(size - 1) == fls(size - 1 + 3 * BYTES_PER_WORD)))
+ flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
if (!(flags & SLAB_DESTROY_BY_RCU))
flags |= SLAB_POISON;
#endif
@@ -1595,9 +1657,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* unaligned accesses for some archs when redzoning is used, and makes
* sure any on-slab bufctl's are also correctly aligned.
*/
- if (size & (BYTES_PER_WORD-1)) {
- size += (BYTES_PER_WORD-1);
- size &= ~(BYTES_PER_WORD-1);
+ if (size & (BYTES_PER_WORD - 1)) {
+ size += (BYTES_PER_WORD - 1);
+ size &= ~(BYTES_PER_WORD - 1);
}
/* calculate out the final buffer alignment: */
@@ -1608,7 +1670,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* objects into one cacheline.
*/
ralign = cache_line_size();
- while (size <= ralign/2)
+ while (size <= ralign / 2)
ralign /= 2;
} else {
ralign = BYTES_PER_WORD;
@@ -1617,13 +1679,13 @@ kmem_cache_create (const char *name, size_t size, size_t align,
if (ralign < ARCH_SLAB_MINALIGN) {
ralign = ARCH_SLAB_MINALIGN;
if (ralign > BYTES_PER_WORD)
- flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
+ flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
}
/* 3) caller mandated alignment: disables debug if necessary */
if (ralign < align) {
ralign = align;
if (ralign > BYTES_PER_WORD)
- flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
+ flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
}
/* 4) Store it. Note that the debug code below can reduce
* the alignment to BYTES_PER_WORD.
@@ -1645,7 +1707,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
/* add space for red zone words */
cachep->dbghead += BYTES_PER_WORD;
- size += 2*BYTES_PER_WORD;
+ size += 2 * BYTES_PER_WORD;
}
if (flags & SLAB_STORE_USER) {
/* user store requires word alignment and
@@ -1656,7 +1718,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
size += BYTES_PER_WORD;
}
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
- if (size >= malloc_sizes[INDEX_L3+1].cs_size && cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
+ if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
+ && cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
cachep->dbghead += PAGE_SIZE - size;
size = PAGE_SIZE;
}
@@ -1664,7 +1727,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
#endif
/* Determine if the slab management is 'on' or 'off' slab. */
- if (size >= (PAGE_SIZE>>3))
+ if (size >= (PAGE_SIZE >> 3))
/*
* Size is large, assume best to place the slab management obj
* off-slab (should allow better packing of objs).
@@ -1681,47 +1744,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
*/
cachep->gfporder = 0;
cache_estimate(cachep->gfporder, size, align, flags,
- &left_over, &cachep->num);
- } else {
- /*
- * Calculate size (in pages) of slabs, and the num of objs per
- * slab. This could be made much more intelligent. For now,
- * try to avoid using high page-orders for slabs. When the
- * gfp() funcs are more friendly towards high-order requests,
- * this should be changed.
- */
- do {
- unsigned int break_flag = 0;
-cal_wastage:
- cache_estimate(cachep->gfporder, size, align, flags,
- &left_over, &cachep->num);
- if (break_flag)
- break;
- if (cachep->gfporder >= MAX_GFP_ORDER)
- break;
- if (!cachep->num)
- goto next;
- if (flags & CFLGS_OFF_SLAB &&
- cachep->num > offslab_limit) {
- /* This num of objs will cause problems. */
- cachep->gfporder--;
- break_flag++;
- goto cal_wastage;
- }
-
- /*
- * Large num of objs is good, but v. large slabs are
- * currently bad for the gfp()s.
- */
- if (cachep->gfporder >= slab_break_gfp_order)
- break;
-
- if ((left_over*8) <= (PAGE_SIZE<<cachep->gfporder))
- break; /* Acceptable internal fragmentation. */
-next:
- cachep->gfporder++;
- } while (1);
- }
+ &left_over, &cachep->num);
+ } else
+ left_over = calculate_slab_order(cachep, size, align, flags);
if (!cachep->num) {
printk("kmem_cache_create: couldn't create cache %s.\n", name);
@@ -1729,8 +1754,8 @@ next:
cachep = NULL;
goto oops;
}
- slab_size = ALIGN(cachep->num*sizeof(kmem_bufctl_t)
- + sizeof(struct slab), align);
+ slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
+ + sizeof(struct slab), align);
/*
* If the slab has been placed off-slab, and we have enough space then
@@ -1743,14 +1768,15 @@ next:
if (flags & CFLGS_OFF_SLAB) {
/* really off slab. No need for manual alignment */
- slab_size = cachep->num*sizeof(kmem_bufctl_t)+sizeof(struct slab);
+ slab_size =
+ cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
}
cachep->colour_off = cache_line_size();
/* Offset must be a multiple of the alignment. */
if (cachep->colour_off < align)
cachep->colour_off = align;
- cachep->colour = left_over/cachep->colour_off;
+ cachep->colour = left_over / cachep->colour_off;
cachep->slab_size = slab_size;
cachep->flags = flags;
cachep->gfpflags = 0;
@@ -1777,7 +1803,7 @@ next:
* the creation of further caches will BUG().
*/
cachep->array[smp_processor_id()] =
- &initarray_generic.cache;
+ &initarray_generic.cache;
/* If the cache that's used by
* kmalloc(sizeof(kmem_list3)) is the first cache,
@@ -1791,8 +1817,7 @@ next:
g_cpucache_up = PARTIAL_AC;
} else {
cachep->array[smp_processor_id()] =
- kmalloc(sizeof(struct arraycache_init),
- GFP_KERNEL);
+ kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
if (g_cpucache_up == PARTIAL_AC) {
set_up_list3s(cachep, SIZE_L3);
@@ -1802,16 +1827,18 @@ next:
for_each_online_node(node) {
cachep->nodelists[node] =
- kmalloc_node(sizeof(struct kmem_list3),
- GFP_KERNEL, node);
+ kmalloc_node(sizeof
+ (struct kmem_list3),
+ GFP_KERNEL, node);
BUG_ON(!cachep->nodelists[node]);
- kmem_list3_init(cachep->nodelists[node]);
+ kmem_list3_init(cachep->
+ nodelists[node]);
}
}
}
cachep->nodelists[numa_node_id()]->next_reap =
- jiffies + REAPTIMEOUT_LIST3 +
- ((unsigned long)cachep)%REAPTIMEOUT_LIST3;
+ jiffies + REAPTIMEOUT_LIST3 +
+ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
BUG_ON(!ac_data(cachep));
ac_data(cachep)->avail = 0;
@@ -1820,15 +1847,15 @@ next:
ac_data(cachep)->touched = 0;
cachep->batchcount = 1;
cachep->limit = BOOT_CPUCACHE_ENTRIES;
- }
+ }
/* cache setup completed, link it into the list */
list_add(&cachep->next, &cache_chain);
unlock_cpu_hotplug();
-oops:
+ oops:
if (!cachep && (flags & SLAB_PANIC))
panic("kmem_cache_create(): failed to create slab `%s'\n",
- name);
+ name);
up(&cache_chain_sem);
return cachep;
}
@@ -1871,7 +1898,7 @@ static inline void check_spinlock_acquired_node(kmem_cache_t *cachep, int node)
/*
* Waits for all CPUs to execute func().
*/
-static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
+static void smp_call_function_all_cpus(void (*func)(void *arg), void *arg)
{
check_irq_on();
preempt_disable();
@@ -1886,12 +1913,12 @@ static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
preempt_enable();
}
-static void drain_array_locked(kmem_cache_t* cachep,
- struct array_cache *ac, int force, int node);
+static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
+ int force, int node);
static void do_drain(void *arg)
{
- kmem_cache_t *cachep = (kmem_cache_t*)arg;
+ kmem_cache_t *cachep = (kmem_cache_t *) arg;
struct array_cache *ac;
int node = numa_node_id();
@@ -1911,7 +1938,7 @@ static void drain_cpu_caches(kmem_cache_t *cachep)
smp_call_function_all_cpus(do_drain, cachep);
check_irq_on();
spin_lock_irq(&cachep->spinlock);
- for_each_online_node(node) {
+ for_each_online_node(node) {
l3 = cachep->nodelists[node];
if (l3) {
spin_lock(&l3->list_lock);
@@ -1949,8 +1976,7 @@ static int __node_shrink(kmem_cache_t *cachep, int node)
slab_destroy(cachep, slabp);
spin_lock_irq(&l3->list_lock);
}
- ret = !list_empty(&l3->slabs_full) ||
- !list_empty(&l3->slabs_partial);
+ ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial);
return ret;
}
@@ -2006,7 +2032,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
* The caller must guarantee that no one will allocate memory from the cache
* during the kmem_cache_destroy().
*/
-int kmem_cache_destroy(kmem_cache_t * cachep)
+int kmem_cache_destroy(kmem_cache_t *cachep)
{
int i;
struct kmem_list3 *l3;
@@ -2028,7 +2054,7 @@ int kmem_cache_destroy(kmem_cache_t * cachep)
if (__cache_shrink(cachep)) {
slab_error(cachep, "Can't free all objects");
down(&cache_chain_sem);
- list_add(&cachep->next,&cache_chain);
+ list_add(&cachep->next, &cache_chain);
up(&cache_chain_sem);
unlock_cpu_hotplug();
return 1;
@@ -2038,7 +2064,7 @@ int kmem_cache_destroy(kmem_cache_t * cachep)
synchronize_rcu();
for_each_online_cpu(i)
- kfree(cachep->array[i]);
+ kfree(cachep->array[i]);
/* NUMA: free the list3 structures */
for_each_online_node(i) {
@@ -2057,39 +2083,39 @@ int kmem_cache_destroy(kmem_cache_t * cachep)
EXPORT_SYMBOL(kmem_cache_destroy);
/* Get the memory for a slab management obj. */
-static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
- int colour_off, gfp_t local_flags)
+static struct slab *alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
+ int colour_off, gfp_t local_flags)
{
struct slab *slabp;
-
+
if (OFF_SLAB(cachep)) {
/* Slab management obj is off-slab. */
slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
if (!slabp)
return NULL;
} else {
- slabp = objp+colour_off;
+ slabp = objp + colour_off;
colour_off += cachep->slab_size;
}
slabp->inuse = 0;
slabp->colouroff = colour_off;
- slabp->s_mem = objp+colour_off;
+ slabp->s_mem = objp + colour_off;
return slabp;
}
static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
{
- return (kmem_bufctl_t *)(slabp+1);
+ return (kmem_bufctl_t *) (slabp + 1);
}
static void cache_init_objs(kmem_cache_t *cachep,
- struct slab *slabp, unsigned long ctor_flags)
+ struct slab *slabp, unsigned long ctor_flags)
{
int i;
for (i = 0; i < cachep->num; i++) {
- void *objp = slabp->s_mem+cachep->objsize*i;
+ void *objp = slabp->s_mem + cachep->objsize * i;
#if DEBUG
/* need to poison the objs? */
if (cachep->flags & SLAB_POISON)
@@ -2107,25 +2133,28 @@ static void cache_init_objs(kmem_cache_t *cachep,
* Otherwise, deadlock. They must also be threaded.
*/
if (cachep->ctor && !(cachep->flags & SLAB_POISON))
- cachep->ctor(objp+obj_dbghead(cachep), cachep, ctor_flags);
+ cachep->ctor(objp + obj_dbghead(cachep), cachep,
+ ctor_flags);
if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
slab_error(cachep, "constructor overwrote the"
- " end of an object");
+ " end of an object");
if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
slab_error(cachep, "constructor overwrote the"
- " start of an object");
+ " start of an object");
}
- if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
- kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0);
+ if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
+ && cachep->flags & SLAB_POISON)
+ kernel_map_pages(virt_to_page(objp),
+ cachep->objsize / PAGE_SIZE, 0);
#else
if (cachep->ctor)
cachep->ctor(objp, cachep, ctor_flags);
#endif
- slab_bufctl(slabp)[i] = i+1;
+ slab_bufctl(slabp)[i] = i + 1;
}
- slab_bufctl(slabp)[i-1] = BUFCTL_END;
+ slab_bufctl(slabp)[i - 1] = BUFCTL_END;
slabp->free = 0;
}
@@ -2161,17 +2190,17 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
*/
static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
{
- struct slab *slabp;
- void *objp;
- size_t offset;
- gfp_t local_flags;
- unsigned long ctor_flags;
+ struct slab *slabp;
+ void *objp;
+ size_t offset;
+ gfp_t local_flags;
+ unsigned long ctor_flags;
struct kmem_list3 *l3;
/* Be lazy and only check for valid flags here,
- * keeping it out of the critical path in kmem_cache_alloc().
+ * keeping it out of the critical path in kmem_cache_alloc().
*/
- if (flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW))
+ if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW))
BUG();
if (flags & SLAB_NO_GROW)
return 0;
@@ -2237,9 +2266,9 @@ static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
l3->free_objects += cachep->num;
spin_unlock(&l3->list_lock);
return 1;
-opps1:
+ opps1:
kmem_freepages(cachep, objp);
-failed:
+ failed:
if (local_flags & __GFP_WAIT)
local_irq_disable();
return 0;
@@ -2259,18 +2288,19 @@ static void kfree_debugcheck(const void *objp)
if (!virt_addr_valid(objp)) {
printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
- (unsigned long)objp);
- BUG();
+ (unsigned long)objp);
+ BUG();
}
page = virt_to_page(objp);
if (!PageSlab(page)) {
- printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n", (unsigned long)objp);
+ printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n",
+ (unsigned long)objp);
BUG();
}
}
static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
- void *caller)
+ void *caller)
{
struct page *page;
unsigned int objnr;
@@ -2281,20 +2311,26 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
page = virt_to_page(objp);
if (page_get_cache(page) != cachep) {
- printk(KERN_ERR "mismatch in kmem_cache_free: expected cache %p, got %p\n",
- page_get_cache(page),cachep);
+ printk(KERN_ERR
+ "mismatch in kmem_cache_free: expected cache %p, got %p\n",
+ page_get_cache(page), cachep);
printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
- printk(KERN_ERR "%p is %s.\n", page_get_cache(page), page_get_cache(page)->name);
+ printk(KERN_ERR "%p is %s.\n", page_get_cache(page),
+ page_get_cache(page)->name);
WARN_ON(1);
}
slabp = page_get_slab(page);
if (cachep->flags & SLAB_RED_ZONE) {
- if (*dbg_redzone1(cachep, objp) != RED_ACTIVE || *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
- slab_error(cachep, "double free, or memory outside"
- " object was overwritten");
- printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
- objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp));
+ if (*dbg_redzone1(cachep, objp) != RED_ACTIVE
+ || *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
+ slab_error(cachep,
+ "double free, or memory outside"
+ " object was overwritten");
+ printk(KERN_ERR
+ "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
+ objp, *dbg_redzone1(cachep, objp),
+ *dbg_redzone2(cachep, objp));
}
*dbg_redzone1(cachep, objp) = RED_INACTIVE;
*dbg_redzone2(cachep, objp) = RED_INACTIVE;
@@ -2302,30 +2338,31 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
if (cachep->flags & SLAB_STORE_USER)
*dbg_userword(cachep, objp) = caller;
- objnr = (objp-slabp->s_mem)/cachep->objsize;
+ objnr = (objp - slabp->s_mem) / cachep->objsize;
BUG_ON(objnr >= cachep->num);
- BUG_ON(objp != slabp->s_mem + objnr*cachep->objsize);
+ BUG_ON(objp != slabp->s_mem + objnr * cachep->objsize);
if (cachep->flags & SLAB_DEBUG_INITIAL) {
/* Need to call the slab's constructor so the
* caller can perform a verify of its state (debugging).
* Called without the cache-lock held.
*/
- cachep->ctor(objp+obj_dbghead(cachep),
- cachep, SLAB_CTOR_CONSTRUCTOR|SLAB_CTOR_VERIFY);
+ cachep->ctor(objp + obj_dbghead(cachep),
+ cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
}
if (cachep->flags & SLAB_POISON && cachep->dtor) {
/* we want to cache poison the object,
* call the destruction callback
*/
- cachep->dtor(objp+obj_dbghead(cachep), cachep, 0);
+ cachep->dtor(objp + obj_dbghead(cachep), cachep, 0);
}
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
store_stackinfo(cachep, objp, (unsigned long)caller);
- kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0);
+ kernel_map_pages(virt_to_page(objp),
+ cachep->objsize / PAGE_SIZE, 0);
} else {
poison_obj(cachep, objp, POISON_FREE);
}
@@ -2340,7 +2377,7 @@ static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
{
kmem_bufctl_t i;
int entries = 0;
-
+
/* Check slab's freelist to see if this obj is there. */
for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
entries++;
@@ -2348,13 +2385,16 @@ static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
goto bad;
}
if (entries != cachep->num - slabp->inuse) {
-bad:
- printk(KERN_ERR "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
- cachep->name, cachep->num, slabp, slabp->inuse);
- for (i=0;i<sizeof(slabp)+cachep->num*sizeof(kmem_bufctl_t);i++) {
- if ((i%16)==0)
+ bad:
+ printk(KERN_ERR
+ "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
+ cachep->name, cachep->num, slabp, slabp->inuse);
+ for (i = 0;
+ i < sizeof(slabp) + cachep->num * sizeof(kmem_bufctl_t);
+ i++) {
+ if ((i % 16) == 0)
printk("\n%03x:", i);
- printk(" %02x", ((unsigned char*)slabp)[i]);
+ printk(" %02x", ((unsigned char *)slabp)[i]);
}
printk("\n");
BUG();
@@ -2374,7 +2414,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
check_irq_off();
ac = ac_data(cachep);
-retry:
+ retry:
batchcount = ac->batchcount;
if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
/* if there was little recent activity on this
@@ -2396,8 +2436,8 @@ retry:
shared_array->avail -= batchcount;
ac->avail = batchcount;
memcpy(ac->entry,
- &(shared_array->entry[shared_array->avail]),
- sizeof(void*)*batchcount);
+ &(shared_array->entry[shared_array->avail]),
+ sizeof(void *) * batchcount);
shared_array->touched = 1;
goto alloc_done;
}
@@ -2425,7 +2465,7 @@ retry:
/* get obj pointer */
ac->entry[ac->avail++] = slabp->s_mem +
- slabp->free*cachep->objsize;
+ slabp->free * cachep->objsize;
slabp->inuse++;
next = slab_bufctl(slabp)[slabp->free];
@@ -2433,7 +2473,7 @@ retry:
slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
WARN_ON(numa_node_id() != slabp->nodeid);
#endif
- slabp->free = next;
+ slabp->free = next;
}
check_slabp(cachep, slabp);
@@ -2445,9 +2485,9 @@ retry:
list_add(&slabp->list, &l3->slabs_partial);
}
-must_grow:
+ must_grow:
l3->free_objects -= ac->avail;
-alloc_done:
+ alloc_done:
spin_unlock(&l3->list_lock);
if (unlikely(!ac->avail)) {
@@ -2459,7 +2499,7 @@ alloc_done:
if (!x && ac->avail == 0) // no objects in sight? abort
return NULL;
- if (!ac->avail) // objects refilled by interrupt?
+ if (!ac->avail) // objects refilled by interrupt?
goto retry;
}
ac->touched = 1;
@@ -2476,16 +2516,16 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
}
#if DEBUG
-static void *
-cache_alloc_debugcheck_after(kmem_cache_t *cachep,
- gfp_t flags, void *objp, void *caller)
+static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
+ void *objp, void *caller)
{
- if (!objp)
+ if (!objp)
return objp;
- if (cachep->flags & SLAB_POISON) {
+ if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
- kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 1);
+ kernel_map_pages(virt_to_page(objp),
+ cachep->objsize / PAGE_SIZE, 1);
else
check_poison_obj(cachep, objp);
#else
@@ -2497,24 +2537,28 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep,
*dbg_userword(cachep, objp) = caller;
if (cachep->flags & SLAB_RED_ZONE) {
- if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
- slab_error(cachep, "double free, or memory outside"
- " object was overwritten");
- printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
- objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp));
+ if (*dbg_redzone1(cachep, objp) != RED_INACTIVE
+ || *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
+ slab_error(cachep,
+ "double free, or memory outside"
+ " object was overwritten");
+ printk(KERN_ERR
+ "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
+ objp, *dbg_redzone1(cachep, objp),
+ *dbg_redzone2(cachep, objp));
}
*dbg_redzone1(cachep, objp) = RED_ACTIVE;
*dbg_redzone2(cachep, objp) = RED_ACTIVE;
}
objp += obj_dbghead(cachep);
if (cachep->ctor && cachep->flags & SLAB_POISON) {
- unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
+ unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
if (!(flags & __GFP_WAIT))
ctor_flags |= SLAB_CTOR_ATOMIC;
cachep->ctor(objp, cachep, ctor_flags);
- }
+ }
return objp;
}
#else
@@ -2523,7 +2567,7 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep,
static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
{
- void* objp;
+ void *objp;
struct array_cache *ac;
check_irq_off();
@@ -2542,7 +2586,7 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
{
unsigned long save_flags;
- void* objp;
+ void *objp;
cache_alloc_debugcheck_before(cachep, flags);
@@ -2550,7 +2594,7 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
objp = ____cache_alloc(cachep, flags);
local_irq_restore(save_flags);
objp = cache_alloc_debugcheck_after(cachep, flags, objp,
- __builtin_return_address(0));
+ __builtin_return_address(0));
prefetchw(objp);
return objp;
}
@@ -2562,74 +2606,75 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
{
struct list_head *entry;
- struct slab *slabp;
- struct kmem_list3 *l3;
- void *obj;
- kmem_bufctl_t next;
- int x;
-
- l3 = cachep->nodelists[nodeid];
- BUG_ON(!l3);
-
-retry:
- spin_lock(&l3->list_lock);
- entry = l3->slabs_partial.next;
- if (entry == &l3->slabs_partial) {
- l3->free_touched = 1;
- entry = l3->slabs_free.next;
- if (entry == &l3->slabs_free)
- goto must_grow;
- }
-
- slabp = list_entry(entry, struct slab, list);
- check_spinlock_acquired_node(cachep, nodeid);
- check_slabp(cachep, slabp);
-
- STATS_INC_NODEALLOCS(cachep);
- STATS_INC_ACTIVE(cachep);
- STATS_SET_HIGH(cachep);
-
- BUG_ON(slabp->inuse == cachep->num);
-
- /* get obj pointer */
- obj = slabp->s_mem + slabp->free*cachep->objsize;
- slabp->inuse++;
- next = slab_bufctl(slabp)[slabp->free];
+ struct slab *slabp;
+ struct kmem_list3 *l3;
+ void *obj;
+ kmem_bufctl_t next;
+ int x;
+
+ l3 = cachep->nodelists[nodeid];
+ BUG_ON(!l3);
+
+ retry:
+ spin_lock(&l3->list_lock);
+ entry = l3->slabs_partial.next;
+ if (entry == &l3->slabs_partial) {
+ l3->free_touched = 1;
+ entry = l3->slabs_free.next;
+ if (entry == &l3->slabs_free)
+ goto must_grow;
+ }
+
+ slabp = list_entry(entry, struct slab, list);
+ check_spinlock_acquired_node(cachep, nodeid);
+ check_slabp(cachep, slabp);
+
+ STATS_INC_NODEALLOCS(cachep);
+ STATS_INC_ACTIVE(cachep);
+ STATS_SET_HIGH(cachep);
+
+ BUG_ON(slabp->inuse == cachep->num);
+
+ /* get obj pointer */
+ obj = slabp->s_mem + slabp->free * cachep->objsize;
+ slabp->inuse++;
+ next = slab_bufctl(slabp)[slabp->free];
#if DEBUG
- slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
+ slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
#endif
- slabp->free = next;
- check_slabp(cachep, slabp);
- l3->free_objects--;
- /* move slabp to correct slabp list: */
- list_del(&slabp->list);
-
- if (slabp->free == BUFCTL_END) {
- list_add(&slabp->list, &l3->slabs_full);
- } else {
- list_add(&slabp->list, &l3->slabs_partial);
- }
+ slabp->free = next;
+ check_slabp(cachep, slabp);
+ l3->free_objects--;
+ /* move slabp to correct slabp list: */
+ list_del(&slabp->list);
+
+ if (slabp->free == BUFCTL_END) {
+ list_add(&slabp->list, &l3->slabs_full);
+ } else {
+ list_add(&slabp->list, &l3->slabs_partial);
+ }
- spin_unlock(&l3->list_lock);
- goto done;
+ spin_unlock(&l3->list_lock);
+ goto done;
-must_grow:
- spin_unlock(&l3->list_lock);
- x = cache_grow(cachep, flags, nodeid);
+ must_grow:
+ spin_unlock(&l3->list_lock);
+ x = cache_grow(cachep, flags, nodeid);
- if (!x)
- return NULL;
+ if (!x)
+ return NULL;
- goto retry;
-done:
- return obj;
+ goto retry;
+ done:
+ return obj;
}
#endif
/*
* Caller needs to acquire correct kmem_list's list_lock
*/
-static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int node)
+static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
+ int node)
{
int i;
struct kmem_list3 *l3;
@@ -2652,7 +2697,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int n
if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
printk(KERN_ERR "slab: double free detected in cache "
- "'%s', objp %p\n", cachep->name, objp);
+ "'%s', objp %p\n", cachep->name, objp);
BUG();
}
#endif
@@ -2696,20 +2741,19 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
spin_lock(&l3->list_lock);
if (l3->shared) {
struct array_cache *shared_array = l3->shared;
- int max = shared_array->limit-shared_array->avail;
+ int max = shared_array->limit - shared_array->avail;
if (max) {
if (batchcount > max)
batchcount = max;
memcpy(&(shared_array->entry[shared_array->avail]),
- ac->entry,
- sizeof(void*)*batchcount);
+ ac->entry, sizeof(void *) * batchcount);
shared_array->avail += batchcount;
goto free_done;
}
}
free_block(cachep, ac->entry, batchcount, node);
-free_done:
+ free_done:
#if STATS
{
int i = 0;
@@ -2731,10 +2775,9 @@ free_done:
spin_unlock(&l3->list_lock);
ac->avail -= batchcount;
memmove(ac->entry, &(ac->entry[batchcount]),
- sizeof(void*)*ac->avail);
+ sizeof(void *) * ac->avail);
}
-
/*
* __cache_free
* Release an obj back to its cache. If the obj has a constructed
@@ -2759,7 +2802,8 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
if (unlikely(slabp->nodeid != numa_node_id())) {
struct array_cache *alien = NULL;
int nodeid = slabp->nodeid;
- struct kmem_list3 *l3 = cachep->nodelists[numa_node_id()];
+ struct kmem_list3 *l3 =
+ cachep->nodelists[numa_node_id()];
STATS_INC_NODEFREES(cachep);
if (l3->alien && l3->alien[nodeid]) {
@@ -2767,15 +2811,15 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
spin_lock(&alien->lock);
if (unlikely(alien->avail == alien->limit))
__drain_alien_cache(cachep,
- alien, nodeid);
+ alien, nodeid);
alien->entry[alien->avail++] = objp;
spin_unlock(&alien->lock);
} else {
spin_lock(&(cachep->nodelists[nodeid])->
- list_lock);
+ list_lock);
free_block(cachep, &objp, 1, nodeid);
spin_unlock(&(cachep->nodelists[nodeid])->
- list_lock);
+ list_lock);
}
return;
}
@@ -2822,9 +2866,9 @@ EXPORT_SYMBOL(kmem_cache_alloc);
*/
int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
{
- unsigned long addr = (unsigned long) ptr;
+ unsigned long addr = (unsigned long)ptr;
unsigned long min_addr = PAGE_OFFSET;
- unsigned long align_mask = BYTES_PER_WORD-1;
+ unsigned long align_mask = BYTES_PER_WORD - 1;
unsigned long size = cachep->objsize;
struct page *page;
@@ -2844,7 +2888,7 @@ int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
if (unlikely(page_get_cache(page) != cachep))
goto out;
return 1;
-out:
+ out:
return 0;
}
@@ -2871,8 +2915,10 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
if (unlikely(!cachep->nodelists[nodeid])) {
/* Fall back to __cache_alloc if we run into trouble */
- printk(KERN_WARNING "slab: not allocating in inactive node %d for cache %s\n", nodeid, cachep->name);
- return __cache_alloc(cachep,flags);
+ printk(KERN_WARNING
+ "slab: not allocating in inactive node %d for cache %s\n",
+ nodeid, cachep->name);
+ return __cache_alloc(cachep, flags);
}
cache_alloc_debugcheck_before(cachep, flags);
@@ -2882,7 +2928,9 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
else
ptr = __cache_alloc_node(cachep, flags, nodeid);
local_irq_restore(save_flags);
- ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, __builtin_return_address(0));
+ ptr =
+ cache_alloc_debugcheck_after(cachep, flags, ptr,
+ __builtin_return_address(0));
return ptr;
}
@@ -2944,12 +2992,11 @@ EXPORT_SYMBOL(__kmalloc);
* Objects should be dereferenced using the per_cpu_ptr macro only.
*
* @size: how many bytes of memory are required.
- * @align: the alignment, which can't be greater than SMP_CACHE_BYTES.
*/
-void *__alloc_percpu(size_t size, size_t align)
+void *__alloc_percpu(size_t size)
{
int i;
- struct percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL);
+ struct percpu_data *pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
@@ -2973,9 +3020,9 @@ void *__alloc_percpu(size_t size, size_t align)
}
/* Catch derefs w/o wrappers */
- return (void *) (~(unsigned long) pdata);
+ return (void *)(~(unsigned long)pdata);
-unwind_oom:
+ unwind_oom:
while (--i >= 0) {
if (!cpu_possible(i))
continue;
@@ -3006,20 +3053,6 @@ void kmem_cache_free(kmem_cache_t *cachep, void *objp)
EXPORT_SYMBOL(kmem_cache_free);
/**
- * kzalloc - allocate memory. The memory is set to zero.
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- */
-void *kzalloc(size_t size, gfp_t flags)
-{
- void *ret = kmalloc(size, flags);
- if (ret)
- memset(ret, 0, size);
- return ret;
-}
-EXPORT_SYMBOL(kzalloc);
-
-/**
* kfree - free previously allocated memory
* @objp: pointer returned by kmalloc.
*
@@ -3038,7 +3071,8 @@ void kfree(const void *objp)
local_irq_save(flags);
kfree_debugcheck(objp);
c = page_get_cache(virt_to_page(objp));
- __cache_free(c, (void*)objp);
+ mutex_debug_check_no_locks_freed(objp, obj_reallen(c));
+ __cache_free(c, (void *)objp);
local_irq_restore(flags);
}
EXPORT_SYMBOL(kfree);
@@ -3051,17 +3085,16 @@ EXPORT_SYMBOL(kfree);
* Don't free memory not originally allocated by alloc_percpu()
* The complemented objp is to check for that.
*/
-void
-free_percpu(const void *objp)
+void free_percpu(const void *objp)
{
int i;
- struct percpu_data *p = (struct percpu_data *) (~(unsigned long) objp);
+ struct percpu_data *p = (struct percpu_data *)(~(unsigned long)objp);
/*
* We allocate for all cpus so we cannot use for online cpu here.
*/
for_each_cpu(i)
- kfree(p->ptrs[i]);
+ kfree(p->ptrs[i]);
kfree(p);
}
EXPORT_SYMBOL(free_percpu);
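
The complemented pointer returned by __alloc_percpu() is exactly why callers must go through per_cpu_ptr() instead of dereferencing the return value. A minimal sketch of the intended usage, assuming the usual alloc_percpu()/per_cpu_ptr() wrappers from <linux/percpu.h> and a hypothetical counter structure:

	/* Hypothetical per-CPU counter, for illustration only. */
	struct my_counter {
		unsigned long events;
	};

	static unsigned long my_counter_demo(void)
	{
		struct my_counter *ctrs;
		unsigned long total = 0;
		int cpu;

		ctrs = alloc_percpu(struct my_counter);	/* zeroed copy per possible CPU */
		if (!ctrs)
			return 0;

		per_cpu_ptr(ctrs, get_cpu())->events++;	/* undoes the ~pdata trick */
		put_cpu();

		for_each_cpu(cpu)			/* all possible CPUs, as allocated */
			total += per_cpu_ptr(ctrs, cpu)->events;

		free_percpu(ctrs);
		return total;
	}
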
@@ -3095,44 +3128,44 @@ static int alloc_kmemlist(kmem_cache_t *cachep)
if (!(new_alien = alloc_alien_cache(node, cachep->limit)))
goto fail;
#endif
- if (!(new = alloc_arraycache(node, (cachep->shared*
- cachep->batchcount), 0xbaadf00d)))
+ if (!(new = alloc_arraycache(node, (cachep->shared *
+ cachep->batchcount),
+ 0xbaadf00d)))
goto fail;
if ((l3 = cachep->nodelists[node])) {
spin_lock_irq(&l3->list_lock);
if ((nc = cachep->nodelists[node]->shared))
- free_block(cachep, nc->entry,
- nc->avail, node);
+ free_block(cachep, nc->entry, nc->avail, node);
l3->shared = new;
if (!cachep->nodelists[node]->alien) {
l3->alien = new_alien;
new_alien = NULL;
}
- l3->free_limit = (1 + nr_cpus_node(node))*
- cachep->batchcount + cachep->num;
+ l3->free_limit = (1 + nr_cpus_node(node)) *
+ cachep->batchcount + cachep->num;
spin_unlock_irq(&l3->list_lock);
kfree(nc);
free_alien_cache(new_alien);
continue;
}
if (!(l3 = kmalloc_node(sizeof(struct kmem_list3),
- GFP_KERNEL, node)))
+ GFP_KERNEL, node)))
goto fail;
kmem_list3_init(l3);
l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
- ((unsigned long)cachep)%REAPTIMEOUT_LIST3;
+ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
l3->shared = new;
l3->alien = new_alien;
- l3->free_limit = (1 + nr_cpus_node(node))*
- cachep->batchcount + cachep->num;
+ l3->free_limit = (1 + nr_cpus_node(node)) *
+ cachep->batchcount + cachep->num;
cachep->nodelists[node] = l3;
}
return err;
-fail:
+ fail:
err = -ENOMEM;
return err;
}
@@ -3154,18 +3187,19 @@ static void do_ccupdate_local(void *info)
new->new[smp_processor_id()] = old;
}
-
static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
- int shared)
+ int shared)
{
struct ccupdate_struct new;
int i, err;
- memset(&new.new,0,sizeof(new.new));
+ memset(&new.new, 0, sizeof(new.new));
for_each_online_cpu(i) {
- new.new[i] = alloc_arraycache(cpu_to_node(i), limit, batchcount);
+ new.new[i] =
+ alloc_arraycache(cpu_to_node(i), limit, batchcount);
if (!new.new[i]) {
- for (i--; i >= 0; i--) kfree(new.new[i]);
+ for (i--; i >= 0; i--)
+ kfree(new.new[i]);
return -ENOMEM;
}
}
@@ -3193,13 +3227,12 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
err = alloc_kmemlist(cachep);
if (err) {
printk(KERN_ERR "alloc_kmemlist failed for %s, error %d.\n",
- cachep->name, -err);
+ cachep->name, -err);
BUG();
}
return 0;
}
-
static void enable_cpucache(kmem_cache_t *cachep)
{
int err;
@@ -3246,14 +3279,14 @@ static void enable_cpucache(kmem_cache_t *cachep)
if (limit > 32)
limit = 32;
#endif
- err = do_tune_cpucache(cachep, limit, (limit+1)/2, shared);
+ err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
if (err)
printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
- cachep->name, -err);
+ cachep->name, -err);
}
-static void drain_array_locked(kmem_cache_t *cachep,
- struct array_cache *ac, int force, int node)
+static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
+ int force, int node)
{
int tofree;
@@ -3261,14 +3294,14 @@ static void drain_array_locked(kmem_cache_t *cachep,
if (ac->touched && !force) {
ac->touched = 0;
} else if (ac->avail) {
- tofree = force ? ac->avail : (ac->limit+4)/5;
+ tofree = force ? ac->avail : (ac->limit + 4) / 5;
if (tofree > ac->avail) {
- tofree = (ac->avail+1)/2;
+ tofree = (ac->avail + 1) / 2;
}
free_block(cachep, ac->entry, tofree, node);
ac->avail -= tofree;
memmove(ac->entry, &(ac->entry[tofree]),
- sizeof(void*)*ac->avail);
+ sizeof(void *) * ac->avail);
}
}
@@ -3291,13 +3324,14 @@ static void cache_reap(void *unused)
if (down_trylock(&cache_chain_sem)) {
/* Give up. Setup the next iteration. */
- schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
+ schedule_delayed_work(&__get_cpu_var(reap_work),
+ REAPTIMEOUT_CPUC);
return;
}
list_for_each(walk, &cache_chain) {
kmem_cache_t *searchp;
- struct list_head* p;
+ struct list_head *p;
int tofree;
struct slab *slabp;
@@ -3314,7 +3348,7 @@ static void cache_reap(void *unused)
spin_lock_irq(&l3->list_lock);
drain_array_locked(searchp, ac_data(searchp), 0,
- numa_node_id());
+ numa_node_id());
if (time_after(l3->next_reap, jiffies))
goto next_unlock;
@@ -3323,14 +3357,16 @@ static void cache_reap(void *unused)
if (l3->shared)
drain_array_locked(searchp, l3->shared, 0,
- numa_node_id());
+ numa_node_id());
if (l3->free_touched) {
l3->free_touched = 0;
goto next_unlock;
}
- tofree = (l3->free_limit+5*searchp->num-1)/(5*searchp->num);
+ tofree =
+ (l3->free_limit + 5 * searchp->num -
+ 1) / (5 * searchp->num);
do {
p = l3->slabs_free.next;
if (p == &(l3->slabs_free))
@@ -3350,10 +3386,10 @@ static void cache_reap(void *unused)
spin_unlock_irq(&l3->list_lock);
slab_destroy(searchp, slabp);
spin_lock_irq(&l3->list_lock);
- } while(--tofree > 0);
-next_unlock:
+ } while (--tofree > 0);
+ next_unlock:
spin_unlock_irq(&l3->list_lock);
-next:
+ next:
cond_resched();
}
check_irq_on();
@@ -3365,32 +3401,37 @@ next:
#ifdef CONFIG_PROC_FS
-static void *s_start(struct seq_file *m, loff_t *pos)
+static void print_slabinfo_header(struct seq_file *m)
{
- loff_t n = *pos;
- struct list_head *p;
-
- down(&cache_chain_sem);
- if (!n) {
- /*
- * Output format version, so at least we can change it
- * without _too_ many complaints.
- */
+ /*
+ * Output format version, so at least we can change it
+ * without _too_ many complaints.
+ */
#if STATS
- seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
+ seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
- seq_puts(m, "slabinfo - version: 2.1\n");
+ seq_puts(m, "slabinfo - version: 2.1\n");
#endif
- seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
- seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
- seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
+ seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
+ "<objperslab> <pagesperslab>");
+ seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
+ seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#if STATS
- seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped>"
- " <error> <maxfreeable> <nodeallocs> <remotefrees>");
- seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
+ seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
+ "<error> <maxfreeable> <nodeallocs> <remotefrees>");
+ seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
- seq_putc(m, '\n');
- }
+ seq_putc(m, '\n');
+}
+
+static void *s_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t n = *pos;
+ struct list_head *p;
+
+ down(&cache_chain_sem);
+ if (!n)
+ print_slabinfo_header(m);
p = cache_chain.next;
while (n--) {
p = p->next;
@@ -3405,7 +3446,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)
kmem_cache_t *cachep = p;
++*pos;
return cachep->next.next == &cache_chain ? NULL
- : list_entry(cachep->next.next, kmem_cache_t, next);
+ : list_entry(cachep->next.next, kmem_cache_t, next);
}
static void s_stop(struct seq_file *m, void *p)
@@ -3417,11 +3458,11 @@ static int s_show(struct seq_file *m, void *p)
{
kmem_cache_t *cachep = p;
struct list_head *q;
- struct slab *slabp;
- unsigned long active_objs;
- unsigned long num_objs;
- unsigned long active_slabs = 0;
- unsigned long num_slabs, free_objects = 0, shared_avail = 0;
+ struct slab *slabp;
+ unsigned long active_objs;
+ unsigned long num_objs;
+ unsigned long active_slabs = 0;
+ unsigned long num_slabs, free_objects = 0, shared_avail = 0;
const char *name;
char *error = NULL;
int node;
@@ -3438,14 +3479,14 @@ static int s_show(struct seq_file *m, void *p)
spin_lock(&l3->list_lock);
- list_for_each(q,&l3->slabs_full) {
+ list_for_each(q, &l3->slabs_full) {
slabp = list_entry(q, struct slab, list);
if (slabp->inuse != cachep->num && !error)
error = "slabs_full accounting error";
active_objs += cachep->num;
active_slabs++;
}
- list_for_each(q,&l3->slabs_partial) {
+ list_for_each(q, &l3->slabs_partial) {
slabp = list_entry(q, struct slab, list);
if (slabp->inuse == cachep->num && !error)
error = "slabs_partial inuse accounting error";
@@ -3454,7 +3495,7 @@ static int s_show(struct seq_file *m, void *p)
active_objs += slabp->inuse;
active_slabs++;
}
- list_for_each(q,&l3->slabs_free) {
+ list_for_each(q, &l3->slabs_free) {
slabp = list_entry(q, struct slab, list);
if (slabp->inuse && !error)
error = "slabs_free/inuse accounting error";
@@ -3465,25 +3506,24 @@ static int s_show(struct seq_file *m, void *p)
spin_unlock(&l3->list_lock);
}
- num_slabs+=active_slabs;
- num_objs = num_slabs*cachep->num;
+ num_slabs += active_slabs;
+ num_objs = num_slabs * cachep->num;
if (num_objs - active_objs != free_objects && !error)
error = "free_objects accounting error";
- name = cachep->name;
+ name = cachep->name;
if (error)
printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
- name, active_objs, num_objs, cachep->objsize,
- cachep->num, (1<<cachep->gfporder));
+ name, active_objs, num_objs, cachep->objsize,
+ cachep->num, (1 << cachep->gfporder));
seq_printf(m, " : tunables %4u %4u %4u",
- cachep->limit, cachep->batchcount,
- cachep->shared);
+ cachep->limit, cachep->batchcount, cachep->shared);
seq_printf(m, " : slabdata %6lu %6lu %6lu",
- active_slabs, num_slabs, shared_avail);
+ active_slabs, num_slabs, shared_avail);
#if STATS
- { /* list3 stats */
+ { /* list3 stats */
unsigned long high = cachep->high_mark;
unsigned long allocs = cachep->num_allocations;
unsigned long grown = cachep->grown;
@@ -3494,9 +3534,7 @@ static int s_show(struct seq_file *m, void *p)
unsigned long node_frees = cachep->node_frees;
seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
- %4lu %4lu %4lu %4lu",
- allocs, high, grown, reaped, errors,
- max_freeable, node_allocs, node_frees);
+ %4lu %4lu %4lu %4lu", allocs, high, grown, reaped, errors, max_freeable, node_allocs, node_frees);
}
/* cpu stats */
{
@@ -3506,7 +3544,7 @@ static int s_show(struct seq_file *m, void *p)
unsigned long freemiss = atomic_read(&cachep->freemiss);
seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
- allochit, allocmiss, freehit, freemiss);
+ allochit, allocmiss, freehit, freemiss);
}
#endif
seq_putc(m, '\n');
@@ -3529,10 +3567,10 @@ static int s_show(struct seq_file *m, void *p)
*/
struct seq_operations slabinfo_op = {
- .start = s_start,
- .next = s_next,
- .stop = s_stop,
- .show = s_show,
+ .start = s_start,
+ .next = s_next,
+ .stop = s_stop,
+ .show = s_show,
};
#define MAX_SLABINFO_WRITE 128
@@ -3543,18 +3581,18 @@ struct seq_operations slabinfo_op = {
* @count: data length
* @ppos: unused
*/
-ssize_t slabinfo_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *ppos)
+ssize_t slabinfo_write(struct file *file, const char __user * buffer,
+ size_t count, loff_t *ppos)
{
- char kbuf[MAX_SLABINFO_WRITE+1], *tmp;
+ char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
int limit, batchcount, shared, res;
struct list_head *p;
-
+
if (count > MAX_SLABINFO_WRITE)
return -EINVAL;
if (copy_from_user(&kbuf, buffer, count))
return -EFAULT;
- kbuf[MAX_SLABINFO_WRITE] = '\0';
+ kbuf[MAX_SLABINFO_WRITE] = '\0';
tmp = strchr(kbuf, ' ');
if (!tmp)
@@ -3567,18 +3605,17 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
/* Find the cache in the chain of caches. */
down(&cache_chain_sem);
res = -EINVAL;
- list_for_each(p,&cache_chain) {
+ list_for_each(p, &cache_chain) {
kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
if (!strcmp(cachep->name, kbuf)) {
if (limit < 1 ||
batchcount < 1 ||
- batchcount > limit ||
- shared < 0) {
+ batchcount > limit || shared < 0) {
res = 0;
} else {
res = do_tune_cpucache(cachep, limit,
- batchcount, shared);
+ batchcount, shared);
}
break;
}
@@ -3609,26 +3646,3 @@ unsigned int ksize(const void *objp)
return obj_reallen(page_get_cache(virt_to_page(objp)));
}
-
-
-/*
- * kstrdup - allocate space for and copy an existing string
- *
- * @s: the string to duplicate
- * @gfp: the GFP mask used in the kmalloc() call when allocating memory
- */
-char *kstrdup(const char *s, gfp_t gfp)
-{
- size_t len;
- char *buf;
-
- if (!s)
- return NULL;
-
- len = strlen(s) + 1;
- buf = kmalloc(len, gfp);
- if (buf)
- memcpy(buf, s, len);
- return buf;
-}
-EXPORT_SYMBOL(kstrdup);
diff --git a/mm/slob.c b/mm/slob.c
new file mode 100644
index 00000000000..1c240c4b71d
--- /dev/null
+++ b/mm/slob.c
@@ -0,0 +1,385 @@
+/*
+ * SLOB Allocator: Simple List Of Blocks
+ *
+ * Matt Mackall <mpm@selenic.com> 12/30/03
+ *
+ * How SLOB works:
+ *
+ * The core of SLOB is a traditional K&R style heap allocator, with
+ * support for returning aligned objects. The granularity of this
+ * allocator is 8 bytes on x86, though it's perhaps possible to reduce
+ * this to 4 if it's deemed worth the effort. The slob heap is a
+ * singly-linked list of pages from __get_free_page, grown on demand,
+ * and allocation from the heap is currently first-fit.
+ *
+ * Above this is an implementation of kmalloc/kfree. Blocks returned
+ * from kmalloc are 8-byte aligned and prepended with an 8-byte header.
+ * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
+ * __get_free_pages directly so that it can return page-aligned blocks
+ * and keeps a linked list of such pages and their orders. These
+ * objects are detected in kfree() by their page alignment.
+ *
+ * SLAB is emulated on top of SLOB by simply calling constructors and
+ * destructors for every SLAB allocation. Objects are returned with
+ * the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
+ * set, in which case the low-level allocator will fragment blocks to
+ * create the proper alignment. Again, objects of page-size or greater
+ * are allocated by calling __get_free_pages. As SLAB objects know
+ * their size, no separate size bookkeeping is necessary and there is
+ * essentially no allocation space overhead.
+ */
+
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/cache.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+
+struct slob_block {
+ int units;
+ struct slob_block *next;
+};
+typedef struct slob_block slob_t;
+
+#define SLOB_UNIT sizeof(slob_t)
+#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
+#define SLOB_ALIGN L1_CACHE_BYTES
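
Given the 8-byte slob_t above (on a 32-bit build), SLOB_UNITS() rounds a byte count up to whole units, and the kmalloc() path further down reserves one extra unit for the size header it prepends. A worked example of that arithmetic, with the numbers being illustrative:

	/*
	 * kmalloc(100)  ->  slob_alloc(100 + 8)  ->  SLOB_UNITS(108) = 14 units,
	 *                   i.e. 112 bytes: an 8-byte slob_t header followed by
	 *                   104 usable bytes (>= the 100 requested).
	 * kfree(p)      ->  slob_free((slob_t *)p - 1, 0); the header's ->units
	 *                   field still holds the 14 recorded at allocation time.
	 */
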
+
+struct bigblock {
+ int order;
+ void *pages;
+ struct bigblock *next;
+};
+typedef struct bigblock bigblock_t;
+
+static slob_t arena = { .next = &arena, .units = 1 };
+static slob_t *slobfree = &arena;
+static bigblock_t *bigblocks;
+static DEFINE_SPINLOCK(slob_lock);
+static DEFINE_SPINLOCK(block_lock);
+
+static void slob_free(void *b, int size);
+
+static void *slob_alloc(size_t size, gfp_t gfp, int align)
+{
+ slob_t *prev, *cur, *aligned = 0;
+ int delta = 0, units = SLOB_UNITS(size);
+ unsigned long flags;
+
+ spin_lock_irqsave(&slob_lock, flags);
+ prev = slobfree;
+ for (cur = prev->next; ; prev = cur, cur = cur->next) {
+ if (align) {
+ aligned = (slob_t *)ALIGN((unsigned long)cur, align);
+ delta = aligned - cur;
+ }
+ if (cur->units >= units + delta) { /* room enough? */
+ if (delta) { /* need to fragment head to align? */
+ aligned->units = cur->units - delta;
+ aligned->next = cur->next;
+ cur->next = aligned;
+ cur->units = delta;
+ prev = cur;
+ cur = aligned;
+ }
+
+ if (cur->units == units) /* exact fit? */
+ prev->next = cur->next; /* unlink */
+ else { /* fragment */
+ prev->next = cur + units;
+ prev->next->units = cur->units - units;
+ prev->next->next = cur->next;
+ cur->units = units;
+ }
+
+ slobfree = prev;
+ spin_unlock_irqrestore(&slob_lock, flags);
+ return cur;
+ }
+ if (cur == slobfree) {
+ spin_unlock_irqrestore(&slob_lock, flags);
+
+ if (size == PAGE_SIZE) /* trying to shrink arena? */
+ return 0;
+
+ cur = (slob_t *)__get_free_page(gfp);
+ if (!cur)
+ return 0;
+
+ slob_free(cur, PAGE_SIZE);
+ spin_lock_irqsave(&slob_lock, flags);
+ cur = slobfree;
+ }
+ }
+}
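
The aligned-allocation branch fragments the head of a free block so that the returned pointer satisfies the requested alignment. A short worked example of that bookkeeping, assuming 8-byte units and a 32-byte alignment request:

	/*
	 * cur = 0x...c8 (20 free units), request: units = 4, align = 32.
	 *
	 *   aligned = ALIGN(0xc8, 32) = 0xe0; delta = (0xe0 - 0xc8) / 8 = 3 units
	 *   20 >= 4 + 3, so the 3-unit head stays on the free list as "cur",
	 *   the allocation starts at 0xe0, and the remaining 20 - 3 - 4 = 13
	 *   units are linked back in as a smaller free block.
	 */
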
+
+static void slob_free(void *block, int size)
+{
+ slob_t *cur, *b = (slob_t *)block;
+ unsigned long flags;
+
+ if (!block)
+ return;
+
+ if (size)
+ b->units = SLOB_UNITS(size);
+
+ /* Find reinsertion point */
+ spin_lock_irqsave(&slob_lock, flags);
+ for (cur = slobfree; !(b > cur && b < cur->next); cur = cur->next)
+ if (cur >= cur->next && (b > cur || b < cur->next))
+ break;
+
+ if (b + b->units == cur->next) {
+ b->units += cur->next->units;
+ b->next = cur->next->next;
+ } else
+ b->next = cur->next;
+
+ if (cur + cur->units == b) {
+ cur->units += b->units;
+ cur->next = b->next;
+ } else
+ cur->next = b;
+
+ slobfree = cur;
+
+ spin_unlock_irqrestore(&slob_lock, flags);
+}
+
+static int FASTCALL(find_order(int size));
+static int fastcall find_order(int size)
+{
+ int order = 0;
+ for ( ; size > 4096 ; size >>=1)
+ order++;
+ return order;
+}
+
+void *kmalloc(size_t size, gfp_t gfp)
+{
+ slob_t *m;
+ bigblock_t *bb;
+ unsigned long flags;
+
+ if (size < PAGE_SIZE - SLOB_UNIT) {
+ m = slob_alloc(size + SLOB_UNIT, gfp, 0);
+ return m ? (void *)(m + 1) : 0;
+ }
+
+ bb = slob_alloc(sizeof(bigblock_t), gfp, 0);
+ if (!bb)
+ return 0;
+
+ bb->order = find_order(size);
+ bb->pages = (void *)__get_free_pages(gfp, bb->order);
+
+ if (bb->pages) {
+ spin_lock_irqsave(&block_lock, flags);
+ bb->next = bigblocks;
+ bigblocks = bb;
+ spin_unlock_irqrestore(&block_lock, flags);
+ return bb->pages;
+ }
+
+ slob_free(bb, sizeof(bigblock_t));
+ return 0;
+}
+
+EXPORT_SYMBOL(kmalloc);
+
+void kfree(const void *block)
+{
+ bigblock_t *bb, **last = &bigblocks;
+ unsigned long flags;
+
+ if (!block)
+ return;
+
+ if (!((unsigned long)block & (PAGE_SIZE-1))) {
+ /* might be on the big block list */
+ spin_lock_irqsave(&block_lock, flags);
+ for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) {
+ if (bb->pages == block) {
+ *last = bb->next;
+ spin_unlock_irqrestore(&block_lock, flags);
+ free_pages((unsigned long)block, bb->order);
+ slob_free(bb, sizeof(bigblock_t));
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&block_lock, flags);
+ }
+
+ slob_free((slob_t *)block - 1, 0);
+ return;
+}
+
+EXPORT_SYMBOL(kfree);
+
+unsigned int ksize(const void *block)
+{
+ bigblock_t *bb;
+ unsigned long flags;
+
+ if (!block)
+ return 0;
+
+ if (!((unsigned long)block & (PAGE_SIZE-1))) {
+ spin_lock_irqsave(&block_lock, flags);
+ for (bb = bigblocks; bb; bb = bb->next)
+ if (bb->pages == block) {
+ spin_unlock_irqrestore(&slob_lock, flags);
+ return PAGE_SIZE << bb->order;
+ }
+ spin_unlock_irqrestore(&block_lock, flags);
+ }
+
+ return ((slob_t *)block - 1)->units * SLOB_UNIT;
+}
+
+struct kmem_cache {
+ unsigned int size, align;
+ const char *name;
+ void (*ctor)(void *, struct kmem_cache *, unsigned long);
+ void (*dtor)(void *, struct kmem_cache *, unsigned long);
+};
+
+struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+ size_t align, unsigned long flags,
+ void (*ctor)(void*, struct kmem_cache *, unsigned long),
+ void (*dtor)(void*, struct kmem_cache *, unsigned long))
+{
+ struct kmem_cache *c;
+
+ c = slob_alloc(sizeof(struct kmem_cache), flags, 0);
+
+ if (c) {
+ c->name = name;
+ c->size = size;
+ c->ctor = ctor;
+ c->dtor = dtor;
+ /* ignore alignment unless it's forced */
+ c->align = (flags & SLAB_MUST_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
+ if (c->align < align)
+ c->align = align;
+ }
+
+ return c;
+}
+EXPORT_SYMBOL(kmem_cache_create);
+
+int kmem_cache_destroy(struct kmem_cache *c)
+{
+ slob_free(c, sizeof(struct kmem_cache));
+ return 0;
+}
+EXPORT_SYMBOL(kmem_cache_destroy);
+
+void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
+{
+ void *b;
+
+ if (c->size < PAGE_SIZE)
+ b = slob_alloc(c->size, flags, c->align);
+ else
+ b = (void *)__get_free_pages(flags, find_order(c->size));
+
+ if (c->ctor)
+ c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR);
+
+ return b;
+}
+EXPORT_SYMBOL(kmem_cache_alloc);
+
+void kmem_cache_free(struct kmem_cache *c, void *b)
+{
+ if (c->dtor)
+ c->dtor(b, c, 0);
+
+ if (c->size < PAGE_SIZE)
+ slob_free(b, c->size);
+ else
+ free_pages((unsigned long)b, find_order(c->size));
+}
+EXPORT_SYMBOL(kmem_cache_free);
+
+unsigned int kmem_cache_size(struct kmem_cache *c)
+{
+ return c->size;
+}
+EXPORT_SYMBOL(kmem_cache_size);
+
+const char *kmem_cache_name(struct kmem_cache *c)
+{
+ return c->name;
+}
+EXPORT_SYMBOL(kmem_cache_name);
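
As the header comment explains, the SLAB interface is emulated by recording size, alignment and constructor/destructor per cache and deferring the actual memory to slob_alloc() or __get_free_pages(). A minimal sketch of a client cache, using the ctor signature declared above; struct foo and its fields are illustrative only:

	struct foo {
		spinlock_t lock;
		int refs;
	};

	static void foo_ctor(void *obj, struct kmem_cache *c, unsigned long flags)
	{
		struct foo *f = obj;

		spin_lock_init(&f->lock);
		f->refs = 0;
	}

	static struct kmem_cache *foo_cache;

	static int __init foo_init(void)
	{
		foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0, 0,
					      foo_ctor, NULL);
		return foo_cache ? 0 : -ENOMEM;
	}

	static struct foo *foo_alloc(void)
	{
		/* under SLOB this is slob_alloc() followed by foo_ctor() */
		return kmem_cache_alloc(foo_cache, GFP_KERNEL);
	}
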
+
+static struct timer_list slob_timer = TIMER_INITIALIZER(
+ (void (*)(unsigned long))kmem_cache_init, 0, 0);
+
+void kmem_cache_init(void)
+{
+ void *p = slob_alloc(PAGE_SIZE, 0, PAGE_SIZE-1);
+
+ if (p)
+ free_page((unsigned long)p);
+
+ mod_timer(&slob_timer, jiffies + HZ);
+}
+
+atomic_t slab_reclaim_pages = ATOMIC_INIT(0);
+EXPORT_SYMBOL(slab_reclaim_pages);
+
+#ifdef CONFIG_SMP
+
+void *__alloc_percpu(size_t size, size_t align)
+{
+ int i;
+ struct percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL);
+
+ if (!pdata)
+ return NULL;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ if (!cpu_possible(i))
+ continue;
+ pdata->ptrs[i] = kmalloc(size, GFP_KERNEL);
+ if (!pdata->ptrs[i])
+ goto unwind_oom;
+ memset(pdata->ptrs[i], 0, size);
+ }
+
+ /* Catch derefs w/o wrappers */
+ return (void *) (~(unsigned long) pdata);
+
+unwind_oom:
+ while (--i >= 0) {
+ if (!cpu_possible(i))
+ continue;
+ kfree(pdata->ptrs[i]);
+ }
+ kfree(pdata);
+ return NULL;
+}
+EXPORT_SYMBOL(__alloc_percpu);
+
+void
+free_percpu(const void *objp)
+{
+ int i;
+ struct percpu_data *p = (struct percpu_data *) (~(unsigned long) objp);
+
+ for (i = 0; i < NR_CPUS; i++) {
+ if (!cpu_possible(i))
+ continue;
+ kfree(p->ptrs[i]);
+ }
+ kfree(p);
+}
+EXPORT_SYMBOL(free_percpu);
+
+#endif
diff --git a/mm/sparse.c b/mm/sparse.c
index 72079b538e2..0a51f36ba3a 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -18,10 +18,10 @@
*/
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
- ____cacheline_maxaligned_in_smp;
+ ____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
- ____cacheline_maxaligned_in_smp;
+ ____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
diff --git a/mm/swap.c b/mm/swap.c
index 73d351439ef..cbb48e721ab 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -156,16 +156,22 @@ void fastcall lru_cache_add_active(struct page *page)
put_cpu_var(lru_add_active_pvecs);
}
-void lru_add_drain(void)
+static void __lru_add_drain(int cpu)
{
- struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
+ struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);
+ /* CPU is dead, so no locking needed. */
if (pagevec_count(pvec))
__pagevec_lru_add(pvec);
- pvec = &__get_cpu_var(lru_add_active_pvecs);
+ pvec = &per_cpu(lru_add_active_pvecs, cpu);
if (pagevec_count(pvec))
__pagevec_lru_add_active(pvec);
- put_cpu_var(lru_add_pvecs);
+}
+
+void lru_add_drain(void)
+{
+ __lru_add_drain(get_cpu());
+ put_cpu();
}
/*
@@ -378,6 +384,8 @@ unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
return pagevec_count(pvec);
}
+EXPORT_SYMBOL(pagevec_lookup);
+
unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
pgoff_t *index, int tag, unsigned nr_pages)
{
@@ -412,17 +420,6 @@ void vm_acct_memory(long pages)
}
#ifdef CONFIG_HOTPLUG_CPU
-static void lru_drain_cache(unsigned int cpu)
-{
- struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);
-
- /* CPU is dead, so no locking needed. */
- if (pagevec_count(pvec))
- __pagevec_lru_add(pvec);
- pvec = &per_cpu(lru_add_active_pvecs, cpu);
- if (pagevec_count(pvec))
- __pagevec_lru_add_active(pvec);
-}
/* Drop the CPU's cached committed space back into the central pool. */
static int cpu_swap_callback(struct notifier_block *nfb,
@@ -435,7 +432,7 @@ static int cpu_swap_callback(struct notifier_block *nfb,
if (action == CPU_DEAD) {
atomic_add(*committed, &vm_committed_space);
*committed = 0;
- lru_drain_cache((long)hcpu);
+ __lru_add_drain((long)hcpu);
}
return NOTIFY_OK;
}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 0df9a57b1de..7b09ac503fe 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -14,6 +14,7 @@
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
+#include <linux/pagevec.h>
#include <asm/pgtable.h>
@@ -140,7 +141,7 @@ void __delete_from_swap_cache(struct page *page)
* Allocate swap space for the page and add the page to the
* swap cache. Caller needs to hold the page lock.
*/
-int add_to_swap(struct page * page)
+int add_to_swap(struct page * page, gfp_t gfp_mask)
{
swp_entry_t entry;
int err;
@@ -165,7 +166,7 @@ int add_to_swap(struct page * page)
* Add it to the swap cache and mark it dirty
*/
err = __add_to_swap_cache(page, entry,
- GFP_ATOMIC|__GFP_NOMEMALLOC|__GFP_NOWARN);
+ gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);
switch (err) {
case 0: /* Success */
@@ -272,12 +273,11 @@ void free_page_and_swap_cache(struct page *page)
*/
void free_pages_and_swap_cache(struct page **pages, int nr)
{
- int chunk = 16;
struct page **pagep = pages;
lru_add_drain();
while (nr) {
- int todo = min(chunk, nr);
+ int todo = min(nr, PAGEVEC_SIZE);
int i;
for (i = 0; i < todo; i++)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index edafeace301..957fef43fa6 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -25,6 +25,7 @@
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
+#include <linux/capability.h>
#include <linux/syscalls.h>
#include <asm/pgtable.h>
@@ -211,6 +212,26 @@ noswap:
return (swp_entry_t) {0};
}
+swp_entry_t get_swap_page_of_type(int type)
+{
+ struct swap_info_struct *si;
+ pgoff_t offset;
+
+ spin_lock(&swap_lock);
+ si = swap_info + type;
+ if (si->flags & SWP_WRITEOK) {
+ nr_swap_pages--;
+ offset = scan_swap_map(si);
+ if (offset) {
+ spin_unlock(&swap_lock);
+ return swp_entry(type, offset);
+ }
+ nr_swap_pages++;
+ }
+ spin_unlock(&swap_lock);
+ return (swp_entry_t) {0};
+}
+
static struct swap_info_struct * swap_info_get(swp_entry_t entry)
{
struct swap_info_struct * p;
@@ -1167,9 +1188,9 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
set_blocksize(bdev, p->old_block_size);
bd_release(bdev);
} else {
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
inode->i_flags &= ~S_SWAPFILE;
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
}
filp_close(swap_file, NULL);
err = 0;
@@ -1386,7 +1407,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
p->bdev = bdev;
} else if (S_ISREG(inode->i_mode)) {
p->bdev = inode->i_sb->s_bdev;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
did_down = 1;
if (IS_SWAPFILE(inode)) {
error = -EBUSY;
@@ -1422,7 +1443,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
else if (!memcmp("SWAPSPACE2",swap_header->magic.magic,10))
swap_header_version = 2;
else {
- printk("Unable to find swap-space signature\n");
+ printk(KERN_ERR "Unable to find swap-space signature\n");
error = -EINVAL;
goto bad_swap;
}
@@ -1473,7 +1494,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
goto bad_swap;
if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
goto bad_swap;
-
+
/* OK, set up the swap map and apply the bad block list */
if (!(p->swap_map = vmalloc(maxpages * sizeof(short)))) {
error = -ENOMEM;
@@ -1482,17 +1503,17 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
error = 0;
memset(p->swap_map, 0, maxpages * sizeof(short));
- for (i=0; i<swap_header->info.nr_badpages; i++) {
- int page = swap_header->info.badpages[i];
- if (page <= 0 || page >= swap_header->info.last_page)
+ for (i = 0; i < swap_header->info.nr_badpages; i++) {
+ int page_nr = swap_header->info.badpages[i];
+ if (page_nr <= 0 || page_nr >= swap_header->info.last_page)
error = -EINVAL;
else
- p->swap_map[page] = SWAP_MAP_BAD;
+ p->swap_map[page_nr] = SWAP_MAP_BAD;
}
nr_good_pages = swap_header->info.last_page -
swap_header->info.nr_badpages -
1 /* header page */;
- if (error)
+ if (error)
goto bad_swap;
}
@@ -1576,7 +1597,7 @@ out:
if (did_down) {
if (!error)
inode->i_flags |= S_SWAPFILE;
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
}
return error;
}
diff --git a/mm/tiny-shmem.c b/mm/tiny-shmem.c
index b58abcf44ed..f9d6a9cc91c 100644
--- a/mm/tiny-shmem.c
+++ b/mm/tiny-shmem.c
@@ -81,13 +81,19 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
goto close_file;
d_instantiate(dentry, inode);
- inode->i_size = size;
inode->i_nlink = 0; /* It is unlinked */
+
file->f_vfsmnt = mntget(shm_mnt);
file->f_dentry = dentry;
file->f_mapping = inode->i_mapping;
file->f_op = &ramfs_file_operations;
file->f_mode = FMODE_WRITE | FMODE_READ;
+
+ /* notify everyone as to the change of file size */
+ error = do_truncate(dentry, size, 0, file);
+ if (error < 0)
+ goto close_file;
+
return file;
close_file:
@@ -123,3 +129,24 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
{
return 0;
}
+
+int shmem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ file_accessed(file);
+#ifndef CONFIG_MMU
+ return ramfs_nommu_mmap(file, vma);
+#else
+ return 0;
+#endif
+}
+
+#ifndef CONFIG_MMU
+unsigned long shmem_get_unmapped_area(struct file *file,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags)
+{
+ return ramfs_nommu_get_unmapped_area(file, addr, len, pgoff, flags);
+}
+#endif
diff --git a/mm/truncate.c b/mm/truncate.c
index 9173ab50060..6cb3fff25f6 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -82,12 +82,15 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
}
/**
- * truncate_inode_pages - truncate *all* the pages from an offset
+ * truncate_inode_pages_range - truncate range of pages specified by start
+ * and end byte offsets
* @mapping: mapping to truncate
* @lstart: offset from which to truncate
+ * @lend: offset to which to truncate
*
- * Truncate the page cache at a set offset, removing the pages that are beyond
- * that offset (and zeroing out partial pages).
+ * Truncate the page cache, removing the pages that are between the
+ * specified offsets (and zeroing out the partial page if lstart is not
+ * page aligned).
*
* Truncate takes two passes - the first pass is nonblocking. It will not
* block on page locks and it will not block on writeback. The second pass
@@ -101,12 +104,12 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
* We pass down the cache-hot hint to the page freeing code. Even if the
* mapping is large, it is probably the case that the final pages are the most
* recently touched, and freeing happens in ascending file offset order.
- *
- * Called under (and serialised by) inode->i_sem.
*/
-void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
+void truncate_inode_pages_range(struct address_space *mapping,
+ loff_t lstart, loff_t lend)
{
const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
+ pgoff_t end;
const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
struct pagevec pvec;
pgoff_t next;
@@ -115,13 +118,22 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
if (mapping->nrpages == 0)
return;
+ BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
+ end = (lend >> PAGE_CACHE_SHIFT);
+
pagevec_init(&pvec, 0);
next = start;
- while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+ while (next <= end &&
+ pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
pgoff_t page_index = page->index;
+ if (page_index > end) {
+ next = page_index;
+ break;
+ }
+
if (page_index > next)
next = page_index;
next++;
@@ -157,9 +169,15 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
next = start;
continue;
}
+ if (pvec.pages[0]->index > end) {
+ pagevec_release(&pvec);
+ break;
+ }
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
+ if (page->index > end)
+ break;
lock_page(page);
wait_on_page_writeback(page);
if (page->index > next)
@@ -171,7 +189,19 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
pagevec_release(&pvec);
}
}
+EXPORT_SYMBOL(truncate_inode_pages_range);
+/**
+ * truncate_inode_pages - truncate *all* the pages from an offset
+ * @mapping: mapping to truncate
+ * @lstart: offset from which to truncate
+ *
+ * Called under (and serialised by) inode->i_mutex.
+ */
+void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
+{
+ truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
+}
EXPORT_SYMBOL(truncate_inode_pages);
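
The range interface treats lend == (loff_t)-1 as "to the end of the file", and the BUG_ON in truncate_inode_pages_range() requires lend to address the last byte of a page. A hedged sketch of punching out a page-aligned region; the helper name and the chosen range are illustrative:

	/* Drop the cached pages covering bytes [PAGE_CACHE_SIZE, 4*PAGE_CACHE_SIZE)
	 * of an inode.  Note the "- 1": lend is the last byte, not one past it. */
	static void drop_middle_pages(struct inode *inode)
	{
		truncate_inode_pages_range(inode->i_mapping,
					   1 * PAGE_CACHE_SIZE,
					   4 * PAGE_CACHE_SIZE - 1);
	}

The old entry point is now just the degenerate case: truncate_inode_pages(mapping, lstart) is a thin wrapper that calls the range variant with lend = (loff_t)-1.
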
/**
@@ -219,7 +249,6 @@ unlock:
break;
}
pagevec_release(&pvec);
- cond_resched();
}
return ret;
}
diff --git a/mm/util.c b/mm/util.c
new file mode 100644
index 00000000000..5f4bb59da63
--- /dev/null
+++ b/mm/util.c
@@ -0,0 +1,39 @@
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/module.h>
+
+/**
+ * kzalloc - allocate memory. The memory is set to zero.
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ */
+void *kzalloc(size_t size, gfp_t flags)
+{
+ void *ret = kmalloc(size, flags);
+ if (ret)
+ memset(ret, 0, size);
+ return ret;
+}
+EXPORT_SYMBOL(kzalloc);
+
+/*
+ * kstrdup - allocate space for and copy an existing string
+ *
+ * @s: the string to duplicate
+ * @gfp: the GFP mask used in the kmalloc() call when allocating memory
+ */
+char *kstrdup(const char *s, gfp_t gfp)
+{
+ size_t len;
+ char *buf;
+
+ if (!s)
+ return NULL;
+
+ len = strlen(s) + 1;
+ buf = kmalloc(len, gfp);
+ if (buf)
+ memcpy(buf, s, len);
+ return buf;
+}
+EXPORT_SYMBOL(kstrdup);
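
Both helpers keep the usual kmalloc() conventions: NULL on failure, kfree() to release, and kstrdup() additionally passes a NULL input straight through. A minimal sketch; struct widget and the label handling are illustrative:

	struct widget {
		char *label;
		int refs;
	};

	static struct widget *widget_new(const char *label)
	{
		struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);	/* already zeroed */

		if (!w)
			return NULL;

		w->label = kstrdup(label, GFP_KERNEL);
		if (label && !w->label) {	/* allocation failed, not a NULL label */
			kfree(w);
			return NULL;
		}
		return w;
	}
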
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b0cd81c32de..bf903b2d198 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -63,9 +63,6 @@ struct scan_control {
unsigned long nr_mapped; /* From page_state */
- /* How many pages shrink_cache() should reclaim */
- int nr_to_reclaim;
-
/* Ask shrink_caches, or shrink_zone to scan at this priority */
unsigned int priority;
@@ -74,9 +71,6 @@ struct scan_control {
int may_writepage;
- /* Can pages be swapped as part of reclaim? */
- int may_swap;
-
/* This context's SWAP_CLUSTER_MAX. If freeing memory for
* suspend, we effectively ignore SWAP_CLUSTER_MAX.
* In this context, it doesn't matter that we scan the
@@ -186,8 +180,7 @@ EXPORT_SYMBOL(remove_shrinker);
*
* Returns the number of slab objects which we shrunk.
*/
-static int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
- unsigned long lru_pages)
+int shrink_slab(unsigned long scanned, gfp_t gfp_mask, unsigned long lru_pages)
{
struct shrinker *shrinker;
int ret = 0;
@@ -275,9 +268,7 @@ static inline int is_page_cache_freeable(struct page *page)
static int may_write_to_queue(struct backing_dev_info *bdi)
{
- if (current_is_kswapd())
- return 1;
- if (current_is_pdflush()) /* This is unlikely, but why not... */
+ if (current->flags & PF_SWAPWRITE)
return 1;
if (!bdi_write_congested(bdi))
return 1;
@@ -367,7 +358,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
res = mapping->a_ops->writepage(page, &wbc);
if (res < 0)
handle_write_error(mapping, page, res);
- if (res == WRITEPAGE_ACTIVATE) {
+ if (res == AOP_WRITEPAGE_ACTIVATE) {
ClearPageReclaim(page);
return PAGE_ACTIVATE;
}
@@ -382,6 +373,43 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
return PAGE_CLEAN;
}
+static int remove_mapping(struct address_space *mapping, struct page *page)
+{
+ if (!mapping)
+ return 0; /* truncate got there first */
+
+ write_lock_irq(&mapping->tree_lock);
+
+ /*
+ * The non-racy check for busy page. It is critical to check
+ * PageDirty _after_ making sure that the page is freeable and
+ * not in use by anybody. (pagecache + us == 2)
+ */
+ if (unlikely(page_count(page) != 2))
+ goto cannot_free;
+ smp_rmb();
+ if (unlikely(PageDirty(page)))
+ goto cannot_free;
+
+ if (PageSwapCache(page)) {
+ swp_entry_t swap = { .val = page_private(page) };
+ __delete_from_swap_cache(page);
+ write_unlock_irq(&mapping->tree_lock);
+ swap_free(swap);
+ __put_page(page); /* The pagecache ref */
+ return 1;
+ }
+
+ __remove_from_page_cache(page);
+ write_unlock_irq(&mapping->tree_lock);
+ __put_page(page);
+ return 1;
+
+cannot_free:
+ write_unlock_irq(&mapping->tree_lock);
+ return 0;
+}
+
/*
* shrink_list adds the number of reclaimed pages to sc->nr_reclaimed
*/
@@ -430,9 +458,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
* Try to allocate it some swap space here.
*/
if (PageAnon(page) && !PageSwapCache(page)) {
- if (!sc->may_swap)
- goto keep_locked;
- if (!add_to_swap(page))
+ if (!add_to_swap(page, GFP_ATOMIC))
goto activate_locked;
}
#endif /* CONFIG_SWAP */
@@ -515,36 +541,8 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
goto free_it;
}
- if (!mapping)
- goto keep_locked; /* truncate got there first */
-
- write_lock_irq(&mapping->tree_lock);
-
- /*
- * The non-racy check for busy page. It is critical to check
- * PageDirty _after_ making sure that the page is freeable and
- * not in use by anybody. (pagecache + us == 2)
- */
- if (unlikely(page_count(page) != 2))
- goto cannot_free;
- smp_rmb();
- if (unlikely(PageDirty(page)))
- goto cannot_free;
-
-#ifdef CONFIG_SWAP
- if (PageSwapCache(page)) {
- swp_entry_t swap = { .val = page_private(page) };
- __delete_from_swap_cache(page);
- write_unlock_irq(&mapping->tree_lock);
- swap_free(swap);
- __put_page(page); /* The pagecache ref */
- goto free_it;
- }
-#endif /* CONFIG_SWAP */
-
- __remove_from_page_cache(page);
- write_unlock_irq(&mapping->tree_lock);
- __put_page(page);
+ if (!remove_mapping(mapping, page))
+ goto keep_locked;
free_it:
unlock_page(page);
@@ -553,10 +551,6 @@ free_it:
__pagevec_release_nonlru(&freed_pvec);
continue;
-cannot_free:
- write_unlock_irq(&mapping->tree_lock);
- goto keep_locked;
-
activate_locked:
SetPageActive(page);
pgactivate++;
@@ -574,6 +568,241 @@ keep:
return reclaimed;
}
+#ifdef CONFIG_MIGRATION
+static inline void move_to_lru(struct page *page)
+{
+ list_del(&page->lru);
+ if (PageActive(page)) {
+ /*
+ * lru_cache_add_active checks that
+ * the PG_active bit is off.
+ */
+ ClearPageActive(page);
+ lru_cache_add_active(page);
+ } else {
+ lru_cache_add(page);
+ }
+ put_page(page);
+}
+
+/*
+ * Add isolated pages on the list back to the LRU
+ *
+ * returns the number of pages put back.
+ */
+int putback_lru_pages(struct list_head *l)
+{
+ struct page *page;
+ struct page *page2;
+ int count = 0;
+
+ list_for_each_entry_safe(page, page2, l, lru) {
+ move_to_lru(page);
+ count++;
+ }
+ return count;
+}
+
+/*
+ * swapout a single page
+ * page is locked upon entry, unlocked on exit
+ */
+static int swap_page(struct page *page)
+{
+ struct address_space *mapping = page_mapping(page);
+
+ if (page_mapped(page) && mapping)
+ if (try_to_unmap(page) != SWAP_SUCCESS)
+ goto unlock_retry;
+
+ if (PageDirty(page)) {
+ /* Page is dirty, try to write it out here */
+ switch(pageout(page, mapping)) {
+ case PAGE_KEEP:
+ case PAGE_ACTIVATE:
+ goto unlock_retry;
+
+ case PAGE_SUCCESS:
+ goto retry;
+
+ case PAGE_CLEAN:
+ ; /* try to free the page below */
+ }
+ }
+
+ if (PagePrivate(page)) {
+ if (!try_to_release_page(page, GFP_KERNEL) ||
+ (!mapping && page_count(page) == 1))
+ goto unlock_retry;
+ }
+
+ if (remove_mapping(mapping, page)) {
+ /* Success */
+ unlock_page(page);
+ return 0;
+ }
+
+unlock_retry:
+ unlock_page(page);
+
+retry:
+ return -EAGAIN;
+}
+/*
+ * migrate_pages
+ *
+ * Two lists are passed to this function. The first list
+ * contains the pages isolated from the LRU to be migrated.
+ * The second list contains new pages that the pages isolated
+ * can be moved to. If the second list is NULL then all
+ * pages are swapped out.
+ *
+ * The function returns after 10 attempts or if no pages
+ * are movable anymore because "to" has become empty
+ * or no retryable pages exist anymore.
+ *
+ * SIMPLIFIED VERSION: This implementation of migrate_pages
+ * is only swapping out pages and never touches the second
+ * list. The direct migration patchset
+ * extends this function to avoid the use of swap.
+ *
+ * Return: Number of pages not migrated when "to" ran empty.
+ */
+int migrate_pages(struct list_head *from, struct list_head *to,
+ struct list_head *moved, struct list_head *failed)
+{
+ int retry;
+ int nr_failed = 0;
+ int pass = 0;
+ struct page *page;
+ struct page *page2;
+ int swapwrite = current->flags & PF_SWAPWRITE;
+ int rc;
+
+ if (!swapwrite)
+ current->flags |= PF_SWAPWRITE;
+
+redo:
+ retry = 0;
+
+ list_for_each_entry_safe(page, page2, from, lru) {
+ cond_resched();
+
+ rc = 0;
+ if (page_count(page) == 1)
+ /* page was freed from under us. So we are done. */
+ goto next;
+
+ /*
+ * Skip locked pages during the first two passes to give the
+ * functions holding the lock time to release the page. Later we
+ * use lock_page() to have a higher chance of acquiring the
+ * lock.
+ */
+ rc = -EAGAIN;
+ if (pass > 2)
+ lock_page(page);
+ else
+ if (TestSetPageLocked(page))
+ goto next;
+
+ /*
+ * Only wait on writeback if we have already done a pass where
+ * we may have triggered writeouts for lots of pages.
+ */
+ if (pass > 0) {
+ wait_on_page_writeback(page);
+ } else {
+ if (PageWriteback(page))
+ goto unlock_page;
+ }
+
+ /*
+ * Anonymous pages must have swap cache references otherwise
+ * the information contained in the page maps cannot be
+ * preserved.
+ */
+ if (PageAnon(page) && !PageSwapCache(page)) {
+ if (!add_to_swap(page, GFP_KERNEL)) {
+ rc = -ENOMEM;
+ goto unlock_page;
+ }
+ }
+
+ /*
+ * Page is properly locked and writeback is complete.
+ * Try to migrate the page.
+ */
+ rc = swap_page(page);
+ goto next;
+
+unlock_page:
+ unlock_page(page);
+
+next:
+ if (rc == -EAGAIN) {
+ retry++;
+ } else if (rc) {
+ /* Permanent failure */
+ list_move(&page->lru, failed);
+ nr_failed++;
+ } else {
+ /* Success */
+ list_move(&page->lru, moved);
+ }
+ }
+ if (retry && pass++ < 10)
+ goto redo;
+
+ if (!swapwrite)
+ current->flags &= ~PF_SWAPWRITE;
+
+ return nr_failed + retry;
+}
+
+static void lru_add_drain_per_cpu(void *dummy)
+{
+ lru_add_drain();
+}
+
+/*
+ * Isolate one page from the LRU lists so that the caller
+ * can put it on a private list. Do necessary cache draining
+ * if the page is not on the LRU lists yet.
+ *
+ * Result:
+ * 0 = page not on LRU list
+ * 1 = page removed from LRU list, ready to be added to the caller's list.
+ * -ENOENT = page is being freed elsewhere.
+ */
+int isolate_lru_page(struct page *page)
+{
+ int rc = 0;
+ struct zone *zone = page_zone(page);
+
+redo:
+ spin_lock_irq(&zone->lru_lock);
+ rc = __isolate_lru_page(page);
+ if (rc == 1) {
+ if (PageActive(page))
+ del_page_from_active_list(zone, page);
+ else
+ del_page_from_inactive_list(zone, page);
+ }
+ spin_unlock_irq(&zone->lru_lock);
+ if (rc == 0) {
+ /*
+ * Maybe this page is still waiting for a cpu to drain it
+ * from one of the lru lists?
+ */
+ rc = schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+ if (rc == 0 && PageLRU(page))
+ goto redo;
+ }
+ return rc;
+}
+#endif
+
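As a rough orientation (a minimal sketch, not part of the patch; the caller name and error handling are hypothetical and the vmscan.c context is assumed), a user of these helpers would isolate the page, collect it on a private list, pass that list to migrate_pages() with a NULL "to" list to force swap-out, and return whatever remains to the LRU via putback_lru_pages():

/* Hypothetical caller, for illustration only. */
static int example_swap_out_one_page(struct page *page)
{
	LIST_HEAD(pagelist);
	LIST_HEAD(moved);
	LIST_HEAD(failed);
	int nr_left;

	if (isolate_lru_page(page) != 1)
		return -EBUSY;	/* not on the LRU or already being freed */

	list_add(&page->lru, &pagelist);

	/* NULL "to" list: this simplified migrate_pages() swaps out. */
	nr_left = migrate_pages(&pagelist, NULL, &moved, &failed);

	/* Processed and permanently failed pages go back to the LRU. */
	putback_lru_pages(&moved);
	putback_lru_pages(&failed);

	return nr_left;
}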
/*
* zone->lru_lock is heavily contended. Some of the functions that
* shrink the lists perform better by taking out a batch of pages
@@ -602,20 +831,18 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
page = lru_to_page(src);
prefetchw_prev_lru_page(page, src, flags);
- if (!TestClearPageLRU(page))
- BUG();
- list_del(&page->lru);
- if (get_page_testone(page)) {
- /*
- * It is being freed elsewhere
- */
- __put_page(page);
- SetPageLRU(page);
- list_add(&page->lru, src);
- continue;
- } else {
- list_add(&page->lru, dst);
+ switch (__isolate_lru_page(page)) {
+ case 1:
+ /* Succeeded to isolate page */
+ list_move(&page->lru, dst);
nr_taken++;
+ break;
+ case -ENOENT:
+ /* Not possible to isolate */
+ list_move(&page->lru, src);
+ break;
+ default:
+ BUG();
}
}
@@ -653,17 +880,17 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc)
goto done;
max_scan -= nr_scan;
- if (current_is_kswapd())
- mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
- else
- mod_page_state_zone(zone, pgscan_direct, nr_scan);
nr_freed = shrink_list(&page_list, sc);
- if (current_is_kswapd())
- mod_page_state(kswapd_steal, nr_freed);
- mod_page_state_zone(zone, pgsteal, nr_freed);
- sc->nr_to_reclaim -= nr_freed;
- spin_lock_irq(&zone->lru_lock);
+ local_irq_disable();
+ if (current_is_kswapd()) {
+ __mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
+ __mod_page_state(kswapd_steal, nr_freed);
+ } else
+ __mod_page_state_zone(zone, pgscan_direct, nr_scan);
+ __mod_page_state_zone(zone, pgsteal, nr_freed);
+
+ spin_lock(&zone->lru_lock);
/*
* Put back any unfreeable pages.
*/
@@ -825,11 +1052,13 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
}
}
zone->nr_active += pgmoved;
- spin_unlock_irq(&zone->lru_lock);
- pagevec_release(&pvec);
+ spin_unlock(&zone->lru_lock);
- mod_page_state_zone(zone, pgrefill, pgscanned);
- mod_page_state(pgdeactivate, pgdeactivate);
+ __mod_page_state_zone(zone, pgrefill, pgscanned);
+ __mod_page_state(pgdeactivate, pgdeactivate);
+ local_irq_enable();
+
+ pagevec_release(&pvec);
}
/*
@@ -861,8 +1090,6 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
else
nr_inactive = 0;
- sc->nr_to_reclaim = sc->swap_cluster_max;
-
while (nr_active || nr_inactive) {
if (nr_active) {
sc->nr_to_scan = min(nr_active,
@@ -876,8 +1103,6 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
(unsigned long)sc->swap_cluster_max);
nr_inactive -= sc->nr_to_scan;
shrink_cache(zone, sc);
- if (sc->nr_to_reclaim <= 0)
- break;
}
}
@@ -910,7 +1135,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
for (i = 0; zones[i] != NULL; i++) {
struct zone *zone = zones[i];
- if (zone->present_pages == 0)
+ if (!populated_zone(zone))
continue;
if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
@@ -952,7 +1177,6 @@ int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
sc.gfp_mask = gfp_mask;
sc.may_writepage = 0;
- sc.may_swap = 1;
inc_page_state(allocstall);
@@ -1055,7 +1279,6 @@ loop_again:
total_reclaimed = 0;
sc.gfp_mask = GFP_KERNEL;
sc.may_writepage = 0;
- sc.may_swap = 1;
sc.nr_mapped = read_page_state(nr_mapped);
inc_page_state(pageoutrun);
@@ -1084,7 +1307,7 @@ loop_again:
for (i = pgdat->nr_zones - 1; i >= 0; i--) {
struct zone *zone = pgdat->node_zones + i;
- if (zone->present_pages == 0)
+ if (!populated_zone(zone))
continue;
if (zone->all_unreclaimable &&
@@ -1121,7 +1344,7 @@ scan:
struct zone *zone = pgdat->node_zones + i;
int nr_slab;
- if (zone->present_pages == 0)
+ if (!populated_zone(zone))
continue;
if (zone->all_unreclaimable && priority != DEF_PRIORITY)
@@ -1238,7 +1461,7 @@ static int kswapd(void *p)
* us from recursively trying to free more memory as we're
* trying to free the first piece of memory in the first place).
*/
- tsk->flags |= PF_MEMALLOC|PF_KSWAPD;
+ tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
order = 0;
for ( ; ; ) {
@@ -1273,7 +1496,7 @@ void wakeup_kswapd(struct zone *zone, int order)
{
pg_data_t *pgdat;
- if (zone->present_pages == 0)
+ if (!populated_zone(zone))
return;
pgdat = zone->zone_pgdat;
@@ -1353,76 +1576,3 @@ static int __init kswapd_init(void)
}
module_init(kswapd_init)
-
-
-/*
- * Try to free up some pages from this zone through reclaim.
- */
-int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
-{
- struct scan_control sc;
- int nr_pages = 1 << order;
- int total_reclaimed = 0;
-
- /* The reclaim may sleep, so don't do it if sleep isn't allowed */
- if (!(gfp_mask & __GFP_WAIT))
- return 0;
- if (zone->all_unreclaimable)
- return 0;
-
- sc.gfp_mask = gfp_mask;
- sc.may_writepage = 0;
- sc.may_swap = 0;
- sc.nr_mapped = read_page_state(nr_mapped);
- sc.nr_scanned = 0;
- sc.nr_reclaimed = 0;
- /* scan at the highest priority */
- sc.priority = 0;
- disable_swap_token();
-
- if (nr_pages > SWAP_CLUSTER_MAX)
- sc.swap_cluster_max = nr_pages;
- else
- sc.swap_cluster_max = SWAP_CLUSTER_MAX;
-
- /* Don't reclaim the zone if there are other reclaimers active */
- if (atomic_read(&zone->reclaim_in_progress) > 0)
- goto out;
-
- shrink_zone(zone, &sc);
- total_reclaimed = sc.nr_reclaimed;
-
- out:
- return total_reclaimed;
-}
-
-asmlinkage long sys_set_zone_reclaim(unsigned int node, unsigned int zone,
- unsigned int state)
-{
- struct zone *z;
- int i;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- if (node >= MAX_NUMNODES || !node_online(node))
- return -EINVAL;
-
- /* This will break if we ever add more zones */
- if (!(zone & (1<<ZONE_DMA|1<<ZONE_NORMAL|1<<ZONE_HIGHMEM)))
- return -EINVAL;
-
- for (i = 0; i < MAX_NR_ZONES; i++) {
- if (!(zone & 1<<i))
- continue;
-
- z = &NODE_DATA(node)->node_zones[i];
-
- if (state)
- z->reclaim_pages = 1;
- else
- z->reclaim_pages = 0;
- }
-
- return 0;
-}
diff --git a/net/802/Makefile b/net/802/Makefile
index 01861929591..977704a54f6 100644
--- a/net/802/Makefile
+++ b/net/802/Makefile
@@ -2,8 +2,6 @@
# Makefile for the Linux 802.x protocol layers.
#
-obj-y := p8023.o
-
# Check the p8022 selections against net/core/Makefile.
obj-$(CONFIG_SYSCTL) += sysctl_net_802.o
obj-$(CONFIG_LLC) += p8022.o psnap.o
@@ -11,5 +9,5 @@ obj-$(CONFIG_TR) += p8022.o psnap.o tr.o sysctl_net_802.o
obj-$(CONFIG_NET_FC) += fc.o
obj-$(CONFIG_FDDI) += fddi.o
obj-$(CONFIG_HIPPI) += hippi.o
-obj-$(CONFIG_IPX) += p8022.o psnap.o
+obj-$(CONFIG_IPX) += p8022.o psnap.o p8023.o
obj-$(CONFIG_ATALK) += p8022.o psnap.o
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 67465b65abe..fa76220708c 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -19,6 +19,7 @@
*/
#include <asm/uaccess.h> /* for copy_from_user */
+#include <linux/capability.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index f2a8750bbf1..0f604d227da 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -214,7 +214,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
* This allows the VLAN to have a different MAC than the underlying
* device, and still route correctly.
*/
- if (memcmp(eth_hdr(skb)->h_dest, skb->dev->dev_addr, ETH_ALEN) == 0) {
+ if (!compare_ether_addr(eth_hdr(skb)->h_dest, skb->dev->dev_addr)) {
/* It is for our (changed) MAC-address! */
skb->pkt_type = PACKET_HOST;
}
diff --git a/net/Kconfig b/net/Kconfig
index 60f6f321bd7..9296b269d67 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -159,6 +159,7 @@ source "net/ipx/Kconfig"
source "drivers/net/appletalk/Kconfig"
source "net/x25/Kconfig"
source "net/lapb/Kconfig"
+source "net/tipc/Kconfig"
config NET_DIVERT
bool "Frame Diverter (EXPERIMENTAL)"
diff --git a/net/Makefile b/net/Makefile
index f5141b9d4f3..065796f5fb1 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_VLAN_8021Q) += 8021q/
obj-$(CONFIG_IP_DCCP) += dccp/
obj-$(CONFIG_IP_SCTP) += sctp/
obj-$(CONFIG_IEEE80211) += ieee80211/
+obj-$(CONFIG_TIPC) += tipc/
ifeq ($(CONFIG_NET),y)
obj-$(CONFIG_SYSCTL) += sysctl_net.o
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index a5144e43aae..697ac55e29d 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -52,6 +52,7 @@
*/
#include <linux/config.h>
+#include <linux/capability.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/termios.h> /* For TIOCOUTQ/INQ */
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 72f3f7b8de8..680ccb12aae 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -18,6 +18,7 @@ Author: Marcell GAL, 2000, XDSL Ltd, Hungary
#include <net/arp.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
+#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/atmbr2684.h>
@@ -295,14 +296,14 @@ static inline __be16 br_type_trans(struct sk_buff *skb, struct net_device *dev)
unsigned char *rawp;
eth = eth_hdr(skb);
- if (*eth->h_dest & 1) {
- if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
+ if (is_multicast_ether_addr(eth->h_dest)) {
+ if (!compare_ether_addr(eth->h_dest, dev->broadcast))
skb->pkt_type = PACKET_BROADCAST;
else
skb->pkt_type = PACKET_MULTICAST;
}
- else if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
+ else if (compare_ether_addr(eth->h_dest, dev->dev_addr))
skb->pkt_type = PACKET_OTHERHOST;
if (ntohs(eth->h_proto) >= 1536)
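Many of the networking hunks that follow make the same mechanical change: open-coded MAC address tests (memcmp() over ETH_ALEN bytes, or "addr[0] & 1" for multicast) become the <linux/etherdevice.h> helpers. A small illustrative fragment (not taken from the patch) showing the equivalences — compare_ether_addr() returns zero when two 6-byte addresses match, and is_multicast_ether_addr() tests the group bit:

#include <linux/etherdevice.h>
#include <linux/if_packet.h>
#include <linux/netdevice.h>

/* Illustrative classification of a destination MAC, mirroring the
 * converted code: equality becomes !compare_ether_addr(a, b) and the
 * old "dest[0] & 1" test becomes is_multicast_ether_addr(dest).
 */
static unsigned char example_classify(const unsigned char *dest,
				      const struct net_device *dev)
{
	if (is_multicast_ether_addr(dest))
		return compare_ether_addr(dest, dev->broadcast) ?
			PACKET_MULTICAST : PACKET_BROADCAST;

	return compare_ether_addr(dest, dev->dev_addr) ?
		PACKET_OTHERHOST : PACKET_HOST;
}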
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 4f54c9a5e84..73370de9753 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -19,6 +19,7 @@
#include <linux/atmdev.h>
#include <linux/atmclip.h>
#include <linux/atmarp.h>
+#include <linux/capability.h>
#include <linux/ip.h> /* for net/route.h */
#include <linux/in.h> /* for struct sockaddr_in */
#include <linux/if.h> /* for IFF_UP */
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index a150198b05a..eb109af7eb4 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -12,6 +12,7 @@
#include <linux/atmdev.h>
#include <linux/atmclip.h> /* CLIP_*ENCAP */
#include <linux/atmarp.h> /* manifest constants */
+#include <linux/capability.h>
#include <linux/sonet.h> /* for ioctls */
#include <linux/atmsvc.h>
#include <linux/atmmpc.h>
diff --git a/net/atm/lec.c b/net/atm/lec.c
index ad840b9afba..c4fc722fef9 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -7,6 +7,7 @@
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
+#include <linux/capability.h>
/* We are ethernet device */
#include <linux/if_ether.h>
@@ -1321,7 +1322,7 @@ static int lane2_associate_req (struct net_device *dev, u8 *lan_dst,
struct sk_buff *skb;
struct lec_priv *priv = (struct lec_priv*)dev->priv;
- if ( memcmp(lan_dst, dev->dev_addr, ETH_ALEN) != 0 )
+ if (compare_ether_addr(lan_dst, dev->dev_addr))
return (0); /* not our mac address */
kfree(priv->tlvs); /* NULL if there was no previous association */
@@ -1798,7 +1799,7 @@ lec_arp_find(struct lec_priv *priv,
to_return = priv->lec_arp_tables[place];
while(to_return) {
- if (memcmp(mac_addr, to_return->mac_addr, ETH_ALEN) == 0) {
+ if (!compare_ether_addr(mac_addr, to_return->mac_addr)) {
return to_return;
}
to_return = to_return->next;
@@ -1811,8 +1812,7 @@ make_entry(struct lec_priv *priv, unsigned char *mac_addr)
{
struct lec_arp_table *to_return;
- to_return = (struct lec_arp_table *) kmalloc(sizeof(struct lec_arp_table),
- GFP_ATOMIC);
+ to_return = kmalloc(sizeof(struct lec_arp_table), GFP_ATOMIC);
if (!to_return) {
printk("LEC: Arp entry kmalloc failed\n");
return NULL;
@@ -2002,7 +2002,7 @@ lec_arp_resolve(struct lec_priv *priv, unsigned char *mac_to_find,
return priv->mcast_vcc;
break;
case 2: /* LANE2 wants arp for multicast addresses */
- if ( memcmp(mac_to_find, bus_mac, ETH_ALEN) == 0)
+ if (!compare_ether_addr(mac_to_find, bus_mac))
return priv->mcast_vcc;
break;
default:
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 526d9531411..c304ef1513b 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -3,6 +3,7 @@
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bitops.h>
+#include <linux/capability.h>
#include <linux/seq_file.h>
/* We are an ethernet device */
@@ -552,7 +553,7 @@ static int mpc_send_packet(struct sk_buff *skb, struct net_device *dev)
goto non_ip; /* Multi-Protocol Over ATM :-) */
while (i < mpc->number_of_mps_macs) {
- if (memcmp(eth->h_dest, (mpc->mps_macs + i*ETH_ALEN), ETH_ALEN) == 0)
+ if (!compare_ether_addr(eth->h_dest, (mpc->mps_macs + i*ETH_ALEN)))
if ( send_via_shortcut(skb, mpc) == 0 ) /* try shortcut */
return 0; /* success! */
i++;
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index 58f4a2b5aeb..1489067c1e8 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -39,6 +39,7 @@
#include <linux/skbuff.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
+#include <linux/capability.h>
#include <linux/ppp_defs.h>
#include <linux/if_ppp.h>
#include <linux/ppp_channel.h>
diff --git a/net/atm/raw.c b/net/atm/raw.c
index 4a0466e91aa..3e57b17ca52 100644
--- a/net/atm/raw.c
+++ b/net/atm/raw.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/atmdev.h>
+#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
diff --git a/net/atm/resources.c b/net/atm/resources.c
index c8c459fcb03..224190537c9 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h> /* for barrier */
#include <linux/module.h>
#include <linux/bitops.h>
+#include <linux/capability.h>
#include <linux/delay.h>
#include <net/sock.h> /* for struct sock */
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index e8753c7fcad..dbf9b47681f 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -14,6 +14,7 @@
* Copyright (C) Frederic Rible F1OAT (frible@teaser.fr)
*/
#include <linux/config.h>
+#include <linux/capability.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index b1e945bd6ed..f04f8630fd2 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -11,6 +11,8 @@
* Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de)
* Copyright (C) Frederic Rible F1OAT (frible@teaser.fr)
*/
+
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index d53cc861586..b8b5854bce9 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -6,6 +6,8 @@
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
+
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 682bf20af52..cbb20c32a6c 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -75,7 +75,7 @@ static struct bnep_session *__bnep_get_session(u8 *dst)
list_for_each(p, &bnep_session_list) {
s = list_entry(p, struct bnep_session, list);
- if (!memcmp(dst, s->eh.h_source, ETH_ALEN))
+ if (!compare_ether_addr(dst, s->eh.h_source))
return s;
}
return NULL;
@@ -420,10 +420,10 @@ static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
iv[il++] = (struct kvec) { &type, 1 };
len++;
- if (!memcmp(eh->h_dest, s->eh.h_source, ETH_ALEN))
+ if (!compare_ether_addr(eh->h_dest, s->eh.h_source))
type |= 0x01;
- if (!memcmp(eh->h_source, s->eh.h_dest, ETH_ALEN))
+ if (!compare_ether_addr(eh->h_source, s->eh.h_dest))
type |= 0x02;
if (type)
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index ccbaf69afc5..2bfe796cf05 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 5e22343b609..8f8fad23f78 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index a31244e5888..f812ed129e5 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -403,7 +403,7 @@ int hci_get_conn_list(void __user *arg)
size = sizeof(req) + req.conn_num * sizeof(*ci);
- if (!(cl = (void *) kmalloc(size, GFP_KERNEL)))
+ if (!(cl = kmalloc(size, GFP_KERNEL)))
return -ENOMEM;
if (!(hdev = hci_dev_get(req.dev_id))) {
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 84e6c93a044..bdb6458c6bd 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -28,6 +28,7 @@
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 8f8dd931b29..b8f67761b88 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 7f0781e4326..f6b4a808535 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -28,6 +28,7 @@
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 158a9c46d86..74368f79ee5 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -34,6 +34,7 @@
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
+#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
@@ -480,13 +481,8 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb)
BT_DBG("dlc %p tty %p len %d", dlc, tty, skb->len);
if (test_bit(TTY_DONT_FLIP, &tty->flags)) {
- register int i;
- for (i = 0; i < skb->len; i++) {
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- tty_flip_buffer_push(tty);
-
- tty_insert_flip_char(tty, skb->data[i], 0);
- }
+ tty_buffer_request_room(tty, skb->len);
+ tty_insert_flip_string(tty, skb->data, skb->len);
tty_flip_buffer_push(tty);
} else
tty->ldisc.receive_buf(tty, skb->data, NULL, skb->len);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index c387852f753..e3a73cead6b 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -68,7 +68,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
}
}
- if (dest[0] & 1) {
+ if (is_multicast_ether_addr(dest)) {
br_flood_forward(br, skb, !passedup);
if (!passedup)
br_pass_frame_up(br, skb);
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index b8ce14b2218..159fb840982 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -13,6 +13,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/if_bridge.h>
#include <linux/netdevice.h>
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 223f8270dae..7cac3fb9f80 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -394,8 +394,9 @@ inhdr_error:
* target in particular. Save the original destination IP
* address to be able to detect DNAT afterwards. */
static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
- const struct net_device *in, const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
struct iphdr *iph;
__u32 len;
@@ -412,8 +413,10 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
goto out;
if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
+ u8 *vhdr = skb->data;
skb_pull(skb, VLAN_HLEN);
- (skb)->nh.raw += VLAN_HLEN;
+ skb_postpull_rcsum(skb, vhdr, VLAN_HLEN);
+ skb->nh.raw += VLAN_HLEN;
}
return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
}
@@ -429,8 +432,10 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
goto out;
if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
+ u8 *vhdr = skb->data;
skb_pull(skb, VLAN_HLEN);
- (skb)->nh.raw += VLAN_HLEN;
+ skb_postpull_rcsum(skb, vhdr, VLAN_HLEN);
+ skb->nh.raw += VLAN_HLEN;
}
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 98cf53c81fa..6f577f16c4c 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -11,6 +11,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 2ebdc23bbe2..0ac0355d16d 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -11,6 +11,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c
index 7323805b972..f158fe67dd6 100644
--- a/net/bridge/netfilter/ebt_ip.c
+++ b/net/bridge/netfilter/ebt_ip.c
@@ -15,6 +15,7 @@
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_ip.h>
#include <linux/ip.h>
+#include <net/ip.h>
#include <linux/in.h>
#include <linux/module.h>
@@ -51,6 +52,8 @@ static int ebt_filter_ip(const struct sk_buff *skb, const struct net_device *in,
if (!(info->bitmask & EBT_IP_DPORT) &&
!(info->bitmask & EBT_IP_SPORT))
return EBT_MATCH;
+ if (ntohs(ih->frag_off) & IP_OFFSET)
+ return EBT_NOMATCH;
pptr = skb_header_pointer(skb, ih->ihl*4,
sizeof(_ports), &_ports);
if (pptr == NULL)
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index 9f6e0193ae1..a29c1232c42 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -15,6 +15,7 @@
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/ip.h>
+#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/spinlock.h>
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index f8a8cdec16e..0248c67277e 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -10,6 +10,7 @@
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_stp.h>
+#include <linux/etherdevice.h>
#include <linux/module.h>
#define BPDU_TYPE_CONFIG 0
@@ -164,8 +165,8 @@ static int ebt_stp_check(const char *tablename, unsigned int hookmask,
if (datalen != len)
return -EINVAL;
/* Make sure the match only receives stp frames */
- if (memcmp(e->destmac, bridge_ula, ETH_ALEN) ||
- memcmp(e->destmsk, msk, ETH_ALEN) || !(e->bitmask & EBT_DESTMAC))
+ if (compare_ether_addr(e->destmac, bridge_ula) ||
+ compare_ether_addr(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC))
return -EINVAL;
return 0;
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index f8ffbf6e233..00729b3604f 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -944,7 +944,7 @@ static int do_replace(void __user *user, unsigned int len)
if (countersize)
memset(newinfo->counters, 0, countersize);
- newinfo->entries = (char *)vmalloc(tmp.entries_size);
+ newinfo->entries = vmalloc(tmp.entries_size);
if (!newinfo->entries) {
ret = -ENOMEM;
goto free_newinfo;
@@ -1146,7 +1146,7 @@ int ebt_register_table(struct ebt_table *table)
if (!newinfo)
return -ENOMEM;
- newinfo->entries = (char *)vmalloc(table->table->entries_size);
+ newinfo->entries = vmalloc(table->table->entries_size);
if (!(newinfo->entries))
goto free_newinfo;
diff --git a/net/core/dev.c b/net/core/dev.c
index 5081287923d..fd070a098f2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -75,6 +75,7 @@
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/types.h>
@@ -1092,15 +1093,12 @@ int skb_checksum_help(struct sk_buff *skb, int inward)
goto out;
}
- if (offset > (int)skb->len)
- BUG();
+ BUG_ON(offset > (int)skb->len);
csum = skb_checksum(skb, offset, skb->len-offset, 0);
offset = skb->tail - skb->h.raw;
- if (offset <= 0)
- BUG();
- if (skb->csum + 2 > offset)
- BUG();
+ BUG_ON(offset <= 0);
+ BUG_ON(skb->csum + 2 > offset);
*(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
skb->ip_summed = CHECKSUM_NONE;
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index cb530eef0e3..05d60850840 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -158,7 +158,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
int err = 0;
struct dev_mc_list *dmi, *dmi1;
- dmi1 = (struct dev_mc_list *)kmalloc(sizeof(*dmi), GFP_ATOMIC);
+ dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC);
spin_lock_bh(&dev->xmit_lock);
for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
diff --git a/net/core/dv.c b/net/core/dv.c
index 3f25f4aa4e6..cf581407538 100644
--- a/net/core/dv.c
+++ b/net/core/dv.c
@@ -24,6 +24,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <net/dst.h>
@@ -457,7 +458,7 @@ void divert_frame(struct sk_buff *skb)
unsigned char *skb_data_end = skb->data + skb->len;
/* Packet is already aimed at us, return */
- if (!memcmp(eth, skb->dev->dev_addr, ETH_ALEN))
+ if (!compare_ether_addr(eth->h_dest, skb->dev->dev_addr))
return;
/* proto is not IP, do nothing */
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 0350586e919..e6f76106a99 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
diff --git a/net/core/filter.c b/net/core/filter.c
index 8964d344558..a52665f7522 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -75,7 +75,7 @@ static inline void *load_pointer(struct sk_buff *skb, int k,
* len is the number of filter blocks in the array.
*/
-int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
+unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
{
struct sock_filter *fentry; /* We walk down these */
void *ptr;
@@ -241,9 +241,9 @@ load_b:
A = X;
continue;
case BPF_RET|BPF_K:
- return ((unsigned int)fentry->k);
+ return fentry->k;
case BPF_RET|BPF_A:
- return ((unsigned int)A);
+ return A;
case BPF_ST:
mem[fentry->k] = A;
continue;
@@ -287,7 +287,9 @@ load_b:
* no references or jumps that are out of range, no illegal
* instructions, and must end with a RET instruction.
*
- * Returns 0 if the rule set is legal or a negative errno code if not.
+ * All jumps are forward as they are not signed.
+ *
+ * Returns 0 if the rule set is legal or -EINVAL if not.
*/
int sk_chk_filter(struct sock_filter *filter, int flen)
{
@@ -299,7 +301,6 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
/* check the filter code now */
for (pc = 0; pc < flen; pc++) {
- /* all jumps are forward as they are not signed */
ftest = &filter[pc];
/* Only allow valid instructions */
@@ -383,11 +384,6 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
}
}
- /*
- * The program must end with a return. We don't care where they
- * jumped within the script (its always forwards) but in the end
- * they _will_ hit this.
- */
return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL;
}
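For context on what sk_chk_filter() documents here: a classic BPF program is an array of struct sock_filter instructions, every jump is a forward (unsigned) offset, and the last instruction must be a BPF_RET. A minimal illustrative program (not part of the patch) that passes the check:

#include <linux/filter.h>
#include <linux/kernel.h>

/* Smallest legal filter: a single "return 0xffff", i.e. accept every
 * packet truncated to 64KB.  BPF_STMT builds one sock_filter entry.
 */
static struct sock_filter example_accept_all[] = {
	BPF_STMT(BPF_RET | BPF_K, 0xffff),
};

static int example_validate(void)
{
	/* Returns 0: the program ends in BPF_RET and has no bad jumps. */
	return sk_chk_filter(example_accept_all,
			     ARRAY_SIZE(example_accept_all));
}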
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index e1da81d261d..e8b2acbc8ea 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -9,6 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
@@ -16,6 +17,7 @@
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/wireless.h>
+#include <net/iw_handler.h>
#define to_class_dev(obj) container_of(obj,struct class_device,kobj)
#define to_net_dev(class) container_of(class, struct net_device, class_dev)
@@ -294,13 +296,19 @@ static ssize_t wireless_show(struct class_device *cd, char *buf,
char *))
{
struct net_device *dev = to_net_dev(cd);
- const struct iw_statistics *iw;
+ const struct iw_statistics *iw = NULL;
ssize_t ret = -EINVAL;
read_lock(&dev_base_lock);
- if (dev_isalive(dev) && dev->get_wireless_stats
- && (iw = dev->get_wireless_stats(dev)) != NULL)
- ret = (*format)(iw, buf);
+ if (dev_isalive(dev)) {
+ if(dev->wireless_handlers &&
+ dev->wireless_handlers->get_wireless_stats)
+ iw = dev->wireless_handlers->get_wireless_stats(dev);
+ else if (dev->get_wireless_stats)
+ iw = dev->get_wireless_stats(dev);
+ if (iw != NULL)
+ ret = (*format)(iw, buf);
+ }
read_unlock(&dev_base_lock);
return ret;
@@ -402,7 +410,8 @@ void netdev_unregister_sysfs(struct net_device * net)
sysfs_remove_group(&class_dev->kobj, &netstat_group);
#ifdef WIRELESS_EXT
- if (net->get_wireless_stats)
+ if (net->get_wireless_stats || (net->wireless_handlers &&
+ net->wireless_handlers->get_wireless_stats))
sysfs_remove_group(&class_dev->kobj, &wireless_group);
#endif
class_device_del(class_dev);
@@ -427,10 +436,12 @@ int netdev_register_sysfs(struct net_device *net)
goto out_unreg;
#ifdef WIRELESS_EXT
- if (net->get_wireless_stats &&
- (ret = sysfs_create_group(&class_dev->kobj, &wireless_group)))
- goto out_cleanup;
-
+ if (net->get_wireless_stats || (net->wireless_handlers &&
+ net->wireless_handlers->get_wireless_stats)) {
+ ret = sysfs_create_group(&class_dev->kobj, &wireless_group);
+ if (ret)
+ goto out_cleanup;
+ }
return 0;
out_cleanup:
if (net->get_stats)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 06cad2d63e8..39063122fbb 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -116,13 +116,13 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
+#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/init.h>
@@ -473,7 +473,6 @@ static char version[] __initdata = VERSION;
static int pktgen_remove_device(struct pktgen_thread* t, struct pktgen_dev *i);
static int pktgen_add_device(struct pktgen_thread* t, const char* ifname);
-static struct pktgen_thread* pktgen_find_thread(const char* name);
static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread* t, const char* ifname);
static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
static void pktgen_run_all_threads(void);
@@ -2883,7 +2882,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char* ifname)
return add_dev_to_thread(t, pkt_dev);
}
-static struct pktgen_thread *pktgen_find_thread(const char* name)
+static struct pktgen_thread * __init pktgen_find_thread(const char* name)
{
struct pktgen_thread *t = NULL;
@@ -2900,7 +2899,7 @@ static struct pktgen_thread *pktgen_find_thread(const char* name)
return t;
}
-static int pktgen_create_thread(const char* name, int cpu)
+static int __init pktgen_create_thread(const char* name, int cpu)
{
struct pktgen_thread *t = NULL;
struct proc_dir_entry *pe;
diff --git a/net/core/scm.c b/net/core/scm.c
index e887d19be50..649d01ef35b 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/signal.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 070f91cfde5..d0732e9c856 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -791,8 +791,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
int end = offset + skb_shinfo(skb)->frags[i].size;
if (end > len) {
if (skb_cloned(skb)) {
- if (!realloc)
- BUG();
+ BUG_ON(!realloc);
if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
return -ENOMEM;
}
@@ -894,8 +893,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
struct sk_buff *insp = NULL;
do {
- if (!list)
- BUG();
+ BUG_ON(!list);
if (list->len <= eat) {
/* Eaten as whole. */
@@ -1199,8 +1197,7 @@ unsigned int skb_checksum(const struct sk_buff *skb, int offset,
start = end;
}
}
- if (len)
- BUG();
+ BUG_ON(len);
return csum;
}
@@ -1282,8 +1279,7 @@ unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
start = end;
}
}
- if (len)
- BUG();
+ BUG_ON(len);
return csum;
}
@@ -1297,8 +1293,7 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
else
csstart = skb_headlen(skb);
- if (csstart > skb_headlen(skb))
- BUG();
+ BUG_ON(csstart > skb_headlen(skb));
memcpy(to, skb->data, csstart);
diff --git a/net/core/sock.c b/net/core/sock.c
index 6465b0e4c8c..6e00811d44b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -91,6 +91,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
diff --git a/net/core/utils.c b/net/core/utils.c
index 587eb7787de..ac1d1fcf867 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -162,7 +162,7 @@ EXPORT_SYMBOL(net_srandom);
* is otherwise not dependent on the TCP/IP stack.
*/
-__u32 in_aton(const char *str)
+__be32 in_aton(const char *str)
{
unsigned long l;
unsigned int val;
diff --git a/net/core/wireless.c b/net/core/wireless.c
index 271ddb35b0b..2add7ed609e 100644
--- a/net/core/wireless.c
+++ b/net/core/wireless.c
@@ -78,6 +78,7 @@
#include <linux/seq_file.h>
#include <linux/init.h> /* for __init */
#include <linux/if_arp.h> /* ARPHRD_ETHER */
+#include <linux/etherdevice.h> /* compare_ether_addr */
#include <linux/wireless.h> /* Pretty obvious */
#include <net/iw_handler.h> /* New driver API */
@@ -1506,7 +1507,7 @@ void wireless_spy_update(struct net_device * dev,
/* Update all records that match */
for(i = 0; i < spydata->spy_number; i++)
- if(!memcmp(address, spydata->spy_address[i], ETH_ALEN)) {
+ if(!compare_ether_addr(address, spydata->spy_address[i])) {
memcpy(&(spydata->spy_stat[i]), wstats,
sizeof(struct iw_quality));
match = i;
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 3f244670764..00f98322667 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -986,6 +986,7 @@ int dccp_v4_rcv(struct sk_buff *skb)
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
+ nf_reset(skb);
return sk_receive_skb(sk, skb);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index c609dc78f48..df074259f9c 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -27,6 +27,7 @@
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
+#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include "dccp.h"
@@ -1028,7 +1029,7 @@ discard:
return 0;
}
-static int dccp_v6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
+static int dccp_v6_rcv(struct sk_buff **pskb)
{
const struct dccp_hdr *dh;
struct sk_buff *skb = *pskb;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 78ec5344be8..ce4aaf94860 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -122,6 +122,7 @@ Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat
#include <net/flow.h>
#include <asm/system.h>
#include <asm/ioctls.h>
+#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 5610bb16dbf..efbead83ba7 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -25,6 +25,7 @@
*/
#include <linux/config.h>
+#include <linux/capability.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 1ab94c6e22e..16a5a31e212 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -26,8 +26,6 @@
#include <net/dn.h>
#include <net/dn_route.h>
-#include <linux/netfilter_decnet.h>
-
static struct sock *dnrmg = NULL;
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 9f4dbeb5931..9890fd97e53 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -163,7 +163,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
skb_pull(skb,ETH_HLEN);
eth = eth_hdr(skb);
- if (*eth->h_dest&1) {
+ if (is_multicast_ether_addr(eth->h_dest)) {
if (!compare_ether_addr(eth->h_dest, dev->broadcast))
skb->pkt_type = PACKET_BROADCAST;
else
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c
index 073aebdf0f6..f8dca31be5d 100644
--- a/net/ieee80211/ieee80211_crypt_wep.c
+++ b/net/ieee80211/ieee80211_crypt_wep.c
@@ -75,22 +75,14 @@ static void prism2_wep_deinit(void *priv)
kfree(priv);
}
-/* Perform WEP encryption on given skb that has at least 4 bytes of headroom
- * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted,
- * so the payload length increases with 8 bytes.
- *
- * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data))
- */
-static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
+/* Add WEP IV/key info to a frame that has at least 4 bytes of headroom */
+static int prism2_wep_build_iv(struct sk_buff *skb, int hdr_len, void *priv)
{
struct prism2_wep_data *wep = priv;
- u32 crc, klen, len;
- u8 key[WEP_KEY_LEN + 3];
- u8 *pos, *icv;
- struct scatterlist sg;
-
- if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
- skb->len < hdr_len)
+ u32 klen, len;
+ u8 *pos;
+
+ if (skb_headroom(skb) < 4 || skb->len < hdr_len)
return -1;
len = skb->len - hdr_len;
@@ -112,15 +104,47 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
}
/* Prepend 24-bit IV to RC4 key and TX frame */
- *pos++ = key[0] = (wep->iv >> 16) & 0xff;
- *pos++ = key[1] = (wep->iv >> 8) & 0xff;
- *pos++ = key[2] = wep->iv & 0xff;
+ *pos++ = (wep->iv >> 16) & 0xff;
+ *pos++ = (wep->iv >> 8) & 0xff;
+ *pos++ = wep->iv & 0xff;
*pos++ = wep->key_idx << 6;
+ return 0;
+}
+
+/* Perform WEP encryption on given skb that has at least 4 bytes of headroom
+ * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted,
+ * so the payload length increases by 8 bytes.
+ *
+ * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data))
+ */
+static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
+{
+ struct prism2_wep_data *wep = priv;
+ u32 crc, klen, len;
+ u8 *pos, *icv;
+ struct scatterlist sg;
+ u8 key[WEP_KEY_LEN + 3];
+
+ /* other checks are in prism2_wep_build_iv */
+ if (skb_tailroom(skb) < 4)
+ return -1;
+
+ /* add the IV to the frame */
+ if (prism2_wep_build_iv(skb, hdr_len, priv))
+ return -1;
+
+ /* Copy the IV into the first 3 bytes of the key */
+ memcpy(key, skb->data + hdr_len, 3);
+
/* Copy rest of the WEP key (the secret part) */
memcpy(key + 3, wep->key, wep->key_len);
+
+ len = skb->len - hdr_len - 4;
+ pos = skb->data + hdr_len + 4;
+ klen = 3 + wep->key_len;
- /* Append little-endian CRC32 and encrypt it to produce ICV */
+ /* Append little-endian CRC32 over only the data and encrypt it to produce ICV */
crc = ~crc32_le(~0, pos, len);
icv = skb_put(skb, 4);
icv[0] = crc;
@@ -231,6 +255,7 @@ static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
.name = "WEP",
.init = prism2_wep_init,
.deinit = prism2_wep_deinit,
+ .build_iv = prism2_wep_build_iv,
.encrypt_mpdu = prism2_wep_encrypt,
.decrypt_mpdu = prism2_wep_decrypt,
.encrypt_msdu = NULL,
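An illustrative note (not from the patch) on the framing that the split preserves: prism2_wep_build_iv() prepends the 4-byte IV header (three IV bytes plus the key-index byte), while prism2_wep_encrypt() copies those IV bytes in front of the secret key to seed RC4 and appends the 4-byte ICV, so the transmitted payload grows by 8 bytes in total:

/* Illustrative arithmetic only: on-air WEP payload size for a given
 * plaintext length, per the layout IV+keyidx | RC4(data) | RC4(CRC32(data)).
 */
static inline unsigned int example_wep_payload_len(unsigned int data_len)
{
	return 4 /* IV + key index */ + data_len + 4 /* ICV */;
}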
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index 4cc6f41c693..5e338038804 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -76,8 +76,8 @@ static struct ieee80211_frag_entry *ieee80211_frag_cache_find(struct
if (entry->skb != NULL && entry->seq == seq &&
(entry->last_frag + 1 == frag || frag == -1) &&
- memcmp(entry->src_addr, src, ETH_ALEN) == 0 &&
- memcmp(entry->dst_addr, dst, ETH_ALEN) == 0)
+ !compare_ether_addr(entry->src_addr, src) &&
+ !compare_ether_addr(entry->dst_addr, dst))
return entry;
}
@@ -243,12 +243,12 @@ static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee,
/* check that the frame is unicast frame to us */
if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
IEEE80211_FCTL_TODS &&
- memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0 &&
- memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
+ !compare_ether_addr(hdr->addr1, dev->dev_addr) &&
+ !compare_ether_addr(hdr->addr3, dev->dev_addr)) {
/* ToDS frame with own addr BSSID and DA */
} else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
IEEE80211_FCTL_FROMDS &&
- memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
+ !compare_ether_addr(hdr->addr1, dev->dev_addr)) {
/* FromDS frame with own addr as DA */
} else
return 0;
@@ -505,7 +505,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
if (ieee->iw_mode == IW_MODE_MASTER && !wds &&
(fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
IEEE80211_FCTL_FROMDS && ieee->stadev
- && memcmp(hdr->addr2, ieee->assoc_ap_addr, ETH_ALEN) == 0) {
+ && !compare_ether_addr(hdr->addr2, ieee->assoc_ap_addr)) {
/* Frame from BSSID of the AP for which we are a client */
skb->dev = dev = ieee->stadev;
stats = hostap_get_stats(dev);
@@ -1231,7 +1231,7 @@ static inline int is_same_network(struct ieee80211_network *src,
* as one network */
return ((src->ssid_len == dst->ssid_len) &&
(src->channel == dst->channel) &&
- !memcmp(src->bssid, dst->bssid, ETH_ALEN) &&
+ !compare_ether_addr(src->bssid, dst->bssid) &&
!memcmp(src->ssid, dst->ssid, src->ssid_len));
}
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c
index 445f206e65e..e5b33c8d5db 100644
--- a/net/ieee80211/ieee80211_tx.c
+++ b/net/ieee80211/ieee80211_tx.c
@@ -288,7 +288,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
/* Determine total amount of storage required for TXB packets */
bytes = skb->len + SNAP_SIZE + sizeof(u16);
- if (host_encrypt)
+ if (host_encrypt || host_build_iv)
fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
IEEE80211_FCTL_PROTECTED;
else
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c
index 181755f2aa8..406d5b96490 100644
--- a/net/ieee80211/ieee80211_wx.c
+++ b/net/ieee80211/ieee80211_wx.c
@@ -284,7 +284,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
};
int i, key, key_provided, len;
struct ieee80211_crypt_data **crypt;
- int host_crypto = ieee->host_encrypt || ieee->host_decrypt;
+ int host_crypto = ieee->host_encrypt || ieee->host_decrypt || ieee->host_build_iv;
IEEE80211_DEBUG_WX("SET_ENCODE\n");
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index c54edd76de0..35e5f599909 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -9,7 +9,7 @@ obj-y := route.o inetpeer.o protocol.o \
tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \
tcp_minisocks.o tcp_cong.o \
datagram.o raw.o udp.o arp.o icmp.o devinet.o af_inet.o igmp.o \
- sysctl_net_ipv4.o fib_frontend.o fib_semantics.o netfilter.o
+ sysctl_net_ipv4.o fib_frontend.o fib_semantics.o
obj-$(CONFIG_IP_FIB_HASH) += fib_hash.o
obj-$(CONFIG_IP_FIB_TRIE) += fib_trie.o
@@ -28,7 +28,7 @@ obj-$(CONFIG_IP_ROUTE_MULTIPATH_RR) += multipath_rr.o
obj-$(CONFIG_IP_ROUTE_MULTIPATH_RANDOM) += multipath_random.o
obj-$(CONFIG_IP_ROUTE_MULTIPATH_WRANDOM) += multipath_wrandom.o
obj-$(CONFIG_IP_ROUTE_MULTIPATH_DRR) += multipath_drr.o
-obj-$(CONFIG_NETFILTER) += netfilter/
+obj-$(CONFIG_NETFILTER) += netfilter.o netfilter/
obj-$(CONFIG_IP_VS) += ipvs/
obj-$(CONFIG_INET_DIAG) += inet_diag.o
obj-$(CONFIG_IP_ROUTE_MULTIPATH_CACHED) += multipath.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 966a071a408..97c276f95b3 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -79,6 +79,7 @@
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
+#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 37432088fe6..accdefedfed 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -79,6 +79,7 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/socket.h>
#include <linux/sockios.h>
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 7b9bb28e2ee..95b9d81ac48 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -32,6 +32,7 @@
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
+#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 18f5e509281..5b25fc0d980 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -20,6 +20,7 @@
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
+#include <linux/capability.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index be5a519cd2f..105039eb762 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -899,8 +899,7 @@ static void icmp_address_reply(struct sk_buff *skb)
u32 _mask, *mp;
mp = skb_header_pointer(skb, 0, sizeof(_mask), &_mask);
- if (mp == NULL)
- BUG();
+ BUG_ON(mp == NULL);
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
if (*mp == ifa->ifa_mask &&
inet_ifa_match(rt->rt_src, ifa))
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 34758118c10..192092b89e5 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -975,7 +975,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
* for deleted items allows change reports to use common code with
* non-deleted or query-response MCA's.
*/
- pmc = (struct ip_mc_list *)kmalloc(sizeof(*pmc), GFP_KERNEL);
+ pmc = kmalloc(sizeof(*pmc), GFP_KERNEL);
if (!pmc)
return;
memset(pmc, 0, sizeof(*pmc));
@@ -1155,7 +1155,7 @@ void ip_mc_inc_group(struct in_device *in_dev, u32 addr)
}
}
- im = (struct ip_mc_list *)kmalloc(sizeof(*im), GFP_KERNEL);
+ im = kmalloc(sizeof(*im), GFP_KERNEL);
if (!im)
goto out;
@@ -1476,7 +1476,7 @@ static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
psf_prev = psf;
}
if (!psf) {
- psf = (struct ip_sf_list *)kmalloc(sizeof(*psf), GFP_ATOMIC);
+ psf = kmalloc(sizeof(*psf), GFP_ATOMIC);
if (!psf)
return -ENOBUFS;
memset(psf, 0, sizeof(*psf));
@@ -1659,7 +1659,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
err = -ENOBUFS;
if (count >= sysctl_igmp_max_memberships)
goto done;
- iml = (struct ip_mc_socklist *)sock_kmalloc(sk,sizeof(*iml),GFP_KERNEL);
+ iml = sock_kmalloc(sk,sizeof(*iml),GFP_KERNEL);
if (iml == NULL)
goto done;
@@ -1823,8 +1823,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
if (psl)
count += psl->sl_max;
- newpsl = (struct ip_sf_socklist *)sock_kmalloc(sk,
- IP_SFLSIZE(count), GFP_KERNEL);
+ newpsl = sock_kmalloc(sk, IP_SFLSIZE(count), GFP_KERNEL);
if (!newpsl) {
err = -ENOBUFS;
goto done;
@@ -1907,8 +1906,8 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
goto done;
}
if (msf->imsf_numsrc) {
- newpsl = (struct ip_sf_socklist *)sock_kmalloc(sk,
- IP_SFLSIZE(msf->imsf_numsrc), GFP_KERNEL);
+ newpsl = sock_kmalloc(sk, IP_SFLSIZE(msf->imsf_numsrc),
+ GFP_KERNEL);
if (!newpsl) {
err = -ENOBUFS;
goto done;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index c4990819204..457db99c76d 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -50,9 +50,10 @@ static struct sock *idiagnl;
#define INET_DIAG_PUT(skb, attrtype, attrlen) \
RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
-static int inet_diag_fill(struct sk_buff *skb, struct sock *sk,
- int ext, u32 pid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh)
+static int inet_csk_diag_fill(struct sock *sk,
+ struct sk_buff *skb,
+ int ext, u32 pid, u32 seq, u16 nlmsg_flags,
+ const struct nlmsghdr *unlh)
{
const struct inet_sock *inet = inet_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -70,20 +71,22 @@ static int inet_diag_fill(struct sk_buff *skb, struct sock *sk,
nlh->nlmsg_flags = nlmsg_flags;
r = NLMSG_DATA(nlh);
- if (sk->sk_state != TCP_TIME_WAIT) {
- if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
- minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO,
- sizeof(*minfo));
- if (ext & (1 << (INET_DIAG_INFO - 1)))
- info = INET_DIAG_PUT(skb, INET_DIAG_INFO,
- handler->idiag_info_size);
-
- if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
- size_t len = strlen(icsk->icsk_ca_ops->name);
- strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
- icsk->icsk_ca_ops->name);
- }
+ BUG_ON(sk->sk_state == TCP_TIME_WAIT);
+
+ if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
+ minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo));
+
+ if (ext & (1 << (INET_DIAG_INFO - 1)))
+ info = INET_DIAG_PUT(skb, INET_DIAG_INFO,
+ handler->idiag_info_size);
+
+ if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
+ const size_t len = strlen(icsk->icsk_ca_ops->name);
+
+ strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
+ icsk->icsk_ca_ops->name);
}
+
r->idiag_family = sk->sk_family;
r->idiag_state = sk->sk_state;
r->idiag_timer = 0;
@@ -93,37 +96,6 @@ static int inet_diag_fill(struct sk_buff *skb, struct sock *sk,
r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
- if (r->idiag_state == TCP_TIME_WAIT) {
- const struct inet_timewait_sock *tw = inet_twsk(sk);
- long tmo = tw->tw_ttd - jiffies;
- if (tmo < 0)
- tmo = 0;
-
- r->id.idiag_sport = tw->tw_sport;
- r->id.idiag_dport = tw->tw_dport;
- r->id.idiag_src[0] = tw->tw_rcv_saddr;
- r->id.idiag_dst[0] = tw->tw_daddr;
- r->idiag_state = tw->tw_substate;
- r->idiag_timer = 3;
- r->idiag_expires = (tmo * 1000 + HZ - 1) / HZ;
- r->idiag_rqueue = 0;
- r->idiag_wqueue = 0;
- r->idiag_uid = 0;
- r->idiag_inode = 0;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
- if (r->idiag_family == AF_INET6) {
- const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
-
- ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
- &tw6->tw_v6_rcv_saddr);
- ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
- &tw6->tw_v6_daddr);
- }
-#endif
- nlh->nlmsg_len = skb->tail - b;
- return skb->len;
- }
-
r->id.idiag_sport = inet->sport;
r->id.idiag_dport = inet->dport;
r->id.idiag_src[0] = inet->rcv_saddr;
@@ -185,7 +157,75 @@ nlmsg_failure:
return -1;
}
-static int inet_diag_get_exact(struct sk_buff *in_skb, const struct nlmsghdr *nlh)
+static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
+ struct sk_buff *skb, int ext, u32 pid,
+ u32 seq, u16 nlmsg_flags,
+ const struct nlmsghdr *unlh)
+{
+ long tmo;
+ struct inet_diag_msg *r;
+ const unsigned char *previous_tail = skb->tail;
+ struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq,
+ unlh->nlmsg_type, sizeof(*r));
+
+ r = NLMSG_DATA(nlh);
+ BUG_ON(tw->tw_state != TCP_TIME_WAIT);
+
+ nlh->nlmsg_flags = nlmsg_flags;
+
+ tmo = tw->tw_ttd - jiffies;
+ if (tmo < 0)
+ tmo = 0;
+
+ r->idiag_family = tw->tw_family;
+ r->idiag_state = tw->tw_state;
+ r->idiag_timer = 0;
+ r->idiag_retrans = 0;
+ r->id.idiag_if = tw->tw_bound_dev_if;
+ r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
+ r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
+ r->id.idiag_sport = tw->tw_sport;
+ r->id.idiag_dport = tw->tw_dport;
+ r->id.idiag_src[0] = tw->tw_rcv_saddr;
+ r->id.idiag_dst[0] = tw->tw_daddr;
+ r->idiag_state = tw->tw_substate;
+ r->idiag_timer = 3;
+ r->idiag_expires = (tmo * 1000 + HZ - 1) / HZ;
+ r->idiag_rqueue = 0;
+ r->idiag_wqueue = 0;
+ r->idiag_uid = 0;
+ r->idiag_inode = 0;
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+ if (tw->tw_family == AF_INET6) {
+ const struct inet6_timewait_sock *tw6 =
+ inet6_twsk((struct sock *)tw);
+
+ ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
+ &tw6->tw_v6_rcv_saddr);
+ ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
+ &tw6->tw_v6_daddr);
+ }
+#endif
+ nlh->nlmsg_len = skb->tail - previous_tail;
+ return skb->len;
+nlmsg_failure:
+ skb_trim(skb, previous_tail - skb->data);
+ return -1;
+}
+
+static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
+ int ext, u32 pid, u32 seq, u16 nlmsg_flags,
+ const struct nlmsghdr *unlh)
+{
+ if (sk->sk_state == TCP_TIME_WAIT)
+ return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
+ skb, ext, pid, seq, nlmsg_flags,
+ unlh);
+ return inet_csk_diag_fill(sk, skb, ext, pid, seq, nlmsg_flags, unlh);
+}
+
+static int inet_diag_get_exact(struct sk_buff *in_skb,
+ const struct nlmsghdr *nlh)
{
int err;
struct sock *sk;
@@ -235,7 +275,7 @@ static int inet_diag_get_exact(struct sk_buff *in_skb, const struct nlmsghdr *nl
if (!rep)
goto out;
- if (inet_diag_fill(rep, sk, req->idiag_ext,
+ if (sk_diag_fill(sk, rep, req->idiag_ext,
NETLINK_CB(in_skb).pid,
nlh->nlmsg_seq, 0, nlh) <= 0)
BUG();
@@ -283,7 +323,7 @@ static int bitstring_match(const u32 *a1, const u32 *a2, int bits)
static int inet_diag_bc_run(const void *bc, int len,
- const struct inet_diag_entry *entry)
+ const struct inet_diag_entry *entry)
{
while (len > 0) {
int yes = 1;
@@ -322,7 +362,7 @@ static int inet_diag_bc_run(const void *bc, int len,
yes = 0;
break;
}
-
+
if (cond->prefix_len == 0)
break;
@@ -331,7 +371,8 @@ static int inet_diag_bc_run(const void *bc, int len,
else
addr = entry->daddr;
- if (bitstring_match(addr, cond->addr, cond->prefix_len))
+ if (bitstring_match(addr, cond->addr,
+ cond->prefix_len))
break;
if (entry->family == AF_INET6 &&
cond->family == AF_INET) {
@@ -346,7 +387,7 @@ static int inet_diag_bc_run(const void *bc, int len,
}
}
- if (yes) {
+ if (yes) {
len -= op->yes;
bc += op->yes;
} else {
@@ -407,14 +448,15 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
default:
return -EINVAL;
}
- bc += op->yes;
+ bc += op->yes;
len -= op->yes;
}
return len == 0 ? 0 : -EINVAL;
}
-static int inet_diag_dump_sock(struct sk_buff *skb, struct sock *sk,
- struct netlink_callback *cb)
+static int inet_csk_diag_dump(struct sock *sk,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
{
struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
@@ -444,14 +486,50 @@ static int inet_diag_dump_sock(struct sk_buff *skb, struct sock *sk,
return 0;
}
- return inet_diag_fill(skb, sk, r->idiag_ext, NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
+ return inet_csk_diag_fill(sk, skb, r->idiag_ext,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
+}
+
+static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
+
+ if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
+ struct inet_diag_entry entry;
+ struct rtattr *bc = (struct rtattr *)(r + 1);
+
+ entry.family = tw->tw_family;
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+ if (tw->tw_family == AF_INET6) {
+ struct inet6_timewait_sock *tw6 =
+ inet6_twsk((struct sock *)tw);
+ entry.saddr = tw6->tw_v6_rcv_saddr.s6_addr32;
+ entry.daddr = tw6->tw_v6_daddr.s6_addr32;
+ } else
+#endif
+ {
+ entry.saddr = &tw->tw_rcv_saddr;
+ entry.daddr = &tw->tw_daddr;
+ }
+ entry.sport = tw->tw_num;
+ entry.dport = ntohs(tw->tw_dport);
+ entry.userlocks = 0;
+
+ if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
+ return 0;
+ }
+
+ return inet_twsk_diag_fill(tw, skb, r->idiag_ext,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}
static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
- struct request_sock *req,
- u32 pid, u32 seq,
- const struct nlmsghdr *unlh)
+ struct request_sock *req, u32 pid, u32 seq,
+ const struct nlmsghdr *unlh)
{
const struct inet_request_sock *ireq = inet_rsk(req);
struct inet_sock *inet = inet_sk(sk);
@@ -504,7 +582,7 @@ nlmsg_failure:
}
static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
- struct netlink_callback *cb)
+ struct netlink_callback *cb)
{
struct inet_diag_entry entry;
struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
@@ -556,7 +634,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
inet6_rsk(req)->loc_addr.s6_addr32 :
#endif
&ireq->loc_addr;
- entry.daddr =
+ entry.daddr =
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
(entry.family == AF_INET6) ?
inet6_rsk(req)->rmt_addr.s6_addr32 :
@@ -599,7 +677,7 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
handler = inet_diag_table[cb->nlh->nlmsg_type];
BUG_ON(handler == NULL);
hashinfo = handler->idiag_hashinfo;
-
+
s_i = cb->args[1];
s_num = num = cb->args[2];
@@ -630,7 +708,7 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
cb->args[3] > 0)
goto syn_recv;
- if (inet_diag_dump_sock(skb, sk, cb) < 0) {
+ if (inet_csk_diag_dump(sk, skb, cb) < 0) {
inet_listen_unlock(hashinfo);
goto done;
}
@@ -672,7 +750,6 @@ skip_listen_ht:
s_num = 0;
read_lock_bh(&head->lock);
-
num = 0;
sk_for_each(sk, node, &head->chain) {
struct inet_sock *inet = inet_sk(sk);
@@ -684,9 +761,10 @@ skip_listen_ht:
if (r->id.idiag_sport != inet->sport &&
r->id.idiag_sport)
goto next_normal;
- if (r->id.idiag_dport != inet->dport && r->id.idiag_dport)
+ if (r->id.idiag_dport != inet->dport &&
+ r->id.idiag_dport)
goto next_normal;
- if (inet_diag_dump_sock(skb, sk, cb) < 0) {
+ if (inet_csk_diag_dump(sk, skb, cb) < 0) {
read_unlock_bh(&head->lock);
goto done;
}
@@ -695,19 +773,20 @@ next_normal:
}
if (r->idiag_states & TCPF_TIME_WAIT) {
- sk_for_each(sk, node,
+ struct inet_timewait_sock *tw;
+
+ inet_twsk_for_each(tw, node,
&hashinfo->ehash[i + hashinfo->ehash_size].chain) {
- struct inet_sock *inet = inet_sk(sk);
if (num < s_num)
goto next_dying;
- if (r->id.idiag_sport != inet->sport &&
+ if (r->id.idiag_sport != tw->tw_sport &&
r->id.idiag_sport)
goto next_dying;
- if (r->id.idiag_dport != inet->dport &&
+ if (r->id.idiag_dport != tw->tw_dport &&
r->id.idiag_dport)
goto next_dying;
- if (inet_diag_dump_sock(skb, sk, cb) < 0) {
+ if (inet_twsk_diag_dump(tw, skb, cb) < 0) {
read_unlock_bh(&head->lock);
goto done;
}
@@ -724,8 +803,7 @@ done:
return skb->len;
}
-static __inline__ int
-inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+static inline int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
if (!(nlh->nlmsg_flags&NLM_F_REQUEST))
return 0;
@@ -755,9 +833,8 @@ inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
}
return netlink_dump_start(idiagnl, skb, nlh,
inet_diag_dump, NULL);
- } else {
+ } else
return inet_diag_get_exact(skb, nlh);
- }
err_inval:
return -EINVAL;
@@ -766,15 +843,15 @@ err_inval:
static inline void inet_diag_rcv_skb(struct sk_buff *skb)
{
- int err;
- struct nlmsghdr * nlh;
-
if (skb->len >= NLMSG_SPACE(0)) {
- nlh = (struct nlmsghdr *)skb->data;
- if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
+ int err;
+ struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
+
+ if (nlh->nlmsg_len < sizeof(*nlh) ||
+ skb->len < nlh->nlmsg_len)
return;
err = inet_diag_rcv_msg(skb, nlh);
- if (err || nlh->nlmsg_flags & NLM_F_ACK)
+ if (err || nlh->nlmsg_flags & NLM_F_ACK)
netlink_ack(skb, nlh, err);
}
}
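
A minimal sketch of the dispatch idea behind the sk_diag_fill() helper added above: sockets in TCP_TIME_WAIT are stored as the smaller struct inet_timewait_sock, so the state must be checked before casting. The two handle_* helpers here are hypothetical placeholders, not part of the patch.

static int example_diag_one(struct sock *sk)
{
	/* A timewait socket only carries struct inet_timewait_sock fields;
	 * treating it as a full struct sock would read past the object. */
	if (sk->sk_state == TCP_TIME_WAIT) {
		struct inet_timewait_sock *tw = (struct inet_timewait_sock *)sk;
		return handle_timewait(tw);	/* hypothetical helper */
	}
	return handle_full_sock(sk);		/* hypothetical helper */
}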
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index ce5fe3f74a3..2160874ce7a 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -304,8 +304,7 @@ static void unlink_from_pool(struct inet_peer *p)
/* look for a node to insert instead of p */
struct inet_peer *t;
t = lookup_rightempty(p);
- if (*stackptr[-1] != t)
- BUG();
+ BUG_ON(*stackptr[-1] != t);
**--stackptr = t->avl_left;
/* t is removed, t->v4daddr > x->v4daddr for any
* x in p->avl_left subtree.
@@ -314,8 +313,7 @@ static void unlink_from_pool(struct inet_peer *p)
t->avl_left = p->avl_left;
t->avl_right = p->avl_right;
t->avl_height = p->avl_height;
- if (delp[1] != &p->avl_left)
- BUG();
+ BUG_ON(delp[1] != &p->avl_left);
delp[1] = &t->avl_left; /* was &p->avl_left */
}
peer_avl_rebalance(stack, stackptr);
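
The inetpeer.c hunks above are a mechanical cleanup; for reference, the two equivalent forms look like this (identifiers taken from the surrounding code):

	/* Open-coded form removed by the patch: */
	if (delp[1] != &p->avl_left)
		BUG();

	/* Replacement: BUG_ON() expands to roughly
	 * "if (unlikely(cond)) BUG();" and keeps the check on one line. */
	BUG_ON(delp[1] != &p->avl_left);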
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index ce2b70ce401..2a8adda15e1 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -383,7 +383,7 @@ out_nomem:
*/
static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
{
- __u16 id = iph->id;
+ __be16 id = iph->id;
__u32 saddr = iph->saddr;
__u32 daddr = iph->daddr;
__u8 protocol = iph->protocol;
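
The __u16 to __be16 change above is an endianness annotation for sparse rather than a behavioural change; a small illustrative sketch of how such annotated values are handled:

	__be16 id = iph->id;		/* kept in network byte order; sparse
					 * warns if mixed with host-order u16 */
	u16 host_id = ntohs(id);	/* explicit conversion to host order */
	iph->id = htons(host_id);	/* and back when writing the header */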
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 912c42f57c7..abe23923e4e 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -10,6 +10,7 @@
*
*/
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -188,7 +189,7 @@ static struct ip_tunnel * ipgre_tunnel_lookup(u32 remote, u32 local, u32 key)
}
if (ipgre_fb_tunnel_dev->flags&IFF_UP)
- return ipgre_fb_tunnel_dev->priv;
+ return netdev_priv(ipgre_fb_tunnel_dev);
return NULL;
}
@@ -278,7 +279,7 @@ static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int
return NULL;
dev->init = ipgre_tunnel_init;
- nt = dev->priv;
+ nt = netdev_priv(dev);
nt->parms = *parms;
if (register_netdevice(dev) < 0) {
@@ -286,9 +287,6 @@ static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int
goto failed;
}
- nt = dev->priv;
- nt->parms = *parms;
-
dev_hold(dev);
ipgre_tunnel_link(nt);
return nt;
@@ -299,7 +297,7 @@ failed:
static void ipgre_tunnel_uninit(struct net_device *dev)
{
- ipgre_tunnel_unlink((struct ip_tunnel*)dev->priv);
+ ipgre_tunnel_unlink(netdev_priv(dev));
dev_put(dev);
}
@@ -518,7 +516,7 @@ out:
skb2->dst->ops->update_pmtu(skb2->dst, rel_info);
rel_info = htonl(rel_info);
} else if (type == ICMP_TIME_EXCEEDED) {
- struct ip_tunnel *t = (struct ip_tunnel*)skb2->dev->priv;
+ struct ip_tunnel *t = netdev_priv(skb2->dev);
if (t->parms.iph.ttl) {
rel_type = ICMP_DEST_UNREACH;
rel_code = ICMP_HOST_UNREACH;
@@ -669,7 +667,7 @@ drop_nolock:
static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv;
+ struct ip_tunnel *tunnel = netdev_priv(dev);
struct net_device_stats *stats = &tunnel->stat;
struct iphdr *old_iph = skb->nh.iph;
struct iphdr *tiph;
@@ -832,6 +830,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
skb->h.raw = skb->nh.raw;
skb->nh.raw = skb_push(skb, gre_hlen);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE|IPSKB_XFRM_TRANSFORMED);
dst_release(skb->dst);
skb->dst = &rt->u.dst;
@@ -914,7 +913,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
t = ipgre_tunnel_locate(&p, 0);
}
if (t == NULL)
- t = (struct ip_tunnel*)dev->priv;
+ t = netdev_priv(dev);
memcpy(&p, &t->parms, sizeof(p));
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
err = -EFAULT;
@@ -954,7 +953,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
} else {
unsigned nflags=0;
- t = (struct ip_tunnel*)dev->priv;
+ t = netdev_priv(dev);
if (MULTICAST(p.iph.daddr))
nflags = IFF_BROADCAST;
@@ -1003,7 +1002,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
if ((t = ipgre_tunnel_locate(&p, 0)) == NULL)
goto done;
err = -EPERM;
- if (t == ipgre_fb_tunnel_dev->priv)
+ if (t == netdev_priv(ipgre_fb_tunnel_dev))
goto done;
dev = t->dev;
}
@@ -1020,12 +1019,12 @@ done:
static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev)
{
- return &(((struct ip_tunnel*)dev->priv)->stat);
+ return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
}
static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
- struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv;
+ struct ip_tunnel *tunnel = netdev_priv(dev);
if (new_mtu < 68 || new_mtu > 0xFFF8 - tunnel->hlen)
return -EINVAL;
dev->mtu = new_mtu;
@@ -1065,7 +1064,7 @@ static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
void *daddr, void *saddr, unsigned len)
{
- struct ip_tunnel *t = (struct ip_tunnel*)dev->priv;
+ struct ip_tunnel *t = netdev_priv(dev);
struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
u16 *p = (u16*)(iph+1);
@@ -1092,7 +1091,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, unsigned sh
static int ipgre_open(struct net_device *dev)
{
- struct ip_tunnel *t = (struct ip_tunnel*)dev->priv;
+ struct ip_tunnel *t = netdev_priv(dev);
if (MULTICAST(t->parms.iph.daddr)) {
struct flowi fl = { .oif = t->parms.link,
@@ -1116,7 +1115,7 @@ static int ipgre_open(struct net_device *dev)
static int ipgre_close(struct net_device *dev)
{
- struct ip_tunnel *t = (struct ip_tunnel*)dev->priv;
+ struct ip_tunnel *t = netdev_priv(dev);
if (MULTICAST(t->parms.iph.daddr) && t->mlink) {
struct in_device *in_dev = inetdev_by_index(t->mlink);
if (in_dev) {
@@ -1156,7 +1155,7 @@ static int ipgre_tunnel_init(struct net_device *dev)
int mtu = ETH_DATA_LEN;
int addend = sizeof(struct iphdr) + 4;
- tunnel = (struct ip_tunnel*)dev->priv;
+ tunnel = netdev_priv(dev);
iph = &tunnel->parms.iph;
tunnel->dev = dev;
@@ -1220,7 +1219,7 @@ static int ipgre_tunnel_init(struct net_device *dev)
static int __init ipgre_fb_tunnel_init(struct net_device *dev)
{
- struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv;
+ struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
tunnel->dev = dev;
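
The repeated dev->priv to netdev_priv() conversions in this file (and in ipip.c and ipmr.c below) all follow one pattern. A minimal sketch, assuming the device is created with alloc_netdev() as ipgre_tunnel_locate() does; the function name example_create is hypothetical:

static struct ip_tunnel *example_create(const char *name)
{
	struct net_device *dev;
	struct ip_tunnel *t;

	/* The private struct is allocated in the same block as the
	 * net_device itself, laid out directly behind it. */
	dev = alloc_netdev(sizeof(struct ip_tunnel), name, ipgre_tunnel_setup);
	if (!dev)
		return NULL;

	/* netdev_priv() returns a pointer into that block; unlike the old
	 * dev->priv pointer it needs no cast at the call sites. */
	t = netdev_priv(dev);
	t->dev = dev;
	return t;
}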
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index e45846ae570..18d7fad474d 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -185,7 +185,6 @@ int ip_call_ra_chain(struct sk_buff *skb)
raw_rcv(last, skb2);
}
last = sk;
- nf_reset(skb);
}
}
@@ -204,10 +203,6 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb)
__skb_pull(skb, ihl);
- /* Free reference early: we don't need it any more, and it may
- hold ip_conntrack module loaded indefinitely. */
- nf_reset(skb);
-
/* Point into the IP datagram, just past the header. */
skb->h.raw = skb->data;
@@ -232,10 +227,12 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb)
if ((ipprot = rcu_dereference(inet_protos[hash])) != NULL) {
int ret;
- if (!ipprot->no_policy &&
- !xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
- kfree_skb(skb);
- goto out;
+ if (!ipprot->no_policy) {
+ if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+ kfree_skb(skb);
+ goto out;
+ }
+ nf_reset(skb);
}
ret = ipprot->handler(skb);
if (ret < 0) {
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index d3f6c468faf..9bebad07bf2 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -11,6 +11,7 @@
*
*/
+#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <asm/uaccess.h>
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 71da31818cf..3324fbfe528 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -69,6 +69,7 @@
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
+#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
@@ -85,6 +86,8 @@
int sysctl_ip_default_ttl = IPDEFTTL;
+static int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*));
+
/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
@@ -202,6 +205,11 @@ static inline int ip_finish_output2(struct sk_buff *skb)
static inline int ip_finish_output(struct sk_buff *skb)
{
+#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
+ /* Policy lookup after SNAT yielded a new policy */
+ if (skb->dst->xfrm != NULL)
+ return xfrm4_output_finish(skb);
+#endif
if (skb->len > dst_mtu(skb->dst) &&
!(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
return ip_fragment(skb, ip_finish_output2);
@@ -409,7 +417,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
* single device frame, and queue such a frame for sending.
*/
-int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
+static int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
{
struct iphdr *iph;
int raw = 0;
@@ -418,7 +426,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
struct sk_buff *skb2;
unsigned int mtu, hlen, left, len, ll_rs;
int offset;
- int not_last_frag;
+ __be16 not_last_frag;
struct rtable *rt = (struct rtable*)skb->dst;
int err = 0;
@@ -1180,7 +1188,7 @@ int ip_push_pending_frames(struct sock *sk)
struct ip_options *opt = NULL;
struct rtable *rt = inet->cork.rt;
struct iphdr *iph;
- int df = 0;
+ __be16 df = 0;
__u8 ttl;
int err = 0;
@@ -1391,7 +1399,6 @@ void __init ip_init(void)
#endif
}
-EXPORT_SYMBOL(ip_fragment);
EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);
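
For readability, a consolidated view of ip_finish_output() as it reads after the hunks above: the added forward declaration keeps the now-static ip_fragment() callable from here, and the EXPORT_SYMBOL(ip_fragment) line becomes unnecessary.

static int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));

static inline int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb->dst->xfrm != NULL)
		return xfrm4_output_finish(skb);
#endif
	if (skb->len > dst_mtu(skb->dst) &&
	    !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
		return ip_fragment(skb, ip_finish_output2);
	return ip_finish_output2(skb);
}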
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 6986e11d65c..2bf8d782f67 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -621,7 +621,7 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
err = -ENOBUFS;
break;
}
- msf = (struct ip_msfilter *)kmalloc(optlen, GFP_KERNEL);
+ msf = kmalloc(optlen, GFP_KERNEL);
if (msf == 0) {
err = -ENOBUFS;
break;
@@ -778,7 +778,7 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
err = -ENOBUFS;
break;
}
- gsf = (struct group_filter *)kmalloc(optlen,GFP_KERNEL);
+ gsf = kmalloc(optlen,GFP_KERNEL);
if (gsf == 0) {
err = -ENOBUFS;
break;
@@ -798,7 +798,7 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
goto mc_msf_out;
}
msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
- msf = (struct ip_msfilter *)kmalloc(msize,GFP_KERNEL);
+ msf = kmalloc(msize,GFP_KERNEL);
if (msf == 0) {
err = -ENOBUFS;
goto mc_msf_out;
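
The ip_sockglue.c hunks above only drop redundant casts; a minimal before/after sketch using msf and optlen from the surrounding code:

	/* Before: casting the void * return of kmalloc() is unnecessary in C
	 * and can mask a missing declaration of kmalloc(). */
	msf = (struct ip_msfilter *)kmalloc(optlen, GFP_KERNEL);

	/* After: assign directly; the NULL check is still required. */
	msf = kmalloc(optlen, GFP_KERNEL);
	if (!msf)
		return -ENOBUFS;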
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 35571cff81c..e5cbe72c6b8 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -93,6 +93,7 @@
*/
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -244,7 +245,7 @@ static struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int c
if (dev == NULL)
return NULL;
- nt = dev->priv;
+ nt = netdev_priv(dev);
SET_MODULE_OWNER(dev);
dev->init = ipip_tunnel_init;
nt->parms = *parms;
@@ -269,7 +270,7 @@ static void ipip_tunnel_uninit(struct net_device *dev)
tunnels_wc[0] = NULL;
write_unlock_bh(&ipip_lock);
} else
- ipip_tunnel_unlink((struct ip_tunnel*)dev->priv);
+ ipip_tunnel_unlink(netdev_priv(dev));
dev_put(dev);
}
@@ -443,7 +444,7 @@ out:
skb2->dst->ops->update_pmtu(skb2->dst, rel_info);
rel_info = htonl(rel_info);
} else if (type == ICMP_TIME_EXCEEDED) {
- struct ip_tunnel *t = (struct ip_tunnel*)skb2->dev->priv;
+ struct ip_tunnel *t = netdev_priv(skb2->dev);
if (t->parms.iph.ttl) {
rel_type = ICMP_DEST_UNREACH;
rel_code = ICMP_HOST_UNREACH;
@@ -514,7 +515,7 @@ out:
static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv;
+ struct ip_tunnel *tunnel = netdev_priv(dev);
struct net_device_stats *stats = &tunnel->stat;
struct iphdr *tiph = &tunnel->parms.iph;
u8 tos = tunnel->parms.iph.tos;
@@ -621,6 +622,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
skb->h.raw = skb->nh.raw;
skb->nh.raw = skb_push(skb, sizeof(struct iphdr));
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE|IPSKB_XFRM_TRANSFORMED);
dst_release(skb->dst);
skb->dst = &rt->u.dst;
@@ -673,7 +675,7 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
t = ipip_tunnel_locate(&p, 0);
}
if (t == NULL)
- t = (struct ip_tunnel*)dev->priv;
+ t = netdev_priv(dev);
memcpy(&p, &t->parms, sizeof(p));
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
err = -EFAULT;
@@ -710,7 +712,7 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
err = -EINVAL;
break;
}
- t = (struct ip_tunnel*)dev->priv;
+ t = netdev_priv(dev);
ipip_tunnel_unlink(t);
t->parms.iph.saddr = p.iph.saddr;
t->parms.iph.daddr = p.iph.daddr;
@@ -764,7 +766,7 @@ done:
static struct net_device_stats *ipip_tunnel_get_stats(struct net_device *dev)
{
- return &(((struct ip_tunnel*)dev->priv)->stat);
+ return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
}
static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
@@ -799,7 +801,7 @@ static int ipip_tunnel_init(struct net_device *dev)
struct ip_tunnel *tunnel;
struct iphdr *iph;
- tunnel = (struct ip_tunnel*)dev->priv;
+ tunnel = netdev_priv(dev);
iph = &tunnel->parms.iph;
tunnel->dev = dev;
@@ -837,7 +839,7 @@ static int ipip_tunnel_init(struct net_device *dev)
static int __init ipip_fb_tunnel_init(struct net_device *dev)
{
- struct ip_tunnel *tunnel = dev->priv;
+ struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
tunnel->dev = dev;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 9a5c0ce7ff3..5c94c222e3f 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -33,6 +33,7 @@
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
@@ -178,8 +179,8 @@ static int reg_vif_num = -1;
static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
read_lock(&mrt_lock);
- ((struct net_device_stats*)dev->priv)->tx_bytes += skb->len;
- ((struct net_device_stats*)dev->priv)->tx_packets++;
+ ((struct net_device_stats*)netdev_priv(dev))->tx_bytes += skb->len;
+ ((struct net_device_stats*)netdev_priv(dev))->tx_packets++;
ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT);
read_unlock(&mrt_lock);
kfree_skb(skb);
@@ -188,7 +189,7 @@ static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
static struct net_device_stats *reg_vif_get_stats(struct net_device *dev)
{
- return (struct net_device_stats*)dev->priv;
+ return (struct net_device_stats*)netdev_priv(dev);
}
static void reg_vif_setup(struct net_device *dev)
@@ -1149,8 +1150,8 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
if (vif->flags & VIFF_REGISTER) {
vif->pkt_out++;
vif->bytes_out+=skb->len;
- ((struct net_device_stats*)vif->dev->priv)->tx_bytes += skb->len;
- ((struct net_device_stats*)vif->dev->priv)->tx_packets++;
+ ((struct net_device_stats*)netdev_priv(vif->dev))->tx_bytes += skb->len;
+ ((struct net_device_stats*)netdev_priv(vif->dev))->tx_packets++;
ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
kfree_skb(skb);
return;
@@ -1210,8 +1211,8 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
if (vif->flags & VIFF_TUNNEL) {
ip_encap(skb, vif->local, vif->remote);
/* FIXME: extra output firewall step used to be here. --RR */
- ((struct ip_tunnel *)vif->dev->priv)->stat.tx_packets++;
- ((struct ip_tunnel *)vif->dev->priv)->stat.tx_bytes+=skb->len;
+ ((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_packets++;
+ ((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_bytes+=skb->len;
}
IPCB(skb)->flags |= IPSKB_FORWARDED;
@@ -1467,8 +1468,8 @@ int pim_rcv_v1(struct sk_buff * skb)
skb->pkt_type = PACKET_HOST;
dst_release(skb->dst);
skb->dst = NULL;
- ((struct net_device_stats*)reg_dev->priv)->rx_bytes += skb->len;
- ((struct net_device_stats*)reg_dev->priv)->rx_packets++;
+ ((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len;
+ ((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++;
nf_reset(skb);
netif_rx(skb);
dev_put(reg_dev);
@@ -1522,8 +1523,8 @@ static int pim_rcv(struct sk_buff * skb)
skb->ip_summed = 0;
skb->pkt_type = PACKET_HOST;
dst_release(skb->dst);
- ((struct net_device_stats*)reg_dev->priv)->rx_bytes += skb->len;
- ((struct net_device_stats*)reg_dev->priv)->rx_packets++;
+ ((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len;
+ ((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++;
skb->dst = NULL;
nf_reset(skb);
netif_rx(skb);
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index c935c5086d3..7f0288b25fa 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c
index 3b87482049c..52c12e9edbb 100644
--- a/net/ipv4/ipvs/ip_vs_xmit.c
+++ b/net/ipv4/ipvs/ip_vs_xmit.c
@@ -322,7 +322,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
struct net_device *tdev; /* Device to other host */
struct iphdr *old_iph = skb->nh.iph;
u8 tos = old_iph->tos;
- u16 df = old_iph->frag_off;
+ __be16 df = old_iph->frag_off;
struct iphdr *iph; /* Our new IP header */
int max_headroom; /* The extra header space needed */
int mtu;
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index ae0779d82c5..52a3d7c5790 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -1,17 +1,11 @@
/* IPv4 specific functions of netfilter core */
-
-#include <linux/config.h>
-#ifdef CONFIG_NETFILTER
-
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
-
-#include <linux/tcp.h>
-#include <linux/udp.h>
-#include <linux/icmp.h>
-#include <net/route.h>
#include <linux/ip.h>
+#include <net/route.h>
+#include <net/xfrm.h>
+#include <net/ip.h>
/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
int ip_route_me_harder(struct sk_buff **pskb)
@@ -33,7 +27,6 @@ int ip_route_me_harder(struct sk_buff **pskb)
#ifdef CONFIG_IP_ROUTE_FWMARK
fl.nl_u.ip4_u.fwmark = (*pskb)->nfmark;
#endif
- fl.proto = iph->protocol;
if (ip_route_output_key(&rt, &fl) != 0)
return -1;
@@ -60,6 +53,13 @@ int ip_route_me_harder(struct sk_buff **pskb)
if ((*pskb)->dst->error)
return -1;
+#ifdef CONFIG_XFRM
+ if (!(IPCB(*pskb)->flags & IPSKB_XFRM_TRANSFORMED) &&
+ xfrm_decode_session(*pskb, &fl, AF_INET) == 0)
+ if (xfrm_lookup(&(*pskb)->dst, &fl, (*pskb)->sk, 0))
+ return -1;
+#endif
+
/* Change in oif may mean change in hh_len. */
hh_len = (*pskb)->dst->dev->hard_header_len;
if (skb_headroom(*pskb) < hh_len) {
@@ -78,6 +78,9 @@ int ip_route_me_harder(struct sk_buff **pskb)
}
EXPORT_SYMBOL(ip_route_me_harder);
+void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *);
+EXPORT_SYMBOL(ip_nat_decode_session);
+
/*
* Extra routing may needed on local out, as the QUEUE target never
* returns control to the table.
@@ -135,5 +138,3 @@ static void fini(void)
module_init(init);
module_exit(fini);
-
-#endif /* CONFIG_NETFILTER */
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 88a60650e6b..db783036e4d 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -182,6 +182,7 @@ config IP_NF_QUEUE
config IP_NF_IPTABLES
tristate "IP tables support (required for filtering/masq/NAT)"
+ depends on NETFILTER_XTABLES
help
iptables is a general, extensible packet identification framework.
The packet filtering and full NAT (masquerading, port forwarding,
@@ -191,16 +192,6 @@ config IP_NF_IPTABLES
To compile it as a module, choose M here. If unsure, say N.
# The matches.
-config IP_NF_MATCH_LIMIT
- tristate "limit match support"
- depends on IP_NF_IPTABLES
- help
- limit matching allows you to control the rate at which a rule can be
- matched: mainly useful in combination with the LOG target ("LOG
- target support", below) and to avoid some Denial of Service attacks.
-
- To compile it as a module, choose M here. If unsure, say N.
-
config IP_NF_MATCH_IPRANGE
tristate "IP range match support"
depends on IP_NF_IPTABLES
@@ -210,37 +201,6 @@ config IP_NF_MATCH_IPRANGE
To compile it as a module, choose M here. If unsure, say N.
-config IP_NF_MATCH_MAC
- tristate "MAC address match support"
- depends on IP_NF_IPTABLES
- help
- MAC matching allows you to match packets based on the source
- Ethernet address of the packet.
-
- To compile it as a module, choose M here. If unsure, say N.
-
-config IP_NF_MATCH_PKTTYPE
- tristate "Packet type match support"
- depends on IP_NF_IPTABLES
- help
- Packet type matching allows you to match a packet by
- its "class", eg. BROADCAST, MULTICAST, ...
-
- Typical usage:
- iptables -A INPUT -m pkttype --pkt-type broadcast -j LOG
-
- To compile it as a module, choose M here. If unsure, say N.
-
-config IP_NF_MATCH_MARK
- tristate "netfilter MARK match support"
- depends on IP_NF_IPTABLES
- help
- Netfilter mark matching allows you to match packets based on the
- `nfmark' value in the packet. This can be set by the MARK target
- (see below).
-
- To compile it as a module, choose M here. If unsure, say N.
-
config IP_NF_MATCH_MULTIPORT
tristate "Multiple port match support"
depends on IP_NF_IPTABLES
@@ -301,15 +261,6 @@ config IP_NF_MATCH_AH_ESP
To compile it as a module, choose M here. If unsure, say N.
-config IP_NF_MATCH_LENGTH
- tristate "LENGTH match support"
- depends on IP_NF_IPTABLES
- help
- This option allows you to match the length of a packet against a
- specific value or range of values.
-
- To compile it as a module, choose M here. If unsure, say N.
-
config IP_NF_MATCH_TTL
tristate "TTL match support"
depends on IP_NF_IPTABLES
@@ -319,50 +270,6 @@ config IP_NF_MATCH_TTL
To compile it as a module, choose M here. If unsure, say N.
-config IP_NF_MATCH_TCPMSS
- tristate "tcpmss match support"
- depends on IP_NF_IPTABLES
- help
- This option adds a `tcpmss' match, which allows you to examine the
- MSS value of TCP SYN packets, which control the maximum packet size
- for that connection.
-
- To compile it as a module, choose M here. If unsure, say N.
-
-config IP_NF_MATCH_HELPER
- tristate "Helper match support"
- depends on IP_NF_IPTABLES
- depends on IP_NF_CONNTRACK || NF_CONNTRACK_IPV4
- help
- Helper matching allows you to match packets in dynamic connections
- tracked by a conntrack-helper, ie. ip_conntrack_ftp
-
- To compile it as a module, choose M here. If unsure, say Y.
-
-config IP_NF_MATCH_STATE
- tristate "Connection state match support"
- depends on IP_NF_IPTABLES
- depends on IP_NF_CONNTRACK || NF_CONNTRACK_IPV4
- help
- Connection state matching allows you to match packets based on their
- relationship to a tracked connection (ie. previous packets). This
- is a powerful tool for packet classification.
-
- To compile it as a module, choose M here. If unsure, say N.
-
-config IP_NF_MATCH_CONNTRACK
- tristate "Connection tracking match support"
- depends on IP_NF_IPTABLES
- depends on IP_NF_CONNTRACK || NF_CONNTRACK_IPV4
- help
- This is a general conntrack match module, a superset of the state match.
-
- It allows matching on additional conntrack information, which is
- useful in complex configurations, such as NAT gateways with multiple
- internet links or tunnels.
-
- To compile it as a module, choose M here. If unsure, say N.
-
config IP_NF_MATCH_OWNER
tristate "Owner match support"
depends on IP_NF_IPTABLES
@@ -372,15 +279,6 @@ config IP_NF_MATCH_OWNER
To compile it as a module, choose M here. If unsure, say N.
-config IP_NF_MATCH_PHYSDEV
- tristate "Physdev match support"
- depends on IP_NF_IPTABLES && BRIDGE_NETFILTER
- help
- Physdev packet matching matches against the physical bridge ports
- the IP packet arrived on or will leave by.
-
- To compile it as a module, choose M here. If unsure, say N.
-
config IP_NF_MATCH_ADDRTYPE
tristate 'address type match support'
depends on IP_NF_IPTABLES
@@ -391,75 +289,6 @@ config IP_NF_MATCH_ADDRTYPE
If you want to compile it as a module, say M here and read
<file:Documentation/modules.txt>. If unsure, say `N'.
-config IP_NF_MATCH_REALM
- tristate 'realm match support'
- depends on IP_NF_IPTABLES
- select NET_CLS_ROUTE
- help
- This option adds a `realm' match, which allows you to use the realm
- key from the routing subsystem inside iptables.
-
- This match pretty much resembles the CONFIG_NET_CLS_ROUTE4 option
- in tc world.
-
- If you want to compile it as a module, say M here and read
- <file:Documentation/modules.txt>. If unsure, say `N'.
-
-config IP_NF_MATCH_SCTP
- tristate 'SCTP protocol match support'
- depends on IP_NF_IPTABLES
- help
- With this option enabled, you will be able to use the iptables
- `sctp' match in order to match on SCTP source/destination ports
- and SCTP chunk types.
-
- If you want to compile it as a module, say M here and read
- <file:Documentation/modules.txt>. If unsure, say `N'.
-
-config IP_NF_MATCH_DCCP
- tristate 'DCCP protocol match support'
- depends on IP_NF_IPTABLES
- help
- With this option enabled, you will be able to use the iptables
- `dccp' match in order to match on DCCP source/destination ports
- and DCCP flags.
-
- If you want to compile it as a module, say M here and read
- <file:Documentation/modules.txt>. If unsure, say `N'.
-
-config IP_NF_MATCH_COMMENT
- tristate 'comment match support'
- depends on IP_NF_IPTABLES
- help
- This option adds a `comment' dummy-match, which allows you to put
- comments in your iptables ruleset.
-
- If you want to compile it as a module, say M here and read
- <file:Documentation/modules.txt>. If unsure, say `N'.
-
-config IP_NF_MATCH_CONNMARK
- tristate 'Connection mark match support'
- depends on IP_NF_IPTABLES
- depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK_IPV4)
- help
- This option adds a `connmark' match, which allows you to match the
- connection mark value previously set for the session by `CONNMARK'.
-
- If you want to compile it as a module, say M here and read
- <file:Documentation/modules.txt>. The module will be called
- ipt_connmark.o. If unsure, say `N'.
-
-config IP_NF_MATCH_CONNBYTES
- tristate 'Connection byte/packet counter match support'
- depends on IP_NF_IPTABLES
- depends on (IP_NF_CONNTRACK && IP_NF_CT_ACCT) || (NF_CT_ACCT && NF_CONNTRACK_IPV4)
- help
- This option adds a `connbytes' match, which allows you to match the
- number of bytes and/or packets for each direction within a connection.
-
- If you want to compile it as a module, say M here and read
- <file:Documentation/modules.txt>. If unsure, say `N'.
-
config IP_NF_MATCH_HASHLIMIT
tristate 'hashlimit match support'
depends on IP_NF_IPTABLES
@@ -474,18 +303,15 @@ config IP_NF_MATCH_HASHLIMIT
destination IP' or `500pps from any given source IP' with a single
IPtables rule.
-config IP_NF_MATCH_STRING
- tristate 'string match support'
- depends on IP_NF_IPTABLES
- select TEXTSEARCH
- select TEXTSEARCH_KMP
- select TEXTSEARCH_BM
- select TEXTSEARCH_FSM
- help
- This option adds a `string' match, which allows you to look for
- pattern matchings in packets.
+config IP_NF_MATCH_POLICY
+ tristate "IPsec policy match support"
+ depends on IP_NF_IPTABLES && XFRM
+ help
+ Policy matching allows you to match packets based on the
+ IPsec policy that was used during decapsulation/will
+ be used during encapsulation.
- To compile it as a module, choose M here. If unsure, say N.
+ To compile it as a module, choose M here. If unsure, say N.
# `filter', generic and specific targets
config IP_NF_FILTER
@@ -562,17 +388,6 @@ config IP_NF_TARGET_TCPMSS
To compile it as a module, choose M here. If unsure, say N.
-config IP_NF_TARGET_NFQUEUE
- tristate "NFQUEUE Target Support"
- depends on IP_NF_IPTABLES
- help
- This Target replaced the old obsolete QUEUE target.
-
- As opposed to QUEUE, it supports 65535 different queues,
- not just one.
-
- To compile it as a module, choose M here. If unsure, say N.
-
# NAT + specific targets
config IP_NF_NAT
tristate "Full NAT"
@@ -725,31 +540,6 @@ config IP_NF_TARGET_DSCP
To compile it as a module, choose M here. If unsure, say N.
-config IP_NF_TARGET_MARK
- tristate "MARK target support"
- depends on IP_NF_MANGLE
- help
- This option adds a `MARK' target, which allows you to create rules
- in the `mangle' table which alter the netfilter mark (nfmark) field
- associated with the packet prior to routing. This can change
- the routing method (see `Use netfilter MARK value as routing
- key') and can also be used by other subsystems to change their
- behavior.
-
- To compile it as a module, choose M here. If unsure, say N.
-
-config IP_NF_TARGET_CLASSIFY
- tristate "CLASSIFY target support"
- depends on IP_NF_MANGLE
- help
- This option adds a `CLASSIFY' target, which enables the user to set
- the priority of a packet. Some qdiscs can use this value for
- classification, among these are:
-
- atm, cbq, dsmark, pfifo_fast, htb, prio
-
- To compile it as a module, choose M here. If unsure, say N.
-
config IP_NF_TARGET_TTL
tristate 'TTL target support'
depends on IP_NF_MANGLE
@@ -764,19 +554,6 @@ config IP_NF_TARGET_TTL
To compile it as a module, choose M here. If unsure, say N.
-config IP_NF_TARGET_CONNMARK
- tristate 'CONNMARK target support'
- depends on IP_NF_MANGLE
- depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK_IPV4)
- help
- This option adds a `CONNMARK' target, which allows one to manipulate
- the connection mark value. Similar to the MARK target, but
- affects the connection mark value rather than the packet mark value.
-
- If you want to compile it as a module, say M here and read
- <file:Documentation/modules.txt>. The module will be called
- ipt_CONNMARK.o. If unsure, say `N'.
-
config IP_NF_TARGET_CLUSTERIP
tristate "CLUSTERIP target support (EXPERIMENTAL)"
depends on IP_NF_MANGLE && EXPERIMENTAL
@@ -800,23 +577,10 @@ config IP_NF_RAW
If you want to compile it as a module, say M here and read
<file:Documentation/modules.txt>. If unsure, say `N'.
-config IP_NF_TARGET_NOTRACK
- tristate 'NOTRACK target support'
- depends on IP_NF_RAW
- depends on IP_NF_CONNTRACK || NF_CONNTRACK_IPV4
- help
- The NOTRACK target allows a select rule to specify
- which packets *not* to enter the conntrack/NAT
- subsystem with all the consequences (no ICMP error tracking,
- no protocol helpers for the selected packets).
-
- If you want to compile it as a module, say M here and read
- <file:Documentation/modules.txt>. If unsure, say `N'.
-
-
# ARP tables
config IP_NF_ARPTABLES
tristate "ARP tables support"
+ depends on NETFILTER_XTABLES
help
arptables is a general, extensible packet identification framework.
The ARP packet filtering and mangling (manipulation)subsystems
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index d0a447e520a..bcefe64b931 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -47,14 +47,8 @@ obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
# matches
obj-$(CONFIG_IP_NF_MATCH_HELPER) += ipt_helper.o
-obj-$(CONFIG_IP_NF_MATCH_LIMIT) += ipt_limit.o
obj-$(CONFIG_IP_NF_MATCH_HASHLIMIT) += ipt_hashlimit.o
-obj-$(CONFIG_IP_NF_MATCH_SCTP) += ipt_sctp.o
-obj-$(CONFIG_IP_NF_MATCH_DCCP) += ipt_dccp.o
-obj-$(CONFIG_IP_NF_MATCH_MARK) += ipt_mark.o
-obj-$(CONFIG_IP_NF_MATCH_MAC) += ipt_mac.o
obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o
-obj-$(CONFIG_IP_NF_MATCH_PKTTYPE) += ipt_pkttype.o
obj-$(CONFIG_IP_NF_MATCH_MULTIPORT) += ipt_multiport.o
obj-$(CONFIG_IP_NF_MATCH_OWNER) += ipt_owner.o
obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o
@@ -62,39 +56,25 @@ obj-$(CONFIG_IP_NF_MATCH_RECENT) += ipt_recent.o
obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
obj-$(CONFIG_IP_NF_MATCH_DSCP) += ipt_dscp.o
obj-$(CONFIG_IP_NF_MATCH_AH_ESP) += ipt_ah.o ipt_esp.o
-obj-$(CONFIG_IP_NF_MATCH_LENGTH) += ipt_length.o
obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
-obj-$(CONFIG_IP_NF_MATCH_STATE) += ipt_state.o
-obj-$(CONFIG_IP_NF_MATCH_CONNMARK) += ipt_connmark.o
-obj-$(CONFIG_IP_NF_MATCH_CONNTRACK) += ipt_conntrack.o
-obj-$(CONFIG_IP_NF_MATCH_CONNBYTES) += ipt_connbytes.o
-obj-$(CONFIG_IP_NF_MATCH_TCPMSS) += ipt_tcpmss.o
-obj-$(CONFIG_IP_NF_MATCH_REALM) += ipt_realm.o
obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
-obj-$(CONFIG_IP_NF_MATCH_PHYSDEV) += ipt_physdev.o
-obj-$(CONFIG_IP_NF_MATCH_COMMENT) += ipt_comment.o
-obj-$(CONFIG_IP_NF_MATCH_STRING) += ipt_string.o
+obj-$(CONFIG_IP_NF_MATCH_POLICY) += ipt_policy.o
# targets
obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
obj-$(CONFIG_IP_NF_TARGET_TOS) += ipt_TOS.o
obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
obj-$(CONFIG_IP_NF_TARGET_DSCP) += ipt_DSCP.o
-obj-$(CONFIG_IP_NF_TARGET_MARK) += ipt_MARK.o
obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
obj-$(CONFIG_IP_NF_TARGET_SAME) += ipt_SAME.o
-obj-$(CONFIG_IP_NF_TARGET_CLASSIFY) += ipt_CLASSIFY.o
obj-$(CONFIG_IP_NF_NAT_SNMP_BASIC) += ip_nat_snmp_basic.o
obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
-obj-$(CONFIG_IP_NF_TARGET_CONNMARK) += ipt_CONNMARK.o
obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
obj-$(CONFIG_IP_NF_TARGET_TCPMSS) += ipt_TCPMSS.o
-obj-$(CONFIG_IP_NF_TARGET_NOTRACK) += ipt_NOTRACK.o
obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
-obj-$(CONFIG_IP_NF_TARGET_NFQUEUE) += ipt_NFQUEUE.o
# generic ARP tables
obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index bba15630469..afe3d8f8177 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
+#include <linux/capability.h>
#include <linux/if_arp.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
@@ -23,6 +24,7 @@
#include <asm/uaccess.h>
#include <asm/semaphore.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp/arp_tables.h>
MODULE_LICENSE("GPL");
@@ -54,28 +56,9 @@ do { \
#else
#define ARP_NF_ASSERT(x)
#endif
-#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
-static DECLARE_MUTEX(arpt_mutex);
-
-#define ASSERT_READ_LOCK(x) ARP_NF_ASSERT(down_trylock(&arpt_mutex) != 0)
-#define ASSERT_WRITE_LOCK(x) ARP_NF_ASSERT(down_trylock(&arpt_mutex) != 0)
#include <linux/netfilter_ipv4/listhelp.h>
-struct arpt_table_info {
- unsigned int size;
- unsigned int number;
- unsigned int initial_entries;
- unsigned int hook_entry[NF_ARP_NUMHOOKS];
- unsigned int underflow[NF_ARP_NUMHOOKS];
- void *entries[NR_CPUS];
-};
-
-static LIST_HEAD(arpt_target);
-static LIST_HEAD(arpt_tables);
-#define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
-#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)
-
static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
char *hdr_addr, int len)
{
@@ -222,9 +205,9 @@ static inline int arp_checkentry(const struct arpt_arp *arp)
}
static unsigned int arpt_error(struct sk_buff **pskb,
- unsigned int hooknum,
const struct net_device *in,
const struct net_device *out,
+ unsigned int hooknum,
const void *targinfo,
void *userinfo)
{
@@ -253,6 +236,7 @@ unsigned int arpt_do_table(struct sk_buff **pskb,
struct arpt_entry *e, *back;
const char *indev, *outdev;
void *table_base;
+ struct xt_table_info *private = table->private;
/* ARP header, plus 2 device addresses, plus 2 IP addresses. */
if (!pskb_may_pull((*pskb), (sizeof(struct arphdr) +
@@ -264,9 +248,9 @@ unsigned int arpt_do_table(struct sk_buff **pskb,
outdev = out ? out->name : nulldevname;
read_lock_bh(&table->lock);
- table_base = (void *)table->private->entries[smp_processor_id()];
- e = get_entry(table_base, table->private->hook_entry[hook]);
- back = get_entry(table_base, table->private->underflow[hook]);
+ table_base = (void *)private->entries[smp_processor_id()];
+ e = get_entry(table_base, private->hook_entry[hook]);
+ back = get_entry(table_base, private->underflow[hook]);
arp = (*pskb)->nh.arph;
do {
@@ -314,8 +298,8 @@ unsigned int arpt_do_table(struct sk_buff **pskb,
* abs. verdicts
*/
verdict = t->u.kernel.target->target(pskb,
- hook,
in, out,
+ hook,
t->data,
userdata);
@@ -340,106 +324,6 @@ unsigned int arpt_do_table(struct sk_buff **pskb,
return verdict;
}
-/*
- * These are weird, but module loading must not be done with mutex
- * held (since they will register), and we have to have a single
- * function to use try_then_request_module().
- */
-
-/* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */
-static inline struct arpt_table *find_table_lock(const char *name)
-{
- struct arpt_table *t;
-
- if (down_interruptible(&arpt_mutex) != 0)
- return ERR_PTR(-EINTR);
-
- list_for_each_entry(t, &arpt_tables, list)
- if (strcmp(t->name, name) == 0 && try_module_get(t->me))
- return t;
- up(&arpt_mutex);
- return NULL;
-}
-
-
-/* Find target, grabs ref. Returns ERR_PTR() on error. */
-static inline struct arpt_target *find_target(const char *name, u8 revision)
-{
- struct arpt_target *t;
- int err = 0;
-
- if (down_interruptible(&arpt_mutex) != 0)
- return ERR_PTR(-EINTR);
-
- list_for_each_entry(t, &arpt_target, list) {
- if (strcmp(t->name, name) == 0) {
- if (t->revision == revision) {
- if (try_module_get(t->me)) {
- up(&arpt_mutex);
- return t;
- }
- } else
- err = -EPROTOTYPE; /* Found something. */
- }
- }
- up(&arpt_mutex);
- return ERR_PTR(err);
-}
-
-struct arpt_target *arpt_find_target(const char *name, u8 revision)
-{
- struct arpt_target *target;
-
- target = try_then_request_module(find_target(name, revision),
- "arpt_%s", name);
- if (IS_ERR(target) || !target)
- return NULL;
- return target;
-}
-
-static int target_revfn(const char *name, u8 revision, int *bestp)
-{
- struct arpt_target *t;
- int have_rev = 0;
-
- list_for_each_entry(t, &arpt_target, list) {
- if (strcmp(t->name, name) == 0) {
- if (t->revision > *bestp)
- *bestp = t->revision;
- if (t->revision == revision)
- have_rev =1;
- }
- }
- return have_rev;
-}
-
-/* Returns true or false (if no such extension at all) */
-static inline int find_revision(const char *name, u8 revision,
- int (*revfn)(const char *, u8, int *),
- int *err)
-{
- int have_rev, best = -1;
-
- if (down_interruptible(&arpt_mutex) != 0) {
- *err = -EINTR;
- return 1;
- }
- have_rev = revfn(name, revision, &best);
- up(&arpt_mutex);
-
- /* Nothing at all? Return 0 to try loading module. */
- if (best == -1) {
- *err = -ENOENT;
- return 0;
- }
-
- *err = best;
- if (!have_rev)
- *err = -EPROTONOSUPPORT;
- return 1;
-}
-
-
/* All zeroes == unconditional rule. */
static inline int unconditional(const struct arpt_arp *arp)
{
@@ -455,7 +339,7 @@ static inline int unconditional(const struct arpt_arp *arp)
/* Figures out from what hook each rule can be called: returns 0 if
* there are loops. Puts hook bitmask in comefrom.
*/
-static int mark_source_chains(struct arpt_table_info *newinfo,
+static int mark_source_chains(struct xt_table_info *newinfo,
unsigned int valid_hooks, void *entry0)
{
unsigned int hook;
@@ -586,8 +470,8 @@ static inline int check_entry(struct arpt_entry *e, const char *name, unsigned i
}
t = arpt_get_target(e);
- target = try_then_request_module(find_target(t->u.user.name,
- t->u.user.revision),
+ target = try_then_request_module(xt_find_target(NF_ARP, t->u.user.name,
+ t->u.user.revision),
"arpt_%s", t->u.user.name);
if (IS_ERR(target) || !target) {
duprintf("check_entry: `%s' not found\n", t->u.user.name);
@@ -621,7 +505,7 @@ out:
}
static inline int check_entry_size_and_hooks(struct arpt_entry *e,
- struct arpt_table_info *newinfo,
+ struct xt_table_info *newinfo,
unsigned char *base,
unsigned char *limit,
const unsigned int *hook_entries,
@@ -655,7 +539,7 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
< 0 (not ARPT_RETURN). --RR */
/* Clear counters and comefrom */
- e->counters = ((struct arpt_counters) { 0, 0 });
+ e->counters = ((struct xt_counters) { 0, 0 });
e->comefrom = 0;
(*i)++;
@@ -682,7 +566,7 @@ static inline int cleanup_entry(struct arpt_entry *e, unsigned int *i)
*/
static int translate_table(const char *name,
unsigned int valid_hooks,
- struct arpt_table_info *newinfo,
+ struct xt_table_info *newinfo,
void *entry0,
unsigned int size,
unsigned int number,
@@ -763,34 +647,9 @@ static int translate_table(const char *name,
return ret;
}
-static struct arpt_table_info *replace_table(struct arpt_table *table,
- unsigned int num_counters,
- struct arpt_table_info *newinfo,
- int *error)
-{
- struct arpt_table_info *oldinfo;
-
- /* Do the substitution. */
- write_lock_bh(&table->lock);
- /* Check inside lock: is the old number correct? */
- if (num_counters != table->private->number) {
- duprintf("num_counters != table->private->number (%u/%u)\n",
- num_counters, table->private->number);
- write_unlock_bh(&table->lock);
- *error = -EAGAIN;
- return NULL;
- }
- oldinfo = table->private;
- table->private = newinfo;
- newinfo->initial_entries = oldinfo->initial_entries;
- write_unlock_bh(&table->lock);
-
- return oldinfo;
-}
-
/* Gets counters. */
static inline int add_entry_to_counter(const struct arpt_entry *e,
- struct arpt_counters total[],
+ struct xt_counters total[],
unsigned int *i)
{
ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
@@ -800,7 +659,7 @@ static inline int add_entry_to_counter(const struct arpt_entry *e,
}
static inline int set_entry_to_counter(const struct arpt_entry *e,
- struct arpt_counters total[],
+ struct xt_counters total[],
unsigned int *i)
{
SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
@@ -809,8 +668,8 @@ static inline int set_entry_to_counter(const struct arpt_entry *e,
return 0;
}
-static void get_counters(const struct arpt_table_info *t,
- struct arpt_counters counters[])
+static void get_counters(const struct xt_table_info *t,
+ struct xt_counters counters[])
{
unsigned int cpu;
unsigned int i;
@@ -848,7 +707,8 @@ static int copy_entries_to_user(unsigned int total_size,
{
unsigned int off, num, countersize;
struct arpt_entry *e;
- struct arpt_counters *counters;
+ struct xt_counters *counters;
+ struct xt_table_info *private = table->private;
int ret = 0;
void *loc_cpu_entry;
@@ -856,18 +716,18 @@ static int copy_entries_to_user(unsigned int total_size,
* (other than comefrom, which userspace doesn't care
* about).
*/
- countersize = sizeof(struct arpt_counters) * table->private->number;
- counters = vmalloc(countersize);
+ countersize = sizeof(struct xt_counters) * private->number;
+ counters = vmalloc_node(countersize, numa_node_id());
if (counters == NULL)
return -ENOMEM;
/* First, sum counters... */
write_lock_bh(&table->lock);
- get_counters(table->private, counters);
+ get_counters(private, counters);
write_unlock_bh(&table->lock);
- loc_cpu_entry = table->private->entries[raw_smp_processor_id()];
+ loc_cpu_entry = private->entries[raw_smp_processor_id()];
/* ... then copy entire thing ... */
if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
ret = -EFAULT;
@@ -910,75 +770,34 @@ static int get_entries(const struct arpt_get_entries *entries,
int ret;
struct arpt_table *t;
- t = find_table_lock(entries->name);
+ t = xt_find_table_lock(NF_ARP, entries->name);
if (t || !IS_ERR(t)) {
+ struct xt_table_info *private = t->private;
duprintf("t->private->number = %u\n",
- t->private->number);
- if (entries->size == t->private->size)
- ret = copy_entries_to_user(t->private->size,
+ private->number);
+ if (entries->size == private->size)
+ ret = copy_entries_to_user(private->size,
t, uptr->entrytable);
else {
duprintf("get_entries: I've got %u not %u!\n",
- t->private->size,
- entries->size);
+ private->size, entries->size);
ret = -EINVAL;
}
module_put(t->me);
- up(&arpt_mutex);
+ xt_table_unlock(t);
} else
ret = t ? PTR_ERR(t) : -ENOENT;
return ret;
}
-static void free_table_info(struct arpt_table_info *info)
-{
- int cpu;
- for_each_cpu(cpu) {
- if (info->size <= PAGE_SIZE)
- kfree(info->entries[cpu]);
- else
- vfree(info->entries[cpu]);
- }
- kfree(info);
-}
-
-static struct arpt_table_info *alloc_table_info(unsigned int size)
-{
- struct arpt_table_info *newinfo;
- int cpu;
-
- newinfo = kzalloc(sizeof(struct arpt_table_info), GFP_KERNEL);
- if (!newinfo)
- return NULL;
-
- newinfo->size = size;
-
- for_each_cpu(cpu) {
- if (size <= PAGE_SIZE)
- newinfo->entries[cpu] = kmalloc_node(size,
- GFP_KERNEL,
- cpu_to_node(cpu));
- else
- newinfo->entries[cpu] = vmalloc_node(size,
- cpu_to_node(cpu));
-
- if (newinfo->entries[cpu] == NULL) {
- free_table_info(newinfo);
- return NULL;
- }
- }
-
- return newinfo;
-}
-
static int do_replace(void __user *user, unsigned int len)
{
int ret;
struct arpt_replace tmp;
struct arpt_table *t;
- struct arpt_table_info *newinfo, *oldinfo;
- struct arpt_counters *counters;
+ struct xt_table_info *newinfo, *oldinfo;
+ struct xt_counters *counters;
void *loc_cpu_entry, *loc_cpu_old_entry;
if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
@@ -988,11 +807,7 @@ static int do_replace(void __user *user, unsigned int len)
if (len != sizeof(tmp) + tmp.size)
return -ENOPROTOOPT;
- /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
- if ((SMP_ALIGN(tmp.size) >> PAGE_SHIFT) + 2 > num_physpages)
- return -ENOMEM;
-
- newinfo = alloc_table_info(tmp.size);
+ newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
return -ENOMEM;
@@ -1004,7 +819,7 @@ static int do_replace(void __user *user, unsigned int len)
goto free_newinfo;
}
- counters = vmalloc(tmp.num_counters * sizeof(struct arpt_counters));
+ counters = vmalloc(tmp.num_counters * sizeof(struct xt_counters));
if (!counters) {
ret = -ENOMEM;
goto free_newinfo;
@@ -1018,7 +833,7 @@ static int do_replace(void __user *user, unsigned int len)
duprintf("arp_tables: Translated table\n");
- t = try_then_request_module(find_table_lock(tmp.name),
+ t = try_then_request_module(xt_find_table_lock(NF_ARP, tmp.name),
"arptable_%s", tmp.name);
if (!t || IS_ERR(t)) {
ret = t ? PTR_ERR(t) : -ENOENT;
@@ -1033,7 +848,7 @@ static int do_replace(void __user *user, unsigned int len)
goto put_module;
}
- oldinfo = replace_table(t, tmp.num_counters, newinfo, &ret);
+ oldinfo = xt_replace_table(t, tmp.num_counters, newinfo, &ret);
if (!oldinfo)
goto put_module;
@@ -1053,23 +868,23 @@ static int do_replace(void __user *user, unsigned int len)
loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
ARPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
- free_table_info(oldinfo);
+ xt_free_table_info(oldinfo);
if (copy_to_user(tmp.counters, counters,
- sizeof(struct arpt_counters) * tmp.num_counters) != 0)
+ sizeof(struct xt_counters) * tmp.num_counters) != 0)
ret = -EFAULT;
vfree(counters);
- up(&arpt_mutex);
+ xt_table_unlock(t);
return ret;
put_module:
module_put(t->me);
- up(&arpt_mutex);
+ xt_table_unlock(t);
free_newinfo_counters_untrans:
ARPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
free_newinfo_counters:
vfree(counters);
free_newinfo:
- free_table_info(newinfo);
+ xt_free_table_info(newinfo);
return ret;
}
@@ -1077,7 +892,7 @@ static int do_replace(void __user *user, unsigned int len)
* and everything is OK.
*/
static inline int add_counter_to_entry(struct arpt_entry *e,
- const struct arpt_counters addme[],
+ const struct xt_counters addme[],
unsigned int *i)
{
@@ -1090,15 +905,16 @@ static inline int add_counter_to_entry(struct arpt_entry *e,
static int do_add_counters(void __user *user, unsigned int len)
{
unsigned int i;
- struct arpt_counters_info tmp, *paddc;
+ struct xt_counters_info tmp, *paddc;
struct arpt_table *t;
+ struct xt_table_info *private;
int ret = 0;
void *loc_cpu_entry;
if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
return -EFAULT;
- if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct arpt_counters))
+ if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct xt_counters))
return -EINVAL;
paddc = vmalloc(len);
@@ -1110,29 +926,30 @@ static int do_add_counters(void __user *user, unsigned int len)
goto free;
}
- t = find_table_lock(tmp.name);
+ t = xt_find_table_lock(NF_ARP, tmp.name);
if (!t || IS_ERR(t)) {
ret = t ? PTR_ERR(t) : -ENOENT;
goto free;
}
write_lock_bh(&t->lock);
- if (t->private->number != paddc->num_counters) {
+ private = t->private;
+ if (private->number != paddc->num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
i = 0;
/* Choose the copy that is on our node */
- loc_cpu_entry = t->private->entries[smp_processor_id()];
+ loc_cpu_entry = private->entries[smp_processor_id()];
ARPT_ENTRY_ITERATE(loc_cpu_entry,
- t->private->size,
+ private->size,
add_counter_to_entry,
paddc->counters,
&i);
unlock_up_free:
write_unlock_bh(&t->lock);
- up(&arpt_mutex);
+ xt_table_unlock(t);
module_put(t->me);
free:
vfree(paddc);
@@ -1189,25 +1006,26 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
}
name[ARPT_TABLE_MAXNAMELEN-1] = '\0';
- t = try_then_request_module(find_table_lock(name),
+ t = try_then_request_module(xt_find_table_lock(NF_ARP, name),
"arptable_%s", name);
if (t && !IS_ERR(t)) {
struct arpt_getinfo info;
+ struct xt_table_info *private = t->private;
info.valid_hooks = t->valid_hooks;
- memcpy(info.hook_entry, t->private->hook_entry,
+ memcpy(info.hook_entry, private->hook_entry,
sizeof(info.hook_entry));
- memcpy(info.underflow, t->private->underflow,
+ memcpy(info.underflow, private->underflow,
sizeof(info.underflow));
- info.num_entries = t->private->number;
- info.size = t->private->size;
+ info.num_entries = private->number;
+ info.size = private->size;
strcpy(info.name, name);
if (copy_to_user(user, &info, *len) != 0)
ret = -EFAULT;
else
ret = 0;
- up(&arpt_mutex);
+ xt_table_unlock(t);
module_put(t->me);
} else
ret = t ? PTR_ERR(t) : -ENOENT;
@@ -1232,7 +1050,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
}
case ARPT_SO_GET_REVISION_TARGET: {
- struct arpt_get_revision rev;
+ struct xt_get_revision rev;
if (*len != sizeof(rev)) {
ret = -EINVAL;
@@ -1243,8 +1061,8 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
break;
}
- try_then_request_module(find_revision(rev.name, rev.revision,
- target_revfn, &ret),
+ try_then_request_module(xt_find_revision(NF_ARP, rev.name,
+ rev.revision, 1, &ret),
"arpt_%s", rev.name);
break;
}
@@ -1257,38 +1075,16 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
return ret;
}
-/* Registration hooks for targets. */
-int arpt_register_target(struct arpt_target *target)
-{
- int ret;
-
- ret = down_interruptible(&arpt_mutex);
- if (ret != 0)
- return ret;
-
- list_add(&target->list, &arpt_target);
- up(&arpt_mutex);
-
- return ret;
-}
-
-void arpt_unregister_target(struct arpt_target *target)
-{
- down(&arpt_mutex);
- LIST_DELETE(&arpt_target, target);
- up(&arpt_mutex);
-}
-
int arpt_register_table(struct arpt_table *table,
const struct arpt_replace *repl)
{
int ret;
- struct arpt_table_info *newinfo;
- static struct arpt_table_info bootstrap
+ struct xt_table_info *newinfo;
+ static struct xt_table_info bootstrap
= { 0, 0, 0, { 0 }, { 0 }, { } };
void *loc_cpu_entry;
- newinfo = alloc_table_info(repl->size);
+ newinfo = xt_alloc_table_info(repl->size);
if (!newinfo) {
ret = -ENOMEM;
return ret;
@@ -1303,60 +1099,33 @@ int arpt_register_table(struct arpt_table *table,
repl->num_entries,
repl->hook_entry,
repl->underflow);
+
duprintf("arpt_register_table: translate table gives %d\n", ret);
if (ret != 0) {
- free_table_info(newinfo);
+ xt_free_table_info(newinfo);
return ret;
}
- ret = down_interruptible(&arpt_mutex);
- if (ret != 0) {
- free_table_info(newinfo);
+ if (xt_register_table(table, &bootstrap, newinfo) != 0) {
+ xt_free_table_info(newinfo);
return ret;
}
- /* Don't autoload: we'd eat our tail... */
- if (list_named_find(&arpt_tables, table->name)) {
- ret = -EEXIST;
- goto free_unlock;
- }
-
- /* Simplifies replace_table code. */
- table->private = &bootstrap;
- if (!replace_table(table, 0, newinfo, &ret))
- goto free_unlock;
-
- duprintf("table->private->number = %u\n",
- table->private->number);
-
- /* save number of initial entries */
- table->private->initial_entries = table->private->number;
-
- rwlock_init(&table->lock);
- list_prepend(&arpt_tables, table);
-
- unlock:
- up(&arpt_mutex);
- return ret;
-
- free_unlock:
- free_table_info(newinfo);
- goto unlock;
+ return 0;
}
void arpt_unregister_table(struct arpt_table *table)
{
+ struct xt_table_info *private;
void *loc_cpu_entry;
- down(&arpt_mutex);
- LIST_DELETE(&arpt_tables, table);
- up(&arpt_mutex);
+ private = xt_unregister_table(table);
/* Decrease module usage counts and free resources */
- loc_cpu_entry = table->private->entries[raw_smp_processor_id()];
- ARPT_ENTRY_ITERATE(loc_cpu_entry, table->private->size,
+ loc_cpu_entry = private->entries[raw_smp_processor_id()];
+ ARPT_ENTRY_ITERATE(loc_cpu_entry, private->size,
cleanup_entry, NULL);
- free_table_info(table->private);
+ xt_free_table_info(private);
}
/* The built-in targets: standard (NULL) and error. */
@@ -1379,52 +1148,15 @@ static struct nf_sockopt_ops arpt_sockopts = {
.get = do_arpt_get_ctl,
};
-#ifdef CONFIG_PROC_FS
-static inline int print_name(const struct arpt_table *t,
- off_t start_offset, char *buffer, int length,
- off_t *pos, unsigned int *count)
-{
- if ((*count)++ >= start_offset) {
- unsigned int namelen;
-
- namelen = sprintf(buffer + *pos, "%s\n", t->name);
- if (*pos + namelen > length) {
- /* Stop iterating */
- return 1;
- }
- *pos += namelen;
- }
- return 0;
-}
-
-static int arpt_get_tables(char *buffer, char **start, off_t offset, int length)
-{
- off_t pos = 0;
- unsigned int count = 0;
-
- if (down_interruptible(&arpt_mutex) != 0)
- return 0;
-
- LIST_FIND(&arpt_tables, print_name, struct arpt_table *,
- offset, buffer, length, &pos, &count);
-
- up(&arpt_mutex);
-
- /* `start' hack - see fs/proc/generic.c line ~105 */
- *start=(char *)((unsigned long)count-offset);
- return pos;
-}
-#endif /*CONFIG_PROC_FS*/
-
static int __init init(void)
{
int ret;
+ xt_proto_init(NF_ARP);
+
/* Noone else will be downing sem now, so we won't sleep */
- down(&arpt_mutex);
- list_append(&arpt_target, &arpt_standard_target);
- list_append(&arpt_target, &arpt_error_target);
- up(&arpt_mutex);
+ xt_register_target(NF_ARP, &arpt_standard_target);
+ xt_register_target(NF_ARP, &arpt_error_target);
/* Register setsockopt */
ret = nf_register_sockopt(&arpt_sockopts);
@@ -1433,19 +1165,6 @@ static int __init init(void)
return ret;
}
-#ifdef CONFIG_PROC_FS
- {
- struct proc_dir_entry *proc;
-
- proc = proc_net_create("arp_tables_names", 0, arpt_get_tables);
- if (!proc) {
- nf_unregister_sockopt(&arpt_sockopts);
- return -ENOMEM;
- }
- proc->owner = THIS_MODULE;
- }
-#endif
-
printk("arp_tables: (C) 2002 David S. Miller\n");
return 0;
}
@@ -1453,16 +1172,12 @@ static int __init init(void)
static void __exit fini(void)
{
nf_unregister_sockopt(&arpt_sockopts);
-#ifdef CONFIG_PROC_FS
- proc_net_remove("arp_tables_names");
-#endif
+ xt_proto_fini(NF_ARP);
}
EXPORT_SYMBOL(arpt_register_table);
EXPORT_SYMBOL(arpt_unregister_table);
EXPORT_SYMBOL(arpt_do_table);
-EXPORT_SYMBOL(arpt_register_target);
-EXPORT_SYMBOL(arpt_unregister_target);
module_init(init);
module_exit(fini);
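
Taken together, the arp_tables.c hunks above reduce table registration to a thin wrapper around the new x_tables helpers. A compressed sketch of that shape follows; it is illustrative only (the translate step, the per-entry cleanup walk and the exact error propagation are elided), with the xt_* names and argument order taken from the hunks themselves, everything else assumed:

/* Illustrative sketch, not kernel source: the shape arpt_register_table()
 * and arpt_unregister_table() take once the shared x_tables registry does
 * the locking, the duplicate-name check and the table bootstrap. */
static int sketch_register(struct arpt_table *table, struct xt_table_info *newinfo)
{
	static struct xt_table_info bootstrap = { 0, 0, 0, { 0 }, { 0 }, { } };
	int ret;

	ret = xt_register_table(table, &bootstrap, newinfo);
	if (ret != 0)
		xt_free_table_info(newinfo);	/* registry refused the table */
	return ret;
}

static void sketch_unregister(struct arpt_table *table)
{
	struct xt_table_info *private = xt_unregister_table(table);

	/* per-entry module_put() walk omitted in this sketch */
	xt_free_table_info(private);
}
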
diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c
index 3e592ec8648..c97650a16a5 100644
--- a/net/ipv4/netfilter/arpt_mangle.c
+++ b/net/ipv4/netfilter/arpt_mangle.c
@@ -8,8 +8,9 @@ MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
MODULE_DESCRIPTION("arptables arp payload mangle target");
static unsigned int
-target(struct sk_buff **pskb, unsigned int hooknum, const struct net_device *in,
- const struct net_device *out, const void *targinfo, void *userinfo)
+target(struct sk_buff **pskb, const struct net_device *in,
+ const struct net_device *out, unsigned int hooknum, const void *targinfo,
+ void *userinfo)
{
const struct arpt_mangle *mangle = targinfo;
struct arphdr *arp;
@@ -64,7 +65,7 @@ target(struct sk_buff **pskb, unsigned int hooknum, const struct net_device *in,
}
static int
-checkentry(const char *tablename, const struct arpt_entry *e, void *targinfo,
+checkentry(const char *tablename, const void *e, void *targinfo,
unsigned int targinfosize, unsigned int hook_mask)
{
const struct arpt_mangle *mangle = targinfo;
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index 0d759f5a4ef..f6ab45f4868 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -145,6 +145,7 @@ static struct arpt_table packet_filter = {
.lock = RW_LOCK_UNLOCKED,
.private = NULL,
.me = THIS_MODULE,
+ .af = NF_ARP,
};
/* The work comes in here from netfilter.c */
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
index 4108a5e12b3..d716bba798f 100644
--- a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
+++ b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
@@ -762,7 +762,7 @@ static struct ip_conntrack_helper pptp = {
.help = conntrack_pptp_help
};
-extern void __exit ip_ct_proto_gre_fini(void);
+extern void ip_ct_proto_gre_fini(void);
extern int __init ip_ct_proto_gre_init(void);
/* ip_conntrack_pptp initialization */
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_generic.c b/net/ipv4/netfilter/ip_conntrack_proto_generic.c
index 88c3712bd25..f891308b5e4 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_generic.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_generic.c
@@ -12,7 +12,7 @@
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
-unsigned long ip_ct_generic_timeout = 600*HZ;
+unsigned int ip_ct_generic_timeout = 600*HZ;
static int generic_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_gre.c b/net/ipv4/netfilter/ip_conntrack_proto_gre.c
index 57956dee60c..c777abf16cb 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_gre.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_gre.c
@@ -309,7 +309,10 @@ int __init ip_ct_proto_gre_init(void)
return ip_conntrack_protocol_register(&gre);
}
-void __exit ip_ct_proto_gre_fini(void)
+/* This cannot be __exit, as it is invoked from ip_conntrack_helper_pptp.c's
+ * init() code on errors.
+ */
+void ip_ct_proto_gre_fini(void)
{
struct list_head *pos, *n;
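
The comment added in the hunk above states the constraint; a minimal sketch of the call pattern that forces it follows. The init body shown is an assumed simplification of the pptp helper's setup path, not the actual code: a function marked __exit is discarded when the code is built into the kernel, so an __init error path must not call into it.

/* Illustrative only: why ip_ct_proto_gre_fini() cannot stay __exit. */
static int __init pptp_helper_init_sketch(void)
{
	int ret = ip_ct_proto_gre_init();
	if (ret < 0)
		return ret;

	ret = ip_conntrack_helper_register(&pptp);
	if (ret < 0)
		ip_ct_proto_gre_fini();	/* error unwind runs at init time */
	return ret;
}
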
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
index 30fc21d6165..3021af0910f 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
@@ -16,13 +16,12 @@
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/checksum.h>
-#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv4/ip_conntrack.h>
#include <linux/netfilter_ipv4/ip_conntrack_core.h>
#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
-unsigned long ip_ct_icmp_timeout = 30*HZ;
+unsigned int ip_ct_icmp_timeout = 30*HZ;
#if 0
#define DEBUGP printk
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
index 977fb59d456..be602e8aeab 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/timer.h>
+#include <linux/interrupt.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/in.h>
@@ -57,15 +58,15 @@ static const char *sctp_conntrack_names[] = {
#define HOURS * 60 MINS
#define DAYS * 24 HOURS
-static unsigned long ip_ct_sctp_timeout_closed = 10 SECS;
-static unsigned long ip_ct_sctp_timeout_cookie_wait = 3 SECS;
-static unsigned long ip_ct_sctp_timeout_cookie_echoed = 3 SECS;
-static unsigned long ip_ct_sctp_timeout_established = 5 DAYS;
-static unsigned long ip_ct_sctp_timeout_shutdown_sent = 300 SECS / 1000;
-static unsigned long ip_ct_sctp_timeout_shutdown_recd = 300 SECS / 1000;
-static unsigned long ip_ct_sctp_timeout_shutdown_ack_sent = 3 SECS;
+static unsigned int ip_ct_sctp_timeout_closed = 10 SECS;
+static unsigned int ip_ct_sctp_timeout_cookie_wait = 3 SECS;
+static unsigned int ip_ct_sctp_timeout_cookie_echoed = 3 SECS;
+static unsigned int ip_ct_sctp_timeout_established = 5 DAYS;
+static unsigned int ip_ct_sctp_timeout_shutdown_sent = 300 SECS / 1000;
+static unsigned int ip_ct_sctp_timeout_shutdown_recd = 300 SECS / 1000;
+static unsigned int ip_ct_sctp_timeout_shutdown_ack_sent = 3 SECS;
-static const unsigned long * sctp_timeouts[]
+static const unsigned int * sctp_timeouts[]
= { NULL, /* SCTP_CONNTRACK_NONE */
&ip_ct_sctp_timeout_closed, /* SCTP_CONNTRACK_CLOSED */
&ip_ct_sctp_timeout_cookie_wait, /* SCTP_CONNTRACK_COOKIE_WAIT */
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
index e7fa29e576d..e0dc3706354 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
@@ -32,7 +32,6 @@
#include <net/tcp.h>
-#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv4/ip_conntrack.h>
#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
@@ -85,21 +84,21 @@ static const char *tcp_conntrack_names[] = {
#define HOURS * 60 MINS
#define DAYS * 24 HOURS
-unsigned long ip_ct_tcp_timeout_syn_sent = 2 MINS;
-unsigned long ip_ct_tcp_timeout_syn_recv = 60 SECS;
-unsigned long ip_ct_tcp_timeout_established = 5 DAYS;
-unsigned long ip_ct_tcp_timeout_fin_wait = 2 MINS;
-unsigned long ip_ct_tcp_timeout_close_wait = 60 SECS;
-unsigned long ip_ct_tcp_timeout_last_ack = 30 SECS;
-unsigned long ip_ct_tcp_timeout_time_wait = 2 MINS;
-unsigned long ip_ct_tcp_timeout_close = 10 SECS;
+unsigned int ip_ct_tcp_timeout_syn_sent = 2 MINS;
+unsigned int ip_ct_tcp_timeout_syn_recv = 60 SECS;
+unsigned int ip_ct_tcp_timeout_established = 5 DAYS;
+unsigned int ip_ct_tcp_timeout_fin_wait = 2 MINS;
+unsigned int ip_ct_tcp_timeout_close_wait = 60 SECS;
+unsigned int ip_ct_tcp_timeout_last_ack = 30 SECS;
+unsigned int ip_ct_tcp_timeout_time_wait = 2 MINS;
+unsigned int ip_ct_tcp_timeout_close = 10 SECS;
/* RFC1122 says the R2 limit should be at least 100 seconds.
Linux uses 15 packets as limit, which corresponds
to ~13-30min depending on RTO. */
-unsigned long ip_ct_tcp_timeout_max_retrans = 5 MINS;
+unsigned int ip_ct_tcp_timeout_max_retrans = 5 MINS;
-static const unsigned long * tcp_timeouts[]
+static const unsigned int * tcp_timeouts[]
= { NULL, /* TCP_CONNTRACK_NONE */
&ip_ct_tcp_timeout_syn_sent, /* TCP_CONNTRACK_SYN_SENT, */
&ip_ct_tcp_timeout_syn_recv, /* TCP_CONNTRACK_SYN_RECV, */
@@ -995,7 +994,7 @@ static int tcp_packet(struct ip_conntrack *conntrack,
|| (!test_bit(IPS_ASSURED_BIT, &conntrack->status)
&& conntrack->proto.tcp.last_index == TCP_ACK_SET))
&& ntohl(th->ack_seq) == conntrack->proto.tcp.last_end) {
- /* RST sent to invalid SYN or ACK we had let trough
+ /* RST sent to invalid SYN or ACK we had let through
* at a) and c) above:
*
* a) SYN was in window then
@@ -1006,7 +1005,7 @@ static int tcp_packet(struct ip_conntrack *conntrack,
* segments we ignored. */
goto in_window;
}
- /* Just fall trough */
+ /* Just fall through */
default:
/* Keep compilers happy. */
break;
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_udp.c b/net/ipv4/netfilter/ip_conntrack_proto_udp.c
index 46becbe4fe5..55b7d3210ad 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_udp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_udp.c
@@ -15,12 +15,11 @@
#include <linux/udp.h>
#include <linux/seq_file.h>
#include <net/checksum.h>
-#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
-unsigned long ip_ct_udp_timeout = 30*HZ;
-unsigned long ip_ct_udp_timeout_stream = 180*HZ;
+unsigned int ip_ct_udp_timeout = 30*HZ;
+unsigned int ip_ct_udp_timeout_stream = 180*HZ;
static int udp_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
index 7ba97783e74..833fcb4be5e 100644
--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
@@ -544,28 +544,28 @@ extern int ip_conntrack_max;
extern unsigned int ip_conntrack_htable_size;
/* From ip_conntrack_proto_tcp.c */
-extern unsigned long ip_ct_tcp_timeout_syn_sent;
-extern unsigned long ip_ct_tcp_timeout_syn_recv;
-extern unsigned long ip_ct_tcp_timeout_established;
-extern unsigned long ip_ct_tcp_timeout_fin_wait;
-extern unsigned long ip_ct_tcp_timeout_close_wait;
-extern unsigned long ip_ct_tcp_timeout_last_ack;
-extern unsigned long ip_ct_tcp_timeout_time_wait;
-extern unsigned long ip_ct_tcp_timeout_close;
-extern unsigned long ip_ct_tcp_timeout_max_retrans;
+extern unsigned int ip_ct_tcp_timeout_syn_sent;
+extern unsigned int ip_ct_tcp_timeout_syn_recv;
+extern unsigned int ip_ct_tcp_timeout_established;
+extern unsigned int ip_ct_tcp_timeout_fin_wait;
+extern unsigned int ip_ct_tcp_timeout_close_wait;
+extern unsigned int ip_ct_tcp_timeout_last_ack;
+extern unsigned int ip_ct_tcp_timeout_time_wait;
+extern unsigned int ip_ct_tcp_timeout_close;
+extern unsigned int ip_ct_tcp_timeout_max_retrans;
extern int ip_ct_tcp_loose;
extern int ip_ct_tcp_be_liberal;
extern int ip_ct_tcp_max_retrans;
/* From ip_conntrack_proto_udp.c */
-extern unsigned long ip_ct_udp_timeout;
-extern unsigned long ip_ct_udp_timeout_stream;
+extern unsigned int ip_ct_udp_timeout;
+extern unsigned int ip_ct_udp_timeout_stream;
/* From ip_conntrack_proto_icmp.c */
-extern unsigned long ip_ct_icmp_timeout;
+extern unsigned int ip_ct_icmp_timeout;
/* From ip_conntrack_proto_icmp.c */
-extern unsigned long ip_ct_generic_timeout;
+extern unsigned int ip_ct_generic_timeout;
/* Log invalid packets of a given protocol */
static int log_invalid_proto_min = 0;
@@ -944,7 +944,7 @@ module_exit(fini);
/* Some modules need us, but don't depend directly on any symbol.
They should call this. */
-void need_ip_conntrack(void)
+void need_conntrack(void)
{
}
@@ -962,7 +962,7 @@ EXPORT_SYMBOL(ip_ct_get_tuple);
EXPORT_SYMBOL(invert_tuplepr);
EXPORT_SYMBOL(ip_conntrack_alter_reply);
EXPORT_SYMBOL(ip_conntrack_destroyed);
-EXPORT_SYMBOL(need_ip_conntrack);
+EXPORT_SYMBOL(need_conntrack);
EXPORT_SYMBOL(ip_conntrack_helper_register);
EXPORT_SYMBOL(ip_conntrack_helper_unregister);
EXPORT_SYMBOL(ip_ct_iterate_cleanup);
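
These extern declarations simply track the unsigned long to unsigned int change made in the protocol modules above. The likely reason (stated here as an assumption, not taken from the patch itself) is that the timeouts are exposed through sysctl entries handled by proc_dointvec_jiffies(), which reads and writes int-sized slots; a sketch of such an entry, with the field values assumed for illustration, looks like:

/* Sketch only: the kind of ctl_table entry these timeout variables feed. */
static ctl_table sketch_ct_sysctl[] = {
	{
		.ctl_name	= NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED,
		.procname	= "ip_conntrack_tcp_timeout_established",
		.data		= &ip_ct_tcp_timeout_established,
		.maxlen		= sizeof(unsigned int),	/* matches the new type */
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
	},
	{ .ctl_name = 0 }
};
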
diff --git a/net/ipv4/netfilter/ip_nat_ftp.c b/net/ipv4/netfilter/ip_nat_ftp.c
index d83757a70d9..b8daab3c64a 100644
--- a/net/ipv4/netfilter/ip_nat_ftp.c
+++ b/net/ipv4/netfilter/ip_nat_ftp.c
@@ -171,7 +171,7 @@ static int __init init(void)
/* Prior to 2.6.11, we had a ports param. No longer, but don't break users. */
static int warn_set(const char *val, struct kernel_param *kp)
{
- printk(KERN_INFO __stringify(KBUILD_MODNAME)
+ printk(KERN_INFO KBUILD_MODNAME
": kernel >= 2.6.10 only uses 'ports' for conntrack modules\n");
return 0;
}
diff --git a/net/ipv4/netfilter/ip_nat_helper_pptp.c b/net/ipv4/netfilter/ip_nat_helper_pptp.c
index e546203f566..ac004895781 100644
--- a/net/ipv4/netfilter/ip_nat_helper_pptp.c
+++ b/net/ipv4/netfilter/ip_nat_helper_pptp.c
@@ -148,14 +148,14 @@ pptp_outbound_pkt(struct sk_buff **pskb,
{
struct ip_ct_pptp_master *ct_pptp_info = &ct->help.ct_pptp_info;
struct ip_nat_pptp *nat_pptp_info = &ct->nat.help.nat_pptp_info;
-
- u_int16_t msg, *cid = NULL, new_callid;
+ u_int16_t msg, new_callid;
+ unsigned int cid_off;
new_callid = htons(ct_pptp_info->pns_call_id);
switch (msg = ntohs(ctlh->messageType)) {
case PPTP_OUT_CALL_REQUEST:
- cid = &pptpReq->ocreq.callID;
+ cid_off = offsetof(union pptp_ctrl_union, ocreq.callID);
/* FIXME: ideally we would want to reserve a call ID
* here. current netfilter NAT core is not able to do
* this :( For now we use TCP source port. This breaks
@@ -172,10 +172,10 @@ pptp_outbound_pkt(struct sk_buff **pskb,
ct_pptp_info->pns_call_id = ntohs(new_callid);
break;
case PPTP_IN_CALL_REPLY:
- cid = &pptpReq->icreq.callID;
+ cid_off = offsetof(union pptp_ctrl_union, icreq.callID);
break;
case PPTP_CALL_CLEAR_REQUEST:
- cid = &pptpReq->clrreq.callID;
+ cid_off = offsetof(union pptp_ctrl_union, clrreq.callID);
break;
default:
DEBUGP("unknown outbound packet 0x%04x:%s\n", msg,
@@ -197,18 +197,15 @@ pptp_outbound_pkt(struct sk_buff **pskb,
/* only OUT_CALL_REQUEST, IN_CALL_REPLY, CALL_CLEAR_REQUEST pass
* down to here */
-
- IP_NF_ASSERT(cid);
-
DEBUGP("altering call id from 0x%04x to 0x%04x\n",
- ntohs(*cid), ntohs(new_callid));
+ ntohs(*(u_int16_t *)pptpReq + cid_off), ntohs(new_callid));
/* mangle packet */
if (ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
- (void *)cid - ((void *)ctlh - sizeof(struct pptp_pkt_hdr)),
- sizeof(new_callid),
- (char *)&new_callid,
- sizeof(new_callid)) == 0)
+ cid_off + sizeof(struct pptp_pkt_hdr) +
+ sizeof(struct PptpControlHeader),
+ sizeof(new_callid), (char *)&new_callid,
+ sizeof(new_callid)) == 0)
return NF_DROP;
return NF_ACCEPT;
@@ -299,31 +296,30 @@ pptp_inbound_pkt(struct sk_buff **pskb,
union pptp_ctrl_union *pptpReq)
{
struct ip_nat_pptp *nat_pptp_info = &ct->nat.help.nat_pptp_info;
- u_int16_t msg, new_cid = 0, new_pcid, *pcid = NULL, *cid = NULL;
-
- int ret = NF_ACCEPT, rv;
+ u_int16_t msg, new_cid = 0, new_pcid;
+ unsigned int pcid_off, cid_off = 0;
new_pcid = htons(nat_pptp_info->pns_call_id);
switch (msg = ntohs(ctlh->messageType)) {
case PPTP_OUT_CALL_REPLY:
- pcid = &pptpReq->ocack.peersCallID;
- cid = &pptpReq->ocack.callID;
+ pcid_off = offsetof(union pptp_ctrl_union, ocack.peersCallID);
+ cid_off = offsetof(union pptp_ctrl_union, ocack.callID);
break;
case PPTP_IN_CALL_CONNECT:
- pcid = &pptpReq->iccon.peersCallID;
+ pcid_off = offsetof(union pptp_ctrl_union, iccon.peersCallID);
break;
case PPTP_IN_CALL_REQUEST:
/* only need to nat in case PAC is behind NAT box */
- break;
+ return NF_ACCEPT;
case PPTP_WAN_ERROR_NOTIFY:
- pcid = &pptpReq->wanerr.peersCallID;
+ pcid_off = offsetof(union pptp_ctrl_union, wanerr.peersCallID);
break;
case PPTP_CALL_DISCONNECT_NOTIFY:
- pcid = &pptpReq->disc.callID;
+ pcid_off = offsetof(union pptp_ctrl_union, disc.callID);
break;
case PPTP_SET_LINK_INFO:
- pcid = &pptpReq->setlink.peersCallID;
+ pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID);
break;
default:
@@ -345,35 +341,26 @@ pptp_inbound_pkt(struct sk_buff **pskb,
* WAN_ERROR_NOTIFY, CALL_DISCONNECT_NOTIFY pass down here */
/* mangle packet */
- IP_NF_ASSERT(pcid);
DEBUGP("altering peer call id from 0x%04x to 0x%04x\n",
- ntohs(*pcid), ntohs(new_pcid));
-
- rv = ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
- (void *)pcid - ((void *)ctlh - sizeof(struct pptp_pkt_hdr)),
- sizeof(new_pcid), (char *)&new_pcid,
- sizeof(new_pcid));
- if (rv != NF_ACCEPT)
- return rv;
+ ntohs(*(u_int16_t *)pptpReq + pcid_off), ntohs(new_pcid));
+
+ if (ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
+ pcid_off + sizeof(struct pptp_pkt_hdr) +
+ sizeof(struct PptpControlHeader),
+ sizeof(new_pcid), (char *)&new_pcid,
+ sizeof(new_pcid)) == 0)
+ return NF_DROP;
if (new_cid) {
- IP_NF_ASSERT(cid);
DEBUGP("altering call id from 0x%04x to 0x%04x\n",
- ntohs(*cid), ntohs(new_cid));
- rv = ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
- (void *)cid - ((void *)ctlh - sizeof(struct pptp_pkt_hdr)),
- sizeof(new_cid),
- (char *)&new_cid,
- sizeof(new_cid));
- if (rv != NF_ACCEPT)
- return rv;
+ ntohs(*(u_int16_t *)pptpReq + cid_off), ntohs(new_cid));
+ if (ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
+ cid_off + sizeof(struct pptp_pkt_hdr) +
+ sizeof(struct PptpControlHeader),
+ sizeof(new_cid), (char *)&new_cid,
+ sizeof(new_cid)) == 0)
+ return NF_DROP;
}
-
- /* check for earlier return value of 'switch' above */
- if (ret != NF_ACCEPT)
- return ret;
-
- /* great, at least we don't need to resize packets */
return NF_ACCEPT;
}
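
The pptp NAT helper hunks above replace stored pointers into the packet with offsets computed via offsetof(), so the mangle position no longer depends on the skb data staying in place. A small sketch of the offset arithmetic, for the outbound call request case, with the helper name invented for illustration:

/* Illustrative: how the new code locates the call ID to rewrite.  The
 * offset handed to ip_nat_mangle_tcp_packet() is measured from the start
 * of the PPTP message in the TCP payload. */
static unsigned int sketch_callid_offset(void)
{
	unsigned int cid_off = offsetof(union pptp_ctrl_union, ocreq.callID);

	/* pptp_pkt_hdr and PptpControlHeader precede the control payload */
	return cid_off + sizeof(struct pptp_pkt_hdr)
		       + sizeof(struct PptpControlHeader);
}
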
diff --git a/net/ipv4/netfilter/ip_nat_irc.c b/net/ipv4/netfilter/ip_nat_irc.c
index de31942babe..461c833eaca 100644
--- a/net/ipv4/netfilter/ip_nat_irc.c
+++ b/net/ipv4/netfilter/ip_nat_irc.c
@@ -113,7 +113,7 @@ static int __init init(void)
/* Prior to 2.6.11, we had a ports param. No longer, but don't break users. */
static int warn_set(const char *val, struct kernel_param *kp)
{
- printk(KERN_INFO __stringify(KBUILD_MODNAME)
+ printk(KERN_INFO KBUILD_MODNAME
": kernel >= 2.6.10 only uses 'ports' for conntrack modules\n");
return 0;
}
diff --git a/net/ipv4/netfilter/ip_nat_proto_gre.c b/net/ipv4/netfilter/ip_nat_proto_gre.c
index f7cad7cf1ae..6c4899d8046 100644
--- a/net/ipv4/netfilter/ip_nat_proto_gre.c
+++ b/net/ipv4/netfilter/ip_nat_proto_gre.c
@@ -151,42 +151,6 @@ gre_manip_pkt(struct sk_buff **pskb,
return 1;
}
-/* print out a nat tuple */
-static unsigned int
-gre_print(char *buffer,
- const struct ip_conntrack_tuple *match,
- const struct ip_conntrack_tuple *mask)
-{
- unsigned int len = 0;
-
- if (mask->src.u.gre.key)
- len += sprintf(buffer + len, "srckey=0x%x ",
- ntohl(match->src.u.gre.key));
-
- if (mask->dst.u.gre.key)
- len += sprintf(buffer + len, "dstkey=0x%x ",
- ntohl(match->src.u.gre.key));
-
- return len;
-}
-
-/* print a range of keys */
-static unsigned int
-gre_print_range(char *buffer, const struct ip_nat_range *range)
-{
- if (range->min.gre.key != 0
- || range->max.gre.key != 0xFFFF) {
- if (range->min.gre.key == range->max.gre.key)
- return sprintf(buffer, "key 0x%x ",
- ntohl(range->min.gre.key));
- else
- return sprintf(buffer, "keys 0x%u-0x%u ",
- ntohl(range->min.gre.key),
- ntohl(range->max.gre.key));
- } else
- return 0;
-}
-
/* nat helper struct */
static struct ip_nat_protocol gre = {
.name = "GRE",
@@ -194,8 +158,6 @@ static struct ip_nat_protocol gre = {
.manip_pkt = gre_manip_pkt,
.in_range = gre_in_range,
.unique_tuple = gre_unique_tuple,
- .print = gre_print,
- .print_range = gre_print_range,
#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
.range_to_nfattr = ip_nat_port_range_to_nfattr,
diff --git a/net/ipv4/netfilter/ip_nat_proto_icmp.c b/net/ipv4/netfilter/ip_nat_proto_icmp.c
index 93871904399..31a3f4ccb99 100644
--- a/net/ipv4/netfilter/ip_nat_proto_icmp.c
+++ b/net/ipv4/netfilter/ip_nat_proto_icmp.c
@@ -74,38 +74,6 @@ icmp_manip_pkt(struct sk_buff **pskb,
return 1;
}
-static unsigned int
-icmp_print(char *buffer,
- const struct ip_conntrack_tuple *match,
- const struct ip_conntrack_tuple *mask)
-{
- unsigned int len = 0;
-
- if (mask->src.u.icmp.id)
- len += sprintf(buffer + len, "id=%u ",
- ntohs(match->src.u.icmp.id));
-
- if (mask->dst.u.icmp.type)
- len += sprintf(buffer + len, "type=%u ",
- ntohs(match->dst.u.icmp.type));
-
- if (mask->dst.u.icmp.code)
- len += sprintf(buffer + len, "code=%u ",
- ntohs(match->dst.u.icmp.code));
-
- return len;
-}
-
-static unsigned int
-icmp_print_range(char *buffer, const struct ip_nat_range *range)
-{
- if (range->min.icmp.id != 0 || range->max.icmp.id != 0xFFFF)
- return sprintf(buffer, "id %u-%u ",
- ntohs(range->min.icmp.id),
- ntohs(range->max.icmp.id));
- else return 0;
-}
-
struct ip_nat_protocol ip_nat_protocol_icmp = {
.name = "ICMP",
.protonum = IPPROTO_ICMP,
@@ -113,8 +81,6 @@ struct ip_nat_protocol ip_nat_protocol_icmp = {
.manip_pkt = icmp_manip_pkt,
.in_range = icmp_in_range,
.unique_tuple = icmp_unique_tuple,
- .print = icmp_print,
- .print_range = icmp_print_range,
#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
.range_to_nfattr = ip_nat_port_range_to_nfattr,
diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c
index 1d381bf6857..a3d14079eba 100644
--- a/net/ipv4/netfilter/ip_nat_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c
@@ -136,40 +136,6 @@ tcp_manip_pkt(struct sk_buff **pskb,
return 1;
}
-static unsigned int
-tcp_print(char *buffer,
- const struct ip_conntrack_tuple *match,
- const struct ip_conntrack_tuple *mask)
-{
- unsigned int len = 0;
-
- if (mask->src.u.tcp.port)
- len += sprintf(buffer + len, "srcpt=%u ",
- ntohs(match->src.u.tcp.port));
-
-
- if (mask->dst.u.tcp.port)
- len += sprintf(buffer + len, "dstpt=%u ",
- ntohs(match->dst.u.tcp.port));
-
- return len;
-}
-
-static unsigned int
-tcp_print_range(char *buffer, const struct ip_nat_range *range)
-{
- if (range->min.tcp.port != 0 || range->max.tcp.port != 0xFFFF) {
- if (range->min.tcp.port == range->max.tcp.port)
- return sprintf(buffer, "port %u ",
- ntohs(range->min.tcp.port));
- else
- return sprintf(buffer, "ports %u-%u ",
- ntohs(range->min.tcp.port),
- ntohs(range->max.tcp.port));
- }
- else return 0;
-}
-
struct ip_nat_protocol ip_nat_protocol_tcp = {
.name = "TCP",
.protonum = IPPROTO_TCP,
@@ -177,8 +143,6 @@ struct ip_nat_protocol ip_nat_protocol_tcp = {
.manip_pkt = tcp_manip_pkt,
.in_range = tcp_in_range,
.unique_tuple = tcp_unique_tuple,
- .print = tcp_print,
- .print_range = tcp_print_range,
#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
.range_to_nfattr = ip_nat_port_range_to_nfattr,
diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c
index c4906e1aa24..ec6053fdc86 100644
--- a/net/ipv4/netfilter/ip_nat_proto_udp.c
+++ b/net/ipv4/netfilter/ip_nat_proto_udp.c
@@ -122,40 +122,6 @@ udp_manip_pkt(struct sk_buff **pskb,
return 1;
}
-static unsigned int
-udp_print(char *buffer,
- const struct ip_conntrack_tuple *match,
- const struct ip_conntrack_tuple *mask)
-{
- unsigned int len = 0;
-
- if (mask->src.u.udp.port)
- len += sprintf(buffer + len, "srcpt=%u ",
- ntohs(match->src.u.udp.port));
-
-
- if (mask->dst.u.udp.port)
- len += sprintf(buffer + len, "dstpt=%u ",
- ntohs(match->dst.u.udp.port));
-
- return len;
-}
-
-static unsigned int
-udp_print_range(char *buffer, const struct ip_nat_range *range)
-{
- if (range->min.udp.port != 0 || range->max.udp.port != 0xFFFF) {
- if (range->min.udp.port == range->max.udp.port)
- return sprintf(buffer, "port %u ",
- ntohs(range->min.udp.port));
- else
- return sprintf(buffer, "ports %u-%u ",
- ntohs(range->min.udp.port),
- ntohs(range->max.udp.port));
- }
- else return 0;
-}
-
struct ip_nat_protocol ip_nat_protocol_udp = {
.name = "UDP",
.protonum = IPPROTO_UDP,
@@ -163,8 +129,6 @@ struct ip_nat_protocol ip_nat_protocol_udp = {
.manip_pkt = udp_manip_pkt,
.in_range = udp_in_range,
.unique_tuple = udp_unique_tuple,
- .print = udp_print,
- .print_range = udp_print_range,
#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
.range_to_nfattr = ip_nat_port_range_to_nfattr,
diff --git a/net/ipv4/netfilter/ip_nat_proto_unknown.c b/net/ipv4/netfilter/ip_nat_proto_unknown.c
index f0099a646a0..3bf04951724 100644
--- a/net/ipv4/netfilter/ip_nat_proto_unknown.c
+++ b/net/ipv4/netfilter/ip_nat_proto_unknown.c
@@ -46,26 +46,10 @@ unknown_manip_pkt(struct sk_buff **pskb,
return 1;
}
-static unsigned int
-unknown_print(char *buffer,
- const struct ip_conntrack_tuple *match,
- const struct ip_conntrack_tuple *mask)
-{
- return 0;
-}
-
-static unsigned int
-unknown_print_range(char *buffer, const struct ip_nat_range *range)
-{
- return 0;
-}
-
struct ip_nat_protocol ip_nat_unknown_protocol = {
.name = "unknown",
/* .me isn't set: getting a ref to this cannot fail. */
.manip_pkt = unknown_manip_pkt,
.in_range = unknown_in_range,
.unique_tuple = unknown_unique_tuple,
- .print = unknown_print,
- .print_range = unknown_print_range
};
diff --git a/net/ipv4/netfilter/ip_nat_rule.c b/net/ipv4/netfilter/ip_nat_rule.c
index cb66b8bddeb..1de86282d23 100644
--- a/net/ipv4/netfilter/ip_nat_rule.c
+++ b/net/ipv4/netfilter/ip_nat_rule.c
@@ -95,6 +95,7 @@ static struct ipt_table nat_table = {
.valid_hooks = NAT_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.me = THIS_MODULE,
+ .af = AF_INET,
};
/* Source NAT */
@@ -168,7 +169,7 @@ static unsigned int ipt_dnat_target(struct sk_buff **pskb,
}
static int ipt_snat_checkentry(const char *tablename,
- const struct ipt_entry *e,
+ const void *entry,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
@@ -201,7 +202,7 @@ static int ipt_snat_checkentry(const char *tablename,
}
static int ipt_dnat_checkentry(const char *tablename,
- const struct ipt_entry *e,
+ const void *entry,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index f04111f74e0..ad438fb185b 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -55,6 +55,44 @@
: ((hooknum) == NF_IP_LOCAL_IN ? "LOCAL_IN" \
: "*ERROR*")))
+#ifdef CONFIG_XFRM
+static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
+{
+ struct ip_conntrack *ct;
+ struct ip_conntrack_tuple *t;
+ enum ip_conntrack_info ctinfo;
+ enum ip_conntrack_dir dir;
+ unsigned long statusbit;
+
+ ct = ip_conntrack_get(skb, &ctinfo);
+ if (ct == NULL)
+ return;
+ dir = CTINFO2DIR(ctinfo);
+ t = &ct->tuplehash[dir].tuple;
+
+ if (dir == IP_CT_DIR_ORIGINAL)
+ statusbit = IPS_DST_NAT;
+ else
+ statusbit = IPS_SRC_NAT;
+
+ if (ct->status & statusbit) {
+ fl->fl4_dst = t->dst.ip;
+ if (t->dst.protonum == IPPROTO_TCP ||
+ t->dst.protonum == IPPROTO_UDP)
+ fl->fl_ip_dport = t->dst.u.tcp.port;
+ }
+
+ statusbit ^= IPS_NAT_MASK;
+
+ if (ct->status & statusbit) {
+ fl->fl4_src = t->src.ip;
+ if (t->dst.protonum == IPPROTO_TCP ||
+ t->dst.protonum == IPPROTO_UDP)
+ fl->fl_ip_sport = t->src.u.tcp.port;
+ }
+}
+#endif
+
static unsigned int
ip_nat_fn(unsigned int hooknum,
struct sk_buff **pskb,
@@ -162,18 +200,20 @@ ip_nat_in(unsigned int hooknum,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- u_int32_t saddr, daddr;
+ struct ip_conntrack *ct;
+ enum ip_conntrack_info ctinfo;
unsigned int ret;
- saddr = (*pskb)->nh.iph->saddr;
- daddr = (*pskb)->nh.iph->daddr;
-
ret = ip_nat_fn(hooknum, pskb, in, out, okfn);
if (ret != NF_DROP && ret != NF_STOLEN
- && ((*pskb)->nh.iph->saddr != saddr
- || (*pskb)->nh.iph->daddr != daddr)) {
- dst_release((*pskb)->dst);
- (*pskb)->dst = NULL;
+ && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) {
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+ if (ct->tuplehash[dir].tuple.src.ip !=
+ ct->tuplehash[!dir].tuple.dst.ip) {
+ dst_release((*pskb)->dst);
+ (*pskb)->dst = NULL;
+ }
}
return ret;
}
@@ -185,12 +225,30 @@ ip_nat_out(unsigned int hooknum,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
+ struct ip_conntrack *ct;
+ enum ip_conntrack_info ctinfo;
+ unsigned int ret;
+
/* root is playing with raw sockets. */
if ((*pskb)->len < sizeof(struct iphdr)
|| (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr))
return NF_ACCEPT;
- return ip_nat_fn(hooknum, pskb, in, out, okfn);
+ ret = ip_nat_fn(hooknum, pskb, in, out, okfn);
+ if (ret != NF_DROP && ret != NF_STOLEN
+ && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) {
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+ if (ct->tuplehash[dir].tuple.src.ip !=
+ ct->tuplehash[!dir].tuple.dst.ip
+#ifdef CONFIG_XFRM
+ || ct->tuplehash[dir].tuple.src.u.all !=
+ ct->tuplehash[!dir].tuple.dst.u.all
+#endif
+ )
+ return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP;
+ }
+ return ret;
}
static unsigned int
@@ -200,7 +258,8 @@ ip_nat_local_fn(unsigned int hooknum,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
- u_int32_t saddr, daddr;
+ struct ip_conntrack *ct;
+ enum ip_conntrack_info ctinfo;
unsigned int ret;
/* root is playing with raw sockets. */
@@ -208,14 +267,20 @@ ip_nat_local_fn(unsigned int hooknum,
|| (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr))
return NF_ACCEPT;
- saddr = (*pskb)->nh.iph->saddr;
- daddr = (*pskb)->nh.iph->daddr;
-
ret = ip_nat_fn(hooknum, pskb, in, out, okfn);
if (ret != NF_DROP && ret != NF_STOLEN
- && ((*pskb)->nh.iph->saddr != saddr
- || (*pskb)->nh.iph->daddr != daddr))
- return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP;
+ && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) {
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+ if (ct->tuplehash[dir].tuple.dst.ip !=
+ ct->tuplehash[!dir].tuple.src.ip
+#ifdef CONFIG_XFRM
+ || ct->tuplehash[dir].tuple.dst.u.all !=
+ ct->tuplehash[dir].tuple.src.u.all
+#endif
+ )
+ return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP;
+ }
return ret;
}
@@ -299,14 +364,18 @@ static int init_or_cleanup(int init)
{
int ret = 0;
- need_ip_conntrack();
+ need_conntrack();
if (!init) goto cleanup;
+#ifdef CONFIG_XFRM
+ BUG_ON(ip_nat_decode_session != NULL);
+ ip_nat_decode_session = nat_decode_session;
+#endif
ret = ip_nat_rule_init();
if (ret < 0) {
printk("ip_nat_init: can't setup rules.\n");
- goto cleanup_nothing;
+ goto cleanup_decode_session;
}
ret = nf_register_hook(&ip_nat_in_ops);
if (ret < 0) {
@@ -354,7 +423,11 @@ static int init_or_cleanup(int init)
nf_unregister_hook(&ip_nat_in_ops);
cleanup_rule_init:
ip_nat_rule_cleanup();
- cleanup_nothing:
+ cleanup_decode_session:
+#ifdef CONFIG_XFRM
+ ip_nat_decode_session = NULL;
+ synchronize_net();
+#endif
return ret;
}
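
The ip_nat_standalone.c hunks above stop snapshotting the IP header around ip_nat_fn() and instead compare the conntrack tuples of the two directions, which also catches port-only rewrites in the CONFIG_XFRM case. A sketch of that comparison, with the helper name invented for illustration and only the address check shown:

/* Illustrative: "did NAT change anything that affects routing?" expressed
 * as a tuple comparison rather than a before/after header snapshot. */
static int sketch_nat_changed_addr(const struct ip_conntrack *ct,
				   enum ip_conntrack_dir dir)
{
	return ct->tuplehash[dir].tuple.src.ip !=
	       ct->tuplehash[!dir].tuple.dst.ip;
}
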
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 2a26d167e14..2371b2062c2 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -2,7 +2,7 @@
* Packet matching code.
*
* Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
- * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org>
+ * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -11,16 +11,17 @@
* 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
* - increase module usage count as soon as we have rules inside
* a table
+ * 08 Oct 2005 Harald Welte <lafore@netfilter.org>
+ * - Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
*/
#include <linux/config.h>
#include <linux/cache.h>
+#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <asm/uaccess.h>
@@ -29,6 +30,7 @@
#include <linux/err.h>
#include <linux/cpumask.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
MODULE_LICENSE("GPL");
@@ -61,14 +63,6 @@ do { \
#else
#define IP_NF_ASSERT(x)
#endif
-#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
-
-static DECLARE_MUTEX(ipt_mutex);
-
-/* Must have mutex */
-#define ASSERT_READ_LOCK(x) IP_NF_ASSERT(down_trylock(&ipt_mutex) != 0)
-#define ASSERT_WRITE_LOCK(x) IP_NF_ASSERT(down_trylock(&ipt_mutex) != 0)
-#include <linux/netfilter_ipv4/listhelp.h>
#if 0
/* All the better to debug you with... */
@@ -85,36 +79,6 @@ static DECLARE_MUTEX(ipt_mutex);
Hence the start of any table is given by get_table() below. */
-/* The table itself */
-struct ipt_table_info
-{
- /* Size per table */
- unsigned int size;
- /* Number of entries: FIXME. --RR */
- unsigned int number;
- /* Initial number of entries. Needed for module usage count */
- unsigned int initial_entries;
-
- /* Entry points and underflows */
- unsigned int hook_entry[NF_IP_NUMHOOKS];
- unsigned int underflow[NF_IP_NUMHOOKS];
-
- /* ipt_entry tables: one per CPU */
- void *entries[NR_CPUS];
-};
-
-static LIST_HEAD(ipt_target);
-static LIST_HEAD(ipt_match);
-static LIST_HEAD(ipt_tables);
-#define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
-#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)
-
-#if 0
-#define down(x) do { printk("DOWN:%u:" #x "\n", __LINE__); down(x); } while(0)
-#define down_interruptible(x) ({ int __r; printk("DOWNi:%u:" #x "\n", __LINE__); __r = down_interruptible(x); if (__r != 0) printk("ABORT-DOWNi:%u\n", __LINE__); __r; })
-#define up(x) do { printk("UP:%u:" #x "\n", __LINE__); up(x); } while(0)
-#endif
-
/* Returns whether matches rule or not. */
static inline int
ip_packet_match(const struct iphdr *ip,
@@ -233,7 +197,8 @@ int do_match(struct ipt_entry_match *m,
int *hotdrop)
{
/* Stop iteration if it doesn't match */
- if (!m->u.kernel.match->match(skb, in, out, m->data, offset, hotdrop))
+ if (!m->u.kernel.match->match(skb, in, out, m->data, offset,
+ skb->nh.iph->ihl*4, hotdrop))
return 1;
else
return 0;
@@ -264,6 +229,7 @@ ipt_do_table(struct sk_buff **pskb,
const char *indev, *outdev;
void *table_base;
struct ipt_entry *e, *back;
+ struct xt_table_info *private = table->private;
/* Initialization */
ip = (*pskb)->nh.iph;
@@ -280,24 +246,11 @@ ipt_do_table(struct sk_buff **pskb,
read_lock_bh(&table->lock);
IP_NF_ASSERT(table->valid_hooks & (1 << hook));
- table_base = (void *)table->private->entries[smp_processor_id()];
- e = get_entry(table_base, table->private->hook_entry[hook]);
-
-#ifdef CONFIG_NETFILTER_DEBUG
- /* Check noone else using our table */
- if (((struct ipt_entry *)table_base)->comefrom != 0xdead57ac
- && ((struct ipt_entry *)table_base)->comefrom != 0xeeeeeeec) {
- printk("ASSERT: CPU #%u, %s comefrom(%p) = %X\n",
- smp_processor_id(),
- table->name,
- &((struct ipt_entry *)table_base)->comefrom,
- ((struct ipt_entry *)table_base)->comefrom);
- }
- ((struct ipt_entry *)table_base)->comefrom = 0x57acc001;
-#endif
+ table_base = (void *)private->entries[smp_processor_id()];
+ e = get_entry(table_base, private->hook_entry[hook]);
/* For return from builtin chain */
- back = get_entry(table_base, table->private->underflow[hook]);
+ back = get_entry(table_base, private->underflow[hook]);
do {
IP_NF_ASSERT(e);
@@ -383,9 +336,6 @@ ipt_do_table(struct sk_buff **pskb,
}
} while (!hotdrop);
-#ifdef CONFIG_NETFILTER_DEBUG
- ((struct ipt_entry *)table_base)->comefrom = 0xdead57ac;
-#endif
read_unlock_bh(&table->lock);
#ifdef DEBUG_ALLOW_ALL
@@ -397,145 +347,6 @@ ipt_do_table(struct sk_buff **pskb,
#endif
}
-/*
- * These are weird, but module loading must not be done with mutex
- * held (since they will register), and we have to have a single
- * function to use try_then_request_module().
- */
-
-/* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */
-static inline struct ipt_table *find_table_lock(const char *name)
-{
- struct ipt_table *t;
-
- if (down_interruptible(&ipt_mutex) != 0)
- return ERR_PTR(-EINTR);
-
- list_for_each_entry(t, &ipt_tables, list)
- if (strcmp(t->name, name) == 0 && try_module_get(t->me))
- return t;
- up(&ipt_mutex);
- return NULL;
-}
-
-/* Find match, grabs ref. Returns ERR_PTR() on error. */
-static inline struct ipt_match *find_match(const char *name, u8 revision)
-{
- struct ipt_match *m;
- int err = 0;
-
- if (down_interruptible(&ipt_mutex) != 0)
- return ERR_PTR(-EINTR);
-
- list_for_each_entry(m, &ipt_match, list) {
- if (strcmp(m->name, name) == 0) {
- if (m->revision == revision) {
- if (try_module_get(m->me)) {
- up(&ipt_mutex);
- return m;
- }
- } else
- err = -EPROTOTYPE; /* Found something. */
- }
- }
- up(&ipt_mutex);
- return ERR_PTR(err);
-}
-
-/* Find target, grabs ref. Returns ERR_PTR() on error. */
-static inline struct ipt_target *find_target(const char *name, u8 revision)
-{
- struct ipt_target *t;
- int err = 0;
-
- if (down_interruptible(&ipt_mutex) != 0)
- return ERR_PTR(-EINTR);
-
- list_for_each_entry(t, &ipt_target, list) {
- if (strcmp(t->name, name) == 0) {
- if (t->revision == revision) {
- if (try_module_get(t->me)) {
- up(&ipt_mutex);
- return t;
- }
- } else
- err = -EPROTOTYPE; /* Found something. */
- }
- }
- up(&ipt_mutex);
- return ERR_PTR(err);
-}
-
-struct ipt_target *ipt_find_target(const char *name, u8 revision)
-{
- struct ipt_target *target;
-
- target = try_then_request_module(find_target(name, revision),
- "ipt_%s", name);
- if (IS_ERR(target) || !target)
- return NULL;
- return target;
-}
-
-static int match_revfn(const char *name, u8 revision, int *bestp)
-{
- struct ipt_match *m;
- int have_rev = 0;
-
- list_for_each_entry(m, &ipt_match, list) {
- if (strcmp(m->name, name) == 0) {
- if (m->revision > *bestp)
- *bestp = m->revision;
- if (m->revision == revision)
- have_rev = 1;
- }
- }
- return have_rev;
-}
-
-static int target_revfn(const char *name, u8 revision, int *bestp)
-{
- struct ipt_target *t;
- int have_rev = 0;
-
- list_for_each_entry(t, &ipt_target, list) {
- if (strcmp(t->name, name) == 0) {
- if (t->revision > *bestp)
- *bestp = t->revision;
- if (t->revision == revision)
- have_rev = 1;
- }
- }
- return have_rev;
-}
-
-/* Returns true or false (if no such extension at all) */
-static inline int find_revision(const char *name, u8 revision,
- int (*revfn)(const char *, u8, int *),
- int *err)
-{
- int have_rev, best = -1;
-
- if (down_interruptible(&ipt_mutex) != 0) {
- *err = -EINTR;
- return 1;
- }
- have_rev = revfn(name, revision, &best);
- up(&ipt_mutex);
-
- /* Nothing at all? Return 0 to try loading module. */
- if (best == -1) {
- *err = -ENOENT;
- return 0;
- }
-
- *err = best;
- if (!have_rev)
- *err = -EPROTONOSUPPORT;
- return 1;
-}
-
-
/* All zeroes == unconditional rule. */
static inline int
unconditional(const struct ipt_ip *ip)
@@ -552,7 +363,7 @@ unconditional(const struct ipt_ip *ip)
/* Figures out from what hook each rule can be called: returns 0 if
there are loops. Puts hook bitmask in comefrom. */
static int
-mark_source_chains(struct ipt_table_info *newinfo,
+mark_source_chains(struct xt_table_info *newinfo,
unsigned int valid_hooks, void *entry0)
{
unsigned int hook;
@@ -698,7 +509,7 @@ check_match(struct ipt_entry_match *m,
{
struct ipt_match *match;
- match = try_then_request_module(find_match(m->u.user.name,
+ match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
m->u.user.revision),
"ipt_%s", m->u.user.name);
if (IS_ERR(match) || !match) {
@@ -743,7 +554,8 @@ check_entry(struct ipt_entry *e, const char *name, unsigned int size,
goto cleanup_matches;
t = ipt_get_target(e);
- target = try_then_request_module(find_target(t->u.user.name,
+ target = try_then_request_module(xt_find_target(AF_INET,
+ t->u.user.name,
t->u.user.revision),
"ipt_%s", t->u.user.name);
if (IS_ERR(target) || !target) {
@@ -780,7 +592,7 @@ check_entry(struct ipt_entry *e, const char *name, unsigned int size,
static inline int
check_entry_size_and_hooks(struct ipt_entry *e,
- struct ipt_table_info *newinfo,
+ struct xt_table_info *newinfo,
unsigned char *base,
unsigned char *limit,
const unsigned int *hook_entries,
@@ -814,7 +626,7 @@ check_entry_size_and_hooks(struct ipt_entry *e,
< 0 (not IPT_RETURN). --RR */
/* Clear counters and comefrom */
- e->counters = ((struct ipt_counters) { 0, 0 });
+ e->counters = ((struct xt_counters) { 0, 0 });
e->comefrom = 0;
(*i)++;
@@ -844,7 +656,7 @@ cleanup_entry(struct ipt_entry *e, unsigned int *i)
static int
translate_table(const char *name,
unsigned int valid_hooks,
- struct ipt_table_info *newinfo,
+ struct xt_table_info *newinfo,
void *entry0,
unsigned int size,
unsigned int number,
@@ -921,48 +733,10 @@ translate_table(const char *name,
return ret;
}
-static struct ipt_table_info *
-replace_table(struct ipt_table *table,
- unsigned int num_counters,
- struct ipt_table_info *newinfo,
- int *error)
-{
- struct ipt_table_info *oldinfo;
-
-#ifdef CONFIG_NETFILTER_DEBUG
- {
- int cpu;
-
- for_each_cpu(cpu) {
- struct ipt_entry *table_base = newinfo->entries[cpu];
- if (table_base)
- table_base->comefrom = 0xdead57ac;
- }
- }
-#endif
-
- /* Do the substitution. */
- write_lock_bh(&table->lock);
- /* Check inside lock: is the old number correct? */
- if (num_counters != table->private->number) {
- duprintf("num_counters != table->private->number (%u/%u)\n",
- num_counters, table->private->number);
- write_unlock_bh(&table->lock);
- *error = -EAGAIN;
- return NULL;
- }
- oldinfo = table->private;
- table->private = newinfo;
- newinfo->initial_entries = oldinfo->initial_entries;
- write_unlock_bh(&table->lock);
-
- return oldinfo;
-}
-
/* Gets counters. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
- struct ipt_counters total[],
+ struct xt_counters total[],
unsigned int *i)
{
ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
@@ -983,8 +757,8 @@ set_entry_to_counter(const struct ipt_entry *e,
}
static void
-get_counters(const struct ipt_table_info *t,
- struct ipt_counters counters[])
+get_counters(const struct xt_table_info *t,
+ struct xt_counters counters[])
{
unsigned int cpu;
unsigned int i;
@@ -1023,14 +797,15 @@ copy_entries_to_user(unsigned int total_size,
{
unsigned int off, num, countersize;
struct ipt_entry *e;
- struct ipt_counters *counters;
+ struct xt_counters *counters;
+ struct xt_table_info *private = table->private;
int ret = 0;
void *loc_cpu_entry;
/* We need atomic snapshot of counters: rest doesn't change
(other than comefrom, which userspace doesn't care
about). */
- countersize = sizeof(struct ipt_counters) * table->private->number;
+ countersize = sizeof(struct xt_counters) * private->number;
counters = vmalloc_node(countersize, numa_node_id());
if (counters == NULL)
@@ -1038,14 +813,14 @@ copy_entries_to_user(unsigned int total_size,
/* First, sum counters... */
write_lock_bh(&table->lock);
- get_counters(table->private, counters);
+ get_counters(private, counters);
write_unlock_bh(&table->lock);
/* choose the copy that is on our node/cpu, ...
* This choice is lazy (because current thread is
* allowed to migrate to another cpu)
*/
- loc_cpu_entry = table->private->entries[raw_smp_processor_id()];
+ loc_cpu_entry = private->entries[raw_smp_processor_id()];
/* ... then copy entire thing ... */
if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
ret = -EFAULT;
@@ -1107,74 +882,36 @@ get_entries(const struct ipt_get_entries *entries,
int ret;
struct ipt_table *t;
- t = find_table_lock(entries->name);
+ t = xt_find_table_lock(AF_INET, entries->name);
if (t && !IS_ERR(t)) {
+ struct xt_table_info *private = t->private;
duprintf("t->private->number = %u\n",
- t->private->number);
- if (entries->size == t->private->size)
- ret = copy_entries_to_user(t->private->size,
+ private->number);
+ if (entries->size == private->size)
+ ret = copy_entries_to_user(private->size,
t, uptr->entrytable);
else {
duprintf("get_entries: I've got %u not %u!\n",
- t->private->size,
+ private->size,
entries->size);
ret = -EINVAL;
}
module_put(t->me);
- up(&ipt_mutex);
+ xt_table_unlock(t);
} else
ret = t ? PTR_ERR(t) : -ENOENT;
return ret;
}
-static void free_table_info(struct ipt_table_info *info)
-{
- int cpu;
- for_each_cpu(cpu) {
- if (info->size <= PAGE_SIZE)
- kfree(info->entries[cpu]);
- else
- vfree(info->entries[cpu]);
- }
- kfree(info);
-}
-
-static struct ipt_table_info *alloc_table_info(unsigned int size)
-{
- struct ipt_table_info *newinfo;
- int cpu;
-
- newinfo = kzalloc(sizeof(struct ipt_table_info), GFP_KERNEL);
- if (!newinfo)
- return NULL;
-
- newinfo->size = size;
-
- for_each_cpu(cpu) {
- if (size <= PAGE_SIZE)
- newinfo->entries[cpu] = kmalloc_node(size,
- GFP_KERNEL,
- cpu_to_node(cpu));
- else
- newinfo->entries[cpu] = vmalloc_node(size, cpu_to_node(cpu));
- if (newinfo->entries[cpu] == 0) {
- free_table_info(newinfo);
- return NULL;
- }
- }
-
- return newinfo;
-}
-
static int
do_replace(void __user *user, unsigned int len)
{
int ret;
struct ipt_replace tmp;
struct ipt_table *t;
- struct ipt_table_info *newinfo, *oldinfo;
- struct ipt_counters *counters;
+ struct xt_table_info *newinfo, *oldinfo;
+ struct xt_counters *counters;
void *loc_cpu_entry, *loc_cpu_old_entry;
if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
@@ -1184,11 +921,7 @@ do_replace(void __user *user, unsigned int len)
if (len != sizeof(tmp) + tmp.size)
return -ENOPROTOOPT;
- /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
- if ((SMP_ALIGN(tmp.size) >> PAGE_SHIFT) + 2 > num_physpages)
- return -ENOMEM;
-
- newinfo = alloc_table_info(tmp.size);
+ newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
return -ENOMEM;
@@ -1200,7 +933,7 @@ do_replace(void __user *user, unsigned int len)
goto free_newinfo;
}
- counters = vmalloc(tmp.num_counters * sizeof(struct ipt_counters));
+ counters = vmalloc(tmp.num_counters * sizeof(struct xt_counters));
if (!counters) {
ret = -ENOMEM;
goto free_newinfo;
@@ -1214,7 +947,7 @@ do_replace(void __user *user, unsigned int len)
duprintf("ip_tables: Translated table\n");
- t = try_then_request_module(find_table_lock(tmp.name),
+ t = try_then_request_module(xt_find_table_lock(AF_INET, tmp.name),
"iptable_%s", tmp.name);
if (!t || IS_ERR(t)) {
ret = t ? PTR_ERR(t) : -ENOENT;
@@ -1229,7 +962,7 @@ do_replace(void __user *user, unsigned int len)
goto put_module;
}
- oldinfo = replace_table(t, tmp.num_counters, newinfo, &ret);
+ oldinfo = xt_replace_table(t, tmp.num_counters, newinfo, &ret);
if (!oldinfo)
goto put_module;
@@ -1248,23 +981,23 @@ do_replace(void __user *user, unsigned int len)
/* Decrease module usage counts and free resource */
loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
- free_table_info(oldinfo);
+ xt_free_table_info(oldinfo);
if (copy_to_user(tmp.counters, counters,
- sizeof(struct ipt_counters) * tmp.num_counters) != 0)
+ sizeof(struct xt_counters) * tmp.num_counters) != 0)
ret = -EFAULT;
vfree(counters);
- up(&ipt_mutex);
+ xt_table_unlock(t);
return ret;
put_module:
module_put(t->me);
- up(&ipt_mutex);
+ xt_table_unlock(t);
free_newinfo_counters_untrans:
IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
free_newinfo_counters:
vfree(counters);
free_newinfo:
- free_table_info(newinfo);
+ xt_free_table_info(newinfo);
return ret;
}
@@ -1272,7 +1005,7 @@ do_replace(void __user *user, unsigned int len)
* and everything is OK. */
static inline int
add_counter_to_entry(struct ipt_entry *e,
- const struct ipt_counters addme[],
+ const struct xt_counters addme[],
unsigned int *i)
{
#if 0
@@ -1294,15 +1027,16 @@ static int
do_add_counters(void __user *user, unsigned int len)
{
unsigned int i;
- struct ipt_counters_info tmp, *paddc;
+ struct xt_counters_info tmp, *paddc;
struct ipt_table *t;
+ struct xt_table_info *private;
int ret = 0;
void *loc_cpu_entry;
if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
return -EFAULT;
- if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct ipt_counters))
+ if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct xt_counters))
return -EINVAL;
paddc = vmalloc_node(len, numa_node_id());
@@ -1314,29 +1048,30 @@ do_add_counters(void __user *user, unsigned int len)
goto free;
}
- t = find_table_lock(tmp.name);
+ t = xt_find_table_lock(AF_INET, tmp.name);
if (!t || IS_ERR(t)) {
ret = t ? PTR_ERR(t) : -ENOENT;
goto free;
}
write_lock_bh(&t->lock);
- if (t->private->number != paddc->num_counters) {
+ private = t->private;
+ if (private->number != paddc->num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
i = 0;
/* Choose the copy that is on our node */
- loc_cpu_entry = t->private->entries[raw_smp_processor_id()];
+ loc_cpu_entry = private->entries[raw_smp_processor_id()];
IPT_ENTRY_ITERATE(loc_cpu_entry,
- t->private->size,
+ private->size,
add_counter_to_entry,
paddc->counters,
&i);
unlock_up_free:
write_unlock_bh(&t->lock);
- up(&ipt_mutex);
+ xt_table_unlock(t);
module_put(t->me);
free:
vfree(paddc);
@@ -1395,25 +1130,26 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
}
name[IPT_TABLE_MAXNAMELEN-1] = '\0';
- t = try_then_request_module(find_table_lock(name),
+ t = try_then_request_module(xt_find_table_lock(AF_INET, name),
"iptable_%s", name);
if (t && !IS_ERR(t)) {
struct ipt_getinfo info;
+ struct xt_table_info *private = t->private;
info.valid_hooks = t->valid_hooks;
- memcpy(info.hook_entry, t->private->hook_entry,
+ memcpy(info.hook_entry, private->hook_entry,
sizeof(info.hook_entry));
- memcpy(info.underflow, t->private->underflow,
+ memcpy(info.underflow, private->underflow,
sizeof(info.underflow));
- info.num_entries = t->private->number;
- info.size = t->private->size;
+ info.num_entries = private->number;
+ info.size = private->size;
memcpy(info.name, name, sizeof(info.name));
if (copy_to_user(user, &info, *len) != 0)
ret = -EFAULT;
else
ret = 0;
- up(&ipt_mutex);
+ xt_table_unlock(t);
module_put(t->me);
} else
ret = t ? PTR_ERR(t) : -ENOENT;
@@ -1440,7 +1176,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
case IPT_SO_GET_REVISION_MATCH:
case IPT_SO_GET_REVISION_TARGET: {
struct ipt_get_revision rev;
- int (*revfn)(const char *, u8, int *);
+ int target;
if (*len != sizeof(rev)) {
ret = -EINVAL;
@@ -1452,12 +1188,13 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
}
if (cmd == IPT_SO_GET_REVISION_TARGET)
- revfn = target_revfn;
+ target = 1;
else
- revfn = match_revfn;
+ target = 0;
- try_then_request_module(find_revision(rev.name, rev.revision,
- revfn, &ret),
+ try_then_request_module(xt_find_revision(AF_INET, rev.name,
+ rev.revision,
+ target, &ret),
"ipt_%s", rev.name);
break;
}
@@ -1470,60 +1207,15 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
return ret;
}
-/* Registration hooks for targets. */
-int
-ipt_register_target(struct ipt_target *target)
+int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
{
int ret;
-
- ret = down_interruptible(&ipt_mutex);
- if (ret != 0)
- return ret;
- list_add(&target->list, &ipt_target);
- up(&ipt_mutex);
- return ret;
-}
-
-void
-ipt_unregister_target(struct ipt_target *target)
-{
- down(&ipt_mutex);
- LIST_DELETE(&ipt_target, target);
- up(&ipt_mutex);
-}
-
-int
-ipt_register_match(struct ipt_match *match)
-{
- int ret;
-
- ret = down_interruptible(&ipt_mutex);
- if (ret != 0)
- return ret;
-
- list_add(&match->list, &ipt_match);
- up(&ipt_mutex);
-
- return ret;
-}
-
-void
-ipt_unregister_match(struct ipt_match *match)
-{
- down(&ipt_mutex);
- LIST_DELETE(&ipt_match, match);
- up(&ipt_mutex);
-}
-
-int ipt_register_table(struct ipt_table *table, const struct ipt_replace *repl)
-{
- int ret;
- struct ipt_table_info *newinfo;
- static struct ipt_table_info bootstrap
+ struct xt_table_info *newinfo;
+ static struct xt_table_info bootstrap
= { 0, 0, 0, { 0 }, { 0 }, { } };
void *loc_cpu_entry;
- newinfo = alloc_table_info(repl->size);
+ newinfo = xt_alloc_table_info(repl->size);
if (!newinfo)
return -ENOMEM;
@@ -1539,246 +1231,29 @@ int ipt_register_table(struct ipt_table *table, const struct ipt_replace *repl)
repl->hook_entry,
repl->underflow);
if (ret != 0) {
- free_table_info(newinfo);
+ xt_free_table_info(newinfo);
return ret;
}
- ret = down_interruptible(&ipt_mutex);
- if (ret != 0) {
- free_table_info(newinfo);
+ if (xt_register_table(table, &bootstrap, newinfo) != 0) {
+ xt_free_table_info(newinfo);
return ret;
}
- /* Don't autoload: we'd eat our tail... */
- if (list_named_find(&ipt_tables, table->name)) {
- ret = -EEXIST;
- goto free_unlock;
- }
-
- /* Simplifies replace_table code. */
- table->private = &bootstrap;
- if (!replace_table(table, 0, newinfo, &ret))
- goto free_unlock;
-
- duprintf("table->private->number = %u\n",
- table->private->number);
-
- /* save number of initial entries */
- table->private->initial_entries = table->private->number;
-
- rwlock_init(&table->lock);
- list_prepend(&ipt_tables, table);
-
- unlock:
- up(&ipt_mutex);
- return ret;
-
- free_unlock:
- free_table_info(newinfo);
- goto unlock;
+ return 0;
}
void ipt_unregister_table(struct ipt_table *table)
{
+ struct xt_table_info *private;
void *loc_cpu_entry;
- down(&ipt_mutex);
- LIST_DELETE(&ipt_tables, table);
- up(&ipt_mutex);
+ private = xt_unregister_table(table);
/* Decrease module usage counts and free resources */
- loc_cpu_entry = table->private->entries[raw_smp_processor_id()];
- IPT_ENTRY_ITERATE(loc_cpu_entry, table->private->size,
- cleanup_entry, NULL);
- free_table_info(table->private);
-}
-
-/* Returns 1 if the port is matched by the range, 0 otherwise */
-static inline int
-port_match(u_int16_t min, u_int16_t max, u_int16_t port, int invert)
-{
- int ret;
-
- ret = (port >= min && port <= max) ^ invert;
- return ret;
-}
-
-static int
-tcp_find_option(u_int8_t option,
- const struct sk_buff *skb,
- unsigned int optlen,
- int invert,
- int *hotdrop)
-{
- /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */
- u_int8_t _opt[60 - sizeof(struct tcphdr)], *op;
- unsigned int i;
-
- duprintf("tcp_match: finding option\n");
-
- if (!optlen)
- return invert;
-
- /* If we don't have the whole header, drop packet. */
- op = skb_header_pointer(skb,
- skb->nh.iph->ihl*4 + sizeof(struct tcphdr),
- optlen, _opt);
- if (op == NULL) {
- *hotdrop = 1;
- return 0;
- }
-
- for (i = 0; i < optlen; ) {
- if (op[i] == option) return !invert;
- if (op[i] < 2) i++;
- else i += op[i+1]?:1;
- }
-
- return invert;
-}
-
-static int
-tcp_match(const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const void *matchinfo,
- int offset,
- int *hotdrop)
-{
- struct tcphdr _tcph, *th;
- const struct ipt_tcp *tcpinfo = matchinfo;
-
- if (offset) {
- /* To quote Alan:
-
- Don't allow a fragment of TCP 8 bytes in. Nobody normal
- causes this. Its a cracker trying to break in by doing a
- flag overwrite to pass the direction checks.
- */
- if (offset == 1) {
- duprintf("Dropping evil TCP offset=1 frag.\n");
- *hotdrop = 1;
- }
- /* Must not be a fragment. */
- return 0;
- }
-
-#define FWINVTCP(bool,invflg) ((bool) ^ !!(tcpinfo->invflags & invflg))
-
- th = skb_header_pointer(skb, skb->nh.iph->ihl*4,
- sizeof(_tcph), &_tcph);
- if (th == NULL) {
- /* We've been asked to examine this packet, and we
- can't. Hence, no choice but to drop. */
- duprintf("Dropping evil TCP offset=0 tinygram.\n");
- *hotdrop = 1;
- return 0;
- }
-
- if (!port_match(tcpinfo->spts[0], tcpinfo->spts[1],
- ntohs(th->source),
- !!(tcpinfo->invflags & IPT_TCP_INV_SRCPT)))
- return 0;
- if (!port_match(tcpinfo->dpts[0], tcpinfo->dpts[1],
- ntohs(th->dest),
- !!(tcpinfo->invflags & IPT_TCP_INV_DSTPT)))
- return 0;
- if (!FWINVTCP((((unsigned char *)th)[13] & tcpinfo->flg_mask)
- == tcpinfo->flg_cmp,
- IPT_TCP_INV_FLAGS))
- return 0;
- if (tcpinfo->option) {
- if (th->doff * 4 < sizeof(_tcph)) {
- *hotdrop = 1;
- return 0;
- }
- if (!tcp_find_option(tcpinfo->option, skb,
- th->doff*4 - sizeof(_tcph),
- tcpinfo->invflags & IPT_TCP_INV_OPTION,
- hotdrop))
- return 0;
- }
- return 1;
-}
-
-/* Called when user tries to insert an entry of this type. */
-static int
-tcp_checkentry(const char *tablename,
- const struct ipt_ip *ip,
- void *matchinfo,
- unsigned int matchsize,
- unsigned int hook_mask)
-{
- const struct ipt_tcp *tcpinfo = matchinfo;
-
- /* Must specify proto == TCP, and no unknown invflags */
- return ip->proto == IPPROTO_TCP
- && !(ip->invflags & IPT_INV_PROTO)
- && matchsize == IPT_ALIGN(sizeof(struct ipt_tcp))
- && !(tcpinfo->invflags & ~IPT_TCP_INV_MASK);
-}
-
-static int
-udp_match(const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const void *matchinfo,
- int offset,
- int *hotdrop)
-{
- struct udphdr _udph, *uh;
- const struct ipt_udp *udpinfo = matchinfo;
-
- /* Must not be a fragment. */
- if (offset)
- return 0;
-
- uh = skb_header_pointer(skb, skb->nh.iph->ihl*4,
- sizeof(_udph), &_udph);
- if (uh == NULL) {
- /* We've been asked to examine this packet, and we
- can't. Hence, no choice but to drop. */
- duprintf("Dropping evil UDP tinygram.\n");
- *hotdrop = 1;
- return 0;
- }
-
- return port_match(udpinfo->spts[0], udpinfo->spts[1],
- ntohs(uh->source),
- !!(udpinfo->invflags & IPT_UDP_INV_SRCPT))
- && port_match(udpinfo->dpts[0], udpinfo->dpts[1],
- ntohs(uh->dest),
- !!(udpinfo->invflags & IPT_UDP_INV_DSTPT));
-}
-
-/* Called when user tries to insert an entry of this type. */
-static int
-udp_checkentry(const char *tablename,
- const struct ipt_ip *ip,
- void *matchinfo,
- unsigned int matchinfosize,
- unsigned int hook_mask)
-{
- const struct ipt_udp *udpinfo = matchinfo;
-
- /* Must specify proto == UDP, and no unknown invflags */
- if (ip->proto != IPPROTO_UDP || (ip->invflags & IPT_INV_PROTO)) {
- duprintf("ipt_udp: Protocol %u != %u\n", ip->proto,
- IPPROTO_UDP);
- return 0;
- }
- if (matchinfosize != IPT_ALIGN(sizeof(struct ipt_udp))) {
- duprintf("ipt_udp: matchsize %u != %u\n",
- matchinfosize, IPT_ALIGN(sizeof(struct ipt_udp)));
- return 0;
- }
- if (udpinfo->invflags & ~IPT_UDP_INV_MASK) {
- duprintf("ipt_udp: unknown flags %X\n",
- udpinfo->invflags);
- return 0;
- }
-
- return 1;
+ loc_cpu_entry = private->entries[raw_smp_processor_id()];
+ IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
+ xt_free_table_info(private);
}
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
@@ -1797,6 +1272,7 @@ icmp_match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
struct icmphdr _icmph, *ic;
@@ -1806,8 +1282,7 @@ icmp_match(const struct sk_buff *skb,
if (offset)
return 0;
- ic = skb_header_pointer(skb, skb->nh.iph->ihl*4,
- sizeof(_icmph), &_icmph);
+ ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
if (ic == NULL) {
/* We've been asked to examine this packet, and we
* can't. Hence, no choice but to drop.
@@ -1827,11 +1302,12 @@ icmp_match(const struct sk_buff *skb,
/* Called when user tries to insert an entry of this type. */
static int
icmp_checkentry(const char *tablename,
- const struct ipt_ip *ip,
+ const void *info,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
{
+ const struct ipt_ip *ip = info;
const struct ipt_icmp *icmpinfo = matchinfo;
/* Must specify proto == ICMP, and no unknown invflags */
@@ -1861,123 +1337,22 @@ static struct nf_sockopt_ops ipt_sockopts = {
.get = do_ipt_get_ctl,
};
-static struct ipt_match tcp_matchstruct = {
- .name = "tcp",
- .match = &tcp_match,
- .checkentry = &tcp_checkentry,
-};
-
-static struct ipt_match udp_matchstruct = {
- .name = "udp",
- .match = &udp_match,
- .checkentry = &udp_checkentry,
-};
-
static struct ipt_match icmp_matchstruct = {
.name = "icmp",
.match = &icmp_match,
.checkentry = &icmp_checkentry,
};
-#ifdef CONFIG_PROC_FS
-static inline int print_name(const char *i,
- off_t start_offset, char *buffer, int length,
- off_t *pos, unsigned int *count)
-{
- if ((*count)++ >= start_offset) {
- unsigned int namelen;
-
- namelen = sprintf(buffer + *pos, "%s\n",
- i + sizeof(struct list_head));
- if (*pos + namelen > length) {
- /* Stop iterating */
- return 1;
- }
- *pos += namelen;
- }
- return 0;
-}
-
-static inline int print_target(const struct ipt_target *t,
- off_t start_offset, char *buffer, int length,
- off_t *pos, unsigned int *count)
-{
- if (t == &ipt_standard_target || t == &ipt_error_target)
- return 0;
- return print_name((char *)t, start_offset, buffer, length, pos, count);
-}
-
-static int ipt_get_tables(char *buffer, char **start, off_t offset, int length)
-{
- off_t pos = 0;
- unsigned int count = 0;
-
- if (down_interruptible(&ipt_mutex) != 0)
- return 0;
-
- LIST_FIND(&ipt_tables, print_name, void *,
- offset, buffer, length, &pos, &count);
-
- up(&ipt_mutex);
-
- /* `start' hack - see fs/proc/generic.c line ~105 */
- *start=(char *)((unsigned long)count-offset);
- return pos;
-}
-
-static int ipt_get_targets(char *buffer, char **start, off_t offset, int length)
-{
- off_t pos = 0;
- unsigned int count = 0;
-
- if (down_interruptible(&ipt_mutex) != 0)
- return 0;
-
- LIST_FIND(&ipt_target, print_target, struct ipt_target *,
- offset, buffer, length, &pos, &count);
-
- up(&ipt_mutex);
-
- *start = (char *)((unsigned long)count - offset);
- return pos;
-}
-
-static int ipt_get_matches(char *buffer, char **start, off_t offset, int length)
-{
- off_t pos = 0;
- unsigned int count = 0;
-
- if (down_interruptible(&ipt_mutex) != 0)
- return 0;
-
- LIST_FIND(&ipt_match, print_name, void *,
- offset, buffer, length, &pos, &count);
-
- up(&ipt_mutex);
-
- *start = (char *)((unsigned long)count - offset);
- return pos;
-}
-
-static const struct { char *name; get_info_t *get_info; } ipt_proc_entry[] =
-{ { "ip_tables_names", ipt_get_tables },
- { "ip_tables_targets", ipt_get_targets },
- { "ip_tables_matches", ipt_get_matches },
- { NULL, NULL} };
-#endif /*CONFIG_PROC_FS*/
-
static int __init init(void)
{
int ret;
+ xt_proto_init(AF_INET);
+
/* No one else will be downing sem now, so we won't sleep */
- down(&ipt_mutex);
- list_append(&ipt_target, &ipt_standard_target);
- list_append(&ipt_target, &ipt_error_target);
- list_append(&ipt_match, &tcp_matchstruct);
- list_append(&ipt_match, &udp_matchstruct);
- list_append(&ipt_match, &icmp_matchstruct);
- up(&ipt_mutex);
+ xt_register_target(AF_INET, &ipt_standard_target);
+ xt_register_target(AF_INET, &ipt_error_target);
+ xt_register_match(AF_INET, &icmp_matchstruct);
/* Register setsockopt */
ret = nf_register_sockopt(&ipt_sockopts);
@@ -1986,49 +1361,23 @@ static int __init init(void)
return ret;
}
-#ifdef CONFIG_PROC_FS
- {
- struct proc_dir_entry *proc;
- int i;
-
- for (i = 0; ipt_proc_entry[i].name; i++) {
- proc = proc_net_create(ipt_proc_entry[i].name, 0,
- ipt_proc_entry[i].get_info);
- if (!proc) {
- while (--i >= 0)
- proc_net_remove(ipt_proc_entry[i].name);
- nf_unregister_sockopt(&ipt_sockopts);
- return -ENOMEM;
- }
- proc->owner = THIS_MODULE;
- }
- }
-#endif
-
- printk("ip_tables: (C) 2000-2002 Netfilter core team\n");
+ printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
return 0;
}
static void __exit fini(void)
{
nf_unregister_sockopt(&ipt_sockopts);
-#ifdef CONFIG_PROC_FS
- {
- int i;
- for (i = 0; ipt_proc_entry[i].name; i++)
- proc_net_remove(ipt_proc_entry[i].name);
- }
-#endif
+
+ xt_unregister_match(AF_INET, &icmp_matchstruct);
+ xt_unregister_target(AF_INET, &ipt_error_target);
+ xt_unregister_target(AF_INET, &ipt_standard_target);
+
+ xt_proto_fini(AF_INET);
}
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
-EXPORT_SYMBOL(ipt_register_match);
-EXPORT_SYMBOL(ipt_unregister_match);
EXPORT_SYMBOL(ipt_do_table);
-EXPORT_SYMBOL(ipt_register_target);
-EXPORT_SYMBOL(ipt_unregister_target);
-EXPORT_SYMBOL(ipt_find_target);
-
module_init(init);
module_exit(fini);
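
The registration hunks above reduce ip_tables to a thin AF_INET front end: built-in targets and matches now go straight to xt_register_target()/xt_register_match(), and the old ipt-private lists and /proc listings are gone. A minimal sketch of a match module written against the interfaces as they appear in this patch (the "example" names are made up; the prototypes and registration calls mirror the ones used here):

/* Sketch only: a trivial AF_INET match using the prototypes shown in
 * this patch (note the new protoff argument and the void * entry info
 * in checkentry).  Registration goes directly to x_tables. */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>

static int
example_match(const struct sk_buff *skb,
	      const struct net_device *in,
	      const struct net_device *out,
	      const void *matchinfo,
	      int offset,
	      unsigned int protoff,	/* offset of the transport header */
	      int *hotdrop)
{
	return !offset;			/* match only non-fragments */
}

static int
example_checkentry(const char *tablename,
		   const void *ip,	/* was: const struct ipt_ip * */
		   void *matchinfo,
		   unsigned int matchsize,
		   unsigned int hook_mask)
{
	return 1;			/* nothing to validate in this sketch */
}

static struct ipt_match example_match_reg = {
	.name		= "example",
	.match		= example_match,
	.checkentry	= example_checkentry,
	.me		= THIS_MODULE,
};

static int __init example_init(void)
{
	return xt_register_match(AF_INET, &example_match_reg);
}

static void __exit example_fini(void)
{
	xt_unregister_match(AF_INET, &example_match_reg);
}

module_init(example_init);
module_exit(example_fini);
MODULE_LICENSE("GPL");
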
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 45c52d8f4d9..d9bc971f03a 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -379,12 +379,13 @@ target(struct sk_buff **pskb,
static int
checkentry(const char *tablename,
- const struct ipt_entry *e,
+ const void *e_void,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
{
struct ipt_clusterip_tgt_info *cipinfo = targinfo;
+ const struct ipt_entry *e = e_void;
struct clusterip_config *config;
diff --git a/net/ipv4/netfilter/ipt_DSCP.c b/net/ipv4/netfilter/ipt_DSCP.c
index 6e319570a28..898cdf79ce1 100644
--- a/net/ipv4/netfilter/ipt_DSCP.c
+++ b/net/ipv4/netfilter/ipt_DSCP.c
@@ -57,7 +57,7 @@ target(struct sk_buff **pskb,
static int
checkentry(const char *tablename,
- const struct ipt_entry *e,
+ const void *e_void,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index a1319693f64..706445426a6 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -113,12 +113,13 @@ target(struct sk_buff **pskb,
static int
checkentry(const char *tablename,
- const struct ipt_entry *e,
+ const void *e_void,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
{
const struct ipt_ECN_info *einfo = (struct ipt_ECN_info *)targinfo;
+ const struct ipt_entry *e = e_void;
if (targinfosize != IPT_ALIGN(sizeof(struct ipt_ECN_info))) {
printk(KERN_WARNING "ECN: targinfosize %u != %Zu\n",
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index 30be0f1dae3..6606ddb66a2 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -431,7 +431,7 @@ ipt_log_target(struct sk_buff **pskb,
}
static int ipt_log_checkentry(const char *tablename,
- const struct ipt_entry *e,
+ const void *e,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index 27860510ca6..12c56d3343c 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -40,7 +40,7 @@ static DEFINE_RWLOCK(masq_lock);
/* FIXME: Multiple targets. --RR */
static int
masquerade_check(const char *tablename,
- const struct ipt_entry *e,
+ const void *e,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c
index e6e7b609536..b074467fe67 100644
--- a/net/ipv4/netfilter/ipt_NETMAP.c
+++ b/net/ipv4/netfilter/ipt_NETMAP.c
@@ -31,7 +31,7 @@ MODULE_DESCRIPTION("iptables 1:1 NAT mapping of IP networks target");
static int
check(const char *tablename,
- const struct ipt_entry *e,
+ const void *e,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
diff --git a/net/ipv4/netfilter/ipt_NFQUEUE.c b/net/ipv4/netfilter/ipt_NFQUEUE.c
deleted file mode 100644
index 3cedc9be880..00000000000
--- a/net/ipv4/netfilter/ipt_NFQUEUE.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/* iptables module for using new netfilter netlink queue
- *
- * (C) 2005 by Harald Welte <laforge@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_NFQUEUE.h>
-
-MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
-MODULE_DESCRIPTION("iptables NFQUEUE target");
-MODULE_LICENSE("GPL");
-
-static unsigned int
-target(struct sk_buff **pskb,
- const struct net_device *in,
- const struct net_device *out,
- unsigned int hooknum,
- const void *targinfo,
- void *userinfo)
-{
- const struct ipt_NFQ_info *tinfo = targinfo;
-
- return NF_QUEUE_NR(tinfo->queuenum);
-}
-
-static int
-checkentry(const char *tablename,
- const struct ipt_entry *e,
- void *targinfo,
- unsigned int targinfosize,
- unsigned int hook_mask)
-{
- if (targinfosize != IPT_ALIGN(sizeof(struct ipt_NFQ_info))) {
- printk(KERN_WARNING "NFQUEUE: targinfosize %u != %Zu\n",
- targinfosize,
- IPT_ALIGN(sizeof(struct ipt_NFQ_info)));
- return 0;
- }
-
- return 1;
-}
-
-static struct ipt_target ipt_NFQ_reg = {
- .name = "NFQUEUE",
- .target = target,
- .checkentry = checkentry,
- .me = THIS_MODULE,
-};
-
-static int __init init(void)
-{
- return ipt_register_target(&ipt_NFQ_reg);
-}
-
-static void __exit fini(void)
-{
- ipt_unregister_target(&ipt_NFQ_reg);
-}
-
-module_init(init);
-module_exit(fini);
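
The deleted NFQUEUE target carries no IPv4-specific logic beyond reading a queue number from its target info, which is presumably why it can leave net/ipv4 once the shared x_tables layer exists. Below is a hedged sketch of registering the same callbacks for more than one address family; only the xt_register_target(AF_INET, ...) call mirrors this patch, while the AF_INET6 registration and the per-family registration records are assumptions about how a protocol-independent replacement would be wired up:

/* Illustration only: one set of callbacks, one registration record per
 * address family.  Includes as in the deleted file, plus x_tables.h. */
static unsigned int
nfqueue_target(struct sk_buff **pskb,
	       const struct net_device *in,
	       const struct net_device *out,
	       unsigned int hooknum,
	       const void *targinfo,
	       void *userinfo)
{
	const struct ipt_NFQ_info *tinfo = targinfo;

	return NF_QUEUE_NR(tinfo->queuenum);
}

static struct xt_target nfqueue_reg_v4 = {
	.name	= "NFQUEUE",
	.target	= nfqueue_target,
	.me	= THIS_MODULE,
};

static struct xt_target nfqueue_reg_v6 = {
	.name	= "NFQUEUE",
	.target	= nfqueue_target,
	.me	= THIS_MODULE,
};

static int __init nfqueue_init(void)
{
	int ret = xt_register_target(AF_INET, &nfqueue_reg_v4);

	if (ret)
		return ret;
	ret = xt_register_target(AF_INET6, &nfqueue_reg_v6);
	if (ret)
		xt_unregister_target(AF_INET, &nfqueue_reg_v4);
	return ret;
}

static void __exit nfqueue_fini(void)
{
	xt_unregister_target(AF_INET6, &nfqueue_reg_v6);
	xt_unregister_target(AF_INET, &nfqueue_reg_v4);
}

module_init(nfqueue_init);
module_exit(nfqueue_fini);
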
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c
index 5245bfd33d5..140be51f2f0 100644
--- a/net/ipv4/netfilter/ipt_REDIRECT.c
+++ b/net/ipv4/netfilter/ipt_REDIRECT.c
@@ -33,7 +33,7 @@ MODULE_DESCRIPTION("iptables REDIRECT target module");
/* FIXME: Take multiple ranges --RR */
static int
redirect_check(const char *tablename,
- const struct ipt_entry *e,
+ const void *e,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 6693526ae12..3eb47aae78c 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -282,12 +282,13 @@ static unsigned int reject(struct sk_buff **pskb,
}
static int check(const char *tablename,
- const struct ipt_entry *e,
+ const void *e_void,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
{
const struct ipt_reject_info *rejinfo = targinfo;
+ const struct ipt_entry *e = e_void;
if (targinfosize != IPT_ALIGN(sizeof(struct ipt_reject_info))) {
DEBUGP("REJECT: targinfosize %u != 0\n", targinfosize);
diff --git a/net/ipv4/netfilter/ipt_SAME.c b/net/ipv4/netfilter/ipt_SAME.c
index 7a0536d864a..a22de59bba0 100644
--- a/net/ipv4/netfilter/ipt_SAME.c
+++ b/net/ipv4/netfilter/ipt_SAME.c
@@ -49,7 +49,7 @@ MODULE_DESCRIPTION("iptables special SNAT module for consistent sourceip");
static int
same_check(const char *tablename,
- const struct ipt_entry *e,
+ const void *e,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
diff --git a/net/ipv4/netfilter/ipt_TCPMSS.c b/net/ipv4/netfilter/ipt_TCPMSS.c
index 8db70d6908c..c122841e182 100644
--- a/net/ipv4/netfilter/ipt_TCPMSS.c
+++ b/net/ipv4/netfilter/ipt_TCPMSS.c
@@ -210,12 +210,13 @@ static inline int find_syn_match(const struct ipt_entry_match *m)
/* Must specify -p tcp --syn/--tcp-flags SYN */
static int
ipt_tcpmss_checkentry(const char *tablename,
- const struct ipt_entry *e,
+ const void *e_void,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
{
const struct ipt_tcpmss_info *tcpmssinfo = targinfo;
+ const struct ipt_entry *e = e_void;
if (targinfosize != IPT_ALIGN(sizeof(struct ipt_tcpmss_info))) {
DEBUGP("ipt_tcpmss_checkentry: targinfosize %u != %u\n",
diff --git a/net/ipv4/netfilter/ipt_TOS.c b/net/ipv4/netfilter/ipt_TOS.c
index deadb36d442..3a44a56db23 100644
--- a/net/ipv4/netfilter/ipt_TOS.c
+++ b/net/ipv4/netfilter/ipt_TOS.c
@@ -52,7 +52,7 @@ target(struct sk_buff **pskb,
static int
checkentry(const char *tablename,
- const struct ipt_entry *e,
+ const void *e_void,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
diff --git a/net/ipv4/netfilter/ipt_TTL.c b/net/ipv4/netfilter/ipt_TTL.c
index b9ae6a9382f..b769eb23197 100644
--- a/net/ipv4/netfilter/ipt_TTL.c
+++ b/net/ipv4/netfilter/ipt_TTL.c
@@ -66,7 +66,7 @@ ipt_ttl_target(struct sk_buff **pskb, const struct net_device *in,
}
static int ipt_ttl_checkentry(const char *tablename,
- const struct ipt_entry *e,
+ const void *e,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 38641cd0612..641dbc47765 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -330,7 +330,7 @@ static void ipt_logfn(unsigned int pf,
}
static int ipt_ulog_checkentry(const char *tablename,
- const struct ipt_entry *e,
+ const void *e,
void *targinfo,
unsigned int targinfosize,
unsigned int hookmask)
diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c
index e19c2a52d00..d6b83a97651 100644
--- a/net/ipv4/netfilter/ipt_addrtype.c
+++ b/net/ipv4/netfilter/ipt_addrtype.c
@@ -29,7 +29,7 @@ static inline int match_type(u_int32_t addr, u_int16_t mask)
static int match(const struct sk_buff *skb, const struct net_device *in,
const struct net_device *out, const void *matchinfo,
- int offset, int *hotdrop)
+ int offset, unsigned int protoff, int *hotdrop)
{
const struct ipt_addrtype_info *info = matchinfo;
const struct iphdr *iph = skb->nh.iph;
@@ -43,7 +43,7 @@ static int match(const struct sk_buff *skb, const struct net_device *in,
return ret;
}
-static int checkentry(const char *tablename, const struct ipt_ip *ip,
+static int checkentry(const char *tablename, const void *ip,
void *matchinfo, unsigned int matchsize,
unsigned int hook_mask)
{
diff --git a/net/ipv4/netfilter/ipt_ah.c b/net/ipv4/netfilter/ipt_ah.c
index a0fea847cb7..144adfec13c 100644
--- a/net/ipv4/netfilter/ipt_ah.c
+++ b/net/ipv4/netfilter/ipt_ah.c
@@ -41,6 +41,7 @@ match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
struct ip_auth_hdr _ahdr, *ah;
@@ -50,7 +51,7 @@ match(const struct sk_buff *skb,
if (offset)
return 0;
- ah = skb_header_pointer(skb, skb->nh.iph->ihl * 4,
+ ah = skb_header_pointer(skb, protoff,
sizeof(_ahdr), &_ahdr);
if (ah == NULL) {
/* We've been asked to examine this packet, and we
@@ -69,12 +70,13 @@ match(const struct sk_buff *skb,
/* Called when user tries to insert an entry of this type. */
static int
checkentry(const char *tablename,
- const struct ipt_ip *ip,
+ const void *ip_void,
void *matchinfo,
unsigned int matchinfosize,
unsigned int hook_mask)
{
const struct ipt_ah *ahinfo = matchinfo;
+ const struct ipt_ip *ip = ip_void;
/* Must specify proto == AH, and no unknown invflags */
if (ip->proto != IPPROTO_AH || (ip->invflags & IPT_INV_PROTO)) {
diff --git a/net/ipv4/netfilter/ipt_dscp.c b/net/ipv4/netfilter/ipt_dscp.c
index 5df52a64a5d..92063b4f860 100644
--- a/net/ipv4/netfilter/ipt_dscp.c
+++ b/net/ipv4/netfilter/ipt_dscp.c
@@ -21,7 +21,7 @@ MODULE_LICENSE("GPL");
static int match(const struct sk_buff *skb, const struct net_device *in,
const struct net_device *out, const void *matchinfo,
- int offset, int *hotdrop)
+ int offset, unsigned int protoff, int *hotdrop)
{
const struct ipt_dscp_info *info = matchinfo;
const struct iphdr *iph = skb->nh.iph;
@@ -31,7 +31,7 @@ static int match(const struct sk_buff *skb, const struct net_device *in,
return ((iph->tos&IPT_DSCP_MASK) == sh_dscp) ^ info->invert;
}
-static int checkentry(const char *tablename, const struct ipt_ip *ip,
+static int checkentry(const char *tablename, const void *ip,
void *matchinfo, unsigned int matchsize,
unsigned int hook_mask)
{
diff --git a/net/ipv4/netfilter/ipt_ecn.c b/net/ipv4/netfilter/ipt_ecn.c
index b6f7181e89c..e68b0c7981f 100644
--- a/net/ipv4/netfilter/ipt_ecn.c
+++ b/net/ipv4/netfilter/ipt_ecn.c
@@ -67,7 +67,7 @@ static inline int match_tcp(const struct sk_buff *skb,
static int match(const struct sk_buff *skb, const struct net_device *in,
const struct net_device *out, const void *matchinfo,
- int offset, int *hotdrop)
+ int offset, unsigned int protoff, int *hotdrop)
{
const struct ipt_ecn_info *info = matchinfo;
@@ -85,11 +85,12 @@ static int match(const struct sk_buff *skb, const struct net_device *in,
return 1;
}
-static int checkentry(const char *tablename, const struct ipt_ip *ip,
+static int checkentry(const char *tablename, const void *ip_void,
void *matchinfo, unsigned int matchsize,
unsigned int hook_mask)
{
const struct ipt_ecn_info *info = matchinfo;
+ const struct ipt_ip *ip = ip_void;
if (matchsize != IPT_ALIGN(sizeof(struct ipt_ecn_info)))
return 0;
diff --git a/net/ipv4/netfilter/ipt_esp.c b/net/ipv4/netfilter/ipt_esp.c
index e1d0dd31e11..9de191a8162 100644
--- a/net/ipv4/netfilter/ipt_esp.c
+++ b/net/ipv4/netfilter/ipt_esp.c
@@ -42,6 +42,7 @@ match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
struct ip_esp_hdr _esp, *eh;
@@ -51,7 +52,7 @@ match(const struct sk_buff *skb,
if (offset)
return 0;
- eh = skb_header_pointer(skb, skb->nh.iph->ihl * 4,
+ eh = skb_header_pointer(skb, protoff,
sizeof(_esp), &_esp);
if (eh == NULL) {
/* We've been asked to examine this packet, and we
@@ -70,12 +71,13 @@ match(const struct sk_buff *skb,
/* Called when user tries to insert an entry of this type. */
static int
checkentry(const char *tablename,
- const struct ipt_ip *ip,
+ const void *ip_void,
void *matchinfo,
unsigned int matchinfosize,
unsigned int hook_mask)
{
const struct ipt_esp *espinfo = matchinfo;
+ const struct ipt_ip *ip = ip_void;
/* Must specify proto == ESP, and no unknown invflags */
if (ip->proto != IPPROTO_ESP || (ip->invflags & IPT_INV_PROTO)) {
diff --git a/net/ipv4/netfilter/ipt_hashlimit.c b/net/ipv4/netfilter/ipt_hashlimit.c
index 2dd1cccbdab..4fe48c1bd5f 100644
--- a/net/ipv4/netfilter/ipt_hashlimit.c
+++ b/net/ipv4/netfilter/ipt_hashlimit.c
@@ -429,6 +429,7 @@ hashlimit_match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
struct ipt_hashlimit_info *r =
@@ -504,7 +505,7 @@ hashlimit_match(const struct sk_buff *skb,
static int
hashlimit_checkentry(const char *tablename,
- const struct ipt_ip *ip,
+ const void *inf,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
diff --git a/net/ipv4/netfilter/ipt_iprange.c b/net/ipv4/netfilter/ipt_iprange.c
index b835b7b2e56..13fb16fb789 100644
--- a/net/ipv4/netfilter/ipt_iprange.c
+++ b/net/ipv4/netfilter/ipt_iprange.c
@@ -28,7 +28,7 @@ match(const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const void *matchinfo,
- int offset, int *hotdrop)
+ int offset, unsigned int protoff, int *hotdrop)
{
const struct ipt_iprange_info *info = matchinfo;
const struct iphdr *iph = skb->nh.iph;
@@ -63,7 +63,7 @@ match(const struct sk_buff *skb,
}
static int check(const char *tablename,
- const struct ipt_ip *ip,
+ const void *inf,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
diff --git a/net/ipv4/netfilter/ipt_length.c b/net/ipv4/netfilter/ipt_length.c
deleted file mode 100644
index 4eabcfbda9d..00000000000
--- a/net/ipv4/netfilter/ipt_length.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/* Kernel module to match packet length. */
-/* (C) 1999-2001 James Morris <jmorros@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-
-#include <linux/netfilter_ipv4/ipt_length.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-
-MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
-MODULE_DESCRIPTION("IP tables packet length matching module");
-MODULE_LICENSE("GPL");
-
-static int
-match(const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const void *matchinfo,
- int offset,
- int *hotdrop)
-{
- const struct ipt_length_info *info = matchinfo;
- u_int16_t pktlen = ntohs(skb->nh.iph->tot_len);
-
- return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;
-}
-
-static int
-checkentry(const char *tablename,
- const struct ipt_ip *ip,
- void *matchinfo,
- unsigned int matchsize,
- unsigned int hook_mask)
-{
- if (matchsize != IPT_ALIGN(sizeof(struct ipt_length_info)))
- return 0;
-
- return 1;
-}
-
-static struct ipt_match length_match = {
- .name = "length",
- .match = &match,
- .checkentry = &checkentry,
- .me = THIS_MODULE,
-};
-
-static int __init init(void)
-{
- return ipt_register_match(&length_match);
-}
-
-static void __exit fini(void)
-{
- ipt_unregister_match(&length_match);
-}
-
-module_init(init);
-module_exit(fini);
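
ipt_length is removed in the same way; its test reads only the IP total length, so nothing in it depends on the IPv4-only match infrastructure being dropped. For comparison with the signature changes elsewhere in this patch, the same test restated against the updated match prototype would look roughly like this (an illustration only, not the replacement module):

/* Sketch: the deleted length test, with the new protoff argument added. */
static int
length_match(const struct sk_buff *skb,
	     const struct net_device *in,
	     const struct net_device *out,
	     const void *matchinfo,
	     int offset,
	     unsigned int protoff,
	     int *hotdrop)
{
	const struct ipt_length_info *info = matchinfo;
	u_int16_t pktlen = ntohs(skb->nh.iph->tot_len);

	return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;
}
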
diff --git a/net/ipv4/netfilter/ipt_multiport.c b/net/ipv4/netfilter/ipt_multiport.c
index 99e8188162e..2d52326553f 100644
--- a/net/ipv4/netfilter/ipt_multiport.c
+++ b/net/ipv4/netfilter/ipt_multiport.c
@@ -97,6 +97,7 @@ match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
u16 _ports[2], *pptr;
@@ -105,7 +106,7 @@ match(const struct sk_buff *skb,
if (offset)
return 0;
- pptr = skb_header_pointer(skb, skb->nh.iph->ihl * 4,
+ pptr = skb_header_pointer(skb, protoff,
sizeof(_ports), _ports);
if (pptr == NULL) {
/* We've been asked to examine this packet, and we
@@ -128,6 +129,7 @@ match_v1(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
u16 _ports[2], *pptr;
@@ -136,7 +138,7 @@ match_v1(const struct sk_buff *skb,
if (offset)
return 0;
- pptr = skb_header_pointer(skb, skb->nh.iph->ihl * 4,
+ pptr = skb_header_pointer(skb, protoff,
sizeof(_ports), _ports);
if (pptr == NULL) {
/* We've been asked to examine this packet, and we
@@ -154,7 +156,7 @@ match_v1(const struct sk_buff *skb,
/* Called when user tries to insert an entry of this type. */
static int
checkentry(const char *tablename,
- const struct ipt_ip *ip,
+ const void *ip,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
@@ -164,7 +166,7 @@ checkentry(const char *tablename,
static int
checkentry_v1(const char *tablename,
- const struct ipt_ip *ip,
+ const void *ip,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
diff --git a/net/ipv4/netfilter/ipt_owner.c b/net/ipv4/netfilter/ipt_owner.c
index 0cee2862ed8..4843d0c9734 100644
--- a/net/ipv4/netfilter/ipt_owner.c
+++ b/net/ipv4/netfilter/ipt_owner.c
@@ -27,6 +27,7 @@ match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
const struct ipt_owner_info *info = matchinfo;
@@ -51,7 +52,7 @@ match(const struct sk_buff *skb,
static int
checkentry(const char *tablename,
- const struct ipt_ip *ip,
+ const void *ip,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
diff --git a/net/ipv4/netfilter/ipt_physdev.c b/net/ipv4/netfilter/ipt_physdev.c
deleted file mode 100644
index 03f554857a4..00000000000
--- a/net/ipv4/netfilter/ipt_physdev.c
+++ /dev/null
@@ -1,135 +0,0 @@
-/* Kernel module to match the bridge port in and
- * out device for IP packets coming into contact with a bridge. */
-
-/* (C) 2001-2003 Bart De Schuymer <bdschuym@pandora.be>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/netfilter_ipv4/ipt_physdev.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_bridge.h>
-#define MATCH 1
-#define NOMATCH 0
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
-MODULE_DESCRIPTION("iptables bridge physical device match module");
-
-static int
-match(const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const void *matchinfo,
- int offset,
- int *hotdrop)
-{
- int i;
- static const char nulldevname[IFNAMSIZ];
- const struct ipt_physdev_info *info = matchinfo;
- unsigned int ret;
- const char *indev, *outdev;
- struct nf_bridge_info *nf_bridge;
-
- /* Not a bridged IP packet or no info available yet:
- * LOCAL_OUT/mangle and LOCAL_OUT/nat don't know if
- * the destination device will be a bridge. */
- if (!(nf_bridge = skb->nf_bridge)) {
- /* Return MATCH if the invert flags of the used options are on */
- if ((info->bitmask & IPT_PHYSDEV_OP_BRIDGED) &&
- !(info->invert & IPT_PHYSDEV_OP_BRIDGED))
- return NOMATCH;
- if ((info->bitmask & IPT_PHYSDEV_OP_ISIN) &&
- !(info->invert & IPT_PHYSDEV_OP_ISIN))
- return NOMATCH;
- if ((info->bitmask & IPT_PHYSDEV_OP_ISOUT) &&
- !(info->invert & IPT_PHYSDEV_OP_ISOUT))
- return NOMATCH;
- if ((info->bitmask & IPT_PHYSDEV_OP_IN) &&
- !(info->invert & IPT_PHYSDEV_OP_IN))
- return NOMATCH;
- if ((info->bitmask & IPT_PHYSDEV_OP_OUT) &&
- !(info->invert & IPT_PHYSDEV_OP_OUT))
- return NOMATCH;
- return MATCH;
- }
-
- /* This only makes sense in the FORWARD and POSTROUTING chains */
- if ((info->bitmask & IPT_PHYSDEV_OP_BRIDGED) &&
- (!!(nf_bridge->mask & BRNF_BRIDGED) ^
- !(info->invert & IPT_PHYSDEV_OP_BRIDGED)))
- return NOMATCH;
-
- if ((info->bitmask & IPT_PHYSDEV_OP_ISIN &&
- (!nf_bridge->physindev ^ !!(info->invert & IPT_PHYSDEV_OP_ISIN))) ||
- (info->bitmask & IPT_PHYSDEV_OP_ISOUT &&
- (!nf_bridge->physoutdev ^ !!(info->invert & IPT_PHYSDEV_OP_ISOUT))))
- return NOMATCH;
-
- if (!(info->bitmask & IPT_PHYSDEV_OP_IN))
- goto match_outdev;
- indev = nf_bridge->physindev ? nf_bridge->physindev->name : nulldevname;
- for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned int); i++) {
- ret |= (((const unsigned int *)indev)[i]
- ^ ((const unsigned int *)info->physindev)[i])
- & ((const unsigned int *)info->in_mask)[i];
- }
-
- if ((ret == 0) ^ !(info->invert & IPT_PHYSDEV_OP_IN))
- return NOMATCH;
-
-match_outdev:
- if (!(info->bitmask & IPT_PHYSDEV_OP_OUT))
- return MATCH;
- outdev = nf_bridge->physoutdev ?
- nf_bridge->physoutdev->name : nulldevname;
- for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned int); i++) {
- ret |= (((const unsigned int *)outdev)[i]
- ^ ((const unsigned int *)info->physoutdev)[i])
- & ((const unsigned int *)info->out_mask)[i];
- }
-
- return (ret != 0) ^ !(info->invert & IPT_PHYSDEV_OP_OUT);
-}
-
-static int
-checkentry(const char *tablename,
- const struct ipt_ip *ip,
- void *matchinfo,
- unsigned int matchsize,
- unsigned int hook_mask)
-{
- const struct ipt_physdev_info *info = matchinfo;
-
- if (matchsize != IPT_ALIGN(sizeof(struct ipt_physdev_info)))
- return 0;
- if (!(info->bitmask & IPT_PHYSDEV_OP_MASK) ||
- info->bitmask & ~IPT_PHYSDEV_OP_MASK)
- return 0;
- return 1;
-}
-
-static struct ipt_match physdev_match = {
- .name = "physdev",
- .match = &match,
- .checkentry = &checkentry,
- .me = THIS_MODULE,
-};
-
-static int __init init(void)
-{
- return ipt_register_match(&physdev_match);
-}
-
-static void __exit fini(void)
-{
- ipt_unregister_match(&physdev_match);
-}
-
-module_init(init);
-module_exit(fini);
diff --git a/net/ipv4/netfilter/ipt_policy.c b/net/ipv4/netfilter/ipt_policy.c
new file mode 100644
index 00000000000..709debcc69c
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_policy.c
@@ -0,0 +1,170 @@
+/* IP tables module for matching IPsec policy
+ *
+ * Copyright (c) 2004,2005 Patrick McHardy, <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <net/xfrm.h>
+
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv4/ipt_policy.h>
+
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("IPtables IPsec policy matching module");
+MODULE_LICENSE("GPL");
+
+
+static inline int
+match_xfrm_state(struct xfrm_state *x, const struct ipt_policy_elem *e)
+{
+#define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x))
+
+ return MATCH(saddr, x->props.saddr.a4 & e->smask) &&
+ MATCH(daddr, x->id.daddr.a4 & e->dmask) &&
+ MATCH(proto, x->id.proto) &&
+ MATCH(mode, x->props.mode) &&
+ MATCH(spi, x->id.spi) &&
+ MATCH(reqid, x->props.reqid);
+}
+
+static int
+match_policy_in(const struct sk_buff *skb, const struct ipt_policy_info *info)
+{
+ const struct ipt_policy_elem *e;
+ struct sec_path *sp = skb->sp;
+ int strict = info->flags & IPT_POLICY_MATCH_STRICT;
+ int i, pos;
+
+ if (sp == NULL)
+ return -1;
+ if (strict && info->len != sp->len)
+ return 0;
+
+ for (i = sp->len - 1; i >= 0; i--) {
+ pos = strict ? i - sp->len + 1 : 0;
+ if (pos >= info->len)
+ return 0;
+ e = &info->pol[pos];
+
+ if (match_xfrm_state(sp->x[i].xvec, e)) {
+ if (!strict)
+ return 1;
+ } else if (strict)
+ return 0;
+ }
+
+ return strict ? 1 : 0;
+}
+
+static int
+match_policy_out(const struct sk_buff *skb, const struct ipt_policy_info *info)
+{
+ const struct ipt_policy_elem *e;
+ struct dst_entry *dst = skb->dst;
+ int strict = info->flags & IPT_POLICY_MATCH_STRICT;
+ int i, pos;
+
+ if (dst->xfrm == NULL)
+ return -1;
+
+ for (i = 0; dst && dst->xfrm; dst = dst->child, i++) {
+ pos = strict ? i : 0;
+ if (pos >= info->len)
+ return 0;
+ e = &info->pol[pos];
+
+ if (match_xfrm_state(dst->xfrm, e)) {
+ if (!strict)
+ return 1;
+ } else if (strict)
+ return 0;
+ }
+
+ return strict ? 1 : 0;
+}
+
+static int match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *matchinfo, int offset, int *hotdrop)
+{
+ const struct ipt_policy_info *info = matchinfo;
+ int ret;
+
+ if (info->flags & IPT_POLICY_MATCH_IN)
+ ret = match_policy_in(skb, info);
+ else
+ ret = match_policy_out(skb, info);
+
+ if (ret < 0)
+ ret = info->flags & IPT_POLICY_MATCH_NONE ? 1 : 0;
+ else if (info->flags & IPT_POLICY_MATCH_NONE)
+ ret = 0;
+
+ return ret;
+}
+
+static int checkentry(const char *tablename, const struct ipt_ip *ip,
+ void *matchinfo, unsigned int matchsize,
+ unsigned int hook_mask)
+{
+ struct ipt_policy_info *info = matchinfo;
+
+ if (matchsize != IPT_ALIGN(sizeof(*info))) {
+ printk(KERN_ERR "ipt_policy: matchsize %u != %zu\n",
+ matchsize, IPT_ALIGN(sizeof(*info)));
+ return 0;
+ }
+ if (!(info->flags & (IPT_POLICY_MATCH_IN|IPT_POLICY_MATCH_OUT))) {
+ printk(KERN_ERR "ipt_policy: neither incoming nor "
+ "outgoing policy selected\n");
+ return 0;
+ }
+ if (hook_mask & (1 << NF_IP_PRE_ROUTING | 1 << NF_IP_LOCAL_IN)
+ && info->flags & IPT_POLICY_MATCH_OUT) {
+ printk(KERN_ERR "ipt_policy: output policy not valid in "
+ "PRE_ROUTING and INPUT\n");
+ return 0;
+ }
+ if (hook_mask & (1 << NF_IP_POST_ROUTING | 1 << NF_IP_LOCAL_OUT)
+ && info->flags & IPT_POLICY_MATCH_IN) {
+ printk(KERN_ERR "ipt_policy: input policy not valid in "
+ "POST_ROUTING and OUTPUT\n");
+ return 0;
+ }
+ if (info->len > IPT_POLICY_MAX_ELEM) {
+ printk(KERN_ERR "ipt_policy: too many policy elements\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct ipt_match policy_match = {
+ .name = "policy",
+ .match = match,
+ .checkentry = checkentry,
+ .me = THIS_MODULE,
+};
+
+static int __init init(void)
+{
+ return ipt_register_match(&policy_match);
+}
+
+static void __exit fini(void)
+{
+ ipt_unregister_match(&policy_match);
+}
+
+module_init(init);
+module_exit(fini);
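
The new policy match keys off the IPsec state already attached to the packet: skb->sp on input, the skb->dst->xfrm chain on output. As a hypothetical example of the match data a rule might carry, using only the field names visible in the code above (the struct layout itself lives in the new ipt_policy.h header, and the value 1 for tunnel mode mirrors the x->props.mode convention):

/* Hypothetical rule data: match packets whose inbound IPsec policy used
 * tunnel mode, ignoring addresses, SPI, reqid and protocol. */
static const struct ipt_policy_info example_policy = {
	.flags	= IPT_POLICY_MATCH_IN,
	.len	= 1,
	.pol	= {
		[0] = {
			.mode		= 1,	/* x->props.mode: 1 == tunnel */
			.match.mode	= 1,	/* test only the mode field */
		},
	},
};

Userspace would express the same thing through the companion iptables extension, roughly "-m policy --dir in --pol ipsec --mode tunnel"; those option names are an assumption here and are not part of this patch.
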
diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c
index 5ddccb18c65..44611d6d14f 100644
--- a/net/ipv4/netfilter/ipt_recent.c
+++ b/net/ipv4/netfilter/ipt_recent.c
@@ -104,6 +104,7 @@ match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop);
/* Function to hash a given address into the hash table of table_size size */
@@ -317,7 +318,7 @@ static int ip_recent_ctrl(struct file *file, const char __user *input, unsigned
skb->nh.iph->daddr = 0;
/* Clear ttl since we have no way of knowing it */
skb->nh.iph->ttl = 0;
- match(skb,NULL,NULL,info,0,NULL);
+ match(skb,NULL,NULL,info,0,0,NULL);
kfree(skb->nh.iph);
out_free_skb:
@@ -357,6 +358,7 @@ match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
int pkt_count, hits_found, ans;
@@ -654,7 +656,7 @@ match(const struct sk_buff *skb,
*/
static int
checkentry(const char *tablename,
- const struct ipt_ip *ip,
+ const void *ip,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
diff --git a/net/ipv4/netfilter/ipt_tos.c b/net/ipv4/netfilter/ipt_tos.c
index 086a1bb61e3..9ab765e126f 100644
--- a/net/ipv4/netfilter/ipt_tos.c
+++ b/net/ipv4/netfilter/ipt_tos.c
@@ -23,6 +23,7 @@ match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
const struct ipt_tos_info *info = matchinfo;
@@ -32,7 +33,7 @@ match(const struct sk_buff *skb,
static int
checkentry(const char *tablename,
- const struct ipt_ip *ip,
+ const void *ip,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
diff --git a/net/ipv4/netfilter/ipt_ttl.c b/net/ipv4/netfilter/ipt_ttl.c
index 219aa9de88c..82da53f430a 100644
--- a/net/ipv4/netfilter/ipt_ttl.c
+++ b/net/ipv4/netfilter/ipt_ttl.c
@@ -21,7 +21,7 @@ MODULE_LICENSE("GPL");
static int match(const struct sk_buff *skb, const struct net_device *in,
const struct net_device *out, const void *matchinfo,
- int offset, int *hotdrop)
+ int offset, unsigned int protoff, int *hotdrop)
{
const struct ipt_ttl_info *info = matchinfo;
@@ -47,7 +47,7 @@ static int match(const struct sk_buff *skb, const struct net_device *in,
return 0;
}
-static int checkentry(const char *tablename, const struct ipt_ip *ip,
+static int checkentry(const char *tablename, const void *ip,
void *matchinfo, unsigned int matchsize,
unsigned int hook_mask)
{
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index 260a4f0a2a9..212a3079085 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -78,7 +78,8 @@ static struct ipt_table packet_filter = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
- .me = THIS_MODULE
+ .me = THIS_MODULE,
+ .af = AF_INET,
};
/* The work comes in here from netfilter.c. */
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 160eb11b6e2..3212a5cc4b6 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -109,6 +109,7 @@ static struct ipt_table packet_mangler = {
.valid_hooks = MANGLE_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.me = THIS_MODULE,
+ .af = AF_INET,
};
/* The work comes in here from netfilter.c. */
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 47449ba83eb..fdb9e9c81e8 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -83,7 +83,8 @@ static struct ipt_table packet_raw = {
.name = "raw",
.valid_hooks = RAW_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
- .me = THIS_MODULE
+ .me = THIS_MODULE,
+ .af = AF_INET,
};
/* The work comes in here from netfilter.c. */
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 9bdbb779397..167619f638c 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -277,7 +277,7 @@ static struct nf_hook_ops ipv4_conntrack_local_in_ops = {
#ifdef CONFIG_SYSCTL
/* From nf_conntrack_proto_icmp.c */
-extern unsigned long nf_ct_icmp_timeout;
+extern unsigned int nf_ct_icmp_timeout;
static struct ctl_table_header *nf_ct_ipv4_sysctl_header;
static ctl_table nf_ct_sysctl_table[] = {
@@ -575,7 +575,7 @@ MODULE_LICENSE("GPL");
static int __init init(void)
{
- need_nf_conntrack();
+ need_conntrack();
return init_or_cleanup(1);
}
@@ -587,9 +587,4 @@ static void __exit fini(void)
module_init(init);
module_exit(fini);
-void need_ip_conntrack(void)
-{
-}
-
-EXPORT_SYMBOL(need_ip_conntrack);
EXPORT_SYMBOL(nf_ct_ipv4_gather_frags);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 4b0d7e4d626..165a4d81efa 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -255,6 +255,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
kfree_skb(skb);
return NET_RX_DROP;
}
+ nf_reset(skb);
skb_push(skb, skb->data - skb->nh.raw);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0a461232329..a97ed5416c2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3347,7 +3347,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
int offset = start - TCP_SKB_CB(skb)->seq;
int size = TCP_SKB_CB(skb)->end_seq - start;
- if (offset < 0) BUG();
+ BUG_ON(offset < 0);
if (size > 0) {
size = min(copy, size);
if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index e9f83e5b28c..6ea353907af 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1080,6 +1080,7 @@ process:
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
+ nf_reset(skb);
if (sk_filter(sk, skb, 0))
goto discard_and_relse;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 223abaa72bc..00840474a44 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -989,6 +989,7 @@ static int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
kfree_skb(skb);
return -1;
}
+ nf_reset(skb);
if (up->encap_type) {
/*
@@ -1149,6 +1150,7 @@ int udp_rcv(struct sk_buff *skb)
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop;
+ nf_reset(skb);
/* No socket. Drop packet silently, if checksum is wrong */
if (udp_checksum_complete(skb))
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index 2d3849c38a0..850d919591d 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -11,6 +11,8 @@
#include <linux/module.h>
#include <linux/string.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
@@ -45,6 +47,23 @@ static int xfrm4_parse_spi(struct sk_buff *skb, u8 nexthdr, u32 *spi, u32 *seq)
return xfrm_parse_spi(skb, nexthdr, spi, seq);
}
+#ifdef CONFIG_NETFILTER
+static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb)
+{
+ struct iphdr *iph = skb->nh.iph;
+
+ if (skb->dst == NULL) {
+ if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
+ skb->dev))
+ goto drop;
+ }
+ return dst_input(skb);
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+#endif
+
int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
{
int err;
@@ -137,6 +156,8 @@ int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
memcpy(skb->sp->x+skb->sp->len, xfrm_vec, xfrm_nr*sizeof(struct sec_decap_state));
skb->sp->len += xfrm_nr;
+ nf_reset(skb);
+
if (decaps) {
if (!(skb->dev->flags&IFF_LOOPBACK)) {
dst_release(skb->dst);
@@ -145,7 +166,17 @@ int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
netif_rx(skb);
return 0;
} else {
+#ifdef CONFIG_NETFILTER
+ __skb_push(skb, skb->data - skb->nh.raw);
+ skb->nh.iph->tot_len = htons(skb->len);
+ ip_send_check(skb->nh.iph);
+
+ NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, skb->dev, NULL,
+ xfrm4_rcv_encap_finish);
+ return 0;
+#else
return -skb->nh.iph->protocol;
+#endif
}
drop_unlock:
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 66620a95942..d4df0ddd424 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -8,8 +8,10 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/compiler.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
+#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
@@ -95,7 +97,7 @@ out:
return ret;
}
-int xfrm4_output(struct sk_buff *skb)
+static int xfrm4_output_one(struct sk_buff *skb)
{
struct dst_entry *dst = skb->dst;
struct xfrm_state *x = dst->xfrm;
@@ -113,27 +115,33 @@ int xfrm4_output(struct sk_buff *skb)
goto error_nolock;
}
- spin_lock_bh(&x->lock);
- err = xfrm_state_check(x, skb);
- if (err)
- goto error;
+ do {
+ spin_lock_bh(&x->lock);
+ err = xfrm_state_check(x, skb);
+ if (err)
+ goto error;
- xfrm4_encap(skb);
+ xfrm4_encap(skb);
- err = x->type->output(x, skb);
- if (err)
- goto error;
+ err = x->type->output(x, skb);
+ if (err)
+ goto error;
- x->curlft.bytes += skb->len;
- x->curlft.packets++;
+ x->curlft.bytes += skb->len;
+ x->curlft.packets++;
- spin_unlock_bh(&x->lock);
+ spin_unlock_bh(&x->lock);
- if (!(skb->dst = dst_pop(dst))) {
- err = -EHOSTUNREACH;
- goto error_nolock;
- }
- err = NET_XMIT_BYPASS;
+ if (!(skb->dst = dst_pop(dst))) {
+ err = -EHOSTUNREACH;
+ goto error_nolock;
+ }
+ dst = skb->dst;
+ x = dst->xfrm;
+ } while (x && !x->props.mode);
+
+ IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
+ err = 0;
out_exit:
return err;
@@ -143,3 +151,33 @@ error_nolock:
kfree_skb(skb);
goto out_exit;
}
+
+int xfrm4_output_finish(struct sk_buff *skb)
+{
+ int err;
+
+ while (likely((err = xfrm4_output_one(skb)) == 0)) {
+ nf_reset(skb);
+
+ err = nf_hook(PF_INET, NF_IP_LOCAL_OUT, &skb, NULL,
+ skb->dst->dev, dst_output);
+ if (unlikely(err != 1))
+ break;
+
+ if (!skb->dst->xfrm)
+ return dst_output(skb);
+
+ err = nf_hook(PF_INET, NF_IP_POST_ROUTING, &skb, NULL,
+ skb->dst->dev, xfrm4_output_finish);
+ if (unlikely(err != 1))
+ break;
+ }
+
+ return err;
+}
+
+int xfrm4_output(struct sk_buff *skb)
+{
+ return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dst->dev,
+ xfrm4_output_finish);
+}
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index d23e07fc81f..dbabf81a9b7 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -42,6 +42,21 @@ __xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl,
x->props.saddr = tmpl->saddr;
if (x->props.saddr.a4 == 0)
x->props.saddr.a4 = saddr->a4;
+ if (tmpl->mode && x->props.saddr.a4 == 0) {
+ struct rtable *rt;
+ struct flowi fl_tunnel = {
+ .nl_u = {
+ .ip4_u = {
+ .daddr = x->id.daddr.a4,
+ }
+ }
+ };
+ if (!xfrm_dst_lookup((struct xfrm_dst **)&rt,
+ &fl_tunnel, AF_INET)) {
+ x->props.saddr.a4 = rt->rt_src;
+ dst_release(&rt->u.dst);
+ }
+ }
x->props.mode = tmpl->mode;
x->props.reqid = tmpl->reqid;
x->props.family = AF_INET;
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 9601fd7f9d6..41877abd22e 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -8,11 +8,11 @@ ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o sit.o \
route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o raw.o \
protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \
exthdrs.o sysctl_net_ipv6.o datagram.o proc.o \
- ip6_flowlabel.o ipv6_syms.o netfilter.o \
- inet6_connection_sock.o
+ ip6_flowlabel.o ipv6_syms.o inet6_connection_sock.o
ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \
xfrm6_output.o
+ipv6-$(CONFIG_NETFILTER) += netfilter.o
ipv6-objs += $(ipv6-y)
obj-$(CONFIG_INET6_AH) += ah6.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 704fb73e6c5..dfb4f145a13 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -58,6 +58,7 @@
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
+#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/string.h>
@@ -1228,7 +1229,7 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
/* Gets referenced address, destroys ifaddr */
-void addrconf_dad_stop(struct inet6_ifaddr *ifp)
+static void addrconf_dad_stop(struct inet6_ifaddr *ifp)
{
if (ifp->flags&IFA_F_PERMANENT) {
spin_lock_bh(&ifp->lock);
@@ -2643,7 +2644,7 @@ static int if6_seq_show(struct seq_file *seq, void *v)
{
struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
seq_printf(seq,
- "%04x%04x%04x%04x%04x%04x%04x%04x %02x %02x %02x %02x %8s\n",
+ NIP6_FMT " %02x %02x %02x %02x %8s\n",
NIP6(ifp->addr),
ifp->idev->dev->ifindex,
ifp->prefix_len,
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 68afc53be66..064ffab82a9 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
@@ -689,11 +690,11 @@ snmp6_mib_init(void *ptr[2], size_t mibsize, size_t mibalign)
if (ptr == NULL)
return -EINVAL;
- ptr[0] = __alloc_percpu(mibsize, mibalign);
+ ptr[0] = __alloc_percpu(mibsize);
if (!ptr[0])
goto err0;
- ptr[1] = __alloc_percpu(mibsize, mibalign);
+ ptr[1] = __alloc_percpu(mibsize);
if (!ptr[1])
goto err1;
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 13cc7f89558..c7932cb420a 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -332,8 +332,7 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!x)
return;
- NETDEBUG(KERN_DEBUG "pmtu discovery on SA AH/%08x/"
- "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ NETDEBUG(KERN_DEBUG "pmtu discovery on SA AH/%08x/" NIP6_FMT "\n",
ntohl(ah->spi), NIP6(iph->daddr));
xfrm_state_put(x);
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 6b729404723..72bd08af2df 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -13,6 +13,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
@@ -531,9 +532,7 @@ static int ac6_seq_show(struct seq_file *seq, void *v)
struct ac6_iter_state *state = ac6_seq_private(seq);
seq_printf(seq,
- "%-4d %-15s "
- "%04x%04x%04x%04x%04x%04x%04x%04x "
- "%5d\n",
+ "%-4d %-15s " NIP6_FMT " %5d\n",
state->dev->ifindex, state->dev->name,
NIP6(im->aca_addr),
im->aca_users);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index c4a3a993acb..99a6eb23378 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -13,6 +13,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 6de8ee1a5ad..7b5b94f1390 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -266,8 +266,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6);
if (!x)
return;
- printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/"
- "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/" NIP6_FMT "\n",
ntohl(esph->spi), NIP6(iph->daddr));
xfrm_state_put(x);
}
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 113374dc342..2a1e7e45b89 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -152,7 +152,7 @@ static struct tlvtype_proc tlvprocdestopt_lst[] = {
{-1, NULL}
};
-static int ipv6_destopt_rcv(struct sk_buff **skbp, unsigned int *nhoffp)
+static int ipv6_destopt_rcv(struct sk_buff **skbp)
{
struct sk_buff *skb = *skbp;
struct inet6_skb_parm *opt = IP6CB(skb);
@@ -169,7 +169,7 @@ static int ipv6_destopt_rcv(struct sk_buff **skbp, unsigned int *nhoffp)
if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) {
skb->h.raw += ((skb->h.raw[1]+1)<<3);
- *nhoffp = opt->dst1;
+ opt->nhoff = opt->dst1;
return 1;
}
@@ -192,7 +192,7 @@ void __init ipv6_destopt_init(void)
NONE header. No data in packet.
********************************/
-static int ipv6_nodata_rcv(struct sk_buff **skbp, unsigned int *nhoffp)
+static int ipv6_nodata_rcv(struct sk_buff **skbp)
{
struct sk_buff *skb = *skbp;
@@ -215,7 +215,7 @@ void __init ipv6_nodata_init(void)
Routing header.
********************************/
-static int ipv6_rthdr_rcv(struct sk_buff **skbp, unsigned int *nhoffp)
+static int ipv6_rthdr_rcv(struct sk_buff **skbp)
{
struct sk_buff *skb = *skbp;
struct inet6_skb_parm *opt = IP6CB(skb);
@@ -249,7 +249,7 @@ looped_back:
skb->h.raw += (hdr->hdrlen + 1) << 3;
opt->dst0 = opt->dst1;
opt->dst1 = 0;
- *nhoffp = (&hdr->nexthdr) - skb->nh.raw;
+ opt->nhoff = (&hdr->nexthdr) - skb->nh.raw;
return 1;
}
@@ -487,9 +487,14 @@ static struct tlvtype_proc tlvprochopopt_lst[] = {
int ipv6_parse_hopopts(struct sk_buff *skb, int nhoff)
{
- IP6CB(skb)->hop = sizeof(struct ipv6hdr);
- if (ip6_parse_tlv(tlvprochopopt_lst, skb))
+ struct inet6_skb_parm *opt = IP6CB(skb);
+
+ opt->hop = sizeof(struct ipv6hdr);
+ if (ip6_parse_tlv(tlvprochopopt_lst, skb)) {
+ skb->h.raw += (skb->h.raw[1]+1)<<3;
+ opt->nhoff = sizeof(struct ipv6hdr);
return sizeof(struct ipv6hdr);
+ }
return -1;
}
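
The extension-header and protocol-handler hunks in this area all follow one pattern: the nexthdr offset that used to be passed back through a nhoffp pointer now lives in the IPv6 control block, seeded by ipv6_rcv before any handler runs. A minimal sketch of a handler under the new convention, assuming only what these hunks show (the IP6CB() accessor, the nhoff field, and the "positive return means resubmit" rule used by ip6_input_finish further down in this diff):

/* Sketch: an extension-header handler after the *nhoffp parameter is gone. */
static int example_exthdr_rcv(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	struct inet6_skb_parm *opt = IP6CB(skb);
	unsigned int this_hdr = skb->h.raw - skb->nh.raw;

	/* ... validate and parse the header at skb->h.raw here ... */

	/* Step over this header: its length byte counts 8-octet units
	 * beyond the first eight octets. */
	skb->h.raw += (skb->h.raw[1] + 1) << 3;

	/* Record where the nexthdr byte of the header just parsed sits,
	 * in the control block instead of through a *nhoffp argument. */
	opt->nhoff = this_hdr;

	return 1;	/* > 0: ip6_input_finish resubmits to the next handler */
}
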
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 6ec6a2b549b..fcf883183ce 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -79,7 +79,7 @@ DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
static DEFINE_PER_CPU(struct socket *, __icmpv6_socket) = NULL;
#define icmpv6_socket __get_cpu_var(__icmpv6_socket)
-static int icmpv6_rcv(struct sk_buff **pskb, unsigned int *nhoffp);
+static int icmpv6_rcv(struct sk_buff **pskb);
static struct inet6_protocol icmpv6_protocol = {
.handler = icmpv6_rcv,
@@ -581,7 +581,7 @@ static void icmpv6_notify(struct sk_buff *skb, int type, int code, u32 info)
* Handle icmp messages
*/
-static int icmpv6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
+static int icmpv6_rcv(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct net_device *dev = skb->dev;
@@ -607,7 +607,7 @@ static int icmpv6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
skb->csum = ~csum_ipv6_magic(saddr, daddr, skb->len,
IPPROTO_ICMPV6, 0);
if (__skb_checksum_complete(skb)) {
- LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x > %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x]\n",
+ LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [" NIP6_FMT " > " NIP6_FMT "]\n",
NIP6(*saddr), NIP6(*daddr));
goto discard_it;
}
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 792f90f0f9e..f8f3a37a149 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -25,6 +25,7 @@
#include <net/inet_hashtables.h>
#include <net/ip6_route.h>
#include <net/sock.h>
+#include <net/inet6_connection_sock.h>
int inet6_csk_bind_conflict(const struct sock *sk,
const struct inet_bind_bucket *tb)
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 89d12b4817a..4183c8dac7f 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -9,6 +9,7 @@
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*/
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
@@ -628,9 +629,7 @@ static void ip6fl_fl_seq_show(struct seq_file *seq, struct ip6_flowlabel *fl)
{
while(fl) {
seq_printf(seq,
- "%05X %-1d %-6d %-6d %-6ld %-8ld "
- "%02x%02x%02x%02x%02x%02x%02x%02x "
- "%-4d\n",
+ "%05X %-1d %-6d %-6d %-6ld %-8ld " NIP6_FMT " %-4d\n",
(unsigned)ntohl(fl->label),
fl->share,
(unsigned)fl->owner,
@@ -646,8 +645,8 @@ static void ip6fl_fl_seq_show(struct seq_file *seq, struct ip6_flowlabel *fl)
static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
- seq_puts(seq, "Label S Owner Users Linger Expires "
- "Dst Opt\n");
+ seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-39s %s\n",
+ "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
else
ip6fl_fl_seq_show(seq, v);
return 0;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index a6026d2787d..29f73592e68 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -48,7 +48,7 @@
-static inline int ip6_rcv_finish( struct sk_buff *skb)
+inline int ip6_rcv_finish( struct sk_buff *skb)
{
if (skb->dst == NULL)
ip6_route_input(skb);
@@ -97,6 +97,9 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
if (hdr->version != 6)
goto err;
+ skb->h.raw = (u8 *)(hdr + 1);
+ IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
+
pkt_len = ntohs(hdr->payload_len);
/* pkt_len may be zero if Jumbo payload option is present */
@@ -111,8 +114,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
}
if (hdr->nexthdr == NEXTHDR_HOP) {
- skb->h.raw = (u8*)(hdr+1);
- if (ipv6_parse_hopopts(skb, offsetof(struct ipv6hdr, nexthdr)) < 0) {
+ if (ipv6_parse_hopopts(skb, IP6CB(skb)->nhoff) < 0) {
IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
return 0;
}
@@ -143,26 +145,15 @@ static inline int ip6_input_finish(struct sk_buff *skb)
int nexthdr;
u8 hash;
- skb->h.raw = skb->nh.raw + sizeof(struct ipv6hdr);
-
/*
* Parse extension headers
*/
- nexthdr = skb->nh.ipv6h->nexthdr;
- nhoff = offsetof(struct ipv6hdr, nexthdr);
-
- /* Skip hop-by-hop options, they are already parsed. */
- if (nexthdr == NEXTHDR_HOP) {
- nhoff = sizeof(struct ipv6hdr);
- nexthdr = skb->h.raw[0];
- skb->h.raw += (skb->h.raw[1]+1)<<3;
- }
-
rcu_read_lock();
resubmit:
if (!pskb_pull(skb, skb->h.raw - skb->data))
goto discard;
+ nhoff = IP6CB(skb)->nhoff;
nexthdr = skb->nh.raw[nhoff];
raw_sk = sk_head(&raw_v6_htable[nexthdr & (MAX_INET_PROTOS - 1)]);
@@ -194,7 +185,7 @@ resubmit:
!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard;
- ret = ipprot->handler(&skb, &nhoff);
+ ret = ipprot->handler(&skb);
if (ret > 0)
goto resubmit;
else if (ret == 0)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index b4c4beba0ed..efa3e72cfcf 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -226,6 +226,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
ipv6_addr_copy(&hdr->daddr, first_hop);
+ skb->priority = sk->sk_priority;
+
mtu = dst_mtu(dst);
if ((skb->len <= mtu) || ipfragok) {
IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
@@ -1182,6 +1184,8 @@ int ip6_push_pending_frames(struct sock *sk)
ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
ipv6_addr_copy(&hdr->daddr, final_dst);
+ skb->priority = sk->sk_priority;
+
skb->dst = dst_clone(&rt->u.dst);
IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS);
err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output);
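
Both ip6_xmit() and ip6_push_pending_frames() now copy the socket's priority onto the outgoing skb, so the value a process sets with SO_PRIORITY becomes visible to queueing disciplines for locally generated IPv6 traffic. A minimal userspace sketch of setting that priority (illustrative only, not part of the patch):

#include <sys/socket.h>

/* Set the socket priority that, after this change, ends up in skb->priority
 * for IPv6 packets sent on the socket. */
static int set_prio(int fd, int prio)
{
	return setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));
}
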
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index e315d0f80af..92ead3cf956 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -21,6 +21,7 @@
#include <linux/config.h>
#include <linux/module.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
@@ -243,7 +244,7 @@ ip6_tnl_create(struct ip6_tnl_parm *p, struct ip6_tnl **pt)
if (dev == NULL)
return -ENOMEM;
- t = dev->priv;
+ t = netdev_priv(dev);
dev->init = ip6ip6_tnl_dev_init;
t->parms = *p;
@@ -308,7 +309,7 @@ ip6ip6_tnl_locate(struct ip6_tnl_parm *p, struct ip6_tnl **pt, int create)
static void
ip6ip6_tnl_dev_uninit(struct net_device *dev)
{
- struct ip6_tnl *t = dev->priv;
+ struct ip6_tnl *t = netdev_priv(dev);
if (dev == ip6ip6_fb_tnl_dev) {
write_lock_bh(&ip6ip6_lock);
@@ -510,7 +511,7 @@ static inline void ip6ip6_ecn_decapsulate(struct ipv6hdr *outer_iph,
**/
static int
-ip6ip6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
+ip6ip6_rcv(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct ipv6hdr *ipv6h;
@@ -623,7 +624,7 @@ ip6ip6_tnl_addr_conflict(struct ip6_tnl *t, struct ipv6hdr *hdr)
static int
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct ip6_tnl *t = (struct ip6_tnl *) dev->priv;
+ struct ip6_tnl *t = netdev_priv(dev);
struct net_device_stats *stats = &t->stat;
struct ipv6hdr *ipv6h = skb->nh.ipv6h;
struct ipv6_txoptions *opt = NULL;
@@ -933,11 +934,11 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
}
if ((err = ip6ip6_tnl_locate(&p, &t, 0)) == -ENODEV)
- t = (struct ip6_tnl *) dev->priv;
+ t = netdev_priv(dev);
else if (err)
break;
} else
- t = (struct ip6_tnl *) dev->priv;
+ t = netdev_priv(dev);
memcpy(&p, &t->parms, sizeof (p));
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) {
@@ -955,7 +956,7 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
}
if (!create && dev != ip6ip6_fb_tnl_dev) {
- t = (struct ip6_tnl *) dev->priv;
+ t = netdev_priv(dev);
}
if (!t && (err = ip6ip6_tnl_locate(&p, &t, create))) {
break;
@@ -991,12 +992,12 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
err = ip6ip6_tnl_locate(&p, &t, 0);
if (err)
break;
- if (t == ip6ip6_fb_tnl_dev->priv) {
+ if (t == netdev_priv(ip6ip6_fb_tnl_dev)) {
err = -EPERM;
break;
}
} else {
- t = (struct ip6_tnl *) dev->priv;
+ t = netdev_priv(dev);
}
err = unregister_netdevice(t->dev);
break;
@@ -1016,7 +1017,7 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static struct net_device_stats *
ip6ip6_tnl_get_stats(struct net_device *dev)
{
- return &(((struct ip6_tnl *) dev->priv)->stat);
+ return &(((struct ip6_tnl *)netdev_priv(dev))->stat);
}
/**
@@ -1073,7 +1074,7 @@ static void ip6ip6_tnl_dev_setup(struct net_device *dev)
static inline void
ip6ip6_tnl_dev_init_gen(struct net_device *dev)
{
- struct ip6_tnl *t = (struct ip6_tnl *) dev->priv;
+ struct ip6_tnl *t = netdev_priv(dev);
t->fl.proto = IPPROTO_IPV6;
t->dev = dev;
strcpy(t->parms.name, dev->name);
@@ -1087,7 +1088,7 @@ ip6ip6_tnl_dev_init_gen(struct net_device *dev)
static int
ip6ip6_tnl_dev_init(struct net_device *dev)
{
- struct ip6_tnl *t = (struct ip6_tnl *) dev->priv;
+ struct ip6_tnl *t = netdev_priv(dev);
ip6ip6_tnl_dev_init_gen(dev);
ip6ip6_tnl_link_config(t);
return 0;
@@ -1103,7 +1104,7 @@ ip6ip6_tnl_dev_init(struct net_device *dev)
static int
ip6ip6_fb_tnl_dev_init(struct net_device *dev)
{
- struct ip6_tnl *t = dev->priv;
+ struct ip6_tnl *t = netdev_priv(dev);
ip6ip6_tnl_dev_init_gen(dev);
dev_hold(dev);
tnls_wc[0] = t;
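
The ip6_tunnel.c hunks replace every dev->priv access with netdev_priv(dev). Because the driver allocates its per-device state together with the net_device via alloc_netdev(), netdev_priv() points at that same area and the old casts simply disappear. A small sketch under that assumption (function name is illustrative):

/* The tunnel's private state lives in the area alloc_netdev() reserved
 * after struct net_device; netdev_priv() returns it without a cast. */
static struct ip6_tnl *example_tnl_priv(struct net_device *dev)
{
	return netdev_priv(dev);
}
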
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 626dd39685f..d511a884dad 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -212,8 +212,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!x)
return;
- printk(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/"
- "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ printk(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/" NIP6_FMT "\n",
spi, NIP6(iph->daddr));
xfrm_state_put(x);
}
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index c63868dd2ca..f7142ba519a 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -26,6 +26,7 @@
*/
#include <linux/module.h>
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
@@ -549,7 +550,7 @@ done:
retv = -ENOBUFS;
break;
}
- gsf = (struct group_filter *)kmalloc(optlen,GFP_KERNEL);
+ gsf = kmalloc(optlen,GFP_KERNEL);
if (gsf == 0) {
retv = -ENOBUFS;
break;
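
This hunk, like the similar ones in mcast.c below, drops the explicit casts on kmalloc()/sock_kmalloc() return values; both return void *, which converts implicitly in C, so the casts were pure noise and the change has no runtime effect. A tiny sketch with a hypothetical payload struct:

#include <linux/slab.h>

struct example_state { int a, b; };	/* hypothetical payload */

/* kmalloc() returns void *, so the assignment needs no cast. */
static struct example_state *example_alloc(void)
{
	return kmalloc(sizeof(struct example_state), GFP_ATOMIC);
}
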
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 1cf305a9f8d..0e03eabfb9d 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -449,8 +449,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
if (psl)
count += psl->sl_max;
- newpsl = (struct ip6_sf_socklist *)sock_kmalloc(sk,
- IP6_SFLSIZE(count), GFP_ATOMIC);
+ newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_ATOMIC);
if (!newpsl) {
err = -ENOBUFS;
goto done;
@@ -535,8 +534,8 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
goto done;
}
if (gsf->gf_numsrc) {
- newpsl = (struct ip6_sf_socklist *)sock_kmalloc(sk,
- IP6_SFLSIZE(gsf->gf_numsrc), GFP_ATOMIC);
+ newpsl = sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc),
+ GFP_ATOMIC);
if (!newpsl) {
err = -ENOBUFS;
goto done;
@@ -768,7 +767,7 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
* for deleted items allows change reports to use common code with
* non-deleted or query-response MCA's.
*/
- pmc = (struct ifmcaddr6 *)kmalloc(sizeof(*pmc), GFP_ATOMIC);
+ pmc = kmalloc(sizeof(*pmc), GFP_ATOMIC);
if (!pmc)
return;
memset(pmc, 0, sizeof(*pmc));
@@ -1937,7 +1936,7 @@ static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
psf_prev = psf;
}
if (!psf) {
- psf = (struct ip6_sf_list *)kmalloc(sizeof(*psf), GFP_ATOMIC);
+ psf = kmalloc(sizeof(*psf), GFP_ATOMIC);
if (!psf)
return -ENOBUFS;
memset(psf, 0, sizeof(*psf));
@@ -2374,7 +2373,7 @@ static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
seq_printf(seq,
- "%-4d %-15s %04x%04x%04x%04x%04x%04x%04x%04x %5d %08X %ld\n",
+ "%-4d %-15s " NIP6_FMT " %5d %08X %ld\n",
state->dev->ifindex, state->dev->name,
NIP6(im->mca_addr),
im->mca_users, im->mca_flags,
@@ -2543,15 +2542,12 @@ static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
if (v == SEQ_START_TOKEN) {
seq_printf(seq,
"%3s %6s "
- "%32s %32s %6s %6s\n", "Idx",
+ "%39s %39s %6s %6s\n", "Idx",
"Device", "Multicast Address",
"Source Address", "INC", "EXC");
} else {
seq_printf(seq,
- "%3d %6.6s "
- "%04x%04x%04x%04x%04x%04x%04x%04x "
- "%04x%04x%04x%04x%04x%04x%04x%04x "
- "%6lu %6lu\n",
+ "%3d %6.6s " NIP6_FMT " " NIP6_FMT " %6lu %6lu\n",
state->dev->ifindex, state->dev->name,
NIP6(state->im->mca_addr),
NIP6(psf->sf_addr),
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 305d9ee6d7d..cb8856b1d95 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -692,7 +692,7 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
if (!(neigh->nud_state & NUD_VALID)) {
ND_PRINTK1(KERN_DEBUG
"%s(): trying to ucast probe in NUD_INVALID: "
- "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ NIP6_FMT "\n",
__FUNCTION__,
NIP6(*target));
}
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index f8626ebf90f..d750cfc019d 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -1,15 +1,12 @@
-#include <linux/config.h>
-#include <linux/init.h>
-
-#ifdef CONFIG_NETFILTER
-
#include <linux/kernel.h>
+#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <net/dst.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
+#include <net/xfrm.h>
int ip6_route_me_harder(struct sk_buff *skb)
{
@@ -21,11 +18,17 @@ int ip6_route_me_harder(struct sk_buff *skb)
{ .ip6_u =
{ .daddr = iph->daddr,
.saddr = iph->saddr, } },
- .proto = iph->nexthdr,
};
dst = ip6_route_output(skb->sk, &fl);
+#ifdef CONFIG_XFRM
+ if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
+ xfrm_decode_session(skb, &fl, AF_INET6) == 0)
+ if (xfrm_lookup(&skb->dst, &fl, skb->sk, 0))
+ return -1;
+#endif
+
if (dst->error) {
IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n");
@@ -87,18 +90,10 @@ int __init ipv6_netfilter_init(void)
return nf_register_queue_rerouter(PF_INET6, &ip6_reroute);
}
+/* This can be called from inet6_init() on errors, so it cannot
+ * be marked __exit. -DaveM
+ */
void ipv6_netfilter_fini(void)
{
nf_unregister_queue_rerouter(PF_INET6);
}
-
-#else /* CONFIG_NETFILTER */
-int __init ipv6_netfilter_init(void)
-{
- return 0;
-}
-
-void ipv6_netfilter_fini(void)
-{
-}
-#endif /* CONFIG_NETFILTER */
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 04912f9b35c..2d6f8ecbc27 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -41,6 +41,7 @@ config IP6_NF_QUEUE
config IP6_NF_IPTABLES
tristate "IP6 tables support (required for filtering/masq/NAT)"
+ depends on NETFILTER_XTABLES
help
ip6tables is a general, extensible packet identification framework.
Currently only the packet filtering and packet mangling subsystem
@@ -50,25 +51,6 @@ config IP6_NF_IPTABLES
To compile it as a module, choose M here. If unsure, say N.
# The simple matches.
-config IP6_NF_MATCH_LIMIT
- tristate "limit match support"
- depends on IP6_NF_IPTABLES
- help
- limit matching allows you to control the rate at which a rule can be
- matched: mainly useful in combination with the LOG target ("LOG
- target support", below) and to avoid some Denial of Service attacks.
-
- To compile it as a module, choose M here. If unsure, say N.
-
-config IP6_NF_MATCH_MAC
- tristate "MAC address match support"
- depends on IP6_NF_IPTABLES
- help
- mac matching allows you to match packets based on the source
- Ethernet address of the packet.
-
- To compile it as a module, choose M here. If unsure, say N.
-
config IP6_NF_MATCH_RT
tristate "Routing header match support"
depends on IP6_NF_IPTABLES
@@ -124,16 +106,6 @@ config IP6_NF_MATCH_OWNER
To compile it as a module, choose M here. If unsure, say N.
-config IP6_NF_MATCH_MARK
- tristate "netfilter MARK match support"
- depends on IP6_NF_IPTABLES
- help
- Netfilter mark matching allows you to match packets based on the
- `nfmark' value in the packet. This can be set by the MARK target
- (see below).
-
- To compile it as a module, choose M here. If unsure, say N.
-
config IP6_NF_MATCH_IPV6HEADER
tristate "IPv6 Extension Headers Match"
depends on IP6_NF_IPTABLES
@@ -151,15 +123,6 @@ config IP6_NF_MATCH_AHESP
To compile it as a module, choose M here. If unsure, say N.
-config IP6_NF_MATCH_LENGTH
- tristate "Packet Length match support"
- depends on IP6_NF_IPTABLES
- help
- This option allows you to match the length of a packet against a
- specific value or range of values.
-
- To compile it as a module, choose M here. If unsure, say N.
-
config IP6_NF_MATCH_EUI64
tristate "EUI64 address check"
depends on IP6_NF_IPTABLES
@@ -170,12 +133,13 @@ config IP6_NF_MATCH_EUI64
To compile it as a module, choose M here. If unsure, say N.
-config IP6_NF_MATCH_PHYSDEV
- tristate "Physdev match support"
- depends on IP6_NF_IPTABLES && BRIDGE_NETFILTER
+config IP6_NF_MATCH_POLICY
+ tristate "IPsec policy match support"
+ depends on IP6_NF_IPTABLES && XFRM
help
- Physdev packet matching matches against the physical bridge ports
- the IP packet arrived on or will leave by.
+ Policy matching allows you to match packets based on the
+ IPsec policy that was used during decapsulation/will
+ be used during encapsulation.
To compile it as a module, choose M here. If unsure, say N.
@@ -209,17 +173,6 @@ config IP6_NF_TARGET_REJECT
To compile it as a module, choose M here. If unsure, say N.
-config IP6_NF_TARGET_NFQUEUE
- tristate "NFQUEUE Target Support"
- depends on IP6_NF_IPTABLES
- help
- This Target replaced the old obsolete QUEUE target.
-
- As opposed to QUEUE, it supports 65535 different queues,
- not just one.
-
- To compile it as a module, choose M here. If unsure, say N.
-
config IP6_NF_MANGLE
tristate "Packet mangling"
depends on IP6_NF_IPTABLES
@@ -230,19 +183,6 @@ config IP6_NF_MANGLE
To compile it as a module, choose M here. If unsure, say N.
-config IP6_NF_TARGET_MARK
- tristate "MARK target support"
- depends on IP6_NF_MANGLE
- help
- This option adds a `MARK' target, which allows you to create rules
- in the `mangle' table which alter the netfilter mark (nfmark) field
- associated with the packet packet prior to routing. This can change
- the routing method (see `Use netfilter MARK value as routing
- key') and can also be used by other subsystems to change their
- behavior.
-
- To compile it as a module, choose M here. If unsure, say N.
-
config IP6_NF_TARGET_HL
tristate 'HL (hoplimit) target support'
depends on IP6_NF_MANGLE
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 9ab5b2ca1f5..663b4749820 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -4,24 +4,19 @@
# Link order matters here.
obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o
-obj-$(CONFIG_IP6_NF_MATCH_LIMIT) += ip6t_limit.o
-obj-$(CONFIG_IP6_NF_MATCH_MARK) += ip6t_mark.o
obj-$(CONFIG_IP6_NF_MATCH_LENGTH) += ip6t_length.o
-obj-$(CONFIG_IP6_NF_MATCH_MAC) += ip6t_mac.o
obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o ip6t_dst.o
obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o
obj-$(CONFIG_IP6_NF_MATCH_FRAG) += ip6t_frag.o
obj-$(CONFIG_IP6_NF_MATCH_AHESP) += ip6t_esp.o ip6t_ah.o
+obj-$(CONFIG_IP6_NF_MATCH_POLICY) += ip6t_policy.o
obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
obj-$(CONFIG_IP6_NF_MATCH_MULTIPORT) += ip6t_multiport.o
obj-$(CONFIG_IP6_NF_MATCH_OWNER) += ip6t_owner.o
-obj-$(CONFIG_IP6_NF_MATCH_PHYSDEV) += ip6t_physdev.o
obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
-obj-$(CONFIG_IP6_NF_TARGET_MARK) += ip6t_MARK.o
obj-$(CONFIG_IP6_NF_TARGET_HL) += ip6t_HL.o
-obj-$(CONFIG_IP6_NF_TARGET_NFQUEUE) += ip6t_NFQUEUE.o
obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o
obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 925b42d4834..847068fd336 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -13,7 +13,12 @@
* a table
* 06 Jun 2002 Andras Kis-Szabo <kisza@sch.bme.hu>
* - new extension header parser code
+ * 15 Oct 2005 Harald Welte <laforge@netfilter.org>
+ * - Unification of {ip,ip6}_tables into x_tables
+ * - Removed tcp and udp code, since it's not ipv6 specific
*/
+
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/in.h>
#include <linux/skbuff.h>
@@ -21,8 +26,6 @@
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <asm/uaccess.h>
@@ -31,6 +34,7 @@
#include <linux/cpumask.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -65,13 +69,8 @@ do { \
#else
#define IP_NF_ASSERT(x)
#endif
-#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
-static DECLARE_MUTEX(ip6t_mutex);
-/* Must have mutex */
-#define ASSERT_READ_LOCK(x) IP_NF_ASSERT(down_trylock(&ip6t_mutex) != 0)
-#define ASSERT_WRITE_LOCK(x) IP_NF_ASSERT(down_trylock(&ip6t_mutex) != 0)
#include <linux/netfilter_ipv4/listhelp.h>
#if 0
@@ -89,30 +88,6 @@ static DECLARE_MUTEX(ip6t_mutex);
Hence the start of any table is given by get_table() below. */
-/* The table itself */
-struct ip6t_table_info
-{
- /* Size per table */
- unsigned int size;
- /* Number of entries: FIXME. --RR */
- unsigned int number;
- /* Initial number of entries. Needed for module usage count */
- unsigned int initial_entries;
-
- /* Entry points and underflows */
- unsigned int hook_entry[NF_IP6_NUMHOOKS];
- unsigned int underflow[NF_IP6_NUMHOOKS];
-
- /* ip6t_entry tables: one per CPU */
- void *entries[NR_CPUS];
-};
-
-static LIST_HEAD(ip6t_target);
-static LIST_HEAD(ip6t_match);
-static LIST_HEAD(ip6t_tables);
-#define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
-#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)
-
#if 0
#define down(x) do { printk("DOWN:%u:" #x "\n", __LINE__); down(x); } while(0)
#define down_interruptible(x) ({ int __r; printk("DOWNi:%u:" #x "\n", __LINE__); __r = down_interruptible(x); if (__r != 0) printk("ABORT-DOWNi:%u\n", __LINE__); __r; })
@@ -295,7 +270,7 @@ ip6t_do_table(struct sk_buff **pskb,
unsigned int hook,
const struct net_device *in,
const struct net_device *out,
- struct ip6t_table *table,
+ struct xt_table *table,
void *userdata)
{
static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
@@ -307,6 +282,7 @@ ip6t_do_table(struct sk_buff **pskb,
const char *indev, *outdev;
void *table_base;
struct ip6t_entry *e, *back;
+ struct xt_table_info *private;
/* Initialization */
indev = in ? in->name : nulldevname;
@@ -319,9 +295,10 @@ ip6t_do_table(struct sk_buff **pskb,
* match it. */
read_lock_bh(&table->lock);
+ private = table->private;
IP_NF_ASSERT(table->valid_hooks & (1 << hook));
- table_base = (void *)table->private->entries[smp_processor_id()];
- e = get_entry(table_base, table->private->hook_entry[hook]);
+ table_base = (void *)private->entries[smp_processor_id()];
+ e = get_entry(table_base, private->hook_entry[hook]);
#ifdef CONFIG_NETFILTER_DEBUG
/* Check noone else using our table */
@@ -337,7 +314,7 @@ ip6t_do_table(struct sk_buff **pskb,
#endif
/* For return from builtin chain */
- back = get_entry(table_base, table->private->underflow[hook]);
+ back = get_entry(table_base, private->underflow[hook]);
do {
IP_NF_ASSERT(e);
@@ -437,145 +414,6 @@ ip6t_do_table(struct sk_buff **pskb,
#endif
}
-/*
- * These are weird, but module loading must not be done with mutex
- * held (since they will register), and we have to have a single
- * function to use try_then_request_module().
- */
-
-/* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */
-static inline struct ip6t_table *find_table_lock(const char *name)
-{
- struct ip6t_table *t;
-
- if (down_interruptible(&ip6t_mutex) != 0)
- return ERR_PTR(-EINTR);
-
- list_for_each_entry(t, &ip6t_tables, list)
- if (strcmp(t->name, name) == 0 && try_module_get(t->me))
- return t;
- up(&ip6t_mutex);
- return NULL;
-}
-
-/* Find match, grabs ref. Returns ERR_PTR() on error. */
-static inline struct ip6t_match *find_match(const char *name, u8 revision)
-{
- struct ip6t_match *m;
- int err = 0;
-
- if (down_interruptible(&ip6t_mutex) != 0)
- return ERR_PTR(-EINTR);
-
- list_for_each_entry(m, &ip6t_match, list) {
- if (strcmp(m->name, name) == 0) {
- if (m->revision == revision) {
- if (try_module_get(m->me)) {
- up(&ip6t_mutex);
- return m;
- }
- } else
- err = -EPROTOTYPE; /* Found something. */
- }
- }
- up(&ip6t_mutex);
- return ERR_PTR(err);
-}
-
-/* Find target, grabs ref. Returns ERR_PTR() on error. */
-static inline struct ip6t_target *find_target(const char *name, u8 revision)
-{
- struct ip6t_target *t;
- int err = 0;
-
- if (down_interruptible(&ip6t_mutex) != 0)
- return ERR_PTR(-EINTR);
-
- list_for_each_entry(t, &ip6t_target, list) {
- if (strcmp(t->name, name) == 0) {
- if (t->revision == revision) {
- if (try_module_get(t->me)) {
- up(&ip6t_mutex);
- return t;
- }
- } else
- err = -EPROTOTYPE; /* Found something. */
- }
- }
- up(&ip6t_mutex);
- return ERR_PTR(err);
-}
-
-struct ip6t_target *ip6t_find_target(const char *name, u8 revision)
-{
- struct ip6t_target *target;
-
- target = try_then_request_module(find_target(name, revision),
- "ip6t_%s", name);
- if (IS_ERR(target) || !target)
- return NULL;
- return target;
-}
-
-static int match_revfn(const char *name, u8 revision, int *bestp)
-{
- struct ip6t_match *m;
- int have_rev = 0;
-
- list_for_each_entry(m, &ip6t_match, list) {
- if (strcmp(m->name, name) == 0) {
- if (m->revision > *bestp)
- *bestp = m->revision;
- if (m->revision == revision)
- have_rev = 1;
- }
- }
- return have_rev;
-}
-
-static int target_revfn(const char *name, u8 revision, int *bestp)
-{
- struct ip6t_target *t;
- int have_rev = 0;
-
- list_for_each_entry(t, &ip6t_target, list) {
- if (strcmp(t->name, name) == 0) {
- if (t->revision > *bestp)
- *bestp = t->revision;
- if (t->revision == revision)
- have_rev = 1;
- }
- }
- return have_rev;
-}
-
-/* Returns true or fals (if no such extension at all) */
-static inline int find_revision(const char *name, u8 revision,
- int (*revfn)(const char *, u8, int *),
- int *err)
-{
- int have_rev, best = -1;
-
- if (down_interruptible(&ip6t_mutex) != 0) {
- *err = -EINTR;
- return 1;
- }
- have_rev = revfn(name, revision, &best);
- up(&ip6t_mutex);
-
- /* Nothing at all? Return 0 to try loading module. */
- if (best == -1) {
- *err = -ENOENT;
- return 0;
- }
-
- *err = best;
- if (!have_rev)
- *err = -EPROTONOSUPPORT;
- return 1;
-}
-
-
/* All zeroes == unconditional rule. */
static inline int
unconditional(const struct ip6t_ip6 *ipv6)
@@ -592,7 +430,7 @@ unconditional(const struct ip6t_ip6 *ipv6)
/* Figures out from what hook each rule can be called: returns 0 if
there are loops. Puts hook bitmask in comefrom. */
static int
-mark_source_chains(struct ip6t_table_info *newinfo,
+mark_source_chains(struct xt_table_info *newinfo,
unsigned int valid_hooks, void *entry0)
{
unsigned int hook;
@@ -738,11 +576,11 @@ check_match(struct ip6t_entry_match *m,
{
struct ip6t_match *match;
- match = try_then_request_module(find_match(m->u.user.name,
- m->u.user.revision),
+ match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
+ m->u.user.revision),
"ip6t_%s", m->u.user.name);
if (IS_ERR(match) || !match) {
- duprintf("check_match: `%s' not found\n", m->u.user.name);
+ duprintf("check_match: `%s' not found\n", m->u.user.name);
return match ? PTR_ERR(match) : -ENOENT;
}
m->u.kernel.match = match;
@@ -783,8 +621,9 @@ check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
goto cleanup_matches;
t = ip6t_get_target(e);
- target = try_then_request_module(find_target(t->u.user.name,
- t->u.user.revision),
+ target = try_then_request_module(xt_find_target(AF_INET6,
+ t->u.user.name,
+ t->u.user.revision),
"ip6t_%s", t->u.user.name);
if (IS_ERR(target) || !target) {
duprintf("check_entry: `%s' not found\n", t->u.user.name);
@@ -820,7 +659,7 @@ check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
static inline int
check_entry_size_and_hooks(struct ip6t_entry *e,
- struct ip6t_table_info *newinfo,
+ struct xt_table_info *newinfo,
unsigned char *base,
unsigned char *limit,
const unsigned int *hook_entries,
@@ -854,7 +693,7 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
< 0 (not IP6T_RETURN). --RR */
/* Clear counters and comefrom */
- e->counters = ((struct ip6t_counters) { 0, 0 });
+ e->counters = ((struct xt_counters) { 0, 0 });
e->comefrom = 0;
(*i)++;
@@ -884,7 +723,7 @@ cleanup_entry(struct ip6t_entry *e, unsigned int *i)
static int
translate_table(const char *name,
unsigned int valid_hooks,
- struct ip6t_table_info *newinfo,
+ struct xt_table_info *newinfo,
void *entry0,
unsigned int size,
unsigned int number,
@@ -961,48 +800,10 @@ translate_table(const char *name,
return ret;
}
-static struct ip6t_table_info *
-replace_table(struct ip6t_table *table,
- unsigned int num_counters,
- struct ip6t_table_info *newinfo,
- int *error)
-{
- struct ip6t_table_info *oldinfo;
-
-#ifdef CONFIG_NETFILTER_DEBUG
- {
- int cpu;
-
- for_each_cpu(cpu) {
- struct ip6t_entry *table_base = newinfo->entries[cpu];
- if (table_base)
- table_base->comefrom = 0xdead57ac;
- }
- }
-#endif
-
- /* Do the substitution. */
- write_lock_bh(&table->lock);
- /* Check inside lock: is the old number correct? */
- if (num_counters != table->private->number) {
- duprintf("num_counters != table->private->number (%u/%u)\n",
- num_counters, table->private->number);
- write_unlock_bh(&table->lock);
- *error = -EAGAIN;
- return NULL;
- }
- oldinfo = table->private;
- table->private = newinfo;
- newinfo->initial_entries = oldinfo->initial_entries;
- write_unlock_bh(&table->lock);
-
- return oldinfo;
-}
-
/* Gets counters. */
static inline int
add_entry_to_counter(const struct ip6t_entry *e,
- struct ip6t_counters total[],
+ struct xt_counters total[],
unsigned int *i)
{
ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
@@ -1023,8 +824,8 @@ set_entry_to_counter(const struct ip6t_entry *e,
}
static void
-get_counters(const struct ip6t_table_info *t,
- struct ip6t_counters counters[])
+get_counters(const struct xt_table_info *t,
+ struct xt_counters counters[])
{
unsigned int cpu;
unsigned int i;
@@ -1058,19 +859,20 @@ get_counters(const struct ip6t_table_info *t,
static int
copy_entries_to_user(unsigned int total_size,
- struct ip6t_table *table,
+ struct xt_table *table,
void __user *userptr)
{
unsigned int off, num, countersize;
struct ip6t_entry *e;
- struct ip6t_counters *counters;
+ struct xt_counters *counters;
+ struct xt_table_info *private = table->private;
int ret = 0;
void *loc_cpu_entry;
/* We need atomic snapshot of counters: rest doesn't change
(other than comefrom, which userspace doesn't care
about). */
- countersize = sizeof(struct ip6t_counters) * table->private->number;
+ countersize = sizeof(struct xt_counters) * private->number;
counters = vmalloc(countersize);
if (counters == NULL)
@@ -1078,11 +880,11 @@ copy_entries_to_user(unsigned int total_size,
/* First, sum counters... */
write_lock_bh(&table->lock);
- get_counters(table->private, counters);
+ get_counters(private, counters);
write_unlock_bh(&table->lock);
/* choose the copy that is on ourc node/cpu */
- loc_cpu_entry = table->private->entries[raw_smp_processor_id()];
+ loc_cpu_entry = private->entries[raw_smp_processor_id()];
if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
ret = -EFAULT;
goto free_counters;
@@ -1141,87 +943,42 @@ get_entries(const struct ip6t_get_entries *entries,
struct ip6t_get_entries __user *uptr)
{
int ret;
- struct ip6t_table *t;
+ struct xt_table *t;
- t = find_table_lock(entries->name);
+ t = xt_find_table_lock(AF_INET6, entries->name);
if (t && !IS_ERR(t)) {
- duprintf("t->private->number = %u\n",
- t->private->number);
- if (entries->size == t->private->size)
- ret = copy_entries_to_user(t->private->size,
+ struct xt_table_info *private = t->private;
+ duprintf("t->private->number = %u\n", private->number);
+ if (entries->size == private->size)
+ ret = copy_entries_to_user(private->size,
t, uptr->entrytable);
else {
duprintf("get_entries: I've got %u not %u!\n",
- t->private->size,
- entries->size);
+ private->size, entries->size);
ret = -EINVAL;
}
module_put(t->me);
- up(&ip6t_mutex);
+ xt_table_unlock(t);
} else
ret = t ? PTR_ERR(t) : -ENOENT;
return ret;
}
-static void free_table_info(struct ip6t_table_info *info)
-{
- int cpu;
- for_each_cpu(cpu) {
- if (info->size <= PAGE_SIZE)
- kfree(info->entries[cpu]);
- else
- vfree(info->entries[cpu]);
- }
- kfree(info);
-}
-
-static struct ip6t_table_info *alloc_table_info(unsigned int size)
-{
- struct ip6t_table_info *newinfo;
- int cpu;
-
- newinfo = kzalloc(sizeof(struct ip6t_table_info), GFP_KERNEL);
- if (!newinfo)
- return NULL;
-
- newinfo->size = size;
-
- for_each_cpu(cpu) {
- if (size <= PAGE_SIZE)
- newinfo->entries[cpu] = kmalloc_node(size,
- GFP_KERNEL,
- cpu_to_node(cpu));
- else
- newinfo->entries[cpu] = vmalloc_node(size,
- cpu_to_node(cpu));
- if (newinfo->entries[cpu] == NULL) {
- free_table_info(newinfo);
- return NULL;
- }
- }
-
- return newinfo;
-}
-
static int
do_replace(void __user *user, unsigned int len)
{
int ret;
struct ip6t_replace tmp;
- struct ip6t_table *t;
- struct ip6t_table_info *newinfo, *oldinfo;
- struct ip6t_counters *counters;
+ struct xt_table *t;
+ struct xt_table_info *newinfo, *oldinfo;
+ struct xt_counters *counters;
void *loc_cpu_entry, *loc_cpu_old_entry;
if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
return -EFAULT;
- /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
- if ((SMP_ALIGN(tmp.size) >> PAGE_SHIFT) + 2 > num_physpages)
- return -ENOMEM;
-
- newinfo = alloc_table_info(tmp.size);
+ newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
return -ENOMEM;
@@ -1233,7 +990,7 @@ do_replace(void __user *user, unsigned int len)
goto free_newinfo;
}
- counters = vmalloc(tmp.num_counters * sizeof(struct ip6t_counters));
+ counters = vmalloc(tmp.num_counters * sizeof(struct xt_counters));
if (!counters) {
ret = -ENOMEM;
goto free_newinfo;
@@ -1247,7 +1004,7 @@ do_replace(void __user *user, unsigned int len)
duprintf("ip_tables: Translated table\n");
- t = try_then_request_module(find_table_lock(tmp.name),
+ t = try_then_request_module(xt_find_table_lock(AF_INET6, tmp.name),
"ip6table_%s", tmp.name);
if (!t || IS_ERR(t)) {
ret = t ? PTR_ERR(t) : -ENOENT;
@@ -1262,7 +1019,7 @@ do_replace(void __user *user, unsigned int len)
goto put_module;
}
- oldinfo = replace_table(t, tmp.num_counters, newinfo, &ret);
+ oldinfo = xt_replace_table(t, tmp.num_counters, newinfo, &ret);
if (!oldinfo)
goto put_module;
@@ -1281,23 +1038,23 @@ do_replace(void __user *user, unsigned int len)
/* Decrease module usage counts and free resource */
loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
- free_table_info(oldinfo);
+ xt_free_table_info(oldinfo);
if (copy_to_user(tmp.counters, counters,
- sizeof(struct ip6t_counters) * tmp.num_counters) != 0)
+ sizeof(struct xt_counters) * tmp.num_counters) != 0)
ret = -EFAULT;
vfree(counters);
- up(&ip6t_mutex);
+ xt_table_unlock(t);
return ret;
put_module:
module_put(t->me);
- up(&ip6t_mutex);
+ xt_table_unlock(t);
free_newinfo_counters_untrans:
IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
free_newinfo_counters:
vfree(counters);
free_newinfo:
- free_table_info(newinfo);
+ xt_free_table_info(newinfo);
return ret;
}
@@ -1305,7 +1062,7 @@ do_replace(void __user *user, unsigned int len)
* and everything is OK. */
static inline int
add_counter_to_entry(struct ip6t_entry *e,
- const struct ip6t_counters addme[],
+ const struct xt_counters addme[],
unsigned int *i)
{
#if 0
@@ -1327,15 +1084,16 @@ static int
do_add_counters(void __user *user, unsigned int len)
{
unsigned int i;
- struct ip6t_counters_info tmp, *paddc;
- struct ip6t_table *t;
+ struct xt_counters_info tmp, *paddc;
+ struct xt_table_info *private;
+ struct xt_table *t;
int ret = 0;
void *loc_cpu_entry;
if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
return -EFAULT;
- if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct ip6t_counters))
+ if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct xt_counters))
return -EINVAL;
paddc = vmalloc(len);
@@ -1347,29 +1105,30 @@ do_add_counters(void __user *user, unsigned int len)
goto free;
}
- t = find_table_lock(tmp.name);
+ t = xt_find_table_lock(AF_INET6, tmp.name);
if (!t || IS_ERR(t)) {
ret = t ? PTR_ERR(t) : -ENOENT;
goto free;
}
write_lock_bh(&t->lock);
- if (t->private->number != paddc->num_counters) {
+ private = t->private;
+ if (private->number != paddc->num_counters) {
ret = -EINVAL;
goto unlock_up_free;
}
i = 0;
/* Choose the copy that is on our node */
- loc_cpu_entry = t->private->entries[smp_processor_id()];
+ loc_cpu_entry = private->entries[smp_processor_id()];
IP6T_ENTRY_ITERATE(loc_cpu_entry,
- t->private->size,
+ private->size,
add_counter_to_entry,
paddc->counters,
&i);
unlock_up_free:
write_unlock_bh(&t->lock);
- up(&ip6t_mutex);
+ xt_table_unlock(t);
module_put(t->me);
free:
vfree(paddc);
@@ -1413,7 +1172,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
switch (cmd) {
case IP6T_SO_GET_INFO: {
char name[IP6T_TABLE_MAXNAMELEN];
- struct ip6t_table *t;
+ struct xt_table *t;
if (*len != sizeof(struct ip6t_getinfo)) {
duprintf("length %u != %u\n", *len,
@@ -1428,25 +1187,26 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
}
name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
- t = try_then_request_module(find_table_lock(name),
+ t = try_then_request_module(xt_find_table_lock(AF_INET6, name),
"ip6table_%s", name);
if (t && !IS_ERR(t)) {
struct ip6t_getinfo info;
+ struct xt_table_info *private = t->private;
info.valid_hooks = t->valid_hooks;
- memcpy(info.hook_entry, t->private->hook_entry,
+ memcpy(info.hook_entry, private->hook_entry,
sizeof(info.hook_entry));
- memcpy(info.underflow, t->private->underflow,
+ memcpy(info.underflow, private->underflow,
sizeof(info.underflow));
- info.num_entries = t->private->number;
- info.size = t->private->size;
+ info.num_entries = private->number;
+ info.size = private->size;
memcpy(info.name, name, sizeof(info.name));
if (copy_to_user(user, &info, *len) != 0)
ret = -EFAULT;
else
ret = 0;
- up(&ip6t_mutex);
+ xt_table_unlock(t);
module_put(t->me);
} else
ret = t ? PTR_ERR(t) : -ENOENT;
@@ -1473,7 +1233,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
case IP6T_SO_GET_REVISION_MATCH:
case IP6T_SO_GET_REVISION_TARGET: {
struct ip6t_get_revision rev;
- int (*revfn)(const char *, u8, int *);
+ int target;
if (*len != sizeof(rev)) {
ret = -EINVAL;
@@ -1485,12 +1245,13 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
}
if (cmd == IP6T_SO_GET_REVISION_TARGET)
- revfn = target_revfn;
+ target = 1;
else
- revfn = match_revfn;
+ target = 0;
- try_then_request_module(find_revision(rev.name, rev.revision,
- revfn, &ret),
+ try_then_request_module(xt_find_revision(AF_INET6, rev.name,
+ rev.revision,
+ target, &ret),
"ip6t_%s", rev.name);
break;
}
@@ -1503,61 +1264,16 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
return ret;
}
-/* Registration hooks for targets. */
-int
-ip6t_register_target(struct ip6t_target *target)
-{
- int ret;
-
- ret = down_interruptible(&ip6t_mutex);
- if (ret != 0)
- return ret;
- list_add(&target->list, &ip6t_target);
- up(&ip6t_mutex);
- return ret;
-}
-
-void
-ip6t_unregister_target(struct ip6t_target *target)
-{
- down(&ip6t_mutex);
- LIST_DELETE(&ip6t_target, target);
- up(&ip6t_mutex);
-}
-
-int
-ip6t_register_match(struct ip6t_match *match)
-{
- int ret;
-
- ret = down_interruptible(&ip6t_mutex);
- if (ret != 0)
- return ret;
-
- list_add(&match->list, &ip6t_match);
- up(&ip6t_mutex);
-
- return ret;
-}
-
-void
-ip6t_unregister_match(struct ip6t_match *match)
-{
- down(&ip6t_mutex);
- LIST_DELETE(&ip6t_match, match);
- up(&ip6t_mutex);
-}
-
-int ip6t_register_table(struct ip6t_table *table,
+int ip6t_register_table(struct xt_table *table,
const struct ip6t_replace *repl)
{
int ret;
- struct ip6t_table_info *newinfo;
- static struct ip6t_table_info bootstrap
+ struct xt_table_info *newinfo;
+ static struct xt_table_info bootstrap
= { 0, 0, 0, { 0 }, { 0 }, { } };
void *loc_cpu_entry;
- newinfo = alloc_table_info(repl->size);
+ newinfo = xt_alloc_table_info(repl->size);
if (!newinfo)
return -ENOMEM;
@@ -1571,244 +1287,29 @@ int ip6t_register_table(struct ip6t_table *table,
repl->hook_entry,
repl->underflow);
if (ret != 0) {
- free_table_info(newinfo);
+ xt_free_table_info(newinfo);
return ret;
}
- ret = down_interruptible(&ip6t_mutex);
- if (ret != 0) {
- free_table_info(newinfo);
+ if (xt_register_table(table, &bootstrap, newinfo) != 0) {
+ xt_free_table_info(newinfo);
return ret;
}
- /* Don't autoload: we'd eat our tail... */
- if (list_named_find(&ip6t_tables, table->name)) {
- ret = -EEXIST;
- goto free_unlock;
- }
-
- /* Simplifies replace_table code. */
- table->private = &bootstrap;
- if (!replace_table(table, 0, newinfo, &ret))
- goto free_unlock;
-
- duprintf("table->private->number = %u\n",
- table->private->number);
-
- /* save number of initial entries */
- table->private->initial_entries = table->private->number;
-
- rwlock_init(&table->lock);
- list_prepend(&ip6t_tables, table);
-
- unlock:
- up(&ip6t_mutex);
- return ret;
-
- free_unlock:
- free_table_info(newinfo);
- goto unlock;
+ return 0;
}
-void ip6t_unregister_table(struct ip6t_table *table)
+void ip6t_unregister_table(struct xt_table *table)
{
+ struct xt_table_info *private;
void *loc_cpu_entry;
- down(&ip6t_mutex);
- LIST_DELETE(&ip6t_tables, table);
- up(&ip6t_mutex);
+ private = xt_unregister_table(table);
/* Decrease module usage counts and free resources */
- loc_cpu_entry = table->private->entries[raw_smp_processor_id()];
- IP6T_ENTRY_ITERATE(loc_cpu_entry, table->private->size,
- cleanup_entry, NULL);
- free_table_info(table->private);
-}
-
-/* Returns 1 if the port is matched by the range, 0 otherwise */
-static inline int
-port_match(u_int16_t min, u_int16_t max, u_int16_t port, int invert)
-{
- int ret;
-
- ret = (port >= min && port <= max) ^ invert;
- return ret;
-}
-
-static int
-tcp_find_option(u_int8_t option,
- const struct sk_buff *skb,
- unsigned int tcpoff,
- unsigned int optlen,
- int invert,
- int *hotdrop)
-{
- /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */
- u_int8_t _opt[60 - sizeof(struct tcphdr)], *op;
- unsigned int i;
-
- duprintf("tcp_match: finding option\n");
- if (!optlen)
- return invert;
- /* If we don't have the whole header, drop packet. */
- op = skb_header_pointer(skb, tcpoff + sizeof(struct tcphdr), optlen,
- _opt);
- if (op == NULL) {
- *hotdrop = 1;
- return 0;
- }
-
- for (i = 0; i < optlen; ) {
- if (op[i] == option) return !invert;
- if (op[i] < 2) i++;
- else i += op[i+1]?:1;
- }
-
- return invert;
-}
-
-static int
-tcp_match(const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const void *matchinfo,
- int offset,
- unsigned int protoff,
- int *hotdrop)
-{
- struct tcphdr _tcph, *th;
- const struct ip6t_tcp *tcpinfo = matchinfo;
-
- if (offset) {
- /* To quote Alan:
-
- Don't allow a fragment of TCP 8 bytes in. Nobody normal
- causes this. Its a cracker trying to break in by doing a
- flag overwrite to pass the direction checks.
- */
- if (offset == 1) {
- duprintf("Dropping evil TCP offset=1 frag.\n");
- *hotdrop = 1;
- }
- /* Must not be a fragment. */
- return 0;
- }
-
-#define FWINVTCP(bool,invflg) ((bool) ^ !!(tcpinfo->invflags & invflg))
-
- th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
- if (th == NULL) {
- /* We've been asked to examine this packet, and we
- can't. Hence, no choice but to drop. */
- duprintf("Dropping evil TCP offset=0 tinygram.\n");
- *hotdrop = 1;
- return 0;
- }
-
- if (!port_match(tcpinfo->spts[0], tcpinfo->spts[1],
- ntohs(th->source),
- !!(tcpinfo->invflags & IP6T_TCP_INV_SRCPT)))
- return 0;
- if (!port_match(tcpinfo->dpts[0], tcpinfo->dpts[1],
- ntohs(th->dest),
- !!(tcpinfo->invflags & IP6T_TCP_INV_DSTPT)))
- return 0;
- if (!FWINVTCP((((unsigned char *)th)[13] & tcpinfo->flg_mask)
- == tcpinfo->flg_cmp,
- IP6T_TCP_INV_FLAGS))
- return 0;
- if (tcpinfo->option) {
- if (th->doff * 4 < sizeof(_tcph)) {
- *hotdrop = 1;
- return 0;
- }
- if (!tcp_find_option(tcpinfo->option, skb, protoff,
- th->doff*4 - sizeof(*th),
- tcpinfo->invflags & IP6T_TCP_INV_OPTION,
- hotdrop))
- return 0;
- }
- return 1;
-}
-
-/* Called when user tries to insert an entry of this type. */
-static int
-tcp_checkentry(const char *tablename,
- const struct ip6t_ip6 *ipv6,
- void *matchinfo,
- unsigned int matchsize,
- unsigned int hook_mask)
-{
- const struct ip6t_tcp *tcpinfo = matchinfo;
-
- /* Must specify proto == TCP, and no unknown invflags */
- return ipv6->proto == IPPROTO_TCP
- && !(ipv6->invflags & IP6T_INV_PROTO)
- && matchsize == IP6T_ALIGN(sizeof(struct ip6t_tcp))
- && !(tcpinfo->invflags & ~IP6T_TCP_INV_MASK);
-}
-
-static int
-udp_match(const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const void *matchinfo,
- int offset,
- unsigned int protoff,
- int *hotdrop)
-{
- struct udphdr _udph, *uh;
- const struct ip6t_udp *udpinfo = matchinfo;
-
- /* Must not be a fragment. */
- if (offset)
- return 0;
-
- uh = skb_header_pointer(skb, protoff, sizeof(_udph), &_udph);
- if (uh == NULL) {
- /* We've been asked to examine this packet, and we
- can't. Hence, no choice but to drop. */
- duprintf("Dropping evil UDP tinygram.\n");
- *hotdrop = 1;
- return 0;
- }
-
- return port_match(udpinfo->spts[0], udpinfo->spts[1],
- ntohs(uh->source),
- !!(udpinfo->invflags & IP6T_UDP_INV_SRCPT))
- && port_match(udpinfo->dpts[0], udpinfo->dpts[1],
- ntohs(uh->dest),
- !!(udpinfo->invflags & IP6T_UDP_INV_DSTPT));
-}
-
-/* Called when user tries to insert an entry of this type. */
-static int
-udp_checkentry(const char *tablename,
- const struct ip6t_ip6 *ipv6,
- void *matchinfo,
- unsigned int matchinfosize,
- unsigned int hook_mask)
-{
- const struct ip6t_udp *udpinfo = matchinfo;
-
- /* Must specify proto == UDP, and no unknown invflags */
- if (ipv6->proto != IPPROTO_UDP || (ipv6->invflags & IP6T_INV_PROTO)) {
- duprintf("ip6t_udp: Protocol %u != %u\n", ipv6->proto,
- IPPROTO_UDP);
- return 0;
- }
- if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_udp))) {
- duprintf("ip6t_udp: matchsize %u != %u\n",
- matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_udp)));
- return 0;
- }
- if (udpinfo->invflags & ~IP6T_UDP_INV_MASK) {
- duprintf("ip6t_udp: unknown flags %X\n",
- udpinfo->invflags);
- return 0;
- }
-
- return 1;
+ loc_cpu_entry = private->entries[raw_smp_processor_id()];
+ IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
+ xt_free_table_info(private);
}
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
@@ -1856,11 +1357,12 @@ icmp6_match(const struct sk_buff *skb,
/* Called when user tries to insert an entry of this type. */
static int
icmp6_checkentry(const char *tablename,
- const struct ip6t_ip6 *ipv6,
+ const void *entry,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
{
+ const struct ip6t_ip6 *ipv6 = entry;
const struct ip6t_icmp *icmpinfo = matchinfo;
/* Must specify proto == ICMP, and no unknown invflags */
@@ -1890,164 +1392,42 @@ static struct nf_sockopt_ops ip6t_sockopts = {
.get = do_ip6t_get_ctl,
};
-static struct ip6t_match tcp_matchstruct = {
- .name = "tcp",
- .match = &tcp_match,
- .checkentry = &tcp_checkentry,
-};
-
-static struct ip6t_match udp_matchstruct = {
- .name = "udp",
- .match = &udp_match,
- .checkentry = &udp_checkentry,
-};
-
static struct ip6t_match icmp6_matchstruct = {
.name = "icmp6",
.match = &icmp6_match,
.checkentry = &icmp6_checkentry,
};
-#ifdef CONFIG_PROC_FS
-static inline int print_name(const char *i,
- off_t start_offset, char *buffer, int length,
- off_t *pos, unsigned int *count)
-{
- if ((*count)++ >= start_offset) {
- unsigned int namelen;
-
- namelen = sprintf(buffer + *pos, "%s\n",
- i + sizeof(struct list_head));
- if (*pos + namelen > length) {
- /* Stop iterating */
- return 1;
- }
- *pos += namelen;
- }
- return 0;
-}
-
-static inline int print_target(const struct ip6t_target *t,
- off_t start_offset, char *buffer, int length,
- off_t *pos, unsigned int *count)
-{
- if (t == &ip6t_standard_target || t == &ip6t_error_target)
- return 0;
- return print_name((char *)t, start_offset, buffer, length, pos, count);
-}
-
-static int ip6t_get_tables(char *buffer, char **start, off_t offset, int length)
-{
- off_t pos = 0;
- unsigned int count = 0;
-
- if (down_interruptible(&ip6t_mutex) != 0)
- return 0;
-
- LIST_FIND(&ip6t_tables, print_name, char *,
- offset, buffer, length, &pos, &count);
-
- up(&ip6t_mutex);
-
- /* `start' hack - see fs/proc/generic.c line ~105 */
- *start=(char *)((unsigned long)count-offset);
- return pos;
-}
-
-static int ip6t_get_targets(char *buffer, char **start, off_t offset, int length)
-{
- off_t pos = 0;
- unsigned int count = 0;
-
- if (down_interruptible(&ip6t_mutex) != 0)
- return 0;
-
- LIST_FIND(&ip6t_target, print_target, struct ip6t_target *,
- offset, buffer, length, &pos, &count);
-
- up(&ip6t_mutex);
-
- *start = (char *)((unsigned long)count - offset);
- return pos;
-}
-
-static int ip6t_get_matches(char *buffer, char **start, off_t offset, int length)
-{
- off_t pos = 0;
- unsigned int count = 0;
-
- if (down_interruptible(&ip6t_mutex) != 0)
- return 0;
-
- LIST_FIND(&ip6t_match, print_name, char *,
- offset, buffer, length, &pos, &count);
-
- up(&ip6t_mutex);
-
- *start = (char *)((unsigned long)count - offset);
- return pos;
-}
-
-static const struct { char *name; get_info_t *get_info; } ip6t_proc_entry[] =
-{ { "ip6_tables_names", ip6t_get_tables },
- { "ip6_tables_targets", ip6t_get_targets },
- { "ip6_tables_matches", ip6t_get_matches },
- { NULL, NULL} };
-#endif /*CONFIG_PROC_FS*/
-
static int __init init(void)
{
int ret;
+ xt_proto_init(AF_INET6);
+
/* Noone else will be downing sem now, so we won't sleep */
- down(&ip6t_mutex);
- list_append(&ip6t_target, &ip6t_standard_target);
- list_append(&ip6t_target, &ip6t_error_target);
- list_append(&ip6t_match, &tcp_matchstruct);
- list_append(&ip6t_match, &udp_matchstruct);
- list_append(&ip6t_match, &icmp6_matchstruct);
- up(&ip6t_mutex);
+ xt_register_target(AF_INET6, &ip6t_standard_target);
+ xt_register_target(AF_INET6, &ip6t_error_target);
+ xt_register_match(AF_INET6, &icmp6_matchstruct);
/* Register setsockopt */
ret = nf_register_sockopt(&ip6t_sockopts);
if (ret < 0) {
duprintf("Unable to register sockopts.\n");
+ xt_proto_fini(AF_INET6);
return ret;
}
-#ifdef CONFIG_PROC_FS
- {
- struct proc_dir_entry *proc;
- int i;
-
- for (i = 0; ip6t_proc_entry[i].name; i++) {
- proc = proc_net_create(ip6t_proc_entry[i].name, 0,
- ip6t_proc_entry[i].get_info);
- if (!proc) {
- while (--i >= 0)
- proc_net_remove(ip6t_proc_entry[i].name);
- nf_unregister_sockopt(&ip6t_sockopts);
- return -ENOMEM;
- }
- proc->owner = THIS_MODULE;
- }
- }
-#endif
-
- printk("ip6_tables: (C) 2000-2002 Netfilter core team\n");
+ printk("ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
return 0;
}
static void __exit fini(void)
{
nf_unregister_sockopt(&ip6t_sockopts);
-#ifdef CONFIG_PROC_FS
- {
- int i;
- for (i = 0; ip6t_proc_entry[i].name; i++)
- proc_net_remove(ip6t_proc_entry[i].name);
- }
-#endif
+ xt_unregister_match(AF_INET6, &icmp6_matchstruct);
+ xt_unregister_target(AF_INET6, &ip6t_error_target);
+ xt_unregister_target(AF_INET6, &ip6t_standard_target);
+ xt_proto_fini(AF_INET6);
}
/*
@@ -2126,10 +1506,6 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);
-EXPORT_SYMBOL(ip6t_register_match);
-EXPORT_SYMBOL(ip6t_unregister_match);
-EXPORT_SYMBOL(ip6t_register_target);
-EXPORT_SYMBOL(ip6t_unregister_target);
EXPORT_SYMBOL(ip6t_ext_hdr);
EXPORT_SYMBOL(ipv6_find_hdr);
EXPORT_SYMBOL(ip6_masked_addrcmp);
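
The ip6_tables.c conversion above removes the private bookkeeping (ip6t_mutex, ip6t_table_info, find_match/find_target, replace_table, the /proc listings and the TCP/UDP matches) and routes everything through the shared x_tables layer keyed by AF_INET6. A minimal sketch of how an IPv6 match extension registers after this change, assuming the 2.6-era xt_register_match()/xt_unregister_match() signatures used elsewhere in this patch; all names and the trivial match logic are illustrative:

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/x_tables.h>

static int example_match(const struct sk_buff *skb,
			 const struct net_device *in,
			 const struct net_device *out,
			 const void *matchinfo,
			 int offset,
			 unsigned int protoff,
			 int *hotdrop)
{
	return 1;	/* placeholder: match every packet */
}

static struct ip6t_match example_reg = {
	.name	= "example",
	.match	= example_match,
	.me	= THIS_MODULE,
};

static int __init example_init(void)
{
	/* register with the shared x_tables core instead of a private list */
	return xt_register_match(AF_INET6, &example_reg);
}

static void __exit example_fini(void)
{
	xt_unregister_match(AF_INET6, &example_reg);
}

module_init(example_init);
module_exit(example_fini);
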
diff --git a/net/ipv6/netfilter/ip6t_HL.c b/net/ipv6/netfilter/ip6t_HL.c
index 8f5549b7272..306200c3505 100644
--- a/net/ipv6/netfilter/ip6t_HL.c
+++ b/net/ipv6/netfilter/ip6t_HL.c
@@ -62,7 +62,7 @@ static unsigned int ip6t_hl_target(struct sk_buff **pskb,
}
static int ip6t_hl_checkentry(const char *tablename,
- const struct ip6t_entry *e,
+ const void *entry,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index ae4653bfd65..77c725832de 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -63,9 +63,8 @@ static void dump_packet(const struct nf_loginfo *info,
return;
}
- /* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000" */
- printk("SRC=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(ih->saddr));
- printk("DST=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(ih->daddr));
+ /* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */
+ printk("SRC=" NIP6_FMT " DST=" NIP6_FMT " ", NIP6(ih->saddr), NIP6(ih->daddr));
/* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
printk("LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
@@ -444,7 +443,7 @@ ip6t_log_target(struct sk_buff **pskb,
static int ip6t_log_checkentry(const char *tablename,
- const struct ip6t_entry *e,
+ const void *entry,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
diff --git a/net/ipv6/netfilter/ip6t_MARK.c b/net/ipv6/netfilter/ip6t_MARK.c
deleted file mode 100644
index eab8fb864ee..00000000000
--- a/net/ipv6/netfilter/ip6t_MARK.c
+++ /dev/null
@@ -1,81 +0,0 @@
-/* This is a module which is used for setting the NFMARK field of an skb. */
-
-/* (C) 1999-2001 Marc Boucher <marc@mbsi.ca>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/ip.h>
-#include <net/checksum.h>
-
-#include <linux/netfilter_ipv6/ip6_tables.h>
-#include <linux/netfilter_ipv6/ip6t_MARK.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
-
-static unsigned int
-target(struct sk_buff **pskb,
- const struct net_device *in,
- const struct net_device *out,
- unsigned int hooknum,
- const void *targinfo,
- void *userinfo)
-{
- const struct ip6t_mark_target_info *markinfo = targinfo;
-
- if((*pskb)->nfmark != markinfo->mark)
- (*pskb)->nfmark = markinfo->mark;
-
- return IP6T_CONTINUE;
-}
-
-static int
-checkentry(const char *tablename,
- const struct ip6t_entry *e,
- void *targinfo,
- unsigned int targinfosize,
- unsigned int hook_mask)
-{
- if (targinfosize != IP6T_ALIGN(sizeof(struct ip6t_mark_target_info))) {
- printk(KERN_WARNING "MARK: targinfosize %u != %Zu\n",
- targinfosize,
- IP6T_ALIGN(sizeof(struct ip6t_mark_target_info)));
- return 0;
- }
-
- if (strcmp(tablename, "mangle") != 0) {
- printk(KERN_WARNING "MARK: can only be called from \"mangle\" table, not \"%s\"\n", tablename);
- return 0;
- }
-
- return 1;
-}
-
-static struct ip6t_target ip6t_mark_reg = {
- .name = "MARK",
- .target = target,
- .checkentry = checkentry,
- .me = THIS_MODULE
-};
-
-static int __init init(void)
-{
- printk(KERN_DEBUG "registering ipv6 mark target\n");
- if (ip6t_register_target(&ip6t_mark_reg))
- return -EINVAL;
-
- return 0;
-}
-
-static void __exit fini(void)
-{
- ip6t_unregister_target(&ip6t_mark_reg);
-}
-
-module_init(init);
-module_exit(fini);
diff --git a/net/ipv6/netfilter/ip6t_NFQUEUE.c b/net/ipv6/netfilter/ip6t_NFQUEUE.c
deleted file mode 100644
index c6e3730e740..00000000000
--- a/net/ipv6/netfilter/ip6t_NFQUEUE.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/* ip6tables module for using new netfilter netlink queue
- *
- * (C) 2005 by Harald Welte <laforge@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
-#include <linux/netfilter_ipv4/ipt_NFQUEUE.h>
-
-MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
-MODULE_DESCRIPTION("ip6tables NFQUEUE target");
-MODULE_LICENSE("GPL");
-
-static unsigned int
-target(struct sk_buff **pskb,
- const struct net_device *in,
- const struct net_device *out,
- unsigned int hooknum,
- const void *targinfo,
- void *userinfo)
-{
- const struct ipt_NFQ_info *tinfo = targinfo;
-
- return NF_QUEUE_NR(tinfo->queuenum);
-}
-
-static int
-checkentry(const char *tablename,
- const struct ip6t_entry *e,
- void *targinfo,
- unsigned int targinfosize,
- unsigned int hook_mask)
-{
- if (targinfosize != IP6T_ALIGN(sizeof(struct ipt_NFQ_info))) {
- printk(KERN_WARNING "NFQUEUE: targinfosize %u != %Zu\n",
- targinfosize,
- IP6T_ALIGN(sizeof(struct ipt_NFQ_info)));
- return 0;
- }
-
- return 1;
-}
-
-static struct ip6t_target ipt_NFQ_reg = {
- .name = "NFQUEUE",
- .target = target,
- .checkentry = checkentry,
- .me = THIS_MODULE,
-};
-
-static int __init init(void)
-{
- return ip6t_register_target(&ipt_NFQ_reg);
-}
-
-static void __exit fini(void)
-{
- ip6t_unregister_target(&ipt_NFQ_reg);
-}
-
-module_init(init);
-module_exit(fini);
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index b03e87adca9..c745717b4ce 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -218,12 +218,13 @@ static unsigned int reject6_target(struct sk_buff **pskb,
}
static int check(const char *tablename,
- const struct ip6t_entry *e,
+ const void *entry,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
{
const struct ip6t_reject_info *rejinfo = targinfo;
+ const struct ip6t_entry *e = entry;
if (targinfosize != IP6T_ALIGN(sizeof(struct ip6t_reject_info))) {
DEBUGP("ip6t_REJECT: targinfosize %u != 0\n", targinfosize);
diff --git a/net/ipv6/netfilter/ip6t_ah.c b/net/ipv6/netfilter/ip6t_ah.c
index f5c1a7ff4a1..219a30365df 100644
--- a/net/ipv6/netfilter/ip6t_ah.c
+++ b/net/ipv6/netfilter/ip6t_ah.c
@@ -98,7 +98,7 @@ match(const struct sk_buff *skb,
/* Called when user tries to insert an entry of this type. */
static int
checkentry(const char *tablename,
- const struct ip6t_ip6 *ip,
+ const void *entry,
void *matchinfo,
unsigned int matchinfosize,
unsigned int hook_mask)
diff --git a/net/ipv6/netfilter/ip6t_dst.c b/net/ipv6/netfilter/ip6t_dst.c
index 48cf5f9efc9..80fe82669ce 100644
--- a/net/ipv6/netfilter/ip6t_dst.c
+++ b/net/ipv6/netfilter/ip6t_dst.c
@@ -178,7 +178,7 @@ match(const struct sk_buff *skb,
/* Called when user tries to insert an entry of this type. */
static int
checkentry(const char *tablename,
- const struct ip6t_ip6 *ip,
+ const void *info,
void *matchinfo,
unsigned int matchinfosize,
unsigned int hook_mask)
diff --git a/net/ipv6/netfilter/ip6t_esp.c b/net/ipv6/netfilter/ip6t_esp.c
index e1828f6d0a4..724285df871 100644
--- a/net/ipv6/netfilter/ip6t_esp.c
+++ b/net/ipv6/netfilter/ip6t_esp.c
@@ -76,7 +76,7 @@ match(const struct sk_buff *skb,
/* Called when user tries to insert an entry of this type. */
static int
checkentry(const char *tablename,
- const struct ip6t_ip6 *ip,
+ const void *ip,
void *matchinfo,
unsigned int matchinfosize,
unsigned int hook_mask)
diff --git a/net/ipv6/netfilter/ip6t_eui64.c b/net/ipv6/netfilter/ip6t_eui64.c
index 616c2cbcd54..ddf5f571909 100644
--- a/net/ipv6/netfilter/ip6t_eui64.c
+++ b/net/ipv6/netfilter/ip6t_eui64.c
@@ -62,7 +62,7 @@ match(const struct sk_buff *skb,
static int
ip6t_eui64_checkentry(const char *tablename,
- const struct ip6t_ip6 *ip,
+ const void *ip,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
diff --git a/net/ipv6/netfilter/ip6t_frag.c b/net/ipv6/netfilter/ip6t_frag.c
index d1549b26866..a9964b946ed 100644
--- a/net/ipv6/netfilter/ip6t_frag.c
+++ b/net/ipv6/netfilter/ip6t_frag.c
@@ -115,7 +115,7 @@ match(const struct sk_buff *skb,
/* Called when user tries to insert an entry of this type. */
static int
checkentry(const char *tablename,
- const struct ip6t_ip6 *ip,
+ const void *ip,
void *matchinfo,
unsigned int matchinfosize,
unsigned int hook_mask)
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c
index e3bc8e2700e..ed8ded18bbd 100644
--- a/net/ipv6/netfilter/ip6t_hbh.c
+++ b/net/ipv6/netfilter/ip6t_hbh.c
@@ -178,7 +178,7 @@ match(const struct sk_buff *skb,
/* Called when user tries to insert an entry of this type. */
static int
checkentry(const char *tablename,
- const struct ip6t_ip6 *ip,
+ const void *entry,
void *matchinfo,
unsigned int matchinfosize,
unsigned int hook_mask)
diff --git a/net/ipv6/netfilter/ip6t_hl.c b/net/ipv6/netfilter/ip6t_hl.c
index 0beaff5471d..c5d9079f2d9 100644
--- a/net/ipv6/netfilter/ip6t_hl.c
+++ b/net/ipv6/netfilter/ip6t_hl.c
@@ -48,7 +48,7 @@ static int match(const struct sk_buff *skb, const struct net_device *in,
return 0;
}
-static int checkentry(const char *tablename, const struct ip6t_ip6 *ip,
+static int checkentry(const char *tablename, const void *entry,
void *matchinfo, unsigned int matchsize,
unsigned int hook_mask)
{
diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c
index 32e67f05845..fda1ceaf5a2 100644
--- a/net/ipv6/netfilter/ip6t_ipv6header.c
+++ b/net/ipv6/netfilter/ip6t_ipv6header.c
@@ -124,7 +124,7 @@ ipv6header_match(const struct sk_buff *skb,
static int
ipv6header_checkentry(const char *tablename,
- const struct ip6t_ip6 *ip,
+ const void *ip,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
diff --git a/net/ipv6/netfilter/ip6t_length.c b/net/ipv6/netfilter/ip6t_length.c
deleted file mode 100644
index e0537d3811d..00000000000
--- a/net/ipv6/netfilter/ip6t_length.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/* Length Match - IPv6 Port */
-
-/* (C) 1999-2001 James Morris <jmorros@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/netfilter_ipv6/ip6t_length.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
-MODULE_DESCRIPTION("IPv6 packet length match");
-
-static int
-match(const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const void *matchinfo,
- int offset,
- unsigned int protoff,
- int *hotdrop)
-{
- const struct ip6t_length_info *info = matchinfo;
- u_int16_t pktlen = ntohs(skb->nh.ipv6h->payload_len) + sizeof(struct ipv6hdr);
-
- return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;
-}
-
-static int
-checkentry(const char *tablename,
- const struct ip6t_ip6 *ip,
- void *matchinfo,
- unsigned int matchsize,
- unsigned int hook_mask)
-{
- if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_length_info)))
- return 0;
-
- return 1;
-}
-
-static struct ip6t_match length_match = {
- .name = "length",
- .match = &match,
- .checkentry = &checkentry,
- .me = THIS_MODULE,
-};
-
-static int __init init(void)
-{
- return ip6t_register_match(&length_match);
-}
-
-static void __exit fini(void)
-{
- ip6t_unregister_match(&length_match);
-}
-
-module_init(init);
-module_exit(fini);
diff --git a/net/ipv6/netfilter/ip6t_limit.c b/net/ipv6/netfilter/ip6t_limit.c
deleted file mode 100644
index fb782f610be..00000000000
--- a/net/ipv6/netfilter/ip6t_limit.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/* Kernel module to control the rate
- *
- * 2 September 1999: Changed from the target RATE to the match
- * `limit', removed logging. Did I mention that
- * Alexey is a fucking genius?
- * Rusty Russell (rusty@rustcorp.com.au). */
-
-/* (C) 1999 Jérôme de Vivie <devivie@info.enserb.u-bordeaux.fr>
- * (C) 1999 Hervé Eychenne <eychenne@info.enserb.u-bordeaux.fr>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-
-#include <linux/netfilter_ipv6/ip6_tables.h>
-#include <linux/netfilter_ipv6/ip6t_limit.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Herve Eychenne <rv@wallfire.org>");
-MODULE_DESCRIPTION("rate limiting within ip6tables");
-
-/* The algorithm used is the Simple Token Bucket Filter (TBF)
- * see net/sched/sch_tbf.c in the linux source tree
- */
-
-static DEFINE_SPINLOCK(limit_lock);
-
-/* Rusty: This is my (non-mathematically-inclined) understanding of
- this algorithm. The `average rate' in jiffies becomes your initial
- amount of credit `credit' and the most credit you can ever have
- `credit_cap'. The `peak rate' becomes the cost of passing the
- test, `cost'.
-
- `prev' tracks the last packet hit: you gain one credit per jiffy.
- If you get credit balance more than this, the extra credit is
- discarded. Every time the match passes, you lose `cost' credits;
- if you don't have that many, the test fails.
-
- See Alexey's formal explanation in net/sched/sch_tbf.c.
-
- To avoid underflow, we multiply by 128 (ie. you get 128 credits per
- jiffy). Hence a cost of 2^32-1, means one pass per 32768 seconds
- at 1024HZ (or one every 9 hours). A cost of 1 means 12800 passes
- per second at 100HZ. */
-
-#define CREDITS_PER_JIFFY 128
-
-static int
-ip6t_limit_match(const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const void *matchinfo,
- int offset,
- unsigned int protoff,
- int *hotdrop)
-{
- struct ip6t_rateinfo *r = ((struct ip6t_rateinfo *)matchinfo)->master;
- unsigned long now = jiffies;
-
- spin_lock_bh(&limit_lock);
- r->credit += (now - xchg(&r->prev, now)) * CREDITS_PER_JIFFY;
- if (r->credit > r->credit_cap)
- r->credit = r->credit_cap;
-
- if (r->credit >= r->cost) {
- /* We're not limited. */
- r->credit -= r->cost;
- spin_unlock_bh(&limit_lock);
- return 1;
- }
-
- spin_unlock_bh(&limit_lock);
- return 0;
-}
-
-/* Precision saver. */
-static u_int32_t
-user2credits(u_int32_t user)
-{
- /* If multiplying would overflow... */
- if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
- /* Divide first. */
- return (user / IP6T_LIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
-
- return (user * HZ * CREDITS_PER_JIFFY) / IP6T_LIMIT_SCALE;
-}
-
-static int
-ip6t_limit_checkentry(const char *tablename,
- const struct ip6t_ip6 *ip,
- void *matchinfo,
- unsigned int matchsize,
- unsigned int hook_mask)
-{
- struct ip6t_rateinfo *r = matchinfo;
-
- if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_rateinfo)))
- return 0;
-
- /* Check for overflow. */
- if (r->burst == 0
- || user2credits(r->avg * r->burst) < user2credits(r->avg)) {
- printk("Call rusty: overflow in ip6t_limit: %u/%u\n",
- r->avg, r->burst);
- return 0;
- }
-
- /* User avg in seconds * IP6T_LIMIT_SCALE: convert to jiffies *
- 128. */
- r->prev = jiffies;
- r->credit = user2credits(r->avg * r->burst); /* Credits full. */
- r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */
- r->cost = user2credits(r->avg);
-
- /* For SMP, we only want to use one set of counters. */
- r->master = r;
-
- return 1;
-}
-
-static struct ip6t_match ip6t_limit_reg = {
- .name = "limit",
- .match = ip6t_limit_match,
- .checkentry = ip6t_limit_checkentry,
- .me = THIS_MODULE,
-};
-
-static int __init init(void)
-{
- if (ip6t_register_match(&ip6t_limit_reg))
- return -EINVAL;
- return 0;
-}
-
-static void __exit fini(void)
-{
- ip6t_unregister_match(&ip6t_limit_reg);
-}
-
-module_init(init);
-module_exit(fini);
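The removed ip6t_limit.c above keeps its token bucket in credits, 128 per jiffy, and converts the user-visible average/burst values with user2credits(). The standalone sketch below reproduces that arithmetic so the numbers in Rusty's comment can be checked; HZ=1000 and a scale factor of 10000 (the usual IP6T_LIMIT_SCALE, i.e. avg expressed in 1/10000ths of a second) are assumptions for illustration only.

/* Standalone sketch of the token-bucket arithmetic used by the removed
 * limit match; HZ and the scale factor are assumed values. */
#include <stdio.h>
#include <stdint.h>

#define HZ                1000U          /* assumed tick rate */
#define CREDITS_PER_JIFFY 128U
#define LIMIT_SCALE       10000U         /* 1/10000th-of-a-second units */

/* Convert a user-supplied period into internal credits, dividing first
 * when multiplying would overflow (same precision-saver idea as above). */
static uint32_t user2credits(uint32_t user)
{
        if (user > 0xFFFFFFFFU / (HZ * CREDITS_PER_JIFFY))
                return (user / LIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
        return (user * HZ * CREDITS_PER_JIFFY) / LIMIT_SCALE;
}

int main(void)
{
        uint32_t avg = LIMIT_SCALE;      /* "1/second": one match per second */
        uint32_t burst = 5;
        uint32_t cost = user2credits(avg);
        uint32_t cap = user2credits(avg * burst);

        /* Each elapsed jiffy earns 128 credits; each accepted packet
         * spends `cost` credits, so the steady rate is HZ*128/cost. */
        printf("cost=%u credit_cap=%u packets/burst=%u\n",
               cost, cap, cap / cost);
        return 0;
}

With these assumed values the program prints cost=128000, credit_cap=640000 and a burst of 5 packets, matching a one-per-second steady rate.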
diff --git a/net/ipv6/netfilter/ip6t_mac.c b/net/ipv6/netfilter/ip6t_mac.c
deleted file mode 100644
index 526d43e3723..00000000000
--- a/net/ipv6/netfilter/ip6t_mac.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/* Kernel module to match MAC address parameters. */
-
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/if_ether.h>
-
-#include <linux/netfilter_ipv6/ip6t_mac.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("MAC address matching module for IPv6");
-MODULE_AUTHOR("Netfilter Core Teaam <coreteam@netfilter.org>");
-
-static int
-match(const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const void *matchinfo,
- int offset,
- unsigned int protoff,
- int *hotdrop)
-{
- const struct ip6t_mac_info *info = matchinfo;
-
- /* Is mac pointer valid? */
- return (skb->mac.raw >= skb->head
- && (skb->mac.raw + ETH_HLEN) <= skb->data
- /* If so, compare... */
- && ((memcmp(eth_hdr(skb)->h_source, info->srcaddr, ETH_ALEN)
- == 0) ^ info->invert));
-}
-
-static int
-ip6t_mac_checkentry(const char *tablename,
- const struct ip6t_ip6 *ip,
- void *matchinfo,
- unsigned int matchsize,
- unsigned int hook_mask)
-{
- if (hook_mask
- & ~((1 << NF_IP6_PRE_ROUTING) | (1 << NF_IP6_LOCAL_IN)
- | (1 << NF_IP6_FORWARD))) {
- printk("ip6t_mac: only valid for PRE_ROUTING, LOCAL_IN or"
- " FORWARD\n");
- return 0;
- }
-
- if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_mac_info)))
- return 0;
-
- return 1;
-}
-
-static struct ip6t_match mac_match = {
- .name = "mac",
- .match = &match,
- .checkentry = &ip6t_mac_checkentry,
- .me = THIS_MODULE,
-};
-
-static int __init init(void)
-{
- return ip6t_register_match(&mac_match);
-}
-
-static void __exit fini(void)
-{
- ip6t_unregister_match(&mac_match);
-}
-
-module_init(init);
-module_exit(fini);
diff --git a/net/ipv6/netfilter/ip6t_mark.c b/net/ipv6/netfilter/ip6t_mark.c
deleted file mode 100644
index affc3de364f..00000000000
--- a/net/ipv6/netfilter/ip6t_mark.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/* Kernel module to match NFMARK values. */
-
-/* (C) 1999-2001 Marc Boucher <marc@mbsi.ca>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-
-#include <linux/netfilter_ipv6/ip6t_mark.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
-MODULE_DESCRIPTION("ip6tables mark match");
-
-static int
-match(const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const void *matchinfo,
- int offset,
- unsigned int protoff,
- int *hotdrop)
-{
- const struct ip6t_mark_info *info = matchinfo;
-
- return ((skb->nfmark & info->mask) == info->mark) ^ info->invert;
-}
-
-static int
-checkentry(const char *tablename,
- const struct ip6t_ip6 *ip,
- void *matchinfo,
- unsigned int matchsize,
- unsigned int hook_mask)
-{
- if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_mark_info)))
- return 0;
-
- return 1;
-}
-
-static struct ip6t_match mark_match = {
- .name = "mark",
- .match = &match,
- .checkentry = &checkentry,
- .me = THIS_MODULE,
-};
-
-static int __init init(void)
-{
- return ip6t_register_match(&mark_match);
-}
-
-static void __exit fini(void)
-{
- ip6t_unregister_match(&mark_match);
-}
-
-module_init(init);
-module_exit(fini);
diff --git a/net/ipv6/netfilter/ip6t_multiport.c b/net/ipv6/netfilter/ip6t_multiport.c
index 6e3246153fa..49f7829dfbc 100644
--- a/net/ipv6/netfilter/ip6t_multiport.c
+++ b/net/ipv6/netfilter/ip6t_multiport.c
@@ -84,11 +84,12 @@ match(const struct sk_buff *skb,
/* Called when user tries to insert an entry of this type. */
static int
checkentry(const char *tablename,
- const struct ip6t_ip6 *ip,
+ const void *info,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
{
+ const struct ip6t_ip6 *ip = info;
const struct ip6t_multiport *multiinfo = matchinfo;
if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_multiport)))
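The hunks above repeatedly widen the second checkentry() argument from `const struct ip6t_ip6 *` to `const void *`, apparently so the same callback prototype can be shared across address families by the generic x_tables layer introduced later in this patch; matches that still need the per-family rule cast it back, as ip6t_multiport does here. A minimal userspace sketch of that cast-back pattern follows; the struct and function names in it are illustrative stand-ins, not kernel symbols.

/* Minimal userspace sketch of the cast-back pattern, not kernel code. */
#include <stdio.h>

struct fake_ip6_rule {                  /* stands in for struct ip6t_ip6 */
        unsigned char proto;
};

/* Generic callback: the table passes its per-family rule as void *. */
static int example_checkentry(const char *tablename, const void *entry,
                              void *matchinfo, unsigned int matchsize,
                              unsigned int hook_mask)
{
        const struct fake_ip6_rule *ip = entry;   /* cast back when needed */

        printf("table=%s proto=%u matchsize=%u hooks=%#x\n",
               tablename, ip->proto, matchsize, hook_mask);
        return 1;
}

int main(void)
{
        struct fake_ip6_rule rule = { .proto = 6 };

        return example_checkentry("filter", &rule, NULL, 0, 0x1) ? 0 : 1;
}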
diff --git a/net/ipv6/netfilter/ip6t_owner.c b/net/ipv6/netfilter/ip6t_owner.c
index 4de4cdad4b7..5409b375b51 100644
--- a/net/ipv6/netfilter/ip6t_owner.c
+++ b/net/ipv6/netfilter/ip6t_owner.c
@@ -53,7 +53,7 @@ match(const struct sk_buff *skb,
static int
checkentry(const char *tablename,
- const struct ip6t_ip6 *ip,
+ const void *ip,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
diff --git a/net/ipv6/netfilter/ip6t_policy.c b/net/ipv6/netfilter/ip6t_policy.c
new file mode 100644
index 00000000000..13fedad48c1
--- /dev/null
+++ b/net/ipv6/netfilter/ip6t_policy.c
@@ -0,0 +1,175 @@
+/* IP tables module for matching IPsec policy
+ *
+ * Copyright (c) 2004,2005 Patrick McHardy, <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <net/xfrm.h>
+
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter_ipv6/ip6t_policy.h>
+
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("IPtables IPsec policy matching module");
+MODULE_LICENSE("GPL");
+
+
+static inline int
+match_xfrm_state(struct xfrm_state *x, const struct ip6t_policy_elem *e)
+{
+#define MATCH_ADDR(x,y,z) (!e->match.x || \
+ ((ip6_masked_addrcmp((z), &e->x, &e->y)) == 0) ^ e->invert.x)
+#define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x))
+
+ return MATCH_ADDR(saddr, smask, (struct in6_addr *)&x->props.saddr.a6) &&
+ MATCH_ADDR(daddr, dmask, (struct in6_addr *)&x->id.daddr.a6) &&
+ MATCH(proto, x->id.proto) &&
+ MATCH(mode, x->props.mode) &&
+ MATCH(spi, x->id.spi) &&
+ MATCH(reqid, x->props.reqid);
+}
+
+static int
+match_policy_in(const struct sk_buff *skb, const struct ip6t_policy_info *info)
+{
+ const struct ip6t_policy_elem *e;
+ struct sec_path *sp = skb->sp;
+ int strict = info->flags & IP6T_POLICY_MATCH_STRICT;
+ int i, pos;
+
+ if (sp == NULL)
+ return -1;
+ if (strict && info->len != sp->len)
+ return 0;
+
+ for (i = sp->len - 1; i >= 0; i--) {
+ pos = strict ? i - sp->len + 1 : 0;
+ if (pos >= info->len)
+ return 0;
+ e = &info->pol[pos];
+
+ if (match_xfrm_state(sp->x[i].xvec, e)) {
+ if (!strict)
+ return 1;
+ } else if (strict)
+ return 0;
+ }
+
+ return strict ? 1 : 0;
+}
+
+static int
+match_policy_out(const struct sk_buff *skb, const struct ip6t_policy_info *info)
+{
+ const struct ip6t_policy_elem *e;
+ struct dst_entry *dst = skb->dst;
+ int strict = info->flags & IP6T_POLICY_MATCH_STRICT;
+ int i, pos;
+
+ if (dst->xfrm == NULL)
+ return -1;
+
+ for (i = 0; dst && dst->xfrm; dst = dst->child, i++) {
+ pos = strict ? i : 0;
+ if (pos >= info->len)
+ return 0;
+ e = &info->pol[pos];
+
+ if (match_xfrm_state(dst->xfrm, e)) {
+ if (!strict)
+ return 1;
+ } else if (strict)
+ return 0;
+ }
+
+ return strict ? 1 : 0;
+}
+
+static int match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *matchinfo,
+ int offset,
+ unsigned int protoff,
+ int *hotdrop)
+{
+ const struct ip6t_policy_info *info = matchinfo;
+ int ret;
+
+ if (info->flags & IP6T_POLICY_MATCH_IN)
+ ret = match_policy_in(skb, info);
+ else
+ ret = match_policy_out(skb, info);
+
+ if (ret < 0)
+ ret = info->flags & IP6T_POLICY_MATCH_NONE ? 1 : 0;
+ else if (info->flags & IP6T_POLICY_MATCH_NONE)
+ ret = 0;
+
+ return ret;
+}
+
+static int checkentry(const char *tablename, const struct ip6t_ip6 *ip,
+ void *matchinfo, unsigned int matchsize,
+ unsigned int hook_mask)
+{
+ struct ip6t_policy_info *info = matchinfo;
+
+ if (matchsize != IP6T_ALIGN(sizeof(*info))) {
+ printk(KERN_ERR "ip6t_policy: matchsize %u != %zu\n",
+ matchsize, IP6T_ALIGN(sizeof(*info)));
+ return 0;
+ }
+ if (!(info->flags & (IP6T_POLICY_MATCH_IN|IP6T_POLICY_MATCH_OUT))) {
+ printk(KERN_ERR "ip6t_policy: neither incoming nor "
+ "outgoing policy selected\n");
+ return 0;
+ }
+ if (hook_mask & (1 << NF_IP6_PRE_ROUTING | 1 << NF_IP6_LOCAL_IN)
+ && info->flags & IP6T_POLICY_MATCH_OUT) {
+ printk(KERN_ERR "ip6t_policy: output policy not valid in "
+ "PRE_ROUTING and INPUT\n");
+ return 0;
+ }
+ if (hook_mask & (1 << NF_IP6_POST_ROUTING | 1 << NF_IP6_LOCAL_OUT)
+ && info->flags & IP6T_POLICY_MATCH_IN) {
+ printk(KERN_ERR "ip6t_policy: input policy not valid in "
+ "POST_ROUTING and OUTPUT\n");
+ return 0;
+ }
+ if (info->len > IP6T_POLICY_MAX_ELEM) {
+ printk(KERN_ERR "ip6t_policy: too many policy elements\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct ip6t_match policy_match = {
+ .name = "policy",
+ .match = match,
+ .checkentry = checkentry,
+ .me = THIS_MODULE,
+};
+
+static int __init init(void)
+{
+ return ip6t_register_match(&policy_match);
+}
+
+static void __exit fini(void)
+{
+ ip6t_unregister_match(&policy_match);
+}
+
+module_init(init);
+module_exit(fini);
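The new policy match walks either the inbound security path (skb->sp) or the outbound dst->xfrm chain; in strict mode every configured element must line up one-to-one with the transforms, otherwise a single hit is enough. The userspace model below captures that strict/loose loop with toy types; it walks the transforms forward for brevity, whereas match_policy_in() above walks the security path from the innermost transform outwards.

/* Userspace model of the strict vs. non-strict element walk in the new
 * policy match; types and values here are illustrative only. */
#include <stdio.h>

struct toy_elem { int proto; };          /* one configured policy element */

static int toy_match(const int *xfrm_protos, int nxfrm,
                     const struct toy_elem *pol, int npol, int strict)
{
        int i;

        if (strict && npol != nxfrm)
                return 0;

        for (i = 0; i < nxfrm; i++) {
                int pos = strict ? i : 0;

                if (pos >= npol)
                        return 0;
                if (xfrm_protos[i] == pol[pos].proto) {
                        if (!strict)
                                return 1;     /* any hit is enough */
                } else if (strict)
                        return 0;             /* every element must match */
        }
        return strict ? 1 : 0;
}

int main(void)
{
        int transforms[] = { 50, 51 };        /* e.g. ESP then AH */
        struct toy_elem rule[] = { { 50 }, { 51 } };

        printf("strict=%d loose=%d\n",
               toy_match(transforms, 2, rule, 2, 1),
               toy_match(transforms, 2, rule, 1, 0));
        return 0;
}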
diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c
index c1e770e4554..8465b437585 100644
--- a/net/ipv6/netfilter/ip6t_rt.c
+++ b/net/ipv6/netfilter/ip6t_rt.c
@@ -183,7 +183,7 @@ match(const struct sk_buff *skb,
/* Called when user tries to insert an entry of this type. */
static int
checkentry(const char *tablename,
- const struct ip6t_ip6 *ip,
+ const void *entry,
void *matchinfo,
unsigned int matchinfosize,
unsigned int hook_mask)
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index 4c0028671c2..ce4a968e1f7 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -97,6 +97,7 @@ static struct ip6t_table packet_filter = {
.valid_hooks = FILTER_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.me = THIS_MODULE,
+ .af = AF_INET6,
};
/* The work comes in here from netfilter.c. */
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 85c1e6eada1..30a4627e000 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -127,6 +127,7 @@ static struct ip6t_table packet_mangler = {
.valid_hooks = MANGLE_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
.me = THIS_MODULE,
+ .af = AF_INET6,
};
/* The work comes in here from netfilter.c. */
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index c2982efd14a..db28ba3855e 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -106,11 +106,12 @@ static struct
}
};
-static struct ip6t_table packet_raw = {
+static struct xt_table packet_raw = {
.name = "raw",
.valid_hooks = RAW_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
- .me = THIS_MODULE
+ .me = THIS_MODULE,
+ .af = AF_INET6,
};
/* The work comes in here from netfilter.c. */
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 704fbbe7487..ac702a29dd1 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -74,7 +74,7 @@ static int ipv6_invert_tuple(struct nf_conntrack_tuple *tuple,
static int ipv6_print_tuple(struct seq_file *s,
const struct nf_conntrack_tuple *tuple)
{
- return seq_printf(s, "src=%x:%x:%x:%x:%x:%x:%x:%x dst=%x:%x:%x:%x:%x:%x:%x:%x ",
+ return seq_printf(s, "src=" NIP6_FMT " dst=" NIP6_FMT " ",
NIP6(*((struct in6_addr *)tuple->src.u3.ip6)),
NIP6(*((struct in6_addr *)tuple->dst.u3.ip6)));
}
@@ -335,10 +335,10 @@ static struct nf_hook_ops ipv6_conntrack_local_in_ops = {
#ifdef CONFIG_SYSCTL
/* From nf_conntrack_proto_icmpv6.c */
-extern unsigned long nf_ct_icmpv6_timeout;
+extern unsigned int nf_ct_icmpv6_timeout;
/* From nf_conntrack_frag6.c */
-extern unsigned long nf_ct_frag6_timeout;
+extern unsigned int nf_ct_frag6_timeout;
extern unsigned int nf_ct_frag6_low_thresh;
extern unsigned int nf_ct_frag6_high_thresh;
@@ -584,7 +584,7 @@ MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>");
static int __init init(void)
{
- need_nf_conntrack();
+ need_conntrack();
return init_or_cleanup(1);
}
@@ -595,9 +595,3 @@ static void __exit fini(void)
module_init(init);
module_exit(fini);
-
-void need_ip6_conntrack(void)
-{
-}
-
-EXPORT_SYMBOL(need_ip6_conntrack);
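Several hunks in this patch (here, and later in xfrm6_tunnel.c and nf_conntrack_ftp.c) replace hand-rolled "%x:%x:..." format strings with the NIP6_FMT/NIP6 helpers. The userspace approximation below only mirrors the output format; the real macros lived in include/linux/kernel.h at the time and read s6_addr16[] directly, so the memcpy and the local NIP6() definition here are illustration-only stand-ins.

/* Userspace approximation of NIP6_FMT / NIP6 used in the conversions above. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#define NIP6_FMT "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
#define NIP6(w) ntohs((w)[0]), ntohs((w)[1]), ntohs((w)[2]), ntohs((w)[3]), \
                ntohs((w)[4]), ntohs((w)[5]), ntohs((w)[6]), ntohs((w)[7])

int main(void)
{
        struct in6_addr a;
        unsigned short w[8];

        inet_pton(AF_INET6, "2001:db8::1", &a);
        memcpy(w, &a, sizeof(w));   /* kernel's NIP6() reads s6_addr16[] directly */
        printf("src=" NIP6_FMT "\n", NIP6(w));
        return 0;
}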
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index f3e5ffbd592..84ef9a13108 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -70,8 +70,8 @@ struct nf_ct_frag6_skb_cb
struct nf_ct_frag6_queue
{
- struct nf_ct_frag6_queue *next;
- struct list_head lru_list; /* lru list member */
+ struct hlist_node list;
+ struct list_head lru_list; /* lru list member */
__u32 id; /* fragment id */
struct in6_addr saddr;
@@ -90,14 +90,13 @@ struct nf_ct_frag6_queue
#define FIRST_IN 2
#define LAST_IN 1
__u16 nhoffset;
- struct nf_ct_frag6_queue **pprev;
};
/* Hash table. */
#define FRAG6Q_HASHSZ 64
-static struct nf_ct_frag6_queue *nf_ct_frag6_hash[FRAG6Q_HASHSZ];
+static struct hlist_head nf_ct_frag6_hash[FRAG6Q_HASHSZ];
static DEFINE_RWLOCK(nf_ct_frag6_lock);
static u32 nf_ct_frag6_hash_rnd;
static LIST_HEAD(nf_ct_frag6_lru_list);
@@ -105,9 +104,7 @@ int nf_ct_frag6_nqueues = 0;
static __inline__ void __fq_unlink(struct nf_ct_frag6_queue *fq)
{
- if (fq->next)
- fq->next->pprev = fq->pprev;
- *fq->pprev = fq->next;
+ hlist_del(&fq->list);
list_del(&fq->lru_list);
nf_ct_frag6_nqueues--;
}
@@ -158,28 +155,18 @@ static void nf_ct_frag6_secret_rebuild(unsigned long dummy)
get_random_bytes(&nf_ct_frag6_hash_rnd, sizeof(u32));
for (i = 0; i < FRAG6Q_HASHSZ; i++) {
struct nf_ct_frag6_queue *q;
+ struct hlist_node *p, *n;
- q = nf_ct_frag6_hash[i];
- while (q) {
- struct nf_ct_frag6_queue *next = q->next;
+ hlist_for_each_entry_safe(q, p, n, &nf_ct_frag6_hash[i], list) {
unsigned int hval = ip6qhashfn(q->id,
&q->saddr,
&q->daddr);
-
if (hval != i) {
- /* Unlink. */
- if (q->next)
- q->next->pprev = q->pprev;
- *q->pprev = q->next;
-
+ hlist_del(&q->list);
/* Relink to new hash chain. */
- if ((q->next = nf_ct_frag6_hash[hval]) != NULL)
- q->next->pprev = &q->next;
- nf_ct_frag6_hash[hval] = q;
- q->pprev = &nf_ct_frag6_hash[hval];
+ hlist_add_head(&q->list,
+ &nf_ct_frag6_hash[hval]);
}
-
- q = next;
}
}
write_unlock(&nf_ct_frag6_lock);
@@ -314,15 +301,17 @@ out:
/* Creation primitives. */
-
static struct nf_ct_frag6_queue *nf_ct_frag6_intern(unsigned int hash,
struct nf_ct_frag6_queue *fq_in)
{
struct nf_ct_frag6_queue *fq;
+#ifdef CONFIG_SMP
+ struct hlist_node *n;
+#endif
write_lock(&nf_ct_frag6_lock);
#ifdef CONFIG_SMP
- for (fq = nf_ct_frag6_hash[hash]; fq; fq = fq->next) {
+ hlist_for_each_entry(fq, n, &nf_ct_frag6_hash[hash], list) {
if (fq->id == fq_in->id &&
!ipv6_addr_cmp(&fq_in->saddr, &fq->saddr) &&
!ipv6_addr_cmp(&fq_in->daddr, &fq->daddr)) {
@@ -340,10 +329,7 @@ static struct nf_ct_frag6_queue *nf_ct_frag6_intern(unsigned int hash,
atomic_inc(&fq->refcnt);
atomic_inc(&fq->refcnt);
- if ((fq->next = nf_ct_frag6_hash[hash]) != NULL)
- fq->next->pprev = &fq->next;
- nf_ct_frag6_hash[hash] = fq;
- fq->pprev = &nf_ct_frag6_hash[hash];
+ hlist_add_head(&fq->list, &nf_ct_frag6_hash[hash]);
INIT_LIST_HEAD(&fq->lru_list);
list_add_tail(&fq->lru_list, &nf_ct_frag6_lru_list);
nf_ct_frag6_nqueues++;
@@ -384,10 +370,11 @@ static __inline__ struct nf_ct_frag6_queue *
fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst)
{
struct nf_ct_frag6_queue *fq;
+ struct hlist_node *n;
unsigned int hash = ip6qhashfn(id, src, dst);
read_lock(&nf_ct_frag6_lock);
- for (fq = nf_ct_frag6_hash[hash]; fq; fq = fq->next) {
+ hlist_for_each_entry(fq, n, &nf_ct_frag6_hash[hash], list) {
if (fq->id == id &&
!ipv6_addr_cmp(src, &fq->saddr) &&
!ipv6_addr_cmp(dst, &fq->daddr)) {
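The fragment-queue hash above switches from an open-coded next/pprev list to the generic hlist helpers, which is why the pprev field and the manual relink code disappear. The tiny userspace model below re-implements just the two helpers this hunk relies on; it embeds the node as the first member so a plain cast stands in for the kernel's hlist_entry()/container_of().

/* Tiny userspace model of the hlist conversion: hlist_add_head() and
 * hlist_del() replace the hand-rolled next/pprev bookkeeping that the
 * old fragment-queue code carried around in each caller. */
#include <stdio.h>

struct hnode { struct hnode *next, **pprev; };
struct hhead { struct hnode *first; };

static void hlist_add_head(struct hnode *n, struct hhead *h)
{
        n->next = h->first;
        if (h->first)
                h->first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
}

static void hlist_del(struct hnode *n)
{
        *n->pprev = n->next;
        if (n->next)
                n->next->pprev = n->pprev;
}

struct frag_queue { struct hnode list; unsigned int id; };

int main(void)
{
        struct hhead hash = { NULL };
        struct frag_queue a = { .id = 1 }, b = { .id = 2 };
        struct hnode *p;

        hlist_add_head(&a.list, &hash);
        hlist_add_head(&b.list, &hash);
        hlist_del(&a.list);            /* caller no longer open-codes the unlink */

        for (p = hash.first; p; p = p->next)
                printf("id=%u\n", ((struct frag_queue *)p)->id);
        return 0;
}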
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 5d316cb72ec..15e1456b3f1 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -581,7 +581,6 @@ err:
* the last and the first frames arrived and all the bits are here.
*/
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
- unsigned int *nhoffp,
struct net_device *dev)
{
struct sk_buff *fp, *head = fq->fragments;
@@ -654,6 +653,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
head->dev = dev;
skb_set_timestamp(head, &fq->stamp);
head->nh.ipv6h->payload_len = htons(payload_len);
+ IP6CB(head)->nhoff = nhoff;
*skb_in = head;
@@ -663,7 +663,6 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
IP6_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
fq->fragments = NULL;
- *nhoffp = nhoff;
return 1;
out_oversize:
@@ -678,7 +677,7 @@ out_fail:
return -1;
}
-static int ipv6_frag_rcv(struct sk_buff **skbp, unsigned int *nhoffp)
+static int ipv6_frag_rcv(struct sk_buff **skbp)
{
struct sk_buff *skb = *skbp;
struct net_device *dev = skb->dev;
@@ -710,7 +709,7 @@ static int ipv6_frag_rcv(struct sk_buff **skbp, unsigned int *nhoffp)
skb->h.raw += sizeof(struct frag_hdr);
IP6_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
- *nhoffp = (u8*)fhdr - skb->nh.raw;
+ IP6CB(skb)->nhoff = (u8*)fhdr - skb->nh.raw;
return 1;
}
@@ -722,11 +721,11 @@ static int ipv6_frag_rcv(struct sk_buff **skbp, unsigned int *nhoffp)
spin_lock(&fq->lock);
- ip6_frag_queue(fq, skb, fhdr, *nhoffp);
+ ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
if (fq->last_in == (FIRST_IN|LAST_IN) &&
fq->meat == fq->len)
- ret = ip6_frag_reasm(fq, skbp, nhoffp, dev);
+ ret = ip6_frag_reasm(fq, skbp, dev);
spin_unlock(&fq->lock);
fq_put(fq, NULL);
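Instead of threading the next-header offset through every receive handler as a `unsigned int *nhoffp` out-parameter, the reassembly path now records it in the per-packet control buffer via IP6CB(skb)->nhoff (IP6CB() is a cast of skb->cb to struct inet6_skb_parm). A userspace sketch of the same trick, with toy stand-in types:

/* Userspace sketch of stashing per-packet state in skb->cb instead of
 * passing an extra out-parameter. */
#include <stdio.h>

struct toy_skb { unsigned char cb[48]; };          /* like sk_buff::cb */
struct toy_parm { unsigned int nhoff; };

#define TOY_IP6CB(skb) ((struct toy_parm *)&(skb)->cb[0])

/* Old style: handler reports the offset through *nhoffp. */
static int rcv_old(struct toy_skb *skb, unsigned int *nhoffp)
{
        *nhoffp = 40;
        return 1;
}

/* New style: handler records it in the control buffer itself. */
static int rcv_new(struct toy_skb *skb)
{
        TOY_IP6CB(skb)->nhoff = 40;
        return 1;
}

int main(void)
{
        struct toy_skb skb = { { 0 } };
        unsigned int nhoff;

        rcv_old(&skb, &nhoff);
        rcv_new(&skb);
        printf("old=%u new=%u\n", nhoff, TOY_IP6CB(&skb)->nhoff);
        return 0;
}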
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 66140f13d11..e0d3ad02ffb 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -24,6 +24,7 @@
* reachable. otherwise, round-robin the list.
*/
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 577d49732b0..c2d3e17beae 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -20,6 +20,7 @@
#include <linux/config.h>
#include <linux/module.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -184,7 +185,7 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct ip_tunnel_parm *parms, int
if (dev == NULL)
return NULL;
- nt = dev->priv;
+ nt = netdev_priv(dev);
dev->init = ipip6_tunnel_init;
nt->parms = *parms;
@@ -210,7 +211,7 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
write_unlock_bh(&ipip6_lock);
dev_put(dev);
} else {
- ipip6_tunnel_unlink((struct ip_tunnel*)dev->priv);
+ ipip6_tunnel_unlink(netdev_priv(dev));
dev_put(dev);
}
}
@@ -346,7 +347,7 @@ out:
rt6i = rt6_lookup(&iph6->daddr, &iph6->saddr, NULL, 0);
if (rt6i && rt6i->rt6i_dev && rt6i->rt6i_dev->type == ARPHRD_SIT) {
- struct ip_tunnel * t = (struct ip_tunnel*)rt6i->rt6i_dev->priv;
+ struct ip_tunnel *t = netdev_priv(rt6i->rt6i_dev);
if (rel_type == ICMPV6_TIME_EXCEED && t->parms.iph.ttl) {
rel_type = ICMPV6_DEST_UNREACH;
rel_code = ICMPV6_ADDR_UNREACH;
@@ -381,6 +382,7 @@ static int ipip6_rcv(struct sk_buff *skb)
skb->mac.raw = skb->nh.raw;
skb->nh.raw = skb->data;
memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
+ IPCB(skb)->flags = 0;
skb->protocol = htons(ETH_P_IPV6);
skb->pkt_type = PACKET_HOST;
tunnel->stat.rx_packets++;
@@ -423,7 +425,7 @@ static inline u32 try_6to4(struct in6_addr *v6dst)
static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct ip_tunnel *tunnel = (struct ip_tunnel*)dev->priv;
+ struct ip_tunnel *tunnel = netdev_priv(dev);
struct net_device_stats *stats = &tunnel->stat;
struct iphdr *tiph = &tunnel->parms.iph;
struct ipv6hdr *iph6 = skb->nh.ipv6h;
@@ -552,6 +554,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
skb->h.raw = skb->nh.raw;
skb->nh.raw = skb_push(skb, sizeof(struct iphdr));
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ IPCB(skb)->flags = 0;
dst_release(skb->dst);
skb->dst = &rt->u.dst;
@@ -608,7 +611,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
t = ipip6_tunnel_locate(&p, 0);
}
if (t == NULL)
- t = (struct ip_tunnel*)dev->priv;
+ t = netdev_priv(dev);
memcpy(&p, &t->parms, sizeof(p));
if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
err = -EFAULT;
@@ -645,7 +648,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
err = -EINVAL;
break;
}
- t = (struct ip_tunnel*)dev->priv;
+ t = netdev_priv(dev);
ipip6_tunnel_unlink(t);
t->parms.iph.saddr = p.iph.saddr;
t->parms.iph.daddr = p.iph.daddr;
@@ -681,7 +684,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
if ((t = ipip6_tunnel_locate(&p, 0)) == NULL)
goto done;
err = -EPERM;
- if (t == ipip6_fb_tunnel_dev->priv)
+ if (t == netdev_priv(ipip6_fb_tunnel_dev))
goto done;
dev = t->dev;
}
@@ -698,7 +701,7 @@ done:
static struct net_device_stats *ipip6_tunnel_get_stats(struct net_device *dev)
{
- return &(((struct ip_tunnel*)dev->priv)->stat);
+ return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
}
static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
@@ -733,7 +736,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
struct ip_tunnel *tunnel;
struct iphdr *iph;
- tunnel = (struct ip_tunnel*)dev->priv;
+ tunnel = netdev_priv(dev);
iph = &tunnel->parms.iph;
tunnel->dev = dev;
@@ -773,7 +776,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
static int __init ipip6_fb_tunnel_init(struct net_device *dev)
{
- struct ip_tunnel *tunnel = dev->priv;
+ struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
tunnel->dev = dev;
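The sit.c hunks replace direct dev->priv dereferences with netdev_priv(dev); in this kernel the driver-private area is allocated in the same block right after struct net_device, so netdev_priv() simply returns an aligned pointer past the header. The toy below illustrates that layout; the alignment handling real alloc_netdev() performs is deliberately simplified away.

/* Toy model of why netdev_priv(dev) works: the driver-private area is
 * allocated together with the device structure and follows it in memory. */
#include <stdio.h>
#include <stdlib.h>

struct toy_netdev { char name[16]; };
struct toy_tunnel { int ttl; };                    /* driver private data */

static void *toy_netdev_priv(struct toy_netdev *dev)
{
        return (char *)dev + sizeof(*dev);
}

static struct toy_netdev *toy_alloc_netdev(size_t priv_size)
{
        return calloc(1, sizeof(struct toy_netdev) + priv_size);
}

int main(void)
{
        struct toy_netdev *dev = toy_alloc_netdev(sizeof(struct toy_tunnel));
        struct toy_tunnel *t = toy_netdev_priv(dev);

        t->ttl = 64;
        printf("ttl=%d\n", ((struct toy_tunnel *)toy_netdev_priv(dev))->ttl);
        free(dev);
        return 0;
}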
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 2947bc56d8a..66d04004afd 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -67,6 +67,9 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+/* Socket used for sending RSTs and ACKs */
+static struct socket *tcp6_socket;
+
static void tcp_v6_send_reset(struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
static void tcp_v6_send_check(struct sock *sk, int len,
@@ -611,7 +614,7 @@ static void tcp_v6_send_reset(struct sk_buff *skb)
if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
- ip6_xmit(NULL, buff, &fl, NULL, 0);
+ ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
return;
@@ -675,7 +678,7 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
- ip6_xmit(NULL, buff, &fl, NULL, 0);
+ ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
return;
}
@@ -1153,7 +1156,7 @@ ipv6_pktoptions:
return 0;
}
-static int tcp_v6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
+static int tcp_v6_rcv(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct tcphdr *th;
@@ -1600,8 +1603,21 @@ static struct inet_protosw tcpv6_protosw = {
void __init tcpv6_init(void)
{
+ int err;
+
/* register inet6 protocol */
if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
inet6_register_protosw(&tcpv6_protosw);
+
+ err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_TCP, &tcp6_socket);
+ if (err < 0)
+ panic("Failed to create the TCPv6 control socket.\n");
+ tcp6_socket->sk->sk_allocation = GFP_ATOMIC;
+
+ /* Unhash it so that IP input processing does not even
+	 * see it; we do not want this socket to see incoming
+ * packets.
+ */
+ tcp6_socket->sk->sk_prot->unhash(tcp6_socket->sk);
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d8538dcea81..c47648892c0 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -435,7 +435,7 @@ out:
read_unlock(&udp_hash_lock);
}
-static int udpv6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
+static int udpv6_rcv(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct sock *sk;
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 28c29d78338..1ca2da68ef6 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -11,6 +11,8 @@
#include <linux/module.h>
#include <linux/string.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
@@ -26,7 +28,7 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
IP6_ECN_set_ce(inner_iph);
}
-int xfrm6_rcv_spi(struct sk_buff **pskb, unsigned int *nhoffp, u32 spi)
+int xfrm6_rcv_spi(struct sk_buff **pskb, u32 spi)
{
struct sk_buff *skb = *pskb;
int err;
@@ -38,7 +40,7 @@ int xfrm6_rcv_spi(struct sk_buff **pskb, unsigned int *nhoffp, u32 spi)
int nexthdr;
unsigned int nhoff;
- nhoff = *nhoffp;
+ nhoff = IP6CB(skb)->nhoff;
nexthdr = skb->nh.raw[nhoff];
seq = 0;
@@ -121,6 +123,8 @@ int xfrm6_rcv_spi(struct sk_buff **pskb, unsigned int *nhoffp, u32 spi)
skb->sp->len += xfrm_nr;
skb->ip_summed = CHECKSUM_NONE;
+ nf_reset(skb);
+
if (decaps) {
if (!(skb->dev->flags&IFF_LOOPBACK)) {
dst_release(skb->dst);
@@ -129,7 +133,16 @@ int xfrm6_rcv_spi(struct sk_buff **pskb, unsigned int *nhoffp, u32 spi)
netif_rx(skb);
return -1;
} else {
+#ifdef CONFIG_NETFILTER
+ skb->nh.ipv6h->payload_len = htons(skb->len);
+ __skb_push(skb, skb->data - skb->nh.raw);
+
+ NF_HOOK(PF_INET6, NF_IP6_PRE_ROUTING, skb, skb->dev, NULL,
+ ip6_rcv_finish);
+ return -1;
+#else
return 1;
+#endif
}
drop_unlock:
@@ -144,7 +157,7 @@ drop:
EXPORT_SYMBOL(xfrm6_rcv_spi);
-int xfrm6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
+int xfrm6_rcv(struct sk_buff **pskb)
{
- return xfrm6_rcv_spi(pskb, nhoffp, 0);
+ return xfrm6_rcv_spi(pskb, 0);
}
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 6b9867717d1..80242172a5d 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -9,9 +9,11 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/compiler.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/icmpv6.h>
+#include <linux/netfilter_ipv6.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/ipv6.h>
@@ -92,7 +94,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
return ret;
}
-int xfrm6_output(struct sk_buff *skb)
+static int xfrm6_output_one(struct sk_buff *skb)
{
struct dst_entry *dst = skb->dst;
struct xfrm_state *x = dst->xfrm;
@@ -110,29 +112,35 @@ int xfrm6_output(struct sk_buff *skb)
goto error_nolock;
}
- spin_lock_bh(&x->lock);
- err = xfrm_state_check(x, skb);
- if (err)
- goto error;
+ do {
+ spin_lock_bh(&x->lock);
+ err = xfrm_state_check(x, skb);
+ if (err)
+ goto error;
- xfrm6_encap(skb);
+ xfrm6_encap(skb);
- err = x->type->output(x, skb);
- if (err)
- goto error;
+ err = x->type->output(x, skb);
+ if (err)
+ goto error;
- x->curlft.bytes += skb->len;
- x->curlft.packets++;
+ x->curlft.bytes += skb->len;
+ x->curlft.packets++;
- spin_unlock_bh(&x->lock);
+ spin_unlock_bh(&x->lock);
- skb->nh.raw = skb->data;
-
- if (!(skb->dst = dst_pop(dst))) {
- err = -EHOSTUNREACH;
- goto error_nolock;
- }
- err = NET_XMIT_BYPASS;
+ skb->nh.raw = skb->data;
+
+ if (!(skb->dst = dst_pop(dst))) {
+ err = -EHOSTUNREACH;
+ goto error_nolock;
+ }
+ dst = skb->dst;
+ x = dst->xfrm;
+ } while (x && !x->props.mode);
+
+ IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
+ err = 0;
out_exit:
return err;
@@ -142,3 +150,33 @@ error_nolock:
kfree_skb(skb);
goto out_exit;
}
+
+static int xfrm6_output_finish(struct sk_buff *skb)
+{
+ int err;
+
+ while (likely((err = xfrm6_output_one(skb)) == 0)) {
+ nf_reset(skb);
+
+ err = nf_hook(PF_INET6, NF_IP6_LOCAL_OUT, &skb, NULL,
+ skb->dst->dev, dst_output);
+ if (unlikely(err != 1))
+ break;
+
+ if (!skb->dst->xfrm)
+ return dst_output(skb);
+
+ err = nf_hook(PF_INET6, NF_IP6_POST_ROUTING, &skb, NULL,
+ skb->dst->dev, xfrm6_output_finish);
+ if (unlikely(err != 1))
+ break;
+ }
+
+ return err;
+}
+
+int xfrm6_output(struct sk_buff *skb)
+{
+ return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dst->dev,
+ xfrm6_output_finish);
+}
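The rework above folds xfrm6_output() into a loop: xfrm6_output_one() keeps applying consecutive transport-mode transforms under one pass (stopping at the first tunnel-mode state or the end of the dst chain), and xfrm6_output_finish() then re-enters the LOCAL_OUT/POST_ROUTING netfilter hooks before continuing. The userspace model below mirrors only the inner loop; the hook re-entry and locking are omitted, and the names are toy stand-ins.

/* Userspace model of the new xfrm6_output_one() loop: keep applying
 * transforms while the *next* one is transport mode (props.mode == 0)
 * and stop at the first tunnel-mode state or the end of the chain. */
#include <stdio.h>

struct toy_state { int mode; const char *name; };

static void output_one(const struct toy_state *chain, int len)
{
        int i = 0;

        do {
                printf("apply %s\n", chain[i].name);
                i++;
        } while (i < len && chain[i].mode == 0);
        /* the packet would re-enter the netfilter hooks here */
}

int main(void)
{
        struct toy_state chain[] = {
                { 0, "AH transport" },
                { 0, "ESP transport" },
                { 1, "ESP tunnel" },
        };

        output_one(chain, 3);
        return 0;
}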
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index bf0d0abc387..a5723024d3b 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -15,6 +15,7 @@
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <net/ipv6.h>
+#include <net/addrconf.h>
static struct xfrm_state_afinfo xfrm6_state_afinfo;
@@ -41,6 +42,22 @@ __xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl,
memcpy(&x->props.saddr, &tmpl->saddr, sizeof(x->props.saddr));
if (ipv6_addr_any((struct in6_addr*)&x->props.saddr))
memcpy(&x->props.saddr, saddr, sizeof(x->props.saddr));
+ if (tmpl->mode && ipv6_addr_any((struct in6_addr*)&x->props.saddr)) {
+ struct rt6_info *rt;
+ struct flowi fl_tunnel = {
+ .nl_u = {
+ .ip6_u = {
+ .daddr = *(struct in6_addr *)daddr,
+ }
+ }
+ };
+ if (!xfrm_dst_lookup((struct xfrm_dst **)&rt,
+ &fl_tunnel, AF_INET6)) {
+ ipv6_get_saddr(&rt->u.dst, (struct in6_addr *)daddr,
+ (struct in6_addr *)&x->props.saddr);
+ dst_release(&rt->u.dst);
+ }
+ }
x->props.mode = tmpl->mode;
x->props.reqid = tmpl->reqid;
x->props.family = AF_INET6;
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index fbef7826a74..8cfc58b96fc 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -259,8 +259,7 @@ try_next_2:;
spi = 0;
goto out;
alloc_spi:
- X6TPRINTK3(KERN_DEBUG "%s(): allocate new spi for "
- "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ X6TPRINTK3(KERN_DEBUG "%s(): allocate new spi for " NIP6_FMT "\n",
__FUNCTION__,
NIP6(*(struct in6_addr *)saddr));
x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC);
@@ -323,9 +322,8 @@ void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
list_byaddr)
{
if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
- X6TPRINTK3(KERN_DEBUG "%s(): x6spi object "
- "for %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x "
- "found at %p\n",
+ X6TPRINTK3(KERN_DEBUG "%s(): x6spi object for " NIP6_FMT
+ " found at %p\n",
__FUNCTION__,
NIP6(*(struct in6_addr *)saddr),
x6spi);
@@ -397,7 +395,7 @@ int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler)
EXPORT_SYMBOL(xfrm6_tunnel_deregister);
-static int xfrm6_tunnel_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
+static int xfrm6_tunnel_rcv(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct xfrm6_tunnel *handler = xfrm6_tunnel_handler;
@@ -405,11 +403,11 @@ static int xfrm6_tunnel_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
u32 spi;
/* device-like_ip6ip6_handler() */
- if (handler && handler->handler(pskb, nhoffp) == 0)
+ if (handler && handler->handler(pskb) == 0)
return 0;
spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
- return xfrm6_rcv_spi(pskb, nhoffp, spi);
+ return xfrm6_rcv_spi(pskb, spi);
}
static void xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 0dc519b4040..0fb513a34d1 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -29,6 +29,7 @@
*/
#include <linux/config.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index fbfa9675441..75944564866 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -43,6 +43,7 @@
********************************************************************/
#include <linux/config.h>
+#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index 70543d89438..890bac0d4a5 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -33,6 +33,7 @@
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
+#include <linux/capability.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index 75f2666e863..c6d169fbdce 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -82,8 +82,7 @@ struct ias_object *irias_new_object( char *name, int id)
IRDA_DEBUG( 4, "%s()\n", __FUNCTION__);
- obj = (struct ias_object *) kmalloc(sizeof(struct ias_object),
- GFP_ATOMIC);
+ obj = kmalloc(sizeof(struct ias_object), GFP_ATOMIC);
if (obj == NULL) {
IRDA_WARNING("%s(), Unable to allocate object!\n",
__FUNCTION__);
@@ -348,8 +347,7 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value,
IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;);
IRDA_ASSERT(name != NULL, return;);
- attrib = (struct ias_attrib *) kmalloc(sizeof(struct ias_attrib),
- GFP_ATOMIC);
+ attrib = kmalloc(sizeof(struct ias_attrib), GFP_ATOMIC);
if (attrib == NULL) {
IRDA_WARNING("%s: Unable to allocate attribute!\n",
__FUNCTION__);
@@ -385,8 +383,7 @@ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets,
IRDA_ASSERT(name != NULL, return;);
IRDA_ASSERT(octets != NULL, return;);
- attrib = (struct ias_attrib *) kmalloc(sizeof(struct ias_attrib),
- GFP_ATOMIC);
+ attrib = kmalloc(sizeof(struct ias_attrib), GFP_ATOMIC);
if (attrib == NULL) {
IRDA_WARNING("%s: Unable to allocate attribute!\n",
__FUNCTION__);
@@ -420,8 +417,7 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value,
IRDA_ASSERT(name != NULL, return;);
IRDA_ASSERT(value != NULL, return;);
- attrib = (struct ias_attrib *) kmalloc(sizeof( struct ias_attrib),
- GFP_ATOMIC);
+ attrib = kmalloc(sizeof( struct ias_attrib), GFP_ATOMIC);
if (attrib == NULL) {
IRDA_WARNING("%s: Unable to allocate attribute!\n",
__FUNCTION__);
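The irias_object.c hunks drop the explicit casts on kmalloc(): in C a void * converts implicitly to any object pointer type, so the cast is redundant and can hide a missing prototype. A userspace equivalent of the cleaned-up allocation style, with an illustrative struct name:

/* In C, void * converts implicitly, so casting an allocator's result is
 * redundant; this mirrors the kmalloc cast removals above. */
#include <stdio.h>
#include <stdlib.h>

struct ias_attrib_like { int type; };

int main(void)
{
        /* no cast needed, unlike the removed "(struct ias_attrib *)" form */
        struct ias_attrib_like *attrib = malloc(sizeof(*attrib));

        if (attrib == NULL)
                return 1;
        attrib->type = 3;
        printf("type=%d\n", attrib->type);
        free(attrib);
        return 0;
}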
diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h
index b391cb3893d..e4fe1e80029 100644
--- a/net/irda/irnet/irnet.h
+++ b/net/irda/irnet/irnet.h
@@ -248,6 +248,7 @@
#include <linux/netdevice.h>
#include <linux/miscdevice.h>
#include <linux/poll.h>
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/ctype.h> /* isspace() */
#include <asm/uaccess.h>
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 52efd04cbed..43f1ce74187 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -15,6 +15,7 @@
*/
#include <linux/config.h>
+#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/socket.h>
@@ -297,8 +298,7 @@ static int pfkey_error(struct sadb_msg *orig, int err, struct sock *sk)
err = EINTR;
if (err >= 512)
err = EINVAL;
- if (err <= 0 || err >= 256)
- BUG();
+ BUG_ON(err <= 0 || err >= 256);
hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
pfkey_hdr_dup(hdr, orig);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 7d55f9cbd85..99c0a0fa4a9 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -103,3 +103,261 @@ config NF_CT_NETLINK
This option enables support for a netlink-based userspace interface
endmenu
+
+config NETFILTER_XTABLES
+ tristate "Netfilter Xtables support (required for ip_tables)"
+ help
+ This is required if you intend to use any of ip_tables,
+ ip6_tables or arp_tables.
+
+# alphabetically ordered list of targets
+
+config NETFILTER_XT_TARGET_CLASSIFY
+ tristate '"CLASSIFY" target support'
+ depends on NETFILTER_XTABLES
+ help
+ This option adds a `CLASSIFY' target, which enables the user to set
+ the priority of a packet. Some qdiscs can use this value for
+ classification, among these are:
+
+ atm, cbq, dsmark, pfifo_fast, htb, prio
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config NETFILTER_XT_TARGET_CONNMARK
+ tristate '"CONNMARK" target support'
+ depends on NETFILTER_XTABLES
+ depends on IP_NF_MANGLE || IP6_NF_MANGLE
+ depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK_IPV4)
+ help
+ This option adds a `CONNMARK' target, which allows one to manipulate
+ the connection mark value. Similar to the MARK target, but
+ affects the connection mark value rather than the packet mark value.
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/modules.txt>. The module will be called
+ ipt_CONNMARK.o. If unsure, say `N'.
+
+config NETFILTER_XT_TARGET_MARK
+ tristate '"MARK" target support'
+ depends on NETFILTER_XTABLES
+ help
+ This option adds a `MARK' target, which allows you to create rules
+ in the `mangle' table which alter the netfilter mark (nfmark) field
+ associated with the packet prior to routing. This can change
+ the routing method (see `Use netfilter MARK value as routing
+ key') and can also be used by other subsystems to change their
+ behavior.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config NETFILTER_XT_TARGET_NFQUEUE
+ tristate '"NFQUEUE" target Support'
+ depends on NETFILTER_XTABLES
+ help
+	  This target replaces the old, obsolete QUEUE target.
+
+ As opposed to QUEUE, it supports 65535 different queues,
+ not just one.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config NETFILTER_XT_TARGET_NOTRACK
+ tristate '"NOTRACK" target support'
+ depends on NETFILTER_XTABLES
+ depends on IP_NF_RAW || IP6_NF_RAW
+ depends on IP_NF_CONNTRACK || NF_CONNTRACK
+ help
+	  The NOTRACK target allows a rule to specify which packets should
+	  *not* enter the conntrack/NAT subsystem, with all the consequences
+	  that entails (no ICMP error tracking, no protocol helpers for the
+	  selected packets).
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/modules.txt>. If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_COMMENT
+ tristate '"comment" match support'
+ depends on NETFILTER_XTABLES
+ help
+ This option adds a `comment' dummy-match, which allows you to put
+ comments in your iptables ruleset.
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/modules.txt>. If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_CONNBYTES
+ tristate '"connbytes" per-connection counter match support'
+ depends on NETFILTER_XTABLES
+ depends on (IP_NF_CONNTRACK && IP_NF_CT_ACCT) || NF_CT_ACCT
+ help
+ This option adds a `connbytes' match, which allows you to match the
+ number of bytes and/or packets for each direction within a connection.
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/modules.txt>. If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_CONNMARK
+ tristate '"connmark" connection mark match support'
+ depends on NETFILTER_XTABLES
+ depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || NF_CONNTRACK_MARK
+ help
+ This option adds a `connmark' match, which allows you to match the
+ connection mark value previously set for the session by `CONNMARK'.
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/modules.txt>. The module will be called
+ ipt_connmark.o. If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_CONNTRACK
+ tristate '"conntrack" connection tracking match support'
+ depends on NETFILTER_XTABLES
+ depends on IP_NF_CONNTRACK || NF_CONNTRACK
+ help
+ This is a general conntrack match module, a superset of the state match.
+
+ It allows matching on additional conntrack information, which is
+ useful in complex configurations, such as NAT gateways with multiple
+ internet links or tunnels.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config NETFILTER_XT_MATCH_DCCP
+ tristate '"DCCP" protocol match support'
+ depends on NETFILTER_XTABLES
+ help
+ With this option enabled, you will be able to use the iptables
+ `dccp' match in order to match on DCCP source/destination ports
+ and DCCP flags.
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/modules.txt>. If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_HELPER
+ tristate '"helper" match support'
+ depends on NETFILTER_XTABLES
+ depends on IP_NF_CONNTRACK || NF_CONNTRACK
+ help
+ Helper matching allows you to match packets in dynamic connections
+	  tracked by a conntrack-helper, e.g. ip_conntrack_ftp.
+
+ To compile it as a module, choose M here. If unsure, say Y.
+
+config NETFILTER_XT_MATCH_LENGTH
+ tristate '"length" match support'
+ depends on NETFILTER_XTABLES
+ help
+ This option allows you to match the length of a packet against a
+ specific value or range of values.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config NETFILTER_XT_MATCH_LIMIT
+ tristate '"limit" match support'
+ depends on NETFILTER_XTABLES
+ help
+ limit matching allows you to control the rate at which a rule can be
+ matched: mainly useful in combination with the LOG target ("LOG
+ target support", below) and to avoid some Denial of Service attacks.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config NETFILTER_XT_MATCH_MAC
+ tristate '"mac" address match support'
+ depends on NETFILTER_XTABLES
+ help
+ MAC matching allows you to match packets based on the source
+ Ethernet address of the packet.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config NETFILTER_XT_MATCH_MARK
+ tristate '"mark" match support'
+ depends on NETFILTER_XTABLES
+ help
+ Netfilter mark matching allows you to match packets based on the
+ `nfmark' value in the packet. This can be set by the MARK target
+ (see below).
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config NETFILTER_XT_MATCH_PHYSDEV
+ tristate '"physdev" match support'
+ depends on NETFILTER_XTABLES && BRIDGE_NETFILTER
+ help
+ Physdev packet matching matches against the physical bridge ports
+ the IP packet arrived on or will leave by.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config NETFILTER_XT_MATCH_PKTTYPE
+ tristate '"pkttype" packet type match support'
+ depends on NETFILTER_XTABLES
+ help
+ Packet type matching allows you to match a packet by
+ its "class", eg. BROADCAST, MULTICAST, ...
+
+ Typical usage:
+ iptables -A INPUT -m pkttype --pkt-type broadcast -j LOG
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config NETFILTER_XT_MATCH_REALM
+ tristate '"realm" match support'
+ depends on NETFILTER_XTABLES
+ select NET_CLS_ROUTE
+ help
+ This option adds a `realm' match, which allows you to use the realm
+ key from the routing subsystem inside iptables.
+
+ This match pretty much resembles the CONFIG_NET_CLS_ROUTE4 option
+ in tc world.
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/modules.txt>. If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_SCTP
+ tristate '"sctp" protocol match support'
+ depends on NETFILTER_XTABLES
+ help
+ With this option enabled, you will be able to use the
+ `sctp' match in order to match on SCTP source/destination ports
+ and SCTP chunk types.
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/modules.txt>. If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_STATE
+ tristate '"state" match support'
+ depends on NETFILTER_XTABLES
+ depends on IP_NF_CONNTRACK || NF_CONNTRACK
+ help
+ Connection state matching allows you to match packets based on their
+ relationship to a tracked connection (ie. previous packets). This
+ is a powerful tool for packet classification.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config NETFILTER_XT_MATCH_STRING
+ tristate '"string" match support'
+ depends on NETFILTER_XTABLES
+ select TEXTSEARCH
+ select TEXTSEARCH_KMP
+ select TEXTSEARCH_BM
+ select TEXTSEARCH_FSM
+ help
+ This option adds a `string' match, which allows you to look for
+	  pattern matches in packets.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config NETFILTER_XT_MATCH_TCPMSS
+ tristate '"tcpmss" match support'
+ depends on NETFILTER_XTABLES
+ help
+ This option adds a `tcpmss' match, which allows you to examine the
+	  MSS value of TCP SYN packets, which controls the maximum packet size
+ for that connection.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index cb2183145c3..746172ebc91 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -1,4 +1,5 @@
netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
+nf_conntrack-objs := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o
obj-$(CONFIG_NETFILTER) = netfilter.o
@@ -6,13 +7,43 @@ obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o
obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o
obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o
-nf_conntrack-objs := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o
-
+# connection tracking
obj-$(CONFIG_NF_CONNTRACK) += nf_conntrack.o
-obj-$(CONFIG_NF_CONNTRACK_FTP) += nf_conntrack_ftp.o
# SCTP protocol connection tracking
obj-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o
# netlink interface for nf_conntrack
obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o
+
+# connection tracking helpers
+obj-$(CONFIG_NF_CONNTRACK_FTP) += nf_conntrack_ftp.o
+
+# generic X tables
+obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
+
+# targets
+obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o
+
+# matches
+obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_CONNMARK) += xt_connmark.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_MARK) += xt_mark.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_SCTP) += xt_sctp.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_STATE) += xt_state.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_STRING) += xt_string.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index d5a6eaf4a1d..ab0c920f0d3 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -545,11 +545,11 @@ static int help(struct sk_buff **pskb,
different IP address. Simply don't record it for
NAT. */
if (cmd.l3num == PF_INET) {
- DEBUGP("conntrack_ftp: NOT RECORDING: %u,%u,%u,%u != %u.%u.%u.%u\n",
+ DEBUGP("conntrack_ftp: NOT RECORDING: " NIPQUAD_FMT " != " NIPQUAD_FMT "\n",
NIPQUAD(cmd.u3.ip),
NIPQUAD(ct->tuplehash[dir].tuple.src.u3.ip));
} else {
- DEBUGP("conntrack_ftp: NOT RECORDING: %x:%x:%x:%x:%x:%x:%x:%x != %x:%x:%x:%x:%x:%x:%x:%x\n",
+ DEBUGP("conntrack_ftp: NOT RECORDING: " NIP6_FMT " != " NIP6_FMT "\n",
NIP6(*((struct in6_addr *)cmd.u3.ip6)),
NIP6(*((struct in6_addr *)ct->tuplehash[dir]
.tuple.src.u3.ip6)));
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index 36425f6c833..46bc27e2756 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -17,7 +17,7 @@
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack_protocol.h>
-unsigned long nf_ct_generic_timeout = 600*HZ;
+unsigned int nf_ct_generic_timeout = 600*HZ;
static int generic_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 3a600f77b4e..cf798e61e37 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -62,15 +62,15 @@ static const char *sctp_conntrack_names[] = {
#define HOURS * 60 MINS
#define DAYS * 24 HOURS
-static unsigned long nf_ct_sctp_timeout_closed = 10 SECS;
-static unsigned long nf_ct_sctp_timeout_cookie_wait = 3 SECS;
-static unsigned long nf_ct_sctp_timeout_cookie_echoed = 3 SECS;
-static unsigned long nf_ct_sctp_timeout_established = 5 DAYS;
-static unsigned long nf_ct_sctp_timeout_shutdown_sent = 300 SECS / 1000;
-static unsigned long nf_ct_sctp_timeout_shutdown_recd = 300 SECS / 1000;
-static unsigned long nf_ct_sctp_timeout_shutdown_ack_sent = 3 SECS;
-
-static unsigned long * sctp_timeouts[]
+static unsigned int nf_ct_sctp_timeout_closed = 10 SECS;
+static unsigned int nf_ct_sctp_timeout_cookie_wait = 3 SECS;
+static unsigned int nf_ct_sctp_timeout_cookie_echoed = 3 SECS;
+static unsigned int nf_ct_sctp_timeout_established = 5 DAYS;
+static unsigned int nf_ct_sctp_timeout_shutdown_sent = 300 SECS / 1000;
+static unsigned int nf_ct_sctp_timeout_shutdown_recd = 300 SECS / 1000;
+static unsigned int nf_ct_sctp_timeout_shutdown_ack_sent = 3 SECS;
+
+static unsigned int * sctp_timeouts[]
= { NULL, /* SCTP_CONNTRACK_NONE */
&nf_ct_sctp_timeout_closed, /* SCTP_CONNTRACK_CLOSED */
&nf_ct_sctp_timeout_cookie_wait, /* SCTP_CONNTRACK_COOKIE_WAIT */
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 6167137a5cb..df99138c3b3 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -93,21 +93,21 @@ static const char *tcp_conntrack_names[] = {
#define HOURS * 60 MINS
#define DAYS * 24 HOURS
-unsigned long nf_ct_tcp_timeout_syn_sent = 2 MINS;
-unsigned long nf_ct_tcp_timeout_syn_recv = 60 SECS;
-unsigned long nf_ct_tcp_timeout_established = 5 DAYS;
-unsigned long nf_ct_tcp_timeout_fin_wait = 2 MINS;
-unsigned long nf_ct_tcp_timeout_close_wait = 60 SECS;
-unsigned long nf_ct_tcp_timeout_last_ack = 30 SECS;
-unsigned long nf_ct_tcp_timeout_time_wait = 2 MINS;
-unsigned long nf_ct_tcp_timeout_close = 10 SECS;
+unsigned int nf_ct_tcp_timeout_syn_sent = 2 MINS;
+unsigned int nf_ct_tcp_timeout_syn_recv = 60 SECS;
+unsigned int nf_ct_tcp_timeout_established = 5 DAYS;
+unsigned int nf_ct_tcp_timeout_fin_wait = 2 MINS;
+unsigned int nf_ct_tcp_timeout_close_wait = 60 SECS;
+unsigned int nf_ct_tcp_timeout_last_ack = 30 SECS;
+unsigned int nf_ct_tcp_timeout_time_wait = 2 MINS;
+unsigned int nf_ct_tcp_timeout_close = 10 SECS;
/* RFC1122 says the R2 limit should be at least 100 seconds.
Linux uses 15 packets as limit, which corresponds
to ~13-30min depending on RTO. */
-unsigned long nf_ct_tcp_timeout_max_retrans = 5 MINS;
+unsigned int nf_ct_tcp_timeout_max_retrans = 5 MINS;
-static unsigned long * tcp_timeouts[]
+static unsigned int * tcp_timeouts[]
= { NULL, /* TCP_CONNTRACK_NONE */
&nf_ct_tcp_timeout_syn_sent, /* TCP_CONNTRACK_SYN_SENT, */
&nf_ct_tcp_timeout_syn_recv, /* TCP_CONNTRACK_SYN_RECV, */
@@ -988,7 +988,7 @@ static int tcp_packet(struct nf_conn *conntrack,
|| (!test_bit(IPS_ASSURED_BIT, &conntrack->status)
&& conntrack->proto.tcp.last_index == TCP_ACK_SET))
&& ntohl(th->ack_seq) == conntrack->proto.tcp.last_end) {
- /* RST sent to invalid SYN or ACK we had let trough
+ /* RST sent to invalid SYN or ACK we had let through
* at a) and c) above:
*
* a) SYN was in window then
@@ -999,7 +999,7 @@ static int tcp_packet(struct nf_conn *conntrack,
* segments we ignored. */
goto in_window;
}
- /* Just fall trough */
+ /* Just fall through */
default:
/* Keep compilers happy. */
break;
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 1a592a55618..4264dd079a1 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -27,8 +27,8 @@
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_conntrack_protocol.h>
-unsigned long nf_ct_udp_timeout = 30*HZ;
-unsigned long nf_ct_udp_timeout_stream = 180*HZ;
+unsigned int nf_ct_udp_timeout = 30*HZ;
+unsigned int nf_ct_udp_timeout_stream = 180*HZ;
static int udp_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index d17e42b28c7..617599aeeea 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -431,25 +431,25 @@ extern int nf_conntrack_max;
extern unsigned int nf_conntrack_htable_size;
/* From nf_conntrack_proto_tcp.c */
-extern unsigned long nf_ct_tcp_timeout_syn_sent;
-extern unsigned long nf_ct_tcp_timeout_syn_recv;
-extern unsigned long nf_ct_tcp_timeout_established;
-extern unsigned long nf_ct_tcp_timeout_fin_wait;
-extern unsigned long nf_ct_tcp_timeout_close_wait;
-extern unsigned long nf_ct_tcp_timeout_last_ack;
-extern unsigned long nf_ct_tcp_timeout_time_wait;
-extern unsigned long nf_ct_tcp_timeout_close;
-extern unsigned long nf_ct_tcp_timeout_max_retrans;
+extern unsigned int nf_ct_tcp_timeout_syn_sent;
+extern unsigned int nf_ct_tcp_timeout_syn_recv;
+extern unsigned int nf_ct_tcp_timeout_established;
+extern unsigned int nf_ct_tcp_timeout_fin_wait;
+extern unsigned int nf_ct_tcp_timeout_close_wait;
+extern unsigned int nf_ct_tcp_timeout_last_ack;
+extern unsigned int nf_ct_tcp_timeout_time_wait;
+extern unsigned int nf_ct_tcp_timeout_close;
+extern unsigned int nf_ct_tcp_timeout_max_retrans;
extern int nf_ct_tcp_loose;
extern int nf_ct_tcp_be_liberal;
extern int nf_ct_tcp_max_retrans;
/* From nf_conntrack_proto_udp.c */
-extern unsigned long nf_ct_udp_timeout;
-extern unsigned long nf_ct_udp_timeout_stream;
+extern unsigned int nf_ct_udp_timeout;
+extern unsigned int nf_ct_udp_timeout_stream;
/* From nf_conntrack_proto_generic.c */
-extern unsigned long nf_ct_generic_timeout;
+extern unsigned int nf_ct_generic_timeout;
/* Log invalid packets of a given protocol */
static int log_invalid_proto_min = 0;
@@ -821,7 +821,7 @@ module_exit(fini);
/* Some modules need us, but don't depend directly on any symbol.
They should call this. */
-void need_nf_conntrack(void)
+void need_conntrack(void)
{
}
@@ -841,7 +841,7 @@ EXPORT_SYMBOL(nf_conntrack_protocol_unregister);
EXPORT_SYMBOL(nf_ct_invert_tuplepr);
EXPORT_SYMBOL(nf_conntrack_alter_reply);
EXPORT_SYMBOL(nf_conntrack_destroyed);
-EXPORT_SYMBOL(need_nf_conntrack);
+EXPORT_SYMBOL(need_conntrack);
EXPORT_SYMBOL(nf_conntrack_helper_register);
EXPORT_SYMBOL(nf_conntrack_helper_unregister);
EXPORT_SYMBOL(nf_ct_iterate_cleanup);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
new file mode 100644
index 00000000000..d7817afc6b9
--- /dev/null
+++ b/net/netfilter/x_tables.c
@@ -0,0 +1,624 @@
+/*
+ * x_tables core - Backend for {ip,ip6,arp}_tables
+ *
+ * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
+ *
+ * Based on existing ip_tables code which is
+ * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
+ * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/socket.h>
+#include <linux/net.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_arp.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
+MODULE_DESCRIPTION("[ip,ip6,arp]_tables backend module");
+
+#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
+
+struct xt_af {
+ struct semaphore mutex;
+ struct list_head match;
+ struct list_head target;
+ struct list_head tables;
+};
+
+static struct xt_af *xt;
+
+#ifdef DEBUG_IP_FIREWALL_USER
+#define duprintf(format, args...) printk(format , ## args)
+#else
+#define duprintf(format, args...)
+#endif
+
+enum {
+ TABLE,
+ TARGET,
+ MATCH,
+};
+
+/* Registration hooks for targets. */
+int
+xt_register_target(int af, struct xt_target *target)
+{
+ int ret;
+
+ ret = down_interruptible(&xt[af].mutex);
+ if (ret != 0)
+ return ret;
+ list_add(&target->list, &xt[af].target);
+ up(&xt[af].mutex);
+ return ret;
+}
+EXPORT_SYMBOL(xt_register_target);
+
+void
+xt_unregister_target(int af, struct xt_target *target)
+{
+ down(&xt[af].mutex);
+ LIST_DELETE(&xt[af].target, target);
+ up(&xt[af].mutex);
+}
+EXPORT_SYMBOL(xt_unregister_target);
+
+int
+xt_register_match(int af, struct xt_match *match)
+{
+ int ret;
+
+ ret = down_interruptible(&xt[af].mutex);
+ if (ret != 0)
+ return ret;
+
+ list_add(&match->list, &xt[af].match);
+ up(&xt[af].mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(xt_register_match);
+
+void
+xt_unregister_match(int af, struct xt_match *match)
+{
+ down(&xt[af].mutex);
+ LIST_DELETE(&xt[af].match, match);
+ up(&xt[af].mutex);
+}
+EXPORT_SYMBOL(xt_unregister_match);
+
+
+/*
+ * These are weird, but module loading must not be done with mutex
+ * held (since they will register), and we have to have a single
+ * function to use try_then_request_module().
+ */
+
+/* Find match, grabs ref. Returns ERR_PTR() on error. */
+struct xt_match *xt_find_match(int af, const char *name, u8 revision)
+{
+ struct xt_match *m;
+ int err = 0;
+
+ if (down_interruptible(&xt[af].mutex) != 0)
+ return ERR_PTR(-EINTR);
+
+ list_for_each_entry(m, &xt[af].match, list) {
+ if (strcmp(m->name, name) == 0) {
+ if (m->revision == revision) {
+ if (try_module_get(m->me)) {
+ up(&xt[af].mutex);
+ return m;
+ }
+ } else
+ err = -EPROTOTYPE; /* Found something. */
+ }
+ }
+ up(&xt[af].mutex);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(xt_find_match);
+
+/* Find target, grabs ref. Returns ERR_PTR() on error. */
+struct xt_target *xt_find_target(int af, const char *name, u8 revision)
+{
+ struct xt_target *t;
+ int err = 0;
+
+ if (down_interruptible(&xt[af].mutex) != 0)
+ return ERR_PTR(-EINTR);
+
+ list_for_each_entry(t, &xt[af].target, list) {
+ if (strcmp(t->name, name) == 0) {
+ if (t->revision == revision) {
+ if (try_module_get(t->me)) {
+ up(&xt[af].mutex);
+ return t;
+ }
+ } else
+ err = -EPROTOTYPE; /* Found something. */
+ }
+ }
+ up(&xt[af].mutex);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(xt_find_target);
+
+static const char *xt_prefix[NPROTO] = {
+ [AF_INET] = "ipt_%s",
+ [AF_INET6] = "ip6t_%s",
+ [NF_ARP] = "arpt_%s",
+};
+
+struct xt_target *xt_request_find_target(int af, const char *name, u8 revision)
+{
+ struct xt_target *target;
+
+ target = try_then_request_module(xt_find_target(af, name, revision),
+ xt_prefix[af], name);
+ if (IS_ERR(target) || !target)
+ return NULL;
+ return target;
+}
+EXPORT_SYMBOL_GPL(xt_request_find_target);
+
+static int match_revfn(int af, const char *name, u8 revision, int *bestp)
+{
+ struct xt_match *m;
+ int have_rev = 0;
+
+ list_for_each_entry(m, &xt[af].match, list) {
+ if (strcmp(m->name, name) == 0) {
+ if (m->revision > *bestp)
+ *bestp = m->revision;
+ if (m->revision == revision)
+ have_rev = 1;
+ }
+ }
+ return have_rev;
+}
+
+static int target_revfn(int af, const char *name, u8 revision, int *bestp)
+{
+ struct xt_target *t;
+ int have_rev = 0;
+
+ list_for_each_entry(t, &xt[af].target, list) {
+ if (strcmp(t->name, name) == 0) {
+ if (t->revision > *bestp)
+ *bestp = t->revision;
+ if (t->revision == revision)
+ have_rev = 1;
+ }
+ }
+ return have_rev;
+}
+
+/* Returns true or false (if no such extension at all) */
+int xt_find_revision(int af, const char *name, u8 revision, int target,
+ int *err)
+{
+ int have_rev, best = -1;
+
+ if (down_interruptible(&xt[af].mutex) != 0) {
+ *err = -EINTR;
+ return 1;
+ }
+ if (target == 1)
+ have_rev = target_revfn(af, name, revision, &best);
+ else
+ have_rev = match_revfn(af, name, revision, &best);
+ up(&xt[af].mutex);
+
+ /* Nothing at all? Return 0 to try loading module. */
+ if (best == -1) {
+ *err = -ENOENT;
+ return 0;
+ }
+
+ *err = best;
+ if (!have_rev)
+ *err = -EPROTONOSUPPORT;
+ return 1;
+}
+EXPORT_SYMBOL_GPL(xt_find_revision);
+
+struct xt_table_info *xt_alloc_table_info(unsigned int size)
+{
+ struct xt_table_info *newinfo;
+ int cpu;
+
+ /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
+ if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > num_physpages)
+ return NULL;
+
+ newinfo = kzalloc(sizeof(struct xt_table_info), GFP_KERNEL);
+ if (!newinfo)
+ return NULL;
+
+ newinfo->size = size;
+
+ for_each_cpu(cpu) {
+ if (size <= PAGE_SIZE)
+ newinfo->entries[cpu] = kmalloc_node(size,
+ GFP_KERNEL,
+ cpu_to_node(cpu));
+ else
+ newinfo->entries[cpu] = vmalloc_node(size,
+ cpu_to_node(cpu));
+
+ if (newinfo->entries[cpu] == NULL) {
+ xt_free_table_info(newinfo);
+ return NULL;
+ }
+ }
+
+ return newinfo;
+}
+EXPORT_SYMBOL(xt_alloc_table_info);
+
+void xt_free_table_info(struct xt_table_info *info)
+{
+ int cpu;
+
+ for_each_cpu(cpu) {
+ if (info->size <= PAGE_SIZE)
+ kfree(info->entries[cpu]);
+ else
+ vfree(info->entries[cpu]);
+ }
+ kfree(info);
+}
+EXPORT_SYMBOL(xt_free_table_info);
+
+/* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */
+struct xt_table *xt_find_table_lock(int af, const char *name)
+{
+ struct xt_table *t;
+
+ if (down_interruptible(&xt[af].mutex) != 0)
+ return ERR_PTR(-EINTR);
+
+ list_for_each_entry(t, &xt[af].tables, list)
+ if (strcmp(t->name, name) == 0 && try_module_get(t->me))
+ return t;
+ up(&xt[af].mutex);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(xt_find_table_lock);
+
+void xt_table_unlock(struct xt_table *table)
+{
+ up(&xt[table->af].mutex);
+}
+EXPORT_SYMBOL_GPL(xt_table_unlock);
+
+
+struct xt_table_info *
+xt_replace_table(struct xt_table *table,
+ unsigned int num_counters,
+ struct xt_table_info *newinfo,
+ int *error)
+{
+ struct xt_table_info *oldinfo, *private;
+
+ /* Do the substitution. */
+ write_lock_bh(&table->lock);
+ private = table->private;
+ /* Check inside lock: is the old number correct? */
+ if (num_counters != private->number) {
+ duprintf("num_counters != table->private->number (%u/%u)\n",
+ num_counters, private->number);
+ write_unlock_bh(&table->lock);
+ *error = -EAGAIN;
+ return NULL;
+ }
+ oldinfo = private;
+ table->private = newinfo;
+ newinfo->initial_entries = oldinfo->initial_entries;
+ write_unlock_bh(&table->lock);
+
+ return oldinfo;
+}
+EXPORT_SYMBOL_GPL(xt_replace_table);
+
+int xt_register_table(struct xt_table *table,
+ struct xt_table_info *bootstrap,
+ struct xt_table_info *newinfo)
+{
+ int ret;
+ struct xt_table_info *private;
+
+ ret = down_interruptible(&xt[table->af].mutex);
+ if (ret != 0)
+ return ret;
+
+ /* Don't autoload: we'd eat our tail... */
+ if (list_named_find(&xt[table->af].tables, table->name)) {
+ ret = -EEXIST;
+ goto unlock;
+ }
+
+ /* Simplifies replace_table code. */
+ table->private = bootstrap;
+ if (!xt_replace_table(table, 0, newinfo, &ret))
+ goto unlock;
+
+ private = table->private;
+ duprintf("table->private->number = %u\n", private->number);
+
+ /* save number of initial entries */
+ private->initial_entries = private->number;
+
+ rwlock_init(&table->lock);
+ list_prepend(&xt[table->af].tables, table);
+
+ ret = 0;
+ unlock:
+ up(&xt[table->af].mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xt_register_table);
+
+void *xt_unregister_table(struct xt_table *table)
+{
+ struct xt_table_info *private;
+
+ down(&xt[table->af].mutex);
+ private = table->private;
+ LIST_DELETE(&xt[table->af].tables, table);
+ up(&xt[table->af].mutex);
+
+ return private;
+}
+EXPORT_SYMBOL_GPL(xt_unregister_table);
+
+#ifdef CONFIG_PROC_FS
+static char *xt_proto_prefix[NPROTO] = {
+ [AF_INET] = "ip",
+ [AF_INET6] = "ip6",
+ [NF_ARP] = "arp",
+};
+
+static struct list_head *xt_get_idx(struct list_head *list, struct seq_file *seq, loff_t pos)
+{
+ struct list_head *head = list->next;
+
+ if (!head || list_empty(list))
+ return NULL;
+
+ while (pos && (head = head->next)) {
+ if (head == list)
+ return NULL;
+ pos--;
+ }
+ return pos ? NULL : head;
+}
+
+static struct list_head *type2list(u_int16_t af, u_int16_t type)
+{
+ struct list_head *list;
+
+ switch (type) {
+ case TARGET:
+ list = &xt[af].target;
+ break;
+ case MATCH:
+ list = &xt[af].match;
+ break;
+ case TABLE:
+ list = &xt[af].tables;
+ break;
+ default:
+ list = NULL;
+ break;
+ }
+
+ return list;
+}
+
+static void *xt_tgt_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct proc_dir_entry *pde = (struct proc_dir_entry *) seq->private;
+ u_int16_t af = (unsigned long)pde->data & 0xffff;
+ u_int16_t type = (unsigned long)pde->data >> 16;
+ struct list_head *list;
+
+ if (af >= NPROTO)
+ return NULL;
+
+ list = type2list(af, type);
+ if (!list)
+ return NULL;
+
+ if (down_interruptible(&xt[af].mutex) != 0)
+ return NULL;
+
+ return xt_get_idx(list, seq, *pos);
+}
+
+static void *xt_tgt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct proc_dir_entry *pde = seq->private;
+ u_int16_t af = (unsigned long)pde->data & 0xffff;
+ u_int16_t type = (unsigned long)pde->data >> 16;
+ struct list_head *list;
+
+ if (af >= NPROTO)
+ return NULL;
+
+ list = type2list(af, type);
+ if (!list)
+ return NULL;
+
+ (*pos)++;
+ return xt_get_idx(list, seq, *pos);
+}
+
+static void xt_tgt_seq_stop(struct seq_file *seq, void *v)
+{
+ struct proc_dir_entry *pde = seq->private;
+ u_int16_t af = (unsigned long)pde->data & 0xffff;
+
+ up(&xt[af].mutex);
+}
+
+static int xt_name_seq_show(struct seq_file *seq, void *v)
+{
+ char *name = (char *)v + sizeof(struct list_head);
+
+ if (strlen(name))
+ return seq_printf(seq, "%s\n", name);
+ else
+ return 0;
+}
+
+static struct seq_operations xt_tgt_seq_ops = {
+ .start = xt_tgt_seq_start,
+ .next = xt_tgt_seq_next,
+ .stop = xt_tgt_seq_stop,
+ .show = xt_name_seq_show,
+};
+
+static int xt_tgt_open(struct inode *inode, struct file *file)
+{
+ int ret;
+
+ ret = seq_open(file, &xt_tgt_seq_ops);
+ if (!ret) {
+ struct seq_file *seq = file->private_data;
+ struct proc_dir_entry *pde = PDE(inode);
+
+ seq->private = pde;
+ }
+
+ return ret;
+}
+
+static struct file_operations xt_file_ops = {
+ .owner = THIS_MODULE,
+ .open = xt_tgt_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+#define FORMAT_TABLES "_tables_names"
+#define FORMAT_MATCHES "_tables_matches"
+#define FORMAT_TARGETS "_tables_targets"
+
+#endif /* CONFIG_PROC_FS */
+
+int xt_proto_init(int af)
+{
+#ifdef CONFIG_PROC_FS
+ char buf[XT_FUNCTION_MAXNAMELEN];
+ struct proc_dir_entry *proc;
+#endif
+
+ if (af >= NPROTO)
+ return -EINVAL;
+
+
+#ifdef CONFIG_PROC_FS
+ strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+ strlcat(buf, FORMAT_TABLES, sizeof(buf));
+ proc = proc_net_fops_create(buf, 0440, &xt_file_ops);
+ if (!proc)
+ goto out;
+ proc->data = (void *) ((unsigned long) af | (TABLE << 16));
+
+
+ strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+ strlcat(buf, FORMAT_MATCHES, sizeof(buf));
+ proc = proc_net_fops_create(buf, 0440, &xt_file_ops);
+ if (!proc)
+ goto out_remove_tables;
+ proc->data = (void *) ((unsigned long) af | (MATCH << 16));
+
+ strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+ strlcat(buf, FORMAT_TARGETS, sizeof(buf));
+ proc = proc_net_fops_create(buf, 0440, &xt_file_ops);
+ if (!proc)
+ goto out_remove_matches;
+ proc->data = (void *) ((unsigned long) af | (TARGET << 16));
+#endif
+
+ return 0;
+
+#ifdef CONFIG_PROC_FS
+out_remove_matches:
+ strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+ strlcat(buf, FORMAT_MATCHES, sizeof(buf));
+ proc_net_remove(buf);
+
+out_remove_tables:
+ strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+ strlcat(buf, FORMAT_TABLES, sizeof(buf));
+ proc_net_remove(buf);
+out:
+ return -1;
+#endif
+}
+EXPORT_SYMBOL_GPL(xt_proto_init);
+
+void xt_proto_fini(int af)
+{
+#ifdef CONFIG_PROC_FS
+ char buf[XT_FUNCTION_MAXNAMELEN];
+
+ strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+ strlcat(buf, FORMAT_TABLES, sizeof(buf));
+ proc_net_remove(buf);
+
+ strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+ strlcat(buf, FORMAT_TARGETS, sizeof(buf));
+ proc_net_remove(buf);
+
+ strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+ strlcat(buf, FORMAT_MATCHES, sizeof(buf));
+ proc_net_remove(buf);
+#endif /*CONFIG_PROC_FS*/
+}
+EXPORT_SYMBOL_GPL(xt_proto_fini);
+
+
+static int __init xt_init(void)
+{
+ int i;
+
+ xt = kmalloc(sizeof(struct xt_af) * NPROTO, GFP_KERNEL);
+ if (!xt)
+ return -ENOMEM;
+
+ for (i = 0; i < NPROTO; i++) {
+ init_MUTEX(&xt[i].mutex);
+ INIT_LIST_HEAD(&xt[i].target);
+ INIT_LIST_HEAD(&xt[i].match);
+ INIT_LIST_HEAD(&xt[i].tables);
+ }
+ return 0;
+}
+
+static void __exit xt_fini(void)
+{
+ kfree(xt);
+}
+
+module_init(xt_init);
+module_exit(xt_fini);
+
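The core above exposes per-family registration (xt_register_target(), xt_register_match(), xt_register_table()); the converted extensions that follow all use the same pattern: register an AF_INET instance, then an AF_INET6 instance, and unwind the first registration if the second fails. Below is a minimal sketch of that pattern for a hypothetical no-op target; the "EXAMPLE" name and its alias are invented for illustration and are not part of this patch set. Each family gets its own struct xt_target because the embedded list node can only sit on one per-family list at a time (compare classify_reg/classify6_reg further down).

/* Hypothetical illustration only -- not part of this patch set.
 * Shows the xt_register_target()/xt_unregister_target() pattern
 * used by the converted modules below.
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>

MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_EXAMPLE");	/* hypothetical alias */

static unsigned int
example_target(struct sk_buff **pskb,
	       const struct net_device *in,
	       const struct net_device *out,
	       unsigned int hooknum,
	       const void *targinfo,
	       void *userinfo)
{
	/* Do nothing; just continue rule traversal. */
	return XT_CONTINUE;
}

static int
example_checkentry(const char *tablename,
		   const void *entry,
		   void *targinfo,
		   unsigned int targinfosize,
		   unsigned int hook_mask)
{
	/* No target info to validate in this sketch. */
	return 1;
}

static struct xt_target example_reg = {
	.name		= "EXAMPLE",
	.target		= example_target,
	.checkentry	= example_checkentry,
	.me		= THIS_MODULE,
};
static struct xt_target example6_reg = {
	.name		= "EXAMPLE",
	.target		= example_target,
	.checkentry	= example_checkentry,
	.me		= THIS_MODULE,
};

static int __init init(void)
{
	int ret;

	ret = xt_register_target(AF_INET, &example_reg);
	if (ret)
		return ret;

	ret = xt_register_target(AF_INET6, &example6_reg);
	if (ret)
		xt_unregister_target(AF_INET, &example_reg);

	return ret;
}

static void __exit fini(void)
{
	xt_unregister_target(AF_INET6, &example6_reg);
	xt_unregister_target(AF_INET, &example_reg);
}

module_init(init);
module_exit(fini);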
diff --git a/net/ipv4/netfilter/ipt_CLASSIFY.c b/net/netfilter/xt_CLASSIFY.c
index dab78d8bd49..78ee266a12e 100644
--- a/net/ipv4/netfilter/ipt_CLASSIFY.c
+++ b/net/netfilter/xt_CLASSIFY.c
@@ -15,12 +15,13 @@
#include <linux/ip.h>
#include <net/checksum.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_CLASSIFY.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_CLASSIFY.h>
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("iptables qdisc classification target module");
+MODULE_ALIAS("ipt_CLASSIFY");
static unsigned int
target(struct sk_buff **pskb,
@@ -30,25 +31,25 @@ target(struct sk_buff **pskb,
const void *targinfo,
void *userinfo)
{
- const struct ipt_classify_target_info *clinfo = targinfo;
+ const struct xt_classify_target_info *clinfo = targinfo;
- if((*pskb)->priority != clinfo->priority)
+ if ((*pskb)->priority != clinfo->priority)
(*pskb)->priority = clinfo->priority;
- return IPT_CONTINUE;
+ return XT_CONTINUE;
}
static int
checkentry(const char *tablename,
- const struct ipt_entry *e,
+ const void *e,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
{
- if (targinfosize != IPT_ALIGN(sizeof(struct ipt_classify_target_info))){
+ if (targinfosize != XT_ALIGN(sizeof(struct xt_classify_target_info))){
printk(KERN_ERR "CLASSIFY: invalid size (%u != %Zu).\n",
targinfosize,
- IPT_ALIGN(sizeof(struct ipt_classify_target_info)));
+ XT_ALIGN(sizeof(struct xt_classify_target_info)));
return 0;
}
@@ -69,21 +70,39 @@ checkentry(const char *tablename,
return 1;
}
-static struct ipt_target ipt_classify_reg = {
+static struct xt_target classify_reg = {
+ .name = "CLASSIFY",
+ .target = target,
+ .checkentry = checkentry,
+ .me = THIS_MODULE,
+};
+static struct xt_target classify6_reg = {
.name = "CLASSIFY",
.target = target,
.checkentry = checkentry,
.me = THIS_MODULE,
};
+
static int __init init(void)
{
- return ipt_register_target(&ipt_classify_reg);
+ int ret;
+
+ ret = xt_register_target(AF_INET, &classify_reg);
+ if (ret)
+ return ret;
+
+ ret = xt_register_target(AF_INET6, &classify6_reg);
+ if (ret)
+ xt_unregister_target(AF_INET, &classify_reg);
+
+ return ret;
}
static void __exit fini(void)
{
- ipt_unregister_target(&ipt_classify_reg);
+ xt_unregister_target(AF_INET, &classify_reg);
+ xt_unregister_target(AF_INET6, &classify6_reg);
}
module_init(init);
diff --git a/net/ipv4/netfilter/ipt_CONNMARK.c b/net/netfilter/xt_CONNMARK.c
index 8acac5a40a9..22506e376be 100644
--- a/net/ipv4/netfilter/ipt_CONNMARK.c
+++ b/net/netfilter/xt_CONNMARK.c
@@ -26,9 +26,10 @@
MODULE_AUTHOR("Henrik Nordstrom <hno@marasytems.com>");
MODULE_DESCRIPTION("IP tables CONNMARK matching module");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_CONNMARK");
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_CONNMARK.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_CONNMARK.h>
#include <net/netfilter/nf_conntrack_compat.h>
static unsigned int
@@ -39,7 +40,7 @@ target(struct sk_buff **pskb,
const void *targinfo,
void *userinfo)
{
- const struct ipt_connmark_target_info *markinfo = targinfo;
+ const struct xt_connmark_target_info *markinfo = targinfo;
u_int32_t diff;
u_int32_t nfmark;
u_int32_t newmark;
@@ -48,17 +49,17 @@ target(struct sk_buff **pskb,
if (ctmark) {
switch(markinfo->mode) {
- case IPT_CONNMARK_SET:
+ case XT_CONNMARK_SET:
newmark = (*ctmark & ~markinfo->mask) | markinfo->mark;
if (newmark != *ctmark)
*ctmark = newmark;
break;
- case IPT_CONNMARK_SAVE:
+ case XT_CONNMARK_SAVE:
newmark = (*ctmark & ~markinfo->mask) | ((*pskb)->nfmark & markinfo->mask);
if (*ctmark != newmark)
*ctmark = newmark;
break;
- case IPT_CONNMARK_RESTORE:
+ case XT_CONNMARK_RESTORE:
nfmark = (*pskb)->nfmark;
diff = (*ctmark ^ nfmark) & markinfo->mask;
if (diff != 0)
@@ -67,25 +68,25 @@ target(struct sk_buff **pskb,
}
}
- return IPT_CONTINUE;
+ return XT_CONTINUE;
}
static int
checkentry(const char *tablename,
- const struct ipt_entry *e,
+ const void *entry,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
{
- struct ipt_connmark_target_info *matchinfo = targinfo;
- if (targinfosize != IPT_ALIGN(sizeof(struct ipt_connmark_target_info))) {
+ struct xt_connmark_target_info *matchinfo = targinfo;
+ if (targinfosize != XT_ALIGN(sizeof(struct xt_connmark_target_info))) {
printk(KERN_WARNING "CONNMARK: targinfosize %u != %Zu\n",
targinfosize,
- IPT_ALIGN(sizeof(struct ipt_connmark_target_info)));
+ XT_ALIGN(sizeof(struct xt_connmark_target_info)));
return 0;
}
- if (matchinfo->mode == IPT_CONNMARK_RESTORE) {
+ if (matchinfo->mode == XT_CONNMARK_RESTORE) {
if (strcmp(tablename, "mangle") != 0) {
printk(KERN_WARNING "CONNMARK: restore can only be called from \"mangle\" table, not \"%s\"\n", tablename);
return 0;
@@ -100,7 +101,13 @@ checkentry(const char *tablename,
return 1;
}
-static struct ipt_target ipt_connmark_reg = {
+static struct xt_target connmark_reg = {
+ .name = "CONNMARK",
+ .target = &target,
+ .checkentry = &checkentry,
+ .me = THIS_MODULE
+};
+static struct xt_target connmark6_reg = {
.name = "CONNMARK",
.target = &target,
.checkentry = &checkentry,
@@ -109,13 +116,25 @@ static struct ipt_target ipt_connmark_reg = {
static int __init init(void)
{
- need_ip_conntrack();
- return ipt_register_target(&ipt_connmark_reg);
+ int ret;
+
+ need_conntrack();
+
+ ret = xt_register_target(AF_INET, &connmark_reg);
+ if (ret)
+ return ret;
+
+ ret = xt_register_target(AF_INET6, &connmark6_reg);
+ if (ret)
+ xt_unregister_target(AF_INET, &connmark_reg);
+
+ return ret;
}
static void __exit fini(void)
{
- ipt_unregister_target(&ipt_connmark_reg);
+ xt_unregister_target(AF_INET, &connmark_reg);
+ xt_unregister_target(AF_INET6, &connmark6_reg);
}
module_init(init);
diff --git a/net/ipv4/netfilter/ipt_MARK.c b/net/netfilter/xt_MARK.c
index 52b4f2c296b..0c11ee9550f 100644
--- a/net/ipv4/netfilter/ipt_MARK.c
+++ b/net/netfilter/xt_MARK.c
@@ -12,12 +12,14 @@
#include <linux/ip.h>
#include <net/checksum.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_MARK.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_MARK.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
-MODULE_DESCRIPTION("iptables MARK modification module");
+MODULE_DESCRIPTION("ip[6]tables MARK modification module");
+MODULE_ALIAS("ipt_MARK");
+MODULE_ALIAS("ip6t_MARK");
static unsigned int
target_v0(struct sk_buff **pskb,
@@ -27,12 +29,12 @@ target_v0(struct sk_buff **pskb,
const void *targinfo,
void *userinfo)
{
- const struct ipt_mark_target_info *markinfo = targinfo;
+ const struct xt_mark_target_info *markinfo = targinfo;
if((*pskb)->nfmark != markinfo->mark)
(*pskb)->nfmark = markinfo->mark;
- return IPT_CONTINUE;
+ return XT_CONTINUE;
}
static unsigned int
@@ -43,19 +45,19 @@ target_v1(struct sk_buff **pskb,
const void *targinfo,
void *userinfo)
{
- const struct ipt_mark_target_info_v1 *markinfo = targinfo;
+ const struct xt_mark_target_info_v1 *markinfo = targinfo;
int mark = 0;
switch (markinfo->mode) {
- case IPT_MARK_SET:
+ case XT_MARK_SET:
mark = markinfo->mark;
break;
- case IPT_MARK_AND:
+ case XT_MARK_AND:
mark = (*pskb)->nfmark & markinfo->mark;
break;
- case IPT_MARK_OR:
+ case XT_MARK_OR:
mark = (*pskb)->nfmark | markinfo->mark;
break;
}
@@ -63,23 +65,23 @@ target_v1(struct sk_buff **pskb,
if((*pskb)->nfmark != mark)
(*pskb)->nfmark = mark;
- return IPT_CONTINUE;
+ return XT_CONTINUE;
}
static int
checkentry_v0(const char *tablename,
- const struct ipt_entry *e,
+ const void *entry,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
{
- struct ipt_mark_target_info *markinfo = targinfo;
+ struct xt_mark_target_info *markinfo = targinfo;
- if (targinfosize != IPT_ALIGN(sizeof(struct ipt_mark_target_info))) {
+ if (targinfosize != XT_ALIGN(sizeof(struct xt_mark_target_info))) {
printk(KERN_WARNING "MARK: targinfosize %u != %Zu\n",
targinfosize,
- IPT_ALIGN(sizeof(struct ipt_mark_target_info)));
+ XT_ALIGN(sizeof(struct xt_mark_target_info)));
return 0;
}
@@ -98,17 +100,17 @@ checkentry_v0(const char *tablename,
static int
checkentry_v1(const char *tablename,
- const struct ipt_entry *e,
+ const void *entry,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
{
- struct ipt_mark_target_info_v1 *markinfo = targinfo;
+ struct xt_mark_target_info_v1 *markinfo = targinfo;
- if (targinfosize != IPT_ALIGN(sizeof(struct ipt_mark_target_info_v1))){
+ if (targinfosize != XT_ALIGN(sizeof(struct xt_mark_target_info_v1))){
printk(KERN_WARNING "MARK: targinfosize %u != %Zu\n",
targinfosize,
- IPT_ALIGN(sizeof(struct ipt_mark_target_info_v1)));
+ XT_ALIGN(sizeof(struct xt_mark_target_info_v1)));
return 0;
}
@@ -117,9 +119,9 @@ checkentry_v1(const char *tablename,
return 0;
}
- if (markinfo->mode != IPT_MARK_SET
- && markinfo->mode != IPT_MARK_AND
- && markinfo->mode != IPT_MARK_OR) {
+ if (markinfo->mode != XT_MARK_SET
+ && markinfo->mode != XT_MARK_AND
+ && markinfo->mode != XT_MARK_OR) {
printk(KERN_WARNING "MARK: unknown mode %u\n",
markinfo->mode);
return 0;
@@ -133,7 +135,7 @@ checkentry_v1(const char *tablename,
return 1;
}
-static struct ipt_target ipt_mark_reg_v0 = {
+static struct xt_target ipt_mark_reg_v0 = {
.name = "MARK",
.target = target_v0,
.checkentry = checkentry_v0,
@@ -141,7 +143,7 @@ static struct ipt_target ipt_mark_reg_v0 = {
.revision = 0,
};
-static struct ipt_target ipt_mark_reg_v1 = {
+static struct xt_target ipt_mark_reg_v1 = {
.name = "MARK",
.target = target_v1,
.checkentry = checkentry_v1,
@@ -149,23 +151,40 @@ static struct ipt_target ipt_mark_reg_v1 = {
.revision = 1,
};
+static struct xt_target ip6t_mark_reg_v0 = {
+ .name = "MARK",
+ .target = target_v0,
+ .checkentry = checkentry_v0,
+ .me = THIS_MODULE,
+ .revision = 0,
+};
+
static int __init init(void)
{
int err;
- err = ipt_register_target(&ipt_mark_reg_v0);
- if (!err) {
- err = ipt_register_target(&ipt_mark_reg_v1);
- if (err)
- ipt_unregister_target(&ipt_mark_reg_v0);
+ err = xt_register_target(AF_INET, &ipt_mark_reg_v0);
+ if (err)
+ return err;
+
+ err = xt_register_target(AF_INET, &ipt_mark_reg_v1);
+ if (err)
+ xt_unregister_target(AF_INET, &ipt_mark_reg_v0);
+
+ err = xt_register_target(AF_INET6, &ip6t_mark_reg_v0);
+ if (err) {
+ xt_unregister_target(AF_INET, &ipt_mark_reg_v0);
+ xt_unregister_target(AF_INET, &ipt_mark_reg_v1);
}
+
return err;
}
static void __exit fini(void)
{
- ipt_unregister_target(&ipt_mark_reg_v0);
- ipt_unregister_target(&ipt_mark_reg_v1);
+ xt_unregister_target(AF_INET, &ipt_mark_reg_v0);
+ xt_unregister_target(AF_INET, &ipt_mark_reg_v1);
+ xt_unregister_target(AF_INET6, &ip6t_mark_reg_v0);
}
module_init(init);
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
new file mode 100644
index 00000000000..8b76b6f8d1e
--- /dev/null
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -0,0 +1,107 @@
+/* iptables module for using new netfilter netlink queue
+ *
+ * (C) 2005 by Harald Welte <laforge@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter_arp.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_NFQUEUE.h>
+
+MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
+MODULE_DESCRIPTION("[ip,ip6,arp]_tables NFQUEUE target");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_NFQUEUE");
+MODULE_ALIAS("ip6t_NFQUEUE");
+MODULE_ALIAS("arpt_NFQUEUE");
+
+static unsigned int
+target(struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int hooknum,
+ const void *targinfo,
+ void *userinfo)
+{
+ const struct xt_NFQ_info *tinfo = targinfo;
+
+ return NF_QUEUE_NR(tinfo->queuenum);
+}
+
+static int
+checkentry(const char *tablename,
+ const void *entry,
+ void *targinfo,
+ unsigned int targinfosize,
+ unsigned int hook_mask)
+{
+ if (targinfosize != XT_ALIGN(sizeof(struct xt_NFQ_info))) {
+ printk(KERN_WARNING "NFQUEUE: targinfosize %u != %Zu\n",
+ targinfosize,
+ XT_ALIGN(sizeof(struct xt_NFQ_info)));
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct xt_target ipt_NFQ_reg = {
+ .name = "NFQUEUE",
+ .target = target,
+ .checkentry = checkentry,
+ .me = THIS_MODULE,
+};
+
+static struct xt_target ip6t_NFQ_reg = {
+ .name = "NFQUEUE",
+ .target = target,
+ .checkentry = checkentry,
+ .me = THIS_MODULE,
+};
+
+static struct xt_target arpt_NFQ_reg = {
+ .name = "NFQUEUE",
+ .target = target,
+ .checkentry = checkentry,
+ .me = THIS_MODULE,
+};
+
+static int __init init(void)
+{
+ int ret;
+ ret = xt_register_target(AF_INET, &ipt_NFQ_reg);
+ if (ret)
+ return ret;
+ ret = xt_register_target(AF_INET6, &ip6t_NFQ_reg);
+ if (ret)
+ goto out_ip;
+ ret = xt_register_target(NF_ARP, &arpt_NFQ_reg);
+ if (ret)
+ goto out_ip6;
+
+ return ret;
+out_ip6:
+ xt_unregister_target(AF_INET6, &ip6t_NFQ_reg);
+out_ip:
+ xt_unregister_target(AF_INET, &ipt_NFQ_reg);
+
+ return ret;
+}
+
+static void __exit fini(void)
+{
+ xt_unregister_target(NF_ARP, &arpt_NFQ_reg);
+ xt_unregister_target(AF_INET6, &ip6t_NFQ_reg);
+ xt_unregister_target(AF_INET, &ipt_NFQ_reg);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/ipv4/netfilter/ipt_NOTRACK.c b/net/netfilter/xt_NOTRACK.c
index e3c69d072c6..24d477afa93 100644
--- a/net/ipv4/netfilter/ipt_NOTRACK.c
+++ b/net/netfilter/xt_NOTRACK.c
@@ -4,9 +4,12 @@
#include <linux/module.h>
#include <linux/skbuff.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_conntrack_compat.h>
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_NOTRACK");
+
static unsigned int
target(struct sk_buff **pskb,
const struct net_device *in,
@@ -17,7 +20,7 @@ target(struct sk_buff **pskb,
{
/* Previously seen (loopback)? Ignore. */
if ((*pskb)->nfct != NULL)
- return IPT_CONTINUE;
+ return XT_CONTINUE;
/* Attach fake conntrack entry.
	   If there is a real ct entry corresponding to this packet,
@@ -27,12 +30,12 @@ target(struct sk_buff **pskb,
(*pskb)->nfctinfo = IP_CT_NEW;
nf_conntrack_get((*pskb)->nfct);
- return IPT_CONTINUE;
+ return XT_CONTINUE;
}
static int
checkentry(const char *tablename,
- const struct ipt_entry *e,
+ const void *entry,
void *targinfo,
unsigned int targinfosize,
unsigned int hook_mask)
@@ -51,26 +54,39 @@ checkentry(const char *tablename,
return 1;
}
-static struct ipt_target ipt_notrack_reg = {
+static struct xt_target notrack_reg = {
.name = "NOTRACK",
.target = target,
.checkentry = checkentry,
- .me = THIS_MODULE
+ .me = THIS_MODULE,
+};
+static struct xt_target notrack6_reg = {
+ .name = "NOTRACK",
+ .target = target,
+ .checkentry = checkentry,
+ .me = THIS_MODULE,
};
static int __init init(void)
{
- if (ipt_register_target(&ipt_notrack_reg))
- return -EINVAL;
+ int ret;
+
+ ret = xt_register_target(AF_INET, &notrack_reg);
+ if (ret)
+ return ret;
- return 0;
+ ret = xt_register_target(AF_INET6, &notrack6_reg);
+ if (ret)
+ xt_unregister_target(AF_INET, &notrack_reg);
+
+ return ret;
}
static void __exit fini(void)
{
- ipt_unregister_target(&ipt_notrack_reg);
+ xt_unregister_target(AF_INET6, &notrack6_reg);
+ xt_unregister_target(AF_INET, &notrack_reg);
}
module_init(init);
module_exit(fini);
-MODULE_LICENSE("GPL");
diff --git a/net/ipv4/netfilter/ipt_comment.c b/net/netfilter/xt_comment.c
index 6b76a1ea524..4ba6fd65c6e 100644
--- a/net/ipv4/netfilter/ipt_comment.c
+++ b/net/netfilter/xt_comment.c
@@ -6,12 +6,14 @@
#include <linux/module.h>
#include <linux/skbuff.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_comment.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_comment.h>
MODULE_AUTHOR("Brad Fisher <brad@info-link.net>");
MODULE_DESCRIPTION("iptables comment match module");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_comment");
+MODULE_ALIAS("ip6t_comment");
static int
match(const struct sk_buff *skb,
@@ -19,6 +21,7 @@ match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protooff,
int *hotdrop)
{
/* We always match */
@@ -27,18 +30,25 @@ match(const struct sk_buff *skb,
static int
checkentry(const char *tablename,
- const struct ipt_ip *ip,
+ const void *ip,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
{
/* Check the size */
- if (matchsize != IPT_ALIGN(sizeof(struct ipt_comment_info)))
+ if (matchsize != XT_ALIGN(sizeof(struct xt_comment_info)))
return 0;
return 1;
}
-static struct ipt_match comment_match = {
+static struct xt_match comment_match = {
+ .name = "comment",
+ .match = match,
+ .checkentry = checkentry,
+ .me = THIS_MODULE
+};
+
+static struct xt_match comment6_match = {
.name = "comment",
.match = match,
.checkentry = checkentry,
@@ -47,12 +57,23 @@ static struct ipt_match comment_match = {
static int __init init(void)
{
- return ipt_register_match(&comment_match);
+ int ret;
+
+ ret = xt_register_match(AF_INET, &comment_match);
+ if (ret)
+ return ret;
+
+ ret = xt_register_match(AF_INET6, &comment6_match);
+ if (ret)
+ xt_unregister_match(AF_INET, &comment_match);
+
+ return ret;
}
static void __exit fini(void)
{
- ipt_unregister_match(&comment_match);
+ xt_unregister_match(AF_INET, &comment_match);
+ xt_unregister_match(AF_INET6, &comment6_match);
}
module_init(init);
diff --git a/net/ipv4/netfilter/ipt_connbytes.c b/net/netfilter/xt_connbytes.c
index d68a048b717..150d2a4b0f7 100644
--- a/net/ipv4/netfilter/ipt_connbytes.c
+++ b/net/netfilter/xt_connbytes.c
@@ -6,13 +6,15 @@
* - add functionality to match number of packets
* - add functionality to match average packet size
 * - add support to match directions separately
+ * 2005-10-16 Harald Welte <laforge@netfilter.org>
+ * - Port to x_tables
*
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack_compat.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_connbytes.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_connbytes.h>
#include <asm/div64.h>
#include <asm/bitops.h>
@@ -20,6 +22,7 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("iptables match for matching number of pkts/bytes per connection");
+MODULE_ALIAS("ipt_connbytes");
/* 64bit divisor, dividend and result. dynamic precision */
static u_int64_t div64_64(u_int64_t dividend, u_int64_t divisor)
@@ -43,9 +46,10 @@ match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
- const struct ipt_connbytes_info *sinfo = matchinfo;
+ const struct xt_connbytes_info *sinfo = matchinfo;
u_int64_t what = 0; /* initialize to make gcc happy */
const struct ip_conntrack_counter *counters;
@@ -53,45 +57,45 @@ match(const struct sk_buff *skb,
return 0; /* no match */
switch (sinfo->what) {
- case IPT_CONNBYTES_PKTS:
+ case XT_CONNBYTES_PKTS:
switch (sinfo->direction) {
- case IPT_CONNBYTES_DIR_ORIGINAL:
+ case XT_CONNBYTES_DIR_ORIGINAL:
what = counters[IP_CT_DIR_ORIGINAL].packets;
break;
- case IPT_CONNBYTES_DIR_REPLY:
+ case XT_CONNBYTES_DIR_REPLY:
what = counters[IP_CT_DIR_REPLY].packets;
break;
- case IPT_CONNBYTES_DIR_BOTH:
+ case XT_CONNBYTES_DIR_BOTH:
what = counters[IP_CT_DIR_ORIGINAL].packets;
what += counters[IP_CT_DIR_REPLY].packets;
break;
}
break;
- case IPT_CONNBYTES_BYTES:
+ case XT_CONNBYTES_BYTES:
switch (sinfo->direction) {
- case IPT_CONNBYTES_DIR_ORIGINAL:
+ case XT_CONNBYTES_DIR_ORIGINAL:
what = counters[IP_CT_DIR_ORIGINAL].bytes;
break;
- case IPT_CONNBYTES_DIR_REPLY:
+ case XT_CONNBYTES_DIR_REPLY:
what = counters[IP_CT_DIR_REPLY].bytes;
break;
- case IPT_CONNBYTES_DIR_BOTH:
+ case XT_CONNBYTES_DIR_BOTH:
what = counters[IP_CT_DIR_ORIGINAL].bytes;
what += counters[IP_CT_DIR_REPLY].bytes;
break;
}
break;
- case IPT_CONNBYTES_AVGPKT:
+ case XT_CONNBYTES_AVGPKT:
switch (sinfo->direction) {
- case IPT_CONNBYTES_DIR_ORIGINAL:
+ case XT_CONNBYTES_DIR_ORIGINAL:
what = div64_64(counters[IP_CT_DIR_ORIGINAL].bytes,
counters[IP_CT_DIR_ORIGINAL].packets);
break;
- case IPT_CONNBYTES_DIR_REPLY:
+ case XT_CONNBYTES_DIR_REPLY:
what = div64_64(counters[IP_CT_DIR_REPLY].bytes,
counters[IP_CT_DIR_REPLY].packets);
break;
- case IPT_CONNBYTES_DIR_BOTH:
+ case XT_CONNBYTES_DIR_BOTH:
{
u_int64_t bytes;
u_int64_t pkts;
@@ -117,30 +121,36 @@ match(const struct sk_buff *skb,
}
static int check(const char *tablename,
- const struct ipt_ip *ip,
+ const void *ip,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
{
- const struct ipt_connbytes_info *sinfo = matchinfo;
+ const struct xt_connbytes_info *sinfo = matchinfo;
- if (matchsize != IPT_ALIGN(sizeof(struct ipt_connbytes_info)))
+ if (matchsize != XT_ALIGN(sizeof(struct xt_connbytes_info)))
return 0;
- if (sinfo->what != IPT_CONNBYTES_PKTS &&
- sinfo->what != IPT_CONNBYTES_BYTES &&
- sinfo->what != IPT_CONNBYTES_AVGPKT)
+ if (sinfo->what != XT_CONNBYTES_PKTS &&
+ sinfo->what != XT_CONNBYTES_BYTES &&
+ sinfo->what != XT_CONNBYTES_AVGPKT)
return 0;
- if (sinfo->direction != IPT_CONNBYTES_DIR_ORIGINAL &&
- sinfo->direction != IPT_CONNBYTES_DIR_REPLY &&
- sinfo->direction != IPT_CONNBYTES_DIR_BOTH)
+ if (sinfo->direction != XT_CONNBYTES_DIR_ORIGINAL &&
+ sinfo->direction != XT_CONNBYTES_DIR_REPLY &&
+ sinfo->direction != XT_CONNBYTES_DIR_BOTH)
return 0;
return 1;
}
-static struct ipt_match state_match = {
+static struct xt_match connbytes_match = {
+ .name = "connbytes",
+ .match = &match,
+ .checkentry = &check,
+ .me = THIS_MODULE
+};
+static struct xt_match connbytes6_match = {
.name = "connbytes",
.match = &match,
.checkentry = &check,
@@ -149,12 +159,21 @@ static struct ipt_match state_match = {
static int __init init(void)
{
- return ipt_register_match(&state_match);
+ int ret;
+ ret = xt_register_match(AF_INET, &connbytes_match);
+ if (ret)
+ return ret;
+
+ ret = xt_register_match(AF_INET6, &connbytes6_match);
+ if (ret)
+ xt_unregister_match(AF_INET, &connbytes_match);
+ return ret;
}
static void __exit fini(void)
{
- ipt_unregister_match(&state_match);
+ xt_unregister_match(AF_INET, &connbytes_match);
+ xt_unregister_match(AF_INET6, &connbytes6_match);
}
module_init(init);
diff --git a/net/ipv4/netfilter/ipt_connmark.c b/net/netfilter/xt_connmark.c
index 5306ef293b9..d06e925032d 100644
--- a/net/ipv4/netfilter/ipt_connmark.c
+++ b/net/netfilter/xt_connmark.c
@@ -25,9 +25,10 @@
MODULE_AUTHOR("Henrik Nordstrom <hno@marasytems.com>");
MODULE_DESCRIPTION("IP tables connmark match module");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_connmark");
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_connmark.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_connmark.h>
#include <net/netfilter/nf_conntrack_compat.h>
static int
@@ -36,9 +37,10 @@ match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
- const struct ipt_connmark_info *info = matchinfo;
+ const struct xt_connmark_info *info = matchinfo;
u_int32_t ctinfo;
const u_int32_t *ctmark = nf_ct_get_mark(skb, &ctinfo);
if (!ctmark)
@@ -49,14 +51,14 @@ match(const struct sk_buff *skb,
static int
checkentry(const char *tablename,
- const struct ipt_ip *ip,
+ const void *ip,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
{
- struct ipt_connmark_info *cm =
- (struct ipt_connmark_info *)matchinfo;
- if (matchsize != IPT_ALIGN(sizeof(struct ipt_connmark_info)))
+ struct xt_connmark_info *cm =
+ (struct xt_connmark_info *)matchinfo;
+ if (matchsize != XT_ALIGN(sizeof(struct xt_connmark_info)))
return 0;
if (cm->mark > 0xffffffff || cm->mask > 0xffffffff) {
@@ -67,21 +69,40 @@ checkentry(const char *tablename,
return 1;
}
-static struct ipt_match connmark_match = {
+static struct xt_match connmark_match = {
+ .name = "connmark",
+ .match = &match,
+ .checkentry = &checkentry,
+ .me = THIS_MODULE
+};
+static struct xt_match connmark6_match = {
.name = "connmark",
.match = &match,
.checkentry = &checkentry,
.me = THIS_MODULE
};
+
static int __init init(void)
{
- return ipt_register_match(&connmark_match);
+ int ret;
+
+ need_conntrack();
+
+ ret = xt_register_match(AF_INET, &connmark_match);
+ if (ret)
+ return ret;
+
+ ret = xt_register_match(AF_INET6, &connmark6_match);
+ if (ret)
+ xt_unregister_match(AF_INET, &connmark_match);
+ return ret;
}
static void __exit fini(void)
{
- ipt_unregister_match(&connmark_match);
+ xt_unregister_match(AF_INET6, &connmark6_match);
+ xt_unregister_match(AF_INET, &connmark_match);
}
module_init(init);
diff --git a/net/ipv4/netfilter/ipt_conntrack.c b/net/netfilter/xt_conntrack.c
index c8d18705469..ffdebc95eb9 100644
--- a/net/ipv4/netfilter/ipt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -18,12 +18,13 @@
#include <net/netfilter/nf_conntrack.h>
#endif
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_conntrack.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_conntrack.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
MODULE_DESCRIPTION("iptables connection tracking match module");
+MODULE_ALIAS("ipt_conntrack");
#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
@@ -33,9 +34,10 @@ match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
- const struct ipt_conntrack_info *sinfo = matchinfo;
+ const struct xt_conntrack_info *sinfo = matchinfo;
struct ip_conntrack *ct;
enum ip_conntrack_info ctinfo;
unsigned int statebit;
@@ -45,58 +47,58 @@ match(const struct sk_buff *skb,
#define FWINV(bool,invflg) ((bool) ^ !!(sinfo->invflags & invflg))
if (ct == &ip_conntrack_untracked)
- statebit = IPT_CONNTRACK_STATE_UNTRACKED;
+ statebit = XT_CONNTRACK_STATE_UNTRACKED;
else if (ct)
- statebit = IPT_CONNTRACK_STATE_BIT(ctinfo);
+ statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
else
- statebit = IPT_CONNTRACK_STATE_INVALID;
+ statebit = XT_CONNTRACK_STATE_INVALID;
- if(sinfo->flags & IPT_CONNTRACK_STATE) {
+ if(sinfo->flags & XT_CONNTRACK_STATE) {
if (ct) {
if(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip !=
ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip)
- statebit |= IPT_CONNTRACK_STATE_SNAT;
+ statebit |= XT_CONNTRACK_STATE_SNAT;
if(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip !=
ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip)
- statebit |= IPT_CONNTRACK_STATE_DNAT;
+ statebit |= XT_CONNTRACK_STATE_DNAT;
}
- if (FWINV((statebit & sinfo->statemask) == 0, IPT_CONNTRACK_STATE))
+ if (FWINV((statebit & sinfo->statemask) == 0, XT_CONNTRACK_STATE))
return 0;
}
- if(sinfo->flags & IPT_CONNTRACK_PROTO) {
- if (!ct || FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum, IPT_CONNTRACK_PROTO))
+ if(sinfo->flags & XT_CONNTRACK_PROTO) {
+ if (!ct || FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum, XT_CONNTRACK_PROTO))
return 0;
}
- if(sinfo->flags & IPT_CONNTRACK_ORIGSRC) {
- if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip&sinfo->sipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].src.ip, IPT_CONNTRACK_ORIGSRC))
+ if(sinfo->flags & XT_CONNTRACK_ORIGSRC) {
+ if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip&sinfo->sipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].src.ip, XT_CONNTRACK_ORIGSRC))
return 0;
}
- if(sinfo->flags & IPT_CONNTRACK_ORIGDST) {
- if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip&sinfo->dipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.ip, IPT_CONNTRACK_ORIGDST))
+ if(sinfo->flags & XT_CONNTRACK_ORIGDST) {
+ if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip&sinfo->dipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.ip, XT_CONNTRACK_ORIGDST))
return 0;
}
- if(sinfo->flags & IPT_CONNTRACK_REPLSRC) {
- if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip&sinfo->sipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].src.ip, IPT_CONNTRACK_REPLSRC))
+ if(sinfo->flags & XT_CONNTRACK_REPLSRC) {
+ if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip&sinfo->sipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].src.ip, XT_CONNTRACK_REPLSRC))
return 0;
}
- if(sinfo->flags & IPT_CONNTRACK_REPLDST) {
- if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip&sinfo->dipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].dst.ip, IPT_CONNTRACK_REPLDST))
+ if(sinfo->flags & XT_CONNTRACK_REPLDST) {
+ if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip&sinfo->dipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].dst.ip, XT_CONNTRACK_REPLDST))
return 0;
}
- if(sinfo->flags & IPT_CONNTRACK_STATUS) {
- if (!ct || FWINV((ct->status & sinfo->statusmask) == 0, IPT_CONNTRACK_STATUS))
+ if(sinfo->flags & XT_CONNTRACK_STATUS) {
+ if (!ct || FWINV((ct->status & sinfo->statusmask) == 0, XT_CONNTRACK_STATUS))
return 0;
}
- if(sinfo->flags & IPT_CONNTRACK_EXPIRES) {
+ if(sinfo->flags & XT_CONNTRACK_EXPIRES) {
unsigned long expires;
if(!ct)
@@ -104,7 +106,7 @@ match(const struct sk_buff *skb,
expires = timer_pending(&ct->timeout) ? (ct->timeout.expires - jiffies)/HZ : 0;
- if (FWINV(!(expires >= sinfo->expires_min && expires <= sinfo->expires_max), IPT_CONNTRACK_EXPIRES))
+ if (FWINV(!(expires >= sinfo->expires_min && expires <= sinfo->expires_max), XT_CONNTRACK_EXPIRES))
return 0;
}
@@ -118,9 +120,10 @@ match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
- const struct ipt_conntrack_info *sinfo = matchinfo;
+ const struct xt_conntrack_info *sinfo = matchinfo;
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
unsigned int statebit;
@@ -130,58 +133,58 @@ match(const struct sk_buff *skb,
#define FWINV(bool,invflg) ((bool) ^ !!(sinfo->invflags & invflg))
if (ct == &nf_conntrack_untracked)
- statebit = IPT_CONNTRACK_STATE_UNTRACKED;
+ statebit = XT_CONNTRACK_STATE_UNTRACKED;
else if (ct)
- statebit = IPT_CONNTRACK_STATE_BIT(ctinfo);
+ statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
else
- statebit = IPT_CONNTRACK_STATE_INVALID;
+ statebit = XT_CONNTRACK_STATE_INVALID;
- if(sinfo->flags & IPT_CONNTRACK_STATE) {
+ if(sinfo->flags & XT_CONNTRACK_STATE) {
if (ct) {
if(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip !=
ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip)
- statebit |= IPT_CONNTRACK_STATE_SNAT;
+ statebit |= XT_CONNTRACK_STATE_SNAT;
if(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip !=
ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip)
- statebit |= IPT_CONNTRACK_STATE_DNAT;
+ statebit |= XT_CONNTRACK_STATE_DNAT;
}
- if (FWINV((statebit & sinfo->statemask) == 0, IPT_CONNTRACK_STATE))
+ if (FWINV((statebit & sinfo->statemask) == 0, XT_CONNTRACK_STATE))
return 0;
}
- if(sinfo->flags & IPT_CONNTRACK_PROTO) {
- if (!ct || FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum, IPT_CONNTRACK_PROTO))
+ if(sinfo->flags & XT_CONNTRACK_PROTO) {
+ if (!ct || FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum, XT_CONNTRACK_PROTO))
return 0;
}
- if(sinfo->flags & IPT_CONNTRACK_ORIGSRC) {
- if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip&sinfo->sipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].src.ip, IPT_CONNTRACK_ORIGSRC))
+ if(sinfo->flags & XT_CONNTRACK_ORIGSRC) {
+ if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip&sinfo->sipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].src.ip, XT_CONNTRACK_ORIGSRC))
return 0;
}
- if(sinfo->flags & IPT_CONNTRACK_ORIGDST) {
- if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip&sinfo->dipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.ip, IPT_CONNTRACK_ORIGDST))
+ if(sinfo->flags & XT_CONNTRACK_ORIGDST) {
+ if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip&sinfo->dipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.ip, XT_CONNTRACK_ORIGDST))
return 0;
}
- if(sinfo->flags & IPT_CONNTRACK_REPLSRC) {
- if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip&sinfo->sipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].src.ip, IPT_CONNTRACK_REPLSRC))
+ if(sinfo->flags & XT_CONNTRACK_REPLSRC) {
+ if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip&sinfo->sipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].src.ip, XT_CONNTRACK_REPLSRC))
return 0;
}
- if(sinfo->flags & IPT_CONNTRACK_REPLDST) {
- if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip&sinfo->dipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].dst.ip, IPT_CONNTRACK_REPLDST))
+ if(sinfo->flags & XT_CONNTRACK_REPLDST) {
+ if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip&sinfo->dipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].dst.ip, XT_CONNTRACK_REPLDST))
return 0;
}
- if(sinfo->flags & IPT_CONNTRACK_STATUS) {
- if (!ct || FWINV((ct->status & sinfo->statusmask) == 0, IPT_CONNTRACK_STATUS))
+ if(sinfo->flags & XT_CONNTRACK_STATUS) {
+ if (!ct || FWINV((ct->status & sinfo->statusmask) == 0, XT_CONNTRACK_STATUS))
return 0;
}
- if(sinfo->flags & IPT_CONNTRACK_EXPIRES) {
+ if(sinfo->flags & XT_CONNTRACK_EXPIRES) {
unsigned long expires;
if(!ct)
@@ -189,7 +192,7 @@ match(const struct sk_buff *skb,
expires = timer_pending(&ct->timeout) ? (ct->timeout.expires - jiffies)/HZ : 0;
- if (FWINV(!(expires >= sinfo->expires_min && expires <= sinfo->expires_max), IPT_CONNTRACK_EXPIRES))
+ if (FWINV(!(expires >= sinfo->expires_min && expires <= sinfo->expires_max), XT_CONNTRACK_EXPIRES))
return 0;
}
@@ -199,18 +202,18 @@ match(const struct sk_buff *skb,
#endif /* CONFIG_NF_IP_CONNTRACK */
static int check(const char *tablename,
- const struct ipt_ip *ip,
+ const void *ip,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
{
- if (matchsize != IPT_ALIGN(sizeof(struct ipt_conntrack_info)))
+ if (matchsize != XT_ALIGN(sizeof(struct xt_conntrack_info)))
return 0;
return 1;
}
-static struct ipt_match conntrack_match = {
+static struct xt_match conntrack_match = {
.name = "conntrack",
.match = &match,
.checkentry = &check,
@@ -219,13 +222,16 @@ static struct ipt_match conntrack_match = {
static int __init init(void)
{
- need_ip_conntrack();
- return ipt_register_match(&conntrack_match);
+ int ret;
+ need_conntrack();
+ ret = xt_register_match(AF_INET, &conntrack_match);
+
+ return ret;
}
static void __exit fini(void)
{
- ipt_unregister_match(&conntrack_match);
+ xt_unregister_match(AF_INET, &conntrack_match);
}
module_init(init);
diff --git a/net/ipv4/netfilter/ipt_dccp.c b/net/netfilter/xt_dccp.c
index ad3278bba6c..779f42fc952 100644
--- a/net/ipv4/netfilter/ipt_dccp.c
+++ b/net/netfilter/xt_dccp.c
@@ -14,8 +14,16 @@
#include <net/ip.h>
#include <linux/dccp.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_dccp.h>
+
#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_dccp.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
+MODULE_DESCRIPTION("Match for DCCP protocol packets");
+MODULE_ALIAS("ipt_dccp");
#define DCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \
|| (!!((invflag) & (option)) ^ (cond)))
@@ -26,6 +34,7 @@ static DEFINE_SPINLOCK(dccp_buflock);
static inline int
dccp_find_option(u_int8_t option,
const struct sk_buff *skb,
+ unsigned int protoff,
const struct dccp_hdr *dh,
int *hotdrop)
{
@@ -44,9 +53,7 @@ dccp_find_option(u_int8_t option,
return 0;
spin_lock_bh(&dccp_buflock);
- op = skb_header_pointer(skb,
- skb->nh.iph->ihl*4 + optoff,
- optlen, dccp_optbuf);
+ op = skb_header_pointer(skb, protoff + optoff, optlen, dccp_optbuf);
if (op == NULL) {
/* If we don't have the whole header, drop packet. */
spin_unlock_bh(&dccp_buflock);
@@ -78,10 +85,10 @@ match_types(const struct dccp_hdr *dh, u_int16_t typemask)
}
static inline int
-match_option(u_int8_t option, const struct sk_buff *skb,
+match_option(u_int8_t option, const struct sk_buff *skb, unsigned int protoff,
const struct dccp_hdr *dh, int *hotdrop)
{
- return dccp_find_option(option, skb, dh, hotdrop);
+ return dccp_find_option(option, skb, protoff, dh, hotdrop);
}
static int
@@ -90,16 +97,17 @@ match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
- const struct ipt_dccp_info *info =
- (const struct ipt_dccp_info *)matchinfo;
+ const struct xt_dccp_info *info =
+ (const struct xt_dccp_info *)matchinfo;
struct dccp_hdr _dh, *dh;
if (offset)
return 0;
- dh = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_dh), &_dh);
+ dh = skb_header_pointer(skb, protoff, sizeof(_dh), &_dh);
if (dh == NULL) {
*hotdrop = 1;
return 0;
@@ -107,42 +115,73 @@ match(const struct sk_buff *skb,
return DCCHECK(((ntohs(dh->dccph_sport) >= info->spts[0])
&& (ntohs(dh->dccph_sport) <= info->spts[1])),
- IPT_DCCP_SRC_PORTS, info->flags, info->invflags)
+ XT_DCCP_SRC_PORTS, info->flags, info->invflags)
&& DCCHECK(((ntohs(dh->dccph_dport) >= info->dpts[0])
&& (ntohs(dh->dccph_dport) <= info->dpts[1])),
- IPT_DCCP_DEST_PORTS, info->flags, info->invflags)
+ XT_DCCP_DEST_PORTS, info->flags, info->invflags)
&& DCCHECK(match_types(dh, info->typemask),
- IPT_DCCP_TYPE, info->flags, info->invflags)
- && DCCHECK(match_option(info->option, skb, dh, hotdrop),
- IPT_DCCP_OPTION, info->flags, info->invflags);
+ XT_DCCP_TYPE, info->flags, info->invflags)
+ && DCCHECK(match_option(info->option, skb, protoff, dh,
+ hotdrop),
+ XT_DCCP_OPTION, info->flags, info->invflags);
}
static int
checkentry(const char *tablename,
- const struct ipt_ip *ip,
+ const void *inf,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
{
- const struct ipt_dccp_info *info;
+ const struct ipt_ip *ip = inf;
+ const struct xt_dccp_info *info;
- info = (const struct ipt_dccp_info *)matchinfo;
+ info = (const struct xt_dccp_info *)matchinfo;
return ip->proto == IPPROTO_DCCP
- && !(ip->invflags & IPT_INV_PROTO)
- && matchsize == IPT_ALIGN(sizeof(struct ipt_dccp_info))
- && !(info->flags & ~IPT_DCCP_VALID_FLAGS)
- && !(info->invflags & ~IPT_DCCP_VALID_FLAGS)
+ && !(ip->invflags & XT_INV_PROTO)
+ && matchsize == XT_ALIGN(sizeof(struct xt_dccp_info))
+ && !(info->flags & ~XT_DCCP_VALID_FLAGS)
+ && !(info->invflags & ~XT_DCCP_VALID_FLAGS)
&& !(info->invflags & ~info->flags);
}
-static struct ipt_match dccp_match =
+static int
+checkentry6(const char *tablename,
+ const void *inf,
+ void *matchinfo,
+ unsigned int matchsize,
+ unsigned int hook_mask)
+{
+ const struct ip6t_ip6 *ip = inf;
+ const struct xt_dccp_info *info;
+
+ info = (const struct xt_dccp_info *)matchinfo;
+
+ return ip->proto == IPPROTO_DCCP
+ && !(ip->invflags & XT_INV_PROTO)
+ && matchsize == XT_ALIGN(sizeof(struct xt_dccp_info))
+ && !(info->flags & ~XT_DCCP_VALID_FLAGS)
+ && !(info->invflags & ~XT_DCCP_VALID_FLAGS)
+ && !(info->invflags & ~info->flags);
+}
+
+
+static struct xt_match dccp_match =
{
.name = "dccp",
.match = &match,
.checkentry = &checkentry,
.me = THIS_MODULE,
};
+static struct xt_match dccp6_match =
+{
+ .name = "dccp",
+ .match = &match,
+ .checkentry = &checkentry6,
+ .me = THIS_MODULE,
+};
+
static int __init init(void)
{
@@ -154,23 +193,29 @@ static int __init init(void)
dccp_optbuf = kmalloc(256 * 4, GFP_KERNEL);
if (!dccp_optbuf)
return -ENOMEM;
- ret = ipt_register_match(&dccp_match);
+ ret = xt_register_match(AF_INET, &dccp_match);
if (ret)
- kfree(dccp_optbuf);
+ goto out_kfree;
+ ret = xt_register_match(AF_INET6, &dccp6_match);
+ if (ret)
+ goto out_unreg;
+
+ return ret;
+
+out_unreg:
+ xt_unregister_match(AF_INET, &dccp_match);
+out_kfree:
+ kfree(dccp_optbuf);
return ret;
}
static void __exit fini(void)
{
- ipt_unregister_match(&dccp_match);
+ xt_unregister_match(AF_INET6, &dccp6_match);
+ xt_unregister_match(AF_INET, &dccp_match);
kfree(dccp_optbuf);
}
module_init(init);
module_exit(fini);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
-MODULE_DESCRIPTION("Match for DCCP protocol packets");
-
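The same conversion recipe repeats for every match below: the ipt_* includes and types become xt_*, the match callback gains a protoff argument, and the module registers one xt_match per address family, rolling back the IPv4 registration if the IPv6 one fails. A minimal template of that pattern, condensed from the hunks above (placeholder name "foo"; .match/.checkentry omitted for brevity, so this is a sketch rather than a loadable module):

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netfilter/x_tables.h>

static struct xt_match foo_match = {
	.name	= "foo",
	.me	= THIS_MODULE,	/* .match/.checkentry left out in this sketch */
};
static struct xt_match foo6_match = {
	.name	= "foo",
	.me	= THIS_MODULE,
};

static int __init init(void)
{
	int ret;

	ret = xt_register_match(AF_INET, &foo_match);
	if (ret)
		return ret;

	ret = xt_register_match(AF_INET6, &foo6_match);
	if (ret)
		/* second registration failed: undo the first one */
		xt_unregister_match(AF_INET, &foo_match);

	return ret;
}

static void __exit fini(void)
{
	xt_unregister_match(AF_INET6, &foo6_match);
	xt_unregister_match(AF_INET, &foo_match);
}

module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");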
diff --git a/net/ipv4/netfilter/ipt_helper.c b/net/netfilter/xt_helper.c
index bf14e1c7798..38b6715e1db 100644
--- a/net/ipv4/netfilter/ipt_helper.c
+++ b/net/netfilter/xt_helper.c
@@ -22,12 +22,14 @@
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_helper.h>
#endif
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_helper.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_helper.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Josefsson <gandalf@netfilter.org>");
MODULE_DESCRIPTION("iptables helper match module");
+MODULE_ALIAS("ipt_helper");
+MODULE_ALIAS("ip6t_helper");
#if 0
#define DEBUGP printk
@@ -42,27 +44,28 @@ match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
- const struct ipt_helper_info *info = matchinfo;
+ const struct xt_helper_info *info = matchinfo;
struct ip_conntrack *ct;
enum ip_conntrack_info ctinfo;
int ret = info->invert;
ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo);
if (!ct) {
- DEBUGP("ipt_helper: Eek! invalid conntrack?\n");
+ DEBUGP("xt_helper: Eek! invalid conntrack?\n");
return ret;
}
if (!ct->master) {
- DEBUGP("ipt_helper: conntrack %p has no master\n", ct);
+ DEBUGP("xt_helper: conntrack %p has no master\n", ct);
return ret;
}
read_lock_bh(&ip_conntrack_lock);
if (!ct->master->helper) {
- DEBUGP("ipt_helper: master ct %p has no helper\n",
+ DEBUGP("xt_helper: master ct %p has no helper\n",
exp->expectant);
goto out_unlock;
}
@@ -88,27 +91,28 @@ match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
- const struct ipt_helper_info *info = matchinfo;
+ const struct xt_helper_info *info = matchinfo;
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
int ret = info->invert;
ct = nf_ct_get((struct sk_buff *)skb, &ctinfo);
if (!ct) {
- DEBUGP("ipt_helper: Eek! invalid conntrack?\n");
+ DEBUGP("xt_helper: Eek! invalid conntrack?\n");
return ret;
}
if (!ct->master) {
- DEBUGP("ipt_helper: conntrack %p has no master\n", ct);
+ DEBUGP("xt_helper: conntrack %p has no master\n", ct);
return ret;
}
read_lock_bh(&nf_conntrack_lock);
if (!ct->master->helper) {
- DEBUGP("ipt_helper: master ct %p has no helper\n",
+ DEBUGP("xt_helper: master ct %p has no helper\n",
exp->expectant);
goto out_unlock;
}
@@ -128,23 +132,29 @@ out_unlock:
#endif
static int check(const char *tablename,
- const struct ipt_ip *ip,
+ const void *inf,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
{
- struct ipt_helper_info *info = matchinfo;
+ struct xt_helper_info *info = matchinfo;
info->name[29] = '\0';
/* verify size */
- if (matchsize != IPT_ALIGN(sizeof(struct ipt_helper_info)))
+ if (matchsize != XT_ALIGN(sizeof(struct xt_helper_info)))
return 0;
return 1;
}
-static struct ipt_match helper_match = {
+static struct xt_match helper_match = {
+ .name = "helper",
+ .match = &match,
+ .checkentry = &check,
+ .me = THIS_MODULE,
+};
+static struct xt_match helper6_match = {
.name = "helper",
.match = &match,
.checkentry = &check,
@@ -153,13 +163,24 @@ static struct ipt_match helper_match = {
static int __init init(void)
{
- need_ip_conntrack();
- return ipt_register_match(&helper_match);
+ int ret;
+ need_conntrack();
+
+ ret = xt_register_match(AF_INET, &helper_match);
+ if (ret < 0)
+ return ret;
+
+ ret = xt_register_match(AF_INET6, &helper6_match);
+ if (ret < 0)
+ xt_unregister_match(AF_INET, &helper_match);
+
+ return ret;
}
static void __exit fini(void)
{
- ipt_unregister_match(&helper_match);
+ xt_unregister_match(AF_INET, &helper_match);
+ xt_unregister_match(AF_INET6, &helper6_match);
}
module_init(init);
diff --git a/net/netfilter/xt_length.c b/net/netfilter/xt_length.c
new file mode 100644
index 00000000000..39c8faea63d
--- /dev/null
+++ b/net/netfilter/xt_length.c
@@ -0,0 +1,99 @@
+/* Kernel module to match packet length. */
+/* (C) 1999-2001 James Morris <jmorros@intercode.com.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ipv6.h>
+#include <net/ip.h>
+
+#include <linux/netfilter/xt_length.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
+MODULE_DESCRIPTION("IP tables packet length matching module");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_length");
+MODULE_ALIAS("ip6t_length");
+
+static int
+match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *matchinfo,
+ int offset,
+ unsigned int protoff,
+ int *hotdrop)
+{
+ const struct xt_length_info *info = matchinfo;
+ u_int16_t pktlen = ntohs(skb->nh.iph->tot_len);
+
+ return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;
+}
+
+static int
+match6(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *matchinfo,
+ int offset,
+ unsigned int protoff,
+ int *hotdrop)
+{
+ const struct xt_length_info *info = matchinfo;
+ u_int16_t pktlen = ntohs(skb->nh.ipv6h->payload_len) + sizeof(struct ipv6hdr);
+
+ return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;
+}
+
+static int
+checkentry(const char *tablename,
+ const void *ip,
+ void *matchinfo,
+ unsigned int matchsize,
+ unsigned int hook_mask)
+{
+ if (matchsize != XT_ALIGN(sizeof(struct xt_length_info)))
+ return 0;
+
+ return 1;
+}
+
+static struct xt_match length_match = {
+ .name = "length",
+ .match = &match,
+ .checkentry = &checkentry,
+ .me = THIS_MODULE,
+};
+static struct xt_match length6_match = {
+ .name = "length",
+ .match = &match6,
+ .checkentry = &checkentry,
+ .me = THIS_MODULE,
+};
+
+static int __init init(void)
+{
+ int ret;
+ ret = xt_register_match(AF_INET, &length_match);
+ if (ret)
+ return ret;
+ ret = xt_register_match(AF_INET6, &length6_match);
+ if (ret)
+ xt_unregister_match(AF_INET, &length_match);
+
+ return ret;
+}
+
+static void __exit fini(void)
+{
+ xt_unregister_match(AF_INET, &length_match);
+ xt_unregister_match(AF_INET6, &length6_match);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/ipv4/netfilter/ipt_limit.c b/net/netfilter/xt_limit.c
index 0c24dcc703a..15e40506bc3 100644
--- a/net/ipv4/netfilter/ipt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -18,12 +18,14 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_limit.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_limit.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Herve Eychenne <rv@wallfire.org>");
MODULE_DESCRIPTION("iptables rate limit match");
+MODULE_ALIAS("ipt_limit");
+MODULE_ALIAS("ip6t_limit");
/* The algorithm used is the Simple Token Bucket Filter (TBF)
* see net/sched/sch_tbf.c in the linux source tree
@@ -68,9 +70,10 @@ ipt_limit_match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
- struct ipt_rateinfo *r = ((struct ipt_rateinfo *)matchinfo)->master;
+ struct xt_rateinfo *r = ((struct xt_rateinfo *)matchinfo)->master;
unsigned long now = jiffies;
spin_lock_bh(&limit_lock);
@@ -96,32 +99,32 @@ user2credits(u_int32_t user)
/* If multiplying would overflow... */
if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
/* Divide first. */
- return (user / IPT_LIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
+ return (user / XT_LIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
- return (user * HZ * CREDITS_PER_JIFFY) / IPT_LIMIT_SCALE;
+ return (user * HZ * CREDITS_PER_JIFFY) / XT_LIMIT_SCALE;
}
static int
ipt_limit_checkentry(const char *tablename,
- const struct ipt_ip *ip,
+ const void *inf,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
{
- struct ipt_rateinfo *r = matchinfo;
+ struct xt_rateinfo *r = matchinfo;
- if (matchsize != IPT_ALIGN(sizeof(struct ipt_rateinfo)))
+ if (matchsize != XT_ALIGN(sizeof(struct xt_rateinfo)))
return 0;
/* Check for overflow. */
if (r->burst == 0
|| user2credits(r->avg * r->burst) < user2credits(r->avg)) {
- printk("Overflow in ipt_limit, try lower: %u/%u\n",
+ printk("Overflow in xt_limit, try lower: %u/%u\n",
r->avg, r->burst);
return 0;
}
- /* User avg in seconds * IPT_LIMIT_SCALE: convert to jiffies *
+ /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
128. */
r->prev = jiffies;
r->credit = user2credits(r->avg * r->burst); /* Credits full. */
@@ -134,7 +137,13 @@ ipt_limit_checkentry(const char *tablename,
return 1;
}
-static struct ipt_match ipt_limit_reg = {
+static struct xt_match ipt_limit_reg = {
+ .name = "limit",
+ .match = ipt_limit_match,
+ .checkentry = ipt_limit_checkentry,
+ .me = THIS_MODULE,
+};
+static struct xt_match limit6_reg = {
.name = "limit",
.match = ipt_limit_match,
.checkentry = ipt_limit_checkentry,
@@ -143,14 +152,23 @@ static struct ipt_match ipt_limit_reg = {
static int __init init(void)
{
- if (ipt_register_match(&ipt_limit_reg))
- return -EINVAL;
- return 0;
+ int ret;
+
+ ret = xt_register_match(AF_INET, &ipt_limit_reg);
+ if (ret)
+ return ret;
+
+ ret = xt_register_match(AF_INET6, &limit6_reg);
+ if (ret)
+ xt_unregister_match(AF_INET, &ipt_limit_reg);
+
+ return ret;
}
static void __exit fini(void)
{
- ipt_unregister_match(&ipt_limit_reg);
+ xt_unregister_match(AF_INET, &ipt_limit_reg);
+ xt_unregister_match(AF_INET6, &limit6_reg);
}
module_init(init);
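The limit match keeps its token-bucket arithmetic; only the IPT_* names become XT_*. The subtle part is the overflow guard in user2credits(): when the user value is large enough that multiplying by HZ and the per-jiffy credit factor would wrap a 32-bit integer, it divides by the scale first and accepts the rounding. A stand-alone sketch of that guard, with hypothetical HZ and credit constants rather than the kernel's derived ones:

#include <stdint.h>
#include <stdio.h>

#define SCALE		10000u	/* mirrors XT_LIMIT_SCALE */
#define TICK_HZ		1000u	/* assumption for this sketch */
#define CREDITS_PER_TICK 8u	/* assumption; the kernel derives this from HZ */

static uint32_t user2credits(uint32_t user)
{
	/* If multiplying would overflow 32 bits, divide first. */
	if (user > 0xFFFFFFFFu / (TICK_HZ * CREDITS_PER_TICK))
		return (user / SCALE) * TICK_HZ * CREDITS_PER_TICK;
	return (user * TICK_HZ * CREDITS_PER_TICK) / SCALE;
}

int main(void)
{
	/* e.g. a user average of 3 * SCALE, roughly one packet per 3 seconds */
	printf("credits: %u\n", user2credits(3 * SCALE));
	return 0;
}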
diff --git a/net/ipv4/netfilter/ipt_mac.c b/net/netfilter/xt_mac.c
index 11a459e33f2..0461dcb5fc7 100644
--- a/net/ipv4/netfilter/ipt_mac.c
+++ b/net/netfilter/xt_mac.c
@@ -11,13 +11,17 @@
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
-#include <linux/netfilter_ipv4/ipt_mac.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/xt_mac.h>
+#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("iptables mac matching module");
+MODULE_ALIAS("ipt_mac");
+MODULE_ALIAS("ip6t_mac");
static int
match(const struct sk_buff *skb,
@@ -25,21 +29,22 @@ match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
- const struct ipt_mac_info *info = matchinfo;
+ const struct xt_mac_info *info = matchinfo;
/* Is mac pointer valid? */
return (skb->mac.raw >= skb->head
&& (skb->mac.raw + ETH_HLEN) <= skb->data
/* If so, compare... */
- && ((memcmp(eth_hdr(skb)->h_source, info->srcaddr, ETH_ALEN)
- == 0) ^ info->invert));
+ && ((!compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr))
+ ^ info->invert));
}
static int
ipt_mac_checkentry(const char *tablename,
- const struct ipt_ip *ip,
+ const void *inf,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
@@ -48,17 +53,23 @@ ipt_mac_checkentry(const char *tablename,
if (hook_mask
& ~((1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_IN)
| (1 << NF_IP_FORWARD))) {
- printk("ipt_mac: only valid for PRE_ROUTING, LOCAL_IN or FORWARD.\n");
+ printk("xt_mac: only valid for PRE_ROUTING, LOCAL_IN or FORWARD.\n");
return 0;
}
- if (matchsize != IPT_ALIGN(sizeof(struct ipt_mac_info)))
+ if (matchsize != XT_ALIGN(sizeof(struct xt_mac_info)))
return 0;
return 1;
}
-static struct ipt_match mac_match = {
+static struct xt_match mac_match = {
+ .name = "mac",
+ .match = &match,
+ .checkentry = &ipt_mac_checkentry,
+ .me = THIS_MODULE,
+};
+static struct xt_match mac6_match = {
.name = "mac",
.match = &match,
.checkentry = &ipt_mac_checkentry,
@@ -67,12 +78,22 @@ static struct ipt_match mac_match = {
static int __init init(void)
{
- return ipt_register_match(&mac_match);
+ int ret;
+ ret = xt_register_match(AF_INET, &mac_match);
+ if (ret)
+ return ret;
+
+ ret = xt_register_match(AF_INET6, &mac6_match);
+ if (ret)
+ xt_unregister_match(AF_INET, &mac_match);
+
+ return ret;
}
static void __exit fini(void)
{
- ipt_unregister_match(&mac_match);
+ xt_unregister_match(AF_INET, &mac_match);
+ xt_unregister_match(AF_INET6, &mac6_match);
}
module_init(init);
diff --git a/net/ipv4/netfilter/ipt_mark.c b/net/netfilter/xt_mark.c
index 00bef6cdd3f..2a0ac62b72c 100644
--- a/net/ipv4/netfilter/ipt_mark.c
+++ b/net/netfilter/xt_mark.c
@@ -10,12 +10,14 @@
#include <linux/module.h>
#include <linux/skbuff.h>
-#include <linux/netfilter_ipv4/ipt_mark.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/xt_mark.h>
+#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
MODULE_DESCRIPTION("iptables mark matching module");
+MODULE_ALIAS("ipt_mark");
+MODULE_ALIAS("ip6t_mark");
static int
match(const struct sk_buff *skb,
@@ -23,23 +25,24 @@ match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
- const struct ipt_mark_info *info = matchinfo;
+ const struct xt_mark_info *info = matchinfo;
return ((skb->nfmark & info->mask) == info->mark) ^ info->invert;
}
static int
checkentry(const char *tablename,
- const struct ipt_ip *ip,
+ const void *entry,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
{
- struct ipt_mark_info *minfo = (struct ipt_mark_info *) matchinfo;
+ struct xt_mark_info *minfo = (struct xt_mark_info *) matchinfo;
- if (matchsize != IPT_ALIGN(sizeof(struct ipt_mark_info)))
+ if (matchsize != XT_ALIGN(sizeof(struct xt_mark_info)))
return 0;
if (minfo->mark > 0xffffffff || minfo->mask > 0xffffffff) {
@@ -50,7 +53,14 @@ checkentry(const char *tablename,
return 1;
}
-static struct ipt_match mark_match = {
+static struct xt_match mark_match = {
+ .name = "mark",
+ .match = &match,
+ .checkentry = &checkentry,
+ .me = THIS_MODULE,
+};
+
+static struct xt_match mark6_match = {
.name = "mark",
.match = &match,
.checkentry = &checkentry,
@@ -59,12 +69,22 @@ static struct ipt_match mark_match = {
static int __init init(void)
{
- return ipt_register_match(&mark_match);
+ int ret;
+ ret = xt_register_match(AF_INET, &mark_match);
+ if (ret)
+ return ret;
+
+ ret = xt_register_match(AF_INET6, &mark6_match);
+ if (ret)
+ xt_unregister_match(AF_INET, &mark_match);
+
+ return ret;
}
static void __exit fini(void)
{
- ipt_unregister_match(&mark_match);
+ xt_unregister_match(AF_INET, &mark_match);
+ xt_unregister_match(AF_INET6, &mark6_match);
}
module_init(init);
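The mark match body is a one-liner both before and after the conversion: compare the masked nfmark against the configured value and flip the result if the rule is negated. The same mask/compare/invert idiom recurs in realm and several other matches; as a plain C predicate (field names only loosely mirroring xt_mark_info) it reads:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the mask/compare/invert idiom used by the mark match. */
static int mark_matches(uint32_t nfmark, uint32_t mark, uint32_t mask, int invert)
{
	return ((nfmark & mask) == mark) ^ invert;
}

int main(void)
{
	/* match mark 0x2 under mask 0xf, rule not inverted */
	printf("%d\n", mark_matches(0x12, 0x2, 0xf, 0));	/* 1: low nibble is 2 */
	printf("%d\n", mark_matches(0x13, 0x2, 0xf, 0));	/* 0 */
	return 0;
}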
diff --git a/net/ipv6/netfilter/ip6t_physdev.c b/net/netfilter/xt_physdev.c
index 71515c86ece..19bb57c14df 100644
--- a/net/ipv6/netfilter/ip6t_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -10,8 +10,8 @@
#include <linux/module.h>
#include <linux/skbuff.h>
-#include <linux/netfilter_ipv6/ip6t_physdev.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter/xt_physdev.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge.h>
#define MATCH 1
#define NOMATCH 0
@@ -19,6 +19,8 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
MODULE_DESCRIPTION("iptables bridge physical device match module");
+MODULE_ALIAS("ipt_physdev");
+MODULE_ALIAS("ip6t_physdev");
static int
match(const struct sk_buff *skb,
@@ -31,7 +33,7 @@ match(const struct sk_buff *skb,
{
int i;
static const char nulldevname[IFNAMSIZ];
- const struct ip6t_physdev_info *info = matchinfo;
+ const struct xt_physdev_info *info = matchinfo;
unsigned int ret;
const char *indev, *outdev;
struct nf_bridge_info *nf_bridge;
@@ -41,37 +43,37 @@ match(const struct sk_buff *skb,
* the destination device will be a bridge. */
if (!(nf_bridge = skb->nf_bridge)) {
/* Return MATCH if the invert flags of the used options are on */
- if ((info->bitmask & IP6T_PHYSDEV_OP_BRIDGED) &&
- !(info->invert & IP6T_PHYSDEV_OP_BRIDGED))
+ if ((info->bitmask & XT_PHYSDEV_OP_BRIDGED) &&
+ !(info->invert & XT_PHYSDEV_OP_BRIDGED))
return NOMATCH;
- if ((info->bitmask & IP6T_PHYSDEV_OP_ISIN) &&
- !(info->invert & IP6T_PHYSDEV_OP_ISIN))
+ if ((info->bitmask & XT_PHYSDEV_OP_ISIN) &&
+ !(info->invert & XT_PHYSDEV_OP_ISIN))
return NOMATCH;
- if ((info->bitmask & IP6T_PHYSDEV_OP_ISOUT) &&
- !(info->invert & IP6T_PHYSDEV_OP_ISOUT))
+ if ((info->bitmask & XT_PHYSDEV_OP_ISOUT) &&
+ !(info->invert & XT_PHYSDEV_OP_ISOUT))
return NOMATCH;
- if ((info->bitmask & IP6T_PHYSDEV_OP_IN) &&
- !(info->invert & IP6T_PHYSDEV_OP_IN))
+ if ((info->bitmask & XT_PHYSDEV_OP_IN) &&
+ !(info->invert & XT_PHYSDEV_OP_IN))
return NOMATCH;
- if ((info->bitmask & IP6T_PHYSDEV_OP_OUT) &&
- !(info->invert & IP6T_PHYSDEV_OP_OUT))
+ if ((info->bitmask & XT_PHYSDEV_OP_OUT) &&
+ !(info->invert & XT_PHYSDEV_OP_OUT))
return NOMATCH;
return MATCH;
}
/* This only makes sense in the FORWARD and POSTROUTING chains */
- if ((info->bitmask & IP6T_PHYSDEV_OP_BRIDGED) &&
+ if ((info->bitmask & XT_PHYSDEV_OP_BRIDGED) &&
(!!(nf_bridge->mask & BRNF_BRIDGED) ^
- !(info->invert & IP6T_PHYSDEV_OP_BRIDGED)))
+ !(info->invert & XT_PHYSDEV_OP_BRIDGED)))
return NOMATCH;
- if ((info->bitmask & IP6T_PHYSDEV_OP_ISIN &&
- (!nf_bridge->physindev ^ !!(info->invert & IP6T_PHYSDEV_OP_ISIN))) ||
- (info->bitmask & IP6T_PHYSDEV_OP_ISOUT &&
- (!nf_bridge->physoutdev ^ !!(info->invert & IP6T_PHYSDEV_OP_ISOUT))))
+ if ((info->bitmask & XT_PHYSDEV_OP_ISIN &&
+ (!nf_bridge->physindev ^ !!(info->invert & XT_PHYSDEV_OP_ISIN))) ||
+ (info->bitmask & XT_PHYSDEV_OP_ISOUT &&
+ (!nf_bridge->physoutdev ^ !!(info->invert & XT_PHYSDEV_OP_ISOUT))))
return NOMATCH;
- if (!(info->bitmask & IP6T_PHYSDEV_OP_IN))
+ if (!(info->bitmask & XT_PHYSDEV_OP_IN))
goto match_outdev;
indev = nf_bridge->physindev ? nf_bridge->physindev->name : nulldevname;
for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned int); i++) {
@@ -80,11 +82,11 @@ match(const struct sk_buff *skb,
& ((const unsigned int *)info->in_mask)[i];
}
- if ((ret == 0) ^ !(info->invert & IP6T_PHYSDEV_OP_IN))
+ if ((ret == 0) ^ !(info->invert & XT_PHYSDEV_OP_IN))
return NOMATCH;
match_outdev:
- if (!(info->bitmask & IP6T_PHYSDEV_OP_OUT))
+ if (!(info->bitmask & XT_PHYSDEV_OP_OUT))
return MATCH;
outdev = nf_bridge->physoutdev ?
nf_bridge->physoutdev->name : nulldevname;
@@ -94,27 +96,34 @@ match_outdev:
& ((const unsigned int *)info->out_mask)[i];
}
- return (ret != 0) ^ !(info->invert & IP6T_PHYSDEV_OP_OUT);
+ return (ret != 0) ^ !(info->invert & XT_PHYSDEV_OP_OUT);
}
static int
checkentry(const char *tablename,
- const struct ip6t_ip6 *ip,
+ const void *ip,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
{
- const struct ip6t_physdev_info *info = matchinfo;
+ const struct xt_physdev_info *info = matchinfo;
- if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_physdev_info)))
+ if (matchsize != XT_ALIGN(sizeof(struct xt_physdev_info)))
return 0;
- if (!(info->bitmask & IP6T_PHYSDEV_OP_MASK) ||
- info->bitmask & ~IP6T_PHYSDEV_OP_MASK)
+ if (!(info->bitmask & XT_PHYSDEV_OP_MASK) ||
+ info->bitmask & ~XT_PHYSDEV_OP_MASK)
return 0;
return 1;
}
-static struct ip6t_match physdev_match = {
+static struct xt_match physdev_match = {
+ .name = "physdev",
+ .match = &match,
+ .checkentry = &checkentry,
+ .me = THIS_MODULE,
+};
+
+static struct xt_match physdev6_match = {
.name = "physdev",
.match = &match,
.checkentry = &checkentry,
@@ -123,12 +132,23 @@ static struct ip6t_match physdev_match = {
static int __init init(void)
{
- return ip6t_register_match(&physdev_match);
+ int ret;
+
+ ret = xt_register_match(AF_INET, &physdev_match);
+ if (ret < 0)
+ return ret;
+
+ ret = xt_register_match(AF_INET6, &physdev6_match);
+ if (ret < 0)
+ xt_unregister_match(AF_INET, &physdev_match);
+
+ return ret;
}
static void __exit fini(void)
{
- ip6t_unregister_match(&physdev_match);
+ xt_unregister_match(AF_INET, &physdev_match);
+ xt_unregister_match(AF_INET6, &physdev6_match);
}
module_init(init);
diff --git a/net/ipv4/netfilter/ipt_pkttype.c b/net/netfilter/xt_pkttype.c
index 8ddb1dc5e5a..ab1b2630f97 100644
--- a/net/ipv4/netfilter/ipt_pkttype.c
+++ b/net/netfilter/xt_pkttype.c
@@ -10,60 +10,72 @@
#include <linux/if_ether.h>
#include <linux/if_packet.h>
-#include <linux/netfilter_ipv4/ipt_pkttype.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/xt_pkttype.h>
+#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig <michal@logix.cz>");
MODULE_DESCRIPTION("IP tables match to match on linklayer packet type");
+MODULE_ALIAS("ipt_pkttype");
+MODULE_ALIAS("ip6t_pkttype");
static int match(const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
- const struct ipt_pkttype_info *info = matchinfo;
+ const struct xt_pkttype_info *info = matchinfo;
- return (skb->pkt_type == info->pkttype) ^ info->invert;
+ return (skb->pkt_type == info->pkttype) ^ info->invert;
}
static int checkentry(const char *tablename,
- const struct ipt_ip *ip,
+ const void *ip,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
{
-/*
- if (hook_mask
- & ~((1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_IN)
- | (1 << NF_IP_FORWARD))) {
- printk("ipt_pkttype: only valid for PRE_ROUTING, LOCAL_IN or FORWARD.\n");
- return 0;
- }
-*/
- if (matchsize != IPT_ALIGN(sizeof(struct ipt_pkttype_info)))
+ if (matchsize != XT_ALIGN(sizeof(struct xt_pkttype_info)))
return 0;
return 1;
}
-static struct ipt_match pkttype_match = {
+static struct xt_match pkttype_match = {
.name = "pkttype",
.match = &match,
.checkentry = &checkentry,
.me = THIS_MODULE,
};
+static struct xt_match pkttype6_match = {
+ .name = "pkttype",
+ .match = &match,
+ .checkentry = &checkentry,
+ .me = THIS_MODULE,
+};
+
static int __init init(void)
{
- return ipt_register_match(&pkttype_match);
+ int ret;
+ ret = xt_register_match(AF_INET, &pkttype_match);
+ if (ret)
+ return ret;
+
+ ret = xt_register_match(AF_INET6, &pkttype6_match);
+ if (ret)
+ xt_unregister_match(AF_INET, &pkttype_match);
+
+ return ret;
}
static void __exit fini(void)
{
- ipt_unregister_match(&pkttype_match);
+ xt_unregister_match(AF_INET, &pkttype_match);
+ xt_unregister_match(AF_INET6, &pkttype6_match);
}
module_init(init);
diff --git a/net/ipv4/netfilter/ipt_realm.c b/net/netfilter/xt_realm.c
index 54a6897ebaa..2b7e1781d34 100644
--- a/net/ipv4/netfilter/ipt_realm.c
+++ b/net/netfilter/xt_realm.c
@@ -14,12 +14,14 @@
#include <linux/netdevice.h>
#include <net/route.h>
-#include <linux/netfilter_ipv4/ipt_realm.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/xt_realm.h>
+#include <linux/netfilter/x_tables.h>
MODULE_AUTHOR("Sampsa Ranta <sampsa@netsonic.fi>");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("iptables realm match");
+MODULE_DESCRIPTION("X_tables realm match");
+MODULE_ALIAS("ipt_realm");
static int
match(const struct sk_buff *skb,
@@ -27,16 +29,17 @@ match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
- const struct ipt_realm_info *info = matchinfo;
+ const struct xt_realm_info *info = matchinfo;
struct dst_entry *dst = skb->dst;
return (info->id == (dst->tclassid & info->mask)) ^ info->invert;
}
static int check(const char *tablename,
- const struct ipt_ip *ip,
+ const void *ip,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
@@ -44,18 +47,18 @@ static int check(const char *tablename,
if (hook_mask
& ~((1 << NF_IP_POST_ROUTING) | (1 << NF_IP_FORWARD) |
(1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_LOCAL_IN))) {
- printk("ipt_realm: only valid for POST_ROUTING, LOCAL_OUT, "
+ printk("xt_realm: only valid for POST_ROUTING, LOCAL_OUT, "
"LOCAL_IN or FORWARD.\n");
return 0;
}
- if (matchsize != IPT_ALIGN(sizeof(struct ipt_realm_info))) {
- printk("ipt_realm: invalid matchsize.\n");
+ if (matchsize != XT_ALIGN(sizeof(struct xt_realm_info))) {
+ printk("xt_realm: invalid matchsize.\n");
return 0;
}
return 1;
}
-static struct ipt_match realm_match = {
+static struct xt_match realm_match = {
.name = "realm",
.match = match,
.checkentry = check,
@@ -64,12 +67,12 @@ static struct ipt_match realm_match = {
static int __init init(void)
{
- return ipt_register_match(&realm_match);
+ return xt_register_match(AF_INET, &realm_match);
}
static void __exit fini(void)
{
- ipt_unregister_match(&realm_match);
+ xt_unregister_match(AF_INET, &realm_match);
}
module_init(init);
diff --git a/net/ipv4/netfilter/ipt_sctp.c b/net/netfilter/xt_sctp.c
index fe2b327bcaa..10fbfc5ba75 100644
--- a/net/ipv4/netfilter/ipt_sctp.c
+++ b/net/netfilter/xt_sctp.c
@@ -1,10 +1,18 @@
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/ip.h>
+#include <net/ipv6.h>
#include <linux/sctp.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_sctp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_sctp.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kiran Kumar Immidi");
+MODULE_DESCRIPTION("Match for SCTP protocol packets");
+MODULE_ALIAS("ipt_sctp");
#ifdef DEBUG_SCTP
#define duprintf(format, args...) printk(format , ## args)
@@ -16,7 +24,7 @@
|| (!!((invflag) & (option)) ^ (cond)))
static int
-match_flags(const struct ipt_sctp_flag_info *flag_info,
+match_flags(const struct xt_sctp_flag_info *flag_info,
const int flag_count,
u_int8_t chunktype,
u_int8_t chunkflags)
@@ -32,15 +40,15 @@ match_flags(const struct ipt_sctp_flag_info *flag_info,
return 1;
}
-static int
+static inline int
match_packet(const struct sk_buff *skb,
+ unsigned int offset,
const u_int32_t *chunkmap,
int chunk_match_type,
- const struct ipt_sctp_flag_info *flag_info,
+ const struct xt_sctp_flag_info *flag_info,
const int flag_count,
int *hotdrop)
{
- int offset;
u_int32_t chunkmapcopy[256 / sizeof (u_int32_t)];
sctp_chunkhdr_t _sch, *sch;
@@ -52,7 +60,6 @@ match_packet(const struct sk_buff *skb,
SCTP_CHUNKMAP_COPY(chunkmapcopy, chunkmap);
}
- offset = skb->nh.iph->ihl * 4 + sizeof (sctp_sctphdr_t);
do {
sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch);
if (sch == NULL) {
@@ -118,19 +125,20 @@ match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
- const struct ipt_sctp_info *info;
+ const struct xt_sctp_info *info;
sctp_sctphdr_t _sh, *sh;
- info = (const struct ipt_sctp_info *)matchinfo;
+ info = (const struct xt_sctp_info *)matchinfo;
if (offset) {
duprintf("Dropping non-first fragment.. FIXME\n");
return 0;
}
- sh = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_sh), &_sh);
+ sh = skb_header_pointer(skb, protoff, sizeof(_sh), &_sh);
if (sh == NULL) {
duprintf("Dropping evil TCP offset=0 tinygram.\n");
*hotdrop = 1;
@@ -140,64 +148,103 @@ match(const struct sk_buff *skb,
return SCCHECK(((ntohs(sh->source) >= info->spts[0])
&& (ntohs(sh->source) <= info->spts[1])),
- IPT_SCTP_SRC_PORTS, info->flags, info->invflags)
+ XT_SCTP_SRC_PORTS, info->flags, info->invflags)
&& SCCHECK(((ntohs(sh->dest) >= info->dpts[0])
&& (ntohs(sh->dest) <= info->dpts[1])),
- IPT_SCTP_DEST_PORTS, info->flags, info->invflags)
- && SCCHECK(match_packet(skb, info->chunkmap, info->chunk_match_type,
+ XT_SCTP_DEST_PORTS, info->flags, info->invflags)
+ && SCCHECK(match_packet(skb, protoff,
+ info->chunkmap, info->chunk_match_type,
info->flag_info, info->flag_count,
hotdrop),
- IPT_SCTP_CHUNK_TYPES, info->flags, info->invflags);
+ XT_SCTP_CHUNK_TYPES, info->flags, info->invflags);
}
static int
checkentry(const char *tablename,
- const struct ipt_ip *ip,
+ const void *inf,
+ void *matchinfo,
+ unsigned int matchsize,
+ unsigned int hook_mask)
+{
+ const struct xt_sctp_info *info;
+ const struct ipt_ip *ip = inf;
+
+ info = (const struct xt_sctp_info *)matchinfo;
+
+ return ip->proto == IPPROTO_SCTP
+ && !(ip->invflags & XT_INV_PROTO)
+ && matchsize == XT_ALIGN(sizeof(struct xt_sctp_info))
+ && !(info->flags & ~XT_SCTP_VALID_FLAGS)
+ && !(info->invflags & ~XT_SCTP_VALID_FLAGS)
+ && !(info->invflags & ~info->flags)
+ && ((!(info->flags & XT_SCTP_CHUNK_TYPES)) ||
+ (info->chunk_match_type &
+ (SCTP_CHUNK_MATCH_ALL
+ | SCTP_CHUNK_MATCH_ANY
+ | SCTP_CHUNK_MATCH_ONLY)));
+}
+
+static int
+checkentry6(const char *tablename,
+ const void *inf,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
{
- const struct ipt_sctp_info *info;
+ const struct xt_sctp_info *info;
+ const struct ip6t_ip6 *ip = inf;
- info = (const struct ipt_sctp_info *)matchinfo;
+ info = (const struct xt_sctp_info *)matchinfo;
return ip->proto == IPPROTO_SCTP
- && !(ip->invflags & IPT_INV_PROTO)
- && matchsize == IPT_ALIGN(sizeof(struct ipt_sctp_info))
- && !(info->flags & ~IPT_SCTP_VALID_FLAGS)
- && !(info->invflags & ~IPT_SCTP_VALID_FLAGS)
+ && !(ip->invflags & XT_INV_PROTO)
+ && matchsize == XT_ALIGN(sizeof(struct xt_sctp_info))
+ && !(info->flags & ~XT_SCTP_VALID_FLAGS)
+ && !(info->invflags & ~XT_SCTP_VALID_FLAGS)
&& !(info->invflags & ~info->flags)
- && ((!(info->flags & IPT_SCTP_CHUNK_TYPES)) ||
+ && ((!(info->flags & XT_SCTP_CHUNK_TYPES)) ||
(info->chunk_match_type &
(SCTP_CHUNK_MATCH_ALL
| SCTP_CHUNK_MATCH_ANY
| SCTP_CHUNK_MATCH_ONLY)));
}
-static struct ipt_match sctp_match =
+
+static struct xt_match sctp_match =
{
- .list = { NULL, NULL},
.name = "sctp",
.match = &match,
.checkentry = &checkentry,
- .destroy = NULL,
+ .me = THIS_MODULE
+};
+static struct xt_match sctp6_match =
+{
+ .name = "sctp",
+ .match = &match,
+ .checkentry = &checkentry6,
.me = THIS_MODULE
};
+
static int __init init(void)
{
- return ipt_register_match(&sctp_match);
+ int ret;
+ ret = xt_register_match(AF_INET, &sctp_match);
+ if (ret)
+ return ret;
+
+ ret = xt_register_match(AF_INET6, &sctp6_match);
+ if (ret)
+ xt_unregister_match(AF_INET, &sctp_match);
+
+ return ret;
}
static void __exit fini(void)
{
- ipt_unregister_match(&sctp_match);
+ xt_unregister_match(AF_INET6, &sctp6_match);
+ xt_unregister_match(AF_INET, &sctp_match);
}
module_init(init);
module_exit(fini);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Kiran Kumar Immidi");
-MODULE_DESCRIPTION("Match for SCTP protocol packets");
-
diff --git a/net/ipv4/netfilter/ipt_state.c b/net/netfilter/xt_state.c
index 4d7f16b70ce..39ce808d40e 100644
--- a/net/ipv4/netfilter/ipt_state.c
+++ b/net/netfilter/xt_state.c
@@ -1,7 +1,7 @@
/* Kernel module to match connection tracking information. */
/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2002-2005 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -11,12 +11,14 @@
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack_compat.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_state.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_state.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
-MODULE_DESCRIPTION("iptables connection tracking state match module");
+MODULE_DESCRIPTION("ip[6]_tables connection tracking state match module");
+MODULE_ALIAS("ipt_state");
+MODULE_ALIAS("ip6t_state");
static int
match(const struct sk_buff *skb,
@@ -24,35 +26,43 @@ match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
- const struct ipt_state_info *sinfo = matchinfo;
+ const struct xt_state_info *sinfo = matchinfo;
enum ip_conntrack_info ctinfo;
unsigned int statebit;
if (nf_ct_is_untracked(skb))
- statebit = IPT_STATE_UNTRACKED;
+ statebit = XT_STATE_UNTRACKED;
else if (!nf_ct_get_ctinfo(skb, &ctinfo))
- statebit = IPT_STATE_INVALID;
+ statebit = XT_STATE_INVALID;
else
- statebit = IPT_STATE_BIT(ctinfo);
+ statebit = XT_STATE_BIT(ctinfo);
return (sinfo->statemask & statebit);
}
static int check(const char *tablename,
- const struct ipt_ip *ip,
+ const void *ip,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
{
- if (matchsize != IPT_ALIGN(sizeof(struct ipt_state_info)))
+ if (matchsize != XT_ALIGN(sizeof(struct xt_state_info)))
return 0;
return 1;
}
-static struct ipt_match state_match = {
+static struct xt_match state_match = {
+ .name = "state",
+ .match = &match,
+ .checkentry = &check,
+ .me = THIS_MODULE,
+};
+
+static struct xt_match state6_match = {
.name = "state",
.match = &match,
.checkentry = &check,
@@ -61,13 +71,25 @@ static struct ipt_match state_match = {
static int __init init(void)
{
- need_ip_conntrack();
- return ipt_register_match(&state_match);
+ int ret;
+
+ need_conntrack();
+
+ ret = xt_register_match(AF_INET, &state_match);
+ if (ret < 0)
+ return ret;
+
+ ret = xt_register_match(AF_INET6, &state6_match);
+ if (ret < 0)
+ xt_unregister_match(AF_INET,&state_match);
+
+ return ret;
}
static void __exit fini(void)
{
- ipt_unregister_match(&state_match);
+ xt_unregister_match(AF_INET, &state_match);
+ xt_unregister_match(AF_INET6, &state6_match);
}
module_init(init);
diff --git a/net/ipv4/netfilter/ipt_string.c b/net/netfilter/xt_string.c
index b5def204d79..7c7d5c8807d 100644
--- a/net/ipv4/netfilter/ipt_string.c
+++ b/net/netfilter/xt_string.c
@@ -11,23 +11,26 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_string.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_string.h>
#include <linux/textsearch.h>
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@eurodev.net>");
MODULE_DESCRIPTION("IP tables string match module");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_string");
+MODULE_ALIAS("ip6t_string");
static int match(const struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
struct ts_state state;
- struct ipt_string_info *conf = (struct ipt_string_info *) matchinfo;
+ struct xt_string_info *conf = (struct xt_string_info *) matchinfo;
memset(&state, 0, sizeof(struct ts_state));
@@ -36,18 +39,18 @@ static int match(const struct sk_buff *skb,
!= UINT_MAX) && !conf->invert;
}
-#define STRING_TEXT_PRIV(m) ((struct ipt_string_info *) m)
+#define STRING_TEXT_PRIV(m) ((struct xt_string_info *) m)
static int checkentry(const char *tablename,
- const struct ipt_ip *ip,
+ const void *ip,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
{
- struct ipt_string_info *conf = matchinfo;
+ struct xt_string_info *conf = matchinfo;
struct ts_config *ts_conf;
- if (matchsize != IPT_ALIGN(sizeof(struct ipt_string_info)))
+ if (matchsize != XT_ALIGN(sizeof(struct xt_string_info)))
return 0;
/* Damn, can't handle this case properly with iptables... */
@@ -69,7 +72,14 @@ static void destroy(void *matchinfo, unsigned int matchsize)
textsearch_destroy(STRING_TEXT_PRIV(matchinfo)->config);
}
-static struct ipt_match string_match = {
+static struct xt_match string_match = {
+ .name = "string",
+ .match = match,
+ .checkentry = checkentry,
+ .destroy = destroy,
+ .me = THIS_MODULE
+};
+static struct xt_match string6_match = {
.name = "string",
.match = match,
.checkentry = checkentry,
@@ -79,12 +89,22 @@ static struct ipt_match string_match = {
static int __init init(void)
{
- return ipt_register_match(&string_match);
+ int ret;
+
+ ret = xt_register_match(AF_INET, &string_match);
+ if (ret)
+ return ret;
+ ret = xt_register_match(AF_INET6, &string6_match);
+ if (ret)
+ xt_unregister_match(AF_INET, &string_match);
+
+ return ret;
}
static void __exit fini(void)
{
- ipt_unregister_match(&string_match);
+ xt_unregister_match(AF_INET, &string_match);
+ xt_unregister_match(AF_INET6, &string6_match);
}
module_init(init);
diff --git a/net/ipv4/netfilter/ipt_tcpmss.c b/net/netfilter/xt_tcpmss.c
index 4dc9b16ab4a..acf7f533e9f 100644
--- a/net/ipv4/netfilter/ipt_tcpmss.c
+++ b/net/netfilter/xt_tcpmss.c
@@ -1,6 +1,7 @@
/* Kernel module to match TCP MSS values. */
/* Copyright (C) 2000 Marc Boucher <marc@mbsi.ca>
+ * Portions (C) 2005 by Harald Welte <laforge@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -11,19 +12,24 @@
#include <linux/skbuff.h>
#include <net/tcp.h>
-#include <linux/netfilter_ipv4/ipt_tcpmss.h>
+#include <linux/netfilter/xt_tcpmss.h>
+#include <linux/netfilter/x_tables.h>
+
#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
#define TH_SYN 0x02
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
MODULE_DESCRIPTION("iptables TCP MSS match module");
+MODULE_ALIAS("ipt_tcpmss");
/* Returns 1 if the mss option is set and matched by the range, 0 otherwise */
static inline int
mssoption_match(u_int16_t min, u_int16_t max,
const struct sk_buff *skb,
+ unsigned int protoff,
int invert,
int *hotdrop)
{
@@ -33,8 +39,7 @@ mssoption_match(u_int16_t min, u_int16_t max,
unsigned int i, optlen;
/* If we don't have the whole header, drop packet. */
- th = skb_header_pointer(skb, skb->nh.iph->ihl * 4,
- sizeof(_tcph), &_tcph);
+ th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
if (th == NULL)
goto dropit;
@@ -47,8 +52,7 @@ mssoption_match(u_int16_t min, u_int16_t max,
goto out;
/* Truncated options. */
- op = skb_header_pointer(skb, skb->nh.iph->ihl * 4 + sizeof(*th),
- optlen, _opt);
+ op = skb_header_pointer(skb, protoff + sizeof(*th), optlen, _opt);
if (op == NULL)
goto dropit;
@@ -79,22 +83,24 @@ match(const struct sk_buff *skb,
const struct net_device *out,
const void *matchinfo,
int offset,
+ unsigned int protoff,
int *hotdrop)
{
- const struct ipt_tcpmss_match_info *info = matchinfo;
+ const struct xt_tcpmss_match_info *info = matchinfo;
- return mssoption_match(info->mss_min, info->mss_max, skb,
+ return mssoption_match(info->mss_min, info->mss_max, skb, protoff,
info->invert, hotdrop);
}
static int
checkentry(const char *tablename,
- const struct ipt_ip *ip,
+ const void *ipinfo,
void *matchinfo,
unsigned int matchsize,
unsigned int hook_mask)
{
- if (matchsize != IPT_ALIGN(sizeof(struct ipt_tcpmss_match_info)))
+ const struct ipt_ip *ip = ipinfo;
+ if (matchsize != XT_ALIGN(sizeof(struct xt_tcpmss_match_info)))
return 0;
/* Must specify -p tcp */
@@ -106,21 +112,60 @@ checkentry(const char *tablename,
return 1;
}
-static struct ipt_match tcpmss_match = {
+static int
+checkentry6(const char *tablename,
+ const void *ipinfo,
+ void *matchinfo,
+ unsigned int matchsize,
+ unsigned int hook_mask)
+{
+ const struct ip6t_ip6 *ip = ipinfo;
+
+ if (matchsize != XT_ALIGN(sizeof(struct xt_tcpmss_match_info)))
+ return 0;
+
+ /* Must specify -p tcp */
+ if (ip->proto != IPPROTO_TCP || (ip->invflags & XT_INV_PROTO)) {
+ printk("tcpmss: Only works on TCP packets\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct xt_match tcpmss_match = {
.name = "tcpmss",
.match = &match,
.checkentry = &checkentry,
.me = THIS_MODULE,
};
+static struct xt_match tcpmss6_match = {
+ .name = "tcpmss",
+ .match = &match,
+ .checkentry = &checkentry6,
+ .me = THIS_MODULE,
+};
+
+
static int __init init(void)
{
- return ipt_register_match(&tcpmss_match);
+ int ret;
+ ret = xt_register_match(AF_INET, &tcpmss_match);
+ if (ret)
+ return ret;
+
+ ret = xt_register_match(AF_INET6, &tcpmss6_match);
+ if (ret)
+ xt_unregister_match(AF_INET, &tcpmss_match);
+
+ return ret;
}
static void __exit fini(void)
{
- ipt_unregister_match(&tcpmss_match);
+ xt_unregister_match(AF_INET6, &tcpmss6_match);
+ xt_unregister_match(AF_INET, &tcpmss_match);
}
module_init(init);
diff --git a/net/netfilter/xt_tcpudp.c b/net/netfilter/xt_tcpudp.c
new file mode 100644
index 00000000000..669c8113cc6
--- /dev/null
+++ b/net/netfilter/xt_tcpudp.c
@@ -0,0 +1,334 @@
+#include <linux/types.h>
+#include <linux/module.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_tcpudp.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+
+MODULE_DESCRIPTION("x_tables match for TCP and UDP, supports IPv4 and IPv6");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("xt_tcp");
+MODULE_ALIAS("xt_udp");
+MODULE_ALIAS("ipt_udp");
+MODULE_ALIAS("ipt_tcp");
+MODULE_ALIAS("ip6t_udp");
+MODULE_ALIAS("ip6t_tcp");
+
+#ifdef DEBUG_IP_FIREWALL_USER
+#define duprintf(format, args...) printk(format , ## args)
+#else
+#define duprintf(format, args...)
+#endif
+
+
+/* Returns 1 if the port is matched by the range, 0 otherwise */
+static inline int
+port_match(u_int16_t min, u_int16_t max, u_int16_t port, int invert)
+{
+ int ret;
+
+ ret = (port >= min && port <= max) ^ invert;
+ return ret;
+}
+
+static int
+tcp_find_option(u_int8_t option,
+ const struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int optlen,
+ int invert,
+ int *hotdrop)
+{
+ /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */
+ u_int8_t _opt[60 - sizeof(struct tcphdr)], *op;
+ unsigned int i;
+
+ duprintf("tcp_match: finding option\n");
+
+ if (!optlen)
+ return invert;
+
+ /* If we don't have the whole header, drop packet. */
+ op = skb_header_pointer(skb, protoff + sizeof(struct tcphdr),
+ optlen, _opt);
+ if (op == NULL) {
+ *hotdrop = 1;
+ return 0;
+ }
+
+ for (i = 0; i < optlen; ) {
+ if (op[i] == option) return !invert;
+ if (op[i] < 2) i++;
+ else i += op[i+1]?:1;
+ }
+
+ return invert;
+}
+
+static int
+tcp_match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *matchinfo,
+ int offset,
+ unsigned int protoff,
+ int *hotdrop)
+{
+ struct tcphdr _tcph, *th;
+ const struct xt_tcp *tcpinfo = matchinfo;
+
+ if (offset) {
+ /* To quote Alan:
+
+ Don't allow a fragment of TCP 8 bytes in. Nobody normal
+ causes this. Its a cracker trying to break in by doing a
+ flag overwrite to pass the direction checks.
+ */
+ if (offset == 1) {
+ duprintf("Dropping evil TCP offset=1 frag.\n");
+ *hotdrop = 1;
+ }
+ /* Must not be a fragment. */
+ return 0;
+ }
+
+#define FWINVTCP(bool,invflg) ((bool) ^ !!(tcpinfo->invflags & invflg))
+
+ th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
+ if (th == NULL) {
+ /* We've been asked to examine this packet, and we
+ can't. Hence, no choice but to drop. */
+ duprintf("Dropping evil TCP offset=0 tinygram.\n");
+ *hotdrop = 1;
+ return 0;
+ }
+
+ if (!port_match(tcpinfo->spts[0], tcpinfo->spts[1],
+ ntohs(th->source),
+ !!(tcpinfo->invflags & XT_TCP_INV_SRCPT)))
+ return 0;
+ if (!port_match(tcpinfo->dpts[0], tcpinfo->dpts[1],
+ ntohs(th->dest),
+ !!(tcpinfo->invflags & XT_TCP_INV_DSTPT)))
+ return 0;
+ if (!FWINVTCP((((unsigned char *)th)[13] & tcpinfo->flg_mask)
+ == tcpinfo->flg_cmp,
+ XT_TCP_INV_FLAGS))
+ return 0;
+ if (tcpinfo->option) {
+ if (th->doff * 4 < sizeof(_tcph)) {
+ *hotdrop = 1;
+ return 0;
+ }
+ if (!tcp_find_option(tcpinfo->option, skb, protoff,
+ th->doff*4 - sizeof(_tcph),
+ tcpinfo->invflags & XT_TCP_INV_OPTION,
+ hotdrop))
+ return 0;
+ }
+ return 1;
+}
+
+/* Called when user tries to insert an entry of this type. */
+static int
+tcp_checkentry(const char *tablename,
+ const void *info,
+ void *matchinfo,
+ unsigned int matchsize,
+ unsigned int hook_mask)
+{
+ const struct ipt_ip *ip = info;
+ const struct xt_tcp *tcpinfo = matchinfo;
+
+ /* Must specify proto == TCP, and no unknown invflags */
+ return ip->proto == IPPROTO_TCP
+ && !(ip->invflags & XT_INV_PROTO)
+ && matchsize == XT_ALIGN(sizeof(struct xt_tcp))
+ && !(tcpinfo->invflags & ~XT_TCP_INV_MASK);
+}
+
+/* Called when user tries to insert an entry of this type. */
+static int
+tcp6_checkentry(const char *tablename,
+ const void *entry,
+ void *matchinfo,
+ unsigned int matchsize,
+ unsigned int hook_mask)
+{
+ const struct ip6t_ip6 *ipv6 = entry;
+ const struct xt_tcp *tcpinfo = matchinfo;
+
+ /* Must specify proto == TCP, and no unknown invflags */
+ return ipv6->proto == IPPROTO_TCP
+ && !(ipv6->invflags & XT_INV_PROTO)
+ && matchsize == XT_ALIGN(sizeof(struct xt_tcp))
+ && !(tcpinfo->invflags & ~XT_TCP_INV_MASK);
+}
+
+
+static int
+udp_match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *matchinfo,
+ int offset,
+ unsigned int protoff,
+ int *hotdrop)
+{
+ struct udphdr _udph, *uh;
+ const struct xt_udp *udpinfo = matchinfo;
+
+ /* Must not be a fragment. */
+ if (offset)
+ return 0;
+
+ uh = skb_header_pointer(skb, protoff, sizeof(_udph), &_udph);
+ if (uh == NULL) {
+ /* We've been asked to examine this packet, and we
+ can't. Hence, no choice but to drop. */
+ duprintf("Dropping evil UDP tinygram.\n");
+ *hotdrop = 1;
+ return 0;
+ }
+
+ return port_match(udpinfo->spts[0], udpinfo->spts[1],
+ ntohs(uh->source),
+ !!(udpinfo->invflags & XT_UDP_INV_SRCPT))
+ && port_match(udpinfo->dpts[0], udpinfo->dpts[1],
+ ntohs(uh->dest),
+ !!(udpinfo->invflags & XT_UDP_INV_DSTPT));
+}
+
+/* Called when user tries to insert an entry of this type. */
+static int
+udp_checkentry(const char *tablename,
+ const void *info,
+ void *matchinfo,
+ unsigned int matchinfosize,
+ unsigned int hook_mask)
+{
+ const struct ipt_ip *ip = info;
+ const struct xt_udp *udpinfo = matchinfo;
+
+ /* Must specify proto == UDP, and no unknown invflags */
+ if (ip->proto != IPPROTO_UDP || (ip->invflags & XT_INV_PROTO)) {
+ duprintf("ipt_udp: Protocol %u != %u\n", ip->proto,
+ IPPROTO_UDP);
+ return 0;
+ }
+ if (matchinfosize != XT_ALIGN(sizeof(struct xt_udp))) {
+ duprintf("ipt_udp: matchsize %u != %u\n",
+ matchinfosize, XT_ALIGN(sizeof(struct xt_udp)));
+ return 0;
+ }
+ if (udpinfo->invflags & ~XT_UDP_INV_MASK) {
+ duprintf("ipt_udp: unknown flags %X\n",
+ udpinfo->invflags);
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Called when user tries to insert an entry of this type. */
+static int
+udp6_checkentry(const char *tablename,
+ const void *entry,
+ void *matchinfo,
+ unsigned int matchinfosize,
+ unsigned int hook_mask)
+{
+ const struct ip6t_ip6 *ipv6 = entry;
+ const struct xt_udp *udpinfo = matchinfo;
+
+ /* Must specify proto == UDP, and no unknown invflags */
+ if (ipv6->proto != IPPROTO_UDP || (ipv6->invflags & XT_INV_PROTO)) {
+ duprintf("ip6t_udp: Protocol %u != %u\n", ipv6->proto,
+ IPPROTO_UDP);
+ return 0;
+ }
+ if (matchinfosize != XT_ALIGN(sizeof(struct xt_udp))) {
+ duprintf("ip6t_udp: matchsize %u != %u\n",
+ matchinfosize, XT_ALIGN(sizeof(struct xt_udp)));
+ return 0;
+ }
+ if (udpinfo->invflags & ~XT_UDP_INV_MASK) {
+ duprintf("ip6t_udp: unknown flags %X\n",
+ udpinfo->invflags);
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct xt_match tcp_matchstruct = {
+ .name = "tcp",
+ .match = &tcp_match,
+ .checkentry = &tcp_checkentry,
+ .me = THIS_MODULE,
+};
+static struct xt_match tcp6_matchstruct = {
+ .name = "tcp",
+ .match = &tcp_match,
+ .checkentry = &tcp6_checkentry,
+ .me = THIS_MODULE,
+};
+
+static struct xt_match udp_matchstruct = {
+ .name = "udp",
+ .match = &udp_match,
+ .checkentry = &udp_checkentry,
+ .me = THIS_MODULE,
+};
+static struct xt_match udp6_matchstruct = {
+ .name = "udp",
+ .match = &udp_match,
+ .checkentry = &udp6_checkentry,
+ .me = THIS_MODULE,
+};
+
+static int __init init(void)
+{
+ int ret;
+ ret = xt_register_match(AF_INET, &tcp_matchstruct);
+ if (ret)
+ return ret;
+
+ ret = xt_register_match(AF_INET6, &tcp6_matchstruct);
+ if (ret)
+ goto out_unreg_tcp;
+
+ ret = xt_register_match(AF_INET, &udp_matchstruct);
+ if (ret)
+ goto out_unreg_tcp6;
+
+ ret = xt_register_match(AF_INET6, &udp6_matchstruct);
+ if (ret)
+ goto out_unreg_udp;
+
+ return ret;
+
+out_unreg_udp:
+ xt_unregister_match(AF_INET, &tcp_matchstruct);
+out_unreg_tcp6:
+ xt_unregister_match(AF_INET6, &tcp6_matchstruct);
+out_unreg_tcp:
+ xt_unregister_match(AF_INET, &tcp_matchstruct);
+ return ret;
+}
+
+static void __exit fini(void)
+{
+ xt_unregister_match(AF_INET6, &udp6_matchstruct);
+ xt_unregister_match(AF_INET, &udp_matchstruct);
+ xt_unregister_match(AF_INET6, &tcp6_matchstruct);
+ xt_unregister_match(AF_INET, &tcp_matchstruct);
+}
+
+module_init(init);
+module_exit(fini);
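tcp_find_option() above walks the TCP option block one option at a time: kinds 0 (end of list) and 1 (NOP) occupy a single byte, every other option carries a length byte at op[i+1], and the "?: 1" keeps a bogus zero length from looping forever. A stand-alone walk over a raw option buffer (user-space sketch with made-up data, not the kernel routine itself):

#include <stdio.h>
#include <stdint.h>

/* Return 1 if TCP option `kind` is present in opt[0..optlen). Sketch only. */
static int tcp_has_option(const uint8_t *opt, unsigned int optlen, uint8_t kind)
{
	unsigned int i = 0;

	while (i < optlen) {
		if (opt[i] == kind)
			return 1;
		if (opt[i] < 2)		/* EOL (0) and NOP (1) are one byte */
			i++;
		else			/* other options: kind, length, data */
			i += opt[i + 1] ? opt[i + 1] : 1;
	}
	return 0;
}

int main(void)
{
	/* NOP, NOP, MSS (kind 2, len 4, value 1460), EOL -- a typical SYN layout */
	const uint8_t opts[] = { 1, 1, 2, 4, 0x05, 0xb4, 0 };

	printf("MSS present: %d\n", tcp_has_option(opts, sizeof(opts), 2));
	printf("SACK-permitted present: %d\n", tcp_has_option(opts, sizeof(opts), 4));
	return 0;
}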
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 7849cac14d3..2101b45d2ec 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -24,6 +24,7 @@
#include <linux/config.h>
#include <linux/module.h>
+#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
@@ -402,7 +403,7 @@ static int netlink_create(struct socket *sock, int protocol)
groups = nl_table[protocol].groups;
netlink_unlock_table();
- if ((err = __netlink_create(sock, protocol) < 0))
+ if ((err = __netlink_create(sock, protocol)) < 0)
goto out_module;
nlk = nlk_sk(sock->sk);
@@ -1422,7 +1423,7 @@ static int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
while (skb->len >= nlmsg_total_size(0)) {
nlh = (struct nlmsghdr *) skb->data;
- if (skb->len < nlh->nlmsg_len)
+ if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
return 0;
total_len = min(NLMSG_ALIGN(nlh->nlmsg_len), skb->len);
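The af_netlink change above hardens netlink_rcv_skb(): besides the existing skb->len check, nlmsg_len must be at least a full header, otherwise NLMSG_ALIGN(nlh->nlmsg_len) could advance by less than a header (or not at all) and the loop would misparse or spin on a malformed message. A user-space sketch of the same validation while iterating a buffer of netlink messages, using the standard <linux/netlink.h> macros:

#include <stdio.h>
#include <string.h>
#include <linux/netlink.h>

/* Walk a buffer of netlink messages, mirroring the hardened kernel loop:
 * reject a header whose nlmsg_len is shorter than the header itself. */
static void walk(const unsigned char *buf, unsigned int len)
{
	while (len >= NLMSG_HDRLEN) {
		const struct nlmsghdr *nlh = (const struct nlmsghdr *)buf;
		unsigned int total;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || len < nlh->nlmsg_len)
			return;		/* truncated or malformed: stop */

		printf("type %u, len %u\n",
		       (unsigned)nlh->nlmsg_type, (unsigned)nlh->nlmsg_len);

		total = NLMSG_ALIGN(nlh->nlmsg_len);
		if (total > len)
			total = len;	/* same clamping as min(..., skb->len) */
		buf += total;
		len -= total;
	}
}

int main(void)
{
	unsigned char buf[NLMSG_HDRLEN];
	struct nlmsghdr nlh = { .nlmsg_len = NLMSG_HDRLEN, .nlmsg_type = 0 };

	memcpy(buf, &nlh, sizeof(nlh));
	walk(buf, sizeof(buf));
	return 0;
}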
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 3b1378498d5..4ae1538c54a 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -222,11 +222,6 @@ int genl_register_family(struct genl_family *family)
goto errout_locked;
}
- if (!try_module_get(family->owner)) {
- err = -EBUSY;
- goto errout_locked;
- }
-
if (family->id == GENL_ID_GENERATE) {
u16 newid = genl_generate_id();
@@ -283,7 +278,6 @@ int genl_unregister_family(struct genl_family *family)
INIT_LIST_HEAD(&family->ops_list);
genl_unlock();
- module_put(family->owner);
kfree(family->attrbuf);
genl_ctrl_event(CTRL_CMD_DELFAMILY, family);
return 0;
@@ -535,7 +529,6 @@ static struct genl_family genl_ctrl = {
.name = "nlctrl",
.version = 0x1,
.maxattr = CTRL_ATTR_MAX,
- .owner = THIS_MODULE,
};
static int __init genl_init(void)
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 63b0e4afeb3..d44981f5a61 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -11,6 +11,7 @@
#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f69e5ed9bd0..ee93abc71cb 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -53,6 +53,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
@@ -1237,7 +1238,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
goto done;
err = -ENOBUFS;
- i = (struct packet_mclist *)kmalloc(sizeof(*i), GFP_KERNEL);
+ i = kmalloc(sizeof(*i), GFP_KERNEL);
if (i == NULL)
goto done;
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 63090be2315..ea65396d161 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -9,7 +9,9 @@
* Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
* Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
*/
+
#include <linux/config.h>
+#include <linux/capability.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c
index 2ba14a75dbb..0e0a4553499 100644
--- a/net/rxrpc/connection.c
+++ b/net/rxrpc/connection.c
@@ -220,6 +220,7 @@ int rxrpc_connection_lookup(struct rxrpc_peer *peer,
{
struct rxrpc_connection *conn, *candidate = NULL;
struct list_head *_p;
+ struct sk_buff *pkt = msg->pkt;
int ret, fresh = 0;
__be32 x_epoch, x_connid;
__be16 x_port, x_servid;
@@ -229,10 +230,10 @@ int rxrpc_connection_lookup(struct rxrpc_peer *peer,
_enter("%p{{%hu}},%u,%hu",
peer,
peer->trans->port,
- ntohs(msg->pkt->h.uh->source),
+ ntohs(pkt->h.uh->source),
ntohs(msg->hdr.serviceId));
- x_port = msg->pkt->h.uh->source;
+ x_port = pkt->h.uh->source;
x_epoch = msg->hdr.epoch;
x_clflag = msg->hdr.flags & RXRPC_CLIENT_INITIATED;
x_connid = htonl(ntohl(msg->hdr.cid) & RXRPC_CIDMASK);
@@ -267,7 +268,7 @@ int rxrpc_connection_lookup(struct rxrpc_peer *peer,
/* fill in the specifics */
candidate->addr.sin_family = AF_INET;
candidate->addr.sin_port = x_port;
- candidate->addr.sin_addr.s_addr = msg->pkt->nh.iph->saddr;
+ candidate->addr.sin_addr.s_addr = pkt->nh.iph->saddr;
candidate->in_epoch = x_epoch;
candidate->out_epoch = x_epoch;
candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
@@ -675,6 +676,7 @@ int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
struct rxrpc_message *msg)
{
struct rxrpc_message *pmsg;
+ struct dst_entry *dst;
struct list_head *_p;
unsigned cix, seq;
int ret = 0;
@@ -710,10 +712,10 @@ int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
call->pkt_rcv_count++;
- if (msg->pkt->dst && msg->pkt->dst->dev)
+ dst = msg->pkt->dst;
+ if (dst && dst->dev)
conn->peer->if_mtu =
- msg->pkt->dst->dev->mtu -
- msg->pkt->dst->dev->hard_header_len;
+ dst->dev->mtu - dst->dev->hard_header_len;
/* queue on the call in seq order */
rxrpc_get_message(msg);
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 55cd5327fbd..778b1e5a4b5 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -44,7 +44,7 @@ if NET_SCHED
choice
prompt "Packet scheduler clock source"
- default NET_SCH_CLK_JIFFIES
+ default NET_SCH_CLK_GETTIMEOFDAY
---help---
Packet schedulers need a monotonic clock that increments at a static
rate. The kernel provides several suitable interfaces, each with
@@ -411,7 +411,7 @@ config NET_EMATCH_META
tristate "Metadata"
depends on NET_EMATCH
---help---
- Say Y here if you want to be ablt to classify packets based on
+ Say Y here if you want to be able to classify packets based on
metadata such as load average, netfilter attributes, socket
attributes and routing decisions.
diff --git a/net/sched/Makefile b/net/sched/Makefile
index e48d0d456b3..0f06aec6609 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -7,13 +7,13 @@ obj-y := sch_generic.o
obj-$(CONFIG_NET_SCHED) += sch_api.o sch_fifo.o sch_blackhole.o
obj-$(CONFIG_NET_CLS) += cls_api.o
obj-$(CONFIG_NET_CLS_ACT) += act_api.o
-obj-$(CONFIG_NET_ACT_POLICE) += police.o
-obj-$(CONFIG_NET_CLS_POLICE) += police.o
-obj-$(CONFIG_NET_ACT_GACT) += gact.o
-obj-$(CONFIG_NET_ACT_MIRRED) += mirred.o
-obj-$(CONFIG_NET_ACT_IPT) += ipt.o
-obj-$(CONFIG_NET_ACT_PEDIT) += pedit.o
-obj-$(CONFIG_NET_ACT_SIMP) += simple.o
+obj-$(CONFIG_NET_ACT_POLICE) += act_police.o
+obj-$(CONFIG_NET_CLS_POLICE) += act_police.o
+obj-$(CONFIG_NET_ACT_GACT) += act_gact.o
+obj-$(CONFIG_NET_ACT_MIRRED) += act_mirred.o
+obj-$(CONFIG_NET_ACT_IPT) += act_ipt.o
+obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o
+obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o
obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
obj-$(CONFIG_NET_SCH_HPFQ) += sch_hpfq.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 2ce1cb2aa2e..792ce59940e 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -165,7 +165,7 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action *act,
while ((a = act) != NULL) {
repeat:
if (a->ops && a->ops->act) {
- ret = a->ops->act(&skb, a, res);
+ ret = a->ops->act(skb, a, res);
if (TC_MUNGED & skb->tc_verd) {
/* copied already, allow trampling */
skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
@@ -290,7 +290,7 @@ struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est,
if (a_o == NULL) {
#ifdef CONFIG_KMOD
rtnl_unlock();
- request_module(act_name);
+ request_module("act_%s", act_name);
rtnl_lock();
a_o = tc_lookup_action_n(act_name);
diff --git a/net/sched/gact.c b/net/sched/act_gact.c
index d1c6d542912..a1e68f78dcc 100644
--- a/net/sched/gact.c
+++ b/net/sched/act_gact.c
@@ -135,10 +135,9 @@ tcf_gact_cleanup(struct tc_action *a, int bind)
}
static int
-tcf_gact(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res)
+tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
{
struct tcf_gact *p = PRIV(a, gact);
- struct sk_buff *skb = *pskb;
int action = TC_ACT_SHOT;
spin_lock(&p->lock);
diff --git a/net/sched/ipt.c b/net/sched/act_ipt.c
index f50136eed21..39a22a3ffe7 100644
--- a/net/sched/ipt.c
+++ b/net/sched/act_ipt.c
@@ -62,7 +62,7 @@ ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int hook)
struct ipt_target *target;
int ret = 0;
- target = ipt_find_target(t->u.user.name, t->u.user.revision);
+ target = xt_find_target(AF_INET, t->u.user.name, t->u.user.revision);
if (!target)
return -ENOENT;
@@ -201,11 +201,10 @@ tcf_ipt_cleanup(struct tc_action *a, int bind)
}
static int
-tcf_ipt(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res)
+tcf_ipt(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
{
int ret = 0, result = 0;
struct tcf_ipt *p = PRIV(a, ipt);
- struct sk_buff *skb = *pskb;
if (skb_cloned(skb)) {
if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
@@ -222,6 +221,9 @@ tcf_ipt(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res)
worry later - danger - this API seems to have changed
from earlier kernels */
+ /* iptables targets take a double skb pointer in case the skb
+ * needs to be replaced. We don't own the skb, so this must not
+ * happen. The pskb_expand_head above should make sure of this */
ret = p->t->u.kernel.target->target(&skb, skb->dev, NULL,
p->hook, p->t->data, NULL);
switch (ret) {
diff --git a/net/sched/mirred.c b/net/sched/act_mirred.c
index 20d06916dc0..4fcccbd5088 100644
--- a/net/sched/mirred.c
+++ b/net/sched/act_mirred.c
@@ -158,12 +158,11 @@ tcf_mirred_cleanup(struct tc_action *a, int bind)
}
static int
-tcf_mirred(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res)
+tcf_mirred(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
{
struct tcf_mirred *p = PRIV(a, mirred);
struct net_device *dev;
struct sk_buff *skb2 = NULL;
- struct sk_buff *skb = *pskb;
u32 at = G_TC_AT(skb->tc_verd);
spin_lock(&p->lock);
diff --git a/net/sched/pedit.c b/net/sched/act_pedit.c
index 767d24f4610..1742a68e012 100644
--- a/net/sched/pedit.c
+++ b/net/sched/act_pedit.c
@@ -130,10 +130,9 @@ tcf_pedit_cleanup(struct tc_action *a, int bind)
}
static int
-tcf_pedit(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res)
+tcf_pedit(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
{
struct tcf_pedit *p = PRIV(a, pedit);
- struct sk_buff *skb = *pskb;
int i, munged = 0;
u8 *pptr;
@@ -246,10 +245,12 @@ tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,int bind, int ref)
t.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse);
t.expires = jiffies_to_clock_t(p->tm.expires);
RTA_PUT(skb, TCA_PEDIT_TM, sizeof(t), &t);
+ kfree(opt);
return skb->len;
rtattr_failure:
skb_trim(skb, b - skb->data);
+ kfree(opt);
return -1;
}
diff --git a/net/sched/police.c b/net/sched/act_police.c
index eb39fb2f39b..fa877f8f652 100644
--- a/net/sched/police.c
+++ b/net/sched/act_police.c
@@ -284,11 +284,10 @@ static int tcf_act_police_cleanup(struct tc_action *a, int bind)
return 0;
}
-static int tcf_act_police(struct sk_buff **pskb, struct tc_action *a,
+static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
struct tcf_result *res)
{
psched_time_t now;
- struct sk_buff *skb = *pskb;
struct tcf_police *p = PRIV(a);
long toks;
long ptoks = 0;
@@ -408,7 +407,7 @@ police_cleanup_module(void)
module_init(police_init_module);
module_exit(police_cleanup_module);
-#endif
+#else /* CONFIG_NET_CLS_ACT */
struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est)
{
@@ -545,6 +544,7 @@ int tcf_police(struct sk_buff *skb, struct tcf_police *p)
spin_unlock(&p->lock);
return p->action;
}
+EXPORT_SYMBOL(tcf_police);
int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p)
{
@@ -601,13 +601,4 @@ errout:
return -1;
}
-
-EXPORT_SYMBOL(tcf_police);
-EXPORT_SYMBOL(tcf_police_destroy);
-EXPORT_SYMBOL(tcf_police_dump);
-EXPORT_SYMBOL(tcf_police_dump_stats);
-EXPORT_SYMBOL(tcf_police_hash);
-EXPORT_SYMBOL(tcf_police_ht);
-EXPORT_SYMBOL(tcf_police_locate);
-EXPORT_SYMBOL(tcf_police_lookup);
-EXPORT_SYMBOL(tcf_police_new_index);
+#endif /* CONFIG_NET_CLS_ACT */
diff --git a/net/sched/simple.c b/net/sched/act_simple.c
index 8a6ae4f491e..e5f2e1f431e 100644
--- a/net/sched/simple.c
+++ b/net/sched/act_simple.c
@@ -44,9 +44,8 @@ static DEFINE_RWLOCK(simp_lock);
#include <net/pkt_act.h>
#include <net/act_generic.h>
-static int tcf_simp(struct sk_buff **pskb, struct tc_action *a, struct tcf_result *res)
+static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
{
- struct sk_buff *skb = *pskb;
struct tcf_defact *p = PRIV(a, defact);
spin_lock(&p->lock);
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 64b047c6556..5cb956b721e 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -92,7 +92,6 @@
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/pkt_cls.h>
-#include <config/net/ematch/stack.h>
static LIST_HEAD(ematch_ops);
static DEFINE_RWLOCK(ematch_mod_lock);
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 09453f997d8..6cd81708bf7 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -257,7 +257,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
(cl = cbq_class_lookup(q, prio)) != NULL)
return cl;
- *qerr = NET_XMIT_DROP;
+ *qerr = NET_XMIT_BYPASS;
for (;;) {
int result = 0;
defmap = head->defaults;
@@ -413,7 +413,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
q->rx_class = cl;
#endif
if (cl == NULL) {
- if (ret == NET_XMIT_DROP)
+ if (ret == NET_XMIT_BYPASS)
sch->qstats.drops++;
kfree_skb(skb);
return ret;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index c26764bc410..91132f6871d 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -208,7 +208,7 @@ struct hfsc_sched
do { \
struct timeval tv; \
do_gettimeofday(&tv); \
- (stamp) = 1000000ULL * tv.tv_sec + tv.tv_usec; \
+ (stamp) = 1ULL * USEC_PER_SEC * tv.tv_sec + tv.tv_usec; \
} while (0)
#endif
@@ -502,8 +502,8 @@ d2dx(u32 d)
u64 dx;
dx = ((u64)d * PSCHED_JIFFIE2US(HZ));
- dx += 1000000 - 1;
- do_div(dx, 1000000);
+ dx += USEC_PER_SEC - 1;
+ do_div(dx, USEC_PER_SEC);
return dx;
}
@@ -523,7 +523,7 @@ dx2d(u64 dx)
{
u64 d;
- d = dx * 1000000;
+ d = dx * USEC_PER_SEC;
do_div(d, PSCHED_JIFFIE2US(HZ));
return (u32)d;
}
@@ -1227,7 +1227,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
if (cl->level == 0)
return cl;
- *qerr = NET_XMIT_DROP;
+ *qerr = NET_XMIT_BYPASS;
tcf = q->root.filter_list;
while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
@@ -1643,7 +1643,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cl = hfsc_classify(skb, sch, &err);
if (cl == NULL) {
- if (err == NET_XMIT_DROP)
+ if (err == NET_XMIT_BYPASS)
sch->qstats.drops++;
kfree_skb(skb);
return err;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 558cc087e60..3ec95df4a85 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -321,7 +321,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, in
if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0)
return cl;
- *qerr = NET_XMIT_DROP;
+ *qerr = NET_XMIT_BYPASS;
tcf = q->filter_list;
while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
@@ -724,7 +724,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
#ifdef CONFIG_NET_CLS_ACT
} else if (!cl) {
- if (ret == NET_XMIT_DROP)
+ if (ret == NET_XMIT_BYPASS)
sch->qstats.drops++;
kfree_skb (skb);
return ret;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 3ac0f495bad..5b3a3e48ed9 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -54,7 +54,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
u32 band = skb->priority;
struct tcf_result res;
- *qerr = NET_XMIT_DROP;
+ *qerr = NET_XMIT_BYPASS;
if (TC_H_MAJ(skb->priority) != sch->handle) {
#ifdef CONFIG_NET_CLS_ACT
switch (tc_classify(skb, q->filter_list, &res)) {
@@ -91,7 +91,8 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
qdisc = prio_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
if (qdisc == NULL) {
- if (ret == NET_XMIT_DROP)
+
+ if (ret == NET_XMIT_BYPASS)
sch->qstats.drops++;
kfree_skb(skb);
return ret;
@@ -118,7 +119,7 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
qdisc = prio_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
if (qdisc == NULL) {
- if (ret == NET_XMIT_DROP)
+ if (ret == NET_XMIT_BYPASS)
sch->qstats.drops++;
kfree_skb(skb);
return ret;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index c4a2a8c4c33..79b8ef34c6e 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -274,7 +274,7 @@ teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *de
static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct teql_master *master = (void*)dev->priv;
+ struct teql_master *master = netdev_priv(dev);
struct Qdisc *start, *q;
int busy;
int nores;
@@ -350,7 +350,7 @@ drop:
static int teql_master_open(struct net_device *dev)
{
struct Qdisc * q;
- struct teql_master *m = (void*)dev->priv;
+ struct teql_master *m = netdev_priv(dev);
int mtu = 0xFFFE;
unsigned flags = IFF_NOARP|IFF_MULTICAST;
@@ -397,13 +397,13 @@ static int teql_master_close(struct net_device *dev)
static struct net_device_stats *teql_master_stats(struct net_device *dev)
{
- struct teql_master *m = (void*)dev->priv;
+ struct teql_master *m = netdev_priv(dev);
return &m->stats;
}
static int teql_master_mtu(struct net_device *dev, int new_mtu)
{
- struct teql_master *m = (void*)dev->priv;
+ struct teql_master *m = netdev_priv(dev);
struct Qdisc *q;
if (new_mtu < 68)
@@ -423,7 +423,7 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu)
static __init void teql_master_setup(struct net_device *dev)
{
- struct teql_master *master = dev->priv;
+ struct teql_master *master = netdev_priv(dev);
struct Qdisc_ops *ops = &master->qops;
master->dev = dev;
@@ -476,7 +476,7 @@ static int __init teql_init(void)
break;
}
- master = dev->priv;
+ master = netdev_priv(dev);
strlcpy(master->qops.id, dev->name, IFNAMSIZ);
err = register_qdisc(&master->qops);
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 238f1bffa68..4aa6fc60357 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -225,6 +225,7 @@ int sctp_rcv(struct sk_buff *skb)
if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
goto discard_release;
+ nf_reset(skb);
ret = sk_filter(sk, skb, 1);
if (ret)
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 15c05165c90..2e266129a76 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -180,8 +180,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport,
}
SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, "
- "src:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x "
- "dst:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ "src:" NIP6_FMT " dst:" NIP6_FMT "\n",
__FUNCTION__, skb, skb->len,
NIP6(fl.fl6_src), NIP6(fl.fl6_dst));
@@ -206,13 +205,13 @@ static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc,
fl.oif = daddr->v6.sin6_scope_id;
- SCTP_DEBUG_PRINTK("%s: DST=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ",
+ SCTP_DEBUG_PRINTK("%s: DST=" NIP6_FMT " ",
__FUNCTION__, NIP6(fl.fl6_dst));
if (saddr) {
ipv6_addr_copy(&fl.fl6_src, &saddr->v6.sin6_addr);
SCTP_DEBUG_PRINTK(
- "SRC=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x - ",
+ "SRC=" NIP6_FMT " - ",
NIP6(fl.fl6_src));
}
@@ -221,8 +220,7 @@ static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc,
struct rt6_info *rt;
rt = (struct rt6_info *)dst;
SCTP_DEBUG_PRINTK(
- "rt6_dst:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x "
- "rt6_src:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ "rt6_dst:" NIP6_FMT " rt6_src:" NIP6_FMT "\n",
NIP6(rt->rt6i_dst.addr), NIP6(rt->rt6i_src.addr));
} else {
SCTP_DEBUG_PRINTK("NO ROUTE\n");
@@ -271,13 +269,12 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
__u8 bmatchlen;
SCTP_DEBUG_PRINTK("%s: asoc:%p dst:%p "
- "daddr:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ",
+ "daddr:" NIP6_FMT " ",
__FUNCTION__, asoc, dst, NIP6(daddr->v6.sin6_addr));
if (!asoc) {
ipv6_get_saddr(dst, &daddr->v6.sin6_addr,&saddr->v6.sin6_addr);
- SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: "
- "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: " NIP6_FMT "\n",
NIP6(saddr->v6.sin6_addr));
return;
}
@@ -305,13 +302,11 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
if (baddr) {
memcpy(saddr, baddr, sizeof(union sctp_addr));
- SCTP_DEBUG_PRINTK("saddr: "
- "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ SCTP_DEBUG_PRINTK("saddr: " NIP6_FMT "\n",
NIP6(saddr->v6.sin6_addr));
} else {
printk(KERN_ERR "%s: asoc:%p Could not find a valid source "
- "address for the "
- "dest:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ "address for the dest:" NIP6_FMT "\n",
__FUNCTION__, asoc, NIP6(daddr->v6.sin6_addr));
}
@@ -675,8 +670,7 @@ static int sctp_v6_is_ce(const struct sk_buff *skb)
/* Dump the v6 addr to the seq file. */
static void sctp_v6_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
{
- seq_printf(seq, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ",
- NIP6(addr->v6.sin6_addr));
+ seq_printf(seq, NIP6_FMT " ", NIP6(addr->v6.sin6_addr));
}
/* Initialize a PF_INET6 socket msg_name. */
@@ -905,7 +899,7 @@ static struct inet_protosw sctpv6_stream_protosw = {
.flags = SCTP_PROTOSW_FLAG,
};
-static int sctp6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
+static int sctp6_rcv(struct sk_buff **pskb)
{
return sctp_rcv(*pskb) ? -1 : 0;
}
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index f9573eba5c7..556c495c692 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1287,7 +1287,7 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
- (bodysize % SCTP_COOKIE_MULTIPLE);
*cookie_len = headersize + bodysize;
- retval = (sctp_cookie_param_t *)kmalloc(*cookie_len, GFP_ATOMIC);
+ retval = kmalloc(*cookie_len, GFP_ATOMIC);
if (!retval) {
*cookie_len = 0;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 2d7d8a5db2a..b8b38aba92b 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1250,8 +1250,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
case SCTP_CMD_TIMER_START:
timer = &asoc->timers[cmd->obj.to];
timeout = asoc->timeouts[cmd->obj.to];
- if (!timeout)
- BUG();
+ BUG_ON(!timeout);
timer->expires = jiffies + timeout;
sctp_association_hold(asoc);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 557a7d90b92..477d7f80dba 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1036,14 +1036,14 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
if (from_addr.sa.sa_family == AF_INET6) {
printk(KERN_WARNING
"%s association %p could not find address "
- "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ NIP6_FMT "\n",
__FUNCTION__,
asoc,
NIP6(from_addr.v6.sin6_addr));
} else {
printk(KERN_WARNING
"%s association %p could not find address "
- "%u.%u.%u.%u\n",
+ NIPQUAD_FMT "\n",
__FUNCTION__,
asoc,
NIPQUAD(from_addr.v4.sin_addr.s_addr));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index fc04d185fa3..c98ee375ba5 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -63,6 +63,7 @@
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ip.h>
+#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
@@ -860,7 +861,7 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
return -EFAULT;
/* Alloc space for the address array in kernel memory. */
- kaddrs = (struct sockaddr *)kmalloc(addrs_size, GFP_KERNEL);
+ kaddrs = kmalloc(addrs_size, GFP_KERNEL);
if (unlikely(!kaddrs))
return -ENOMEM;
@@ -1150,7 +1151,7 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
return -EFAULT;
/* Alloc space for the address array in kernel memory. */
- kaddrs = (struct sockaddr *)kmalloc(addrs_size, GFP_KERNEL);
+ kaddrs = kmalloc(addrs_size, GFP_KERNEL);
if (unlikely(!kaddrs))
return -ENOMEM;
diff --git a/net/socket.c b/net/socket.c
index 06fa217f58a..b38a263853c 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -993,7 +993,7 @@ static int sock_fasync(int fd, struct file *filp, int on)
if (on)
{
- fna=(struct fasync_struct *)kmalloc(sizeof(struct fasync_struct), GFP_KERNEL);
+ fna = kmalloc(sizeof(struct fasync_struct), GFP_KERNEL);
if(fna==NULL)
return -ENOMEM;
}
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 8c7756036e9..9ac1b8c26c0 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -94,7 +94,7 @@ rpcauth_init_credcache(struct rpc_auth *auth, unsigned long expire)
struct rpc_cred_cache *new;
int i;
- new = (struct rpc_cred_cache *)kmalloc(sizeof(*new), GFP_KERNEL);
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return -ENOMEM;
for (i = 0; i < RPC_CREDCACHE_NR; i++)
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 5f1f806a0b1..129e2bd36af 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -97,13 +97,17 @@ get_key(const void *p, const void *end, struct crypto_tfm **res)
alg_mode = CRYPTO_TFM_MODE_CBC;
break;
default:
- dprintk("RPC: get_key: unsupported algorithm %d\n", alg);
+ printk("gss_kerberos_mech: unsupported algorithm %d\n", alg);
goto out_err_free_key;
}
- if (!(*res = crypto_alloc_tfm(alg_name, alg_mode)))
+ if (!(*res = crypto_alloc_tfm(alg_name, alg_mode))) {
+ printk("gss_kerberos_mech: unable to initialize crypto algorithm %s\n", alg_name);
goto out_err_free_key;
- if (crypto_cipher_setkey(*res, key.data, key.len))
+ }
+ if (crypto_cipher_setkey(*res, key.data, key.len)) {
+ printk("gss_kerberos_mech: error setting key for crypto algorithm %s\n", alg_name);
goto out_err_free_tfm;
+ }
kfree(key.data);
return p;
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 39b3edc1469..58400807d4d 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -111,14 +111,18 @@ get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg)
setkey = 0;
break;
default:
- dprintk("RPC: SPKM3 get_key: unsupported algorithm %d", *resalg);
+ dprintk("gss_spkm3_mech: unsupported algorithm %d\n", *resalg);
goto out_err_free_key;
}
- if (!(*res = crypto_alloc_tfm(alg_name, alg_mode)))
+ if (!(*res = crypto_alloc_tfm(alg_name, alg_mode))) {
+ printk("gss_spkm3_mech: unable to initialize crypto algorthm %s\n", alg_name);
goto out_err_free_key;
+ }
if (setkey) {
- if (crypto_cipher_setkey(*res, key.data, key.len))
+ if (crypto_cipher_setkey(*res, key.data, key.len)) {
+ printk("gss_spkm3_mech: error setting key for crypto algorthm %s\n", alg_name);
goto out_err_free_tfm;
+ }
}
if(key.len > 0)
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c
index d1e12b25d6e..86fbf7c3e39 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_seal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c
@@ -59,7 +59,7 @@ spkm3_make_token(struct spkm3_ctx *ctx,
char tokhdrbuf[25];
struct xdr_netobj md5cksum = {.len = 0, .data = NULL};
struct xdr_netobj mic_hdr = {.len = 0, .data = tokhdrbuf};
- int tmsglen, tokenlen = 0;
+ int tokenlen = 0;
unsigned char *ptr;
s32 now;
int ctxelen = 0, ctxzbit = 0;
@@ -92,24 +92,23 @@ spkm3_make_token(struct spkm3_ctx *ctx,
}
if (toktype == SPKM_MIC_TOK) {
- tmsglen = 0;
/* Calculate checksum over the mic-header */
asn1_bitstring_len(&ctx->ctx_id, &ctxelen, &ctxzbit);
spkm3_mic_header(&mic_hdr.data, &mic_hdr.len, ctx->ctx_id.data,
ctxelen, ctxzbit);
if (make_checksum(checksum_type, mic_hdr.data, mic_hdr.len,
- text, &md5cksum))
+ text, 0, &md5cksum))
goto out_err;
asn1_bitstring_len(&md5cksum, &md5elen, &md5zbit);
- tokenlen = 10 + ctxelen + 1 + 2 + md5elen + 1;
+ tokenlen = 10 + ctxelen + 1 + md5elen + 1;
/* Create token header using generic routines */
- token->len = g_token_size(&ctx->mech_used, tokenlen + tmsglen);
+ token->len = g_token_size(&ctx->mech_used, tokenlen);
ptr = token->data;
- g_make_token_header(&ctx->mech_used, tokenlen + tmsglen, &ptr);
+ g_make_token_header(&ctx->mech_used, tokenlen, &ptr);
spkm3_make_mic_token(&ptr, tokenlen, &mic_hdr, &md5cksum, md5elen, md5zbit);
} else if (toktype == SPKM_WRAP_TOK) { /* Not Supported */
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c
index 1f824578d77..af0d7ce7468 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_token.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_token.c
@@ -182,6 +182,7 @@ spkm3_mic_header(unsigned char **hdrbuf, unsigned int *hdrlen, unsigned char *ct
* *tokp points to the beginning of the SPKM_MIC token described
* in rfc 2025, section 3.2.1:
*
+ * toklen is the inner token length
*/
void
spkm3_make_mic_token(unsigned char **tokp, int toklen, struct xdr_netobj *mic_hdr, struct xdr_netobj *md5cksum, int md5elen, int md5zbit)
@@ -189,7 +190,7 @@ spkm3_make_mic_token(unsigned char **tokp, int toklen, struct xdr_netobj *mic_hd
unsigned char *ict = *tokp;
*(u8 *)ict++ = 0xa4;
- *(u8 *)ict++ = toklen - 2;
+ *(u8 *)ict++ = toklen;
memcpy(ict, mic_hdr->data, mic_hdr->len);
ict += mic_hdr->len;
diff --git a/net/sunrpc/auth_gss/gss_spkm3_unseal.c b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
index 241d5b30dfc..96851b0ba1b 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_unseal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
@@ -95,7 +95,7 @@ spkm3_read_token(struct spkm3_ctx *ctx,
ret = GSS_S_DEFECTIVE_TOKEN;
code = make_checksum(CKSUMTYPE_RSA_MD5, ptr + 2,
mic_hdrlen + 2,
- message_buffer, &md5cksum);
+ message_buffer, 0, &md5cksum);
if (code)
goto out;
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 890fb5ea0dc..1b3ed4fd198 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -70,7 +70,7 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
dprintk("RPC: allocating UNIX cred for uid %d gid %d\n",
acred->uid, acred->gid);
- if (!(cred = (struct unx_cred *) kmalloc(sizeof(*cred), GFP_KERNEL)))
+ if (!(cred = kmalloc(sizeof(*cred), GFP_KERNEL)))
return ERR_PTR(-ENOMEM);
atomic_set(&cred->uc_count, 1);
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index f509e999276..dcaa0c4453f 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -575,12 +575,11 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
if (rp->q.list.next == &cd->queue) {
spin_unlock(&queue_lock);
up(&queue_io_sem);
- if (rp->offset)
- BUG();
+ BUG_ON(rp->offset);
return 0;
}
rq = container_of(rp->q.list.next, struct cache_request, q.list);
- if (rq->q.reader) BUG();
+ BUG_ON(rq->q.reader);
if (rp->offset == 0)
rq->readers++;
spin_unlock(&queue_lock);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 61c3abeacca..d2f0550c4ba 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -118,7 +118,7 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname,
goto out_err;
err = -ENOMEM;
- clnt = (struct rpc_clnt *) kmalloc(sizeof(*clnt), GFP_KERNEL);
+ clnt = kmalloc(sizeof(*clnt), GFP_KERNEL);
if (!clnt)
goto out_err;
memset(clnt, 0, sizeof(*clnt));
@@ -225,7 +225,7 @@ rpc_clone_client(struct rpc_clnt *clnt)
{
struct rpc_clnt *new;
- new = (struct rpc_clnt *)kmalloc(sizeof(*new), GFP_KERNEL);
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
if (!new)
goto out_no_clnt;
memcpy(new, clnt, sizeof(*new));
@@ -268,7 +268,8 @@ rpc_shutdown_client(struct rpc_clnt *clnt)
clnt->cl_oneshot = 0;
clnt->cl_dead = 0;
rpc_killall_tasks(clnt);
- sleep_on_timeout(&destroy_wait, 1*HZ);
+ wait_event_timeout(destroy_wait,
+ !atomic_read(&clnt->cl_users), 1*HZ);
}
if (atomic_read(&clnt->cl_users) < 0) {
@@ -374,19 +375,23 @@ out:
* Default callback for async RPC calls
*/
static void
-rpc_default_callback(struct rpc_task *task)
+rpc_default_callback(struct rpc_task *task, void *data)
{
}
+static const struct rpc_call_ops rpc_default_ops = {
+ .rpc_call_done = rpc_default_callback,
+};
+
/*
* Export the signal mask handling for synchronous code that
* sleeps on RPC calls
*/
-#define RPC_INTR_SIGNALS (sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGKILL))
+#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))
static void rpc_save_sigmask(sigset_t *oldset, int intr)
{
- unsigned long sigallow = 0;
+ unsigned long sigallow = sigmask(SIGKILL);
sigset_t sigmask;
/* Block all signals except those listed in sigallow */
@@ -432,7 +437,7 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
BUG_ON(flags & RPC_TASK_ASYNC);
status = -ENOMEM;
- task = rpc_new_task(clnt, NULL, flags);
+ task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL);
if (task == NULL)
goto out;
@@ -442,14 +447,15 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
rpc_call_setup(task, msg, 0);
/* Set up the call info struct and execute the task */
- if (task->tk_status == 0) {
+ status = task->tk_status;
+ if (status == 0) {
+ atomic_inc(&task->tk_count);
status = rpc_execute(task);
- } else {
- status = task->tk_status;
- rpc_release_task(task);
+ if (status == 0)
+ status = task->tk_status;
}
-
rpc_restore_sigmask(&oldset);
+ rpc_release_task(task);
out:
return status;
}
@@ -459,7 +465,7 @@ out:
*/
int
rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
- rpc_action callback, void *data)
+ const struct rpc_call_ops *tk_ops, void *data)
{
struct rpc_task *task;
sigset_t oldset;
@@ -472,12 +478,9 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
flags |= RPC_TASK_ASYNC;
/* Create/initialize a new RPC task */
- if (!callback)
- callback = rpc_default_callback;
status = -ENOMEM;
- if (!(task = rpc_new_task(clnt, callback, flags)))
+ if (!(task = rpc_new_task(clnt, flags, tk_ops, data)))
goto out;
- task->tk_calldata = data;
/* Mask signals on GSS_AUTH upcalls */
rpc_task_sigmask(task, &oldset);
@@ -511,7 +514,7 @@ rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
if (task->tk_status == 0)
task->tk_action = call_start;
else
- task->tk_action = NULL;
+ task->tk_action = rpc_exit_task;
}
void
@@ -536,6 +539,18 @@ size_t rpc_max_payload(struct rpc_clnt *clnt)
}
EXPORT_SYMBOL(rpc_max_payload);
+/**
+ * rpc_force_rebind - force transport to check that remote port is unchanged
+ * @clnt: client to rebind
+ *
+ */
+void rpc_force_rebind(struct rpc_clnt *clnt)
+{
+ if (clnt->cl_autobind)
+ clnt->cl_port = 0;
+}
+EXPORT_SYMBOL(rpc_force_rebind);
+
/*
* Restart an (async) RPC call. Usually called from within the
* exit handler.
@@ -642,24 +657,26 @@ call_reserveresult(struct rpc_task *task)
/*
* 2. Allocate the buffer. For details, see sched.c:rpc_malloc.
- * (Note: buffer memory is freed in rpc_task_release).
+ * (Note: buffer memory is freed in xprt_release).
*/
static void
call_allocate(struct rpc_task *task)
{
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = task->tk_xprt;
unsigned int bufsiz;
dprintk("RPC: %4d call_allocate (status %d)\n",
task->tk_pid, task->tk_status);
task->tk_action = call_bind;
- if (task->tk_buffer)
+ if (req->rq_buffer)
return;
/* FIXME: compute buffer requirements more exactly using
* auth->au_wslack */
bufsiz = task->tk_msg.rpc_proc->p_bufsiz + RPC_SLACK_SPACE;
- if (rpc_malloc(task, bufsiz << 1) != NULL)
+ if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL)
return;
printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);
@@ -702,14 +719,14 @@ call_encode(struct rpc_task *task)
task->tk_pid, task->tk_status);
/* Default buffer setup */
- bufsiz = task->tk_bufsize >> 1;
- sndbuf->head[0].iov_base = (void *)task->tk_buffer;
+ bufsiz = req->rq_bufsize >> 1;
+ sndbuf->head[0].iov_base = (void *)req->rq_buffer;
sndbuf->head[0].iov_len = bufsiz;
sndbuf->tail[0].iov_len = 0;
sndbuf->page_len = 0;
sndbuf->len = 0;
sndbuf->buflen = bufsiz;
- rcvbuf->head[0].iov_base = (void *)((char *)task->tk_buffer + bufsiz);
+ rcvbuf->head[0].iov_base = (void *)((char *)req->rq_buffer + bufsiz);
rcvbuf->head[0].iov_len = bufsiz;
rcvbuf->tail[0].iov_len = 0;
rcvbuf->page_len = 0;
@@ -849,8 +866,7 @@ call_connect_status(struct rpc_task *task)
}
/* Something failed: remote service port may have changed */
- if (clnt->cl_autobind)
- clnt->cl_port = 0;
+ rpc_force_rebind(clnt);
switch (status) {
case -ENOTCONN:
@@ -892,7 +908,7 @@ call_transmit(struct rpc_task *task)
if (task->tk_status < 0)
return;
if (!task->tk_msg.rpc_proc->p_decode) {
- task->tk_action = NULL;
+ task->tk_action = rpc_exit_task;
rpc_wake_up_task(task);
}
return;
@@ -931,8 +947,7 @@ call_status(struct rpc_task *task)
break;
case -ECONNREFUSED:
case -ENOTCONN:
- if (clnt->cl_autobind)
- clnt->cl_port = 0;
+ rpc_force_rebind(clnt);
task->tk_action = call_bind;
break;
case -EAGAIN:
@@ -943,8 +958,7 @@ call_status(struct rpc_task *task)
rpc_exit(task, status);
break;
default:
- if (clnt->cl_chatty)
- printk("%s: RPC call returned error %d\n",
+ printk("%s: RPC call returned error %d\n",
clnt->cl_protname, -status);
rpc_exit(task, status);
break;
@@ -979,20 +993,18 @@ call_timeout(struct rpc_task *task)
dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
if (RPC_IS_SOFT(task)) {
- if (clnt->cl_chatty)
- printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
+ printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
clnt->cl_protname, clnt->cl_server);
rpc_exit(task, -EIO);
return;
}
- if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
+ if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
task->tk_flags |= RPC_CALL_MAJORSEEN;
printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
clnt->cl_protname, clnt->cl_server);
}
- if (clnt->cl_autobind)
- clnt->cl_port = 0;
+ rpc_force_rebind(clnt);
retry:
clnt->cl_stats->rpcretrans++;
@@ -1014,7 +1026,7 @@ call_decode(struct rpc_task *task)
dprintk("RPC: %4d call_decode (status %d)\n",
task->tk_pid, task->tk_status);
- if (clnt->cl_chatty && (task->tk_flags & RPC_CALL_MAJORSEEN)) {
+ if (task->tk_flags & RPC_CALL_MAJORSEEN) {
printk(KERN_NOTICE "%s: server %s OK\n",
clnt->cl_protname, clnt->cl_server);
task->tk_flags &= ~RPC_CALL_MAJORSEEN;
@@ -1039,13 +1051,14 @@ call_decode(struct rpc_task *task)
sizeof(req->rq_rcv_buf)) != 0);
/* Verify the RPC header */
- if (!(p = call_verify(task))) {
- if (task->tk_action == NULL)
- return;
- goto out_retry;
+ p = call_verify(task);
+ if (IS_ERR(p)) {
+ if (p == ERR_PTR(-EAGAIN))
+ goto out_retry;
+ return;
}
- task->tk_action = NULL;
+ task->tk_action = rpc_exit_task;
if (decode)
task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
@@ -1138,7 +1151,7 @@ call_verify(struct rpc_task *task)
if ((n = ntohl(*p++)) != RPC_REPLY) {
printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n);
- goto out_retry;
+ goto out_garbage;
}
if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
if (--len < 0)
@@ -1168,7 +1181,7 @@ call_verify(struct rpc_task *task)
task->tk_pid);
rpcauth_invalcred(task);
task->tk_action = call_refresh;
- return NULL;
+ goto out_retry;
case RPC_AUTH_BADCRED:
case RPC_AUTH_BADVERF:
/* possibly garbled cred/verf? */
@@ -1178,7 +1191,7 @@ call_verify(struct rpc_task *task)
dprintk("RPC: %4d call_verify: retry garbled creds\n",
task->tk_pid);
task->tk_action = call_bind;
- return NULL;
+ goto out_retry;
case RPC_AUTH_TOOWEAK:
printk(KERN_NOTICE "call_verify: server requires stronger "
"authentication.\n");
@@ -1193,7 +1206,7 @@ call_verify(struct rpc_task *task)
}
if (!(p = rpcauth_checkverf(task, p))) {
printk(KERN_WARNING "call_verify: auth check failed\n");
- goto out_retry; /* bad verifier, retry */
+ goto out_garbage; /* bad verifier, retry */
}
len = p - (u32 *)iov->iov_base - 1;
if (len < 0)
@@ -1230,23 +1243,24 @@ call_verify(struct rpc_task *task)
/* Also retry */
}
-out_retry:
+out_garbage:
task->tk_client->cl_stats->rpcgarbage++;
if (task->tk_garb_retry) {
task->tk_garb_retry--;
dprintk("RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid);
task->tk_action = call_bind;
- return NULL;
+out_retry:
+ return ERR_PTR(-EAGAIN);
}
printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__);
out_eio:
error = -EIO;
out_err:
rpc_exit(task, error);
- return NULL;
+ return ERR_PTR(error);
out_overflow:
printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__);
- goto out_retry;
+ goto out_garbage;
}
static int rpcproc_encode_null(void *rqstp, u32 *data, void *obj)
diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c
index a398575f94b..8139ce68e91 100644
--- a/net/sunrpc/pmap_clnt.c
+++ b/net/sunrpc/pmap_clnt.c
@@ -90,8 +90,7 @@ bailout:
map->pm_binding = 0;
rpc_wake_up(&map->pm_bindwait);
spin_unlock(&pmap_lock);
- task->tk_status = -EIO;
- task->tk_action = NULL;
+ rpc_exit(task, -EIO);
}
#ifdef CONFIG_ROOT_NFS
@@ -132,21 +131,22 @@ static void
pmap_getport_done(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
+ struct rpc_xprt *xprt = task->tk_xprt;
struct rpc_portmap *map = clnt->cl_pmap;
dprintk("RPC: %4d pmap_getport_done(status %d, port %d)\n",
task->tk_pid, task->tk_status, clnt->cl_port);
+
+ xprt->ops->set_port(xprt, 0);
if (task->tk_status < 0) {
/* Make the calling task exit with an error */
- task->tk_action = NULL;
+ task->tk_action = rpc_exit_task;
} else if (clnt->cl_port == 0) {
/* Program not registered */
- task->tk_status = -EACCES;
- task->tk_action = NULL;
+ rpc_exit(task, -EACCES);
} else {
- /* byte-swap port number first */
+ xprt->ops->set_port(xprt, clnt->cl_port);
clnt->cl_port = htons(clnt->cl_port);
- clnt->cl_xprt->addr.sin_port = clnt->cl_port;
}
spin_lock(&pmap_lock);
map->pm_binding = 0;
@@ -207,7 +207,7 @@ pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto, int privileg
xprt = xprt_create_proto(proto, srvaddr, NULL);
if (IS_ERR(xprt))
return (struct rpc_clnt *)xprt;
- xprt->addr.sin_port = htons(RPC_PMAP_PORT);
+ xprt->ops->set_port(xprt, RPC_PMAP_PORT);
if (!privileged)
xprt->resvport = 0;
@@ -217,7 +217,6 @@ pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto, int privileg
RPC_AUTH_UNIX);
if (!IS_ERR(clnt)) {
clnt->cl_softrtry = 1;
- clnt->cl_chatty = 1;
clnt->cl_oneshot = 1;
}
return clnt;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 16a2458f38f..9764c80ab0b 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -69,10 +69,13 @@ rpc_timeout_upcall_queue(void *data)
struct rpc_inode *rpci = (struct rpc_inode *)data;
struct inode *inode = &rpci->vfs_inode;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
+ if (rpci->ops == NULL)
+ goto out;
if (rpci->nreaders == 0 && !list_empty(&rpci->pipe))
__rpc_purge_upcall(inode, -ETIMEDOUT);
- up(&inode->i_sem);
+out:
+ mutex_unlock(&inode->i_mutex);
}
int
@@ -81,7 +84,7 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
struct rpc_inode *rpci = RPC_I(inode);
int res = -EPIPE;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if (rpci->ops == NULL)
goto out;
if (rpci->nreaders) {
@@ -97,7 +100,7 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
res = 0;
}
out:
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
wake_up(&rpci->waitq);
return res;
}
@@ -113,9 +116,7 @@ rpc_close_pipes(struct inode *inode)
{
struct rpc_inode *rpci = RPC_I(inode);
- cancel_delayed_work(&rpci->queue_timeout);
- flush_scheduled_work();
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if (rpci->ops != NULL) {
rpci->nreaders = 0;
__rpc_purge_list(rpci, &rpci->in_upcall, -EPIPE);
@@ -126,7 +127,9 @@ rpc_close_pipes(struct inode *inode)
rpci->ops = NULL;
}
rpc_inode_setowner(inode, NULL);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
+ cancel_delayed_work(&rpci->queue_timeout);
+ flush_scheduled_work();
}
static struct inode *
@@ -151,7 +154,7 @@ rpc_pipe_open(struct inode *inode, struct file *filp)
struct rpc_inode *rpci = RPC_I(inode);
int res = -ENXIO;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if (rpci->ops != NULL) {
if (filp->f_mode & FMODE_READ)
rpci->nreaders ++;
@@ -159,17 +162,17 @@ rpc_pipe_open(struct inode *inode, struct file *filp)
rpci->nwriters ++;
res = 0;
}
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return res;
}
static int
rpc_pipe_release(struct inode *inode, struct file *filp)
{
- struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
+ struct rpc_inode *rpci = RPC_I(inode);
struct rpc_pipe_msg *msg;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if (rpci->ops == NULL)
goto out;
msg = (struct rpc_pipe_msg *)filp->private_data;
@@ -187,7 +190,7 @@ rpc_pipe_release(struct inode *inode, struct file *filp)
if (rpci->ops->release_pipe)
rpci->ops->release_pipe(inode);
out:
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return 0;
}
@@ -199,7 +202,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
struct rpc_pipe_msg *msg;
int res = 0;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
if (rpci->ops == NULL) {
res = -EPIPE;
goto out_unlock;
@@ -226,7 +229,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
rpci->ops->destroy_msg(msg);
}
out_unlock:
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return res;
}
@@ -237,11 +240,11 @@ rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *of
struct rpc_inode *rpci = RPC_I(inode);
int res;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
res = -EPIPE;
if (rpci->ops != NULL)
res = rpci->ops->downcall(filp, buf, len);
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
return res;
}
@@ -319,7 +322,7 @@ rpc_info_open(struct inode *inode, struct file *file)
if (!ret) {
struct seq_file *m = file->private_data;
- down(&inode->i_sem);
+ mutex_lock(&inode->i_mutex);
clnt = RPC_I(inode)->private;
if (clnt) {
atomic_inc(&clnt->cl_users);
@@ -328,7 +331,7 @@ rpc_info_open(struct inode *inode, struct file *file)
single_release(inode, file);
ret = -EINVAL;
}
- up(&inode->i_sem);
+ mutex_unlock(&inode->i_mutex);
}
return ret;
}
@@ -488,11 +491,11 @@ rpc_depopulate(struct dentry *parent)
struct dentry *dentry, *dvec[10];
int n = 0;
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
repeat:
spin_lock(&dcache_lock);
list_for_each_safe(pos, next, &parent->d_subdirs) {
- dentry = list_entry(pos, struct dentry, d_child);
+ dentry = list_entry(pos, struct dentry, d_u.d_child);
spin_lock(&dentry->d_lock);
if (!d_unhashed(dentry)) {
dget_locked(dentry);
@@ -516,7 +519,7 @@ repeat:
} while (n);
goto repeat;
}
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
}
static int
@@ -529,7 +532,7 @@ rpc_populate(struct dentry *parent,
struct dentry *dentry;
int mode, i;
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
for (i = start; i < eof; i++) {
dentry = d_alloc_name(parent, files[i].name);
if (!dentry)
@@ -549,10 +552,10 @@ rpc_populate(struct dentry *parent,
dir->i_nlink++;
d_add(dentry, inode);
}
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
return 0;
out_bad:
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
__FILE__, __FUNCTION__, parent->d_name.name);
return -ENOMEM;
@@ -606,7 +609,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
if ((error = rpc_lookup_parent(path, nd)) != 0)
return ERR_PTR(error);
dir = nd->dentry->d_inode;
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
dentry = lookup_hash(nd);
if (IS_ERR(dentry))
goto out_err;
@@ -617,7 +620,7 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
}
return dentry;
out_err:
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
rpc_release_path(nd);
return dentry;
}
@@ -643,7 +646,7 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
if (error)
goto err_depopulate;
out:
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
rpc_release_path(&nd);
return dentry;
err_depopulate:
@@ -668,7 +671,7 @@ rpc_rmdir(char *path)
if ((error = rpc_lookup_parent(path, &nd)) != 0)
return error;
dir = nd.dentry->d_inode;
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
dentry = lookup_hash(&nd);
if (IS_ERR(dentry)) {
error = PTR_ERR(dentry);
@@ -678,7 +681,7 @@ rpc_rmdir(char *path)
error = __rpc_rmdir(dir, dentry);
dput(dentry);
out_release:
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
rpc_release_path(&nd);
return error;
}
@@ -707,7 +710,7 @@ rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags)
rpci->ops = ops;
inode_dir_notify(dir, DN_CREATE);
out:
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
rpc_release_path(&nd);
return dentry;
err_dput:
@@ -729,7 +732,7 @@ rpc_unlink(char *path)
if ((error = rpc_lookup_parent(path, &nd)) != 0)
return error;
dir = nd.dentry->d_inode;
- down(&dir->i_sem);
+ mutex_lock(&dir->i_mutex);
dentry = lookup_hash(&nd);
if (IS_ERR(dentry)) {
error = PTR_ERR(dentry);
@@ -743,7 +746,7 @@ rpc_unlink(char *path)
dput(dentry);
inode_dir_notify(dir, DN_DELETE);
out_release:
- up(&dir->i_sem);
+ mutex_unlock(&dir->i_mutex);
rpc_release_path(&nd);
return error;
}
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 54e60a65750..7415406aa1a 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -41,8 +41,6 @@ static mempool_t *rpc_buffer_mempool __read_mostly;
static void __rpc_default_timer(struct rpc_task *task);
static void rpciod_killall(void);
-static void rpc_free(struct rpc_task *task);
-
static void rpc_async_schedule(void *);
/*
@@ -264,6 +262,35 @@ void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
}
EXPORT_SYMBOL(rpc_init_wait_queue);
+static int rpc_wait_bit_interruptible(void *word)
+{
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+ schedule();
+ return 0;
+}
+
+/*
+ * Mark an RPC call as having completed by clearing the 'active' bit
+ */
+static inline void rpc_mark_complete_task(struct rpc_task *task)
+{
+ rpc_clear_active(task);
+ wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
+}
+
+/*
+ * Allow callers to wait for completion of an RPC call
+ */
+int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
+{
+ if (action == NULL)
+ action = rpc_wait_bit_interruptible;
+ return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
+ action, TASK_INTERRUPTIBLE);
+}
+EXPORT_SYMBOL(__rpc_wait_for_completion_task);
+
/*
* Make an RPC task runnable.
*
@@ -299,10 +326,7 @@ static void rpc_make_runnable(struct rpc_task *task)
static inline void
rpc_schedule_run(struct rpc_task *task)
{
- /* Don't run a child twice! */
- if (RPC_IS_ACTIVATED(task))
- return;
- task->tk_active = 1;
+ rpc_set_active(task);
rpc_make_runnable(task);
}
@@ -324,8 +348,7 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
}
/* Mark the task as being activated if so needed */
- if (!RPC_IS_ACTIVATED(task))
- task->tk_active = 1;
+ rpc_set_active(task);
__rpc_add_wait_queue(q, task);
@@ -555,36 +578,29 @@ __rpc_atrun(struct rpc_task *task)
}
/*
- * Helper that calls task->tk_exit if it exists and then returns
- * true if we should exit __rpc_execute.
+ * Helper to call task->tk_ops->rpc_call_prepare
*/
-static inline int __rpc_do_exit(struct rpc_task *task)
+static void rpc_prepare_task(struct rpc_task *task)
{
- if (task->tk_exit != NULL) {
- lock_kernel();
- task->tk_exit(task);
- unlock_kernel();
- /* If tk_action is non-null, we should restart the call */
- if (task->tk_action != NULL) {
- if (!RPC_ASSASSINATED(task)) {
- /* Release RPC slot and buffer memory */
- xprt_release(task);
- rpc_free(task);
- return 0;
- }
- printk(KERN_ERR "RPC: dead task tried to walk away.\n");
- }
- }
- return 1;
+ task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}
-static int rpc_wait_bit_interruptible(void *word)
+/*
+ * Helper that calls task->tk_ops->rpc_call_done if it exists
+ */
+void rpc_exit_task(struct rpc_task *task)
{
- if (signal_pending(current))
- return -ERESTARTSYS;
- schedule();
- return 0;
+ task->tk_action = NULL;
+ if (task->tk_ops->rpc_call_done != NULL) {
+ task->tk_ops->rpc_call_done(task, task->tk_calldata);
+ if (task->tk_action != NULL) {
+ WARN_ON(RPC_ASSASSINATED(task));
+ /* Always release the RPC slot and buffer memory */
+ xprt_release(task);
+ }
+ }
}
+EXPORT_SYMBOL(rpc_exit_task);
/*
* This is the RPC `scheduler' (or rather, the finite state machine).
@@ -631,12 +647,11 @@ static int __rpc_execute(struct rpc_task *task)
* by someone else.
*/
if (!RPC_IS_QUEUED(task)) {
- if (task->tk_action != NULL) {
- lock_kernel();
- task->tk_action(task);
- unlock_kernel();
- } else if (__rpc_do_exit(task))
+ if (task->tk_action == NULL)
break;
+ lock_kernel();
+ task->tk_action(task);
+ unlock_kernel();
}
/*
@@ -676,9 +691,9 @@ static int __rpc_execute(struct rpc_task *task)
dprintk("RPC: %4d sync task resuming\n", task->tk_pid);
}
- dprintk("RPC: %4d exit() = %d\n", task->tk_pid, task->tk_status);
- status = task->tk_status;
-
+ dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid, status, task->tk_status);
+ /* Wake up anyone who is waiting for task completion */
+ rpc_mark_complete_task(task);
/* Release all resources associated with the task */
rpc_release_task(task);
return status;
@@ -696,9 +711,7 @@ static int __rpc_execute(struct rpc_task *task)
int
rpc_execute(struct rpc_task *task)
{
- BUG_ON(task->tk_active);
-
- task->tk_active = 1;
+ rpc_set_active(task);
rpc_set_running(task);
return __rpc_execute(task);
}
@@ -708,17 +721,19 @@ static void rpc_async_schedule(void *arg)
__rpc_execute((struct rpc_task *)arg);
}
-/*
- * Allocate memory for RPC purposes.
+/**
+ * rpc_malloc - allocate an RPC buffer
+ * @task: RPC task that will use this buffer
+ * @size: requested byte size
*
* We try to ensure that some NFS reads and writes can always proceed
* by using a mempool when allocating 'small' buffers.
* In order to avoid memory starvation triggering more writebacks of
* NFS requests, we use GFP_NOFS rather than GFP_KERNEL.
*/
-void *
-rpc_malloc(struct rpc_task *task, size_t size)
+void * rpc_malloc(struct rpc_task *task, size_t size)
{
+ struct rpc_rqst *req = task->tk_rqstp;
gfp_t gfp;
if (task->tk_flags & RPC_TASK_SWAPPER)
@@ -727,42 +742,52 @@ rpc_malloc(struct rpc_task *task, size_t size)
gfp = GFP_NOFS;
if (size > RPC_BUFFER_MAXSIZE) {
- task->tk_buffer = kmalloc(size, gfp);
- if (task->tk_buffer)
- task->tk_bufsize = size;
+ req->rq_buffer = kmalloc(size, gfp);
+ if (req->rq_buffer)
+ req->rq_bufsize = size;
} else {
- task->tk_buffer = mempool_alloc(rpc_buffer_mempool, gfp);
- if (task->tk_buffer)
- task->tk_bufsize = RPC_BUFFER_MAXSIZE;
+ req->rq_buffer = mempool_alloc(rpc_buffer_mempool, gfp);
+ if (req->rq_buffer)
+ req->rq_bufsize = RPC_BUFFER_MAXSIZE;
}
- return task->tk_buffer;
+ return req->rq_buffer;
}
-static void
-rpc_free(struct rpc_task *task)
+/**
+ * rpc_free - free buffer allocated via rpc_malloc
+ * @task: RPC task with a buffer to be freed
+ *
+ */
+void rpc_free(struct rpc_task *task)
{
- if (task->tk_buffer) {
- if (task->tk_bufsize == RPC_BUFFER_MAXSIZE)
- mempool_free(task->tk_buffer, rpc_buffer_mempool);
+ struct rpc_rqst *req = task->tk_rqstp;
+
+ if (req->rq_buffer) {
+ if (req->rq_bufsize == RPC_BUFFER_MAXSIZE)
+ mempool_free(req->rq_buffer, rpc_buffer_mempool);
else
- kfree(task->tk_buffer);
- task->tk_buffer = NULL;
- task->tk_bufsize = 0;
+ kfree(req->rq_buffer);
+ req->rq_buffer = NULL;
+ req->rq_bufsize = 0;
}
}
/*
* Creation and deletion of RPC task structures
*/
-void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, rpc_action callback, int flags)
+void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
{
memset(task, 0, sizeof(*task));
init_timer(&task->tk_timer);
task->tk_timer.data = (unsigned long) task;
task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer;
+ atomic_set(&task->tk_count, 1);
task->tk_client = clnt;
task->tk_flags = flags;
- task->tk_exit = callback;
+ task->tk_ops = tk_ops;
+ if (tk_ops->rpc_call_prepare != NULL)
+ task->tk_action = rpc_prepare_task;
+ task->tk_calldata = calldata;
/* Initialize retry counters */
task->tk_garb_retry = 2;
@@ -791,6 +816,8 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, rpc_action call
list_add_tail(&task->tk_task, &all_tasks);
spin_unlock(&rpc_sched_lock);
+ BUG_ON(task->tk_ops == NULL);
+
dprintk("RPC: %4d new task procpid %d\n", task->tk_pid,
current->pid);
}
@@ -801,8 +828,7 @@ rpc_alloc_task(void)
return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}
-static void
-rpc_default_free_task(struct rpc_task *task)
+static void rpc_free_task(struct rpc_task *task)
{
dprintk("RPC: %4d freeing task\n", task->tk_pid);
mempool_free(task, rpc_task_mempool);
@@ -813,8 +839,7 @@ rpc_default_free_task(struct rpc_task *task)
* clean up after an allocation failure, as the client may
* have specified "oneshot".
*/
-struct rpc_task *
-rpc_new_task(struct rpc_clnt *clnt, rpc_action callback, int flags)
+struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata)
{
struct rpc_task *task;
@@ -822,10 +847,7 @@ rpc_new_task(struct rpc_clnt *clnt, rpc_action callback, int flags)
if (!task)
goto cleanup;
- rpc_init_task(task, clnt, callback, flags);
-
- /* Replace tk_release */
- task->tk_release = rpc_default_free_task;
+ rpc_init_task(task, clnt, flags, tk_ops, calldata);
dprintk("RPC: %4d allocated task\n", task->tk_pid);
task->tk_flags |= RPC_TASK_DYNAMIC;
@@ -845,11 +867,15 @@ cleanup:
void rpc_release_task(struct rpc_task *task)
{
- dprintk("RPC: %4d release task\n", task->tk_pid);
+ const struct rpc_call_ops *tk_ops = task->tk_ops;
+ void *calldata = task->tk_calldata;
#ifdef RPC_DEBUG
BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
#endif
+ if (!atomic_dec_and_test(&task->tk_count))
+ return;
+ dprintk("RPC: %4d release task\n", task->tk_pid);
/* Remove from global task list */
spin_lock(&rpc_sched_lock);
@@ -857,7 +883,6 @@ void rpc_release_task(struct rpc_task *task)
spin_unlock(&rpc_sched_lock);
BUG_ON (RPC_IS_QUEUED(task));
- task->tk_active = 0;
/* Synchronously delete any running timer */
rpc_delete_timer(task);
@@ -867,7 +892,6 @@ void rpc_release_task(struct rpc_task *task)
xprt_release(task);
if (task->tk_msg.rpc_cred)
rpcauth_unbindcred(task);
- rpc_free(task);
if (task->tk_client) {
rpc_release_client(task->tk_client);
task->tk_client = NULL;
@@ -876,11 +900,34 @@ void rpc_release_task(struct rpc_task *task)
#ifdef RPC_DEBUG
task->tk_magic = 0;
#endif
- if (task->tk_release)
- task->tk_release(task);
+ if (task->tk_flags & RPC_TASK_DYNAMIC)
+ rpc_free_task(task);
+ if (tk_ops->rpc_release)
+ tk_ops->rpc_release(calldata);
}
/**
+ * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
+ * @clnt - pointer to RPC client
+ * @flags - RPC flags
+ * @ops - RPC call ops
+ * @data - user call data
+ */
+struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
+ const struct rpc_call_ops *ops,
+ void *data)
+{
+ struct rpc_task *task;
+ task = rpc_new_task(clnt, flags, ops, data);
+ if (task == NULL)
+ return ERR_PTR(-ENOMEM);
+ atomic_inc(&task->tk_count);
+ rpc_execute(task);
+ return task;
+}
+EXPORT_SYMBOL(rpc_run_task);
+
+/**
* rpc_find_parent - find the parent of a child task.
* @child: child task
*
@@ -890,12 +937,11 @@ void rpc_release_task(struct rpc_task *task)
*
* Caller must hold childq.lock
*/
-static inline struct rpc_task *rpc_find_parent(struct rpc_task *child)
+static inline struct rpc_task *rpc_find_parent(struct rpc_task *child, struct rpc_task *parent)
{
- struct rpc_task *task, *parent;
+ struct rpc_task *task;
struct list_head *le;
- parent = (struct rpc_task *) child->tk_calldata;
task_for_each(task, le, &childq.tasks[0])
if (task == parent)
return parent;
@@ -903,18 +949,22 @@ static inline struct rpc_task *rpc_find_parent(struct rpc_task *child)
return NULL;
}
-static void rpc_child_exit(struct rpc_task *child)
+static void rpc_child_exit(struct rpc_task *child, void *calldata)
{
struct rpc_task *parent;
spin_lock_bh(&childq.lock);
- if ((parent = rpc_find_parent(child)) != NULL) {
+ if ((parent = rpc_find_parent(child, calldata)) != NULL) {
parent->tk_status = child->tk_status;
__rpc_wake_up_task(parent);
}
spin_unlock_bh(&childq.lock);
}
+static const struct rpc_call_ops rpc_child_ops = {
+ .rpc_call_done = rpc_child_exit,
+};
+
/*
* Note: rpc_new_task releases the client after a failure.
*/
@@ -923,11 +973,9 @@ rpc_new_child(struct rpc_clnt *clnt, struct rpc_task *parent)
{
struct rpc_task *task;
- task = rpc_new_task(clnt, NULL, RPC_TASK_ASYNC | RPC_TASK_CHILD);
+ task = rpc_new_task(clnt, RPC_TASK_ASYNC | RPC_TASK_CHILD, &rpc_child_ops, parent);
if (!task)
goto fail;
- task->tk_exit = rpc_child_exit;
- task->tk_calldata = parent;
return task;
fail:
@@ -1063,7 +1111,7 @@ void rpc_show_tasks(void)
return;
}
printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout "
- "-rpcwait -action- --exit--\n");
+ "-rpcwait -action- ---ops--\n");
alltask_for_each(t, le, &all_tasks) {
const char *rpc_waitq = "none";
@@ -1078,7 +1126,7 @@ void rpc_show_tasks(void)
(t->tk_client ? t->tk_client->cl_prog : 0),
t->tk_rqstp, t->tk_timeout,
rpc_waitq,
- t->tk_action, t->tk_exit);
+ t->tk_action, t->tk_ops);
}
spin_unlock(&rpc_sched_lock);
}
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index a03d4b600c9..9f737320359 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -30,8 +30,6 @@ EXPORT_SYMBOL(rpc_init_task);
EXPORT_SYMBOL(rpc_sleep_on);
EXPORT_SYMBOL(rpc_wake_up_next);
EXPORT_SYMBOL(rpc_wake_up_task);
-EXPORT_SYMBOL(rpc_new_child);
-EXPORT_SYMBOL(rpc_run_child);
EXPORT_SYMBOL(rpciod_down);
EXPORT_SYMBOL(rpciod_up);
EXPORT_SYMBOL(rpc_new_task);
@@ -45,7 +43,6 @@ EXPORT_SYMBOL(rpc_clone_client);
EXPORT_SYMBOL(rpc_bind_new_program);
EXPORT_SYMBOL(rpc_destroy_client);
EXPORT_SYMBOL(rpc_shutdown_client);
-EXPORT_SYMBOL(rpc_release_client);
EXPORT_SYMBOL(rpc_killall_tasks);
EXPORT_SYMBOL(rpc_call_sync);
EXPORT_SYMBOL(rpc_call_async);
@@ -120,7 +117,6 @@ EXPORT_SYMBOL(unix_domain_find);
/* Generic XDR */
EXPORT_SYMBOL(xdr_encode_string);
-EXPORT_SYMBOL(xdr_decode_string);
EXPORT_SYMBOL(xdr_decode_string_inplace);
EXPORT_SYMBOL(xdr_decode_netobj);
EXPORT_SYMBOL(xdr_encode_netobj);
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index e4296c8b861..b08419e1fc6 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -32,7 +32,7 @@ svc_create(struct svc_program *prog, unsigned int bufsize)
int vers;
unsigned int xdrsize;
- if (!(serv = (struct svc_serv *) kmalloc(sizeof(*serv), GFP_KERNEL)))
+ if (!(serv = kmalloc(sizeof(*serv), GFP_KERNEL)))
return NULL;
memset(serv, 0, sizeof(*serv));
serv->sv_name = prog->pg_name;
@@ -122,8 +122,7 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
rqstp->rq_argused = 0;
rqstp->rq_resused = 0;
arghi = 0;
- if (pages > RPCSVC_MAXPAGES)
- BUG();
+ BUG_ON(pages > RPCSVC_MAXPAGES);
while (pages) {
struct page *p = alloc_page(GFP_KERNEL);
if (!p)
@@ -167,8 +166,8 @@ svc_create_thread(svc_thread_fn func, struct svc_serv *serv)
memset(rqstp, 0, sizeof(*rqstp));
init_waitqueue_head(&rqstp->rq_wait);
- if (!(rqstp->rq_argp = (u32 *) kmalloc(serv->sv_xdrsize, GFP_KERNEL))
- || !(rqstp->rq_resp = (u32 *) kmalloc(serv->sv_xdrsize, GFP_KERNEL))
+ if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
+ || !(rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
|| !svc_init_buffer(rqstp, serv->sv_bufsz))
goto out_thread;
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index cac2e774dd8..3e6c694bbad 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -101,10 +101,22 @@ static void ip_map_put(struct cache_head *item, struct cache_detail *cd)
}
}
+#if IP_HASHBITS == 8
+/* hash_long on a 64 bit machine is currently REALLY BAD for
+ * IP addresses in reverse-endian (i.e. on a little-endian machine).
+ * So use a trivial but reliable hash instead
+ */
+static inline int hash_ip(unsigned long ip)
+{
+ int hash = ip ^ (ip>>16);
+ return (hash ^ (hash>>8)) & 0xff;
+}
+#endif
+
static inline int ip_map_hash(struct ip_map *item)
{
return hash_str(item->m_class, IP_HASHBITS) ^
- hash_long((unsigned long)item->m_addr.s_addr, IP_HASHBITS);
+ hash_ip((unsigned long)item->m_addr.s_addr);
}
static inline int ip_map_match(struct ip_map *item, struct ip_map *tmp)
{
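
As a sanity check on the fold above, a small userspace sketch (the hash_ip() body is copied verbatim from the hunk; the sample addresses are illustrative) prints the 8-bit bucket each address lands in, showing that every octet contributes to the result:

/* Userspace sketch of the hash_ip() fold above (IP_HASHBITS == 8) */
#include <stdio.h>
#include <arpa/inet.h>

static inline int hash_ip(unsigned long ip)
{
	int hash = ip ^ (ip >> 16);		/* fold high half onto low half */
	return (hash ^ (hash >> 8)) & 0xff;	/* fold again, keep 8 bits */
}

int main(void)
{
	/* inet_addr() returns the address in network byte order,
	 * just as ip_map stores m_addr.s_addr */
	const char *addrs[] = { "192.168.1.10", "192.168.1.11", "10.0.0.1" };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("%-15s -> bucket %d\n", addrs[i],
		       hash_ip((unsigned long)inet_addr(addrs[i])));
	return 0;
}
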
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index d68eba48129..e67613e4eb1 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1026,7 +1026,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
} else {
printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
svsk->sk_server->sv_name, -len);
- svc_sock_received(svsk);
+ goto err_delete;
}
return len;
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index aaf08cdd19f..ca4bfa57e11 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -93,27 +93,6 @@ xdr_encode_string(u32 *p, const char *string)
}
u32 *
-xdr_decode_string(u32 *p, char **sp, int *lenp, int maxlen)
-{
- unsigned int len;
- char *string;
-
- if ((len = ntohl(*p++)) > maxlen)
- return NULL;
- if (lenp)
- *lenp = len;
- if ((len % 4) != 0) {
- string = (char *) p;
- } else {
- string = (char *) (p - 1);
- memmove(string, p, len);
- }
- string[len] = '\0';
- *sp = string;
- return p + XDR_QUADLEN(len);
-}
-
-u32 *
xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen)
{
unsigned int len;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 6dda3860351..8ff2c8acb22 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -119,6 +119,17 @@ out_sleep:
return 0;
}
+static void xprt_clear_locked(struct rpc_xprt *xprt)
+{
+ xprt->snd_task = NULL;
+ if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) {
+ smp_mb__before_clear_bit();
+ clear_bit(XPRT_LOCKED, &xprt->state);
+ smp_mb__after_clear_bit();
+ } else
+ schedule_work(&xprt->task_cleanup);
+}
+
/*
* xprt_reserve_xprt_cong - serialize write access to transports
* @task: task that is requesting access to the transport
@@ -145,9 +156,7 @@ int xprt_reserve_xprt_cong(struct rpc_task *task)
}
return 1;
}
- smp_mb__before_clear_bit();
- clear_bit(XPRT_LOCKED, &xprt->state);
- smp_mb__after_clear_bit();
+ xprt_clear_locked(xprt);
out_sleep:
dprintk("RPC: %4d failed to lock transport %p\n", task->tk_pid, xprt);
task->tk_timeout = 0;
@@ -193,9 +202,7 @@ static void __xprt_lock_write_next(struct rpc_xprt *xprt)
return;
out_unlock:
- smp_mb__before_clear_bit();
- clear_bit(XPRT_LOCKED, &xprt->state);
- smp_mb__after_clear_bit();
+ xprt_clear_locked(xprt);
}
static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
@@ -222,9 +229,7 @@ static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
return;
}
out_unlock:
- smp_mb__before_clear_bit();
- clear_bit(XPRT_LOCKED, &xprt->state);
- smp_mb__after_clear_bit();
+ xprt_clear_locked(xprt);
}
/**
@@ -237,10 +242,7 @@ out_unlock:
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
if (xprt->snd_task == task) {
- xprt->snd_task = NULL;
- smp_mb__before_clear_bit();
- clear_bit(XPRT_LOCKED, &xprt->state);
- smp_mb__after_clear_bit();
+ xprt_clear_locked(xprt);
__xprt_lock_write_next(xprt);
}
}
@@ -256,10 +258,7 @@ void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
if (xprt->snd_task == task) {
- xprt->snd_task = NULL;
- smp_mb__before_clear_bit();
- clear_bit(XPRT_LOCKED, &xprt->state);
- smp_mb__after_clear_bit();
+ xprt_clear_locked(xprt);
__xprt_lock_write_next_cong(xprt);
}
}
@@ -535,10 +534,6 @@ void xprt_connect(struct rpc_task *task)
dprintk("RPC: %4d xprt_connect xprt %p %s connected\n", task->tk_pid,
xprt, (xprt_connected(xprt) ? "is" : "is not"));
- if (xprt->shutdown) {
- task->tk_status = -EIO;
- return;
- }
if (!xprt->addr.sin_port) {
task->tk_status = -EIO;
return;
@@ -687,9 +682,6 @@ int xprt_prepare_transmit(struct rpc_task *task)
dprintk("RPC: %4d xprt_prepare_transmit\n", task->tk_pid);
- if (xprt->shutdown)
- return -EIO;
-
spin_lock_bh(&xprt->transport_lock);
if (req->rq_received && !req->rq_bytes_sent) {
err = req->rq_received;
@@ -814,11 +806,9 @@ void xprt_reserve(struct rpc_task *task)
struct rpc_xprt *xprt = task->tk_xprt;
task->tk_status = -EIO;
- if (!xprt->shutdown) {
- spin_lock(&xprt->reserve_lock);
- do_xprt_reserve(task);
- spin_unlock(&xprt->reserve_lock);
- }
+ spin_lock(&xprt->reserve_lock);
+ do_xprt_reserve(task);
+ spin_unlock(&xprt->reserve_lock);
}
static inline u32 xprt_alloc_xid(struct rpc_xprt *xprt)
@@ -838,6 +828,8 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
req->rq_timeout = xprt->timeout.to_initval;
req->rq_task = task;
req->rq_xprt = xprt;
+ req->rq_buffer = NULL;
+ req->rq_bufsize = 0;
req->rq_xid = xprt_alloc_xid(xprt);
req->rq_release_snd_buf = NULL;
dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid,
@@ -863,10 +855,11 @@ void xprt_release(struct rpc_task *task)
if (!list_empty(&req->rq_list))
list_del(&req->rq_list);
xprt->last_used = jiffies;
- if (list_empty(&xprt->recv) && !xprt->shutdown)
+ if (list_empty(&xprt->recv))
mod_timer(&xprt->timer,
xprt->last_used + xprt->idle_timeout);
spin_unlock_bh(&xprt->transport_lock);
+ xprt->ops->buf_free(task);
task->tk_rqstp = NULL;
if (req->rq_release_snd_buf)
req->rq_release_snd_buf(req);
@@ -974,16 +967,6 @@ struct rpc_xprt *xprt_create_proto(int proto, struct sockaddr_in *sap, struct rp
return xprt;
}
-static void xprt_shutdown(struct rpc_xprt *xprt)
-{
- xprt->shutdown = 1;
- rpc_wake_up(&xprt->sending);
- rpc_wake_up(&xprt->resend);
- xprt_wake_pending_tasks(xprt, -EIO);
- rpc_wake_up(&xprt->backlog);
- del_timer_sync(&xprt->timer);
-}
-
/**
* xprt_destroy - destroy an RPC transport, killing off all requests.
* @xprt: transport to destroy
@@ -992,7 +975,8 @@ static void xprt_shutdown(struct rpc_xprt *xprt)
int xprt_destroy(struct rpc_xprt *xprt)
{
dprintk("RPC: destroying transport %p\n", xprt);
- xprt_shutdown(xprt);
+ xprt->shutdown = 1;
+ del_timer_sync(&xprt->timer);
xprt->ops->destroy(xprt);
kfree(xprt);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 77e8800d412..c458f8d1d6d 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -28,6 +28,7 @@
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/sched.h>
#include <linux/file.h>
#include <net/sock.h>
@@ -424,7 +425,7 @@ static void xs_close(struct rpc_xprt *xprt)
struct sock *sk = xprt->inet;
if (!sk)
- return;
+ goto clear_close_wait;
dprintk("RPC: xs_close xprt %p\n", xprt);
@@ -441,6 +442,10 @@ static void xs_close(struct rpc_xprt *xprt)
sk->sk_no_check = 0;
sock_release(sock);
+clear_close_wait:
+ smp_mb__before_clear_bit();
+ clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
+ smp_mb__after_clear_bit();
}
/**
@@ -800,9 +805,13 @@ static void xs_tcp_state_change(struct sock *sk)
case TCP_SYN_SENT:
case TCP_SYN_RECV:
break;
+ case TCP_CLOSE_WAIT:
+ /* Try to schedule an autoclose RPC call */
+ set_bit(XPRT_CLOSE_WAIT, &xprt->state);
+ if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
+ schedule_work(&xprt->task_cleanup);
default:
xprt_disconnect(xprt);
- break;
}
out:
read_unlock(&sk->sk_callback_lock);
@@ -920,6 +929,18 @@ static void xs_udp_timer(struct rpc_task *task)
xprt_adjust_cwnd(task, -ETIMEDOUT);
}
+/**
+ * xs_set_port - reset the port number in the remote endpoint address
+ * @xprt: generic transport
+ * @port: new port number
+ *
+ */
+static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
+{
+ dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);
+ xprt->addr.sin_port = htons(port);
+}
+
static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
{
struct sockaddr_in myaddr = {
@@ -1160,7 +1181,10 @@ static struct rpc_xprt_ops xs_udp_ops = {
.set_buffer_size = xs_udp_set_buffer_size,
.reserve_xprt = xprt_reserve_xprt_cong,
.release_xprt = xprt_release_xprt_cong,
+ .set_port = xs_set_port,
.connect = xs_connect,
+ .buf_alloc = rpc_malloc,
+ .buf_free = rpc_free,
.send_request = xs_udp_send_request,
.set_retrans_timeout = xprt_set_retrans_timeout_rtt,
.timer = xs_udp_timer,
@@ -1172,7 +1196,10 @@ static struct rpc_xprt_ops xs_udp_ops = {
static struct rpc_xprt_ops xs_tcp_ops = {
.reserve_xprt = xprt_reserve_xprt,
.release_xprt = xprt_release_xprt,
+ .set_port = xs_set_port,
.connect = xs_connect,
+ .buf_alloc = rpc_malloc,
+ .buf_free = rpc_free,
.send_request = xs_tcp_send_request,
.set_retrans_timeout = xprt_set_retrans_timeout_def,
.close = xs_close,
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
new file mode 100644
index 00000000000..05ab18e62de
--- /dev/null
+++ b/net/tipc/Kconfig
@@ -0,0 +1,112 @@
+#
+# TIPC configuration
+#
+
+menu "TIPC Configuration (EXPERIMENTAL)"
+ depends on INET && EXPERIMENTAL
+
+config TIPC
+ tristate "The TIPC Protocol (EXPERIMENTAL)"
+ ---help---
+ TBD.
+
+ This protocol support is also available as a module ( = code which
+ can be inserted in and removed from the running kernel whenever you
+ want). The module will be called tipc. If you want to compile it
+ as a module, say M here and read <file:Documentation/modules.txt>.
+
+ If in doubt, say N.
+
+config TIPC_ADVANCED
+ bool "TIPC: Advanced configuration"
+ depends on TIPC
+ default n
+ help
+ Saying Y here will open some advanced configuration
+ for TIPC. Most users do not need to bother, so if
+ unsure, just say N.
+
+config TIPC_ZONES
+ int "Maximum number of zones in network"
+ depends on TIPC && TIPC_ADVANCED
+ default "3"
+ help
+ Maximum number of zones inside a TIPC network. The maximum
+ supported value is 255 zones, the minimum is 1.
+
+ Default is 3 zones in a network; setting this higher allows
+ more zones but may use more memory.
+
+config TIPC_CLUSTERS
+ int "Maximum number of clusters in a zone"
+ depends on TIPC && TIPC_ADVANCED
+ default "1"
+ help
+ ***Only 1 (one cluster in a zone) is supported by the current code.
+ Any value set here will be overridden.***
+
+ (Maximum number of clusters inside a TIPC zone. The maximum supported
+ value is 4095 clusters, the minimum is 1.
+
+ Default is 1; setting this to a smaller value may save some memory,
+ setting it higher allows more clusters and may consume more memory.)
+
+config TIPC_NODES
+ int "Maximum number of nodes in cluster"
+ depends on TIPC && TIPC_ADVANCED
+ default "255"
+ help
+ Maximum number of nodes inside a TIPC cluster. Maximum
+ supported value is 2047 nodes, minimum is 8.
+
+ Setting this to a smaller value saves some memory;
+ setting it higher allows more nodes.
+
+config TIPC_SLAVE_NODES
+ int "Maximum number of slave nodes in cluster"
+ depends on TIPC && TIPC_ADVANCED
+ default "0"
+ help
+ ***This capability is not supported by current code.***
+
+ Maximum number of slave nodes inside a TIPC cluster. Maximum
+ supported value is 2047 nodes, minimum is 0.
+
+ Setting this to a smaller value saves some memory;
+ setting it higher allows more nodes.
+
+config TIPC_PORTS
+ int "Maximum number of ports in a node"
+ depends on TIPC && TIPC_ADVANCED
+ default "8191"
+ help
+ Maximum number of ports within a node. The maximum
+ supported value is 64535 ports, the minimum is 127.
+
+ Setting this to a smaller value saves some memory;
+ setting it higher allows more ports.
+
+config TIPC_LOG
+ int "Size of log buffer"
+ depends on TIPC && TIPC_ADVANCED
+ default 0
+ help
+ Size (in bytes) of TIPC's internal log buffer, which records the
+ occurrence of significant events. Maximum supported value
+ is 32768 bytes, minimum is 0.
+
+ There is no need to enable the log buffer unless the node will be
+ managed remotely via TIPC.
+
+config TIPC_DEBUG
+ bool "Enable debugging support"
+ depends on TIPC
+ default n
+ help
+ This will enable debugging of TIPC.
+
+ Only say Y here if you are having trouble with TIPC. It will
+ enable the display of detailed information about what is going on.
+
+endmenu
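
For reference, an illustrative .config fragment using the defaults documented above (TIPC built as a module, advanced options visible):

# Illustrative fragment; values mirror the Kconfig defaults above
CONFIG_TIPC=m
CONFIG_TIPC_ADVANCED=y
CONFIG_TIPC_ZONES=3
CONFIG_TIPC_CLUSTERS=1
CONFIG_TIPC_NODES=255
CONFIG_TIPC_SLAVE_NODES=0
CONFIG_TIPC_PORTS=8191
CONFIG_TIPC_LOG=0
# CONFIG_TIPC_DEBUG is not set
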
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
new file mode 100644
index 00000000000..dceb7027946
--- /dev/null
+++ b/net/tipc/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for the Linux TIPC layer
+#
+
+obj-$(CONFIG_TIPC) := tipc.o
+
+tipc-y += addr.o bcast.o bearer.o config.o cluster.o \
+ core.o handler.o link.o discover.o msg.o \
+ name_distr.o subscr.o name_table.o net.o \
+ netlink.o node.o node_subscr.o port.o ref.o \
+ socket.o user_reg.o zone.o dbg.o eth_media.o
+
+# End of file
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
new file mode 100644
index 00000000000..eca22260c98
--- /dev/null
+++ b/net/tipc/addr.c
@@ -0,0 +1,94 @@
+/*
+ * net/tipc/addr.c: TIPC address utility routines
+ *
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "addr.h"
+#include "zone.h"
+#include "cluster.h"
+#include "net.h"
+
+u32 tipc_get_addr(void)
+{
+ return tipc_own_addr;
+}
+
+/**
+ * addr_domain_valid - validates a network domain address
+ *
+ * Accepts <Z.C.N>, <Z.C.0>, <Z.0.0>, and <0.0.0>,
+ * where Z, C, and N are non-zero and do not exceed the configured limits.
+ *
+ * Returns 1 if domain address is valid, otherwise 0
+ */
+
+int addr_domain_valid(u32 addr)
+{
+ u32 n = tipc_node(addr);
+ u32 c = tipc_cluster(addr);
+ u32 z = tipc_zone(addr);
+ u32 max_nodes = tipc_max_nodes;
+
+ if (is_slave(addr))
+ max_nodes = LOWEST_SLAVE + tipc_max_slaves;
+ if (n > max_nodes)
+ return 0;
+ if (c > tipc_max_clusters)
+ return 0;
+ if (z > tipc_max_zones)
+ return 0;
+
+ if (n && (!z || !c))
+ return 0;
+ if (c && !z)
+ return 0;
+ return 1;
+}
+
+/**
+ * addr_node_valid - validates a proposed network address for this node
+ *
+ * Accepts <Z.C.N>, where Z, C, and N are non-zero and do not exceed
+ * the configured limits.
+ *
+ * Returns 1 if address can be used, otherwise 0
+ */
+
+int addr_node_valid(u32 addr)
+{
+ return (addr_domain_valid(addr) && tipc_node(addr));
+}
+
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
new file mode 100644
index 00000000000..02ca71783e2
--- /dev/null
+++ b/net/tipc/addr.h
@@ -0,0 +1,128 @@
+/*
+ * net/tipc/addr.h: Include file for TIPC address utility routines
+ *
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_ADDR_H
+#define _TIPC_ADDR_H
+
+static inline u32 own_node(void)
+{
+ return tipc_node(tipc_own_addr);
+}
+
+static inline u32 own_cluster(void)
+{
+ return tipc_cluster(tipc_own_addr);
+}
+
+static inline u32 own_zone(void)
+{
+ return tipc_zone(tipc_own_addr);
+}
+
+static inline int in_own_cluster(u32 addr)
+{
+ return !((addr ^ tipc_own_addr) >> 12);
+}
+
+static inline int in_own_zone(u32 addr)
+{
+ return !((addr ^ tipc_own_addr) >> 24);
+}
+
+static inline int is_slave(u32 addr)
+{
+ return addr & 0x800;
+}
+
+static inline int may_route(u32 addr)
+{
+ return(addr ^ tipc_own_addr) >> 11;
+}
+
+static inline int in_scope(u32 domain, u32 addr)
+{
+ if (!domain || (domain == addr))
+ return 1;
+ if (domain == (addr & 0xfffff000u)) /* domain <Z.C.0> */
+ return 1;
+ if (domain == (addr & 0xff000000u)) /* domain <Z.0.0> */
+ return 1;
+ return 0;
+}
+
+/**
+ * addr_scope - convert message lookup domain to equivalent 2-bit scope value
+ */
+
+static inline int addr_scope(u32 domain)
+{
+ if (likely(!domain))
+ return TIPC_ZONE_SCOPE;
+ if (tipc_node(domain))
+ return TIPC_NODE_SCOPE;
+ if (tipc_cluster(domain))
+ return TIPC_CLUSTER_SCOPE;
+ return TIPC_ZONE_SCOPE;
+}
+
+/**
+ * addr_domain - convert 2-bit scope value to equivalent message lookup domain
+ *
+ * Needed when the address of a named message must be looked up a second time
+ * after a network hop.
+ */
+
+static inline int addr_domain(int sc)
+{
+ if (likely(sc == TIPC_NODE_SCOPE))
+ return tipc_own_addr;
+ if (sc == TIPC_CLUSTER_SCOPE)
+ return tipc_addr(tipc_zone(tipc_own_addr),
+ tipc_cluster(tipc_own_addr), 0);
+ return tipc_addr(tipc_zone(tipc_own_addr), 0, 0);
+}
+
+static inline char *addr_string_fill(char *string, u32 addr)
+{
+ snprintf(string, 16, "<%u.%u.%u>",
+ tipc_zone(addr), tipc_cluster(addr), tipc_node(addr));
+ return string;
+}
+
+int addr_domain_valid(u32);
+int addr_node_valid(u32 addr);
+
+#endif
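
The shift counts in in_own_cluster() (>> 12) and in_own_zone() (>> 24) above imply the address layout sketched below. A small userspace example, with local stand-ins for the tipc_addr()/tipc_zone()/tipc_cluster()/tipc_node() helpers that TIPC's core headers are assumed to provide as shift-and-mask macros:

#include <stdio.h>

/* Assumed layout: zone in bits 31-24, cluster in bits 23-12, node in bits 11-0 */
#define tipc_addr(z, c, n)	(((z) << 24) | ((c) << 12) | (n))
#define tipc_zone(a)		((a) >> 24)
#define tipc_cluster(a)		(((a) >> 12) & 0xfff)
#define tipc_node(a)		((a) & 0xfff)

int main(void)
{
	unsigned int addr = tipc_addr(1, 1, 10);	/* <1.1.10>, illustrative */

	printf("addr 0x%08x = <%u.%u.%u>\n", addr,
	       tipc_zone(addr), tipc_cluster(addr), tipc_node(addr));
	/* in_own_cluster(): two addresses agree above bit 11 iff they share <Z.C> */
	printf("same cluster as <1.1.11>? %s\n",
	       !((addr ^ tipc_addr(1, 1, 11)) >> 12) ? "yes" : "no");
	return 0;
}
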
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
new file mode 100644
index 00000000000..9713d622efb
--- /dev/null
+++ b/net/tipc/bcast.c
@@ -0,0 +1,806 @@
+/*
+ * net/tipc/bcast.c: TIPC broadcast code
+ *
+ * Copyright (c) 2004-2006, Ericsson AB
+ * Copyright (c) 2004, Intel Corporation.
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "msg.h"
+#include "dbg.h"
+#include "link.h"
+#include "net.h"
+#include "node.h"
+#include "port.h"
+#include "addr.h"
+#include "node_subscr.h"
+#include "name_distr.h"
+#include "bearer.h"
+#include "name_table.h"
+#include "bcast.h"
+
+
+#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
+
+#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */
+
+#define BCLINK_LOG_BUF_SIZE 0
+
+/**
+ * struct bcbearer_pair - a pair of bearers used by broadcast link
+ * @primary: pointer to primary bearer
+ * @secondary: pointer to secondary bearer
+ *
+ * Bearers must have same priority and same set of reachable destinations
+ * to be paired.
+ */
+
+struct bcbearer_pair {
+ struct bearer *primary;
+ struct bearer *secondary;
+};
+
+/**
+ * struct bcbearer - bearer used by broadcast link
+ * @bearer: (non-standard) broadcast bearer structure
+ * @media: (non-standard) broadcast media structure
+ * @bpairs: array of bearer pairs
+ * @bpairs_temp: array of bearer pairs used during creation of "bpairs"
+ */
+
+struct bcbearer {
+ struct bearer bearer;
+ struct media media;
+ struct bcbearer_pair bpairs[MAX_BEARERS];
+ struct bcbearer_pair bpairs_temp[TIPC_NUM_LINK_PRI];
+};
+
+/**
+ * struct bclink - link used for broadcast messages
+ * @link: (non-standard) broadcast link structure
+ * @node: (non-standard) node structure representing b'cast link's peer node
+ *
+ * Handles sequence numbering, fragmentation, bundling, etc.
+ */
+
+struct bclink {
+ struct link link;
+ struct node node;
+};
+
+
+static struct bcbearer *bcbearer = NULL;
+static struct bclink *bclink = NULL;
+static struct link *bcl = NULL;
+static spinlock_t bc_lock = SPIN_LOCK_UNLOCKED;
+
+char bc_link_name[] = "multicast-link";
+
+
+static inline u32 buf_seqno(struct sk_buff *buf)
+{
+ return msg_seqno(buf_msg(buf));
+}
+
+static inline u32 bcbuf_acks(struct sk_buff *buf)
+{
+ return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
+}
+
+static inline void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
+{
+ TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
+}
+
+static inline void bcbuf_decr_acks(struct sk_buff *buf)
+{
+ bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
+}
+
+
+/**
+ * bclink_set_gap - set gap according to contents of current deferred pkt queue
+ *
+ * Called with 'node' locked, bc_lock unlocked
+ */
+
+static inline void bclink_set_gap(struct node *n_ptr)
+{
+ struct sk_buff *buf = n_ptr->bclink.deferred_head;
+
+ n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
+ mod(n_ptr->bclink.last_in);
+ if (unlikely(buf != NULL))
+ n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
+}
+
+/**
+ * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
+ *
+ * This mechanism endeavours to prevent all nodes in the network from trying
+ * to ACK or NACK at the same time.
+ *
+ * Note: TIPC uses a different trigger to distribute ACKs than it does to
+ * distribute NACKs, but tries to use the same spacing (divide by 16).
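+ * For example, a node whose tipc_own_tag is 5 replies only when
+ * (n % TIPC_MIN_LINK_WIN) == 5, so nodes holding different tags are
+ * spread across the sequence space instead of all replying at once.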
+ */
+
+static inline int bclink_ack_allowed(u32 n)
+{
+ return((n % TIPC_MIN_LINK_WIN) == tipc_own_tag);
+}
+
+
+/**
+ * bclink_retransmit_pkt - retransmit broadcast packets
+ * @after: sequence number of last packet to *not* retransmit
+ * @to: sequence number of last packet to retransmit
+ *
+ * Called with 'node' locked, bc_lock unlocked
+ */
+
+static void bclink_retransmit_pkt(u32 after, u32 to)
+{
+ struct sk_buff *buf;
+
+ spin_lock_bh(&bc_lock);
+ buf = bcl->first_out;
+ while (buf && less_eq(buf_seqno(buf), after)) {
+ buf = buf->next;
+ }
+ if (buf != NULL)
+ link_retransmit(bcl, buf, mod(to - after));
+ spin_unlock_bh(&bc_lock);
+}
+
+/**
+ * bclink_acknowledge - handle acknowledgement of broadcast packets
+ * @n_ptr: node that sent acknowledgement info
+ * @acked: broadcast sequence # that has been acknowledged
+ *
+ * Node is locked, bc_lock unlocked.
+ */
+
+void bclink_acknowledge(struct node *n_ptr, u32 acked)
+{
+ struct sk_buff *crs;
+ struct sk_buff *next;
+ unsigned int released = 0;
+
+ if (less_eq(acked, n_ptr->bclink.acked))
+ return;
+
+ spin_lock_bh(&bc_lock);
+
+ /* Skip over packets that node has previously acknowledged */
+
+ crs = bcl->first_out;
+ while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked)) {
+ crs = crs->next;
+ }
+
+ /* Update packets that node is now acknowledging */
+
+ while (crs && less_eq(buf_seqno(crs), acked)) {
+ next = crs->next;
+ bcbuf_decr_acks(crs);
+ if (bcbuf_acks(crs) == 0) {
+ bcl->first_out = next;
+ bcl->out_queue_size--;
+ buf_discard(crs);
+ released = 1;
+ }
+ crs = next;
+ }
+ n_ptr->bclink.acked = acked;
+
+ /* Try resolving broadcast link congestion, if necessary */
+
+ if (unlikely(bcl->next_out))
+ link_push_queue(bcl);
+ if (unlikely(released && !list_empty(&bcl->waiting_ports)))
+ link_wakeup_ports(bcl, 0);
+ spin_unlock_bh(&bc_lock);
+}
+
+/**
+ * bclink_send_ack - unicast an ACK msg
+ *
+ * net_lock and node lock set
+ */
+
+static void bclink_send_ack(struct node *n_ptr)
+{
+ struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
+
+ if (l_ptr != NULL)
+ link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+}
+
+/**
+ * bclink_send_nack- broadcast a NACK msg
+ *
+ * net_lock and node lock set
+ */
+
+static void bclink_send_nack(struct node *n_ptr)
+{
+ struct sk_buff *buf;
+ struct tipc_msg *msg;
+
+ if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
+ return;
+
+ buf = buf_acquire(INT_H_SIZE);
+ if (buf) {
+ msg = buf_msg(buf);
+ msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
+ TIPC_OK, INT_H_SIZE, n_ptr->addr);
+ msg_set_mc_netid(msg, tipc_net_id);
+ msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
+ msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
+ msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
+ msg_set_bcast_tag(msg, tipc_own_tag);
+
+ if (bearer_send(&bcbearer->bearer, buf, 0)) {
+ bcl->stats.sent_nacks++;
+ buf_discard(buf);
+ } else {
+ bearer_schedule(bcl->b_ptr, bcl);
+ bcl->proto_msg_queue = buf;
+ bcl->stats.bearer_congs++;
+ }
+
+ /*
+ * Ensure we don't send another NACK msg to the node
+ * until 16 more deferred messages arrive from it
+ * (i.e. helps prevent all nodes from NACK'ing at same time)
+ */
+
+ n_ptr->bclink.nack_sync = tipc_own_tag;
+ }
+}
+
+/**
+ * bclink_check_gap - send a NACK if a sequence gap exists
+ *
+ * net_lock and node lock set
+ */
+
+void bclink_check_gap(struct node *n_ptr, u32 last_sent)
+{
+ if (!n_ptr->bclink.supported ||
+ less_eq(last_sent, mod(n_ptr->bclink.last_in)))
+ return;
+
+ bclink_set_gap(n_ptr);
+ if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
+ n_ptr->bclink.gap_to = last_sent;
+ bclink_send_nack(n_ptr);
+}
+
+/**
+ * bclink_peek_nack - process a NACK msg meant for another node
+ *
+ * Only net_lock set.
+ */
+
+void bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
+{
+ struct node *n_ptr = node_find(dest);
+ u32 my_after, my_to;
+
+ if (unlikely(!n_ptr || !node_is_up(n_ptr)))
+ return;
+ node_lock(n_ptr);
+ /*
+ * Modify gap to suppress unnecessary NACKs from this node
+ */
+ my_after = n_ptr->bclink.gap_after;
+ my_to = n_ptr->bclink.gap_to;
+
+ if (less_eq(gap_after, my_after)) {
+ if (less(my_after, gap_to) && less(gap_to, my_to))
+ n_ptr->bclink.gap_after = gap_to;
+ else if (less_eq(my_to, gap_to))
+ n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
+ } else if (less_eq(gap_after, my_to)) {
+ if (less_eq(my_to, gap_to))
+ n_ptr->bclink.gap_to = gap_after;
+ } else {
+ /*
+ * Expand gap if missing bufs not in deferred queue:
+ */
+ struct sk_buff *buf = n_ptr->bclink.deferred_head;
+ u32 prev = n_ptr->bclink.gap_to;
+
+ for (; buf; buf = buf->next) {
+ u32 seqno = buf_seqno(buf);
+
+ if (mod(seqno - prev) != 1)
+ buf = NULL;
+ if (seqno == gap_after)
+ break;
+ prev = seqno;
+ }
+ if (buf == NULL)
+ n_ptr->bclink.gap_to = gap_after;
+ }
+ /*
+ * Some nodes may send a complementary NACK now:
+ */
+ if (bclink_ack_allowed(sender_tag + 1)) {
+ if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
+ bclink_send_nack(n_ptr);
+ bclink_set_gap(n_ptr);
+ }
+ }
+ node_unlock(n_ptr);
+}
+
+/**
+ * bclink_send_msg - broadcast a packet to all nodes in cluster
+ */
+
+int bclink_send_msg(struct sk_buff *buf)
+{
+ int res;
+
+ spin_lock_bh(&bc_lock);
+
+ res = link_send_buf(bcl, buf);
+ if (unlikely(res == -ELINKCONG))
+ buf_discard(buf);
+ else
+ bcl->stats.sent_info++;
+
+ if (bcl->out_queue_size > bcl->stats.max_queue_sz)
+ bcl->stats.max_queue_sz = bcl->out_queue_size;
+ bcl->stats.queue_sz_counts++;
+ bcl->stats.accu_queue_sz += bcl->out_queue_size;
+
+ spin_unlock_bh(&bc_lock);
+ return res;
+}
+
+/**
+ * bclink_recv_pkt - receive a broadcast packet, and deliver upwards
+ *
+ * net_lock is read_locked, no other locks set
+ */
+
+void bclink_recv_pkt(struct sk_buff *buf)
+{
+ struct tipc_msg *msg = buf_msg(buf);
+ struct node* node = node_find(msg_prevnode(msg));
+ u32 next_in;
+ u32 seqno;
+ struct sk_buff *deferred;
+
+ msg_dbg(msg, "<BC<<<");
+
+ if (unlikely(!node || !node_is_up(node) || !node->bclink.supported ||
+ (msg_mc_netid(msg) != tipc_net_id))) {
+ buf_discard(buf);
+ return;
+ }
+
+ if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
+ msg_dbg(msg, "<BCNACK<<<");
+ if (msg_destnode(msg) == tipc_own_addr) {
+ node_lock(node);
+ bclink_acknowledge(node, msg_bcast_ack(msg));
+ node_unlock(node);
+ bcl->stats.recv_nacks++;
+ bclink_retransmit_pkt(msg_bcgap_after(msg),
+ msg_bcgap_to(msg));
+ } else {
+ bclink_peek_nack(msg_destnode(msg),
+ msg_bcast_tag(msg),
+ msg_bcgap_after(msg),
+ msg_bcgap_to(msg));
+ }
+ buf_discard(buf);
+ return;
+ }
+
+ node_lock(node);
+receive:
+ deferred = node->bclink.deferred_head;
+ next_in = mod(node->bclink.last_in + 1);
+ seqno = msg_seqno(msg);
+
+ if (likely(seqno == next_in)) {
+ bcl->stats.recv_info++;
+ node->bclink.last_in++;
+ bclink_set_gap(node);
+ if (unlikely(bclink_ack_allowed(seqno))) {
+ bclink_send_ack(node);
+ bcl->stats.sent_acks++;
+ }
+ if (likely(msg_isdata(msg))) {
+ node_unlock(node);
+ port_recv_mcast(buf, NULL);
+ } else if (msg_user(msg) == MSG_BUNDLER) {
+ bcl->stats.recv_bundles++;
+ bcl->stats.recv_bundled += msg_msgcnt(msg);
+ node_unlock(node);
+ link_recv_bundle(buf);
+ } else if (msg_user(msg) == MSG_FRAGMENTER) {
+ bcl->stats.recv_fragments++;
+ if (link_recv_fragment(&node->bclink.defragm,
+ &buf, &msg))
+ bcl->stats.recv_fragmented++;
+ node_unlock(node);
+ net_route_msg(buf);
+ } else {
+ node_unlock(node);
+ net_route_msg(buf);
+ }
+ if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
+ node_lock(node);
+ buf = deferred;
+ msg = buf_msg(buf);
+ node->bclink.deferred_head = deferred->next;
+ goto receive;
+ }
+ return;
+ } else if (less(next_in, seqno)) {
+ u32 gap_after = node->bclink.gap_after;
+ u32 gap_to = node->bclink.gap_to;
+
+ if (link_defer_pkt(&node->bclink.deferred_head,
+ &node->bclink.deferred_tail,
+ buf)) {
+ node->bclink.nack_sync++;
+ bcl->stats.deferred_recv++;
+ if (seqno == mod(gap_after + 1))
+ node->bclink.gap_after = seqno;
+ else if (less(gap_after, seqno) && less(seqno, gap_to))
+ node->bclink.gap_to = seqno;
+ }
+ if (bclink_ack_allowed(node->bclink.nack_sync)) {
+ if (gap_to != gap_after)
+ bclink_send_nack(node);
+ bclink_set_gap(node);
+ }
+ } else {
+ bcl->stats.duplicates++;
+ buf_discard(buf);
+ }
+ node_unlock(node);
+}
+
+u32 bclink_get_last_sent(void)
+{
+ u32 last_sent = mod(bcl->next_out_no - 1);
+
+ if (bcl->next_out)
+ last_sent = mod(buf_seqno(bcl->next_out) - 1);
+ return last_sent;
+}
+
+u32 bclink_acks_missing(struct node *n_ptr)
+{
+ return (n_ptr->bclink.supported &&
+ (bclink_get_last_sent() != n_ptr->bclink.acked));
+}
+
+
+/**
+ * bcbearer_send - send a packet through the broadcast pseudo-bearer
+ *
+ * Send through as many bearers as necessary to reach all nodes
+ * that support TIPC multicasting.
+ *
+ * Returns 0 if packet sent successfully, non-zero if not
+ */
+
+int bcbearer_send(struct sk_buff *buf,
+ struct tipc_bearer *unused1,
+ struct tipc_media_addr *unused2)
+{
+ static int send_count = 0;
+
+ struct node_map remains;
+ struct node_map remains_new;
+ int bp_index;
+ int swap_time;
+
+ /* Prepare buffer for broadcasting (if first time trying to send it) */
+
+ if (likely(!msg_non_seq(buf_msg(buf)))) {
+ struct tipc_msg *msg;
+
+ assert(cluster_bcast_nodes.count != 0);
+ bcbuf_set_acks(buf, cluster_bcast_nodes.count);
+ msg = buf_msg(buf);
+ msg_set_non_seq(msg);
+ msg_set_mc_netid(msg, tipc_net_id);
+ }
+
+ /* Determine if bearer pairs should be swapped following this attempt */
+
+ if ((swap_time = (++send_count >= 10)))
+ send_count = 0;
+
+ /* Send buffer over bearers until all targets reached */
+
+ remains = cluster_bcast_nodes;
+
+ for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
+ struct bearer *p = bcbearer->bpairs[bp_index].primary;
+ struct bearer *s = bcbearer->bpairs[bp_index].secondary;
+
+ if (!p)
+ break; /* no more bearers to try */
+
+ nmap_diff(&remains, &p->nodes, &remains_new);
+ if (remains_new.count == remains.count)
+ continue; /* bearer pair doesn't add anything */
+
+ if (!p->publ.blocked &&
+ !p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
+ if (swap_time && s && !s->publ.blocked)
+ goto swap;
+ else
+ goto update;
+ }
+
+ if (!s || s->publ.blocked ||
+ s->media->send_msg(buf, &s->publ, &s->media->bcast_addr))
+ continue; /* unable to send using bearer pair */
+swap:
+ bcbearer->bpairs[bp_index].primary = s;
+ bcbearer->bpairs[bp_index].secondary = p;
+update:
+ if (remains_new.count == 0)
+ return TIPC_OK;
+
+ remains = remains_new;
+ }
+
+ /* Unable to reach all targets */
+
+ bcbearer->bearer.publ.blocked = 1;
+ bcl->stats.bearer_congs++;
+ return ~TIPC_OK;
+}
+
+/**
+ * bcbearer_sort - create sets of bearer pairs used by broadcast bearer
+ */
+
+void bcbearer_sort(void)
+{
+ struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
+ struct bcbearer_pair *bp_curr;
+ int b_index;
+ int pri;
+
+ spin_lock_bh(&bc_lock);
+
+ /* Group bearers by priority (can assume max of two per priority) */
+
+ memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
+
+ for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
+ struct bearer *b = &bearers[b_index];
+
+ if (!b->active || !b->nodes.count)
+ continue;
+
+ if (!bp_temp[b->priority].primary)
+ bp_temp[b->priority].primary = b;
+ else
+ bp_temp[b->priority].secondary = b;
+ }
+
+ /* Create array of bearer pairs for broadcasting */
+
+ bp_curr = bcbearer->bpairs;
+ memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));
+
+ for (pri = (TIPC_NUM_LINK_PRI - 1); pri >= 0; pri--) {
+
+ if (!bp_temp[pri].primary)
+ continue;
+
+ bp_curr->primary = bp_temp[pri].primary;
+
+ if (bp_temp[pri].secondary) {
+ if (nmap_equal(&bp_temp[pri].primary->nodes,
+ &bp_temp[pri].secondary->nodes)) {
+ bp_curr->secondary = bp_temp[pri].secondary;
+ } else {
+ bp_curr++;
+ bp_curr->primary = bp_temp[pri].secondary;
+ }
+ }
+
+ bp_curr++;
+ }
+
+ spin_unlock_bh(&bc_lock);
+}
+
+/**
+ * bcbearer_push - resolve bearer congestion
+ *
+ * Forces bclink to push out any unsent packets, until all packets are gone
+ * or congestion reoccurs.
+ * No locks set when function called
+ */
+
+void bcbearer_push(void)
+{
+ struct bearer *b_ptr;
+
+ spin_lock_bh(&bc_lock);
+ b_ptr = &bcbearer->bearer;
+ if (b_ptr->publ.blocked) {
+ b_ptr->publ.blocked = 0;
+ bearer_lock_push(b_ptr);
+ }
+ spin_unlock_bh(&bc_lock);
+}
+
+
+int bclink_stats(char *buf, const u32 buf_size)
+{
+ struct print_buf pb;
+
+ if (!bcl)
+ return 0;
+
+ printbuf_init(&pb, buf, buf_size);
+
+ spin_lock_bh(&bc_lock);
+
+ tipc_printf(&pb, "Link <%s>\n"
+ " Window:%u packets\n",
+ bcl->name, bcl->queue_limit[0]);
+ tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ bcl->stats.recv_info,
+ bcl->stats.recv_fragments,
+ bcl->stats.recv_fragmented,
+ bcl->stats.recv_bundles,
+ bcl->stats.recv_bundled);
+ tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ bcl->stats.sent_info,
+ bcl->stats.sent_fragments,
+ bcl->stats.sent_fragmented,
+ bcl->stats.sent_bundles,
+ bcl->stats.sent_bundled);
+ tipc_printf(&pb, " RX naks:%u defs:%u dups:%u\n",
+ bcl->stats.recv_nacks,
+ bcl->stats.deferred_recv,
+ bcl->stats.duplicates);
+ tipc_printf(&pb, " TX naks:%u acks:%u dups:%u\n",
+ bcl->stats.sent_nacks,
+ bcl->stats.sent_acks,
+ bcl->stats.retransmitted);
+ tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
+ bcl->stats.bearer_congs,
+ bcl->stats.link_congs,
+ bcl->stats.max_queue_sz,
+ bcl->stats.queue_sz_counts
+ ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
+ : 0);
+
+ spin_unlock_bh(&bc_lock);
+ return printbuf_validate(&pb);
+}
+
+int bclink_reset_stats(void)
+{
+ if (!bcl)
+ return -ENOPROTOOPT;
+
+ spin_lock_bh(&bc_lock);
+ memset(&bcl->stats, 0, sizeof(bcl->stats));
+ spin_unlock_bh(&bc_lock);
+ return TIPC_OK;
+}
+
+int bclink_set_queue_limits(u32 limit)
+{
+ if (!bcl)
+ return -ENOPROTOOPT;
+ if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
+ return -EINVAL;
+
+ spin_lock_bh(&bc_lock);
+ link_set_queue_limits(bcl, limit);
+ spin_unlock_bh(&bc_lock);
+ return TIPC_OK;
+}
+
+int bclink_init(void)
+{
+ bcbearer = kmalloc(sizeof(*bcbearer), GFP_ATOMIC);
+ bclink = kmalloc(sizeof(*bclink), GFP_ATOMIC);
+ if (!bcbearer || !bclink) {
+ nomem:
+ warn("Memory squeeze; Failed to create multicast link\n");
+ kfree(bcbearer);
+ bcbearer = NULL;
+ kfree(bclink);
+ bclink = NULL;
+ return -ENOMEM;
+ }
+
+ memset(bcbearer, 0, sizeof(struct bcbearer));
+ INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
+ bcbearer->bearer.media = &bcbearer->media;
+ bcbearer->media.send_msg = bcbearer_send;
+ sprintf(bcbearer->media.name, "tipc-multicast");
+
+ bcl = &bclink->link;
+ memset(bclink, 0, sizeof(struct bclink));
+ INIT_LIST_HEAD(&bcl->waiting_ports);
+ bcl->next_out_no = 1;
+ bclink->node.lock = SPIN_LOCK_UNLOCKED;
+ bcl->owner = &bclink->node;
+ bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
+ link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
+ bcl->b_ptr = &bcbearer->bearer;
+ bcl->state = WORKING_WORKING;
+ sprintf(bcl->name, bc_link_name);
+
+ if (BCLINK_LOG_BUF_SIZE) {
+ char *pb = kmalloc(BCLINK_LOG_BUF_SIZE, GFP_ATOMIC);
+
+ if (!pb)
+ goto nomem;
+ printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE);
+ }
+
+ return TIPC_OK;
+}
+
+void bclink_stop(void)
+{
+ spin_lock_bh(&bc_lock);
+ if (bcbearer) {
+ link_stop(bcl);
+ if (BCLINK_LOG_BUF_SIZE)
+ kfree(bcl->print_buf.buf);
+ bcl = NULL;
+ kfree(bclink);
+ bclink = NULL;
+ kfree(bcbearer);
+ bcbearer = NULL;
+ }
+ spin_unlock_bh(&bc_lock);
+}
+
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
new file mode 100644
index 00000000000..5430e524b4f
--- /dev/null
+++ b/net/tipc/bcast.h
@@ -0,0 +1,223 @@
+/*
+ * net/tipc/bcast.h: Include file for TIPC broadcast code
+ *
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_BCAST_H
+#define _TIPC_BCAST_H
+
+#define MAX_NODES 4096
+#define WSIZE 32
+
+/**
+ * struct node_map - set of node identifiers
+ * @count: # of nodes in set
+ * @map: bitmap of node identifiers that are in the set
+ */
+
+struct node_map {
+ u32 count;
+ u32 map[MAX_NODES / WSIZE];
+};
+
+
+#define PLSIZE 32
+
+/**
+ * struct port_list - set of node local destination ports
+ * @count: # of ports in set (only valid for first entry in list)
+ * @next: pointer to next entry in list
+ * @ports: array of port references
+ */
+
+struct port_list {
+ int count;
+ struct port_list *next;
+ u32 ports[PLSIZE];
+};
+
+
+struct node;
+
+extern char bc_link_name[];
+
+
+/**
+ * nmap_get - determine if node exists in a node map
+ */
+
+static inline int nmap_get(struct node_map *nm_ptr, u32 node)
+{
+ int n = tipc_node(node);
+ int w = n / WSIZE;
+ int b = n % WSIZE;
+
+ return nm_ptr->map[w] & (1 << b);
+}
+
+/**
+ * nmap_add - add a node to a node map
+ */
+
+static inline void nmap_add(struct node_map *nm_ptr, u32 node)
+{
+ int n = tipc_node(node);
+ int w = n / WSIZE;
+ u32 mask = (1 << (n % WSIZE));
+
+ if ((nm_ptr->map[w] & mask) == 0) {
+ nm_ptr->count++;
+ nm_ptr->map[w] |= mask;
+ }
+}
+
+/**
+ * nmap_remove - remove a node from a node map
+ */
+
+static inline void nmap_remove(struct node_map *nm_ptr, u32 node)
+{
+ int n = tipc_node(node);
+ int w = n / WSIZE;
+ u32 mask = (1 << (n % WSIZE));
+
+ if ((nm_ptr->map[w] & mask) != 0) {
+ nm_ptr->map[w] &= ~mask;
+ nm_ptr->count--;
+ }
+}
+
+/**
+ * nmap_equal - test for equality of node maps
+ */
+
+static inline int nmap_equal(struct node_map *nm_a, struct node_map *nm_b)
+{
+ return !memcmp(nm_a, nm_b, sizeof(*nm_a));
+}
+
+/**
+ * nmap_diff - find differences between node maps
+ * @nm_a: input node map A
+ * @nm_b: input node map B
+ * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
+ */
+
+static inline void nmap_diff(struct node_map *nm_a, struct node_map *nm_b,
+ struct node_map *nm_diff)
+{
+ int stop = sizeof(nm_a->map) / sizeof(u32);
+ int w;
+ int b;
+ u32 map;
+
+ memset(nm_diff, 0, sizeof(*nm_diff));
+ for (w = 0; w < stop; w++) {
+ map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
+ nm_diff->map[w] = map;
+ if (map != 0) {
+ for (b = 0 ; b < WSIZE; b++) {
+ if (map & (1 << b))
+ nm_diff->count++;
+ }
+ }
+ }
+}
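
A brief usage sketch of the bitmap helpers above (kernel context; the node addresses are illustrative, packed as <Z.C.N> with the node number in the low 12 bits):

/* Which nodes does bearer b cover that bearer a does not? (sketch) */
static void nmap_example(void)
{
	struct node_map a, b, b_only;

	memset(&a, 0, sizeof(a));
	memset(&b, 0, sizeof(b));

	nmap_add(&a, 0x01001001);	/* <1.1.1> */
	nmap_add(&a, 0x01001002);	/* <1.1.2> */
	nmap_add(&b, 0x01001002);	/* <1.1.2> */
	nmap_add(&b, 0x01001003);	/* <1.1.3> */

	nmap_diff(&b, &a, &b_only);	/* nodes of b that are not in a */
	/* now b_only.count == 1 and nmap_get(&b_only, 0x01001003) != 0 */
}
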
+
+/**
+ * port_list_add - add a port to a port list, ensuring no duplicates
+ */
+
+static inline void port_list_add(struct port_list *pl_ptr, u32 port)
+{
+ struct port_list *item = pl_ptr;
+ int i;
+ int item_sz = PLSIZE;
+ int cnt = pl_ptr->count;
+
+ for (; ; cnt -= item_sz, item = item->next) {
+ if (cnt < PLSIZE)
+ item_sz = cnt;
+ for (i = 0; i < item_sz; i++)
+ if (item->ports[i] == port)
+ return;
+ if (i < PLSIZE) {
+ item->ports[i] = port;
+ pl_ptr->count++;
+ return;
+ }
+ if (!item->next) {
+ item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
+ if (!item->next) {
+ warn("Memory squeeze: multicast destination port list is incomplete\n");
+ return;
+ }
+ item->next->next = NULL;
+ }
+ }
+}
+
+/**
+ * port_list_free - free dynamically created entries in port_list chain
+ *
+ * Note: First item is on stack, so it doesn't need to be released
+ */
+
+static inline void port_list_free(struct port_list *pl_ptr)
+{
+ struct port_list *item;
+ struct port_list *next;
+
+ for (item = pl_ptr->next; item; item = next) {
+ next = item->next;
+ kfree(item);
+ }
+}
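
And a matching sketch for the port list helpers: the first entry lives on the caller's stack, overflow entries are kmalloc'd by port_list_add() and released by port_list_free() (the port references are illustrative):

static void port_list_example(void)
{
	struct port_list dports = { 0, NULL, { 0 } };
	u32 ref;

	for (ref = 100; ref < 140; ref++)	/* 40 ports > PLSIZE: one extra entry */
		port_list_add(&dports, ref);
	port_list_add(&dports, 100);		/* duplicate; silently ignored */

	/* ... deliver the multicast message to each port in the chain ... */

	port_list_free(&dports);		/* frees only the kmalloc'd entries */
}
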
+
+
+int bclink_init(void);
+void bclink_stop(void);
+void bclink_acknowledge(struct node *n_ptr, u32 acked);
+int bclink_send_msg(struct sk_buff *buf);
+void bclink_recv_pkt(struct sk_buff *buf);
+u32 bclink_get_last_sent(void);
+u32 bclink_acks_missing(struct node *n_ptr);
+void bclink_check_gap(struct node *n_ptr, u32 seqno);
+int bclink_stats(char *stats_buf, const u32 buf_size);
+int bclink_reset_stats(void);
+int bclink_set_queue_limits(u32 limit);
+void bcbearer_sort(void);
+void bcbearer_push(void);
+
+#endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
new file mode 100644
index 00000000000..3dd19fdc5a2
--- /dev/null
+++ b/net/tipc/bearer.c
@@ -0,0 +1,692 @@
+/*
+ * net/tipc/bearer.c: TIPC bearer code
+ *
+ * Copyright (c) 1996-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "config.h"
+#include "dbg.h"
+#include "bearer.h"
+#include "link.h"
+#include "port.h"
+#include "discover.h"
+#include "bcast.h"
+
+#define MAX_ADDR_STR 32
+
+static struct media *media_list = 0;
+static u32 media_count = 0;
+
+struct bearer *bearers = 0;
+
+/**
+ * media_name_valid - validate media name
+ *
+ * Returns 1 if media name is valid, otherwise 0.
+ */
+
+static int media_name_valid(const char *name)
+{
+ u32 len;
+
+ len = strlen(name);
+ if ((len + 1) > TIPC_MAX_MEDIA_NAME)
+ return 0;
+ return (strspn(name, tipc_alphabet) == len);
+}
+
+/**
+ * media_find - locates specified media object by name
+ */
+
+static struct media *media_find(const char *name)
+{
+ struct media *m_ptr;
+ u32 i;
+
+ for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
+ if (!strcmp(m_ptr->name, name))
+ return m_ptr;
+ }
+ return 0;
+}
+
+/**
+ * tipc_register_media - register a media type
+ *
+ * Bearers for this media type must be activated separately at a later stage.
+ */
+
+int tipc_register_media(u32 media_type,
+ char *name,
+ int (*enable)(struct tipc_bearer *),
+ void (*disable)(struct tipc_bearer *),
+ int (*send_msg)(struct sk_buff *,
+ struct tipc_bearer *,
+ struct tipc_media_addr *),
+ char *(*addr2str)(struct tipc_media_addr *a,
+ char *str_buf, int str_size),
+ struct tipc_media_addr *bcast_addr,
+ const u32 bearer_priority,
+ const u32 link_tolerance, /* [ms] */
+ const u32 send_window_limit)
+{
+ struct media *m_ptr;
+ u32 media_id;
+ u32 i;
+ int res = -EINVAL;
+
+ write_lock_bh(&net_lock);
+ if (!media_list)
+ goto exit;
+
+ if (!media_name_valid(name)) {
+ warn("Media registration error: illegal name <%s>\n", name);
+ goto exit;
+ }
+ if (!bcast_addr) {
+ warn("Media registration error: no broadcast address supplied\n");
+ goto exit;
+ }
+ if (bearer_priority >= TIPC_NUM_LINK_PRI) {
+ warn("Media registration error: priority %u\n", bearer_priority);
+ goto exit;
+ }
+ if ((link_tolerance < TIPC_MIN_LINK_TOL) ||
+ (link_tolerance > TIPC_MAX_LINK_TOL)) {
+ warn("Media registration error: tolerance %u\n", link_tolerance);
+ goto exit;
+ }
+
+ media_id = media_count++;
+ if (media_id >= MAX_MEDIA) {
+ warn("Attempt to register more than %u media\n", MAX_MEDIA);
+ media_count--;
+ goto exit;
+ }
+ for (i = 0; i < media_id; i++) {
+ if (media_list[i].type_id == media_type) {
+ warn("Attempt to register second media with type %u\n",
+ media_type);
+ media_count--;
+ goto exit;
+ }
+ if (!strcmp(name, media_list[i].name)) {
+ warn("Attempt to re-register media name <%s>\n", name);
+ media_count--;
+ goto exit;
+ }
+ }
+
+ m_ptr = &media_list[media_id];
+ m_ptr->type_id = media_type;
+ m_ptr->send_msg = send_msg;
+ m_ptr->enable_bearer = enable;
+ m_ptr->disable_bearer = disable;
+ m_ptr->addr2str = addr2str;
+ memcpy(&m_ptr->bcast_addr, bcast_addr, sizeof(*bcast_addr));
+ m_ptr->bcast = 1;
+ strcpy(m_ptr->name, name);
+ m_ptr->priority = bearer_priority;
+ m_ptr->tolerance = link_tolerance;
+ m_ptr->window = send_window_limit;
+ dbg("Media <%s> registered\n", name);
+ res = 0;
+exit:
+ write_unlock_bh(&net_lock);
+ return res;
+}
+
+/**
+ * media_addr_printf - record media address in print buffer
+ */
+
+void media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
+{
+ struct media *m_ptr;
+ u32 media_type;
+ u32 i;
+
+ media_type = ntohl(a->type);
+ for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
+ if (m_ptr->type_id == media_type)
+ break;
+ }
+
+ if ((i < media_count) && (m_ptr->addr2str != NULL)) {
+ char addr_str[MAX_ADDR_STR];
+
+ tipc_printf(pb, "%s(%s) ", m_ptr->name,
+ m_ptr->addr2str(a, addr_str, sizeof(addr_str)));
+ } else {
+ unchar *addr = (unchar *)&a->dev_addr;
+
+ tipc_printf(pb, "UNKNOWN(%u):", media_type);
+ for (i = 0; i < (sizeof(*a) - sizeof(a->type)); i++) {
+ tipc_printf(pb, "%02x ", addr[i]);
+ }
+ }
+}
+
+/**
+ * media_get_names - record names of registered media in buffer
+ */
+
+struct sk_buff *media_get_names(void)
+{
+ struct sk_buff *buf;
+ struct media *m_ptr;
+ int i;
+
+ buf = cfg_reply_alloc(MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME));
+ if (!buf)
+ return NULL;
+
+ read_lock_bh(&net_lock);
+ for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
+ cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, m_ptr->name,
+ strlen(m_ptr->name) + 1);
+ }
+ read_unlock_bh(&net_lock);
+ return buf;
+}
+
+/**
+ * bearer_name_validate - validate & (optionally) deconstruct bearer name
+ * @name - ptr to bearer name string
+ * @name_parts - ptr to area for bearer name components (or NULL if not needed)
+ *
+ * Returns 1 if bearer name is valid, otherwise 0.
+ */
+
+static int bearer_name_validate(const char *name,
+ struct bearer_name *name_parts)
+{
+ char name_copy[TIPC_MAX_BEARER_NAME];
+ char *media_name;
+ char *if_name;
+ u32 media_len;
+ u32 if_len;
+
+ /* copy bearer name & ensure length is OK */
+
+ name_copy[TIPC_MAX_BEARER_NAME - 1] = 0;
+ /* need above in case non-Posix strncpy() doesn't pad with nulls */
+ strncpy(name_copy, name, TIPC_MAX_BEARER_NAME);
+ if (name_copy[TIPC_MAX_BEARER_NAME - 1] != 0)
+ return 0;
+
+ /* ensure all component parts of bearer name are present */
+
+ media_name = name_copy;
+ if ((if_name = strchr(media_name, ':')) == NULL)
+ return 0;
+ *(if_name++) = 0;
+ media_len = if_name - media_name;
+ if_len = strlen(if_name) + 1;
+
+ /* validate component parts of bearer name */
+
+ if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) ||
+ (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) ||
+ (strspn(media_name, tipc_alphabet) != (media_len - 1)) ||
+ (strspn(if_name, tipc_alphabet) != (if_len - 1)))
+ return 0;
+
+ /* return bearer name components, if necessary */
+
+ if (name_parts) {
+ strcpy(name_parts->media_name, media_name);
+ strcpy(name_parts->if_name, if_name);
+ }
+ return 1;
+}
+
+/**
+ * bearer_find - locates bearer object with matching bearer name
+ */
+
+static struct bearer *bearer_find(const char *name)
+{
+ struct bearer *b_ptr;
+ u32 i;
+
+ for (i = 0, b_ptr = bearers; i < MAX_BEARERS; i++, b_ptr++) {
+ if (b_ptr->active && (!strcmp(b_ptr->publ.name, name)))
+ return b_ptr;
+ }
+ return 0;
+}
+
+/**
+ * bearer_find_interface - locates bearer object with matching interface name
+ */
+
+struct bearer *bearer_find_interface(const char *if_name)
+{
+ struct bearer *b_ptr;
+ char *b_if_name;
+ u32 i;
+
+ for (i = 0, b_ptr = bearers; i < MAX_BEARERS; i++, b_ptr++) {
+ if (!b_ptr->active)
+ continue;
+ b_if_name = strchr(b_ptr->publ.name, ':') + 1;
+ if (!strcmp(b_if_name, if_name))
+ return b_ptr;
+ }
+ return 0;
+}
+
+/**
+ * bearer_get_names - record names of bearers in buffer
+ */
+
+struct sk_buff *bearer_get_names(void)
+{
+ struct sk_buff *buf;
+ struct media *m_ptr;
+ struct bearer *b_ptr;
+ int i, j;
+
+ buf = cfg_reply_alloc(MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME));
+ if (!buf)
+ return NULL;
+
+ read_lock_bh(&net_lock);
+ for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
+ for (j = 0; j < MAX_BEARERS; j++) {
+ b_ptr = &bearers[j];
+ if (b_ptr->active && (b_ptr->media == m_ptr)) {
+ cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME,
+ b_ptr->publ.name,
+ strlen(b_ptr->publ.name) + 1);
+ }
+ }
+ }
+ read_unlock_bh(&net_lock);
+ return buf;
+}
+
+void bearer_add_dest(struct bearer *b_ptr, u32 dest)
+{
+ nmap_add(&b_ptr->nodes, dest);
+ disc_update_link_req(b_ptr->link_req);
+ bcbearer_sort();
+}
+
+void bearer_remove_dest(struct bearer *b_ptr, u32 dest)
+{
+ nmap_remove(&b_ptr->nodes, dest);
+ disc_update_link_req(b_ptr->link_req);
+ bcbearer_sort();
+}
+
+/*
+ * bearer_push(): Resolve bearer congestion. Force the waiting
+ * links to push out their unsent packets, one packet per link
+ * per iteration, until all packets are gone or congestion reoccurs.
+ * 'net_lock' is read_locked when this function is called;
+ * bearer.lock must be taken before calling.
+ * Returns binary true(1) or false(0)
+ */
+static int bearer_push(struct bearer *b_ptr)
+{
+ u32 res = TIPC_OK;
+ struct link *ln, *tln;
+
+ if (b_ptr->publ.blocked)
+ return 0;
+
+ while (!list_empty(&b_ptr->cong_links) && (res != PUSH_FAILED)) {
+ list_for_each_entry_safe(ln, tln, &b_ptr->cong_links, link_list) {
+ res = link_push_packet(ln);
+ if (res == PUSH_FAILED)
+ break;
+ if (res == PUSH_FINISHED)
+ list_move_tail(&ln->link_list, &b_ptr->links);
+ }
+ }
+ return list_empty(&b_ptr->cong_links);
+}
+
+void bearer_lock_push(struct bearer *b_ptr)
+{
+ int res;
+
+ spin_lock_bh(&b_ptr->publ.lock);
+ res = bearer_push(b_ptr);
+ spin_unlock_bh(&b_ptr->publ.lock);
+ if (res)
+ bcbearer_push();
+}
+
+
+/*
+ * tipc_continue(): re-enable sending of new requests after bearer
+ * congestion or blocking has ended. See bearer_send().
+ */
+void tipc_continue(struct tipc_bearer *tb_ptr)
+{
+ struct bearer *b_ptr = (struct bearer *)tb_ptr;
+
+ spin_lock_bh(&b_ptr->publ.lock);
+ b_ptr->continue_count++;
+ if (!list_empty(&b_ptr->cong_links))
+ k_signal((Handler)bearer_lock_push, (unsigned long)b_ptr);
+ b_ptr->publ.blocked = 0;
+ spin_unlock_bh(&b_ptr->publ.lock);
+}
+
+/*
+ * Schedule link for sending of messages after the bearer
+ * has been deblocked by tipc_continue(). This method is called
+ * when somebody tries to send a message via this link while
+ * the bearer is congested. 'net_lock' is in read_lock here,
+ * bearer.lock is already held by the caller.
+ */
+
+static void bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_ptr)
+{
+ list_move_tail(&l_ptr->link_list, &b_ptr->cong_links);
+}
+
+/*
+ * Schedule link for sending of messages after the bearer
+ * has been deblocked by tipc_continue(). This method is called
+ * when somebody tries to send a message via this link while
+ * the bearer is congested. 'net_lock' is in read_lock here,
+ * bearer.lock is not held by the caller.
+ */
+
+void bearer_schedule(struct bearer *b_ptr, struct link *l_ptr)
+{
+ spin_lock_bh(&b_ptr->publ.lock);
+ bearer_schedule_unlocked(b_ptr, l_ptr);
+ spin_unlock_bh(&b_ptr->publ.lock);
+}
+
+
+/*
+ * bearer_resolve_congestion(): Check if there is bearer congestion,
+ * and if there is, try to resolve it before returning.
+ * 'net_lock' is read_locked when this function is called
+ */
+int bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
+{
+ int res = 1;
+
+ if (list_empty(&b_ptr->cong_links))
+ return 1;
+ spin_lock_bh(&b_ptr->publ.lock);
+ if (!bearer_push(b_ptr)) {
+ bearer_schedule_unlocked(b_ptr, l_ptr);
+ res = 0;
+ }
+ spin_unlock_bh(&b_ptr->publ.lock);
+ return res;
+}
+
+
+/**
+ * tipc_enable_bearer - enable bearer with the given name
+ */
+
+int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
+{
+ struct bearer *b_ptr;
+ struct media *m_ptr;
+ struct bearer_name b_name;
+ char addr_string[16];
+ u32 bearer_id;
+ u32 with_this_prio;
+ u32 i;
+ int res = -EINVAL;
+
+ if (tipc_mode != TIPC_NET_MODE)
+ return -ENOPROTOOPT;
+ if (!bearer_name_validate(name, &b_name) ||
+ !addr_domain_valid(bcast_scope) ||
+ !in_scope(bcast_scope, tipc_own_addr) ||
+ (priority > TIPC_NUM_LINK_PRI))
+ return -EINVAL;
+
+ write_lock_bh(&net_lock);
+ if (!bearers)
+ goto failed;
+
+ m_ptr = media_find(b_name.media_name);
+ if (!m_ptr) {
+ warn("No media <%s>\n", b_name.media_name);
+ goto failed;
+ }
+ if (priority == TIPC_NUM_LINK_PRI)
+ priority = m_ptr->priority;
+
+restart:
+ bearer_id = MAX_BEARERS;
+ with_this_prio = 1;
+ for (i = MAX_BEARERS; i-- != 0; ) {
+ if (!bearers[i].active) {
+ bearer_id = i;
+ continue;
+ }
+ if (!strcmp(name, bearers[i].publ.name)) {
+ warn("Bearer <%s> already enabled\n", name);
+ goto failed;
+ }
+ if ((bearers[i].priority == priority) &&
+ (++with_this_prio > 2)) {
+ if (priority-- == 0) {
+ warn("Third bearer <%s> with priority %u, unable to lower to %u\n",
+ name, priority + 1, priority);
+ goto failed;
+ }
+ warn("Third bearer <%s> with priority %u, lowering to %u\n",
+ name, priority + 1, priority);
+ goto restart;
+ }
+ }
+ if (bearer_id >= MAX_BEARERS) {
+ warn("Attempt to enable more than %d bearers\n", MAX_BEARERS);
+ goto failed;
+ }
+
+ b_ptr = &bearers[bearer_id];
+ memset(b_ptr, 0, sizeof(struct bearer));
+
+ strcpy(b_ptr->publ.name, name);
+ res = m_ptr->enable_bearer(&b_ptr->publ);
+ if (res) {
+ warn("Failed to enable bearer <%s>\n", name);
+ goto failed;
+ }
+
+ b_ptr->identity = bearer_id;
+ b_ptr->media = m_ptr;
+ b_ptr->net_plane = bearer_id + 'A';
+ b_ptr->active = 1;
+ b_ptr->detect_scope = bcast_scope;
+ b_ptr->priority = priority;
+ INIT_LIST_HEAD(&b_ptr->cong_links);
+ INIT_LIST_HEAD(&b_ptr->links);
+ if (m_ptr->bcast) {
+ b_ptr->link_req = disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
+ bcast_scope, 2);
+ }
+ b_ptr->publ.lock = SPIN_LOCK_UNLOCKED;
+ write_unlock_bh(&net_lock);
+ info("Enabled bearer <%s>, discovery domain %s\n",
+ name, addr_string_fill(addr_string, bcast_scope));
+ return 0;
+failed:
+ write_unlock_bh(&net_lock);
+ return res;
+}
+
+/**
+ * tipc_block_bearer(): Block the bearer with the given name,
+ * and reset all its links
+ */
+
+int tipc_block_bearer(const char *name)
+{
+ struct bearer *b_ptr = 0;
+ struct link *l_ptr;
+ struct link *temp_l_ptr;
+
+ if (tipc_mode != TIPC_NET_MODE)
+ return -ENOPROTOOPT;
+
+ read_lock_bh(&net_lock);
+ b_ptr = bearer_find(name);
+ if (!b_ptr) {
+ warn("Attempt to block unknown bearer <%s>\n", name);
+ read_unlock_bh(&net_lock);
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&b_ptr->publ.lock);
+ b_ptr->publ.blocked = 1;
+ list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
+ struct node *n_ptr = l_ptr->owner;
+
+ spin_lock_bh(&n_ptr->lock);
+ link_reset(l_ptr);
+ spin_unlock_bh(&n_ptr->lock);
+ }
+ spin_unlock_bh(&b_ptr->publ.lock);
+ read_unlock_bh(&net_lock);
+ info("Blocked bearer <%s>\n", name);
+ return TIPC_OK;
+}
+
+/**
+ * bearer_disable - disable the bearer with the given name and delete its links
+ *
+ * Note: This routine assumes caller holds net_lock.
+ */
+
+static int bearer_disable(const char *name)
+{
+ struct bearer *b_ptr;
+ struct link *l_ptr;
+ struct link *temp_l_ptr;
+
+ if (tipc_mode != TIPC_NET_MODE)
+ return -ENOPROTOOPT;
+
+ b_ptr = bearer_find(name);
+ if (!b_ptr) {
+ warn("Attempt to disable unknown bearer <%s>\n", name);
+ return -EINVAL;
+ }
+
+ disc_stop_link_req(b_ptr->link_req);
+ spin_lock_bh(&b_ptr->publ.lock);
+ b_ptr->link_req = NULL;
+ b_ptr->publ.blocked = 1;
+ if (b_ptr->media->disable_bearer) {
+ spin_unlock_bh(&b_ptr->publ.lock);
+ write_unlock_bh(&net_lock);
+ b_ptr->media->disable_bearer(&b_ptr->publ);
+ write_lock_bh(&net_lock);
+ spin_lock_bh(&b_ptr->publ.lock);
+ }
+ list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
+ link_delete(l_ptr);
+ }
+ spin_unlock_bh(&b_ptr->publ.lock);
+ info("Disabled bearer <%s>\n", name);
+ memset(b_ptr, 0, sizeof(struct bearer));
+ return TIPC_OK;
+}
+
+int tipc_disable_bearer(const char *name)
+{
+ int res;
+
+ write_lock_bh(&net_lock);
+ res = bearer_disable(name);
+ write_unlock_bh(&net_lock);
+ return res;
+}
+
+
+
+int bearer_init(void)
+{
+ int res;
+
+ write_lock_bh(&net_lock);
+ bearers = kmalloc(MAX_BEARERS * sizeof(struct bearer), GFP_ATOMIC);
+ media_list = kmalloc(MAX_MEDIA * sizeof(struct media), GFP_ATOMIC);
+ if (bearers && media_list) {
+ memset(bearers, 0, MAX_BEARERS * sizeof(struct bearer));
+ memset(media_list, 0, MAX_MEDIA * sizeof(struct media));
+ res = TIPC_OK;
+ } else {
+ kfree(bearers);
+ kfree(media_list);
+ bearers = 0;
+ media_list = 0;
+ res = -ENOMEM;
+ }
+ write_unlock_bh(&net_lock);
+ return res;
+}
+
+void bearer_stop(void)
+{
+ u32 i;
+
+ if (!bearers)
+ return;
+
+ for (i = 0; i < MAX_BEARERS; i++) {
+ if (bearers[i].active)
+ bearers[i].publ.blocked = 1;
+ }
+ for (i = 0; i < MAX_BEARERS; i++) {
+ if (bearers[i].active)
+ bearer_disable(bearers[i].publ.name);
+ }
+ kfree(bearers);
+ kfree(media_list);
+ bearers = 0;
+ media_list = 0;
+ media_count = 0;
+}
+
+
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
new file mode 100644
index 00000000000..21e63d3f018
--- /dev/null
+++ b/net/tipc/bearer.h
@@ -0,0 +1,172 @@
+/*
+ * net/tipc/bearer.h: Include file for TIPC bearer code
+ *
+ * Copyright (c) 1996-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_BEARER_H
+#define _TIPC_BEARER_H
+
+#include <net/tipc/tipc_bearer.h>
+#include "bcast.h"
+
+#define MAX_BEARERS 8
+#define MAX_MEDIA 4
+
+
+/**
+ * struct media - TIPC media information available to internal users
+ * @send_msg: routine which handles buffer transmission
+ * @enable_bearer: routine which enables a bearer
+ * @disable_bearer: routine which disables a bearer
+ * @addr2str: routine which converts bearer's address to string form
+ * @bcast_addr: media address used in broadcasting
+ * @bcast: non-zero if media supports broadcasting [currently mandatory]
+ * @priority: default link (and bearer) priority
+ * @tolerance: default time (in ms) before declaring link failure
+ * @window: default window (in packets) before declaring link congestion
+ * @type_id: TIPC media identifier [defined in tipc_bearer.h]
+ * @name: media name
+ */
+
+struct media {
+ int (*send_msg)(struct sk_buff *buf,
+ struct tipc_bearer *b_ptr,
+ struct tipc_media_addr *dest);
+ int (*enable_bearer)(struct tipc_bearer *b_ptr);
+ void (*disable_bearer)(struct tipc_bearer *b_ptr);
+ char *(*addr2str)(struct tipc_media_addr *a,
+ char *str_buf, int str_size);
+ struct tipc_media_addr bcast_addr;
+ int bcast;
+ u32 priority;
+ u32 tolerance;
+ u32 window;
+ u32 type_id;
+ char name[TIPC_MAX_MEDIA_NAME];
+};
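+
+/*
+ * Illustrative sketch (not part of the original code): a hypothetical
+ * media type would fill in the fields documented above roughly as
+ * follows. The handler names and numeric values are examples only.
+ */
+#if 0
+static struct media example_media = {
+ .send_msg = example_send_msg, /* transmits one buffer */
+ .enable_bearer = example_enable_bearer, /* brings up one bearer instance */
+ .disable_bearer = example_disable_bearer,
+ .addr2str = example_addr2str,
+ .bcast = 1, /* broadcast support is currently mandatory */
+ .priority = 10, /* assumed default link priority */
+ .tolerance = 1500, /* ms before declaring link failure */
+ .window = 50, /* packets before declaring congestion */
+ .name = "example",
+};
+#endif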
+
+/**
+ * struct bearer - TIPC bearer information available to internal users
+ * @publ: bearer information available to privileged users
+ * @media: ptr to media structure associated with bearer
+ * @priority: default link priority for bearer
+ * @detect_scope: network address mask used during automatic link creation
+ * @identity: array index of this bearer within TIPC bearer array
+ * @link_req: ptr to (optional) structure making periodic link setup requests
+ * @links: list of non-congested links associated with bearer
+ * @cong_links: list of congested links associated with bearer
+ * @continue_count: # of times bearer has resumed after congestion or blocking
+ * @active: non-zero if bearer structure represents a bearer
+ * @net_plane: network plane ('A' through 'H') currently associated with bearer
+ * @nodes: indicates which nodes in cluster can be reached through bearer
+ */
+
+struct bearer {
+ struct tipc_bearer publ;
+ struct media *media;
+ u32 priority;
+ u32 detect_scope;
+ u32 identity;
+ struct link_req *link_req;
+ struct list_head links;
+ struct list_head cong_links;
+ u32 continue_count;
+ int active;
+ char net_plane;
+ struct node_map nodes;
+};
+
+struct bearer_name {
+ char media_name[TIPC_MAX_MEDIA_NAME];
+ char if_name[TIPC_MAX_IF_NAME];
+};
+
+struct link;
+
+extern struct bearer *bearers;
+
+void media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
+struct sk_buff *media_get_names(void);
+
+struct sk_buff *bearer_get_names(void);
+void bearer_add_dest(struct bearer *b_ptr, u32 dest);
+void bearer_remove_dest(struct bearer *b_ptr, u32 dest);
+void bearer_schedule(struct bearer *b_ptr, struct link *l_ptr);
+struct bearer *bearer_find_interface(const char *if_name);
+int bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr);
+int bearer_init(void);
+void bearer_stop(void);
+int bearer_broadcast(struct sk_buff *buf, struct tipc_bearer *b_ptr,
+ struct tipc_media_addr *dest);
+void bearer_lock_push(struct bearer *b_ptr);
+
+
+/**
+ * bearer_send - sends buffer to destination over bearer
+ *
+ * Returns true (1) if successful, or false (0) if unable to send
+ *
+ * IMPORTANT:
+ * The media send routine must not alter the buffer being passed in
+ * as it may be needed for later retransmission!
+ *
+ * If the media send routine returns a non-zero value (indicating that
+ * it was unable to send the buffer), it must:
+ * 1) mark the bearer as blocked,
+ * 2) call tipc_continue() once the bearer is able to send again.
+ * Media types that are unable to meet these two criteria must ensure their
+ * send routine always returns success -- even if the buffer was not sent --
+ * and let TIPC's link code deal with the undelivered message.
+ */
+
+static inline int bearer_send(struct bearer *b_ptr, struct sk_buff *buf,
+ struct tipc_media_addr *dest)
+{
+ return !b_ptr->media->send_msg(buf, &b_ptr->publ, dest);
+}
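+
+/*
+ * Illustrative sketch (not part of the original code): a media send
+ * routine that follows the contract described above. The hardware queue
+ * test and transmit helpers are hypothetical placeholders.
+ */
+#if 0
+static int example_send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
+ struct tipc_media_addr *dest)
+{
+ if (example_hw_queue_full()) {
+ /* unable to send: mark bearer blocked; tipc_continue() must be
+ called later, once the queue has drained */
+ tb_ptr->blocked = 1;
+ return 1;
+ }
+ /* transmit a clone so that 'buf' is left untouched for retransmission */
+ example_hw_transmit(skb_clone(buf, GFP_ATOMIC), dest);
+ return 0;
+}
+#endif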
+
+/**
+ * bearer_congested - determines if bearer is currently congested
+ */
+
+static inline int bearer_congested(struct bearer *b_ptr, struct link *l_ptr)
+{
+ if (unlikely(b_ptr->publ.blocked))
+ return 1;
+ if (likely(list_empty(&b_ptr->cong_links)))
+ return 0;
+ return !bearer_resolve_congestion(b_ptr, l_ptr);
+}
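+
+/*
+ * Illustrative sketch (not part of the original code): a caller in the
+ * link layer would typically test for congestion before handing a buffer
+ * to the bearer, queueing it for later when the bearer cannot accept
+ * more traffic. The queueing helper is hypothetical.
+ */
+#if 0
+static int example_try_send(struct bearer *b_ptr, struct link *l_ptr,
+ struct sk_buff *buf, struct tipc_media_addr *dest)
+{
+ if (bearer_congested(b_ptr, l_ptr)) {
+ example_queue_for_later(l_ptr, buf);
+ return 0;
+ }
+ return bearer_send(b_ptr, buf, dest);
+}
+#endif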
+
+#endif
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
new file mode 100644
index 00000000000..f0f7bac51d4
--- /dev/null
+++ b/net/tipc/cluster.c
@@ -0,0 +1,576 @@
+/*
+ * net/tipc/cluster.c: TIPC cluster management routines
+ *
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "cluster.h"
+#include "addr.h"
+#include "node_subscr.h"
+#include "link.h"
+#include "node.h"
+#include "net.h"
+#include "msg.h"
+#include "bearer.h"
+
+void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf,
+ u32 lower, u32 upper);
+struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest);
+
+struct node **local_nodes = 0;
+struct node_map cluster_bcast_nodes = {0,{0,}};
+u32 highest_allowed_slave = 0;
+
+struct cluster *cluster_create(u32 addr)
+{
+ struct _zone *z_ptr;
+ struct cluster *c_ptr;
+ int max_nodes;
+ int alloc;
+
+ c_ptr = (struct cluster *)kmalloc(sizeof(*c_ptr), GFP_ATOMIC);
+ if (c_ptr == NULL)
+ return 0;
+ memset(c_ptr, 0, sizeof(*c_ptr));
+
+ c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
+ if (in_own_cluster(addr))
+ max_nodes = LOWEST_SLAVE + tipc_max_slaves;
+ else
+ max_nodes = tipc_max_nodes + 1;
+ alloc = sizeof(void *) * (max_nodes + 1);
+ c_ptr->nodes = (struct node **)kmalloc(alloc, GFP_ATOMIC);
+ if (c_ptr->nodes == NULL) {
+ kfree(c_ptr);
+ return 0;
+ }
+ memset(c_ptr->nodes, 0, alloc);
+ if (in_own_cluster(addr))
+ local_nodes = c_ptr->nodes;
+ c_ptr->highest_slave = LOWEST_SLAVE - 1;
+ c_ptr->highest_node = 0;
+
+ z_ptr = zone_find(tipc_zone(addr));
+ if (z_ptr == NULL) {
+ z_ptr = zone_create(addr);
+ }
+ if (z_ptr != NULL) {
+ zone_attach_cluster(z_ptr, c_ptr);
+ c_ptr->owner = z_ptr;
+ }
+ else {
+ kfree(c_ptr);
+ c_ptr = 0;
+ }
+
+ return c_ptr;
+}
+
+void cluster_delete(struct cluster *c_ptr)
+{
+ u32 n_num;
+
+ if (!c_ptr)
+ return;
+ for (n_num = 1; n_num <= c_ptr->highest_node; n_num++) {
+ node_delete(c_ptr->nodes[n_num]);
+ }
+ for (n_num = LOWEST_SLAVE; n_num <= c_ptr->highest_slave; n_num++) {
+ node_delete(c_ptr->nodes[n_num]);
+ }
+ kfree(c_ptr->nodes);
+ kfree(c_ptr);
+}
+
+u32 cluster_next_node(struct cluster *c_ptr, u32 addr)
+{
+ struct node *n_ptr;
+ u32 n_num = tipc_node(addr) + 1;
+
+ if (!c_ptr)
+ return addr;
+ for (; n_num <= c_ptr->highest_node; n_num++) {
+ n_ptr = c_ptr->nodes[n_num];
+ if (n_ptr && node_has_active_links(n_ptr))
+ return n_ptr->addr;
+ }
+ for (n_num = 1; n_num < tipc_node(addr); n_num++) {
+ n_ptr = c_ptr->nodes[n_num];
+ if (n_ptr && node_has_active_links(n_ptr))
+ return n_ptr->addr;
+ }
+ return 0;
+}
+
+void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr)
+{
+ u32 n_num = tipc_node(n_ptr->addr);
+ u32 max_n_num = tipc_max_nodes;
+
+ if (in_own_cluster(n_ptr->addr))
+ max_n_num = highest_allowed_slave;
+ assert(n_num > 0);
+ assert(n_num <= max_n_num);
+ assert(c_ptr->nodes[n_num] == 0);
+ c_ptr->nodes[n_num] = n_ptr;
+ if (n_num > c_ptr->highest_node)
+ c_ptr->highest_node = n_num;
+}
+
+/**
+ * cluster_select_router - select router to a cluster
+ *
+ * Uses a deterministic and fair algorithm.
+ */
+
+u32 cluster_select_router(struct cluster *c_ptr, u32 ref)
+{
+ u32 n_num;
+ u32 ulim = c_ptr->highest_node;
+ u32 mask;
+ u32 tstart;
+
+ assert(!in_own_cluster(c_ptr->addr));
+ if (!ulim)
+ return 0;
+
+ /* Start entry must be random */
+ mask = tipc_max_nodes;
+ while (mask > ulim)
+ mask >>= 1;
+ tstart = ref & mask;
+ n_num = tstart;
+
+ /* Lookup upwards with wrap-around */
+ do {
+ if (node_is_up(c_ptr->nodes[n_num]))
+ break;
+ } while (++n_num <= ulim);
+ if (n_num > ulim) {
+ n_num = 1;
+ do {
+ if (node_is_up(c_ptr->nodes[n_num]))
+ break;
+ } while (++n_num < tstart);
+ if (n_num == tstart)
+ return 0;
+ }
+ assert(n_num <= ulim);
+ return node_select_router(c_ptr->nodes[n_num], ref);
+}
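+
+/*
+ * Worked example (illustrative, not part of the original code): with
+ * tipc_max_nodes = 255 and ulim = 10, the loop above halves the mask
+ * 255 -> 127 -> 63 -> 31 -> 15 -> 7, stopping at the first value not
+ * exceeding ulim. A reference value ref = 21 then gives a start entry
+ * of 21 & 7 = 5, so the search runs over entries 5..10 and wraps back
+ * to 1..4 if no node in the first range is up.
+ */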
+
+/**
+ * cluster_select_node - select destination node within a remote cluster
+ *
+ * Uses a deterministic and fair algorithm.
+ */
+
+struct node *cluster_select_node(struct cluster *c_ptr, u32 selector)
+{
+ u32 n_num;
+ u32 mask = tipc_max_nodes;
+ u32 start_entry;
+
+ assert(!in_own_cluster(c_ptr->addr));
+ if (!c_ptr->highest_node)
+ return 0;
+
+ /* Start entry must be random */
+ while (mask > c_ptr->highest_node) {
+ mask >>= 1;
+ }
+ start_entry = (selector & mask) ? selector & mask : 1u;
+ assert(start_entry <= c_ptr->highest_node);
+
+ /* Lookup upwards with wrap-around */
+ for (n_num = start_entry; n_num <= c_ptr->highest_node; n_num++) {
+ if (node_has_active_links(c_ptr->nodes[n_num]))
+ return c_ptr->nodes[n_num];
+ }
+ for (n_num = 1; n_num < start_entry; n_num++) {
+ if (node_has_active_links(c_ptr->nodes[n_num]))
+ return c_ptr->nodes[n_num];
+ }
+ return 0;
+}
+
+/*
+ * Routing table management: See description in node.c
+ */
+
+struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest)
+{
+ u32 size = INT_H_SIZE + data_size;
+ struct sk_buff *buf = buf_acquire(size);
+ struct tipc_msg *msg;
+
+ if (buf) {
+ msg = buf_msg(buf);
+ memset((char *)msg, 0, size);
+ msg_init(msg, ROUTE_DISTRIBUTOR, 0, TIPC_OK, INT_H_SIZE, dest);
+ }
+ return buf;
+}
+
+void cluster_bcast_new_route(struct cluster *c_ptr, u32 dest,
+ u32 lower, u32 upper)
+{
+ struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr);
+ struct tipc_msg *msg;
+
+ if (buf) {
+ msg = buf_msg(buf);
+ msg_set_remote_node(msg, dest);
+ msg_set_type(msg, ROUTE_ADDITION);
+ cluster_multicast(c_ptr, buf, lower, upper);
+ } else {
+ warn("Memory squeeze: broadcast of new route failed\n");
+ }
+}
+
+void cluster_bcast_lost_route(struct cluster *c_ptr, u32 dest,
+ u32 lower, u32 upper)
+{
+ struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr);
+ struct tipc_msg *msg;
+
+ if (buf) {
+ msg = buf_msg(buf);
+ msg_set_remote_node(msg, dest);
+ msg_set_type(msg, ROUTE_REMOVAL);
+ cluster_multicast(c_ptr, buf, lower, upper);
+ } else {
+ warn("Memory squeeze: broadcast of lost route failed\n");
+ }
+}
+
+void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest)
+{
+ struct sk_buff *buf;
+ struct tipc_msg *msg;
+ u32 highest = c_ptr->highest_slave;
+ u32 n_num;
+ int send = 0;
+
+ assert(!is_slave(dest));
+ assert(in_own_cluster(dest));
+ assert(in_own_cluster(c_ptr->addr));
+ if (highest <= LOWEST_SLAVE)
+ return;
+ buf = cluster_prepare_routing_msg(highest - LOWEST_SLAVE + 1,
+ c_ptr->addr);
+ if (buf) {
+ msg = buf_msg(buf);
+ msg_set_remote_node(msg, c_ptr->addr);
+ msg_set_type(msg, SLAVE_ROUTING_TABLE);
+ for (n_num = LOWEST_SLAVE; n_num <= highest; n_num++) {
+ if (c_ptr->nodes[n_num] &&
+ node_has_active_links(c_ptr->nodes[n_num])) {
+ send = 1;
+ msg_set_dataoctet(msg, n_num);
+ }
+ }
+ if (send)
+ link_send(buf, dest, dest);
+ else
+ buf_discard(buf);
+ } else {
+ warn("Memory squeeze: broadcast of lost route failed\n");
+ }
+}
+
+void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest)
+{
+ struct sk_buff *buf;
+ struct tipc_msg *msg;
+ u32 highest = c_ptr->highest_node;
+ u32 n_num;
+ int send = 0;
+
+ if (in_own_cluster(c_ptr->addr))
+ return;
+ assert(!is_slave(dest));
+ assert(in_own_cluster(dest));
+ highest = c_ptr->highest_node;
+ buf = cluster_prepare_routing_msg(highest + 1, c_ptr->addr);
+ if (buf) {
+ msg = buf_msg(buf);
+ msg_set_remote_node(msg, c_ptr->addr);
+ msg_set_type(msg, EXT_ROUTING_TABLE);
+ for (n_num = 1; n_num <= highest; n_num++) {
+ if (c_ptr->nodes[n_num] &&
+ node_has_active_links(c_ptr->nodes[n_num])) {
+ send = 1;
+ msg_set_dataoctet(msg, n_num);
+ }
+ }
+ if (send)
+ link_send(buf, dest, dest);
+ else
+ buf_discard(buf);
+ } else {
+ warn("Memory squeeze: broadcast of external route failed\n");
+ }
+}
+
+void cluster_send_local_routes(struct cluster *c_ptr, u32 dest)
+{
+ struct sk_buff *buf;
+ struct tipc_msg *msg;
+ u32 highest = c_ptr->highest_node;
+ u32 n_num;
+ int send = 0;
+
+ assert(is_slave(dest));
+ assert(in_own_cluster(c_ptr->addr));
+ buf = cluster_prepare_routing_msg(highest, c_ptr->addr);
+ if (buf) {
+ msg = buf_msg(buf);
+ msg_set_remote_node(msg, c_ptr->addr);
+ msg_set_type(msg, LOCAL_ROUTING_TABLE);
+ for (n_num = 1; n_num <= highest; n_num++) {
+ if (c_ptr->nodes[n_num] &&
+ node_has_active_links(c_ptr->nodes[n_num])) {
+ send = 1;
+ msg_set_dataoctet(msg, n_num);
+ }
+ }
+ if (send)
+ link_send(buf, dest, dest);
+ else
+ buf_discard(buf);
+ } else {
+ warn("Memory squeeze: broadcast of local route failed\n");
+ }
+}
+
+void cluster_recv_routing_table(struct sk_buff *buf)
+{
+ struct tipc_msg *msg = buf_msg(buf);
+ struct cluster *c_ptr;
+ struct node *n_ptr;
+ unchar *node_table;
+ u32 table_size;
+ u32 router;
+ u32 rem_node = msg_remote_node(msg);
+ u32 z_num;
+ u32 c_num;
+ u32 n_num;
+
+ c_ptr = cluster_find(rem_node);
+ if (!c_ptr) {
+ c_ptr = cluster_create(rem_node);
+ if (!c_ptr) {
+ buf_discard(buf);
+ return;
+ }
+ }
+
+ node_table = buf->data + msg_hdr_sz(msg);
+ table_size = msg_size(msg) - msg_hdr_sz(msg);
+ router = msg_prevnode(msg);
+ z_num = tipc_zone(rem_node);
+ c_num = tipc_cluster(rem_node);
+
+ switch (msg_type(msg)) {
+ case LOCAL_ROUTING_TABLE:
+ assert(is_slave(tipc_own_addr));
+ case EXT_ROUTING_TABLE:
+ for (n_num = 1; n_num < table_size; n_num++) {
+ if (node_table[n_num]) {
+ u32 addr = tipc_addr(z_num, c_num, n_num);
+ n_ptr = c_ptr->nodes[n_num];
+ if (!n_ptr) {
+ n_ptr = node_create(addr);
+ }
+ if (n_ptr)
+ node_add_router(n_ptr, router);
+ }
+ }
+ break;
+ case SLAVE_ROUTING_TABLE:
+ assert(!is_slave(tipc_own_addr));
+ assert(in_own_cluster(c_ptr->addr));
+ for (n_num = 1; n_num < table_size; n_num++) {
+ if (node_table[n_num]) {
+ u32 slave_num = n_num + LOWEST_SLAVE;
+ u32 addr = tipc_addr(z_num, c_num, slave_num);
+ n_ptr = c_ptr->nodes[slave_num];
+ if (!n_ptr) {
+ n_ptr = node_create(addr);
+ }
+ if (n_ptr)
+ node_add_router(n_ptr, router);
+ }
+ }
+ break;
+ case ROUTE_ADDITION:
+ if (!is_slave(tipc_own_addr)) {
+ assert(!in_own_cluster(c_ptr->addr)
+ || is_slave(rem_node));
+ } else {
+ assert(in_own_cluster(c_ptr->addr)
+ && !is_slave(rem_node));
+ }
+ n_ptr = c_ptr->nodes[tipc_node(rem_node)];
+ if (!n_ptr)
+ n_ptr = node_create(rem_node);
+ if (n_ptr)
+ node_add_router(n_ptr, router);
+ break;
+ case ROUTE_REMOVAL:
+ if (!is_slave(tipc_own_addr)) {
+ assert(!in_own_cluster(c_ptr->addr)
+ || is_slave(rem_node));
+ } else {
+ assert(in_own_cluster(c_ptr->addr)
+ && !is_slave(rem_node));
+ }
+ n_ptr = c_ptr->nodes[tipc_node(rem_node)];
+ if (n_ptr)
+ node_remove_router(n_ptr, router);
+ break;
+ default:
+ assert(!"Illegal routing manager message received\n");
+ }
+ buf_discard(buf);
+}
+
+void cluster_remove_as_router(struct cluster *c_ptr, u32 router)
+{
+ u32 start_entry;
+ u32 tstop;
+ u32 n_num;
+
+ if (is_slave(router))
+ return; /* Slave nodes cannot be routers */
+
+ if (in_own_cluster(c_ptr->addr)) {
+ start_entry = LOWEST_SLAVE;
+ tstop = c_ptr->highest_slave;
+ } else {
+ start_entry = 1;
+ tstop = c_ptr->highest_node;
+ }
+
+ for (n_num = start_entry; n_num <= tstop; n_num++) {
+ if (c_ptr->nodes[n_num]) {
+ node_remove_router(c_ptr->nodes[n_num], router);
+ }
+ }
+}
+
+/**
+ * cluster_multicast - multicast message to local nodes
+ */
+
+void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf,
+ u32 lower, u32 upper)
+{
+ struct sk_buff *buf_copy;
+ struct node *n_ptr;
+ u32 n_num;
+ u32 tstop;
+
+ assert(lower <= upper);
+ assert(((lower >= 1) && (lower <= tipc_max_nodes)) ||
+ ((lower >= LOWEST_SLAVE) && (lower <= highest_allowed_slave)));
+ assert(((upper >= 1) && (upper <= tipc_max_nodes)) ||
+ ((upper >= LOWEST_SLAVE) && (upper <= highest_allowed_slave)));
+ assert(in_own_cluster(c_ptr->addr));
+
+ tstop = is_slave(upper) ? c_ptr->highest_slave : c_ptr->highest_node;
+ if (tstop > upper)
+ tstop = upper;
+ for (n_num = lower; n_num <= tstop; n_num++) {
+ n_ptr = c_ptr->nodes[n_num];
+ if (n_ptr && node_has_active_links(n_ptr)) {
+ buf_copy = skb_copy(buf, GFP_ATOMIC);
+ if (buf_copy == NULL)
+ break;
+ msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
+ link_send(buf_copy, n_ptr->addr, n_ptr->addr);
+ }
+ }
+ buf_discard(buf);
+}
+
+/**
+ * cluster_broadcast - broadcast message to all nodes within cluster
+ */
+
+void cluster_broadcast(struct sk_buff *buf)
+{
+ struct sk_buff *buf_copy;
+ struct cluster *c_ptr;
+ struct node *n_ptr;
+ u32 n_num;
+ u32 tstart;
+ u32 tstop;
+ u32 node_type;
+
+ if (tipc_mode == TIPC_NET_MODE) {
+ c_ptr = cluster_find(tipc_own_addr);
+ assert(in_own_cluster(c_ptr->addr)); /* For now */
+
+ /* Send to standard nodes, then repeat loop sending to slaves */
+ tstart = 1;
+ tstop = c_ptr->highest_node;
+ for (node_type = 1; node_type <= 2; node_type++) {
+ for (n_num = tstart; n_num <= tstop; n_num++) {
+ n_ptr = c_ptr->nodes[n_num];
+ if (n_ptr && node_has_active_links(n_ptr)) {
+ buf_copy = skb_copy(buf, GFP_ATOMIC);
+ if (buf_copy == NULL)
+ goto exit;
+ msg_set_destnode(buf_msg(buf_copy),
+ n_ptr->addr);
+ link_send(buf_copy, n_ptr->addr,
+ n_ptr->addr);
+ }
+ }
+ tstart = LOWEST_SLAVE;
+ tstop = c_ptr->highest_slave;
+ }
+ }
+exit:
+ buf_discard(buf);
+}
+
+int cluster_init(void)
+{
+ highest_allowed_slave = LOWEST_SLAVE + tipc_max_slaves;
+ return cluster_create(tipc_own_addr) ? TIPC_OK : -ENOMEM;
+}
+
diff --git a/net/tipc/cluster.h b/net/tipc/cluster.h
new file mode 100644
index 00000000000..1ffb095991d
--- /dev/null
+++ b/net/tipc/cluster.h
@@ -0,0 +1,92 @@
+/*
+ * net/tipc/cluster.h: Include file for TIPC cluster management routines
+ *
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_CLUSTER_H
+#define _TIPC_CLUSTER_H
+
+#include "addr.h"
+#include "zone.h"
+
+#define LOWEST_SLAVE 2048u
+
+/**
+ * struct cluster - TIPC cluster structure
+ * @addr: network address of cluster
+ * @owner: pointer to zone that cluster belongs to
+ * @nodes: array of pointers to all nodes within cluster
+ * @highest_node: id of highest numbered node within cluster
+ * @highest_slave: id of highest numbered slave node within cluster (used for secondary node support)
+ */
+
+struct cluster {
+ u32 addr;
+ struct _zone *owner;
+ struct node **nodes;
+ u32 highest_node;
+ u32 highest_slave;
+};
+
+
+extern struct node **local_nodes;
+extern u32 highest_allowed_slave;
+extern struct node_map cluster_bcast_nodes;
+
+void cluster_remove_as_router(struct cluster *c_ptr, u32 router);
+void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest);
+struct node *cluster_select_node(struct cluster *c_ptr, u32 selector);
+u32 cluster_select_router(struct cluster *c_ptr, u32 ref);
+void cluster_recv_routing_table(struct sk_buff *buf);
+struct cluster *cluster_create(u32 addr);
+void cluster_delete(struct cluster *c_ptr);
+void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr);
+void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest);
+void cluster_broadcast(struct sk_buff *buf);
+int cluster_init(void);
+u32 cluster_next_node(struct cluster *c_ptr, u32 addr);
+void cluster_bcast_new_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
+void cluster_send_local_routes(struct cluster *c_ptr, u32 dest);
+void cluster_bcast_lost_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
+
+static inline struct cluster *cluster_find(u32 addr)
+{
+ struct _zone *z_ptr = zone_find(addr);
+
+ if (z_ptr)
+ return z_ptr->clusters[1];
+ return 0;
+}
+
+#endif
diff --git a/net/tipc/config.c b/net/tipc/config.c
new file mode 100644
index 00000000000..8ddef4fce2c
--- /dev/null
+++ b/net/tipc/config.c
@@ -0,0 +1,718 @@
+/*
+ * net/tipc/config.c: TIPC configuration management code
+ *
+ * Copyright (c) 2002-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "bearer.h"
+#include "port.h"
+#include "link.h"
+#include "zone.h"
+#include "addr.h"
+#include "name_table.h"
+#include "node.h"
+#include "config.h"
+#include "discover.h"
+
+struct subscr_data {
+ char usr_handle[8];
+ u32 domain;
+ u32 port_ref;
+ struct list_head subd_list;
+};
+
+struct manager {
+ u32 user_ref;
+ u32 port_ref;
+ u32 subscr_ref;
+ u32 link_subscriptions;
+ struct list_head link_subscribers;
+};
+
+static struct manager mng = { 0};
+
+static spinlock_t config_lock = SPIN_LOCK_UNLOCKED;
+
+static const void *req_tlv_area; /* request message TLV area */
+static int req_tlv_space; /* request message TLV area size */
+static int rep_headroom; /* reply message headroom to use */
+
+
+void cfg_link_event(u32 addr, char *name, int up)
+{
+ /* TIPC DOESN'T HANDLE LINK EVENT SUBSCRIPTIONS AT THE MOMENT */
+}
+
+
+struct sk_buff *cfg_reply_alloc(int payload_size)
+{
+ struct sk_buff *buf;
+
+ buf = alloc_skb(rep_headroom + payload_size, GFP_ATOMIC);
+ if (buf)
+ skb_reserve(buf, rep_headroom);
+ return buf;
+}
+
+int cfg_append_tlv(struct sk_buff *buf, int tlv_type,
+ void *tlv_data, int tlv_data_size)
+{
+ struct tlv_desc *tlv = (struct tlv_desc *)buf->tail;
+ int new_tlv_space = TLV_SPACE(tlv_data_size);
+
+ if (skb_tailroom(buf) < new_tlv_space) {
+ dbg("cfg_append_tlv unable to append TLV\n");
+ return 0;
+ }
+ skb_put(buf, new_tlv_space);
+ tlv->tlv_type = htons(tlv_type);
+ tlv->tlv_len = htons(TLV_LENGTH(tlv_data_size));
+ if (tlv_data_size && tlv_data)
+ memcpy(TLV_DATA(tlv), tlv_data, tlv_data_size);
+ return 1;
+}
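+
+/*
+ * Illustrative sketch (not part of the original code): a reply carrying
+ * several TLVs is built by allocating space for the worst case and then
+ * appending each value, in the same way as bearer_get_names(). The
+ * bearer names used below are hypothetical.
+ */
+#if 0
+static struct sk_buff *example_reply(void)
+{
+ struct sk_buff *buf;
+ char *names[] = { "eth:eth0", "eth:eth1" };
+ int i;
+
+ buf = cfg_reply_alloc(2 * TLV_SPACE(TIPC_MAX_BEARER_NAME));
+ if (!buf)
+ return NULL;
+ for (i = 0; i < 2; i++)
+ cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME, names[i],
+ strlen(names[i]) + 1);
+ return buf;
+}
+#endif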
+
+struct sk_buff *cfg_reply_unsigned_type(u16 tlv_type, u32 value)
+{
+ struct sk_buff *buf;
+ u32 value_net;
+
+ buf = cfg_reply_alloc(TLV_SPACE(sizeof(value)));
+ if (buf) {
+ value_net = htonl(value);
+ cfg_append_tlv(buf, tlv_type, &value_net,
+ sizeof(value_net));
+ }
+ return buf;
+}
+
+struct sk_buff *cfg_reply_string_type(u16 tlv_type, char *string)
+{
+ struct sk_buff *buf;
+ int string_len = strlen(string) + 1;
+
+ buf = cfg_reply_alloc(TLV_SPACE(string_len));
+ if (buf)
+ cfg_append_tlv(buf, tlv_type, string, string_len);
+ return buf;
+}
+
+
+
+
+#if 0
+
+/* Now-obsolete code for handling commands that have not yet been converted to the new mechanism */
+
+int tipc_cfg_cmd(const struct tipc_cmd_msg * msg,
+ char *data,
+ u32 sz,
+ u32 *ret_size,
+ struct tipc_portid *orig)
+{
+ int rv = -EINVAL;
+ u32 cmd = msg->cmd;
+
+ *ret_size = 0;
+ switch (cmd) {
+ case TIPC_REMOVE_LINK:
+ case TIPC_CMD_BLOCK_LINK:
+ case TIPC_CMD_UNBLOCK_LINK:
+ if (!cfg_check_connection(orig))
+ rv = link_control(msg->argv.link_name, msg->cmd, 0);
+ break;
+ case TIPC_ESTABLISH:
+ {
+ int connected;
+
+ tipc_isconnected(mng.conn_port_ref, &connected);
+ if (connected || !orig) {
+ rv = TIPC_FAILURE;
+ break;
+ }
+ rv = tipc_connect2port(mng.conn_port_ref, orig);
+ if (rv == TIPC_OK)
+ orig = 0;
+ break;
+ }
+ case TIPC_GET_PEER_ADDRESS:
+ *ret_size = link_peer_addr(msg->argv.link_name, data, sz);
+ break;
+ case TIPC_GET_ROUTES:
+ rv = TIPC_OK;
+ break;
+ default: {}
+ }
+ if (*ret_size)
+ rv = TIPC_OK;
+ return rv;
+}
+
+static void cfg_cmd_event(struct tipc_cmd_msg *msg,
+ char *data,
+ u32 sz,
+ struct tipc_portid const *orig)
+{
+ int rv = -EINVAL;
+ struct tipc_cmd_result_msg rmsg;
+ struct iovec msg_sect[2];
+ int *arg;
+
+ msg->cmd = ntohl(msg->cmd);
+
+ cfg_prepare_res_msg(msg->cmd, msg->usr_handle, rv, &rmsg, msg_sect,
+ data, 0);
+ if (ntohl(msg->magic) != TIPC_MAGIC)
+ goto exit;
+
+ switch (msg->cmd) {
+ case TIPC_CREATE_LINK:
+ if (!cfg_check_connection(orig))
+ rv = disc_create_link(&msg->argv.create_link);
+ break;
+ case TIPC_LINK_SUBSCRIBE:
+ {
+ struct subscr_data *sub;
+
+ if (mng.link_subscriptions > 64)
+ break;
+ sub = (struct subscr_data *)kmalloc(sizeof(*sub),
+ GFP_ATOMIC);
+ if (sub == NULL) {
+ warn("Memory squeeze; dropped remote link subscription\n");
+ break;
+ }
+ INIT_LIST_HEAD(&sub->subd_list);
+ tipc_createport(mng.user_ref,
+ (void *)sub,
+ TIPC_HIGH_IMPORTANCE,
+ 0,
+ 0,
+ (tipc_conn_shutdown_event)cfg_linksubscr_cancel,
+ 0,
+ 0,
+ (tipc_conn_msg_event)cfg_linksubscr_cancel,
+ 0,
+ &sub->port_ref);
+ if (!sub->port_ref) {
+ kfree(sub);
+ break;
+ }
+ memcpy(sub->usr_handle,msg->usr_handle,
+ sizeof(sub->usr_handle));
+ sub->domain = msg->argv.domain;
+ list_add_tail(&sub->subd_list, &mng.link_subscribers);
+ tipc_connect2port(sub->port_ref, orig);
+ rmsg.retval = TIPC_OK;
+ tipc_send(sub->port_ref, 2u, msg_sect);
+ mng.link_subscriptions++;
+ return;
+ }
+ default:
+ rv = tipc_cfg_cmd(msg, data, sz, (u32 *)&msg_sect[1].iov_len, orig);
+ }
+ exit:
+ rmsg.result_len = htonl(msg_sect[1].iov_len);
+ rmsg.retval = htonl(rv);
+ cfg_respond(msg_sect, 2u, orig);
+}
+#endif
+
+static struct sk_buff *cfg_enable_bearer(void)
+{
+ struct tipc_bearer_config *args;
+
+ if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_CONFIG))
+ return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+ args = (struct tipc_bearer_config *)TLV_DATA(req_tlv_area);
+ if (tipc_enable_bearer(args->name,
+ ntohl(args->detect_scope),
+ ntohl(args->priority)))
+ return cfg_reply_error_string("unable to enable bearer");
+
+ return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_disable_bearer(void)
+{
+ if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_NAME))
+ return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+ if (tipc_disable_bearer((char *)TLV_DATA(req_tlv_area)))
+ return cfg_reply_error_string("unable to disable bearer");
+
+ return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_own_addr(void)
+{
+ u32 addr;
+
+ if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
+ return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+ addr = *(u32 *)TLV_DATA(req_tlv_area);
+ addr = ntohl(addr);
+ if (addr == tipc_own_addr)
+ return cfg_reply_none();
+ if (!addr_node_valid(addr))
+ return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+ " (node address)");
+ if (tipc_own_addr)
+ return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+ " (cannot change node address once assigned)");
+
+ spin_unlock_bh(&config_lock);
+ stop_net();
+ tipc_own_addr = addr;
+ start_net();
+ spin_lock_bh(&config_lock);
+ return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_remote_mng(void)
+{
+ u32 value;
+
+ if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+ return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+ value = *(u32 *)TLV_DATA(req_tlv_area);
+ value = ntohl(value);
+ tipc_remote_management = (value != 0);
+ return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_max_publications(void)
+{
+ u32 value;
+
+ if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+ return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+ value = *(u32 *)TLV_DATA(req_tlv_area);
+ value = ntohl(value);
+ if (value != delimit(value, 1, 65535))
+ return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+ " (max publications must be 1-65535)");
+ tipc_max_publications = value;
+ return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_max_subscriptions(void)
+{
+ u32 value;
+
+ if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+ return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+ value = *(u32 *)TLV_DATA(req_tlv_area);
+ value = ntohl(value);
+ if (value != delimit(value, 1, 65535))
+ return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+ " (max subscriptions must be 1-65535");
+ tipc_max_subscriptions = value;
+ return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_max_ports(void)
+{
+ int orig_mode;
+ u32 value;
+
+ if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+ return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+ value = *(u32 *)TLV_DATA(req_tlv_area);
+ value = ntohl(value);
+ if (value != delimit(value, 127, 65535))
+ return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+ " (max ports must be 127-65535)");
+
+ if (value == tipc_max_ports)
+ return cfg_reply_none();
+
+ if (atomic_read(&tipc_user_count) > 2)
+ return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+ " (cannot change max ports while TIPC users exist)");
+
+ spin_unlock_bh(&config_lock);
+ orig_mode = tipc_get_mode();
+ if (orig_mode == TIPC_NET_MODE)
+ stop_net();
+ stop_core();
+ tipc_max_ports = value;
+ start_core();
+ if (orig_mode == TIPC_NET_MODE)
+ start_net();
+ spin_lock_bh(&config_lock);
+ return cfg_reply_none();
+}
+
+static struct sk_buff *set_net_max(int value, int *parameter)
+{
+ int orig_mode;
+
+ if (value != *parameter) {
+ orig_mode = tipc_get_mode();
+ if (orig_mode == TIPC_NET_MODE)
+ stop_net();
+ *parameter = value;
+ if (orig_mode == TIPC_NET_MODE)
+ start_net();
+ }
+
+ return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_max_zones(void)
+{
+ u32 value;
+
+ if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+ return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+ value = *(u32 *)TLV_DATA(req_tlv_area);
+ value = ntohl(value);
+ if (value != delimit(value, 1, 255))
+ return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+ " (max zones must be 1-255)");
+ return set_net_max(value, &tipc_max_zones);
+}
+
+static struct sk_buff *cfg_set_max_clusters(void)
+{
+ u32 value;
+
+ if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+ return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+ value = *(u32 *)TLV_DATA(req_tlv_area);
+ value = ntohl(value);
+ if (value != 1)
+ return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+ " (max clusters fixed at 1)");
+ return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_max_nodes(void)
+{
+ u32 value;
+
+ if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+ return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+ value = *(u32 *)TLV_DATA(req_tlv_area);
+ value = ntohl(value);
+ if (value != delimit(value, 8, 2047))
+ return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+ " (max nodes must be 8-2047)");
+ return set_net_max(value, &tipc_max_nodes);
+}
+
+static struct sk_buff *cfg_set_max_slaves(void)
+{
+ u32 value;
+
+ if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+ return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+ value = *(u32 *)TLV_DATA(req_tlv_area);
+ value = ntohl(value);
+ if (value != 0)
+ return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+ " (max secondary nodes fixed at 0)");
+ return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_netid(void)
+{
+ u32 value;
+
+ if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+ return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+ value = *(u32 *)TLV_DATA(req_tlv_area);
+ value = ntohl(value);
+ if (value != delimit(value, 1, 9999))
+ return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+ " (network id must be 1-9999)");
+
+ if (tipc_own_addr)
+ return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+ " (cannot change network id once part of network)");
+
+ return set_net_max(value, &tipc_net_id);
+}
+
+struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
+ int request_space, int reply_headroom)
+{
+ struct sk_buff *rep_tlv_buf;
+
+ spin_lock_bh(&config_lock);
+
+ /* Save request and reply details in a well-known location */
+
+ req_tlv_area = request_area;
+ req_tlv_space = request_space;
+ rep_headroom = reply_headroom;
+
+ /* Check command authorization */
+
+ if (likely(orig_node == tipc_own_addr)) {
+ /* command is permitted */
+ } else if (cmd >= 0x8000) {
+ rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+ " (cannot be done remotely)");
+ goto exit;
+ } else if (!tipc_remote_management) {
+ rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NO_REMOTE);
+ goto exit;
+ }
+ else if (cmd >= 0x4000) {
+ u32 domain = 0;
+
+ if ((nametbl_translate(TIPC_ZM_SRV, 0, &domain) == 0) ||
+ (domain != orig_node)) {
+ rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_ZONE_MSTR);
+ goto exit;
+ }
+ }
+
+ /* Call appropriate processing routine */
+
+ switch (cmd) {
+ case TIPC_CMD_NOOP:
+ rep_tlv_buf = cfg_reply_none();
+ break;
+ case TIPC_CMD_GET_NODES:
+ rep_tlv_buf = node_get_nodes(req_tlv_area, req_tlv_space);
+ break;
+ case TIPC_CMD_GET_LINKS:
+ rep_tlv_buf = node_get_links(req_tlv_area, req_tlv_space);
+ break;
+ case TIPC_CMD_SHOW_LINK_STATS:
+ rep_tlv_buf = link_cmd_show_stats(req_tlv_area, req_tlv_space);
+ break;
+ case TIPC_CMD_RESET_LINK_STATS:
+ rep_tlv_buf = link_cmd_reset_stats(req_tlv_area, req_tlv_space);
+ break;
+ case TIPC_CMD_SHOW_NAME_TABLE:
+ rep_tlv_buf = nametbl_get(req_tlv_area, req_tlv_space);
+ break;
+ case TIPC_CMD_GET_BEARER_NAMES:
+ rep_tlv_buf = bearer_get_names();
+ break;
+ case TIPC_CMD_GET_MEDIA_NAMES:
+ rep_tlv_buf = media_get_names();
+ break;
+ case TIPC_CMD_SHOW_PORTS:
+ rep_tlv_buf = port_get_ports();
+ break;
+#if 0
+ case TIPC_CMD_SHOW_PORT_STATS:
+ rep_tlv_buf = port_show_stats(req_tlv_area, req_tlv_space);
+ break;
+ case TIPC_CMD_RESET_PORT_STATS:
+ rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED);
+ break;
+#endif
+ case TIPC_CMD_SET_LOG_SIZE:
+ rep_tlv_buf = log_resize(req_tlv_area, req_tlv_space);
+ break;
+ case TIPC_CMD_DUMP_LOG:
+ rep_tlv_buf = log_dump();
+ break;
+ case TIPC_CMD_SET_LINK_TOL:
+ case TIPC_CMD_SET_LINK_PRI:
+ case TIPC_CMD_SET_LINK_WINDOW:
+ rep_tlv_buf = link_cmd_config(req_tlv_area, req_tlv_space, cmd);
+ break;
+ case TIPC_CMD_ENABLE_BEARER:
+ rep_tlv_buf = cfg_enable_bearer();
+ break;
+ case TIPC_CMD_DISABLE_BEARER:
+ rep_tlv_buf = cfg_disable_bearer();
+ break;
+ case TIPC_CMD_SET_NODE_ADDR:
+ rep_tlv_buf = cfg_set_own_addr();
+ break;
+ case TIPC_CMD_SET_REMOTE_MNG:
+ rep_tlv_buf = cfg_set_remote_mng();
+ break;
+ case TIPC_CMD_SET_MAX_PORTS:
+ rep_tlv_buf = cfg_set_max_ports();
+ break;
+ case TIPC_CMD_SET_MAX_PUBL:
+ rep_tlv_buf = cfg_set_max_publications();
+ break;
+ case TIPC_CMD_SET_MAX_SUBSCR:
+ rep_tlv_buf = cfg_set_max_subscriptions();
+ break;
+ case TIPC_CMD_SET_MAX_ZONES:
+ rep_tlv_buf = cfg_set_max_zones();
+ break;
+ case TIPC_CMD_SET_MAX_CLUSTERS:
+ rep_tlv_buf = cfg_set_max_clusters();
+ break;
+ case TIPC_CMD_SET_MAX_NODES:
+ rep_tlv_buf = cfg_set_max_nodes();
+ break;
+ case TIPC_CMD_SET_MAX_SLAVES:
+ rep_tlv_buf = cfg_set_max_slaves();
+ break;
+ case TIPC_CMD_SET_NETID:
+ rep_tlv_buf = cfg_set_netid();
+ break;
+ case TIPC_CMD_GET_REMOTE_MNG:
+ rep_tlv_buf = cfg_reply_unsigned(tipc_remote_management);
+ break;
+ case TIPC_CMD_GET_MAX_PORTS:
+ rep_tlv_buf = cfg_reply_unsigned(tipc_max_ports);
+ break;
+ case TIPC_CMD_GET_MAX_PUBL:
+ rep_tlv_buf = cfg_reply_unsigned(tipc_max_publications);
+ break;
+ case TIPC_CMD_GET_MAX_SUBSCR:
+ rep_tlv_buf = cfg_reply_unsigned(tipc_max_subscriptions);
+ break;
+ case TIPC_CMD_GET_MAX_ZONES:
+ rep_tlv_buf = cfg_reply_unsigned(tipc_max_zones);
+ break;
+ case TIPC_CMD_GET_MAX_CLUSTERS:
+ rep_tlv_buf = cfg_reply_unsigned(tipc_max_clusters);
+ break;
+ case TIPC_CMD_GET_MAX_NODES:
+ rep_tlv_buf = cfg_reply_unsigned(tipc_max_nodes);
+ break;
+ case TIPC_CMD_GET_MAX_SLAVES:
+ rep_tlv_buf = cfg_reply_unsigned(tipc_max_slaves);
+ break;
+ case TIPC_CMD_GET_NETID:
+ rep_tlv_buf = cfg_reply_unsigned(tipc_net_id);
+ break;
+ default:
+ rep_tlv_buf = NULL;
+ break;
+ }
+
+ /* Return reply buffer */
+exit:
+ spin_unlock_bh(&config_lock);
+ return rep_tlv_buf;
+}
+
+static void cfg_named_msg_event(void *userdata,
+ u32 port_ref,
+ struct sk_buff **buf,
+ const unchar *msg,
+ u32 size,
+ u32 importance,
+ struct tipc_portid const *orig,
+ struct tipc_name_seq const *dest)
+{
+ struct tipc_cfg_msg_hdr *req_hdr;
+ struct tipc_cfg_msg_hdr *rep_hdr;
+ struct sk_buff *rep_buf;
+
+ /* Validate configuration message header (ignore invalid message) */
+
+ req_hdr = (struct tipc_cfg_msg_hdr *)msg;
+ if ((size < sizeof(*req_hdr)) ||
+ (size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
+ (ntohs(req_hdr->tcm_flags) != TCM_F_REQUEST)) {
+ warn("discarded invalid configuration message\n");
+ return;
+ }
+
+ /* Generate reply for request (if we can't, return the original request) */
+
+ rep_buf = cfg_do_cmd(orig->node,
+ ntohs(req_hdr->tcm_type),
+ msg + sizeof(*req_hdr),
+ size - sizeof(*req_hdr),
+ BUF_HEADROOM + MAX_H_SIZE + sizeof(*rep_hdr));
+ if (rep_buf) {
+ skb_push(rep_buf, sizeof(*rep_hdr));
+ rep_hdr = (struct tipc_cfg_msg_hdr *)rep_buf->data;
+ memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr));
+ rep_hdr->tcm_len = htonl(rep_buf->len);
+ rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST);
+ } else {
+ rep_buf = *buf;
+ *buf = NULL;
+ }
+
+ /* NEED TO ADD CODE TO HANDLE FAILED SEND (SUCH AS CONGESTION) */
+ tipc_send_buf2port(port_ref, orig, rep_buf, rep_buf->len);
+}
+
+int cfg_init(void)
+{
+ struct tipc_name_seq seq;
+ int res;
+
+ memset(&mng, 0, sizeof(mng));
+ INIT_LIST_HEAD(&mng.link_subscribers);
+
+ res = tipc_attach(&mng.user_ref, 0, 0);
+ if (res)
+ goto failed;
+
+ res = tipc_createport(mng.user_ref, 0, TIPC_CRITICAL_IMPORTANCE,
+ NULL, NULL, NULL,
+ NULL, cfg_named_msg_event, NULL,
+ NULL, &mng.port_ref);
+ if (res)
+ goto failed;
+
+ seq.type = TIPC_CFG_SRV;
+ seq.lower = seq.upper = tipc_own_addr;
+ res = nametbl_publish_rsv(mng.port_ref, TIPC_ZONE_SCOPE, &seq);
+ if (res)
+ goto failed;
+
+ return 0;
+
+failed:
+ err("Unable to create configuration service\n");
+ tipc_detach(mng.user_ref);
+ mng.user_ref = 0;
+ return res;
+}
+
+void cfg_stop(void)
+{
+ if (mng.user_ref) {
+ tipc_detach(mng.user_ref);
+ mng.user_ref = 0;
+ }
+}
diff --git a/net/tipc/config.h b/net/tipc/config.h
new file mode 100644
index 00000000000..646377d4045
--- /dev/null
+++ b/net/tipc/config.h
@@ -0,0 +1,80 @@
+/*
+ * net/tipc/config.h: Include file for TIPC configuration service code
+ *
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_CONFIG_H
+#define _TIPC_CONFIG_H
+
+/* ---------------------------------------------------------------------- */
+
+#include <linux/tipc.h>
+#include <linux/tipc_config.h>
+#include "link.h"
+
+struct sk_buff *cfg_reply_alloc(int payload_size);
+int cfg_append_tlv(struct sk_buff *buf, int tlv_type,
+ void *tlv_data, int tlv_data_size);
+struct sk_buff *cfg_reply_unsigned_type(u16 tlv_type, u32 value);
+struct sk_buff *cfg_reply_string_type(u16 tlv_type, char *string);
+
+static inline struct sk_buff *cfg_reply_none(void)
+{
+ return cfg_reply_alloc(0);
+}
+
+static inline struct sk_buff *cfg_reply_unsigned(u32 value)
+{
+ return cfg_reply_unsigned_type(TIPC_TLV_UNSIGNED, value);
+}
+
+static inline struct sk_buff *cfg_reply_error_string(char *string)
+{
+ return cfg_reply_string_type(TIPC_TLV_ERROR_STRING, string);
+}
+
+static inline struct sk_buff *cfg_reply_ultra_string(char *string)
+{
+ return cfg_reply_string_type(TIPC_TLV_ULTRA_STRING, string);
+}
+
+struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd,
+ const void *req_tlv_area, int req_tlv_space,
+ int headroom);
+
+void cfg_link_event(u32 addr, char *name, int up);
+int cfg_init(void);
+void cfg_stop(void);
+
+#endif
diff --git a/net/tipc/core.c b/net/tipc/core.c
new file mode 100644
index 00000000000..e83ac06e31b
--- /dev/null
+++ b/net/tipc/core.c
@@ -0,0 +1,285 @@
+/*
+ * net/tipc/core.c: TIPC module code
+ *
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/random.h>
+
+#include "core.h"
+#include "dbg.h"
+#include "ref.h"
+#include "net.h"
+#include "user_reg.h"
+#include "name_table.h"
+#include "subscr.h"
+#include "config.h"
+
+int eth_media_start(void);
+void eth_media_stop(void);
+int handler_start(void);
+void handler_stop(void);
+int socket_init(void);
+void socket_stop(void);
+int netlink_start(void);
+void netlink_stop(void);
+
+#define MOD_NAME "tipc_start: "
+
+#ifndef CONFIG_TIPC_ZONES
+#define CONFIG_TIPC_ZONES 3
+#endif
+
+#ifndef CONFIG_TIPC_CLUSTERS
+#define CONFIG_TIPC_CLUSTERS 1
+#endif
+
+#ifndef CONFIG_TIPC_NODES
+#define CONFIG_TIPC_NODES 255
+#endif
+
+#ifndef CONFIG_TIPC_SLAVE_NODES
+#define CONFIG_TIPC_SLAVE_NODES 0
+#endif
+
+#ifndef CONFIG_TIPC_PORTS
+#define CONFIG_TIPC_PORTS 8191
+#endif
+
+#ifndef CONFIG_TIPC_LOG
+#define CONFIG_TIPC_LOG 0
+#endif
+
+/* global variables used by multiple sub-systems within TIPC */
+
+int tipc_mode = TIPC_NOT_RUNNING;
+int tipc_random;
+atomic_t tipc_user_count = ATOMIC_INIT(0);
+
+const char tipc_alphabet[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_";
+
+/* configurable TIPC parameters */
+
+u32 tipc_own_addr;
+int tipc_max_zones;
+int tipc_max_clusters;
+int tipc_max_nodes;
+int tipc_max_slaves;
+int tipc_max_ports;
+int tipc_max_subscriptions;
+int tipc_max_publications;
+int tipc_net_id;
+int tipc_remote_management;
+
+
+int tipc_get_mode(void)
+{
+ return tipc_mode;
+}
+
+/**
+ * stop_net - shut down TIPC networking sub-systems
+ */
+
+void stop_net(void)
+{
+ eth_media_stop();
+ tipc_stop_net();
+}
+
+/**
+ * start_net - start TIPC networking sub-systems
+ */
+
+int start_net(void)
+{
+ int res;
+
+ if ((res = tipc_start_net()) ||
+ (res = eth_media_start())) {
+ stop_net();
+ }
+ return res;
+}
+
+/**
+ * stop_core - switch TIPC from SINGLE NODE to NOT RUNNING mode
+ */
+
+void stop_core(void)
+{
+ if (tipc_mode != TIPC_NODE_MODE)
+ return;
+
+ tipc_mode = TIPC_NOT_RUNNING;
+
+ netlink_stop();
+ handler_stop();
+ cfg_stop();
+ subscr_stop();
+ reg_stop();
+ nametbl_stop();
+ ref_table_stop();
+ socket_stop();
+}
+
+/**
+ * start_core - switch TIPC from NOT RUNNING to SINGLE NODE mode
+ */
+
+int start_core(void)
+{
+ int res;
+
+ if (tipc_mode != TIPC_NOT_RUNNING)
+ return -ENOPROTOOPT;
+
+ get_random_bytes(&tipc_random, sizeof(tipc_random));
+ tipc_mode = TIPC_NODE_MODE;
+
+ if ((res = handler_start()) ||
+ (res = ref_table_init(tipc_max_ports + tipc_max_subscriptions,
+ tipc_random)) ||
+ (res = reg_start()) ||
+ (res = nametbl_init()) ||
+ (res = k_signal((Handler)subscr_start, 0)) ||
+ (res = k_signal((Handler)cfg_init, 0)) ||
+ (res = netlink_start()) ||
+ (res = socket_init())) {
+ stop_core();
+ }
+ return res;
+}
+
+
+static int __init tipc_init(void)
+{
+ int res;
+
+ log_reinit(CONFIG_TIPC_LOG);
+ info("Activated (compiled " __DATE__ " " __TIME__ ")\n");
+
+ tipc_own_addr = 0;
+ tipc_remote_management = 1;
+ tipc_max_publications = 10000;
+ tipc_max_subscriptions = 2000;
+ tipc_max_ports = delimit(CONFIG_TIPC_PORTS, 127, 65536);
+ tipc_max_zones = delimit(CONFIG_TIPC_ZONES, 1, 511);
+ tipc_max_clusters = delimit(CONFIG_TIPC_CLUSTERS, 1, 1);
+ tipc_max_nodes = delimit(CONFIG_TIPC_NODES, 8, 2047);
+ tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047);
+ tipc_net_id = 4711;
+
+ if ((res = start_core()))
+ err("Unable to start in single node mode\n");
+ else
+ info("Started in single node mode\n");
+ return res;
+}
+
+static void __exit tipc_exit(void)
+{
+ stop_net();
+ stop_core();
+ info("Deactivated\n");
+ log_stop();
+}
+
+module_init(tipc_init);
+module_exit(tipc_exit);
+
+MODULE_DESCRIPTION("TIPC: Transparent Inter Process Communication");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/* Native TIPC API for kernel-space applications (see tipc.h) */
+
+EXPORT_SYMBOL(tipc_attach);
+EXPORT_SYMBOL(tipc_detach);
+EXPORT_SYMBOL(tipc_get_addr);
+EXPORT_SYMBOL(tipc_get_mode);
+EXPORT_SYMBOL(tipc_createport);
+EXPORT_SYMBOL(tipc_deleteport);
+EXPORT_SYMBOL(tipc_ownidentity);
+EXPORT_SYMBOL(tipc_portimportance);
+EXPORT_SYMBOL(tipc_set_portimportance);
+EXPORT_SYMBOL(tipc_portunreliable);
+EXPORT_SYMBOL(tipc_set_portunreliable);
+EXPORT_SYMBOL(tipc_portunreturnable);
+EXPORT_SYMBOL(tipc_set_portunreturnable);
+EXPORT_SYMBOL(tipc_publish);
+EXPORT_SYMBOL(tipc_withdraw);
+EXPORT_SYMBOL(tipc_connect2port);
+EXPORT_SYMBOL(tipc_disconnect);
+EXPORT_SYMBOL(tipc_shutdown);
+EXPORT_SYMBOL(tipc_isconnected);
+EXPORT_SYMBOL(tipc_peer);
+EXPORT_SYMBOL(tipc_ref_valid);
+EXPORT_SYMBOL(tipc_send);
+EXPORT_SYMBOL(tipc_send_buf);
+EXPORT_SYMBOL(tipc_send2name);
+EXPORT_SYMBOL(tipc_forward2name);
+EXPORT_SYMBOL(tipc_send_buf2name);
+EXPORT_SYMBOL(tipc_forward_buf2name);
+EXPORT_SYMBOL(tipc_send2port);
+EXPORT_SYMBOL(tipc_forward2port);
+EXPORT_SYMBOL(tipc_send_buf2port);
+EXPORT_SYMBOL(tipc_forward_buf2port);
+EXPORT_SYMBOL(tipc_multicast);
+/* EXPORT_SYMBOL(tipc_multicast_buf); not available yet */
+EXPORT_SYMBOL(tipc_ispublished);
+EXPORT_SYMBOL(tipc_available_nodes);
+
+/* TIPC API for external bearers (see tipc_bearer.h) */
+
+EXPORT_SYMBOL(tipc_block_bearer);
+EXPORT_SYMBOL(tipc_continue);
+EXPORT_SYMBOL(tipc_disable_bearer);
+EXPORT_SYMBOL(tipc_enable_bearer);
+EXPORT_SYMBOL(tipc_recv_msg);
+EXPORT_SYMBOL(tipc_register_media);
+
+/* TIPC API for external APIs (see tipc_port.h) */
+
+EXPORT_SYMBOL(tipc_createport_raw);
+EXPORT_SYMBOL(tipc_set_msg_option);
+EXPORT_SYMBOL(tipc_reject_msg);
+EXPORT_SYMBOL(tipc_send_buf_fast);
+EXPORT_SYMBOL(tipc_acknowledge);
+EXPORT_SYMBOL(tipc_get_port);
+EXPORT_SYMBOL(tipc_get_handle);
+
diff --git a/net/tipc/core.h b/net/tipc/core.h
new file mode 100644
index 00000000000..b69b60b2cc8
--- /dev/null
+++ b/net/tipc/core.h
@@ -0,0 +1,316 @@
+/*
+ * net/tipc/core.h: Include file for TIPC global declarations
+ *
+ * Copyright (c) 2005-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_CORE_H
+#define _TIPC_CORE_H
+
+#include <net/tipc/tipc.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <asm/uaccess.h>
+#include <linux/interrupt.h>
+#include <asm/atomic.h>
+#include <asm/hardirq.h>
+#include <linux/netdevice.h>
+#include <linux/in.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+
+/*
+ * TIPC debugging code
+ */
+
+#define assert(i) BUG_ON(!(i))
+
+struct tipc_msg;
+extern struct print_buf *CONS, *LOG;
+extern struct print_buf *TEE(struct print_buf *, struct print_buf *);
+void msg_print(struct print_buf *, struct tipc_msg *, const char *);
+void tipc_printf(struct print_buf *, const char *fmt, ...);
+void tipc_dump(struct print_buf *, const char *fmt, ...);
+
+#ifdef CONFIG_TIPC_DEBUG
+
+/*
+ * TIPC debug support included:
+ * - system messages are printed to TIPC_OUTPUT print buffer
+ * - debug messages are printed to DBG_OUTPUT print buffer
+ */
+
+#define err(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_ERR "TIPC: " fmt, ## arg)
+#define warn(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_WARNING "TIPC: " fmt, ## arg)
+#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_NOTICE "TIPC: " fmt, ## arg)
+
+#define dbg(fmt, arg...) do {if (DBG_OUTPUT) tipc_printf(DBG_OUTPUT, fmt, ## arg);} while(0)
+#define msg_dbg(msg, txt) do {if (DBG_OUTPUT) msg_print(DBG_OUTPUT, msg, txt);} while(0)
+#define dump(fmt, arg...) do {if (DBG_OUTPUT) tipc_dump(DBG_OUTPUT, fmt, ##arg);} while(0)
+
+
+/*
+ * By default, TIPC_OUTPUT is defined to be system console and TIPC log buffer,
+ * while DBG_OUTPUT is the null print buffer. These defaults can be changed
+ * here, or on a per .c file basis, by redefining these symbols. The following
+ * print buffer options are available:
+ *
+ * NULL : Output to null print buffer (i.e. print nowhere)
+ * CONS : Output to system console
+ * LOG : Output to TIPC log buffer
+ * &buf : Output to user-defined buffer (struct print_buf *)
+ * TEE(&buf_a,&buf_b) : Output to two print buffers (e.g. TEE(CONS,LOG))
+ */
+
+#ifndef TIPC_OUTPUT
+#define TIPC_OUTPUT TEE(CONS,LOG)
+#endif
+
+#ifndef DBG_OUTPUT
+#define DBG_OUTPUT NULL
+#endif
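+
+/*
+ * Illustrative sketch (not part of this patch): a .c file that wants its
+ * debug output sent straight to the console could override the default
+ * before including this header:
+ *
+ *	#define DBG_OUTPUT CONS
+ *	#include "core.h"
+ *
+ * The #ifndef guards above let such a per-file definition take precedence.
+ */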
+
+#else
+
+#ifndef DBG_OUTPUT
+#define DBG_OUTPUT NULL
+#endif
+
+/*
+ * TIPC debug support not included:
+ * - system messages are printed to system console
+ * - debug messages are not printed
+ */
+
+#define err(fmt, arg...) printk(KERN_ERR "TIPC: " fmt , ## arg)
+#define info(fmt, arg...) printk(KERN_INFO "TIPC: " fmt , ## arg)
+#define warn(fmt, arg...) printk(KERN_WARNING "TIPC: " fmt , ## arg)
+
+#define dbg(fmt, arg...) do {} while (0)
+#define msg_dbg(msg,txt) do {} while (0)
+#define dump(fmt,arg...) do {} while (0)
+
+#endif
+
+
+/*
+ * TIPC-specific error codes
+ */
+
+#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */
+
+/*
+ * Global configuration variables
+ */
+
+extern u32 tipc_own_addr;
+extern int tipc_max_zones;
+extern int tipc_max_clusters;
+extern int tipc_max_nodes;
+extern int tipc_max_slaves;
+extern int tipc_max_ports;
+extern int tipc_max_subscriptions;
+extern int tipc_max_publications;
+extern int tipc_net_id;
+extern int tipc_remote_management;
+
+/*
+ * Other global variables
+ */
+
+extern int tipc_mode;
+extern int tipc_random;
+extern const char tipc_alphabet[];
+extern atomic_t tipc_user_count;
+
+
+/*
+ * Routines available to privileged subsystems
+ */
+
+extern int start_core(void);
+extern void stop_core(void);
+extern int start_net(void);
+extern void stop_net(void);
+
+static inline int delimit(int val, int min, int max)
+{
+ if (val > max)
+ return max;
+ if (val < min)
+ return min;
+ return val;
+}
+
+
+/*
+ * TIPC timer and signal code
+ */
+
+typedef void (*Handler) (unsigned long);
+
+u32 k_signal(Handler routine, unsigned long argument);
+
+/**
+ * k_init_timer - initialize a timer
+ * @timer: pointer to timer structure
+ * @routine: pointer to routine to invoke when timer expires
+ * @argument: value to pass to routine when timer expires
+ *
+ * Timer must be initialized before use (and terminated when no longer needed).
+ */
+
+static inline void k_init_timer(struct timer_list *timer, Handler routine,
+ unsigned long argument)
+{
+ dbg("initializing timer %p\n", timer);
+ init_timer(timer);
+ timer->function = routine;
+ timer->data = argument;
+}
+
+/**
+ * k_start_timer - start a timer
+ * @timer: pointer to timer structure
+ * @msec: time to delay (in ms)
+ *
+ * Schedules a previously initialized timer for later execution.
+ * If timer is already running, the new timeout overrides the previous request.
+ *
+ * To ensure the timer doesn't expire before the specified delay elapses,
+ * the amount of delay is rounded up when converting to jiffies, and
+ * an additional jiffy is added to account for the fact that
+ * the starting time may be in the middle of the current jiffy.
+ */
+
+static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
+{
+ dbg("starting timer %p for %u\n", timer, msec);
+ mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1);
+}
+
+/**
+ * k_cancel_timer - cancel a timer
+ * @timer: pointer to timer structure
+ *
+ * Cancels a previously initialized timer.
+ * Can be called safely even if the timer is already inactive.
+ *
+ * WARNING: Must not be called when holding locks required by the timer's
+ * timeout routine, otherwise deadlock can occur on SMP systems!
+ */
+
+static inline void k_cancel_timer(struct timer_list *timer)
+{
+ dbg("cancelling timer %p\n", timer);
+ del_timer_sync(timer);
+}
+
+/**
+ * k_term_timer - terminate a timer
+ * @timer: pointer to timer structure
+ *
+ * Prevents further use of a previously initialized timer.
+ *
+ * WARNING: Caller must ensure timer isn't currently running.
+ *
+ * (Do not "enhance" this routine to automatically cancel an active timer,
+ * otherwise deadlock can arise when a timeout routine calls k_term_timer.)
+ */
+
+static inline void k_term_timer(struct timer_list *timer)
+{
+ dbg("terminating timer %p\n", timer);
+}
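+
+/*
+ * Typical life cycle of a TIPC timer, as used by the discovery code added
+ * later in this patch (see disc_init_link_req() and disc_stop_link_req()):
+ *
+ *	k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
+ *	k_start_timer(&req->timer, req->timer_intv);
+ *	...
+ *	k_cancel_timer(&req->timer);
+ *	k_term_timer(&req->timer);
+ */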
+
+
+/*
+ * TIPC message buffer code
+ *
+ * TIPC message buffer headroom leaves room for a 14-byte Ethernet header,
+ * while ensuring the TIPC header is word aligned for quicker access
+ */
+
+#define BUF_HEADROOM 16u
+
+struct tipc_skb_cb {
+ void *handle;
+};
+
+#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
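+
+/*
+ * The handle field carries per-buffer context for TIPC-internal use; for
+ * example, disc_recv_msg() in discover.c reads the receiving bearer back out
+ * of TIPC_SKB_CB(buf)->handle (the field itself is filled in elsewhere, by
+ * the bearer receive code, before discovery messages are dispatched).
+ */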
+
+
+static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
+{
+ return (struct tipc_msg *)skb->data;
+}
+
+/**
+ * buf_acquire - creates a TIPC message buffer
+ * @size: message size (including TIPC header)
+ *
+ * Returns a new buffer. Space is reserved for a data link header.
+ */
+
+static inline struct sk_buff *buf_acquire(u32 size)
+{
+ struct sk_buff *skb;
+ unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
+
+ skb = alloc_skb(buf_size, GFP_ATOMIC);
+ if (skb) {
+ skb_reserve(skb, BUF_HEADROOM);
+ skb_put(skb, size);
+ skb->next = NULL;
+ }
+ return skb;
+}
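+
+/*
+ * For example, a 100-byte message gives buf_size = (16 + 100 + 3) & ~3 = 116:
+ * the 16-byte headroom plus the payload, rounded up to a 4-byte multiple.
+ */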
+
+/**
+ * buf_discard - frees a TIPC message buffer
+ * @skb: message buffer
+ *
+ * Frees the buffer; if passed NULL, just returns.
+ */
+
+static inline void buf_discard(struct sk_buff *skb)
+{
+ if (likely(skb != NULL))
+ kfree_skb(skb);
+}
+
+#endif
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
new file mode 100644
index 00000000000..7ed60a1cfbb
--- /dev/null
+++ b/net/tipc/dbg.c
@@ -0,0 +1,395 @@
+/*
+ * net/tipc/dbg.c: TIPC print buffer routines for debugging
+ *
+ * Copyright (c) 1996-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "config.h"
+#include "dbg.h"
+
+#define MAX_STRING 512
+
+static char print_string[MAX_STRING];
+static spinlock_t print_lock = SPIN_LOCK_UNLOCKED;
+
+static struct print_buf cons_buf = { NULL, 0, NULL, NULL };
+struct print_buf *CONS = &cons_buf;
+
+static struct print_buf log_buf = { NULL, 0, NULL, NULL };
+struct print_buf *LOG = &log_buf;
+
+
+#define FORMAT(PTR,LEN,FMT) \
+{\
+ va_list args;\
+ va_start(args, FMT);\
+ LEN = vsprintf(PTR, FMT, args);\
+ va_end(args);\
+ *(PTR + LEN) = '\0';\
+}
+
+/*
+ * Locking policy when using print buffers.
+ *
+ * 1) Routines of the form printbuf_XXX() rely on the caller to prevent
+ * simultaneous use of the print buffer(s) being manipulated.
+ * 2) tipc_printf() uses 'print_lock' to prevent simultaneous use of
+ * 'print_string' and to protect its print buffer(s).
+ * 3) TEE() uses 'print_lock' to protect its print buffer(s).
+ * 4) Routines of the form log_XXX() use 'print_lock' to protect LOG.
+ */
+
+/**
+ * printbuf_init - initialize print buffer to empty
+ */
+
+void printbuf_init(struct print_buf *pb, char *raw, u32 sz)
+{
+ if (!pb || !raw || (sz < (MAX_STRING + 1)))
+ return;
+
+ pb->crs = pb->buf = raw;
+ pb->size = sz;
+ pb->next = 0;
+ pb->buf[0] = 0;
+ pb->buf[sz-1] = ~0;
+}
+
+/**
+ * printbuf_reset - reinitialize print buffer to empty state
+ */
+
+void printbuf_reset(struct print_buf *pb)
+{
+ if (pb && pb->buf)
+ printbuf_init(pb, pb->buf, pb->size);
+}
+
+/**
+ * printbuf_empty - test if print buffer is in empty state
+ */
+
+int printbuf_empty(struct print_buf *pb)
+{
+ return (!pb || !pb->buf || (pb->crs == pb->buf));
+}
+
+/**
+ * printbuf_validate - check for print buffer overflow
+ *
+ * Verifies that a print buffer has captured all data written to it.
+ * If data has been lost, linearize buffer and prepend an error message
+ *
+ * Returns length of print buffer data string (including trailing NULL)
+ */
+
+int printbuf_validate(struct print_buf *pb)
+{
+ char *err = " *** PRINT BUFFER WRAPPED AROUND ***\n";
+ char *cp_buf;
+ struct print_buf cb;
+
+ if (!pb || !pb->buf)
+ return 0;
+
+ if (pb->buf[pb->size - 1] == '\0') {
+ cp_buf = kmalloc(pb->size, GFP_ATOMIC);
+ if (cp_buf != NULL){
+ printbuf_init(&cb, cp_buf, pb->size);
+ printbuf_move(&cb, pb);
+ printbuf_move(pb, &cb);
+ kfree(cp_buf);
+ memcpy(pb->buf, err, strlen(err));
+ } else {
+ printbuf_reset(pb);
+ tipc_printf(pb, err);
+ }
+ }
+ return (pb->crs - pb->buf + 1);
+}
+
+/**
+ * printbuf_move - move print buffer contents to another print buffer
+ *
+ * Current contents of destination print buffer (if any) are discarded.
+ * Source print buffer becomes empty if a successful move occurs.
+ */
+
+void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
+{
+ int len;
+
+ /* Handle the cases where contents can't be moved */
+
+ if (!pb_to || !pb_to->buf)
+ return;
+
+ if (!pb_from || !pb_from->buf) {
+ printbuf_reset(pb_to);
+ return;
+ }
+
+ if (pb_to->size < pb_from->size) {
+ printbuf_reset(pb_to);
+ tipc_printf(pb_to, "*** PRINT BUFFER OVERFLOW ***");
+ return;
+ }
+
+ /* Copy data from char after cursor to end (if used) */
+ len = pb_from->buf + pb_from->size - pb_from->crs - 2;
+ if ((pb_from->buf[pb_from->size-1] == 0) && (len > 0)) {
+ strcpy(pb_to->buf, pb_from->crs + 1);
+ pb_to->crs = pb_to->buf + len;
+ } else
+ pb_to->crs = pb_to->buf;
+
+ /* Copy data from start to cursor (always) */
+ len = pb_from->crs - pb_from->buf;
+ strcpy(pb_to->crs, pb_from->buf);
+ pb_to->crs += len;
+
+ printbuf_reset(pb_from);
+}
+
+/**
+ * tipc_printf - append formatted output to print buffer chain
+ */
+
+void tipc_printf(struct print_buf *pb, const char *fmt, ...)
+{
+ int chars_to_add;
+ int chars_left;
+ char save_char;
+ struct print_buf *pb_next;
+
+ spin_lock_bh(&print_lock);
+ FORMAT(print_string, chars_to_add, fmt);
+ if (chars_to_add >= MAX_STRING)
+ strcpy(print_string, "*** STRING TOO LONG ***");
+
+ while (pb) {
+ if (pb == CONS)
+ printk(print_string);
+ else if (pb->buf) {
+ chars_left = pb->buf + pb->size - pb->crs - 1;
+ if (chars_to_add <= chars_left) {
+ strcpy(pb->crs, print_string);
+ pb->crs += chars_to_add;
+ } else {
+ strcpy(pb->buf, print_string + chars_left);
+ save_char = print_string[chars_left];
+ print_string[chars_left] = 0;
+ strcpy(pb->crs, print_string);
+ print_string[chars_left] = save_char;
+ pb->crs = pb->buf + chars_to_add - chars_left;
+ }
+ }
+ pb_next = pb->next;
+ pb->next = 0;
+ pb = pb_next;
+ }
+ spin_unlock_bh(&print_lock);
+}
+
+/**
+ * TEE - perform next output operation on both print buffers
+ */
+
+struct print_buf *TEE(struct print_buf *b0, struct print_buf *b1)
+{
+ struct print_buf *pb = b0;
+
+ if (!b0 || (b0 == b1))
+ return b1;
+ if (!b1)
+ return b0;
+
+ spin_lock_bh(&print_lock);
+ while (pb->next) {
+ if ((pb->next == b1) || (pb->next == b0))
+ pb->next = pb->next->next;
+ else
+ pb = pb->next;
+ }
+ pb->next = b1;
+ spin_unlock_bh(&print_lock);
+ return b0;
+}
+
+/**
+ * print_to_console - write string of bytes to console in multiple chunks
+ */
+
+static void print_to_console(char *crs, int len)
+{
+ int rest = len;
+
+ while (rest > 0) {
+ int sz = rest < MAX_STRING ? rest : MAX_STRING;
+ char c = crs[sz];
+
+ crs[sz] = 0;
+ printk((const char *)crs);
+ crs[sz] = c;
+ rest -= sz;
+ crs += sz;
+ }
+}
+
+/**
+ * printbuf_dump - write print buffer contents to console
+ */
+
+static void printbuf_dump(struct print_buf *pb)
+{
+ int len;
+
+ /* Dump print buffer from char after cursor to end (if used) */
+ len = pb->buf + pb->size - pb->crs - 2;
+ if ((pb->buf[pb->size - 1] == 0) && (len > 0))
+ print_to_console(pb->crs + 1, len);
+
+ /* Dump print buffer from start to cursor (always) */
+ len = pb->crs - pb->buf;
+ print_to_console(pb->buf, len);
+}
+
+/**
+ * tipc_dump - dump non-console print buffer(s) to console
+ */
+
+void tipc_dump(struct print_buf *pb, const char *fmt, ...)
+{
+ int len;
+
+ spin_lock_bh(&print_lock);
+ FORMAT(CONS->buf, len, fmt);
+ printk(CONS->buf);
+
+ for (; pb; pb = pb->next) {
+ if (pb == CONS)
+ continue;
+ printk("\n---- Start of dump,%s log ----\n\n",
+ (pb == LOG) ? "global" : "local");
+ printbuf_dump(pb);
+ printbuf_reset(pb);
+ printk("\n-------- End of dump --------\n");
+ }
+ spin_unlock_bh(&print_lock);
+}
+
+/**
+ * log_stop - free up TIPC log print buffer
+ */
+
+void log_stop(void)
+{
+ spin_lock_bh(&print_lock);
+ if (LOG->buf) {
+ kfree(LOG->buf);
+ LOG->buf = NULL;
+ }
+ spin_unlock_bh(&print_lock);
+}
+
+/**
+ * log_reinit - set TIPC log print buffer to specified size
+ */
+
+void log_reinit(int log_size)
+{
+ log_stop();
+
+ if (log_size) {
+ if (log_size <= MAX_STRING)
+ log_size = MAX_STRING + 1;
+ spin_lock_bh(&print_lock);
+ printbuf_init(LOG, kmalloc(log_size, GFP_ATOMIC), log_size);
+ spin_unlock_bh(&print_lock);
+ }
+}
+
+/**
+ * log_resize - reconfigure size of TIPC log buffer
+ */
+
+struct sk_buff *log_resize(const void *req_tlv_area, int req_tlv_space)
+{
+ u32 value;
+
+ if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+ return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+ value = *(u32 *)TLV_DATA(req_tlv_area);
+ value = ntohl(value);
+ if (value != delimit(value, 0, 32768))
+ return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+ " (log size must be 0-32768)");
+ log_reinit(value);
+ return cfg_reply_none();
+}
+
+/**
+ * log_dump - capture TIPC log buffer contents in configuration message
+ */
+
+struct sk_buff *log_dump(void)
+{
+ struct sk_buff *reply;
+
+ spin_lock_bh(&print_lock);
+ if (!LOG->buf)
+ reply = cfg_reply_ultra_string("log not activated\n");
+ else if (printbuf_empty(LOG))
+ reply = cfg_reply_ultra_string("log is empty\n");
+ else {
+ struct tlv_desc *rep_tlv;
+ struct print_buf pb;
+ int str_len;
+
+ str_len = min(LOG->size, 32768u);
+ reply = cfg_reply_alloc(TLV_SPACE(str_len));
+ if (reply) {
+ rep_tlv = (struct tlv_desc *)reply->data;
+ printbuf_init(&pb, TLV_DATA(rep_tlv), str_len);
+ printbuf_move(&pb, LOG);
+ str_len = strlen(TLV_DATA(rep_tlv)) + 1;
+ skb_put(reply, TLV_SPACE(str_len));
+ TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
+ }
+ }
+ spin_unlock_bh(&print_lock);
+ return reply;
+}
+
diff --git a/net/tipc/dbg.h b/net/tipc/dbg.h
new file mode 100644
index 00000000000..c6b2a64c224
--- /dev/null
+++ b/net/tipc/dbg.h
@@ -0,0 +1,59 @@
+/*
+ * net/tipc/dbg.h: Include file for TIPC print buffer routines
+ *
+ * Copyright (c) 1997-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_DBG_H
+#define _TIPC_DBG_H
+
+struct print_buf {
+ char *buf;
+ u32 size;
+ char *crs;
+ struct print_buf *next;
+};
+
+void printbuf_init(struct print_buf *pb, char *buf, u32 sz);
+void printbuf_reset(struct print_buf *pb);
+int printbuf_empty(struct print_buf *pb);
+int printbuf_validate(struct print_buf *pb);
+void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from);
+
+void log_reinit(int log_size);
+void log_stop(void);
+
+struct sk_buff *log_resize(const void *req_tlv_area, int req_tlv_space);
+struct sk_buff *log_dump(void);
+
+#endif
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
new file mode 100644
index 00000000000..b106ef1621c
--- /dev/null
+++ b/net/tipc/discover.c
@@ -0,0 +1,318 @@
+/*
+ * net/tipc/discover.c
+ *
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "link.h"
+#include "zone.h"
+#include "discover.h"
+#include "port.h"
+#include "name_table.h"
+
+#define TIPC_LINK_REQ_INIT 125 /* min delay during bearer start up */
+#define TIPC_LINK_REQ_FAST 2000 /* normal delay if bearer has no links */
+#define TIPC_LINK_REQ_SLOW 600000 /* normal delay if bearer has links */
+
+#if 0
+#define GET_NODE_INFO 300
+#define GET_NODE_INFO_RESULT 301
+#define FORWARD_LINK_PROBE 302
+#define LINK_REQUEST_REJECTED 303
+#define LINK_REQUEST_ACCEPTED 304
+#define DROP_LINK_REQUEST 305
+#define CHECK_LINK_COUNT 306
+#endif
+
+/*
+ * TODO: Most of the inter-cluster setup code should be rewritten
+ * and made conformant with the specification.
+ */
+
+
+/**
+ * struct link_req - information about an ongoing link setup request
+ * @bearer: bearer issuing requests
+ * @dest: destination address for request messages
+ * @buf: request message to be (repeatedly) sent
+ * @timer: timer governing period between requests
+ * @timer_intv: current interval between requests (in ms)
+ */
+struct link_req {
+ struct bearer *bearer;
+ struct tipc_media_addr dest;
+ struct sk_buff *buf;
+ struct timer_list timer;
+ unsigned int timer_intv;
+};
+
+
+#if 0
+int disc_create_link(const struct tipc_link_create *argv)
+{
+ /*
+ * Code for inter cluster link setup here
+ */
+ return TIPC_OK;
+}
+#endif
+
+/*
+ * disc_link_event(): handle a link up/down event involving another cluster
+ */
+
+void disc_link_event(u32 addr, char *name, int up)
+{
+ if (in_own_cluster(addr))
+ return;
+ /*
+ * Code for inter cluster link setup here
+ */
+}
+
+/**
+ * disc_init_msg - initialize a link setup message
+ * @type: message type (request or response)
+ * @req_links: number of links associated with message
+ * @dest_domain: network domain of node(s) which should respond to message
+ * @b_ptr: ptr to bearer issuing message
+ */
+
+struct sk_buff *disc_init_msg(u32 type,
+ u32 req_links,
+ u32 dest_domain,
+ struct bearer *b_ptr)
+{
+ struct sk_buff *buf = buf_acquire(DSC_H_SIZE);
+ struct tipc_msg *msg;
+
+ if (buf) {
+ msg = buf_msg(buf);
+ msg_init(msg, LINK_CONFIG, type, TIPC_OK, DSC_H_SIZE,
+ dest_domain);
+ msg_set_non_seq(msg);
+ msg_set_req_links(msg, req_links);
+ msg_set_dest_domain(msg, dest_domain);
+ msg_set_bc_netid(msg, tipc_net_id);
+ msg_set_media_addr(msg, &b_ptr->publ.addr);
+ }
+ return buf;
+}
+
+/**
+ * disc_recv_msg - handle incoming link setup message (request or response)
+ * @buf: buffer containing message
+ */
+
+void disc_recv_msg(struct sk_buff *buf)
+{
+ struct bearer *b_ptr = (struct bearer *)TIPC_SKB_CB(buf)->handle;
+ struct link *link;
+ struct tipc_media_addr media_addr;
+ struct tipc_msg *msg = buf_msg(buf);
+ u32 dest = msg_dest_domain(msg);
+ u32 orig = msg_prevnode(msg);
+ u32 net_id = msg_bc_netid(msg);
+ u32 type = msg_type(msg);
+
+ msg_get_media_addr(msg,&media_addr);
+ msg_dbg(msg, "RECV:");
+ buf_discard(buf);
+
+ if (net_id != tipc_net_id)
+ return;
+ if (!addr_domain_valid(dest))
+ return;
+ if (!addr_node_valid(orig))
+ return;
+ if (orig == tipc_own_addr)
+ return;
+ if (!in_scope(dest, tipc_own_addr))
+ return;
+ if (is_slave(tipc_own_addr) && is_slave(orig))
+ return;
+ if (is_slave(orig) && !in_own_cluster(orig))
+ return;
+ if (in_own_cluster(orig)) {
+ /* Always accept link here */
+ struct sk_buff *rbuf;
+ struct tipc_media_addr *addr;
+ struct node *n_ptr = node_find(orig);
+ int link_up;
+ dbg(" in own cluster\n");
+ if (n_ptr == NULL) {
+ n_ptr = node_create(orig);
+ }
+ if (n_ptr == NULL) {
+ warn("Memory squeeze; Failed to create node\n");
+ return;
+ }
+ spin_lock_bh(&n_ptr->lock);
+ link = n_ptr->links[b_ptr->identity];
+ if (!link) {
+ dbg("creating link\n");
+ link = link_create(b_ptr, orig, &media_addr);
+ if (!link) {
+ spin_unlock_bh(&n_ptr->lock);
+ return;
+ }
+ }
+ addr = &link->media_addr;
+ if (memcmp(addr, &media_addr, sizeof(*addr))) {
+ char addr_string[16];
+
+ warn("New bearer address for %s\n",
+ addr_string_fill(addr_string, orig));
+ memcpy(addr, &media_addr, sizeof(*addr));
+ link_reset(link);
+ }
+ link_up = link_is_up(link);
+ spin_unlock_bh(&n_ptr->lock);
+ if ((type == DSC_RESP_MSG) || link_up)
+ return;
+ rbuf = disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
+ if (rbuf != NULL) {
+ msg_dbg(buf_msg(rbuf),"SEND:");
+ b_ptr->media->send_msg(rbuf, &b_ptr->publ, &media_addr);
+ buf_discard(rbuf);
+ }
+ }
+}
+
+/**
+ * disc_stop_link_req - stop sending periodic link setup requests
+ * @req: ptr to link request structure
+ */
+
+void disc_stop_link_req(struct link_req *req)
+{
+ if (!req)
+ return;
+
+ k_cancel_timer(&req->timer);
+ k_term_timer(&req->timer);
+ buf_discard(req->buf);
+ kfree(req);
+}
+
+/**
+ * disc_update_link_req - update frequency of periodic link setup requests
+ * @req: ptr to link request structure
+ */
+
+void disc_update_link_req(struct link_req *req)
+{
+ if (!req)
+ return;
+
+ if (req->timer_intv == TIPC_LINK_REQ_SLOW) {
+ if (!req->bearer->nodes.count) {
+ req->timer_intv = TIPC_LINK_REQ_FAST;
+ k_start_timer(&req->timer, req->timer_intv);
+ }
+ } else if (req->timer_intv == TIPC_LINK_REQ_FAST) {
+ if (req->bearer->nodes.count) {
+ req->timer_intv = TIPC_LINK_REQ_SLOW;
+ k_start_timer(&req->timer, req->timer_intv);
+ }
+ } else {
+ /* leave timer "as is" if we haven't yet reached a "normal" rate */
+ }
+}
+
+/**
+ * disc_timeout - send a periodic link setup request
+ * @req: ptr to link request structure
+ *
+ * Called whenever a link setup request timer associated with a bearer expires.
+ */
+
+static void disc_timeout(struct link_req *req)
+{
+ spin_lock_bh(&req->bearer->publ.lock);
+
+ req->bearer->media->send_msg(req->buf, &req->bearer->publ, &req->dest);
+
+ if ((req->timer_intv == TIPC_LINK_REQ_SLOW) ||
+ (req->timer_intv == TIPC_LINK_REQ_FAST)) {
+ /* leave timer interval "as is" if already at a "normal" rate */
+ } else {
+ req->timer_intv *= 2;
+ if (req->timer_intv > TIPC_LINK_REQ_SLOW)
+ req->timer_intv = TIPC_LINK_REQ_SLOW;
+ if ((req->timer_intv == TIPC_LINK_REQ_FAST) &&
+ (req->bearer->nodes.count))
+ req->timer_intv = TIPC_LINK_REQ_SLOW;
+ }
+ k_start_timer(&req->timer, req->timer_intv);
+
+ spin_unlock_bh(&req->bearer->publ.lock);
+}
+
+/**
+ * disc_init_link_req - start sending periodic link setup requests
+ * @b_ptr: ptr to bearer issuing requests
+ * @dest: destination address for request messages
+ * @dest_domain: network domain of node(s) which should respond to message
+ * @req_links: max number of desired links
+ *
+ * Returns pointer to link request structure, or NULL if unable to create.
+ */
+
+struct link_req *disc_init_link_req(struct bearer *b_ptr,
+ const struct tipc_media_addr *dest,
+ u32 dest_domain,
+ u32 req_links)
+{
+ struct link_req *req;
+
+ req = (struct link_req *)kmalloc(sizeof(*req), GFP_ATOMIC);
+ if (!req)
+ return NULL;
+
+ req->buf = disc_init_msg(DSC_REQ_MSG, req_links, dest_domain, b_ptr);
+ if (!req->buf) {
+ kfree(req);
+ return NULL;
+ }
+
+ memcpy(&req->dest, dest, sizeof(*dest));
+ req->bearer = b_ptr;
+ req->timer_intv = TIPC_LINK_REQ_INIT;
+ k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
+ k_start_timer(&req->timer, req->timer_intv);
+ return req;
+}
+
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
new file mode 100644
index 00000000000..2a6114d9162
--- /dev/null
+++ b/net/tipc/discover.h
@@ -0,0 +1,58 @@
+/*
+ * net/tipc/discover.h
+ *
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_DISCOVER_H
+#define _TIPC_DISCOVER_H
+
+#include <linux/tipc.h>
+
+struct link_req;
+
+struct link_req *disc_init_link_req(struct bearer *b_ptr,
+ const struct tipc_media_addr *dest,
+ u32 dest_domain,
+ u32 req_links);
+void disc_update_link_req(struct link_req *req);
+void disc_stop_link_req(struct link_req *req);
+
+void disc_recv_msg(struct sk_buff *buf);
+
+void disc_link_event(u32 addr, char *name, int up);
+#if 0
+int disc_create_link(const struct tipc_link_create *argv);
+#endif
+
+#endif
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
new file mode 100644
index 00000000000..34d0462db3a
--- /dev/null
+++ b/net/tipc/eth_media.c
@@ -0,0 +1,299 @@
+/*
+ * net/tipc/eth_media.c: Ethernet bearer support for TIPC
+ *
+ * Copyright (c) 2001-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <net/tipc/tipc.h>
+#include <net/tipc/tipc_bearer.h>
+#include <net/tipc/tipc_msg.h>
+#include <linux/netdevice.h>
+#include <linux/version.h>
+
+#define MAX_ETH_BEARERS 2
+#define TIPC_PROTOCOL 0x88ca
+#define ETH_LINK_PRIORITY 10
+#define ETH_LINK_TOLERANCE TIPC_DEF_LINK_TOL
+
+
+/**
+ * struct eth_bearer - Ethernet bearer data structure
+ * @bearer: ptr to associated "generic" bearer structure
+ * @dev: ptr to associated Ethernet network device
+ * @tipc_packet_type: used in binding TIPC to Ethernet driver
+ */
+
+struct eth_bearer {
+ struct tipc_bearer *bearer;
+ struct net_device *dev;
+ struct packet_type tipc_packet_type;
+};
+
+static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
+static int eth_started = 0;
+static struct notifier_block notifier;
+
+/**
+ * send_msg - send a TIPC message out over an Ethernet interface
+ */
+
+static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
+ struct tipc_media_addr *dest)
+{
+ struct sk_buff *clone;
+ struct net_device *dev;
+
+ clone = skb_clone(buf, GFP_ATOMIC);
+ if (clone) {
+ clone->nh.raw = clone->data;
+ dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
+ clone->dev = dev;
+ dev->hard_header(clone, dev, TIPC_PROTOCOL,
+ &dest->dev_addr.eth_addr,
+ dev->dev_addr, clone->len);
+ dev_queue_xmit(clone);
+ }
+ return TIPC_OK;
+}
+
+/**
+ * recv_msg - handle incoming TIPC message from an Ethernet interface
+ *
+ * Routine truncates any Ethernet padding/CRC appended to the message,
+ * and ensures message size matches actual length
+ */
+
+static int recv_msg(struct sk_buff *buf, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv;
+ u32 size;
+
+ if (likely(eb_ptr->bearer)) {
+ size = msg_size((struct tipc_msg *)buf->data);
+ skb_trim(buf, size);
+ if (likely(buf->len == size)) {
+ buf->next = NULL;
+ tipc_recv_msg(buf, eb_ptr->bearer);
+ } else {
+ kfree_skb(buf);
+ }
+ } else {
+ kfree_skb(buf);
+ }
+ return TIPC_OK;
+}
+
+/**
+ * enable_bearer - attach TIPC bearer to an Ethernet interface
+ */
+
+static int enable_bearer(struct tipc_bearer *tb_ptr)
+{
+ struct net_device *dev = dev_base;
+ struct eth_bearer *eb_ptr = &eth_bearers[0];
+ struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
+ char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
+
+ /* Find device with specified name */
+
+ while (dev && dev->name &&
+ (memcmp(dev->name, driver_name, strlen(dev->name)))) {
+ dev = dev->next;
+ }
+ if (!dev)
+ return -ENODEV;
+
+ /* Find Ethernet bearer for device (or create one) */
+
+ for (;(eb_ptr != stop) && eb_ptr->dev && (eb_ptr->dev != dev); eb_ptr++);
+ if (eb_ptr == stop)
+ return -EDQUOT;
+ if (!eb_ptr->dev) {
+ eb_ptr->dev = dev;
+ eb_ptr->tipc_packet_type.type = __constant_htons(TIPC_PROTOCOL);
+ eb_ptr->tipc_packet_type.dev = dev;
+ eb_ptr->tipc_packet_type.func = recv_msg;
+ eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
+ INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
+ dev_hold(dev);
+ dev_add_pack(&eb_ptr->tipc_packet_type);
+ }
+
+ /* Associate TIPC bearer with Ethernet bearer */
+
+ eb_ptr->bearer = tb_ptr;
+ tb_ptr->usr_handle = (void *)eb_ptr;
+ tb_ptr->mtu = dev->mtu;
+ tb_ptr->blocked = 0;
+ tb_ptr->addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
+ memcpy(&tb_ptr->addr.dev_addr, &dev->dev_addr, ETH_ALEN);
+ return 0;
+}
+
+/**
+ * disable_bearer - detach TIPC bearer from an Ethernet interface
+ *
+ * We really should do dev_remove_pack() here, but this function cannot be
+ * called at tasklet level. Instead, eth_bearer->bearer is used as a flag to
+ * discard incoming buffers, and dev_remove_pack() is deferred until
+ * eth_media_stop() at module exit.
+ */
+
+static void disable_bearer(struct tipc_bearer *tb_ptr)
+{
+ ((struct eth_bearer *)tb_ptr->usr_handle)->bearer = 0;
+}
+
+/**
+ * recv_notification - handle device updates from OS
+ *
+ * Change the state of the Ethernet bearer (if any) associated with the
+ * specified device.
+ */
+
+static int recv_notification(struct notifier_block *nb, unsigned long evt,
+ void *dv)
+{
+ struct net_device *dev = (struct net_device *)dv;
+ struct eth_bearer *eb_ptr = &eth_bearers[0];
+ struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
+
+ while ((eb_ptr->dev != dev)) {
+ if (++eb_ptr == stop)
+ return NOTIFY_DONE; /* couldn't find device */
+ }
+ if (!eb_ptr->bearer)
+ return NOTIFY_DONE; /* bearer had been disabled */
+
+ eb_ptr->bearer->mtu = dev->mtu;
+
+ switch (evt) {
+ case NETDEV_CHANGE:
+ if (netif_carrier_ok(dev))
+ tipc_continue(eb_ptr->bearer);
+ else
+ tipc_block_bearer(eb_ptr->bearer->name);
+ break;
+ case NETDEV_UP:
+ tipc_continue(eb_ptr->bearer);
+ break;
+ case NETDEV_DOWN:
+ tipc_block_bearer(eb_ptr->bearer->name);
+ break;
+ case NETDEV_CHANGEMTU:
+ case NETDEV_CHANGEADDR:
+ tipc_block_bearer(eb_ptr->bearer->name);
+ tipc_continue(eb_ptr->bearer);
+ break;
+ case NETDEV_UNREGISTER:
+ case NETDEV_CHANGENAME:
+ tipc_disable_bearer(eb_ptr->bearer->name);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+/**
+ * eth_addr2str - convert Ethernet address to string
+ */
+
+static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
+{
+ unchar *addr = (unchar *)&a->dev_addr;
+
+ if (str_size < 18)
+ *str_buf = '\0';
+ else
+ sprintf(str_buf, "%02x:%02x:%02x:%02x:%02x:%02x",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+ return str_buf;
+}
+
+/**
+ * eth_media_start - activate Ethernet bearer support
+ *
+ * Register Ethernet media type with TIPC bearer code. Also register
+ * with OS for notifications about device state changes.
+ */
+
+int eth_media_start(void)
+{
+ struct tipc_media_addr bcast_addr;
+ int res;
+
+ if (eth_started)
+ return -EINVAL;
+
+ memset(&bcast_addr, 0xff, sizeof(bcast_addr));
+ memset(eth_bearers, 0, sizeof(eth_bearers));
+
+ res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth",
+ enable_bearer, disable_bearer, send_msg,
+ eth_addr2str, &bcast_addr, ETH_LINK_PRIORITY,
+ ETH_LINK_TOLERANCE, TIPC_DEF_LINK_WIN);
+ if (res)
+ return res;
+
+ notifier.notifier_call = &recv_notification;
+ notifier.priority = 0;
+ res = register_netdevice_notifier(&notifier);
+ if (!res)
+ eth_started = 1;
+ return res;
+}
+
+/**
+ * eth_media_stop - deactivate Ethernet bearer support
+ */
+
+void eth_media_stop(void)
+{
+ int i;
+
+ if (!eth_started)
+ return;
+
+ unregister_netdevice_notifier(&notifier);
+ for (i = 0; i < MAX_ETH_BEARERS ; i++) {
+ if (eth_bearers[i].bearer) {
+ eth_bearers[i].bearer->blocked = 1;
+ eth_bearers[i].bearer = 0;
+ }
+ if (eth_bearers[i].dev) {
+ dev_remove_pack(&eth_bearers[i].tipc_packet_type);
+ dev_put(eth_bearers[i].dev);
+ }
+ }
+ memset(&eth_bearers, 0, sizeof(eth_bearers));
+ eth_started = 0;
+}
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
new file mode 100644
index 00000000000..f320010f8a6
--- /dev/null
+++ b/net/tipc/handler.c
@@ -0,0 +1,132 @@
+/*
+ * net/tipc/handler.c: TIPC signal handling
+ *
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+
+struct queue_item {
+ struct list_head next_signal;
+ void (*handler) (unsigned long);
+ unsigned long data;
+};
+
+static kmem_cache_t *tipc_queue_item_cache;
+static struct list_head signal_queue_head;
+static spinlock_t qitem_lock = SPIN_LOCK_UNLOCKED;
+static int handler_enabled = 0;
+
+static void process_signal_queue(unsigned long dummy);
+
+static DECLARE_TASKLET_DISABLED(tipc_tasklet, process_signal_queue, 0);
+
+
+unsigned int k_signal(Handler routine, unsigned long argument)
+{
+ struct queue_item *item;
+
+ if (!handler_enabled) {
+ err("Signal request ignored by handler\n");
+ return -ENOPROTOOPT;
+ }
+
+ spin_lock_bh(&qitem_lock);
+ item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
+ if (!item) {
+ err("Signal queue out of memory\n");
+ spin_unlock_bh(&qitem_lock);
+ return -ENOMEM;
+ }
+ item->handler = routine;
+ item->data = argument;
+ list_add_tail(&item->next_signal, &signal_queue_head);
+ spin_unlock_bh(&qitem_lock);
+ tasklet_schedule(&tipc_tasklet);
+ return 0;
+}
+
+static void process_signal_queue(unsigned long dummy)
+{
+ struct queue_item *__volatile__ item;
+ struct list_head *l, *n;
+
+ spin_lock_bh(&qitem_lock);
+ list_for_each_safe(l, n, &signal_queue_head) {
+ item = list_entry(l, struct queue_item, next_signal);
+ list_del(&item->next_signal);
+ spin_unlock_bh(&qitem_lock);
+ item->handler(item->data);
+ spin_lock_bh(&qitem_lock);
+ kmem_cache_free(tipc_queue_item_cache, item);
+ }
+ spin_unlock_bh(&qitem_lock);
+}
+
+int handler_start(void)
+{
+ tipc_queue_item_cache =
+ kmem_cache_create("tipc_queue_items", sizeof(struct queue_item),
+ 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+ if (!tipc_queue_item_cache)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&signal_queue_head);
+ tasklet_enable(&tipc_tasklet);
+ handler_enabled = 1;
+ return 0;
+}
+
+void handler_stop(void)
+{
+ struct list_head *l, *n;
+ struct queue_item *item;
+
+ if (!handler_enabled)
+ return;
+
+ handler_enabled = 0;
+ tasklet_disable(&tipc_tasklet);
+ tasklet_kill(&tipc_tasklet);
+
+ spin_lock_bh(&qitem_lock);
+ list_for_each_safe(l, n, &signal_queue_head) {
+ item = list_entry(l, struct queue_item, next_signal);
+ list_del(&item->next_signal);
+ kmem_cache_free(tipc_queue_item_cache, item);
+ }
+ spin_unlock_bh(&qitem_lock);
+
+ kmem_cache_destroy(tipc_queue_item_cache);
+}
+
diff --git a/net/tipc/link.c b/net/tipc/link.c
new file mode 100644
index 00000000000..7265f4be476
--- /dev/null
+++ b/net/tipc/link.c
@@ -0,0 +1,3167 @@
+/*
+ * net/tipc/link.c: TIPC link code
+ *
+ * Copyright (c) 1996-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "link.h"
+#include "net.h"
+#include "node.h"
+#include "port.h"
+#include "addr.h"
+#include "node_subscr.h"
+#include "name_distr.h"
+#include "bearer.h"
+#include "name_table.h"
+#include "discover.h"
+#include "config.h"
+#include "bcast.h"
+
+
+/*
+ * Limit for deferred reception queue:
+ */
+
+#define DEF_QUEUE_LIMIT 256u
+
+/*
+ * Link state events:
+ */
+
+#define STARTING_EVT 856384768 /* link processing trigger */
+#define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */
+#define TIMEOUT_EVT 560817u /* link timer expired */
+
+/*
+ * The following two 'message types' are really just implementation
+ * data conveniently stored in the message header.
+ * They must not be considered part of the protocol.
+ */
+#define OPEN_MSG 0
+#define CLOSED_MSG 1
+
+/*
+ * State value stored in 'exp_msg_count'
+ */
+
+#define START_CHANGEOVER 100000u
+
+/**
+ * struct link_name - deconstructed link name
+ * @addr_local: network address of node at this end
+ * @if_local: name of interface at this end
+ * @addr_peer: network address of node at far end
+ * @if_peer: name of interface at far end
+ */
+
+struct link_name {
+ u32 addr_local;
+ char if_local[TIPC_MAX_IF_NAME];
+ u32 addr_peer;
+ char if_peer[TIPC_MAX_IF_NAME];
+};
+
+#if 0
+
+/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
+
+/**
+ * struct link_event - link up/down event notification
+ */
+
+struct link_event {
+ u32 addr;
+ int up;
+ void (*fcn)(u32, char *, int);
+ char name[TIPC_MAX_LINK_NAME];
+};
+
+#endif
+
+static void link_handle_out_of_seq_msg(struct link *l_ptr,
+ struct sk_buff *buf);
+static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
+static int link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
+static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
+static int link_send_sections_long(struct port *sender,
+ struct iovec const *msg_sect,
+ u32 num_sect, u32 destnode);
+static void link_check_defragm_bufs(struct link *l_ptr);
+static void link_state_event(struct link *l_ptr, u32 event);
+static void link_reset_statistics(struct link *l_ptr);
+static void link_print(struct link *l_ptr, struct print_buf *buf,
+ const char *str);
+
+/*
+ * Debugging code used by link routines only
+ *
+ * When debugging link problems on a system that has multiple links,
+ * the standard TIPC debugging routines may not be useful since they
+ * allow the output from multiple links to be intermixed. For this reason
+ * routines of the form "dbg_link_XXX()" have been created that will capture
+ * debug info into a link's personal print buffer, which can then be dumped
+ * into the TIPC system log (LOG) upon request.
+ *
+ * To enable per-link debugging, use LINK_LOG_BUF_SIZE to specify the size
+ * of the print buffer used by each link. If LINK_LOG_BUF_SIZE is set to 0,
+ * the dbg_link_XXX() routines simply send their output to the standard
+ * debug print buffer (DBG_OUTPUT), if it has been defined; this can be useful
+ * when there is only a single link in the system being debugged.
+ *
+ * Notes:
+ * - When enabled, LINK_LOG_BUF_SIZE should be set to at least 1000 (bytes)
+ * - "l_ptr" must be valid when using dbg_link_XXX() macros
+ */
+
+#define LINK_LOG_BUF_SIZE 0
+
+#define dbg_link(fmt, arg...) do { if (LINK_LOG_BUF_SIZE) tipc_printf(&l_ptr->print_buf, fmt, ## arg); } while (0)
+#define dbg_link_msg(msg, txt) do { if (LINK_LOG_BUF_SIZE) msg_print(&l_ptr->print_buf, msg, txt); } while (0)
+#define dbg_link_state(txt) do { if (LINK_LOG_BUF_SIZE) link_print(l_ptr, &l_ptr->print_buf, txt); } while (0)
+#define dbg_link_dump() do { \
+ if (LINK_LOG_BUF_SIZE) { \
+ tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \
+ printbuf_move(LOG, &l_ptr->print_buf); \
+ } \
+} while (0)
+
+static inline void dbg_print_link(struct link *l_ptr, const char *str)
+{
+ if (DBG_OUTPUT)
+ link_print(l_ptr, DBG_OUTPUT, str);
+}
+
+static inline void dbg_print_buf_chain(struct sk_buff *root_buf)
+{
+ if (DBG_OUTPUT) {
+ struct sk_buff *buf = root_buf;
+
+ while (buf) {
+ msg_dbg(buf_msg(buf), "In chain: ");
+ buf = buf->next;
+ }
+ }
+}
+
+/*
+ * Simple inlined link routines
+ */
+
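+/* align(): round a size up to the next multiple of 4 bytes */
+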
+static inline unsigned int align(unsigned int i)
+{
+ return (i + 3) & ~3u;
+}
+
+static inline int link_working_working(struct link *l_ptr)
+{
+ return (l_ptr->state == WORKING_WORKING);
+}
+
+static inline int link_working_unknown(struct link *l_ptr)
+{
+ return (l_ptr->state == WORKING_UNKNOWN);
+}
+
+static inline int link_reset_unknown(struct link *l_ptr)
+{
+ return (l_ptr->state == RESET_UNKNOWN);
+}
+
+static inline int link_reset_reset(struct link *l_ptr)
+{
+ return (l_ptr->state == RESET_RESET);
+}
+
+static inline int link_blocked(struct link *l_ptr)
+{
+ return (l_ptr->exp_msg_count || l_ptr->blocked);
+}
+
+static inline int link_congested(struct link *l_ptr)
+{
+ return (l_ptr->out_queue_size >= l_ptr->queue_limit[0]);
+}
+
+static inline u32 link_max_pkt(struct link *l_ptr)
+{
+ return l_ptr->max_pkt;
+}
+
+static inline void link_init_max_pkt(struct link *l_ptr)
+{
+ u32 max_pkt;
+
+ max_pkt = (l_ptr->b_ptr->publ.mtu & ~3);
+ if (max_pkt > MAX_MSG_SIZE)
+ max_pkt = MAX_MSG_SIZE;
+
+ l_ptr->max_pkt_target = max_pkt;
+ if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
+ l_ptr->max_pkt = l_ptr->max_pkt_target;
+ else
+ l_ptr->max_pkt = MAX_PKT_DEFAULT;
+
+ l_ptr->max_pkt_probes = 0;
+}
+
+static inline u32 link_next_sent(struct link *l_ptr)
+{
+ if (l_ptr->next_out)
+ return msg_seqno(buf_msg(l_ptr->next_out));
+ return mod(l_ptr->next_out_no);
+}
+
+static inline u32 link_last_sent(struct link *l_ptr)
+{
+ return mod(link_next_sent(l_ptr) - 1);
+}
+
+/*
+ * Simple non-inlined link routines (i.e. referenced outside this file)
+ */
+
+int link_is_up(struct link *l_ptr)
+{
+ if (!l_ptr)
+ return 0;
+ return (link_working_working(l_ptr) || link_working_unknown(l_ptr));
+}
+
+int link_is_active(struct link *l_ptr)
+{
+ return ((l_ptr->owner->active_links[0] == l_ptr) ||
+ (l_ptr->owner->active_links[1] == l_ptr));
+}
+
+/**
+ * link_name_validate - validate & (optionally) deconstruct link name
+ * @name: ptr to link name string
+ * @name_parts: ptr to area for link name components (or NULL if not needed)
+ *
+ * Returns 1 if link name is valid, otherwise 0.
+ */
+
+static int link_name_validate(const char *name, struct link_name *name_parts)
+{
+ char name_copy[TIPC_MAX_LINK_NAME];
+ char *addr_local;
+ char *if_local;
+ char *addr_peer;
+ char *if_peer;
+ char dummy;
+ u32 z_local, c_local, n_local;
+ u32 z_peer, c_peer, n_peer;
+ u32 if_local_len;
+ u32 if_peer_len;
+
+ /* copy link name & ensure length is OK */
+
+ name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
+ /* need above in case non-POSIX strncpy() doesn't pad with nulls */
+ strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
+ if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
+ return 0;
+
+ /* ensure all component parts of link name are present */
+
+ addr_local = name_copy;
+ if ((if_local = strchr(addr_local, ':')) == NULL)
+ return 0;
+ *(if_local++) = 0;
+ if ((addr_peer = strchr(if_local, '-')) == NULL)
+ return 0;
+ *(addr_peer++) = 0;
+ if_local_len = addr_peer - if_local;
+ if ((if_peer = strchr(addr_peer, ':')) == NULL)
+ return 0;
+ *(if_peer++) = 0;
+ if_peer_len = strlen(if_peer) + 1;
+
+ /* validate component parts of link name */
+
+ if ((sscanf(addr_local, "%u.%u.%u%c",
+ &z_local, &c_local, &n_local, &dummy) != 3) ||
+ (sscanf(addr_peer, "%u.%u.%u%c",
+ &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
+ (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
+ (z_peer > 255) || (c_peer > 4095) || (n_peer > 4095) ||
+ (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
+ (if_peer_len <= 1) || (if_peer_len > TIPC_MAX_IF_NAME) ||
+ (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
+ (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
+ return 0;
+
+ /* return link name components, if necessary */
+
+ if (name_parts) {
+ name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
+ strcpy(name_parts->if_local, if_local);
+ name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
+ strcpy(name_parts->if_peer, if_peer);
+ }
+ return 1;
+}
+
+/**
+ * link_timeout - handle expiration of link timer
+ * @l_ptr: pointer to link
+ *
+ * This routine must not grab "net_lock" to avoid a potential deadlock conflict
+ * with link_delete(). (There is no risk that the node will be deleted by
+ * another thread because link_delete() always cancels the link timer before
+ * node_delete() is called.)
+ */
+
+static void link_timeout(struct link *l_ptr)
+{
+ node_lock(l_ptr->owner);
+
+ /* update counters used in statistical profiling of send traffic */
+
+ l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
+ l_ptr->stats.queue_sz_counts++;
+
+ if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
+ l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
+
+ if (l_ptr->first_out) {
+ struct tipc_msg *msg = buf_msg(l_ptr->first_out);
+ u32 length = msg_size(msg);
+
+ if ((msg_user(msg) == MSG_FRAGMENTER)
+ && (msg_type(msg) == FIRST_FRAGMENT)) {
+ length = msg_size(msg_get_wrapped(msg));
+ }
+ if (length) {
+ l_ptr->stats.msg_lengths_total += length;
+ l_ptr->stats.msg_length_counts++;
+ if (length <= 64)
+ l_ptr->stats.msg_length_profile[0]++;
+ else if (length <= 256)
+ l_ptr->stats.msg_length_profile[1]++;
+ else if (length <= 1024)
+ l_ptr->stats.msg_length_profile[2]++;
+ else if (length <= 4096)
+ l_ptr->stats.msg_length_profile[3]++;
+ else if (length <= 16384)
+ l_ptr->stats.msg_length_profile[4]++;
+ else if (length <= 32768)
+ l_ptr->stats.msg_length_profile[5]++;
+ else
+ l_ptr->stats.msg_length_profile[6]++;
+ }
+ }
+
+ /* do all other link processing performed on a periodic basis */
+
+ link_check_defragm_bufs(l_ptr);
+
+ link_state_event(l_ptr, TIMEOUT_EVT);
+
+ if (l_ptr->next_out)
+ link_push_queue(l_ptr);
+
+ node_unlock(l_ptr->owner);
+}
+
+static inline void link_set_timer(struct link *l_ptr, u32 time)
+{
+ k_start_timer(&l_ptr->timer, time);
+}
+
+/**
+ * link_create - create a new link
+ * @b_ptr: pointer to associated bearer
+ * @peer: network address of node at other end of link
+ * @media_addr: media address to use when sending messages over link
+ *
+ * Returns pointer to link.
+ */
+
+struct link *link_create(struct bearer *b_ptr, const u32 peer,
+ const struct tipc_media_addr *media_addr)
+{
+ struct link *l_ptr;
+ struct tipc_msg *msg;
+ char *if_name;
+
+ l_ptr = (struct link *)kmalloc(sizeof(*l_ptr), GFP_ATOMIC);
+ if (!l_ptr) {
+ warn("Memory squeeze; Failed to create link\n");
+ return NULL;
+ }
+ memset(l_ptr, 0, sizeof(*l_ptr));
+
+ l_ptr->addr = peer;
+ if_name = strchr(b_ptr->publ.name, ':') + 1;
+ sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
+ tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
+ tipc_node(tipc_own_addr),
+ if_name,
+ tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
+ /* note: peer i/f is appended to link name by reset/activate */
+ memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
+ k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
+ list_add_tail(&l_ptr->link_list, &b_ptr->links);
+ l_ptr->checkpoint = 1;
+ l_ptr->b_ptr = b_ptr;
+ link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
+ l_ptr->state = RESET_UNKNOWN;
+
+ l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
+ msg = l_ptr->pmsg;
+ msg_init(msg, LINK_PROTOCOL, RESET_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
+ msg_set_size(msg, sizeof(l_ptr->proto_msg));
+ msg_set_session(msg, tipc_random);
+ msg_set_bearer_id(msg, b_ptr->identity);
+ strcpy((char *)msg_data(msg), if_name);
+
+ l_ptr->priority = b_ptr->priority;
+ link_set_queue_limits(l_ptr, b_ptr->media->window);
+
+ link_init_max_pkt(l_ptr);
+
+ l_ptr->next_out_no = 1;
+ INIT_LIST_HEAD(&l_ptr->waiting_ports);
+
+ link_reset_statistics(l_ptr);
+
+ l_ptr->owner = node_attach_link(l_ptr);
+ if (!l_ptr->owner) {
+ kfree(l_ptr);
+ return NULL;
+ }
+
+ if (LINK_LOG_BUF_SIZE) {
+ char *pb = kmalloc(LINK_LOG_BUF_SIZE, GFP_ATOMIC);
+
+ if (!pb) {
+ kfree(l_ptr);
+ warn("Memory squeeze; Failed to create link\n");
+ return NULL;
+ }
+ printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
+ }
+
+ k_signal((Handler)link_start, (unsigned long)l_ptr);
+
+ dbg("link_create(): tolerance = %u,cont intv = %u, abort_limit = %u\n",
+ l_ptr->tolerance, l_ptr->continuity_interval, l_ptr->abort_limit);
+
+ return l_ptr;
+}
+
+/**
+ * link_delete - delete a link
+ * @l_ptr: pointer to link
+ *
+ * Note: 'net_lock' is write_locked, bearer is locked.
+ * This routine must not grab the node lock until after link timer cancellation
+ * to avoid a potential deadlock situation.
+ */
+
+void link_delete(struct link *l_ptr)
+{
+ if (!l_ptr) {
+ err("Attempt to delete non-existent link\n");
+ return;
+ }
+
+ dbg("link_delete()\n");
+
+ k_cancel_timer(&l_ptr->timer);
+
+ node_lock(l_ptr->owner);
+ link_reset(l_ptr);
+ node_detach_link(l_ptr->owner, l_ptr);
+ link_stop(l_ptr);
+ list_del_init(&l_ptr->link_list);
+ if (LINK_LOG_BUF_SIZE)
+ kfree(l_ptr->print_buf.buf);
+ node_unlock(l_ptr->owner);
+ k_term_timer(&l_ptr->timer);
+ kfree(l_ptr);
+}
+
+void link_start(struct link *l_ptr)
+{
+ dbg("link_start %x\n", l_ptr);
+ link_state_event(l_ptr, STARTING_EVT);
+}
+
+/**
+ * link_schedule_port - schedule port for deferred sending
+ * @l_ptr: pointer to link
+ * @origport: reference to sending port
+ * @sz: amount of data to be sent
+ *
+ * Schedules port for renewed sending of messages after link congestion
+ * has abated.
+ */
+
+static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
+{
+ struct port *p_ptr;
+
+ spin_lock_bh(&port_list_lock);
+ p_ptr = port_lock(origport);
+ if (p_ptr) {
+ if (!p_ptr->wakeup)
+ goto exit;
+ if (!list_empty(&p_ptr->wait_list))
+ goto exit;
+ p_ptr->congested_link = l_ptr;
+ p_ptr->publ.congested = 1;
+ p_ptr->waiting_pkts = 1 + ((sz - 1) / link_max_pkt(l_ptr));
+ list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
+ l_ptr->stats.link_congs++;
+exit:
+ port_unlock(p_ptr);
+ }
+ spin_unlock_bh(&port_list_lock);
+ return -ELINKCONG;
+}
+
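+/**
+ * link_wakeup_ports - wake up ports waiting on a congested link
+ * @l_ptr: pointer to link
+ * @all: if non-zero, wake all waiting ports regardless of available send window
+ */
+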
+void link_wakeup_ports(struct link *l_ptr, int all)
+{
+ struct port *p_ptr;
+ struct port *temp_p_ptr;
+ int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
+
+ if (all)
+ win = 100000;
+ if (win <= 0)
+ return;
+ if (!spin_trylock_bh(&port_list_lock))
+ return;
+ if (link_congested(l_ptr))
+ goto exit;
+ list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
+ wait_list) {
+ if (win <= 0)
+ break;
+ list_del_init(&p_ptr->wait_list);
+ p_ptr->congested_link = NULL;
+ assert(p_ptr->wakeup);
+ spin_lock_bh(p_ptr->publ.lock);
+ p_ptr->publ.congested = 0;
+ p_ptr->wakeup(&p_ptr->publ);
+ win -= p_ptr->waiting_pkts;
+ spin_unlock_bh(p_ptr->publ.lock);
+ }
+
+exit:
+ spin_unlock_bh(&port_list_lock);
+}
+
+/**
+ * link_release_outqueue - purge link's outbound message queue
+ * @l_ptr: pointer to link
+ */
+
+static void link_release_outqueue(struct link *l_ptr)
+{
+ struct sk_buff *buf = l_ptr->first_out;
+ struct sk_buff *next;
+
+ while (buf) {
+ next = buf->next;
+ buf_discard(buf);
+ buf = next;
+ }
+ l_ptr->first_out = NULL;
+ l_ptr->out_queue_size = 0;
+}
+
+/**
+ * link_reset_fragments - purge link's inbound message fragments queue
+ * @l_ptr: pointer to link
+ */
+
+void link_reset_fragments(struct link *l_ptr)
+{
+ struct sk_buff *buf = l_ptr->defragm_buf;
+ struct sk_buff *next;
+
+ while (buf) {
+ next = buf->next;
+ buf_discard(buf);
+ buf = next;
+ }
+ l_ptr->defragm_buf = NULL;
+}
+
+/**
+ * link_stop - purge all inbound and outbound messages associated with link
+ * @l_ptr: pointer to link
+ */
+
+void link_stop(struct link *l_ptr)
+{
+ struct sk_buff *buf;
+ struct sk_buff *next;
+
+ buf = l_ptr->oldest_deferred_in;
+ while (buf) {
+ next = buf->next;
+ buf_discard(buf);
+ buf = next;
+ }
+
+ buf = l_ptr->first_out;
+ while (buf) {
+ next = buf->next;
+ buf_discard(buf);
+ buf = next;
+ }
+
+ link_reset_fragments(l_ptr);
+
+ buf_discard(l_ptr->proto_msg_queue);
+ l_ptr->proto_msg_queue = NULL;
+}
+
+#if 0
+
+/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
+
+static void link_recv_event(struct link_event *ev)
+{
+ ev->fcn(ev->addr, ev->name, ev->up);
+ kfree(ev);
+}
+
+static void link_send_event(void (*fcn)(u32 a, char *n, int up),
+ struct link *l_ptr, int up)
+{
+ struct link_event *ev;
+
+ ev = kmalloc(sizeof(*ev), GFP_ATOMIC);
+ if (!ev) {
+ warn("Link event allocation failure\n");
+ return;
+ }
+ ev->addr = l_ptr->addr;
+ ev->up = up;
+ ev->fcn = fcn;
+ memcpy(ev->name, l_ptr->name, TIPC_MAX_LINK_NAME);
+ k_signal((Handler)link_recv_event, (unsigned long)ev);
+}
+
+#else
+
+#define link_send_event(fcn, l_ptr, up) do { } while (0)
+
+#endif
+
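+/**
+ * link_reset - reset link to RESET_UNKNOWN state and purge all of its queues
+ * @l_ptr: pointer to link
+ */
+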
+void link_reset(struct link *l_ptr)
+{
+ struct sk_buff *buf;
+ u32 prev_state = l_ptr->state;
+ u32 checkpoint = l_ptr->next_in_no;
+
+ msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1);
+
+ /* Link is down, accept any session: */
+ l_ptr->peer_session = 0;
+
+ /* Prepare for max packet size negotiation */
+ link_init_max_pkt(l_ptr);
+
+ l_ptr->state = RESET_UNKNOWN;
+ dbg_link_state("Resetting Link\n");
+
+ if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
+ return;
+
+ node_link_down(l_ptr->owner, l_ptr);
+ bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
+#if 0
+ tipc_printf(CONS, "\nReset link <%s>\n", l_ptr->name);
+ dbg_link_dump();
+#endif
+ if (node_has_active_links(l_ptr->owner) &&
+ l_ptr->owner->permit_changeover) {
+ l_ptr->reset_checkpoint = checkpoint;
+ l_ptr->exp_msg_count = START_CHANGEOVER;
+ }
+
+ /* Clean up all queues: */
+
+ link_release_outqueue(l_ptr);
+ buf_discard(l_ptr->proto_msg_queue);
+ l_ptr->proto_msg_queue = NULL;
+ buf = l_ptr->oldest_deferred_in;
+ while (buf) {
+ struct sk_buff *next = buf->next;
+ buf_discard(buf);
+ buf = next;
+ }
+ if (!list_empty(&l_ptr->waiting_ports))
+ link_wakeup_ports(l_ptr, 1);
+
+ l_ptr->retransm_queue_head = 0;
+ l_ptr->retransm_queue_size = 0;
+ l_ptr->last_out = NULL;
+ l_ptr->first_out = NULL;
+ l_ptr->next_out = NULL;
+ l_ptr->unacked_window = 0;
+ l_ptr->checkpoint = 1;
+ l_ptr->next_out_no = 1;
+ l_ptr->deferred_inqueue_sz = 0;
+ l_ptr->oldest_deferred_in = NULL;
+ l_ptr->newest_deferred_in = NULL;
+ l_ptr->fsm_msg_cnt = 0;
+ l_ptr->stale_count = 0;
+ link_reset_statistics(l_ptr);
+
+ link_send_event(cfg_link_event, l_ptr, 0);
+ if (!in_own_cluster(l_ptr->addr))
+ link_send_event(disc_link_event, l_ptr, 0);
+}
+
+
+static void link_activate(struct link *l_ptr)
+{
+ l_ptr->next_in_no = 1;
+ node_link_up(l_ptr->owner, l_ptr);
+ bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
+ link_send_event(cfg_link_event, l_ptr, 1);
+ if (!in_own_cluster(l_ptr->addr))
+ link_send_event(disc_link_event, l_ptr, 1);
+}
+
+/**
+ * link_state_event - link finite state machine
+ * @l_ptr: pointer to link
+ * @event: state machine event to process
+ */
+
+static void link_state_event(struct link *l_ptr, unsigned event)
+{
+ struct link *other;
+ u32 cont_intv = l_ptr->continuity_interval;
+
+ if (!l_ptr->started && (event != STARTING_EVT))
+ return; /* Not yet. */
+
+ if (link_blocked(l_ptr)) {
+ if (event == TIMEOUT_EVT) {
+ link_set_timer(l_ptr, cont_intv);
+ }
+ return; /* Changeover going on */
+ }
+ dbg_link("STATE_EV: <%s> ", l_ptr->name);
+
+ switch (l_ptr->state) {
+ case WORKING_WORKING:
+ dbg_link("WW/");
+ switch (event) {
+ case TRAFFIC_MSG_EVT:
+ dbg_link("TRF-");
+ /* fall through */
+ case ACTIVATE_MSG:
+ dbg_link("ACT\n");
+ break;
+ case TIMEOUT_EVT:
+ dbg_link("TIM ");
+ if (l_ptr->next_in_no != l_ptr->checkpoint) {
+ l_ptr->checkpoint = l_ptr->next_in_no;
+ if (bclink_acks_missing(l_ptr->owner)) {
+ link_send_proto_msg(l_ptr, STATE_MSG,
+ 0, 0, 0, 0, 0);
+ l_ptr->fsm_msg_cnt++;
+ } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
+ link_send_proto_msg(l_ptr, STATE_MSG,
+ 1, 0, 0, 0, 0);
+ l_ptr->fsm_msg_cnt++;
+ }
+ link_set_timer(l_ptr, cont_intv);
+ break;
+ }
+ dbg_link(" -> WU\n");
+ l_ptr->state = WORKING_UNKNOWN;
+ l_ptr->fsm_msg_cnt = 0;
+ link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+ l_ptr->fsm_msg_cnt++;
+ link_set_timer(l_ptr, cont_intv / 4);
+ break;
+ case RESET_MSG:
+ dbg_link("RES -> RR\n");
+ link_reset(l_ptr);
+ l_ptr->state = RESET_RESET;
+ l_ptr->fsm_msg_cnt = 0;
+ link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+ l_ptr->fsm_msg_cnt++;
+ link_set_timer(l_ptr, cont_intv);
+ break;
+ default:
+ err("Unknown link event %u in WW state\n", event);
+ }
+ break;
+ case WORKING_UNKNOWN:
+ dbg_link("WU/");
+ switch (event) {
+ case TRAFFIC_MSG_EVT:
+ dbg_link("TRF-");
+ case ACTIVATE_MSG:
+ dbg_link("ACT -> WW\n");
+ l_ptr->state = WORKING_WORKING;
+ l_ptr->fsm_msg_cnt = 0;
+ link_set_timer(l_ptr, cont_intv);
+ break;
+ case RESET_MSG:
+ dbg_link("RES -> RR\n");
+ link_reset(l_ptr);
+ l_ptr->state = RESET_RESET;
+ l_ptr->fsm_msg_cnt = 0;
+ link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+ l_ptr->fsm_msg_cnt++;
+ link_set_timer(l_ptr, cont_intv);
+ break;
+ case TIMEOUT_EVT:
+ dbg_link("TIM ");
+ if (l_ptr->next_in_no != l_ptr->checkpoint) {
+ dbg_link("-> WW \n");
+ l_ptr->state = WORKING_WORKING;
+ l_ptr->fsm_msg_cnt = 0;
+ l_ptr->checkpoint = l_ptr->next_in_no;
+ if (bclink_acks_missing(l_ptr->owner)) {
+ link_send_proto_msg(l_ptr, STATE_MSG,
+ 0, 0, 0, 0, 0);
+ l_ptr->fsm_msg_cnt++;
+ }
+ link_set_timer(l_ptr, cont_intv);
+ } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
+ dbg_link("Probing %u/%u,timer = %u ms)\n",
+ l_ptr->fsm_msg_cnt, l_ptr->abort_limit,
+ cont_intv / 4);
+ link_send_proto_msg(l_ptr, STATE_MSG,
+ 1, 0, 0, 0, 0);
+ l_ptr->fsm_msg_cnt++;
+ link_set_timer(l_ptr, cont_intv / 4);
+ } else { /* Link has failed */
+ dbg_link("-> RU (%u probes unanswered)\n",
+ l_ptr->fsm_msg_cnt);
+ link_reset(l_ptr);
+ l_ptr->state = RESET_UNKNOWN;
+ l_ptr->fsm_msg_cnt = 0;
+ link_send_proto_msg(l_ptr, RESET_MSG,
+ 0, 0, 0, 0, 0);
+ l_ptr->fsm_msg_cnt++;
+ link_set_timer(l_ptr, cont_intv);
+ }
+ break;
+ default:
+ err("Unknown link event %u in WU state\n", event);
+ }
+ break;
+ case RESET_UNKNOWN:
+ dbg_link("RU/");
+ switch (event) {
+ case TRAFFIC_MSG_EVT:
+ dbg_link("TRF-\n");
+ break;
+ case ACTIVATE_MSG:
+ other = l_ptr->owner->active_links[0];
+ if (other && link_working_unknown(other)) {
+ dbg_link("ACT\n");
+ break;
+ }
+ dbg_link("ACT -> WW\n");
+ l_ptr->state = WORKING_WORKING;
+ l_ptr->fsm_msg_cnt = 0;
+ link_activate(l_ptr);
+ link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+ l_ptr->fsm_msg_cnt++;
+ link_set_timer(l_ptr, cont_intv);
+ break;
+ case RESET_MSG:
+ dbg_link("RES \n");
+ dbg_link(" -> RR\n");
+ l_ptr->state = RESET_RESET;
+ l_ptr->fsm_msg_cnt = 0;
+ link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
+ l_ptr->fsm_msg_cnt++;
+ link_set_timer(l_ptr, cont_intv);
+ break;
+ case STARTING_EVT:
+ dbg_link("START-");
+ l_ptr->started = 1;
+ /* fall through */
+ case TIMEOUT_EVT:
+ dbg_link("TIM \n");
+ link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
+ l_ptr->fsm_msg_cnt++;
+ link_set_timer(l_ptr, cont_intv);
+ break;
+ default:
+ err("Unknown link event %u in RU state\n", event);
+ }
+ break;
+ case RESET_RESET:
+ dbg_link("RR/ ");
+ switch (event) {
+ case TRAFFIC_MSG_EVT:
+ dbg_link("TRF-");
+ /* fall through */
+ case ACTIVATE_MSG:
+ other = l_ptr->owner->active_links[0];
+ if (other && link_working_unknown(other)) {
+ dbg_link("ACT\n");
+ break;
+ }
+ dbg_link("ACT -> WW\n");
+ l_ptr->state = WORKING_WORKING;
+ l_ptr->fsm_msg_cnt = 0;
+ link_activate(l_ptr);
+ link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+ l_ptr->fsm_msg_cnt++;
+ link_set_timer(l_ptr, cont_intv);
+ break;
+ case RESET_MSG:
+ dbg_link("RES\n");
+ break;
+ case TIMEOUT_EVT:
+ dbg_link("TIM\n");
+ link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+ l_ptr->fsm_msg_cnt++;
+ link_set_timer(l_ptr, cont_intv);
+ dbg_link("fsm_msg_cnt %u\n", l_ptr->fsm_msg_cnt);
+ break;
+ default:
+ err("Unknown link event %u in RR state\n", event);
+ }
+ break;
+ default:
+ err("Unknown link state %u/%u\n", l_ptr->state, event);
+ }
+}
+
+/*
+ * link_bundle_buf(): Append contents of a buffer to
+ * the tail of an existing one.
+ */
+
+static int link_bundle_buf(struct link *l_ptr,
+ struct sk_buff *bundler,
+ struct sk_buff *buf)
+{
+ struct tipc_msg *bundler_msg = buf_msg(bundler);
+ struct tipc_msg *msg = buf_msg(buf);
+ u32 size = msg_size(msg);
+ u32 to_pos = align(msg_size(bundler_msg));
+ u32 rest = link_max_pkt(l_ptr) - to_pos;
+
+ if (msg_user(bundler_msg) != MSG_BUNDLER)
+ return 0;
+ if (msg_type(bundler_msg) != OPEN_MSG)
+ return 0;
+ if (rest < align(size))
+ return 0;
+
+ skb_put(bundler, (to_pos - msg_size(bundler_msg)) + size);
+ memcpy(bundler->data + to_pos, buf->data, size);
+ msg_set_size(bundler_msg, to_pos + size);
+ msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
+ dbg("Packed msg # %u(%u octets) into pos %u in buf(#%u)\n",
+ msg_msgcnt(bundler_msg), size, to_pos, msg_seqno(bundler_msg));
+ msg_dbg(msg, "PACKD:");
+ buf_discard(buf);
+ l_ptr->stats.sent_bundled++;
+ return 1;
+}
+
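+/*
+ * link_add_to_outqueue(): Assign ack and sequence numbers to a message
+ * and append its buffer to the link's outbound queue.
+ */
+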
+static inline void link_add_to_outqueue(struct link *l_ptr,
+ struct sk_buff *buf,
+ struct tipc_msg *msg)
+{
+ u32 ack = mod(l_ptr->next_in_no - 1);
+ u32 seqno = mod(l_ptr->next_out_no++);
+
+ msg_set_word(msg, 2, ((ack << 16) | seqno));
+ msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
+ buf->next = NULL;
+ if (l_ptr->first_out) {
+ l_ptr->last_out->next = buf;
+ l_ptr->last_out = buf;
+ } else
+ l_ptr->first_out = l_ptr->last_out = buf;
+ l_ptr->out_queue_size++;
+}
+
+/*
+ * link_send_buf() is the 'full path' for messages, called from
+ * inside TIPC when the 'fast path' in tipc_send_buf
+ * has failed, and from link_send()
+ */
+
+int link_send_buf(struct link *l_ptr, struct sk_buff *buf)
+{
+ struct tipc_msg *msg = buf_msg(buf);
+ u32 size = msg_size(msg);
+ u32 dsz = msg_data_sz(msg);
+ u32 queue_size = l_ptr->out_queue_size;
+ u32 imp = msg_tot_importance(msg);
+ u32 queue_limit = l_ptr->queue_limit[imp];
+ u32 max_packet = link_max_pkt(l_ptr);
+
+ msg_set_prevnode(msg, tipc_own_addr); /* If routed message */
+
+ /* Match msg importance against queue limits: */
+
+ if (unlikely(queue_size >= queue_limit)) {
+ if (imp <= TIPC_CRITICAL_IMPORTANCE) {
+ return link_schedule_port(l_ptr, msg_origport(msg),
+ size);
+ }
+ msg_dbg(msg, "TIPC: Congestion, throwing away\n");
+ buf_discard(buf);
+ if (imp > CONN_MANAGER) {
+ warn("Resetting <%s>, send queue full", l_ptr->name);
+ link_reset(l_ptr);
+ }
+ return dsz;
+ }
+
+ /* Fragmentation needed ? */
+
+ if (size > max_packet)
+ return link_send_long_buf(l_ptr, buf);
+
+ /* Packet can be queued or sent: */
+
+ if (queue_size > l_ptr->stats.max_queue_sz)
+ l_ptr->stats.max_queue_sz = queue_size;
+
+ if (likely(!bearer_congested(l_ptr->b_ptr, l_ptr) &&
+ !link_congested(l_ptr))) {
+ link_add_to_outqueue(l_ptr, buf, msg);
+
+ if (likely(bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
+ l_ptr->unacked_window = 0;
+ } else {
+ bearer_schedule(l_ptr->b_ptr, l_ptr);
+ l_ptr->stats.bearer_congs++;
+ l_ptr->next_out = buf;
+ }
+ return dsz;
+ }
+ /* Congestion: can message be bundled? */
+
+ if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
+ (msg_user(msg) != MSG_FRAGMENTER)) {
+
+ /* Try adding message to an existing bundle */
+
+ if (l_ptr->next_out &&
+ link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
+ bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
+ return dsz;
+ }
+
+ /* Try creating a new bundle */
+
+ if (size <= max_packet * 2 / 3) {
+ struct sk_buff *bundler = buf_acquire(max_packet);
+ struct tipc_msg bundler_hdr;
+
+ if (bundler) {
+ msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
+ TIPC_OK, INT_H_SIZE, l_ptr->addr);
+ memcpy(bundler->data, (unchar *)&bundler_hdr,
+ INT_H_SIZE);
+ skb_trim(bundler, INT_H_SIZE);
+ link_bundle_buf(l_ptr, bundler, buf);
+ buf = bundler;
+ msg = buf_msg(buf);
+ l_ptr->stats.sent_bundles++;
+ }
+ }
+ }
+ if (!l_ptr->next_out)
+ l_ptr->next_out = buf;
+ link_add_to_outqueue(l_ptr, buf, msg);
+ bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
+ return dsz;
+}
+
+/*
+ * link_send(): same as link_send_buf(), but the link to use has
+ * not been selected yet, and the owner node is not locked.
+ * Called by TIPC internal users, e.g. the name distributor
+ */
+
+int link_send(struct sk_buff *buf, u32 dest, u32 selector)
+{
+ struct link *l_ptr;
+ struct node *n_ptr;
+ int res = -ELINKCONG;
+
+ read_lock_bh(&net_lock);
+ n_ptr = node_select(dest, selector);
+ if (n_ptr) {
+ node_lock(n_ptr);
+ l_ptr = n_ptr->active_links[selector & 1];
+ dbg("link_send: found link %x for dest %x\n", l_ptr, dest);
+ if (l_ptr) {
+ res = link_send_buf(l_ptr, buf);
+ } else {
+ /* no usable link to destination: drop buffer to avoid leaking it */
+ buf_discard(buf);
+ }
+ node_unlock(n_ptr);
+ } else {
+ dbg("Attempt to send msg to unknown node:\n");
+ msg_dbg(buf_msg(buf),">>>");
+ buf_discard(buf);
+ }
+ read_unlock_bh(&net_lock);
+ return res;
+}
+
+/*
+ * link_send_buf_fast: Entry for data messages where the
+ * destination link is known and the header is complete,
+ * including the total message length. Very time critical.
+ * Link is locked. Returns user data length.
+ */
+
+static inline int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
+ u32 *used_max_pkt)
+{
+ struct tipc_msg *msg = buf_msg(buf);
+ int res = msg_data_sz(msg);
+
+ if (likely(!link_congested(l_ptr))) {
+ if (likely(msg_size(msg) <= link_max_pkt(l_ptr))) {
+ if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
+ link_add_to_outqueue(l_ptr, buf, msg);
+ if (likely(bearer_send(l_ptr->b_ptr, buf,
+ &l_ptr->media_addr))) {
+ l_ptr->unacked_window = 0;
+ msg_dbg(msg,"SENT_FAST:");
+ return res;
+ }
+ dbg("failed sent fast...\n");
+ bearer_schedule(l_ptr->b_ptr, l_ptr);
+ l_ptr->stats.bearer_congs++;
+ l_ptr->next_out = buf;
+ return res;
+ }
+ }
+ else
+ *used_max_pkt = link_max_pkt(l_ptr);
+ }
+ return link_send_buf(l_ptr, buf); /* All other cases */
+}
+
+/*
+ * tipc_send_buf_fast: Entry for data messages where the
+ * destination node is known and the header is complete,
+ * including the total message length.
+ * Returns user data length.
+ */
+int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
+{
+ struct link *l_ptr;
+ struct node *n_ptr;
+ int res;
+ u32 selector = msg_origport(buf_msg(buf)) & 1;
+ u32 dummy;
+
+ if (destnode == tipc_own_addr)
+ return port_recv_msg(buf);
+
+ read_lock_bh(&net_lock);
+ n_ptr = node_select(destnode, selector);
+ if (likely(n_ptr)) {
+ node_lock(n_ptr);
+ l_ptr = n_ptr->active_links[selector];
+ dbg("send_fast: buf %x selected %x, destnode = %x\n",
+ buf, l_ptr, destnode);
+ if (likely(l_ptr)) {
+ res = link_send_buf_fast(l_ptr, buf, &dummy);
+ node_unlock(n_ptr);
+ read_unlock_bh(&net_lock);
+ return res;
+ }
+ node_unlock(n_ptr);
+ }
+ read_unlock_bh(&net_lock);
+ res = msg_data_sz(buf_msg(buf));
+ tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
+ return res;
+}
+
+
+/*
+ * link_send_sections_fast: Entry for messages where the
+ * destination processor is known and the header is complete,
+ * except for total message length.
+ * Returns user data length or errno.
+ */
+int link_send_sections_fast(struct port *sender,
+ struct iovec const *msg_sect,
+ const u32 num_sect,
+ u32 destaddr)
+{
+ struct tipc_msg *hdr = &sender->publ.phdr;
+ struct link *l_ptr;
+ struct sk_buff *buf;
+ struct node *node;
+ int res;
+ u32 selector = msg_origport(hdr) & 1;
+
+ assert(destaddr != tipc_own_addr);
+
+again:
+ /*
+ * Try building message using port's max_pkt hint.
+ * (Must not hold any locks while building message.)
+ */
+
+ res = msg_build(hdr, msg_sect, num_sect, sender->max_pkt,
+ !sender->user_port, &buf);
+
+ read_lock_bh(&net_lock);
+ node = node_select(destaddr, selector);
+ if (likely(node)) {
+ node_lock(node);
+ l_ptr = node->active_links[selector];
+ if (likely(l_ptr)) {
+ if (likely(buf)) {
+ res = link_send_buf_fast(l_ptr, buf,
+ &sender->max_pkt);
+ if (unlikely(res < 0))
+ buf_discard(buf);
+exit:
+ node_unlock(node);
+ read_unlock_bh(&net_lock);
+ return res;
+ }
+
+ /* Exit if build request was invalid */
+
+ if (unlikely(res < 0))
+ goto exit;
+
+ /* Exit if link (or bearer) is congested */
+
+ if (link_congested(l_ptr) ||
+ !list_empty(&l_ptr->b_ptr->cong_links)) {
+ res = link_schedule_port(l_ptr,
+ sender->publ.ref, res);
+ goto exit;
+ }
+
+ /*
+ * Message size exceeds max_pkt hint; update hint,
+ * then re-try fast path or fragment the message
+ */
+
+ sender->max_pkt = link_max_pkt(l_ptr);
+ node_unlock(node);
+ read_unlock_bh(&net_lock);
+
+
+ if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
+ goto again;
+
+ return link_send_sections_long(sender, msg_sect,
+ num_sect, destaddr);
+ }
+ node_unlock(node);
+ }
+ read_unlock_bh(&net_lock);
+
+ /* Couldn't find a link to the destination node */
+
+ if (buf)
+ return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
+ if (res >= 0)
+ return port_reject_sections(sender, hdr, msg_sect, num_sect,
+ TIPC_ERR_NO_NODE);
+ return res;
+}
+
+/*
+ * link_send_sections_long(): Entry for long messages where the
+ * destination node is known and the header is complete,
+ * including the total message length.
+ * Link and bearer congestion status have been checked to be ok,
+ * and are ignored if they change.
+ *
+ * Note that fragments do not use the full link MTU so that they won't have
+ * to undergo refragmentation if link changeover causes them to be sent
+ * over another link with an additional tunnel header added as prefix.
+ * (Refragmentation will still occur if the other link has a smaller MTU.)
+ *
+ * Returns user data length or errno.
+ */
+static int link_send_sections_long(struct port *sender,
+ struct iovec const *msg_sect,
+ u32 num_sect,
+ u32 destaddr)
+{
+ struct link *l_ptr;
+ struct node *node;
+ struct tipc_msg *hdr = &sender->publ.phdr;
+ u32 dsz = msg_data_sz(hdr);
+ u32 max_pkt, fragm_sz, rest;
+ struct tipc_msg fragm_hdr;
+ struct sk_buff *buf, *buf_chain, *prev;
+ u32 fragm_crs, fragm_rest, hsz, sect_rest;
+ const unchar *sect_crs;
+ int curr_sect;
+ u32 fragm_no;
+
+again:
+ fragm_no = 1;
+ max_pkt = sender->max_pkt - INT_H_SIZE;
+ /* leave room for tunnel header in case of link changeover */
+ fragm_sz = max_pkt - INT_H_SIZE;
+ /* leave room for fragmentation header in each fragment */
+ rest = dsz;
+ fragm_crs = 0;
+ fragm_rest = 0;
+ sect_rest = 0;
+ sect_crs = NULL;
+ curr_sect = -1;
+
+ /* Prepare reusable fragment header: */
+
+ msg_dbg(hdr, ">FRAGMENTING>");
+ msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
+ TIPC_OK, INT_H_SIZE, msg_destnode(hdr));
+ msg_set_link_selector(&fragm_hdr, sender->publ.ref);
+ msg_set_size(&fragm_hdr, max_pkt);
+ msg_set_fragm_no(&fragm_hdr, 1);
+
+ /* Prepare header of first fragment: */
+
+ buf_chain = buf = buf_acquire(max_pkt);
+ if (!buf)
+ return -ENOMEM;
+ buf->next = NULL;
+ memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE);
+ hsz = msg_hdr_sz(hdr);
+ memcpy(buf->data + INT_H_SIZE, (unchar *)hdr, hsz);
+ msg_dbg(buf_msg(buf), ">BUILD>");
+
+ /* Chop up message: */
+
+ fragm_crs = INT_H_SIZE + hsz;
+ fragm_rest = fragm_sz - hsz;
+
+ do { /* For all sections */
+ u32 sz;
+
+ if (!sect_rest) {
+ sect_rest = msg_sect[++curr_sect].iov_len;
+ sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
+ }
+
+ if (sect_rest < fragm_rest)
+ sz = sect_rest;
+ else
+ sz = fragm_rest;
+
+ if (likely(!sender->user_port)) {
+ if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
+error:
+ for (; buf_chain; buf_chain = buf) {
+ buf = buf_chain->next;
+ buf_discard(buf_chain);
+ }
+ return -EFAULT;
+ }
+ } else
+ memcpy(buf->data + fragm_crs, sect_crs, sz);
+
+ sect_crs += sz;
+ sect_rest -= sz;
+ fragm_crs += sz;
+ fragm_rest -= sz;
+ rest -= sz;
+
+ if (!fragm_rest && rest) {
+
+ /* Initiate new fragment: */
+ if (rest <= fragm_sz) {
+ fragm_sz = rest;
+ msg_set_type(&fragm_hdr,LAST_FRAGMENT);
+ } else {
+ msg_set_type(&fragm_hdr, FRAGMENT);
+ }
+ msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
+ msg_set_fragm_no(&fragm_hdr, ++fragm_no);
+ prev = buf;
+ buf = buf_acquire(fragm_sz + INT_H_SIZE);
+ if (!buf)
+ goto error;
+
+ buf->next = NULL;
+ prev->next = buf;
+ memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE);
+ fragm_crs = INT_H_SIZE;
+ fragm_rest = fragm_sz;
+ msg_dbg(buf_msg(buf)," >BUILD>");
+ }
+ }
+ while (rest > 0);
+
+ /*
+ * Now we have a buffer chain. Select a link and check
+ * that packet size is still OK
+ */
+ node = node_select(destaddr, sender->publ.ref & 1);
+ if (likely(node)) {
+ node_lock(node);
+ l_ptr = node->active_links[sender->publ.ref & 1];
+ if (!l_ptr) {
+ node_unlock(node);
+ goto reject;
+ }
+ if (link_max_pkt(l_ptr) < max_pkt) {
+ sender->max_pkt = link_max_pkt(l_ptr);
+ node_unlock(node);
+ for (; buf_chain; buf_chain = buf) {
+ buf = buf_chain->next;
+ buf_discard(buf_chain);
+ }
+ goto again;
+ }
+ } else {
+reject:
+ for (; buf_chain; buf_chain = buf) {
+ buf = buf_chain->next;
+ buf_discard(buf_chain);
+ }
+ return port_reject_sections(sender, hdr, msg_sect, num_sect,
+ TIPC_ERR_NO_NODE);
+ }
+
+ /* Append whole chain to send queue: */
+
+ buf = buf_chain;
+ l_ptr->long_msg_seq_no = mod(l_ptr->long_msg_seq_no + 1);
+ if (!l_ptr->next_out)
+ l_ptr->next_out = buf_chain;
+ l_ptr->stats.sent_fragmented++;
+ while (buf) {
+ struct sk_buff *next = buf->next;
+ struct tipc_msg *msg = buf_msg(buf);
+
+ l_ptr->stats.sent_fragments++;
+ msg_set_long_msgno(msg, l_ptr->long_msg_seq_no);
+ link_add_to_outqueue(l_ptr, buf, msg);
+ msg_dbg(msg, ">ADD>");
+ buf = next;
+ }
+
+ /* Send it, if possible: */
+
+ link_push_queue(l_ptr);
+ node_unlock(node);
+ return dsz;
+}
+
+/*
+ * link_push_packet: Push one unsent packet to the media
+ */
+u32 link_push_packet(struct link *l_ptr)
+{
+ struct sk_buff *buf = l_ptr->first_out;
+ u32 r_q_size = l_ptr->retransm_queue_size;
+ u32 r_q_head = l_ptr->retransm_queue_head;
+
+ /* Step to position where retransmission failed, if any, */
+ /* consider that buffers may have been released in meantime */
+
+ if (r_q_size && buf) {
+ u32 last = lesser(mod(r_q_head + r_q_size),
+ link_last_sent(l_ptr));
+ u32 first = msg_seqno(buf_msg(buf));
+
+ while (buf && less(first, r_q_head)) {
+ first = mod(first + 1);
+ buf = buf->next;
+ }
+ l_ptr->retransm_queue_head = r_q_head = first;
+ l_ptr->retransm_queue_size = r_q_size = mod(last - first);
+ }
+
+ /* Continue retransmission now, if there is anything: */
+
+ if (r_q_size && buf && !skb_cloned(buf)) {
+ msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
+ msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
+ if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+ msg_dbg(buf_msg(buf), ">DEF-RETR>");
+ l_ptr->retransm_queue_head = mod(++r_q_head);
+ l_ptr->retransm_queue_size = --r_q_size;
+ l_ptr->stats.retransmitted++;
+ return TIPC_OK;
+ } else {
+ l_ptr->stats.bearer_congs++;
+ msg_dbg(buf_msg(buf), "|>DEF-RETR>");
+ return PUSH_FAILED;
+ }
+ }
+
+ /* Send deferred protocol message, if any: */
+
+ buf = l_ptr->proto_msg_queue;
+ if (buf) {
+ msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
+ msg_set_bcast_ack(buf_msg(buf),l_ptr->owner->bclink.last_in);
+ if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+ msg_dbg(buf_msg(buf), ">DEF-PROT>");
+ l_ptr->unacked_window = 0;
+ buf_discard(buf);
+ l_ptr->proto_msg_queue = NULL;
+ return TIPC_OK;
+ } else {
+ msg_dbg(buf_msg(buf), "|>DEF-PROT>");
+ l_ptr->stats.bearer_congs++;
+ return PUSH_FAILED;
+ }
+ }
+
+ /* Send one deferred data message, if send window not full: */
+
+ buf = l_ptr->next_out;
+ if (buf) {
+ struct tipc_msg *msg = buf_msg(buf);
+ u32 next = msg_seqno(msg);
+ u32 first = msg_seqno(buf_msg(l_ptr->first_out));
+
+ if (mod(next - first) < l_ptr->queue_limit[0]) {
+ msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
+ msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
+ if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+ if (msg_user(msg) == MSG_BUNDLER)
+ msg_set_type(msg, CLOSED_MSG);
+ msg_dbg(msg, ">PUSH-DATA>");
+ l_ptr->next_out = buf->next;
+ return TIPC_OK;
+ } else {
+ msg_dbg(msg, "|PUSH-DATA|");
+ l_ptr->stats.bearer_congs++;
+ return PUSH_FAILED;
+ }
+ }
+ }
+ return PUSH_FINISHED;
+}
+
+/*
+ * push_queue(): push out the unsent messages of a link where
+ * congestion has abated. Node is locked
+ */
+void link_push_queue(struct link *l_ptr)
+{
+ u32 res;
+
+ if (bearer_congested(l_ptr->b_ptr, l_ptr))
+ return;
+
+ do {
+ res = link_push_packet(l_ptr);
+ }
+ while (res == TIPC_OK);
+ if (res == PUSH_FAILED)
+ bearer_schedule(l_ptr->b_ptr, l_ptr);
+}
+
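+/*
+ * link_retransmit(): Retransmit up to 'retransmits' packets, starting with
+ * the given buffer. If the same packet keeps being retransmitted without
+ * progress, the link is assumed broken and is reset.
+ */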
+void link_retransmit(struct link *l_ptr, struct sk_buff *buf,
+ u32 retransmits)
+{
+ struct tipc_msg *msg;
+
+ dbg("Retransmitting %u in link %x\n", retransmits, l_ptr);
+
+ if (bearer_congested(l_ptr->b_ptr, l_ptr) && buf && !skb_cloned(buf)) {
+ msg_dbg(buf_msg(buf), ">NO_RETR->BCONG>");
+ dbg_print_link(l_ptr, " ");
+ l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
+ l_ptr->retransm_queue_size = retransmits;
+ return;
+ }
+ while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) {
+ msg = buf_msg(buf);
+ msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
+ msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
+ if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+ /* Catch if retransmissions fail repeatedly: */
+ if (l_ptr->last_retransmitted == msg_seqno(msg)) {
+ if (++l_ptr->stale_count > 100) {
+ msg_print(CONS, buf_msg(buf), ">RETR>");
+ info("...Retransmitted %u times\n",
+ l_ptr->stale_count);
+ link_print(l_ptr, CONS, "Resetting Link\n");
+ link_reset(l_ptr);
+ break;
+ }
+ } else {
+ l_ptr->stale_count = 0;
+ }
+ l_ptr->last_retransmitted = msg_seqno(msg);
+
+ msg_dbg(buf_msg(buf), ">RETR>");
+ buf = buf->next;
+ retransmits--;
+ l_ptr->stats.retransmitted++;
+ } else {
+ bearer_schedule(l_ptr->b_ptr, l_ptr);
+ l_ptr->stats.bearer_congs++;
+ l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
+ l_ptr->retransm_queue_size = retransmits;
+ return;
+ }
+ }
+ l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
+}
+
+/*
+ * link_recv_non_seq: Receive packets which are outside
+ * the link sequence flow
+ */
+
+static void link_recv_non_seq(struct sk_buff *buf)
+{
+ struct tipc_msg *msg = buf_msg(buf);
+
+ if (msg_user(msg) == LINK_CONFIG)
+ disc_recv_msg(buf);
+ else
+ bclink_recv_pkt(buf);
+}
+
+/**
+ * link_insert_deferred_queue - insert deferred messages back into receive chain
+ */
+
+static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
+ struct sk_buff *buf)
+{
+ u32 seq_no;
+
+ if (l_ptr->oldest_deferred_in == NULL)
+ return buf;
+
+ seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
+ if (seq_no == mod(l_ptr->next_in_no)) {
+ l_ptr->newest_deferred_in->next = buf;
+ buf = l_ptr->oldest_deferred_in;
+ l_ptr->oldest_deferred_in = NULL;
+ l_ptr->deferred_inqueue_sz = 0;
+ }
+ return buf;
+}
+
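+/**
+ * tipc_recv_msg - process TIPC messages arriving from off-node
+ * @head: pointer to message buffer chain
+ * @tb_ptr: pointer to bearer message arrived on
+ */
+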
+void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
+{
+ read_lock_bh(&net_lock);
+ while (head) {
+ struct bearer *b_ptr;
+ struct node *n_ptr;
+ struct link *l_ptr;
+ struct sk_buff *crs;
+ struct sk_buff *buf = head;
+ struct tipc_msg *msg = buf_msg(buf);
+ u32 seq_no = msg_seqno(msg);
+ u32 ackd = msg_ack(msg);
+ u32 released = 0;
+ int type;
+
+ b_ptr = (struct bearer *)tb_ptr;
+ TIPC_SKB_CB(buf)->handle = b_ptr;
+
+ head = head->next;
+ if (unlikely(msg_version(msg) != TIPC_VERSION))
+ goto cont;
+#if 0
+ if (msg_user(msg) != LINK_PROTOCOL)
+#endif
+ msg_dbg(msg,"<REC<");
+
+ if (unlikely(msg_non_seq(msg))) {
+ link_recv_non_seq(buf);
+ continue;
+ }
+ n_ptr = node_find(msg_prevnode(msg));
+ if (unlikely(!n_ptr))
+ goto cont;
+
+ node_lock(n_ptr);
+ l_ptr = n_ptr->links[b_ptr->identity];
+ if (unlikely(!l_ptr)) {
+ node_unlock(n_ptr);
+ goto cont;
+ }
+ /*
+ * Release acked messages
+ */
+ if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
+ if (node_is_up(n_ptr) && n_ptr->bclink.supported)
+ bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
+ }
+
+ crs = l_ptr->first_out;
+ while ((crs != l_ptr->next_out) &&
+ less_eq(msg_seqno(buf_msg(crs)), ackd)) {
+ struct sk_buff *next = crs->next;
+
+ buf_discard(crs);
+ crs = next;
+ released++;
+ }
+ if (released) {
+ l_ptr->first_out = crs;
+ l_ptr->out_queue_size -= released;
+ }
+ if (unlikely(l_ptr->next_out))
+ link_push_queue(l_ptr);
+ if (unlikely(!list_empty(&l_ptr->waiting_ports)))
+ link_wakeup_ports(l_ptr, 0);
+ if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
+ l_ptr->stats.sent_acks++;
+ link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+ }
+
+protocol_check:
+ if (likely(link_working_working(l_ptr))) {
+ if (likely(seq_no == mod(l_ptr->next_in_no))) {
+ l_ptr->next_in_no++;
+ if (unlikely(l_ptr->oldest_deferred_in))
+ head = link_insert_deferred_queue(l_ptr,
+ head);
+ if (likely(msg_is_dest(msg, tipc_own_addr))) {
+deliver:
+ if (likely(msg_isdata(msg))) {
+ node_unlock(n_ptr);
+ port_recv_msg(buf);
+ continue;
+ }
+ switch (msg_user(msg)) {
+ case MSG_BUNDLER:
+ l_ptr->stats.recv_bundles++;
+ l_ptr->stats.recv_bundled +=
+ msg_msgcnt(msg);
+ node_unlock(n_ptr);
+ link_recv_bundle(buf);
+ continue;
+ case ROUTE_DISTRIBUTOR:
+ node_unlock(n_ptr);
+ cluster_recv_routing_table(buf);
+ continue;
+ case NAME_DISTRIBUTOR:
+ node_unlock(n_ptr);
+ named_recv(buf);
+ continue;
+ case CONN_MANAGER:
+ node_unlock(n_ptr);
+ port_recv_proto_msg(buf);
+ continue;
+ case MSG_FRAGMENTER:
+ l_ptr->stats.recv_fragments++;
+ if (link_recv_fragment(
+ &l_ptr->defragm_buf,
+ &buf, &msg)) {
+ l_ptr->stats.recv_fragmented++;
+ goto deliver;
+ }
+ break;
+ case CHANGEOVER_PROTOCOL:
+ type = msg_type(msg);
+ if (link_recv_changeover_msg(
+ &l_ptr, &buf)) {
+ msg = buf_msg(buf);
+ seq_no = msg_seqno(msg);
+ TIPC_SKB_CB(buf)->handle
+ = b_ptr;
+ if (type == ORIGINAL_MSG)
+ goto deliver;
+ goto protocol_check;
+ }
+ break;
+ }
+ }
+ node_unlock(n_ptr);
+ net_route_msg(buf);
+ continue;
+ }
+ link_handle_out_of_seq_msg(l_ptr, buf);
+ head = link_insert_deferred_queue(l_ptr, head);
+ node_unlock(n_ptr);
+ continue;
+ }
+
+ if (msg_user(msg) == LINK_PROTOCOL) {
+ link_recv_proto_msg(l_ptr, buf);
+ head = link_insert_deferred_queue(l_ptr, head);
+ node_unlock(n_ptr);
+ continue;
+ }
+ msg_dbg(msg,"NSEQ<REC<");
+ link_state_event(l_ptr, TRAFFIC_MSG_EVT);
+
+ if (link_working_working(l_ptr)) {
+ /* Re-insert in front of queue */
+ msg_dbg(msg,"RECV-REINS:");
+ buf->next = head;
+ head = buf;
+ node_unlock(n_ptr);
+ continue;
+ }
+ node_unlock(n_ptr);
+cont:
+ buf_discard(buf);
+ }
+ read_unlock_bh(&net_lock);
+}
+
+/*
+ * link_defer_pkt(): Sort a received out-of-sequence packet
+ * into the deferred reception queue.
+ * Returns the increase in the queue length, i.e. 0 or 1
+ */
+
+u32 link_defer_pkt(struct sk_buff **head,
+ struct sk_buff **tail,
+ struct sk_buff *buf)
+{
+ struct sk_buff *prev = NULL;
+ struct sk_buff *crs = *head;
+ u32 seq_no = msg_seqno(buf_msg(buf));
+
+ buf->next = NULL;
+
+ /* Empty queue ? */
+ if (*head == NULL) {
+ *head = *tail = buf;
+ return 1;
+ }
+
+ /* Last ? */
+ if (less(msg_seqno(buf_msg(*tail)), seq_no)) {
+ (*tail)->next = buf;
+ *tail = buf;
+ return 1;
+ }
+
+ /* Scan through queue and sort it in */
+ do {
+ struct tipc_msg *msg = buf_msg(crs);
+
+ if (less(seq_no, msg_seqno(msg))) {
+ buf->next = crs;
+ if (prev)
+ prev->next = buf;
+ else
+ *head = buf;
+ return 1;
+ }
+ if (seq_no == msg_seqno(msg)) {
+ break;
+ }
+ prev = crs;
+ crs = crs->next;
+ }
+ while (crs);
+
+ /* Message is a duplicate of an existing message */
+
+ buf_discard(buf);
+ return 0;
+}
+
+/**
+ * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
+ */
+
+static void link_handle_out_of_seq_msg(struct link *l_ptr,
+ struct sk_buff *buf)
+{
+ u32 seq_no = msg_seqno(buf_msg(buf));
+
+ if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
+ link_recv_proto_msg(l_ptr, buf);
+ return;
+ }
+
+ dbg("rx OOS msg: seq_no %u, expecting %u (%u)\n",
+ seq_no, mod(l_ptr->next_in_no), l_ptr->next_in_no);
+
+ /* Record OOS packet arrival (force mismatch on next timeout) */
+
+ l_ptr->checkpoint--;
+
+ /*
+ * Discard packet if a duplicate; otherwise add it to deferred queue
+ * and notify peer of gap as per protocol specification
+ */
+
+ if (less(seq_no, mod(l_ptr->next_in_no))) {
+ l_ptr->stats.duplicates++;
+ buf_discard(buf);
+ return;
+ }
+
+ if (link_defer_pkt(&l_ptr->oldest_deferred_in,
+ &l_ptr->newest_deferred_in, buf)) {
+ l_ptr->deferred_inqueue_sz++;
+ l_ptr->stats.deferred_recv++;
+ if ((l_ptr->deferred_inqueue_sz % 16) == 1)
+ link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+ } else
+ l_ptr->stats.duplicates++;
+}
+
+/*
+ * Send protocol message to the other endpoint.
+ */
+void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
+ u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
+{
+ struct sk_buff *buf = NULL;
+ struct tipc_msg *msg = l_ptr->pmsg;
+ u32 msg_size = sizeof(l_ptr->proto_msg);
+
+ if (link_blocked(l_ptr))
+ return;
+ msg_set_type(msg, msg_typ);
+ msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
+ msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
+ msg_set_last_bcast(msg, bclink_get_last_sent());
+
+ if (msg_typ == STATE_MSG) {
+ u32 next_sent = mod(l_ptr->next_out_no);
+
+ if (!link_is_up(l_ptr))
+ return;
+ if (l_ptr->next_out)
+ next_sent = msg_seqno(buf_msg(l_ptr->next_out));
+ msg_set_next_sent(msg, next_sent);
+ if (l_ptr->oldest_deferred_in) {
+ u32 rec = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
+ gap = mod(rec - mod(l_ptr->next_in_no));
+ }
+ msg_set_seq_gap(msg, gap);
+ if (gap)
+ l_ptr->stats.sent_nacks++;
+ msg_set_link_tolerance(msg, tolerance);
+ msg_set_linkprio(msg, priority);
+ msg_set_max_pkt(msg, ack_mtu);
+ msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
+ msg_set_probe(msg, probe_msg != 0);
+ if (probe_msg) {
+ u32 mtu = l_ptr->max_pkt;
+
+ if ((mtu < l_ptr->max_pkt_target) &&
+ link_working_working(l_ptr) &&
+ l_ptr->fsm_msg_cnt) {
+ msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
+ if (l_ptr->max_pkt_probes == 10) {
+ l_ptr->max_pkt_target = (msg_size - 4);
+ l_ptr->max_pkt_probes = 0;
+ msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
+ }
+ l_ptr->max_pkt_probes++;
+ }
+
+ l_ptr->stats.sent_probes++;
+ }
+ l_ptr->stats.sent_states++;
+ } else { /* RESET_MSG or ACTIVATE_MSG */
+ msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
+ msg_set_seq_gap(msg, 0);
+ msg_set_next_sent(msg, 1);
+ msg_set_link_tolerance(msg, l_ptr->tolerance);
+ msg_set_linkprio(msg, l_ptr->priority);
+ msg_set_max_pkt(msg, l_ptr->max_pkt_target);
+ }
+
+ if (node_has_redundant_links(l_ptr->owner)) {
+ msg_set_redundant_link(msg);
+ } else {
+ msg_clear_redundant_link(msg);
+ }
+ msg_set_linkprio(msg, l_ptr->priority);
+
+ /* Ensure sequence number will not fit: */
+
+ msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
+
+ /* Congestion? */
+
+ if (bearer_congested(l_ptr->b_ptr, l_ptr)) {
+ if (!l_ptr->proto_msg_queue) {
+ l_ptr->proto_msg_queue =
+ buf_acquire(sizeof(l_ptr->proto_msg));
+ }
+ buf = l_ptr->proto_msg_queue;
+ if (!buf)
+ return;
+ memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
+ return;
+ }
+ msg_set_timestamp(msg, jiffies_to_msecs(jiffies));
+
+ /* Message can be sent */
+
+ msg_dbg(msg, ">>");
+
+ buf = buf_acquire(msg_size);
+ if (!buf)
+ return;
+
+ memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
+ msg_set_size(buf_msg(buf), msg_size);
+
+ if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+ l_ptr->unacked_window = 0;
+ buf_discard(buf);
+ return;
+ }
+
+ /* New congestion */
+ bearer_schedule(l_ptr->b_ptr, l_ptr);
+ l_ptr->proto_msg_queue = buf;
+ l_ptr->stats.bearer_congs++;
+}
+
+/*
+ * Receive protocol message.
+ * Note that the network plane id propagates through the network, and may
+ * change at any time. The node with the lowest address rules.
+ */
+
+static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
+{
+ u32 rec_gap = 0;
+ u32 max_pkt_info;
+ u32 max_pkt_ack;
+ u32 msg_tol;
+ struct tipc_msg *msg = buf_msg(buf);
+
+ dbg("AT(%u):", jiffies_to_msecs(jiffies));
+ msg_dbg(msg, "<<");
+ if (link_blocked(l_ptr))
+ goto exit;
+
+ /* record unnumbered packet arrival (force mismatch on next timeout) */
+
+ l_ptr->checkpoint--;
+
+ if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
+ if (tipc_own_addr > msg_prevnode(msg))
+ l_ptr->b_ptr->net_plane = msg_net_plane(msg);
+
+ l_ptr->owner->permit_changeover = msg_redundant_link(msg);
+
+ switch (msg_type(msg)) {
+
+ case RESET_MSG:
+ if (!link_working_unknown(l_ptr) && l_ptr->peer_session) {
+ if (msg_session(msg) == l_ptr->peer_session) {
+ dbg("Duplicate RESET: %u<->%u\n",
+ msg_session(msg), l_ptr->peer_session);
+ break; /* duplicate: ignore */
+ }
+ }
+ /* fall thru' */
+ case ACTIVATE_MSG:
+ /* Update link settings according to the other endpoint's values */
+
+ strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
+
+ if ((msg_tol = msg_link_tolerance(msg)) &&
+ (msg_tol > l_ptr->tolerance))
+ link_set_supervision_props(l_ptr, msg_tol);
+
+ if (msg_linkprio(msg) > l_ptr->priority)
+ l_ptr->priority = msg_linkprio(msg);
+
+ max_pkt_info = msg_max_pkt(msg);
+ if (max_pkt_info) {
+ if (max_pkt_info < l_ptr->max_pkt_target)
+ l_ptr->max_pkt_target = max_pkt_info;
+ if (l_ptr->max_pkt > l_ptr->max_pkt_target)
+ l_ptr->max_pkt = l_ptr->max_pkt_target;
+ } else {
+ l_ptr->max_pkt = l_ptr->max_pkt_target;
+ }
+ l_ptr->owner->bclink.supported = (max_pkt_info != 0);
+
+ link_state_event(l_ptr, msg_type(msg));
+
+ l_ptr->peer_session = msg_session(msg);
+ l_ptr->peer_bearer_id = msg_bearer_id(msg);
+
+ /* Synchronize broadcast sequence numbers */
+ if (!node_has_redundant_links(l_ptr->owner)) {
+ l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
+ }
+ break;
+ case STATE_MSG:
+
+ if ((msg_tol = msg_link_tolerance(msg)))
+ link_set_supervision_props(l_ptr, msg_tol);
+
+ if (msg_linkprio(msg) &&
+ (msg_linkprio(msg) != l_ptr->priority)) {
+ warn("Changing prio <%s>: %u->%u\n",
+ l_ptr->name, l_ptr->priority, msg_linkprio(msg));
+ l_ptr->priority = msg_linkprio(msg);
+ link_reset(l_ptr); /* Enforce change to take effect */
+ break;
+ }
+ link_state_event(l_ptr, TRAFFIC_MSG_EVT);
+ l_ptr->stats.recv_states++;
+ if (link_reset_unknown(l_ptr))
+ break;
+
+ if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
+ rec_gap = mod(msg_next_sent(msg) -
+ mod(l_ptr->next_in_no));
+ }
+
+ max_pkt_ack = msg_max_pkt(msg);
+ if (max_pkt_ack > l_ptr->max_pkt) {
+ dbg("Link <%s> updated MTU %u -> %u\n",
+ l_ptr->name, l_ptr->max_pkt, max_pkt_ack);
+ l_ptr->max_pkt = max_pkt_ack;
+ l_ptr->max_pkt_probes = 0;
+ }
+
+ max_pkt_ack = 0;
+ if (msg_probe(msg)) {
+ l_ptr->stats.recv_probes++;
+ if (msg_size(msg) > sizeof(l_ptr->proto_msg)) {
+ max_pkt_ack = msg_size(msg);
+ }
+ }
+
+ /* Protocol message before retransmits, reduce loss risk */
+
+ bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));
+
+ if (rec_gap || (msg_probe(msg))) {
+ link_send_proto_msg(l_ptr, STATE_MSG,
+ 0, rec_gap, 0, 0, max_pkt_ack);
+ }
+ if (msg_seq_gap(msg)) {
+ msg_dbg(msg, "With Gap:");
+ l_ptr->stats.recv_nacks++;
+ link_retransmit(l_ptr, l_ptr->first_out,
+ msg_seq_gap(msg));
+ }
+ break;
+ default:
+ msg_dbg(buf_msg(buf), "<DISCARDING UNKNOWN<");
+ }
+exit:
+ buf_discard(buf);
+}
+
+
+/*
+ * link_tunnel(): Send one message via a link belonging to
+ * another bearer. Owner node is locked.
+ */
+void link_tunnel(struct link *l_ptr,
+ struct tipc_msg *tunnel_hdr,
+ struct tipc_msg *msg,
+ u32 selector)
+{
+ struct link *tunnel;
+ struct sk_buff *buf;
+ u32 length = msg_size(msg);
+
+ tunnel = l_ptr->owner->active_links[selector & 1];
+ if (!link_is_up(tunnel))
+ return;
+ msg_set_size(tunnel_hdr, length + INT_H_SIZE);
+ buf = buf_acquire(length + INT_H_SIZE);
+ if (!buf)
+ return;
+ memcpy(buf->data, (unchar *)tunnel_hdr, INT_H_SIZE);
+ memcpy(buf->data + INT_H_SIZE, (unchar *)msg, length);
+ dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane);
+ msg_dbg(buf_msg(buf), ">SEND>");
+ assert(tunnel);
+ link_send_buf(tunnel, buf);
+}
+
+
+
+/*
+ * changeover(): Send whole message queue via the remaining link
+ * Owner node is locked.
+ */
+
+void link_changeover(struct link *l_ptr)
+{
+ u32 msgcount = l_ptr->out_queue_size;
+ struct sk_buff *crs = l_ptr->first_out;
+ struct link *tunnel = l_ptr->owner->active_links[0];
+ int split_bundles = node_has_redundant_links(l_ptr->owner);
+ struct tipc_msg tunnel_hdr;
+
+ if (!tunnel)
+ return;
+
+ if (!l_ptr->owner->permit_changeover)
+ return;
+
+ msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
+ ORIGINAL_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
+ msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
+ msg_set_msgcnt(&tunnel_hdr, msgcount);
+ if (!l_ptr->first_out) {
+ struct sk_buff *buf;
+
+ assert(!msgcount);
+ buf = buf_acquire(INT_H_SIZE);
+ if (buf) {
+ memcpy(buf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
+ msg_set_size(&tunnel_hdr, INT_H_SIZE);
+ dbg("%c->%c:", l_ptr->b_ptr->net_plane,
+ tunnel->b_ptr->net_plane);
+ msg_dbg(&tunnel_hdr, "EMPTY>SEND>");
+ link_send_buf(tunnel, buf);
+ } else {
+ warn("Memory squeeze; link changeover failed\n");
+ }
+ return;
+ }
+ while (crs) {
+ struct tipc_msg *msg = buf_msg(crs);
+
+ if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
+ u32 msgcnt = msg_msgcnt(msg);
+ struct tipc_msg *m = msg_get_wrapped(msg);
+ unchar *pos = (unchar *)m;
+
+ while (msgcnt--) {
+ msg_set_seqno(m, msg_seqno(msg));
+ link_tunnel(l_ptr, &tunnel_hdr, m,
+ msg_link_selector(m));
+ pos += align(msg_size(m));
+ m = (struct tipc_msg *)pos;
+ }
+ } else {
+ link_tunnel(l_ptr, &tunnel_hdr, msg,
+ msg_link_selector(msg));
+ }
+ crs = crs->next;
+ }
+}
+
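+/*
+ * link_send_duplicate(): Send a duplicate of every message in the link's
+ * send queue via the given tunnel link, tagged as DUPLICATE_MSG so the
+ * peer can discard copies it has already received.
+ */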
+void link_send_duplicate(struct link *l_ptr, struct link *tunnel)
+{
+ struct sk_buff *iter;
+ struct tipc_msg tunnel_hdr;
+
+ msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
+ DUPLICATE_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
+ msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
+ msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
+ iter = l_ptr->first_out;
+ while (iter) {
+ struct sk_buff *outbuf;
+ struct tipc_msg *msg = buf_msg(iter);
+ u32 length = msg_size(msg);
+
+ if (msg_user(msg) == MSG_BUNDLER)
+ msg_set_type(msg, CLOSED_MSG);
+ msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */
+ msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
+ msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
+ outbuf = buf_acquire(length + INT_H_SIZE);
+ if (outbuf == NULL) {
+ warn("Memory squeeze; buffer duplication failed\n");
+ return;
+ }
+ memcpy(outbuf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
+ memcpy(outbuf->data + INT_H_SIZE, iter->data, length);
+ dbg("%c->%c:", l_ptr->b_ptr->net_plane,
+ tunnel->b_ptr->net_plane);
+ msg_dbg(buf_msg(outbuf), ">SEND>");
+ link_send_buf(tunnel, outbuf);
+ if (!link_is_up(l_ptr))
+ return;
+ iter = iter->next;
+ }
+}
+
+
+
+/**
+ * buf_extract - extracts embedded TIPC message from another message
+ * @skb: encapsulating message buffer
+ * @from_pos: offset to extract from
+ *
+ * Returns a new message buffer containing an embedded message. The
+ * encapsulating message itself is left unchanged.
+ */
+
+static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
+{
+ struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
+ u32 size = msg_size(msg);
+ struct sk_buff *eb;
+
+ eb = buf_acquire(size);
+ if (eb)
+ memcpy(eb->data, (unchar *)msg, size);
+ return eb;
+}
+
+/*
+ * link_recv_changeover_msg(): Receive tunneled packet sent
+ * via other link. Node is locked. Returns extracted buffer via *buf.
+ */
+
+static int link_recv_changeover_msg(struct link **l_ptr,
+ struct sk_buff **buf)
+{
+ struct sk_buff *tunnel_buf = *buf;
+ struct link *dest_link;
+ struct tipc_msg *msg;
+ struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
+ u32 msg_typ = msg_type(tunnel_msg);
+ u32 msg_count = msg_msgcnt(tunnel_msg);
+
+ dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
+ assert(dest_link != *l_ptr);
+ if (!dest_link) {
+ msg_dbg(tunnel_msg, "NOLINK/<REC<");
+ goto exit;
+ }
+ dbg("%c<-%c:", dest_link->b_ptr->net_plane,
+ (*l_ptr)->b_ptr->net_plane);
+ *l_ptr = dest_link;
+ msg = msg_get_wrapped(tunnel_msg);
+
+ if (msg_typ == DUPLICATE_MSG) {
+ if (less(msg_seqno(msg), mod(dest_link->next_in_no))) {
+ msg_dbg(tunnel_msg, "DROP/<REC<");
+ goto exit;
+ }
+ *buf = buf_extract(tunnel_buf,INT_H_SIZE);
+ if (*buf == NULL) {
+ warn("Memory squeeze; failed to extract msg\n");
+ goto exit;
+ }
+ msg_dbg(tunnel_msg, "TNL<REC<");
+ buf_discard(tunnel_buf);
+ return 1;
+ }
+
+ /* First original message? */
+
+ if (link_is_up(dest_link)) {
+ msg_dbg(tunnel_msg, "UP/FIRST/<REC<");
+ link_reset(dest_link);
+ dest_link->exp_msg_count = msg_count;
+ if (!msg_count)
+ goto exit;
+ } else if (dest_link->exp_msg_count == START_CHANGEOVER) {
+ msg_dbg(tunnel_msg, "BLK/FIRST/<REC<");
+ dest_link->exp_msg_count = msg_count;
+ if (!msg_count)
+ goto exit;
+ }
+
+ /* Receive original message */
+
+ if (dest_link->exp_msg_count == 0) {
+ msg_dbg(tunnel_msg, "OVERDUE/DROP/<REC<");
+ dbg_print_link(dest_link, "LINK:");
+ goto exit;
+ }
+ dest_link->exp_msg_count--;
+ if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
+ msg_dbg(tunnel_msg, "DROP/DUPL/<REC<");
+ goto exit;
+ } else {
+ *buf = buf_extract(tunnel_buf, INT_H_SIZE);
+ if (*buf != NULL) {
+ msg_dbg(tunnel_msg, "TNL<REC<");
+ buf_discard(tunnel_buf);
+ return 1;
+ } else {
+ warn("Memory squeeze; dropped incoming msg\n");
+ }
+ }
+exit:
+ *buf = 0;
+ buf_discard(tunnel_buf);
+ return 0;
+}
+
+/*
+ * Bundler functionality:
+ */
+void link_recv_bundle(struct sk_buff *buf)
+{
+ u32 msgcount = msg_msgcnt(buf_msg(buf));
+ u32 pos = INT_H_SIZE;
+ struct sk_buff *obuf;
+
+ msg_dbg(buf_msg(buf), "<BNDL<: ");
+ while (msgcount--) {
+ obuf = buf_extract(buf, pos);
+ if (obuf == NULL) {
+ char addr_string[16];
+
+ warn("Buffer allocation failure;\n");
+ warn(" incoming message(s) from %s lost\n",
+ addr_string_fill(addr_string,
+ msg_orignode(buf_msg(buf))));
+ return;
+ }
+ pos += align(msg_size(buf_msg(obuf)));
+ msg_dbg(buf_msg(obuf), " /");
+ net_route_msg(obuf);
+ }
+ buf_discard(buf);
+}
+
+/*
+ * Fragmentation/defragmentation:
+ */
+
+
+/*
+ * link_send_long_buf: Entry for buffers needing fragmentation.
+ * The buffer is complete, including the total message length.
+ * Returns user data length.
+ */
+int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
+{
+ struct tipc_msg *inmsg = buf_msg(buf);
+ struct tipc_msg fragm_hdr;
+ u32 insize = msg_size(inmsg);
+ u32 dsz = msg_data_sz(inmsg);
+ unchar *crs = buf->data;
+ u32 rest = insize;
+ u32 pack_sz = link_max_pkt(l_ptr);
+ u32 fragm_sz = pack_sz - INT_H_SIZE;
+ u32 fragm_no = 1;
+ u32 destaddr = msg_destnode(inmsg);
+
+ if (msg_short(inmsg))
+ destaddr = l_ptr->addr;
+
+ if (msg_routed(inmsg))
+ msg_set_prevnode(inmsg, tipc_own_addr);
+
+ /* Prepare reusable fragment header: */
+
+ msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
+ TIPC_OK, INT_H_SIZE, destaddr);
+ msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg));
+ msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++));
+ msg_set_fragm_no(&fragm_hdr, fragm_no);
+ l_ptr->stats.sent_fragmented++;
+
+ /* Chop up message: */
+
+ while (rest > 0) {
+ struct sk_buff *fragm;
+
+ if (rest <= fragm_sz) {
+ fragm_sz = rest;
+ msg_set_type(&fragm_hdr, LAST_FRAGMENT);
+ }
+ fragm = buf_acquire(fragm_sz + INT_H_SIZE);
+ if (fragm == NULL) {
+ warn("Memory squeeze; failed to fragment msg\n");
+ dsz = -ENOMEM;
+ goto exit;
+ }
+ msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
+ memcpy(fragm->data, (unchar *)&fragm_hdr, INT_H_SIZE);
+ memcpy(fragm->data + INT_H_SIZE, crs, fragm_sz);
+
+ /* Send queued messages first, if any: */
+
+ l_ptr->stats.sent_fragments++;
+ link_send_buf(l_ptr, fragm);
+ if (!link_is_up(l_ptr))
+ return dsz;
+ msg_set_fragm_no(&fragm_hdr, ++fragm_no);
+ rest -= fragm_sz;
+ crs += fragm_sz;
+ msg_set_type(&fragm_hdr, FRAGMENT);
+ }
+exit:
+ buf_discard(buf);
+ return dsz;
+}
+
+/*
+ * A pending message being re-assembled must store certain values
+ * to handle subsequent fragments correctly. The following functions
+ * help store these values in unused, available fields in the
+ * pending message. This makes dynamic memory allocation unnecessary.
+ */
+
+static inline u32 get_long_msg_seqno(struct sk_buff *buf)
+{
+ return msg_seqno(buf_msg(buf));
+}
+
+static inline void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
+{
+ msg_set_seqno(buf_msg(buf), seqno);
+}
+
+static inline u32 get_fragm_size(struct sk_buff *buf)
+{
+ return msg_ack(buf_msg(buf));
+}
+
+static inline void set_fragm_size(struct sk_buff *buf, u32 sz)
+{
+ msg_set_ack(buf_msg(buf), sz);
+}
+
+static inline u32 get_expected_frags(struct sk_buff *buf)
+{
+ return msg_bcast_ack(buf_msg(buf));
+}
+
+static inline void set_expected_frags(struct sk_buff *buf, u32 exp)
+{
+ msg_set_bcast_ack(buf_msg(buf), exp);
+}
+
+static inline u32 get_timer_cnt(struct sk_buff *buf)
+{
+ return msg_reroute_cnt(buf_msg(buf));
+}
+
+static inline void incr_timer_cnt(struct sk_buff *buf)
+{
+ msg_incr_reroute_cnt(buf_msg(buf));
+}
+
+/*
+ * link_recv_fragment(): Called with node lock on. Returns 1 if the
+ * message is complete, passing the reassembled buffer back via *fb.
+ */
+int link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
+ struct tipc_msg **m)
+{
+ struct sk_buff *prev = 0;
+ struct sk_buff *fbuf = *fb;
+ struct tipc_msg *fragm = buf_msg(fbuf);
+ struct sk_buff *pbuf = *pending;
+ u32 long_msg_seq_no = msg_long_msgno(fragm);
+
+ *fb = 0;
+ msg_dbg(fragm,"FRG<REC<");
+
+ /* Is there an incomplete message waiting for this fragment? */
+
+ while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no)
+ || (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
+ prev = pbuf;
+ pbuf = pbuf->next;
+ }
+
+ if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
+ struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
+ u32 msg_sz = msg_size(imsg);
+ u32 fragm_sz = msg_data_sz(fragm);
+ u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
+ u32 max = TIPC_MAX_USER_MSG_SIZE + LONG_H_SIZE;
+ if (msg_type(imsg) == TIPC_MCAST_MSG)
+ max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
+ if (msg_size(imsg) > max) {
+ msg_dbg(fragm,"<REC<Oversized: ");
+ buf_discard(fbuf);
+ return 0;
+ }
+ pbuf = buf_acquire(msg_size(imsg));
+ if (pbuf != NULL) {
+ pbuf->next = *pending;
+ *pending = pbuf;
+ memcpy(pbuf->data, (unchar *)imsg, msg_data_sz(fragm));
+
+ /* Prepare buffer for subsequent fragments. */
+
+ set_long_msg_seqno(pbuf, long_msg_seq_no);
+ set_fragm_size(pbuf,fragm_sz);
+ set_expected_frags(pbuf,exp_fragm_cnt - 1);
+ } else {
+ warn("Memory squeeze; got no defragmenting buffer\n");
+ }
+ buf_discard(fbuf);
+ return 0;
+ } else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
+ u32 dsz = msg_data_sz(fragm);
+ u32 fsz = get_fragm_size(pbuf);
+ u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
+ u32 exp_frags = get_expected_frags(pbuf) - 1;
+ memcpy(pbuf->data + crs, msg_data(fragm), dsz);
+ buf_discard(fbuf);
+
+ /* Is message complete? */
+
+ if (exp_frags == 0) {
+ if (prev)
+ prev->next = pbuf->next;
+ else
+ *pending = pbuf->next;
+ msg_reset_reroute_cnt(buf_msg(pbuf));
+ *fb = pbuf;
+ *m = buf_msg(pbuf);
+ return 1;
+ }
+ set_expected_frags(pbuf,exp_frags);
+ return 0;
+ }
+ dbg(" Discarding orphan fragment %x\n",fbuf);
+ msg_dbg(fragm,"ORPHAN:");
+ dbg("Pending long buffers:\n");
+ dbg_print_buf_chain(*pending);
+ buf_discard(fbuf);
+ return 0;
+}
+
+/**
+ * link_check_defragm_bufs - flush stale incoming message fragments
+ * @l_ptr: pointer to link
+ */
+
+static void link_check_defragm_bufs(struct link *l_ptr)
+{
+ struct sk_buff *prev = 0;
+ struct sk_buff *next = 0;
+ struct sk_buff *buf = l_ptr->defragm_buf;
+
+ if (!buf)
+ return;
+ if (!link_working_working(l_ptr))
+ return;
+ while (buf) {
+ u32 cnt = get_timer_cnt(buf);
+
+ next = buf->next;
+ if (cnt < 4) {
+ incr_timer_cnt(buf);
+ prev = buf;
+ } else {
+ dbg(" Discarding incomplete long buffer\n");
+ msg_dbg(buf_msg(buf), "LONG:");
+ dbg_print_link(l_ptr, "curr:");
+ dbg("Pending long buffers:\n");
+ dbg_print_buf_chain(l_ptr->defragm_buf);
+ if (prev)
+ prev->next = buf->next;
+ else
+ l_ptr->defragm_buf = buf->next;
+ buf_discard(buf);
+ }
+ buf = next;
+ }
+}
+
+
+
+static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
+{
+ l_ptr->tolerance = tolerance;
+ l_ptr->continuity_interval =
+ ((tolerance / 4) > 500) ? 500 : tolerance / 4;
+ l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
+}
+
+
+void link_set_queue_limits(struct link *l_ptr, u32 window)
+{
+ /* Data messages from this node, including FIRST_FRAGM */
+ l_ptr->queue_limit[DATA_LOW] = window;
+ l_ptr->queue_limit[DATA_MEDIUM] = (window / 3) * 4;
+ l_ptr->queue_limit[DATA_HIGH] = (window / 3) * 5;
+ l_ptr->queue_limit[DATA_CRITICAL] = (window / 3) * 6;
+ /* Transiting data messages, including FIRST_FRAGM */
+ l_ptr->queue_limit[DATA_LOW + 4] = 300;
+ l_ptr->queue_limit[DATA_MEDIUM + 4] = 600;
+ l_ptr->queue_limit[DATA_HIGH + 4] = 900;
+ l_ptr->queue_limit[DATA_CRITICAL + 4] = 1200;
+ l_ptr->queue_limit[CONN_MANAGER] = 1200;
+ l_ptr->queue_limit[ROUTE_DISTRIBUTOR] = 1200;
+ l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
+ l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
+ /* FRAGMENT and LAST_FRAGMENT packets */
+ l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
+}
+
+/**
+ * link_find_link - locate link by name
+ * @name: ptr to link name string
+ * @node: ptr to area to be filled with ptr to associated node
+ *
+ * Caller must hold 'net_lock' to ensure node and bearer are not deleted;
+ * this also prevents link deletion.
+ *
+ * Returns pointer to link (or 0 if invalid link name).
+ */
+
+static struct link *link_find_link(const char *name, struct node **node)
+{
+ struct link_name link_name_parts;
+ struct bearer *b_ptr;
+ struct link *l_ptr;
+
+ if (!link_name_validate(name, &link_name_parts))
+ return 0;
+
+ b_ptr = bearer_find_interface(link_name_parts.if_local);
+ if (!b_ptr)
+ return 0;
+
+ *node = node_find(link_name_parts.addr_peer);
+ if (!*node)
+ return 0;
+
+ l_ptr = (*node)->links[b_ptr->identity];
+ if (!l_ptr || strcmp(l_ptr->name, name))
+ return 0;
+
+ return l_ptr;
+}
+
+struct sk_buff *link_cmd_config(const void *req_tlv_area, int req_tlv_space,
+ u16 cmd)
+{
+ struct tipc_link_config *args;
+ u32 new_value;
+ struct link *l_ptr;
+ struct node *node;
+ int res;
+
+ if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
+ return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+ args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
+ new_value = ntohl(args->value);
+
+ if (!strcmp(args->name, bc_link_name)) {
+ if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
+ (bclink_set_queue_limits(new_value) == 0))
+ return cfg_reply_none();
+ return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+ " (cannot change setting on broadcast link)");
+ }
+
+ read_lock_bh(&net_lock);
+ l_ptr = link_find_link(args->name, &node);
+ if (!l_ptr) {
+ read_unlock_bh(&net_lock);
+ return cfg_reply_error_string("link not found");
+ }
+
+ node_lock(node);
+ res = -EINVAL;
+ switch (cmd) {
+ case TIPC_CMD_SET_LINK_TOL:
+ if ((new_value >= TIPC_MIN_LINK_TOL) &&
+ (new_value <= TIPC_MAX_LINK_TOL)) {
+ link_set_supervision_props(l_ptr, new_value);
+ link_send_proto_msg(l_ptr, STATE_MSG,
+ 0, 0, new_value, 0, 0);
+ res = TIPC_OK;
+ }
+ break;
+ case TIPC_CMD_SET_LINK_PRI:
+ if (new_value < TIPC_NUM_LINK_PRI) {
+ l_ptr->priority = new_value;
+ link_send_proto_msg(l_ptr, STATE_MSG,
+ 0, 0, 0, new_value, 0);
+ res = TIPC_OK;
+ }
+ break;
+ case TIPC_CMD_SET_LINK_WINDOW:
+ if ((new_value >= TIPC_MIN_LINK_WIN) &&
+ (new_value <= TIPC_MAX_LINK_WIN)) {
+ link_set_queue_limits(l_ptr, new_value);
+ res = TIPC_OK;
+ }
+ break;
+ }
+ node_unlock(node);
+
+ read_unlock_bh(&net_lock);
+ if (res)
+ return cfg_reply_error_string("cannot change link setting");
+
+ return cfg_reply_none();
+}
+
+/**
+ * link_reset_statistics - reset link statistics
+ * @l_ptr: pointer to link
+ */
+
+static void link_reset_statistics(struct link *l_ptr)
+{
+ memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
+ l_ptr->stats.sent_info = l_ptr->next_out_no;
+ l_ptr->stats.recv_info = l_ptr->next_in_no;
+}
+
+struct sk_buff *link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
+{
+ char *link_name;
+ struct link *l_ptr;
+ struct node *node;
+
+ if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
+ return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+ link_name = (char *)TLV_DATA(req_tlv_area);
+ if (!strcmp(link_name, bc_link_name)) {
+ if (bclink_reset_stats())
+ return cfg_reply_error_string("link not found");
+ return cfg_reply_none();
+ }
+
+ read_lock_bh(&net_lock);
+ l_ptr = link_find_link(link_name, &node);
+ if (!l_ptr) {
+ read_unlock_bh(&net_lock);
+ return cfg_reply_error_string("link not found");
+ }
+
+ node_lock(node);
+ link_reset_statistics(l_ptr);
+ node_unlock(node);
+ read_unlock_bh(&net_lock);
+ return cfg_reply_none();
+}
+
+/**
+ * percent - convert count to a percentage of total (rounding up or down)
+ */
+
+static u32 percent(u32 count, u32 total)
+{
+ return (count * 100 + (total / 2)) / total;
+}
+
+/**
+ * link_stats - print link statistics
+ * @name: link name
+ * @buf: print buffer area
+ * @buf_size: size of print buffer area
+ *
+ * Returns length of print buffer data string (or 0 if error)
+ */
+
+static int link_stats(const char *name, char *buf, const u32 buf_size)
+{
+ struct print_buf pb;
+ struct link *l_ptr;
+ struct node *node;
+ char *status;
+ u32 profile_total = 0;
+
+ if (!strcmp(name, bc_link_name))
+ return bclink_stats(buf, buf_size);
+
+ printbuf_init(&pb, buf, buf_size);
+
+ read_lock_bh(&net_lock);
+ l_ptr = link_find_link(name, &node);
+ if (!l_ptr) {
+ read_unlock_bh(&net_lock);
+ return 0;
+ }
+ node_lock(node);
+
+ if (link_is_active(l_ptr))
+ status = "ACTIVE";
+ else if (link_is_up(l_ptr))
+ status = "STANDBY";
+ else
+ status = "DEFUNCT";
+ tipc_printf(&pb, "Link <%s>\n"
+ " %s MTU:%u Priority:%u Tolerance:%u ms"
+ " Window:%u packets\n",
+ l_ptr->name, status, link_max_pkt(l_ptr),
+ l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
+ tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ l_ptr->next_in_no - l_ptr->stats.recv_info,
+ l_ptr->stats.recv_fragments,
+ l_ptr->stats.recv_fragmented,
+ l_ptr->stats.recv_bundles,
+ l_ptr->stats.recv_bundled);
+ tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ l_ptr->next_out_no - l_ptr->stats.sent_info,
+ l_ptr->stats.sent_fragments,
+ l_ptr->stats.sent_fragmented,
+ l_ptr->stats.sent_bundles,
+ l_ptr->stats.sent_bundled);
+ profile_total = l_ptr->stats.msg_length_counts;
+ if (!profile_total)
+ profile_total = 1;
+ tipc_printf(&pb, " TX profile sample:%u packets average:%u octets\n"
+ " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
+ "-16354:%u%% -32768:%u%% -66000:%u%%\n",
+ l_ptr->stats.msg_length_counts,
+ l_ptr->stats.msg_lengths_total / profile_total,
+ percent(l_ptr->stats.msg_length_profile[0], profile_total),
+ percent(l_ptr->stats.msg_length_profile[1], profile_total),
+ percent(l_ptr->stats.msg_length_profile[2], profile_total),
+ percent(l_ptr->stats.msg_length_profile[3], profile_total),
+ percent(l_ptr->stats.msg_length_profile[4], profile_total),
+ percent(l_ptr->stats.msg_length_profile[5], profile_total),
+ percent(l_ptr->stats.msg_length_profile[6], profile_total));
+ tipc_printf(&pb, " RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
+ l_ptr->stats.recv_states,
+ l_ptr->stats.recv_probes,
+ l_ptr->stats.recv_nacks,
+ l_ptr->stats.deferred_recv,
+ l_ptr->stats.duplicates);
+ tipc_printf(&pb, " TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
+ l_ptr->stats.sent_states,
+ l_ptr->stats.sent_probes,
+ l_ptr->stats.sent_nacks,
+ l_ptr->stats.sent_acks,
+ l_ptr->stats.retransmitted);
+ tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
+ l_ptr->stats.bearer_congs,
+ l_ptr->stats.link_congs,
+ l_ptr->stats.max_queue_sz,
+ l_ptr->stats.queue_sz_counts
+ ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
+ : 0);
+
+ node_unlock(node);
+ read_unlock_bh(&net_lock);
+ return printbuf_validate(&pb);
+}
+
+#define MAX_LINK_STATS_INFO 2000
+
+struct sk_buff *link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
+{
+ struct sk_buff *buf;
+ struct tlv_desc *rep_tlv;
+ int str_len;
+
+ if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
+ return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+ buf = cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
+ if (!buf)
+ return NULL;
+
+ rep_tlv = (struct tlv_desc *)buf->data;
+
+ str_len = link_stats((char *)TLV_DATA(req_tlv_area),
+ (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
+ if (!str_len) {
+ buf_discard(buf);
+ return cfg_reply_error_string("link not found");
+ }
+
+ skb_put(buf, TLV_SPACE(str_len));
+ TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
+
+ return buf;
+}
+
+#if 0
+int link_control(const char *name, u32 op, u32 val)
+{
+ int res = -EINVAL;
+ struct link *l_ptr;
+ u32 bearer_id;
+ struct node * node;
+ u32 a;
+
+ a = link_name2addr(name, &bearer_id);
+ read_lock_bh(&net_lock);
+ node = node_find(a);
+ if (node) {
+ node_lock(node);
+ l_ptr = node->links[bearer_id];
+ if (l_ptr) {
+ if (op == TIPC_REMOVE_LINK) {
+ struct bearer *b_ptr = l_ptr->b_ptr;
+ spin_lock_bh(&b_ptr->publ.lock);
+ link_delete(l_ptr);
+ spin_unlock_bh(&b_ptr->publ.lock);
+ }
+ if (op == TIPC_CMD_BLOCK_LINK) {
+ link_reset(l_ptr);
+ l_ptr->blocked = 1;
+ }
+ if (op == TIPC_CMD_UNBLOCK_LINK) {
+ l_ptr->blocked = 0;
+ }
+ res = TIPC_OK;
+ }
+ node_unlock(node);
+ }
+ read_unlock_bh(&net_lock);
+ return res;
+}
+#endif
+
+/**
+ * link_get_max_pkt - get maximum packet size to use when sending to destination
+ * @dest: network address of destination node
+ * @selector: used to select from set of active links
+ *
+ * If no active link can be found, uses default maximum packet size.
+ */
+
+u32 link_get_max_pkt(u32 dest, u32 selector)
+{
+ struct node *n_ptr;
+ struct link *l_ptr;
+ u32 res = MAX_PKT_DEFAULT;
+
+ if (dest == tipc_own_addr)
+ return MAX_MSG_SIZE;
+
+ read_lock_bh(&net_lock);
+ n_ptr = node_select(dest, selector);
+ if (n_ptr) {
+ node_lock(n_ptr);
+ l_ptr = n_ptr->active_links[selector & 1];
+ if (l_ptr)
+ res = link_max_pkt(l_ptr);
+ node_unlock(n_ptr);
+ }
+ read_unlock_bh(&net_lock);
+ return res;
+}
+
+#if 0
+static void link_dump_rec_queue(struct link *l_ptr)
+{
+ struct sk_buff *crs;
+
+ if (!l_ptr->oldest_deferred_in) {
+ info("Reception queue empty\n");
+ return;
+ }
+ info("Contents of Reception queue:\n");
+ crs = l_ptr->oldest_deferred_in;
+ while (crs) {
+ if (crs->data == (void *)0x0000a3a3) {
+ info("buffer %x invalid\n", crs);
+ return;
+ }
+ msg_dbg(buf_msg(crs), "In rec queue: \n");
+ crs = crs->next;
+ }
+}
+#endif
+
+static void link_dump_send_queue(struct link *l_ptr)
+{
+ if (l_ptr->next_out) {
+ info("\nContents of unsent queue:\n");
+ dbg_print_buf_chain(l_ptr->next_out);
+ }
+ info("\nContents of send queue:\n");
+ if (l_ptr->first_out) {
+ dbg_print_buf_chain(l_ptr->first_out);
+ }
+ info("Empty send queue\n");
+}
+
+static void link_print(struct link *l_ptr, struct print_buf *buf,
+ const char *str)
+{
+ tipc_printf(buf, str);
+ if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
+ return;
+ tipc_printf(buf, "Link %x<%s>:",
+ l_ptr->addr, l_ptr->b_ptr->publ.name);
+ tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
+ tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
+ tipc_printf(buf, "SQUE");
+ if (l_ptr->first_out) {
+ tipc_printf(buf, "[%u..", msg_seqno(buf_msg(l_ptr->first_out)));
+ if (l_ptr->next_out)
+ tipc_printf(buf, "%u..",
+ msg_seqno(buf_msg(l_ptr->next_out)));
+ tipc_printf(buf, "%u]",
+ msg_seqno(buf_msg
+ (l_ptr->last_out)), l_ptr->out_queue_size);
+ if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) -
+ msg_seqno(buf_msg(l_ptr->first_out)))
+ != (l_ptr->out_queue_size - 1))
+ || (l_ptr->last_out->next != 0)) {
+ tipc_printf(buf, "\nSend queue inconsistency\n");
+ tipc_printf(buf, "first_out= %x ", l_ptr->first_out);
+ tipc_printf(buf, "next_out= %x ", l_ptr->next_out);
+ tipc_printf(buf, "last_out= %x ", l_ptr->last_out);
+ link_dump_send_queue(l_ptr);
+ }
+ } else
+ tipc_printf(buf, "[]");
+ tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
+ if (l_ptr->oldest_deferred_in) {
+ u32 o = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
+ u32 n = msg_seqno(buf_msg(l_ptr->newest_deferred_in));
+ tipc_printf(buf, ":RQUE[%u..%u]", o, n);
+ if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
+ tipc_printf(buf, ":RQSIZ(%u)",
+ l_ptr->deferred_inqueue_sz);
+ }
+ }
+ if (link_working_unknown(l_ptr))
+ tipc_printf(buf, ":WU");
+ if (link_reset_reset(l_ptr))
+ tipc_printf(buf, ":RR");
+ if (link_reset_unknown(l_ptr))
+ tipc_printf(buf, ":RU");
+ if (link_working_working(l_ptr))
+ tipc_printf(buf, ":WW");
+ tipc_printf(buf, "\n");
+}
+
diff --git a/net/tipc/link.h b/net/tipc/link.h
new file mode 100644
index 00000000000..c2553f07375
--- /dev/null
+++ b/net/tipc/link.h
@@ -0,0 +1,296 @@
+/*
+ * net/tipc/link.h: Include file for TIPC link code
+ *
+ * Copyright (c) 1995-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_LINK_H
+#define _TIPC_LINK_H
+
+#include "dbg.h"
+#include "msg.h"
+#include "bearer.h"
+#include "node.h"
+
+#define PUSH_FAILED 1
+#define PUSH_FINISHED 2
+
+/*
+ * Link states
+ */
+
+#define WORKING_WORKING 560810u
+#define WORKING_UNKNOWN 560811u
+#define RESET_UNKNOWN 560812u
+#define RESET_RESET 560813u
+
+/*
+ * Starting value for maximum packet size negotiation on unicast links
+ * (unless bearer MTU is less)
+ */
+
+#define MAX_PKT_DEFAULT 1500
+
+/**
+ * struct link - TIPC link data structure
+ * @addr: network address of link's peer node
+ * @name: link name character string
+ * @media_addr: media address to use when sending messages over link
+ * @timer: link timer
+ * @owner: pointer to peer node
+ * @link_list: adjacent links in bearer's list of links
+ * @started: indicates if link has been started
+ * @checkpoint: reference point for triggering link continuity checking
+ * @peer_session: link session # being used by peer end of link
+ * @peer_bearer_id: bearer id used by link's peer endpoint
+ * @b_ptr: pointer to bearer used by link
+ * @tolerance: minimum link continuity loss needed to reset link [in ms]
+ * @continuity_interval: link continuity testing interval [in ms]
+ * @abort_limit: # of unacknowledged continuity probes needed to reset link
+ * @state: current state of link FSM
+ * @blocked: indicates if link has been administratively blocked
+ * @fsm_msg_cnt: # of protocol messages link FSM has sent in current state
+ * @proto_msg: template for control messages generated by link
+ * @pmsg: convenience pointer to "proto_msg" field
+ * @priority: current link priority
+ * @queue_limit: outbound message queue congestion thresholds (indexed by user)
+ * @exp_msg_count: # of tunnelled messages expected during link changeover
+ * @reset_checkpoint: seq # of last acknowledged message at time of link reset
+ * @max_pkt: current maximum packet size for this link
+ * @max_pkt_target: desired maximum packet size for this link
+ * @max_pkt_probes: # of probes based on current (max_pkt, max_pkt_target)
+ * @out_queue_size: # of messages in outbound message queue
+ * @first_out: ptr to first outbound message in queue
+ * @last_out: ptr to last outbound message in queue
+ * @next_out_no: next sequence number to use for outbound messages
+ * @last_retransmitted: sequence number of most recently retransmitted message
+ * @stale_count: # of identical retransmit requests made by peer
+ * @next_in_no: next sequence number to expect for inbound messages
+ * @deferred_inqueue_sz: # of messages in inbound message queue
+ * @oldest_deferred_in: ptr to first inbound message in queue
+ * @newest_deferred_in: ptr to last inbound message in queue
+ * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
+ * @proto_msg_queue: ptr to (single) outbound control message
+ * @retransm_queue_size: number of messages to retransmit
+ * @retransm_queue_head: sequence number of first message to retransmit
+ * @next_out: ptr to first unsent outbound message in queue
+ * @waiting_ports: linked list of ports waiting for link congestion to abate
+ * @long_msg_seq_no: next identifier to use for outbound fragmented messages
+ * @defragm_buf: list of partially reassembled inbound message fragments
+ * @stats: collects statistics regarding link activity
+ * @print_buf: print buffer used to log link activity
+ */
+
+struct link {
+ u32 addr;
+ char name[TIPC_MAX_LINK_NAME];
+ struct tipc_media_addr media_addr;
+ struct timer_list timer;
+ struct node *owner;
+ struct list_head link_list;
+
+ /* Management and link supervision data */
+ int started;
+ u32 checkpoint;
+ u32 peer_session;
+ u32 peer_bearer_id;
+ struct bearer *b_ptr;
+ u32 tolerance;
+ u32 continuity_interval;
+ u32 abort_limit;
+ int state;
+ int blocked;
+ u32 fsm_msg_cnt;
+ struct {
+ unchar hdr[INT_H_SIZE];
+ unchar body[TIPC_MAX_IF_NAME];
+ } proto_msg;
+ struct tipc_msg *pmsg;
+ u32 priority;
+ u32 queue_limit[15]; /* queue_limit[0]==window limit */
+
+ /* Changeover */
+ u32 exp_msg_count;
+ u32 reset_checkpoint;
+
+ /* Max packet negotiation */
+ u32 max_pkt;
+ u32 max_pkt_target;
+ u32 max_pkt_probes;
+
+ /* Sending */
+ u32 out_queue_size;
+ struct sk_buff *first_out;
+ struct sk_buff *last_out;
+ u32 next_out_no;
+ u32 last_retransmitted;
+ u32 stale_count;
+
+ /* Reception */
+ u32 next_in_no;
+ u32 deferred_inqueue_sz;
+ struct sk_buff *oldest_deferred_in;
+ struct sk_buff *newest_deferred_in;
+ u32 unacked_window;
+
+ /* Congestion handling */
+ struct sk_buff *proto_msg_queue;
+ u32 retransm_queue_size;
+ u32 retransm_queue_head;
+ struct sk_buff *next_out;
+ struct list_head waiting_ports;
+
+ /* Fragmentation/defragmentation */
+ u32 long_msg_seq_no;
+ struct sk_buff *defragm_buf;
+
+ /* Statistics */
+ struct {
+ u32 sent_info; /* used in counting # sent packets */
+ u32 recv_info; /* used in counting # recv'd packets */
+ u32 sent_states;
+ u32 recv_states;
+ u32 sent_probes;
+ u32 recv_probes;
+ u32 sent_nacks;
+ u32 recv_nacks;
+ u32 sent_acks;
+ u32 sent_bundled;
+ u32 sent_bundles;
+ u32 recv_bundled;
+ u32 recv_bundles;
+ u32 retransmitted;
+ u32 sent_fragmented;
+ u32 sent_fragments;
+ u32 recv_fragmented;
+ u32 recv_fragments;
+ u32 link_congs; /* # port sends blocked by congestion */
+ u32 bearer_congs;
+ u32 deferred_recv;
+ u32 duplicates;
+
+ /* for statistical profiling of send queue size */
+
+ u32 max_queue_sz;
+ u32 accu_queue_sz;
+ u32 queue_sz_counts;
+
+ /* for statistical profiling of message lengths */
+
+ u32 msg_length_counts;
+ u32 msg_lengths_total;
+ u32 msg_length_profile[7];
+#if 0
+ u32 sent_tunneled;
+ u32 recv_tunneled;
+#endif
+ } stats;
+
+ struct print_buf print_buf;
+};
+
+struct port;
+
+struct link *link_create(struct bearer *b_ptr, const u32 peer,
+ const struct tipc_media_addr *media_addr);
+void link_delete(struct link *l_ptr);
+void link_changeover(struct link *l_ptr);
+void link_send_duplicate(struct link *l_ptr, struct link *dest);
+void link_reset_fragments(struct link *l_ptr);
+int link_is_up(struct link *l_ptr);
+int link_is_active(struct link *l_ptr);
+void link_start(struct link *l_ptr);
+u32 link_push_packet(struct link *l_ptr);
+void link_stop(struct link *l_ptr);
+struct sk_buff *link_cmd_config(const void *req_tlv_area, int req_tlv_space, u16 cmd);
+struct sk_buff *link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space);
+struct sk_buff *link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space);
+void link_reset(struct link *l_ptr);
+int link_send(struct sk_buff *buf, u32 dest, u32 selector);
+int link_send_buf(struct link *l_ptr, struct sk_buff *buf);
+u32 link_get_max_pkt(u32 dest,u32 selector);
+int link_send_sections_fast(struct port* sender,
+ struct iovec const *msg_sect,
+ const u32 num_sect,
+ u32 destnode);
+
+int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
+void link_tunnel(struct link *l_ptr, struct tipc_msg *tnl_hdr,
+ struct tipc_msg *msg, u32 selector);
+void link_recv_bundle(struct sk_buff *buf);
+int link_recv_fragment(struct sk_buff **pending,
+ struct sk_buff **fb,
+ struct tipc_msg **msg);
+void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int prob, u32 gap,
+ u32 tolerance, u32 priority, u32 acked_mtu);
+void link_push_queue(struct link *l_ptr);
+u32 link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
+ struct sk_buff *buf);
+void link_wakeup_ports(struct link *l_ptr, int all);
+void link_set_queue_limits(struct link *l_ptr, u32 window);
+void link_retransmit(struct link *l_ptr, struct sk_buff *start, u32 retransmits);
+
+/*
+ * Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
+ */
+
+static inline u32 mod(u32 x)
+{
+ return x & 0xffffu;
+}
+
+static inline int between(u32 lower, u32 upper, u32 n)
+{
+ if ((lower < n) && (n < upper))
+ return 1;
+ if ((upper < lower) && ((n > lower) || (n < upper)))
+ return 1;
+ return 0;
+}
+
+static inline int less_eq(u32 left, u32 right)
+{
+ return (mod(right - left) < 32768u);
+}
+
+static inline int less(u32 left, u32 right)
+{
+ return (less_eq(left, right) && (mod(right) != mod(left)));
+}
+
+static inline u32 lesser(u32 left, u32 right)
+{
+ return less_eq(left, right) ? left : right;
+}
+
+#endif
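A minimal standalone sketch of how the sequence-number helpers above behave across the 16-bit wrap-around. It assumes only standard C, simply restates the inline functions from link.h, and is an illustration rather than part of the patch:

#include <assert.h>
#include <stdint.h>

typedef uint32_t u32;

/* Same definitions as the inline helpers in net/tipc/link.h above */
static u32 mod(u32 x) { return x & 0xffffu; }
static int less_eq(u32 left, u32 right) { return mod(right - left) < 32768u; }
static int less(u32 left, u32 right) { return less_eq(left, right) && (mod(right) != mod(left)); }
static int between(u32 lower, u32 upper, u32 n)
{
	if ((lower < n) && (n < upper))
		return 1;
	if ((upper < lower) && ((n > lower) || (n < upper)))
		return 1;
	return 0;
}

int main(void)
{
	/* Ordinary ordering inside the 16-bit window */
	assert(less(100, 200));
	/* Ordering is preserved across the 0xffff -> 0x0000 wrap */
	assert(less(0xfffe, 0x0003));
	assert(!less(0x0003, 0xfffe));
	/* between() treats the sequence space as circular as well */
	assert(between(0xfffd, 0x0002, 0xffff));
	return 0;
}

The less()/less_eq() tests stay unambiguous as long as any two live sequence numbers are within 32767 of each other, which the queue limits above keep comfortably true.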
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
new file mode 100644
index 00000000000..03dbc55cb04
--- /dev/null
+++ b/net/tipc/msg.c
@@ -0,0 +1,334 @@
+/*
+ * net/tipc/msg.c: TIPC message header routines
+ *
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "addr.h"
+#include "dbg.h"
+#include "msg.h"
+#include "bearer.h"
+
+
+void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
+{
+ memcpy(&((int *)m)[5], a, sizeof(*a));
+}
+
+void msg_get_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
+{
+ memcpy(a, &((int*)m)[5], sizeof(*a));
+}
+
+
+void msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str)
+{
+ u32 usr = msg_user(msg);
+ tipc_printf(buf, str);
+
+ switch (usr) {
+ case MSG_BUNDLER:
+ tipc_printf(buf, "BNDL::");
+ tipc_printf(buf, "MSGS(%u):", msg_msgcnt(msg));
+ break;
+ case BCAST_PROTOCOL:
+ tipc_printf(buf, "BCASTP::");
+ break;
+ case MSG_FRAGMENTER:
+ tipc_printf(buf, "FRAGM::");
+ switch (msg_type(msg)) {
+ case FIRST_FRAGMENT:
+ tipc_printf(buf, "FIRST:");
+ break;
+ case FRAGMENT:
+ tipc_printf(buf, "BODY:");
+ break;
+ case LAST_FRAGMENT:
+ tipc_printf(buf, "LAST:");
+ break;
+ default:
+ tipc_printf(buf, "UNKNOWN:%x",msg_type(msg));
+
+ }
+ tipc_printf(buf, "NO(%u/%u):",msg_long_msgno(msg),
+ msg_fragm_no(msg));
+ break;
+ case DATA_LOW:
+ case DATA_MEDIUM:
+ case DATA_HIGH:
+ case DATA_CRITICAL:
+ tipc_printf(buf, "DAT%u:", msg_user(msg));
+ if (msg_short(msg)) {
+ tipc_printf(buf, "CON:");
+ break;
+ }
+ switch (msg_type(msg)) {
+ case TIPC_CONN_MSG:
+ tipc_printf(buf, "CON:");
+ break;
+ case TIPC_MCAST_MSG:
+ tipc_printf(buf, "MCST:");
+ break;
+ case TIPC_NAMED_MSG:
+ tipc_printf(buf, "NAM:");
+ break;
+ case TIPC_DIRECT_MSG:
+ tipc_printf(buf, "DIR:");
+ break;
+ default:
+ tipc_printf(buf, "UNKNOWN TYPE %u",msg_type(msg));
+ }
+ if (msg_routed(msg) && !msg_non_seq(msg))
+ tipc_printf(buf, "ROUT:");
+ if (msg_reroute_cnt(msg))
+ tipc_printf(buf, "REROUTED(%u):",
+ msg_reroute_cnt(msg));
+ break;
+ case NAME_DISTRIBUTOR:
+ tipc_printf(buf, "NMD::");
+ switch (msg_type(msg)) {
+ case PUBLICATION:
+ tipc_printf(buf, "PUBL(%u):", (msg_size(msg) - msg_hdr_sz(msg)) / 20); /* Items */
+ break;
+ case WITHDRAWAL:
+ tipc_printf(buf, "WDRW:");
+ break;
+ default:
+ tipc_printf(buf, "UNKNOWN:%x",msg_type(msg));
+ }
+ if (msg_routed(msg))
+ tipc_printf(buf, "ROUT:");
+ if (msg_reroute_cnt(msg))
+ tipc_printf(buf, "REROUTED(%u):",
+ msg_reroute_cnt(msg));
+ break;
+ case CONN_MANAGER:
+ tipc_printf(buf, "CONN_MNG:");
+ switch (msg_type(msg)) {
+ case CONN_PROBE:
+ tipc_printf(buf, "PROBE:");
+ break;
+ case CONN_PROBE_REPLY:
+ tipc_printf(buf, "PROBE_REPLY:");
+ break;
+ case CONN_ACK:
+ tipc_printf(buf, "CONN_ACK:");
+ tipc_printf(buf, "ACK(%u):",msg_msgcnt(msg));
+ break;
+ default:
+ tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
+ }
+ if (msg_routed(msg))
+ tipc_printf(buf, "ROUT:");
+ if (msg_reroute_cnt(msg))
+ tipc_printf(buf, "REROUTED(%u):",msg_reroute_cnt(msg));
+ break;
+ case LINK_PROTOCOL:
+ tipc_printf(buf, "PROT:TIM(%u):",msg_timestamp(msg));
+ switch (msg_type(msg)) {
+ case STATE_MSG:
+ tipc_printf(buf, "STATE:");
+ tipc_printf(buf, "%s:",msg_probe(msg) ? "PRB" :"");
+ tipc_printf(buf, "NXS(%u):",msg_next_sent(msg));
+ tipc_printf(buf, "GAP(%u):",msg_seq_gap(msg));
+ tipc_printf(buf, "LSTBC(%u):",msg_last_bcast(msg));
+ break;
+ case RESET_MSG:
+ tipc_printf(buf, "RESET:");
+ if (msg_size(msg) != msg_hdr_sz(msg))
+ tipc_printf(buf, "BEAR:%s:",msg_data(msg));
+ break;
+ case ACTIVATE_MSG:
+ tipc_printf(buf, "ACTIVATE:");
+ break;
+ default:
+ tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
+ }
+ tipc_printf(buf, "PLANE(%c):",msg_net_plane(msg));
+ tipc_printf(buf, "SESS(%u):",msg_session(msg));
+ break;
+ case CHANGEOVER_PROTOCOL:
+ tipc_printf(buf, "TUNL:");
+ switch (msg_type(msg)) {
+ case DUPLICATE_MSG:
+ tipc_printf(buf, "DUPL:");
+ break;
+ case ORIGINAL_MSG:
+ tipc_printf(buf, "ORIG:");
+ tipc_printf(buf, "EXP(%u)",msg_msgcnt(msg));
+ break;
+ default:
+ tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
+ }
+ break;
+ case ROUTE_DISTRIBUTOR:
+ tipc_printf(buf, "ROUTING_MNG:");
+ switch (msg_type(msg)) {
+ case EXT_ROUTING_TABLE:
+ tipc_printf(buf, "EXT_TBL:");
+ tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
+ break;
+ case LOCAL_ROUTING_TABLE:
+ tipc_printf(buf, "LOCAL_TBL:");
+ tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
+ break;
+ case SLAVE_ROUTING_TABLE:
+ tipc_printf(buf, "DP_TBL:");
+ tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
+ break;
+ case ROUTE_ADDITION:
+ tipc_printf(buf, "ADD:");
+ tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
+ break;
+ case ROUTE_REMOVAL:
+ tipc_printf(buf, "REMOVE:");
+ tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
+ break;
+ default:
+ tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
+ }
+ break;
+ case LINK_CONFIG:
+ tipc_printf(buf, "CFG:");
+ switch (msg_type(msg)) {
+ case DSC_REQ_MSG:
+ tipc_printf(buf, "DSC_REQ:");
+ break;
+ case DSC_RESP_MSG:
+ tipc_printf(buf, "DSC_RESP:");
+ break;
+ default:
+ tipc_printf(buf, "UNKNOWN TYPE:%x:",msg_type(msg));
+ break;
+ }
+ break;
+ default:
+ tipc_printf(buf, "UNKNOWN USER:");
+ }
+
+ switch (usr) {
+ case CONN_MANAGER:
+ case NAME_DISTRIBUTOR:
+ case DATA_LOW:
+ case DATA_MEDIUM:
+ case DATA_HIGH:
+ case DATA_CRITICAL:
+ if (msg_short(msg))
+ break; /* No error */
+ switch (msg_errcode(msg)) {
+ case TIPC_OK:
+ break;
+ case TIPC_ERR_NO_NAME:
+ tipc_printf(buf, "NO_NAME:");
+ break;
+ case TIPC_ERR_NO_PORT:
+ tipc_printf(buf, "NO_PORT:");
+ break;
+ case TIPC_ERR_NO_NODE:
+ tipc_printf(buf, "NO_PROC:");
+ break;
+ case TIPC_ERR_OVERLOAD:
+ tipc_printf(buf, "OVERLOAD:");
+ break;
+ case TIPC_CONN_SHUTDOWN:
+ tipc_printf(buf, "SHUTDOWN:");
+ break;
+ default:
+ tipc_printf(buf, "UNKNOWN ERROR(%x):",
+ msg_errcode(msg));
+ }
+ default:{}
+ }
+
+ tipc_printf(buf, "HZ(%u):", msg_hdr_sz(msg));
+ tipc_printf(buf, "SZ(%u):", msg_size(msg));
+ tipc_printf(buf, "SQNO(%u):", msg_seqno(msg));
+
+ if (msg_non_seq(msg))
+ tipc_printf(buf, "NOSEQ:");
+ else {
+ tipc_printf(buf, "ACK(%u):", msg_ack(msg));
+ }
+ tipc_printf(buf, "BACK(%u):", msg_bcast_ack(msg));
+ tipc_printf(buf, "PRND(%x)", msg_prevnode(msg));
+
+ if (msg_isdata(msg)) {
+ if (msg_named(msg)) {
+ tipc_printf(buf, "NTYP(%u):", msg_nametype(msg));
+ tipc_printf(buf, "NINST(%u)", msg_nameinst(msg));
+ }
+ }
+
+ if ((usr != LINK_PROTOCOL) && (usr != LINK_CONFIG) &&
+ (usr != MSG_BUNDLER)) {
+ if (!msg_short(msg)) {
+ tipc_printf(buf, ":ORIG(%x:%u):",
+ msg_orignode(msg), msg_origport(msg));
+ tipc_printf(buf, ":DEST(%x:%u):",
+ msg_destnode(msg), msg_destport(msg));
+ } else {
+ tipc_printf(buf, ":OPRT(%u):", msg_origport(msg));
+ tipc_printf(buf, ":DPRT(%u):", msg_destport(msg));
+ }
+ if (msg_routed(msg) && !msg_non_seq(msg))
+ tipc_printf(buf, ":TSEQN(%u)", msg_transp_seqno(msg));
+ }
+ if (msg_user(msg) == NAME_DISTRIBUTOR) {
+ tipc_printf(buf, ":ONOD(%x):", msg_orignode(msg));
+ tipc_printf(buf, ":DNOD(%x):", msg_destnode(msg));
+ if (msg_routed(msg)) {
+ tipc_printf(buf, ":CSEQN(%u)", msg_transp_seqno(msg));
+ }
+ }
+
+ if (msg_user(msg) == LINK_CONFIG) {
+ u32* raw = (u32*)msg;
+ struct tipc_media_addr* orig = (struct tipc_media_addr*)&raw[5];
+ tipc_printf(buf, ":REQL(%u):", msg_req_links(msg));
+ tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg));
+ tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg));
+ media_addr_printf(buf, orig);
+ }
+ if (msg_user(msg) == BCAST_PROTOCOL) {
+ tipc_printf(buf, "BCNACK:AFTER(%u):", msg_bcgap_after(msg));
+ tipc_printf(buf, "TO(%u):", msg_bcgap_to(msg));
+ }
+ tipc_printf(buf, "\n");
+ if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg))) {
+ msg_print(buf,msg_get_wrapped(msg)," /");
+ }
+ if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) {
+ msg_print(buf,msg_get_wrapped(msg)," /");
+ }
+}
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
new file mode 100644
index 00000000000..662c81862a0
--- /dev/null
+++ b/net/tipc/msg.h
@@ -0,0 +1,818 @@
+/*
+ * net/tipc/msg.h: Include file for TIPC message header routines
+ *
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_MSG_H
+#define _TIPC_MSG_H
+
+#include <net/tipc/tipc_msg.h>
+
+#define TIPC_VERSION 2
+#define DATA_LOW TIPC_LOW_IMPORTANCE
+#define DATA_MEDIUM TIPC_MEDIUM_IMPORTANCE
+#define DATA_HIGH TIPC_HIGH_IMPORTANCE
+#define DATA_CRITICAL TIPC_CRITICAL_IMPORTANCE
+#define SHORT_H_SIZE 24 /* Connected, in cluster */
+#define DIR_MSG_H_SIZE 32 /* Directly addressed messages */
+#define CONN_MSG_H_SIZE 36 /* Routed connected msgs */
+#define LONG_H_SIZE 40 /* Named Messages */
+#define MCAST_H_SIZE 44 /* Multicast messages */
+#define MAX_H_SIZE 60 /* Including full options */
+#define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
+#define LINK_CONFIG 13
+
+
+/*
+ TIPC user data message header format, version 2
+
+ - Fundamental definitions available to privileged TIPC users
+ are located in tipc_msg.h.
+ - Remaining definitions available to TIPC internal users appear below.
+*/
+
+
+static inline void msg_set_word(struct tipc_msg *m, u32 w, u32 val)
+{
+ m->hdr[w] = htonl(val);
+}
+
+static inline void msg_set_bits(struct tipc_msg *m, u32 w,
+ u32 pos, u32 mask, u32 val)
+{
+ u32 word = msg_word(m,w) & ~(mask << pos);
+ msg_set_word(m, w, (word |= (val << pos)));
+}
+
+/*
+ * Word 0
+ */
+
+static inline u32 msg_version(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 29, 7);
+}
+
+static inline void msg_set_version(struct tipc_msg *m)
+{
+ msg_set_bits(m, 0, 29, 0xf, TIPC_VERSION);
+}
+
+static inline u32 msg_user(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 25, 0xf);
+}
+
+static inline u32 msg_isdata(struct tipc_msg *m)
+{
+ return (msg_user(m) <= DATA_CRITICAL);
+}
+
+static inline void msg_set_user(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 0, 25, 0xf, n);
+}
+
+static inline void msg_set_importance(struct tipc_msg *m, u32 i)
+{
+ msg_set_user(m, i);
+}
+
+static inline void msg_set_hdr_sz(struct tipc_msg *m,u32 n)
+{
+ msg_set_bits(m, 0, 21, 0xf, n>>2);
+}
+
+static inline int msg_non_seq(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 20, 1);
+}
+
+static inline void msg_set_non_seq(struct tipc_msg *m)
+{
+ msg_set_bits(m, 0, 20, 1, 1);
+}
+
+static inline int msg_dest_droppable(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 19, 1);
+}
+
+static inline void msg_set_dest_droppable(struct tipc_msg *m, u32 d)
+{
+ msg_set_bits(m, 0, 19, 1, d);
+}
+
+static inline int msg_src_droppable(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 18, 1);
+}
+
+static inline void msg_set_src_droppable(struct tipc_msg *m, u32 d)
+{
+ msg_set_bits(m, 0, 18, 1, d);
+}
+
+static inline void msg_set_size(struct tipc_msg *m, u32 sz)
+{
+ m->hdr[0] = htonl((msg_word(m, 0) & ~0x1ffff) | sz);
+}
+
+
+/*
+ * Word 1
+ */
+
+static inline void msg_set_type(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 1, 29, 0x7, n);
+}
+
+static inline void msg_set_errcode(struct tipc_msg *m, u32 err)
+{
+ msg_set_bits(m, 1, 25, 0xf, err);
+}
+
+static inline u32 msg_reroute_cnt(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 21, 0xf);
+}
+
+static inline void msg_incr_reroute_cnt(struct tipc_msg *m)
+{
+ msg_set_bits(m, 1, 21, 0xf, msg_reroute_cnt(m) + 1);
+}
+
+static inline void msg_reset_reroute_cnt(struct tipc_msg *m)
+{
+ msg_set_bits(m, 1, 21, 0xf, 0);
+}
+
+static inline u32 msg_lookup_scope(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 19, 0x3);
+}
+
+static inline void msg_set_lookup_scope(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 1, 19, 0x3, n);
+}
+
+static inline void msg_set_options(struct tipc_msg *m, const char *opt, u32 sz)
+{
+ u32 hsz = msg_hdr_sz(m);
+ char *to = (char *)&m->hdr[hsz/4];
+
+ if ((hsz < DIR_MSG_H_SIZE) || ((hsz + sz) > MAX_H_SIZE))
+ return;
+ msg_set_bits(m, 1, 16, 0x7, (hsz - 28)/4);
+ msg_set_hdr_sz(m, hsz + sz);
+ memcpy(to, opt, sz);
+}
+
+static inline u32 msg_bcast_ack(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 0, 0xffff);
+}
+
+static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 1, 0, 0xffff, n);
+}
+
+
+/*
+ * Word 2
+ */
+
+static inline u32 msg_ack(struct tipc_msg *m)
+{
+ return msg_bits(m, 2, 16, 0xffff);
+}
+
+static inline void msg_set_ack(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 2, 16, 0xffff, n);
+}
+
+static inline u32 msg_seqno(struct tipc_msg *m)
+{
+ return msg_bits(m, 2, 0, 0xffff);
+}
+
+static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 2, 0, 0xffff, n);
+}
+
+
+/*
+ * Words 3-10
+ */
+
+
+static inline void msg_set_prevnode(struct tipc_msg *m, u32 a)
+{
+ msg_set_word(m, 3, a);
+}
+
+static inline void msg_set_origport(struct tipc_msg *m, u32 p)
+{
+ msg_set_word(m, 4, p);
+}
+
+static inline void msg_set_destport(struct tipc_msg *m, u32 p)
+{
+ msg_set_word(m, 5, p);
+}
+
+static inline void msg_set_mc_netid(struct tipc_msg *m, u32 p)
+{
+ msg_set_word(m, 5, p);
+}
+
+static inline void msg_set_orignode(struct tipc_msg *m, u32 a)
+{
+ msg_set_word(m, 6, a);
+}
+
+static inline void msg_set_destnode(struct tipc_msg *m, u32 a)
+{
+ msg_set_word(m, 7, a);
+}
+
+static inline int msg_is_dest(struct tipc_msg *m, u32 d)
+{
+ return(msg_short(m) || (msg_destnode(m) == d));
+}
+
+static inline u32 msg_routed(struct tipc_msg *m)
+{
+ if (likely(msg_short(m)))
+ return 0;
+ return(msg_destnode(m) ^ msg_orignode(m)) >> 11;
+}
+
+static inline void msg_set_nametype(struct tipc_msg *m, u32 n)
+{
+ msg_set_word(m, 8, n);
+}
+
+static inline u32 msg_transp_seqno(struct tipc_msg *m)
+{
+ return msg_word(m, 8);
+}
+
+static inline void msg_set_timestamp(struct tipc_msg *m, u32 n)
+{
+ msg_set_word(m, 8, n);
+}
+
+static inline u32 msg_timestamp(struct tipc_msg *m)
+{
+ return msg_word(m, 8);
+}
+
+static inline void msg_set_transp_seqno(struct tipc_msg *m, u32 n)
+{
+ msg_set_word(m, 8, n);
+}
+
+static inline void msg_set_namelower(struct tipc_msg *m, u32 n)
+{
+ msg_set_word(m, 9, n);
+}
+
+static inline void msg_set_nameinst(struct tipc_msg *m, u32 n)
+{
+ msg_set_namelower(m, n);
+}
+
+static inline void msg_set_nameupper(struct tipc_msg *m, u32 n)
+{
+ msg_set_word(m, 10, n);
+}
+
+static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
+{
+ return (struct tipc_msg *)msg_data(m);
+}
+
+static inline void msg_expand(struct tipc_msg *m, u32 destnode)
+{
+ if (!msg_short(m))
+ return;
+ msg_set_hdr_sz(m, LONG_H_SIZE);
+ msg_set_orignode(m, msg_prevnode(m));
+ msg_set_destnode(m, destnode);
+ memset(&m->hdr[8], 0, 12);
+}
+
+
+
+/*
+ TIPC internal message header format, version 2
+
+ 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ w0:|vers |msg usr|hdr sz |n|resrv| packet size |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ w1:|m typ|rsv=0| sequence gap | broadcast ack no |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ w2:| link level ack no/bc_gap_from | seq no / bcast_gap_to |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ w3:| previous node |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ w4:| next sent broadcast/fragm no | next sent pkt/ fragm msg no |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ w5:| session no |rsv=0|r|berid|link prio|netpl|p|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ w6:| originating node |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ w7:| destination node |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ w8:| transport sequence number |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ w9:| msg count / bcast tag | link tolerance |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ \ \
+ / User Specific Data /
+ \ \
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ NB: CONN_MANAGER uses the data message format. LINK_CONFIG has its own format.
+*/
+
+/*
+ * Internal users
+ */
+
+#define BCAST_PROTOCOL 5
+#define MSG_BUNDLER 6
+#define LINK_PROTOCOL 7
+#define CONN_MANAGER 8
+#define ROUTE_DISTRIBUTOR 9
+#define CHANGEOVER_PROTOCOL 10
+#define NAME_DISTRIBUTOR 11
+#define MSG_FRAGMENTER 12
+#define LINK_CONFIG 13
+#define INT_H_SIZE 40
+#define DSC_H_SIZE 40
+
+/*
+ * Connection management protocol messages
+ */
+
+#define CONN_PROBE 0
+#define CONN_PROBE_REPLY 1
+#define CONN_ACK 2
+
+/*
+ * Name distributor messages
+ */
+
+#define PUBLICATION 0
+#define WITHDRAWAL 1
+
+
+/*
+ * Word 1
+ */
+
+static inline u32 msg_seq_gap(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 16, 0xff);
+}
+
+static inline void msg_set_seq_gap(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 1, 16, 0xff, n);
+}
+
+static inline u32 msg_req_links(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 16, 0xfff);
+}
+
+static inline void msg_set_req_links(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 1, 16, 0xfff, n);
+}
+
+
+/*
+ * Word 2
+ */
+
+static inline u32 msg_dest_domain(struct tipc_msg *m)
+{
+ return msg_word(m, 2);
+}
+
+static inline void msg_set_dest_domain(struct tipc_msg *m, u32 n)
+{
+ msg_set_word(m, 2, n);
+}
+
+static inline u32 msg_bcgap_after(struct tipc_msg *m)
+{
+ return msg_bits(m, 2, 16, 0xffff);
+}
+
+static inline void msg_set_bcgap_after(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 2, 16, 0xffff, n);
+}
+
+static inline u32 msg_bcgap_to(struct tipc_msg *m)
+{
+ return msg_bits(m, 2, 0, 0xffff);
+}
+
+static inline void msg_set_bcgap_to(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 2, 0, 0xffff, n);
+}
+
+
+/*
+ * Word 4
+ */
+
+static inline u32 msg_last_bcast(struct tipc_msg *m)
+{
+ return msg_bits(m, 4, 16, 0xffff);
+}
+
+static inline void msg_set_last_bcast(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 4, 16, 0xffff, n);
+}
+
+
+static inline u32 msg_fragm_no(struct tipc_msg *m)
+{
+ return msg_bits(m, 4, 16, 0xffff);
+}
+
+static inline void msg_set_fragm_no(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 4, 16, 0xffff, n);
+}
+
+
+static inline u32 msg_next_sent(struct tipc_msg *m)
+{
+ return msg_bits(m, 4, 0, 0xffff);
+}
+
+static inline void msg_set_next_sent(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 4, 0, 0xffff, n);
+}
+
+
+static inline u32 msg_long_msgno(struct tipc_msg *m)
+{
+ return msg_bits(m, 4, 0, 0xffff);
+}
+
+static inline void msg_set_long_msgno(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 4, 0, 0xffff, n);
+}
+
+static inline u32 msg_bc_netid(struct tipc_msg *m)
+{
+ return msg_word(m, 4);
+}
+
+static inline void msg_set_bc_netid(struct tipc_msg *m, u32 id)
+{
+ msg_set_word(m, 4, id);
+}
+
+static inline u32 msg_link_selector(struct tipc_msg *m)
+{
+ return msg_bits(m, 4, 0, 1);
+}
+
+static inline void msg_set_link_selector(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 4, 0, 1, (n & 1));
+}
+
+/*
+ * Word 5
+ */
+
+static inline u32 msg_session(struct tipc_msg *m)
+{
+ return msg_bits(m, 5, 16, 0xffff);
+}
+
+static inline void msg_set_session(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 5, 16, 0xffff, n);
+}
+
+static inline u32 msg_probe(struct tipc_msg *m)
+{
+ return msg_bits(m, 5, 0, 1);
+}
+
+static inline void msg_set_probe(struct tipc_msg *m, u32 val)
+{
+ msg_set_bits(m, 5, 0, 1, (val & 1));
+}
+
+static inline char msg_net_plane(struct tipc_msg *m)
+{
+ return msg_bits(m, 5, 1, 7) + 'A';
+}
+
+static inline void msg_set_net_plane(struct tipc_msg *m, char n)
+{
+ msg_set_bits(m, 5, 1, 7, (n - 'A'));
+}
+
+static inline u32 msg_linkprio(struct tipc_msg *m)
+{
+ return msg_bits(m, 5, 4, 0x1f);
+}
+
+static inline void msg_set_linkprio(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 5, 4, 0x1f, n);
+}
+
+static inline u32 msg_bearer_id(struct tipc_msg *m)
+{
+ return msg_bits(m, 5, 9, 0x7);
+}
+
+static inline void msg_set_bearer_id(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 5, 9, 0x7, n);
+}
+
+static inline u32 msg_redundant_link(struct tipc_msg *m)
+{
+ return msg_bits(m, 5, 12, 0x1);
+}
+
+static inline void msg_set_redundant_link(struct tipc_msg *m)
+{
+ msg_set_bits(m, 5, 12, 0x1, 1);
+}
+
+static inline void msg_clear_redundant_link(struct tipc_msg *m)
+{
+ msg_set_bits(m, 5, 12, 0x1, 0);
+}
+
+
+/*
+ * Word 9
+ */
+
+static inline u32 msg_msgcnt(struct tipc_msg *m)
+{
+ return msg_bits(m, 9, 16, 0xffff);
+}
+
+static inline void msg_set_msgcnt(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 9, 16, 0xffff, n);
+}
+
+static inline u32 msg_bcast_tag(struct tipc_msg *m)
+{
+ return msg_bits(m, 9, 16, 0xffff);
+}
+
+static inline void msg_set_bcast_tag(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 9, 16, 0xffff, n);
+}
+
+static inline u32 msg_max_pkt(struct tipc_msg *m)
+{
+ return (msg_bits(m, 9, 16, 0xffff) * 4);
+}
+
+static inline void msg_set_max_pkt(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 9, 16, 0xffff, (n / 4));
+}
+
+static inline u32 msg_link_tolerance(struct tipc_msg *m)
+{
+ return msg_bits(m, 9, 0, 0xffff);
+}
+
+static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 9, 0, 0xffff, n);
+}
+
+/*
+ * Routing table message data
+ */
+
+
+static inline u32 msg_remote_node(struct tipc_msg *m)
+{
+ return msg_word(m, msg_hdr_sz(m)/4);
+}
+
+static inline void msg_set_remote_node(struct tipc_msg *m, u32 a)
+{
+ msg_set_word(m, msg_hdr_sz(m)/4, a);
+}
+
+static inline int msg_dataoctet(struct tipc_msg *m, u32 pos)
+{
+ return(msg_data(m)[pos + 4] != 0);
+}
+
+static inline void msg_set_dataoctet(struct tipc_msg *m, u32 pos)
+{
+ msg_data(m)[pos + 4] = 1;
+}
+
+/*
+ * Segmentation message types
+ */
+
+#define FIRST_FRAGMENT 0
+#define FRAGMENT 1
+#define LAST_FRAGMENT 2
+
+/*
+ * Link management protocol message types
+ */
+
+#define STATE_MSG 0
+#define RESET_MSG 1
+#define ACTIVATE_MSG 2
+
+/*
+ * Changeover tunnel message types
+ */
+#define DUPLICATE_MSG 0
+#define ORIGINAL_MSG 1
+
+/*
+ * Routing table message types
+ */
+#define EXT_ROUTING_TABLE 0
+#define LOCAL_ROUTING_TABLE 1
+#define SLAVE_ROUTING_TABLE 2
+#define ROUTE_ADDITION 3
+#define ROUTE_REMOVAL 4
+
+/*
+ * Config protocol message types
+ */
+
+#define DSC_REQ_MSG 0
+#define DSC_RESP_MSG 1
+
+static inline u32 msg_tot_importance(struct tipc_msg *m)
+{
+ if (likely(msg_isdata(m))) {
+ if (likely(msg_orignode(m) == tipc_own_addr))
+ return msg_importance(m);
+ return msg_importance(m) + 4;
+ }
+ if ((msg_user(m) == MSG_FRAGMENTER) &&
+ (msg_type(m) == FIRST_FRAGMENT))
+ return msg_importance(msg_get_wrapped(m));
+ return msg_importance(m);
+}
+
+
+static inline void msg_init(struct tipc_msg *m, u32 user, u32 type,
+ u32 err, u32 hsize, u32 destnode)
+{
+ memset(m, 0, hsize);
+ msg_set_version(m);
+ msg_set_user(m, user);
+ msg_set_hdr_sz(m, hsize);
+ msg_set_size(m, hsize);
+ msg_set_prevnode(m, tipc_own_addr);
+ msg_set_type(m, type);
+ msg_set_errcode(m, err);
+ if (!msg_short(m)) {
+ msg_set_orignode(m, tipc_own_addr);
+ msg_set_destnode(m, destnode);
+ }
+}
+
+/**
+ * msg_calc_data_size - determine total data size for message
+ */
+
+static inline int msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect)
+{
+ int dsz = 0;
+ int i;
+
+ for (i = 0; i < num_sect; i++)
+ dsz += msg_sect[i].iov_len;
+ return dsz;
+}
+
+/**
+ * msg_build - create message using specified header and data
+ *
+ * Note: Caller must not hold any locks in case copy_from_user() is interrupted!
+ *
+ * Returns message data size or errno
+ */
+
+static inline int msg_build(struct tipc_msg *hdr,
+ struct iovec const *msg_sect, u32 num_sect,
+ int max_size, int usrmem, struct sk_buff** buf)
+{
+ int dsz, sz, hsz, pos, res, cnt;
+
+ dsz = msg_calc_data_size(msg_sect, num_sect);
+ if (unlikely(dsz > TIPC_MAX_USER_MSG_SIZE)) {
+ *buf = NULL;
+ return -EINVAL;
+ }
+
+ pos = hsz = msg_hdr_sz(hdr);
+ sz = hsz + dsz;
+ msg_set_size(hdr, sz);
+ if (unlikely(sz > max_size)) {
+ *buf = NULL;
+ return dsz;
+ }
+
+ *buf = buf_acquire(sz);
+ if (!(*buf))
+ return -ENOMEM;
+ memcpy((*buf)->data, (unchar *)hdr, hsz);
+ for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) {
+ if (likely(usrmem))
+ res = !copy_from_user((*buf)->data + pos,
+ msg_sect[cnt].iov_base,
+ msg_sect[cnt].iov_len);
+ else
+ memcpy((*buf)->data + pos, msg_sect[cnt].iov_base,
+ msg_sect[cnt].iov_len);
+ pos += msg_sect[cnt].iov_len;
+ }
+ if (likely(res))
+ return dsz;
+
+ buf_discard(*buf);
+ *buf = NULL;
+ return -EFAULT;
+}
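+
+/*
+ * Illustrative caller sketch (the local variables shown are assumptions, not
+ * declarations from this header): building a long-header message from an
+ * iovec, in the style of the name distributor:
+ *
+ *	struct tipc_msg hdr;
+ *	struct sk_buff *buf;
+ *	int dsz;
+ *
+ *	msg_init(&hdr, NAME_DISTRIBUTOR, PUBLICATION, TIPC_OK,
+ *		 LONG_H_SIZE, destnode);
+ *	dsz = msg_build(&hdr, msg_sect, num_sect, max_size, 0, &buf);
+ *	if (dsz < 0)
+ *		return dsz;		-- -EINVAL, -ENOMEM or -EFAULT
+ *	if (buf == NULL)
+ *		return dsz;		-- message exceeds max_size; fragment it
+ */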
+
+
+struct tipc_media_addr;
+
+extern void msg_set_media_addr(struct tipc_msg *m,
+ struct tipc_media_addr *a);
+
+extern void msg_get_media_addr(struct tipc_msg *m,
+ struct tipc_media_addr *a);
+
+
+#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
new file mode 100644
index 00000000000..41cbaf1a4a7
--- /dev/null
+++ b/net/tipc/name_distr.c
@@ -0,0 +1,309 @@
+/*
+ * net/tipc/name_distr.c: TIPC name distribution code
+ *
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "cluster.h"
+#include "dbg.h"
+#include "link.h"
+#include "msg.h"
+#include "name_distr.h"
+
+#undef DBG_OUTPUT
+#define DBG_OUTPUT NULL
+
+#define ITEM_SIZE sizeof(struct distr_item)
+
+/**
+ * struct distr_item - publication info distributed to other nodes
+ * @type: name sequence type
+ * @lower: name sequence lower bound
+ * @upper: name sequence upper bound
+ * @ref: publishing port reference
+ * @key: publication key
+ *
+ * ===> All fields are stored in network byte order. <===
+ *
+ * First 3 fields identify (name or) name sequence being published.
+ * Reference field uniquely identifies port that published name sequence.
+ * Key field uniquely identifies publication, in the event a port has
+ * multiple publications of the same name sequence.
+ *
+ * Note: There is no field that identifies the publishing node because it is
+ * the same for all items contained within a publication message.
+ */
+
+struct distr_item {
+ u32 type;
+ u32 lower;
+ u32 upper;
+ u32 ref;
+ u32 key;
+};
+
+/**
+ * List of externally visible publications by this node --
+ * that is, all publications having a scope other than TIPC_NODE_SCOPE.
+ */
+
+static LIST_HEAD(publ_root);
+static u32 publ_cnt = 0;
+
+/**
+ * publ_to_item - add publication info to a publication message
+ */
+
+static void publ_to_item(struct distr_item *i, struct publication *p)
+{
+ i->type = htonl(p->type);
+ i->lower = htonl(p->lower);
+ i->upper = htonl(p->upper);
+ i->ref = htonl(p->ref);
+ i->key = htonl(p->key);
+ dbg("publ_to_item: %u, %u, %u\n", p->type, p->lower, p->upper);
+}
+
+/**
+ * named_prepare_buf - allocate & initialize a publication message
+ */
+
+static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
+{
+ struct sk_buff *buf = buf_acquire(LONG_H_SIZE + size);
+ struct tipc_msg *msg;
+
+ if (buf != NULL) {
+ msg = buf_msg(buf);
+ msg_init(msg, NAME_DISTRIBUTOR, type, TIPC_OK,
+ LONG_H_SIZE, dest);
+ msg_set_size(msg, LONG_H_SIZE + size);
+ }
+ return buf;
+}
+
+/**
+ * named_publish - tell other nodes about a new publication by this node
+ */
+
+void named_publish(struct publication *publ)
+{
+ struct sk_buff *buf;
+ struct distr_item *item;
+
+ list_add(&publ->local_list, &publ_root);
+ publ_cnt++;
+
+ buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
+ if (!buf) {
+ warn("Memory squeeze; failed to distribute publication\n");
+ return;
+ }
+
+ item = (struct distr_item *)msg_data(buf_msg(buf));
+ publ_to_item(item, publ);
+ dbg("named_publish: broadcasting publish msg\n");
+ cluster_broadcast(buf);
+}
+
+/**
+ * named_withdraw - tell other nodes about a withdrawn publication by this node
+ */
+
+void named_withdraw(struct publication *publ)
+{
+ struct sk_buff *buf;
+ struct distr_item *item;
+
+ list_del(&publ->local_list);
+ publ_cnt--;
+
+ buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
+ if (!buf) {
+ warn("Memory squeeze; failed to distribute withdrawal\n");
+ return;
+ }
+
+ item = (struct distr_item *)msg_data(buf_msg(buf));
+ publ_to_item(item, publ);
+ dbg("named_withdraw: broadcasting withdraw msg\n");
+ cluster_broadcast(buf);
+}
+
+/**
+ * named_node_up - tell specified node about all publications by this node
+ */
+
+void named_node_up(unsigned long node)
+{
+ struct publication *publ;
+ struct distr_item *item = 0;
+ struct sk_buff *buf = 0;
+ u32 left = 0;
+ u32 rest;
+ u32 max_item_buf;
+
+ assert(in_own_cluster(node));
+ read_lock_bh(&nametbl_lock);
+ max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
+ max_item_buf *= ITEM_SIZE;
+ rest = publ_cnt * ITEM_SIZE;
+
+ list_for_each_entry(publ, &publ_root, local_list) {
+ if (!buf) {
+ left = (rest <= max_item_buf) ? rest : max_item_buf;
+ rest -= left;
+ buf = named_prepare_buf(PUBLICATION, left, node);
+ if (buf == NULL) {
+ warn("Memory squeeze; could not send publication\n");
+ goto exit;
+ }
+ item = (struct distr_item *)msg_data(buf_msg(buf));
+ }
+ publ_to_item(item, publ);
+ item++;
+ left -= ITEM_SIZE;
+ if (!left) {
+ msg_set_link_selector(buf_msg(buf), node);
+ dbg("named_node_up: sending publish msg to "
+ "<%u.%u.%u>\n", tipc_zone(node),
+ tipc_cluster(node), tipc_node(node));
+ link_send(buf, node, node);
+ buf = 0;
+ }
+ }
+exit:
+ read_unlock_bh(&nametbl_lock);
+}
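+
+/*
+ * Sizing note (worked example; the figures are illustrative): each distr_item
+ * is five u32 fields, i.e. ITEM_SIZE is 20 bytes, so max_item_buf above is
+ * the largest multiple of 20 that fits in TIPC_MAX_USER_MSG_SIZE; if that
+ * limit were 66000 bytes, each bulk message would carry 3300 items.
+ */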
+
+/**
+ * node_is_down - remove publication associated with a failed node
+ *
+ * Invoked for each publication issued by a newly failed node.
+ * Removes publication structure from name table & deletes it.
+ * In rare cases the link may have come back up again when this
+ * function is called, and we have two items representing the same
+ * publication. Nudge this item's key to distinguish it from the other.
+ * (Note: Publication's node subscription is already unsubscribed.)
+ */
+
+static void node_is_down(struct publication *publ)
+{
+ struct publication *p;
+ write_lock_bh(&nametbl_lock);
+ dbg("node_is_down: withdrawing %u, %u, %u\n",
+ publ->type, publ->lower, publ->upper);
+ publ->key += 1222345;
+ p = nametbl_remove_publ(publ->type, publ->lower,
+ publ->node, publ->ref, publ->key);
+ assert(p == publ);
+ write_unlock_bh(&nametbl_lock);
+ if (publ)
+ kfree(publ);
+}
+
+/**
+ * named_recv - process name table update message sent by another node
+ */
+
+void named_recv(struct sk_buff *buf)
+{
+ struct publication *publ;
+ struct tipc_msg *msg = buf_msg(buf);
+ struct distr_item *item = (struct distr_item *)msg_data(msg);
+ u32 count = msg_data_sz(msg) / ITEM_SIZE;
+
+ write_lock_bh(&nametbl_lock);
+ while (count--) {
+ if (msg_type(msg) == PUBLICATION) {
+ dbg("named_recv: got publication for %u, %u, %u\n",
+ ntohl(item->type), ntohl(item->lower),
+ ntohl(item->upper));
+ publ = nametbl_insert_publ(ntohl(item->type),
+ ntohl(item->lower),
+ ntohl(item->upper),
+ TIPC_CLUSTER_SCOPE,
+ msg_orignode(msg),
+ ntohl(item->ref),
+ ntohl(item->key));
+ if (publ) {
+ nodesub_subscribe(&publ->subscr,
+ msg_orignode(msg),
+ publ,
+ (net_ev_handler)node_is_down);
+ }
+ } else if (msg_type(msg) == WITHDRAWAL) {
+ dbg("named_recv: got withdrawal for %u, %u, %u\n",
+ ntohl(item->type), ntohl(item->lower),
+ ntohl(item->upper));
+ publ = nametbl_remove_publ(ntohl(item->type),
+ ntohl(item->lower),
+ msg_orignode(msg),
+ ntohl(item->ref),
+ ntohl(item->key));
+
+ if (publ) {
+ nodesub_unsubscribe(&publ->subscr);
+ kfree(publ);
+ }
+ } else {
+ warn("named_recv: unknown msg\n");
+ }
+ item++;
+ }
+ write_unlock_bh(&nametbl_lock);
+ buf_discard(buf);
+}
+
+/**
+ * named_reinit - re-initialize local publication list
+ *
+ * This routine is called whenever TIPC networking is (re)enabled.
+ * All existing publications by this node that have "cluster" or "zone" scope
+ * are updated to reflect the node's current network address.
+ * (If the node's address is unchanged, the update loop terminates immediately.)
+ */
+
+void named_reinit(void)
+{
+ struct publication *publ;
+
+ write_lock_bh(&nametbl_lock);
+ list_for_each_entry(publ, &publ_root, local_list) {
+ if (publ->node == tipc_own_addr)
+ break;
+ publ->node = tipc_own_addr;
+ }
+ write_unlock_bh(&nametbl_lock);
+}
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
new file mode 100644
index 00000000000..a04bdeac84e
--- /dev/null
+++ b/net/tipc/name_distr.h
@@ -0,0 +1,48 @@
+/*
+ * net/tipc/name_distr.h: Include file for TIPC name distribution code
+ *
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_NAME_DISTR_H
+#define _TIPC_NAME_DISTR_H
+
+#include "name_table.h"
+
+void named_publish(struct publication *publ);
+void named_withdraw(struct publication *publ);
+void named_node_up(unsigned long node);
+void named_recv(struct sk_buff *buf);
+void named_reinit(void);
+
+#endif
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
new file mode 100644
index 00000000000..972c83eb83b
--- /dev/null
+++ b/net/tipc/name_table.c
@@ -0,0 +1,1079 @@
+/*
+ * net/tipc/name_table.c: TIPC name table code
+ *
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "config.h"
+#include "dbg.h"
+#include "name_table.h"
+#include "name_distr.h"
+#include "addr.h"
+#include "node_subscr.h"
+#include "subscr.h"
+#include "port.h"
+#include "cluster.h"
+#include "bcast.h"
+
+int tipc_nametbl_size = 1024; /* must be a power of 2 */
+
+/**
+ * struct sub_seq - container for all published instances of a name sequence
+ * @lower: name sequence lower bound
+ * @upper: name sequence upper bound
+ * @node_list: circular list of matching publications with >= node scope
+ * @cluster_list: circular list of matching publications with >= cluster scope
+ * @zone_list: circular list of matching publications with >= zone scope
+ */
+
+struct sub_seq {
+ u32 lower;
+ u32 upper;
+ struct publication *node_list;
+ struct publication *cluster_list;
+ struct publication *zone_list;
+};
+
+/**
+ * struct name_seq - container for all published instances of a name type
+ * @type: 32 bit 'type' value for name sequence
+ * @sseqs: pointer to dynamically-sized array of sub-sequences of this 'type';
+ * sub-sequences are sorted in ascending order
+ * @alloc: number of sub-sequences currently in array
+ * @first_free: array index of first unused sub-sequence entry
+ * @ns_list: links to adjacent name sequences in hash chain
+ * @subscriptions: list of subscriptions for this 'type'
+ * @lock: spinlock controlling access to name sequence structure
+ */
+
+struct name_seq {
+ u32 type;
+ struct sub_seq *sseqs;
+ u32 alloc;
+ u32 first_free;
+ struct hlist_node ns_list;
+ struct list_head subscriptions;
+ spinlock_t lock;
+};
+
+/**
+ * struct name_table - table containing all existing port name publications
+ * @types: pointer to fixed-sized array of name sequence lists,
+ * accessed via hashing on 'type'; name sequence lists are *not* sorted
+ * @local_publ_count: number of publications issued by this node
+ */
+
+struct name_table {
+ struct hlist_head *types;
+ u32 local_publ_count;
+};
+
+struct name_table table = { NULL };
+static atomic_t rsv_publ_ok = ATOMIC_INIT(0);
+rwlock_t nametbl_lock = RW_LOCK_UNLOCKED;
+
+
+static inline int hash(int x)
+{
+ return(x & (tipc_nametbl_size - 1));
+}
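+
+/*
+ * Worked example: with tipc_nametbl_size = 1024 the mask is 0x3ff, so
+ * hash(4711) == 4711 & 1023 == 615; keeping the table size a power of 2
+ * is what lets the bucket selection reduce to this single AND.
+ */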
+
+/**
+ * publ_create - create a publication structure
+ */
+
+static struct publication *publ_create(u32 type, u32 lower, u32 upper,
+ u32 scope, u32 node, u32 port_ref,
+ u32 key)
+{
+ struct publication *publ =
+ (struct publication *)kmalloc(sizeof(*publ), GFP_ATOMIC);
+ if (publ == NULL) {
+ warn("Memory squeeze; failed to create publication\n");
+ return 0;
+ }
+
+ memset(publ, 0, sizeof(*publ));
+ publ->type = type;
+ publ->lower = lower;
+ publ->upper = upper;
+ publ->scope = scope;
+ publ->node = node;
+ publ->ref = port_ref;
+ publ->key = key;
+ INIT_LIST_HEAD(&publ->local_list);
+ INIT_LIST_HEAD(&publ->pport_list);
+ INIT_LIST_HEAD(&publ->subscr.nodesub_list);
+ return publ;
+}
+
+/**
+ * subseq_alloc - allocate a specified number of sub-sequence structures
+ */
+
+struct sub_seq *subseq_alloc(u32 cnt)
+{
+ u32 sz = cnt * sizeof(struct sub_seq);
+ struct sub_seq *sseq = (struct sub_seq *)kmalloc(sz, GFP_ATOMIC);
+
+ if (sseq)
+ memset(sseq, 0, sz);
+ return sseq;
+}
+
+/**
+ * nameseq_create - create a name sequence structure for the specified 'type'
+ *
+ * Allocates a single sub-sequence structure and sets it to all 0's.
+ */
+
+struct name_seq *nameseq_create(u32 type, struct hlist_head *seq_head)
+{
+ struct name_seq *nseq =
+ (struct name_seq *)kmalloc(sizeof(*nseq), GFP_ATOMIC);
+ struct sub_seq *sseq = subseq_alloc(1);
+
+ if (!nseq || !sseq) {
+ warn("Memory squeeze; failed to create name sequence\n");
+ kfree(nseq);
+ kfree(sseq);
+ return 0;
+ }
+
+ memset(nseq, 0, sizeof(*nseq));
+ nseq->lock = SPIN_LOCK_UNLOCKED;
+ nseq->type = type;
+ nseq->sseqs = sseq;
+ dbg("nameseq_create() nseq = %x type %u, ssseqs %x, ff: %u\n",
+ nseq, type, nseq->sseqs, nseq->first_free);
+ nseq->alloc = 1;
+ INIT_HLIST_NODE(&nseq->ns_list);
+ INIT_LIST_HEAD(&nseq->subscriptions);
+ hlist_add_head(&nseq->ns_list, seq_head);
+ return nseq;
+}
+
+/**
+ * nameseq_find_subseq - find sub-sequence (if any) matching a name instance
+ *
+ * Very time-critical, so binary searches through sub-sequence array.
+ */
+
+static inline struct sub_seq *nameseq_find_subseq(struct name_seq *nseq,
+ u32 instance)
+{
+ struct sub_seq *sseqs = nseq->sseqs;
+ int low = 0;
+ int high = nseq->first_free - 1;
+ int mid;
+
+ while (low <= high) {
+ mid = (low + high) / 2;
+ if (instance < sseqs[mid].lower)
+ high = mid - 1;
+ else if (instance > sseqs[mid].upper)
+ low = mid + 1;
+ else
+ return &sseqs[mid];
+ }
+ return 0;
+}
+
+/**
+ * nameseq_locate_subseq - determine position of name instance in sub-sequence
+ *
+ * Returns index in sub-sequence array of the entry that contains the specified
+ * instance value; if no entry contains that value, returns the position
+ * where a new entry for it would be inserted in the array.
+ *
+ * Note: Similar to binary search code for locating a sub-sequence.
+ */
+
+static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
+{
+ struct sub_seq *sseqs = nseq->sseqs;
+ int low = 0;
+ int high = nseq->first_free - 1;
+ int mid;
+
+ while (low <= high) {
+ mid = (low + high) / 2;
+ if (instance < sseqs[mid].lower)
+ high = mid - 1;
+ else if (instance > sseqs[mid].upper)
+ low = mid + 1;
+ else
+ return mid;
+ }
+ return low;
+}
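+
+/*
+ * Worked example (illustrative): if the array holds the sub-sequences
+ * {10-19} and {30-39}, nameseq_find_subseq(nseq, 15) returns the {10-19}
+ * entry while nameseq_find_subseq(nseq, 25) returns 0; for the same value,
+ * nameseq_locate_subseq(nseq, 25) returns 1, the index at which a new
+ * {20-29} sub-sequence would be inserted.
+ */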
+
+/**
+ * nameseq_insert_publ - insert a new publication into a name sequence
+ */
+
+struct publication *nameseq_insert_publ(struct name_seq *nseq,
+ u32 type, u32 lower, u32 upper,
+ u32 scope, u32 node, u32 port, u32 key)
+{
+ struct subscription *s;
+ struct subscription *st;
+ struct publication *publ;
+ struct sub_seq *sseq;
+ int created_subseq = 0;
+
+ assert(nseq->first_free <= nseq->alloc);
+ sseq = nameseq_find_subseq(nseq, lower);
+ dbg("nameseq_ins: for seq %x,<%u,%u>, found sseq %x\n",
+ nseq, type, lower, sseq);
+ if (sseq) {
+
+ /* Lower end overlaps existing entry => need an exact match */
+
+ if ((sseq->lower != lower) || (sseq->upper != upper)) {
+ warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper);
+ return 0;
+ }
+ } else {
+ u32 inspos;
+ struct sub_seq *freesseq;
+
+ /* Find where lower end should be inserted */
+
+ inspos = nameseq_locate_subseq(nseq, lower);
+
+ /* Fail if upper end overlaps into an existing entry */
+
+ if ((inspos < nseq->first_free) &&
+ (upper >= nseq->sseqs[inspos].lower)) {
+ warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper);
+ return 0;
+ }
+
+ /* Ensure there is space for new sub-sequence */
+
+ if (nseq->first_free == nseq->alloc) {
+ struct sub_seq *sseqs = nseq->sseqs;
+ nseq->sseqs = subseq_alloc(nseq->alloc * 2);
+ if (nseq->sseqs != NULL) {
+ memcpy(nseq->sseqs, sseqs,
+ nseq->alloc * sizeof (struct sub_seq));
+ kfree(sseqs);
+ dbg("Allocated %u sseqs\n", nseq->alloc);
+ nseq->alloc *= 2;
+ } else {
+ warn("Memory squeeze; failed to create sub-sequence\n");
+ return 0;
+ }
+ }
+ dbg("Have %u sseqs for type %u\n", nseq->alloc, type);
+
+ /* Insert new sub-sequence */
+
+ dbg("ins in pos %u, ff = %u\n", inspos, nseq->first_free);
+ sseq = &nseq->sseqs[inspos];
+ freesseq = &nseq->sseqs[nseq->first_free];
+ memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof (*sseq));
+ memset(sseq, 0, sizeof (*sseq));
+ nseq->first_free++;
+ sseq->lower = lower;
+ sseq->upper = upper;
+ created_subseq = 1;
+ }
+ dbg("inserting (%u %u %u) from %x:%u into sseq %x(%u,%u) of seq %x\n",
+ type, lower, upper, node, port, sseq,
+ sseq->lower, sseq->upper, nseq);
+
+ /* Insert a publication: */
+
+ publ = publ_create(type, lower, upper, scope, node, port, key);
+ if (!publ)
+ return 0;
+ dbg("inserting publ %x, node=%x publ->node=%x, subscr->node=%x\n",
+ publ, node, publ->node, publ->subscr.node);
+
+ if (!sseq->zone_list)
+ sseq->zone_list = publ->zone_list_next = publ;
+ else {
+ publ->zone_list_next = sseq->zone_list->zone_list_next;
+ sseq->zone_list->zone_list_next = publ;
+ }
+
+ if (in_own_cluster(node)) {
+ if (!sseq->cluster_list)
+ sseq->cluster_list = publ->cluster_list_next = publ;
+ else {
+ publ->cluster_list_next =
+ sseq->cluster_list->cluster_list_next;
+ sseq->cluster_list->cluster_list_next = publ;
+ }
+ }
+
+ if (node == tipc_own_addr) {
+ if (!sseq->node_list)
+ sseq->node_list = publ->node_list_next = publ;
+ else {
+ publ->node_list_next = sseq->node_list->node_list_next;
+ sseq->node_list->node_list_next = publ;
+ }
+ }
+
+ /*
+ * Any subscriptions waiting for notification?
+ */
+ list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
+ dbg("calling report_overlap()\n");
+ subscr_report_overlap(s,
+ publ->lower,
+ publ->upper,
+ TIPC_PUBLISHED,
+ publ->ref,
+ publ->node,
+ created_subseq);
+ }
+ return publ;
+}
+
+/**
+ * nameseq_remove_publ - remove a publication from a name sequence
+ */
+
+struct publication *nameseq_remove_publ(struct name_seq *nseq, u32 inst,
+ u32 node, u32 ref, u32 key)
+{
+ struct publication *publ;
+ struct publication *prev;
+ struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
+ struct sub_seq *free;
+ struct subscription *s, *st;
+ int removed_subseq = 0;
+
+ assert(nseq);
+
+ if (!sseq) {
+ int i;
+
+ warn("Withdraw unknown <%u,%u>?\n", nseq->type, inst);
+ assert(nseq->sseqs);
+ dbg("Dumping subseqs %x for %x, alloc = %u,ff=%u\n",
+ nseq->sseqs, nseq, nseq->alloc,
+ nseq->first_free);
+ for (i = 0; i < nseq->first_free; i++) {
+ dbg("Subseq %u(%x): lower = %u,upper = %u\n",
+ i, &nseq->sseqs[i], nseq->sseqs[i].lower,
+ nseq->sseqs[i].upper);
+ }
+ return 0;
+ }
+ dbg("nameseq_remove: seq: %x, sseq %x, <%u,%u> key %u\n",
+ nseq, sseq, nseq->type, inst, key);
+
+ prev = sseq->zone_list;
+ publ = sseq->zone_list->zone_list_next;
+ while ((publ->key != key) || (publ->ref != ref) ||
+ (publ->node && (publ->node != node))) {
+ prev = publ;
+ publ = publ->zone_list_next;
+ assert(prev != sseq->zone_list);
+ }
+ if (publ != sseq->zone_list)
+ prev->zone_list_next = publ->zone_list_next;
+ else if (publ->zone_list_next != publ) {
+ prev->zone_list_next = publ->zone_list_next;
+ sseq->zone_list = publ->zone_list_next;
+ } else {
+ sseq->zone_list = 0;
+ }
+
+ if (in_own_cluster(node)) {
+ prev = sseq->cluster_list;
+ publ = sseq->cluster_list->cluster_list_next;
+ while ((publ->key != key) || (publ->ref != ref) ||
+ (publ->node && (publ->node != node))) {
+ prev = publ;
+ publ = publ->cluster_list_next;
+ assert(prev != sseq->cluster_list);
+ }
+ if (publ != sseq->cluster_list)
+ prev->cluster_list_next = publ->cluster_list_next;
+ else if (publ->cluster_list_next != publ) {
+ prev->cluster_list_next = publ->cluster_list_next;
+ sseq->cluster_list = publ->cluster_list_next;
+ } else {
+ sseq->cluster_list = 0;
+ }
+ }
+
+ if (node == tipc_own_addr) {
+ prev = sseq->node_list;
+ publ = sseq->node_list->node_list_next;
+ while ((publ->key != key) || (publ->ref != ref) ||
+ (publ->node && (publ->node != node))) {
+ prev = publ;
+ publ = publ->node_list_next;
+ assert(prev != sseq->node_list);
+ }
+ if (publ != sseq->node_list)
+ prev->node_list_next = publ->node_list_next;
+ else if (publ->node_list_next != publ) {
+ prev->node_list_next = publ->node_list_next;
+ sseq->node_list = publ->node_list_next;
+ } else {
+ sseq->node_list = 0;
+ }
+ }
+ assert(!publ->node || (publ->node == node));
+ assert(publ->ref == ref);
+ assert(publ->key == key);
+
+ /*
+ * Contract subseq list if no more publications:
+ */
+ if (!sseq->node_list && !sseq->cluster_list && !sseq->zone_list) {
+ free = &nseq->sseqs[nseq->first_free--];
+ memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof (*sseq));
+ removed_subseq = 1;
+ }
+
+ /*
+ * Any subscriptions waiting ?
+ */
+ list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
+ subscr_report_overlap(s,
+ publ->lower,
+ publ->upper,
+ TIPC_WITHDRAWN,
+ publ->ref,
+ publ->node,
+ removed_subseq);
+ }
+ return publ;
+}
+
+/**
+ * nameseq_subscribe - attach a subscription, and issue the prescribed
+ * number of events if there is any sub-sequence overlapping with the
+ * requested sequence
+ */
+
+void nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
+{
+ struct sub_seq *sseq = nseq->sseqs;
+
+ list_add(&s->nameseq_list, &nseq->subscriptions);
+
+ if (!sseq)
+ return;
+
+ while (sseq != &nseq->sseqs[nseq->first_free]) {
+ struct publication *zl = sseq->zone_list;
+ if (zl && subscr_overlap(s,sseq->lower,sseq->upper)) {
+ struct publication *crs = zl;
+ int must_report = 1;
+
+ do {
+ subscr_report_overlap(s,
+ sseq->lower,
+ sseq->upper,
+ TIPC_PUBLISHED,
+ crs->ref,
+ crs->node,
+ must_report);
+ must_report = 0;
+ crs = crs->zone_list_next;
+ } while (crs != zl);
+ }
+ sseq++;
+ }
+}
+
+static struct name_seq *nametbl_find_seq(u32 type)
+{
+ struct hlist_head *seq_head;
+ struct hlist_node *seq_node;
+ struct name_seq *ns;
+
+ dbg("find_seq %u,(%u,0x%x) table = %p, hash[type] = %u\n",
+ type, ntohl(type), type, table.types, hash(type));
+
+ seq_head = &table.types[hash(type)];
+ hlist_for_each_entry(ns, seq_node, seq_head, ns_list) {
+ if (ns->type == type) {
+ dbg("found %x\n", ns);
+ return ns;
+ }
+ }
+
+ return 0;
+}
+
+struct publication *nametbl_insert_publ(u32 type, u32 lower, u32 upper,
+ u32 scope, u32 node, u32 port, u32 key)
+{
+ struct name_seq *seq = nametbl_find_seq(type);
+
+ dbg("ins_publ: <%u,%x,%x> found %x\n", type, lower, upper, seq);
+ if (lower > upper) {
+ warn("Failed to publish illegal <%u,%u,%u>\n",
+ type, lower, upper);
+ return 0;
+ }
+
+ dbg("Publishing <%u,%u,%u> from %x\n", type, lower, upper, node);
+ if (!seq) {
+ seq = nameseq_create(type, &table.types[hash(type)]);
+ dbg("nametbl_insert_publ: created %x\n", seq);
+ }
+ if (!seq)
+ return 0;
+
+ assert(seq->type == type);
+ return nameseq_insert_publ(seq, type, lower, upper,
+ scope, node, port, key);
+}
+
+struct publication *nametbl_remove_publ(u32 type, u32 lower,
+ u32 node, u32 ref, u32 key)
+{
+ struct publication *publ;
+ struct name_seq *seq = nametbl_find_seq(type);
+
+ if (!seq)
+ return 0;
+
+ dbg("Withdrawing <%u,%u> from %x\n", type, lower, node);
+ publ = nameseq_remove_publ(seq, lower, node, ref, key);
+
+ if (!seq->first_free && list_empty(&seq->subscriptions)) {
+ hlist_del_init(&seq->ns_list);
+ kfree(seq->sseqs);
+ kfree(seq);
+ }
+ return publ;
+}
+
+/*
+ * nametbl_translate(): Translate tipc_name -> tipc_portid.
+ * Very time-critical.
+ *
+ * Note: on entry 'destnode' is the search domain used during translation;
+ * on exit it passes back the node address of the matching port (if any)
+ */
+
+u32 nametbl_translate(u32 type, u32 instance, u32 *destnode)
+{
+ struct sub_seq *sseq;
+ struct publication *publ = 0;
+ struct name_seq *seq;
+ u32 ref;
+
+ if (!in_scope(*destnode, tipc_own_addr))
+ return 0;
+
+ read_lock_bh(&nametbl_lock);
+ seq = nametbl_find_seq(type);
+ if (unlikely(!seq))
+ goto not_found;
+ sseq = nameseq_find_subseq(seq, instance);
+ if (unlikely(!sseq))
+ goto not_found;
+ spin_lock_bh(&seq->lock);
+
+ /* Closest-First Algorithm: */
+ if (likely(!*destnode)) {
+ publ = sseq->node_list;
+ if (publ) {
+ sseq->node_list = publ->node_list_next;
+found:
+ ref = publ->ref;
+ *destnode = publ->node;
+ spin_unlock_bh(&seq->lock);
+ read_unlock_bh(&nametbl_lock);
+ return ref;
+ }
+ publ = sseq->cluster_list;
+ if (publ) {
+ sseq->cluster_list = publ->cluster_list_next;
+ goto found;
+ }
+ publ = sseq->zone_list;
+ if (publ) {
+ sseq->zone_list = publ->zone_list_next;
+ goto found;
+ }
+ }
+
+ /* Round-Robin Algorithm: */
+ else if (*destnode == tipc_own_addr) {
+ publ = sseq->node_list;
+ if (publ) {
+ sseq->node_list = publ->node_list_next;
+ goto found;
+ }
+ } else if (in_own_cluster(*destnode)) {
+ publ = sseq->cluster_list;
+ if (publ) {
+ sseq->cluster_list = publ->cluster_list_next;
+ goto found;
+ }
+ } else {
+ publ = sseq->zone_list;
+ if (publ) {
+ sseq->zone_list = publ->zone_list_next;
+ goto found;
+ }
+ }
+ spin_unlock_bh(&seq->lock);
+not_found:
+ *destnode = 0;
+ read_unlock_bh(&nametbl_lock);
+ return 0;
+}
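+
+/*
+ * Illustrative caller sketch (mirrors the routing code in net.c; the
+ * variable names are assumptions): a sender resolves a name to a port and
+ * lets the translation pass back the owning node in 'dnode'.
+ *
+ *	u32 dnode = addr_domain(msg_lookup_scope(msg));
+ *	u32 dport = nametbl_translate(msg_nametype(msg),
+ *				      msg_nameinst(msg), &dnode);
+ *	if (!dport) {
+ *		tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
+ *		return;
+ *	}
+ *	msg_set_destnode(msg, dnode);
+ *	msg_set_destport(msg, dport);
+ *	net_route_msg(buf);
+ */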
+
+/**
+ * nametbl_mc_translate - find multicast destinations
+ *
+ * Creates list of all local ports that overlap the given multicast address;
+ * also determines if any off-node ports overlap.
+ *
+ * Note: Publications with a scope narrower than 'limit' are ignored.
+ * (i.e. local node-scope publications mustn't receive messages arriving
+ * from another node, even if the multicast link brought them here)
+ *
+ * Returns non-zero if any off-node ports overlap
+ */
+
+int nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
+ struct port_list *dports)
+{
+ struct name_seq *seq;
+ struct sub_seq *sseq;
+ struct sub_seq *sseq_stop;
+ int res = 0;
+
+ read_lock_bh(&nametbl_lock);
+ seq = nametbl_find_seq(type);
+ if (!seq)
+ goto exit;
+
+ spin_lock_bh(&seq->lock);
+
+ sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
+ sseq_stop = seq->sseqs + seq->first_free;
+ for (; sseq != sseq_stop; sseq++) {
+ struct publication *publ;
+
+ if (sseq->lower > upper)
+ break;
+ publ = sseq->cluster_list;
+ if (publ && (publ->scope <= limit))
+ do {
+ if (publ->node == tipc_own_addr)
+ port_list_add(dports, publ->ref);
+ else
+ res = 1;
+ publ = publ->cluster_list_next;
+ } while (publ != sseq->cluster_list);
+ }
+
+ spin_unlock_bh(&seq->lock);
+exit:
+ read_unlock_bh(&nametbl_lock);
+ return res;
+}
+
+/**
+ * nametbl_publish_rsv - publish port name using a reserved name type
+ */
+
+int nametbl_publish_rsv(u32 ref, unsigned int scope,
+ struct tipc_name_seq const *seq)
+{
+ int res;
+
+ atomic_inc(&rsv_publ_ok);
+ res = tipc_publish(ref, scope, seq);
+ atomic_dec(&rsv_publ_ok);
+ return res;
+}
+
+/**
+ * nametbl_publish - add name publication to network name tables
+ */
+
+struct publication *nametbl_publish(u32 type, u32 lower, u32 upper,
+ u32 scope, u32 port_ref, u32 key)
+{
+ struct publication *publ;
+
+ if (table.local_publ_count >= tipc_max_publications) {
+ warn("Failed publish: max %u local publications\n",
+ tipc_max_publications);
+ return 0;
+ }
+ if ((type < TIPC_RESERVED_TYPES) && !atomic_read(&rsv_publ_ok)) {
+ warn("Failed to publish reserved name <%u,%u,%u>\n",
+ type, lower, upper);
+ return 0;
+ }
+
+ write_lock_bh(&nametbl_lock);
+ table.local_publ_count++;
+ publ = nametbl_insert_publ(type, lower, upper, scope,
+ tipc_own_addr, port_ref, key);
+ if (publ && (scope != TIPC_NODE_SCOPE)) {
+ named_publish(publ);
+ }
+ write_unlock_bh(&nametbl_lock);
+ return publ;
+}
+
+/**
+ * nametbl_withdraw - withdraw name publication from network name tables
+ */
+
+int nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
+{
+ struct publication *publ;
+
+ dbg("nametbl_withdraw:<%d,%d,%d>\n", type, lower, key);
+ write_lock_bh(&nametbl_lock);
+ publ = nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
+ if (publ) {
+ table.local_publ_count--;
+ if (publ->scope != TIPC_NODE_SCOPE)
+ named_withdraw(publ);
+ write_unlock_bh(&nametbl_lock);
+ list_del_init(&publ->pport_list);
+ kfree(publ);
+ return 1;
+ }
+ write_unlock_bh(&nametbl_lock);
+ return 0;
+}
+
+/**
+ * nametbl_subscribe - add a subscription object to the name table
+ */
+
+void
+nametbl_subscribe(struct subscription *s)
+{
+ u32 type = s->seq.type;
+ struct name_seq *seq;
+
+ write_lock_bh(&nametbl_lock);
+ seq = nametbl_find_seq(type);
+ if (!seq) {
+ seq = nameseq_create(type, &table.types[hash(type)]);
+ }
+ if (seq){
+ spin_lock_bh(&seq->lock);
+ dbg("nametbl_subscribe:found %x for <%u,%u,%u>\n",
+ seq, type, s->seq.lower, s->seq.upper);
+ assert(seq->type == type);
+ nameseq_subscribe(seq, s);
+ spin_unlock_bh(&seq->lock);
+ }
+ write_unlock_bh(&nametbl_lock);
+}
+
+/**
+ * nametbl_unsubscribe - remove a subscription object from name table
+ */
+
+void
+nametbl_unsubscribe(struct subscription *s)
+{
+ struct name_seq *seq;
+
+ write_lock_bh(&nametbl_lock);
+ seq = nametbl_find_seq(s->seq.type);
+ if (seq != NULL){
+ spin_lock_bh(&seq->lock);
+ list_del_init(&s->nameseq_list);
+ spin_unlock_bh(&seq->lock);
+ if ((seq->first_free == 0) && list_empty(&seq->subscriptions)) {
+ hlist_del_init(&seq->ns_list);
+ kfree(seq->sseqs);
+ kfree(seq);
+ }
+ }
+ write_unlock_bh(&nametbl_lock);
+}
+
+
+/**
+ * subseq_list - print specified sub-sequence contents into the given buffer
+ */
+
+static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
+ u32 index)
+{
+ char portIdStr[27];
+ char *scopeStr;
+ struct publication *publ = sseq->zone_list;
+
+ tipc_printf(buf, "%-10u %-10u ", sseq->lower, sseq->upper);
+
+ if (depth == 2 || !publ) {
+ tipc_printf(buf, "\n");
+ return;
+ }
+
+ do {
+ sprintf (portIdStr, "<%u.%u.%u:%u>",
+ tipc_zone(publ->node), tipc_cluster(publ->node),
+ tipc_node(publ->node), publ->ref);
+ tipc_printf(buf, "%-26s ", portIdStr);
+ if (depth > 3) {
+ if (publ->node != tipc_own_addr)
+ scopeStr = "";
+ else if (publ->scope == TIPC_NODE_SCOPE)
+ scopeStr = "node";
+ else if (publ->scope == TIPC_CLUSTER_SCOPE)
+ scopeStr = "cluster";
+ else
+ scopeStr = "zone";
+ tipc_printf(buf, "%-10u %s", publ->key, scopeStr);
+ }
+
+ publ = publ->zone_list_next;
+ if (publ == sseq->zone_list)
+ break;
+
+ tipc_printf(buf, "\n%33s", " ");
+ } while (1);
+
+ tipc_printf(buf, "\n");
+}
+
+/**
+ * nameseq_list - print specified name sequence contents into the given buffer
+ */
+
+static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
+ u32 type, u32 lowbound, u32 upbound, u32 index)
+{
+ struct sub_seq *sseq;
+ char typearea[11];
+
+ sprintf(typearea, "%-10u", seq->type);
+
+ if (depth == 1) {
+ tipc_printf(buf, "%s\n", typearea);
+ return;
+ }
+
+ for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) {
+ if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) {
+ tipc_printf(buf, "%s ", typearea);
+ subseq_list(sseq, buf, depth, index);
+ sprintf(typearea, "%10s", " ");
+ }
+ }
+}
+
+/**
+ * nametbl_header - print name table header into the given buffer
+ */
+
+static void nametbl_header(struct print_buf *buf, u32 depth)
+{
+ tipc_printf(buf, "Type ");
+
+ if (depth > 1)
+ tipc_printf(buf, "Lower Upper ");
+ if (depth > 2)
+ tipc_printf(buf, "Port Identity ");
+ if (depth > 3)
+ tipc_printf(buf, "Publication");
+
+ tipc_printf(buf, "\n-----------");
+
+ if (depth > 1)
+ tipc_printf(buf, "--------------------- ");
+ if (depth > 2)
+ tipc_printf(buf, "-------------------------- ");
+ if (depth > 3)
+ tipc_printf(buf, "------------------");
+
+ tipc_printf(buf, "\n");
+}
+
+/**
+ * nametbl_list - print specified name table contents into the given buffer
+ */
+
+static void nametbl_list(struct print_buf *buf, u32 depth_info,
+ u32 type, u32 lowbound, u32 upbound)
+{
+ struct hlist_head *seq_head;
+ struct hlist_node *seq_node;
+ struct name_seq *seq;
+ int all_types;
+ u32 depth;
+ u32 i;
+
+ all_types = (depth_info & TIPC_NTQ_ALLTYPES);
+ depth = (depth_info & ~TIPC_NTQ_ALLTYPES);
+
+ if (depth == 0)
+ return;
+
+ if (all_types) {
+ /* display all entries in name table to specified depth */
+ nametbl_header(buf, depth);
+ lowbound = 0;
+ upbound = ~0;
+ for (i = 0; i < tipc_nametbl_size; i++) {
+ seq_head = &table.types[i];
+ hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
+ nameseq_list(seq, buf, depth, seq->type,
+ lowbound, upbound, i);
+ }
+ }
+ } else {
+ /* display only the sequence that matches the specified type */
+ if (upbound < lowbound) {
+ tipc_printf(buf, "invalid name sequence specified\n");
+ return;
+ }
+ nametbl_header(buf, depth);
+ i = hash(type);
+ seq_head = &table.types[i];
+ hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
+ if (seq->type == type) {
+ nameseq_list(seq, buf, depth, type,
+ lowbound, upbound, i);
+ break;
+ }
+ }
+ }
+}
+
+void nametbl_print(struct print_buf *buf, const char *str)
+{
+ tipc_printf(buf, str);
+ read_lock_bh(&nametbl_lock);
+ nametbl_list(buf, 0, 0, 0, 0);
+ read_unlock_bh(&nametbl_lock);
+}
+
+#define MAX_NAME_TBL_QUERY 32768
+
+struct sk_buff *nametbl_get(const void *req_tlv_area, int req_tlv_space)
+{
+ struct sk_buff *buf;
+ struct tipc_name_table_query *argv;
+ struct tlv_desc *rep_tlv;
+ struct print_buf b;
+ int str_len;
+
+ if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NAME_TBL_QUERY))
+ return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+ buf = cfg_reply_alloc(TLV_SPACE(MAX_NAME_TBL_QUERY));
+ if (!buf)
+ return NULL;
+
+ rep_tlv = (struct tlv_desc *)buf->data;
+ printbuf_init(&b, TLV_DATA(rep_tlv), MAX_NAME_TBL_QUERY);
+ argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area);
+ read_lock_bh(&nametbl_lock);
+ nametbl_list(&b, ntohl(argv->depth), ntohl(argv->type),
+ ntohl(argv->lowbound), ntohl(argv->upbound));
+ read_unlock_bh(&nametbl_lock);
+ str_len = printbuf_validate(&b);
+
+ skb_put(buf, TLV_SPACE(str_len));
+ TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
+
+ return buf;
+}
+
+void nametbl_dump(void)
+{
+ nametbl_list(CONS, 0, 0, 0, 0);
+}
+
+int nametbl_init(void)
+{
+ int array_size = sizeof(struct hlist_head) * tipc_nametbl_size;
+
+ table.types = (struct hlist_head *)kmalloc(array_size, GFP_ATOMIC);
+ if (!table.types)
+ return -ENOMEM;
+
+ write_lock_bh(&nametbl_lock);
+ memset(table.types, 0, array_size);
+ table.local_publ_count = 0;
+ write_unlock_bh(&nametbl_lock);
+ return 0;
+}
+
+void nametbl_stop(void)
+{
+ struct hlist_head *seq_head;
+ struct hlist_node *seq_node;
+ struct hlist_node *tmp;
+ struct name_seq *seq;
+ u32 i;
+
+ if (!table.types)
+ return;
+
+ write_lock_bh(&nametbl_lock);
+ for (i = 0; i < tipc_nametbl_size; i++) {
+ seq_head = &table.types[i];
+ hlist_for_each_entry_safe(seq, seq_node, tmp, seq_head, ns_list) {
+ struct sub_seq *sseq = seq->sseqs;
+
+ for (; sseq != &seq->sseqs[seq->first_free]; sseq++) {
+ struct publication *publ = sseq->zone_list;
+ assert(publ);
+ do {
+ struct publication *next =
+ publ->zone_list_next;
+ kfree(publ);
+ publ = next;
+ }
+ while (publ != sseq->zone_list);
+ }
+ }
+ }
+ kfree(table.types);
+ table.types = NULL;
+ write_unlock_bh(&nametbl_lock);
+}
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
new file mode 100644
index 00000000000..f82693384f6
--- /dev/null
+++ b/net/tipc/name_table.h
@@ -0,0 +1,108 @@
+/*
+ * net/tipc/name_table.h: Include file for TIPC name table code
+ *
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_NAME_TABLE_H
+#define _TIPC_NAME_TABLE_H
+
+#include "node_subscr.h"
+
+struct subscription;
+struct port_list;
+
+/*
+ * TIPC name types reserved for internal TIPC use (both current and planned)
+ */
+
+#define TIPC_ZM_SRV 3 /* zone master service name type */
+
+
+/**
+ * struct publication - info about a published (name or) name sequence
+ * @type: name sequence type
+ * @lower: name sequence lower bound
+ * @upper: name sequence upper bound
+ * @scope: scope of publication
+ * @node: network address of publishing port's node
+ * @ref: publishing port
+ * @key: publication key
+ * @subscr: subscription to "node down" event (for off-node publications only)
+ * @local_list: adjacent entries in list of publications made by this node
+ * @pport_list: adjacent entries in list of publications made by this port
+ * @node_list: next matching name seq publication with >= node scope
+ * @cluster_list: next matching name seq publication with >= cluster scope
+ * @zone_list: next matching name seq publication with >= zone scope
+ *
+ * Note that the node list, cluster list, and zone list are circular lists.
+ */
+
+struct publication {
+ u32 type;
+ u32 lower;
+ u32 upper;
+ u32 scope;
+ u32 node;
+ u32 ref;
+ u32 key;
+ struct node_subscr subscr;
+ struct list_head local_list;
+ struct list_head pport_list;
+ struct publication *node_list_next;
+ struct publication *cluster_list_next;
+ struct publication *zone_list_next;
+};
+
+
+extern rwlock_t nametbl_lock;
+
+struct sk_buff *nametbl_get(const void *req_tlv_area, int req_tlv_space);
+u32 nametbl_translate(u32 type, u32 instance, u32 *node);
+int nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
+ struct port_list *dports);
+int nametbl_publish_rsv(u32 ref, unsigned int scope,
+ struct tipc_name_seq const *seq);
+struct publication *nametbl_publish(u32 type, u32 lower, u32 upper,
+ u32 scope, u32 port_ref, u32 key);
+int nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key);
+struct publication *nametbl_insert_publ(u32 type, u32 lower, u32 upper,
+ u32 scope, u32 node, u32 ref, u32 key);
+struct publication *nametbl_remove_publ(u32 type, u32 lower,
+ u32 node, u32 ref, u32 key);
+void nametbl_subscribe(struct subscription *s);
+void nametbl_unsubscribe(struct subscription *s);
+int nametbl_init(void);
+void nametbl_stop(void);
+
+#endif
diff --git a/net/tipc/net.c b/net/tipc/net.c
new file mode 100644
index 00000000000..6826b493c1d
--- /dev/null
+++ b/net/tipc/net.c
@@ -0,0 +1,311 @@
+/*
+ * net/tipc/net.c: TIPC network routing code
+ *
+ * Copyright (c) 1995-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "bearer.h"
+#include "net.h"
+#include "zone.h"
+#include "addr.h"
+#include "name_table.h"
+#include "name_distr.h"
+#include "subscr.h"
+#include "link.h"
+#include "msg.h"
+#include "port.h"
+#include "bcast.h"
+#include "discover.h"
+#include "config.h"
+
+/*
+ * The TIPC locking policy is designed to ensure a very fine locking
+ * granularity, permitting complete parallel access to individual
+ * port and node/link instances. The code consists of three major
+ * locking domains, each protected by its own disjoint set of locks.
+ *
+ * 1: The routing hierarchy.
+ * Comprises the structures 'zone', 'cluster', 'node', 'link'
+ * and 'bearer'. The whole hierarchy is protected by a big
+ * read/write lock, net_lock, to ensure that nothing is added
+ * or removed while code is accessing any of these structures.
+ * This layer must not be called from the two others while they
+ * hold any of their own locks.
+ * Nor may it make any upcalls to the other two layers before
+ * it has released net_lock and other protective locks.
+ *
+ * Within the net_lock domain there are two sub-domains, 'node' and
+ * 'bearer', where local write operations are permitted,
+ * provided that they are protected by individual spin_locks
+ * per instance. Code holding net_lock(read) and a node spin_lock
+ * is permitted to poke around in both the node itself and its
+ * subordinate links. I.e., it can update link counters and queues,
+ * change link state, send protocol messages, and alter the
+ * "active_links" array in the node; but it can _not_ remove a link
+ * or a node from the overall structure.
+ * Correspondingly, individual bearers may change status within a
+ * net_lock(read), protected by an individual spin_lock per bearer
+ * instance, but it needs net_lock(write) to remove/add any bearers.
+ *
+ *
+ * 2: The transport level of the protocol.
+ * This consists of the structures port (and its user-level
+ * representations, such as user_port and tipc_sock), reference, and
+ * tipc_user (port.c, reg.c, socket.c).
+ *
+ * This layer has four different locks:
+ * - The tipc_port spin_lock. This protects each port instance
+ * from parallel data access and removal. Since we cannot place
+ * this lock in the port itself, it has been placed in the
+ * corresponding reference table entry, which has the same life
+ * cycle as the module. This entry is difficult to access from
+ * outside the TIPC core, however, so a pointer to the lock has
+ * been added in the port instance, to be used for unlocking
+ * only.
+ * - A read/write lock to protect the reference table itself (reg.c).
+ * (Nobody is using read-only access to this, so it can just as
+ * well be changed to a spin_lock)
+ * - A spin lock to protect the registry of kernel/driver users (reg.c)
+ * - A global spin_lock (port_lock), whose only task is to ensure
+ * consistency where more than one port is involved in an operation,
+ * i.e., when a port is part of a linked list of ports.
+ * There are two such lists; 'port_list', which is used for management,
+ * and 'wait_list', which is used to queue ports during congestion.
+ *
+ * 3: The name table (name_table.c, name_distr.c, subscription.c)
+ * - There is one big read/write-lock (nametbl_lock) protecting the
+ * overall name table structure. Nothing may be added to or removed from
+ * this structure without holding write access to it.
+ * - There is one local spin_lock per sub_sequence, which can be seen
+ * as a sub-domain to the nametbl_lock domain. It is used only
+ * for translation operations, and is needed because a translation
+ * advances the head of the 'publication' linked list between each lookup.
+ * This is always used within the scope of a nametbl_lock(read).
+ * - A local spin_lock protecting the queue of subscriber events.
+*/
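+
+/*
+ * Minimal sketch of the nesting order described above (illustrative only;
+ * the node pointer 'n_ptr' and its 'lock' member are assumptions): code
+ * that pokes around in a node and its links takes net_lock for reading,
+ * then the node's own spin_lock, and releases them in reverse order before
+ * calling up into the transport or name table layers.
+ *
+ *	read_lock_bh(&net_lock);
+ *	n_ptr = ... look up the node ...
+ *	spin_lock_bh(&n_ptr->lock);
+ *	... update link counters, queues or active_links ...
+ *	spin_unlock_bh(&n_ptr->lock);
+ *	read_unlock_bh(&net_lock);
+ */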
+
+rwlock_t net_lock = RW_LOCK_UNLOCKED;
+struct network net = { 0 };
+
+struct node *net_select_remote_node(u32 addr, u32 ref)
+{
+ return zone_select_remote_node(net.zones[tipc_zone(addr)], addr, ref);
+}
+
+u32 net_select_router(u32 addr, u32 ref)
+{
+ return zone_select_router(net.zones[tipc_zone(addr)], addr, ref);
+}
+
+
+u32 net_next_node(u32 a)
+{
+ if (net.zones[tipc_zone(a)])
+ return zone_next_node(a);
+ return 0;
+}
+
+void net_remove_as_router(u32 router)
+{
+ u32 z_num;
+
+ for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
+ if (!net.zones[z_num])
+ continue;
+ zone_remove_as_router(net.zones[z_num], router);
+ }
+}
+
+void net_send_external_routes(u32 dest)
+{
+ u32 z_num;
+
+ for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
+ if (net.zones[z_num])
+ zone_send_external_routes(net.zones[z_num], dest);
+ }
+}
+
+int net_init(void)
+{
+ u32 sz = sizeof(struct _zone *) * (tipc_max_zones + 1);
+
+ memset(&net, 0, sizeof(net));
+ net.zones = (struct _zone **)kmalloc(sz, GFP_ATOMIC);
+ if (!net.zones) {
+ return -ENOMEM;
+ }
+ memset(net.zones, 0, sz);
+ return TIPC_OK;
+}
+
+void net_stop(void)
+{
+ u32 z_num;
+
+ if (!net.zones)
+ return;
+
+ for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
+ zone_delete(net.zones[z_num]);
+ }
+ kfree(net.zones);
+ net.zones = 0;
+}
+
+static void net_route_named_msg(struct sk_buff *buf)
+{
+ struct tipc_msg *msg = buf_msg(buf);
+ u32 dnode;
+ u32 dport;
+
+ if (!msg_named(msg)) {
+ msg_dbg(msg, "net->drop_nam:");
+ buf_discard(buf);
+ return;
+ }
+
+ dnode = addr_domain(msg_lookup_scope(msg));
+ dport = nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode);
+ dbg("net->lookup<%u,%u>-><%u,%x>\n",
+ msg_nametype(msg), msg_nameinst(msg), dport, dnode);
+ if (dport) {
+ msg_set_destnode(msg, dnode);
+ msg_set_destport(msg, dport);
+ net_route_msg(buf);
+ return;
+ }
+ msg_dbg(msg, "net->rej:NO NAME: ");
+ tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
+}
+
+void net_route_msg(struct sk_buff *buf)
+{
+ struct tipc_msg *msg;
+ u32 dnode;
+
+ if (!buf)
+ return;
+ msg = buf_msg(buf);
+
+ msg_incr_reroute_cnt(msg);
+ if (msg_reroute_cnt(msg) > 6) {
+ if (msg_errcode(msg)) {
+ msg_dbg(msg, "NET>DISC>:");
+ buf_discard(buf);
+ } else {
+ msg_dbg(msg, "NET>REJ>:");
+ tipc_reject_msg(buf, msg_destport(msg) ?
+ TIPC_ERR_NO_PORT : TIPC_ERR_NO_NAME);
+ }
+ return;
+ }
+
+ msg_dbg(msg, "net->rout: ");
+
+ /* Handle message for this node */
+ dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
+ if (in_scope(dnode, tipc_own_addr)) {
+ if (msg_isdata(msg)) {
+ if (msg_mcast(msg))
+ port_recv_mcast(buf, NULL);
+ else if (msg_destport(msg))
+ port_recv_msg(buf);
+ else
+ net_route_named_msg(buf);
+ return;
+ }
+ switch (msg_user(msg)) {
+ case ROUTE_DISTRIBUTOR:
+ cluster_recv_routing_table(buf);
+ break;
+ case NAME_DISTRIBUTOR:
+ named_recv(buf);
+ break;
+ case CONN_MANAGER:
+ port_recv_proto_msg(buf);
+ break;
+ default:
+ msg_dbg(msg,"DROP/NET/<REC<");
+ buf_discard(buf);
+ }
+ return;
+ }
+
+ /* Handle message for another node */
+ msg_dbg(msg, "NET>SEND>: ");
+ link_send(buf, dnode, msg_link_selector(msg));
+}
+
+int tipc_start_net(void)
+{
+ char addr_string[16];
+ int res;
+
+ if (tipc_mode != TIPC_NODE_MODE)
+ return -ENOPROTOOPT;
+
+ tipc_mode = TIPC_NET_MODE;
+ named_reinit();
+ port_reinit();
+
+ if ((res = bearer_init()) ||
+ (res = net_init()) ||
+ (res = cluster_init()) ||
+ (res = bclink_init())) {
+ return res;
+ }
+ subscr_stop();
+ cfg_stop();
+ k_signal((Handler)subscr_start, 0);
+ k_signal((Handler)cfg_init, 0);
+ info("Started in network mode\n");
+ info("Own node address %s, network identity %u\n",
+ addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
+ return TIPC_OK;
+}
+
+void tipc_stop_net(void)
+{
+ if (tipc_mode != TIPC_NET_MODE)
+ return;
+ write_lock_bh(&net_lock);
+ bearer_stop();
+ tipc_mode = TIPC_NODE_MODE;
+ bclink_stop();
+ net_stop();
+ write_unlock_bh(&net_lock);
+ info("Left network mode\n");
+}
+
diff --git a/net/tipc/net.h b/net/tipc/net.h
new file mode 100644
index 00000000000..948c6d42102
--- /dev/null
+++ b/net/tipc/net.h
@@ -0,0 +1,66 @@
+/*
+ * net/tipc/net.h: Include file for TIPC network routing code
+ *
+ * Copyright (c) 1995-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_NET_H
+#define _TIPC_NET_H
+
+struct _zone;
+
+/**
+ * struct network - TIPC network structure
+ * @zones: array of pointers to all zones within network
+ */
+
+struct network {
+ struct _zone **zones;
+};
+
+
+extern struct network net;
+extern rwlock_t net_lock;
+
+int net_init(void);
+void net_stop(void);
+void net_remove_as_router(u32 router);
+void net_send_external_routes(u32 dest);
+void net_route_msg(struct sk_buff *buf);
+struct node *net_select_remote_node(u32 addr, u32 ref);
+u32 net_select_router(u32 addr, u32 ref);
+
+int tipc_start_net(void);
+void tipc_stop_net(void);
+
+#endif
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
new file mode 100644
index 00000000000..19b3f402253
--- /dev/null
+++ b/net/tipc/netlink.c
@@ -0,0 +1,112 @@
+/*
+ * net/tipc/netlink.c: TIPC configuration handling
+ *
+ * Copyright (c) 2005-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "config.h"
+#include <net/genetlink.h>
+
+static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
+{
+ struct sk_buff *rep_buf;
+ struct nlmsghdr *rep_nlh;
+ struct nlmsghdr *req_nlh = info->nlhdr;
+ struct tipc_genlmsghdr *req_userhdr = info->userhdr;
+ int hdr_space = NLMSG_SPACE(GENL_HDRLEN + TIPC_GENL_HDRLEN);
+
+ if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN)))
+ rep_buf = cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN);
+ else
+ rep_buf = cfg_do_cmd(req_userhdr->dest,
+ req_userhdr->cmd,
+ NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN,
+ NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN),
+ hdr_space);
+
+ if (rep_buf) {
+ skb_push(rep_buf, hdr_space);
+ rep_nlh = (struct nlmsghdr *)rep_buf->data;
+ memcpy(rep_nlh, req_nlh, hdr_space);
+ rep_nlh->nlmsg_len = rep_buf->len;
+ genlmsg_unicast(rep_buf, req_nlh->nlmsg_pid);
+ }
+
+ return 0;
+}
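Illustrative sketch (editorial, not part of the patch): handle_cmd() above treats any command with bits in 0xC000 set as privileged and replies with TIPC_CFG_NOT_NET_ADMIN unless the caller has CAP_NET_ADMIN. The check in isolation, with an invented helper name:

#include <stdio.h>

/* Commands with either of the two top command bits set need CAP_NET_ADMIN. */
static int cmd_is_privileged(unsigned int cmd)
{
	return (cmd & 0xC000) != 0;
}

int main(void)
{
	printf("0x0001 privileged? %d\n", cmd_is_privileged(0x0001));	/* 0 */
	printf("0x4002 privileged? %d\n", cmd_is_privileged(0x4002));	/* 1 */
	return 0;
}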
+
+static struct genl_family family = {
+ .id = GENL_ID_GENERATE,
+ .name = TIPC_GENL_NAME,
+ .version = TIPC_GENL_VERSION,
+ .hdrsize = TIPC_GENL_HDRLEN,
+ .maxattr = 0,
+};
+
+static struct genl_ops ops = {
+ .cmd = TIPC_GENL_CMD,
+ .doit = handle_cmd,
+};
+
+static int family_registered = 0;
+
+int netlink_start(void)
+{
+ if (genl_register_family(&family))
+ goto err;
+
+ family_registered = 1;
+
+ if (genl_register_ops(&family, &ops))
+ goto err_unregister;
+
+ return 0;
+
+ err_unregister:
+ genl_unregister_family(&family);
+ family_registered = 0;
+ err:
+ err("Failed to register netlink interface\n");
+ return -EFAULT;
+}
+
+void netlink_stop(void)
+{
+ if (family_registered) {
+ genl_unregister_family(&family);
+ family_registered = 0;
+ }
+}
diff --git a/net/tipc/node.c b/net/tipc/node.c
new file mode 100644
index 00000000000..05688d01138
--- /dev/null
+++ b/net/tipc/node.c
@@ -0,0 +1,679 @@
+/*
+ * net/tipc/node.c: TIPC node management routines
+ *
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "config.h"
+#include "node.h"
+#include "cluster.h"
+#include "net.h"
+#include "addr.h"
+#include "node_subscr.h"
+#include "link.h"
+#include "port.h"
+#include "bearer.h"
+#include "name_distr.h"
+
+void node_print(struct print_buf *buf, struct node *n_ptr, char *str);
+static void node_lost_contact(struct node *n_ptr);
+static void node_established_contact(struct node *n_ptr);
+
+struct node *nodes = NULL; /* sorted list of nodes within cluster */
+
+u32 tipc_own_tag = 0;
+
+struct node *node_create(u32 addr)
+{
+ struct cluster *c_ptr;
+ struct node *n_ptr;
+ struct node **curr_node;
+
+ n_ptr = kmalloc(sizeof(*n_ptr),GFP_ATOMIC);
+ if (n_ptr != NULL) {
+ memset(n_ptr, 0, sizeof(*n_ptr));
+ n_ptr->addr = addr;
+ n_ptr->lock = SPIN_LOCK_UNLOCKED;
+ INIT_LIST_HEAD(&n_ptr->nsub);
+
+ c_ptr = cluster_find(addr);
+ if (c_ptr == NULL)
+ c_ptr = cluster_create(addr);
+ if (c_ptr != NULL) {
+ n_ptr->owner = c_ptr;
+ cluster_attach_node(c_ptr, n_ptr);
+ n_ptr->last_router = -1;
+
+ /* Insert node into ordered list */
+ for (curr_node = &nodes; *curr_node;
+ curr_node = &(*curr_node)->next) {
+ if (addr < (*curr_node)->addr) {
+ n_ptr->next = *curr_node;
+ break;
+ }
+ }
+ (*curr_node) = n_ptr;
+ } else {
+ kfree(n_ptr);
+ n_ptr = NULL;
+ }
+ }
+ return n_ptr;
+}
+
+void node_delete(struct node *n_ptr)
+{
+ if (!n_ptr)
+ return;
+
+#if 0
+ /* Not needed because links are already deleted via bearer_stop() */
+
+ u32 l_num;
+
+ for (l_num = 0; l_num < MAX_BEARERS; l_num++) {
+ link_delete(n_ptr->links[l_num]);
+ }
+#endif
+
+ dbg("node %x deleted\n", n_ptr->addr);
+ kfree(n_ptr);
+}
+
+
+/**
+ * node_link_up - handle addition of link
+ *
+ * Link becomes active (alone or shared) or standby, depending on its priority.
+ */
+
+void node_link_up(struct node *n_ptr, struct link *l_ptr)
+{
+ struct link **active = &n_ptr->active_links[0];
+
+ info("Established link <%s> on network plane %c\n",
+ l_ptr->name, l_ptr->b_ptr->net_plane);
+
+ if (!active[0]) {
+ dbg(" link %x into %x/%x\n", l_ptr, &active[0], &active[1]);
+ active[0] = active[1] = l_ptr;
+ node_established_contact(n_ptr);
+ return;
+ }
+ if (l_ptr->priority < active[0]->priority) {
+ info("Link is standby\n");
+ return;
+ }
+ link_send_duplicate(active[0], l_ptr);
+ if (l_ptr->priority == active[0]->priority) {
+ active[0] = l_ptr;
+ return;
+ }
+ info("Link <%s> on network plane %c becomes standby\n",
+ active[0]->name, active[0]->b_ptr->net_plane);
+ active[0] = active[1] = l_ptr;
+}
+
+/**
+ * node_select_active_links - select active link
+ */
+
+static void node_select_active_links(struct node *n_ptr)
+{
+ struct link **active = &n_ptr->active_links[0];
+ u32 i;
+ u32 highest_prio = 0;
+
+ active[0] = active[1] = 0;
+
+ for (i = 0; i < MAX_BEARERS; i++) {
+ struct link *l_ptr = n_ptr->links[i];
+
+ if (!l_ptr || !link_is_up(l_ptr) ||
+ (l_ptr->priority < highest_prio))
+ continue;
+
+ if (l_ptr->priority > highest_prio) {
+ highest_prio = l_ptr->priority;
+ active[0] = active[1] = l_ptr;
+ } else {
+ active[1] = l_ptr;
+ }
+ }
+}
+
+/**
+ * node_link_down - handle loss of link
+ */
+
+void node_link_down(struct node *n_ptr, struct link *l_ptr)
+{
+ struct link **active;
+
+ if (!link_is_active(l_ptr)) {
+ info("Lost standby link <%s> on network plane %c\n",
+ l_ptr->name, l_ptr->b_ptr->net_plane);
+ return;
+ }
+ info("Lost link <%s> on network plane %c\n",
+ l_ptr->name, l_ptr->b_ptr->net_plane);
+
+ active = &n_ptr->active_links[0];
+ if (active[0] == l_ptr)
+ active[0] = active[1];
+ if (active[1] == l_ptr)
+ active[1] = active[0];
+ if (active[0] == l_ptr)
+ node_select_active_links(n_ptr);
+ if (node_is_up(n_ptr))
+ link_changeover(l_ptr);
+ else
+ node_lost_contact(n_ptr);
+}
+
+int node_has_active_links(struct node *n_ptr)
+{
+ return (n_ptr &&
+ ((n_ptr->active_links[0]) || (n_ptr->active_links[1])));
+}
+
+int node_has_redundant_links(struct node *n_ptr)
+{
+ return (node_has_active_links(n_ptr) &&
+ (n_ptr->active_links[0] != n_ptr->active_links[1]));
+}
+
+int node_has_active_routes(struct node *n_ptr)
+{
+ return (n_ptr && (n_ptr->last_router >= 0));
+}
+
+int node_is_up(struct node *n_ptr)
+{
+ return (node_has_active_links(n_ptr) || node_has_active_routes(n_ptr));
+}
+
+struct node *node_attach_link(struct link *l_ptr)
+{
+ struct node *n_ptr = node_find(l_ptr->addr);
+
+ if (!n_ptr)
+ n_ptr = node_create(l_ptr->addr);
+ if (n_ptr) {
+ u32 bearer_id = l_ptr->b_ptr->identity;
+ char addr_string[16];
+
+ assert(bearer_id < MAX_BEARERS);
+ if (n_ptr->link_cnt >= 2) {
+ err("Attempt to create third link to %s\n",
+ addr_string_fill(addr_string, n_ptr->addr));
+ return 0;
+ }
+
+ if (!n_ptr->links[bearer_id]) {
+ n_ptr->links[bearer_id] = l_ptr;
+ net.zones[tipc_zone(l_ptr->addr)]->links++;
+ n_ptr->link_cnt++;
+ return n_ptr;
+ }
+ err("Attempt to establish second link on <%s> to <%s>\n",
+ l_ptr->b_ptr->publ.name,
+ addr_string_fill(addr_string, l_ptr->addr));
+ }
+ return 0;
+}
+
+void node_detach_link(struct node *n_ptr, struct link *l_ptr)
+{
+ n_ptr->links[l_ptr->b_ptr->identity] = 0;
+ net.zones[tipc_zone(l_ptr->addr)]->links--;
+ n_ptr->link_cnt--;
+}
+
+/*
+ * Routing table management - five cases to handle:
+ *
+ * 1: A link towards a zone/cluster external node comes up.
+ * => Send a multicast message updating routing tables of all
+ * system nodes within own cluster that the new destination
+ * can be reached via this node.
+ * (node.establishedContact()=>cluster.multicastNewRoute())
+ *
+ * 2: A link towards a slave node comes up.
+ * => Send a multicast message updating routing tables of all
+ * system nodes within own cluster that the new destination
+ * can be reached via this node.
+ * (node.establishedContact()=>cluster.multicastNewRoute())
+ * => Send a message to the slave node about existence
+ * of all system nodes within cluster:
+ * (node.establishedContact()=>cluster.sendLocalRoutes())
+ *
+ * 3: A new cluster local system node becomes available.
+ * => Send message(s) to this particular node containing
+ * information about all cluster external and slave
+ * nodes which can be reached via this node.
+ * (node.establishedContact()==>network.sendExternalRoutes())
+ * (node.establishedContact()==>network.sendSlaveRoutes())
+ * => Send messages to all directly connected slave nodes
+ * containing information about the existence of the new node
+ * (node.establishedContact()=>cluster.multicastNewRoute())
+ *
+ * 4: The link towards a zone/cluster external node or slave
+ * node goes down.
+ * => Send a multicast message updating routing tables of all
+ * nodes within cluster that the new destination can no
+ * longer be reached via this node.
+ * (node.lostAllLinks()=>cluster.bcastLostRoute())
+ *
+ * 5: A cluster local system node becomes unavailable.
+ * => Remove all references to this node from the local
+ * routing tables. Note: This is a completely node
+ * local operation.
+ * (node.lostAllLinks()=>network.removeAsRouter())
+ * => Send messages to all directly connected slave nodes
+ * containing information about loss of the node
+ * (node.lostAllLinks()=>cluster.multicastLostRoute())
+ *
+ */
+
+static void node_established_contact(struct node *n_ptr)
+{
+ struct cluster *c_ptr;
+
+ dbg("node_established_contact:-> %x\n", n_ptr->addr);
+ if (!node_has_active_routes(n_ptr)) {
+ k_signal((Handler)named_node_up, n_ptr->addr);
+ }
+
+ /* Synchronize broadcast acks */
+ n_ptr->bclink.acked = bclink_get_last_sent();
+
+ if (is_slave(tipc_own_addr))
+ return;
+ if (!in_own_cluster(n_ptr->addr)) {
+ /* Usage case 1 (see above) */
+ c_ptr = cluster_find(tipc_own_addr);
+ if (!c_ptr)
+ c_ptr = cluster_create(tipc_own_addr);
+ if (c_ptr)
+ cluster_bcast_new_route(c_ptr, n_ptr->addr, 1,
+ tipc_max_nodes);
+ return;
+ }
+
+ c_ptr = n_ptr->owner;
+ if (is_slave(n_ptr->addr)) {
+ /* Usage case 2 (see above) */
+ cluster_bcast_new_route(c_ptr, n_ptr->addr, 1, tipc_max_nodes);
+ cluster_send_local_routes(c_ptr, n_ptr->addr);
+ return;
+ }
+
+ if (n_ptr->bclink.supported) {
+ nmap_add(&cluster_bcast_nodes, n_ptr->addr);
+ if (n_ptr->addr < tipc_own_addr)
+ tipc_own_tag++;
+ }
+
+ /* Case 3 (see above) */
+ net_send_external_routes(n_ptr->addr);
+ cluster_send_slave_routes(c_ptr, n_ptr->addr);
+ cluster_bcast_new_route(c_ptr, n_ptr->addr, LOWEST_SLAVE,
+ highest_allowed_slave);
+}
+
+static void node_lost_contact(struct node *n_ptr)
+{
+ struct cluster *c_ptr;
+ struct node_subscr *ns, *tns;
+ char addr_string[16];
+ u32 i;
+
+ /* Clean up broadcast reception remains */
+ n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
+ while (n_ptr->bclink.deferred_head) {
+ struct sk_buff* buf = n_ptr->bclink.deferred_head;
+ n_ptr->bclink.deferred_head = buf->next;
+ buf_discard(buf);
+ }
+ if (n_ptr->bclink.defragm) {
+ buf_discard(n_ptr->bclink.defragm);
+ n_ptr->bclink.defragm = NULL;
+ }
+ if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) {
+ bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
+ }
+
+ /* Update routing tables */
+ if (is_slave(tipc_own_addr)) {
+ net_remove_as_router(n_ptr->addr);
+ } else {
+ if (!in_own_cluster(n_ptr->addr)) {
+ /* Case 4 (see above) */
+ c_ptr = cluster_find(tipc_own_addr);
+ cluster_bcast_lost_route(c_ptr, n_ptr->addr, 1,
+ tipc_max_nodes);
+ } else {
+ /* Case 5 (see above) */
+ c_ptr = cluster_find(n_ptr->addr);
+ if (is_slave(n_ptr->addr)) {
+ cluster_bcast_lost_route(c_ptr, n_ptr->addr, 1,
+ tipc_max_nodes);
+ } else {
+ if (n_ptr->bclink.supported) {
+ nmap_remove(&cluster_bcast_nodes,
+ n_ptr->addr);
+ if (n_ptr->addr < tipc_own_addr)
+ tipc_own_tag--;
+ }
+ net_remove_as_router(n_ptr->addr);
+ cluster_bcast_lost_route(c_ptr, n_ptr->addr,
+ LOWEST_SLAVE,
+ highest_allowed_slave);
+ }
+ }
+ }
+ if (node_has_active_routes(n_ptr))
+ return;
+
+ info("Lost contact with %s\n",
+ addr_string_fill(addr_string, n_ptr->addr));
+
+ /* Abort link changeover */
+ for (i = 0; i < MAX_BEARERS; i++) {
+ struct link *l_ptr = n_ptr->links[i];
+ if (!l_ptr)
+ continue;
+ l_ptr->reset_checkpoint = l_ptr->next_in_no;
+ l_ptr->exp_msg_count = 0;
+ link_reset_fragments(l_ptr);
+ }
+
+ /* Notify subscribers */
+ list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) {
+ ns->node = 0;
+ list_del_init(&ns->nodesub_list);
+ k_signal((Handler)ns->handle_node_down,
+ (unsigned long)ns->usr_handle);
+ }
+}
+
+/**
+ * node_select_next_hop - find the next-hop node for a message
+ *
+ * Called when cluster local lookup has failed.
+ */
+
+struct node *node_select_next_hop(u32 addr, u32 selector)
+{
+ struct node *n_ptr;
+ u32 router_addr;
+
+ if (!addr_domain_valid(addr))
+ return 0;
+
+ /* Look for direct link to destination processor */
+ n_ptr = node_find(addr);
+ if (n_ptr && node_has_active_links(n_ptr))
+ return n_ptr;
+
+ /* Cluster local system nodes *must* have direct links */
+ if (!is_slave(addr) && in_own_cluster(addr))
+ return 0;
+
+ /* Look for cluster local router with direct link to node */
+ router_addr = node_select_router(n_ptr, selector);
+ if (router_addr)
+ return node_select(router_addr, selector);
+
+ /* Slave nodes can only be accessed within own cluster via a
+ known router with direct link -- if no router was found, give up */
+ if (is_slave(addr))
+ return 0;
+
+ /* Inter zone/cluster -- find any direct link to remote cluster */
+ addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
+ n_ptr = net_select_remote_node(addr, selector);
+ if (n_ptr && node_has_active_links(n_ptr))
+ return n_ptr;
+
+ /* Last resort -- look for any router to anywhere in remote zone */
+ router_addr = net_select_router(addr, selector);
+ if (router_addr)
+ return node_select(router_addr, selector);
+
+ return 0;
+}
+
+/**
+ * node_select_router - select router to reach specified node
+ *
+ * Uses a deterministic and fair algorithm for selecting router node.
+ */
+
+u32 node_select_router(struct node *n_ptr, u32 ref)
+{
+ u32 ulim;
+ u32 mask;
+ u32 start;
+ u32 r;
+
+ if (!n_ptr)
+ return 0;
+
+ if (n_ptr->last_router < 0)
+ return 0;
+ ulim = ((n_ptr->last_router + 1) * 32) - 1;
+
+ /* Start entry must be random */
+ mask = tipc_max_nodes;
+ while (mask > ulim)
+ mask >>= 1;
+ start = ref & mask;
+ r = start;
+
+ /* Lookup upwards with wrap-around */
+ do {
+ if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
+ break;
+ } while (++r <= ulim);
+ if (r > ulim) {
+ r = 1;
+ do {
+ if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
+ break;
+ } while (++r < start);
+ assert(r != start);
+ }
+ assert(r && (r <= ulim));
+ return tipc_addr(own_zone(), own_cluster(), r);
+}
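Illustrative sketch (editorial, not part of the patch): node_select_router() above treats routers[] as a bitmap of cluster-local router node numbers, derives a starting bit position from the caller-supplied reference so that different callers spread over the available routers, and scans upward with wrap-around. A simplified stand-alone version of that scan; select_router, ROUTER_WORDS and the demo values are invented here:

#include <stdio.h>

#define ROUTER_WORDS 16		/* covers router numbers 0..511, like routers[512/32] */

/* Returns the selected router number, or 0 if no router bit is set.
 * 'last_word' is the index of the highest non-zero word in routers[],
 * 'ref' seeds the starting position, 'max_nodes' bounds the mask derivation.
 */
static unsigned int select_router(const unsigned int routers[ROUTER_WORDS],
				  int last_word, unsigned int ref,
				  unsigned int max_nodes)
{
	unsigned int ulim, mask, start, r;

	if (last_word < 0)
		return 0;
	ulim = (last_word + 1) * 32 - 1;

	/* Derive a start position from 'ref', kept within the populated range */
	mask = max_nodes;
	while (mask > ulim)
		mask >>= 1;
	start = ref & mask;

	/* Scan upwards from 'start', then wrap around from router 1 */
	for (r = start; r <= ulim; r++)
		if ((routers[r / 32] >> (r % 32)) & 1)
			return r;
	for (r = 1; r < start; r++)
		if ((routers[r / 32] >> (r % 32)) & 1)
			return r;
	return 0;
}

int main(void)
{
	unsigned int routers[ROUTER_WORDS] = { 0 };

	routers[0] |= 1u << 3;	/* node 3 is a router */
	routers[1] |= 1u << 2;	/* node 34 is a router */
	printf("%u\n", select_router(routers, 1, 7, 255));	/* start 7  -> picks 34 */
	printf("%u\n", select_router(routers, 1, 40, 255));	/* start 40 -> wraps, picks 3 */
	return 0;
}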
+
+void node_add_router(struct node *n_ptr, u32 router)
+{
+ u32 r_num = tipc_node(router);
+
+ n_ptr->routers[r_num / 32] =
+ ((1 << (r_num % 32)) | n_ptr->routers[r_num / 32]);
+ n_ptr->last_router = tipc_max_nodes / 32;
+ while ((--n_ptr->last_router >= 0) &&
+ !n_ptr->routers[n_ptr->last_router]);
+}
+
+void node_remove_router(struct node *n_ptr, u32 router)
+{
+ u32 r_num = tipc_node(router);
+
+ if (n_ptr->last_router < 0)
+ return; /* No routes */
+
+ n_ptr->routers[r_num / 32] =
+ ((~(1 << (r_num % 32))) & (n_ptr->routers[r_num / 32]));
+ n_ptr->last_router = tipc_max_nodes / 32;
+ while ((--n_ptr->last_router >= 0) &&
+ !n_ptr->routers[n_ptr->last_router]);
+
+ if (!node_is_up(n_ptr))
+ node_lost_contact(n_ptr);
+}
+
+#if 0
+void node_print(struct print_buf *buf, struct node *n_ptr, char *str)
+{
+ u32 i;
+
+ tipc_printf(buf, "\n\n%s", str);
+ for (i = 0; i < MAX_BEARERS; i++) {
+ if (!n_ptr->links[i])
+ continue;
+ tipc_printf(buf, "Links[%u]: %x, ", i, n_ptr->links[i]);
+ }
+ tipc_printf(buf, "Active links: [%x,%x]\n",
+ n_ptr->active_links[0], n_ptr->active_links[1]);
+}
+#endif
+
+u32 tipc_available_nodes(const u32 domain)
+{
+ struct node *n_ptr;
+ u32 cnt = 0;
+
+ for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) {
+ if (!in_scope(domain, n_ptr->addr))
+ continue;
+ if (node_is_up(n_ptr))
+ cnt++;
+ }
+ return cnt;
+}
+
+struct sk_buff *node_get_nodes(const void *req_tlv_area, int req_tlv_space)
+{
+ u32 domain;
+ struct sk_buff *buf;
+ struct node *n_ptr;
+ struct tipc_node_info node_info;
+
+ if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
+ return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+ domain = *(u32 *)TLV_DATA(req_tlv_area);
+ domain = ntohl(domain);
+ if (!addr_domain_valid(domain))
+ return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+ " (network address)");
+
+ if (!nodes)
+ return cfg_reply_none();
+
+ /* For now, get space for all other nodes
+ (will need to modify this when slave nodes are supported) */
+
+ buf = cfg_reply_alloc(TLV_SPACE(sizeof(node_info)) *
+ (tipc_max_nodes - 1));
+ if (!buf)
+ return NULL;
+
+ /* Add TLVs for all nodes in scope */
+
+ for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) {
+ if (!in_scope(domain, n_ptr->addr))
+ continue;
+ node_info.addr = htonl(n_ptr->addr);
+ node_info.up = htonl(node_is_up(n_ptr));
+ cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
+ &node_info, sizeof(node_info));
+ }
+
+ return buf;
+}
+
+struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space)
+{
+ u32 domain;
+ struct sk_buff *buf;
+ struct node *n_ptr;
+ struct tipc_link_info link_info;
+
+ if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
+ return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+ domain = *(u32 *)TLV_DATA(req_tlv_area);
+ domain = ntohl(domain);
+ if (!addr_domain_valid(domain))
+ return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+ " (network address)");
+
+ if (!nodes)
+ return cfg_reply_none();
+
+ /* For now, get space for 2 links to all other nodes + bcast link
+ (will need to modify this when slave nodes are supported) */
+
+ buf = cfg_reply_alloc(TLV_SPACE(sizeof(link_info)) *
+ (2 * (tipc_max_nodes - 1) + 1));
+ if (!buf)
+ return NULL;
+
+ /* Add TLV for broadcast link */
+
+ link_info.dest = tipc_own_addr & 0xfffff00;
+ link_info.dest = htonl(link_info.dest);
+ link_info.up = htonl(1);
+ strcpy(link_info.str, bc_link_name);
+ cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
+
+ /* Add TLVs for any other links in scope */
+
+ for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) {
+ u32 i;
+
+ if (!in_scope(domain, n_ptr->addr))
+ continue;
+ for (i = 0; i < MAX_BEARERS; i++) {
+ if (!n_ptr->links[i])
+ continue;
+ link_info.dest = htonl(n_ptr->addr);
+ link_info.up = htonl(link_is_up(n_ptr->links[i]));
+ strcpy(link_info.str, n_ptr->links[i]->name);
+ cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
+ &link_info, sizeof(link_info));
+ }
+ }
+
+ return buf;
+}
diff --git a/net/tipc/node.h b/net/tipc/node.h
new file mode 100644
index 00000000000..b39442badcc
--- /dev/null
+++ b/net/tipc/node.h
@@ -0,0 +1,144 @@
+/*
+ * net/tipc/node.h: Include file for TIPC node management routines
+ *
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_NODE_H
+#define _TIPC_NODE_H
+
+#include "node_subscr.h"
+#include "addr.h"
+#include "cluster.h"
+#include "bearer.h"
+
+/**
+ * struct node - TIPC node structure
+ * @addr: network address of node
+ * @lock: spinlock governing access to structure
+ * @owner: pointer to cluster that node belongs to
+ * @next: pointer to next node in sorted list of cluster's nodes
+ * @nsub: list of "node down" subscriptions monitoring node
+ * @active_links: pointers to active links to node
+ * @links: pointers to all links to node
+ * @link_cnt: number of links to node
+ * @permit_changeover: non-zero if node has redundant links to this system
+ * @routers: bitmap (used for multicluster communication)
+ * @last_router: (used for multicluster communication)
+ * @bclink: broadcast-related info
+ * @supported: non-zero if node supports TIPC b'cast capability
+ * @acked: sequence # of last outbound b'cast message acknowledged by node
+ * @last_in: sequence # of last in-sequence b'cast message received from node
+ * @gap_after: sequence # of last message not requiring a NAK request
+ * @gap_to: sequence # of last message requiring a NAK request
+ * @nack_sync: counter that determines when NAK requests should be sent
+ * @deferred_head: oldest OOS b'cast message received from node
+ * @deferred_tail: newest OOS b'cast message received from node
+ * @defragm: list of partially reassembled b'cast message fragments from node
+ */
+
+struct node {
+ u32 addr;
+ spinlock_t lock;
+ struct cluster *owner;
+ struct node *next;
+ struct list_head nsub;
+ struct link *active_links[2];
+ struct link *links[MAX_BEARERS];
+ int link_cnt;
+ int permit_changeover;
+ u32 routers[512/32];
+ int last_router;
+ struct {
+ int supported;
+ u32 acked;
+ u32 last_in;
+ u32 gap_after;
+ u32 gap_to;
+ u32 nack_sync;
+ struct sk_buff *deferred_head;
+ struct sk_buff *deferred_tail;
+ struct sk_buff *defragm;
+ } bclink;
+};
+
+extern struct node *nodes;
+extern u32 tipc_own_tag;
+
+struct node *node_create(u32 addr);
+void node_delete(struct node *n_ptr);
+struct node *node_attach_link(struct link *l_ptr);
+void node_detach_link(struct node *n_ptr, struct link *l_ptr);
+void node_link_down(struct node *n_ptr, struct link *l_ptr);
+void node_link_up(struct node *n_ptr, struct link *l_ptr);
+int node_has_active_links(struct node *n_ptr);
+int node_has_redundant_links(struct node *n_ptr);
+u32 node_select_router(struct node *n_ptr, u32 ref);
+struct node *node_select_next_hop(u32 addr, u32 selector);
+int node_is_up(struct node *n_ptr);
+void node_add_router(struct node *n_ptr, u32 router);
+void node_remove_router(struct node *n_ptr, u32 router);
+struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space);
+struct sk_buff *node_get_nodes(const void *req_tlv_area, int req_tlv_space);
+
+static inline struct node *node_find(u32 addr)
+{
+ if (likely(in_own_cluster(addr)))
+ return local_nodes[tipc_node(addr)];
+ else if (addr_domain_valid(addr)) {
+ struct cluster *c_ptr = cluster_find(addr);
+
+ if (c_ptr)
+ return c_ptr->nodes[tipc_node(addr)];
+ }
+ return 0;
+}
+
+static inline struct node *node_select(u32 addr, u32 selector)
+{
+ if (likely(in_own_cluster(addr)))
+ return local_nodes[tipc_node(addr)];
+ return node_select_next_hop(addr, selector);
+}
+
+static inline void node_lock(struct node *n_ptr)
+{
+ spin_lock_bh(&n_ptr->lock);
+}
+
+static inline void node_unlock(struct node *n_ptr)
+{
+ spin_unlock_bh(&n_ptr->lock);
+}
+
+#endif
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
new file mode 100644
index 00000000000..79375927916
--- /dev/null
+++ b/net/tipc/node_subscr.c
@@ -0,0 +1,79 @@
+/*
+ * net/tipc/node_subscr.c: TIPC "node down" subscription handling
+ *
+ * Copyright (c) 1995-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "node_subscr.h"
+#include "node.h"
+#include "addr.h"
+
+/**
+ * nodesub_subscribe - create "node down" subscription for specified node
+ */
+
+void nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
+ void *usr_handle, net_ev_handler handle_down)
+{
+ node_sub->node = 0;
+ if (addr == tipc_own_addr)
+ return;
+ if (!addr_node_valid(addr)) {
+ warn("node_subscr with illegal %x\n", addr);
+ return;
+ }
+
+ node_sub->handle_node_down = handle_down;
+ node_sub->usr_handle = usr_handle;
+ node_sub->node = node_find(addr);
+ assert(node_sub->node);
+ node_lock(node_sub->node);
+ list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub);
+ node_unlock(node_sub->node);
+}
+
+/**
+ * nodesub_unsubscribe - cancel "node down" subscription (if any)
+ */
+
+void nodesub_unsubscribe(struct node_subscr *node_sub)
+{
+ if (!node_sub->node)
+ return;
+
+ node_lock(node_sub->node);
+ list_del_init(&node_sub->nodesub_list);
+ node_unlock(node_sub->node);
+}
diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h
new file mode 100644
index 00000000000..a3b87ac4859
--- /dev/null
+++ b/net/tipc/node_subscr.h
@@ -0,0 +1,63 @@
+/*
+ * net/tipc/node_subscr.h: Include file for TIPC "node down" subscription handling
+ *
+ * Copyright (c) 1995-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_NODE_SUBSCR_H
+#define _TIPC_NODE_SUBSCR_H
+
+#include "addr.h"
+
+typedef void (*net_ev_handler) (void *usr_handle);
+
+/**
+ * struct node_subscr - "node down" subscription entry
+ * @node: ptr to node structure of interest (or NULL, if none)
+ * @handle_node_down: routine to invoke when node fails
+ * @usr_handle: argument to pass to routine when node fails
+ * @nodesub_list: adjacent entries in list of subscriptions for the node
+ */
+
+struct node_subscr {
+ struct node *node;
+ net_ev_handler handle_node_down;
+ void *usr_handle;
+ struct list_head nodesub_list;
+};
+
+void nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
+ void *usr_handle, net_ev_handler handle_down);
+void nodesub_unsubscribe(struct node_subscr *node_sub);
+
+#endif
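Illustrative sketch (editorial, not part of the patch): typical use of the subscription API declared above is to embed a struct node_subscr in a longer-lived object, subscribe when that object starts depending on a remote node, and unsubscribe when it stops; port.c in this patch does this via its 'subscription' field and port_handle_node_down(). A hedged kernel-context fragment with invented names (my_conn, my_conn_peer_down):

/* Illustrative only -- shows the intended call pattern, not code from this patch. */
struct my_conn {
	u32 peer_node;			/* network address of the peer we depend on */
	struct node_subscr peer_sub;	/* embedded "node down" subscription */
};

static void my_conn_peer_down(void *usr_handle)
{
	struct my_conn *conn = usr_handle;
	/* Peer node lost all links/routes: tear down dependent state here */
	(void)conn;
}

static void my_conn_watch_peer(struct my_conn *conn, u32 peer_node)
{
	conn->peer_node = peer_node;
	/* No-op if peer_node is the local node or not a valid node address */
	nodesub_subscribe(&conn->peer_sub, peer_node, conn, my_conn_peer_down);
}

static void my_conn_unwatch_peer(struct my_conn *conn)
{
	/* Safe even if the subscription was never established */
	nodesub_unsubscribe(&conn->peer_sub);
}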
diff --git a/net/tipc/port.c b/net/tipc/port.c
new file mode 100644
index 00000000000..66caca7abe9
--- /dev/null
+++ b/net/tipc/port.c
@@ -0,0 +1,1708 @@
+/*
+ * net/tipc/port.c: TIPC port code
+ *
+ * Copyright (c) 1992-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "config.h"
+#include "dbg.h"
+#include "port.h"
+#include "addr.h"
+#include "link.h"
+#include "node.h"
+#include "name_table.h"
+#include "user_reg.h"
+#include "msg.h"
+#include "bcast.h"
+
+/* Connection management: */
+#define PROBING_INTERVAL 3600000 /* [ms] => 1 h */
+#define CONFIRMED 0
+#define PROBING 1
+
+#define MAX_REJECT_SIZE 1024
+
+static struct sk_buff *msg_queue_head = 0;
+static struct sk_buff *msg_queue_tail = 0;
+
+spinlock_t port_list_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED;
+
+LIST_HEAD(ports);
+static void port_handle_node_down(unsigned long ref);
+static struct sk_buff* port_build_self_abort_msg(struct port *,u32 err);
+static struct sk_buff* port_build_peer_abort_msg(struct port *,u32 err);
+static void port_timeout(unsigned long ref);
+
+
+static inline u32 port_peernode(struct port *p_ptr)
+{
+ return msg_destnode(&p_ptr->publ.phdr);
+}
+
+static inline u32 port_peerport(struct port *p_ptr)
+{
+ return msg_destport(&p_ptr->publ.phdr);
+}
+
+static inline u32 port_out_seqno(struct port *p_ptr)
+{
+ return msg_transp_seqno(&p_ptr->publ.phdr);
+}
+
+static inline void port_set_out_seqno(struct port *p_ptr, u32 seqno)
+{
+ msg_set_transp_seqno(&p_ptr->publ.phdr,seqno);
+}
+
+static inline void port_incr_out_seqno(struct port *p_ptr)
+{
+ struct tipc_msg *m = &p_ptr->publ.phdr;
+
+ if (likely(!msg_routed(m)))
+ return;
+ msg_set_transp_seqno(m, (msg_transp_seqno(m) + 1));
+}
+
+/**
+ * tipc_multicast - send a multicast message to local and remote destinations
+ */
+
+int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
+ u32 num_sect, struct iovec const *msg_sect)
+{
+ struct tipc_msg *hdr;
+ struct sk_buff *buf;
+ struct sk_buff *ibuf = NULL;
+ struct port_list dports = {0, NULL, };
+ struct port *oport = port_deref(ref);
+ int ext_targets;
+ int res;
+
+ if (unlikely(!oport))
+ return -EINVAL;
+
+ /* Create multicast message */
+
+ hdr = &oport->publ.phdr;
+ msg_set_type(hdr, TIPC_MCAST_MSG);
+ msg_set_nametype(hdr, seq->type);
+ msg_set_namelower(hdr, seq->lower);
+ msg_set_nameupper(hdr, seq->upper);
+ msg_set_hdr_sz(hdr, MCAST_H_SIZE);
+ res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
+ !oport->user_port, &buf);
+ if (unlikely(!buf))
+ return res;
+
+ /* Figure out where to send multicast message */
+
+ ext_targets = nametbl_mc_translate(seq->type, seq->lower, seq->upper,
+ TIPC_NODE_SCOPE, &dports);
+
+ /* Send message to destinations (duplicate it only if necessary) */
+
+ if (ext_targets) {
+ if (dports.count != 0) {
+ ibuf = skb_copy(buf, GFP_ATOMIC);
+ if (ibuf == NULL) {
+ port_list_free(&dports);
+ buf_discard(buf);
+ return -ENOMEM;
+ }
+ }
+ res = bclink_send_msg(buf);
+ if ((res < 0) && (dports.count != 0)) {
+ buf_discard(ibuf);
+ }
+ } else {
+ ibuf = buf;
+ }
+
+ if (res >= 0) {
+ if (ibuf)
+ port_recv_mcast(ibuf, &dports);
+ } else {
+ port_list_free(&dports);
+ }
+ return res;
+}
+
+/**
+ * port_recv_mcast - deliver multicast message to all destination ports
+ *
+ * If there is no port list, perform a lookup to create one
+ */
+
+void port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
+{
+ struct tipc_msg* msg;
+ struct port_list dports = {0, NULL, };
+ struct port_list *item = dp;
+ int cnt = 0;
+
+ assert(buf);
+ msg = buf_msg(buf);
+
+ /* Create destination port list, if one wasn't supplied */
+
+ if (dp == NULL) {
+ nametbl_mc_translate(msg_nametype(msg),
+ msg_namelower(msg),
+ msg_nameupper(msg),
+ TIPC_CLUSTER_SCOPE,
+ &dports);
+ item = dp = &dports;
+ }
+
+ /* Deliver a copy of message to each destination port */
+
+ if (dp->count != 0) {
+ if (dp->count == 1) {
+ msg_set_destport(msg, dp->ports[0]);
+ port_recv_msg(buf);
+ port_list_free(dp);
+ return;
+ }
+ for (; cnt < dp->count; cnt++) {
+ int index = cnt % PLSIZE;
+ struct sk_buff *b = skb_clone(buf, GFP_ATOMIC);
+
+ if (b == NULL) {
+ warn("Buffer allocation failure\n");
+ msg_dbg(msg, "LOST:");
+ goto exit;
+ }
+ if ((index == 0) && (cnt != 0)) {
+ item = item->next;
+ }
+ msg_set_destport(buf_msg(b),item->ports[index]);
+ port_recv_msg(b);
+ }
+ }
+exit:
+ buf_discard(buf);
+ port_list_free(dp);
+}
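Illustrative sketch (editorial, not part of the patch): the delivery loop above walks a chunked destination list in which the head holds the total count and each chunk holds PLSIZE port references. The layout assumed below is inferred from its use here and may differ from the real struct port_list in bcast.h; all names are invented:

#include <stdio.h>

#define PLSIZE 32	/* assumed chunk capacity; the real value is defined elsewhere */

struct demo_plist {
	int count;			/* total number of ports (head chunk only) */
	struct demo_plist *next;	/* next chunk of PLSIZE entries */
	unsigned int ports[PLSIZE];
};

/* Visit every destination: entry i lives in chunk i / PLSIZE at index i % PLSIZE. */
static void deliver_to_all(struct demo_plist *head)
{
	struct demo_plist *item = head;
	int i;

	for (i = 0; i < head->count; i++) {
		int index = i % PLSIZE;

		if (index == 0 && i != 0)
			item = item->next;	/* crossed into the next chunk */
		printf("deliver copy to port %u\n", item->ports[index]);
	}
}

int main(void)
{
	struct demo_plist tail = { 0, NULL, { 0 } };
	struct demo_plist head = { 0, &tail, { 0 } };
	int i;

	for (i = 0; i < 40; i++) {		/* 40 destinations span two chunks */
		if (i < PLSIZE)
			head.ports[i] = 100 + i;
		else
			tail.ports[i - PLSIZE] = 100 + i;
	}
	head.count = 40;
	deliver_to_all(&head);
	return 0;
}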
+
+/**
+ * tipc_createport_raw - create a native TIPC port
+ *
+ * Returns local port reference
+ */
+
+u32 tipc_createport_raw(void *usr_handle,
+ u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
+ void (*wakeup)(struct tipc_port *),
+ const u32 importance)
+{
+ struct port *p_ptr;
+ struct tipc_msg *msg;
+ u32 ref;
+
+ p_ptr = kmalloc(sizeof(*p_ptr), GFP_ATOMIC);
+ if (p_ptr == NULL) {
+ warn("Memory squeeze; failed to create port\n");
+ return 0;
+ }
+ memset(p_ptr, 0, sizeof(*p_ptr));
+ ref = ref_acquire(p_ptr, &p_ptr->publ.lock);
+ if (!ref) {
+ warn("Reference Table Exhausted\n");
+ kfree(p_ptr);
+ return 0;
+ }
+
+ port_lock(ref);
+ p_ptr->publ.ref = ref;
+ msg = &p_ptr->publ.phdr;
+ msg_init(msg, DATA_LOW, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE, 0);
+ msg_set_orignode(msg, tipc_own_addr);
+ msg_set_prevnode(msg, tipc_own_addr);
+ msg_set_origport(msg, ref);
+ msg_set_importance(msg,importance);
+ p_ptr->last_in_seqno = 41;
+ p_ptr->sent = 1;
+ p_ptr->publ.usr_handle = usr_handle;
+ INIT_LIST_HEAD(&p_ptr->wait_list);
+ INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
+ p_ptr->congested_link = 0;
+ p_ptr->max_pkt = MAX_PKT_DEFAULT;
+ p_ptr->dispatcher = dispatcher;
+ p_ptr->wakeup = wakeup;
+ p_ptr->user_port = 0;
+ k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
+ spin_lock_bh(&port_list_lock);
+ INIT_LIST_HEAD(&p_ptr->publications);
+ INIT_LIST_HEAD(&p_ptr->port_list);
+ list_add_tail(&p_ptr->port_list, &ports);
+ spin_unlock_bh(&port_list_lock);
+ port_unlock(p_ptr);
+ return ref;
+}
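Illustrative sketch (editorial, not part of the patch): a hedged example of how a native user might call tipc_createport_raw() as defined above, supplying a receive dispatcher and a congestion wakeup callback. The callback bodies, my_open_port and the TIPC_LOW_IMPORTANCE constant are assumptions, not code from this patch:

/* Illustrative only -- shows the call pattern for the function above. */
static u32 my_dispatcher(struct tipc_port *port, struct sk_buff *buf)
{
	/* Consume (or forward) the incoming buffer; report success */
	buf_discard(buf);
	return TIPC_OK;
}

static void my_wakeup(struct tipc_port *port)
{
	/* Port is no longer congested: resume sending here */
}

static u32 my_open_port(void *my_state)
{
	u32 ref = tipc_createport_raw(my_state, my_dispatcher, my_wakeup,
				      TIPC_LOW_IMPORTANCE);

	if (!ref)
		return 0;	/* out of memory or reference table exhausted */
	return ref;		/* local port reference for later tipc_*() calls */
}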
+
+int tipc_deleteport(u32 ref)
+{
+ struct port *p_ptr;
+ struct sk_buff *buf = 0;
+
+ tipc_withdraw(ref, 0, 0);
+ p_ptr = port_lock(ref);
+ if (!p_ptr)
+ return -EINVAL;
+
+ ref_discard(ref);
+ port_unlock(p_ptr);
+
+ k_cancel_timer(&p_ptr->timer);
+ if (p_ptr->publ.connected) {
+ buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
+ nodesub_unsubscribe(&p_ptr->subscription);
+ }
+ if (p_ptr->user_port) {
+ reg_remove_port(p_ptr->user_port);
+ kfree(p_ptr->user_port);
+ }
+
+ spin_lock_bh(&port_list_lock);
+ list_del(&p_ptr->port_list);
+ list_del(&p_ptr->wait_list);
+ spin_unlock_bh(&port_list_lock);
+ k_term_timer(&p_ptr->timer);
+ kfree(p_ptr);
+ dbg("Deleted port %u\n", ref);
+ net_route_msg(buf);
+ return TIPC_OK;
+}
+
+/**
+ * tipc_get_port() - return port associated with 'ref'
+ *
+ * Note: Port is not locked.
+ */
+
+struct tipc_port *tipc_get_port(const u32 ref)
+{
+ return (struct tipc_port *)ref_deref(ref);
+}
+
+/**
+ * tipc_get_handle - return user handle associated with port 'ref'
+ */
+
+void *tipc_get_handle(const u32 ref)
+{
+ struct port *p_ptr;
+ void * handle;
+
+ p_ptr = port_lock(ref);
+ if (!p_ptr)
+ return 0;
+ handle = p_ptr->publ.usr_handle;
+ port_unlock(p_ptr);
+ return handle;
+}
+
+static inline int port_unreliable(struct port *p_ptr)
+{
+ return msg_src_droppable(&p_ptr->publ.phdr);
+}
+
+int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
+{
+ struct port *p_ptr;
+
+ p_ptr = port_lock(ref);
+ if (!p_ptr)
+ return -EINVAL;
+ *isunreliable = port_unreliable(p_ptr);
+ spin_unlock_bh(p_ptr->publ.lock);
+ return TIPC_OK;
+}
+
+int tipc_set_portunreliable(u32 ref, unsigned int isunreliable)
+{
+ struct port *p_ptr;
+
+ p_ptr = port_lock(ref);
+ if (!p_ptr)
+ return -EINVAL;
+ msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0));
+ port_unlock(p_ptr);
+ return TIPC_OK;
+}
+
+static inline int port_unreturnable(struct port *p_ptr)
+{
+ return msg_dest_droppable(&p_ptr->publ.phdr);
+}
+
+int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
+{
+ struct port *p_ptr;
+
+ p_ptr = port_lock(ref);
+ if (!p_ptr)
+ return -EINVAL;
+ *isunrejectable = port_unreturnable(p_ptr);
+ spin_unlock_bh(p_ptr->publ.lock);
+ return TIPC_OK;
+}
+
+int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
+{
+ struct port *p_ptr;
+
+ p_ptr = port_lock(ref);
+ if (!p_ptr)
+ return -EINVAL;
+ msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0));
+ port_unlock(p_ptr);
+ return TIPC_OK;
+}
+
+/*
+ * port_build_proto_msg(): build a port level protocol
+ * or a connection abort message. Called with
+ * tipc_port lock on.
+ */
+static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
+ u32 origport, u32 orignode,
+ u32 usr, u32 type, u32 err,
+ u32 seqno, u32 ack)
+{
+ struct sk_buff *buf;
+ struct tipc_msg *msg;
+
+ buf = buf_acquire(LONG_H_SIZE);
+ if (buf) {
+ msg = buf_msg(buf);
+ msg_init(msg, usr, type, err, LONG_H_SIZE, destnode);
+ msg_set_destport(msg, destport);
+ msg_set_origport(msg, origport);
+ msg_set_destnode(msg, destnode);
+ msg_set_orignode(msg, orignode);
+ msg_set_transp_seqno(msg, seqno);
+ msg_set_msgcnt(msg, ack);
+ msg_dbg(msg, "PORT>SEND>:");
+ }
+ return buf;
+}
+
+int tipc_set_msg_option(struct tipc_port *tp_ptr, const char *opt, const u32 sz)
+{
+ msg_expand(&tp_ptr->phdr, msg_destnode(&tp_ptr->phdr));
+ msg_set_options(&tp_ptr->phdr, opt, sz);
+ return TIPC_OK;
+}
+
+int tipc_reject_msg(struct sk_buff *buf, u32 err)
+{
+ struct tipc_msg *msg = buf_msg(buf);
+ struct sk_buff *rbuf;
+ struct tipc_msg *rmsg;
+ int hdr_sz;
+ u32 imp = msg_importance(msg);
+ u32 data_sz = msg_data_sz(msg);
+
+ if (data_sz > MAX_REJECT_SIZE)
+ data_sz = MAX_REJECT_SIZE;
+ if (msg_connected(msg) && (imp < TIPC_CRITICAL_IMPORTANCE))
+ imp++;
+ msg_dbg(msg, "port->rej: ");
+
+ /* discard rejected message if it shouldn't be returned to sender */
+ if (msg_errcode(msg) || msg_dest_droppable(msg)) {
+ buf_discard(buf);
+ return data_sz;
+ }
+
+ /* construct rejected message */
+ if (msg_mcast(msg))
+ hdr_sz = MCAST_H_SIZE;
+ else
+ hdr_sz = LONG_H_SIZE;
+ rbuf = buf_acquire(data_sz + hdr_sz);
+ if (rbuf == NULL) {
+ buf_discard(buf);
+ return data_sz;
+ }
+ rmsg = buf_msg(rbuf);
+ msg_init(rmsg, imp, msg_type(msg), err, hdr_sz, msg_orignode(msg));
+ msg_set_destport(rmsg, msg_origport(msg));
+ msg_set_prevnode(rmsg, tipc_own_addr);
+ msg_set_origport(rmsg, msg_destport(msg));
+ if (msg_short(msg))
+ msg_set_orignode(rmsg, tipc_own_addr);
+ else
+ msg_set_orignode(rmsg, msg_destnode(msg));
+ msg_set_size(rmsg, data_sz + hdr_sz);
+ msg_set_nametype(rmsg, msg_nametype(msg));
+ msg_set_nameinst(rmsg, msg_nameinst(msg));
+ memcpy(rbuf->data + hdr_sz, msg_data(msg), data_sz);
+
+ /* send self-abort message when rejecting on a connected port */
+ if (msg_connected(msg)) {
+ struct sk_buff *abuf = 0;
+ struct port *p_ptr = port_lock(msg_destport(msg));
+
+ if (p_ptr) {
+ if (p_ptr->publ.connected)
+ abuf = port_build_self_abort_msg(p_ptr, err);
+ port_unlock(p_ptr);
+ }
+ net_route_msg(abuf);
+ }
+
+ /* send rejected message */
+ buf_discard(buf);
+ net_route_msg(rbuf);
+ return data_sz;
+}
+
+int port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
+ struct iovec const *msg_sect, u32 num_sect,
+ int err)
+{
+ struct sk_buff *buf;
+ int res;
+
+ res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
+ !p_ptr->user_port, &buf);
+ if (!buf)
+ return res;
+
+ return tipc_reject_msg(buf, err);
+}
+
+static void port_timeout(unsigned long ref)
+{
+ struct port *p_ptr = port_lock(ref);
+ struct sk_buff *buf = 0;
+
+ if (!p_ptr || !p_ptr->publ.connected)
+ return;
+
+ /* Last probe answered? */
+ if (p_ptr->probing_state == PROBING) {
+ buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
+ } else {
+ buf = port_build_proto_msg(port_peerport(p_ptr),
+ port_peernode(p_ptr),
+ p_ptr->publ.ref,
+ tipc_own_addr,
+ CONN_MANAGER,
+ CONN_PROBE,
+ TIPC_OK,
+ port_out_seqno(p_ptr),
+ 0);
+ port_incr_out_seqno(p_ptr);
+ p_ptr->probing_state = PROBING;
+ k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
+ }
+ port_unlock(p_ptr);
+ net_route_msg(buf);
+}
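Illustrative sketch (editorial, not part of the patch): connection supervision above is a two-state machine: each expiry of the probing timer either sends a probe and re-arms (CONFIRMED -> PROBING), or, if the previous probe is still unanswered, builds a self-abort message; valid peer traffic resets the state to CONFIRMED in port_recv_proto_msg() below. A stand-alone rendering with invented names:

#include <stdio.h>

enum probe_state { CONFIRMED, PROBING };

struct demo_conn {
	enum probe_state state;
	int aborted;
};

/* Timer expiry: abort if the last probe went unanswered, else probe again. */
static void on_probe_timer(struct demo_conn *c)
{
	if (c->state == PROBING) {
		c->aborted = 1;		/* peer never answered: abort connection */
		return;
	}
	c->state = PROBING;		/* send CONN_PROBE and wait for the next expiry */
}

/* Any valid peer message (including CONN_PROBE_REPLY) confirms the peer is alive. */
static void on_peer_traffic(struct demo_conn *c)
{
	c->state = CONFIRMED;
}

int main(void)
{
	struct demo_conn c = { CONFIRMED, 0 };

	on_probe_timer(&c);	/* probe sent */
	on_peer_traffic(&c);	/* reply arrives in time */
	on_probe_timer(&c);	/* probe sent again */
	on_probe_timer(&c);	/* no reply: aborted */
	printf("aborted = %d\n", c.aborted);
	return 0;
}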
+
+
+static void port_handle_node_down(unsigned long ref)
+{
+ struct port *p_ptr = port_lock(ref);
+ struct sk_buff* buf = 0;
+
+ if (!p_ptr)
+ return;
+ buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE);
+ port_unlock(p_ptr);
+ net_route_msg(buf);
+}
+
+
+static struct sk_buff *port_build_self_abort_msg(struct port *p_ptr, u32 err)
+{
+ u32 imp = msg_importance(&p_ptr->publ.phdr);
+
+ if (!p_ptr->publ.connected)
+ return 0;
+ if (imp < TIPC_CRITICAL_IMPORTANCE)
+ imp++;
+ return port_build_proto_msg(p_ptr->publ.ref,
+ tipc_own_addr,
+ port_peerport(p_ptr),
+ port_peernode(p_ptr),
+ imp,
+ TIPC_CONN_MSG,
+ err,
+ p_ptr->last_in_seqno + 1,
+ 0);
+}
+
+
+static struct sk_buff *port_build_peer_abort_msg(struct port *p_ptr, u32 err)
+{
+ u32 imp = msg_importance(&p_ptr->publ.phdr);
+
+ if (!p_ptr->publ.connected)
+ return 0;
+ if (imp < TIPC_CRITICAL_IMPORTANCE)
+ imp++;
+ return port_build_proto_msg(port_peerport(p_ptr),
+ port_peernode(p_ptr),
+ p_ptr->publ.ref,
+ tipc_own_addr,
+ imp,
+ TIPC_CONN_MSG,
+ err,
+ port_out_seqno(p_ptr),
+ 0);
+}
+
+void port_recv_proto_msg(struct sk_buff *buf)
+{
+ struct tipc_msg *msg = buf_msg(buf);
+ struct port *p_ptr = port_lock(msg_destport(msg));
+ u32 err = TIPC_OK;
+ struct sk_buff *r_buf = 0;
+ struct sk_buff *abort_buf = 0;
+
+ msg_dbg(msg, "PORT<RECV<:");
+
+ if (!p_ptr) {
+ err = TIPC_ERR_NO_PORT;
+ } else if (p_ptr->publ.connected) {
+ if (port_peernode(p_ptr) != msg_orignode(msg))
+ err = TIPC_ERR_NO_PORT;
+ if (port_peerport(p_ptr) != msg_origport(msg))
+ err = TIPC_ERR_NO_PORT;
+ if (!err && msg_routed(msg)) {
+ u32 seqno = msg_transp_seqno(msg);
+ u32 myno = ++p_ptr->last_in_seqno;
+ if (seqno != myno) {
+ err = TIPC_ERR_NO_PORT;
+ abort_buf = port_build_self_abort_msg(p_ptr, err);
+ }
+ }
+ if (msg_type(msg) == CONN_ACK) {
+ int wakeup = port_congested(p_ptr) &&
+ p_ptr->publ.congested &&
+ p_ptr->wakeup;
+ p_ptr->acked += msg_msgcnt(msg);
+ if (port_congested(p_ptr))
+ goto exit;
+ p_ptr->publ.congested = 0;
+ if (!wakeup)
+ goto exit;
+ p_ptr->wakeup(&p_ptr->publ);
+ goto exit;
+ }
+ } else if (p_ptr->publ.published) {
+ err = TIPC_ERR_NO_PORT;
+ }
+ if (err) {
+ r_buf = port_build_proto_msg(msg_origport(msg),
+ msg_orignode(msg),
+ msg_destport(msg),
+ tipc_own_addr,
+ DATA_HIGH,
+ TIPC_CONN_MSG,
+ err,
+ 0,
+ 0);
+ goto exit;
+ }
+
+ /* All is fine */
+ if (msg_type(msg) == CONN_PROBE) {
+ r_buf = port_build_proto_msg(msg_origport(msg),
+ msg_orignode(msg),
+ msg_destport(msg),
+ tipc_own_addr,
+ CONN_MANAGER,
+ CONN_PROBE_REPLY,
+ TIPC_OK,
+ port_out_seqno(p_ptr),
+ 0);
+ }
+ p_ptr->probing_state = CONFIRMED;
+ port_incr_out_seqno(p_ptr);
+exit:
+ if (p_ptr)
+ port_unlock(p_ptr);
+ net_route_msg(r_buf);
+ net_route_msg(abort_buf);
+ buf_discard(buf);
+}
+
+static void port_print(struct port *p_ptr, struct print_buf *buf, int full_id)
+{
+ struct publication *publ;
+
+ if (full_id)
+ tipc_printf(buf, "<%u.%u.%u:%u>:",
+ tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
+ tipc_node(tipc_own_addr), p_ptr->publ.ref);
+ else
+ tipc_printf(buf, "%-10u:", p_ptr->publ.ref);
+
+ if (p_ptr->publ.connected) {
+ u32 dport = port_peerport(p_ptr);
+ u32 destnode = port_peernode(p_ptr);
+
+ tipc_printf(buf, " connected to <%u.%u.%u:%u>",
+ tipc_zone(destnode), tipc_cluster(destnode),
+ tipc_node(destnode), dport);
+ if (p_ptr->publ.conn_type != 0)
+ tipc_printf(buf, " via {%u,%u}",
+ p_ptr->publ.conn_type,
+ p_ptr->publ.conn_instance);
+ }
+ else if (p_ptr->publ.published) {
+ tipc_printf(buf, " bound to");
+ list_for_each_entry(publ, &p_ptr->publications, pport_list) {
+ if (publ->lower == publ->upper)
+ tipc_printf(buf, " {%u,%u}", publ->type,
+ publ->lower);
+ else
+ tipc_printf(buf, " {%u,%u,%u}", publ->type,
+ publ->lower, publ->upper);
+ }
+ }
+ tipc_printf(buf, "\n");
+}
+
+#define MAX_PORT_QUERY 32768
+
+struct sk_buff *port_get_ports(void)
+{
+ struct sk_buff *buf;
+ struct tlv_desc *rep_tlv;
+ struct print_buf pb;
+ struct port *p_ptr;
+ int str_len;
+
+ buf = cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY));
+ if (!buf)
+ return NULL;
+ rep_tlv = (struct tlv_desc *)buf->data;
+
+ printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY);
+ spin_lock_bh(&port_list_lock);
+ list_for_each_entry(p_ptr, &ports, port_list) {
+ spin_lock_bh(p_ptr->publ.lock);
+ port_print(p_ptr, &pb, 0);
+ spin_unlock_bh(p_ptr->publ.lock);
+ }
+ spin_unlock_bh(&port_list_lock);
+ str_len = printbuf_validate(&pb);
+
+ skb_put(buf, TLV_SPACE(str_len));
+ TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
+
+ return buf;
+}
+
+#if 0
+
+#define MAX_PORT_STATS 2000
+
+struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space)
+{
+ u32 ref;
+ struct port *p_ptr;
+ struct sk_buff *buf;
+ struct tlv_desc *rep_tlv;
+ struct print_buf pb;
+ int str_len;
+
+ if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_PORT_REF))
+ return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+ ref = *(u32 *)TLV_DATA(req_tlv_area);
+ ref = ntohl(ref);
+
+ p_ptr = port_lock(ref);
+ if (!p_ptr)
+ return cfg_reply_error_string("port not found");
+
+ buf = cfg_reply_alloc(TLV_SPACE(MAX_PORT_STATS));
+ if (!buf) {
+ port_unlock(p_ptr);
+ return NULL;
+ }
+ rep_tlv = (struct tlv_desc *)buf->data;
+
+ printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_STATS);
+ port_print(p_ptr, &pb, 1);
+ /* NEED TO FILL IN ADDITIONAL PORT STATISTICS HERE */
+ port_unlock(p_ptr);
+ str_len = printbuf_validate(&pb);
+
+ skb_put(buf, TLV_SPACE(str_len));
+ TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
+
+ return buf;
+}
+
+#endif
+
+void port_reinit(void)
+{
+ struct port *p_ptr;
+ struct tipc_msg *msg;
+
+ spin_lock_bh(&port_list_lock);
+ list_for_each_entry(p_ptr, &ports, port_list) {
+ msg = &p_ptr->publ.phdr;
+ if (msg_orignode(msg) == tipc_own_addr)
+ break;
+ msg_set_orignode(msg, tipc_own_addr);
+ }
+ spin_unlock_bh(&port_list_lock);
+}
+
+
+/*
+ * port_dispatcher_sigh(): Signal handler for messages destined
+ * to the tipc_port interface.
+ */
+
+static void port_dispatcher_sigh(void *dummy)
+{
+ struct sk_buff *buf;
+
+ spin_lock_bh(&queue_lock);
+ buf = msg_queue_head;
+ msg_queue_head = 0;
+ spin_unlock_bh(&queue_lock);
+
+ while (buf) {
+ struct port *p_ptr;
+ struct user_port *up_ptr;
+ struct tipc_portid orig;
+ struct tipc_name_seq dseq;
+ void *usr_handle;
+ int connected;
+ int published;
+
+ struct sk_buff *next = buf->next;
+ struct tipc_msg *msg = buf_msg(buf);
+ u32 dref = msg_destport(msg);
+
+ p_ptr = port_lock(dref);
+ if (!p_ptr) {
+ /* Port deleted while msg in queue */
+ tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
+ buf = next;
+ continue;
+ }
+ orig.ref = msg_origport(msg);
+ orig.node = msg_orignode(msg);
+ up_ptr = p_ptr->user_port;
+ usr_handle = up_ptr->usr_handle;
+ connected = p_ptr->publ.connected;
+ published = p_ptr->publ.published;
+
+ if (unlikely(msg_errcode(msg)))
+ goto err;
+
+ switch (msg_type(msg)) {
+
+ case TIPC_CONN_MSG:{
+ tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
+ u32 peer_port = port_peerport(p_ptr);
+ u32 peer_node = port_peernode(p_ptr);
+
+ spin_unlock_bh(p_ptr->publ.lock);
+ if (unlikely(!connected)) {
+ if (unlikely(published))
+ goto reject;
+ tipc_connect2port(dref,&orig);
+ }
+ if (unlikely(msg_origport(msg) != peer_port))
+ goto reject;
+ if (unlikely(msg_orignode(msg) != peer_node))
+ goto reject;
+ if (unlikely(!cb))
+ goto reject;
+ if (unlikely(++p_ptr->publ.conn_unacked >=
+ TIPC_FLOW_CONTROL_WIN))
+ tipc_acknowledge(dref,
+ p_ptr->publ.conn_unacked);
+ skb_pull(buf, msg_hdr_sz(msg));
+ cb(usr_handle, dref, &buf, msg_data(msg),
+ msg_data_sz(msg));
+ break;
+ }
+ case TIPC_DIRECT_MSG:{
+ tipc_msg_event cb = up_ptr->msg_cb;
+
+ spin_unlock_bh(p_ptr->publ.lock);
+ if (unlikely(connected))
+ goto reject;
+ if (unlikely(!cb))
+ goto reject;
+ skb_pull(buf, msg_hdr_sz(msg));
+ cb(usr_handle, dref, &buf, msg_data(msg),
+ msg_data_sz(msg), msg_importance(msg),
+ &orig);
+ break;
+ }
+ case TIPC_NAMED_MSG:{
+ tipc_named_msg_event cb = up_ptr->named_msg_cb;
+
+ spin_unlock_bh(p_ptr->publ.lock);
+ if (unlikely(connected))
+ goto reject;
+ if (unlikely(!cb))
+ goto reject;
+ if (unlikely(!published))
+ goto reject;
+ dseq.type = msg_nametype(msg);
+ dseq.lower = msg_nameinst(msg);
+ dseq.upper = dseq.lower;
+ skb_pull(buf, msg_hdr_sz(msg));
+ cb(usr_handle, dref, &buf, msg_data(msg),
+ msg_data_sz(msg), msg_importance(msg),
+ &orig, &dseq);
+ break;
+ }
+ }
+ if (buf)
+ buf_discard(buf);
+ buf = next;
+ continue;
+err:
+ switch (msg_type(msg)) {
+
+ case TIPC_CONN_MSG:{
+ tipc_conn_shutdown_event cb =
+ up_ptr->conn_err_cb;
+ u32 peer_port = port_peerport(p_ptr);
+ u32 peer_node = port_peernode(p_ptr);
+
+ spin_unlock_bh(p_ptr->publ.lock);
+ if (!connected || !cb)
+ break;
+ if (msg_origport(msg) != peer_port)
+ break;
+ if (msg_orignode(msg) != peer_node)
+ break;
+ tipc_disconnect(dref);
+ skb_pull(buf, msg_hdr_sz(msg));
+ cb(usr_handle, dref, &buf, msg_data(msg),
+ msg_data_sz(msg), msg_errcode(msg));
+ break;
+ }
+ case TIPC_DIRECT_MSG:{
+ tipc_msg_err_event cb = up_ptr->err_cb;
+
+ spin_unlock_bh(p_ptr->publ.lock);
+ if (connected || !cb)
+ break;
+ skb_pull(buf, msg_hdr_sz(msg));
+ cb(usr_handle, dref, &buf, msg_data(msg),
+ msg_data_sz(msg), msg_errcode(msg), &orig);
+ break;
+ }
+ case TIPC_NAMED_MSG:{
+ tipc_named_msg_err_event cb =
+ up_ptr->named_err_cb;
+
+ spin_unlock_bh(p_ptr->publ.lock);
+ if (connected || !cb)
+ break;
+ dseq.type = msg_nametype(msg);
+ dseq.lower = msg_nameinst(msg);
+ dseq.upper = dseq.lower;
+ skb_pull(buf, msg_hdr_sz(msg));
+ cb(usr_handle, dref, &buf, msg_data(msg),
+ msg_data_sz(msg), msg_errcode(msg), &dseq);
+ break;
+ }
+ }
+ if (buf)
+ buf_discard(buf);
+ buf = next;
+ continue;
+reject:
+ tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
+ buf = next;
+ }
+}
+
+/*
+ * port_dispatcher(): Dispatcher for messages destined
+ * for the tipc_port interface. Called with port locked.
+ */
+
+static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
+{
+ buf->next = NULL;
+ spin_lock_bh(&queue_lock);
+ if (msg_queue_head) {
+ msg_queue_tail->next = buf;
+ msg_queue_tail = buf;
+ } else {
+ msg_queue_tail = msg_queue_head = buf;
+ k_signal((Handler)port_dispatcher_sigh, 0);
+ }
+ spin_unlock_bh(&queue_lock);
+ return TIPC_OK;
+}
+
+/*
+ * Wake up port after congestion. Called with port locked.
+ *
+ */
+
+static void port_wakeup_sh(unsigned long ref)
+{
+ struct port *p_ptr;
+ struct user_port *up_ptr;
+ tipc_continue_event cb = 0;
+ void *uh = 0;
+
+ p_ptr = port_lock(ref);
+ if (p_ptr) {
+ up_ptr = p_ptr->user_port;
+ if (up_ptr) {
+ cb = up_ptr->continue_event_cb;
+ uh = up_ptr->usr_handle;
+ }
+ port_unlock(p_ptr);
+ }
+ if (cb)
+ cb(uh, ref);
+}
+
+
+static void port_wakeup(struct tipc_port *p_ptr)
+{
+ k_signal((Handler)port_wakeup_sh, p_ptr->ref);
+}
+
+void tipc_acknowledge(u32 ref, u32 ack)
+{
+ struct port *p_ptr;
+ struct sk_buff *buf = 0;
+
+ p_ptr = port_lock(ref);
+ if (!p_ptr)
+ return;
+ if (p_ptr->publ.connected) {
+ p_ptr->publ.conn_unacked -= ack;
+ buf = port_build_proto_msg(port_peerport(p_ptr),
+ port_peernode(p_ptr),
+ ref,
+ tipc_own_addr,
+ CONN_MANAGER,
+ CONN_ACK,
+ TIPC_OK,
+ port_out_seqno(p_ptr),
+ ack);
+ }
+ port_unlock(p_ptr);
+ net_route_msg(buf);
+}
+
+/*
+ * tipc_createport(): user-level call. Adds the port to the
+ * registry if user_ref is non-zero.
+ */
+
+int tipc_createport(u32 user_ref,
+ void *usr_handle,
+ unsigned int importance,
+ tipc_msg_err_event error_cb,
+ tipc_named_msg_err_event named_error_cb,
+ tipc_conn_shutdown_event conn_error_cb,
+ tipc_msg_event msg_cb,
+ tipc_named_msg_event named_msg_cb,
+ tipc_conn_msg_event conn_msg_cb,
+ tipc_continue_event continue_event_cb,/* May be zero */
+ u32 *portref)
+{
+ struct user_port *up_ptr;
+ struct port *p_ptr;
+ u32 ref;
+
+ up_ptr = (struct user_port *)kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
+ if (up_ptr == NULL) {
+ return -ENOMEM;
+ }
+ ref = tipc_createport_raw(0, port_dispatcher, port_wakeup, importance);
+ p_ptr = port_lock(ref);
+ if (!p_ptr) {
+ kfree(up_ptr);
+ return -ENOMEM;
+ }
+
+ p_ptr->user_port = up_ptr;
+ up_ptr->user_ref = user_ref;
+ up_ptr->usr_handle = usr_handle;
+ up_ptr->ref = p_ptr->publ.ref;
+ up_ptr->err_cb = error_cb;
+ up_ptr->named_err_cb = named_error_cb;
+ up_ptr->conn_err_cb = conn_error_cb;
+ up_ptr->msg_cb = msg_cb;
+ up_ptr->named_msg_cb = named_msg_cb;
+ up_ptr->conn_msg_cb = conn_msg_cb;
+ up_ptr->continue_event_cb = continue_event_cb;
+ INIT_LIST_HEAD(&up_ptr->uport_list);
+ reg_add_port(up_ptr);
+ *portref = p_ptr->publ.ref;
+ dbg(" tipc_createport: %x with ref %u\n", p_ptr, p_ptr->publ.ref);
+ port_unlock(p_ptr);
+ return TIPC_OK;
+}
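
A minimal sketch of how a caller of the native API above might use tipc_createport() together with tipc_publish(); this is a hypothetical user of the patch, not code from it. The seven receive/error callbacks are left NULL for brevity, and a real caller would register handlers using the callback typedefs declared in the TIPC API headers.

    static void example_native_port(void)
    {
            struct tipc_name_seq seq = { .type = 1000, .lower = 17, .upper = 17 };
            u32 ref;

            /* unregistered port (user_ref 0) with low importance */
            if (tipc_createport(0, NULL, TIPC_LOW_IMPORTANCE,
                                NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                                &ref) != TIPC_OK)
                    return;

            /* make the port reachable as name {1000,17} on this node only */
            if (tipc_publish(ref, TIPC_NODE_SCOPE, &seq) != TIPC_OK)
                    tipc_deleteport(ref);
    }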
+
+int tipc_ownidentity(u32 ref, struct tipc_portid *id)
+{
+ id->ref = ref;
+ id->node = tipc_own_addr;
+ return TIPC_OK;
+}
+
+int tipc_portimportance(u32 ref, unsigned int *importance)
+{
+ struct port *p_ptr;
+
+ p_ptr = port_lock(ref);
+ if (!p_ptr)
+ return -EINVAL;
+ *importance = (unsigned int)msg_importance(&p_ptr->publ.phdr);
+ spin_unlock_bh(p_ptr->publ.lock);
+ return TIPC_OK;
+}
+
+int tipc_set_portimportance(u32 ref, unsigned int imp)
+{
+ struct port *p_ptr;
+
+ if (imp > TIPC_CRITICAL_IMPORTANCE)
+ return -EINVAL;
+
+ p_ptr = port_lock(ref);
+ if (!p_ptr)
+ return -EINVAL;
+ msg_set_importance(&p_ptr->publ.phdr, (u32)imp);
+ spin_unlock_bh(p_ptr->publ.lock);
+ return TIPC_OK;
+}
+
+
+int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
+{
+ struct port *p_ptr;
+ struct publication *publ;
+ u32 key;
+ int res = -EINVAL;
+
+ p_ptr = port_lock(ref);
+ dbg("tipc_publ %u, p_ptr = %x, conn = %x, scope = %x, "
+ "lower = %u, upper = %u\n",
+ ref, p_ptr, p_ptr->publ.connected, scope, seq->lower, seq->upper);
+ if (!p_ptr)
+ return -EINVAL;
+ if (p_ptr->publ.connected)
+ goto exit;
+ if (seq->lower > seq->upper)
+ goto exit;
+ if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE))
+ goto exit;
+ key = ref + p_ptr->pub_count + 1;
+ if (key == ref) {
+ res = -EADDRINUSE;
+ goto exit;
+ }
+ publ = nametbl_publish(seq->type, seq->lower, seq->upper,
+ scope, p_ptr->publ.ref, key);
+ if (publ) {
+ list_add(&publ->pport_list, &p_ptr->publications);
+ p_ptr->pub_count++;
+ p_ptr->publ.published = 1;
+ res = TIPC_OK;
+ }
+exit:
+ port_unlock(p_ptr);
+ return res;
+}
+
+int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
+{
+ struct port *p_ptr;
+ struct publication *publ;
+ struct publication *tpubl;
+ int res = -EINVAL;
+
+ p_ptr = port_lock(ref);
+ if (!p_ptr)
+ return -EINVAL;
+ if (!p_ptr->publ.published)
+ goto exit;
+ if (!seq) {
+ list_for_each_entry_safe(publ, tpubl,
+ &p_ptr->publications, pport_list) {
+ nametbl_withdraw(publ->type, publ->lower,
+ publ->ref, publ->key);
+ }
+ res = TIPC_OK;
+ } else {
+ list_for_each_entry_safe(publ, tpubl,
+ &p_ptr->publications, pport_list) {
+ if (publ->scope != scope)
+ continue;
+ if (publ->type != seq->type)
+ continue;
+ if (publ->lower != seq->lower)
+ continue;
+ if (publ->upper != seq->upper)
+ break;
+ nametbl_withdraw(publ->type, publ->lower,
+ publ->ref, publ->key);
+ res = TIPC_OK;
+ break;
+ }
+ }
+ if (list_empty(&p_ptr->publications))
+ p_ptr->publ.published = 0;
+exit:
+ port_unlock(p_ptr);
+ return res;
+}
+
+int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
+{
+ struct port *p_ptr;
+ struct tipc_msg *msg;
+ int res = -EINVAL;
+
+ p_ptr = port_lock(ref);
+ if (!p_ptr)
+ return -EINVAL;
+ if (p_ptr->publ.published || p_ptr->publ.connected)
+ goto exit;
+ if (!peer->ref)
+ goto exit;
+
+ msg = &p_ptr->publ.phdr;
+ msg_set_destnode(msg, peer->node);
+ msg_set_destport(msg, peer->ref);
+ msg_set_orignode(msg, tipc_own_addr);
+ msg_set_origport(msg, p_ptr->publ.ref);
+ msg_set_transp_seqno(msg, 42);
+ msg_set_type(msg, TIPC_CONN_MSG);
+ if (!may_route(peer->node))
+ msg_set_hdr_sz(msg, SHORT_H_SIZE);
+ else
+ msg_set_hdr_sz(msg, LONG_H_SIZE);
+
+ p_ptr->probing_interval = PROBING_INTERVAL;
+ p_ptr->probing_state = CONFIRMED;
+ p_ptr->publ.connected = 1;
+ k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
+
+ nodesub_subscribe(&p_ptr->subscription,peer->node,
+ (void *)(unsigned long)ref,
+ (net_ev_handler)port_handle_node_down);
+ res = TIPC_OK;
+exit:
+ port_unlock(p_ptr);
+ p_ptr->max_pkt = link_get_max_pkt(peer->node, ref);
+ return res;
+}
+
+/*
+ * tipc_disconnect(): Disconnect port from peer.
+ * This is a node local operation.
+ */
+
+int tipc_disconnect(u32 ref)
+{
+ struct port *p_ptr;
+ int res = -ENOTCONN;
+
+ p_ptr = port_lock(ref);
+ if (!p_ptr)
+ return -EINVAL;
+ if (p_ptr->publ.connected) {
+ p_ptr->publ.connected = 0;
+ /* let timer expire on its own to avoid deadlock! */
+ nodesub_unsubscribe(&p_ptr->subscription);
+ res = TIPC_OK;
+ }
+ port_unlock(p_ptr);
+ return res;
+}
+
+/*
+ * tipc_shutdown(): Send a SHUTDOWN msg to peer and disconnect
+ */
+int tipc_shutdown(u32 ref)
+{
+ struct port *p_ptr;
+ struct sk_buff *buf = 0;
+
+ p_ptr = port_lock(ref);
+ if (!p_ptr)
+ return -EINVAL;
+
+ if (p_ptr->publ.connected) {
+ u32 imp = msg_importance(&p_ptr->publ.phdr);
+ if (imp < TIPC_CRITICAL_IMPORTANCE)
+ imp++;
+ buf = port_build_proto_msg(port_peerport(p_ptr),
+ port_peernode(p_ptr),
+ ref,
+ tipc_own_addr,
+ imp,
+ TIPC_CONN_MSG,
+ TIPC_CONN_SHUTDOWN,
+ port_out_seqno(p_ptr),
+ 0);
+ }
+ port_unlock(p_ptr);
+ net_route_msg(buf);
+ return tipc_disconnect(ref);
+}
+
+int tipc_isconnected(u32 ref, int *isconnected)
+{
+ struct port *p_ptr;
+
+ p_ptr = port_lock(ref);
+ if (!p_ptr)
+ return -EINVAL;
+ *isconnected = p_ptr->publ.connected;
+ port_unlock(p_ptr);
+ return TIPC_OK;
+}
+
+int tipc_peer(u32 ref, struct tipc_portid *peer)
+{
+ struct port *p_ptr;
+ int res;
+
+ p_ptr = port_lock(ref);
+ if (!p_ptr)
+ return -EINVAL;
+ if (p_ptr->publ.connected) {
+ peer->ref = port_peerport(p_ptr);
+ peer->node = port_peernode(p_ptr);
+ res = TIPC_OK;
+ } else
+ res = -ENOTCONN;
+ port_unlock(p_ptr);
+ return res;
+}
+
+int tipc_ref_valid(u32 ref)
+{
+ /* Works irrespective of type */
+ return !!ref_deref(ref);
+}
+
+
+/*
+ * port_recv_sections(): Concatenate and deliver sectioned
+ * message for this node.
+ */
+
+int port_recv_sections(struct port *sender, unsigned int num_sect,
+ struct iovec const *msg_sect)
+{
+ struct sk_buff *buf;
+ int res;
+
+ res = msg_build(&sender->publ.phdr, msg_sect, num_sect,
+ MAX_MSG_SIZE, !sender->user_port, &buf);
+ if (likely(buf))
+ port_recv_msg(buf);
+ return res;
+}
+
+/**
+ * tipc_send - send message sections on connection
+ */
+
+int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
+{
+ struct port *p_ptr;
+ u32 destnode;
+ int res;
+
+ p_ptr = port_deref(ref);
+ if (!p_ptr || !p_ptr->publ.connected)
+ return -EINVAL;
+
+ p_ptr->publ.congested = 1;
+ if (!port_congested(p_ptr)) {
+ destnode = port_peernode(p_ptr);
+ if (likely(destnode != tipc_own_addr))
+ res = link_send_sections_fast(p_ptr, msg_sect, num_sect,
+ destnode);
+ else
+ res = port_recv_sections(p_ptr, num_sect, msg_sect);
+
+ if (likely(res != -ELINKCONG)) {
+ port_incr_out_seqno(p_ptr);
+ p_ptr->publ.congested = 0;
+ p_ptr->sent++;
+ return res;
+ }
+ }
+ if (port_unreliable(p_ptr)) {
+ p_ptr->publ.congested = 0;
+ /* Just calculate msg length and return */
+ return msg_calc_data_size(msg_sect, num_sect);
+ }
+ return -ELINKCONG;
+}
+
+/**
+ * tipc_send_buf - send message buffer on connection
+ */
+
+int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz)
+{
+ struct port *p_ptr;
+ struct tipc_msg *msg;
+ u32 destnode;
+ u32 hsz;
+ u32 sz;
+ u32 res;
+
+ p_ptr = port_deref(ref);
+ if (!p_ptr || !p_ptr->publ.connected)
+ return -EINVAL;
+
+ msg = &p_ptr->publ.phdr;
+ hsz = msg_hdr_sz(msg);
+ sz = hsz + dsz;
+ msg_set_size(msg, sz);
+ if (skb_cow(buf, hsz))
+ return -ENOMEM;
+
+ skb_push(buf, hsz);
+ memcpy(buf->data, (unchar *)msg, hsz);
+ destnode = msg_destnode(msg);
+ p_ptr->publ.congested = 1;
+ if (!port_congested(p_ptr)) {
+ if (likely(destnode != tipc_own_addr))
+ res = tipc_send_buf_fast(buf, destnode);
+ else {
+ port_recv_msg(buf);
+ res = sz;
+ }
+ if (likely(res != -ELINKCONG)) {
+ port_incr_out_seqno(p_ptr);
+ p_ptr->sent++;
+ p_ptr->publ.congested = 0;
+ return res;
+ }
+ }
+ if (port_unreliable(p_ptr)) {
+ p_ptr->publ.congested = 0;
+ return dsz;
+ }
+ return -ELINKCONG;
+}
+
+/**
+ * tipc_forward2name - forward message sections to port name
+ */
+
+int tipc_forward2name(u32 ref,
+ struct tipc_name const *name,
+ u32 domain,
+ u32 num_sect,
+ struct iovec const *msg_sect,
+ struct tipc_portid const *orig,
+ unsigned int importance)
+{
+ struct port *p_ptr;
+ struct tipc_msg *msg;
+ u32 destnode = domain;
+ u32 destport = 0;
+ int res;
+
+ p_ptr = port_deref(ref);
+ if (!p_ptr || p_ptr->publ.connected)
+ return -EINVAL;
+
+ msg = &p_ptr->publ.phdr;
+ msg_set_type(msg, TIPC_NAMED_MSG);
+ msg_set_orignode(msg, orig->node);
+ msg_set_origport(msg, orig->ref);
+ msg_set_hdr_sz(msg, LONG_H_SIZE);
+ msg_set_nametype(msg, name->type);
+ msg_set_nameinst(msg, name->instance);
+ msg_set_lookup_scope(msg, addr_scope(domain));
+ if (importance <= TIPC_CRITICAL_IMPORTANCE)
+ msg_set_importance(msg,importance);
+ destport = nametbl_translate(name->type, name->instance, &destnode);
+ msg_set_destnode(msg, destnode);
+ msg_set_destport(msg, destport);
+
+ if (likely(destport || destnode)) {
+ p_ptr->sent++;
+ if (likely(destnode == tipc_own_addr))
+ return port_recv_sections(p_ptr, num_sect, msg_sect);
+ res = link_send_sections_fast(p_ptr, msg_sect, num_sect,
+ destnode);
+ if (likely(res != -ELINKCONG))
+ return res;
+ if (port_unreliable(p_ptr)) {
+ /* Just calculate msg length and return */
+ return msg_calc_data_size(msg_sect, num_sect);
+ }
+ return -ELINKCONG;
+ }
+ return port_reject_sections(p_ptr, msg, msg_sect, num_sect,
+ TIPC_ERR_NO_NAME);
+}
+
+/**
+ * tipc_send2name - send message sections to port name
+ */
+
+int tipc_send2name(u32 ref,
+ struct tipc_name const *name,
+ unsigned int domain,
+ unsigned int num_sect,
+ struct iovec const *msg_sect)
+{
+ struct tipc_portid orig;
+
+ orig.ref = ref;
+ orig.node = tipc_own_addr;
+ return tipc_forward2name(ref, name, domain, num_sect, msg_sect, &orig,
+ TIPC_PORT_IMPORTANCE);
+}
+
+/**
+ * tipc_forward_buf2name - forward message buffer to port name
+ */
+
+int tipc_forward_buf2name(u32 ref,
+ struct tipc_name const *name,
+ u32 domain,
+ struct sk_buff *buf,
+ unsigned int dsz,
+ struct tipc_portid const *orig,
+ unsigned int importance)
+{
+ struct port *p_ptr;
+ struct tipc_msg *msg;
+ u32 destnode = domain;
+ u32 destport = 0;
+ int res;
+
+ p_ptr = (struct port *)ref_deref(ref);
+ if (!p_ptr || p_ptr->publ.connected)
+ return -EINVAL;
+
+ msg = &p_ptr->publ.phdr;
+ if (importance <= TIPC_CRITICAL_IMPORTANCE)
+ msg_set_importance(msg, importance);
+ msg_set_type(msg, TIPC_NAMED_MSG);
+ msg_set_orignode(msg, orig->node);
+ msg_set_origport(msg, orig->ref);
+ msg_set_nametype(msg, name->type);
+ msg_set_nameinst(msg, name->instance);
+ msg_set_lookup_scope(msg, addr_scope(domain));
+ msg_set_hdr_sz(msg, LONG_H_SIZE);
+ msg_set_size(msg, LONG_H_SIZE + dsz);
+ destport = nametbl_translate(name->type, name->instance, &destnode);
+ msg_set_destnode(msg, destnode);
+ msg_set_destport(msg, destport);
+ msg_dbg(msg, "forw2name ==> ");
+ if (skb_cow(buf, LONG_H_SIZE))
+ return -ENOMEM;
+ skb_push(buf, LONG_H_SIZE);
+ memcpy(buf->data, (unchar *)msg, LONG_H_SIZE);
+ msg_dbg(buf_msg(buf),"PREP:");
+ if (likely(destport || destnode)) {
+ p_ptr->sent++;
+ if (destnode == tipc_own_addr)
+ return port_recv_msg(buf);
+ res = tipc_send_buf_fast(buf, destnode);
+ if (likely(res != -ELINKCONG))
+ return res;
+ if (port_unreliable(p_ptr))
+ return dsz;
+ return -ELINKCONG;
+ }
+ return tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
+}
+
+/**
+ * tipc_send_buf2name - send message buffer to port name
+ */
+
+int tipc_send_buf2name(u32 ref,
+ struct tipc_name const *dest,
+ u32 domain,
+ struct sk_buff *buf,
+ unsigned int dsz)
+{
+ struct tipc_portid orig;
+
+ orig.ref = ref;
+ orig.node = tipc_own_addr;
+ return tipc_forward_buf2name(ref, dest, domain, buf, dsz, &orig,
+ TIPC_PORT_IMPORTANCE);
+}
+
+/**
+ * tipc_forward2port - forward message sections to port identity
+ */
+
+int tipc_forward2port(u32 ref,
+ struct tipc_portid const *dest,
+ unsigned int num_sect,
+ struct iovec const *msg_sect,
+ struct tipc_portid const *orig,
+ unsigned int importance)
+{
+ struct port *p_ptr;
+ struct tipc_msg *msg;
+ int res;
+
+ p_ptr = port_deref(ref);
+ if (!p_ptr || p_ptr->publ.connected)
+ return -EINVAL;
+
+ msg = &p_ptr->publ.phdr;
+ msg_set_type(msg, TIPC_DIRECT_MSG);
+ msg_set_orignode(msg, orig->node);
+ msg_set_origport(msg, orig->ref);
+ msg_set_destnode(msg, dest->node);
+ msg_set_destport(msg, dest->ref);
+ msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
+ if (importance <= TIPC_CRITICAL_IMPORTANCE)
+ msg_set_importance(msg, importance);
+ p_ptr->sent++;
+ if (dest->node == tipc_own_addr)
+ return port_recv_sections(p_ptr, num_sect, msg_sect);
+ res = link_send_sections_fast(p_ptr, msg_sect, num_sect, dest->node);
+ if (likely(res != -ELINKCONG))
+ return res;
+ if (port_unreliable(p_ptr)) {
+ /* Just calculate msg length and return */
+ return msg_calc_data_size(msg_sect, num_sect);
+ }
+ return -ELINKCONG;
+}
+
+/**
+ * tipc_send2port - send message sections to port identity
+ */
+
+int tipc_send2port(u32 ref,
+ struct tipc_portid const *dest,
+ unsigned int num_sect,
+ struct iovec const *msg_sect)
+{
+ struct tipc_portid orig;
+
+ orig.ref = ref;
+ orig.node = tipc_own_addr;
+ return tipc_forward2port(ref, dest, num_sect, msg_sect, &orig,
+ TIPC_PORT_IMPORTANCE);
+}
+
+/**
+ * tipc_forward_buf2port - forward message buffer to port identity
+ */
+int tipc_forward_buf2port(u32 ref,
+ struct tipc_portid const *dest,
+ struct sk_buff *buf,
+ unsigned int dsz,
+ struct tipc_portid const *orig,
+ unsigned int importance)
+{
+ struct port *p_ptr;
+ struct tipc_msg *msg;
+ int res;
+
+ p_ptr = (struct port *)ref_deref(ref);
+ if (!p_ptr || p_ptr->publ.connected)
+ return -EINVAL;
+
+ msg = &p_ptr->publ.phdr;
+ msg_set_type(msg, TIPC_DIRECT_MSG);
+ msg_set_orignode(msg, orig->node);
+ msg_set_origport(msg, orig->ref);
+ msg_set_destnode(msg, dest->node);
+ msg_set_destport(msg, dest->ref);
+ msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
+ if (importance <= TIPC_CRITICAL_IMPORTANCE)
+ msg_set_importance(msg, importance);
+ msg_set_size(msg, DIR_MSG_H_SIZE + dsz);
+ if (skb_cow(buf, DIR_MSG_H_SIZE))
+ return -ENOMEM;
+
+ skb_push(buf, DIR_MSG_H_SIZE);
+ memcpy(buf->data, (unchar *)msg, DIR_MSG_H_SIZE);
+ msg_dbg(msg, "buf2port: ");
+ p_ptr->sent++;
+ if (dest->node == tipc_own_addr)
+ return port_recv_msg(buf);
+ res = tipc_send_buf_fast(buf, dest->node);
+ if (likely(res != -ELINKCONG))
+ return res;
+ if (port_unreliable(p_ptr))
+ return dsz;
+ return -ELINKCONG;
+}
+
+/**
+ * tipc_send_buf2port - send message buffer to port identity
+ */
+
+int tipc_send_buf2port(u32 ref,
+ struct tipc_portid const *dest,
+ struct sk_buff *buf,
+ unsigned int dsz)
+{
+ struct tipc_portid orig;
+
+ orig.ref = ref;
+ orig.node = tipc_own_addr;
+ return tipc_forward_buf2port(ref, dest, buf, dsz, &orig,
+ TIPC_PORT_IMPORTANCE);
+}
+
diff --git a/net/tipc/port.h b/net/tipc/port.h
new file mode 100644
index 00000000000..e829a99d3b7
--- /dev/null
+++ b/net/tipc/port.h
@@ -0,0 +1,209 @@
+/*
+ * net/tipc/port.h: Include file for TIPC port code
+ *
+ * Copyright (c) 1994-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_PORT_H
+#define _TIPC_PORT_H
+
+#include <net/tipc/tipc_port.h>
+#include "ref.h"
+#include "net.h"
+#include "msg.h"
+#include "dbg.h"
+#include "node_subscr.h"
+
+/**
+ * struct user_port - TIPC user port (used with native API)
+ * @user_ref: id of user who created user port
+ * @usr_handle: user-specified field
+ * @ref: object reference to associated TIPC port
+ * <various callback routines>
+ * @uport_list: adjacent user ports in list of ports held by user
+ */
+
+struct user_port {
+ u32 user_ref;
+ void *usr_handle;
+ u32 ref;
+ tipc_msg_err_event err_cb;
+ tipc_named_msg_err_event named_err_cb;
+ tipc_conn_shutdown_event conn_err_cb;
+ tipc_msg_event msg_cb;
+ tipc_named_msg_event named_msg_cb;
+ tipc_conn_msg_event conn_msg_cb;
+ tipc_continue_event continue_event_cb;
+ struct list_head uport_list;
+};
+
+/**
+ * struct port - TIPC port structure
+ * @publ: TIPC port info available to privileged users
+ * @port_list: adjacent ports in TIPC's global list of ports
+ * @dispatcher: ptr to routine which handles received messages
+ * @wakeup: ptr to routine to call when port is no longer congested
+ * @user_port: ptr to user port associated with port (if any)
+ * @wait_list: adjacent ports in list of ports waiting on link congestion
+ * @congested_link: ptr to congested link port is waiting on
+ * @waiting_pkts:
+ * @sent:
+ * @acked:
+ * @publications: list of publications for port
+ * @pub_count: total # of publications port has made during its lifetime
+ * @max_pkt: maximum packet size "hint" used when building messages sent by port
+ * @probing_state:
+ * @probing_interval:
+ * @last_in_seqno:
+ * @timer_ref:
+ * @subscription: "node down" subscription used to terminate failed connections
+ */
+
+struct port {
+ struct tipc_port publ;
+ struct list_head port_list;
+ u32 (*dispatcher)(struct tipc_port *, struct sk_buff *);
+ void (*wakeup)(struct tipc_port *);
+ struct user_port *user_port;
+ struct list_head wait_list;
+ struct link *congested_link;
+ u32 waiting_pkts;
+ u32 sent;
+ u32 acked;
+ struct list_head publications;
+ u32 pub_count;
+ u32 max_pkt;
+ u32 probing_state;
+ u32 probing_interval;
+ u32 last_in_seqno;
+ struct timer_list timer;
+ struct node_subscr subscription;
+};
+
+extern spinlock_t port_list_lock;
+struct port_list;
+
+int port_recv_sections(struct port *p_ptr, u32 num_sect,
+ struct iovec const *msg_sect);
+int port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
+ struct iovec const *msg_sect, u32 num_sect,
+ int err);
+struct sk_buff *port_get_ports(void);
+struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space);
+void port_recv_proto_msg(struct sk_buff *buf);
+void port_recv_mcast(struct sk_buff *buf, struct port_list *dp);
+void port_reinit(void);
+
+/**
+ * port_lock - lock port instance referred to and return its pointer
+ */
+
+static inline struct port *port_lock(u32 ref)
+{
+ return (struct port *)ref_lock(ref);
+}
+
+/**
+ * port_unlock - unlock a port instance
+ *
+ * Can use pointer instead of ref_unlock() since port is already locked.
+ */
+
+static inline void port_unlock(struct port *p_ptr)
+{
+ spin_unlock_bh(p_ptr->publ.lock);
+}
+
+static inline struct port* port_deref(u32 ref)
+{
+ return (struct port *)ref_deref(ref);
+}
+
+static inline u32 peer_port(struct port *p_ptr)
+{
+ return msg_destport(&p_ptr->publ.phdr);
+}
+
+static inline u32 peer_node(struct port *p_ptr)
+{
+ return msg_destnode(&p_ptr->publ.phdr);
+}
+
+static inline int port_congested(struct port *p_ptr)
+{
+ return((p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2));
+}
+
+/**
+ * port_recv_msg - receive message from lower layer and deliver to port user
+ */
+
+static inline int port_recv_msg(struct sk_buff *buf)
+{
+ struct port *p_ptr;
+ struct tipc_msg *msg = buf_msg(buf);
+ u32 destport = msg_destport(msg);
+ u32 dsz = msg_data_sz(msg);
+ u32 err;
+
+ /* forward unresolved named message */
+ if (unlikely(!destport)) {
+ net_route_msg(buf);
+ return dsz;
+ }
+
+ /* validate destination & pass to port, otherwise reject message */
+ p_ptr = port_lock(destport);
+ if (likely(p_ptr)) {
+ if (likely(p_ptr->publ.connected)) {
+ if ((unlikely(msg_origport(msg) != peer_port(p_ptr))) ||
+ (unlikely(msg_orignode(msg) != peer_node(p_ptr))) ||
+ (unlikely(!msg_connected(msg)))) {
+ err = TIPC_ERR_NO_PORT;
+ port_unlock(p_ptr);
+ goto reject;
+ }
+ }
+ err = p_ptr->dispatcher(&p_ptr->publ, buf);
+ port_unlock(p_ptr);
+ if (likely(!err))
+ return dsz;
+ } else {
+ err = TIPC_ERR_NO_PORT;
+ }
+reject:
+ dbg("port->rejecting, err = %x..\n",err);
+ return tipc_reject_msg(buf, err);
+}
+
+#endif
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
new file mode 100644
index 00000000000..944093fe246
--- /dev/null
+++ b/net/tipc/ref.c
@@ -0,0 +1,189 @@
+/*
+ * net/tipc/ref.c: TIPC object registry code
+ *
+ * Copyright (c) 1991-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "ref.h"
+#include "port.h"
+#include "subscr.h"
+#include "name_distr.h"
+#include "name_table.h"
+#include "config.h"
+#include "discover.h"
+#include "bearer.h"
+#include "node.h"
+#include "bcast.h"
+
+/*
+ * Object reference table consists of 2**N entries.
+ *
+ * A used entry has object ptr != 0, reference == XXXX|own index
+ * (XXXX changes each time entry is acquired)
+ * A free entry has object ptr == 0, reference == YYYY|next free index
+ * (YYYY is one more than last used XXXX)
+ *
+ * Free list is initially chained from entry (2**N)-1 to entry 1.
+ * Entry 0 is not used to allow index 0 to indicate the end of the free list.
+ *
+ * Note: Any accidental reference of the form XXXX|0--0 won't match entry 0
+ * because entry 0's reference field has the form XXXX|1--1.
+ */
+
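
As a worked illustration of the layout just described (a hypothetical helper, not part of this patch): the low N bits of a reference select the table slot, and the remaining upper bits must still equal the slot's stored reference value, so a stale reference to a re-used slot is rejected.

    static inline int example_ref_is_current(u32 ref)
    {
            u32 index = ref & ref_table.index_mask;   /* low bits: slot number */

            /* slot 0 is never handed out; re-used slots get new upper bits */
            return index && (ref_table.entries[index].data.reference == ref);
    }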
+struct ref_table ref_table = { 0 };
+
+rwlock_t reftbl_lock = RW_LOCK_UNLOCKED;
+
+/**
+ * ref_table_init - create reference table for objects
+ */
+
+int ref_table_init(u32 requested_size, u32 start)
+{
+ struct reference *table;
+ u32 sz = 1 << 4;
+ u32 index_mask;
+ int i;
+
+ while (sz < requested_size) {
+ sz <<= 1;
+ }
+ table = (struct reference *)vmalloc(sz * sizeof(struct reference));
+ if (table == NULL)
+ return -ENOMEM;
+
+ write_lock_bh(&reftbl_lock);
+ index_mask = sz - 1;
+ for (i = sz - 1; i >= 0; i--) {
+ table[i].object = 0;
+ table[i].lock = SPIN_LOCK_UNLOCKED;
+ table[i].data.next_plus_upper = (start & ~index_mask) + i - 1;
+ }
+ ref_table.entries = table;
+ ref_table.index_mask = index_mask;
+ ref_table.first_free = sz - 1;
+ ref_table.last_free = 1;
+ write_unlock_bh(&reftbl_lock);
+ return TIPC_OK;
+}
+
+/**
+ * ref_table_stop - destroy reference table for objects
+ */
+
+void ref_table_stop(void)
+{
+ if (!ref_table.entries)
+ return;
+
+ vfree(ref_table.entries);
+ ref_table.entries = 0;
+}
+
+/**
+ * ref_acquire - create reference to an object
+ *
+ * Return a unique reference value which can be translated back to the pointer
+ * 'object' at a later time. Also, pass back a pointer to the lock protecting
+ * the object, but without locking it.
+ */
+
+u32 ref_acquire(void *object, spinlock_t **lock)
+{
+ struct reference *entry;
+ u32 index;
+ u32 index_mask;
+ u32 next_plus_upper;
+ u32 reference = 0;
+
+ assert(ref_table.entries && object);
+
+ write_lock_bh(&reftbl_lock);
+ if (ref_table.first_free) {
+ index = ref_table.first_free;
+ entry = &(ref_table.entries[index]);
+ index_mask = ref_table.index_mask;
+ /* take lock in case a previous user of entry still holds it */
+ spin_lock_bh(&entry->lock);
+ next_plus_upper = entry->data.next_plus_upper;
+ ref_table.first_free = next_plus_upper & index_mask;
+ reference = (next_plus_upper & ~index_mask) + index;
+ entry->data.reference = reference;
+ entry->object = object;
+ if (lock != 0)
+ *lock = &entry->lock;
+ spin_unlock_bh(&entry->lock);
+ }
+ write_unlock_bh(&reftbl_lock);
+ return reference;
+}
+
+/**
+ * ref_discard - invalidate references to an object
+ *
+ * Disallow future references to an object and free up the entry for re-use.
+ * Note: The entry's spin_lock may still be busy after discard.
+ */
+
+void ref_discard(u32 ref)
+{
+ struct reference *entry;
+ u32 index;
+ u32 index_mask;
+
+ assert(ref_table.entries);
+ assert(ref != 0);
+
+ write_lock_bh(&reftbl_lock);
+ index_mask = ref_table.index_mask;
+ index = ref & index_mask;
+ entry = &(ref_table.entries[index]);
+ assert(entry->object != 0);
+ assert(entry->data.reference == ref);
+
+ /* mark entry as unused */
+ entry->object = 0;
+ if (ref_table.first_free == 0)
+ ref_table.first_free = index;
+ else
+ /* next_plus_upper is always XXXX|0--0 for last free entry */
+ ref_table.entries[ref_table.last_free].data.next_plus_upper
+ |= index;
+ ref_table.last_free = index;
+
+ /* increment upper bits of entry to invalidate subsequent references */
+ entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1);
+ write_unlock_bh(&reftbl_lock);
+}
+
diff --git a/net/tipc/ref.h b/net/tipc/ref.h
new file mode 100644
index 00000000000..429cde57228
--- /dev/null
+++ b/net/tipc/ref.h
@@ -0,0 +1,131 @@
+/*
+ * net/tipc/ref.h: Include file for TIPC object registry code
+ *
+ * Copyright (c) 1991-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_REF_H
+#define _TIPC_REF_H
+
+/**
+ * struct reference - TIPC object reference entry
+ * @object: pointer to object associated with reference entry
+ * @lock: spinlock controlling access to object
+ * @data: reference value associated with object (or link to next unused entry)
+ */
+
+struct reference {
+ void *object;
+ spinlock_t lock;
+ union {
+ u32 next_plus_upper;
+ u32 reference;
+ } data;
+};
+
+/**
+ * struct ref_table - table of TIPC object reference entries
+ * @entries: pointer to array of reference entries
+ * @index_mask: bitmask for array index portion of reference values
+ * @first_free: array index of first unused object reference entry
+ * @last_free: array index of last unused object reference entry
+ */
+
+struct ref_table {
+ struct reference *entries;
+ u32 index_mask;
+ u32 first_free;
+ u32 last_free;
+};
+
+extern struct ref_table ref_table;
+
+int ref_table_init(u32 requested_size, u32 start);
+void ref_table_stop(void);
+
+u32 ref_acquire(void *object, spinlock_t **lock);
+void ref_discard(u32 ref);
+
+
+/**
+ * ref_lock - lock referenced object and return pointer to it
+ */
+
+static inline void *ref_lock(u32 ref)
+{
+ if (likely(ref_table.entries)) {
+ struct reference *r =
+ &ref_table.entries[ref & ref_table.index_mask];
+
+ spin_lock_bh(&r->lock);
+ if (likely(r->data.reference == ref))
+ return r->object;
+ spin_unlock_bh(&r->lock);
+ }
+ return 0;
+}
+
+/**
+ * ref_unlock - unlock referenced object
+ */
+
+static inline void ref_unlock(u32 ref)
+{
+ if (likely(ref_table.entries)) {
+ struct reference *r =
+ &ref_table.entries[ref & ref_table.index_mask];
+
+ if (likely(r->data.reference == ref))
+ spin_unlock_bh(&r->lock);
+ else
+ err("ref_unlock() invoked using obsolete reference\n");
+ }
+}
+
+/**
+ * ref_deref - return pointer to referenced object (without locking it)
+ */
+
+static inline void *ref_deref(u32 ref)
+{
+ if (likely(ref_table.entries)) {
+ struct reference *r =
+ &ref_table.entries[ref & ref_table.index_mask];
+
+ if (likely(r->data.reference == ref))
+ return r->object;
+ }
+ return 0;
+}
+
+#endif
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
new file mode 100644
index 00000000000..d21f8c0cd25
--- /dev/null
+++ b/net/tipc/socket.c
@@ -0,0 +1,1726 @@
+/*
+ * net/tipc/socket.c: TIPC socket API
+ *
+ * Copyright (c) 2001-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/version.h>
+#include <linux/fcntl.h>
+#include <linux/version.h>
+#include <asm/semaphore.h>
+#include <asm/string.h>
+#include <asm/atomic.h>
+#include <net/sock.h>
+
+#include <linux/tipc.h>
+#include <linux/tipc_config.h>
+#include <net/tipc/tipc_msg.h>
+#include <net/tipc/tipc_port.h>
+
+#include "core.h"
+
+#define SS_LISTENING -1 /* socket is listening */
+#define SS_READY -2 /* socket is connectionless */
+
+#define OVERLOAD_LIMIT_BASE 5000
+
+struct tipc_sock {
+ struct sock sk;
+ struct tipc_port *p;
+ struct semaphore sem;
+};
+
+#define tipc_sk(sk) ((struct tipc_sock*)sk)
+
+static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
+static void wakeupdispatch(struct tipc_port *tport);
+
+static struct proto_ops packet_ops;
+static struct proto_ops stream_ops;
+static struct proto_ops msg_ops;
+
+static struct proto tipc_proto;
+
+static int sockets_enabled = 0;
+
+static atomic_t tipc_queue_size = ATOMIC_INIT(0);
+
+
+/*
+ * sock_lock(): Lock a port/socket pair. lock_sock() cannot
+ * be used here, since the same lock must protect ports
+ * with non-socket interfaces.
+ * See net.c for description of locking policy.
+ */
+static inline void sock_lock(struct tipc_sock* tsock)
+{
+ spin_lock_bh(tsock->p->lock);
+}
+
+/*
+ * sock_unlock(): Unlock a port/socket pair
+ */
+static inline void sock_unlock(struct tipc_sock* tsock)
+{
+ spin_unlock_bh(tsock->p->lock);
+}
+
+/**
+ * pollmask - determine the current set of poll() events for a socket
+ * @sock: socket structure
+ *
+ * TIPC sets the returned events as follows:
+ * a) POLLRDNORM and POLLIN are set if the socket's receive queue is non-empty
+ * or if a connection-oriented socket does not have an active connection
+ * (i.e. a read operation will not block).
+ * b) POLLOUT is set except when a socket's connection has been terminated
+ * (i.e. a write operation will not block).
+ * c) POLLHUP is set when a socket's connection has been terminated.
+ *
+ * IMPORTANT: The fact that a read or write operation will not block does NOT
+ * imply that the operation will succeed!
+ *
+ * Returns pollmask value
+ */
+
+static inline u32 pollmask(struct socket *sock)
+{
+ u32 mask;
+
+ if ((skb_queue_len(&sock->sk->sk_receive_queue) != 0) ||
+ (sock->state == SS_UNCONNECTED) ||
+ (sock->state == SS_DISCONNECTING))
+ mask = (POLLRDNORM | POLLIN);
+ else
+ mask = 0;
+
+ if (sock->state == SS_DISCONNECTING)
+ mask |= POLLHUP;
+ else
+ mask |= POLLOUT;
+
+ return mask;
+}
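
A hypothetical user-space counterpart of the mapping above, assuming fd is a connected AF_TIPC socket; it simply shows which revents bits correspond to the cases listed in the comment.

    #include <poll.h>

    static void wait_for_tipc_event(int fd)
    {
            struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };

            if (poll(&pfd, 1, -1) <= 0)
                    return;
            if (pfd.revents & POLLHUP)
                    return;         /* connection has been terminated */
            if (pfd.revents & POLLIN)
                    return;         /* a read will not block (it may still fail) */
            /* only POLLOUT set: a write will not block */
    }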
+
+
+/**
+ * advance_queue - discard first buffer in queue
+ * @tsock: TIPC socket
+ */
+
+static inline void advance_queue(struct tipc_sock *tsock)
+{
+ sock_lock(tsock);
+ buf_discard(skb_dequeue(&tsock->sk.sk_receive_queue));
+ sock_unlock(tsock);
+ atomic_dec(&tipc_queue_size);
+}
+
+/**
+ * tipc_create - create a TIPC socket
+ * @sock: pre-allocated socket structure
+ * @protocol: protocol indicator (must be 0)
+ *
+ * This routine creates and attaches a 'struct sock' to the 'struct socket',
+ * then creates and attaches a TIPC port to the 'struct sock' part.
+ *
+ * Returns 0 on success, errno otherwise
+ */
+static int tipc_create(struct socket *sock, int protocol)
+{
+ struct tipc_sock *tsock;
+ struct tipc_port *port;
+ struct sock *sk;
+ u32 ref;
+
+ if ((sock->type != SOCK_STREAM) &&
+ (sock->type != SOCK_SEQPACKET) &&
+ (sock->type != SOCK_DGRAM) &&
+ (sock->type != SOCK_RDM))
+ return -EPROTOTYPE;
+
+ if (unlikely(protocol != 0))
+ return -EPROTONOSUPPORT;
+
+ ref = tipc_createport_raw(0, &dispatch, &wakeupdispatch, TIPC_LOW_IMPORTANCE);
+ if (unlikely(!ref))
+ return -ENOMEM;
+
+ sock->state = SS_UNCONNECTED;
+
+ switch (sock->type) {
+ case SOCK_STREAM:
+ sock->ops = &stream_ops;
+ break;
+ case SOCK_SEQPACKET:
+ sock->ops = &packet_ops;
+ break;
+ case SOCK_DGRAM:
+ tipc_set_portunreliable(ref, 1);
+ /* fall through */
+ case SOCK_RDM:
+ tipc_set_portunreturnable(ref, 1);
+ sock->ops = &msg_ops;
+ sock->state = SS_READY;
+ break;
+ }
+
+ sk = sk_alloc(AF_TIPC, GFP_KERNEL, &tipc_proto, 1);
+ if (!sk) {
+ tipc_deleteport(ref);
+ return -ENOMEM;
+ }
+
+ sock_init_data(sock, sk);
+ init_waitqueue_head(sk->sk_sleep);
+ sk->sk_rcvtimeo = 8 * HZ; /* default connect timeout = 8s */
+
+ tsock = tipc_sk(sk);
+ port = tipc_get_port(ref);
+
+ tsock->p = port;
+ port->usr_handle = tsock;
+
+ init_MUTEX(&tsock->sem);
+
+ dbg("sock_create: %x\n",tsock);
+
+ atomic_inc(&tipc_user_count);
+
+ return 0;
+}
+
+/**
+ * release - destroy a TIPC socket
+ * @sock: socket to destroy
+ *
+ * This routine cleans up any messages that are still queued on the socket.
+ * For DGRAM and RDM socket types, all queued messages are rejected.
+ * For SEQPACKET and STREAM socket types, the first message is rejected
+ * and any others are discarded. (If the first message on a STREAM socket
+ * is partially-read, it is discarded and the next one is rejected instead.)
+ *
+ * NOTE: Rejected messages are not necessarily returned to the sender! They
+ * are returned or discarded according to the "destination droppable" setting
+ * specified for the message by the sender.
+ *
+ * Returns 0 on success, errno otherwise
+ */
+
+static int release(struct socket *sock)
+{
+ struct tipc_sock *tsock = tipc_sk(sock->sk);
+ struct sock *sk = sock->sk;
+ int res = TIPC_OK;
+ struct sk_buff *buf;
+
+ dbg("sock_delete: %x\n",tsock);
+ if (!tsock)
+ return 0;
+ down_interruptible(&tsock->sem);
+ if (!sock->sk) {
+ up(&tsock->sem);
+ return 0;
+ }
+
+ /* Reject unreceived messages, unless no longer connected */
+
+ while (sock->state != SS_DISCONNECTING) {
+ sock_lock(tsock);
+ buf = skb_dequeue(&sk->sk_receive_queue);
+ if (!buf)
+ tsock->p->usr_handle = 0;
+ sock_unlock(tsock);
+ if (!buf)
+ break;
+ if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf)))
+ buf_discard(buf);
+ else
+ tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
+ atomic_dec(&tipc_queue_size);
+ }
+
+ /* Delete TIPC port */
+
+ res = tipc_deleteport(tsock->p->ref);
+ sock->sk = NULL;
+
+ /* Discard any remaining messages */
+
+ while ((buf = skb_dequeue(&sk->sk_receive_queue))) {
+ buf_discard(buf);
+ atomic_dec(&tipc_queue_size);
+ }
+
+ up(&tsock->sem);
+
+ sock_put(sk);
+
+ atomic_dec(&tipc_user_count);
+ return res;
+}
+
+/**
+ * bind - associate or disassociate TIPC name(s) with a socket
+ * @sock: socket structure
+ * @uaddr: socket address describing name(s) and desired operation
+ * @uaddr_len: size of socket address data structure
+ *
+ * Name and name sequence binding is indicated using a positive scope value;
+ * a negative scope value unbinds the specified name. Specifying no name
+ * (i.e. a socket address length of 0) unbinds all names from the socket.
+ *
+ * Returns 0 on success, errno otherwise
+ */
+
+static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
+{
+ struct tipc_sock *tsock = tipc_sk(sock->sk);
+ struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
+ int res;
+
+ if (down_interruptible(&tsock->sem))
+ return -ERESTARTSYS;
+
+ if (unlikely(!uaddr_len)) {
+ res = tipc_withdraw(tsock->p->ref, 0, 0);
+ goto exit;
+ }
+
+ if (uaddr_len < sizeof(struct sockaddr_tipc)) {
+ res = -EINVAL;
+ goto exit;
+ }
+
+ if (addr->family != AF_TIPC) {
+ res = -EAFNOSUPPORT;
+ goto exit;
+ }
+ if (addr->addrtype == TIPC_ADDR_NAME)
+ addr->addr.nameseq.upper = addr->addr.nameseq.lower;
+ else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
+ res = -EAFNOSUPPORT;
+ goto exit;
+ }
+
+ if (addr->scope > 0)
+ res = tipc_publish(tsock->p->ref, addr->scope,
+ &addr->addr.nameseq);
+ else
+ res = tipc_withdraw(tsock->p->ref, -addr->scope,
+ &addr->addr.nameseq);
+exit:
+ up(&tsock->sem);
+ return res;
+}
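
A hypothetical user-space counterpart of the semantics above: binding the name sequence {1000,100,199} with node-local visibility; passing a negative scope in the same structure would withdraw the binding instead.

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/tipc.h>

    static int bind_name_seq(int fd)
    {
            struct sockaddr_tipc addr;

            memset(&addr, 0, sizeof(addr));
            addr.family = AF_TIPC;
            addr.addrtype = TIPC_ADDR_NAMESEQ;
            addr.scope = TIPC_NODE_SCOPE;           /* negative value unbinds */
            addr.addr.nameseq.type = 1000;
            addr.addr.nameseq.lower = 100;
            addr.addr.nameseq.upper = 199;

            return bind(fd, (struct sockaddr *)&addr, sizeof(addr));
    }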
+
+/**
+ * get_name - get port ID of socket or peer socket
+ * @sock: socket structure
+ * @uaddr: area for returned socket address
+ * @uaddr_len: area for returned length of socket address
+ * @peer: 0 to obtain socket name, 1 to obtain peer socket name
+ *
+ * Returns 0 on success, errno otherwise
+ */
+
+static int get_name(struct socket *sock, struct sockaddr *uaddr,
+ int *uaddr_len, int peer)
+{
+ struct tipc_sock *tsock = tipc_sk(sock->sk);
+ struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
+ u32 res;
+
+ if (down_interruptible(&tsock->sem))
+ return -ERESTARTSYS;
+
+ *uaddr_len = sizeof(*addr);
+ addr->addrtype = TIPC_ADDR_ID;
+ addr->family = AF_TIPC;
+ addr->scope = 0;
+ if (peer)
+ res = tipc_peer(tsock->p->ref, &addr->addr.id);
+ else
+ res = tipc_ownidentity(tsock->p->ref, &addr->addr.id);
+ addr->addr.name.domain = 0;
+
+ up(&tsock->sem);
+ return res;
+}
+
+/**
+ * poll - read and possibly block on pollmask
+ * @file: file structure associated with the socket
+ * @sock: socket for which to calculate the poll bits
+ * @wait: ???
+ *
+ * Returns the pollmask
+ */
+
+static unsigned int poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ poll_wait(file, sock->sk->sk_sleep, wait);
+ /* NEED LOCK HERE? */
+ return pollmask(sock);
+}
+
+/**
+ * dest_name_check - verify user is permitted to send to specified port name
+ * @dest: destination address
+ * @m: descriptor for message to be sent
+ *
+ * Prevents restricted configuration commands from being issued by
+ * unauthorized users.
+ *
+ * Returns 0 if permission is granted, otherwise errno
+ */
+
+static inline int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
+{
+ struct tipc_cfg_msg_hdr hdr;
+
+ if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES))
+ return 0;
+ if (likely(dest->addr.name.name.type == TIPC_TOP_SRV))
+ return 0;
+
+ if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
+ return -EACCES;
+
+ if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
+ return -EFAULT;
+ if ((ntohs(hdr.tcm_type) & 0xC000) & (!capable(CAP_NET_ADMIN)))
+ return -EACCES;
+
+ return 0;
+}
+
+/**
+ * send_msg - send message in connectionless manner
+ * @iocb: (unused)
+ * @sock: socket structure
+ * @m: message to send
+ * @total_len: (unused)
+ *
+ * Message must have a destination specified explicitly.
+ * Used for SOCK_RDM and SOCK_DGRAM messages,
+ * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
+ * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
+ *
+ * Returns the number of bytes sent on success, or errno otherwise
+ */
+
+static int send_msg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
+{
+ struct tipc_sock *tsock = tipc_sk(sock->sk);
+ struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
+ struct sk_buff *buf;
+ int needs_conn;
+ int res = -EINVAL;
+
+ if (unlikely(!dest))
+ return -EDESTADDRREQ;
+ if (unlikely(dest->family != AF_TIPC))
+ return -EINVAL;
+
+ needs_conn = (sock->state != SS_READY);
+ if (unlikely(needs_conn)) {
+ if (sock->state == SS_LISTENING)
+ return -EPIPE;
+ if (sock->state != SS_UNCONNECTED)
+ return -EISCONN;
+ if ((tsock->p->published) ||
+ ((sock->type == SOCK_STREAM) && (total_len != 0)))
+ return -EOPNOTSUPP;
+ }
+
+ if (down_interruptible(&tsock->sem))
+ return -ERESTARTSYS;
+
+ if (needs_conn) {
+
+ /* Abort any pending connection attempts (very unlikely) */
+
+ while ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) {
+ tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
+ atomic_dec(&tipc_queue_size);
+ }
+
+ sock->state = SS_CONNECTING;
+ }
+
+ do {
+ if (dest->addrtype == TIPC_ADDR_NAME) {
+ if ((res = dest_name_check(dest, m)))
+ goto exit;
+ res = tipc_send2name(tsock->p->ref,
+ &dest->addr.name.name,
+ dest->addr.name.domain,
+ m->msg_iovlen,
+ m->msg_iov);
+ }
+ else if (dest->addrtype == TIPC_ADDR_ID) {
+ res = tipc_send2port(tsock->p->ref,
+ &dest->addr.id,
+ m->msg_iovlen,
+ m->msg_iov);
+ }
+ else if (dest->addrtype == TIPC_ADDR_MCAST) {
+ if (needs_conn) {
+ res = -EOPNOTSUPP;
+ goto exit;
+ }
+ if ((res = dest_name_check(dest, m)))
+ goto exit;
+ res = tipc_multicast(tsock->p->ref,
+ &dest->addr.nameseq,
+ 0,
+ m->msg_iovlen,
+ m->msg_iov);
+ }
+ if (likely(res != -ELINKCONG)) {
+exit:
+ up(&tsock->sem);
+ return res;
+ }
+ if (m->msg_flags & MSG_DONTWAIT) {
+ res = -EWOULDBLOCK;
+ goto exit;
+ }
+ if (wait_event_interruptible(*sock->sk->sk_sleep,
+ !tsock->p->congested)) {
+ res = -ERESTARTSYS;
+ goto exit;
+ }
+ } while (1);
+}
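
A hypothetical user-space counterpart of the connectionless path above: a datagram sent to port name {1000,17} with lookup domain 0, which leaves the name lookup unrestricted.

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/tipc.h>

    static ssize_t send_to_name(int fd, const void *data, size_t len)
    {
            struct sockaddr_tipc dest;

            memset(&dest, 0, sizeof(dest));
            dest.family = AF_TIPC;
            dest.addrtype = TIPC_ADDR_NAME;
            dest.addr.name.name.type = 1000;
            dest.addr.name.name.instance = 17;
            dest.addr.name.domain = 0;              /* unrestricted lookup */

            return sendto(fd, data, len, 0,
                          (struct sockaddr *)&dest, sizeof(dest));
    }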
+
+/**
+ * send_packet - send a connection-oriented message
+ * @iocb: (unused)
+ * @sock: socket structure
+ * @m: message to send
+ * @total_len: (unused)
+ *
+ * Used for SOCK_SEQPACKET messages and SOCK_STREAM data.
+ *
+ * Returns the number of bytes sent on success, or errno otherwise
+ */
+
+static int send_packet(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
+{
+ struct tipc_sock *tsock = tipc_sk(sock->sk);
+ struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
+ int res;
+
+ /* Handle implied connection establishment */
+
+ if (unlikely(dest))
+ return send_msg(iocb, sock, m, total_len);
+
+ if (down_interruptible(&tsock->sem)) {
+ return -ERESTARTSYS;
+ }
+
+ if (unlikely(sock->state != SS_CONNECTED)) {
+ if (sock->state == SS_DISCONNECTING)
+ res = -EPIPE;
+ else
+ res = -ENOTCONN;
+ goto exit;
+ }
+
+ do {
+ res = tipc_send(tsock->p->ref, m->msg_iovlen, m->msg_iov);
+ if (likely(res != -ELINKCONG)) {
+exit:
+ up(&tsock->sem);
+ return res;
+ }
+ if (m->msg_flags & MSG_DONTWAIT) {
+ res = -EWOULDBLOCK;
+ goto exit;
+ }
+ if (wait_event_interruptible(*sock->sk->sk_sleep,
+ !tsock->p->congested)) {
+ res = -ERESTARTSYS;
+ goto exit;
+ }
+ } while (1);
+}
+
+/**
+ * send_stream - send stream-oriented data
+ * @iocb: (unused)
+ * @sock: socket structure
+ * @m: data to send
+ * @total_len: total length of data to be sent
+ *
+ * Used for SOCK_STREAM data.
+ *
+ * Returns the number of bytes sent on success, or errno otherwise
+ */
+
+
+static int send_stream(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
+{
+ struct msghdr my_msg;
+ struct iovec my_iov;
+ struct iovec *curr_iov;
+ int curr_iovlen;
+ char __user *curr_start;
+ int curr_left;
+ int bytes_to_send;
+ int res;
+
+ if (likely(total_len <= TIPC_MAX_USER_MSG_SIZE))
+ return send_packet(iocb, sock, m, total_len);
+
+ /* Can only send large data streams if already connected */
+
+ if (unlikely(sock->state != SS_CONNECTED)) {
+ if (sock->state == SS_DISCONNECTING)
+ return -EPIPE;
+ else
+ return -ENOTCONN;
+ }
+
+ /*
+ * Send each iovec entry using one or more messages
+ *
+ * Note: This algorithm is good for the most likely case
+ * (i.e. one large iovec entry), but could be improved to pass sets
+ * of small iovec entries into send_packet().
+ */
+
+ my_msg = *m;
+ curr_iov = my_msg.msg_iov;
+ curr_iovlen = my_msg.msg_iovlen;
+ my_msg.msg_iov = &my_iov;
+ my_msg.msg_iovlen = 1;
+
+ while (curr_iovlen--) {
+ curr_start = curr_iov->iov_base;
+ curr_left = curr_iov->iov_len;
+
+ while (curr_left) {
+ bytes_to_send = (curr_left < TIPC_MAX_USER_MSG_SIZE)
+ ? curr_left : TIPC_MAX_USER_MSG_SIZE;
+ my_iov.iov_base = curr_start;
+ my_iov.iov_len = bytes_to_send;
+ if ((res = send_packet(iocb, sock, &my_msg, 0)) < 0)
+ return res;
+ curr_left -= bytes_to_send;
+ curr_start += bytes_to_send;
+ }
+
+ curr_iov++;
+ }
+
+ return total_len;
+}
+
+/**
+ * auto_connect - complete connection setup to a remote port
+ * @sock: socket structure
+ * @tsock: TIPC-specific socket structure
+ * @msg: peer's response message
+ *
+ * Returns 0 on success, errno otherwise
+ */
+
+static int auto_connect(struct socket *sock, struct tipc_sock *tsock,
+ struct tipc_msg *msg)
+{
+ struct tipc_portid peer;
+
+ if (msg_errcode(msg)) {
+ sock->state = SS_DISCONNECTING;
+ return -ECONNREFUSED;
+ }
+
+ peer.ref = msg_origport(msg);
+ peer.node = msg_orignode(msg);
+ tipc_connect2port(tsock->p->ref, &peer);
+ tipc_set_portimportance(tsock->p->ref, msg_importance(msg));
+ sock->state = SS_CONNECTED;
+ return 0;
+}
+
+/**
+ * set_orig_addr - capture sender's address for received message
+ * @m: descriptor for message info
+ * @msg: received message header
+ *
+ * Note: Address is not captured if not requested by receiver.
+ */
+
+static inline void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
+{
+ struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name;
+
+ if (addr) {
+ addr->family = AF_TIPC;
+ addr->addrtype = TIPC_ADDR_ID;
+ addr->addr.id.ref = msg_origport(msg);
+ addr->addr.id.node = msg_orignode(msg);
+ addr->addr.name.domain = 0; /* could leave uninitialized */
+ addr->scope = 0; /* could leave uninitialized */
+ m->msg_namelen = sizeof(struct sockaddr_tipc);
+ }
+}
+
+/**
+ * anc_data_recv - optionally capture ancillary data for received message
+ * @m: descriptor for message info
+ * @msg: received message header
+ * @tport: TIPC port associated with message
+ *
+ * Note: Ancillary data is not captured if not requested by receiver.
+ *
+ * Returns 0 if successful, otherwise errno
+ */
+
+static inline int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
+ struct tipc_port *tport)
+{
+ u32 anc_data[3];
+ u32 err;
+ u32 dest_type;
+ int res;
+
+ if (likely(m->msg_controllen == 0))
+ return 0;
+
+ /* Optionally capture errored message object(s) */
+
+ err = msg ? msg_errcode(msg) : 0;
+ if (unlikely(err)) {
+ anc_data[0] = err;
+ anc_data[1] = msg_data_sz(msg);
+ if ((res = put_cmsg(m, SOL_SOCKET, TIPC_ERRINFO, 8, anc_data)))
+ return res;
+ if (anc_data[1] &&
+ (res = put_cmsg(m, SOL_SOCKET, TIPC_RETDATA, anc_data[1],
+ msg_data(msg))))
+ return res;
+ }
+
+ /* Optionally capture message destination object */
+
+ dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
+ switch (dest_type) {
+ case TIPC_NAMED_MSG:
+ anc_data[0] = msg_nametype(msg);
+ anc_data[1] = msg_namelower(msg);
+ anc_data[2] = msg_namelower(msg);
+ break;
+ case TIPC_MCAST_MSG:
+ anc_data[0] = msg_nametype(msg);
+ anc_data[1] = msg_namelower(msg);
+ anc_data[2] = msg_nameupper(msg);
+ break;
+ case TIPC_CONN_MSG:
+ anc_data[0] = tport->conn_type;
+ anc_data[1] = tport->conn_instance;
+ anc_data[2] = tport->conn_instance;
+ break;
+ default:
+ anc_data[0] = 0;
+ }
+ if (anc_data[0] &&
+ (res = put_cmsg(m, SOL_SOCKET, TIPC_DESTNAME, 12, anc_data)))
+ return res;
+
+ return 0;
+}
+
+/**
+ * recv_msg - receive packet-oriented message
+ * @iocb: (unused)
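+ * @sock: socket structure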
+ * @m: descriptor for message info
+ * @buf_len: total size of user buffer area
+ * @flags: receive flags
+ *
+ * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
+ * If the complete message doesn't fit in the user buffer area, it is truncated.
+ *
+ * Returns size of returned message data, errno otherwise
+ */
+
+static int recv_msg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t buf_len, int flags)
+{
+ struct tipc_sock *tsock = tipc_sk(sock->sk);
+ struct sk_buff *buf;
+ struct tipc_msg *msg;
+ unsigned int q_len;
+ unsigned int sz;
+ u32 err;
+ int res;
+
+ /* Currently doesn't support receiving into multiple iovec entries */
+
+ if (m->msg_iovlen != 1)
+ return -EOPNOTSUPP;
+
+ /* Catch invalid receive attempts */
+
+ if (unlikely(!buf_len))
+ return -EINVAL;
+
+ if (sock->type == SOCK_SEQPACKET) {
+ if (unlikely(sock->state == SS_UNCONNECTED))
+ return -ENOTCONN;
+ if (unlikely((sock->state == SS_DISCONNECTING) &&
+ (skb_queue_len(&sock->sk->sk_receive_queue) == 0)))
+ return -ENOTCONN;
+ }
+
+ /* Look for a message in receive queue; wait if necessary */
+
+ if (unlikely(down_interruptible(&tsock->sem)))
+ return -ERESTARTSYS;
+
+restart:
+ if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
+ (flags & MSG_DONTWAIT))) {
+ res = -EWOULDBLOCK;
+ goto exit;
+ }
+
+ if ((res = wait_event_interruptible(
+ *sock->sk->sk_sleep,
+ ((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) ||
+ (sock->state == SS_DISCONNECTING))) )) {
+ goto exit;
+ }
+
+ /* Catch attempt to receive on an already terminated connection */
+ /* [THIS CHECK MAY OVERLAP WITH AN EARLIER CHECK] */
+
+ if (!q_len) {
+ res = -ENOTCONN;
+ goto exit;
+ }
+
+ /* Get access to first message in receive queue */
+
+ buf = skb_peek(&sock->sk->sk_receive_queue);
+ msg = buf_msg(buf);
+ sz = msg_data_sz(msg);
+ err = msg_errcode(msg);
+
+ /* Complete connection setup for an implied connect */
+
+ if (unlikely(sock->state == SS_CONNECTING)) {
+ if ((res = auto_connect(sock, tsock, msg)))
+ goto exit;
+ }
+
+ /* Discard an empty non-errored message & try again */
+
+ if ((!sz) && (!err)) {
+ advance_queue(tsock);
+ goto restart;
+ }
+
+ /* Capture sender's address (optional) */
+
+ set_orig_addr(m, msg);
+
+ /* Capture ancillary data (optional) */
+
+ if ((res = anc_data_recv(m, msg, tsock->p)))
+ goto exit;
+
+ /* Capture message data (if valid) & compute return value (always) */
+
+ if (!err) {
+ if (unlikely(buf_len < sz)) {
+ sz = buf_len;
+ m->msg_flags |= MSG_TRUNC;
+ }
+ if (unlikely(copy_to_user(m->msg_iov->iov_base, msg_data(msg),
+ sz))) {
+ res = -EFAULT;
+ goto exit;
+ }
+ res = sz;
+ } else {
+ if ((sock->state == SS_READY) ||
+ ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
+ res = 0;
+ else
+ res = -ECONNRESET;
+ }
+
+ /* Consume received message (optional) */
+
+ if (likely(!(flags & MSG_PEEK))) {
+ if (unlikely(++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
+ tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked);
+ advance_queue(tsock);
+ }
+exit:
+ up(&tsock->sem);
+ return res;
+}
+
+/**
+ * recv_stream - receive stream-oriented data
+ * @iocb: (unused)
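+ * @sock: socket structure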
+ * @m: descriptor for message info
+ * @buf_len: total size of user buffer area
+ * @flags: receive flags
+ *
+ * Used for SOCK_STREAM messages only. If not enough data is available,
+ * optionally waits for more; data is never truncated.
+ *
+ * Returns size of returned message data, errno otherwise
+ */
+
+static int recv_stream(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t buf_len, int flags)
+{
+ struct tipc_sock *tsock = tipc_sk(sock->sk);
+ struct sk_buff *buf;
+ struct tipc_msg *msg;
+ unsigned int q_len;
+ unsigned int sz;
+ int sz_to_copy;
+ int sz_copied = 0;
+ int needed;
+ char *crs = m->msg_iov->iov_base;
+ unsigned char *buf_crs;
+ u32 err;
+ int res;
+
+ /* Currently doesn't support receiving into multiple iovec entries */
+
+ if (m->msg_iovlen != 1)
+ return -EOPNOTSUPP;
+
+ /* Catch invalid receive attempts */
+
+ if (unlikely(!buf_len))
+ return -EINVAL;
+
+ if (unlikely(sock->state == SS_DISCONNECTING)) {
+ if (skb_queue_len(&sock->sk->sk_receive_queue) == 0)
+ return -ENOTCONN;
+ } else if (unlikely(sock->state != SS_CONNECTED))
+ return -ENOTCONN;
+
+ /* Look for a message in receive queue; wait if necessary */
+
+ if (unlikely(down_interruptible(&tsock->sem)))
+ return -ERESTARTSYS;
+
+restart:
+ if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
+ (flags & MSG_DONTWAIT))) {
+ res = (sz_copied == 0) ? -EWOULDBLOCK : 0;
+ goto exit;
+ }
+
+ if ((res = wait_event_interruptible(
+ *sock->sk->sk_sleep,
+ ((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) ||
+ (sock->state == SS_DISCONNECTING))) )) {
+ goto exit;
+ }
+
+ /* Catch attempt to receive on an already terminated connection */
+ /* [THIS CHECK MAY OVERLAP WITH AN EARLIER CHECK] */
+
+ if (!q_len) {
+ res = -ENOTCONN;
+ goto exit;
+ }
+
+ /* Get access to first message in receive queue */
+
+ buf = skb_peek(&sock->sk->sk_receive_queue);
+ msg = buf_msg(buf);
+ sz = msg_data_sz(msg);
+ err = msg_errcode(msg);
+
+ /* Discard an empty non-errored message & try again */
+
+ if ((!sz) && (!err)) {
+ advance_queue(tsock);
+ goto restart;
+ }
+
+ /* Optionally capture sender's address & ancillary data of first msg */
+
+ if (sz_copied == 0) {
+ set_orig_addr(m, msg);
+ if ((res = anc_data_recv(m, msg, tsock->p)))
+ goto exit;
+ }
+
+ /* Capture message data (if valid) & compute return value (always) */
+
+ if (!err) {
+ buf_crs = (unsigned char *)(TIPC_SKB_CB(buf)->handle);
+ sz = buf->tail - buf_crs;
+
+ needed = (buf_len - sz_copied);
+ sz_to_copy = (sz <= needed) ? sz : needed;
+ if (unlikely(copy_to_user(crs, buf_crs, sz_to_copy))) {
+ res = -EFAULT;
+ goto exit;
+ }
+ sz_copied += sz_to_copy;
+
+ if (sz_to_copy < sz) {
+ if (!(flags & MSG_PEEK))
+ TIPC_SKB_CB(buf)->handle = buf_crs + sz_to_copy;
+ goto exit;
+ }
+
+ crs += sz_to_copy;
+ } else {
+ if (sz_copied != 0)
+ goto exit; /* can't add error msg to valid data */
+
+ if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
+ res = 0;
+ else
+ res = -ECONNRESET;
+ }
+
+ /* Consume received message (optional) */
+
+ if (likely(!(flags & MSG_PEEK))) {
+ if (unlikely(++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
+ tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked);
+ advance_queue(tsock);
+ }
+
+ /* Loop around if more data is required */
+
+ if ((sz_copied < buf_len) /* didn't get all requested data */
+ && (flags & MSG_WAITALL) /* ... and need to wait for more */
+ && (!(flags & MSG_PEEK)) /* ... and aren't just peeking at data */
+ && (!err) /* ... and haven't reached a FIN */
+ )
+ goto restart;
+
+exit:
+ up(&tsock->sem);
+ return res ? res : sz_copied;
+}
+
+/**
+ * queue_overloaded - test if queue overload condition exists
+ * @queue_size: current size of queue
+ * @base: nominal maximum size of queue
+ * @msg: message to be added to queue
+ *
+ * Returns 1 if queue is currently overloaded, 0 otherwise
+ */
+
+static int queue_overloaded(u32 queue_size, u32 base, struct tipc_msg *msg)
+{
+ u32 threshold;
+ u32 imp = msg_importance(msg);
+
+ if (imp == TIPC_LOW_IMPORTANCE)
+ threshold = base;
+ else if (imp == TIPC_MEDIUM_IMPORTANCE)
+ threshold = base * 2;
+ else if (imp == TIPC_HIGH_IMPORTANCE)
+ threshold = base * 100;
+ else
+ return 0;
+
+ if (msg_connected(msg))
+ threshold *= 4;
+
+ return (queue_size > threshold);
+}
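+
+/*
+ * Example (illustrative only): with a base of 10000 messages, an
+ * unconnected TIPC_MEDIUM_IMPORTANCE message is rejected once the queue
+ * holds more than 20000 messages, while the same message on an established
+ * connection is accepted until the queue exceeds 80000; critical importance
+ * messages are never rejected by this test.
+ */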
+
+/**
+ * async_disconnect - wrapper function used to disconnect port
+ * @portref: TIPC port reference (passed as pointer-sized value)
+ */
+
+static void async_disconnect(unsigned long portref)
+{
+ tipc_disconnect((u32)portref);
+}
+
+/**
+ * dispatch - handle arriving message
+ * @tport: TIPC port that received message
+ * @buf: message
+ *
+ * Called with port locked. Must not take socket lock to avoid deadlock risk.
+ *
+ * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
+ */
+
+static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
+{
+ struct tipc_msg *msg = buf_msg(buf);
+ struct tipc_sock *tsock = (struct tipc_sock *)tport->usr_handle;
+ struct socket *sock;
+ u32 recv_q_len;
+
+ /* Reject message if socket is closing */
+
+ if (!tsock)
+ return TIPC_ERR_NO_PORT;
+
+ /* Reject message if it is wrong sort of message for socket */
+
+ /*
+ * WOULD IT BE BETTER TO JUST DISCARD THESE MESSAGES INSTEAD?
+ * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY
+ * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC
+ */
+ sock = tsock->sk.sk_socket;
+ if (sock->state == SS_READY) {
+ if (msg_connected(msg)) {
+ msg_dbg(msg, "dispatch filter 1\n");
+ return TIPC_ERR_NO_PORT;
+ }
+ } else {
+ if (msg_mcast(msg)) {
+ msg_dbg(msg, "dispatch filter 2\n");
+ return TIPC_ERR_NO_PORT;
+ }
+ if (sock->state == SS_CONNECTED) {
+ if (!msg_connected(msg)) {
+ msg_dbg(msg, "dispatch filter 3\n");
+ return TIPC_ERR_NO_PORT;
+ }
+ } else if (sock->state == SS_CONNECTING) {
+ if (!msg_connected(msg) && (msg_errcode(msg) == 0)) {
+ msg_dbg(msg, "dispatch filter 4\n");
+ return TIPC_ERR_NO_PORT;
+ }
+ } else if (sock->state == SS_LISTENING) {
+ if (msg_connected(msg) || msg_errcode(msg)) {
+ msg_dbg(msg, "dispatch filter 5\n");
+ return TIPC_ERR_NO_PORT;
+ }
+ } else if (sock->state == SS_DISCONNECTING) {
+ msg_dbg(msg, "dispatch filter 6\n");
+ return TIPC_ERR_NO_PORT;
+ } else /* (sock->state == SS_UNCONNECTED) */ {
+ if (msg_connected(msg) || msg_errcode(msg)) {
+ msg_dbg(msg, "dispatch filter 7\n");
+ return TIPC_ERR_NO_PORT;
+ }
+ }
+ }
+
+ /* Reject message if there isn't room to queue it */
+
+ if (unlikely((u32)atomic_read(&tipc_queue_size) >
+ OVERLOAD_LIMIT_BASE)) {
+ if (queue_overloaded(atomic_read(&tipc_queue_size),
+ OVERLOAD_LIMIT_BASE, msg))
+ return TIPC_ERR_OVERLOAD;
+ }
+ recv_q_len = skb_queue_len(&tsock->sk.sk_receive_queue);
+ if (unlikely(recv_q_len > (OVERLOAD_LIMIT_BASE / 2))) {
+ if (queue_overloaded(recv_q_len,
+ OVERLOAD_LIMIT_BASE / 2, msg))
+ return TIPC_ERR_OVERLOAD;
+ }
+
+ /* Initiate connection termination for an incoming 'FIN' */
+
+ if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
+ sock->state = SS_DISCONNECTING;
+ /* Note: Use signal since port lock is already taken! */
+ k_signal((Handler)async_disconnect, tport->ref);
+ }
+
+ /* Enqueue message (finally!) */
+
+ msg_dbg(msg,"<DISP<: ");
+ TIPC_SKB_CB(buf)->handle = msg_data(msg);
+ atomic_inc(&tipc_queue_size);
+ skb_queue_tail(&sock->sk->sk_receive_queue, buf);
+
+ wake_up_interruptible(sock->sk->sk_sleep);
+ return TIPC_OK;
+}
+
+/**
+ * wakeupdispatch - wake up port after congestion
+ * @tport: port to wakeup
+ *
+ * Called with port lock on.
+ */
+
+static void wakeupdispatch(struct tipc_port *tport)
+{
+ struct tipc_sock *tsock = (struct tipc_sock *)tport->usr_handle;
+
+ wake_up_interruptible(tsock->sk.sk_sleep);
+}
+
+/**
+ * connect - establish a connection to another TIPC port
+ * @sock: socket structure
+ * @dest: socket address for destination port
+ * @destlen: size of socket address data structure
+ * @flags: (unused)
+ *
+ * Returns 0 on success, errno otherwise
+ */
+
+static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
+ int flags)
+{
+ struct tipc_sock *tsock = tipc_sk(sock->sk);
+ struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
+ struct msghdr m = {0,};
+ struct sk_buff *buf;
+ struct tipc_msg *msg;
+ int res;
+
+ /* For now, TIPC does not allow use of connect() with DGRAM or RDM types */
+
+ if (sock->state == SS_READY)
+ return -EOPNOTSUPP;
+
+ /* MOVE THE REST OF THIS ERROR CHECKING TO send_msg()? */
+ if (sock->state == SS_LISTENING)
+ return -EOPNOTSUPP;
+ if (sock->state == SS_CONNECTING)
+ return -EALREADY;
+ if (sock->state != SS_UNCONNECTED)
+ return -EISCONN;
+
+ if ((dst->family != AF_TIPC) ||
+ ((dst->addrtype != TIPC_ADDR_NAME) && (dst->addrtype != TIPC_ADDR_ID)))
+ return -EINVAL;
+
+ /* Send a 'SYN-' to destination */
+
+ m.msg_name = dest;
+ if ((res = send_msg(0, sock, &m, 0)) < 0) {
+ sock->state = SS_DISCONNECTING;
+ return res;
+ }
+
+ if (down_interruptible(&tsock->sem))
+ return -ERESTARTSYS;
+
+ /* Wait for destination's 'ACK' response */
+
+ res = wait_event_interruptible_timeout(*sock->sk->sk_sleep,
+ skb_queue_len(&sock->sk->sk_receive_queue),
+ sock->sk->sk_rcvtimeo);
+ buf = skb_peek(&sock->sk->sk_receive_queue);
+ if (res > 0) {
+ msg = buf_msg(buf);
+ res = auto_connect(sock, tsock, msg);
+ if (!res) {
+ if (dst->addrtype == TIPC_ADDR_NAME) {
+ tsock->p->conn_type = dst->addr.name.name.type;
+ tsock->p->conn_instance = dst->addr.name.name.instance;
+ }
+ if (!msg_data_sz(msg))
+ advance_queue(tsock);
+ }
+ } else {
+ if (res == 0)
+ res = -ETIMEDOUT;
+ /* else leave "res" unchanged */
+ sock->state = SS_DISCONNECTING;
+ }
+
+ up(&tsock->sem);
+ return res;
+}
+
+/**
+ * listen - allow socket to listen for incoming connections
+ * @sock: socket structure
+ * @len: (unused)
+ *
+ * Returns 0 on success, errno otherwise
+ */
+
+static int listen(struct socket *sock, int len)
+{
+ /* REQUIRES SOCKET LOCKING OF SOME SORT? */
+
+ if (sock->state == SS_READY)
+ return -EOPNOTSUPP;
+ if (sock->state != SS_UNCONNECTED)
+ return -EINVAL;
+ sock->state = SS_LISTENING;
+ return 0;
+}
+
+/**
+ * accept - wait for connection request
+ * @sock: listening socket
+ * @newsock: new socket that is to be connected
+ * @flags: file-related flags associated with socket
+ *
+ * Returns 0 on success, errno otherwise
+ */
+
+static int accept(struct socket *sock, struct socket *newsock, int flags)
+{
+ struct tipc_sock *tsock = tipc_sk(sock->sk);
+ struct sk_buff *buf;
+ int res = -EFAULT;
+
+ if (sock->state == SS_READY)
+ return -EOPNOTSUPP;
+ if (sock->state != SS_LISTENING)
+ return -EINVAL;
+
+ if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
+ (flags & O_NONBLOCK)))
+ return -EWOULDBLOCK;
+
+ if (down_interruptible(&tsock->sem))
+ return -ERESTARTSYS;
+
+ if (wait_event_interruptible(*sock->sk->sk_sleep,
+ skb_queue_len(&sock->sk->sk_receive_queue))) {
+ res = -ERESTARTSYS;
+ goto exit;
+ }
+ buf = skb_peek(&sock->sk->sk_receive_queue);
+
+ res = tipc_create(newsock, 0);
+ if (!res) {
+ struct tipc_sock *new_tsock = tipc_sk(newsock->sk);
+ struct tipc_portid id;
+ struct tipc_msg *msg = buf_msg(buf);
+ u32 new_ref = new_tsock->p->ref;
+
+ id.ref = msg_origport(msg);
+ id.node = msg_orignode(msg);
+ tipc_connect2port(new_ref, &id);
+ newsock->state = SS_CONNECTED;
+
+ tipc_set_portimportance(new_ref, msg_importance(msg));
+ if (msg_named(msg)) {
+ new_tsock->p->conn_type = msg_nametype(msg);
+ new_tsock->p->conn_instance = msg_nameinst(msg);
+ }
+
+ /*
+ * Respond to 'SYN-' by discarding it & returning 'ACK'-.
+ * Respond to 'SYN+' by queuing it on new socket.
+ */
+
+ msg_dbg(msg,"<ACC<: ");
+ if (!msg_data_sz(msg)) {
+ struct msghdr m = {0,};
+
+ send_packet(0, newsock, &m, 0);
+ advance_queue(tsock);
+ } else {
+ sock_lock(tsock);
+ skb_dequeue(&sock->sk->sk_receive_queue);
+ sock_unlock(tsock);
+ skb_queue_head(&newsock->sk->sk_receive_queue, buf);
+ }
+ }
+exit:
+ up(&tsock->sem);
+ return res;
+}
+
+/**
+ * shutdown - shutdown socket connection
+ * @sock: socket structure
+ * @how: direction to close (always treated as read + write)
+ *
+ * Terminates connection (if necessary), then purges socket's receive queue.
+ *
+ * Returns 0 on success, errno otherwise
+ */
+
+static int shutdown(struct socket *sock, int how)
+{
+ struct tipc_sock* tsock = tipc_sk(sock->sk);
+ struct sk_buff *buf;
+ int res;
+
+ /* Could return -EINVAL for an invalid "how", but why bother? */
+
+ if (down_interruptible(&tsock->sem))
+ return -ERESTARTSYS;
+
+ sock_lock(tsock);
+
+ switch (sock->state) {
+ case SS_CONNECTED:
+
+ /* Send 'FIN+' or 'FIN-' message to peer */
+
+ sock_unlock(tsock);
+restart:
+ if ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) {
+ atomic_dec(&tipc_queue_size);
+ if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf))) {
+ buf_discard(buf);
+ goto restart;
+ }
+ tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
+ } else {
+ tipc_shutdown(tsock->p->ref);
+ }
+ sock_lock(tsock);
+
+ /* fall through */
+
+ case SS_DISCONNECTING:
+
+ /* Discard any unreceived messages */
+
+ while ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) {
+ atomic_dec(&tipc_queue_size);
+ buf_discard(buf);
+ }
+ tsock->p->conn_unacked = 0;
+
+ /* fall through */
+
+ case SS_CONNECTING:
+ sock->state = SS_DISCONNECTING;
+ res = 0;
+ break;
+
+ default:
+ res = -ENOTCONN;
+ }
+
+ sock_unlock(tsock);
+
+ up(&tsock->sem);
+ return res;
+}
+
+/**
+ * setsockopt - set socket option
+ * @sock: socket structure
+ * @lvl: option level
+ * @opt: option identifier
+ * @ov: pointer to new option value
+ * @ol: length of option value
+ *
+ * For stream sockets only, accepts and ignores all IPPROTO_TCP options
+ * (to ease compatibility).
+ *
+ * Returns 0 on success, errno otherwise
+ */
+
+static int setsockopt(struct socket *sock, int lvl, int opt, char *ov, int ol)
+{
+ struct tipc_sock *tsock = tipc_sk(sock->sk);
+ u32 value;
+ int res;
+
+ if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
+ return 0;
+ if (lvl != SOL_TIPC)
+ return -ENOPROTOOPT;
+ if (ol < sizeof(value))
+ return -EINVAL;
+ if ((res = get_user(value, (u32 *)ov)))
+ return res;
+
+ if (down_interruptible(&tsock->sem))
+ return -ERESTARTSYS;
+
+ switch (opt) {
+ case TIPC_IMPORTANCE:
+ res = tipc_set_portimportance(tsock->p->ref, value);
+ break;
+ case TIPC_SRC_DROPPABLE:
+ if (sock->type != SOCK_STREAM)
+ res = tipc_set_portunreliable(tsock->p->ref, value);
+ else
+ res = -ENOPROTOOPT;
+ break;
+ case TIPC_DEST_DROPPABLE:
+ res = tipc_set_portunreturnable(tsock->p->ref, value);
+ break;
+ case TIPC_CONN_TIMEOUT:
+ sock->sk->sk_rcvtimeo = (value * HZ / 1000);
+ break;
+ default:
+ res = -EINVAL;
+ }
+
+ up(&tsock->sem);
+ return res;
+}
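+
+/*
+ * Example (user space, illustrative only): a 5 second connection timeout
+ * could be requested with
+ *
+ * __u32 timeout = 5000;
+ * setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &timeout, sizeof(timeout));
+ *
+ * where sd is an AF_TIPC socket; the value is taken in milliseconds and
+ * converted to jiffies above.
+ */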
+
+/**
+ * getsockopt - get socket option
+ * @sock: socket structure
+ * @lvl: option level
+ * @opt: option identifier
+ * @ov: receptacle for option value
+ * @ol: receptacle for length of option value
+ *
+ * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
+ * (to ease compatibility).
+ *
+ * Returns 0 on success, errno otherwise
+ */
+
+static int getsockopt(struct socket *sock, int lvl, int opt, char *ov, int *ol)
+{
+ struct tipc_sock *tsock = tipc_sk(sock->sk);
+ int len;
+ u32 value;
+ int res;
+
+ if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
+ return put_user(0, ol);
+ if (lvl != SOL_TIPC)
+ return -ENOPROTOOPT;
+ if ((res = get_user(len, ol)))
+ return res;
+
+ if (down_interruptible(&tsock->sem))
+ return -ERESTARTSYS;
+
+ switch (opt) {
+ case TIPC_IMPORTANCE:
+ res = tipc_portimportance(tsock->p->ref, &value);
+ break;
+ case TIPC_SRC_DROPPABLE:
+ res = tipc_portunreliable(tsock->p->ref, &value);
+ break;
+ case TIPC_DEST_DROPPABLE:
+ res = tipc_portunreturnable(tsock->p->ref, &value);
+ break;
+ case TIPC_CONN_TIMEOUT:
+ value = (sock->sk->sk_rcvtimeo * 1000) / HZ;
+ break;
+ default:
+ res = -EINVAL;
+ }
+
+ if (res) {
+ /* "get" failed */
+ } else if (len < sizeof(value)) {
+ res = -EINVAL;
+ } else if (copy_to_user(ov, &value, sizeof(value))) {
+ res = -EFAULT; /* couldn't return value */
+ } else {
+ res = put_user(sizeof(value), ol);
+ }
+
+ up(&tsock->sem);
+ return res;
+}
+
+/**
+ * Placeholders for non-implemented functionality
+ *
+ * Returns error code (POSIX-compliant where defined)
+ */
+
+static int ioctl(struct socket *s, u32 cmd, unsigned long arg)
+{
+ return -EINVAL;
+}
+
+static int no_mmap(struct file *file, struct socket *sock,
+ struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static ssize_t no_sendpage(struct socket *sock, struct page *page,
+ int offset, size_t size, int flags)
+{
+ return -EINVAL;
+}
+
+static int no_skpair(struct socket *s1, struct socket *s2)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * Protocol switches for the various types of TIPC sockets
+ */
+
+static struct proto_ops msg_ops = {
+ .owner = THIS_MODULE,
+ .family = AF_TIPC,
+ .release = release,
+ .bind = bind,
+ .connect = connect,
+ .socketpair = no_skpair,
+ .accept = accept,
+ .getname = get_name,
+ .poll = poll,
+ .ioctl = ioctl,
+ .listen = listen,
+ .shutdown = shutdown,
+ .setsockopt = setsockopt,
+ .getsockopt = getsockopt,
+ .sendmsg = send_msg,
+ .recvmsg = recv_msg,
+ .mmap = no_mmap,
+ .sendpage = no_sendpage
+};
+
+static struct proto_ops packet_ops = {
+ .owner = THIS_MODULE,
+ .family = AF_TIPC,
+ .release = release,
+ .bind = bind,
+ .connect = connect,
+ .socketpair = no_skpair,
+ .accept = accept,
+ .getname = get_name,
+ .poll = poll,
+ .ioctl = ioctl,
+ .listen = listen,
+ .shutdown = shutdown,
+ .setsockopt = setsockopt,
+ .getsockopt = getsockopt,
+ .sendmsg = send_packet,
+ .recvmsg = recv_msg,
+ .mmap = no_mmap,
+ .sendpage = no_sendpage
+};
+
+static struct proto_ops stream_ops = {
+ .owner = THIS_MODULE,
+ .family = AF_TIPC,
+ .release = release,
+ .bind = bind,
+ .connect = connect,
+ .socketpair = no_skpair,
+ .accept = accept,
+ .getname = get_name,
+ .poll = poll,
+ .ioctl = ioctl,
+ .listen = listen,
+ .shutdown = shutdown,
+ .setsockopt = setsockopt,
+ .getsockopt = getsockopt,
+ .sendmsg = send_stream,
+ .recvmsg = recv_stream,
+ .mmap = no_mmap,
+ .sendpage = no_sendpage
+};
+
+static struct net_proto_family tipc_family_ops = {
+ .owner = THIS_MODULE,
+ .family = AF_TIPC,
+ .create = tipc_create
+};
+
+static struct proto tipc_proto = {
+ .name = "TIPC",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct tipc_sock)
+};
+
+/**
+ * socket_init - initialize TIPC socket interface
+ *
+ * Returns 0 on success, errno otherwise
+ */
+int socket_init(void)
+{
+ int res;
+
+ res = proto_register(&tipc_proto, 1);
+ if (res) {
+ err("Failed to register TIPC protocol type\n");
+ goto out;
+ }
+
+ res = sock_register(&tipc_family_ops);
+ if (res) {
+ err("Failed to register TIPC socket type\n");
+ proto_unregister(&tipc_proto);
+ goto out;
+ }
+
+ sockets_enabled = 1;
+ out:
+ return res;
+}
+
+/**
+ * socket_stop - stop TIPC socket interface
+ */
+void socket_stop(void)
+{
+ if (!sockets_enabled)
+ return;
+
+ sockets_enabled = 0;
+ sock_unregister(tipc_family_ops.family);
+ proto_unregister(&tipc_proto);
+}
+
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
new file mode 100644
index 00000000000..80e219ba527
--- /dev/null
+++ b/net/tipc/subscr.c
@@ -0,0 +1,527 @@
+/*
+ * net/tipc/subscr.c: TIPC subscription service
+ *
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "subscr.h"
+#include "name_table.h"
+#include "ref.h"
+
+/**
+ * struct subscriber - TIPC network topology subscriber
+ * @ref: object reference to subscriber object itself
+ * @lock: pointer to spinlock controlling access to subscriber object
+ * @subscriber_list: adjacent subscribers in topology server's list of subscribers
+ * @subscription_list: list of subscription objects for this subscriber
+ * @port_ref: object reference to port used to communicate with subscriber
+ * @swap: indicates if subscriber uses opposite endianness in its messages
+ */
+
+struct subscriber {
+ u32 ref;
+ spinlock_t *lock;
+ struct list_head subscriber_list;
+ struct list_head subscription_list;
+ u32 port_ref;
+ int swap;
+};
+
+/**
+ * struct top_srv - TIPC network topology subscription service
+ * @user_ref: TIPC userid of subscription service
+ * @setup_port: reference to TIPC port that handles subscription requests
+ * @subscription_count: number of active subscriptions (not subscribers!)
+ * @subscriber_list: list of ports subscribing to service
+ * @lock: spinlock governing access to subscriber list
+ */
+
+struct top_srv {
+ u32 user_ref;
+ u32 setup_port;
+ atomic_t subscription_count;
+ struct list_head subscriber_list;
+ spinlock_t lock;
+};
+
+static struct top_srv topsrv = { 0 };
+
+/**
+ * htohl - convert value to endianness used by destination
+ * @in: value to convert
+ * @swap: non-zero if endianness must be reversed
+ *
+ * Returns converted value
+ */
+
+static inline u32 htohl(u32 in, int swap)
+{
+ return swap ? (((in & 0xff) << 24) | ((in & 0xff00) << 8) |
+ ((in & 0xff0000) >> 8) | ((in & 0xff000000) >> 24)) : in;
+}
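+
+/* Example: htohl(0x01020304, 1) returns 0x04030201; with swap == 0 the
+ * value is returned unchanged.
+ */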
+
+/**
+ * subscr_send_event - send a message containing a tipc_event to the subscriber
+ */
+
+static void subscr_send_event(struct subscription *sub,
+ u32 found_lower,
+ u32 found_upper,
+ u32 event,
+ u32 port_ref,
+ u32 node)
+{
+ struct iovec msg_sect;
+
+ msg_sect.iov_base = (void *)&sub->evt;
+ msg_sect.iov_len = sizeof(struct tipc_event);
+
+ sub->evt.event = htohl(event, sub->owner->swap);
+ sub->evt.found_lower = htohl(found_lower, sub->owner->swap);
+ sub->evt.found_upper = htohl(found_upper, sub->owner->swap);
+ sub->evt.port.ref = htohl(port_ref, sub->owner->swap);
+ sub->evt.port.node = htohl(node, sub->owner->swap);
+ tipc_send(sub->owner->port_ref, 1, &msg_sect);
+}
+
+/**
+ * subscr_overlap - test for subscription overlap with the given values
+ *
+ * Returns 1 if there is overlap, otherwise 0.
+ */
+
+int subscr_overlap(struct subscription *sub,
+ u32 found_lower,
+ u32 found_upper)
+
+{
+ if (found_lower < sub->seq.lower)
+ found_lower = sub->seq.lower;
+ if (found_upper > sub->seq.upper)
+ found_upper = sub->seq.upper;
+ if (found_lower > found_upper)
+ return 0;
+ return 1;
+}
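+
+/*
+ * Example (illustrative only): a subscription covering instances 100-200
+ * overlaps a publication covering 150-500 (the clamped range 150-200 is
+ * non-empty), but does not overlap one covering 300-400.
+ */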
+
+/**
+ * subscr_report_overlap - issue event if there is subscription overlap
+ *
+ * Protected by nameseq.lock in name_table.c
+ */
+
+void subscr_report_overlap(struct subscription *sub,
+ u32 found_lower,
+ u32 found_upper,
+ u32 event,
+ u32 port_ref,
+ u32 node,
+ int must)
+{
+ dbg("Rep overlap %u:%u,%u<->%u,%u\n", sub->seq.type, sub->seq.lower,
+ sub->seq.upper, found_lower, found_upper);
+ if (!subscr_overlap(sub, found_lower, found_upper))
+ return;
+ if (!must && (sub->filter != TIPC_SUB_PORTS))
+ return;
+ subscr_send_event(sub, found_lower, found_upper, event, port_ref, node);
+}
+
+/**
+ * subscr_timeout - subscription timeout has occurred
+ */
+
+static void subscr_timeout(struct subscription *sub)
+{
+ struct subscriber *subscriber;
+ u32 subscriber_ref;
+
+ /* Validate subscriber reference (in case subscriber is terminating) */
+
+ subscriber_ref = sub->owner->ref;
+ subscriber = (struct subscriber *)ref_lock(subscriber_ref);
+ if (subscriber == NULL)
+ return;
+
+ /* Unlink subscription from name table */
+
+ nametbl_unsubscribe(sub);
+
+ /* Notify subscriber of timeout, then unlink subscription */
+
+ subscr_send_event(sub,
+ sub->evt.s.seq.lower,
+ sub->evt.s.seq.upper,
+ TIPC_SUBSCR_TIMEOUT,
+ 0,
+ 0);
+ list_del(&sub->subscription_list);
+
+ /* Now destroy subscription */
+
+ ref_unlock(subscriber_ref);
+ k_term_timer(&sub->timer);
+ kfree(sub);
+ atomic_dec(&topsrv.subscription_count);
+}
+
+/**
+ * subscr_terminate - terminate communication with a subscriber
+ *
+ * Called with subscriber locked. Routine must temporarily release this lock
+ * to enable subscription timeout routine(s) to finish without deadlocking;
+ * the lock is then reclaimed to allow caller to release it upon return.
+ * (This should work even in the unlikely event some other thread creates
+ * a new object reference in the interim that uses this lock; this routine will
+ * simply wait for it to be released, then claim it.)
+ */
+
+static void subscr_terminate(struct subscriber *subscriber)
+{
+ struct subscription *sub;
+ struct subscription *sub_temp;
+
+ /* Invalidate subscriber reference */
+
+ ref_discard(subscriber->ref);
+ spin_unlock_bh(subscriber->lock);
+
+ /* Destroy any existing subscriptions for subscriber */
+
+ list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
+ subscription_list) {
+ if (sub->timeout != TIPC_WAIT_FOREVER) {
+ k_cancel_timer(&sub->timer);
+ k_term_timer(&sub->timer);
+ }
+ nametbl_unsubscribe(sub);
+ list_del(&sub->subscription_list);
+ dbg("Term: Removed sub %u,%u,%u from subscriber %x list\n",
+ sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber);
+ kfree(sub);
+ atomic_dec(&topsrv.subscription_count);
+ }
+
+ /* Sever connection to subscriber */
+
+ tipc_shutdown(subscriber->port_ref);
+ tipc_deleteport(subscriber->port_ref);
+
+ /* Remove subscriber from topology server's subscriber list */
+
+ spin_lock_bh(&topsrv.lock);
+ list_del(&subscriber->subscriber_list);
+ spin_unlock_bh(&topsrv.lock);
+
+ /* Now destroy subscriber */
+
+ spin_lock_bh(subscriber->lock);
+ kfree(subscriber);
+}
+
+/**
+ * subscr_subscribe - create subscription for subscriber
+ *
+ * Called with subscriber locked
+ */
+
+static void subscr_subscribe(struct tipc_subscr *s,
+ struct subscriber *subscriber)
+{
+ struct subscription *sub;
+
+ /* Refuse subscription if global limit exceeded */
+
+ if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
+ warn("Failed: max %u subscriptions\n", tipc_max_subscriptions);
+ subscr_terminate(subscriber);
+ return;
+ }
+
+ /* Allocate subscription object */
+
+ sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
+ if (sub == NULL) {
+ warn("Memory squeeze; ignoring subscription\n");
+ subscr_terminate(subscriber);
+ return;
+ }
+
+ /* Determine/update subscriber's endianness */
+
+ if ((s->filter == TIPC_SUB_PORTS) || (s->filter == TIPC_SUB_SERVICE))
+ subscriber->swap = 0;
+ else
+ subscriber->swap = 1;
+
+ /* Initialize subscription object */
+
+ memset(sub, 0, sizeof(*sub));
+ sub->seq.type = htohl(s->seq.type, subscriber->swap);
+ sub->seq.lower = htohl(s->seq.lower, subscriber->swap);
+ sub->seq.upper = htohl(s->seq.upper, subscriber->swap);
+ sub->timeout = htohl(s->timeout, subscriber->swap);
+ sub->filter = htohl(s->filter, subscriber->swap);
+ if (((sub->filter != TIPC_SUB_PORTS) &&
+ (sub->filter != TIPC_SUB_SERVICE)) ||
+ (sub->seq.lower > sub->seq.upper)) {
+ warn("Rejecting illegal subscription %u,%u,%u\n",
+ sub->seq.type, sub->seq.lower, sub->seq.upper);
+ kfree(sub);
+ subscr_terminate(subscriber);
+ return;
+ }
+ memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
+ INIT_LIST_HEAD(&sub->subscription_list);
+ INIT_LIST_HEAD(&sub->nameseq_list);
+ list_add(&sub->subscription_list, &subscriber->subscription_list);
+ atomic_inc(&topsrv.subscription_count);
+ if (sub->timeout != TIPC_WAIT_FOREVER) {
+ k_init_timer(&sub->timer,
+ (Handler)subscr_timeout, (unsigned long)sub);
+ k_start_timer(&sub->timer, sub->timeout);
+ }
+ sub->owner = subscriber;
+ nametbl_subscribe(sub);
+}
+
+/**
+ * subscr_conn_shutdown_event - handle termination request from subscriber
+ */
+
+static void subscr_conn_shutdown_event(void *usr_handle,
+ u32 portref,
+ struct sk_buff **buf,
+ unsigned char const *data,
+ unsigned int size,
+ int reason)
+{
+ struct subscriber *subscriber;
+ spinlock_t *subscriber_lock;
+
+ subscriber = ref_lock((u32)(unsigned long)usr_handle);
+ if (subscriber == NULL)
+ return;
+
+ subscriber_lock = subscriber->lock;
+ subscr_terminate(subscriber);
+ spin_unlock_bh(subscriber_lock);
+}
+
+/**
+ * subscr_conn_msg_event - handle new subscription request from subscriber
+ */
+
+static void subscr_conn_msg_event(void *usr_handle,
+ u32 port_ref,
+ struct sk_buff **buf,
+ const unchar *data,
+ u32 size)
+{
+ struct subscriber *subscriber;
+ spinlock_t *subscriber_lock;
+
+ subscriber = ref_lock((u32)(unsigned long)usr_handle);
+ if (subscriber == NULL)
+ return;
+
+ subscriber_lock = subscriber->lock;
+ if (size != sizeof(struct tipc_subscr))
+ subscr_terminate(subscriber);
+ else
+ subscr_subscribe((struct tipc_subscr *)data, subscriber);
+
+ spin_unlock_bh(subscriber_lock);
+}
+
+/**
+ * subscr_named_msg_event - handle request to establish a new subscriber
+ */
+
+static void subscr_named_msg_event(void *usr_handle,
+ u32 port_ref,
+ struct sk_buff **buf,
+ const unchar *data,
+ u32 size,
+ u32 importance,
+ struct tipc_portid const *orig,
+ struct tipc_name_seq const *dest)
+{
+ struct subscriber *subscriber;
+ struct iovec msg_sect = {0, 0};
+ spinlock_t *subscriber_lock;
+
+ dbg("subscr_named_msg_event: orig = %x own = %x,\n",
+ orig->node, tipc_own_addr);
+ if (size && (size != sizeof(struct tipc_subscr))) {
+ warn("Received tipc_subscr of invalid size\n");
+ return;
+ }
+
+ /* Create subscriber object */
+
+ subscriber = kmalloc(sizeof(struct subscriber), GFP_ATOMIC);
+ if (subscriber == NULL) {
+ warn("Memory squeeze; ignoring subscriber setup\n");
+ return;
+ }
+ memset(subscriber, 0, sizeof(struct subscriber));
+ INIT_LIST_HEAD(&subscriber->subscription_list);
+ INIT_LIST_HEAD(&subscriber->subscriber_list);
+ subscriber->ref = ref_acquire(subscriber, &subscriber->lock);
+ if (subscriber->ref == 0) {
+ warn("Failed to acquire subscriber reference\n");
+ kfree(subscriber);
+ return;
+ }
+
+ /* Establish a connection to subscriber */
+
+ tipc_createport(topsrv.user_ref,
+ (void *)(unsigned long)subscriber->ref,
+ importance,
+ 0,
+ 0,
+ subscr_conn_shutdown_event,
+ 0,
+ 0,
+ subscr_conn_msg_event,
+ 0,
+ &subscriber->port_ref);
+ if (subscriber->port_ref == 0) {
+ warn("Memory squeeze; failed to create subscription port\n");
+ ref_discard(subscriber->ref);
+ kfree(subscriber);
+ return;
+ }
+ tipc_connect2port(subscriber->port_ref, orig);
+
+
+ /* Add subscriber to topology server's subscriber list */
+
+ ref_lock(subscriber->ref);
+ spin_lock_bh(&topsrv.lock);
+ list_add(&subscriber->subscriber_list, &topsrv.subscriber_list);
+ spin_unlock_bh(&topsrv.lock);
+
+ /*
+ * Subscribe now if message contains a subscription,
+ * otherwise send an empty response to complete connection handshaking
+ */
+
+ subscriber_lock = subscriber->lock;
+ if (size)
+ subscr_subscribe((struct tipc_subscr *)data, subscriber);
+ else
+ tipc_send(subscriber->port_ref, 1, &msg_sect);
+
+ spin_unlock_bh(subscriber_lock);
+}
+
+int subscr_start(void)
+{
+ struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV};
+ int res = -1;
+
+ memset(&topsrv, 0, sizeof(topsrv));
+ spin_lock_init(&topsrv.lock);
+ INIT_LIST_HEAD(&topsrv.subscriber_list);
+
+ spin_lock_bh(&topsrv.lock);
+ res = tipc_attach(&topsrv.user_ref, 0, 0);
+ if (res) {
+ spin_unlock_bh(&topsrv.lock);
+ return res;
+ }
+
+ res = tipc_createport(topsrv.user_ref,
+ 0,
+ TIPC_CRITICAL_IMPORTANCE,
+ 0,
+ 0,
+ 0,
+ 0,
+ subscr_named_msg_event,
+ 0,
+ 0,
+ &topsrv.setup_port);
+ if (res)
+ goto failed;
+
+ res = nametbl_publish_rsv(topsrv.setup_port, TIPC_NODE_SCOPE, &seq);
+ if (res)
+ goto failed;
+
+ spin_unlock_bh(&topsrv.lock);
+ return 0;
+
+failed:
+ err("Failed to create subscription service\n");
+ tipc_detach(topsrv.user_ref);
+ topsrv.user_ref = 0;
+ spin_unlock_bh(&topsrv.lock);
+ return res;
+}
+
+void subscr_stop(void)
+{
+ struct subscriber *subscriber;
+ struct subscriber *subscriber_temp;
+ spinlock_t *subscriber_lock;
+
+ if (topsrv.user_ref) {
+ tipc_deleteport(topsrv.setup_port);
+ list_for_each_entry_safe(subscriber, subscriber_temp,
+ &topsrv.subscriber_list,
+ subscriber_list) {
+ ref_lock(subscriber->ref);
+ subscriber_lock = subscriber->lock;
+ subscr_terminate(subscriber);
+ spin_unlock_bh(subscriber_lock);
+ }
+ tipc_detach(topsrv.user_ref);
+ topsrv.user_ref = 0;
+ }
+}
+
+
+int tipc_ispublished(struct tipc_name const *name)
+{
+ u32 domain = 0;
+
+ return (nametbl_translate(name->type, name->instance, &domain) != 0);
+}
+
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
new file mode 100644
index 00000000000..ccff4efcb75
--- /dev/null
+++ b/net/tipc/subscr.h
@@ -0,0 +1,80 @@
+/*
+ * net/tipc/subscr.h: Include file for TIPC subscription service
+ *
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_SUBSCR_H
+#define _TIPC_SUBSCR_H
+
+/**
+ * struct subscription - TIPC network topology subscription object
+ * @seq: name sequence associated with subscription
+ * @timeout: duration of subscription (in ms)
+ * @filter: event filtering to be done for subscription
+ * @evt: template for events generated by subscription
+ * @subscription_list: adjacent subscriptions in subscriber's subscription list
+ * @nameseq_list: adjacent subscriptions in name sequence's subscription list
+ * @timer: timer governing subscription duration (used only if a timeout is set)
+ * @owner: pointer to subscriber object associated with this subscription
+ */
+
+struct subscription {
+ struct tipc_name_seq seq;
+ u32 timeout;
+ u32 filter;
+ struct tipc_event evt;
+ struct list_head subscription_list;
+ struct list_head nameseq_list;
+ struct timer_list timer;
+ struct subscriber *owner;
+};
+
+int subscr_overlap(struct subscription * sub,
+ u32 found_lower,
+ u32 found_upper);
+
+void subscr_report_overlap(struct subscription * sub,
+ u32 found_lower,
+ u32 found_upper,
+ u32 event,
+ u32 port_ref,
+ u32 node,
+ int must_report);
+
+int subscr_start(void);
+
+void subscr_stop(void);
+
+
+#endif
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
new file mode 100644
index 00000000000..35ec7dc8211
--- /dev/null
+++ b/net/tipc/user_reg.c
@@ -0,0 +1,265 @@
+/*
+ * net/tipc/user_reg.c: TIPC user registry code
+ *
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "user_reg.h"
+
+/*
+ * TIPC user registry keeps track of users of the tipc_port interface.
+ *
+ * The registry utilizes an array of "TIPC user" entries;
+ * a user's ID is the index of their associated array entry.
+ * Array entry 0 is not used, so userid 0 is not valid;
+ * TIPC sometimes uses this value to denote an anonymous user.
+ * The list of free entries is initially chained from last entry to entry 1.
+ */
+
+/**
+ * struct tipc_user - registered TIPC user info
+ * @next: index of next free registry entry (or -1 for an allocated entry)
+ * @callback: ptr to routine to call when TIPC mode changes (NULL if none)
+ * @usr_handle: user-defined value passed to callback routine
+ * @ports: list of user ports owned by the user
+ */
+
+struct tipc_user {
+ int next;
+ tipc_mode_event callback;
+ void *usr_handle;
+ struct list_head ports;
+};
+
+#define MAX_USERID 64
+#define USER_LIST_SIZE ((MAX_USERID + 1) * sizeof(struct tipc_user))
+
+static struct tipc_user *users = 0;
+static u32 next_free_user = MAX_USERID + 1;
+static spinlock_t reg_lock = SPIN_LOCK_UNLOCKED;
+
+/**
+ * reg_init - create TIPC user registry (but don't activate it)
+ *
+ * If registry has been pre-initialized it is left "as is".
+ * NOTE: This routine may be called when TIPC is inactive.
+ */
+
+static int reg_init(void)
+{
+ u32 i;
+
+ spin_lock_bh(&reg_lock);
+ if (!users) {
+ users = (struct tipc_user *)kmalloc(USER_LIST_SIZE, GFP_ATOMIC);
+ if (users) {
+ memset(users, 0, USER_LIST_SIZE);
+ for (i = 1; i <= MAX_USERID; i++) {
+ users[i].next = i - 1;
+ }
+ next_free_user = MAX_USERID;
+ }
+ }
+ spin_unlock_bh(&reg_lock);
+ return users ? TIPC_OK : -ENOMEM;
+}
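+
+/*
+ * Example (illustrative only): after reg_init() next_free_user is 64 and
+ * users[i].next == i - 1, so successive tipc_attach() calls hand out
+ * userids 64, 63, 62, ... until entry 1 is used and further attach
+ * attempts fail with -EBUSY.
+ */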
+
+/**
+ * reg_callback - inform TIPC user about current operating mode
+ */
+
+static void reg_callback(struct tipc_user *user_ptr)
+{
+ tipc_mode_event cb;
+ void *arg;
+
+ spin_lock_bh(&reg_lock);
+ cb = user_ptr->callback;
+ arg = user_ptr->usr_handle;
+ spin_unlock_bh(&reg_lock);
+
+ if (cb)
+ cb(arg, tipc_mode, tipc_own_addr);
+}
+
+/**
+ * reg_start - activate TIPC user registry
+ */
+
+int reg_start(void)
+{
+ u32 u;
+ int res;
+
+ if ((res = reg_init()))
+ return res;
+
+ for (u = 1; u <= MAX_USERID; u++) {
+ if (users[u].callback)
+ k_signal((Handler)reg_callback,
+ (unsigned long)&users[u]);
+ }
+ return TIPC_OK;
+}
+
+/**
+ * reg_stop - shut down & delete TIPC user registry
+ */
+
+void reg_stop(void)
+{
+ int id;
+
+ if (!users)
+ return;
+
+ for (id = 1; id <= MAX_USERID; id++) {
+ if (users[id].callback)
+ reg_callback(&users[id]);
+ }
+ kfree(users);
+ users = 0;
+}
+
+/**
+ * tipc_attach - register a TIPC user
+ *
+ * NOTE: This routine may be called when TIPC is inactive.
+ */
+
+int tipc_attach(u32 *userid, tipc_mode_event cb, void *usr_handle)
+{
+ struct tipc_user *user_ptr;
+
+ if ((tipc_mode == TIPC_NOT_RUNNING) && !cb)
+ return -ENOPROTOOPT;
+ if (!users)
+ reg_init();
+
+ spin_lock_bh(&reg_lock);
+ if (!next_free_user) {
+ spin_unlock_bh(&reg_lock);
+ return -EBUSY;
+ }
+ user_ptr = &users[next_free_user];
+ *userid = next_free_user;
+ next_free_user = user_ptr->next;
+ user_ptr->next = -1;
+ spin_unlock_bh(&reg_lock);
+
+ user_ptr->callback = cb;
+ user_ptr->usr_handle = usr_handle;
+ INIT_LIST_HEAD(&user_ptr->ports);
+ atomic_inc(&tipc_user_count);
+
+ if (cb && (tipc_mode != TIPC_NOT_RUNNING))
+ k_signal((Handler)reg_callback, (unsigned long)user_ptr);
+ return TIPC_OK;
+}
+
+/**
+ * tipc_detach - deregister a TIPC user
+ */
+
+void tipc_detach(u32 userid)
+{
+ struct tipc_user *user_ptr;
+ struct list_head ports_temp;
+ struct user_port *up_ptr, *temp_up_ptr;
+
+ if ((userid == 0) || (userid > MAX_USERID))
+ return;
+
+ spin_lock_bh(&reg_lock);
+ if ((!users) || (users[userid].next >= 0)) {
+ spin_unlock_bh(&reg_lock);
+ return;
+ }
+
+ user_ptr = &users[userid];
+ user_ptr->callback = NULL;
+ INIT_LIST_HEAD(&ports_temp);
+ list_splice(&user_ptr->ports, &ports_temp);
+ user_ptr->next = next_free_user;
+ next_free_user = userid;
+ spin_unlock_bh(&reg_lock);
+
+ atomic_dec(&tipc_user_count);
+
+ list_for_each_entry_safe(up_ptr, temp_up_ptr, &ports_temp, uport_list) {
+ tipc_deleteport(up_ptr->ref);
+ }
+}
+
+/**
+ * reg_add_port - register a user's driver port
+ */
+
+int reg_add_port(struct user_port *up_ptr)
+{
+ struct tipc_user *user_ptr;
+
+ if (up_ptr->user_ref == 0)
+ return TIPC_OK;
+ if (up_ptr->user_ref > MAX_USERID)
+ return -EINVAL;
+ if ((tipc_mode == TIPC_NOT_RUNNING) || !users )
+ return -ENOPROTOOPT;
+
+ spin_lock_bh(&reg_lock);
+ user_ptr = &users[up_ptr->user_ref];
+ list_add(&up_ptr->uport_list, &user_ptr->ports);
+ spin_unlock_bh(&reg_lock);
+ return TIPC_OK;
+}
+
+/**
+ * reg_remove_port - deregister a user's driver port
+ */
+
+int reg_remove_port(struct user_port *up_ptr)
+{
+ if (up_ptr->user_ref == 0)
+ return TIPC_OK;
+ if (up_ptr->user_ref > MAX_USERID)
+ return -EINVAL;
+ if (!users )
+ return -ENOPROTOOPT;
+
+ spin_lock_bh(&reg_lock);
+ list_del_init(&up_ptr->uport_list);
+ spin_unlock_bh(&reg_lock);
+ return TIPC_OK;
+}
+
diff --git a/net/tipc/user_reg.h b/net/tipc/user_reg.h
new file mode 100644
index 00000000000..122ca9be367
--- /dev/null
+++ b/net/tipc/user_reg.h
@@ -0,0 +1,48 @@
+/*
+ * net/tipc/user_reg.h: Include file for TIPC user registry code
+ *
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_USER_REG_H
+#define _TIPC_USER_REG_H
+
+#include "port.h"
+
+int reg_start(void);
+void reg_stop(void);
+
+int reg_add_port(struct user_port *up_ptr);
+int reg_remove_port(struct user_port *up_ptr);
+
+#endif
diff --git a/net/tipc/zone.c b/net/tipc/zone.c
new file mode 100644
index 00000000000..4eaef662d56
--- /dev/null
+++ b/net/tipc/zone.c
@@ -0,0 +1,169 @@
+/*
+ * net/tipc/zone.c: TIPC zone management routines
+ *
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "zone.h"
+#include "net.h"
+#include "addr.h"
+#include "node_subscr.h"
+#include "cluster.h"
+#include "node.h"
+
+struct _zone *zone_create(u32 addr)
+{
+ struct _zone *z_ptr = 0;
+ u32 z_num;
+
+ if (!addr_domain_valid(addr))
+ return 0;
+
+ z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC);
+ if (z_ptr != NULL) {
+ memset(z_ptr, 0, sizeof(*z_ptr));
+ z_num = tipc_zone(addr);
+ z_ptr->addr = tipc_addr(z_num, 0, 0);
+ net.zones[z_num] = z_ptr;
+ }
+ return z_ptr;
+}
+
+void zone_delete(struct _zone *z_ptr)
+{
+ u32 c_num;
+
+ if (!z_ptr)
+ return;
+ for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
+ cluster_delete(z_ptr->clusters[c_num]);
+ }
+ kfree(z_ptr);
+}
+
+void zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr)
+{
+ u32 c_num = tipc_cluster(c_ptr->addr);
+
+ assert(c_ptr->addr);
+ assert(c_num <= tipc_max_clusters);
+ assert(z_ptr->clusters[c_num] == 0);
+ z_ptr->clusters[c_num] = c_ptr;
+}
+
+void zone_remove_as_router(struct _zone *z_ptr, u32 router)
+{
+ u32 c_num;
+
+ for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
+ if (z_ptr->clusters[c_num]) {
+ cluster_remove_as_router(z_ptr->clusters[c_num],
+ router);
+ }
+ }
+}
+
+void zone_send_external_routes(struct _zone *z_ptr, u32 dest)
+{
+ u32 c_num;
+
+ for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
+ if (z_ptr->clusters[c_num]) {
+ if (in_own_cluster(z_ptr->addr))
+ continue;
+ cluster_send_ext_routes(z_ptr->clusters[c_num], dest);
+ }
+ }
+}
+
+struct node *zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref)
+{
+ struct cluster *c_ptr;
+ struct node *n_ptr;
+ u32 c_num;
+
+ if (!z_ptr)
+ return 0;
+ c_ptr = z_ptr->clusters[tipc_cluster(addr)];
+ if (!c_ptr)
+ return 0;
+ n_ptr = cluster_select_node(c_ptr, ref);
+ if (n_ptr)
+ return n_ptr;
+
+ /* Links to any other clusters within this zone ? */
+ for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
+ c_ptr = z_ptr->clusters[c_num];
+ if (!c_ptr)
+ return 0;
+ n_ptr = cluster_select_node(c_ptr, ref);
+ if (n_ptr)
+ return n_ptr;
+ }
+ return 0;
+}
+
+u32 zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref)
+{
+ struct cluster *c_ptr;
+ u32 c_num;
+ u32 router;
+
+ if (!z_ptr)
+ return 0;
+ c_ptr = z_ptr->clusters[tipc_cluster(addr)];
+ router = c_ptr ? cluster_select_router(c_ptr, ref) : 0;
+ if (router)
+ return router;
+
+ /* Links to any other clusters within the zone? */
+ for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
+ c_ptr = z_ptr->clusters[c_num];
+ router = c_ptr ? cluster_select_router(c_ptr, ref) : 0;
+ if (router)
+ return router;
+ }
+ return 0;
+}
+
+
+u32 zone_next_node(u32 addr)
+{
+ struct cluster *c_ptr = cluster_find(addr);
+
+ if (c_ptr)
+ return cluster_next_node(c_ptr, addr);
+ return 0;
+}
+
diff --git a/net/tipc/zone.h b/net/tipc/zone.h
new file mode 100644
index 00000000000..4326f78d829
--- /dev/null
+++ b/net/tipc/zone.h
@@ -0,0 +1,71 @@
+/*
+ * net/tipc/zone.h: Include file for TIPC zone management routines
+ *
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_ZONE_H
+#define _TIPC_ZONE_H
+
+#include "node_subscr.h"
+#include "net.h"
+
+
+/**
+ * struct _zone - TIPC zone structure
+ * @addr: network address of zone
+ * @clusters: array of pointers to all clusters within zone
+ * @links: (used for inter-zone communication)
+ */
+
+struct _zone {
+ u32 addr;
+ struct cluster *clusters[2]; /* currently limited to just 1 cluster */
+ u32 links;
+};
+
+struct node *zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref);
+u32 zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref);
+void zone_remove_as_router(struct _zone *z_ptr, u32 router);
+void zone_send_external_routes(struct _zone *z_ptr, u32 dest);
+struct _zone *zone_create(u32 addr);
+void zone_delete(struct _zone *z_ptr);
+void zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr);
+u32 zone_next_node(u32 addr);
+
+static inline struct _zone *zone_find(u32 addr)
+{
+ return net.zones[tipc_zone(addr)];
+}
+
+#endif
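zone_find() above indexes net.zones[] by the zone part of a network address, and zone.c indexes z_ptr->clusters[] by the cluster part. A sketch of the helpers this relies on, assuming TIPC's usual <Z.C.N> packing (the bit layout here is an assumption, not something this patch states):

	#include <linux/types.h>

	/* Assumed packing: zone in bits 31-24, cluster in bits 23-12, node in
	 * bits 11-0; tipc_zone()/tipc_cluster() live in net/tipc/addr.h. */
	static inline unsigned int tipc_zone(u32 addr)
	{
		return addr >> 24;
	}

	static inline unsigned int tipc_cluster(u32 addr)
	{
		return (addr >> 12) & 0xfff;
	}

	static inline u32 tipc_addr(unsigned int zone, unsigned int cluster,
				    unsigned int node)
	{
		return (zone << 24) | (cluster << 12) | node;
	}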
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 5f6ae79b8b1..1b5989b1b67 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -784,7 +784,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
err = vfs_mknod(nd.dentry->d_inode, dentry, mode, 0);
if (err)
goto out_mknod_dput;
- up(&nd.dentry->d_inode->i_sem);
+ mutex_unlock(&nd.dentry->d_inode->i_mutex);
dput(nd.dentry);
nd.dentry = dentry;
@@ -823,7 +823,7 @@ out:
out_mknod_dput:
dput(dentry);
out_mknod_unlock:
- up(&nd.dentry->d_inode->i_sem);
+ mutex_unlock(&nd.dentry->d_inode->i_mutex);
path_release(&nd);
out_mknod_parent:
if (err==-EEXIST)
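Both hunks in this file are part of the tree-wide i_sem conversion: the inode semaphore that was only ever used as a mutex became a real struct mutex (i_mutex) in this release, so down()/up() pairs turn into mutex_lock()/mutex_unlock(). A minimal sketch of the pattern, assuming the post-conversion struct inode:

	#include <linux/fs.h>
	#include <linux/mutex.h>

	/* Illustrative only: serialise a directory update the same way
	 * unix_bind() does around vfs_mknod() above. */
	static void update_dir(struct inode *dir)
	{
		mutex_lock(&dir->i_mutex);	/* was: down(&dir->i_sem) */
		/* ... create or remove an entry while the directory is locked ... */
		mutex_unlock(&dir->i_mutex);	/* was: up(&dir->i_sem) */
	}

The same substitution appears again in security/inode.c further down.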
diff --git a/net/wanrouter/af_wanpipe.c b/net/wanrouter/af_wanpipe.c
index 7a43ae4721e..8b9bf4a763b 100644
--- a/net/wanrouter/af_wanpipe.c
+++ b/net/wanrouter/af_wanpipe.c
@@ -36,6 +36,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index bcf7b3faa76..c34833dc7cc 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -44,6 +44,7 @@
#include <linux/config.h>
#include <linux/stddef.h> /* offsetof(), etc. */
+#include <linux/capability.h>
#include <linux/errno.h> /* return codes */
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 16459c7f54b..72b6ff3299b 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -37,6 +37,7 @@
#include <linux/config.h>
#include <linux/module.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -540,12 +541,7 @@ static struct sock *x25_make_new(struct sock *osk)
sk->sk_state = TCP_ESTABLISHED;
sk->sk_sleep = osk->sk_sleep;
sk->sk_backlog_rcv = osk->sk_backlog_rcv;
-
- if (sock_flag(osk, SOCK_ZAPPED))
- sock_set_flag(sk, SOCK_ZAPPED);
-
- if (sock_flag(osk, SOCK_DBG))
- sock_set_flag(sk, SOCK_DBG);
+ sock_copy_flags(sk, osk);
ox25 = x25_sk(osk);
x25->t21 = ox25->t21;
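Copying SOCK_ZAPPED and SOCK_DBG individually silently misses any flag added later; sock_copy_flags() transfers the whole flags word in one go. In essence the helper (defined in include/net/sock.h) amounts to this sketch:

	/* Sketch, not the authoritative definition: propagate every sk_flags
	 * bit from the template socket to the newly created one. */
	static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
	{
		nsk->sk_flags = osk->sk_flags;
	}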
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 2f4531fcaca..6ed3302312f 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -540,8 +540,7 @@ void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
start = end;
}
}
- if (len)
- BUG();
+ BUG_ON(len);
}
EXPORT_SYMBOL_GPL(skb_icv_walk);
@@ -610,8 +609,7 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
start = end;
}
}
- if (len)
- BUG();
+ BUG_ON(len);
return elt;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
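The if-then-BUG() conversions here are mechanical: BUG_ON() is the canonical spelling and keeps the condition on one line. The generic fallback is roughly:

	/* Sketch of the asm-generic definition; architectures may override it
	 * with something cheaper than a full conditional. */
	#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)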
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 64a447375fd..077bbf9fb9b 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -22,6 +22,7 @@
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
+#include <linux/netfilter.h>
#include <linux/module.h>
#include <net/xfrm.h>
#include <net/ip.h>
@@ -247,11 +248,9 @@ EXPORT_SYMBOL(xfrm_policy_alloc);
void __xfrm_policy_destroy(struct xfrm_policy *policy)
{
- if (!policy->dead)
- BUG();
+ BUG_ON(!policy->dead);
- if (policy->bundles)
- BUG();
+ BUG_ON(policy->bundles);
if (del_timer(&policy->timer))
BUG();
@@ -951,8 +950,8 @@ xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
return start;
}
-static int
-_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
+int
+xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
{
struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
@@ -963,6 +962,7 @@ _decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
xfrm_policy_put_afinfo(afinfo);
return 0;
}
+EXPORT_SYMBOL(xfrm_decode_session);
static inline int secpath_has_tunnel(struct sec_path *sp, int k)
{
@@ -982,8 +982,9 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
u8 fl_dir = policy_to_flow_dir(dir);
u32 sk_sid;
- if (_decode_session(skb, &fl, family) < 0)
+ if (xfrm_decode_session(skb, &fl, family) < 0)
return 0;
+ nf_nat_decode_session(skb, &fl, family);
sk_sid = security_sk_sid(sk, &fl, fl_dir);
@@ -1055,7 +1056,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
struct flowi fl;
- if (_decode_session(skb, &fl, family) < 0)
+ if (xfrm_decode_session(skb, &fl, family) < 0)
return 0;
return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
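Exporting the former _decode_session() as xfrm_decode_session() lets code outside xfrm_policy.c - here the netfilter NAT hook called via nf_nat_decode_session() - map a packet onto a flow key. A hedged consumer sketch, using only the signature visible in this hunk:

	#include <linux/errno.h>
	#include <linux/skbuff.h>
	#include <linux/socket.h>
	#include <net/xfrm.h>

	/* Illustrative caller: derive a flowi from a packet for a policy
	 * lookup; AF_INET is assumed for the example. */
	static int classify_packet(struct sk_buff *skb)
	{
		struct flowi fl;

		if (xfrm_decode_session(skb, &fl, AF_INET) < 0)
			return -EINVAL;		/* unparsable packet */

		/* ... fl now carries addresses, ports and protocol ... */
		return 0;
	}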
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 92e2b804c60..ac87a09ba83 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -802,6 +802,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfr
excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
err = xfrm_policy_insert(p->dir, xp, excl);
if (err) {
+ security_xfrm_policy_free(xp);
kfree(xp);
return err;
}
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index db3c708e546..0168d6c3707 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -3,6 +3,7 @@
# Convenient variables
comma := ,
+squote := '
empty :=
space := $(empty) $(empty)
@@ -12,6 +13,10 @@ space := $(empty) $(empty)
depfile = $(subst $(comma),_,$(@D)/.$(@F).d)
###
+# Escape single quote for use in echo statements
+escsq = $(subst $(squote),'\$(squote)',$1)
+
+###
# filechk is used to check if the content of a generated file is updated.
# Sample usage:
# define filechk_sample
@@ -47,7 +52,7 @@ build := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.build obj
# If quiet is set, only print short version of command
cmd = @$(if $($(quiet)cmd_$(1)),\
- echo ' $(subst ','\'',$($(quiet)cmd_$(1)))' &&) $(cmd_$(1))
+ echo ' $(call escsq,$($(quiet)cmd_$(1)))' &&) $(cmd_$(1))
# Add $(obj)/ for paths that is not absolute
objectify = $(foreach o,$(1),$(if $(filter /%,$(o)),$(o),$(obj)/$(o)))
@@ -68,7 +73,7 @@ endif
# echo command. Short version is $(quiet) equals quiet, otherwise full command
echo-cmd = $(if $($(quiet)cmd_$(1)), \
- echo ' $(subst ','\'',$($(quiet)cmd_$(1)))';)
+ echo ' $(call escsq,$($(quiet)cmd_$(1)))';)
# function to only execute the passed command if necessary
# >'< substitution is for echo to work, >$< substitution to preserve $ when reloading .cmd file
@@ -78,7 +83,7 @@ if_changed = $(if $(strip $? $(call arg-check, $(cmd_$(1)), $(cmd_$@)) ), \
@set -e; \
$(echo-cmd) \
$(cmd_$(1)); \
- echo 'cmd_$@ := $(subst $$,$$$$,$(subst ','\'',$(cmd_$(1))))' > $(@D)/.$(@F).cmd)
+ echo 'cmd_$@ := $(subst $$,$$$$,$(call escsq,$(cmd_$(1))))' > $(@D)/.$(@F).cmd)
# execute the command and also postprocess generated .d dependencies
# file
@@ -87,7 +92,7 @@ if_changed_dep = $(if $(strip $? $(filter-out FORCE $(wildcard $^),$^)\
@set -e; \
$(echo-cmd) \
$(cmd_$(1)); \
- scripts/basic/fixdep $(depfile) $@ '$(subst $$,$$$$,$(subst ','\'',$(cmd_$(1))))' > $(@D)/.$(@F).tmp; \
+ scripts/basic/fixdep $(depfile) $@ '$(subst $$,$$$$,$(call escsq,$(cmd_$(1))))' > $(@D)/.$(@F).tmp; \
rm -f $(depfile); \
mv -f $(@D)/.$(@F).tmp $(@D)/.$(@F).cmd)
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 506e3f3befe..c33e62bde6b 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -179,10 +179,10 @@ endif
define rule_cc_o_c
$(if $($(quiet)cmd_checksrc),echo ' $($(quiet)cmd_checksrc)';) \
$(cmd_checksrc) \
- $(if $($(quiet)cmd_cc_o_c),echo ' $(subst ','\'',$($(quiet)cmd_cc_o_c))';) \
+ $(if $($(quiet)cmd_cc_o_c),echo ' $(call escsq,$($(quiet)cmd_cc_o_c))';) \
$(cmd_cc_o_c); \
$(cmd_modversions) \
- scripts/basic/fixdep $(depfile) $@ '$(subst ','\'',$(cmd_cc_o_c))' > $(@D)/.$(@F).tmp; \
+ scripts/basic/fixdep $(depfile) $@ '$(call escsq,$(cmd_cc_o_c))' > $(@D)/.$(@F).tmp; \
rm -f $(depfile); \
mv -f $(@D)/.$(@F).tmp $(@D)/.$(@F).cmd
endef
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 0c4f3a9f2ea..bf96a61d4b8 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -30,7 +30,7 @@
# - See include/linux/module.h for more details
# Step 4 is solely used to allow module versioning in external modules,
-# where the CRC of each module is retreived from the Module.symers file.
+# where the CRC of each module is retrieved from the Module.symvers file.
.PHONY: _modpost
_modpost: __modpost
diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
new file mode 100644
index 00000000000..75f21d843c1
--- /dev/null
+++ b/scripts/bloat-o-meter
@@ -0,0 +1,58 @@
+#!/usr/bin/python
+#
+# Copyright 2004 Matt Mackall <mpm@selenic.com>
+#
+# inspired by perl Bloat-O-Meter (c) 1997 by Andi Kleen
+#
+# This software may be used and distributed according to the terms
+# of the GNU General Public License, incorporated herein by reference.
+
+import sys, os, re
+
+if len(sys.argv) != 3:
+ sys.stderr.write("usage: %s file1 file2\n" % sys.argv[0])
+ sys.exit(-1)
+
+def getsizes(file):
+ sym = {}
+ for l in os.popen("nm --size-sort " + file).readlines():
+ size, type, name = l[:-1].split()
+ if type in "tTdDbB":
+ sym[name] = int(size, 16)
+ return sym
+
+old = getsizes(sys.argv[1])
+new = getsizes(sys.argv[2])
+grow, shrink, add, remove, up, down = 0, 0, 0, 0, 0, 0
+delta, common = [], {}
+
+for a in old:
+ if a in new:
+ common[a] = 1
+
+for name in old:
+ if name not in common:
+ remove += 1
+ down += old[name]
+ delta.append((-old[name], name))
+
+for name in new:
+ if name not in common:
+ add += 1
+ up += new[name]
+ delta.append((new[name], name))
+
+for name in common:
+ d = new.get(name, 0) - old.get(name, 0)
+ if d>0: grow, up = grow+1, up+d
+ if d<0: shrink, down = shrink+1, down-d
+ delta.append((d, name))
+
+delta.sort()
+delta.reverse()
+
+print "add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
+ (add, remove, grow, shrink, up, -down, up-down)
+print "%-40s %7s %7s %+7s" % ("function", "old", "new", "delta")
+for d, n in delta:
+ if d: print "%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d)
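The script diffs two nm --size-sort listings: typical use is to keep the vmlinux built before and after a change and run scripts/bloat-o-meter vmlinux.old vmlinux.new, which prints the add/remove/grow/shrink summary followed by the per-symbol size deltas.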
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 3d7f1ac9e00..5760e057ecb 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -133,8 +133,8 @@ HOSTCFLAGS_zconf.tab.o := -I$(src)
HOSTLOADLIBES_qconf = $(KC_QT_LIBS) -ldl
HOSTCXXFLAGS_qconf.o = $(KC_QT_CFLAGS) -D LKC_DIRECT_LINK
-HOSTLOADLIBES_gconf = `pkg-config gtk+-2.0 gmodule-2.0 libglade-2.0 --libs`
-HOSTCFLAGS_gconf.o = `pkg-config gtk+-2.0 gmodule-2.0 libglade-2.0 --cflags` \
+HOSTLOADLIBES_gconf = `pkg-config --libs gtk+-2.0 gmodule-2.0 libglade-2.0`
+HOSTCFLAGS_gconf.o = `pkg-config --cflags gtk+-2.0 gmodule-2.0 libglade-2.0` \
-D LKC_DIRECT_LINK
$(obj)/qconf.o: $(obj)/.tmp_qtcheck
@@ -193,8 +193,8 @@ ifeq ($(gconf-target),1)
# GTK needs some extra effort, too...
$(obj)/.tmp_gtkcheck:
- @if `pkg-config gtk+-2.0 gmodule-2.0 libglade-2.0 --exists`; then \
- if `pkg-config gtk+-2.0 --atleast-version=2.0.0`; then \
+ @if `pkg-config --exists gtk+-2.0 gmodule-2.0 libglade-2.0`; then \
+ if `pkg-config --atleast-version=2.0.0 gtk+-2.0`; then \
touch $@; \
else \
echo "*"; \
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 8ba5d29d3d4..10eeae53d82 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -63,6 +63,20 @@ static void check_stdin(void)
}
}
+static char *fgets_check_stream(char *s, int size, FILE *stream)
+{
+ char *ret = fgets(s, size, stream);
+
+ if (ret == NULL && feof(stream)) {
+ printf(_("aborted!\n\n"));
+ printf(_("Console input is closed. "));
+ printf(_("Run 'make oldconfig' to update configuration.\n\n"));
+ exit(1);
+ }
+
+ return ret;
+}
+
static void conf_askvalue(struct symbol *sym, const char *def)
{
enum symbol_type type = sym_get_type(sym);
@@ -100,7 +114,7 @@ static void conf_askvalue(struct symbol *sym, const char *def)
check_stdin();
case ask_all:
fflush(stdout);
- fgets(line, 128, stdin);
+ fgets_check_stream(line, 128, stdin);
return;
case set_default:
printf("%s\n", def);
@@ -356,7 +370,7 @@ static int conf_choice(struct menu *menu)
check_stdin();
case ask_all:
fflush(stdout);
- fgets(line, 128, stdin);
+ fgets_check_stream(line, 128, stdin);
strip(line);
if (line[0] == '?') {
printf("\n%s\n", menu->sym->help ?
diff --git a/scripts/kconfig/lxdialog/Makefile b/scripts/kconfig/lxdialog/Makefile
index a45a13fb26e..8f41d9a57aa 100644
--- a/scripts/kconfig/lxdialog/Makefile
+++ b/scripts/kconfig/lxdialog/Makefile
@@ -1,42 +1,18 @@
-HOST_EXTRACFLAGS := -DLOCALE
-ifeq ($(shell uname),SunOS)
-HOST_LOADLIBES := -lcurses
-else
-HOST_LOADLIBES := -lncurses
-endif
+# Makefile to build lxdialog package
+#
-ifeq (/usr/include/ncurses/ncurses.h, $(wildcard /usr/include/ncurses/ncurses.h))
- HOST_EXTRACFLAGS += -I/usr/include/ncurses -DCURSES_LOC="<ncurses.h>"
-else
-ifeq (/usr/include/ncurses/curses.h, $(wildcard /usr/include/ncurses/curses.h))
- HOST_EXTRACFLAGS += -I/usr/include/ncurses -DCURSES_LOC="<ncurses/curses.h>"
-else
-ifeq (/usr/include/ncurses.h, $(wildcard /usr/include/ncurses.h))
- HOST_EXTRACFLAGS += -DCURSES_LOC="<ncurses.h>"
-else
- HOST_EXTRACFLAGS += -DCURSES_LOC="<curses.h>"
-endif
-endif
-endif
+check-lxdialog := $(srctree)/$(src)/check-lxdialog.sh
+HOST_EXTRACFLAGS := $(shell $(CONFIG_SHELL) $(check-lxdialog) -ccflags)
+HOST_LOADLIBES := $(shell $(CONFIG_SHELL) $(check-lxdialog) -ldflags)
+
+HOST_EXTRACFLAGS += -DLOCALE
+
+.PHONY: dochecklxdialog
+$(obj)/dochecklxdialog:
+ $(Q)$(CONFIG_SHELL) $(check-lxdialog) -check $(HOSTCC) $(HOST_LOADLIBES)
hostprogs-y := lxdialog
-always := ncurses $(hostprogs-y)
+always := $(hostprogs-y) dochecklxdialog
lxdialog-objs := checklist.o menubox.o textbox.o yesno.o inputbox.o \
util.o lxdialog.o msgbox.o
-
-.PHONY: $(obj)/ncurses
-$(obj)/ncurses:
- @echo "main() {}" > lxtemp.c
- @if $(HOSTCC) lxtemp.c $(HOST_LOADLIBES); then \
- rm -f lxtemp.c a.out; \
- else \
- rm -f lxtemp.c; \
- echo -e "\007" ;\
- echo ">> Unable to find the Ncurses libraries." ;\
- echo ">>" ;\
- echo ">> You must install ncurses-devel in order" ;\
- echo ">> to use 'make menuconfig'" ;\
- echo ;\
- exit 1 ;\
- fi
diff --git a/scripts/kconfig/lxdialog/check-lxdialog.sh b/scripts/kconfig/lxdialog/check-lxdialog.sh
new file mode 100644
index 00000000000..a3c141b4967
--- /dev/null
+++ b/scripts/kconfig/lxdialog/check-lxdialog.sh
@@ -0,0 +1,67 @@
+#!/bin/sh
+# Check ncurses compatibility
+
+# What library to link
+ldflags()
+{
+ if [ `uname` == SunOS ]; then
+ echo '-lcurses'
+ else
+ echo '-lncurses'
+ fi
+}
+
+# Where is ncurses.h?
+ccflags()
+{
+ if [ -f /usr/include/ncurses/ncurses.h ]; then
+ echo '-I/usr/include/ncurses -DCURSES_LOC="<ncurses.h>"'
+ elif [ -f /usr/include/ncurses/curses.h ]; then
+ echo '-I/usr/include/ncurses -DCURSES_LOC="<ncurses/curses.h>"'
+ elif [ -f /usr/include/ncurses.h ]; then
+ echo '-DCURSES_LOC="<ncurses.h>"'
+ else
+ echo '-DCURSES_LOC="<curses.h>"'
+ fi
+}
+
+compiler=""
+# Check if we can link to ncurses
+check() {
+ echo "main() {}" | $compiler -xc -
+ if [ $? != 0 ]; then
+ echo " *** Unable to find the ncurses libraries." 1>&2
+ echo " *** make menuconfig require the ncurses libraries" 1>&2
+ echo " *** " 1>&2
+ echo " *** Install ncurses (ncurses-devel) and try again" 1>&2
+ echo " *** " 1>&2
+ exit 1
+ fi
+}
+
+usage() {
+ printf "Usage: $0 [-check compiler options|-header|-library]\n"
+}
+
+if [ $# == 0 ]; then
+ usage
+ exit 1
+fi
+
+case "$1" in
+ "-check")
+ shift
+ compiler="$@"
+ check
+ ;;
+ "-ccflags")
+ ccflags
+ ;;
+ "-ldflags")
+ ldflags
+ ;;
+ "*")
+ usage
+ exit 1
+ ;;
+esac
diff --git a/scripts/kconfig/qconf.h b/scripts/kconfig/qconf.h
index 7c03927d4c7..e52f3e90bf0 100644
--- a/scripts/kconfig/qconf.h
+++ b/scripts/kconfig/qconf.h
@@ -22,8 +22,8 @@ public:
#if QT_VERSION >= 300
void readListSettings();
- QValueList<int> ConfigSettings::readSizes(const QString& key, bool *ok);
- bool ConfigSettings::writeSizes(const QString& key, const QValueList<int>& value);
+ QValueList<int> readSizes(const QString& key, bool *ok);
+ bool writeSizes(const QString& key, const QValueList<int>& value);
#endif
bool showAll;
@@ -124,7 +124,7 @@ public:
void setParentMenu(void);
template <class P>
- void ConfigList::updateMenuList(P*, struct menu*);
+ void updateMenuList(P*, struct menu*);
bool updateAll;
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 2f45fd2969d..9fd5f5b87d1 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -1405,6 +1405,7 @@ sub create_parameterlist($$$) {
my $type;
my $param;
+ # temporarily replace commas inside function pointer definition
while ($args =~ /(\([^\),]+),/) {
$args =~ s/(\([^\),]+),/$1#/g;
}
@@ -1465,11 +1466,10 @@ sub push_parameter($$$) {
my $param_name = $param;
$param_name =~ s/\[.*//;
- if ($type eq "" && $param eq "...")
+ if ($type eq "" && $param =~ /\.\.\.$/)
{
$type="";
- $param="...";
- $parameterdescs{"..."} = "variable arguments";
+ $parameterdescs{$param} = "variable arguments";
}
elsif ($type eq "" && ($param eq "" or $param eq "void"))
{
@@ -1477,7 +1477,11 @@ sub push_parameter($$$) {
$param="void";
$parameterdescs{void} = "no arguments";
}
- if (defined $type && $type && !defined $parameterdescs{$param_name}) {
+ # warn if parameter has no description
+ # (but ignore ones starting with # as these are not parameters
+ # but inline preprocessor statements)
+ if (!defined $parameterdescs{$param_name} && $param_name !~ /^#/) {
+
$parameterdescs{$param_name} = $undescribed;
if (($type eq 'function') || ($type eq 'enum')) {
diff --git a/scripts/mksysmap b/scripts/mksysmap
index a6430e05972..4390fab9f5b 100644
--- a/scripts/mksysmap
+++ b/scripts/mksysmap
@@ -1,7 +1,7 @@
#!/bin/sh -x
# Based on the vmlinux file create the System.map file
# System.map is used by module-init tools and some debugging
-# tools to retreive the actual addresses of symbols in the kernel.
+# tools to retrieve the actual addresses of symbols in the kernel.
#
# Usage
# mksysmap vmlinux System.map
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index e0eedffe565..be97caf664b 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -417,7 +417,7 @@ static int do_input_entry(const char *filename, struct input_device_id *id,
do_input(alias, id->sndbit, 0, SND_MAX);
sprintf(alias + strlen(alias), "f*");
if (id->flags&INPUT_DEVICE_ID_MATCH_FFBIT)
- do_input(alias, id->ffbit, 0, SND_MAX);
+ do_input(alias, id->ffbit, 0, FF_MAX);
sprintf(alias + strlen(alias), "w*");
if (id->flags&INPUT_DEVICE_ID_MATCH_SWBIT)
do_input(alias, id->swbit, 0, SW_MAX);
diff --git a/scripts/reference_discarded.pl b/scripts/reference_discarded.pl
index c2d54148a91..4ee6ab2135b 100644
--- a/scripts/reference_discarded.pl
+++ b/scripts/reference_discarded.pl
@@ -71,6 +71,11 @@ foreach $object (keys(%object)) {
# printf("ignoring %d conglomerate(s)\n", $ignore);
# printf("Scanning objects\n");
+
+# Keith Owens <kaos@sgi.com> commented:
+# For our future {in}sanity, add a comment that this is the ppc .opd
+# section, not the ia64 .opd section.
+# ia64 .opd should not point to discarded sections.
$errorcount = 0;
foreach $object (keys(%object)) {
my $from;
@@ -88,6 +93,7 @@ foreach $object (keys(%object)) {
($from !~ /\.text\.exit$/ &&
$from !~ /\.exit\.text$/ &&
$from !~ /\.data\.exit$/ &&
+ $from !~ /\.opd$/ &&
$from !~ /\.exit\.data$/ &&
$from !~ /\.altinstructions$/ &&
$from !~ /\.pdr$/ &&
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 7c805c8fccd..9a23825218f 100644
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -1,56 +1,22 @@
-#!/usr/bin/perl
-# Copyright 2004 - Ryan Anderson <ryan@michonline.com> GPL v2
+#!/bin/sh
+# Print additional version information for non-release trees.
-use strict;
-use warnings;
-use Digest::MD5;
-require 5.006;
-
-if (@ARGV != 1) {
- print <<EOT;
-Usage: setlocalversion <srctree>
-EOT
- exit(1);
+usage() {
+ echo "Usage: $0 [srctree]" >&2
+ exit 1
}
-my ($srctree) = @ARGV;
-chdir($srctree);
-
-my @LOCALVERSIONS = ();
-
-# We are going to use the following commands to try and determine if this
-# repository is at a Version boundary (i.e, 2.6.10 vs 2.6.10 + some patches) We
-# currently assume that all meaningful version boundaries are marked by a tag.
-# We don't care what the tag is, just that something exists.
-
-# Git/Cogito store the top-of-tree "commit" in .git/HEAD
-# A list of known tags sits in .git/refs/tags/
-#
-# The simple trick here is to just compare the two of these, and if we get a
-# match, return nothing, otherwise, return a subset of the SHA-1 hash in
-# .git/HEAD
-
-sub do_git_checks {
- open(H,"<.git/HEAD") or return;
- my $head = <H>;
- chomp $head;
- close(H);
+cd "${1:-.}" || usage
- opendir(D,".git/refs/tags") or return;
- foreach my $tagfile (grep !/^\.{1,2}$/, readdir(D)) {
- open(F,"<.git/refs/tags/" . $tagfile) or return;
- my $tag = <F>;
- chomp $tag;
- close(F);
- return if ($tag eq $head);
- }
- closedir(D);
-
- push @LOCALVERSIONS, "g" . substr($head,0,8);
-}
-
-if ( -d ".git") {
- do_git_checks();
-}
+# Check for git and a git repo.
+if head=`git rev-parse --verify HEAD 2>/dev/null`; then
+ # Do we have an untagged version?
+ if [ "`git name-rev --tags HEAD`" = "HEAD undefined" ]; then
+ printf '%s%s' -g `echo "$head" | cut -c1-8`
+ fi
-printf "-%s\n", join("-",@LOCALVERSIONS) if (scalar @LOCALVERSIONS > 0);
+ # Are there uncommitted changes?
+ if git diff-files | read dummy; then
+ printf '%s' -dirty
+ fi
+fi
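For an untagged git HEAD the rewritten script emits -g followed by the first eight characters of the commit hash, and appends -dirty when git diff-files reports uncommitted changes; a tagged, clean tree produces no suffix, which roughly matches what the Perl version printed for the git case.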
diff --git a/security/capability.c b/security/capability.c
index ec18d607562..f9b35cc0b24 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -49,8 +49,6 @@ static struct security_operations capability_ops = {
.vm_enough_memory = cap_vm_enough_memory,
};
-#define MY_NAME __stringify(KBUILD_MODNAME)
-
/* flag to keep track of how we were registered */
static int secondary;
@@ -67,7 +65,7 @@ static int __init capability_init (void)
/* register ourselves with the security framework */
if (register_security (&capability_ops)) {
/* try registering with primary module */
- if (mod_reg_security (MY_NAME, &capability_ops)) {
+ if (mod_reg_security (KBUILD_MODNAME, &capability_ops)) {
printk (KERN_INFO "Failure registering capabilities "
"with primary security module.\n");
return -EINVAL;
@@ -85,7 +83,7 @@ static void __exit capability_exit (void)
return;
/* remove ourselves from the security framework */
if (secondary) {
- if (mod_unreg_security (MY_NAME, &capability_ops))
+ if (mod_unreg_security (KBUILD_MODNAME, &capability_ops))
printk (KERN_INFO "Failure unregistering capabilities "
"with primary module.\n");
return;
diff --git a/security/commoncap.c b/security/commoncap.c
index 04c12f58d65..8a6e097f99e 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -7,6 +7,7 @@
*
*/
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/security/dummy.c b/security/dummy.c
index a15c54709fd..f1a5bd98bf1 100644
--- a/security/dummy.c
+++ b/security/dummy.c
@@ -14,6 +14,7 @@
#undef DEBUG
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/security/inode.c b/security/inode.c
index a5964502ae3..0f77b022366 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -172,7 +172,7 @@ static int create_by_name(const char *name, mode_t mode,
return -EFAULT;
}
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(dentry)) {
if ((mode & S_IFMT) == S_IFDIR)
@@ -181,7 +181,7 @@ static int create_by_name(const char *name, mode_t mode,
error = create(parent->d_inode, *dentry, mode);
} else
error = PTR_ERR(dentry);
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
return error;
}
@@ -302,7 +302,7 @@ void securityfs_remove(struct dentry *dentry)
if (!parent || !parent->d_inode)
return;
- down(&parent->d_inode->i_sem);
+ mutex_lock(&parent->d_inode->i_mutex);
if (positive(dentry)) {
if (dentry->d_inode) {
if (S_ISDIR(dentry->d_inode->i_mode))
@@ -312,7 +312,7 @@ void securityfs_remove(struct dentry *dentry)
dput(dentry);
}
}
- up(&parent->d_inode->i_sem);
+ mutex_unlock(&parent->d_inode->i_mutex);
simple_release_fs(&mount, &mount_count);
}
EXPORT_SYMBOL_GPL(securityfs_remove);
diff --git a/security/keys/compat.c b/security/keys/compat.c
index 3303673c636..bcdb2853373 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -74,6 +74,12 @@ asmlinkage long compat_sys_keyctl(u32 option,
case KEYCTL_SET_REQKEY_KEYRING:
return keyctl_set_reqkey_keyring(arg2);
+ case KEYCTL_SET_TIMEOUT:
+ return keyctl_set_timeout(arg2, arg3);
+
+ case KEYCTL_ASSUME_AUTHORITY:
+ return keyctl_assume_authority(arg2);
+
default:
return -EOPNOTSUPP;
}
diff --git a/security/keys/internal.h b/security/keys/internal.h
index db99ed434f3..e066e605795 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -25,7 +25,6 @@
#define kdebug(FMT, a...) do {} while(0)
#endif
-extern struct key_type key_type_dead;
extern struct key_type key_type_user;
/*****************************************************************************/
@@ -108,12 +107,13 @@ extern struct key *request_key_and_link(struct key_type *type,
struct request_key_auth {
struct key *target_key;
struct task_struct *context;
+ const char *callout_info;
pid_t pid;
};
extern struct key_type key_type_request_key_auth;
extern struct key *request_key_auth_new(struct key *target,
- struct key **_rkakey);
+ const char *callout_info);
extern struct key *key_get_instantiation_authkey(key_serial_t target_id);
@@ -137,6 +137,8 @@ extern long keyctl_instantiate_key(key_serial_t, const void __user *,
size_t, key_serial_t);
extern long keyctl_negate_key(key_serial_t, unsigned, key_serial_t);
extern long keyctl_set_reqkey_keyring(int);
+extern long keyctl_set_timeout(key_serial_t, unsigned);
+extern long keyctl_assume_authority(key_serial_t);
/*
diff --git a/security/keys/key.c b/security/keys/key.c
index 01bcfecb7ea..99781b79831 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -36,7 +36,7 @@ static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL);
DECLARE_RWSEM(key_construction_sem);
/* any key whose type gets unregistered will be re-typed to this */
-struct key_type key_type_dead = {
+static struct key_type key_type_dead = {
.name = "dead",
};
@@ -240,9 +240,9 @@ static inline void key_alloc_serial(struct key *key)
/*
* allocate a key of the specified type
* - update the user's quota to reflect the existence of the key
- * - called from a key-type operation with key_types_sem read-locked by either
- * key_create_or_update() or by key_duplicate(); this prevents unregistration
- * of the key type
+ * - called from a key-type operation with key_types_sem read-locked by
+ * key_create_or_update()
+ * - this prevents unregistration of the key type
* - upon return the key is as yet uninstantiated; the caller needs to either
* instantiate the key or discard it before returning
*/
@@ -889,56 +889,6 @@ EXPORT_SYMBOL(key_update);
/*****************************************************************************/
/*
- * duplicate a key, potentially with a revised description
- * - must be supported by the keytype (keyrings for instance can be duplicated)
- */
-struct key *key_duplicate(struct key *source, const char *desc)
-{
- struct key *key;
- int ret;
-
- key_check(source);
-
- if (!desc)
- desc = source->description;
-
- down_read(&key_types_sem);
-
- ret = -EINVAL;
- if (!source->type->duplicate)
- goto error;
-
- /* allocate and instantiate a key */
- key = key_alloc(source->type, desc, current->fsuid, current->fsgid,
- source->perm, 0);
- if (IS_ERR(key))
- goto error_k;
-
- down_read(&source->sem);
- ret = key->type->duplicate(key, source);
- up_read(&source->sem);
- if (ret < 0)
- goto error2;
-
- atomic_inc(&key->user->nikeys);
- set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
-
- error_k:
- up_read(&key_types_sem);
- out:
- return key;
-
- error2:
- key_put(key);
- error:
- up_read(&key_types_sem);
- key = ERR_PTR(ret);
- goto out;
-
-} /* end key_duplicate() */
-
-/*****************************************************************************/
-/*
* revoke a key
*/
void key_revoke(struct key *key)
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index b7a468fabdf..90db5c76cf6 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -16,6 +16,7 @@
#include <linux/syscalls.h>
#include <linux/keyctl.h>
#include <linux/fs.h>
+#include <linux/capability.h>
#include <linux/err.h>
#include <asm/uaccess.h>
#include "internal.h"
@@ -834,6 +835,17 @@ long keyctl_instantiate_key(key_serial_t id,
if (plen > 32767)
goto error;
+ /* the appropriate instantiation authorisation key must have been
+ * assumed before calling this */
+ ret = -EPERM;
+ instkey = current->request_key_auth;
+ if (!instkey)
+ goto error;
+
+ rka = instkey->payload.data;
+ if (rka->target_key->serial != id)
+ goto error;
+
/* pull the payload in if one was supplied */
payload = NULL;
@@ -848,15 +860,6 @@ long keyctl_instantiate_key(key_serial_t id,
goto error2;
}
- /* find the instantiation authorisation key */
- instkey = key_get_instantiation_authkey(id);
- if (IS_ERR(instkey)) {
- ret = PTR_ERR(instkey);
- goto error2;
- }
-
- rka = instkey->payload.data;
-
/* find the destination keyring amongst those belonging to the
* requesting task */
keyring_ref = NULL;
@@ -865,7 +868,7 @@ long keyctl_instantiate_key(key_serial_t id,
KEY_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
- goto error3;
+ goto error2;
}
}
@@ -874,11 +877,17 @@ long keyctl_instantiate_key(key_serial_t id,
key_ref_to_ptr(keyring_ref), instkey);
key_ref_put(keyring_ref);
- error3:
- key_put(instkey);
- error2:
+
+ /* discard the assumed authority if it's just been disabled by
+ * instantiation of the key */
+ if (ret == 0) {
+ key_put(current->request_key_auth);
+ current->request_key_auth = NULL;
+ }
+
+error2:
kfree(payload);
- error:
+error:
return ret;
} /* end keyctl_instantiate_key() */
@@ -895,14 +904,16 @@ long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid)
key_ref_t keyring_ref;
long ret;
- /* find the instantiation authorisation key */
- instkey = key_get_instantiation_authkey(id);
- if (IS_ERR(instkey)) {
- ret = PTR_ERR(instkey);
+ /* the appropriate instantiation authorisation key must have been
+ * assumed before calling this */
+ ret = -EPERM;
+ instkey = current->request_key_auth;
+ if (!instkey)
goto error;
- }
rka = instkey->payload.data;
+ if (rka->target_key->serial != id)
+ goto error;
/* find the destination keyring if present (which must also be
* writable) */
@@ -911,7 +922,7 @@ long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid)
keyring_ref = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
- goto error2;
+ goto error;
}
}
@@ -920,9 +931,15 @@ long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid)
key_ref_to_ptr(keyring_ref), instkey);
key_ref_put(keyring_ref);
- error2:
- key_put(instkey);
- error:
+
+ /* discard the assumed authority if it's just been disabled by
+ * instantiation of the key */
+ if (ret == 0) {
+ key_put(current->request_key_auth);
+ current->request_key_auth = NULL;
+ }
+
+error:
return ret;
} /* end keyctl_negate_key() */
@@ -967,6 +984,88 @@ long keyctl_set_reqkey_keyring(int reqkey_defl)
/*****************************************************************************/
/*
+ * set or clear the timeout for a key
+ */
+long keyctl_set_timeout(key_serial_t id, unsigned timeout)
+{
+ struct timespec now;
+ struct key *key;
+ key_ref_t key_ref;
+ time_t expiry;
+ long ret;
+
+ key_ref = lookup_user_key(NULL, id, 1, 1, KEY_SETATTR);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+ goto error;
+ }
+
+ key = key_ref_to_ptr(key_ref);
+
+ /* make the changes with the locks held to prevent races */
+ down_write(&key->sem);
+
+ expiry = 0;
+ if (timeout > 0) {
+ now = current_kernel_time();
+ expiry = now.tv_sec + timeout;
+ }
+
+ key->expiry = expiry;
+
+ up_write(&key->sem);
+ key_put(key);
+
+ ret = 0;
+error:
+ return ret;
+
+} /* end keyctl_set_timeout() */
+
+/*****************************************************************************/
+/*
+ * assume the authority to instantiate the specified key
+ */
+long keyctl_assume_authority(key_serial_t id)
+{
+ struct key *authkey;
+ long ret;
+
+ /* special key IDs aren't permitted */
+ ret = -EINVAL;
+ if (id < 0)
+ goto error;
+
+ /* we divest ourselves of authority if given an ID of 0 */
+ if (id == 0) {
+ key_put(current->request_key_auth);
+ current->request_key_auth = NULL;
+ ret = 0;
+ goto error;
+ }
+
+ /* attempt to assume the authority temporarily granted to us whilst we
+ * instantiate the specified key
+ * - the authorisation key must be in the current task's keyrings
+ * somewhere
+ */
+ authkey = key_get_instantiation_authkey(id);
+ if (IS_ERR(authkey)) {
+ ret = PTR_ERR(authkey);
+ goto error;
+ }
+
+ key_put(current->request_key_auth);
+ current->request_key_auth = authkey;
+ ret = authkey->serial;
+
+error:
+ return ret;
+
+} /* end keyctl_assume_authority() */
+
+/*****************************************************************************/
+/*
* the key control system call
*/
asmlinkage long sys_keyctl(int option, unsigned long arg2, unsigned long arg3,
@@ -1038,6 +1137,13 @@ asmlinkage long sys_keyctl(int option, unsigned long arg2, unsigned long arg3,
case KEYCTL_SET_REQKEY_KEYRING:
return keyctl_set_reqkey_keyring(arg2);
+ case KEYCTL_SET_TIMEOUT:
+ return keyctl_set_timeout((key_serial_t) arg2,
+ (unsigned) arg3);
+
+ case KEYCTL_ASSUME_AUTHORITY:
+ return keyctl_assume_authority((key_serial_t) arg2);
+
default:
return -EOPNOTSUPP;
}
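KEYCTL_SET_TIMEOUT and KEYCTL_ASSUME_AUTHORITY are the userspace-visible additions here. A hedged sketch of driving them through the raw syscall (glibc offered no keyctl() wrapper at the time; the constants are assumed to come from the matching <linux/keyctl.h> update in this series):

	#include <linux/keyctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(int argc, char *argv[])
	{
		long key;

		if (argc != 2) {
			fprintf(stderr, "usage: %s <key-serial>\n", argv[0]);
			return 1;
		}
		key = strtol(argv[1], NULL, 0);

		/* give the key a 60 second expiry (a timeout of 0 clears it) */
		if (syscall(__NR_keyctl, KEYCTL_SET_TIMEOUT, key, 60) < 0)
			perror("KEYCTL_SET_TIMEOUT");

		/* assume authority to instantiate the key (an id of 0 divests it) */
		if (syscall(__NR_keyctl, KEYCTL_ASSUME_AUTHORITY, key) < 0)
			perror("KEYCTL_ASSUME_AUTHORITY");

		return 0;
	}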
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 4e9fa8be44b..d65a180f888 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -48,7 +48,6 @@ static inline unsigned keyring_hash(const char *desc)
*/
static int keyring_instantiate(struct key *keyring,
const void *data, size_t datalen);
-static int keyring_duplicate(struct key *keyring, const struct key *source);
static int keyring_match(const struct key *keyring, const void *criterion);
static void keyring_destroy(struct key *keyring);
static void keyring_describe(const struct key *keyring, struct seq_file *m);
@@ -59,7 +58,6 @@ struct key_type key_type_keyring = {
.name = "keyring",
.def_datalen = sizeof(struct keyring_list),
.instantiate = keyring_instantiate,
- .duplicate = keyring_duplicate,
.match = keyring_match,
.destroy = keyring_destroy,
.describe = keyring_describe,
@@ -70,7 +68,7 @@ struct key_type key_type_keyring = {
* semaphore to serialise link/link calls to prevent two link calls in parallel
* introducing a cycle
*/
-DECLARE_RWSEM(keyring_serialise_link_sem);
+static DECLARE_RWSEM(keyring_serialise_link_sem);
/*****************************************************************************/
/*
@@ -120,68 +118,6 @@ static int keyring_instantiate(struct key *keyring,
/*****************************************************************************/
/*
- * duplicate the list of subscribed keys from a source keyring into this one
- */
-static int keyring_duplicate(struct key *keyring, const struct key *source)
-{
- struct keyring_list *sklist, *klist;
- unsigned max;
- size_t size;
- int loop, ret;
-
- const unsigned limit =
- (PAGE_SIZE - sizeof(*klist)) / sizeof(struct key *);
-
- ret = 0;
-
- /* find out how many keys are currently linked */
- rcu_read_lock();
- sklist = rcu_dereference(source->payload.subscriptions);
- max = 0;
- if (sklist)
- max = sklist->nkeys;
- rcu_read_unlock();
-
- /* allocate a new payload and stuff load with key links */
- if (max > 0) {
- BUG_ON(max > limit);
-
- max = (max + 3) & ~3;
- if (max > limit)
- max = limit;
-
- ret = -ENOMEM;
- size = sizeof(*klist) + sizeof(struct key *) * max;
- klist = kmalloc(size, GFP_KERNEL);
- if (!klist)
- goto error;
-
- /* set links */
- rcu_read_lock();
- sklist = rcu_dereference(source->payload.subscriptions);
-
- klist->maxkeys = max;
- klist->nkeys = sklist->nkeys;
- memcpy(klist->keys,
- sklist->keys,
- sklist->nkeys * sizeof(struct key *));
-
- for (loop = klist->nkeys - 1; loop >= 0; loop--)
- atomic_inc(&klist->keys[loop]->usage);
-
- rcu_read_unlock();
-
- rcu_assign_pointer(keyring->payload.subscriptions, klist);
- ret = 0;
- }
-
- error:
- return ret;
-
-} /* end keyring_duplicate() */
-
-/*****************************************************************************/
-/*
* match keyrings on their name
*/
static int keyring_match(const struct key *keyring, const void *description)
@@ -545,51 +481,6 @@ key_ref_t __keyring_search_one(key_ref_t keyring_ref,
/*****************************************************************************/
/*
- * search for an instantiation authorisation key matching a target key
- * - the RCU read lock must be held by the caller
- * - a target_id of zero specifies any valid token
- */
-struct key *keyring_search_instkey(struct key *keyring,
- key_serial_t target_id)
-{
- struct request_key_auth *rka;
- struct keyring_list *klist;
- struct key *instkey;
- int loop;
-
- klist = rcu_dereference(keyring->payload.subscriptions);
- if (klist) {
- for (loop = 0; loop < klist->nkeys; loop++) {
- instkey = klist->keys[loop];
-
- if (instkey->type != &key_type_request_key_auth)
- continue;
-
- rka = instkey->payload.data;
- if (target_id && rka->target_key->serial != target_id)
- continue;
-
- /* the auth key is revoked during instantiation */
- if (!test_bit(KEY_FLAG_REVOKED, &instkey->flags))
- goto found;
-
- instkey = ERR_PTR(-EKEYREVOKED);
- goto error;
- }
- }
-
- instkey = ERR_PTR(-EACCES);
- goto error;
-
-found:
- atomic_inc(&instkey->usage);
-error:
- return instkey;
-
-} /* end keyring_search_instkey() */
-
-/*****************************************************************************/
-/*
* find a keyring with the specified name
* - all named keyrings are searched
* - only find keyrings with search permission for the process
@@ -748,15 +639,31 @@ static void keyring_link_rcu_disposal(struct rcu_head *rcu)
/*****************************************************************************/
/*
+ * dispose of a keyring list after the RCU grace period, freeing the unlinked
+ * key
+ */
+static void keyring_unlink_rcu_disposal(struct rcu_head *rcu)
+{
+ struct keyring_list *klist =
+ container_of(rcu, struct keyring_list, rcu);
+
+ key_put(klist->keys[klist->delkey]);
+ kfree(klist);
+
+} /* end keyring_unlink_rcu_disposal() */
+
+/*****************************************************************************/
+/*
* link a key into to a keyring
* - must be called with the keyring's semaphore write-locked
+ * - discard already extant link to matching key if there is one
*/
int __key_link(struct key *keyring, struct key *key)
{
struct keyring_list *klist, *nklist;
unsigned max;
size_t size;
- int ret;
+ int loop, ret;
ret = -EKEYREVOKED;
if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
@@ -778,6 +685,48 @@ int __key_link(struct key *keyring, struct key *key)
goto error2;
}
+ /* see if there's a matching key we can displace */
+ klist = keyring->payload.subscriptions;
+
+ if (klist && klist->nkeys > 0) {
+ struct key_type *type = key->type;
+
+ for (loop = klist->nkeys - 1; loop >= 0; loop--) {
+ if (klist->keys[loop]->type == type &&
+ strcmp(klist->keys[loop]->description,
+ key->description) == 0
+ ) {
+ /* found a match - replace with new key */
+ size = sizeof(struct key *) * klist->maxkeys;
+ size += sizeof(*klist);
+ BUG_ON(size > PAGE_SIZE);
+
+ ret = -ENOMEM;
+ nklist = kmalloc(size, GFP_KERNEL);
+ if (!nklist)
+ goto error2;
+
+ memcpy(nklist, klist, size);
+
+ /* replace matched key */
+ atomic_inc(&key->usage);
+ nklist->keys[loop] = key;
+
+ rcu_assign_pointer(
+ keyring->payload.subscriptions,
+ nklist);
+
+ /* dispose of the old keyring list and the
+ * displaced key */
+ klist->delkey = loop;
+ call_rcu(&klist->rcu,
+ keyring_unlink_rcu_disposal);
+
+ goto done;
+ }
+ }
+ }
+
/* check that we aren't going to overrun the user's quota */
ret = key_payload_reserve(keyring,
keyring->datalen + KEYQUOTA_LINK_BYTES);
@@ -794,8 +743,6 @@ int __key_link(struct key *keyring, struct key *key)
smp_wmb();
klist->nkeys++;
smp_wmb();
-
- ret = 0;
}
else {
/* grow the key list */
@@ -833,16 +780,16 @@ int __key_link(struct key *keyring, struct key *key)
/* dispose of the old keyring list */
if (klist)
call_rcu(&klist->rcu, keyring_link_rcu_disposal);
-
- ret = 0;
}
- error2:
+done:
+ ret = 0;
+error2:
up_write(&keyring_serialise_link_sem);
- error:
+error:
return ret;
- error3:
+error3:
/* undo the quota changes */
key_payload_reserve(keyring,
keyring->datalen - KEYQUOTA_LINK_BYTES);
@@ -873,21 +820,6 @@ EXPORT_SYMBOL(key_link);
/*****************************************************************************/
/*
- * dispose of a keyring list after the RCU grace period, freeing the unlinked
- * key
- */
-static void keyring_unlink_rcu_disposal(struct rcu_head *rcu)
-{
- struct keyring_list *klist =
- container_of(rcu, struct keyring_list, rcu);
-
- key_put(klist->keys[klist->delkey]);
- kfree(klist);
-
-} /* end keyring_unlink_rcu_disposal() */
-
-/*****************************************************************************/
-/*
* unlink the first link to a key from a keyring
*/
int key_unlink(struct key *keyring, struct key *key)
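The replace-in-place logic added to __key_link() is the standard RCU update sequence: build a modified copy, publish it with rcu_assign_pointer(), and only free the old list (and the displaced key) after a grace period via call_rcu(). A stripped-down sketch of that sequence, detached from the keyring structures:

	#include <linux/kernel.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct blob {
		struct rcu_head rcu;
		int payload;
	};

	static struct blob *current_blob;	/* readers use rcu_dereference() */

	static void blob_free_rcu(struct rcu_head *rcu)
	{
		kfree(container_of(rcu, struct blob, rcu));
	}

	/* Caller must hold whatever lock serialises writers (the keyring
	 * semaphore in the code above).  Existing readers keep a valid copy
	 * until the grace period ends; new readers see the replacement. */
	static void blob_replace(struct blob *new)
	{
		struct blob *old = current_blob;

		rcu_assign_pointer(current_blob, new);
		if (old)
			call_rcu(&old->rcu, blob_free_rcu);
	}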
diff --git a/security/keys/permission.c b/security/keys/permission.c
index e7f579c0eaf..3b41f9b5253 100644
--- a/security/keys/permission.c
+++ b/security/keys/permission.c
@@ -73,3 +73,35 @@ use_these_perms:
} /* end key_task_permission() */
EXPORT_SYMBOL(key_task_permission);
+
+/*****************************************************************************/
+/*
+ * validate a key
+ */
+int key_validate(struct key *key)
+{
+ struct timespec now;
+ int ret = 0;
+
+ if (key) {
+ /* check it's still accessible */
+ ret = -EKEYREVOKED;
+ if (test_bit(KEY_FLAG_REVOKED, &key->flags) ||
+ test_bit(KEY_FLAG_DEAD, &key->flags))
+ goto error;
+
+ /* check it hasn't expired */
+ ret = 0;
+ if (key->expiry) {
+ now = current_kernel_time();
+ if (now.tv_sec >= key->expiry)
+ ret = -EKEYEXPIRED;
+ }
+ }
+
+ error:
+ return ret;
+
+} /* end key_validate() */
+
+EXPORT_SYMBOL(key_validate);
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 566b1cc0118..74cb79eb917 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -270,9 +270,14 @@ int copy_thread_group_keys(struct task_struct *tsk)
int copy_keys(unsigned long clone_flags, struct task_struct *tsk)
{
key_check(tsk->thread_keyring);
+ key_check(tsk->request_key_auth);
/* no thread keyring yet */
tsk->thread_keyring = NULL;
+
+ /* copy the request_key() authorisation for this thread */
+ key_get(tsk->request_key_auth);
+
return 0;
} /* end copy_keys() */
@@ -290,11 +295,12 @@ void exit_thread_group_keys(struct signal_struct *tg)
/*****************************************************************************/
/*
- * dispose of keys upon thread exit
+ * dispose of per-thread keys upon thread exit
*/
void exit_keys(struct task_struct *tsk)
{
key_put(tsk->thread_keyring);
+ key_put(tsk->request_key_auth);
} /* end exit_keys() */
@@ -382,7 +388,7 @@ key_ref_t search_process_keyrings(struct key_type *type,
struct task_struct *context)
{
struct request_key_auth *rka;
- key_ref_t key_ref, ret, err, instkey_ref;
+ key_ref_t key_ref, ret, err;
/* we want to return -EAGAIN or -ENOKEY if any of the keyrings were
* searchable, but we failed to find a key or we found a negative key;
@@ -461,30 +467,12 @@ key_ref_t search_process_keyrings(struct key_type *type,
err = key_ref;
break;
}
-
- /* if this process has a session keyring and that has an
- * instantiation authorisation key in the bottom level, then we
- * also search the keyrings of the process mentioned there */
- if (context != current)
- goto no_key;
-
- rcu_read_lock();
- instkey_ref = __keyring_search_one(
- make_key_ref(rcu_dereference(
- context->signal->session_keyring),
- 1),
- &key_type_request_key_auth, NULL, 0);
- rcu_read_unlock();
-
- if (IS_ERR(instkey_ref))
- goto no_key;
-
- rka = key_ref_to_ptr(instkey_ref)->payload.data;
-
- key_ref = search_process_keyrings(type, description, match,
- rka->context);
- key_ref_put(instkey_ref);
-
+ }
+ /* or search the user-session keyring */
+ else {
+ key_ref = keyring_search_aux(
+ make_key_ref(context->user->session_keyring, 1),
+ context, type, description, match);
if (!IS_ERR(key_ref))
goto found;
@@ -500,11 +488,21 @@ key_ref_t search_process_keyrings(struct key_type *type,
break;
}
}
- /* or search the user-session keyring */
- else {
- key_ref = keyring_search_aux(
- make_key_ref(context->user->session_keyring, 1),
- context, type, description, match);
+
+ /* if this process has an instantiation authorisation key, then we also
+ * search the keyrings of the process mentioned there
+ * - we don't permit access to request_key auth keys via this method
+ */
+ if (context->request_key_auth &&
+ context == current &&
+ type != &key_type_request_key_auth &&
+ key_validate(context->request_key_auth) == 0
+ ) {
+ rka = context->request_key_auth->payload.data;
+
+ key_ref = search_process_keyrings(type, description, match,
+ rka->context);
+
if (!IS_ERR(key_ref))
goto found;
@@ -521,8 +519,6 @@ key_ref_t search_process_keyrings(struct key_type *type,
}
}
-
-no_key:
/* no key - decide on the error we're going to go for */
key_ref = ret ? ret : err;
@@ -628,6 +624,15 @@ key_ref_t lookup_user_key(struct task_struct *context, key_serial_t id,
key = ERR_PTR(-EINVAL);
goto error;
+ case KEY_SPEC_REQKEY_AUTH_KEY:
+ key = context->request_key_auth;
+ if (!key)
+ goto error;
+
+ atomic_inc(&key->usage);
+ key_ref = make_key_ref(key, 1);
+ break;
+
default:
key_ref = ERR_PTR(-EINVAL);
if (id < 1)
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 5cc4bba70db..f030a0ccbb9 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -29,28 +29,36 @@ DECLARE_WAIT_QUEUE_HEAD(request_key_conswq);
/*****************************************************************************/
/*
* request userspace finish the construction of a key
- * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring> <info>"
+ * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>"
*/
-static int call_request_key(struct key *key,
- const char *op,
- const char *callout_info)
+static int call_sbin_request_key(struct key *key,
+ struct key *authkey,
+ const char *op)
{
struct task_struct *tsk = current;
key_serial_t prkey, sskey;
- struct key *session_keyring, *rkakey;
- char *argv[10], *envp[3], uid_str[12], gid_str[12];
+ struct key *keyring;
+ char *argv[9], *envp[3], uid_str[12], gid_str[12];
char key_str[12], keyring_str[3][12];
+ char desc[20];
int ret, i;
- kenter("{%d},%s,%s", key->serial, op, callout_info);
+ kenter("{%d},{%d},%s", key->serial, authkey->serial, op);
- /* generate a new session keyring with an auth key in it */
- session_keyring = request_key_auth_new(key, &rkakey);
- if (IS_ERR(session_keyring)) {
- ret = PTR_ERR(session_keyring);
- goto error;
+ /* allocate a new session keyring */
+ sprintf(desc, "_req.%u", key->serial);
+
+ keyring = keyring_alloc(desc, current->fsuid, current->fsgid, 1, NULL);
+ if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto error_alloc;
}
+ /* attach the auth key to the session keyring */
+ ret = __key_link(keyring, authkey);
+ if (ret < 0)
+ goto error_link;
+
/* record the UID and GID */
sprintf(uid_str, "%d", current->fsuid);
sprintf(gid_str, "%d", current->fsgid);
@@ -95,22 +103,19 @@ static int call_request_key(struct key *key,
argv[i++] = keyring_str[0];
argv[i++] = keyring_str[1];
argv[i++] = keyring_str[2];
- argv[i++] = (char *) callout_info;
argv[i] = NULL;
/* do it */
- ret = call_usermodehelper_keys(argv[0], argv, envp, session_keyring, 1);
+ ret = call_usermodehelper_keys(argv[0], argv, envp, keyring, 1);
- /* dispose of the special keys */
- key_revoke(rkakey);
- key_put(rkakey);
- key_put(session_keyring);
+error_link:
+ key_put(keyring);
- error:
+error_alloc:
kleave(" = %d", ret);
return ret;
-} /* end call_request_key() */
+} /* end call_sbin_request_key() */
/*****************************************************************************/
/*
@@ -122,9 +127,10 @@ static struct key *__request_key_construction(struct key_type *type,
const char *description,
const char *callout_info)
{
+ request_key_actor_t actor;
struct key_construction cons;
struct timespec now;
- struct key *key;
+ struct key *key, *authkey;
int ret, negated;
kenter("%s,%s,%s", type->name, description, callout_info);
@@ -143,8 +149,19 @@ static struct key *__request_key_construction(struct key_type *type,
/* we drop the construction sem here on behalf of the caller */
up_write(&key_construction_sem);
+ /* allocate an authorisation key */
+ authkey = request_key_auth_new(key, callout_info);
+ if (IS_ERR(authkey)) {
+ ret = PTR_ERR(authkey);
+ authkey = NULL;
+ goto alloc_authkey_failed;
+ }
+
/* make the call */
- ret = call_request_key(key, "create", callout_info);
+ actor = call_sbin_request_key;
+ if (type->request_key)
+ actor = type->request_key;
+ ret = actor(key, authkey, "create");
if (ret < 0)
goto request_failed;
@@ -153,22 +170,29 @@ static struct key *__request_key_construction(struct key_type *type,
if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
goto request_failed;
+ key_revoke(authkey);
+ key_put(authkey);
+
down_write(&key_construction_sem);
list_del(&cons.link);
up_write(&key_construction_sem);
/* also give an error if the key was negatively instantiated */
- check_not_negative:
+check_not_negative:
if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
key_put(key);
key = ERR_PTR(-ENOKEY);
}
- out:
+out:
kleave(" = %p", key);
return key;
- request_failed:
+request_failed:
+ key_revoke(authkey);
+ key_put(authkey);
+
+alloc_authkey_failed:
/* it wasn't instantiated
* - remove from construction queue
* - mark the key as dead
@@ -217,7 +241,7 @@ static struct key *__request_key_construction(struct key_type *type,
key = ERR_PTR(ret);
goto out;
- alloc_failed:
+alloc_failed:
up_write(&key_construction_sem);
goto out;
@@ -464,35 +488,3 @@ struct key *request_key(struct key_type *type,
} /* end request_key() */
EXPORT_SYMBOL(request_key);
-
-/*****************************************************************************/
-/*
- * validate a key
- */
-int key_validate(struct key *key)
-{
- struct timespec now;
- int ret = 0;
-
- if (key) {
- /* check it's still accessible */
- ret = -EKEYREVOKED;
- if (test_bit(KEY_FLAG_REVOKED, &key->flags) ||
- test_bit(KEY_FLAG_DEAD, &key->flags))
- goto error;
-
- /* check it hasn't expired */
- ret = 0;
- if (key->expiry) {
- now = current_kernel_time();
- if (now.tv_sec >= key->expiry)
- ret = -EKEYEXPIRED;
- }
- }
-
- error:
- return ret;
-
-} /* end key_validate() */
-
-EXPORT_SYMBOL(key_validate);
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index a8e4069d48c..cce6ba6b032 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -15,11 +15,13 @@
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/seq_file.h>
+#include <asm/uaccess.h>
#include "internal.h"
static int request_key_auth_instantiate(struct key *, const void *, size_t);
static void request_key_auth_describe(const struct key *, struct seq_file *);
static void request_key_auth_destroy(struct key *);
+static long request_key_auth_read(const struct key *, char __user *, size_t);
/*
* the request-key authorisation key type definition
@@ -30,51 +32,25 @@ struct key_type key_type_request_key_auth = {
.instantiate = request_key_auth_instantiate,
.describe = request_key_auth_describe,
.destroy = request_key_auth_destroy,
+ .read = request_key_auth_read,
};
/*****************************************************************************/
/*
- * instantiate a request-key authorisation record
+ * instantiate a request-key authorisation key
*/
static int request_key_auth_instantiate(struct key *key,
const void *data,
size_t datalen)
{
- struct request_key_auth *rka, *irka;
- struct key *instkey;
- int ret;
-
- ret = -ENOMEM;
- rka = kmalloc(sizeof(*rka), GFP_KERNEL);
- if (rka) {
- /* see if the calling process is already servicing the key
- * request of another process */
- instkey = key_get_instantiation_authkey(0);
- if (!IS_ERR(instkey)) {
- /* it is - use that instantiation context here too */
- irka = instkey->payload.data;
- rka->context = irka->context;
- rka->pid = irka->pid;
- key_put(instkey);
- }
- else {
- /* it isn't - use this process as the context */
- rka->context = current;
- rka->pid = current->pid;
- }
-
- rka->target_key = key_get((struct key *) data);
- key->payload.data = rka;
- ret = 0;
- }
-
- return ret;
+ key->payload.data = (struct request_key_auth *) data;
+ return 0;
} /* end request_key_auth_instantiate() */
/*****************************************************************************/
/*
- *
+ * reading a request-key authorisation key retrieves the callout information
*/
static void request_key_auth_describe(const struct key *key,
struct seq_file *m)
@@ -83,12 +59,40 @@ static void request_key_auth_describe(const struct key *key,
seq_puts(m, "key:");
seq_puts(m, key->description);
- seq_printf(m, " pid:%d", rka->pid);
+ seq_printf(m, " pid:%d ci:%zu", rka->pid, strlen(rka->callout_info));
} /* end request_key_auth_describe() */
/*****************************************************************************/
/*
+ * read the callout_info data
+ * - the key's semaphore is read-locked
+ */
+static long request_key_auth_read(const struct key *key,
+ char __user *buffer, size_t buflen)
+{
+ struct request_key_auth *rka = key->payload.data;
+ size_t datalen;
+ long ret;
+
+ datalen = strlen(rka->callout_info);
+ ret = datalen;
+
+ /* we can return the data as is */
+ if (buffer && buflen > 0) {
+ if (buflen > datalen)
+ buflen = datalen;
+
+ if (copy_to_user(buffer, rka->callout_info, buflen) != 0)
+ ret = -EFAULT;
+ }
+
+ return ret;
+
+} /* end request_key_auth_read() */
+
+/*****************************************************************************/
+/*
* destroy an instantiation authorisation token key
*/
static void request_key_auth_destroy(struct key *key)
@@ -104,56 +108,89 @@ static void request_key_auth_destroy(struct key *key)
/*****************************************************************************/
/*
- * create a session keyring to be for the invokation of /sbin/request-key and
- * stick an authorisation token in it
+ * create an authorisation token for /sbin/request-key or whoever to gain
+ * access to the caller's security data
*/
-struct key *request_key_auth_new(struct key *target, struct key **_rkakey)
+struct key *request_key_auth_new(struct key *target, const char *callout_info)
{
- struct key *keyring, *rkakey = NULL;
+ struct request_key_auth *rka, *irka;
+ struct key *authkey = NULL;
char desc[20];
int ret;
kenter("%d,", target->serial);
- /* allocate a new session keyring */
- sprintf(desc, "_req.%u", target->serial);
+ /* allocate an auth record */
+ rka = kmalloc(sizeof(*rka), GFP_KERNEL);
+ if (!rka) {
+ kleave(" = -ENOMEM");
+ return ERR_PTR(-ENOMEM);
+ }
- keyring = keyring_alloc(desc, current->fsuid, current->fsgid, 1, NULL);
- if (IS_ERR(keyring)) {
- kleave("= %ld", PTR_ERR(keyring));
- return keyring;
+ /* see if the calling process is already servicing the key request of
+ * another process */
+ if (current->request_key_auth) {
+ /* it is - use that instantiation context here too */
+ irka = current->request_key_auth->payload.data;
+ rka->context = irka->context;
+ rka->pid = irka->pid;
}
+ else {
+ /* it isn't - use this process as the context */
+ rka->context = current;
+ rka->pid = current->pid;
+ }
+
+ rka->target_key = key_get(target);
+ rka->callout_info = callout_info;
/* allocate the auth key */
sprintf(desc, "%x", target->serial);
- rkakey = key_alloc(&key_type_request_key_auth, desc,
- current->fsuid, current->fsgid,
- KEY_POS_VIEW | KEY_USR_VIEW, 1);
- if (IS_ERR(rkakey)) {
- key_put(keyring);
- kleave("= %ld", PTR_ERR(rkakey));
- return rkakey;
+ authkey = key_alloc(&key_type_request_key_auth, desc,
+ current->fsuid, current->fsgid,
+ KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH |
+ KEY_USR_VIEW, 1);
+ if (IS_ERR(authkey)) {
+ ret = PTR_ERR(authkey);
+ goto error_alloc;
}
/* construct and attach to the keyring */
- ret = key_instantiate_and_link(rkakey, target, 0, keyring, NULL);
- if (ret < 0) {
- key_revoke(rkakey);
- key_put(rkakey);
- key_put(keyring);
- kleave("= %d", ret);
- return ERR_PTR(ret);
- }
+ ret = key_instantiate_and_link(authkey, rka, 0, NULL, NULL);
+ if (ret < 0)
+ goto error_inst;
- *_rkakey = rkakey;
- kleave(" = {%d} ({%d})", keyring->serial, rkakey->serial);
- return keyring;
+ kleave(" = {%d})", authkey->serial);
+ return authkey;
+
+error_inst:
+ key_revoke(authkey);
+ key_put(authkey);
+error_alloc:
+ key_put(rka->target_key);
+ kfree(rka);
+ kleave("= %d", ret);
+ return ERR_PTR(ret);
} /* end request_key_auth_new() */
/*****************************************************************************/
/*
+ * see if an authorisation key is associated with a particular key
+ */
+static int key_get_instantiation_authkey_match(const struct key *key,
+ const void *_id)
+{
+ struct request_key_auth *rka = key->payload.data;
+ key_serial_t id = (key_serial_t)(unsigned long) _id;
+
+ return rka->target_key->serial == id;
+
+} /* end key_get_instantiation_authkey_match() */
+
+/*****************************************************************************/
+/*
* get the authorisation key for instantiation of a specific key if attached to
* the current process's keyrings
* - this key is inserted into a keyring and that is set as /sbin/request-key's
@@ -162,22 +199,27 @@ struct key *request_key_auth_new(struct key *target, struct key **_rkakey)
*/
struct key *key_get_instantiation_authkey(key_serial_t target_id)
{
- struct task_struct *tsk = current;
- struct key *instkey;
-
- /* we must have our own personal session keyring */
- if (!tsk->signal->session_keyring)
- return ERR_PTR(-EACCES);
-
- /* and it must contain a suitable request authorisation key
- * - lock RCU against session keyring changing
- */
- rcu_read_lock();
+ struct key *authkey;
+ key_ref_t authkey_ref;
+
+ authkey_ref = search_process_keyrings(
+ &key_type_request_key_auth,
+ (void *) (unsigned long) target_id,
+ key_get_instantiation_authkey_match,
+ current);
+
+ if (IS_ERR(authkey_ref)) {
+ authkey = ERR_PTR(PTR_ERR(authkey_ref));
+ goto error;
+ }
- instkey = keyring_search_instkey(
- rcu_dereference(tsk->signal->session_keyring), target_id);
+ authkey = key_ref_to_ptr(authkey_ref);
+ if (test_bit(KEY_FLAG_REVOKED, &authkey->flags)) {
+ key_put(authkey);
+ authkey = ERR_PTR(-EKEYREVOKED);
+ }
- rcu_read_unlock();
- return instkey;
+error:
+ return authkey;
} /* end key_get_instantiation_authkey() */
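
The match callback added above hands a key_serial_t to search_process_keyrings() through the opaque void * argument by round-tripping it via unsigned long, rather than passing a pointer to it. A small standalone sketch of that idiom, reduced to plain userspace C (struct record, the table and record_find() are hypothetical helpers for illustration, not kernel interfaces):

#include <stdio.h>

/* hypothetical record keyed by a small integer id */
struct record {
	int id;
	const char *name;
};

/* match callback: the caller smuggles the wanted id through a void * */
static int record_match(const struct record *rec, const void *data)
{
	int wanted = (int)(unsigned long)data;	/* decode without dereferencing */

	return rec->id == wanted;
}

static const struct record *record_find(const struct record *tbl, int n,
					int (*match)(const struct record *, const void *),
					const void *data)
{
	int i;

	for (i = 0; i < n; i++)
		if (match(&tbl[i], data))
			return &tbl[i];
	return NULL;
}

int main(void)
{
	static const struct record tbl[] = {
		{ 1, "one" }, { 7, "seven" },
	};
	const struct record *rec;

	/* encode the id the same way the caller of the match op does */
	rec = record_find(tbl, 2, record_match, (void *)(unsigned long)7);
	printf("%s\n", rec ? rec->name : "not found");
	return 0;
}
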
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index cbda3b2780a..8e71895b97a 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -26,7 +26,6 @@
struct key_type key_type_user = {
.name = "user",
.instantiate = user_instantiate,
- .duplicate = user_duplicate,
.update = user_update,
.match = user_match,
.destroy = user_destroy,
@@ -68,42 +67,10 @@ error:
return ret;
} /* end user_instantiate() */
-
EXPORT_SYMBOL_GPL(user_instantiate);
/*****************************************************************************/
/*
- * duplicate a user defined key
- * - both keys' semaphores are locked against further modification
- * - the new key cannot yet be accessed
- */
-int user_duplicate(struct key *key, const struct key *source)
-{
- struct user_key_payload *upayload, *spayload;
- int ret;
-
- /* just copy the payload */
- ret = -ENOMEM;
- upayload = kmalloc(sizeof(*upayload) + source->datalen, GFP_KERNEL);
- if (upayload) {
- spayload = rcu_dereference(source->payload.data);
- BUG_ON(source->datalen != spayload->datalen);
-
- upayload->datalen = key->datalen = spayload->datalen;
- memcpy(upayload->data, spayload->data, key->datalen);
-
- key->payload.data = upayload;
- ret = 0;
- }
-
- return ret;
-
-} /* end user_duplicate() */
-
-EXPORT_SYMBOL_GPL(user_duplicate);
-
-/*****************************************************************************/
-/*
* dispose of the old data from an updated user defined key
*/
static void user_update_rcu_disposal(struct rcu_head *rcu)
diff --git a/security/security.c b/security/security.c
index ed5fb80769c..f693e1f66b9 100644
--- a/security/security.c
+++ b/security/security.c
@@ -11,6 +11,7 @@
* (at your option) any later version.
*/
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 12e4fb72bf0..53d6c7bbf56 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -494,8 +494,7 @@ static inline void avc_print_ipv6_addr(struct audit_buffer *ab,
char *name1, char *name2)
{
if (!ipv6_addr_any(addr))
- audit_log_format(ab, " %s=%04x:%04x:%04x:%04x:%04x:"
- "%04x:%04x:%04x", name1, NIP6(*addr));
+ audit_log_format(ab, " %s=" NIP6_FMT, name1, NIP6(*addr));
if (port)
audit_log_format(ab, " %s=%d", name2, ntohs(port));
}
@@ -504,7 +503,7 @@ static inline void avc_print_ipv4_addr(struct audit_buffer *ab, u32 addr,
__be16 port, char *name1, char *name2)
{
if (addr)
- audit_log_format(ab, " %s=%d.%d.%d.%d", name1, NIPQUAD(addr));
+ audit_log_format(ab, " %s=" NIPQUAD_FMT, name1, NIPQUAD(addr));
if (port)
audit_log_format(ab, " %s=%d", name2, ntohs(port));
}
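
The avc.c hunks replace hand-written dotted-quad and IPv6 format strings with the shared NIPQUAD_FMT/NIP6_FMT macros. A minimal userspace sketch of the same idea, with a local stand-in for the macros (an approximation written out here for illustration, not the kernel's <linux/kernel.h> definition):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* local stand-ins for the kernel macros, for illustration only */
#define NIPQUAD_FMT "%u.%u.%u.%u"
#define NIPQUAD(addr)				\
	((unsigned char *)&(addr))[0],		\
	((unsigned char *)&(addr))[1],		\
	((unsigned char *)&(addr))[2],		\
	((unsigned char *)&(addr))[3]

int main(void)
{
	uint32_t addr = htonl(0xc0a80001);	/* 192.168.0.1 in network byte order */

	/* one shared format string instead of open-coding "%d.%d.%d.%d" */
	printf("saddr=" NIPQUAD_FMT "\n", NIPQUAD(addr));
	return 0;
}
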
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 3d496eae1b4..6647204e463 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1663,7 +1663,7 @@ static inline void flush_unauthorized_files(struct files_struct * files)
continue;
}
if (devnull) {
- rcuref_inc(&devnull->f_count);
+ get_file(devnull);
} else {
devnull = dentry_open(dget(selinux_null), mntget(selinuxfs_mount), O_RDWR);
if (!devnull) {
diff --git a/security/selinux/include/av_perm_to_string.h b/security/selinux/include/av_perm_to_string.h
index 71aeb12f07c..591e98d9315 100644
--- a/security/selinux/include/av_perm_to_string.h
+++ b/security/selinux/include/av_perm_to_string.h
@@ -238,5 +238,4 @@
S_(SECCLASS_NSCD, NSCD__SHMEMHOST, "shmemhost")
S_(SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, "sendto")
S_(SECCLASS_ASSOCIATION, ASSOCIATION__RECVFROM, "recvfrom")
- S_(SECCLASS_ASSOCIATION, ASSOCIATION__RELABELFROM, "relabelfrom")
- S_(SECCLASS_ASSOCIATION, ASSOCIATION__RELABELTO, "relabelto")
+ S_(SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, "setcontext")
diff --git a/security/selinux/include/av_permissions.h b/security/selinux/include/av_permissions.h
index d1d0996049e..d7f02edf393 100644
--- a/security/selinux/include/av_permissions.h
+++ b/security/selinux/include/av_permissions.h
@@ -908,8 +908,7 @@
#define ASSOCIATION__SENDTO 0x00000001UL
#define ASSOCIATION__RECVFROM 0x00000002UL
-#define ASSOCIATION__RELABELFROM 0x00000004UL
-#define ASSOCIATION__RELABELTO 0x00000008UL
+#define ASSOCIATION__SETCONTEXT 0x00000004UL
#define NETLINK_KOBJECT_UEVENT_SOCKET__IOCTL 0x00000001UL
#define NETLINK_KOBJECT_UEVENT_SOCKET__READ 0x00000002UL
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 0e1352a555c..b5fa02d17b1 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -376,7 +376,7 @@ static ssize_t selinux_transaction_write(struct file *file, const char __user *b
char *data;
ssize_t rv;
- if (ino >= sizeof(write_op)/sizeof(write_op[0]) || !write_op[ino])
+ if (ino >= ARRAY_SIZE(write_op) || !write_op[ino])
return -EINVAL;
data = simple_transaction_get(file, buf, size);
@@ -889,7 +889,7 @@ static void sel_remove_bools(struct dentry *de)
spin_lock(&dcache_lock);
node = de->d_subdirs.next;
while (node != &de->d_subdirs) {
- struct dentry *d = list_entry(node, struct dentry, d_child);
+ struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
list_del_init(node);
if (d->d_inode) {
@@ -1161,7 +1161,7 @@ static int sel_make_avc_files(struct dentry *dir)
#endif
};
- for (i = 0; i < sizeof (files) / sizeof (files[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(files); i++) {
struct inode *inode;
struct dentry *dentry;
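
This hunk and several below replace open-coded sizeof(x)/sizeof(x[0]) divisions with the ARRAY_SIZE() macro. A standalone sketch of the idiom in plain C, with the macro spelled out locally and a hypothetical string table (the kernel version additionally type-checks that the argument really is an array):

#include <stdio.h>
#include <stddef.h>

/* same shape as the kernel helper: element count of a true array */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static const char *write_op[] = { "enforce", "disable", "relabel" };

int main(void)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(write_op); i++)
		printf("%zu: %s\n", i, write_op[i]);
	return 0;
}
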
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index dde094feb20..d049c7acbc8 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -359,7 +359,7 @@ int avtab_read_item(void *fp, u32 vers, struct avtab *a,
return -1;
}
- for (i = 0; i < sizeof(spec_order)/sizeof(u16); i++) {
+ for (i = 0; i < ARRAY_SIZE(spec_order); i++) {
if (val & spec_order[i]) {
key.specified = spec_order[i] | enabled;
datum.data = le32_to_cpu(buf32[items++]);
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 0ac311dc837..0111990ba83 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -103,7 +103,7 @@ static struct policydb_compat_info *policydb_lookup_compat(int version)
int i;
struct policydb_compat_info *info = NULL;
- for (i = 0; i < sizeof(policydb_compat)/sizeof(*info); i++) {
+ for (i = 0; i < ARRAY_SIZE(policydb_compat); i++) {
if (policydb_compat[i].version == version) {
info = &policydb_compat[i];
break;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 44eb4d74908..8a764928ff4 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1712,11 +1712,11 @@ int security_get_bools(int *len, char ***names, int **values)
goto out;
}
- *names = (char**)kcalloc(*len, sizeof(char*), GFP_ATOMIC);
+ *names = kcalloc(*len, sizeof(char*), GFP_ATOMIC);
if (!*names)
goto err;
- *values = (int*)kcalloc(*len, sizeof(int), GFP_ATOMIC);
+ *values = kcalloc(*len, sizeof(int), GFP_ATOMIC);
if (!*values)
goto err;
@@ -1724,7 +1724,7 @@ int security_get_bools(int *len, char ***names, int **values)
size_t name_len;
(*values)[i] = policydb.bool_val_to_struct[i]->state;
name_len = strlen(policydb.p_bool_val_to_name[i]) + 1;
- (*names)[i] = (char*)kmalloc(sizeof(char) * name_len, GFP_ATOMIC);
+ (*names)[i] = kmalloc(sizeof(char) * name_len, GFP_ATOMIC);
if (!(*names)[i])
goto err;
strncpy((*names)[i], policydb.p_bool_val_to_name[i], name_len);
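
The services.c hunk drops the explicit casts on kcalloc()/kmalloc(): the allocators return void *, which converts implicitly to any object pointer type in C, so the casts add nothing. A tiny sketch of the preferred style using the ordinary C library allocators as stand-ins (struct bools and alloc_bools() are hypothetical):

#include <stdlib.h>

struct bools {
	char **names;
	int *values;
};

static int alloc_bools(struct bools *b, size_t len)
{
	/* no casts needed: void * converts to any object pointer type */
	b->names = calloc(len, sizeof(*b->names));
	b->values = calloc(len, sizeof(*b->values));
	if (!b->names || !b->values) {
		free(b->names);
		free(b->values);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct bools b;

	if (alloc_bools(&b, 4))
		return 1;
	free(b.names);
	free(b.values);
	return 0;
}
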
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index c4d87d4dca7..b2af7ca496c 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -137,22 +137,16 @@ static int selinux_xfrm_sec_ctx_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_us
* Must be permitted to relabel from default socket type (process type)
* to specified context
*/
- rc = avc_has_perm(tsec->sid, tsec->sid,
- SECCLASS_ASSOCIATION,
- ASSOCIATION__RELABELFROM, NULL);
- if (rc)
- goto out;
-
rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
SECCLASS_ASSOCIATION,
- ASSOCIATION__RELABELTO, NULL);
+ ASSOCIATION__SETCONTEXT, NULL);
if (rc)
goto out;
return rc;
out:
- *ctxp = 0;
+ *ctxp = NULL;
kfree(ctx);
return rc;
}
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index 5e9a81ab990..54147c1f636 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -17,11 +17,11 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/err.h>
+#include <linux/amba/bus.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/sizes.h>
-#include <asm/hardware/amba.h>
#include <sound/driver.h>
#include <sound/core.h>
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 16df1246a13..7fd072392c7 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -2135,9 +2135,7 @@ static ssize_t snd_pcm_oss_write(struct file *file, const char __user *buf, size
substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK];
if (substream == NULL)
return -ENXIO;
- up(&file->f_dentry->d_inode->i_sem);
result = snd_pcm_oss_write1(substream, buf, count);
- down(&file->f_dentry->d_inode->i_sem);
#ifdef OSS_DEBUG
printk("pcm_oss: write %li bytes (wrote %li bytes)\n", (long)count, (long)result);
#endif
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index 9ee6c177db0..40b4f679c80 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -32,10 +32,6 @@
#include "seq_info.h"
#include "seq_lock.h"
-/* semaphore in struct file record */
-#define semaphore_of(fp) ((fp)->f_dentry->d_inode->i_sem)
-
-
static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
{
return pool->total_elements - atomic_read(&pool->counter);
diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c
index 679d0ae97e4..ed81eec6e73 100644
--- a/sound/isa/wavefront/wavefront_synth.c
+++ b/sound/isa/wavefront/wavefront_synth.c
@@ -115,18 +115,11 @@ MODULE_PARM_DESC(osrun_time, "how many seconds to wait for the ICS2115 OS");
#ifdef WF_DEBUG
-#if defined(NEW_MACRO_VARARGS) || __GNUC__ >= 3
#define DPRINT(cond, ...) \
if ((dev->debug & (cond)) == (cond)) { \
snd_printk (__VA_ARGS__); \
}
#else
-#define DPRINT(cond, args...) \
- if ((dev->debug & (cond)) == (cond)) { \
- snd_printk (args); \
- }
-#endif
-#else
#define DPRINT(cond, args...)
#endif /* WF_DEBUG */
diff --git a/sound/oss/ad1848.c b/sound/oss/ad1848.c
index 3f30c57676c..49796be955f 100644
--- a/sound/oss/ad1848.c
+++ b/sound/oss/ad1848.c
@@ -46,8 +46,6 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/stddef.h>
-#include <linux/pm.h>
-#include <linux/pm_legacy.h>
#include <linux/isapnp.h>
#include <linux/pnp.h>
#include <linux/spinlock.h>
@@ -105,9 +103,6 @@ typedef struct
int irq_ok;
mixer_ents *mix_devices;
int mixer_output_port;
-
- /* Power management */
- struct pm_dev *pmdev;
} ad1848_info;
typedef struct ad1848_port_info
@@ -201,7 +196,6 @@ static void ad1848_halt(int dev);
static void ad1848_halt_input(int dev);
static void ad1848_halt_output(int dev);
static void ad1848_trigger(int dev, int bits);
-static int ad1848_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data);
#ifndef EXCLUDE_TIMERS
static int ad1848_tmr_install(int dev);
@@ -2027,10 +2021,6 @@ int ad1848_init (char *name, struct resource *ports, int irq, int dma_playback,
nr_ad1848_devs++;
- devc->pmdev = pm_register(PM_ISA_DEV, my_dev, ad1848_pm_callback);
- if (devc->pmdev)
- devc->pmdev->data = devc;
-
ad1848_init_hw(devc);
if (irq > 0)
@@ -2197,9 +2187,6 @@ void ad1848_unload(int io_base, int irq, int dma_playback, int dma_capture, int
if(mixer>=0)
sound_unload_mixerdev(mixer);
- if (devc->pmdev)
- pm_unregister(devc->pmdev);
-
nr_ad1848_devs--;
for ( ; i < nr_ad1848_devs ; i++)
adev_info[i] = adev_info[i+1];
@@ -2811,85 +2798,6 @@ static int ad1848_tmr_install(int dev)
}
#endif /* EXCLUDE_TIMERS */
-static int ad1848_suspend(ad1848_info *devc)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&devc->lock,flags);
-
- ad_mute(devc);
-
- spin_unlock_irqrestore(&devc->lock,flags);
- return 0;
-}
-
-static int ad1848_resume(ad1848_info *devc)
-{
- int mixer_levels[32], i;
-
- /* Thinkpad is a bit more of PITA than normal. The BIOS tends to
- restore it in a different config to the one we use. Need to
- fix this somehow */
-
- /* store old mixer levels */
- memcpy(mixer_levels, devc->levels, sizeof (mixer_levels));
- ad1848_init_hw(devc);
-
- /* restore mixer levels */
- for (i = 0; i < 32; i++)
- ad1848_mixer_set(devc, devc->dev_no, mixer_levels[i]);
-
- if (!devc->subtype) {
- static signed char interrupt_bits[12] = { -1, -1, -1, -1, -1, 0x00, -1, 0x08, -1, 0x10, 0x18, 0x20 };
- static char dma_bits[4] = { 1, 2, 0, 3 };
- unsigned long flags;
- signed char bits;
- char dma2_bit = 0;
-
- int config_port = devc->base + 0;
-
- bits = interrupt_bits[devc->irq];
- if (bits == -1) {
- printk(KERN_ERR "MSS: Bad IRQ %d\n", devc->irq);
- return -1;
- }
-
- spin_lock_irqsave(&devc->lock,flags);
-
- outb((bits | 0x40), config_port);
-
- if (devc->dma2 != -1 && devc->dma2 != devc->dma1)
- if ( (devc->dma1 == 0 && devc->dma2 == 1) ||
- (devc->dma1 == 1 && devc->dma2 == 0) ||
- (devc->dma1 == 3 && devc->dma2 == 0))
- dma2_bit = 0x04;
-
- outb((bits | dma_bits[devc->dma1] | dma2_bit), config_port);
- spin_unlock_irqrestore(&devc->lock,flags);
- }
-
- return 0;
-}
-
-static int ad1848_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
-{
- ad1848_info *devc = dev->data;
- if (devc) {
- DEB(printk("ad1848: pm event received: 0x%x\n", rqst));
-
- switch (rqst) {
- case PM_SUSPEND:
- ad1848_suspend(devc);
- break;
- case PM_RESUME:
- ad1848_resume(devc);
- break;
- }
- }
- return 0;
-}
-
-
EXPORT_SYMBOL(ad1848_detect);
EXPORT_SYMBOL(ad1848_init);
EXPORT_SYMBOL(ad1848_unload);
diff --git a/sound/oss/ad1889.c b/sound/oss/ad1889.c
index 2cfd214e4c2..a0d73f34310 100644
--- a/sound/oss/ad1889.c
+++ b/sound/oss/ad1889.c
@@ -1089,7 +1089,7 @@ static struct pci_driver ad1889_driver = {
static int __init ad1889_init_module(void)
{
- return pci_module_init(&ad1889_driver);
+ return pci_register_driver(&ad1889_driver);
}
static void ad1889_exit_module(void)
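
From here on, many of the sound/oss drivers swap the obsolete pci_module_init() wrapper for a direct call to pci_register_driver(). A condensed sketch of the resulting module boilerplate, assuming a hypothetical driver named foo_driver (the ID table and real probe logic are elided):

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* a real driver would also map resources and register devices here */
	return pci_enable_device(pdev);
}

static void foo_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver foo_driver = {
	.name		= "foo_audio",
	.id_table	= NULL,		/* a real driver lists its PCI IDs here */
	.probe		= foo_probe,
	.remove		= foo_remove,
};

static int __init foo_init_module(void)
{
	/* pci_register_driver() returns 0 on success or a negative errno */
	return pci_register_driver(&foo_driver);
}

static void __exit foo_cleanup_module(void)
{
	pci_unregister_driver(&foo_driver);
}

module_init(foo_init_module);
module_exit(foo_cleanup_module);
MODULE_LICENSE("GPL");
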
diff --git a/sound/oss/btaudio.c b/sound/oss/btaudio.c
index a85093fec7b..4007a5680ac 100644
--- a/sound/oss/btaudio.c
+++ b/sound/oss/btaudio.c
@@ -1101,7 +1101,7 @@ static int btaudio_init_module(void)
digital ? "digital" : "",
analog && digital ? "+" : "",
analog ? "analog" : "");
- return pci_module_init(&btaudio_pci_driver);
+ return pci_register_driver(&btaudio_pci_driver);
}
static void btaudio_cleanup_module(void)
diff --git a/sound/oss/cmpci.c b/sound/oss/cmpci.c
index 74dcca78c6c..7cfbb08db53 100644
--- a/sound/oss/cmpci.c
+++ b/sound/oss/cmpci.c
@@ -3366,7 +3366,7 @@ static struct pci_driver cm_driver = {
static int __init init_cmpci(void)
{
printk(KERN_INFO "cmpci: version $Revision: 6.82 $ time " __TIME__ " " __DATE__ "\n");
- return pci_module_init(&cm_driver);
+ return pci_register_driver(&cm_driver);
}
static void __exit cleanup_cmpci(void)
diff --git a/sound/oss/cs4281/cs4281m.c b/sound/oss/cs4281/cs4281m.c
index adc689649fe..0720365f643 100644
--- a/sound/oss/cs4281/cs4281m.c
+++ b/sound/oss/cs4281/cs4281m.c
@@ -298,7 +298,6 @@ struct cs4281_state {
struct cs4281_pipeline pl[CS4281_NUMBER_OF_PIPELINES];
};
-#include <linux/pm_legacy.h>
#include "cs4281pm-24.c"
#if CSDEBUG
@@ -4256,9 +4255,6 @@ static void __devinit cs4281_InitPM(struct cs4281_state *s)
static int __devinit cs4281_probe(struct pci_dev *pcidev,
const struct pci_device_id *pciid)
{
-#ifndef NOT_CS4281_PM
- struct pm_dev *pmdev;
-#endif
struct cs4281_state *s;
dma_addr_t dma_mask;
mm_segment_t fs;
@@ -4374,19 +4370,7 @@ static int __devinit cs4281_probe(struct pci_dev *pcidev,
}
#ifndef NOT_CS4281_PM
cs4281_InitPM(s);
- pmdev = cs_pm_register(PM_PCI_DEV, PM_PCI_ID(pcidev), cs4281_pm_callback);
- if (pmdev)
- {
- CS_DBGOUT(CS_INIT | CS_PM, 4, printk(KERN_INFO
- "cs4281: probe() pm_register() succeeded (%p).\n", pmdev));
- pmdev->data = s;
- }
- else
- {
- CS_DBGOUT(CS_INIT | CS_PM | CS_ERROR, 0, printk(KERN_INFO
- "cs4281: probe() pm_register() failed (%p).\n", pmdev));
- s->pm.flags |= CS4281_PM_NOT_REGISTERED;
- }
+ s->pm.flags |= CS4281_PM_NOT_REGISTERED;
#endif
pci_set_master(pcidev); // enable bus mastering
@@ -4477,7 +4461,7 @@ static int __init cs4281_init_module(void)
printk(KERN_INFO "cs4281: version v%d.%02d.%d time " __TIME__ " "
__DATE__ "\n", CS4281_MAJOR_VERSION, CS4281_MINOR_VERSION,
CS4281_ARCH);
- rtn = pci_module_init(&cs4281_pci_driver);
+ rtn = pci_register_driver(&cs4281_pci_driver);
CS_DBGOUT(CS_INIT | CS_FUNCTION, 2,
printk(KERN_INFO "cs4281: cs4281_init_module()- (%d)\n",rtn));
@@ -4487,9 +4471,6 @@ static int __init cs4281_init_module(void)
static void __exit cs4281_cleanup_module(void)
{
pci_unregister_driver(&cs4281_pci_driver);
-#ifndef NOT_CS4281_PM
- cs_pm_unregister_all(cs4281_pm_callback);
-#endif
CS_DBGOUT(CS_INIT | CS_FUNCTION, 2,
printk(KERN_INFO "cs4281: cleanup_cs4281() finished\n"));
}
diff --git a/sound/oss/cs4281/cs4281pm-24.c b/sound/oss/cs4281/cs4281pm-24.c
index d2a453aff0a..90cbd767953 100644
--- a/sound/oss/cs4281/cs4281pm-24.c
+++ b/sound/oss/cs4281/cs4281pm-24.c
@@ -27,9 +27,6 @@
#ifndef NOT_CS4281_PM
#include <linux/pm.h>
-#define cs_pm_register(a, b, c) pm_register((a), (b), (c));
-#define cs_pm_unregister_all(a) pm_unregister_all((a));
-
static int cs4281_suspend(struct cs4281_state *s);
static int cs4281_resume(struct cs4281_state *s);
/*
@@ -41,42 +38,6 @@ static int cs4281_resume(struct cs4281_state *s);
#define CS4281_SUSPEND_TBL cs4281_suspend_null
#define CS4281_RESUME_TBL cs4281_resume_null
-static int cs4281_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
-{
- struct cs4281_state *state;
-
- CS_DBGOUT(CS_PM, 2, printk(KERN_INFO
- "cs4281: cs4281_pm_callback dev=%p rqst=0x%x state=%p\n",
- dev,(unsigned)rqst,data));
- state = (struct cs4281_state *) dev->data;
- if (state) {
- switch(rqst) {
- case PM_SUSPEND:
- CS_DBGOUT(CS_PM, 2, printk(KERN_INFO
- "cs4281: PM suspend request\n"));
- if(cs4281_suspend(state))
- {
- CS_DBGOUT(CS_ERROR, 2, printk(KERN_INFO
- "cs4281: PM suspend request refused\n"));
- return 1;
- }
- break;
- case PM_RESUME:
- CS_DBGOUT(CS_PM, 2, printk(KERN_INFO
- "cs4281: PM resume request\n"));
- if(cs4281_resume(state))
- {
- CS_DBGOUT(CS_ERROR, 2, printk(KERN_INFO
- "cs4281: PM resume request refused\n"));
- return 1;
- }
- break;
- }
- }
-
- return 0;
-}
-
#else /* CS4281_PM */
#define CS4281_SUSPEND_TBL cs4281_suspend_null
#define CS4281_RESUME_TBL cs4281_resume_null
diff --git a/sound/oss/cs46xx.c b/sound/oss/cs46xx.c
index cb998e8c0fd..58e25c82eaf 100644
--- a/sound/oss/cs46xx.c
+++ b/sound/oss/cs46xx.c
@@ -391,10 +391,6 @@ static void cs461x_clear_serial_FIFOs(struct cs_card *card, int type);
static int cs46xx_suspend_tbl(struct pci_dev *pcidev, pm_message_t state);
static int cs46xx_resume_tbl(struct pci_dev *pcidev);
-#ifndef CS46XX_ACPI_SUPPORT
-static int cs46xx_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data);
-#endif
-
#if CSDEBUG
/* DEBUG ROUTINES */
@@ -5320,7 +5316,6 @@ static const char fndmsg[] = KERN_INFO "cs46xx: Found %d audio device(s).\n";
static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
const struct pci_device_id *pciid)
{
- struct pm_dev *pmdev;
int i,j;
u16 ss_card, ss_vendor;
struct cs_card *card;
@@ -5530,22 +5525,6 @@ static int __devinit cs46xx_probe(struct pci_dev *pci_dev,
PCI_SET_DMA_MASK(pci_dev, dma_mask);
list_add(&card->list, &cs46xx_devs);
- pmdev = cs_pm_register(PM_PCI_DEV, PM_PCI_ID(pci_dev), cs46xx_pm_callback);
- if (pmdev)
- {
- CS_DBGOUT(CS_INIT | CS_PM, 4, printk(KERN_INFO
- "cs46xx: probe() pm_register() succeeded (%p).\n",
- pmdev));
- pmdev->data = card;
- }
- else
- {
- CS_DBGOUT(CS_INIT | CS_PM | CS_ERROR, 2, printk(KERN_INFO
- "cs46xx: probe() pm_register() failed (%p).\n",
- pmdev));
- card->pm.flags |= CS46XX_PM_NOT_REGISTERED;
- }
-
CS_DBGOUT(CS_PM, 9, printk(KERN_INFO "cs46xx: pm.flags=0x%x card=%p\n",
(unsigned)card->pm.flags,card));
@@ -5711,7 +5690,7 @@ static int __init cs46xx_init_module(void)
int rtn = 0;
CS_DBGOUT(CS_INIT | CS_FUNCTION, 2, printk(KERN_INFO
"cs46xx: cs46xx_init_module()+ \n"));
- rtn = pci_module_init(&cs46xx_pci_driver);
+ rtn = pci_register_driver(&cs46xx_pci_driver);
if(rtn == -ENODEV)
{
@@ -5727,7 +5706,6 @@ static int __init cs46xx_init_module(void)
static void __exit cs46xx_cleanup_module(void)
{
pci_unregister_driver(&cs46xx_pci_driver);
- cs_pm_unregister_all(cs46xx_pm_callback);
CS_DBGOUT(CS_INIT | CS_FUNCTION, 2,
printk(KERN_INFO "cs46xx: cleanup_cs46xx() finished\n"));
}
@@ -5735,44 +5713,6 @@ static void __exit cs46xx_cleanup_module(void)
module_init(cs46xx_init_module);
module_exit(cs46xx_cleanup_module);
-#ifndef CS46XX_ACPI_SUPPORT
-static int cs46xx_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
-{
- struct cs_card *card;
-
- CS_DBGOUT(CS_PM, 2, printk(KERN_INFO
- "cs46xx: cs46xx_pm_callback dev=%p rqst=0x%x card=%p\n",
- dev,(unsigned)rqst,data));
- card = (struct cs_card *) dev->data;
- if (card) {
- switch(rqst) {
- case PM_SUSPEND:
- CS_DBGOUT(CS_PM, 2, printk(KERN_INFO
- "cs46xx: PM suspend request\n"));
- if(cs46xx_suspend(card, PMSG_SUSPEND))
- {
- CS_DBGOUT(CS_ERROR, 2, printk(KERN_INFO
- "cs46xx: PM suspend request refused\n"));
- return 1;
- }
- break;
- case PM_RESUME:
- CS_DBGOUT(CS_PM, 2, printk(KERN_INFO
- "cs46xx: PM resume request\n"));
- if(cs46xx_resume(card))
- {
- CS_DBGOUT(CS_ERROR, 2, printk(KERN_INFO
- "cs46xx: PM resume request refused\n"));
- return 1;
- }
- break;
- }
- }
-
- return 0;
-}
-#endif
-
#if CS46XX_ACPI_SUPPORT
static int cs46xx_suspend_tbl(struct pci_dev *pcidev, pm_message_t state)
{
diff --git a/sound/oss/cs46xxpm-24.h b/sound/oss/cs46xxpm-24.h
index e220bd7240f..ad82db84d01 100644
--- a/sound/oss/cs46xxpm-24.h
+++ b/sound/oss/cs46xxpm-24.h
@@ -38,13 +38,9 @@
*/
static int cs46xx_suspend_tbl(struct pci_dev *pcidev, pm_message_t state);
static int cs46xx_resume_tbl(struct pci_dev *pcidev);
-#define cs_pm_register(a, b, c) NULL
-#define cs_pm_unregister_all(a)
#define CS46XX_SUSPEND_TBL cs46xx_suspend_tbl
#define CS46XX_RESUME_TBL cs46xx_resume_tbl
#else
-#define cs_pm_register(a, b, c) pm_register((a), (b), (c));
-#define cs_pm_unregister_all(a) pm_unregister_all((a));
#define CS46XX_SUSPEND_TBL cs46xx_null
#define CS46XX_RESUME_TBL cs46xx_null
#endif
diff --git a/sound/oss/dmasound/dac3550a.c b/sound/oss/dmasound/dac3550a.c
index 533895eba0e..7360d8954d6 100644
--- a/sound/oss/dmasound/dac3550a.c
+++ b/sound/oss/dmasound/dac3550a.c
@@ -41,10 +41,10 @@ static int daca_detect_client(struct i2c_adapter *adapter, int address);
static int daca_detach_client(struct i2c_client *client);
struct i2c_driver daca_driver = {
- .owner = THIS_MODULE,
- .name = "DAC3550A driver V " DACA_VERSION,
+ .driver = {
+ .name = "DAC3550A driver V " DACA_VERSION,
+ },
.id = I2C_DRIVERID_DACA,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = daca_attach_adapter,
.detach_client = daca_detach_client,
};
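
The dac3550a.c hunk (and the matching tas_common.c hunk further down) follows the i2c core of that era moving the driver name into an embedded struct device_driver and dropping the I2C_DF_NOTIFY flag. A skeleton of the updated registration shape for a hypothetical codec driver, still using the legacy attach_adapter/detach_client binding model:

#include <linux/i2c.h>

static int example_attach_adapter(struct i2c_adapter *adapter)
{
	/* a real driver would probe the bus for its chip here */
	return 0;
}

static int example_detach_client(struct i2c_client *client)
{
	return 0;
}

static struct i2c_driver example_driver = {
	.driver = {
		.name = "example-codec",	/* formerly a top-level .name, next to .owner and .flags */
	},
	.attach_adapter	= example_attach_adapter,
	.detach_client	= example_detach_client,
};

Registration itself is unchanged: the driver is still added with i2c_add_driver(&example_driver).
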
diff --git a/sound/oss/dmasound/dmasound.h b/sound/oss/dmasound/dmasound.h
index 222014cafc1..a1b0b92af4b 100644
--- a/sound/oss/dmasound/dmasound.h
+++ b/sound/oss/dmasound/dmasound.h
@@ -270,7 +270,6 @@ extern int dmasound_catchRadius;
#define SW_INPUT_VOLUME_SCALE 4
#define SW_INPUT_VOLUME_DEFAULT (128 / SW_INPUT_VOLUME_SCALE)
-extern int expand_bal; /* Balance factor for expanding (not volume!) */
extern int expand_read_bal; /* Balance factor for reading */
extern uint software_input_volume; /* software implemented recording volume! */
diff --git a/sound/oss/dmasound/dmasound_atari.c b/sound/oss/dmasound/dmasound_atari.c
index 59eb53f8931..dc31373069a 100644
--- a/sound/oss/dmasound/dmasound_atari.c
+++ b/sound/oss/dmasound/dmasound_atari.c
@@ -67,46 +67,46 @@ static int expand_data; /* Data for expanding */
* ++geert: split in even more functions (one per format)
*/
-static ssize_t ata_ct_law(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_law(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t ata_ct_s8(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_s8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t ata_ct_u8(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_u8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t ata_ct_s16be(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_s16be(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t ata_ct_u16be(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_u16be(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t ata_ct_s16le(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_s16le(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t ata_ct_u16le(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_u16le(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t ata_ctx_law(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_law(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t ata_ctx_s8(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_s8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t ata_ctx_u8(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_u8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t ata_ctx_s16be(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_s16be(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t ata_ctx_u16be(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_u16be(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t ata_ctx_s16le(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_s16le(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
-static ssize_t ata_ctx_u16le(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_u16le(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft);
@@ -151,7 +151,7 @@ static int FalconStateInfo(char *buffer, size_t space);
/*** Translations ************************************************************/
-static ssize_t ata_ct_law(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_law(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -176,7 +176,7 @@ static ssize_t ata_ct_law(const u_char *userPtr, size_t userCount,
}
-static ssize_t ata_ct_s8(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_s8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -194,7 +194,7 @@ static ssize_t ata_ct_s8(const u_char *userPtr, size_t userCount,
}
-static ssize_t ata_ct_u8(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_u8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -217,8 +217,9 @@ static ssize_t ata_ct_u8(const u_char *userPtr, size_t userCount,
used = count*2;
while (count > 0) {
u_short data;
- if (get_user(data, ((u_short *)userPtr)++))
+ if (get_user(data, (u_short __user *)userPtr))
return -EFAULT;
+ userPtr += 2;
*p++ = data ^ 0x8080;
count--;
}
@@ -228,7 +229,7 @@ static ssize_t ata_ct_u8(const u_char *userPtr, size_t userCount,
}
-static ssize_t ata_ct_s16be(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_s16be(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -240,8 +241,9 @@ static ssize_t ata_ct_s16be(const u_char *userPtr, size_t userCount,
used = count*2;
while (count > 0) {
u_short data;
- if (get_user(data, ((u_short *)userPtr)++))
+ if (get_user(data, (u_short __user *)userPtr))
return -EFAULT;
+ userPtr += 2;
*p++ = data;
*p++ = data;
count--;
@@ -259,7 +261,7 @@ static ssize_t ata_ct_s16be(const u_char *userPtr, size_t userCount,
}
-static ssize_t ata_ct_u16be(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_u16be(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -271,8 +273,9 @@ static ssize_t ata_ct_u16be(const u_char *userPtr, size_t userCount,
used = count*2;
while (count > 0) {
u_short data;
- if (get_user(data, ((u_short *)userPtr)++))
+ if (get_user(data, (u_short __user *)userPtr))
return -EFAULT;
+ userPtr += 2;
data ^= 0x8000;
*p++ = data;
*p++ = data;
@@ -284,9 +287,10 @@ static ssize_t ata_ct_u16be(const u_char *userPtr, size_t userCount,
count = min_t(unsigned long, userCount, frameLeft)>>2;
used = count*4;
while (count > 0) {
- u_long data;
- if (get_user(data, ((u_int *)userPtr)++))
+ u_int data;
+ if (get_user(data, (u_int __user *)userPtr))
return -EFAULT;
+ userPtr += 4;
*p++ = data ^ 0x80008000;
count--;
}
@@ -296,7 +300,7 @@ static ssize_t ata_ct_u16be(const u_char *userPtr, size_t userCount,
}
-static ssize_t ata_ct_s16le(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_s16le(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -309,8 +313,9 @@ static ssize_t ata_ct_s16le(const u_char *userPtr, size_t userCount,
used = count*2;
while (count > 0) {
u_short data;
- if (get_user(data, ((u_short *)userPtr)++))
+ if (get_user(data, (u_short __user *)userPtr))
return -EFAULT;
+ userPtr += 2;
data = le2be16(data);
*p++ = data;
*p++ = data;
@@ -323,8 +328,9 @@ static ssize_t ata_ct_s16le(const u_char *userPtr, size_t userCount,
used = count*4;
while (count > 0) {
u_long data;
- if (get_user(data, ((u_int *)userPtr)++))
+ if (get_user(data, (u_int __user *)userPtr))
return -EFAULT;
+ userPtr += 4;
data = le2be16dbl(data);
*p++ = data;
count--;
@@ -335,7 +341,7 @@ static ssize_t ata_ct_s16le(const u_char *userPtr, size_t userCount,
}
-static ssize_t ata_ct_u16le(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_u16le(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -348,8 +354,9 @@ static ssize_t ata_ct_u16le(const u_char *userPtr, size_t userCount,
used = count*2;
while (count > 0) {
u_short data;
- if (get_user(data, ((u_short *)userPtr)++))
+ if (get_user(data, (u_short __user *)userPtr))
return -EFAULT;
+ userPtr += 2;
data = le2be16(data) ^ 0x8000;
*p++ = data;
*p++ = data;
@@ -361,8 +368,9 @@ static ssize_t ata_ct_u16le(const u_char *userPtr, size_t userCount,
used = count;
while (count > 0) {
u_long data;
- if (get_user(data, ((u_int *)userPtr)++))
+ if (get_user(data, (u_int __user *)userPtr))
return -EFAULT;
+ userPtr += 4;
data = le2be16dbl(data) ^ 0x80008000;
*p++ = data;
count--;
@@ -373,7 +381,7 @@ static ssize_t ata_ct_u16le(const u_char *userPtr, size_t userCount,
}
-static ssize_t ata_ctx_law(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_law(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -435,7 +443,7 @@ static ssize_t ata_ctx_law(const u_char *userPtr, size_t userCount,
}
-static ssize_t ata_ctx_s8(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_s8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -470,8 +478,9 @@ static ssize_t ata_ctx_s8(const u_char *userPtr, size_t userCount,
if (bal < 0) {
if (userCount < 2)
break;
- if (get_user(data, ((u_short *)userPtr)++))
+ if (get_user(data, (u_short __user *)userPtr))
return -EFAULT;
+ userPtr += 2;
userCount -= 2;
bal += hSpeed;
}
@@ -488,7 +497,7 @@ static ssize_t ata_ctx_s8(const u_char *userPtr, size_t userCount,
}
-static ssize_t ata_ctx_u8(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_u8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -524,8 +533,9 @@ static ssize_t ata_ctx_u8(const u_char *userPtr, size_t userCount,
if (bal < 0) {
if (userCount < 2)
break;
- if (get_user(data, ((u_short *)userPtr)++))
+ if (get_user(data, (u_short __user *)userPtr))
return -EFAULT;
+ userPtr += 2;
data ^= 0x8080;
userCount -= 2;
bal += hSpeed;
@@ -543,7 +553,7 @@ static ssize_t ata_ctx_u8(const u_char *userPtr, size_t userCount,
}
-static ssize_t ata_ctx_s16be(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_s16be(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -561,8 +571,9 @@ static ssize_t ata_ctx_s16be(const u_char *userPtr, size_t userCount,
if (bal < 0) {
if (userCount < 2)
break;
- if (get_user(data, ((u_short *)userPtr)++))
+ if (get_user(data, (u_short __user *)userPtr))
return -EFAULT;
+ userPtr += 2;
userCount -= 2;
bal += hSpeed;
}
@@ -579,8 +590,9 @@ static ssize_t ata_ctx_s16be(const u_char *userPtr, size_t userCount,
if (bal < 0) {
if (userCount < 4)
break;
- if (get_user(data, ((u_int *)userPtr)++))
+ if (get_user(data, (u_int __user *)userPtr))
return -EFAULT;
+ userPtr += 4;
userCount -= 4;
bal += hSpeed;
}
@@ -597,7 +609,7 @@ static ssize_t ata_ctx_s16be(const u_char *userPtr, size_t userCount,
}
-static ssize_t ata_ctx_u16be(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_u16be(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -615,8 +627,9 @@ static ssize_t ata_ctx_u16be(const u_char *userPtr, size_t userCount,
if (bal < 0) {
if (userCount < 2)
break;
- if (get_user(data, ((u_short *)userPtr)++))
+ if (get_user(data, (u_short __user *)userPtr))
return -EFAULT;
+ userPtr += 2;
data ^= 0x8000;
userCount -= 2;
bal += hSpeed;
@@ -634,8 +647,9 @@ static ssize_t ata_ctx_u16be(const u_char *userPtr, size_t userCount,
if (bal < 0) {
if (userCount < 4)
break;
- if (get_user(data, ((u_int *)userPtr)++))
+ if (get_user(data, (u_int __user *)userPtr))
return -EFAULT;
+ userPtr += 4;
data ^= 0x80008000;
userCount -= 4;
bal += hSpeed;
@@ -653,7 +667,7 @@ static ssize_t ata_ctx_u16be(const u_char *userPtr, size_t userCount,
}
-static ssize_t ata_ctx_s16le(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_s16le(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -671,8 +685,9 @@ static ssize_t ata_ctx_s16le(const u_char *userPtr, size_t userCount,
if (bal < 0) {
if (userCount < 2)
break;
- if (get_user(data, ((u_short *)userPtr)++))
+ if (get_user(data, (u_short __user *)userPtr))
return -EFAULT;
+ userPtr += 2;
data = le2be16(data);
userCount -= 2;
bal += hSpeed;
@@ -690,8 +705,9 @@ static ssize_t ata_ctx_s16le(const u_char *userPtr, size_t userCount,
if (bal < 0) {
if (userCount < 4)
break;
- if (get_user(data, ((u_int *)userPtr)++))
+ if (get_user(data, (u_int __user *)userPtr))
return -EFAULT;
+ userPtr += 4;
data = le2be16dbl(data);
userCount -= 4;
bal += hSpeed;
@@ -709,7 +725,7 @@ static ssize_t ata_ctx_s16le(const u_char *userPtr, size_t userCount,
}
-static ssize_t ata_ctx_u16le(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_u16le(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -727,8 +743,9 @@ static ssize_t ata_ctx_u16le(const u_char *userPtr, size_t userCount,
if (bal < 0) {
if (userCount < 2)
break;
- if (get_user(data, ((u_short *)userPtr)++))
+ if (get_user(data, (u_short __user *)userPtr))
return -EFAULT;
+ userPtr += 2;
data = le2be16(data) ^ 0x8000;
userCount -= 2;
bal += hSpeed;
@@ -746,8 +763,9 @@ static ssize_t ata_ctx_u16le(const u_char *userPtr, size_t userCount,
if (bal < 0) {
if (userCount < 4)
break;
- if (get_user(data, ((u_int *)userPtr)++))
+ if (get_user(data, (u_int __user *)userPtr))
return -EFAULT;
+ userPtr += 4;
data = le2be16dbl(data) ^ 0x80008000;
userCount -= 4;
bal += hSpeed;
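
The dmasound_atari.c hunks stop incrementing a cast lvalue inside get_user(): ((u_short *)userPtr)++ is not portable C and also loses the __user annotation. Instead the value is fetched through a correctly typed __user pointer and the byte pointer is advanced explicitly. A compact sketch of the corrected pattern for a hypothetical copy-in loop in kernel context:

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/uaccess.h>

static ssize_t copy_in_samples(const unsigned char __user *userPtr,
			       size_t userCount, unsigned short *dst)
{
	ssize_t used = 0;

	while (userCount >= 2) {
		unsigned short data;

		/* fetch through a __user pointer of the right type ... */
		if (get_user(data, (unsigned short __user *)userPtr))
			return -EFAULT;
		/* ... then step the byte pointer past the sample by hand */
		userPtr += 2;
		userCount -= 2;

		*dst++ = data;
		used += 2;
	}
	return used;
}
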
diff --git a/sound/oss/dmasound/dmasound_awacs.c b/sound/oss/dmasound/dmasound_awacs.c
index cebd881b91a..74f975676cc 100644
--- a/sound/oss/dmasound/dmasound_awacs.c
+++ b/sound/oss/dmasound/dmasound_awacs.c
@@ -125,6 +125,7 @@ static int awacs_rate_index;
static int awacs_subframe;
static struct device_node* awacs_node;
static struct device_node* i2s_node;
+static struct resource awacs_rsrc[3];
static char awacs_name[64];
static int awacs_revision;
@@ -667,9 +668,12 @@ static void PMacIrqCleanup(void)
iounmap(awacs_txdma);
iounmap(awacs_rxdma);
- release_OF_resource(awacs_node, 0);
- release_OF_resource(awacs_node, 1);
- release_OF_resource(awacs_node, 2);
+ release_mem_region(awacs_rsrc[0].start,
+ awacs_rsrc[0].end - awacs_rsrc[0].start + 1);
+ release_mem_region(awacs_rsrc[1].start,
+ awacs_rsrc[1].end - awacs_rsrc[1].start + 1);
+ release_mem_region(awacs_rsrc[2].start,
+ awacs_rsrc[2].end - awacs_rsrc[2].start + 1);
kfree(awacs_tx_cmd_space);
kfree(awacs_rx_cmd_space);
@@ -2863,46 +2867,58 @@ printk("dmasound_pmac: couldn't find a Codec we can handle\n");
* other info if necessary (early AWACS we want to read chip ids)
*/
- if (io->n_addrs < 3 || io->n_intrs < 3) {
+ if (of_get_address(io, 2, NULL, NULL) == NULL || io->n_intrs < 3) {
/* OK - maybe we need to use the 'awacs' node (on earlier
* machines).
- */
+ */
if (awacs_node) {
io = awacs_node ;
- if (io->n_addrs < 3 || io->n_intrs < 3) {
- printk("dmasound_pmac: can't use %s"
- " (%d addrs, %d intrs)\n",
- io->full_name, io->n_addrs, io->n_intrs);
+ if (of_get_address(io, 2, NULL, NULL) == NULL ||
+ io->n_intrs < 3) {
+ printk("dmasound_pmac: can't use %s\n",
+ io->full_name);
return -ENODEV;
}
- } else {
- printk("dmasound_pmac: can't use %s (%d addrs, %d intrs)\n",
- io->full_name, io->n_addrs, io->n_intrs);
- }
+ } else
+ printk("dmasound_pmac: can't use %s\n", io->full_name);
}
- if (!request_OF_resource(io, 0, NULL)) {
+ if (of_address_to_resource(io, 0, &awacs_rsrc[0]) ||
+ request_mem_region(awacs_rsrc[0].start,
+ awacs_rsrc[0].end - awacs_rsrc[0].start + 1,
+ " (IO)") == NULL) {
printk(KERN_ERR "dmasound: can't request IO resource !\n");
return -ENODEV;
}
- if (!request_OF_resource(io, 1, " (tx dma)")) {
- release_OF_resource(io, 0);
- printk(KERN_ERR "dmasound: can't request TX DMA resource !\n");
+ if (of_address_to_resource(io, 1, &awacs_rsrc[1]) ||
+ request_mem_region(awacs_rsrc[1].start,
+ awacs_rsrc[1].end - awacs_rsrc[1].start + 1,
+ " (tx dma)") == NULL) {
+ release_mem_region(awacs_rsrc[0].start,
+ awacs_rsrc[0].end - awacs_rsrc[0].start + 1);
+ printk(KERN_ERR "dmasound: can't request Tx DMA resource !\n");
return -ENODEV;
}
-
- if (!request_OF_resource(io, 2, " (rx dma)")) {
- release_OF_resource(io, 0);
- release_OF_resource(io, 1);
- printk(KERN_ERR "dmasound: can't request RX DMA resource !\n");
+ if (of_address_to_resource(io, 2, &awacs_rsrc[2]) ||
+ request_mem_region(awacs_rsrc[2].start,
+ awacs_rsrc[2].end - awacs_rsrc[2].start + 1,
+ " (rx dma)") == NULL) {
+ release_mem_region(awacs_rsrc[0].start,
+ awacs_rsrc[0].end - awacs_rsrc[0].start + 1);
+ release_mem_region(awacs_rsrc[1].start,
+ awacs_rsrc[1].end - awacs_rsrc[1].start + 1);
+ printk(KERN_ERR "dmasound: can't request Rx DMA resource !\n");
return -ENODEV;
}
awacs_beep_dev = input_allocate_device();
if (!awacs_beep_dev) {
- release_OF_resource(io, 0);
- release_OF_resource(io, 1);
- release_OF_resource(io, 2);
+ release_mem_region(awacs_rsrc[0].start,
+ awacs_rsrc[0].end - awacs_rsrc[0].start + 1);
+ release_mem_region(awacs_rsrc[1].start,
+ awacs_rsrc[1].end - awacs_rsrc[1].start + 1);
+ release_mem_region(awacs_rsrc[2].start,
+ awacs_rsrc[2].end - awacs_rsrc[2].start + 1);
printk(KERN_ERR "dmasound: can't allocate input device !\n");
return -ENOMEM;
}
@@ -2916,11 +2932,11 @@ printk("dmasound_pmac: couldn't find a Codec we can handle\n");
/* all OF versions I've seen use this value */
if (i2s_node)
- i2s = ioremap(io->addrs[0].address, 0x1000);
+ i2s = ioremap(awacs_rsrc[0].start, 0x1000);
else
- awacs = ioremap(io->addrs[0].address, 0x1000);
- awacs_txdma = ioremap(io->addrs[1].address, 0x100);
- awacs_rxdma = ioremap(io->addrs[2].address, 0x100);
+ awacs = ioremap(awacs_rsrc[0].start, 0x1000);
+ awacs_txdma = ioremap(awacs_rsrc[1].start, 0x100);
+ awacs_rxdma = ioremap(awacs_rsrc[2].start, 0x100);
/* first of all make sure that the chip is powered up....*/
pmac_call_feature(PMAC_FTR_SOUND_CHIP_ENABLE, io, 0, 1);
@@ -3083,9 +3099,10 @@ printk("dmasound_pmac: Awacs/Screamer Codec Mfct: %d Rev %d\n", mfg, rev);
struct device_node* mio;
macio_base = NULL;
for (mio = io->parent; mio; mio = mio->parent) {
- if (strcmp(mio->name, "mac-io") == 0
- && mio->n_addrs > 0) {
- macio_base = ioremap(mio->addrs[0].address, 0x40);
+ if (strcmp(mio->name, "mac-io") == 0) {
+ struct resource r;
+ if (of_address_to_resource(mio, 0, &r) == 0)
+ macio_base = ioremap(r.start, 0x40);
break;
}
}
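
The dmasound_awacs.c hunks replace the old request_OF_resource()/release_OF_resource() helpers with of_address_to_resource() plus ordinary request_mem_region()/ioremap() calls. A condensed sketch of that claim-then-map sequence for a single register range, assuming a device_node *np already looked up and the powerpc <asm/prom.h> of that era for the prototype:

#include <linux/ioport.h>
#include <asm/io.h>
#include <asm/prom.h>	/* of_address_to_resource() on powerpc of this vintage */

static void __iomem *map_first_reg_range(struct device_node *np)
{
	struct resource r;
	void __iomem *regs;

	/* translate entry 0 of the node's "reg" property into a resource */
	if (of_address_to_resource(np, 0, &r) != 0)
		return NULL;

	/* claim the physical range before mapping it */
	if (request_mem_region(r.start, r.end - r.start + 1, "example (IO)") == NULL)
		return NULL;

	regs = ioremap(r.start, r.end - r.start + 1);
	if (regs == NULL)
		release_mem_region(r.start, r.end - r.start + 1);

	return regs;
}
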
diff --git a/sound/oss/dmasound/dmasound_paula.c b/sound/oss/dmasound/dmasound_paula.c
index d59f60b2641..494070a3f87 100644
--- a/sound/oss/dmasound/dmasound_paula.c
+++ b/sound/oss/dmasound/dmasound_paula.c
@@ -34,6 +34,7 @@
#define DMASOUND_PAULA_REVISION 0
#define DMASOUND_PAULA_EDITION 4
+#define custom amiga_custom
/*
* The minimum period for audio depends on htotal (for OCS/ECS/AGA)
* (Imported from arch/m68k/amiga/amisound.c)
@@ -156,7 +157,7 @@ static int AmiStateInfo(char *buffer, size_t space);
* Native format
*/
-static ssize_t ami_ct_s8(const u_char *userPtr, size_t userCount,
+static ssize_t ami_ct_s8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed, ssize_t frameLeft)
{
ssize_t count, used;
@@ -189,7 +190,7 @@ static ssize_t ami_ct_s8(const u_char *userPtr, size_t userCount,
*/
#define GENERATE_AMI_CT8(funcname, convsample) \
-static ssize_t funcname(const u_char *userPtr, size_t userCount, \
+static ssize_t funcname(const u_char __user *userPtr, size_t userCount, \
u_char frame[], ssize_t *frameUsed, \
ssize_t frameLeft) \
{ \
@@ -240,10 +241,11 @@ GENERATE_AMI_CT8(ami_ct_u8, AMI_CT_U8)
*/
#define GENERATE_AMI_CT_16(funcname, convsample) \
-static ssize_t funcname(const u_char *userPtr, size_t userCount, \
+static ssize_t funcname(const u_char __user *userPtr, size_t userCount, \
u_char frame[], ssize_t *frameUsed, \
ssize_t frameLeft) \
{ \
+ const u_short __user *ptr = (const u_short __user *)userPtr; \
ssize_t count, used; \
u_short data; \
\
@@ -253,7 +255,7 @@ static ssize_t funcname(const u_char *userPtr, size_t userCount, \
count = min_t(size_t, userCount, frameLeft)>>1 & ~1; \
used = count*2; \
while (count > 0) { \
- if (get_user(data, ((u_short *)userPtr)++)) \
+ if (get_user(data, ptr++)) \
return -EFAULT; \
data = convsample(data); \
*high++ = data>>8; \
@@ -268,12 +270,12 @@ static ssize_t funcname(const u_char *userPtr, size_t userCount, \
count = min_t(size_t, userCount, frameLeft)>>2 & ~1; \
used = count*4; \
while (count > 0) { \
- if (get_user(data, ((u_short *)userPtr)++)) \
+ if (get_user(data, ptr++)) \
return -EFAULT; \
data = convsample(data); \
*lefth++ = data>>8; \
*leftl++ = (data>>2) & 0x3f; \
- if (get_user(data, ((u_short *)userPtr)++)) \
+ if (get_user(data, ptr++)) \
return -EFAULT; \
data = convsample(data); \
*righth++ = data>>8; \
diff --git a/sound/oss/dmasound/dmasound_q40.c b/sound/oss/dmasound/dmasound_q40.c
index 1ddaa6284b0..e2081f32b0c 100644
--- a/sound/oss/dmasound/dmasound_q40.c
+++ b/sound/oss/dmasound/dmasound_q40.c
@@ -58,7 +58,7 @@ static void Q40Interrupt(void);
/* userCount, frameUsed, frameLeft == byte counts */
-static ssize_t q40_ct_law(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ct_law(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -79,7 +79,7 @@ static ssize_t q40_ct_law(const u_char *userPtr, size_t userCount,
}
-static ssize_t q40_ct_s8(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ct_s8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -98,7 +98,7 @@ static ssize_t q40_ct_s8(const u_char *userPtr, size_t userCount,
return used;
}
-static ssize_t q40_ct_u8(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ct_u8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -114,7 +114,7 @@ static ssize_t q40_ct_u8(const u_char *userPtr, size_t userCount,
/* a bit too complicated to optimise right now ..*/
-static ssize_t q40_ctx_law(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ctx_law(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -152,7 +152,7 @@ static ssize_t q40_ctx_law(const u_char *userPtr, size_t userCount,
}
-static ssize_t q40_ctx_s8(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ctx_s8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -189,7 +189,7 @@ static ssize_t q40_ctx_s8(const u_char *userPtr, size_t userCount,
}
-static ssize_t q40_ctx_u8(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ctx_u8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -224,7 +224,7 @@ static ssize_t q40_ctx_u8(const u_char *userPtr, size_t userCount,
}
/* compressing versions */
-static ssize_t q40_ctc_law(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ctc_law(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -265,7 +265,7 @@ static ssize_t q40_ctc_law(const u_char *userPtr, size_t userCount,
}
-static ssize_t q40_ctc_s8(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ctc_s8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
@@ -304,7 +304,7 @@ static ssize_t q40_ctc_s8(const u_char *userPtr, size_t userCount,
}
-static ssize_t q40_ctc_u8(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ctc_u8(const u_char __user *userPtr, size_t userCount,
u_char frame[], ssize_t *frameUsed,
ssize_t frameLeft)
{
diff --git a/sound/oss/dmasound/tas_common.c b/sound/oss/dmasound/tas_common.c
index d36a1fe2fcf..81315996c0f 100644
--- a/sound/oss/dmasound/tas_common.c
+++ b/sound/oss/dmasound/tas_common.c
@@ -47,9 +47,9 @@ static int tas_attach_adapter(struct i2c_adapter *);
static int tas_detach_client(struct i2c_client *);
struct i2c_driver tas_driver = {
- .owner = THIS_MODULE,
- .name = "tas",
- .flags = I2C_DF_NOTIFY,
+ .driver = {
+ .name = "tas",
+ },
.attach_adapter = tas_attach_adapter,
.detach_client = tas_detach_client,
};
diff --git a/sound/oss/dmasound/trans_16.c b/sound/oss/dmasound/trans_16.c
index 23562e94780..ca973ac2a30 100644
--- a/sound/oss/dmasound/trans_16.c
+++ b/sound/oss/dmasound/trans_16.c
@@ -17,6 +17,7 @@
#include <asm/uaccess.h>
#include "dmasound.h"
+extern int expand_bal; /* Balance factor for expanding (not volume!) */
static short dmasound_alaw2dma16[] ;
static short dmasound_ulaw2dma16[] ;
diff --git a/sound/oss/emu10k1/main.c b/sound/oss/emu10k1/main.c
index 9b905bae423..23241cbdd90 100644
--- a/sound/oss/emu10k1/main.c
+++ b/sound/oss/emu10k1/main.c
@@ -1428,7 +1428,7 @@ static int __init emu10k1_init_module(void)
{
printk(KERN_INFO "Creative EMU10K1 PCI Audio Driver, version " DRIVER_VERSION ", " __TIME__ " " __DATE__ "\n");
- return pci_module_init(&emu10k1_pci_driver);
+ return pci_register_driver(&emu10k1_pci_driver);
}
static void __exit emu10k1_cleanup_module(void)
diff --git a/sound/oss/es1370.c b/sound/oss/es1370.c
index 8538085086e..ae55c536613 100644
--- a/sound/oss/es1370.c
+++ b/sound/oss/es1370.c
@@ -2779,7 +2779,7 @@ static struct pci_driver es1370_driver = {
static int __init init_es1370(void)
{
printk(KERN_INFO "es1370: version v0.38 time " __TIME__ " " __DATE__ "\n");
- return pci_module_init(&es1370_driver);
+ return pci_register_driver(&es1370_driver);
}
static void __exit cleanup_es1370(void)
diff --git a/sound/oss/es1371.c b/sound/oss/es1371.c
index 12a56d5ab49..5c697f16257 100644
--- a/sound/oss/es1371.c
+++ b/sound/oss/es1371.c
@@ -3090,7 +3090,7 @@ static struct pci_driver es1371_driver = {
static int __init init_es1371(void)
{
printk(KERN_INFO PFX "version v0.32 time " __TIME__ " " __DATE__ "\n");
- return pci_module_init(&es1371_driver);
+ return pci_register_driver(&es1371_driver);
}
static void __exit cleanup_es1371(void)
diff --git a/sound/oss/harmony.c b/sound/oss/harmony.c
index bee9d344cd2..591683c55f2 100644
--- a/sound/oss/harmony.c
+++ b/sound/oss/harmony.c
@@ -1236,7 +1236,7 @@ harmony_driver_probe(struct parisc_device *dev)
}
/* Set the HPA of harmony */
- harmony.hpa = (struct harmony_hpa *)dev->hpa;
+ harmony.hpa = (struct harmony_hpa *)dev->hpa.start;
harmony.dev = dev;
/* Grab the ID and revision from the device */
@@ -1250,7 +1250,7 @@ harmony_driver_probe(struct parisc_device *dev)
printk(KERN_INFO "Lasi Harmony Audio driver " HARMONY_VERSION ", "
"h/w id %i, rev. %i at 0x%lx, IRQ %i\n",
- id, rev, dev->hpa, harmony.dev->irq);
+ id, rev, dev->hpa.start, harmony.dev->irq);
/* Make sure the control bit isn't set, although I don't think it
ever is. */
diff --git a/sound/oss/i810_audio.c b/sound/oss/i810_audio.c
index b9a640fe48b..abc242abd5b 100644
--- a/sound/oss/i810_audio.c
+++ b/sound/oss/i810_audio.c
@@ -312,7 +312,8 @@ static struct pci_device_id i810_pci_tbl [] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, INTELICH4},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_18,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, INTELICH4},
-
+ {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_AUDIO,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, NVIDIA_NFORCE},
{0,}
};
@@ -3359,12 +3360,6 @@ static int __devinit i810_probe(struct pci_dev *pci_dev, const struct pci_device
goto out_region2;
}
- if (request_irq(card->irq, &i810_interrupt, SA_SHIRQ,
- card_names[pci_id->driver_data], card)) {
- printk(KERN_ERR "i810_audio: unable to allocate irq %d\n", card->irq);
- goto out_pio;
- }
-
if (card->use_mmio) {
if (request_mem_region(card->ac97base_mmio_phys, 512, "ich_audio MMBAR")) {
if ((card->ac97base_mmio = ioremap(card->ac97base_mmio_phys, 512))) { /*@FIXME can ioremap fail? don't know (jsaw) */
@@ -3395,10 +3390,8 @@ static int __devinit i810_probe(struct pci_dev *pci_dev, const struct pci_device
}
/* initialize AC97 codec and register /dev/mixer */
- if (i810_ac97_init(card) <= 0) {
- free_irq(card->irq, card);
+ if (i810_ac97_init(card) <= 0)
goto out_iospace;
- }
pci_set_drvdata(pci_dev, card);
if(clocking == 0) {
@@ -3410,7 +3403,6 @@ static int __devinit i810_probe(struct pci_dev *pci_dev, const struct pci_device
if ((card->dev_audio = register_sound_dsp(&i810_audio_fops, -1)) < 0) {
int i;
printk(KERN_ERR "i810_audio: couldn't register DSP device!\n");
- free_irq(card->irq, card);
for (i = 0; i < NR_AC97; i++)
if (card->ac97_codec[i] != NULL) {
unregister_sound_mixer(card->ac97_codec[i]->dev_mixer);
@@ -3419,6 +3411,13 @@ static int __devinit i810_probe(struct pci_dev *pci_dev, const struct pci_device
goto out_iospace;
}
+ if (request_irq(card->irq, &i810_interrupt, SA_SHIRQ,
+ card_names[pci_id->driver_data], card)) {
+ printk(KERN_ERR "i810_audio: unable to allocate irq %d\n", card->irq);
+ goto out_iospace;
+ }
+
+
card->initializing = 0;
return 0;
@@ -3429,7 +3428,6 @@ out_iospace:
release_mem_region(card->ac97base_mmio_phys, 512);
release_mem_region(card->iobase_mmio_phys, 256);
}
-out_pio:
release_region(card->ac97base, 256);
out_region2:
release_region(card->iobase, 64);
diff --git a/sound/oss/ite8172.c b/sound/oss/ite8172.c
index 26e5944b6ba..8fd2f9a9e66 100644
--- a/sound/oss/ite8172.c
+++ b/sound/oss/ite8172.c
@@ -2206,7 +2206,7 @@ static struct pci_driver it8172_driver = {
static int __init init_it8172(void)
{
info("version v0.5 time " __TIME__ " " __DATE__);
- return pci_module_init(&it8172_driver);
+ return pci_register_driver(&it8172_driver);
}
static void __exit cleanup_it8172(void)
diff --git a/sound/oss/kahlua.c b/sound/oss/kahlua.c
index 808c5ef969b..2835a7c038e 100644
--- a/sound/oss/kahlua.c
+++ b/sound/oss/kahlua.c
@@ -218,7 +218,7 @@ static struct pci_driver kahlua_driver = {
static int __init kahlua_init_module(void)
{
printk(KERN_INFO "Cyrix Kahlua VSA1 XpressAudio support (c) Copyright 2003 Red Hat Inc\n");
- return pci_module_init(&kahlua_driver);
+ return pci_register_driver(&kahlua_driver);
}
static void __devexit kahlua_cleanup_module(void)
diff --git a/sound/oss/maestro.c b/sound/oss/maestro.c
index 3abd3541cbc..d4b569acf76 100644
--- a/sound/oss/maestro.c
+++ b/sound/oss/maestro.c
@@ -230,10 +230,6 @@
#include <asm/page.h>
#include <asm/uaccess.h>
-#include <linux/pm.h>
-#include <linux/pm_legacy.h>
-static int maestro_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *d);
-
#include "maestro.h"
static struct pci_driver maestro_pci_driver;
@@ -3404,7 +3400,6 @@ maestro_probe(struct pci_dev *pcidev,const struct pci_device_id *pdid)
int i, ret;
struct ess_card *card;
struct ess_state *ess;
- struct pm_dev *pmdev;
int num = 0;
/* when built into the kernel, we only print version if device is found */
@@ -3450,11 +3445,6 @@ maestro_probe(struct pci_dev *pcidev,const struct pci_device_id *pdid)
memset(card, 0, sizeof(*card));
card->pcidev = pcidev;
- pmdev = pm_register(PM_PCI_DEV, PM_PCI_ID(pcidev),
- maestro_pm_callback);
- if (pmdev)
- pmdev->data = card;
-
card->iobase = iobase;
card->card_type = card_type;
card->irq = pcidev->irq;
@@ -3634,7 +3624,7 @@ static int __init init_maestro(void)
{
int rc;
- rc = pci_module_init(&maestro_pci_driver);
+ rc = pci_register_driver(&maestro_pci_driver);
if (rc < 0)
return rc;
@@ -3670,7 +3660,6 @@ static int maestro_notifier(struct notifier_block *nb, unsigned long event, void
static void cleanup_maestro(void) {
M_printk("maestro: unloading\n");
pci_unregister_driver(&maestro_pci_driver);
- pm_unregister_all(maestro_pm_callback);
unregister_reboot_notifier(&maestro_nb);
}
@@ -3691,143 +3680,5 @@ check_suspend(struct ess_card *card)
current->state = TASK_RUNNING;
}
-static int
-maestro_suspend(struct ess_card *card)
-{
- unsigned long flags;
- int i,j;
-
- spin_lock_irqsave(&card->lock,flags); /* over-kill */
-
- M_printk("maestro: apm in dev %p\n",card);
-
- /* we have to read from the apu regs, need
- to power it up */
- maestro_power(card,ACPI_D0);
-
- for(i=0;i<NR_DSPS;i++) {
- struct ess_state *s = &card->channels[i];
-
- if(s->dev_audio == -1)
- continue;
-
- M_printk("maestro: stopping apus for device %d\n",i);
- stop_dac(s);
- stop_adc(s);
- for(j=0;j<6;j++)
- card->apu_map[s->apu[j]][5]=apu_get_register(s,j,5);
-
- }
-
- /* get rid of interrupts? */
- if( card->dsps_open > 0)
- stop_bob(&card->channels[0]);
-
- card->in_suspend++;
-
- spin_unlock_irqrestore(&card->lock,flags);
-
- /* we trust in the bios to power down the chip on suspend.
- * XXX I'm also not sure that in_suspend will protect
- * against all reg accesses from here on out.
- */
- return 0;
-}
-static int
-maestro_resume(struct ess_card *card)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&card->lock,flags); /* over-kill */
-
- card->in_suspend = 0;
-
- M_printk("maestro: resuming card at %p\n",card);
-
- /* restore all our config */
- maestro_config(card);
- /* need to restore the base pointers.. */
- if(card->dmapages)
- set_base_registers(&card->channels[0],card->dmapages);
-
- mixer_push_state(card);
-
- /* set each channels' apu control registers before
- * restoring audio
- */
- for(i=0;i<NR_DSPS;i++) {
- struct ess_state *s = &card->channels[i];
- int chan,reg;
-
- if(s->dev_audio == -1)
- continue;
-
- for(chan = 0 ; chan < 6 ; chan++) {
- wave_set_register(s,s->apu[chan]<<3,s->apu_base[chan]);
- for(reg = 1 ; reg < NR_APU_REGS ; reg++)
- apu_set_register(s,chan,reg,s->card->apu_map[s->apu[chan]][reg]);
- }
- for(chan = 0 ; chan < 6 ; chan++)
- apu_set_register(s,chan,0,s->card->apu_map[s->apu[chan]][0] & 0xFF0F);
- }
-
- /* now we flip on the music */
-
- if( card->dsps_open <= 0) {
- /* this card's idle */
- maestro_power(card,ACPI_D2);
- } else {
- /* ok, we're actually playing things on
- this card */
- maestro_power(card,ACPI_D0);
- start_bob(&card->channels[0]);
- for(i=0;i<NR_DSPS;i++) {
- struct ess_state *s = &card->channels[i];
-
- /* these use the apu_mode, and can handle
- spurious calls */
- start_dac(s);
- start_adc(s);
- }
- }
-
- spin_unlock_irqrestore(&card->lock,flags);
-
- /* all right, we think things are ready,
- wake up people who were using the device
- when we suspended */
- wake_up(&(card->suspend_queue));
-
- return 0;
-}
-
-int
-maestro_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
-{
- struct ess_card *card = (struct ess_card*) dev->data;
-
- if ( ! card ) goto out;
-
- M_printk("maestro: pm event 0x%x received for card %p\n", rqst, card);
-
- switch (rqst) {
- case PM_SUSPEND:
- maestro_suspend(card);
- break;
- case PM_RESUME:
- maestro_resume(card);
- break;
- /*
- * we'd also like to find out about
- * power level changes because some biosen
- * do mean things to the maestro when they
- * change their power state.
- */
- }
-out:
- return 0;
-}
-
module_init(init_maestro);
module_exit(cleanup_maestro);
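The maestro hunks (and the nm256 and opl3sa2 hunks that follow) drop the old pm_register()/pm_dev callback interface; the commit removes the legacy hooks without adding replacements. For reference, the driver-model way to receive the same suspend/resume notifications in a 2.6 PCI driver is through the pci_driver .suspend/.resume methods; the sketch below shows only that generic pattern, with my_card and the save/restore helpers as placeholders, not code from this driver:

#include <linux/pci.h>
#include <linux/pm.h>

struct my_card { int dummy; };                           /* driver state stand-in */
static void my_stop_dma(struct my_card *card);           /* quiesce the hardware  */
static void my_restore_registers(struct my_card *card);  /* reprogram codec state */

static int my_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct my_card *card = pci_get_drvdata(pdev);

        my_stop_dma(card);
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

static int my_resume(struct pci_dev *pdev)
{
        struct my_card *card = pci_get_drvdata(pdev);

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        if (pci_enable_device(pdev))
                return -EIO;
        my_restore_registers(card);
        return 0;
}

static struct pci_driver my_driver = {
        .name    = "my_audio",
        /* .id_table, .probe and .remove omitted for brevity */
        .suspend = my_suspend,
        .resume  = my_resume,
};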
diff --git a/sound/oss/nec_vrc5477.c b/sound/oss/nec_vrc5477.c
index 9ac4bf7e1e8..fbb9170e8e0 100644
--- a/sound/oss/nec_vrc5477.c
+++ b/sound/oss/nec_vrc5477.c
@@ -2045,7 +2045,7 @@ static struct pci_driver vrc5477_ac97_driver = {
static int __init init_vrc5477_ac97(void)
{
printk("Vrc5477 AC97 driver: version v0.2 time " __TIME__ " " __DATE__ " by Jun Sun\n");
- return pci_module_init(&vrc5477_ac97_driver);
+ return pci_register_driver(&vrc5477_ac97_driver);
}
static void __exit cleanup_vrc5477_ac97(void)
diff --git a/sound/oss/nm256_audio.c b/sound/oss/nm256_audio.c
index 0ce2c404a73..7de079b202f 100644
--- a/sound/oss/nm256_audio.c
+++ b/sound/oss/nm256_audio.c
@@ -24,8 +24,6 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/pm.h>
-#include <linux/pm_legacy.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include "sound_config.h"
@@ -49,7 +47,6 @@ static int nm256_grabInterrupt (struct nm256_info *card);
static int nm256_releaseInterrupt (struct nm256_info *card);
static irqreturn_t nm256_interrupt (int irq, void *dev_id, struct pt_regs *dummy);
static irqreturn_t nm256_interrupt_zx (int irq, void *dev_id, struct pt_regs *dummy);
-static int handle_pm_event (struct pm_dev *dev, pm_request_t rqst, void *data);
/* These belong in linux/pci.h. */
#define PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO 0x8005
@@ -992,15 +989,6 @@ nm256_install_mixer (struct nm256_info *card)
return 0;
}
-/* Perform a full reset on the hardware; this is invoked when an APM
- resume event occurs. */
-static void
-nm256_full_reset (struct nm256_info *card)
-{
- nm256_initHw (card);
- ac97_reset (&(card->mdev));
-}
-
/*
* See if the signature left by the NM256 BIOS is intact; if so, we use
* the associated address as the end of our audio buffer in the video
@@ -1053,7 +1041,6 @@ static int __devinit
nm256_install(struct pci_dev *pcidev, enum nm256rev rev, char *verstr)
{
struct nm256_info *card;
- struct pm_dev *pmdev;
int x;
if (pci_enable_device(pcidev))
@@ -1234,43 +1221,10 @@ nm256_install(struct pci_dev *pcidev, enum nm256rev rev, char *verstr)
nm256_install_mixer (card);
- pmdev = pm_register(PM_PCI_DEV, PM_PCI_ID(pcidev), handle_pm_event);
- if (pmdev)
- pmdev->data = card;
-
return 1;
}
-/*
- * PM event handler, so the card is properly reinitialized after a power
- * event.
- */
-static int
-handle_pm_event (struct pm_dev *dev, pm_request_t rqst, void *data)
-{
- struct nm256_info *crd = (struct nm256_info*) dev->data;
- if (crd) {
- switch (rqst) {
- case PM_SUSPEND:
- break;
- case PM_RESUME:
- {
- int playing = crd->playing;
- nm256_full_reset (crd);
- /*
- * A little ugly, but that's ok; pretend the
- * block we were playing is done.
- */
- if (playing)
- DMAbuf_outputintr (crd->dev_for_play, 1);
- }
- break;
- }
- }
- return 0;
-}
-
static int __devinit
nm256_probe(struct pci_dev *pcidev,const struct pci_device_id *pciid)
{
@@ -1690,13 +1644,12 @@ module_param(force_load, bool, 0);
static int __init do_init_nm256(void)
{
printk (KERN_INFO "NeoMagic 256AV/256ZX audio driver, version 1.1p\n");
- return pci_module_init(&nm256_pci_driver);
+ return pci_register_driver(&nm256_pci_driver);
}
static void __exit cleanup_nm256 (void)
{
pci_unregister_driver(&nm256_pci_driver);
- pm_unregister_all (&handle_pm_event);
}
module_init(do_init_nm256);
diff --git a/sound/oss/opl3sa2.c b/sound/oss/opl3sa2.c
index cd41d0e4706..5cecdbcbea9 100644
--- a/sound/oss/opl3sa2.c
+++ b/sound/oss/opl3sa2.c
@@ -69,8 +69,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/pm.h>
-#include <linux/pm_legacy.h>
#include "sound_config.h"
#include "ad1848.h"
@@ -139,10 +137,6 @@ typedef struct {
struct pnp_dev* pdev;
int activated; /* Whether said devices have been activated */
#endif
-#ifdef CONFIG_PM_LEGACY
- unsigned int in_suspend;
- struct pm_dev *pmdev;
-#endif
unsigned int card;
int chipset; /* What's my version(s)? */
char *chipset_name;
@@ -341,22 +335,6 @@ static void opl3sa2_mixer_reset(opl3sa2_state_t* devc)
}
}
-/* Currently only used for power management */
-#ifdef CONFIG_PM_LEGACY
-static void opl3sa2_mixer_restore(opl3sa2_state_t* devc)
-{
- if (devc) {
- opl3sa2_set_volume(devc, devc->volume_l, devc->volume_r);
- opl3sa2_set_mic(devc, devc->mic);
-
- if (devc->chipset == CHIPSET_OPL3SA3) {
- opl3sa3_set_bass(devc, devc->bass_l, devc->bass_r);
- opl3sa3_set_treble(devc, devc->treble_l, devc->treble_r);
- }
- }
-}
-#endif /* CONFIG_PM_LEGACY */
-
static inline void arg_to_vol_mono(unsigned int vol, int* value)
{
int left;
@@ -832,84 +810,6 @@ static struct pnp_driver opl3sa2_driver = {
/* End of component functions */
-#ifdef CONFIG_PM_LEGACY
-
-static DEFINE_SPINLOCK(opl3sa2_lock);
-
-/* Power Management support functions */
-static int opl3sa2_suspend(struct pm_dev *pdev, unsigned int pm_mode)
-{
- unsigned long flags;
- opl3sa2_state_t *p;
-
- if (!pdev)
- return -EINVAL;
-
- spin_lock_irqsave(&opl3sa2_lock,flags);
-
- p = (opl3sa2_state_t *) pdev->data;
- switch (pm_mode) {
- case 1:
- pm_mode = OPL3SA2_PM_MODE1;
- break;
- case 2:
- pm_mode = OPL3SA2_PM_MODE2;
- break;
- case 3:
- pm_mode = OPL3SA2_PM_MODE3;
- break;
- default:
- /* we don't know howto handle this... */
- spin_unlock_irqrestore(&opl3sa2_lock, flags);
- return -EBUSY;
- }
-
- p->in_suspend = 1;
-
- /* its supposed to automute before suspending, so we won't bother */
- opl3sa2_write(p->cfg_port, OPL3SA2_PM, pm_mode);
- /* wait a while for the clock oscillator to stabilise */
- mdelay(10);
-
- spin_unlock_irqrestore(&opl3sa2_lock,flags);
- return 0;
-}
-
-static int opl3sa2_resume(struct pm_dev *pdev)
-{
- unsigned long flags;
- opl3sa2_state_t *p;
-
- if (!pdev)
- return -EINVAL;
-
- p = (opl3sa2_state_t *) pdev->data;
- spin_lock_irqsave(&opl3sa2_lock,flags);
-
- /* I don't think this is necessary */
- opl3sa2_write(p->cfg_port, OPL3SA2_PM, OPL3SA2_PM_MODE0);
- opl3sa2_mixer_restore(p);
- p->in_suspend = 0;
-
- spin_unlock_irqrestore(&opl3sa2_lock,flags);
- return 0;
-}
-
-static int opl3sa2_pm_callback(struct pm_dev *pdev, pm_request_t rqst, void *data)
-{
- unsigned long mode = (unsigned long)data;
-
- switch (rqst) {
- case PM_SUSPEND:
- return opl3sa2_suspend(pdev, mode);
-
- case PM_RESUME:
- return opl3sa2_resume(pdev);
- }
- return 0;
-}
-#endif /* CONFIG_PM_LEGACY */
-
/*
* Install OPL3-SA2 based card(s).
*
@@ -1021,12 +921,6 @@ static int __init init_opl3sa2(void)
/* ewww =) */
opl3sa2_state[card].card = card;
-#ifdef CONFIG_PM_LEGACY
- /* register our power management capabilities */
- opl3sa2_state[card].pmdev = pm_register(PM_ISA_DEV, card, opl3sa2_pm_callback);
- if (opl3sa2_state[card].pmdev)
- opl3sa2_state[card].pmdev->data = &opl3sa2_state[card];
-#endif /* CONFIG_PM_LEGACY */
/*
* Set the Yamaha 3D enhancement mode (aka Ymersion) if asked to and
@@ -1083,10 +977,6 @@ static void __exit cleanup_opl3sa2(void)
int card;
for(card = 0; card < opl3sa2_cards_num; card++) {
-#ifdef CONFIG_PM_LEGACY
- if (opl3sa2_state[card].pmdev)
- pm_unregister(opl3sa2_state[card].pmdev);
-#endif
if (opl3sa2_state[card].cfg_mpu.slots[1] != -1) {
unload_opl3sa2_mpu(&opl3sa2_state[card].cfg_mpu);
}
diff --git a/sound/oss/rme96xx.c b/sound/oss/rme96xx.c
index 318dc51009f..faa0b7919b6 100644
--- a/sound/oss/rme96xx.c
+++ b/sound/oss/rme96xx.c
@@ -1095,7 +1095,7 @@ static int __init init_rme96xx(void)
devices = ((devices-1) & RME96xx_MASK_DEVS) + 1;
printk(KERN_INFO RME_MESS" reserving %d dsp device(s)\n",devices);
numcards = 0;
- return pci_module_init(&rme96xx_driver);
+ return pci_register_driver(&rme96xx_driver);
}
static void __exit cleanup_rme96xx(void)
diff --git a/sound/oss/sonicvibes.c b/sound/oss/sonicvibes.c
index 17d0e461f8d..71b05e2f697 100644
--- a/sound/oss/sonicvibes.c
+++ b/sound/oss/sonicvibes.c
@@ -2765,7 +2765,7 @@ static int __init init_sonicvibes(void)
if (!(wavetable_mem = __get_free_pages(GFP_KERNEL, 20-PAGE_SHIFT)))
printk(KERN_INFO "sv: cannot allocate 1MB of contiguous nonpageable memory for wavetable data\n");
#endif
- return pci_module_init(&sv_driver);
+ return pci_register_driver(&sv_driver);
}
static void __exit cleanup_sonicvibes(void)
diff --git a/sound/oss/ymfpci.c b/sound/oss/ymfpci.c
index 8dae59bd05a..f8bd72e46f5 100644
--- a/sound/oss/ymfpci.c
+++ b/sound/oss/ymfpci.c
@@ -2680,7 +2680,7 @@ static struct pci_driver ymfpci_driver = {
static int __init ymf_init_module(void)
{
- return pci_module_init(&ymfpci_driver);
+ return pci_register_driver(&ymfpci_driver);
}
static void __exit ymf_cleanup_module (void)
diff --git a/sound/pci/cs5535audio/cs5535audio.c b/sound/pci/cs5535audio/cs5535audio.c
index 202c7cf3e32..f36ede82747 100644
--- a/sound/pci/cs5535audio/cs5535audio.c
+++ b/sound/pci/cs5535audio/cs5535audio.c
@@ -385,7 +385,7 @@ static struct pci_driver driver = {
static int __init alsa_card_cs5535audio_init(void)
{
- return pci_module_init(&driver);
+ return pci_register_driver(&driver);
}
static void __exit alsa_card_cs5535audio_exit(void)
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 1a903390ad6..50983725273 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -27,6 +27,7 @@
#include <sound/driver.h>
#include <linux/pci.h>
+#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c b/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
index 09cb250d582..962e6d52556 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
@@ -209,7 +209,7 @@ static int pdacf_pcm_prepare(struct snd_pcm_substream *subs)
case SNDRV_PCM_FORMAT_S24_3LE:
case SNDRV_PCM_FORMAT_S24_3BE:
chip->pcm_sample = 3;
- /* fall trough */
+ /* fall through */
default: /* 24-bit */
aval = AK4117_DIF_24R;
chip->pcm_frame = 3;
diff --git a/sound/ppc/keywest.c b/sound/ppc/keywest.c
index 097fbcfc5d4..6058c2dd1b7 100644
--- a/sound/ppc/keywest.c
+++ b/sound/ppc/keywest.c
@@ -41,9 +41,10 @@ static int keywest_attach_adapter(struct i2c_adapter *adapter);
static int keywest_detach_client(struct i2c_client *client);
struct i2c_driver keywest_driver = {
- .name = "PMac Keywest Audio",
+ .driver = {
+ .name = "PMac Keywest Audio",
+ },
.id = I2C_DRIVERID_KEYWEST,
- .flags = I2C_DF_NOTIFY,
.attach_adapter = &keywest_attach_adapter,
.detach_client = &keywest_detach_client,
};
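The keywest change is part of the 2.6.16-era i2c_driver conversion: the driver's name moves into the embedded struct device_driver, and the I2C_DF_NOTIFY flag disappears along with it (the core began notifying legacy-style drivers of adapters unconditionally). A minimal sketch of a driver declared and registered the new way, with example_* names as placeholders:

#include <linux/module.h>
#include <linux/i2c.h>

static int example_attach_adapter(struct i2c_adapter *adapter);
static int example_detach_client(struct i2c_client *client);

static struct i2c_driver example_i2c_driver = {
        .driver = {
                .name = "example-codec",        /* formerly .name at the top level */
        },
        .attach_adapter = example_attach_adapter,
        .detach_client  = example_detach_client,
};

static int __init example_init(void)
{
        return i2c_add_driver(&example_i2c_driver);
}

static void __exit example_exit(void)
{
        i2c_del_driver(&example_i2c_driver);
}

module_init(example_init);
module_exit(example_exit);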
diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c
index 9b2b00fdc1a..a642e4cfcf4 100644
--- a/sound/ppc/pmac.c
+++ b/sound/ppc/pmac.c
@@ -803,21 +803,17 @@ static int snd_pmac_free(struct snd_pmac *chip)
iounmap(chip->playback.dma);
if (chip->capture.dma)
iounmap(chip->capture.dma);
-#ifndef CONFIG_PPC64
+
if (chip->node) {
int i;
-
for (i = 0; i < 3; i++) {
- if (chip->of_requested & (1 << i)) {
- if (chip->is_k2)
- release_OF_resource(chip->node->parent,
- i);
- else
- release_OF_resource(chip->node, i);
- }
+ if (chip->requested & (1 << i))
+ release_mem_region(chip->rsrc[i].start,
+ chip->rsrc[i].end -
+ chip->rsrc[i].start + 1);
}
}
-#endif /* CONFIG_PPC64 */
+
if (chip->pdev)
pci_dev_put(chip->pdev);
kfree(chip);
@@ -991,6 +987,11 @@ static int __init snd_pmac_detect(struct snd_pmac *chip)
chip->can_byte_swap = 0; /* FIXME: check this */
chip->control_mask = MASK_IEPC | 0x11;/* disable IEE */
break;
+ default:
+ printk(KERN_ERR "snd: Unknown layout ID 0x%x\n",
+ layout_id);
+ return -ENODEV;
+
}
}
prop = (unsigned int *)get_property(sound, "device-id", NULL);
@@ -1175,46 +1176,69 @@ int __init snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return)
}
np = chip->node;
+ chip->requested = 0;
if (chip->is_k2) {
- if (np->parent->n_addrs < 2 || np->n_intrs < 3) {
+ static char *rnames[] = {
+ "Sound Control", "Sound DMA" };
+ if (np->n_intrs < 3) {
err = -ENODEV;
goto __error;
}
- for (i = 0; i < 2; i++) {
-#ifndef CONFIG_PPC64
- static char *name[2] = { "- Control", "- DMA" };
- if (! request_OF_resource(np->parent, i, name[i])) {
- snd_printk(KERN_ERR "pmac: can't request resource %d!\n", i);
+ for (i = 0; i < 2; i ++) {
+ if (of_address_to_resource(np->parent, i,
+ &chip->rsrc[i])) {
+ printk(KERN_ERR "snd: can't translate rsrc "
+ " %d (%s)\n", i, rnames[i]);
+ err = -ENODEV;
+ goto __error;
+ }
+ if (request_mem_region(chip->rsrc[i].start,
+ chip->rsrc[i].end -
+ chip->rsrc[i].start + 1,
+ rnames[i]) == NULL) {
+ printk(KERN_ERR "snd: can't request rsrc "
+ " %d (%s: 0x%08lx:%08lx)\n",
+ i, rnames[i], chip->rsrc[i].start,
+ chip->rsrc[i].end);
err = -ENODEV;
goto __error;
}
- chip->of_requested |= (1 << i);
-#endif /* CONFIG_PPC64 */
- ctrl_addr = np->parent->addrs[0].address;
- txdma_addr = np->parent->addrs[1].address;
- rxdma_addr = txdma_addr + 0x100;
+ chip->requested |= (1 << i);
}
-
+ ctrl_addr = chip->rsrc[0].start;
+ txdma_addr = chip->rsrc[1].start;
+ rxdma_addr = txdma_addr + 0x100;
} else {
- if (np->n_addrs < 3 || np->n_intrs < 3) {
+ static char *rnames[] = {
+ "Sound Control", "Sound Tx DMA", "Sound Rx DMA" };
+ if (np->n_intrs < 3) {
err = -ENODEV;
goto __error;
}
-
- for (i = 0; i < 3; i++) {
-#ifndef CONFIG_PPC64
- static char *name[3] = { "- Control", "- Tx DMA", "- Rx DMA" };
- if (! request_OF_resource(np, i, name[i])) {
- snd_printk(KERN_ERR "pmac: can't request resource %d!\n", i);
+ for (i = 0; i < 3; i ++) {
+ if (of_address_to_resource(np->parent, i,
+ &chip->rsrc[i])) {
+ printk(KERN_ERR "snd: can't translate rsrc "
+ " %d (%s)\n", i, rnames[i]);
+ err = -ENODEV;
+ goto __error;
+ }
+ if (request_mem_region(chip->rsrc[i].start,
+ chip->rsrc[i].end -
+ chip->rsrc[i].start + 1,
+ rnames[i]) == NULL) {
+ printk(KERN_ERR "snd: can't request rsrc "
+ " %d (%s: 0x%08lx:%08lx)\n",
+ i, rnames[i], chip->rsrc[i].start,
+ chip->rsrc[i].end);
err = -ENODEV;
goto __error;
}
- chip->of_requested |= (1 << i);
-#endif /* CONFIG_PPC64 */
- ctrl_addr = np->addrs[0].address;
- txdma_addr = np->addrs[1].address;
- rxdma_addr = np->addrs[2].address;
+ chip->requested |= (1 << i);
}
+ ctrl_addr = chip->rsrc[0].start;
+ txdma_addr = chip->rsrc[1].start;
+ rxdma_addr = chip->rsrc[2].start;
}
chip->awacs = ioremap(ctrl_addr, 0x1000);
@@ -1266,9 +1290,11 @@ int __init snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return)
} else if (chip->is_pbook_G3) {
struct device_node* mio;
for (mio = chip->node->parent; mio; mio = mio->parent) {
- if (strcmp(mio->name, "mac-io") == 0
- && mio->n_addrs > 0) {
- chip->macio_base = ioremap(mio->addrs[0].address, 0x40);
+ if (strcmp(mio->name, "mac-io") == 0) {
+ struct resource r;
+ if (of_address_to_resource(mio, 0, &r) == 0)
+ chip->macio_base =
+ ioremap(r.start, 0x40);
break;
}
}
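The pmac.c hunks above replace the ppc32-only request_OF_resource()/release_OF_resource() pair with of_address_to_resource() followed by plain request_mem_region()/release_mem_region(), so the same claim/map/release code runs on both ppc32 and ppc64; the matching pmac.h change below adds the rsrc[] array that holds the translated resources. A small sketch of that pattern, assuming hypothetical example_* helpers and an "example-audio" region label:

#include <linux/ioport.h>
#include <asm/prom.h>
#include <asm/io.h>

/* Translate OF "reg" entry <index> of <np> into a resource, reserve it,
 * and map it; returns NULL if the entry is missing, already claimed,
 * or cannot be mapped. */
static void __iomem *example_map_of_region(struct device_node *np, int index,
                                           struct resource *res)
{
        void __iomem *base;

        if (of_address_to_resource(np, index, res))
                return NULL;

        if (!request_mem_region(res->start, res->end - res->start + 1,
                                "example-audio"))
                return NULL;

        base = ioremap(res->start, res->end - res->start + 1);
        if (!base)
                release_mem_region(res->start, res->end - res->start + 1);
        return base;
}

static void example_unmap_of_region(void __iomem *base, struct resource *res)
{
        if (base)
                iounmap(base);
        release_mem_region(res->start, res->end - res->start + 1);
}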
diff --git a/sound/ppc/pmac.h b/sound/ppc/pmac.h
index 086da7a1890..3a9bd4dbb9a 100644
--- a/sound/ppc/pmac.h
+++ b/sound/ppc/pmac.h
@@ -113,7 +113,8 @@ struct snd_pmac {
unsigned int initialized : 1;
unsigned int feature_is_set : 1;
- unsigned int of_requested;
+ unsigned int requested;
+ struct resource rsrc[3];
int num_freqs;
int *freq_table;